Dataset schema (one field per line; ranges are min to max observed values; "classes" is the number of distinct values):
blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 2 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 69)
license_type: string (2 classes)
repo_name: string (length 5 to 118)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (length 4 to 63)
visit_date: timestamp[us]
revision_date: timestamp[us]
committer_date: timestamp[us]
github_id: int64 (2.91k to 686M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (23 classes)
gha_event_created_at: timestamp[us]
gha_created_at: timestamp[us]
gha_language: string (213 classes)
src_encoding: string (30 classes)
language: string (1 class)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (2 to 10.3M)
extension: string (246 classes)
content: string (length 2 to 10.3M)
authors: list (length 1)
author_id: string (length 0 to 212)
de171643d720ac13e8a745fb6fe61a49ef535492 | 431f9d1f7a84ee40520fd88fa6aa4e7b0d235047 | /geometric_controller/src/ss/trajectory_simulation.py | 1b5ab398a461b4320bbd39442869d76e517a8ecc | [] | no_license | indsy123/Quadrotor-Navigation-using-Receding-Horizon-planning | aa0571457292ea2b1eefcd2119332430083c7c42 | 255e573a42660420fa0d3ce6dac252df8d737c8c | refs/heads/master | 2020-07-26T13:14:34.814707 | 2020-03-24T21:38:16 | 2020-03-24T21:38:16 | 208,655,725 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,758 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 17:28:19 2017
This initial script generates a dummy trajectory. It should eventually be
replaced with the trajectory you actually want, or with your own method of
generating one.
Basically I made my own message file called "Desired_trajectory": a message
type that has position, velocity, acceleration and direction. Velocity and
acceleration need not be here, since Lee's paper defines the trajectory as
an (x, y, z) position of the CoG plus a direction.
The current trajectory is the one used in example (1) of the paper
"Geometric tracking control of a quadrotor UAV on SE(3)" by T. Lee.
You can change it however you want: just pick a curve [x(t), y(t), z(t)] and
a direction [cos(pi*t), sin(pi*t), 0], or whatever you fancy, then
differentiate x, y and z to get the velocities and accelerations.
While it is possible to drop the velocities and accelerations here and
compute them in the controller script, I found this did not save much time.
You may also need to change queue_size and the publishing frequency in
"r = rospy.Rate(n)". With this setup my laptop can generate at most 155 Hz.
"""
__author__ = 'Indrajeet yadav'
__version__ = '0.1'
__license__ = 'Nil'
import numpy as np
import rospy
from isy_geometric_controller.msg import Desired_Trajectory
from isy_geometric_controller.msg import modifiedodometry
from nav_msgs.msg import Odometry
import time
import scipy
from scipy import special
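# --- A minimal, self-contained sketch (an illustration, not part of the
# original node) of the analytic trajectory described in the docstring
# above: a closed curve [x(t), y(t), z(t)] whose velocity and acceleration
# come from differentiating each coordinate by hand. The period T = 12 s
# matches self.T below; the amplitudes are illustrative assumptions.
def _example_trajectory(t, w=2 * np.pi / 12.0):
    pos = np.array([np.cos(w * t),
                    0.5 * np.sin(2 * w * t),
                    0.75 + 0.25 * np.sin(w * t)])
    vel = np.array([-w * np.sin(w * t),            # d/dt of cos(w*t)
                    w * np.cos(2 * w * t),         # d/dt of 0.5*sin(2*w*t)
                    0.25 * w * np.cos(w * t)])     # d/dt of 0.25*sin(w*t)
    acc = np.array([-w ** 2 * np.cos(w * t),
                    -2 * w ** 2 * np.sin(2 * w * t),
                    -0.25 * w ** 2 * np.sin(w * t)])
    return pos, vel, acc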
class trajectory(object):
"calculates desired position, linear velocity, linear acceleration and direction"
def __init__(self, name_of_uav, time_instance):
self.time = time.time()
self.counter = 0
self.uav = name_of_uav
self.pub = rospy.Publisher('/desired_trajectory', Desired_Trajectory, queue_size = 10, tcp_nodelay = True)
self.T = 12
self.w = 2*np.pi/self.T
try:
#rospy.Subscriber('/'+self.uav+'/odom', Odometry, self.callback, queue_size = 10, tcp_nodelay = True)
rospy.Subscriber('/'+self.uav+'/odometry_sensor1/odometry', Odometry, self.callback, queue_size = 100, tcp_nodelay = True)
#rospy.Subscriber('/'+self.uav+'/odom', Odometry, self.callback, queue_size = 100, tcp_nodelay = True)
        except Exception as e:
            print('problem subscribing to odometry topic: %s' % e)
def callback(self, data):
#print self.time
msg = Desired_Trajectory()
msg.header.stamp = data.header.stamp
#msg.header.stamp = rospy.Time.now()
t = time.time()
tt = t-self.time
if tt<=3:
#msg.desired_position.x = 1.0 * np.cos(self.w*tt)
#msg.desired_position.y = 1.0 * 0.5* np.sin(2*self.w*tt)
#msg.desired_position.z = 0.75 + 0.25*np.sin(self.w*tt)
msg.desired_velocity.x = 0#-1.0 * (self.w) * np.sin(self.w*tt)
msg.desired_velocity.y = 0#1.0*0.5 * (2*self.w) * np.cos(2*self.w*tt)
msg.desired_velocity.z = 0.5#0.25*self.w*np.cos(self.w*tt)
msg.desired_acceleration.x = 0#-1.0 * (self.w)**2 * np.cos(self.w*tt)
msg.desired_acceleration.y = 0#-1.0*0.5 * (2*self.w)**2 * np.sin(2*self.w*tt)
msg.desired_acceleration.z = 0#-0.25*self.w**2*np.sin(self.w*tt)
msg.desired_direction.x = 1#np.cos(2*self.w*tt)
msg.desired_direction.y = 0#np.sin(2*self.w*tt)
msg.desired_direction.z = 0
else:
#msg.desired_position.x = 1.0
#msg.desired_position.y = 0.0
#msg.desired_position.z = 0.15
msg.desired_velocity.x = 0.0
msg.desired_velocity.y = 0.0
msg.desired_velocity.z = 0
msg.desired_acceleration.x = 0.0
msg.desired_acceleration.y = 0.0
msg.desired_acceleration.z = 0
msg.desired_direction.x = 1
msg.desired_direction.y = 0
msg.desired_direction.z = 0
msg.controller = 1 # position controller
self.pub.publish(msg)
# may get rid of the code below evntually when the trajectory topic will be
# subscribed in the main controller script. Remember to initilize the
# "Trajectory" node in controller script eventually.
if __name__ == '__main__':
name = 'firefly'
#name = rospy.get_param('~vehicle_name')
rospy.init_node('Trajectory', anonymous=False, log_level=rospy.DEBUG)
r = rospy.Rate(200)
start_time = time.time()
try:
while not rospy.is_shutdown():
current_time = time.time()
t = current_time-start_time
#print t
traj = trajectory(name, current_time)
rospy.spin()
#print 'a' , time.time()-a
r.sleep()
    except rospy.ROSInterruptException:
pass
| [
"indragt@udel.edu"
] | indragt@udel.edu |
4ad2bde6bdca921a681ab2a19739f7ec109f0855 | 72f55f2b9899af5cf60789b935a69f2e5ddfc814 | /src/analytics/migrations/0007_auto_20161212_0735.py | 030a645dc7873b36a13eae12fd481f3aeaae826b | [] | no_license | apapatp/svrup-learning-no-rest | cf4ab52de0a060207844a8fb3c58455b7142ff35 | 554d26245a9db2e22f4ed22928f4815b186e486b | refs/heads/master | 2021-01-12T09:32:15.866093 | 2016-12-30T09:47:43 | 2016-12-30T09:47:43 | 76,186,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('analytics', '0006_auto_20161212_0624'),
]
operations = [
migrations.AlterField(
model_name='pageview',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2016, 12, 12, 7, 35, 14, 118666, tzinfo=utc)),
preserve_default=True,
),
]
| [
"tolu@Tolus-MacBook-Pro.local"
] | tolu@Tolus-MacBook-Pro.local |
a8518cb7746c3200f3217bba2498fb3fe7e3c877 | 878eb4b539d77051dd7330389b90d988d9aef8f3 | /CAPITULO 7/Exercicio R.py | af3ef2eb5c93a5dc7848b95c4831130451504429 | [
"MIT"
] | permissive | LarmIg/Algoritmos-Python | a031badc9f607cbbc109ee4ca8bfe60d5636d867 | f2c9889705cacac007833f6ab9a413b06213f882 | refs/heads/master | 2022-11-25T05:36:26.313469 | 2020-07-25T11:59:36 | 2020-07-25T11:59:36 | 282,436,201 | 0 | 0 | MIT | 2020-07-25T12:08:13 | 2020-07-25T12:08:12 | null | UTF-8 | Python | false | false | 767 | py | # Write a program that reads data into two one-dimensional (vector) arrays, A and B, where A holds ten elements and B holds five. The elements stored in the arrays must be strings. Build an array C able to hold a total of 15 elements and merge arrays A and B into C. Display the contents of C in descending alphabetical order
A = []
B = []
C = []
for i in range(10):
A.append(str(input('Informe um valor para a Matriz A[{}]'.format(i))))
for i in range(5):
B.append(str(input('Informe um valor para a Matriz B[{}]'.format(i))))
C = A + B
C.sort(reverse = True)
for i in range(len(C)):
print('C[{}] = {}'.format(i, C[i]))
| [
"noreply@github.com"
] | LarmIg.noreply@github.com |
caebf84579717f9af88612898b4b4390d7755b86 | f62be83925849ab2841565ab264dedf1ee74a689 | /S&PTimeTest.py | 3e69a50e2aaa5ea747781acb9fb522f9a524ad58 | [] | no_license | evy555/Stock-day-of-week-return-analysis | 043501581615bfe1979878ad01cada990eb9cb08 | 28cee2a04475db6801a4fe8c8f0a51c9a5f2959b | refs/heads/master | 2020-04-01T20:59:05.613334 | 2016-06-08T22:46:08 | 2016-06-08T22:46:08 | 60,735,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,653 | py | import pandas as pd
import numpy as np
import os
# pandas.io.data was split out of pandas; the same get_data_yahoo API
# now lives in the separate pandas-datareader package.
from pandas_datareader import data as pdr
from pandas import Series, DataFrame
from pandas import ExcelWriter
from pandas import read_csv
import matplotlib.pyplot as plt
import datetime
from scipy.stats import ttest_1samp
from random import randint
now = datetime.datetime.now()
ticker = '^GSPC'  # renamed: 'list' shadowed the builtin
start = None
while start is None:
try:
start = datetime.datetime(randint(1950,2015), randint(1,12), randint(1,31))
    except ValueError:
        pass  # invalid random date (e.g. Feb 30); draw again
end = datetime.datetime(now.year, now.month, now.day)
df = pdr.get_data_yahoo(ticker, start, end)['Adj Close']
df = DataFrame(df)
df['Returns'] = df.pct_change()
df['Date'] = df.index
df['Date'] = [time.date() for time in df['Date']]
l = df.index.values
for i in range(0,len(l)):
df.loc[l[i], 'DayoftheWeek'] = datetime.datetime.strptime(str(df.loc[l[i], 'Date']), '%Y-%m-%d').strftime('%A')
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
Monday = 0
MonCount = 0
Mon = []
Tuesday = 0
TueCount = 0
Tue = []
Wednesday = 0
WedCount = 0
Wed = []
Thursday = 0
ThuCount = 0
Thu = []
Friday = 0
FriCount = 0
Fri = []
# Loop through the rows, accumulating each weekday's total return and count, then compute each average.
for i in range(1,len(l)):
dump = 0
if df.loc[l[i], 'DayoftheWeek'] == 'Monday':
Monday = Monday + df.loc[l[i], "Returns"]
MonCount = MonCount + 1
Mon.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Tuesday':
Tuesday = Tuesday + df.loc[l[i], "Returns"]
TueCount = TueCount + 1
Tue.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Wednesday':
Wednesday = Wednesday + df.loc[l[i], "Returns"]
WedCount = WedCount + 1
Wed.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Thursday':
Thursday = Thursday + df.loc[l[i], "Returns"]
ThuCount = ThuCount + 1
Thu.append(df.loc[l[i],'Returns'])
if df.loc[l[i], 'DayoftheWeek'] == 'Friday':
Friday = Friday + df.loc[l[i], "Returns"]
FriCount = FriCount + 1
Fri.append(df.loc[l[i],'Returns'])
else:
dump = dump + df.loc[l[i], 'Returns']
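# The same per-weekday averages in one pandas call (a sketch using the
# columns built above; it should agree with the manual accumulators):
day_means = df.groupby('DayoftheWeek')['Returns'].mean()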
day_avgs = {'Monday': Monday/MonCount, 'Tuesday': Tuesday/TueCount, 'Wednesday': Wednesday/WedCount, 'Thursday': Thursday/ThuCount, 'Friday': Friday/FriCount}  # renamed: 'dict' shadowed the builtin
dg = pd.Series(day_avgs, name='DailyValue')
dff = DataFrame(dg)
dff['Day'] = dff.index
dff['Sorter'] = [5,1,4,2,3]
dff.sort_values(by = ['Sorter'], inplace = True)
#dff.sort(['Day'], ascending = True)
#dff.plot(kind='bar', grid = True, y = ['DailyValue'])
plt.show()
# Buy/Sell decision
for i in range(1,len(l)):
if df.loc[l[i], 'DayoftheWeek'] == 'Friday':
df.loc[l[i], "Signal"] = "Sell"
df.loc[l[i], "Market"] = 1
elif df.loc[l[i], 'DayoftheWeek'] == 'Monday':
df.loc[l[i], "Signal"] = "Buy"
df.loc[l[i], "Market"] = 0
else:
df.loc[l[i], 'Signal'] = "Hold"
df.loc[l[i], "Market"] = 1
# Investment calculations
df['Investment'] = ""
df['S&P500 Investment'] = ''
df.loc[l[0], 'Investment'] = 10000
df.loc[l[0], 'S&P500 Investment'] = 10000
for i in range(1,len(l)):
df.loc[l[i], 'S&P500 Investment'] = df.loc[l[i-1], 'S&P500 Investment'] * (1 + df.loc[l[i], 'Returns'])
if df.loc[l[i], "Signal"] == "Sell":
df.loc[l[i], "Investment"] = df.loc[l[i-1], 'Investment'] * (1 + df.loc[l[i], "Returns"])
elif df.loc[l[i], "Signal"] == "Buy":
df.loc[l[i], "Investment"] = df.loc[l[i-1], 'Investment']
elif df.loc[l[i], 'Signal'] == "Hold":
df.loc[l[i], 'Investment'] = df.loc[l[i-1], 'Investment'] * (1 + df.loc[l[i], "Returns"])
print(df.head())
#Excess Return over S&P500 Column
#for i in range(1,len(l)):
# df.loc[l[i], 'Excess Return'] = df.loc[l[i], 'Investment'] - df.loc[l[i], 'S&P500 Investment']
writer = ExcelWriter('Time1.xlsx')  # renamed: 'file' shadowed a builtin
df.to_excel(writer, 'Data')
writer.close()
os.startfile('Time1.xlsx')
df.plot(y = ['Investment', 'S&P500 Investment'])
plt.show()
print("Average Monday return: %s" % (Monday/MonCount))
print("Average Tuesday return: %s" % (Tuesday/TueCount))
print("Average Wednesday return: %s" % (Wednesday/WedCount))
print("Average Thursday return: %s" % (Thursday/ThuCount))
print("Average Friday return: %s" % (Friday/FriCount))
print("1 sample t-tests for each day to test significance of daily returns against 0 are as follows:")
print(ttest_1samp(Mon,0))
print(ttest_1samp(Tue,0))
print(ttest_1samp(Wed,0))
print(ttest_1samp(Thu,0))
print(ttest_1samp(Fri,0))
| [
"evansj556@yahoo.com"
] | evansj556@yahoo.com |
290b82503d5a09f87feff4d7c52eaa5bb272622d | 848fad01ed3f55e4c9a47d227f7cbabfe4f4df73 | /utils/oss.py | 86ba32fb2bafa32bb59bdb18e0650710b4b7c800 | [] | no_license | cx2c/ali_sdk | 51934a7b941fdb710b849ee6d61ce0ea9a88474f | 9edb3ff58a71feb5917982d68a57854c10c26e15 | refs/heads/master | 2020-03-21T06:49:26.720386 | 2018-06-22T02:34:32 | 2018-06-22T02:34:32 | 138,244,060 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 122 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "w.z"
# Date: 2018/3/21
class Oss(object):
pass | [
"w.z@zhangweideMBPX.lan"
] | w.z@zhangweideMBPX.lan |
986bf659063dbb4023eaaf094cd1d3cccd06ebdb | 44dbb043e52f00c9a797b1bea8f1df50dd621842 | /os-example-4.py | 69064074cfa33ba2ae8384a237bc9351ebad664a | [] | no_license | peterdocter/standardmodels | 140c238d3bef31db59641087e3f3d5413d4baba1 | 7addc313c16b416d0970461998885833614570ad | refs/heads/master | 2020-12-30T16:59:30.489486 | 2016-12-13T06:32:03 | 2016-12-13T06:32:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | import os
# where are we?
cwd = os.getcwd()
print("1", cwd)
# go down
os.chdir("samples")
print("2", os.getcwd())
# go back up
os.chdir(os.pardir)
print("3", os.getcwd())
"415074476@qq.com"
] | 415074476@qq.com |
b11a8a7651e0f8dc115584ee90faf956ed6a1f89 | 997449072baa8e50a143ae1152fd4fa83c8e1068 | /devel/.private/rrtplanner/lib/python2.7/dist-packages/rrtplanner/msg/_rrtResult.py | 7672fe8883172dee48ff70b467d5d95c919942d0 | [] | no_license | idrissahil/catkin_ws | c547a6f7be812cc0bb1a93042026f746d34e7e70 | b5d8b60c882b60bb19b8d4529257ca513b8256e3 | refs/heads/master | 2022-01-24T12:51:28.038620 | 2019-06-02T16:05:45 | 2019-06-02T16:05:45 | 175,048,655 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,030 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from rrtplanner/rrtResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import nav_msgs.msg
import std_msgs.msg
class rrtResult(genpy.Message):
_md5sum = "58d6f138c7de7ef47c75d4b7e5df5472"
_type = "rrtplanner/rrtResult"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Define the result
nav_msgs/Path path
================================================================================
MSG: nav_msgs/Path
#An array of poses that represents a Path for a robot to follow
Header header
geometry_msgs/PoseStamped[] poses
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['path']
_slot_types = ['nav_msgs/Path']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
path
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(rrtResult, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.path is None:
self.path = nav_msgs.msg.Path()
else:
self.path = nav_msgs.msg.Path()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
_x = self.path.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.path.poses)
buff.write(_struct_I.pack(length))
for val1 in self.path.poses:
_v1 = val1.header
buff.write(_get_struct_I().pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v3 = val1.pose
_v4 = _v3.position
_x = _v4
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v5 = _v3.orientation
_x = _v5
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.path is None:
self.path = nav_msgs.msg.Path()
end = 0
_x = self
start = end
end += 12
(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.path.header.frame_id = str[start:end].decode('utf-8')
else:
self.path.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.path.poses = []
for i in range(0, length):
val1 = geometry_msgs.msg.PoseStamped()
_v6 = val1.header
start = end
end += 4
(_v6.seq,) = _get_struct_I().unpack(str[start:end])
_v7 = _v6.stamp
_x = _v7
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v6.frame_id = str[start:end].decode('utf-8')
else:
_v6.frame_id = str[start:end]
_v8 = val1.pose
_v9 = _v8.position
_x = _v9
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v10 = _v8.orientation
_x = _v10
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
self.path.poses.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
_x = self.path.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.path.poses)
buff.write(_struct_I.pack(length))
for val1 in self.path.poses:
_v11 = val1.header
buff.write(_get_struct_I().pack(_v11.seq))
_v12 = _v11.stamp
_x = _v12
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v11.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v13 = val1.pose
_v14 = _v13.position
_x = _v14
buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))
_v15 = _v13.orientation
_x = _v15
buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.path is None:
self.path = nav_msgs.msg.Path()
end = 0
_x = self
start = end
end += 12
(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.path.header.frame_id = str[start:end].decode('utf-8')
else:
self.path.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.path.poses = []
for i in range(0, length):
val1 = geometry_msgs.msg.PoseStamped()
_v16 = val1.header
start = end
end += 4
(_v16.seq,) = _get_struct_I().unpack(str[start:end])
_v17 = _v16.stamp
_x = _v17
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v16.frame_id = str[start:end].decode('utf-8')
else:
_v16.frame_id = str[start:end]
_v18 = val1.pose
_v19 = _v18.position
_x = _v19
start = end
end += 24
(_x.x, _x.y, _x.z,) = _get_struct_3d().unpack(str[start:end])
_v20 = _v18.orientation
_x = _v20
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _get_struct_4d().unpack(str[start:end])
self.path.poses.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_4d = None
def _get_struct_4d():
global _struct_4d
if _struct_4d is None:
_struct_4d = struct.Struct("<4d")
return _struct_4d
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
_struct_3d = None
def _get_struct_3d():
global _struct_3d
if _struct_3d is None:
_struct_3d = struct.Struct("<3d")
return _struct_3d
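# --- Usage sketch (illustrative only, not part of this autogenerated
# file): building an rrtResult whose Path holds a single PoseStamped.
# Field names follow the message definition embedded above; the 'map'
# frame_id and the coordinate values are assumed examples.
#
# import geometry_msgs.msg
# res = rrtResult()
# res.path.header.frame_id = 'map'
# pose = geometry_msgs.msg.PoseStamped()
# pose.pose.position.x = 1.0
# res.path.poses.append(pose)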
| [
"idrissahil3@gmail.com"
] | idrissahil3@gmail.com |
235af1bbc670e956e37e472b363d092d53a2e10f | 7927424f1983eecc7c7b2f0ebaf61ad552d2a7e7 | /zigzag.py | 1e4ea4b1030d84d3446c45f2f19960e1f1f9aafc | [] | no_license | 6reg/automate | 295931d3ecf0e69e01921cc45d452fadfd1e6581 | 11e5de461ece3d8d111f3dc13de088788baf19a2 | refs/heads/main | 2023-03-08T18:39:42.991280 | 2021-02-22T20:53:13 | 2021-02-22T20:53:13 | 334,780,031 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | import time, sys
indent = 0 # How many spaces to indent
indentIncreasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop.
print(' ' * indent, end='')
print('********')
        time.sleep(0.1) # Pause for 1/10 of a second.
if indentIncreasing:
# Increase the number of spaces:
indent = indent + 1
if indent == 20:
# Change direction:
indentIncreasing = False
else:
# Decrease the number of spaces:
indent = indent - 1
if indent == 0:
# Change direction:
indentIncreasing = True
except KeyboardInterrupt:
sys.exit()
| [
"mathiasgreg@gmail.com"
] | mathiasgreg@gmail.com |
05fd2afde8a2efa035b5c2ee861b1f0e9b62fc97 | 8bdf78e902a02e3bd175e759fc98fd37277247af | /youtube_dl/extractor/mangomolo.py | 2db503f2b13dc8499a6f665ef97d3e09cfcdf35b | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | oxidius2/youtube-dl | 191f5bde4992313308d2ab010cdb82ecd0d1b654 | 30d9e20938fa91ece09c376b67030647215d48df | refs/heads/master | 2017-03-20T13:01:36.106539 | 2016-09-16T21:06:55 | 2016-09-16T21:06:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,832 | py | # coding: utf-8
from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
int_or_none,
)
class MangomoloBaseIE(InfoExtractor):
def _get_real_id(self, page_id):
return page_id
def _real_extract(self, url):
page_id = self._get_real_id(self._match_id(url))
webpage = self._download_webpage(url, page_id)
hidden_inputs = self._hidden_inputs(webpage)
m3u8_entry_protocol = 'm3u8' if self._IS_LIVE else 'm3u8_native'
format_url = self._html_search_regex(
[
r'file\s*:\s*"(https?://[^"]+?/playlist.m3u8)',
r'<a[^>]+href="(rtsp://[^"]+)"'
], webpage, 'format url')
formats = self._extract_wowza_formats(
format_url, page_id, m3u8_entry_protocol, ['smil'])
self._sort_formats(formats)
return {
'id': page_id,
'title': self._live_title(page_id) if self._IS_LIVE else page_id,
'uploader_id': hidden_inputs.get('userid'),
'duration': int_or_none(hidden_inputs.get('duration')),
'is_live': self._IS_LIVE,
'formats': formats,
}
class MangomoloVideoIE(MangomoloBaseIE):
IE_NAME = 'mangomolo:video'
_VALID_URL = r'https?://admin\.mangomolo.com/analytics/index\.php/customers/embed/video\?.*?\bid=(?P<id>\d+)'
_IS_LIVE = False
class MangomoloLiveIE(MangomoloBaseIE):
IE_NAME = 'mangomolo:live'
_VALID_URL = r'https?://admin\.mangomolo.com/analytics/index\.php/customers/embed/index\?.*?\bchannelid=(?P<id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)'
_IS_LIVE = True
def _get_real_id(self, page_id):
return base64.b64decode(compat_urllib_parse_unquote(page_id).encode()).decode()
| [
"remitamine@gmail.com"
] | remitamine@gmail.com |
696193e4863c900c995b49d8854b2fd947ef2ebd | 9dc21ebb553fd116826c7cbae7d8c5eba47423d1 | /cloneGraph.py | 81681ac2a31cf11b69ac78e24d755d692f4aee77 | [] | no_license | KJSui/leetcode-2020 | a475a8b8481231757222c5afaad2856a92572f89 | 37cf89e7fb1351b1deff09271d9bb5852395054e | refs/heads/main | 2023-04-05T19:46:25.647605 | 2021-05-06T20:40:06 | 2021-05-06T20:40:06 | 365,031,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | class Solution:
def __init__(self):
self.copy = {}
    def cloneGraph(self, node):
        if not node:
            return None
        # Return the memoized clone first so cycles terminate
        if node in self.copy:
            return self.copy[node]
        newNode = Node(node.val)
        self.copy[node] = newNode  # memoize before recursing
        neighbors = []
        for i in node.neighbors:  # the original looped over the empty list by mistake
            neighbors.append(self.cloneGraph(i))
        newNode.neighbors = neighbors
        return newNode
"jsui@digitalocean.com"
] | jsui@digitalocean.com |
2a77ffc8692138609c559a9df4f3206508debd09 | 0c153f489e523afdc33b950a6b9ee21af09e968e | /cpp/run_scripts/run_fom_basis.py | 57249f1dc5a1dc4ca8146c3f4d9c3f9afc8a949f | [] | no_license | Pressio/pressio-sisc-burgers1d | 86f1acb31d40d1aefa83b61bb4e8a7d70621cf1a | 671f45b7abd5dc59d574b6d26cc4a5f23ee90306 | refs/heads/master | 2021-01-26T01:04:20.594259 | 2020-04-26T11:32:00 | 2020-04-26T11:32:00 | 243,249,905 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,870 | py | #!/usr/bin/env python
import sys, os, time
import subprocess
import numpy as np
import os.path
from argparse import ArgumentParser
import re
import myutils, constants
def main(exeName):
# args for the executable
args = ("./"+exeName, "input.txt")
print("Starting basis runs")
# loop over mesh sizes
for iMesh in range(0, constants.num_meshes):
currentMeshSize = constants.mesh_sizes[iMesh]
print("Current currentMeshSize = ", currentMeshSize)
# create folder
parentDir='meshSize' + str(currentMeshSize)
if not os.path.exists(parentDir):
os.system('mkdir ' + parentDir)
# loop over various basis size
for i in range(0, constants.num_rom_sizes):
romSize = constants.rom_sizes[i]
print("Current romSize = ", romSize)
# based on the size of rom and number of ode steps,
# compute the sampling frequency
assert(constants.numStepsBasis % romSize == 0)
samplingFreq = int(constants.numStepsBasis/romSize)
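      # e.g. with illustrative numbers (not taken from constants.py): if
      # numStepsBasis = 400 and romSize = 20, then samplingFreq = 20, so
      # every 20th snapshot is kept and exactly romSize snapshots span the run.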
# create input file
myutils.createInputFileFomForBasis(currentMeshSize, samplingFreq)
os.system("./" + exeName + " input.txt")
#popen = subprocess.Popen(args, stdout=subprocess.PIPE)
#popen.wait()
#output = popen.stdout.read()
# create dir for this number of basis
childDir=parentDir + '/basis' + str(romSize)
if not os.path.exists(childDir): os.system('mkdir ' + childDir)
# copy files there
os.system('mv input.txt ' + childDir)
os.system('mv basis.txt ' + childDir)
os.system('mv snapshots.txt ' + childDir)
os.system('mv yFom.txt ' + childDir)
print("Done with basis runs")
if __name__== "__main__":
parser = ArgumentParser()
parser.add_argument("-exe", "--exe", dest="exeName",
help="generate basis for fom")
args = parser.parse_args()
main(args.exeName)
| [
"fnrizzi@sandia.gov"
] | fnrizzi@sandia.gov |
ebc3ed1ffe0e2caca9b9f1ca118b77aa614a399c | 04a0ff31201c67a0e6a9654369ddd3f712303584 | /module_5/pages/basket_page.py | 9458b62174897720b13b36a5b9c95bcc2af01498 | [] | no_license | titun9/stepik_lessons | 58c73e136a7be2950f7071d637a495e182a1291f | 26515d9edb2a2b8f7a09a598405d5cecb6a94f7d | refs/heads/master | 2023-06-09T07:21:20.696296 | 2021-06-30T14:00:25 | 2021-06-30T14:00:25 | 366,700,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | from .base_page import BasePage
from .locators import BasketPageLocators
from .locators import BasePageLocators
class BasketPage(BasePage):
def get_correct_message_empty_basket(self):
language = self.browser.find_element(*BasePageLocators.LANGUAGE_PAGE).get_attribute("lang")
dictionary_message = {"ru": "Ваша корзина пуста", "en-gb": "Your basket is empty", \
"es": "Tu carrito esta vacío", "fr": "Votre panier est vide"}
message = dictionary_message[language]
return message
def should_be_message_empty_basket(self):
message = self.get_correct_message_empty_basket()
message_basket_empty = self.browser.find_element(*BasketPageLocators.MESSAGE_EMPTY_BASKET).text
assert message in message_basket_empty, f"No message that basket is empty"
def should_be_product_in_basket_page(self):
assert self.is_element_present(*BasketPageLocators.TABLE_ADDED_PRODUCT), \
"Basket is not empty"
def should_not_be_product_in_basket_page(self):
assert self.is_not_element_present(*BasketPageLocators.TABLE_ADDED_PRODUCT), \
"Basket is empty"
| [
"butkevichas@cheops-edu.ru"
] | butkevichas@cheops-edu.ru |
e2a2d639b617529303a24cb365818a069f9e4628 | 423e396e226494c34f99851cc050d929f3f144c8 | /posts/admin.py | cb3ff4597adc8ff8a87e027e420a3d4c0b3387da | [] | no_license | Marihuana-Kox/hw05_final | 1ff1a34cdcb9d66fe715ffbf8d9f5fb0d0ca2820 | 77a20ac2571fec13b979e763859de6f2bce43537 | refs/heads/master | 2022-12-09T13:53:21.195711 | 2020-03-10T17:45:21 | 2020-03-10T17:45:21 | 243,992,895 | 0 | 0 | null | 2022-12-08T07:24:27 | 2020-02-29T15:27:50 | Python | UTF-8 | Python | false | false | 1,129 | py | from django.contrib import admin
from .models import Post, Group, Comment
class PostAdmin(admin.ModelAdmin):
# перечисляем поля, которые должны отображаться в админке
list_display = ("pk", "text", "pub_date", "author")
# добавляем интерфейс для поиска по тексту постов
search_fields = ("text",)
# добавляем возможность фильтрации по дате
list_filter = ("pub_date", "author")
# это свойство сработает для всех колонок: где пусто - там будет эта строка
empty_value_display = '-пусто-'
class CommentAdmin(admin.ModelAdmin):
list_display = ("pk", "text", "author", "created")
search_fields = ("text",)
list_filter = ("created", "author")
# при регистрации модели Post источником конфигурации для неё назначаем класс PostAdmin
admin.site.register(Post, PostAdmin)
admin.site.register(Group)
admin.site.register(Comment, CommentAdmin)
| [
"yakuhs@yandex.ru"
] | yakuhs@yandex.ru |
97d55e2aec24c8c3c273787b6a0bfb6e207c6ee0 | c261f0e98eedb4f0d85e92bd6ab8f4ae47096269 | /lifeservice/schedule117/04美食下载团购糯米/getNuomiOtherCinemaMap.py | 7e6d7d90119847ca9a6a6e964889df38e7707452 | [] | no_license | ShenDezhou/CPP | 24379fe24f3c8588a7859ee586527d5cc6bfbe73 | 933c1e764a6ed2879b26aa548ff67153ca026bf6 | refs/heads/master | 2021-01-11T22:09:24.900695 | 2017-04-05T02:04:07 | 2017-04-05T02:04:07 | 78,928,291 | 0 | 1 | null | null | null | null | GB18030 | Python | false | false | 1,328 | py |
#coding=gb2312
nuomiCinemaMap = dict()
otherCinemaMap = dict()
input = '/fuwu/Merger/Output/movie/cinema_movie_rel.table'
for line in open(input):
segs = line.strip('\n').decode('gb2312', 'ignore').split('\t')
cinemaid, source, ting = segs[1], segs[3], segs[9]
if source.find(u'糯米') != -1:
if cinemaid not in nuomiCinemaMap:
nuomiCinemaMap[cinemaid] = []
if ting not in nuomiCinemaMap[cinemaid]:
nuomiCinemaMap[cinemaid].append(ting)
else:
if cinemaid not in otherCinemaMap:
otherCinemaMap[cinemaid] = []
if ting not in otherCinemaMap[cinemaid]:
otherCinemaMap[cinemaid].append(ting)
# Check whether every hall name of the Nuomi cinemas is covered by the other sources
for cinemaid in otherCinemaMap:
if cinemaid not in nuomiCinemaMap:
#print ('#%s\t%s\t%s' % (cinemaid, u'糯米', '\t'.join(nuomiCinemaMap[cinemaid]))).encode('gb2312', 'ignore')
continue
noMatchTingList = []
for ting in nuomiCinemaMap[cinemaid]:
if ting not in otherCinemaMap[cinemaid]:
noMatchTingList.append(ting)
if len(noMatchTingList) == 0:
continue
# 存在不一致的情况
normTing = '\t'.join(otherCinemaMap[cinemaid])
noMatchTing = '\t'.join(noMatchTingList)
print ('%s\t%s\t%s' % (cinemaid, u'非糯米', normTing)).encode('gb2312', 'ignore')
print ('%s\t%s\t%s' % (cinemaid, u'糯米', noMatchTing)).encode('gb2312', 'ignore')
| [
"bangtech@sina.com"
] | bangtech@sina.com |
7e1915a371be95b8b39560a1b5d338aaa86da5fe | d0e26d18017b825dd18919b0f87d0c99c15e1247 | /twitterclone/urls.py | f38223bc8a2886dc69db15f43acff166caa1a2ab | [] | no_license | davidstewy/twitterclone | 9e202f852514e72141c39e83f6d47e44f095be28 | 7e4395f9415854ca1edc3b81232f1303badb9543 | refs/heads/master | 2020-04-30T05:55:22.162908 | 2019-03-20T02:54:13 | 2019-03-20T02:54:13 | 176,638,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | """twitterclone URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from twitterclone.views import homepage
from twitteruser.urls import urlpatterns as userurls
from notification.urls import urlpatterns as notificationsurls
from tweet.urls import urlpatterns as tweeturls
urlpatterns = [
path('admin/', admin.site.urls),
path('', homepage, name='homepage'),
]
urlpatterns += userurls
urlpatterns += tweeturls
urlpatterns += notificationsurls
| [
"davidstewy@gmail.com"
] | davidstewy@gmail.com |
a497ba217122e7b18367fa57adc6a0602064311d | eb333acea85364d39f2811ae368dd35bc84392f0 | /exts/counting.py | 0b1623741328e7c6745febe4359c2f8f373a044b | [] | no_license | blueeidk/vendetta | 7312b37e469ba2abbb46be07ba84365086f0cac3 | e697dd3ebc224d50399dd8c4c0ee1d8f67085151 | refs/heads/master | 2023-04-12T19:22:13.009886 | 2021-05-10T20:29:42 | 2021-05-10T20:29:42 | 366,365,871 | 0 | 0 | null | 2021-05-11T12:01:11 | 2021-05-11T11:58:46 | null | UTF-8 | Python | false | false | 1,939 | py | import discord
from discord.ext import commands, tasks
from discord import Webhook, AsyncWebhookAdapter
class Counting(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.current_num = -1
self.fetch_num.start()
def cog_unload(self):
self.fetch_num.cancel()
@tasks.loop(seconds=60*1)
async def fetch_num(self):
await self.bot.wait_until_ready()
channel = self.bot.get_channel(self.bot.config["counting_channel"])
async for message in channel.history(limit=100):
try:
self.current_num = int(message.content)
break
except ValueError:
continue
if self.current_num == -1:
self.current_num = 0
@commands.Cog.listener()
async def on_message(self, message):
if message.channel.id == self.bot.config["counting_channel"] and not message.author.bot:
await message.delete()
try:
if int(message.content) != self.current_num + 1:
raise ValueError
except ValueError:
webhook = Webhook.from_url(self.bot.config["counting_webhookurl"],
adapter=AsyncWebhookAdapter(self.bot.session))
await webhook.send(message.content, username=message.author.name, avatar_url=message.author.avatar_url)
self.current_num = 0
await message.channel.send("Looks like someone made a mistake! Lets start again:")
await message.channel.send("0")
return
webhook = Webhook.from_url(self.bot.config["counting_webhookurl"], adapter=AsyncWebhookAdapter(self.bot.session))
await webhook.send(message.content, username=message.author.name, avatar_url=message.author.avatar_url)
self.current_num += 1
def setup(bot):
bot.add_cog(Counting(bot)) | [
"niteblock@gmail.com"
] | niteblock@gmail.com |
f4fb165252962fe02564d44fc8d8a6cb9eaef1e9 | c591f5676468a7447f0e4f104c4889debb35c051 | /resources/idc/__init__.py | 4a6431ad2c6890dd3d7348b37981f6a9a2f2b983 | [] | no_license | zhagyilig/Adahome | 3f3bc1b664bd65964b8befa78405c07da3c8a228 | 76f08be7c21e90bb58803aa1c11be59f66332f42 | refs/heads/dev | 2022-12-12T11:51:30.341859 | 2019-07-10T04:22:12 | 2019-07-10T04:22:12 | 149,948,322 | 2 | 4 | null | 2022-12-08T01:01:36 | 2018-09-23T04:39:23 | HTML | UTF-8 | Python | false | false | 3,671 | py | # coding=utf-8
# author: zhangyiling
from django.shortcuts import render
from django.views.generic import TemplateView, ListView
from django.contrib.auth.mixins import LoginRequiredMixin # 登陆验证
from django.shortcuts import redirect # 页面跳转
from django.shortcuts import reverse # 反转解析url的'name='
from django.http import HttpResponse
from resources.models import Idc
import json
from resources.forms import CreateIdcForm
'''
1. Add an IDC, using a TemplateView
'''
class AddidcTemView(LoginRequiredMixin, TemplateView):
template_name = 'resources/idc/add_idc.html'
def post(self, request):
'''
        Get the data submitted by the add-IDC form
:param request:
:return:
'''
        # print(request.POST)  # dump the submitted form data
        # print(reverse('success', kwargs={'next': 'user_list'}))
        # output: /dashboard/success/user_list/
        # print(redirect('success', next='user_list'))
        # output: <HttpResponseRedirect status_code=302, "text/html; charset=utf-8", url="/dashboard/success/user_list/">
        # reverse
        # redirect: the difference between the two: reverse takes the kwargs dict, while redirect takes args/kwargs
""" 更新使用django表单验证
# 第一步: 获取表单数据
name = request.POST.get('name', '')
idc_name = request.POST.get('idc_name', '')
address = request.POST.get('address', '')
phone = request.POST.get('phone', '')
email = request.POST.get('email', '')
username = request.POST.get('username', '')
# 第二步: 验证数据, 这里只是简单的校验
error_msg = []
if not name:
error_msg.append('idc简称不能为空')
if not idc_name:
error_msg.append('idc_name不能为空')
if error_msg:
# print(error_msg)
return redirect('error', next='add_idc', msg=json.dumps(error_msg, ensure_ascii=False))
# 第三步: 实例化
idc = Idc()
idc.name = name
idc.idc_name = idc_name
idc.address = address
idc.phone = phone
idc.email = email
idc.username = username
try:
idc.save()
except Exception as e:
return redirect('error', next='idc_list', msg=e.args)
return redirect('success', next='idc_list') # 返回成功页面;next是success的关键参数名
# return redirect('error', next='user_list', msg='这是错误页面测试')# 返回错误页面;next/msg是error的关键参数名
"""
        # use Django form validation
        idcform = CreateIdcForm(request.POST)  # request.POST holds the submitted form data
        # print('idcform %s' % idcform)
        if idcform.is_valid():  # validate the data
            idc = Idc(**idcform.cleaned_data)  # cleaned_data holds the validated fields
try:
idc.save()
return redirect('success', next='idc_list')
except Exception as e:
return redirect('error', next='idc_list', msg=e.args)
else:
# print(json.dumps(json.loads(idcform.errors.as_json()), ensure_ascii=False))
# return HttpResponse('')
error_msg = json.dumps(json.loads(idcform.errors.as_json()), ensure_ascii=False)
return redirect('error', next='idc_list', msg=error_msg)
'''
2.idc 详细信息列表, 使用ListView
'''
class IdcListView(LoginRequiredMixin, ListView):
template_name = 'resources/idc/idc_list.html'
model = Idc
    paginate_by = 10  # 10 items per page
    ordering = 'id'  # order the list by id
| [
"YilingZhang@YilingZhang.local"
] | YilingZhang@YilingZhang.local |
2cbf9ce5648b670ee81e72a542610d78690a54f4 | 1097ed333a4000634e68a590ee6ffc6129ae61e3 | /written_examination/matrix8.py | 017cb25ae0dcc0f546bd9b3cf05825723bb344a7 | [
"MIT"
] | permissive | AutuanLiu/Code-Storm2019 | 1bbe890c7ca0d033c32348173bfebba612623a90 | 8efc7c5475fd888f7d86c3b08a3c1c9e55c1ac30 | refs/heads/master | 2020-04-23T07:03:08.975232 | 2019-10-24T08:56:26 | 2019-10-24T08:56:26 | 170,995,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,513 | py | def getSum(i, j, n, m, maps): # [i, j]单阵入口,[n,m]矩阵维度数,maps矩阵
queue, sump, maps[i][j] = [[i, j]], maps[i][j], 0 # 初始化队列
while queue:
x, y = queue[0][0], queue[0][1] # 获取队列头元素
for dx, dy in zip((-1, -1, 0, 1, 1, 1, 0, -1), (0, 1, 1, 1, 0, -1, -1, -1)): # 8个方向
nx, ny = x + dx, y + dy
if -1 < nx < n and -1 < ny < m and maps[nx][ny] != 0:
queue.append([nx, ny]) # 入队
sump += maps[nx][ny] # 累计兵力
maps[nx][ny] = 0 # 累计过的单个区域兵力为0
del queue[0] # 出队
return sump # 返回单阵的兵力总和
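# Aside (a suggestion, not in the original): del queue[0] on a Python list
# is O(n); collections.deque gives O(1) left pops for the same BFS, e.g.
#   from collections import deque
#   queue = deque([[i, j]]); x, y = queue.popleft()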
if __name__ == '__main__':
maps = [[34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 30], [0, 23, 10, 5, 5, 0, 0, 0, 5, 5, 5, 5, 5, 0, 0, 0, 30, 0, 40, 0],
[0, 9, 0, 0, 5, 0, 0, 0, 4, 4, 4, 4, 4, 0, 0, 0, 0, 30, 0, 0], [0, 8, 7, 7, 0, 5, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 7, 0, 9, 0],
[0, 9, 0, 0, 5, 0, 5, 0, 0, 12, 12, 0, 0, 0, 0, 10, 0, 0, 0, 9], [0, 0, 0, 0, 5, 0, 0, 5, 0, 12, 12, 0, 0, 5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 12, 0, 0, 5, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0], [40, 30, 3, 6, 6, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 10, 0],
[0, 0, 20, 0, 0, 6, 6, 0, 0, 0, 0, 0, 0, 0, 5, 6, 5, 10, 10, 0], [40, 30, 3, 7, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 10, 0],
[0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 17, 0, 0, 6, 5, 7, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0], [0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0],
[0, 20, 0, 0, 7, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0, 10, 0, 0, 0], [0, 30, 0, 7, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 10, 0, 50],
[0, 40, 7, 0, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 0, 0, 0, 0, 50, 0], [43, 30, 25, 10, 50, 0, 0, 0, 6, 6, 6, 6, 0, 0, 0, 0, 0, 50, 0, 0]]
    n, m = 20, 20 # grid rows and columns
army = []
for i in range(20):
for j in range(20):
if maps[i][j] != 0:
                army.append(getSum(i, j, n, m, maps)) # collect each cluster's troop total
    print('Troop total of each cluster:', army)
    print('Largest cluster strength:', max(army))
    print('Smallest cluster strength:', min(army))
| [
"autuanliu@163.com"
] | autuanliu@163.com |
ada7809ed008445486cb53ed74ffb2f3f533ab06 | c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171 | /daily-coding-problems/problem429.py | f131f4e79b05103324b498c75f6d6f5240e45cd3 | [] | no_license | carlhinderer/python-exercises | c8367517fdf835fa1117f96dbfee3dccc596afa6 | 4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7 | refs/heads/master | 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 | Python | UTF-8 | Python | false | false | 533 | py | # Problem 429
# Medium
# Asked by Stitch Fix
#
# Pascal's triangle is a triangular array of integers constructed with the
# following formula:
#
# The first row consists of the number 1.
#
# For each subsequent row, each element is the sum of the numbers directly
# above it, on either side.
#
# For example, here are the first few rows:
#
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
#
# Given an input k, return the kth row of Pascal's triangle.
#
# Bonus: Can you do this using only O(k) space?
# | [
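# A minimal sketch of one accepted approach (not part of the original
# problem statement): build row k in place, updating right to left, so
# only O(k) integers are ever stored.
def pascal_row(k):
    # 0-indexed: pascal_row(4) -> [1, 4, 6, 4, 1]
    row = [1] * (k + 1)
    for i in range(1, k + 1):
        # go right to left so row[j - 1] still holds the previous row's value
        for j in range(i - 1, 0, -1):
            row[j] += row[j - 1]
    return row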
"carl.hinderer4@gmail.com"
] | carl.hinderer4@gmail.com |
a31be73325befa7634569a9b289ebac7e238c219 | f4bdd0d988ed63ed314f5703abd3543cded9f49e | /Amazon/Reviews & Big Data Analytics/Amazon_LDA.py | 32ae2a94f52d0aa94ba4eaf229433dab27abf4ff | [] | no_license | jessicakaye/Python-Projects | 643f0e1808163187cfe3db7d5adff800e2e3a98c | 8365e84f110b53df2bd54604f2206e9bc1f09617 | refs/heads/master | 2022-05-02T07:37:09.591545 | 2022-03-10T01:28:39 | 2022-03-10T01:28:39 | 253,980,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,244 | py | # Amazon_LDA.py
# 4/28/20
# @jessicakaye
# Used to conduct LDA on the top 10 most reviewed Amazon products in a dataset
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from time import time
from time import time
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from wordcloud import WordCloud
pd.set_option('display.max_columns', None)
# Load the dataset!
df = pd.read_json('AmazonData_text_processed_full.json', lines = True)
print(df)
print(df.describe())
# Let's drop those duplicates
df.drop_duplicates(['overall', 'reviewText', 'reviewTime', 'asin', 'reviewerID'], inplace=True)
#plot for all of the products
plt.figure(figsize=(16,10))
ax = sns.countplot(x='asin', data = df, palette = 'Set1', order=df['asin'].value_counts().index)
plt.xlabel('ASIN', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x()+p.get_width()/2.,
height + 10,
'{}'.format(height),
ha="center")
plt.title("Count of Reviews Per ASIN")
plt.savefig("Count of Reviews Per ASIN.png")
#Distribution of Ratings!
plt.figure()
ax = sns.countplot(x='overall', data=df, palette='Set1', order=df['overall'].value_counts().index)
plt.xlabel('overall', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x() + p.get_width() / 2.,
height + 10,
'{0:.0%}'.format(height / total),
ha="center")
plt.title("Count of Reviews Per Rating")
plt.savefig("Count of Reviews Per Rating.png")
# Distribution of NPS Categories!
plt.figure()
ax = sns.countplot(x='nps_category', data=df, palette='Set1', order=df['nps_category'].value_counts().index)
plt.xlabel('nps_category', fontsize=12)
plt.ylabel('Count', fontsize=12)
total = float(len(df))
for p in ax.patches:
height = p.get_height()
ax.text(p.get_x() + p.get_width() / 2.,
height + 10,
'{0:.0%}'.format(height / total),
ha="center")
plt.title("Count of Reviews Per NPS Category")
plt.savefig("Count of Reviews Per NPS Category.png")
# Let's create a wordcloud!
wordcloud = WordCloud(background_color="white", max_words=5000, contour_width=3, contour_color='steelblue')
wordcloud.generate(df['filtered'].to_string())
# plot the wordcloud!
plt.figure(figsize=(16,10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.savefig('wordcloudoftop10products')
# Let's optimize our df and try using CountVectorizer
# I already have these columns from text processing in Spark, but I want to try the following in sklearn
amazon_df = df.drop(labels=['raw_features', 'features'], axis=1)
# Let's create a list of all of the different ASINs
list_asins = amazon_df.asin.unique()
sns.set_style('whitegrid')
# Helper function
def plot_10_most_common_words(asin, count_data, count_vectorizer):
words = count_vectorizer.get_feature_names()
total_counts = np.zeros(len(words))
for t in count_data:
total_counts += t.toarray()[0]
count_dict = (zip(words, total_counts))
count_dict = sorted(count_dict, key=lambda x: x[1], reverse=True)[0:10]
words = [w[0] for w in count_dict]
counts = [w[1] for w in count_dict]
x_pos = np.arange(len(words))
plt.figure(2, figsize=(15, 15 / 1.6180))
plt.subplot(title=f'10 most common words for {asin}')
sns.set_context("notebook", font_scale=1.25, rc={"lines.linewidth": 2.5})
sns.barplot(x_pos, counts, palette='husl')
plt.xticks(x_pos, words, rotation=90)
plt.xlabel('words')
plt.ylabel('counts')
plt.tight_layout()
plt.savefig(f'{asin}_topwords.png')
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
message = "Topic #%d: " % topic_idx
message += " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
print(message)
print()
def topics_words(model, feature_names, n_top_words):
topics = []
words =[]
for topic_idx, topic in enumerate(model.components_):
topics.append(topic_idx)
words.append([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]])
new_df = pd.DataFrame(list(zip(topics, words)), columns=['topicID', 'words'])
return new_df
n_top_words = 6
n_components = 7
all_words_and_topics = pd.DataFrame(columns=['topicID', 'words', 'asin', 'num documents'])
all_asins_df = pd.DataFrame(columns=list(amazon_df.columns.values))
# We want to find the top words per product. Let's create a loop.
for asin in list_asins:
asin_df = amazon_df.loc[amazon_df['asin'] == str(asin)]
asin_df.reset_index(inplace=True)
# Initialise the count vectorizer with the English stop words
# We are going to use the raw term count for LDA
print("Extracting tf features for LDA...")
    stop_words = ENGLISH_STOP_WORDS
    # With a callable analyzer, CountVectorizer ignores its stop_words
    # argument, so the lambda filters stop words itself.
    cv = CountVectorizer(analyzer=lambda x: [w for w in x if w not in stop_words])
# Fit and transform the processed titles
t0 = time()
count_vector = cv.fit_transform(asin_df['filtered'])
print("done in %0.3fs." % (time() - t0))
print()
# Materialize the sparse data
data_dense = count_vector.todense()
# Compute Sparsicity = Percentage of Non-Zero cells
print("Sparsicity: ", ((data_dense > 0).sum() / data_dense.size) * 100, "%")
# Visualise the 10 most common words
plot_10_most_common_words(asin, count_vector, cv)
print("Fitting LDA models with tf features...")
lda = LatentDirichletAllocation(n_components=n_components, learning_method='online')
t0 = time()
# This is the Document - Topic Matrix
lda_output = lda.fit_transform(count_vector)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = cv.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
# Log Likelihood: Higher the better
print("Log Likelihood: ", lda.score(count_vector))
# Perplexity: Lower the better. Perplexity = exp(-1. * log-likelihood per word)
print("Perplexity: ", lda.perplexity(count_vector))
# See model parameters
# print(lda.get_params())
# column names
topicnames = ["Topic" + str(i) for i in range(lda.n_components)]
# index names
docnames = ["Doc" + str(i) for i in range(asin_df.shape[0])]
# Make the pandas dataframe
df_document_topic = pd.DataFrame(np.round(lda_output, 2), columns=topicnames)#, index=docnames)
# Get dominant topic for each document
dominant_topic = np.argmax(df_document_topic.values, axis=1)
df_document_topic['dominant_topic_weight'] = np.amax(df_document_topic, axis=1)
df_document_topic['dominant_topic'] = dominant_topic
print(df_document_topic)
asin_df = asin_df.join(df_document_topic['dominant_topic'].astype('int'), how = 'inner')
asin_df = asin_df.join(df_document_topic['dominant_topic_weight'], how='inner')
all_asins_df = pd.concat([all_asins_df, asin_df])
#What is the topic distribution across documents?
df_topic_distribution = df_document_topic['dominant_topic'].value_counts().reset_index(name="num documents")
df_topic_distribution.columns = ['topicID', 'num documents']
print(df_topic_distribution)
asintw = topics_words(lda, tf_feature_names, n_top_words)
asintw['asin'] = asin
asintw = asintw.merge(df_topic_distribution, on = "topicID", how = "inner")
all_words_and_topics = pd.concat([all_words_and_topics, asintw])
print(all_words_and_topics)
print(all_asins_df)
all_asins_df.to_csv('all_asins_and_indices.csv')
all_words_and_topics.to_csv('all_words_and_topics.csv')
| [
"noreply@github.com"
] | jessicakaye.noreply@github.com |
67b528a1d4897d406c2df773535234cf98e46ce4 | b7ada17734345131348d541d269c171ffbf88508 | /Clase 15-11-2019/EJM EXCEPCIONES.py | ffef2497de09d7ed5d0c969e35a71e143b8da847 | [] | no_license | PatrickPuente/Curso-Python-CEC-EPN | 709094e0e10c26b5bb4883649383c9660b227c32 | 83c9e4f85ca939f12d4fc536e46f58c4470ffa0d | refs/heads/master | 2020-09-11T16:18:56.670104 | 2019-11-16T17:43:50 | 2019-11-16T17:43:50 | 222,123,485 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import math
'''try:
y = 1/0
except ZeroDivisionError:
print("Zero Division")
except ArithmeticError:
print("Arithmetic Problem")
print("THE END")
# Variants
def badFun(n):
try:
return 1/n
except ArithmeticError:
print("Arithmetic Problem")
return None
badFun(0)
print("THE END")'''
'''def badFun(n):
try:
return n/0
except:
print("I did it again")
raise
try:
badFun(0)
except ArithmeticError:
print("dasdsa")'''
x = float(input("Enter a Number: "))
assert x>=0.0
x = math.sqrt(x)
print(x) | [
"noreply@github.com"
] | PatrickPuente.noreply@github.com |
897350387fa941830a98c5edbca3834b1d382a04 | 77e0adf27f8ce8ada31937045d31d063f6661434 | /noteapp/serializers.py | d79624bd60e6d29c39a0ea99f8d0c5c9c37ab2a7 | [] | no_license | naveenijeri/urbanstop_drf | f84185d6e1ba043e96535e67429d1cf421430eee | 33dfe71507cc02d85e5e1b1e19efc40eed24c4f4 | refs/heads/master | 2021-09-23T09:22:58.472057 | 2020-03-14T08:31:26 | 2020-03-14T08:31:26 | 247,235,337 | 0 | 0 | null | 2021-09-22T18:43:36 | 2020-03-14T07:56:29 | Python | UTF-8 | Python | false | false | 1,354 | py | from .models import NoteModel,UserModel
from rest_framework import serializers
class UserModelSerializer(serializers.ModelSerializer):
class Meta:
model=UserModel
fields=('username',)
class NoteModelSerializer(serializers.ModelSerializer):
user_note = UserModelSerializer(many=True)
class Meta:
model=NoteModel
fields=('id','note_text','created_date','updated_date','user_note')
def create(self, validated_data):
        users_data = validated_data.pop('user_note')
        note = NoteModel.objects.create(**validated_data)
        for user_data in users_data:  # renamed: the original iterated 'user_data' over itself
            UserModel.objects.create(notemodel=note, **user_data)
return note
def update(self, instance, validated_data):
        users_data = validated_data.pop('user_note')
        users = list(instance.user_note.all())
instance.note_text = validated_data.get('note_text', instance.note_text)
instance.created_date = validated_data.get('created_date', instance.created_date)
instance.updated_date = validated_data.get('updated_date', instance.updated_date)
instance.save()
        for user_data in users_data:
user = users.pop(0)
user.username = user_data.get('username', user.username)
user.save()
return instance
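# Example of a payload this serializer accepts (field names come from the
# classes above; the values themselves are made up):
# {"note_text": "buy milk", "user_note": [{"username": "alice"}]}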
| [
"naveen.ijeri123@gmail.com"
] | naveen.ijeri123@gmail.com |
254a54f04d7e2527304887a3982a7456e97068b4 | a088c5e4c4c2e6c722ba2df47c35f4f98d540412 | /eduzen_bot/plugins/messages/inline.py | 3469090624de031336b06b61a3e51716ad9cbd40 | [] | no_license | mikael85/bot | c884602363dba9efb716940981494987fa37e3d3 | 86751cf57061ae317804cfc19806ebb15d9ac8b4 | refs/heads/master | 2020-11-30T02:15:42.221636 | 2019-08-24T16:39:01 | 2019-08-24T16:39:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | import logging
from uuid import uuid4
from telegram import InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.utils.helpers import escape_markdown
logger = logging.getLogger()
def code_markdown(bot, update):
query = update.inline_query.query
if not query:
return
results = [
InlineQueryResultArticle(
id=uuid4(),
title="code",
input_message_content=InputTextMessageContent(
f"```\n{query}\n```", parse_mode=ParseMode.MARKDOWN
),
),
InlineQueryResultArticle(
id=uuid4(), title="Caps", input_message_content=InputTextMessageContent(query.upper())
),
InlineQueryResultArticle(
id=uuid4(),
title="Bold",
input_message_content=InputTextMessageContent(
"*{}*".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN
),
),
InlineQueryResultArticle(
id=uuid4(),
title="Italic",
input_message_content=InputTextMessageContent(
"_{}_".format(escape_markdown(query)), parse_mode=ParseMode.MARKDOWN
),
),
]
bot.answer_inline_query(update.inline_query.id, results)
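# Hypothetical wiring sketch (added; `dispatcher` is assumed to exist):
# from telegram.ext import InlineQueryHandler
# dispatcher.add_handler(InlineQueryHandler(code_markdown))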
| [
"eduardo.a.enriquez@gmail.com"
] | eduardo.a.enriquez@gmail.com |
8bacb8e843f98006b0d409848f10edb92140f035 | f160cf4eb335ea799559312ac3d43a60c2c5848b | /library/zip_extract.py | e1f1faecce940706c2ead17d0b449c0c1525aa28 | [
"MIT"
] | permissive | baseplate-admin/Machine-Learning-Source-Code | c3389e0acb81e1f4c8e4c0cc763fcbc3781ef94e | a2203033d525c17b31584b52527c30e2c8aad1c4 | refs/heads/master | 2022-11-21T04:33:41.307477 | 2020-07-10T15:46:32 | 2020-07-10T15:46:32 | 277,730,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | def zip_extract():
import os
from zipfile import ZipFile
def zip_function():
print("We are extracting ZIP!!!")
where_is_zip=input("What is your zip location?")
what_is_zip_name=input("What is your zip name?")
what_is_zip_extension=input("What is your ZIP format?")
zip_join=os.path.join(where_is_zip,what_is_zip_name+ '.'+ what_is_zip_extension)
with ZipFile(zip_join,"r") as zip:
zip.extractall()
zip.printdir()
print("Enter a Number or It will cause ValueError.")
    try:
        how_many_zip=int(input('How many zips do you want to extract?'))
        print("""
        This is a number!!
        Lets Go!!!
        """)
        for i in range(how_many_zip):
            ask_if_zip_extract=input("""
            Do you want to extract zip?
            Enter 0 to skip extracting this zip.
            Enter 1 to extract this zip.
            """)
            if int(ask_if_zip_extract)==0:
                continue  # option 0 skips extraction (the original mistakenly called zip_function(2), which takes no arguments)
            elif int(ask_if_zip_extract)==1:
                zip_function()
            else:
                print("There's a problem with zip extract.")
except Exception as e:
print(e)
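
# Example invocation (added): the module exposes a single entry point.
# zip_extract()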
| [
"61817579+baseplate-admin@users.noreply.github.com"
] | 61817579+baseplate-admin@users.noreply.github.com |
a926afb7922e05c0385c644c79fe80df6e229e01 | ff983c83c59011c91ef1d28ef0b6ce6bfd843d8e | /cola.py | 9f4af97d65cbd2e4bbb9bc14d98eccfe9ac5f6b7 | [] | no_license | jiterman/Flights-Manager | 7af81f025342988ef5a9497dd79f0849e87ba43c | 197d49aa3f012846521d3e06a992fcf0d8b2b9d9 | refs/heads/master | 2022-11-05T11:43:53.566874 | 2020-06-22T01:09:16 | 2020-06-22T01:09:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | class Cola:
def __init__(self):
self.items = []
def encolar(self, x):
self.items.append(x)
def desencolar(self):
if self.esta_vacia():
            raise ValueError("The queue is empty")
return self.items.pop(0)
def esta_vacia(self):
return len(self.items) == 0 | [
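
# Usage sketch (added): Cola is a FIFO queue ("cola" is Spanish for queue).
# c = Cola()
# c.encolar(1); c.encolar(2)
# assert c.desencolar() == 1  # first in, first out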
"noreply@github.com"
] | jiterman.noreply@github.com |
79e2b660e292e440ae352f3b6b11c484f59e6ad4 | ad00e2f10ae396a02ded81d90e31e90a8999fbc8 | /kaggle/DigitRecognizer/tensorflow-cnn2.py | c32ba7704e1c74578cabd9e8f115fde48eed94a7 | [] | no_license | yixiaoyang/SmallData | a8c2f8525cf12b6c2e719c5aca0dee1580ce7215 | 6643ac67a150e1d7fdb924c8dde501f8c72fd40f | refs/heads/master | 2021-01-17T09:55:31.630233 | 2020-04-02T18:19:26 | 2020-04-02T18:19:26 | 59,277,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,728 | py | # coding: utf-8
#!/usr/bin/python
import tensorflow as tf
import pandas as pd
import numpy as np
import time
class DigitsModelCNN(object):
def __init__(self):
self.train_input = tf.placeholder(tf.float32, shape=[None,784])
self.train_out = tf.placeholder(tf.float32, shape=[None,10])
self.keep_prob = tf.placeholder(tf.float32)
self.sess = tf.Session()
# 21000 =》100*210
self.batch_size = 100
self.epochs = 210*16
self.learn_rate = 5e-4
'''
@func Computes a 2-D convolution given 4-D input and filter tensors.
@param input 4-D input tensor of shape [batch, in_height, in_width, in_channels]
filter 4-D filter / kernel tensor of shape [filter_height, filter_width, in_channels, out_channels]
@return
'''
def conv2d(self, input, filter, stride_w=1, stride_h=1):
return tf.nn.conv2d(input, filter, strides=[1,stride_w,stride_h,1], padding='SAME')
'''
@func Performs the max pooling on the input.
@param input 4-D Tensor with shape [batch, height, width, channels] and type tf.float32
ksize A list of ints that has length >= 4. The size of the window for each dimension of the input tensor.
strides A list of ints that has length >= 4. The stride of the sliding window for each dimension of the input tensor
@return
'''
def max_pool_2x2(self, input, stride_w=2, stride_h=2):
return tf.nn.max_pool(input, ksize=[1,2,2,1], strides=[1,stride_w,stride_h,1], padding="SAME")
'''
@func outputs random values from a truncated normal distribution.
'''
def init_w(self,shape):
# the standard deviation is 0.1
value = tf.truncated_normal(shape=shape, stddev=0.1)
return tf.Variable(value)
'''
@func outputs random values as bias
'''
def init_b(self,shape):
value = tf.constant(0.1, shape=shape)
return tf.Variable(value)
'''
    @note LeNet-5-style architecture (as implemented below)
     layer  operation       feature-maps    kernel  stride  size    activation
     in     input           1 (gray image)  -       -       28*28   -
     C1     convolution     16              5*5     1       28*28   relu
     S2     max pool        16              2*2     2       14*14   -
     C3     convolution     32              5*5     1       14*14   relu
     S4     max pool        32              2*2     2       7*7     -
     F5     full connected  -               -       -       256     relu
     out    full connected  -               -       -       10      softmax
'''
def build(self):
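        # note (added): this re-creates the train_input placeholder defined in
        # __init__; everything below uses this newer placeholder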
self.train_input = tf.placeholder(tf.float32, shape=[None,784])
self.input = tf.reshape(self.train_input, [-1, 28, 28, 1])
self.f_c1 = self.init_w([5,5,1,16])
self.b_c1 = self.init_b([16])
self.c1 = tf.nn.relu(self.conv2d(self.input, self.f_c1) + self.b_c1)
self.s2 = self.max_pool_2x2(self.c1)
self.f_c3 = self.init_w([5,5,16,32])
self.b_c3 = self.init_b([32])
self.c3 = tf.nn.relu(self.conv2d(self.s2, self.f_c3) + self.b_c3)
self.s4 = self.max_pool_2x2(self.c3)
self.w_f5 = self.init_w([7*7*32, 256])
self.b_f5 = self.init_b([256])
self.x_f5 = tf.reshape(self.s4, [-1,7*7*32])
self.f5 = tf.nn.relu(tf.matmul(self.x_f5, self.w_f5) + self.b_f5)
# out@10
self.f5_drop = tf.nn.dropout(self.f5, self.keep_prob)
self.w_out = self.init_w([256,10])
self.b_out = self.init_b([10])
        # keep the raw logits: softmax_cross_entropy_with_logits applies its own
        # softmax, so feeding it self.out would apply softmax twice
        logits = tf.matmul(self.f5_drop, self.w_out) + self.b_out
        self.out = tf.nn.softmax(logits)
        self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=self.train_out))
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learn_rate).minimize(self.loss)
predict = tf.equal(tf.argmax(self.out,1), tf.argmax(self.train_out,1))
self.accuracy = tf.reduce_mean(tf.cast(predict, tf.float32))
def train(self, train_x, train_y, test_x, test_y, keep_prob=0.1):
print("start training")
self.sess.run(tf.global_variables_initializer())
batch_start = 0
batch_end = batch_start + self.batch_size
print(self.train_input.shape)
print(self.train_out.shape)
for epoch in range(self.epochs):
_, loss, prob = self.sess.run([self.optimizer, self.loss, self.out],feed_dict={
self.train_input : train_x[batch_start:batch_end],
self.train_out: train_y[batch_start:batch_end],
self.keep_prob : keep_prob
})
if epoch %100 == 0:
train_accuracy = self.sess.run(self.accuracy, feed_dict={
self.train_input: train_x[0:1024],
self.train_out: train_y[0:1024],
self.keep_prob: 1.0
})
validate_accuracy = self.sess.run(self.accuracy, feed_dict={
self.train_input: test_x,
self.train_out: test_y,
self.keep_prob: 1.0
})
print("epoch %d, training accuracy %g, validate accuracy %g" % (epoch, train_accuracy, validate_accuracy))
batch_start = batch_end
batch_end = batch_start + self.batch_size
if(batch_end > train_x.shape[0]):
print("reset batch")
batch_start = 0
batch_end = batch_start + self.batch_size
train_x, train_y = self.permutation(train_x, train_y)
print("training done")
    def permutation(self, x, y):
sequence = np.random.permutation(x.shape[0])
return x[sequence], y[sequence]
def info(self):
print("c1,s2,c3,s4,c5 shape:")
print(self.c1.shape)
print(self.s2.shape)
print(self.c3.shape)
print(self.s4.shape)
print(self.f5.shape)
print('-'*16)
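        # note (added): train_x/train_y below refer to the module-level
        # globals, not attributes of this class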
print(train_x.shape)
print(train_y.shape)
def dense_to_one_hot(labels_dense, num_classes):
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
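
# Worked example (added): dense_to_one_hot(np.array([2, 0]), 3) returns
# [[0., 0., 1.],
#  [1., 0., 0.]]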
def load_data(filename, train_data=True, split=0.9):
data_frame = pd.read_csv(filename)
# (42000, 785)
print(data_frame.shape)
train_data_len = data_frame.shape[0]
train_data_split = int(train_data_len*split)
print(train_data_split)
train_x = data_frame.iloc[:train_data_split, 1:].values
train_x = train_x.astype(np.float)
train_x = np.multiply(train_x, 1.0/255.0)
train_y = data_frame.iloc[:train_data_split, 0].values
train_y = dense_to_one_hot(train_y,10)
validate_x = data_frame.iloc[train_data_split:, 1:].values
validate_x = validate_x.astype(np.float)
validate_x = np.multiply(validate_x, 1.0/255.0)
validate_y = data_frame.iloc[train_data_split:, 0].values
validate_y = dense_to_one_hot(validate_y,10)
print(train_x.shape)
print(train_y.shape)
print(validate_x.shape)
print(validate_y.shape)
return train_x, train_y, validate_x, validate_y
train_x, train_y, validate_x, validate_y = load_data('./data/train.csv')
print(train_y.shape)
print(train_y[0:4,])
cnn = DigitsModelCNN()
cnn.build()
cnn.info()
time_start = time.time()
cnn.train(train_x, train_y, validate_x, validate_y)
time_end = time.time()
print("total training time:")
print(time_end-time_start)
| [
"hityixiaoyang@gmail.com"
] | hityixiaoyang@gmail.com |
76b07fab07edb0667ffdda682c409887fdab50cc | 2cf99a155405b48bf14f872e1980ed948079e5dd | /test/test_router.py | a30b567e256a3ea2fe3ba97d23c6ab0b5d1539e8 | [
"MIT"
] | permissive | marrow/web.dispatch.route | c15309a26023d068b8f84ea4bbc221b674c1e6b8 | 92494bcad2e2a9a52d2e51eecfab910d829cc2de | refs/heads/master | 2021-01-25T04:01:46.245851 | 2016-02-15T07:54:36 | 2016-02-15T07:54:36 | 32,564,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,820 | py | # encoding: utf-8
import pytest
from web.dispatch.route.router import __DYNAMIC__, Router
from sample import Root
@pytest.fixture
def router():
return Router.from_object(Root)
def test_dynamic_repr():
assert repr(__DYNAMIC__) == '<dynamic element>'
def test_router_singleton():
assert Router.from_object(Root) is Router.from_object(Root)
def test_invalid_route():
router = Router()
with pytest.raises(ValueError):
router.parse("{bad:/}")
class TestRouterSample(object):
def test_single_static(self, router):
assert len(router.routes) == 1 # There's only a single top-level element.
assert 'user' in router.routes # It's "user".
assert len(router.routes['user']) == 2 # Which has a terminus and dynamic continuation.
assert router.routes['user'][None] == Root.root # The terminus is the "root" method.
assert router.routes['user'][None](Root()) == "I'm all people." # It really is.
def test_dynamic_username(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
def test_dynamic_username_action(self, router):
assert __DYNAMIC__ in router.routes['user']
dynamic = router.routes['user'][__DYNAMIC__]
assert len(dynamic) == 1
assert list(dynamic.keys())[0].match("GothAlice") # The regular expression matches.
assert len(list(dynamic.values())[0]) == 2
assert list(dynamic.values())[0][None] == Root.user
assert list(dynamic.values())[0][None](Root(), "GothAlice") == "Hi, I'm GothAlice"
| [
"alice@gothcandy.com"
] | alice@gothcandy.com |
846876364bc01fda2b044a0b561e2709369cd56c | 268d9c21243e12609462ebbd6bf6859d981d2356 | /Python/python_stack/Django/BeltReview/main/apps/books/models.py | fddd59aa3b548da3b7fdfa2c3d3484b1350a19f0 | [] | no_license | dkang417/cdj | f840962c3fa8e14146588eeb49ce7dbd08b8ff4c | 9966b04af1ac8a799421d97a9231bf0a0a0d8745 | refs/heads/master | 2020-03-10T03:29:05.053821 | 2018-05-23T02:02:07 | 2018-05-23T02:02:07 | 129,166,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,886 | py | from __future__ import unicode_literals
from django.db import models
from django import forms
from django.core.exceptions import ValidationError
# Create your models here.
class UserManager(models.Manager):
def basic_validator(self,postData):
errors={}
#validate password
if len(postData['password']) < 8:
errors["password"] = "password should be more than 8 characters"
#checks that the passwords match
if postData['password'] != postData['confirm']:
errors["confirm"] = "passwords do not match"
return errors
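    # Hypothetical usage sketch (added; typical call site in a view):
    # errors = User.objects.basic_validator(request.POST)
    # if errors:
    #     ...  # e.g. flash each message and redirect back to the form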
class User(models.Model):
name = models.CharField(max_length=255)
alias = models.CharField(max_length=255)
email = models.CharField(max_length=255)
password = models.CharField(max_length=255)
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = UserManager()
class AuthorManager(models.Manager):
    def validate_author(self, postData):
errors = {}
return errors
class Author(models.Model):
author = models.CharField(max_length=255)
objects = AuthorManager()
class BookManager(models.Manager):
    def validate_book(self, postData):
errors = {}
return errors
class Book(models.Model):
title = models.CharField(max_length=255)
author = models.ForeignKey(Author, related_name="books")
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
objects = BookManager()
class ReviewManager(models.Manager):
    def validate_review(self, postData):
errors = {}
return errors
class Review(models.Model):
rating = models.IntegerField()
comment = models.TextField()
created_at = models.DateTimeField(auto_now_add = True)
updated_at = models.DateTimeField(auto_now = True)
book = models.ForeignKey(Book, related_name="reviews")
user = models.ForeignKey(User, related_name="reviews")
objects = ReviewManager()
| [
"dkang417@gmail.com"
] | dkang417@gmail.com |
78e09543d9fe810959a5f9c88d88fc9890e0a11d | 228a253a698fd8ceb0af4e63187ee201004aca4e | /IotServer.py | d6306058174631582c8a438fc2b709bd31389722 | [] | no_license | mtpajula/iotLocalNetworkServer | 4b16a5d93f5dcaab98afaec1e37a317d35bb4649 | aa3c0187dff14c4bf568afa554f82cf13a2500f5 | refs/heads/master | 2021-05-11T14:34:57.921236 | 2018-02-23T17:40:29 | 2018-02-23T17:40:29 | 117,707,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,580 | py | # -*- coding: utf-8 -*-
from IotServerDevice import *
from time import sleep
import copy
import sys
class IotServer:
wait = 10
def __init__(self):
self.d = IotServerDevice()
def printer(self, category, message):
if category == "t1":
print("\n")
print(message)
print("======================================")
elif category == "t2":
print("\n")
print(message)
print("--------------------------------------")
elif category == "p":
print(message)
elif category == "error":
print(" ! ERROR: " + message)
'''
run in terminal command mode
Example: IotServer.py device=server command="reset devices"
'''
def send_command(self, device, command):
self.printer("p","Run in terminal command mode")
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
for d in self.d.c.devices:
if d.name == device:
d.receive_command('command', command)
if self.d.name == device:
self.d.receive_command('command', command)
# Send messages to db
self.send_message();
def close_db(self):
self.d.db.con.conn.close()
def send_message(self):
self.printer("t1","Send messages to db")
self.d.db.set_messages(self.d.c.devices)
self.d.db.set_messages([self.d])
'''
run in normal mode
'''
def run(self, schedule = False):
self.printer("p","Run in normal mode")
# Get devs from db
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
# get commands
self.printer("t1","Get commands")
self.d.db.get_commands(self.d.c.devices)
self.d.db.get_commands([self.d])
# Send messages to db
self.send_message();
'''
run in schedule mode
'''
def runSchedule(self):
self.printer("p","Run in schedule mode")
# Get devs from db
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
# Get scheduled commands
self.printer("t1","Get scheduled commands")
self.d.db.get_schedules(self.d.c.devices)
self.d.db.get_schedules([self.d])
# get commands
self.printer("t1","Get commands")
self.d.db.get_commands(self.d.c.devices)
self.d.db.get_commands([self.d])
# Send messages to db
self.send_message();
'''
run in status mode
'''
def runStatus(self):
self.printer("p","Run in status mode")
# Get devs from db
#self.printer("t1","Load devices from db")
self.d.collect_iot(True)
# save statuses to db
self.printer("t1","Save statuses to db")
self.d.db.set_status(self.d.c.devices)
self.d.db.set_status([self.d])
# Send messages to db
self.send_message();
if __name__ == '__main__':
iot = IotServer()
if "schedule" in sys.argv:
iot.runSchedule()
iot.close_db()
sys.exit()
if "status" in sys.argv:
iot.runStatus()
iot.close_db()
sys.exit()
c = None
d = None
for ar in sys.argv:
if "command=" in ar:
arp = ar.split("=")
c = arp[1]
elif "device=" in ar:
arp = ar.split("=")
d = arp[1]
if c != None and d != None:
iot.send_command(d,c)
iot.close_db()
sys.exit()
iot.run()
iot.close_db()
| [
"mtpajula@gmail.com"
] | mtpajula@gmail.com |
52722c46ff54f9d588bdd4cd1a24506d64dacd60 | bcc2d156334d3680561b17cec82cbc31a5ea07ad | /String/22. Generate Parentheses.py | 2431fefda0dcde528d7eafd0b65a378afe0ebe31 | [] | no_license | kevinsshah/Leetcode | 72b14e226b6881bcd18913b2fa132b0e3f8dd6ef | 4419f46e6f6b1d96ff8b7066fce687cfa88e65a0 | refs/heads/master | 2020-03-25T23:00:49.851183 | 2018-09-08T04:13:27 | 2018-09-08T04:13:27 | 144,255,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,129 | py | # Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.
#
# For example, given n = 3, a solution set is:
#
# [
# "((()))",
# "(()())",
# "(())()",
# "()(())",
# "()()()"
# ]
class Solution(object):
def generateParenthesis(self, n):
"""
:type n: int
:rtype: List[str]
"""
# def helper(A = []):
# if len(A) == 2*n:
# if isValid(A):
# ans.append("".join(A))
# else:
# A.append("(")
# helper(A)
# A.pop()
# A.append(")")
# helper(A)
# A.pop()
# def isValid(A):
# bal = 0
# for c in A:
# if c == "(":
# bal+=1
# else:
# bal -= 1
# if bal < 0:
# return False
# return bal == 0
# ans = []
# helper()
# return ans
# def backtrack(S = '', left = 0, right = 0):
# if len(S) == 2*n:
# ans.append(S)
# return
# if left < n:
# backtrack(S+"(", left + 1, right)
# if right < left:
# backtrack(S+")", left, right + 1)
# ans = []
# backtrack()
# return ans
ans = []
def helper(left, right, string, ans):
if right < left:
return
if not left and not right:
ans.append(string)
return
if left:
helper(left - 1, right, string + "(", ans)
if right:
helper(left, right - 1, string + ")", ans)
helper(n, n, "", ans)
return ans | [
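
# Worked trace (added): for n = 2 the helper explores
# "(" -> "((" -> "(()" -> "(())" and "(" -> "()" -> "()(" -> "()()",
# so generateParenthesis(2) returns ["(())", "()()"].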
"shah.kevi@husky.neu.edu"
] | shah.kevi@husky.neu.edu |
12d896a3fb16ddce598c3c26b8715790f3f41155 | bb7ee0c29834864964a445cc7cc68a742937791c | /file_crawler_w_yts_downloader.py | 667d19c6ff4a1df526fb6ea31d1ddfe5ce354fed | [] | no_license | quadcube/Automated-Yify-Subtitle-Downloader | 6a5ef01f70cb44e77f602bf8fac529c9f3436cf1 | 2254fccdebe61fa2871123267556b11cd75bb4c7 | refs/heads/master | 2020-08-23T08:38:44.358378 | 2020-04-12T14:18:36 | 2020-04-12T14:18:36 | 216,580,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,176 | py | import os
import re
import urllib
import logging
import requests # pip install requests
from zipfile import ZipFile
from html2text import HTML2Text # pip install html2text
log_path = "/Users/quadcube/Project/Subtitle Tool"
log_name = "file_crawler_w_yts_downloader"
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s", handlers=[logging.FileHandler("{0}/{1}.log".format(log_path, log_name)), logging.StreamHandler()])
logger = logging.getLogger()
root_dir = "/Volumes/GoogleDrive/My Drive/Server Backup/WD_MyBookLive_2TB/Public/Shared Videos/" #os.getcwd()
root_url = "http://www.yifysubtitles.com" # 1) www.yifysubtitles.com 2) yts-subs.com (need refinement)
srt_language = ['English']
srt_manual_select = False
refresh_yts_srt = False # if YTS movie files are found, rename any srt files (.backup) in that folder and download the best srt
remove_invalid_srt = True
invalid_srt_size_threshold = 1024 # remove anything less than 1024 bytes if remove_invalid_srt = True
valid_movie_file_ext = ['.mp4', '.m4v', '.avi', '.mkv', '.mov', '.webm', '.flv', '.vob', '.rm', '.rmvb', '.wmv', '.m4v', '.mpeg', '.mpg', '.m2v', '.MTS', '.M2TS', '.TS']
def html2text(url):
raw_html = requests.get(url)
raw_html.raise_for_status() # raise exception if status code is not 200
h = HTML2Text()
h.ignore_links = False
return h.handle(raw_html.text) # html2text translate html to readable text
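# Example (added; the query string is made up):
# html2text(root_url + '/search?q=inception') returns the rendered page as
# plain text, which the regexes in main() then scan.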
def main():
counter_movie = 0
counter_movie_w_srt = 0
counter_movie_dl_srt = 0
counter_movie_dl_srt_failed = 0
counter_movie_no_srt = 0
counter_no_movie = 0
for dir_name, subdir_list, file_list in os.walk(root_dir): # crawl thru current directory
if '/' in dir_name[len(root_dir):] or dir_name == root_dir:
continue # only transverse one level deep
else:
logger.debug('Found dir: {}'.format(dir_name))
found_srt = False
counter_movie += 1
for file_name in file_list:
if file_name.lower().endswith('.srt'):
if refresh_yts_srt == True and ('yts' in file_name.lower() or 'yify' in file_name.lower()):
logger.debug('Renaming srt file_list: {}'.format(file_list))
os.rename(dir_name + '/' + file_name, dir_name + '/' + file_name[:-4] + '.backup') # rename .srt to .backup
break
else:
logger.debug('Found file_list: {}'.format(file_list))
if remove_invalid_srt == True:
if os.stat(dir_name + '/' + file_name).st_size < invalid_srt_size_threshold:
logger.info('Removing file {}'.format(file_name))
os.remove(dir_name + '/' + file_name)
break
found_srt = True
counter_movie_w_srt += 1
break
if found_srt == False:
try:
found_movie = False
dir_name_list = dir_name[len(root_dir):].split("(", maxsplit=1)
dir_name_year = dir_name_list[1].split(")", maxsplit=1)[0]
search_query = dir_name_list[0].strip() # remove year and lead, trailing whitespace as yifisubtitle.com search query will return nothing
for i in range(search_query.count(' ') + 1): # i = 0, .replace() does nothing
if root_url == "http://www.yifysubtitles.com":
text_html = html2text(root_url + '/search?' + urllib.parse.urlencode({'q':search_query.replace(' ', ': ', i).replace(': ', ' ', i-1)})) # Try diff combinations of ":" in the search query
else: # yts-subs.com
text_html = html2text(root_url + '/search/' + urllib.parse.quote(search_query).replace(' ', ': ', i).replace(': ', ' ', i-1))
relevant_results = re.findall('\/movie-imdb\/.+\)\n+.\n+.+\n+.+year', text_html)
for result in relevant_results:
result_list = result.split(')\n\n[\n\n### ', maxsplit=1)
result_link = result_list[0]
result_name = result_list[1].split('\n\n')[0]
for j in range(5):
if result[-5 - j].isdigit(): # as long as not digit, backtrack until digit is found
result_year = result[-8 - j:-4 - j]
break
if result_name.lower() == search_query.lower().replace(' ', ': ', i).replace(': ', ' ', i-1) and dir_name_year == result_year:
logger.info('Found movie: {} Year: {}'.format(result_name, result_year))
found_movie = True
break
if found_movie == True:
break
if found_movie == True:
text_html = html2text(root_url + result_link)
#print(repr(text_html))
relevant_results = re.findall('\s\s\n\d{1,}\s?\|\s\s?\w+\s?\|\s\s?\[\s?subtitle\s.+\d\)\s\s\n\s\s\n', text_html, re.DOTALL) #re.findall('\s\s\n\d{1,}\s?\|\s\s?\w+\s?\|\s\s?\[\s?subtitle\s.+####\sTrailer', text_html, re.DOTALL)
if len(relevant_results) > 1:
logger.warning('Relevant result more than 1. {}'.format(dir_name))
if len(relevant_results) == 0:
logger.warning('No srt found on {}! {}'.format(root_url, dir_name))
else:
relevant_results = relevant_results[0].split(' \n')
subtitle_results = {}
subtitle_num = 0
for result in relevant_results:
if result != '':
if result[0].isnumeric():
result = result.replace('\n', '').replace(' ', '').split('|') # first remove the annoying \n, spaces and split according to tags
if result[1] in srt_language:
result_title_link = result[2].replace('[subtitle', '').split('](/subtitles')
subtitle_results[subtitle_num] = {'Rate': int(result[0]), 'Lang': result[1], 'Title': result_title_link[0], 'Link': '/subtitle' + result_title_link[1][:-1] + '.zip', 'Uploader': result[4][1:].split('](')[0] if result[3] == '' else result[3]}
#if srt_manual_select == True:
logger.info('({}) {}'.format(subtitle_num, subtitle_results[subtitle_num]))
subtitle_num += 1
if subtitle_num > 0: # check whether there's any filtered srt
if srt_manual_select == True and subtitle_num > 0:
while True:
try:
user_selection = int(input('Select subtitle (e.g. 0/1/2/...)'))
if user_selection < len(subtitle_results):
break
else:
raise
except:
print('Option is not valid!')
subtitle_results = subtitle_results[user_selection]
else: # Auto srt selection
subtitle_yts_rank = (None, 0) # subtitle_key, rating
subtitle_rank = (None, 0) # subtitle_key, rating
for subtitle_key, subtitle_value in subtitle_results.items():
if subtitle_yts_rank[1] <= subtitle_value['Rate'] and ('yts' in subtitle_value['Title'].lower() or 'yify' in subtitle_value['Title'].lower()): #prioritize YTS tags in title, since most movie files are obtained from YTS'
subtitle_yts_rank = (subtitle_key, subtitle_value['Rate'])
elif subtitle_rank[1] <= subtitle_value['Rate']:
subtitle_rank = (subtitle_key, subtitle_value['Rate'])
if subtitle_yts_rank[0] == None: # if YTS srt is not available, use non-YTS
subtitle_yts_rank = subtitle_rank
subtitle_results = subtitle_results[subtitle_yts_rank[0]]
logger.info(subtitle_results)
logger.debug(file_list)
movie_name = None
for file_name in file_list:
for file_type in valid_movie_file_ext:
if file_name.endswith(file_type):
found_movie = file_name.replace(file_type, '.srt')
break
if found_movie != None:
with open(dir_name + '/temp_srt.zip', 'wb') as srt_zip_file:
srt_zip_file.write(requests.get(root_url + subtitle_results['Link']).content) # TODO: yts-subs.com subtitles come from www.yifysubtitles.com, hence root_url won't work.
with ZipFile(dir_name + '/temp_srt.zip') as srt_zip_file:
srt_zip_file_list = srt_zip_file.namelist()
for srt_file in srt_zip_file_list:
if srt_file.lower().endswith('.srt'):
srt_zip_file.extract(srt_file, dir_name)
break
os.rename(dir_name + '/' + srt_file, dir_name + '/' + found_movie) # rename srt to match movie file
os.remove(dir_name + '/temp_srt.zip')
counter_movie_dl_srt += 1
else:
logger.warning('No filtered srt found on {}! {}'.format(root_url, dir_name))
counter_movie_no_srt += 1
else:
logger.warning('No movie found on {}! {}'.format(root_url, dir_name))
counter_no_movie += 1
except Exception as error:
logger.exception(error)
counter_movie_dl_srt_failed += 1
#logger.info(text_html)
        # Errors caused by line 57 are due to missing year info in dir_name
# Errors caused by bad html response code, ignore since there's nothing to do about it
logger.debug('Current stat -> Movie: {}\tMovie w srt: {}\tMovie dl srt: {}\tMovie dl srt failed: {}\tMovie no srt failed: {}\tNo movie: {}'.format(counter_movie, counter_movie_w_srt, counter_movie_dl_srt, counter_movie_dl_srt_failed, counter_movie_no_srt, counter_no_movie))
logger.info('Final stat -> Movie: {}\tMovie w srt: {}\tMovie dl srt: {}\tMovie dl srt failed: {}\tMovie no srt failed: {}\tNo movie: {}'.format(counter_movie, counter_movie_w_srt, counter_movie_dl_srt, counter_movie_dl_srt_failed, counter_movie_no_srt, counter_no_movie))
logging.info('Completed. Exiting...')
if __name__== "__main__":
main()
| [
"noreply@github.com"
] | quadcube.noreply@github.com |
9616bdcb9ebc14028225fac131ca2aa6763cfb91 | 9e3205c13404f6bf2b36c96af7d0a9d2532596a0 | /cart_pole/dqn.py | a37de3641dfad0cf9d3e7d3c578e6d83d554f348 | [] | no_license | mminhou/openai | fce2da3e1b49da0b99a55087cc97e8890fb5a1f7 | 05418b83218f4f2b29d70deef4a41cde7ad6941e | refs/heads/master | 2020-03-11T07:36:33.644382 | 2018-04-17T07:04:13 | 2018-04-17T07:04:13 | 129,861,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,109 | py | import numpy as np
import random as random
from collections import deque
from cnn_tensorflow import CNN
# See https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf for model description
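# Background sketch (added): train_step() regresses Q(s, a) toward the
# Bellman target
#     y = r                             if the transition ended the episode
#     y = r + gamma * max_a' Q(s', a')  otherwise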
class DQN:
def __init__(self, num_actions, observation_shape, dqn_params, cnn_params):
self.num_actions = num_actions
self.epsilon = dqn_params['epsilon']
self.gamma = dqn_params['gamma']
self.mini_batch_size = dqn_params['mini_batch_size']
# memory
self.memory = deque(maxlen=dqn_params['memory_capacity'])
# initialize network
self.model = CNN(num_actions, observation_shape, cnn_params)
print("model initialized")
def select_action(self, observation):
"""
Selects the next action to take based on the current state and learned Q.
Args:
observation: the current state
"""
if random.random() < self.epsilon:
# with epsilon probability select a random action
action = np.random.randint(0, self.num_actions)
else:
# select the action a which maximizes the Q value
obs = np.array([observation])
q_values = self.model.predict(obs)
action = np.argmax(q_values)
return action
def update_state(self, action, observation, new_observation, reward, done):
"""
Stores the most recent action in the replay memory.
Args:
action: the action taken
observation: the state before the action was taken
new_observation: the state after the action is taken
reward: the reward from the action
done: a boolean for when the episode has terminated
"""
transition = {'action': action,
'observation': observation,
'new_observation': new_observation,
'reward': reward,
'is_done': done}
self.memory.append(transition)
def get_random_mini_batch(self):
"""
Gets a random sample of transitions from the replay memory.
"""
rand_idxs = random.sample(range(len(self.memory)), self.mini_batch_size)
mini_batch = []
for idx in rand_idxs:
mini_batch.append(self.memory[idx])
return mini_batch
def train_step(self):
"""
Updates the model based on the mini batch
"""
if len(self.memory) > self.mini_batch_size:
mini_batch = self.get_random_mini_batch()
Xs = []
ys = []
actions = []
for sample in mini_batch:
y_j = sample['reward']
# for nonterminals, add gamma*max_a(Q(phi_{j+1})) term to y_j
if not sample['is_done']:
new_observation = sample['new_observation']
new_obs = np.array([new_observation])
q_new_values = self.model.predict(new_obs)
                    # max over next-state action values (not an action index)
                    max_q_value = np.max(q_new_values)
                    y_j += self.gamma*max_q_value
action = np.zeros(self.num_actions)
action[sample['action']] = 1
observation = sample['observation']
Xs.append(observation.copy())
ys.append(y_j)
actions.append(action.copy())
Xs = np.array(Xs)
ys = np.array(ys)
actions = np.array(actions)
self.model.train_step(Xs, ys, actions) | [
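
# Hypothetical training-loop sketch (added; a gym-style env API is assumed):
# agent = DQN(num_actions, observation_shape, dqn_params, cnn_params)
# obs = env.reset()
# for step in range(max_steps):
#     action = agent.select_action(obs)
#     new_obs, reward, done, _ = env.step(action)
#     agent.update_state(action, obs, new_obs, reward, done)
#     agent.train_step()
#     obs = env.reset() if done else new_obs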
"exit19093@gmail.com"
] | exit19093@gmail.com |
66d3b82f69e86c48f0251452cf320598139f48d5 | f7108e688415975baf5e3290d9b210585e4faaed | /monkeybat2.1/date.py | 04e20469868384d3244bafb377ee7322bf43019a | [] | no_license | lijiansheng325/python-2019 | 20ef1a960bc1cd8f09c0133eafda2755d273e2a4 | a577992d71d7d36a93d9cbb7658887c9152173f1 | refs/heads/master | 2020-04-19T03:30:48.426503 | 2019-01-30T09:12:02 | 2019-01-30T09:12:02 | 167,936,368 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | class Date(object):
def __init__(self, day=0, month=0, year=0):
self.day = day
self.month = month
self.year = year
def __str__(self):
return "{0}-{1}-{2}".format(self.year, self.month, self.day)
@classmethod
def from_string(cls, date_as_string):
year, month, day = map(int, date_as_string.split('-'))
date1 = cls(day, month, year)
return date1
@staticmethod
def is_date_valid(date_as_string):
year, month, day = map(int, date_as_string.split('-'))
return day <= 31 and month <= 12 and year <= 3999
@staticmethod
def millenium(month, day):
        # Date's constructor order is (day, month, year)
        return Date(day, month, 2000)
class DateTime(Date):
def __str__(self):
return "{0}-{1}-{2} - 00:00:00PM".format(self.year, self.month, self.day)
if __name__=="__main__":
s='3000-09-11'
if Date.is_date_valid(s):
date1 = Date.from_string(s)
print date1
date2 = DateTime.from_string(s)
print date2
millenium_new_year1 = Date.millenium(1, 1)
print millenium_new_year1
millenium_new_year2 = DateTime.millenium(10, 10)
print millenium_new_year2 | [
"lijiansheng325@163.com"
] | lijiansheng325@163.com |
2bfac6ff84eb132dbe0ca2d7e60294830f89405d | 697948f1b4e889258d64e4b641aa00f352c915d2 | /model/relation_prediction_semantic_loss/mydataloader.py | e0c59029b30751d753cdaf9484117914bd70a388 | [] | no_license | cheunglei/myLENSR | 6c8ad0376d907396b2db53f9ac42c76a001cd2eb | 063e50cc66dcc4390423150af89e95a9e0d2493a | refs/heads/master | 2021-03-21T02:02:16.576945 | 2020-05-18T08:02:47 | 2020-05-18T08:02:47 | 247,254,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,456 | py | from torch.utils.data import Dataset, DataLoader
from torch import Tensor
import numpy as np
import pickle as pk
class VRD_dataset(Dataset):
def __init__(self, train_set_keys, image_features_train, annotation_train, information):
self.train_set_keys = train_set_keys
self.image_features_train = image_features_train
self.annotation_train = annotation_train
self.information = information
def __len__(self):
return len(self.train_set_keys)
def __getitem__(self, idx):
img = self.train_set_keys[idx]
pairs = list(self.annotation_train[img].keys())
x = []
y = []
info = []
for i in range(len(pairs)):
key = pairs[i]
relation = self.annotation_train[img][key]
if relation == 100:
if np.random.random() < 0.01 and (self.information[img][key][1][1] != self.information[img][key][2][1]):
x.append(self.image_features_train[img][key])
y.append(relation)
info.append(self.information[img][key])
else:
x.append(self.image_features_train[img][key])
y.append(relation)
info.append(self.information[img][key])
x = Tensor(x)
y = Tensor(y).long()
# print ('debug',img,pairs,x,y,info)
return x, y, info
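
# Hypothetical usage sketch (added; argument values are assumed). Each
# __getitem__ returns a variable number of pairs per image, so batch_size=1
# is the natural choice:
# loader = DataLoader(VRD_dataset(keys, feats, annos, info), batch_size=1)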
class VRD_dataset_test(Dataset):
def __init__(self, train_set_keys, image_features_train, annotation_train, information):
self.train_set_keys = train_set_keys
self.image_features_train = image_features_train
self.annotation_train = annotation_train
self.information = information
def __len__(self):
return len(self.train_set_keys)
def __getitem__(self, idx):
# print(idx)
img = self.train_set_keys[idx]
pairs = list(self.annotation_train[img].keys())
x = []
y = []
info = []
for i in range(len(pairs)):
key = pairs[i]
relation = self.annotation_train[img][key]
if self.information[img][key][1][1] != self.information[img][key][2][1]:
x.append(self.image_features_train[img][key])
y.append(relation)
info.append(self.information[img][key])
x = Tensor(x)
y = Tensor(y).long()
# print ('debug',img,pairs,x,y,info)
return x, y, info
| [
"948594226@qq.com"
] | 948594226@qq.com |
1c9cb402c43d4cdc6747cd94f70df60e1fb424bf | 4276667227d01d225bcc083e9d82439d52f6cd6c | /10.io_code/4.serialization.py | 8781bf10dcf222c4764dafc10d9adcaa30f0cc42 | [] | no_license | JianxiangChan/python_learning | 82e24498e96369c1a25c7cb557e80f7baf5e7961 | 488e6f6cb0591b8fce9261b072346c745b19cb2d | refs/heads/master | 2020-06-05T22:01:54.429817 | 2019-12-16T14:40:14 | 2019-12-16T14:40:14 | 192,557,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | # -*- coding: utf-8 -*-
import pickle
d = dict(name = 'bob', age = 20, score = 88)
print(pickle.dumps(d)) #use of dumps
with open('dump.txt','wb') as f:
pickle.dump(d,f)
with open('dump.txt','rb') as f:
d = pickle.load(f)
print(d)
import json
d = dict(name = 'bob', age = 20, score = 88)
print(json.dumps(d))
class Student(object):
def __init__(self,name,age,score):
self.name = name
self.age = age
self.score = score
s = Student('bob', 20 , 80)
def student2dict(std):
return {
'name' : std.name,
'age' : std.age,
'score' : std.score
}
print(json.dumps(s, default = student2dict))
print(json.dumps(s, default = lambda obj: obj.__dict__))
s = json.dumps(s, default = lambda obj: obj.__dict__)
def dict2student(d):
return Student(d['name'],d['age'],d['score'])
print(json.loads(s , object_hook = dict2student))
obj = dict(name='小明', age=20)
s = json.dumps(obj, ensure_ascii=False)
print(s)
s = json.dumps(obj)
print(s)
| [
"15651898806@163.com"
] | 15651898806@163.com |
2e2d00ecfeb31b0168a0130af2aa68e6f2967de9 | aa245f4e900ab0f27eee9b0fb2d7c9f7d4172269 | /tests/test_utils.py | 5c5bd201679fb0fdf8b3403da887b2dcab97dcbe | [
"MIT"
] | permissive | Vetrovec/chainee | ed4edd4e92637b29fcf5ff0493de6f6983e66e98 | 3a1a300f86ad8aeb385d8de7f766dd035c039f04 | refs/heads/master | 2022-04-05T13:54:38.804711 | 2020-02-01T14:11:16 | 2020-02-01T14:11:16 | 235,657,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,992 | py | from unittest import TestCase
import chainee.utils as utils
class TestUtils(TestCase):
def test_is_hex_string(self):
self.assertTrue(utils.is_hex_string("AbCdeF1234567890"), "is hex")
self.assertFalse(utils.is_hex_string("abcdefg"), "is not hex")
def test_validate_private_key(self):
self.assertTrue(
utils.validate_private_key("685CF62751CEF607271ED7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"is valid private key"
)
self.assertFalse(
utils.validate_private_key("0000000000000000000000000000000000000000000000000000000000000000"),
"is not valid private key"
)
self.assertFalse(
utils.validate_private_key("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"),
"is not valid private key"
)
def test_validate_address(self):
self.assertTrue(
utils.validate_address("0000000000000000000000000000000000000000"),
"is valid address"
)
self.assertTrue(
utils.validate_address("c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"),
"is valid address"
)
self.assertFalse(
utils.validate_address("1234567890"),
"is not valid address"
)
self.assertFalse(
utils.validate_address("abcdefghijklmnopqrstuvwxyzabcdefghijklmn"),
"is not valid address"
)
def test_sha3(self):
self.assertEqual(
utils.sha3("abcdef"),
"8b8a2a6bc589cd378fc57f47d5668c58b31167b2bf9e632696e5c2d50fc16002"
)
self.assertEqual(
utils.sha3("test", False),
"36f028580bb02cc8272a9a020f4200e346e276ae664e45ee80745574e2f5ab80"
)
def test_generate_private_key(self):
self.assertTrue(
utils.validate_private_key(utils.generate_private_key()),
"should generate valid private key"
)
def test_get_pub_key(self):
self.assertEqual(
utils.get_pub_key("685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"6b2cc423e68813a13b4f0b3c7666939d20f845a40104a3c85db2d8a3bcfd9517620075fac7de10a94073ab9a09a9a8dd28bb44adaaf24bf334a6c6258524dd08"
)
def test_address_from_public(self):
self.assertEqual(
utils.address_from_public("6b2cc423e68813a13b4f0b3c7666939d20f845a40104a3c85db2d8a3bcfd9517620075fac7de10a94073ab9a09a9a8dd28bb44adaaf24bf334a6c6258524dd08"),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
def test_address_from_private(self):
self.assertEqual(
utils.address_from_private("685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
def test_sign(self):
self.assertEqual(
utils.sign("abcdef", "685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe"),
"b90e97baea96a2120a53d3ba34201705891e79beb8b86cfaf26a4e467264ac6e2481ffed9036a8403161d1d0bf7a7485f6e190d1ffdc1bccefd74fe6c547b30a01"
)
self.assertEqual(
utils.sign("test", "685cf62751cef607271ed7190b6a707405c5b07ec0830156e748c0c2ea4a2cfe", False),
"6f2dfa18ba808d126ef8d7664cbb5331a4464f6ab739f82981a179e47569550636daa57960b6bfeef2981ea61141ce34b2febe811394ce3b46ffde0ce121516101"
)
def test_recover(self):
self.assertEqual(
utils.recover("abcdef", "b90e97baea96a2120a53d3ba34201705891e79beb8b86cfaf26a4e467264ac6e2481ffed9036a8403161d1d0bf7a7485f6e190d1ffdc1bccefd74fe6c547b30a01"),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
self.assertEqual(
utils.recover("test", "6f2dfa18ba808d126ef8d7664cbb5331a4464f6ab739f82981a179e47569550636daa57960b6bfeef2981ea61141ce34b2febe811394ce3b46ffde0ce121516101", False),
"c70f4891d2ce22b1f62492605c1d5c2fc1a8ef47"
)
| [
"stepan.vetrovec@gmail.com"
] | stepan.vetrovec@gmail.com |
2e77e1bf2950b9ae5d4e921023ac91b6785e05f8 | 7474675ad1a50bd41792ef9c4de09924acbc8f17 | /KNN/iris.py | 85f0cf2fd3a28cafc5e979950791eb122826a8a8 | [] | no_license | itsmefarhan/MachineLearning | 5f2b756e31ab199701ac8f223c420634a0d04478 | 6df397f583222575ac9035350e76f6a9b9c0a2eb | refs/heads/master | 2020-09-05T09:24:56.605009 | 2019-11-11T20:07:39 | 2019-11-11T20:07:39 | 220,056,068 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
dataset = load_iris()
# print(dataset.keys())
# print(dataset.data)
X_train, X_test, y_train, y_test = train_test_split(dataset['data'], dataset['target'], test_size = 0.2, random_state = 0)
model = KNeighborsClassifier()
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
accuracy = model.score(X_test, y_test)
print(accuracy)
cm = confusion_matrix(y_test, y_predict)
print(cm) | [
"farhan.farooq@live.com"
] | farhan.farooq@live.com |
dc6217c8436382f7a1d6ad3ae9face803e235091 | 931f1a920913dc21ea6cb5b4b591e05259abf490 | /input_files/create_text_hdfs.py | 414999ed6bf77e3288c4a4c21af9200eeb0fa107 | [] | no_license | cgeroux/big_data_benchmark | f7bf3dbce55ae234c4548704f74710fa2f57cfef | b612665d0bda6e20283148fd9ba7be398f8d24d2 | refs/heads/master | 2021-01-10T13:34:19.043658 | 2017-10-12T15:24:04 | 2017-10-12T15:24:04 | 53,532,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,341 | py | #!/usr/bin/env python
import random
import optparse as op
from subprocess import Popen, PIPE,list2cmdline
import os
def addParserOptions(parser):
"""Adds command line options
"""
#these options apply globally
parser.add_option("-f",dest="forceOverwrite",default=False,action="store_true"
,help="Forces overwriting of an existing output file [not default].")
parser.add_option("--line-length",dest="lineLength",type="int",default=80
,help="Set the length of lines in the file [default: %default]")
parser.add_option("--lines-split",dest="splitLines",default=True
,action="store_true"
,help="Separate file into lines of length LINELENGTH or less [default].")
parser.add_option("--lines-not-split",dest="splitLines",default=True
,action="store_false"
,help="File will be a single line [not default].")
parser.add_option("--file-size",dest="fileSize",type="int",default=1000
,help="The size of the file in bytes [default: %default bytes]")
parser.add_option("-o",dest="outputFileName",type="string"
,default="generated.txt",help="Specify the name of the output file "
+"and path within HDFS [default: \"%default\"].")
parser.add_option("--seed-file",dest="seedFile",default=1,help="Seed used "
+"for randomly choosing words from the dictionary [default: %default].")
parser.add_option("--dictionary-file",dest="dictionaryFile",type="string"
,default="english-wordlist.txt"
,help="Specify a file containing a list of words separated by newlines "
+"to be used as the language dictionary. This option has no effect if "
+"the option --randomly-generate-dict is specified "
+"[default: \"%default\"].")
parser.add_option("--randomly-generate-dict",dest="genDict",default=False
,action="store_true",help="If set will create a dictionary by selecting"
+" random letters for NUMWORDS words of a randomly chosen word length "
+"between MINWORDLENGTH and MAXWORDLENGTH. See \"Randomly generated "
+"dictionary options\" [default: %default].")
parser.add_option("--hdfs-upload-size",dest="hdfsUploadSize",type="int"
,default=100000000
,help="Size in bytes between uploads to HDFS [default: %default].")
randDictGroup=op.OptionGroup(parser,"Randomly generated dictionary options")
randDictGroup.add_option("--min-word-length",dest="minWordLength",default=1
,type="int",help="Sets the minimum word length [default: %default].")
randDictGroup.add_option("--max-word-length",dest="maxWordLength",default=10
,type="int",help="Sets the maximum word length [default: %default].")
randDictGroup.add_option("--num-words",dest="numWords",default=1000
,type="int",help="Sets the maximum word length [default: %default].")
randDictGroup.add_option("--seed-dict",dest="seedDict",default=1,help="Seed used "
+"for randomly generating dictionary [default: %default].")
parser.add_option_group(randDictGroup)
def parseOptions():
"""Parses command line options
"""
parser=op.OptionParser(usage="Usage: %prog [options]"
,version="%prog 1.0",description=r"Randomly generates the content of a text file in HDFS.")
#add options
addParserOptions(parser)
#parse command line options
return parser.parse_args()
def createGiberishDict(numWords,minWordLength,maxWordLength,seed=1):
"""Creates a dictionary of numWords created by randomly selecting a word
length between minWordLength and maxWordLength and the populating it with
randomly selected lower case letters.
"""
characterLow=97
characterHigh=122
random.seed(seed)
#create a dictionary of words
dictionary={}
for i in range(numWords):
length=random.randint(minWordLength,maxWordLength)
word=""
for j in range(length):
character=chr(random.randint(characterLow,characterHigh))
word+=character
dictionary[i]=word
return dictionary
def loadDictFromFile(fileName):
"""Loads a dicionary from a file containing words seperated by newline
characters.
"""
dictionary={}
count=0
for line in open(fileName,'r'):
line=line.strip()
line=line.replace("(a)","")
if len(line)>0:
dictionary[count]=line.strip()
count+=1
return dictionary
def performCommand(cmd,throwOnError=True):
#upload file to HDFS
process=Popen(cmd,stdout=PIPE,stderr=PIPE)
stdout,stderr=process.communicate()
returnCode=process.returncode
if throwOnError:
if (returnCode!=0):
raise Exception("error encounter while executing command "
+str(cmd)+" got stdout=\""+str(stdout)+"\" and stderr=\""
+str(stderr)+"\" and return code="+str(returnCode))
return returnCode
def main():
#parse command line options
(options,args)=parseOptions()
#create a dictionary to use to construct the file
if options.genDict:
dictionary=createGiberishDict(options.numWords
,options.minWordLength,options.maxWordLength
,seed=options.seedDict)
else:
dictionary=loadDictFromFile(options.dictionaryFile)
#should check if the hdfs file is there and remove it if it is
cmd=["hdfs","dfs","-stat",options.outputFileName]
returnCode=performCommand(cmd,throwOnError=False)#throwOnError=False since we will handle the error here
if(returnCode==0):
overwrite=False
if not options.forceOverwrite:
#check if we should overwrite it
overWriteResponse=raw_input("File exists, overwrite? (y/n)")
if overWriteResponse in ["y","Y","Yes","T","True","1"]:
overwrite=True
else:
overwrite=True
#remove the file
if overwrite:
cmd=["hdfs","dfs","-rm",options.outputFileName]
performCommand(cmd)
else:
print "Not overwriting pre-existing file in HDFS \"" \
+options.outputFileName+"\""
quit()
#create the command to upload to HDFS
tempFileName="tmp.txt"
cmd=["hdfs","dfs","-appendToFile",tempFileName,options.outputFileName]
#create file from the dictionary
sizeTotal=0
sizeToUpload=0
f=open(tempFileName,'w')
lenDict=len(dictionary.keys())-1
random.seed(options.seedFile)
sizePerHDFAppend=options.hdfsUploadSize
while(sizeTotal<options.fileSize):
#create a line to add to the file
line=""
lineLen=0
while(True):
wordKey=random.randint(0,lenDict)
word=dictionary[wordKey]
lineLen+=len(word)+1
if lineLen<options.lineLength:
line+=word+" "
else:
break
#write the line to the file
if options.splitLines:
line+="\n"
f.write(line)
sizeTotal+=len(line)
sizeToUpload+=len(line)
#if temporary file big enough upload to HDFS
if sizeToUpload>=sizePerHDFAppend:
print "uploading "+str(sizeToUpload)+" bytes to hdfs"
#close the file
f.close()
#upload file to HDFS
performCommand(cmd)
#remove file after upload and open a new file for the next chunk
os.remove(tempFileName)
f=open(tempFileName,'w')
sizeToUpload=0
#close the temporary file
f.close()
#upload any extra content written to the temporary file since last upload
if sizeToUpload>0:
print "uploading remaining "+str(sizeToUpload)+" bytes to hdfs"
performCommand(cmd)
#remove temporary file
os.remove(tempFileName)
if __name__ == "__main__":
main() | [
"chris.m.geroux@gmail.com"
] | chris.m.geroux@gmail.com |
7e0772e81bc42eb837cd3dce54f0f187bcad8970 | 3505132210ee8e48c2f216400aed6c2478075a86 | /feature_selection/find_signature.py~ | e0d9df6158e852a573058dd3eaff86b9c629a9bd | [] | no_license | yutsai84/Enron_POI_identifier | 7610da2403a63857c3963977096fef9565a95b3f | 03a27f997641fd97eaa78aec446b9b3704fd15df | refs/heads/master | 2019-04-03T12:10:48.198921 | 2018-04-23T02:47:28 | 2018-04-23T02:47:28 | 66,225,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | #!/usr/bin/python
import pickle
import numpy
numpy.random.seed(42)
### The words (features) and authors (labels), already largely processed.
### These files should have been created from the previous (Lesson 10)
### mini-project.
words_file = "../text_learning/your_word_data.pkl"
authors_file = "../text_learning/your_email_authors.pkl"
word_data = pickle.load( open(words_file, "r"))
authors = pickle.load( open(authors_file, "r") )
### test_size is the percentage of events assigned to the test set (the
### remainder go into training)
### feature matrices changed to dense representations for compatibility with
### classifier functions in versions 0.15.2 and earlier
from sklearn import cross_validation
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train = vectorizer.fit_transform(features_train)
features_test = vectorizer.transform(features_test).toarray()
### a classic way to overfit is to use a small number
### of data points and a large number of features;
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150].toarray()
labels_train = labels_train[:150]
### your code goes here
from sklearn import tree
clf=tree.DecisionTreeClassifier()
clf.fit(features_train,labels_train)
pred=clf.predict(features_test)
print "pred=",pred
import sklearn
accuracy=sklearn.metrics.accuracy_score(pred,labels_test)
print "accuracy:\t",accuracy
#print importance>0.2 and its index
importances=clf.feature_importances_
import numpy as np
#indices=np.argsort(importances)[::-1] #sort descending
#print "Feature ranking:"
#for i in range(10):
# print "{} feature No.{} ({})".format(i+1,indices[i],importances[indices[i]])
for i in range(len(importances)):
    if importances[i]>=0.2:
        print "Feature No.{} with importance {}".format(i,importances[i])
        #print which feature causes the problem
        print "the feature causing the problem:", vectorizer.get_feature_names()[i]
#the output is 33614,0.76
| [
"yuchengtsai84@gmail.com"
] | yuchengtsai84@gmail.com | |
1f43b2642f2cdbd247d3109f36b3583af0b787b8 | adc53c3aa155a93610261353df13ae0b25393f7a | /src/app/api/files.py | d9c2ebc273e444cc8a6e6769f8eb359a3c004451 | [] | no_license | alvinTaoOps/geofiles-api | fe9b95a63117cbfcceb7e404c0bd7c94b2bedfbe | 66bb1bd09d57f294a40ed8aec13ab58a2234ca6f | refs/heads/master | 2023-07-18T10:52:19.939089 | 2021-04-27T16:55:41 | 2021-04-27T16:55:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,897 | py | from typing import Optional, List
from fastapi import APIRouter, status, UploadFile, File, Header, Request
from ..db import files as files_repository
from ..utils.Exceptions import raise_422_exception, raise_401_exception, raise_404_exception, raise_410_exception
from ..utils.http import HTTPFactory
from ..core.validator import Validator, SupportedFormat
from ..core.convertors.helper_functions import convert_to_geojson as to_geojson, convert_to_cad as to_cad, \
convert_to_shp as to_shp
from fastapi.responses import FileResponse
from pathlib import Path
from geojson_pydantic.features import FeatureCollection
from .schemas import FileRecord, PublicFile
import os
router = APIRouter()
async def file_request_handler(file_uuid: str, request: Request, token: Optional[str] = Header(None)):
if not request.state.user:
raise_401_exception()
file_record = await files_repository.get_one(file_uuid)
if not file_record:
raise_410_exception()
if file_record.get("user_id") != request.state.user["user_id"]:
raise_401_exception()
if not Path(file_record.get("path")).exists():
raise_410_exception()
return FileRecord.parse_obj(dict(file_record))
@router.post("/upload/", status_code=status.HTTP_201_CREATED)
async def create_upload_file(request: Request, file: UploadFile = File(...),
token: Optional[str] = Header(None)):
filename, file_extension = os.path.splitext(file.filename)
if file_extension not in Validator.SUPPORTED_FORMAT:
raise_422_exception()
if not request.state.user:
raise_401_exception()
file_uuid = await files_repository.create_from_request(file, file_extension, request.state.user)
return file_uuid
@router.get("/{file_uuid}", status_code=status.HTTP_200_OK)
async def download_file(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
return FileResponse(
file_record.path, media_type=SupportedFormat.get_mime_type(file_record.type), filename=file_record.file_name)
@router.get("/{file_uuid}/format", status_code=status.HTTP_200_OK)
async def get_allowed_formats(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
available_format = SupportedFormat.get_available_format(file_record.type)
urls = [f"/{file_uuid}/to{export_format}" for export_format in available_format]
return urls
@router.get("/{file_uuid}/toGEOJSON", response_model=FeatureCollection, status_code=status.HTTP_200_OK)
async def convert_to_geojson(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
geojson_response = await to_geojson(file_record, stream=False)
if not geojson_response:
raise_422_exception()
file_name = f"{os.path.splitext(file_record.file_name)[0]}.json"
return FileResponse(
geojson_response, media_type='application/json', filename=file_name)
@router.get("/{file_uuid}/toCAD", status_code=status.HTTP_200_OK)
async def convert_to_dwg(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
dwg_response = await to_cad(file_record)
if not dwg_response:
raise_422_exception()
file_name = f"{os.path.splitext(file_record.file_name)[0]}.dxf"
return FileResponse(
dwg_response, media_type='application/dxf', filename=file_name)
@router.get("/{file_uuid}/toSHP", status_code=status.HTTP_200_OK)
async def convert_to_shp(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
shp_response = await to_shp(file_record)
if not shp_response:
raise_422_exception()
file_name = f"{os.path.splitext(file_record.file_name)[0]}.zip"
return FileResponse(
shp_response, media_type='application/zip', filename=file_name)
@router.get("/{file_uuid}/stream/geojson", response_model=FeatureCollection, status_code=status.HTTP_200_OK)
async def convert_to_geojson(request: Request, file_uuid: str, token: Optional[str] = Header(None)):
file_record = await file_request_handler(file_uuid, request)
geojson_response = await to_geojson(file_record, stream=True)
if not geojson_response:
raise_422_exception()
return FeatureCollection.parse_raw(geojson_response)
@router.get("/", status_code=status.HTTP_200_OK, response_model=List[PublicFile])
async def retrieve_users_files(request: Request, token: Optional[str] = Header(None)):
if not request.state.user:
raise_401_exception()
users_files = await files_repository.retrieve_users_files(request.state.user["user_id"])
return users_files
| [
"jossefaz@protonmail.com"
] | jossefaz@protonmail.com |
b8a62fa93f2532714aacb95518a96010cd6afe03 | fffa7b13491deadfc649dfd035099ef764d8d303 | /api/tests/mathematical_object_detail.py | 3ecfae51fd020c715c1a8504027fcc57a26800f4 | [
"MIT"
] | permissive | Gawaboumga/OEMS | 3b12b8bebbe4b29716e8be4e22034ec394af36da | 1e60fa1f350f4cf1ca2e48072e0b4228eeb15024 | refs/heads/master | 2022-12-14T11:15:55.797241 | 2019-01-22T10:22:42 | 2019-01-22T10:22:42 | 147,358,167 | 0 | 0 | MIT | 2022-12-08T01:26:59 | 2018-09-04T14:20:58 | Python | UTF-8 | Python | false | false | 4,231 | py | from rest_framework import status
from rest_framework.test import APITestCase
from django.test import override_settings
from django.urls import reverse
from oems.settings import TEST_MEDIA_ROOT
from api.models import MathematicalObject
from api.tests import utils
@override_settings(MEDIA_ROOT=TEST_MEDIA_ROOT)
class MathematicalObjectDetailTests(APITestCase):
def test_retrieve_small_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
data = {
'latex': representation,
'type': type,
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(type, response_data['type'])
def test_retrieve_full_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
function = 'function'
name = 'name'
tag = 'tag'
convergence_radius = '|z < 1|'
data = {
'latex': representation,
'type': type,
'functions': [{'function': function}],
'names': [{'name': name}],
'tags': [{'tag': tag}],
'convergence_radius': convergence_radius
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(type, response_data['type'])
self.assertEqual(function, response_data['functions'][0]['function'])
self.assertEqual(name, response_data['names'][0]['name'])
self.assertEqual(tag, response_data['tags'][0]['tag'])
self.assertEqual(convergence_radius, response_data['convergence_radius'])
def test_put_small_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
data = {
'latex': representation,
'type': type,
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
new_type = 'P'
data['type'] = new_type
response = self.client.put(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}), data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.data
self.assertEqual(representation, response_data['latex'])
self.assertEqual(new_type, response_data['type'])
def test_delete_full_mathematical_object(self):
utils.log_as(self, utils.UserType.STAFF)
representation = 'test'
type = 'S'
function = 'function'
name = 'name'
tag = 'tag'
convergence_radius = '|z < 1|'
data = {
'latex': representation,
'type': type,
'functions': [{'function': function}],
'names': [{'name': name}],
'tags': [{'tag': tag}],
'convergence_radius': convergence_radius
}
response = self.client.post(reverse('api:mathematical_objects'), data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.delete(reverse('api:mathematical_object', kwargs={'pk': response.data['id']}), data, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(MathematicalObject.objects.count(), 0)
| [
"yourihubaut@hotmail.com"
] | yourihubaut@hotmail.com |
c78554bfaf8bee6f13777307c2c97139d339f973 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02390/s457532968.py | 390a81631bac8de1e3a93db961d2ef9a82cb8ed1 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | import sys
line = sys.stdin.readline()
inp = int(line)
h,mod = inp//3600, inp%3600
m,mod = mod//60, mod%60
s = mod
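# e.g. 46979 seconds -> 46979 // 3600 = 13 h, remainder 179 s -> 2 m 59 s,
# printed as "13:2:59"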
print ("%d:%d:%d" % (h,m,s)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
3e4331ea4515d8ab9a244201033c44ae2211e3db | d4cd2476f8fa8a7d94e183a68bd0678971310c5b | /checkio/06_Ice_Base/06_IceBase_04_FunnyAddition.py | 9030b3fb8d1063f001b7c9e2d024d3d76144968e | [] | no_license | gwqw/LessonsSolution | b495579f6d5b483c30d290bfa8ef0a2e29515985 | 0b841b1ae8867890fe06a5f0dcee63db9a3319a3 | refs/heads/master | 2020-07-05T19:15:53.758725 | 2019-10-01T11:34:44 | 2019-10-01T11:34:44 | 202,744,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | def checkio(data):
"""The sum of two integer elements"""
return sum(data)
if __name__ == '__main__':
assert checkio([5, 5]) == 10, 'First'
assert checkio([7, 1]) == 8, 'Second'
print('All ok')
| [
"="
] | = |
b96c59645e8a2d9a6c3fc4d83acb6984da618953 | dfe50c0041a5dc23b63ea39369d115a8b74c56f0 | /array_167.py | e396760dac8bbbcd6d360a390f08503b38081aa2 | [] | no_license | cainingning/leetcode | 1c624caf6330d2e1af4835741e5f0748c3f9513b | 09b7121628df824f432b8cdd25c55f045b013c0b | refs/heads/master | 2021-07-07T14:28:09.207501 | 2019-02-22T08:48:55 | 2019-02-22T08:48:55 | 142,756,206 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | class Solution:
def twoSum(self, numbers, target):
"""
:type numbers: List[int]
:type target: int
:rtype: List[int]
"""
l_index = 0
r_index = len(numbers) - 1
while l_index < r_index:
if numbers[l_index] + numbers[r_index] == target:
return [l_index, r_index]
elif numbers[l_index] + numbers[r_index] < target:
l_index += 1
else:
r_index -= 1
return [] | [
"499814159@qq.com"
] | 499814159@qq.com |
4f2b19ca6ea2aa053e8a9553366d01288860bf6f | 5ee1c8378e374dd239752bcc79b44bcbbd89559a | /wsgi.py | 3368c2fb6bbe0e361458b3fcc7990de7fce240c8 | [
"Apache-2.0"
] | permissive | mahdikord/kordba | 302bdaf03afddef04c3e9b860c096a8d0f29514a | 20c71f636cfb4e49265c0f7984ac3373cd2e7ba4 | refs/heads/master | 2021-01-10T07:49:14.110378 | 2016-02-07T08:22:08 | 2016-02-07T08:22:08 | 51,240,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,537 | py | #!/usr/bin/env python
import os
def application(environ, start_response):
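    # Minimal WSGI entry point: '/health' answers liveness probes with "1",
    # '/env' dumps the request environment, and any other path serves the
    # static welcome page below.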
ctype = 'text/plain'
if environ['PATH_INFO'] == '/health':
response_body = "1"
elif environ['PATH_INFO'] == '/env':
response_body = ['%s: %s' % (key, value)
for key, value in sorted(environ.items())]
response_body = '\n'.join(response_body)
else:
ctype = 'text/html'
response_body = '''<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<title>Welcome to OpenShift</title>
<style>
/*!
* Bootstrap v3.0.0
*
* Copyright 2013 Twitter, Inc
* Licensed under the Apache License v2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Designed and built with all the love in the world @twitter by @mdo and @fat.
*/
.logo {
background-size: cover;
height: 58px;
width: 180px;
margin-top: 6px;
            /* base64-encoded OpenShift logo SVG (multi-layer vector path data) omitted */
            background-image: url(data:image/svg+xml;base64,...);
}
.logo a {
display: block;
width: 100%;
height: 100%;
}
*, *:before, *:after {
-moz-box-sizing: border-box;
box-sizing: border-box;
}
aside,
footer,
header,
hgroup,
section{
display: block;
}
body {
color: #404040;
font-family: "Helvetica Neue",Helvetica,"Liberation Sans",Arial,sans-serif;
font-size: 14px;
line-height: 1.4;
}
html {
font-family: sans-serif;
-ms-text-size-adjust: 100%;
-webkit-text-size-adjust: 100%;
}
ul {
margin-top: 0;
}
.container {
margin-right: auto;
margin-left: auto;
padding-left: 15px;
padding-right: 15px;
}
.container:before,
.container:after {
content: " ";
/* 1 */
display: table;
/* 2 */
}
.container:after {
clear: both;
}
.row {
margin-left: -15px;
margin-right: -15px;
}
.row:before,
.row:after {
content: " ";
/* 1 */
display: table;
/* 2 */
}
.row:after {
clear: both;
}
.col-sm-6, .col-md-6, .col-xs-12 {
position: relative;
min-height: 1px;
padding-left: 15px;
padding-right: 15px;
}
.col-xs-12 {
width: 100%;
}
@media (min-width: 768px) {
.container {
width: 750px;
}
.col-sm-6 {
float: left;
}
.col-sm-6 {
width: 50%;
}
}
@media (min-width: 992px) {
.container {
width: 970px;
}
.col-md-6 {
float: left;
}
.col-md-6 {
width: 50%;
}
}
@media (min-width: 1200px) {
.container {
width: 1170px;
}
}
a {
color: #069;
text-decoration: none;
}
a:hover {
color: #EA0011;
text-decoration: underline;
}
hgroup {
margin-top: 50px;
}
footer {
margin: 50px 0 25px;
font-size: 11px;
}
h1, h2, h3 {
color: #000;
line-height: 1.38em;
margin: 1.5em 0 .3em;
}
h1 {
font-size: 25px;
font-weight: 300;
border-bottom: 1px solid #fff;
margin-bottom: .5em;
}
h1:after {
content: "";
display: block;
width: 100%;
height: 1px;
background-color: #ddd;
}
h2 {
font-size: 19px;
font-weight: 400;
}
h3 {
font-size: 15px;
font-weight: 400;
margin: 0 0 .3em;
}
p {
margin: 0 0 2em;
}
p + h2 {
margin-top: 2em;
}
html {
background: #f5f5f5;
height: 100%;
}
code {
background-color: white;
border: 1px solid #ccc;
padding: 1px 5px;
color: #888;
}
pre {
display: block;
padding: 13.333px 20px;
margin: 0 0 20px;
font-size: 13px;
line-height: 1.4;
background-color: #fff;
border-left: 2px solid rgba(120,120,120,0.35);
white-space: pre;
white-space: pre-wrap;
word-break: normal;
word-wrap: break-word;
overflow: auto;
font-family: Menlo,Monaco,"Liberation Mono",Consolas,monospace !important;
}
</style>
</head>
<body>
<section class='container'>
<hgroup>
<h1>Welcome to your Python application on OpenShift</h1>
</hgroup>
<div class="row">
<section class='col-xs-12 col-sm-6 col-md-6'>
<section>
<h2>Deploying code changes</h2>
          <p>OpenShift uses the <a href="http://git-scm.com/">Git version control system</a> for your source code, and grants you access to it via the Secure Shell (SSH) protocol. In order to upload and download code to your application you need to give us your <a href="https://developers.openshift.com/en/managing-remote-connection.html">public SSH key</a>. You can upload it within the web console or install the <a href="https://developers.openshift.com/en/managing-client-tools.html">RHC command line tool</a> and run <code>rhc setup</code> to generate and upload your key automatically.</p>
<h3>Working in your local Git repository</h3>
<p>If you created your application from the command line and uploaded your SSH key, rhc will automatically download a copy of that source code repository (Git calls this 'cloning') to your local system.</p>
<p>If you created the application from the web console, you'll need to manually clone the repository to your local system. Copy the application's source code Git URL and then run:</p>
          <pre>$ git clone &lt;git_url&gt; &lt;directory_to_create&gt;
# Within your project directory
# Commit your changes and push to OpenShift
$ git commit -a -m 'Some commit message'
$ git push</pre>
<ul>
<li><a href="https://developers.openshift.com/en/managing-modifying-applications.html">Learn more about deploying and building your application</a></li>
<li>See the README file in your local application Git repository for more information on the options for deploying applications.</li>
</ul>
</section>
</section>
<section class="col-xs-12 col-sm-6 col-md-6">
<h2>Managing your application</h2>
<h3>Web Console</h3>
<p>You can use the OpenShift web console to enable additional capabilities via cartridges, add collaborator access authorizations, designate custom domain aliases, and manage domain memberships.</p>
<h3>Command Line Tools</h3>
<p>Installing the <a href="https://developers.openshift.com/en/managing-client-tools.html">OpenShift RHC client tools</a> allows you complete control of your cloud environment. Read more on how to manage your application from the command line in our <a href="https://www.openshift.com/user-guide">User Guide</a>.
</p>
<h2>Development Resources</h2>
<ul>
<li><a href="https://developers.openshift.com/en/python-overview.html">Getting Started with Python on OpenShift</a></li>
<li><a href="https://developers.openshift.com">Developer Center</a></li>
<li><a href="https://www.openshift.com/user-guide">User Guide</a></li>
<li><a href="https://help.openshift.com">Help Center</a></li>
<li><a href="http://stackoverflow.com/questions/tagged/openshift">Stack Overflow questions for OpenShift</a></li>
<li><a href="http://git-scm.com/documentation">Git documentation</a></li>
</ul>
</section>
</div>
<footer>
<div class="logo"><a href="https://www.openshift.com/"></a></div>
</footer>
</section>
</body>
</html>'''
response_body = response_body.encode('utf-8')
status = '200 OK'
response_headers = [('Content-Type', ctype), ('Content-Length', str(len(response_body)))]
#
start_response(status, response_headers)
return [response_body ]
#
# Below for testing only
#
if __name__ == '__main__':
from wsgiref.simple_server import make_server
httpd = make_server('localhost', 8051, application)
# Wait for a single request, serve it and quit.
httpd.handle_request()
| [
"devmwheeler@live.com"
] | devmwheeler@live.com |
1d6ae632a35692b47fe5e5803717396272bfc1bd | ba84b4776efbfd114be6e489e206c61bcc93cf1d | /ScoreChanger.py | bcee5df8c7cda74ef7a3328cc951dd1ab5fdc825 | [] | no_license | ReiraH/Pinball-Machine | 5ad94267e3d4eb642aa03e4d4606e05cc6417431 | c4baee924bb8655a1464f6eebd0df0887bf95615 | refs/heads/master | 2020-03-21T05:20:39.219845 | 2018-06-21T11:03:06 | 2018-06-21T11:03:06 | 138,156,415 | 0 | 0 | null | 2018-06-21T11:03:07 | 2018-06-21T10:35:23 | null | UTF-8 | Python | false | false | 8,332 | py | import RPi.GPIO as GPIO
from time import sleep
class ScoreChanger(object):
HIGH = 0
LOW = 1
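    # NOTE: HIGH/LOW are deliberately swapped (0/1); the reel coils appear to be
    # driven through an active-low relay/driver board, so "HIGH" energises a coil.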
digitOnes = 24
digitTens = 23
digitHundreds = 15
digitThousands = 18
A = 0
B = 0
C = 0
D = 0
active = False
GPIO.setmode(GPIO.BCM)
GPIO.setup(digitOnes, GPIO.OUT)
GPIO.output(digitOnes, LOW)
GPIO.setup(digitTens, GPIO.OUT)
GPIO.output(digitTens, LOW)
GPIO.setup(digitHundreds, GPIO.OUT)
GPIO.output(digitHundreds, LOW)
GPIO.setup(digitThousands, GPIO.OUT)
GPIO.output(digitThousands, LOW)
print "HI I AM A SCORECHANGER!!!!"
state = 0
coilActive = False
timeEnabled = 0.0
maxTimeEnabled = 0.07
def changeScore(self,score, deltaTime):
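        # Non-blocking counterpart of changeScoreOld below -- a state machine
        # stepped once per frame with deltaTime:
        #   0: zero-pad the score to 4 digits and work out the pulse targets,
        #      wrapping +10 when a new digit is below the current reel value
        #      (the reels only advance forwards)
        #   1-4: pulse the thousands/hundreds/tens/ones coil in turn, holding
        #        each on/off level for maxTimeEnabled seconds
        #   5: latch the new digit values and go back to idle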
if self.state == 0:
inputString = str(score)
while(inputString.__len__() != 4):
inputString = "0" + inputString
ScoreArray = list(inputString)
self.newA = int(ScoreArray[0])
self.atemp = self.newA
self.newB = int(ScoreArray[1])
self.btemp = self.newB
self.newC = int(ScoreArray[2])
self.ctemp = self.newC
self.newD = int(ScoreArray[3])
self.dtemp = self.newD
print str(self.newD)
if self.newA < self.A:
self.newA += 10
if self.newB < self.B:
self.newB += 10
if self.newC < self.C:
self.newC += 10
if self.newD < self.D:
self.newD += 10
self.state = 1
elif self.state == 1:
if self.coilActive == False:
if self.newA > self.A:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitThousands, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newA-=1
else:
self.state = 2
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitThousands, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 2:
if self.coilActive == False:
if self.newB > self.B:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitHundreds, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newB-=1
else:
self.state = 3
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitHundreds, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 3:
if self.coilActive == False:
if self.newC > self.C:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitTens, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newC-=1
else:
self.state = 4
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitTens, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 4:
if self.coilActive == False:
if self.newD > self.D:
self.timeEnabled+=deltaTime
if(self.timeEnabled>self.maxTimeEnabled):
GPIO.output(self.digitOnes, self.HIGH)
self.coilActive = True
self.timeEnabled = 0.0
self.newD-=1
else:
self.state = 5
else:
self.timeEnabled += deltaTime
if self.timeEnabled > self.maxTimeEnabled:
GPIO.output(self.digitOnes, self.LOW)
self.coilActive = False
self.timeEnabled = 0
elif self.state == 5:
self.A = self.atemp
self.B = self.btemp
self.C = self.ctemp
self.D = self.dtemp
self.state = 0
def changeScoreOld(self,score):
if self.active == False:
self.active = True
print "Program started"
print "set input function"
inputString = str(score)
while(inputString.__len__() != 4):
inputString = "0" + inputString
ScoreArray = list(inputString)
newA = int(ScoreArray[0])
atemp = newA
newB = int(ScoreArray[1])
btemp = newB
newC = int(ScoreArray[2])
ctemp = newC
newD = int(ScoreArray[3])
dtemp = newD
print str(newD)
if newA < self.A:
newA += 10
if newB < self.B:
newB += 10
if newC < self.C:
newC += 10
if newD < self.D:
newD += 10
print "HI I AM A SCORECHANGER!!!! Score: "+ inputString + "Last Score: " + str(self.A)+ str(self.B)+ str(self.C)+ str(self.D)
while(newA > self.A):
GPIO.output(self.digitThousands, self.HIGH)
sleep(0.15)
GPIO.output(self.digitThousands, self.LOW)
sleep(0.15)
newA-=1
while(newB > self.B):
GPIO.output(self.digitHundreds, self.HIGH)
sleep(0.15)
GPIO.output(self.digitHundreds, self.LOW)
sleep(0.15)
newB-=1
while(newC > self.C):
GPIO.output(self.digitTens, self.HIGH)
sleep(0.15)
GPIO.output(self.digitTens, self.LOW)
sleep(0.15)
newC-=1
while(newD > self.D):
GPIO.output(self.digitOnes, self.HIGH)
sleep(0.15)
GPIO.output(self.digitOnes, self.LOW)
sleep(0.15)
newD-=1
self.A = atemp
self.B = btemp
self.C = ctemp
self.D = dtemp
self.active = False
    def resetScoreReels(self):
        # pulse each reel the remaining (10 - digit) steps so it wraps back to 0
        oneAmount = 10 - self.D
        tenAmount = 10 - self.C
        hundredAmount = 10 - self.B
        thousandAmount = 10 - self.A
        if oneAmount != 10:
            for ones in range(0, oneAmount):
                GPIO.output(self.digitOnes, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitOnes, self.LOW)
                sleep(0.1)
        if tenAmount != 10:
            for tens in range(0, tenAmount):
                GPIO.output(self.digitTens, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitTens, self.LOW)
                sleep(0.1)
        if hundredAmount != 10:
            for hundreds in range(0, hundredAmount):
                GPIO.output(self.digitHundreds, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitHundreds, self.LOW)
                sleep(0.1)
        if thousandAmount != 10:
            for thousands in range(0, thousandAmount):
                GPIO.output(self.digitThousands, self.HIGH)
                sleep(0.1)
                GPIO.output(self.digitThousands, self.LOW)
                sleep(0.1)
| [
"noreply@github.com"
] | ReiraH.noreply@github.com |
051bf23137383141aa82658c92056367cacb34f9 | d5c159e43758e5bee418a75cbb856ff2bbd9e285 | /bitcoinexp/routing.py | 586038d988de0a21eb789a7c4e7609f61940d059 | [] | no_license | okcdbu/bitcoinexperiment | b2b1ab3f54de12fb215be890cf6f4d587bcaa146 | 46af6018210fddc64464a4a867540efc894b5b01 | refs/heads/master | 2023-05-24T06:33:43.703070 | 2021-06-08T08:11:19 | 2021-06-08T08:11:19 | 350,988,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | from flask import Flask, render_template
from bitcoinexp.trading import get_chart_data, run
from flask_socketio import SocketIO
import threading
app = Flask(__name__)
socketio = SocketIO(app)
thread_lock = threading.Lock()
@app.route("/")
@app.route("/chart")
def chart_visualization():
return render_template('chart.html')
@socketio.on("connect")
def init_data():
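    # On each client connect: build a one-off snapshot of BTC candles, start a
    # background run() worker (which presumably keeps emitting live updates via
    # socketio), then send the snapshot as the initial 'response' event.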
data = get_chart_data("BTC")
jsondata = data.to_json(orient='records') # get json data like {{open,high,low,close,date},...}
worker = threading.Thread(target=run, args=(socketio,))
worker.start()
socketio.emit('response', jsondata)
| [
"okcdbu@gmail.com"
] | okcdbu@gmail.com |
f23488ded619c675fe870811001ad1b85b57c931 | 4eaf9f8ef3eb2addf6a4fb0a6bc4f41b8584bbc6 | /Week10/src/button.py | 626572ee8c47c89608c588dd40fe26a8514f7b33 | [
"MIT"
] | permissive | Kids-Hack-Labs/Winter2021 | 3d6afd99ae0c77ae7a9767d08c6f89b9e92da34e | 4c66d5cf05045d2724db2393a0c2c581f314f903 | refs/heads/main | 2023-04-01T13:45:45.200124 | 2021-04-07T04:32:14 | 2021-04-07T04:32:14 | 329,418,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,819 | py | from pygame import Color, Rect, Surface
import pygame.mouse as pm
from src.text_generator import TextGenerator
class Button():
STATES = ("NONE","OUT","HOVER","DOWN","UP")
def __init__(self, button_text, text_info, button_info, func):
self.colours = {Button.STATES[1]:button_info["out"],
Button.STATES[2]:button_info["hover"],
Button.STATES[3]:button_info["down"],
Button.STATES[4]:button_info["up"]}
self.rect = Rect(button_info["rect"])
self.surf = Surface(self.rect.size)
self.text_surf = TextGenerator.generate_text(button_text, text_info, None)
self.text_rect = self.text_surf.get_rect()
self.text_rect.center = (self.rect.width/2, self.rect.height/2)
self.on_click = func
self.current_state = Button.STATES[1]
self.previous_state = Button.STATES[1]
self.active = True
def update(self, delta):
if self.active:
self.current_state = self.check_states()
if self.previous_state == Button.STATES[3] and\
self.current_state == Button.STATES[2]:
self.on_click()
self.previous_state = self.current_state
def render(self,target):
self.surf.fill(self.colours[self.current_state])
self.surf.blit(self.text_surf, self.text_rect)
target.blit(self.surf, self.rect)
def check_states(self):
mouse_pos = pm.get_pos()
mouse_buttons = pm.get_pressed()
if not self.rect.collidepoint(mouse_pos):
return Button.STATES[1]
else:
if not mouse_buttons[0]:
return Button.STATES[2]
else:
return Button.STATES[3]
def deactivate(self):
self.active = False
| [
"hercules.diascampos@kidshacklabs.com"
] | hercules.diascampos@kidshacklabs.com |
ab9064ed0cf5cdd9c40ea7d1980c735a9bd402c3 | ed98cf758a1aebb7a4415502a3672dcd5d480f91 | /app/email.py | 24033cf66ce2aa9d49a15899dba09de47e85f155 | [
"MIT"
] | permissive | eclectic-coding/microblog | 541a4e7c187def2a3511b8d7fc69cddb7e3e3b51 | 7193bb04d3073bb918aeb1e437fd72869555c467 | refs/heads/main | 2023-04-28T01:56:32.835458 | 2021-05-16T18:05:53 | 2021-05-16T18:05:53 | 356,620,088 | 0 | 0 | MIT | 2021-05-16T18:05:54 | 2021-04-10T15:20:27 | Python | UTF-8 | Python | false | false | 940 | py | from threading import Thread
from flask import render_template
from flask_mail import Message
from app import app, mail
def send_async_email(app, msg):
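    # Runs in a worker thread spawned by send_email(), so the Flask application
    # context has to be pushed manually before Flask-Mail can send.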
with app.app_context():
mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
msg = Message(subject, sender=sender, recipients=recipients)
msg.body = text_body
msg.html = html_body
Thread(target=send_async_email, args=(app, msg)).start()
def send_password_reset_email(user):
token = user.get_reset_password_token()
send_email('[Microblog] Reset Your Password',
sender=app.config['ADMINS'][0],
recipients=[user.email],
text_body=render_template('email/reset_password.txt',
user=user, token=token),
html_body=render_template('email/reset_password.html',
user=user, token=token)) | [
"noreply@github.com"
] | eclectic-coding.noreply@github.com |
066a5edb911a9b5069125b1aee9dfad1bbc78dbb | 7d74195bd00cbe8516670c8fe718e983106c9830 | /src/data_types/test_collections_ordereddict.py | ee4fe8c69fee1eec3bc707d6f7b10d39022930d8 | [] | no_license | masa4u/example_python | 7ab3d48020855ad493336afcd8d0c02eb3104b2b | 7bdee4cb8e90255b20353f7f95d3e879f6462638 | refs/heads/master | 2021-01-18T14:10:56.539659 | 2017-03-28T12:52:08 | 2017-03-28T12:52:08 | 30,511,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | d = {'banana': 3, 'apple':4, 'pear': 1, 'orange': 2}
from collections import OrderedDict
print OrderedDict(sorted(d.items(), key=lambda t:t[0]))
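# sorted() orders the pairs by key before insertion, so this prints:
# OrderedDict([('apple', 4), ('banana', 3), ('orange', 2), ('pear', 1)])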
| [
"masa4u@gmail.com"
] | masa4u@gmail.com |
2ec70de8b0fa6c526ab26722c4d947d9f7a07da4 | 241c347e5842c19bb298b8422a4bc68e64350d66 | /machine_learner.py | 7fa09bca40979c59a871e5e4fa1155713a8286a7 | [] | no_license | ThePianoDentist/dota_talent_stats | 92956c43356ea8e8d752c15f1294978eff026545 | e2c3d1cec51d1e8b426c804f0331ee1221e3208b | refs/heads/master | 2021-01-23T03:43:12.700928 | 2017-09-29T12:49:54 | 2017-09-29T12:49:54 | 86,113,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,309 | py | import random
from keras.models import Sequential
from keras.layers import Dense
import numpy
import itertools
seed = 7 # random seed fixed so can reproduce things
numpy.random.seed(seed)
# TODO abstract model stuff away so can literally just give our hero id, and team and enemy ids.
# TODO i.e dont hardcode these numpy.zeros(230) everywhere
class Model:
def __init__(self, inputs, outputs, model, alpha, test_inputs, test_outputs):
self.model = model
self.inputs = inputs
self.outputs = outputs
self.ignoreHeroes = False
self.alpha = alpha # for http://stats.stackexchange.com/a/136542
self.test_inputs = test_inputs
self.test_outputs = test_outputs
def _net_predict(self, input_):
if self.ignoreHeroes:
input_ = input_[-4:]
return self.model.predict(numpy.array([input_]))
@property
def neuron_upper_limit(self):
# TODO assumes only 1 output field
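        # Worked example with hypothetical numbers: 72000 samples and 230 input
        # features at alpha=2 give 72000 / (2 * (230 + 1)) ~= 155 hidden neurons.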
upper_limit = len(self.inputs) / (self.alpha * (len(self.inputs[0]) + 1))
return upper_limit
def evaluate(self):
scores = self.model.evaluate(self.inputs, self.outputs)
# print("Evaluation: \n")
# print(scores)
# print("%s: %.2f%%" % (self.model.metrics_names[1], scores[1] * 100))
def predict(self, our_hero, friendly_heroes, enemy_heroes):
inputs = numpy.empty(230)
inputs.fill(-1.0)
for h in friendly_heroes:
inputs[DiscreteHeroModel.hero_id_to_index(h, our_hero.id, True)] = 1.0
for h in enemy_heroes:
inputs[DiscreteHeroModel.hero_id_to_index(h, our_hero.id, False)] = 1.0
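        # 2**4 = 16 candidate talent builds, each scored below against the
        # fixed ten-hero draft encoded above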
skill_trees = [list(i) for i in itertools.product([-1.0, 1.0], repeat=4)]
for sk_tree in skill_trees:
            temp_inputs = inputs.copy()  # copy so the shared draft vector is not mutated
temp_inputs[-4:] = sk_tree
prediction = self._net_predict(temp_inputs)
rounded = [round(x[0], 4) for x in prediction]
print("\nSkill tree:")
print(temp_inputs[-4:])
print("\nPrediction: ")
print(rounded)
def test(self):
# TODO whats the best way to measure accuracy?
# do i need to be checking std_devs of inaccuracies as well?
inaccuracy = 0.0
actual_out_sum = predicted_out_sum = 0.0
for i, input_ in enumerate(self.test_inputs):
predicted_out = self._net_predict(input_)[0]
actual_out = self.test_outputs[i]
inaccuracy += abs(actual_out - predicted_out)
predicted_out_sum += predicted_out
actual_out_sum += actual_out
#inaccuracy /= len(self.test_outputs)
inaccuracy = abs(actual_out_sum - predicted_out_sum) / len(self.test_inputs)
print("Actual winrate: ", actual_out_sum/ len(self.test_inputs))
print("Predicted winrate: ", predicted_out_sum / len(self.test_inputs))
return inaccuracy
class SimpleModel(Model):
pass
class RandomForestDeicisonTreeModel(Model):
"does the 100 or so branches for each choice make this kind of hard? / poor performance?"
"could do same thing and turn it into binary choices to choose a hero or not"
"but just trading width for height"
pass
class DiscreteHeroModel(Model):
def __init__(self, inputs, outputs, alpha=2, test_inputs=None, test_outputs=None, ignore_heroes=False):
"""
        :param inputs: the discrete representations of possible heroes
        - plus the 4 talent choices
        - 0.5 represents never having chosen that talent
:param outputs: 1 for win. 0 for loss :)
"""
self.ignoreHeroes = ignore_heroes
        # TODO tidy how inheritance is occurring / how constructors behave. this is messy
if self.ignoreHeroes:
self.inputs = numpy.array([inp[-4:] for inp in inputs])
self.test_inputs = numpy.array([inp[-4:] for inp in test_inputs])
dimension = 4
else:
self.inputs = numpy.array(inputs)
self.test_inputs = numpy.array(test_inputs)
dimension = 230
self.outputs = numpy.array(outputs)
self.test_outputs = numpy.array(test_outputs)
self.model = Sequential()
# TODO 80, 40, 72000. whats a number ¯\_(ツ)_/¯
self.model.add(Dense(115, input_dim=dimension, init='uniform', activation='relu'))
#self.model.add(Dense(260, input_dim=230, init='uniform', activation='relu'))
# self.model.add(Dense(133, init='uniform', activation='relu'))
# self.model.add(Dense(8, init='uniform', activation='relu'))
self.model.add(Dense(1, init='uniform', activation='sigmoid'))
# print(len(self.inputs))
# print(len(self.outputs))
self.model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model.fit(self.inputs, self.outputs, epochs=150, batch_size=100)#, verbose=0)
super().__init__(self.inputs, self.outputs, self.model, alpha, self.test_inputs, self.test_outputs)
@staticmethod
def hero_id_to_index(hero_id, our_hero_id, friendly):
start = 0 if friendly else 113
if hero_id < our_hero_id:
return start + hero_id - 1 # hero_ids start at 1, not 0
else:
return start + hero_id - 2 # we 'jump over' our_hero in the array
class DecomposedHeroModel(Model):
pass
class Net:
def __init__(self, inputs, outputs):
self.inputs = inputs
self.outputs = outputs
# inputs
# 4 friendly team-mates
# our hero
# 5 enemies
#
# output: w/l
# hmmmmmmmmmmmmmm
# so the inputs aren't numerical values where differences have meaning...they're just ids
# this isn't really a machine learning problem?
# this is more, we have different estimates with different errors
# how to combine to make most accurate guess :/
# as in we may have a game with these exact heroes and won it. but that 100% is less reliable
# than 1000s of games with a few hero matches with maybe 60% winrate
# so standard error = standard deviation / sqrt(sample size)
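# e.g. with hypothetical numbers: a 60% winrate over 1000 games has
#   se = sqrt(0.6 * 0.4 / 1000) ~= 0.015, i.e. 60% +/- 1.5%,
# while a single exact-draft win has se up to sqrt(0.25 / 1) = 0.5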
model = Sequential()
# random note: rectifier funcs over sigmoids > performance (dont do for output layer) | [
"jbknight07@gmail.com"
] | jbknight07@gmail.com |
67539a56c45da689a06a5d0dbec167da20875c44 | 0f85c7bfd4f29bcd856adc316cecc097fda744dc | /tests/test_ensure_db_indexes.py | b76b5876506cd87e0fd1691da623de883de60b0f | [
"MIT"
] | permissive | yandex/yandex-taxi-testsuite | 260f46731c9888a9efcc3372c3d92329f2fb4d56 | 8befda8c13ef58d83b2ea7d0444e34de0f67ac7f | refs/heads/develop | 2023-08-31T23:28:31.874786 | 2023-08-14T16:00:53 | 2023-08-14T16:00:53 | 244,937,107 | 150 | 41 | MIT | 2023-09-13T16:34:07 | 2020-03-04T15:35:09 | Python | UTF-8 | Python | false | false | 2,916 | py | import pymongo
import pytest
from testsuite.databases.mongo import ensure_db_indexes
@pytest.fixture(scope='session')
def mongodb_collections():
return ['sharded_collection']
@pytest.mark.parametrize(
'index_from_yaml, arg_and_kwargs',
[
({'key': 'field'}, ('field', {'background': True})),
(
{'key': 'field', 'background': False},
('field', {'background': False}),
),
(
{
'key': 'field',
'expireAfterSeconds': 2592000,
'sparse': True,
'unique': True,
'name': 'name',
},
(
'field',
{
'expireAfterSeconds': 2592000,
'sparse': True,
'unique': True,
'name': 'name',
'background': True,
},
),
),
(
{
'key': [
{'name': 'field', 'type': 'ascending'},
{'name': 'field_2', 'type': 'descending'},
{'name': 'field_3', 'type': '2d'},
{'name': 'field_4', 'type': '2dsphere'},
{'name': 'field_5', 'type': 'hashed'},
{'name': 'field_6', 'type': 'ascending'},
{'name': 'field_7', 'type': 'text'},
],
},
(
[
('field', pymongo.ASCENDING),
('field_2', pymongo.DESCENDING),
('field_3', pymongo.GEO2D),
('field_4', pymongo.GEOSPHERE),
('field_5', pymongo.HASHED),
('field_6', pymongo.ASCENDING),
('field_7', pymongo.TEXT),
],
{'background': True},
),
),
(
{
'key': 'field',
'partialFilterExpression': {
'is_added_to_balance': {'$eq': 'holded'},
},
},
(
'field',
{
'partialFilterExpression': {
'is_added_to_balance': {'$eq': 'holded'},
},
'background': True,
},
),
),
],
)
def test_arg_and_kwargs_generation(index_from_yaml, arg_and_kwargs):
# pylint: disable=protected-access
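    # Each yaml-style index spec should become the (keys, kwargs) pair handed to
    # pymongo's index-creation call, with background=True added unless overridden.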
assert (
ensure_db_indexes._get_args_for_ensure_func(index_from_yaml)
== arg_and_kwargs
)
def test_sharded_collection(mongodb, pytestconfig):
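    # Only exercised when the no_sharding option is set; an insert into the
    # sharded collection without an explicit _shard_id is then rejected.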
if not pytestconfig.option.no_sharding:
return
mongodb.sharded_collection.insert({'_id': 'foo', '_shard_id': 0})
with pytest.raises(pymongo.errors.WriteError):
mongodb.sharded_collection.insert({'_id': 'bar'})
| [
"vitja@yandex-team.ru"
] | vitja@yandex-team.ru |
10a39221f5994440bcf13c5a105678bdd1ad321e | 08f60e7f496e76a4c6d5d8f6b671eb65fe7f4c7e | /env/Scripts/rst2man.py | cf0ea6a096d96e11d05be44d0d3c7949c0e96b1a | [] | permissive | Cell5/nfckey | dca892a0d647a3594fbb9af00615e388a8b54758 | 15a052e4877ad8eb4d71de3c92b2285e3e7d9d57 | refs/heads/master | 2022-11-27T03:45:29.944031 | 2018-11-16T09:38:01 | 2018-11-16T09:38:01 | 156,221,618 | 0 | 1 | BSD-3-Clause | 2022-11-19T01:38:13 | 2018-11-05T13:23:52 | JavaScript | UTF-8 | Python | false | false | 629 | py | #!c:\xampp\htdocs\nfckey\env\scripts\python.exe
# Author:
# Contact: grubert@users.sf.net
# Copyright: This module has been placed in the public domain.
"""
man.py
======
This module provides a simple command line interface that uses the
man page writer to output from ReStructuredText source.
"""
import locale
try:
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
from docutils.writers import manpage
description = ("Generates plain unix manual documents. " + default_description)
publish_cmdline(writer=manpage.Writer(), description=description)
| [
"exride@gmail.com"
] | exride@gmail.com |
60c21ecdefa93da86c1761960a9774855f951f81 | fab44b6672152764ad965291d645223ccbe6186a | /Undergrad_research(Machine Learning) Project/Machine Learning_undergraduate research project/lab2-part1/debugging2.py | 4e814c85f717f68d4f752899d62cf392491012d2 | [] | no_license | AndanteKim/AP_Archive | 45149c410dcdc8d4f2cd64422091de00f451f34b | bcec25375edc5c2f44598bd9f48a6de49e108d35 | refs/heads/master | 2023-02-23T20:33:08.650315 | 2021-01-28T23:51:23 | 2021-01-28T23:51:23 | 276,733,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | #!/usr/bin/env python
# This script is full of common errors you're likely to run into.
# To fix it, you need to debug it. Look at the error messages, use print
# statements, and trace your code by hand on paper to find and fix the bugs.
# This scripts calculates the fibonacci sequence in four different ways.
# Be sure to read the description at the top of each function.
# The goal is not to change the way in which the code is written but to find
# all the semantic and syntax errors.
#----------------
import numpy
# This function prints the first n numbers of the fibonacci sequence
#def print_n_fibonacci(n):
# a = 1.
# b = 1.
# print a
# print b
# counter = 2
# for i in range(n):
# newa = b
# b = a+b
# a = newa
# print b
# counter +=1
# print 'This function requested ', n, 'numbers and printed ',counter,'numbers'
#print 'output for print_n_fibonacc where n =',10,':'
#print_n_fibonacci(10)
#print
# This function prints the fibonacci sequence up to the number 610
#def print_fibonacci_upto610() :
# a,b = 1.,1.
# print a
# print b
# while b < 610:
# a,b = b,a+b
# print b
#print 'output for print_fibonacci_upto610:'
#print_fibonacci_upto610()
#print
# This function creates a list which contains the first n numbers of the
# fibonacci sequence and returns this list
#def create_fibonacci_list_uptoN(n):
# fibonacci = [1.,1.]
# for i in range(n):
# fibonacci.append(fibonacci[i]+fibonacci[i+1])
# return fibonacci
#print 'list return from create_fibonacci_list_uptoN where n =',10,':'
#fib = create_fibonacci_list_uptoN(10)
#print fib
#print 'The length of the returned list is', len(fib)
#print
# This function creates a numpy array which contais the fibonacci sequence
# up to the number 610
def create_fibonacci_array_upto610():
counter = 1
fibonacci = numpy.array([1.,1.])
while fibonacci[counter] < 610. :
fibonacci = numpy.append(fibonacci, fibonacci[counter-1] + fibonacci[counter])
counter += 1
return fibonacci
print 'array return from create_fibonacci_array_upto610:'
fib = create_fibonacci_array_upto610()
print fib
| [
"54167881+AndanteKim@users.noreply.github.com"
] | 54167881+AndanteKim@users.noreply.github.com |
b7ba80089f455b58d92760039c26578e86a680f3 | 3b380acf42684aaaa3201c241456e43920a40c1d | /paradeground/units/__init__.py | 19c57c101368292a30f2e9093d01635cbcbbd3f7 | [] | no_license | warp-one/electron | 484245c45a7947f5bbe3b87020b62df74eb884ca | 0147b3ff2e6320147562161ec2c9edea784b4015 | refs/heads/master | 2021-01-24T03:18:24.758276 | 2016-09-28T19:22:11 | 2016-09-28T19:22:11 | 41,637,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,473 | py | from math import sqrt, pi, sin, cos, tan, degrees
from random import randint
import pyglet
from tools import *
from units.behavior import *
from units.behavior import states
from selection import selectiontriangle as st
import settings
class Status(object):
name = "Buff"
def __init__(self, unit):
self.unit = unit
self.active = False
def trigger(self):
pass
def update(self, dt):
pass
class Speed(Status):
name = "Speed"
def __init__(self, unit, max_speed=600, acceleration=20, speed_bonus=30):
super(Speed, self).__init__(unit)
self.deceleration = acceleration
self.max_speed = 600
self.zones = set()
self.speed_bonus = speed_bonus
def trigger(self, zone):
self.zones.add(zone)
def deactivate(self, zone):
return
#self.zones.discard(zone)
def update(self, dt):
active = False
if self.zones:
max_speed = min([max([z.top_speed for z in self.zones]), self.unit.MAX_SPEED])
acceleration = max([z.acceleration for z in self.zones])
active = True
else:
max_speed = self.max_speed
speed_normal = (self.unit.current_speed - self.unit.BASE_SPEED)/(max_speed - self.unit.BASE_SPEED)
if active:
if self.unit.current_speed < max_speed:
self.unit.current_speed += min(acceleration, max_speed - self.unit.current_speed)
self.unit.flat_poly.colors = [self.unit.color[i%3] + int((255 - x)*speed_normal) if not randint(0, 5) else self.unit.color[i%3] for i, x in enumerate(self.unit.flat_poly.colors)]
else:
if self.unit.current_speed > self.unit.BASE_SPEED:
inactive_cap = max_speed - self.speed_bonus
if self.unit.current_speed > inactive_cap:
self.unit.current_speed = inactive_cap
else:
self.unit.current_speed -= min(self.deceleration/16, self.unit.current_speed - self.unit.BASE_SPEED)
self.unit.flat_poly.colors = [self.unit.color[i%3] + int((255 - x)*speed_normal) if not randint(0, 5) else int(self.unit.color[i%3]) for i, x in enumerate(self.unit.flat_poly.colors)]
else:
self.unit.flat_poly.colors = [int(self.unit.color[i%3]*.69) for i, x in enumerate(self.unit.flat_poly.colors)]
self.zones.clear()
class BasicUnit(pyglet.sprite.Sprite):
ROTATION_RATE = 1 * pi/180 # radians = degrees * pi/180
size = 32
radius = size/2
w = size
h = size
BASE_SPEED = 300.0 # pixels per frame
MAX_SPEED = 600.0
solid = True
image_factor = 1
selection_scale = 2 * image_factor
immobile = False
def __init__(self, team=None, *args, **kwargs):
super(BasicUnit, self).__init__(*args, **kwargs)
self.team = team
self.name = None
self.id = 0
# grid
self.prev = None
self.next = None
self.graphics = []
self.group = settings.FOREGROUND
self.sgroup = settings.MIDGROUND
self.rotate_tick = .1 #1 * pi/180.
self.rotation = 0
self.velocity = 0.
self.selectable = False
self.selected = False
self.selection_indicator = None
self.selection_rotation = 0
self.current_speed = self.BASE_SPEED
self.statuses = {}
def select(self):
if self.selectable and not self.is_selected():
self.selected = True
self.selection_indicator = st.SelectionTriangle(self)
self.graphics.append(self.selection_indicator.graphic)
def deselect(self):
if self.is_selected():
self.selected = False
if self.selection_indicator:
self.graphics.remove(self.selection_indicator.graphic)
self.selection_indicator.graphic.delete()
self.selection_indicator = None
def is_selected(self):
if self.selected:
return True
else:
return False
def suicide(self):
#self.spawn_death_animation()
for g in self.graphics:
g.delete()
self.delete()
def update(self, dt):
self.rotation -= .01
while self.rotation < 0:
self.rotation += 360
for s in self.statuses:
self.statuses[s].update(dt)
self.velocity = self.current_speed * dt
self.tick_graphics(dt)
def get_location(self):
return self.x, self.y
def tick_selection_rotation(self):
self.selection_rotation += self.ROTATION_RATE
def init_graphics(self):
pass
def tick_graphics(self, dt):
if self.selection_indicator:
self.selection_indicator.update(dt)
self.tick_selection_rotation()
def handle_collision(self, collider):
return self.solid
class ActiveUnit(BasicUnit):
def __init__(self, *args, **kwargs):
super(ActiveUnit, self).__init__(*args, **kwargs)
self.current_destination = (0, 0)
self.dx, self.dy = 0, 0
self.old_x, self.old_y = 0, 0
def move(self, dx, dy):
self.dx, self.dy = dx, dy
self.old_x, self.old_y = self.x, self.y
def rotate(self, dx, dy):
position = self.old_x, self.old_y
mark = self.x + dx, self.y + dy
# heading = get_angle_in_radians(position, mark)
# self.rotation = heading
def arrive(self):
self.current_destination = (0, 0)
self.brain.set_state("idleing")
self.stop()
self.leash_point = self.get_location()
def stop(self):
self.dx, self.dy = 0, 0
def receive_command(self, target, command=None, origin=(0, 0)):
if command == "MOVE":
x = target[0] + self.x - origin[0]
y = target[1] + self.y - origin[1]
self.current_destination = (x, y)
self.brain.set_state("movecommand")
elif command == "STOP":
self.current_destination = self.x, self.y
self.stop()
self.brain.set_state("idleing")
else:
self.current_destination = target
self.brain.set_state("movecommand")
def update(self, dt):
super(ActiveUnit, self).update(dt)
class ThinkingUnit(ActiveUnit):
def __init__(self, *args, **kwargs):
super(ThinkingUnit, self).__init__(*args, **kwargs)
self.brain = StateMachine()
self.leash_point = (0, 0)
self.alert_range = 200
self.target = None
self.wait_count = 0
idleing_state = states.UnitStateIdleing(self)
chasing_state = states.UnitStateChasing(self)
waiting_state = states.UnitStateWaiting(self)
command_state = states.UnitStateMoveCommand(self)
self.brain.add_state(idleing_state)
self.brain.add_state(chasing_state)
self.brain.add_state(waiting_state)
self.brain.add_state(command_state)
self.brain.set_state("idleing")
def update(self, dt):
super(ThinkingUnit, self).update(dt)
self.brain.think()
| [
"wrschuller@gmail.com"
] | wrschuller@gmail.com |
bddd1e68745eb9d0c4be78f83fbe5b77dccf95e0 | bff3b19be6408c671b99a8c08f8faee932460686 | /afnd6.py | 69873feb97141fca01ad456deafbdc69854124d0 | [] | no_license | OrionVi1998/Automatas | 47591e9bb9548674e2a885cc348bf300d0eaafb4 | 3969ad25b66684c635d10138ffd71adf61d21e7c | refs/heads/master | 2023-05-28T01:58:27.042093 | 2021-06-15T22:35:27 | 2021-06-15T22:35:27 | 376,657,207 | 0 | 0 | null | 2021-06-15T22:30:50 | 2021-06-13T23:10:41 | Python | UTF-8 | Python | false | false | 637 | py | grafo = {
0: [(0, "a"), (0, "b"), (1, "a")],
1: [(2, "b")],
2: [(3, "b")],
3: []
}
grafo2 = {
0: [(1, "a"), (2, "a")],
1: [(3, "b")],
2: [(5, "b")],
3: [(4, "a")],
4: [(1, "b")],
5: [(2, "a")]
}
def bfs(start):
    # breadth-first traversal over the transition graph, visiting (state, symbol) edges
    queue = [(start, "")]
    visited = []
    while len(queue) > 0:
        estado = queue.pop(0)                 # next (state, symbol) pair
        neighbours = grafo.get(estado[0])     # outgoing transitions of that state
        print("state ", estado, "neighbours: ", neighbours)
        for edge in neighbours:
            if edge not in visited:           # enqueue each transition only once
                visited.append(edge)
                queue.append(edge)
                print(edge)
bfs(0)
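
# a sketch of the same traversal parameterized over the graph, so grafo2 can be
# explored too without editing bfs (the helper name is illustrative):
# def bfs_graph(graph, start):
#     queue, visited = [(start, "")], []
#     while queue:
#         estado = queue.pop(0)
#         for edge in graph.get(estado[0], []):
#             if edge not in visited:
#                 visited.append(edge)
#                 queue.append(edge)
#     return visited
# bfs_graph(grafo2, 0)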
| [
"octaviov1998@gmail.com"
] | octaviov1998@gmail.com |
4f17a87004d2e33cbb26f6d49b7cb84a0b7ffef9 | 70532360ddfdd8006bf7044c117403ce837cef0a | /code/Rplot.py | cd1f9b2b402c74ca5ecf9502d4eba1665cd10a9b | [] | no_license | wsgan001/campus_wifi_analysis | 09a7944f5019f726682925c8785cdf5f7d8c469a | c470135691ff8faad3cb4755301e4f59389e2c5a | refs/heads/master | 2020-03-10T11:09:05.579870 | 2017-03-03T07:13:57 | 2017-03-03T07:13:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,312 | py | # -*- coding: utf-8 -*-
import fileinput
user = {}
for line in fileinput.input("../data/select/select_a"):
mac = line.strip().split(" ")[0]
user[mac] = True
fileinput.close()
with open("../data/plot/R_trace_all","w") as f:
f.write("mac time dura\n")
for line in fileinput.input("../data/feature/trace_all_statistic_filter"):
part = line.strip().split(" ")
mac, objs = part[0], part[3:]
if user.has_key(mac):
for one in objs:
tag, rto = one.split("@")[0], str(int(one.split("@")[1].split(",")[0])/42)
if tag in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23"]:
f.write(mac+" "+tag+" "+rto+"\n")
fileinput.close()
with open("../data/plot/R_trace_online","w") as f:
f.write("mac time dura\n")
for line in fileinput.input("../data/feature/trace_online_statistic_filter"):
part = line.strip().split(" ")
mac, objs = part[0], part[3:]
if user.has_key(mac):
for one in objs:
tag, rto = one.split("@")[0], str(int(one.split("@")[1].split(",")[0])/42)
if tag in ["0","1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","20","21","22","23"]:
f.write(mac+" "+tag+" "+rto+"\n")
fileinput.close()
jac = {}
for line in fileinput.input("../data/jaccount/jaccount_taged"):
part = line.strip().split(" ")
dev, mac, sex, sta, col, age = part[0], part[1], part[2], part[3], part[4], int(part[5])
if dev == "mobile":
jac[mac] = {'sex':sex, 'sta':sta, 'col':col, 'age':age}
if sex == "男性":
jac[mac]['sex'] = "Male"
elif sex == "女性":
jac[mac]['sex'] = "Female"
if age <= 20:
jac[mac]['age'] = "<=20"
elif age > 20 and age <=22 :
jac[mac]['age'] = "21~22"
elif age > 22:
jac[mac]['age'] = ">=23"
if col == "电子信息与电气工程学院":
jac[mac]['col'] = "TOP1"
elif col == "机械与动力工程学院":
jac[mac]['col'] = "TOP2"
elif col == "材料科学与工程学院":
jac[mac]['col'] = "TOP3"
elif col == "船舶海洋与建筑工程学院":
jac[mac]['col'] = "TOP4"
elif col == "安泰经济与管理学院":
jac[mac]['col'] = "TOP5"
fileinput.close()
with open("../data/plot/R_trace_all_cor","w") as f:
f.write("mac Acad Adm Ath Cant Hosp Lib Soc Supp Teach Other sex age\n")
for line in fileinput.input("../data/feature/trace_all_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"Acad":"0","Adm":"0","Ath":"0","Cant":"0","Hosp":"0","Lib":"0","Soc":"0","Supp":"0","Teach":"0","Other":"0"}
for one in objs:
tag, rto = one.split("@")[0], one.split("@")[1].split(",")[0]
if tag in ["Acad","Adm","Ath","Cant","Hosp","Lib","Soc","Supp","Teach","Other"]:
user[tag] = rto
f.write(mac+' '+user['Acad']+' '+user['Adm']+' '+user['Ath']+' '+user['Cant']+' '+user['Hosp']+' '+user['Lib']+' '+user['Soc']+' '+user['Supp']+' '+user['Teach']+' '+user['Other']+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
with open("../data/plot/R_trace_online_cor","w") as f:
f.write("mac Acad Adm Ath Cant Hosp Lib Soc Supp Teach Other sex age\n")
for line in fileinput.input("../data/feature/trace_online_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"Acad":"0","Adm":"0","Ath":"0","Cant":"0","Hosp":"0","Lib":"0","Soc":"0","Supp":"0","Teach":"0","Other":"0"}
for one in objs:
tag, rto = one.split("@")[0], one.split("@")[1].split(",")[0]
if tag in ["Acad","Adm","Ath","Cant","Hosp","Lib","Soc","Supp","Teach","Other"]:
user[tag] = rto
f.write(mac+' '+user['Acad']+' '+user['Adm']+' '+user['Ath']+' '+user['Cant']+' '+user['Hosp']+' '+user['Lib']+' '+user['Soc']+' '+user['Supp']+' '+user['Teach']+' '+user['Other']+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
# 1:renren, 2:baidu, 3:sina, 4:taobao, 5:qq
mapping = {'1':'1','2':'1','3':'1','27':'1','46':'1','64':'1','69':'1',\
'5':'2','6':'2','21':'2','22':'2','26':'2','60':'2','63':'2','70':'2','77':'2','80':'2','93':'2','98':'2',\
'11':'3','15':'3','16':'3','17':'3','23':'3','24':'3','28':'3','29':'3','51':'3','82':'3','84':'3',\
'19':'4','23':'4','36':'4','39':'4','42':'4','56':'4','57':'4','58':'4','59':'4',\
'20':'5','31':'5','41':'5','45':'5','48':'5','86':'5',\
}
with open("../data/plot/R_trace_http_cor","w") as f:
f.write("mac renren baidu sina taobao qq sex age\n")
for line in fileinput.input("../data/feature/trace_http_statistic_filter"):
part = line.strip().split(" ")
mac, objs, user = part[0], part[3:], {"renren":0,"baidu":0,"sina":0,"taobao":0,"qq":0}
for one in objs:
tag, rto = one.split("@")[0], int(one.split("@")[1].split(",")[1])
if len(tag.split("+")) == 2 and tag.split("+")[0] == "WD" and ":" in tag:
tag = tag.split("+")[1]
hst, typ = tag.split(":")[0], tag.split(":")[1]
if mapping.has_key(hst):
top = mapping[hst]
if top == "1":
user['renren'] += rto
elif top == "2":
user['baidu'] += rto
elif top == "3":
user['sina'] += rto
elif top == "4":
user['taobao'] += rto
elif top == "5":
user['qq'] += rto
f.write(mac+' '+str(user['renren'])+' '+str(user['baidu'])+' '+str(user['sina'])+' '+str(user['taobao'])+' '+str(user['qq'])+' '+jac[mac]['sex']+' '+jac[mac]['age']+'\n')
fileinput.close()
| [
"mqiang@splunk.com"
] | mqiang@splunk.com |
a95329335b970233b588cd83bb48ba1a20a06e5b | 97e833b79e40f798019e45829d4c3eb91b852438 | /telegraph/appos.py | 0326616f7c814a68bea716949be213b918db56f4 | [] | no_license | AwkwardLiSFan/news-tone | b6069d6abb55b6e4eb8caf38ff27669669d66560 | fd55786991c3c1c4d4cbe3585026b14992bec69f | refs/heads/main | 2023-06-22T10:43:57.793472 | 2021-07-21T10:58:45 | 2021-07-21T10:58:45 | 388,087,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,288 | py | appos_list = {
"aren't" : "are not",
"can't" : "cannot",
"couldn't" : "could not",
"didn't" : "did not",
"doesn't" : "does not",
"don't" : "do not",
"hadn't" : "had not",
"hasn't" : "has not",
"haven't" : "have not",
"he'd" : "he would",
"he'll" : "he will",
"he's" : "he is",
"i'd" : "I would",
"i'd" : "I had",
"i'll" : "I will",
"i'm" : "I am",
"isn't" : "is not",
"it's" : "it is",
"it'll":"it will",
"i've" : "I have",
"let's" : "let us",
"mightn't" : "might not",
"mustn't" : "must not",
"shan't" : "shall not",
"she'd" : "she would",
"she'll" : "she will",
"she's" : "she is",
"shouldn't" : "should not",
"that's" : "that is",
"there's" : "there is",
"they'd" : "they would",
"they'll" : "they will",
"they're" : "they are",
"they've" : "they have",
"we'd" : "we would",
"we're" : "we are",
"weren't" : "were not",
"we've" : "we have",
"what'll" : "what will",
"what're" : "what are",
"what's" : "what is",
"what've" : "what have",
"where's" : "where is",
"who'd" : "who would",
"who'll" : "who will",
"who're" : "who are",
"who's" : "who is",
"who've" : "who have",
"won't" : "will not",
"wouldn't" : "would not",
"you'd" : "you would",
"you'll" : "you will",
"you're" : "you are",
"you've" : "you have",
"'re": " are",
"wasn't": "was not",
"we'll":" will",
"didn't": "did not"
}
| [
"noreply@github.com"
] | AwkwardLiSFan.noreply@github.com |
2a200f3a2374864f5dfb04e9acef5ed89b61e21d | 30b3fe3e33c090099f8d86e498b80e70da069822 | /solution.py | 9605a5aca6066e2072a43573499ef3283f88859a | [] | no_license | selvaramkumar/leetcode1451 | 5e967d2b6d89e7ce5c7345dcdbef3478e3fcb20a | bebf87f5beca2aa791fcd8f3b00ae1e6cf87364c | refs/heads/main | 2023-02-07T08:46:21.077083 | 2021-01-05T14:23:29 | 2021-01-05T14:23:29 | 327,020,442 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | from collections import OrderedDict
class Solution:
    def arrangeWords(self, text: str) -> str:
        temp = text.split(" ")
        # group words by length; concatenating keeps their original relative order
        dict1 = {}
        for i in temp:
            if not len(i) in dict1:
                dict1[len(i)] = i
            else:
                dict1[len(i)] = dict1[len(i)] + " " + i
        res = ""
        # walk the groups in ascending word length
        dict2 = OrderedDict(sorted(dict1.items()))
        count = 0
        for key, value in dict2.items():
            if count >= 1:
                # every word after the first stays lowercase
                res = res + " " + value[0].lower() + value[1:]
                count = count + 1
            else:
                # the sentence starts with a capital letter
                res = res + value[0].upper() + value[1:]
                count = count + 1
        return res
s=Solution()
str1="Keep calm and code on"
print(s.arrangeWords(str1))
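# hand-traced expected output: "On and keep calm code"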
| [
"sselvaramkumar@gmail.com"
] | sselvaramkumar@gmail.com |
43c10cdae7648e4ba849bdb25a0d0584082480de | a1678f80efe56423d08bea6a2843633b8a81dd34 | /DSALGO_String/firstNonRpeatingCharacterInStream.py | d49f0701ab89e0eff461dd81e21982da2b3f07ca | [] | no_license | NIDHISH99444/CodingNinjas | af60aa93dbfcf050e727949d41201f72973b0608 | b77b652cf0bf9b098ef9da4eff5eaecb7bfeaea5 | refs/heads/master | 2021-05-17T03:50:45.376843 | 2020-05-03T17:25:01 | 2020-05-03T17:25:01 | 250,608,228 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | from _collections import deque
def firstNonRepeating(string):
    dict=[0]*26          # occurrence count per lowercase letter
    q=deque()            # characters in arrival order
    for i in range(len(string)):
        dict[ord(string[i])-ord('a')]+=1
        q.append(string[i])
        # pop repeated characters off the front; the head is the first non-repeating one
        while len(q)!=0:
            if dict[ord(q[0])-ord('a')]>1:
                q.popleft()
            else:
                print(q[0],end=" ")
                break
        if len(q)==0:
            print("-1",end=" ")   # every character seen so far repeats
    print()
firstNonRepeating("aabc")
firstNonRepeating("aac")
| [
"nidhish99444@gmail.com"
] | nidhish99444@gmail.com |
f34988ec1779777e353d26f3d66f85407eee93b7 | 91ad7dcbb7db4066e1bbcba01affa0a46eba1439 | /Plotter.py | b44ae256fcf4ed3cc63627793a4930bcdab84531 | [] | no_license | dcakagi/PnPSolver | 54e4c6f79037989e309aefe7debe670fee36ef5a | d77344034497cdd47e4605cfa21df7c10dbd729b | refs/heads/master | 2023-07-24T07:57:28.141307 | 2021-09-03T20:54:46 | 2021-09-03T20:54:46 | 393,401,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,830 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, TextArea
class ErrorPlotter:
def __init__(self, plots: list, error_window_size: int, error_units_: str, time_units_: str, secondary_axes: bool=False):
'''
Class to be used for plotting errors. Default settings will plot some provided error(s) vs. time, although a different
variable can be plotted along the x-axis by providing the data in the first argument of the update_plot() function
:param plots: List of plots to be graphed. Names of error plots provided in list will be the default main y-axes labels,
with secondary y-axes tracking the percent error
:param error_window_size: Number of prior timesteps to be used to calculate the mean error
:param error_units_: Units for measuring absolute error (m, cm, rad, etc.)
:param time_units_: Units of time to be plotted along the x-axis if plotting error vs. time
:param secondary_axes: Show secondary axis of percent error on plots
'''
self.state = None
self.num_plots = len(plots)
self.fig, self.axs = plt.subplots(self.num_plots, 1)
self.lines = []
self.times = []
self.twins = []
self.twin_lines = []
self.error_data = {}
self.perc_error_data = {}
self.window_size = error_window_size
self.error_units = error_units_
self.time_units = time_units_
self.error_window = None
self.perc_error_window = None
self.annotation_boxes = []
self.annotations = []
self.second_axis = secondary_axes
idx = 0
for ax in self.axs:
ax.set_ylabel(plots[idx] + " (" + self.error_units + ")")
if self.second_axis:
twin = ax.twinx()
self.twins.append(twin)
twin.set_ylabel(str(plots[idx] + " (Percent)"))
idx += 1
self.axs[-1].set_xlabel("Time (" + self.time_units + ")")
plt.ion()
self.init = False
def set_title(self, title):
self.axs[0].set_title(title)
def set_xlabel(self, label):
self.axs[-1].set_xlabel(label)
def set_main_ylabels(self, *labels):
idx = 0
for ax in self.axs:
ax.set_ylabel(labels[idx])
idx += 1
def set_secondary_ylabels(self, *labels):
if not self.second_axis:
return
idx = 0
for twin in self.twins:
twin.set_ylabel(labels[idx])
idx += 1
def get_average_errors(self):
error = np.mean(self.error_window, axis=1)
perc_error = None
if self.second_axis:
perc_error = np.mean(self.perc_error_window, axis=1)
return [error, perc_error]
def update_plot(self, time: float, *in_data: float):
'''
        :param time: Timestep associated with updated data if plotting error vs. time, OR another independent variable (e.g. range) to plot error against
        :param in_data: Data to be plotted. If plotting a secondary axis of percent error, use the form (error_i, percent_error_i, ...) for n plots
'''
plt.ion()
self.times.append(time)
if self.second_axis:
data = in_data[0:-1:2]
perc_data = in_data[1::2]
else:
data = in_data
perc_data = None
if not self.init:
self.error_window = np.array(data).reshape(-1, 1)
self.perc_error_window = None
if self.second_axis:
self.perc_error_window = np.array(perc_data).reshape(-1, 1)
ave_errors = self.get_average_errors()
for idx in range(0, len(data)):
self.error_data[idx] = [data[idx]]
line, = self.axs[idx].plot(time, data[idx], "r-")
self.lines.append(line)
ave_error = ave_errors[0][idx]
self.annotations.append([TextArea("Absolute Error (" + str(self.window_size) + " window): " + str("{:.3f} ".format(ave_error)) + self.error_units)])
ab = AnnotationBbox(self.annotations[idx][0], (0.01, 0.9), xycoords='axes fraction', alpha=1.0, pad=0.1, box_alignment=(0, 0))
self.axs[idx].add_artist(ab)
if self.second_axis:
self.axs[idx].tick_params(axis="y", colors=line.get_color())
self.axs[idx].yaxis.label.set_color(line.get_color())
self.perc_error_data[idx] = [perc_data[idx]]
twin_line, = self.twins[idx].plot(time, perc_data[idx], "b-", zorder=1)
self.twin_lines.append(twin_line)
self.twins[idx].tick_params(axis="y", colors=twin_line.get_color())
self.twins[idx].yaxis.label.set_color(twin_line.get_color())
self.axs[idx].set_zorder(self.twins[idx].get_zorder()+1)
self.axs[idx].patch.set_visible(False)
ave_perc_error = ave_errors[1][idx]
self.annotations[idx].append(TextArea("Percent Error (" + str(self.window_size) + " window): " + str("{:.3f}%".format(ave_perc_error))))
ab1 = AnnotationBbox(self.annotations[idx][1], (0.01, 0.8), xycoords='axes fraction', alpha=1.0, pad=0.1, box_alignment=(0, 0))
self.axs[idx].add_artist(ab1)
self.init = True
return
# Check if window(s) is/are at maximum size, delete oldest points if needed
if self.error_window.shape[1] == self.window_size:
self.error_window = np.delete(self.error_window, 0, 1)
if self.second_axis:
self.perc_error_window = np.delete(self.perc_error_window, 0, 1)
self.error_window = np.append(self.error_window, np.array(data).reshape(-1, 1), axis=1)
if self.second_axis:
self.perc_error_window = np.append(self.perc_error_window, np.array(perc_data).reshape(-1, 1), axis=1)
for idx in range(0, len(data)):
ave_errors = self.get_average_errors()
self.error_data[idx].append(data[idx])
self.lines[idx].set_data(self.times, self.error_data[idx])
ave_error = ave_errors[0][idx]
self.annotations[idx][0].set_text("Absolute Error (" + str(self.window_size) + " window): " + str("{:.3f} ".format(ave_error)) + self.error_units)
self.axs[idx].relim()
self.axs[idx].autoscale_view(True, True, True)
if self.second_axis:
self.perc_error_data[idx].append(perc_data[idx])
self.twin_lines[idx].set_data(self.times, self.perc_error_data[idx])
ave_perc_error = ave_errors[1][idx]
self.annotations[idx][1].set_text("Percent Error (" + str(self.window_size) + " window): " + str("{:.3f}%".format(ave_perc_error)))
self.twins[idx].relim()
#self.twins[idx].set_ylim(0, 100)
self.twins[idx].autoscale_view(True, True, True)
#plt.show()
plt.pause(0.0000001)
class PosePlotter:
def __init__(self, plots: [list], units: str, time_units: str, use_estimates: bool=True):
'''
:param plots: List of variable lists to plot on each axis. If a single variable is to be graphed it will be plotted vs time
:param units: Measurement units of plotted data (used for axis labeling)
        :param time_units: Units of time to be plotted along the x-axis
        :param use_estimates: Whether estimated values are plotted alongside the actual data
'''
self.num_plots = len(plots)
self.fig, self.axs = plt.subplots(1, self.num_plots)
self.units = units
self.time_units = time_units
self.times = []
self.data_lines = []
self.est_lines = []
self.data = {}
self.est_data = {}
self.plots = plots
self.use_estimates = use_estimates
idx = 0
for ax in self.axs:
if len(plots[idx]) == 1:
ax.set_ylabel(plots[idx][0] + " (" + self.units + ")")
ax.set_xlabel("Time (" + self.time_units + ")")
elif len(plots[idx]) == 2:
ax.set_xlabel(plots[idx][0] + " (" + self.units + ")")
ax.set_ylabel(plots[idx][1] + " (" + self.units + ")")
else:
pass # Does not handle plotting three dimensions
idx += 1
plt.ion()
self.init = False
def update_plot(self, time: float, *in_data: float):
'''
:param time: Timestep associated with updated data
        :param in_data: Data to be plotted, matching order of variables provided to class constructor, in form (data_i, est_data_i, ...)
'''
plt.ion()
self.times.append(time)
if self.use_estimates:
data = in_data[0:-1:2]
est_data = in_data[1::2]
else:
data = in_data
est_data = None
if not self.init:
for d in range(len(data)):
self.data[d] = [data[d]]
if self.use_estimates:
self.est_data[d] = [est_data[d]]
data_idx = 0
for p in range(self.num_plots):
if len(self.plots[p]) == 1:
data_line, = self.axs[p].plot(self.times, self.data[data_idx], "b-")
self.data_lines.append(data_line)
if self.use_estimates:
est_line, = self.axs[p].plot(self.times, self.est_data[data_idx], "r-")
self.est_lines.append(est_line)
self.axs[p].legend([self.data_lines[p], self.est_lines[p]], ["Actual " + self.plots[p][0], "Estimated " + self.plots[p][0]])
data_idx += 1
elif len(self.plots[p]) == 2:
data_line, = self.axs[p].plot(self.data[data_idx], self.data[data_idx + 1], "b-")
self.data_lines.append(data_line)
if self.use_estimates:
est_line, = self.axs[p].plot(self.est_data[data_idx], self.est_data[data_idx + 1], "r-")
self.est_lines.append(est_line)
self.axs[p].legend([self.data_lines[p], self.est_lines[p]], ["Actual " + self.plots[p][0] + ", " + self.plots[p][1],
"Estimated " + self.plots[p][1] + ", " + self.plots[p][1]])
data_idx += 2
else:
pass # No 3D plotting implemented
self.init = True
else:
for d in range(len(data)):
self.data[d].append(data[d])
if self.use_estimates:
self.est_data[d].append(est_data[d])
data_idx = 0
for p in range(self.num_plots):
if len(self.plots[p]) == 1:
self.data_lines[p].set_data(self.times, self.data[data_idx])
if self.use_estimates:
self.est_lines[p].set_data(self.times, self.est_data[data_idx])
data_idx += 1
elif len(self.plots[p]) == 2:
self.data_lines[p].set_data(self.data[data_idx], self.data[data_idx + 1])
if self.use_estimates:
self.est_lines[p].set_data(self.est_data[data_idx], self.est_data[data_idx + 1])
data_idx += 2
self.axs[p].relim()
self.axs[p].autoscale_view(True, True, True)
plt.pause(0.00001)
def set_xlabel(self, plot_idx, label):
self.axs[plot_idx].set_xlabel(label)
def set_ylabel(self, plot_idx, label):
self.axs[plot_idx].set_ylabel(label)
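
# a minimal usage sketch for both plotters (all numbers below are made up):
# ep = ErrorPlotter(["x error", "y error"], error_window_size=50,
#                   error_units_="m", time_units_="s", secondary_axes=True)
# ep.update_plot(0.1, 0.30, 3.0, 0.20, 2.0)   # t, err_x, pct_x, err_y, pct_y
#
# pp = PosePlotter([["x", "y"], ["z"]], units="m", time_units="s")
# pp.update_plot(0.1, 1.0, 1.1, 2.0, 2.1, 0.5, 0.6)  # t, x, x_est, y, y_est, z, z_est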
| [
"dcakagi@gmail.com"
] | dcakagi@gmail.com |
2b527ae08f8f0e1fc6300048d9138a988209d9aa | 3e3ce865b7746732fe4298435cfe5cb8b23f46e7 | /venv1/bin/easy_install-2.7 | 5fdfe2bb8764ea291ec2e732b2cdb5cb68fd2aab | [] | no_license | siddharth12456/Plivo | ba48735ff1edb655737ed569d65db5619cd7f4b4 | a6bd537b88add841325b88cd953b60b35636ddd4 | refs/heads/master | 2021-07-19T11:46:37.090810 | 2020-04-20T08:16:07 | 2020-04-20T15:46:01 | 132,721,649 | 0 | 0 | null | 2020-04-20T15:46:03 | 2018-05-09T07:52:32 | Python | UTF-8 | Python | false | false | 278 | 7 | #!/home/siddharth/PycharmProjects/PlivoAPI/venv1/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"sroy@sentient-energy.com"
] | sroy@sentient-energy.com |
4d876adb17ed372668e9f24105bb83023429a2af | ef9368cc0b4f1bfad3abae292be5c7677f11a8e4 | /EazyHacks/urls.py | 8cc74321382162d1e9bd6f86e1997887ef30302c | [] | no_license | prnvshrn/EazyHacks | 89fc519c034fb4c8c75ea91c7a83b50ce77d2a63 | 212c66c80de4bf4eb3eb76dda4479abcfe67d873 | refs/heads/master | 2021-09-05T21:26:55.891948 | 2018-01-31T04:36:36 | 2018-01-31T04:36:36 | 115,707,094 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | py | """EazyHacks URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from EazyHacks import views
from django.conf.urls import url
urlpatterns = [
path('admin/', admin.site.urls),
url(r'^$', views.openLogin, name='login'),
url(r'^AddHack.html/', views.openAddHack, name='add_hack'),
url(r'^BrowseHack.html/(?P<hack_type>[0-9]+)/', views.openBrowseHack, name='browse_hack'),
url(r'^HackDetails.html/(?P<hack_id>[0-9]+)/', views.openHackDetails, name='hack_details'),
url(r'^HackDetails.html/', views.openLogin ,name='hack_base'),
url(r'^Logout/',views.logOut,name='logout')
]
| [
"prnvshrn@gmail.com"
] | prnvshrn@gmail.com |
7ef0e32c2bc08328f7dda3f11c84b48d28e808b8 | 34096e5f3d6569e3aaee794bf8ccc0b04f2c8c8f | /docusign_esign/models/envelope_transfer_rule.py | 9850e0af941d967df7254ce7324591c2361dd884 | [
"MIT"
] | permissive | hunk/docusign-python-client | 5c96de8a08973fe1744d902b2a3873a7376a62c7 | a643c42c1236715e74eef6fc279a1b29da1b5455 | refs/heads/master | 2021-06-14T06:41:23.298368 | 2020-04-01T05:51:08 | 2020-04-01T05:51:08 | 254,482,059 | 0 | 0 | MIT | 2020-04-09T21:28:23 | 2020-04-09T21:28:23 | null | UTF-8 | Python | false | false | 9,506 | py | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class EnvelopeTransferRule(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, carbon_copy_original_owner=None, enabled=None, envelope_transfer_rule_id=None, event_type=None, from_group=None, from_user=None, modified_date=None, modified_user=None, to_folder=None, to_user=None):
"""
EnvelopeTransferRule - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'carbon_copy_original_owner': 'str',
'enabled': 'str',
'envelope_transfer_rule_id': 'str',
'event_type': 'str',
'from_group': 'Group',
'from_user': 'UserInformation',
'modified_date': 'str',
'modified_user': 'UserInformation',
'to_folder': 'Folder',
'to_user': 'UserInformation'
}
self.attribute_map = {
'carbon_copy_original_owner': 'carbonCopyOriginalOwner',
'enabled': 'enabled',
'envelope_transfer_rule_id': 'envelopeTransferRuleId',
'event_type': 'eventType',
'from_group': 'fromGroup',
'from_user': 'fromUser',
'modified_date': 'modifiedDate',
'modified_user': 'modifiedUser',
'to_folder': 'toFolder',
'to_user': 'toUser'
}
self._carbon_copy_original_owner = carbon_copy_original_owner
self._enabled = enabled
self._envelope_transfer_rule_id = envelope_transfer_rule_id
self._event_type = event_type
self._from_group = from_group
self._from_user = from_user
self._modified_date = modified_date
self._modified_user = modified_user
self._to_folder = to_folder
self._to_user = to_user
@property
def carbon_copy_original_owner(self):
"""
Gets the carbon_copy_original_owner of this EnvelopeTransferRule.
:return: The carbon_copy_original_owner of this EnvelopeTransferRule.
:rtype: str
"""
return self._carbon_copy_original_owner
@carbon_copy_original_owner.setter
def carbon_copy_original_owner(self, carbon_copy_original_owner):
"""
Sets the carbon_copy_original_owner of this EnvelopeTransferRule.
:param carbon_copy_original_owner: The carbon_copy_original_owner of this EnvelopeTransferRule.
:type: str
"""
self._carbon_copy_original_owner = carbon_copy_original_owner
@property
def enabled(self):
"""
Gets the enabled of this EnvelopeTransferRule.
:return: The enabled of this EnvelopeTransferRule.
:rtype: str
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""
Sets the enabled of this EnvelopeTransferRule.
:param enabled: The enabled of this EnvelopeTransferRule.
:type: str
"""
self._enabled = enabled
@property
def envelope_transfer_rule_id(self):
"""
Gets the envelope_transfer_rule_id of this EnvelopeTransferRule.
:return: The envelope_transfer_rule_id of this EnvelopeTransferRule.
:rtype: str
"""
return self._envelope_transfer_rule_id
@envelope_transfer_rule_id.setter
def envelope_transfer_rule_id(self, envelope_transfer_rule_id):
"""
Sets the envelope_transfer_rule_id of this EnvelopeTransferRule.
:param envelope_transfer_rule_id: The envelope_transfer_rule_id of this EnvelopeTransferRule.
:type: str
"""
self._envelope_transfer_rule_id = envelope_transfer_rule_id
@property
def event_type(self):
"""
Gets the event_type of this EnvelopeTransferRule.
:return: The event_type of this EnvelopeTransferRule.
:rtype: str
"""
return self._event_type
@event_type.setter
def event_type(self, event_type):
"""
Sets the event_type of this EnvelopeTransferRule.
:param event_type: The event_type of this EnvelopeTransferRule.
:type: str
"""
self._event_type = event_type
@property
def from_group(self):
"""
Gets the from_group of this EnvelopeTransferRule.
:return: The from_group of this EnvelopeTransferRule.
:rtype: Group
"""
return self._from_group
@from_group.setter
def from_group(self, from_group):
"""
Sets the from_group of this EnvelopeTransferRule.
:param from_group: The from_group of this EnvelopeTransferRule.
:type: Group
"""
self._from_group = from_group
@property
def from_user(self):
"""
Gets the from_user of this EnvelopeTransferRule.
:return: The from_user of this EnvelopeTransferRule.
:rtype: UserInformation
"""
return self._from_user
@from_user.setter
def from_user(self, from_user):
"""
Sets the from_user of this EnvelopeTransferRule.
:param from_user: The from_user of this EnvelopeTransferRule.
:type: UserInformation
"""
self._from_user = from_user
@property
def modified_date(self):
"""
Gets the modified_date of this EnvelopeTransferRule.
:return: The modified_date of this EnvelopeTransferRule.
:rtype: str
"""
return self._modified_date
@modified_date.setter
def modified_date(self, modified_date):
"""
Sets the modified_date of this EnvelopeTransferRule.
:param modified_date: The modified_date of this EnvelopeTransferRule.
:type: str
"""
self._modified_date = modified_date
@property
def modified_user(self):
"""
Gets the modified_user of this EnvelopeTransferRule.
:return: The modified_user of this EnvelopeTransferRule.
:rtype: UserInformation
"""
return self._modified_user
@modified_user.setter
def modified_user(self, modified_user):
"""
Sets the modified_user of this EnvelopeTransferRule.
:param modified_user: The modified_user of this EnvelopeTransferRule.
:type: UserInformation
"""
self._modified_user = modified_user
@property
def to_folder(self):
"""
Gets the to_folder of this EnvelopeTransferRule.
:return: The to_folder of this EnvelopeTransferRule.
:rtype: Folder
"""
return self._to_folder
@to_folder.setter
def to_folder(self, to_folder):
"""
Sets the to_folder of this EnvelopeTransferRule.
:param to_folder: The to_folder of this EnvelopeTransferRule.
:type: Folder
"""
self._to_folder = to_folder
@property
def to_user(self):
"""
Gets the to_user of this EnvelopeTransferRule.
:return: The to_user of this EnvelopeTransferRule.
:rtype: UserInformation
"""
return self._to_user
@to_user.setter
def to_user(self, to_user):
"""
Sets the to_user of this EnvelopeTransferRule.
:param to_user: The to_user of this EnvelopeTransferRule.
:type: UserInformation
"""
self._to_user = to_user
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"noreply@github.com"
] | hunk.noreply@github.com |
74b61650487cc870cd8e9dd2cda6ff92a8231e9d | fac2ed23a092fe8c07c30c6542f977e2244d57e3 | /문24.py | bc6d66ba577e4c6c0f17f198a2fd390df6fccb99 | [] | no_license | rhkdgh815/rhkdgh815 | d1fcf9b192ffb8eb1ccc4a2dd3d2d7997342ed8d | 5cb6380ba17fcc1bbffced4d8f0f5aab259ad155 | refs/heads/master | 2023-08-01T23:58:50.459446 | 2021-09-28T05:55:50 | 2021-09-28T05:55:50 | 403,934,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py | n1 = int(input())
n2 = int(input())
odd_sum = 0
even_sum = 0
for i in range(n1+n2+1):
if i % 2 == 1 :
odd_sum += i
else:
even_sum += i
print("짝수:",even_sum,"홀수:",odd_sum) | [
"80893010+rhkdgh815@users.noreply.github.com"
] | 80893010+rhkdgh815@users.noreply.github.com |
2ed8d0c47dc05eb342a5011b55fde809be7ece77 | b038128c5ecd477403f1396ae7f5be29d6ade668 | /dataset/dataset.py | 25e313a0f9865d75871717ce8397d6f655d704c2 | [] | no_license | BAfsharmanesh/Kaggle_Indoor_Location_Navigation | 82fe8768b0a81f2bbc6e4a7c4d7d4f204f686b33 | e9379061c0a0cda1a02f9e373c967a4c48f487f6 | refs/heads/main | 2023-04-30T19:36:38.876825 | 2021-05-16T21:48:41 | 2021-05-16T21:48:41 | 367,980,247 | 0 | 0 | null | 2021-05-16T20:40:34 | 2021-05-16T20:36:18 | Python | UTF-8 | Python | false | false | 5,444 | py | import pandas as pd
from icecream import ic
from pytorch_lightning import LightningDataModule
from torch.utils.data import Dataset, DataLoader
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.model_selection import StratifiedKFold #,StratifiedGroupKFold
from config import Config
from utils.utils import time_function
import numpy as np
class IndoorDataset(Dataset):
def __init__(self, data, bssid_feats, rssi_feats, flag='TRAIN'):
self.data = data
self.flag = flag
self.bssid_feats = bssid_feats
self.rssi_feats = rssi_feats
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
tmp_data = self.data.iloc[index]
if self.flag == 'TRAIN':
return {
'BSSID_FEATS': tmp_data[self.bssid_feats].values.astype(int),
'RSSI_FEATS': tmp_data[self.rssi_feats].values.astype(np.float32),
'site_id': tmp_data['site_id'].astype(int),
'x': tmp_data['x'],
'y': tmp_data['y'],
'floor': tmp_data['floor'],
}
elif self.flag == 'TEST':
return {
'BSSID_FEATS': tmp_data[self.bssid_feats].values.astype(int),
'RSSI_FEATS': tmp_data[self.rssi_feats].values.astype(np.float32),
'site_id': tmp_data['site_id'].astype(int)
}
class IndoorDataModule(LightningDataModule):
def __init__(self, train_data, test_data, kfold=False):
self.train_data = train_data
self.test_data = test_data
self.kfold = kfold
def set_fold_num(self, fold_num):
self.fold_num = fold_num
def _init_feats(self):
self.bssid_feats = [f'bssid_{i}' for i in range(Config.num_wifi_feats)]
self.rssi_feats = [f'rssi_{i}' for i in range(Config.num_wifi_feats)]
def _init_wifi_bssids(self):
wifi_bssids = []
for i in range(100):
wifi_bssids += self.train_data[f'bssid_{i}'].values.tolist()
wifi_bssids += self.test_data[f'bssid_{i}'].values.tolist()
self.wifi_bssids = list(set(wifi_bssids))
self.wifi_bssids_size = len(self.wifi_bssids)
def _init_transforms(self):
self.wifi_bssids_encoder = LabelEncoder()
self.wifi_bssids_encoder.fit(self.wifi_bssids)
self.site_id_encoder = LabelEncoder()
self.site_id_encoder = self.site_id_encoder.fit(
self.train_data['site_id'])
self.rssi_normalizer = StandardScaler()
self.rssi_normalizer.fit(self.train_data[self.rssi_feats])
def _transform(self, data):
for bssid_feat in self.bssid_feats:
data[bssid_feat] = self.wifi_bssids_encoder.transform(
data[bssid_feat])
data['site_id'] = self.site_id_encoder.transform(data['site_id'])
data[self.rssi_feats] = self.rssi_normalizer.transform(
data[self.rssi_feats])
return data
def _kfold(self):
        ''' Stratified K-fold keyed on path (stratifies, rather than groups, by path)
'''
skf = StratifiedKFold(n_splits=Config.fold_num,
shuffle=True, random_state=Config.seed)
self.train_data['site_id_f'] = self.train_data['site_id'] + self.train_data['floor'].astype(str)
for n, (train_index, val_index) in enumerate(
skf.split(
X = self.train_data['path'],
y = self.train_data['path']
)
):
self.train_data.loc[val_index, 'kfold'] = int(n)
@time_function
def prepare_data(self):
# Init cross validation
if self.kfold:
self._kfold()
# Init preprocessing
self._init_feats()
self._init_wifi_bssids()
self._init_transforms()
self.site_id_dim = len(self.train_data['site_id'].unique())
self.train_data = self._transform(self.train_data)
self.test_data = self._transform(self.test_data)
@time_function
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == 'fit' or stage is None:
if self.kfold:
train_df = self.train_data[self.train_data['kfold'] !=
self.fold_num].reset_index(drop=True)
val_df = self.train_data[self.train_data['kfold'] ==
self.fold_num].reset_index(drop=True)
self.train = IndoorDataset(
train_df, self.bssid_feats, self.rssi_feats, flag="TRAIN")
self.val = IndoorDataset(
val_df, self.bssid_feats, self.rssi_feats, flag="TRAIN")
# Assign test dataset for use in dataloader(s)
if stage == 'test' or stage is None:
self.test = IndoorDataset(
self.test_data, self.bssid_feats, self.rssi_feats, flag="TEST")
def train_dataloader(self):
return DataLoader(self.train, batch_size=Config.train_batch_size, num_workers=Config.num_workers, shuffle=True, pin_memory=True)
def val_dataloader(self):
return DataLoader(self.val, batch_size=Config.val_batch_size, num_workers=Config.num_workers, shuffle=True, pin_memory=True)
def test_dataloader(self):
return DataLoader(self.test, batch_size=Config.val_batch_size, num_workers=Config.num_workers, shuffle=False, pin_memory=True)
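
# a minimal usage sketch (the dataframes and Config fields are assumed to exist):
# idm = IndoorDataModule(train_df, test_df, kfold=True)
# idm.prepare_data()            # fits encoders/scaler and assigns fold numbers
# idm.set_fold_num(0)
# idm.setup("fit")
# train_loader = idm.train_dataloader()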
| [
"noreply@github.com"
] | BAfsharmanesh.noreply@github.com |
caff9c7cb685bc07ae6b58176aa41c8d83544348 | 9f0a4262c4402201df1cdd5674a679543f4a50b5 | /shaderLibrary_maya2017/resources/__init__.py | 05e522a865f16bd93dd2591fa2f1e5a4d20967ec | [] | no_license | subing85/subins-toolkits | 611b6b3b3012ccb023096f6e21d18d2bda5a534b | d02af1289ec3ee5bce6fa3d78c134a8847113aa6 | refs/heads/master | 2022-07-12T17:19:57.411454 | 2022-07-01T20:37:16 | 2022-07-01T20:37:16 | 168,826,548 | 11 | 2 | null | 2022-07-02T01:03:34 | 2019-02-02T11:51:25 | Mathematica | UTF-8 | Python | false | false | 1,087 | py | import os
from shaderLibrary_maya2017.utils import platforms
CURRENT_PATH = os.path.dirname(__file__)
MODULE = platforms.get_tool_kit()[0]
def getInputPath(module=None):
return os.path.join(
CURRENT_PATH, "inputs", "{}.json".format(module)
)
def getIconPath():
return os.path.join(CURRENT_PATH, "icons")
def getPreferencePath():
return os.path.join(getWorkspacePath(), "preference")
def getWorkspacePath():
return os.path.join(os.getenv("HOME"), "Documents", MODULE)
def getPublishDirectory():
return os.path.join(
os.environ["HOME"], "Walk_cycle", "characters"
)
def getResourceTypes():
data = {
"preference": getPreferencePath(),
"shader": getWorkspacePath(),
"generic": None,
}
return data
def getToolKitLink():
return "https://www.subins-toolkits.com"
def getToolKitHelpLink():
return "https://vimeo.com/314966208"
def getDownloadLink():
return "https://www.subins-toolkits.com/shader-library"
# end ####################################################################
| [
"subing85@gmail.com"
] | subing85@gmail.com |
1e5c3dec3126452c25e701e2cef0ece2a6572176 | 7556fc49cef701861ce456c962181c8a4d8522ce | /employee/models.py | e51481d0fb408a74056a647258631f3c29935d3c | [] | no_license | km-pythoner/job_market_cms | 7fa708e6bc0f14ac0936e863c971e2e62c0f6ed0 | 2e18f8822f6938098bcff7317dd9350d4d837540 | refs/heads/master | 2021-09-09T19:50:54.551274 | 2018-03-19T10:05:52 | 2018-03-19T10:05:52 | 125,135,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | from datetime import datetime
from django.db import models
from users.models import UserProfile
from employer.models import JobInfo
class EmployeeInfo(models.Model):
pass | [
"jj19901030"
] | jj19901030 |
317b8373cde4e8566b57759adc99ca00c1e5885f | d59a459f3b3bccfb6204a3f803fa465ea1297811 | /ipynbhpc/PBS.py | 3895e450292927f8ff6d1597d02e93c764db13c6 | [] | no_license | rainwoodman/ipynbhpc | 90fbce679b5ae5886222b90984f5453aeceefceb | 338973766328d5c83896daec18ae7e81514ae3b8 | refs/heads/master | 2021-01-20T00:58:26.675992 | 2015-05-24T18:46:54 | 2015-05-24T18:46:54 | 34,808,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,125 | py | import subprocess
import numpy
import xml.etree.ElementTree as ET
import re
import time
def submit(string):
pipe = subprocess.Popen(['qsub'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = pipe.communicate(string)[0]
match = re.match('([0-9]*)\..*', stdout)
if pipe.returncode or not match:
raise Exception("qsub failed: %s", stdout)
return match.group(1)
def status(jobid):
""" returns R, Q, E, C, or U(for unknown, eg jobid is not in qstat"""
try:
xml = subprocess.check_output(['qstat', '-x', str(jobid)])
tree = ET.fromstring(xml)
ele = tree.find('Job/job_state')
return ele.text
except subprocess.CalledProcessError:
return 'U'
def delete(jobid):
return subprocess.check_call(['qdel', str(jobid)])
def wait(jobid):
timeout = 10.
if not isinstance(jobid, (list, tuple, set)):
while status(jobid) in 'RQ':
time.sleep(timeout)
timeout *= 1.2
if timeout > 60.:
timeout = 60.
else:
for job in jobid:
wait(job)
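
# a minimal usage sketch (the job script below is illustrative):
# jobid = submit("#PBS -l nodes=1\n#PBS -l walltime=00:05:00\necho hello\n")
# wait(jobid)            # polls qstat with capped exponential backoff
# print status(jobid)    # 'C' once complete, 'U' if qstat no longer knows the job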
| [
"yfeng1@berkeley.edu"
] | yfeng1@berkeley.edu |
bac58cc9c2e873327fcf4652f7150e09e1f24dbc | 9ee12b1d04a458ab84a042acc317c483bf10b53e | /TinyImagenet/keras_alexnet.py | e447fe8fade498b338e4828802796951bcbea1cb | [] | no_license | cvasfi/light-cnns | c938aa952444894575253e1885bcea2d1b09c68c | e181e6aac1aac3e499c5318143b3fffba54186e7 | refs/heads/master | 2021-01-21T10:49:22.172196 | 2017-10-19T19:11:52 | 2017-10-19T19:11:52 | 101,991,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,489 | py | from __future__ import division
import six
from keras.models import Model
from keras.layers import (
Input,
Activation,
Dense,
Flatten
)
from keras.layers.convolutional import (
Conv2D,
MaxPooling2D,
AveragePooling2D
)
from keras.layers.merge import add
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras import backend as K
from keras.layers.advanced_activations import PReLU
from keras.layers.core import Dropout
def _conv_relu(**conv_params):
filters = conv_params["filters"]
kernel_size = conv_params["kernel_size"]
strides = conv_params.setdefault("strides", (1, 1))
kernel_initializer = conv_params.setdefault("kernel_initializer", "he_normal")
padding = conv_params.setdefault("padding", "same")
kernel_regularizer = conv_params.setdefault("kernel_regularizer", l2(1.e-4))
def f(input):
conv = Conv2D(filters=filters, kernel_size=kernel_size,
strides=strides, padding=padding,
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(input)
return PReLU()(conv)
return f
class AlexNetBuilder(object):
@staticmethod
def build(input_shape, num_outputs):
# Permute dimension order if necessary
if K.image_dim_ordering() == 'tf':
input_shape = (input_shape[1], input_shape[2], input_shape[0])
input = Input(shape=input_shape)
c1 = _conv_relu(filters=96, kernel_size=(3, 3), strides=(1, 1))(input)
p1 = MaxPooling2D()(c1)
c2 = _conv_relu(filters=256, kernel_size=(5, 5), strides=(1, 1))(p1)
p2 = MaxPooling2D()(c2)
c3 = _conv_relu(filters=384, kernel_size=(3, 3), strides=(1, 1))(p2)
        c4 = _conv_relu(filters=384, kernel_size=(3, 3), strides=(1, 1))(c3)  # 384 matches AlexNet's conv4; 385 looked like a typo
c5 = _conv_relu(filters=256, kernel_size=(3, 3), strides=(1, 1))(c4)
p3 = MaxPooling2D()(c5)
fl = Flatten()(p3)
fc1 = Dense(units=4096)(fl)
fc1_d = Dropout(rate=0.5)(fc1)
fc1_a= PReLU()(fc1_d)
        fc2 = Dense(units=4096)(fc1_a)
        fc2_d = Dropout(rate=0.5)(fc2)
        fc2_a = PReLU()(fc2_d)
        output = Dense(units=200, activation="softmax")(fc2_a)
model = Model(inputs=input, outputs=output)
return model
@staticmethod
def buildAlexnet(input_shape, num_outputs):
return AlexNetBuilder.build(input_shape, num_outputs)
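
# a minimal usage sketch for Tiny ImageNet-sized inputs (channels-first shape,
# permuted internally under the TensorFlow ordering); note num_outputs is
# accepted but the head above is hardcoded to 200 classes:
# model = AlexNetBuilder.buildAlexnet((3, 64, 64), 200)
# model.summary()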
| [
"yunus.ec@gmail.com"
] | yunus.ec@gmail.com |
3f8ff7bf52aee9a81f937005bb281f95f35481df | 4b5d7d9131cd342d0d54130d217cb10eff7c1bff | /lab4/algorithmTests.py | 904f07e9267a8788aa66254840d2d128f0696911 | [] | no_license | sklaboi/ochrona-danych-laboratorium | 48f8b02d2ab73d764e869c4a3a001088d34134e2 | 7701cc1e29afb2b7b3d8fb0a25a95b7d00d4d61d | refs/heads/master | 2021-05-26T18:10:36.085276 | 2012-04-02T10:22:49 | 2012-04-02T10:22:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | #!/usr/bin/python
import sys
import random
import math
import hashlib
from Crypto.Cipher import DES,AES
import time
des = DES.new("key12345")
des = DES.new("key12345",DES.MODE_CBC)
#encrypted = des.encrypt("secret12")
#print encrypted
aes = AES.new("1234567890123456",AES.MODE_CFB)
encrypted = aes.encrypt("test")
#print encrypted
haslo = sys.argv[1]
random.seed(time.time())
sol = ""
for s in range(8):
sol += str(random.randint(0,9))
print "sol:"
print sol
print "pass:"
password = hashlib.sha224(haslo).hexdigest()
for i in range(1000):
password = hashlib.sha224(password+str(sol)).hexdigest()
print password
| [
"gwiazdal@volt.iem.pw.edu.pl"
] | gwiazdal@volt.iem.pw.edu.pl |
d9a464be1a3be2b144f34de63add4214c3cfc0dd | 6cfc109684e689fd4fba01380f95ebdde567531d | /Lab2/prueba.py | 58c38427b1d982a7c6a10fd06c3ffd5445836209 | [] | no_license | jaoc1811/CI2692 | 83291c70277dbe05dc076f9bffcb5db44a9c9864 | ab185a695c0a7722ccdd8317e4d4130853e9c9ae | refs/heads/master | 2020-03-18T20:32:45.852537 | 2018-05-29T00:58:17 | 2018-05-29T00:58:17 | 131,191,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,992 | py | from common.base.basic import read_file
from common.base.basic import Random
def mergesort(A):
    # Get the number of elements in the array.
    r = len(A)
    # A single-element array is sorted by definition.
    if 1 == r: return A
    # Build two new sorted sub-arrays:
    # L for the left sub-array,
    # R for the right sub-array.
    L = mergesort(A[:(r/2)])
    R = mergesort(A[(r/2):])
    # Return the merged, sorted array.
return merge(L,R)
def merge(L,R):
    # Create a new empty array to hold the sorted values.
    array = []
    # Initialize the variables used to iterate over the sub-arrays.
    i,j = 0,0
    # Initialize the variables that tell whether each array has been fully traversed.
    a,b = len(L),len(R)
    # While either iterator is still within its sub-array's range, enter the conditional.
    while (i < a or j < b):
        # The conditional was implemented this way because Python evaluates guards
        # in order. The third and fourth guards therefore cannot fail: the first
        # or second guard is taken whenever the index to compare is outside the
        # array's range.
        if (i >= a): # Check whether array L has been fully traversed.
            array.append(R[j])
            j += 1
        elif (j >= b): # Check whether array R has been fully traversed.
            array.append(L[i])
            i += 1
        elif (L[i] <= R[j]): # Append the smaller of the two elements.
            array.append(L[i])
            i += 1
        elif (R[j] < L[i]): # Append the smaller of the two elements.
            array.append(R[j])
            j += 1
#print array
return array
def insertion_sort(A):
for i in range(1, len(A)):
key = A[i]
j = i - 1
while j >= 0 and A[j] > key:
A[j+1] = A[j]
j = j - 1
A[j+1] = key
def freivalds(n, A, B, C):
def multiply(n, A, Z):
        # Create the vector to return
        R = n * [0]
        # Walk the entries of vector R and the rows of matrix A
        for i in range(n):
            # Walk the entries of vector Z and the entries of row i of A
            for j in range(n):
                R[i] = R[i] + (A[i][j] * Z[j])
        return R
    # Generate a vector Z filled with zeros and ones
    Z = n * [n]
    for i in range(n):
        Z[i] = Random(0,1)
    # Multiply B x Z, then A x (B x Z) and C x Z,
    # obtaining two vectors x1 and x2 of length n
    Y = multiply(n, B, Z)
    x1 = multiply(n, A, Y)
    x2 = multiply(n, C, Z)
    # Check whether A x (B x Z) = C x Z
return x1 == x2
def amplified_freivalds(k, n, A, B, C):
for i in range(k):
r = freivalds(n, A, B, C)
if r == False:
return False
return True
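
# a tiny hand-checkable example (values are illustrative):
# I2 = [[1, 0], [0, 1]]
# amplified_freivalds(10, 2, I2, I2, I2)                # True: I*I == I
# amplified_freivalds(10, 2, I2, I2, [[0, 0], [0, 0]])  # almost surely False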
def problema_3_8(A, x):
B = mergesort(A)
print B
R = False
for i in range(len(B) - 1):
start = i + 1
end = len(B) - 1
while start < end:
mid = (start + end) / 2
if B[mid] + B[i] == x:
R = True
break
elif B[mid] + B[i] < x:
start = mid + 1
elif B[mid] + B[i] > x:
end = mid - 1
if B[start] + B[i] == x:
R = True
return R
A = [ Random(0,2) for i in range(100)]
x = 71
#print A
print mergesort(A)
#print problema_3_8(A,x) | [
"jaoc1811@gmail.com"
] | jaoc1811@gmail.com |
3eee818cb29ce487b694fea16caba653f9d645ec | 629f909ebe19b22d068ec1a4719c9eb303ed2826 | /python_iugu/request/plan_request.py | 9d483102bd3f8d101761bc3b7ad17dd17da24c93 | [
"MIT"
] | permissive | guiflemes/python_iugu | f564ce3e653b228a6e71e82f5f26b1b364eb7f76 | e7efca84e76ebd5b99773f4e57a14f991fbcb520 | refs/heads/master | 2023-05-05T05:25:42.631921 | 2021-05-21T18:00:16 | 2021-05-21T18:00:16 | 327,623,059 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from python_iugu import enuns
@dataclass
class PlanRequest:
    # Optional[...] annotations match the None defaults ('str = None' is
    # type-incorrect, though harmless at runtime)
    name: Optional[str] = None
    identifier: Optional[str] = None
    interval: Optional[int] = None
    interval_type: Optional[enuns.IntervalType] = None
    value_cents: Optional[int] = None
    payable_with: Optional[enuns.PayableWith] = None
    features: Optional[FeatureRequest] = None
    billing_days: Optional[int] = None
    max_cycles: Optional[int] = None
@dataclass
class FeatureRequest:
name: str
identifier: str
value: str
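# Illustrative construction (a sketch; the field values are made up):
#   PlanRequest(name="Gold", identifier="gold", interval=1,
#               value_cents=1990,
#               features=FeatureRequest("Users", "users", "10"))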
| [
"guilherme@campusinc.com.br"
] | guilherme@campusinc.com.br |
39b26a09d6fbe8fddb9e0b8211cadb3d9dd28529 | f418f6f3a4f1e6574103b4426150c6a26e233bfe | /criteo/src/xgboost.py | c52aa05e8e393e239ef1a069b3f22698c0755499 | [] | no_license | fengqi0423/hahaha | 495b8e6916cb553ce8dbeb02673b5c41489b93ab | 4bdd96a81eb1165bc0eb05ab41b0f1ac3c9cde8a | refs/heads/master | 2021-01-10T19:23:47.828477 | 2014-09-23T03:30:44 | 2014-09-23T03:30:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,098 | py | # Author: Tianqi Chen, Bing Xu
# module for xgboost
import ctypes
import os
# optinally have scipy sparse, though not necessary
import numpy
import numpy.ctypeslib
import scipy.sparse as scp
# set this line correctly
XGBOOST_PATH = '/usr/local/lib/libxgboostpy.so'
# entry type of sparse matrix
class REntry(ctypes.Structure):
_fields_ = [("findex", ctypes.c_uint), ("fvalue", ctypes.c_float) ]
# load in xgboost library
xglib = ctypes.cdll.LoadLibrary(XGBOOST_PATH)
xglib.XGDMatrixCreate.restype = ctypes.c_void_p
xglib.XGDMatrixNumRow.restype = ctypes.c_ulong
xglib.XGDMatrixGetLabel.restype = ctypes.POINTER( ctypes.c_float )
xglib.XGDMatrixGetWeight.restype = ctypes.POINTER( ctypes.c_float )
xglib.XGDMatrixGetRow.restype = ctypes.POINTER( REntry )
xglib.XGBoosterCreate.restype = ctypes.c_void_p
xglib.XGBoosterPredict.restype = ctypes.POINTER( ctypes.c_float )
def ctypes2numpy( cptr, length ):
# convert a ctypes pointer array to numpy
assert isinstance( cptr, ctypes.POINTER( ctypes.c_float ) )
res = numpy.zeros( length, dtype='float32' )
assert ctypes.memmove( res.ctypes.data, cptr, length * res.strides[0] )
return res
# data matrix used in xgboost
class DMatrix:
# constructor
def __init__(self, data=None, label=None, missing=0.0, weight = None):
# force into void_p, mac need to pass things in as void_p
self.handle = ctypes.c_void_p( xglib.XGDMatrixCreate() )
        if data is None:  # 'is None' avoids ambiguous elementwise comparison with numpy input
return
if isinstance(data,str):
xglib.XGDMatrixLoad(self.handle, ctypes.c_char_p(data.encode('utf-8')), 1)
elif isinstance(data,scp.csr_matrix):
self.__init_from_csr(data)
elif isinstance(data, numpy.ndarray) and len(data.shape) == 2:
self.__init_from_npy2d(data, missing)
else:
try:
csr = scp.csr_matrix(data)
self.__init_from_csr(csr)
except:
                raise Exception("can not initialize DMatrix from " + str(type(data)))
        # 'is not None' avoids ambiguous elementwise comparison when these are numpy arrays
        if label is not None:
            self.set_label(label)
        if weight is not None:
            self.set_weight(weight)
# convert data from csr matrix
def __init_from_csr(self,csr):
assert len(csr.indices) == len(csr.data)
xglib.XGDMatrixParseCSR( self.handle,
( ctypes.c_ulong * len(csr.indptr) )(*csr.indptr),
( ctypes.c_uint * len(csr.indices) )(*csr.indices),
( ctypes.c_float * len(csr.data) )(*csr.data),
len(csr.indptr), len(csr.data) )
# convert data from numpy matrix
def __init_from_npy2d(self,mat,missing):
data = numpy.array( mat.reshape(mat.size), dtype='float32' )
xglib.XGDMatrixParseMat( self.handle,
data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
mat.shape[0], mat.shape[1], ctypes.c_float(missing) )
# destructor
def __del__(self):
xglib.XGDMatrixFree(self.handle)
# load data from file
def load(self, fname, silent=True):
xglib.XGDMatrixLoad(self.handle, ctypes.c_char_p(fname.encode('utf-8')), int(silent))
    # save data to a binary file
def save_binary(self, fname, silent=True):
xglib.XGDMatrixSaveBinary(self.handle, ctypes.c_char_p(fname.encode('utf-8')), int(silent))
# set label of dmatrix
def set_label(self, label):
xglib.XGDMatrixSetLabel(self.handle, (ctypes.c_float*len(label))(*label), len(label) )
# set group size of dmatrix, used for rank
def set_group(self, group):
xglib.XGDMatrixSetGroup(self.handle, (ctypes.c_uint*len(group))(*group), len(group) )
# set weight of each instances
def set_weight(self, weight):
xglib.XGDMatrixSetWeight(self.handle, (ctypes.c_float*len(weight))(*weight), len(weight) )
# get label from dmatrix
def get_label(self):
length = ctypes.c_ulong()
labels = xglib.XGDMatrixGetLabel(self.handle, ctypes.byref(length))
return ctypes2numpy( labels, length.value );
# get weight from dmatrix
def get_weight(self):
length = ctypes.c_ulong()
weights = xglib.XGDMatrixGetWeight(self.handle, ctypes.byref(length))
return ctypes2numpy( weights, length.value );
# clear everything
def clear(self):
xglib.XGDMatrixClear(self.handle)
def num_row(self):
return xglib.XGDMatrixNumRow(self.handle)
# append a row to DMatrix
def add_row(self, row):
xglib.XGDMatrixAddRow(self.handle, (REntry*len(row))(*row), len(row) )
    # get the n-th row from DMatrix
def __getitem__(self, ridx):
length = ctypes.c_ulong()
row = xglib.XGDMatrixGetRow(self.handle, ridx, ctypes.byref(length) );
return [ (int(row[i].findex),row[i].fvalue) for i in range(length.value) ]
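# Illustrative DMatrix usage (a sketch; assumes libxgboostpy.so is built and
# XGBOOST_PATH above points at it):
#   dense = DMatrix(numpy.random.rand(10, 3), label=[0, 1] * 5)
#   sparse = DMatrix(scp.csr_matrix(numpy.eye(4)))
#   dense.num_row()  # -> 10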
class Booster:
"""learner class """
def __init__(self, params={}, cache=[]):
""" constructor, param: """
for d in cache:
assert isinstance(d,DMatrix)
dmats = ( ctypes.c_void_p * len(cache) )(*[ d.handle for d in cache])
self.handle = ctypes.c_void_p( xglib.XGBoosterCreate( dmats, len(cache) ) )
self.set_param( {'seed':0} )
self.set_param( params )
def __del__(self):
xglib.XGBoosterFree(self.handle)
def set_param(self, params, pv=None):
if isinstance(params,dict):
for k, v in params.items():
xglib.XGBoosterSetParam(
self.handle, ctypes.c_char_p(k.encode('utf-8')),
ctypes.c_char_p(str(v).encode('utf-8')))
elif isinstance(params,str) and pv != None:
xglib.XGBoosterSetParam(
self.handle, ctypes.c_char_p(params.encode('utf-8')),
ctypes.c_char_p(str(pv).encode('utf-8')) )
else:
for k, v in params:
xglib.XGBoosterSetParam(
self.handle, ctypes.c_char_p(k.encode('utf-8')),
ctypes.c_char_p(str(v).encode('utf-8')) )
    def update(self, dtrain):
        """run one boosting iteration on dtrain"""
assert isinstance(dtrain, DMatrix)
xglib.XGBoosterUpdateOneIter( self.handle, dtrain.handle )
    def boost(self, dtrain, grad, hess, bst_group = -1):
        """boost one iteration with a caller-supplied gradient and hessian"""
assert len(grad) == len(hess)
assert isinstance(dtrain, DMatrix)
xglib.XGBoosterBoostOneIter( self.handle, dtrain.handle,
(ctypes.c_float*len(grad))(*grad),
(ctypes.c_float*len(hess))(*hess),
len(grad), bst_group )
def update_interact(self, dtrain, action, booster_index=None):
""" beta: update with specified action"""
assert isinstance(dtrain, DMatrix)
        if booster_index is not None:
            self.set_param('interact:booster_index', str(booster_index))
        xglib.XGBoosterUpdateInteract(
            self.handle, dtrain.handle, ctypes.c_char_p(str(action).encode('utf-8')) )
def eval_set(self, evals, it = 0):
for d in evals:
assert isinstance(d[0], DMatrix)
assert isinstance(d[1], str)
dmats = ( ctypes.c_void_p * len(evals) )(*[ d[0].handle for d in evals])
evnames = ( ctypes.c_char_p * len(evals) )(
*[ctypes.c_char_p(d[1].encode('utf-8')) for d in evals])
xglib.XGBoosterEvalOneIter( self.handle, it, dmats, evnames, len(evals) )
def eval(self, mat, name = 'eval', it = 0 ):
self.eval_set( [(mat,name)], it)
def predict(self, data, bst_group = -1):
length = ctypes.c_ulong()
preds = xglib.XGBoosterPredict( self.handle, data.handle, ctypes.byref(length), bst_group)
return ctypes2numpy( preds, length.value )
def save_model(self, fname):
""" save model to file """
xglib.XGBoosterSaveModel(self.handle, ctypes.c_char_p(fname.encode('utf-8')))
def load_model(self, fname):
"""load model from file"""
xglib.XGBoosterLoadModel( self.handle, ctypes.c_char_p(fname.encode('utf-8')) )
def dump_model(self, fname, fmap=''):
"""dump model into text file"""
xglib.XGBoosterDumpModel(
self.handle, ctypes.c_char_p(fname.encode('utf-8')),
ctypes.c_char_p(fmap.encode('utf-8')))
def train(params, dtrain, num_boost_round = 10, evals = [], obj=None):
""" train a booster with given paramaters """
bst = Booster(params, [dtrain]+[ d[0] for d in evals ] )
    if obj is None:
for i in range(num_boost_round):
bst.update( dtrain )
if len(evals) != 0:
bst.eval_set( evals, i )
else:
# try customized objective function
for i in range(num_boost_round):
pred = bst.predict( dtrain )
grad, hess = obj( pred, dtrain )
bst.boost( dtrain, grad, hess )
if len(evals) != 0:
bst.eval_set( evals, i )
return bst
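# Illustrative custom objective for train() (a sketch; assumes binary 0/1
# labels stored on dtrain): the logistic-loss gradient/hessian pair.
#
#   def logregobj(preds, dtrain):
#       labels = dtrain.get_label()
#       p = 1.0 / (1.0 + numpy.exp(-preds))  # sigmoid of the raw margin
#       grad = p - labels
#       hess = p * (1.0 - p)
#       return grad, hess
#
#   bst = train({'eta': 0.1}, dtrain, num_boost_round=10, obj=logregobj)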
| [
"feng.qi@hulu.com"
] | feng.qi@hulu.com |
2694809627d8fe84439bbd9857953fd90a2c72a8 | 8a62bbff9378187a898f336532bb49de18cb88e4 | /2020-phone-bpe-attention/scripts/create-phone-bpe-lexicon.py | 9cc4eba43457fe7795861e94c644ea94d3b34626 | [] | no_license | rwth-i6/returnn-experiments | e2cdecb67febe646d702282ced8c290f1dd8edd0 | a46021329c030af361e0becb25ea92afca9610ce | refs/heads/master | 2023-06-08T08:56:11.891782 | 2023-05-30T12:46:45 | 2023-05-30T12:46:45 | 67,426,132 | 159 | 52 | null | 2023-05-30T12:46:46 | 2016-09-05T14:07:48 | Python | UTF-8 | Python | false | false | 13,569 | py | #!/usr/bin/env python3
import xml.etree.ElementTree as ET
from xml.dom import minidom
import codecs
from returnn.LmDataset import Lexicon
from argparse import ArgumentParser
"""
create Lexicon, given bpe Vocab, lexicon and applied phones_bpe
"""
def convert(string_num):
    # base case for the leading-zero recursion below (avoids int("") when the
    # input is all zeros, e.g. "0")
    if string_num == "":
        return ""
    if isinstance(string_num, str) and string_num.startswith("0"):
        return "zero " + convert(string_num[1:])
    num = int(string_num)
units = ("", "one ", "two ", "three ", "four ","five ", "six ", "seven ","eight ", "nine ", "ten ", "eleven ", "twelve ", "thirteen ", "fourteen ", "fifteen ","sixteen ", "seventeen ", "eighteen ", "nineteen ")
tens =("", "", "twenty ", "thirty ", "forty ", "fifty ","sixty ","seventy ","eighty ","ninety ")
if num<0:
return "minus "+convert(-num)
if num<20:
return units[num]
if num<100:
return tens[num // 10] +units[int(num % 10)]
if num<1000:
return units[num // 100] +"hundred " +convert(int(num % 100))
if num<1000000:
return convert(num // 1000) + "thousand " + convert(int(num % 1000))
if num < 1000000000:
return convert(num // 1000000) + "million " + convert(int(num % 1000000))
return convert(num // 1000000000)+ "billion "+ convert(int(num % 1000000000))
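# e.g. convert("0123") -> "zero one hundred twenty three " (leading zeros are
# spelled out digit by digit, the remainder is read as a number)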
def hasNumber(inputString):
return any(char.isdigit() for char in inputString)
def separate(iString):
    """Split a string into maximal alphabetic/numeric runs, e.g. 'ab12c' -> ['ab', '12', 'c']."""
prev_char = iString[0]
tmp = []
new = iString[0]
for x, i in enumerate(iString[1:]):
if i.isalpha() and prev_char.isalpha():
new += i
elif i.isnumeric() and prev_char.isnumeric():
new += i
else:
tmp.append(new)
new = i
prev_char = i
if x == len(iString)-2:
tmp.append(new)
new = ''
if len(iString) > 1:
return tmp
return [iString]
def to_unicode_list(input_l):
res = []
for item in input_l:
res.append(to_unicode(item))
return res
def to_unicode(input):
text = input.split()
result = ""
for k in text:
result += phone_to_unicode[k]
return result
# map phone into unicode
phone_to_unicode = {'[LAUGHTER]': 'L',
'[NOISE]': 'N',
'[SILENCE]': 'S',
'[VOCALIZEDNOISE]': 'V',
'aa': 'a',
'ae': 'à',
'ah': 'á',
'ao': 'â',
'aw': 'ã',
'ax': 'ä',
'ay': 'å',
'b': 'b',
'ch': 'c',
'd': 'd',
'dh': 'ď',
'eh': 'e',
'el': 'è',
'en': 'é',
'er': 'ê',
'ey': 'ë',
'f': 'f',
'g': 'g',
'hh': 'h',
'ih': 'i',
'iy': 'ì',
'jh': 'j',
'k': 'k',
'l': 'l',
'm': 'm',
'n': 'n',
'ng': 'ñ',
'ow': 'o',
'oy': 'ò',
'p': 'p',
'r': 'r',
's': 's',
'sh': 'ś',
't': 't',
'th': 'ţ',
'uh': 'u',
'uw': 'ù',
'v': 'v',
'w': 'w',
'y': 'y',
'z': 'z',
'zh': 'ź',
' ': ' ',
'#1': '#1', # disambiquate symbols for homophones
'#2': '#2',
'#3': '#3',
'#4': '#4',
'#5': '#5',
'#6': '#6',
'#7': '#7',
'#8': '#8',
'#9': '#9',
'#10': '#10',
'#11': '#11',
'#12': '#12',
'#13': '#13',
'#14': '#14',
}
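# Example of the mapping in use: to_unicode("hh ah l ow") -> "hálo"; each
# phone becomes a single character so BPE can treat phone sequences as words.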
def main():
arg_parser = ArgumentParser()
arg_parser.add_argument("--bpe_vocab", required=True)
arg_parser.add_argument("--lexicon", required=True)
arg_parser.add_argument("--phones_bpe", required=True)
arg_parser.add_argument("--bpe", action="store_true")
arg_parser.add_argument("--char", action="store_true")
arg_parser.add_argument("--charbpe", action="store_true")
arg_parser.add_argument("--disamb", action="store_true")
arg_parser.add_argument("--output", required=True)
args = arg_parser.parse_args()
    # for single-char or single-phone setups (no BPE), the phones_bpe argument
    # is unused and can be omitted
bpe1k_file = args.bpe_vocab
lexicon_file = args.lexicon
phones_bpe_file = args.phones_bpe
def create_specialTree(input):
if input == "</s>":
lemma = ET.SubElement(lex_root, 'lemma', special="sentence-end")
orth = ET.SubElement(lemma, 'orth')
synt = ET.SubElement(lemma, 'synt')
tok = ET.SubElement(synt, 'tok')
orth.text = '[SENTENCE-END]'
tok.text = input
eval = ET.SubElement(lemma, 'eval')
elif input == "<s>":
lemma = ET.SubElement(lex_root, 'lemma', special="sentence-begin")
orth = ET.SubElement(lemma, 'orth')
synt = ET.SubElement(lemma, 'synt')
tok = ET.SubElement(synt, 'tok')
orth.text = '[SENTENCE-BEGIN]'
tok.text = input
eval = ET.SubElement(lemma, 'eval')
elif input == "<unk>":
lemma = ET.SubElement(lex_root, 'lemma', special="unknown")
orth = ET.SubElement(lemma, 'orth')
synt = ET.SubElement(lemma, 'synt')
tok = ET.SubElement(synt, 'tok')
orth.text = '[UNKNOWN]'
tok.text = input
eval = ET.SubElement(lemma, 'eval')
# read the input phonemes file and parse it into dictionary
# output dictionary seq
    with codecs.open(bpe1k_file, 'r', 'utf-8') as file:  # 'rU' mode is deprecated/removed in modern Python 3
seq = {}
for line in file:
if line.startswith(('{', '}')):
continue
line = line.replace(',', '')
line = line.replace('\'', '')
key, value = line.strip().split(':')
value = value.strip()
seq[key] = value
# create the xml file structure
special_sign = ["L", "N", "S", "V"]
extra_sign = ["</s>", "<s>", "<unk>"]
# old lexicon handle
lex = Lexicon(lexicon_file)
count = 0
temp_lemmas = []
for word in lex.lemmas:
count += 1
if count > 9:
if args.char:
if hasNumber(lex.lemmas[word]['orth']):
word_ = ""
list_ = separate(lex.lemmas[word]['orth'])
for item in list_:
if item.isdigit():
word_ += convert(item)
temp_lemmas.append(word_.strip())
else:
temp_lemmas.append(lex.lemmas[word]['orth'])
# create new lexicon root
# create phonemes xml tree
lex_root = ET.Element('lexicon')
phone_inventory = ET.SubElement(lex_root, 'phoneme-inventory')
for key, v in sorted(seq.items()):
if key not in extra_sign:
phone = ET.SubElement(phone_inventory, 'phoneme')
p_sym = ET.SubElement(phone, 'symbol')
p_var = ET.SubElement(phone, 'variation')
if key in special_sign:
p_var.text = 'none'
if key == "L":
p_sym.text = "[LAUGHTER]"
elif key == "N":
p_sym.text = "[NOISE]"
elif key == "V":
p_sym.text = "[VOCALIZEDNOISE]"
else:
p_sym.text = "[SILENCE]"
else:
p_var.text = 'context'
p_sym.text = key
else:
if key == "<s>":
create_specialTree(key)
elif key == "</s>":
create_specialTree(key)
elif key == "<unk>":
create_specialTree(key)
for item in ["[NOISE]", "[VOCALIZEDNOISE]", "[LAUGHTER]"]:
lemma = ET.SubElement(lex_root, 'lemma')
orth = ET.SubElement(lemma, 'orth')
phon = ET.SubElement(lemma, 'phon', score="0.0")
phon.text = item
orth.text = item
synt = ET.SubElement(lemma, 'synt')
eval = ET.SubElement(lemma, 'eval')
# mapping phone sequences to word
phon_dict = {}
if args.char:
for word in lex.lemmas:
if hasNumber(word):
word_ = ""
list_ = separate(word)
for item in list_:
if item.isdigit():
word_ += convert(item)
phon_dict[word] = word_
else:
phon_dict[word] = word
#print(word, phon_dict[word])
else:
for word in lex.lemmas:
len_phons = len(lex.lemmas[word]["phons"])
list_of_phons = []
for x in range(len_phons):
list_of_phons.append(lex.lemmas[word]["phons"][x]["phon"])
if args.bpe:
phon_dict[word] = to_unicode_list(list_of_phons) #phone bpe
else:
phon_dict[word] = list_of_phons #single phone
if args.disamb:
duplicates = {} # phone -> count
for word, phones in sorted(phon_dict.items()):
for phone in phones:
if phone in duplicates:
phon_dict[word].remove(phone)
phon_dict[word].insert(0, '%s #%s' % (phone, duplicates[phone])) #bpe close#, not bpe far #
duplicates[phone] += 1
else:
duplicates[phone] = 1
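    # The loop above disambiguates homophones: the first occurrence of a
    # phone string stays unmarked and the n-th repeat gets a '#n' marker,
    # e.g. "to" -> "t uw", "too" -> "t uw #1", "two" -> "t uw #2".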
# auxiliary write a output file
with open('word_phone.txt', 'w') as f:
print(phon_dict, file=f)
with open('file_to_map.txt', 'w') as file:
file.write('{\n')
for key, value in phon_dict.items():
file.write('{}:{},\n'.format(key, value))
file.write('}\n')
with open('file_to_map.txt', 'r') as inp:
with open('file_output.txt', 'w') as out:
for i in range(6):
inp.readline()
for line in inp:
if line.startswith('}'):
break
line = line.replace(',', '')
_, right = line.split(':')
lst = right[1:-2].split(',')
lst = [x.replace("'", "") for x in lst]
output = ' '.join(lst)
                out.write('{}\n'.format(output))  # keep the trailing '\n' for all modes; it is dropped for SingleChar
# here is the checkpoint, where ./subword-nmt/apply_bpe.py is called
# with input files: codes file and phone sequences that to be map (e.g file_output.txt)
# generate output: phones_bpe_file that will be used further
with open(phones_bpe_file, 'r') as file_r:
res_ = []
for line in file_r:
ls = line.strip().split()
phon_seq = []
merge = []
for item in ls:
if '@@' in item:
merge.append(item)
else:
merge.append(item)
phon_seq.append(' '.join(merge))
merge = []
res_.append(phon_seq)
dict_tmp = list(phon_dict.items())
for idx, x in enumerate(res_):
dict_tmp[4+idx] = (dict_tmp[4+idx][0], x)
phon_dict = dict(dict_tmp)
with open('unicode_phone.txt', 'w') as f:
print(phon_dict, file=f)
# we want to add same words (ignoring case) to the same lemma so we create a dict from orth to
# lemma to add a similar orth to the same lemma later. phon should be added only once to the lemma
# so we do that when we create the lemma
if args.char:
orth_to_lemma = {} # dict from orth to lemma
for idx, elem in enumerate(temp_lemmas):
elem_lower = elem.lower()
            # if it is already in there, just look up the existing lemma
if elem_lower in orth_to_lemma:
lemma = orth_to_lemma[elem_lower]
else:
                # if not, create it
lemma = ET.SubElement(lex_root, 'lemma')
orth_to_lemma[elem_lower] = lemma
#assert elem_lower in phon_dict
res = ""
for char in list(elem):
res+=char
res+=" "
phon = ET.SubElement(lemma, 'phon')
phon.text = res.strip()
orth = ET.SubElement(lemma, 'orth')
orth.text = elem
# single char
# if args.char:
# orth_to_lemma = {}
# for idx, elem in enumerate(temp_lemmas):
# elem_lower = elem.lower()
# lemma = ET.SubElement(lex_root, 'lemma')
# orth = ET.SubElement(lemma, 'orth')
# orth.text = elem
# if elem_lower in orth_to_lemma:
# lemma = orth_to_lemma[elem_lower]
# else:
# res = ""
# for c in list(elem):
# res+= c
# res+= " "
# phon = ET.SubElement(lemma, 'phon')
# res = res + "<eow>"
# phon.text = res
# else:
# orth_to_lemma = {}
# for idx, elem in enumerate(temp_lemmas):
# elem_lower = elem.lower()
# lemma = ET.SubElement(lex_root, 'lemma')
# orth = ET.SubElement(lemma, 'orth')
# orth.text = elem
# if elem_lower in orth_to_lemma:
# lemma = orth_to_lemma[elem_lower]
# else:
# for p in phon_dict[elem_lower]:
# phon = ET.SubElement(lemma, 'phon')
# phon.text = p
else:
orth_to_lemma = {} # dict from orth to lemma
for idx, elem in enumerate(temp_lemmas):
elem_lower = elem.lower()
            # if it is already in there, just look up the existing lemma
if elem_lower in orth_to_lemma:
lemma = orth_to_lemma[elem_lower]
else:
                # if not, create it
lemma = ET.SubElement(lex_root, 'lemma')
orth_to_lemma[elem_lower] = lemma
assert elem_lower in phon_dict
for p in phon_dict[elem_lower]:
phon = ET.SubElement(lemma, 'phon')
phon.text = p
orth = ET.SubElement(lemma, 'orth')
orth.text = elem
if(args.output):
my_data = minidom.parseString(ET.tostring(lex_root)).toprettyxml(indent=" ")
with open(args.output, "w") as f:
f.write(my_data)
if __name__ == '__main__':
import better_exchook
better_exchook.install()
main()
| [
"thomas.ng@rwth-aachen.de"
] | thomas.ng@rwth-aachen.de |
864225aab249cfde9e18603e2f560f35df07377d | acce415d18f324fdcbd2df9d4bfae003c0b6560a | /user/urls.py | 8650a041a0109d2dcf93a0c0ff42c65a91bffd75 | [] | no_license | borsden/kanban | c9b08d34b779975b4cf3b8cc67e0e03f7816d37a | be0bfd22b8af61f78c407025b1706e57e5389ba4 | refs/heads/master | 2016-08-11T20:25:20.803053 | 2016-02-18T05:49:16 | 2016-02-18T05:49:16 | 48,171,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # coding=utf-8
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^current_user/$', views.CurrentUser.as_view()),
url(r'^update_user/$', views.UpdateUser.as_view()),
url(r'^login/$', views.LoginUser.as_view(), name='login'),
url(r'^logout/$', views.LogoutUser.as_view()),
url(r'^change_avatar/$', views.ChangeAvatar.as_view()),
url(r'^change_password/$', views.ChangePassword.as_view()),
)
| [
"borsden@gmail.com"
] | borsden@gmail.com |
6bcb9db3729f35fb8aec94089af0cb9395cbe3a6 | df513473a78ec2714025a43d673988e73d89dc9e | /IAM/detach_policy_group.py | 5b6926588309e84ec94664993b1c106c3aa09ec9 | [] | no_license | sgouda0412/AWS-With-Python | dfcef51c07696d13a46c63236cfcd130b4916256 | b3abfa7d324e17d22f81c7e53afc34df6f5d484c | refs/heads/master | 2023-03-17T18:18:49.692190 | 2020-03-04T13:35:48 | 2020-03-04T13:35:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import boto3
#detach policy from group using client
iam = boto3.client('iam') # IAM low level client object
response = iam.detach_group_policy(
GroupName='group1',
PolicyArn='arn:aws:iam::aws:policy/AdministratorAccess'
)
print(response)
# detach policy from group using the resource interface
# (note: this rebinds 'iam' from the client object above)
iam = boto3.resource('iam')  # resource representing AWS IAM
group = iam.Group('group2')
response = group.detach_policy(
PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess'
)
print(response)
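# The inverse operations have the same shape (illustrative; both APIs exist
# in boto3):
#   client:   iam.attach_group_policy(GroupName='group1', PolicyArn=...)
#   resource: iam.Group('group2').attach_policy(PolicyArn=...)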
| [
"mogal.mahesh33@gmail.com"
] | mogal.mahesh33@gmail.com |
6ed312e707abaa007c3cd93e7fdc80401b65f139 | f736f2392c6de4b8c6cd9d9bdff6de5c05d4a278 | /blog/coments/api/serializers.py | 5aa0958b3467c1a910fc2f7c2bcccf44198519e5 | [] | no_license | ricardocastilloisc/cursoDjangoBlog | a36f20021f72dc1b7b819c4f863e649707b3736a | 13bac0f3811e7fafc3f21ed979b53cf36aae6d91 | refs/heads/main | 2023-06-27T19:33:26.977679 | 2021-07-26T14:59:26 | 2021-07-26T14:59:26 | 389,442,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | from rest_framework import serializers
from coments.models import Comment
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
        fields = ['id', 'content', 'created_at', 'user', 'post']
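# Illustrative round trip (a sketch, given a Comment instance `comment`):
#   CommentSerializer(comment).data
#   -> {'id': ..., 'content': ..., 'created_at': ..., 'user': ..., 'post': ...}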
"ricardocastilloisc@gmail.com"
] | ricardocastilloisc@gmail.com |
831c204ef9a4257ac6f36dc2e05da942d2a695c0 | c59aafd22b33cad444d5702f23dd987ab8d29a69 | /src/fcn/__init__.py | 6c283a41bc672f44f0cc7127e5dea94d0a700541 | [] | no_license | pbecker93/DLRC-Unicorns | 9ddd0396f2c7d43de28903d3ddc92a430f59623e | 9a7956e7e401b1330ed62d7120ce73ea0465d8c2 | refs/heads/master | 2021-07-13T15:23:25.011871 | 2017-10-17T08:49:46 | 2017-10-17T08:49:46 | 106,546,851 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | from .fcn_vgg import FCN
__all__=['FCN'] | [
"roel.wier@gmail.com"
] | roel.wier@gmail.com |
87990ee7c013adfed4d8152d526bab78f47feee2 | 9550ce4a80169d21b556b22679a9462f98438e32 | /app/urls.py | 32f3b1ab973c04cbcb9ce11ea3ea6d0850315945 | [
"Apache-2.0"
] | permissive | erics1996/questionnaire_django | 87cc44bd745eb810861349effc126ed3dfbd6508 | 1006c61eba1e9efec0801299938eb13c16a0b292 | refs/heads/master | 2022-12-15T04:47:39.042594 | 2020-09-02T17:34:33 | 2020-09-02T17:34:33 | 284,580,189 | 0 | 0 | Apache-2.0 | 2020-09-02T17:34:34 | 2020-08-03T02:02:20 | Python | UTF-8 | Python | false | false | 300 | py | from django.contrib import admin
from django.urls import path, re_path
from .views import backend
urlpatterns = [
    path('', backend.IndexView.as_view()),
    # raw strings avoid invalid-escape warnings for the \d patterns
    re_path(r'survey/(?P<pk>\d+)/', backend.SurveyDetailView.as_view()),
    re_path(r'(?P<pk>\d+)/download/', backend.DownloadView.as_view())
]
"erics1996@yeah.net"
] | erics1996@yeah.net |
334f16eca95422f71e3a8b64fd17fd7ac3057b10 | da6df71f4bc31fae2874285ecfe688540d724910 | /pipelines/communication.py | 7fed316af16a903c6e0f2902402af1aa48c2a015 | [] | no_license | joseilberto/dog_bark_detection | 67be5551e1735e9bc03f3dcd4db60388f7e8af05 | 1ff993bc703727c38ed0463e546e539763c869e7 | refs/heads/master | 2023-03-11T01:32:42.215274 | 2021-02-20T22:29:52 | 2021-02-20T22:29:52 | 236,839,762 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from os.path import basename
import email
import numpy as np
import smtplib
import ssl
def create_body(files, message):
"""
Create the body of the e-mail from the keys in message and
"""
pattern_message = message["pattern"]
bark_messages = message["body_start"]
for file in files:
filename = basename(file)
name, date, hour, minute, seconds = "".join(filename.split(".")[0]).split("_")
bark_messages += pattern_message(name, hour, minute, seconds, date)
return bark_messages + message["body_end"] + message["signature"]
def send_files(files, sender, receiver, message, send_all = False):
"""
Parameters:
files (list of strings): All the files that will be sent to the receiver.
sender (dict): Dictionary with the data from sender (email, password, port and smtp server).
receiver (dict): Dictionary with the data from receiver (email).
message (dict): Dict containing the data to be used in the body of the text.
send_all (bool): Determine if it sends all files or randomly select two of them.
"""
context = ssl.create_default_context()
email_msg = MIMEMultipart()
email_msg["From"] = sender["email"]
email_msg["To"] = receiver["email"]
email_msg["Subject"] = message["subject"]
email_msg.attach(MIMEText(message["body"], "plain"))
send_files = (np.random.choice(files, size = 2, replace = False)
if not send_all else files)
for file in send_files:
with open(file, "rb") as attachment:
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header("Content-disposition",
f"attachment; filename= {basename(file)}",)
email_msg.attach(part)
text = email_msg.as_string()
with smtplib.SMTP_SSL(sender["smtp_server"], sender["port"],
context = context) as server:
server.login(sender["email"], sender["password"])
server.sendmail(sender["email"], receiver["email"], text)
print("{} File(s) sent from {} to {}".format(len(send_files),
sender["email"], receiver["email"]))
| [
"ilbertofjunior@gmail.com"
] | ilbertofjunior@gmail.com |
7cd9fa50c093dbb5c2b3d3496f38b231a56fb61e | 7ed70a9ee30990c5a195ddc96ebb8b3c174d4f6d | /hello/world.py | 0b79d944ce10636eccb90edcaae841f2818cfaa7 | [] | no_license | greenwell0912/helloworld-scripts | f69eee8462d226d3fe4286826832b4e0de8b2d9c | e75ed883ee0066ae6052b8e875aecbd6e1a079a0 | refs/heads/master | 2020-03-10T20:17:01.811318 | 2018-04-15T05:06:34 | 2018-04-15T05:06:34 | 129,567,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 124 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def main():
print("hello world!")
if __name__ == '__main__':
main()
| [
"hiroki6357@gmail.com"
] | hiroki6357@gmail.com |
21d9a316ce6cfdf96f3a9f5edaacf77894c81bf4 | e9d52dcf101aea0327c6b0d7e5244c91dfd62cf6 | /spexy/adv/samples/simple.py | e2df8a641ff75635616d8894582fa8f83e6bf7dd | [] | no_license | drufat/spexy | 6eba9f44a5539245486cd4ef8fefd24bdb7ade6a | 53255009c1830501986afbf6688142ddefe17b9a | refs/heads/master | 2021-09-18T19:51:47.313946 | 2018-07-19T05:09:02 | 2018-07-19T05:09:02 | 100,453,374 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | # Copyright (C) 2010-2016 Dzhelil S. Rufat. All Rights Reserved.
from sympy import sin, cos
def V(x, y):
return (-sin(y), sin(x))
def p(x, y):
return -cos(x) * cos(y)
| [
"drufat@caltech.edu"
] | drufat@caltech.edu |
be8bec20e05cbf5aa26e1cb824b5be2ffe259628 | 541cfbacae0805d6ef61041a23b9854c15be0d55 | /join_cases.py | 6c9a2063c9bf65acaa6e1515742da7e32673e713 | [] | no_license | qdouasbin/postproc_explo_airbus | 1b37444fe577d8527e71b35a580a2638c4c5b8fe | 64f102973bb3f13660c7e0ab557fa0ffe793c07a | refs/heads/main | 2023-06-05T11:15:11.673524 | 2021-07-01T10:29:29 | 2021-07-01T10:29:29 | 375,285,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | import os
import glob
import numpy as np
import pandas as pd
def join_subdirectory_csv_files(prefix, extension):
"""
1. Seek for csv files according to prefix.extension rule
2. concatenate all files
3. drop duplicates
4. re-index
5. dump clean concatenated file
"""
# Find all csv files in subdirectories
all_filenames = [_file for _file in sorted(glob.glob('*/{}.{}'.format(prefix, extension)))]
# combine all files in the list
# combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames])
combined_csv = pd.read_csv(all_filenames[0])
for _idx, _file in enumerate(all_filenames):
if _idx:
print("\t > %s" % _file)
_df = pd.read_csv(_file)
# combined_csv.merge(_df, how="inner")
combined_csv = pd.merge_ordered(combined_csv, _df, fill_method="ffill")
# Drop duplicates
combined_csv = combined_csv.drop_duplicates().reset_index(drop=True)
# export to csv
combined_csv.to_csv("%s.csv" % prefix, index=False, encoding='utf-8-sig')
if __name__ == "__main__":
# Join all csv files needed here
extension = "csv"
prefixes = ["avbp_local_probe_0", "avbp_mmm", "avbp_venting"]
for prefix in prefixes:
print(" > Joining %s.%s" % (prefix, extension))
join_subdirectory_csv_files(prefix, extension)
| [
"qdouasbin@cerfacs.fr"
] | qdouasbin@cerfacs.fr |
2aa3c4884a4fb9cc6a1dfb40a23627bc7126d8ab | 4e248704293e8b229d51cce077263364a98bb45f | /Lexical_analyzer/train.py | 46c35de8df5855ddcc221f0d83b0e0491e7537a1 | [] | no_license | VincentLee-EN/FibreTextAnalyzer | 0ba5c70c899f2f85aae6180ba75bb1031c6fd15d | 2de3f9d4f18498d24be829e0f9d3a6f2c373a82c | refs/heads/master | 2020-05-16T02:45:42.072795 | 2019-05-02T14:02:52 | 2019-05-02T14:02:52 | 181,429,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,112 | py | #encoding=utf8
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib import crf
import Lexical_analyzer.cws.model as modelDef
from Lexical_analyzer.cws.data import Data
tf.app.flags.DEFINE_string('dict_path', 'data/your_dict.pkl', 'dict path')
tf.app.flags.DEFINE_string('train_data', 'data/your_train_data.pkl', 'train data path')
tf.app.flags.DEFINE_string('ckpt_path', 'checkpoints/cws.finetune.ckpt/', 'checkpoint path')
tf.app.flags.DEFINE_integer('embed_size', 256, 'embedding size')
tf.app.flags.DEFINE_integer('hidden_size', 512, 'hidden layer node number')
tf.app.flags.DEFINE_integer('batch_size', 64, 'batch size')
tf.app.flags.DEFINE_integer('epoch', 9, 'training epoch')
tf.app.flags.DEFINE_float('lr', 0.01, 'learning rate')
tf.app.flags.DEFINE_string('save_path','checkpoints/cws.ckpt/','new model save path')
FLAGS = tf.app.flags.FLAGS
class BiLSTMTrain(object):
def __init__(self, data_train=None, data_valid=None, data_test=None, model=None):
self.data_train = data_train
self.data_valid = data_valid
self.data_test = data_test
self.model = model
def train(self):
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
## finetune ##
# ckpt = tf.train.latest_checkpoint(FLAGS.ckpt_path)
# saver = tf.train.Saver()
# saver.restore(sess, ckpt)
# print('-->finetune the ckeckpoint:'+ckpt+'...')
##############
max_epoch = 5
tr_batch_size = FLAGS.batch_size
max_max_epoch = FLAGS.epoch # Max epoch
display_num = 5 # Display 5 pre epoch
tr_batch_num = int(self.data_train.y.shape[0] / tr_batch_size)
# tr_batch_num = tr_batch_size
display_batch = int(tr_batch_num / display_num)
saver = tf.train.Saver(max_to_keep=10)
for epoch in range(max_max_epoch):
_lr = FLAGS.lr
if epoch > max_epoch:
_lr *= 1
print('EPOCH %d, lr=%g' % (epoch + 1, _lr))
start_time = time.time()
_losstotal = 0.0
show_loss = 0.0
for batch in range(tr_batch_num):
fetches = [self.model.loss, self.model.train_op]
X_batch, y_batch = self.data_train.next_batch(tr_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: _lr,
self.model.batch_size: tr_batch_size,
self.model.keep_prob: 0.5}
_loss, _ = sess.run(fetches, feed_dict)
_losstotal += _loss
show_loss += _loss
if (batch + 1) % display_batch == 0:
valid_acc = self.test_epoch(self.data_valid, sess) # valid
print('\ttraining loss=%g ; valid acc= %g ' % (show_loss / display_batch,
valid_acc))
show_loss = 0.0
mean_loss = _losstotal / tr_batch_num
if (epoch + 1) % 1 == 0: # Save once per epoch
save_path = saver.save(sess, self.model.model_save_path+'_plus', global_step=(epoch + 1))
print('the save path is ', save_path)
print('\ttraining %d, loss=%g ' % (self.data_train.y.shape[0], mean_loss))
print('Epoch training %d, loss=%g, speed=%g s/epoch' % (
self.data_train.y.shape[0], mean_loss, time.time() - start_time))
# testing
print('**TEST RESULT:')
test_acc = self.test_epoch(self.data_test, sess)
print('**Test %d, acc=%g' % (self.data_test.y.shape[0], test_acc))
sess.close()
def test_epoch(self, dataset=None, sess=None):
_batch_size = 500
_y = dataset.y
data_size = _y.shape[0]
batch_num = int(data_size / _batch_size)
correct_labels = 0
total_labels = 0
fetches = [self.model.scores, self.model.length, self.model.transition_params]
for i in range(batch_num):
X_batch, y_batch = dataset.next_batch(_batch_size)
feed_dict = {self.model.X_inputs: X_batch, self.model.y_inputs: y_batch, self.model.lr: 1e-5,
self.model.batch_size: _batch_size,
self.model.keep_prob: 1.0}
test_score, test_length, transition_params = sess.run(fetches=fetches,
feed_dict=feed_dict)
for tf_unary_scores_, y_, sequence_length_ in zip(
test_score, y_batch, test_length):
tf_unary_scores_ = tf_unary_scores_[:sequence_length_]
y_ = y_[:sequence_length_]
viterbi_sequence, _ = crf.viterbi_decode(
tf_unary_scores_, transition_params)
correct_labels += np.sum(np.equal(viterbi_sequence, y_))
total_labels += sequence_length_
accuracy = correct_labels / float(total_labels)
return accuracy
def main(_):
Data_ = Data(dict_path=FLAGS.dict_path, train_data=FLAGS.train_data)
print('Corpus loading completed:',FLAGS.train_data)
data_train, data_valid, data_test = Data_.builderTrainData()
print('The training set, verification set, and test set split are completed!')
model = modelDef.BiLSTMModel(max_len=Data_.max_len,
vocab_size=Data_.word2id.__len__()+1,
class_num= Data_.tag2id.__len__(),
model_save_path=FLAGS.save_path,
embed_size=FLAGS.embed_size,
hs=FLAGS.hidden_size)
print('Model definition completed!')
train = BiLSTMTrain(data_train, data_valid, data_test, model)
train.train()
print('Model training completed!')
if __name__ == '__main__':
tf.app.run()
| [
"2392539432@qq.com"
] | 2392539432@qq.com |
a78acddf6eebc59cad1ebc0e8fdaf53ee0ce2702 | 44a7101ae18c84ffa0e3c674763ba7b500937773 | /root/Desktop/Scripts/pyinstaller-1.5.1/bh_sshRcmd/bh_sshRcmd.spec | 66707266787869a8fdd977ad9985b57711fe3880 | [] | no_license | Draft2007/Scripts | cbaa66ce0038f3370c42d93da9308cbd69fb701a | 0dcc720a1edc882cfce7498ca9504cd9b12b8a44 | refs/heads/master | 2016-09-05T20:05:46.601503 | 2015-06-23T00:05:02 | 2015-06-23T00:05:02 | 37,945,893 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 561 | spec | # -*- mode: python -*-
a = Analysis([os.path.join(HOMEPATH,'support/_mountzlib.py'), os.path.join(HOMEPATH,'support/useUnicode.py'), '/usr/local/tools/bh_sshRcmd.py'],
pathex=['/usr/local/tools/pyinstaller-1.5.1'])
pyz = PYZ(a.pure)
exe = EXE( pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
name=os.path.join('dist', 'bh_sshRcmd'),
debug=False,
strip=False,
upx=True,
console=1 )
app = BUNDLE(exe,
name=os.path.join('dist', 'bh_sshRcmd.app'))
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
8f634225763e18482cad60471aa5f39cadda7853 | a00eab2cfe9566641c4c5ec99909490543e734d5 | /BackPropagation/solutions/compare_loss_acc.py | abf09d21879aa494b1838e53417b23696449f17b | [] | no_license | indianvalantine/High-Dimensional-Deep-Learning | 55823c1d80ffee2e50bc20fcdf24f24cc6de8c14 | 47ee6263f40496e7ab5f6a030508ecd531732cb5 | refs/heads/master | 2022-12-27T21:00:28.090851 | 2020-09-28T14:04:44 | 2020-09-28T14:04:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | history = keras_model.history.history
fig = plt.figure(figsize=(20,5))
ax = fig.add_subplot(1,2,1)
ax.plot(history["loss"], label="keras", color="red")
ax.plot(history["val_loss"], label="keras_test", linestyle="dashed" ,color="red")
ax.plot(losses, label="numpy", color="blue")
ax.plot(losses_test, label="numpy_test", color="blue")
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
ax.set_title("Training loss")
ax.legend(loc='best')
ax = fig.add_subplot(1,2,2)
ax.plot(history["acc"], label="keras", color="red")
ax.plot(history["val_acc"], label="keras_test", linestyle="dashed" ,color="red")
ax.plot(accuracies, label="numpy", color="blue")
ax.plot(accuracies, label="numpy_test", color="blue")
ax.set_ylabel("accuracy")
ax.set_xlabel("Epochs")
ax.legend(loc='best')
ax.set_title("Accuracy") | [
"brendan.guillouet@gmail.com"
] | brendan.guillouet@gmail.com |
b93f375f3cedfc8c8ea2bc3dcac1516cf225aaa1 | f7bbc8246a49480f58b5295a14fd0955c32c093c | /Desktop/python trader/backtest data/strategy8.py | 361e722ad7fd1f00cc1ece891ce450ffab5d9c49 | [] | no_license | jobeSoffa/pythonTrader | cf66ea38cc95b1695e0ac66e13a713a81db78e2a | 6ef7b97d6dcb3726f65538bdbe6641bdb92bb6d3 | refs/heads/master | 2020-04-09T04:53:56.805565 | 2018-12-04T09:43:27 | 2018-12-04T09:43:27 | 160,042,254 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,890 | py | import trade
import candleCompressor
import candle
class strategy8(object):
highestBalance = 1000
highestDrawdown = 0
shouldPrint = True
inBuy = False
totalTrades = 0
winCounter = 0
lossCount = 0
    com = 0  # commission per trade; the commented .0001 was an alternative value
pip = .0001
otherPip = 1/pip
maxTrades = 30
tempArr = []
candleArr = []
momArr = []
balance = 1000
tr = trade.Trader()
#cmp = candleCompressor.candleCompressor()
currentCandle = 0
length = 118
#strategy variables
riskReward = 8
stopLoss = 10
lotSizePercent = .001
movingAverage = 10
candles = 3 #number of 15m candles, 16 = 4hr
shouldPrint = False
def __init__(self, percent,cad,pip,length,shouldPrint):
self.shouldPrint = shouldPrint
self.length = length
self.lotSizePercent = percent
self.candles = cad
self.pip = pip
self.otherPip = 1/self.pip
self.tr = trade.Trader()
self.candleArr = []
self.tempArr = []
self.balance = 1000
def getNumTrades(self):
return self.totalTrades
def getWinRate(self):
return self.tr.getWinRate()
    def drawdown(self, c):
        # mark-to-market equity; this assumes closeAll() only values the open
        # trades at price c (the original recomputed it three times per call)
        equity = self.balance + self.closeAll(c)
        if equity > self.highestBalance:
            self.highestBalance = equity
        if (self.highestBalance - equity) / self.highestBalance > self.highestDrawdown:
            self.highestDrawdown = (self.highestBalance - equity) / self.highestBalance
        return self.highestDrawdown
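    # The statistic tracked above is the standard maximum drawdown:
    # max over time of (peak_equity - equity) / peak_equity,
    # i.e. the worst relative drop from the running equity high.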
    def update(self, h, l, should_print, c):
        # renamed from 'print', which is a reserved word in Python 2 and
        # shadows the builtin in Python 3
        self.balance += self.tr.update(h, l, self.balance, should_print, c)
def len(self):
return len(self.candleArr)
def closeAll(self,c):
total = self.tr.closeAll(c)
return total
def calMomentum(self, length, arr):
farCandle = arr[len(arr)-1-length].getClose()
thisCandle = arr[len(arr)-1].getClose()
return thisCandle - farCandle
def calMomentum2(self, length, arr):
farCandle = arr[len(arr)-1-length]
thisCandle = arr[len(arr)-1]
return thisCandle - farCandle
    def nextCandle(self, cand):
        self.tempArr.append(cand)
        self.currentCandle += 1
        self.drawdown(cand.getClose())
        if self.currentCandle == self.candles:
            # compress the buffered candles into one larger candle
            thisCand = candleCompressor.candleCompressor().compress(self.tempArr)
            thisMom = 0
            momOfMom = 0
            if len(self.candleArr) > self.length + 1:
                thisMom = self.calMomentum(self.length, self.candleArr)
                self.momArr.append(thisMom)
                if len(self.momArr) > 3:
                    momOfMom = self.calMomentum2(1, self.momArr)
                # '> 0'/'< 0' already exclude zero, so the original's extra
                # 'not ... == 0' checks are dropped as redundant
                if thisMom > 0 and momOfMom > 0 and not self.inBuy:
                    # momentum and its rate of change are both positive: go long
                    self.balance += self.tr.crossClose(thisCand.getClose(), self.shouldPrint)
                    self.tr.crossOpen(thisCand.getClose(), self.com, True, self.balance, self.lotSizePercent, self.shouldPrint)
                    self.totalTrades += 1
                    self.inBuy = True
                elif thisMom < 0 and momOfMom < 0 and self.inBuy:
                    # momentum and its rate of change are both negative: go short
                    self.balance += self.tr.crossClose(thisCand.getClose(), self.shouldPrint)
                    self.tr.crossOpen(thisCand.getClose(), self.com, False, self.balance, self.lotSizePercent, self.shouldPrint)
                    self.totalTrades += 1
                    self.inBuy = False
            self.candleArr.append(thisCand)
            self.currentCandle = 0
            self.tempArr = []
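# Illustrative backtest driver (a sketch; assumes candle objects exposing
# getClose() as used by nextCandle above):
#   s = strategy8(.001, 3, .0001, 118, False)
#   for cand in candles_15m:
#       s.nextCandle(cand)
#   print(s.balance + s.closeAll(last_close))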
| [
"otisjobe123@gmail.com"
] | otisjobe123@gmail.com |
77576f4bd93940f460a967a46375dcb841c71094 | 4a418036130cb63caa503719b4162cce9753459b | /nemo/collections/nlp/modules/common/transformer/transformer_modules.py | 63998217f09b5eaa659f8bbb583c263a6befd154 | [
"Apache-2.0"
] | permissive | kssteven418/Q-ASR | 89a7dac24d74556453e7b54b26289fd1466070c4 | aa1ec2ef78fd7606f8f365dfe3e66691a0e48178 | refs/heads/qasr | 2023-08-05T15:43:42.493513 | 2021-10-11T20:06:53 | 2021-10-11T20:06:53 | 353,027,973 | 33 | 1 | Apache-2.0 | 2021-03-30T17:33:26 | 2021-03-30T14:20:56 | Jupyter Notebook | UTF-8 | Python | false | false | 8,624 | py | # Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from torch import nn
from torch.nn.functional import gelu
__all__ = ["TransformerEmbedding"]
class FixedPositionalEncoding(nn.Module):
"""
Fixed positional encoding (embedding layer) from sine and cosine functions
of different frequencies according to https://arxiv.org/abs/1706.03762
Args:
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
"""
def __init__(self, hidden_size, max_sequence_length=512):
super().__init__()
pos_enc = torch.zeros(max_sequence_length, hidden_size)
position = torch.arange(0.0, max_sequence_length).unsqueeze(1)
coef = -math.log(10000.0) / hidden_size
div_term = torch.exp(coef * torch.arange(0.0, hidden_size, 2))
pos_enc[:, 0::2] = torch.sin(position * div_term)
pos_enc[:, 1::2] = torch.cos(position * div_term)
pos_enc.div_(math.sqrt(hidden_size))
self.register_buffer('pos_enc', pos_enc)
def forward(self, position_ids):
return torch.embedding(self.pos_enc, position_ids)
class TransformerEmbedding(nn.Module):
"""
Embedding from token and position embeddings.
Optionally add token_type embedding (e.g. type of the sentence in BERT).
Args:
vocab_size: size of the vocabulary
hidden_size: size of the embeddings in the model, also known as d_model
max_sequence_length: maximum allowed length of the input sequence
num_token_types: number of different token types
(e.g. tokens of sentence A and tokens of sentence B in BERT)
embedding_dropout: probability of dropout applied to embeddings
learn_positional_encodings: whether to learn positional encodings or
use fixed (sine-cosine) ones
"""
def __init__(
self,
vocab_size,
hidden_size,
max_sequence_length=512,
num_token_types=2,
embedding_dropout=0.0,
learn_positional_encodings=False,
):
super().__init__()
self.max_sequence_length = max_sequence_length
self.token_embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0)
if learn_positional_encodings:
self.position_embedding = nn.Embedding(max_sequence_length, hidden_size)
else:
self.position_embedding = FixedPositionalEncoding(hidden_size, max_sequence_length)
self.token_type_embedding = nn.Embedding(num_token_types, hidden_size)
self.layer_norm = nn.LayerNorm(hidden_size, eps=1e-5)
self.dropout = nn.Dropout(embedding_dropout)
def forward(self, input_ids, token_type_ids=None, start_pos=0):
seq_length = input_ids.size(1)
if seq_length > self.max_sequence_length:
raise ValueError(
f"Input sequence is longer than maximum allowed sequence length for positional encoding. "
f"Got {seq_length} and {self.max_sequence_length}"
)
position_ids = torch.arange(
start=start_pos, end=start_pos + seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
token_embeddings = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = token_embeddings + position_embeddings
if token_type_ids is not None:
token_type_embeddings = self.token_type_embedding(token_type_ids)
embeddings = embeddings + token_type_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class MultiHeadAttention(nn.Module):
"""
Multi-head scaled dot-product attention layer.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
num_attention_heads: number of heads in multi-head attention
attn_score_dropout: probability of dropout applied to attention scores
attn_layer_dropout: probability of dropout applied to the output of the
whole layer, but before layer normalization
"""
def __init__(self, hidden_size, num_attention_heads, attn_score_dropout=0.0, attn_layer_dropout=0.0):
super().__init__()
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number "
"of attention heads (%d)" % (hidden_size, num_attention_heads)
)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.attn_head_size = int(hidden_size / num_attention_heads)
self.attn_scale = math.sqrt(math.sqrt(self.attn_head_size))
self.query_net = nn.Linear(hidden_size, hidden_size)
self.key_net = nn.Linear(hidden_size, hidden_size)
self.value_net = nn.Linear(hidden_size, hidden_size)
self.out_projection = nn.Linear(hidden_size, hidden_size)
self.attn_dropout = nn.Dropout(attn_score_dropout)
self.layer_dropout = nn.Dropout(attn_layer_dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attn_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, queries, keys, values, attention_mask):
# attention_mask is needed to hide the tokens which correspond to [PAD]
# in the case of BERT, or to hide the future tokens in the case of
# vanilla language modeling and translation
query = self.query_net(queries)
key = self.key_net(keys)
value = self.value_net(values)
query = self.transpose_for_scores(query) / self.attn_scale
key = self.transpose_for_scores(key) / self.attn_scale
value = self.transpose_for_scores(value)
# for numerical stability we pre-divide query and key by sqrt(sqrt(d))
attention_scores = torch.matmul(query, key.transpose(-1, -2))
if attention_mask is not None:
attention_scores = attention_scores + attention_mask.to(attention_scores.dtype)
attention_probs = torch.softmax(attention_scores, dim=-1)
attention_probs = self.attn_dropout(attention_probs)
context = torch.matmul(attention_probs, value)
context = context.permute(0, 2, 1, 3).contiguous()
new_context_shape = context.size()[:-2] + (self.hidden_size,)
context = context.view(*new_context_shape)
# output projection
output_states = self.out_projection(context)
output_states = self.layer_dropout(output_states)
return output_states
class PositionWiseFF(nn.Module):
"""
Position-wise feed-forward network of Transformer block.
Args:
hidden_size: size of the embeddings in the model, also known as d_model
inner_size: number of neurons in the intermediate part of feed-forward
net, usually is (4-8 x hidden_size) in the papers
ffn_dropout: probability of dropout applied to net output
hidden_act: activation function used between two linear layers
"""
def __init__(self, hidden_size, inner_size, ffn_dropout=0.0, hidden_act="relu"):
super().__init__()
self.dense_in = nn.Linear(hidden_size, inner_size)
self.dense_out = nn.Linear(inner_size, hidden_size)
self.layer_dropout = nn.Dropout(ffn_dropout)
ACT2FN = {"gelu": gelu, "relu": torch.relu}
self.act_fn = ACT2FN[hidden_act]
def forward(self, hidden_states):
output_states = self.dense_in(hidden_states)
output_states = self.act_fn(output_states)
output_states = self.dense_out(output_states)
output_states = self.layer_dropout(output_states)
return output_states
| [
"noreply@github.com"
] | kssteven418.noreply@github.com |
e7057bc48d0c58e842a5c16fe3711fae0386968b | 5c534f0a3912ef002834398c765ed1e3f98c9173 | /Quotes/test.py | 1b8164565ecfabfdb0762a61f09b818a5961c220 | [] | no_license | ormanya/Supyiel | 894c2acc7f05683f1cd9101a413f3c93fd69d149 | 77e291c5b73da2e292f6b38ff40aa2b3d70915cb | refs/heads/master | 2023-03-13T13:58:20.944904 | 2023-03-02T16:53:02 | 2023-03-02T16:53:02 | 80,935,297 | 8 | 0 | null | 2022-11-10T18:06:34 | 2017-02-04T17:24:40 | Python | UTF-8 | Python | false | false | 1,871 | py | ###
# Copyright (c) 2008,2012 Kevin Funk
# Copyright (c) 2014-2015 James Lu
# Copyright (c) 2016-2017 Ormanya
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *
import os
class QuotesTestCase(PluginTestCase):
plugins = ('Quotes',)
def testTay(self):
self.assertNotError("tay")
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| [
"liriel@sekrit.me"
] | liriel@sekrit.me |
09ee4a21ddc1b92f8f3846d847e7be6be388b97a | a8fd86dce16f7fec7a5f00ecf97270fb7a8243b9 | /phylo3.py | 02e5ff23a7be96c9c780ec7e9b98ff7b8ab5952b | [] | no_license | tomopfuku/mammalian_morphological_clocks | 8a8f68b498297f95b9222843de416912c50e2e3a | 80b3179cb8101ac654e516f71282d7bbba288934 | refs/heads/master | 2022-10-18T02:39:54.477321 | 2017-11-28T17:07:34 | 2017-11-28T17:07:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,086 | py | PREORDER = 0; POSTORDER = 1
BRANCHLENGTH = 0; INTERNODES = 1
#trying to deprecate this.
class Node:
def __init__(self):
self.data = {}
self.isroot = False
self.istip = False
self.label = None
self.length = 0
self.old_length = 0
self.parent = None
self.children = []
self.nchildren = 0
self.comment = None
        #self.charst = 0.
        # sigsq is read by get_newick_repr(show_rate=True), so keep it initialized
        self.sigsq = 0.
        #self.rate_class = 0
self.height = None
self.number = 0
self.occurrences = None
self.num_occurrences = None
def get_newick_repr(self,showbl=False,show_rate=False):
ret = ""
for i in range(len(self.children)):
if i == 0:
ret += "("
ret += self.children[i].get_newick_repr(showbl,show_rate)
if i == len(self.children)-1:
ret += ")"
else:
ret += ","
if self.label != None:
ret += self.label
if showbl == True:
ret += ":" + str(self.length)
if show_rate ==True:
ret += ":" + str(self.sigsq)
return ret
def add_child(self, child):
assert child not in self.children
self.children.append(child)
child.parent = self
self.nchildren += 1
def remove_child(self, child):
assert child in self.children
self.children.remove(child)
child.parent = None
self.nchildren -= 1
    def prune_from_node(self):
        # prune childless nodes below this one; POSTORDER (the int constant,
        # not the string the original passed) visits children before parents.
        # The original tested/pruned 'self' on every iteration instead of 'i'.
        for i in self.descendants(POSTORDER):
            if len(i.children) == 0:
                i.prune()
def leaves(self):
return [ n for n in self.iternodes() if n.istip ]
def iternodes(self, order=PREORDER, v=None):
if order == PREORDER:
yield self
#print [i.label for i in self.children]
for child in self.children:
for d in child.iternodes(order):
yield d
if order == POSTORDER:
yield self
"""
def postorder_nodes(self):
[yield d for d in child.postorder_nodes() for child in self.children]
yield self
"""
def descendants(self, order=PREORDER, v=None):
if v is None:
v = []
#assert order in ("PREORDER", "POSTORDER")
for child in self.children:
if order == PREORDER:
v.append(child)
else:
v.insert(0, child)
if child.children:
child.descendants(order, v)
return v
def find_descendant(self, label):
if label == self.label:
return self
else:
for child in self.children:
n = child.find_descendant(label)
if n:
return n
return None
def prune(self):
p = self.parent
if p:
p.remove_child(self)
return p
def graft(self, node):
parent = self.parent
parent.remove_child(self)
n = Node()
n.add_child(self)
n.add_child(node)
parent.add_child(n)
def leaf_distances(self, store=None, measure=BRANCHLENGTH):
if store is None:
store = {}
leaf2len = {}
if self.children:
for child in self.children:
if measure == BRANCHLENGTH:
assert child.length is not None
dist = child.length
elif measure == INTERNODES:
dist = 1
else:
raise "InvalidMeasure"
child.leaf_distances(store, measure)
if child.istip:
leaf2len[child.label] = dist
else:
for k, v in store[child].items():
leaf2len[k] = v + dist
else:
leaf2len[self] = {self.label: 0}
store[self] = leaf2len
return store
def rootpath(self):
n = self
while 1:
yield n
if n.parent:
n = n.parent
else:
break
def tip_labels(self):
labs = []
for i in self.leaves():
labs.append(i.label)
return labs
def nnodes(self, type="internal"):
n = 0
if type == "internal":
for i in self.iternodes():
if i.istip or i == self:
continue
n += 1
elif type == "all":
for i in self.iternodes():
n+=1
elif type == "tips":
for i in self.iternodes():
if i.istip:
n+=1
return n
"""
# this returns all possible NNIs for a single bifurcating node with bifurcating children
# tree should probably be deep copied before using this
"""
def nni_set(self):
if len(self.children) != 2 or len(self.descendants()) < 3:
print "this only works on bifurcating selfs that parent multiple subtrees (ie. does not lead to only terminal edges)"
return None
subtrees = []
for child in self.children:
if child.istip == False:
assert len(child.children) == 2
for sub in child.children:
subtrees.append(sub)
subtrees += [i for i in self.children if i.istip] #add terminal subtree child --> 'c' in (a,b),c))
assert len(subtrees) == 3 or len(subtrees) == 4
nni_trees = []
for c1 in subtrees:
for c2 in subtrees:
p1 = c1.parent
p2 = c2.parent
if c1 == c2 or p1 == p2: #can't swap subtrees with same parent
continue
                p1.remove_child(c1)
                p1.add_child(c2)
                p2.remove_child(c2)
                p2.add_child(c1)  # swap subtrees; add_child also reassigns .parent
                nni_trees.append(self.get_newick_repr())
                # undo the swap so each NNI is generated relative to the
                # original topology (otherwise successive swaps compound)
                p1.remove_child(c2)
                p2.remove_child(c1)
                p1.add_child(c1)
                p2.add_child(c2)
nni_trees = list(set(nni_trees)) #remove duplicates
#print len(nni_trees)
return nni_trees
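# A standard NNI fact for orientation: every internal edge of a binary tree
# admits exactly two alternative NNI topologies, which is why the
# deduplicated list returned by nni_set stays small.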
def reroot(oldroot, newroot):
oldroot.isroot = False
newroot.isroot = True
v = []
n = newroot
while 1:
v.append(n)
if not n.parent: break
n = n.parent
#print [ x.label for x in v ]
v.reverse()
for i, cp in enumerate(v[:-1]):
node = v[i+1]
# node is current node; cp is current parent
#print node.label, cp.label
cp.remove_child(node)
node.add_child(cp)
cp.length = node.length
return newroot
def getMRCATraverseFromPath(path1, curn2):
    mrca = None
    # walk from curn2 toward the root until an ancestor is found on path1
    parent = curn2
    x = True
    while x == True:
        for i in range(len(path1)):
            if parent == path1[i]:
                mrca = parent
                x = False
                break
        parent = parent.parent
    return mrca
| [
"cfukuchi@umich.edu"
] | cfukuchi@umich.edu |
c23b86d447f850e4bd75066d30e311f702ae67d0 | 9b92b21f39870e1b8a0de6bc94ff08a66690b1ea | /sources/webapp/SyncronisationDAO.py | bf64956c5930f3244b62286e3d037dc75d5ef9a1 | [] | no_license | sebastiansIT/HTML5Podcatcher | ac5bb3cf128d4785f478b43e23ea57c62cfadce0 | f1d9f446df0333eec3ef59219b28d683b7f17c5f | refs/heads/master | 2023-06-25T19:01:39.039093 | 2021-05-08T05:51:47 | 2021-05-08T05:51:47 | 10,554,866 | 8 | 1 | null | 2023-03-04T03:04:49 | 2013-06-07T17:10:11 | JavaScript | UTF-8 | Python | false | false | 1,925 | py | import sqlite3
import datetime
import SyncronisationModel
import cgi, cgitb
cgitb.enable()
class Sqlite3DAO:
def __init__(self, fileName):
self.dbFileName = fileName
    def DataBaseInitialisation(self):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "CREATE TABLE SyncPoints(ID INTEGER PRIMARY KEY, Key VARCHAR(100) UNIQUE, Value TEXT) "
cursor.execute(sql)
connection.commit()
connection.close()
def Select(self, key=None):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "SELECT ID, Key, Value FROM SyncPoints"
try:
if key != None:
sql = sql + " WHERE Key = ?"
cursor.execute(sql, (key,))
entries = []
for row in cursor:
entry = SyncronisationModel.Point(row[0], row[1], row[2])
entries.append(entry)
        except Exception:
entries = ["error"]
connection.commit()
connection.close()
return entries
def Insert(self, key, value):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "INSERT INTO SyncPoints(Key, Value) VALUES (?, ?)"
cursor.execute(sql, (key, value))
connection.commit()
connection.close()
return self.Select(key=key)
def Update(self, key, value):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "UPDATE SyncPoints SET Value = ? WHERE Key = ?"
cursor.execute(sql, (value, key))
connection.commit()
connection.close()
return self.Select(key)
def Delete(self, key):
connection = sqlite3.connect(self.dbFileName)
cursor = connection.cursor()
sql = "DELETE FROM SyncPoints WHERE Key = ?"
cursor.execute(sql, (key,))
connection.commit()
connection.close()
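    # Minimal usage sketch (file name and key below are illustrative, not part
    # of the original module); assumes DataBaseInitialisation was run once so
    # the SyncPoints table exists:
    #   dao = Sqlite3DAO("sync.sqlite3")
    #   dao.Save("episodes", '{"played": []}')  # insert, or update if the key exists
    #   points = dao.Select("episodes")         # list of SyncronisationModel.Point
    #   dao.Delete("episodes")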
def Save(self, key, value):
if len(self.Select(key)) > 0:
#return [SyncronisationModel.Point(7, "test", "{test}")]
return self.Update(key, value)
else:
return self.Insert(key, value) | [
"sebastian@human-injection.de"
] | sebastian@human-injection.de |
6e890dcf23489e8e89080c6b65f3762b23bdff4d | 72a22cde6b6ca91255f25a931909502115e4e47c | /Alfred/SwitchLayoutWorkflow/set.py | 4ae6ae60a0486bf6d86b48325f6a942a3ddc711a | [] | no_license | DATADEER/dvorak-mac-setup | 52de6f0062e75981cf6a0c6bc91de92f6095b24a | 2f5d0eb450be9c02fd74285cd526715abe358941 | refs/heads/master | 2020-05-17T08:00:25.408894 | 2020-03-15T11:42:10 | 2020-03-15T11:42:10 | 183,594,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import sys
import json
from os.path import expanduser
from collections import OrderedDict
import subprocess
CHOSEN_PROFILE = sys.argv[1]
CONFIG_PATH = '.config/karabiner/karabiner.json'
home = expanduser("~")
config = {}
with open('{}/{}'.format(home, CONFIG_PATH)) as conf_file:
config = json.load(conf_file, object_pairs_hook=OrderedDict)
for profile in config['profiles']:
profile['selected'] = profile['name'] == CHOSEN_PROFILE
with open('{}/{}'.format(home, CONFIG_PATH), 'w') as conf_file:
conf_file.write(json.dumps(config, indent=4, separators=(',', ': ')))
#log available keyboard layouts with issw -l
if CHOSEN_PROFILE == "DVORAK":
#switch to US Layout
subprocess.run(["/usr/local/bin/issw", "com.apple.keylayout.US"])
else:
#switch to DEUTSCH Layout
subprocess.run(["/usr/local/bin/issw", "com.apple.keylayout.German"])
| [
"konto@datadeer.de"
] | konto@datadeer.de |
6925f9d279dd7fc2386a10b7f0527b1c88816f95 | a4ea525e226d6c401fdb87a6e9adfdc5d07e6020 | /src/azure-cli/azure/cli/command_modules/servicebus/aaz/latest/servicebus/topic/_list.py | 751ddf434b8c609435a955fc4eaa4a17a49bdf38 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | Azure/azure-cli | 13340eeca2e288e66e84d393fa1c8a93d46c8686 | a40fd14ad0b6e89720a2e58d4d9be3a6ce1535ca | refs/heads/dev | 2023-08-17T06:25:37.431463 | 2023-08-17T06:00:10 | 2023-08-17T06:00:10 | 51,040,886 | 4,018 | 3,310 | MIT | 2023-09-14T11:11:05 | 2016-02-04T00:21:51 | Python | UTF-8 | Python | false | false | 10,902 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus topic list",
)
class List(AAZCommand):
"""List all the topics in a namespace.
"""
_aaz_info = {
"version": "2022-01-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/topics", "2022-01-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.skip = AAZIntArg(
options=["--skip"],
help="Skip is only used if a previous operation returned a partial result. If a previous response contains a nextLink element, the value of the nextLink element will include a skip parameter that specifies a starting point to use for subsequent calls.",
fmt=AAZIntArgFormat(
maximum=1000,
minimum=0,
),
)
_args_schema.top = AAZIntArg(
options=["--top"],
help="May be used to limit the number of results to the most recent N usageDetails.",
fmt=AAZIntArgFormat(
maximum=1000,
minimum=1,
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.TopicsListByNamespace(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class TopicsListByNamespace(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"$skip", self.ctx.args.skip,
),
**self.serialize_query_param(
"$top", self.ctx.args.top,
),
**self.serialize_query_param(
"api-version", "2022-01-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.accessed_at = AAZStrType(
serialized_name="accessedAt",
flags={"read_only": True},
)
properties.auto_delete_on_idle = AAZStrType(
serialized_name="autoDeleteOnIdle",
)
properties.count_details = AAZObjectType(
serialized_name="countDetails",
)
properties.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
properties.default_message_time_to_live = AAZStrType(
serialized_name="defaultMessageTimeToLive",
)
properties.duplicate_detection_history_time_window = AAZStrType(
serialized_name="duplicateDetectionHistoryTimeWindow",
)
properties.enable_batched_operations = AAZBoolType(
serialized_name="enableBatchedOperations",
)
properties.enable_express = AAZBoolType(
serialized_name="enableExpress",
)
properties.enable_partitioning = AAZBoolType(
serialized_name="enablePartitioning",
)
properties.max_message_size_in_kilobytes = AAZIntType(
serialized_name="maxMessageSizeInKilobytes",
)
properties.max_size_in_megabytes = AAZIntType(
serialized_name="maxSizeInMegabytes",
)
properties.requires_duplicate_detection = AAZBoolType(
serialized_name="requiresDuplicateDetection",
)
properties.size_in_bytes = AAZIntType(
serialized_name="sizeInBytes",
flags={"read_only": True},
)
properties.status = AAZStrType()
properties.subscription_count = AAZIntType(
serialized_name="subscriptionCount",
flags={"read_only": True},
)
properties.support_ordering = AAZBoolType(
serialized_name="supportOrdering",
)
properties.updated_at = AAZStrType(
serialized_name="updatedAt",
flags={"read_only": True},
)
count_details = cls._schema_on_200.value.Element.properties.count_details
count_details.active_message_count = AAZIntType(
serialized_name="activeMessageCount",
flags={"read_only": True},
)
count_details.dead_letter_message_count = AAZIntType(
serialized_name="deadLetterMessageCount",
flags={"read_only": True},
)
count_details.scheduled_message_count = AAZIntType(
serialized_name="scheduledMessageCount",
flags={"read_only": True},
)
count_details.transfer_dead_letter_message_count = AAZIntType(
serialized_name="transferDeadLetterMessageCount",
flags={"read_only": True},
)
count_details.transfer_message_count = AAZIntType(
serialized_name="transferMessageCount",
flags={"read_only": True},
)
system_data = cls._schema_on_200.value.Element.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
__all__ = ["List"]
| [
"noreply@github.com"
] | Azure.noreply@github.com |
30c2edac25e1e11650ef113d805ea63ffa3f6a75 | d2ec03d034bddc968ab850bf5a4593087f9eafe7 | /train_se3posenets.py | b2acac894a4ed0efd11d1ad2f16d4b81063f4585 | [] | no_license | abyravan/se3posenets-pytorch | b5c61550939a84e6df3f73c3372062a266f4f711 | d15a2db3f2e708fafd6912f877e53aec6eed5ee1 | refs/heads/master | 2020-05-15T13:44:13.853586 | 2019-09-06T02:49:04 | 2019-09-06T02:49:04 | 182,308,095 | 16 | 7 | null | null | null | null | UTF-8 | Python | false | false | 48,573 | py | # Global imports
import os
import sys
import shutil
import time
import numpy as np
import matplotlib.pyplot as plt
import random
# Torch imports
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
from torch.autograd import Variable
import torchvision
torch.multiprocessing.set_sharing_strategy('file_system')
# Local imports
import se3layers as se3nn
import data
import ctrlnets
import util
from util import AverageMeter, Tee, DataEnumerator
import helperfuncs as helpers
#### Setup options
# Common
import argparse
import options
parser = options.setup_comon_options()
# Loss options
parser.add_argument('--pt-wt', default=1, type=float,
metavar='WT', help='Weight for the 3D point loss - only FWD direction (default: 1)')
parser.add_argument('--use-full-jt-angles', action='store_true', default=False,
help='Use angles of all joints as inputs to the networks (default: False)')
# Define xrange
try:
a = xrange(1)
except NameError: # Not defined in Python 3.x
def xrange(*args):
return iter(range(*args))
################ MAIN
#@profile
def main():
# Parse args
global args, num_train_iter
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
args.batch_norm = not args.no_batch_norm
### Create save directory and start tensorboard logger
util.create_dir(args.save_dir) # Create directory
now = time.strftime("%c")
tblogger = util.TBLogger(args.save_dir + '/logs/' + now) # Start tensorboard logger
# Create logfile to save prints
logfile = open(args.save_dir + '/logs/' + now + '/logfile.txt', 'w')
backup = sys.stdout
sys.stdout = Tee(sys.stdout, logfile)
########################
############ Parse options
# Set seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# 480 x 640 or 240 x 320
if args.full_res:
print("Using full-resolution images (480x640)")
# Get default options & camera intrinsics
args.cam_intrinsics, args.cam_extrinsics, args.ctrl_ids = [], [], []
args.state_labels = []
for k in xrange(len(args.data)):
load_dir = args.data[k] #args.data.split(',,')[0]
try:
# Read from file
intrinsics = data.read_intrinsics_file(load_dir + "/intrinsics.txt")
print("Reading camera intrinsics from: " + load_dir + "/intrinsics.txt")
if args.full_res:
args.img_ht, args.img_wd = int(intrinsics['ht']), int(intrinsics['wd'])
else:
args.img_ht, args.img_wd = 240, 320 # All data except SE(2) data is at 240x320 resolution
args.img_scale = 1.0 / intrinsics['s'] # Scale of the image (use directly from the data)
# Setup camera intrinsics
sc = float(args.img_ht) / intrinsics['ht'] # Scale factor for the intrinsics
cam_intrinsics = {'fx': intrinsics['fx'] * sc,
'fy': intrinsics['fy'] * sc,
'cx': intrinsics['cx'] * sc,
'cy': intrinsics['cy'] * sc}
print("Scale factor for the intrinsics: {}".format(sc))
except:
print("Could not read intrinsics file, reverting to default settings")
args.img_ht, args.img_wd, args.img_scale = 240, 320, 1e-4
cam_intrinsics = {'fx': 589.3664541825391 / 2,
'fy': 589.3664541825391 / 2,
'cx': 320.5 / 2,
'cy': 240.5 / 2}
print("Intrinsics => ht: {}, wd: {}, fx: {}, fy: {}, cx: {}, cy: {}".format(args.img_ht, args.img_wd,
cam_intrinsics['fx'],
cam_intrinsics['fy'],
cam_intrinsics['cx'],
cam_intrinsics['cy']))
# Compute intrinsic grid & add to list
cam_intrinsics['xygrid'] = data.compute_camera_xygrid_from_intrinsics(args.img_ht, args.img_wd,
cam_intrinsics)
args.cam_intrinsics.append(cam_intrinsics) # Add to list of intrinsics
### BAXTER DATA
# Compute extrinsics
cam_extrinsics = data.read_cameradata_file(load_dir + '/cameradata.txt')
# Get dimensions of ctrl & state
try:
statelabels, ctrllabels, trackerlabels = data.read_statectrllabels_file(load_dir + "/statectrllabels.txt")
print("Reading state/ctrl joint labels from: " + load_dir + "/statectrllabels.txt")
except:
statelabels = data.read_statelabels_file(load_dir + '/statelabels.txt')['frames']
ctrllabels = statelabels # Just use the labels
trackerlabels = []
print("Could not read statectrllabels file. Reverting to labels in statelabels file")
#args.num_state, args.num_ctrl, args.num_tracker = len(statelabels), len(ctrllabels), len(trackerlabels)
#print('Num state: {}, Num ctrl: {}'.format(args.num_state, args.num_ctrl))
args.num_ctrl = len(ctrllabels)
print('Num ctrl: {}'.format(args.num_ctrl))
# Find the IDs of the controlled joints in the state vector
# We need this if we have state dimension > ctrl dimension and
# if we need to choose the vals in the state vector for the control
ctrlids_in_state = torch.LongTensor([statelabels.index(x) for x in ctrllabels])
print("ID of controlled joints in the state vector: ", ctrlids_in_state.view(1, -1))
# Add to list of intrinsics
args.cam_extrinsics.append(cam_extrinsics)
args.ctrl_ids.append(ctrlids_in_state)
args.state_labels.append(statelabels)
# Data noise
if not hasattr(args, "add_noise_data") or (len(args.add_noise_data) == 0):
args.add_noise_data = [False for k in xrange(len(args.data))] # By default, no noise
else:
assert(len(args.data) == len(args.add_noise_data))
if hasattr(args, "add_noise") and args.add_noise: # BWDs compatibility
args.add_noise_data = [True for k in xrange(len(args.data))]
# Get mean/std deviations of dt for the data
if args.mean_dt == 0:
args.mean_dt = args.step_len * (1.0 / 30.0)
args.std_dt = 0.005 # +- 10 ms
print("Using default mean & std.deviation based on the step length. Mean DT: {}, Std DT: {}".format(
args.mean_dt, args.std_dt))
else:
exp_mean_dt = (args.step_len * (1.0 / 30.0))
assert ((args.mean_dt - exp_mean_dt) < 1.0 / 30.0), \
"Passed in mean dt ({}) is very different from the expected value ({})".format(
args.mean_dt, exp_mean_dt) # Make sure that the numbers are reasonable
print("Using passed in mean & std.deviation values. Mean DT: {}, Std DT: {}".format(
args.mean_dt, args.std_dt))
# Image suffix
args.img_suffix = '' if (args.img_suffix == 'None') else args.img_suffix # Workaround since we can't specify empty string in the yaml
print('Ht: {}, Wd: {}, Suffix: {}, Num ctrl: {}'.format(args.img_ht, args.img_wd, args.img_suffix, args.num_ctrl))
# Read mesh ids and camera data (for baxter)
args.baxter_labels = data.read_statelabels_file(args.data[0] + '/statelabels.txt')
args.mesh_ids = args.baxter_labels['meshIds']
# SE3 stuff
assert (args.se3_type in ['se3euler', 'se3aa', 'se3quat', 'affine', 'se3spquat', 'se3aar']), 'Unknown SE3 type: ' + args.se3_type
args.delta_pivot = ''
print('Predicting {} SE3s of type: {}'.format(args.num_se3, args.se3_type))
# Sequence stuff
print('Step length: {}, Seq length: {}'.format(args.step_len, args.seq_len))
# Loss parameters
print('Loss scale: {}, Loss weights => PT: {}, CONSIS: {}'.format(
args.loss_scale, args.pt_wt, args.consis_wt))
# Weight sharpening stuff
if args.use_wt_sharpening:
print('Using weight sharpening to encourage binary mask prediction. Start iter: {}, Rate: {}, Noise stop iter: {}'.format(
args.sharpen_start_iter, args.sharpen_rate, args.noise_stop_iter))
# Loss type
norm_motion = ', Normalizing loss based on GT motion' if args.motion_norm_loss else ''
print('3D loss type: ' + args.loss_type + norm_motion)
# Wide model
if args.wide_model:
print('Using a wider network!')
if args.use_jt_angles:
print("Using Jt angles as input to the pose encoder")
if args.use_jt_angles_trans:
print("Using Jt angles as input to the transition model")
# DA threshold / winsize
print("Flow/visibility computation. DA threshold: {}, DA winsize: {}".format(args.da_threshold,
args.da_winsize))
if args.use_only_da_for_flows:
print("Computing flows using only data-associations. Flows can only be computed for visible points")
else:
print("Computing flows using tracker poses. Can get flows for all input points")
########################
############ Load datasets
# Get datasets
load_color = None
if args.reject_left_motion:
print("Examples where any joint of the left arm moves by > 0.005 radians inter-frame will be discarded. \n"
"NOTE: This test will be slow on any machine where the data needs to be fetched remotely")
if args.reject_right_still:
print("Examples where no joint of the right arm move by > 0.015 radians inter-frame will be discarded. \n"
"NOTE: This test will be slow on any machine where the data needs to be fetched remotely")
if args.add_noise:
print("Adding noise to the depths, actual configs & ctrls")
print("Baxter dataset")
valid_filter = lambda p, n, st, se, slab: data.valid_data_filter(p, n, st, se, slab,
mean_dt=args.mean_dt, std_dt=args.std_dt,
reject_left_motion=args.reject_left_motion,
reject_right_still=args.reject_right_still)
read_seq_func = data.read_baxter_sequence_from_disk
### Noise function
#noise_func = lambda d, c: data.add_gaussian_noise(d, c, std_d=0.02,
# scale_d=True, std_j=0.02) if args.add_noise else None
noise_func = lambda d: data.add_edge_based_noise(d, zthresh=0.04, edgeprob=0.35,
defprob=0.005, noisestd=0.005)
### Load functions
baxter_data = data.read_recurrent_baxter_dataset(args.data, args.img_suffix,
step_len = args.step_len, seq_len = args.seq_len,
train_per = args.train_per, val_per = args.val_per,
valid_filter = valid_filter,
cam_extrinsics=args.cam_extrinsics,
cam_intrinsics=args.cam_intrinsics,
ctrl_ids=args.ctrl_ids,
state_labels=args.state_labels,
add_noise=args.add_noise_data)
disk_read_func = lambda d, i: read_seq_func(d, i, img_ht = args.img_ht, img_wd = args.img_wd,
img_scale = args.img_scale, ctrl_type = args.ctrl_type,
num_ctrl=args.num_ctrl,
#num_state=args.num_state,
mesh_ids = args.mesh_ids,
#ctrl_ids=ctrlids_in_state,
#camera_extrinsics = args.cam_extrinsics,
#camera_intrinsics = args.cam_intrinsics,
compute_bwdflows=False,
#num_tracker=args.num_tracker,
dathreshold=args.da_threshold, dawinsize=args.da_winsize,
use_only_da=args.use_only_da_for_flows,
noise_func=noise_func,
load_color=load_color) # Need BWD flows / masks if using GT masks
train_dataset = data.BaxterSeqDataset(baxter_data, disk_read_func, 'train') # Train dataset
val_dataset = data.BaxterSeqDataset(baxter_data, disk_read_func, 'val') # Val dataset
test_dataset = data.BaxterSeqDataset(baxter_data, disk_read_func, 'test') # Test dataset
print('Dataset size => Train: {}, Validation: {}, Test: {}'.format(len(train_dataset), len(val_dataset), len(test_dataset)))
# Create a data-collater for combining the samples of the data into batches along with some post-processing
if args.evaluate:
# Load only test loader
args.imgdisp_freq = 10 * args.disp_freq # Tensorboard log frequency for the image data
sampler = torch.utils.data.dataloader.SequentialSampler(test_dataset) # Run sequentially along the test dataset
# torch.manual_seed(args.seed)
# if args.cuda:
# torch.cuda.manual_seed(args.seed)
# sampler = torch.utils.data.dataloader.RandomSampler(test_dataset) # Random sampler
test_loader = DataEnumerator(util.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, sampler=sampler,
pin_memory=args.use_pin_memory,
collate_fn=test_dataset.collate_batch))
else:
# Create dataloaders (automatically transfer data to CUDA if args.cuda is set to true)
train_loader = DataEnumerator(util.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=args.use_pin_memory,
collate_fn=train_dataset.collate_batch))
val_loader = DataEnumerator(util.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=args.use_pin_memory,
collate_fn=val_dataset.collate_batch))
########################
############ Load models & optimization stuff
assert not args.use_full_jt_angles, "Can only use as many jt angles as the control dimension"
print('Using state of controllable joints')
args.num_state_net = args.num_ctrl # Use only the jt angles of the controllable joints
### Load the model
num_train_iter = 0
num_input_channels = 3 # Num input channels
modelfn = ctrlnets.MultiStepSE3PoseModel
model = modelfn(num_ctrl=args.num_ctrl, num_se3=args.num_se3,
se3_type=args.se3_type, delta_pivot=args.delta_pivot,
input_channels=num_input_channels, use_bn=args.batch_norm, nonlinearity=args.nonlin,
init_posese3_iden=args.init_posese3_iden, init_transse3_iden=args.init_transse3_iden,
use_wt_sharpening=args.use_wt_sharpening, sharpen_start_iter=args.sharpen_start_iter,
sharpen_rate=args.sharpen_rate, pre_conv=args.pre_conv, decomp_model=args.decomp_model,
local_delta_se3=args.local_delta_se3,
wide=args.wide_model, use_jt_angles=args.use_jt_angles,
use_jt_angles_trans=args.use_jt_angles_trans, num_state=args.num_state_net,
full_res=args.full_res, noise_stop_iter=args.noise_stop_iter,
trans_type="default", posemask_type="default")
if args.cuda:
model.cuda() # Convert to CUDA if enabled
### Load optimizer
optimizer = helpers.load_optimizer(args.optimization, model.parameters(), lr=args.lr,
momentum=args.momentum, weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
# TODO: Save path to TB log dir, save new log there again
# TODO: Reuse options in args (see what all to use and what not)
# TODO: Use same num train iters as the saved checkpoint
# TODO: Print some stats on the training so far, reset best validation loss, best epoch etc
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
loadargs = checkpoint['args']
args.start_epoch = checkpoint['epoch']
if args.reset_train_iter:
num_train_iter = 0 # Reset to 0
else:
num_train_iter = checkpoint['train_iter']
try:
model.load_state_dict(checkpoint['state_dict']) # BWDs compatibility (TODO: remove)
except:
model.load_state_dict(checkpoint['model_state_dict'])
assert (loadargs.optimization == args.optimization), "Optimizer in saved checkpoint ({}) does not match current argument ({})".format(
loadargs.optimization, args.optimization)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
print("=> loaded checkpoint '{}' (epoch {}, train iter {})"
.format(args.resume, checkpoint['epoch'], num_train_iter))
best_loss = checkpoint['best_loss'] if 'best_loss' in checkpoint else float("inf")
best_floss = checkpoint['best_flow_loss'] if 'best_flow_loss' in checkpoint else float("inf")
best_fcloss = checkpoint['best_flowconsis_loss'] if 'best_flowconsis_loss' in checkpoint else float("inf")
best_epoch = checkpoint['best_epoch'] if 'best_epoch' in checkpoint else 0
best_fepoch = checkpoint['best_flow_epoch'] if 'best_flow_epoch' in checkpoint else 0
best_fcepoch = checkpoint['best_flowconsis_epoch'] if 'best_flowconsis_epoch' in checkpoint else 0
print('==== Best validation loss: {} was from epoch: {} ===='.format(best_loss, best_epoch))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
best_loss, best_floss, best_fcloss = float("inf"), float("inf"), float("inf")
best_epoch, best_fepoch, best_fcepoch = 0, 0, 0
########################
############ Test (don't create the data loader unless needed, creates 4 extra threads)
if args.evaluate:
# Delete train and val loaders
#del train_loader, val_loader
# TODO: Move this to before the train/val loader creation??
print('==== Evaluating pre-trained network on test data ===')
test_stats = iterate(test_loader, model, tblogger, len(test_loader), mode='test')
# Save final test error
helpers.save_checkpoint({
'args': args,
'test_stats': {'stats': test_stats,
'niters': test_loader.niters, 'nruns': test_loader.nruns,
'totaliters': test_loader.iteration_count(),
'ids': test_stats.data_ids,
},
}, False, savedir=args.save_dir, filename='test_stats.pth.tar')
# Close log file & return
logfile.close()
return
## Create a file to log different validation errors over training epochs
statstfile = open(args.save_dir + '/epochtrainstats.txt', 'w')
statsvfile = open(args.save_dir + '/epochvalstats.txt', 'w')
statstfile.write("Epoch, Loss, Ptloss, Consisloss, Flowerrsum, Flowerravg, Consiserr\n")
statsvfile.write("Epoch, Loss, Ptloss, Consisloss, Flowerrsum, Flowerravg, Consiserr\n")
########################
############ Train / Validate
args.imgdisp_freq = 5 * args.disp_freq # Tensorboard log frequency for the image data
train_ids, val_ids = [], []
for epoch in range(args.start_epoch, args.epochs):
# Adjust learning rate
adjust_learning_rate(optimizer, epoch, args.lr_decay, args.decay_epochs, args.min_lr)
# Train for one epoch
train_stats = iterate(train_loader, model, tblogger, args.train_ipe,
mode='train', optimizer=optimizer, epoch=epoch+1)
train_ids += train_stats.data_ids
# Evaluate on validation set
val_stats = iterate(val_loader, model, tblogger, args.val_ipe,
mode='val', epoch=epoch+1)
val_ids += val_stats.data_ids
# Find best losses
val_loss, val_floss, val_fcloss = val_stats.loss.avg, \
val_stats.ptloss.avg.sum(), \
val_stats.ptloss.avg.sum() + val_stats.consisloss.avg.sum()
is_best, is_fbest, is_fcbest = (val_loss < best_loss), (val_floss < best_floss), (val_fcloss < best_fcloss)
prev_best_loss, prev_best_floss, prev_best_fcloss = best_loss, best_floss, best_fcloss
prev_best_epoch, prev_best_fepoch, prev_best_fcepoch = best_epoch, best_fepoch, best_fcepoch
s, sf, sfc = 'SAME', 'SAME', 'SAME'
if is_best:
best_loss, best_epoch, s = val_loss, epoch+1, 'IMPROVED'
if is_fbest:
best_floss, best_fepoch, sf = val_floss, epoch+1, 'IMPROVED'
if is_fcbest:
best_fcloss, best_fcepoch, sfc = val_fcloss, epoch+1, 'IMPROVED'
print('==== [LOSS] Epoch: {}, Status: {}, Previous best: {:.5f}/{}. Current: {:.5f}/{} ===='.format(
epoch+1, s, prev_best_loss, prev_best_epoch, best_loss, best_epoch))
print('==== [FLOSS] Epoch: {}, Status: {}, Previous best: {:.5f}/{}. Current: {:.5f}/{} ===='.format(
epoch+1, sf, prev_best_floss, prev_best_fepoch, best_floss, best_fepoch))
        print('==== [FCLOSS] Epoch: {}, Status: {}, Previous best: {:.5f}/{}. Current: {:.5f}/{} ===='.format(
            epoch+1, sfc, prev_best_fcloss, prev_best_fcepoch, best_fcloss, best_fcepoch))
# Write losses to stats file
statstfile.write("{}, {}, {}, {}, {}, {}, {}\n".format(epoch+1, train_stats.loss.avg,
train_stats.ptloss.avg.sum(),
train_stats.consisloss.avg.sum(),
train_stats.flowerr_sum.avg.sum()/args.batch_size,
train_stats.flowerr_avg.avg.sum()/args.batch_size,
train_stats.consiserr.avg.sum()))
statsvfile.write("{}, {}, {}, {}, {}, {}, {}\n".format(epoch + 1, val_stats.loss.avg,
val_stats.ptloss.avg.sum(),
val_stats.consisloss.avg.sum(),
val_stats.flowerr_sum.avg.sum() / args.batch_size,
val_stats.flowerr_avg.avg.sum() / args.batch_size,
val_stats.consiserr.avg.sum()))
# Save checkpoint
helpers.save_checkpoint({
'epoch': epoch+1,
'args' : args,
'best_loss' : best_loss,
'best_flow_loss' : best_floss,
'best_flowconsis_loss' : best_fcloss,
'best_epoch' : best_epoch,
'best_flow_epoch' : best_fepoch,
'best_flowconsis_epoch': best_fcepoch,
'train_stats': {'stats': train_stats,
'niters': train_loader.niters, 'nruns': train_loader.nruns,
'totaliters': train_loader.iteration_count(),
'ids': train_ids,
},
'val_stats' : {'stats': val_stats,
'niters': val_loader.niters, 'nruns': val_loader.nruns,
'totaliters': val_loader.iteration_count(),
'ids': val_ids,
},
'train_iter' : num_train_iter,
'model_state_dict' : model.state_dict(),
'optimizer_state_dict' : optimizer.state_dict(),
}, is_best, is_fbest, is_fcbest, savedir=args.save_dir, filename='checkpoint.pth.tar') #_{}.pth.tar'.format(epoch+1))
print('\n')
# Delete train and val data loaders
del train_loader, val_loader
# Load best model for testing (not latest one)
print("=> loading best model from '{}'".format(args.save_dir + "/model_flow_best.pth.tar"))
checkpoint = torch.load(args.save_dir + "/model_flow_best.pth.tar")
num_train_iter = checkpoint['train_iter']
try:
model.load_state_dict(checkpoint['state_dict']) # BWDs compatibility (TODO: remove)
except:
model.load_state_dict(checkpoint['model_state_dict'])
print("=> loaded best checkpoint (epoch {}, train iter {})"
.format(checkpoint['epoch'], num_train_iter))
best_epoch = checkpoint['best_epoch'] if 'best_epoch' in checkpoint else 0
best_fepoch = checkpoint['best_flow_epoch'] if 'best_flow_epoch' in checkpoint else 0
best_fcepoch = checkpoint['best_flowconsis_epoch'] if 'best_flowconsis_epoch' in checkpoint else 0
print('==== Best validation loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_loss'],
best_epoch))
print('==== Best validation flow loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flow_loss'],
best_fepoch))
print('==== Best validation flow-consis loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flowconsis_loss'],
best_fcepoch))
# Do final testing (if not asked to evaluate)
# (don't create the data loader unless needed, creates 4 extra threads)
print('==== Evaluating trained network on test data ====')
args.imgdisp_freq = 10 * args.disp_freq # Tensorboard log frequency for the image data
sampler = torch.utils.data.dataloader.SequentialSampler(test_dataset) # Run sequentially along the test dataset
test_loader = DataEnumerator(util.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.num_workers, sampler=sampler, pin_memory=args.use_pin_memory,
collate_fn=test_dataset.collate_batch))
test_stats = iterate(test_loader, model, tblogger, len(test_loader),
mode='test', epoch=args.epochs)
print('==== Best validation loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_loss'],
best_epoch))
print('==== Best validation flow loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flow_loss'],
best_fepoch))
print('==== Best validation flow-consis loss: {:.5f} was from epoch: {} ===='.format(checkpoint['best_flowconsis_loss'],
best_fcepoch))
# Save final test error
helpers.save_checkpoint({
'args': args,
'test_stats': {'stats': test_stats,
'niters': test_loader.niters, 'nruns': test_loader.nruns,
'totaliters': test_loader.iteration_count(),
'ids': test_stats.data_ids,
},
}, is_best=False, savedir=args.save_dir, filename='test_stats.pth.tar')
# Write test stats to val stats file at the end
statsvfile.write("{}, {}, {}, {}, {}, {}, {}\n".format(checkpoint['epoch'], test_stats.loss.avg,
test_stats.ptloss.avg.sum(),
test_stats.consisloss.avg.sum(),
test_stats.flowerr_sum.avg.sum() / args.batch_size,
test_stats.flowerr_avg.avg.sum() / args.batch_size,
test_stats.consiserr.avg.sum()))
statsvfile.close(); statstfile.close()
# Close log file
logfile.close()
################# HELPER FUNCTIONS
### Main iterate function (train/test/val)
def iterate(data_loader, model, tblogger, num_iters,
mode='test', optimizer=None, epoch=0):
# Get global stuff?
global num_train_iter
# Setup avg time & stats:
data_time, fwd_time, bwd_time, viz_time = AverageMeter(), AverageMeter(), AverageMeter(), AverageMeter()
# Save all stats into a namespace
stats = argparse.Namespace()
stats.loss, stats.ptloss, stats.consisloss = AverageMeter(), AverageMeter(), AverageMeter()
stats.flowerr_sum, stats.flowerr_avg = AverageMeter(), AverageMeter()
stats.motionerr_sum, stats.motionerr_avg = AverageMeter(), AverageMeter()
stats.stillerr_sum, stats.stillerr_avg = AverageMeter(), AverageMeter()
stats.consiserr = AverageMeter()
stats.data_ids = []
if mode == 'test':
# Save the flow errors and poses if in "testing" mode
stats.motion_err, stats.motion_npt, stats.still_err, stats.still_npt = [], [], [], []
stats.predposes, stats.predtransposes, stats.preddeltas, stats.ctrls = [], [], [], []
stats.poses = []
# stats.predmasks, stats.masks = [], []
# stats.gtflows, stats.predflows = [], []
# stats.pts = []
# Switch model modes
train = (mode == 'train')
if train:
assert (optimizer is not None), "Please pass in an optimizer if we are iterating in training mode"
model.train()
else:
        assert (mode == 'test' or mode == 'val'), "Mode can be train/test/val. Input: " + mode
model.eval()
# Create a closure to get the outputs of the delta-se3 prediction layers
#predictions = {}
#def get_output(name):
# def hook(self, input, result):
# predictions[name] = result
# return hook
#model.transitionmodel.deltase3decoder.register_forward_hook(get_output('deltase3'))
# Point predictor
# NOTE: The prediction outputs of both layers are the same if mask normalization is used, if sigmoid the outputs are different
# NOTE: Gradients are same for pts & tfms if mask normalization is used, always different for the masks
ptpredlayer = se3nn.NTfm3D()
# Type of loss (mixture of experts = wt sharpening or sigmoid)
mex_loss = True
# Run an epoch
print('========== Mode: {}, Starting epoch: {}, Num iters: {} =========='.format(
mode, epoch, num_iters))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pt_wt, consis_wt = args.pt_wt * args.loss_scale, args.consis_wt * args.loss_scale
identfm = util.req_grad(torch.eye(4).view(1,1,4,4).expand(1,args.num_se3-1,4,4).narrow(2,0,3).to(device), False)
for i in xrange(num_iters):
# ============ Load data ============#
# Start timer
start = time.time()
# Get a sample
j, sample = data_loader.next()
stats.data_ids.append(sample['id'].clone())
# Get inputs and targets (as variables)
# Currently batchsize is the outer dimension
pts = util.req_grad(sample['points'].to(device), train) # Need gradients
ctrls = util.req_grad(sample['controls'].to(device), train) # Need gradients
fwdflows = util.req_grad(sample['fwdflows'].to(device), False) # No gradients
fwdvis = util.req_grad(sample['fwdvisibilities'].float().to(device), False)
# Get jt angles
jtangles = util.req_grad(sample['actctrlconfigs'].to(device), train) #[:, :, args.ctrlids_in_state].type(deftype), requires_grad=train)
# Measure data loading time
data_time.update(time.time() - start)
# ============ FWD pass + Compute loss ============#
# Start timer
start = time.time()
########## Run a FWD pass through the network
# Predict the poses and masks
pose0, initmask = model.forward_pose_mask([pts[:, 0], jtangles[:, 0]], train_iter=num_train_iter)
pose1 = model.forward_only_pose([pts[:, 1], jtangles[:, 1]])
poses = [pose0, pose1]
# Make next-pose predictions & corresponding 3D point predictions using the transition model
deltapose, transpose = model.forward_next_pose(pose0, ctrls[:, 0], jtangles[:, 0], None)
deltaposes = [deltapose]
transposes = [transpose]
# Make prediction of next pts
nextpts = ptpredlayer(pts[:,0], initmask, deltapose)
predpts = [nextpts]
########## Losses
### 3D loss
# If motion-normalized loss, pass in GT flows
inputs = nextpts - pts[:, 0] # Delta flow for that step (note that gradients only go to the mask & deltas)
targets = fwdflows[:, 0]
if args.motion_norm_loss:
motion = targets # Use either delta-flows or full-flows
currptloss = pt_wt * ctrlnets.MotionNormalizedLoss3D(inputs, targets, motion=motion,
loss_type=args.loss_type, wts=fwdvis[:, 0])
else:
currptloss = pt_wt * ctrlnets.Loss3D(inputs, targets, loss_type=args.loss_type, wts=fwdvis[:, 0])
### Consistency loss (between t & t+1)
# Poses from encoder @ t & @ t+1 should be separated by delta from t->t+1
# NOTE: For the consistency loss, the loss is only backpropagated to the encoder poses, not to the deltas
delta = deltapose.detach() # Break the graph here
nextpose_trans = se3nn.ComposeRtPair()(delta, poses[0])
currconsisloss = consis_wt * ctrlnets.BiMSELoss(nextpose_trans, poses[1])
# Append to total loss
loss = currptloss + currconsisloss
ptloss = torch.Tensor([currptloss.item()])
consisloss = torch.Tensor([currconsisloss.item()])
# Update stats
stats.ptloss.update(ptloss)
stats.consisloss.update(consisloss)
stats.loss.update(loss.item())
# Measure FWD time
fwd_time.update(time.time() - start)
# ============ Gradient backpass + Optimizer step ============#
# Compute gradient and do optimizer update step (if in training mode)
if (train):
# Start timer
start = time.time()
# Backward pass & optimize
optimizer.zero_grad() # Zero gradients
loss.backward() # Compute gradients - BWD pass
optimizer.step() # Run update step
# Increment number of training iterations by 1
num_train_iter += 1
# Measure BWD time
bwd_time.update(time.time() - start)
# ============ Visualization ============#
# Make sure to not add to the computation graph (will memory leak otherwise)!
with torch.no_grad():
# Start timer
start = time.time()
# Compute flow predictions and errors
# NOTE: I'm using CUDA here to speed up computation by ~4x
predflows = torch.cat([(x - pts[:,0]).unsqueeze(1) for x in predpts], 1)
flows = fwdflows
if args.use_only_da_for_flows:
# If using only DA then pts that are not visible will not have GT flows, so we shouldn't take them into
# account when computing the flow errors
flowerr_sum, flowerr_avg, \
motionerr_sum, motionerr_avg,\
stillerr_sum, stillerr_avg,\
motion_err, motion_npt,\
still_err, still_npt = helpers.compute_masked_flow_errors(predflows * fwdvis, flows) # Zero out flows for non-visible points
else:
flowerr_sum, flowerr_avg, \
motionerr_sum, motionerr_avg, \
stillerr_sum, stillerr_avg, \
motion_err, motion_npt, \
still_err, still_npt = helpers.compute_masked_flow_errors(predflows, flows)
# Update stats
stats.flowerr_sum.update(flowerr_sum); stats.flowerr_avg.update(flowerr_avg)
stats.motionerr_sum.update(motionerr_sum); stats.motionerr_avg.update(motionerr_avg)
stats.stillerr_sum.update(stillerr_sum); stats.stillerr_avg.update(stillerr_avg)
if mode == 'test':
stats.motion_err.append(motion_err); stats.motion_npt.append(motion_npt)
stats.still_err.append(still_err); stats.still_npt.append(still_npt)
# Save poses if in test mode
if (mode == 'test') and (args.detailed_test_stats):
stats.predposes.append([x.cpu().float() for x in poses])
stats.predtransposes.append([x.cpu().float() for x in transposes])
stats.preddeltas.append([x.cpu().float() for x in deltaposes])
stats.ctrls.append(ctrls.cpu().float())
stats.poses.append(sample['poses'])
# stats.predmasks.append(initmask.cpu().float())
# stats.masks.append(sample['masks'][:,0])
# stats.predflows.append(predflows.cpu())
# stats.gtflows.append(flows.cpu())
# stats.pts.append(sample['points'][:,0])
# Compute flow error per mask (if asked to)
#if args.disp_err_per_mask:
# flowloss_mask_sum_fwd, flowloss_mask_avg_fwd, _, _ = compute_flow_errors_per_mask(predflows,
# flows,
# sample['gtmasks'])
### Pose consistency error
# Compute consistency error for display
consiserror, consiserrormax = torch.zeros(args.seq_len), torch.zeros(args.seq_len)
for k in xrange(args.seq_len):
consiserrormax[k] = (poses[k+1] - transposes[k]).abs().max()
consiserror[k] = ctrlnets.BiAbsLoss(poses[k+1], transposes[k])
stats.consiserr.update(consiserror)
# Display/Print frequency
bsz = pts.size(0)
if i % args.disp_freq == 0:
### Print statistics
print_stats(mode, epoch=epoch, curr=i+1, total=num_iters,
samplecurr=j+1, sampletotal=len(data_loader),
stats=stats, bsz=bsz)
### Print stuff if we have weight sharpening enabled
if args.use_wt_sharpening:
try:
noise_std, pow = model.posemaskmodel.compute_wt_sharpening_stats(train_iter=num_train_iter)
except:
noise_std, pow = model.maskmodel.compute_wt_sharpening_stats(train_iter=num_train_iter)
print('\tWeight sharpening => Num training iters: {}, Noise std: {:.4f}, Power: {:.3f}'.format(
num_train_iter, noise_std, pow))
### Print time taken
print('\tTime => Data: {data.val:.3f} ({data.avg:.3f}), '
'Fwd: {fwd.val:.3f} ({fwd.avg:.3f}), '
'Bwd: {bwd.val:.3f} ({bwd.avg:.3f}), '
'Viz: {viz.val:.3f} ({viz.avg:.3f})'.format(
data=data_time, fwd=fwd_time, bwd=bwd_time, viz=viz_time))
### TensorBoard logging
# (1) Log the scalar values
iterct = data_loader.iteration_count() # Get total number of iterations so far
info = {
mode+'-loss': loss.item(),
mode+'-pt3dloss': ptloss.sum(),
mode+'-consisloss': consisloss.sum(),
mode+'-consiserr': consiserror.sum(),
mode+'-consiserrmax': consiserrormax.sum(),
mode+'-flowerrsum': flowerr_sum.sum()/bsz,
mode+'-flowerravg': flowerr_avg.sum()/bsz,
mode+'-motionerrsum': motionerr_sum.sum()/bsz,
mode+'-motionerravg': motionerr_avg.sum()/bsz,
mode+'-stillerrsum': stillerr_sum.sum() / bsz,
mode+'-stillerravg': stillerr_avg.sum() / bsz,
}
if mode == 'train':
info[mode+'-lr'] = args.curr_lr # Plot current learning rate
for tag, value in info.items():
tblogger.scalar_summary(tag, value, iterct)
# (2) Log images & print predicted SE3s
# TODO: Numpy or matplotlib
if i % args.imgdisp_freq == 0:
## Log the images (at a lower rate for now)
id = random.randint(0, sample['points'].size(0)-1)
# Render the predicted and GT poses onto the depth
depths = []
for k in xrange(args.seq_len+1):
gtpose = sample['poses'][id, k]
predpose = poses[k][id].cpu().float()
predposet = transposes[k-1][id].cpu().float() if (k > 0) else None
gtdepth = helpers.normalize_img(sample['points'][id,k,2:].expand(3,args.img_ht,args.img_wd).permute(1,2,0), min=0, max=3)
for n in xrange(args.num_se3):
# Pose_1 (GT/Pred)
if n < gtpose.size(0):
util.draw_3d_frame(gtdepth, gtpose[n], [0,0,1], args.cam_intrinsics[0], pixlength=15.0) # GT pose: Blue
util.draw_3d_frame(gtdepth, predpose[n], [0,1,0], args.cam_intrinsics[0], pixlength=15.0) # Pred pose: Green
if predposet is not None:
util.draw_3d_frame(gtdepth, predposet[n], [1,0,0], args.cam_intrinsics[0], pixlength=15.0) # Transition model pred pose: Red
depths.append(gtdepth)
depthdisp = torch.cat(depths, 1).permute(2,0,1) # Concatenate along columns (3 x 240 x 320*seq_len+1 image)
# Concat the flows, depths and masks into one tensor
flowdisp = torchvision.utils.make_grid(torch.cat([flows.narrow(0,id,1),
predflows.narrow(0,id,1)], 0).cpu().view(-1, 3, args.img_ht, args.img_wd),
nrow=args.seq_len, normalize=True, range=(-0.01, 0.01))
#depthdisp = torchvision.utils.make_grid(sample['points'][id].narrow(1,2,1), normalize=True, range=(0.0,3.0))
maskdisp = torchvision.utils.make_grid(torch.cat([initmask.narrow(0,id,1)], 0).cpu().view(-1, 1, args.img_ht, args.img_wd),
nrow=args.num_se3, normalize=True, range=(0,1))
# Show as an image summary
info = { mode+'-depths': util.to_np(depthdisp.unsqueeze(0)),
mode+'-flows' : util.to_np(flowdisp.unsqueeze(0)),
mode+'-masks' : util.to_np(maskdisp.narrow(0,0,1))
}
for tag, images in info.items():
tblogger.image_summary(tag, images, iterct)
## Print the predicted delta-SE3s
#deltase3s = predictions['deltase3'][id].view(args.num_se3, -1).cpu()
#if len(pivots) > 0:
# deltase3s = torch.cat([deltase3s, pivots[-1][id].view(args.num_se3,-1).cpu()], 1)
#print('\tPredicted delta-SE3s @ t=2:', deltase3s)
## Print the predicted mask values
print('\tPredicted mask stats:')
for k in xrange(args.num_se3):
print('\tMax: {:.4f}, Min: {:.4f}, Mean: {:.4f}, Std: {:.4f}, Median: {:.4f}, Pred 1: {}'.format(
initmask[id,k].max(), initmask[id,k].min(), initmask[id,k].mean(),
initmask[id,k].std(), initmask[id,k].view(-1).cpu().float().median(),
(initmask[id,k] - 1).abs().le(1e-5).sum()))
print('')
# Measure viz time
viz_time.update(time.time() - start)
### Print stats at the end
print('========== Mode: {}, Epoch: {}, Final results =========='.format(mode, epoch))
print_stats(mode, epoch=epoch, curr=num_iters, total=num_iters,
samplecurr=data_loader.niters+1, sampletotal=len(data_loader),
stats=stats)
print('========================================================')
# Return the loss & flow loss
return stats
### Print statistics
def print_stats(mode, epoch, curr, total, samplecurr, sampletotal,
stats, bsz=None):
# Print loss
bsz = args.batch_size if bsz is None else bsz
print('Mode: {}, Epoch: [{}/{}], Iter: [{}/{}], Sample: [{}/{}], Batch size: {}, '
'Loss: {loss.val:.4f} ({loss.avg:.4f})'.format(
mode, epoch, args.epochs, curr, total, samplecurr,
sampletotal, bsz, loss=stats.loss))
# Print flow loss per timestep
for k in xrange(args.seq_len):
print('\tStep: {}, Pt: {:.3f} ({:.3f}), '
'Consis: {:.3f}/{:.4f} ({:.3f}/{:.4f}), '
'Flow => Sum: {:.3f} ({:.3f}), Avg: {:.3f} ({:.3f}), '
'Motion/Still => Sum: {:.3f}/{:.3f}, Avg: {:.3f}/{:.3f}'
.format(
1 + k * args.step_len,
stats.ptloss.val[k], stats.ptloss.avg[k],
stats.consisloss.val[k], stats.consisloss.avg[k],
stats.consiserr.val[k], stats.consiserr.avg[k],
stats.flowerr_sum.val[k] / bsz, stats.flowerr_sum.avg[k] / bsz,
stats.flowerr_avg.val[k] / bsz, stats.flowerr_avg.avg[k] / bsz,
stats.motionerr_sum.avg[k] / bsz, stats.stillerr_sum.avg[k] / bsz,
stats.motionerr_avg.avg[k] / bsz, stats.stillerr_avg.avg[k] / bsz,
))
### Adjust learning rate
def adjust_learning_rate(optimizer, epoch, decay_rate=0.1, decay_epochs=10, min_lr=1e-5):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (decay_rate ** (epoch // decay_epochs))
    lr = min_lr if (lr < min_lr) else lr # Clamp the decayed rate at min_lr
print("======== Epoch: {}, Initial learning rate: {}, Current: {}, Min: {} =========".format(
epoch, args.lr, lr, min_lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
args.curr_lr = lr
################ RUN MAIN
if __name__ == '__main__':
main()
| [
"barun@MacBook-Air-2.local"
] | barun@MacBook-Air-2.local |
f5d215c564dfad6c96246bd529b6f6afd273eafa | beac917ee396ffb33c4f13d2ceff188c3bf5148e | /app/evaluation.py | bdc7f21c84bae0c079063d2953eca979513fa410 | [] | no_license | Boj3alex/rpn-calculator | 75532b25b312feed163e7f0bf1e45887c35ad417 | 705c21e250a1105ae02ab4e620546e77fd1d805f | refs/heads/master | 2023-01-09T06:50:19.879472 | 2020-08-31T17:46:26 | 2020-08-31T17:46:26 | 290,067,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import re
floating_point_regex = r'[0-9]*\.[0-9]*'
def do_operation(element1, element2, operator):
if operator == '+':
return element1 + element2
if operator == '-':
return element1 - element2
if operator == '*':
return element1 * element2
if operator == '/':
return int(element1 / element2)
if operator == '%':
return element1 % element2
def rpn_evaluation(rpn_exp):
results_list = []
operator_list = ['+', '-', '*', '/', '%']
try:
for element in rpn_exp.split():
if element in operator_list:
                operand2 = results_list.pop()
                operand1 = results_list.pop()
                results_list.append(do_operation(operand1, operand2, element))
elif element.isnumeric():
results_list.append(int(element))
elif re.search(floating_point_regex, element):
raise Exception('Floating-point numbers are not accepted.')
else:
raise Exception('Invalid character')
except IndexError:
print('Invalid RPN expression')
return results_list.pop() if len(results_list) > 0 else 0
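# Worked example: rpn_evaluation("3 4 + 2 *") pushes 3 and 4, applies "+"
# (stack: [7]), pushes 2, applies "*" -> (3 + 4) * 2 = 14.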
if __name__ == '__main__':
print('Type the RPN expression that you want to evaluate:')
rpn_exp = input()
print('The result of the RPN expression is:', rpn_evaluation(rpn_exp))
| [
"noreply@github.com"
] | Boj3alex.noreply@github.com |
03a7b76aa472ee4f249b294ee548e8d4b9c4d794 | a923a44d3c4815f645ca2ba84f973083c5dc29a1 | /audio.py | 7022ffd8026fa3ee5f185d610030341c99efd1f5 | [] | no_license | unparalleled-ysj/T2-TF2 | 49ca50fe1e844b64c75d91a22d294b83c7c449a9 | 5c0c22a569c68d6f63648c5f545fd78ffb261033 | refs/heads/master | 2022-11-13T17:20:33.963871 | 2020-07-06T04:15:52 | 2020-07-06T04:15:52 | 277,436,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,498 | py | import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
num_mels = 80
n_fft = 1024
sample_rate = 16000
hop_size = 200
win_size = 800
preemphasis_value = 0.97
min_level_db = -120
ref_level_db = 20
power = 1.2
griffin_lim_iters = 60
fmax = 7600
fmin = 50
max_abs_value = 4.
def dc_notch_filter(wav):
# code from speex
notch_radius = 0.982
den = notch_radius ** 2 + 0.7 * (1 - notch_radius) ** 2
b = np.array([1, -2, 1]) * notch_radius
a = np.array([1, -2 * notch_radius, den])
return signal.lfilter(b, a, wav)
def load_wav(path, sr):
return librosa.core.load(path, sr=sr)[0]
def save_wav(wav, path):
wav = dc_notch_filter(wav)
wav = wav / np.abs(wav).max() * 0.999
f1 = 0.5 * 32767 / max(0.01, np.max(np.abs(wav)))
f2 = np.sign(wav) * np.power(np.abs(wav), 0.95)
wav = f1 * f2
#proposed by @dsmiller
wavfile.write(path, sample_rate, wav.astype(np.int16))
def preemphasis(wav, k):
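    # first-order high-pass filter: y[n] = wav[n] - k * wav[n-1]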
return signal.lfilter([1, -k], [1], wav)
def inv_preemphasis(wav, k):
return signal.lfilter([1], [1, -k], wav)
def get_hop_size():
return hop_size
def linearspectrogram(wav):
D = _stft(preemphasis(wav, preemphasis_value))
S = _amp_to_db(np.abs(D)) - ref_level_db
return _normalize(S)
def melspectrogram(wav):
D = _stft(preemphasis(wav, preemphasis_value))
S = _amp_to_db(_linear_to_mel(np.abs(D))) - ref_level_db
return _normalize(S)
def inv_linear_spectrogram(linear_spectrogram):
'''Converts linear spectrogram to waveform using librosa'''
D = _denormalize(linear_spectrogram)
S = _db_to_amp(D + ref_level_db) #Convert back to linear
return inv_preemphasis(_griffin_lim(S ** power), preemphasis_value)
def inv_mel_spectrogram(mel_spectrogram):
'''Converts mel spectrogram to waveform using librosa'''
D = _denormalize(mel_spectrogram)
S = _mel_to_linear(_db_to_amp(D + ref_level_db)) # Convert back to linear
return inv_preemphasis(_griffin_lim(S ** power), preemphasis_value)
def _griffin_lim(S):
'''librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
'''
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
    S_complex = np.abs(S).astype(np.complex128)  # np.complex was removed from recent NumPy
y = _istft(S_complex * angles)
for i in range(griffin_lim_iters):
angles = np.exp(1j * np.angle(_stft(y)))
y = _istft(S_complex * angles)
return y
def _stft(y):
return librosa.stft(y=y, n_fft=n_fft, hop_length=get_hop_size(), win_length=win_size)
def _istft(y):
return librosa.istft(y, hop_length=get_hop_size(), win_length=win_size)
# Conversions
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis()
return np.dot(_mel_basis, spectogram)
def _mel_to_linear(mel_spectrogram):
global _inv_mel_basis
if _inv_mel_basis is None:
_inv_mel_basis = np.linalg.pinv(_build_mel_basis())
return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
def _build_mel_basis():
assert fmax <= sample_rate // 2
return librosa.filters.mel(sample_rate, n_fft, n_mels=num_mels,
fmin=fmin, fmax=fmax)
def _amp_to_db(x):
min_level = np.exp(min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05)
def _normalize(S):
return (2 * max_abs_value) * ((S - min_level_db) / (-min_level_db)) - max_abs_value
def _denormalize(D):
return (((D + max_abs_value) * -min_level_db / (2 * max_abs_value)) + min_level_db)
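# Round-trip sketch (the wav path below is illustrative; expects mono audio,
# resampled here to 16 kHz). Reconstruction quality is bounded by the
# iterative Griffin-Lim phase estimate:
#
#   wav = load_wav("example.wav", sample_rate)
#   mel = melspectrogram(wav)  # (num_mels, frames), scaled to roughly [-4, 4]
#   save_wav(inv_mel_spectrogram(mel), "reconstructed.wav")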
| [
"unparalleled.ysj@qq.com"
] | unparalleled.ysj@qq.com |
dc0f1debf616d07e130ae2adb13b8209fd2e2f74 | 99afa83eda09cf552466ddf90314cb01d07b166a | /testapp/models.py | c1fa45c2c96048893e614bf9142070231858f126 | [] | no_license | jithinvijayan007/Lithoera | 358c9a6191d6510ac07229e7a92eadd89d70e14f | 33e3639e882f79b12541f92070dad74483fdfa72 | refs/heads/master | 2023-01-05T18:29:37.388869 | 2020-11-02T11:58:27 | 2020-11-02T11:58:27 | 309,316,888 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
# Create your models here.
class MyAccountManager(BaseUserManager):
def create_user(self, email, username, password=None):
if not email:
raise ValueError('Users must have an email address')
if not username:
raise ValueError('Users must have a username')
user = self.model(
email=self.normalize_email(email),
username=username,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, username, password):
user = self.create_user(
email=self.normalize_email(email),
password=password,
username=username,
)
user.is_admin = True
user.is_staff = True
user.is_superuser = True
user.save(using=self._db)
return user
class Account(AbstractBaseUser):
email = models.EmailField(verbose_name="email", max_length=60, unique=True)
username = models.CharField(max_length=30, unique=True)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
last_login = models.DateTimeField(verbose_name='last login', auto_now=True)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = MyAccountManager()
def __str__(self):
return self.email
    # For checking permissions. To keep it simple, all admins have ALL permissions.
def has_perm(self, perm, obj=None):
return self.is_admin
# Does this user have permission to view this app? (ALWAYS YES FOR SIMPLICITY)
def has_module_perms(self, app_label):
return True
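# To make this the project's user model (app label assumed to be "testapp"),
# set in settings.py: AUTH_USER_MODEL = "testapp.Account"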
| [
"jithinvijayan007@gmail.com"
] | jithinvijayan007@gmail.com |
fd3fd13935a93c20f91027c39f5327878e821fa3 | c72fb291300941c756c4fe4e7bbd443880214367 | /files/models.py | a6c1f3b0d75882226cbe0bbd77c225b9a7167397 | [] | no_license | garywangcn/django-3dshow | 1e4893331b70630cb989b62fb95d58703cc9bc9d | 4dad878ebbf13de89facd73c0d6d57860a01a0df | refs/heads/master | 2021-05-11T10:23:59.516091 | 2018-01-24T09:33:42 | 2018-01-24T09:33:42 | 118,099,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | from django.db import models
# Create your models here.
class Document(models.Model):
name = models.CharField(max_length=255, blank=False)
description = models.CharField(max_length=1000, null=True, blank=False)
picture = models.FileField(upload_to='documents/')
modelpackage = models.FileField(upload_to='documents/')
uploaded_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
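# Uploads land under MEDIA_ROOT/documents/ (MEDIA_ROOT comes from the project
# settings); each FileField stores only the relative path in the database.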
| [
"15818651704@163.com"
] | 15818651704@163.com |
21064aaea82657175bb68471f1411164393e0210 | 657c80336bce1cc6158cd349ce208c5e680a4d0d | /contrib/projection/tests/projection/base_projection.py | de53d6895412de112d31a959926d9cdb47b6ef9c | [
"BSD-3-Clause"
] | permissive | Xinmudotmoe/pyglet | b37628618647bf3b1e3d7db28202a5e14c60450c | 144257c365ca85528c6a4c5bed8141e683d7a9b6 | refs/heads/master | 2021-05-29T22:05:40.676643 | 2015-10-24T05:55:49 | 2015-10-24T05:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | #!/usr/bin/python
# $Id:$
from pyglet.gl import *
def fillrect(x, y, width, height):
glBegin(GL_QUADS)
glVertex2f(x, y)
glVertex2f(x + width, y)
glVertex2f(x + width, y + height)
glVertex2f(x, y + height)
glEnd()
def rect(x, y, width, height):
glBegin(GL_LINE_LOOP)
glVertex2f(x, y)
glVertex2f(x + width, y)
glVertex2f(x + width, y + height)
glVertex2f(x, y + height)
glEnd()
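# Usage sketch: rect(0, 0, 10, 10) outlines a 10x10 square at the origin and
# fillrect() fills one; both assume an active GL context with the current
# color and projection already set by the caller (immediate-mode OpenGL).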
| [
"leif.theden@gmail.com"
] | leif.theden@gmail.com |