blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ea5bc2549a9fd8f3a565025618302e4aaed97255 | Python | sophnim/PythonTutorial | /Function.py | UTF-8 | 717 | 4.15625 | 4 | [] | no_license | def test():
print('this is test')
test()
# this is test
def add(a,b):
    """Return the sum of the two operands."""
    total = a + b
    return total
print(add(1,2))
# 3
# return several values at once
def add2(a,b):
    """Return both operands together with their sum as a 3-tuple."""
    total = a + b
    return (a, b, total)
a,b,c = add2(1,2)
print(a,b,c)
# 1 2 3
# if a multi-value return is captured in a single variable, it is stored as a tuple
d = add2(1,2)
print(d)
# (1, 2, 3)
# variable-length arguments
# prefix the parameter with * to collect extra positional arguments
def vargfunc(*args):
    """Variadic demo: print each positional argument on its own line."""
    for value in args:
        print(value)
vargfunc(1,2,3,'a',5)
# 1
# 2
# 3
# a
# 5
def vargfunc2(format, *args):
    # A required leading argument followed by a variadic tail: prints the
    # label first, then each extra argument on its own line.
    # NOTE(review): the parameter name shadows the builtin ``format``.
    print(format)
    for v in args:
        print(v)
vargfunc2("test", 1,2,3,4)
# test
# 1
# 2
# 3
# 4
# default argument values
def func(a = 1, b = 2):
    # Demonstrates default parameter values: omitted arguments fall back to 1 / 2.
    print(a,b)
func(10)
# 10 2
| true |
b042f78ec030ba3ff9db507d7ee868f5d50eee51 | Python | milanyummy/yummy_leetcode | /912_sort.py | UTF-8 | 2,041 | 3.828125 | 4 | [] | no_license | def sortArray(nums):
    # Bubble sort: repeatedly compare adjacent pairs, swapping the larger element toward the end
# flag = True
# while flag is True:
# flag = False
# for i in range (len(nums)-1):
# if nums[i] > nums[i+1]:
# nums[i], nums[i+1] = nums[i+1], nums[i]
# flag = True
# return nums
    # Selection sort: each pass picks the smallest of the remaining elements
# for i in range(len(nums) - 1):
# min = i
# for j in range(i+1, len(nums)):
# if nums[i] > nums[j]:
# min = j
# if min != i:
# nums[i], nums[min] = nums[min], nums[i]
# return nums
    # Insertion sort: insert each element into the already-sorted prefix
# for i in range (len(nums)):
# for j in range(i):
# if nums[i] < nums[j]:
# nums.insert(j, nums.pop(i))
# return nums
    # Binary insertion sort: binary-search the sorted prefix for the new element's position
# for i in range (1, len(nums)):
# low = 0
# high = i-1
#
# while low <= high:
# m = int((low+high) / 2)
# if nums[i] < nums[m]:
# high = m -1
# else:
# low = m + 1
# nums.insert(low , nums.pop(i))#low == high +1
# return nums
    # Quick sort: pick a pivot, partition smaller values left / larger right, then recurse on each side
def quickSort(qlist, start, end):
if start >= end:
return
pivot = qlist[start]
low = start
high = end
while low < high:
while low < high and qlist[high] >= pivot:
high -= 1
qlist[low] = qlist[high]
while low < high and qlist[low] < pivot:
low += 1
qlist[high] = qlist[low]
qlist[low] = pivot
quickSort(qlist, start, low-1)
quickSort(qlist, low+1, end)
quickSort(nums, 0, len(nums)-1)
return nums
nums = [5,1,1,2,0,0]
print(sortArray(nums)) | true |
e17c10f392ee1be779d7012c0798597d84f958ec | Python | sinead-cook/decompressive-craniectomy-midplane-finder | /src/findeyes.py | UTF-8 | 12,011 | 2.578125 | 3 | [] | no_license | import numpy as np
import core
import matplotlib.pyplot as plt
import matplotlib.image
import cv2
# import cv2.cv as cv
def single_slice(axis_no, thresholded_np, slice_no):
    """Detect circles on one 2-D slice of a 3-D volume and display them.

    Saves the selected slice as a temporary PNG, runs a Hough circle
    transform on it, overlays any detected circles, and shows the result
    with matplotlib.

    Args:
        axis_no: axis to slice along (0, 1 or 2).
        thresholded_np: 3-D numpy array (thresholded image volume).
        slice_no: index of the slice along ``axis_no``.

    Returns:
        None; returns immediately if ``axis_no`` is invalid.
    """
    if axis_no == 0:
        matplotlib.image.imsave('img.png', thresholded_np[slice_no,:,:])
    elif axis_no == 1:
        matplotlib.image.imsave('img.png', thresholded_np[:,slice_no,:])
    elif axis_no == 2:
        matplotlib.image.imsave('img.png', thresholded_np[:,:,slice_no])
    else:
        # print 'axis_no must be 0, 1 or 2'
        return None
    img = cv2.imread('img.png')
    img = cv2.resize(img, None, fx=2, fy=2)  # upscale so small circles are detectable
    img = cv2.medianBlur(img,3)              # denoise before the Hough transform
    cimg = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    # BUG FIX: the legacy ``cv2.cv`` module is unavailable (its import is
    # commented out at the top of the file), so ``cv.CV_HOUGH_GRADIENT``
    # raised a NameError.  Use ``cv2.HOUGH_GRADIENT`` as allSlices() does.
    circles = cv2.HoughCircles(cimg,cv2.HOUGH_GRADIENT,1,50, param1=50,param2=30,minRadius=5,maxRadius=30)
    try:
        for i in circles[0,:]:
            # draw the outer circle
            cv2.circle(cimg,(i[0],i[1]),i[2],(0,255,0),2)
            # draw the center of the circle
            cv2.circle(cimg,(i[0],i[1]),2,(0,0,255),3)
    except:
        # HoughCircles returns None when nothing is found; show the bare image
        pass
    plt.clf()
    plt.imshow(cimg)
    plt.axis('equal')
    plt.grid('on')
    return plt.show()
def allSlices(axis_no, softtissue):
    """Run Hough circle detection on every slice of the volume along ``axis_no``.

    Each slice is round-tripped through a temporary PNG (so OpenCV gets an
    8-bit image), upscaled 2x, median-blurred, then searched for circles.
    The first two detected circles per slice are kept.

    Returns:
        (n_slices, 2, 3) array of (x, y, radius) rows; slices where no
        circle was detected stay all-zero.  None if ``axis_no`` invalid.
    """
    import matplotlib
    import os
    circlesData = np.zeros((softtissue.shape[axis_no], 2, 3))
    for i in range(softtissue.shape[axis_no]):
        # to change dimension, change where i is
        if axis_no == 0:
            matplotlib.image.imsave('img.png', softtissue[i,:,:])
            img = cv2.imread('img.png')
        elif axis_no == 1:
            matplotlib.image.imsave('img.png', softtissue[:,i,:])
            img = cv2.imread('img.png')
        elif axis_no == 2:
            matplotlib.image.imsave('img.png', softtissue[:,:,i])
            img = cv2.imread('img.png')
        else:
            # print 'axis_no must be 0, 1 or 2'
            return None
        img = cv2.resize(img, None, fx=2, fy=2)
        img = cv2.medianBlur(img,3)
        cimg= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(cimg,cv2.HOUGH_GRADIENT,1,50,
                                param1=50,param2=30,minRadius=5,maxRadius=30)
        try:
            # circles is None when nothing was found -> IndexError/TypeError
            circlesData[i,:,:] = circles[0][0:2]
        except:
            pass
        os.remove('img.png')
    return circlesData
def indexedData(circlesData):
    """Convert per-slice circle detections into indexed (x, y, slice, r) tables.

    ``circlesData`` has shape (n_slices, n_circles, 3) with columns
    (x, y, radius).  Three tables are produced, keeping the first 2, 3 and
    4 circles per slice respectively.  Each table gains a column: columns
    0-1 are the in-plane coordinates, column 2 is the slice index
    stretched by a factor of 2 (matching the 2x image resize used during
    detection), and column 3 is the radius.

    Returns:
        (histData2c, histData3c, histData4c): independent numpy arrays of
        shape (n_slices, n_circles_in, 4).
    """
    def _indexed(numCircles):
        # Allocate a fresh buffer per table.  BUG FIX: the original code
        # did ``histData3c = histData2c`` / ``histData4c = histData2c``,
        # aliasing one array three times, so every loop overwrote the same
        # object and the 2/3/4-circle distinction was lost.
        out = np.zeros((circlesData.shape[0], circlesData.shape[1],
                        circlesData.shape[2] + 1))
        for i in range(out.shape[0]):
            # copy (x, y) for the strongest ``numCircles`` circles
            out[i, 0:numCircles, 0:2] = circlesData[i, 0:numCircles, 0:2]
            # radius moves from column 2 to column 3
            out[i, 0:numCircles, 3] = circlesData[i, 0:numCircles, 2]
            # explicit slice index, stretched by a factor of 2
            out[i, 0:numCircles, 2] = i * 2
        return out

    return _indexed(2), _indexed(3), _indexed(4)
def reshape(hist_data0, hist_data1, hist_data2):
    """Flatten the three per-axis circle tables into one (N, 4) point list.

    Each detection axis stores its coordinates in a different column
    order, so a per-table channel mapping restores a common (x, y, z, r):
      axis 0: (z, y, x, r) -> channels (2, 1, 0, 3)
      axis 1: (z, x, y, r) -> channels (1, 2, 0, 3)
      axis 2: (y, x, z, r) -> channels (1, 0, 2, 3)
    """
    channel_orders = ((2, 1, 0, 3), (1, 2, 0, 3), (1, 0, 2, 3))
    tables = (hist_data0, hist_data1, hist_data2)
    stacked = [
        np.stack([table[:, :, channel].ravel() for channel in order])
        for table, order in zip(tables, channel_orders)
    ]
    # Rows of the result are (x, y, z, r) points, axis-0 detections first.
    return np.concatenate(stacked, axis=1).T
def circlesData(softtissue, numCircles):
    """ Fixes hist_data dimensions """
    # Detect circles along each of the three axes and merge the results
    # into common-orientation (x, y, z, r) tables for the 2-, 3- and
    # 4-circle variants.
    # NOTE(review): ``numCircles`` is accepted but never used here.
    circlesData0 = allSlices(0, softtissue) # axis 0 circle detection
    histData02c, histdata03c, histdata04c = indexedData(circlesData0)
    circlesData1 = allSlices(1, softtissue) # axis 1 circle detection
    histData12c, histdata13c, histdata14c = indexedData(circlesData1)
    circlesData2 = allSlices(2, softtissue) # axis 2 circle detection
    histData22c, histdata23c, histdata24c = indexedData(circlesData2)
    histData2 = reshape(histData02c,histData12c,histData22c)
    histData3 = reshape(histdata03c, histdata13c, histdata23c)
    histData4 = reshape(histdata04c, histdata14c, histdata24c)
    return histData2, histData3, histData4
def hist3d(hist_data):
    """Bin detected circle centres into a 3-D histogram.

    Args:
        hist_data: (N, 4) array of (x, y, z, r) rows; only x, y, z are binned.

    Returns:
        data: (4, M) array of (x, y, z, count) for every surviving bin,
            using bin-centre coordinates.
        H: the histogram (10 bins per axis by default) with the index-0
            slabs zeroed out.
        edges: the bin edges from np.histogramdd.
    """
    H, edges = np.histogramdd(hist_data[:, 0:3])
    # remove all the data points on the axes (border slabs collect the
    # spurious all-zero detections)
    H[0, :, :] = 0
    H[:, 0, :] = 0
    H[:, :, 0] = 0
    flat_H = H.flatten()
    # Bin centres along each dimension.
    mid_x = (edges[0][:-1] + edges[0][1:]) / 2
    mid_y = (edges[1][:-1] + edges[1][1:]) / 2
    mid_z = (edges[2][:-1] + edges[2][1:]) / 2
    # Expand to the full Cartesian grid in the same C order as H.flatten()
    # (z varies fastest, then y, then x).
    z = np.tile(mid_z, len(mid_y) * len(mid_x))
    y = np.tile(np.repeat(mid_y, len(mid_z)), len(mid_x))
    # BUG FIX: each x value must repeat once per (y, z) pair.  The original
    # repeated len(x) * len(z) times, which only worked because
    # np.histogramdd defaults to 10 bins on every axis.
    x = np.repeat(mid_x, len(mid_y) * len(mid_z))
    data = np.array([x, y, z, flat_H])
    # Drop every grid point whose x, y, z coordinate or count is zero.
    for i in range(data.shape[0]):
        data = np.array(data[:, data[i] != 0])
    return data, H, edges
def hist3dAll(softtissue):
    # circle_num should always be greater than 1. circle_num = 1 means 2 circles being picked out.
    # Sum the 3-D histograms built from the 2-, 3- and 4-circle tables so
    # that bins supported by all variants dominate.
    histData2, histData3, histData4 = circlesData(softtissue, 1) # 2c = 2 circles
    data2c, H2c, edges2c = hist3d(histData2)
    data3c, H3c, edges3c = hist3d(histData3)
    data4c, H4c, edges4c = hist3d(histData4)
    H = H2c+H3c+H4c
    # NOTE(review): only the 2-circle edges/table are returned - this
    # assumes all three hist3d calls produced comparable binning; confirm.
    return H, edges2c, histData2
def ranges(H, edges):
    """Locate the two fullest histogram bins (the candidate eye sockets).

    Returns the (3, 2) [min, max] edge ranges of the two most-populated
    bins, plus a certainty score: third-largest count divided by the
    second-largest (small means the two sockets stand out clearly).
    """
    flat_order = np.argsort(H, axis=None)
    top = [np.array(np.unravel_index(flat_order[-k], H.shape)) for k in (1, 2, 3)]
    index_1, index_2, index_3 = top
    certainty = H[tuple(index_3)] / H[tuple(index_2)]

    def _bin_range(index):
        # [lower edge, upper edge] of the bin along each of the 3 axes
        return np.array([[edges[d][index[d]], edges[d][index[d] + 1]]
                         for d in range(3)])

    return _bin_range(index_1), _bin_range(index_2), certainty
def mask_data(d, ranges):
    """Keep only the (x, y, z, r) rows of ``d`` inside the ``ranges`` box.

    ``ranges`` is a (3, 2) array of inclusive [min, max] bounds per axis.
    Rows outside the box are zeroed and then dropped (a row whose y value
    is zero is also treated as outside).  Returns a (4, M) array whose
    rows are x, y, z, r.
    """
    in_box = np.ones(d.shape[0], dtype=bool)
    for axis in range(3):
        in_box &= (d[:, axis] >= ranges[axis, 0]) & (d[:, axis] <= ranges[axis, 1])
    masked = d.T * in_box
    return np.array(masked[:, masked[1] != 0])
def coords(histData2, firstEyeRange, secondEyeRange):
    """Estimate the 3-D centre of each eye socket from the circle data.

    For each socket: fit radius vs z with a quadratic and take the z of
    maximum radius, then fit x(z) and y(z) linearly to read off the other
    coordinates.  The final /2 undoes the 2x stretch applied during
    detection.  Returns (c1, c2), each a length-3 array (x, y, z).
    """
    import scipy
    from scipy.optimize import curve_fit
    socket_1 = mask_data(histData2, firstEyeRange) # can be any of the hist_datas because only using
    # the first 3 cols
    socket_2 = mask_data(histData2, secondEyeRange)
    socket_1 = core.rejectOutliers(socket_1)
    socket_2 = core.rejectOutliers(socket_2)
    def max_z_r(socket):
        # Quadratic fit of radius (row 3) against z (row 2); the vertex
        # approximates the socket centre depth.
        p = np.polyfit(socket[2], socket[3], deg=2)
        def f(z): return p[0]*z**2 + p[1]*z + p[2]
        max_z = scipy.optimize.fmin(lambda r: -f(r), 0) # put in disp=False
        p = np.poly1d(p)
        max_r = p(max_z)
        return max_z, max_r
    maxz1, maxr1 = max_z_r(socket_1)
    maxz2, maxr2 = max_z_r(socket_2)
    # If the fitted optimum escapes the observed z range, fall back to the
    # mean z of the socket's samples.
    if maxz1 > np.amax(socket_1[2]) or maxz1 < np.amin(socket_1[2]):
        maxz1 = np.array([np.mean(socket_1[2])])
    if maxz2 > np.amax(socket_2[2]) or maxz2 < np.amin(socket_2[2]):
        maxz2 = np.array([np.mean(socket_2[2])])
    # Linear fits x(z) and y(z) evaluated at the chosen z for each socket.
    p = np.polyfit(socket_1[2], socket_1[0], deg=1)
    def f(z): return p[0]*z + p[1]
    x1=f(maxz1)
    p = np.polyfit(socket_1[2], socket_1[1], deg=1)
    def f(z): return p[0]*z + p[1]
    y1=f(maxz1)
    p = np.polyfit(socket_2[2], socket_2[0], deg=1)
    def f(z): return p[0]*z + p[1]
    x2=f(maxz2)
    p = np.polyfit(socket_2[2], socket_2[1], deg=1)
    def f(z): return p[0]*z + p[1]
    y2=f(maxz2)
    # Halve to undo the 2x image resize used during circle detection.
    c1 = np.array([x1/2,y1/2,maxz1/2])[:,0]
    c2 = np.array([x2/2,y2/2,maxz2/2])[:,0]
    return c1,c2
def checkcoords(c1, c2, softtissue):
    """Plots figures checking that the coordinates were correctly chosen for the 2 eye sockets"""
    c1 = c1.astype(int)
    c2 = c2.astype(int)
    fig, axes = plt.subplots(2, 3)
    a, b, c = softtissue.shape
    dot = dict(marker='o', markersize=2, markeredgewidth=25, markeredgecolor='r')
    # One row of panels per socket centre: the three orthogonal slices
    # through the point, with the point itself marked in red.
    for row, p in zip(axes, (c1, c2)):
        views = (
            (softtissue[:, :, p[2]], p[1], p[0], b, a),
            (softtissue[:, p[1], :], p[2], p[0], c, a),
            (softtissue[p[0], :, :], p[2], p[1], c, b),
        )
        for ax, (img, px, py, xmax, ymax) in zip(row, views):
            ax.imshow(img)
            ax.plot(px, py, **dot)
            ax.set_xlim([0, xmax])
            ax.set_ylim([0, ymax])
            ax.set_aspect('equal')
    plt.show()
    return None
def anglesFromEyes(c1, c2, arrayShape):
    """Return the tilt angles (degrees) implied by the two eye centres.

    The normalised inter-eye direction is compared against the z axis and
    the (0, 1, 0) axis; each angle is the arcsine of the corresponding
    direction cosine.  ``arrayShape`` is accepted for interface
    compatibility but unused.
    """
    # midpoint the separating plane passes through (kept for reference)
    c = 0.5 * (c1 + c2)
    direction = (c1 - c2) / np.linalg.norm(c1 - c2)

    def _tilt(reference_axis):
        return np.degrees(np.arcsin(np.dot(direction, reference_axis)))

    angle1 = _tilt(np.array([0, 0, 1]))
    angle2 = _tilt(np.array([0, 1, 0]))
    return angle1, angle2  # angles are in degrees
def correctSkews(angle1, angle2, array):
    """Rotate the volume to undo the two tilt angles from anglesFromEyes.

    Returns both the intermediate result (first rotation only, axes 0-1)
    and the fully corrected volume (second rotation, axes 2-0).
    """
    from scipy.ndimage.interpolation import rotate
    rotated1 = rotate(array, angle1, mode='nearest', axes=(0,1))
    angle1rad = angle1/360*2*np.pi  # radians; computed but currently unused
    rotated2 = rotate(rotated1, angle2 ,mode='nearest', axes=(2,0))
    return rotated1, rotated2
0d363b4b80c2b1c313ab95c3b4afd48a62397a66 | Python | ignatiusab/d2acq | /abilityToHero.py | UTF-8 | 1,339 | 2.515625 | 3 | [] | no_license | from bs4 import BeautifulSoup
import mechanize
import re
import urllib
from hashlib import sha256
br = mechanize.Browser()
br.set_handle_robots(False)
br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1) \
AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/41.0.2228.0 Safari/537.36')]
out = open('abilities.csv', 'w')
def parseHero(heroLink):
    # Scrape one hero's wiki page and append "<ability>,<hero>" CSV rows
    # to the module-level ``out`` file for every ability with a sound clip.
    # (Python 2 code: note the ``print line`` statement below.)
    html = br.open(heroLink)
    soup = BeautifulSoup(html.read(), 'lxml')
    # Ability boxes are identified by their fixed inline flex style.
    abilities = soup.find_all(lambda elem: elem.name == 'div' and 'style' in elem.attrs and 'flex: 0 1 450px' in elem.attrs['style'])
    for ability in abilities:
        # get the ability sound file
        btn = ability.find('a', title='Play', class_='sm2_button')
        if btn is None:
            continue
        # get the name of the ability
        name = ability.div.get_text('|').split('|')[0]
        # These two names are skipped - presumably duplicates shared across
        # heroes; confirm against the wiki.
        if name == 'Cleave':
            continue
        if name == 'Aegis of the Immortal':
            continue
        # Hero name comes from the last URL segment, underscores -> spaces.
        line = name + ',' + heroLink.split('/')[-1].replace('_', ' ') + '\n'
        print line
        out.write(line)
html = br.open('http://dota2.gamepedia.com/Heroes')
soup = BeautifulSoup(html.read(), 'lxml')
heroes = soup.find_all('img', width=80, height=45)
for hero in heroes:
link = 'http://dota2.gamepedia.com' + hero.parent.attrs['href']
parseHero(link)
| true |
1329f59efbc274bf63f374c9f9edd3e34ea24318 | Python | monishajjmm1923/python-programs | /filehandling.py | UTF-8 | 1,615 | 3.734375 | 4 | [] | no_license | #open a file
fileptr = open("file.py","r")
if fileptr:
    # open() raises on failure, so this branch always runs when reached
    print("file is opened successfully")
#To read a file using fileobj.read(<count>)
fileptr = open("file.py","r");
a = fileptr.read(9);  # read at most the first 9 characters
print(a)
#stores all the data of the file into the variable content
content = fileptr.readline();  # continues from offset 9: the rest of that line
# prints the type of the data stored in the file
print(type(content))
#prints the content of the file
print(content)
#closes the opened file
fileptr.close()
#read the whole file.
a = open("file.py","r");
#running a for loop
for i in a:
    print(i) # i contains each line of the file
#add data in to the file
fileptr = open("file.py","a");
#appending the content to the file
fileptr.write("Python is the modern day language. It makes things so simple.")
#closing the opened file
fileptr.close()
#Using with statement (closes the file automatically)
with open("pip.py",'r') as f:
    content = f.read();
    print(content)
# open the file in read mode
fileptr = open("file.py","r")
#initially the filepointer is at 0
print("The filepointer is at byte :",fileptr.tell())
#reading the content of the file
content = fileptr.read();
#after the read operation file pointer modifies. tell() returns the location
print("After reading, the filepointer is at:",fileptr.tell())
#renaming the file
import os;
#rename file2.txt to file3.txt (raises if file2.txt does not exist)
os.rename("file2.txt","file3.txt")
import os;
#deleting the file named file3.txt
os.remove("file3.txt")
import os;
#printing the current working directory
print(os.getcwd())
| true |
51fe7b90188569e3b5cd14a425b5c955cbf8bff3 | Python | NguyenNgocHaiIT/PythonBasic | /Day_9_OOP/HinhChuNhat.py | UTF-8 | 520 | 3.765625 | 4 | [] | no_license | class HinhChuNhat:
    def __init__(self,dai, rong):
        # dai = length, rong = width (Vietnamese)
        self.Dai = dai
        self.Rong = rong
    def DienTich(self):
        # Area = length * width
        return self.Dai * self.Rong
    def ChuVi(self):
        # Perimeter = 2 * (length + width)
        return (self.Dai + self.Rong) * 2
    def to_string(self):
        # Print length, width, perimeter and area.  The user-facing
        # strings are Vietnamese and intentionally left unchanged.
        print("Chiều dài hình chữ nhật là : ",self.Dai)
        print("Chiều rộng hình chữ nhật là : ", self.Rong)
        print("Chu vi hình chữ nhật là : ",self.ChuVi())
        print("Diện tích hình chữ nhật là : ",self.DienTich())
b6962c4e4fec1aefd22f6fa5d85ddffca81ff484 | Python | iamrajee/AI_lab | /labtest2/ball.py | UTF-8 | 4,327 | 2.59375 | 3 | [] | no_license | #--------constraints-
#ball cant bowled more than 1 player in a over
#---------------------------------Necessary import-------------------------#
import numpy as np
import math
import itertools
import random
#----------------------------------Change variable here-----------------------#
n_baller = 5 #no of ballers
n_over = 10#no of over
who_balled = [0,0,1,1,2,2,3,3,4,4]
n_wicket = 3#no of wicket
# runs = [1,2,3,4,6]#possible runs
# Per-bowler (runs conceded per over, balls per wicket) - TODO confirm units.
strikerate = [(3,33),(3.5,30),(4,24),(4.5,18),(5,15)]
run_wicket_prob = []
for ele in strikerate:
    # (runs per over, per-over wicket probability = 6 balls / balls-per-wicket)
    run_wicket_prob.append((ele[0], (1/ele[1])*6))
n_over_balled = np.zeros(5)
print("run_wicket_prob = ",run_wicket_prob)
# print(n_over_balled)
# olpblist enumerates all 3**5 states of overs left per bowler (0..2 each).
olpblist = []
my_list = [0,1,2]
for tuple_ in itertools.product(my_list, repeat=5):
    olpblist.append(tuple_)
    # print(tuple_)
len_olpblist = len(olpblist)
#-------------------------------------Bellman value iteration function-------------------------#
def bellman (prev_V): #formula => V[st]=max_over_a{R[a]+sumation{P(st).V[s:t-1]}}, reward here is amount of run scored
    """Backward pass over overs, choosing the bowler that minimises the
    expected conceded runs at each step.

    NOTE(review): despite the name this is not a pure value-iteration
    sweep - it simulates a single trajectory (random.uniform decides
    wickets), mutates module-level state assumptions (n_over, n_wicket,
    run_wicket_prob, olpblist) and prints as it goes, so results vary
    between runs.  Returns copies of the value and policy tables.
    """
    # print("bellman",prev_V.shape)
    V = np.full (prev_V.shape,0)
    policy = np.zeros (prev_V.shape , dtype = int)
    w = n_wicket-1
    olpb = [2,2,2,2,2]  # overs left per bowler (max 2 each)
    # actionlist = [0,0,1,1,2,2,3,3,4,4]
    sum = 0
    optimal_over = []
    for o in range(n_over):
        # print("i = ",i,"w = ",w,"olpb = ",olpb)
        if w == -1:
            print("******************allout***************************")
            break
        o=(n_over-1)-o  # iterate overs backwards: last over first
        # for w in range(n_wicket):
        # w = (n_wicket-1)-w
        # for olpb in olpblist:
        # olpb = list(olpb)
        min_temp = 100000000
        a_wrt_min = 0
        # Expand remaining-over counts into a multiset of available bowlers.
        actionlist = []
        for i,ele in enumerate(olpb):
            for j in range(ele):
                actionlist.append(i)
        for a in np.unique(actionlist):#minimum over action
            # temp_actionlist = actionlist
            curr_v = (1-run_wicket_prob[a][1]) * (run_wicket_prob[a][0] + V[o-1][w][olpblist.index(tuple(olpb))]) #when no is out
            # temp_actionlist.remove(a)
            # olpb[a] -=1
            curr_v += run_wicket_prob[a][1] * (run_wicket_prob[a][0] + V[o-1][w-1][olpblist.index(tuple(olpb))]) #when one player is out
            if(curr_v<min_temp):
                min_temp = np.round(curr_v,3)
                a_wrt_min = a
                # print(a)
        if a_wrt_min in actionlist:
            # print("a_wrt_min = ",a_wrt_min, "olpb = ",olpb )
            # print("o = ",o+1,"w = ",w,"olpb = ",olpb," ===> a_wrt_min = ",a_wrt_min)
            # actionlist.remove(a_wrt_min)
            olpb[a_wrt_min] -=1  # consume one of the chosen bowler's overs
            # Simulate whether a wicket falls this over.
            if random.uniform(0,1) < run_wicket_prob[a_wrt_min][1]:
                w-=1
            V[o][w][olpblist.index(tuple(olpb))] = min_temp
            policy[o][w][olpblist.index(tuple(olpb))] = a_wrt_min
            print("o = ",o+1,"w = ",w+1,"a_wrt_min = ",a_wrt_min+1," ===> olpb = ",olpb,"min_temp = ",min_temp)
            sum+=min_temp
            optimal_over.append(a_wrt_min+1)
        else:
            print("warning")
    print("optimal_run = ", sum," optimal_over = ", optimal_over)
    return np.copy (V) ,np.copy(policy)
#--------------------------------------Calling-----------------------------------------------#
initial_V = np.zeros((n_over,n_wicket,len_olpblist)) #for each state ###why n_ball+1 bcz 0,1,....,n_ball ball left
policy = np.zeros (initial_V.shape , dtype = np.int)
final_V , policy= bellman (initial_V)
# print("final_V = \n", final_V)
# print("policy = \n")
# for ele in policy:
# print(ele)
left = 9
# w_left = 2
# for i in range(10):
# if i == 0:
# temp_olpb = [2,2,2,2,2]
# a = policy[left][w_left][olpblist.index(tuple(temp_olpb))]
# print(a)
# temp_olpb[a] -=1
# left -=1
# if random.uniform(0,1) < run_wicket_prob[a][1]:
# w_left-=1
# else:
# a = policy[left][w_left][olpblist.index(tuple(temp_olpb))]
# print(a)
# #---------------------------------------Saving-----------------------------------------------#
# np.set_printoptions(formatter={'float': '{: 0.0f}'.format})
# np.savetxt("policy.txt" , policy , fmt = "%i")
# np.savetxt("value.txt" , final_V , fmt = "%i")
| true |
d5a525d618ea8465264ae5f527ffb5dc4f295fa5 | Python | ericygu/StocksAndStringsDuo | /form_dictionary.py | UTF-8 | 1,253 | 3.4375 | 3 | [] | no_license | import json
from load_articles import read_articles
# format {'potato': 4, 'oil': 1}
def insert_dictionary(str, dictionary):
    # Tally whitespace-separated words from ``str`` into ``dictionary``
    # (word -> count) and bump the module-level running word total.
    # NOTE(review): the parameter name shadows the builtin ``str``.
    global net_words
    words = str.split()
    for word in words:
        net_words += 1
        if word in dictionary:
            dictionary[word] += 1
        else:
            dictionary[word] = 1
def get_ratios(dictionary_1):
    """Divide every count by the global word total, turning raw counts
    into frequency ratios.  Mutates ``dictionary_1`` in place and returns it."""
    for word in dictionary_1:
        dictionary_1[word] /= net_words
    return dictionary_1
def write_dictionary(dictionary):
    """Persist ``dictionary`` as JSON to dictionary.json in the working directory."""
    with open('dictionary.json', 'w') as fp:
        fp.write(json.dumps(dictionary))
def read_dictionary():
    """Load and return the dictionary previously saved by write_dictionary."""
    with open('dictionary.json') as handle:
        return json.loads(handle.read())
if __name__ == '__main__':
    # NOTE(review): ``dict`` shadows the builtin; get_ratios mutates it in
    # place, so ``dictionary`` and ``dict`` end up as the same object.
    dict = {}
    net_words = 0
    articles = read_articles()
    articles_length = len(articles)  # computed but unused below
    # divineRatio = (instance of word)/networds
    # after reading dictionary before writing it,
    # writing it is below -- have to divide all the values of dictionary by the articles
    for article in articles:
        insert_dictionary(article["title"], dict)
        insert_dictionary(article["description"], dict)
    # convert to ratios...
    dictionary = get_ratios(dict)
    write_dictionary(dict)
    print(len(dictionary))
| true |
e6a122696b9b6d9f7eb38fdc0c4b1fcba20a7757 | Python | cmcneile/BasicLatticeFit | /src/models.py | UTF-8 | 291 | 2.921875 | 3 | [] | no_license | # Collection of fit models
#
#
import math
import numpy as np
#
# Staggered fit model for two states
#
def stagg_2_state(t, a0, m0, a1, m1):
    ''' a0*np.exp(-m0*t) + (-1)**t*a1*np.exp(-m1*t) '''
    # Two-state staggered-fermion correlator: a smooth decaying state plus
    # an oscillating state whose sign alternates with t.
    oscillating_sign = (-1) ** t
    return a0 * np.exp(-m0 * t) + oscillating_sign * a1 * np.exp(-m1 * t)
| true |
2bf319521e00bb500bea8a9c99d7015e00052cd5 | Python | sameerkitkat/CodingProblems | /ProductOfArrayExceptSelf.py | UTF-8 | 417 | 3.21875 | 3 | [] | no_license | def arrayProductExceptSelf(arr):
    # Prefix/suffix-product technique: ans[i] is the product of everything
    # left of i times everything right of i, excluding arr[i] itself
    # without using division.  O(n) time, O(n) extra space.
    n = len(arr)
    L, R, ans = [0] * n, [0] * n, [0] * n
    L[0] = 1  # nothing to the left of index 0
    for i in range(1, n):
        L[i] = arr[i - 1] * L[i - 1]
    R[n - 1] = 1  # nothing to the right of the last index
    for i in reversed(range(n - 1)):
        R[i] = arr[i + 1] * R[i + 1]
    for i in range(n):
        ans[i] = L[i] * R[i]
    return ans
if __name__ == '__main__':
arr = [1, 2, 3, 4]
print(arrayProductExceptSelf(arr))
| true |
a58aab7c39f29317bdf2b3e4ff814717a49699f5 | Python | DerekHJH/LearnPython | /Plot/squares.py | UTF-8 | 946 | 3.796875 | 4 | [] | no_license | import matplotlib.pyplot as plt;
# Scatter plot of n versus n**2 for n in 1..1000, colour-mapped by value.
x = list(range(1, 1001));
y = [x*x for x in range(1, 1001)];
#plt.plot(x, y, linewidth = 5);#Set the width of the drawn line;
plt.title("Square Numbers", fontsize = 24);
plt.xlabel("Value", fontsize = 14);
plt.ylabel("Square of Values", fontsize = 14);
plt.tick_params(axis = "both", which="major", labelsize = 14);#Set the type of the scale;
#plt.scatter(4, 4, s = 200);#Draw a single point at this coordinates and set the size;
#plt.scatter(x ,y, c = "red", edgecolor = "none", s = 40);
#plt.scatter(x ,y, c = [0.5, 0.5, 0.5], edgecolor = "none", s = 40);#RGB to 0 darker
plt.scatter(x ,y, c = y, cmap = plt.cm.Reds, edgecolor = "none", s = 40);
#y with smaller value is drawn with lighter color and vise versa; Blues, Reds....
plt.axis([0, 1100, 0, 1100000]);#The range of x and y to be presented;
plt.show();  # blocks until the figure window is closed
#plt.savefig("Squares.png", bbox_inches = "tight");
#bbox to eliminate the extra white space
| true |
4b2deb0cb371921e0b05c1679749a1ce3d30efee | Python | parthvadhadiya/hello-world-program-in-Scikit-Learn | /sk-learn_example.py | UTF-8 | 787 | 3.1875 | 3 | [] | no_license |
# Toy k-NN example: two labelled point clusters, one test point.
training_set = {'Dog':[[1,2],[2,3],[3,1]], 'Cat':[[11,20],[14,15],[12,15]]}
testing_set = [15,20]
#ploting all data
import matplotlib.pyplot as plt
c = 'x'
for data in training_set:
    print(data)
    #print(training_set[data])
    for i in training_set[data]:
        plt.plot(i[0], i[1], c, color='c')
    c = 'o'  # switch marker style for the second class
plt.show()
#prepare X and Y
x = []
y = []
for group in training_set:
    for features in training_set[group]:
        x.append(features)
        y.append(group)
#import model builing
from sklearn import preprocessing, neighbors
#initialize and fit
clf = neighbors.KNeighborsClassifier()
clf.fit(x, y)
#preprocess testing data
import numpy as np
testing_set = np.array(testing_set)
testing_set = testing_set.reshape(1,-1)  # sklearn expects a 2-D (1, n_features) array
#prediction
prediction = clf.predict(testing_set)
print(prediction)
| true |
87ebf1eaa669fdd7bb3b5f915c9408aff6f6bf31 | Python | shacharnatan/pythonProject | /leseons/tragil9.py | UTF-8 | 2,002 | 3.5 | 4 | [] | no_license | from time import sleep
from random import randint
def menu():
    """Interactive top-level menu: dispatches to the three demo features
    until the user asks to exit (user-facing strings kept verbatim)."""
    while True:  # the original tested the always-truthy string literal "true"
        print(" welcome !\nthis is are menu : \n1.dogs deatelis\n2.friends list\n3.enter a dictonary dns \n-----------")
        sleep(1)
        choise = (input("enter what u want 1-3?"))
        if choise == "1":
            dog()
        elif choise == "2":
            friend_list()
        elif choise == "3":
            dns_dictonary()
        else:
            print("only 1-3 !!!")
        exit = input("do u want to exit yes/no?")
        if exit == "yes":
            print("bye bye nice to meet u")
            break
        # Any other answer loops around again (redundant else/continue removed).
        print("welcome back")
def dog():
    """Ask for a dog's name and age, then report the age in dog years."""
    name = input("enter your dog name :")
    age = int(input("enter your dog age:"))
    dog_years = age * 7
    print("your name dog is :" + name + "\nyour dog age in years off dog is : " + str(dog_years))
def friend_list():
    # Collect five friend names, then optionally check a name's membership.
    friend_list=[]
    sleep(1)
    print("now you gave us 5 names of your friends :")
    print("boot.....")
    sleep(2)
    for i in range(5):
        friend_list.append(input("enter a name of your friend :"))
    print("bulidinid your friedns list....")
    sleep(3)
    print("your new list is : " +str(friend_list))
    sleep(1)
    name1=input("do u like to chake if your friend is in the list yes/no?")
    if(name1=="yes"):
        sleep(1)
        name2=input("enter a name of your friend :")
        if(name2 in friend_list):
            print("cheking...")
            sleep(1)
            print("your friend is in the list !!")
        else:
            print("cheking...")
            sleep(1)
            print("your friend is isnt in the list!")
def dns_dictonary():
    # Build a small DNS table: four url -> ip entries typed by the user.
    dns_dict={}
    sleep(1)
    print("now gives us a 4 url and ip adresses :")
    for i in range(4):
        sleep(2)
        dns_dict.update({input("enter a url :"):input("enter a ip :")})
    print("bulidinid your dns dict......")
    sleep(2)
    print("your dns dict is :" + str(dns_dict))
menu()
| true |
04323031b7b1211a529e9a659f7618702a332460 | Python | ZahariAT/HackBulgaria2019 | /week04/parse_money_tracker_data.py | UTF-8 | 306 | 2.84375 | 3 | [] | no_license | class RowsToList:
@staticmethod
def rows_to_list(all_user_data):
lst = []
with open(all_user_data, 'r') as f:
for row in f.readlines():
lst.append(row)
return lst
if __name__ == '__main__':
print(RowsToList.rows_to_list("money_tracker.txt")) | true |
779e4634daba19113d12cfda6aca7198a1bd70e1 | Python | justrunshixuefeng/django-test | /django_test/test1/authpermis/utils/permission.py | UTF-8 | 1,767 | 2.765625 | 3 | [] | no_license | from rest_framework.permissions import BasePermission
# 校验 SVIP 用户 才能访问的验证
class SVIPPermission(BasePermission):
    """Grant access only to SVIP users (user_type == 3)."""
    message = "必须是SVIP才能访问"

    def has_permission(self, request, view):
        # Permission is granted exactly when the user is an SVIP.
        return request.user.user_type == 3
# 除了 SVIP用户都可以访问的验证
class NosvipPermission(BasePermission):
    """Grant access to everyone except SVIP users (user_type == 3)."""
    message = '你没有权限去执行这个动作!!!'

    def has_permission(self, request, view):
        return request.user.user_type != 3
# 只有老师可以查看所有学生的信息
class Check_student_Permission(BasePermission):
    """Only teachers (user.type == 2) may view student information."""
    message = '你不是老师,无权查看学生信息'

    def has_permission(self, request, view):
        # Diagnostics preserved from the original implementation.
        print(request.user)
        user_type = request.user.type
        print('当前用户的类型为:%s' % str(user_type))
        # Teachers are type 2; students (type 1) and anything else are denied.
        return user_type == 2
# 老师可以查看学生,但是老师只能更改自己学生的信息
class Update_age_Permission(BasePermission):
    """Teachers may update students, but only the students they own."""
    message = '你不是此学生的老师,乱改啥?'

    def has_permission(self, request, view):
        # Request-level check: only teachers (type 2) proceed to the
        # object-level check below.
        return request.user.type == 2

    def has_object_permission(self, request, view, obj):
        # The requesting teacher's id, with the original diagnostics kept.
        teacher_id = request.user.id
        print('老师id为:%s' % teacher_id)
        print('学生所属老师id:%s' % obj.teacher_id)
        # Object-level check: the student must belong to this teacher.
        return obj.teacher_id == teacher_id
| true |
217d2a99e86fe91a05c95964d97317ea5a1d2153 | Python | danlupi/hart-sdet-codechallenge | /HART_Weather_Assignment/src/WeatherBusiness.py | UTF-8 | 1,408 | 3 | 3 | [] | no_license | import requests
class ParkDecider:
    """Choose the park whose current temperature is closest to an ideal.

    With ``mock=True`` no network calls are made: ``getWeatherTemp``
    returns a fixed temperature and ``getOptimalPark`` returns
    ``mock_winner`` directly.
    """

    def __init__(self, idea_temp=float(78), mock=False, parks=["CA,Anaheim", "FL,Orlando"], mock_winner='CA,Anaheim'):
        # NOTE(review): the mutable default for ``parks`` is shared across
        # instances; it is never mutated here, so behaviour is unaffected.
        self.parks = parks
        self.idea_temp = idea_temp
        #TODO decouple using
        self.mock = mock
        self.base_url = 'http://api.wunderground.com/api/06c8fec6a3511479/conditions/q/'
        self.mock_winner = mock_winner

    def getOptimalPark(self):
        """Return the park whose temperature is closest to ``idea_temp``.

        NOTE(review): in non-mock mode getWeatherTemp can return an error
        string, which would make the ``abs`` below raise a TypeError.
        """
        if self.mock:
            return self.mock_winner
        winning_park = None
        winning_diff = None
        for park in self.parks:
            temperature = self.getWeatherTemp(park)  # TODO assume no repeat
            temp_diff = abs(self.idea_temp - temperature)
            if winning_diff is None or temp_diff < winning_diff:
                winning_park = park
                winning_diff = temp_diff
        return winning_park

    def getWeatherTemp(self, park):
        """Return the current temperature (F) for ``park`` ("ST,City").

        On any lookup/network failure an explanatory string is returned
        instead of a float (behaviour preserved from the original).
        """
        if self.mock:
            temperature = float(62.1)
        else:
            try:
                state, city = park.split(',')
                r = requests.get(self.base_url + state + '/' + city + '.json')
                temperature = r.json()['current_observation']['temp_f']
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.
                return "park defined or rest call threw an exception"
        return float(temperature)
55a391754caef5bc31b506058e8b00ab05cd6e98 | Python | dengjinyi4/myapitest | /emarurl/test_case/emargeturl/test.py | UTF-8 | 198 | 2.53125 | 3 | [] | no_license | __author__ = 'emar0901'
if __name__ == '__main__':
to_list=['dengjinyi@emar.com,jishenghui@emar.com,aidinghua@emar.com']
t=''.join(to_list)
print t
print type(t)
print to_list[0]
print type(to_list[0]) | true |
16417e8236ecb524c041b208dc4ae7140ff99d7d | Python | MeghaGupt/capstone-project-open-ended | /exchanges.py | UTF-8 | 1,233 | 2.984375 | 3 | [] | no_license | import pandas as pd
import requests
from configparser import ConfigParser
def read_config(filename='config.ini', section='eodhistoricaldata'):
    """Read one section of an INI configuration file.

    Args:
        filename: name of the configuration file
        section: section of the configuration to load

    Returns:
        dict mapping option names to values for that section.

    Raises:
        Exception: if the section is missing from the file.

    Reference:
        https://www.mysqltutorial.org/python-connecting-mysql-databases/
    """
    # create parser and read ini configuration file
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('{0} not found in the {1} file'.format(section, filename))
    # items() yields (option, value) pairs, which map directly to a dict.
    return dict(parser.items(section))
# Fetch the exchange list from the EOD API and dump it to CSV.
# NOTE(review): the hard-coded token in the commented-out line below is a
# leaked credential and should be revoked.
url = "https://eodhistoricaldata.com/api/exchanges-list/?api_token=" + read_config()['api_key'] + "&fmt=json"
#response = requests.get("https://eodhistoricaldata.com/api/exchanges-list/?api_token=609ab308c85079.79546813&fmt=json")
response = requests.get(url)
df = pd.DataFrame(response.json())
df.to_csv("data\exchanges.csv", sep= ',', header= True)
bd5bd53bd6cbecd8c3492996671c38ff94c75a98 | Python | MBWalter/Orientation_Week | /variables_4.py | UTF-8 | 156 | 3.875 | 4 | [] | no_license | number = input("Number: ")
print("")
multiplication = int(number)  # parse once so the loop multiplies an int, not a string
for i in range(1,6):
    multi = i * multiplication
    print(i ,"*",number ,": " ,multi)  # e.g. "3 * 7 :  21"
| true |
e6b3a82f15001b85e3e8a4b5f5c9fb9284627422 | Python | akniels/Data_Structures | /Project_3/Problem_2.py | UTF-8 | 1,268 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 3 10:21:19 2020
@author: akniels1
"""
##new
def rotated_array_search(input_list, number):
    """
    Find the index by searching in a rotated sorted array

    Uses a pivot-aware binary search: at every step at least one half of
    the current window is sorted, so we can decide which half may contain
    the target.  O(log n), matching the docstring's intent - the original
    body was a plain O(n) linear scan.

    Args:
       input_list(array), number(int): Input array to search and the target
    Returns:
       int: Index or -1
    """
    low, high = 0, len(input_list) - 1
    while low <= high:
        mid = (low + high) // 2
        if input_list[mid] == number:
            return mid
        if input_list[low] <= input_list[mid]:
            # Left half [low, mid] is sorted.
            if input_list[low] <= number < input_list[mid]:
                high = mid - 1
            else:
                low = mid + 1
        else:
            # Right half [mid, high] is sorted.
            if input_list[mid] < number <= input_list[high]:
                low = mid + 1
            else:
                high = mid - 1
    return -1
def linear_search(input_list, number):
    """Return the index of the first occurrence of ``number``, or -1."""
    for position, value in enumerate(input_list):
        if value == number:
            return position
    return -1
return -1
def test_function(test_case):
    """Print "Pass" when the rotated search agrees with a linear-search oracle."""
    input_list, number = test_case
    expected = linear_search(input_list, number)
    actual = rotated_array_search(input_list, number)
    print("Pass" if expected == actual else "Fail")
# Smoke tests: targets at the pivot, inside each half, absent values,
# single-element and empty arrays.
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 6])
test_function([[6, 7, 8, 9, 10, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 8])
test_function([[6, 7, 8, 1, 2, 3, 4], 1])
test_function([[6, 7, 8, 1, 2, 3, 4], 10])
test_function([[1], 1])
test_function([[2], 1])
test_function([[], 1])
#print(rotated_array_search([6, 7, 8, 9, 10, 1, 2, 3, 4],1))
| true |
e1a71e2b462f3820b163a1113be104424eeb5e7d | Python | usamaelshazly/python_web_course | /Lecture_3/students_app_1.py | UTF-8 | 2,037 | 2.796875 | 3 | [] | no_license | from flask import Flask, jsonify, abort, request
from student import Student
app = Flask(__name__)
# In-memory seed data standing in for a database; the POST/PUT/DELETE
# handlers mutate this list, so all changes are lost on restart.
students = [
    Student("10", "Ahmed", "Giza"),
    Student("11", "Hany", "Cairo"),
    Student("12", "Asmaa", "Alex")
]
###############################
@app.route("/students/", methods=["GET"])
def get_students():
    """Return every student as a JSON array."""
    return jsonify([student.to_json() for student in students])
@app.route("/students/<student_id>", methods=["GET"])
def get_student(student_id):
    """Return the student with the given id as JSON, or 404."""
    for candidate in students:
        if candidate.id == student_id:
            return jsonify(candidate.to_json())
    abort(404, "student not found")
@app.route("/students/", methods=["POST"])
def insert_student():
    """Create a student from the posted JSON payload; 201 on success."""
    if request.content_type != "application/json":
        abort(400, "content type must be application/json")
    students.append(Student.from_json(request.get_json()))
    return jsonify({"message": "success"}), 201
@app.route("/students/", methods=["PUT"])
def update_student():
    """Replace the existing student matching the posted JSON's id, or 404."""
    if request.content_type != "application/json":
        abort(400, "content type must be application/json")
    updated = Student.from_json(request.get_json())
    for index, existing in enumerate(students):
        if existing.id == updated.id:
            # Remove the old record, then append the replacement.
            del students[index]
            students.append(updated)
            return jsonify({"message": "success"})
    abort(404, "student not found")
@app.route("/students/<student_id>", methods=["DELETE"])
def delete_student(student_id):
    """Remove the student with the given id, or 404 when absent."""
    for index, existing in enumerate(students):
        if existing.id == student_id:
            del students[index]
            return jsonify({"message": "success"})
    abort(404, "student not found")
###############################
@app.errorhandler(404)
@app.errorhandler(400)
def on_error(error):
    """Serialize HTTP errors as JSON, preserving the original status code."""
    payload = {"message": error.description}
    return jsonify(payload), error.code
###############################
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
| true |
7c5bc4a9e78eaebd248dfbf2a92a968389fca9ad | Python | solo4747/netflix | /FlaskWebProject1/FlaskWebProject1/app.py | UTF-8 | 3,547 | 2.65625 | 3 | [] | no_license | from flask import Flask, request, render_template
import pymysql
import pymysql.cursors
import pickle
import pandas as pd
import model
# Database connection.
# NOTE(review): credentials are hard-coded in source control — move them
# to environment variables or a config file, and rotate the password.
connection = pymysql.connect(host='62.171.158.215',
                             user='netflix',
                             password='WildCodeSchool',
                             db='recommender',
                             charset='utf8mb4',
                             cursorclass=pymysql.cursors.DictCursor)
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def sign_up():
    """Search page: on POST, look up the typed film and render up to five
    suggestions (title, plot, trailer embed, director, actors, rating)."""
    if request.method == "POST":
        # Get the film name from the submitted form.
        movie = request.form["movie"]
        # Query the database for that exact title.
        # NOTE(review): the title is interpolated directly into the SQL —
        # SQL-injection risk; use a parameterized query instead.
        with connection.cursor() as cursor:
            sql = f"SELECT * FROM movies WHERE title = '{movie}'"
            # Execute the query.
            cursor.execute(sql)
            data = cursor.fetchall()
        # data is a list of row dicts -> take row [0], then the 'title' column.
        # NOTE(review): IndexError when the title is not found.
        data = data[0]['title']
        # Predict movie suggestions from the NN model (model.py).
        output = model.getMovies(data)
        # Collected metadata for the suggestions to render.
        trailers = []
        titles = []
        resumes = []
        directors = []
        actors = []
        ratings = []
        for i in range(10):
            # Rebuild the title in OMDb query form ("word+word+..."),
            # dropping the last token and any commas.
            split_title = output[i].split()
            title_omdb =''
            for i in range(len(split_title)-1):  # NOTE(review): shadows the outer loop's i
                title_omdb = title_omdb + '+' + split_title[i]
            title_omdb = title_omdb[1:].replace(',', '')
            # NOTE(review): imports inside the loop body; hoist to module level.
            import requests
            import json
            from youtube_search import YoutubeSearch
            response = json.loads(requests.get("http://www.omdbapi.com/?t="+title_omdb+"&apikey=8ab8c578").text)
            # Stop once five suggestions have full metadata.
            if len(resumes) == 5:
                break
            else :
                if response['Response'] == 'True' :
                    resume = response['Plot']
                    # First YouTube hit for "<title> trailer", rewritten as an embed URL.
                    results = YoutubeSearch(str(response['Title'])+'trailer', max_results=10).to_dict()
                    trailer = "http://www.youtube.com"+str(results[0]['link'])
                    trailer = trailer.replace("watch?v=", "embed/")
                    trailers.append(trailer)
                    resumes.append(response['Plot'])
                    titles.append(response['Title'])
                    directors.append(response['Director'])
                    actors.append(response['Actors'])
                    ratings.append(response['imdbRating'])
        return render_template("results.html", prediction=output, trailer = trailers, title = titles, resume=resumes, directors=directors, actors=actors, ratings=ratings)
        # NOTE(review): unreachable (follows a return), and `redirect` is
        # never imported — calling it would raise NameError.
        return redirect(request.url)
    return render_template("search.html")
@app.route('/autocomplete',methods=['POST', 'GET'])
def ajaxautocomplete():
    """Render the search page with every movie title as suggestions.

    NOTE(review): despite the name, this does not filter by the typed
    prefix — LIKE '%%' matches every row — and `jsonify` is imported but
    never used.
    """
    with connection.cursor() as cursor:
        sql = f"SELECT * FROM movies WHERE title LIKE '%%'"
        query = cursor.execute(sql)  # affected-row count; unused
        data = cursor.fetchall()
    from flask import jsonify
    results = [data[i]['title'] for i in range(len(data))]
    return render_template("search.html", results=results)
if __name__ == '__main__':
app.run() | true |
e16ae03109193bd92c32e78907ae97123f102817 | Python | iastapov17/DS | /find_spy_1.py | UTF-8 | 1,242 | 2.71875 | 3 | [] | no_license | import csv
def csv_dict_reader(file_obj):
    """Scan a passenger CSV for possible spies and append them to spy_1.txt.

    A "possible spy" is a pair of rows sharing the same passenger document
    number but having different `id_people` AND different birth dates.
    Rows with an empty document or the placeholder birth date 1970-01-01
    are skipped.

    Args:
        file_obj: an open text stream with columns
            id_people, passengerdocument, passengerbirthdate (among others).
    Side effects:
        Appends matched pairs to 'spy_1.txt' in the current directory.
    """
    reader = csv.DictReader(file_obj, delimiter=',')
    seen = {}  # passengerdocument -> list of previously accepted rows
    # FIX: the original opened the report file and never closed it;
    # `with` guarantees it is closed even when an exception occurs.
    with open('spy_1.txt', 'a') as report:
        for row in reader:
            if row['passengerdocument'] == '':
                continue
            if row['passengerbirthdate'] == '1970-01-01':
                continue
            doc = row['passengerdocument']
            # Compare the new row against every earlier row with this document.
            for earlier in seen.get(doc, []):
                if earlier['id_people'] != row['id_people'] and row['passengerbirthdate'] != earlier['passengerbirthdate']:
                    report.write("Возможный шпион!" + '\n')
                    # One "key: value; " line per conflicting record.
                    for record in (earlier, row):
                        line_text = ''.join(key + ': ' + record[key] + '; ' for key in earlier.keys())
                        report.write(line_text + '\n')
                    report.write('\n')
            seen.setdefault(doc, []).append(row)
if __name__ == "__main__":
with open("data.csv") as f_obj:
csv_dict_reader(f_obj)
| true |
d26bb0b638912df5ac34da2f300493d539fa2ff0 | Python | sztang/py_practice | /countdown_numbersRound.py | UTF-8 | 6,060 | 3.625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# a recreation of a game in the British game show Countdown
# details: https://en.wikipedia.org/wiki/Countdown_(game_show)#Numbers_round
# summary: contestants pick six numbers and a random three digit target number is generated
# contestants then try to reach the target number through a sequential calculation
# using the chosen base six numbers and the operators +, -, *, /
# this program generates the target number based on a user input of
# how many 'small' (1-10) and 'large' (25, 50, 75, 100) numbers to include in the base six
import random
countList = []            # base numbers still unused in the current round
countListTemp = ()        # frozen copy of the chosen base numbers (for restarts)
numCount = 0              # how many numbers remain in countList
totalNumCount = 6         # a round always starts with six base numbers
newVal = 0                # last operand picked by generatorAll
result = 0                # running total / final target number
operationRecord = []      # printable trace of the calculation (answer key)
smallNumbers = [1,2,3,4,5,6,7,8,9,10] * 2   # two sets of 1..10, drawn without replacement
largeNumbers = [25,50,75,100]
# requests user input 7 numbers
# calls generator1 which calls operation, generator 2, generatorAll
# returns to main after generatorAll completes and calls returnResult
# prints operationRecord to show user how result is derived
def main():
    """Play one round: choose base numbers, build a 3-digit target, reveal it."""
    global numCount, countList, totalNumCount, countListTemp, operationRecord
    baseSelector()
    countListTemp = tuple(countList)  # untouched copy used for restarts
    y = input('Hit Enter to get the target number. ')
    if y == '':
        generator1()
    else:
        # Nag until the user actually hits Enter.
        while y!= '':
            y = input('Please hit Enter to get the target number.')
        generator1()
    # Regenerate until the target fits in three digits.
    while result > 999:
        countList = list(countListTemp)
        numCount = totalNumCount
        operationRecord = []
        print(f'restarted countList is {countList}')
        generator1()
    returnResult()
    z = input('Baffled? Hit Enter to see answer key. ')
    if z == '':
        print(' '.join(operationRecord), f' = {result}')
    else:
        while z!= '':
            z = input('Please hit Enter to see answer key.')
        print(' '.join(operationRecord), f' = {result}')
    print('Thank you for trying, at least.')
    rerun = input('Hit Enter if you have nothing better to do and want to try again. Hit Any Key + Enter to end.')
    if rerun == '':
        print('\n\n\n\n\n\n\n\n')
        globalReset()
        # NOTE(review): recursive replay grows the call stack on long sessions.
        main()
    else:
        print('Go live your life then.')
def baseSelector():
    """Ask how many small numbers to use, then draw the six base numbers.

    Draws without replacement from the module-level smallNumbers /
    largeNumbers pools, appending picks to countList and bumping numCount.
    Recurses until the user confirms the split with Enter.
    """
    global smallNumbers, largeNumbers, numCount, countList
    print('Small numbers are 1 through 10 (two sets). \nLarge numbers are 25, 50, 75, 100.\nThese numbers are shuffled.')
    xSmall = int(input('How many small numbers would you like? '))
    xLarge = int(totalNumCount - xSmall)
    y = input(f'You will have {xLarge} large numbers. Hit Enter to proceed or Any Key + Enter to reselect. ')
    if y == '':
        # Draw the requested number of small numbers.
        for i in range(1, int(xSmall+1)):
            baseNumberIndex = random.randint(0,int(len(smallNumbers) - 1))
            baseNumber = smallNumbers[baseNumberIndex]
            countList.append(baseNumber)
            numCount += 1
            smallNumbers.remove(baseNumber)
        # Draw the remaining large numbers.
        for i in range(1, int(xLarge+1)):
            baseNumberIndex = random.randint(0,int(len(largeNumbers) - 1))
            baseNumber = largeNumbers[baseNumberIndex]
            countList.append(baseNumber)
            numCount += 1
            largeNumbers.remove(baseNumber)
        print(f'Your base numbers are: {countList}')
    else: baseSelector()
# generator1: generate first number to populate result
def generator1():
    """Pick the first operand at random, seed `result`, then chain to generatorAll."""
    global numCount, countList, operationRecord, totalNumCount, result
    for x in countList:
        i1 = random.randint(0,totalNumCount-1)
        # NOTE(review): indexing assumes countList still holds all six
        # numbers; the `if x else generator1()` guard can never take its
        # else branch because base numbers are all non-zero.
        x = countList[i1] if x else generator1()
        result = x
        print(f'The first value is {result}')
        operationRecord.append(str(x))
        numCount -= 1
        countList.remove(x)
        print('The list is now: ', countList)
        generatorAll()
        break
# generatorAll: loops to pick 2nd, 3rd, 4th, 5th...nth value from list
# updates result till base list is empty
def generatorAll():
    """Recursively consume the remaining numbers, folding each into `result`."""
    global numCount, countList, newVal, result, operationRecord, totalNumCount
    for x in countList:
        if numCount <= int(totalNumCount-1) and numCount > 0:
            numCountIndex = int(int(numCount) - 1)  # highest valid index left
            i = random.randint(0,numCountIndex)
            print(f'i is {i}')
            x = countList[i]
            newVal = x
            print(f'newVal is now {newVal}')
            operator(result, newVal)   # updates `result` and records the operator symbol
            operationRecord.append(str(newVal))
            numCount -= 1
            countList.remove(x)
            print(f'numCount is now {numCount}')
            print('The list is now: ', countList)
            # Recurse instead of continuing over the now-mutated list.
            generatorAll()
# operator generates random operations to perform on the new value generated
# checks that result is a positive integer; if not, performs operator again
def operator(z,zz):
    """Apply a random +, -, * or / to (z, zz), retrying until the result
    stays a positive integer; records the operator symbol in operationRecord."""
    global result
    operateCode = random.randint(1,4)
    if operateCode == 1:
        result = int(z + zz)
        print(f'The result is now {z} + {zz} = {result}')
        operationRecord.append(' + ')
    elif operateCode == 2:
        # Subtraction only when the result stays positive.
        if z - zz > 0:
            result = int(z - zz)
            print(f'The result is now {z} - {zz} = {result}')
            operationRecord.append(' - ')
        else:
            operator(z,zz)
    elif operateCode == 3:
        result = int(z * zz)
        print(f'The result is now {z} * {zz} = {result}')
        operationRecord.append(' * ')
    elif operateCode == 4:
        # Division only when it is exact.
        if z % zz == 0:
            result = int(z / zz)
            print(f'The result is now {z} / {zz} = {result}')
            operationRecord.append(' / ')
        else:
            operator(z,zz)
def returnResult():
    """Push earlier output off-screen with blank lines and announce the puzzle."""
    global result, countList, countListTemp
    print(f'\n\n\n\n\n\n\n\n\n\n\nYour set is {countListTemp}.\nThe target to achieve is {result}. Your 30s begins now.')
# Resets all objects to allow program to run again given user input to reset
def globalReset():
    """Restore every module-level pool/accumulator to its initial state."""
    global countList, countListTemp, operationRecord, smallNumbers, largeNumbers
    countList = []
    countListTemp = ()
    operationRecord = []
    smallNumbers = [1,2,3,4,5,6,7,8,9,10] * 2
    largeNumbers = [25,50,75,100]
if __name__ == '__main__': main() | true |
61eb36cea35f0ddffbed342c522dffa05a2ed63a | Python | luowanqian/MachineLearning | /RL/Multi-armedBandits/agent.py | UTF-8 | 1,427 | 3.09375 | 3 | [] | no_license | import numpy as np
class Agent:
    """Base k-armed bandit agent using incremental sample-average estimates."""

    def __init__(self, k_arm=10, initial=0.0):
        self.k = k_arm
        self.initial = initial
        self.indices = np.arange(self.k)

    def init(self):
        """Reset value estimates, pull counts and the step clock for a new run."""
        # estimated value of each action (optimistic when `initial` > 0)
        self.q_estimation = np.full(self.k, self.initial, dtype=float)
        # how many times each action has been chosen
        self.action_count = np.zeros(self.k)
        self.action = None
        self.time = 0

    def act(self):
        """Choose an arm; subclasses implement the policy."""
        pass

    def step(self, reward):
        """Fold `reward` into the estimate of the last chosen arm (sample average)."""
        chosen = self.action
        self.time += 1
        self.action_count[chosen] += 1
        delta = reward - self.q_estimation[chosen]
        self.q_estimation[chosen] += delta / self.action_count[chosen]
class GreedyAgent(Agent):
    """Always exploits: picks an arm with the highest current estimate."""

    def __init__(self, k_arm=10, initial=0.0):
        super().__init__(k_arm=k_arm, initial=initial)

    def act(self):
        """Return a greedy action, breaking ties uniformly at random."""
        best_value = np.max(self.q_estimation)
        best_arms = np.where(self.q_estimation == best_value)[0]
        self.action = np.random.choice(best_arms)
        return self.action
class EpsilonGreedyAgent(GreedyAgent):
    """Greedy agent that explores a uniformly random arm with probability epsilon."""

    def __init__(self, k_arm=10, initial=0.0, epsilon=0.0):
        super().__init__(k_arm=k_arm, initial=initial)
        self.epsilon = epsilon

    def act(self):
        """Explore with probability epsilon; otherwise act greedily."""
        explore = np.random.rand() < self.epsilon
        if explore:
            self.action = np.random.choice(self.indices)
            return self.action
        return super().act()
| true |
94d0178446969d449100234959ed7c300217ce3c | Python | handnn18411c/SnakeGame | /snake_head.py | UTF-8 | 1,770 | 3.53125 | 4 | [] | no_license | import pygame
import time
class SnakeHead():
    """The player-controlled snake head: a rectangle steered with arrow keys."""

    def __init__(self, x, y, width, height, vel):
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.vel = vel
        self.direction = "RIGHT"    # direction actually travelled
        self.turn = self.direction  # direction most recently requested
        self.rect = pygame.Rect(self.x, self.y, self.width, self.height)

    def createSnake(self, screen):
        """Draw the head as a red rectangle on `screen`."""
        pygame.draw.rect(screen, (255, 0, 0),
                         (self.x, self.y, self.width, self.height))

    def getRect(self):
        """Return a rect rebuilt from the live position and size."""
        return pygame.Rect(self.x, self.y, self.width, self.height)

    def move(self):
        """Read the keyboard, update the heading (no 180° turns), advance one step."""
        keys = pygame.key.get_pressed()
        # Record the requested turn; keys are checked in LEFT/RIGHT/UP/DOWN
        # order, so a later-checked pressed key overrides an earlier one.
        for key, name in ((pygame.K_LEFT, "LEFT"), (pygame.K_RIGHT, "RIGHT"),
                          (pygame.K_UP, "UP"), (pygame.K_DOWN, "DOWN")):
            if keys[key]:
                self.turn = name
        # Accept the turn unless it would reverse the current heading.
        opposite = {"LEFT": "RIGHT", "RIGHT": "LEFT", "UP": "DOWN", "DOWN": "UP"}
        if self.direction != opposite[self.turn]:
            self.direction = self.turn
        # Advance one step, clamped to the 500x600 playfield.
        if self.direction == "LEFT" and self.x > 0:
            self.x -= self.vel
        elif self.direction == "RIGHT" and self.x < (500 - self.width):
            self.x += self.vel
        elif self.direction == "UP" and self.y > 0:
            self.y -= self.vel
        elif self.direction == "DOWN" and self.y < (600 - self.height):
            self.y += self.vel
| true |
b0f0119d4c4901649e743e33e9a03ef0296a37ec | Python | wojtekminda/Python3_Trainings | /SMG_homework/09_2_Replacing_words.py | UTF-8 | 1,528 | 4.71875 | 5 | [] | no_license | '''
Write replace_words() function which takes two arguments:
• A text (str) in which some of the words are going to be replaced.
• A mapping (dict) from words to be replaced (str) to the new replacement words (str).
The function returns text with all words which appear in the mapping replaced.
A word is defined as having a single space or the beginning of the string on its left and a single space
or the end of the string on its right.
Example usage of the function:
TEXT = "We went to Wellington to find a nice place to eat. We think Wellington is nice!"
modified = replace_words(TEXT, {"Wellington": "Warsaw", "nice": "cool"})
print(modified)
# prints: We went to Warsaw to find a cool place to eat. We think Warsaw is nice!
print(replace_words(modified, {"We": "You"}))
# prints: You went to Warsaw to find a cool place to eat. You think Warsaw is nice!
Note that in the above example TEXT contains words "nice" and "nice!", so only the first one is replaced.
Just split on a single space!
'''
def replace_words(to_replace, replacement_dict):
    """Return `to_replace` with every whole word found in `replacement_dict`
    swapped for its mapped value.

    Words are the whitespace-separated tokens of the text; punctuation stays
    glued to its word, so "nice!" is not the same word as "nice".
    """
    tokens = to_replace.split()
    return ' '.join(replacement_dict.get(token, token) for token in tokens)
# Demo: swap the city and adjective, printing the text before and after.
text = "We went to Wellington to find a nice place to eat. We think Wellington is nice!"
print(text)
replacement_dictionary = {"Wellington": "Warsaw", "nice": "cool"}
print(replacement_dictionary)
modified = replace_words(text, replacement_dictionary)
print(modified)
| true |
224f4e1ae22a15de37d3db330f4f15d5df512041 | Python | konakallaANUSHA/python-deep-learning | /icp1/t2.py | UTF-8 | 699 | 3.96875 | 4 | [] | no_license | x = 'python'
# Strip 'on' from `x` ('python' -> 'pyth') and print it forwards and reversed.
y = (x.replace('on', ''))
print(y)
print(y[::-1])
# Drop the last two characters of a user string; print it and its reverse.
tr= input("enter a string: ")
z=tr[:-2]
print(z)
print(z[::-1])
# Replace 'python' with 'pythons' in a user string.
t= input("enter a string: ")
replaced_t = t.replace('python', 'pythons')
print ('Original string:', t)
print ('Replaced string:', replaced_t)
# Read two integers and show sum, true division and modulo.
num1String = input('Please enter an integer: ')
num2String = input('Please enter a second integer: ')
num1 = int(num1String)
num2 = int(num2String)
print ('Here is some output')
#print num1,' plus ',num2,' equals ',num1+num2 --python2
#print 'Thanks for playing'
print (num1,' plus ',num2,' equals ',num1+num2)
print (num1,' by ',num2,' equals ',num1/num2)  # ZeroDivisionError if num2 == 0
print (num1,' mod ',num2,' equals ',num1 % num2)
| true |
a8b101e33e2bf83a3b1c4dbe7b99629ca8919b24 | Python | mathandy/svgpathtools | /test/test_groups.py | UTF-8 | 11,605 | 2.9375 | 3 | [
"MIT"
] | permissive | """Tests related to SVG groups.
To run these tests, you can use (from root svgpathtools directory):
$ python -m unittest test.test_groups.TestGroups.test_group_flatten
"""
from __future__ import division, absolute_import, print_function
import unittest
from svgpathtools import Document, SVG_NAMESPACE, parse_path, Line, Arc
from os.path import join, dirname
import numpy as np
# When an assert fails, show the full error message, don't truncate it.
unittest.util._MAX_LENGTH = 999999999
def get_desired_path(name, paths):
    """Return the first path whose element carries test:name == `name`.

    Raises StopIteration when no path matches.
    """
    matches = (path for path in paths
               if path.element.get('{some://testuri}name') == name)
    return next(matches)
class TestGroups(unittest.TestCase):
    """Checks that Document flattens SVG group transforms correctly and that
    groups/paths can be added to and retrieved from a Document."""

    def check_values(self, v, z):
        # Check that the components of 2D vector v match the components
        # of complex number z
        self.assertAlmostEqual(v[0], z.real)
        self.assertAlmostEqual(v[1], z.imag)

    def check_line(self, tf, v_s_vals, v_e_relative_vals, name, paths):
        # Check that the endpoints of the line have been correctly transformed.
        # * tf is the transform that should have been applied.
        # * v_s_vals is a 2D list of the values of the line's start point
        # * v_e_relative_vals is a 2D list of the values of the line's
        #   end point relative to the start point
        # * name is the path name (value of the test:name attribute in
        #   the SVG document)
        # * paths is the output of doc.paths()
        # Promote both points to homogeneous coordinates before applying tf.
        v_s_vals.append(1.0)
        v_e_relative_vals.append(0.0)
        v_s = np.array(v_s_vals)
        v_e = v_s + v_e_relative_vals
        actual = get_desired_path(name, paths)
        self.check_values(tf.dot(v_s), actual.start)
        self.check_values(tf.dot(v_e), actual.end)

    def test_group_transform(self):
        # The input svg has a group transform of "scale(1,-1)", which
        # can mess with Arc sweeps.
        doc = Document(join(dirname(__file__), 'negative-scale.svg'))
        path = doc.paths()[0]
        self.assertEqual(path[0], Line(start=-10j, end=-80j))
        self.assertEqual(path[1], Arc(start=-80j, radius=(30+30j), rotation=0.0, large_arc=True, sweep=True, end=-140j))
        self.assertEqual(path[2], Arc(start=-140j, radius=(20+20j), rotation=0.0, large_arc=False, sweep=False, end=-100j))
        self.assertEqual(path[3], Line(start=-100j, end=(100-100j)))
        self.assertEqual(path[4], Arc(start=(100-100j), radius=(20+20j), rotation=0.0, large_arc=True, sweep=False, end=(100-140j)))
        self.assertEqual(path[5], Arc(start=(100-140j), radius=(30+30j), rotation=0.0, large_arc=False, sweep=True, end=(100-80j)))
        self.assertEqual(path[6], Line(start=(100-80j), end=(100-10j)))
        self.assertEqual(path[7], Arc(start=(100-10j), radius=(10+10j), rotation=0.0, large_arc=False, sweep=True, end=(90+0j)))
        self.assertEqual(path[8], Line(start=(90+0j), end=(10+0j)))
        self.assertEqual(path[9], Arc(start=(10+0j), radius=(10+10j), rotation=0.0, large_arc=False, sweep=True, end=-10j))

    def test_group_flatten(self):
        # Test the Document.paths() function against the
        # groups.svg test file.
        # There are 12 paths in that file, with various levels of being
        # nested inside of group transforms.
        # The check_line function is used to reduce the boilerplate,
        # since all the tests are very similar.
        # This test covers each of the different types of transforms
        # that are specified by the SVG standard.
        doc = Document(join(dirname(__file__), 'groups.svg'))
        result = doc.paths()
        self.assertEqual(12, len(result))
        # Each tf_* below is the expected 3x3 homogeneous transform for the
        # correspondingly named group in groups.svg.
        tf_matrix_group = np.array([[1.5, 0.0, -40.0],
                                    [0.0, 0.5, 20.0],
                                    [0.0, 0.0, 1.0]])
        self.check_line(tf_matrix_group,
                        [183, 183], [0.0, -50],
                        'path00', result)
        tf_scale_group = np.array([[1.25, 0.0, 0.0],
                                   [0.0, 1.25, 0.0],
                                   [0.0, 0.0, 1.0]])
        self.check_line(tf_matrix_group.dot(tf_scale_group),
                        [122, 320], [-50.0, 0.0],
                        'path01', result)
        self.check_line(tf_matrix_group.dot(tf_scale_group),
                        [150, 200], [-50, 25],
                        'path02', result)
        self.check_line(tf_matrix_group.dot(tf_scale_group),
                        [150, 200], [-50, 25],
                        'path03', result)
        tf_nested_translate_group = np.array([[1, 0, 20],
                                              [0, 1, 0],
                                              [0, 0, 1]])
        self.check_line(tf_matrix_group.dot(tf_scale_group
                                            ).dot(tf_nested_translate_group),
                        [150, 200], [-50, 25],
                        'path04', result)
        tf_nested_translate_xy_group = np.array([[1, 0, 20],
                                                 [0, 1, 30],
                                                 [0, 0, 1]])
        self.check_line(tf_matrix_group.dot(tf_scale_group
                                            ).dot(tf_nested_translate_xy_group),
                        [150, 200], [-50, 25],
                        'path05', result)
        tf_scale_xy_group = np.array([[0.5, 0, 0],
                                      [0, 1.5, 0.0],
                                      [0, 0, 1]])
        self.check_line(tf_matrix_group.dot(tf_scale_xy_group),
                        [122, 320], [-50, 0],
                        'path06', result)
        a_07 = 20.0*np.pi/180.0
        tf_rotate_group = np.array([[np.cos(a_07), -np.sin(a_07), 0],
                                    [np.sin(a_07), np.cos(a_07), 0],
                                    [0, 0, 1]])
        self.check_line(tf_matrix_group.dot(tf_rotate_group),
                        [183, 183], [0, 30],
                        'path07', result)
        # Rotation about a point = translate to origin, rotate, translate back.
        a_08 = 45.0*np.pi/180.0
        tf_rotate_xy_group_R = np.array([[np.cos(a_08), -np.sin(a_08), 0],
                                         [np.sin(a_08), np.cos(a_08), 0],
                                         [0, 0, 1]])
        tf_rotate_xy_group_T = np.array([[1, 0, 183],
                                         [0, 1, 183],
                                         [0, 0, 1]])
        tf_rotate_xy_group = tf_rotate_xy_group_T.dot(
            tf_rotate_xy_group_R).dot(
            np.linalg.inv(tf_rotate_xy_group_T))
        self.check_line(tf_matrix_group.dot(tf_rotate_xy_group),
                        [183, 183], [0, 30],
                        'path08', result)
        a_09 = 5.0*np.pi/180.0
        tf_skew_x_group = np.array([[1, np.tan(a_09), 0],
                                    [0, 1, 0],
                                    [0, 0, 1]])
        self.check_line(tf_matrix_group.dot(tf_skew_x_group),
                        [183, 183], [40, 40],
                        'path09', result)
        a_10 = 5.0*np.pi/180.0
        tf_skew_y_group = np.array([[1, 0, 0],
                                    [np.tan(a_10), 1, 0],
                                    [0, 0, 1]])
        self.check_line(tf_matrix_group.dot(tf_skew_y_group),
                        [183, 183], [40, 40],
                        'path10', result)
        # This last test is for handling transforms that are defined as
        # attributes of a <path> element.
        a_11 = -40*np.pi/180.0
        tf_path11_R = np.array([[np.cos(a_11), -np.sin(a_11), 0],
                                [np.sin(a_11), np.cos(a_11), 0],
                                [0, 0, 1]])
        tf_path11_T = np.array([[1, 0, 100],
                                [0, 1, 100],
                                [0, 0, 1]])
        tf_path11 = tf_path11_T.dot(tf_path11_R).dot(np.linalg.inv(tf_path11_T))
        self.check_line(tf_matrix_group.dot(tf_skew_y_group).dot(tf_path11),
                        [180, 20], [-70, 80],
                        'path11', result)

    def check_group_count(self, doc, expected_count):
        """Assert the document contains exactly `expected_count` <g> elements."""
        count = 0
        for _ in doc.tree.getroot().iter('{{{0}}}g'.format(SVG_NAMESPACE['svg'])):
            count += 1
        self.assertEqual(expected_count, count)

    def test_nested_group(self):
        # A bug in the flattened_paths_from_group() implementation made it so that only top-level
        # groups could have their paths flattened. This is a regression test to make
        # sure that when a nested group is requested, its paths can also be flattened.
        doc = Document(join(dirname(__file__), 'groups.svg'))
        result = doc.paths_from_group(['matrix group', 'scale group'])
        self.assertEqual(len(result), 5)

    def test_add_group(self):
        # Test `Document.add_group()` function and related Document functions.
        doc = Document(None)
        self.check_group_count(doc, 0)
        base_group = doc.add_group()
        base_group.set('id', 'base_group')
        self.assertTrue(doc.contains_group(base_group))
        self.check_group_count(doc, 1)
        child_group = doc.add_group(parent=base_group)
        child_group.set('id', 'child_group')
        self.assertTrue(doc.contains_group(child_group))
        self.check_group_count(doc, 2)
        grandchild_group = doc.add_group(parent=child_group)
        grandchild_group.set('id', 'grandchild_group')
        self.assertTrue(doc.contains_group(grandchild_group))
        self.check_group_count(doc, 3)
        sibling_group = doc.add_group(parent=base_group)
        sibling_group.set('id', 'sibling_group')
        self.assertTrue(doc.contains_group(sibling_group))
        self.check_group_count(doc, 4)
        # Test that we can retrieve each new group from the document
        self.assertEqual(base_group, doc.get_or_add_group(['base_group']))
        self.assertEqual(child_group, doc.get_or_add_group(
            ['base_group', 'child_group']))
        self.assertEqual(grandchild_group, doc.get_or_add_group(
            ['base_group', 'child_group', 'grandchild_group']))
        self.assertEqual(sibling_group, doc.get_or_add_group(
            ['base_group', 'sibling_group']))
        # Create a new nested group
        new_child = doc.get_or_add_group(
            ['base_group', 'new_parent', 'new_child'])
        self.check_group_count(doc, 6)
        self.assertEqual(new_child, doc.get_or_add_group(
            ['base_group', 'new_parent', 'new_child']))
        new_leaf = doc.get_or_add_group(
            ['base_group', 'new_parent', 'new_child', 'new_leaf'])
        self.assertEqual(new_leaf, doc.get_or_add_group([
            'base_group', 'new_parent', 'new_child', 'new_leaf']))
        self.check_group_count(doc, 7)
        path_d = ('M 206.07112,858.41289 L 206.07112,-2.02031 '
                  'C -50.738,-81.14814 -20.36402,-105.87055 52.52793,-101.01525 '
                  'L 103.03556,0.0 '
                  'L 0.0,111.11678')
        # add_path accepts both a d-string and a parsed Path object.
        svg_path = doc.add_path(path_d, group=new_leaf)
        self.assertEqual(path_d, svg_path.get('d'))
        path = parse_path(path_d)
        svg_path = doc.add_path(path, group=new_leaf)
        self.assertEqual(path_d, svg_path.get('d'))
        # Test that paths are added to the correct group
        new_sibling = doc.get_or_add_group(
            ['base_group', 'new_parent', 'new_sibling'])
        doc.add_path(path, group=new_sibling)
        self.assertEqual(len(new_sibling), 1)
        self.assertEqual(path_d, new_sibling[0].get('d'))
| true |
030bd173a70b465ec5e94402ec813f81849d0048 | Python | neilxdim/PythonWorking | /U4/LDA_sklearn.py | UTF-8 | 2,702 | 2.796875 | 3 | [] | no_license | from sklearn.lda import LDA
from sklearn.decomposition import PCA as sklearnPCA
import matplotlib.pyplot as plt
from sklearn import datasets
import pandas as pd
# Load the iris data into a DataFrame with numeric and named targets.
iris = datasets.load_iris()
df=pd.DataFrame(data=iris.data, columns=iris.feature_names)
df['target']=iris.target
df['target_names']=df['target'].map(lambda x: iris.target_names[x])
# Reorder columns so target_names comes before target.
# NOTE(review): DataFrame.reindex_axis and .ix were removed in modern
# pandas; use .reindex(columns=...) and .iloc instead.
df=df.reindex_axis(df.columns[[0,1,2,3,5,4]],axis=1)
# split data table into data X and class labels y
X = df.ix[:,0:4].values
y = iris.target
label_dict = {0: 'Setosa', 1: 'Versicolor', 2:'Virginica'}
# Standardizing
# from sklearn.preprocessing import StandardScaler
# X_std = StandardScaler().fit_transform(X)
# LDA
# NOTE(review): sklearn.lda.LDA was renamed to
# sklearn.discriminant_analysis.LinearDiscriminantAnalysis long ago.
sklearn_lda = LDA(n_components=2)
X_lda_sklearn = sklearn_lda.fit_transform(X, y)
def plot_scikit_lda(X, y, title, mirror=1):
    """Scatter-plot the 2-D LDA projection X, coloring points by label y.

    mirror (+1 or -1) flips LD1 so orientations can be made comparable
    between different decompositions.
    """
    # fig, ax = plt.subplots()
    # ax=plt.subplot(111)
    for label,marker,color in zip(
        range(0,3),('^', 's', 'o'),('blue', 'red', 'green')):
        plt.scatter(x=X[:,0][y == label]*mirror,
                y=X[:,1][y == label],
                marker=marker,
                color=color,
                alpha=0.5,
                label=label_dict[label]
                )
    plt.xlabel('LD1')
    plt.ylabel('LD2')
    leg = plt.legend(loc='upper right', fancybox=True)
    leg.get_frame().set_alpha(0.5)
    plt.title(title)
    # hide axis ticks
    # NOTE(review): string "off"/"on" values for tick_params are a
    # deprecated matplotlib idiom; modern versions expect booleans.
    plt.tick_params(axis="both", which="both", bottom="off", top="off",
            labelbottom="on", left="off", right="off", labelleft="on")
    # remove axis spines
    # ax.spines["top"].set_visible(False)
    # ax.spines["right"].set_visible(False)
    # ax.spines["bottom"].set_visible(False)
    # ax.spines["left"].set_visible(False)
    plt.grid()
    # NOTE(review): missing parentheses — this references tight_layout
    # without calling it; should be plt.tight_layout().
    plt.tight_layout
    plt.show()
# Left panel: LDA projection colored by the true labels.
fig=plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plot_scikit_lda(X_lda_sklearn, y, title='LDA via scikit-learn')
# kMeans on decomposed data
from scipy.cluster.vq import kmeans, vq, whiten
centroids, dist =kmeans(X_lda_sklearn,3)
idx, idxdist = vq(X_lda_sklearn, centroids)
# lazy move to align kmeans' labels with target labels
# NOTE(review): assumes samples 0, 75 and -1 land in three distinct
# clusters; if any coincide the relabeling collides.
x0 = (idx==idx[0]).nonzero()
x1 = (idx==idx[75]).nonzero()
x2 = (idx==idx[-1]).nonzero()
idx[x0], idx[x1], idx[x2] = 0,1,2
# Right panel: same projection colored by the kMeans cluster labels.
plt.subplot(1,2,2)
# plt.scatter(X_lda_sklearn[:,0], X_lda_sklearn[:,1], c=idx.reshape(150,1), alpha=.8, s=40)
plot_scikit_lda(X_lda_sklearn, idx, title='LDA via scikit-learn')
# Mark the misclustered samples with black crosses.
plt.scatter(x=X_lda_sklearn[idx!=y, 0],
            y=X_lda_sklearn[idx!=y, 1],
            marker='x',
            color='k',
            alpha=0.75, s=100)
plt.title('kMeans on LDA data')
# plt.axis('tight')
# NOTE(review): Python-2 print statement; wrap in parentheses for Python 3.
print "kMeans accuracy on decomposed data:", str((idx==y).sum()) + "/" + str(len(y))
| true |
44d90415da630d905203c2d010d2a983b97b8d21 | Python | Anshumanformal/data-analysis-mini-project | /data analysis mini project.py | UTF-8 | 969 | 4.34375 | 4 | [] | no_license | """
Name: Python Data Analysis
Purpose: Read CSV File and store data in dictionary
Algorithm:
Step 1: Opening File in read mode and looping through data
Step 2: Printing the data just to ensure successful read
"""
import os
print("A Simple Data Analysis Program")
print()
d1 = {}
# NOTE(review): '<YOUR FILE LOCATION HERE>' is a placeholder — the script
# fails with FileNotFoundError until a real directory is substituted.
with open(os.path.join(r'<YOUR FILE LOCATION HERE>','Emissions.csv'), 'r') as file:
    # Read in file object and splitting it with '\n'
    f1 = file.read().split('\n')
    for data in f1:
        # Updating the dictionary file | Splitting the string by COMMA(,) - Store first value as KEY
        # and Store other value as VALUE
        a1 = data.split(',')[0] # first field of this line (str) -> dict key
        a2 = data.split(',')[1:] # remaining fields of this line -> dict value
        d1.update({ a1: a2})
for x, y in d1.items():
    print(x, end=" - ")
    print(y)
print()
print("All data from Emissions.csv has been read into a dictionary.")
| true |
3d5cae6e349bc1eb2e0478eec3139aadeaab302b | Python | varishtyagi786/My-Projects-Using-Python | /scaler.py | UTF-8 | 76 | 2.65625 | 3 | [] | no_license | A=[1,2,3,4]
B=[5,6,7,8,9]
# FIX: the original wrapped the generator expression in square brackets —
# num was a one-element list holding a generator object, so print(num)
# showed "[<generator object ...>]" instead of the pairs. A list
# comprehension materializes the Cartesian product of A and B.
num = [(a, b) for a in A for b in B]
print(num)
9bffccea5139bae973e4ceb3ff491b249ca5e34a | Python | hsauers5/BankingApplication | /accounts_manager.py | UTF-8 | 1,410 | 3.078125 | 3 | [] | no_license | from account import Account
class AccountsManager:
    """Read-side helper around the `accounts` table.

    NOTE: queries are built by string interpolation (as in the original);
    callers must not pass untrusted input.
    """

    def __init__(self, database_connector):
        # Any object exposing execute_query(sql) -> iterable of rows.
        self.database_connector = database_connector

    def fetch_accounts(self, username):
        """Return every account owned by `username` as Account objects."""
        query = f'SELECT * FROM accounts WHERE Username = "{username}";'
        rows = self.database_connector.execute_query(query)
        return [Account(row) for row in rows]

    def fetch_account(self, account_id):
        """Return the account with this id, or None when it does not exist."""
        query = f'SELECT * FROM accounts WHERE ID = {account_id}'
        rows = self.database_connector.execute_query(query)
        accounts = [Account(row) for row in rows]
        return accounts[0] if accounts else None

    def fetch_account_balance(self, username, account_type):
        """Balance of the user's account of `account_type`; False when absent."""
        for account in self.fetch_accounts(username):
            if account.type == account_type:
                return account.balance
        return False

    def user_has_account(self, username, account_id):
        """True when `username` owns the account with id `account_id`."""
        wanted = int(account_id)
        return any(account.id == wanted
                   for account in self.fetch_accounts(username))

    def account_has_sufficient_funds(self, account_id, amount):
        """True when the account's balance covers `amount`."""
        account = self.fetch_account(account_id)
        return amount <= account.balance
| true |
d0efe0a3d8311f14882d2233b1c918dfcc5d68cf | Python | HBGDki/ODSC16-Hackathon | /xgb_n_row_per_subj.py | UTF-8 | 8,580 | 2.765625 | 3 | [] | no_license |
# coding: utf-8
# Notebook export: predicts birth weight (BWT_40) from pre-birth ultrasound
# measurements, pairing each measurement with the subject's previous one,
# then tunes an XGBoost regressor with group-aware cross-validation and
# plots the error against gestational age.
# In[1]:
import matplotlib.pyplot as plt
import matplotlib
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
get_ipython().magic(u'matplotlib inline')
import seaborn.apionly as sns
# ignore pandas warnings
import warnings
warnings.simplefilter('ignore')
import time
start = time.time()
# In[2]:
# load data
data = pd.read_csv('training_ultrasound.csv')
# remove agedays > 0 ( we just only focus pre-birth measurements)
data = data[data['AGEDAYS']<0]
# drop rows with missing data in any of the 5 main columns
ultrasound = ['HCIRCM', 'ABCIRCM', 'BPDCM', 'FEMURCM']
target = 'BWT_40'
data.dropna(subset=ultrasound+[target], inplace=True)
# correct faulty data
data.loc[data['STUDYID']==2, 'PARITY'] = data.loc[data['STUDYID']==2, 'PARITY'] + 1
# In[3]:
data = data.drop_duplicates(subset=(ultrasound+['SUBJID']))
# ## Model
# In[4]:
# select basic vars
df = data[['SUBJID'] + ultrasound + ['GAGEDAYS', 'SEXN', 'PARITY', 'GRAVIDA'] + [target]]
# In[5]:
# NOTE(review): notebook leftover -- the result is not printed or assigned,
# so this line is a no-op when run as a script.
df.isnull().sum()
# In[6]:
# there is missing data for parity and gravida: this happens for first pregnancy --> fill with 1s
df.fillna(1, inplace=True)
# replace sex values to 0 and 1
df['SEXN'] = df['SEXN'].replace([1,2], [0,1])
# Generate a DF with several rows per baby. Each row represents the current measurement together to the previous (is there is not a previous, filled with NA)
# In[7]:
vars_previous = ['GAGEDAYS'] + ultrasound
# In[8]:
# Sort so that shift(1) below pairs each row with the same subject's
# previous measurement in time.
df = df.sort_values(by=['SUBJID','GAGEDAYS'], ascending=[True,True])
# In[9]:
shifted_df = df[['SUBJID'] + ultrasound + ['GAGEDAYS']].shift(1)
shifted_df.columns = shifted_df.columns + '_prev'
# In[10]:
# Keep only shifted rows that belong to the same subject (drops each
# subject's first measurement, which has no predecessor).
shifted_df['SUBJID'] = df['SUBJID']
shifted_df = shifted_df[shifted_df['SUBJID'] == shifted_df['SUBJID_prev']].drop(['SUBJID','SUBJID_prev'], axis=1)
# In[11]:
df_m = df.merge(shifted_df,how='left',left_index=True,right_index=True)
# In[12]:
# Append per-subject measurement count as column 'SUBJID_count'.
df_m = df_m.merge(df_m.groupby('SUBJID')[['SUBJID']].count(),
                  how='left',left_on='SUBJID',right_index=True,suffixes=('', '_count'))
# In[13]:
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas; this
# full-slice is a no-op anyway -- use .loc/.iloc if the pin is upgraded.
df_m = df_m.ix[:,:]
df_m.head(10)
# ### Split train/test data
# In[14]:
# sklearn imports
from sklearn.model_selection import train_test_split, KFold, GroupKFold, cross_val_score, RandomizedSearchCV
from sklearn.metrics import mean_absolute_error
# NOTE(review): star import -- presumably provides report() used below; verify.
from aux_fun import *
# In[15]:
gkf = GroupKFold(n_splits=5)
# In[16]:
# df to np arrays
X = df_m.drop(target,axis=1).values
groups_for_train_test_split = X[:,0]
Y = df_m[target].values
# train-test split
# Grouped split: all rows of a subject land in the same fold, so the test
# set contains only unseen subjects.
train_idx, test_idx = list(gkf.split(X, Y, groups=groups_for_train_test_split))[0]
x_train, y_train = X[train_idx], Y[train_idx]
x_test, y_test = X[test_idx], Y[test_idx]
groups_for_cv = x_train[:,0]
no_of_measurements = x_test[:,-1]
# Drop SUBJID (first column) and the measurement count (last column) from
# the feature matrices.
x_train = x_train[:,1:-1]
x_test = x_test[:,1:-1]
# ### CV strategy
# In[17]:
gkf_cv = list(gkf.split(x_train,y_train,groups_for_cv))
# # XGBoost
# In[18]:
from xgboost import XGBRegressor
xgb = XGBRegressor()
# In[19]:
params_grid = {
    'max_depth': np.arange(1,6),
    'subsample': np.arange(0.7,1.0,0.1),
    'learning_rate': np.arange(0.02,0.1,0.01),
    'n_estimators': np.arange(50,1000,200)
}
# In[20]:
# NOTE(review): scoring='mean_absolute_error' only works on old sklearn;
# modern versions require 'neg_mean_absolute_error' -- confirm the pinned
# sklearn version before rerunning.
random_search = RandomizedSearchCV(xgb, param_distributions=params_grid, n_iter=50,
                                   n_jobs=-1, scoring='mean_absolute_error', cv=gkf_cv, random_state=0, verbose=2)
random_search.fit(x_train,y_train)
# In[21]:
best_params = random_search.cv_results_['params'][np.flatnonzero(random_search.cv_results_['rank_test_score'] == 1)[0]]
report(random_search.cv_results_)
# In[22]:
scores = list()
# evaluate model with best alpha given by CV
xgb.set_params(**best_params)
for train_k, test_k in gkf_cv:
    xgb.fit(x_train[train_k],y_train[train_k])
    w_true_k = y_train[test_k]
    w_pred_k = xgb.predict(x_train[test_k])
    scores.append(mean_absolute_error(w_true_k, w_pred_k))
print('Weight error: %0.4f +- %0.4f' % (np.mean(scores),2*np.std(scores)))
# #### Fit whole train with best hyperparameters
# In[23]:
xgb.fit(x_train,y_train)
# In[24]:
w_true = y_test
w_pred = xgb.predict(x_test)
abs_error = mean_absolute_error(w_true, w_pred)
pct_error = abs_error / w_true
print('Test mean abs error: ', abs_error)
print('Mean relative error: %0.4f' % pct_error.mean())
# # Plot confidence bins
# In[25]:
# Per-sample absolute percentage error (note: pct_error is reused/redefined).
pct_error = np.abs(w_true-w_pred)/w_true*100
mean_pct_error = pct_error.mean()
# In[26]:
# Bin errors into 14-day (two-week) gestational-age buckets.
t = x_test[:,4]
week_bins = np.digitize(x=t, bins=np.arange(0,t.max(),14))
data_plot = pd.DataFrame({'t':t, 'pct_error':pct_error, 'no_of_measurements': no_of_measurements.astype(int)})
pct_error_binned_df = pd.DataFrame(np.concatenate((pct_error.reshape(-1,1),week_bins.reshape(-1,1)),axis=1),
                                   columns=['y_test','bin'])
pct_error_binned_df = pct_error_binned_df.groupby('bin').agg([np.mean,np.std,'count'])
pct_error_binned_df.columns = pct_error_binned_df.columns.droplevel()
# Bin index -> bin-center day (each bin is 14 days wide).
reescaled_x = pct_error_binned_df.index.to_series().values*14-7
# In[27]:
times_sigma = 1
pct_error_binned_df['upper'] = pct_error_binned_df['mean'] + times_sigma*pct_error_binned_df['std']
pct_error_binned_df['lower'] = pct_error_binned_df['mean'] - times_sigma*pct_error_binned_df['std']
# Clamp the lower band at zero (percentage error cannot be negative).
pct_error_binned_df['lower'] *= pct_error_binned_df['lower'] > 0
# In[28]:
# Scatter of errors vs gestational age with binned mean +- sigma band,
# plus a marginal KDE of the error distribution.
fig = plt.figure(figsize=(9,4))
gs = gridspec.GridSpec(1,2,width_ratios=[3,1])
ax = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
sns.regplot(x=t,y=pct_error, scatter_kws={'alpha':0.1},fit_reg=False,ax=ax)
ax.plot(reescaled_x,pct_error_binned_df['mean'],label='mean',lw=2,color='k')
ax.fill_between(reescaled_x, pct_error_binned_df['lower'], pct_error_binned_df['upper'],
                facecolor='grey', alpha=0.2, label=r'$\pm \sigma$ interval')
ax.set_xlim(t.min(),t.max())
ax.set_ylim(0,40)
ax.set_xlabel('GAGEDAYS of measurement')
ax.set_ylabel('% error')
ax.set_title('Influence of the time of measurement\n on the error (out of sample)\n')
ax.hlines(mean_pct_error,xmin=0,xmax=350,colors='r',linestyles='dashed',label='overall mean')
ax.legend()
sns.kdeplot(pct_error, vertical=True,legend=False, shade=True, lw=1, ax=ax2)
ax2.set_title('KDE')
ax2.set_ylabel('')
ax2.set_ylim(0,40)
ax2.set_xlim(0,0.1)
plt.setp(ax2.get_yticklabels(), visible=False)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.show();
# In[29]:
# Same scatter, colored by the subject's number of measurements.
ax = sns.lmplot(x='t',y='pct_error', hue='no_of_measurements', data=data_plot, fit_reg=False,
                scatter_kws={'alpha':0.5}, palette=sns.color_palette("coolwarm", 7), aspect=1.2).ax
ax.plot(reescaled_x,pct_error_binned_df['mean'],label='mean',lw=2,color='k')
ax.fill_between(reescaled_x, pct_error_binned_df['lower'], pct_error_binned_df['upper'],
                facecolor='grey', alpha=0.2, label=r'$\pm \sigma$ interval')
ax.set_xlim(t.min(),t.max())
ax.set_ylim(0,40)
ax.set_xlabel('GAGEDAYS of measurement')
ax.set_ylabel('% error')
ax.set_title('Influence of the time of measurement\n on the error (out of sample)\n')
ax.hlines(mean_pct_error,xmin=0,xmax=350,colors='r',linestyles='dashed',label='overall mean')
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[-2:],labels=labels[-2:])
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True);
# In[30]:
# Quartile view of the error distribution over gestational age.
ax = sns.lmplot(x='t',y='pct_error', hue='no_of_measurements', data=data_plot, fit_reg=False,
                scatter_kws={'alpha':0.5}, palette=sns.color_palette("coolwarm", 7), aspect=1.2).ax
ax.set_ylim(0,30)
ax.set_xlim(t.min(),t.max())
ax.set_xlabel('GAGEDAYS of measurement')
ax.set_ylabel('% error')
ax.hlines(mean_pct_error,xmin=0,xmax=350,colors='k',label='mean', lw=2)
ax.hlines(data_plot['pct_error'].quantile(0.75),xmin=0,xmax=350,colors='b',linestyles='dashed',label='q3', lw=2)
ax.hlines(data_plot['pct_error'].quantile(0.5),xmin=0,xmax=350,colors='k',linestyles='dashed',label='median')
ax.hlines(data_plot['pct_error'].quantile(0.25),xmin=0,xmax=350,colors='b',linestyles='dashed',label='q1', lw=2)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[-4:],labels=labels[-4:])
ax.spines['top'].set_visible(True)
ax.spines['right'].set_visible(True);
# In[31]:
# Elapsed wall-clock time (no-op as a script: the value is discarded).
time.time() - start
# In[32]:
# NOTE(review): pd.datetime is deprecated in modern pandas -- use
# datetime.datetime.now() instead.
print('Latest execution: %s' % pd.datetime.now())
| true |
1032c677c34a7ebd317d68aa28890ed9866a540a | Python | abhishekkr/tutorials_as_code | /talks-articles/machine-learning/implementations/speech-recognition/offline-speech-recognition-using-pocketsphinx.py | UTF-8 | 2,104 | 2.875 | 3 | [
"MIT"
] | permissive | """
https://pypi.org/project/SpeechRecognition/3.2.0/
### Prepare
* install required python packages
```
pip3 install SpeechRecognition gtts pygame pyaudio pocketsphinx
```
* If installation fails with errors about missing audio development packages, install the audio development libraries first, for example:
```
## the dnf command below is for Fedora; on other distros substitute your package manager (apt-get, pacman, ...)
sudo dnf install -y pulseaudio-libs-devel
```
"""
import os
import speech_recognition as sr
import pocketsphinx
from gtts import gTTS
#quiet the endless 'insecurerequest' warning
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
from pygame import mixer
mixer.init()
def microphone_index():
    """Return the device index of the microphone named by $SR_MIC.

    Falls back to the device literally named "default" when the variable
    is unset; returns None when no attached device matches the name.
    """
    try:
        mic_name = os.environ["SR_MIC"]
    except KeyError:
        # BUG FIX: was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit.  Catch only the missing-variable case.
        mic_name = "default"
        print("using %s microphone, can customized providing entire name" % ("SR_MIC"))
    for idx, name in enumerate(sr.Microphone.list_microphone_names()):
        if name == mic_name:
            return idx
    return None
def persist_n_play(tts):
    """Save the synthesized speech to response.mp3 and start playback.

    Best-effort: any failure is printed and otherwise ignored so the
    outer listen loop keeps running.
    """
    try:
        audio_path = "response.mp3"
        tts.save(audio_path)
        mixer.music.load(audio_path)
        mixer.music.play()
    except Exception as err:
        print(err)
        print("-----------------------")
def recognize_audio(recognizer, audio):
    """Transcribe *audio* with the offline PocketSphinx engine and speak
    the transcription back via gTTS/pygame.

    NOTE(review): gTTS performs an online request, so only the speech
    *recognition* half of this script is truly offline.
    """
    try:
        response = recognizer.recognize_sphinx(audio)
        #response = recognizer.recognize_google(audio)  # online alternative
        print("I think you said '" + response + "'")
        tts = gTTS(text="I think you said: "+str(response), lang='en')
        persist_n_play(tts)
    except sr.UnknownValueError:
        # The engine produced no hypothesis for this audio.
        print("Sphinx could not understand audio")
    except sr.RequestError as e:
        # Engine/installation problem (e.g. missing pocketsphinx data files).
        print("Sphinx error; {0}".format(e))
# Main loop: calibrate against ambient noise, capture up to five seconds
# of speech, then transcribe and echo it back.
while True:  # FIX: idiomatic form of the original ``while (True == True)``
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone(device_index=microphone_index()) as source:
        print("Please wait. Calibrating microphone... please wait 5 seconds")
        # listen for 5 second and create the ambient noise energy level
        r.adjust_for_ambient_noise(source, duration=5)
        print("Say something!")
        audio = r.listen(source, phrase_time_limit=5)
    recognize_audio(r, audio)
| true |
0affe56f1e719ffb8e7d4215fce181ffba3a6541 | Python | ai-kmu/etc | /algorithm/2022/0913_1191_K-Concatenation_Maximum_Sum/Juwan.py | UTF-8 | 648 | 2.734375 | 3 | [] | no_license | class Solution:
def kConcatenationMaxSum(self, arr: List[int], k: int) -> int:
def max_val_subarr(arr):
max_val = 0
m = len(arr)
temp = 0
for i in range(m):
temp += arr[i]
max_val = max(temp, max_val)
if temp < 0:
temp = 0
return max_val
if k < 3:
return max_val_subarr(arr*k)%(10**9+7)
a = max_val_subarr(arr)
b = max_val_subarr(arr*2)
return max([a, b, b + sum(arr)*(k - 2)])%(10**9+7)
| true |
3627296bf61695d7ca2a39816bacc4d003521c56 | Python | SahaRahul/big-data | /Assignment-3/local/task3/reduce.py | UTF-8 | 708 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python
import sys
# Streaming (Hadoop) reducer.  stdin arrives sorted by key, each line
# formatted "<medallion>,<tag>&<values>".  For each medallion we buffer its
# trip_fare rows and, on key change, join every buffered fare with the
# medallion's license (vehicle) data.
# NOTE(review): the final group is never flushed -- after EOF the last
# medallion's buffered trip_fares are silently dropped.  Mirror the emit
# loop after stdin is exhausted (or sentinel-terminate the input) to fix.
current_medallion = None
vehicle_data = False
trip_fares = []
for line in sys.stdin:
    tag_medallion, values = line.strip().split('&', 1)
    medallion, tag = tag_medallion.strip().split(',', 1)
    if medallion != current_medallion:
        # Key changed: emit the previous group, then reset the buffers.
        for trip_fare in trip_fares:
            if vehicle_data:
                print ("KEY: %s VALUE: %s" % (current_medallion, trip_fare + ',' + vehicle_data))
        trip_fares = []
        vehicle_data = False
    if tag == 'trip_fare': # task1 - 1st in output
        trip_fares.append(values)
    elif tag == 'license': # license
        vehicle_data = values
current_medallion = medallion | true |
d18cad2f5d9a51cc0c5019a211bb9c4864dbf22f | Python | gbrs/EGE_current | /#26_27423.py | UTF-8 | 2,055 | 3.375 | 3 | [] | no_license | '''
Системный администратор раз в неделю создаёт архив пользовательских
файлов. Однако объём диска, куда он помещает архив, может быть меньше,
чем суммарный объём архивируемых файлов. Известно, какой объём занимает
файл каждого пользователя.
По заданной информации об объёме файлов пользователей и свободном объёме
на архивном диске определите максимальное число пользователей,
чьи файлы можно сохранить в архиве, а также максимальный размер
имеющегося файла, который может быть сохранён в архиве, при условии,
что сохранены файлы максимально возможного числа пользователей.
'''
# Read the input: the first line holds S (free archive capacity) and
# N (number of user files); the next N lines each hold one file size.
# Sort the sizes ascending for the greedy selection below.
with open('#26_27423.txt') as f:
    s, n = map(int, f.readline().split())
    lst = []
    for i in range(n):
        lst.append(int(f.readline()))
lst.sort()
# print(lst)
# Greedy phase 1: take files from smallest to largest while the running
# total sm still fits into s; mx_i ends up as the index of the last
# (largest) file taken, so mx_i + 1 is the maximal number of users saved.
sm = 0
for i in range(n):
    if sm + lst[i] <= s:
        sm += lst[i]
        mx_i = i
    else:
        break
# print(sm, mx_i, lst[mx_i])
# Phase 2: keep the count fixed and repeatedly try to swap the largest
# chosen file (last) for the next bigger remaining file while it still
# fits -- this maximizes the size of the largest stored file.
# NOTE(review): mx_i is unbound if even the smallest file exceeds s.
last = lst[mx_i]
for i in range(mx_i + 1, n):
    if sm - last + lst[i] <= s:
        sm = sm - last + lst[i]
        last = lst[i]
    else:
        break
print(mx_i + 1, last)
| true |
fd603c47ad7bfad691c258841a8f5be103ccb912 | Python | oshanis/covid19-stressors | /youtube/video_extraction.py | UTF-8 | 1,627 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | import argparse
import csv
import youtube_init
def get_video_ids(youtube, options):
    """Run a YouTube search and dump matching ids/titles to CSV files.

    Writes one CSV per result kind (videos, channels, playlists), each
    starting with an (id, title) header row.
    """
    # Call the search.list method to retrieve results matching the specified
    # query term.
    search_response = youtube.search().list(
        q=options.q,
        part='id,snippet',
        maxResults=options.max_results
    ).execute()
    # One bucket of (id, title) rows per result kind, header row first.
    rows = {
        'videos': [('id', 'title')],
        'channels': [('id', 'title')],
        'playlists': [('id', 'title')],
    }
    # Result kind -> (bucket name, id field inside item['id']).
    kind_dispatch = {
        'youtube#video': ('videos', 'videoId'),
        'youtube#channel': ('channels', 'channelId'),
        'youtube#playlist': ('playlists', 'playlistId'),
    }
    for item in search_response.get('items', []):
        dispatch = kind_dispatch.get(item['id']['kind'])
        if dispatch is not None:
            bucket, id_field = dispatch
            rows[bucket].append((item['id'][id_field], item['snippet']['title']))
    write_to_file("videos", rows['videos'])
    write_to_file("channels", rows['channels'])
    write_to_file("playlists", rows['playlists'])
def write_to_file(type_of_data, data):
    """Write *data* (an iterable of row tuples) to youtube/data/<type_of_data>.csv."""
    # newline='' is required by the csv module when handing it a file
    # object: without it the writer emits an extra blank line between
    # rows on Windows.
    with open('youtube/data/' + type_of_data + '.csv', 'w', newline='') as f:
        csv.writer(f).writerows(data)
def main():
    """Authenticate to the YouTube API, parse CLI options, run the search."""
    youtube = youtube_init.init()
    parser = argparse.ArgumentParser()
    parser.add_argument('--q', help='Search term', default='coronavirus unemployment')
    # argparse exposes this as args.max_results; the default stays an int.
    parser.add_argument('--max-results', help='Max results', default=25)
    args = parser.parse_args()
    get_video_ids(youtube, args)
if __name__ == "__main__":
main() | true |
4c5352628afa50df149edd9b0a1f9b04d0039f94 | Python | c940606/leetcode | /Ones and Zeroes.py | UTF-8 | 2,371 | 3.828125 | 4 | [] | no_license | class Solution(object):
def findMaxForm(self, strs, m, n):
"""
在计算机界中,我们总是追求用有限的资源获取最大的收益。
现在,假设你分别支配着 m 个 0 和 n 个 1。另外,还有一个仅包含 0 和 1 字符串的数组。
你的任务是使用给定的 m 个 0 和 n 个 1 ,找到能拼出存在于数组中的字符串的最大数量。每个 0 和 1 至多被使用一次。
注意:
给定 0 和 1 的数量都不会超过 100。
给定字符串数组的长度不会超过 600。
---
示例 1:
输入: Array = {"10", "0001", "111001", "1", "0"}, m = 5, n = 3
输出: 4
解释: 总共 4 个字符串可以通过 5 个 0 和 3 个 1 拼出,即 "10","0001","1","0" 。
---
示例 2:
输入: Array = {"10", "0", "1"}, m = 1, n = 1
输出: 2
解释: 你可以拼出 "10",但之后就没有剩余数字了。更好的选择是拼出 "0" 和 "1" 。
:type strs: List[str]
:type m: int
:type n: int
:rtype: int
"""
if not strs:
return 0
nums = len(strs)
dp = [[[0] * (n + 1) for _ in range(m + 1)] for _ in range(nums + 1)]
# print(dp)
for i in range(1, nums + 1):
temp_n = len(strs[i - 1])
zero_nums = strs[i - 1].count("0")
one_nums = temp_n - zero_nums
# print(zero_nums,one_nums)
for j in range(m+1):
for k in range(n+1):
# print(i,k)
# print(zero_nums,one_nums)
if j >= zero_nums and k >= one_nums:
dp[i][j][k] = max(dp[i - 1][j][k], dp[i - 1][j - zero_nums][k - one_nums] + 1)
else:
dp[i][j][k] = dp[i - 1][j][k]
# print(dp)
return dp[-1][-1][-1]
def findMaxForm1(self, strs, m, n):
if not strs:
return 0
nums = len(strs)
dp = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(nums):
# 先计算有多少0和1
zero_nums = 0
one_nums = 0
for alp in strs[i]:
if alp == "0":
zero_nums += 1
else:
one_nums += 1
for j in range(m,-1,-1):
for k in range(n,-1,-1):
if j >= zero_nums and k >= one_nums:
dp[j][k] = max(dp[j][k],dp[j-zero_nums][k-one_nums]+1)
# print(dp)
return dp[-1][-1]
# Ad-hoc smoke test: both implementations should print 4 for the LeetCode
# sample (budget of 5 zeros and 3 ones).
a = Solution()
print(a.findMaxForm(["10", "0001", "111001", "1", "0"], m=5, n=3))
print(a.findMaxForm1(["10", "0001", "111001", "1", "0"], m=5, n=3))
# print(a.findMaxForm(["10", "0", "1"], m=1, n=1))
# print(a.findMaxForm(["110110001001100","0000011"],19,1))
| true |
30a702b363df93783d9318b51422d1072cb6d645 | Python | ncarnahan19/Python_Garbage | /LetterWriter.py | UTF-8 | 1,217 | 3.296875 | 3 | [] | no_license | print("Class (AACC):")
# Interactive prompts: collect the two course names, the justification
# sentence, and whether a syllabus is attached.
AnneArundel = input()
print("Class (Liberty):")
LibertyClass = input()
print("Reason class should be substituted: \"This class is a \":")
reason = input()
print("Do we have a syllabus:")
gotIt = input()
# Any answer other than exactly 'yes' omits the syllabus mention.
if gotIt == 'yes':
    haveASyllabus = 'a course Syllabus, '
else:
    haveASyllabus = ''
letterContents = '''Good Evening Mr. Donahoo,
I am requesting a substitution for my required class, ''' + LibertyClass + '''. The class which I am requesting count as a substitution,
''' + AnneArundel + ''' taken at Anne Arundel Community College, ''' + reason + ''' Attached is a ''' + haveASyllabus + '''substitution request form and a course description. Thank you for your consideration!
Best Regards,
Nicholas Carnahan
443-875-8559
'''
print(letterContents)
print("Are you happy with the output?")
response = input()
if response.lower() == 'y':
    # Write contents to file
    # NOTE(review): prefer a ``with open(...)`` block, and consider adding
    # a file extension -- the variable name says Word doc but no
    # .docx/.txt suffix is appended.
    fileName = LibertyClass + '_SubstitutionRequestLetter'
    letterWordDoc = open(fileName, 'w')
    letterWordDoc.write(letterContents)
    letterWordDoc.close()
else:
    print("End Process")
'''>>> baconFile = open('bacon.txt', 'w')
>>> baconFile.write('Hello world!\n')
13
>>> baconFile.close()
''' | true |
f81894ca60ca1ca8730add3add6ee1806d563695 | Python | nakamura196/hi | /src/create_rdf_dump.py | UTF-8 | 6,178 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | import urllib.request
from bs4 import BeautifulSoup
import csv
from time import sleep
import pandas as pd
import json
import urllib.request
import os
from PIL import Image
from rdflib import URIRef, BNode, Literal, Graph
from rdflib.namespace import RDF, RDFS, FOAF, XSD
from rdflib import Namespace
result = {}  # NOTE(review): unused by the live code below (only the commented-out block used it)
# Load (id, image-url) rows and group the urls by id.
df = pd.read_excel("data2/images.xlsx", sheet_name=0, header=None, index_col=None)
r_count = len(df.index)
c_count = len(df.columns)
image_map = {}
for j in range(1, r_count):
    # NOTE(review): ``id`` shadows the builtin of the same name.
    id = df.iloc[j, 0]
    url = df.iloc[j, 1]
    if id not in image_map:
        image_map[id] = []
    image_map[id].append(url)
    # NOTE(review): this unconditional break stops after the first data
    # row, so image_map only ever holds one entry -- looks like leftover
    # debugging; confirm before removing.
    break
# Load the metadata sheet: row 0 holds labels, row 1 predicate URIs,
# row 2 the object type ('Resource' vs literal); data rows start at 3.
df = pd.read_excel("data2/metadata_edited.xlsx", sheet_name=0,
                   header=None, index_col=None)
r_count = len(df.index)
c_count = len(df.columns)
map = {}  # NOTE(review): shadows the builtin ``map`` (likewise ``type`` below)
g = Graph()
for i in range(1, c_count):
    label = df.iloc[0, i]
    uri = df.iloc[1, i]
    type = df.iloc[2, i]
    if not pd.isnull(type):
        obj = {}
        map[i] = obj
        obj["label"] = label
        obj["uri"] = uri
        obj["type"] = type
# Emit one triple per non-empty cell: column 0 supplies the subject URI;
# cells typed 'Resource' become URIRef objects, everything else a Literal.
for j in range(3, r_count):
    subject = df.iloc[j, 0]
    subject = URIRef(subject)
    for i in map:
        value = df.iloc[j, i]
        if not pd.isnull(value) and value != 0:
            obj = map[i]
            p = URIRef(obj["uri"])
            if obj["type"].upper() == "RESOURCE":
                g.add((subject, p, URIRef(value)))
            else:
                g.add((subject, p, Literal(value)))
g.serialize(destination='data2/dump.rdf')
'''
g.serialize(destination=path+'.rdf')
json_path = path+'.json'
f2 = open(json_path, "wb")
f2.write(g.serialize(format='json-ld'))
f2.close()
with open(json_path) as f:
df = json.load(f)
with open(path+"_min.json", 'w') as f:
json.dump(df, f, ensure_ascii=False,
sort_keys=True, separators=(',', ': '))
with open('data/data_all.csv', 'r') as f:
reader = csv.reader(f)
header = next(reader) # ヘッダーを読み飛ばしたい時
for row in reader:
# print(row)
title1 = row[0]
id1 = row[1]
if id1 not in result:
# result[title1] = {}
result[id1] = {
"title" : title1,
"children" : {}
}
tmp = result[id1]["children"]
title2 = row[2]
id2 = row[3]
if id2 not in tmp:
tmp[id2] = {
"title": title2,
"children": {}
}
tmp = tmp[id2]["children"]
no = row[4]
desc = row[5]
if no not in tmp:
tmp[no] = {
"desc" : desc,
"images" : []
}
tmp = tmp[no]
img = row[6]
tmp["images"].append(img)
f = open('data/temp.json', 'r')
json_dict = json.load(f)
count = 0
for id1 in result:
print("*"+id1)
obj1 = result[id1]["children"]
title1 = result[id1]["title"]
for id2 in obj1:
print("**"+id2)
obj2 = obj1[id2]["children"]
title2 = obj1[id2]["title"]
for no in obj2:
obj3 = obj2[no]
dir1 = "../docs/data/"+id1+"/"+id2
os.makedirs(dir1, exist_ok=True)
file = dir1+"/" + str(no).zfill(4) + ".json"
count += 1
if os.path.exists(file):
continue
# obj = json_dict.copy()
obj = {
"@context": "http://iiif.io/api/presentation/2/context.json",
"@type": "sc:Manifest",
"license": "http://creativecommons.org/licenses/by-nc-sa/4.0/",
"attribution": "Historiographical Institute The University of Tokyo 東京大学史料編纂所",
"logo": "http://www.hi.u-tokyo.ac.jp/favicon.ico",
"within": "http://www.hi.u-tokyo.ac.jp/publication/dip/index.html",
"sequences": [
{
"@type": "sc:Sequence",
"label": "Current Page Order",
"viewingHint": "non-paged",
"canvases": []
}
],
"viewingDirection": "right-to-left"
}
obj["label"] = title1+"・"+title2+"・"+no
obj["description"] = obj3["desc"]
obj["@id"] = "https://nakamura196.github.io/hi/"+file.replace("../docs/", "")
obj["sequences"][0]["@id"] = obj["@id"]+"/sequence/normal"
canvases = obj["sequences"][0]["canvases"]
width = -1
height = -1
for i in range(len(obj3["images"])):
img_url = obj3["images"][i]
tmp = {
"@type": "sc:Canvas",
"thumbnail": {},
"images": [
{
"@type": "oa:Annotation",
"motivation": "sc:painting",
"resource": {
"@type": "dctypes:Image",
"format": "image/jpeg",
}
}
]
}
tmp["@id"] = obj["@id"]+"/canvas/p"+str(i+1)
tmp["label"] = "["+str(i+1)+"]"
tmp["thumbnail"]["@id"] =img_url.replace(".jpg", "_r25.jpg")
if i == 0:
obj["thumbnail"] = tmp["thumbnail"]["@id"]
img = Image.open(urllib.request.urlopen(img_url))
width, height = img.size
tmp["images"][0]["resource"]["width"] = width
tmp["images"][0]["resource"]["height"] = height
tmp["width"] = width
tmp["height"] = height
tmp["images"][0]["@id"] = obj["@id"]+"/annotation/p"+str(i+1)+"-image"
tmp["images"][0]["resource"]["@id"] = img_url
tmp["images"][0]["on"] = tmp["@id"]
canvases.append(tmp)
f2 = open(file, 'w')
json.dump(obj, f2, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
print(count)
'''
| true |
ff830d271d28292523ed7a52165f0d20aa0363ad | Python | JavaRod/SP_Python220B_2019 | /students/will_chang/lesson02/assignment/charges_calc.py | UTF-8 | 3,366 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
"""
Returns total price paid for individual rentals
"""
import logging
import argparse
import json
import datetime
import math
# Shared logging setup: one date-stamped log file plus console output,
# both using the same format string.  Verbosity is configured later from
# the -d/--debug flag via config_log().
log_format = "%(asctime)s %(filename)s:%(lineno)-3d %(levelname)s %(message)s"
log_file = datetime.datetime.now().strftime("%Y-%m-%d")+'_charges_calc.log'
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(file_handler)
logger.addHandler(console_handler)
def config_log(level):
    """Set logging verbosity from the -d/--debug flag.

    '0' disables logging entirely, '1' -> ERROR, '2' -> WARNING,
    '3' -> DEBUG; any other value leaves logging unconfigured.

    BUG FIX(review): the ``def config_log(level):`` line was missing in
    this copy of the file, leaving the if-chain at module level where
    ``level`` is undefined (NameError on import) even though main()
    calls ``config_log(ARGS.debug)`` -- restored here.
    """
    if level == '0':
        logger.disabled = True
    elif level == '1':
        logger.setLevel(logging.ERROR)
        file_handler.setLevel(logging.ERROR)
        console_handler.setLevel(logging.ERROR)
    elif level == '2':
        logger.setLevel(logging.WARNING)
        file_handler.setLevel(logging.WARNING)
        console_handler.setLevel(logging.WARNING)
    elif level == '3':
        logger.setLevel(logging.DEBUG)
        file_handler.setLevel(logging.DEBUG)
        console_handler.setLevel(logging.DEBUG)
def parse_cmd_arguments():
    """Parse the command line: required input/output JSON paths plus an
    optional debug level ('0'..'3', default '0')."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-i', '--input', help='input JSON file', required=True)
    # Typo fix: the help text previously read 'ouput'.
    parser.add_argument('-o', '--output', help='output JSON file', required=True)
    parser.add_argument('-d', '--debug', help='logging function level', required=False, default='0')
    return parser.parse_args()
def load_rentals_file(filename):
    """Load and return the rental data dictionary from a JSON input file.

    Exits the program when the file cannot be found.
    """
    try:
        with open(filename) as file:
            data = json.load(file)
    except FileNotFoundError:
        logging.debug("FileNotFoundError in load_rentals_file function.")
        logging.error("Input file %s is unable to be located.", filename)
        # Exit non-zero: this is an error path, and exit(0) would signal
        # success to the calling shell.
        exit(1)
    return data
def calculate_additional_fields(data):
    """Add total_days, total_price, sqrt_total_price and unit_cost to
    each rental record, in place, and return the mapping.

    Records with malformed dates or inconsistent values are logged and
    left without the derived fields.
    """
    for key, value in data.items():
        try:
            rental_start = datetime.datetime.strptime(value['rental_start'], '%m/%d/%y')
            rental_end = datetime.datetime.strptime(value['rental_end'], '%m/%d/%y')
        except ValueError:
            logging.debug("ValueError in calculate_additional_fields function.")
            logging.warning("(%s) Date format does not match 'm/d/y' format.", key)
            # BUG FIX: skip this record.  Without the continue, the block
            # below would hit a NameError (or silently reuse the parsed
            # dates left over from the previous loop iteration).
            continue
        try:
            value['total_days'] = (rental_end - rental_start).days
            value['total_price'] = value['total_days'] * value['price_per_day']
            # math.sqrt raises ValueError for a negative total_price,
            # i.e. when the rental ends before it starts.
            value['sqrt_total_price'] = math.sqrt(value['total_price'])
            value['unit_cost'] = value['total_price'] / value['units_rented']
        except ValueError:
            logging.debug("ValueError in calculate_additional_fields function.")
            logging.warning("(%s) Start date cannot be later than end date.", key)
    return data
def save_to_json(filename, data):
    """Serialize *data* as JSON and write it to *filename*."""
    serialized = json.dumps(data)
    with open(filename, 'w') as outfile:
        outfile.write(serialized)
if __name__ == "__main__":
    # Pipeline: parse args, configure logging, then load -> enrich -> save.
    ARGS = parse_cmd_arguments()
    config_log(ARGS.debug)
    DATA = load_rentals_file(ARGS.input)
    DATA = calculate_additional_fields(DATA)
    save_to_json(ARGS.output, DATA)
| true |
fc06b30f6a4e6a24667d1847d88763e5eb7dfe0d | Python | ayang629/IoTNetSim | /run.py | UTF-8 | 908 | 2.75 | 3 | [] | no_license | import logging
import os
import sys
import netsim_Vis
from netsim_YAML import run
from netsim_NS3 import experiment
if __name__ == "__main__":
    # Pipeline: YAML topology -> NS3 experiment -> visualizations.
    if len(sys.argv) >= 2 and os.path.isfile(sys.argv[1]): #check cmd arg
        topology = dict()
        try:
            topology = run(sys.argv[1]) #parse yaml
        except Exception as e:
            logging.exception("YAML Parsing stage failed.")
        try:
            # NOTE(review): if this stage raises, the name below stays
            # unbound and the next try/except logs the resulting NameError
            # as a misleading "Visualizations failed".
            info_to_feed_into_visualizations = experiment(topology) #NS3 exp
        except Exception as e:
            logging.exception("NS3 Experimentation failed.")
        try:
            netsim_Vis.visA(info_to_feed_into_visualizations) #some visualization
            #... and so on
        except Exception as e:
            logging.exception("Visualizations failed")
        sys.exit(0) #everything ended ok
    sys.exit("Invalid argument provided to script.") #something went wrong
| true |
7ca2bf7285ad1cfe7535b32ac0fa15a52ec5ee50 | Python | Leeyoungsup/python_pratice | /출석과제4/응용예제1.py | UTF-8 | 1,300 | 3.390625 | 3 | [] | no_license | import random
# Keep the original module-level names for backward compatibility.
dice1, dice2, dice3, dice4, dice5, dice6 = [0] * 6
throwCount, serialCount = 0, 0
if __name__ == "__main__":
    # Roll six dice until all six show the same face, counting along the
    # way how often a throw produced every face 1..6 exactly once.
    while True:
        throwCount += 1
        # One list comprehension replaces six copy-pasted assignments.
        dice = [random.randrange(1, 7) for _ in range(6)]
        dice1, dice2, dice3, dice4, dice5, dice6 = dice
        if len(set(dice)) == 1:
            # All six dice identical: report and stop.
            print('6개의 주사위가 모두 동일한 숫자가 나옴-->', dice1, dice2, dice3, dice4, dice5, dice6)
            break
        elif set(dice) == {1, 2, 3, 4, 5, 6}:
            # With six dice, containing every face 1..6 means each appears
            # exactly once -- equivalent to the original 36-term condition.
            serialCount += 1
    print("6개가 동일한 숫자가 나올 때까지 주사위를 던진 횟수-->", throwCount)
    print("6개가 동일한 숫자가 나올 때까지, 1~6의 연속번호가 나온 횟수-->", serialCount)
| true |
aa63a0378bff6e5f50e058ab0a29834db6071884 | Python | gengzi/PycharmProject | /XklProject/test/log/Student.py | UTF-8 | 2,998 | 2.828125 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
class Student(object):
    """Plain value object for one crawled student record.

    Fields (set as ordinary attributes by the crawler):
        studentId   -- student number
        schoolName  -- school name
        academyName -- college / faculty
        profession  -- major
        academyId   -- education level
        grade       -- year of enrolment
        nickName    -- name
        pinyinStr   -- name in pinyin
        bornDate    -- date of birth
        hometown    -- home town / current location
        gender      -- 1 = male, 0 = female, -1 = unknown
        source      -- phone / client type

    The original version declared a passthrough @property/@setter pair
    for every field, each backed by a ``_field`` attribute, with no
    validation or computation anywhere; plain attribute access
    (``s.gender = 1`` / ``s.gender``) behaves identically for callers,
    so the ~100 lines of boilerplate are dropped.  NOTE(review): restore
    the properties if any external code reads the private ``_field``
    names directly.
    """
#测试
# stu = Student()
# stu.score = 1001
#
# print stu.score
| true |
1ede0107d86e112d1f8afe1a038ce5ea2dca0c7e | Python | jbxiang/valuation-of-financial-model | /square_test.py | UTF-8 | 2,723 | 2.703125 | 3 | [] | no_license | import market_environment as me
import datetime as dt
import constant_short_rate as constant
import geometric_brownian_motion as geometric
import matplotlib.pyplot as plt
import simulation_class as sim
import jump_diffusion as jump
import square_root_diffusion as square
me_gbm = me.market_environment('me_gbm' , dt.datetime(2015, 1, 1))
me_gbm.add_constant('initial_value' , 36.)
me_gbm.add_constant('volatility' , 0.2)
me_gbm.add_constant('final_date' , dt.datetime(2015, 12, 31))
me_gbm.add_constant('currency' , 'EUR')
me_gbm.add_constant('frequency' , 'M')
# monthly frequency (respective month end)
me_gbm.add_constant('paths' , 10000)
csr = constant.constant_short_rate('csr',0.05)
me_gbm.add_curve('discount_curve',csr)
gbm = geometric.geometric_brownian_motion('gbm',me_gbm)
gbm.generate_time_grid()
#print(gbm.time_grid)
paths_1 = gbm.get_instrument_values()
#print(paths_1)
gbm.update(volatility=0.5)
paths_2 = gbm.get_instrument_values()
plt.figure(figsize= (8,4))
p1 = plt.plot(gbm.time_grid,paths_1[:,:10],'b')
p2 = plt.plot(gbm.time_grid,paths_2[:,:10],'r-.')
plt.grid(True)
l1 = plt.legend([p1[0],p2[0]],['low volatility','high volatility'],loc=2)
plt.gca().add_artist(l1)
plt.xticks(rotation=30)
plt.show()
me_jd = me.market_environment('me_jd',dt.datetime(2015,1,1))
me_jd.add_constant('lambda',0.3)
me_jd.add_constant('mu',-0.75)
me_jd.add_constant('delta',0.1)
me_jd.add_environment(me_gbm)
jd = jump.jump_diffusion('jd',me_jd)
paths_3 = jd.get_instrument_values()
jd.update(lamb=0.9)#改变跳动频率
paths_4 = jd.get_instrument_values()
plt.figure(figsize= (8,4))
p1 = plt.plot(gbm.time_grid,paths_3[:,:10],'b')
p2 = plt.plot(gbm.time_grid,paths_4[:,:10],'r-.')
plt.grid(True)
l1 = plt.legend([p1[0],p2[0]],['low intensity','high intensity'],loc=3)
plt.gca().add_artist(l1)
plt.xticks(rotation=30)
plt.show()
me_srd = me.market_environment('me_srd' , dt.datetime(2015, 1, 1))
me_srd.add_constant('initial_value' , .25)
me_srd.add_constant('volatility' , 0.05)
me_srd.add_constant('final_date' , dt.datetime(2015, 12, 31))
me_srd.add_constant('currency' , 'EUR')
me_srd.add_constant('frequency' , 'W')
# monthly frequency (respective month end)
me_srd.add_constant('paths' , 10000)
me_srd.add_constant('kappa' , 4.0)
me_srd.add_constant('theta' , 0.2)
me_srd.add_curve('discount_curve',constant.constant_short_rate('r',0.0))
srd = square.square_root_diffusion('srd',me_srd)
srd_paths = srd.get_instrument_values()[:,:10]
plt.figure(figsize= (8,4))
plt.plot(srd.time_grid,srd.get_instrument_values()[:,:10])
plt.axhline(me_srd.get_constant('theta'),color='r',ls='-',lw=2.0)
plt.grid(True)
plt.xticks(rotation=30)
plt.show() | true |
7c98474a214a65d29561a9c4fa4f10208829715f | Python | jersson/mit-intro-cs-python | /week-02/99-problem-set-02/problem-01/problem.py | UTF-8 | 522 | 3.171875 | 3 | [
"MIT"
] | permissive | balance = 42
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
# output>Remaining balance: 31.38
#program to be pasted begin here
# Simulate twelve monthly billing cycles: each month the minimum payment is
# made, and interest accrues on what remains unpaid.
remainingBalance = balance
for month in range(1, 13):
    minimumPayment = monthlyPaymentRate * remainingBalance
    unpaidBalance = remainingBalance - minimumPayment
    monthlyInterest = annualInterestRate * unpaidBalance / 12.0
    remainingBalance = unpaidBalance + monthlyInterest
remainingBalance = round(remainingBalance, 2)
print("Remaining balance: {}".format(remainingBalance))
| true |
d53927b8b85f78d9ee88c9e09b36a82c6c620e34 | Python | quintelabm/PrmFitting | /teste-uq/MonteCarlo_EDO/SA_ODE.py | UTF-8 | 1,868 | 2.734375 | 3 | [] | no_license | from scipy.integrate import odeint
import numpy as np
import seaborn as sns
import chaospy as cp
import uncertainpy as un
sns.set()
def dinamica_Extracelular(y, t, delta, epsilon, p, c):
    """Right-hand side of the extracellular viral-dynamics ODE.

    y = [T, I, V]: target cells, infected cells, free virus.
    delta/epsilon/p/c: infected-cell death rate, treatment efficacy,
    virion production rate, virion clearance rate.
    Returns dy/dt as a length-3 numpy array.
    """
    # Fixed model parameters: cell production, infection rate, natural death.
    s = 1.3*10**5
    beta = 5*10**-8
    d = 0.01
    T, I, V = y[0], y[1], y[2]
    dT = s - beta*V*T - d*T
    dI = beta*V*T - delta*I
    dV = (1 - epsilon)*p*I - c*V
    return np.array([dT, dI, dV])
def solver(delta, epsilon, p, c):
    """Integrate the viral-dynamics ODE for 30 days.

    Returns (time grid, log10 of the viral load V over time).
    """
    step = 0.1   # output step [days]
    days = 30    # simulated horizon [days]
    t_range = np.linspace(0, days, int(days/step))
    # Initial conditions; V0 is the AVERAGE_PAT baseline viral load.
    # float32 ('f') kept as in the original fitting code.
    T0 = 2.9168*10**6
    I0 = 8.7186*10**5
    V0 = 10**6.47991433
    yinit = np.array([T0,I0,V0], dtype='f')
    sol = odeint(dinamica_Extracelular, yinit, t_range, args=(delta, epsilon, p, c))
    return t_range, np.log10(sol[:,2])
if __name__ == "__main__":
    # Uncertainty quantification of the ODE solver with uncertainpy:
    # each parameter gets a uniform distribution around its point estimate,
    # then a Monte-Carlo UQ run is performed.
    model = un.Model(
        run = solver,
        labels=["Tempo (dias)",
                "Carga viral (log10)"]
    )
    # Point estimates for the four free parameters.
    delta_m = 0.07
    epsilon_m = 0.999
    p_m = 12.0
    c_m = 19.0
    # create distributions
    # (+-10%; epsilon only varies downwards -- presumably so the
    # treatment efficacy never exceeds 1; confirm.)
    delta_dist=cp.Uniform(delta_m*0.9, delta_m*1.1)
    epsilon_dist=cp.Uniform(epsilon_m*0.9, epsilon_m)
    p_dist=cp.Uniform(p_m*0.9, p_m*1.1)
    c_dist=cp.Uniform(c_m*0.9, c_m*1.1)
    # define parameter dictionary
    parameters = {"delta": delta_dist,
                  "epsilon": epsilon_dist,
                  "p": p_dist,
                  "c": c_dist
                  }
    # set up UQ
    UQ = un.UncertaintyQuantification(
        model=model,
        parameters=parameters
    )
    data = UQ.monte_carlo(nr_samples=100)
890a83f6f42250c8ce00ec9df3eaa9328552017a | Python | adamjford/CMPUT296 | /Lecture-2013-01-30/example-orig.py | UTF-8 | 1,767 | 3.9375 | 4 | [] | no_license | """
Graph example
G = (V, E)
V is a set
E is a set of edges, each edge is an unordered pair
(x, y), x != y
"""
import random
# from random import sample
# from random import *
def neighbours_of(G, v):
    """Return the set of vertices adjacent to v in the graph G = (V, E).

    Each edge is an unordered pair (x, y) with x != y.

    >>> G = ( {1, 2, 3}, { (1, 2), (1, 3) })
    >>> neighbours_of(G, 1) == { 2, 3 }
    True
    >>> neighbours_of(G, 3) == { 1 }
    True
    >>> neighbours_of(G, 2) == { 1 }
    True
    """
    # NOTE: the original third doctest printed the raw set and expected
    # '{3, 2}', which depends on set iteration order and can break; it was
    # replaced with an order-independent '==' comparison.
    (V, E) = G
    neighbours = set()
    for (x, y) in E:
        if v == x:
            neighbours.add(y)
        if v == y:
            neighbours.add(x)
    return neighbours
def generate_random_graph(n, m):
    """Return a random simple graph (V, E) with n vertices and m edges.

    Vertices are 0..n-1; each edge is stored as a (min, max) tuple so the
    pair is unordered. Raises ValueError if m exceeds n*(n-1)/2.
    """
    V = set(range(n))
    E = set()
    max_num_edges = n * (n-1) // 2
    if m > max_num_edges:
        raise ValueError("For {} vertices, you want {} edges, but can only have a maximum of {}".format(n, m, max_num_edges))
    while len(E) < m:
        # Bug fix: random.sample() requires a sequence since Python 3.11
        # (sampling directly from a set now raises TypeError), so sample
        # from a sorted list of the vertices instead.
        pair = random.sample(sorted(V), 2)
        E.add(tuple([min(pair), max(pair)]))
    return (V, E)
# --- Demo: random walk on a sparse random graph ------------------------------
n = 20   # number of vertices
m = 5    # number of edges (sparse: most vertices will be isolated)
G = generate_random_graph(n, m)
(V, E) = G
print(G)
print("Number of edges is {}, we want {}".format(len(E), m))
# Pick random start/stop vertices (they may coincide).
start = random.choice(list(V))
stop = random.choice(list(V))
cur = start
print("Starting at {}".format(cur))
if len(neighbours_of(G, cur)) == 0:
    raise Exception("Bad luck, {} has no neighbours".format(cur))
# Walk at random until the stop vertex is reached or the step budget runs out.
num_steps = 0
max_num_steps = 1000
while cur != stop and num_steps < max_num_steps:
    num_steps += 1
    # pick a neighbour of cur at random
    neighbours = neighbours_of(G, cur)
    # print(neighbours)
    # pick one of the neighbours
    # cur = random.sample(neighbours, 1)[0]
    # or
    cur = random.choice(list(neighbours))
    print("At {}".format(cur))
print("Finished at {}".format(cur))
"""
if __name__ == "__main__":
import doctest
doctest.testmod()
"""
| true |
48cc7f60a7456526d82bb8f11df3be509c1d1fa4 | Python | dynafa/keras_examples | /train_pretrained_network.py | UTF-8 | 2,191 | 2.53125 | 3 | [] | no_license | #!/home/minami/tf2.0/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
import h5py
import numpy as np
# --- Resume training of a saved Keras model from an HDF5 dataset -------------
hdf5_path = 'dataset.hdf5'
subtract_mean = False  # optionally centre images on the training mean
# open the hdf5 file
df = h5py.File(hdf5_path, "r")
# subtract the training mean
if subtract_mean:
    mm = df["train_mean"][0, ...]
    mm = mm[np.newaxis, ...]
# Total number of samples
train = df["train_img"].shape
validate = df["val_img"].shape
test = df["test_img"].shape
print(train)
print(validate)
print(test)
modelname = 'DNN_cat_dog_model.h5'        # model to resume from
newmodelname = 'DNN_cat_dog_improved.h5'  # where the fine-tuned model is saved
epochs = 100
dims_X = 100  # image width in pixels
dims_Y = 100  # image height in pixels
train_images, train_labels, test_images, test_labels = \
    df["train_img"], df["train_labels"], df["test_img"], df["test_labels"]
# Flatten each image to (dims_X*dims_Y, 3) and scale pixels to [0, 1].
train_images = train_images[:train[0]].reshape(-1, dims_X * dims_Y, 3) / 255.0
test_images = test_images[:test[0]].reshape(-1, dims_X * dims_Y, 3) / 255.0
# NOTE(review): label counts are hard-coded; confirm they match the dataset
# sizes printed above (train[0] / test[0]).
train_labels = train_labels[:15000]
test_labels = test_labels[:5000]
# Recreate the exact same model, including its weights and the optimizer
saved_model = tf.keras.models.load_model(modelname)
# Show the model architecture
saved_model.summary()
# Re-evaluate the model
loss, acc = saved_model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
print(saved_model.optimizer.get_config())
# To get learning rate
# To set learning rate
# Override the optimizer hyper-parameters before resuming training.
K.set_value(saved_model.optimizer.lr, 0.001)
K.set_value(saved_model.optimizer.decay, 0.01)
K.set_value(saved_model.optimizer.momentum, 0.9)
print(K.get_value(saved_model.optimizer.lr))
print(K.get_value(saved_model.optimizer.decay))
print(K.get_value(saved_model.optimizer.momentum))
# print(saved_model.optimizer.get_config())
input("Ready?")  # pause so the settings can be checked before training starts
saved_model.fit(train_images, train_labels, epochs=epochs)
saved_model.save(newmodelname)
print("Model saved as %s" % newmodelname)
print("Completed training network for %s epochs" % epochs)
loss, acc = saved_model.evaluate(test_images, test_labels, verbose=0)
print("Trained model, accuracy: {:5.2f}%".format(100*acc))
print(loss)
8a9c7fa3d4e6a957a0d3cfdba3e5dff11a8e3e98 | Python | chinesefirewall/Robotics | /lab06/lab06_task04.py | UTF-8 | 7,119 | 2.96875 | 3 | [
"MIT"
] | permissive | '''Niyi Solomon Adebayo
'''
import numpy as np
import cv2
import time
import easygopigo3 as go
#from easygopigo3 import EasyGoPiGo3 as go
## robot driving
drive = go.EasyGoPiGo3()  # GoPiGo3 motor/drive interface
# Open the camera
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # small capture buffer -> fresher frames
def default_values():
    """Load the six HSV threshold globals (lH..hV) from trackbar.txt.

    Expected file format: one line "lH,hH,lS,hS,lV,hV".
    Raises (e.g. FileNotFoundError/ValueError) if the file is missing or
    malformed; the caller's try/except then falls back to manual defaults.
    """
    global hH, hS, hV, lH, lS, lV
    # Bug fix: the original opened the file as 'file_name' but called
    # file.close() on the undefined name 'file', so it always raised
    # NameError after reading and the saved values were never applied.
    # 'with' guarantees the handle is closed.
    with open("trackbar.txt", 'r') as f:
        bars = f.readline().split(",")
    lH = int(bars[0])
    hH = int(bars[1])
    lS = int(bars[2])
    hS = int(bars[3])
    lV = int(bars[4])
    hV = int(bars[5])
    return
# A callback function for each trackbar parameter
# write the new value into the global variable everytime
# Trackbar callbacks: OpenCV passes the new slider value to the callback;
# each one stores it in the corresponding module-level threshold variable.
def updatelH(new_value):
    global hH, hS, hV, lH, lS, lV
    lH = new_value
    return
def updatehH(new_value):
    global hH, hS, hV, lH, lS, lV
    hH = new_value
    return
def updatelS(new_value):
    global hH, hS, hV, lH, lS, lV
    lS = new_value
    return
def updatehS(new_value):
    global hH, hS, hV, lH, lS, lV
    hS = new_value
    return
def updatelV(new_value):
    global hH, hS, hV, lH, lS, lV
    lV = new_value
    return
def updatehV(new_value):
    global hH, hS, hV, lH, lS, lV
    hV = new_value
    return
def updateKernelValue(new_value):
    global kernel_size
    kernel_size = new_value
    #write the new value into the global variable
######################
try:
    # Prefer the thresholds saved from the previous run.
    default_values()
except:
    print("default value not set...manually getting values")
    # colour detection limits
    # initial limits
    lH = 0
    hH = 162
    lS = 50
    hS = 233
    lV = 193
    hV = 255
kernel_size = 5  # blur kernel half-size (actual kernel is 1 + 2*kernel_size)
##################
# --------------------- create track bar for each param --------------------------
cv2.namedWindow('Processed')
cv2.createTrackbar("Low H", 'Processed', lH, 255, updatelH)
cv2.createTrackbar("High H", 'Processed', hH, 255, updatehH)
cv2.createTrackbar("Low S", 'Processed', lS, 255, updatelS)
cv2.createTrackbar("High S", 'Processed', hS, 255, updatehS)
cv2.createTrackbar("Low V", 'Processed', lV, 255, updatelV)
cv2.createTrackbar("High V", 'Processed', hV, 255, updatehV)
cv2.createTrackbar("Kernel size", 'Processed', kernel_size, 100, updateKernelValue)
## -------------- detector parameters ----------------------
def blob_detector():
    """Build an OpenCV SimpleBlobDetector with this lab's tuned parameters.

    Filters only by colour and by area (200..30000 px); convexity,
    inertia and circularity filters are disabled.
    """
    blobparams = cv2.SimpleBlobDetector_Params()
    blobparams.filterByConvexity = False
    blobparams.minDistBetweenBlobs = 2000  # merge detections closer than this
    blobparams.minArea = 200
    blobparams.filterByColor = True
    blobparams.maxArea = 30000
    blobparams.filterByInertia = False
    blobparams.filterByArea = True
    blobparams.filterByCircularity = False
    detector = cv2.SimpleBlobDetector_create(blobparams)
    return detector
#current time of capture
start_time = time.time()
'''
Processing time for this frame = Current time – time when previous frame processed
So fps at the current moment will be :
FPS = 1/ (Processing time for this frame)
source: https://www.geeksforgeeks.org/python-displaying-real-time-fps-at-which-webcam-video-file-is-processed-using-opencv/
'''
detector = blob_detector()
while True:
    # Read the image from the camera
    width = 256
    height = 120
    # Read several frames, keeping only the last one -- presumably to flush
    # the capture buffer so the processed frame is current; confirm.
    for i in range(3):
        ret, video = cap.read()
    # Row crop: keeps image rows [height, width) == rows 120..255.
    video = video[height:width]
    print('video ', len(video[0]))
    # median blur
    #frame_blurred = cv2.medianBlur(video,1+2*kernel_size)
    # gaussian
    # frame_blurred = cv2.GaussianBlur(video, (1 + 2 * kernel_size, 1 + 2 * kernel_size), 0)
    # You will need this later
    # frame = cv2.cvtColor(frame, ENTER_CORRECT_CONSTANT_HERE)
    # Threshold the frame with the trackbar limits.
    # NOTE(review): the limits are named H/S/V but no BGR->HSV conversion is
    # performed (the cvtColor call above is commented out) -- confirm.
    lowerThresh = np.array([lH, lS, lV])
    upperThresh = np.array([hH, hS, hV])
    thresholded = cv2.inRange(video, lowerThresh, upperThresh)
    a = thresholded.shape # to get fram height and width
    f_width = a[1]
    f_height = a[0]
    print('frame height is ', a[0], ' and frame width s ', a[1])
    #thresholded = cv2.dilate(thresholded, iterations = 1 )
    # Draw a white border around the mask edge.
    thresholded = cv2.rectangle(thresholded, (0,0), (f_width-1, f_height-1),(255),2)
    # thresholded = cv2.inRange(frame_blurred , lowerThresh, upperThresh)
    # Invert the mask before detection.
    thresholded_img = 255 - thresholded
    #thresholded_img = thresholded
    # outimage = cv2.bitwise_and(video, video, mask=thresholded)
    keypoints = detector.detect(thresholded_img)
    i=0
    # puts points
    # Label each detected blob with its (x, y) position.
    for i in range(len(keypoints)):
        cv2.putText(thresholded_img, str(int(keypoints[i].pt[0])) + " " + str(int(keypoints[i].pt[1])),
                    (int(keypoints[i].pt[0]), int(keypoints[i].pt[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2)
        # cv2.putText(video, str(int(keypoints[i].pt[0])) + " " + str(int(keypoints[i].pt[1])),
        #             (int(keypoints[i].pt[0]), int(keypoints[i].pt[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2)
    # performs masking on an original image, original image gets original image value if mask is 255
    # outimage = cv2.bitwise_and(frame, frame, mask = thresholded)
    # FPS = 1 / time spent on the previous frame.
    current_time = time.time()
    diff = (current_time - start_time)
    start_time = current_time
    # Write some text onto the frame (FPS number)
    #cv2.putText(video, str(np.floor(1 / diff)), (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.putText(thresholded_img, str(np.floor(1 / diff)), (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    # puts detected points on original image
    img_with_keypoints = cv2.drawKeypoints(video, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    ######################
    # for key in keypoints:
    #     x1 = key.pt[0]
    #     x2 = key.pt[1]
    #     s = key.size
    #     print('x1: ', x1, 'x2: ', x2, 'diameter: ', s)
    #
    #####################
    # Dead zone: blob x-coordinates inside this band count as "centred".
    middle_number_allowance1 = 270
    middle_number_allowance2 = 370
    try:
        ### robot control
        drive.set_speed(50)
        print('length of key point: ',len(keypoints))
        # IndexError here (no keypoints) is handled by the except below.
        x1 = keypoints[0].pt[0]
        x2 = keypoints[0].pt[1]
        print('x1: ', x1, 'x2: ', x2)
        if x1 > middle_number_allowance1 and x1 < middle_number_allowance2:
            print("it is in center ")# do nothing
            drive.stop()
        elif x1 < middle_number_allowance1:
            print('spin left')
            drive.spin_left()
        elif x1 > middle_number_allowance2:
            drive.spin_right()
            print('spin right')
    except:
        # NOTE(review): a bare except also hides real robot/driver errors;
        # catching IndexError alone would be safer.
        print('No keypoints detected')
    # Display the resulting frame
    cv2.imshow('Original vid', img_with_keypoints)
    cv2.imshow('Thresh vid', thresholded_img)
    #cv2.imshow('Blurred vid', frame_blurred)
    # Quit the program when 'q' is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
print('closing program')
cap.release()
cv2.destroyAllWindows()
# Persist the current thresholds for the next run (read by default_values).
file = open("trackbar.txt","w+")
file.write(str(lH)+str(",")+str(hH)+str(",")+str(lS)+str(",")+str(hS)+str(",")+str(lV)+str(",")+str(hV))
file.close()
###########################
# x = 130 to 370
# y = 32 to 42
| true |
64007ee45ba7d5625f929b7cf5a643eccdad5596 | Python | VIVKA/fminvest | /app/utils/system.py | UTF-8 | 2,771 | 2.828125 | 3 | [] | no_license | import sys
import pickle
import datetime
from functools import wraps
# Thanks, https://goshippo.com/blog/measure-real-size-any-python-object/
def get_size(obj, seen=None):
    """Recursively finds size of objects.

    Follows dict keys/values, instance __dict__ attributes and iterable
    contents, counting every distinct object (by id) exactly once so that
    shared and self-referential structures terminate.
    (Based on https://goshippo.com/blog/measure-real-size-any-python-object/)
    """
    if seen is None:
        seen = set()
    marker = id(obj)
    if marker in seen:
        return 0
    # Mark as seen *before* recursing so cycles do not loop forever.
    seen.add(marker)
    total = sys.getsizeof(obj)
    if isinstance(obj, dict):
        for key, value in obj.items():
            total += get_size(value, seen)
            total += get_size(key, seen)
    elif hasattr(obj, '__dict__'):
        total += get_size(obj.__dict__, seen)
    elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes, bytearray)):
        total += sum(get_size(item, seen) for item in obj)
    return total
____global_cache = {}
def daycache(method):  # noqa: E302
    """Cache `method` results for the current calendar day only.

    On the first call of a new day the entire shared cache is wiped, so no
    entry survives past the day it was computed on.
    """
    @wraps(method)
    def cached(*args, **kw):
        global ____global_cache
        day_key = datetime.date.today().isoformat()
        if day_key not in ____global_cache:
            ____global_cache.clear()   # drop every stale entry
            ____global_cache[day_key] = {}
        bucket = ____global_cache[day_key]
        # Key on the method name plus the pickled call arguments.
        call_key = '{}{}{}'.format(
            str(method.__name__),
            pickle.dumps(args, 1),
            pickle.dumps(kw, 1),
        )
        if call_key not in bucket:
            bucket[call_key] = method(*args, **kw)
        return bucket[call_key]
    return cached
def daycacheassetmethod(method):
    """Cache an asset method's results for the current calendar day.

    The cache key includes ``args[0].ticker`` (the asset instance the
    method is bound to), the method name, and the pickled remaining
    arguments, so results are cached per asset.

    Fixes vs. the original: stale day buckets are purged when the date
    changes (mirroring ``daycache``; previously old buckets accumulated
    forever), and ``functools.wraps`` preserves the wrapped method's
    metadata as the sibling decorator already does.
    """
    @wraps(method)
    def cached(*args, **kw):
        global ____global_cache
        dayToken = datetime.date.today().isoformat()
        if dayToken not in ____global_cache:
            ____global_cache.clear()   # drop entries from previous days
            ____global_cache[dayToken] = {}
        token = '{}-{}-{}-{}'.format(
            str(args[0].ticker),
            str(method.__name__),
            pickle.dumps(args[1:], 1),
            pickle.dumps(kw, 1),
        )
        if token not in ____global_cache[dayToken]:
            ____global_cache[dayToken][token] = method(*args, **kw)
        return ____global_cache[dayToken][token]
    return cached
def hourcacheassetmethod(method):
    """Cache an asset method's results for the current clock hour.

    Same keying as ``daycacheassetmethod`` but bucketed per hour
    ("YYYY-MM-DD HH" tokens).

    Fixes vs. the original: stale *hour* buckets are deleted when the hour
    changes (previously they were kept forever, leaking memory), and
    ``functools.wraps`` preserves the wrapped method's metadata. Day
    buckets (keys without a space) are deliberately left alone so this
    decorator does not clobber ``daycache`` entries mid-day.
    """
    @wraps(method)
    def cached(*args, **kw):
        global ____global_cache
        hourToken = datetime.datetime.now().strftime("%Y-%m-%d %H")
        if hourToken not in ____global_cache:
            # Hour tokens are the only keys containing a space.
            for stale in [k for k in ____global_cache if ' ' in k]:
                del ____global_cache[stale]
            ____global_cache[hourToken] = {}
        token = '{}-{}-{}-{}'.format(
            str(args[0].ticker),
            str(method.__name__),
            pickle.dumps(args[1:], 1),
            pickle.dumps(kw, 1),
        )
        if token not in ____global_cache[hourToken]:
            ____global_cache[hourToken][token] = method(*args, **kw)
        return ____global_cache[hourToken][token]
    return cached
| true |
1bf2e310c691b0be549f69bf8c59e44db62b6863 | Python | daniiloleshchuk/Python-Lab11-12 | /managers/BouquetManager.py | UTF-8 | 3,551 | 3.453125 | 3 | [] | no_license | from models.Flower import Flower
class BouquetManager:
    """Keeps a list of flowers and offers search and sort helpers on it."""

    def __init__(self):
        self.flowers_in_bouquet = []

    def add_flowers_to_bouquet(self, *flowers_to_add: Flower):
        """Append every given flower to the bouquet, in order."""
        self.flowers_in_bouquet.extend(flowers_to_add)

    def remove_flowers_from_bouquet(self, *flowers_to_remove: Flower):
        """Remove every given flower from the bouquet (first occurrence)."""
        for flower in flowers_to_remove:
            self.flowers_in_bouquet.remove(flower)

    def find_flower_price_lower_than(self, price_to_compare: int):
        """
        >>> rose = Flower("flower", "red", 40, 70, "rose")
        >>> fialka = Flower("flower", "purple", 20, 35, "fialka")
        >>> romashka = Flower("flower", "white", 10, 20, "romashka")
        >>> bouquet = BouquetManager()
        >>> bouquet.add_flowers_to_bouquet(rose, fialka, romashka)
        >>> result = bouquet.find_flower_price_lower_than(60)
        >>> [flower.price_in_uah for flower in result]
        [35, 20]
        """
        return [flower for flower in self.flowers_in_bouquet
                if flower.price_in_uah < price_to_compare]

    def find_flowers_height_bigger_than(self, height_in_sm_to_compare: int):
        """
        >>> rose = Flower("flower", "red", 40, 70, "rose")
        >>> fialka = Flower("flower", "purple", 20, 35, "fialka")
        >>> romashka = Flower("flower", "white", 10, 20, "romashka")
        >>> bouquet = BouquetManager()
        >>> bouquet.add_flowers_to_bouquet(rose, fialka, romashka)
        >>> result = bouquet.find_flowers_height_bigger_than(15)
        >>> [flower.height_in_sm for flower in result]
        [40, 20]
        """
        return [flower for flower in self.flowers_in_bouquet
                if flower.height_in_sm > height_in_sm_to_compare]

    def sort_flowers_by_height(self, reverse=True):
        """
        >>> rose = Flower("flower", "red", 40, 70, "rose")
        >>> fialka = Flower("flower", "purple", 20, 35, "fialka")
        >>> romashka = Flower("flower", "white", 10, 20, "romashka")
        >>> bouquet = BouquetManager()
        >>> bouquet.add_flowers_to_bouquet(rose, fialka, romashka)
        >>> bouquet.sort_flowers_by_height(reverse=False)
        >>> [flower.height_in_sm for flower in bouquet.flowers_in_bouquet]
        [10, 20, 40]
        >>> bouquet.sort_flowers_by_height(reverse=True)
        >>> [flower.height_in_sm for flower in bouquet.flowers_in_bouquet]
        [40, 20, 10]
        """
        self.flowers_in_bouquet.sort(key=lambda flower: flower.height_in_sm,
                                     reverse=reverse)

    def sort_flowers_by_price(self, reverse=False):
        """
        >>> rose = Flower("flower", "red", 40, 70, "rose")
        >>> fialka = Flower("flower", "purple", 20, 35, "fialka")
        >>> romashka = Flower("flower", "white", 10, 20, "romashka")
        >>> bouquet = BouquetManager()
        >>> bouquet.add_flowers_to_bouquet(rose, fialka, romashka)
        >>> bouquet.sort_flowers_by_price()
        >>> [flower.price_in_uah for flower in bouquet.flowers_in_bouquet]
        [20, 35, 70]
        >>> bouquet.sort_flowers_by_price(reverse=True)
        >>> [flower.price_in_uah for flower in bouquet.flowers_in_bouquet]
        [70, 35, 20]
        """
        self.flowers_in_bouquet.sort(key=lambda flower: flower.price_in_uah,
                                     reverse=reverse)
if __name__ == '__main__':
    # Run the doctests above; extraglobs pre-seeds a manager instance.
    import doctest
    doctest.testmod(verbose=False, extraglobs={'bouquet': BouquetManager()})
| true |
62d485d6553fca7559cc82c8fa1e02ebd91ab1e1 | Python | danielanatolie/Data-Science-Concepts | /linearRegression.py | UTF-8 | 874 | 3.921875 | 4 | [] | no_license | # Linear Regression - fitting a straight line to a set up observations
# Gradient descent is an alternative to linear regression
# Coefficient of determination (R-squared), how well does the
# line fit for the data (1 being a perfect fit)
import numpy as np
from pylab import *
# Synthetic data: purchase amount is (negatively) linear in page speed,
# plus Gaussian noise.
pageSpeeds = np.random.normal(3.0, 1.0, 1000)
purchaseAmount = 100 - (pageSpeeds + np.random.normal(0.,0.1,1000))*3
plt.scatter(pageSpeeds, purchaseAmount)
plt.show()
from scipy import stats
# Least-squares fit of purchaseAmount against pageSpeeds.
slope, intercept, r_value, p_value, std_err = stats.linregress(pageSpeeds, purchaseAmount)
# NOTE(review): this file uses Python 2 print-statement syntax throughout.
print r_value ** 2 #Linear relationship between web speed and purchace
def predict(x):
    # Regression estimate: y = slope * x + intercept.
    # Bug fix: the original returned slope * x * intercept (multiplying by
    # the intercept instead of adding it), which is not the fitted line.
    return slope * x + intercept
# Overlay the fitted regression line (red) on the scatter data.
fitLine = predict(pageSpeeds)
plt.scatter(pageSpeeds, purchaseAmount)
plt.plot(pageSpeeds, fitLine, c='r')
plt.show()
| true |
d2589b0c3002b211bdda63febf1dbcef59f061d4 | Python | minority4u/image-classification-tf | /src/visualization/utils.py | UTF-8 | 6,416 | 2.859375 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
import itertools
import logging
import os
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.utils.class_weight import compute_sample_weight
from src.utils_io import save_plot
from collections import Counter
def plot_history(history):
    """
    Plots training and validation history (loss and accuracy curves).

    NOTE(review): this function is shadowed by the later
    `plot_history(history, config)` definition in this module, so it is
    unreachable by name after import -- confirm which one is intended.

    :param history: Return Value of Model.Fit (Keras)
    :return: none
    """
    # Collect the relevant metric keys from the Keras history dict.
    loss_list = [s for s in history.history.keys() if 'loss' in s and 'val' not in s]
    val_loss_list = [s for s in history.history.keys() if 'loss' in s and 'val' in s]
    acc_list = [s for s in history.history.keys() if 'acc' in s and 'val' not in s]
    val_acc_list = [s for s in history.history.keys() if 'acc' in s and 'val' in s]
    if len(loss_list) == 0:
        logging.debug('Loss is missing in history')
        return
    ## As loss always exists
    epochs = range(1, len(history.history[loss_list[0]]) + 1)
    ## Loss
    # NOTE(review): both the loss and the accuracy sections draw into
    # figure 3, so all curves land on the same axes and a single file
    # ('history.png') is written -- confirm the overlay is intended.
    plt.figure(3)
    for l in loss_list:
        plt.plot(epochs, history.history[l], 'b',
                 label='Training loss (' + str(str(format(history.history[l][-1], '.5f')) + ')'))
    for l in val_loss_list:
        plt.plot(epochs, history.history[l], 'g',
                 label='Validation loss (' + str(str(format(history.history[l][-1], '.5f')) + ')'))
    plt.title('Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    ## Accuracy
    plt.figure(3)
    for l in acc_list:
        plt.plot(epochs, history.history[l], 'r',
                 label='Training accuracy (' + str(format(history.history[l][-1], '.5f')) + ')')
    for l in val_acc_list:
        plt.plot(epochs, history.history[l], 'c',
                 label='Validation accuracy (' + str(format(history.history[l][-1], '.5f')) + ')')
    plt.title('Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.savefig('history.png')
    plt.clf()
def plot_confusion_matrix(cm, classes, path_to_save, filename,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    :param cm: Confusion Matrix (SKlearn)
    :param classes: One-Hot encoded classes
    :param path_to_save: path to store image
    :param filename: file name of the stored image
    :param normalize: row-normalize the matrix before plotting
    :param title: plot title
    :param cmap: matplotlib colour map
    :return: none
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Cell labels always use two decimals: the matrix can be float even
    # without normalization because sample weights are applied upstream.
    # (Replaces the original dead ternary: '.2f' if normalize else '.2f'.)
    fmt = '.2f'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    save_plot(plt.gcf(), path_to_save, filename)
def create_reports(ground_truth, predicted_classes, class_names, config, report_name, f_name_suffix):
    """
    Create validation report (confusion matrix, train/val history)
    :param ground_truth: ground-truth class ids for the evaluated samples
    :param predicted_classes: Predicted results of model.predict...
    :param class_names: readable label names, one per class
    :param config: config of experiment to read paths
    :param report_name: sub-directory of the report path to write into
    :param f_name_suffix: prefix for the generated image file names
    :return: none
    """
    logging.info('Classes: {0}'.format(len(class_names)))
    path_to_save = os.path.join(config['report_path'], report_name)
    target_names = class_names
    counter = Counter(ground_truth)
    max_val = float(max(counter.values()))
    # NOTE(review): counter/max_val are unused since the switch to
    # compute_sample_weight below -- candidates for removal.
    #class_weights = {class_id: max_val / num_images for class_id, num_images in counter.items()}
    # Each sample is weighted inversely to its class frequency ('balanced').
    class_weights = compute_sample_weight(class_weight='balanced', y=ground_truth)
    logging.info('ground truth: {}'.format(len(ground_truth)))
    logging.info('ground truth: {}'.format(ground_truth))
    logging.info('predict classes: {}'.format(len(predicted_classes)))
    logging.info('predict classes: {}'.format(predicted_classes))
    logging.info('class weights: {}'.format(len(class_weights)))
    logging.info(class_weights)
    cm = confusion_matrix(y_true=ground_truth, y_pred=predicted_classes, sample_weight=class_weights)
    logging.info('\n' + classification_report(y_true=ground_truth, y_pred=predicted_classes, target_names=target_names, sample_weight=class_weights))
    logging.info('Accuracy: {}'.format(accuracy_score(y_true=ground_truth, y_pred=predicted_classes, sample_weight=class_weights)))
    # Save both the raw and the row-normalized confusion matrix images.
    plt.figure()
    plot_confusion_matrix(cm, classes=target_names, normalize=False,
                          title='Confusion matrix, without normalization', path_to_save=path_to_save, filename=f_name_suffix + '_confusion_matrix.png')
    plt.figure()
    plot_confusion_matrix(cm, classes=target_names, normalize=True,
                          title='Normalized confusion matrix', path_to_save=path_to_save,filename = f_name_suffix + '_confusion_matrix_normalized.png')
def plot_history(history, config):
    """Save accuracy and loss curves for a Keras training run.

    NOTE(review): this definition shadows the earlier plot_history(history)
    in this module. Both sections draw into the same implicit figure (no
    plt.figure()/clf() in between) and save under the same filename, so the
    loss plot overlays the accuracy plot and overwrites its file -- confirm
    whether that is intended.

    :param history: return value of Keras Model.fit
    :param config: dict; 'experiment_name' selects the report directory
    :return: none
    """
    path_to_save = './data/reports/'
    path_to_save = os.path.join(path_to_save, config.get('experiment_name', 'unnamed'))
    filename = 'history_plot.png'
    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    save_plot(plt.gcf(), path_to_save, filename)
    #plt.show()
    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    save_plot(plt.gcf(), path_to_save, filename)
    #plt.show()
0b1fbb14a3a20ac25445a5b8e9ba899193546eec | Python | MeghaJK/python-programs | /Nprime.py | UTF-8 | 216 | 3.5625 | 4 | [] | no_license | #to find first N prime numbers
# Print all prime numbers from 2 up to num (inclusive).
# (Note: despite the original header comment, this prints the primes <= num,
# not the "first N" primes.)
num=int(input("Enter the value of num:"))
for a in range(2,num+1):
    for i in range(2,a//2+1):
        if(a%i==0):
            # a has a proper divisor, so it is not prime; stop early
            # (the original counted every divisor before deciding).
            break
    else:
        # Loop finished without finding a divisor: a is prime.
        print(a)
| true |
6f42fd6dc2f4bdcdf3b1779d1cd7daa14b195099 | Python | nuts3745/atcoder | /abc194/b.py | UTF-8 | 990 | 2.75 | 3 | [] | no_license | #!usr/bin/env python3
import bisect
import math
import sys
from collections import defaultdict, deque
from heapq import heappop, heappush
from itertools import permutations
# Competitive-programming stdin helpers:
#   I  -> one int              LI -> list of ints on one line
#   S  -> one line as chars    LS -> list of words, each as a char list
#   *R(n) -> read n lines with the corresponding single-line helper.
def LI(): return [int(x) for x in sys.stdin.readline().split()]
def I(): return int(sys.stdin.readline())
def LS(): return [list(x) for x in sys.stdin.readline().split()]
def S():
    res = list(sys.stdin.readline())
    if res[-1] == "\n":
        # strip the trailing newline
        return res[:-1]
    return res
def IR(n: int):
    return [I() for _ in range(n)]
def LIR(n: int):
    return [LI() for _ in range(n)]
def SR(n: int):
    return [S() for _ in range(n)]
def LSR(n: int):
    return [LS() for _ in range(n)]
sys.setrecursionlimit(1000000)
mod = 1000000007
def solve():
    """AtCoder ABC194 B: read n rows of (A_i, B_i) and print the answer.

    After sorting the pairs (lexicographically, i.e. primarily by the
    first component), prints
        min( min_{i>=1} A_i, min_{i>=1} B_i, A_0 + B_0 ),
    with `mod` (defined above) used as an "infinity" sentinel.
    NOTE(review): verify this matches the intended formula for the task.
    """
    n = I()
    li = LIR(n)
    li.sort()
    # a / b: smallest first / second component among all rows except row 0.
    a, b = mod, mod
    for i in range(1, n):
        a = min(li[i][0], a)
        b = min((li[i][1]), b)
    print(min(min(a, b), li[0][0]+li[0][1]))
    return
if __name__ == "__main__":
    solve()
| true |
c88185771357af0deb07abe389f86d46581eab5e | Python | thijs781/wop2 | /snelheid.py | UTF-8 | 1,806 | 2.84375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
# Physical parameters of the model car and track (SI units assumed).
u_veer0 = 0.3            # initial spring compression
k_veer = 200             # spring constant
massa_car = 1.5          # car mass
overbrenging = 1 / 24    # gear ratio: spring travel per distance driven
mu_dynamisch = 0.45      # dynamic friction coeff. (only used by the commented-out f_rem)
x_rempunt = 6.1          # brake point position along the track
rho = 1.2                # air density
oppervlak_car = 0.140    # frontal area
cw_car = 0.9             # drag coefficient
cr = 0.03                # rolling-resistance coefficient
g = 9.81                 # gravitational acceleration
hoek = 3.63 * np.pi / 180  # ramp angle in radians
x0 = 0                   # initial position
v0 = 0                   # initial velocity
def f_aandrijving(X):
    """Forward drive force delivered by the spring at position X.

    The spring relaxes as the car travels (the gear ratio maps distance
    driven to spring travel); once fully relaxed the drive force is zero.
    """
    compression = u_veer0 - X * overbrenging
    if compression <= 0:
        return 0
    return compression * k_veer * overbrenging
# def f_rem(X,V):
# if X > x_rempunt and V > 0:
# return massa_car*mu_dynamisch*g #alle wielen blokkeren
# else:
# return 0
def f_wrijving(X, V):
    """Total resistive force: aerodynamic drag plus, while the car is
    still moving before the brake point, rolling/bearing friction."""
    drag = 0.5 * rho * oppervlak_car * cw_car * V ** 2
    rolling = cr * massa_car * g if (V > 0 and X < x_rempunt) else 0
    return drag + rolling
def versnelling(X, V):
    """Net acceleration: (drive force - slope force - friction) / mass."""
    netto_kracht = f_aandrijving(X) - Fz_X(X) - f_wrijving(X, V)
    return netto_kracht / massa_car
def Fz_X(X):
    """Gravity component along the track; nonzero only on the slope
    section between x = 1.22 and x = 6.1."""
    if 1.22 < X < 6.1:
        return massa_car * g * np.sin(hoek)
    return 0
def integreer(tijdspan):
    """Forward-Euler integration of the car dynamics over `tijdspan`.

    Returns (a, v, x, vf): acceleration, velocity and position arrays
    (same length as tijdspan; entries stay zero after an early break) and
    vf, the speed when the car first passes x > 6.1 (0 if it never does).
    """
    vf = 0
    a = np.zeros(len(tijdspan))
    v = np.zeros(len(tijdspan))
    x = np.zeros(len(tijdspan))
    x[0] = x0
    v[0] = v0
    # Assumes a uniform time grid.
    delta_t = tijdspan[1] - tijdspan[0]
    for n in range(len(tijdspan)):
        a[n] = versnelling(x[n], v[n])
        if n < len(tijdspan) - 1:
            # Explicit Euler step.
            v[n + 1] = v[n] + delta_t * a[n]
            x[n + 1] = x[n] + delta_t * v[n]
        # NOTE(review): 6.1 duplicates the x_rempunt constant above.
        if x[n] > 6.1:
            vf = v[n]
            break
    return a, v, x, vf
# Simulate 10 s and plot position / velocity / acceleration (Dutch labels:
# positie = position, snelheid = velocity, versnelling = acceleration).
tijd = np.linspace(0, 10, 101)
a, v, x, vf = integreer(tijd)
plt.plot(tijd, x, label='positie')
plt.plot(tijd, v, label='snelheid')
plt.plot(tijd, a, label='versnelling')
plt.xlabel('tijd [s]')
plt.legend()
plt.grid()
plt.show()
| true |
813cce181940c90ca63420a9a97c8ad1aa2e1ca5 | Python | nishi951/pyrednet | /.~pyrednet.py | UTF-8 | 24,303 | 2.734375 | 3 | [] | no_license |
import torch
import torch.cuda
import torch.optim as optim
# import torch.cuda as torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import pickle as pkl
import os
import numpy as np
cuda = True
def weights_init(m):
    """Initialise Conv2d layers: Xavier-uniform weights, zero bias.

    Uses the in-place initialisers (xavier_uniform_/constant_); the
    underscore-less variants called by the original are deprecated.
    Non-Conv2d modules are left untouched.
    """
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight)
        nn.init.constant_(m.bias, 0)


class Prediction(nn.Module):
    """Prediction (Ahat) module
    Equals:
        SatLU(ReLU(Conv(input))) if layer = 0
        ReLU(Conv(input))        if layer > 0
    """
    def __init__(self, layer, inputChannels, outputChannels, filterSize,
                 pixel_max=1.):
        super(Prediction, self).__init__()
        self.layer = layer
        self.pixel_max = pixel_max  # saturation ceiling used at the pixel layer
        # (filterSize-1)//2 padding keeps the spatial size for odd filters.
        self.conv = nn.Conv2d(inputChannels, outputChannels, filterSize, padding=(filterSize-1)//2)
        weights_init(self.conv)
        self.relu = nn.ReLU()

    def forward(self, x_in):
        x = self.conv(x_in)
        x = self.relu(x)
        if self.layer == 0:
            # SatLU: pixel predictions must not exceed the max pixel value.
            x = torch.clamp(x, max=self.pixel_max)
        return x


class Target(nn.Module):
    """Target (A) module
    Equals:
        input (x)                    if layer = 0
        MaxPool(ReLU(Conv(input)))   if layer > 0
    """
    def __init__(self, layer, inputChannels, outputChannels, filterSize):
        super(Target, self).__init__()
        self.layer = layer
        self.conv = nn.Conv2d(inputChannels, outputChannels, filterSize, padding=(filterSize-1)//2)
        self.maxpool = nn.MaxPool2d(2)  # halves the spatial resolution
        self.relu = nn.ReLU()
        weights_init(self.conv)

    def forward(self, x_in):
        x = self.conv(x_in)
        x = self.relu(x)
        x_out = self.maxpool(x)
        return x_out
class Error(nn.Module):
    """Error (E) module.

    Given a prediction (Ahat) and a target (A) of identical shape, splits
    their signed difference into its positive and negative parts and
    stacks the two along the channel (feature) axis:
        [ReLU(A - Ahat); ReLU(Ahat - A)]
    so the output has twice as many channels as each input.
    """
    def __init__(self, layer):
        super(Error, self).__init__()
        self.layer = layer
        self.relu = nn.ReLU()

    def forward(self, prediction, target):
        pos_part = self.relu(target - prediction)
        neg_part = self.relu(prediction - target)
        # dim=-3 is the channel axis for (..., C, H, W) tensors.
        return torch.cat((pos_part, neg_part), -3)
# Representation Module Utility
class HardSigmoid(nn.Module):
    """Hard sigmoid, re-implemented from Theano (Rolfo, Nishimura 2017).

    Maps x to 0 for x <= min_val, to 1 for x >= max_val, and linearly in
    between: the input is first clipped with Hardtanh to
    [min_val, max_val] and then rescaled to [0, 1].
    """
    def __init__(self, min_val, max_val):
        super(HardSigmoid, self).__init__()
        self.min_val = min_val
        self.max_val = max_val
        self.hardtanh = nn.Hardtanh(min_val, max_val)

    def forward(self, x_in):
        clipped = self.hardtanh(x_in)
        return (clipped - self.min_val) / (self.max_val - self.min_val)
# m = (nn.Hardtanh(-2.5,2.5) + torch.Tensor(2.5))/5
# m = HardSigmoid(-2.5, 2.5)
# x = torch.autograd.Variable(torch.randn(2))
# print(x)
# print(m(x))
class Representation(nn.Module):
    """Representation (R) module: a convolutional LSTM cell.

    Follows the PredNet reference implementation (Lotter et al.) rather
    than the original ConvLSTM paper (https://arxiv.org/pdf/1506.04214.pdf):
    all four gates are computed by convolutions over the concatenation of
    [R(l, t-1); E(l, t-1)] and, below the top layer, the upsampled R(l+1, t).

    Parameters
    ----------
    layer : int
        Index of this layer in the PredNet stack.
    numLayers : int
        Total number of layers in the stack.
    R_stack_sizes : sequence of int
        Channel counts of the representation units, per layer.
    A_stack_sizes : sequence of int
        Channel counts of the target (A) units per layer; the error
        units have twice as many channels (positive + negative error).
    kernel_size : int
        Side length of the square convolution kernels.
    c_width_height : (height, width) tuple, optional
        Spatial size of the cell state; only required when peephole
        connections are enabled.
    peephole : bool
        If True, add elementwise peephole connections from the cell
        state to the i/f/o gates.
    hardsigmoid_min, hardsigmoid_max : float
        Saturation bounds of the hard-sigmoid gate activations.
    """
    def __init__(self, layer, numLayers,
                 R_stack_sizes, A_stack_sizes,
                 kernel_size,
                 c_width_height=None,
                 peephole=False, hardsigmoid_min=-2.5, hardsigmoid_max=2.5):
        super(Representation, self).__init__()
        self.layer = layer
        self.numLayers = numLayers
        self.peephole = peephole
        ### Keep track of sizes of inputs ###
        # Error units carry two channels (pos/neg) per target channel.
        self.E_stack_sizes = tuple(2*stack_size for stack_size in A_stack_sizes)
        self.C_stack_size = R_stack_sizes[layer]  # cell size for this layer
        # All four gates see the same stacked input:
        # [R(l, t-1); E(l, t-1)] (+ upsampled R(l+1, t) below the top layer).
        # The previous code kept a per-gate dict, but every entry was equal
        # and the convolutions were accidentally built from the stale loop
        # variable anyway, so a single channel count is used here.
        inputChannels = R_stack_sizes[layer] + self.E_stack_sizes[layer]
        if layer < numLayers - 1:
            inputChannels += R_stack_sizes[layer + 1]
        if peephole:
            # Peephole weights are elementwise (Hadamard) factors on the
            # cell state, so they must match its channel count
            # (C_stack_size). nn.Parameter -- not the nonexistent
            # torch.Parameter used before -- registers them with the
            # module so they are trained and saved.
            self.Wc_i = nn.Parameter(torch.FloatTensor(self.C_stack_size, c_width_height[0], c_width_height[1]))
            self.Wc_f = nn.Parameter(torch.FloatTensor(self.C_stack_size, c_width_height[0], c_width_height[1]))
            self.Wc_o = nn.Parameter(torch.FloatTensor(self.C_stack_size, c_width_height[0], c_width_height[1]))
        outputChannels = R_stack_sizes[layer]
        # One convolution per gate, with "same" padding.
        pad = (kernel_size - 1) // 2
        self.conv_i = nn.Conv2d(inputChannels, outputChannels, kernel_size, padding=pad)
        self.conv_f = nn.Conv2d(inputChannels, outputChannels, kernel_size, padding=pad)
        self.conv_o = nn.Conv2d(inputChannels, outputChannels, kernel_size, padding=pad)
        self.conv_c = nn.Conv2d(inputChannels, outputChannels, kernel_size, padding=pad)
        weights_init(self.conv_i)
        weights_init(self.conv_f)
        weights_init(self.conv_o)
        weights_init(self.conv_c)
        # Hard sigmoids for the gates, tanh for candidate cell and output.
        self.act_i = HardSigmoid(hardsigmoid_min, hardsigmoid_max)
        self.act_f = HardSigmoid(hardsigmoid_min, hardsigmoid_max)
        self.act_o = HardSigmoid(hardsigmoid_min, hardsigmoid_max)
        self.act_c = nn.Tanh()
        self.act_h = nn.Tanh()
        # Nearest-neighbour upsampling to bring R(l+1) to this layer's
        # spatial resolution.
        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
    def forward(self, e_prev, r_prev, c_prev, r_above):
        """One ConvLSTM step (Lotter-style bastardized CLSTM).

        e_prev: the error input at (l, t-1)
        r_prev: the representation output at (l, t-1)
        c_prev: the cell (internal) state at (l, t-1)
        r_above: the representation output at (l+1, t); ignored (pass
            None) for the top layer.
        Returns (r, c): updated representation output and cell state.
        """
        stacked_inputs = torch.cat((r_prev, e_prev), -3)
        if self.layer < self.numLayers - 1:
            r_above_up = self.upsample(r_above)
            stacked_inputs = torch.cat((stacked_inputs, r_above_up), -3)
        # Gate pre-activations.
        i = self.conv_i(stacked_inputs)
        f = self.conv_f(stacked_inputs)
        o = self.conv_o(stacked_inputs)
        if self.peephole:
            f = f + self.Wc_f * c_prev
            i = i + self.Wc_i * c_prev
        i = self.act_i(i)
        f = self.act_f(f)
        # Cell update: forget-gated old state plus input-gated candidate.
        c = f * c_prev + i * self.act_c(self.conv_c(stacked_inputs))
        # The output gate peeks at the *updated* cell, not the previous one.
        if self.peephole:
            o = o + self.Wc_o * c
        o = self.act_o(o)
        r = o * self.act_h(c)
        return r, c
class PredNet(nn.Module):
    """Full stack of layers in the prednet architecture
    |targets|, |errors|, |predictions|, and |representations|
    are all lists of nn.Modules as defined in their respective class
    definitions.
    If the prednet has L prednet layers, then
    targets has length L-1
    errors, predictions, and representations have length L
    """
    def __init__(self, targets, errors,
                 predictions, representations,
                 numLayers, R_stack_sizes, stack_sizes, heights, widths):
        super(PredNet, self).__init__()
        self.targets = nn.ModuleList(targets)
        self.errors = nn.ModuleList(errors)
        self.predictions = nn.ModuleList(predictions)
        self.representations = nn.ModuleList(representations)
        assert targets[0] is None # First target layer is just the input
        self.numLayers = numLayers
        self.R_stack_sizes = R_stack_sizes
        self.stack_sizes = stack_sizes
        self.heights = heights
        self.widths = widths
    def forward(self, x_in, r_prev, c_prev, e_prev):
        """
        One time step of the full PredNet update.
        Arguments:
        ----------
        |r_prev| and |c_prev| are lists of previous values of
        the outputs r and hidden cells c of the representation cells
        |e_prev| is the list of errors for the last iteration
        |x_in| is the next minibatch of inputs, of size
        (num_examples, num_channels, width, height)
        Returns the per-layer lists (r, c, e) for this time step.
        """
        r = [None for _ in range(self.numLayers)]
        c = [None for _ in range(self.numLayers)]
        e = [None for _ in range(self.numLayers)]
        # First, update all the representation layers:
        # Do the top layer first
        last = self.numLayers - 1
        r[last], c[last] = \
            self.representations[last](e_prev[last], r_prev[last],
                                       c_prev[last], None)
        # NOTE(review): each layer below the top receives r_prev[layer+1]
        # (the *previous* step's representation from above), not the
        # freshly computed r[layer+1]; Lotter's reference updates
        # top-down with the current step's R -- confirm this is intended.
        for layer in range(last-1, -1, -1):
            r[layer], c[layer] = self.representations[layer](e_prev[layer],
                                                             r_prev[layer],
                                                             c_prev[layer],
                                                             r_prev[layer+1])
        # Bottom layer gets input instead of a target cell
        prediction = self.predictions[0](r[0])
        e[0] = self.errors[0](prediction, x_in)
        # layer 1 through layer numLayers-1
        for layer in range(1, self.numLayers):
            target = self.targets[layer](e[layer-1])
            prediction = self.predictions[layer](r[layer])
            e[layer] = self.errors[layer](prediction, target)
        return r, c, e
    def init_representations(self):
        """
        Return the initial states of the representations, the cells, and the errors.
        All states start at zero; the .cuda() calls make this require a GPU.
        """
        R_init = [Variable(torch.zeros(self.R_stack_sizes[layer], self.heights[layer], self.widths[layer])).cuda()
                  for layer in range(len(self.R_stack_sizes))]
        E_init = [Variable(torch.zeros(2*self.stack_sizes[layer], self.heights[layer], self.widths[layer])).cuda()
                  for layer in range(len(self.stack_sizes))]
        C_init = [Variable(torch.zeros(self.R_stack_sizes[layer], self.heights[layer], self.widths[layer])).cuda()
                  for layer in range(len(self.stack_sizes))]
        return R_init, C_init, E_init
# convlayer = nn.Conv2d(3, 6, 3)
# data = Variable(torch.randn(4, 3, 5, 5))
# data2 = Variable(torch.randn(4, 3, 5, 5))
# out = torch.cat((data, data2), -3)
# print(out.shape)
# out2 = out.expand(4, -1, -1, -1, -1)
# print(out2.shape)
### Loss Function ###
def PredNetLoss(Errors, layer_weights, time_weights, batch_size):
    """
    Computes the weighted L1 Loss over time and over all the layers.

    Parameters
    ----------
    Errors - list of lists of error tensors E, where
             Errors[i][j] is the error tensor of the ith time step
             at the jth prednet layer.
    time_weights - weights that govern how much each time step contributes
             to the overall loss.
    layer_weights - lambdas that govern how much each prednet layer error
             contributes to the overall loss
    batch_size - number of examples the loss is averaged over.
    """
    # Accumulators live on the GPU; a CUDA device is required here.
    overallLoss = Variable(torch.zeros(1)).cuda()
    for t_idx, t_weight in enumerate(time_weights):
        # Weighted sum of mean errors across layers at this time step.
        timeLoss = Variable(torch.zeros(1)).cuda()
        for l_idx, l_weight in enumerate(layer_weights):
            timeLoss = timeLoss + l_weight * torch.mean(Errors[t_idx][l_idx])
        overallLoss = overallLoss + t_weight * timeLoss
    return overallLoss / batch_size
### Training procedure ###
def train(train_data, num_epochs, epoch_size, batch_size, optimizer,
          prednet, loss_weights):
    """
    Train a PredNet model.

    Parameters
    ----------
    train_data - Iterator that produces input tensors of size
                 batch_size x time x channels x width x height
                 suitable for input into the network.
    num_epochs - number of passes over the training data
    epoch_size - number of minibatches per epoch
    batch_size - number of sequences per minibatch
    optimizer - torch.optim object
    prednet - model, for forward and backward calls
    loss_weights - tuple of (layer_loss_weights, time_loss_weights) -
                 parameters for computing the loss. NOTE: previously this
                 argument was silently ignored and module-level globals
                 were used instead.

    Returns
    -------
    losses - list (per epoch) of lists (per iteration) of loss values.
    """
    # Unpack the loss weighting that PredNetLoss needs.
    layer_weights, time_weights = loss_weights
    losses = []
    print("Training...")
    for epoch in range(num_epochs):
        print("Epoch {}".format(epoch))
        epochLoss = []
        for it, data in enumerate(train_data):
            if it == epoch_size: # Don't pass over entire training set.
                break
            data = Variable(data)
            batch_size, timesteps, channels, width, height = data.shape
            # Fresh zeroed states for every sequence, broadcast over the
            # batch dimension.
            r, c, e = prednet.init_representations()
            r = [rep.expand(batch_size, -1, -1, -1) for rep in r]
            c = [cell.expand(batch_size, -1, -1, -1) for cell in c]
            e = [err.expand(batch_size, -1, -1, -1) for err in e]
            errorCells = []
            # Unroll the network through time, collecting the error units.
            for t in range(timesteps):
                r, c, e = prednet(data[:,t,:,:,:], r, c, e)
                errorCells.append(e)
            # Compute the loss (custom):
            loss = PredNetLoss(errorCells, layer_weights, time_weights, batch_size)
            # Clear gradients from the previous iteration; calling
            # zero_grad() only once before the loop (as before) makes
            # gradients accumulate across steps.
            optimizer.zero_grad()
            loss.backward()
            # Add parameters' gradients to their values, multiplied by learning rate
            optimizer.step()
            # Save loss for reporting
            epochLoss.append(loss.data[0])
            print("\tIteration: {} loss: {}".format(it, loss.data[0]))
        losses.append(epochLoss)
    return losses
# --- Model hyperparameters and loss weighting (module-level script) ---
save_model = True # if weights will be saved
# weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5') # where weights will be saved
# json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')
# Data files
# train_file = os.path.join(DATA_DIR, 'X_train.hkl')
# train_sources = os.path.join(DATA_DIR, 'sources_train.hkl')
# val_file = os.path.join(DATA_DIR, 'X_val.hkl')
# val_sources = os.path.join(DATA_DIR, 'sources_val.hkl')
# Model parameters
numLayers = 4
pixel_max = 1.0
nt = 10 # Number of frames in each training example video
n_channels, im_height, im_width = (3, 128, 160)
# Spatial size halves at every layer (2x2 max-pooling per layer).
widths = [im_width//(2**layer) for layer in range(numLayers)]
heights = [im_height//(2**layer) for layer in range(numLayers)]
# c_width_height = Only necessary if peephole=true
input_shape = (n_channels, im_height, im_width) #if K.image_data_format() == 'channels_first' else (im_height, im_width, n_channels)
stack_sizes = (n_channels, 48, 96, 192)
E_stack_sizes = tuple(2*stack_size for stack_size in stack_sizes)
R_stack_sizes = stack_sizes
A_filt_sizes = (3, 3, 3)
Ahat_filt_sizes = (3, 3, 3, 3)
R_filt_sizes = (3, 3, 3, 3)
# layer_loss_weights = Variable(torch.FloatTensor(np.array([1., 0., 0., 0.])),
#                     requires_grad=False)# weighting for each layer in final loss; "L_0" model: [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
# NOTE: these weight tensors are created on the GPU (torch.cuda.*).
layer_loss_weights = Variable(torch.cuda.FloatTensor(np.array([1., 0., 0., 0.])),
                    requires_grad=False)# weighting for each layer in final loss; "L_0" model: [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
#layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
nt = 10 # number of timesteps used for sequences in training
# time_loss_weights = Variable(torch.FloatTensor(1./ (nt - 1) * np.ones((nt,1))),
#                    requires_grad=False) # equally weight all timesteps except the first
time_loss_weights = Variable(torch.cuda.FloatTensor(1./ (nt - 1) * np.ones((nt,1))),
                    requires_grad=False) # equally weight all timesteps except the first
time_loss_weights[0] = 0
loss_weights = (layer_loss_weights, time_loss_weights)
### Initialize the network ###
targets = [None for _ in range(numLayers)]
predictions = [None for _ in range(numLayers)]
representations = [None for _ in range(numLayers)]
errors = [None for _ in range(numLayers)]
for layer in range(numLayers):
    predictions[layer] = Prediction(layer,
                                    R_stack_sizes[layer],
                                    stack_sizes[layer],
                                    Ahat_filt_sizes[layer])
    # print(R_filt_sizes[layer])
    representations[layer] = Representation(layer,
                                            numLayers,
                                            R_stack_sizes,
                                            stack_sizes,
                                            R_filt_sizes[layer]
                                            )
    errors[layer] = Error(layer)
    # The bottom layer has no Target module: it compares against the input.
    if layer > 0:
        targets[layer] = Target(layer,
                                2*stack_sizes[layer-1],
                                stack_sizes[layer],
                                A_filt_sizes[layer-1]
                                )
prednet = PredNet(targets, errors, predictions, representations,
                  numLayers, R_stack_sizes, stack_sizes, heights, widths)
prednet.cuda() # Comment out for cpu
### Load the dataset ###
# from Lotter: train_generator = SequenceGenerator(train_file, train_sources, nt, batch_size=batch_size, shuffle=True)
DATA_DIR = './kitti_data/'
#train_file = os.path.join(DATA_DIR, 'X_train.hkl')
#train_sources = os.path.join(DATA_DIR, 'sources_train.p')
train_file = './kitti_data/X_train.p'
train_source = './kitti_data/sources_train.p'
val_file = './kitti_data/X_val.p'
val_source = './kitti_data/sources_val.p'
nt = 10 # number of timesteps used for sequences in training
class KittiDataset(Dataset):
    def __init__(self, data_file, source_file, nt, transform=None, output_mode='error', shuffle=False):
        """
        Sequence dataset over preprocessed KITTI frames.

        Args:
            data_file (string): Path to the pickle file with image array of
                dimensions (n_imgs, height, width, num channels)
                for train_file: (41396, 128, 160, 3)
            source_file (string): pickle file of a list with the source-video
                id of every image, with length n_imgs
            transform (callable, optional): Optional transform to be applied
                on a sample (currently unused).
            nt: number of timesteps for sequences in training
        """
        # Scale 8-bit pixel values to [0, 1]; tensor lives on the GPU.
        with open(data_file, 'rb') as f:
            self.X = torch.cuda.FloatTensor(pkl.load(f).astype(np.float32)/255)
        with open(source_file, 'rb') as f:
            self.sources = pkl.load(f)
        self.nt = nt
        # Valid start frames: nt consecutive frames that stay within a
        # single source video.
        self.possible_starts = np.array([i for i in range(self.X.shape[0] - self.nt) if self.sources[i] == self.sources[i + self.nt - 1]])
        if shuffle:
            self.possible_starts = np.random.permutation(self.possible_starts)
        print(len(self.possible_starts))
    def __len__(self):
        return len(self.possible_starts)
    # DEFINE AN EXAMPLE AS NT TIMESTEPS OF A SEQUENCE
    def __getitem__(self, idx):
        # Map the dataset index through possible_starts so every returned
        # sequence starts at a valid frame. The previous implementation
        # indexed self.X with idx directly (and used the global nt), so
        # sequences could cross video boundaries and the shuffle option
        # had no effect.
        start = self.possible_starts[idx]
        data = self.X[start:start + self.nt, :, :, :]
        # Rearrange (time, height, width, channels) ->
        # (time, channels, height, width).
        data = torch.transpose(data, 1, 3)
        data = torch.transpose(data, 2, 3)
        return data
#
# dataset = KittiDataset(train_file_pkl, train_source_pkl, nt)
# NOTE(review): trains on the validation split -- presumably for quick
# debugging; confirm before a real training run.
dataset = KittiDataset(val_file, val_source, nt)
### Train the network ###
# Replace this with something like training with adam
# Training parameters
num_epochs = 100
batch_size = 4
epoch_size = 500
N_seq_val = 100 # number of sequences to use for validation
learning_rate = 1e-3
print('lr', learning_rate)
optimizer = optim.Adam(prednet.parameters(), lr=learning_rate)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# pin_memory=True
losses = train(loader, num_epochs, epoch_size, batch_size, optimizer,
               prednet, loss_weights)
import matplotlib.pyplot as plt
# Flatten the per-epoch loss lists and plot the log-loss curve.
flat_losses = [item for sublist in losses for item in sublist]
plt.plot(np.log(flat_losses))
plt.title("Training loss")
plt.xlabel("iteration")
plt.ylabel("log(Loss)")
plt.show()
| true |
4126eedc32763cf2d0ae47238f57926f33608511 | Python | bitsofgit/DeepLearningKeras | /Keras/FashionMnist.py | UTF-8 | 5,378 | 3.09375 | 3 | [] | no_license | # fashion mnist db
# Trains a small CNN on the Fashion-MNIST dataset with Keras and plots
# training/validation accuracy per epoch.
from __future__ import print_function
import keras
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# suppresses level 1 and 0 warning messages
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"]="2"
# number of class
# 0 T-shirt/top, 1 Trouser, 2 Pullover, 3 Dress, 4 Coat, 5 Sandal, 6 Shirt, 7 Sneaker, 8 Bag, 9 Ankle boot,
num_classes = 10
# sizes of batch and # of epochs of data
batch_size = 128 # number of samples per gradient update.
epochs = 24 # number of iterations to train the data
# input image dimensions
img_rows, img_cols = 28, 28 # image is 28 x 28
# the data
# x_train has 60K images of 28 x 28. Each cell containing 0-255 greyscale number. 8 bit greyscale can have 0-255.
# y_train has 60K labels. Ex 9 which means Ankle boot
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# Deal with format issues with different backends (Tensorflow, Theano, CNTK etc)
# channels for images are generally either 3 for RGB and 1 for gray scale
# below number 1 denotes that its gray scale
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else: # Tensor flow uses 'channels_last' so will fall in this else block
    # converts shape from (60000, 28, 28) 3D to (60000, 28, 28, 1) 4D meaning every single value goes in an array of its own
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols,1)
    # converts shape from (10000, 28, 28) to (10000, 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols,1)
    #input_shape becomes (28,28,1)
    input_shape = (img_rows, img_cols, 1)
# Type convert and scale the test and training data
# Every value is converted to float and then divided by 255. Earlier the values were between 0 and 255 so after division
# everything becomes between 0 and 1
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices. One-hot encoding
# so label 3 will become => 0 0 0 1 0 0 0 0 0 0 and 1 => 0 1 0 0 0 0 0 0 0 0
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# Define the model
# 2D because image is 2D
model = Sequential()
# First layer is Conv2D layer
# 32 is number of filters. Each filter is a 3x3 matrix denoted by kernel_size
# input_shape has to be told because this is the first layer
# if activation is provided, it is applied in the end
# so after this layer is done, we will have a 26X26 matrix for the image
model.add(Conv2D(32, kernel_size=(3,3), activation='relu', input_shape=input_shape))
# Commenting out this pool layer because some studies have suggested that early pooling helps in
# increasing accuracy
# model.add(MaxPooling2D(pool_size=(2,2)))
# Second Conv2D layer with 64 filters and each filter of 3x3 matrix and relu activation
# output shape will now be 24x24
model.add(Conv2D(64, (3,3), activation='relu'))
# Pooling layer
# Does MaxPool with pool matrix of 2x2.
# Strides are 2 so every alternate 2x2 matrix is selected and max value is picked
# output matrix will be 12x12
model.add(MaxPooling2D(pool_size=(2,2), strides=2))
# Flatten Layer
# flattens the whole input
model.add(Flatten())
# Dense layer for classification
# Dense layer performs the operation output = activation(dot(input, kernel) + bias)
# activation is relu
# kernel is a weight matrix created by the layer
# bias is a bias vector created by the layer if use_bias = true
# 128 is the dimensionality of the output shape
model.add(Dense(128, activation='relu'))
# Dropout layer
# 0.5 is the rate that means that 0.5 of the input units will be dropped
# rate is between 0 and 1
# this is to avoid overfitting of data
# overfitting means a model that learns the training data too well
model.add(Dropout(0.5))
# Another Dense layer whose output shape dimension is 10
# softmax is a math function that is generally used in the final classification layer
# it basically finds the max value
model.add(Dense(num_classes, activation='softmax'))
model.summary()
# compile
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
# train the data
# batch_size = number of samples per gradient update. Default is 32.
# verbose 0 silent, 1 progress bar, 2 one line per epoch
# validation_data is used for evaluation of loss at the end of each epoch. This data is not used for training.
hist = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
# Evaluate
score = model.evaluate(x_test, y_test, verbose = 2)
print('Test loss: ', score[0])
print('Test accuracy:', score[1])
# Plot data
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): history keys are 'acc'/'val_acc' only in Keras < 2.3;
# newer versions use 'accuracy'/'val_accuracy' -- confirm installed version.
epoch_list = list(range(1, len(hist.history['acc'])+1)) #values for x axis
plt.plot(epoch_list, hist.history['acc'], epoch_list, hist.history['val_acc'])
# plt.legend('Training Accuracy', 'Validation Accuracy')
plt.show()
# Save the model
model.save('FashionMnist.model')
b547b9713800e5cb0eb2404f74305baa6d8dd1be | Python | adarshtri/personalautomation | /foldermanager/config/managers/configmanager.py | UTF-8 | 3,950 | 3.03125 | 3 | [] | no_license | import ast
from foldermanager.exceptions import foldermanagerexceptions
from abc import ABC, abstractmethod
class ConfigurationManager(object):
    """Loads a folder-manager configuration file, parses it into a dict
    and validates each utility section with its registered parser."""

    def __init__(self, configuration_file):
        self._configuration = None
        self._configuration_file = configuration_file
        self._read_configuration_file()
        self._parse_configuration_for_correctness()

    def _read_configuration_file(self):
        """Read the configuration file and parse it into a dictionary.

        Raises ConfigurationFileNotFoundException if the file does not
        exist and InvalidConfigurationFileException if its content is
        not a valid dict literal.
        """
        # Context manager guarantees the handle is closed (the previous
        # implementation leaked the open file object).
        try:
            with open(self._configuration_file, 'r') as conf_file:
                configuration_string = conf_file.read()
        except FileNotFoundError as fe:
            raise foldermanagerexceptions.ConfigurationFileNotFoundException(
                message="Could not find the configuration file {}.".format(self._configuration_file),
                errors=None) from fe

        # literal_eval raises ValueError as well as SyntaxError on
        # malformed input; treat both as an invalid configuration file.
        try:
            configuration_dictionary = ast.literal_eval(node_or_string=configuration_string)
        except (SyntaxError, ValueError) as se:
            raise foldermanagerexceptions.InvalidConfigurationFileException(
                message="Invalid configuration. Check if the file is in proper json format.",
                errors=None
            ) from se

        self._configuration = configuration_dictionary

    def _parse_configuration_for_correctness(self):
        """Validate every configured utility section.

        Raises InvalidUtilityConfigurationType for unknown utility
        types and ConfigurationFileParseException when a section fails
        its parser's validation (the stored configuration is reset to
        None in that case).
        """
        for utility_type in self._configuration:
            if utility_type not in ConfigurationConstants.VALID_FOLDER_MANAGER_UTILITIES:
                raise foldermanagerexceptions.InvalidUtilityConfigurationType(
                    "Utility type \"{}\" is not supported.".format(utility_type),
                    None)
            # Strategy pattern: dispatch to the parser registered for
            # this utility type.
            parser = ConfigurationConstants.VALID_FOLDER_MANAGER_UTILITIES[utility_type](
                self._configuration[utility_type])
            try:
                parser.parse()
            except foldermanagerexceptions.ConfigurationFileParseException as cfpe:
                # Invalidate the stored configuration before re-raising.
                self._configuration = None
                raise foldermanagerexceptions.ConfigurationFileParseException(message=cfpe.message, errors=cfpe.message)

    def get_configuration(self):
        """Return the parsed configuration dict (None if validation failed)."""
        return self._configuration
class ConfigurationFileParser(ABC):
    """Abstract base for utility-specific configuration validators.

    Subclasses receive the raw configuration for one utility section
    and must implement parse() to validate it.
    """

    def __init__(self, configuration):
        # Raw configuration section, validated later by parse().
        self._configuration = configuration

    @abstractmethod
    def parse(self) -> bool:
        """Validate the configuration; return True when it is valid."""
class KeepItCleanConfigurationFileParser(ConfigurationFileParser):
    """Validator for the "keepitclean" utility configuration section."""

    def __init__(self, configuration):
        super().__init__(configuration=configuration)

    def parse(self) -> bool:
        """Check that every entry is a list of {src, dest} mappings.

        Returns True when the configuration matches the expected
        structure; raises ConfigurationFileParseException otherwise.
        """
        for entries in self._configuration.values():
            # Each key must map to a list of configuration entries.
            if not isinstance(entries, list):
                raise foldermanagerexceptions.ConfigurationFileParseException(
                    message="Invalid configuration file json format. Kindly visit the documentation for more details.",
                    errors=None)
            for entry in entries:
                # Every entry needs both a source and a destination.
                if "src" not in entry or "dest" not in entry:
                    raise foldermanagerexceptions.ConfigurationFileParseException(
                        message="Missing parameter {} in one of the configurations.".format("src/dest"),
                        errors=None
                    )
        return True
class ConfigurationConstants:
    """Registry of supported utility types and related constants."""
    # to implement strategy pattern: maps each supported utility-type
    # name to its parser class; used by ConfigurationManager to
    # dispatch validation.
    VALID_FOLDER_MANAGER_UTILITIES = {
        "keepitclean": KeepItCleanConfigurationFileParser
    }
    # Configuration-file key for the "keep it clean" utility section.
    KEEPITCLEAN_CONFIGURATION = "keepitclean"
| true |
36b94c2bfd334fa7d613e7b36f35cd361e30fe90 | Python | iomega/special-substructure-search | /Code/Utilities/Parsers/hmdb_parser.py | UTF-8 | 4,537 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 14 10:31:25 2018
Parses all data from original Human Metabolome database (hmdb) CLASS database
and writes a new uniform hmdb CLASS database and returns and writes a
dictionary with all data.
Command line: python3 hmdb_parser.py HMDBCLASStry.txt
Command line: python3 hmdb_parser.py HMDBCLASS.txt
@author: stokm006
"""
from sys import argv
def parse_file(input_file):
    """Convert the raw hmdb database text into a list of row lists.

    Splits the text into lines and each line into tab-separated values;
    values are stripped of surrounding single/double quotes, and empty
    values are replaced by "NA".

    input_file: contents of an hmdb database txt file
    """
    parsed_rows = []
    for raw_line in input_file.split('\n'):
        row = []
        for raw_value in raw_line.split('\t'):
            value = raw_value.strip('\'"')
            # Mark missing values explicitly.
            row.append(value if value else "NA")
        parsed_rows.append(row)
    return parsed_rows
def write_CLASS_txtfile(input_file_name, data):
    """Write an 'uniform' hmdb CLASS database text file.

    input_file_name: name of txt file that will be created
    data: hmdb list created with parse_file()
    """
    # Context manager ensures the file is flushed and closed even on
    # error (the previous implementation never closed the handle).
    with open(input_file_name, 'w') as output_file:
        output_file.write('Human Metabolome CLASS database')
        output_file.write('\n\n')
        for line in data:
            output_file.write(str(line) + '\n')
def make_dict(data_for_dict):
    """Build a column-name -> column-values dictionary from hmdb rows.

    The first row is taken as the column names; every following row
    contributes one value per column. Works for any number of columns
    (the previous implementation hard-coded exactly 11 columns in 60
    lines of copy-pasted list handling).

    data_for_dict: hmdb list created with parse_file()
    """
    column_names = data_for_dict[0]
    rows = data_for_dict[1:]
    hmdb_dict = {}
    for index, name in enumerate(column_names):
        hmdb_dict[name] = [row[index] for row in rows]
    return hmdb_dict
def write_dict_txtfile(input_file_name, data_dict):
    """Write the hmdb dictionary to a text file, one key per line.

    input_file_name: name of txt file that will be created
    data_dict: hmdb dictionary created with make_dict()
    """
    # Context manager ensures the file is closed (previously leaked).
    with open(input_file_name, 'w') as output_file:
        output_file.write('Human Metabolome database')
        output_file.write('\n\n')
        for key, values in data_dict.items():
            output_file.write(str(key) + ', ' + str(values) + '\n')
if __name__ == "__main__":
    # Read the raw database file given as the first command-line argument.
    with open(argv[1]) as file_object:
        input_file = file_object.read()
    parsed_data = parse_file(input_file)
    # Write the uniform CLASS database and the dictionary dump.
    write_CLASS_txtfile("hm_CLASS_database", parsed_data)
    my_dictionary = make_dict(parsed_data)
    write_dict_txtfile("hm_database", my_dictionary)
| true |
def multiplication(a):
    """Multiply two (possibly signed) integers given as a string "x*y".

    The operands are handled as digit strings; the unsigned product is
    delegated to multiply() and the sign is applied afterwards (never
    to a zero result).
    """
    parts = a.split("*")
    left = parts[0]
    right = parts[1]
    negative_count = 0
    # Strip a leading minus from either operand, counting how many.
    if left.startswith("-"):
        left = left[1:]
        negative_count += 1
    if right.startswith("-"):
        right = right[1:]
        negative_count += 1
    product = multiply(left, right)
    # Exactly one negative operand makes a non-zero result negative.
    if negative_count == 1 and product != "0":
        product = "-" + product
    return product
def multiply(a, b):
    """Long multiplication of two non-negative digit strings.

    Returns the product as a decimal string without leading zeros.
    """
    # Make `a` the longer operand; the product is symmetric.
    if len(a) < len(b):
        a, b = b, a
    # Build one partial product (as a digit string) per digit of b,
    # ordered least-significant digit first.
    partials = []
    for digit_b in reversed(b):
        carry = 0
        digits = []
        for digit_a in reversed(a):
            carry, remainder = divmod(int(digit_b) * int(digit_a) + carry, 10)
            digits.append(str(remainder))
        if carry != 0:
            digits.append(str(carry))
        partials.append("".join(reversed(digits)))
    # Sum the partial products, shifting each by its decimal place.
    product = "0"
    shift = 1
    for partial in partials:
        product = str(int(partial) * shift + int(product))
        shift *= 10
    return product
# Demo: 1234 * -4231 -> prints -5221054.
print(multiplication("1234*-4231"))
| true |
d7c1a755c56df046e1de82fda127fb4802e0d9ca | Python | nickdelgrosso/NeuralNetImageAnnotation | /ImageAnnotation.py | UTF-8 | 81,907 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 14 19:25:32 2016
Contains classes and functions that represent (sets of) images
and their annotations
Classes:
1. class Annotation(object):
Class that holds an individual image annotation
2. class AnnotatedImage(object):
Class that hold a multichannel image and its annotations
Images are represented in a [x * y * n_channels] matrix
Annotations are represented as a list of Annotation objects
3. class AnnotatedImageSet(object):
Class that represents a dataset of annotated images and organizes
the dataset for feeding in machine learning algorithms
Functions:
def zoom( image, y, x, zoom_size ):
Crops an image to the area of tuple/list zoom_size around the
supplied y, x coordinates. Pads out of range values.
def morph( image, rotation=0, scale_xy=(1,1), noise_level=0 ):
Morphs image based on supplied parameters
>> To do: recenter morphed image??
def morphed_zoom( image, y, x, zoom_size, pad_value=0,
rotation=0, scale_xy=(1,1), noise_level=0 ):
Crops image or image list to area of zoom_size around centroid
def image2vec( image ):
Concatenates a 2d image or image_list to a single 1d vector
def vec2image( lin_image, n_channels, image_size ):
Constructs an image_list from a single 1d vector
def vec2RGB( lin_image, n_channels, image_size,
channel_order=(0,1,2), amplitude_scaling=(1,1,1) ):
Constructs a 3d RGB image from a single 1d vector
def image_grid_RGB( lin_im_mat, n_x=10, n_y=6, image_size,
channel_order=(0,1,2),
amplitude_scaling=(1.33,1.33,1), line_color=0 ):
Constructs a 3d numpy.ndarray tiled with a grid of RGB images. If
more images are supplied that can be tiled, it chooses and displays
a random subset.
@author: pgoltstein
"""
# Default (y, x) pixel size for zoomed image crops.
DEFAULT_ZOOM=(33,33)
########################################################################
### Imports
########################################################################
import numpy as np
from skimage import measure
from skimage.io import imread
from scipy import ndimage
from scipy.io import loadmat,savemat
from os import path
import glob
import matplotlib.pyplot as plt
import seaborn as sns
########################################################################
### Functions
########################################################################
def zoom( image, y, x, zoom_size, normalize=False, pad_value=0 ):
    """Crops a(n) (list of) image(s) to the area of tuple/list zoom_size
    around the supplied y, x coordinates. Pads out of range values.
    image:      Single 2d numpy.ndarray or list of 2d numpy.ndarrays
    y, x:       Center coordinates
    zoom_size:  Size of zoomed image (y,x)
    normalize:  Normalizes the zoomed image to range [0,1]
    pad_value:  Value for out of range coordinates
    returns zoomed image (or list of zoomed images)"""
    if isinstance(image,list):
        # BUG FIX: arguments were previously passed positionally, which fed
        # pad_value into the 'normalize' parameter and dropped 'normalize'
        return [ zoom( ch_im, y, x, zoom_size,
                       normalize=normalize, pad_value=pad_value )
                 for ch_im in image ]
    # Index ranges of the zoom window, centered on (y,x)
    ix_y = np.int16( np.round( 1 + y - ((zoom_size[0]+1) / 2) )
                     + np.arange( 0, zoom_size[0] ) )
    # BUG FIX: the x-range previously used zoom_size[0], which broke
    # non-square zoom windows
    ix_x = np.int16( np.round( 1 + x - ((zoom_size[1]+1) / 2) )
                     + np.arange( 0, zoom_size[1] ) )
    # Largest number of pixels the window sticks out of the image on any side
    max_ix_exceed = -1 * np.min( ( np.min(ix_y), np.min(ix_x),
        image.shape[0]-np.max(ix_y)-1, image.shape[1]-np.max(ix_x)-1 ) )
    if max_ix_exceed > 0:
        # Pad on the bottom/right; negative window indices then wrap around
        # into the padded region, so out-of-range pixels read pad_value.
        # BUG FIX: image.shape is a tuple, so the old expression
        # np.zeros((image.shape+max_ix_exceed+1)) raised a TypeError
        pad_shape = ( image.shape[0] + max_ix_exceed + 1,
                      image.shape[1] + max_ix_exceed + 1 )
        image_temp = np.zeros(pad_shape) + pad_value
        image_temp[0:image.shape[0],0:image.shape[1]] = image
        zoom_im = image_temp[ np.ix_(ix_y,ix_x) ]
    else:
        zoom_im = image[ np.ix_(ix_y,ix_x) ]
    if normalize:
        zoom_im = zoom_im - zoom_im.min()
        zoom_im = zoom_im / zoom_im.max()
    return zoom_im
def morph( image, rotation=0, scale_xy=(1,1), noise_level=0 ):
    """Morphs a single 2d image, or each image of a list, with the supplied
    rotation, scaling and multiplicative noise.
    image:       Single 2d numpy.ndarray or list of 2d numpy.ndarrays
    rotation:    Rotation of annotation in degrees (0-360 degrees)
    scale_xy:    Determines fractional scaling on x/y axis.
                 Min-Max = (0.5,0.5) - (2,2)
    noise_level: Standard deviation of random Gaussian noise
    returns morphed image (or list of morphed images)"""
    # Lists are handled channel by channel with identical parameters
    if isinstance( image, list ):
        return [ morph( ch_im, rotation, scale_xy, noise_level )
                 for ch_im in image ]
    # Fixed operation order: rotate -> scale -> noise
    if rotation != 0:
        image = ndimage.interpolation.rotate(image, rotation, reshape=False)
    if scale_xy[0] != 1 or scale_xy[1] != 1:
        image = ndimage.interpolation.zoom( image, scale_xy )
    if noise_level:
        # Multiplicative Gaussian noise, scaled by the local pixel value
        noise_mask = np.random.normal(size=image.shape) * noise_level
        image = image + (image * noise_mask)
    return image
def morphed_zoom( image, y, x, zoom_size, pad_value=0, normalize=False,
                  rotation=0, scale_xy=(1,1), noise_level=0 ):
    """Cuts a zoom_size patch around (y,x) from an image or image list,
    after applying rotation, scaling and noise to a larger working patch.
    image:       Single 2d numpy.ndarray or list of 2d numpy.ndarrays
    y, x:        Center coordinates
    zoom_size:   (y size, x size)
    pad_value:   Value for out of range coordinates
    normalize:   Normalizes the final zoom to range [0,1]
    rotation:    Rotation of annotation in degrees (0-360 degrees)
    scale_xy:    Determines fractional scaling on x/y axis.
                 Min-Max = (0.5,0.5) - (2,2)
    noise_level: Level of random noise
    returns the morphed, zoomed image (or list of images)"""
    # Work on a 3x oversized patch so rotation/scaling have margin to act in
    oversized = ( zoom_size[0]*3, zoom_size[1]*3 )
    patch = zoom( image=image, y=y, x=x, zoom_size=oversized,
                  normalize=False, pad_value=pad_value )
    patch = morph( image=patch, rotation=rotation, scale_xy=scale_xy,
                   noise_level=noise_level )
    # Re-center on the (possibly rescaled) working patch and cut final zoom
    reference = patch[0] if isinstance( patch, list ) else patch
    center_y = (reference.shape[0]-1)/2
    center_x = (reference.shape[1]-1)/2
    return zoom( patch, y=center_y, x=center_x, zoom_size=zoom_size,
                 normalize=normalize, pad_value=pad_value )
def image2vec( image ):
    """Flattens a 2d image, or a list of 2d images, into a single 1d vector
    image:  single 2d numpy.ndarray or list of 2d numpy.ndarrays
    returns 1d vector with all pixels concatenated"""
    # Treat a single image as a one-element channel list
    channels = image if isinstance( image, list ) else [image]
    return np.concatenate( [ch.ravel() for ch in channels] )
def vec2image( lin_image, n_channels, image_size ):
    """Reconstructs a 2d image, or a list of 2d images, from a 1d vector
    lin_image:  1d image vector with all pixels concatenated
    n_channels: Number of image channels
    image_size: 2 dimensional size of the image (y,x)
    returns single 2d numpy.ndarray (n_channels==1) or list of them"""
    if n_channels > 1:
        # Split into equal channel segments and reshape each one
        return [ np.reshape( segment, image_size )
                 for segment in np.split( lin_image, n_channels ) ]
    return np.reshape( lin_image, image_size )
def vec2RGB( lin_image, n_channels, image_size,
                channel_order=(0,1,2), amplitude_scaling=(1,1,1) ):
    """Constructs a 3d RGB image from a single 1d vector
    lin_image:         1d image vector with all pixels concatenated
    n_channels:        Number of image channels
    image_size:        2 dimensional size of the image (y,x)
    channel_order:     tuple indicating which channels are R, G and B
    amplitude_scaling: Additional scaling of each channel separately
    returns 3d numpy.ndarray (y, x, RGB), clipped to a maximum of 1"""
    # Unpack the linear vector into per-channel 2d images (inlined from
    # vec2image so this function stands on its own)
    if n_channels > 1:
        channels = [ np.reshape( segment, image_size )
                     for segment in np.split( lin_image, n_channels ) ]
    else:
        image = np.reshape( lin_image, image_size )
    RGB = np.zeros((image_size[0],image_size[1],3))
    if n_channels > 1:
        for nr,ch in enumerate(channel_order):
            # BUG FIX: amplitude_scaling was accepted and documented but
            # never applied
            RGB[:,:,nr] = channels[ch] * amplitude_scaling[nr]
    else:
        # Single channel: replicate into all three color planes
        for ch in range(3):
            RGB[:,:,ch] = image * amplitude_scaling[ch]
    # Clip to valid intensity range, consistent with AnnotatedImage.RGB
    RGB[RGB>1] = 1
    return RGB
def image_grid_RGB( lin_images, n_channels, image_size, annotation_nrs=None,
                    n_x=10, n_y=6, channel_order=(0,1,2), auto_scale=False,
                    amplitude_scaling=(1.33,1.33,1),
                    line_color=0, return_borders=False ):
    """ Constructs a 3d numpy.ndarray tiled with a grid of RGB images. If
        more images are supplied than can be tiled, it chooses and displays
        a random subset.
        lin_images:        2d matrix with on each row an image vector with
                           all pixels concatenated
                           (NOTE(review): row indexing below requires a 2d
                           array; a plain list of images would fail)
        n_channels:        Number of image channels
        image_size:        2 dimensional size of the image (y,x)
        annotation_nrs:    List with nr of the to be displayed annotations
        n_x:               Number of images to show on x axis of grid
        n_y:               Number of images to show on y axis of grid
        channel_order:     Tuple indicating which channels are R, G and B
        auto_scale:        Scale each individual image to its maximum (T/F)
        amplitude_scaling: Intensity scaling of each color channel
        line_color:        Intensity (gray scale) of line between images
        return_borders:    Returns a matrix of same size marking borders
        Returns (grid, center_shift) or (grid, center_shift, borders)
        """
    # Select which annotations (rows of lin_images) to show
    if annotation_nrs is None:
        annotation_nrs = list(range(lin_images.shape[0]))
    n_images = len(annotation_nrs)
    if n_images <= n_x*n_y:
        im_ix = list(range(n_images))
    else:
        im_ix = np.random.choice( n_images, n_x*n_y, replace=False )
    # Pixel coordinates of each grid cell (1 px separator line between cells)
    y_coords = []
    for i in range(n_y):
        offset = i * (image_size[0] + 1)
        y_coords.append(offset+np.array(range(image_size[0])))
        max_y = np.max(y_coords[i]) + 1
    x_coords = []
    for i in range(n_x):
        offset = i * (image_size[1] + 1)
        x_coords.append(offset+np.array(range(image_size[1])))
        max_x = np.max(x_coords[i]) + 1
    rgb_coords = np.array(list(range(3)))
    # Fill grid
    im_count = 0
    center_shift = []
    grid = np.zeros((max_y,max_x,3))+line_color
    borders = np.zeros((max_y,max_x,3)) + 1
    for y in range(n_y):
        for x in range(n_x):
            if im_count < n_images:
                # BUG FIX: index order was im_ix[annotation_nrs[im_count]],
                # which crashes or selects wrong rows whenever
                # annotation_nrs is not simply range(n_rows)
                rgb_im = vec2RGB(
                    lin_images[ annotation_nrs[ im_ix[im_count] ], : ],
                    n_channels=n_channels, image_size=image_size,
                    channel_order=channel_order,
                    amplitude_scaling=amplitude_scaling )
                if auto_scale:
                    rgb_im = rgb_im / rgb_im.max()
                grid[np.ix_(y_coords[y],x_coords[x],rgb_coords)] = rgb_im
                borders[np.ix_(y_coords[y],x_coords[x],rgb_coords)] = 0
                # BUG FIX: x-shift previously used image_size[0] (the y size)
                center_shift.append( \
                    ( y_coords[y][0] + (0.5*image_size[0]) -0.5,
                      x_coords[x][0] + (0.5*image_size[1]) -0.5 ) )
            else:
                break
            im_count += 1
    if return_borders:
        return grid, center_shift, borders
    else:
        return grid, center_shift
def split_samples( m_samples, n_groups, ratios=None ):
    """Splits the total number of samples into n_groups according to the
    relative ratios (compensates for rounding errors)
    m_samples:  Total number of samples
    n_groups:   Number of sample groups to return
    ratios:     List with relative ratio of each group (None = equal split)
    returns list with sample counts per group"""
    if ratios is None:
        ratios = n_groups * [ (1/n_groups),]
    else:
        ratios = np.array(ratios)
        ratios = ratios/ratios.sum()
    # Floor allocation per group; remember each group's fractional remainder
    counts = []
    remainders = []
    for g in range(n_groups):
        counts.append( np.int16( m_samples * ratios[g] ) )
        remainders.append( (m_samples * ratios[g]) % 1 )
    # Samples lost to flooring still need to be handed out
    n_missing = m_samples - np.sum(counts)
    if n_missing > 0:
        # Each leftover sample goes to a not-yet-picked group, chosen with
        # probability proportional to that group's fractional remainder
        open_group_ids = list(range(len(remainders)))
        for _ in range(n_missing):
            draw = np.random.rand(1)
            for g in range(len(remainders)):
                if draw < np.sum(remainders[:(g+1)]):
                    counts[open_group_ids[g]] += 1
                    del remainders[g]
                    del open_group_ids[g]
                    break
    return counts
def get_labeled_pixel_coordinates( bin_image, exclude_border=(0,0,0,0) ):
    """Get the x and y pixel coordinates of all labeled pixels in a
    binary image, excluding the pixels outside of the border
    bin_image:      Binary image (numpy array)
    exclude_border: exclude annotations that are a certain distance
                    to each border. Pix from (left, right, up, down)
    returns tuple y_pix,x_pix with numpy.array pixel coordinates"""
    y_res, x_res = bin_image.shape
    # Row-major (y,x) coordinates of every pixel that equals exactly 1
    lab_pix_y, lab_pix_x = np.nonzero( bin_image == 1 )
    # Drop pixels that fall within the exclusion margins, if any are set
    if np.max(exclude_border) > 0:
        keep = ( (lab_pix_x > exclude_border[0])
               & (lab_pix_x < (x_res - exclude_border[1]))
               & (lab_pix_y > exclude_border[2])
               & (lab_pix_y < (y_res - exclude_border[3])) )
        lab_pix_y = lab_pix_y[keep]
        lab_pix_x = lab_pix_x[keep]
    return lab_pix_y, lab_pix_x
########################################################################
### Class Annotation
########################################################################
class Annotation(object):
    """Class that holds an individual image annotation.

    An annotation is a set of (y,x) body pixel coordinates plus metadata
    (name, type_nr, group_nr). Centroid (y,x), perimeter and size are
    derived from the body and refreshed by the body setter."""

    def __init__( self, body_pixels_yx, annotation_name="Neuron",
                    type_nr=1, group_nr=0):
        """Initialize.
            body_pixels_yx: list/tuple of (y,x) coordinates,
                            an n x 2 coordinate array,
                            or a 2d binary image mask
            annotation_name: string
            type_nr: int
            group_nr: int
            """
        # Store supplied parameters; accept coordinate lists, n x 2 arrays
        # or 2d binary masks
        if isinstance( body_pixels_yx, list ):
            self.body = np.array(np.int16(body_pixels_yx))
        elif body_pixels_yx.shape[1] == 2:
            self.body = np.array(np.int16(body_pixels_yx))
        else:
            # Binary mask: convert to an n x 2 array of labeled coordinates
            self.body = np.transpose( \
                np.nonzero( np.array( np.int16(body_pixels_yx) ) ) )
        self.name = str(annotation_name)
        self.type_nr = int(type_nr)
        self.group_nr = int(group_nr)

    def __str__(self):
        return "Annotation at (y={:.1f},x={:.1f}), group={:.0f}, "\
            "name={!s}".format(self._y, self._x, self._group_nr, self.name)

    @property
    def body(self):
        """Returns body coordinates (n x 2 array of (y,x) pixels)"""
        return self._body

    @body.setter
    def body(self,body_pixels_yx):
        """Sets body coordinates and recalculates the derived centroid,
        perimeter and size"""
        self._body = np.array(body_pixels_yx)
        self._y = self._body[:,0].mean()
        self._x = self._body[:,1].mean()
        # Draw the body into a mask padded by one pixel on every side so
        # that find_contours always yields a closed perimeter
        temp_mask = np.zeros( self._body.max(axis=0)+3 )
        temp_mask[ self._body[:,0]+1, self._body[:,1]+1 ] = 1
        # NOTE(review): only the first contour is kept -- assumes the body
        # is a single connected component; confirm for fragmented bodies
        self._perimeter = measure.find_contours(temp_mask, 0.5)[0]-1
        self._size = self._body.shape[0]

    @property
    def x(self):
        """Returns read-only centroid x coordinate"""
        return self._x

    @property
    def y(self):
        """Returns read-only centroid y coordinate"""
        return self._y

    @property
    def group_nr(self):
        """Returns read-only group number"""
        return self._group_nr

    @group_nr.setter
    def group_nr(self,group_nr):
        """Sets group number (coerced to int)"""
        self._group_nr = int(group_nr)

    @property
    def type_nr(self):
        """Returns read-only type number"""
        return self._type_nr

    @type_nr.setter
    def type_nr(self,type_nr):
        """Sets type number (coerced to int)"""
        self._type_nr = int(type_nr)

    @property
    def perimeter(self):
        """Returns read-only array of perimeter (y,x) coordinates"""
        return self._perimeter

    @property
    def size(self):
        """Returns read-only size of annotation (number of body pixels)"""
        return self._size

    def zoom(self, image, zoom_size, pad_value=0, normalize=False ):
        """Crops image to area of tuple/list zoom_size around the
        annotation centroid
        image:      Single 2d numpy.ndarray
        zoom_size:  (y size, x size)
        pad_value:  Value for out of range coordinates
        normalize:  Normalizes zoomed image to its maximum
        returns zoomed image"""
        return zoom( image=image, y=self._y, x=self._x, zoom_size=zoom_size,
                    normalize=normalize, pad_value=pad_value )

    def morphed_zoom(self, image, zoom_size, pad_value=0, normalize=False,
                        rotation=0, scale_xy=(1,1), noise_level=0 ):
        """Crops image to area of tuple/list zoom_size around the centroid,
        after applying rotation, scaling and noise
        image:       Single 2d numpy.ndarray
        zoom_size:   (y size, x size)
        pad_value:   Value for out of range coordinates
        normalize:   Normalizes zoomed image to its maximum
        rotation:    Rotation of annotation in degrees (0-360 degrees)
        scale_xy:    Determines fractional scaling on x/y axis.
                     Min-Max = (0.5,0.5) - (2,2)
        noise_level: Level of random noise
        returns the morphed zoomed image"""
        return morphed_zoom( image, self._y, self._x, zoom_size=zoom_size,
            pad_value=pad_value, normalize=normalize, rotation=rotation,
            scale_xy=scale_xy, noise_level=noise_level )

    def mask_body(self, image, dilation_factor=0,
                    mask_value=1, keep_centroid=True):
        """Draws a mask of all body pixels into image, in place
        image:           Single 2d numpy.ndarray, modified in place
        dilation_factor: >0 for dilation, <0 for erosion
        mask_value:      Value to place in image
        keep_centroid:   Prevents mask from disappearing altogether with
                         negative dilation factors
        returns None (image is modified in place)"""
        if dilation_factor==0:
            # Just mask the incoming image
            image[ self._body[:,0], self._body[:,1] ] = mask_value
        else:
            # Draw mask on temp image, dilate, get pixels, then draw in image
            temp_mask = np.zeros_like(image,dtype=bool)
            temp_mask[ self._body[:,0],self._body[:,1] ] = True
            if dilation_factor>0:
                for _ in range(dilation_factor):
                    temp_mask = ndimage.binary_dilation(temp_mask)
            elif dilation_factor<0:
                for _ in range(-1*dilation_factor):
                    temp_mask = ndimage.binary_erosion(temp_mask)
            temp_body = np.array(np.where(temp_mask == True)).transpose()
            image[ temp_body[:,0], temp_body[:,1] ] = mask_value
            if keep_centroid:
                # Re-draw the centroid in case erosion removed everything
                image[self._y.astype(int),self._x.astype(int)] = mask_value

    def mask_centroid(self, image, dilation_factor=0, mask_value=1):
        """Draws a mask of the centroid pixel into image, in place
        image:           Single 2d numpy.ndarray, modified in place
        dilation_factor: >0 for padding the centroid with surrounding points
        mask_value:      Value to place in image
        returns None (image is modified in place)"""
        if dilation_factor==0:
            # Just mask the incoming image
            image[self._y.astype(int),self._x.astype(int)] = mask_value
        else:
            # Draw mask on temp image, dilate, get pixels, then draw in image
            temp_mask = np.zeros_like(image,dtype=bool)
            temp_mask[self._y.astype(int),self._x.astype(int)] = True
            for _ in range(dilation_factor):
                temp_mask = ndimage.binary_dilation(temp_mask)
            temp_body = np.array(np.where(temp_mask == True)).transpose()
            image[ temp_body[:,0], temp_body[:,1] ] = mask_value
########################################################################
### Class AnnotatedImage
########################################################################
class AnnotatedImage(object):
"""Class that hold a multichannel image and its annotations
Images are represented in a list of [x * y] matrices
Annotations are represented as a list of Annotation objects"""
    def __init__( self, image_data=None, annotation_data=None,
        exclude_border=None, detected_centroids=None, detected_bodies=None,
        labeled_centroids=None, labeled_bodies=None,
        include_annotation_typenr=None, downsample=None):
        """Initialize image list and annotation list
        image_data:         Single image, or list of same-size images
                            (assigned through the channel setter)
        annotation_data:    Annotation instance, or list of Annotation
                            instances (assigned via the annotation setter)
        exclude_border:     4-Tuple containing border exclusion region
                            (left,right,top,bottom), dictionary, or
                            file name of mat file holding the parameters
                            as separate variables
        detected_centroids: Binary image with centroids labeled
        detected_bodies:    Binary image with bodies labeled
        labeled_centroids:  Image with annotation centroids labeled by number
        labeled_bodies:     Image with annotation bodies labeled by number
        downsample:         Downsample the imported images, borders
                            and ROI's by this factor (None = off)
        NOTE(review): include_annotation_typenr is accepted but never used
        in this constructor -- confirm whether it should initialize
        self.include_annotation_typenrs
        """
        # Set the downsampling factor first: the channel/annotation/border
        # setters invoked below all read self.downsamplingfactor
        self._downsample = downsample
        self._bodies = None
        self._body_dilation_factor = 0
        self._centroids = None
        self._centroid_dilation_factor = 0
        self._include_annotation_typenrs = None
        self._y_res = 0
        self._x_res = 0
        self._channel = []
        self._annotation = []
        self._exclude_border = {'left': 0, 'right': 0, 'top': 0, 'bottom': 0}
        self._exclude_border_tuple = (0,0,0,0)
        # Route supplied data through the property setters so downsampling
        # and mask bookkeeping are applied consistently
        if image_data is not None:
            self.channel = image_data
        if annotation_data is not None:
            self.annotation = annotation_data
        if exclude_border is not None:
            self.exclude_border = exclude_border
        self.detected_centroids = detected_centroids
        self.detected_bodies = detected_bodies
        self.labeled_centroids = labeled_centroids
        self.labeled_bodies = labeled_bodies
def __str__(self):
return "AnnotatedImage (#ch={:.0f}, #ann={:.0f}, " \
"brdr={:d},{:d},{:d},{:d})".format( self.n_channels,
self.n_annotations, self.exclude_border['left'],
self.exclude_border['right'], self.exclude_border['top'],
self.exclude_border['bottom'])
# **********************************
# ***** Describing properties *****
    @property
    def y_res(self):
        """Returns the (read-only) size of the y-dimension of the images"""
        return self._y_res

    @property
    def x_res(self):
        """Returns the (read-only) size of the x-dimension of the images"""
        return self._x_res

    @property
    def im_size(self):
        """Returns the (read-only) image size as tuple (y_res, x_res)"""
        return (self._y_res,self._x_res)

    @property
    def n_channels(self):
        """Returns the (read-only) number of image channels"""
        return len(self._channel)

    @property
    def n_annotations(self):
        """Returns the (read-only) number of annotations"""
        return len(self._annotation)

    @property
    def downsamplingfactor(self):
        """Returns the (read-only) downsampling factor (None = no
        downsampling)"""
        return self._downsample

    @property
    def class_labels(self):
        """Returns the class labels used for training: 0 (background class)
        followed by the included annotation type numbers"""
        class_labels = [0,]
        class_labels.extend(list(self.include_annotation_typenrs))
        return class_labels
# ************************************
# ***** Handling the image data *****
    @property
    def channel(self):
        """Returns list with all image channels"""
        return self._channel

    @channel.setter
    def channel(self, image_data):
        """Sets the internal list with all image channels to np.ndarray copies
        of the supplied list with -to numpy.ndarray convertible- image data
        image_data:  single image, or list with images that are convertible
                     to a numpy.ndarray"""
        self._channel = []
        # NOTE(review): the body/centroid masks are dropped here but only
        # recomputed below when the image resolution changed -- confirm that
        # callers never read self.bodies after a same-resolution reassign
        self._bodies = None
        self._centroids = None
        y_res_old,x_res_old = self.y_res,self.x_res
        if isinstance( image_data, list):
            for im in image_data:
                if self.downsamplingfactor is not None:
                    # Downsample each channel on import
                    self._channel.append( ndimage.interpolation.zoom( \
                        np.array(im), 1/self.downsamplingfactor ) )
                else:
                    self._channel.append( np.array(im) )
        else:
            if self.downsamplingfactor is not None:
                self._channel.append( ndimage.interpolation.zoom( \
                    np.array(image_data), 1/self.downsamplingfactor ) )
            else:
                self._channel.append( np.array(image_data) )
        self._y_res,self._x_res = self._channel[0].shape
        # Update masks if there are annotations and the image resolution changed
        if self.n_annotations > 0 and ( (y_res_old != self.y_res)
                                        or (x_res_old != self.x_res) ):
            self._set_bodies()
            self._set_centroids()
    @property
    def exclude_border(self):
        """Returns dictionary with border exclusion parameters
        ({'left','right','top','bottom'}, in pixels)"""
        return self._exclude_border

    @property
    def exclude_border_tuple(self):
        """Returns tuple (left, right, top, bottom) with border exclusion
        parameters"""
        return self._exclude_border_tuple
@exclude_border.setter
def exclude_border( self, exclude_border ):
"""Sets the exclude_border parameter dictionary
exclude_border: 4-Tuple containing border exclusion region (left,
right,top,bottom), dictionary, or file name of mat
file holding the parameters as separate variables
named ExclLeft, ExclRight, ExclTop, ExclBottom
Returns dictionary {'left': #, 'right': #, 'top': #, 'bottom': #}
"""
if isinstance(exclude_border,list) or isinstance(exclude_border,tuple):
self._exclude_border['left'] = exclude_border[0]
self._exclude_border['right'] = exclude_border[1]
self._exclude_border['top'] = exclude_border[2]
self._exclude_border['bottom'] = exclude_border[3]
elif isinstance(exclude_border,dict):
self._exclude_border['left'] = exclude_border['left']
self._exclude_border['right'] = exclude_border['right']
self._exclude_border['top'] = exclude_border['top']
self._exclude_border['bottom'] = exclude_border['bottom']
elif isinstance(exclude_border,str):
mat_data = loadmat(exclude_border)
self._exclude_border['left'] = int(mat_data['ExclLeft'])
self._exclude_border['right'] = int(mat_data['ExclRight'])
self._exclude_border['top'] = int(mat_data['ExclTop'])
self._exclude_border['bottom'] = int(mat_data['ExclBottom'])
if self.downsamplingfactor is not None:
self._exclude_border['left'] = \
int(np.round(self._exclude_border['left']/self.downsamplingfactor))
self._exclude_border['right'] = \
int(np.round(self._exclude_border['right']/self.downsamplingfactor))
self._exclude_border['top'] = \
int(np.round(self._exclude_border['top']/self.downsamplingfactor))
self._exclude_border['bottom'] = \
int(np.round(self._exclude_border['bottom']/self.downsamplingfactor))
self._exclude_border_tuple = \
( int(self._exclude_border['left']), int(self._exclude_border['right']),
int(self._exclude_border['top']), int(self._exclude_border['bottom']) )
def add_image_from_file(self, file_name, file_path='.',
normalize=True, use_channels=None):
"""Loads image or matlab cell array, scales individual channels to
max (1), and adds it as a new image channel
file_name: String holding name of image file
file_path: String holding file path
normalize: Normalize to maximum of image
use_channels: tuple holding channel numbers/order to load (None=all)
"""
y_res_old,x_res_old = self.y_res,self.x_res
# Load from .mat file with cell array
if str(file_name[-4:]) == ".mat":
mat_data = loadmat(path.join(file_path,file_name))
n_channels = mat_data['Images'].shape[1]
if use_channels is None:
use_channels = list(range(n_channels))
for ch in use_channels:
im_x = np.float64(np.array(mat_data['Images'][0,ch]))
if normalize:
im_x = im_x - im_x.min()
im_x = im_x / im_x.max()
if self.downsamplingfactor is not None:
self._channel.append( ndimage.interpolation.zoom( \
im_x, 1/self.downsamplingfactor ) )
else:
self._channel.append(im_x)
# Load from actual image
else:
im = np.float64(imread(path.join(file_path,file_name)))
# Perform normalization (max=1) and add to channels
if im.ndim == 3:
n_channels = np.size(im,axis=2)
if use_channels is None:
use_channels = list(range(n_channels))
for ch in use_channels:
im_x = im[:,:,ch]
if normalize:
im_x = im_x - im_x.min()
im_x = im_x / im_x.max()
if self.downsamplingfactor is not None:
self._channel.append( ndimage.interpolation.zoom( \
im_x, 1/self.downsamplingfactor ) )
else:
self._channel.append(im_x)
else:
if normalize:
im = im - im.min()
im = im / im.max()
if self.downsamplingfactor is not None:
self._channel.append( ndimage.interpolation.zoom( \
im, 1/self.downsamplingfactor ) )
else:
self._channel.append(im)
# Set resolution
self._y_res,self._x_res = self._channel[0].shape
# Update masks if there are annotations and the image resolution changed
if self.n_annotations > 0 and ( (y_res_old != self.y_res)
or (x_res_old != self.x_res) ):
self._set_bodies()
self._set_centroids()
def RGB( self, channel_order=(0,1,2), amplitude_scaling=(1,1,1) ):
"""Constructs an RGB image from the image list
channel_order: tuple indicating which channels are R, G and B
amplitude_scaling: Additional scaling of each channel separately
returns 3d numpy.ndarray"""
RGB = np.zeros((self.y_res,self.x_res,3))
for ch in range(len(channel_order)):
if channel_order[ch] < self.n_channels:
RGB[:,:,ch] = self.channel[channel_order[ch]] * amplitude_scaling[ch]
RGB[RGB>1] = 1
return RGB
def crop( self, left, top, width, height ):
"""Crops the image channels, annotations and borders
left: Left most pixel in cropped image (0 based)
top: Top most pixel in cropped image (0 based)
width: Width of cropped region
height: Height of cropped region
"""
# Crop channels
new_channel_list = []
for nr in range(self.n_channels):
new_channel_list.append( self._channel[nr][top:top+height,left:left+width] )
# Crop annotations
new_annotation_list = []
for an in self.annotation:
an_mask = np.zeros((self.y_res,self.x_res))
an.mask_body( image=an_mask )
new_an_mask = an_mask[top:top+height,left:left+width]
if new_an_mask.sum() > 0:
new_annotation_list.append( Annotation( body_pixels_yx=new_an_mask,
annotation_name=an.name, type_nr=an.type_nr, group_nr=an.group_nr) )
# Crop borders
brdr = self.exclude_border.copy()
brdr['left'] = np.max( [ brdr['left']-left, 0 ] )
brdr['top'] = np.max( [ brdr['top']-top, 0 ] )
crop_from_right = self.x_res-(left+width)
brdr['right'] = np.max( [ brdr['right']-crop_from_right, 0 ] )
crop_from_bottom = self.x_res-(left+width)
brdr['bottom'] = np.max( [ brdr['bottom']-crop_from_bottom, 0 ] )
# Update annotations and channels
self.annotation = new_annotation_list
self.channel = new_channel_list
self.exclude_border = brdr
# *****************************************
# ***** Handling the annotation data *****
    @property
    def annotation(self):
        """Returns list with all image annotations"""
        return self._annotation

    @annotation.setter
    def annotation(self, annotation_data):
        """Sets the internal list of all image annotations to copies of the
        supplied list of class Annotation() annotations
        annotation_data:  instance of, or list with annotations of the
                          Annotation class"""
        self._annotation = []
        if not isinstance( annotation_data, list):
            annotation_data = [annotation_data]
        type_nr_list = []
        for an in annotation_data:
            if self.downsamplingfactor is not None:
                # Scale body coordinates down to the downsampled image grid
                body_pixels = np.round( an.body / self.downsamplingfactor )
            else:
                body_pixels = an.body
            self._annotation.append( Annotation(
                body_pixels_yx=body_pixels,
                annotation_name=an.name,
                type_nr=an.type_nr,
                group_nr=an.group_nr) )
            type_nr_list.append(an.type_nr)
        # First assignment defines which type numbers are included by default
        if self.include_annotation_typenrs is None:
            self.include_annotation_typenrs = type_nr_list
        # Update masks if there is at least one image channel
        if self.n_channels > 0:
            self._set_bodies()
            self._set_centroids()
    def import_annotations_from_mat(self, file_name, file_path='.'):
        """Reads data from ROI.mat file and fills the annotation_list.
        file_name:  String holding name of ROI file
        file_path:  String holding file path
        """
        # Load mat file with ROI data
        mat_data = loadmat(path.join(file_path,file_name))
        annotation_list = []
        type_nr_list = []
        nROIs = len(mat_data['ROI'][0])
        for c in range(nROIs):
            # Body is stored as (x,y); swap columns to internal (y,x) order
            body = mat_data['ROI'][0][c]['body']
            body = np.array([body[:,1],body[:,0]]).transpose()
            body = body-1 # Matlab (1-index) to Python (0-index)
            type_nr = int(mat_data['ROI'][0][c]['type'][0][0])
            name = str(mat_data['ROI'][0][c]['typename'][0])
            group_nr = int(mat_data['ROI'][0][c]['group'][0][0])
            annotation_list.append( Annotation( body_pixels_yx=body,
                annotation_name=name, type_nr=type_nr, group_nr=group_nr ) )
            type_nr_list.append(type_nr)
        # First import defines which type numbers are included by default
        if self.include_annotation_typenrs is None:
            self.include_annotation_typenrs = type_nr_list
        self.annotation = annotation_list
def export_annotations_to_mat(self, file_name,
file_path='.', upsample=None):
"""Writes annotations to ROI.mat file
file_name: String holding name of ROI file
file_path: String holding file path
upsample: Upsampling factor"""
if upsample is not None:
upsamplingfactor = upsample
elif self.downsamplingfactor is not None:
print("AnnotatedImage was downsampled by factor of {}".format( \
self.downsamplingfactor) + ", upsampling ROI's for export ")
upsamplingfactor = self.downsamplingfactor
else:
upsamplingfactor = None
# Upsample ROI's before export
if upsamplingfactor is not None:
annotation_export_list = []
for an in self.annotation:
annotation_mask = np.zeros_like(self._channel[0])
an.mask_body(image=annotation_mask)
annotation_mask = ndimage.interpolation.zoom( \
annotation_mask, self.downsamplingfactor )
annotation_export_list.append( Annotation(
body_pixels_yx=annotation_mask>0.5, annotation_name=an.name,
type_nr=an.type_nr, group_nr=an.group_nr) )
else:
annotation_export_list = self.annotation
# Export ROIs
nrs = []
groups = []
types = []
typenames = []
xs = []
ys = []
sizes = []
perimeters = []
bodys = []
for nr,an in enumerate(annotation_export_list):
nrs.append(nr)
groups.append(an.group_nr)
types.append(an.type_nr)
typenames.append(an.name)
xs.append(an.x+1)
ys.append(an.y+1)
sizes.append(an.size)
perimeter = np.array( \
[an.perimeter[:,1],an.perimeter[:,0]] ).transpose()+1
perimeters.append(perimeter)
body = np.array( [an.body[:,1],an.body[:,0]] ).transpose()+1
bodys.append(body)
savedata = np.core.records.fromarrays( [ nrs, groups, types,
typenames, xs, ys, sizes, perimeters, bodys ],
names = [ 'nr', 'group', 'type', 'typename', 'x', 'y',
'size', 'perimeter', 'body'] )
savemat(path.join(file_path,file_name), {'ROI': savedata} )
print("Exported annotations to: {}".format(
path.join(file_path,file_name)+".mat"))
# ******************************************
# ***** Handling the annotated bodies *****
    @property
    def bodies(self):
        """Returns an image with annotation bodies masked (pixel value =
        annotation number + 1; 0 = background)"""
        return self._bodies

    @property
    def bodies_typenr(self):
        """Returns an image with annotation bodies masked by type_nr"""
        return self._bodies_type_nr

    def _set_bodies(self):
        """(Re)draws the internal body annotation masks using the current
        dilation factor and the included annotation type numbers"""
        self._bodies = np.zeros_like(self._channel[0])
        self._bodies_type_nr = np.zeros_like(self._channel[0])
        for nr in range(self.n_annotations):
            if self._annotation[nr].type_nr in self.include_annotation_typenrs:
                # Mask value nr+1 keeps annotation 0 distinct from background
                self._annotation[nr].mask_body(self._bodies,
                    dilation_factor=self._body_dilation_factor,
                    mask_value=nr+1, keep_centroid=True)
                self._bodies_type_nr[self._bodies==nr+1] = \
                    self._annotation[nr].type_nr

    @property
    def body_dilation_factor(self):
        """Returns the body dilation factor"""
        return(self._body_dilation_factor)

    @body_dilation_factor.setter
    def body_dilation_factor(self, dilation_factor):
        """Updates the internal body annotation mask with dilation_factor
        (>0 dilates, <0 erodes each annotation body)"""
        self._body_dilation_factor = dilation_factor
        self._set_bodies()
# *********************************************
# ***** Handling the annotated centroids *****
    @property
    def centroids(self):
        """Returns an image with annotation centroids masked (pixel value =
        annotation number + 1; 0 = background)"""
        return self._centroids

    @property
    def centroids_typenr(self):
        """Returns an image with annotation centroids masked by type_nr"""
        return self._centroids_type_nr

    def _set_centroids(self):
        """(Re)draws the internal centroid annotation masks using the
        current dilation factor and the included annotation type numbers"""
        self._centroids = np.zeros_like(self._channel[0])
        self._centroids_type_nr = np.zeros_like(self._channel[0])
        for nr in range(self.n_annotations):
            if self._annotation[nr].type_nr in self.include_annotation_typenrs:
                # Mask value nr+1 keeps annotation 0 distinct from background
                self._annotation[nr].mask_centroid(self._centroids,
                    dilation_factor=self._centroid_dilation_factor,
                    mask_value=nr+1)
                self._centroids_type_nr[self._centroids==nr+1] = \
                    self._annotation[nr].type_nr

    @property
    def centroid_dilation_factor(self):
        """Returns the centroid dilation factor"""
        return(self._centroid_dilation_factor)

    @centroid_dilation_factor.setter
    def centroid_dilation_factor(self, dilation_factor):
        """Updates the internal centroid annotation mask with
        dilation_factor (>0 pads each centroid with surrounding pixels)"""
        self._centroid_dilation_factor = dilation_factor
        self._set_centroids()
# ***************************************************
# ***** Loading and saving of Annotated Images *****
def load(self,file_name,file_path='.'):
"""Loads image and annotations from .npy file"""
combined_annotated_image = np.load(path.join(file_path,file_name)).item()
self.channel = combined_annotated_image['image_data']
self.annotation = combined_annotated_image['annotation_data']
self.exclude_border = combined_annotated_image['exclude_border']
self.include_annotation_typenrs = \
combined_annotated_image['include_annotation_typenrs']
self.detected_centroids = combined_annotated_image['detected_centroids']
self.detected_bodies = combined_annotated_image['detected_bodies']
self.labeled_centroids = combined_annotated_image['labeled_centroids']
self.labeled_bodies = combined_annotated_image['labeled_bodies']
print("Loaded AnnotatedImage from: {}".format(
path.join(file_path,file_name)))
def save(self,file_name,file_path='.'):
"""Saves image and annotations to .npy file"""
combined_annotated_image = {}
combined_annotated_image['image_data'] = self.channel
combined_annotated_image['annotation_data'] = self.annotation
combined_annotated_image['exclude_border'] = self.exclude_border
combined_annotated_image['include_annotation_typenrs'] = \
self.include_annotation_typenrs
combined_annotated_image['detected_centroids'] = self.detected_centroids
combined_annotated_image['detected_bodies'] = self.detected_bodies
combined_annotated_image['labeled_centroids'] = self.labeled_centroids
combined_annotated_image['labeled_bodies'] = self.labeled_bodies
np.save(path.join(file_path,file_name), combined_annotated_image)
print("Saved AnnotatedImage as: {}".format(
path.join(file_path,file_name)+".npy"))
# ************************************************
# ***** Generate NN training/test data sets *****
@property
def include_annotation_typenrs(self):
"""Includes only ROI's with certain typenrs in body and centroid masks
"""
return self._include_annotation_typenrs
@include_annotation_typenrs.setter
def include_annotation_typenrs(self, include_typenrs):
"""Sets the nrs to include, removes redundancy by using sets"""
if isinstance(include_typenrs,int):
annotation_typenrs = set([include_typenrs,])
elif include_typenrs is None:
type_nr_list = []
for an in self.annotation:
type_nr_list.append(an.type_nr)
annotation_typenrs = set(type_nr_list)
else:
annotation_typenrs = set(include_typenrs)
if 0 in annotation_typenrs:
annotation_typenrs.remove(0)
self._include_annotation_typenrs = annotation_typenrs
if self.n_channels > 0:
self._set_centroids()
self._set_bodies()
def get_batch( self, zoom_size, annotation_type='Bodies', m_samples=100,
return_size=None, return_annotations=False,
sample_ratio=None, annotation_border_ratio=None,
normalize_samples=False, segment_all=False,
morph_annotations=False, rotation_list=None,
scale_list_x=None, scale_list_y=None, noise_level_list=None ):
"""Constructs a 2d matrix (m samples x n pixels) with linearized data
half of which is from within an annotation, and half from outside
zoom_size: 2 dimensional size of the image (y,x)
annotation_type: 'Bodies' or 'Centroids'
m_samples: number of training samples
return_size: Determines size of annotations that are returned
If None, it defaults to zoom_size
return_annotations: Returns annotations in addition to
samples and labels. If False, returns empty
list. Otherwise set to 'Bodies' or 'Centroids'
sample_ratio: List with ratio of samples per groups (sum=1)
annotation_border_ratio: Fraction of samples drawn from 2px border
betweem positive and negative samples
normalize_samples: Scale each individual channel to its maximum
segment_all: Segments all instead of single annotations (T/F)
morph_annotations: Randomly morph the annotations
rotation_list: List of rotation values to choose from in degrees
scale_list_x: List of horizontal scale factors to choose from
scale_list_y: List of vertical scale factors to choose from
noise_level_list: List of noise levels to choose from
Returns tuple with samples as 2d numpy matrix, labels as
2d numpy matrix and if requested annotations as 2d numpy matrix
or otherwise an empty list as third item"""
# Set return_size
if return_size is None:
return_size = zoom_size
# Calculate number of samples per class
class_labels = sorted(self.class_labels)
n_classes = len(class_labels)
if sample_ratio is not None:
if len(sample_ratio) > n_classes:
sample_ratio = sample_ratio[:n_classes]
m_class_samples = split_samples(
m_samples, n_classes, ratios=sample_ratio )
# Get number of border annotations (same strategy as above)
if annotation_border_ratio is not None:
m_class_borders = list(range(n_classes))
for c in range(n_classes):
m_class_samples[c],m_class_borders[c] = split_samples(
m_class_samples[c], 2,
ratios=[1-annotation_border_ratio,annotation_border_ratio] )
# Get labeled image for identifying annotations
if annotation_type.lower() == 'centroids':
im_label = self.centroids
im_label_class = self.centroids_typenr
elif annotation_type.lower() == 'bodies':
im_label = self.bodies
im_label_class = self.bodies_typenr
# Get labeled image for return annotations
if return_annotations is not False:
if return_annotations.lower() == 'centroids':
return_im_label = self.centroids
elif return_annotations.lower() == 'bodies':
return_im_label = self.bodies
# Predefine output matrices
samples = np.zeros( (m_samples,
self.n_channels*zoom_size[0]*zoom_size[1]) )
if return_annotations is not False:
annotations = np.zeros( (m_samples, return_size[0]*return_size[1]) )
labels = np.zeros( (m_samples, n_classes) )
count = 0
# Loop over output classes
for c in range(n_classes):
# Get image where only border pixels are labeled (either pos or neg)
if annotation_border_ratio is not None:
brdr_val = 1 if class_labels[c] == 0 else 0
im_label_er = ndimage.binary_erosion(
ndimage.binary_erosion( im_label_class==class_labels[c],
border_value=brdr_val ), border_value=brdr_val )
im_label_border = im_label_class==class_labels[c]
im_label_border[im_label_er>0] = 0
# Get lists of all pixels that fall in one class
pix_y,pix_x = get_labeled_pixel_coordinates( \
im_label_class==class_labels[c],
exclude_border=self.exclude_border_tuple )
if annotation_border_ratio is not None:
brdr_pix_y,brdr_pix_x = get_labeled_pixel_coordinates( \
im_label_border,
exclude_border=self.exclude_border_tuple )
# Get list of random indices for pixel coordinates
if len(pix_x) < m_class_samples[c]:
print("!! Warning: fewer samples of class {} (n={})".format( \
c, len(pix_x)) + " than requested (m={})".format(m_class_samples[c]))
print(" Returning duplicate samples...")
random_px = np.random.choice( len(pix_x),
m_class_samples[c], replace=True )
else:
random_px = np.random.choice( len(pix_x),
m_class_samples[c], replace=False )
if annotation_border_ratio is not None:
if len(brdr_pix_x) < m_class_borders[c]:
print("!! Warning: fewer border samples of class {} (n={})".format( \
c, len(brdr_pix_x)) + " than requested (m={})".format(m_class_borders[c]))
print(" Returning duplicate samples...")
random_brdr_px = np.random.choice( len(brdr_pix_x),
m_class_borders[c], replace=True )
else:
random_brdr_px = np.random.choice( len(brdr_pix_x),
m_class_borders[c], replace=False )
# Loop samples
for p in random_px:
nr = im_label[pix_y[p], pix_x[p]]
if not morph_annotations:
samples[count,:] = image2vec( zoom( self.channel,
pix_y[p], pix_x[p],
zoom_size=zoom_size, normalize=normalize_samples ) )
if return_annotations and not segment_all:
annotations[count,:] = image2vec( zoom( \
return_im_label==nr, pix_y[p], pix_x[p],
zoom_size=return_size, normalize=normalize_samples ) )
elif return_annotations and segment_all:
annotations[count,:] = image2vec( zoom( \
return_im_label>0, pix_y[p], pix_x[p],
zoom_size=return_size, normalize=normalize_samples ) )
else:
rotation = float(np.random.choice( rotation_list, 1 ))
scale = ( float(np.random.choice( scale_list_y, 1 )), \
float(np.random.choice( scale_list_x, 1 )) )
noise_level = float(np.random.choice( noise_level_list, 1 ))
samples[count,:] = image2vec( morphed_zoom( self.channel,
pix_y[p], pix_x[p], zoom_size,
rotation=rotation, scale_xy=scale,
normalize=normalize_samples, noise_level=noise_level ) )
if return_annotations and not segment_all:
annotations[count,:] = image2vec( morphed_zoom( \
(return_im_label==nr).astype(np.float),
pix_y[p], pix_x[p], return_size,
rotation=rotation, scale_xy=scale,
normalize=normalize_samples, noise_level=0 ) )
elif return_annotations and segment_all:
annotations[count,:] = image2vec( morphed_zoom( \
(return_im_label>0).astype(np.float),
pix_y[p], pix_x[p], return_size,
rotation=rotation, scale_xy=scale,
normalize=normalize_samples, noise_level=0 ) )
labels[count,c] = 1
count = count + 1
# Positive border examples
if annotation_border_ratio is not None:
for p in random_brdr_px:
nr = im_label[brdr_pix_y[p], brdr_pix_x[p]]
if not morph_annotations:
samples[count,:] = image2vec( zoom( self.channel,
brdr_pix_y[p], brdr_pix_x[p],
zoom_size=zoom_size, normalize=normalize_samples ) )
if return_annotations and not segment_all:
annotations[count,:] = image2vec( zoom( return_im_label==nr,
brdr_pix_y[p], brdr_pix_x[p],
zoom_size=return_size, normalize=normalize_samples ) )
elif return_annotations and segment_all:
annotations[count,:] = image2vec( zoom( return_im_label>0,
brdr_pix_y[p], brdr_pix_x[p],
zoom_size=return_size, normalize=normalize_samples ) )
else:
rotation = float(np.random.choice( rotation_list, 1 ))
scale = ( float(np.random.choice( scale_list_y, 1 )), \
float(np.random.choice( scale_list_x, 1 )) )
noise_level = float(np.random.choice( noise_level_list, 1 ))
samples[count,:] = image2vec( morphed_zoom( self.channel,
brdr_pix_y[p], brdr_pix_x[p], zoom_size,
rotation=rotation, scale_xy=scale,
normalize=normalize_samples, noise_level=noise_level ) )
if return_annotations and not segment_all:
annotations[count,:] = image2vec( morphed_zoom(
(return_im_label==nr).astype(np.float),
brdr_pix_y[p], brdr_pix_x[p], return_size,
rotation=rotation, scale_xy=scale,
normalize=normalize_samples, noise_level=0 ) )
elif return_annotations and segment_all:
annotations[count,:] = image2vec( morphed_zoom(
(return_im_label>0).astype(np.float),
brdr_pix_y[p], brdr_pix_x[p], return_size,
rotation=rotation, scale_xy=scale,
normalize=normalize_samples, noise_level=0 ) )
labels[count,c] = 1
count = count + 1
# Return samples, labels, annotations etc
if return_annotations:
annotations[annotations<0.5]=0
annotations[annotations>=0.5]=1
return samples,labels,annotations
else:
return samples,labels,[]
def generate_cnn_annotations_cb(self, min_size=None, max_size=None,
dilation_factor_centroids=0, dilation_factor_bodies=0,
re_dilate_bodies=0 ):
"""Generates annotations from CNN detected bodies. If detected
centroids are present, it uses those to identify single annotations
and uses the detected bodies to get the outlines
min_size: Minimum number of pixels of the annotations
max_size: Maximum number of pixels of the annotations
dilation_factor_centroids: Dilates or erodes centroids before
segentation(erosion will get rid of
'speccles', dilations won't do much good)
dilation_factor_bodies: Dilates or erodes annotation bodies
before segmentation
re_dilate_bodies: Dilates or erodes annotation bodies
after segmentation
"""
# Check if centroids are detected
if self.detected_centroids is None:
do_centroids = False
else:
do_centroids = True
detected_bodies = np.array(self.detected_bodies)
if do_centroids:
detected_centroids = np.array(self.detected_centroids)
# Remove annotated pixels too close to the border artifact region
if self.exclude_border['left'] > 0:
detected_bodies[ :, :self.exclude_border['left'] ] = 0
if self.exclude_border['right'] > 0:
detected_bodies[ :, -self.exclude_border['right']: ] = 0
if self.exclude_border['top'] > 0:
detected_bodies[ :self.exclude_border['top'], : ] = 0
if self.exclude_border['bottom'] > 0:
detected_bodies[ -self.exclude_border['bottom']:, : ] = 0
# # Split centroids that are too long and thin
# if do_centroids:
# # print("Splitting lengthy centroids {:3d}".format(0),
# # end="", flush=True)
# for nr in range(1,n_centroid_labels+1):
# # print((3*'\b')+'{:3d}'.format(nr), end='', flush=True)
# mask = centroid_labels==nr
# props = measure.regionprops(mask)
# print(props.equivalent_diameter)
# # print((3*'\b')+'{:3d}'.format(nr))
# Dilate or erode centroids
if do_centroids:
if dilation_factor_centroids>0:
for _ in range(dilation_factor_centroids):
detected_centroids = \
ndimage.binary_dilation(detected_centroids)
elif dilation_factor_centroids<0:
for _ in range(-1*dilation_factor_centroids):
detected_centroids = \
ndimage.binary_erosion(detected_centroids)
# Dilate or erode bodies
if dilation_factor_bodies>0:
for _ in range(dilation_factor_bodies):
detected_bodies = ndimage.binary_dilation(detected_bodies)
elif dilation_factor_bodies<0:
for _ in range(-1*dilation_factor_bodies):
detected_bodies = ndimage.binary_erosion(detected_bodies)
# Get rid of centroids that have no bodies associated with them
if do_centroids:
detected_centroids[detected_bodies==0] = 0
# Get labeled centroids and bodies
if do_centroids:
centroid_labels = measure.label(detected_centroids, background=0)
n_centroid_labels = centroid_labels.max()
print("Found {} putative centroids".format(n_centroid_labels))
body_labels = measure.label(detected_bodies, background=0)
n_body_labels = body_labels.max()
print("Found {} putative bodies".format(n_body_labels))
# Nothing labeled, no point to continue
if n_centroid_labels == 0 or n_body_labels == 0:
print("Aborting ...")
return 0
# If only bodies, convert labeled bodies annotations
if not do_centroids:
print("Converting labeled body image into annotations {:3d}".format(0),
end="", flush=True)
ann_body_list = []
for nr in range(1,n_body_labels+1):
print((3*'\b')+'{:3d}'.format(nr), end='', flush=True)
body_mask = body_labels==nr
an_body = Annotation( body_pixels_yx=body_mask)
ann_body_list.append(an_body)
print((3*'\b')+'{:3d}'.format(nr))
else:
# Convert labeled centroids into centroid and body annotations
print("Converting labeled centroids and bodies into annotations {:3d}".format(0),
end="", flush=True)
ann_body_list = []
ann_body_nr_list = []
ann_centr_list = []
for nr in range(1,n_centroid_labels+1):
print((3*'\b')+'{:3d}'.format(nr), end='', flush=True)
mask = centroid_labels==nr
an_centr = Annotation( body_pixels_yx=mask)
ann_centr_list.append(an_centr)
body_nr = body_labels[int(an_centr.y),int(an_centr.x)]
ann_body_nr_list.append(body_nr)
body_mask = body_labels==body_nr
an_body = Annotation( body_pixels_yx=body_mask)
ann_body_list.append(an_body)
print((3*'\b')+'{:3d}'.format(nr))
# Loop centroid annotations to remove overlap of body annotations
print("Removing overlap of annotation {:3d}".format(0), end="", flush=True)
for nr1 in range(len(ann_centr_list)):
print((3*'\b')+'{:3d}'.format(nr1), end='', flush=True)
# Find out if the centroid shares the body with another centroid
shared_list = []
for nr2 in range(len(ann_centr_list)):
if (ann_body_nr_list[nr1] == ann_body_nr_list[nr2]) \
and (ann_body_nr_list[nr1] > 0):
shared_list.append(nr2)
# If more than one centroid owns the same body, split it
if len(shared_list) > 1:
# for each pixel, calculate the distance to each centroid
D = np.zeros((ann_body_list[nr1].body.shape[0],len(shared_list)))
for n,c in enumerate(shared_list):
cy, cx = ann_centr_list[c].y, ann_centr_list[c].x
for p,(y,x) in enumerate(ann_body_list[c].body):
D[p,n] = np.sqrt( ((cy-y)**2) + ((cx-x)**2) )
# Find the closest centroid for each pixel
closest_cntr = np.argmin(D,axis=1)
# For each centroid, get a new annotation with closest pixels
for n,c in enumerate(shared_list):
B = ann_body_list[c].body[closest_cntr==n,:]
new_ann = Annotation(body_pixels_yx=B)
ann_body_nr_list[c] = 0
ann_body_list[c] = new_ann
print((3*'\b')+'{:3d}'.format(nr1))
# Remove too small annotations
if min_size is not None:
remove_ix = []
for nr in range(len(ann_body_list)):
if ann_body_list[nr].body.shape[0] < min_size:
remove_ix.append(nr)
if len(remove_ix) > 0:
print("Removing {} annotations where #pixels < {}".format(
len(remove_ix), min_size))
for ix in reversed(remove_ix):
del ann_body_list[ix]
# Remove too large annotations
if max_size is not None:
remove_ix = []
for nr in range(len(ann_body_list)):
if ann_body_list[nr].body.shape[0] > max_size:
remove_ix.append(nr)
if len(remove_ix) > 0:
print("Removing {} annotations where #pixels > {}".format(
len(remove_ix), max_size))
for ix in reversed(remove_ix):
del ann_body_list[ix]
# Dilate or erode annotated bodies
if re_dilate_bodies != 0:
print("Dilating annotated bodies by a factor of {}: {:3d}".format(
re_dilate_bodies,0), end="", flush=True)
for nr in range(len(ann_body_list)):
print((3*'\b')+'{:3d}'.format(nr+1), end='', flush=True)
masked_image = np.zeros(self.detected_bodies.shape)
ann_body_list[nr].mask_body(
image=masked_image, dilation_factor=re_dilate_bodies)
ann_body_list[nr] = Annotation( body_pixels_yx=masked_image)
print((3*'\b')+'{:3d}'.format(nr+1))
# Set the internal annotation list
self.annotation = ann_body_list
def image_grid_RGB( self, image_size, image_type='image', annotation_nrs=None,
n_x=10, n_y=6, channel_order=(0,1,2),
normalize_samples=False, auto_scale=False,
amplitude_scaling=(1.33,1.33,1), line_color=0 ):
""" Constructs a 3d numpy.ndarray tiled with a grid of RGB images from
the annotations. If more images are requested than can be tiled,
it chooses and displays a random subset.
image_size: 2 dimensional size of the zoom-images (y,x)
image_type: 'image', 'bodies', 'centroids'
annotation_nrs: List with nr of the to be displayed annotations
n_x: Number of images to show on x axis of grid
n_y: Number of images to show on y axis of grid
channel_order: Tuple indicating which channels are R, G and B
auto_scale: Scale each individual image to its maximum (T/F)
normalize_samples: Scale each individual channel to its maximum
amplitude_scaling: Intensity scaling of each color channel
line_color: Intensity (gray scale) of line between images
Returns numpy.ndarray (y,x,RGB) and a list with center_shifts (y,x)
"""
# Get indices of images to show
if annotation_nrs is None:
annotation_nrs = list(range(self.n_annotations))
n_images = len(annotation_nrs)
# Get coordinates of where images will go
y_coords = []
offset = 0
for i in range(n_y):
offset = i * (image_size[0] + 1)
y_coords.append(offset+np.array(range(image_size[0])))
max_y = np.max(y_coords[i]) + 1
x_coords = []
offset = 0
for i in range(n_x):
offset = i * (image_size[1] + 1)
x_coords.append(offset+np.array(range(image_size[1])))
max_x = np.max(x_coords[i]) + 1
rgb_coords = np.array(list(range(3)))
# Fill grid
im_count = 0
rgb_im = np.zeros((image_size[0],image_size[1],3))
grid = np.zeros((max_y,max_x,3))+line_color
center_shift = []
for y in range(n_y):
for x in range(n_x):
if im_count < n_images:
for ch in range(3):
if image_type.lower() == 'image':
im = self.channel[channel_order[ch]]
if image_type.lower() == 'centroids':
im = self.centroids>0.5
if image_type.lower() == 'bodies':
im = self.bodies>0.5
rgb_im[:,:,ch] = zoom( im,
self.annotation[annotation_nrs[im_count]].y,
self.annotation[annotation_nrs[im_count]].x, image_size,
normalize=normalize_samples, pad_value=0 )
if auto_scale:
rgb_im = rgb_im / rgb_im.max()
grid[np.ix_(y_coords[y],x_coords[x],rgb_coords)] = rgb_im
center_shift.append( \
( y_coords[y][0] + (0.5*image_size[0]) -0.5,
x_coords[x][0] + (0.5*image_size[0]) -0.5 ) )
else:
break
im_count += 1
return grid, center_shift
########################################################################
### Class AnnotatedImageSet
########################################################################
class AnnotatedImageSet(object):
    """Class that represents a dataset of annotated images and organizes
    the dataset for feeding in machine learning algorithms"""
    def __init__(self, downsample=None):
        """Initializes
            downsample: Downsample to be imported images, borders
                        and ROI's by a certain factor
        """
        # initializes the list of annotated images
        self._downsample = downsample
        self.ai_list = []
        # Dilation factors are mirrored on every contained AnnotatedImage
        self._body_dilation_factor = 0
        self._centroid_dilation_factor = 0
        self._include_annotation_typenrs = None
        self._n_channels = 0
    def __str__(self):
        return "AnnotatedImageSet (# Annotated Images = {:.0f}" \
                ")".format(self.n_annot_images)
    # **********************************
    # *****  Read only properties  *****
    @property
    def n_annot_images(self):
        # Number of AnnotatedImage instances currently in the set
        return len(self.ai_list)
    @property
    def n_channels(self):
        # Number of image channels; set on first load (see load_data_dir_tiff_mat)
        return self._n_channels
    @property
    def downsamplingfactor(self):
        """Returns the (read-only) downsampling factor"""
        return self._downsample
    # ********************************************
    # *****  Handling the annotation typenr  *****
    @property
    def class_labels(self):
        """Returns the class labels that are set for training"""
        # Class 0 is the implicit background class
        class_labels = [0,]
        class_labels.extend(list(self.include_annotation_typenrs))
        return class_labels
    @property
    def include_annotation_typenrs(self):
        """Returns the annotation typenrs"""
        return self._include_annotation_typenrs
    @include_annotation_typenrs.setter
    def include_annotation_typenrs(self, annotation_typenrs):
        """Updates the internal annotation typenr if not equal to last set nrs

        An int becomes a one-element set; None means 'accept whatever
        typenrs each contained image exposes' (those are collected and
        stored instead). The background label 0 is always dropped.
        """
        if isinstance(annotation_typenrs,int):
            annotation_typenrs = set([annotation_typenrs,])
        elif annotation_typenrs is None:
            pass
        else:
            annotation_typenrs = set(annotation_typenrs)
        if isinstance(annotation_typenrs,set):
            if 0 in annotation_typenrs:
                annotation_typenrs.remove(0)
        if annotation_typenrs != self._include_annotation_typenrs:
            new_annotation_type_nrs = set()
            # Propagate to every contained image; when None was requested,
            # collect the union of the typenrs the images end up with
            for nr in range(self.n_annot_images):
                if self.ai_list[nr].include_annotation_typenrs != annotation_typenrs:
                    self.ai_list[nr].include_annotation_typenrs = annotation_typenrs
                if annotation_typenrs is None:
                    new_annotation_type_nrs.update(self.ai_list[nr].include_annotation_typenrs)
            if annotation_typenrs is not None:
                self._include_annotation_typenrs = annotation_typenrs
            else:
                self._include_annotation_typenrs = new_annotation_type_nrs
    # ********************************************
    # *****  Handling cropping of annot-ims  *****
    def crop( self, left, top, width, height ):
        """Crops the image channels, annotations and borders
        left:   Left most pixel in cropped image (0 based)
        top:    Top most pixel in cropped image (0 based)
        width:  Width of cropped region
        height:  Height of cropped region
        """
        # Delegates to each contained AnnotatedImage
        for nr in range(self.n_annot_images):
            self.ai_list[nr].crop(left, top, width, height )
    # *******************************************
    # *****  Handling the annotated bodies  *****
    @property
    def body_dilation_factor(self):
        """Returns the body dilation factor"""
        return(self._body_dilation_factor)
    @body_dilation_factor.setter
    def body_dilation_factor(self, dilation_factor):
        """Updates the internal body annotation mask with dilation_factor"""
        # Only touch the contained images when the value actually changes
        if dilation_factor != self._body_dilation_factor:
            for nr in range(self.n_annot_images):
                self.ai_list[nr].body_dilation_factor = dilation_factor
            self._body_dilation_factor = dilation_factor
    # **********************************************
    # *****  Handling the annotated centroids  *****
    @property
    def centroid_dilation_factor(self):
        """Returns the centroid dilation factor"""
        return(self._centroid_dilation_factor)
    @centroid_dilation_factor.setter
    def centroid_dilation_factor(self, dilation_factor):
        """Updates the internal centroid annotation mask with dilation_factor"""
        if dilation_factor != self._centroid_dilation_factor:
            for nr in range(self.n_annot_images):
                self.ai_list[nr].centroid_dilation_factor = dilation_factor
            self._centroid_dilation_factor = dilation_factor
    # ********************************************
    # *****  Produce training/test data set  *****
    def data_sample(self, zoom_size, annotation_type='Bodies', m_samples=100,
            return_size=None, return_annotations=False,
            sample_ratio=None, annotation_border_ratio=None,
            normalize_samples=False, segment_all=False,
            morph_annotations=False, rotation_list=None,
            scale_list_x=None, scale_list_y=None, noise_level_list=None ):
        """Constructs a random sample of with linearized annotation data,
            organized in a 2d matrix (m samples x n pixels) half of which is
            from within an annotation, and half from outside. It takes equal
            amounts of data from each annotated image in the list.
            zoom_size:       2 dimensional size of the image (y,x)
            annotation_type: 'Bodies' or 'Centroids'
            m_samples:       number of training samples
            return_size:     Determines size of annotations that are returned
                             If None, it defaults to zoom_size
            return_annotations:  Returns annotations in addition to
                                 samples and labels. If False, returns empty
                                 list. Otherwise set to 'Bodies' or 'Centroids'
            sample_ratio:    List with ratio of samples per groups (sum=1)
            annotation_border_ratio: Fraction of samples drawn from 2px border
                                     between positive and negative samples
            normalize_samples: Scale each individual channel to its maximum
            segment_all:     Segments all instead of single annotations (T/F)
            morph_annotations: Randomly morph the annotations
            rotation_list:   List of rotation values to choose from in degrees
            scale_list_x:    List of horizontal scale factors to choose from
            scale_list_y:    List of vertical scale factors to choose from
            noise_level_list: List of noise levels to choose from
            Returns tuple with samples as 2d numpy matrix, labels as
            2d numpy matrix and if requested annotations as 2d numpy matrix
            or otherwise an empty list as third item"""
        # Set return_size
        if return_size is None:
            return_size = zoom_size
        # Get number of classes
        n_classes = len(self.class_labels)
        # Calculate number of pixels in linearized image
        # NOTE(review): assumes at least one AnnotatedImage is loaded --
        # an empty set raises IndexError here
        n_pix_lin = self.ai_list[0].n_channels * zoom_size[0] * zoom_size[1]
        # List with start and end sample per AnnotatedImage
        m_set_samples_list = np.round( np.linspace( 0, m_samples,
                                        self.n_annot_images+1 ) )
        # Predefine output matrices
        samples = np.zeros( (m_samples, n_pix_lin) )
        if return_annotations is not False:
            annotations = np.zeros( (m_samples, return_size[0]*return_size[1]) )
        else:
            annotations = []
        labels = np.zeros( (m_samples, n_classes) )
        # Loop AnnotatedImages
        for s in range(self.n_annot_images):
            # Number of samples for this AnnotatedImage
            m_set_samples = int(m_set_samples_list[s+1]-m_set_samples_list[s])
            # Get samples, labels, annotations
            s_samples,s_labels,s_annotations = \
                self.ai_list[s].get_batch(
                    zoom_size, annotation_type=annotation_type,
                    m_samples=m_set_samples,
                    return_size=return_size, return_annotations=return_annotations,
                    sample_ratio=sample_ratio,
                    annotation_border_ratio=annotation_border_ratio,
                    normalize_samples=normalize_samples, segment_all=segment_all,
                    morph_annotations=morph_annotations,
                    rotation_list=rotation_list, scale_list_x=scale_list_x,
                    scale_list_y=scale_list_y, noise_level_list=noise_level_list )
            # put samples, labels and possibly annotations in
            samples[int(m_set_samples_list[s]):int(m_set_samples_list[s+1]),:] \
                = s_samples
            labels[int(m_set_samples_list[s]):int(m_set_samples_list[s+1]),:] \
                = s_labels
            if return_annotations is not False:
                annotations[int(m_set_samples_list[s]):int(m_set_samples_list[s+1]),:] \
                    = s_annotations
        return samples,labels,annotations
    # **************************************
    # *****  Load data from directory  *****
    def load_data_dir_tiff_mat(self, data_directory,
                normalize=True, use_channels=None, exclude_border=None):
        """Loads all Tiff images or *channel.mat and accompanying ROI.mat
            files from a single directory that contains matching sets of .tiff
            or *channel.mat and .mat files
            data_directory:  path
            normalize:       Normalize to maximum of image
            use_channels:    tuple holding channel numbers/order to load (None=all)
            exclude_border:  Load border exclude region from file
        """
        # Get list of all .tiff file and .mat files
        image_files = glob.glob(path.join(data_directory,'*channels.mat'))
        if len(image_files) == 0:
            image_files = glob.glob(path.join(data_directory,'*.tiff'))
        mat_files = glob.glob(path.join(data_directory,'*ROI*.mat'))
        # Exclude border files
        if isinstance(exclude_border,str):
            if exclude_border.lower() == 'load':
                brdr_files = glob.glob(path.join(data_directory,'*Border*.mat'))
        # Loop files and load images and annotations
        # NOTE(review): files are paired by sorted glob order and zip
        # truncates to the shorter list -- confirm counts always match
        print("\nLoading image and annotation files:")
        annotation_type_nrs = set()
        for f, (image_file, mat_file) in enumerate(zip(image_files,mat_files)):
            image_filepath, image_filename = path.split(image_file)
            mat_filepath, mat_filename = path.split(mat_file)
            print("{:2.0f}) {} -- {}".format(f+1,image_filename,mat_filename))
            # Create new AnnotatedImage, add images and annotations
            anim = AnnotatedImage(downsample=self.downsamplingfactor)
            if self.include_annotation_typenrs is not None:
                anim.include_annotation_typenrs = self.include_annotation_typenrs
            anim.add_image_from_file( image_filename, image_filepath,
                normalize=normalize, use_channels=use_channels )
            anim.import_annotations_from_mat( mat_filename, mat_filepath )
            if isinstance(exclude_border,str):
                if exclude_border.lower() == 'load':
                    anim.exclude_border = brdr_files[f]
            if isinstance(exclude_border,list) \
                or isinstance(exclude_border,tuple):
                anim.exclude_border = exclude_border
            # Check if the number of channels is the same
            if len(self.ai_list) == 0:
                self._n_channels = anim.n_channels
            else:
                if self._n_channels != anim.n_channels:
                    print("!!! CRITICAL WARNING !!!")
                    print("-- Number of channels is not equal for all annotated images --")
            # Append AnnotatedImage to the internal list
            print("   - "+anim.__str__())
            self.ai_list.append(anim)
            annotation_type_nrs.update(anim.include_annotation_typenrs)
        if self.include_annotation_typenrs is None:
            self.include_annotation_typenrs = annotation_type_nrs
| true |
06bf8da3e9e178701fa7b33d8b06c9f1293bdde5 | Python | ShokuninSan/deep-q-learning-from-paper-to-code | /frozen_lake_deterministic_policy.py | UTF-8 | 577 | 3.21875 | 3 | [] | no_license | import gym
import matplotlib.pyplot as plt
# Evaluate a fixed deterministic policy on FrozenLake and plot the
# running win percentage (average reward over the last 10 episodes,
# sampled every 10th episode).
env = gym.make('FrozenLake-v0')
n_games = 1000
# One action per grid state
# NOTE(review): presumably gym's 0=left, 1=down, 2=right, 3=up -- confirm
policy = {0: 1, 1: 0, 2: 0, 3: 0, 4: 1, 5: 0, 6: 1, 7: 0,
          8: 2, 9: 1, 10: 1, 11: 0, 12: 0, 13: 2, 14: 2, 15: 0}
win_pct = []
rewards = []
for episode in range(n_games):
    obs = env.reset()
    done = False
    while not done:
        obs, reward, done, _ = env.step(policy[obs])
    rewards.append(reward)
    if episode % 10 == 0:
        win_pct.append(sum(rewards[-10:]) / 10)
plt.plot(win_pct)
plt.show()
| true |
45cdbbdc98888ddeaa79769c46ca50d9f177476c | Python | almaan/sepal | /compare/compare.py | UTF-8 | 2,970 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
import numpy as np
import pandas as pd
import os.path as osp
import os
from scipy.stats import spearmanr,pearsonr
def match(sort_vals,
          cnt,
          fun):
    """Align a one-column ranking table with a per-gene summary statistic.

    sort_vals : DataFrame indexed by gene, single column of ranking values
    cnt       : count matrix with genes as columns
    fun       : callable reducing cnt to one value per gene (column)

    Returns a tuple of two flat numpy arrays (ranking values, statistic
    values) restricted to the genes present in both tables.
    """
    stat = pd.DataFrame(fun(cnt),
                        index=cnt.columns,
                        columns=['x'])
    shared = sort_vals.index.intersection(stat.index)
    stat = stat.loc[shared, :]
    ranking = sort_vals.loc[shared, :]
    return (ranking.values.flatten(),
            stat.values.flatten())
def evaluate_methods(mets, cnt, funs):
    """Correlate every method's ranking with every summary statistic.

    mets : dict mapping method name -> one-column DataFrame of rankings
    cnt  : count matrix with genes as columns
    funs : dict mapping statistic name -> reduction function over cnt

    Returns {statistic name: {method name: spearmanr result}}.
    """
    return {
        stat_name: {
            method: spearmanr(*match(ranking, cnt, stat_fun))
            for method, ranking in mets.items()
        }
        for stat_name, stat_fun in funs.items()
    }
def process_methods(methods):
    """Load each method's result table and restrict all tables to the set
    of genes shared by every method.

    methods : dict mapping method name -> spec dict with keys 'file',
              'sep', 'column' and optionally 'genes' (column holding the
              gene names to use as index)

    Returns {method name: one-column DataFrame indexed by shared genes}.
    """
    shared_genes = None
    tables = {}
    for name, spec in methods.items():
        df = pd.read_csv(spec['file'],
                         sep=spec['sep'],
                         header=0,
                         index_col=0)
        if 'genes' in spec:
            df.index = df[spec['genes']].values
        df = df[[spec['column']]]
        shared_genes = df.index if shared_genes is None \
            else shared_genes.intersection(df.index)
        tables[name] = df
    return {name: df.loc[shared_genes, :] for name, df in tables.items()}
#-------------
# Per-gene summary statistics to correlate each method's ranking against
sum_fun = lambda x: x.sum(axis=0)
var_fun = lambda x: x.var(axis=0)
funs = dict(total_sum = sum_fun,
            variance = var_fun)
# MOB count matrix (genes as columns)
# NOTE(review): paths are hard-coded relative to the repo layout -- the
# script must be run from this directory
cnt_pth = "../data/real/mob.tsv.gz"
cnt = pd.read_csv(cnt_pth, header = 0, index_col = 0, sep = '\t')
# Result tables of the three methods being compared; 'column' is the
# ranking/score column, 'genes' (SpatialDE only) holds the gene names
methods = dict(sepal = dict( file = "../res/mob/20200407115358366240-top-diffusion-times.tsv",
                            column = "average",
                            sep = '\t',
                            ),
               SpatialDE = dict(file = "SpatialDE-MOB_final_results.csv",
                                column = 'qval',
                                sep = ',',
                                genes = 'g',
                                ),
               SPARK = dict(file = "SPARK-mob.tsv",
                            column = 'combined_pvalue',
                            sep = '\t',
                            ),
               )
mets = process_methods(methods)
res = evaluate_methods(mets,cnt,funs)
out_dir = os.getcwd()
# Print each correlation table and write it as a LaTeX fragment
for _r in res.keys():
    _tmp = pd.DataFrame(res[_r])
    _tmp.index = ['spearman ($\\rho$)','p-value']
    print("Correlation with : {}".format(_r))
    print(_tmp,end="\n\n")
    with open(osp.join(out_dir,_r + "-comp-results.tsv"),"w") as f:
        ostream = _tmp.to_latex()
        # switch the tabular column spec to vertical separators
        ostream = ostream.replace("lrrr","l|c|c|c")
        f.writelines(ostream)
| true |
f4548c6b7272c4b35f6186e0dd2aa45bf0d8f95c | Python | dnovichkov/FiddlerSessionReplay | /fiddler_session_replay/session_senders.py | UTF-8 | 1,335 | 2.546875 | 3 | [] | no_license | import json
import logging
import os
import requests
from fiddler_session_replay.data_extracters import get_request
def send_request(filename: str):
    """Replay the HTTP request stored in a Fiddler raw request file.

    :param filename: path to a raw client-request file.
    :return: result of :func:`send_data` (always ``None``).
    """
    # Bug fix: the debug message contained a literal placeholder instead
    # of interpolating the actual filename being replayed.
    logging.debug(f'Send request from {filename}')
    url, method, headers, body = get_request(filename)
    return send_data(body, headers, method, url)
def send_data(body, headers, method, url):
    """Fire a single HTTP request and log the outcome.

    Does nothing when either ``url`` or ``method`` is missing. A truthy
    body is JSON-encoded and the raw response bytes are logged; without
    a body the Response object itself is logged.
    """
    if not url or not method:
        return
    if body:
        logging.debug(f'Send {method}-request to {url} with data')
        payload = json.dumps(body)
        logging.debug(requests.request(method, url, headers=headers, data=payload).content)
    else:
        logging.debug(f'Send {method}-request to {url} without data')
        logging.debug(requests.request(method, url, headers=headers))
    return
def get_full_requests_filenames(folder_name):
    """Return paths of all client-request files ('*_c.txt') found in the
    'raw' subdirectory of *folder_name*."""
    raw_dir = folder_name + '/raw/'
    found = []
    for entry in os.listdir(raw_dir):
        # Fiddler names client-side request files with a '_c.txt' suffix.
        if entry.endswith('_c.txt'):
            found.append(raw_dir + entry)
    logging.debug(found)
    return found
def send_request_files(folder_name):
    """Replay every client-request file from an unpacked Fiddler archive.

    :param folder_name: directory containing the unpacked archive.
    :return: None
    """
    for request_file in get_full_requests_filenames(folder_name):
        send_request(request_file)
| true |
557591779f438feeb9e383190ba9d6e6de777f47 | Python | rkgwood/snorlax | /main.py | UTF-8 | 1,692 | 3.015625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
def _expand_cd_name(short_cd):
if pd.isnull(short_cd):
return short_cd
short_cd = short_cd.split(':')[0]
cd_mappings = {'BX': 'Bronx',
'BK': 'Brooklyn',
'MN': 'Manhattan',
'QN': 'Queens',
'SI': 'Staten Island'}
borough = cd_mappings[short_cd[:2]]
cd_number = str(int(short_cd[2:]))
return "%s CD %s" % (borough, cd_number)
# Load subsidized-housing (SHIP) properties and normalize the community
# district names to the long 'Borough CD N' form.
# NOTE(review): pd.DataFrame.from_csv is long-deprecated — this script
# presumably targets an old pandas; confirm before upgrading.
ship_df = pd.DataFrame.from_csv('data/Furman_Center_SHIP_Properties.csv')
ship_df['CD'] = ship_df.CD.map(_expand_cd_name)
# Total subsidized units per community district.
units_by_cd = pd.DataFrame({'total_units': ship_df.groupby('CD')['Unit Count'].sum()}).reset_index()
social_indicators_df = pd.DataFrame.from_csv('data/Social_Indicators_Report_Data_By_Community_District.csv')
# Keep only the poverty-rate indicator rows.
poverty_rate_by_cd = social_indicators_df[social_indicators_df.Indicator == 'Poverty Rate: Number of New Yorkers in or Near Poverty (2009-2013 average)']
pop_df = pd.DataFrame.from_csv('data/New_York_City_Population_By_Community_Districts.csv')
# Build the same 'Borough CD N' key so the three tables can be joined.
pop_df['Borough'] = pop_df.index
pop_df['CD'] = pop_df.apply(lambda row: "%s CD %s" % (row['Borough'], row['CD Number']), axis=1)
merged_df = pd.merge(units_by_cd, poverty_rate_by_cd, on='CD', how='inner')
merged_df = pd.merge(merged_df, pop_df, on='CD', how='inner')
# Subsidized units per 100 residents (2010 census population).
merged_df['ship_units_per_capita'] = merged_df.apply(lambda row: 100 * row['total_units'] / row['2010 Population'], axis=1)
merged_df.index = merged_df['CD']
merged_df['poverty_rate'] = pd.to_numeric(merged_df['2013'])
# Bar chart comparing per-capita subsidized units against poverty rate.
merged_df[['ship_units_per_capita', 'poverty_rate']].plot(kind='bar')
plt.ylabel('percent')
plt.xlabel('Community District')
plt.show() | true |
0911524d2ce1140e226d6fec6c02326e4323d240 | Python | fidemin/socket-programming-python | /echo_client.py | UTF-8 | 696 | 3.28125 | 3 | [] | no_license | import socket
import sys
# Simple interactive TCP echo client: connects to host/port given on the
# command line, then repeatedly sends a line of user input and prints the
# server's reply until interrupted.
if __name__ == '__main__':
    host = sys.argv[1]
    port = int(sys.argv[2])
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((host, port))
        print(f'[INFO] connected to {host}:{port}')
        while True:
            # make prompts
            print('>>', end=' ')
            # send input data to server
            input_data = input()
            s.sendall(input_data.encode(encoding='utf-8'))
            # receive data from server
            # NOTE(review): recv() returns b'' when the server closes the
            # connection; this loop does not detect that and keeps going.
            received_data = s.recv(1024)
            print(f'[INFO] received data: {received_data.decode("utf-8")}')
    finally:
        # Always release the socket, even on Ctrl-C / errors.
        print('[INFO] socket closed')
        s.close()
| true |
85118f33682be378e66ad5d5eca05205df259099 | Python | hariprasetia/PythonScripts | /scripts/python/is_pangram.py | UTF-8 | 537 | 4.0625 | 4 | [
"MIT"
def is_pangram(string):
    """Return True if *string* contains every letter a-z at least once.

    The check is case-insensitive and ignores all non-letter characters.
    """
    # Idiom fix: a set comparison replaces the original's manual
    # 26-slot boolean array and ord() arithmetic. Case folding is done
    # via str.lower(), which also generalizes beyond plain A-Z input.
    return set(string.lower()) >= set(ascii_lowercase)
def main():
    """Check a sample sentence and report whether it is a pangram."""
    sample = 'The quick brown fox jumps over the lazy dog'
    if is_pangram(sample):
        verdict = 'Given string is a pangram'
    else:
        verdict = 'Given string is not a pangram'
    print(verdict)
if __name__ == '__main__':
main()
| true |
ff5830694f6da350826926fa189e5d2d994bb63c | Python | thedarkknight513v2/daohoangnam-codes | /C4E18 Sessions/Homework/Session_2/Exercise_1_BMI.py | UTF-8 | 550 | 4.15625 | 4 | [] | no_license | # 1. Write a program that asks user their height (cm) and weight (kg), and then calculate their BMI (Body Mass Index):
# Prompt for measurements; float() (was int()) accepts values like 172.5.
height_in_cm = float(input("Input your height in cm"))
height_in_m = height_in_cm / 100
weight_in_kg = float(input("Input your weight in kg"))
# BMI = kg / m^2. Bug fix: the original truncated BMI with int(), which
# misclassified boundary values (e.g. a true BMI of 18.7 became 18 and
# was reported as "Underweight" instead of "Normal").
BMI = weight_in_kg / (height_in_m * height_in_m)
print("Your BMI is ", round(BMI, 1))
if BMI < 16:
    print("Severely underweight")
elif 16 <= BMI < 18.5:
    print("Underweight")
elif 18.5 <= BMI < 25:
    print("Normal")
elif 25 <= BMI < 30:
    print("Overweight")
else:
    print("Obese")
| true |
7c8dcee74fd291701e7b7c9ad1a7b739240061d3 | Python | pjh177787/amp | /MP3/Perceptron.py | UTF-8 | 7,738 | 2.890625 | 3 | [] | no_license | from random import seed, randrange
from copy import deepcopy
import numpy as np
class Preceptron:
    """One-vs-all perceptron classifier for 32x32 binary digit images.

    Ten weight vectors (one per digit class) of length 1025 are kept:
    1024 pixel weights plus a trailing bias term.

    NOTE(review): despite the class/file name, this implements a
    perceptron, not a decision tree.
    """
    def __init__(self, trainfile_name, testfile_name):
        """Load training and testing data from the given text files.

        :param trainfile_name: path to the training images/labels file
        :param testfile_name: path to the testing images/labels file
        """
        # Flattened 1024-pixel images and their integer digit labels.
        self.training_classes = []
        self.training_labels = []
        self.testing_classes = []
        self.testing_labels = []
        # Row d = weights for digit d: 1024 pixel weights + bias (last).
        self.weight_list = np.zeros((10, 1025))
        self.confusion_matrix = np.zeros((10, 10))
        self.parse_files(trainfile_name, testfile_name)
    def parse_files(self, trainfile_name, testfile_name):
        """Parse train/test files: each sample is 32 lines of 32 binary
        digits followed by a short line holding the digit label.

        Labels are collected in a first pass (any line shorter than 32
        characters), then the file is re-read to extract the images and
        cross-check each trailing label line against the stored label.
        """
        trainfile = open(trainfile_name, 'r')
        # Pass 1: short lines carry the labels.
        for line in trainfile:
            if len(line) < 32:
                for ch in line:
                    if ch.isdigit():
                        self.training_labels.append(int(ch))
        trainfile.seek(0)
        # Pass 2: 32x32 image, then its label line as an alignment check.
        for label in self.training_labels:
            image = []
            for i in range(32):
                image_line = trainfile.readline()
                for j in range(32):
                    image.append(int(image_line[j]))
            image_line = trainfile.readline()
            for ch in image_line:
                if ch.isdigit() and int(ch) != label:
                    print('TRAINFILE ALIGN ERROR')
            self.training_classes.append(image)
        trainfile.close()
        # Same two-pass parse for the test file.
        testfile = open(testfile_name, 'r')
        for line in testfile:
            if len(line) < 32:
                for ch in line:
                    if ch.isdigit():
                        self.testing_labels.append(int(ch))
        testfile.seek(0)
        for label in self.testing_labels:
            image = []
            for i in range(32):
                image_line = testfile.readline()
                for j in range(32):
                    image.append(int(image_line[j]))
            image_line = testfile.readline()
            for ch in image_line:
                if ch.isdigit() and int(ch) != label:
                    print('TESTFILE ALIGN ERROR')
            self.testing_classes.append(image)
        testfile.close()
    # Make a prediction with weights
    def predict(self, row, weights):
        """Return (activation_flag, activation) for one sample.

        The last element of ``row`` is skipped (it holds the target flag
        during training / the bias marker during testing); the bias is
        taken from ``weights[-1]`` instead. Flag is 1 when the weighted
        sum is >= 0, else 0.
        """
        activation = weights[-1]
        for i in range(len(row) - 1):
            activation += weights[i] * row[i]
        if activation >= 0:
            return 1, activation
        else:
            return 0, activation
    def train_weights(self, training_data, target_labels, learning_rate, num_epoch):
        """Train the ten one-vs-all perceptrons.

        For each digit, rows are tagged 1 (this digit) or 0 (any other)
        and the standard perceptron update rule is applied; the learning
        rate decays by 0.9 each epoch.

        :return: (10, num_epoch) array of squared-error totals per epoch.
        """
        learning_curve = np.zeros((10, num_epoch))
        for label in range(10):
            # Build this digit's binary-labelled dataset (target appended
            # as the final row element, read back via row[-1]).
            data_set = []
            for idx in range(len(training_data)):
                if label == target_labels[idx]:
                    row = training_data[idx] + [1]
                else:
                    row = training_data[idx] + [0]
                data_set.append(row)
            learning_rate_new = learning_rate
            for epoch in range(num_epoch):
                total_error = 0
                for row in data_set:
                    prediction = self.predict(row, self.weight_list[label])[0]
                    error = row[-1] - prediction
                    total_error += error**2
                    # Perceptron update: bias first, then pixel weights.
                    self.weight_list[label][-1] = self.weight_list[label][-1] + learning_rate_new*error
                    for i in range(len(row) - 1):
                        self.weight_list[label][i] = self.weight_list[label][i] + learning_rate_new*error*row[i]
                # print('Digit#%d, epoch #%d, learning_rate = %.3f, error = %.3f' %(label, epoch, learning_rate_new, total_error))
                learning_curve[label][epoch] = total_error
                # Decay the learning rate each epoch.
                learning_rate_new *= 0.9
                # if total_error < learning_rate:
                #     break
        return learning_curve
    def perceptron_train(self, learning_rate = 0.05, num_epoch = 10):
        """Convenience wrapper: train on the parsed training set."""
        return self.train_weights(self.training_classes, self.training_labels, learning_rate, num_epoch)
    def perceptron_test(self, bias_en = True):
        """Classify the test set, print per-digit statistics and fill the
        (normalized) confusion matrix.

        Each sample is scored by all ten perceptrons; the digit with the
        largest activation wins. Also tracks, per true digit, the file
        line offsets of the samples with the highest/lowest activation.

        NOTE(review): predict() never reads the last row element, so the
        appended ``bias`` value is ignored — the weight-vector bias
        (weights[-1]) is applied regardless of ``bias_en``; confirm
        whether that is intended.
        """
        predictions = []
        correct_counts = [0 for i in range(10)]
        total_counts = [0 for i in range(10)]
        correct = 0
        each = 0
        # Line offset of the current sample in the test file (32 image
        # lines + 1 label line per sample).
        line = 0
        largest_posterior = [[float('-inf'), " "] for i in range(10)]
        smallest_posterior = [[float('inf'), " "] for i in range(10)]
        if bias_en:
            bias = [1]
        else:
            bias = [0]
        for idx in range(len(self.testing_labels)):
            maxi = float('-inf')
            mini = float('inf')
            predicted = 0
            label = self.testing_labels[idx]
            # All digits whose perceptron fired (activation >= 0).
            candidates = []
            for each_possibility in range(10):
                row = self.testing_classes[idx] + bias
                actuation, possibility = self.predict(row, self.weight_list[each_possibility])
                if possibility > maxi:
                    predicted = each_possibility
                    maxi = possibility
                if possibility < mini:
                    mini = possibility
                if actuation == 1:
                    candidates.append((each_possibility, possibility))
            # print(candidates)
            predictions.append(predicted)
            # Track extreme activations per true digit class.
            if maxi > largest_posterior[label][0]:
                largest_posterior[label][0] = maxi
                largest_posterior[label][1] = line
            if mini < smallest_posterior[label][0]:
                smallest_posterior[label][0] = mini
                smallest_posterior[label][1] = line
            self.confusion_matrix[predicted][label] += 1
            if label == predicted:
                correct += 1
                correct_counts[label] += 1
            total_counts[label] += 1
            each += 1
            line += 33
        correct_prec = correct / each
        # Normalize confusion-matrix columns by true-class counts.
        for i in range(10):
            for j in range(10):
                num = self.confusion_matrix[i][j]
                self.confusion_matrix[i][j] = num/total_counts[j]
        print('For each digit, show the test examples from that class that have the highest and lowest posterior probabilities according to your classifier.')
        print(largest_posterior)
        print('\n')
        print(smallest_posterior)
        print('Classification Rate For Each Digit:')
        for i in range(10):
            print(i, correct_counts[i]/total_counts[i])
        print('Confusion Matrix:')
        for i in range(10):
            print(self.confusion_matrix[i])
        print(predictions)
        print(correct_prec)
# confusion_tuple = [((i, j), self.confusion_matrix[i][j]) for j in range(10) for i in range(10)]
# confusion_tuple = list(filter(lambda x: x[0][0] != x[0][1], confusion_tuple))
# confusion_tuple.sort(key = lambda x: -x[1])
# for i in range(4):
# feature1_pre = self.training_classes[confusion_tuple[i][0][0]]
# feature1 = [[chardict['1'] for chardict in row] for row in feature1_pre]
# feature2_pre = self.training_classes[confusion_tuple[i][0][1]]
# feature2 = [[chardict['1'] for chardict in row] for row in feature2_pre]
# fig = [None for k in range(3)]
# axes = [None for k in range(3)]
# heatmap = [None for k in range(3)]
# features = [feature1,feature2, list(np.array(feature1) - np.array(feature2))]
# for k in range(3):
# fig[k], axes[k] = plt.subplots()
# heatmap[k] = axes[k].pcolor(features[k], cmap="jet")
# axes[k].invert_yaxis()
# axes[k].xaxis.tick_top()
# plt.tight_layout()
# plt.colorbar(heatmap[k])
# # plt.show()
# plt.savefig('src/binaryheatmap%.0f%d.png' % (i + 1, k + 1) )
| true |
40ca121c93fb321886f9302532856e579b747274 | Python | anilkumar0470/git_practice | /practice_info/reg_exp_prac.py | UTF-8 | 332 | 3.28125 | 3 | [] | no_license | import re
# Demo: locate the first occurrence of a pattern and report its span.
# NOTE(review): uses Python 2 print-statement syntax; will not run on
# Python 3 without conversion.
pattern = 'this'
text = 'does this text match the this pattern?'
ma = re.search(pattern,text)
# Start/end indices of the first match within the text.
s = ma.start()
e = ma.end()
print 'found "%s" in "%s" from %d to %d ("%s")' % \
    (ma.re.pattern,ma.string,s,e,text[s:e])
#print "found %s in %s from %d to %d (%s)" % \
# (ma.re.pattern,ma.string,s,e,text[s:e]) | true |
b78f6a7db848bc0dffb9b0e16b2ef40e7490d646 | Python | linjungz/my-voice-translator | /src/app.py | UTF-8 | 1,745 | 2.578125 | 3 | [] | no_license | import translator
import json
import os
def lambda_handler(event, context):
    """AWS Lambda entry point: transcribe an audio file stored in S3,
    translate the transcript into several languages and synthesize
    speech for each via the ``translator`` helpers.

    :param event: dict with 'key' (S3 object key), 'source_language_code'
        and 'request_id'.
    :param context: Lambda context object (unused).
    :raises Exception: when the transcription comes back empty.
    :return: the ``codes`` mapping extended with per-language
        'translate_text' and 'polly_url' entries plus 'transcript'.
    """
    print(event)
    s3_key = event['key']
    source_language_code = event['source_language_code']
    request_id = event['request_id']
    print(s3_key)
    #Transcribe:
    # Destination bucket is injected through the Lambda environment.
    bucketName = os.environ['BucketName']
    #bucketName = 'voice-translator-translatorbucket-kvuovdlq7o4a'
    job_uri = 's3://' + bucketName + '/' + s3_key
    # The request id doubles as the (unique) transcription job name.
    job_name = request_id
    print(job_uri)
    print(job_name)
    transcript = translator.transcribe(job_name, job_uri, source_language_code)
    if transcript == "" :
        raise Exception('Transcript is empty')
    # NOTE(review): this overrides the caller-supplied language and
    # forces Chinese as the translation source — confirm intentional.
    source_language_code = 'zh'
    #Result:
    # Target languages with their Translate and Polly language codes.
    codes = {
        'EnglishUS': {
            'translate_code': 'en',
            'polly_code': 'en-US',
        },
        'French': {
            'translate_code': 'fr',
            'polly_code': 'fr-FR',
        },
        'Japanese' : {
            'translate_code': 'ja',
            'polly_code': 'ja-JP',
        },
        'Korean' : {
            'translate_code': 'ko',
            'polly_code': 'ko-KR'
        }
    }
    for language in codes:
        #Translate:
        print(language)
        codes[language]['translate_text'] = translator.translate(
            transcript,
            source_language_code,
            codes[language]['translate_code'])
        print(codes[language]['translate_text'])
        #Polly:
        codes[language]['polly_url'] = translator.polly(
            codes[language]['translate_text'],
            codes[language]['polly_code'],
            request_id,
            bucketName)
        print(codes[language]['polly_url'])
    codes['transcript'] = transcript
    return codes
| true |
79dca0fe0e75a8d36c45a2c056825659bd6285c0 | Python | sshyran/openvino-nncf-pytorch | /beta/tests/tensorflow/test_transformations.py | UTF-8 | 11,800 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | """
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
from beta.nncf.tensorflow.graph.transformations import commands
from beta.nncf.tensorflow.graph.transformations.layout import TFTransformationLayout
from nncf.common.graph.transformations.commands import TransformationPriority
from nncf.common.graph.transformations.commands import TransformationType
from nncf.common.graph.transformations.commands import TargetType
def test_insertion_commands_union_invalid_input():
    """Union of insertion commands at different target points must fail."""
    cmd_0 = commands.TFInsertionCommand(commands.TFBeforeLayer('layer_0'))
    cmd_1 = commands.TFInsertionCommand(commands.TFAfterLayer('layer_0'))
    with pytest.raises(Exception):
        cmd_0.union(cmd_1)
# Parametrization ids: merged commands either share one priority or use
# distinct TransformationPriority values.
priority_types = ["same", "different"]
@pytest.mark.parametrize("case", priority_types, ids=priority_types)
def test_insertion_command_priority(case):
    """Merged insertion commands keep callbacks ordered by ascending
    priority (and keep insertion order for equal priorities)."""
    def make_operation_fn(priority_value):
        # Each callback returns its priority so ordering can be asserted.
        def operation_fn():
            return priority_value
        return operation_fn
    cmds = []
    if case == 'same':
        for idx in range(3):
            cmds.append(
                commands.TFInsertionCommand(
                    commands.TFBeforeLayer('layer_0'),
                    make_operation_fn(idx)
                ))
    else:
        # Build in descending priority order to exercise the sorting.
        priorites = sorted(list(TransformationPriority), key=lambda x: x.value, reverse=True)
        for priority in priorites:
            cmds.append(
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_0', 'weight_0'),
                    make_operation_fn(priority.value),
                    priority
                ))
    res_cmd = cmds[0]
    for cmd in cmds[1:]:
        res_cmd = res_cmd + cmd
    res = res_cmd.insertion_objects
    assert len(res) == len(cmds)
    assert all(res[i]() <= res[i + 1]() for i in range(len(res) - 1))
def test_removal_command_union():
    """Removal commands for different layers cannot be unioned."""
    cmd_0 = commands.TFRemovalCommand(commands.TFLayer('layer_0'))
    cmd_1 = commands.TFRemovalCommand(commands.TFLayer('layer_1'))
    with pytest.raises(Exception):
        cmd_0.union(cmd_1)
def test_add_insertion_command_to_multiple_insertion_commands_same():
    """Two insertion commands at the *same* weight target merge into one
    command carrying both callbacks, in insertion order."""
    check_fn = lambda src, dst: \
        dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
        src.layer_name == dst.layer_name
    cmd_0 = commands.TFInsertionCommand(
        commands.TFLayerWeight('layer_0', 'weight_0'),
        lambda: 'cmd_0')
    cmd_1 = commands.TFInsertionCommand(
        commands.TFLayerWeight('layer_0', 'weight_0'),
        lambda: 'cmd_1')
    m_cmd = commands.TFMultipleInsertionCommands(
        target_point=commands.TFLayer('layer_0'),
        check_target_points_fn=check_fn
    )
    m_cmd.add_insertion_command(cmd_0)
    m_cmd.add_insertion_command(cmd_1)
    res_cmds = m_cmd.commands
    assert len(res_cmds) == 1
    res = res_cmds[0].insertion_objects
    assert len(res) == 2
    assert res[0]() == 'cmd_0'
    assert res[1]() == 'cmd_1'
def test_add_insertion_command_to_multiple_insertion_commands_different():
    """Insertion commands at *different* weight targets of the same layer
    stay separate, each keeping its own callback."""
    check_fn = lambda src, dst: \
        dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
        src.layer_name == dst.layer_name
    cmd_0 = commands.TFInsertionCommand(
        commands.TFLayerWeight('layer_0', 'weight_0'),
        lambda:'cmd_0')
    cmd_1 = commands.TFInsertionCommand(
        commands.TFLayerWeight('layer_0', 'weight_1'),
        lambda:'cmd_1')
    m_cmd = commands.TFMultipleInsertionCommands(
        target_point=commands.TFLayer('layer_0'),
        check_target_points_fn=check_fn
    )
    m_cmd.add_insertion_command(cmd_0)
    m_cmd.add_insertion_command(cmd_1)
    res_cmds = m_cmd.commands
    assert len(res_cmds) == 2
    res = res_cmds[0].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_0'
    res = res_cmds[1].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_1'
def test_add_insertion_command_to_multiple_insertion_commands_invalid_input():
    """Only insertion commands may be added; a removal command raises."""
    m_cmd = commands.TFMultipleInsertionCommands(commands.TFLayerWeight('layer_0', 'weights_0'))
    cmd = commands.TFRemovalCommand(commands.TFLayer('layer_0'))
    with pytest.raises(Exception):
        m_cmd.add_insertion_command(cmd)
def test_multiple_insertion_commands_union_invalid_input():
    """A multi-insertion command cannot absorb another multi-insertion
    command via add_insertion_command."""
    cmd_0 = commands.TFMultipleInsertionCommands(commands.TFLayer('layer_0'))
    cmd_1 = commands.TFMultipleInsertionCommands(commands.TFLayer('layer_1'))
    with pytest.raises(Exception):
        cmd_0.add_insertion_command(cmd_1)
def test_multiple_insertion_commands_union():
    """Adding two multi-insertion commands for the same layer keeps their
    sub-commands separate when they target different weights."""
    check_fn_0 = lambda src, dst: \
        dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
        src.layer_name == dst.layer_name and \
        dst.weights_attr_name == 'weight_0'
    cmd_0 = commands.TFInsertionCommand(
        commands.TFLayerWeight('layer_0', 'weight_0'),
        lambda: 'cmd_0')
    m_cmd_0 = commands.TFMultipleInsertionCommands(
        target_point=commands.TFLayer('layer_0'),
        check_target_points_fn=check_fn_0,
        commands=[cmd_0]
    )
    check_fn_1 = lambda src, dst: \
        dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
        src.layer_name == dst.layer_name and \
        dst.weights_attr_name == 'weight_1'
    cmd_1 = commands.TFInsertionCommand(
        commands.TFLayerWeight('layer_0', 'weight_1'),
        lambda:'cmd_1')
    m_cmd_1 = commands.TFMultipleInsertionCommands(
        target_point=commands.TFLayer('layer_0'),
        check_target_points_fn=check_fn_1,
        commands=[cmd_1]
    )
    m_cmd = m_cmd_0 + m_cmd_1
    res_cmds = m_cmd.commands
    assert len(res_cmds) == 2
    res = res_cmds[0].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_0'
    res = res_cmds[1].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_1'
def test_transformation_layout_insertion_case():
    """Registering single and multi insertion commands for two layers
    coalesces them into one MULTI_INSERT transformation per layer, with
    callbacks at a shared target ordered by priority (pruning before
    sparsification)."""
    transformation_layout = TFTransformationLayout()
    check_fn = lambda src, dst: \
        dst.type == TargetType.OPERATION_WITH_WEIGHTS and \
        src.layer_name == dst.layer_name
    command_list = [
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_0'),
            lambda: 'cmd_0',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_1'),
            lambda: 'cmd_1',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_1', 'weight_0'),
            lambda: 'cmd_2',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFMultipleInsertionCommands(
            target_point=commands.TFLayer('layer_0'),
            check_target_points_fn=check_fn,
            commands=[
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_0', 'weight_0'),
                    lambda: 'cmd_3',
                    TransformationPriority.PRUNING_PRIORITY)
            ]),
        commands.TFMultipleInsertionCommands(
            target_point=commands.TFLayer('layer_1'),
            check_target_points_fn=check_fn,
            commands=[
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_1', 'weight_0'),
                    lambda: 'cmd_4',
                    TransformationPriority.PRUNING_PRIORITY),
                commands.TFInsertionCommand(
                    commands.TFLayerWeight('layer_1', 'weight_1'),
                    lambda: 'cmd_5',
                    TransformationPriority.PRUNING_PRIORITY)
            ]),
    ]
    for cmd in command_list:
        transformation_layout.register(cmd)
    res_transformations = transformation_layout.transformations
    # One merged MULTI_INSERT per layer.
    assert len(res_transformations) == 2
    assert res_transformations[0].type == TransformationType.MULTI_INSERT
    assert res_transformations[0].target_point.type == TargetType.LAYER
    assert res_transformations[0].target_point.layer_name == 'layer_0'
    assert res_transformations[1].type == TransformationType.MULTI_INSERT
    assert res_transformations[1].target_point.type == TargetType.LAYER
    assert res_transformations[1].target_point.layer_name == 'layer_1'
    # layer_0: weight_0 holds pruning-then-sparsity; weight_1 holds cmd_1.
    res_cmds = res_transformations[0].commands
    assert len(res_cmds) == 2
    res = res_cmds[0].insertion_objects
    assert len(res) == 2
    assert res[0]() == 'cmd_3' and res[1]() == 'cmd_0'
    res = res_cmds[1].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_1'
    # layer_1: same structure for cmd_4/cmd_2 and cmd_5.
    res_cmds = res_transformations[1].commands
    assert len(res_cmds) == 2
    res = res_cmds[0].insertion_objects
    assert len(res) == 2
    assert res[0]() == 'cmd_4' and res[1]() == 'cmd_2'
    res = res_cmds[1].insertion_objects
    assert len(res) == 1
    assert res[0]() == 'cmd_5'
def test_transformation_layout_removal_case():
    """Interleaved insert/remove commands are preserved by the layout in
    registration order with their target points intact."""
    transformation_layout = TFTransformationLayout()
    command_list = [
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_0'),
            lambda: 'sparsity_operation',
            TransformationPriority.SPARSIFICATION_PRIORITY),
        commands.TFRemovalCommand(commands.TFOperationWithWeights('layer_0', 'weight_0', 'sparsity_operation')),
        commands.TFInsertionCommand(
            commands.TFAfterLayer('layer_0'),
            lambda: 'layer_1'
        ),
        commands.TFRemovalCommand(commands.TFLayer('layer_1')),
        commands.TFInsertionCommand(
            commands.TFLayerWeight('layer_0', 'weight_0'),
            lambda: 'pruning_operation',
            TransformationPriority.PRUNING_PRIORITY
        )
    ]
    for cmd in command_list:
        transformation_layout.register(cmd)
    res_transformations = transformation_layout.transformations
    # All five commands survive, in order, with unchanged target points.
    assert len(res_transformations) == 5
    assert res_transformations[0].type == TransformationType.INSERT
    assert res_transformations[0].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
    assert res_transformations[0].target_point.layer_name == 'layer_0'
    assert res_transformations[0].target_point.weights_attr_name == 'weight_0'
    assert res_transformations[1].type == TransformationType.REMOVE
    assert res_transformations[1].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
    assert res_transformations[1].target_point.layer_name == 'layer_0'
    assert res_transformations[1].target_point.weights_attr_name == 'weight_0'
    assert res_transformations[1].target_point.operation_name == 'sparsity_operation'
    assert res_transformations[2].type == TransformationType.INSERT
    assert res_transformations[2].target_point.type == TargetType.AFTER_LAYER
    assert res_transformations[2].target_point.layer_name == 'layer_0'
    assert res_transformations[3].type == TransformationType.REMOVE
    assert res_transformations[3].target_point.type == TargetType.LAYER
    assert res_transformations[3].target_point.layer_name == 'layer_1'
    assert res_transformations[4].type == TransformationType.INSERT
    assert res_transformations[4].target_point.type == TargetType.OPERATION_WITH_WEIGHTS
    assert res_transformations[4].target_point.layer_name == 'layer_0'
    assert res_transformations[4].target_point.weights_attr_name == 'weight_0'
| true |
b5f7cf71cf9fe32b973f8d77d5ad018fd4a16bc7 | Python | mameen-omar/EAI320_Practical-5 | /SOFTWARE/ID3.py | UTF-8 | 18,083 | 3.453125 | 3 | [] | no_license | #Mohamed Ameen Omar
#u16055323
#EAI PRACTICAL ASSIGNMENT 5
#2018
import csv
import math
import copy
class Stack:
    """Minimal LIFO stack backed by a Python list (top == end of list)."""

    def __init__(self):
        self.list = []

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return not self.list

    def push(self, item):
        """Place *item* on top of the stack."""
        self.list.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.list.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty).

        Bug fix: the original indexed ``self.items``, an attribute that
        never exists on this class, so peek() always raised
        AttributeError.
        """
        return self.list[-1]

    def size(self):
        """Return the number of stored items."""
        return len(self.list)
class Node:
    """Decision-tree node: a category (attribute) node, an attribute-value
    node, or a decision (leaf) node, distinguished by the boolean flags."""

    def __init__(self, category = None, parent = None, child = None):
        """
        :param category: attribute name this node tests (None until set)
        :param parent: parent Node, or None for the root
        :param child: optional initial list of child Nodes

        Bug fix: the original used a mutable default ``child=[]``, so
        every Node created without an explicit child list shared one and
        the same list object.
        """
        self.category = category #category
        self.value = None #only assigned if it is a split node
        self.parent = parent #parent of the node
        self.children = [] if child is None else child
        self.subset = None # the data subset this node represents
        self.isCategoryNode = False #if it is a category or an option
        self.isDecisionNode = False #if it is a decision
    def isLeaf(self):
        """Return True when the node has no children.

        Bug fix: the original read ``self.child``, an attribute that is
        never assigned, so isLeaf() always raised AttributeError.
        """
        return not self.children
class ID3:
def __init__(self):
self.root = None
self.fileName = ""
self.mainData = None #each index is a dictonary for the data given
self.numAttributes = 0
self.categories = ""
self.default = None #boolean for the default files
self.defaultFilename = "restaurant(1).csv"
self.defaultCategories = ["Alt", "Bar", "Fri", "Hun", "Pat", "Price", "Rain", "Res", "Type", "Est", "WillWait"]
self.testedDecisions = []
self.numNodes = 0
self.numAttNodes = 0
self.numDecNodes = 0
self.numCatNodes = 0
self.runID3()
#dummy fucntion to do admin work for the program and make it run
def runID3(self):
anotherFile = input("Would you like to build a decision tree for the data in restaurant(1).csv? (Please input Y or N)\n")
while(anotherFile.upper() != "Y" and anotherFile.upper() != "N"):
string = "You entered"
string = string + " "
string= string + anotherFile
string = string + (" which is invalid, please input \"Y\" or \"N\"\n")
anotherFile = input(string)
print("Building Tree...")
if(anotherFile.upper() == "Y"):
self.default = True
self.buildDefaultTree()
else:
self.default = False
self.buildOtherTree()
self.countNodes()
print("Tree has been built")
print()
print("The Tree has:")
print(self.numNodes, end = " ")
print("Nodes.")
print(self.numCatNodes, end = " ")
print("Category Nodes.")
print(self.numAttNodes, end = " ")
print("Attribute Value Nodes.")
print(self.numDecNodes, end = " ")
print("Decision Nodes.")
test = (input("Would you like to input a test case?\n")).upper()
while(test.upper() != "Y" and test.upper() != "N"):
string = "You entered"
string = string + " "
string= string + test
string = string + (" which is invalid, please input \"Y\" or \"N\"\n")
test = input(string)
if(test.upper() == "N"):
print("The program has ended.")
return
print("Please enter the name of the file for which you would like to know the outcome.")
print("Please include the file extension (.csv) as well")
testFile = input("Test file Name: ")
testList = self.readTestFile(testFile)
print("Testing attributes in file: ", testFile, end = "")
print(" with the sample data")
print("Retrieving Decision....")
self.testFunc(testList)
again = input("Would you like to test another file?\n")
while(again.upper() != "Y" and again.upper() != "N"):
string = "You entered"
string = string + " "
string= string + again
string = string + (" which is invalid, please input \"Y\" or \"N\"\n")
again = input(string)
while(again.upper() == "Y"):
print("Please enter the name of the file for which you would like to know the outcome.")
print("Please include the file extension (.csv) as well")
testFile = input("Test file Name: ")
testList = self.readTestFile(testFile)
print("Testing attributes in file: ", testFile, end = "")
print(" with the sample data")
print("Retrieving Decision....")
self.testFunc(testList)
again = input("Would you like to test another file?\n")
while(again.upper() != "Y" and again.upper() != "N"):
string = "You entered"
string = string + " "
string= string + again
string = string + (" which is invalid, please input \"Y\" or \"N\"\n")
again = input(string)
if(again.upper() == "N"):
print("A reminder, the decisions for the input test cases were: ")
count = 1
for x in self.testedDecisions:
print("Decision ", count, end = " ")
print("was:", x)
count = count + 1
print("The program has ended")
return
#takes in a list of dicnonaries and returns the dicisions
def testFunc(self,testList):
tempDict = testList[0]
myNode = self.root
decision = False
while(decision == False):
if(myNode.isDecisionNode == True):
decision = True
print()
print("At a decision Node")
print("Congratulations we have found a decision")
print("Based off of the sample data, the final decision for this test case is predicted to be: ", myNode.value)
self.testedDecisions.append(myNode.value)
elif(myNode.isCategoryNode == True):
print()
print("At a Category Node.")
print("The category being compared is: ", myNode.category)
for child in myNode.children:
if(tempDict[myNode.category] == child.value):
myNode = child
else:
print()
print("At an attribute value Node.")
print("Attribute is :", myNode.value)
myNode = myNode.children[0]
#builds the tree for the defualt given csv in the prac spec
def buildDefaultTree(self):
self.fileName = self.defaultFilename
self.categories = self.defaultCategories
self.numAttributes = len(self.categories)
self.readFile(self.fileName, self.categories)
self.build()
#builds a tree that isnt for the default csv
def buildOtherTree(self):
self.categories = []
print("Please note, the data needs to be in a csv file with the target value or final decision being the last value in every row.")
name = input("Please input the name of the file name to be used to build the tree.\n")
self.fileName = name
numAttributes = input("How many attributes does the data consist of?\n")
y = 1
while(y <= int(numAttributes)):
string = "Please enter attribute number " + str(y) + "\n"
att = input(string)
y = y+1
self.categories.append(att)
for x in self.categories:
print(x)
self.readFile(self.fileName, self.categories)
self.build()
#reads a test file
def readTestFile(self,fileName):
temp = []
csvfile = open(fileName)
cat = copy.deepcopy(self.categories)
del cat[-1]
temp = list(csv.DictReader(csvfile, cat)) #make the DictReader iterator a list
for decision in temp:
for k,v in decision.items():
decision[k] = v.replace(" ", "")
return temp
#helper to read the file and sort out main data
def readFile(self, fileName, categories):
csvfile = open(fileName)
self.mainData = list(csv.DictReader(csvfile, categories)) #make the DictReader iterator a list
for decision in self.mainData:
for k,v in decision.items():
decision[k] = v.replace(" ", "")
# returns the entropy value for a category within a given data subset
#name is the name of the catgory who's entropy we are getting
#"data" is the dicntonary list which we are using to get the entropy
def getEntropy(self,name,data):
if(data is None):
return 1
variableNames = self.getVariableValues(data,name)#for the category we are testing
decisions = [] #different decisions that can result
for x in data:
new = True
if(decisions == []):
decisions.append(x[self.categories[self.numAttributes-1]])
else:
for y in decisions:
if(y == x[self.categories[self.numAttributes-1]]):
new = False
if(new == True):
decisions.append(x[self.categories[self.numAttributes-1]])
# now we have the decision names and the different variable names
totalOcc = len(data)
entropy = 0.0
tempEnt = 0.0
for attribute in variableNames:
tempEnt = 0.0
attOcc = self.getAttributeOccurrences(attribute,name,data)
for decision in decisions:
temp = 0.0
tempDecOcc = self.getAttDecOccurrences(attribute,name,decision,data)
if(tempDecOcc == 0):
temp = 0
else:
temp = (tempDecOcc/attOcc) * math.log2(tempDecOcc/attOcc)
tempEnt = tempEnt + temp
# print((attOcc)/totalOcc)
tempEnt = ((-1 * attOcc)/totalOcc)*tempEnt
entropy = entropy + tempEnt
return entropy
#returns the amount of times a attribute value= attribute, is found within a category = category
#within the data dictionary list
def getAttributeOccurrences(self, attribute,category,data):
occ = 0
for x in data:
if(x[category] == attribute):
occ = occ +1
return occ
#returns number of occurences of an attribute in a catrgory for a specific decision
#within the "data" dictonary list
def getAttDecOccurrences(self,attribute,category,decision,data):
occ = 0.0
for x in self.mainData:
if(x[category] == attribute):
if(x[self.categories[self.numAttributes -1]] == decision):
occ = occ+1
return occ
#takes in a data subset, which is a list of dictonaries, sees if it is pure
#by checking that the decision for all is the same
def isPure(self,data):
if(len(data) == 1):
return True
decisionIndex = self.categories[self.numAttributes-1]
decisionKey = data[0][decisionIndex]
for decision in data:
if(decision[decisionIndex] != decisionKey):
return False
return True
#returns the name of the category which has the lowest entropy
#data is a list of dictionaries for which each dictionary attributes(keys) still need to be split
def getLowestEntropy(self,data):
temp = data[0]
returnKey = ""
entropy = -1.0
keys = []
for k in temp:
if(k != self.categories[self.numAttributes-1]):
keys.append(k)
for key in keys:
temp = self.getEntropy(key,data)
if(entropy == -1.0):
entropy = temp
returnKey = key
elif(entropy > temp):
entropy = temp
returnKey = key
return returnKey
#builds the tree recursively
    def build(self, node = None):
        """Recursively grow the ID3 decision tree.

        First call (self.root is None): create the root category node over a
        deep copy of the training data plus one attribute-value child per
        distinct value of the lowest-entropy column, then recurse into each
        child. Recursive calls receive an attribute-value *node*: attach a
        decision leaf when its subset is pure, otherwise attach the next
        lowest-entropy category node and keep splitting.
        """
        if(self.root is None):
            temp = Node()
            temp.isDecisionNode = False
            temp.isCategoryNode = True
            temp.parent = None
            # deep copy so the original training data is never mutated
            temp.subset = copy.deepcopy(self.mainData)
            splitCat = self.getLowestEntropy(temp.subset)
            temp.category = splitCat
            childVals = self.getVariableValues(temp.subset,splitCat)
            self.root = temp
            for child in childVals:
                tempChild = None
                tempChild = Node()
                tempChild.category = self.root.category
                tempChild.isCategoryNode = False
                tempChild.isDecisionNode = False
                tempChild.value = child
                tempChild.children = []
                tempChild.parent = self.root
                # child subset: only rows matching this attribute value, with
                # the split column removed so it cannot be chosen again
                tempSub = copy.deepcopy(self.root.subset)
                tempSub = self.removeRowFromList(tempSub,temp.category,tempChild.value)
                tempSub = self.removeKeyFromDicList(tempSub,tempChild.category)
                tempChild.subset = tempSub
                self.root.children.append(tempChild)
            for child in self.root.children:
                if(child.isDecisionNode != True):
                    self.build(child)
        else:
            if(self.isPure(node.subset) == True):
                # pure subset: terminate this branch with a decision leaf
                temp = Node()
                temp.parent = node
                temp.category = node.category
                temp.isDecisionNode = True
                temp.isCategoryNode = False
                temp.subset = copy.deepcopy(node.subset)
                temp.value = self.getDecision(temp.subset)
                node.children.append(temp)
                temp.children = []
                return
            else:
                #node is our parent
                #temp is our new category node
                temp = Node()
                temp.isCategoryNode = True
                temp.isDecisionNode = False
                temp.subset = copy.deepcopy(node.subset)
                temp.children = []
                temp.parent = node
                node.children.append(temp)
                temp.category = self.getLowestEntropy(temp.subset)
                catValues = self.getVariableValues(temp.subset,temp.category)
                #temp is sorted now get children for temp
                for val in catValues:
                    child = Node()
                    child.value = val
                    child.category = temp.category
                    child.isCategoryNode = False
                    child.isDecisionNode = False
                    child.children = []
                    child.parent = temp
                    child.subset = self.removeRowFromList(copy.deepcopy(temp.subset), child.category,val)
                    child.subset = self.removeKeyFromDicList(copy.deepcopy(child.subset), child.category)
                    temp.children.append(child)
                for child in temp.children:
                    self.build(child)
#returns a list of all variable names or different attrbiute names
#within a given data set = data and a catergory = category
def getVariableValues(self,data,category):
vals = []
for row in data:
if(vals == []):
vals.append(row[category])
else:
new = True
for x in vals:
if(x == row[category]):
new = False
if(new == True):
vals.append(row[category])
return vals
#returns the decision
def getDecision(self,data):
if(self.isPure(data) == False):
return None
return data[0][self.categories[self.numAttributes -1]]
#removes a key from a dictionary list
def removeKeyFromDicList(self,data,key):
for row in data:
del row[key]
return data
#removes all rows from a data subset that is not for this category value
#data = list of dictonaries, splitCategory = the category for which we are checking a value
#val = the value within the category for which we only want the rows
def removeRowFromList(self,data,splitCategory,val):
remove = True
while(remove == True):
remove = False
for row in data:
if(row[splitCategory] != val):
data.remove(row)
remove = True
return data
#counts nodes and types of nodes
    def countNodes(self):
        """Traverse the tree from the root with an explicit stack, tallying
        category / decision / attribute-value nodes into the instance
        counters; return the total node count (0 for an empty tree).

        NOTE(review): numCatNodes/numDecNodes/numAttNodes are incremented
        without being reset first, so a second call doubles them — confirm
        callers invoke this only once.
        """
        if(self.root == None):
            return 0
        count = 0
        temp = Stack()
        temp.push(self.root)
        while(temp.isEmpty() == False):
            node = temp.pop()
            count = count +1
            if(node.isCategoryNode == True):
                self.numCatNodes += 1
            elif(node.isDecisionNode == True):
                self.numDecNodes += 1
            else:
                self.numAttNodes +=1
            for child in node.children:
                temp.push(child)
        self.numNodes = count
        return count
# Demo: build the classifier, then report the entropy of every
# non-decision column over the full training set.
test = ID3()
print()
print("The initial entropy values for the given data set:")
for cat in range (0,len(test.categories)-1):
    print("The Entropy for ", end = "")
    print(test.categories[cat], end = " ")
    print("is ", end = "")
    print(test.getEntropy(test.categories[cat],test.mainData))
dd86a58242712c02029daa556022990c4ccc2102 | Python | kupcimat/iot | /kupcimat/validator.py | UTF-8 | 426 | 2.65625 | 3 | [
"MIT"
] | permissive | import os.path
import yamale
def validate_yaml(schema_file: str, data_file: str):
    """Validate *data_file* against *schema_file* using yamale.

    Raises RuntimeError when either path does not point at an existing
    file; yamale raises its own error on validation failure.
    """
    for label, path in (("Schema", schema_file), ("Data", data_file)):
        if not os.path.isfile(path):
            raise RuntimeError(f"{label} yaml file is missing: {path}")
    yamale.validate(yamale.make_schema(schema_file), yamale.make_data(data_file))
| true |
8ff107f5661d19278dfdb1cc56a68852be588746 | Python | BrianSantoso/pixelsort | /pixelsort.py | UTF-8 | 3,316 | 3.09375 | 3 | [] | no_license | from PIL import Image
import numpy as np
def hue(x):
    """HSL hue of an (r, g, b) pixel with 0-255 channels, in degrees [0, 360)."""
    red, green, blue = x[0] / 255, x[1] / 255, x[2] / 255
    c_max = max(red, green, blue)
    c_min = min(red, green, blue)
    delta = c_max - c_min
    if delta == 0:
        # achromatic pixel: hue is undefined, report 0 like the HSL convention
        return 0
    if c_max == red:
        return 60 * (((green - blue) / delta) % 6)
    if c_max == green:
        return 60 * ((blue - red) / delta + 2)
    return 60 * ((red - green) / delta + 4)
def lightness(x):
    """HSL lightness of an (r, g, b) pixel with 0-255 channels, in [0, 1]."""
    channels = [c / 255 for c in x[:3]]
    return (max(channels) + min(channels)) / 2
def saturation(x):
    """HSL saturation of an (r, g, b) pixel with 0-255 channels, in [0, 1]."""
    red, green, blue = x[0] / 255, x[1] / 255, x[2] / 255
    c_max = max(red, green, blue)
    c_min = min(red, green, blue)
    delta = c_max - c_min
    if delta == 0:
        return 0
    # lightness inlined: (c_max + c_min) / 2, same formula as lightness()
    light = (c_max + c_min) / 2
    return delta / (1 - abs(2 * light - 1))
# Mapping of sort-key names to scoring functions over a single (r, g, b) pixel.
mode = {
    'sum-rgb': lambda x: x[0] + x[1] + x[2], # sort by sum of rgb values (grayscale)
    'red': lambda x: x[0], # sort by red value
    'green': lambda x: x[1], # sort by green value
    'blue': lambda x: x[2], # sort by blue value
    'yellow': lambda x: x[0] + x[1], # sort by yellow value
    'cyan': lambda x: x[1] + x[2], # sort by cyan value
    'magenta': lambda x: x[0] + x[2], #sort by magenta value
    # Fix: the red coefficient was mistyped as 0.02126; Rec.709 luma is
    # 0.2126 R + 0.7152 G + 0.0722 B (the three weights must sum to 1).
    'luma': lambda x: 0.2126 * x[0] + 0.7152 * x[1] + 0.0722 * x[2], # sort by human color perception (luminosity)
    'hue': hue,
    'saturation': saturation,
    'lightness': lightness
}
def pixelsort(image_name, mode, row=True, reverse=False, start=lambda x: False, stop=lambda x: False):
    """Open *image_name*, sort a segment of every row (or column, when
    row=False) by the key function *mode*, show the result and return it
    as a PIL Image.

    start/stop are per-pixel predicates delimiting the sorted segment:
    sorting begins at the first pixel matching *start* (index 0 if none)
    and ends before the first later pixel matching *stop* (end of line if
    none).
    """
    # PARAMETERS
    # image_name: name of image file
    # mode: mode to sort by
    # row: sort rows if True, otherwise sort by columns
    # reverse: sort in reverse if True
    picture = Image.open(image_name)
    if row: # convert numpy array to regular python list
        pixels = np.array(picture).tolist()
    else:
        # if you want to sort columns instead of row, just flip the image over its diagonal
        pixels = np.array(picture).transpose((1, 0, 2))
        print(pixels.shape)
        pixels = pixels.tolist()
    new_pixels = []
    for y in pixels:
        # sort each row (or column)
        index_start = index_of_first(y, 0, start)
        if index_start < 0:
            index_start = 0
        index_stop = index_of_first(y, index_start + 1, stop)
        if index_stop < 0:
            index_stop = len(y)
        segment_to_sort = y[index_start:index_stop]
        segment_to_sort.sort(key=mode, reverse=reverse)
        new_pixels.append(y[:index_start] + segment_to_sort + y[index_stop:])
    new_pixels = np.asarray(new_pixels, dtype='uint8')
    if not row:
        # flip back over the diagonal if sorting by columns
        new_pixels = new_pixels.transpose((1, 0, 2))
    # convert back to image
    # NOTE(review): mode 'RGB' assumes a 3-channel source image — RGBA or
    # grayscale inputs would need converting first; confirm against callers.
    im = Image.fromarray(new_pixels, 'RGB')
    im.show()
    return im
def index_of_first(arr, index, predicate):
    """Index of the first element at or after *index* satisfying *predicate*, else -1."""
    return next((i for i in range(index, len(arr)) if predicate(arr[i])), -1)
def save_as(image, name='sorted.jpg'):
    # Persist a PIL image to disk under the given file name.
    image.save(name)
# Segment delimiters: begin sorting at the first "dark" pixel (channel sum
# below 360) and stop at the first brighter one.
start = lambda x: x[0] + x[1] + x[2] < 360
stop = lambda x: x[0] + x[1] + x[2] > 360
# image = pixelsort('cloud.jpg', mode['luma'], row=False, reverse=True)
# Demo run: sort the columns of paint.png by lightness, brightest first.
image = pixelsort('paint.png', mode['lightness'], row=False, reverse=True)
# image = pixelsort('einstein.jpg', lambda, True) # 'sort image's rows by red'
# save_as(image, 'pixelsorted3.jpg')
# image = pixelsort('image.jpg', mode['red'], True)
# save_as(image, 'pixelsort.jpg')
c0101a56b3b9687f2a976fddbebf7b499f462fac | Python | tbjorch/WordAnalytics | /scraper_service/service/sitemap_scraper.py | UTF-8 | 2,777 | 2.78125 | 3 | [] | no_license | # standard library
import logging
from typing import List
# 3rd party modules
import requests
from requests import Response
from bs4 import BeautifulSoup
# internal modules
from dto import AddUrlDTO, UrlDTO
from scraper_service.service import rpc
from service.error import UnwantedArticleException
def start(yearmonth: str) -> None:
    """Scrape the sitemap for *yearmonth* and persist any URLs not yet stored."""
    try:
        scraped = get_news_urls_from_sitemap(yearmonth)
        known_ids = [existing.id for existing in rpc.get_urls_by_yearmonth(yearmonth)]
        fresh = [entry for entry in scraped if entry.id not in known_ids]
        for entry in fresh:
            rpc.post_url(entry)
        logging.info(f"Inserted {len(fresh)} URLs to database")
    except Exception as e:
        logging.error(f"Error when scraping sitemap {e}")
def get_news_urls_from_sitemap(yearmonth: str) -> List[AddUrlDTO]:
    """Fetch and parse the Aftonbladet article sitemap for *yearmonth*
    (format used in the URL, e.g. '2020-01') into a list of AddUrlDTOs."""
    sitemap_url = (
        "https://www.aftonbladet.se/sitemaps/files/"
        f"{yearmonth}-articles.xml"
    )
    soup = _fetch_sitemap_as_soup_object(sitemap_url)
    return _scrape_sitemap_soup(yearmonth, soup, [])
def _scrape_sitemap_soup(
    yearmonth: str,
    soup: BeautifulSoup,
    value_list: List
) -> List[AddUrlDTO]:
    """Extract every <loc> URL from the sitemap soup into AddUrlDTOs,
    appending them to (and returning) *value_list*.

    The article id is taken as the URL path segment that follows the
    literal "a" segment; URLs lacking such a segment raise and are only
    logged, so they are skipped rather than aborting the scrape.
    """
    # find all loc tags and extract the news url value into a list
    for item in soup.find_all("loc"):
        try:
            # TODO: change to a DTO with a method that converts to json.
            add_url_dto = AddUrlDTO(
                id=item.get_text().split("/")[
                    item.get_text().split("/").index("a") + 1
                ],
                url=item.get_text(),
                yearmonth=yearmonth,
                undesired_url=False,
            )
            add_url_dto = _check_if_undesired_url(add_url_dto)
            value_list.append(add_url_dto)
        except UnwantedArticleException as e:
            logging.warning(e)
        except Exception as e:
            logging.error(
                f"Error {e} when scraping sitemap for url {item.get_text()}"
            )
    return value_list
def _check_if_undesired_url(add_url_dto: AddUrlDTO):
undesired_urls = [
"www.aftonbladet.se/autotest",
"special.aftonbladet.se",
"www.aftonbladet.se/nyheter/trafik",
"www.aftonbladet.se/sportbladet"
]
for string in undesired_urls:
if string in add_url_dto.url:
add_url_dto.undesired_url = True
return add_url_dto
def _fetch_sitemap_as_soup_object(url: str) -> BeautifulSoup:
    """Download the sitemap XML (3 s timeout) and parse it with lxml.

    NOTE(review): only status 404 is treated as missing; other HTTP error
    codes fall through and their bodies are parsed as if successful —
    confirm that is intended.
    """
    res: Response = requests.get(url, timeout=3)
    if res.status_code == 404:
        raise Exception(f"Can't find sitemap on url {url}")
    return BeautifulSoup(res.content, "lxml")
| true |
2770b339ed6091faafec61d8b378e6d743c83cbd | Python | Egor-Krivov/parallel_programming | /equations/plot.py | UTF-8 | 766 | 2.671875 | 3 | [] | no_license | from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# Read a whitespace-separated grid of floats from stdin until EOF; an empty
# line instead ends the grid and the next two lines are echoed back verbatim.
grid = []
out = ''  # NOTE(review): never used afterwards — candidate for removal
while True:
    try:
        line = input()
        if line == "":
            print(input())
            print(input())
            break;
        else:
            time_line = [float(s) for s in line.split()]
            grid.append(time_line)
    except EOFError:
        break
# Reverse the row order, then render the grid as a 3-D wireframe over the
# unit square (X along columns, Y along rows).
grid = np.array(list(reversed(grid)))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.linspace(0.0, 1.0, grid.shape[1]), np.linspace(0.0, 1.0, grid.shape[0])
X, Y = np.meshgrid(X, Y)
Z = grid
ax.axis()
# Stride so at most ~20 wires are drawn in each direction.
ax.plot_wireframe(X, Y, Z, rstride=grid.shape[0] // 20 + 1, cstride=grid.shape[1] // 20 + 1)
ax.set_xlabel('T')
ax.set_ylabel('X')
plt.show()
0c9fad2a5332a274a6581b0756d27c5e319eb174 | Python | michelle294/python--lists | /LIST.PY | UTF-8 | 833 | 3.953125 | 4 | [] | no_license | Games = [ "Running", "Football", "Volleyball", "javelling", "wrestling"]
print (Games [-5])
print (Games [-3])
print(Games)
#loop through the list
# Fix: the loop previously iterated with `for Games in Games:`, clobbering
# the list with its last element, and ended the print line with a stray `\`
# continuation flowing into a comment line, which is a SyntaxError.
for game in Games:
    print(game)
#check if item exists
if "Football" in Games:
    print("Football is there")
#methods
print(len(Games))
#Add an element to the Games list:
Games=['Running', 'Football','Volleyball','javelling','wrestling']
Games.append("jumping")
#insert the value "jumping" as the second element of the Games list:
Games=["Running","Football","Volleyball","javelling","wrestling"]
Games.insert (1, "jumping")
#Remove the second element of the Games list:
Games=['Running', 'Football','Volleyball','javelling','wrestling']
Games.pop(1)
#Reverse the order of the Games lists:
Games=["Running","Football","Volleyball","javelling","wrestling"]
Games.reverse()
print(Games)
77e8618546470c72ec10b5d27532dfe0d785b144 | Python | klieth/advent-of-code | /2021/d7/python/main.py | UTF-8 | 899 | 3.625 | 4 | [] | no_license | import sys
def move_linear(positions):
    """Cheapest total fuel when each step costs 1, minimised over every
    candidate alignment point between min and max position."""
    best = None
    for target in range(min(positions), max(positions) + 1):
        cost = sum(abs(pos - target) for pos in positions)
        if best is None or cost < best:
            best = cost
    return best
def move_triangular(positions):
    """Cheapest total fuel when moving d steps costs 1 + 2 + ... + d."""
    def cost_to(target):
        total = 0
        for pos in positions:
            distance = abs(pos - target)
            total += distance * (distance + 1) // 2
        return total
    return min(cost_to(t) for t in range(min(positions), max(positions) + 1))
if __name__ == '__main__':
    # Usage: main.py <input-file>; the first line of the file is a
    # comma-separated list of crab positions.
    if len(sys.argv) < 2:
        # Fix: corrected the typo "speciifed" in the error message.
        raise Exception("filename not specified, specify filename as first argument")
    filename = sys.argv[1]
    lines = None
    with open(filename) as f:
        lines = [line.strip() for line in f]
    if not lines:
        raise Exception("no lines found")
    positions = list(map(lambda x: int(x), lines[0].split(',')))
    print("part 1: " + str(move_linear(positions)))
    print("part 2: " + str(move_triangular(positions)))
| true |
af6ed630ca36b16a6589d72f3c6fbf6701172437 | Python | epfl-si/amm | /src/api/apikeyhandler.py | UTF-8 | 864 | 2.6875 | 3 | [
"MIT"
] | permissive | """(c) All rights reserved. ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE, Switzerland, VPSI, 2017"""
from .apikey import APIKey
from .redis import exists, get_apikeys, save_key
class ApiKeyHandler:
    """Static helpers around API-key validation and storage."""

    @staticmethod
    def validate(access, secret):
        """Return the owning username when (access, secret) is a valid key
        pair, otherwise None."""
        if access is None or secret is None:
            return None
        username = exists(access, secret)
        return username if username else None

    @staticmethod
    def get_keys(username):
        """Return the stored APIKeys of the given user."""
        return get_apikeys(username=username)

    @staticmethod
    def generate_keys(username):
        """Create, persist and return a fresh APIKey for the given user."""
        new_key = APIKey()
        save_key(username, new_key)
        return new_key
| true |
6b261a08c2ee14c6860468000d067de6ad01dac4 | Python | ErikvdT/Vasily | /oneping/cogs/ping.py | UTF-8 | 2,841 | 2.921875 | 3 | [
"MIT"
] | permissive | from json import load
import discord
from discord.ext import commands
from lib.parse import parse
async def get_member(ctx, member_role):
    """Resolve *member_role* (a role or user name/mention) to a list of
    discord members; on failure, notify the channel and return None.

    Fix: the bare `except:` clauses also swallowed KeyboardInterrupt /
    SystemExit; catching Exception handles converter failures while letting
    real interpreter signals propagate.
    """
    # convert given string to list of member objects
    try:
        # first assume input is a role
        role = await discord.ext.commands.RoleConverter().convert(ctx, member_role)
        member_lst = role.members
    except Exception:
        try:
            # else search for user
            member = await discord.ext.commands.MemberConverter().convert(ctx, member_role)
            member_lst = [member]
        except Exception:
            await ctx.send(f"Could not find {member_role}")
            return
    return member_lst
class OnePing(commands.Cog):
    """Cog exposing !ping: mention an arbitrary combination of roles and
    users through a temporary mentionable role that is deleted afterwards."""

    def __init__(self, client):
        self.client = client

    @commands.command()
    async def ping(self, ctx, *args):
        """
        Ping any number of roles or user.
        Usage: combine roles or usernames with any operator followed by an optional message
        +: add members of the two elements
        -: subtract members from first element
        /: get the intersection of the two elements
        Example: !ping role - user message"""
        # the temporary mentionable role's name comes from config.json
        with open("config.json", 'r') as config_file:
            configs = load(config_file)
        msg = ''
        eval_lst = []
        operator = False   # True while the previous token was an operator
        word = False       # True once the optional trailing message started
        # check syntax for alternating word and operator
        # and create list of items to evaluate
        # words without operator separation are interpreted as message to send
        for arg in args:
            if arg in {'+', '-', '/'} and not operator:
                eval_lst += arg
                operator = True
                word = False
            elif arg in {'+', '-', '/'} and operator:
                # Fix: ctx.send is a coroutine and was not awaited, so this
                # error message was never actually sent.
                await ctx.send("Invalid syntax. Use !help for more info.")
                return
            elif word:
                msg += f" {arg}"
                word = True
            else:
                # nest member lists in evaluation list
                eval_lst.append(await get_member(ctx, arg))
                word = True
                # Fix: reset the operator flag after consuming a role/user;
                # without this, any second operator (e.g. "a + b - c") was
                # rejected as invalid syntax, contradicting the docstring.
                operator = False
        # get list of members to ping
        member_lst = await parse(eval_lst)
        tmp_role = await ctx.guild.create_role(name = configs["tmp_role"], mentionable = True, reason = f"Ping cmd from Vasily invoked by {ctx.message.author}")
        msg = tmp_role.mention + msg + f"\n- from {ctx.message.author.nick}"
        for member in member_lst:
            await member.add_roles(tmp_role, reason = f"Ping cmd from Vasily invoked by {ctx.message.author}")
        # delete invoking message
        await ctx.message.delete()
        await ctx.send(msg)
        await tmp_role.delete(reason = f"Ping cmd from Vasily invoked by {ctx.message.author}")
        return
def setup(client):
    # discord.py extension entry point: register the cog on the bot.
    client.add_cog(OnePing(client))
| true |
468f205cde07f4f6c1b69ccb7d4484acb3dee3bf | Python | kaushikroychowdhury/Audio-Exploration | /code/model training/Traditional_ML Pipeline.py | UTF-8 | 12,129 | 2.59375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import sklearn.preprocessing as sp
from sklearn.model_selection import train_test_split, GridSearchCV, learning_curve, validation_curve
from statsmodels.api import OLS
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn import svm
from sklearn import metrics
import matplotlib.pyplot as plt
import seaborn as sns
# Apply seaborn's default plot theme globally.
sns.set()
# Widen pandas/numpy console output so wide feature tables print on one line.
desired_width=500
pd.set_option('display.width', desired_width)
np.set_printoptions(linewidth=desired_width)
pd.set_option('display.max_columns',20)
# Pre-extracted audio feature table (one row per track).
data = pd.read_csv("Audio_Features_Extraction.csv")
#InputColumns = [chroma_stft_mean,chroma_stft_var,chroma_cens_mean,chroma_cens_var,chroma_cqt_mean,chroma_cqt_var,melspectrogram_mean,melspectrogram_var,mfcc_mean,mfcc_var,rms_mean,rms_var,spec_bandwith_mean,spec_bandwith_var,spec_centroid_mean,spec_centroid_var,spec_contrast_mean,spec_contrast_var,spec_flatness_mean,spec_flatness_var,spec_rolloff_mean,spec_rolloff_var,tonnetz_mean,tonnetz_var,crossing_rate_mean,crossing_rate_var]
# Distribution of the Dataset ..
# print(data.info())
# print(data.describe(include = "all"))
# Drop the CSV's unnamed index column.
data = data.drop(labels="Unnamed: 0", axis=1)
# Encode the ten genre names as integers 1..10.
labels = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
d = dict(zip(labels,range(1,11)))
data['labels'] = data['labels'].map(d, na_action='ignore')
# print(data.describe(include = "all"))
# Split the features into the *_mean and *_var column families (every other
# column), each carrying a copy of the encoded labels.
# NOTE(review): the label column is assigned twice (as a Series, then via
# .values) — the second assignment makes the first redundant.
mean_data = data.loc[:, 'chroma_stft_mean':'crossing_rate_mean':2]
mean_data["labels"] = data["labels"]
mean_data["labels"] = data["labels"].values
var_data = data.loc[:, 'chroma_stft_var':'crossing_rate_var':2]
var_data["labels"] = data["labels"]
var_data["labels"] = data["labels"].values
# let's see the distribution of Different Features according to mean and variance ..
# plt.subplots(figsize = (15,10))
# fig = sns.PairGrid(mean_data)
# fig.map_diag(sns.kdeplot)
# fig.map_offdiag(sns.kdeplot, color = 'b')
# plt.title("Different Feature Mean")
# print(plt.show())
#
# # plt.subplots(figsize = (15,10))
# fig = sns.PairGrid(var_data)
# fig.map_diag(sns.kdeplot)
# fig.map_offdiag(sns.kdeplot, color = 'b')
# plt.title("Different Feature Variance")
# print(plt.show())
# fig, ax =sns.pairplot(mean_data, hue='labels', plot_kws={'alpha':0.1})
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# print(plt.show())
# fig, ax =sns.pairplot(var_data, hue='labels')
# ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# print(plt.show())
## PDF's of Mean ..
# Feature column names only (strip the trailing 'labels' column added above).
meancol = list(mean_data)[:-1]
varcol = list(var_data.columns)[:-1]
#
# fig, axes = plt.subplots(3, 4, figsize=(24, 15))
# fig.suptitle('PDF of mean(Features)')
#
# sns.histplot(ax=axes[0, 0], x= mean_data[meancol[0]], kde = True)
# sns.histplot(ax=axes[0, 1], x= mean_data[meancol[1]], kde = True)
# sns.histplot(ax=axes[0, 2], x= mean_data[meancol[2]], kde = True)
# sns.histplot(ax=axes[0, 3], x= mean_data[meancol[3]], kde = True)
#
# sns.histplot(ax=axes[1, 0], x= mean_data[meancol[4]], kde = True)
# sns.histplot(ax=axes[1, 1], x= mean_data[meancol[5]], kde = True)
# sns.histplot(ax=axes[1, 2], x= mean_data[meancol[6]], kde = True)
# sns.histplot(ax=axes[1, 3], x= mean_data[meancol[7]], kde = True)
#
# sns.histplot(ax=axes[2, 0], x= mean_data[meancol[8]], kde = True)
# sns.histplot(ax=axes[2, 1], x= mean_data[meancol[9]], kde = True)
# sns.histplot(ax=axes[2, 2], x= mean_data[meancol[10]], kde = True)
# sns.histplot(ax=axes[2, 3], x= mean_data[meancol[11]], kde = True)
# print(plt.show())
# sns.histplot(mean_data[meancol[12]], kde = True)
# print(plt.show())
# fig, axes = plt.subplots(3, 4, figsize=(24, 15))
# fig.suptitle('PDF of Variance(Features)')
# sns.histplot(ax=axes[0, 0], x= var_data[varcol[0]], kde = True)
# sns.histplot(ax=axes[0, 1], x= var_data[varcol[1]], kde = True)
# sns.histplot(ax=axes[0, 2], x= var_data[varcol[2]], kde = True)
# sns.histplot(ax=axes[0, 3], x= var_data[varcol[3]], kde = True)
#
# sns.histplot(ax=axes[1, 0], x= var_data[varcol[4]], kde = True)
# sns.histplot(ax=axes[1, 1], x= var_data[varcol[5]], kde = True)
# sns.histplot(ax=axes[1, 2], x= var_data[varcol[6]], kde = True)
# sns.histplot(ax=axes[1, 3], x= var_data[varcol[7]], kde = True)
#
# sns.histplot(ax=axes[2, 0], x= var_data[varcol[8]], kde = True)
# sns.histplot(ax=axes[2, 1], x= var_data[varcol[9]], kde = True)
# sns.histplot(ax=axes[2, 2], x= var_data[varcol[10]], kde = True)
# sns.histplot(ax=axes[2, 3], x= var_data[varcol[11]], kde = True)
# print(plt.show())
#
# sns.histplot(var_data[varcol[12]], kde = True)
# print(plt.show())
### //////////////////////////////////////// 3D visualization ///////////////////////////////
# from mpl_toolkits.mplot3d import Axes3D
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
#
# x = data[meancol[0]]
# y = data[varcol[0]]
# z = data[meancol[1]]
#
# ax.set_xlabel(meancol[0])
# ax.set_ylabel(varcol[0])
# ax.set_zlabel(meancol[1])
#
# ax.scatter(x, y, z)
#
# print(plt.show())
### //////////////////////////////////////////////////////////////////////////////////////////
# Feature matrix: every extracted audio feature except the target column.
inputs = data.drop('labels', axis=1)
# scale = sp.StandardScaler()
#
# # inputs['crossing_rate_var'], inputs['spec_flatness_mean'], inputs['spec_flatness_var'] = np.log(inputs['crossing_rate_var']), np.log(inputs['spec_flatness_mean']), np.log(inputs['spec_flatness_var'])
# sns.histplot(var_data[varcol[12]], kde = True)
# print(plt.show())
# scale = sp.MinMaxScaler()
# scale_inputs = scale.fit_transform(inputs)
# print(scale_inputs)
# Target vector: the integer-encoded genre labels.
Targets = data['labels']
# x_train, x_test, y_train, y_test = train_test_split(inputs, Targets, test_size=0.2, random_state=1, shuffle=True)
# mod = OLS(y_train, x_train )
# f = mod.fit()
# print(f.summary())
# Columns dropped before refitting — presumably the ones found insignificant
# in the commented-out OLS pass above; TODO confirm against that summary.
columns = ['melspectrogram_var', 'mfcc_var', 'spec_flatness_mean', 'spec_flatness_var','tonnetz_var' , 'chroma_stft_mean','spec_bandwith_var', 'spec_rolloff_mean', 'tonnetz_mean', 'crossing_rate_var', 'chroma_cqt_mean', 'chroma_stft_var']
inputs = inputs.drop(columns, axis = 1)
x_train, x_test, y_train, y_test = train_test_split(inputs, Targets, test_size=0.2, random_state=1, shuffle=True)
mod = OLS(y_train, x_train )
f = mod.fit()
print(f.summary())
print(" ")
# Standardise features (zero mean, unit variance) and re-split with the same
# seed for the classifier comparison below.
scale = sp.StandardScaler()
scaled_inputs = scale.fit_transform(inputs)
x_train, x_test, y_train, y_test = train_test_split(scaled_inputs, Targets, test_size=0.2, random_state=1, shuffle=True)
print("FITTING & TESTING DIFFERENT CLASSIFICATION MODELS ( with scaled data )")

# Fit each candidate classifier on the scaled training split and print its
# held-out accuracy in percent. The original repeated the same
# fit/predict/score boilerplate five times; drive it from a single table.
classifiers = [
    ("Random Forest", RandomForestClassifier(n_estimators = 200)),
    ("Decision Tree", DecisionTreeClassifier()),
    ("Support Vector Machine", svm.SVC()),
    ("K Nearest Neighbours", KNeighborsClassifier(n_neighbors=5)),
    ("Naive Bayes Algorithm", GaussianNB()),
]
for name, model in classifiers:
    model.fit(x_train, y_train)
    prediction = model.predict(x_test)
    print(name + " : ", metrics.accuracy_score(prediction, y_test)*100)
## Two best models are SVM and Random Forest Classifier with 61% and 64.5% respectively
## Hyper-parameter tuning these two models ..
### Random Forest Classifier .. ( Tuning Process )
#HYPER-PARAMS for Random Forest Classifier
# Number of trees in random forest
# n_estimators = [int(x) for x in np.linspace(start=200, stop=300, num=10)]
# # Number of Features to consider at every Split
# max_features = ['auto','sqrt']
# # Maximum number of levels in tree
# max_depth = [4,8,10]
# # min number of samples required to split a node
# min_samples_split = [2,5]
# # min num of samples required ateach leaf node
# min_samples_leaf = [1,2]
# # method of selecting Samples for training each tree
# bootstrap = [True,False]
#
# ### creating Param_grid ..
# param_grid = { 'n_estimators' : n_estimators,
# 'max_features' : max_features,
# 'max_depth' : max_depth,
# 'min_samples_split' : min_samples_split,
# 'min_samples_leaf' : min_samples_leaf,
#
# 'bootstrap' : bootstrap}
# print(param_grid)
#
# rf_model = RandomForestClassifier()
# rf_grid_model = GridSearchCV(estimator=rf_model, param_grid = param_grid, cv=3, verbose=2, n_jobs = -1, return_train_score = True, scoring = 'f1_macro')
# clf = rf_grid_model.fit(x_train, y_train)
#
# test_scores = clf.cv_results_['mean_test_score']
# train_scores = clf.cv_results_['mean_train_score']
#
# plt.plot(test_scores, label='test')
# plt.plot(train_scores, label='train')
# plt.legend(loc='best')
# print(plt.show())
#
# print(rf_grid_model.best_params_)
#
# print(f'Train Accuracy : {rf_grid_model.score(x_train,y_train):.3f}')
# print(f'Test Accuracy : {rf_grid_model.score(x_test,y_test):.3f}')
### Hyper-param Tuning for SVC ...
# model = svm.SVC()
# param_grid = {'C': [0.1, 1 ,5, 10],
# 'kernel': ['rbf','poly','sigmoid','linear'],
# 'degree' : [1,2,3,]}
# SVC_grid_model = GridSearchCV(model, param_grid=param_grid, verbose=2, n_jobs = -1,cv=3, return_train_score = True, scoring = 'f1_macro')
# clf = SVC_grid_model.fit(x_train,y_train)
#
# test_scores = clf.cv_results_['mean_test_score']
# train_scores = clf.cv_results_['mean_train_score']
#
# plt.plot(test_scores, label='test')
# plt.plot(train_scores, label='train')
# plt.legend(loc='best')
# print(plt.show())
#
# print(SVC_grid_model.best_params_)
#
# print(f'Train Accuracy : {SVC_grid_model.score(x_train,y_train):.3f}')
# print(f'Test Accuracy : {SVC_grid_model.score(x_test,y_test):.3f}')
# //////////////////////////////////////////////////////////////////////////////////////////////
# train_sizes, train_scores, test_scores = learning_curve(model, x_train, y_train, verbose=2, n_jobs = -1,cv=3, shuffle=True, scoring='accuracy')
# train_scores_mean = np.mean(train_scores, axis=1)
# test_scores_mean = np.mean(test_scores, axis=1)
#
# print(train_sizes)
#
# _, ax = plt.subplots(figsize = (10,5))
# ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
# label="Training score")
# ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
# label="Cross-validation score")
# ax.legend(loc="best")
# print(plt.show())
# param_range= [0.1, 1 ,5, 10]
# train_scores, test_scores = validation_curve(model, x_train, y_train, param_name='C', param_range= param_range, verbose=2, n_jobs = -1,cv=3, scoring='accuracy')
# train_scores_mean = np.mean(train_scores, axis=1)
# test_scores_mean = np.mean(test_scores, axis=1)
#
# # Calculating mean and standard deviation of training score
# mean_train_score = np.mean(train_scores, axis=1)
# std_train_score = np.std(train_scores, axis=1)
#
# # Calculating mean and standard deviation of testing score
# mean_test_score = np.mean(test_scores, axis=1)
# std_test_score = np.std(test_scores, axis=1)
#
# # Plot mean accuracy scores for training and testing scores
# plt.plot(param_range, mean_train_score,
# label="Training Score", color='b')
# plt.plot(param_range, mean_test_score,
# label="Cross Validation Score", color='g')
#
# # Creating the plot
# plt.title("Validation Curve with SVC")
# plt.xlabel("C")
# plt.ylabel("Accuracy")
# plt.tight_layout()
# plt.legend(loc='best')
# print(plt.show())
| true |
7099c82be5fb90a2835a4f461e353a7a82263028 | Python | mkuhn/se_protein | /go_per_se.py | UTF-8 | 1,828 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python2.7
# encoding: utf-8
from __future__ import print_function
import sys
import os
import re
from collections import defaultdict
q_threshold = 0.01
def readSE():
    """Yield (side_effect, best_protein, best_q) from protein_se_pv.tsv.

    Rows appear in runs per side effect (the file is presumably sorted by
    side effect — confirm upstream); within a run the first q <= q_threshold
    row whose protein group contains a non-metabolizing protein wins, and a
    later row replaces it only with an equal-or-better q.

    Fix: the Python-2-only iterator call fh_in.next() is replaced with the
    builtin next(fh_in), which works on both Python 2.6+ and Python 3.
    """
    metabolizing_proteins = set( s.split()[0] for s in open("metabolizing_proteins") )
    fh_in = open("protein_se_pv.tsv")
    next(fh_in)  # skip the header line
    current_se = None
    best_protein = None
    best_q = q_threshold
    for line in fh_in:
        (se, protein, p, q) = line.strip("\n").split("\t")
        if se != current_se:
            # flush the previous side effect before starting a new run
            if best_protein:
                yield current_se, best_protein, best_q
            current_se = se
            best_protein = None
            best_q = q_threshold
        q = float(q)
        if q <= best_q:
            # accept only protein groups containing a non-metabolizing protein
            for _protein in re.findall(r"ENSP\d+", protein):
                if _protein not in metabolizing_proteins:
                    best_q = q
                    best_protein = protein
                    break
    yield current_se, best_protein, best_q
def main():
    """Print each side effect's best protein annotated with a GO class."""
    # protein -> (go_id, go_name), keeping the alphabetically smallest name
    go_classification = {}
    for line in open("go_classification.tsv"):
        (protein, go_id, go_name) = line.strip("\n").split("\t")
        # by accident, sorting alphabetically is a good priority list
        if protein not in go_classification or go_name < go_classification[protein][1]:
            go_classification[protein] = (go_id, go_name)
    # manual override for one receptor
    go_classification["ENSP00000231509"] = ("GO:0004879", "nuclear receptor")
    for se, protein, q in readSE():
        go_id, go_name = "?", "?"
        # the first classified protein in the group determines the GO columns
        for _protein in re.findall(r"ENSP\d+", protein):
            if _protein in go_classification:
                go_id, go_name = go_classification[_protein]
                break
        print(se, protein, q, go_id, go_name, sep="\t")
if __name__ == '__main__':
    main()
| true |
61d679fbf2767a8e62d7d3eba06fa90916bc5dd9 | Python | Aasthaengg/IBMdataset | /Python_codes/p02899/s249482952.py | UTF-8 | 174 | 3.015625 | 3 | [] | no_license | n = int(input())
# a[i] is the 1-based slot reported for entry i+1; invert the permutation so
# output slot a[i] holds the number i+1, then print space-separated.
a = list(map(int, input().split()))
ans_list = [None for _ in range(n)]
for i in range(0, n):
    ans_list[a[i] - 1] = str(i + 1)
print(" ".join(ans_list))
| true |
35265ef28ffd3b0c9ca716a6102bbd619570e688 | Python | kwamena98/Time-Conversion | /timeconversion.py | UTF-8 | 528 | 2.6875 | 3 | [] | no_license | import math
import os
import re
import sys
def timeConversion(s):
    """Convert a 12-hour clock string 'hh:mm:ssAM'/'hh:mm:ssPM' to 24-hour."""
    parts = re.split('(\d+)', s)
    hour_txt, minute_txt, second_txt, meridiem = parts[1], parts[3], parts[5], parts[6]
    hour = int(hour_txt)
    if meridiem == "PM":
        if hour != 12:
            hour += 12
        return "{:02d}:{}:{}".format(hour, minute_txt, second_txt)
    if hour == 12:
        # midnight: 12 AM becomes 00
        return "00:{}:{}".format(minute_txt, second_txt)
    return "{}:{}:{}".format(hour_txt, minute_txt, second_txt)
| true |
95fdc4dddcaea48497392f6c59270e4e115bc337 | Python | c0ver1/Course-Design-for-Mathematical-Foundations-of-Cyberspace-Security | /CourseDesign2.py | UTF-8 | 1,524 | 3.140625 | 3 | [] | no_license | import math
from time import time
# RSA toy parameters: two primes p and q, modulus n, Euler phi fn.
p=35291161
q=35291153
n=p*q
fn=(p-1)*(q-1)
i=1
flag=0
# Scan upward, skipping every i that divides fn, and stop after the 11th
# surviving candidate; it becomes the public exponent e.
# NOTE(review): `fn % i` only rejects exact divisors of fn, not every i
# sharing a factor with fn — a gcd(i, fn) == 1 test would be the strict check.
while(flag<=10):
    i=i+1
    while(not(fn%i)):
        i=i+1
    flag=flag+1
e=i # here e is taken as the 11th smallest number coprime with fn
def GED(r1,r2): # extended Euclidean division: compute the modular inverse (private key)
    """Return the inverse of r1 modulo r2 (assumes gcd(r1, r2) == 1).

    Fix: the raw Bezout coefficient s can be negative; feeding a negative
    exponent into mrf() makes its bit-extraction loop spin forever (p // 2
    never reaches 0 for negative p in Python's floor division). Reducing
    modulo r2 returns the canonical non-negative representative instead.
    """
    modulus = r2  # r2 is overwritten inside the loop, remember the modulus
    q=r1//r2
    r=r1%r2
    s1=1
    t1=0
    s2=0
    t2=1
    s=s2
    t=t2
    while(r):
        s=s2*(-q)+s1
        s1=s2
        s2=s
        t=t2*(-q)+t1
        t1=t2
        t2=t
        q=r2//r
        temp=r
        r=r2%r
        r2=temp
    return s % modulus
def mrf(b,p,m): # modular repeated squaring
    """Compute b**p mod m by binary (square-and-multiply) exponentiation."""
    acc = 1
    base = b
    while p:
        if p % 2:
            acc = (acc * base) % m
        base = (base * base) % m
        p = p // 2
    return acc
print('公钥为'+str((e,n)))
ma=67119253
print('对'+str(ma)+'进行加密')
ca=mrf(ma,e,n) # encryption: ca = ma^e mod n
#ca=pow(ma,e,n) # Python's builtin pow(a,b,c) also computes a^b (mod c) quickly, via fast exponentiation
print('加密得:'+str(ca))
a=GED(e,fn) # obtain the private key (modular inverse of e)
print('私钥为'+str((a,n)))
cb=mrf(ca,a,n) # decryption: cb = ca^a mod n
#cb=pow(ca,a,n)
print('解密得:'+str(cb))
def decomPrime(pn): # pn is the product of two (odd) primes; recover both factors
    """Factor an odd semiprime by odd trial division downward from sqrt(pn).

    Fix: int(math.sqrt(pn)) goes through a 53-bit float and can be off for
    large pn; math.isqrt (Python 3.8+) computes the exact integer square
    root, so the search cannot start below (and thus skip) the factor
    closest to sqrt(pn).
    """
    m = math.isqrt(pn)
    if m % 2 == 0:
        m = m + 1
    while m > 1:
        if pn % m == 0:
            return m, pn // m
        m = m - 2
# CRT-accelerated decryption: recover the primes from n, decrypt modulo each
# prime separately with reduced exponents, then recombine via the Chinese
# Remainder Theorem.
m1,m2=decomPrime(n)
e1=a%(m1-1)
e2=a%(m2-1)
b1=mrf(ca,e1,m1)
b2=mrf(ca,e2,m2)
M1=m2
M2=m1
M_1=GED(M1,m1)
M_2=GED(M2,m2)
result=(b1*M1*M_1+b2*M2*M_2)%n
print('使用中国剩余定理加速解密得:'+str(result))
| true |
5121362ed1f2339650df90ac4f1625a985a2595f | Python | 18684092/AdvProg | /ProjectEuler/General-first-80-problems/problem80.py | UTF-8 | 988 | 3.921875 | 4 | [] | no_license | ###############
# Problem 80 #
###############
"""
It is well known that if the square root of a natural number is not an integer, then it is irrational. The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
"""
import time, math
from decimal import *
getcontext().prec = 110
print("Problem 80")
start = time.time()
total = 0
for n in range(1,101):
nStr = str(Decimal(n).sqrt())[0:101]
if len(nStr) > 2:
for digit in nStr:
if digit != ".":
total += int(digit)
end = time.time()
print("Square root digital exspansion",total)
print("Time taken:", int((end - start)*100) / 100, "Seconds")
print()
| true |
12a965b5f1151ecd50a83c666092ebc3043794f5 | Python | rnzhiw/Parallel_hyperparameter_optimization_for_loan_default_prediction | /give-me-some-credit/Give me some credit/data_plot.py | UTF-8 | 675 | 2.6875 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
import warnings
warnings.filterwarnings('ignore')
# Load the "Give Me Some Credit" train/test splits.
data_train = pd.read_csv('data/cs-training.csv')
data_test_a = pd.read_csv('data/cs-test.csv')
# Fix Chinese-character rendering in Seaborn and adjust the font size.
sns.set(font='SimHei')
# NOTE(review): 'loanAmnt' is not a documented column of the
# Give-Me-Some-Credit CSVs -- confirm these data files actually carry it.
data_train['loanAmnt'].value_counts()
#data_train['loanAmnt'].value_counts().plot.hist()
# Plot the loan-amount distributions of both splits side by side.
plt.figure(figsize=(16,12))
plt.subplot(221)
sub_plot_1=sns.distplot(data_train['loanAmnt'])
sub_plot_1.set_title("训练集", fontsize=18)  # "training set"
plt.subplot(222)
sub_plot_2=sns.distplot(data_test_a['loanAmnt'])
sub_plot_2.set_title("测试集", fontsize=18)  # "test set"
46cec894ed0d9610d59b501783259da6886deb36 | Python | cwood89/100DaysOfCode | /Python/CodeWars/launchCode.py | UTF-8 | 267 | 3.125 | 3 | [] | no_license | def carParking(n, available):
lot = [[0] * n] * n
available = ()
for i in range(n):
for j in range(n):
if lot[i][j] == 0:
available = (i, j)
lot[i][j] = 1
return available
carParking(5, available)
| true |
d8906955d3fca10763fb982a1b5bcfd0f985fb28 | Python | SuanFaRuoJi/coding_exercises | /leetcode/188_bset_time_to_buy_and_sell_stock_4/zkj_python.py | UTF-8 | 708 | 3.078125 | 3 | [] | no_license | class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
ans = 0
if k > len(prices) // 2:
for i in range(1, len(prices)):
if prices[i] > prices[i - 1]:
ans += prices[i] - prices[i - 1]
return ans
cash = [0] * len(prices)
for i in range(1, k + 1):
if len(prices) < 2 * i:
return ans
s = cash[2 * (i - 1)] - prices[2 * (i - 1)]
for j in range(2 * (i - 1) + 1, len(prices)):
c = cash[j]
cash[j] = max(prices[j] + s, cash[j - 1])
s = max(s, c - prices[j])
ans = cash[-1]
return ans
| true |
c44f04dac62f488c4ca942aa495906a5b58c9f9d | Python | kiryong-lee/Algorithm | /programmers/12940.py | UTF-8 | 90 | 2.65625 | 3 | [] | no_license |
import math
def solution(n, m):
    """Return [gcd(n, m), lcm(n, m)] for two positive integers."""
    divisor = math.gcd(n, m)
    multiple = (n // divisor) * m   # lcm, dividing first to keep the product small
    return [divisor, multiple]
| true |
d85744cec75b643995b49bb823f8bd75ada2a3ba | Python | mynameisalantao/CNN-for-animal-image-classification | /Prob1_CNN-Train.py | UTF-8 | 14,469 | 2.984375 | 3 | [] | no_license | #------------------------------ Import Module --------------------------------#
import numpy as np
import cv2
import os
import tensorflow as tf
import math
import matplotlib.pyplot as plt
#---------------------------------- Reminded ---------------------------------#
# On Windows, paths may use "\", but inside string literals "\" acts as an
# escape character.  Python paths can therefore be written either escaped
# ('d:\\a.txt') or as raw strings (r'd:\a.txt').  Using "/" avoids the
# problem entirely and is the recommended style.
#--------------------------------- Parameter ---------------------------------#
image_heigh=60 # common image height ("heigh" spelling kept: used throughout the file)
image_width=60 # common image width
data_number=1000 # training samples taken per animal class
data_test_number=400 # testing samples used for the accuracy figures
race=10 # number of animal classes
batch_size=50 # samples per training batch
layer1_node=60 # feature maps in convolutional layer 1
layer2_node=60 # feature maps in convolutional layer 2
layer3_node=1024 # units in fully-connected layer 3
layer4_node=100 # units in fully-connected layer 4
output_node=race # output units (one per class)
epoch_num=12 # number of epochs to run
record_train_accuracy=[] # per-epoch training accuracy history
record_test_accuracy=[] # per-epoch testing accuracy history
record_xentropy=[] # per-epoch cross-entropy history
#---------------------------------- Function ---------------------------------#
# Load images from a directory into one stacked array.
def read_image(path, data_number):
    """Read the first `data_number` images under `path`, resize each to
    (image_heigh, image_width) with bicubic interpolation, and stack them
    into a float array of shape (data_number, image_heigh, image_width, 3).
    """
    file_names = os.listdir(path)
    stacked = np.zeros([data_number, image_heigh, image_width, 3])
    for idx in range(data_number):
        image_path = path + '//' + file_names[idx]
        raw = cv2.imread(image_path)
        stacked[idx, :, :, :] = cv2.resize(
            raw, (image_heigh, image_width), interpolation=cv2.INTER_CUBIC)
    return stacked
# Create a weight variable initialised from a truncated normal distribution.
def weight_generate(shape):
    """Return a tf.Variable of `shape`, initialised with a truncated normal
    of stddev 1/sqrt(image_heigh * image_width)."""
    stddev = 1 / math.sqrt(float(image_heigh * image_width))
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
# Create a bias variable initialised from a truncated normal distribution.
def bias_generate(shape):
    """Return a tf.Variable of `shape`, initialised with a truncated normal
    of stddev 1/sqrt(image_heigh * image_width)."""
    stddev = 1 / math.sqrt(float(image_heigh * image_width))
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
# 2-D convolution wrapper (stride 1, SAME padding).
def conv(x, W):
    """Apply a stride-1, SAME-padded 2-D convolution of filter W over x."""
    stride_spec = [1, 1, 1, 1]
    return tf.nn.conv2d(x, W, strides=stride_spec, padding='SAME')
# 3x3 max-pooling with stride 2 (halves each spatial dimension).
def max_pooling(x):
    """Down-sample x with a 3x3 max-pool window moving 2 pixels at a time."""
    window = [1, 3, 3, 1]
    step = [1, 2, 2, 1]
    return tf.nn.max_pool(x, ksize=window, strides=step, padding='SAME')
def get_batch(training_data_index, get_batch_number, training_data):
    """Slice one mini-batch out of the training set.

    training_data_index: column 0 holds row indices into training_data,
    column 1 holds the matching class labels (0..race-1).
    get_batch_number: which batch (of module-level `batch_size` rows) to take.
    Returns (batch_data, batch_label) where labels are one-hot rows.
    """
    start = int(get_batch_number * batch_size)
    stop = int((get_batch_number + 1) * batch_size)
    row_ids = training_data_index[start:stop, 0].astype(int)
    labels = training_data_index[start:stop, 1].astype(int)
    batch_data = training_data[row_ids, :]
    batch_label = np.eye(race)[labels, :]
    return batch_data, batch_label
#-------------------------------- Input Data ---------------------------------#
# Build the training-data index table.
training_data_index=np.zeros([data_number*race,2]) # column 0: row index into training_data, column 1: its label
training_data_index[:,0]=np.linspace(0,data_number*race-1,data_number*race) # indices start out in order
# Build the testing-data index table.
testing_data_index=np.zeros([data_test_number*race,2]) # column 0: row index, column 1: label
testing_data_index[:,0]=np.linspace(0,data_test_number*race-1,data_test_number*race) # indices start out in order
# Load the training images, one class sub-directory at a time.
training_data=np.zeros([data_number*race,image_heigh,image_width,3]) # training data
#path=os.getcwd() # location of the running notebook
path=r'/home/alantao/deep learning/DL HW2'
training_data_path=path+'/animal-10/train' # root of the training images
file_name = os.listdir(training_data_path)
for file_num in range(0,race):
    filePath=training_data_path+'//'+file_name[file_num] # class directory
    training_data[file_num*data_number:file_num*data_number+data_number,:,:,:]=read_image(filePath,data_number)
    training_data_index[file_num*data_number:file_num*data_number+data_number,1]=file_num # assign the class label (0-9)
# Load the testing images.
testing_data=np.zeros([data_test_number*race,image_heigh,image_width,3]) # testing data buffer
#path=os.getcwd() # location of the running notebook
testing_data_path=path+'/animal-10/val' # root of the validation images
file_name = os.listdir(testing_data_path)
for file_num in range(0,race):
    filePath=testing_data_path+'//'+file_name[file_num] # class directory
    testing_data[file_num*data_test_number:(file_num+1)*data_test_number,:,:,:]=read_image(filePath,data_test_number)
    testing_data_index[file_num*data_test_number:(file_num+1)*data_test_number,1]=file_num # assign the class label (0-9)
# Reshape: flatten each image (all three RGB channels) into one row vector.
#training_data=training_data.reshape([-1,image_heigh*image_width,3]) # flatten each colour plane separately
training_data=training_data.reshape([-1,image_heigh*image_width*3])
testing_data=testing_data.reshape([-1,image_heigh*image_width*3])
#----------------------------------- CNN -------------------------------------#
# Create the TensorFlow session.
sess=tf.InteractiveSession()
# Placeholders for the flattened input images and their one-hot labels.
images_placeholder=tf.placeholder(tf.float32,shape=(None,image_heigh*image_width*3))
label_placeholder=tf.placeholder(tf.float32,shape=(None,race))
x_image=tf.reshape(images_placeholder,[-1,image_heigh,image_width,3]) # restore the 2-D image shape
## Build the network.
# Layer 1: convolution.
W1=weight_generate([4,4,3,layer1_node]) # 4x4 patches, 3 input channels (RGB), layer1_node feature maps out
b1=bias_generate([layer1_node])
hidden1=conv(x_image,W1)+b1 # convolve the image with W1 and add the bias b1
hidden1=tf.nn.relu(hidden1) # ReLU activation
hidden1=max_pooling(hidden1) # max pooling halves the spatial size
# Layer 2: convolution.
W2=weight_generate([4,4,layer1_node,layer2_node]) # 4x4 patches, layer1_node channels in, layer2_node maps out
b2=bias_generate([layer2_node])
hidden2=conv(hidden1,W2)+b2 # convolve hidden1 with W2 and add the bias b2
hidden2=tf.nn.relu(hidden2) # ReLU activation
hidden2=max_pooling(hidden2) # max pooling halves the spatial size again
# Flatten layer 2's output (layer2_node maps of size (image_heigh/4)*(image_width/4)).
hidden2_flat=tf.reshape(hidden2,[-1,int((image_heigh/4)*(image_width/4)*layer2_node)]) # flatten
# Layer 3: fully connected.
W3=weight_generate([int((image_heigh/4)*(image_width/4)*layer2_node),layer3_node]) # two max-pools shrink each side to 1/4
b3=bias_generate([layer3_node])
hidden3=tf.matmul(hidden2_flat,W3)+b3 # matrix-multiply by W3 and add the bias b3
hidden3=tf.nn.relu(hidden3) # ReLU activation
# Layer 4: fully connected.
W4=weight_generate([layer3_node,layer4_node])
b4=bias_generate([layer4_node])
hidden4=tf.matmul(hidden3,W4)+b4 # matrix-multiply by W4 and add the bias b4
hidden4=tf.nn.relu(hidden4) # ReLU activation
# Layer 5: fully connected output.
W5=weight_generate([layer4_node,output_node])
b5=bias_generate([output_node])
output=tf.matmul(hidden4,W5)+b5 # matrix-multiply by W5 and add the bias b5
output=tf.nn.softmax(output) # softmax over the race classes
# Model evaluation.
cross_entropy=-tf.reduce_sum(label_placeholder*tf.log(output)) # NOTE(review): log(softmax) can underflow; softmax_cross_entropy_with_logits would be numerically safer
training_method=tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) # Adam, learning rate 1e-4, minimising the cross entropy
accuracy_judge=tf.equal(tf.argmax(output,1),tf.argmax(label_placeholder,1)) # does the argmax prediction match the label?
accuracy=tf.reduce_mean(tf.cast(accuracy_judge,'float')) # mean correctness over the batch
# Run the training.
Loss_record=[] # loss history (only used by the disabled diagnostics below)
sess.run(tf.global_variables_initializer()) # initialise all variables
for epoch_times in range(0,epoch_num): # run epoch_num epochs
    print('epoch times=',epoch_times)
    for batch_times in range(0,int(data_number*race/batch_size)): # number of batches the data splits into
        get_x,get_y=get_batch(training_data_index,batch_times,training_data) # fetch one batch (data and labels)
        # One optimisation step.
        training_method.run(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
        # Training accuracy, recomputed on every batch.
        training_accuracy=accuracy.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
        print('training_accuracy=',training_accuracy)
        #record_train_accuracy.append(training_accuracy)
        # (disabled) testing accuracy after every batch
        #get_x=testing_data # 全部的testing data
        #temp_label=testing_data_index[:,1]
        #get_y=np.eye(race)[temp_label.astype(int),:]
        #testing_accuracy=accuracy.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
        #record_test_accuracy.append(testing_accuracy)
        # (disabled) print training and testing accuracy after this batch
        #print('training accuracy=',training_accuracy,', testing accuracy=',testing_accuracy)
    # (disabled) per-epoch training accuracy over the whole training set
    #temp_data=training_data_index[:,0] # 獲得training data的index
    #get_x=training_data[temp_data.astype(int),:] # 全部的training data
    #temp_label=training_data_index[:,1]
    #get_y=np.eye(race)[temp_label.astype(int),:]
    #training_accuracy=accuracy.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
    #record_train_accuracy.append(training_accuracy)
    # (disabled) per-epoch loss
    #temp_data=training_data_index[:,0] # 獲得training data的index
    #get_x=training_data[temp_data.astype(int),:] # 全部的training data
    #temp_label=training_data_index[:,1]
    #get_y=np.eye(race)[temp_label.astype(int),:]
    #Loss=cross_entropy.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
    #Loss=Loss/data_number
    #print('Loss=',Loss)
    #Loss_record.append(Loss)
    # (disabled) per-epoch testing accuracy
    #get_x=testing_data # 全部的testing data
    #temp_label=testing_data_index[:,1]
    #get_y=np.eye(race)[temp_label.astype(int),:]
    #testing_accuracy=accuracy.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
    #record_test_accuracy.append(testing_accuracy)
    # (disabled) print training and testing accuracy for the epoch
    #print('training accuracy=',training_accuracy,', testing accuracy=',testing_accuracy)
    # (disabled) per-class testing accuracy
    #race_accuracy=[]
    #for file_num in range(0,race):
    #    get_x=testing_data[file_num*data_test_number:(file_num+1)*data_test_number,:] # 某種類testing data
    #    temp_label=testing_data_index[file_num*data_test_number:(file_num+1)*data_test_number,1]
    #    get_y=np.eye(race)[temp_label.astype(int),:]
    #    testing_accuracy=accuracy.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
    #    race_accuracy.append(testing_accuracy)
    #print(race_accuracy)
    # Shuffle the index table after every epoch.
    np.random.shuffle(training_data_index) # shuffle
# Pick one correctly- and one incorrectly-classified image per class.
# NOTE(review): file names are listed from the *training* directory while the
# evaluated samples come from testing_data -- the printed paths look wrong.
correct_and_error_image_index=np.zeros([race,2]) # per class: index of one correct and one wrong classification
for file_num in range(0,race):
    print('Race=',file_num)
    filePath=training_data_path+'//'+file_name[file_num]
    imgs = os.listdir(filePath) # every image under that directory
    # Find a correctly classified sample.
    print('found right')
    for test_num in range(0,data_test_number):
        get_x=testing_data[file_num*data_test_number+test_num,:] # one sample
        get_x=get_x.reshape([-1,10800]) # 10800 = image_heigh*image_width*3
        temp_label=testing_data_index[file_num*data_test_number+test_num,1] # its label
        get_y=np.eye(race)[temp_label.astype(int),:] # label as a one-hot vector
        get_y=get_y.reshape([-1,10]) # 10 = race
        out=output.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
        #print(out)
        find_label = np.argmax(out)
        #print(find_label)
        if find_label==temp_label: # prediction matches the true label
            correct_and_error_image_index[file_num,0] # NOTE(review): no-op expression -- nothing is assigned here
            imagepath=filePath+'//'+imgs[test_num]
            print('currect_image=',imagepath)
            break
    # Find a misclassified sample.
    print('found error')
    for test_num in range(0,data_test_number):
        get_x=testing_data[file_num*data_test_number+test_num,:] # one sample
        get_x=get_x.reshape([-1,10800])
        temp_label=testing_data_index[file_num*data_test_number+test_num,1] # its label
        get_y=np.eye(race)[temp_label.astype(int),:] # label as a one-hot vector
        get_y=get_y.reshape([-1,10])
        out=output.eval(feed_dict={images_placeholder:get_x,label_placeholder:get_y})
        #print(out)
        find_label = np.argmax(out)
        #print(find_label)
        if find_label!=temp_label: # prediction differs from the true label
            correct_and_error_image_index[file_num,1] # NOTE(review): no-op expression -- nothing is assigned here
            imagepath=filePath+'//'+imgs[test_num]
            print('error_image=',imagepath)
            print('error to',find_label)
            break
# Result plots (left disabled).
#plt.figure(2)
#plt.plot(record_train_accuracy)
#plt.plot(record_test_accuracy)
#plt.xlabel('Number of epoch')
#plt.ylabel('Accuracy')
#plt.show()
#plt.figure(2)
#plt.plot(Loss_record)
#plt.xlabel('Number of batch')
#plt.ylabel('Cross entropy')
#plt.show()
#cv2.imshow('123', k1)
#cv2.waitKey(0)
| true |
0a9eb9e9838ac0395d07c5d089cb40988e7d7b35 | Python | mofiebiger/DublinBus | /Analytics/get_prediction_stops_version.py | UTF-8 | 3,981 | 2.8125 | 3 | [] | no_license | import config
import json
import numpy as np
import pandas as pd
import xgboost as xgb
import requests as req
import holidays as hol
ie_holidays = hol.Ireland()
from datetime import date, time, datetime
def set_season(x):
    """Map a month number to its season label.

    Months 11, 12, 1 -> 'Winter'; 8, 9, 10 -> 'Autumn'; 2, 3, 4 -> 'Spring';
    everything else (i.e. 5, 6, 7) -> 'Summer'.
    """
    if x in (11, 12, 1):
        return 'Winter'
    if x in (10, 9, 8):
        return 'Autumn'
    if x in (4, 3, 2):
        return 'Spring'
    return 'Summer'
def find_closest_(weather):
    """Return the forecast entry whose 'time' stamp is closest to now.

    weather: non-empty list of dicts, each carrying a Unix timestamp under
    the 'time' key.

    Bug fix: the original compared the *signed* difference (val - now), so
    it always selected the entry with the most negative difference (the
    earliest timestamp) rather than the nearest one; the absolute
    difference is used instead.
    """
    now = datetime.now()
    # Empirically measured clock offset between this server and the forecast
    # timestamps.  NOTE(review): magic constant -- confirm it is still valid.
    server_time_fault = 1565867883 - 1565863997
    current_timestamp = datetime.timestamp(now) + server_time_fault
    min_val = float('inf')
    min_idx = 0
    for idx, entry in enumerate(weather):
        diff = abs(entry['time'] - current_timestamp)
        if diff < min_val:
            min_val = diff
            min_idx = idx
    return weather[min_idx]
def prediction_route(StopA, StopB, PDate, PTime):
    """
    Return an estimate of travel time, in seconds, for a given journey.
    inputs:
    ---------------------------------------
    (str) PDate: YYYY-MM-DD
    (str) PTime: HH:MM
    (str) StopA: Start Stop
    (str) StopB: End Stop
    Outputs:
    ---------------------------------------
    (int) Travel Time: Seconds
    """
    # =========================== Import Model ========================= #
    # One pre-trained XGBoost model per (StopA, StopB) pair on disk.
    model = xgb.Booster()
    model.load_model(f"ModelFiles/StopModels/{StopA}_{StopB}.model")
    # ====================== Dateand Time objects ====================== #
    ddate = date(int(PDate[:4]), int(PDate[5:7]), int(PDate[-2:]))
    dtime = time(int(PTime[:2]), int(PTime[-2:]))
    # ========================== Weather Data ========================== #
    # NOTE(review): `full_weather` is a module-level global not defined in
    # this file as seen here -- confirm it is populated before calls.
    Now = datetime.now()
    # NOTE(review): day-of-month subtraction is wrong across month
    # boundaries; hour_diff is computed but never used.
    day_diff = ddate.day - Now.day
    hour_diff = dtime.hour - Now.hour
    # Far-future journeys use the daily forecast, near ones the hourly one.
    if day_diff > 2:
        weather = full_weather['daily']
        weather = find_closest_(weather)
    else:
        weather = full_weather['hourly']
        weather = find_closest_(weather)
    # ======================== Inputs DataFrame ======================== #
    # Feature names must match the columns the model was trained on.
    predictors = ['temperature','humidity', 'windSpeed', 'rain', 'hour', 'holiday', 'weekend',
                  'month','season_Winter','season_Autumn','season_Summer','season_Spring',
                  'icon_clear-day', 'icon_clear-night', 'icon_cloudy', 'icon_fog',
                  'icon_partly-cloudy-day', 'icon_partly-cloudy-night', 'icon_rain','icon_wind']
    # Make dataframe of inputs (single row of zeros, one column per feature).
    inputs = pd.DataFrame(np.zeros(len(predictors))).T
    inputs.columns = predictors
    inputs.hour = dtime.hour
    inputs.month= ddate.month
    # ========================= Weather Columns ======================== #
    inputs.temperature = weather['temperature']
    inputs.humidity = weather['humidity']
    inputs.windSpeed = weather['windSpeed']
    # convert in inches of liquid water per hour to mm
    inputs.rain = float(weather['precipIntensity'])/0.0394
    # ========================= Weekday/Weekend ======================== #
    # NOTE(review): there is no 'weekday' column in `predictors`, so this
    # assignment creates a plain attribute and the 'weekend' feature stays
    # 0; the 'holiday' feature is never set either -- confirm intent.
    if ddate.weekday() in [5,6]:
        inputs.weekday=False
    else:
        inputs.weekday=True
    # ===================== One Hot Encoded Columns ==================== #
    inputs["icon_{0}".format(weather['icon'])]=1
    inputs["season_{0}".format(set_season(ddate.month))]=1
    # ========================= Applying Model ========================= #
    inputdata = xgb.DMatrix(inputs)
    estimate = model.predict(inputdata)
    # ========================= Returning Data ========================= #
    # Single-row prediction, rounded to whole seconds.
    return int(round(estimate.tolist()[0],0))
b48f3698baddb97328233d500c1b765c44e4e3ff | Python | ZoltonZ12/home_tasks | /3_task/car.py | UTF-8 | 7,344 | 3.453125 | 3 | [] | no_license | # coding utf-8
import pickle
# Interactive console "car database": each record is a (make, power) tuple
# persisted in bd.pickle.  Menu options: 1 enter a record, 2 list/filter
# records, 3 quit.  All prompts and messages are in Russian (kept verbatim).
data = []
#f = open('bd.pickle','wb')  # creating the database -- uncomment on the first run
#pickle.dump(data,f,2)
#f.close()
while 1:
    todo = input('чего изволите ? 1-ВВЕСТИ или 2- ВЫВЕСТИ ? а возможно 3-УДАЛИТЬСЯ ?...\n')
    if todo.lower() == 'ввести' or todo.lower() == '1':
        # Option 1: load the database, validate a new (make, power) record
        # and write the database back.
        data_file = open('bd.pickle','rb')
        data = pickle.load(data_file)
        data_file.close()
        print('введите марку автомобиля и мощьность')
        while 1:
            mark = input('Марка : \n')
            if mark.isalpha():
                break
            else:
                print('марка должна содержать только буквеные символы')
        while 1:
            power = input('Мощьность :\n')
            if power.isdigit():
                break
            else:
                print('мощьность должна содержать только цыфры')
        data.append((mark,power))
        f = open('bd.pickle','wb')
        pickle.dump(data,f,2)
        f.close()
    elif todo.lower() == 'вывести' or todo.lower() == '2':
        # Option 2: load the database and list it, optionally filtered by
        # power (==, >, <, between) or by name (substring / exact match).
        # NOTE(review): powers are stored as strings, so >, < and "between"
        # compare lexicographically, not numerically.
        data_file = open('bd.pickle','rb')
        data = pickle.load(data_file)
        data_file.close()
        if len(data) == 0:
            print('база пока ещё пуста, её надо наполнить')
            continue
        while 1:
            sub_do= input('Хотите использовать фильтр ? 1-да , 2-нет ')
            if sub_do.lower() == 'да' or sub_do.lower() == '1':
                type_sort = input('выберите тип сортировки.\n 1 - сортировка по мощности \n 2 - сортировка по названию \n')
                if type_sort == '1':
                    type__sub_sort = input('хотите найти мощьность .\n 1 - равную \n 2 - больше, чем .. \n 3 - меньше, чем .. \n 4 - больше чем Х и меньше чем У.. \n' )
                    if type__sub_sort == '1':
                        val=input('введите значение мощьности \n')
                        if val.isdigit():
                            for j in [i for i in data if i[1] == val]:
                                print(j)
                            break
                        else:
                            print('не верной указаны параметры фильтра(скорее всего не цыфры)')
                            continue
                    elif type__sub_sort == '2':
                        val=input('введите значение мощьности \n')
                        if val.isdigit():
                            for j in [i for i in data if i[1] > val]:
                                print(j)
                            break
                        else:
                            print('не верной указаны параметры фильтра(скорее всего не цыфры)')
                            continue
                    elif type__sub_sort == '3':
                        val=input('введите значение мощьности \n')
                        if val.isdigit():
                            for j in [i for i in data if i[1] < val]:
                                print(j)
                            break
                        else:
                            print('не верной указаны параметры фильтра(скорее всего не цыфры)')
                            continue
                    elif type__sub_sort == '4':
                        val=input('введите значение мощьности в виде Х,У \n')
                        if val.split(',')[0].isdigit() and val.split(',')[1].isdigit():
                            for j in [i for i in data if i[1] < val.split(',')[1] and i[1] > val.split(',')[0]]:
                                print(j)
                            break
                        else:
                            print('не верной указаны параметры фильтра(скорее всего не цыфры)')
                            continue
                        pass
                    else:
                        continue
                elif type_sort == '2':
                    type__sub_sort = input('хотите найти по:\n 1 - вхождению части слова в имя модели \n 2 - точному совпадению модели \n ' )
                    if type__sub_sort == '1':
                        val=input('введите часть слова \n')
                        if val.isalpha():
                            for j in [i for i in data if val in i[0] ]:
                                print(j)
                            break
                        else:
                            print('не верной указаны параметры фильтра(скорее всего не буквы)')
                            continue
                    elif type__sub_sort == '2':
                        val=input('введите искомую модель \n')
                        if val.isalpha():
                            print('/////')
                            for j in [i for i in data if val == i[0]]:
                                print( len(j))
                                if len([i for i in data if val == i[0]])==0:# unfinished: this branch is never reached -- the loop body only runs when matches exist
                                    print('результатов удовлетворяющих поиску - не найдено')
                                else:
                                    print(j )
                            break
                        else:
                            print('не верной указаны параметры фильтра(скорее всего не буквы)')
                            continue
                    else:
                        pass
                else:
                    print('неверно выбраны параметры фильтрации... \n')
                    continue
            elif sub_do.lower() == 'нет' or sub_do.lower() == '2':
                data.sort()
                print('выводиться без фильтров')
                for i in data:
                    print(i)
                break
            else:
                # Any other answer: also list everything unfiltered.
                data.sort()
                print('выводиться без фильтров')
                for i in data:
                    print(i)
                break
    elif todo.lower() == 'удалиться' or todo.lower() == '3':
        print('no exit!!!!!')
        break
    else:
        print('нужно набрать: ввести ,вывести или выйти \n')
8226b58c2a529c6cff042013ea2d7507430e0cb0 | Python | sanchesthiago/curso_phyton | /desafio011_tinta.py | UTF-8 | 250 | 3.53125 | 4 | [] | no_license | print('======Desafio012 - Tinta m² ======\n')
lar=int(input('Qual a largura da parede:'))   # wall width, in metres
altu=int(input('Qual a altura da parede:'))   # wall height, in metres
mq=lar*altu   # area to paint, in square metres
lt=mq/2       # litres of paint needed, assuming 1 litre covers 2 m^2
print('Voce terá {} m² para pintar\nPrecisara de {} litros de Tinta '.format(mq,lt))
| true |
c71bbf9e90753914df1663acac9f2d8f34d86838 | Python | sviha1982/PeptideMassCalculator | /src/file_handler.py | UTF-8 | 2,222 | 2.90625 | 3 | [] | no_license | import pandas as pd
import streamlit as st
import os
from pathlib import Path
@st.cache
def load_cache_df(file_name: str):
    """Read a CSV from the data/ folder (first column as index), memoised
    via Streamlit's cache."""
    csv_path = os.path.join("data", file_name)
    return pd.read_csv(csv_path, index_col=0)
def load_df(file_name: str):
    """Read a CSV from the data/ folder, using the first column as index."""
    csv_path = os.path.join("data", file_name)
    return pd.read_csv(csv_path, index_col=0)
@st.cache
def create_data(df):
    """Split the full mass table into two projections:
    (character, mono) and (character, avg)."""
    return df[["character", "mono"]], df[["character", "avg"]]
def validate_input(df, user_input: str, column: str):
    """Parse and validate a "name,mono,avg,description" user-input string.

    If `name` already exists in df[column], the stored mono/avg masses are
    returned and the user-supplied numbers are ignored; otherwise the new
    entry is appended to df and persisted through save_new_entry().

    Returns (mono, avg, df).  Raises ValueError for empty input, non-numeric
    masses, or a name whose length is outside 2..20 characters, and
    IndexError when fewer than four comma-separated fields are supplied
    (same behaviour as before; the old `except ValueError: raise` wrapper
    was a no-op and has been dropped).
    """
    if len(user_input) == 0:
        raise ValueError("Error: User input should not be empty.")
    parts = user_input.split(",")
    name = parts[0]
    mono = float(parts[1])
    avg = float(parts[2])
    description = str(parts[3])
    if len(name) < 2:
        raise ValueError("Error: User input too short. Please enter at least 2 characters.")
    if len(name) > 20:
        raise ValueError("Error: User input too long. Please enter no more than 20 characters.")
    duplicate_check = df[df[column] == name]
    if not duplicate_check.empty:
        # Bug fix: the original used `duplicate_check[column][0]`, a
        # label-based lookup that raises KeyError whenever the matching
        # row's label is not 0; .iloc[0] addresses the first matching row
        # regardless of its index label.
        name = duplicate_check[column].iloc[0]
        mono = duplicate_check["mono"].iloc[0]
        avg = duplicate_check["avg"].iloc[0]
    else:
        df = save_new_entry(df, column, name, mono, avg, description)
    return mono, avg, df
def save_new_entry(df, column: str, name: str, mono: float, avg: float, description: str):
    """Append one row to df and persist the table to data/<column>.csv.

    Tables with exactly three columns store (column, mono, avg); wider
    tables also store the description.  Returns the extended DataFrame.

    Fix: DataFrame.append was deprecated in pandas 1.4 and removed in
    pandas 2.0, so the row is added with pd.concat instead; the duplicated
    to_csv calls of the two branches are also merged.
    """
    row = {column: name, "mono": mono, "avg": avg}
    if len(df.columns) != 3:
        row["description"] = description
    df = pd.concat([df, pd.DataFrame([row])], ignore_index=True)
    df.to_csv(os.path.join("data", f"{column}.csv"), index=True)
    return df
def get_ms(df, user_input: str, column: str):
    """Look up `user_input` in df[column] and return its (mono, avg) masses.

    Assumes at least one matching row exists; the first match wins.
    """
    matches = df[df[column] == user_input].reset_index()
    return matches["mono"][0], matches["avg"][0]
| true |