seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
6440384227 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import requests
from bs4 import BeautifulSoup
# Look up one word on Collins English-Hindi and print the translation.
word = input("enter word:")
data = requests.get("https://www.collinsdictionary.com/dictionary/english-hindi/" + word)
soup = BeautifulSoup(data.text, "html.parser")
# The translation lives in a <span class="quote newline"> element.
# (The original also called soup.prettify() and discarded the result.)
d = soup.find('span', {'class': 'quote newline'})
if d is not None and d.contents:
    print(d.contents[0])
else:
    # The original crashed with AttributeError when the word was not found.
    print("word not found:", word)
| chhn23/myprojects | one word dictionary.py | one word dictionary.py | py | 396 | python | en | code | 0 | github-code | 90 |
36775740182 | import streamlit as st
from PIL import Image
from eval import load_class_data, predict, prepare_model, preprocess_image
def main():
    """Streamlit entry point: upload a cassava leaf image, classify it,
    and show the predicted disease with a Wikipedia link."""
    class_data = load_class_data("label_num_to_disease_map.json")
    # model selection with string for future drop-down menu
    model = prepare_model("INCEPTION")
    st.title("Cassava Disease Classification")
    uploaded = st.file_uploader("Upload your image", type=["png", "jpg", "jpeg"])
    if uploaded is None:
        return
    image = Image.open(uploaded)
    _, middle, _ = st.columns([0.2, 5, 0.2])
    middle.image(image, use_column_width=True)
    predicted_class = predict(preprocess_image(image), model)
    st.markdown(f"**Predicted class:** {class_data[predicted_class][0]}")
    # the last class index is "healthy" style entry without a disease page
    if predicted_class != len(class_data) - 1:
        st.markdown(
            f"**[Wikipedia page]({class_data[predicted_class][1]}) about disease.**"
        )


if __name__ == "__main__":
    main()
| p-wojciechowski/cassava-classification | main.py | main.py | py | 970 | python | en | code | 0 | github-code | 90 |
9657056712 | import gym
from gym import spaces
import numpy as np
import math
import pprint
def normalize(board):
    """Map each 2048 tile value to a float in [0, 1].

    Every non-zero tile is replaced by its base-2 exponent, then exponents
    0..11 are scaled onto an evenly spaced [0, 1] grid (11 -> 1.0, i.e. 2048).
    """
    levels = np.linspace(start=0, stop=1, num=12)
    exponents = [[0 if cell == 0 else int(np.log2(cell)) for cell in row]
                 for row in board]
    return np.array([[levels[e] for e in row] for row in exponents])
class GameBoardEnv(gym.Env):
    """2048 as a Gym environment: 4x4 board, four slide/merge actions."""
    def __init__(self):
        # NOTE(review): the declared observation space is (16, 1) ints, but
        # reset()/step() actually return a flat length-16 float vector from
        # normalize() — confirm which shape consumers expect.
        self.observation_space = spaces.Box(0, 3000, (16, 1), dtype=int)
        self.action_space = spaces.Discrete(4)
    def reset(self):
        # Start a new game: empty board plus two random 2/4 tiles in
        # distinct cells; returns the normalized flattened observation.
        self.score = 0
        self.reward = 0
        self.ended = 0
        self.won = 0
        self.board = np.zeros((4, 4))
        # flat index i maps to cell (i // 4, i % 4)
        i, j = np.random.randint(0, 16, 2)
        while [math.trunc(i/4), i%4] == [math.trunc(j/4), j%4]:
            i, j = np.random.randint(0, 16, 2)
        self.board[math.trunc(i/4), i%4] = np.random.choice([2, 4])
        self.board[math.trunc(j/4), j%4] = np.random.choice([2, 4])
        return normalize(self.board).flatten()
    def step(self, action):
        # Apply one move and return (obs, reward, done, info).
        # move() compacts rows toward index 0 (a left slide); the flip /
        # transpose wrappers map that onto the four directions, so
        # action 0 slides right, 1 left, 2 up, 3 down.
        rec = self.board  # pre-move board (move() returns a new array, so comparing below is safe)
        if action == 0:
            self.board = np.flip(self.move(np.flip(self.board, axis=1)), axis=1)
        elif action == 1:
            self.board = self.move(self.board)
        elif action == 2:
            self.board = np.transpose(self.move(np.transpose(self.board)))
        elif action == 3:
            self.board = np.transpose(np.flip(self.move(np.flip(np.transpose(self.board), axis=1)), axis=1))
        # spawn a new '2' tile only if the board changed and has a free cell
        if np.all(self.board != 0) or (rec == self.board).all():
            pass
        else:
            i = np.random.randint(0, 16)
            while self.board[math.trunc(i/4)][i%4] != 0:
                i = np.random.randint(0, 16)
            self.board[math.trunc(i/4)][i%4] = 2
        # probe all four directions (test=True skips score/reward updates)
        board1 = np.flip(self.move(np.flip(self.board, axis=1), True), axis=1)
        board2 = self.move(self.board, True)
        board3 = np.transpose(self.move(np.transpose(self.board), True))
        board4 = np.transpose(np.flip(self.move(np.flip(np.transpose(self.board), axis=1), True), axis=1))
        if np.any(self.board >= 2048):
            self.won = 1
            self.ended = 1
        elif (board1 == board2).all() and (board1 == board3).all() and (board1 == board4).all() and (board2 == board3).all() and (board3 == board4).all():
            # no direction changes the board -> no legal moves left
            self.ended = 1
        return normalize(self.board).flatten(), self.reward, self.ended, {'score': self.score, 'won': self.won}
    def move(self, b, test = False):
        # Slide every row of b toward index 0, merging equal neighbours
        # at most once per move: a merged cell is temporarily stored as a
        # str so int/str comparisons block a second merge; the final
        # comprehension converts everything back to int. With test=True
        # the score/reward side effects are skipped (used for probing).
        result = []
        if not test:
            self.reward = 0
        for i in b:
            vector = i.tolist()
            for j in range(1, len(vector)):
                if j == 0:
                    continue  # unreachable: range starts at 1
                else:
                    # scan left from j-1 for the first non-empty cell
                    k = j-1
                    while k > 0 and vector[k] == 0:
                        k -= 1
                    if vector[k] == 0:
                        # everything left of j is empty: slide all the way
                        vector[k] = vector[j]
                        vector[j] = 0
                    elif vector[k] == vector[j]:
                        # equal tiles merge; mark as str to block re-merging
                        vector[k] *= 2
                        if not test:
                            self.score += vector[k]
                            self.reward += vector[k]
                        vector[k] = str(vector[k])
                        vector[j] = 0
                    else:
                        if k+1 == j:
                            continue  # already adjacent to the blocker
                        else:
                            # slide the tile next to the blocking cell
                            vector[k+1] = vector[j]
                            vector[j] = 0
            result.append([int(float(x)) for x in vector])
        result = np.array(result)
        return result
    def render(self):
        # Pretty-print the raw (un-normalized) board.
        pprint.pprint(self.board)
if __name__ == "__main__":
    # Interactive play: type an action (0-3) per turn until the game ends.
    game = GameBoardEnv()
    game.reset()
    game.render()
    while not game.ended:
        print(game.step(int(input())))
        print()
| dgg1dbg/g-2048 | g_2048/game_board.py | game_board.py | py | 3,830 | python | en | code | 1 | github-code | 90 |
15594360143 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 20:23:53 2018
@author: KushDani
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the breast-cancer CSV and visualize the class balance.
dataFrame = pd.read_csv('/Users/KushDani/Downloads/data.csv')
B,M = dataFrame.diagnosis.value_counts()
#print(dataFrame.head())
plt.bar(['Benign','Malignant'],[B,M], color=['green', 'red'], align='center')
plt.xlabel('Diagnosis')
plt.ylabel('Count')
plt.show()
#print(dataFrame.shape)
data = dataFrame.values #format DataFrame into numpy array
np.random.shuffle(data) #shuffle data
#split data into training and testing sets
trainingSet, testSet = data[:469,:], data[469:,:]
#extract corresponding labels into their own lists
trainingLabels, testLabels = trainingSet[:,1], testSet[:,1]
#encode labels: malignant -> 1, benign -> 0
trainingLabels[trainingLabels == 'M'] = 1
trainingLabels[trainingLabels == 'B'] = 0
# np.float was removed in NumPy 1.20+; the builtin float is equivalent here
trainingLabels = trainingLabels.astype(float)
# Zero out the diagnosis column so the model cannot see the target.
# (The original wrote np.nan here, which propagates NaN through every
# forward pass and makes the training loss NaN.)
trainingSet[:,1] = 0
trainingSet = trainingSet.astype(float)
print(type(trainingSet))
print(trainingLabels[0])
print(type(trainingLabels[0]))
# Binary classifier: 33 -> 16 -> 16 -> 1 with dropout between layers.
model = keras.Sequential()
#input layer
# NOTE(review): input_shape=(33,) must match the column count of data.csv
# (id + diagnosis + features) — confirm against the actual file.
model.add(keras.layers.Dense(16, input_shape=(33,),
                             kernel_initializer= 'normal',
                             activation=tf.nn.leaky_relu))
#first hidden layer
model.add(keras.layers.Dropout(0.1))
#could hypothetically use regular relu activation
model.add(keras.layers.Dense(16, input_shape=(16,),
                             kernel_initializer= 'normal',
                             activation=tf.nn.leaky_relu))
model.add(keras.layers.Dropout(0.1))
#output layer: sigmoid for binary classification
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
# NOTE(review): tf.train.AdamOptimizer is the TensorFlow 1.x API; under
# TF2 this would be tf.keras.optimizers.Adam — verify the TF version used.
model.compile(optimizer=tf.train.AdamOptimizer(0.0009),
              loss = 'binary_crossentropy',
              metrics=['accuracy'])
# NOTE(review): the monitored metric name 'acc' vs 'accuracy' differs
# between Keras versions; if monitoring silently fails, EarlyStopping is a no-op.
model.fit(trainingSet, trainingLabels, epochs=20, verbose=1, validation_split=0.2,
          callbacks=[keras.callbacks.EarlyStopping(monitor='acc',
                                                   patience=5)])
#model.evaluate(testSet, testLabels, verbose=0)
#^^^may need to work with larger batch size when fitting model next time
#CURRENT BEST VALIDATION ACCURACY IS 70%
| kdani7777/BreastCancerSmartDiagnosis | diagnosis.py | diagnosis.py | py | 2,797 | python | en | code | 0 | github-code | 90 |
27914046282 | __author__='yuan'
from collections import namedtuple
# Demo of collections.namedtuple construction and helper methods.
User=namedtuple('User',['name','age','height','edu'])
# user=User('Tom',28,175)
user_tuple=('Tom',28,175)
user_list=['Tom',28,175]
user_dict={
    'name':'Jack',
    'age':19,
    'height':175,
    'edu':'master',
}
# unpack the tuple positionally, pass the remaining field by keyword
user=User(*user_tuple,edu='master')
# print(user)
# name,age,*other=user
# print(name,age,other)
user_info_dict=user._asdict()
# NOTE(review): _make() consumes an iterable of *field values*; iterating a
# dict yields its KEYS, so this builds User(name='name', age='age', ...).
# If the dict's values were intended, User(**user_dict) would do that — verify.
user=User._make(user_dict)
print(user_info_dict)
# get the keys of user_dict (original comment, translated from Chinese)
print(user.name,user.age,user.height)
| ningmuning/python | PythonDemo/collection/demo1.py | demo1.py | py | 512 | python | en | code | 0 | github-code | 90 |
29294791154 | #
# Practical Test 4
#
# testAccounts.py - program to test functions of accounts.py
#
# Student Name :
# Student Number :
# Date/prac time :
#
from accounts import BankAccount
def balances():
    """Print every account's details plus the grand total of all balances.

    Reads the module-level ``my_accounts`` list of BankAccount objects.
    """
    print('\n#### Balances of All Accounts####\n')
    total = 0
    for account in my_accounts:
        print("Name: ", account.name, "\tNumber: ", account.num,
              "\tBalance: ", account.bal)
        total = total + account.bal
    print("\t\t\t\t\tTotal: ", total)
print('\n#### Bank Accounts ####\n')
my_accounts = []  # shared account list read by balances()
# add code for tasks here
balances() | vlanducci/FOP | Random/testAccounts.py | testAccounts.py | py | 609 | python | en | code | 1 | github-code | 90 |
6450068381 | # -*- coding: utf-8 -*-
import sys
def check_printlog(parser, logitdefault, debug):
    # Python 2 module (uses the ``print >> sys.stderr`` statement syntax).
    # Map the [general] 'log_activities' config option to a boolean:
    #   'yes' -> True, 'no' -> False, '' or missing option -> logitdefault,
    #   any other value -> None (after printing an error to stderr).
    if parser.has_option('general', 'log_activities'):
        logit = parser.get('general', 'log_activities').lower()
        if logit == 'yes':
            logit = True
            if debug:
                print >> sys.stderr, ("[debug] experms will print a log")
        elif logit == 'no':
            logit = False
            if debug:
                print >> sys.stderr, ("[debug] experms won't print a log")
        elif logit == '':
            logit = logitdefault
            if debug:
                print >> sys.stderr, ("[debug] 'log_activities' defaults to "
                                      "%s" % logitdefault)
        else:
            print >> sys.stderr, ("Error: 'log_activities' "
                                  "must be either 'yes' or 'no'")
            logit = None
    else:
        # option absent entirely -> fall back to the caller-supplied default
        logit = logitdefault
        if debug:
            # NOTE(review): this debug message is misleading when
            # logitdefault is True — it always claims no log will be printed.
            print >> sys.stderr, ("[debug] experms won't print a log")
    return logit
| open-dynaMIX/experms | src/experms/configfile/check_printlog.py | check_printlog.py | py | 1,034 | python | en | code | 2 | github-code | 90 |
35003963617 | from unittest.util import sorted_list_difference
# Read two prices from the user, echo them, and show the maximum.
precios = [int(input("Introduce un nuevo precio: ")) for _ in range(2)]
print("Los precios son ingresados; ")
print(precios)
preciomax = max(precios)
print(preciomax)
| spmiranda3/ciclos | ejercicio4.py | ejercicio4.py | py | 250 | python | es | code | 0 | github-code | 90 |
18372925789 | from collections import defaultdict as dd
# Competitive-programming submission: for each position `dam`, compute the
# alternating sum of A with the element at `dam` removed, using parity-split
# prefix sums so every answer is O(1).
n = int(input())
A = list(map(int, input().split()))
# prefix sums over odd- and even-indexed elements (key = last index included;
# defaultdict(int) makes out-of-range lookups like [-2] harmlessly zero)
odd_ac = dd(int)
even_ac = dd(int)
for i, a in enumerate(A):
    if i % 2 == 0:
        even_ac[i] = even_ac[i-2] + a
    else:
        odd_ac[i] = odd_ac[i-2] + a
#print(odd_ac)
#print(even_ac)
ans = []
# Removing index `dam` flips the parity of every later element, so each
# answer is stitched from four prefix-sum pieces (before/after the cut).
# NOTE(review): the n-1 / n-2 endpoints assume the parity layout produced
# above; verify against the problem statement before reusing.
for dam in range(n):
    is_odd = dam % 2
    total = 0
    if is_odd:
        total += odd_ac[n-2] - odd_ac[dam-2]
        total -= even_ac[n-1] - even_ac[dam-1]
        total += even_ac[dam-1]
        total -= odd_ac[dam-2]
    else:
        total += even_ac[n-1] - even_ac[dam-2]
        total -= odd_ac[n-2] - odd_ac[dam-1]
        total -= even_ac[dam-2]
        total += odd_ac[dam-1]
    ans.append(total)
print(*ans, sep=" ") | Aasthaengg/IBMdataset | Python_codes/p02984/s518011461.py | s518011461.py | py | 736 | python | en | code | 0 | github-code | 90 |
# Prompt for a file name, retry on failure, and print the file in uppercase.
fname = input("enter file name you want to read: ")
path = "/Users/bigdaddy/Desktop/Python_Data_Science/CourseEra/PythonForEverybody/Course2PythonDataStructure/"
print('file path is : ', path)
filecontent = None
# Keep asking until a file opens. The original only re-prompted for the
# name after a failure but never retried the open, so `filecontent` was
# undefined (NameError) on the next line.
while filecontent is None:
    try:
        filecontent = open(path + fname)
    except OSError:  # narrowed from a bare except: only file errors retried
        con = input("wrong file name entered: if you want to continue press Y or N : ")
        print(con)
        if con.upper() == "Y":
            fname = input("enter file name you want to read: ")
        else:
            quit()
for line in filecontent:
    line = line.rstrip()
    print(line.upper())
| akkiankit/Practice_DataScience | CourseEra/PythonForEverybody/Course2PythonDataStructure/FileHandling_1.py | FileHandling_1.py | py | 533 | python | en | code | 0 | github-code | 90 |
70458436778 | from collections import defaultdict
from copy import deepcopy
def def_list():
    """Factory for defaultdict: return a fresh empty list per call."""
    return list()
class Elf:
    """One elf on the grid, carrying its rotating direction preference list."""
    id: int

    def __init__(self):
        # Per-instance list. The original declared `choices` as a mutable
        # class attribute, so all elves shared one list object until their
        # first cycle_decision() rebound it — a classic latent-aliasing bug.
        self.choices: list[str] = ["N", "S", "W", "E"]

    def cycle_decision(self):
        """Rotate the preference list: the first choice moves to the back."""
        tmp = self.choices[0]
        self.choices = self.choices[1:]
        self.choices.append(tmp)
def get_lines(filename: str):
    """Read *filename* and return its lines (trailing newlines preserved)."""
    with open(filename, "r") as handle:
        return [row for row in handle]
# end_get_lines
def elf_decision(grid, x, y) -> str:
    """Return the move the elf at (x, y) proposes, or "" if it stays put.

    An elf with no occupied neighbour does nothing; otherwise it proposes
    the first direction in its preference order whose three facing cells
    are all empty ('.').
    """
    def vacant(cells):
        return all(grid[cy][cx] == '.' for cx, cy in cells)

    neighbours = [(x + dx, y + dy)
                  for dx in (-1, 0, 1) for dy in (-1, 0, 1)
                  if (dx, dy) != (0, 0)]
    if vacant(neighbours):
        return ""
    facing = {
        "N": [(x, y - 1), (x - 1, y - 1), (x + 1, y - 1)],
        "S": [(x, y + 1), (x - 1, y + 1), (x + 1, y + 1)],
        "W": [(x - 1, y), (x - 1, y - 1), (x - 1, y + 1)],
        "E": [(x + 1, y), (x + 1, y - 1), (x + 1, y + 1)],
    }
    for direction in grid[y][x].choices:
        if direction in facing and vacant(facing[direction]):
            return direction
    return ""
def simulate_elves(lines: list[str]) -> tuple[int, int]:
    """Run the Advent-of-Code 2022 day-23 elf diffusion simulation.

    Returns (empty ground tiles inside the elves' bounding box after
    round 10, 1-based index of the first round in which no elf moves).
    """
    grid = []
    grid_10 = []
    loop_count = 0
    # insert padding for grid (one '.' border row top and bottom)
    lines.insert(0, "." * len(lines[1].strip()))
    lines.append("." * len(lines[1].strip()))
    for line in lines:
        line = f".{line.strip()}."
        grid.append([c if c == '.' else Elf() for c in f"{line.strip()}"])
    while True:
        loop_count += 1
        # key = coord
        # val = list of initial elf positions
        proposals = defaultdict(def_list)
        # get all elves decisions, 1st half of round
        for y in range(len(grid)):
            for x in range(len(grid[y])):
                if isinstance(grid[y][x], Elf):
                    decision = elf_decision(grid, x, y)
                    grid[y][x].cycle_decision()
                    if decision == "N":
                        proposals[(x, y-1)].append((x, y))
                    elif decision == "S":
                        proposals[(x, y+1)].append((x, y))
                    elif decision == "W":
                        proposals[(x-1, y)].append((x, y))
                    elif decision == "E":
                        proposals[(x+1, y)].append((x, y))
        # elves have stopped moving
        if len(proposals) == 0:
            break
        # act on all elves decisions, 2nd half of round
        # (only uncontested proposals — exactly one proposer — actually move)
        for new_coord in proposals:
            if len(proposals[new_coord]) == 1:
                o_x, o_y = proposals[new_coord][0]
                n_x, n_y = new_coord
                grid[n_y][n_x] = grid[o_y][o_x]
                grid[o_y][o_x] = '.'
        # pad out grid so elves never sit on the border
        if any(isinstance(c, Elf) for c in grid[0]):
            grid.insert(0, ['.' for _ in range(len(grid[0]))])
        if any(isinstance(c, Elf) for c in grid[-1]):
            grid.append(['.' for _ in range(len(grid[0]))])
        if any(isinstance(grid[y][0], Elf) for y in range(len(grid))):
            for y in range(len(grid)):
                grid[y].insert(0, '.')
        if any(isinstance(grid[y][-1], Elf) for y in range(len(grid))):
            for y in range(len(grid)):
                grid[y].append('.')
        if loop_count == 10:
            grid_10 = deepcopy(grid)  # snapshot for the part-1 answer
    x0, x1, y0, y1 = len(grid_10[0]), 0, len(grid_10), 0
    elf_count = 0
    # gets minimal size of the grid that will contain the elves
    for y in range(len(grid_10)):
        for x in range(len(grid_10[y])):
            if isinstance(grid_10[y][x], Elf):
                if x < x0:
                    x0 = x
                if x > x1:
                    x1 = x
                if y < y0:
                    y0 = y
                if y > y1:
                    y1 = y
                elf_count += 1
    board_size = (x1-x0+1) * (y1-y0+1)
    ground_tiles = board_size - elf_count
    return (ground_tiles, loop_count)
# end_get_score
if __name__ == "__main__":
    # Solve both parts against the puzzle input in the working directory.
    lines = get_lines("input.txt")
    tiles, loop_count = simulate_elves(lines)
    print(f"Empty ground tiles: {tiles}")
    print(f"Elves stop moving at loop: {loop_count}")
| Benjababe/Advent-of-Code | 2022/Day 23/d23.py | d23.py | py | 4,354 | python | en | code | 0 | github-code | 90 |
37442069481 | import torch.utils.data as data
import torchvision.transforms as tfs
from torchvision.transforms import functional as FF
import os,sys
from tqdm import tqdm
sys.path.append('.')
sys.path.append('..')
import numpy as np
import torch
import random , glob
from PIL import Image
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
from torchvision.utils import make_grid
from metrics import *
from option import opt,cwd
from tools import pad_pil
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
def tensorShow(tensors, titles=None):
    """Display a list of image-tensor batches (BCHW) as stacked subplots,
    one make_grid montage per entry, titled from *titles*."""
    fig = plt.figure(figsize=(20, 20))
    for idx, (tensor, title) in enumerate(zip(tensors, titles)):
        montage = make_grid(tensor).numpy()
        axis = fig.add_subplot(211 + idx)
        axis.imshow(np.transpose(montage, (1, 2, 0)))
        axis.set_title(title)
    plt.show()
class EHDataset(data.Dataset):
    """Paired low/high-light dataset: <path>/<mode>/{low,high}/*.<format>.

    All images are eagerly loaded into memory as PIL images at
    construction time (self.lows / self.highs).
    """
    def __init__(self,path,mode,format):
        super(EHDataset,self).__init__()
        self.mode=mode
        ins=glob.glob(os.path.join(path,mode,'low','*.'+format))
        self.lows=[]
        self.highs=[]
        for im in tqdm(ins):
            low=Image.open(im);self.lows.append(low)
            high=Image.open(im.replace('low','high'));self.highs.append(high)
    def __getitem__(self, index):
        low=self.lows[index]
        high=self.highs[index]
        if self.mode=='train':
            # one random crop window, applied identically to both images
            i,j,h,w=tfs.RandomCrop.get_params(low,output_size=(opt.crop_size,opt.crop_size))
            low=FF.crop(low,i,j,h,w)
            high=FF.crop(high,i,j,h,w)
        if self.mode!='train':# eval/test sizes must be divisible by opt.divisor
            low=pad_pil(low,opt.divisor)
            high=pad_pil(high,opt.divisor)
        low,high=self.augData(low.convert('RGB'),high.convert('RGB'))
        return low,high
    def augData(self,data,target):
        # Apply the SAME random flip/rotation to input and target (the
        # flip transforms are used with probability 0 or 1 as a trick),
        # then convert to tensors. Only the input is normalized with the
        # dataset-specific mean/std; the target stays in [0, 1].
        if self.mode=='train':
            rand_hor=random.randint(0,1)
            rand_ver=random.randint(0,1)
            rand_rot=random.randint(0,3)
            data=tfs.RandomHorizontalFlip(rand_hor)(data)
            target=tfs.RandomHorizontalFlip(rand_hor)(target)
            data=tfs.RandomVerticalFlip(rand_ver)(data)
            target=tfs.RandomVerticalFlip(rand_ver)(target)
            if rand_rot:
                data=FF.rotate(data,90*rand_rot)
                target=FF.rotate(target,90*rand_rot)
        data=tfs.ToTensor()(data)
        target=tfs.ToTensor()(target)
        data=tfs.Normalize(mean=[0.0629,0.0606,0.0558],std=[0.0430,0.0412,0.0425])(data)
        return data ,target
    def __len__(self):
        return len(self.lows)
class AttentionGuidedDataset(data.Dataset):#dir:dataset/test/(enhance|dark|lowlight)/*.png
    """Paired dataset for the AttentionGuided data. Unlike EHDataset, only
    file paths are stored; images are opened lazily in __getitem__."""
    def __init__(self,path,mode,subset,format):#subset:dark/lowlight
        super(AttentionGuidedDataset,self).__init__()
        self.mode=mode
        ins=glob.glob(os.path.join(path,mode,subset,'*.'+format))
        self.lows=[]
        self.highs=[]
        for im in tqdm(ins):
            self.lows.append(im)
            self.highs.append(im.replace(subset,'enhance'))
        # eager-loading variant kept for reference:
        # low=Image.open(im);self.lows.append(low)
        # high=Image.open(im.replace(subset,'enhance'));self.highs.append(high)
    def __getitem__(self, index):
        low=Image.open(self.lows[index])
        high=Image.open(self.highs[index])
        minWid=min(low.size)
        if self.mode=='train':
            if opt.crop_size>minWid:
                # image smaller than crop_size: crop with a slightly smaller,
                # divisor-aligned size, then resize up to crop_size
                # (translated from the original Chinese comment)
                crop_size=minWid-minWid%opt.divisor
                i,j,h,w=tfs.RandomCrop.get_params(low,output_size=(crop_size,crop_size))
                low=FF.crop(low,i,j,h,w);low=low.resize((opt.crop_size,opt.crop_size),Image.BILINEAR)
                high=FF.crop(high,i,j,h,w);high=high.resize((opt.crop_size,opt.crop_size),Image.BILINEAR)
            else :
                i,j,h,w=tfs.RandomCrop.get_params(low,output_size=(opt.crop_size,opt.crop_size))
                low=FF.crop(low,i,j,h,w)
                high=FF.crop(high,i,j,h,w)
        if self.mode!='train':# eval/test sizes must be divisible by opt.divisor
            low=pad_pil(low,opt.divisor)
            high=pad_pil(high,opt.divisor)
        low,high=self.augData(low.convert('RGB'),high.convert('RGB'))
        return low,high
    def augData(self,data,target):
        # Same paired augmentation as EHDataset.augData (duplicated code;
        # keep the two in sync if either changes).
        if self.mode=='train':
            rand_hor=random.randint(0,1)
            rand_ver=random.randint(0,1)
            rand_rot=random.randint(0,3)
            data=tfs.RandomHorizontalFlip(rand_hor)(data)
            target=tfs.RandomHorizontalFlip(rand_hor)(target)
            data=tfs.RandomVerticalFlip(rand_ver)(data)
            target=tfs.RandomVerticalFlip(rand_ver)(target)
            if rand_rot:
                data=FF.rotate(data,90*rand_rot)
                target=FF.rotate(target,90*rand_rot)
        data=tfs.ToTensor()(data)
        target=tfs.ToTensor()(target)
        data=tfs.Normalize(mean=[0.0629,0.0606,0.0558],std=[0.0430,0.0412,0.0425])(data)
        return data ,target
    def __len__(self):
        return len(self.lows)
def get_train_loader(trainset=opt.trainset):
    """Build the training DataLoader for the given dataset name.

    Raises ValueError for unknown names; the original fell through and hit
    an UnboundLocalError on `loader` instead.
    """
    path = os.path.join(opt.data, trainset)
    print(path)
    if trainset == 'LOL':
        loader = DataLoader(EHDataset(path, 'train', 'png'), batch_size=opt.bs, shuffle=True)
    elif trainset == 'AttentionGuided':
        loader = DataLoader(AttentionGuidedDataset(path, 'train', opt.subset, 'png'), batch_size=opt.bs, shuffle=True)
    else:
        raise ValueError(f"unknown trainset: {trainset!r}")
    return loader
def get_eval_loader(trainset=opt.trainset):
    """Build the evaluation DataLoader (batch size 1, no shuffling).

    Raises ValueError for unknown names instead of the original
    UnboundLocalError fall-through.
    """
    path = os.path.join(opt.data, trainset)
    if trainset == 'LOL':
        loader = DataLoader(EHDataset(path, 'eval', 'png'), batch_size=1, shuffle=False)
    elif trainset == 'AttentionGuided':
        loader = DataLoader(AttentionGuidedDataset(path, 'test', opt.subset, 'png'), batch_size=1, shuffle=False)
    else:
        raise ValueError(f"unknown trainset: {trainset!r}")
    return loader
def get_eval_train_loader(trainset=opt.trainset):
    """Loader over an 'eval_train' subset of the training data (15 images,
    same size as the eval set) used to check for overfitting.
    (Docstring translated from the original Chinese comment.)

    Raises ValueError for unknown names instead of the original
    UnboundLocalError fall-through.
    """
    path = os.path.join(opt.data, trainset)
    if trainset == 'LOL':
        loader = DataLoader(EHDataset(path, 'eval_train', 'png'), batch_size=1, shuffle=False)
    elif trainset == 'AttentionGuided':
        loader = DataLoader(AttentionGuidedDataset(path, 'eval_train', opt.subset, 'png'), batch_size=1, shuffle=False)
    else:
        raise ValueError(f"unknown trainset: {trainset!r}")
    return loader
if __name__ == "__main__":
    #python data_utils.py --trainset=AttentionGuided --subset=dark lowlight
    from tools import get_illumination
    # smoke test: iterate a loader and visualize each pair with its
    # mean-illumination value as the subplot title
    t_loader=get_train_loader()
    # t_loader=get_eval_loader()
    # t_loader=get_eval_train_loader()
    for _,(input,gt) in enumerate(t_loader):
        # ssim1=ssim(input,gt)
        i1=get_illumination(input)
        i=get_illumination(gt)
        tensorShow([input,gt],[f'{i1}',f'{i}'])
    # path='/Users/wangzhilin/Downloads/data/LightEnchancement/LOL'
    # da=EHDataset(path,'eval','png')
    pass
| zhilin007/LightEnhancement | net/data_utils.py | data_utils.py | py | 7,047 | python | en | code | 0 | github-code | 90 |
40984449624 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import messages
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.views.generic import TemplateView
from django_celery_beat.models import PeriodicTask, CrontabSchedule
import json
from .forms import *
from .tasks import *
from .models import *
class Sendman(TemplateView):
    """Mailing page: send a template to a subscriber list, either
    immediately via Celery or repeatedly on a crontab schedule."""
    template_name = "sendman/sender.html"
    def dispatch(self, request, *args, **kwargs):
        templates = Template.objects.all()
        recipients = SubscriberList.objects.all()
        intervals = CrontabSchedule.objects.all()
        schedules = []
        for i in intervals:
            schedules.append((i.id, i))
        context = {
            "recipients": recipients,
            "templates": templates,
            "schedules": schedules
        }
        if request.method == 'POST':
            tmpl = request.POST.get('template', None)
            rcpt_list = request.POST.get('recipients', None)
            repeat = request.POST.get('repeat', None)
            schedule = request.POST.get('schedules', None)
            template = Template.objects.get(name=tmpl)
            recipients = SubscriberList.objects.get(name=rcpt_list)
            if repeat:
                if not schedule:
                    # No existing schedule chosen: build a crontab from the
                    # individual form fields, defaulting empty fields to '*'.
                    minute = "*" if not request.POST.get('minute') else request.POST.get('minute')
                    hour = "*" if not request.POST.get('hour') else request.POST.get('hour')
                    day_of_month = "*" if not request.POST.get('day_of_month') else request.POST.get('day_of_month')
                    month = "*" if not request.POST.get('month') else request.POST.get('month')
                    day_of_week = "*" if not request.POST.get('day_of_week') else request.POST.get('day_of_week')
                    schedule, created = CrontabSchedule.objects.get_or_create(
                        minute=minute,
                        hour=hour,
                        day_of_month=day_of_month,
                        month_of_year=month,
                        day_of_week=day_of_week,
                    )
                else:
                    # The select box posts the schedule's repr, e.g.
                    # "CrontabSchedule(<id>, ...)"; extract the id back out.
                    id = int(schedule.split('(')[1].split(",")[0])
                    schedule = CrontabSchedule.objects.get(id=id)
                task = PeriodicTask.objects.create(
                    crontab=schedule,
                    name='crontab:{}'.format(schedule),
                    task='send_mail_task',
                    args=json.dumps([template.id, recipients.id])
                )
            else:
                # one-off send via the Celery task queue
                send_mail_task.delay(template.id, recipients.id)
            SendHistory.objects.create(template=template, rcpt_list=recipients)
            context['message'] = "Рассылка успешно выполнена!"
        return render(request, self.template_name, context)
class ShowTemplates(TemplateView):
    """List every stored e-mail template."""
    template_name = "sendman/templates.html"

    def dispatch(self, request, *args, **kwargs):
        context = {"templates": Template.objects.all()}
        return render(request, self.template_name, context)
class NewTemplate(TemplateView):
    """Show the template-creation form and persist valid submissions."""
    template_name = "sendman/new_template.html"

    def dispatch(self, request, *args, **kwargs):
        if request.method != 'POST':
            form = AddTemplateForm()
            return render(request, self.template_name, {"form": form})
        form = AddTemplateForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            messages.success(request, "Макет добавлен!")
            return redirect(reverse("send_email"))
        # invalid POST: re-render with the bound form so errors are shown
        return render(request, self.template_name, {"form": form})
def deleteTemplate(request, pk):
    """Delete one template and re-render the template list.

    Uses get_object_or_404 (consistent with showList) so a missing pk
    yields an HTTP 404 instead of an unhandled DoesNotExist exception.
    """
    template_name = "sendman/templates.html"
    template = get_object_or_404(Template, pk=pk)
    template.delete()
    templates = Template.objects.all()
    context = {
        "templates": templates,
    }
    return render(request, template_name, context)
class ShowHistory(TemplateView):
    """Display the full send history."""
    template_name = "sendman/history.html"

    def dispatch(self, request, *args, **kwargs):
        context = {"history": SendHistory.objects.all()}
        return render(request, self.template_name, context)
class ShowLists(TemplateView):
    """Display every subscriber list."""
    template_name = "sendman/recipients.html"

    def dispatch(self, request, *args, **kwargs):
        context = {"recipients": SubscriberList.objects.all()}
        return render(request, self.template_name, context)
def showList(request, pk):
    """Edit an existing subscriber list and (re)attach its subscribers.

    POST expects 'name' plus a 'subscribers' string of ';'-separated
    entries, each ending in "<name> <surname> <email>".
    """
    template_name = "sendman/list.html"
    subscriber_list = get_object_or_404(SubscriberList, pk=pk)  # renamed: `list` shadowed the builtin
    if request.method == 'POST':
        form = AddSubscriberListForm(request.POST, instance=subscriber_list)
        list_name = request.POST.get('name', None)
        subscribers = request.POST.get('subscribers', None)
        number = 0
        if form.is_valid():
            form.save()
            # .get() returns a model instance directly; the original code
            # indexed it with [0], which raised TypeError on every POST.
            subscriber_list = SubscriberList.objects.get(name=list_name)
            for info in subscribers.split(';'):
                # NOTE(review): assumes each entry ends with
                # "<name> <surname> <email>"; malformed input raises IndexError.
                parts = info.split(' ')
                name, surname, email = parts[-3], parts[-2], parts[-1]
                number = number + 1
                new = Subscriber.objects.get_or_create(name=name, surname=surname, email=email)[0]
                new.list.add(subscriber_list.pk)
            subscriber_list.number = 0 if not number else number
            subscriber_list.save()  # persist the count (was never saved before)
            messages.success(request, "Список изменен!")
            return redirect(reverse("recipients"))
    else:
        form = AddSubscriberListForm(instance=subscriber_list)
    context = {
        "form": form
    }
    return render(request, template_name, context)
def newList(request):
    """Create a new subscriber list from the submitted form and attach the
    posted subscribers (';'-separated "<name> <surname> <email>" entries)."""
    template_name = "sendman/list.html"
    newlist = True
    if request.method == 'POST':
        form = AddSubscriberListForm(request.POST)
        list_name = request.POST.get('name', None)
        subscribers = request.POST.get('subscribers', None)
        number = 0
        if form.is_valid():
            form.save()
            new_list = SubscriberList.objects.get_or_create(name=list_name)[0]  # renamed: `list` shadowed the builtin
            subscriber_info = subscribers.split(';')
            # NOTE(review): split(';') on an empty string yields [''], so
            # this guard is always truthy; malformed entries raise IndexError.
            if subscriber_info:
                for info in subscriber_info:
                    parts = info.split(' ')
                    name, surname, email = parts[-3], parts[-2], parts[-1]
                    number = number + 1
                    new = Subscriber.objects.get_or_create(name=name, surname=surname, email=email)[0]
                    new.list.add(new_list.pk)
            new_list.number = 0 if not number else number
            new_list.save()  # persist the count (was never saved before)
            messages.success(request, "Список создан!")
            return redirect(reverse("recipients"))
    else:
        form = AddSubscriberListForm()
    context = {
        "form": form,
        "newlist": newlist
    }
    return render(request, template_name, context)
def deleteList(request, pk):
    """Remove one subscriber list, then re-render the overview page."""
    SubscriberList.objects.get(pk=pk).delete()
    context = {"recipients": SubscriberList.objects.all()}
    return render(request, "sendman/recipients.html", context)
| schMok0uTr0nie/sendmail | sendman/views.py | views.py | py | 7,460 | python | en | code | 0 | github-code | 90 |
69936181738 | import requests
from datetime import datetime, timedelta
import json
import csv
import os
def obtener_temperatura_pronostico(api_key):
    """Fetch an hourly temperature forecast from the OpenWeather One Call API.

    Returns a dict mapping datetime -> temperature in °C for 24 hours;
    returns an empty dict if the request fails.
    """
    # fixed coordinates of the forecast location
    latitud = 40.0271087
    longitud = -3.9115161
    url_pronostico = f'https://api.openweathermap.org/data/2.5/onecall?lat={latitud}&lon={longitud}&exclude=current,minutely,daily&appid={api_key}&units=metric'
    try:
        # request the forecast; raise_for_status surfaces HTTP errors
        response_pronostico = requests.get(url_pronostico)
        response_pronostico.raise_for_status()
        # parse the JSON payload
        datos_pronostico = response_pronostico.json()
        pronostico = {}
        # Label 24 hourly entries starting from today's local midnight.
        # NOTE(review): the One Call 'hourly' array begins at the *current*
        # hour, not midnight, so these labels are likely shifted — verify.
        base_time = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
        for hora in range(24):
            print(hora)
            fecha_hora = base_time + timedelta(hours=hora)
            print(fecha_hora)
            temperatura = datos_pronostico['hourly'][hora]['temp']
            print(temperatura)
            pronostico[fecha_hora] = temperatura
        return pronostico
    except requests.exceptions.RequestException as e:
        print(f'Error al obtener el pronóstico: {e}')
        return {}
# OpenWeatherMap API key
api_key = 'APIKEY HERE'  # request an API key for your project

# Generate the temperature forecast
forecast = obtener_temperatura_pronostico(api_key)

# Show the forecast for every hour from 00:00:00 to 23:00:00.
# (The original printed this same loop a second time at the end of the
# script — the duplicate was removed.)
for hora, temperatura in forecast.items():
    print(f"{hora.strftime('%H:%M:%S')}: {temperatura}°C")

#===============================
# Current date, used to name the output files
fecha_actual = datetime.now().strftime('%Y-%m-%d')

# Create the destination folder if it does not exist
# (os is already imported at the top of the file; the redundant mid-script
# `import os` was dropped)
carpeta_destino = f'/home/alfonso/data/{fecha_actual}/'  # adjust to the real path
os.makedirs(carpeta_destino, exist_ok=True)

# Convert the datetime keys to strings so they are JSON-serializable
forecast_str_keys = {hora.strftime('%Y-%m-%d %H:%M:%S'): temperatura for hora, temperatura in forecast.items()}

# Save the forecast as JSON
json_file = os.path.join(carpeta_destino, f'pronostico_temperatura_{fecha_actual}.json')
with open(json_file, 'w') as f:
    json.dump(forecast_str_keys, f, indent=4)

# Save the forecast as CSV
csv_file = os.path.join(carpeta_destino, f'pronostico_temperatura_{fecha_actual}.csv')
with open(csv_file, 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Fecha y Hora', 'Temperatura (°C)'])
    for hora, temperatura in forecast.items():
        writer.writerow([hora.strftime('%Y-%m-%d %H:%M:%S'), temperatura])
| rrpp/get_daily_power_prices_by_hour | forecast_s_1.3.py | forecast_s_1.3.py | py | 3,470 | python | es | code | 0 | github-code | 90 |
19444554455 | from django.shortcuts import render,HttpResponse
from all_models.models import *
from apps.common.func.WebFunc import *
import openpyxl,xlrd,json,platform
from django.http import StreamingHttpResponse
from urllib import parse
from apps.ui_task.services.PageObjectService import PageObjectService
from apps.version_manage.services.common_service import VersionService
from apps.common.config import commonWebConfig
logger = logging.getLogger("django")
def uiAddSimpleTaskPage(request):
    """Render the page for creating a new simple UI test task."""
    text = {
        "pageTitle": "UI测试任务",
        "subPageTitle": "用例文件查看",
    }
    context = {
        "text": text,
        "option": "add",
        "uiAddSimpleTaskPage": "current-page",
        "businessLine": dbModelListToListDict(BusinessService.getAllBusinessLine()),
    }
    return render(request, "ui_test/ui_task/ui_simple_task.html", context)
def saveSimpleTask(request):
    """Create (empty taskId) or update a TbUiTaskSimple row from POST data.

    Example payload:
    <QueryDict: {'sheetNameList': ['["OnlyWebCase"]'], 'fileName': ['CaseAndroid.xls'],
    'userName': ['wangjl01'], 'businessLineId': ['1'], 'moduleId': ['117'],
    'sourceList[]': ['安卓App', '苹果App'], 'taskTitle': ['asdfasdfas'], 'taskDesc': ['zzzzz']}>
    """
    taskId = request.POST.get("taskId", "")
    sheetNameList = request.POST.get("sheetNameList")
    fileName = request.POST.get("fileName")
    fileAddBy = request.POST.get("userName")
    businessLineId = request.POST.get("businessLineId")
    moduleId = request.POST.get("moduleId")
    sourceList = request.POST.get("sourceList")
    taskTitle = request.POST.get("taskTitle")
    taskDesc = request.POST.get("taskDesc")
    emailList = request.POST.get("emailList")
    print("emailList:", emailList)
    # SECURITY: the sheet-name list arrives as a JSON string; the original
    # ran eval() on it, executing attacker-controlled request data. Parse
    # it safely and join with commas instead of the manual trailing-comma trim.
    sheetnameStr = ",".join(json.loads(sheetNameList))
    if taskId:
        uiSimpleTask = TbUiTaskSimple.objects.get(taskId=taskId)
    else:
        uiSimpleTask = TbUiTaskSimple()
    uiSimpleTask.title = taskTitle
    uiSimpleTask.taskdesc = taskDesc
    uiSimpleTask.businessLineId = int(businessLineId)
    uiSimpleTask.moduleId = int(moduleId)
    uiSimpleTask.sourceGroup = sourceList
    uiSimpleTask.fileAddBy = fileAddBy
    uiSimpleTask.sheetName = sheetnameStr
    uiSimpleTask.fileName = fileName
    uiSimpleTask.emailList = emailList
    uiSimpleTask.addBy_id = request.session.get("loginName")
    uiSimpleTask.save()
    # second save: the taskId embeds the autogenerated primary key
    uiSimpleTask.taskId = "UI_TASK_%d" % uiSimpleTask.id
    uiSimpleTask.save()
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK).toJson())
def show_ui_simple_task_page(request):
    """Render the "view UI tasks" list page.

    NOTE(review): ``groupLevel1``, ``groupLevel2`` and ``isRelease`` are not
    defined in this function and are not visible at module scope in this
    file, so as written the first line raises NameError; presumably they
    should be read from the request or a config module — TODO confirm.
    """
    request.session['groupLevel1'] = groupLevel1
    request.session['groupLevel2'] = groupLevel2
    request.session['isReleaseEnv'] = isRelease
    context = {}
    if not isRelease:
        context["env"] = "test"
    context["uiShowSimpleTaskPage"] = "current-page"
    context["userName"] = request.session.get("userName")
    # Drop-down data sources for the filter controls.
    context["checkBusinessLine"] = dbModelListToListDict(BusinessService.getAllBusinessLine())
    context["checkModules"] = dbModelListToListDict(ModulesService.getAllModules())
    # Static text labels for the template.
    text = {}
    text["pageTitle"] = "UI任务查看"
    context["text"] = text
    context["page"] = 1
    addUserLog(request,"UI测试->查看任务->页面展示->成功","PASS")
    return render(request,"ui_test/ui_task/show_ui_simple_task_page.html",context)
def show_ui_test_resultListCheck(request):
    """Return one rendered page of the task list, filtered and ordered by the
    posted criteria.

    ``checkArr`` is a URL-encoded JSON object of column -> substring filters;
    ``orderBy`` is an ORDER BY fragment pre-screened by isSqlInjectable.
    """
    # ui_test.updateUiTestList()
    page = request.POST.get("page")
    if isInt(page):
        page = int(page)
    else:
        addUserLog(request, "UI测试->查看任务->获取数据->页面参数不合法", "FAIL")
        return HttpResponse("<script>alert('请验证页数参数');</script>")
    checkArr = json.loads(parse.unquote(request.POST.get("checkArr")))
    orderBy = request.POST.get("orderBy")
    if isSqlInjectable(orderBy):
        addUserLog(request, "UI测试->查看文件->获取数据->SQL注入检测时发现查询条件非法", "FAIL")
        return HttpResponse("<script>alert('查询条件非法');</script>")
    execSql = "SELECT i.*,u.userName from tb_ui_task_simple i LEFT JOIN tb_user u ON i.addBy = u.loginName WHERE i.state = 1 "
    checkList = []
    for key in checkArr:
        if checkArr[key] == "":
            # Empty filter value means "no filter on this column".
            continue
        elif key == "caseFounder" :
            # Founder can match either the login name or the display name,
            # hence two bound parameters for one filter value.
            checkList.append("%%%s%%" % checkArr[key])
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and (i.addBy LIKE %s or u.userName LIKE %s) """
            continue
        elif key == "module":
            # NOTE(review): alias "m" is never joined in execSql above, so a
            # module filter would produce invalid SQL — TODO confirm.
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and m.moduleName LIKE %s """
            continue
        elif key == "businessLine":
            # NOTE(review): alias "b" is never joined either — TODO confirm.
            checkList.append("%%%s%%" % checkArr[key])
            execSql += """ and b.bussinessLineName LIKE %s """
            continue
        # SECURITY NOTE: the filter *values* are parameter-bound, but this
        # interpolates the client-supplied key name directly into the SQL;
        # only orderBy is screened by isSqlInjectable above.
        checkList.append("%%%s%%" % checkArr[key])
        execSql += """ and i.%s """ % key
        execSql += """ LIKE %s"""
    execSql += """ ORDER BY %s""" % orderBy
    context = pagination(sqlStr=execSql,attrList=checkList,page=page,pageNum=commonWebConfig.interFacePageNum)
    context["myAppPackages"] = dbModelListToListDict(TbUiPackage.objects.filter(addBy=request.session.get("loginName"),state=1))
    context["envModules"] = HttpConfService.queryUIRunHttpConfSort(request)
    # Resolve foreign-key ids into human-readable names for the template
    # (one query per row and column — acceptable at page size).
    for contextIndex in context["pageDatas"]:
        contextIndex["businessLineName"] = TbBusinessLine.objects.get(id=contextIndex["businessLineId"]).bussinessLineName
        contextIndex["moduleName"] = TbModules.objects.get(id=contextIndex["moduleId"]).moduleName
        contextIndex["addByName"] = TbUser.objects.get(loginName=contextIndex["addBy"]).userName
        contextIndex["fileAddByName"] = TbUser.objects.get(loginName=contextIndex["fileAddBy"]).userName
    response = render(request, "ui_test/ui_task/subPages/ui_simple_task_pagelist.html",context)
    addUserLog(request, "UI测试->查看任务->获取数据->成功", "PASS")
    return response
def executeSimpleTask(request):
    """Queue one execution of a task per (environment, app package) pair.

    For every selected environment/package combination a TbUITestExecute
    snapshot row is created and the UI test runner is notified over TCP
    (command 31). Aborts on the first runner failure.
    """
    taskId = request.POST.get("taskId")
    uiTask = TbUiTaskSimple.objects.filter(taskId = taskId).all()
    if not uiTask:
        return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR,message="没有找到任务,错误的任务id[%s]" % taskId).toJson())
    uiTask = uiTask[0]
    # SECURITY/CONSISTENCY FIX: envList was parsed with eval(), which can
    # execute arbitrary request-supplied code; packageList/emailList below
    # already use json.loads, so envList now does too.
    envList = json.loads(request.POST.get("envList"))
    if len(envList) == 0:
        return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR, message="至少选择一个环境!").toJson())
    packageList = json.loads(request.POST.get("packageList"))
    if len(packageList) == 0:
        return HttpResponse(ApiReturn(ApiReturn.CODE_ERROR, message="至少选择一个app包!").toJson())
    isSendEmail = request.POST.get("isSendEmail")
    emailList = json.loads(request.POST.get("emailList"))
    for tmpEnv in envList:
        for tmpPackage in packageList:
            # Snapshot the task so later edits don't affect queued runs.
            tmpUITaskExecute = TbUITestExecute()
            tmpUITaskExecute.taskId = uiTask.taskId
            tmpUITaskExecute.title = uiTask.title
            tmpUITaskExecute.taskdesc = uiTask.taskdesc
            tmpUITaskExecute.businessLineId = uiTask.businessLineId
            tmpUITaskExecute.moduleId = uiTask.moduleId
            tmpUITaskExecute.sourceGroup = uiTask.sourceGroup
            tmpUITaskExecute.tasklevel = uiTask.tasklevel
            tmpUITaskExecute.fileAddBy = uiTask.fileAddBy
            tmpUITaskExecute.fileName = uiTask.fileName
            tmpUITaskExecute.sheetName = uiTask.sheetName
            tmpUITaskExecute.emailList = emailList
            tmpUITaskExecute.isSendEmail = isSendEmail
            tmpUITaskExecute.packageId = tmpPackage
            tmpUITaskExecute.httpConfKey = tmpEnv
            tmpUITaskExecute.reportDir = ""
            tmpUITaskExecute.execStatus = 1
            tmpUITaskExecute.addBy = request.session.get("loginName")
            tmpUITaskExecute.save(force_insert=True)
            # Notify the runner process; "do":31 = execute UI task.
            tcpStr = '{"do":31,"UITaskExecuteId":"%s"}' % tmpUITaskExecute.id
            retApi = send_tcp_request_to_uiport(tcpStr)
            if retApi.code != 10000:
                # Surface the runner's error immediately.
                return HttpResponse(retApi.toJson())
    return HttpResponse(ApiReturn(ApiReturn.CODE_OK,message=uiTask.title).toJson())
def ui_operationTask(request):
    """Render the task form page in copy / edit / check mode for a task id."""
    taskId = request.GET.get("taskId","")
    option = request.GET.get("option","")
    if taskId == "":
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING,message="缺少taskId参数").toJson())
    try:
        # Validate that an active task with this id exists.
        taskData = TbUiTaskSimple.objects.filter(state=1).get(taskId=taskId)
    except Exception:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING,message="taskId查不到数据").toJson())
    # Page titles per supported mode.
    titles = {
        "copy": ("拷贝任务", "UI任务拷贝"),
        "edit": ("编辑任务", "UI任务编辑"),
        "check": ("查看任务", "UI任务查看"),
    }
    if option not in titles:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING,message="option参数:值错误").toJson())
    pageTitle, subPageTitle = titles[option]
    context = {
        "uiAddSimpleTaskPage": "current-page",
        "text": {"pageTitle": pageTitle, "subPageTitle": subPageTitle},
        "option": option,
        "taskId": taskId,
        "businessLine": dbModelListToListDict(BusinessService.getAllBusinessLine()),
    }
    return render(request,"ui_test/ui_task/ui_simple_task.html",context)
def getTaskForTaskId(request):
    """Return the active task row for the posted taskId as a JSON body."""
    taskId = request.POST.get("taskId","")
    if not taskId:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING, message="缺少taskId参数").toJson())
    try:
        task = TbUiTaskSimple.objects.filter(state=1).get(taskId=taskId)
    except Exception:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING,message="taskId查不到数据").toJson())
    return HttpResponse(ApiReturn(body=dbModelToDict(task)).toJson())
def delSimpleTask(request):
    """Soft-delete a task (state -> 0) identified by the taskId query param."""
    taskId = request.GET.get("taskId","")
    if not taskId:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING, message="缺少taskId参数").toJson())
    try:
        task = TbUiTaskSimple.objects.filter(state=1).get(taskId=taskId)
    except Exception:
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING,message="taskId查不到数据").toJson())
    # Soft delete: the row is kept but hidden from all state=1 queries.
    task.state = 0
    task.save()
    return HttpResponse(ApiReturn().toJson())
def getTaskRunDetailsForTaskId(request):
    """Render the "run task" detail page for the given taskId."""
    taskId = request.GET.get("taskId")
    task_info = {"taskId": taskId}
    rows = dbModelListToListDict(TbUiTaskSimple.objects.filter(taskId=taskId, state=1))
    if rows:
        first = rows[0]
        task_info["title"] = first["title"]
        task_info["taskdesc"] = first["taskdesc"]
        task_info["addBy"] = first["addBy_id"]
        task_info["addTime"] = first["addTime"]
        task_info["modTime"] = first["modTime"]
        task_info["emailList"] = first["emailList"]
    context = {
        "taskData": task_info,
        "envModules": HttpConfService.queryUIRunHttpConfSort(request),
        # Packages owned by the logged-in user, for the package selector.
        "myAppPackages": dbModelListToListDict(
            TbUiPackage.objects.filter(addBy=request.session.get("loginName"), state=1)),
    }
    return render(request,"ui_test/ui_task/subPages/ui_task_Run_DetailsPage.html",context)
def addPageObject(request):
    """Create a page object, or re-activate a soft-deleted one with the same key."""
    pageObjectDataRequest = json.loads(request.POST.get("pageObjectData"))
    print("pageObjectDataRequest:", pageObjectDataRequest)
    logger.info("addPageObject %s" % request.POST.get("pageObjectData"))
    poKey = pageObjectDataRequest["POKey"]
    poTitle = pageObjectDataRequest["POTitle"]
    poDesc = pageObjectDataRequest["PODesc"]
    addBy = request.session.get("loginName")
    print("addBy:", addBy)
    existing = TbUiPageObject.objects.filter(poKey=poKey)
    if not existing:
        fresh = TbUiPageObject()
        fresh.poKey = poKey
        fresh.poTitle = poTitle
        fresh.poDesc = poDesc
        fresh.addBy = addBy
        fresh.state = 1
        fresh.save()
        return HttpResponse(ApiReturn().toJson())
    first = existing[0]
    if first.state == 0:
        # Re-activate the soft-deleted record instead of inserting a duplicate.
        first.state = 1
        first.poTitle = poTitle
        first.poDesc = poDesc
        first.addBy = addBy
        first.save()
        return HttpResponse(ApiReturn().toJson())
    logger.info("addPageObject pageObject添加失败")
    return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING, message="pageObject添加失败,请检查账号是否重复").toJson())
def getPageObject(request):
    """Render the page-object list: active records first, newest first within each state."""
    all_objects = dbModelListToListDict(TbUiPageObject.objects.filter())
    # Two stable sorts: newest id first, then active (state=1) ahead of deleted.
    by_id_desc = sorted(all_objects, key=lambda pageObject: pageObject["id"], reverse=True)
    context = {"pageDatas": sorted(by_id_desc, key=lambda pageObject: pageObject["state"], reverse=True)}
    return render(request, "ui_main/page_object/SubPages/page_object_add_subpage.html",context)
def getPageObjectForId(request):
    """Return a single page object (looked up by primary key) as a JSON body."""
    pageObjectId = request.POST.get("pageObjectId")
    try:
        record = TbUiPageObject.objects.get(id=pageObjectId)
    except Exception as e:
        message = "pageObject查询出错 %s" % e
        logger.error(message)
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING,message=message).toJson())
    return HttpResponse(ApiReturn(body=dbModelToDict(record)).toJson())
def editPageObject(request):
    """Update an existing page object from the posted JSON payload."""
    try:
        payload = json.loads(request.POST.get("pageObjectData"))
        # Stamp the modification time server-side.
        payload["modTime"] = datetime.datetime.now()
        PageObjectService.updatePageObject(payload)
    except Exception as e:
        print(traceback.format_exc())
        message = "编辑pageObject发生异常 %s" % e
        logger.info(message)
        return HttpResponse(ApiReturn(code=ApiReturn.CODE_WARNING, message=message).toJson())
    else:
        return HttpResponse(ApiReturn().toJson())
def delPageObject(request):
    """Soft-delete an active page object by flipping its state to 0."""
    pageObjectId = request.POST.get("pageObjectId", "")
    if not pageObjectId:
        return HttpResponse(ApiReturn(ApiReturn.CODE_WARNING, message="pageObjectId参数错误").toJson())
    try:
        record = TbUiPageObject.objects.get(state=1, id=pageObjectId)
    except Exception as e:
        return HttpResponse(ApiReturn(ApiReturn.CODE_WARNING, message="pageObjectId查询错误 %s" % e).toJson())
    record.state = 0
    record.save()
    return HttpResponse(ApiReturn().toJson())
def resetPageObject(request):
    """Re-activate a soft-deleted page object (state 0 -> 1)."""
    pageObjectId = request.POST.get("pageObjectId", "")
    if not pageObjectId:
        return HttpResponse(ApiReturn(ApiReturn.CODE_WARNING, message="pageObjectId参数错误").toJson())
    try:
        record = TbUiPageObject.objects.get(state=0, id=pageObjectId)
    except Exception as e:
        return HttpResponse(ApiReturn(ApiReturn.CODE_WARNING, message="pageObjectId查询错误 %s" % e).toJson())
    record.state = 1
    record.save()
    return HttpResponse(ApiReturn().toJson())
29006934103 | import cvxpy as cvx
import cvxpy.settings as s
from cvxpy.lin_ops.tree_mat import prune_constants
import cvxpy.problems.iterative as iterative
from cvxpy.tests.base_test import BaseTest
import numpy as np
class TestConvolution(BaseTest):
    """ Unit tests for convolution. """
    def test_1D_conv(self):
        """Test 1D convolution.

        conv([1,2,3], [0,1,0.5]) has length 3+3-1 = 5.
        """
        n = 3
        x = cvx.Variable(n)
        f = [1, 2, 3]
        g = [0, 1, 0.5]
        # Hand-computed expected convolution of f and g.
        f_conv_g = [0., 1., 2.5, 4., 1.5]
        expr = cvx.conv(f, g)
        # Two constant operands -> a constant expression.
        assert expr.is_constant()
        self.assertEqual(expr.shape, (5, 1))
        self.assertItemsAlmostEqual(expr.value, f_conv_g)
        expr = cvx.conv(f, x)
        # Constant * variable convolution is affine in x.
        assert expr.is_affine()
        self.assertEqual(expr.shape, (5, 1))
        # Matrix stuffing.
        # Pinning x == g makes the optimal value the L1 norm of f_conv_g,
        # which (all entries non-negative) equals its sum.
        prob = cvx.Problem(cvx.Minimize(cvx.norm(expr, 1)),
                           [x == g])
        result = prob.solve()
        self.assertAlmostEqual(result, sum(f_conv_g), places=3)
        self.assertItemsAlmostEqual(expr.value, f_conv_g)
        # # Expression trees.
        # prob = Problem(Minimize(norm(expr, 1)))
        # self.prob_mat_vs_mul_funcs(prob)
        # result = prob.solve(solver=SCS, expr_tree=True, verbose=True)
        # self.assertAlmostEqual(result, 0, places=1)
    def prob_mat_vs_mul_funcs(self, prob):
        """Check that the matrix-free multiply functions agree with the
        explicitly stuffed matrix A (helper, not run as a test by default)."""
        data, dims = prob.get_problem_data(solver=cvx.SCS)
        A = data["A"]
        objective, constr_map, dims, solver = prob.canonicalize(cvx.SCS)
        all_ineq = constr_map[s.EQ] + constr_map[s.LEQ]
        var_offsets, var_sizes, x_length = prob._get_var_offsets(objective,
                                                                 all_ineq)
        constraints = constr_map[s.EQ] + constr_map[s.LEQ]
        constraints = prune_constants(constraints)
        Amul, ATmul = iterative.get_mul_funcs(constraints, dims,
                                              var_offsets, var_sizes,
                                              x_length)
        vec = np.array(list(range(1, x_length+1)))
        # A*vec
        result = np.zeros(A.shape[0])
        Amul(vec, result)
        self.assertItemsAlmostEqual(A*vec, result)
        # Amul accumulates into the output buffer, hence 2*A*vec after a
        # second call on the same result array.
        Amul(vec, result)
        self.assertItemsAlmostEqual(2*A*vec, result)
        # A.T*vec
        vec = np.array(list(range(A.shape[0])))
        result = np.zeros(A.shape[1])
        ATmul(vec, result)
        self.assertItemsAlmostEqual(A.T*vec, result)
        # Same accumulation behavior for the transpose multiply.
        ATmul(vec, result)
        self.assertItemsAlmostEqual(2*A.T*vec, result)
    def mat_from_func(self, func, rows, cols):
        """Convert a multiplier function to a matrix.

        Applies ``func`` to each unit basis vector and collects the results
        as columns of a dense (rows x cols) matrix.
        """
        test_vec = np.zeros(cols)
        result = np.zeros(rows)
        matrix = np.zeros((rows, cols))
        for i in range(cols):
            test_vec[i] = 1.0
            func(test_vec, result)
            matrix[:, i] = result
            # Reset both buffers before probing the next basis vector.
            test_vec *= 0
            result *= 0
        return matrix
    def test_conv_prob(self):
        """Test a problem with convolution.

        Smoke test: the problem is unbounded/trivial, we only check that
        solve() runs through canonicalization without raising.
        """
        import numpy as np
        N = 5
        y = np.random.randn(N, 1)
        h = np.random.randn(2, 1)
        x = cvx.Variable(N)
        v = cvx.conv(h, x)
        obj = cvx.Minimize(cvx.sum(cvx.multiply(y, v[0:N])))
        print((cvx.Problem(obj, []).solve()))
| johnjaniczek/SFCLS | venv/lib/python3.5/site-packages/cvxpy/tests/test_convolution.py | test_convolution.py | py | 3,299 | python | en | code | 12 | github-code | 90 |
71061845416 | import random
from typing import Optional
import pygame
from pygame.sprite import Sprite, Group
from src.settings import BackgroundStarSettings as BG_Settings, Settings, PlayerDirection
class BackgroundStars(Sprite):
    """One background star; the class also manages the whole star field via
    the shared ``stars`` group and the classmethod helpers below."""

    # Shared sprite group holding every star in the background.
    stars = Group()
    # Drift direction derived from the player's arrow keys each frame.
    star_direction: Optional[PlayerDirection] = None

    @staticmethod
    def get_star_size() -> (int, int):
        """Return a random size for a star."""
        return random.choice(
            (BG_Settings.small, BG_Settings.large, BG_Settings.medium)
        )

    @staticmethod
    def get_star_speed() -> int:
        """Return a random base speed for a star."""
        return random.randint(0, BG_Settings.speed)

    @staticmethod
    def get_starting_location_x():
        """Return a random x coordinate inside the screen width."""
        return random.randrange(0, Settings.screen_width)

    @staticmethod
    def get_starting_location_y() -> int:
        """Return a random y coordinate inside the screen height."""
        return random.randrange(0, Settings.screen_height)

    def __init__(self):
        super().__init__(self.stars)  # register the new star with the shared group
        self.size: (int, int) = self.get_star_size()
        self.x = self.get_starting_location_x()  # x position
        self.y = self.get_starting_location_y()  # y position
        self.rect = pygame.Rect(self.x, self.y, self.size[0], self.size[-1])
        # Random magnitude with a random sign so stars drift both ways.
        self.speed = (self.get_star_speed() * random.choice([1, -1])) / BG_Settings.speed_factor
        self.color = BG_Settings.color

    def render(self, screen):
        """Draw this star as a filled rectangle."""
        pygame.draw.rect(screen, self.color, self.rect)

    @classmethod
    def get_star_direction(cls):
        """Update the shared drift direction from the pressed arrow keys.

        The stars move opposite to the player (parallax): RIGHT key drifts
        them left, LEFT key drifts them right, otherwise they drift up.
        """
        keys = pygame.key.get_pressed()
        if keys[pygame.K_RIGHT]:
            cls.star_direction = PlayerDirection.left
        elif keys[pygame.K_LEFT]:
            cls.star_direction = PlayerDirection.right
        else:
            cls.star_direction = PlayerDirection.up

    def update(self):
        """Advance the star one tick and respawn it when it leaves the screen."""
        x_out_of_bounds = Settings.screen_width < self.x or self.x < 0
        y_out_of_bounds = Settings.screen_height < self.y or self.y < 0
        if x_out_of_bounds:
            # Re-enter from the edge opposite to the drift, or anywhere.
            self.x = Settings.screen_width if self.star_direction == PlayerDirection.left else \
                0 if self.star_direction == PlayerDirection.right else self.get_starting_location_x()
            self.y = self.get_starting_location_y()
        if y_out_of_bounds:
            # BUGFIX: the respawn coordinates were drawn from the wrong axis
            # (x from the y-range and the fallback y from the x-range), which
            # misplaces stars whenever screen width != height. Now mirrors
            # the x_out_of_bounds branch above.
            self.x = self.get_starting_location_x()
            self.y = Settings.screen_height if self.star_direction == PlayerDirection.down else \
                0 if self.star_direction == PlayerDirection.up else self.get_starting_location_y()
        else:
            self.x += self.speed
            self.y += self.speed
            # Extra push along the current drift direction.
            if self.star_direction == PlayerDirection.right:
                self.x += 4
            elif self.star_direction == PlayerDirection.left:
                self.x -= 4
            elif self.star_direction == PlayerDirection.up:
                self.y += 2
        self.rect.x = self.x
        self.rect.y = self.y

    @classmethod
    def update_stars(cls):
        """Refresh the drift direction, then advance every star."""
        cls.get_star_direction()
        cls.stars.update()

    @classmethod
    def create_stars(cls):
        """Populate the star field with the configured number of stars."""
        for _ in range(BG_Settings.number_of_stars):
            cls()

    @classmethod
    def render_stars(cls, screen):
        """Clear the screen to black and draw every star."""
        screen.fill((0, 0, 0))
        star: BackgroundStars
        for star in cls.stars:
            star.render(screen)
| Joel-Edem/space_ranger | src/componnets/background_stars.py | background_stars.py | py | 3,776 | python | en | code | 0 | github-code | 90 |
8986864650 | from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
import numpy as np
def plot_surf():
    """Plot the monkey-saddle surface z = x**3 - 3*x*y**2 over [-1, 1]^2.

    Opens an interactive matplotlib window (blocks until it is closed).
    """
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    # 0.05 step -> a 40x40 evaluation grid over [-1, 1).
    x = np.arange(-1, 1.00, 0.05)
    y = np.arange(-1, 1.00, 0.05)
    x, y = np.meshgrid(x, y)
    # z = x**2 - y**2 # saddle points
    # monkey saddle point function, the point (0,0) is a critical saddle point
    z = x**3 - 3*x*(y**2) # monkey saddle point
    surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap=cm.afmhot, linewidth=0, antialiased=False)
    #ax.set_zlim(-1.01, 1.01)
    # 10 evenly spaced z ticks, formatted with two decimals.
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    fig.colorbar(surf, shrink=0.5, aspect=5)
    plt.show()
# Guard the interactive plot so importing this module does not open a window.
if __name__ == "__main__":
    plot_surf()

#initial states
# NOTE(review): unused in the visible code; presumably a starting point for
# a later optimization/search routine — TODO confirm.
init = [-0.75, -0.75]
| ahmadyan/Duplex | test/plots/surf.py | surf.py | py | 880 | python | en | code | 6 | github-code | 90 |
21888742341 | #coding=utf-8
#Python单元测试框架——unittest
##对Math类进行单元测试
from clator import SS
import unittest
class TestMath(unittest.TestCase):
    """Unit tests for the SS calculator class."""

    def setUp(self):
        print("test start")

    def test_add(self):
        # 5 + 10 must add up to 15.
        calc = SS(5, 10)
        self.assertEqual(calc.add(), 15)

    def tearDown(self):
        print("test end")
if __name__ == '__main__':
    # Run only the addition test through an explicit suite/runner pair.
    suite = unittest.TestSuite()
    suite.addTest(TestMath("test_add"))
    unittest.TextTestRunner().run(suite)
| carrotWu/pythonProjrct | unitTest/test_Math.py | test_Math.py | py | 501 | python | en | code | 1 | github-code | 90 |
38235447163 | import math
import time
from abc import ABC
from qgis.core import *
from algorithms.GdalUAV.transformation.coordinates.CoordinateTransform import CoordinateTransform
from ModuleInstruments.DebugLog import DebugLog
from algorithms.GdalUAV.processing.FindPathData import FindPathData
from algorithms.AStarMethodGrid import AStarMethodGrid
from algorithms.GdalUAV.base.MethodBasedOnHallAndGrid import MethodBasedOnHallAndGrid
from algorithms.GdalUAV.base.SearchMethodBase import SearchMethodBase
from algorithms.GdalUAV.processing.calculations.ObjectsCalculations import get_distance
from algorithms.GdalUAV.processing.GeometryPointExpand import GeometryPointExpand
from algorithms.GdalUAV.qgis.visualization.Visualizer import Visualizer
class SeparationMethod(MethodBasedOnHallAndGrid, SearchMethodBase, ABC):
    """Splits a long start->target search into a chain of shorter searches.

    The straight start->target segment is intersected with the obstacle
    geometry to derive intermediate waypoints; the wrapped ``method``
    algorithm is then run between each pair of consecutive waypoints and
    the partial paths are concatenated.
    """
    def __init__(self, method, tolerance, findpathdata: FindPathData, debuglog: DebugLog):
        """:param method: path-search class to run on each sub-segment.
        :param tolerance: maximum straight-line distance handled by a single
            sub-search (also the waypoint spacing).
        """
        hall_width = 100
        super().__init__(findpathdata, debuglog, hall_width)
        self.find_path_data = findpathdata
        self.method = method
        self.tolerance = tolerance
        # Start/target wrapped with their grid cell coordinates.
        cell_start = self.grid.difine_point(self.starting_point_geometry)
        self.starting_point_expand = GeometryPointExpand(self.starting_point_geometry, cell_start.n_row,
                                                         cell_start.n_column)
        cell_target = self.grid.difine_point(self.target_point_geometry)
        self.target_point_expand = GeometryPointExpand(self.target_point_geometry, cell_target.n_row,
                                                       cell_target.n_column)
        # Straight line from start to target, used for intersection tests.
        self.__vector_geometry = QgsGeometry.fromPolylineXY([self.starting_point, self.target_point])
        self.points_to_search = []
    def __distance_from_start_point(self, pare):
        """Euclidean distance from the route start to an [x, y] pair."""
        x_full_difference = pare[0] - self.starting_point.x()
        y_full_difference = pare[1] - self.starting_point.y()
        result = math.sqrt(x_full_difference ** 2 + y_full_difference ** 2)
        # Equivalent to abs(result); sqrt is already non-negative.
        return (result * result) ** 0.5
    def __distance_to_target_point(self, pare):
        """Euclidean distance from an [x, y] pair to the route target."""
        x_full_difference = self.target_point.x() - pare[0]
        y_full_difference = self.target_point.y() - pare[1]
        result = math.sqrt(x_full_difference ** 2 + y_full_difference ** 2)
        return (result * result) ** 0.5
    def __distance_from_one_begin_to_next(self, begin, next):
        """Euclidean distance between two points ([x, y] pairs or point-likes)."""
        x_full_difference = next[0] - begin[0]
        y_full_difference = next[1] - begin[1]
        result = math.sqrt(x_full_difference ** 2 + y_full_difference ** 2)
        return (result * result) ** 0.5
    def run(self):
        """Build the waypoint chain and run the sub-searches between waypoints.

        Writes the intermediate waypoints, segment vectors and the final
        path to hard-coded debug shapefiles via Visualizer.
        """
        from_start_to_target = get_distance(self.starting_point, self.target_point)
        # just start common method
        if from_start_to_target < self.tolerance:
            # NOTE(review): the short-distance fallback is not implemented
            # here (pass) — presumably the plain method should run directly.
            pass
        else:
            self.debuglog.start_block("set geometry to the grid block")
            self._set_geometry_to_grid()
            self.debuglog.end_block("set geometry to the grid block")
            geometry = self.grid.get_multipolygon_by_points(self.starting_point_expand, self.target_point_expand)
            pares = []
            geometry_list = geometry.asGeometryCollection()
            # to delete repeated geometry
            points_except_repeats = []
            # Intersect the straight start->target segment with every
            # obstacle part; each intersection is a (sub)polyline whose
            # endpoints become candidate waypoints.
            for part in geometry_list:
                intersections = self.__vector_geometry.intersection(part)
                if intersections:
                    try:
                        points = intersections.asPolyline()
                        # to delete repeat geometry
                        rep = []
                        for i in points:
                            rep.append([i.x(), i.y()])
                        if rep not in points_except_repeats:
                            points_except_repeats.append(rep)
                            pares.append(rep)
                    except:
                        # Not a single polyline: fall back to multipolyline.
                        multi = intersections.asMultiPolyline()
                        for points in multi:
                            # to delete repeat geometry
                            rep = []
                            for i in points:
                                rep.append([i.x(), i.y()])
                            if rep not in points_except_repeats:
                                points_except_repeats.append(rep)
                                pares.append(rep)
            # Order the intersection segments along the route and bracket
            # them with sentinel pairs for the start and target points.
            pares.sort(key=lambda x: self.__distance_from_start_point(x[0]))
            pares.insert(0, [[0, 0], [self.starting_point.x(), self.starting_point.y()]])
            pares.append([[self.target_point.x(), self.target_point.y()], [0, 0]])
            # Free-space gaps between consecutive obstacle intersections.
            search_vectors = []
            for i in range(len(pares) - 1):
                new_pare = [[pares[i][1][0], pares[i][1][1]], [pares[i + 1][0][0], pares[i + 1][0][1]]]
                search_vectors.append(new_pare)
            vectors_geometry = []
            for vect in search_vectors:
                point1 = QgsPointXY(vect[0][0], vect[0][1])
                point2 = QgsPointXY(vect[1][0], vect[1][1])
                line = QgsGeometry.fromPolylineXY([point1, point2])
                vectors_geometry.append(line)
            # Drop very short gaps (< 6 map units).
            # NOTE(review): removing from a list while iterating it can skip
            # adjacent elements — TODO confirm intended.
            for vect in vectors_geometry:
                if vect.length() < 6:
                    vectors_geometry.remove(vect)
            # Unit direction of the start->target segment.
            x_full_difference = self.target_point.x() - self.starting_point.x()
            y_full_difference = self.target_point.y() - self.starting_point.y()
            result = math.sqrt(x_full_difference ** 2 + y_full_difference ** 2)
            correction_x = x_full_difference / result
            correction_y = y_full_difference / result
            self.points_to_search = []
            current_p = vectors_geometry[0].asPolyline()[0]
            current_vector_index = 0
            # Walk along the route dropping a waypoint at most every
            # `tolerance` units until the target is within reach.
            while True:
                self.points_to_search.append(current_p)
                if self.__distance_to_target_point(current_p) < self.tolerance:
                    self.points_to_search.append(self.target_point)
                    break
                save_vector_index = current_vector_index
                for i in range(save_vector_index, len(vectors_geometry)):
                    line = vectors_geometry[i].asPolyline()
                    point1 = line[0]
                    point2 = line[1]
                    # NOTE(review): a and b are computed but never used.
                    a = self.__distance_from_start_point(point2)
                    b = self.__distance_from_start_point(point1)
                    if self.__distance_from_one_begin_to_next(current_p, point2) > self.tolerance:
                        if self.__distance_from_one_begin_to_next(current_p, point1) < self.tolerance:
                            # Step `tolerance` units along the route direction.
                            current_p = QgsPointXY(current_p.x() + correction_x * self.tolerance,
                                                   current_p.y() + correction_y * self.tolerance)
                            break
                        else:
                            # Back off from the current gap's end point and pick
                            # the candidate farthest from nearby obstacles.
                            best_point = None
                            value = -1
                            current_vector = vectors_geometry[current_vector_index].asPolyline()
                            corrections_here = [0.5, 1.5, 5, 10, 20]
                            points = []
                            for cor in corrections_here:
                                points.append(QgsPointXY(
                                    current_vector[1].x() - correction_x * cor,
                                    current_vector[1].y() - correction_y * cor))
                            for point in points:
                                point_geom = QgsGeometry.fromPointXY(point)
                                cell = self.grid.difine_point(point_geom)
                                if cell.geometry is not None:
                                    d = cell.geometry.distance(point_geom)
                                    if d > value:
                                        value = d
                                        best_point = point
                            # NOTE(review): best_point stays None if every
                            # cell geometry is None — TODO confirm.
                            current_p = best_point
                            break
                    else:
                        current_vector_index = i
            # Debug output: waypoints and gap vectors as shapefiles.
            points_to_search_geom = [QgsGeometry.fromPointXY(x) for x in self.points_to_search]
            Visualizer.update_layer_by_geometry_objects(r"C:\Users\Neptune\Desktop\Voronin qgis\shp\points_import.shp",
                                                        points_to_search_geom)
            Visualizer.update_layer_by_geometry_objects(r"C:\Users\Neptune\Desktop\Voronin qgis\shp\min_path.shp",
                                                        vectors_geometry)
            # Run the wrapped search between each pair of consecutive
            # waypoints and concatenate the resulting path features.
            list_of_path = []
            for i in range(len(points_to_search_geom) - 1):
                self.find_path_data.start_point = points_to_search_geom[i]
                self.find_path_data.target_point = points_to_search_geom[i + 1]
                self.debuglog = DebugLog()
                algor = self.method(self.find_path_data, self.debuglog)
                algor.run()
                for i in algor.final_path:
                    list_of_path.append(i)
            Visualizer.update_layer_by_feats_objects(r"C:\Users\Neptune\Desktop\Voronin qgis\shp\min_path.shp",
                                                     list_of_path)
if __name__ == '__main__':
    # Stand-alone smoke run: boots a headless QGIS, loads a fixed project
    # and obstacle layer, then times one SeparationMethod search.
    QgsApplication.setPrefixPath(r'C:\OSGEO4~1\apps\qgis', True)
    qgs = QgsApplication([], False)
    qgs.initQgis()
    my_time = time.perf_counter()
    # Number of timed repetitions.
    n = 1
    for i in range(n):
        proj = QgsProject.instance()
        proj.read(r'C:\Users\Neptune\Desktop\Voronin qgis\Voronin qgis.qgs')
        # Hard-coded start/target in EPSG:3395 map coordinates.
        point1 = QgsGeometry.fromPointXY(QgsPointXY(4429486.8, 5954990.5))
        point2 = QgsGeometry.fromPointXY(QgsPointXY(4426529.1, 5957649.7))
        path = r"C:\Users\Neptune\Desktop\Voronin qgis\shp\Строения.shp"
        obstacles = QgsVectorLayer(path)
        source_list_of_geometry_obstacles = CoordinateTransform.get_list_of_poligons_in_3395(obstacles, proj)
        find_path_data = FindPathData(proj, point1, point2, obstacles, r"C:\Users\Neptune\Desktop\Voronin qgis\shp",
                                      False,
                                      source_list_of_geometry_obstacles)
        debug_log = DebugLog()
        # tolerance=1000 map units; A* on a grid runs each sub-segment.
        check = SeparationMethod(AStarMethodGrid, 1000, find_path_data, debug_log)
        check.run()
        print(debug_log.get_info())
    # Average wall-clock seconds per run.
    my_time = (time.perf_counter() - my_time) / n
    print(my_time)
| Vladimir-Voronin/uav_find_path | algorithms/SeparationMethod.py | SeparationMethod.py | py | 10,450 | python | en | code | 0 | github-code | 90 |
#Stock awal
# Initial stock. Keyed by a unique slug (product + warehouse abbreviation);
# each value records warehouse, category (FMCG/FRESH), rack location,
# display name, and pieces on hand.
inventory = {
    "tehpucukjkt": {'Warehouse': 'jakarta',
                    'Category': 'FMCG',
                    'Rack Location': 'J1',
                    'Product Name': 'teh pucuk',
                    'Quantity (pcs)' : 1000},
    "indomiejkt": {'Warehouse': 'jakarta',
                   'Category': 'FMCG',
                   'Rack Location': 'J1',
                   'Product Name': 'indomie',
                   'Quantity (pcs)' : 500},
    "ayamjkt": {'Warehouse': 'jakarta',
                'Category': 'FRESH',
                'Rack Location': 'JF1',
                'Product Name': 'ayam potong',
                'Quantity (pcs)' : 10},
    "waferbdg": {'Warehouse': 'bandung',
                 'Category': 'FMCG',
                 'Rack Location': 'B1',
                 'Product Name': 'wafer tango',
                 'Quantity (pcs)' : 750},
    "spritebdg": {'Warehouse': 'bandung',
                  'Category': 'FMCG',
                  'Rack Location': 'B1',
                  'Product Name': 'sprite 500ml',
                  'Quantity (pcs)' : 800},
    "telorbdg": {'Warehouse': 'bandung',
                 'Category': 'FRESH',
                 'Rack Location': 'BF1',
                 'Product Name': 'telor ayam',
                 'Quantity (pcs)' : 100}
}
#Functions part
def menu_awal():
    """Print every item currently in the inventory as a tab-separated table."""
    print('Berikut Ini List Barang yang Tersedia\n')
    print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
    print('================================================================================')
    for item in inventory.values():
        qty = int(item["Quantity (pcs)"])
        print(f'{item["Warehouse"]}\t\t| {item["Category"]}\t\t| {item["Rack Location"]}\t\t|{item["Product Name"]}\t| {qty}')
def showwarehouse():
    """Ask for a warehouse name and print every item stored there."""
    wh = input('Masukan nama warehouse yang mau ditampilkan: ')
    print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
    print('================================================================================')
    needle = wh.lower()
    for item in inventory.values():
        # Substring match against the stored warehouse name.
        if needle not in item["Warehouse"]:
            continue
        print('{}\t\t|{}\t\t|{}\t\t|{}\t|{}'.format(item["Warehouse"], item["Category"], item["Rack Location"], item["Product Name"], item["Quantity (pcs)"]))
def showproduct():
    """Ask for a product name and print every matching inventory item."""
    pd = input('Masukan nama product yang mau ditampilkan: ')
    print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
    print('================================================================================')
    needle = pd.lower()
    for item in inventory.values():
        # Substring match against the stored product name.
        if needle not in item["Product Name"]:
            continue
        print('{}\t\t|{}\t\t|{}\t\t|{}\t|{}'.format(item["Warehouse"], item["Category"], item["Rack Location"], item["Product Name"], item["Quantity (pcs)"]))
def showcat():
category=input('''
Pilihan category yang tersedia
1. FMCG
2. FRESH
Masukan pilihan category yang ingin ditampilkan:
''')
if category=='1':
print ('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
print ('================================================================================')
for i in inventory.keys():
if 'FMCG' in inventory[i]["Category"]:
print("{}\t\t|{}\t\t|{}\t\t|{}\t|{}".format(inventory[i]["Warehouse"],inventory[i]["Category"],inventory[i]["Rack Location"],inventory[i]["Product Name"],inventory[i]["Quantity (pcs)"]))
else:
continue
elif category=='2':
print ('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
print ('================================================================================')
for i in inventory.keys():
if 'FRESH' in inventory[i]["Category"]:
print("{}\t\t|{}\t\t|{}\t\t|{}\t|{}".format(inventory[i]["Warehouse"],inventory[i]["Category"],inventory[i]["Rack Location"],inventory[i]["Product Name"],inventory[i]["Quantity (pcs)"]))
else:
continue
else:
print('MASUKAN PILIHAN YANG BENAR!')
def showrack():
    """Ask for a rack location and print every item stored on it."""
    rack = input('Masukan lokasi rack yang diinginkan: ')
    print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
    print('================================================================================')
    needle = rack.upper()
    for item in inventory.values():
        # Substring match against the stored rack location.
        if needle not in item["Rack Location"]:
            continue
        print('{}\t\t|{}\t\t|{}\t\t|{}\t|{}'.format(item["Warehouse"], item["Category"], item["Rack Location"], item["Product Name"], item["Quantity (pcs)"]))
def tambahstockbaru():
    """Interactively add a brand-new record to the module-level inventory.

    Prompts for a unique key and the item details, shows a preview, and
    inserts only after the user confirms with "ya".
    """
    valid_categories = ('FMCG', 'FRESH')
    newkeys = input('Masukan Unique Keys baru : ').lower()
    if newkeys in inventory:
        # BUGFIX: this branch means the key already exists; the old message
        # claimed the opposite ("TIDAK ADA" / does not exist).
        print('UNIQUE KEYS TERSEBUT SUDAH ADA, HARAP MASUKAN UNIQUE KEYS LAIN!')
        return
    newwh = input('Masukan lokasi warehouse: ')
    newcat = input('Masukan category: ')
    if newcat.upper() not in valid_categories:
        # BUGFIX: previously execution continued past this error and crashed
        # with NameError on the undefined newrack/newname/newqty.
        print('CATEGORY TERSEBUT TIDAK ADA')
        return
    # Both categories collect the same fields, so the duplicated branches
    # were merged into one.
    newrack = input('Masukan lokasi rack product: ')
    newname = input('Masukan nama product: ')
    newqty = int(input('Masukan quantity (pcs) barang: '))
    print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
    print('================================================================================')
    print(f"{newwh}\t\t|{newcat}\t\t|{newrack}\t\t|{newname}\t|{newqty}")
    while True:
        x = input(f'''Apakah data yang ingin di update diatas sudah benar?
    ya/tidak: ''').lower()
        if x == 'ya':
            inventory[newkeys] = {"Warehouse": newwh.lower(), "Category": newcat.upper(), "Rack Location": newrack.upper(), "Product Name": newname.lower(), "Quantity (pcs)": newqty}
            print('Data berhasil ditambahkan')
            break
        elif x == 'tidak':
            print('Barang batal ditambahkan')
            break
        else:
            print('Masukan pilihan yang benar!')
def updatestockbarang():
    """Interactively overwrite an existing record in `inventory`.

    Fixes over the original:
    - an unknown category now aborts instead of reaching a NameError
      (rackupdate/nameupdate/qtyupdate were undefined in that path);
    - the two identical per-category input branches are merged;
    - the record is written back under the lowercased key, matching the
      lowercased membership check (the original indexed with the raw-case
      key, which could KeyError or create a second record).
    """
    category_list1 = ['FMCG']
    category_list2 = ['FRESH']
    keysupdate= input('Masukan unique keys yang mau di update: ')
    if keysupdate.lower() in inventory.keys():
        WH_update= input('Masukan lokasi warehouse: ')
        catupdate= input('Masukan jenis product: ')
        # Both known categories collect exactly the same fields.
        if catupdate.upper() in category_list1 or catupdate.upper() in category_list2:
            rackupdate = input('Masukan lokasi rack product: ')
            nameupdate= input('Masukan nama product: ')
            qtyupdate= int(input('Masukan quantity (pcs) barang: '))
        else:
            print('CATEGORY TERSEBUT TIDAK ADA')
            return
        print ('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
        print ('================================================================================')
        print (f"{WH_update}\t\t|{catupdate}\t\t|{rackupdate}\t\t|{nameupdate}\t|{qtyupdate}")
        while True:
            x = input(f'''Apakah data yang ingin di update diatas sudah benar?
            ya/tidak: ''').lower()
            if x == 'ya':
                inventory[keysupdate.lower()] = {"Warehouse": WH_update.lower(), "Category": catupdate.upper(), "Rack Location": rackupdate.upper(), "Product Name": nameupdate.lower(), "Quantity (pcs)": qtyupdate}
                print('Data berhasil diupdate')
                break
            elif x == 'tidak':
                print('Barang batal diupdate')
                break
            else:
                print('Masukan pilihan yang benar!')
    else:
        print('UNIQUE KEYS YANG DIINGINKAN TIDAK ADA HARAP MASUKAN UNIQUE KEYS YANG BENAR!')
def barangkeluar():
    """Issue (check out) a quantity of one inventory item, with confirmation.

    NOTE(review): membership is checked with `keyskeluar.lower()` but the dict
    is indexed with the raw `keyskeluar` — uppercase input passes the check yet
    never matches in the loops below; normalise the key once. Also, the final
    `else` branch below is unreachable: `<`, `==`, `>` are exhaustive for ints.
    """
    keyskeluar= input('Masukan keys yang mau keluar: ')
    if keyskeluar.lower() in inventory.keys():
        qtykeluar= int(input('Masukan jumlah barang yang keluar: '))
        # Partial issue: stock remains after subtraction.
        if qtykeluar < inventory[keyskeluar]['Quantity (pcs)']:
            print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
            print('================================================================================')
            for i in inventory:
                if keyskeluar==i:
                    print(f'{inventory[i]["Warehouse"]}\t\t| {inventory[i]["Category"]}\t\t| {inventory[i]["Rack Location"]}\t\t|{inventory[i]["Product Name"]}\t| {qtykeluar}')
                    while True:
                        x = input(f'''Apakah anda yakin ingin mengeluarkan {inventory[i]["Product Name"]} dengan kuantitas sebanyak {qtykeluar} ini?
                        ya/tidak: ''').lower()
                        if x == 'ya':
                            inventory[keyskeluar]['Quantity (pcs)'] = inventory[keyskeluar]['Quantity (pcs)']-qtykeluar
                            print(f'Barang yang dikeluarkan sebanyak {qtykeluar}')
                            break
                        elif x == 'tidak':
                            print('Barang batal dikeluarkan')
                            break
                        else:
                            print('Masukan menu yang benar')
        # Exact issue: stock hits zero, so also warn that a restock is needed.
        elif qtykeluar == inventory[keyskeluar]['Quantity (pcs)']:
            print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
            print('================================================================================')
            for i in inventory:
                if keyskeluar==i:
                    print(f'{inventory[i]["Warehouse"]}\t\t| {inventory[i]["Category"]}\t\t| {inventory[i]["Rack Location"]}\t\t|{inventory[i]["Product Name"]}\t| {qtykeluar}')
                    while True:
                        x = input(f'''Apakah anda yakin ingin mengeluarkan {inventory[i]["Product Name"]} dengan kuantitas sebanyak {qtykeluar} ini?
                        ya/tidak: ''').lower()
                        if x == 'ya':
                            inventory[keyskeluar]['Quantity (pcs)'] = inventory[keyskeluar]['Quantity (pcs)']-qtykeluar
                            print(f'''Barang yang dikeluarkan sebanyak {qtykeluar}
                            stock {inventory[i]["Product Name"]} sudah habis harap restock kembali!''')
                            break
                        elif x == 'tidak':
                            print('Barang batal dikeluarkan')
                            break
                        else:
                            print('Masukan menu yang benar')
        elif qtykeluar > inventory[keyskeluar]['Quantity (pcs)']:
            print('JUMLAH STOCK YANG TERSEDIA TIDAK CUKUP')
        else:
            # NOTE(review): unreachable — see docstring.
            print('MASUKAN JUMLAH STOCK YANG BENAR')
    else:
        print('UNIQUE KEYS YANG DIINGINKAN TIDAK ADA HARAP MASUKAN UNIQUE KEYS YANG BENAR!')
def restock():
    """Interactively add quantity to an existing inventory record.

    Fixes over the original:
    - the key is normalised to lowercase once, so the `restockkey == i`
      comparisons actually match records whose membership check passed
      (the original silently did nothing for uppercase input);
    - an unknown key now prints an error instead of silently returning.
    """
    restockkey= input('Masukan keys yang mau di restock: ')
    if restockkey.lower() in inventory.keys():
        restockkey = restockkey.lower()  # inventory keys are lowercase
        restock_qty= int(input('Masukan jumlah barang yang mau di restock: '))
        print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
        print('================================================================================')
        for i in inventory:
            if restockkey==i:
                print(f'{inventory[i]["Warehouse"]}\t\t| {inventory[i]["Category"]}\t\t| {inventory[i]["Rack Location"]}\t\t|{inventory[i]["Product Name"]}\t| {restock_qty}')
                while True:
                    x = input(f'''Apakah anda yakin ingin menambahkan {inventory[i]["Product Name"]} dengan kuantitas sebanyak {restock_qty} ini?
                    ya/tidak: ''').lower()
                    if x == 'ya':
                        inventory[restockkey]['Quantity (pcs)'] = inventory[restockkey]['Quantity (pcs)']+restock_qty
                        print(f'Barang yang ditambahkan sebanyak {restock_qty}')
                        break
                    elif x == 'tidak':
                        print('Barang batal ditambahkan')
                        break
                    else:
                        print('Masukan menu yang benar')
    else:
        print('UNIQUE KEYS YANG DIINGINKAN TIDAK ADA HARAP MASUKAN UNIQUE KEYS YANG BENAR!')
def sortstock():
    """Print every inventory record, ordered by ascending quantity."""
    def _by_quantity(entry):
        # entry is a (key, record) pair from inventory.items(); sort on the
        # record's stored quantity.
        return entry[1]['Quantity (pcs)']
    print ('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
    print ('================================================================================')
    for _, rec in sorted(inventory.items(), key=_by_quantity):
        print(f"{rec['Warehouse']}\t\t|{rec['Category']}\t\t|{rec['Rack Location']}\t\t|{rec['Product Name']}\t|{rec['Quantity (pcs)']}")
def delete():
    """Show one inventory record and delete it after user confirmation.

    Fix over the original: it deleted the entry inside a `for i in inventory`
    loop, so when the deleted key was not the last one, resuming iteration
    raised "RuntimeError: dictionary changed size during iteration" in
    Python 3. `keysdelete` is an exact key, so the scan is unnecessary —
    look the record up directly.
    """
    keysdelete = input('Masukan keys yang ingin dihapus: ').lower()
    if keysdelete in inventory.keys():
        print('Warehouse\t|Category\t| Rack Location\t| Product Name\t| Quantity (pcs)')
        print('================================================================================')
        rec = inventory[keysdelete]
        print(f'{rec["Warehouse"]}\t\t| {rec["Category"]}\t\t| {rec["Rack Location"]}\t\t|{rec["Product Name"]}\t| {int(rec["Quantity (pcs)"])}')
        while True:
            x = input('''Apakah anda yakin ingin menghapus barang ini?
            ya/tidak: ''').lower()
            if x == 'ya':
                del inventory[keysdelete]
                print('Barang telah dihapus dari inventory.')
                break
            elif x == 'tidak':
                print('Penghapusan barang dibatalkan.')
                break
            else:
                print('Masukan menu yang benar.')
    else:
        print('UNIQUE KEYS YANG INGIN DIHAPUS TIDAK ADA')
# Main menu loop: dispatches user choices to the CRUD helpers above
# until option 7 (exit) is chosen.
while True :
    menu = input(
        '''
        Selamat Datang Di Gudang Revalde
        List Menu
        1. Menampilkan Stock yang Ada
        2. Menambah Barang
        3. Menghapus Barang
        4. Mengeluarkan Barang
        5. Restock Barang
        6. Cek Stock
        7. Exit Program
        Masukan Menu Yang Anda Inginkan : '''
    )
    if menu=='1':
        # Sub-menu: different read-only views over the same inventory dict.
        while True:
            extramenu = input(
                '''
                Ingin menampilkan
                1. Semua stock di inventory
                2. Semua stock berdasarkan warehouse
                3. Semua stock berdasarkan category
                4. Semua stock berdasarkan lokasi rack
                5. Semua stock berdasarkan nama product
                6. Kembali ke menu awal
                7. Exit
                Pilih menu yang anda inginkan: '''
            )
            if extramenu == '1':
                menu_awal()
            elif extramenu == '2':
                showwarehouse()
            elif extramenu == '3':
                showcat()
            elif extramenu == '4':
                showrack()
            elif extramenu == '5':
                showproduct()
            elif extramenu == '6':
                break
            elif extramenu == '7':
                print('TERIMA KASIH')
                exit()
            else:
                print('MASUKAN MENU YANG BENAR')
                continue
            input('Tekan ENTER untuk melanjutkan...')
    elif menu=='2':
        # Sub-menu: create a new record or overwrite an existing one.
        while True:
            menu2 = input('''
            1. Menambahkan Stock Baru
            2. Merubah Barang yang Sudah ada
            3. Kembali ke menu awal
            4. Exit
            Pilih menu yang diinginkan: '''
            )
            if menu2 == '1':
                tambahstockbaru()
            elif menu2 == '2':
                updatestockbarang()
            elif menu2 == '3':
                break
            elif menu2 == '4':
                print('TERIMA KASIH')
                exit()
            else:
                print('Masukan pilihan menu yang benar')
                continue
            input('Tekan ENTER untuk melanjutkan...')
    elif menu=='3':
        delete()
    elif menu=='4':
        barangkeluar()
    elif menu=='5':
        restock()
    elif menu=='6':
        sortstock()
    elif menu == '7' :
        print('TERIMA KASIH!')
        break
else :
print ('Masukan Menu Yang Benar!!!') | revalderaditya/Warehouse-Inventory-System | Capstone Project Module 1.py | Capstone Project Module 1.py | py | 17,267 | python | ms | code | 0 | github-code | 90 |
34345650673 | import os
# Project-wide constants for the diamond price prediction pipeline.
AWS_S3_BUCKET_NAME = "Diamond-Price"
MONGO_DATABASE_NAME = "DimondPricePrediction"
MONGO_COLLECTION_NAME = "Diamond_Price"
TARGET_COLUMN = "price"
# SECURITY: a live username/password is hard-coded below. Move this URL to an
# environment variable or secret store and rotate the exposed credentials.
MONGO_DB_URL="mongodb+srv://pgmahajanott:pgmahajanott@cluster0.mevcvot.mongodb.net/?retryWrites=true&w=majority"
MODEL_FILE_NAME = "model"
MODEL_FILE_EXTENSION = ".pkl"
artifact_folder = "artifacts" | Prashant9511/DiamondPricePrediction | src/constant/__init__.py | __init__.py | py | 361 | python | en | code | 0 | github-code | 90 |
18398921949 | #13:12
# Sweep q queries over n timed events using two heaps:
#  - `event`: (open time s-x, close time t-x, x), ordered by open time
#  - `now`:   (x, close time) for events whose window has opened
# For each query day d, the answer is the smallest x among events whose
# window contains d, or -1 if there is none.
# NOTE(review): events are moved out of `event` permanently, so this assumes
# the query values d arrive in nondecreasing order — confirm against the
# problem statement.
n,q = map(int,input().split())
import heapq
import sys
input = sys.stdin.readline
event = []
for _ in range(n):
    s,t,x = map(int,input().split())
    heapq.heappush(event,(s-x,t-x,x))
t = 0  # NOTE(review): unused (shadowed name from the loop above)
now = []
for _ in range(q):
    d = int(input())
    if event:
        # Promote every event whose window has opened by day d.
        while event[0][0] <= d:
            tmp = heapq.heappop(event)
            heapq.heappush(now,(tmp[2],tmp[1]))
            if not event:
                break
    if now:
        # Lazily drop the minimum-x entries that have already closed; the
        # while-else fires when the top entry is still open.
        while now[0][1] <= d:
            heapq.heappop(now)
            if not now:
                print(-1)
                break
        else:
            print(now[0][0])
else:
print(-1) | Aasthaengg/IBMdataset | Python_codes/p03033/s728466019.py | s728466019.py | py | 557 | python | en | code | 0 | github-code | 90 |
29263791521 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import colorfield.fields
class Migration(migrations.Migration):
dependencies = [
("app", "0006_sourceline_tags_json"),
]
operations = [
migrations.CreateModel(
name="DiagramSymbol",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("position", models.IntegerField()),
("x", models.IntegerField()),
("y", models.IntegerField()),
("pen", colorfield.fields.ColorField(max_length=10)),
(
"sourceline",
models.ForeignKey(to="app.SourceLine", on_delete=models.CASCADE),
),
],
),
]
| johntellsall/shotglass | shotglass/app/migrations/0007_diagramsymbol.py | 0007_diagramsymbol.py | py | 1,050 | python | en | code | 17 | github-code | 90 |
18562917799 | import sys
readline = sys.stdin.readline
h, w, d = map(int, readline().split())
A = [list(map(int, readline().split())) for _ in range(h)]
q = int(readline())
LR = [tuple(map(lambda x:int(x)-1, readline().split())) for _ in range(q)]
# D maps each (0-based) cell value to its [row, col] position in the grid.
D = dict()
for hi in range(h):
    for wi in range(w):
        D[A[hi][wi]-1] = [hi,wi]
# DP[i] = total Manhattan distance of the walk i-d -> i taken in steps of d,
# accumulated as a prefix sum so any l -> r walk is answered in O(1) as
# DP[r] - DP[l].
DP = [0]*(h*w)
for i in range(d, h*w):
    px, py = D[i-d]
    x, y = D[i]
    DP[i] = DP[i-d] + abs(x-px) + abs(y-py)
for l,r in LR:
print(DP[r]-DP[l]) | Aasthaengg/IBMdataset | Python_codes/p03426/s950806130.py | s950806130.py | py | 481 | python | en | code | 0 | github-code | 90 |
30750395803 | import os
from sys import path, argv
path.append("/home/hklee/work/mylib")
from hk_plot_tool import Image_Plot
import hk_tool_box
import hk_gglensing_tool
import numpy
import h5py
import hk_FQlib
import time
# import c4py
import galsim
from astropy.cosmology import FlatLambdaCDM
from astropy.coordinates import SkyCoord
from astropy import units
from astropy import constants as const
# CLI arguments: working directory, input catalogue name, output segment
# file name, and the lens redshift.
param_path = argv[1]
stack_file = argv[2]
segment_file = argv[3]
len_z = float(argv[4]) # redshift
# cosmology: flat LambdaCDM; h-scaled units (Mpc/h, M_sun/h) are used below
omega_m0 = 0.31
omega_lam0 = 1 - omega_m0
h = 0.6735
H_0 = 100 * h
cosmos = FlatLambdaCDM(H_0, Om0=omega_m0)
# Halo parameters
Mass = 3*10 ** 13 # M_sun/h
conc = 6 # concentration
# len_z = 0.2 # redshift
halo_position = galsim.PositionD(0, 0) # arcsec
com_dist_len = cosmos.comoving_distance(len_z).value * h # Mpc/h
print("Lens plane at z = %.2f, %.5f Mpc/h" % (len_z, com_dist_len))
# lens profile: NFW halo centred on the field origin
CF = hk_gglensing_tool.Cosmos_flat(omega_m0, 100*h)
CF.NFW_profile_galsim((0,0), Mass, conc, len_z)
# logarithmic radial bins used to segment the sources
separation_bin_num = 1
Rmin, Rmax = 0.05, 0.07 # Mpc/h
separation_bin = hk_tool_box.set_bin_log(Rmin, Rmax, separation_bin_num+1)
# read the stacked source catalogue
h5f = h5py.File(param_path + "/%s"%stack_file, "r")
src_z = h5f["/z"][()]
# src_z_m = h5f["/z_m"][()]
src_ra = h5f["/ra"][()]
src_dec = h5f["/dec"][()]
src_g1 = h5f["/gamma1"][()]
src_g2 = h5f["/gamma2"][()]
h5f.close()
gt = numpy.sqrt(src_g1**2 + src_g2**2)  # NOTE(review): computed but never used
# the measured ellipticity: intrinsic shape plus shear
src_num = src_g1.shape[0]
rng = numpy.random.RandomState(numpy.random.randint(1, 10000, 1))
e = rng.normal(0, 0.1, src_num)
theta = rng.uniform(0, 2*numpy.pi, src_num)
# NOTE(review): `e` is drawn but never applied below — e1/e2 have unit
# amplitude. Presumably e1 = e*cos(2*theta) etc. was intended; confirm.
e1 = numpy.cos(2*theta)
e2 = numpy.sin(2*theta)
src_e1 = e1 + src_g1
src_e2 = e2 + src_g2
# position and separation angle relative to the lens at (ra, dec) = (0, 0)
pos_len = SkyCoord(ra=0 * units.deg, dec=0 * units.deg, frame="fk5")
pos_src = SkyCoord(ra=src_ra * units.deg, dec=src_dec * units.deg, frame="fk5")
separation_radian = pos_len.separation(pos_src).radian
# small-angle conversion to comoving transverse distance in Mpc/h
separation_radius = separation_radian * com_dist_len
print("Separation: ",separation_radius.min(), separation_radius.max(),src_ra.max())
position_angle = pos_len.position_angle(pos_src).radian
sin_2theta = numpy.sin(2 * position_angle)
cos_2theta = numpy.cos(2 * position_angle)
# sin_4theta = numpy.sin(4 * position_angle)
# cos_4theta = numpy.cos(4 * position_angle)
# rotate shear/ellipticity into tangential (t) and cross (x) components
src_gt = src_g1 * cos_2theta - src_g2 * sin_2theta
src_gx = src_g1 * sin_2theta + src_g2 * cos_2theta
src_et = src_e1 * cos_2theta - src_e2 * sin_2theta
src_ex = src_e1 * sin_2theta + src_e2 * cos_2theta
# write one HDF5 group per radial bin
h5f = h5py.File(param_path + "/%s"%segment_file, "w")
for i in range(separation_bin_num):
    idx1 = separation_radius >= separation_bin[i]
    idx2 = separation_radius < separation_bin[i+1]
    idx = idx1 & idx2
    print("%.4f ~ %.4f Mpc/h %d"%(separation_bin[i], separation_bin[i+1], idx.sum()))
    h5f["/%d/z"%i] = src_z[idx]
    # h5f["/%d/z_m"%i] = src_z_m[idx]
    h5f["/%d/ra"%i] = src_ra[idx]
    h5f["/%d/dec"%i] = src_dec[idx]
    h5f["/%d/gamma1"%i] = src_g1[idx]
    h5f["/%d/gamma2"%i] = src_g2[idx]
    h5f["/%d/gamma_t"%i] = src_gt[idx]
    h5f["/%d/gamma_x"%i] = src_gx[idx]
    h5f["/%d/e_t"%i] = src_et[idx]
    h5f["/%d/e_x"%i] = src_ex[idx]
    h5f["/%d/radius"%i] = separation_radius[idx]
    h5f["/%d/radian"%i] = separation_radian[idx]
h5f.close() | hekunlie/astrophy-research | galaxy-galaxy lensing/simu/segment_file.py | segment_file.py | py | 3,308 | python | en | code | 2 | github-code | 90 |
38865071406 | from PIL import Image
# Load the puzzle grid and the mask image that highlights the answer cells.
word_matrix = Image.open("word_matrix.png")
mask = Image.open("mask.png")
# Resize the mask to the matrix's exact pixel dimensions so they overlay 1:1.
matrix_x,matrix_y=word_matrix.size
mask = mask.resize((matrix_x,matrix_y))
# Add an alpha channel (0-255); 100 keeps the mask semi-transparent.
mask.putalpha(100)
# Composite the translucent mask onto the matrix, using the mask itself as
# the alpha mask for the paste.
word_matrix.paste(im=mask, box=(0, 0), mask=mask)
word_matrix.show()
# Persist the solved/overlaid image to a new file.
word_matrix.save('Word_Matrix_Solution.png') | jonwk/Python-Stuff | Images/Word_Matrix_Problem.py | Word_Matrix_Problem.py | py | 474 | python | en | code | 0 | github-code | 90 |
70093305256 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__=='__main__':
    # List 1: player names.
    players = ['Alvaro Revoredo', 'Mike Frist', 'Paula Jimenez','Gonzalo Chacaltana','Felipe Ayala']
    # List 2: country of origin (parallel to `players`).
    countries = ['Uruguay','Brasil','México','Perú','Chile']
    # List 3: score (parallel to `players`).
    scores = [89.2,81.8,83.4,82.6,80.9]
    print("\nResultado ordenado por puntaje de menor a mayor")
    # Build a list of dicts from the three parallel lists via a comprehension.
    competition = [{'score':scores[i], 'player':players[i],'country':countries[i]} for i in range(len(players))]
    # Ascending sort by score; ljust keeps the columns aligned.
    for data in sorted(competition, key=lambda x: x['score'], reverse=False):
        print(f"Jugador: {data['player'].ljust(30)}Pais: {data['country'].ljust(15)}Puntaje: {data['score']}")
    # Output:
    # Jugador: Felipe Ayala                  Pais: Chile          Puntaje: 80.9
    # Jugador: Mike Frist                    Pais: Brasil         Puntaje: 81.8
    # Jugador: Gonzalo Chacaltana            Pais: Perú           Puntaje: 82.6
    # Jugador: Paula Jimenez                 Pais: México         Puntaje: 83.4
    # Jugador: Alvaro Revoredo               Pais: Uruguay        Puntaje: 89.2
    print("\nResultado ordenado por puntaje de mayor a menor")
    # Same sort, descending.
    for data in sorted(competition, key=lambda x: x['score'], reverse=True):
        print(f"Jugador: {data['player'].ljust(30)}Pais: {data['country'].ljust(15)}Puntaje: {data['score']}")
    # Output:
    # Jugador: Alvaro Revoredo               Pais: Uruguay        Puntaje: 89.2
    # Jugador: Paula Jimenez                 Pais: México         Puntaje: 83.4
    # Jugador: Gonzalo Chacaltana            Pais: Perú           Puntaje: 82.6
    # Jugador: Mike Frist                    Pais: Brasil         Puntaje: 81.8
# Jugador: Felipe Ayala Pais: Chile Puntaje: 80.9 | gchacaltana/python_snippets | lambda.py | lambda.py | py | 1,943 | python | es | code | 0 | github-code | 90 |
38305024090 |
# Returns three back-to-back copies of the string's "front".
def front3(str):
    """Return three consecutive copies of the front of *str*.

    The front is the first three characters; when the string is shorter
    than three characters the whole string is used (slicing never raises),
    which is exactly what tripling the full string produces.
    """
    front = str[:3]
    return front * 3
# Demo: a long string (front is "Jav"/"Cho") and an exact-length one ("abc").
print(front3("Java"))
print(front3("Chocolate"))
print(front3("abc"))
| jemtca/CodingBat | Python/Warmup-1/front3.py | front3.py | py | 289 | python | en | code | 0 | github-code | 90 |
25571688584 | from __future__ import absolute_import
import importlib
import os
import pkgutil
import re
import sys
import unittest
import coverage
TEST_MODULE_REGEX = r"^.*_test$"
# Determines the path of a given path relative to the first matching
# path on sys.path. Useful for determining what a directory's module
# path will be.
def _relativize_to_sys_path(path):
    # NOTE(review): `startswith` does a raw prefix test, so "/usr/library"
    # would match a sys.path entry of "/usr/lib"; a path-component-aware
    # check (e.g. requiring a trailing os.sep boundary) would be safer.
    for sys_path in sys.path:
        if path.startswith(sys_path):
            relative = path[len(sys_path) :]
            if not relative:
                return ""
            # strip a leading separator left over from the prefix cut
            if relative.startswith(os.path.sep):
                relative = relative[len(os.path.sep) :]
            # normalize to always end with a separator (a directory prefix)
            if not relative.endswith(os.path.sep):
                relative += os.path.sep
            return relative
    raise AssertionError("Failed to relativize {} to sys.path.".format(path))
def _relative_path_to_module_prefix(path):
    """Translate a relative filesystem path into a dotted module prefix."""
    return ".".join(path.split(os.path.sep))
class Loader(object):
    """Test loader for setuptools test suite support.

    Attributes:
      suite (unittest.TestSuite): All tests collected by the loader.
      loader (unittest.TestLoader): Standard Python unittest loader to be ran per
        module discovered.
      module_matcher (re.RegexObject): A regular expression object to match
        against module names and determine whether or not the discovered module
        contributes to the test suite.
    """

    def __init__(self):
        self.suite = unittest.TestSuite()
        self.loader = unittest.TestLoader()
        self.module_matcher = re.compile(TEST_MODULE_REGEX)

    def loadTestsFromNames(self, names, module=None):
        """Function mirroring TestLoader::loadTestsFromNames, as expected by
        setuptools.setup argument `test_loader`."""
        # ensure that we capture decorators and definitions (else our coverage
        # measure unnecessarily suffers)
        coverage_context = coverage.Coverage(data_suffix=True)
        coverage_context.start()
        # Import the named top-level modules first, then visit them and
        # recursively walk any that are packages.
        imported_modules = tuple(
            importlib.import_module(name) for name in names
        )
        for imported_module in imported_modules:
            self.visit_module(imported_module)
        for imported_module in imported_modules:
            try:
                # Only packages carry __path__; plain modules are skipped.
                package_paths = imported_module.__path__
            except AttributeError:
                continue
            self.walk_packages(package_paths)
        coverage_context.stop()
        coverage_context.save()
        return self.suite

    def walk_packages(self, package_paths):
        """Walks over the packages, dispatching `visit_module` calls.

        Args:
            package_paths (list): A list of paths over which to walk through modules
            along.
        """
        for path in package_paths:
            self._walk_package(path)

    def _walk_package(self, package_path):
        # Derive the dotted module prefix for this directory so walk_packages
        # yields fully-qualified module names.
        prefix = _relative_path_to_module_prefix(
            _relativize_to_sys_path(package_path)
        )
        for importer, module_name, is_package in pkgutil.walk_packages(
            [package_path], prefix
        ):
            module = None
            if module_name in sys.modules:
                # Reuse an already-imported module rather than re-executing it.
                module = sys.modules[module_name]
            else:
                spec = importer.find_spec(module_name)
                module = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(module)
            self.visit_module(module)

    def visit_module(self, module):
        """Visits the module, adding discovered tests to the test suite.

        Args:
            module (module): Module to match against self.module_matcher; if matched
            it has its tests loaded via self.loader into self.suite.
        """
        if self.module_matcher.match(module.__name__):
            module_suite = self.loader.loadTestsFromModule(module)
            self.suite.addTest(module_suite)
def iterate_suite_cases(suite):
"""Generator over all unittest.TestCases in a unittest.TestSuite.
Args:
suite (unittest.TestSuite): Suite to iterate over in the generator.
Returns:
generator: A generator over all unittest.TestCases in `suite`.
"""
for item in suite:
if isinstance(item, unittest.TestSuite):
for child_item in iterate_suite_cases(item):
yield child_item
elif isinstance(item, unittest.TestCase):
yield item
else:
raise ValueError(
"unexpected suite item of type {}".format(type(item))
)
| grpc/grpc | src/python/grpcio_tests/tests/_loader.py | _loader.py | py | 4,512 | python | en | code | 39,468 | github-code | 90 |
40794457686 | import locale
from flask import Blueprint, Response, render_template, request, session, current_app
from src.blueprints.database import connect_db
from src.blueprints.decode_keyword import decode_keyword
from src.blueprints.format_data import format_requests
from src.blueprints.auth import login_required
from src.blueprints.exceptions import RequestNotFoundError, RequestStatusError, ItemIssuedError, ItemNotInRequestError, IllegalIssueError, IncompleteIssueError, SelfRoleError, SelfNotFoundError
locale.setlocale(locale.LC_ALL, 'en_PH.utf8')
bp_request = Blueprint("bp_request", __name__, url_prefix = "/requests")
# route for requests: serves the listing page shell; row data is fetched
# client-side via the /requests/search endpoint below.
@bp_request.route('/', methods=["GET"])
def requests ():
    return render_template("requests/requests.html", active = "requests")
# route for request search
@bp_request.route('/search', methods = ["GET"])
def search_requests ():
    """Search requests by free-text keywords and/or status filters.

    Returns {"requests": [...]} formatted for the UI, or ({"error": ...}, 500).

    Fixes over the original:
    - user-supplied keywords/filters are now bound as driver parameters
      (%s placeholders) instead of being f-string-interpolated into the SQL,
      closing a SQL injection hole;
    - each keyword's OR-group is parenthesised; previously the bare
      "a OR b ... AND c OR d" chains were mis-grouped by AND/OR precedence.
    Literal %% in DATE_FORMAT is doubled because params are always passed.
    """
    keywords = [] if "keywords" not in request.args else [decode_keyword(x).lower() for x in request.args.get("keywords").split(" ")]
    filters = [] if "filter" not in request.args else request.args.get("filter").split(",")
    conditions = []
    params = []
    searched_columns = ("ItemID", "ItemName", "ItemDescription", "RequestedBy", "Purpose", "Category")
    for x in keywords:
        conditions.append("(" + " OR ".join(f"{col} LIKE %s" for col in searched_columns) + ")")
        params.extend([f"%{x}%"] * len(searched_columns))
    if len(filters) > 0:
        conditions.append("LOWER(StatusName) IN (" + ", ".join(["%s"] * len(filters)) + ")")
        params.extend(filters)
    w = f"({' AND '.join(conditions)})" if len(conditions) > 0 else ""
    cxn = None
    try:
        cxn = connect_db()
        db = cxn.cursor()
        query = ("SELECT RequestID, RequestedBy, DATE_FORMAT(RequestDate, '%%d %%b %%Y') AS RequestDate, "
                 "StatusName as Status, Purpose, ItemID, ItemName, Category, ItemDescription, RequestQuantity, "
                 "SUM(QuantityIssued), AvailableStock, Unit, Remarks "
                 "FROM request INNER JOIN request_status USING (StatusID) "
                 "INNER JOIN request_item USING (RequestID) INNER JOIN stock USING (ItemID)")
        if w != "":
            query += (" WHERE RequestID IN (SELECT DISTINCT RequestID FROM request "
                      "INNER JOIN request_item USING (RequestID) INNER JOIN item USING (ItemID) WHERE " + w + ")")
        query += " GROUP BY request_item.ItemID, RequestID ORDER BY RequestID DESC, ItemID"
        db.execute(query, tuple(params))
        requests = db.fetchall()
    except Exception as e:
        current_app.logger.error(str(e))
        return { "error": str(e) }, 500
    finally:
        if cxn is not None: cxn.close()
    return { "requests": format_requests(requests, "user" in session.keys()) }
# route for request denial
@bp_request.route('/deny', methods = ["POST"])
@login_required
def deny_request ():
    """Deny a pending request (status 1 -> 5) and record per-item remarks.

    Fix over the original: the request id, acting admin, remarks, and item ids
    are now bound as driver parameters instead of being f-string-interpolated
    into the SQL, closing a SQL injection hole.
    """
    req = request.get_json()['RequestID']
    remarks = request.get_json()['Remarks']
    cxn = None
    try:
        cxn = connect_db()
        db = cxn.cursor()
        db.execute("SELECT StatusID FROM request WHERE RequestID = %s", (req,))
        f = db.fetchone()
        if f is None: raise RequestNotFoundError(request = req)
        # Only a freshly-submitted request (status 1) may be denied.
        if f[0] != 1: raise RequestStatusError(from_status = f[0], to_status = 5)
        db.execute("UPDATE request SET StatusID = 5, ActingAdmin = %s, DateCancelled = CURDATE() WHERE RequestID = %s",
                   (session['user']['Username'], req))
        for x in remarks:
            if x["Remarks"] is not None:
                db.execute("UPDATE request_item SET Remarks = %s WHERE RequestID = %s && ItemID = %s",
                           (x["Remarks"], req, x["ItemID"]))
        cxn.commit()
    except Exception as e:
        current_app.logger.error(str(e))
        return { "error": str(e) }, 500
    finally:
        if cxn is not None: cxn.close()
    return Response(status = 200)
# route for request cancellation: requester-side cancel (status -> 6),
# allowed unless the request is already received/denied/cancelled (4, 5, 6).
# SECURITY(review): `req`, remarks, and item ids are interpolated into the
# SQL via f-strings — use parameterized queries (%s placeholders) instead.
@bp_request.route('/cancel', methods = ["POST"])
def cancel_request ():
    req = request.get_json()['RequestID']
    remarks = request.get_json()['Remarks']
    cxn = None
    try:
        cxn = connect_db()
        db = cxn.cursor()
        db.execute(f"SELECT StatusID FROM request WHERE RequestID = {req}")
        f = db.fetchone()
        if f is None: raise RequestNotFoundError(request = req)
        if f[0] in [4, 5, 6]: raise RequestStatusError(from_status = f[0], to_status = 6)
        db.execute(f"UPDATE request SET StatusID = 6, DateCancelled = CURDATE() WHERE RequestID = {req}")
        for x in remarks:
            if x["Remarks"] is not None: db.execute(f"UPDATE request_item SET Remarks = '{x['Remarks']}' WHERE RequestID = {req} && ItemID = '{x['ItemID']}'")
        cxn.commit()
    except Exception as e:
        current_app.logger.error(str(e))
        return { "error": str(e) }, 500
    finally:
        if cxn is not None: cxn.close()
    return Response(status = 200)
# route for request receipt: marks an issued request (status 3) as received
# (status 4), then consumes delivery stock FIFO (oldest, non-expired first)
# for each issued item and records the unit price actually charged.
# SECURITY(review): values are f-string-interpolated into SQL — convert to
# parameterized queries.
@bp_request.route('/receive', methods = ["POST"])
def receive_request ():
    req = request.get_json()['RequestID']
    cxn = None
    try:
        cxn = connect_db()
        db = cxn.cursor()
        db.execute(f"SELECT StatusID FROM request WHERE RequestID = {req}")
        f = db.fetchone()
        if f is None: raise RequestNotFoundError(request = req)
        if f[0] != 3: raise RequestStatusError(from_status = f[0], to_status = 4)
        db.execute(f"UPDATE request SET StatusID = 4, DateReceived = CURDATE(), TimeReceived = CURTIME() WHERE RequestID = {req}")
        db.execute(f"SELECT ItemID, QuantityIssued, RequestQuantity, Remarks FROM request_item WHERE RequestID = {req}")
        items = db.fetchall()
        for i in items:
            # i = (ItemID, QuantityIssued, RequestQuantity, Remarks)
            toIssue = i[1]
            # Candidate deliveries: not expired, still have stock, oldest first.
            db.execute(f"SELECT DeliveryID, AvailableUnit, DeliveryPrice FROM delivery LEFT JOIN expiration USING (DeliveryID) WHERE ItemID = '{i[0]}' && IsExpired = 0 && AvailableUnit > 0 ORDER BY delivery.DeliveryDate ASC, Time ASC;")
            deliveries = db.fetchall()
            # price: DeliveryPrice -> units taken at that price for this item.
            price = {}
            while(toIssue > 0 and len(deliveries) > 0):
                # Draw as much as possible from the oldest delivery.
                db.execute(f"UPDATE delivery SET AvailableUnit = {deliveries[0][1] - min(deliveries[0][1], toIssue)} WHERE DeliveryID = {deliveries[0][0]}")
                if deliveries[0][2] in price: price[deliveries[0][2]] = price[deliveries[0][2]] + min(deliveries[0][1], toIssue)
                else: price[deliveries[0][2]] = min(deliveries[0][1], toIssue)
                # Upsert the per-price issue record, preserving Remarks if any.
                if i[3] is None: db.execute(f"INSERT INTO request_item (RequestID, ItemID, RequestPrice, QuantityIssued, RequestQuantity) VALUES ({req}, '{i[0]}', {deliveries[0][2]}, {price[deliveries[0][2]]}, {i[2]}) ON DUPLICATE KEY UPDATE RequestPrice = {deliveries[0][2]}, QuantityIssued = {price[deliveries[0][2]]}")
                else: db.execute(f"INSERT INTO request_item (RequestID, ItemID, RequestPrice, QuantityIssued, Remarks, RequestQuantity) VALUES ({req}, '{i[0]}', {deliveries[0][2]}, {price[deliveries[0][2]]}, '{i[3]}', {i[2]}) ON DUPLICATE KEY UPDATE RequestPrice = {deliveries[0][2]}, QuantityIssued = {price[deliveries[0][2]]}")
                toIssue = toIssue - min(deliveries[0][1], toIssue)
                deliveries = deliveries[1:]
        # High-value items (price >= 15000) additionally require property
        # approval; flag the request when any were issued.
        db.execute(f"SELECT COUNT(*) FROM request_item LEFT JOIN item USING (ItemID) WHERE RequestID = {req} && Price >= 15000 && QuantityIssued > 0;")
        g = db.fetchone()
        if(g[0] > 0):
            db.execute(f"UPDATE request SET hasPropertyApproved = 1 WHERE RequestID = {req};")
        cxn.commit()
    except Exception as e:
        current_app.logger.error(str(e))
        return { "error": str(e) }, 500
    finally:
        if cxn is not None: cxn.close()
    return Response(status = 200)
# route for individual issue of request item: records the issued quantity for
# one (RequestID, ItemID) pair, refusing items that were already issued.
# SECURITY(review): request-body values are f-string-interpolated into SQL —
# convert to parameterized queries.
@bp_request.route('/issue/item', methods = ["POST"])
@login_required
def issue_item ():
    body = request.get_json()
    cxn = None
    try:
        cxn = connect_db()
        db = cxn.cursor()
        # Re-read the caller's role from the DB and reject stale sessions.
        db.execute(f"SELECT RoleID FROM user WHERE Username = '{session['user']['Username']}'")
        f = db.fetchone()
        if f is None: raise SelfNotFoundError(username = session['user']['Username'])
        if f[0] == 2 and f[0] != session['user']['RoleID']: raise SelfRoleError(username = session['user']['Username'], role = f[0])
        db.execute(f"SELECT StatusID FROM request WHERE RequestID = {body['RequestID']}")
        f = db.fetchone()
        if f is None: raise RequestNotFoundError(request = body['RequestID'])
        #if f[0] != 2: raise IllegalIssueError(request = body['RequestID'])
        db.execute(f"SELECT QuantityIssued FROM request_item WHERE RequestID = {body['RequestID']} AND ItemID = '{body['ItemID']}'")
        g = db.fetchone()
        if g is None: raise ItemNotInRequestError(item = body['ItemID'], request = body['RequestID'])
        # A non-NULL QuantityIssued means this item was already issued.
        if g[0] is not None: raise ItemIssuedError(item = body['ItemID'], request = body['RequestID'])
        db.execute(f"UPDATE request_item SET QuantityIssued = {body['QuantityIssued']} WHERE RequestID = {body['RequestID']} AND ItemID = '{body['ItemID']}'")
        cxn.commit()
    except Exception as e:
        current_app.logger.error(str(e))
        return { "error": str(e) }, 500
    finally:
        if cxn is not None: cxn.close()
    return Response(status = 200)
# route for request issue: once every item in the request has a recorded
# QuantityIssued, moves the request to status 3 (issued) and stamps the issuer.
# SECURITY(review): values are f-string-interpolated into SQL — convert to
# parameterized queries.
@bp_request.route('/issue', methods = ["POST"])
@login_required
def issue_request ():
    req = request.get_json()['RequestID']
    remarks = request.get_json()['Remarks']
    cxn = None
    try:
        cxn = connect_db()
        db = cxn.cursor()
        # Re-read the caller's role from the DB and reject stale sessions.
        db.execute(f"SELECT RoleID FROM user WHERE Username = '{session['user']['Username']}'")
        f = db.fetchone()
        if f is None: raise SelfNotFoundError(username = session['user']['Username'])
        if f[0] == 2 and f[0] != session['user']['RoleID']: raise SelfRoleError(username = session['user']['Username'], role = f[0])
        db.execute(f"SELECT StatusID FROM request WHERE RequestID = {req}")
        f = db.fetchone()
        if f is None: raise RequestNotFoundError(request = req)
        #if f[0] != 2: raise RequestStatusError(from_status = f[0], to_status = 3)
        # Every item must have been issued individually first.
        db.execute(f"SELECT QuantityIssued FROM request_item WHERE RequestID = {req}")
        g = all([x[0] is not None for x in db.fetchall()])
        if not g: raise IncompleteIssueError(request = req)
        db.execute(f"UPDATE request SET StatusID = 3, IssuedBy = '{session['user']['Username']}', DateIssued = CURDATE() WHERE RequestID = {req}")
        for x in remarks:
            if x["Remarks"] is not None: db.execute(f"UPDATE request_item SET Remarks = '{x['Remarks']}' WHERE RequestID = {req} && ItemID = '{x['ItemID']}'")
        cxn.commit()
    except Exception as e:
        current_app.logger.error(str(e))
        return { "error": str(e) }, 500
    finally:
        if cxn is not None: cxn.close()
    return Response(status = 200)
| lomohoga/sIMS | src/blueprints/bp_request.py | bp_request.py | py | 10,687 | python | en | code | 0 | github-code | 90 |
5665212462 | import layers
import tensorflow as tf
from datahelper import *
import logging
import time
class network:
reportFrequency = 50
    def __init__(self):
        # Builds the whole TF graph: a conv/pool stack feeding dense layers
        # into 2-way logits, plus cross-entropy loss, Adam train step, and
        # accuracy. (Python 2 / pre-1.0 TensorFlow: `xrange`, `tf.sub` —
        # NOTE(review): tf.sub was removed in TF 1.0; this mixes API eras
        # with global_variables_initializer used in train(), so pin ~0.12.)
        self.global_step = tf.Variable(0, trainable=False)
        # Dropout keep-probability; fed as 1.0 for evaluation runs.
        self.dropoutRate = tf.placeholder(tf.float32, name="DropoutRate")
        self.session = None
        self.dataHelper = None
        self.layers = []
        self.layers.append(layers.conv(7, 96, 50, 1))
        self.layers.append(layers.maxPool(2))
        self.layers.append(layers.conv(5, 192, 96, 2))
        self.layers.append(layers.maxPool(2))
        self.layers.append(layers.conv(3, 512, 192, 1))
        self.layers.append(layers.maxPool(2))
        self.layers.append(layers.conv(2, 4096, 512, 1))
        self.layers.append(layers.dense(3*4096, 4096, dropout_rate=self.dropoutRate, reshape_needed=True))
        self.layers.append(layers.dense(4096, 2048, dropout_rate=self.dropoutRate))
        self.layers.append(layers.dense(2048, 2, name="FinalResult"))
        # Input volume is (batch, 60, 40, 50); values are centred and scaled
        # from [0, 255] to roughly [-1, 1].
        self.input = tf.placeholder(tf.float32, [None, 60, 40, 50], name="DefaultInput")
        self.normalizedInput = tf.truediv(tf.sub(self.input, tf.constant(128.)), tf.constant(128.), name = "NormalizedInput")
        # Chain each layer's output into the next; on failure report which
        # layer index broke before re-raising.
        self.results = []
        self.results.append(self.layers[0].result(self.normalizedInput))
        for i in xrange(1, len(self.layers)):
            try:
                self.results.append(self.layers[i].result(self.results[i-1]))
            except:
                print(i)
                raise
        self.finalResult = self.results[len(self.results) - 1]
        self.reallyFinalResult = tf.identity(self.finalResult, name="finalesResult")
        print(self.reallyFinalResult.get_shape())
        # Two-class one-hot labels; softmax cross-entropy over the raw logits.
        self.labels = tf.placeholder(tf.float32, [None, 2], name="Labels")
        self.crossEntropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(self.finalResult, self.labels))
        self.train_step = tf.train.AdamOptimizer(epsilon=0.001, learning_rate=0.0001).minimize(self.crossEntropy, global_step=self.global_step)
        self.correct_prediction = tf.equal(tf.argmax(self.finalResult, 1), tf.argmax(self.labels, 1))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32), name="Accuracy")
        self.saver = None
def test(self, path):
self.session = tf.InteractiveSession()
self.session.run(tf.initialize_all_variables())
self.dataHelper = datahelper(path)
data = self.dataHelper.getsingledata()
print(data.data.shape)
print(len(data.labels))
res = self.results[len(self.results)-1].eval(feed_dict={
self.input: data.data, self.labels: data.labels, self.dropoutRate: 0.5})
print("finshed, output %g", res)
def train(self, path, epochs, batchsize):
counter = 0
maxAcc = 0.
saver = tf.train.Saver()
self.costs = []
self.session = tf.InteractiveSession()
self.session.run(tf.global_variables_initializer())
self.dataHelper = datahelper(path)
print("started")
logging.basicConfig(filename="logs" + os.sep + time.ctime() + '.log', level=logging.DEBUG)
logging.info("epochs: "+str(epochs))
logging.info("batch size: "+str(batchsize))
logging.info("test data proportion: "+str(1 - datahelper.testProportion))
logging.info("Started at" + time.ctime())
for i in xrange(epochs):
newbatch = self.dataHelper.getnextbatch(batchsize)
if i % network.reportFrequency == 0 and i > 0:
results = self.session.run([self.accuracy, self.crossEntropy],feed_dict={self.input: newbatch.data, self.labels: newbatch.labels, self.dropoutRate: 1})
self.costs.append(results[1])
print(results)
self.train_step.run(feed_dict={self.input: newbatch.data, self.labels: newbatch.labels, self.dropoutRate: 0.6})
logging.info("Finished training at" + time.ctime())
testdata = self.dataHelper.gettestdata()
finAcc = 0
test_len = 0
correctMen = 0
correctWomen = 0
totalMen = 0
totalWomen = 0
for batch in testdata:
acc = self.session.run([self. accuracy, self.reallyFinalResult], feed_dict={
self.input: batch.data, self.labels: batch.labels, self.dropoutRate: 1})
finAcc += acc[0] * len(batch.data)
resultList = acc[1].tolist()
for idx in range(len(batch.labels)):
if batch.labels[idx][0] == 1:
totalMen += 1
if resultList[idx].index((max(resultList[idx]))) == batch.labels[idx].index(max(batch.labels[idx])):
correctMen += 1
else:
totalWomen += 1
if resultList[idx].index((max(resultList[idx]))) == batch.labels[idx].index(max(batch.labels[idx])):
correctWomen += 1
test_len += len(batch.data)
finAcc = finAcc / test_len
print(finAcc)
name = time.ctime()
if not os.path.exists('models' + os.sep + name):
os.makedirs('models' + os.sep + name)
logging.info("Total women in test set: " + str(totalWomen))
logging.info("Total men in test set: " + str(totalMen))
logging.info("Correcly classified women: " + str(correctWomen))
logging.info("Correctly classifed men: " + str(correctMen))
saver.save(self.session, 'models' + os.sep + name + os.sep + 'my-model')
logging.info("final accuracy %g" % finAcc)
logging.info("Finished run at" + time.ctime())
costString = "\n"
for cost in self.costs:
costString += str(cost)+"\n"
logging.info("costs: "+costString)
| plubon/thesis | network.py | network.py | py | 5,823 | python | en | code | 0 | github-code | 90 |
19228942888 | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
# Fourier-transform a grayscale image, show its spectrum, then apply square
# low-pass (smoothing) masks of three sizes plus one high-pass (sharpening)
# mask in the frequency domain and display the filtered results.
img = cv.imread('../Project1/lena_top.jpg',0)

dft = cv.dft(np.float32(img),flags = cv.DFT_COMPLEX_OUTPUT)
# Shift the zero-frequency (DC) component to the centre of the spectrum.
dft_shift = np.fft.fftshift(dft)
magnitude_spectrum = 20*np.log(cv.magnitude(dft_shift[:,:,0],dft_shift[:,:,1]))

rows, cols = img.shape
crow,ccol = int(rows/2) , int(cols/2)


def filtered_image(half_size, high_pass=False):
    """Apply a square frequency-domain mask centred on the DC component.

    :param half_size: half the side length (in pixels) of the square window
    :param high_pass: False keeps only the centre window (low-pass /
        smoothing); True zeroes the centre and keeps the rest (sharpening)
    :return: magnitude image of the inverse DFT
    """
    if high_pass:
        mask = np.ones((rows, cols, 2), np.uint8)
        mask[crow - half_size:crow + half_size, ccol - half_size:ccol + half_size] = 0
    else:
        mask = np.zeros((rows, cols, 2), np.uint8)
        mask[crow - half_size:crow + half_size, ccol - half_size:ccol + half_size] = 1
    fshift = dft_shift * mask
    f_ishift = np.fft.ifftshift(fshift)
    img_back = cv.idft(f_ishift)
    return cv.magnitude(img_back[:, :, 0], img_back[:, :, 1])


# NOTE(review): the original header comment mentions cutoffs 5/50/150 but the
# masks actually use half-sizes 15/50/100 -- confirm which was intended.
img_back = filtered_image(50)        # low-pass, 100x100 centre window
img_back5 = filtered_image(15)       # low-pass, 30x30 centre window
img_back150 = filtered_image(100)    # low-pass, 200x200 centre window
img_backsharp = filtered_image(100, high_pass=True)  # high-pass: keeps detail

plt.figure(figsize=(20,8))
plt.subplot(231),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(232),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.subplot(233),plt.imshow(img_back5, cmap = 'gray')
plt.title('Magnitude Spectrum5 '), plt.xticks([]), plt.yticks([])
plt.subplot(234),plt.imshow(img_back, cmap = 'gray')
plt.title('Magnitude Spectrum50 '), plt.xticks([]), plt.yticks([])
plt.subplot(235),plt.imshow(img_back150, cmap = 'gray')
plt.title('Magnitude Spectrum 150 '), plt.xticks([]), plt.yticks([])
plt.subplot(236),plt.imshow(img_backsharp, cmap = 'gray')
plt.title('Magnitude sharp 150 '), plt.xticks([]), plt.yticks([])
plt.show()
plt.show() | mvchain/cryptovault-ios | ToPay/opencvlearn.py | opencvlearn.py | py | 2,628 | python | en | code | 1 | github-code | 90 |
71216821736 | import cv2 as cv
import numpy as np
# from pynput.mouse import Button, Controller
import wx
import math
import time
import ctypes
# mouse = Controller()
app = wx.App(False)
# Full screen resolution (pixels).
(sx,sy) = wx.GetDisplaySize()
# (camx,camy) = (320,240)
# Capture at half the screen resolution.
(camx,camy) = wx.GetDisplaySize()/2
cam = cv.VideoCapture(0)
cam.set(3,camx)  # property 3 = frame width
cam.set(4,camy)  # property 4 = frame height
# Previous smoothed pointer location (used by the commented-out mouse code).
mlocold = np.array([0,0])
# mouseloc = np.array([0,0])
damfac = 2.5     # damping factor for pointer smoothing
pinch_flag = 0   # 1 while a "pinch" gesture is active (mouse code, disabled)
# Win32 entry point for synthetic keyboard/mouse input.
SendInput = ctypes.windll.user32.SendInput
PUL = ctypes.POINTER(ctypes.c_ulong)
# DirectInput scan codes for the game control keys.
W=0x11
A=0x1E
S=0x1F
D=0x20
Q=0x10
E=0x12
SPACE=0x39
R=0x13
class KeyBdInput(ctypes.Structure):
    # Mirrors the Win32 KEYBDINPUT structure consumed by SendInput.
    _fields_ = [("wVk", ctypes.c_ushort),
                ("wScan", ctypes.c_ushort),
                ("dwFlags", ctypes.c_ulong),
                ("time", ctypes.c_ulong),
                ("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
    # Mirrors the Win32 HARDWAREINPUT structure (unused branch of INPUT).
    _fields_ = [("uMsg", ctypes.c_ulong),
                ("wParamL", ctypes.c_short),
                ("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
    # Mirrors the Win32 MOUSEINPUT structure (unused branch of INPUT).
    _fields_ = [("dx", ctypes.c_long),
                ("dy", ctypes.c_long),
                ("mouseData", ctypes.c_ulong),
                ("dwFlags", ctypes.c_ulong),
                ("time",ctypes.c_ulong),
                ("dwExtraInfo", PUL)]
class Input_I(ctypes.Union):
    # The union half of the Win32 INPUT structure: exactly one member is used.
    _fields_ = [("ki", KeyBdInput),
                ("mi", MouseInput),
                ("hi", HardwareInput)]
class Input(ctypes.Structure):
    # Mirrors the Win32 INPUT structure: a type tag plus the Input_I union.
    _fields_ = [("type", ctypes.c_ulong),
                ("ii", Input_I)]
def _send_key_event(hexKeyCode, flags):
    """Build a KEYBDINPUT for *hexKeyCode* with *flags* and dispatch it via SendInput.

    flags 0x0008 is KEYEVENTF_SCANCODE; OR-ing 0x0002 (KEYEVENTF_KEYUP)
    turns the event into a key release.
    """
    extra = ctypes.c_ulong(0)
    ii_ = Input_I()
    ii_.ki = KeyBdInput( 0, hexKeyCode, flags, 0, ctypes.pointer(extra) )
    x = Input( ctypes.c_ulong(1), ii_ )  # type 1 = INPUT_KEYBOARD
    ctypes.windll.user32.SendInput(1, ctypes.pointer(x), ctypes.sizeof(x))

def PressKey(hexKeyCode):
    """Simulate pressing (and holding) the key with the given scan code."""
    _send_key_event(hexKeyCode, 0x0008)

def ReleaseKey(hexKeyCode):
    """Simulate releasing the key with the given scan code."""
    _send_key_event(hexKeyCode, 0x0008 | 0x0002)
def xAxis(angle):
    """Steer from the marker-line angle: left past +10, right past -10, else straight."""
    if angle > 10:
        # Tilted one way: steer left.
        PressKey(A)
        print('A is pressed')
        time.sleep(.1)
    elif angle < -10:
        # Tilted the other way: steer right.
        PressKey(D)
        print('D is pressed')
        time.sleep(.1)
    else:
        # Roughly level: let go of every movement key, then drive forward.
        for movement_key in (D, A, W, S):
            ReleaseKey(movement_key)
            time.sleep(.1)
        yAxis()
def yAxis():
    """Accelerate: hold W and make sure the brake key (S) is released."""
    PressKey(W)
    print('W is pressed')
    time.sleep(.1)
    ReleaseKey(S)
    time.sleep(.1)
def Brake():
    """Brake: hold S and make sure the accelerator key (W) is released."""
    PressKey(S)
    print('S is pressed')
    time.sleep(.1)
    ReleaseKey(W)
    time.sleep(.1)
# Main loop: track two green markers with the webcam and translate their tilt
# into steering key presses. One visible marker brakes; Enter (keycode 13)
# quits. The commented-out blocks are an earlier mouse-control mode.
while(1):
    ret,img = cam.read()
    img = cv.GaussianBlur(img,(5,5),0)
    hsv_img = cv.cvtColor(img,cv.COLOR_BGR2HSV)
    # Threshold for green-ish hues (H 33-102) in HSV.
    mask = cv.inRange(hsv_img,np.array([33,80,40]),np.array([102,255,255]))
    # Opening removes speckle noise; closing fills holes in the blobs.
    mask_open = cv.morphologyEx(mask,cv.MORPH_OPEN,np.ones((5,5)))
    mask_close = cv.morphologyEx(mask_open,cv.MORPH_CLOSE,np.ones((20,20)))
    mask_final = mask_close
    conts,_ = cv.findContours(mask_final.copy(),cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
    cv.drawContours(img,conts,-1,(0,0,255),3)
    # Two markers visible: compute the line between their centres and steer.
    if(len(conts)==2):
        # if(pinch_flag==1):
        #     pinch_flag = 0
        #     mouse.release(Button.left)
        x1,y1,w1,h1 = cv.boundingRect(conts[0])
        x2,y2,w2,h2 = cv.boundingRect(conts[1])
        cv.rectangle(img,(x1,y1),(x1+w1,y1+h1),(255,0,0),2)
        cv.rectangle(img,(x2,y2),(x2+w2,y2+h2),(255,0,0),2)
        # Centres of the two bounding boxes.
        cx1 = round(x1+w1/2)
        cy1 = round(y1+h1/2)
        cx2 = round(x2+w2/2)
        cy2 = round(y2+h2/2)
        # print(cx1,cy1,cx2,cy2)
        # NOTE(review): this is slope*(180/pi), not an angle in degrees
        # (no atan); the bare except also masks anything besides the
        # intended ZeroDivisionError for a vertical line -- confirm.
        try:
            slope = int(((cy2-cy1)/(cx2-cx1))*180/math.pi)
        except:
            slope = 0
        print(slope)
        xAxis(slope)
        # distance = int(math.sqrt((cx2-cx1)**2 + (cy2-cy1)**2))
        # print(distance)
        # if distance < 100:
        #     Brake()
        cv.line(img,(cx1,cy1),(cx2,cy2),(255,0,0),2)
        # Midpoint between the two markers, drawn for feedback.
        cx = round(cx1/2+cx2/2)
        cy = round(cy1/2+cy2/2)
        cv.circle(img,(cx,cy),2,(0,0,255),2)
        # mouseloc = mlocold+((cx,cy)-mlocold)/damfac
        # mouse.position = (round(sx - (mouseloc[0]*sx/camx)),round((mouseloc[1]*sy/camy)))
        # mlocold = mouseloc
    # Only one marker visible: treat it as the brake gesture.
    elif(len(conts)==1):
        Brake()
        # if(pinch_flag==0):
        #     pinch_flag = 1
        #     mouse.press(Button.left)
        #
        # x,y,w,h = cv.boundingRect(conts[0])
        # cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        # cx = round(x+w/2)
        # cy = round(y+h/2)
        # cv.circle(img,(cx,cy),20,(0,0,255),2)
        # mouseloc = mlocold+((cx,cy)-mlocold)/damfac
        # mouse.position = (round(sx - mouseloc[0]*sx/camx),round(mouseloc[1]*sy/camy))
        # mlocold = mouseloc
    cv.imshow("cam",img)
    # cv.imshow("mask",mask)
    # cv.imshow("mask open",mask_open)
    # cv.imshow("mask close",mask_close)
    # Enter key (13) exits the loop.
    if cv.waitKey(10) == 13:
        break
cv.destroyAllWindows()
cam.release()
| imvickykumar999/hackathon-iot-car-parking | robocar/controler.py | controler.py | py | 5,169 | python | en | code | 2 | github-code | 90 |
36913949766 | import pytest
from fhepy.polynomials import Polynomials
from fhepy.zmodp import ZMod
# Small modular-arithmetic fields reused across the parametrised cases below.
ZMod2 = ZMod(2)
ZMod7 = ZMod(7)
ZMod11 = ZMod(11)
@pytest.mark.parametrize('field,coefficients,expected', [
    (ZMod2, [0], "0"),
    (ZMod2, [1], "1"),
    (ZMod2, [3], "1"),
    (ZMod7, [6], "6"),
    (ZMod7, [7], "0")])
def test_constant(field, coefficients, expected):
    """Degree-0 polynomials render as their coefficient reduced mod p."""
    polynomial_ring = Polynomials(field)
    rendered = str(polynomial_ring(coefficients))
    assert rendered == expected
@pytest.mark.parametrize('field,coefficients,expected', [
    (ZMod2, [0, 1], "x"),
    (ZMod2, [1, 1], "x + 1"),
    (ZMod2, [3, 3], "x + 1"),
    (ZMod7, [6, 1], "x + 6"),
    (ZMod7, [7, 6], "6x")])
def test_degree_1(field, coefficients, expected):
    """Degree-1 polynomials render with reduced coefficients and no zero terms."""
    polynomial_ring = Polynomials(field)
    rendered = str(polynomial_ring(coefficients))
    assert rendered == expected
@pytest.mark.parametrize('field,coefficients,expected', [
    (ZMod2, [0, 1, 1], "x**2 + x"),
    (ZMod2, [1, 1, 1], "x**2 + x + 1"),
    (ZMod2, [1, 0, 1], "x**2 + 1"),
    (ZMod2, [0, 0, 1], "x**2"),
    (ZMod2, [3, 3, 3], "x**2 + x + 1"),
    (ZMod7, [6, 0, 1], "x**2 + 6"),
    (ZMod7, [7, 0, 6], "6*(x**2)"),
    (ZMod7, [6, 1, 1], "x**2 + x + 6"),
    (ZMod7, [7, 1, 6], "6*(x**2) + x"),
    (ZMod7, [6, 2, 1], "x**2 + 2x + 6"),
    (ZMod7, [7, 5, 6], "6*(x**2) + 5x")
    ])
def test_degree_2(field, coefficients, expected):
    """Degree-2 polynomials render highest power first, dropping zero terms."""
    polynomial_ring = Polynomials(field)
    rendered = str(polynomial_ring(coefficients))
    assert rendered == expected
| benpbenp/fhepy | tests/polynomials/test_str.py | test_str.py | py | 1,378 | python | en | code | 1 | github-code | 90 |
5992803190 | # 引入库
# Import Packages
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
def gray_scale(img):
    """
    Convert an RGB image to grayscale.
    Applies the Gray scale transform
    :param img: source image in RGB channel order
    :return: grey image
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def gaussian_blur(img, kernel_size):
    """
    Smooth an image with a Gaussian kernel (reduces noise before edge detection).
    Applies a Gaussian Noise kernel
    :param img: source image
    :param kernel_size: side length of the square kernel (should be odd)
    :return: image after a Gaussian Noise kernel
    """
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def canny(img, low_threshold, high_threshold):
    """
    Detect edges with the Canny algorithm.
    Applies the Canny transform
    :param img: source (typically blurred grayscale) image
    :param low_threshold: hysteresis lower gradient threshold
    :param high_threshold: hysteresis upper gradient threshold
    :return: binary edge image after the Canny transform
    """
    return cv2.Canny(img, low_threshold, high_threshold)
def region_of_interest(img, vertices):
    """
    Mask the image to a polygonal region of interest.
    Applies an image mask.
    Only keeps the region of the image defined by the polygon
    formed from `vertices`. The rest of the image is set to black.
    `vertices` should be a numpy array of integer points.
    :param img: source image (grayscale or multi-channel)
    :param vertices: polygon vertices as a numpy array of integer points
    :return: image with everything outside the polygon set to black
    """
    # Start with a blank (all-zero) mask the same size as the image.
    # defining a blank mask to start with
    mask = np.zeros_like(img)

    # defining a 3 channel or 1 channel color to fill the mask with depending on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    # Set pixels inside the polygon to the fill colour; everything else stays 0.
    # filling pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    # Bitwise-AND keeps only the pixels covered by the mask.
    # returning the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
def draw_lines(img, lines, color=(255, 0, 0), thickness=2):
    """
    Draw detected lane-line segments onto *img* in place.
    Drawing lane lines
    :param img: image to draw on (modified in place)
    :param lines: segments as returned by cv2.HoughLinesP, i.e. an iterable
        of [[x1, y1, x2, y2]] rows
    :param color: BGR/RGB colour of the drawn segments
    :param thickness: line thickness in pixels
    :return: None
    """
    # NOTE: the default colour was a mutable list ([255, 0, 0]); a tuple
    # avoids the shared-mutable-default pitfall and is accepted by cv2.line.
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    Apply the probabilistic Hough transform and draw the detected segments.
    :param img: binary edge image (output of canny)
    :param rho: distance resolution of the accumulator, in pixels (we use 1)
    :param theta: angle resolution of the accumulator, in radians (we use
        1 degree, i.e. pi/180)
    :param threshold: minimum number of accumulator votes to accept a line
    :param min_line_len: minimum segment length; shorter segments are discarded
    :param max_line_gap: maximum gap between points that may be linked into
        one segment
    :return: black image of the same size with the detected segments drawn on it
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                            minLineLength=min_line_len,
                            maxLineGap=max_line_gap)
    # Draw the segments on a fresh 3-channel black canvas.
    line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(line_img, lines, thickness=8)
    return line_img
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """
    Overlay the lane-line image onto the original frame.
    return_img = initial_img * α + img * β + γ
    :param img: lane-line overlay image
    :param initial_img: original frame
    :param α: weight of the original frame
    :param β: weight of the overlay
    :param γ: scalar added to each pixel
    :return: blended image
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
def process_image(img):
    """
    Lane-detection pipeline for a single frame:
    grayscale -> Gaussian blur -> Canny edges -> region-of-interest mask ->
    Hough transform -> overlay the detected lines on the original frame.
    :param img: RGB frame
    :return: frame with detected lane lines drawn on it
    """
    # Trapezoidal region of interest covering the road ahead.
    roi_vtx = np.array([[(0, img.shape[0]), (460, 325), (520, 325), (img.shape[1], img.shape[0])]])

    blur_kernel_size = 5  # Gaussian blur kernel size
    canny_low_threshold = 50  # Canny edge detection low threshold
    canny_high_threshold = 150  # Canny edge detection high threshold

    # Hough transform parameters
    rho = 1
    theta = np.pi / 180
    threshold = 15
    min_line_length = 40
    max_line_gap = 20

    gray = gray_scale(img)
    blur_gray = gaussian_blur(gray, blur_kernel_size)
    edges = canny(blur_gray, canny_low_threshold, canny_high_threshold)
    roi_edges = region_of_interest(edges, roi_vtx)
    line_img = hough_lines(roi_edges, rho, theta, threshold, min_line_length, max_line_gap)
    res_img = weighted_img(img, line_img, 0.8, 1, 0)
    return res_img
def process_video(input_video, output_video):
    """
    Run the frame-level lane-finding pipeline over a whole video file.
    :param input_video: path of the source video
    :param output_video: path the annotated video is written to
    :return: None
    """
    source_clip = VideoFileClip(input_video)
    annotated_clip = source_clip.fl_image(process_image)
    annotated_clip.write_videofile(output_video, audio=False)
| Flash-zhangliangliang/Flash-LaneLines-P1 | LaneFindingPipline/LaneFinding.py | LaneFinding.py | py | 4,846 | python | en | code | 0 | github-code | 90 |
18536165749 | def main():
s = input()
k = int(input())
d = len(s)
m = set()
if k >= d:
for i in range(d):
for j in range(1, d - i + 1):
m.add(s[i:i + j])
else:
for i in range(d):
for j in range(1, k + 1):
m.add(s[i:i + j])
m = list(m)
m.sort()
print(m[k - 1])
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03353/s819788957.py | s819788957.py | py | 395 | python | en | code | 0 | github-code | 90 |
27631351644 | from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium import webdriver
import time
# Set the URL of the issue page you want to monitor
url = 'https://github.com/lone-wolf45/Webpage-Maker/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc'
# Set the path of your Chrome driver executable
# NOTE(review): \v in this non-raw string is a vertical-tab escape, so the
# path is corrupted -- use a raw string or double the backslashes; confirm.
path='C:\\Users\vedant\OneDrive\Desktop'
# Set the labels you want to monitor
# labels = ["up-for-grabs", "good first issue"]
# Set the path of the file where you want to store the issue information
# NOTE(review): same \v escape problem as `path` above -- confirm.
file_path = 'C:\\Users\\vedant\OneDrive\Desktop\issues.txt'
# Define a function to write the issue information to the file
def write_to_file(issue_info):
    """Append one issue record, followed by a blank separator line, to the log file."""
    with open(file_path, 'a') as log:
        log.write(issue_info + '\n\n')
# Define a function to claim the issue by adding a comment
def claim_issue(driver):
    """Post a "Claiming this issue" comment on the currently open issue page.

    Uses the module-level WebDriverWait instance `wait`; Ctrl+Enter submits
    the GitHub comment form.
    """
    comment_box = wait.until(EC.element_to_be_clickable((By.ID, "new_comment_field")))
    comment_box.send_keys("Claiming this issue", Keys.CONTROL, Keys.RETURN)
# Define a function to check if that issue is already been visited
def search_issue(issue_link):
    """Return True if *issue_link* has not been recorded in the log file yet."""
    with open(file_path, 'r') as log:
        logged_issues = log.read()
    return issue_link not in logged_issues
# Start the browser and open the issue page
driver = webdriver.Chrome(path)
driver.get(url)
# Set up the wait object
wait = WebDriverWait(driver, 20)
# Poll the issue list forever, claiming any unseen "good first issue".
# NOTE(review): the loop never terminates, so driver.quit() at the bottom is
# unreachable.
while True:
    # Find all the issues with the required labels
    issues_label_xpath = "//a[contains(@class,'IssueLabel')]"
    issues_label = wait.until(EC.presence_of_all_elements_located((By.XPATH, issues_label_xpath)))
    for issue_label in issues_label:
        # Check if the issue has the required label
        label = issue_label.text
        # if label in labels:
        if label=="good first issue":
            # Get the URL of the issue and open it
            # Walk two levels up from the label to the issue row, then grab
            # the issue's primary link.
            issue_parent_element = issue_label.find_element(By.XPATH, "..")
            issue_grandparent_element = issue_parent_element.find_element(By.XPATH, "..")
            issue_link_element = issue_grandparent_element.find_element(By.CLASS_NAME, "Link--primary")
            issue_link = issue_link_element.get_attribute('href')
            # Only act on issues we have not logged before.
            if(search_issue(issue_link)):
                driver.get(issue_link)
                # Write the issue information to the file
                issue_title = driver.find_element(By.XPATH, "//bdi[contains(@class, 'js-issue-title')]")
                issue_info = f"{issue_title.text}\n{issue_link}\n\n"
                write_to_file(issue_info)
                # Claim the issue by adding a comment
                claim_issue(driver)
                # break
        else:
            # NOTE(review): this aborts the scan at the FIRST label that is
            # not "good first issue" -- `continue` was probably intended.
            break
        # break
    # Return to the listing page and wait 10 minutes before polling again.
    driver.get(url)
    time.sleep(600)
    # driver.refresh()
driver.quit()
| vedant-z/GitHub-Issue-Claimer | main.py | main.py | py | 3,054 | python | en | code | 1 | github-code | 90 |
29391628313 | import os
import sys
class ConfigDict(dict):
    """A dict persisted to a plain ``key=value`` text file.

    Existing entries are loaded on construction; every assignment rewrites
    the whole file so it always mirrors the in-memory contents.
    """

    def __init__(self, filename):
        self._filename = filename
        if os.path.isfile(self._filename):
            with open(self._filename) as config_file:
                for raw_line in config_file:
                    # Split on the first '=' only, so values may contain '='.
                    key, value = raw_line.rstrip().split('=', 1)
                    super().__setitem__(key, value)

    def __setitem__(self, k, v):
        super().__setitem__(k, v)
        # Persist the complete mapping after every mutation.
        with open(self._filename, 'w') as config_file:
            config_file.writelines(
                '{0}={1}\n'.format(key, value) for key, value in self.items()
            )
if __name__ == "__main__":
    # With exactly two CLI arguments, store key=value; otherwise dump the file.
    cd = ConfigDict('config.txt')
    if len(sys.argv) == 3:
        _, key, value = sys.argv
        print('writing data: {0}, {1}'.format(key, value))
        cd[key] = value
    else:
        print('reading data')
        for key in cd.keys():
            print('    {0} = {1}'.format(key, cd[key]))
70232296618 | import os
import sys
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from terminaltables import DoubleTable
def get_video_filenames(directory):
    """
    Returns a list containing all the mp4 files in a directory.
    The original printed "no mp4 files found" once for EVERY non-mp4 file
    (even when mp4 files were present); now the message is printed once,
    and only when no mp4 file exists.
    :param directory: the directory containing mp4 files
    :return: list of strings
    """
    list_of_videos = [
        filename for filename in os.listdir(directory)
        # .DS_Store is skipped explicitly (macOS folder metadata).
        if filename != ".DS_Store" and filename.endswith(".mp4")
    ]
    if not list_of_videos:
        print("no mp4 files found in directory '{}'".format(directory))
    return list_of_videos
def print_terminal_table(table_data, method_used):
"""
Prints a table with the results in the terminal.
:param table_data: the data of the table
:param method_used: the method used, to print as the table title
:return: None
"""
table = DoubleTable(table_data)
table.title = method_used
table.inner_heading_row_border = False
table.inner_row_border = True
print(table.table)
def print_finished_training_message(answer, model, runtime, accuracy=None):
"""
Prints a message at the end of the training function.
:param answer: the matched video name
:param model: the histogram model used for training
:param runtime: the time elapsed in seconds
:param accuracy: the accuracy of the classifier in % (True Positives / Number of Matches)
:return: None
"""
print("\n\nGenerated " + "\x1b[1;31m" + "{}".format(model) + "\x1b[0m" + " histograms for all videos")
if accuracy is not None:
print("\n\n" + "\x1b[1;31m" + "MATCH FOUND: {}".format(answer) + "\x1b[0m")
print("\n--- Runtime: {} seconds ---".format(runtime))
if accuracy is not None:
print("--- Accuracy: {} % ---".format(round(accuracy * 100, 2)))
def get_video_first_frame(video, path_output_dir, is_query=False, is_result=False):
"""
Retrieves the first frame from a video and saves it as a PNG.
:param video: the path to the video
:param path_output_dir: the directory to save the frame in
:param is_query: write first frame for query
:param is_result: write first frame for matched video
:return: None
"""
vc = cv2.VideoCapture(video)
frame_counter = 0
while vc.isOpened():
ret, image = vc.read()
if ret and frame_counter == 0:
if is_query:
cv2.imwrite(os.path.join(path_output_dir, "query.png"), image)
elif is_result:
cv2.imwrite(os.path.join(path_output_dir, "result.png"), image)
frame_counter += 1
else:
break
cv2.destroyAllWindows()
vc.release()
def show_final_match(result_name, query_frame, result_frame, runtime, accuracy):
"""
Plots the query image and the matched video.
:param result_name: the name of the matched video
:param query_frame: the query image
:param result_frame: the matched video's image
:param runtime: the time elapsed in seconds
:param accuracy: the accuracy of the classifier in % (True Positives / Number of Matches)
:return: None
"""
query_img = mpimg.imread(query_frame)
result_img = mpimg.imread(result_frame)
plt.subplot(2, 1, 1)
plt.imshow(query_img)
plt.title("Original Query Video", fontSize=16), plt.xticks([]), plt.yticks([])
plt.subplot(2, 1, 2)
plt.imshow(result_img)
plt.title(
"Match '{}' found in {}s with {}% accuracy".format(result_name, runtime, round(accuracy * 100, 2)),
fontSize=13)
plt.xticks([])
plt.yticks([])
plt.show()
def display_results_histogram(results_dict):
"""
Displays the results in the form of a histogram.
:param results_dict: the histogram with results and the number of matches per video
:return: None
"""
fig = plt.figure()
ax = fig.add_subplot(111)
ax.bar(list(results_dict.keys()), results_dict.values())
plt.title("Probability of a match for most likely videos")
plt.ylabel("%")
plt.tight_layout()
plt.setp(ax.get_xticklabels(), fontsize=10, rotation='vertical')
plt.show()
def get_number_of_frames(vc):
"""
Retrieves the total number of frames in a video using OpenCV's VideoCapture object cv2.CAP_PROP_FRAME_COUNT
attribute.
:param vc: the video capture
:return: the number of frames in the video capture
"""
return int(vc.get(cv2.CAP_PROP_FRAME_COUNT))
def get_video_fps(vc):
"""
Retrieves the frame rate (Frames Per Second) of a video using OpenCV's VideoCapture object cv2.CAP_PROP_FPS
attribute.
:param vc: the video capture
:return: the video capture's FPS
"""
return round(vc.get(cv2.CAP_PROP_FPS), 2)
def terminal_yes_no_question(question, default="no"):
"""
Ask a yes/no question via input() and return the answer as a boolean.
:param question: string that is presented in the terminal
:param default: presumed answer if <Enter> directly hit with no answer
:return: True for "yes" or False for "no"
"""
valid = {"yes": True, "y": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
def video_file_already_stabilised(filepath):
"""
Checks if the path to a stable version of the video already exists.
:param filepath: the path to the video
:return: True if it exists, False if it doesn't
"""
if os.path.isfile(filepath):
return True
return False
| Adamouization/Content-Based-Video-Retrieval-Code | app/helpers.py | helpers.py | py | 6,090 | python | en | code | 15 | github-code | 90 |
23991340336 | from flask import Flask, render_template, request
import json
app = Flask(__name__)
# Reload templates from disk on every request (development convenience).
app.config["TEMPLATES_AUTO_RELOAD"] = True
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
@app.route("/")
def index():
questions = refresh()
return render_template("index.html", questions=questions)
@app.route('/test', methods=['POST'])
def test():
output = request.get_json()
print(output) # This is the output that was stored in the JSON within the browser
print(type(output))
result = json.loads(output) #this converts the json output to a python dictionary
print(result) # Printing the new dictionary
print(type(result))#this shows the json converted as a python dictionary
saveQuestion(output)
return result
def refresh():
    """Load all questions from questions.txt (one JSON object per line),
    sorted ascending by their 'time' field.

    :return: list of question dicts
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open("questions.txt", "r") as question_file:
        questions = [json.loads(line) for line in question_file]
    questions = sorted(questions, key=lambda i: i['time'])
    print(questions)
    return questions
def saveQuestion(q):
    """Append one question to questions.txt as a single normalised JSON line.

    :param q: the question as a JSON-encoded string
    """
    # Parse before opening the file: if q is invalid JSON we raise without
    # touching the file (the original opened first and leaked the handle).
    question_dict = json.loads(q)
    with open("questions.txt", "a") as question_file:
        question_file.write(json.dumps(question_dict) + "\n")
    return
3416383134 | #Imports
import pandas as pd
import matplotlib.pyplot as plt
import requests
#Global Variables
url = "https://api.coincap.io/v2/assets"
tracked_currencies = ['bitcoin', 'ethereum']
#Functions
def get_data():
resp = requests.get(url)
if resp.status_code == 200:
data = resp.json()['data']
export = []
for n in range(len(data)):
for target in tracked_currencies:
currency = data[n]['id']
if target == currency:
price = data[n]['priceUsd']
pair = (currency, price)
export.append(pair)
return export
else:
return f"something went wrong! Error - {str(resp.status_code)}"
def daily_check():
export = []
for target in tracked_currencies:
daily = f"https://api.coincap.io/v2/assets/{target}/history?interval=d1"
resp = requests.get(daily)
if resp.status_code == 200:
data = resp.json()['data']
for n in range(len(data)):
currency = target
price = data[n]['priceUsd']
pair = {"currenncy":currency,"price":price}
export.append(pair)
else:
return f"something went wrong! Error - {str(resp.status_code)}"
return export
def hourly_check():
export = []
for target in tracked_currencies:
hourly = f"https://api.coincap.io/v2/assets/{target}/history?interval=h1"
resp = requests.get(hourly)
if resp.status_code == 200:
data = resp.json()['data']
for n in range(len(data)):
currency = target
price = data[n]['priceUsd']
pair = (currency, price)
export.append(pair)
else:
return f"something went wrong! Error - {str(resp.status_code)}"
return export
def create_df(data):
if data is not str:
df = pd.DataFrame(data)
return df
#tratar erro
def plot_data(x, y, title):
graph = plt.plot(x, y)
graph.title(title)
return graph
DF = create_df(daily_check())
print(dir(DF))
DF.plot()
| AlmirPaulo/crypto_tracker | tracker.py | tracker.py | py | 2,152 | python | en | code | 0 | github-code | 90 |
46182304420 | import database as db
from tkinter import messagebox
class Product:
def __init__(self, name="", price=0.0, quantity=0, discount=0, percentoff=0):
self.name = name
self.originalprice = price
self.quantity = quantity
self.discount = discount
self.percentoff = percentoff
self.finalprice = round(price*(1-percentoff), 2)
if quantity == 0:
self.stock = "Sold Out."
elif quantity < 10:
self.stock = "Almost Gone!"
else:
self.stock = "In Stock."
class LineItem:
def __init__(self, name=None, price=0, qty=0):
self.name = name
self.price = price
self.orderQty = qty
class Cart:
def __init__(self):
self.lineItems = []
data = db.listCart()
for product in data:
item = LineItem(product[0], product[1], product[2])
self.lineItems.append(item)
def check(self, name):
inList = -1
i = -1
for lineItem in self.lineItems:
i += 1
if name == lineItem.name:
inList = i
return inList
def AddItem(self, name, price, qty=1):
if qty == "":
qty = 1
else:
qty = int(qty)
indb = db.getQty(name)
if indb<0:
messagebox.showinfo("Cart Message", "Item not found")
elif indb == 0:
messagebox.showinfo("Cart Message", "Item is out of stock")
elif int(indb) < int(qty):
messagebox.showinfo("Cart Message", "There are only "+str(indb)+" items left.")
else:
inCart = db.checkCart(name)
if inCart > 0:
db.editCart(name, inCart+qty)
else:
db.addtoCart(name, price, qty)
messagebox.showinfo("Cart Message", "Added to cart")
def RemoveItem(self, name, qty=1):
inCart = int(db.checkCart(name))
if qty == "":
qty = 1
else:
qty = int(qty)
if inCart > qty:
db.editCart(name, inCart - qty)
else:
db.removeFromCart(name)
messagebox.showinfo("Cart Message", "Removed from cart")
def removeItem(self, name, qty=1):
inList = self.check(name)
if self.lineItems[inList].orderQty <= qty:
self.lineItems.pop(inList)
db.removeFromCart(name)
else:
newQty = self.lineItems[inList].orderQty - qty
self.lineItems[inList].removefromOrder(qty)
db.editCart(name, newQty)
def getTotal(self):
subtotal = 0.00
for item in self.lineItems:
subtotal += round(item.price*item.orderQty, 2)
tax = round(subtotal*0.07, 2)
total = round(tax + subtotal, 2)
totals = [subtotal, tax, total]
return totals
def getItemCount(self):
return db.cartCount()
def __iter__(self):
self.__index = -1
return self
def __next__(self):
if self.__index == len(self.lineItems)-1:
raise StopIteration
self.__index += 1
lineItem = self.lineItems[self.__index]
return lineItem
def decrementProd(name, Qty):
qty = db.getQty(name)
if qty > 0:
qty -= Qty
db.subtractfromProds(name, qty)
| djricky5/HFSShoppingCart | business.py | business.py | py | 3,332 | python | en | code | 0 | github-code | 90 |
4302288909 | import numpy as np
import stellargraph as sg
from keras import Sequential
from keras.layers import Dense, Dropout
from keras.models import Model
from sklearn.metrics import accuracy_score
from tensorflow.keras import losses
from sklearn import model_selection
from stellar_graph_demo.visualisation import tsne_plot_embedding
def create_train_val_test_datasets_mlp(features, targets):
"""
Splits the dataset (features + targets) with stratification according to the following proportions :
Train: 271, Validation: 500, Test: 1937
"""
train_features, test_features, train_targets, test_targets = model_selection.train_test_split(
features, targets, train_size=0.1, test_size=None, stratify=targets
)
val_features, test_features, val_targets, test_targets = model_selection.train_test_split(
test_features, test_targets, train_size=500, test_size=None, stratify=test_targets
)
return train_features, val_features, test_features, train_targets, val_targets, test_targets
def get_mlp_model(input_size, num_labels):
"""Builds the baseline model - 2-layer MLP that takes the initial node features as input"""
model = Sequential()
model.add(Dense(32, input_dim=input_size, activation='relu', name='embedding_layer'))
model.add(Dropout(0.5))
model.add(Dense(num_labels, activation='softmax'))
model.compile(
optimizer='adam',
loss=losses.categorical_crossentropy,
metrics=["acc"],
)
return model
def train_mlp_model(model,
                    train_features,
                    train_targets,
                    val_features,
                    val_targets):
    """Fit the MLP for 200 epochs (batch size 32) and plot the training curves."""
    fit_history = model.fit(
        x=train_features,
        y=train_targets,
        epochs=200,
        batch_size=32,
        validation_data=(val_features, val_targets),
        verbose=2,
        shuffle=True,
    )
    # Learning-curve plot (loss/accuracy) via StellarGraph's helper.
    sg.utils.plot_history(fit_history)
    return model
def evaluate_mlp_model_on_test_dataset(model, test_features, test_targets):
    """Print the accuracy of the pre-trained MLP on the held-out test split."""
    predicted_labels = np.argmax(model.predict(test_features), axis=1)
    expected_labels = np.argmax(test_targets, axis=1)
    test_acc = accuracy_score(expected_labels, predicted_labels)
    print(f"Test Set Accuracy: {test_acc}")
def visualise_mlp_embedding(model, features, targets, indices):
    """Visualises the first layer of the MLP via TSNE, coloured by ground-truth labels."""
    labels = np.argmax(targets, axis=1)
    # Truncate the trained model at the hidden layer to obtain embeddings.
    hidden = Model(
        inputs=model.input,
        outputs=model.get_layer('embedding_layer').output
    )
    tsne_plot_embedding(
        X=hidden.predict(features),
        y=labels,
        indices=indices,
        model_name='MLP'
    )
def visualise_initial_embedding(features, targets, indices):
    """Visualises the raw (pre-training) node features via TSNE, coloured by
    ground-truth labels.

    Fix: the original docstring was copy-pasted from visualise_mlp_embedding
    and wrongly claimed this plots an MLP layer; no model is involved here.
    """
    gt_labels = np.argmax(targets, axis=1)
    tsne_plot_embedding(
        X=features,
        y=gt_labels,
        indices=indices,
        model_name='MLP'
    )
| CuriousKomodo/gnn_experiments | stellar_graph_demo/baseline/train_mlp_functions.py | train_mlp_functions.py | py | 3,222 | python | en | code | 4 | github-code | 90 |
18154903979 | import bisect, copy, heapq, math, sys
from collections import *
from functools import lru_cache
from itertools import accumulate, combinations, permutations, product
def input():
    # Competitive-programming speedup: shadow builtins.input with a direct
    # stdin read; [:-1] drops the trailing newline.
    return sys.stdin.readline()[:-1]
def ruiseki(lst):
    """Prefix sums of lst with a leading 0 (result length is len(lst) + 1)."""
    sums = [0]
    for value in lst:
        sums.append(sums[-1] + value)
    return sums
def celi(a, b):
    """Ceiling division: the smallest integer >= a / b (integer arguments)."""
    # -(a // -b) is the standard exact integer-ceiling identity.
    return -(a // -b)
sys.setrecursionlimit(5000000)
# Template constants; mod/al/direction are unused in this solution.
mod=pow(10,9)+7
al=[chr(ord('a') + i) for i in range(26)]
direction=[[1,0],[0,1],[-1,0],[0,-1]]
# Input: n terms, seed x, modulus m. The sequence is a_1 = x,
# a_{i+1} = a_i^2 mod m; we print the sum of the first n terms.
n,x,m=map(int,input().split())
cnt=1
now=x
ans=x
# jisyo maps a seen value -> [running sum when it was first seen, its index];
# used to detect the cycle the squaring sequence must eventually enter.
jisyo=defaultdict(list)
jisyo[now]=[ans,cnt]
while cnt<n:
    nex=now**2%m
    cnt+=1
    ans+=nex
    now=nex
    if nex in jisyo:
        tmp=jisyo[nex]
        # print(tmp)
        # Cycle detected: add the cycle sum (ans - tmp[0]) once per whole
        # cycle remaining, then advance cnt so only a partial tail is left.
        ans+=(ans-tmp[0])*((n-cnt)//(cnt-tmp[1]))
        cnt=n-(n-cnt)%(cnt-tmp[1])
        break
    else:
        jisyo[now]=[ans,cnt]
    # print(ans,cnt,now)
# Walk the remaining partial-cycle terms one by one.
for i in range(n-cnt):
    now=now**2%m
    ans+=now
print(ans)
26541269495 | import tensorflow as tf
class LayerNormLSTMCell(tf.keras.layers.LSTMCell):
    """LSTMCell variant that layer-normalizes the input projection, the
    recurrent projection, and the cell state before the output activation.

    Constructor arguments mirror tf.keras.layers.LSTMCell; the extra
    norm_* arguments configure the three LayerNormalization sub-layers.
    """
    def __init__(
        self,
        units,
        activation = "tanh",
        recurrent_activation = "sigmoid",
        use_bias= True,
        kernel_initializer= "glorot_uniform",
        recurrent_initializer = "orthogonal",
        bias_initializer= "zeros",
        unit_forget_bias= True,
        kernel_regularizer= None,
        recurrent_regularizer = None,
        bias_regularizer= None,
        kernel_constraint = None,
        recurrent_constraint= None,
        bias_constraint= None,
        dropout = 0.0,
        recurrent_dropout = 0.0,
        norm_gamma_initializer = "ones",
        norm_beta_initializer = "zeros",
        norm_epsilon = 1e-3,
        **kwargs
    ):
        super().__init__(
            units,
            activation=activation,
            recurrent_activation=recurrent_activation,
            use_bias=use_bias,
            kernel_initializer=kernel_initializer,
            recurrent_initializer=recurrent_initializer,
            bias_initializer=bias_initializer,
            unit_forget_bias=unit_forget_bias,
            kernel_regularizer=kernel_regularizer,
            recurrent_regularizer=recurrent_regularizer,
            bias_regularizer=bias_regularizer,
            kernel_constraint=kernel_constraint,
            recurrent_constraint=recurrent_constraint,
            bias_constraint=bias_constraint,
            dropout=dropout,
            recurrent_dropout=recurrent_dropout,
            **kwargs,
        )
        # Configuration for the three normalization sub-layers.
        self.norm_gamma_initializer = tf.keras.initializers.get(norm_gamma_initializer)
        self.norm_beta_initializer = tf.keras.initializers.get(norm_beta_initializer)
        self.norm_epsilon = norm_epsilon
        # One LayerNormalization each for W_x*x, W_h*h, and the cell state c.
        self.kernel_norm = self._create_norm_layer("kernel_norm")
        self.recurrent_norm = self._create_norm_layer("recurrent_norm")
        self.state_norm = self._create_norm_layer("state_norm")
    def build(self, input_shape):
        # No weights beyond the parent cell's; the norm sub-layers
        # presumably build lazily on first call -- TODO confirm.
        super().build(input_shape)
    def call(self, inputs, states, training=None):
        """One normalized LSTM step: z = LN(W_x*x) + LN(W_h*h) (+ bias),
        standard fused gate computation, then LN on the new cell state."""
        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state
        dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
        rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(h_tm1, training, count=4)
        if 0.0 < self.dropout < 1.0:
            inputs *= dp_mask[0]
        z = self.kernel_norm(tf.keras.backend.dot(inputs, self.kernel))
        if 0.0 < self.recurrent_dropout < 1.0:
            h_tm1 *= rec_dp_mask[0]
        z += self.recurrent_norm(tf.keras.backend.dot(h_tm1, self.recurrent_kernel))
        if self.use_bias:
            z = tf.keras.backend.bias_add(z, self.bias)
        # Split into the four gate pre-activations (i, f, c, o).
        z = tf.split(z, num_or_size_splits=4, axis=1)
        c, o = self._compute_carry_and_output_fused(z, c_tm1)
        c = self.state_norm(c)
        h = o * self.activation(c)
        return h, [h, c]
    def get_config(self):
        # Serialize the extra norm arguments on top of the parent config.
        config = {
            "norm_gamma_initializer": tf.keras.initializers.serialize(
                self.norm_gamma_initializer
            ),
            "norm_beta_initializer": tf.keras.initializers.serialize(
                self.norm_beta_initializer
            ),
            "norm_epsilon": self.norm_epsilon,
        }
        base_config = super().get_config()
        return {**base_config, **config}
    def _create_norm_layer(self, name):
        # Helper: a LayerNormalization with this cell's configured
        # initializers and epsilon.
        return tf.keras.layers.LayerNormalization(
            beta_initializer=self.norm_beta_initializer,
            gamma_initializer=self.norm_gamma_initializer,
            epsilon=self.norm_epsilon,
            name=name,
        )
| Z-yq/TensorflowASR | asr/models/layers/LayerNormLstmCell.py | LayerNormLstmCell.py | py | 3,688 | python | en | code | 448 | github-code | 90 |
71726688618 | import random
import os
from datetime import datetime
jug1 = "Jugador 1"
jug2 = "Jugador 2"
def crear_bitacora(directory: str = "/Users/robjimn/Documents/Roberto Rojas/Cenfotec/Principios Programación/Proyecto/bitacora") -> str:
    """Create a timestamped log ("bitácora") file and write its header line.

    Fixes vs. the original: the docstring wrongly described random_boolean(),
    and the output directory was hard-coded; it is now a parameter whose
    default is the original path, so existing callers are unaffected while
    the function becomes portable and testable.

    Args:
        directory: folder in which the log file is created.

    Returns:
        Full path of the newly created log file.
    """
    tiempo_actual = datetime.now()
    # Timestamp string used in the file name (day-month-year_hour-min-sec).
    timestamp_str = tiempo_actual.strftime("%d%m%Y_%H%M%S")
    file_name = f"Blackjack_Bitácora_{timestamp_str}.txt"
    file_path = os.path.join(directory, file_name)
    with open(file_path, "w") as file:
        file.write(f"Bitácora: {tiempo_actual}\nPD: Las horas se muestran en formato de 24 horas\n")
    return file_path
def agregar_accion_bitacora(dir_bitacora, msj_bitacora):
    """Append `msj_bitacora` to the log file, prefixed with a 12-hour timestamp."""
    stamp = datetime.now().strftime("%I:%M:%S %p")
    with open(dir_bitacora, "a") as log:
        log.write(f"- {stamp} - {msj_bitacora}\n")
def crear_baraja(dir_bitacora: str) -> list:
    """Build the 52-card deck (four copies of each rank, rank-major order)
    and log its creation to the bitacora file."""
    ranks = ["2", "3", "4", "5", "6", "7", "8", "9", "10", "J", "Q", "K", "A"]
    baraja = [rank for rank in ranks for _ in range(4)]
    agregar_accion_bitacora(dir_bitacora, "Se ha creado la baraja.\n")
    return baraja
def print_baraja(baraja: list, dir_bitacora: str):
    """Print the deck four cards per line and log that it was displayed."""
    for start in range(0, len(baraja), 4):
        row = baraja[start:start + 4]
        # str() each entry: drawn slots are the int 0, the rest are strings.
        print(', '.join(str(card) for card in row))
    agregar_accion_bitacora(dir_bitacora, "Se ha solicitado mostrar la baraja.\n")
def convert_ace(mano_jug:list, jugador: str, dir_bitacora: str) -> list:
    """
    Replace each "A" in the player's hand with the int 1 or 11, chosen
    interactively by the user, logging every choice. Mutates and returns
    `mano_jug`.

    NOTE(review): if the user types anything other than "1" or "11" the
    ace stays as the string "A" but the choice is still logged -- confirm
    whether invalid input should re-prompt instead.
    """
    for index in range(len(mano_jug)):
        if mano_jug[index] == "A":
            decision = input("Qué valor desea darle al As, 1 o 11?\n")
            if decision == "1":
                mano_jug[index] = 1
            elif decision == "11":
                mano_jug[index] = 11
            msj_bitacora = f"{jugador} le ha dado el valor de {mano_jug[index]} a su A's"
            agregar_accion_bitacora(dir_bitacora, msj_bitacora)
    return mano_jug
def sumar_mano(mano_jugador: list) -> int:
    """Sum the values of a hand.

    Face cards ("J", "Q", "K") count as 10; every other entry -- numeric
    card strings such as "10", or the ints 1/11 produced by convert_ace
    for aces -- is converted with int().

    Bug fixed: the face-card list previously contained the string "1",
    which would have valued a literal "1" card at 10 instead of 1.
    """
    total_mano = 0
    for valor_actual in mano_jugador:
        if valor_actual in ("J", "Q", "K"):
            total_mano += 10
        else:
            total_mano += int(valor_actual)
    return total_mano
def print_mano(jugador: str, mano_jugador: list):
    """Print the player's hand as a numbered list, framed by blank lines."""
    print(f"\nCartas del {jugador}:")
    for numero, carta in enumerate(mano_jugador, start=1):
        print(f"{numero}: {carta}")
    print("")
def play_J1(baraja: list, dir_bitacora: str) -> dict:
    """
    Deal the opening two-card hand for player 1.

    Random deck slots are sampled until two live cards are drawn; drawn
    slots are zeroed in place (the deck list is mutated). Aces are valued
    interactively via convert_ace, the hand total is logged, and a dict
    with the total, the mutated deck and the hand is returned.
    """
    play_in = True
    data_J1 = {}
    mano_J1 = []
    total = 0
    baraja_actual = baraja
    jug1 = "Jugador 1"
    while play_in:
        carta_aleatoria = random.randint(0,51)
        # A zero slot means the card was already drawn; keep sampling.
        if baraja_actual[carta_aleatoria] != 0:
            mano_J1.append(baraja_actual[carta_aleatoria])
            enviar_msj_bitacora(baraja_actual[carta_aleatoria], dir_bitacora)
            baraja_actual[carta_aleatoria] = 0
            if len(mano_J1) == 2:
                play_in = False
    print_mano(jug1, mano_J1)
    # If the hand contains aces, let the user choose 1 or 11 for each.
    cant_ace = mano_J1.count("A")
    if cant_ace >= 1:
        mano_J1 = convert_ace(mano_J1, jug1, dir_bitacora)
    total = sumar_mano(mano_J1)
    msj_bitacora = f"La mano del {jug1} suma un total de {total}\n"
    agregar_accion_bitacora(dir_bitacora, msj_bitacora)
    data_J1 = {
        "total_mano_J1": total,
        "baraja_actual": baraja_actual,
        "mano_J1": mano_J1
    }
    return data_J1
def play_J2(baraja: list, dir_bitacora: str) -> dict:
    """Deal the opening two-card hand for player 2.

    Mirrors play_J1: samples random deck slots until two live cards are
    drawn (drawn slots are zeroed in place), values aces interactively,
    logs the hand total, and returns the total, the mutated deck and the
    hand in a dict.

    Cleanups vs. the original: the no-op branch (`if slot == 0: ""`) was
    inverted into a positive condition, and the unused `cant_zeros`
    variable plus a dead `baraja_actual` reassignment after the result
    dict was built were removed. Behavior is unchanged.
    """
    data_J2 = {}
    mano_J2 = []
    baraja_actual = baraja
    jug2 = "Jugador 2"
    play_in = True
    while play_in:
        carta_aleatoria = random.randint(0, 51)
        # A zero slot means the card was already drawn; keep sampling.
        if baraja_actual[carta_aleatoria] != 0:
            mano_J2.append(baraja_actual[carta_aleatoria])
            enviar_msj_bitacora(baraja_actual[carta_aleatoria], dir_bitacora)
            baraja_actual[carta_aleatoria] = 0
            if len(mano_J2) == 2:
                play_in = False
    print_mano(jug2, mano_J2)
    # If the hand contains aces, let the user choose 1 or 11 for each.
    if mano_J2.count('A') >= 1:
        mano_J2 = convert_ace(mano_J2, jug2, dir_bitacora)
    total = sumar_mano(mano_J2)
    agregar_accion_bitacora(dir_bitacora, f"La mano del {jug2} suma un total de {total}\n")
    data_J2 = {
        "total_mano_J2": total,
        "baraja_actual": baraja_actual,
        "mano_J2": mano_J2
    }
    return data_J2
def jugar_extra_1(dict_J1: dict, dir_bitacora: str) -> dict:
    """Draw one extra card ("hit") for player 1 and return the updated state.

    Bug fixed: the original sampled a single random slot and, if that slot
    had already been drawn (value 0), silently returned without dealing a
    card. It now keeps sampling until a live slot is found, as long as the
    deck still has cards (guard prevents an infinite loop on an empty deck).
    """
    baraja_actual = dict_J1["baraja_actual"]
    mano_J1 = dict_J1["mano_J1"]
    if any(carta != 0 for carta in baraja_actual):
        while True:
            carta_aleatoria = random.randint(0, 51)
            if baraja_actual[carta_aleatoria] != 0:
                mano_J1.append(baraja_actual[carta_aleatoria])
                enviar_msj_bitacora(baraja_actual[carta_aleatoria], dir_bitacora)
                baraja_actual[carta_aleatoria] = 0
                break
        print_mano(jug1, mano_J1)
        # If the hand contains aces, let the user choose 1 or 11 for each.
        if mano_J1.count("A") >= 1:
            mano_J1 = convert_ace(mano_J1, jug1, dir_bitacora)
    total = sumar_mano(mano_J1)
    return {
        "total_mano_J1": total,
        "baraja_actual": baraja_actual,
        "mano_J1": mano_J1
    }
def jugar_extra_2(dict_J2: dict, dir_bitacora: str) -> dict:
    """Draw one extra card ("hit") for player 2 and return the updated state.

    Bug fixed: the original sampled a single random slot and, if that slot
    had already been drawn (value 0), silently returned without dealing a
    card. It now keeps sampling until a live slot is found, as long as the
    deck still has cards (guard prevents an infinite loop on an empty deck).
    """
    baraja_actual = dict_J2["baraja_actual"]
    mano_J2 = dict_J2["mano_J2"]
    if any(carta != 0 for carta in baraja_actual):
        while True:
            carta_aleatoria = random.randint(0, 51)
            if baraja_actual[carta_aleatoria] != 0:
                mano_J2.append(baraja_actual[carta_aleatoria])
                enviar_msj_bitacora(baraja_actual[carta_aleatoria], dir_bitacora)
                baraja_actual[carta_aleatoria] = 0
                break
        print_mano(jug2, mano_J2)
        # If the hand contains aces, let the user choose 1 or 11 for each.
        if mano_J2.count("A") >= 1:
            mano_J2 = convert_ace(mano_J2, jug2, dir_bitacora)
    total = sumar_mano(mano_J2)
    return {
        "total_mano_J2": total,
        "baraja_actual": baraja_actual,
        "mano_J2": mano_J2
    }
def obtener_resultado(total_pts_J1: int, total_pts_J2: int, dir_bitacora: str) -> str:
    """Build the end-of-game result message, log it, and return it."""
    if total_pts_J1 == total_pts_J2:
        msj = "Esto es un empate"
        registro = f"El {jug1} y el {jug2} han empatado!\n"
    elif total_pts_J1 > total_pts_J2:
        msj = f"Ganador del juego: {jug1}"
        registro = f"El {jug1} ha ganado y el {jug2} perdió el juego!\n"
    else:
        msj = f"Ganador del juego: {jug2}"
        registro = f"El {jug2} ha ganado y el {jug1} perdió el juego!\n"
    agregar_accion_bitacora(dir_bitacora, registro)
    return msj
def total_pts(total_J1: int, total_J2: int) -> dict:
    """Compute both players' round points from the two hand totals."""
    return {
        'pts_J1': calcular_pts_individuales(total_J1, total_J2),
        'pts_J2': calcular_pts_individuales(total_J2, total_J1),
    }
def calcular_pts_individuales(total_jug, total_adv):
    """Points earned by one player given both hand totals.

    Exactly 21 scores 6 (3 on a 21-21 tie); 17-20 scores 2 when the
    opponent busts or is beaten; below 17 scores 1 only when beating the
    opponent; everything else (including a bust) scores 0.
    """
    blackjack = 21
    if total_jug == blackjack:
        return 3 if total_adv == blackjack else 6
    if 17 <= total_jug <= 20:
        return 2 if (total_adv > blackjack or total_jug > total_adv) else 0
    if total_jug < 17 and total_jug > total_adv:
        return 1
    return 0
def enviar_msj_bitacora(carta_aleatoria, dir_bitacora):
    """
    Log the drawn card and its value to the bitacora file: face cards
    (J/Q/K) are logged with value 10, any other card with its own value.

    NOTE(review): both branches tag the entry with the module-level `jug1`
    ("Jugador 1") even when called while dealing for player 2 -- confirm
    whether the current player should be passed in instead.
    """
    if carta_aleatoria in ["J", "Q", "K"]:
        msj_bitacora = f"{jug1} - Carta: '{carta_aleatoria}' y su valor es '10'.\n"
        agregar_accion_bitacora(dir_bitacora, msj_bitacora)
    else:
        msj_bitacora = f"{jug1} - Carta: '{carta_aleatoria}' y su valor es '{carta_aleatoria}.'\n"
        agregar_accion_bitacora(dir_bitacora, msj_bitacora)
def random_boolean() -> bool:
    """Biased coin flip: return True with probability 0.4."""
    return random.random() <= 0.4
# /Users/robjimn/Documents/Roberto Rojas/Cenfotec/Principios Programación/Proyecto/bitacora
# -- FUNCIONES DE PRUEBAS
# def crear_baraja() -> list:
# """
# función: crear_baraja() -- PRUEBAS
# descripción: Crea la baraja con todos sus valores
# params: none
# """
# # baraja = [
# # "2", "2", "2", "2",
# # "3", "3", "3", "3",
# # "4", "4", "4", "4",
# # "5", "5", "5", "5",
# # "6", "6", "6", "6",
# # "7", "7", "7", "7",
# # "8", "8", "8", "8",
# # "9", "9", "9", "9",
# # "10", "10", "10", "10",
# # "J", "J", "J", "J",
# # "Q", "Q", "Q", "Q",
# # "K", "K", "K", "K",
# # "A", "A", "A", "A"]
# # Baraja de prueba
# baraja = [
# 0, 0, 0, "2",
# 0, 0, 0, 0,
# 0, 0, 0, 0,
# 0, 0, "5", 0,
# 0, 0, 0, 0,
# 0, 0, 0, 0,
# "8", 0, 0, 0,
# 0, 0, 0, 0,
# 0, 0, 0 ,0,
# 0, 0, 0, 0,
# 0, "Q", 0, 0,
# 0, 0, 0, 0,
# 0, 0, 0, 0]
# return baraja | rrojasj/BlackJack | Code/black_jack_functions.py | black_jack_functions.py | py | 11,850 | python | es | code | 0 | github-code | 90 |
1987460595 | '''
PASA_parser was used to parse the gff3 file of Gene Structure Annotation and Analysis Using PASA and reslut into sorted gff3 and pep file.
'''
import sys
import re
# CLI: input PASA gff3, output protein FASTA, output renamed gff3.
input_file = sys.argv[1]
pep_out = sys.argv[2]
gff_out = sys.argv[3]
# gff_dict: "chrom_start_end" gene key -> list of tab-joined feature lines
# (the representative protein line is appended last, tagged 'represnt_pep').
gff_dict = dict()
uniq_id_list = list()
with open(input_file, 'r') as gff3:
    for line in gff3:
        if line == '\n':
            continue
        elif line.startswith('# ORIGINAL') or line.startswith('# PASA_UPDATE'):
            # A new gene record begins; reset the per-gene accumulators.
            gff_key = ''
            temp_pep = ''
            temp = {}
        elif line.startswith('chr'):
            line_spl = line.split("\t")
            # Force the source column to 'PASA' and replace the attribute
            # column with a placeholder (rewritten in the second pass).
            line_spl[1] = 'PASA'
            gff_value = '\t'.join(line_spl[:-1]) + "\tPlaceholders"
            if line_spl[2] == 'gene':
                chrom = line_spl[0]
                start = line_spl[3]
                end = line_spl[4]
                gff_key = chrom + '_' + start +'_' + end
                temp = {'chrom' : chrom, 'start' : start, 'end' : end}
                uniq_id_list.append(temp)
                gff_dict[gff_key] = [gff_value]
            else:
                gff_dict[gff_key].append(gff_value)
        elif line.startswith('#PROT'):
            pep_spl = line.split("\t")
            pep_seq = pep_spl[1]
            # if this is the first prot
            if len(temp_pep) == 0:
                temp_pep = 'represnt_pep' + "\t" + pep_seq
                gff_dict[gff_key].append(temp_pep)
            elif len(pep_seq) > len(temp_pep):
                # Keep only the longest protein as the representative.
                temp_pep = 'represnt_pep' + "\t" + pep_seq
                gff_dict[gff_key][-1] = temp_pep
# Sort genes by chromosome (lexicographic) then numeric start coordinate.
sorted_list = sorted(uniq_id_list, key=lambda k: (k['chrom'], int(k['start'])))
# Per-gene feature counters used to build hierarchical IDs.
count = 0
mRNA = 0
cds = 0
exon = 0
UTR_5 = 0
UTR_3 = 0
prefix = "Cp"
for i in range(len(sorted_list)):
    temp = sorted_list[i]
    gff_key = temp['chrom'] + '_' + temp['start'] + '_' + temp['end']
    temp_list = gff_dict[gff_key]
    # NOTE(review): the inner loop reuses the name `i`, shadowing the outer
    # index; harmless here (the outer `i` is not read afterwards) but fragile.
    for i in range(len(temp_list)):
        line = temp_list[i]
        records = line.split("\t")
        if re.search(r"\tgene\t", line):
            # New gene: IDs step by 10 and feature counters reset.
            count = count + 10
            mRNA = 0
            UTR_5 = 0
            UTR_3 = 0
            chr_num = records[0]
            #gene_id = chr_num + '_' + str(count).zfill(7)
            gene_id = prefix + '_' + chr_num + '_' + str(count).zfill(6)
            pep_id = ">" + gene_id + "\t" + "gene=" + gene_id + "\n"
            records[8] = "ID={};Name={}".format(gene_id, gene_id)
        elif line.startswith('represnt_pep'):
            # Emit the representative protein (stop '*' stripped) as FASTA.
            pep_spl = line.split("\t")
            pep_seq = pep_spl[1].strip("*")
            pep_records = pep_id + pep_seq
            with open(pep_out, "a") as pep_file:
                pep_file.write(pep_records)
            pep_id = ''
            records = ''
        elif re.search(r"\tmRNA\t", line):
            cds = 0
            exon = 0
            mRNA = mRNA + 1
            mRNA_id = gene_id + "." + str(mRNA)
            records[8] = "ID={};Parent={};Name={}".format(mRNA_id, gene_id, mRNA_id)
        elif re.search(r"\texon\t", line):
            exon = exon + 1
            exon_id = mRNA_id + "_exon_" + str(exon)
            records[8] = "ID={};Parent={};Name={}".format(exon_id, mRNA_id, exon_id)
        elif re.search(r"\tCDS\t", line):
            cds = cds + 1
            cds_id = mRNA_id + "_cds_" + str(cds)
            records[8] = "ID={};Parent={};Name={}".format(cds_id, mRNA_id, cds_id)
        elif re.search(r"\tfive_prime_UTR\t", line):
            UTR_5 = UTR_5 + 1
            UTR_5_id = gene_id + ".UTR_5." + str(UTR_5)
            records[8] = "ID={};Parent={}".format(UTR_5_id, gene_id)
        elif re.search(r"\tthree_prime_UTR\t", line):
            UTR_3 = UTR_3 + 1
            UTR_3_id = gene_id + ".UTR_3." + str(UTR_3)
            records[8] = "ID={};Parent={}".format(UTR_3_id, gene_id)
        else:
            continue
        # skip the position of pep seq
        if len(records ) == 9:
            with open(gff_out, "a") as new_gff:
                new_gff.write("\t".join(records) +'\n')
22950802607 | import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage.util import random_noise
from scipy.ndimage import distance_transform_edt
from skimage import feature
# Generate noisy image of a square
def fom(edge_img, edge_gold):
    """Pratt's Figure of Merit between a detected edge map and a reference.

        F = 1 / max(N_det, N_gold) * sum_{detected pixels} 1 / (1 + alpha*d^2)

    where d is the distance from each detected edge pixel to the nearest
    reference edge pixel. A perfect match scores exactly 1.0.

    Fixes vs. the original: the accumulator was initialised to
    1/max(...) and the whole sum (including that spurious term) was then
    divided by max(...) again, so even identical edge maps did not score
    1.0. The accumulator now starts at 0 and is normalised exactly once.
    The O(N*M) Python double loop is replaced by vectorised indexing.

    Args:
        edge_img: boolean edge map under evaluation.
        edge_gold: boolean reference ("gold") edge map.
    """
    alpha = 1.0 / 9
    # Distance from every pixel to the nearest gold edge pixel.
    dist = distance_transform_edt(np.invert(edge_gold))
    norm = np.maximum(np.count_nonzero(edge_img), np.count_nonzero(edge_gold))
    if norm == 0:
        # Both maps empty: no edges to score (original divided by zero here).
        return 0.0
    mask = edge_img.astype(bool)
    penalties = 1.0 / (1.0 + alpha * dist[mask] ** 2)
    return float(penalties.sum() / norm)
# Build a synthetic test image: a centred square, rotated, blurred,
# then corrupted with speckle noise.
image=np.zeros((128, 128), dtype = float)
image[32:-32, 32:-32] = 1
image=ndi.rotate(image, 15, mode='constant')
image=ndi.gaussian_filter(image, 4)
image=random_noise(image, mode = 'speckle', mean = 0.1)
# Compute the Canny filter for two values of sigma
edges1=feature.canny(image)
edges2=feature.canny(image, sigma = 3)
# display results
fig,ax=plt.subplots(nrows = 1, ncols = 3, figsize = (8, 3))
ax[0].imshow(image,cmap = 'gray')
ax[0].set_title('noisy image', fontsize=20)
ax[1].imshow(edges1,cmap = 'gray')
ax[1].set_title(r'Canny filter, $\sigma=1$', fontsize=20)
ax[2].imshow(edges2,cmap = 'gray')
ax[2].set_title(r'Canny filter, $\sigma=3$', fontsize=20)
for a in ax:
    a.axis('off')
fig.tight_layout()
plt.show()
# Figure of merit of the sigma=1 edges, using the sigma=3 edges as reference.
print(fom(edges1, edges2))
8886533112 | #!/usr/bin/python2.7
from pyo import *
s = Server(sr=44100, nchnls=2, buffersize=512, duplex=1, audio='offline').boot()
s.recordOptions(dur=30.0, fileformat=0, filename='../../rendered/test_pyo.wav', sampletype=0)
fr = Sig(value=400)
p = Port(fr, risetime=0.001, falltime=0.001)
a = SineLoop(freq=p, feedback=0.08, mul=.3).out()
b = SineLoop(freq=p*1.005, feedback=0.08, mul=.3).out(1)
def pick_new_freq():
fr.value = random.randrange(300,601,50)
pat = Pattern(function=pick_new_freq, time=0.5).play()
s.start()
| pepperpepperpepper/crunchtime | synth_tests/pyo_tests/test7.py | test7.py | py | 517 | python | en | code | 0 | github-code | 90 |
34670231453 | import unittest
import wsgiref.headers
import wsgiref.util
from .server import application
class StartResponseMock:
    """Stand-in for a WSGI start_response callable; records the status,
    headers and exc_info it is called with so tests can inspect them."""

    def __call__(self, status, headers, exc_info=None):
        # Capture the response triple for later assertions.
        self.status, self.headers, self.exc_info = status, headers, exc_info
        # WSGI requires start_response to return a write() callable.
        return self.write

    def write(self, body_data):  # pragma: no cover
        raise NotImplementedError
def request(method, url):
    """Drive the WSGI `application` in-process (no network) and return
    (status_code, headers, body) for a synthetic request."""
    environ = {
        "REQUEST_METHOD": method,
        "PATH_INFO": url,
    }
    # Fill in the remaining mandatory WSGI keys with testing defaults.
    wsgiref.util.setup_testing_defaults(environ)
    start_response = StartResponseMock()
    iterable = application(environ, start_response)
    status_code = int(start_response.status.split()[0])
    response_headers = wsgiref.headers.Headers(start_response.headers)
    body = b"".join(iterable).decode()
    # The app is never expected to report an error via exc_info.
    assert start_response.exc_info is None
    return status_code, response_headers, body
class TestServer(unittest.TestCase):
    """End-to-end tests of the sudoku WSGI app via the request() helper.

    URLs carry an 81-character grid ('_' = empty cell).
    """
    def test_new_grid(self):
        # The root redirects to a freshly generated problem URL.
        status, headers, body = request("GET", "/")
        self.assertEqual(status, 302)
        self.assertTrue(headers["Location"].startswith("/problem/"))
    def test_problem(self):
        url = "/problem/53__7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79"
        status, headers, body = request("GET", url)
        self.assertEqual(status, 200)
        # Difficulty stars and an editable grid are rendered.
        self.assertIn("★☆☆☆☆", body)
        self.assertIn(
            "<table>"
            "<tr>"
            "<td>5</td>"
            "<td>3</td>"
            '<td contenteditable enterkeyhint="done" inputmode="numeric" spellcheck="false"><br></td>'
            '<td contenteditable enterkeyhint="done" inputmode="numeric" spellcheck="false"><br></td>'
            "<td>7</td>",
            body,
        )
    def test_problem_bad_request(self):
        # Invalid characters, unsolvable, and ambiguous grids each yield 400.
        for url, error in [
            (
                "/problem/53__7____6__A95____98____6_8___6___34__8_3__A7___2___6_6____28____4A9__5____8__79",
                "cell contains invalid value: 'A'",
            ),
            (
                "/problem/531_7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79",
                "no solution found",
            ),
            (
                "/problem/____7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79",
                "multiple solutions found",
            ),
        ]:
            with self.subTest(url=url):
                status, headers, body = request("GET", url)
                self.assertEqual(status, 400)
                self.assertIn(error, body)
    def test_solution(self):
        url = "/solution/53__7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79"
        status, headers, body = request("GET", url)
        self.assertEqual(status, 200)
        self.assertIn("★☆☆☆☆", body)
        # The solved grid is rendered with every cell filled in.
        self.assertIn(
            "<table><tr><td>5</td><td>3</td><td>4</td><td>6</td><td>7</td>", body
        )
    def test_solution_bad_request(self):
        # Same three failure modes as the /problem/ endpoint.
        for url, error in [
            (
                "/solution/53__7____6__A95____98____6_8___6___34__8_3__A7___2___6_6____28____4A9__5____8__79",
                "cell contains invalid value: 'A'",
            ),
            (
                "/solution/531_7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79",
                "no solution found",
            ),
            (
                "/solution/____7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79",
                "multiple solutions found",
            ),
        ]:
            with self.subTest(url=url):
                status, headers, body = request("GET", url)
                self.assertEqual(status, 400)
                self.assertIn(error, body)
    def test_post_method(self):
        # Only GET is supported on every endpoint.
        for url in [
            "/",
            "/problem/53__7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79",
            "/solution/53__7____6__195____98____6_8___6___34__8_3__17___2___6_6____28____419__5____8__79",
        ]:
            with self.subTest(url=url):
                status, headers, body = request("POST", url)
                self.assertEqual(status, 405)
    def test_unknown_url(self):
        status, headers, body = request("GET", "/admin/")
        self.assertEqual(status, 404)
| aaugustin/sudoku | python/sudoku/test_server.py | test_server.py | py | 4,375 | python | en | code | 15 | github-code | 90 |
24590470736 | import sys, os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
if sys.version_info.major == 3:
xrange = range
raw_input = input
sys.path.append( os.path.abspath('..') )
from read_param import *
# import NChan, freq, winSize
winSize = int(freq * 0.5) # 0.5s
A = np.memmap(sys.argv[1], dtype='float32')
if os.path.isfile('chirps.txt') == True:
ans = raw_input('Overwrite chirps file?')
if ans in ['y', 'Y']:
chirpsFile = open('chirps.txt', 'w')
else:
print('Appending to end of file')
chirpsFile = open('chirps.txt', 'a')
else:
chirpsFile = open('chirps.txt', 'w')
if os.path.isfile('without_chirps.txt') == True:
ans = raw_input('Overwrite without_chirps file?')
if ans in ['y', 'Y']:
nonchirpsFile = open('without_chirps.txt', 'w')
else:
print('Appending to end of file')
nonchirpsFile = open('without_chirps.txt', 'a')
else:
nonchirpsFile = open('without_chirps.txt', 'w')
class getWindow(object):
    """Interactive labelling UI: shows a random 0.5 s multichannel window of
    the memory-mapped recording `A` and writes the summed absolute amplitude
    of the current window to chirpsFile / nonchirpsFile when the user
    classifies it via the matplotlib buttons.

    Relies on module-level globals: A, nChan, winSize, plt, chirpsFile,
    nonchirpsFile. NOTE(review): the header comment mentions `NChan` while
    the code uses `nChan` -- confirm read_param exports the lowercase name.
    """
    def __init__(self):
        # De-interleave the flat recording into one view per channel.
        self.ch = []
        for i in range(nChan):
            self.ch.append( A[i::nChan] )
        self.size = self.ch[0].size
        self.f = plt.figure(1)
        self.ax = self.f.add_subplot(111)
        # One line artist per channel, vertically offset by 5 units each.
        self.lines = []
        for i in range(nChan):
            l, = plt.plot(range(winSize), range(winSize))
            self.lines.append(l)
        plt.xlim([0, winSize])
        plt.ylim([-10, 10+5*nChan])
        self.plotNext()
    def getNextStart(self):
        # Pick a random window start, aligned to a channel boundary.
        self.start = np.random.randint(self.size)
        self.start = self.start - (self.start % nChan)
        print(self.start)
    def plotNext(self):
        # Draw the next random window, one offset trace per channel.
        self.getNextStart()
        for i in range(nChan):
            data = 5*i + self.ch[i][self.start:self.start+winSize]
            self.lines[i].set_ydata( data )
        plt.draw()
    def isChirp(self, event):
        # Label the current window as a chirp: write the per-sample sum of
        # absolute amplitudes across channels as one line of floats.
        sumAbs = np.zeros(winSize)
        for i in range(nChan):
            sumAbs += np.abs(self.ch[i][self.start:self.start+winSize])
        for s in sumAbs:
            chirpsFile.write('%f '%s)
        chirpsFile.write('\n')
        chirpsFile.flush()
        self.plotNext()
    def notChirp(self, event):
        # Same as isChirp, but written to the non-chirp label file.
        sumAbs = np.zeros(winSize)
        for i in range(nChan):
            sumAbs += np.abs(self.ch[i][self.start:self.start+winSize])
        for s in sumAbs:
            nonchirpsFile.write('%f '%s)
        nonchirpsFile.write('\n')
        nonchirpsFile.flush()
        self.plotNext()
    def skip(self, event):
        # Discard the current window without labelling it.
        self.plotNext()
# Build the UI and wire the three classification buttons to the callbacks.
callback = getWindow()
axChirp = plt.axes([ 0.6, 0.05, 0.09, 0.075 ])
axSkip = plt.axes([ 0.7, 0.05, 0.09, 0.075 ])
axNonchirp = plt.axes([ 0.8, 0.05, 0.09, 0.075 ])
bChirp = Button(axChirp, 'Chirp')
bSkip = Button(axSkip, 'Skip')
bNonchirp = Button(axNonchirp, 'Non Chirp')
bChirp.on_clicked(callback.isChirp)
bSkip.on_clicked(callback.skip)
bNonchirp.on_clicked(callback.notChirp)
# Blocks until the window is closed; labelling happens via the buttons.
plt.show()
| neurobiofisica/gymnotools | chirpDetector/buscaAleatoria.py | buscaAleatoria.py | py | 2,969 | python | en | code | 2 | github-code | 90 |
35753750001 | #!/usr/bin/env python
import sys
input = sys.stdin.readline
n,m=map(int,input().split())
#li=[]
li={}
for _ in range(n):
a,b=map(str,input().split())
#li.append((a,b))
li[a]=b
#dic=dict(li)
for _ in range(m):
#print(dic[input().rstrip()])
print(li[input().rstrip()])
| hansojin/python | string/bj17219.py | bj17219.py | py | 295 | python | en | code | 0 | github-code | 90 |
20469097373 |
from googletrans import Translator
orgFile = open('test_quotes_english.txt', 'r')
orgFilesLines = orgFile.readlines()
translatorFile = open("test_quotes_punjabi.txt", "a", encoding="utf-8")
translator = Translator()
print("Starting Conversion")
# Strips the newline character
count = 0
for line in orgFilesLines:
raw_trans = translator.translate(line, dest="pa", src="en")
translation = raw_trans.text
translatorFile.write(translation + '\n')
count += 1
print(f'Number of lines converted {count}') | verma-rishu/Text_Generation_Indic | GoogleQuoteTranslation.py | GoogleQuoteTranslation.py | py | 516 | python | en | code | 0 | github-code | 90 |
11931685664 | """
"""
import numpy as np
import sympy as sp
import quadpy
import unittest
from opttrj.costarclenjerk import cCostArcLenJerk
from opttrj.opttrj0010 import opttrj0010
from itertools import tee
import sys
def pairwise(iterable):
    '''s -> (s0,s1), (s1,s2), (s2, s3), ...'''
    first, second = tee(iterable)
    # Advance the second iterator by one so zip yields adjacent pairs.
    next(second, None)
    return zip(first, second)
class cMyTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(cMyTest, self).__init__(*args, **kwargs)
np.set_printoptions(
linewidth=5000000,
formatter={'float': '{:+14.7e}'.format},
threshold=sys.maxsize)
np.random.seed()
self.N_ = np.random.randint(2, 4)
self.dim_ = np.random.randint(2, 3)
self.wp_ = (np.random.rand(self.N_ + 1, self.dim_) - 0.5) * 2.0
self.T_ = 10.0
self.Ni_ = 3
self.Ngl_ = 30
def __testValue(self):
cost = cCostArcLenJerk(self.wp_, self.T_, self.Ni_, self.Ngl_)
wp = cost.wp_.copy()
print('''\n---- Computing min jerk for cost value testing ----
---- N= {:d}, dim = {:d}----'''.format(
wp.shape[0] - 1, wp.shape[1]))
q = opttrj0010(wp, self.T_, _printPerformace=True)
qd = q.deriv()
qddd = q.deriv(3)
def runningCost(_t):
if np.isscalar(_t):
qd_ = np.linalg.norm(qd(_t)[0])
qddd_ = np.linalg.norm(qddd(_t)[0])
res = np.power(qd_ * qddd_, 2)
else:
qd_ = qd(_t)
qddd_ = qddd(_t)
res0 = np.einsum('ij,ij->i', qd_, qd_)
res1 = np.einsum('ij,ij->i', qddd_, qddd_)
res = np.multiply(res0, res1)
return res
ytest = q.y_
tauv = q.tau_
ts = np.arange(0, self.T_, 0.5)
u = np.zeros((self.Ni_ * self.N_ * self.dim_, ))
u = cost.wp2u(u)
ynom = cost.waypointConstraints(tauv, u)
assert np.linalg.norm(ytest - ynom) < 1.0e-8
# Test value of the runnign cost
print('---- Testing value of the running cost function ----')
for ti in ts:
rctest = runningCost(ti)
rcnom = cost.runningCost(ti, tauv, u)
e = abs(rctest - rcnom)
assert e < 1.0e-8, '''
error = {:14.7e}
test = {:14.7e}
nominal = {:14.7e}'''.format(e, rctest, rcnom)
print(' Value of the running cost Ok')
x = np.hstack([tauv, u])
Inom = cost(x)
err = 1.e100
badtrentCounter = 0
print('---- Testing value of the cost function ----')
for Ngl in range(100, 500, 30):
scheme = quadpy.line_segment.gauss_legendre(Ngl)
time_partition = np.linspace(0, self.T_, cost.N_)
Itest = 0.0
for t0, tf in pairwise(time_partition):
Itest += scheme.integrate(runningCost, [t0, tf])
if abs(Itest - Inom) > err:
badtrentCounter += 1
else:
badtrentCounter = 0
assert badtrentCounter < 3
e = abs(Itest - Inom)
ep = e / Itest
assert ep < 1.0e-6, '''
error = {:14.7e}
test = {:14.7e}
nominal = {:14.7e}'''.format(e, Itest, Inom)
print(' Value of the cost function Ok')
def testGradient(self):
cost = cCostArcLenJerk(self.wp_, self.T_, self.Ni_, self.Ngl_)
wp = cost.wp_.copy()
print('''\n---- Computing min jerk for gradient testing ----
---- N= {:d}, dim = {:d}----'''.format(
wp.shape[0] - 1, wp.shape[1]))
q = opttrj0010(wp, self.T_, _printPerformace=True)
qd = q.deriv()
qddd = q.deriv(3)
def runningCost(_t):
if np.isscalar(_t):
qd_ = np.linalg.norm(qd(_t)[0])
qddd_ = np.linalg.norm(qddd(_t)[0])
res = np.power(qd_ * qddd_, 2)
else:
qd_ = qd(_t)
qddd_ = qddd(_t)
res0 = np.einsum('ij,ij->i', qd_, qd_)
res1 = np.einsum('ij,ij->i', qddd_, qddd_)
res = np.multiply(res0, res1)
return res
tauv = q.tau_
gradTest = np.zeros((cost.N_ + cost.ushape_, ))
u = np.zeros((cost.ushape_, ))
cost.wp2u(u)
du = 1.0e-8
for j_u in range(cost.ushape_):
print('computinf gradietn w.r.t u_{:d}'.format(j_u))
u_aux = u.copy()
u_aux[j_u] += -du
x = np.hstack([tauv, u_aux])
I0 = cost(x)
u_aux[j_u] += 2.0 * du
x = np.hstack([tauv, u_aux])
I1 = cost(x)
gradTest[j_u + cost.N_] = 0.5 * (I1 - I0) / du
dtau = 1.0e-8
for j_tau in range(0, cost.N_):
print('computinf gradietn w.r.t tau_{:d}'.format(j_tau))
tauv_aux = tauv.copy()
tauv_aux[j_tau] += -2.0 * dtau
x = np.hstack([tauv_aux, u])
I0 = cost(x) * (1.0 / 12.0)
tauv_aux[j_tau] += dtau
x = np.hstack([tauv_aux, u])
I1 = cost(x) * (-2.0 / 3.0)
tauv_aux[j_tau] += 2.0 * dtau
x = np.hstack([tauv_aux, u])
I2 = cost(x) * (2.0 / 3.0)
tauv_aux[j_tau] += dtau
x = np.hstack([tauv_aux, u])
I3 = cost(x) * (-1.0 / 12.0)
gradTest[j_tau] = (I0 + I1 + I2 + I3) / dtau
x = np.hstack([tauv, u])
gradNom = cost.gradient(x)
ev = np.abs(gradNom - gradTest)
e = np.max(ev)
epNom = e / np.max(gradNom)
epTest = e / np.max(gradTest)
print('Error')
print(ev)
print('Nominal Value')
print(gradNom)
print('Test Value')
print(gradTest)
assert e < 1.0e-5, '''
Maximum error = {:14.7e}
Error relative to nomial val = {:14.7e}
Error relative to test val = {:14.7e}
'''.format(e, epNom, epTest)
    def testaQderivatives(self):
        """Check buildQ1/buildQ3 'derivative_tau' mode against a stencil.

        For 100 random (s, tau) pairs, the analytic derivative of each Q
        matrix w.r.t. tau must match a 5-point finite-difference estimate
        to a relative error below 1e-8.
        """
        cost = cCostArcLenJerk(self.wp_, self.T_, self.Ni_, self.Ngl_)
        dtaui = 0.0001
        for i in range(0, 100):
            # Random interval duration in [0.1, 2.1] and s in [-1, 1].
            taui = 0.1 + np.random.rand() * 2
            s = np.random.rand() * 2.0 - 1.0
            # 5-point stencil for dQ1/dtau.
            Q10 = cost.buildQ1(s, taui - 2 * dtaui) * (1.0 / 12.0)
            Q11 = cost.buildQ1(s, taui - dtaui) * (-2.0 / 3.0)
            Q12 = cost.buildQ1(s, taui + dtaui) * (2.0 / 3.0)
            Q13 = cost.buildQ1(s, taui + 2 * dtaui) * (-1.0 / 12.0)
            dQ1dtauTest = (Q10 + Q11 + Q12 + Q13) / dtaui
            dQ1dtauNom = cost.buildQ1(s, taui, 'derivative_tau')
            ev = np.abs(dQ1dtauTest - dQ1dtauNom)
            epTest = np.max(np.divide(ev, np.max(np.abs(dQ1dtauTest))))
            epNom = np.max(np.divide(ev, np.max(np.abs(dQ1dtauNom))))
            e = np.max(ev)
            from textwrap import dedent
            assert epTest < 1.0e-8 and epNom < 1.0e-8, dedent('''
            Error
            {}
            Nominal Value
            {}
            Test Value
            {}
            {}
            ''').format(
                *[np.array2string(v) for v in [ev, dQ1dtauNom, dQ1dtauTest, epTest]])
            # Same 5-point check for dQ3/dtau.
            Q30 = cost.buildQ3(s, taui - 2 * dtaui) * (1.0 / 12.0)
            Q31 = cost.buildQ3(s, taui - dtaui) * (-2.0 / 3.0)
            Q32 = cost.buildQ3(s, taui + dtaui) * (2.0 / 3.0)
            Q33 = cost.buildQ3(s, taui + 2 * dtaui) * (-1.0 / 12.0)
            dQ3dtauTest = (Q30 + Q31 + Q32 + Q33) / dtaui
            dQ3dtauNom = cost.buildQ3(s, taui, 'derivative_tau')
            ev = np.abs(dQ3dtauTest - dQ3dtauNom)
            e = np.max(ev)
            epTest = np.max(np.divide(ev, np.max(np.abs(dQ3dtauTest))))
            epNom = np.max(np.divide(ev, np.max(np.abs(dQ3dtauNom))))
            from textwrap import dedent
            assert epTest < 1.0e-8 and epNom < 1.0e-8, dedent('''
            Error
            {}
            Nominal Value
            {}
            Test Value
            {}
            {}
            ''').format(
                *[np.array2string(v) for v in [ev, dQ3dtauNom, dQ3dtauTest, epTest]])
# def testFirstGuess(self):
#
# wp = np.random.rand(self.N_+1, 2)
# cost = cCostArcLenJerk(wp, self.T_, self.Ni_, self.Ngl_)
#
# x0 = cost.getFirstGuess()
#
# tauv0 = x0[:cost.N_]
# u0 = x0[cost.N_:]
#
# qminjerk = cost.qminjerk_
#
# wp2 = cost.wp_
#
# t = np.arange(0, self.T_, 0.001)
# q_ = qminjerk(t)
#
# from matplotlib import pyplot as plt
#
# plt.plot(wp2[:, 0], wp2[:, 1], 'ro')
#
# plt.plot(q_[:, 0], q_[:, 1], 'b')
#
#
# plt.show()
#
def main():
    # Entry point: run every unittest.TestCase discovered in this module.
    unittest.main()
if __name__ == '__main__':
    main()
| rafaelrojasmiliani/gsplines | tests/costarclenjerk.py | costarclenjerk.py | py | 9,057 | python | en | code | 4 | github-code | 90 |
26153791060 | """
Author: Ratnesh Chandak
versions:
Python 3.7.4
pandas==0.25.1
"""
import pandas as pd
# Read the recipients spreadsheet (latin1 tolerates non-UTF-8 bytes).
data=pd.read_csv("sample_email.csv",encoding='latin1')
# Sender name typed by the user; inserted into the template signature.
user_Defined_Name=input().strip()
# Build one email template per row via vectorised string concatenation.
# (The column is literally named 'Email Boby' in the CSV.)
template=pd.Series("Email to :"+data['email']+"\n"+\
"Subject Line :"+data['subject']+"\n"+\
"Hi "+data['first_name']+" "+data['last_name']+",\n"+\
data['Email Boby']+"\n"+\
"Please do contact me at "+data['phone']+"\n"+\
"Thanks,\n"+\
user_Defined_Name)
# Show the first generated template as a sample.
print("-------------------printing sample template------------------")
print(template[0])
# DataFrame pairing each recipient address with its rendered template.
# NOTE(review): attribute assignment (df.col = ...) relies on the columns
# being pre-declared in the constructor; df['col'] = ... is the safer form.
email_template = pd.DataFrame(columns=['to_mail','template'])
email_template.to_mail=data['email']
email_template.template=template
# Persist the per-recipient templates.
email_template.to_csv('email_template.csv',index=False)
# Extract a phone number (###-###-####) and an email address from each body.
dfExtractedObj = pd.DataFrame(columns=['to_email','extracted_number','extracted_email'])
dfExtractedObj.to_email=data['email']
dfExtractedObj.extracted_number=data['Email Boby'].str.extract(pat='(\d{3}-\d{3}-\d{4})',expand=False)
dfExtractedObj.extracted_email=data['Email Boby'].str.extract(pat='([\w]+@[\w.]+)',expand=False)
#saving extracted email and number to another csv file
dfExtractedObj.to_csv('extracted_Phone_email.csv',index=False,na_rep="----") | ratnesh93/Email_data_extraction_and_template_creation | Email_data_extraction_and_template_creation/phoneExtractionAndTemplateCreation.py | phoneExtractionAndTemplateCreation.py | py | 1,565 | python | en | code | 0 | github-code | 90 |
26496222678 | # https://www.acmicpc.net/problem/10816
# BOJ 10816 "Number Cards 2": for each queried value, print how many of
# Sang-geun's cards carry that number.
import sys
n = int(input())
cards = list(map(int, sys.stdin.readline().strip().split()))
m = int(input())
check = list(map(int, sys.stdin.readline().strip().split()))
# Frequency table of the cards.
# NOTE(review): the name `dict` shadows the builtin; collections.Counter
# would do the same job without the shadowing.
dict = {}
for i in cards:
    if i in dict : dict[i] += 1
    else : dict[i] = 1
# Answer each query with its count, space separated (0 handled below).
for i in check :
    if i in dict :
        print(dict[i], end = ' ')
else : print(0, end=' ') | hamin2065/PS | 기본문법/10816.py | 10816.py | py | 415 | python | en | code | 0 | github-code | 90 |
18257600109 |
def resolve():
    """Read "N A B" from stdin and print how many of the first N slots
    fall into the leading A-sized part of each repeating (A+B) period.
    (Single-testcase competitive-programming solver.)
    """
    import sys
    input = sys.stdin.readline
    nab = [int(x) for x in input().rstrip().split(" ")]
    n = nab[0]
    a = nab[1]
    b = nab[2]
    kurikaesi = n // (a + b)  # number of complete (a+b) periods ("repetitions")
    amari = n % (a + b)       # leftover slots after the full periods ("remainder")
    ans = kurikaesi * a
    # The partial last period contributes at most `a` slots.
    ans += amari if amari < a else a
    print(ans)
if __name__ == "__main__":
    resolve()
| Aasthaengg/IBMdataset | Python_codes/p02754/s640920419.py | s640920419.py | py | 426 | python | en | code | 0 | github-code | 90 |
74902065256 | from keras.models import load_model
from helpers import resize_to_fit
from imutils import paths
import numpy as np
import cv2
import pickle
from captcha_cleaner import clean_images
def solve_captcha():
    """OCR the captcha images found in .\\captcha_in and return the text.

    Pipeline: load the trained Keras model and its LabelBinarizer, clean
    the raw captchas into .\\captcha_out, binarize each cleaned image,
    cut out the letter contours left-to-right and classify them one by one.

    NOTE(review): the ``return`` sits inside the ``for file in files``
    loop, so only the first image is ever processed — confirm this
    single-captcha behaviour is intended.
    """
    # Load the model and the label translator.
    with open('.\\AI_training\\labels_model.dat', 'rb') as translate_file:
        lb = pickle.load(translate_file)
    model = load_model('.\\AI_training\\trained_model.hdf5')
    # Clean the raw captchas into the output folder.
    clean_images('.\\captcha_in',
                 out_path='.\\captcha_out')
    # Collect all cleaned images inside the 'captcha_out' folder.
    files = list(paths.list_images('captcha_out'))
    for file in files:
        img = cv2.imread(file)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        # Hard threshold: pixels darker than 243 become black.
        _, img = cv2.threshold(img, 243, 255, cv2.THRESH_BINARY)
        # Find the contours of each letter.
        contours, _ = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        letters_region = []
        # Keep only contours big enough (area > 100) to be real letters.
        for contour in contours:
            (x, y, width, height) = cv2.boundingRect(contour)
            area = cv2.contourArea(contour)
            if area > 100:
                letters_region.append((x, y, width, height))
        # Sort letters left-to-right by x.
        letters_region = sorted(letters_region, key=lambda x: x[0])
        # Crop each letter (2 px margin) and classify it.
        prediction = []
        for rectangle in letters_region:
            x, y, width, height = rectangle
            letter_img = img[y-2:y+height+2, x-2:x+width+2]
            # Resize the letter to the model's 20x20 input.
            letter_img = resize_to_fit(letter_img, 20, 20)
            # Add the channel and batch dimensions Keras expects.
            letter_img = np.expand_dims(letter_img, axis=2)
            letter_img = np.expand_dims(letter_img, axis=0)
            predicted_letter = model.predict(letter_img)
            predicted_letter = lb.inverse_transform(predicted_letter)[0]
            prediction.append(predicted_letter)
        predicted_text = ''.join(prediction)
        # NOTE(review): [1:] drops the first detected glyph — presumably a
        # border artifact; confirm against the training data.
        return predicted_text[1:]
if __name__ == '__main__':
    captcha_string = solve_captcha()
    print(captcha_string)
    input('Press any key to continue...')
| KokumaiLuis/artificial_intelligence_captcha_solver | captcha_solver.py | captcha_solver.py | py | 2,202 | python | en | code | 0 | github-code | 90 |
30465393077 | # coding=utf-8
import sys
from utils.api import API
class Solution:
    """Answers three Pokemon trivia questions via the PokeAPI wrapper.

    All network access goes through ``utils.api.API``; each ``solve_*``
    method performs the HTTP calls it needs on demand.
    """
    def __init__(self):
        '''
        Initialize the Solution instance
        '''
        # Initialize the API client used by every question.
        self.api = API()
    def solve_first_question(self):
        '''
        Solve the first question.
        Obtain the number of pokemon with "at" in their name and with 2 "a" in their name, including the first "at"
        '''
        # Get list of pokemons from PokeAPI
        pokemon_list = self.api.get_pokemons_list()
        # Initialize the list as empty
        result_list = []
        # Loop over the list
        for pokemon in pokemon_list:
            # Get pokemon name from pokemon object
            pokemon_name = pokemon['name']
            # Keep names containing "at" with exactly two "a" in total
            # (the "a" inside "at" counts towards the two).
            if 'at' in pokemon_name and pokemon_name.count('a') == 2:
                # Add the pokemon name to the result list
                result_list.append(pokemon_name)
        # Return the count of matching names.
        return len(result_list)
    def solve_second_question(self):
        '''
        Solve the second question.
        Obtain the number of species that can procreate Raichu
        '''
        # Get pokemon specie for Raichu from PokeAPI
        raichu_specie = self.api.get_pokemon_species(name='raichu')
        if 'egg_groups' in raichu_specie:
            # Get the egg groups for Raichu
            egg_groups = raichu_specie['egg_groups']
            # print("Raichu has {} egg groups".format(len(egg_groups))) # DEBUG
            # Collect distinct species names across all egg groups.
            result_list = []
            for egg_group in egg_groups:
                # Get the egg group name
                egg_group_name = egg_group['name']
                # Get the egg group data from PokeAPI
                egg_group_data = self.api.get_egg_groups(name=egg_group_name)
                if 'pokemon_species' in egg_group_data:
                    # Get the list of pokemon species that can procreate Raichu
                    pokemon_species = egg_group_data['pokemon_species']
                    for pokemon_specie in pokemon_species:
                        # Get the pokemon specie name
                        pokemon_specie_name = pokemon_specie['name']
                        # De-duplicate: a species may appear in both groups.
                        if pokemon_specie_name not in result_list:
                            # Add the pokemon specie name to the result list
                            result_list.append(pokemon_specie_name)
            # Return the count of distinct species.
            return len(result_list)
        else:
            # If the specie doesn't have egg groups, return 0
            return 0
    def solve_third_question(self):
        '''
        Solve the third question.
        Obtain the maximum and minimum weight of the first generation fighting pokemon (id <= 151)
        '''
        # Sentinel extremes so any real weight replaces them.
        # NOTE(review): if no fighting pokemon is found these sentinels are
        # returned as-is.
        max_weight = -sys.maxsize - 1
        min_weight = sys.maxsize
        '''
        This was the first way I found to solve the question. It was too slow.
        # Get the list of pokemon from PokeAPI
        pokemon_list = self.api.get_pokemons_list(151)
        # Loop over the list
        for pokemon in pokemon_list:
            # Get the pokemon id
            pokemon_name = pokemon['name']
            # Get the pokemon data from PokeAPI
            pokemon_data = self.api.get_pokemon_data(name=pokemon_name)
            if "types" in pokemon_data:
                # Get the types for the pokemon
                types = pokemon_data['types']
                # Check if the pokemon is a fighting type
                for type in types:
                    if type['type']['name'] == 'fighting':
                        # Get the pokemon weight
                        weight = pokemon_data['weight']
                        # Check if the weight is greater than the maximum weight
                        if weight > max_weight:
                            # Update the maximum weight
                            max_weight = weight
                        # Check if the weight is smaller than the minimum weight
                        if weight < min_weight:
                            # Update the minimum weight
                            min_weight = weight
        '''
        # Get the list of pokemon from type object
        fighting_type = self.api.get_pokemon_type(name='fighting')
        # Validate the type object
        if "pokemon" in fighting_type:
            # Loop over the list of pokemon
            for pokemon in fighting_type['pokemon']:
                # Get the pokemon object
                pokemon_object = pokemon['pokemon']
                # Remove the last slash from the pokemon object url
                pokemon_url = pokemon_object['url'][:-1] if pokemon_object['url'].endswith('/') else pokemon_object['url']
                # The numeric id is the last path segment of the URL.
                pokemon_id = pokemon_url.split('/')[-1]
                if int(pokemon_id) <= 151:
                    # Get the pokemon data from PokeAPI
                    pokemon_data = self.api.get_pokemon_data(url=pokemon_url)
                    # Get the pokemon weight
                    weight = pokemon_data['weight']
                    # Check if the weight is greater than the maximum weight
                    if weight > max_weight:
                        # Update the maximum weight
                        max_weight = weight
                    # Check if the weight is smaller than the minimum weight
                    if weight < min_weight:
                        # Update the minimum weight
                        min_weight = weight
                else:
                    # If the pokemon id is greater than 151, stop the loop, because first generation ends at 151
                    break
        # Return [max, min] as a 2-element list.
        return [max_weight, min_weight]
# Main function: answer the three questions in order, printing each result.
if __name__ == '__main__':
    # First question: number of pokemon with "at" in their name and with 2 "a" in their name, including the first "at"
    solution = Solution()
    print("First question:")
    result = solution.solve_first_question()
    print("The number of pokemon with 'at' in their name and with 2 'a' in their name, including the first 'at' is:", result)
    # Second question: number of species that can procreate raichu
    print("\nSecond question:")
    result = solution.solve_second_question()
    print("The number of species that can procreate Raichu is:", result)
    # Third question: maximum and minimum weight of the first generation fighting pokemon
    print("\nThird question:")
    result = solution.solve_third_question()
    print("The maximum weight of the first generation fighting pokemon is:", result[0])
print("The minimum weight of the first generation fighting pokemon is:", result[1]) | lagwy/houm | solution.py | solution.py | py | 6,943 | python | en | code | 0 | github-code | 90 |
42919915384 | import sys
import math
def isPrime(z):
    """Return 1 if z is a prime number, else 0.

    Kept as an int-returning predicate (0/1) because the caller compares
    the result with ``== 1``.
    """
    if z < 2:
        # 0, 1 and negatives are not prime (also avoids sqrt of a negative).
        return 0
    if z == 2:
        # 2 is the only even prime; the original blanket even-check below
        # wrongly rejected it.
        return 1
    if z % 2 == 0:
        return 0
    # Trial division by odd candidates up to sqrt(z).
    for i in range(3, int(pow(z, .5)) + 1, 2):
        if z % i == 0:
            return 0
    return 1
# Each line of the input file is "min,max"; print the count of primes in
# the inclusive range [min, max].
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
    test=test.split(",")
    mini=int(test[0])
    maxi=int(test[1])
    n=0
    for i in range(mini,maxi+1):
        if isPrime(i)==1:
            n+=1
    print(n)
test_cases.close()
| paulwuertz/CodeEval | Easy/CountingPrimes.py | CountingPrimes.py | py | 418 | python | en | code | 0 | github-code | 90 |
# Maps each secure channel -> id of the user logged in on it.
sc_to_user_id = {}
# Reverse map: user id -> that user's secure channel.
user_id_to_sc = {}
# Every connected secure channel (connected does not imply logged in).
scs = []
# In-memory chat history.
chat_history = []
def remove_sc_from_socket_mapping(sc):
    """Forget every reference to the secure channel *sc*.

    Removes the sc->user and user->sc mappings when present and takes the
    channel out of the connection list.  Fixes the original bug where the
    ``uid`` lookup was not nested under the first ``if``, raising
    NameError for a channel that was never mapped to a user.
    """
    try:
        uid = sc_to_user_id.pop(sc)
    except KeyError:
        pass  # channel was never associated with a user
    else:
        user_id_to_sc.pop(uid, None)
    if sc in scs:
        scs.remove(sc)
35891279021 | from micropython import const
import os
import ubinascii
from . import parse_plist_xml
# WLAN.status() codes (mirrors MicroPython's network module values).
STAT_IDLE = const(0)
STAT_CONNECTING = const(1)
STAT_WRONG_PASSWORD = const(2)
STAT_NO_AP_FOUND = const(3)
STAT_CONNECT_FAIL = const(4)
STAT_GOT_IP = const(5)
# Interface ids: station vs. access point.
STA_IF = const(0)
AP_IF = const(1)
# Authentication modes reported by WLAN.scan().
AUTH_OPEN = const(0)
AUTH_WEP = const(1)
AUTH_WPA_PSK = const(2)
AUTH_WPA2_PSK = const(3)
AUTH_WPA_WPA2_PSK = const(4)
class WLAN:
    """macOS stand-in for MicroPython's ``network.WLAN`` station interface.

    Shells out to ``networksetup`` and the private ``airport`` tool via
    ``os.popen``.  Only the station interface (STA_IF) is supported.
    """
    interface = 'en0'  # BSD name of the Wi-Fi interface
    def __init__(self, interface_id=STA_IF):
        # Only station mode is implemented; AP_IF raises.
        if interface_id != STA_IF:
            raise NotImplementedError(interface_id)
    def active(self, is_active=None):
        """Set Wi-Fi power when *is_active* is given, else return its state."""
        if is_active is not None:
            cmd = 'networksetup -setairportpower %s %s' % (
                self.interface, 'on' if is_active else 'off')
            os.popen(cmd)
        else:
            cmd = 'networksetup -getairportpower %s' % self.interface
            return os.popen(cmd).read().endswith('On\n')
    def connect(self, ssid=None, password=None):
        """Associate with *ssid*; raise RuntimeError on failure."""
        if not ssid:
            return
        cmd = 'networksetup -setairportnetwork %s "%s" "%s"' % (
            self.interface, ssid, password or ''
        )
        out = os.popen(cmd).read().strip()
        # networksetup prints a message only when the association fails.
        if out:
            raise RuntimeError('Connection error')
    def disconnect(self):
        """No-op placeholder: disassociating requires root on macOS."""
        if self.isconnected():
            print('WLAN.disconnect() not implemented so this is a no-op')
            # Requires root to disassociate
            # cmd = '/System/Library/PrivateFrameworks/' \
            #       'Apple80211.framework/Versions/' \
            #       'Current/Resources/airport -z'
            # os.popen(cmd)
    def scan(self):
        """Scan for networks; return MicroPython-style tuples
        (ssid, bssid, channel, RSSI, authmode, hidden)."""
        cmd = '/System/Library/PrivateFrameworks/' \
              'Apple80211.framework/Versions/' \
              'Current/Resources/airport -s -x'
        xml_str = os.popen(cmd).read()
        retval = []
        for net in parse_plist_xml.parse(xml_str):
            # Derive the authmode constant from the WEP/WPA/RSN info blobs.
            if net.get('WEP'):
                authmode = AUTH_WEP
            elif net.get('RSN_IE') or net.get('WPA_IE'):
                t = 'RSN'
                info = net.get('%s_IE' % t)
                if not info:
                    t = 'WPA'
                    info = net.get('%s_IE' % t)
                # Unicast cipher ids: 2 = TKIP (WPA), 4 = CCMP (WPA2).
                uchipers = sorted(info.get('IE_KEY_%s_UCIPHERS' % t, []))
                if uchipers == [2]:
                    authmode = AUTH_WPA_PSK
                elif uchipers == [2, 4]:
                    authmode = AUTH_WPA_WPA2_PSK
                elif uchipers == [4]:
                    authmode = AUTH_WPA2_PSK
                else:
                    raise ValueError('Unknown uciphers: %s',
                                     info.get('IE_KEY_%s_UCIPHERS' % t))
            else:
                authmode = AUTH_OPEN
            hidden = 0
            # (ssid, bssid, channel, RSSI, authmode, hidden)
            retval.append((
                net['SSID_STR'],
                net['BSSID'],
                net['CHANNEL'],
                net['RSSI'],
                authmode,
                hidden,
            ))
        return retval
    def status(self):
        """Map the connection state onto the STAT_* constants."""
        if self.isconnected():
            return STAT_GOT_IP
        else:
            return STAT_IDLE
    def isconnected(self):
        """True when associated with a network, parsed from networksetup output."""
        cmd = 'networksetup -getairportnetwork %s' % self.interface
        out = os.popen(cmd).read()
        if out == 'You are not associated with an AirPort network.\n':
            return False
        elif out.startswith('Current Wi-Fi Network: '):
            # ssid = out[len('Current Wi-Fi Network: '):].strip()
            return True
        else:
            raise ValueError(out)
    def ifconfig(self, ifconfig=None):
        """Return (ip, subnet, gateway, dns); setting is not supported."""
        if ifconfig is not None:
            raise NotImplementedError
        cmd = "ifconfig %s | awk '/inet /{print $2, $4}'" % self.interface
        ip, netmask = os.popen(cmd).read().strip().split()
        # NOTE(review): macOS ifconfig prints the netmask as hex (0xffffff00);
        # int(netmask) without base=16 would fail on that — confirm the awk
        # output format on the target system.
        subnet = '.'.join([str(int(netmask) >> i * 8 & 0xff)
                           for i in range(3, -1, -1)])
        cmd = "route -n get default | awk '/gateway: /{print $2}'"
        gateway = os.popen(cmd).read().strip()
        cmd = "awk '/^nameserver/{print $2}' /etc/resolv.conf"
        dns_servers = os.popen(cmd).read().strip().split()
        dns = dns_servers[0] if len(dns_servers) else ''
        return (ip, subnet, gateway, dns)
    def config(self, *args, **kwargs):
        """Query interface parameters; only 'mac' is implemented."""
        if args and kwargs:
            raise TypeError('either pos or kw args are allowed')
        if len(args) > 2:
            raise TypeError('can query only one param')
        if len(args) == 1:
            if args[0] == 'mac':
                cmd = "ifconfig %s | awk '/ether/{print $2}'" % self.interface
                mac_hex = os.popen(cmd).read().strip()
                return ubinascii.unhexlify(mac_hex.replace(':', ''))
            elif args[0] in ['essid', 'channel', 'hidden',
                             'authmode', 'password']:
                raise NotImplementedError
            else:
                raise ValueError('unknown config param')
| jonathonlui/micropython-extras | micropython_macos/network/__init__.py | __init__.py | py | 5,029 | python | en | code | 1 | github-code | 90 |
11803467 | # -*- coding: utf-8 -*-
import time
from utils import letterbox_image,exp,minAreaLine,draw_lines,minAreaRectBox,draw_boxes,line_to_line,sqrt,rotate_bound,timer,is_in
from line_split import line_split
import numpy as np
import cv2
from PIL import Image
from skimage import measure
import json
# crnn
from crnn.crnn_torch import crnnOcr, crnnOcr2
# Darknet weights of the table-line segmentation net; the matching .cfg
# sits next to the .weights file.
tableNetPath = 'UNet/table.weights'
SIZE = 512,512  # (width, height) network input resolution
tableNet = cv2.dnn.readNetFromDarknet(tableNetPath.replace('.weights','.cfg'),tableNetPath)
def dnn_table_predict(img,prob=0.5):
    """Run the table-line segmentation network on ``img``.

    The image is letterboxed to SIZE, fed through the Darknet model and
    the output is exponentiated into per-pixel scores.

    :return: (out, fx, fy, dx, dy) where out has shape (2, H, W) and
        fx/fy/dx/dy are the letterbox scale factors and paddings.
    """
    imgResize,fx,fy,dx,dy = letterbox_image(img,SIZE)
    imgResize = np.array(imgResize)
    imgW,imgH = SIZE
    image = cv2.dnn.blobFromImage(imgResize,1,size=(imgW,imgH),swapRB=False)
    image = np.array(image)/255
    tableNet.setInput(image)
    out=tableNet.forward()
    out = exp(out[0]) # shape (2, 512, 512): one map per class (horizontal / vertical lines)
    out = out[:,dy:,dx:] # drop the letterbox padding at the top-left (right/bottom padding appears to remain — TODO confirm)
    return out,fx,fy,dx,dy
def get_seg_table(img,prob,row=10,col=10):
    """Build a binary image of the table ruling lines plus a skew estimate.

    :param img: input PIL image
    :param prob: threshold applied to the network's line probability maps
    :param row: minimum horizontal extent (px) of a horizontal line
    :param col: minimum vertical extent (px) of a vertical line
    :return: (seg_table, degree) — line image rescaled to the original
        resolution and the mean skew angle in degrees (0.0 when fewer
        than 3 horizontal lines were found).
    """
    out,fx,fy,dx,dy = dnn_table_predict(img,prob)
    rows = out[0]
    cols = out[1]
    # Connected components of the vertical-line map.
    labels=measure.label(cols>prob,connectivity=2)
    regions = measure.regionprops(labels)
    ColsLines = [minAreaLine(line.coords) for line in regions if line.bbox[2]-line.bbox[0]>col ]
    # if debug:
    #     cv2.imwrite('_cols.jpg',labels*255)
    # Connected components of the horizontal-line map.
    labels=measure.label(rows>prob,connectivity=2)
    regions = measure.regionprops(labels)
    RowsLines = [minAreaLine(line.coords) for line in regions if line.bbox[3]-line.bbox[1]>row ]
    # RowsLines[i] = [xmin, ymin, xmax, ymax]; x is horizontal, y vertical.
    # if debug:
    #     cv2.imwrite('_rows.jpg',labels*255)
    imgW,imgH = SIZE
    tmp =np.zeros((imgH-2*dy,imgW-2*dx),dtype='uint8')
    tmp = draw_lines(tmp,ColsLines+RowsLines,color=255, lineW=1)
    # Morphological closing (dilate then erode) reconnects lines that were
    # mistakenly split into small fragments.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(3,3))
    tmp = cv2.morphologyEx(tmp, cv2.MORPH_CLOSE, kernel,iterations=1)
    seg_table = cv2.resize(tmp,None,fx=1.0/fx,fy=1.0/fy,interpolation=cv2.INTER_CUBIC)
    degree = 0.0
    if len(RowsLines) >= 3:
        # Mean slope of the horizontal lines -> skew angle in degrees.
        degree = np.array([np.arctan2(bbox[3]-bbox[1],bbox[2]-bbox[0]) for bbox in RowsLines])
        degree = np.mean(-degree*180.0/np.pi)
    return seg_table,degree
def find_tables(img_seg):
    """Detect the outer bounding boxes of tables in the binary line image.

    A contour counts as a table when its area exceeds 5% of the image.
    :return: ndarray of [xmin, ymin, xmax, ymax], sorted top-to-bottom.
    """
    # from the seg image, detect big bounding box and decide how many tables in the picture
    tables = []
    h,w = img_seg.shape
    # NOTE(review): the 3-value unpacking matches the OpenCV 3.x API;
    # OpenCV 4 returns only (contours, hierarchy).
    _,contours, hierarchy = cv2.findContours(img_seg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        table_flag = True
        contourArea = cv2.contourArea(contour)
        if contourArea < h * w * 0.05:
            table_flag = False
        if not table_flag:
            continue
        # Axis-aligned bounding box of the contour points.
        contour = contour.reshape((-1, 2))
        xmin,ymin = np.min(contour,axis=0)
        xmax,ymax = np.max(contour,axis=0)
        tables.append([xmin,ymin,xmax,ymax])
    tables = sorted(tables,key=lambda x : x[1])
    return np.array(tables)
def find_cells(img_seg,tables):
    """Return, per table, the bounding boxes of its cells.

    Cells are the background (zero-pixel) connected components of the
    line image whose area lies between 0.01% and 50% of the page and
    whose box falls inside the table.
    :return: array of per-table arrays of [xmin, ymin, xmax, ymax]
    """
    if not len(tables):
        return []
    h,w = img_seg.shape
    tabelLabels=measure.label(img_seg==0,connectivity=2)
    regions=measure.regionprops(tabelLabels)
    rboxes= []
    for table in tables:
        tmp = []
        for i,region in enumerate(regions):
            if h*w*0.0001 < region.bbox_area <h*w*0.5:
                # bbox is (min_row, min_col, max_row, max_col); reorder to x/y.
                # NOTE(review): np.array(map(...)) only behaves on Python 2;
                # on Python 3 the map must be wrapped in list().
                rbox = np.array(map(int,region.bbox))[[1,0,3,2]]
                if is_in(rbox,table):
                    tmp.append(rbox)
        rboxes.append(np.array(tmp))
    return np.array(rboxes)
def annotate_cell(img,cells):
    """Assign row/column spans to each cell box and OCR its text.

    Cell boxes are clustered into grid coordinates by sorting on each
    edge (xmin/xmax/ymin/ymax) and bumping the index whenever the gap to
    the next box is at least the mean gap.  Text lines inside each cell
    are then recognised in one CRNN batch (split into 4 sub-batches on
    CUDA OOM).

    :param img: BGR page image
    :param cells: ndarray of cell boxes with shape (n, 4)
    :return: (cells_info, n_rows, n_cols) where cells_info is a list of
        dicts with keys text/start_row/end_row/start_col/end_col.
    """
    # now cells is a ndarray with shape (n,4)
    res = np.array([{'text':''} for cell in cells])
    # start col: cluster on the left edge.
    sc = 0
    idx = cells[:, 0].argsort()
    cells = cells[idx]
    res = res[idx]
    eps = np.diff(cells,axis=0)[:,0]
    mean = np.mean(eps)
    breakpoints = np.where(eps >= mean)[0]
    for i,item in enumerate(res):
        item['start_col'] = sc
        if i in breakpoints:
            sc += 1
    # end col: cluster on the right edge.
    ec = 0
    idx = cells[:, 2].argsort()
    cells = cells[idx]
    res = res[idx]
    eps = np.diff(cells,axis=0)[:,2]
    #print(eps)
    mean = np.mean(eps)
    breakpoints = np.where(eps >= mean)[0]
    for i,item in enumerate(res):
        item['end_col'] = ec
        if i in breakpoints:
            ec += 1
    # start row: cluster on the top edge.
    sr = 0
    idx = cells[:, 1].argsort()
    cells = cells[idx]
    res = res[idx]
    eps = np.diff(cells,axis=0)[:,1]
    mean = np.mean(eps)
    breakpoints = np.where(eps >= mean)[0]
    for i,item in enumerate(res):
        item['start_row'] = sr
        if i in breakpoints:
            sr += 1
    # end row: cluster on the bottom edge.
    er = 0
    idx = cells[:, 3].argsort()
    cells = cells[idx]
    res = res[idx]
    eps = np.diff(cells,axis=0)[:,3]
    mean = np.mean(eps)
    breakpoints = np.where(eps >= mean)[0]
    for i,item in enumerate(res):
        item['end_row'] = er
        if i in breakpoints:
            er += 1
    # Crop every text line of every cell; keep the cell index so the OCR
    # result can be routed back to the right cell.
    batch_list_text = []
    for i,([xmin,ymin,xmax,ymax],info) in enumerate(zip(cells,res)):
        lines = line_split(img[ymin:ymax,xmin:xmax],y=ymin,x=xmin)
        for [_xmin,_ymin,_xmax,_ymax] in lines:
            #cv2.imwrite('./part/'+str(i)+'_'+str(_ymax)+'.jpg',img[_ymin:_ymax,_xmin:_xmax])
            partImg = img[_ymin:_ymax,_xmin:_xmax]
            partImg = Image.fromarray(partImg).convert('L')
            batch_list_text.append((i, partImg.convert('L')))
    try:
        i_value, batch_text = crnnOcr2(batch_list_text)
    except:
        # Fallback: retry in four sub-batches (GPU out-of-memory workaround).
        print("!"*20)
        print('CUDA OUT OF MEMORY, SPLIT BATCH')
        print("!"*20)
        pt = int(len(batch_list_text)/4)
        i_value1, batch_text1 = crnnOcr2(batch_list_text[:pt])
        i_value2, batch_text2 = crnnOcr2(batch_list_text[pt:2*pt])
        i_value3, batch_text3 = crnnOcr2(batch_list_text[2*pt:3*pt])
        i_value4, batch_text4 = crnnOcr2(batch_list_text[3*pt:])
        i_value = i_value1 + i_value2 + i_value3 + i_value4
        batch_text = batch_text1 + batch_text2 + batch_text3 + batch_text4
    # Append each recognised line to its owning cell's text.
    for i,text in zip(i_value,batch_text):
        res[i]['text'] += text.encode("UTF-8")+ '\n'
    res = res.tolist()
    res = sorted(res,key=lambda x: (x['start_row'], x['start_col']))
    return res,er+1,ec+1
def find_text(tables, w, h):
    """Return the horizontal bands of the page NOT covered by a table.

    ``tables`` must be sorted by ymin (find_tables guarantees this);
    vertically overlapping tables are merged into a single span so the
    gaps between spans (plus the top and bottom margins) become the
    text-detection areas.

    :param tables: ndarray of [xmin, ymin, xmax, ymax] table boxes
    :param w: page width
    :param h: page height
    :return: ndarray of [0, ymin, w, ymax] full-width text bands
    """
    if not len(tables):
        # No table at all: the whole page is text area.
        return np.array([[0, 0, w, h]])
    # Vertical (ymin, ymax) interval of every table.
    spans = tables[:, [1, 3]]
    merged = []
    cur_lo, cur_hi = spans[0]
    for lo, hi in spans[1:]:
        if cur_hi >= lo:
            # Overlapping tables collapse into one interval.
            cur_hi = max(cur_hi, hi)
        else:
            merged.append((cur_lo, cur_hi))
            cur_lo, cur_hi = lo, hi
    merged.append((cur_lo, cur_hi))
    # Interleave the page top/bottom with the table edges, then pair up
    # consecutive boundaries: each pair is one table-free band.
    bounds = [0]
    for lo, hi in merged:
        bounds.append(lo)
        bounds.append(hi)
    bounds.append(h)
    gaps = np.array(bounds).reshape(-1, 2)
    return np.array([[0, top, w, bottom] for top, bottom in gaps])
################################################################CORE######################################################
################################################################CORE######################################################
################################################################CORE######################################################
@timer
def tableXH(img,prob=0.5,row=30,col=10,alph=50):
    """Full table-recognition pipeline: segment lines, deskew, locate
    tables and cells, OCR everything and return a JSON-ready dict.

    :param img: PIL RGB page image
    :param prob: line-probability threshold forwarded to get_seg_table
    :param row: minimum horizontal-line length forwarded to get_seg_table
    :param col: minimum vertical-line length forwarded to get_seg_table
    :param alph: unused here — kept for interface compatibility
    :return: dict with cost_time and a result payload listing text areas
        and tables (each table with its annotated cells).
    """
    start_time = time.time()
    # Use the U-Net to recognise table lines and estimate the skew angle;
    # this also yields the binary seg image used for box detection.
    img_seg,degree=get_seg_table(img,prob,row,col)
    img = cv2.cvtColor(np.array(img),cv2.COLOR_RGB2BGR)
    if degree > 0.5:
        # Deskew both the page and the segmentation by the same angle.
        print('Rotating...')
        img = rotate_bound(img,degree)
        img_seg = rotate_bound(img_seg,degree)
    h,w = img_seg.shape
    tables = find_tables(img_seg)
    cells = find_cells(img_seg,tables)
    text_areas = find_text(tables,w,h)
    ############# build the JSON response #############
    blocks = []
    for area in text_areas:
        blocks.append({
            "is_table": False,
            "cells": [],
            "position": area.tolist(),
            "text":"text"})
    for table,cell in zip(tables,cells):
        # {"position":int[],"start_row":int,"end_row":int,"start_column":int,"end_column":int,"text":str}
        cell,nrow,ncol= annotate_cell(img,cell)
        blocks.append({
            "is_table": True,
            "cells": cell,
            "columns": ncol,
            "rows": nrow,
            "position":table.tolist(),
            "text":""})
    # Order the blocks top-to-bottom by their ymin.
    blocks.sort(key=lambda x: x['position'][1])
    end_time = time.time()
    return {
        "cost_time": end_time - start_time,
        "result": {
            "rotated_image_width": w,
            "rotated_image_height": h,
            "result_word":blocks
            #"blocks":blocks #according to hehe-AI
        }
    }
################################################################CORE######################################################
################################################################CORE######################################################
################################################################CORE######################################################
if __name__ == '__main__':
    # Smoke test: run the whole pipeline on one sample image.
    img =Image.open('test_pics/0.jpg').convert('RGB')
    res = tableXH(img)
    #print(res)
    #json.dumps(res, ensure_ascii=False)
| zlr20/Table-Structure-Decomposition-OCR | table.py | table.py | py | 9,619 | python | en | code | 7 | github-code | 90 |
31118264068 |
from typing import List, Optional
class Transposition:
    """Simple columnar transposition cipher.

    The message is written row by row into a table of the given width,
    padded with ``&`` so the last row is full, and read out column by
    column.  Applying ``encrypt`` twice restores the original layout, so
    the same method serves for encryption and decryption (padding
    characters remain in the decrypted text).
    """

    def __init__(self, width: int = 5):
        self.width = width  # default table width (number of columns)

    def get_width(self) -> int:
        """Return the current default table width."""
        return self.width

    def set_width(self, width: int):
        """Set the default table width."""
        self.width = width

    def encrypt(self, message: str, width: Optional[int] = None) -> str:
        """Encrypt (or decrypt) *message* by columnar transposition.

        :param message: text to transform
        :param width: table width; defaults to ``self.width``
            (the annotation is Optional now — the original declared
            ``width: int`` with a ``None`` default)
        :return: the transposed message, padded with ``&`` if needed
        """
        size = self.width if width is None else width
        # Pad so the message fills a whole number of rows.
        padding = (-len(message)) % size
        padded = message + "&" * padding
        table = [list(padded[i:i + size]) for i in range(0, len(padded), size)]
        transposed = Transposition.transpose(table)
        return "".join("".join(row) for row in transposed)

    @staticmethod
    def transpose(table: List[List[str]]) -> List[List[str]]:
        """Return the transpose of a rectangular nested list.

        Handles the empty table (the original indexed ``table[0]`` and
        crashed, which made ``encrypt("")`` raise IndexError).
        """
        n_rows = len(table)
        n_cols = len(table[0]) if table else 0
        return [[table[r][c] for r in range(n_rows)] for c in range(n_cols)]
| tsyploff/modern-problems-of-applied-math-and-computer-science | crypto/Transposition/src/transposition.py | transposition.py | py | 1,606 | python | ru | code | 0 | github-code | 90 |
15422939977 | import tkinter
from previous_versions.Version_Before_Refactor.src.backend.iRacing.state import State
from previous_versions.Version_Before_Refactor.src.backend.iRacing.telemetry import Telemetry
from previous_versions.Version_Before_Refactor.src.backend.utils.exception_handler import exception_handler
from previous_versions.Version_Before_Refactor.src.frontend.overlays.overlay_abstract import OverlayAbstract
class FuelColumn:
    """One column of the fuel overlay: a header label, a value label and
    (optionally) a second 'special' value label, stacked in a tk Frame.

    :param master: parent tk container the column packs into
    :param cfg: appearance config (fonts, colors, padding)
    :param header_name: text shown in the column header
    :param special_on: when True, add the extra 'special' value label
    """
    def __init__(self, master, cfg, header_name, special_on):
        self.master = master
        self.cfg = cfg
        self.font = f'{self.cfg.font_style} {self.cfg.font_size} {self.cfg.font_extra}'
        self.fg_header = cfg.fg_color_header
        self.fg_values = cfg.fg_color_values
        self.fg_special = cfg.fg_color_special
        self.bg = cfg.bg_color
        self.special_on = special_on
        self.text_padding = cfg.text_padding
        # StringVar backing the main value label; starts as a blank space.
        self.text_var = tkinter.StringVar()
        self.text_var.set(" ")
        self.column_frame = tkinter.Frame(master=self.master, bg=self.bg)
        self.column_frame.pack(side='left', anchor='nw', expand=1, fill='both')
        self.header = tkinter.Label(self.column_frame, text=header_name, font=self.font, fg=self.fg_header, bg=self.bg,
                                    padx=self.text_padding, pady=self.text_padding)
        self.header.pack(expand=0, side='top')
        self.value = tkinter.Label(self.column_frame, textvariable=self.text_var, font=self.font, fg=self.fg_values,
                                   bg=self.bg, padx=self.text_padding, pady=self.text_padding)
        self.value.pack(expand=1, anchor='center', fill='both')
        # Every label that update_appearance() must restyle.
        self.all_labels = [self.header, self.value]
        if self.special_on:
            # Optional secondary value (e.g. the 'extra lap' variant).
            self.special_var = tkinter.StringVar()
            self.special_var.set(" ")
            self.special = tkinter.Label(self.column_frame, textvariable=self.special_var, font=self.font,
                                         fg=self.fg_special,
                                         bg=self.bg, padx=self.text_padding, pady=self.text_padding)
            self.special.pack(pady=self.text_padding)
            self.all_labels.append(self.special)
    def update_appearance_attributes(self, cfg):
        """Refresh the cached font/padding values from a new config."""
        self.cfg = cfg
        self.text_padding = self.cfg.text_padding
        self.font = f'{self.cfg.font_style} {self.cfg.font_size} {self.cfg.font_extra}'
    def update_appearance(self, cfg):
        """
        Configure() and update() all tk/custom elements in the fuel_column
        :param cfg: new appearance config to apply
        :return:
        """
        self.update_appearance_attributes(cfg)
        for label in self.all_labels:
            label.configure(font=self.font, padx=self.text_padding, pady=self.text_padding)
            label.update()
class FuelScreen(OverlayAbstract):
def __init__(self, parent_obj, telemetry: Telemetry, state: State, config_data, rounded=True):
super().__init__(parent_obj, rounded, overlay_type="fuel", config_data=config_data)
self.font: str | None = None
self.state: State = state
self.telemetry: Telemetry = telemetry
self.fuel_columns: list[FuelColumn] = []
self.create_fuelscreen_entries(respawn=False)
if self.rounded:
self.make_overlay_rounded()
self.master.wm_deiconify()
self.master.title("RacingInsights - Fuel calculator")
@exception_handler
def update_telemetry_values(self):
"""
The new values in self.telemetry are set in the corresponding stringvars
:return:
"""
if not self.state.ir_connected:
if self.parent_obj.settings_open:
self.update_widgets_with_dummy_values()
elif self.state.ir_connected:
self.update_widget_text_var("fuel_widget", self.telemetry.fuel)
self.update_widget_text_var("last_widget", self.telemetry.cons)
self.update_widget_text_var("avg_widget", self.telemetry.avg_cons)
self.update_widget_text_var("target_widget", self.telemetry.target_cons_current)
self.update_widget_special_var("target_widget", self.telemetry.target_cons_extra)
self.update_widget_text_var("range_widget", int(self.telemetry.laps_left_current))
self.update_widget_special_var("range_widget", int(self.telemetry.laps_left_extra))
self.update_widget_text_var("refuel_widget", self.telemetry.refuel * (1 + self.cfg.safety_margin / 100))
self.update_widget_text_var("finish_widget", self.telemetry.target_finish)
self.update_widget_text_var("remaining_widget", int(self.telemetry.laps_left_in_race))
def update_widgets_with_dummy_values(self):
self.update_widget_text_var("fuel_widget", float(0))
self.update_widget_text_var("last_widget", float(0))
self.update_widget_text_var("avg_widget", float(0))
self.update_widget_text_var("target_widget", float(0))
self.update_widget_special_var("target_widget", float(0))
self.update_widget_text_var("range_widget", int(0))
self.update_widget_special_var("range_widget", int(0))
self.update_widget_text_var("refuel_widget", float(0))
self.update_widget_text_var("finish_widget", float(0))
self.update_widget_text_var("remaining_widget", int(0))
def update_widget_special_var(self, widget_name, tm_value):
if tm_value < 0: # Not valid, set to 0 instead
tm_value = 0
if hasattr(self, widget_name):
widget_attr = getattr(self, widget_name)
if hasattr(widget_attr, "special_var"):
special_var = getattr(widget_attr, "special_var")
if isinstance(tm_value, int):
special_var.set(f"{tm_value}")
else:
special_var.set(f"{tm_value:.2f}")
def update_widget_text_var(self, widget_name, tm_value):
if tm_value < 0: # Not valid, set to 0 instead
tm_value = 0
if hasattr(self, widget_name):
widget_attr = getattr(self, widget_name)
if hasattr(widget_attr, "text_var"):
text_var = getattr(widget_attr, "text_var")
if isinstance(tm_value, int):
text_var.set(f"{tm_value}")
else:
text_var.set(f"{tm_value:.2f}")
def update_appearance(self):
self.master.wm_withdraw()
for fuel_column in self.fuel_columns:
if fuel_column: # Make sure it's not None
fuel_column.update_appearance(self.cfg)
self.overlay_frame.update()
if self.rounded:
self.make_overlay_rounded()
self.master.wm_deiconify()
    def reconstruct_overlay(self):
        """Destroy the current overlay_frame and rebuild it for the currently activated elements.

        Re-reads font/offset settings from cfg, respawns the fuel columns and
        re-applies the font to every label, then re-rounds the window corners
        if rounding is enabled.
        """
        self.font = f'{self.cfg.font_style} {self.cfg.font_size} {self.cfg.font_extra}'
        self.master.geometry(f"+{self.cfg.offset_right}+{self.cfg.offset_down}")
        self.create_fuelscreen_entries(respawn=True)
        self.overlay_frame.update()  # Important, otherwise it will not have the updated tk widgets
        for widget in self.fuel_columns:
            if widget:  # To ensure it's not None (deactivated widgets are stored as None)
                for label in widget.all_labels:
                    if hasattr(label, "configure"):
                        label.configure(font=self.font)
                        label.update()
        if self.rounded:
            self.make_overlay_rounded()
    def create_fuelscreen_entries(self, respawn):
        """Populate self.fuel_columns with a FuelColumn per activated widget (None for deactivated ones).

        :param respawn: when True, tear down and recreate the overlay_frame first
            so stale tk widgets are discarded.
        """
        if respawn:
            self.overlay_frame.pack_forget()
            self.overlay_frame.destroy()
            self.overlay_frame = tkinter.Frame(master=self.overlay_canvas, bg=self.cfg.bg_color)
            self.overlay_frame.pack()
            self.fuel_columns.clear()
            if self.rounded:
                # Create a temporary window, just to spawn the widgets before calculating/spawning the final window
                self.overlay_canvas.create_window(0, 0, window=self.overlay_frame)
        list_of_widgets = ["fuel", "last", "avg", "target", "range", "refuel", "finish", "remaining"]
        # Create the widgets in case they are activated in cfg, put in self.fuel_columns
        for widget_name in list_of_widgets:
            underscored_widget_name = widget_name.replace(" ", "_").replace("\n", "_")
            if not getattr(self.cfg,
                           f"{underscored_widget_name}_activated"):  # If widget shouldn't be activated, set to None
                setattr(self, f"{underscored_widget_name}_widget", None)
                self.fuel_columns.append(getattr(self, f"{underscored_widget_name}_widget"))
            else:  # If widget should be activated, spawn a FuelColumn instance with customized header name
                special_on = False  # Default
                # 'target' and 'range' carry a secondary value (special_var) alongside the main text.
                if widget_name == 'target' or widget_name == 'range':
                    special_on = True
                setattr(self, f"{underscored_widget_name}_widget",
                        FuelColumn(master=self.overlay_frame, cfg=self.cfg,
                                   header_name=f"{widget_name}".title(),
                                   special_on=special_on))
                self.fuel_columns.append(getattr(self, f"{underscored_widget_name}_widget"))
| RacingInsights/RacingInsights-V1 | previous_versions/Version_Before_Refactor/src/frontend/overlays/fuelscreen.py | fuelscreen.py | py | 9,554 | python | en | code | 0 | github-code | 90 |
25022315188 | #!/usr/bin/env python3
import sys
from sodacomm.graph import *
def show_scc(g):
    """Print every strongly connected component of directed graph g (Kosaraju's algorithm)."""
    n = g.size()
    stk = []
    # Pass 1: DFS on g, pushing vertices in order of finishing time.
    dfs_mark(g, stk)
    # Pass 2: DFS on the transposed graph, peeling vertices in reverse finishing order;
    # each tree found is one strongly connected component.
    T = transposition(g)
    visited = [False] * n
    while stk:
        i = stk.pop()
        if visited[i]:
            continue
        vex = []
        dfs_scc(T, i, visited, vex)
        print('scc: {}'.format(','.join(list(map(lambda x: g.vex_name(x), vex)))))
def transposition(g):
    """Return a copy of g with every edge reversed."""
    reversed_graph = g.clone_without_edges()
    for source in range(g.size()):
        for edge in g.adj_of(source):
            reversed_graph.add_edge(edge.adj, source)
    return reversed_graph
def dfs_scc(g, v, visited, vex):
    """Depth-first collect into vex every unvisited vertex reachable from v."""
    visited[v] = True
    vex.append(v)
    for edge in g.adj_of(v):
        neighbor = edge.adj
        if not visited[neighbor]:
            dfs_scc(g, neighbor, visited, vex)
def dfs_mark(g, stk):
    """Run DFS from every unvisited vertex, pushing vertices onto stk by finishing time."""
    n = g.size()
    visited = [False] * n  # reuse n instead of a redundant second g.size() call
    for i in range(n):
        if not visited[i]:
            _dfs_mark(g, stk, i, visited)
def _dfs_mark(g, stk, v, visited):
visited[v] = True
for p in g.adj_of(v):
if not visited[p.adj]:
_dfs_mark(g, stk, p.adj, visited)
stk.append(v)
def test(vex, edge):
    """Build an AdjList from vertex/edge text specs and print its SCCs."""
    g = AdjList.parse(vex, edge)
    show_scc(g)
def main():
    '''Find the strongly connected components of two sample graphs.'''
    test('a b c d e f g h',
         'a,b b,c b,e b,f c,d c,g d,c d,h e,a e,f f,g g,f g,h h,h')
    test('A B C D E F G H I J',
         'A,C B,A C,F C,B D,A D,C E,D E,C F,B F,G F,H H,G H,I I,J J,H')
# Run the demo cases only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| missingjs/soda | works/ita/c22/q05a.py | q05a.py | py | 1,452 | python | en | code | 0 | github-code | 90 |
29262454591 | # Solution to part 2 of day 8 of AOC 2020, Handheld Halting.
# https://adventofcode.com/2020/day/8
import sys
from computer import Computer
VERBOSE = ('-v' in sys.argv)  # pass -v for step-by-step tracing
filename = sys.argv[1]  # path to the boot-code program
# Try flipping each jmp->nop, then each nop->jmp, one instruction at a time,
# until a single flip lets the program terminate instead of looping forever.
for flip, flop in [('jmp', 'nop'), ('nop', 'jmp')]:
    if VERBOSE:
        print(flip, flop)
    change_line = 0
    done = False
    while not done:
        if VERBOSE:
            print('----')
            print(change_line)
        # Fresh machine for every candidate flip.
        comp = Computer()
        comp.load(filename)
        this_op, this_arg = comp.program[change_line]
        if VERBOSE:
            print(comp.program[change_line])
        # Flip the candidate instruction only if it matches the op being replaced.
        if this_op == flip:
            comp.program[change_line] = (flop, this_arg)
            if VERBOSE:
                print(comp.program[change_line])
        previous_instructions = []
        # Run until the program terminates or revisits an instruction (infinite loop).
        while True:
            if VERBOSE:
                comp.status()
            if comp.ip in previous_instructions or comp.terminated:
                break
            previous_instructions.append(comp.ip)
            comp.tick()
        if comp.terminated:
            comp.status()  # show the accumulator of the fixed program
        change_line += 1
        if change_line == comp.lines_of_code:
            done = True
| johntelforduk/advent-of-code-2020 | 08-handheld-halting/part2.py | part2.py | py | 1,176 | python | en | code | 2 | github-code | 90 |
15746770098 | # --------------------------------------
# Development start date: 23 Apr 2021
# --------------------------------------
from tkinter import *
# App main window
root = Tk()
root.geometry("235x328")  # fixed size matching the 4x5 button grid
root.title('Calculator')
# Class for realization calculator interface and functionality
# Class for realization calculator interface and functionality
class Calculator:
    """Tkinter calculator: a button grid feeding a single entry field.

    Button presses append tokens to ``self.main_list``; ``=`` evaluates the
    joined expression and replaces the token list with the result.
    """

    # Operator/point tokens that may not start the expression or repeat.
    OPERATORS = ('+', '-', '*', '/', '.')

    def __init__(self, root):
        # Tokens (digits/operators) entered so far, in press order.
        self.main_list = []
        # Input and output field.
        self.entry_main = Entry(root, width=35)
        self.entry_main.grid(row=0, column=0, pady=5, ipady=10, columnspan=4)
        # Buttons that simply append their own label to the expression.
        append_buttons = [
            ('1', 0, 1), ('2', 1, 1), ('3', 2, 1), ('+', 3, 1),
            ('4', 0, 2), ('5', 1, 2), ('6', 2, 2), ('-', 3, 2),
            ('7', 0, 3), ('8', 1, 3), ('9', 2, 3), ('*', 3, 3),
            ('.', 0, 4), ('0', 1, 4), ('/', 3, 4),
            ('(', 1, 5), (')', 2, 5),
        ]
        for text, column, row in append_buttons:
            # Bind text as a default argument so each lambda keeps its own value.
            Button(root, text=text, width=7, height=3,
                   command=lambda value=text: self.add_in_list(value)).grid(column=column, row=row)
        # Buttons with dedicated behaviour.
        Button(root, text='=', width=7, height=3, command=self.equally).grid(column=2, row=4)
        Button(root, text='C', width=7, height=3, command=self.clear).grid(column=0, row=5)
        Button(root, text='<', width=7, height=3, command=self.delete_last_symbol).grid(column=3, row=5)

    # Add numbers and operators to main list
    def add_in_list(self, value):
        """Append value to the expression if it passes the duplicate-operator check."""
        if self.check_sym(value):
            self.main_list.append(value)
            self.set_list_in_entry()

    # Output data in entry
    def set_list_in_entry(self):
        """Render the current token list in the entry field."""
        self.entry_main.delete(0, END)
        self.entry_main.insert(0, ''.join(self.main_list))

    # Check for duplicate operators
    def check_sym(self, value):
        """Return False for an operator pressed first or right after another operator."""
        if value not in self.OPERATORS:
            return True
        # Fix: guard against IndexError when the expression is still empty
        # (pressing an operator first used to crash with self.main_list[-1]).
        if not self.main_list:
            return False
        return self.main_list[-1] not in self.OPERATORS

    # Equally
    def equally(self):
        """Evaluate the expression and show the result (or an error marker)."""
        try:
            answer = eval(''.join(self.main_list))
        except (SyntaxError, ZeroDivisionError, NameError, ValueError):
            # Fix: a malformed expression or division by zero used to crash the app.
            self.main_list = []
            self.entry_main.delete(0, END)
            self.entry_main.insert(0, 'Error')
            return
        self.main_list = list(str(answer))
        self.set_list_in_entry()

    # Clear main entry
    def clear(self):
        """Reset the expression and the entry field."""
        self.main_list = []
        self.set_list_in_entry()

    # Delete last symbol
    def delete_last_symbol(self):
        """Remove the most recent token, ignoring presses on an empty expression."""
        # Fix: deleting from an empty expression used to raise IndexError.
        if self.main_list:
            del self.main_list[-1]
        self.set_list_in_entry()
# Build the UI and enter the Tk event loop.
calculator = Calculator(root)
root.mainloop()
17830764201 | #!/usr/bin/env python3
from ETA import ETA
# Sample per-item durations (seconds) used to exercise the ETA estimator.
times = []
times.append(84.43)
times.append(21.231)
# Estimate time remaining for 48 items, raw and formatted.
print(ETA(times, 48))
print("Time remaining {0} minutes".format(ETA(times, 48)))
| DavidLutton/Fragments | ETA/ETA_test.py | ETA_test.py | py | 180 | python | ja | code | 0 | github-code | 90 |
2119273136 | import base64
import requests
# Read the image and send it base64-encoded to the face-detection endpoint.
with open("/Users/quantum/Downloads/u=368725982,2532668121&fm=27&gp=0.jpg", "rb") as f:
    # b64encode encodes, b64decode decodes
    base64_data = b64encode(f.read()) if False else base64.b64encode(f.read())
    print(base64_data)
result = requests.post("http://ai-api.keruyun.com:5001/face_detect",
                       data={'base64_image_str': base64_data, 'appid': 2})
print(result.text)
| yuanjungod/StoreLayout | test_base64.py | test_base64.py | py | 444 | python | en | code | 0 | github-code | 90 |
72024095978 | import altair as alt
import numpy as np
import pandas as pd
import streamlit as st
class Plotting:
    """Altair chart helpers rendered into a Streamlit page."""
    def __init__(self):
        # Brand colors callers pass in as the series color.
        self.FOREST_GREEN = "#1d3c34"
        self.SUN_YELLOW = "#FFC358"
    def hourly_plot(self, y, COLOR, name):
        """Bar chart of an hourly series y (8760 values = one year) in kW.

        NOTE(review): the ``name`` parameter is currently unused — confirm
        whether it was meant to become the chart title.
        """
        x = np.arange(8760)
        source = pd.DataFrame({"x": x, "y": y})
        c = alt.Chart(source).mark_bar(size=0.75, color= COLOR).encode(
            x=alt.X("x", scale=alt.Scale(domain=[0,8760]), title="Timer i ett år"),
            y=alt.Y("y", title="kW"),
            #y=alt.Y("y", scale=alt.Scale(domain=[0,800]), title="kW"),
            color=alt.Color(legend=alt.Legend(orient='top', direction='vertical', title=None))).configure_axis(
            grid=True
        )
        st.altair_chart(c, use_container_width=True)
    def xy_plot(self, x, y, x_label, y_label, name, y_min, y_max):
        """Line chart of y against x with fixed y-axis bounds and title ``name``."""
        COLOR = self.FOREST_GREEN
        source = pd.DataFrame({"x": x, "y": y})
        c = alt.Chart(source).mark_line().encode(
            x=alt.X("x", scale=alt.Scale(domain=[0,len(x)]), title=x_label),
            y=alt.Y("y", scale=alt.Scale(domain=[y_min, y_max]), title=y_label),
            color = alt.value(COLOR)).properties(title=name)
        st.altair_chart(c, use_container_width=True)
1282344194 | import pdftotext
import os
import re
import constants
import csv
import datetime
"""
Luckily, all of the account value and withdrawal/deposit information is on the first page.
Unfortunately, the statements has some inconsistencies. Some statements have this near the top:
Envelope # BLRJWCBBCCJJS
$42.25
Change from Last Period:
Some statements have this section, followed by the #s associated with them
Beginning Account Value
Additions
Subtractions
Transaction Costs, Fees & Charges
Change in Investment Value *
Ending Account Value **
Accrued Interest (AI)
Ending Account Value Incl. AI
After some entries, there could be an asterisk.
The `Accrued Interest (AI)` and `Ending Account Value Incl. AI` are only on some statements.
There could be entries for `Additions` and `Subtractions`. If Subtractions is an entry, then there could be also an entry for `Transaction Costs, Fees & Charges`
The account value is from the entry for `Ending Account Value`
"""
#subtract 1 b/c PDF pages are 1-indexed
FIDELITY_ACCOUNT_VALUE_PAGE = 1 - 1
# (account_number, 'YYYY-MM') -> [deposits, withdrawals, ending account value]
entries = {}
def bookkeep_month_entry(date, account_value, deposits, withdraws, account_num):
    """Record one statement month in the module-level ``entries`` dict."""
    entries[(account_num, date)] = [deposits, withdraws, account_value]
def parse_statement(pdf_path):
    """Parse one Fidelity statement PDF.

    Returns ((account_num, 'YYYY-MM'), [contributions, withdrawals, ending_value]).
    All figures live on the first page, but the layout varies between
    statements (see module docstring), so the parsing is regex-driven.
    """
    # Load your PDF and keep only the page holding the account-value summary.
    with open(pdf_path, "rb") as f:
        pdf = pdftotext.PDF(f)
    txt = pdf[FIDELITY_ACCOUNT_VALUE_PAGE]
    # get the 2nd line; ex: "February 1, 2022 - February 28, 2022"
    date_builder = []
    is_on_second_line = False
    for i in range(100):
        char = txt[i]
        if char == "\n":
            if is_on_second_line:
                break
            else:
                is_on_second_line = True
        if is_on_second_line:
            date_builder.append(char)
    raw_date = "".join(date_builder)
    ending_date = raw_date.split("-")[1]  # the latter half of the period
    formatted_date = datetime.datetime.strptime(ending_date, ' %B %d, %Y').strftime('%Y-%m')
    # Optional summary lines differ between statement formats; regexes are raw
    # strings (fix: the originals relied on invalid "\-"/"\(" escapes in
    # ordinary strings, which emit DeprecationWarnings).
    regex = r"Beginning Account Value\n(Additions\n)?(Subtractions\n)?(Transaction Costs, Fees & Charges\n)?Change in Investment Value[\* ]*\nEnding Account Value[\* ]*\n(Accrued Interest \(AI\)\n)?(Ending Account Value Incl. AI\n)?"
    line_entries = re.findall(regex, txt)
    subtractions_entry = False
    additions_entry = False
    transactions_costs_fees_charges_entry = False
    accured_interest_entry = False
    ending_account_val_including_accured_interest_entry = False
    if len(line_entries) == 0:
        # the statement has a different format; try the line-by-line regex
        regex = r"Beginning Account Value|Additions|Subtractions|Transaction Costs, Fees & Charges|Change in Investment Value|Ending Account Value|Accrued Interest \(AI\)|Ending Account Value Incl. AI"
        line_entries = re.findall(regex, txt)
    else:
        line_entries = line_entries[0]
    # determine which optional entries are present on this statement
    for entry in line_entries:
        entry = entry.replace("\n", "")
        if entry == "Additions":
            additions_entry = True
        elif entry == "Subtractions":
            subtractions_entry = True
        elif entry == "Transaction Costs, Fees & Charges":
            # note, this debit is already included in subtractions
            transactions_costs_fees_charges_entry = True
        elif entry == "Accrued Interest (AI)":
            accured_interest_entry = True
        elif entry == "Ending Account Value Incl. AI":
            ending_account_val_including_accured_interest_entry = True
    # grab the #s associated with each entry
    entry_data_regex = r"This Period\n\nYear-to-Date\n\n([\s\S]*)$"
    target_text = re.findall(entry_data_regex, txt)
    if not target_text:
        # if "This Period" text is found elsewhere
        entry_data_regex = r"Year-to-Date\n\n([\s\S]*)$"
        target_text = re.findall(entry_data_regex, txt)
    digits_regex = r"\-?\$?([\d,]*\.\d\d)|\n-\n"
    # prepending "\n" to the search text is because the digits_regex will not pick up the first "-" unless it's there
    # however, this is an edge case because only on the first account statement (there, the Beginning Account value
    # is going to be 0, so it's represented with a "-")
    digits = re.findall(digits_regex, "\n" + target_text[0])
    next_digit_iterator = iter(digits)
    # Values appear in pairs (this period, year-to-date), in document order;
    # optional sections only consume values when present.
    begn_account_val_this_period = next(next_digit_iterator)
    begn_account_val_ytd = next(next_digit_iterator)
    additions_this_period, additions_ytd = 0, 0
    if additions_entry:
        additions_this_period = next(next_digit_iterator)
        additions_ytd = next(next_digit_iterator)
    subtractions_this_period, subtractions_ytd = 0, 0
    if subtractions_entry:
        subtractions_this_period = next(next_digit_iterator)
        subtractions_ytd = next(next_digit_iterator)
    if transactions_costs_fees_charges_entry:
        transactions_costs_fees_charges_this_period = next(next_digit_iterator)
        transactions_costs_fees_charges_ytd = next(next_digit_iterator)
    change_in_investment_val_this_period = next(next_digit_iterator)
    change_in_investment_val_ytd = next(next_digit_iterator)
    ending_account_val_this_period = next(next_digit_iterator)
    ending_account_val_ytd = next(next_digit_iterator)
    # both of these should be true, but separating them just in case
    accured_interest, ending_account_val_including_accured_interest = 0, 0
    if accured_interest_entry:
        accured_interest = next(next_digit_iterator)
    if ending_account_val_including_accured_interest_entry:
        ending_account_val_including_accured_interest = next(next_digit_iterator)
    def verify_and_caste_to_float(num):
        # Empty string / None / 0 all mean "no value on the statement".
        if num:
            # Fix: strip thousands separators — float("1,234.56") raises ValueError,
            # so any figure >= $1,000 used to crash the parser.
            return float(str(num).replace(",", ""))
        else:
            return 0
    # note: the regex did not take into account (+) or (-) nums; this is because their signs are all intuitive
    beginning_account_val = verify_and_caste_to_float(begn_account_val_this_period)
    account_val_change = verify_and_caste_to_float(change_in_investment_val_this_period)
    contributions = verify_and_caste_to_float(additions_this_period)
    withdrawals = verify_and_caste_to_float(subtractions_this_period) * -1
    account_val = verify_and_caste_to_float(ending_account_val_this_period)
    # Sanity check: begin + change + deposits + withdrawals should equal the
    # ending value; some statements flip the sign of the change, hence "check 2".
    added_together = beginning_account_val + account_val_change + contributions + withdrawals
    if round(added_together, 2) == account_val:
        print("check")
    else:
        added_together = beginning_account_val - account_val_change + contributions + withdrawals
        if round(added_together, 2) == account_val:
            print("check 2")
        else:
            print("didn't add up")
    # back out transaction costs, fees, and charges from withdrawals
    # since withdrawals is a (-) num, we add
    if transactions_costs_fees_charges_entry:
        withdrawals += verify_and_caste_to_float(transactions_costs_fees_charges_this_period)
    account_num_regex = r"Account Number: ([A-Z0-9]*-[A-Z0-9]*)"
    account_num = re.findall(account_num_regex, txt)[0]
    print(account_num)
    key = (account_num, formatted_date)  # tuple so it can be the key in a dict
    payload = [contributions, withdrawals, account_val]
    return (key, payload)
73844658856 | """
# Machine Learning Online Class - Exercise 2: Logistic Regression
"""
from gradient import gradient
from sigmoid import sigmoid
from predict import predict
from plotDecisionBoundary import plotDecisionBoundary
from costFunction import costFunction
from plotData import plotData
import scipy.optimize as op
import matplotlib.pyplot as plt
import numpy as np
def pause():
    """Block until the user presses Enter."""
    input("")
# Load Data
# The first two columns contains the exam scores and the third column
# contains the label.
data = np.loadtxt('ex2data1.txt', delimiter =",")
X = data[:, 0:2] #x refers to the population size in 10,000s
y = data[:, 2] #y refers to the profit in $10,000s
m = y.size #umber of training examples
y = y.reshape((m,1))
"""## Part 1: Plotting ====================
We start the exercise by first plotting the data to understand the
the problem we are working with."""
#scatter plot
print("Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.")
line_pos, line_neg = plotData(X, y, "Exam 1","Exam 2", "Admitted","Not Admitted")
plt.legend(handles=[line_pos,line_neg])
plt.show(block=False)
print("\nProgram paused. Press enter to continue.\n")
pause()
"""## Part 2: Compute Cost and Gradient """
# Setup the data matrix appropriately, and add ones for the intercept term
m, n = X.shape
#Add intercept term to x and X_test
X = np.c_[np.ones((m, 1)), X]
#Initialize fitting parameters
initial_theta = np.zeros((n + 1, 1))
#Compute and display initial cost and gradient
cost, grad = costFunction(initial_theta, X, y), gradient(initial_theta, X, y)
print("Cost at initial theta (zeros): ", cost, "\n")
print("Expected cost (approx): 0.693\n")
print('Gradient at initial theta (zeros): \n')
print(grad)
print("Expected gradients (approx):\n -0.1000\n -12.0092\n -11.2628\n")
#Compute and display cost and gradient with non-zero theta
test_theta = np.array([[-24], [0.2], [0.2]])
cost, grad = costFunction(test_theta, X, y), gradient(test_theta, X, y)
print("\nCost at test theta:", cost, "\n")
print("Expected cost (approx): 0.218\n")
print("Gradient at test theta: \n")
print(grad)
print("Expected gradients (approx):\n 0.043\n 2.566\n 2.647\n")
print("\nProgram paused. Press enter to continue.\n")
pause()
"""## Part 3: Optimizing using scipy.optimize (equivalent to fminunc in matlab)"""
Result = op.minimize(fun = costFunction, x0 = initial_theta, args = (X, y), method = 'TNC', jac = gradient)
optimal_theta = Result.x
print("optimal theta", optimal_theta)
#Plot Boundary
boundary_line = plotDecisionBoundary(optimal_theta, X, y)
plt.legend(handles=[line_pos,line_neg, boundary_line])
plt.show(block=False)
print("\nProgram paused. Press enter to continue.\n")
pause()
"""## Part 4: Predict and Accuracies
After learning the parameters, you'll like to use it to predict the outcomes
on unseen data. In this part, you will use the logistic regression model
to predict the probability that a student with score 45 on exam 1 and
score 85 on exam 2 will be admitted.
Furthermore, you will compute the training and test set accuracies of
our model.
Predict probability for a student with score 45 on exam 1
and score 85 on exam 2 """
theta = optimal_theta
prob = sigmoid(np.dot(np.array([[1, 45, 85]]), theta))
print("For a student with scores 45 and 85, we predict an admission probability of f\n", prob)
print("Expected value: 0.775 +/- 0.002\n\n")
#Compute accuracy on our training set
p = predict(theta, X)
y = y.reshape((m))
print("Train Accuracy: ",np.multiply(np.mean((p == y).astype(int)), 100))
print("Expected accuracy (approx): 89.0\n")
print("\n")
| hzitoun/machine_learning_from_scratch_matlab_python | algorithms_in_python/week_3/ex2/ex2.py | ex2.py | py | 3,649 | python | en | code | 30 | github-code | 90 |
17955733689 | from collections import Counter
# Count how many distinct values occur an odd number of times in the input.
n = int(input())
lst = []
for _ in range(n):
    lst.append(int(input()))
C_lst = Counter(lst)
cnt = 0
for i in C_lst.values():
    if i % 2 != 0:
        cnt += 1
print(cnt)
10242228549 | # -*- coding: <utf-8> -*-
from datetime import datetime, timedelta
from sqlalchemy import Column, MetaData
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.orm import sessionmaker, relationship, backref
from sqlalchemy.types import Integer, String, Text, DateTime, Boolean
from .master_import import Category
from .master_import import db
metadata = MetaData()
# Many-to-many join table between programmes and categories.
association_table = db.Table('programmes_categories',
    db.Column('programme_id', Integer, db.ForeignKey('programmes.id')),
    db.Column('category_id', Integer, db.ForeignKey('categories.id'))#,
    #UniqueConstraint('programme_id', 'category_id', name='uix_1')
)
class Programme(db.Model):
    """A single EPG programme (broadcast) on a channel, with optional categories."""
    __tablename__ = 'programmes'
    id = db.Column(Integer, primary_key=True)
    title = db.Column(String(255), index=True)
    subtitle = db.Column(String(255))
    description = db.Column(db.Text)
    series = db.Column(Boolean, nullable = False, default=False)
    start_time = db.Column(db.DateTime, index=True)
    stop_time = db.Column(db.DateTime, index=True)
    duration = db.Column(Integer)
    episode_num = db.Column(String(255), index=True)
    channel_id = db.Column(db.Integer, db.ForeignKey('channels.id'))
    channel = db.relationship('Channel', primaryjoin="Programme.channel_id == Channel.id",
                              backref=db.backref('programmes', lazy='dynamic'))
    categories = db.relationship('Category', secondary=association_table,
                                 backref=db.backref('programmes', lazy='dynamic'))

    def __init__(self, channel, title, subtitle, description, start_time, stop_time, duration, episode_num=None, series=False, categories=None):
        # Fix: the default for ``categories`` was a shared mutable list literal;
        # use None and substitute a fresh list per call.
        if categories is None:
            categories = []
        self.title = title
        self.subtitle = subtitle
        self.description = description
        self.start_time = start_time
        self.stop_time = stop_time
        self.channel = channel
        self.duration = duration
        self.episode_num = episode_num
        self.series = series
        self.categories = self.mapToCategory(categories)

    def mapToCategory(self, listOfCategoryTexts):
        """Translate category names into Category rows (looked up by name)."""
        return [Category.getByName(category_name) for category_name in listOfCategoryTexts]

    @staticmethod
    def clear():
        """Delete every programme and persist the change.

        Fix: ``db.session.flush`` and ``db.session.commit`` were referenced
        without parentheses, so they were never actually called.
        """
        Programme.query.delete()
        db.session.flush()
        db.session.commit()

    def add(self, commit=False):
        """Stage this programme for insertion; commit immediately when requested."""
        db.session.add(self)
        if commit:
            db.session.commit()

    @staticmethod
    def with_title(title):
        """Upcoming programmes (any channel) with an exact title, ordered by start time."""
        return Programme.query.\
            filter(Programme.start_time > datetime.now()).\
            filter(Programme.title == title).\
            order_by(Programme.start_time).\
            all()

    @staticmethod
    def on_channel_with_title(channel, title):
        """Upcoming programmes on one channel with an exact title, ordered by start time."""
        return Programme.query.\
            filter(Programme.start_time > datetime.now()).\
            filter(Programme.title == title).\
            filter(Programme.channel==channel).\
            order_by(Programme.start_time).\
            all()

    @staticmethod
    def on_channel_with_title_and_start_time(channel, title, start_time):
        """Programmes on a channel with a title starting within +/- 1 minute of start_time."""
        return Programme.query.\
            filter(Programme.channel == channel).\
            filter(Programme.title == title).\
            filter(Programme.start_time.between((start_time - timedelta(minutes=1)), (start_time + timedelta(minutes=1)))).\
            order_by(Programme.start_time).\
            all()

    @staticmethod
    def titles_containing(text):
        """Up to 8 upcoming programmes whose title contains text, ordered by title."""
        return Programme.query.\
            filter(Programme.title.like('%' + text + '%')).\
            filter(Programme.start_time > datetime.now()).\
            order_by(Programme.title).\
            limit(8).all()

    @staticmethod
    def with_title_containing(text):
        """Up to 20 upcoming programmes whose title contains text, ordered by start time."""
        return Programme.query.\
            filter(Programme.start_time > datetime.now()).\
            filter(Programme.title.like('%' + text + '%')).\
            order_by(Programme.start_time).\
            limit(20).all()

    @staticmethod
    def _current_programme_for(channel, now):
        """JSON-friendly tuple for the programme airing on channel at ``now`` (None if hidden).

        NOTE(review): the query's first() may return None when nothing is
        airing; confirm json_friendly_tuple handles that.
        """
        from .master_import import json_friendly_tuple
        if not channel.hidden:
            programme = db.session.query(Programme.id, Programme.title, Programme.start_time, Programme.duration).\
                filter(Programme.channel == channel).\
                filter(Programme.start_time <= now).\
                filter(Programme.stop_time > now).\
                first()
            return json_friendly_tuple(programme)

    @staticmethod
    def _upcoming_programmes_for(channel, limit, now):
        """Up to ``limit`` JSON-friendly tuples for programmes starting after ``now``."""
        from .master_import import json_friendly_tuple
        if channel.hidden:
            return []
        results = db.session.query(Programme.id, Programme.title, Programme.start_time, Programme.duration).filter(Programme.channel == channel).filter(Programme.start_time > now).order_by(Programme.start_time).limit(limit).all()
        return [json_friendly_tuple(programme) for programme in results]

    def __repr__(self):
        return '<Programme serialized: %s>' % (self.serialize)

    @property
    def serialize(self):
        """Return object data in an easily serializeable (JSON-friendly) format.

        Both 'start_time' and the legacy 'startTime' keys are emitted for
        backward compatibility with existing consumers.
        """
        from .master_import import safe_value
        from dateutil.tz import tzlocal
        return {
            'id' : self.id,
            'title': safe_value(self.title),
            'subtitle' : safe_value(self.subtitle),
            'description' : safe_value(self.description),
            'start_time' : self.start_time.replace(tzinfo=tzlocal()).isoformat(),
            'startTime' : self.start_time.replace(tzinfo=tzlocal()).isoformat(),
            'stop_time' : self.stop_time.replace(tzinfo=tzlocal()).isoformat(),
            'duration' : self.duration,
            'channel' : self.channel.serialize if self.channel is not None else None
        }
| olefriis/simplepvr | python/simplepvr/simple_pvr/programme.py | programme.py | py | 6,194 | python | en | code | 12 | github-code | 90 |
70522259818 | # generator is a function that yield multiple values (not return a single value)
def simple_generator():
    """Yield the integers 1, 2 and 3 in order."""
    yield from (1, 2, 3)
def Main():
    """Demonstrate consuming a generator by iteration and by manual stepping."""
    # generator as a function: iterate it like any iterable
    for value in simple_generator():
        print(value)
    # generator as an object: drive it with the built-in next()
    # (idiomatic replacement for calling x.__next__() directly)
    x = simple_generator()
    print(next(x))
    print(next(x))
    print(next(x))
# Run the demo only when executed directly, not on import.
if __name__ == '__main__':
    Main()
20490144090 | from django.contrib import admin
from django_json_widget.widgets import JSONEditorWidget
from .filters import *
from .models import *
from base.admin import *
from base.filters import *
from base.models import *
from borrowers.filters import *
# Register your models here.
class LoanDataAdmin(JSONBaseAdmin, BaseAdmin, admin.ModelAdmin):
    """Admin for raw lender-API request/response payloads attached to a loan."""
    list_display = ('app', 'lender_api', 'response_code')
    list_filter = (SuccessFilter, AppFilter, LenderAPIFilter, LenderNestedFilter)
    search_fields = ('app__lmsid', 'request', 'response')
    autocomplete_fields = ('loan', 'lender_api')
    fields = (('loan', 'lender_api', 'response_code'), ('request', 'response'))
class LoanDataInlineAdmin(JSONBaseAdmin, BaseAdmin, admin.TabularInline):
    """Read-only inline of successful LoanData rows, ordered by lender-API priority."""
    model = LoanData
    exclude = ('app', 'request', 'response_code') + BaseAdmin.exclude
    ordering = ('lender_api__priority',)
    # max_num=0 and extra=0 make the inline display-only (no add forms).
    max_num = 0
    extra = 0
    def get_queryset(self, request):
        # Only successful, active rows, in lender priority order.
        queryset = super().get_queryset(request).filter(is_success).active(
        ).order_by('lender_api__priority')
        return queryset
    def has_delete_permission(self, request, obj=None):
        # Rows are an audit trail; never deletable from the inline.
        return False
class LoanAdmin(BaseAdmin, admin.ModelAdmin):
    """Admin for loans, with their API payload history shown inline."""
    list_display = ('app', 'lender')
    list_filter = (LenderFilter, LMSNestedFilter)
    search_fields = ('app__lmsid',)
    autocomplete_fields = ('app', 'lender')
    fields = (('app', 'lender',),)
    inlines = (LoanDataInlineAdmin,)
class LenderSystemAPIAdmin(APIBaseAdmin, JSONBaseAdmin, BaseAdmin, admin.ModelAdmin):
    """Admin for individual lender-system API endpoints, filterable by lender."""
    autocomplete_fields = ('lender',)
    # towhom/towhom_filter are consumed by APIBaseAdmin to scope the list view.
    towhom_filter = LenderFilter
    towhom = 'lender'
class LenderSystemAPIInlineAdmin(APIBaseInlineAdmin, BaseAdmin, admin.TabularInline):
    """Inline editor for a lender system's API endpoints."""
    model = LenderSystemAPI
class LenderSystemAdmin(ServiceBaseAdmin, JSONBaseAdmin, BaseAdmin, admin.ModelAdmin):
    """Admin for lender systems, with their API endpoints editable inline."""
    inlines = (LenderSystemAPIInlineAdmin,)
# Wire each model to its admin class.
admin.site.register(Loan, LoanAdmin)
admin.site.register(LoanData, LoanDataAdmin)
admin.site.register(LenderSystem, LenderSystemAdmin)
admin.site.register(LenderSystemAPI, LenderSystemAPIAdmin)
| fasih/lender-integration | app/lenders/admin.py | admin.py | py | 2,120 | python | en | code | 0 | github-code | 90 |
28834228107 | # 1
def multiple_of_three(number):
    """Return True when number is evenly divisible by 3."""
    # The comparison already yields a bool; no if/else branching needed.
    return number % 3 == 0
print(multiple_of_three(39))
# 2
def get_currency_symbol_from_code(currency):
    """Map a currency code (case-insensitive) to its symbol.

    Returns the fallback message "write currency correctly" for unknown codes,
    matching the original behaviour.
    """
    symbols = {"GEL": "ლ", "USD": "$", "EUR": "€"}
    # Dict lookup replaces the if/elif chain; .get supplies the fallback.
    return symbols.get(currency.upper(), "write currency correctly")
print(get_currency_symbol_from_code("usd"))
# 3
def transform_to_uppercase(string):
    """Print the upper-cased version of string."""
    print(string.upper())
transform_to_uppercase("my name is joe")
# 4
def profit(price_for_sale, price_for_company):
    """Print the company's percentage profit on a single sale."""
    margin = price_for_sale - price_for_company
    percent = margin / price_for_company * 100
    print(f'company profit for this sale is {percent}%')
profit(1250, 1000)
# 5
unfiltered_numbers = [2342, 234, 5123, 42356, 1345, 8939, 3434, 9843]
def even_numbers(number):
    """Predicate for filter(): True when number is even."""
    return not number % 2
filtered_numbers = list(filter(even_numbers, unfiltered_numbers))
print(filtered_numbers)
# 6
players = [
{
"name": "khvicha kvaratskhelia",
"rank": "pro",
"goals": 142,
},
{
"name": "victor osimen",
"rank": "pro",
"goals": 192,
},
{
"name": "gia suramelashvili",
"rank": "legend",
"goals": 323,
}
]
# 6
def search_in_array(array, target_name="victor osimen"):
    """Print and return the first player dict whose 'name' equals target_name.

    Generalized: the hard-coded name is now a parameter whose default keeps
    the original behaviour; the match (or None) is also returned so callers
    can use it programmatically.
    """
    search_player = next(
        (player for player in array if player['name'] == target_name), None)
    print(search_player)
    return search_player
search_in_array(players)
| saba-ab/homework26 | app.py | app.py | py | 1,669 | python | en | code | 0 | github-code | 90 |
18386262809 | n=int(input())
# Brute force: try every pair's difference vector (p, q) as the common step,
# then greedily chain points along that step; the answer is the fewest chains.
if n==1:
    print(1)
    exit()
x=[]
for i in range(n):
    a,b=map(int,input().split())
    x.append([a,b])
x.sort()
ans=float("inf")
for i in range(n-1):
    for j in range(i+1,n):
        # Candidate step vector taken from points i and j.
        p,q=x[j][0]-x[i][0],x[j][1]-x[i][1]
        cnt=0
        flg=[False for i in range(n)]
        for k in range(n):
            if flg[k]==False:
                # Start a new chain at the first unmarked point.
                flg[k]=True
                cnt+=1
                fl=True
                xx,yy=x[k][0],x[k][1]
                while fl:
                    # Walk forward by (p, q) as long as the next point exists.
                    nx,ny=xx+p,yy+q
                    if [nx,ny] in x:
                        a=x.index([nx,ny])
                        flg[a]=True
                        xx,yy=nx,ny
                    else:
                        fl=False
        if cnt<ans:
            ans=cnt
print(ans)
31384103401 | import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from nlplay.models.pytorch.activations import *
from nlplay.utils.utils import human_readable_size
def set_seed(seed: int = 123) -> None:
    """Seed the numpy and torch (CPU + current CUDA device) RNGs for reproducibility."""
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
def get_activation_func(activation_func_name: str = "relu"):
    """Return a freshly constructed activation module for the given name.

    Returns None for "none"; raises ValueError for unknown names.
    Fix: the original compared strings with ``is`` (identity), which only
    works by accident of CPython string interning; dict dispatch uses
    proper equality.
    """
    # Factories (not instances) so only the requested module is constructed.
    # The custom activations come from nlplay.models.pytorch.activations and
    # are wrapped in lambdas to defer the name lookup until actually needed.
    factories = {
        "none": lambda: None,
        "relu": nn.ReLU,
        "relu6": nn.ReLU6,
        "prelu": nn.PReLU,
        "elu": nn.ELU,
        "gelu": nn.GELU,
        "selu": nn.SELU,
        "leakyrelu": nn.LeakyReLU,
        "sigmoid": nn.Sigmoid,
        "tanh": nn.Tanh,
        "hardtanh": nn.Hardtanh,
        "tanhshrink": nn.Tanhshrink,
        "hardshrink": nn.Hardshrink,
        "softshrink": nn.Softshrink,
        "softsign": nn.Softsign,
        "softplus": nn.Softplus,
        "mish": lambda: Mish(),
        "ftswishplus": lambda: FTSwishPlus(),
        "lightrelu": lambda: LightRelu(),
        "trelu": lambda: TRelu(),
    }
    try:
        factory = factories[activation_func_name]
    except KeyError:
        raise ValueError("[!] Invalid activation function.") from None
    return factory()
def embeddings_to_cosine_similarity_matrix(embedding: torch.Tensor):
    """
    Title : Converts a tensor of n embeddings to an (n, n) tensor of similarities.
    Authors : Dillon Erb - https://github.com/dte
    Papers : ---
    Source : https://gist.github.com/dte/e600bb76e72854379f4a306c1873f2c2#file-vectorized_cosine_similarities-py

    Returns the matrix S with S[i, j] = cos(e_i, e_j).
    """
    dot = embedding @ embedding.t()
    norm = torch.norm(embedding, 2, 1)
    # cos(i, j) = <e_i, e_j> / (||e_i|| * ||e_j||): divide rows by norm_i and
    # columns by norm_j.  The original divided by the column norms twice
    # (both broadcasts hit the last dimension), which is not cosine similarity.
    x = torch.div(dot, norm.unsqueeze(1))
    x = torch.div(x, norm.unsqueeze(0))
    return x
def masked_softmax(vector, mask, dim=-1, memory_efficient=False, mask_fill_value=-1e32):
    """
    Title : A masked softmax module to correctly implement attention in Pytorch.
    Authors : Bilal Khan / AllenNLP
    Papers : ---
    Source : https://github.com/bkkaggle/pytorch_zoo/blob/master/pytorch_zoo/utils.py
             https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py
    ``torch.nn.functional.softmax(vector)`` does not work if some elements of ``vector`` should be
    masked. This performs a softmax on just the non-masked portions of ``vector``. Passing
    ``None`` in for the mask is also acceptable; you'll just get a regular softmax.
    ``vector`` can have an arbitrary number of dimensions; the only requirement is that ``mask`` is
    broadcastable to ``vector's`` shape. If ``mask`` has fewer dimensions than ``vector``, we will
    unsqueeze on dimension 1 until they match. If you need a different unsqueezing of your mask,
    do it yourself before passing the mask into this function.
    If ``memory_efficient`` is set to true, we will simply use a very large negative number for those
    masked positions so that the probabilities of those positions would be approximately 0.
    This is not accurate in math, but works for most cases and consumes less memory.
    In the case that the input vector is completely masked and ``memory_efficient`` is false, this function
    returns an array of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of
    a model that uses categorical cross-entropy loss. Instead, if ``memory_efficient`` is true, this function
    will treat every element as equal, and do softmax over equal numbers.
    Args:
        vector (torch.tensor): The tensor to softmax.
        mask (torch.tensor): The tensor to indicate which indices are to be masked and not included in the softmax operation.
        dim (int, optional): The dimension to softmax over.
            Defaults to -1.
        memory_efficient (bool, optional): Whether to use a less precise, but more memory efficient implementation of masked softmax.
            Defaults to False.
        mask_fill_value ([type], optional): The value to fill masked values with if `memory_efficient` is `True`.
            Defaults to -1e32.
    Returns:
        torch.tensor: The masked softmaxed output
    """
    if mask is None:
        result = torch.nn.functional.softmax(vector, dim=dim)
    else:
        mask = mask.float()
        while mask.dim() < vector.dim():
            mask = mask.unsqueeze(1)
        if not memory_efficient:
            # To limit numerical errors from large vector elements outside the mask, we zero these out.
            result = torch.nn.functional.softmax(vector * mask, dim=dim)
            result = result * mask
            result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
        else:
            # Bug fix: masked_fill requires a *boolean* mask in current PyTorch;
            # the previous ``(1 - mask).byte()`` form is deprecated/rejected.
            masked_vector = vector.masked_fill(~mask.bool(), mask_fill_value)
            result = torch.nn.functional.softmax(masked_vector, dim=dim)
    return result
def masked_log_softmax(vector, mask, dim=-1):
    """Compute ``log_softmax`` over *vector* while ignoring masked positions.

    Adapted from AllenNLP
    (https://github.com/allenai/allennlp/blob/master/allennlp/nn/util.py).

    Args:
        vector (torch.tensor): Logits to normalize; any number of dimensions.
        mask (torch.tensor): 1/0 mask broadcastable to ``vector``; positions
            with 0 are excluded.  If it has fewer dimensions than ``vector``
            it is unsqueezed on dimension 1 until they match.  ``None`` falls
            back to a plain log_softmax.
        dim (int, optional): Dimension to normalize over.  Defaults to -1.

    Returns:
        torch.tensor: Masked log-probabilities.  Fully-masked rows yield
        arbitrary (but finite, non-NaN) values; mask the result downstream.
    """
    if mask is None:
        return torch.nn.functional.log_softmax(vector, dim=dim)
    mask = mask.float()
    while mask.dim() < vector.dim():
        mask = mask.unsqueeze(1)
    # Adding log(mask) zeroes masked logits in log-space; the 1e-45 offset is
    # the smallest value that keeps log() finite when a position (or an
    # entire row) is masked out.
    return torch.nn.functional.log_softmax(vector + (mask + 1e-45).log(), dim=dim)
def get_gpu_info(device):
    """Return a one-line summary of a CUDA device and its memory usage.

    Args:
        device: A CUDA device index or ``torch.device`` accepted by the
            ``torch.cuda`` query functions.

    Returns:
        str: "<device name> - Memory: <allocated> / <total>".
    """
    device_name = torch.cuda.get_device_name(device)
    # major, minor = torch.cuda.get_device_capability(device)
    # device_capability = "CUDA Compute Capability: {}.{}".format(major, minor)
    # human_readable_size is a project helper (nlplay.utils.utils); presumably
    # it renders a byte count like "1.5 GB" -- verify against its definition.
    mem_tot = human_readable_size(torch.cuda.get_device_properties(device).total_memory)
    mem_alloc = human_readable_size(torch.cuda.memory_allocated(device))
    out = "{} - Memory: {} / {}".format(device_name, mem_alloc, mem_tot)
    return out
def char_vectorizer(X, vocab, max_seq: int = 1014):
    """
    Function to transform input sentences into a one encoded matrix
    of a form [Sentence Index x Sentence Length x Vocabulary size],
    so that it can be directly fed into a Conv1D layer
    :param X: list of input sentences to be processed
    :param vocab: dict mapping characters to column indices
    :param max_seq: maximum number of characters kept per sentence
    :return: (nd.array): vectorized sentences, shape (len(X), max_seq, len(vocab))
    """
    vocab_size = len(vocab)
    output = np.zeros((len(X), max_seq, vocab_size))
    for i, sentence in enumerate(X):
        # Text is lower-cased and spaces are dropped before encoding;
        # anything past max_seq characters is truncated.
        chars = sentence.lower().replace(" ", "")
        for pos, c in enumerate(chars[:max_seq]):
            # Out-of-vocabulary characters leave an all-zero row but still
            # consume a position, matching the original behaviour.
            # (Writing single cells also avoids the np.int alias, which was
            # removed in NumPy 1.24.)
            ix = vocab.get(c)
            if ix is not None:
                output[i, pos, ix] = 1
    return output
def init_tensor(
    tensor,
    init_type="XAVIER_UNIFORM",
    low=0,
    high=1,
    mean=0,
    std=1,
    activation_type="linear",
    fan_mode="FAN_IN",
    negative_slope=0,
):
    """Initialize *tensor* in place according to *init_type*.

    Args:
        tensor: Tensor to be initialized.
        init_type: One of UNIFORM, NORMAL, XAVIER_UNIFORM, XAVIER_NORMAL,
            KAIMING_UNIFORM, KAIMING_NORMAL, ORTHOGONAL.
        low/high: Bounds of the uniform distribution (UNIFORM only).
        mean/std: Parameters of the normal distribution (NORMAL only).
        activation_type: Used to derive the gain (xavier/orthogonal) or the
            nonlinearity (kaiming).
        fan_mode: Fan mode for kaiming init.
        negative_slope: Negative slope for kaiming init.

    Returns:
        The initialized tensor (same object, modified in place).

    Raises:
        TypeError: If *init_type* is not recognised.
    """
    init = torch.nn.init
    if init_type == "UNIFORM":
        return init.uniform_(tensor, a=low, b=high)
    if init_type == "NORMAL":
        return init.normal_(tensor, mean=mean, std=std)
    # Gain-based initializers share the calculate_gain(...) call.
    gain_based = {
        "XAVIER_UNIFORM": init.xavier_uniform_,
        "XAVIER_NORMAL": init.xavier_normal_,
        "ORTHOGONAL": init.orthogonal_,
    }
    if init_type in gain_based:
        return gain_based[init_type](
            tensor, gain=init.calculate_gain(activation_type)
        )
    kaiming = {
        "KAIMING_UNIFORM": init.kaiming_uniform_,
        "KAIMING_NORMAL": init.kaiming_normal_,
    }
    if init_type in kaiming:
        return kaiming[init_type](
            tensor, a=negative_slope, mode=fan_mode, nonlinearity=activation_type
        )
    raise TypeError("Unsupported tensor init type: %s." % init_type)
| jeremypoulain/nlplay | nlplay/models/pytorch/utils.py | utils.py | py | 12,577 | python | en | code | 7 | github-code | 90 |
37931170504 | import unittest
from number_of_recent_calls import RecentCounter, RecentCounterOfficial
class TestRecentCounter(unittest.TestCase):
    """Exercise both RecentCounter implementations with LeetCode example 1."""

    def test_example_1(self):
        # Each (t, expected) pair: after ping(t), the count of pings in the
        # window [t - 3000, t] must equal `expected`.
        recent_counter = RecentCounter()
        for t, expected in [(1, 1), (100, 2), (3001, 3), (3002, 3)]:
            assert recent_counter.ping(t=t) == expected

        recent_counter_official = RecentCounterOfficial()
        for t, expected in [(1, 1), (100, 2), (3001, 3), (3002, 3)]:
            assert recent_counter_official.ping(t=t) == expected
| saubhik/leetcode | tests/test_number_of_recent_calls.py | test_number_of_recent_calls.py | py | 524 | python | en | code | 3 | github-code | 90 |
72208033578 | from collections import Counter, defaultdict
class Solution:
    """Two takes on "first non-repeating character" (blank when none exists)."""

    def firstUniqChar1(self, s: str) -> str:
        """Count every character, then scan for the first one with count 1."""
        freq = Counter(s)
        return next((ch for ch in s if freq[ch] == 1), " ")

    def firstUniqChar2(self, s: str) -> str:
        """Single pass recording each char's first index; -1 marks repeats."""
        if not s:
            return " "
        first_pos = {}
        for pos, ch in enumerate(s):
            # A second sighting invalidates the stored position.
            first_pos[ch] = -1 if ch in first_pos else pos
        sentinel = len(s)
        best = min((p for p in first_pos.values() if p != -1), default=sentinel)
        return " " if best == sentinel else s[best]
| Asunqingwen/LeetCode | 剑指offer/第一个只出现一次的字符.py | 第一个只出现一次的字符.py | py | 690 | python | en | code | 0 | github-code | 90 |
18434892629 | import sys
sys.setrecursionlimit(500000)
MOD = 10**9+7
def input():
return sys.stdin.readline()[:-1]
def mi():
return map(int, input().split())
def ii():
return int(input())
def i2(n):
tmp = [list(mi()) for i in range(n)]
return [list(i) for i in zip(*tmp)]
def g(x):
    """Presumably the XOR of all integers in [1, x], built bit by bit.

    For each bit i, l[i] holds the parity of how many integers in [1, x]
    have bit i set; the result re-assembles those parity bits.  main() uses
    g(B) ^ g(A - 1), the standard prefix-XOR trick for a range XOR --
    confirm against the problem statement.
    """
    if x <= 0:
        return 0
    # n = number of bits needed to represent x.
    tmp = x
    n = 0
    while tmp > 0:
        tmp //= 2
        n += 1
    l = [0]*n
    for i in range(n):
        if i==0:
            # Parity of the count of odd numbers <= x follows x mod 4.
            l[i] = 1 if x%4==1 or x%4==2 else 0
        else:
            # Within each 2^(i+1)-block, bit i is set for the upper half;
            # count how many such positions fall at or below x, mod 2.
            l[i] = max(x%(1<<(i+1))-(1<<i)+1, 0)%2
    return sum(l[i]*(2**i) for i in range(n))
def main():
A, B = mi()
print(g(B)^g(A-1))
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03104/s270958365.py | s270958365.py | py | 704 | python | en | code | 0 | github-code | 90 |
41679290071 | #!/bin/env python
import sys
import re
from datetime import datetime
r_ldif = re.compile("^ (.*)")
r_attr = re.compile("^(\w*): (.*)")
r_dn = re.compile("^dn: (.*)")
r_entry_time = re.compile("^time: (.*)")
r_modify_time = re.compile("^modifyTimestamp: (.*)")
r_changetype = re.compile("^changetype: (.*)")
entry=[]
try:
for line in iter(sys.stdin.readline, b''):
st_line=line.rstrip()
if line=="\n":
dn = ""
ent_time = ""
mod_time = ""
change_type = ""
# Iterate entry for dn and time
for l in entry:
m_attr = r_attr.match(l)
if m_attr:
attr_name = m_attr.group(1)
attr_val = m_attr.group(2)
if attr_name == "dn":
dn = attr_val
elif attr_name == "time":
ent_time = attr_val
elif attr_name == "modifyTimestamp":
mod_time = attr_val
elif attr_name == "changetype":
change_type = attr_val
ent_time_parsed = datetime.strptime(ent_time,"%Y%m%d%H%M%S")
print("{} {}".format(change_type,dn))
print("Log timestamp: {}".format(ent_time_parsed))
if mod_time:
mod_time_parsed = datetime.strptime(mod_time,"%Y%m%d%H%M%SZ")
time_diff = int((mod_time_parsed-ent_time_parsed).total_seconds())
print("Mod timestamp: {} ({})".format(mod_time_parsed,time_diff))
entry=[]
print
else:
m_ldif = r_ldif.match(st_line)
if m_ldif:
entry[-1]=entry[-1] + m_ldif.group(1)
else:
entry.append(st_line)
except KeyboardInterrupt:
sys.stdout.flush()
pass | red-tux/perf-scripts | RHDS/audit_show_latency.py | audit_show_latency.py | py | 1,609 | python | en | code | 1 | github-code | 90 |
def mean(x):
    """Arithmetic mean of the values in x (x must be non-empty)."""
    total = sum(x)
    count = len(x)
    return total / count
def addnum():
    """Prompt for one number, append non-negatives to the global ``lst``,
    and recurse until a negative value ends the session.

    NOTE(review): recursion depth grows by one per entry, so a very long
    input session could hit Python's recursion limit -- confirm this is
    acceptable for the intended interactive use.
    """
    a = float(input("Please enter the numbers,\nany negative number will terminate the input: "))
    if a >= 0:
        lst.append(a)
        addnum()
lst = []
addnum()
print("The positive numebrs are: ", lst)
print("The sum of the positive numebers is: ", sum(lst))
print("The average of the positive numbers is: ", mean(lst))
print("The maximum is: ", max(lst))
print("The minimum is: ", min(lst))
| Lumix888/Python_various_exercises | Sum_average_max_minimum.py | Sum_average_max_minimum.py | py | 492 | python | en | code | 0 | github-code | 90 |
75109677095 | # Rock-Paper-Scissors
# Write your code here
import random
def match(user_choice, computer_choice):
    """Return "win", "lose" or "draw" for the user against the computer.

    Both arguments must be members of the module-level ``choices`` list
    (presumably of odd length and ordered cyclically so each option beats
    the half of the list preceding it -- confirm against the game rules
    when ``choices`` is customised).
    """
    result = ""
    if user_choice == computer_choice:
        result = "draw"
    else:
        # if user_choice == "paper":
        #     if computer_choice == "rock":
        #         result = "win"
        #     else:
        #         result = "lose"
        user_index = choices.index(user_choice)
        computer_index = choices.index(computer_choice)
        # Rotate the circle so the user's choice sits at the midpoint; the
        # computer's shifted position then tells which half it landed in.
        gap = (len(choices) // 2) - user_index
        computer_index_gapped = (computer_index + gap) % len(choices)
        if computer_index_gapped > len(choices) / 2:
            result = "lose"
        else:
            result = "win"
    return result
def read_score(name):
    """Return the saved rating for *name* from ``rating.txt``, or 0 if absent.

    Opens in "a+" mode so the file is created on first use.  The original
    never rewound the stream ("a+" positions it at EOF), so every lookup
    returned 0; it also leaked the file handle on early return.
    """
    with open("rating.txt", "a+") as f:
        f.seek(0)  # rewind: "a+" opens positioned at end-of-file
        for line in f:
            player, rating = line.split()
            if player == name:
                return int(rating)
    return 0
name = input("Enter your name: ")
print("Hello, %s" % name)
score = read_score(name)
choices = input()
if choices == "":
choices = ["rock", "paper", "scissors"]
else:
choices = choices.split(",")
print("Okay, let's start")
while True:
user_choice = input()
if user_choice in choices:
computer_choice = choices[random.randint(0, len(choices) - 1)]
result = match(user_choice, computer_choice)
if result == "lose":
print("Sorry, but the computer chose %s" % computer_choice)
elif result == "draw":
print("There is a draw (%s)" % computer_choice)
score += 50
elif result == "win":
print("Well done. The computer chose %s and failed" % computer_choice)
score += 100
elif user_choice == "!exit":
print("Bye!")
break
elif user_choice == "!rating":
print("Your rating: %s" % score)
else:
print("Invalid input")
| MLohengrin/JetBrains-Academy-Projects | Sources/game.py | game.py | py | 1,911 | python | en | code | 0 | github-code | 90 |
3721899886 | """
从腾讯天气获取气象信息
"http://weather.gtimg.cn/city/01010101.js" 返回数据格式:
sk_wd:对应wt_img.json中的数字,
以便确定图标地址,base url="http://mat1.gtimg.com/weather/2014gaiban/" + "TB_" + ico(wt_img.json中对应) + _baitian/_yejian + .png
背景的地址格式:"http://mat1.gtimg.com/weather/2014gaiban/" + bg + _baitian/_yejian + .jpg
sk_tp:温度,单位 ℃
sk_wd:风向 sk_wp:风力 sk_hd:湿度 %
wInfo.wk['0'] 一周预测
指数图片:http://mat1.gtimg.com/weather/2014gaiban/TB_shzs/zs(split_0/split_2中的key,取值对应接口中zs_xx).png
"""
import requests as rq
import json
import data_json.zhishu as zhishu
url = "http://weather.gtimg.cn/city/01010101.js"
r = rq.get(url).text
j = r.split("=")[1].lstrip().rstrip(";")
result = json.loads(j)
print("温度:", result["sk_tp"])
print("风向:", zhishu.windDir[int(result["sk_wd"])], end="")
print(result["sk_wp"] + "级")
# for i in result:
# print(i, result[i])
# url = "http://weather.gtimg.cn/aqi/01010101.json"
#
# r = rq.get(url).text
#
# ws = []
# ws = r.split("[", 1)[1][:-2]
#
# ws = ws.replace("null", '\"\"')
# ws = ws.replace("}, {", "}*{")
#
# ws = ws.split("*")
#
# print(ws.__doc__)
#
# # w = json.loads(r.split("[", 1)[1][:-3])
# #
# for i in range(len(ws)):
# w = json.loads(ws[i])
# print(w)
| yiyisf/get_link_python | getWeather.py | getWeather.py | py | 1,382 | python | en | code | 0 | github-code | 90 |
40519840968 | from numpy.core.fromnumeric import size
from numpy.lib.utils import info
from bs4 import BeautifulSoup
from PIL import Image, ImageTk
import plotly.express as ex
from tkinter import Canvas, messagebox
from tkinter.font import Font
from tkinter import ttk
from threading import *
import tkinter as tk
import pandas as pd
import requests
import yfinance
import threading
import json
import sys
import os
from plotly.subplots import make_subplots
import plotly.graph_objects as go
def start():
company = symbol_input.get("1.0",'end-1c')
def information():
tab1.delete('all')
print(var.get())
text_1 = tab1.create_text(100,20,text="please wait....",font=("Arial",15))
if var.get() == "IND":
data = requests.get("https://www.screener.in/company/{0}/".format(company.upper()))
if data.status_code == 200:
soup = BeautifulSoup(data.content,"lxml")
inf = soup.find_all("div",{"class":"sub show-more-box about"})
txt = ""
for el in inf:
txt += el.get_text()
txt = txt.split()
else:
messagebox.showerror("Error","Cannot collect data at this time. Try again later.")
tk.Canvas.delete(tab1,text_1)
tab1.create_text(20,20,anchor="nw",text="try again",font=("Arial,15"))
return
else:
data = yfinance.Ticker(company)
inf = data.info
df_inf = pd.DataFrame().from_dict(inf,orient='index')
try:
desc = df_inf.iloc[3]
except:
messagebox.showerror("Error","Cannot collect data at this time. Try again later.")
tk.Canvas.delete(tab1,text_1)
tab1.create_text(20,20,anchor="nw",text="try again",font=("Arial,15"))
return
with open("file.txt","w") as f:
f.write(desc[0])
f.close()
txt = []
with open("file.txt","r") as f:
for line in f:
for word in line.split():
txt.append(word)
f.close()
with open("real.txt","w") as ff:
ff.write("")
ff.close()
with open("real.txt","a") as ff:
ind = 0
for i in txt:
if ind%15==0:
ff.write("\n")
ff.write(i+" ")
ind+=1
ff.close()
del ff
tk.Canvas.delete(tab1,text_1)
f = open("real.txt","r")
tab1.create_text(20,0,anchor="nw",text=f.read(),font=("Century Schoolbook",15))
f.close()
def sentiment_analysis():
tab2.delete('all')
if var.get() == "IND":
tab2.create_text(20,20,anchor="nw",text="This service is not available for NSE stocks",font=("Arial",15))
return
def tv_win():
tv_page = tk.Toplevel(root)
tv_page.title("table view")
df = pd.DataFrame(table)
tv = ttk.Treeview(tv_page)
cols = []
for i in df.columns:
cols.append(i)
tv['columns']= tuple(cols)
tv['show'] = 'headings'
tv.column('#0', width=0, stretch=tk.NO)
tv.column('0', anchor=tk.CENTER, width=80)
tv.column('1', anchor=tk.CENTER, width=80)
tv.column('2', anchor=tk.CENTER, width=80)
tv.column('3', anchor=tk.CENTER, width=80)
tv.column('4', anchor=tk.CENTER, width=80)
tv.column('5', anchor=tk.CENTER, width=80)
tv.heading('#0', text='', anchor=tk.CENTER)
tv.heading('0', text=cols[0], anchor=tk.CENTER)
tv.heading('1', text=cols[1], anchor=tk.CENTER)
tv.heading('2', text=cols[2], anchor=tk.CENTER)
tv.heading('3', text=cols[3], anchor=tk.CENTER)
tv.heading('4', text=cols[4], anchor=tk.CENTER)
tv.heading('5', text=cols[5], anchor=tk.CENTER)
for i in range(len(df)):
t = df.iloc[i].values.tolist()
tv.insert('','end',text='l1',values=(t))
tv.pack()
text_2 = tab2.create_text(50,10,anchor='nw',text="please wait....",font=("Arial",15))
stock = {"name":company,"feature":"sentiment analysis"}
r = requests.post("http://127.0.0.1:8000/",data=stock)
data = json.loads(r.content)
news = data['news']
table = data['table']
sentiment = data['mean sentiment']
tk.Canvas.delete(tab2, text_2)
w = 10
for i in news:
tab2.create_text(20,w,text="➡"+i[0] + " ( "+i[2]+" )",anchor='nw',font=("Century Schoolbook",15))
w += 40
tab2.create_text(50,220,anchor="nw",text="News Sentiment Analysis: "+ str(sentiment['Mean Sentiment'][0]),font=("Century Schoolbook",15),fill='red')
tv_btn = tk.Button(tab2,text="click here for full details",command=tv_win,width=20,height =2,font=("Arial",10),border=10,borderwidth=5)
tv_btn.pack(pady=80,padx=10,side=tk.LEFT)
def stock_prediction():
tab3.delete('all')
global img
text_3 = tab3.create_text(50,10,anchor='nw',text="please wait....",font=("Arial",15))
stock = {"name":company,"feature":"stock prediction","exchange":var.get()}
r = requests.post("http://127.0.0.1:8000/",data=stock)
r = r.content
r = json.loads(r)
#check if dictionay is empty
if not r:
Canvas.delete(tab3,text_3)
tab3.create_text(50,10,anchor='nw',text="No data available",font=("Arial",15))
return
tk.Canvas.delete(tab3,text_3)
text_3 = tab3.create_text(50,10,anchor='nw',text="Loading graphs....",font=("Arial",15))
if "saved_graphs" not in os.listdir():
os.mkdir("saved_graphs")
df = pd.DataFrame(r["prediction"])
df1 = pd.DataFrame(r["output"])
fig = make_subplots(rows=1,cols=2,subplot_titles=("15 days Prediction","Current values with predicted values"))
fig.add_trace(go.Scatter(x=df.index,y=df.values.reshape(df.shape[0])),row=1,col=1)
fig.add_trace(go.Scatter(x=df1.index,y=df1.values.reshape(df1.shape[0])),row=1,col=2)
fig.update_layout(height=600,width=1000)
fig.update_xaxes(title_text="Time")
fig.update_yaxes(title_text="Prediction")
fig.write_image("saved_graphs/"+"plot.png")
img = ImageTk.PhotoImage(Image.open("saved_graphs/"+"plot.png"))
tk.Canvas.delete(tab3,text_3)
tab3.create_image(0,0,image=img,anchor="nw")
vbar = tk.Scrollbar(TabControl,orient=tk.VERTICAL)
vbar.pack(anchor='e',fill='y',expand=True)
vbar.config(command=tab3.yview)
tab3.config(yscrollcommand=vbar.set)
tab3.config(scrollregion=tab3.bbox("all"))
print("done")
t1 = Thread(target=information)
t2 = Thread(target=sentiment_analysis)
t3 = Thread(target=stock_prediction)
t1.start()
t2.start()
t3.start()
def restart_program():
    """Restarts the current program.
    Note: this function does not return. Any cleanup action (like
    saving data) must be done before calling this function."""
    # os.execl replaces the running process image with a fresh interpreter
    # re-running this script with its original argument list.
    python = sys.executable
    os.execl(python, python, * sys.argv)
root = tk.Tk()
root.title("Stock Projection")
root.geometry('700x400')
root.config(bg='#429ef5')
var = tk.StringVar()
menubar = tk.Menu(root)
option = tk.Menu(menubar,tearoff=0)
option.add_command(label="Restart",command=restart_program)
option.add_command(label="Exit",command=root.quit)
menubar.add_cascade(label="options",menu=option)
root.config(menu=menubar)
name = tk.Label(root,text="Stock Projection")
name.configure(font=('Arial',20),fg='Black',bg='lightgrey',width=200)
name.pack()
label1 = tk.Label(root,text="Enter the stock symbol",border=5)
label1.pack(side='left',anchor='n',pady=20)
r1 = tk.Radiobutton(root,text="US",variable=var,value="US")
r2 = tk.Radiobutton(root,text="IND",variable=var,value="IND")
r1.place(x=10,y=100)
r2.place(x=10,y=130)
symbol_input = tk.Text(root)
symbol_input.config(height=1,width=13,border=3,borderwidth=5)
symbol_input.place(x=5,y=160)
btn = tk.Button(root,text='okay',command=start,width=10,border=5,borderwidth=5,bg='lightgreen')
btn.place(x=10,y=190)
TabControl = ttk.Notebook(root)
tab1 = tk.Canvas(TabControl,bg='lightgrey')
tab2 = tk.Canvas(TabControl,bg='lightgrey')
tab3 = tk.Canvas(TabControl,bg='lightgrey')
TabControl.add(tab1,text='Description')
TabControl.add(tab2,text='News Sentiment Analysis')
TabControl.add(tab3,text='Stock Price Forecast')
TabControl.pack(side=tk.LEFT,expand=True,fill='both')
root.mainloop()
| Gajendra-Sonare/myproject | application/main.py | main.py | py | 8,837 | python | en | code | 1 | github-code | 90 |
18153061329 | import sys
read = sys.stdin.read
readlines = sys.stdin.readlines
def main():
n = int(input())
r = 0
for i1 in range(1, n+1):
for i2 in range(1, n+1):
if i1 * i2 >= n:
break
else:
r += 1
print(r)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02548/s081204397.py | s081204397.py | py | 314 | python | en | code | 0 | github-code | 90 |
28554839327 | """Test the exam_env module.
all import and structural testing is done in this module.
"""
from datacenter.model.email import Email
from utilities import create_test_session
try:
import datacenter
except ImportError:
pass
def test_00(capsys):
    """Smoke test: the datacenter package and its model module import
    cleanly and importing them writes nothing to stdout/stderr."""
    assert datacenter
    assert datacenter.model
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
def test_01_email(capsys):
    """Create and commit an Email row, then verify its repr and that the
    whole round-trip stays silent on stdout/stderr."""
    session = create_test_session()
    email = datacenter.model.Email()
    email.email_address = "Email Address"
    session.add(email)
    session.commit()
    assert repr(email) == "email_address:Email Address."
    out, err = capsys.readouterr()
    assert out == ""
    assert err == ""
| htlweiz/datacenter | tests/test_01_email.py | test_01_email.py | py | 760 | python | en | code | 0 | github-code | 90 |
72071244138 | # -*- coding: utf-8 -*-
# UTF-8 encoding when using korean
"""통과"""
import sys
from collections import Counter
def sysinput():
return sys.stdin.readline().rstrip()
sysprint = sys.stdout.write
n, m = map(int, sysinput().split())
events = Counter()
for a in range(m):
person_event = list(map(int, sysinput().split()))[1:]
for e in person_event:
events[e] += 1
common_event = events.most_common()
#print(common_event)
#print(len(common_event))
for i in range(len(common_event)-1):
if common_event[i][1] != common_event[i+1][1]:
common_event = common_event[:i+1]
break
result_list = list(map(lambda x: x[0], common_event))
result_list.sort(reverse=True)
result = ''
for b in result_list:
result += '{} '.format(b)
print(result.strip()) | dig04214/python-algorithm | challenge/7/7_1.py | 7_1.py | py | 750 | python | en | code | 0 | github-code | 90 |
5844226499 | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import numpy as np
import cv2, os
import glob, collections
dataset_path = '/root/ffabi_shared_folder/datasets/_original_datasets/synthia/SYNTHIA-SF/'
sample = "0000000"
def depth_converter(depth):
    """Decode a SYNTHIA RGB-encoded depth image into normalized values.

    Args:
        depth: H x W x 3 array whose channels encode a 24-bit depth value
            (typically uint8 as loaded by cv2).

    Returns:
        H x W float array in [0, 1]: (R + G*2^8 + B*2^16) / (2^24 - 1).
    """
    # Cast to float64 first: with uint8 channels the multiplications by
    # 2**8 / 2**16 can overflow the integer dtype, and NumPy 2 rejects
    # out-of-range Python-int operands against uint8 arrays outright.
    R = depth[:, :, 0].astype(np.float64)
    G = depth[:, :, 1].astype(np.float64)
    B = depth[:, :, 2].astype(np.float64)
    values = (R + G * 2 ** 8 + B * 2 ** 16) / (2 ** 24 - 1)
    return values
def equalize_depth_values(depth_image_1d, cut = 0.4, amin = None, amax = None):
    """Normalize and gamma-stretch depth values in place for visualization.

    Values >= 0.99 are treated as invalid/background and forced to 1.
    NOTE(review): ``amin`` is accepted but never used, and the input array
    is modified in place -- confirm both are intentional.
    """
    depth = depth_image_1d
    # Shift valid pixels so the global minimum becomes 0.
    depth[depth < 0.99] -= np.amin(depth_image_1d)
    if amax is None:
        # Scale by the largest value below the cut-off.
        amax = np.amax(depth[depth < cut])
    depth[depth < 0.99] /= amax
    depth[depth > 0.99] = 1
    # Cube-root-like gamma (x ** 0.33) brightens near-range detail.
    depth **=.33
    return depth
def to_bgra(depth, invalid_value = 0.999):
    """Map normalized depth values to a 0-255 jet-colormap image.

    NOTE(review): despite the name, this returns the RGB channels of
    matplotlib's jet colormap (alpha dropped) scaled to 0-255, and
    ``invalid_value`` is accepted but never used -- confirm both.
    """
    jet_img = cm.jet(depth)[..., :3]
    jet_img *= 255
    return jet_img
# depth = mpimg.imread(dataset_path + 'SEQ1/DepthLeft/'+sample+'.png')
depth = cv2.imread(dataset_path + 'SEQ1/DepthLeft/'+sample+'.png')
bgra = to_bgra(equalize_depth_values(depth_converter(depth)))
cv2.imwrite("/root/test.png", bgra) | ffabi/Project_TDK | dataset_scripts/depth_test.py | depth_test.py | py | 1,166 | python | en | code | 1 | github-code | 90 |
18555826959 | N = int(input())
red = sorted([list(map(int,input().split())) for i in range(N)], key=lambda x: x[0])[::-1]
blue = [list(map(int,input().split())) for i in range(N)]
ans = 0
for i in range(N):
min_ = 10 ** 9 + 7
ind = -1
for j in range(N):
if red[i][0] <= blue[j][0] and red[i][1] <= blue[j][1] and blue[j][1] < min_:
ind = j
min_ = blue[j][1]
if ind != -1:
blue[ind][0] = -1
ans += 1
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03409/s148314709.py | s148314709.py | py | 459 | python | en | code | 0 | github-code | 90 |
26811176031 | import sys
from heapq import heappop, heappush
input = sys.stdin.readline
n = int(input())
heap = []
for _ in range(n):
num = int(input())
if num == 0:
try:
print(heappop(heap)[1])
except:
print(0)
else:
heappush(heap, (abs(num), num)) | cyw320712/problem-solving | Baekjoon/python/11286.py | 11286.py | py | 299 | python | en | code | 3 | github-code | 90 |
18113623239 | n, k = map(int, input().split())
l = list(int(input()) for i in range(n))
left = max(l)-1; right = sum(l)
while left+1 < right: #最大値と合計値の間のどこかに考えるべき値が存在する
mid = (left + right) // 2
cnt = 1; cur = 0 #初期化
for a in l:
if mid < cur + a:
cur = a
cnt += 1
else:
cur += a
if cnt <= k:
right = mid
else:
left = mid
print(right)
| Aasthaengg/IBMdataset | Python_codes/p02270/s200522536.py | s200522536.py | py | 470 | python | en | code | 0 | github-code | 90 |
27541685878 | import numpy as np
from util.read_aln import ReadSeqs, ReadSeqs2, Die
import pickle
import glob
import sys
input_aln_path = '/Users/ali_nayeem/Projects/MSA/example/bb3_release'
output_aln_path = '../../output/5obj-3iter'
export_file_dir = '../out'
data_list = ['BB11005'] #, 'BB11018', 'BB11033', 'BB11020',
# 'BB12001', 'BB12013', 'BB12022', 'BB12035', 'BB12044',
# 'BB20001', 'BB20010', 'BB20022', 'BB20033', 'BB20041',
# 'BB30002', 'BB30008', 'BB30015', 'BB30022',
# 'BB40001', 'BB40013', 'BB40025', 'BB40038', 'BB40048',
# 'BB50001', 'BB50005', 'BB50010', 'BB50016']
method_list = ['muscle-ext-output'] #, 'decom-muscle-ext-output', 'decom-muscle-output', 'decom-output']
no_of_aln = {'muscle-ext-output': 50, 'decom-muscle-ext-output':100, 'decom-muscle-output':100, 'decom-output':100}
aln_count = 0
for method in method_list:
aln_count += no_of_aln[method]
for data in data_list:
outfile = open(export_file_dir + '/' + data + '.pickle', 'wb')
pickle.dump(aln_count, outfile)
input_path = input_aln_path + '/RV' + data[2:4] + '/' + data + '.tfa'
labels, seqs = ReadSeqs2(input_path)
for method in method_list:
for aln_i in range(no_of_aln[method]):
path_pattern = output_aln_path + '/' + method + '/' + data + '/' + str(aln_i) + '*.aln'
aln_path = glob.glob(path_pattern)[0]
aln = ReadSeqs(aln_path)
feature = []
for Label in labels:
if Label not in aln.keys():
Die("Not found in alignment: " + Label)
#print(AlnSeqs[Label])
#feature.extend([ord(c) for c in aln[Label]])
feature.extend(list(bytes(aln[Label], 'ascii')))
#print(feature[0:30])
print(str(aln_i) + ': ' + str(len(feature)))
pickle.dump(np.array(feature), outfile)
#sys.exit(1)
outfile.close()
| ali-nayeem/pasta-ext-scripts | py-analysis/src/encode.py | encode.py | py | 1,936 | python | en | code | 0 | github-code | 90 |
23135476484 | # -*- coding: utf-8 -*-
import re
import black
import isort
import nbformat
from .errors import NotPythonNotebookError
_ISORT_SETTINGS = {
    "multi_line_output": 3,
    "include_trailing_comma": True,
    "force_grid_wrap": 0,
    "combine_as_imports": True,
    "line_length": 88,
}
_BLACK_SETTINGS = {"line_length": 88, "fast": True}
# Zero-width match at the start of any line that begins with optional
# whitespace followed by a "!" shell command or "%" notebook magic.
_MAGIC_LINE_REGEX = re.compile(r"^(?=([ \t]*[!%].*$))", re.MULTILINE)
_REMOVE_ME = "# temporarily commented out by nbblack #"


def comment_magic(contents):
    """Prefix every magic/shell line in *contents* with the sentinel marker."""
    return re.sub(_MAGIC_LINE_REGEX, _REMOVE_ME, contents)


def uncomment_magic(contents):
    """Strip the sentinel markers inserted by :func:`comment_magic`.

    Bug fix: the original passed ``re.MULTILINE`` as ``re.sub``'s positional
    *count* argument, so only a marker at the very start of the string was
    ever removed; it must be passed as *flags* to undo every commented line.
    """
    return re.sub("^{0}".format(_REMOVE_ME), "", contents, flags=re.MULTILINE)
def isort_cell(cell):
    """Sort the imports of a notebook code cell in place with isort.

    Overwrites ``cell.source`` with the isort output (using the
    module-level ``_ISORT_SETTINGS``), stripped of surrounding whitespace.
    """
    cell.source = isort.SortImports(
        file_contents=cell.source, setting_overrides=_ISORT_SETTINGS
    ).output.strip()
def blacken_cell(cell):
    """Format a notebook code cell in place with black.

    Magic/shell lines are temporarily prefixed with a comment marker so
    black can parse the source, then restored afterwards.
    """
    cell.source = comment_magic(cell.source)
    try:
        blackened = black.format_file_contents(cell.source, **_BLACK_SETTINGS)
    except (SyntaxError, black.NothingChanged):
        # Unparseable or already-formatted cells are left untouched.
        pass
    else:
        cell.source = blackened.strip()
    cell.source = uncomment_magic(cell.source)
def blacken_notebook_contents(source):
    """Parse a notebook JSON string and isort/black every code cell.

    Args:
        source: Raw notebook file contents (read as nbformat v4).

    Returns:
        The modified ``nbformat`` notebook object.

    Raises:
        NotPythonNotebookError: If the notebook's kernel is not Python.
    """
    notebook = nbformat.reads(source, as_version=4)
    if not is_python_notebook(notebook):
        raise NotPythonNotebookError()
    for cell in notebook.cells:
        if cell.cell_type == "code":
            isort_cell(cell)
            blacken_cell(cell)
    return notebook
def is_python_notebook(notebook):
    """Return True when the notebook's kernelspec declares a Python kernel.

    A notebook without a ``language`` entry in its kernelspec counts as
    non-Python rather than raising.
    """
    try:
        language = notebook.metadata.kernelspec["language"]
    except KeyError:
        return False
    return language == "python"
| mcflugen/nbblack | nbblack/nbblack.py | nbblack.py | py | 1,612 | python | en | code | 2 | github-code | 90 |
24340771335 | import sys
from datetime import datetime
import scipy.io as sio
import torch
import numpy as np
import wandb
from pybmi.utils import TrainingUtils
sys.path.append("kalmannet")
from kalman_net import KalmanNetNN
from pipeline_kf import Pipeline_KF
torch.set_default_dtype(torch.float32)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
from utils.utils import compute_correlation
# Load KF data
# Timestamp string (MM_DD_YY__HH_MM_SS) used below to tag saved decoder files.
today = datetime.today()
now = datetime.now()
strToday = today.strftime("%m_%d_%y")
strNow = now.strftime("%H_%M_%S")
strTime = strToday + "__" + strNow
# Fixed params
# Experiment selection: which monkey / session / runs to train (and test) on.
monkey = "Joker"
date = "2022-09-21"
run_train = "Run-002"
run_test = None #'Run-003'
# Data/binning parameters. NOTE(review): units of `binsize` (ms?) are not
# shown here -- confirm against TrainingUtils.load_training_data.
binsize = 32
fingers = [2, 4]
is_refit = False
train_test_split = 0.8
norm_x_movavg_bins = None
pred_type = "pv"
# Model/training hyperparameters for the KalmanNet network.
run_reg_kf = True
lrate = 0.0002973485
wdecay = 0.0000172
batch_size = 48
conv_size = 70
normalize_x = False
normalize_y = False
h1_size = 510
h2_size = 1560
hidden_dim = 845
num_model = 2
# Load the previously fit Kalman-filter parameters for this session from disk.
kf_model = sio.loadmat(f"Z:/Data/Monkeys/{monkey}/{date}/decodeParamsKF{num_model}.mat")
good_chans_SBP = kf_model["chansSbp"]
# Shift MATLAB 1-indexed channel numbers to 0-indexed; the trailing [0]
# unwraps the row dimension of the loadmat array -- TODO confirm shape.
good_chans_SBP_0idx = [x - 1 for x in good_chans_SBP][0]
num_states = (
    len(fingers) if pred_type == "v" else 2 * len(fingers)
)  # 2 if velocity only, 4 if pos+vel
# Include bias
num_states += 1
# State-transition (A) and observation (C) matrices from the fit KF.
# NOTE(review): index 1 on the last axis presumably selects a particular
# fitted parameter set -- confirm against how decodeParamsKF*.mat is written.
A = torch.tensor(kf_model["xpcA"])[:num_states, :num_states, 1]
C = torch.tensor(kf_model["xpcC"])[: len(good_chans_SBP_0idx), :num_states, 1]
# Build train/validation data loaders from the recorded session.
[loader_train, loader_val] = TrainingUtils.load_training_data(
    monkey,
    date,
    run_train,
    run_test=run_test,
    good_chans_0idx=good_chans_SBP_0idx,
    isrefit=is_refit,
    fingers=fingers,
    binsize=binsize,
    batch_size=batch_size,
    binshist=conv_size,
    normalize_x=normalize_x,
    normalize_y=normalize_y,
    norm_x_movavg_bins=norm_x_movavg_bins,
    train_test_split=train_test_split, # only used if run_test is None
    pred_type="pv",
    return_norm_params=False,
)
# sys_model.InitSequence(x_0, P_0)
knet_model = KalmanNetNN(
    binsize, reg_kf=run_reg_kf, h1_size=h1_size, h2_size=h2_size, hidden_dim=hidden_dim
)
knet_model.build(A, C)
# NOTE(review): `knet_model` above (built with the tuned h1/h2/hidden sizes)
# is never used again -- the pipeline is handed the default-sized
# `KNet_model` created below instead. Confirm which model was meant to train.
pipeline = Pipeline_KF(
    "models",
    f"KNet_fingflexion_{strTime}",
    good_chans_SBP_0idx,
    pred_type="pv",
)
# sys_model.InitSequence(x_0, P_0)
KNet_model = KalmanNetNN(binsize)
KNet_model.build(A, C)
pipeline.set_model(KNet_model)
# NOTE(review): `lrate` / `wdecay` defined earlier are not passed here; the
# hard-coded 1e-3 / 0 are used instead. Confirm intended hyperparameters.
pipeline.set_training_params(
    n_epochs=2,
    learning_rate=1e-3,
    weight_decay=0,
)
# wandb.init(
#     project="kalman-net",
#     entity="lhcubillos",
#     name=f"test_{strTime}",
#     config={},
# )
# Train; returns the validation loss history (see Pipeline_KF.train).
val_loss = pipeline.train(
    loader_train, loader_val, compute_val_every=10, stop_at_iterations=5
)
# Persist the trained network, tagged with the run timestamp.
torch.save(KNet_model, f"models/KNet_fingflexion_{strTime}.mdl")
training_outputs = {
    "val_loss": val_loss,
}
training_inputs = {
    "pred_type": pred_type,
}
# Save the decoder plus its metadata in the lab's standard format.
TrainingUtils.save_nn_decoder(
    monkey,
    date,
    KNet_model,
    None,
    binsize,
    fingers,
    good_chans_SBP,
    training_inputs,
    training_outputs,
    fname_prefix="KNet",
)
print("hola")
| JapmanGill/BIOMEDE-517-Project | main_kalmannet.py | main_kalmannet.py | py | 3,064 | python | en | code | 0 | github-code | 90 |
18369485279 | import bisect
N = int(input())
A = [int(input()) for _ in range(N)]
dp = [-1] * N
dp[N-1] = A[0]
ans = 0
for i in range(1, N):
target_index = bisect.bisect_left(dp, A[i])
dp[target_index-1] = A[i]
print(N - dp.count(-1)) | Aasthaengg/IBMdataset | Python_codes/p02973/s301734009.py | s301734009.py | py | 231 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.