seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
71943943336 | # coding=utf-8
'''
This file shows how to transfer files through 3 machines:
A -> B -> C
B is the machine running this Python script.
'''
import os
import subprocess
from contextlib import contextmanager
def io_hash_fullpath(filename):
    """Return the MD5 hex digest of the file at *filename*.

    Reads in binary chunks so large transferred files are not loaded
    fully into memory.  Callers use this to report a checksum of each
    staged file (the original stub returned None, so every transfer
    printed ``md5=None``).

    :param filename: path to a local file
    :return: 32-char lowercase hex MD5 digest string
    """
    import hashlib  # local import: keeps the module's top-level imports untouched
    digest = hashlib.md5()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def solution1():
    """Example where A, B and C are all Linux machines.

    Pulls every file from A to the local machine B via scp; only when
    all pulls succeed does it push each file on from B to C.
    """
    scp_path_a = 'user@ip:/path'
    scp_path_c = 'user@ip:/path'
    local_path_b = ''
    files = []
    ret_code = -1
    # Stage 1: pull each file A -> B, stopping at the first scp failure.
    for name in files:
        src = os.path.join(scp_path_a, name)
        dst = os.path.join(local_path_b, name)
        ret_code = subprocess.call(['scp', src, dst])
        if ret_code != 0:
            break
    # Stage 2: only if every pull succeeded, push each file B -> C.
    if ret_code == 0:
        for name in files:
            src = os.path.join(local_path_b, name)
            dst = os.path.join(scp_path_c, name)
            ret_code = subprocess.call(['scp', src, dst])
import paramiko
import errno
import stat
import itertools
import tempfile
@contextmanager
def open_remote(host):
    """Context manager yielding an SFTP session connected to *host*.

    :param host: (ip, port, username, <password>) tuple, splatted into
        ``SSHClient.connect``
    :return: an open ``paramiko.sftp_client.SFTPClient``; both the SFTP
        channel and the SSH client are closed on exit
    """
    ssh = paramiko.SSHClient()
    # Auto-accept unknown host keys so first connections don't fail.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(*host)
    sftp = ssh.open_sftp()
    try:
        yield sftp
    finally:
        sftp.close()
        ssh.close()
def solution2(hosts, files):
    '''
    A -> B -> C where B is Windows and this script runs on B.

    from = A, to = C.  This variant uses paramiko (solution3 uses fabric).
    Each file is downloaded from A into a local temp file on B, then
    uploaded to C; ELF executables get their execute bit restored on C.

    put <files_from> to <files_to>
    :param hosts: [(ip1,port1,user1), (ip2,port2,user2)]
    :param files: [(file1_from,file1_to),(file2_from,file2_to),...]
    :return: None
    '''
    with open_remote(hosts[0]) as sftp_from:
        with open_remote(hosts[1]) as sftp_to:
            for file_from, file_to in files:
                # Skip files that do not exist on the source host.
                try:
                    sftp_from.lstat(file_from)
                except IOError as er:
                    print('[!] error {} of file {}'.format(er, file_from))
                    continue
                # Stage the file through a local temp path on B.
                fd, tmpf = tempfile.mkstemp('_ssh')
                os.close(fd)
                sftp_from.get(file_from, tmpf)
                m = io_hash_fullpath(tmpf)
                print('[+] get {} to {} md5={}'.format(file_from, tmpf, m))
                # sftp_to.open(t) / sftp_to.lstat(t) both raise
                # "Errno 2 No such file" for a missing target, so probe
                # with lstat and remove the target if it already exists.
                try:
                    sftp_to.lstat(file_to)
                    sftp_to.remove(file_to)
                except IOError as er:
                    if not (er.errno == errno.ENOENT):
                        raise er
                sftp_to.put(tmpf, file_to)
                print('[+] put to {rh}:{rp}'.format(rh=hosts[1], rp=file_to))
                # Restore the execute bit for ELF binaries.  Read the magic
                # bytes in *binary* mode: the previous text-mode read could
                # raise UnicodeDecodeError on binary content, and it compared
                # a str slice against 'ELF' instead of b'ELF'.
                with open(tmpf, 'rb') as fr_tmp:
                    header = fr_tmp.read(4)
                if header[1:4] == b'ELF':
                    st = sftp_to.lstat(file_to)
                    sftp_to.chmod(file_to, st.st_mode | stat.S_IEXEC)
                print('')
                os.remove(tmpf)
from fabric import Connection as SSHConnection
@contextmanager
def connect(host):
    """Context manager yielding a fabric Connection to *host*.

    :param host: ("root@1.1.1.1", "password" or "", <port>); an empty
        password falls back to key-based authentication
    :return: an open ``fabric.Connection``, closed on exit
    """
    if host[1]:
        connect_kwargs = {"password": host[1]}
    else:
        connect_kwargs = {"key_filename": "<ssh key>", "look_for_keys": False}
    # banner_timeout prevents: SSHException: Error reading SSH protocol banner
    connect_kwargs["banner_timeout"] = 60
    connect_kwargs["compress"] = True
    conn = SSHConnection(host=host[0], port=host[2],
                         connect_kwargs=connect_kwargs,
                         connect_timeout=60,
                         # forward_agent=True # if not, rsync always error Host key verification fail
                         )
    try:
        yield conn
    finally:
        conn.close()
def solution3(hosts, files):
    """Same A -> B -> C relay as solution2, but using fabric instead of
    raw paramiko: get each file from hosts[0] into a local temp file,
    then put it to hosts[1], removing the temp file afterwards.

    :param hosts: two host tuples accepted by connect()
    :param files: iterable of (file_from, file_to) path pairs
    """
    with connect(hosts[0]) as cnn_from:
        with connect(hosts[1]) as cnn_to:
            for file_from, file_to in files:
                # test exists
                # NOTE(review): fabric's Connection does not expose lstat()
                # directly (it lives on the sftp client) -- confirm this call
                # works with the fabric version in use.
                try:
                    cnn_from.lstat(file_from)
                except IOError as er:
                    print('[!] error {} of file {}'.format(er, file_from))
                    continue
                # Stage through a local temp file; mkstemp returns (fd, path).
                tmpf = tempfile.mkstemp('_ssh')
                os.close(tmpf[0])
                tmpf = tmpf[1]
                try:
                    rst = cnn_from.get(remote=file_from,
                                       local = tmpf)
                    m = io_hash_fullpath(tmpf)
                    print('[+] get {} to {} md5={}'.format(file_from, tmpf, m))
                    # Not sure how this mode mapping behaves between Windows and Linux.
                    rst = cnn_to.put(local = tmpf,
                                     remote = file_to,
                                     preserve_mode=True)
                    print('[+] put to {rh}:{rp}'.format(rh=hosts[1], rp=file_to))
                except Exception as err:
                    print("err {} of {}->{}".format(err, file_from, file_to))
                finally:
                    os.remove(tmpf)
| fooofei/python.pieces | ssh_file_transfer.py | ssh_file_transfer.py | py | 5,455 | python | en | code | 1 | github-code | 90 |
# Prompt for hours worked and hourly rate, then print the computed pay.
num_hrs = input('Enter Hours: ')
num_rate = input('Enter Rate: ')
try:
    hrs = float(num_hrs)
    rate = float(num_rate)
except ValueError:  # only catch conversion errors; a bare except would also swallow Ctrl-C
    print(input('\nError, Please enter numeric input'))
    quit() #to stop running another lines
bill = hrs*rate
print('Pay: ',bill)
input('Press ENTER to exit >>')  # keep the console window open until the user confirms
| bicc99/python_for_everybody_exercises | Exercise3.2 - try and except.py | Exercise3.2 - try and except.py | py | 314 | python | en | code | 0 | github-code | 90 |
# The idea that makes Longest Increasing Subsequence run in O(N log N):
# keep one list holding a candidate subsequence in ascending order.
# Scan the input left to right; if the current number is larger than the
# list's last element, append it to the end.
# Otherwise, binary-search the leftmost ascending-order slot for it and
# overwrite that slot.
# Overwriting with a smaller value keeps the list length unchanged while
# recording that a "cheaper" tail of that length now exists.
# Therefore this computes the LENGTH of the longest increasing subsequence,
# but the list itself is NOT guaranteed to be an actual such subsequence.
from bisect import bisect_left
n = int(input())
array = list(map(int, input().split()))
# Candidate subsequence (always kept in ascending order).
dp = [array[0]]
# Scan the input sequence from the front, one element at a time.
for num in array[1:]:
    # Larger than the current maximum: extend the subsequence.
    if num > dp[-1]:
        dp.append(num)
    # Not larger than the last element:
    else:
        # binary-search the leftmost position where it fits and overwrite it.
        index = bisect_left(dp, num)
        dp[index] = num
print(len(dp))
"""
(예시)
수열 = [30, 10, 20, 35, 25, 40, 60]
1. 부분 수열 = [30]
2. 10은 부분 수열의 최댓값인 30보다 작으므로 이진 탐색을 통해 부분 수열에서 오름차순으로
10이 들어갈 인덱스를 찾는다. 인덱스의 결과는 0. 그럼 부분수열[0]의 위치를 10으로 덮어씌운다.
부분 수열 = [10]
3. 20은 부분 수열의 최댓값인 10보다 크므로 부분 수열의 끝에 추가한다.
부분 수열 = [10, 20]
4. 35는 부분 수열의 최댓값인 20보다 크므로 부분 수열의 끝에 추가한다.
부분 수열 = [10, 20, 35]
5. 25는 부분 수열의 최댓값인 35보다 작으므로 이진 탐색을 통해 부분 수열에서 오름차순으로
25가 들어갈 인덱스를 찾는다. 인덱스의 결과는 2. 그럼 부분수열[2]의 위치를 25로 덮어씌운다.
부분 수열 = [10, 20, 25]
6. 40는 부분 수열의 최댓값인 25보다 크므로 부분 수열의 끝에 추가한다.
부분 수열 = [10, 20, 25, 40]
7. 60는 부분 수열의 최댓값인 40보다 크므로 부분 수열의 끝에 추가한다.
부분 수열 = [10, 20, 25, 40, 60]
따라서, 가장 긴 증가하는 부분 수열의 길이는 5.
문제
수열 A가 주어졌을 때, 가장 긴 증가하는 부분 수열을 구하는 프로그램을 작성하시오.
예를 들어, 수열 A = {10, 20, 10, 30, 20, 50} 인 경우에 가장 긴 증가하는 부분 수열은 A = {10, 20, 10, 30, 20, 50} 이고,
길이는 4이다.
입력
첫째 줄에 수열 A의 크기 N (1 ≤ N ≤ 1,000,000)이 주어진다.
둘째 줄에는 수열 A를 이루고 있는 Ai가 주어진다. (1 ≤ Ai ≤ 1,000,000)
출력
첫째 줄에 수열 A의 가장 긴 증가하는 부분 수열의 길이를 출력한다.
예제 입력 1
6
10 20 10 30 20 50
예제 출력 1
4
""" | khyup0629/Algorithm | 이진 탐색 (Binary search)/가장 긴 증가하는 부분 수열 2.py | 가장 긴 증가하는 부분 수열 2.py | py | 3,538 | python | ko | code | 3 | github-code | 90 |
2642478132 | import logging
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import telegram
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
import settings
from collections import defaultdict
from win_unicode_console import enable
enable()
dict_calc = defaultdict(str)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO,
filename='bot.log'
)
def start_bot(bot, update):
    """Handle the /menu command: show the main reply keyboard
    (calculator / Planets / Towns / Esc) to the chat."""
    print(update.message.chat.id)
    custom_keyboard = [['/calculator', '/Planets'], ['/Towns', 'Esc']]
    reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
    bot.send_message(chat_id = update.message.chat.id, text = "Нажмите интересующую вас опцию или Esc для выхода из меню", reply_markup = reply_markup)
    logging.info('Пользователь {} нажал /start'.format(update.message.chat.username))
def chat(bot, update):
    """Generic text handler: accumulates calculator keystrokes per chat in
    the module-level ``dict_calc``, evaluates the expression on '=',
    removes the keyboard on 'Esc', and echoes any other text.

    NOTE(review): the '=' branch assumes exactly one binary operation
    ("number op number"); longer expressions will misparse -- confirm the
    calculator keyboard cannot produce them.
    """
    text = update.message.text
    chat_id = update.message.chat.id
    print(text)
    # Digits and operators are buffered until '=' is pressed.
    if text in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '+', '-', '*', '÷']:
        dict_calc[chat_id] += text
    elif text == '=':
        # Space out the operator so split() yields [lhs, op, rhs].
        clc = dict_calc.get(chat_id)
        clc = clc.replace('*' , ' * ')
        clc = clc.replace('÷' , ' ÷ ')
        clc = clc.replace('+' , ' + ')
        clc = clc.replace('-' , ' - ')
        clc = clc.split()
        if clc[1] == '*':
            result = int(clc[0]) * int(clc[2])
            bot.send_message(chat_id, 'Результат вычисления: {} * {} = {}'.format(clc[0], clc[2], result))
            del dict_calc[chat_id]
        elif clc[1] == '+':
            result = int(clc[0]) + int(clc[2])
            bot.send_message(chat_id, 'Результат вычисления: {} + {} = {}'.format(clc[0], clc[2], result))
            del dict_calc[chat_id]
        elif clc[1] == '-':
            result = int(clc[0]) - int(clc[2])
            bot.send_message(chat_id, 'Результат вычисления: {} - {} = {}'.format(clc[0], clc[2], result))
            del dict_calc[chat_id]
        elif clc[1] == '÷':
            # Division is the only branch that can raise at runtime.
            try:
                result = int(clc[0]) / int(clc[2])
                bot.send_message(chat_id, 'Результат вычисления: {} / {} = {}'.format(clc[0], clc[2], result))
                del dict_calc[chat_id]
            except (ZeroDivisionError):
                bot.send_message(chat_id, 'Ошибка деления на ноль. Проверьте корректность ввода данных!')
                del dict_calc[chat_id]
        else:
            bot.send_message(chat_id, 'Проверьте корректность ввода данных для расчет!')
            del dict_calc[chat_id]
    elif text == 'Esc':
        clear_keyboards(bot,chat_id)
    else:
        # Anything else is simply echoed back.
        update.message.reply_text(text)
def wordcount_bot(bot, update, args):
    """Handle /wordcount: reply with the number of words in the message.

    The -1 excludes the '/wordcount' command token itself.
    NOTE(review): only *standalone* punctuation tokens are filtered out
    (punctuation attached to a word is kept), and '.' appears twice in
    the filter list -- likely unintentional duplication.
    """
    text= update.message.text
    print(text)
    user_text = text.split()
    user_text = [item for item in user_text if item not in ['?','!', '.', '.', ',', ':', ';', '*', '"']]
    update.message.reply_text(len(user_text)-1)
    print(len(user_text)-1)
def calculator_bot(bot, update, args):
    """Handle /calculator: replace the chat keyboard with a 4x4 numeric
    keypad whose button presses are consumed by chat()."""
    print(update.message.chat.id)
    custom_keyboard = [['1', '2', '3', '÷'], ['4', '5', '6', '*'], ['7', '8', '9', '-'], ['/menu', '0', '+', '=']]
    reply_markup = telegram.ReplyKeyboardMarkup(custom_keyboard)
    bot.send_message(chat_id = update.message.chat.id, text = "Используйте кнопки калькулятора для расчета!", reply_markup = reply_markup)
def clear_keyboards(bot, userssss_id):
    """Remove any custom reply keyboard from the given chat and tell the
    user how to reopen the main menu."""
    reply_markup = telegram.ReplyKeyboardRemove(remove_keyboard=True)
    bot.send_message(chat_id = userssss_id, text = "Для выхода в основное меню наберите команду: /menu", reply_markup = reply_markup)
def main():
    """Wire up the bot's handlers and poll until interrupted."""
    updtr = Updater(settings.TELEGRAM_API_KEY)
    updtr.dispatcher.add_handler(CommandHandler('menu', start_bot))
    # NOTE(review): the catch-all text handler is registered before the
    # /wordcount and /calculator command handlers -- confirm commands are
    # still routed to them with this python-telegram-bot version.
    updtr.dispatcher.add_handler(MessageHandler(Filters.text, chat))
    updtr.dispatcher.add_handler(CommandHandler('wordcount', wordcount_bot, pass_args=True))
    updtr.dispatcher.add_handler(CommandHandler('calculator', calculator_bot, pass_args=True))
    updtr.start_polling()
    updtr.idle()
# Script entry point: log startup and run the bot.
if __name__=='__main__':
    logging.info('Bot started')
    main()
| farstas/Les1_Reposit | bot.py | bot.py | py | 4,082 | python | ru | code | 0 | github-code | 90 |
18502712909 | import sys
# Shadow the builtin input() with a faster readline-based version
# (common competitive-programming idiom; strips the trailing newline).
def input(): return sys.stdin.readline().rstrip()
def solve(l, m):
    """Cost of covering distance *l* on one side of the origin and *m* on
    the other: walk the nearer side first (traversed twice), then the
    farther side once."""
    near, far = sorted((l, m))
    return 2 * near + far
def main():
    """Read N, K and N coordinates, and print the minimum travel distance
    from the origin needed to visit K of them.

    Assumes the coordinates are given in ascending order (presumably per
    the problem statement -- TODO confirm).
    """
    N, K = map(int, input().split())
    x = tuple(map(int, input().split()))
    # Distances of the K nearest non-negative points (ascending).
    right_x = [i for i in x if i >= 0][:K]
    # Distances of the K nearest negative points; with x ascending these
    # come out farthest-first, so [-K:] keeps the closest K.
    left_x = [-i for i in x if i < 0][-K:]
    left_len = len(left_x)
    right_len = len(right_x)
    # Pad with a sentinel larger than any coordinate (|x| <= 10**9) so
    # "not enough points on that side" combinations never win.
    if right_len < K:
        right_x += [10 ** 9 + 1] * (K - right_len)
    if left_len < K:
        left_x = [10 ** 9 + 1] * (K - left_len) + left_x
    # Align the two lists so index i means: take i points from the right
    # (farthest needed = right_x[i]) and K-i from the left (farthest
    # needed = left_x[i]); the 0 entries cover the all-one-side cases.
    right_x = [0] + right_x
    left_x += [0]
    ans = 10 ** 9
    for i in range(K + 1):
        tmp_ans = solve(left_x[i], right_x[i])
        if tmp_ans < ans:
            ans = tmp_ans
    print(ans)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p03274/s377118514.py | s377118514.py | py | 784 | python | en | code | 0 | github-code | 90 |
43416801419 | from argparse import ArgumentParser
import logging
import regex
import sys
try:
from .utils import setup_logging
except ImportError:
from utils import setup_logging
# Detect escaped characters
# allow one of the brackets missing
escaped_re = regex.compile(r"\[?\[[abcdef\d]+\]\]|\[\[[abcdef\d]+\]\]?")
def unescape(match, strict=False):
    """Decode one escaped ``[[hex]]`` sequence back into a unicode string.

    Returns '' for invalid hex / invalid UTF-8 unless *strict* is set,
    in which case a ValueError is raised instead.
    """
    digits = match.captures()[0].strip('[]')
    logging.debug(f"Unescaping: '{digits}'")
    try:
        return bytes.fromhex(digits).decode('utf-8')
    except ValueError:  # also catches UnicodeDecodeError (a ValueError subclass)
        if not strict:
            # Lenient mode: drop the invalid sequence entirely.
            return ''
        # Strict mode: surface the problem to the caller.
        raise ValueError("Invalid escaped sequence." \
                + " Translator may have copied it wrong.")
def main():
    """CLI entry point: read lines from stdin and print them with each
    escaped ``[[hex]]`` sequence decoded (invalid ones dropped, or a
    ValueError raised with --strict)."""
    parser = ArgumentParser()
    parser.add_argument('--debug', action='store_true', help='Debug mode')
    parser.add_argument('-s','--strict', action='store_true',
            help='Strict mode. By default invalid escaped sequences will be omitted.')
    args = parser.parse_args()
    setup_logging(args)
    for line in sys.stdin:
        # Find splits and matches inside the sentence
        escaped = list(escaped_re.finditer(line.strip()))
        splits = list(escaped_re.splititer(line.strip()))
        output = ''
        # Join splits with unescaped matches: N matches interleave with
        # N+1 split pieces, so the last split has no trailing match.
        for i, split in enumerate(splits):
            if i != len(splits)-1:
                output += split + unescape(escaped[i], args.strict)
            else:
                output += split
        print(output)
if __name__ == "__main__":
main()
| ZJaume/escape-unk | src/escape_unk/unescape_unk.py | unescape_unk.py | py | 1,730 | python | en | code | 0 | github-code | 90 |
13563098038 | import streamlit as st
import pickle as pk
import pandas as pd
import requests
movie_list=pk.load(open("moviesdata.pkl","rb"))
movies=pd.DataFrame(movie_list)
simularity=pk.load(open("simularity.pkl","rb"))
simularitydf=pd.DataFrame(simularity)
st.header("Movie recommender")
movie=st.selectbox(label="Choose the movie name",options=movies["title"].values)
def fetch(movie):
    """Return the TMDB poster image URL for the given TMDB movie id.

    NOTE(review): the API key is hard-coded in the URL -- consider moving
    it into configuration/secrets.
    """
    response=requests.get("https://api.themoviedb.org/3/movie/{}?api_key=872b4f07900bffe963fa08e318659170&language=en-US".format(movie))
    data=response.json()
    return "http://image.tmdb.org/t/p/w185"+data["poster_path"]
def recommend(movie):
    """Return (titles, poster_urls) of the 10 movies most similar to *movie*.

    Looks up the movie's row in the precomputed similarity matrix, sorts
    all scores descending, and keeps entries 1..10 (entry 0 is the movie
    itself).
    """
    movie_id = movies[movies["title"] == movie].index[0]
    rec=[]
    pos=[]
    sim_movies = sorted(list(enumerate(simularity[movie_id])), reverse=True, key=lambda x: x[1])[1:11]
    for i in sim_movies:
        pos.append(fetch(movies.iloc[i[0]]["id"]))
        rec.append((movies.iloc[i[0]]["title"]))
    return rec,pos
# Render the 10 recommendations in rows of three columns each, replacing
# 12 copy-pasted `with colN:` blocks (of which only 10 were used).
if st.button("Recommend"):
    name, poster = recommend(movie)
    for start in range(0, len(name), 3):
        cols = st.columns(3)
        # zip stops at the shorter sequence, so a partial last row is fine.
        for col, idx in zip(cols, range(start, min(start + 3, len(name)))):
            with col:
                st.text(name[idx])
                st.image(poster[idx])
18162137389 | import networkx as nx
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
# Build an undirected graph from m edge lines and report the size of its
# largest connected component.
G = nx.Graph()
n, m = map(int, readline().split())
# With no edges every vertex is isolated, so the largest component has
# size 1.  (n, the vertex count, is otherwise unused: any component built
# from an edge already has >= 2 vertices.)
if m == 0:
    print(1)
    sys.exit()
for i in range(m):
    a, b = map(int, readline().split())
    G.add_edge(a, b)
largest_cc = max(nx.connected_components(G), key=len)
print(len(largest_cc))
| Aasthaengg/IBMdataset | Python_codes/p02573/s330090347.py | s330090347.py | py | 390 | python | en | code | 0 | github-code | 90 |
19719870506 | import pygame
from constants import SQUARE_SIZE, STATUS_BAR_HEIGHT, STATUS_COLOR, WIDTH, HEIGHT, ROWS, COLS
from board import Board
from network import Network
from sys import argv
from time import sleep
def clicked_pos(position):
    """Map a pixel (x, y) mouse position to (column, row) board indices."""
    px, py = position
    cell_w = WIDTH / ROWS
    cell_h = HEIGHT / COLS
    return int(px / cell_w), int(py / cell_h)
def flip_coords(x, y):
    """Mirror (x, y) across the centre of the 8x8 board, so coordinates
    sent to the opponent are expressed from their point of view.
    e.g. (0,0) -> (7,7), (2,4) -> (5,3)
    """
    return tuple(abs(coord - 7) for coord in (x, y))
def flip_coords_list(list):
    """Return a new list with every (x, y) pair flipped via flip_coords."""
    return [flip_coords(item[0], item[1]) for item in list]
def turn(x, y, piece_clicked):
    """Process one click of the local player's turn.

    :param x, y: clicked board square (column, row)
    :param piece_clicked: None if no piece is currently selected, else a
        dict with keys 'coords' (current square), 'moves' (legal target
        squares), 'init_coords' (square where this turn's move started)
        and 'to_kill' (list of opponent squares captured so far this turn)
    :return: the updated selection dict, or None once the selection is
        cleared or the move has been sent to the opponent (which also
        sets the module-level ``my_turn`` to False)
    """
    global my_turn
    available_kills = board.available_kills()
    print("kill list: ", available_kills)
    available_super_kills = board.available_super_kills()
    # NOTE(review): this prints available_kills again -- probably meant
    # to print available_super_kills.
    print("kill list: ", available_kills)
    if not piece_clicked: # When user is going to click a pawn
        if board.is_piece(x, y) and board.board_pieces[y][x].is_player_piece():
            # Kings ("super" pieces) and regular pawns have separate move
            # and forced-capture tables.
            if board.is_super(x, y):
                if len(available_super_kills) == 0: # No kills are available
                    # Piece clicked
                    moves = board.check_super_moves(x, y) # moves - tuple containing available moves - x,y coords
                    board.color_moves(moves)
                else: # kill is available
                    if (x, y) in available_super_kills:
                        moves = available_super_kills[(x, y)] # moves - tuple containing available moves - x,y coords
                        board.color_moves(moves)
                    else:
                        return None # There are available kills but wrong pawn was clicked
            else:
                if len(available_kills) == 0: # No kills are available
                    # Piece clicked
                    moves = board.check_moves(x, y) # moves - tuple containing available moves - x,y coords
                    board.color_moves(moves)
                else: # kill is available
                    if (x, y) in available_kills:
                        moves = available_kills[(x, y)] # moves - tuple containing available moves - x,y coords
                        board.color_moves(moves)
                    else:
                        return None # There are available kills but wrong pawn was clicked
            return {'coords': (x, y), 'moves': moves, 'init_coords': (x, y), 'to_kill': []}
        else:
            # Piece not clicked
            return None
    else:
        if board.is_piece(x, y) and len(piece_clicked['to_kill']) == 0: # when user hasnt killed any pieces in the current turn
            if piece_clicked['coords'][0] == x and piece_clicked['coords'][1] == y:
                # Piece unclicked
                board.color_board()
                return None
            elif board.is_piece(x, y) and board.board_pieces[y][x].is_player_piece():
                # Change to other piece
                return turn(x, y, None)
            else:
                # User clicked opponent pawn
                board.color_board()
                return None
        elif board.is_piece(x, y) == False:
            # Clicked an empty square: attempt to move the selected piece.
            if board.is_super(*piece_clicked['coords']):
                if len(available_super_kills) == 0: # player doesnt have possibility of killing
                    if (x, y) in piece_clicked['moves']:
                        board.color_board()
                        board.move_piece(*piece_clicked['coords'], x, y)
                        network.send((*flip_coords(*piece_clicked['coords']), *flip_coords(x, y)))
                        my_turn = False
                        return None
                    else:
                        board.color_moves(piece_clicked['moves'])
                        return piece_clicked
                else: #player has to kill piece
                    if (x, y) in available_super_kills[piece_clicked['coords']]:
                        # The captured piece sits midway between start and
                        # landing square.
                        x_killed = (piece_clicked['coords'][0] + x)/2
                        y_killed = (piece_clicked['coords'][1] + y)/2
                        board.kill_piece([[int(x_killed), int(y_killed)]])
                        print("x_killed", x_killed, "y_killed", y_killed)
                        piece_clicked['to_kill'].append([int(x_killed),int(y_killed)])
                        board.move_piece(*piece_clicked['coords'], x, y)
                        # Multi-jump: stay selected if another capture is
                        # available from the landing square.
                        available_super_kills = board.available_super_kills()
                        if (x, y) in available_super_kills:
                            piece_clicked['coords'] = (x, y)
                            piece_clicked['moves'] = available_super_kills[(x, y)]
                            board.color_moves(piece_clicked['moves'])
                            return piece_clicked
                        else:
                            network.send((*flip_coords(*piece_clicked['init_coords']), *flip_coords(x, y), flip_coords_list(piece_clicked['to_kill']))) #added killed pieces coords (list) in send data
                            my_turn = False
                            return None
                    else:
                        board.color_moves(piece_clicked['moves'])
                        return piece_clicked
            else:
                if len(available_kills) == 0: # player doesnt have possibility of killing
                    if (x, y) in piece_clicked['moves']:
                        board.color_board()
                        board.move_piece(*piece_clicked['coords'], x, y)
                        network.send((*flip_coords(*piece_clicked['coords']), *flip_coords(x, y)))
                        my_turn = False
                        return None
                    else:
                        board.color_moves(piece_clicked['moves'])
                        return piece_clicked
                else: #player has to kill piece
                    if (x, y) in available_kills[piece_clicked['coords']]:
                        # The captured piece sits midway between start and
                        # landing square.
                        x_killed = (piece_clicked['coords'][0] + x)/2
                        y_killed = (piece_clicked['coords'][1] + y)/2
                        board.kill_piece([[int(x_killed), int(y_killed)]])
                        print("x_killed", x_killed, "y_killed", y_killed)
                        piece_clicked['to_kill'].append([int(x_killed),int(y_killed)])
                        board.move_piece(*piece_clicked['coords'], x, y)
                        # Multi-jump: stay selected if another capture is
                        # available from the landing square.
                        available_kills = board.available_kills()
                        if (x, y) in available_kills:
                            piece_clicked['coords'] = (x, y)
                            piece_clicked['moves'] = available_kills[(x, y)]
                            board.color_moves(piece_clicked['moves'])
                            return piece_clicked
                        else:
                            network.send((*flip_coords(*piece_clicked['init_coords']), *flip_coords(x, y), flip_coords_list(piece_clicked['to_kill']))) #added killed pieces coords (list) in send data
                            my_turn = False
                            return None
                    else:
                        board.color_moves(piece_clicked['moves'])
                        return piece_clicked
        else:
            # Not Moved
            board.color_moves(piece_clicked['moves'])
            return piece_clicked
def status_display(status):
    """Render *status* centered in the status bar area below the board."""
    font = pygame.font.Font('freesansbold.ttf', 12)
    textSurface = font.render(status, True, STATUS_COLOR)
    textRect = textSurface.get_rect()
    textRect.center = (WIDTH // 2, HEIGHT+STATUS_BAR_HEIGHT//2)
    WINDOW.blit(textSurface, textRect)
def main():
    """Main game loop: connect to the opponent, then alternately handle
    local clicks (our turn) and apply moves received over the network
    (their turn) until one side has no pieces left.

    Uses module-level globals: board, network, clock, WINDOW, FPS.
    """
    global board, my_turn
    my_turn = network.connect()
    if my_turn:
        # Drain the handshake message so the first real move is read later.
        _ = network.recive()
    status = ''
    send = False  # NOTE(review): unused variable
    run = True
    piece_clicked = None
    while run:
        clock.tick(FPS)
        if board.pieces_player_len == 0: # Opponent win
            run = False
            status = 'Opponent wins'
        elif board.pieces_opponent_len == 0: # Player win
            run = False
            status = 'You win'
        if board.available_kills_len > 0:
            status = 'You have to kill!'
        board.draw_board(WINDOW)
        board.draw_pieces(WINDOW)
        # Highlight the currently selected piece with a white dot.
        if(piece_clicked):
            pygame.draw.circle(WINDOW, (255,255,255), (piece_clicked['coords'][0]*SQUARE_SIZE+SQUARE_SIZE/2,
                piece_clicked['coords'][1]*SQUARE_SIZE+SQUARE_SIZE/2), 10)
        status_display(status)
        pygame.display.update()
        if my_turn == False:
            status = 'Opponent\'s turn!'
            # Apply the opponent's move: (from_x, from_y, to_x, to_y[, kills]).
            data_recive = network.recive()
            if data_recive is not None and len(data_recive) > 0:
                board.move_piece(data_recive[0], data_recive[1], data_recive[2], data_recive[3])
                if len(data_recive) > 4:
                    board.kill_piece(data_recive[4])
                my_turn = True
        else: status = 'Your turn!'
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                network.disconnect()
                pygame.quit()
                quit()
            if event.type == pygame.MOUSEBUTTONDOWN:
                if my_turn == True:
                    pos = pygame.mouse.get_pos()
                    # Ignore clicks inside the status bar below the board.
                    if(pos[1] < HEIGHT):
                        x, y = clicked_pos(pos)
                        print("X: "+ str(x) + ' Y: ' + str(y))
                        piece_clicked = turn(x, y, piece_clicked)
                        print('Piece clicked: '+str(piece_clicked))
                else:
                    pass
    # Leave the final board/status on screen briefly before shutting down.
    sleep(5)
    network.disconnect()
    pygame.quit()
    quit()
# Entry point: create the window and the shared game globals (board,
# network, clock, my_turn) that main()/turn()/status_display() rely on.
if __name__ == '__main__':
    '''
    consts for window
    '''
    FPS = 15
    programIcon = pygame.image.load('assets/icon.png')
    WINDOW = pygame.display.set_mode((WIDTH, HEIGHT+STATUS_BAR_HEIGHT))
    pygame.display.set_caption('Warcaby')
    pygame.display.set_icon(programIcon)
    pygame.font.init()
    '''
    Game global variables
    '''
    clock = pygame.time.Clock()
    board = Board()
    my_turn = None
    network = Network(argv) # argv - arguments passed in console (ip address in our case)
    main()
| AdithiKashain/Checkers-Game | client.py | client.py | py | 10,327 | python | en | code | 1 | github-code | 90 |
18040821079 | from sys import stdin
def main():
    """Read integers a, b, x from stdin and print how many multiples of x
    lie in the closed interval [a, b]."""
    readline = stdin.readline
    a, b, x = map(int, readline().split())
    # floor(b/x) counts multiples in [1, b]; subtract those below a.
    # When a == 0, zero itself is a multiple, hence the +1.
    print(b // x + 1 if a == 0 else b // x - (a - 1) // x)
if __name__=="__main__":
main() | Aasthaengg/IBMdataset | Python_codes/p03861/s140063147.py | s140063147.py | py | 223 | python | en | code | 0 | github-code | 90 |
21490641087 | #!/usr/bin/env python3
import sys
import random
import sh
from visidata import *
from .git import *
from .merge import GitMerge
from .blame import GitBlame
from .diff import DifferSheet
__version__ = 'saul.pw/vgit v0.3pre'
option('vgit_show_ignored', False, '')
GitSheet.addCommand('x', 'git-exec', 'i = input("git ", type="git"); git(*i.split())', 'execute arbitrary git command')
GitSheet.addCommand('B', 'git-branches', 'vd.push(gitBranchesSheet).reload()', 'push branches sheet')
GitSheet.addCommand('gO', 'git-options', 'vd.push(gitOptionsSheet).reload()', 'push sheet of git options')
GitSheet.addCommand('', 'git-push', 'git("push")', 'git push')
GitSheet.addCommand('A', 'git-abort', 'abortWhatever()', 'abort the current in-progress action')
GitSheet.addCommand('H', 'git-log', 'vd.push(LogSheet(branch+"_log", source=branch))', 'push log of current branch')
GitSheet.addCommand('T', 'git-stashes', 'vd.push(gitStashesSheet).reload()', 'push stashes sheet')
GitSheet.addCommand('R', 'git-remotes', 'vd.push(gitRemotesSheet).reload()', 'push remotes sheet')
GitSheet.addCommand('', 'git-stash-save', 'git("stash", "save")', 'stash uncommitted changes')
GitSheet.addCommand('', 'git-stash-pop', 'git("stash", "pop")', 'apply the most recent stashed change and drop it')
class GitStashes(GitSheet):
    """Sheet listing `git stash` entries.

    rowdef: [stashid, start_branch, sha1, msg]
    """
    columns = [
        ColumnItem('stashid', 0),
        ColumnItem('start_branch', 1),
        ColumnItem('sha1', 2),
        ColumnItem('msg', 3),
    ]
    def reload(self):
        self.rows = []
        for line in git_lines('stash', 'list'):
            # e.g. "stash@{0}: WIP on master: 1234abc commit message"
            # (line[:-1] drops the trailing character -- presumably a
            # newline from git_lines; confirm.)
            stashid, ctx, rest = line[:-1].split(': ', 2)
            starting_branch = ctx[len('WIP on '):]
            sha1, msg = rest.split(' ', 1)
            self.rows.append([stashid, starting_branch, sha1, msg])
GitStashes.addCommand('a', 'git-stash-apply', 'git("stash", "apply", cursorRow[0])', 'apply this stashed change without removing'),
GitStashes.addCommand('', 'git-stash-pop-row', 'git("stash", "pop", cursorRow[0])', 'apply this stashed change and drop it'),
GitStashes.addCommand('d', 'git-stash-drop-row', 'git("stash", "drop", cursorRow[0])', 'drop this stashed change'),
GitStashes.addCommand('b', 'git-stash-branch', 'git("stash", "branch", input("create branch from stash named: "), cursorRow[0])', 'create branch from stash'),
GitStashes.addCommand(ENTER, 'dive-row', 'vd.push(HunksSheet(cursorRow[0]+"_diffs", source=sheet, "stash", "show", "--no-color", "--patch", cursorRow[0]))', 'show this stashed change'),
class GitUndo:
    """Context manager that runs the given git command on exit, undoing a
    temporary state change (e.g. `stash pop` after a `stash save`)."""
    def __init__(self, *args):
        # Git arguments to execute when the with-block exits.
        self.cmdargs = args
    def __enter__(self):
        return self
    def __exit__(self, exctype, exc, tb):
        # Runs even if the body raised; returning None lets exceptions
        # propagate.
        out = loggit_all(*self.cmdargs)
def randomBranchName():
    """Return a throwaway branch name: 10 random lowercase ASCII letters."""
    letters = string.ascii_lowercase
    return ''.join(letters[random.randint(0, 25)] for _ in range(10))
# rowdef: (commit_hash, refnames, author, author_date, body, notes)
class LogSheet(GitSheet):
    """Sheet showing `git log` for one branch (self.source).

    Editing the message/author/author_date/notes cells rewrites the
    commit via `git commit --amend` / `git notes`.
    """
    # corresponding to rowdef
    GIT_LOG_FORMAT = ['%H', '%D', '%an <%ae>', '%ai', '%B', '%N']
    columns = [
        ColumnItem('commitid', 0, width=8),
        ColumnItem('refnames', 1, width=12),
        Column('message', getter=lambda c,r: r[4], setter=lambda c,r,v: c.sheet.git('commit', '--amend', '--no-edit', '--quiet', '--message', v), width=50),
        Column('author', getter=lambda c,r: r[2], setter=lambda c,r,v: c.sheet.git('commit', '--amend', '--no-edit', '--quiet', '--author', v)),
        Column('author_date', type=date, getter=lambda c,r:r[3], setter=lambda c,r,v: c.sheet.git('commit', '--amend', '--no-edit', '--quiet', '--date', v)),
        Column('notes', getter=lambda c,r: r[5], setter=lambda c,r,v: c.sheet.git('notes', 'add', '--force', '--message', v, r[0])),
    ]
    # Highlight commits that are not on any remote branch yet.
    colorizers = [RowColorizer(5, 'cyan', lambda s,c,r,v: r and not s.inRemoteBranch(r[0]))]
    def amendPrevious(self, targethash):
        'amend targethash with current index, then rebase newer commits on top'
        prevBranch = loggit_all('rev-parse', '--symbolic-full-name', '--abbrev-ref', 'HEAD').strip()
        ret = loggit_all('commit', '-m', 'MERGE '+targethash) # commit index to viewed branch
        newChanges = loggit_all('rev-parse', 'HEAD').strip()
        ret += loggit_all('stash', 'save', '--keep-index') # stash everything else
        with GitUndo('stash', 'pop'):
            tmpBranch = randomBranchName()
            ret += loggit_all('checkout', '-b', tmpBranch) # create/switch to tmp branch
            with GitUndo('checkout', prevBranch), GitUndo('branch', '-D', tmpBranch):
                ret += loggit_all('reset', '--hard', targethash) # tmpbranch now at targethash
                ret += loggit_all('cherry-pick', '-n', newChanges) # pick new change from original branch
                ret += loggit_all('commit', '--amend', '--no-edit') # recommit to fix targethash (which will change)
                ret += loggit_all('rebase', '--onto', tmpBranch, 'HEAD@{1}', prevBranch) # replay the rest
        return ret.splitlines()
    # NOTE(review): lru_cache on an instance method keys on self and keeps
    # every sheet instance alive for the cache's lifetime.
    @functools.lru_cache()
    def inRemoteBranch(self, commitid):
        return git_all('branch', '-r', '--contains', commitid)
    @asyncthread
    def reload(self):
        self.rows = []
        # -z/%x1f: NUL-separated records with unit-separator-joined fields,
        # so multi-line commit bodies survive parsing.
        for record in git_iter('\0', 'log', '--no-color', '-z', '--pretty=format:%s' % '%x1f'.join(self.GIT_LOG_FORMAT), self.source):
            self.rows.append(record.split('\x1f'))
LogSheet.addCommand(ENTER, 'dive-row', 'vd.push(getCommitSheet(cursorRow[0][:7], sheet, cursorRow[0]))', 'show this commit'),
#LogSheet.addCommand('', 'git-squash-selected', '', 'squash selected commits'),
LogSheet.addCommand('x', 'git-pick', 'git("cherry-pick", cursorRow[0])', 'cherry-pick this commit onto current branch'),
LogSheet.addCommand('gx', 'git-pick-selected', '', 'cherry-pick selected commits onto current branch'),
LogSheet.addCommand('C', 'git-commit-amend', 'confirm("amend this commit with the index? "); amendPrevious(cursorRow[0]); reload()', 'amend this commit with changes in the index'),
LogSheet.addCommand('r', 'git-reset-here', 'git("update-ref", "refs/heads/"+source, cursorRow[0])', 'reset this branch to this commit'),
class GitBranches(GitSheet):
    """Sheet listing all local and remote branches (`git branch -vv --all`).

    rowdef: [current_marker, branch, head_commitid, tracking_remote,
             extra, head_commitmsg, upstream_status, merge_base_name]
    """
    columns = [
        Column('branch', getter=lambda c,r: r[1][8:] if r[1].startswith('remotes/') else r[1], width=20),
        ColumnItem('head_commitid', 2, width=0),
        ColumnItem('tracking', 3),
        ColumnItem('upstream', 6),
        ColumnItem('merge_base', 7, width=20),
        ColumnItem('extra', 4, width=0),
        ColumnItem('head_commitmsg', 5, width=50),
    ]
    colorizers = [
        # Underline the currently checked-out branch; cyan for local branches.
        RowColorizer(10, 'underline', lambda s,c,r,v: r[0]),
        RowColorizer(10, 'cyan', lambda s,c,r,v: not r[1].startswith('remotes/')),
    ]
    nKeys = 1
    def __init__(self):
        super().__init__('branches')
    @asyncthread
    def reload(self):
        self.rows = []
        for line in git_lines('branch', '--list', '-vv', '--no-color', '--all'):
            # Skip symbolic refs like "origin/HEAD -> origin/master".
            if '->' in line:
                continue
            # Parse: "* branch  abc1234 [remote: status] commit message"
            m = re.match(r'''(\*?)\s+
                              (\S+)\s+
                              (\w+)\s+
                              (?:\[
                                ([^\s\]:]+):?
                                \s*(.*?)
                              \])?
                              \s*(.*)''', line, re.VERBOSE)
            if m:
                current, localbranch, refid, remotebranch, extra, msg = m.groups()
                merge_base = git_all("show-branch", "--merge-base", localbranch, gitStatusSheet.branch, _ok_code=[0,1]).strip()
                merge_name = git_all("name-rev", "--name-only", merge_base).strip() if merge_base else ''
                self.rows.append(list(m.groups()) + [gitStatusSheet.getBranchStatuses().get(localbranch)] + [merge_name])
GitBranches.addCommand('a', 'git-branch-create', 'git("branch", input("create branch: ", type="branch"))', 'create a new branch off the current checkout'),
GitBranches.addCommand('d', 'git-branch-delete', 'git("branch", "--delete", cursorRow[1])', 'delete this branch'),
GitBranches.addCommand('e', 'git-branch-rename', 'git("branch", "-v", "--move", cursorRow[1], editCell(0))', 'rename this branch'),
GitBranches.addCommand('c', 'git-checkout', 'git("checkout", cursorRow[1])', 'checkout this branch'),
GitBranches.addCommand('m', 'git-branch-merge', 'git("merge", cursorRow[1])', 'merge this branch into the current branch'),
GitBranches.addCommand(ENTER, 'dive-row', 'vd.push(LogSheet(cursorRow[1]+"_log", cursorRow[1]))', 'push log of this branch'),
def getHunksSheet(parent, *files):
    """Return a HunksSheet of unstaged diff hunks (worktree vs index)
    for the given GitFile objects (all files when none are given)."""
    return HunksSheet('hunks', parent, 'diff',
            '--diff-algorithm=' + options.git_diff_algo,
            '--patch',
            '--inter-hunk-context=2', '-U1',
            '--no-color',
            '--no-prefix', *[gf.filename for gf in files])
def getStagedHunksSheet(parent, *files):
    """Return a HunksSheet of staged diff hunks (index vs HEAD, via
    `git diff --cached`) for the given GitFile objects."""
    return HunksSheet('staged_hunks', parent, 'diff', '--cached',
            '--diff-algorithm=' + options.git_diff_algo,
            '--patch',
            '--inter-hunk-context=2', '-U1',
            '--no-color',
            '--no-prefix', *[gf.filename for gf in files])
def getCommitSheet(name, parent, *refids):
    """Return a HunksSheet showing the patch of the given commit(s)
    (`git show`)."""
    return HunksSheet(name, parent, 'show',
            '--diff-algorithm=' + options.git_diff_algo,
            '--patch',
            '--inter-hunk-context=2', '-U1',
            '--no-color',
            '--no-prefix', *refids)
# self.git_args holds the arguments passed to git() on reload
class HunksSheet(GitSheet):
    """Sheet with one row per diff hunk, parsed from unified-diff output.

    rowdef: (origfn, filename, context, leftlinenum, leftcount,
             rightlinenum, rightcount, header_lines) where header_lines
    accumulates the raw patch lines (---/+++/@@ plus hunk body).
    """
    columns = [
        ColumnItem('origfn', 0, width=0),
        ColumnItem('filename', 1),
        ColumnItem('context', 2),
        ColumnItem('leftlinenum', 3),
        ColumnItem('leftcount', 4),
        ColumnItem('rightlinenum', 5),
        ColumnItem('rightcount', 6),
    ]
    def __init__(self, name, parent, *git_args):
        super().__init__(name, source=parent)
        self.git_args = git_args
    def reload(self):
        # Parse an "@@ -start,count" fragment; count defaults to 1 when
        # omitted. (Returns None implicitly for malformed input.)
        def _parseStartCount(s):
            sc = s.split(',')
            if len(sc) == 2:
                return sc
            if len(sc) == 1:
                return sc[0], 1
        self.rows = []
        leftfn = ''
        rightfn = ''
        header_lines = None
        diff_started = False
        for line in git_lines(*self.git_args):
            # Skip everything before the first "diff ..." line (e.g. the
            # commit header emitted by `git show`).
            if line.startswith('diff'):
                diff_started = True
                continue
            if not diff_started:
                continue
            if line.startswith('---'):
                header_lines = [line] # new file
                leftfn = line[4:]
            elif line.startswith('+++'):
                header_lines.append(line)
                rightfn = line[4:]
            elif line.startswith('@@'):
                header_lines.append(line)
                _, linenums, context = line.split('@@')
                leftlinenums, rightlinenums = linenums.split()
                leftstart, leftcount = _parseStartCount(leftlinenums[1:])
                rightstart, rightcount = _parseStartCount(rightlinenums[1:])
                self.rows.append((leftfn, rightfn, context, int(leftstart), int(leftcount), int(rightstart), int(rightcount), header_lines))
                header_lines = header_lines[:2]  # keep file context
            elif line[0] in ' +-':
                # Body line: append to the current hunk's header_lines list
                # (mutable even though the row tuple itself is not).
                self.rows[-1][-1].append(line)
# Keybindings for HunksSheet.  (The trailing commas make each statement a
# discarded 1-tuple; harmless, but inconsistent with the lines below.)
HunksSheet.addCommand(ENTER, 'dive-row', 'vd.push(HunkViewer(sheet, cursorRow))', 'view the diff for this hunks'),
HunksSheet.addCommand('g^J', 'git-diff-selected', 'vd.push(HunkViewer(sheet, *(selectedRows or rows)))', 'view the diffs for the selected hunks (or all hunks)'),
HunksSheet.addCommand('V', 'git-view-patch', 'vd.push(TextSheet("diff", "\\n".join(cursorRow[7])))', 'view the raw patch for this hunk'),
#HunksSheet.addCommand('gV', 'git-view-patch-selected', '', 'view the raw patch for selected/all hunks'),
HunksSheet.addCommand('a', 'git-apply-hunk', 'git_apply(cursorRow, "--cached")', 'apply this hunk to the index'),
#HunksSheet.addCommand('r', 'git-reverse-hunk', 'git_apply(cursorRow, "--reverse")', 'undo this hunk'),
#HunksSheet.bindkey('d', 'git-reverse-hunk')
class HunkViewer(GitSheet):
    """Side-by-side viewer for one or more hunks (rows from a HunksSheet).

    Each row is [typech, left_line, right_line]; column '1' shows the old
    side, column '2' the new side.  Deletions are paired with the additions
    that immediately follow them so a changed line occupies a single row.
    """
    def __init__(self, srchunks, *hunks):
        super().__init__('hunk', sources=hunks)
        self.srchunks = srchunks
        self.columns = [
            # Split the window evenly between old (index 1) and new (index 2).
            ColumnItem('1', 1, width=vd().windowWidth//2-1),
            ColumnItem('2', 2, width=vd().windowWidth//2-1),
        ]
        self.addColorizer(RowColorizer(4, None, HunkViewer.colorDiffRow))
    def reload(self):
        """Render the first queued hunk; remove the sheet when none remain."""
        if not self.sources:
            self.vd.remove(self)
            return
        # Hunk tuple layout matches HunksSheet rows; [-1] is the patch text.
        fn, _, context, linenum, _, _, _, patchlines = self.sources[0]
        self.name = '%s:%s' % (fn, linenum)
        self.rows = []
        # Index of the next '-' row still awaiting a matching '+' line, or None.
        nextDelIdx = None
        for line in patchlines[3:]: # diff without the patch headers
            typech = line[0]
            line = line[1:]
            if typech == '-':
                self.rows.append([typech, line, None])
                if nextDelIdx is None:
                    nextDelIdx = len(self.rows)-1
            elif typech == '+':
                if nextDelIdx is not None:
                    if nextDelIdx < len(self.rows):
                        # Pair this addition with the oldest unpaired deletion.
                        self.rows[nextDelIdx][2] = line
                        nextDelIdx += 1
                        continue
                self.rows.append([typech, None, line])
                nextDelIdx = None
            elif typech == ' ':
                self.rows.append([typech, line, line])
                nextDelIdx = None
            else:
                continue  # header
    def colorDiffRow(self, c, row, v):
        """Color a row green/red/yellow for addition/deletion/change."""
        if row and row[1] != row[2]:
            if row[1] is None:
                return 'green'  # addition
            elif row[2] is None:
                return 'red'  # deletion
            else:
                return 'yellow'  # difference
# Keybindings for HunkViewer: stage / skip / edit the currently shown hunk.
HunkViewer.addCommand('2', 'git-apply-hunk', 'srchunks.git_apply(sources.pop(0), "--cached"); reload()', 'apply this hunk to the index and move to the next hunk'),
#HunkViewer.addCommand('1', 'git-remove-hunk', 'git_apply(sources.pop(0), "--reverse")', 'remove this hunk from the diff'),
HunkViewer.addCommand(ENTER, 'git-skip-hunk', 'sources.pop(0); reload()', 'move to the next hunk without applying this hunk'),
HunkViewer.addCommand('d', 'delete-line', 'source[7].pop(cursorRow[3]); reload()', 'delete a line from the patch'),
class GitGrep(GitSheet):
    """Sheet of `git grep` matches: one row per (filename, linenum, line)."""
    columns = [
        ColumnItem('filename', 0),
        ColumnItem('linenum', 1),
        ColumnItem('line', 2),
    ]
    def __init__(self, regex):
        # The search regex doubles as sheet name and as self.source.
        super().__init__(regex, regex)
    def reload(self):
        self.rows = []
        # -z NUL-separates filename/linenum/line, so one split('\0') yields
        # the three row fields even when the matched line contains colons.
        for line in git_lines('grep', '--no-color', '-z', '--line-number', '--ignore-case', self.source):
            self.rows.append((line.split('\0')))
# Keybinding for GitGrep: open the matched file and jump to the matched line.
GitGrep.addCommand(ENTER, 'dive-row', 'vd.push(TextSheet(cursorRow[0], open(cursorRow[0]))).cursorRowIndex = int(cursorRow[1])-1', 'go to this match')
class GitOptions(GitSheet):
    """Sheet of git config options across the local/global/system scopes."""
    # Index 0 is a deliberate placeholder so that cursorColIndex (column 0 is
    # the 'option' name column) maps 1->'local', 2->'global', 3->'system' in
    # the addCommand execstrs; the loops below iterate CONFIG_CONTEXTS[1:].
    CONFIG_CONTEXTS = ('local', 'local', 'global', 'system')
    def __init__(self):
        super().__init__('git config')
        self.columns = [Column('option', getter=lambda c,r: r[0])]
        for i, ctx in enumerate(self.CONFIG_CONTEXTS[1:]):
            # i=i freezes the loop index in the getter; ctx is captured by the
            # config_setter call, so both closures are bound correctly.
            self.columns.append(Column(ctx, getter=lambda c,r, i=i: r[1][i], setter=self.config_setter(ctx)))
        self.nKeys = 1
    def config_setter(self, ctx):
        """Return a setter writing the value back via `git config --<ctx>`."""
        def setter(r, v):
            self.git('config', '--'+ctx, r[0], v)
        return setter
    def reload(self):
        # opts: option name -> [local, global, system] values (None = unset).
        opts = {}
        for i, ctx in enumerate(self.CONFIG_CONTEXTS[1:]):
            try:
                for line in git_iter('\0', 'config', '--list', '--'+ctx, '-z'):
                    if line:
                        # With -z each entry is "key\nvalue" terminated by NUL.
                        k, v = line.splitlines()
                        if k not in opts:
                            opts[k] = [None, None, None]
                        opts[k][i] = v
            except:
                # NOTE(review): deliberately skips scopes where git fails
                # (e.g. no system config), but a bare except also swallows
                # KeyboardInterrupt -- consider `except Exception`.
                pass # exceptionCaught()
        self.rows = sorted(list(opts.items()))
# Keybindings for GitOptions; CONFIG_CONTEXTS[cursorColIndex] resolves the
# scope of the column under the cursor (see the class comment above).
GitOptions.addCommand('d', 'git-config-unset', 'git("config", "--unset", "--"+CONFIG_CONTEXTS[cursorColIndex], cursorRow[0])', 'unset this config value'),
GitOptions.addCommand('gd', 'git-config-unset-selected', 'for r in selectedRows: git("config", "--unset", "--"+CONFIG_CONTEXTS[cursorColIndex], r[0])', 'unset selected config values'),
#GitOptions.addCommand('e', 'i=(cursorVisibleColIndex or 1); visibleCols[i].setValues(sheet, [cursorRow], editCell(i)); sheet.cursorRowIndex += 1', 'edit this option'),
#GitOptions.addCommand('ge', 'i=(cursorVisibleColIndex or 1); visibleCols[i].setValues(sheet, selectedRows, input("set selected to: ", value=cursorValue))', 'edit this option for all selected rows'),
GitOptions.addCommand('a', 'git-config-add', 'git("config", "--add", "--"+CONFIG_CONTEXTS[cursorColIndex], input("option to add: "), "added")', 'add new option'),
# how to incorporate fetch/push/pull?
class GitRemotes(GitSheet):
    """Sheet of git remotes; renaming/re-pointing happens via column setters."""
    def __init__(self, **kwargs):
        super().__init__('remotes', **kwargs)
        self.columns=[
            Column('remote', getter=lambda c,r: r[0], setter=lambda c,r,v: c.sheet.git('remote', 'rename', r[0], v)),
            Column('url', getter=lambda c,r: r[1], setter=lambda c,r,v: c.sheet.git('remote', 'set-url', r[0], v)),
            Column('type', getter=lambda c,r: r[2]),
        ]
    def reload(self):
        self.rows = []
        # Each line looks like "origin <url> (fetch)"; [1:-1] strips the parens.
        for line in git_lines('remote', '-v', 'show'):
            name, url, paren_type = line.split()
            self.rows.append((name, url, paren_type[1:-1]))
# Keybindings for GitRemotes.
GitRemotes.addCommand('d', 'git-remote-delete', 'git("remote", "rm", cursorRow[0])', 'delete remote'),
GitRemotes.addCommand('a', 'git-remote-add', 'git("remote", "add", input("new remote name: ", type="remote"), input("url: ", type="url"))', 'add new remote')
# Module-level singleton sheets, created once at import time.
gitBranchesSheet = GitBranches()
gitOptionsSheet = GitOptions()
gitStashesSheet = GitStashes('stashes')
gitRemotesSheet = GitRemotes()
# options.wrap = False
# os.chdir(fn)
| shalevy1/visidata | plugins/vgit/vgit.py | vgit.py | py | 18,112 | python | en | code | null | github-code | 90 |
18032135069 | import sys
def input():
    """Shadow the builtin: read one stdin line with surrounding whitespace stripped (fast path for competitive programming)."""
    return sys.stdin.readline().strip()
def resolve():
    """Print a closed walk visiting (t1,t2) from (s1,s2) twice plus two detours.

    Reads "s1 s2 t1 t2" from stdin; emits R/U/L/D moves: one direct round
    trip, then a round trip shifted one cell down, then one shifted one
    cell up (AGC-style constructive path problem).
    """
    sx, sy, tx, ty = map(int, input().split())
    dx = tx - sx
    dy = ty - sy
    segments = []
    segments.append('R' * dx + 'U' * dy + 'L' * dx + 'D' * dy)
    segments.append('D' + 'R' * (dx + 1) + 'U' * (dy + 1) + 'L')
    segments.append('U' + 'L' * (dx + 1) + 'D' * (dy + 1) + 'R')
    print(''.join(segments))
resolve() | Aasthaengg/IBMdataset | Python_codes/p03836/s357785816.py | s357785816.py | py | 299 | python | en | code | 0 | github-code | 90 |
35577633013 | #This code imports the necessary modules.
import random
import os
from PIL import Image, ImageOps, ExifTags, ImageFile
import time
import playsound
from RandFunct import random_number
from RandFunct2 import random_number2
snglst = ["F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen1.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen2.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen3.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen4.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen5.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen6.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen7.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen8.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen9.wav",
"F:\\OriginalAudio\\Songs\\SongsC\\Bonegenb\\BoneGen10.wav"]
#snglst = ["C:\\Users\\mysti\\Media_Files\\Dreams\\DreamHop.mp3", "C:\\Users\\mysti\\Media_Files\\Dreams\\DreamHop2.mp3", "C:\\Users\\mysti\\Media_Files\\Dreams\\DreamHop3.mp3", "C:\\Users\\mysti\\Media_Files\\Dreams\\DreamHop4.mp3", "C:\\Users\\mysti\\Media_Files\\Dreams\\DreamHop5.mp3", "C:\\Users\\mysti\\Media_Files\\Dreams\\DreamHop6.mp3",]
#srchstr = 'C:\\Users\\mysti\\Media_Files\\Dreams'
#srchstr = "C:\\Users\\mysti\\Coding\\Rachel\\static"
#srchstr = "F:\BlackAndWhitePhotography"
srchstr = "F:\\Visual\\VisualArt\\PrintsThatAreUseful\\jpg"
# Collect every .jpg under srchstr (contentdat keeps ctime per path, though
# it is never read again below -- presumably left over from a sort feature).
contenttot= []
contentdat = {}
for subdir, dirs, files in os.walk(srchstr):
    for file in files:
        filepath = subdir + os.sep + file
        if filepath.endswith(".jpg") or filepath.endswith(".JPG"):
            tim = os.path.getctime(filepath)
            contentdat[filepath] = tim
            contenttot.append(filepath)
# Draw 30 images without replacement.  random_number(totlen) presumably
# returns an index in [0, totlen) -- TODO confirm against RandFunct.
# remove() deletes the first equal element, which matches pop(x2) only if
# paths are unique (they are, since os.walk yields each file once).
finlst = []
for ctr in range(30):
    totlen = len(contenttot)
    x2 = random_number(totlen)
    finlst.append(contenttot[x2])
    contenttot.remove(contenttot[x2])
# Show each picked image (EXIF-rotated) for 5 seconds.
for elem in finlst:
    #img = Image.open(elem)
    #img = ImageOps.exif_transpose(img)
    #img.show()
    #time.sleep(10)
    #climg = Image.close(img)
    with Image.open(elem) as img:
        img = ImageOps.exif_transpose(img)
        img.show()
        time.sleep(5)
        Image.Image.close(img)
    # image operations here.
#os.system("taskkill /im firefox.exe /f")
#os.system("taskkill /im chrome.exe /f")
#os.system("killall -9 'Google Chrome'")
#jkbx = random.randrange(6)
#songch = snglst[jkbx]
#playsound.playsound(songch, True)
## THE GHOST OF THE SHADOW ##
| Mystified131/Rachel | ImageSlideShowByDreamb.py | ImageSlideShowByDreamb.py | py | 2,537 | python | en | code | 2 | github-code | 90 |
17299249796 | #coding=gbk
#===========================================
#作者:许刚
#日期: 13-7-31
#时间: 上午8:32
#To change this template use File | Settings | File Templates.
#===========================================
import ui.core._core
from ui.control.Base import XObject, XDraw
from ui.core.Loader import RCLoader
import cStringIO
XImage = ui.core._core.XImage
class Layer(XImage, XObject):
    """An image layer: loads its bitmap from config and draws per drawType."""
    def __init__(self, config):
        XObject.__init__(self, config)
        # Load the raw image bytes named by the 'image' config key.
        buf = RCLoader.Load(self.VStr("image"))
        XImage.__init__(self, buf)
        # Draw mode from config key 'drawtype'; defaults to 1.
        self.drawType = self.VInt('drawtype', 1)
    def ReSizer(self):
        """Center this layer inside its parent when 'center' is set."""
        if self.VBool('center'):
            # NOTE(review): this file is Python 2 (it imports cStringIO), so
            # `/ 2` is integer division here.
            self.x += (self.parent.cx - self.cx) / 2
            self.y += (self.parent.cy - self.cy) / 2
        assert self.x > 0
        assert self.y > 0
    def Draw(self, dc):
        """Dispatch on drawType.  All modes except NineInOne currently fall
        back to DrawDefault -- presumably placeholders to be implemented."""
        if self.drawType == XDraw.Default:
            XImage.DrawDefault(self, dc, self.x, self.y, self.cx, self.cy)
        elif self.drawType == XDraw.ThreeInOneH:
            XImage.DrawDefault(self, dc, self.x, self.y, self.cx, self.cy)
        elif self.drawType == XDraw.ThreeInOneV:
            XImage.DrawDefault(self, dc, self.x, self.y, self.cx, self.cy)
        elif self.drawType == XDraw.FiveInOneH:
            XImage.DrawDefault(self, dc, self.x, self.y, self.cx, self.cy)
        elif self.drawType == XDraw.FiveInOneV:
            XImage.DrawDefault(self, dc, self.x, self.y, self.cx, self.cy)
        elif self.drawType == XDraw.NineInOne:
            XImage.DrawNineInOne(self, dc, self.x, self.y, self.cx, self.cy)
23144052208 |
import pandas as pd
import requests
import numpy as np
import math
from datetime import timedelta
import re
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import pygsheets
data = requests.get('https://www.levels.fyi/js/salaryData.json').json()
df = pd.DataFrame(data)
# Remove columns that we don't need
df = df.drop(['cityid','rowNumber','dmaid'], axis=1)
df = df.replace("", np.nan)
#convert datatypes
num_cols = ['yearsofexperience','basesalary','bonus','stockgrantvalue',
'totalyearlycompensation','yearsatcompany']
df[num_cols] = df[num_cols].apply(pd.to_numeric)
#one record without a location, kick it out
df = df[df.location.notnull()]
#round up all of the years of experience even if it is 0.25 years
df['yearsofexperience'] = np.ceil(df.yearsofexperience)
df['yearsatcompany'] = np.ceil(df.yearsatcompany)
#remove records that fall in the top/bottom 95th/5th percentile on totalyearly compensation
#I do this to remove some of the submissions that say they are making $5 million a year or those that are next to nothing
df = df[df['totalyearlycompensation'].between(df['totalyearlycompensation']. \
quantile(.05),df['totalyearlycompensation'].quantile(.95))]
#remove records that are outside of the US. This definition is any location record that has 2 commas or more but keep remote workers
df = df[(df['location'].str.count(',') == 1) | (df['location'].str.contains('remote',flags=re.IGNORECASE, regex=True))]
#change timestampe to date
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['city'] = df['location'].str.split(",").str[0]
df['state'] = df['location'].str[-2:]
#strip any leading or trailing spaces
ob_cols = df.select_dtypes(include=['object']).columns.tolist()
for col in df[ob_cols]:
df[col] = df[col].str.strip()
#duplicates and fuzzy match company name clean up
company_dict = {'JP Morgan Chase':'JPMorgan Chase','JPMORGAN':'JPMorgan Chase','JP Morgan':'JPMorgan Chase','JPMorgan':'JPMorgan Chase','JP morgan':'JPMorgan Chase',
'Jp Morgan':'JPMorgan Chase','jp morgan':'JPMorgan Chase', 'Jp morgan chase':'JPMorgan Chase',
'Ford Motor':'Ford','Ford Motor Company':'Ford',
'Johnson and Johnson':'Johnson & Johnson',
'Juniper':'Juniper Networks','juniper':'Juniper Networks',
'HP':'HP Inc','Hewlett Packard Enterprise':'HPE',
'Hsbc':'HSBC',
'Amazon web services':'Amazon',
'Apple Inc.':'Apple',
'Bosch Global':'Bosch',
'Deloitte Advisory':'Deloitte','Deloitte Consulting':'Deloitte','Deloitte consulting':'Deloitte',
'DISH':'DISH Network','Dish Network':'DISH Network','Dish':'DISH Network',
'Disney Streaming Services':'Disney','The Walt Disney Company':'Disney',
'Epic':'Epic Systems',
'Ernst and Young':'Ernst & Young',
'Expedia Group':'Expedia',
'Qualcomm Inc':'Qualcomm',
'Raytheon Technologies':'Raytheon',
'MSFT':'Microsoft','Microsoft Corporation':'Microsoft','Msft':'Microsoft','microsoft corporation':'Microsoft',
'Snapchat':'Snap',
'Sony Interactive Entertainment':'Sony',
'Micron':'Micron Technology',
'Mckinsey & Company':'McKinsey',
'Jane Street':'Jane Street Capital',
'EPAM':'EPAM Systems',
'Costco Wholesale':'Costco',
'Akamai Technology':'Akamai','Akamai Technologies':'Akamai',
'Visa inc':'Visa',
'Wipro Limited':'Wipro',
'Zoominfo':'Zoom',
'Zillow Group':'Zillow'}
df['company'] = df['company'].map(company_dict).fillna(df['company'])
#once you have the final dataframe, now it is time to paste it into google sheets
pycred = pygsheets.authorize(service_file='/Users/paul.brown/Documents/Python/credentials.json')
#opening the gsheet and sheet you want to work with
ss = pycred.open_by_key('1CuQDfKALqxxKdYvsudhkRjXelriZktT7QxaDHQGFjeU')[0]
#overwrite what is in the sheet with your df
ss.set_dataframe(df,(1,1))
| browningtons/salaries | levels_fyi.py | levels_fyi.py | py | 3,894 | python | en | code | 0 | github-code | 90 |
42884100794 | import csv
# Spreadsheet checksum (Advent of Code 2017 day 2 part 1): for each
# tab-separated row of integers in rows.txt, take (max - min), then sum
# those per-row differences.
with open ("rows.txt", "rt") as f:
    reader = csv.reader(f, delimiter='\t', skipinitialspace=True)
    lineDif = list()
    for line in reader:
        line = list(map(int, line))
        # print(line)
        diff = max(line) - min(line)
        # print(diff)
        lineDif.append(diff)
# print(lineDif)
print(lineDif)
controlSum = sum(lineDif)
print(controlSum)
| tsmaga/python-prework | task_1.py | task_1.py | py | 403 | python | en | code | 0 | github-code | 90 |
18570893649 | import sys
# Count the ways to pay exactly x yen using at most a 500-yen, b 100-yen and
# c 50-yen coins (brute force over all coin counts).
a,b,c,x=map(int,sys.stdin.read().split())
cnt=0
for aa in range(a+1):
    for bb in range(b+1):
        for cc in range(c+1):
            if (aa*500+bb*100+cc*50) == x:
                cnt+=1
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p03448/s824889976.py | s824889976.py | py | 195 | python | en | code | 0 | github-code | 90 |
18001148959 | N,W = map(int,input().split())
# Knapsack variant where every item weight lies in {w1, w1+1, w1+2, w1+3}:
# bucket values by weight offset, sort each bucket descending, then brute
# force how many items to take from each bucket (sum of a sorted prefix is
# the best value for a given count).
v1 = []
v2 = []
v3 = []
v4 = []
w1,v = map(int,input().split())
v1.append(v)
for i in range(N-1):
    w,v = map(int,input().split())
    if w == w1:
        v1.append(v)
    elif w == w1 + 1:
        v2.append(v)
    elif w == w1 + 2:
        v3.append(v)
    elif w == w1 + 3:
        v4.append(v)
v1.sort(reverse=True)
v2.sort(reverse=True)
v3.sort(reverse=True)
v4.sort(reverse=True)
l1 = len(v1)
l2 = len(v2)
l3 = len(v3)
l4 = len(v4)
ans = 0
# Try every (i, j, k, l) bucket-count combination within the weight budget W.
for i in range(l1+1):
    for j in range(l2+1):
        for k in range(l3+1):
            for l in range(l4+1):
                if i*w1 + j*(w1+1) + k*(w1+2) + l*(w1+3) <= W:
                    temp = sum(v1[:i]) + sum(v2[:j]) + sum(v3[:k]) + sum(v4[:l])
                    ans = max(ans,temp)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03732/s971698548.py | s971698548.py | py | 786 | python | en | code | 0 | github-code | 90 |
74203175016 | import os
import subprocess
import time
"""
# 概要
Chromecast用に動画データをコンバートする
# 使い方
1. src, dst フォルダを作る
2. src フォルダに変換したい動画を入れる
3. 本スクリプトを実行(引数不要)
# 注意事項
srcフォルダにあるファイルは問答無用で変換するので、
動画ファイル以外があるとエラーになると思う。
⇒ってか、そのくらいチェックする機構を作れよという…
"""
const_src_dir = './src'
const_dst_dir = './dst'
const_enc_param = '-c:v libx264 -crf 19 -c:a ac3'
# const_enc_param = '-c:v h264_qsv -c:a ac3'
def main_func():
    """Transcode every file in ./src to an .mp4 in ./dst via ffmpeg.

    Per the module docstring, everything in ./src is assumed to be a video
    file; non-video entries will simply make the ffmpeg call fail.
    """
    in_file_name_list = os.listdir(const_src_dir)
    in_file_full_name_list = [os.path.join(const_src_dir, x)
                              for x in in_file_name_list]
    out_file_full_name_list = [os.path.join(const_dst_dir,
                                            os.path.splitext(x)[0] + '.mp4')
                              for x in in_file_name_list]

    for src, dst in zip(in_file_full_name_list, out_file_full_name_list):
        # NOTE(review): passing the command as a single string (without
        # shell=True) only works on Windows, which matches 'ffmpeg.exe' here.
        cmd = 'ffmpeg.exe -i "{}" {} "{}" -y'.format(src, const_enc_param, dst)
        print(cmd)
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for measuring elapsed wall time.
        t0 = time.perf_counter()
        subprocess.call(cmd)
        t1 = time.perf_counter()
        print("dt="+str(t1-t0)+"[s]")
if __name__ == '__main__':
main_func()
| toru-ver4/kanri | script/encode_for_tv.py | encode_for_tv.py | py | 1,354 | python | ja | code | 0 | github-code | 90 |
18287980969 | n,m=map(int,input().split())
# Contest-judge tally (ABC151C-style): a = number of problems with at least
# one AC, b = total wrong attempts made before each problem's first AC.
l=[0]*(n+1)
miss=[0]*(n+1)
for _ in range(m):
    p,s=input().split()
    p=int(p)
    # Only submissions before the first AC count; afterwards l[p]==1.
    if l[p]==0:
        if s=='AC':
            l[p]+=1
        else:
            miss[p]+=1
# Penalties only count for problems that were eventually solved.
for i in range(1,n+1):
    if l[i]==0 and miss[i]!=0:
        miss[i]=0
a=l.count(1)
b=sum(miss)
print(a,b) | Aasthaengg/IBMdataset | Python_codes/p02802/s205450227.py | s205450227.py | py | 312 | python | en | code | 0 | github-code | 90 |
72574209578 | from typing import List
class Memento:
    """Snapshot object: remembers one past state of the backpack."""
    def __init__(self, state: List[str]):
        # Keep the reference as given; callers hand in their own copy.
        self.__snapshot = state

    def get_state(self) -> List[str]:
        """Return a shallow copy so callers cannot mutate the snapshot."""
        return list(self.__snapshot)


class Backpack:
    """The originator: its contents can be captured into and restored from Mementos."""
    def __init__(self):
        self.__items: List[str] = ['base']

    def add_a_thing(self, thing: str) -> None:
        """Log and store one more item."""
        print(f"В рюкзак положили: {thing}")
        self.__items.append(thing)

    def create_memento(self):
        """Capture the current contents into a fresh Memento."""
        return Memento(list(self.__items))

    def set_memento(self, memento: Memento):
        """Replace the contents with those recorded in `memento`."""
        self.__items = memento.get_state()

    def __str__(self):
        return f"Текущее состояние рюкзака: {self.__items}"
class Student:
    """Caretaker: owns the backpack and the undo history of its states."""
    def __init__(self, backpack: Backpack):
        self.backpack = backpack
        self.backpack_states: List[Memento] = []

    def add_thing_to_backpack(self, thing: str):
        """Snapshot the backpack, then put `thing` into it."""
        self.backpack_states.append(self.backpack.create_memento())
        self.backpack.add_a_thing(thing)

    def undo_add_thing(self):
        """Roll the backpack back one step.

        When only the initial snapshot remains it is restored (but kept), so
        further undos keep reporting the initial state.  NOTE(review): an
        undo with an empty history raises IndexError -- preserved as-is.
        """
        if len(self.backpack_states) == 1:
            self.backpack.set_memento(self.backpack_states[0])
            print("Пицца вернулась в своё исходное состояние!")
            print(self.backpack)
        else:
            print("Отмена предыдущего действия")
            self.backpack.set_memento(self.backpack_states.pop())
            print(self.backpack)
# Demo: fill the backpack, then undo each addition in turn.
if __name__ == "__main__":
    backpack = Backpack()
    student = Student(backpack)
    print(backpack)
    print("*" * 8 + "Добавляем вещи в рюкзак" + 8 * "*")
    student.add_thing_to_backpack('учебник')
    student.add_thing_to_backpack('ручки')
    student.add_thing_to_backpack('тетрадь')
    student.add_thing_to_backpack('сыр')
    print(backpack)
    print("*" * 4 + "Выкладываем вещи из рюкзака" + 4 * "*")
    student.undo_add_thing()
    student.undo_add_thing()
    student.undo_add_thing()
    student.undo_add_thing()
    print("*" * 8 + "Добавляем вещи в рюкзак" + 8 * "*")
    student.add_thing_to_backpack('карандаш')
    student.add_thing_to_backpack('тетрадь')
print(backpack) | CrazyQWERTYlunch/software_architecture | Homework2/Memento.py | Memento.py | py | 2,388 | python | ru | code | 0 | github-code | 90 |
73820312937 | class Solution:
    def findDiagonalOrder(self, matrix: List[List[int]]) -> List[int]:
        """Return the elements of `matrix` in zig-zag diagonal order.

        Walks one cell at a time along the current diagonal; on stepping
        off the grid, reflects the direction and clamps back onto the
        boundary.  NOTE(review): assumes a rectangular matrix with a
        non-empty first row (e.g. [[]] would raise IndexError) -- the
        LeetCode constraints guarantee this.
        """
        if not matrix:
            return []
        # direction = [(-1, 1), (1, -1)]
        row_size, col_size = len(matrix), len(matrix[0])
        current = (0, 0)
        result = []
        direct = (-1, 1)
        # Stop just before the bottom-right corner; it is appended after.
        while current != (row_size - 1, col_size - 1):
            row, col = current
            result.append(matrix[row][col])
            nxtr, nxtc = row + direct[0], col + direct[1]
            if nxtr < 0 or nxtr >= row_size or nxtc < 0 or nxtc >= col_size:
                # Off the grid: reverse direction and clamp.  At a corner the
                # column clamp below can override the row clamp's move,
                # producing the correct single down/right step.
                direct = (-direct[0], -direct[1])
                if nxtr < 0:
                    nxtr = 0
                elif nxtr >= row_size:
                    nxtr = row_size - 1
                    nxtc = col + 1
                if nxtc < 0:
                    nxtc = 0
                elif nxtc >= col_size:
                    nxtc = col_size - 1
                    nxtr = row + 1
            current = (nxtr, nxtc)
        result.append(matrix[-1][-1])
        return result
| HarrrrryLi/LeetCode | 498. Diagonal Traverse/Python 3/solution.py | solution.py | py | 1,023 | python | en | code | 0 | github-code | 90 |
36700323344 | #!/usr/bin/env python3
import rospy
from sensor_msgs.msg import Imu
if __name__ == '__main__':
    rospy.init_node('dummy_imu')
    # Publish a constant IMU message stream on /imu/data so downstream nodes
    # can be tested without real hardware.
    pub = rospy.Publisher('/imu/data', Imu, queue_size=1)
    msg = Imu()
    msg.header.frame_id = 'imu'
    # w=1 with the other quaternion components left at their defaults
    # (presumably 0, i.e. the identity orientation -- TODO confirm).
    msg.orientation.w = 1
    r = rospy.Rate(100)  # publish at 100 Hz
    while not rospy.is_shutdown():
        msg.header.stamp = rospy.Time.now()
        pub.publish(msg)
        r.sleep()
| MosHumanoid/bitbots_thmos_meta | bitbots_misc/bitbots_bringup/scripts/dummy_imu.py | dummy_imu.py | py | 410 | python | en | code | 3 | github-code | 90 |
5442361943 | """
Service class includes functionalities for implementing program features
"""
import copy
import random
from src.domain.entity import Book
class Service:
    """Holds the list of books plus an undo history of previous list states."""
    def __init__(self):
        self.books = []
        self.history = []

    def _snapshot(self):
        # Record the current list so the next mutation can be undone.
        self.history.append(copy.deepcopy(self.books))

    def add_book(self, book):
        """
        Adds the book to the list of all books
        :param book: The book data read from the console
        :return: -
        """
        self._snapshot()
        self.books.append(book)

    def add_random_book(self):
        """
        Adds a random book with data drawn from the module-level lists,
        retrying until its isbn and title are both unused.
        :return: -
        """
        self._snapshot()
        while True:
            candidate = Book(random.choice(isbn_list), random.choice(title_list), random.choice(author_list))
            taken = any(candidate.isbn == existing.isbn or candidate.title == existing.title
                        for existing in self.books)
            if not taken:
                self.books.append(candidate)
                return

    def filter_books(self, word):
        """Remove every book whose title's first word equals `word`."""
        self._snapshot()
        # Slice-assign so external aliases of self.books stay valid.
        self.books[:] = [b for b in self.books if b.title.split(' ')[0] != word]

    def undo(self):
        """Restore the book list to its state before the last mutation."""
        self.books = list(self.history.pop())

    def __str__(self):
        # NOTE(review): prints the books and returns None, so str(service)
        # raises TypeError -- behavior preserved from the original.
        for book in self.books:
            print(str(book))
isbn_list = ['978-0-1143-5752-8', '978-8-0580-1931-1', '978-1-8228-4963-3', '978-4-9091-3702-9', '978-7-3708-8495-9', '978-5-5665-9592-4', '978-6-8370-0740-5', '978-1-6209-0963-8', '978-2-3395-3601-6', '978-2-7173-8420-8', '978-7-1984-7358-8', '978-9-2190-6864-3', '978-2-1398-8216-0', '978-2-2069-7943-4', '978-0-4862-6402-8', '978-5-4977-2950-4', '978-4-1785-6555-4', '978-9-4782-9221-6', '978-8-8659-6895-6', '978-7-1713-8840-3', '978-7-6757-6745-2', '978-7-2777-5330-3', '978-8-5011-2126-4', '978-1-0733-4164-1', '978-0-1618-3222-1']
title_list = ['The Weeping Star', 'The Throne of the Trident', 'Eclipse of Ceres', 'Prince\'s Mask', 'The Solitary Wedding', 'Made of Fury', 'Resonant Light', 'Secret of the Mute Baker', 'Californian Gold', 'Sign of the Hollow Staircase', 'Plague of Blood', 'Something Gained', 'The Titan in the Sea', 'Secret of the Vanishing Man', 'Ladders of Love', 'The Town', 'Kaus', 'Cosmic Vortex', '2105: Omega', 'Queen of Capella', 'Lacy and Racy', 'The Dark Bow', 'Babylon Ascending', 'Copper Heart', 'A Man\'s Man', 'Mark of Winter', '2105: Alpha', 'Secret of the Andes']
author_list = ['Christiana Fitzpatrick', 'Sarah-Jayne Hodson', 'Lola-Mae Koch', 'Phoenix Murphy', 'Jason Mcfarlane', 'Perry Hollis', 'Jo Melia', 'Levi Mccray', 'Anna-Maria Santos', 'Aiden Norris', 'Rajan Avila', 'Justin Woods', 'Steve Ireland', 'Athena Tillman', 'Lilli O\'Neill', 'Beatrice Ahmed', 'Kennedy Barton', 'Edmund Fry', 'Mandy Stott', 'Giulia Alvarez', 'Terrence Carver', 'Sabrina Burris', 'Hannah Berry', 'Connor Harwood', 'Taliah Gale']
def test_add_random_book():
    """Smoke test: ten random additions should leave ten stored books."""
    service = Service()
    for _ in range(10):
        service.add_random_book()
    assert len(service.books) == 10
def test_add_book():
    """Smoke test: two manual additions should leave two stored books."""
    catalogue = Service()
    first = Book('978-7-9876-5432-0', 'The Forgotten Lands', 'Lewis Fox')
    second = Book('978-4-6277-5012-1', 'The Sands of Time', 'Katy Woods')
    catalogue.add_book(first)
    catalogue.add_book(second)
    assert len(catalogue.books) == 2
test_add_random_book()
test_add_book()
| vladceontea/Fundamentals-of-Programming | Lab5/src/services/service.py | service.py | py | 3,818 | python | en | code | 0 | github-code | 90 |
22207397048 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """Return the lowest common ancestor of nodes p and q in a BST.

        Iteratively walk down from the root: while both target values lie
        strictly on the same side of the current node, descend that way;
        the first node that separates them (or equals one of them) is the
        LCA.  Returns None only if the walk runs off the tree.
        """
        node = root
        while node:
            if node.val < p.val and node.val < q.val:
                node = node.right
            elif node.val > p.val and node.val > q.val:
                node = node.left
            else:
                return node
| Eurus-Holmes/LCED | Lowest Common Ancestor of a Binary Search Tree.py | Lowest Common Ancestor of a Binary Search Tree.py | py | 678 | python | en | code | 11 | github-code | 90 |
71105616297 | # coding=utf-8
'''
@Author : ericzhang
@Version : python3.7
@Date : 2018-08-10 08:05
@Soft : PyCharm
@Desc : python中一切皆对象
@Docs :
'''
## 1、赋值给一个变量
def myFunc(name='jiading'):
    """Print `name`; exists to demonstrate that functions are objects."""
    print(name)
my_fun = myFunc # 赋值
my_fun('pudong')
class Person:
    """Demo class: instantiating it prints 'changning' from __init__."""
    def __init__(self):
        print('changning')
my_class = Person
my_class()
"""
pudong
changning
"""
## 2、可以添加到集合对象中
obj_list = []
obj_list.append(my_fun)
obj_list.append(my_class)
for item in obj_list:
print(item())
"""
zhang
None
changning
<__main__.Person object at 0x101f8f290>
"""
## 3、可以当作函数的返回值
def decotator():
    """Demonstrate returning a function object: logs, then hands back my_fun."""
    print('dec start...')
    return my_fun
my_dec = decotator()
my_dec()
"""
dec start...
jiading
""" | zhagyilig/AdvancePy | 01-python中一切皆对象/all_is_object.py | all_is_object.py | py | 800 | python | en | code | 0 | github-code | 90 |
30341279010 | import numpy as np
import pandas as pd
import math
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.utils import resample
from sklearn.model_selection import KFold
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import roc_auc_score
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def normCol(col):
    """Return column `col` of the module-level DataFrame df, z-score normalized.

    df itself is not modified; a new Series is returned.
    (Fix: the original copied df[col] into `newcolumn` and then immediately
    overwrote it -- the dead copy has been removed.)
    """
    mean = df[col].mean()
    sd = df[col].std()
    return (df[col] - mean) / sd
def resampletrain(df):
    """Upsample the minority class (readmitted==1) to the majority class size.

    Uses sklearn's resample with replacement and a fixed random_state so the
    balanced training frame is reproducible.
    """
    majority = df[df.readmitted == 0]
    minority = df[df.readmitted == 1]
    upsampled_minority = resample(minority,
                                  replace=True,
                                  n_samples=len(majority.index),
                                  random_state=1)
    return pd.concat([majority, upsampled_minority])
df = pd.read_csv("diabetic_data.csv")
df.drop(['encounter_id','payer_code','weight','diag_2','diag_3','medical_specialty'], axis=1, inplace=True)
for i in df.columns:
if df[i].value_counts().max() > 91000:
df.drop([i], axis=1, inplace=True)
a = df.index[df['race'] == '?'].tolist()
b = df.index[df['gender'] == 'Unknown/Invalid'].tolist()
removeRow = a + b
df.drop(removeRow,axis=0, inplace=True)
#remove duplicated patients record
l = set()
removeL = []
for index, row in df.iterrows():
if row['patient_nbr'] not in l:
l.add(row['patient_nbr'])
else:
removeL.append(index)
df = df.drop(removeL,axis=0)
df = df.drop('patient_nbr',axis=1)
df['readmitted'] = np.where(df['readmitted'] == '<30',1,0)
df['diabetesMed'] = np.where(df['diabetesMed'] == 'Yes',1,0)
df['change'] = np.where(df['change'] == 'Ch',1,0)
df['glipizide'] = np.where(df['glipizide'] == 'No',0,1)
df['metformin'] = np.where(df['metformin'] == 'No',0,1)
df['A1Cresult'] = np.where(df['A1Cresult'] == 'None',0,1)
df['gender'] = np.where(df['gender'] == 'Male',1,0)
df['age'].replace('[0-10)',5,inplace=True)
df['age'].replace('[10-20)',15,inplace=True)
df['age'].replace('[20-30)',25,inplace=True)
df['age'].replace('[30-40)',35,inplace=True)
df['age'].replace('[40-50)',45,inplace=True)
df['age'].replace('[50-60)',55,inplace=True)
df['age'].replace('[60-70)',65,inplace=True)
df['age'].replace('[70-80)',75,inplace=True)
df['age'].replace('[80-90)',85,inplace=True)
df['age'].replace('[90-100)',95,inplace=True)
df['insulin'].replace('No',1,inplace=True)
df['insulin'].replace('Up',2,inplace=True)
df['insulin'].replace('Down',3,inplace=True)
df['insulin'].replace('Steady',4,inplace=True)
df['race'].replace('Hispanic',3,inplace=True)
df['race'].replace('Asian',3,inplace=True)
df['race'].replace('Other',3,inplace=True)
df['race'].replace('Caucasian',1,inplace=True)
df['race'].replace('AfricanAmerican',2,inplace=True)
#diag_1:
diag_1 = df['diag_1']
#Circulatory 390–459, 785
C = list(range(390,460)) + [785]
C = [str(x) for x in C]
for i in C:
diag_1.replace(i,1,inplace = True)
#Respiratory 460–519, 786
R = list(range(460,520))+[786]
R = [str(i) for i in R ]
for i in R:
diag_1.replace(i,2, inplace = True)
#Digestive 520–579, 787
D = list(range(520,580))+[787]
D = [str(i) for i in D]
for i in D:
diag_1.replace(i,3,inplace = True)
#Diabetes 250.xx
DB = list(np.arange(250.01,251,0.01))
DB = [round(i,2) for i in DB]
DB = [str(i) for i in DB]
DB += ['250']
for i in DB:
diag_1.replace(i,4,inplace = True)
#Injury 800–999
I = list(range(800,1000))
I = [str(i) for i in I]
for i in I:
diag_1.replace(i,5,inplace = True)
#Musculoskeletal 710–739
M = list(range(710,740))
M = [str(i) for i in M]
for i in M:
diag_1.replace(i,6,inplace = True)
#Genitourinary 580–629, 788
G = list(range(580,630))+['788']
G = [str(i) for i in G]
for i in G:
diag_1.replace(i,7,inplace = True)
#Neoplasms 140–239
N = list(range(140,240))
N = [str(i) for i in N]
for i in N:
diag_1.replace(i, 8, inplace= True)
diagSet = set([1,2,3,4,5,6,7,8])
oindex = df['diag_1'].loc[~df['diag_1'].isin(diagSet)].index.tolist()
for i in oindex:
df.at[i,'diag_1'] = 9
def combine(feature,rangeList,category):
    """Collapse every value of df[feature] found in rangeList into `category` (mutates the module-level df in place)."""
    hits = df[feature].loc[df[feature].isin(rangeList)].index.tolist()
    for row_idx in hits:
        df.at[row_idx, feature] = category
combine('num_lab_procedures',list(range(0,11)),10)
combine('num_lab_procedures',list(range(11,21)),20)
combine('num_lab_procedures',list(range(21,31)),30)
combine('num_lab_procedures',list(range(31,41)),40)
combine('num_lab_procedures',list(range(41,51)),50)
combine('num_lab_procedures',list(range(51,61)),60)
combine('num_lab_procedures',list(range(61,71)),70)
combine('num_lab_procedures',list(range(71,81)),80)
combine('num_lab_procedures',list(range(81,91)),90)
combine('num_lab_procedures',list(range(91,150)),100)
combine('num_medications',list(range(0,6)),5)
combine('num_medications',list(range(6,11)),10)
combine('num_medications',list(range(11,16)),15)
combine('num_medications',list(range(16,21)),20)
combine('num_medications',list(range(21,31)),30)
combine('num_medications',list(range(31,41)),40)
combine('num_medications',list(range(41,100)),50)
#reindexing
df = df.reset_index(drop=True)
print('total number of class 0 instances', df['readmitted'].value_counts()[0])
print('total number of class 1 instances', df['readmitted'].value_counts()[1])
label = df['readmitted'].copy()
train = df.drop('readmitted',axis = 1)
X = train
Y = label
rf = RandomForestClassifier()
rf.fit(X, Y)
importance = rf.feature_importances_
result = pd.DataFrame({
'importance':importance,
'features':train.columns
})
result = result.sort_values(['importance'] , ascending=0)
print(result)
objects = result['features'].tolist()
y_pos = np.arange(len(objects))
performance = result['importance'].tolist()
plt.bar(y_pos, performance, align='center', alpha=0.5)
plt.show();
pos = []
neg = []
acc = []
trainpos = []
trainneg = []
for i in range(1,22):
select = result['features'][0:i].tolist() + ['readmitted']
DF = df[select]
kf = KFold(n_splits=10)
kf.get_n_splits(DF)
accsum = 0
posum = 0
negsum = 0
trainposum = 0
trainnegsum = 0
for train_index, test_index in kf.split(DF):
train, test = DF.iloc[train_index], DF.iloc[test_index]
x_train = train.drop('readmitted',axis=1)
y_train = train['readmitted']
x_test = test.drop('readmitted',axis=1)
y_test = test['readmitted']
model = RandomForestClassifier(n_estimators = 1000, n_jobs = -1,class_weight="balanced",min_samples_leaf= 1000)
model.fit(x_train,y_train)
y_pred = model.predict(x_test)
y_train_pred = model.predict(x_train)
trainposum += recall_score(y_train, y_train_pred,pos_label=1)
trainnegsum += recall_score(y_train, y_train_pred,pos_label=0)
accsum += accuracy_score(y_test, y_pred)
posum += recall_score(y_test, y_pred,pos_label=1)
negsum += recall_score(y_test, y_pred,pos_label=0)
print(posum)
accsum /= 10
posum /= 10
negsum /= 10
trainposum /= 10
trainnegsum /= 10
acc.append(accsum)
pos.append(posum)
neg.append(negsum)
trainpos.append(trainposum)
trainneg.append(trainnegsum)
x = list(range(1,22))
plt.plot(x,pos,'-g',label='test recall for TP')
plt.plot(x,trainpos,'-y',label='train recall for TP')
plt.plot(x,neg,'-b',label='test recall for TN')
plt.plot(x,trainneg,label='train recall for TN')
plt.plot(x,acc,'-r',label='testing acc')
plt.legend(loc='lower right')
plt.axis([1,21 , 0.52, 0.625])
plt.xticks(np.arange(min(x), max(x)+1, 1.0))
plt.show(); | hyuan9310/Hospital_readmission | jupiter_notebook/RF.py | RF.py | py | 7,927 | python | en | code | 0 | github-code | 90 |
18061671269 | Z = list(map(int, input().split()))
ans = 10 ** 18
# For each rotation, split dimension c into floor/ceil halves and pay
# |difference of halves| * a * b; the answer is the cheapest rotation
# (zero whenever some dimension is even).
for i in range(3):
    a, b, c = Z[i % 3], Z[(i + 1) % 3], Z[(i + 2) % 3]
    # print(a, b, c)
    left = c // 2
    right = c - left
    ans = min(ans, abs(left - right) * a * b)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p04005/s962574430.py | s962574430.py | py | 243 | python | en | code | 0 | github-code | 90 |
17956110289 | from itertools import permutations
# Shortest walk visiting R required towns in some order: all-pairs shortest
# paths via Floyd-Warshall, then brute force every permutation of the towns.
N, M, R = map(int, input().split())
r = list(map(lambda x: int(x)-1, input().split()))
INF = 10**9
cost = [[INF]*N for _ in range(N)]
for _ in range(M):
    A, B, C = map(int, input().split())
    A -= 1
    B -= 1
    # Undirected edge.
    cost[A][B] = C
    cost[B][A] = C
# Floyd-Warshall.  The diagonal stays INF, which is harmless: routes never
# revisit a town, and relaxing through k==i/k==j cannot win against INF.
for k in range(N):
    for i in range(N):
        for j in range(N):
            cost[i][j] = min(cost[i][j], cost[i][k]+cost[k][j])
ans = INF
for route in permutations(r):
    temp = 0
    for i in range(R-1):
        temp += cost[route[i]][route[i+1]]
    ans = min(ans, temp)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03608/s123953375.py | s123953375.py | py | 551 | python | en | code | 0 | github-code | 90 |
18194484639 | K = int(input())
S = input()
s = len(S)
L = []
mod = 10 ** 9 + 7
N = 2 * 10**6
# Factorials, modular inverses and inverse factorials up to N, built with
# the standard inv[i] = -(mod//i) * inv[mod%i] recurrence.
fac = [1, 1]
finv = [1, 1]
inv = [0, 1]
def cmb(n, r):
    """n choose r modulo `mod`, using the precomputed tables above."""
    return fac[n] * ( finv[r] * finv[n-r] % mod ) % mod
for i in range(2, N + 1):
    fac.append( ( fac[-1] * i ) % mod )
    inv.append( mod - ( inv[mod % i] * (mod // i) % mod ) )
    finv.append( finv[-1] * inv[-1] % mod )
# L[i] = C(i+s-1, s-1) * 25^i: strings of length s+i whose canonical
# subsequence-match of S uses exactly i extra characters.
for i in range(K+1):
    L.append( cmb(i+s-1, s-1) * pow(25, i, mod) % mod )
ans = []
# Prefix accumulation: ans[j] = sum_{i<=j} L[i] * 26^(j-i), so ans[K] is the
# total over all insertion counts 0..K (printed on the line that follows).
for i, x in enumerate(L):
    if i == 0:
        ans.append(x)
    else:
        ans.append( ( ans[i-1]*26%mod + x ) % mod )
print(ans[K]) | Aasthaengg/IBMdataset | Python_codes/p02632/s391524664.py | s391524664.py | py | 571 | python | en | code | 0 | github-code | 90 |
18488184219 | N, M = map(int, input().split())
def divisor(n):
    """Return every divisor of n as a list (order unspecified).

    Walks trial divisors up to sqrt(n), collecting both d and n // d;
    a set removes the duplicate when n is a perfect square.
    """
    found = set()
    d = 1
    while d * d <= n:
        if n % d == 0:
            found.add(d)
            found.add(n // d)
        d += 1
    return list(found)
tab = divisor(M)
ans = []
# Keep divisors i of M whose complementary factor M // i is at least N.
for i in tab:
    j = M // i
    if j >= N:
        ans += [i]
print(max(ans)) | Aasthaengg/IBMdataset | Python_codes/p03241/s322177968.py | s322177968.py | py | 372 | python | en | code | 0 | github-code | 90 |
18447460227 | import threading
import time
'''so much resource cpu need to frequently change them so ...
maybe turn into chaos'''
g_num=0
def test1(temp):
    """Bump the shared counter `temp` times, then report its value.

    The increment is a read-modify-write on the global and is NOT
    atomic, so running this concurrently with test2 loses updates —
    which is exactly what this demo is meant to show.
    """
    global g_num
    for _ in range(temp):
        g_num = g_num + 1
    print('---1:{}---'.format(g_num))
def test2(temp):
    """Same unsynchronized increment loop as test1, tagged '---2---'."""
    global g_num
    for _ in range(temp):
        g_num = g_num + 1
    print('---2:{}---'.format(g_num))
def main():
    # Two threads hammer the same global counter without a lock.
    t1=threading.Thread(target=test1,args=(100000000,))
    t2=threading.Thread(target=test2,args=(100000000,))
    t1.start()
    t2.start()
    # Sample output showing lost updates (total should be 200000000):
    # ---main---:28622471
    # ---1:126803457---
    # ---2:127107718---
    # The unsynchronized `g_num += 1` races between the two threads, so
    # the final count is unpredictable; a Lock around the increment
    # (or joining on atomic primitives) would make it deterministic.
    time.sleep(5)  # crude wait; t1.join()/t2.join() would be the proper way
    print('---main---:{}'.format(g_num))
if __name__ == "__main__":
main()
| theguyisnoone/daily | whyneedlock.py | whyneedlock.py | py | 807 | python | en | code | 0 | github-code | 90 |
41540077351 | #!/usr/bin/python3
import sys
from itertools import cycle, islice, product
# # Advent of Code 2021
# Day 21
def quantic_play(p1, s1, p2, s2, pool=None):
    """Count Dirac-dice win universes for the player about to move.

    Args:
        p1, s1: position (1-10) and score of the player whose turn it is.
        p2, s2: position and score of the opponent.
        pool: memoization cache mapping (p1, s1, p2, s2) -> result; a
            fresh dict is created per top-level call when omitted.

    Returns:
        A tuple (wins_mover, wins_opponent) summed over all 27 outcomes
        of three quantum die rolls, recursing until a score reaches 21.
    """
    # The original used `pool={}` — a shared mutable default — and the
    # recursive call silently relied on that shared dict for memoization.
    # Create the cache explicitly and thread it through the recursion.
    if pool is None:
        pool = {}
    key = (p1, s1, p2, s2)
    if key in pool:
        return pool[key]
    sc = (0, 0)
    for roll in product((1, 2, 3), repeat=3):
        total = sum(roll)  # sum of the three rolls: 3..9
        new_pos = (p1 + total - 1) % 10 + 1  # wrap on the 1..10 track
        new_score = s1 + new_pos
        if new_score >= 21:
            # The mover wins this universe outright.
            opp_wins, mover_wins = 0, 1
        else:
            # Opponent moves next; their result is from their perspective,
            # so the tuple is swapped when accumulating below.
            opp_wins, mover_wins = quantic_play(p2, s2, new_pos, new_score, pool)
        sc = (sc[0] + mover_wins, sc[1] + opp_wins)
    pool[key] = sc
    return sc
if __name__ == '__main__':
    # Input file defaults to input.txt; may be overridden on the CLI.
    filename = 'input.txt'
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    file = open(filename)
    lines = [line.strip() for line in file]
    # Slice [28:] assumes the value follows a fixed-width prefix
    # ("Player N starting position: ") — confirm against the input format.
    start_p1 = int(lines[0][28:])
    start_p2 = int(lines[1][28:])
    # Part One:
    # What do you get if you multiply the score of the losing player by the number of times the die was rolled during the game?
    p = [start_p1, start_p2]
    sc = [0, 0]
    rolls = 0
    dice = iter(cycle(range(1, 101)))  # deterministic die: 1..100 repeating
    turn = 0
    while max(sc) < 1000:
        # Advance by the sum of three rolls, wrapping positions onto 1..10.
        p[turn] = (p[turn] + sum(list(islice(dice, 3))) - 1) % 10 + 1
        sc[turn] += p[turn]
        rolls += 3
        turn = 1 - turn
    print("Part One:", min(sc) * rolls)
    # Part Two:
    # Find the player that wins in more universes; in how many universes does that player win?
    p = [start_p1, start_p2]
    sc = [0, 0]
    print("Part Two:", max(quantic_play(p[0], sc[0], p[1], sc[1])))
| TomCarton/AdventOfCode | 2021/day21/day21.py | day21.py | py | 1,469 | python | en | code | 2 | github-code | 90 |
14754165470 | '''
Description from the course:
https://machinelearningmastery.com/machine-learning-in-python-step-by-step/
Data to used:
We are going to use the iris flowers dataset. This dataset is famous because it is used as the “hello world”
dataset in machine learning and statistics by pretty much everyone.
The data will be available from a CSV file at: https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv
Here is an overview of what we are going to cover:
0. Version check
1. Import necessary libraries of Python
2. Loading the dataset.
3. Summarizing the dataset.
4. Visualizing the dataset.
5. Evaluating some algorithms.
6. Making some predictions.
'''
'''
0: Version check (once checked then commented out)
#Pythos version
import sys
print('Python: {}'.format(sys.version))
#scipy version
import scipy as sp
print('scipy: {}'.format(sp.__version__))
#numpy version
import numpy as np
print('numpy: {}'.format(np.__version__))
#matplotlib version
import matplotlib
print('matplotlib: {}'.format(matplotlib.__version__))
#pandas version
import pandas as pd
print('pandas: {}'.format(pd.__version__))
#sklearn version
import sklearn
print('sklearn: {}'.format(sklearn.__version__))
'''
'''
1: Import necessary libraries of Python
'''
# Load libraries
import pandas as pd
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
'''
2: Loading the dataset
'''
url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'class']
dataset = pd.read_csv(url, names=names)
'''
3: Summarizing the dataset:
a. Dimensions of the dataset
b. Peek at the data itself
c. Statistical summary of all attributes
d. Breakdown of the data by the class variable
'''
# a. Dimensions of teh dataset
#shape
print(dataset.shape)
# b. Peek at the dataset itself
#head
print(dataset.head(5))
#tail
print(dataset.tail(5))
# c. Statistical summary of all attributes
print(dataset.describe())
# d. Class distribution
print(dataset.groupby('class').size())
'''
4: Visualizing the dataset:
a. Univariate plots to better understand each attribute
b. Multivariate plots to better understand the relationships between attributes
'''
# a.1 Univariate plotting: box and whisker plots
dataset.plot(kind='box', subplots=True, layout=(2,2), sharex=False, sharey=False)
pyplot.show()
# a.2 Univariate plotting: histograms
dataset.hist()
pyplot.show()
# b. Multivariate plotting: scatter plot matrix
scatter_matrix(dataset)
pyplot.show()
'''
5: Evaluating some algorithms:
a. Separate out a validation dataset.
b. Set-up the test harness to use 10-fold cross validation.
c. Build multiple different models to predict species from flower measurements
d. Select the best model.
'''
# Split-out validation dataset: feature matrix X and label vector y.
feature_col_names = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width']
class_col_name = 'class'
X = dataset[feature_col_names].values
y = dataset[class_col_name].values
split_test_size = 0.20  # hold out 20% of the rows for final validation
# Fixed random_state makes the train/validation split reproducible.
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=split_test_size, random_state=1)
# Spot Check Algorithms
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
# evaluate each model in turn
results = []
names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=1, shuffle=True)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# Compare Algorithms
pyplot.boxplot(results, labels=names)
pyplot.title('Algorithm Comparison')
pyplot.show()
'''
6. Making some predictions
previous section suggest that the SVM was perhaps the most accurate model. We will use this model as our final model
We'll FIT the model on the entire training dataset and make PREDICTIONS on the validation dataset
'''
# Make predictions on validation dataset.
# SVM looked strongest in cross-validation above, so refit it on the
# full training split before scoring the held-out validation set.
model = SVC(gamma='auto')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
# Evaluate predictions: accuracy, confusion matrix, per-class report.
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions)) | jrahman1988/PythonSandbox | myML_MLMasteryStepByStep_IRISClassification.py | myML_MLMasteryStepByStep_IRISClassification.py | py | 5,357 | python | en | code | 0 | github-code | 90 |
25532597353 | #!/usr/bin/env python
from PyQt4 import QtCore, QtGui
import admin_ui
import form_ui
import sys
import time
from datetime import datetime
import binascii
import PN532
from config import Db
class NasabahThread(QtCore.QThread):
    """Background Qt thread meant to watch for nasabah changes.

    Admin instantiates this as ``NasabahThread`` and connects its
    'nasabahAdded' signal; the original definition was misspelled
    ``NasabahTread``, which made that lookup a NameError at startup.
    run() is still a stub: it never emits 'nasabahAdded'.
    """

    def __init__(self):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # forever if this class is ever subclassed.
        super(NasabahThread, self).__init__()

    def __del__(self):
        # Block teardown until the thread has actually finished.
        self.wait()

    def run(self):
        # Intentionally empty: polling/signalling logic not implemented yet.
        pass


# Backward-compatible alias preserving the original (misspelled) name.
NasabahTread = NasabahThread
class Admin(QtGui.QTabWidget, admin_ui.Ui_TabWidget):
    """Main admin window: tabbed views over nasabah, users and logs.

    Pulls data straight from the shared Db connection and renders each
    result set into the table widgets created by admin_ui.
    """
    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)
        self.tambah_warga_btn.clicked.connect(self.open_form)
        # Populate every tab once at startup.
        self.display_nasabah_list()
        self.display_user_list()
        self.display_transaksi_log()
        self.display_isi_ulang_log()
        self.display_user_log()
        self.showMaximized()
        # NOTE(review): the worker class in this file is spelled
        # 'NasabahTread'; unless an alias exists, this line raises
        # NameError at startup — verify the intended name.
        self.nasabah_thread = NasabahThread()
        self.connect(self.nasabah_thread, QtCore.SIGNAL('nasabahAdded'), self.display_nasabah_list)
        self.nasabah_thread.start()
    def display_nasabah_list(self):
        # Refresh the customer tab from the database.
        data = self.get_nasabah_list()
        header = ["Nama", "Jenis Kelamin", "Tanggal Lahir", "Alamat", "Saldo", "Waktu Daftar"]
        self.show_data_on_table(data, header, self.nasabah_list)
    def display_user_list(self):
        # Refresh the user tab from the database.
        data = self.get_user_list()
        header = ["Username", "Active", "Waktu Daftar"]
        self.show_data_on_table(data, header, self.user_list)
    def display_transaksi_log(self):
        # Refresh the transaction-log tab.
        data = self.get_transaksi_log()
        header = ["Waktu", "Nama", "Jenis Transaksi", "Jumlah"]
        self.show_data_on_table(data, header, self.log_list)
    def display_isi_ulang_log(self):
        # Refresh the top-up log tab.
        data = self.get_isi_ulang_log()
        header = ["Waktu", "User", "Jumlah"]
        self.show_data_on_table(data, header, self.isi_ulang_log)
    def display_user_log(self):
        # Refresh the user-activity log tab.
        data = self.get_user_log()
        header = ["Waktu", "User", "Aktifitas"]
        self.show_data_on_table(data, header, self.log_user)
    def show_data_on_table(self, data, header, element):
        """Render `data` (rows of tuples) into the given table widget."""
        element.setRowCount(len(data))
        element.setColumnCount(len(header))
        element.setHorizontalHeaderLabels(header)
        for column, h in enumerate(header):
            for row, item in enumerate(data):
                element.setItem(row, column, QtGui.QTableWidgetItem(str(item[column])))
        element.resizeColumnsToContents()
        element.resizeRowsToContents()
        element.showMaximized()
    def open_form(self):
        # Launch the registration form; keep a reference so it is not GC'd.
        self.form = FormDaftar()
    def get_nasabah_list(self):
        return self.get_db_records("SELECT nama, jenis_kelamin, tanggal_lahir, alamat, saldo, created_at "
                                   "FROM nasabah ORDER BY nama ASC")
    def get_user_list(self):
        return self.get_db_records("SELECT username, active, created_at FROM user ORDER BY username ASC")
    def get_transaksi_log(self):
        return self.get_db_records("SELECT transaksi.waktu, nasabah.nama, transaksi.jenis_transaksi, transaksi.jumlah "
                                   "FROM transaksi JOIN nasabah ON nasabah.id = transaksi.nasabah_id "
                                   "ORDER BY transaksi.waktu DESC")
    def get_isi_ulang_log(self):
        return self.get_db_records("SELECT l.waktu, u.username, l.jumlah FROM log_isi_ulang l "
                                   "JOIN user u ON u.id = l.user_id ORDER BY l.waktu DESC")
    def get_user_log(self):
        return self.get_db_records("SELECT l.waktu, u.username, l.activity FROM log_user l "
                                   "JOIN user u ON u.id = l.user_id ORDER BY l.waktu DESC")
    def get_db_records(self, sql):
        """Run a read-only query on the shared connection; return all rows."""
        cur = Db.con.cursor()
        cur.execute(sql)
        records = cur.fetchall()
        cur.close()
        return records
class FormDaftar(QtGui.QWidget, form_ui.Ui_Form):
    """Registration form for a new nasabah (customer)."""
    def __init__(self):
        super(self.__class__, self).__init__()
        self.setupUi(self)
        self.info.setText('')
        self.pin.setText('')
        self.reset_btn.clicked.connect(self.reset_form)
        self.simpan_btn.clicked.connect(self.simpan)
        self.show()
    def simpan(self):
        """Insert the form's fields as a new nasabah row and commit."""
        # Gender radio buttons: 'L' (male) when checked, else 'P' (female).
        if self.laki_laki.isChecked():
            jenis_kelamin = 'L'
        else:
            jenis_kelamin = 'P'
        cur = Db.con.cursor()
        # Parameterized insert — form values are never interpolated into SQL.
        cur.execute(
            "INSERT INTO nasabah (nama, jenis_kelamin, tanggal_lahir, alamat) VALUES (%s, %s, %s, %s)",
            (self.nama.text(), jenis_kelamin, self.tgl_lahir.text(), self.alamat.text())
        )
        cur.close()
        Db.con.commit()
    def reset_form(self):
        # Clear every input field back to its initial empty state.
        self.info.setText('')
        self.pin.setText('')
        self.nama.setText('')
        self.alamat.setText('')
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
ui = Admin()
sys.exit(app.exec_()) | udibagas/atmb | admin.py | admin.py | py | 4,824 | python | en | code | 0 | github-code | 90 |
22708487478 | from PIL import Image
import numpy as np
# Shared mutable state for the EMD embed/extract pipeline below.
five=[]                    # secret message converted to base-5 digits
cover_pix=[]               # flattened grayscale pixels of the cover image
cover_hide_pix=[]          # pixel values after embedding (the stego image)
get_secrt_message=[]       # digits read back immediately after embedding
get_secrt=[]               # flattened pixels of the reloaded stego image
secrt_five=[]              # base-5 digits extracted from the stego image
get_secrt_message_two=[]   # extracted message converted back to bits
x=[-1,-2,0,1,2]            # candidate pixel offsets tried during embedding
def five_to_two(get_secrt_message_five):
    """Decode consecutive base-5 digit pairs back into bits.

    Each pair (hi, lo) encodes the value hi*5 + lo; its binary form,
    zero-padded to at least four bits, is appended bit-by-bit to the
    module-global list get_secrt_message_two.
    """
    for idx in range(len(get_secrt_message_five) // 2):
        hi = get_secrt_message_five[idx * 2]
        lo = get_secrt_message_five[idx * 2 + 1]
        bits = bin(hi * 5 + lo)[2:].zfill(4)
        for bit in bits:
            get_secrt_message_two.append(int(bit))
def ten_to_five(a):
    """Convert a decimal integer to its base-5 representation.

    Returns the digits as a string, left-padded with '0' to a minimum
    width of two characters (so single-digit results become e.g. "04").
    """
    digits = []
    while True:
        quotient = int(a / 5)  # truncating division, as in the original
        digits.append(str(a % 5))
        a = quotient
        if a == 0:
            break
    return "".join(reversed(digits)).zfill(2)
def two_to_five(secrt_message):
    """Regroup a bit list into 4-bit nibbles and append each nibble's
    base-5 digits to the module-global list `five`.

    Any trailing bits that do not fill a complete nibble are ignored.
    """
    groups = len(secrt_message) // 4
    for g in range(groups):
        nibble = secrt_message[g * 4:(g + 1) * 4]
        value = int("".join(str(b) for b in nibble), 2)
        for digit in ten_to_five(value):
            five.append(int(digit))
def secrt_message_hide(img,secrt_message):
    """Embed the base-5 digits in the global `five` into an image.

    The cover image is converted to grayscale and saved; each carrier
    pixel is then nudged by at most 2 so that (pixel mod 5) equals the
    secret digit.  The stego image is written to ./img/img_hide.png.

    NOTE(review): the secrt_message parameter is never read — the body
    uses the module-global `five` instead; verify which was intended.
    """
    global cover_hide_pix
    img=Image.open(img).convert("L")
    img.save("./img/img_gray.png")
    img_hide=img.copy()
    width=img.width
    hight=img.height
    # Flatten the grayscale pixels row by row into cover_pix.
    for i in range(hight):
        for j in range(width):
            pix=img.getpixel((j,i))
            cover_pix.append(pix)
    # Start the stego pixel list as a copy of the cover pixels.
    for lenth in range(len(cover_pix)):
        cover_hide_pix.append(cover_pix[lenth])
    print("更改前的像素值:",end="")
    for flag in range(len(five)):
        print(cover_pix[flag],end=",")
    # Core embedding step: try each offset in x = [-1, -2, 0, 1, 2] until
    # (pixel + offset) % 5 equals the secret digit for that position.
    for flags in range(len(five)):
        for xx in x:
            f=(cover_pix[flags]+xx)%5
            if f==five[flags]:
                cover_hide_pix[flags]=cover_pix[flags]+xx
    print()
    print("更改后的像素值:", end="")
    for flag in range(len(five)):
        print(cover_hide_pix[flag],end=",")
        get_secrt_message.append(cover_hide_pix[flag]%5)
    a=0
    # Write the (possibly modified) pixel values back into the copy.
    for i in range(hight):
        for j in range(width):
            img_hide.putpixel((j,i),cover_hide_pix[a])
            a=a+1
    img_hide.save("./img/img_hide.png")
def secrt_message_get(img):
    """Extract the embedded message from a stego image.

    Reads the first len(five) pixels, takes each value mod 5 to recover
    the base-5 digits (into the global secrt_five), then converts them
    back to bits via five_to_two (into get_secrt_message_two).
    """
    img=Image.open(img).convert("L")
    lenth=len(five)  # number of embedded digits to read back
    width=img.width
    hight=img.height
    # Flatten the stego image pixels row by row into get_secrt.
    for i in range(hight):
        for j in range(width):
            pix=img.getpixel((j,i))
            get_secrt.append(pix)
    # Pixel mod 5 recovers each embedded base-5 digit.
    for b in range(lenth):
        secrt_five.append(get_secrt[b]%5)
    print()
    print("提取出来的5进制消息:",secrt_five)
    five_to_two(secrt_five)
    print("转换之后的2进制消息:",get_secrt_message_two)
def evaluate(cover_pix, cover_hide_pix):
    """Rate stego quality via MSE and PSNR between cover and stego pixels.

    A PSNR above ~30 dB is generally considered imperceptible to the
    human visual system.

    Args:
        cover_pix: flat list of original grayscale pixel values.
        cover_hide_pix: flat list of pixel values after embedding.
    """
    img = Image.open("./img/img.png")  # opened only for its dimensions
    width = img.width
    hight = img.height
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for i in range(width * hight):
        diff = cover_pix[i] - cover_hide_pix[i]
        total += diff * diff
    print("-----------------------------评价隐藏算法-------------------------------------")
    MSE = total / (width * hight)
    print("均方误差为:", MSE)
    # PSNR = 10 * log10(MAX^2 / MSE).  The original computed
    # (10 * int(log10(255^2))) / MSE, which is not the PSNR formula.
    if MSE == 0:
        PSNR = float('inf')  # identical images: no distortion at all
    else:
        PSNR = 10 * np.log10(255 * 255 / MSE)
    print("PSNR值:", PSNR)
if __name__ == '__main__':
    # Demo: embed a fixed binary message using EMD with n = 2 (base-5).
    print("本代码n取2,将2进制的秘密消息以5进制的形式进行嵌入")
    secrt_message = [1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1,1,0,1,1]
    two_to_five(secrt_message)  # fills the global `five` with base-5 digits
    print("需要嵌入的2进制消息:",secrt_message)
    print("转换之后的5进制秘密消息:",five)
    secrt_message_hide("./img/img.png",five)
    secrt_message_get("./img/img_hide.png")
    # Round-trip check: extracted bits must equal the embedded bits.
    if secrt_message==get_secrt_message_two:
        print("------嵌入消息与提取消息相同,提取成功--------")
    else:print("------嵌入消息与提取消息不相同,提取失败--------")
    evaluate(cover_pix,cover_hide_pix)
| 18281765528/picture_handle | EMD_PRO/main.py | main.py | py | 4,696 | python | en | code | 0 | github-code | 90 |
34131341220 | import tkinter as tk
import tkinter.messagebox
import tkinter.simpledialog
from utils import keys, responses_path
from tkinter import ttk, filedialog, messagebox
def ask_confirmation():
    """Ask the user to confirm a data fetch (it consumes an API call)."""
    return tkinter.messagebox.askyesno(
        "Confirmation",
        "Are you sure you would like to get this data? This is an API call",
    )
def load_file():
    """Let the user pick a saved JSON response file; return the path."""
    return filedialog.askopenfilename(
        initialdir=responses_path,
        filetypes=[('JSON files', "*.json")],
    )
def prompt_for_number():
    """Prompt for a new stake in dollars (1..1,000,000,000).

    Returns the entered integer, or None if the dialog is cancelled.
    """
    return tkinter.simpledialog.askinteger(
        "New stake",
        "Enter new stake value $",
        minvalue=1,
        maxvalue=1000000000,
    )
def show_error_message(error_title, message):
    """Pop up a modal error dialog with the given title and body text."""
    messagebox.showerror(error_title, message)
class GUI:
    """Tkinter front end for the arbitrage finder.

    Builds a top control bar (API/market pickers plus action buttons)
    and a scrollable canvas that lists computed arbitrage entries.
    Button commands are wired later via set_controller().
    """
    def __init__(self):
        apis = list(keys.keys())
        self.controller = None
        self.window = tk.Tk()
        self.window.title("Arbitrage finder")
        self.window.geometry("800x600")
        self.window.resizable(width=False, height=False)
        # Top bar: dropdowns and buttons laid out on a single grid row.
        top_bar = tk.Frame(self.window)
        top_bar.columnconfigure(4, weight=1)
        top_bar.pack()
        self.api_selection = tk.StringVar(self.window)
        self.api_selection.set(apis[0])
        self.market_selection = tk.StringVar(self.window)
        self.market_selection.set(list(keys.get(apis[0]))[0])
        self.api_dropdown = tk.OptionMenu(top_bar, self.api_selection, *apis,
                                          command=self.update_dropdowns)
        self.market_dropdown = ttk.Combobox(top_bar, values=list(keys.get(apis[0])), state='readonly')
        self.market_dropdown.configure(width=35)
        self.market_dropdown.current(0)
        # Buttons receive their commands in set_controller().
        self.button1 = tk.Button(top_bar, text="Get data")
        self.button2 = tk.Button(top_bar, text="Clear list")
        self.button3 = tk.Button(top_bar, text="Set stake")
        self.button4 = tk.Button(top_bar, text="Load from JSON")
        self.api_dropdown.grid(row=0, column=0)
        self.market_dropdown.grid(row=0, column=1)
        self.button1.grid(row=0, column=2)
        self.button2.grid(row=0, column=3)
        self.button3.grid(row=0, column=4)
        self.button4.grid(row=0, column=5)
        # Main area: canvas plus vertical scrollbar for the results list.
        self.scrollable_info_canvas = tk.Canvas(self.window)
        self.main_info_scrollbar = tk.Scrollbar(self.window, orient="vertical", command=self.scrollable_info_canvas.yview)
        self.scrollable_info_canvas.configure(yscrollcommand=self.main_info_scrollbar.set)
        self.scrollable_info_canvas.pack(side="left", fill="both", expand=True)
        self.main_info_scrollbar.pack(side="right", fill="y")
        self.scroll_frame = tk.Frame(self.scrollable_info_canvas)
        self.items = []
        # NOTE(review): both handlers below are no-op stubs, so these
        # bindings currently have no effect.
        self.market_dropdown.bind("<Enter>", self.disable_main_info_scrolling)
        self.market_dropdown.bind("<Leave>", self.enable_main_info_scrolling)
    def set_controller(self, controller):
        """Attach the controller and wire each button to its action."""
        self.controller = controller
        self.button1.configure(command=controller.get_data_and_update_info)
        self.button2.configure(command=controller.clear_list)
        self.button3.configure(command=controller.update_stake)
        self.button4.configure(command=controller.load_json)
    def disable_main_info_scrolling(self, event):
        # Stub: intended to pause canvas scrolling while over the combobox.
        pass
    def enable_main_info_scrolling(self, event):
        # Stub: counterpart of disable_main_info_scrolling.
        pass
    def start_gui(self):
        """Enter the Tk main loop (blocks until the window closes)."""
        self.window.mainloop()
    def update_scrollable_info(self, new_info_list):
        """Rebuild the results list; rows are colored by trailing $profit."""
        self.scroll_frame.destroy()
        self.scroll_frame = tk.Frame(self.scrollable_info_canvas)
        self.scroll_frame.pack()
        self.items = new_info_list
        self.scroll_frame.columnconfigure(len(self.items), weight=1)
        self.scroll_frame.rowconfigure(len(self.items), weight=1)
        for i, item in enumerate(self.items):
            # Each entry ends with "...$<profit>"; parse the last segment.
            split_by_dollar_sign = item.split("$")
            profit = float(split_by_dollar_sign[len(split_by_dollar_sign) - 1])
            color = "black"
            if profit > 0.0:
                color = "green"
            elif profit < 0.0:
                color = "red"
            label = tk.Label(self.scroll_frame, text=item, foreground=color,
                             relief="groove", width=109, height=2, wraplength=720)
            label.grid(row=i, column=0, stick="nsew")
        self.scrollable_info_canvas.create_window((0, 0), window=self.scroll_frame, anchor="nw")
        # Mouse-wheel scrolling: event.delta is 120 units per notch on Windows.
        self.scrollable_info_canvas.bind_all("<MouseWheel>",
                                             lambda event: self.scrollable_info_canvas.yview_scroll
                                             (int(-1 * (event.delta / 120)), "units"))
        self.scroll_frame.update_idletasks()
        self.scrollable_info_canvas.configure(scrollregion=self.scrollable_info_canvas.bbox("all"))
    def update_dropdowns(self, event=None):
        """Repopulate the market combobox when a different API is chosen."""
        selected_api = self.api_selection.get()
        self.market_dropdown.configure(values=list(keys.get(selected_api)))
        self.market_dropdown.current(0)
    def get_api_market(self):
        """Return the currently selected (api, market) pair."""
        return self.api_selection.get(), self.market_dropdown.get()
if __name__ == '__main__':
    # Standalone run: show the window without a controller attached
    # (buttons stay unwired until set_controller() is called).
    gui = GUI()
    gui.start_gui()
| dli85/ArbFinder | GUI.py | GUI.py | py | 5,222 | python | en | code | 1 | github-code | 90 |
39721814888 | from constants import DEJAVUSANS_PATH
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django_filters.rest_framework import DjangoFilterBackend
from djoser.views import UserViewSet
from recipies.models import (Favorite, Ingredient, IngridientInRecipe, Recipe,
ShoppingCart, Tag)
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import SimpleDocTemplate, Table, TableStyle
from rest_framework import filters, status, viewsets
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import (AllowAny, IsAuthenticated)
from rest_framework.response import Response
from users.models import CustomUser, Subscription
from api.serializers import (CustomUserSerializer, HelpCreateSerializer,
IngridientsSerializer, PasswordSerializer,
RecipeCreateSerializer, RecipeShowSerializer,
TagSerializer, UserSubscriptionSerializer)
from .filters import IngredientFilter, RecipeFilter
from .permissions import IsOwnerOrAdminOrReadOnly
pdfmetrics.registerFont(TTFont('DejaVuSans', DEJAVUSANS_PATH))
class CustomUserViewSet(UserViewSet):
queryset = CustomUser.objects.all()
serializer_class = CustomUserSerializer
@action(
detail=False,
methods=['get'],
permission_classes=(IsAuthenticated,)
)
def me(self, request):
user = CustomUser.objects.get(username=request.user.username)
serializer = CustomUserSerializer(user, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK)
@action(
detail=False,
methods=['get'],
permission_classes=(IsAuthenticated,)
)
def subscriptions(self, request):
queryset = CustomUser.objects.filter(
subscribed_author__user=request.user)
if queryset:
pages = self.paginate_queryset(queryset)
serializer = UserSubscriptionSerializer(pages, many=True,
context={'request':
request})
return self.get_paginated_response(serializer.data)
return Response('У вас нет подписок',
status=status.HTTP_400_BAD_REQUEST)
@action(
detail=True,
methods=['post', 'delete'],
permission_classes=(IsAuthenticated,)
)
def subscribe(self, request, id):
user = self.request.user
author = get_object_or_404(CustomUser, id=id)
subscription = Subscription.objects.filter(user=user,
author=author)
if request.method == 'POST':
subscribe = Subscription.objects.create(
user=user,
author=author
)
subscribe.save()
return Response(f'Вы подписались на {author}',
status=status.HTTP_201_CREATED)
if request.method == 'DELETE':
if subscription:
subscription.delete()
return Response(f'Вы отменили подписку на {author}',
status=status.HTTP_204_NO_CONTENT)
return Response(f'Вы не можете отменить подписку на {author},'
f'потому что не подписаны на него',
status=status.HTTP_400_BAD_REQUEST)
@action(
detail=False,
methods=['post'],
permission_classes=(IsAuthenticated,)
)
def set_password(self, request, pk=None):
user = self.request.user
serializer = PasswordSerializer(data=request.data,
context={'request': request})
if serializer.is_valid(raise_exception=True):
new_password = serializer.data['new_password']
if user.check_password(new_password):
return Response(data='Новый пароль не должен совпадать с '
'предыдущим',
status=status.HTTP_400_BAD_REQUEST)
user.set_password(new_password)
user.save()
return Response(data='Пароль изменен',
status=status.HTTP_204_NO_CONTENT)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class IngredientsViewSet(viewsets.ModelViewSet):
queryset = Ingredient.objects.all()
serializer_class = IngridientsSerializer
permission_classes = (AllowAny,)
filter_backends = (DjangoFilterBackend, filters.SearchFilter)
filterset_class = IngredientFilter
search_fields = ('name',)
pagination_class = None
class TagViewSet(viewsets.ModelViewSet):
queryset = Tag.objects.all()
serializer_class = TagSerializer
permission_classes = (AllowAny,)
pagination_class = None
class RecipeViewSet(viewsets.ModelViewSet):
queryset = (
Recipe.objects
.select_related('author')
.prefetch_related('tags', 'ingredients_in_recipe')
)
permission_classes = (IsOwnerOrAdminOrReadOnly,)
filter_backends = (DjangoFilterBackend,)
filterset_class = RecipeFilter
pagination_class = PageNumberPagination
def get_queryset(self):
qs = super().get_queryset()
if 'is_favorited' in self.request.query_params:
qs = qs.filter(favorites__user=self.request.user)
if 'is_in_shopping_cart' in self.request.query_params:
qs = qs.filter(shopping_cart__user=self.request.user)
return qs
def get_serializer_class(self):
if self.request.method == 'POST' or self.request.method == 'PATCH':
return RecipeCreateSerializer
return RecipeShowSerializer
def perform_create(self, serializer):
serializer.save(author=self.request.user)
def perform_update(self, serializer):
serializer.save()
@action(
methods=['post', 'delete'],
detail=True,
permission_classes=(IsAuthenticated,)
)
def favorite(self, request, pk=None):
user = self.request.user
if user.is_anonymous:
return Response(status=status.HTTP_401_UNAUTHORIZED)
recipe = get_object_or_404(Recipe, pk=pk)
if request.method == 'POST':
favorite, created = Favorite.objects.get_or_create(
user=user, recipe=recipe)
if created:
serializer = HelpCreateSerializer(favorite.recipe)
return Response(
data=serializer.data,
status=status.HTTP_201_CREATED
)
return Response(status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
favorites = Favorite.objects.filter(user=user,
recipe=recipe).delete()
if favorites[0] == 0:
data = {'errors': 'Такого рецепта нет в избранных.'}
return Response(data=data, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(status=status.HTTP_400_BAD_REQUEST)
@action(
methods=['post', 'delete'],
detail=True,
permission_classes=(IsAuthenticated,)
)
def shopping_cart(self, request, pk=None):
user = request.user
recipe = get_object_or_404(Recipe, id=pk)
if request.method == 'POST':
shopping_cart, created = ShoppingCart.objects.get_or_create(
user=user, recipe=recipe)
if created:
serializer = HelpCreateSerializer(shopping_cart.recipe)
return Response(serializer.data,
status=status.HTTP_201_CREATED)
return Response(data='Рецепт уже добавлен в корзину',
status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
shopping_cart = ShoppingCart.objects.filter(user=user,
recipe=recipe)
if shopping_cart.exists():
shopping_cart.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
return Response(data='Этого рецепта нет в списке покупок',
status=status.HTTP_404_NOT_FOUND)
@action(
detail=False,
methods=['get'],
permission_classes=(IsAuthenticated,)
)
def download_shopping_cart(self, request):
self.response = HttpResponse(content_type='application/pdf')
self.response[
'Content-Disposition'] = 'attachment; filename="my_recipes.pdf"'
queryset = IngridientInRecipe.objects.filter(
recipe__shopping_cart__user=request.user)
ingredients_dict = {}
for item in queryset:
if item.ingredient not in ingredients_dict:
ingredients_dict[item.ingredient] = (
item.ingredient.measurement_unit, item.amount)
else:
current_unit, current_amount = ingredients_dict[
item.ingredient]
ingredients_dict[item.ingredient] = (
current_unit, current_amount + item.amount)
ingredients_list = []
ingredients_list.append(['Ингредиент',
'Единицы изменения',
'Количество'])
for ing, (unit, amount) in ingredients_dict.items():
ingredients_list.append([ing, unit, amount])
style = TableStyle([
('FONTNAME', (0, 0), (-1, -1), 'DejaVuSans')
])
table = Table(ingredients_list, style=style)
doc = SimpleDocTemplate(self.response)
doc.build([table])
return self.response
| Soita-pa-well/foodgram-project-react | backend/api/views.py | views.py | py | 10,283 | python | en | code | 0 | github-code | 90 |
4040484737 | import sys
import csv
import random
import copy
import time
from RushClass import Gameboard, Vehicle, Dimensions
from RushHelpers import backtrace, backtraceV2
from Algorithms import randomSolver, breadth_First_Search, depth_First_Search
import os
# Get vehicles from csv file and return as vehicle class
def uploadBoard(filepath):
vehicles = []
with open(filepath, 'r') as csvboard:
boardreader = csv.reader(csvboard)
next(boardreader)
for row in boardreader:
id, x, y, orientation, length = row
vehicles.append(Vehicle(id, int(x), int(y), orientation, length))
return vehicles
if __name__ == "__main__":
# let user choose the boardsize and the game
Dimensions.init()
boardsize = input("What board size would you like to solve?\n 1. 6x6\n 2. 9x9 \n 3. 12x12\n")
path = "Boards/"
if (boardsize == "1" or boardsize =="6x6"):
path = path + "6x6/"
Dimensions.width = 6
Dimensions.height = 6
print("These 6x6 boards are available:")
for item in os.listdir(path):
print (item)
path = path + input("Which board would you like to solve?\n")
if (boardsize == "2" or boardsize == "9x9"):
path = path + "9x9/"
Dimensions.width = 9
Dimensions.height = 9
print("These 9x9 boards are available:")
for item in os.listdir(path):
print (item)
path = path + input("Which board would you like to solve?\n")
if (boardsize == "3" or boardsize == "12x12"):
path = path + "12x12/"
for item in os.listdir(path):
print (item)
Dimensions.width = 12
Dimensions.height = 12
path = path + "game7.csv"
# let user choose the algorithm, load game in gameboard class and print results
algorithm = input("Which algorithm would you like to use?\n 1. Random Solver\n 2. Breadth First Search\n 3. Depth First Search\n")
if (algorithm == "1" or algorithm.lower() == "random solver"):
# run a random solver
results = randomSolver(Gameboard(uploadBoard(path)))
# print results of random solver
print("Time: " + str(results["solvetime"]))
print("Steps: " + str(results["steps"]))
if (algorithm == "2" or algorithm.lower() == "breadth first search"):
# run a breadth first search
results = breadth_First_Search(Gameboard(uploadBoard(path)))
# print results of breadth first
print("Time: " + str(results["solvetime"]))
print("Nodes: " + str(results["nodes_popped"]))
path = backtrace(results["archive"], results["solution"])
print("Length solution: " + str(len(path)))
print(backtraceV2(path))
if (algorithm == "3" or algorithm.lower() == "depth first search"):
# run a depth first search
game = Gameboard(uploadBoard(path))
results = depth_First_Search(game)
# print results of depth first search
print("Time: " + str(results["solvetime"]))
print("Nodes: " + str(results["nodes"]))
path = backtrace(results["archive"], results["solution"])
print("Length solution: " + str(len(path)))
# the backtrace has been commented out because it is not of interest and generates a lot of text
# if for some reason you would like to print the solution, uncomment the next line:
#print(backtraceV2(path))
| KaKariki02/rushHour | RushHour.py | RushHour.py | py | 3,435 | python | en | code | 0 | github-code | 90 |
20302077961 | import random
def is_prime(n):
i = 2
while i<n:
if(n%i==0):
return False
i+=1
return True
def gcd(a,b):
while a != 0 and b != 0:
if a > b:
a = a % b
else:
b = b % a
return a+b
def multiplicative_inverse(a,b):
if(a<b):
tmp = a
a = b
b = tmp
# 0 1 2 3 4 5
# matrix = [a b a//b a%b x y]
matrix = []
i = 0
while a%b != 0:
matrix.append([a, b, a % b, a // b, '_', '_'])
a = matrix[i][1]
b = matrix[i][2]
i+=1
matrix.append([a, b, a % b, a // b, '_', '_'])
matrix[i][4] = 0
matrix[i][5] = 1
i-=1
while i>=0:
matrix[i][4] = matrix[i+1][5]
matrix[i][5] = matrix[i+1][4] - (matrix[i+1][5] * matrix[i][3])
i-=1
#return matrix
return matrix[0][5]
def generate_keypair(p, q):
    """Build an RSA key pair from two distinct primes p and q.

    Returns ((e, n), (d, n)): the public key (e, n) and private key
    (d, n).  Raises ValueError for non-prime or equal inputs.
    """
    if not (is_prime(p) and is_prime(q)):
        raise ValueError('Both numbers must be prime.')
    elif p == q:
        raise ValueError('p and q cannot be equal')

    n = p * q
    # Euler's totient for a product of two distinct primes.
    phi = (p - 1) * (q - 1)

    # Draw random public-exponent candidates until one is coprime
    # with phi(n) (verified with Euclid's algorithm).
    e = random.randrange(1, phi)
    while gcd(e, phi) != 1:
        e = random.randrange(1, phi)

    # Private exponent via the extended Euclidean algorithm.
    d = multiplicative_inverse(e, phi)
    return ((e, n), (d, n))
# Interactive driver: keep prompting (messages are in Russian) until the
# user supplies two primes, then print the generated RSA key pair.
while True:
    p = int(input('Введите простое число p: '))
    if (is_prime(p) == True):
        break
while True:
    q = int(input('Введите простое число q: '))
    if (is_prime(q) == True):
        break
print("Сгенерированная пара ключей: ",generate_keypair(p,q))
| BorisBelovA/Python-Labs | rsa.py | rsa.py | py | 1,983 | python | en | code | 1 | github-code | 90 |
35615236677 | from __future__ import annotations
import dataclasses
import botocore.exceptions
from rich.style import Style
from rich.text import Text
import textual.binding
import textual.widgets
import textual.reactive
import textual.events
import rich.console
import rich.text
from textual.widgets._tree import TreeNode
from bucketman.constants import AWS_HEX_COLOR_CODE
from bucketman.widgets.common import ObjectType
@dataclasses.dataclass
class S3Object:
    """One entry shown in the S3 tree: an object (file) or a folder prefix."""
    key: str              # full S3 key / prefix of this entry
    size: float           # object size in bytes (0 for folder prefixes)
    type: ObjectType      # FILE or FOLDER
    loaded: bool = False  # True once a folder's children have been listed
    @property
    def is_dir(self):
        # Folder entries come from the listing's CommonPrefixes.
        return self.type == ObjectType.FOLDER
class S3Tree(textual.widgets.Tree[S3Object]):
    """Textual tree widget that lazily browses the keys of one S3 bucket.

    Folder levels are loaded on demand via paginated ListObjectsV2 calls
    (using the app's shared s3_client); the root node represents the
    bucket itself (empty key prefix).
    """
    name = "S3Tree"
    BINDINGS = [
        textual.binding.Binding("r", "reload", "Reload", show=True),
        textual.binding.Binding("d", "download", "Download", show=True, key_display='d'),
        textual.binding.Binding("D", "s3_delete", "Delete", show=True, key_display="Shift+d"),
        textual.binding.Binding("b", "select_bucket", "Select Bucket", show=True),
    ]
    def __init__(self, bucket_name: str, *args, **kwargs):
        """Create the tree for *bucket_name*; the root data is a folder
        entry with an empty prefix and starts expanded."""
        self.bucket_name = bucket_name
        label = bucket_name
        data = S3Object(key="", size=0, type=ObjectType.FOLDER)
        super().__init__(label, *args, data=data, **kwargs)
        self.root.expand()
    @property
    def selected_object(self) -> S3Object:
        """The S3Object attached to the node under the cursor."""
        return self.cursor_node.data
    async def on_mount(self) -> None:
        # Populate the bucket's top level as soon as the widget mounts.
        self.load_objects(self.root)
    def on_paste(self, event: textual.events.Paste) -> None:
        """Handle pasting a path from the clipboard or file drop."""
        # TODO support pasting multiple paths without erroring
        paths = event.text.splitlines()
        for path in paths:
            # Upload is currently stubbed out: only logs the path.
            self.log(f"Would upload {path}")
            #self.app.action_upload(path.strip())
    def reload_node(self, node: TreeNode[S3Object]):
        """Reload the given node. If the node is a file or a prefix with no children, reload the parent."""
        node.remove_children()
        self.load_objects(node)
        if not node.children and self.root != node:
            # Node vanished (e.g. deleted remotely) -- walk up one level.
            self.reload_node(node.parent)
        else:
            node.expand()
    def reload_selected_prefix(self) -> str:
        """Reload the folder containing the cursor (or the cursor's own
        folder) and return the key of the reloaded prefix."""
        if self.cursor_node.data.is_dir:
            node_to_reload = self.cursor_node
        else:
            node_to_reload = self.cursor_node.parent
        self.reload_node(node_to_reload)
        return node_to_reload.data.key
    def action_reload(self) -> None:
        """Key binding 'r': reload the current prefix and toast the result."""
        reloaded_key = self.reload_selected_prefix()
        self.notify(f'Reloaded objects in {self.bucket_name}/{reloaded_key}')
    def render_label(self, node: TreeNode[S3Object], base_style: Style, style: Style) -> Text:
        """Prefix each node label with an emoji: bucket for the root, an
        open/closed folder for prefixes, a page for objects."""
        node_label = node._label.copy()
        node_label.stylize(style)
        if node.is_root:
            prefix = ("🪣 ", base_style)
        elif node.data.is_dir:
            prefix = (
                "📂 " if node.is_expanded else "📁 ",
                # 'toggle' meta lets a click on the icon expand/collapse.
                base_style + rich.style.Style.from_meta({"toggle": True}),
            )
        else:
            prefix = ("📄 ", base_style)
        text = Text.assemble(prefix, node_label)
        return text
    def load_objects(self, node: textual.widgets.TreeNode[S3Object]):
        """List one level of the bucket under *node*'s prefix and add the
        results as children (folders first, then files).

        On a client error, notifies the user and falls back to the
        bucket-selection action.
        """
        if node is None:
            node = self.root
        prefix = node.data.key
        # Delimiter='/' gives one directory level: CommonPrefixes are the
        # sub-folders, Contents the objects directly under the prefix.
        paginator = self.app.s3_client.get_paginator("list_objects_v2")
        result = paginator.paginate(
            Bucket=self.bucket_name, Delimiter="/", Prefix=prefix
        )
        try:
            for common_prefix in result.search("CommonPrefixes"):
                if not common_prefix:
                    continue
                key = common_prefix.get("Prefix")
                # Label is the key relative to the parent prefix.
                node.add(
                    key.replace(prefix, "", 1), S3Object(key, 0, ObjectType.FOLDER)
                )
            for obj in result.search("Contents"):
                if not obj:
                    continue
                key = obj.get("Key")
                node.add(
                    key.replace(prefix, "", 1),
                    S3Object(key, obj.get("Size"), ObjectType.FILE),
                    allow_expand=False
                )
        except botocore.exceptions.ClientError:
            self.notify(
                f'Failed to load contents of bucket "{self.bucket_name}". Please check your credentials and make sure the bucket exists and you have permission to access it.',
                title="Error",
                severity="error"
            )
            self.app.action_change_bucket()
            #self.app.panic(
            #    f'Failed to load contents of bucket "{self.bucket_name}". Please check your credentials and make sure the bucket exists and you have permission to access it.'
            #)
        node.data.loaded = True
    def load_and_toggle_selected_node(self):
        """Lazy-load the cursor's folder on first expand, then toggle it."""
        node = self.cursor_node
        if node.data.is_dir:
            if not node.data.loaded:
                self.load_objects(node)
            node.toggle()
    def action_toggle_node(self):
        self.load_and_toggle_selected_node()
    def action_select_cursor(self):
        self.load_and_toggle_selected_node()
| brennerm/bucketman | bucketman/widgets/s3tree.py | s3tree.py | py | 5,228 | python | en | code | 2 | github-code | 90 |
22937411854 | from __future__ import annotations
import base64
from asyncio import Queue
from aiohttp import web
from aiohttp.web_request import Request
from protos import GetRaidDetailsOutProto, GymGetInfoOutProto, METHOD_GET_RAID_DETAILS, METHOD_GYM_GET_INFO
from .log import log
from .config import config
# Maps the numeric method id found in a raw payload to the protobuf
# response class used to parse it; ids not in this table are ignored.
MESSAGES = {
    METHOD_GET_RAID_DETAILS: GetRaidDetailsOutProto,
    METHOD_GYM_GET_INFO: GymGetInfoOutProto
}
class RawInput:
    """aiohttp application accepting raw proto dumps on POST /raw.

    Each recognized payload is base64-decoded, parsed into its protobuf
    message and pushed onto *process_queue* as a (proto, lat, lon) tuple.
    """
    def __init__(self, process_queue: Queue):
        self.app = web.Application(logger=log)
        self.queue: Queue = process_queue
        routes = [web.post("/raw", self.accept_protos)]
        self.app.add_routes(routes)

    async def accept_protos(self, request: Request):
        """Parse the JSON body's "contents" list and enqueue every proto
        whose method id is known; always answers 200 unless the body is
        unreadable (400)."""
        log.debug(f"Received message from {request.remote}")
        if not request.can_read_body:
            log.warning(f"Couldn't read body of incoming request")
            return web.Response(status=400)

        data = await request.json()
        for raw_proto in data.get("contents", []):
            method_id: int = raw_proto.get("type", 0)
            message = MESSAGES.get(method_id)
            if message is None:
                # Not a method we track; skip silently.
                continue
            payload = raw_proto.get("payload")
            if not payload:
                # Fixed typo in the log message ("paylod" -> "payload").
                log.warning(f"Empty payload in {raw_proto}")
                continue
            try:
                decoded = base64.b64decode(payload)
            except Exception as e:
                # Include the decode error so failures are diagnosable.
                log.warning(f"Couldn't decode {payload} in {raw_proto}: {e}")
                continue
            try:
                proto = message().parse(decoded)
                lat, lon = data.get("lat", 0), data.get("lon", 0)
                self.queue.put_nowait((proto, lat, lon))
            except Exception as e:
                log.exception(f"Unknown error while parsing proto {raw_proto}", e)
                continue
        return web.Response()
| ccev/raidwatcher | raidwatcher/raw_input.py | raw_input.py | py | 1,899 | python | en | code | 1 | github-code | 90 |
75053576936 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 29 23:33:15 2020
@author: Lung-Yi
"""
from __future__ import print_function
import sys
import os
import re
import copy
import math
import rdkit.Chem as Chem
import rdkit.Chem.AllChem as AllChem
from rdkit.Chem.rdchem import ChiralType, BondType, BondDir
from rdchiral.utils import vprint, PLEVEL, atoms_are_different
from rdchiral.initialization import rdchiralReaction, rdchiralReactants
from rdchiral.chiral import template_atom_could_have_been_tetra, atom_chirality_matches#,copy_chirality,\
from rdchiral.clean import canonicalize_outcome_smiles, combine_enantiomers_into_racemic
from rdchiral.bonds import BondDirOpposite, restore_bond_stereo_to_sp2_atom
def canon_remap(smiles, return_NumAtom=False):
    """Canonical, atom-map-free, non-isomeric SMILES for *smiles*.

    Returns None for unparseable input; when return_NumAtom is True a
    (smiles, atom_count) tuple is returned instead.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return None
    # Strip existing atom-map numbers first: CanonicalRankAtoms takes
    # mapping numbers into account while canonicalizing.
    for a in mol.GetAtoms():
        if a.HasProp('molAtomMapNumber'):
            a.ClearProp('molAtomMapNumber')
    canonical = Chem.MolToSmiles(mol, isomericSmiles=False)
    if return_NumAtom:
        return canonical, mol.GetNumAtoms()
    return canonical
def CalculateNumHs(atom):
    """Implicit hydrogens needed to fill the atom's target valence.

    The target comes from atom_valence_dict; the bond-order sum and the
    magnitude of the formal charge are subtracted from it.
    """
    target = atom_valence_dict[atom.GetSymbol()]
    bond_order_sum = sum(b.GetBondTypeAsDouble() for b in atom.GetBonds())
    return int(target - bond_order_sum - abs(atom.GetFormalCharge()))
# def UpdateAtomInfo(atom):
# # atom_valence_dict = {"C":4, "N":3, "O":2, "S":(2,4,6), "P":(3,5),"Si":4, "Br":1, "Mg":2}
# if atom.GetTotalValence() != atom_valence_dict[atom.GetSymbol()]:
# atom.SetNumExplicitHs(CalculateNumHs(atom))
# atom.UpdatePropertyCache()
# return
def GetAtomWithAtomMapNum(mol, mapnum):
    """First atom in *mol* whose atom-map number equals *mapnum*,
    or None when no atom matches."""
    matches = (a for a in mol.GetAtoms() if a.GetAtomMapNum() == mapnum)
    return next(matches, None)
def ReassignMapping(smiles, return_NumAtom=False, iso=False):
    """Renumber the atom-map numbers of *smiles* sequentially from 1 and
    clear isotope labels.

    When return_NumAtom is True, returns (smiles, atom_count); *iso*
    controls whether the output SMILES is isomeric.
    """
    m = Chem.MolFromSmiles(smiles)
    i = 1
    for a in m.GetAtoms():
        if a.GetIsotope():
            a.SetIsotope(0)
        # NOTE: the original branched on GetSymbol() != 'H', but both
        # branches performed the identical assignment, so the dead
        # conditional has been removed (behavior unchanged).
        a.SetAtomMapNum(i)
        i += 1
    if return_NumAtom:
        return Chem.MolToSmiles(m, isomericSmiles=iso), m.GetNumAtoms()
    return Chem.MolToSmiles(m, isomericSmiles=iso)
atom_valence_dict = {"C":4, "N":3, "O":2, "S":(2,4,6), "P":(3,5),"Si":4, "Br":1, "Cl":1, "I":1, "F":1, "Mg":2}
"""
start testing
append_reagent function only supports one atom now
"""
def rdchiralRunText_modified(reaction_smarts, reactant_smiles, append_reagent=False):
    """Run *reaction_smarts* on *reactant_smiles* (rdchiral + RDKit) and
    return the outcome SMILES with reactant atom maps preserved.

    Atoms introduced by the template receive atom-map numbers not used by
    the reactants.  When append_reagent is True, returns
    (smiles_list, reagents_list) where each entry of reagents_list holds
    the SMILES of the appended reagent atoms for that outcome
    (single-atom reagents only, per the note above); otherwise returns
    the de-duplicated list of outcome SMILES.
    """
    # Map numbers already taken by the reactants; new atoms must avoid these.
    reactant_mapnum = set([int(atom.GetProp('molAtomMapNumber')) for atom in Chem.MolFromSmiles(reactant_smiles).GetAtoms()])
    rxn = rdchiralReaction(reaction_smarts)
    rxn.reset()
    if append_reagent:
        need_reagent = True
        # Equal atom counts on both template sides => nothing is added,
        # so no reagent needs to be recorded.
        if rxn.template_r.GetNumAtoms() == rxn.template_p.GetNumAtoms():
            need_reagent = False
        else:
            reagents_list = []
    reactants = rdchiralReactants(reactant_smiles)
    # Run naive RDKit on ACHIRAL version of molecules
    outcomes = rxn.rxn.RunReactants((reactants.reactants_achiral,))
    # mol = Chem.MolFromSmiles(reactant_smiles)
    # Chem.rdmolops.RemoveStereochemistry(mol)
    # outcomes = rxn.rxn.RunReactants((mol,)) # for tesret
    smiles_list = []
    for outcome in outcomes:
        ###############################################################################
        # Look for new atoms in products that were not in
        # reactants (e.g., LGs for a retro reaction)
        changed_index = []
        reagent_smiles = []
        unmapped = sum([z.GetNumAtoms() for z in outcome])
        for m in outcome:
            try:
                for a in m.GetAtoms():
                    # if not a.IsInRing():
                    #     a.SetIsAromatic(False)
                    #     for b in a.GetBonds():
                    #         b.SetIsAromatic(False)
                    # Assign map number to outcome based on react_atom_idx
                    if a.HasProp('react_atom_idx'):
                        num = reactants.idx_to_mapnum(int(a.GetProp('react_atom_idx')))
                        # Remember atoms whose mapping changed: their H
                        # counts are re-derived below.
                        if a.GetAtomMapNum() != num:
                            changed_index.append(num)
                            a.UpdatePropertyCache(strict=False)
                        a.SetAtomMapNum(num)
            except:
                continue
            try:
                Chem.Kekulize(m)
            except Exception as e:
                # print(e)
                continue
            # Re-derive explicit H counts for atoms touched by the template
            # so valences stay consistent with atom_valence_dict.
            for a in m.GetAtoms():
                if (int(a.GetAtomMapNum()) in changed_index) and (not a.HasProp('_QueryHCount')): # check type
                    try:
                        if type(atom_valence_dict[a.GetSymbol()]) == int:
                            if a.GetTotalValence() != atom_valence_dict[a.GetSymbol()]:
                                a.SetNumExplicitHs(CalculateNumHs(a))
                                a.UpdatePropertyCache(strict=True)
                        else:
                            # Multi-valent elements: try each allowed valence
                            # until one yields a sane hydrogen count.
                            for valence_number in atom_valence_dict[a.GetSymbol()]:
                                try:
                                    a.SetNumExplicitHs(int(valence_number - sum([bond.GetBondTypeAsDouble() for bond in a.GetBonds()]) - abs(a.GetFormalCharge()) ))
                                    a.UpdatePropertyCache(strict=True)
                                    break
                                except:
                                    continue
                    except Exception as e:
                        # print(e)
                        pass
            try:
                Chem.SanitizeMol(m)
            except:
                continue
        #######################################################################################
        # Convert product(s) to single product so that all
        # reactions can be treated as pseudo-intramolecular
        # But! check for ring openings mistakenly split into multiple
        # This can be diagnosed by duplicate map numbers (i.e., SMILES)
        mapnums = [a.GetAtomMapNum() for m in outcome for a in m.GetAtoms() if a.GetAtomMapNum()]
        if len(mapnums) != len(set(mapnums)): # duplicate?
            try:
                if PLEVEL >= 1: print('Found duplicate mapnums in product - need to stitch')
                # need to do a fancy merge
                merged_mol = Chem.RWMol(outcome[0])
                merged_map_to_id = {a.GetAtomMapNum(): a.GetIdx() for a in outcome[0].GetAtoms() if a.GetAtomMapNum()}
                for j in range(1, len(outcome)):
                    new_mol = outcome[j]#
                    for a in new_mol.GetAtoms():
                        if a.GetAtomMapNum() not in merged_map_to_id:
                            merged_map_to_id[a.GetAtomMapNum()] = merged_mol.AddAtom(a)
                    for b in new_mol.GetBonds():
                        bi = b.GetBeginAtom().GetAtomMapNum()
                        bj = b.GetEndAtom().GetAtomMapNum()
                        if PLEVEL >= 10: print('stitching bond between {} and {} in stich has chirality {}, {}'.format(
                            bi, bj, b.GetStereo(), b.GetBondDir()
                        ))
                        if not merged_mol.GetBondBetweenAtoms(
                                merged_map_to_id[bi], merged_map_to_id[bj]):
                            merged_mol.AddBond(merged_map_to_id[bi],
                                merged_map_to_id[bj], b.GetBondType())
                            merged_mol.GetBondBetweenAtoms(
                                merged_map_to_id[bi], merged_map_to_id[bj]
                            ).SetStereo(b.GetStereo())
                            merged_mol.GetBondBetweenAtoms(
                                merged_map_to_id[bi], merged_map_to_id[bj]
                            ).SetBondDir(b.GetBondDir())
                outcome = merged_mol.GetMol()
            except:
                continue
        else:
            # No duplicates: a plain combine is enough.
            new_outcome = outcome[0]
            for j in range(1, len(outcome)):
                new_outcome = AllChem.CombineMols(new_outcome, outcome[j])
            outcome = new_outcome
        # Assign atom map num after stitching to those atoms that are reagent atom:
        # "reactant_mapnum" is the set containing the atom mapping number that have been used in reactants
        unmapped_mapnum = set([ i+1 for i in range(0, 299)])
        unmapped_mapnum = unmapped_mapnum.difference(reactant_mapnum)
        # outcome_mapnum = set([atom.GetProp('molAtomMapNumber') for atom in Chem.MolFromSmiles(outcome).GetAtoms()])
        # unmapped_mapnum = unmapped_mapnum.difference(outcome_mapnum)
        for a in outcome.GetAtoms():
            if (not a.HasProp('react_atom_idx')) and (not a.GetAtomMapNum()):
                unmapped = unmapped_mapnum.pop()
                a.SetAtomMapNum(unmapped)
                if append_reagent:
                    # Build a one-atom reagent SMILES carrying the new map number.
                    reagent_atom = '[{}:{}]'.format(a.GetSymbol(), unmapped)
                    reagent_mol = Chem.MolFromSmiles(reagent_atom)
                    reagent_mol.GetAtoms()[0].SetNumExplicitHs(CalculateNumHs(reagent_mol.GetAtoms()[0]))
                    reagent_smiles.append(Chem.MolToSmiles(reagent_mol))
        # unmapped -= 1
        #######################################################################################
        # Update molecule because maybe bonds change.
        try:
            outcome.UpdatePropertyCache(strict=True)
        except:
            pass
        #######################################################################################
        smiles = Chem.MolToSmiles(outcome)
        if append_reagent:
            if need_reagent:
                reagent_smiles = '.'.join(reagent_smiles)
                reagents_list.append(reagent_smiles)
        smiles_list.append(smiles.rstrip("."))
    if append_reagent:
        if not need_reagent:
            reagents_list = ['']*len(smiles_list)
        return smiles_list, reagents_list
    return list(set(smiles_list))
def RemoveReagent(rxn_smiles, select_major_product = False):
    """Drop spectator molecules from an atom-mapped reaction SMILES.

    First removes species that appear verbatim (canonically) on both
    sides, then removes molecules whose atom-map numbers share nothing
    with the opposite side.  When select_major_product is True, only the
    largest remaining product (by atom count) is kept.
    """
    r, p = rxn_smiles.split('>>')
    r = r.split('.')
    # if '.' not in p:
    #     p = Chem.MolFromSmiles(p)
    #     p_map_total = [x.GetAtomMapNum() for x in p.GetAtoms()]
    # else:
    p = p.split('.')
    # Remove molecules unchanged by the reaction (same canonical SMILES
    # on both sides).
    can_r = [Chem.CanonSmiles(smi) for smi in r]
    can_p = [Chem.CanonSmiles(smi) for smi in p]
    inter = set(can_r) & set(can_p)
    if inter:
        for smi in inter:
            can_p.remove(smi)
            can_r.remove(smi)
        r = can_r
        p = can_p
    if (p == []) or (r == []):
        print("This reaction has no change:")
        print(rxn_smiles)
        return rxn_smiles
    r = [Chem.MolFromSmiles(smi) for smi in r]
    p = [Chem.MolFromSmiles(smi) for smi in p]
    # Drop molecules whose atom maps never appear on the other side.
    r_map_total = [atom.GetAtomMapNum() for mol in r for atom in mol.GetAtoms()]
    p_map_total = [atom.GetAtomMapNum() for mol in p for atom in mol.GetAtoms()]
    r2 = r.copy()
    p2 = p.copy()
    for m in r2:
        r_map = [a.GetAtomMapNum() for a in m.GetAtoms()]
        if set(r_map) & set(p_map_total) == set(): r.remove(m)
    for m in p2:
        p_map = [a.GetAtomMapNum() for a in m.GetAtoms()]
        if set(r_map_total) & set(p_map) == set(): p.remove(m)
    if select_major_product:
        p = sorted(p, key= lambda x: x.GetNumAtoms(),reverse=True)[0] # select the major product
        return '.'.join([Chem.MolToSmiles(m) for m in r]) +'>>'+ Chem.MolToSmiles(p)
    else:
        return '.'.join([Chem.MolToSmiles(m) for m in r]) +'>>'+ '.'.join([Chem.MolToSmiles(x) for x in p])
# def Get_chirality(atom):
# """Input an atom, and return the map number and its chirality info.
# None means this atom is not chiral center."""
# # Not possible to be a tetrahedral center anymore?
# if atom.GetDegree() < 3:
# return
# if atom.GetDegree() == 3 and \
# any(b.GetBondType() != BondType.SINGLE for b in atom.GetBonds()):
# return
# return atom.GetAtomMapNum(), atom.GetChiralTag()
def copy_chirality_modify(a_src, a_new):
    """Copy the chiral tag from a_src onto the corresponding atom a_new.

    Skipped when a_new can no longer be a tetrahedral centre: fewer than
    three neighbours, or exactly three with any non-single bond.
    """
    degree = a_new.GetDegree()
    if degree < 3:
        return
    if degree == 3:
        has_non_single = any(b.GetBondType() != BondType.SINGLE
                             for b in a_new.GetBonds())
        if has_non_single:
            return
    if PLEVEL >= 3:
        print('For mapnum {}, copying src {} chirality tag to new'.format(
            a_src.GetAtomMapNum(), a_src.GetChiralTag()))
    a_new.SetChiralTag(a_src.GetChiralTag())
def copy_stereo(b_src, b_new):
    """Copy cis/trans stereo information from bond b_src to bond b_new.

    Notice: https://github.com/rdkit/rdkit/issues/2404 --
    SetStereoAtoms(end_atom_idx, begin_atom_idx) must be called before
    SetStereo for the stereo flag to stick.
    """
    # BUG FIX: the original guard compared GetBondTypeAsDouble() (a float
    # bond order, always >= 1) against the BondStereo enum, so it never
    # fired.  The apparent intent is to skip bonds whose SOURCE carries no
    # stereo flag -- TODO confirm against callers (move_info pairs bonds).
    if b_src.GetStereo() == Chem.rdchem.BondStereo.STEREONONE:
        return
    b_new.SetStereoAtoms(b_new.GetEndAtomIdx(), b_new.GetBeginAtomIdx())
    b_new.SetStereo(b_src.GetStereo())
    return
def move_info(gold_smiles, new_smiles):
    """Transfer chirality/stereo annotations from gold_smiles onto
    new_smiles (same molecules, possibly different atom maps).

    Both inputs must contain the same number of '.'-separated molecules;
    multi-molecule inputs are aligned by their canonical map-free SMILES
    before copying.  Returns the annotated combined SMILES for the
    multi-molecule case (the single-molecule branch annotates in place).
    """
    g_count = gold_smiles.count('.')
    n_count = new_smiles.count('.')
    assert g_count == n_count
    if g_count == 0:
        gold_mol = Chem.MolFromSmiles(gold_smiles)
        new_mol = Chem.MolFromSmiles(new_smiles)
        append_ChiralStereo_info_for_mol(gold_mol,new_mol)
    else:
        gold_smiles = gold_smiles.split('.')
        new_smiles = new_smiles.split('.')
        # Sort both fragment lists by canonical SMILES so corresponding
        # molecules line up index-by-index.
        gold_smiles = sorted(gold_smiles,key= lambda s:canon_remap(s))
        new_smiles = sorted(new_smiles,key= lambda s:canon_remap(s))
        for i in range(g_count+1):
            m1 = Chem.MolFromSmiles(gold_smiles[i])
            m2 = Chem.MolFromSmiles(new_smiles[i])
            append_ChiralStereo_info_for_mol(m1, m2)
            if i == 0:
                new_mol = m2
            else:
                new_mol = AllChem.CombineMols(new_mol, m2)
    return Chem.MolToSmiles(new_mol)
def append_ChiralStereo_info_for_mol(gold_mol, new_mol):
    """
    Parameters
    ----------
    gold_mol : rdkit.MolObject
        gold_mol has the chiral and stereo information.
    new_mol : rdkit.MolObject
        new_mol is the reactant or product after template-mapping, so it
        does not have chiral and stereo information in itself.

    Copies atom chiral tags and bond stereo from gold_mol onto new_mol
    in place, pairing atoms by index (the molecules must be atom-for-atom
    identical) and bonds by their order-independent labels.

    Returns None
    -------
    None.
    """
    assert gold_mol.GetNumAtoms() == new_mol.GetNumAtoms()
    Chem.AssignStereochemistry(new_mol, force=True, cleanIt=True)
    gold_atoms = [atom for atom in gold_mol.GetAtoms()]
    new_atoms = [atom for atom in new_mol.GetAtoms()]
    for i in range(gold_mol.GetNumAtoms()):
        gold_atom = gold_atoms[i]
        new_atom = new_atoms[i]
        # Sanity check: atoms at the same index must match in element,
        # charge, degree etc. before copying stereo onto them.
        assert not atoms_are_different_2nd(gold_atom, new_atom)
        copy_chirality_modify(gold_atom, new_atom)
        # Pair this atom's bonds by sorted label, then copy stereo per pair.
        gold_bonds = [bond for bond in gold_atom.GetBonds()]
        new_bonds = [bond for bond in new_atom.GetBonds()]
        gold_bonds = sorted(gold_bonds, key= lambda b:bond_to_label_2nd(b))
        new_bonds = sorted(new_bonds, key= lambda b:bond_to_label_2nd(b))
        for j in range(len(gold_bonds)):
            copy_stereo(gold_bonds[j], new_bonds[j])
    #################Check the results#################
    # gold_smiles = Chem.MolToSmiles(gold_mol)
    # new_smiles = Chem.MolToSmiles(new_mol)
    # print("gold")
    # print(gold_smiles)
    # print("new")
    # print(new_smiles)
    # if gold_smiles != new_smiles:
    #     print('ERROR! The smiles are not the same.')
    return
def bond_to_label_2nd(bond):
    """Order-independent text label for a bond, built from the atomic
    numbers of its two end atoms; mapped atoms also get their index
    appended so equivalent elements stay distinguishable."""
    labels = []
    for atom in (bond.GetBeginAtom(), bond.GetEndAtom()):
        label = str(atom.GetAtomicNum())
        if atom.GetAtomMapNum():
            label += str(atom.GetIdx())
        labels.append(label)
    labels.sort()
    return labels[0] + labels[1]

def atoms_are_different_2nd(atom1, atom2):
    """True when the two RDKit atoms differ in any basic property
    (element, total H count, formal charge, degree, radical electrons,
    aromaticity) or in the sorted labels of their bonds."""
    #if atom1.GetSmarts() != atom2.GetSmarts(): return True # should be very general
    basics1 = (atom1.GetAtomicNum(), atom1.GetTotalNumHs(),
               atom1.GetFormalCharge(), atom1.GetDegree(),
               atom1.GetNumRadicalElectrons(), atom1.GetIsAromatic())
    basics2 = (atom2.GetAtomicNum(), atom2.GetTotalNumHs(),
               atom2.GetFormalCharge(), atom2.GetDegree(),
               atom2.GetNumRadicalElectrons(), atom2.GetIsAromatic())
    if basics1 != basics2:
        return True
    # Check bonds and nearest neighbor identity
    bonds1 = sorted(bond_to_label_2nd(b) for b in atom1.GetBonds())
    bonds2 = sorted(bond_to_label_2nd(b) for b in atom2.GetBonds())
    return bonds1 != bonds2
if __name__ == '__main__':
    # Example invocations are kept below as comments; nothing runs when
    # the module is executed directly.
    pass
# example_reaction = '[CH3:1]/[CH:2]=[CH:3]/[CH:4]=[CH:5]/[CH2:6][O:7][C:8](=[O:9])[C:10]#[C:11][c:12]1[cH:13][cH:14][cH:15][cH:16][cH:17]1>>[CH3:1][C@@H:2]1[CH:3]=[CH:4][C@@H:5]2[CH2:6][O:7][C:8](=[O:9])[C:10]2=[C:11]1[c:12]1[cH:13][cH:14][cH:15][cH:16][cH:17]1'
# reactants, product = example_reaction.split('>>')
# reactants = ReassignMapping(reactants,iso=True)
# product = ReassignMapping(product,iso=True)
# smiles_list = rdchiralRunText_modified('[*:3]1-[*:4]~[*:5]-[*:6]-[*:1]~[*:2]-1>>[*:3]=[*:4]-[*:5]=[*:6].[*:1]#[*:2]',
# product)
# new_reactants = smiles_list[0]
# gold_mol = Chem.MolFromSmiles(reactants)
# new_mol = Chem.MolFromSmiles(new_reactants) | Lung-Yi/AutoTemplate | autotemplate/module/rdchiral_main_modified.py | rdchiral_main_modified.py | py | 18,446 | python | en | code | 2 | github-code | 90 |
def log_values_step(cost, grad_norms, epoch, batch_id, step,
                    log_likelihood, reinforce_loss, bl_loss, tb_logger, opts):
    """Print and (optionally) TensorBoard-log per-training-step metrics.

    cost and log_likelihood are per-sample tensors (reduced here with
    .mean().item()); grad_norms is a (norms, clipped_norms) pair whose
    first entry belongs to the actor.  Critic metrics are logged only
    when opts.baseline == 'critic'.
    """
    avg_cost = cost.mean().item()
    avg_loss = reinforce_loss.item()
    avg_nll = -log_likelihood.mean().item()

    grad_norms, grad_norms_clipped = grad_norms

    # Log values to screen
    print('epoch: {}, train_batch_id: {}, avg_cost: {}, loss: {}, nll: {}'.format(epoch, batch_id, avg_cost, avg_loss, avg_nll))
    print('grad_norm: {}, clipped: {}'.format(grad_norms[0], grad_norms_clipped[0]))
    # Log values to tensorboard
    if not opts.no_tensorboard:
        tb_logger.add_scalar('loss/step/avg_cost_per_step', avg_cost, step)
        tb_logger.add_scalar('loss/step/actor_loss_per_step', avg_loss, step)
        tb_logger.add_scalar('loss/step/nll_per_step', avg_nll, step)
        tb_logger.add_scalar('optimizer/grad_norm', grad_norms[0], step)
        tb_logger.add_scalar('optimizer/grad_norm_clipped', grad_norms_clipped[0], step)
        if opts.baseline == 'critic':
            tb_logger.add_scalar('critic/critic_loss', bl_loss.item(), step)
            tb_logger.add_scalar('critic/critic_grad_norm', grad_norms[1], step)
            tb_logger.add_scalar('critic/critic_grad_norm_clipped', grad_norms_clipped[1], step)
def log_values_epoch(cost, epoch, log_likelihood, reinforce_loss, tb_logger, opts, model):
    """Print and (optionally) TensorBoard-log aggregated epoch metrics.

    NOTE(review): cost and log_likelihood appear to arrive pre-reduced
    (used directly, no .mean()/.item()) -- confirm against the caller.
    """
    avg_cost = cost
    avg_loss = reinforce_loss.item()
    avg_nll = -log_likelihood
    # Log values to screen
    print('epoch: {} finished, avg_cost for epoch: {}, loss for epoch: {}, avg_nll for epoch: {}'.format(epoch, avg_cost, avg_loss, avg_nll))
    # Log values to tensorboard
    if not opts.no_tensorboard:
        tb_logger.add_scalar('loss/epoch/avg_cost_per_epoch', avg_cost, epoch)
        tb_logger.add_scalar('loss/epoch/actor_loss_per_epoch', avg_loss, epoch)
        tb_logger.add_scalar('loss/epoch/nll_per_epoch', avg_nll, epoch)
for name, param in model.named_parameters():
tb_logger.add_histogram("parameters/{}".format(name), param.data, epoch) | ChanaRoss/Thesis | RL/utils/log_utils.py | log_utils.py | py | 2,077 | python | en | code | 0 | github-code | 90 |
41231862369 | from django.db import models
import uuid
# Create your models here.
class Informe(models.Model):
    """Agricultural investment report (Spanish field names)."""
    # Non-editable UUID primary key generated at creation time.
    uniqueID=models.UUIDField(primary_key=True,default=uuid.uuid4, editable=False)
    title=models.CharField( max_length=20)
    resumen=models.CharField(max_length=50)
    # auto_now refreshes the date on EVERY save, not only on creation.
    fecha=models.DateField(auto_now=True)
    descripcion=models.TextField()
    dineroInvertido=models.IntegerField()
    # Free-form JSON blobs: crop type, staff, key activities.
    tipoCultivo=models.JSONField(null=True)
    talentoHumano=models.JSONField(null=True)
    actividadesClave=models.JSONField(null=True)
conclusiones=models.TextField() | lukesd456/Agros-RETO | agrosCRUD/models.py | models.py | py | 561 | python | en | code | 0 | github-code | 90 |
72157866218 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Project: DevelopBasic
# Software: PyCharm
# DateTime : 2018-10-08 20:01
# File : 1-29 猜年龄升级版.py
# __author__: 天晴天朗
# Email : tqtl@tqtl.org
# Number-guessing game: the user gets three attempts per round to guess
# the hard-coded age, and may opt into additional rounds.
times = 0
age = 26  # Defined outside the loop so it is not reset on every iteration.
while times < 3:
    guess_age = int(input("GuessAge:"))
    if guess_age > age:
        print("Try Smaller")
    elif guess_age < age:
        print("Try Bigger")
    else:
        print("恭喜你,猜对啦!")
        break
    times += 1
    if times == 3:
        choice = input("没猜对,你还想继续吗?(Y|y)")  # Ask whether the user wants another round.
        if choice == "y" or choice == "Y":
            times = 0  # Reset the counter to grant three more guesses.
        else:
            print("您已经猜测了%d次,次数已经用完,游戏结束啦!" % times)
| cuixiaozhao/DevelopBasic | Chapter-01/1-29 猜年龄升级版.py | 1-29 猜年龄升级版.py | py | 831 | python | en | code | 0 | github-code | 90 |
73562796775 | import cv2
# img = cv2.imread('pic2.jpeg')
# cv2.imshow('Friends',img)
# cv2.waitKey(0)
# Capture frames from the default webcam and show them in a live window.
frameWidth = 640
frameHeight = 320
cap = cv2.VideoCapture(0)
cap.set(3,frameWidth)   # property id 3 = frame width
cap.set(4,frameHeight)  # property id 4 = frame height
while True:
    success,img = cap.read()
    cv2.imshow('LiveCam',img)
if cv2.waitKey(1) & 0xFF == ord('q'):
break | dellucifer/OpenCV_tutorials | 1_image_n_video.py | 1_image_n_video.py | py | 343 | python | en | code | 2 | github-code | 90 |
18313780759 | from collections import deque
def main():
    """Greedy BFS edge-coloring of a tree read from stdin.

    Input: n, then n-1 edges (a, b).  Output: the number of colors used
    (the maximum node degree), then one 1-based color per edge in input
    order, such that edges sharing a node get distinct colors.
    """
    # --- input ---
    n = int(input())
    nodes_edges_data = {e: [] for e in range(1, n + 1)}
    edges_data_to_number = {}
    for i1 in range(n - 1):
        a, b = tuple(map(int, input().split()))
        nodes_edges_data[a].append(b)
        nodes_edges_data[b].append(a)
        # Remember the input position of each undirected edge.
        edges_data_to_number[(a, b)] = i1
        edges_data_to_number[(b, a)] = i1
    # --- processing: BFS from node 1, cycling colors per node ---
    edges_color = [-1] * (n - 1)
    # nodes_num[v] = color of the edge that reached v (start offset -1 for root).
    nodes_num = [0] * (n + 1)
    nodes_num[1] = -1
    max_edges_per_node = 0
    nodes_next = deque([1])
    nodes_next_next = deque()
    nodes_seen = set()
    while len(nodes_seen) < n:
        while nodes_next:
            e = nodes_next.pop()
            nodes_seen.add(e)
            adj_nodes = nodes_edges_data[e]
            nodes_next_next += adj_nodes
            num_of_nodes = len(adj_nodes)
            max_edges_per_node = max(max_edges_per_node, num_of_nodes)
            num = nodes_num[e]
            for adj_node in adj_nodes:
                if not adj_node in nodes_seen:
                    # Next color, skipping the one used to reach this node.
                    num = (num + 1) % max_edges_per_node
                    edges_color[edges_data_to_number[(e, adj_node)]] = num
                    #nodes_num[adj_node] = (num + 1) % max_edges_per_node
                    nodes_num[adj_node] = num
        nodes_next_next_set = set(nodes_next_next)
        nodes_next += deque(nodes_next_next_set - nodes_seen)
        nodes_next_next = deque()
    # --- output ---
    print(max(edges_color) + 1)
    for i in range(n - 1):
        print(edges_color[i] + 1)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02850/s221367112.py | s221367112.py | py | 1,586 | python | en | code | 0 | github-code | 90 |
16141250594 | import os
import sys
import pygame
pygame.init()
# Show Img.jpg in a window sized to the image; ./srv1 is expected to
# regenerate the file each cycle (e.g. fetch a fresh frame).
picture = pygame.image.load("Img.jpg")
pygame.display.set_mode(picture.get_size())
surface = pygame.display.get_surface()
while True:
    os.system('./srv1')  # refresh the image file before each redraw
    picture = pygame.image.load("Img.jpg")
    surface.blit(picture,(0,0))
    pygame.display.flip()
    key_pressed = pygame.key.get_pressed()
    if key_pressed[pygame.K_ESCAPE]:
        sys.exit(0)
| sudeepgeorge/srv | display.py | display.py | py | 437 | python | en | code | 3 | github-code | 90 |
2207166190 | import csv
import codecs
import openpyxl
from pprint import pprint
from openpyxl.chart import (
Reference,
BarChart,
Series,
ScatterChart
)
# Build meltop100.xlsx from the scraped chart CSV: a raw-data sheet, a
# thumbnail sheet and a sheet carrying a bar chart plus a scatter chart.
book = openpyxl.Workbook()
sheet1 = book.active
sheet1.title = "문제1"

# Sheet 1: copy the CSV cell-by-cell.  Numeric-looking values in columns
# 1, 4 and 5 are stored as ints so the charts below can reference them.
# FIX: the csv handle was never closed; a context manager handles that.
# (The unused `cols` loop variable was also dropped.)
with codecs.open("../crawl/data/meltop100.csv", "r", "utf-8") as csvFile:
    reader = csv.reader(csvFile, delimiter=',', quotechar='"')
    for i, row in enumerate(reader):
        for j, val in enumerate(row):
            if (j == 0 or j == 3 or j == 4) and val.isnumeric():
                val = int(val)
            sheet1.cell(row=(i+1), column=(j+1)).value = val

# Sheet 2: one pre-scaled 50x50 thumbnail per chart entry.
sheet2 = book.create_sheet()
sheet2.title = "문제2"
for i in range(100):
    newFile = './images/' + str(i+1) + '_50x50.jpg'
    img3 = openpyxl.drawing.image.Image(newFile)
    sheet2.row_dimensions[(i+1)].height = 42
    sheet2.add_image(img3, 'A' + str(i+1))

# Sheet 3: bar chart of the top-10 like counts (sheet1 column 4),
# categorized by rank (column 1).
sheet3 = book.create_sheet()
sheet3.title = "문제3"
datax = Reference(sheet1, min_col=4,
                  min_row=2, max_col=4, max_row=11)
categs = Reference(sheet1, min_col=1,
                   min_row=2, max_row=11)
chart = BarChart()
chart.add_data(data=datax)
chart.set_categories(categs)
chart.legend = None  # no legend
chart.varyColors = True
chart.title = "Top10 좋아요"
sheet3.add_chart(chart, "B2")

# Scatter chart: column 5 (percentage) against column 1 (rank).
chart = ScatterChart()
chart.style = 13
chart.x_axis.title = 'Size'
chart.y_axis.title = 'Percentage'
xvalues = Reference(sheet1, min_col=1,
                    min_row=2, max_row=11)
values = Reference(sheet1,
                   min_col=5,
                   min_row=2,
                   max_row=11)
series = Series(values, xvalues,
                title_from_data=True)
chart.series.append(series)
sheet3.add_chart(chart, "B20")

book.save('./data/meltop100.xlsx')
| pidokige02/Python_study | hello-master/crawl/meltopxl.py | meltopxl.py | py | 1,800 | python | en | code | 1 | github-code | 90 |
34050294125 | from tkinter import *
import tkinter as tk
import os
import glob
import StatTrackerAnalysis as STA
import StatTracker as ST
'''
def player_list(team):
if team == 'Joy AC':
playerlist = ['Opponent Player','#2 Vukota Mastilovic','#3 Marco Corona Duran','#4, Noah Kantorowicz','#6 Martin Browne Jr','#8 Philip Caputo','#10 Whitney Browne','#11 Bennett Kouame','#12 Marshall Urban','#13 Luca Guarin','#14 Brian Kallman','#18 Dimitri Nair','#19 Mika Folstad','#22 David Riera','#24 Zinedine Kroeten','#28 Simeon Friesen','#29 Gabriel Eduarte','Henry Elias','Diego Paulin','Griffin Price','Jonathan Robles','#0 Tucker Mann','#1 Gage Steiner']
#2021 playerlist = ['Opponent Player','#2 Devan DiGrado','#3 Marco Corona Duran','#4 Noah Kantorowicz','#5 Vukota Mastilovic','#6 David Riera','#7 Zinedine Kroeten','#8 Aiden Cavanaugh','#9 Darley Florvil','#10 Whitney Browne','#11 Emmanuel Iwe','#12 Marshall Urban','#14 Philip Caputo','#15 Otis Anderson','#16 Xavier Zengue','#17 Denilson Ramos','#18 Dennis Mensah','#19 Mika Folstad','#20 Jorge Radilla','#21 Luis Martinez Rojas','#22 Dimitri Nair','#23 Liam Vance','#24 Martin Browne Jr','#27 Abduselam Regassa','#28 Gabriel Eduarte','#29 Diego Paulin','#1 Dawson Fairchild','#0 Tucker Mann','#30 Ayuub Ahmed','Andrei Gotsmanov','Siddiq Madson-Keita','Henry Elias']
elif team == 'Joy LaMancha':
playerlist = ['Opponent Player','Phil','Gabe','Henry','Mika','Luca','Siddiq','Bennett','Dimi','Andrew','Diego','Jack','Diego GK']
elif team == 'Joy U19':
playerlist = ['Opponent Player','Adri','Henry','Michael','Phil','Mika','Victor','Elsini','Hassan','Oliver','Josiah','Noah','Sean','Isaiah','Carlitos','Yonas','Minoli','Gabe','Bennett','Sebe','Zekiah']
elif team == 'Joy Kaghani':
playerlist = ['Opponent Player','Wilton','Si','Luca','Jack','Eric','Johnny','Manny','Max','Liam']
elif team == 'Joy Tennessee Fainting':
playerlist = ['Opponent Player','Alanna','Dare','Allison','Iman','Riana','Sami','Aliviah','Sam','Ari','Caitlyn']
elif team == 'Joy 06':
playerlist = ['Opponent Player','Nico', 'Tyler', 'Oliver', 'Isaiah', 'Will', 'Luca','Benji', 'Mikey', 'Kai', 'Eric', 'Makai', 'John', 'Leo', 'Lucas', 'Johnny','Jeremy']
elif team == 'Cornell Mens Soccer':
playerlist = ['Opponent Player','#2 Gaurab Khadka','#3 Greg Pappadakis','#4 Emerson Roy','#5 Thomas Hamborg','#6 Drew Bruck','#7 Cian McNamara','#8 Nolan Zeger','#9 Aria Dehshid','#10 Owen Smith','#11 Andrew Lopez','#12 Galen Westervelt','#14 Sam Brueck','#15 Zach Miller','#16 Jonas Ricke','#17 Vance Wicker','#18 Mardoche Ntonku','#19 Justin Howe','#20 Brian Gin','#21 George Archer','#22 Blake Soto','#23 Bryce Scott','#24 Aron Mawia','#27 Federico Polidori','#28 Eddie Garces','#0 Will Bickel','#1 Jeremy Spina','#32 Mateo Ramirez']
return playerlist
'''
def match_list(team, year, base_dir='/Users/keanjohansen/Desktop/Stat Tracker/'):
    """Return ['All Matches'] plus the stem of every .csv file found in
    <base_dir>/<team>/<year>.

    *base_dir* generalizes the previously hard-coded user path (its
    default preserves existing behavior).  The working directory is
    still changed, since other parts of the app read CSVs relative to
    it.  Filename stems now use os.path.splitext, so names containing
    dots are no longer truncated at the first dot.
    """
    directory = base_dir + team + '/' + year
    os.chdir(directory)
    matches = [os.path.splitext(f)[0] for f in glob.glob('*.csv')]
    return ['All Matches'] + matches
class DropDownMenu:
    """Small wrapper around a tkinter OptionMenu bound to a StringVar.

    The current selection is available via self.clicked.get().
    """
    def __init__(self, root, dropdownlist, defaultoption, rownumber=0, columnnumber=0):
        clicked = StringVar()
        clicked.set(defaultoption)
        self.clicked = clicked
        # FIX: .grid() returns None, so the original chained call threw
        # away the widget reference.  Create the widget first, place it,
        # and keep a handle for later configuration/destruction.
        drop = OptionMenu(root, self.clicked, *dropdownlist)
        drop.grid(row=rownumber, column=columnnumber)
        self.drop = drop
def xG_graphic():
matchID = root.grid_slaves(row=1,column=0)[0]['text']
if matchID == 'Choose Match':
print('Please Select a Match')
return ''
if matchID == 'All Matches':
matchlist1 = []
for ele in matchlist:
if not ele == 'All Matches':
matchlist1.append(ele)
for match in matchlist1:
MatchDict = STA.match_stats(dfDict,match)
STA.draw_xG_graphic(MatchDict,dfDict,match)
else:
MatchDict = STA.match_stats(dfDict,matchID)
STA.draw_xG_graphic(MatchDict,dfDict,matchID)
def team_shot_chart():
matchID = root.grid_slaves(row=1,column=0)[0]['text']
if matchID == 'Choose Match':
print('Please Select a Match')
return ''
# if matchID == 'All Matches':
# print('Feature Not Available Right Now')
# return ''
if matchID == 'All Matches':
matchlist2 = []
for ele in matchlist:
if not ele == 'All Matches':
matchlist2.append(ele)
else:
matchlist2 = [matchID]
shotdf = STA.read_all_data_to_one_df(dfDict,matchlist2)
STA.shot_chart(shotdf,'no')
def player_shot_chart():
player = root.grid_slaves(row=4,column=0)[0]['text']
matchID = root.grid_slaves(row=4,column=1)[0]['text']
if player == 'Choose Player':
print('Please Select a Player')
return ''
if matchID == 'Choose Match':
print('Please Select a Match')
return ''
if matchID == 'All Matches':
matchlist2 = []
for ele in matchlist:
if not ele == 'All Matches':
matchlist2.append(ele)
else:
matchlist2 = [matchID]
shotdf = STA.read_all_data_to_one_df(dfDict,matchlist2)
STA.individual_player_shot_charts(player,shotdf,'no')
def print_player_df():
matchID = root.grid_slaves(row=4,column=1)[0]['text']
if matchID == 'Choose Match':
print('Please Select a Match')
return ''
if matchID == 'All Matches':
matchlist3 = []
for ele in matchlist:
if not ele == 'All Matches':
matchlist3.append(ele)
else:
matchlist3 = [root.grid_slaves(row=4,column=1)[0]['text']]
TeamDict = STA.team_dict(team,year)
TeamDict = STA.full_season_player_data(dfDict,TeamDict,matchlist3)
STA.individual_performers_df(TeamDict)
def season_player_progression():
if root.grid_slaves(row=4,column=0)[0]['text'] == 'Choose Player':
print('Please Select a Player')
else:
STA.season_progression_graphic(root.grid_slaves(row=4,column=0)[0]['text'],dfDict,['Cornell Mens Soccer vs Luther','Cornell Mens Soccer vs Simpson','Cornell Mens Soccer vs Rockford','Cornell Mens Soccer vs Nebraska Wesleyan','Cornell Mens Soccer vs Bethel','Cornell Mens Soccer vs Coe','Cornell Mens Soccer vs Knox','Cornell Mens Soccer vs Central','Cornell Mens Soccer vs Ripon','Cornell Mens Soccer vs Grinnell','Cornell Mens Soccer vs Buena Vista','Cornell Mens Soccer vs Illinois College','Cornell Mens Soccer vs Lake Forest'])#['Cornell vs Luther','Cornell vs Coe','Cornell vs Rockford','Cornell vs Fontbonne','Cornell vs Simpson','Cornell vs Nebraska Wesleyan','Cornell vs Central','Cornell vs Monmouth','Cornell vs Coe (away)','Cornell vs Ripon','Cornell vs Iowa Wesleyan','Cornell vs Grinnell','Cornell vs Illinois College','Cornell vs Lake Forest','Cornell vs Knox','Cornell vs Lawrence'])
def select():
global team
global year
team = stringvar.get()
year = stringvar2.get()
root.destroy()
if __name__ == '__main__':
teamlist = ['Cornell Mens Soccer','Joy AC','Joy U19','Joy LaMancha','Joy Tennessee Fainting','Joy Kaghani','Joy 06']
root = Tk()
frame = Frame(root)
root.title('Welcome')
frame.grid()
label = Label()
label.grid
root.geometry('200x200')
label = Label(root,text='Select Team').grid(row=0,column=0)
stringvar = StringVar()
stringvar.set('Team')
stringvar2 = StringVar()
stringvar2.set('Year')
teammenu = OptionMenu(root,stringvar,*teamlist).grid(row=1,column=0)
year = OptionMenu(root,stringvar2,*['2021','2022']).grid(row=2,column=0)
mybutton = Button(root,text='Select', command=select).grid(row=3,column=0)
root.mainloop()
playerlist = ST.player_list(team,year)[0]
matchlist = match_list(team,year)
dfDict = STA.read_data(team,year)
root = Tk()
frame = Frame(root)
root.title('Stat Tracker Analysis')
frame.grid()
root.geometry('500x500')
### TEAM STATS
label = Label(root,text = 'Team Stats',font=("Arial", 25))
label.grid(row=0,column=0)
DropDownMenu(root,matchlist,'Choose Match',1,0)
mybutton = Button(root,text='xG Report', command=xG_graphic).grid(row=2,column=0)
mybutton = Button(root,text='Shot Charts', command=team_shot_chart).grid(row=2,column=1)
### PLAYER STATS
label = Label(root,text = 'Player Stats',font=("Arial", 25))
label.grid(row=3,column=0)
DropDownMenu(root,playerlist,'Choose Player',4,0)
DropDownMenu(root,matchlist,'Choose Match',4,1)
mybutton = Button(root,text='Shot Chart', command=player_shot_chart).grid(row=5,column=0)
mybutton = Button(root,text='Print df', command=print_player_df).grid(row=5,column=1)
mybutton = Button(root,text='Player Progression', command=season_player_progression).grid(row=6,column=0)
'''
DropDownMenu(positionlist,'Choose Position',0,1)
DropDownMenu(minuteslist,'Minimum Minutes',0,2)
DropDownMenu(statheadings,'Choose Stat',1,0)
DropDownMenu(statheadings,'Choose Stat',1,1)
DropDownMenu(statheadings,'Choose Stat',1,2)
DropDownMenu(statheadings,'Choose Stat',1,3)
DropDownMenu(statheadings,'Choose Stat',1,4)
DropDownMenu(playerlist,'Choose Second Player',0,3)
'''
root.mainloop()
# playerlist = ['#2 Devan DiGrado','#3 Marco Corona Duran','#4 Noah Kantorowicz','#5 Vukota Mastilovic','#6 David Riera','#7 Zinedine Kroeten','#8 Aiden Cavanaugh','#9 Darley Florvil','#10 Whitney Browne','#11 Emmanuel Iwe','#12 Marshall Urban','#14 Philip Caputo','#15 Otis Anderson','#16 Xavier Zengue','#17 Denilson Ramos','#18 Dennis Mensah','#19 Mika Folstad','#20 Jorge Radilla','#21 Luis Martinez Rojas','#22 Dimitri Nair','#23 Liam Vance','#24 Martin Browne Jr','#27 Abduselam Regassa','#28 Gabriel Eduarte','#29 Diego Paulin','#1 Dawson Fairchild','#0 Tucker Mann','#30 Ayuub Ahmed','Andrei Gotsmanov','Siddiq Madson-Keita','Henry Elias']
# playerlist = ['Opponent Player','Phil','Gabe','Henry','Mika','Luca','Siddiq','Bennett','Dimi','Andrew','Diego','Jack','Diego GK']
# playerlist = ['Opponent Player','Wilton','Si','Luca','Jack','Eric','Johnny','Manny','Max','Liam']
# playerlist = ['Opponent Player','Alanna','Dare','Allison','Iman','Riana','Sami','Aliviah','Sam','Ari','Caitlyn']
# playerlist = ['Adri','Henry','Michael','Victor','Elsini','Hassan','Oliver','Josiah','Noah','Sean','Isaiah','Carlitos','Yonas','Minoli','Gabe','Bennett','Sebe','Zekiah']
# playerlist = ['Opponent Player','Nico', 'Tyler', 'Oliver', 'Isaiah', 'Will', 'Luca','Benji', 'Mikey', 'Kai', 'Eric', 'Makai', 'John', 'Leo', 'Lucas', 'Johnny','Jeremy']
| kjoh03/JOTP | StatTrackerInterface.py | StatTrackerInterface.py | py | 10,663 | python | en | code | 0 | github-code | 90 |
36925301493 | #https://leetcode.com/explore/challenge/card/may-leetcoding-challenge/534/week-1-may-1st-may-7th/3317/
#02May2020
class Solution:
def numJewelsInStones(self, J: str, S: str) -> int:
count=0
for stone in S:
if stone in J:
count=count+1
return count | NitishGadangi/Code-Jungle | JewelsandStones.py | JewelsandStones.py | py | 295 | python | en | code | 0 | github-code | 90 |
73670333096 | # coding=utf-8
from PyQt4.QtGui import QDialog, QDialogButtonBox, QGridLayout, QLabel, QComboBox, QPushButton
from PyQt4.QtCore import Qt, SIGNAL
from MaeMoneyProperties import MaeMoneyProperties
class AppLocaleSetupDialog(QDialog):
def __init__(self, parent):
QDialog.__init__(self, parent)
self.prop = MaeMoneyProperties.instance()
self.locales = {}
self.locales[self.prop.LANGUAGE_ZH_CN] = self.prop.LOCALE_ZH_CN
self.locales[self.prop.LANGUAGE_ZH_HK] = self.prop.LOCALE_ZH_HK
self.locales[self.prop.LANGUAGE_EN_US] = self.prop.LOCALE_EN_US
self.setupUi()
def setupUi(self):
self.setWindowModality(Qt.WindowModal)
self.buttonBox = QDialogButtonBox(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel | QDialogButtonBox.Ok)
self.gridLayout = QGridLayout()
self.setLayout(self.gridLayout)
self.labelAppLocale = QLabel(u"語言 Language")
self.gridLayout.addWidget(self.labelAppLocale, 0, 1, 1, 1)
self.comboBoxAppLocale = QComboBox()
for [lang, appLocale] in sorted(self.locales.iteritems()):
self.comboBoxAppLocale.addItem(lang, appLocale)
language = self.prop.getAppLanguage()
index = self.comboBoxAppLocale.findText(language)
self.comboBoxAppLocale.setCurrentIndex(index)
self.gridLayout.addWidget(self.comboBoxAppLocale, 0, 2, 1, 1)
self.gridLayout.addWidget(QLabel(self.tr("Current setting")), 1, 1, 1, 1)
self.gridLayout.addWidget(QLabel(self.prop.getAppLanguage()),
1, 2, 1, 1)
self.setLanguageButton = QPushButton(self.tr("Set language"))
self.gridLayout.addWidget(self.setLanguageButton, 2, 1, 1, 2)
self.setWindowTitle(self.tr("Language setup"))
self.connect(self.buttonBox, SIGNAL("accepted()"), self.accept)
self.connect(self.buttonBox, SIGNAL("rejected()"), self.reject)
self.connect(self.setLanguageButton, SIGNAL("clicked()"), self.setLanguage)
def setLanguage(self):
indexSelected = self.comboBoxAppLocale.currentIndex()
locale = self.comboBoxAppLocale.itemData(indexSelected)
self.prop.setAppLocale(locale)
self.accept()
# try:
# from PyQt4.QtMaemo5 import QMaemo5InformationBox
# timeoutInMs = 3000
# QMaemo5InformationBox.information(
# self.parent,
# self.tr("Language change will take effect next time when you run Stock Matcher"),
# timeoutInMs)
# except ImportError:
# qDebug("Can't use QMaemo5InformationBox")
| shinghei/MaeMoney | src/AppLocaleSetupDialog.py | AppLocaleSetupDialog.py | py | 2,702 | python | en | code | 2 | github-code | 90 |
3419014041 | vowels = ['a', 'e', 'i', 'o', 'u']
found = {}
word = input("Provide a word to search for vowels: ")
for letter in word:
if letter in vowels:
#if letter not in found:
# found[letter]=1
#else:
# found[letter] += 1
#handigere methode
found.setdefault(letter, 0)
found[letter] += 1
#for letter in found:
print(found)
#### version 2
word = input("Provide a word to search for vowels: ")
print(vowels.intersection(set(word))) | kuphus/python_code | challenges/vowel.py | vowel.py | py | 496 | python | en | code | 0 | github-code | 90 |
13306932181 | """
Primary interface for handling recording session data.
"""
from functools import reduce
import operator as op
from scipy.interpolate import interp1d
import numpy as np
from roto import arrays
from roto.decorators import memoize, lazyprop
from pouty import debug
from . import time, motion, lfp, cluster
from .. import store
from ..tools import binned, adaptive, spikes
from .data import circinterp1d, Arenas
from .parsers import parse_session, parse_ttc
INFO_PVAL_SAMPLES = 1000
SHUFFLE_MIN_OFFSET = 20.0
def record(s_id):
"""Get a dict with the /sessions row for the given session id."""
return parse_session(s_id)
@memoize
def get(*args, **kwargs):
return RecordingSession(*args, **kwargs)
class RecordingSession(object):
"""
Load recording session data and perform session-wide computations.
"""
def __init__(self, index, **mapargs):
# Store attributes from /sessions row data
self.attrs = parse_session(index, raise_on_fail=True)
keymap = {k:k for k in self.attrs}
keymap['path'] = 'folder'
keymap['group'] = 'path'
for key in self.attrs:
setattr(self, keymap[key], self.attrs[key])
# Load session-level data sources
self.arena = Arenas(self.attrs['arena'])
self.motion = motion.MotionData(self.path)
self.lfp = lfp.ContinuousData(self.path)
# Adaptive maps
self.adaptive_ratemap = adaptive.AdaptiveRatemap(self.motion,
**mapargs)
self.adaptive_phasemap = adaptive.AdaptivePhasemap(self.motion,
**mapargs)
# Session timing
self.tstart = max(self.motion.t[0], self.lfp.t[0])
self.tend = min(self.motion.t[-1], self.lfp.t[-1])
self.tlim = (self.tstart, self.tend)
self.duration = self.tend - self.tstart
# Load spiking data for each cluster
f = store.get()
self.clusters = {}
for C in f.root.recordings.where("session_id==%d" % self.id):
cdata = cluster.ClusterData(self.path, C)
self.clusters[cdata.ttc] = cdata
def get_cluster(self, cell):
"""Get the cluster data for a given cell."""
ttc = parse_ttc(cell)
if ttc not in self.clusters:
raise KeyError('no cluster data for cluster %s' % ttc)
return self.clusters[ttc]
# Mapping methods
def dirmap(self, cell, bursts=False, speed='fast', intervals=None,
bins=None):
"""Compute a directional rate map (1D) for the given cell.
Arguments:
cell -- (tetrode, cluster) key for the cluster
Keyword arguments:
bursts -- restrict spike train to burst onset timing
speed -- ('fast'|'slow'|'still'|None) velocity filter
intervals -- restrict data to the given list of time intervals
bins -- int, number of directional bins (default {0})
Returns:
rates -- (bins,)-shaped array
angles -- (bins,)-shaped array
"""
(_, ds), (_, dp) = self._dirdata(cell, speed, intervals, bursts)
R, phi = binned.dirmap(ds, dp, bins=bins, freq=self.motion.Fs)
return R, phi
dirmap.__doc__ = dirmap.__doc__.format(binned.DEFAULT_DIR_BINS)
def speedmap(self, cell, bursts=False, intervals=None, min_occ=None,
bins=None, slim=None):
"""Compute a speed rate map (1D) for the given cell.
Arguments:
cell -- (tetrode, cluster) key for the cluster
Keyword arguments:
bursts -- restrict spike train to burst onset timing
intervals -- restrict data to the given list of time intervals
min_occ -- minimum bin occupancy for masking (default {0})
bins -- int, number of speed bins (default {1})
slim -- (smin, smax) speed limits tuple (default ({2}, {3}))
Returns:
rates -- (bins,)-shaped array
speeds -- (bins,)-shaped array
"""
speed = None
smin, smax = (None, None) if slim is None else slim
(_, vs), (_, vp) = self._speeddata(cell, speed, intervals, bursts)
R, speeds = binned.speedmap(vs, vp, bins=bins, smin=smin, smax=smax,
min_occ=min_occ, freq=self.motion.Fs)
return R, speeds
speedmap.__doc__ = speedmap.__doc__.format(binned.DEFAULT_SPEED_MIN_OCC,
binned.DEFAULT_SPEED_BINS, binned.DEFAULT_SPEED_MIN,
binned.DEFAULT_SPEED_MAX)
def ratemap(self, cell, bursts=False, adaptive=True, speed='fast',
intervals=None, min_occ=None, bins=None):
"""Compute a spatial rate map for the given cell.
Note: `min_occ` and `bins` are ignored for `adaptive=True`. The value
for `min_occ` (default {0}) should be reduced as `bins` (default {1}) is
increased above the default number.
Arguments:
cell -- (tetrode, cluster) key for the cluster
Keyword arguments:
bursts -- restrict spike train to burst onset timing
adaptive -- boolean, use adaptive kernel smoothing
speed -- ('fast'|'slow'|'still'|None) velocity filter
intervals -- restrict data to the given list of time intervals
min_occ -- minimum bin occupancy for spatial masking (default {0})
bins -- int, number of spatial bins along each dimension (default {1})
Returns:
(bins,bins)-shaped array
"""
(_, xs, ys), (_, xp, yp) = self._posdata(cell, speed, intervals, bursts)
if adaptive:
R = self.adaptive_ratemap(xs, ys, xp, yp)
else:
R = binned.ratemap(xs, ys, xp, yp, bins=bins, min_occ=min_occ,
freq=self.motion.Fs)
return R
ratemap.__doc__ = ratemap.__doc__.format(binned.DEFAULT_MIN_OCC,
binned.DEFAULT_BINS)
def phasemap(self, cell, bursts=False, adaptive=True, speed='fast',
intervals=None, min_spikes=5, bins=None):
"""Compute a spatial phase map for the given cell.
Arguments are the same as `ratemap`.
"""
ts, xs, ys = self._spkposdata(cell, speed, intervals, bursts)
phase = self.lfp.F('phase', ts)
if adaptive:
P = self.adaptive_phasemap(xs, ys, phase)
else:
P = binned.phasemap(xs, ys, phase, bins=bins,
min_spikes=min_spikes, freq=self.motion.Fs)
return P
# Information rate methods
def directional_info(self, cell, speed='fast', intervals=None, bursts=False,
pvalue=False):
"""Compute directional information for a cell with optional p-value."""
(ts, ds), (tm, dm) = self._dirdata(cell, speed, intervals, bursts)
I = binned.dirinfo(ds, dm)
if not pvalue:
return I
debug('spatial_info: computing shuffled direction information values')
self._compress_intervals(ts, tm, speed=speed)
dstar = circinterp1d(tm, dm, zero_centered=False, copy=False)
Istar = [binned.dirinfo(dstar(tstar), dm) for tstar in
self._shuffle_spikes(ts, tm)]
pval = self._shuffle_pvalue(I, Istar)
return I, pval
def speed_info(self, cell, intervals=None, bursts=False, pvalue=False):
"""Compute speed information for a cell with optional p-value."""
speed = None
(ts, vs), (tm, vm) = self._speeddata(cell, speed, intervals, bursts)
I = binned.speedinfo(vs, vm)
if not pvalue:
return I
debug('spatial_info: computing shuffled speed information values')
self._compress_intervals(ts, tm, speed=speed)
vstar = interp1d(tm, vm, copy=False)
Istar = [binned.speedinfo(vstar(tstar), vm) for tstar in
self._shuffle_spikes(ts, tm)]
pval = self._shuffle_pvalue(I, Istar)
return I, pval
def spatial_info(self, cell, speed='fast', intervals=None, bursts=False,
pvalue=False):
"""Compute spatial information for a cell with optional p-value."""
(ts, xs, ys), (tm, xp, yp) = self._posdata(cell, speed, intervals,
bursts)
I = binned.rateinfo(xs, ys, xp, yp)
if not pvalue:
return I
debug('spatial_info: computing shuffled spatial information values')
self._compress_intervals(ts, tm, speed=speed)
xstar = interp1d(tm, xp, copy=False)
ystar = interp1d(tm, yp, copy=False)
Istar = [binned.rateinfo(xstar(tstar), ystar(tstar), xp, yp)
for tstar in self._shuffle_spikes(ts, tm)]
pval = self._shuffle_pvalue(I, Istar)
return I, pval
def phase_space_info(self, cell, speed='fast', intervals=None, bursts=False,
pvalue=False):
"""Compute phase-position mutual information with optional p-value."""
ts, xs, ys = self._spkposdata(cell, speed, intervals, bursts)
phase = self.lfp.F('phase', ts)
I = binned.phaseinfo(xs, ys, phase)
if not pvalue:
return I
debug('spatial_info: computing shuffled phase information values')
Istar = np.empty(INFO_PVAL_SAMPLES)
for i in range(INFO_PVAL_SAMPLES):
np.random.shuffle(phase)
Istar[i] = binned.phaseinfo(xs, ys, phase)
pval = self._shuffle_pvalue(I, Istar)
return I, pval
# Filtering methods
def spike_filter(self, cell, speed='fast', intervals=None, bursts=False):
"""Spike times filtered by speed, time intervals, and/or bursting."""
return self._spkfilt(cell, speed, intervals, bursts)
def motion_filter(self, speed='fast', intervals=None):
"""Motion index array for filtering by speed or time intervals."""
return self._occfilt(speed, intervals)
# Private methods for shuffle-testing spike trains
def _compress_intervals(self, spike_t, motion_t, speed='fast'):
"""Remove speed interval gaps in spike and motion timing in place."""
if speed is None:
return
ints = getattr(self.motion, '%s_intervals' % speed)
t0 = motion_t[-1] - motion_t[0]
for i in range(ints.shape[0] - 1, 1, -1):
start, prev = ints[i, 0], ints[i - 1, 1]
gap = start - prev
spike_t[spike_t>=start] -= gap
motion_t[motion_t>=start] -= gap
debug('_compress_intervals: {:.1f}% compression',
100 - 100 * (motion_t[-1] - motion_t[0]) / t0)
def _shuffle_spikes(self, spike_t, motion_t, samples=INFO_PVAL_SAMPLES,
min_offset=SHUFFLE_MIN_OFFSET):
"""Generate random offset shuffles of spike times."""
start, end = motion_t[0], motion_t[-1]
dur = end - start
shuffled = np.empty_like(spike_t)
for i in range(samples):
offset = min_offset + (dur - 2 * min_offset) * np.random.rand()
shuffled[:] = spike_t + offset
shuffled[shuffled > end] -= dur
yield shuffled
def _shuffle_pvalue(self, observed, shuffled):
"""Compute a p-value against a shuffled distribution."""
shuffled = np.asarray(shuffled)
pval = ((shuffled >= observed).sum() + 1) / shuffled.size
pval = min(pval, 1.0) # avoid 1.001
return pval
# Private methods for filtered spike and occupancy map data
def _posdata(self, cell, speed, intervals, bursts):
"""Retrieve filtered positional spike and occupancy data."""
spk = self._spkposdata(cell, speed, intervals, bursts)
occ = self._occposdata(speed, intervals)
return spk, occ
def _dirdata(self, cell, speed, intervals, bursts):
"""Retrieve filtered directional spike and occupancy data."""
spk = self._spkdirdata(cell, speed, intervals, bursts)
occ = self._occdirdata(speed, intervals)
return spk, occ
def _speeddata(self, cell, speed, intervals, bursts):
"""Retrieve filtered speed spike and occupancy data."""
spk = self._spkspeeddata(cell, speed, intervals, bursts)
occ = self._occspeeddata(speed, intervals)
return spk, occ
def _spkposdata(self, *args):
"""Filtered positional spike data."""
st = self._spkfilt(*args)
return st, self.motion.F('x', st), self.motion.F('y', st)
def _spkdirdata(self, *args):
"""Filtered directional spike data."""
st = self._spkfilt(*args)
return st, self.motion.F('md', st)
def _spkspeeddata(self, *args):
"""Filtered speed spike data."""
st = self._spkfilt(*args)
return st, self.motion.F('speed_cm', st)
def _spkfilt(self, cell, speed, intervals, bursts):
"""Generate a filtered spike timing array."""
st = self.get_cluster(cell).spikes
filters = []
if speed is not None:
filters.append(self.motion.speed_filter(st, speed=speed))
if intervals is not None:
filters.append(time.select_from(st, intervals))
if bursts:
if type(bursts) is float:
bf = spikes.burst_onset(st, isi_thresh=bursts)
else:
bf = spikes.burst_onset(st)
filters.append(bf)
session_timing = np.logical_and(st >= self.tstart, st <= self.tend)
ix = reduce(op.and_, filters, session_timing)
return st[ix]
def _occposdata(self, *args):
"""Filtered positional occupancy data."""
ix = self._occfilt(*args)
return self.motion.t[ix], self.motion.x[ix], self.motion.y[ix]
def _occdirdata(self, *args):
"""Filtered directional occupancy data."""
ix = self._occfilt(*args)
return self.motion.t[ix], self.motion.md[ix]
def _occspeeddata(self, *args):
"""Filtered speed occupancy data."""
ix = self._occfilt(*args)
return self.motion.t[ix], self.motion.speed_cm[ix]
def _occfilt(self, speed, intervals):
"""Create an occupancy index filter."""
filters = []
if speed is not None:
if speed in motion.SPEED_LIMITS:
filters.append(getattr(self.motion, '%s_index' % speed))
else:
filters.append(self.motion.speed_index(speed))
if intervals is not None:
filters.append(time.select_from(self.motion.t, intervals))
session_timing = np.logical_and(self.motion.t >= self.tstart,
self.motion.t <= self.tend)
ix = reduce(op.and_, filters, session_timing)
return ix
| jdmonaco/skaggs | skaggs/session.py | session.py | py | 14,514 | python | en | code | 0 | github-code | 90 |
6675382699 | import pygame
import random
def get_highscore():
with open("BrickCatcherHighscores.txt", "r+") as f:
return f.read()
pygame.init()
screen_width = 800
screen_height = 600
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption("Catcher game")
clock = pygame.time.Clock()
boardX = 370
boardY = 530
board_length = 100
board = pygame.Rect(boardX, boardY, board_length, 10)
board_speed = 10
squareX = random.randrange(0, 750, 10)
squareY = 0
square = pygame.Rect(squareX, squareY, 20, 20)
square_speed = 5
score = 0
def Game():
global board, square, score
FPS_num = clock.get_fps()
pygame.draw.rect(screen, (255, 255, 255), board)
pygame.draw.rect(screen, (255, 255, 255), square)
font = pygame.font.Font("freesansbold.ttf", 32)
highScoreText = font.render("High Score: " + get_highscore(), True, (255, 255, 255))
scoreText = font.render("Score: " + str(score), True, (255, 255, 255))
FPS = font.render("FPS: " + str(round(FPS_num)), True, (255, 255, 255))
screen.blit(scoreText, (0, 30))
screen.blit(highScoreText, (0, 64))
screen.blit(FPS, (0, 100))
if isCollision(board, square):
square.x = random.randrange(0, 750, 10)
square.y = 0
if pygame.key.get_pressed()[pygame.K_a] or pygame.key.get_pressed()[pygame.K_LEFT]:
board.x += -board_speed
if pygame.key.get_pressed()[pygame.K_d] or pygame.key.get_pressed()[pygame.K_RIGHT]:
board.x += board_speed
square.y += square_speed
if board.x >= 730:
board.x = 730
if board.x <= 0:
board.x = 0
if square.y >= screen_height:
Change_Highscore(score)
gameOver()
def Change_Highscore(score):
if score > int(get_highscore()):
highscore = open("BrickCatcherHighscores.txt", "w")
highscore.write(str(score))
highscore.close()
def isCollision(player, box):
global score, square_speed, board_speed
if player.colliderect(box):
score += 1
square_speed += 1
board_speed += 1
return True
def gameOver():
global running, score
font = pygame.font.Font("freesansbold.ttf", 32)
gameOverText = font.render("GAME OVER!", True, (255, 255, 255))
playAgainText = font.render("Play again(Y/N)?", True, (255, 255, 255))
screen.blit(playAgainText, ((screen_width // 2) - 135, (screen_height // 2) + 50))
screen.blit(gameOverText, ((screen_width//2) - 120 , (screen_height//2 - 100)))
if pygame.key.get_pressed()[pygame.K_y]:
Reset()
elif pygame.key.get_pressed()[pygame.K_n]:
running = False
def Reset():
global board, square, board_speed, boardX, boardY, squareX, score, squareY, square_speed
boardX = 370
boardY = 530
board = pygame.Rect(boardX, boardY, board_length, 10)
board_speed = 10
squareX = random.randrange(0, 750, 10)
squareY = 0
square = pygame.Rect(squareX, squareY, 20, 20)
square_speed = 5
score = 0
running = True
while running:
screen.fill((0, 0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
Game()
pygame.display.update()
clock.tick(60) | Chrisson543/brick-catcher-game | Brick Catcher Game.py | Brick Catcher Game.py | py | 3,215 | python | en | code | 0 | github-code | 90 |
3795221141 | from sqlflow_models import ARIMAWithSTLDecomposition
import unittest
import tensorflow as tf
from datetime import datetime, timedelta
import numpy as np
class TestARIMAWithSTLDecompose(unittest.TestCase):
def setUp(self):
self.order = [7, 0, 2]
self.period = [7, 30]
self.date_format = '%Y-%m-%d'
self.train_start = '2014-04-01'
self.train_end = '2014-08-31'
self.forecast_start = '2014-09-01'
self.forecast_end = '2014-09-30'
def str2datetime(self, date_str):
if isinstance(date_str, bytes):
date_str = date_str.decode('utf-8')
return datetime.strptime(str(date_str), self.date_format)
def datetime2str(self, date):
return datetime.strftime(date, self.date_format)
def create_dataset(self):
def generator():
start_date = self.str2datetime(self.train_start)
end_date = self.str2datetime(self.train_end)
delta = timedelta(days=1)
while start_date <= end_date:
date_str = np.array(self.datetime2str(start_date))
label = np.random.random(size=[1]) * 1e8
yield date_str, label
start_date += delta
def dict_mapper(date_str, label):
return {'time': date_str}, label
dataset = tf.data.Dataset.from_generator(
generator, output_types=(tf.dtypes.string, tf.dtypes.float32)
)
dataset = dataset.map(dict_mapper)
return dataset
def prediction_days(self):
pred_start = self.str2datetime(self.forecast_start)
pred_end = self.str2datetime(self.forecast_end)
return (pred_end - pred_start).days + 1
def test_main(self):
model = ARIMAWithSTLDecomposition(order=[7, 0, 2],
period=[7, 30],
date_format=self.date_format,
forecast_start=self.forecast_start,
forecast_end=self.forecast_end)
prediction = model.sqlflow_train_loop(self.create_dataset())
self.assertEqual(len(prediction), self.prediction_days())
if __name__ == '__main__':
unittest.main()
| Kelang-Tian/models | tests/test_arima_with_stl_decomposition.py | test_arima_with_stl_decomposition.py | py | 2,245 | python | en | code | null | github-code | 90 |
14042281340 | import sys
sys.path.insert(0, './preprocessor')
sys.path.insert(0, './model')
# import preprocessor
from randomForestPreprocessor import RandomForestPreprocessor
from PCAPreprocessor import PCAPreprocessor
# import model
from sklearn.ensemble import RandomForestRegressor
from SVMR import SVMR
# required import
from finalSolver import FinalSolver
from itertools import product
from math import fabs
import numpy as np
from lgbm import LGBM
N_data = int(sys.argv[1])
selected_preprocessor = sys.argv[2]
N_selected_feature = int(sys.argv[3])
selected_model = sys.argv[4]
writePredict = sys.argv[5]
preprocessor_option = {
"randomforest": RandomForestPreprocessor(),
"pca": PCAPreprocessor(),
}
model_option = {
"svr": SVMR(3),
"randomforest": RandomForestRegressor(
criterion = "mae", n_jobs = -1, n_estimators = 100, min_samples_leaf = 0.01,
max_features = "sqrt", warm_start = True, oob_score = True),
"lgbm": LGBM(),
}
# loss function
def WMAE(y, y_hat):
n, d = y.shape
ret = 0
weight = [300, 1, 200]
for n_idx, d_idx in product(range(n), range(d)):
ret += weight[d_idx] * fabs(y[n_idx][d_idx] - y_hat[n_idx][d_idx] )
return ret / n
def NAE(y, y_hat):
n, d = y.shape
ret = 0
for n_idx, d_idx in product(range(n), range(d)):
ret += fabs(y[n_idx][d_idx] - y_hat[n_idx][d_idx] ) / y[n_idx][d_idx]
return ret / n
if __name__ == '__main__':
solver = FinalSolver()
if N_data != -1:
solver.readXTrain("./dataset/X_train_small_{}.npz".format(N_data))
solver.readYTrain("./dataset/Y_train_small_{}.npz".format(N_data))
solver.readXValidation("./dataset/X_validation_{}.npz".format(N_data))
solver.readYValidation("./dataset/Y_validation_{}.npz".format(N_data))
solver.readXTest("./dataset/X_test.npz")
else:
solver.readXTrain("./dataset/X_train.npz")
solver.readYTrain("./dataset/Y_train.npz")
solver.readXTest("./dataset/X_test.npz")
print("trainning shape")
print("x: {}".format(solver.x_train.shape))
print("y: {}".format(solver.y_train.shape))
print("\n###\n")
# preprocess
if selected_preprocessor != "n":
solver.scaleY(np.array([300.0, 1.0, 200.0]))
solver.setPreprocessor(selected_preprocessor, preprocessor_option[selected_preprocessor])
solver.preprocessData(N_selected_feature)
print("x_train shape after preprocess: {}".format(solver.x_train.shape))
print(solver.preprocessor.report())
print("preprocessDone")
print("\n###\n")
# model train
solver.setModel(selected_model, model_option[selected_model])
solver.fit()
print("\n###\n")
# error
# WMAE
solver.setLossFunction(WMAE)
print("WMAE EIN: {}".format(solver.calculateEin()))
if N_data != -1:
print("Validation: {}".format(solver.calculateValError()))
print("\n###\n")
# NAE
solver.setLossFunction(NAE)
print("NAE EIN: {}".format(solver.calculateEin()))
if N_data != -1:
print("Validation: {}".format(solver.calculateValError()))
print("\n###\n")
if writePredict == "w":
solver.writePredict(solver.x_test)
if hasattr(solver.model, "report"):
solver.model.report()
| tall15421542/Machine-Learning-2019 | solver/Main.py | Main.py | py | 3,333 | python | en | code | 0 | github-code | 90 |
36293506574 | # -*- coding: utf-8 -*-
# """
# Created on Sun Nov 6 14:41:52 2022
# @author: qomon
# filename='pi_txt.txt'
# with open('pi_txt.txt') as file:
# pi=file.read()
# birthday='20042001'
# if birthday in pi:
# print('yes there is ')
# else:
# print('there is no')
import pickle
# info="i love you mommy"
# info1={'my mother name:':'Motabar','her age':'45'}
# with open('royhat.dat','wb') as file:
# pickle.dump(info1,file)
# pickle.dump(info,file)
with open('royhat.dat','rb') as file:
infos=pickle.load(file)
infos2=pickle.load(file)
print(infos2)
print(infos) | Farrukh-Maruf/python-works-from-anvarnarz | pi_spyder.py | pi_spyder.py | py | 589 | python | en | code | 4 | github-code | 90 |
24918680177 | import torch
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Reshaping, stacking-comnbine esnors, squeezing-removes 1D , unsqueezing adds 1D
y = torch.arange(1, 9)
z=torch.arange(10, 18)
print(y, y.shape)
y_res = y.reshape(2, 4)
print(y_res, y_res.shape)
y_stacked=torch.stack((y, z), dim=0)
print(y_stacked, y_stacked.shape,y_stacked.squeeze(), y_stacked.squeeze().shape)
new_tens=torch.tensor([[1, 2, 3, 4]])
print(new_tens.shape,new_tens.squeeze(),new_tens.squeeze().shape) # squeeze removes 1D
print(new_tens.unsqueeze(0),new_tens.unsqueeze(0).shape) # unsqueeze adds 1D
#permute
#buzena
xyz=torch.permute(new_tens,(2,1))
print(xyz,xyz.size()) | dhakalmahima188/Labs-6th-sem | pytorch/2.py | 2.py | py | 672 | python | en | code | 0 | github-code | 90 |
37143874830 | import copy
import mock
from fuel_bootstrap import consts
from fuel_bootstrap.tests import base
PARSED_ARGS = {'extend_kopts': None,
'no_compress': False,
'output_dir': None,
'image_build_dir': None,
'post_script_file': None,
'root_ssh_authorized_file': None,
'activate': False,
'ubuntu_release': None,
'root_password': None,
'no_default_direct_repo_addr': False,
'https_proxy': None,
'http_proxy': None,
'direct_repo_addr': None,
'label': None,
'repos': None,
'kernel_flavor': None,
'certs': None,
'extra_dirs': None,
'no_default_packages': False,
'no_default_extra_dirs': False,
'packages': None,
'config_file': consts.CONFIG_FILE}
UUID = 'fake_uuid'
PATH = 'fake_path'
class TestBuildCommand(base.BaseTest):
    """CLI-level tests for the `build` command (image builder is mocked)."""
    @mock.patch('fuel_bootstrap.utils.bootstrap_image.make_bootstrap',
                return_value=(UUID, PATH))
    def test_parser(self, mock_make_bootstrap):
        # A bare `build` must forward the default parsed args and report
        # the built image on stdout.
        self.app.run(['build'])
        mock_make_bootstrap.assert_called_once_with(PARSED_ARGS)
        self.assertEqual("Bootstrap image {0} has been built: {1}\n"
                         .format(UUID, PATH),
                         self.app.stdout.getvalue())
    @mock.patch('fuel_bootstrap.utils.bootstrap_image.activate',
                return_value=(UUID, PATH))
    @mock.patch('fuel_bootstrap.utils.bootstrap_image.import_image',
                return_value=UUID)
    @mock.patch('fuel_bootstrap.utils.bootstrap_image.make_bootstrap',
                return_value=(UUID, PATH))
    def test_parser_activate(self, mock_make_bootstrap,
                             mock_import, mock_activate):
        # Decorators apply bottom-up, so the mock arguments arrive in the
        # order make_bootstrap, import_image, activate.
        self.app.run(['build', '--activate'])
        parsed_args = copy.deepcopy(PARSED_ARGS)
        parsed_args['activate'] = True
        mock_make_bootstrap.assert_called_once_with(parsed_args)
        mock_import.assert_called_once_with(PATH)
        mock_activate.assert_called_once_with(UUID)
        self.assertEqual("Bootstrap image {0} has been built: {1}\n"
                         "Bootstrap image {0} has been activated.\n"
                         .format(UUID, PATH),
                         self.app.stdout.getvalue())
| skdong/fuel-bootstrap | fuel_bootstrap/tests/test_build_command.py | test_build_command.py | py | 2,438 | python | en | code | 0 | github-code | 90 |
18168617599 | X, K, D = map(int, input().split())
# Make exactly K jumps of length D on a number line starting at X;
# minimise the final absolute position (problem appears symmetric in
# sign, hence the abs).
X = abs(X)
a = K*D
if X >= a:
    # Even jumping toward zero every time we cannot reach/overshoot it.
    result = X-a
else:
    # b = jumps that bring us closest to zero without crossing;
    # c and d are the two nearest reachable distances on either side.
    b = X//D
    c = (X-b*D)
    d = abs(X-(b+1)*D)
    if c < d:
        if b%2==K%2:
            # K-b leftover jumps is even, so we can oscillate and stay at c.
            result = c
        else:
            result = d
    else:
        if (b+1)%2==K%2:
            result=d
        else:
            result=c
print(result) | Aasthaengg/IBMdataset | Python_codes/p02584/s332803963.py | s332803963.py | py | 343 | python | en | code | 0 | github-code | 90 |
18163536189 | import sys
def resolve(in_):
    """Read 'N X T' from the line iterator and return the minutes needed.

    The machine makes X takoyaki per batch and each batch takes T
    minutes, so the answer is ceil(N / X) * T.  The original simulated
    batch-by-batch (O(N/X)); ceiling division is O(1) and equivalent.
    """
    N, X, T = map(int, next(in_).split())
    # Integer ceiling division without floats: -(-N // X) == ceil(N / X).
    return -(-N // X) * T
def main():
    """Entry point: feed stdin to the solver and print the answer."""
    print(resolve(sys.stdin))


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p02576/s050840186.py | s050840186.py | py | 282 | python | en | code | 0 | github-code | 90 |
18115759099 | inf=float("inf")
def merge(a, left, mid, right):
    """Merge the sorted runs a[left:mid] and a[mid:right] in place.

    Returns the number of elements placed (always right - left), which
    the caller accumulates as the operation count.

    Improvements over the original: the halves are copied with slices
    instead of index loops, and the infinity sentinel is local instead
    of relying on a module-level global ``inf``.
    """
    INF = float("inf")  # sentinel: compares greater than any real element
    count = 0
    L = a[left:mid] + [INF]
    R = a[mid:right] + [INF]
    i = 0
    j = 0
    for k in range(left, right):
        count += 1
        if L[i] <= R[j]:
            a[k] = L[i]
            i += 1
        else:
            a[k] = R[j]
            j += 1
    return count
def mergeSort(a, left, right):
    """Sort a[left:right] in place; return the total merge move count."""
    if right - left <= 1:
        # Runs of length 0 or 1 are already sorted and cost nothing.
        return 0
    mid = (left + right) // 2
    total = mergeSort(a, left, mid)
    total += mergeSort(a, mid, right)
    total += merge(a, left, mid, right)
    return total
# Driver: read n and the array, merge-sort in place while counting
# element moves, then print the sorted array and the count.
n=int(input())
*a,=map(int,input().split())
ans=mergeSort(a,0,n)
print(*a)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02272/s860625532.py | s860625532.py | py | 706 | python | en | code | 0 | github-code | 90 |
15798364298 |
import os
import re
import random
import hashlib
from datetime import datetime
from flask import Flask, render_template, request, redirect, session
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import secure_filename
import functional as fn
# App settings
app = Flask(__name__)
db = SQLAlchemy()
# NOTE(review): hard-coded trivial secret key -- session cookies can be
# forged; load a strong random key from configuration instead.
app.secret_key = "1234"
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///pmf.db'
app.config['UPLOAD_FOLDER'] = 'static/files'
# Models
class User(db.Model):
    # Registered account; password is stored as the hash produced by
    # fn.encrypte_password (see reg/login views).
    __tablename__ = "user"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(100), unique=True)
    email = db.Column(db.String(100), unique=True)
    password = db.Column(db.String(512))
    date_of_reg = db.Column(db.DateTime())
class Question(db.Model):
    # A question posted by a user.
    __tablename__ = "question"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))
    question = db.Column(db.Text)
    author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    author = db.relationship("User", backref="author")
    date = db.Column(db.DateTime())
class Tag(db.Model):
    # A unique tag label.
    __tablename__ = "tag"
    id = db.Column(db.Integer, primary_key=True)
    tag = db.Column(db.String(50), unique=True)
class Question__Tag(db.Model):
    # Many-to-many join table between questions and tags.
    __tablename__ = "question__tag"
    id = db.Column(db.Integer, primary_key=True)
    tag_id = db.Column(db.Integer, db.ForeignKey("tag.id"))
    tag = db.relationship("Tag", backref="tags")
    question_id = db.Column(db.Integer, db.ForeignKey("question.id"))
    question = db.relationship("Question", backref="question_obj")
class QuestionAnswer(db.Model):
    # An answer posted to a question.
    __tablename__ = "question_answer"
    id = db.Column(db.Integer, primary_key=True)
    text = db.Column(db.Text, nullable=False)
    question_id = db.Column(db.Integer, db.ForeignKey("question.id"))
    question = db.relationship("Question", backref="question_answer_obj")
    author_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    author = db.relationship("User", backref="author_answer")
    date = db.Column(db.DateTime())
class UserAvatar(db.Model):
    # Filesystem path of a user's avatar image (one row per user).
    __tablename__ = "user_avatar"
    id = db.Column(db.Integer, primary_key=True)
    path = db.Column(db.String(255))
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    user = db.relationship("User", backref="user_avatar")
class Like(db.Model):
    # A like: content_id is the liked question, author_id its author,
    # user_id the liker (only user_id is a real foreign key).
    __tablename__ = "like"
    id = db.Column(db.Integer, primary_key=True)
    content_id = db.Column(db.Integer)
    author_id = db.Column(db.Integer)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    user = db.relationship("User", backref="user")
class QuestionImage(db.Model):
    # Filesystem path of an image attached to a question.
    __tablename__ = "question_image"
    id = db.Column(db.Integer, primary_key=True)
    question_id = db.Column(db.Integer, db.ForeignKey("question.id"))
    path = db.Column(db.String(255))
    question = db.relationship("Question", backref="question_images")
class Follower(db.Model):
    # follower_id follows user_id (plain integer columns, no FK).
    __tablename__ = "follower"
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer)
    follower_id = db.Column(db.Integer)
# DB init
db.init_app(app)
with app.app_context():
    db.create_all()
# Views
@app.route("/")
def index():
if session.get('loggedin', False):
return redirect("/main")
return render_template("index.html")
@app.route("/reg", methods=['GET', 'POST'])
def reg():
if session.get('loggedin', False):
return render_template("main.html")
if request.method == 'POST':
username = request.form['username']
if not re.match(r"^[a-zA-Z0-9_]{1,100}$", username):
return "Username is wrong"
email = request.form['email']
if not re.match(r"^[a-zA-Z0-9.]{1,20}@[a-z]{1,10}\.[a-z]{1,5}$", email):
return "Email is wrong"
password = request.form['password']
if not re.match(r"^[a-zA-Z0-9_\-\.]{8,20}$", password):
return "Password is wrong"
re_password = request.form['re_password']
if password != re_password:
return "Password are not similar"
username_exist = User.query.filter_by(username=username).first()
if username_exist:
return 'Username is already used'
email_exist = User.query.filter_by(email=email).first()
if email_exist != None:
return 'Email is already used'
password = fn.encrypte_password(password)
try:
user = User(
username=username,
email=email,
password=password,
date_of_reg=datetime.now().replace(second=0, microsecond=0)
)
db.session.add(user)
db.session.commit()
session['loggedin'] = True
session['id'] = user.id
session['username'] = user.username
return redirect('/main')
except Exception as e:
print(e)
return "Error while registration"
return render_template("reg.html")
@app.route("/login", methods=["GET", "POST"])
def login():
if session.get('loggedin', False):
return redirect("/main")
if request.method == "POST":
email = request.form['email']
if not re.match(r"^[a-zA-Z0-9.]{1,20}@[a-z]{1,10}\.[a-z]{1,5}$", email):
return "Email is wrong"
password = request.form['password']
if not re.match(r"^[a-zA-Z0-9_\-\.]{8,20}$", password):
return "Password is wrong"
password = fn.encrypte_password(password)
user = User.query.filter_by(email=email).first()
if user:
if user.password == password:
session['loggedin'] = True
session['id'] = user.id
session['username'] = user.username
return redirect('/main')
else:
return "Unsuccessful login on site"
else:
return f"You're not registered with this email: {email}"
return render_template("login.html")
@app.route("/logout")
def logout():
if session.get('loggedin', False):
session.pop('loggedin', None)
session.pop('id', None)
session.pop('username', None)
return redirect("/")
@app.route("/main")
def main():
page = request.args.get('page', 1, type=int)
tags_count = Tag.query.count()
rand = list(range(1, tags_count + 1))
random.shuffle(rand)
questions_count = Question.query.count()
rand_q = list(range(1, questions_count + 1))
random.shuffle(rand_q)
if tags_count > 15:
n = 15
else:
n = tags_count
if questions_count > 15:
n_q = 15
else:
n_q = questions_count
data = {
"questions": Question.query.order_by(Question.id.desc()).paginate(page=page, per_page=10),
"tags": [Tag.query.filter_by(id=rand.pop(0)).first() for _ in range(n)],
"random_questions": [Question.query.filter_by(id=rand_q.pop(0)).first() for _ in range(n_q)]
}
return render_template("main.html", **data)
@app.route("/ask", methods=["GET", "POST"])
def ask():
if session.get('loggedin', False):
if request.method == "POST":
title = request.form["title"]
question = request.form["question"]
tags = request.form["tags"]
if title:
q = Question(
title=title,
question=question,
author_id=session["id"],
date=datetime.now().replace(second=0, microsecond=0))
db.session.add(q)
db.session.commit()
if tags:
for tag in tags.strip().split(","):
tag_obj = Tag.query.filter_by(tag=tag).first()
if tag_obj:
tag_id = tag_obj.id
else:
try:
t = Tag(tag=tag.strip())
db.session.add(t)
db.session.commit()
tag_id = t.id
except:
continue
db.session.add(Question__Tag(tag_id=tag_id, question_id=q.id))
db.session.commit()
if "file" in request.files:
files = request.files.getlist("file")
for file in files:
if file.filename == '':
pass
if file and fn.allowed_file(file.filename):
filename = secure_filename(file.filename)
date_time = datetime.now().strftime("%a_%-m_%y-%H_%M_%S_")
path = os.path.join(app.config["UPLOAD_FOLDER"], date_time + filename)
file.save(path)
db.session.add(QuestionImage(question_id=q.id, path=path))
db.session.commit()
return redirect("/main")
return render_template("ask.html")
return redirect("/login")
@app.route("/question/<int:id>")
def question(id):
data = {
"question": Question.query.filter_by(id=id).first(),
"popular_answers": [],
"new_answers": QuestionAnswer.query.filter_by(question_id=id).order_by(QuestionAnswer.id.desc()).limit(4).all(),
"other_questions": Question.query.order_by(Question.id.desc()).limit(7).all(),
"like": bool(Like.query.filter_by(content_id=id, user_id=session["id"]).first()),
"likes": Like.query.filter_by(content_id=id).count(),
"images": QuestionImage.query.filter_by(question_id=id).all()
}
return render_template("question.html", **data)
@app.route("/question/answer", methods=["GET", "POST"])
def question_answer():
if session.get('loggedin', False):
if request.method == "POST":
answer = request.form["answer"]
question_id = request.form["question_id"]
db.session.add(QuestionAnswer(
text=answer,
question_id=question_id,
author_id=session["id"],
date=datetime.now().replace(second=0, microsecond=0))
)
db.session.commit()
return redirect(f"/question/{question_id}")
return redirect("/login")
@app.route("/question/<int:id>/like")
def question_like(id):
if session.get('loggedin', False):
like = Like.query.filter_by(content_id=id, user_id=session["id"]).first()
if not like:
q = Question.query.filter_by(id=id).first()
if q.author_id != session["id"]:
like = Like(
content_id = id,
author_id = q.author_id,
user_id = session["id"]
)
db.session.add(like)
db.session.commit()
return redirect(f"/question/{id}")
else:
return redirect(f"/question/{id}")
else:
Like.query.filter_by(content_id=id, user_id=session["id"]).delete()
db.session.commit()
return redirect(f"/question/{id}")
return redirect("/login")
@app.route("/profile/<int:id>")
def profile(id):
if session.get('loggedin', False):
if id == 0:
id = session.get("id")
user = User.query.filter_by(id=id).first()
questions = Question.query.filter_by(author_id=id).order_by(Question.id.desc()).limit(3).all()
following = Follower.query.filter_by(follower_id=id).order_by(Follower.id.desc()).limit(7).all()
following = [User.query.filter_by(id=f.user_id).first() for f in following]
data = {
"user": user,
"questions": questions,
"follow": Follower.query.filter_by(user_id=id, follower_id=session["id"]).first(),
"id": id,
"followers_count": Follower.query.filter_by(user_id=id).count(),
"reputation": Like.query.filter_by(author_id=id).count() + Follower.query.filter_by(user_id=id).count() * 10,
"following": following
}
return render_template("profile.html", **data)
return redirect("/login")
@app.route("/tag/<int:id>")
def tag(id):
page = request.args.get('page', 1, type=int)
data = {
"tag_name": Question__Tag.query.filter_by(tag_id=id).first(),
"tag": Question__Tag.query.filter_by(tag_id=id).order_by(Question__Tag.id.desc()).paginate(page=page, per_page=10),
"id": session["id"]
}
return render_template("tags.html", **data)
@app.route("/profile/avatar", methods=["POST",])
def upload_avatar():
if session.get('loggedin', False):
file = request.files["file"]
if file and fn.allowed_file(file.filename):
filename = secure_filename(file.filename)
date_time = datetime.now().strftime("%a_%-m_%y-%H_%M_%S_")
path = os.path.join(app.config["UPLOAD_FOLDER"], date_time + filename)
file.save(path)
if UserAvatar.query.filter_by(user_id=session.get("id")).first():
UserAvatar.query.filter_by(user_id=session.get("id")).delete()
db.session.commit()
db.session.add(UserAvatar(path=path, user_id=session.get("id")))
db.session.commit()
return redirect("/profile/0")
return redirect("/login")
@app.route("/question/answers/<int:id>")
def answers(id):
page = request.args.get('page', 1, type=int)
data = {
"id": id,
"answers": QuestionAnswer.query.filter_by(question_id=id).order_by(QuestionAnswer.id.desc()).paginate(page=page, per_page=7)
}
return render_template("all_answers.html", **data)
@app.route("/profile/<int:id>/follow")
def follow(id):
if session.get('loggedin', False):
if id != session["id"]:
if Follower.query.filter_by(user_id=id, follower_id=session["id"]).first():
Follower.query.filter_by(user_id=id, follower_id=session["id"]).delete()
db.session.commit()
else:
f = Follower(user_id=id, follower_id=session["id"])
db.session.add(f)
db.session.commit()
return redirect(f"/profile/{id}")
return redirect(f"/profile/{id}")
return redirect("/login")
@app.route("/admin", methods=["GET", "POST"])
def admin():
if session.get('loggedin', False):
if request.method == "POST":
password = request.form["password"]
if hashlib.md5(password.encode("utf_8")).hexdigest() == "764118d23b7d8809551c894a92868c18":
session["root"] = True
return redirect("/admin")
return redirect("/profile/0")
if request.method == "GET":
return render_template("admin.html")
return redirect("/login")
@app.route("/admin/logout")
def admin_logout():
if session.get('loggedin', False):
if session.get('root', False):
del session['root']
return redirect("/profile/0")
@app.route("/admin/users")
def admin_users():
if session.get('loggedin', False):
if session.get('root', False):
page = request.args.get('page', 1, type=int)
users = User.query.order_by(User.id.desc()).paginate(page=page, per_page=50)
data = {
"users": users,
}
return render_template("admin_users.html", **data)
return redirect("/profile/0")
@app.route("/admin/questions")
def admin_questions():
if session.get('loggedin', False):
if session.get('root', False):
page = request.args.get('page', 1, type=int)
questions = Question.query.order_by(Question.id.desc()).paginate(page=page, per_page=50)
data = {
"questions": questions,
}
return render_template("admin_questions.html", **data)
return redirect("/main")
@app.route("/profile/<int:id>/delete")
def admin_user_delete(id):
if session.get('loggedin', False):
if session.get('root', False):
User.query.filter_by(id=id).delete()
db.session.commit()
return redirect("/admin/users")
return redirect("/profile/0")
@app.route("/question/<int:id>/delete")
def admin_question_delete(id):
if session.get('loggedin', False):
if session.get('root', False):
Question.query.filter_by(id=id).delete()
db.session.commit()
return redirect("/admin/questions")
return redirect(f"/questions/{str(id)}")
@app.route("/question/<int:id>/update", methods=["GET", "POST"])
def admin_question_update(id):
if session.get('loggedin', False):
if session.get('root', False):
if request.method == "POST":
title = request.form["title"]
text = request.form["question"]
q = Question.query.filter_by(id=id).first()
q.title, q.question = title, text
db.session.commit()
if request.method == "GET":
data = {
"question": Question.query.filter_by(id=id).first()
}
return render_template("update.html", **data)
return redirect(f"/question/{str(id)}")
@app.route("/following/<int:id>")
def following_page(id):
if session.get('loggedin', False):
page = request.args.get('page', 1, type=int)
following_obj = Follower.query.filter_by(follower_id=id).order_by(Follower.id.desc()).paginate(page=page, per_page=10)
following = [User.query.filter_by(id=f.user_id).first() for f in following_obj]
data = {
"id": id,
"following": following_obj,
"following_list": following
}
return render_template("following.html", **data)
return redirect("/login")
@app.route("/feed")
def feed():
if session.get('loggedin', False):
id_ = session.get('id', False)
f_list = Follower.query.filter_by(follower_id=id_).all()
questions_all = []
for f in f_list:
for q in Question.query.filter_by(author_id=f.user_id).all():
questions_all.append(q)
questions_all.sort(key=lambda x: x.id, reverse=True)
data = {
"questions": questions_all[:10]
}
return render_template("feed.html", **data)
return redirect("/main")
if __name__ == "__main__":
app.run(debug=True)
| UAcapitan/code | interesting_projects/ProtectMyFreedom/app.py | app.py | py | 18,662 | python | en | code | 0 | github-code | 90 |
34582308881 | from publish.agents.flow_functions import BaseFlowFunction
from publish.data import HypergridTrajectoryDataset
from publish.utils import get_device, get_grid_one_hot_int_encoder_mask
from torchtyping import TensorType
from typing import Dict
import torch
# Direction tags: reconstruct flows from the forward policy (with the
# learned Z) or from the backward policy (weighted by rewards).
FORWARD = 'frwd'
BACKWARD = 'back'
class ExactFlowFunction(BaseFlowFunction):
    """State-flow function computed exactly by enumerating every hypergrid
    trajectory (no function approximation); refreshed from the agent's
    current policy via update().
    """
    def __init__(self, config: Dict[str, object]):
        # 'mode' selects which policy trajectory probabilities come from.
        self.mode = config['mode']
        assert self.mode in [FORWARD, BACKWARD]
        env = config['env']
        # Total number of grid states: side_length ** num_dims.
        num_states = env.side_length ** env.num_dims
        # exp(0) == 1, so flows start as a vector of ones.
        self.state_flows = torch.zeros(
            num_states,
            device=get_device(),
            requires_grad=False
        ).exp()
        # Enumerate every trajectory of the hypergrid environment once.
        traj_dataset = HypergridTrajectoryDataset.create(
            env.side_length,
            env.num_dims,
            env.R_0,
            env.R_1,
            env.R_2,
            use_depth_first_search=False
        )
        self.states, self.actions, self.back_actions, self.rewards, self.dones = (
            traj_dataset.one_hot_states,
            traj_dataset.actions,
            traj_dataset.back_actions,
            traj_dataset.rewards,
            traj_dataset.dones
        )
        self.num_terminal_states = num_states
        int_encoder_mask = get_grid_one_hot_int_encoder_mask(
            env.num_dims,
            env.side_length
        )
        # Collapse one-hot states to integer indices.
        self.int_encoded_states = (
            self.states * int_encoder_mask
        ).sum(dim=-1).long()
        # Negative codes (presumably padding steps) are routed to an extra
        # overflow slot that is sliced off in update() -- TODO confirm.
        self.int_encoded_states[self.int_encoded_states <= -1] = num_states
        self.update(config['agent'])
    def __str__(self) -> str:
        return 'ExactFlowFunction'
    def update(self, agent) -> None:
        """Recompute exact state flows from the agent's current policy."""
        probs, multiplier = None, None
        if self.mode == FORWARD:
            # Trajectory probability under p_f, scaled by the true Z.
            probs = agent.get_log_pf(
                self.states,
                self.actions
            ).sum(dim=-1).exp()
            multiplier = agent.true_Z
        else:
            # Trajectory probability under p_b, scaled by the reward.
            probs = agent.get_log_pb(
                self.states,
                self.back_actions
            ).sum(dim=-1).exp()
            multiplier = self.rewards
        traj_flows = probs * multiplier
        # One extra slot collects contributions from padding indices.
        state_flows_pre = torch.zeros(
            self.num_terminal_states + 1,
            device=get_device()
        )
        # Each state visited on a trajectory accumulates that trajectory's
        # flow (the trajectory flow is repeated once per step).
        state_flows_pre.index_add_(
            dim=0,
            index=self.int_encoded_states.flatten(),
            source=traj_flows.repeat_interleave(self.actions.shape[-1])
        )
        self.state_flows = state_flows_pre[:-1].detach()
    def get_flows(
        self,
        int_encoded_states: TensorType['num_states', int],
        log_Z: TensorType[float]
    ) -> TensorType['num_states', float]:
        """Look up flows; in FORWARD mode the origin's flow is exp(log_Z)."""
        is_origin_mask = int_encoded_states == 0
        if self.mode == BACKWARD:
            is_origin_mask[:] = False
        # NOTE(review): mutates the caller's tensor in place (padding
        # indices are clamped to 0) -- confirm intended.
        int_encoded_states[int_encoded_states <= -1] = 0
        return (
            (~is_origin_mask * self.state_flows[int_encoded_states]) +
            (is_origin_mask * log_Z.exp())
        )
| WristTurn384/publish | publish/agents/flow_functions/exact_flow_function.py | exact_flow_function.py | py | 3,024 | python | en | code | 0 | github-code | 90 |
13622479788 | #! /usr/bin/python
from matplotlib import rcParams
from matplotlib.font_manager import FontProperties
from ..const import T_sun, d2r
from ..parfile import DerivePar
from .pkcorr import doppler
import matplotlib.pyplot as plt
import numpy as np
rcParams["mathtext.fontset"] = "cm"
def om1dot_m1m2(omd,omderr,a1,e,pb,om,m1,npts):
"""
Calculate upper/lower bounds of OMDOT curve in the m1-m2 plane.
"""
m2omh = ((omd+omderr)*d2r*(1.-e**2)*(pb/2./np.pi)**(5./3.)/3./\
T_sun.value**(2./3.)/86400./365.25)**(3./2.) - m1
m2oml = ((omd-omderr)*d2r*(1.-e**2)*(pb/2./np.pi)**(5./3.)/3./\
T_sun.value**(2./3.)/86400./365.25)**(3./2.) - m1
return m2omh, m2oml
def pbdot_m1m2(pbdot,pbdoterr,pb,e,m1,npts):
    """
    Calculate the upper/lower bounds of PBDOT curve in the m1-m2 plane.

    For each pulsar mass m1[i], solves the GR orbital-decay relation
    pbdot +/- pbdoterr = A*m1*m2*(m1+m2)^(-1/3) for m2 with up to 100
    Newton-Raphson iterations (convergence tolerance 1e-7).
    NOTE(review): m1 entries equal to 0 are replaced by 0.001 *in place*,
    mutating the caller's array -- confirm intended.
    """
    m2pbdh = np.zeros(npts)
    m2pbdl = np.zeros(npts)
    # Eccentricity enhancement factor of the quadrupole formula.
    fe = 1.+73./24.*e**2+37./96.*e**4
    A = -192.*np.pi/5.*(pb/2./np.pi)**(-5./3.)*fe*(1.-e**2)**(-7./2.)*\
        T_sun.value**(5./3.)
    for i in range(npts):
        m2 = 1.
        # use Newton-Raphson method to get upper-bound curve.
        if (m1[i] == 0.):
            m1[i] = 0.001
        for j in range(100):
            m2b = m2
            f = A*m1[i]*m2*(m1[i]+m2)**(-1./3.)
            fp = A*m1[i]*m2*(m1[i]+m2)**(-1./3.)*(1./m2-1./3./(m1[i]+m2))
            m2 = m2-((pbdot+pbdoterr)*1e-12-f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2pbdh[i] = m2
                break
    for k in range(npts):
        m2 = 1.
        # use Newton-Raphson method to get lower-bound curve.
        if (m1[k] == 0.):
            m1[k] = 0.001
        for l in range(100):
            m2b = m2
            f = A*m1[k]*m2*(m1[k]+m2)**(-1./3.)
            fp = A*m1[k]*m2*(m1[k]+m2)**(-1./3.)*(1./m2-1./3./(m1[k]+m2))
            m2 = m2-((pbdot-pbdoterr)*1e-12-f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2pbdl[k] = m2
                break
    return m2pbdh, m2pbdl
def gamma_m1m2(gam,gamerr,e,pb,m1,npts):
    """
    Calculate upper/lower bounds of GAMMA curve in the m1-m2 plane.

    Solves gam +/- gamerr = C * m2*(m1+2*m2) / (m1+m2)^(4/3), with
    C = e*(pb/2pi)^(1/3)*T_sun^(2/3), for m2 via Newton-Raphson.
    Fix: the upper-bound loop used ``m2*(m1+m2)`` inside the derivative,
    which is not d f/d m2; it now uses ``m2*(m1+2*m2)`` like the
    lower-bound loop (the correct product-rule term).
    """
    m2gamh = np.zeros(npts)
    m2gaml = np.zeros(npts)
    for i in range(npts):
        m2 = 1.
        # use Newton-Raphson method to get upper-bound curve.
        for j in range(100):
            m2b = m2
            f = e*(pb/2./np.pi)**(1./3.)*T_sun.value**(2./3.)*(m1[i]+m2)**(-4./3.)*\
                m2*(m1[i]+2.*m2)
            fp = e*(pb/2./np.pi)**(1./3.)*T_sun.value**(2./3.)*(m1[i]+m2)**(-4./3.)*\
                (-4./3./(m1[i]+m2)*m2*(m1[i]+2.*m2)+m1[i]+4.*m2)
            m2 = m2-(gam+gamerr-f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2gamh[i] = m2
                break
    for k in range(npts):
        m2 = 1.
        # use Newton-Raphson method to get lower-bound curve.
        for l in range(100):
            m2b = m2
            f = e*(pb/2./np.pi)**(1./3.)*T_sun.value**(2./3.)*(m1[k]+m2)**(-4./3.)*\
                m2*(m1[k]+2.*m2)
            fp = e*(pb/2./np.pi)**(1./3.)*T_sun.value**(2./3.)*(m1[k]+m2)**(-4./3.)*\
                (-4./3./(m1[k]+m2)*m2*(m1[k]+2.*m2)+m1[k]+4.*m2)
            m2 = m2-(gam-gamerr-f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2gaml[k] = m2
                break
    return m2gamh, m2gaml
def r_m1m2(m2, m2err, npts):
    """
    Calculate upper/lower bounds of Shapiro-r curve in the m1-m2 plane.

    The Shapiro 'range' parameter measures the companion mass directly,
    so the bounds are flat lines at m2 +/- m2err, independent of m1.
    np.full replaces the original zeros-plus-constant construction.
    """
    m2rh = np.full(npts, m2 + m2err)
    m2rl = np.full(npts, m2 - m2err)
    return m2rh, m2rl
def s_m1m2(s,serr,x,pb,m1,npts):
    """
    Calculate upper/lower bounds of Shapiro-s curve in the m1-m2 plane.

    For each m1, solves s +/- serr = x*(pb/2pi)^(-2/3)*T_sun^(-1/3)*
    (m1+m2)^(2/3)/m2 (the mass function / sin i relation, with x the
    projected semi-major axis in light-seconds) for m2 by Newton-Raphson.
    """
    m2sh = np.zeros(npts)
    m2sl = np.zeros(npts)
    for i in range(npts):
        m2 = 0.2
        # use Newton-Raphson method to get upper-bound curve.
        for j in range(100):
            m2b = m2
            f = x*(pb/2./np.pi)**(-2./3.)*T_sun.value**(-1./3.)*(m1[i]+m2)**(2./3.)/m2
            fp = x*(pb/2./np.pi)**(-2./3.)*T_sun.value**(-1./3.)*(m1[i]+m2)**(2./3.)/m2*\
                (2./3./(m1[i]+m2)-1./m2)
            m2 = m2-(s+serr-f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2sh[i] = m2
                break
    for k in range(npts):
        m2 = 0.2
        # use Newton-Raphson method to get lower-bound curve.
        for l in range(100):
            m2b = m2
            f = x*(pb/2./np.pi)**(-2./3.)*T_sun.value**(-1./3.)*(m1[k]+m2)**(2./3.)/m2
            fp = x*(pb/2./np.pi)**(-2./3.)*T_sun.value**(-1./3.)*(m1[k]+m2)**(2./3.)/m2*\
                (2./3./(m1[k]+m2)-1./m2)
            m2 = m2-(s-serr-f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2sl[k] = m2
                break
    return m2sh, m2sl
def om1spin_m1m2(om1s,om1serr,pb,ecc,m1,npts):
    """
    Calculate upper/lower bounds of precession-rate curve in the m1-m2 plane.

    om1serr is a (lower, upper) pair of uncertainties (possibly
    asymmetric).  For each m1, solves the geodetic precession-rate
    relation for m2 via Newton-Raphson; rates are in deg/yr (d2r and
    the 86400*365.25 factor convert the GR expression).
    """
    om1serr_lo, om1serr_up = om1serr[0], om1serr[1]
    m2sh = np.zeros(npts)
    m2sl = np.zeros(npts)
    A = 0.5 * (T_sun.value)**(2./3.) * (pb / 2 / np.pi)**(-5./3.) / \
        (1 - ecc**2) * 86400 * 365.25
    for i in range(npts):
        m2 = 0.2
        # use Newton-Raphson method to get upper-bound curve.
        for j in range(100):
            m2b = m2
            f = A * (m2 * (4*m1[i] + 3*m2) / (m1[i] + m2)**(4./3.)) / d2r
            fp = A * ((m1[i] + m2) * (4*m1[i] + 6*m2) - (4./3.) * m2 * (4*m1[i] + 3*m2)) / \
                (m1[i] + m2)**(7./3.) / d2r
            m2 = m2-(om1s + om1serr_up - f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2sh[i] = m2
                break
    for k in range(npts):
        m2 = 0.2
        # Newton-Raphson for the lower-bound curve.
        for l in range(100):
            m2b = m2
            f = A * (m2 * (4*m1[k] + 3*m2) / (m1[k] + m2)**(4./3.)) / d2r
            fp = A * ((m1[k] + m2) * (4*m1[k] + 6*m2) - (4./3.) * m2 * (4*m1[k] + 3*m2)) / \
                (m1[k] + m2)**(7./3.) / d2r
            m2 = m2 - (om1s - om1serr_lo - f)/(-fp)
            if (np.fabs(m2-m2b) < 1e-7):
                m2sl[k] = m2
                break
    return m2sh, m2sl
# define m1m2 class that uses the above functions when they're set in parfile.
# Container/plotter for all available PK-parameter mass-mass constraints.
class M1M2(object):
    def __init__(self, inobj, npts=200, font_name="serif", om1s=[False,False,False], pkcorr='n'):
        """
        Calculate upper/lower bounds of post-Keplerian (PK)parameters and store
        all arrays in a single object. Currently supported PK parameters are:
        PBDOT, OMDOT, GAMMA, r, s.
        Required argument:
            - 'inobj' = input parfile object, which can be generated by using
               the "readpar" class in the 'psrpar.py' module.
        Default arguments:
            - 'npts' = number of array elements for upper/lower bounds.
            - 'pkcorr' = correct for Doppler bias? (y = yes, n = no).
            - 'om1s' = list of values for a geodetic-precession measurement.
              (median value, lower uncertainy, upper uncertainy.)
        """
        # om1s is a mutable default but is only read, never mutated.
        self.font = FontProperties()
        self.font.set_name(font_name)
        a1 = inobj.A1["value"]
        e = inobj.E["value"]
        # Orbital period converted from days to seconds.
        pb = inobj.PB["value"] * 86400
        om = inobj.OM["value"]
        # Pulsar-mass grid: npts points spanning 0..3 solar masses.
        m1 = 3. * np.arange(npts) / (npts-1.)
        setattr(self,'m1',m1)
        m2omh = m2oml = np.zeros(npts)
        # if OMDOT and its error are set, calculate m1m2 arrays.
        if inobj.OMDOT["error"] is not None:
            omdot = inobj.OMDOT["value"]
            omdoterr = inobj.OMDOT["error"]
            m2omh, m2oml = om1dot_m1m2(omdot,omdoterr,a1,e,pb,om,m1,npts)
            setattr(self,'OMDOT_U',m2omh)
            setattr(self,'OMDOT_L',m2oml)
        # if GAMMA and its error are set, calculate m1m2 arrays.
        if inobj.GAMMA["error"] is not None:
            gamma = inobj.GAMMA["value"]
            gammaerr = inobj.GAMMA["error"]
            m2gamh, m2gaml = gamma_m1m2(gamma,gammaerr,e,pb,m1,npts)
            setattr(self,'GAMMA_U',m2gamh)
            setattr(self,'GAMMA_L',m2gaml)
        # if Shapiro-r and its error are set, calculate m1m2 arrays.
        if inobj.M2["error"] is not None :
            r = inobj.M2["value"]
            rerr = inobj.M2["error"]
            m2rh, m2rl = r_m1m2(r,rerr,npts)
            setattr(self,'r_U',m2rh)
            setattr(self,'r_L',m2rl)
        # if Shapiro-s and its error are set, calculate m1m2 arrays.
        if inobj.SINI["error"] is not None:
            s = inobj.SINI["value"]
            serr = inobj.SINI["error"]
            m2sh, m2sl = s_m1m2(s,serr,a1,pb,m1,npts)
            setattr(self,'s_U',m2sh)
            setattr(self,'s_L',m2sl)
        # if PBDOT and its error are set, calculate m1m2 arrays.
        if inobj.PBDOT["error"] is not None:
            pbdot = inobj.PBDOT["value"]
            pbdoterr = inobj.PBDOT["error"]
            # Doppler (Shklovskii/Galactic) correction is currently disabled.
            #if (pkcorr == 'y'):
            #    der = DerivePar(inobj)
            #    b, l, mu, muerr = der.gal_b, der.gal_l, der.mu, der.muerr
            #    corr, corr_err = doppler(0.7,0.3,b,l,mu,muerr)
            #    corr *= inobj.PB*86400.*1e12
            #    corr_err *= inobj.PB*86400.*1e12
            #    pbdot -= sum(corr)
            #    pbdoterr = np.sqrt(pbdoterr**2+corr_err**2)
            m2pbdh, m2pbdl = pbdot_m1m2(pbdot,pbdoterr,pb,e,m1,npts)
            setattr(self,'PBDOT_U',m2pbdh)
            setattr(self,'PBDOT_L',m2pbdl)
        # if OM1SPIN and the uncertainties are set, calculate m1m2 arrays.
        if (any(om1s)):
            om1smed = om1s[0]
            om1serr = om1s[1:]
            print(om1serr)
            m2om1sh, m2om1sl = om1spin_m1m2(om1smed, om1serr, pb, e, m1, npts)
            setattr(self,'OM1SPIN_U',m2om1sh)
            setattr(self,'OM1SPIN_L',m2om1sl)
    def plot(self, pbdot_extra: "tuple | None" = None):
        """
        Plot availabe m1-m2 data using the matplotlib package.

        pbdot_extra, if given, is a (lower, upper) pair of extra PBDOT
        bound curves drawn as dashed lines.  Saves 'm1m2.png' and shows
        the figure.
        """
        if (hasattr(self,'OMDOT_U') and hasattr(self,'OMDOT_L')):
            #plt.plot(self.m1,self.OMDOT_U,'k-')
            #plt.plot(self.m1,self.OMDOT_L,'k-')
            plt.fill_between(self.m1, self.OMDOT_U, self.OMDOT_L, color='k', alpha=0.8)
            plt.text(2.0, 0.4, r'$\dot{\omega}$', fontproperties=self.font, fontsize=15)
        if (hasattr(self,'GAMMA_U') and hasattr(self,'GAMMA_L')):
            #plt.plot(self.m1,self.GAMMA_U,'g-')
            #plt.plot(self.m1,self.GAMMA_L,'g-')
            plt.text(0.2, 1.0, r'$\gamma$', fontproperties=self.font, fontsize=15)
            plt.fill_between(self.m1, self.GAMMA_U, self.GAMMA_L, color='g', alpha=0.8)
        if (hasattr(self,'r_U') and hasattr(self,'r_L')):
            #plt.plot(self.m1,self.r_U,'r-')
            #plt.plot(self.m1,self.r_L,'r-')
            plt.fill_between(self.m1, self.r_U, self.r_L, color='r', alpha=0.5)
            plt.text(2.7, 1.1, r'$r$', fontproperties=self.font, fontsize=15)
        if (hasattr(self,'s_U') and hasattr(self,'s_L')):
            #plt.plot(self.m1,self.s_U,'m-')
            #plt.plot(self.m1,self.s_L,'m-')
            plt.fill_between(self.m1, self.s_U, self.s_L, color='m', alpha=0.5)
            plt.text(0.5, 0.7, r'$s$', fontproperties=self.font, fontsize=15)
        if (hasattr(self,'PBDOT_U') and hasattr(self,'PBDOT_L')):
            #plt.plot(self.m1,self.PBDOT_U,'b-')
            #plt.plot(self.m1,self.PBDOT_L,'b-')
            plt.fill_between(self.m1, self.PBDOT_U, self.PBDOT_L, color='blue', alpha=0.5)
            plt.text(0.9, 2.5, r'$\dot{P}_{\rm b}$', fontproperties=self.font, fontsize=15)
            if pbdot_extra is not None:
                pbdot_extra_lo, pbdot_extra_hi = pbdot_extra
                plt.plot(self.m1, pbdot_extra_lo, 'k--')
                plt.plot(self.m1, pbdot_extra_hi, 'k--')
        if (hasattr(self,'OM1SPIN_U') and hasattr(self,'OM1SPIN_L')):
            #plt.plot(self.m1,self.OM1SPIN_U,'y-')
            #plt.plot(self.m1,self.OM1SPIN_L,'y-')
            plt.fill_between(self.m1, self.OM1SPIN_U, self.OM1SPIN_L, color='yellow', alpha=0.7)
            plt.text(2.5, 2.3, r'$\Omega_1^{\rm spin}$', fontproperties=self.font, fontsize=15)
        plt.xlim(0.,3.)
        plt.ylim(0.,3.)
        #plt.axes().set_aspect('equal')
        plt.xlabel(r'Pulsar Mass (${\rm M}_{\odot}$)', fontproperties=self.font, fontsize=15)
        plt.ylabel(r'Companion Mass (${\rm M}_{\odot}$)', fontproperties=self.font, fontsize=15)
        plt.savefig('m1m2.png', dpi=500, fmt='png')
        plt.show()
| emmanuelfonseca/PSRpy | PSRpy/orbit/m1m2.py | m1m2.py | py | 12,535 | python | en | code | 2 | github-code | 90 |
18142290569 | def main():
    # Read an r x c table, append a row-sum column to every row, and
    # append a final row of column sums (bottom-right is the grand total).
    r, c = [int(x) for x in input().split()]
    matrix = []
    totals = [0 for _ in range(c+1)]
    for _ in range(r):
        nums = [int(x) for x in input().split()]
        total = sum(nums)
        row = nums + [total]
        matrix.append(row)
        # Accumulate per-column sums (including the row-sum column).
        for i in range(c+1):
            totals[i] += row[i]
    matrix.append(totals)
    for m in matrix:
        print(" ".join([str(x) for x in m]))
if __name__ == '__main__':
    main()  # run only when executed as a script
| Aasthaengg/IBMdataset | Python_codes/p02413/s719960826.py | s719960826.py | py | 463 | python | en | code | 0 | github-code | 90 |
71987355178 | import sys
# Python 2 fallback: alias raw_input to input so the rest of the module
# can call input() uniformly on both major versions.
if sys.version_info[0] < 3:
    input = raw_input
def MPF(total_income):
    """Annual Mandatory Provident Fund contribution: 5% of income,
    capped at 18000; income under 7100/month contributes nothing."""
    income = int(total_income)
    if income < 7100 * 12:
        return 0
    return min(18000, income * 0.05)
def net_income(total_income):
    """Personal net income: total income less the MPF contribution."""
    income = int(total_income)
    return income - MPF(income)
def NCI(net_income, combined = False):
    """Net chargeable income after the 132000 basic allowance
    (doubled for a combined/joint assessment); never negative."""
    allowance = 132000 * (2 if combined else 1)
    return max(net_income - allowance, 0)
def standard_tax(net_income):
    """Standard-rate tax: a flat 15% of net income."""
    return 0.15 * net_income
def progressive_tax(nci):
    """Progressive tax on net chargeable income: four 50000-wide bands
    at 2%/6%/10%/14%, then 17% on the remainder."""
    brackets = ((50000, 0.02), (50000, 0.06), (50000, 0.10), (50000, 0.14))
    tax = 0
    remaining = nci
    for width, rate in brackets:
        if remaining <= width:
            return tax + remaining * rate
        tax += width * rate
        remaining -= width
    # Remainder above 200000 is taxed at the top rate.
    return tax + remaining * 0.17
def tax_selection(net_income, combined = False):
    """Return ([standard, progressive] taxes, index of the cheaper one).

    Index 1 means the progressive assessment is strictly cheaper.
    """
    options = [
        int(standard_tax(net_income)),
        int(progressive_tax(NCI(net_income, combined))),
    ]
    return options, int(options[1] < options[0])
def tax_calculation(data):
    """ Tax Calculator
    Input Format:
    {
        "self_income": int,
        "spouse_income": int,
        "marital_status": bool,
    }
    Output Format:
    {
        "marital_status": bool,
        "combined": bool,
        "self_mpf": int,
        "self_tax": int,
        "spouse_mpf": int,
        "spouse_tax": int,
        "combined_tax": int,
    }
    """
    output = {
        "marital_status": data["marital_status"],
        "combined": False,
        "self_tax": 0,
        "self_mpf": MPF(data["self_income"]),
        "spouse_tax": 0,
        "spouse_mpf": MPF(data["spouse_income"]),
        "combined_tax": 0,
    }
    # Cheaper of standard vs progressive for each person separately.
    values, choice = tax_selection(net_income(data["self_income"]))
    output["self_tax"] = values[choice]
    values, choice = tax_selection(net_income(data["spouse_income"]))
    output["spouse_tax"] = values[choice]
    # Joint assessment on combined net incomes with the doubled allowance.
    values, choice = tax_selection(net_income(data["self_income"]) + net_income(data["spouse_income"]), combined = True)
    output["combined_tax"] = values[choice]
    # decide whether combined_tax is chosen by comparing it with sum of seperated taxes (self_tax + spouse_tax)
    output["combined"] = output["marital_status"] and output["combined_tax"] < (output["self_tax"] + output["spouse_tax"])
    return output
def get_input():
    """Interactively collect incomes and marital status from stdin.

    NOTE(review): an answer whose first letter is not 'n'/'y' makes
    ["n", "y"].index(...) raise ValueError — there is no validation here.
    """
    data = {
        "self_income": 0,
        "spouse_income": 0,
        "marital_status": False,
    }
    def get_income(role):
        # Prompt for and parse one integer income figure.
        return int(input("%s income: " % role))
    data["self_income"] = get_income("Your")
    marital_status = input("Marital Status: ")[0].lower()
    # 'n' -> 0 (falsy), 'y' -> 1 (truthy); stored as an int rather than a bool.
    data["marital_status"] = ["n", "y"].index(marital_status)
    if data["marital_status"]:
        data["spouse_income"] = get_income("Spouse's")
    return data
def show_output(result):
    """Print a human-readable summary of a tax_calculation() result.

    NOTE(review): "seperate"/"Seperating" are typos in user-facing strings;
    left as-is here because they are runtime output, not comments.
    """
    print ("Your MPF: %d" % result["self_mpf"])
    print ("Your seperate tax payable: %d" % result["self_tax"])
    if result["marital_status"]:
        print ("Spouse's MPF: %d" % result["spouse_mpf"])
        print ("Spouse's seperate tax payable: %d" % result["spouse_tax"])
        print ("Joint Tax payable: %d" % result["combined_tax"])
    print ("Suggest: %s" % ("Combining" if result["combined"] else "Seperating"))
if __name__ == "__main__":
    # Simple CLI driver: prompt for inputs, compute the taxes, print a report.
    print ("Tax Calculator")
    data = get_input()
    result = tax_calculation(data)
    show_output(result)
| leoslf/302CEM | tax_assessment2/tax_assessment.py | tax_assessment.py | py | 3,583 | python | en | code | 0 | github-code | 90 |
7418618935 | from django.urls import path
from . import views
urlpatterns = [
    # Landing page and authentication.
    path('', views.home, name="home"),
    path('login/', views.login, name="login"),
    path('register/', views.register, name='register'),
    # NOTE(review): 'logout' has no trailing slash, unlike its siblings — confirm intended.
    path('logout', views.logout, name='logout'),
    # Room lifecycle: create/join, then per-room lobby and game views keyed by <room_code>.
    path('create/room/', views.create_room, name='create_room'),
    path('join/room/', views.join_room, name='join_room'),
    path('room/<room_code>/lobby', views.room_lobby, name='room_lobby'),
    path('room/<room_code>/game', views.room_game, name='room_game'),
] | Cristian1997Ion/Djanswer | game/urls.py | urls.py | py | 525 | python | en | code | 0 | github-code | 90 |
6980520086 | from collections import OrderedDict
def sort_sales(data):
    """Aggregate (product, quantity) pairs into total quantity per product.

    Keys keep first-appearance order, matching the original behaviour.
    Fix: the original crashed with IndexError on an empty input list
    (list(zip(*data))[0]); an empty dict is now returned instead.
    """
    totals = {}
    for name, qty in data:
        totals[name] = totals.get(name, 0) + qty
    return totals
def sales_summary(data):
    """Rank products by total quantity (descending) with competition ranking.

    Tied quantities share a rank; the next distinct quantity resumes at its
    positional rank (1, 1, 3, ...).  Returns [(product, quantity, rank), ...].
    """
    ordered = sorted(data.items(), key=lambda item: item[1], reverse=True)
    summary = []
    for position, (name, qty) in enumerate(ordered, start=1):
        if summary and summary[-1][1] == qty:
            rank = summary[-1][2]  # tie: inherit the previous rank
        else:
            rank = position
        summary.append((name, qty, rank))
    return summary
if __name__ == '__main__':
    # Demo: aggregate duplicate products, then rank the totals descending.
    data =[('apple', 14),('pencil', 14),('book', 8),('orange', 6),('beer', 4),('beer', 2),('paper', 3)]
    sorted_sales = sort_sales(data)
    summary = sales_summary(sorted_sales)
    print(summary)
| Yeongbi-Na/STUDY | Python/ex-08-sales-summary-main/practice.py | practice.py | py | 925 | python | zh | code | 0 | github-code | 90 |
31280124054 | x = 1
y = 0
r = 1
s = 0
processList = []
bufferX = []
bufferY = []
bufferR = []
bufferS = []
def p(_x, _process, _buffer):
    """Semaphore wait (P): decrement the counter.

    A negative result means _process blocks and is queued in _buffer.
    Returns the new counter value.
    """
    _x = _x - 1
    if _x < 0:
        _buffer.append(_process)
    return _x
def v(_x, _process, _buffer):
    """Semaphore signal (V): increment the counter.

    If the counter is still <= 0, _process is removed from the wait queue.
    Returns the new counter value.
    """
    _x = _x + 1
    if _x <= 0:
        _buffer.remove(_process)
    return _x
# FIRST C
s = p(s,"C",bufferS)
if s >= 0:
y = p(y, "C", bufferY)
if y > 0:
x = v(x,"C",bufferX)
r = v(r,"C",bufferR)
# FIRST B
r = p(r,"B",bufferR)
if r >= 0:
y = p(y,"B", bufferY)
if y > 0:
x = v(x,"B", bufferX)
s = v(s,"B", bufferS)
# FIRST A
x = p(x,"A",bufferX)
"""if x >= 0:
y = v(y,"A",bufferY)"""
print(f"Value of X is: {x}")
print(f"Value of Y is: {y}")
print(f"Value of R is: {r}")
print(f"Value of S is: {s}")
print(f"List X Contains: {bufferX}")
print(f"List Y Contains: {bufferY}")
print(f"List R Contains: {bufferR}")
print(f"List S Contains: {bufferS}") | madescoces/python | matematicas_3/src/semaforos.py | semaforos.py | py | 905 | python | en | code | 0 | github-code | 90 |
74740413096 |
def minion_game(string):
    """Score The Minion Game and print the winner.

    Every substring of `string` scores one point for its owner: Kevin owns
    substrings starting with a vowel, Stuart those starting with a consonant.
    A substring starting at index i occurs in len(string) - i substrings, so
    the scores are tallied in a single O(n) pass instead of materialising
    every substring (the original built substrings incorrectly, read the
    global `s` instead of the parameter, and tested vowels against the
    literal "A, E, I, O, U").
    Prints "Kevin <score>", "Stuart <score>", or "Draw".
    """
    string = string.upper()
    length = len(string)
    kevin = 0
    stuart = 0
    for i, letter in enumerate(string):
        # Each start position contributes one point per substring beginning there.
        if letter in "AEIOU":
            kevin += length - i
        else:
            stuart += length - i
    if kevin > stuart:
        print("Kevin", kevin)
    elif stuart > kevin:
        print("Stuart", stuart)
    else:
        print("Draw")
if __name__ == '__main__':
s = input()
minion_game(s)
| KingSlayer-KS/python_practice_hacker_rank | hacker-rank-minion-game.py | hacker-rank-minion-game.py | py | 732 | python | en | code | 0 | github-code | 90 |
18303268239 | import sys
sys.setrecursionlimit(10**9)
N, u, v = map(int, input().split())
u, v = u-1, v-1
edge = [[] for _ in range(N)]
for i in range(N-1):
a, b = map(int, input().split())
a, b = a-1, b-1
edge[a].append(b)
edge[b].append(a)
taka = [0] * N
aoki = [0] * N
def dfs(v, pre, cost, i):
    """Label every vertex reachable from v with its depth i from the start vertex.

    Mutates and returns `cost`; `pre` is the parent, preventing back-tracking.
    Reads the module-level adjacency list `edge` (recursion limit raised above
    because the tree can be deep).
    """
    for e in edge[v]:
        if e == pre:
            continue
        cost = dfs(e, v, cost, i+1)
    cost[v] = i
    return cost
taka = dfs(u, -1, [0] * N, 0)
aoki = dfs(v, -1, [0] * N, 0)
m = 0
for i in range(N):
if taka[i] < aoki[i]:
m = max(m, aoki[i])
print(m-1)
| Aasthaengg/IBMdataset | Python_codes/p02834/s201665822.py | s201665822.py | py | 588 | python | en | code | 0 | github-code | 90 |
40090016551 | '''
@Author: Pavan Nakate
@Date: 2021-11-02 02:08
@Last Modified by: Pavan Nakate
@Last Modified time: None
@Title : WindChill : To find the wind chill using temperature and wind speed as a user input
'''
import math
def _wind_chill_index(temperature, wind_speed):
    """NWS wind chill (°F): 35.74 + 0.6215*T - 35.75*V^0.16 + 0.4275*T*V^0.16."""
    v_pow = math.pow(wind_speed, 0.16)
    return 35.74 + 0.6215 * temperature + (0.4275 * temperature - 35.75) * v_pow

def wind_chill():
    """
    Description:
        Read a temperature (Fahrenheit) and wind speed (mph) from the user and
        report the wind chill.  The NWS formula is only defined for
        temperature <= 50 and wind speed in the 3-120 range; other inputs are
        rejected.
        Fix: the original added 35.75 to the V^0.16 term instead of
        subtracting it, inflating the result instead of chilling it.
    Parameter:
        None
    Return:
        None
    """
    temperature = float(input("Enter temperature in fahrenheit in between the range 0 to 50 : "))
    wind_speed = float(input("Enter wind speed in between the range 3 to 120 : "))
    # Outside the formula's validity range, wind chill is undefined.
    if(temperature > 50 or wind_speed < 3 or wind_speed > 120):
        print("Wind chill cannot be found for the entered condition")
    else:
        wind_chill = _wind_chill_index(temperature, wind_speed)
        print(f"Wind chill for the given {temperature} and {wind_speed} is: ",wind_chill)
if __name__ == "__main__":
wind_chill() | Pavan699/Python_Concepts | Functional Programs/WindChill.py | WindChill.py | py | 1,244 | python | en | code | 0 | github-code | 90 |
4047693937 | # Задайте список из нескольких чисел. Напишите программу, которая найдёт сумму элементов списка, стоящих на нечётной позиции.
# Пример:
# - [2, 3, 5, 9, 3] -> на нечётных позициях элементы 3 и 9, ответ: 12
import random
user_number_of_elements = int(input('Введите желаемое число элементов: '))
list_for_treatment = []
for element in range(user_number_of_elements):
list_for_treatment.append(random.randint(0, 10))
print(f'Получен список чисел:\n{list_for_treatment}')
print()
sum_of_numbers_with_odd_indexes = 0
i = 1
while i < len(list_for_treatment):
sum_of_numbers_with_odd_indexes = sum_of_numbers_with_odd_indexes + list_for_treatment[i]
i += 2
print(f'Сумма элементов списка, стоящих на нечётной позиции равна: {sum_of_numbers_with_odd_indexes}')
# Task 2: find the product of element pairs of a list.  A pair is the first
# and last element, the second and second-to-last, etc.
# Examples:
# - [2, 3, 4, 5, 6] => [12, 15, 16];
# - [2, 3, 5, 6] => [12, 15]
import random

def pair_products(values):
    """Return products of symmetric pairs: first*last, second*second-to-last, ...

    For an odd-length list the middle element is paired with itself.
    Fix: the original loop bound used round((len + 0.1) / 2), whose float
    rounding dropped the middle pair for odd lists and emitted an extra pair
    for even ones.
    """
    return [values[i] * values[-1 - i] for i in range((len(values) + 1) // 2)]

if __name__ == '__main__':
    user_number_of_elements2 = int(input('Введите желаемое число элементов: '))
    list_for_treatment2 = [random.randint(0, 10) for _ in range(user_number_of_elements2)]
    print(f'Получен список чисел:\n{list_for_treatment2}')
    print()
    result_list = pair_products(list_for_treatment2)
    print(f'Попарно перемножены элементы вводного списка, получен итоговый список:\n{result_list}')
# Задайте список из вещественных чисел. Напишите программу, которая найдёт разницу между максимальным и минимальным значением дробной части элементов.
# *Пример:*
# - [1.1, 1.2, 3.1, 5, 10.01] => 0.19
import random
user_number_of_elements3 = int(input('Введите желаемое число элементов: '))
list_for_treatment3 = []
for element in range(user_number_of_elements3):
list_for_treatment3.append(random.randint(0,10) + round(random.random(), 2))
print(f'Получен список чисел:\n{list_for_treatment3}')
list_for_treatment3_without_whole_part = []
for element in list_for_treatment3:
list_for_treatment3_without_whole_part.append(element - int(element))
result = max(list_for_treatment3_without_whole_part) - min(list_for_treatment3_without_whole_part)
print(f'Разница между максимальным и минимальным значением дробной части элементов равна: {result}')
# 4 Напишите программу, которая будет преобразовывать десятичное число в двоичное.
# - 45 -> 101101
# - 3 -> 11
# - 2 -> 10
user_number = int(input("Введите число: "))
binary_result = []
while user_number:
binary_result.append(user_number % 2)
user_number //= 2
binary_result.reverse()
print(binary_result)
| Kachinske/Python_seminar_3_homework | Python_seminar3_homework.py | Python_seminar3_homework.py | py | 3,778 | python | ru | code | 0 | github-code | 90 |
816845149 | import tav.proxy.database
import tav.bot
import os.path
import sys
PROXY_DATABASE = \
os.path.join(os.path.abspath(os.path.split(__file__)[0]), 'proxy.db')
def main():
    """CLI entry point: parse arguments, load scored proxies, start the viewer bot."""
    import argparse
    parser = argparse.ArgumentParser(
        description='Twitch Artificial Viewers'
    )
    parser.add_argument(
        '--timeout', dest='timeout', type=int, default=5,
        help='Timeout after which a connection/proxy times out'
    )
    parser.add_argument(
        '--db', dest='database', default=PROXY_DATABASE,
        help='Path to proxy database'
    )
    parser.add_argument(
        '--quiet', dest='quiet', action='store_true',
        help='Don\'t print any status updates'
    )
    parser.add_argument(
        '--score', dest='score', default=0.3, type=float,
        help='relative score of proxies to use'
    )
    parser.add_argument(
        'name', help='Twitch username'
    )
    parser.add_argument(
        'viewers', type=int, help='How many viewers/threads'
    )
    ns = parser.parse_args()

    bot = tav.bot.ViewerBot(ns.name, timeout=ns.timeout, verbose=not ns.quiet)
    # Fix: honour the --db argument; the original always opened the default
    # PROXY_DATABASE path, silently ignoring ns.database.
    with tav.proxy.database.SqliteProxyDatabase(ns.database) as db:
        proxies = db.load(ns.score)
        bot.proxies.add_working_proxies(proxies)

    bot.run(ns.viewers)
if __name__ == '__main__':
main()
| Dav1dde/tav | vbot.py | vbot.py | py | 1,334 | python | en | code | 3 | github-code | 90 |
11850208329 | #!/usr/bin/env python
# a0.py : Solve the N-Rooks/N-queen/N-knight problem!
# Ranjana, 2018
# Updated the code given in class
import sys
from collections import deque
import time
# Count # of pieces in given row
def count_on_row(board, row):
    """Number of pieces (1-entries) on the given row."""
    return board[row].count(1)
# Count # of pieces in given column
def count_on_col(board, col):
    """Number of pieces (1-entries) in the given column."""
    return sum(row[col] for row in board)
# Count total # of pieces on board
def count_pieces(board):
    """Total number of pieces on the whole board."""
    return sum(sum(row) for row in board)
# Return a string with the board rendered in a human-friendly format
def printable_board(board):
    """Render the board: X = blocked square, K/Q/R = placed piece, _ = empty.

    Reads module-level N (board size), coordinates (1-indexed blocked squares)
    and type ("nrook"/"nqueen"/"nknight" — chooses the piece letter).
    """
    boardstr = ""
    for row in range(N):
        boardstr += "\n"
        for col in range(N):
            if [row+1, col+1] in coordinates:
                boardstr+= "X "
            elif board[row][col] == 1:
                if type == "nknight":
                    boardstr+= "K "
                elif type == "nqueen":
                    boardstr+= "Q "
                elif type == "nrook":
                    boardstr+= "R "
            else:
                boardstr+= "_ "
    return boardstr
# Add a piece to the board at the given position, and return a new board (doesn't change original)
def add_piece(board, row, col):
    """Return a copy of board with a piece at (row, col); the input is untouched."""
    new_row = board[row][:col] + [1] + board[row][col + 1:]
    return board[:row] + [new_row] + board[row + 1:]
def check_diagonals(board, row, col):
    """Return True if no piece sits on either diagonal through (row, col).

    Walks the four diagonal directions from (row, col) out to the edge of
    the N x N board (N is module-level).
    """
    for d_row, d_col in ((1, 1), (-1, -1), (-1, 1), (1, -1)):
        r, c = row, col
        while 0 <= r < N and 0 <= c < N:
            if board[r][c] == 1:
                return False
            r += d_row
            c += d_col
    return True
def check_attacking_nights(board, r, c):
    """Return True if a knight placed at (r, c) would attack no existing knight.

    A knight at (r, c) attacks the up-to-8 squares one knight's move away.
    Fix: the original forgot the (r+2, c+1) square, so two mutually attacking
    knights could both be placed.  The board is square, so its side length is
    taken from len(board).
    """
    size = len(board)
    # All eight knight-move offsets.
    moves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2),
             (1, -2), (1, 2), (2, -1), (2, 1))
    for dr, dc in moves:
        nr, nc = r + dr, c + dc
        if 0 <= nr < size and 0 <= nc < size and board[nr][nc] == 1:
            return False
    return True
def successors_rook(board):
    """Boards obtained by placing one rook in the next empty column.

    Skips blocked coordinates and rows that already hold a piece; reads
    module-level N and coordinates.
    """
    succ_board = []
    # we are going to place rooks to the leftmost empty column
    # number of already placed rooks help us get the column number
    col = count_pieces(board)
    for row in range(0, N):
        # the input coordinates start from 1, not 0 hence r+1, c+1 to match the scale
        if [row+1,col+1] not in coordinates:
            if count_on_row(board, row) == 0:
                temp_board = add_piece(board, row, col)
                succ_board.append(temp_board)
    return succ_board
def successors_queen(board):
    """Boards obtained by placing one queen in the next empty column.

    Like successors_rook but additionally rejects squares whose diagonals
    are occupied; reads module-level N and coordinates.
    """
    succ_board = []
    # we are going to place queens to the leftmost empty column
    # number of already placed queens help us get the column number
    col = count_pieces(board)
    for row in range(0, N):
        # the input coordinates start from 1, not 0 hence r+1, c+1 to match the scale
        if [row+1,col+1] not in coordinates:
            if count_on_row(board, row) == 0:
                proceed = check_diagonals(board, row, col)
                if proceed:
                    temp_board = add_piece(board, row, col)
                    succ_board.append(temp_board)
    return succ_board
def successors_knight(board):
    """Boards obtained by placing one knight on any free, non-attacked square.

    Unlike rooks/queens, knights may share rows/columns, so every empty
    unblocked square is tried; reads module-level N and coordinates.
    """
    succ_board = []
    for c in range(0, N):
        for r in range(0, N):
            # the input coordinates start from 1, not 0 hence r+1, c+1 to match the scale
            if [r+1,c+1] not in coordinates:
                if board[r][c] != 1:
                    proceed = check_attacking_nights(board, r, c)
                    if proceed:
                        temp_board = add_piece(board, r, c)
                        succ_board.append(temp_board)
    return succ_board
# check if board is a goal state
def is_goal(board):
    """N pieces placed with at most one per row and column (rook/queen goal)."""
    return count_pieces(board) == N and \
           all([count_on_row(board, r) <= 1 for r in range(0, N)]) and \
           all([count_on_col(board, c) <= 1 for c in range(0, N)])
def is_nknight_goal(board):
    """Knight goal: N pieces placed (non-attacking is enforced during generation)."""
    return count_pieces(board) == N
# Solve n-rooks!
def solve(initial_board):
    """Depth-first search over successor boards; return the first goal or False.

    Dispatches on the module-level `type` ("nrook"/"nqueen"/"nknight") to pick
    the successor generator and goal test.  The fringe is used as a stack
    (pop from the end), making this DFS.
    """
    fringe = [initial_board]
    while len(fringe) > 0:
        if type == "nrook":
            for s in successors_rook(fringe.pop()):
                if is_goal(s):
                    return (s)
                fringe.append(s)
        elif type == "nqueen":
            for s in successors_queen(fringe.pop()):
                if is_goal(s):
                    return (s)
                fringe.append(s)
        elif type == "nknight":
            for s in successors_knight(fringe.pop()):
                if is_nknight_goal(s):
                    return (s)
                fringe.append(s)
    return False
# This is N, the size of the board. It is passed through command line arguments.
num_arg = len(sys.argv)
if num_arg < 2:
print("Please input type of piece: nrook/nqueen/nknight.")
else:
type = sys.argv[1]
if num_arg < 3:
print("Please input the board size.")
else:
N = int(sys.argv[2])
# time_beg = time.time()
list_coordinates = []
if num_arg >= 4:
num_coord = int(sys.argv[3])
if num_coord > 0:
for arg in sys.argv[4:]:
# put all the coordinates in the list_coordinates list
list_coordinates.append(int(arg))
if len(list_coordinates) != 2*num_coord:
print("Check the number of cordinates passed")
coordinates = []
# break the coordinates in the x,y format and store them into coordinates
coordinates = [list_coordinates[x:x+2] for x in range(0, len(list_coordinates) - 1, 2)]
# The board is stored as a list-of-lists. Each inner list is a row of the board.
# A zero in a given square indicates no piece, and a 1 indicates a piece.
initial_board = [[0] * N] * N
print("Starting from initial board:\n" + printable_board(initial_board) + "\n\nLooking for solution...\n")
solution = solve(initial_board)
# while using BFS enable the next line
# solution = solveBFS(initial_board)
print(printable_board(solution) if solution else "Sorry, no solution found. :(")
# time_end = time.time()
# time_elapsed = time_end - time_beg
# print("Time taken: ", time_elapsed)
| ranjanasinha89/N-Queen-Problem | a0.py | a0.py | py | 7,435 | python | en | code | 0 | github-code | 90 |
13997530348 |
import cv2
import numpy as np
from faceplusplus_sdk import Detect
from config import Config
import sys
import json
import os
import utils
def readPoints(path):
    """Load per-image facial landmarks from the JSON files under `path`.

    Each JSON file maps landmark name -> {"x": ..., "y": ...}.  Landmarks
    named in Config['eliminated'] are recorded by index so their triangles
    can be skipped later.  Returns (eliminated indices plus 8 trailing
    boundary-point indices, [[x, y], ...] per image, raw JSON dict per image).
    NOTE(review): assumes every JSON file lists landmarks in the same order —
    confirm, since indices are compared across images.
    """
    pointsArray = []
    json_array = []
    eliminated_index = []
    listdir = os.listdir(path)
    listdir.sort()
    for txtFile in listdir:
        #print(txtFile)
        points = []
        with open(path + txtFile) as json_file:
            data = json.load(json_file)
            for key, item in data.items():
                points.append([item['x'], item['y']]) #load the json from the file
                if key in Config['eliminated']:
                    eliminated_index.append(len(points) - 1)
        pointsArray.append(points) #put the [x,y] into the points array
        json_array.append(data)
    return eliminated_index + [len(pointsArray[0]) + i for i in range(8)], pointsArray, json_array #return two formats of the points
def readImages(path):
    """Read every image in `path` as a float32 array scaled to [0, 1].

    Returns (index of the Config['background'] image within the list, images).
    NOTE(review): BG stays 0 when the background file is absent — confirm
    that falling back to the first image is intended.
    """
    imagesArray = []
    listdir = os.listdir(path)
    listdir.sort()
    BG = 0
    for imageFile in listdir:
        #print(imageFile)
        if imageFile == '.DS_Store':
            continue
        if imageFile == Config['background']:
            BG = len(imagesArray)
        img = cv2.imread(os.path.join(path, imageFile))
        img = np.float32(img) / 255.0
        imagesArray.append(img)
    return BG, imagesArray
def Normalization(w, h, allPoints, allPoints_json, images):
imagesNorm = []
pointsNorm = [] #the normalized points and images
boundaryPts = np.array([
(0, 0),
(w / 2, 0),
(w - 1, 0),
(w - 1, h / 2),
(w - 1, h - 1),
(w / 2, h - 1),
(0, h - 1),
(0, h / 2)
]
)
pointsAvg = np.array([[0, 0]] * len(allPoints[0])) #an array representing the final average landmarks
eyecorner_chin_Dst = [
[0.3 * w, h / 2],
[0.7 * w, h / 2],
[0.5 * w, h * 0.9]
] #the final locations of eye conners and chin
for i, image in enumerate(images):
points = allPoints[i]
#the two eye corners from the original image
eyecorner_chin_Src = [allPoints_json[i]['left_eye_left_corner'], allPoints_json[i]['right_eye_right_corner'], allPoints_json[i]['contour_chin']]
eyecorner_chin_Src = [[p['x'], p['y']] for p in eyecorner_chin_Src]
tform, img = utils.applyAffineTransform(image, eyecorner_chin_Src, eyecorner_chin_Dst, (w, h)) # transform the original image
points = np.reshape(cv2.transform(np.reshape(np.array(points), (-1, 1, 2)), tform), (-1, 2)) # transform the points
points = np.maximum(points, 0)
points = np.minimum(points, [w - 1, h - 1])
pointsAvg += points # contribute to the average points
pointsNorm.append(np.append(points, boundaryPts, axis = 0))
imagesNorm.append(img)
pointsAvg = pointsAvg / len(images)
return np.append(pointsAvg, boundaryPts, axis = 0), pointsNorm, imagesNorm
def Trianglar_affine(BG, w, h, pointsAvg, pointsNorm, imagesNorm, eliminated_index):
rect = (0, 0, w, h)
dt = utils.calculateDelaunayTriangles(rect, np.array(pointsAvg)) # the Delaunay Triangles dividing
# the final output image
output_bg = np.zeros((h, w, 3), np.float32())
for j in range(0, len(dt)):
if dt[j][0] in eliminated_index or dt[j][1] in eliminated_index or dt[j][2] in eliminated_index:
tri_in = [pointsNorm[BG][dt[j][k]] for k in range(0, 3)]
tri_out = [pointsAvg[dt[j][k]] for k in range(0, 3)]
utils.warpTriangle(imagesNorm[BG], output_bg, tri_in, tri_out)
output = np.zeros((h, w, 3), np.float32())
for i in range(0, len(imagesNorm)):
img = output_bg
for j in range(0, len(dt)):
if dt[j][0] in eliminated_index or dt[j][1] in eliminated_index or dt[j][2] in eliminated_index:
continue
tri_in = [pointsNorm[i][dt[j][k]] for k in range(0, 3)]
tri_out = [pointsAvg[dt[j][k]] for k in range(0, 3)]
utils.warpTriangle(imagesNorm[i], img, tri_in, tri_out)
output = output + img / len(imagesNorm)
return output
def main():
img_dir = Config['img_dir']
point_dir = img_dir + '_points'
result_dir = img_dir + '_result'
Detect(img_dir).run()
w = Config['w'] # the width of the final picture
h = Config['h'] # the height of the final picture
eliminated_index, allPoints, allPoints_json = readPoints(point_dir + '/')
BG, images = readImages(img_dir + '/')
#print(len(allPoints[0]))
pointsAvg, pointsNorm, imagesNorm = Normalization(w, h, allPoints, allPoints_json, images)
output = Trianglar_affine(BG, w, h, pointsAvg, pointsNorm, imagesNorm, eliminated_index)
#cv2.imshow('image', output)
#cv2.waitKey(0)
final_output = np.zeros((h, (len(images) + 1) * (w + 5), 3), dtype = np.float32())
for i, image in enumerate(imagesNorm):
final_output[:, i * (w + 5) : i * (w + 5) + w, :] = image
#cv2.imshow('image', final_output[:, i * (w + 5) : i * (w + 5) + w, :])
#cv2.waitKey(0)
final_output[:, len(images) * (w + 5) : len(images) * (w + 5) + w, :] = output
if not os.path.exists(result_dir):
os.mkdir(result_dir)
cv2.imwrite(os.path.join(result_dir, 'result.jpg'), final_output * 255)
cv2.imshow('image', final_output)
cv2.waitKey(0)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('interrupt')
sys.exit(1)
| Jeret-Ljt/average_face | pj.py | pj.py | py | 5,700 | python | en | code | 1 | github-code | 90 |
28170182779 | import numpy as np
import pandas as pd
import argparse
import math
import csv
from collections import OrderedDict
def get_args():
parser = argparse.ArgumentParser(description='Call peaks over CRISPRi screen windows.')
parser.add_argument('guide_data',help='Input flow-fish guide count data.')
parser.add_argument('output_file',help='Output file to print TSV to.')
parser.add_argument('--meta_data','-m',type=str,default=None,help='Tab-delimited metadata table. '+\
'Must contain header with `Short_name` '+\
'and `cutting_specificity_score` fields.')
parser.add_argument('--window_size','-w',type=int,default=100,help='Window size over which to subset.')
parser.add_argument('--subset_frac','-f',type=float,default=0.5,help='Maximum fraction of guides to remove by subsetting.')
parser.add_argument('--negate','-n',action='store_true',help='Negate `cutting_spcificity_score` to flip sort order.')
parser.add_argument('--random_seed','-r',type=int,default=None,help='Set random seed for '+\
'consistant sampling. '+\
'Use current date (e.g., 20200901). '+\
'Required if meta_data is None')
args = parser.parse_args()
args.step_size = args.window_size
return args
def check_args(args):
    """Validate parsed CLI arguments; raises AssertionError on bad values.

    Fix: the original asserted args.subset_frac < 0, contradicting the > 0
    check, so every valid fraction was rejected; the intended bound is < 1.
    (Also fixes the "aust"->"must" typos in the messages.)
    """
    assert args.subset_frac > 0, "subset_frac must be greater than 0."
    assert args.subset_frac < 1, "subset_frac must be less than 1."
    assert args.window_size > 0, "Windows must take up space."
    return True
def check_overlap_bed(interval, array):
    """Vectorised overlap test of one BED-style interval against many.

    `interval` is (chrom, start, end); `array` is an (n, 3) matrix with the
    same layout.  Two intervals overlap when the one with the smaller start
    ends after the other starts AND both are on the same chromosome.
    Fix: the original compared the query's chromosome column with itself
    (always True), so intervals on different chromosomes could "overlap".
    Returns a boolean array of length n.
    """
    height = array.shape[0]
    intervals = np.stack([np.tile(interval, (height, 1)), array], axis=0)
    # 1 where the query starts before the candidate, else 0 — selects which
    # interval supplies the start/end in the asymmetric comparison below.
    swaghook = (intervals[0, :, 1] < intervals[1, :, 1]).astype(int)
    chrom = (intervals[0, :, 0] == intervals[1, :, 0])
    overlap = intervals[1 - swaghook, np.arange(height), 2] > intervals[swaghook, np.arange(height), 1]
    return overlap & chrom
def main(args):
#####################
## Import data ##
#####################
guide_data = pd.read_table(args.guide_data, sep='\t', header=0, index_col=False)
if args.meta_data is not None:
meta_data = pd.read_table(args.meta_data, sep='\t', header=0, index_col='Short_name')
else:
np.random.seed(args.random_seed)
meta_data = {
'Short_name': guide_data['Coordinates'],
'cutting_specificity_score': np.random.rand( len(guide_data['Coordinates']) )
}
meta_data = pd.DataFrame(meta_data['cutting_specificity_score'],
index=meta_data['Short_name'])
if args.negate:
meta_data['cutting_specificity_score'] = -1 * meta_data['cutting_specificity_score']
else:
pass
########################################
## Fill missing metadata, link guides ##
########################################
spec_list = []
for name in guide_data['Coordinates']:
try:
spec_list.append( meta_data.loc[name,'cutting_specificity_score'] )
except KeyError:
spec_list.append( np.nan )
guide_data['cutting_specificity_score'] = spec_list
guide_data = guide_data.fillna( 0.0 )
## Split targeting and control guides
targ_data = guide_data.loc[ guide_data['Coordinates'].str.contains('chr') ]
ctrl_data = guide_data.loc[~guide_data['Coordinates'].str.contains('chr') ]
## Parse targeting coordinates
plus_offsets = [152, 147]
minus_offsets= [146, 153]
uniq_chrom= np.unique([coord.split(':')[0] for coord in targ_data['Coordinates']])
chrom2idx = OrderedDict( [ (x,i) for i,x in enumerate(uniq_chrom) ] )
idx2chrom = OrderedDict( [ (i,x) for i,x in enumerate(uniq_chrom) ] )
## Get targeting positions for each guide
pos_array = np.array([ ( chrom2idx[coord.split(':')[0]],
int(coord.split(':')[1].split('-')[1]) - plus_offsets[0],
int(coord.split(':')[1].split('-')[1]) + plus_offsets[1] ) if coord.split(':')[2] == '+'
else ( chrom2idx[coord.split(':')[0]],
int(coord.split(':')[1].split('-')[1]) - minus_offsets[0],
int(coord.split(':')[1].split('-')[1]) + minus_offsets[1] )
for coord in targ_data['Coordinates'] ])
## Get genomic windows covered on each chrom
genome_lims = OrderedDict(
[ (idx,
(pos_array[pos_array[:,0] == idx, 1].min(),
pos_array[pos_array[:,0] == idx, 2].max())
) for idx, chrom in idx2chrom.items() ]
)
sliding_window = [ (idx, np.vstack( (np.arange(*lims,args.step_size),
np.minimum(np.arange(*lims,args.step_size)+args.window_size,lims[1])) ).T
)
for idx, lims in genome_lims.items() ]
sliding_window = np.concatenate(
[ np.concatenate( (np.tile( [[idx]], (a_window.shape[0],1) ), a_window), axis=1 )
for idx, a_window in sliding_window ]
)
sliding_window = sliding_window[[ np.any(check_overlap_bed(interval,pos_array))
for interval in sliding_window ]]
## Get chromosome
chrom = targ_data['Coordinates'].iloc[0].split(':')[0]
## Work through windows and do subset
guide_filter = []
for a_window in sliding_window:
guide_capture = check_overlap_bed(a_window,pos_array)
current_slice = targ_data.loc[guide_capture]
capture_count = guide_capture.sum()
removal_count = math.floor(capture_count*args.subset_frac)
removal_count = max(0, removal_count - current_slice['Coordinates'].isin(guide_filter).sum())
current_slice = current_slice.iloc[~current_slice['Coordinates'].isin(guide_filter).values ]
remove_guides = current_slice.iloc[ current_slice['cutting_specificity_score'].argsort() ] \
.iloc[:removal_count,:] \
.loc[:,'Coordinates'].values
guide_filter = guide_filter + list(remove_guides)
final_data = pd.concat( [ctrl_data, targ_data.iloc[~targ_data['Coordinates'].isin(guide_filter).values ]] )
final_data.drop('cutting_specificity_score', axis=1) \
.to_csv(args.output_file, sep="\t", quoting=csv.QUOTE_NONE, index=False)
return None
if __name__ == "__main__":
args = get_args()
main(args)
| sjgosai/casa | src/subset_guides.py | subset_guides.py | py | 6,741 | python | en | code | 2 | github-code | 90 |
def reversedDiff(num):
    """Absolute difference between num and its decimal reversal (e.g. 12 -> 9)."""
    reversed_num = int(str(num)[::-1])
    return abs(num - reversed_num)
# Complete the beautifulDays function below.
def beautifulDays(i, j, k):
    """Count days d in [i, j] whose |d - reverse(d)| is divisible by k."""
    return sum(1 for day in range(i, j + 1)
               if abs(day - int(str(day)[::-1])) % k == 0)
return days | Anisha7/HackerRank | beautifulday.py | beautifulday.py | py | 364 | python | en | code | 0 | github-code | 90 |
# Read the number of quizzes and the duration of each one.
quiznum=int(input())
timeline=input().split(" ")
timeline=[int(n) for n in timeline]
#print(timeline)
# time_0 = total time to take all quizzes as scheduled.
time_0=0
for i in range(quiznum):
    time_0+=timeline[i]
#print(time_0)
# For each query (a, b): quiz a's duration is replaced by b;
# print the resulting total time.
medinum=int(input())
for i in range(medinum):
    temp_line=input().split(" ")
    a=int(temp_line[0])
    b=int(temp_line[1])
    time=time_0 - timeline[a-1] + b
    print(time)
14444769816 | import collections, functools, time, itertools
import numpy as np, matplotlib.pyplot as plt
from matplotlib.colors import TABLEAU_COLORS
class BBox:
    """Axis-aligned bounding box given by its lower corner and edge lengths."""

    def __init__(self, base, sizes):
        self.base = base
        self.sizes = sizes
        self.half_sizes = sizes / 2
        # Length of the box diagonal, used by the Barnes-Hut opening criterion.
        self.diameter = np.linalg.norm(sizes)

    def split(self, index):
        """Return the half-sized sub-box selected by a 0/1 index per axis."""
        return BBox(self.base + index * self.half_sizes, self.half_sizes)

    @classmethod
    def from_xs(cls, xs):
        """Smallest BBox enclosing all points in xs."""
        lower = functools.reduce(np.minimum, xs)
        upper = functools.reduce(np.maximum, xs)
        return cls(lower, upper - lower)
class BHTree:
    """Barnes-Hut quadtree/octree node over unit-mass particles."""

    def __init__(self, bbox, xs):
        # Spatial dimension: bbox.base is a 1-D coordinate vector.
        # Fix: the original used len(bbox.base.shape), which is always 1.
        dim = bbox.base.shape[0]
        self.bbox = bbox
        self.total_mass = len(xs)  # every particle has unit mass
        if len(xs) == 0:
            self.com = np.zeros(dim)
            self.subtrees = []
        elif len(xs) == 1:
            self.com = xs[0]
            self.subtrees = []
        else:
            # Bucket the points into the 2^dim half-sized sub-boxes.
            subboxes = collections.defaultdict(lambda: [])
            for x in xs:
                subbox_index = tuple(np.round((x - bbox.base) / bbox.sizes))
                subboxes[subbox_index].append(x)
            self.subtrees = [BHTree(bbox.split(index), sub_xs) for index, sub_xs in subboxes.items()]
            # Mass-weighted centre of mass of the children.
            self.com = sum(subtree.total_mass * subtree.com for subtree in self.subtrees) / self.total_mass

    def compute_force(self, x, eps, h, G):
        """Softened gravitational force on a unit mass at x.

        eps is the Barnes-Hut opening threshold, h the softening length.
        Fix: removed a stray debug print that fired on every far-field node.
        """
        d = self.com - x
        dist = np.linalg.norm(d)
        if dist == 0.0:
            return np.zeros(len(x))
        if len(self.subtrees) <= 1 or self.bbox.diameter / dist < eps:  # barnes hut condition
            return G * self.total_mass / (h + dist)**3 * d  # units/scales are in G
        else:
            return sum(subtree.compute_force(x, eps, h, G) for subtree in self.subtrees)
def compute_forces_bh(xs, eps, h, G):
    """Approximate pairwise gravity on every particle via a Barnes-Hut tree."""
    tree = BHTree(BBox.from_xs(xs), xs)
    return [tree.compute_force(x, eps, h, G) for x in xs]
def compute_force_direct(xs, h, G):
    """Exact O(n^2) pairwise softened gravity on unit-mass particles.

    Fix: the inner loop iterated over the components of `x` (for x_prime
    in x) instead of over all particles `xs`, so pairwise forces were never
    accumulated correctly.  h is the softening length.
    """
    forces = []
    for x in xs:
        force = np.zeros(len(xs[0]))
        for x_prime in xs:
            d = x_prime - x
            dist = np.linalg.norm(d)
            if dist == 0.0:
                continue  # skip self-interaction
            force += G / (h + dist)**3 * d
        forces.append(force)
    return forces
def run_velocity_verlet(compute_force, xs, vs, args, nsteps, dt):
    """Integrate nsteps of velocity-Verlet; return the positions after each step.

    compute_force(xs, *args) must return one force vector per particle.
    """
    print("step: 0")
    forces = compute_force(xs, *args)
    history = []
    for step in range(nsteps):
        print("step:", step + 1)
        # Drift: advance positions using the current velocities and forces.
        xs = [x + dt * v + 0.5 * dt**2 * F for x, v, F in zip(xs, vs, forces)]
        next_forces = compute_force(xs, *args)
        # Kick: velocities use the average of the old and new forces.
        vs = [v + 0.5 * dt * (F_old + F_new) for v, F_old, F_new in zip(vs, forces, next_forces)]
        forces = next_forces
        history.append(xs)
    return history
np.random.seed(42)
nstars = 1000
radius = 10.0
height = 0
G = 1e-3
rel_zv = 0.001
eps = 1.0
h = 1
tspan = 3
dt = 1
nsteps = int(tspan / dt)
alphas = np.random.uniform(0, 2*np.pi, nstars)
rs = np.random.uniform(0, radius, nstars)
np.sort(rs)
c, s = np.cos(alphas), np.sin(alphas)
x = rs * c
y = rs * s
z = np.random.uniform(-height/2, height/2, nstars)
xs = np.vstack([x, y, z]).T
M = np.arange(nstars) # integrated mass
v = 0 # np.sqrt(G * M / rs) # speed of circular orbit
vx = - v * s
vy = v * c
vz = rel_zv * v * np.random.uniform(-0.5, 0.5, nstars)
vs = np.vstack([vx, vy, vz]).T
start = time.time()
history_bh = run_velocity_verlet(compute_forces_bh, xs, vs, (eps, h, G), nsteps, dt)
end = time.time()
print("bh time:", end - start)
start = time.time()
history_direct = run_velocity_verlet(compute_force_direct, xs, vs, (h, G), nsteps, dt)
end = time.time()
print("direct time:", end - start)
fig = plt.figure()
ax = fig.add_subplot() # projection="3d")
for i, c in zip(range(len(history_bh[0])), itertools.cycle(TABLEAU_COLORS)):
ax.plot([history_bh[k][i][0] for k in range(len(history_bh))],
[history_bh[k][i][1] for k in range(len(history_bh))], color=c)
# [history_bh[k][i][2] for k in range(len(history_bh))])
for i, c in zip(range(len(history_direct[0])), itertools.cycle(TABLEAU_COLORS)):
ax.plot([history_direct[k][i][0] for k in range(len(history_direct))],
[history_direct[k][i][1] for k in range(len(history_direct))], ls="--", color=c)
# [history_direct[k][i][2] for k in range(len(history_direct))])
plt.show()
| anna-jana/numerics-physics-stuff | barnes_hut.py | barnes_hut.py | py | 4,388 | python | en | code | 3 | github-code | 90 |
40374438401 | # coding:utf8
import re
import datetime
import requests
import io
def get_stock_type(stock_code):
    """Return the exchange ("sh" or "sz") a stock code belongs to.

    Rules (checked in order):
      * codes already prefixed with "sh"/"sz" return that prefix;
      * prefixes 50/51/60/90/110/113/132/204 -> "sh";
      * prefixes 00/13/15/16/18/20/30/39/115 -> "sz";
      * any remaining code starting with 5/6/9/7 -> "sh";
      * everything else -> "sz".

    :param stock_code: stock code string, optionally prefixed with "sh"/"sz".
    :return: "sh" or "sz"
    """
    assert type(stock_code) is str, "stock code need str type"
    if stock_code.startswith(("sh", "sz")):
        return stock_code[:2]
    if stock_code.startswith(
        ("50", "51", "60", "90", "110", "113", "132", "204")
    ):
        return "sh"
    # Cleaned-up prefix list: "18" was listed twice and "1318" was dead code
    # (any code matching "1318" already matches "13").  Behavior unchanged.
    if stock_code.startswith(
        ("00", "13", "15", "16", "18", "20", "30", "39", "115")
    ):
        return "sz"
    if stock_code.startswith(("5", "6", "9", "7")):
        return "sh"
    return "sz"
def get_code_type(code):
    """Classify a security code as 'stock' or 'fund'.

    Only the two categories ['fund', 'stock'] are supported.

    :param code: security code string.
    :return: "stock" for codes starting with 00/30/60, otherwise "fund".
    """
    stock_prefixes = ("00", "30", "60")
    return "stock" if code.startswith(stock_prefixes) else "fund"
def get_all_stock_codes():
    """Fetch every stock code listed in shdjt.com's JS stock index.

    :return: list of numeric stock-code strings scraped from the page.
    """
    all_stock_codes_url = "http://www.shdjt.com/js/lib/astock.js"
    # BUGFIX: the pattern must be a raw string; "\d" in a plain literal is an
    # invalid escape sequence (DeprecationWarning since 3.6, SyntaxWarning in
    # newer Pythons).
    grep_stock_codes = re.compile(r"~(\d+)`")
    response = requests.get(all_stock_codes_url)
    stock_codes = grep_stock_codes.findall(response.text)
    return stock_codes
def round_price_by_code(price, code):
    """Format a security price with the precision its code type requires.

    Funds are quoted with 3 decimal places, stocks with 2.  Strings are
    assumed to be pre-formatted and returned unchanged.

    :param price: price as a number, or an already formatted string.
    :param code: security code used to pick the precision.
    :return: price formatted as a string.
    """
    if isinstance(price, str):
        return price
    decimals = 3 if get_code_type(code) == "fund" else 2
    return "{:.{}f}".format(price, decimals)
def get_ipo_info(only_today=False):
    """Scrape the Sina new-stock-issue (IPO) table into a DataFrame.

    :param only_today: when True, keep only rows whose issue date is today.
    :return: pandas.DataFrame holding the IPO table.
    """
    import pyquery

    page = requests.get(
        "http://vip.stock.finance.sina.com.cn/corp/go.php/vRPD_NewStockIssue/page/1.phtml",
        headers={"accept-encoding": "gzip, deflate, sdch"},
    )
    # The page is served in GBK, not UTF-8.
    decoded = page.content.decode("gbk")
    table_html = pyquery.PyQuery(decoded)("#con02-0").html()

    import pandas as pd

    frame = pd.read_html(
        io.StringIO(table_html),
        skiprows=3,
        converters={"证券代码": str, "申购代码": str},
    )[0]
    if only_today:
        today = datetime.datetime.now().strftime("%Y-%m-%d")
        frame = frame[frame["上网发行日期↓"] == today]
    return frame
| shidenggui/easyutils | easyutils/stock.py | stock.py | py | 2,620 | python | en | code | 86 | github-code | 90 |
36678332747 | import contextlib
import os
import time
import requests
import settings
def pytest_sessionstart(session):
    """ Sleeps for up to 60 seconds before session.main() is called. """
    registry_url = os.getenv("SCHEMA_REGISTRY_LISTENERS")
    # 120 polls x 0.5 s = at most 60 seconds of waiting.
    for attempt in range(120):
        waited = attempt / 2
        print(
            "Waiting for schema-registry to start: {seconds} seconds waited"
            .format(seconds=waited)
        )
        # Connection errors simply mean "not up yet" -- ignore and retry.
        with contextlib.suppress(Exception):
            if requests.get(registry_url).ok:
                print("Waited {seconds} seconds for schema-registry to start".format(seconds=waited))
                return
        time.sleep(.5)
| Akino1976/kafka-python | kafka-python-cp/tests/system/conftest.py | conftest.py | py | 648 | python | en | code | 0 | github-code | 90 |
6250750142 | import functools
import json
import logging
import itertools
import bisect
from packageurl import PackageURL
from sbomnix.cpe import CPE
from sbomnix.utils import (
LOGGER_NAME,
LOG_SPAM,
)
###############################################################################
_LOG = logging.getLogger(LOGGER_NAME)
###############################################################################
class SkipDrv(RuntimeError):
    """Raised when a derivation cannot be treated as a package."""
def components_lt(left, right):
    """Port from nix/src/libexpr/names.cc"""
    def as_int(component):
        # Numeric components compare as integers; return None otherwise.
        try:
            return int(component)
        except ValueError:
            return None

    lnum = as_int(left)
    rnum = as_int(right)
    # Two numbers: plain integer ordering.
    if lnum is not None and rnum is not None:
        return lnum < rnum
    # Empty component sorts before any number.
    if left == "" and rnum is not None:
        return True
    # "pre" sorts before everything except another "pre".
    if left == "pre" and right != "pre":
        return True
    if right == "pre":
        return False
    # A number sorts after any non-numeric component.
    if rnum is not None:
        return True
    if lnum is not None:
        return False
    # Two non-numeric components: lexicographic.
    return left < right
def category(char):
    """Classify `char` into: punctuation (0), digit (1), non-digit (2)."""
    if char in {".", "-"}:
        return 0
    # Explicit ASCII digit set: str.isdigit would also accept Unicode digits,
    # which must stay in the "non-digit" bucket here.
    if char in {"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}:
        return 1
    return 2
def split_components(v):
    """Yield cohesive groups of digits or non-digits. Skip punctuation."""
    def _cat(ch):
        # Same classification as category(): punctuation / digit / other.
        if ch in (".", "-"):
            return 0
        if ch in "0123456789":
            return 1
        return 2

    start = 0
    length = len(v)
    while start < length:
        kind = _cat(v[start])
        stop = start + 1
        # Extend the run while the classification stays the same.
        while stop < length and _cat(v[stop]) == kind:
            stop += 1
        # Punctuation runs are separators and are dropped.
        if kind != 0:
            yield v[start:stop]
        start = stop
def compare_versions(left, right):
    """Compare two versions with the same logic as `nix-env -u`.

    Returns -1 if `left` is older than `right`, 1 if `left` is newer
    than `right`, and 0 if both versions are considered equal.

    See https://nixos.org/nix/manual/#ssec-version-comparisons for rules
    and examples.
    """
    if left == right:
        return 0
    component_pairs = itertools.zip_longest(
        split_components(left), split_components(right), fillvalue=""
    )
    for left_part, right_part in component_pairs:
        if left_part == right_part:
            continue
        if components_lt(left_part, right_part):
            return -1
        if components_lt(right_part, left_part):
            return 1
        # Neither side is strictly smaller (e.g. "1" vs "01"): keep scanning.
    return 0
################################################################################
def load(path):
    """Load and parse a derivation (.drv) file from the Nix store.

    :param path: filesystem path of the .drv file.
    :return: Derive instance with `store_path` set to `path`.

    SECURITY NOTE: .drv files happen to be valid Python syntax and are
    parsed with eval().  Builtins are stripped and only `Derive` is exposed,
    but this must only ever be run on trusted Nix store contents.
    """
    _LOG.debug("")
    with open(path, encoding="utf-8") as f:
        d_obj = eval(f.read(), {"__builtins__": {}, "Derive": Derive}, {})
    d_obj.store_path = path
    _LOG.debug("load derivation: %s", d_obj)
    if _LOG.level <= LOG_SPAM:
        # Fixed typo in the log message ("deivation" -> "derivation").
        _LOG.log(LOG_SPAM, "derivation attrs: %s", d_obj.to_dict())
    return d_obj
def destructure(env):
    """Decode Nix 2.0 ``__structuredAttrs`` stored as JSON under "__json"."""
    raw_attrs = env["__json"]
    return json.loads(raw_attrs)
@functools.total_ordering
class Derive:
    """Nix derivation as found as .drv files in the Nix store.

    Equality compares store paths; ordering compares (pname, version)
    using nix-env's version-comparison rules (total_ordering fills in the
    remaining comparison operators from __eq__ and __lt__).
    """

    # Set after construction by load(); None for in-memory derivations.
    store_path = None

    def __init__(
        self,
        _outputs=None,
        _inputDrvs=None,
        _inputSrcs=None,
        _system=None,
        _builder=None,
        _args=None,
        envVars=None,
        _derivations=None,
        name=None,
        patches=None,
    ):
        """Create a derivation from a .drv file.

        The derivation files are just accidentally Python-syntax, but
        hey! :-)  Underscore-prefixed parameters exist only to absorb the
        positional fields of the Derive(...) call inside a .drv file.
        """
        if envVars is None:
            envVars = {}
        envVars = dict(envVars)
        _LOG.log(LOG_SPAM, envVars)
        self.name = name or envVars.get("name")
        if not self.name:
            # Nix 2.0 structured attrs keep the metadata as JSON in __json.
            self.name = destructure(envVars)["name"]
        pname = envVars.get("pname", self.name)
        # pname read from envVars might not match the pname in nixpkgs.
        # As an example 'Authen-SASL' full pname is 'perl5.36.0-Authen-SASL'
        # Below, we reconstruct the full pname based on self.name which
        # contains the full pname:
        self.pname = self.name.partition(pname)[0] + pname
        self.version = envVars.get("version", "")
        self.patches = patches or envVars.get("patches", "")
        self.system = envVars.get("system", "")
        self.out = [envVars.get("out", "")]
        # pname 'source' in Nix has special meaning - it is the default name
        # for all fetchFromGitHub derivations. As such, it should not be used
        # to construct cpe or purl, rather, cpe and purl should be empty
        # for such packages.
        self.cpe = ""
        self.purl = ""
        if self.pname != "source":
            self.cpe = CPE().generate(self.pname, self.version)
            self.purl = str(
                PackageURL(type="nix", name=self.pname, version=self.version)
            )
        self.urls = envVars.get("urls", "")

    def __repr__(self):
        return f"<Derive({repr(self.name)})>"

    def __eq__(self, other):
        # BUGFIX: the original returned `NotImplementedError()` here.  Any
        # object returned from __eq__ is used as the comparison result, so
        # that (truthy) exception instance made `drv == <non-Derive>` True.
        # Returning the NotImplemented sentinel lets Python fall back to its
        # default comparison instead.
        if type(self) != type(other):
            return NotImplemented
        return self.store_path == other.store_path

    def __hash__(self):
        # NOTE(review): hash uses `name` while __eq__ uses `store_path`.
        # Objects loaded from the same .drv share both, so the hash/eq
        # invariant holds in practice; distinct derivations with the same
        # name merely collide (correct, just slower) -- confirm intended.
        return hash(self.name)

    def __lt__(self, other):
        # Order by package name first, then by nix-env version comparison.
        if self.pname < other.pname:
            return True
        if self.pname > other.pname:
            return False
        return compare_versions(self.version, other.version) == -1

    def __gt__(self, other):
        if self.pname > other.pname:
            return True
        if self.pname < other.pname:
            return False
        return compare_versions(self.version, other.version) == 1

    def add_outpath(self, path):
        """Add an outpath to derivation (kept sorted, duplicates skipped)."""
        if path not in self.out and path != self.store_path:
            _LOG.debug("adding outpath to %s:%s", self, path)
            bisect.insort(self.out, path)

    def to_dict(self):
        """Return derivation attributes as a plain dictionary."""
        ret = {}
        for attr in vars(self):
            ret[attr] = getattr(self, attr)
        return ret
| weisong-gmail/sbomnix | sbomnix/derivation.py | derivation.py | py | 5,970 | python | en | code | 0 | github-code | 90 |
18232445919 | from collections import defaultdict
# Competitive-programming DP (reads N and a list A from stdin).
# B pairs each value with its 1-based original position, sorted descending,
# so the largest values are placed first.  dp[(x, y)] is the best total after
# placing the S = x + y largest values: x of them pushed to the left end and
# y to the right end; each placement scores value * distance moved.
N = int(input())
A = list(map(int, input().split()))
B = sorted([(A[i], i + 1) for i in range(N)], reverse=True)
dp = defaultdict(int)
for S in range(1, N + 1):
    for x in range(S + 1):
        y = S - x
        candid = []
        if x >= 1:
            # Place the S-th largest value at left slot x: it moves from its
            # original position B[S-1][1] to position x.
            candid.append(dp[(x - 1, y)] + B[S - 1][0]
                          * (B[S - 1][1] - x))
        if y >= 1:
            # Or place it at right slot y (position N - y + 1).
            candid.append(dp[(x, y - 1)] + B[S - 1][0]
                          * ((N - y + 1) - B[S - 1][1]))
        dp[(x, y)] = max(candid)
# Answer: best split of all N values between the two ends.
print(max([dp[(i, N - i)] for i in range(N + 1)]))
| Aasthaengg/IBMdataset | Python_codes/p02709/s395557356.py | s395557356.py | py | 602 | python | en | code | 0 | github-code | 90 |
10302637874 | import sys
from collections import deque
# BOJ 14248: stones in a row; from stone i you may jump graph[i] steps left
# or right.  BFS marks every stone reachable from `start`; the final line of
# the script prints sum(visit), the count of reachable stones.
n = int(sys.stdin.readline())
graph = list(map(int, sys.stdin.readline().split()))
start = int(sys.stdin.readline())
visit = [0 for _ in range(n)]
queue = deque()
queue.append(start - 1)
visit[start - 1] = 1
while queue:
    current = queue.popleft()
    back = current - graph[current]
    front = current + graph[current]
    # BUGFIX: the original compared with `visit[back] is 0`, which tests
    # object identity, not value.  It only appeared to work because CPython
    # interns small integers; it is a SyntaxWarning on modern Pythons and
    # undefined behaviour elsewhere.  Use `==` for value comparison.
    if 0 <= back and visit[back] == 0:
        visit[back] = 1
        queue.append(back)
    if front < n and visit[front] == 0:
        visit[front] = 1
        queue.append(front)
print(sum(visit)) | kjh9267/BOJ_Python | BFS/14248.py | 14248.py | py | 563 | python | en | code | 0 | github-code | 90 |
5543729212 | # 골드바흐의 추측
import sys

# Goldbach's conjecture (BOJ 6588): for each even number, print one
# decomposition into two primes, or a failure message.
LIMIT = int(1e6)

# Sieve of Eratosthenes: a[i] is True iff i is prime.
a = [False, False] + [True] * (LIMIT - 1)
for i in range(2, LIMIT + 1):
    if a[i]:
        for j in range(i * i, LIMIT + 1, i):
            a[j] = False

while True:
    val = int(sys.stdin.readline().rstrip())
    if val == 0:
        break
    flag = 0
    # BUGFIX: the original first ran `if prime_num and val - prime_num:`,
    # which only tests that both numbers are non-zero (truthiness), not that
    # they are prime, and then ran the sieve-based check as well -- printing
    # two (often conflicting) answers per input.  Only the sieve check is
    # kept.  The range is also capped at val // 2 so `val - idx` can never
    # go negative (a negative index would silently wrap to the end of the
    # sieve list); any valid decomposition has its smaller prime <= val / 2,
    # so the printed output is unchanged.
    for idx in range(2, val // 2 + 1):
        if a[idx] and a[val - idx]:
            print(str(val) + " = " + str(idx) + " + " + str(val - idx))
            flag = 1
            break
    if flag == 0:
        print("Goldbach's conjecture is wrong.")
# print("Goldbach's conjecture is wrong.") | SteadyKim/Algorism | language_PYTHON/BJ6588.py | BJ6588.py | py | 1,285 | python | en | code | 0 | github-code | 90 |
18549481249 | import sys
input = sys.stdin.readline
def main():
    """Read three integers and print the minimum number of operations.

    CNT is the total deficit of the three values relative to their maximum.
    If the deficit is even it can be covered in CNT // 2 operations;
    otherwise two extra operations are needed (presumably each operation
    raises two values by 1 or one value by 2 -- problem statement not
    visible here, TODO confirm).
    """
    num = list(map(int, input().split()))
    MAX = max(num)
    CNT = 0
    # Accumulate how far each value is below the maximum.
    for i in range(3):
        num[i] -= MAX
        CNT -= num[i]
    if CNT%2 == 0:
        ans = CNT//2
        print(ans)
    else:
        ans = CNT//2 + 2
        print(ans)
if __name__ == '__main__':
main() | Aasthaengg/IBMdataset | Python_codes/p03387/s329263968.py | s329263968.py | py | 341 | python | en | code | 0 | github-code | 90 |
34769336430 | # -*- coding: utf-8 -*-
'''
Unit Tests for functions located in salt.utils.files.py.
'''
# Import python libs
from __future__ import absolute_import
import os
import shutil
import tempfile
# Import Salt libs
import salt.utils.files
# Import Salt Testing libs
from tests.support.paths import TMP
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
patch,
NO_MOCK,
NO_MOCK_REASON
)
class FilesUtilTestCase(TestCase):
    '''
    Test case for files util.
    '''
    @skipIf(NO_MOCK, NO_MOCK_REASON)
    def test_safe_rm(self):
        # safe_rm should delegate to os.remove for an existing target.
        with patch('os.remove') as os_remove_mock:
            salt.utils.files.safe_rm('dummy_tgt')
            self.assertTrue(os_remove_mock.called)
    @skipIf(os.path.exists('/tmp/no_way_this_is_a_file_nope.sh'), 'Test file exists! Skipping safe_rm_exceptions test!')
    def test_safe_rm_exceptions(self):
        # safe_rm must swallow IOError/OSError for a missing file instead of
        # propagating them.
        error = False
        try:
            salt.utils.files.safe_rm('/tmp/no_way_this_is_a_file_nope.sh')
        except (IOError, OSError):
            error = True
        self.assertFalse(error, 'salt.utils.files.safe_rm raised exception when it should not have')
    def test_safe_walk_symlink_recursion(self):
        # Build a directory tree containing a symlink cycle
        # (foo/bar/baz -> ../..) and verify safe_walk visits each real
        # directory once instead of recursing forever.
        tmp = tempfile.mkdtemp(dir=TMP)
        try:
            if os.stat(tmp).st_ino == 0:
                self.skipTest('inodes not supported in {0}'.format(tmp))
            os.mkdir(os.path.join(tmp, 'fax'))
            os.makedirs(os.path.join(tmp, 'foo/bar'))
            os.symlink('../..', os.path.join(tmp, 'foo/bar/baz'))
            os.symlink('foo', os.path.join(tmp, 'root'))
            expected = [
                (os.path.join(tmp, 'root'), ['bar'], []),
                (os.path.join(tmp, 'root/bar'), ['baz'], []),
                (os.path.join(tmp, 'root/bar/baz'), ['fax', 'foo', 'root'], []),
                (os.path.join(tmp, 'root/bar/baz/fax'), [], []),
            ]
            paths = []
            for root, dirs, names in salt.utils.files.safe_walk(os.path.join(tmp, 'root')):
                paths.append((root, sorted(dirs), names))
            if paths != expected:
                raise AssertionError(
                    '\n'.join(
                        ['got:'] + [repr(p) for p in paths] +
                        ['', 'expected:'] + [repr(p) for p in expected]
                    )
                )
        finally:
            # Always clean up the temporary tree, pass or fail.
            shutil.rmtree(tmp)
| sacren/salt | tests/unit/utils/test_files.py | test_files.py | py | 2,400 | python | en | code | 0 | github-code | 90 |
37565739386 | # Chinese CCGbank conversion
# ==========================
# (c) 2008-2012 Daniel Tse <cncandc@gmail.com>
# University of Sydney
# Use of this software is governed by the attached "Chinese CCGbank converter Licence Agreement"
# supplied in the Chinese CCGbank conversion distribution. If the LICENCE file is missing, please
# notify the maintainer Daniel Tse <cncandc@gmail.com>.
import unittest
from munge.penn.parse import *
class PennParseTests(unittest.TestCase):
    """Unit tests for parsing Penn Treebank bracketed derivations."""
    def testParseSinglePennDeriv(self):
        # A full PTB sentence should parse into exactly one document whose
        # string form is the normalized single-line bracketing.
        deriv = '''
( (S
    (NP-SBJ
      (NP (NNP Pierre) (NNP Vinken) )
      (, ,)
      (ADJP
        (NP (CD 61) (NNS years) )
        (JJ old) )
      (, ,) )
    (VP (MD will)
      (VP (VB join)
        (NP (DT the) (NN board) )
        (PP-CLR (IN as)
          (NP (DT a) (JJ nonexecutive) (NN director) ))
        (NP-TMP (NNP Nov.) (CD 29) )))
    (. .) ))'''
        docs = parse_tree(deriv, PennParser)
        self.assertEqual(len(docs), 1)
        self.assertEqual(str(docs[0]), r'((S (NP-SBJ (NP (NNP Pierre) (NNP Vinken)) (, ,) (ADJP (NP (CD 61) (NNS years)) (JJ old)) (, ,)) (VP (MD will) (VP (VB join) (NP (DT the) (NN board)) (PP-CLR (IN as) (NP (DT a) (JJ nonexecutive) (NN director))) (NP-TMP (NNP Nov.) (CD 29)))) (. .)))')
    def testIgnoresWhitespace(self):
        # Arbitrary spaces, newlines and carriage returns between tokens
        # must not affect the parsed structure.
        deriv = '''
        ( (A (B c) (D e    )    ( F g)
        \r\r )
        )'''
        docs = parse_tree(deriv, PennParser)
        self.assertEqual(len(docs), 1)
        doc = docs[0]
        self.assertEqual(doc.tag, 'A') # root level tag
        self.assertEqual(doc.count(), 3) # number of children
        for (actual, expected) in zip([kid.lex for kid in doc.kids], ('c', 'e', 'g')):
            self.assertEqual(actual, expected)
if __name__ == "__main__":
unittest.main()
| jogloran/cnccgbank | munge/tests/penn_parse_tests.py | penn_parse_tests.py | py | 1,843 | python | en | code | 12 | github-code | 90 |
22560739224 | import os
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Final, Optional
import tomlkit
import typer
from rich.console import Console
DOCS_VERSION_REGEX: Final = re.compile(r'(version|release)\s=\s"(\d+\.\d+\.\d+)"')
@dataclass(frozen=True)
class CommonArgs:
    """Options shared by all CLI sub-commands (stored on the typer context)."""
    pyproject_path: Path  # path to the pyproject.toml file
    docs_conf_path: Path  # path to the Sphinx conf.py module
    verbose: bool  # whether to print the detected version numbers
def get_args(ctx: typer.Context) -> CommonArgs:
    """Typed getter for the common arguments stored in the typer context."""
    common = ctx.obj
    # The callback always stores a CommonArgs instance; guard against misuse.
    assert isinstance(common, CommonArgs)  # noqa: S101
    return common
# CI runners are not TTYs: force colour output and disable interactive
# rendering (animated progress/status) so logs stay readable.
if os.environ.get("CI"):
    console = Console(force_terminal=True, force_interactive=False)
else:
    console = Console()
# Typer application; sub-commands register themselves via decorators below.
app = typer.Typer()
def check_consistency(
    pyproject_path: Path,
    docs_conf_path: Path,
    *,
    verbose: bool,
    target_version: Optional[str],
) -> bool:
    """Check that version numbers are consistent.

    Args:
        pyproject_path: path to the pyproject.toml file.
        docs_conf_path: path to the Sphinx documentation configuration module.
        verbose: whether to show the detail of the found version numbers.
        target_version: if set, pass only if the detected version numbers are also
          consistent with this target version.

    Returns:
        Whether the detected version number are consistent.
    """
    pyproject = tomlkit.parse(pyproject_path.read_text(encoding="utf-8"))
    pyproject_version = str(pyproject["tool"]["poetry"]["version"])  # type: ignore[index]
    docs_conf = docs_conf_path.read_text(encoding="utf-8")
    docs_version, docs_release = "", ""
    # Scan conf.py line by line for `version = "x.y.z"` and
    # `release = "x.y.z"` assignments (DOCS_VERSION_REGEX captures
    # the key in group 1 and the version string in group 2).
    for line in docs_conf.splitlines():
        result = DOCS_VERSION_REGEX.match(line.strip())
        if result:
            if result.group(1) == "version":
                docs_version = result.group(2)
            if result.group(1) == "release":
                docs_release = result.group(2)
            # Both found: no need to scan the rest of the file.
            if docs_version and docs_release:
                break
    if verbose:
        if target_version is not None:
            console.print(f"Target version: {target_version}")
        console.print(f"{pyproject_path}: {pyproject_version}")
        console.print(f"{docs_conf_path} (version): {docs_version or '[red]not found'}")
        console.print(f"{docs_conf_path} (release): {docs_release or '[red]not found'}")
    consistent = pyproject_version == docs_version == docs_release
    if target_version is not None:
        consistent = consistent and (pyproject_version == target_version)
    if consistent:
        console.print("[bold green]PASS")
        return True
    console.print("[bold red]FAIL")
    return False
def bump_versions(pyproject_path: Path, docs_conf_path: Path, new_version: str) -> None:
    """Update version number to match a new target.

    Args:
        pyproject_path: path to the pyproject.toml file.
        docs_conf_path: path to the Sphinx documentation configuration module.
        new_version: target version to update to.
    """
    # Rewrite the Poetry version in pyproject.toml (tomlkit preserves layout).
    toml_doc = tomlkit.parse(pyproject_path.read_text(encoding="utf-8"))
    toml_doc["tool"]["poetry"]["version"] = new_version  # type: ignore[index]
    pyproject_path.write_text(tomlkit.dumps(toml_doc), encoding="utf-8")

    # Rewrite both `version = "..."` and `release = "..."` in the Sphinx conf.
    conf_source = docs_conf_path.read_text(encoding="utf-8")
    for field in ("version", "release"):
        conf_source = re.sub(
            field + r"\s=\s\"(.*)\"", f'{field} = "{new_version}"', conf_source
        )
    docs_conf_path.write_text(conf_source, encoding="utf-8")
@app.command()
def check(ctx: typer.Context) -> None:
    """Check whether the package version numbers are consistent."""
    common = get_args(ctx)
    versions_ok = check_consistency(
        common.pyproject_path,
        common.docs_conf_path,
        verbose=common.verbose,
        target_version=None,
    )
    # Non-zero exit status signals the inconsistency to CI.
    if not versions_ok:
        raise typer.Exit(1)
@app.command()
def bump(ctx: typer.Context, new_version: str) -> None:
    """Update the package version."""
    common = get_args(ctx)
    bump_versions(common.pyproject_path, common.docs_conf_path, new_version)
    # Re-check after writing: all files must now agree on new_version.
    versions_ok = check_consistency(
        common.pyproject_path,
        common.docs_conf_path,
        verbose=common.verbose,
        target_version=new_version,
    )
    if not versions_ok:
        raise typer.Exit(1)
@app.callback()
def common_args(
    ctx: typer.Context,
    pyproject_path: Path = typer.Option(Path("pyproject.toml")),
    docs_conf_path: Path = typer.Option(Path("docs/conf.py")),
    verbose: bool = False,
) -> None:
    """Command line arguments shared between multiple sub-commands."""
    # Stash the parsed options on the context; sub-commands retrieve them
    # through get_args().
    ctx.obj = CommonArgs(
        pyproject_path=pyproject_path, docs_conf_path=docs_conf_path, verbose=verbose
    )
if __name__ == "__main__":
app()
| qiskit-community/qiskit-aqt-provider | scripts/package_version.py | package_version.py | py | 4,734 | python | en | code | 26 | github-code | 90 |
25974228256 | from django import forms
from apps.product.models import Products,ProductCategory,Banner
from apps.news.models import News
class AddProductForm(forms.ModelForm):
    """ModelForm for creating a product.

    Extra (non-model) fields carry the thumbnail URL and the two category
    ids; labels and error messages are user-facing Chinese strings and must
    not be altered.
    """
    thumbnail=forms.URLField(label='产品图片url',error_messages={"required": "产品图片url不能为空"})
    top_tag_id=forms.IntegerField(label='产品一级分类',error_messages={"required": "产品一级分类id不能为空"})
    sub_tag_id=forms.IntegerField(label='产品二级分类',error_messages={"required": "产品二级分类id不能为空"})
    class Meta:
        model=Products
        fields=['brand','thumbnail','version','describe']
        # Per-field validation messages shown to the user (in Chinese).
        error_messages = {
            'brand': {
                'max_length': "产品品牌长度不能超过100",
                'min_length': "产品品牌长度大于1",
                'required': '产品品牌不能为空',
            },
            'version': {
                'max_length': "产品型号长度不能超过200",
                'min_length': "产品型号长度大于1",
                'required': '产品型号不能为空',
            },
            'describe': {
                'max_length': "产品描述长度不能超过200",
                'min_length': "产品描述长度大于1",
                'required': '产品描述不能为空',
            },
        }
class PubNewsForm(forms.ModelForm):
    """ModelForm for publishing a news article (title + content)."""
    class Meta:
        model=News
        fields=['title','content']
        # User-facing validation messages (in Chinese).
        error_messages={
            'title': {
                'max_length': "文章标题长度不能超过200",
                'min_length': "文章标题长度大于1",
                'required': '文章标题不能为空',
            },
            'content': {
                'required': '文章内容不能为空',
            },
        }
class BannerForm(forms.ModelForm):
pass | chaiyuming/GX | apps/cms/forms.py | forms.py | py | 1,830 | python | en | code | 0 | github-code | 90 |
74765301737 | instr="""
<x=0, y=4, z=0>
<x=-10, y=-6, z=-14>
<x=9, y=-16, z=-3>
<x=6, y=-1, z=2>
"""
# instr="""
# <x=-8, y=-10, z=0>
# <x=5, y=5, z=10>
# <x=2, y=-7, z=3>
# <x=9, y=-8, z=-3>
# """
# instr="""
# <x=-1, y=0, z=2>
# <x=2, y=-10, z=-7>
# <x=4, y=-8, z=8>
# <x=3, y=5, z=-1>
# """
import re
class Moon(object):
    """A body with integer position and velocity on three axes."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
        self.vx = 0
        self.vy = 0
        self.vz = 0

    def __str__(self):
        return "pos=<x=%3i, y=%3i, z=%3i>, vel=<x=%3i, y=%3i, z=%3i>" % (
            self.x, self.y, self.z, self.vx, self.vy, self.vz)

    def gravity(self, moons):
        """Pull each velocity component one step toward every other moon."""
        for other in moons:
            if other is self:
                continue
            for axis in ("x", "y", "z"):
                mine = getattr(self, axis)
                theirs = getattr(other, axis)
                if mine != theirs:
                    step = 1 if mine < theirs else -1
                    vel_name = "v" + axis
                    setattr(self, vel_name, getattr(self, vel_name) + step)

    def velocity(self):
        """Advance the position by one time step of the current velocity."""
        self.x += self.vx
        self.y += self.vy
        self.z += self.vz
# Parse the puzzle input into Moon objects (start_moons keeps an untouched
# copy of the initial state for period detection).
lines=instr.split("\n")
pat = "<x=(.*), y=(.*), z=(.*)>"
regex = re.compile(pat)
moons=[]
start_moons = []
for l in lines:
    mat = re.match(regex, l)
    if mat:
        x, y, z = map(int, mat.group(1,2,3))
        moons.append(Moon(x, y, z))
        start_moons.append(Moon(x, y, z))
# Each axis evolves independently, so its (position, velocity) state cycles
# on its own.  xs/ys/zs map an axis state to the last step it was seen at;
# when a state repeats, the step difference is that axis's period.
xs = {}
ys = {}
zs = {}
for i in range(1000000):
    for m in moons:
        m.gravity(moons)
    for m in moons:
        m.velocity()
    all_x = tuple([(m.x, m.vx) for m in moons])
    all_y = tuple([(m.y, m.vy) for m in moons])
    all_z = tuple([(m.z, m.vz) for m in moons])
    # 100000000 is a sentinel "never seen" step, so first sightings print a
    # huge negative-offset period and can be ignored in the output.
    last_x = xs.get(all_x, 100000000)
    x_period = i - last_x
    xs[all_x] = i
    print(x_period, all_x)
    last_y = ys.get(all_y, 100000000)
    y_period = i - last_y
    ys[all_y] = i
    print(y_period, all_y)
    last_z = zs.get(all_z, 100000000)
    z_period = i - last_z
    zs[all_z] = i
    print(z_period, all_z)
    # NOTE(review): the full-system answer is the LCM of the three per-axis
    # periods; this script only prints the candidate periods and leaves the
    # LCM step to be done by hand -- confirm that is intentional.
| harveyj/aoc | 2019/12/twelve-2.py | twelve-2.py | py | 1,734 | python | en | code | 0 | github-code | 90 |
18287792669 | n, m = map(int, input().split())
# Count solved problems and penalty WAs from m submissions to n problems
# (n, m are read on the fused first line of this script).  Each submission
# is a (problem id, verdict) pair.
if m > 0:
    p, s = zip(* [input().split() for _ in range(m)])
else:
    p = []
    s = []
# ac_dict[problem] -> already accepted; wa_dict[problem] -> WAs before AC.
ac_dict = {str(i): False for i in range(1, n + 1)}
wa_dict = {str(i): 0 for i in range(1, n + 1)}
ac_num = 0
wa_num = 0
for p_i, s_i in zip(p, s):
    # Submissions after the first AC of a problem are ignored entirely.
    if ac_dict[p_i] == False:
        if s_i == 'AC':
            ac_num += 1
            # Only WAs that preceded this first AC count as penalties.
            wa_num += wa_dict[p_i]
            ac_dict[p_i] = True
        elif s_i == 'WA':
            wa_dict[p_i] += 1
print('{} {}'.format(ac_num, wa_num))
| Aasthaengg/IBMdataset | Python_codes/p02802/s017088594.py | s017088594.py | py | 512 | python | en | code | 0 | github-code | 90 |
28968631191 | import os
import sys
import errno
import pickle
import math
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn import metrics as skmetrics
import numpy as np
from tqdm import tqdm
from tabulate import tabulate
import torch.nn.utils.prune as prune
torch.manual_seed(0)
np.random.seed(0)
def fed_avg(models, dataset, arch, data_nums):
    """Standard FedAvg aggregation of client models.

    Builds a fresh model whose parameters are the data-weighted average of
    the clients' parameters: w = sum_k (n_k / n) * w_k.

    :param models: list of client models to aggregate.
    :param dataset: dataset name passed to create_model.
    :param arch: architecture name ('mlp' or 'cnn').
    :param data_nums: per-client sample counts (array-like supporting
        elementwise division, e.g. a numpy array).
    :return: new model holding the aggregated parameters.
    """
    new_model = create_model(dataset, arch)
    num_models = len(models)
    # Normalize sample counts into aggregation fractions n_k / n.
    data_nums = data_nums / sum(data_nums)
    with torch.no_grad():
        # Snapshot every client's parameters once.
        weights = [dict(models[i].named_parameters()) for i in range(num_models)]
        for name, param in new_model.named_parameters():
            param.data.copy_(torch.zeros_like(param.data))
        for name, param in new_model.named_parameters():
            for i in range(num_models):
                weighted_param = torch.mul(weights[i][name], data_nums[i])
                param.data.copy_(param.data + weighted_param)
            # BUGFIX: the original additionally divided this weighted sum by
            # num_models, scaling every aggregated parameter down by that
            # factor.  The fractions n_k / n already sum to 1, so the
            # weighted sum *is* the average (compare average_weights, which
            # sums unweighted terms and therefore does divide by num_models).
    return new_model
def lottery_fl_v2(server_model, models, dataset, arch, data_nums):
    """LotteryFL aggregation (v2): average client weights, back-filling the
    pruned (masked-to-zero) positions from the server model's weights.

    :param server_model: global model providing fill values for pruned entries.
    :param models: list of (possibly pruned) client models.
    :param dataset: dataset name used by create_model.
    :param arch: architecture name ('mlp' or 'cnn').
    :param data_nums: per-client sample counts; normalized below but the
        weighting itself is commented out, so the result is a plain average.
    :return: freshly created model holding the aggregated weights.
    """
    new_model = create_model(dataset, arch) #copy_model(server_model, dataset, arch, source_buff=dict(server_model.named_buffers()))
    num_models = len(models)
    num_data_total = sum(data_nums)
    data_nums = data_nums / num_data_total
    with torch.no_grad():
        # Get server weights
        server_weights = dict(server_model.named_parameters())
        # Getting all the weights and masks from original models
        weights, masks = [], []
        for i in range(num_models):
            weights.append(dict(models[i].named_parameters()))
            masks.append(dict(models[i].named_buffers()))
        for name, param in new_model.named_parameters():
            param.data.copy_(torch.zeros_like(param.data))
        # Averaging weights
        for name, param in new_model.named_parameters():
            for i in range(num_models):
                #parameters_to_prune, num_global_weights, _ = get_prune_params(models[i])
                model_masks = masks[i]
                try:
                    # NOTE(review): str.strip("_orig") strips any of the
                    # characters '_','o','r','i','g' from BOTH ends, not the
                    # literal "_orig" suffix -- verify the buffer key this
                    # builds matches torch's "<name>_mask" convention.
                    layer_mask = model_masks[name.strip("_orig") + "_mask"]
                    weights[i][name] *= layer_mask
                    # Pruned (zero) entries are replaced by the server's value.
                    weights[i][name] = np.where(weights[i][name] != 0, weights[i][name], server_weights[name])
                except Exception as e:
                    #print("exceptions")
                    #print(e)
                    # Layers without a mask buffer fall through unmodified;
                    # errors are deliberately swallowed (best effort).
                    pass
                weighted_param = weights[i][name]
                #weighted_param = torch.mul(weights[i][name], data_nums[i])
                param.data.copy_(param.data + weighted_param)
            avg = torch.div(param.data, num_models)
            param.data.copy_(avg)
    return new_model
def lottery_fl_v3(server_model, models, dataset, arch, data_nums):
    """LotteryFL aggregation (v3): materialize each client's pruning into its
    weights (prune.remove), then average the resulting dense tensors.

    :param server_model: unused here (kept for signature parity with v2).
    :param models: list of client models whose pruning reparametrization is
        collapsed on a copy before averaging.
    :param dataset: dataset name used by create_model / copy_model.
    :param arch: architecture name ('mlp' or 'cnn').
    :param data_nums: per-client sample counts (unused; plain average).
    :return: freshly created model holding the averaged weights.
    """
    new_model = create_model(dataset, arch) #copy_model(server_model, dataset, arch, source_buff=dict(server_model.named_buffers()))
    num_models = len(models)
    num_data_total = sum(data_nums)
    with torch.no_grad():
        # Getting all the weights and masks from original models
        weights, masks = [], []  # NOTE(review): masks is never filled/used here.
        for i in range(num_models):
            # Work on a copy so prune.remove does not mutate the client model.
            new_c_model = copy_model(models[i], dataset, arch)
            parameters_to_prune, _, _ = get_prune_params(new_c_model)
            for m, n in parameters_to_prune:
                prune.remove(m, n)
            weights.append(dict(new_c_model.named_parameters()))
        for name, param in new_model.named_parameters():
            param.data.copy_(torch.zeros_like(param.data))
        # Averaging weights
        for name, param in new_model.named_parameters():
            for i in range(num_models):
                # new_model's params carry the "_orig" suffix from its own
                # pruning reparametrization; the collapsed copies do not.
                weighted_param = weights[i][name.strip("_orig")] #torch.mul(weights[i][name], data_nums[i])
                param.data.copy_(param.data + weighted_param)
            avg = torch.div(param.data, num_models)
            param.data.copy_(avg)
    return new_model
def average_weights(models, dataset, arch, data_nums):
    """Uniform (unweighted) average of the client models' parameters.

    :param models: list of client models to average.
    :param dataset: dataset name used by create_model.
    :param arch: architecture name ('mlp' or 'cnn').
    :param data_nums: per-client sample counts; computed into a total below
        but not applied -- every client contributes equally.
    :return: freshly created model holding the averaged parameters.
    """
    new_model = create_model(dataset, arch) #copy_model(server_model, dataset, arch, source_buff=dict(server_model.named_buffers()))
    num_models = len(models)
    num_data_total = sum(data_nums)  # NOTE(review): unused; weighting disabled.
    with torch.no_grad():
        # Getting all the weights and masks from original models
        weights, masks = [], []
        for i in range(num_models):
            weights.append(dict(models[i].named_parameters()))
            masks.append(dict(models[i].named_buffers()))
        for name, param in new_model.named_parameters():
            param.data.copy_(torch.zeros_like(param.data))
        # Averaging weights
        for name, param in new_model.named_parameters():
            for i in range(num_models):
                weighted_param = weights[i][name] #torch.mul(weights[i][name], data_nums[i])
                param.data.copy_(param.data + weighted_param)
            avg = torch.div(param.data, num_models)
            param.data.copy_(avg)
    return new_model
def copy_model(model, dataset, arch, source_buff=None):
    """Clone `model` into a freshly created architecture.

    Copies all named parameters, and all named buffers either from `model`
    or from the explicitly supplied `source_buff` mapping.

    :param model: source model to copy weights from.
    :param dataset: dataset name used by create_model.
    :param arch: architecture name ('mlp' or 'cnn').
    :param source_buff: optional mapping of buffer name -> tensor; when
        falsy, the source model's own buffers are used.
    :return: the new model with copied parameters and buffers.
    """
    clone = create_model(dataset, arch)
    src_params = dict(model.named_parameters())
    src_buffers = source_buff or dict(model.named_buffers())
    for name, param in clone.named_parameters():
        param.data.copy_(src_params[name])
    for name, buf in clone.named_buffers():
        buf.data.copy_(src_buffers[name])
    return clone
def create_model(dataset_name, model_type):
    """Instantiate a fresh model for the given dataset and architecture.

    :param dataset_name: "mnist" or "cifar10" (selects the archs package).
    :param model_type: "mlp" or "cnn".
    :return: new, zero-pruned model instance.

    NOTE(review): unsupported names print a message and call exit() rather
    than raising -- callers cannot catch this.
    """
    if dataset_name == "mnist":
        from archs.mnist import mlp, cnn
    elif dataset_name == "cifar10":
        from archs.cifar10 import mlp, cnn
    else:
        print("You did not enter the name of a supported architecture for this dataset")
        print("Supported datasets: {}, {}".format('"CIFAR10"', '"MNIST"'))
        exit()
    if model_type == 'mlp':
        new_model = mlp.MLP()
        # This pruning call is made so that the model is set up for accepting
        # weights from another pruned model. If this is not done, the weights
        # will be incompatible
        prune_fixed_amount(new_model, 0.0, verbose=False)
        return new_model
    elif model_type == 'cnn':
        new_model = cnn.CNN()
        # Same zero-amount prune: installs the mask buffers / _orig params.
        prune_fixed_amount(new_model, 0, verbose=False)
        return new_model
    else:
        print("You did not enter the name of a supported architecture for this dataset")
        print("Supported models: {}, {}".format('"mlp"', '"cnn"'))
        exit()
def train(round,
          client_id,
          epoch,
          model,
          train_loader,
          lr=0.001,
          verbose=True):
    """Train `model` for one epoch and return per-batch metric scores.

    :param round: federated round number (only used for logging; the name
        shadows the builtin `round`, kept for interface compatibility).
    :param client_id: client identifier, for logging.
    :param epoch: epoch number, for logging.
    :param model: torch model to train (trained in place).
    :param train_loader: iterable of (inputs, labels) mini-batches.
    :param lr: Adam learning rate.
    :param verbose: print a per-epoch metric table when True.
    :return: dict metric name -> list of per-batch values, with the epoch
        mean appended as the last element of each list (see note below).
    """
    loss_function = nn.CrossEntropyLoss()
    opt = optim.Adam(model.parameters(), lr=lr)
    num_batch = len(train_loader)
    model.train()
    metric_names = ['Loss',
                    'Accuracy',
                    'Balanced Accuracy',
                    'Precision Micro',
                    'Recall Micro',
                    'Precision Macro',
                    'Recall Macro']
    score = {name:[] for name in metric_names}
    progress_bar = tqdm(enumerate(train_loader),
                        total = num_batch,
                        file=sys.stdout)
    # Iterating over all mini-batches
    for i, data in progress_bar:
        x, ytrue = data
        yraw = model(x)
        loss = loss_function(yraw, ytrue)
        model.zero_grad()
        loss.backward()
        opt.step()
        # Turning the raw output of the network into a class prediction
        _, ypred = torch.max(yraw, 1)
        score = calculate_metrics(score, ytrue, yraw, ypred)
    average_scores = {}
    for k, v in score.items():
        average_scores[k] = [sum(v) / len(v)]
        # NOTE(review): this appends the epoch mean back onto the per-batch
        # list itself, so callers receive batch values plus a trailing mean.
        score[k].append(sum(v) / len(v))
    if verbose:
        print(f"round={round}, client={client_id}, epoch= {epoch}: ")
        print(tabulate(average_scores, headers='keys', tablefmt='github'))
    return score
def evaluate(model, data_loader, verbose=True):
    """Evaluate `model` over `data_loader` and return averaged metric scores.

    :param model: torch model to evaluate (left in train mode on return,
        matching the original behavior).
    :param data_loader: iterable of (inputs, labels) mini-batches.
    :param verbose: print a metric table when True.
    :return: dict metric name -> single-element list with the mean value.
    """
    metric_names = ['Loss',
                    'Accuracy',
                    'Balanced Accuracy',
                    'Precision Micro',
                    'Recall Micro',
                    'Precision Macro',
                    'Recall Macro']
    score = {name:[] for name in metric_names}
    num_batch = len(data_loader)
    # Switch to eval mode so that layers like Dropout function correctly
    model.eval()
    progress_bar = tqdm(enumerate(data_loader),
                        total=num_batch,
                        file=sys.stdout)
    # BUGFIX: the original called `torch.no_grad()` (and torch.enable_grad()
    # afterwards) as bare statements.  That constructs the context manager
    # without entering it, so gradients were still being tracked during
    # evaluation, wasting memory.  Using `with` actually disables gradient
    # tracking and restores the previous mode on exit.
    with torch.no_grad():
        for i, (x, ytrue) in progress_bar:
            yraw = model(x)
            _, ypred = torch.max(yraw, 1)
            score = calculate_metrics(score, ytrue, yraw, ypred)
            progress_bar.set_description('Evaluating')
    # Collapse the per-batch lists into their means.
    for k, v in score.items():
        score[k] = [sum(v) / len(v)]
    if verbose:
        print('Evaluation Score: ')
        print(tabulate(score, headers='keys', tablefmt='github'), flush=True)
    model.train()
    return score
def prune_fixed_amount(model, amount, verbose=True, glob=True):
    """L1-prune a fixed fraction of the model's weights, in place.

    :param model: torch model to prune.
    :param amount: fraction of weights to prune (0.0 .. 1.0).
    :param verbose: print a per-layer pruning summary when True.
    :param glob: when True prune `amount` of all weights globally; otherwise
        prune `amount` of each layer's weights independently.
    """
    parameters_to_prune, num_global_weights, layers_w_count = get_prune_params(model)
    if glob:
        prune.global_unstructured(
            parameters_to_prune,
            pruning_method=prune.L1Unstructured,
            amount = math.floor(amount * num_global_weights))
    else:
        for i, (m, n) in enumerate(parameters_to_prune):
            prune.l1_unstructured(m, name=n, amount = math.floor(amount * layers_w_count[i][1]))
    num_global_zeros, num_layer_zeros, num_layer_weights = 0, 0, 0
    global_prune_percent, layer_prune_percent = 0, 0
    prune_stat = {'Layers': [],
                  'Weight Name': [],
                  'Percent Pruned': [],
                  'Total Pruned': []}
    # Pruning is done in-place, thus parameters_to_prune is updated
    for layer, weight_name in parameters_to_prune:
        num_layer_zeros = torch.sum(getattr(layer, weight_name) == 0.0).item()
        num_global_zeros += num_layer_zeros
        num_layer_weights = torch.numel(getattr(layer, weight_name))
        layer_prune_percent = num_layer_zeros / num_layer_weights * 100
        prune_stat['Layers'].append(layer.__str__())
        prune_stat['Weight Name'].append(weight_name)
        prune_stat['Percent Pruned'].append(f'{num_layer_zeros} / {num_layer_weights} ({layer_prune_percent:.5f}%)')
        prune_stat['Total Pruned'].append(f'{num_layer_zeros}')
    global_prune_percent = num_global_zeros / num_global_weights
    if verbose:
        print('Pruning Summary', flush=True)
        print(tabulate(prune_stat, headers='keys'), flush=True)
        print(f'Percent Pruned Globaly: {global_prune_percent:.2f}', flush=True)
def get_prune_summary(model):
    """Count zeroed weights after applying each layer's mask.

    :param model: torch model whose pruning state is summarized.
    :return: (num_global_zeros, num_global_weights) tuple.

    NOTE(review): this pairs the i-th prunable layer with the i-th entry of
    named_buffers(), assuming both iterate in the same order -- confirm; and
    the in-place `attr *=` below mutates the layer's weight tensor.
    """
    num_global_zeros = 0
    parameters_to_prune, num_global_weights, _ = get_prune_params(model)
    masks = dict(model.named_buffers())
    for i, (layer, weight_name) in enumerate(parameters_to_prune):
        attr = getattr(layer, weight_name)
        try:
            attr *= masks[list(masks)[i]]
        except Exception as e:
            # Best effort: layers without a matching mask are just reported.
            print(e)
        num_global_zeros += torch.sum(attr == 0.0).item()
    return num_global_zeros, num_global_weights
def get_prune_params(model):
    """Collect the prunable (layer, weight-field-name) pairs of a model.

    :param model: torch model to inspect.
    :return: (layers, num_global_weights, layers_weight_count) where
        `layers` is a list of (module, field_name) pairs suitable for
        torch.nn.utils.prune, `num_global_weights` is the total weight
        count, and `layers_weight_count` pairs each module with its count.
    """
    layers = []
    layers_weight_count = []
    num_global_weights = 0
    modules = list(model.modules())
    for layer in modules:
        # Skip containers: nn.Sequential and the model object itself.
        is_sequential = type(layer) == nn.Sequential
        is_itself = type(layer) == type(model) if len(modules) > 1 else False
        if (not is_sequential) and (not is_itself):
            for name, param in layer.named_parameters():
                field_name = name.split('.')[-1]
                # This might break if someone does not adhere to the naming
                # convention where weights of a module is stored in a field
                # that has the word 'weight' in it
                if 'weight' in field_name and param.requires_grad:
                    # Already-pruned layers expose "<name>_orig"; strip the
                    # suffix so prune targets the logical weight name.
                    if field_name.endswith('_orig'):
                        field_name = field_name[:-5]
                    # Might remove the param.requires_grad condition in the future
                    layers.append((layer, field_name))
                    layer_weight_count = torch.numel(param)
                    layers_weight_count.append((layer, layer_weight_count))
                    num_global_weights += layer_weight_count
    return layers, num_global_weights, layers_weight_count
def calculate_metrics(score, ytrue, yraw, ypred):
    """Append one evaluation round's metrics to the ``score`` dict.

    Only metrics whose key already exists in ``score`` are computed; each
    value is appended to that key's list. ``yraw`` (raw outputs/logits)
    feeds the loss, ``ypred`` (predicted labels) feeds the sklearn metrics.

    Returns:
        the same ``score`` dict, updated in place.
    """
    # Map each metric name to a lazy, zero-argument computation so a metric
    # is only evaluated when its key is requested in ``score``.
    metric_fns = {
        'Loss': lambda: nn.CrossEntropyLoss()(yraw, ytrue),
        'Accuracy': lambda: skmetrics.accuracy_score(ytrue, ypred),
        'Balanced Accuracy': lambda: skmetrics.balanced_accuracy_score(ytrue, ypred),
        'Precision Micro': lambda: skmetrics.precision_score(ytrue, ypred, average='micro', zero_division=0),
        'Recall Micro': lambda: skmetrics.recall_score(ytrue, ypred, average='micro', zero_division=0),
        'Precision Macro': lambda: skmetrics.precision_score(ytrue, ypred, average='macro', zero_division=0),
        'Recall Macro': lambda: skmetrics.recall_score(ytrue, ypred, average='macro', zero_division=0),
    }
    for metric_name, compute in metric_fns.items():
        if metric_name in score:
            score[metric_name].append(compute())
    return score
def log_obj(path, obj):
    """Persist ``obj`` under ``path`` -- currently a disabled no-op.

    The previous implementation (kept below for reference) created the
    parent directory, then wrote nn.Module instances via torch.save and
    everything else via pickle. Restore the commented block to re-enable.
    """
    pass
    # if not os.path.exists(os.path.dirname(path)):
    #     try:
    #         os.makedirs(os.path.dirname(path))
    #     except OSError as exc:  # Guard against race condition
    #         if exc.errno != errno.EEXIST:
    #             raise
    #
    #     with open(path, 'wb') as file:
    #         if isinstance(obj, nn.Module):
    #             torch.save(obj, file)
    #         else:
    #             pickle.dump(obj, file)
| Donglin-Wang/LotteryEnsemble | util.py | util.py | py | 15,588 | python | en | code | 11 | github-code | 90 |
# -*- coding: utf-8 -*-
'''
Tulip routine libraries, based on lambda's lamlib
Author Bugatsinho
License summary below, for more details please read license.txt file
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, sys, cookielib, time, random
import urllib, urllib2, urlparse, HTMLParser
import gzip, StringIO
try:
import requests
requester = requests.get
except ImportError:
requester = None
def _basic_request(url, headers=None, post=None, timeout='30', limit=None):
    """Fetch ``url`` and return the (possibly size-limited) response body.

    Parameters:
        url -- address to request.
        headers -- optional dict of extra HTTP headers.
        post -- optional body; its presence makes this a POST request.
        timeout -- socket timeout in seconds (string or int).
        limit -- forwarded to _get_result to cap the bytes read (KB).

    Returns:
        the response body, or None on any failure (errors are swallowed
        deliberately -- callers treat None as "no result").
    """
    try:
        # The original probed dict-ness with a no-op headers.update(headers)
        # inside a bare except; an explicit check expresses the same intent.
        if not isinstance(headers, dict):
            headers = {}
        request = urllib2.Request(url, data=post)
        _add_request_header(request, headers)
        response = urllib2.urlopen(request, timeout=int(timeout))
        return _get_result(response, limit)
    except:
        return
def _add_request_header(_request, headers):
    """Attach ``headers`` to ``_request``, always setting Host and Referer.

    Referer defaults to the request's own origin ('scheme://host/') when
    not supplied. Failures are swallowed -- header decoration is
    best-effort only. (Python 2 urllib2 Request API.)
    """
    try:
        if not headers:
            headers = {}
        try:
            scheme = _request.get_type()
        except:
            # get_type() can fail (e.g. relative URL); assume plain http.
            scheme = 'http'
        referer = headers.get('Referer') if 'Referer' in headers else '%s://%s/' % (scheme, _request.get_host())
        # Unredirected headers are not copied onto follow-up redirect requests.
        _request.add_unredirected_header('Host', _request.get_host())
        _request.add_unredirected_header('Referer', referer)
        for key in headers: _request.add_header(key, headers[key])
    except:
        return
def _get_result(response, limit=None):
    """Read the response body, size-capped, and gunzip it if needed.

    Parameters:
        limit -- '0' reads 224 KB; any other truthy value is a cap in KB;
                 otherwise a 5 MB (5242880 byte) cap applies.
    """
    if limit == '0':
        result = response.read(224 * 1024)
    elif limit:
        result = response.read(int(limit) * 1024)
    else:
        result = response.read(5242880)
    try:
        # Python 2 httplib message API; absent header yields None.
        encoding = response.info().getheader('Content-Encoding')
    except:
        encoding = None
    if encoding == 'gzip':
        result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()
    return result
def retriever(source, destination, *args):
    """Download ``source`` to ``destination`` using a random User-Agent.

    Wraps Python 2's urllib.URLopener so the UA can be overridden via its
    class-level ``version`` attribute.
    """
    class Opener(urllib.URLopener):
        version = randomagent()
    Opener().retrieve(source, destination, *args)
def parseDOM(html, name=u"", attrs=None, ret=False):
    """Minimal regex-based HTML matcher (lamlib heritage).

    Finds every <name ...> element in ``html`` whose attributes match the
    ``attrs`` dict. With ``ret`` set to an attribute name, returns that
    attribute's values; otherwise returns each element's inner content.

    Parameters:
        html -- markup to search; str, unicode (Py2) or a list of either.
        name -- tag name to match; a blank name yields u"".
        attrs -- {attribute: value-regex-fragment} filters.
        ret -- False for element content, or an attribute name string.

    Returns:
        list of matched strings, or u"" on invalid input.
    """
    if attrs is None:
        attrs = {}
    if isinstance(name, str): # Should be handled
        try:
            name = name # .decode("utf-8")
        except:
            pass
    if isinstance(html, str):
        try:
            html = [html.decode("utf-8")] # Replace with chardet thingy
        except:
            html = [html]
    elif isinstance(html, unicode):
        # NOTE: 'unicode' only exists on Python 2.
        html = [html]
    elif not isinstance(html, list):
        return u""
    if not name.strip():
        return u""
    ret_lst = []
    for item in html:
        # Collapse newlines inside tags so the single-line regexes match.
        temp_item = re.compile('(<[^>]*?\n[^>]*?>)').findall(item)
        for match in temp_item:
            item = item.replace(match, match.replace("\n", " "))
        lst = _getDOMElements(item, name, attrs)
        if isinstance(ret, str):
            lst2 = []
            for match in lst:
                lst2 += _getDOMAttributes(match, name, ret)
            lst = lst2
        else:
            lst2 = []
            for match in lst:
                temp = _getDOMContent(item, name, match, ret).strip()
                # Advance past the consumed element so repeated identical
                # elements are each matched once.
                item = item[item.find(temp, item.find(match)) + len(temp):]
                lst2.append(temp)
            lst = lst2
        ret_lst += lst
    return ret_lst
def _getDOMContent(html, name, match, ret): # Cleanup
endstr = u"</" + name # + ">"
start = html.find(match)
end = html.find(endstr, start)
pos = html.find("<" + name, start + 1 )
while pos < end and pos != -1: # Ignore too early </endstr> return
tend = html.find(endstr, end + len(endstr))
if tend != -1:
end = tend
pos = html.find("<" + name, pos + 1)
if start == -1 and end == -1:
result = u""
elif start > -1 and end > -1:
result = html[start + len(match):end]
elif end > -1:
result = html[:end]
elif start > -1:
result = html[start + len(match):]
if ret:
endstr = html[end:html.find(">", html.find(endstr)) + 1]
result = match + result + endstr
return result
def _getDOMAttributes(match, name, ret):
lst = re.compile('<' + name + '.*?' + ret + '=([\'"].[^>]*?[\'"])>', re.M | re.S).findall(match)
if len(lst) == 0:
lst = re.compile('<' + name + '.*?' + ret + '=(.[^>]*?)>', re.M | re.S).findall(match)
ret = []
for tmp in lst:
cont_char = tmp[0]
if cont_char in "'\"":
# Limit down to next variable.
if tmp.find('=' + cont_char, tmp.find(cont_char, 1)) > -1:
tmp = tmp[:tmp.find('=' + cont_char, tmp.find(cont_char, 1))]
# Limit to the last quotation mark
if tmp.rfind(cont_char, 1) > -1:
tmp = tmp[1:tmp.rfind(cont_char)]
else:
if tmp.find(" ") > 0:
tmp = tmp[:tmp.find(" ")]
elif tmp.find("/") > 0:
tmp = tmp[:tmp.find("/")]
elif tmp.find(">") > 0:
tmp = tmp[:tmp.find(">")]
ret.append(tmp.strip())
return ret
def _getDOMElements(item, name, attrs):
lst = []
for key in attrs:
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=[\'"]' + attrs[key] + '[\'"].*?>))', re.M | re.S).findall(item)
if len(lst2) == 0 and attrs[key].find(" ") == -1: # Try matching without quotation marks
lst2 = re.compile('(<' + name + '[^>]*?(?:' + key + '=' + attrs[key] + '.*?>))', re.M | re.S).findall(item)
if len(lst) == 0:
lst = lst2
lst2 = []
else:
test = range(len(lst))
test.reverse()
for i in test: # Delete anything missing from the next list.
if not lst[i] in lst2:
del(lst[i])
if len(lst) == 0 and attrs == {}:
lst = re.compile('(<' + name + '>)', re.M | re.S).findall(item)
if len(lst) == 0:
lst = re.compile('(<' + name + ' .*?>)', re.M | re.S).findall(item)
return lst
def replaceHTMLCodes(txt):
    """Decode common HTML entities in ``txt`` into plain characters.

    Repairs bare numeric entities missing their trailing ';', unescapes
    everything via HTMLParser (Python 2 API), then cleans up entities that
    frequently survive double-encoded input.
    """
    txt = re.sub("(&#[0-9]+)([^;^0-9]+)", "\\1;\\2", txt)
    txt = HTMLParser.HTMLParser().unescape(txt)
    # The entity literals below were themselves entity-decoded in this copy
    # of the file (e.g. replace(""", ...) -- a syntax error); restored to
    # the conventional lamlib forms. TODO(review): confirm against upstream.
    txt = txt.replace("&quot;", "\"")
    txt = txt.replace("&amp;", "&")
    # Second pass catches double-encoded '&amp;amp;' input.
    txt = txt.replace("&amp;", "&")
    txt = txt.replace("&nbsp;", "")
    return txt
def randomagent():
    """Return a random desktop browser User-Agent string.

    Picks a browser family template (Firefox / Chrome / IE11), then fills
    it with a random Windows version, architecture feature and a browser
    version appropriate to that family.
    """
    # Bug fix: xrange -> range; identical result on Python 2 and makes the
    # function Python 3 compatible.
    BR_VERS = [
        ['%s.0' % i for i in range(18, 43)],
        ['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101',
         '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95',
         '39.0.2171.99', '40.0.2214.93', '40.0.2214.111', '40.0.2214.115',
         '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81',
         '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101',
         '45.0.2454.85', '46.0.2490.71', '46.0.2490.80', '46.0.2490.86',
         '47.0.2526.73', '47.0.2526.80'],
        ['11.0'],
    ]
    WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0',
                'Windows NT 5.1', 'Windows NT 5.0']
    FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', '']
    RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}',
                'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36',
                'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko']
    index = random.randrange(len(RAND_UAS))
    return RAND_UAS[index].format(win_ver=random.choice(WIN_VERS), feature=random.choice(FEATURES), br_ver=random.choice(BR_VERS[index]))
def agent():
    """Fixed desktop IE11 user-agent string."""
    ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'
    return ua
def mobile_agent():
    """Fixed Android Firefox user-agent string."""
    ua = 'Mozilla/5.0 (Android 4.4; Mobile; rv:18.0) Gecko/18.0 Firefox/18.0'
    return ua
def ios_agent():
    """Fixed iPhone Safari user-agent string."""
    ua = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
    return ua
def spoofer(_agent=True, age_str=None, referer=False, ref_str=''):
    """Build a '|'-separated header suffix for spoofed playback URLs.

    Parameters:
        _agent -- include a User-Agent header.
        age_str -- user-agent to use; None picks a fresh random one.
                   (Bug fix: the old default ``age_str=randomagent()`` was
                   evaluated once at import time, so every call shared the
                   same "random" agent.)
        referer -- include a Referer header.
        ref_str -- referer URL to use.

    Returns:
        the header string, or None when both flags are False.
    """
    if age_str is None:
        age_str = randomagent()
    if _agent and referer:
        return '|User-Agent=' + urllib.quote_plus(age_str) + '&Referer=' + urllib.quote_plus(ref_str)
    elif _agent:
        return '|User-Agent=' + urllib.quote_plus(age_str)
    elif referer:
        return '|Referer=' + urllib.quote_plus(ref_str)
def cfcookie(netloc, ua, timeout):
    """Solve the legacy Cloudflare 'I'm Under Attack' JS challenge.

    Requests ``netloc``, expects the challenge page to arrive as an
    HTTPError body, evaluates the arithmetic challenge, waits the mandated
    5 seconds and replays the answer to obtain the clearance cookies.

    Returns:
        'name=value; ...' cookie string, or None on any failure.
    """
    try:
        headers = {'User-Agent': ua}
        req = urllib2.Request(netloc, headers=headers)
        try:
            urllib2.urlopen(req, timeout=int(timeout))
        except urllib2.HTTPError as response:
            # The challenge page is served as an HTTP error (503).
            result = response.read(5242880)
            jschl = re.findall('name="jschl_vc" value="(.+?)"/>', result)[0]
            init = re.findall('setTimeout\(function\(\){\s*.*?.*:(.*?)};', result)[-1]
            builder = re.findall(r"challenge-form\'\);\s*(.*)a.v", result)[0]
            decryptVal = parseJSString(init)
            lines = builder.split(';')
            for line in lines:
                if len(line) > 0 and '=' in line:
                    sections=line.split('=')
                    line_val = parseJSString(sections[1])
                    # Apply the +=/-=/*= operator encoded at the end of the
                    # left-hand side of each builder statement.
                    decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
            # Cloudflare's answer adds the host name length to the result.
            answer = decryptVal + len(urlparse.urlparse(netloc).netloc)
            query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (netloc, jschl, answer)
            if 'type="hidden" name="pass"' in result:
                passval = re.findall('name="pass" value="(.*?)"', result)[0]
                query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (netloc, urllib.quote_plus(passval), jschl, answer)
                # The challenge page requires a ~4s delay before answering.
                time.sleep(5)
            cookies = cookielib.LWPCookieJar()
            handlers = [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            opener = urllib2.build_opener(*handlers)
            urllib2.install_opener(opener)
            try:
                request = urllib2.Request(query, headers=headers)
                urllib2.urlopen(request, timeout=int(timeout))
            except:
                pass
            cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            return cookie
    except:
        pass
def parseJSString(s):
    """Evaluate one Cloudflare obfuscated-JS number expression.

    Translates the JSFuck-style tokens (!+[], !![], []) into digits and
    evaluates the result. SECURITY NOTE: this eval()s text derived from a
    remote page; the token replacement constrains it, but treat with care.
    """
    try:
        # A leading '+' is a JS unary plus; skip it before eval.
        offset=1 if s[0]=='+' else 0
        val = int(eval(s.replace('!+[]','1').replace('!![]','1').replace('[]','0').replace('(','str(')[offset:]))
        return val
    except:
        pass
| bugatsinho/bugatsinho.github.io | plugin.video.freevl/resources/modules/client.py | client.py | py | 11,353 | python | en | code | 61 | github-code | 90 |
import sys
import gi
import os
import configparser
gi.require_version('Gtk', '4.0')
gi.require_version('Adw', '1')
from gi.repository import Gtk, Gio, Adw, Gst, GObject, GdkPixbuf, Gdk
from .window import JokosherWindow
from .project import Project
from .settings import Settings
from .globals import Globals
from .platform_utils import PlatformUtils
from .jokosherpreferences import JokosherPreferences
class JokosherApplication(Adw.Application):
"""The main application singleton class."""
__gsignals__ = {
"project" : ( GObject.SIGNAL_RUN_LAST | GObject.SIGNAL_DETAILED, GObject.TYPE_NONE, () ),
}
    def __init__(self):
        """Register application actions, initialise state flags and start
        the lazy instrument cache via an idle callback."""
        super().__init__(application_id='org.gnome.Jokosher',
                         flags=Gio.ApplicationFlags.FLAGS_NONE)
        self.create_action('quit', self.quit, ['<primary>q'])
        self.create_action('about', self.on_about_action)
        self.create_action('preferences', self.on_preferences_action)
        # initialise project stuff
        self.project = None
        self.create_action('new-project', self.on_project_new_action)
        self.create_action('save-project', self.on_project_save_action)
        self.create_action('open-project', self.on_project_open_action)
        self.create_action('close-project', self.on_project_close_action)
        self.create_action('export-audio', self.on_export_audio_action)
        self.connect("shutdown", self.on_shutdown)
        # TODO initialise settings
        # setup global variables
        self.settings = Settings()
        # some app states
        self.isRecording = False
        self.isPlaying = False
        self.isPaused = False
        # instrument cache: list of (name, type, pixbuf, pixbuf_path)
        self.instrumentPropertyList = []
        self._alreadyCached = False
        self._cacheGeneratorObject = None
        # indication that we are clearing up for new project
        self.will_open_project = False
        # idle instrument load: runs idleCacheInstruments between GUI events
        GObject.idle_add(self.idleCacheInstruments)
    def do_activate(self):
        """Called when the application is activated.
        We raise the application's main window, creating it if
        necessary. Also initialises GStreamer and installs the
        application-wide CSS provider.
        """
        Gst.init(None)
        Gst.debug_set_active(True)
        #Gst.debug_set_default_threshold(5)
        Gst.debug_set_threshold_from_string("nle*:3", False)
        # Load all CSS bits
        css_provider = Gtk.CssProvider()
        file = Gio.File.new_for_path(os.path.dirname(__file__) + '/jokosher.css')
        css_provider.load_from_file(file)
        screen = Gdk.Display.get_default()
        Gtk.StyleContext.add_provider_for_display(screen, css_provider,
                                                  Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)
        # Single-window application: reuse the active window if it exists.
        win = self.props.active_window
        if not win:
            win = JokosherWindow(application=self)
        win.present()
    def on_about_action(self, widget, _):
        """Callback for the app.about action: show the About window."""
        about = Adw.AboutWindow(transient_for=self.props.active_window,
                                application_name='jokosher',
                                application_icon='org.gnome.Jokosher',
                                developer_name='Pēteris Krišjānis',
                                version='0.1.0',
                                developers=['Pēteris Krišjānis'],
                                copyright='© 2022 Pēteris Krišjānis')
        about.present()
    def on_preferences_action(self, widget, _):
        """Callback for the app.preferences action: show the preferences window."""
        self.preferences_window = JokosherPreferences()
        self.preferences_window.show()
    def create_action(self, name, callback, shortcuts=None):
        """Add an application action.
        Args:
            name: the name of the action
            callback: the function to be called when the action is
              activated
            shortcuts: an optional list of accelerators
        """
        action = Gio.SimpleAction.new(name, None)
        action.connect("activate", callback)
        self.add_action(action)
        if shortcuts:
            self.set_accels_for_action(f"app.{name}", shortcuts)
    def on_project_new_action(self, widget, _):
        """Callback for app.new-project: close any current project and ask
        listeners (the main window) to show the create-project dialog."""
        # check and close if there is already project running
        if self.project:
            self.close_project()
        # let main window to show create project dialog
        self.emit("project::dialog")
        # TODO new project dialog
        #self.project = Project.create(name='Untitled1', author='Pēteris Krišjānis', location='file:///home/peteriskrisjanis')
        #self.props.active_window.on_open_project()
    def on_project_create(self, dialog, name, author, location, sample_rate=None, bit_depth=None):
        """Callback from the ProjectDialog: create the project and announce it."""
        # this is callback from ProjectDialog
        # double check if project is really closed, should be at this point
        if self.project:
            self.close_project()
        self.project = Project.create(name=name, author=author, location=location, sample_rate=sample_rate, bit_depth=bit_depth)
        # let everyone know we open new project
        self.emit("project::open")
        #self.props.active_window.on_open_project()
    def on_project_save_action(self, widget, _):
        """Callback for app.save-project: write the project file to disk."""
        self.project.save_project_file()
    def on_project_open_action(self, widget, _):
        """Callback for app.open-project: delegate the file chooser to the window."""
        # action for opening file dialog
        self.props.active_window.on_open_project_file()
    def open_project(self, project_file_path):
        """Load the project at ``project_file_path`` and make it current.

        Returns:
            True on success (the error path is currently commented out).
        """
        # try:
        # we need to close project if any
        if self.project:
            self.close_project()
        uri = PlatformUtils.pathname2url(project_file_path)
        self.set_project(Project.load_project_file(uri))
        self.emit("project::open")
        # app.on_project_open()
        #self.on_project_open()
        return True
        # except ProjectManager.OpenProjectError as e:
        #     self.ShowOpenProjectErrorDialog(e, parent)
        #     return False
    def on_project_open(self):
        """Hook run after a project opens from the file dialog; currently a no-op."""
        # method triggered on opening project from open project file dialog
        # this means project is set already and we are ready to roll
        pass
    def on_project_close_action(self, widget, _):
        """Callback for app.close-project."""
        self.close_project()
def set_project(self, project):
"""
Tries to establish the Project parameter as the current project.
If there are errors, an error message is issued to the user.
Parameters:
project -- the Project object to set as the main project.
"""
# try:
# ProjectManager.ValidateProject(project)
# except ProjectManager.InvalidProjectError as e:
# message=""
# if e.files:
# message+=_("The project references non-existant files:\n")
# for f in e.files:
# message += f + "\n"
# if e.images:
# message+=_("\nThe project references non-existant images:\n")
# for f in e.images:
# message += f + "\n"
# dlg = Gtk.MessageDialog(self.window,
# Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
# Gtk.MessageType.ERROR,
# Gtk.ButtonsType.OK,
# _("%s\n Invalid or corrupt project file, will not open.")%message)
# dlg.run()
# dlg.destroy()
# return
if self.project:
if self.CloseProject() != 0:
return
self.project = project
# self.project.connect("audio-state::play", self.OnProjectAudioState)
# self.project.connect("audio-state::pause", self.OnProjectAudioState)
# self.project.connect("audio-state::record", self.OnProjectAudioState)
# self.project.connect("audio-state::stop", self.OnProjectAudioState)
# self.project.connect("audio-state::export-start", self.OnProjectExportStart)
# self.project.connect("audio-state::export-stop", self.OnProjectExportStop)
# self.project.connect("name", self.OnProjectNameChanged)
# self.project.connect("undo", self.OnProjectUndo)
# self.project.transport.connect("transport-mode", self.OnTransportMode)
# self.OnTransportMode()
# self.UpdateProjectLastUsedTime(project.projectfile, project.name)
# self.project.PrepareClick()
# make various buttons and menu items enabled now we have a project
# self.SetGUIProjectLoaded()
def _cacheInstrumentsGenerator(self, alreadyLoadedTypes=[]):
"""
Yields a loaded Instrument everytime this method is called,
so that the gui isn't blocked while loading many Instruments.
If an Instrument's type is already in alreadyLoadedTypes,
it is considered a duplicate and it's not loaded.
Parameters:
alreadyLoadedTypes -- array containing the already loaded Instrument types.
Returns:
the loaded Instrument. *CHECK*
"""
try:
#getlocale() will usually return a tuple like: ('en_GB', 'UTF-8')
lang = locale.getlocale()[0]
except:
lang = None
for instr_path in self.settings.INSTR_PATHS:
if not os.path.exists(instr_path):
continue
instrFiles = [x for x in os.listdir(instr_path) if x.endswith(".instr")]
for f in instrFiles:
config = configparser.SafeConfigParser()
try:
config.read(os.path.join(instr_path, f))
except (ConfigParser.MissingSectionHeaderError,e):
debug("Instrument file %s in %s is corrupt or invalid, not loading"%(f,instr_path))
continue
if config.has_option('core', 'type') and config.has_option('core', 'icon'):
icon = config.get('core', 'icon')
type = config.get('core', 'type')
else:
continue
#don't load duplicate instruments
if type in alreadyLoadedTypes:
continue
if lang and config.has_option('i18n', lang):
name = config.get('i18n', lang)
elif lang and config.has_option('i18n', lang.split("_")[0]):
#in case lang was 'de_DE', use only 'de'
name = config.get('i18n', lang.split("_")[0])
elif config.has_option('i18n', 'en'):
#fall back on english (or a PO translation, if there is any)
name = _(config.get( 'i18n', 'en'))
else:
continue
#name = unicode(name, "UTF-8")
pixbufPath = os.path.join(instr_path, "images", icon)
pixbuf = GdkPixbuf.Pixbuf.new_from_file(pixbufPath)
# add instrument to defaults list if it's a defaults
# if instr_path == INSTR_PATHS[0]:
# DEFAULT_INSTRUMENTS.append(type)
yield (name, type, pixbuf, pixbufPath)
#_____________________________________________________________________
    def getCachedInstruments(self, checkForNew=False):
        """
        Creates the Instrument cache if it hasn't been created already and
        return it.
        Parameters:
            checkForNew -- True = scan the Instrument folders for new_dir.
                           False = don't scan for new Instruments.
        Returns:
            a list with the Instruments cached in memory.
        """
        if self._alreadyCached and not checkForNew:
            return self.instrumentPropertyList
        else:
            self._alreadyCached = True
            listOfTypes = [x[1] for x in self.instrumentPropertyList]
            try:
                newlyCached = list(self._cacheInstrumentsGenerator(listOfTypes))
                #extend the list so we don't overwrite the already cached instruments
                self.instrumentPropertyList.extend(newlyCached)
            except StopIteration:
                # NOTE(review): list() consumes the generator without
                # propagating StopIteration; this handler is likely vestigial.
                pass
            #sort the instruments alphabetically
            #using the lowercase of the name (at index 0)
            self.instrumentPropertyList.sort(key=lambda x: x[0].lower())
            return self.instrumentPropertyList
    #_____________________________________________________________________
    def getCachedInstrumentPixbuf(self, get_type):
        """Return the cached icon pixbuf for instrument type ``get_type``, or None."""
        for (name, type, pixbuf, pixbufPath) in self.getCachedInstruments():
            if type == get_type:
                return pixbuf
        return None
#_____________________________________________________________________
def idleCacheInstruments(self):
"""
Loads the Instruments 'lazily' to avoid blocking the GUI.
Returns:
True -- keep calling itself to load more Instruments.
False -- stop calling itself and sort Instruments alphabetically.
"""
if self._alreadyCached:
#Stop idle_add from calling us again
return False
#create the generator if it hasnt been already
if not self._cacheGeneratorObject:
self._cacheGeneratorObject = self._cacheInstrumentsGenerator()
try:
self.instrumentPropertyList.append(next(self._cacheGeneratorObject))
#Make sure idle add calls us again
return True
except StopIteration:
_alreadyCached = True
#sort the instruments alphabetically
#using the lowercase of the name (at index 0)
self.instrumentPropertyList.sort(key=lambda x: x[0].lower())
#Stop idle_add from calling us again
return False
    def on_play(self):
        """Start playback of the active project and mark the app as playing."""
        # flipping bit
        self.isPlaying = True
        # let's play active project
        self.project.Play()
    def on_stop(self):
        """Stop playback/recording and reset the transport state flags."""
        self.isPlaying = False
        self.isRecording = False
        # NOTE(review): playback starts via project.Play() but stops via
        # project.stop() -- confirm the Project API really mixes cases.
        self.project.stop()
    def on_record(self, widget=None):
        """
        Toggles recording. If there's an error, a warning/error message is
        issued to the user.
        Parameters:
            widget -- reserved for GTK callbacks, don't use it explicitly.
        """
        # toggling the record button invokes this function so we use the settingButtons var to
        # indicate that we're just changing the GUI state and dont need to do anything code-wise
        # if self.settingButtons:
        #     return
        # if self.isRecording:
        #     self.project.Stop()
        #     return
        can_record = False
        for i in self.project.instruments:
            if i.is_armed:
                can_record = True
        #Check to see if any instruments are trying to use the same input channel
        # NOTE(review): usedChannels is never read below -- leftover from
        # the pre-port implementation. self.window and _() are also not
        # defined in this module; confirm before exercising these paths.
        usedChannels = []
        armed_instrs = [x for x in self.project.instruments if x.is_armed]
        for instrA in armed_instrs:
            for instrB in armed_instrs:
                if instrA is not instrB and instrA.input == instrB.input and instrA.inTrack == instrB.inTrack:
                    string = _("The instruments '%(name1)s' and '%(name2)s' both have the same input selected. Please either disarm one, or connect it to a different input through 'Project -> Recording Inputs'")
                    message = string % {"name1" : instrA.name, "name2" : instrB.name}
                    dlg = Gtk.MessageDialog(self.window,
                        Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                        Gtk.MessageType.INFO,
                        Gtk.ButtonsType.CLOSE,
                        message)
                    dlg.connect('response', lambda dlg, response: dlg.destroy())
                    # NOTE(review): Gtk.Dialog.run() was removed in GTK4.
                    dlg.run()
                    self.settingButtons = True
                    widget.set_active(False)
                    self.settingButtons = False
                    return
        if not can_record:
            Globals.debug("can not record")
            if self.project.instruments:
                errmsg = "No instruments are armed for recording. You need to arm an instrument before you can begin recording."
            else:
                errmsg = "No instruments have been added. You must add an instrument before recording"
            dlg = Gtk.MessageDialog(self.window,
                Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
                Gtk.MessageType.INFO,
                Gtk.ButtonsType.CLOSE,
                _(errmsg))
            dlg.connect('response', lambda dlg, response: dlg.destroy())
            dlg.run()
            self.settingButtons = True
            widget.set_active(False)
            self.settingButtons = False
        else:
            Globals.debug("can record")
            self.project.Record()
    def on_shutdown(self, application):
        """GApplication 'shutdown' handler: make sure the project is closed."""
        self.close_project()
    def close_project(self):
        """
        Closes the current project. If there's changes pending, it'll ask the user for confirmation.
        Returns:
            the status of the close operation:
            0 = there was no project open or it was closed succesfully.
            1 = cancel the operation and return to the normal program flow.
        """
        if not self.project:
            return 0
        # stop playing if it is not already done
        self.project.stop()
        """
        if self.project.CheckUnsavedChanges():
            message = _("<span size='large' weight='bold'>Save changes to project \"%s\" before closing?</span>\n\nYour changes will be lost if you don't save them.") % self.project.name
            dlg = Gtk.MessageDialog(self.window,
                Gtk.DialogFlags.MODAL |
                Gtk.DialogFlags.DESTROY_WITH_PARENT,
                Gtk.MessageType.WARNING,
                Gtk.ButtonsType.NONE)
            dlg.set_markup(message)
            dlg.add_button(_("Close _Without Saving"), Gtk.ResponseType.NO)
            dlg.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
            defaultAction = dlg.add_button(Gtk.STOCK_SAVE, Gtk.ResponseType.YES)
            #make save the default action when enter is pressed
            dlg.set_default(defaultAction)
            dlg.set_transient_for(self.window)
            response = dlg.run()
            dlg.destroy()
            if response == Gtk.ResponseType.YES:
                self.OnSaveProject()
            elif response == Gtk.ResponseType.NO:
                pass
            elif response == Gtk.ResponseType.CANCEL or response == Gtk.ResponseType.DELETE_EVENT:
                return 1
        """
        # if self.project.CheckUnsavedChanges():
        #     self.OnSaveProject()
        #     self.project.close_project()
        # elif self.project.newly_created_project:
        #     self.project.close_project()
        #     ProjectManager.DeleteProjectLocation(self.project)
        # else:
        self.emit("project::close")
        self.project.close_project()
        # write down in recent projects
        # NOTE(review): the project is recorded in recents *after* its
        # close_project() ran -- confirm Settings only needs name/path.
        self.settings.add_recent_project(self.project)
        self.project = None
        return 0
    def on_export_audio_action(self, widget, _):
        """Callback for app.export-audio: let listeners run the export flow."""
        self.emit("project::export")
    @staticmethod
    def get_application():
        """Return the default Gio.Application instance (this application)."""
        return Gio.Application.get_default()
| Pecisk/jokosher-ng | src/jokosherapplication.py | jokosherapplication.py | py | 19,279 | python | en | code | 2 | github-code | 90 |
# -*- coding: utf-8 -*-
# (c) Nano Nano Ltd 2019
from decimal import Decimal
from .config import config
class TransactionRecordBase(object):
    """One imported transaction: optional buy, sell and fee legs.

    Quantities are stored as Decimal; the *_value fields are optional fiat
    valuations in config.CCY. Equality and ordering compare timestamps
    only, so records sort chronologically.
    """
    TYPE_DEPOSIT = 'Deposit'
    TYPE_MINING = 'Mining'
    TYPE_INCOME = 'Income'
    TYPE_GIFT_RECEIVED = 'Gift-Received'
    TYPE_WITHDRAWAL = 'Withdrawal'
    TYPE_SPEND = 'Spend'
    TYPE_GIFT_SENT = 'Gift-Sent'
    TYPE_CHARITY_SENT = 'Charity-Sent'
    TYPE_TRADE = 'Trade'
    BUY_TYPES = (TYPE_MINING, TYPE_INCOME, TYPE_GIFT_RECEIVED)
    SELL_TYPES = (TYPE_SPEND, TYPE_GIFT_SENT, TYPE_CHARITY_SENT)
    # Class-wide counter: each record takes the next value as its 1-based
    # row number for error reporting.
    cnt = 0
    def __init__(self, t_type, timestamp,
                 buy_quantity=None, buy_asset="", buy_value=None,
                 sell_quantity=None, sell_asset="", sell_value=None,
                 fee_quantity=None, fee_asset="", fee_value=None,
                 wallet=""):
        """Store one transaction; non-None numeric inputs are coerced to Decimal."""
        TransactionRecordBase.cnt += 1
        self.line_num = TransactionRecordBase.cnt
        self.t_type = t_type
        self.buy_quantity = Decimal(buy_quantity) if buy_quantity is not None else None
        self.buy_asset = buy_asset
        self.buy_value = Decimal(buy_value) if buy_value is not None else None
        self.sell_quantity = Decimal(sell_quantity) if sell_quantity is not None else None
        self.sell_asset = sell_asset
        self.sell_value = Decimal(sell_value) if sell_value is not None else None
        self.fee_quantity = Decimal(fee_quantity) if fee_quantity is not None else None
        self.fee_asset = fee_asset
        self.fee_value = Decimal(fee_value) if fee_value is not None else None
        self.wallet = wallet
        self.timestamp = timestamp
    @staticmethod
    def format_quantity(quantity):
        """Render a Decimal quantity with thousands separators ('-' for None)."""
        if quantity is None:
            return '-'
        return '{:0,f}'.format(quantity.normalize())
    @staticmethod
    def format_value(value):
        """Render an optional fiat value as e.g. ' (£1,234.56 GBP)', or ''."""
        if value is not None:
            return ' (' + config.sym() + '{:0,.2f} {})'.format(value, config.CCY)
        return ''
    def __eq__(self, other):
        # Records are "equal" when simultaneous; used only for ordering.
        return self.timestamp == other.timestamp
    def __ne__(self, other):
        return not self == other
    def __lt__(self, other):
        return self.timestamp < other.timestamp
    def __str__(self):
        """Human-readable one-line summary: buy leg, sell leg, or trade (both)."""
        if self.t_type in self.BUY_TYPES or self.t_type == self.TYPE_DEPOSIT:
            return "[Row:" + str(self.line_num) + "] " + \
                   self.t_type + ": " + \
                   self.format_quantity(self.buy_quantity) + " " + \
                   self.buy_asset + \
                   self.format_value(self.buy_value) + " \"" + \
                   self.wallet + "\" " + \
                   self.timestamp.strftime('%Y-%m-%dT%H:%M:%S %Z')
        elif self.t_type in self.SELL_TYPES or self.t_type == self.TYPE_WITHDRAWAL:
            return "[Row:" + str(self.line_num) + "] " + \
                   self.t_type + ": " + \
                   self.format_quantity(self.sell_quantity) + " " + \
                   self.sell_asset + \
                   self.format_value(self.sell_value) + " \"" + \
                   self.wallet + "\" " + \
                   self.timestamp.strftime('%Y-%m-%dT%H:%M:%S %Z')
        else:
            return "[Row:" + str(self.line_num) + "] " + \
                   self.t_type + ": " + \
                   self.format_quantity(self.buy_quantity) + " " + \
                   self.buy_asset + \
                   self.format_value(self.buy_value) + " <- " + \
                   self.format_quantity(self.sell_quantity) + " " + \
                   self.sell_asset + \
                   self.format_value(self.sell_value) + " \"" + \
                   self.wallet + "\" " + \
                   self.timestamp.strftime('%Y-%m-%dT%H:%M:%S %Z')
class TransactionInRecord(TransactionRecordBase):
    """An input-side transaction record with validation and fee helpers."""
    def validate(self):
        """Check that the legs present match the transaction type.

        Buy-like types need a buy leg only; sell-like types a sell leg
        only; trades need both. Fee assets must match one of the legs.

        Raises:
            Exception -- on missing/superfluous legs, negative quantities,
                or an unmatched/invalid fee.
            ValueError -- on an unrecognised transaction type.
        """
        if self.t_type in self.BUY_TYPES or self.t_type == self.TYPE_DEPOSIT:
            if not self.buy_asset or self.buy_quantity is None:
                raise Exception("Type: " + self.t_type + " missing buy details")
            if self.sell_asset or self.sell_quantity is not None:
                raise Exception("Type: " + self.t_type + " sell details not required")
            if self.buy_quantity < 0:
                raise Exception("Type: " + self.t_type + " buy quantity is negative")
        elif self.t_type in self.SELL_TYPES or self.t_type == self.TYPE_WITHDRAWAL:
            if not self.sell_asset or self.sell_quantity is None:
                raise Exception("Type: " + self.t_type + " missing sell details")
            elif self.buy_asset or self.buy_quantity is not None:
                raise Exception("Type: " + self.t_type + " buy details not required")
            elif self.sell_quantity < 0:
                raise Exception("Type: " + self.t_type + " sell quantity is negative")
        elif self.t_type == self.TYPE_TRADE:
            if not self.buy_asset or self.buy_quantity is None:
                raise Exception("Type: " + self.t_type + " missing buy details")
            if not self.sell_asset or self.sell_quantity is None:
                raise Exception("Type: " + self.t_type + " missing sell details")
            if self.buy_quantity < 0:
                raise Exception("Type: " + self.t_type + " buy quantity is negative")
            if self.sell_quantity < 0:
                raise Exception("Type: " + self.t_type + " sell quantity is negative")
        else:
            raise ValueError("Type: " + self.t_type + " is unrecognised")
        if self.fee_asset and (self.fee_asset != self.buy_asset
                               and self.fee_asset != self.sell_asset):
            raise Exception("Fee asset: " + self.fee_asset + " does not match")
        elif self.fee_asset and self.fee_quantity is None:
            raise Exception("Missing fee quantity")
        elif self.fee_asset and self.fee_quantity < 0:
            raise Exception("Fee quantity is negative")
    def normalise_to_localtime(self):
        """Convert the timestamp into the configured local timezone."""
        self.timestamp = self.timestamp.astimezone(config.TZ_LOCAL)
    def include_fees(self):
        """Fold the fee leg into the matching buy or sell leg, then clear it.

        Buying: the fee reduces the quantity (and value) received.
        Selling: the fee increases the quantity (and value) disposed.
        NOTE(review): if fee_value is set while the matching leg's value is
        None, the -=/+= would fail on None -- confirm callers guarantee
        both are populated together.
        """
        # Include fees within buy/sell portion
        if self.buy_asset == self.fee_asset and self.fee_quantity:
            self.buy_quantity -= self.fee_quantity
            if self.fee_value:
                self.buy_value -= self.fee_value
            self.fee_quantity = None
            self.fee_asset = ""
            self.fee_value = None
        elif self.sell_asset == self.fee_asset and self.fee_quantity:
            self.sell_quantity += self.fee_quantity
            if self.fee_value:
                self.sell_value += self.fee_value
            self.fee_quantity = None
            self.fee_asset = ""
            self.fee_value = None
| 737147948/BittyTax | bittytax/record.py | record.py | py | 6,717 | python | en | code | null | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.