seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
11675785964 | from typing import Union
class BitReadStream:
def __init__(self, some_bytes: bytes = b''):
self.some_bytes = some_bytes
self.r_pointer = 0
self.r_in_byte_pointer = 0
def read(self, size: int = -1) -> str:
bits = []
while size != 0:
bit = self.get_bit_at(self.r_pointer, self.r_in_byte_pointer)
if bit is None:
break
self.inc_r_pointer()
bits.append(bit)
size -= 1
return ''.join(bits)
def inc_r_pointer(self):
self.r_in_byte_pointer += 1
if self.r_in_byte_pointer == 8:
self.r_in_byte_pointer = 0
self.r_pointer += 1
def get_bit_at(self, pointer, in_byte_pointer) -> str | None:
try:
byte = self.some_bytes[pointer]
except IndexError:
return None
bit_raw = byte & (1 << in_byte_pointer)
return '1' if bit_raw else '0'
class BitWriteStream:
    """Stream that assembles '0'/'1' characters into bytes, LSB-first per byte."""
    def __init__(self):
        self.bytes = b''            # completed bytes so far
        self.last_byte = b'\x00'    # partially-filled byte being built
        self.w_pointer = 0          # count of completed bytes
        self.w_in_byte_pointer = 0  # bit offset (0..7) in the partial byte
    def inc_w_pointer(self):
        """Advance the write position; flush the partial byte once it fills up."""
        self.w_in_byte_pointer += 1
        if self.w_in_byte_pointer == 8:
            self.w_in_byte_pointer = 0
            self.w_pointer += 1
            self.bytes += self.last_byte
            self.last_byte = b'\x00'
    def write(self, bits: str):
        """Append every '0'/'1' character of *bits* to the stream."""
        for symbol in bits:
            self.write_to_last_byte(symbol, self.w_in_byte_pointer)
            self.inc_w_pointer()
    def write_to_last_byte(self, bit, index):
        """Set ('1') or clear ('0') bit *index* (LSB-first) of the partial byte."""
        value: int = self.last_byte[0]
        mask = 1 << index
        value = value & ~mask if bit == '0' else value | mask
        self.last_byte = value.to_bytes(1, 'big')
    def get_bytes(self) -> bytes:
        """Return all completed bytes, plus the partial byte if it holds any bits."""
        if self.w_in_byte_pointer > 0:
            return self.bytes + self.last_byte
        return self.bytes
| Arcimiendar/huffman_python | src/bit_stream.py | bit_stream.py | py | 1,972 | python | en | code | 0 | github-code | 90 |
41340613770 | from pylsl import StreamInfo, StreamOutlet
import random
import time
import csv
import keyboard
def main():
    """Broadcast marker samples over LabStreamingLayer; the spacebar
    increments the marker value (debounced to one press per 2 seconds)."""
    # Set up LabStreamingLayer stream.
    info = StreamInfo(name='PyMarker', type='Markers', channel_count=3,
                      channel_format='double64', source_id='unique113')
    # Broadcast the stream.
    outlet = StreamOutlet(info)
    print("Now sending data...")
    markerValue = 1
    prev = -1  # timestamp of the last accepted key press (-1 = never)
    while (True):
        now = time.time()
        # BUG FIX: 'now' used to be assigned *after* the print below, so the
        # first key press raised NameError (and later presses printed a
        # timestamp that was one loop iteration stale).
        if keyboard.is_pressed(" ") and (now - prev) > 2:
            prev = now
            markerValue += 1
            print("Marker value: ", markerValue, "\tMarker time: ", now)
        # data: MarkerTime, MarkerValue, Current EpochTime
        data = [now, markerValue, now]
        # push data
        outlet.push_sample(data)
        time.sleep(0.05)
if __name__ == "__main__":
main() | underhood31/AFC-scripts | drive-download-20220321T153410Z-001/pylsl_outlet.py | pylsl_outlet.py | py | 957 | python | en | code | 0 | github-code | 90 |
16812631637 | n = int(input())
max_n = n*n
# n x n grid pre-filled with 0; 'step' is the next value to place (1..n*n)
a = [[0 for j in range(n)] for i in range(n)]
step = 1
i, j = 0, 0
# Shrinking window bounds of the not-yet-filled region
i_min, j_min = 0, 0
i_max, j_max = n, n
# Walk the grid in an inward clockwise spiral, writing consecutive numbers.
while step <= max_n:
    while j < j_max: # rightwards along the top row
        a[i][j] = step
        j += 1
        step += 1
    j -=1
    i +=1
    while i < i_max: # downwards along the right column
        a[i][j] = step
        i += 1
        step += 1
    i -= 1
    i_max -= 1
    j_max -= 1
    while j > j_min: # leftwards along the bottom row
        j -= 1
        a[i][j] = step
        step +=1
    i -= 1
    i_min += 1
    while i > i_min: # upwards along the left column
        a[i][j] = step
        step += 1
        i -= 1
    j_min += 1
# Print the spiral, one row per line.
for i in range(n):
    for j in range(n):
        print(a[i][j], end = ' ')
    print()
| FoxProklya/Step-Python | conclusion_of_the_squares_in_a_spiral.py | conclusion_of_the_squares_in_a_spiral.py | py | 707 | python | en | code | 0 | github-code | 90 |
43286245033 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
#from selenium.webdriver.support.ui import Select
import time
import math
import pyperclip
def calc(x):
    """Return log(|12 * sin(x)|) for the integer value of *x*, as a string."""
    magnitude = abs(12 * math.sin(int(x)))
    return str(math.log(magnitude))
link = "http://suninjuly.github.io/explicit_wait2.html"
try:
    browser = webdriver.Firefox()
    browser.get(link)
    # Explicit wait: block (up to 15 s) until the #price element shows "100".
    WebDriverWait(browser, 15).until(EC.text_to_be_present_in_element((By.ID, "price"), "100"))
    button = browser.find_element_by_css_selector("#book")
    button.click()
    # Read the challenge number, compute the answer and submit it.
    x = browser.find_element_by_css_selector("#input_value").text
    y = calc(x)
    input_field = browser.find_element_by_css_selector("#answer")
    #browser.execute_script("return arguments[0].scrollIntoView(true);", input_field)
    input_field.send_keys(y)
    button = browser.find_element_by_xpath("//button[text()='Submit']")
    button.click()
    pyperclip.copy(browser.switch_to.alert.text.split(': ')[-1]) # copy the result code to the clipboard
    browser.switch_to.alert.accept()
finally:
    # Give the page a moment before quitting so the result can be seen.
    time.sleep(5)
    browser.quit()
# remember to leave an empty line at the end of the file
| sergeykasyan/Stepik---auto-tests-course | lesson2-4_step8.py | lesson2-4_step8.py | py | 1,271 | python | en | code | 0 | github-code | 90 |
37926266708 | ### 1 – Crie um dicionário em que suas chaves serão os números 1, 4, 5, 6, 7, e 9
# (que podem ser armazenados em uma lista) e seus valores correspondentes
# aos quadrados desses números.
# listaNum = [1, 4, 5, 6, 7, 9]
# numDicionario = dict()
# for i in listaNum :
# numDicionario[i] = i**2
# print(numDicionario)
# print()
### 2 – Crie um dicionário em que suas chaves correspondem a números inteiros entre [1, 10]
# e cada valor associado é o número ao quadrado.
# numDicionario2 = dict()
# for n in range(1,11) :
# numDicionario2[n] = n**2
# print(numDicionario2)
# print()
### 2.1 com input
# numero = int(input('Digite até que valor para calcular ao quadrado: '))
# numDicionario3 = dict()
# for q in range(1,numero+1) :
# numDicionario3[q] = q**2
# print(numDicionario3)
############################################ ainda não consegui resolver certinho
### 3 - Crie um programa, utilizando dicionário, que simule a baixa de estoque
# das vendas de um supermercado. Não esqueça de fazer as seguintes validações:
# Produto Indisponível
# Produto Inválido
# Quantidade solicitada não disponível
# O programa deverá mostrar para o cliente a quantidade de itens comprados e o total.
# estoqueLista = [('melancia',4),('carne',0),('agua',1),('refrigerante',0),('cafe',8)]
# estoque = dict(estoqueLista)
# while True :
# mercado = input('Deseja comprar [SIM]? Ou [0] para sair do mercado: ')
# if mercado == '0' :
# break
# elif mercado != '0' :
# escolhaDeItens = input('O que deseja comprar? ')
# quantidadeNoEstoque = estoque.pop(escolhaDeItens,'Produto inválido.')
# if quantidadeNoEstoque == 'Produto inválido.' :
# print('Produto inválido')
# else :
# if quantidadeNoEstoque == 0 :
# print('Produto indisponível.')
# else :
# escolhaDeQuantidade = int(input(f'Quantos {escolhaDeItens} deseja comprar? '))
# if quantidadeNoEstoque < escolhaDeQuantidade :
# print('Quantidade solicitada não disponível.')
############################################ ainda não consegui resolver certinho
# correção do exercicio 03 pelo professor
# Supermarket stock simulation (instructor's reference solution).
# Tracks a product->quantity dict and validates: invalid product,
# out-of-stock product, and requested quantity above stock.
itens_comprados = []
total_quantidade_geral = 0
estoque = {'coca':15, 'chocolate':6, 'batata':11, 'papel':3, 'presunto':26}
continuar = input('Bem-vindo(a) ao Supermercado T3C5!. Deseja ir as compras? (s/n)').lower()
while continuar not in ['s','n']:
    continuar = input('Resposta inválida. Deseja ir as compras (s/n)').lower()
while continuar == 's':
    print()
    print('Nossos produtos:')
    # Only list products that still have stock.
    for i in estoque:
        if estoque[i] > 0:
            print(i)
    print()
    produto = input('Qual produto vc deseja comprar?')
    # -1 sentinel distinguishes "unknown product" from "known but empty".
    quantidade_atual = estoque.get(produto,-1)
    if quantidade_atual == -1:
        print('Produto Inválido')
    elif quantidade_atual == 0:
        print('Produto Indisponível')
    else:
        quantidade = int(input('Qual a quantidade desejada?'))
        if quantidade > quantidade_atual:
            print(f'Quantidade solicitada não disponível. No momento temos apenas a quantidade de {quantidade_atual} em estoque')
        else:
            # Commit the sale: decrement stock and accumulate totals.
            estoque[produto] = quantidade_atual - quantidade
            if produto not in itens_comprados:
                itens_comprados.append(produto)
            total_quantidade_geral += quantidade
            print('Compra realizado com sucesso!!!')
    continuar = input('Deseja continuar as compras? (s/n)').lower()
    while continuar not in ['s','n']:
        continuar = input('Resposta inválida. Deseja ir as compras (s/n)').lower()
print()
print('---Resumo da compra---')
print(f'Quantidade de itens comprados: {len(itens_comprados)}')
print(f'Total de itens comprados: {total_quantidade_geral}') | GarconeAna/aulasBlue | aula11-exercicos/exercicios01.py | exercicios01.py | py | 3,878 | python | pt | code | 0 | github-code | 90 |
26515609509 | import pymysql
def db_login(user, passwd, server_addr, dbname):
    # Connect to MySQL; returns the connection, or None when the server
    # rejects the attempt (bad credentials / unreachable host).
    try:
        db = pymysql.connect(server_addr, user, passwd, dbname)
    except pymysql.err.OperationalError:
        db = None
    return db
def db_showtable(db):
    # List every table together with its row count as (name, count) tuples.
    cursor = db.cursor()
    cursor.execute("show tables")
    tabs = cursor.fetchall()
    res = list()
    for tab in tabs:
        cursor.execute("select count(*) from " + tab[0])
        row_cnt = cursor.fetchone()
        res.append((tab[0], row_cnt[0]))
    cursor.close()
    return res
def db_close(db):
    # Close the connection if db_login succeeded.
    if db is not None:
        db.close()
def db_SearchCustomer(db):
    # Return (cusID, cusname, cusphone) for every customer row.
    cursor = db.cursor()
    cursor.execute("select * from customer")
    tabs = cursor.fetchall()
    res = list()
    for tab in tabs:
        cusID = tab[0]
        cusname = tab[1]
        cusphone= tab[2]
        res.append((cusID, cusname, cusphone))
    cursor.close()
    return res
def db_InsertCustomer(db, cusID, cusname, cusphone, address, contact_phone, contact_name, contact_Email, relation):
    # Insert one customer row; rolled back on any failure.
    # NOTE(review): queries throughout this module are built by string
    # interpolation and are open to SQL injection -- parameterized queries
    # (cursor.execute(sql, params)) would be safer.
    cursor = db.cursor()
    try:
        sql = "INSERT INTO customer VALUES('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', null, null)" %\
            (cusID, cusname, cusphone, address, contact_phone, contact_name, contact_Email, relation)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_DeleteCustomer(db, cusID):
    # Delete one customer by primary key; rolled back on failure.
    cursor = db.cursor()
    try:
        sql = "DELETE FROM customer WHERE cusID = '%s'" % (cusID)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_UpdateCustomer(db, cusID, cusname, cusphone, address, contact_phone, contact_name, contact_Email, relation):
    # Update all editable fields of one customer.
    # NOTE(review): column name 'contact_phont' looks like a typo -- confirm
    # against the actual table schema before changing it.
    cursor = db.cursor()
    sql = "UPDATE customer SET cusname = '%s', cusphone = '%s', address = '%s', contact_phont = '%s', contact_name = '%s', contact_Email='%s', relation='%s' WHERE \
    cusID = '%s'" % (cusname, cusphone, address, contact_phone, contact_name, contact_Email, relation, cusID)
    cursor.execute(sql)
    db.commit()
    cursor.close()
def db_OpenSavingAcc(db, cusID, accID, money, settime, interest, saveType):
    # Open a savings account: one row each in accounts, saveacc and
    # cusforacc, committed together (all rolled back on failure).
    cursor = db.cursor()
    try:
        sql = "INSERT INTO accounts VALUES('%s', %s, '%s', '储蓄账户')" %(accID, money, settime)
        cursor.execute(sql)
        sql = "INSERT INTO saveacc VALUES('%s', %s, '%s')" %(accID, interest, saveType)
        cursor.execute(sql)
        sql = "INSERT INTO cusforacc VALUES('%s', 'Liverpool', '%s', '%s', '储蓄账户')" %(accID, cusID, settime)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_OpenCheckingAcc(db, cusID, accID, money, settime, overdraft):
    # Open a checking account (accounts + checkacc + cusforacc rows).
    cursor = db.cursor()
    try:
        sql = "INSERT INTO accounts VALUES('%s', %s, '%s', '支票账户')" %(accID, money, settime)
        cursor.execute(sql)
        sql = "INSERT INTO checkacc VALUES('%s', %s )" %(accID, overdraft)
        cursor.execute(sql)
        sql = "INSERT INTO cusforacc VALUES('%s', 'Liverpool', '%s', '%s', '支票账户')" %(accID, cusID, settime)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_SearchAcc(db):
    # Join accounts with their owners; returns (accID, cusID, accType) tuples.
    cursor = db.cursor()
    cursor.execute("select accounts.accountID, cusforacc.cusID, accounts.accountType from accounts, cusforacc WHERE accounts.accountID = cusforacc.accountID")
    tabs = cursor.fetchall()
    res = list()
    for tab in tabs:
        accID = tab[0]
        cusID = tab[1]
        accType= tab[2]
        res.append((accID, cusID, accType))
    cursor.close()
    return res
def db_DeleteAcc(db, accID):
    # Remove an account from every table that references it (child rows
    # first, then the accounts row itself); rolled back on failure.
    cursor = db.cursor()
    try:
        sql = "DELETE FROM saveacc WHERE accountID = '%s'" %(accID)
        cursor.execute(sql)
        sql = "DELETE FROM checkacc WHERE accountID = '%s'" %(accID)
        cursor.execute(sql)
        sql = "DELETE FROM cusforacc WHERE accountID = '%s'" %(accID)
        cursor.execute(sql)
        sql = "DELETE FROM accounts WHERE accountID = '%s'" %(accID)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_UpdateAcc(db, accID, money, interest, overdraft):
    # Update an account's balance plus its type-specific field: interest
    # rate for savings ('储蓄账户'), overdraft limit for checking ('支票账户').
    # NOTE(review): queries are built via string interpolation and are open
    # to SQL injection -- parameterized queries would be safer.
    cursor = db.cursor()
    cursor.execute("SELECT accountType from accounts WHERE accountID = '%s'" %(accID))
    Type = cursor.fetchall()
    for tab in Type:
        if(tab[0] == '储蓄账户'):
            sql = "UPDATE saveacc SET interestrate = %s WHERE accountID = '%s'" %(interest, accID)
            cursor.execute(sql)
        elif(tab[0] == '支票账户'):
            sql = "UPDATE checkacc SET overdraft = %s WHERE accountID = '%s'" %(overdraft, accID)
            # BUG FIX: this statement was previously never executed -- 'sql'
            # was overwritten by the balance update below before execute().
            cursor.execute(sql)
        sql = "UPDATE accounts SET money = %s WHERE accountID = '%s'" %(money, accID)
        cursor.execute(sql)
    db.commit()
    cursor.close()
def db_CreateLoan(db, loanID, cusID, money):
    # Create a loan ('0' = initial/unpaid state) and link it to its customer.
    cursor = db.cursor()
    try:
        sql = "INSERT INTO loan VALUES('%s', %s, 'Liverpool', '0')" %(loanID, money)
        cursor.execute(sql)
        sql = "INSERT INTO cusforloan VALUES('%s', '%s')" %(loanID, cusID)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_SearchLoan(db):
    # Join loans with their customers; returns (loanID, cusID, money, state).
    cursor = db.cursor()
    sql = "SELECT cusforloan.loanID, cusforloan.cusID, loan.money, loan.state FROM loan, cusforloan WHERE loan.loanID = cusforloan.loanID"
    cursor.execute(sql)
    tabs = cursor.fetchall()
    res = list()
    for tab in tabs:
        loanID = tab[0]
        cusID = tab[1]
        money= tab[2]
        state = tab[3]
        res.append((loanID, cusID, money, state))
    cursor.close()
    return res
def db_DeleteLoan(db, loanID):
    # Delete a loan and its customer link (link row first, FK-safe order).
    cursor = db.cursor()
    try:
        sql = "DELETE FROM cusforloan WHERE loanID = '%s'" %(loanID)
        cursor.execute(sql)
        sql = "DELETE FROM loan WHERE loanID = '%s'" %(loanID)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
def db_PayLoan(db, loanID, cusID, money, paytime):
    # Record one repayment against a loan.
    cursor = db.cursor()
    try:
        sql = "INSERT INTO payinfo VALUES('%s', '%s', %s, '%s')" %(loanID, cusID, money, paytime)
        cursor.execute(sql)
        db.commit()
    except:
        db.rollback()
    cursor.close()
if __name__ == "__main__":
    # Smoke test: connect locally, list tables, then disconnect below.
    db = db_login("root", "", "127.0.0.1", "test")
    tabs = db_showtable(db)
db_close(db) | Philip-Chang/USTC-2020SPRING | Data Base/python+flask/db.py | db.py | py | 6,391 | python | en | code | 1 | github-code | 90 |
19932235322 | #Program coded by: Ian McDowell
# NOTE(review): this script uses raw_input, so it targets Python 2.
import math
import random
#makes array for 'songs'
global SongList
SongList = []
#asks user for how many 'songs' they want in the list and creates the list
global SongAmount
SongAmount = int(raw_input("How many songs?"))
for x in range(0,SongAmount + 1):
    SongList.insert(0,x)
SongList.remove(0)
#makes needed arrays for shuffling
global PlayedList
PlayedList = []
global UnplayedList
UnplayedList = []
global PrintList
PrintList = []
#asks how many loops the user wants
global loop
loop = int(raw_input("How many loops?"))
UnplayedList = SongList
# ~(-*-)~
#method for shuffling the list
def Shuffle():
    # Repeatedly 'plays' random songs from UnplayedList, moving them to
    # PlayedList, so that no song repeats within a loop through the list.
    # Operates entirely on the module-level globals above and prints the
    # final shuffle order before exiting the interpreter.
    global PlayedList
    global UnplayedList
    global PrintList
    global loop
    while loop != 0: #while the loop value (starting with the user inputed amount) is not 0:
        while len(UnplayedList) > 1: #while there is more than 1 'song' in the unplayed list:
            #choose a random 'song' from the list then removes it from the list
            r = random.choice(UnplayedList)
            UnplayedList.remove(r)
            #reshuffles the unplayed list
            UnplayedList = random.sample(UnplayedList, len(UnplayedList))
            #adds played 'song' to the played 'songs' list
            PrintList.insert(0,r)
            PlayedList.insert(0,r)
        #gets the last 'song' and 'plays' it
        x = random.choice(UnplayedList)
        PrintList.insert(0,x)
        #clears unplayed list then refills it and empties the played list
        UnplayedList.remove(x)
        UnplayedList = PlayedList
        PlayedList = []
        #choose a random 'song' from the list
        r = random.choice(UnplayedList)
        PrintList.insert(0,r)
        # removes the played 'song' from the unplayed list, adds it to the played list, and adds the 'song' x back to the unplayed list
        PlayedList.insert(0,r)
        UnplayedList.remove(r)
        UnplayedList.insert(0,x)
        #removes 1 from the amount of loops left
        loop = loop - 1
        if loop <= 0: #removes the latest 'played' 'song' from the printlist then prints the shuffle order
            PrintList.remove(r)
            print("Shuffle Order: ")
            print(PrintList)
            exit()
Shuffle()
| ianm24/Music-Player-Shuffler | Test/Shuffle/shuffle.py | shuffle.py | py | 2,021 | python | en | code | 0 | github-code | 90 |
18536539659 | # union-find
# For problems like this it is easy to hesitate between Union-Find, BFS and DFS.
# (The editorial PDF, for what it's worth, uses Union-Find.)
n, m = map(int, input().split())
def find_root(x):
    """Return the representative of x's set, compressing the path as we go."""
    if par[x] != x:
        par[x] = find_root(par[x])
    return par[x]
def unite(x, y):
    """Union the sets containing x and y (union by rank, with size tracking).

    Returns the product of the two pre-merge component sizes when a merge
    happens, or 0 when x and y were already in the same set.
    """
    x = find_root(x)
    y = find_root(y)
    if(x == y):
        return 0
    if rank[x] < rank[y]:
        par[x] = y
        temp = size[x] * size[y]
        size[y] = size[x] + size[y]
    else:
        par[y] = x
        if (rank[x] == rank[y]):
            # BUG FIX: was "rank[x] += 11" -- an obvious typo. Union-by-rank
            # increments by 1; the tree shapes (and all results) are the same
            # either way since every rank was just scaled by 11.
            rank[x] += 1
        temp = size[x] * size[y]
        size[x] = size[x] + size[y]
    return temp
def is_same(x,y):
    # True when x and y currently belong to the same set.
    return find_root(x) == find_root(y)
# par = [0]*n
par = list(range(n))  # parent pointers: each node starts as its own root
rank = [0]*n          # union-by-rank heights
size = [1]*n          # component sizes
init_nums = list(map(int, input().split()))
edges = [list(map(int, input().split())) for _ in range(m)]
edges = [[b[0]-1, b[1]-1] for b in edges] #1-idx -> 0-idx
for b in range(m):
    unite(edges[b][0] , edges[b][1])
ans = 0
for i in range(n):
    num = init_nums[i]
    # Count positions i whose initial value (i.e. the ball currently there)
    # is in the same swap-component as position i itself.
    if is_same(i, num - 1):
        ans += 1
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03354/s293670845.py | s293670845.py | py | 1,241 | python | zh | code | 0 | github-code | 90 |
42039763520 | """
You are given a string s that consists of lower case English letters and brackets.
Reverse the strings in each pair of matching parentheses, starting from the innermost one.
Your result should not contain any brackets.
Example 1:
Input: s = "(abcd)"
Output: "dcba"
Example 2:
Input: s = "(u(love)i)"
Output: "iloveu"
Explanation: The substring "love" is reversed first, then the whole string is reversed.
https://leetcode.com/problems/reverse-substrings-between-each-pair-of-parentheses/
"""
class Solution:
    def reverseParentheses(self, s: str) -> str:
        """Reverse every parenthesized substring, innermost first, and
        return the result with all brackets removed."""
        # Each stack frame accumulates the text of one nesting level.
        levels = ['']
        for ch in s:
            if ch == "(":
                # Enter a deeper nesting level.
                levels.append("")
            elif ch == ")":
                # Close the level: fold its reversed text into the parent.
                finished = levels.pop()
                levels[-1] += finished[::-1]
            else:
                levels[-1] += ch
        return levels.pop()
| nilay-gpt/LeetCode-Solutions | reve_str_in_brackets.py | reve_str_in_brackets.py | py | 887 | python | en | code | 2 | github-code | 90 |
20902854572 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import functools
import paddle
import paddle.fluid as fluid
import models
from utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('model', str, "ResNet200_vd", "Set the network to use.")
add_arg('embedding_size', int, 512, "Embedding size.")
add_arg('image_shape', str, "3,448,448", "Input image size.")
add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
add_arg('binary_model', str, None, "Set binary_model dir")
add_arg('task_mode', str, "retrieval", "Set task mode")
# yapf: enable
model_list = [m for m in dir(models) if "__" not in m]
def convert(args):
    """Load pretrained parameters for the selected network and export a
    binary inference model via fluid.io.save_inference_model."""
    # parameters from arguments
    model_name = args.model
    pretrained_model = args.pretrained_model
    if not os.path.exists(pretrained_model):
        print("pretrained_model doesn't exist!")
        sys.exit(-1)
    image_shape = [int(m) for m in args.image_shape.split(",")]
    assert model_name in model_list, "{} is not in lists: {}".format(args.model,
                                                                     model_list)
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    # model definition
    model = models.__dict__[model_name]()
    if args.task_mode == 'retrieval':
        # retrieval mode exports the embedding head rather than the classifier
        out = model.net(input=image, embedding_size=args.embedding_size)
    else:
        out = model.net(input=image)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    def if_exist(var):
        # only load variables that actually have a file in the checkpoint dir
        return os.path.exists(os.path.join(pretrained_model, var.name))
    fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
    fluid.io.save_inference_model(
        dirname = args.binary_model,
        feeded_var_names = ['image'],
        target_vars = [out['embedding']] if args.task_mode == 'retrieval' else [out],
        executor = exe,
        main_program = None,
        model_filename = 'model',
        params_filename = 'params')
    print('input_name: {}'.format('image'))
    # NOTE(review): in the non-retrieval branch the expression below only
    # formats the string -- it never prints the output name. Confirm intent.
    print('output_name: {}'.format(out['embedding'].name)) if args.task_mode == 'retrieval' else ('output_name: {}'.format(out.name))
    print("convert done.")
def main():
    # Parse CLI arguments, echo them, and run the conversion.
    args = parser.parse_args()
    print_arguments(args)
    convert(args)
if __name__ == '__main__':
    main()
| PaddlePaddle/Research | CV/landmark/inference/convert_binary_model.py | convert_binary_model.py | py | 2,544 | python | en | code | 1,671 | github-code | 90 |
36437174249 | import pygame
from src.components.Coin import Coin
from src.utils.drawText import drawText
class UserInterface:
    """In-game HUD: level name on the left, coin icon and score on the right."""
    def __init__(self, screen, colors, level_name, player_info):
        # Injected collaborators / shared state
        self.screen = screen
        self.colors = colors
        self.level_name = level_name
        self.player_info = player_info
        # Animated coin icon displayed beside the score
        self.coin = Coin(screen, x=25, y=0)

    def drawLevelName(self):
        """Render the current level's name in the top-left corner."""
        drawText(self.screen, text=self.level_name, x=100, y=10,
                 color=self.colors["black"])

    def drawPlayerScore(self):
        """Render the coin icon and the zero-padded six-digit score."""
        self.coin.draw()
        self.coin.updateColor()
        score_label = "{:06d}".format(self.player_info["score"])
        drawText(self.screen, text=score_label, x=1030, y=10,
                 color=self.colors["black"])
def draw(self):
self.drawLevelName()
self.drawPlayerScore() | Luc4r/Rectov | src/components/UserInterface.py | UserInterface.py | py | 846 | python | en | code | 0 | github-code | 90 |
42236050129 | import mysql.connector
import os
from timeit import default_timer as timer
from datetime import timedelta, date
import cred
import platform
con = mysql.connector.connect(host=cred.host, password=cred.password, user=cred.user, database=cred.database)
cursor = con.cursor()
# Clear any state left over from a previous session.
cursor.execute('Delete from info2')
con.commit()
cursor.execute('Delete from info')
con.commit()
# Launch the welcome screen; it is expected to write a row into `info`.
os.system('python Welcome_Screen.py')
cursor.execute("select * from info")
results = cursor.fetchone()
if results is not None:
    if results[0] == 'True':
        # Run the game and log how long the session lasted.
        start = timer()
        os.system('python main_game.py')
        end = timer()
        f = open('endnote.txt', 'a')
        elapsed_time = timedelta(seconds=end-start)
        f.write(f'Thanks for playing\nElapsed time: {elapsed_time}\nDate: {date.today()} \n \n')
        f.close()
        # Open the log file with the platform's default text viewer.
        if platform.system() == 'Darwin':
            os.system("open -a TextEdit endnote.txt")
        elif platform.system() == 'Windows':
            os.system("endnote.txt")
    else:
print("You didn't start the game! ") | Baibhav-Mishra/Ludo | start.py | start.py | py | 1,011 | python | en | code | 0 | github-code | 90 |
29581719852 | from piano_transcription_inference import PianoTranscription, sample_rate, load_audio
import os
import time
def main():
    """Transcribe one audio file to a MIDI file and report elapsed wall time."""
    st = time.time()
    song = "./audio/seg/Q4_aQ4PIaRMLwE_0.mp3"
    # Load audio
    print("### Loading ###")
    (audio, _) = load_audio(song, sr=sample_rate, mono=True)
    # Transcriptor
    print("### Transcriptor ###")
    transcriptor = PianoTranscription(device='cuda')  # 'cuda' | 'cpu'
    # Transcribe and write out to MIDI file
    print("### Transcribing/Writing ###")
    # Derive the output name from the file's basename. The previous
    # song.split(".")[1] indexing only worked for paths with exactly one
    # leading "./" and broke for names containing extra dots.
    name = os.path.splitext(os.path.basename(song))[0]
    transcribed_dict = transcriptor.transcribe(audio, os.path.join("./output", name + ".mid"))
    ed = time.time()
    print("### DONE ###")
    print("Took " + str(ed - st) + " seconds to process.")
if __name__ == "__main__":
main() | joann8512/piano_transcription | inference_test.py | inference_test.py | py | 805 | python | en | code | 0 | github-code | 90 |
33412833993 | import numpy as np
import matplotlib.pyplot as plt
import math
# Visualize the intersection of the infinite line through (x1,y1)-(x2,y2)
# with a circle of radius r centered at the origin, alongside a
# width x height rectangle (e.g. an image frame).
height = 480
width = 640
r = max(height,width)
x1 = 50
y1 = -150
x2 = 150
y2 = 150
dx = x2-x1
dy = y2-y1
dr = math.sqrt(dx**2+dy**2)
D = x1*y2 - x2*y1
# Closed-form circle/line intersection points (determinant form, cf.
# Wolfram MathWorld "Circle-Line Intersection"); assumes r**2*dr**2 >= D**2,
# i.e. the line actually meets the circle.
xr1 = (D*dy+np.sign(dy)*dx*math.sqrt(r**2*dr**2-D**2))/dr**2
yr1 = (-D*dx+abs(dy)*math.sqrt(r**2*dr**2-D**2))/dr**2
xr2 = (D*dy-np.sign(dy)*dx*math.sqrt(r**2*dr**2-D**2))/dr**2
yr2 = (-D*dx-abs(dy)*math.sqrt(r**2*dr**2-D**2))/dr**2
print(xr1, yr1)
# Small red circles marking the two intersection points.
p1 = plt.Circle((xr1,yr1),20, fc='None',ec="Red")
p2 = plt.Circle((xr2,yr2),20, fc='None',ec="Red")
plt.gca().add_patch(p1)
plt.gca().add_patch(p2)
#plot circle
# ts = np.linspace(0, 2*np.pi)
# xs = [r*np.cos(t) for t in ts]
# ys = [r*np.sin(t) for t in ts]
# plt.plot(xs, ys, "r")
circle = plt.Circle((0,0),r, fc='None',ec="black")
plt.gca().add_patch(circle)
# plot rectangle
rectangle = plt.Rectangle((-width/2,-height/2), width, height, fc='None',ec="black")
plt.gca().add_patch(rectangle)
# plot lines
line = plt.Line2D((-150, 350), (-750, 750), lw=1, color="green")
plt.gca().add_line(line)
line_segment = plt.Line2D((50, 150), (-150, 150), lw=3, color="purple")
plt.gca().add_line(line_segment)
line_x = plt.Line2D((0, 0), (-750, 750), lw=1, color="grey")
plt.gca().add_line(line_x)
line_y = plt.Line2D((-750, 750), (0, 0), lw=1, color="grey")
plt.gca().add_line(line_y)
# Grey guide lines through the intersection coordinates.
line_xr1 = plt.Line2D((xr1, xr1), (-750, 750) ,lw=1, color="grey")
plt.gca().add_line(line_xr1)
line_xr2 = plt.Line2D((xr2, xr2), (-750, 750) ,lw=1, color="grey")
plt.gca().add_line(line_xr2)
line_yr1 = plt.Line2D((-750, 750), (yr1, yr1), lw=1, color="grey")
plt.gca().add_line(line_yr1)
line_yr2 = plt.Line2D((-750, 750), (yr2, yr2), lw=1, color="grey")
plt.gca().add_line(line_yr2)
plt.box(True)
plt.axis(True)
plt.axis("equal")
#plt.grid()
plt.show() | silvasta/centerLine | src/tools/annotation/firstCircleAnnotation.py | firstCircleAnnotation.py | py | 1,827 | python | en | code | 0 | github-code | 90 |
42222433505 | # Desafio 037 - Escreva um programa que
# leia um número inteiro qualquer e peça
# para o usuário escolher qual será a base
# de conversão:
#
# 1 para binário;
# 2 para octal;
# 3 para hexadecimal.
n = int(input('Digite um numero inteiro qualquer\n:'))
escolha = int(input('Me informe a conversão que deseja fazer\n'
                    '(1) para\033[1;35m Binário\033[m\n'
                    '(2) para\033[1;34m Octal\033[m\n'
                    '(3) para\033[1;33m Hexadecimal\033[m\n:'))
# bin()/oct()/hex() return strings with a 2-char prefix ("0b"/"0o"/"0x");
# the [2::1] slice strips that prefix before printing.
if escolha == 1:
    print(bin(n)[2::1])
elif escolha == 2:
    print(oct(n)[2::1])
elif escolha == 3:
    print(hex(n)[2::1])
else:
    print('---------------------------------------------------------------')
print(f'\033[1;31m ERRO, dados informados não coincidem {escolha} não é um formato aceito! ') | jhownny/CursoEmVideoPython | CursoEmVideo-Python/PythonExercicios/Atividade de 31 a 40/ex037.py | ex037.py | py | 813 | python | pt | code | 1 | github-code | 90 |
32518048604 | from timeit import timeit
# Benchmark the cost of raising and handling an exception 10,000 times.
code1 = """
def calculate_xfactor(age):
    if age <= 0:
        raise ValueError("Age cannot be 0 or less.")
try:
    calculate_xfactor(-1)
except ValueError as error:
    pass
"""
print(timeit(code1, number=10000))
try:
    age = int(input("Age: "))
    xfactor = 10 / age
except (ValueError, ZeroDivisionError) as ex:
    # Covers both non-numeric input (ValueError) and age == 0.
    print("You didn't enter a valid age.")
    print(ex)
    print(type(ex))
else:
    # Runs only when the try block raised nothing.
    print("Your age is", age)
| bednarczyk/python-practice | Practice/exceptions.py | exceptions.py | py | 496 | python | en | code | 0 | github-code | 90 |
9189273862 | #!/usr/bin/env python
import os
import numpy as np
from ase import Atoms
from ase.io import read, write
from aimdprobe.init_data import init_data, get_raw_traj
from aimdprobe.structure_probe.probe_surface_waters import get_adsorbed_h2o
from aimdprobe.useful_functions import get_cumulative_avg
if __name__ == "__main__":
    """
    plot surface adsorbed solvents (H2O)
    """
    # BUG FIX: matplotlib.pyplot is used below (plt.subplots etc.) but was
    # never imported, so the script crashed with NameError before plotting.
    import matplotlib.pyplot as plt

    fp = os.getcwd()
    fn = 'vasprun.xml'
    # fn = 'OUTCAR'
    # get raw data
    raw_data = init_data(fp, fn)
    raw_traj = get_raw_traj(raw_data)
    # parameters
    nbins = 100
    ads_list = []  # no adsorbate in the system
    slab_list = np.arange(64)  # metal slab has 64 Au atoms
    dist = 3  # Angstrom, a general bond length for water adsorption on transition metal surfaces
    # Count adsorbed waters per frame, then smooth with a cumulative average.
    N_w_ads = []
    for traj in raw_traj:
        n_w_ads = get_adsorbed_h2o(raw_data, traj, ads_list, slab_list, dist)
        N_w_ads.append(n_w_ads)
    N_w_ads_avg = get_cumulative_avg(N_w_ads)
    time = np.arange(len(N_w_ads_avg))*0.001  # ps
    fig, ax = plt.subplots(figsize=(8,6))
    ax.plot(time, N_w_ads, lw = 1, color = 'grey', alpha = 0.5)
    ax.plot(time, N_w_ads_avg, lw = 3, color = 'black')
    ax.annotate('Average adsorbed solvents: '+str(round(N_w_ads_avg[-1],2)), (1, 2.5), fontsize = 15)
    ax.set_xlabel('Time (ps)', fontsize = 20)
    ax.set_ylabel('N / per adsorbate', fontsize = 20)
    ax.tick_params(labelsize=20)
    fig.tight_layout()
    fig.savefig('n_w_ads.png', dpi = 150)
| tjunewson/AIMDprobe | scripts/plot_surface_waters.py | plot_surface_waters.py | py | 1,492 | python | en | code | 4 | github-code | 90 |
39159257881 | #-*- coding: UTF-8 -*-
import common
from templates import default
from templates import combination
logger = common.getLogger(__name__)
def isSupport(soup):
    """Indicate whether the resume page comes from Zhilian, detected by the
    page <title> containing the Zhilian brand name.
    @return true : false"""
    return soup.head is not None and soup.head.title is not None and soup.head.title.string.find(u'智联') != -1
def getTemplate(soup):
    # Try each known Zhilian resume template in turn and return the first
    # one that claims support for this document, or None if nothing matches.
    template = default.Template(soup)
    if template.isSupport():
        logger.info('template is zhilian default')
        return template
    template = combination.Template(soup)
    if template.isSupport():
        logger.info('template is zhilian combination')
        return template
    logger.warning(
        'there isn\'t any zhilian templates could parse the resume correctly')
    return None
| yangyraaron/resumeanalysis | zhilian/zlFilter.py | zlFilter.py | py | 791 | python | en | code | 2 | github-code | 90 |
19092460360 | #coding:utf-8
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import time
def getMoriHpNewsList(base_url):
    # Fetch the page with headless Chrome and return the parsed news items.
    print('## webscrape')
    # driver = makeDriver(base_url)
    print('## driver make ok')
    print('## driver get ok')
    # driver.get("http://www.yomiuri.co.jp/")
    time.sleep(0.3)
    html = makeDriver(base_url).page_source.encode("utf-8")
    # html = 'テスト投稿です'
    print('## GetTweetList')
    return shapingMoriHpNewsList(base_url, html)
def shapingMoriHpNewsList(base_url, html):
    moriHpNewsList = []
    # Scrape the "shinchaku" (new arrivals) <ul>: one dict per <li> holding
    # title, date, and an absolute URL joined against base_url.
    print('shapingMoriHpNewsList')
    soup = BeautifulSoup(html, "html.parser")
    #get headlines
    mainNewsIndex = soup.find("ul", attrs={"class", "shinchaku"})
    mainNewsContentList = mainNewsIndex.find_all("li")
    for mainNewsContent in mainNewsContentList:
        title = mainNewsContent.find("a").text
        url = urljoin(base_url, mainNewsContent.find("a").attrs["href"])
        date = mainNewsContent.find("span", attrs={"class", "date"}).text
        moriHpNews = {'title': title, 'date': date, 'url': url}
        # tweet_data = title + '\n' + date + '\n' + link
        print(moriHpNews)
        moriHpNewsList.append(moriHpNews)
    return moriHpNewsList
def getMoriHpScreenshot(base_url):
    # Save a screenshot of the page to /tmp (a writable path, presumably
    # because this runs on AWS Lambda -- see the /opt paths below).
    print('getMoriHpScreenshot')
    time.sleep(0.3)
    driver = makeDriver(base_url)
    driver.get_screenshot_as_file('/tmp/screenshot.png')
def makeDriver(base_url):
    # Build a headless Chrome driver (binary paths match a Lambda layer
    # layout) and navigate it to base_url before returning it.
    options = webdriver.ChromeOptions()
    options.binary_location = '/opt/headless-chrome/headless-chromium'
    options.add_argument("--headless")
    options.add_argument("--no-sandbox")
    options.add_argument("--single-process")
    driver = webdriver.Chrome('/opt/headless-chrome/chromedriver',
                              options=options)
    driver.get(base_url)
    return driver
# if __name__ == "__main__":
# print(webscrape())
| meganeJPN/python-web-scrape-twitter-bot | lambda/webscrape.py | webscrape.py | py | 2,041 | python | en | code | 1 | github-code | 90 |
5261575188 |
class MyLinkedList:
    """Singly linked list built for practice: add/get/remove by position,
    several reversal variants, merge sort, cycle and intersection demos.

    Indexes are 0-based.  Private helpers use name mangling (__gAt etc.).
    """
    # Internal list node: payload in `data`, successor in `next`.
    class Node:
        def __init__(self,val = 0,next = None):
            self.data = val
            self.next = next
    def __init__(self):
        self.head = None
    def isEmpty(self):
        return self.head == None
    # O(n) length by full traversal (called repeatedly by index checks).
    def size(self):
        tmp = self.head
        cnt = 0
        while tmp!= None:
            cnt += 1
            tmp = tmp.next
        return cnt
    def addFirst(self,ele):
        n = self.Node(ele,self.head)
        self.head = n
    # Print all elements space-separated, then a newline.
    def display(self):
        tmp = self.head
        while tmp!= None:
            print(tmp.data,end= " ")
            tmp = tmp.next
        print()
    def addLast(self,ele):
        if self.isEmpty():
            self.addFirst(ele)
            return
        n = self.Node(ele)
        tmp = self.head
        while tmp.next!=None:
            tmp = tmp.next
        tmp.next = n
    # Insert at index idx; silently ignores out-of-range indexes.
    def addAt(self,ele,idx):
        if idx<0 or idx >self.size():
            return
        elif idx == 0:
            self.addFirst(ele)
            return
        else:
            tmp = self.__gAt(idx-1)
            n = self.Node(ele,tmp.next)
            tmp.next = n
    def getFirst(self):
        if self.isEmpty():
            return None
        return self.head.data
    # Node (not value) at index idx; returns None when idx >= size.
    def __gAt(self,idx):
        i = 0
        temp = self.head
        while temp != None and i < idx:
            temp = temp.next
            i += 1
        return temp
    def getAt(self,idx):
        if idx < 0 or idx >= self.size():
            return None
        else:
            return self.__gAt(idx).data
    # NOTE(review): getLast on an empty list dereferences None
    # (__gAt(-1) returns the None head) -- callers must check isEmpty.
    def getLast(self):
        return self.__gAt(self.size()-1).data
    def removeFirst(self):
        if self.isEmpty():
            return None
        tmp = self.head
        self.head = self.head.next
        return tmp.data
    def removeLast(self):
        if self.isEmpty():
            return None
        elif self.head.next == None:
            return self.removeFirst()
        else:
            tmp = self.__gAt(self.size()-2)
            dt = tmp.next.data
            tmp.next = None
            return dt
    def removeAt(self,idx):
        if idx < 0 or idx >= self.size():
            return None
        if idx == 0:
            return self.removeFirst()
        tmp = self.__gAt(idx-1)
        dt = tmp.next.data
        tmp.next = tmp.next.next
        return dt
    # Iterative in-place reversal (three-pointer technique).
    def reverse(self):
        cur = self.head
        prev = None
        while cur != None:
            ahead = cur.next
            cur.next = prev
            prev = cur
            cur = ahead
        self.head = prev
    # Recursive reversal, links rewired on the way back up.
    def reverseR(self):
        self.__rR(self.head,None)
    def __rR(self,cur,prev):
        if cur == None:
            self.head = prev
        else:
            self.__rR(cur.next,cur)
            cur.next = prev
    # Second recursive variant; old head's next must be cleared afterwards.
    # NOTE(review): crashes on an empty list (prev.next with prev=None).
    def reverseR2(self):
        tmp = self.head
        self.__rR2(self.head)
        tmp.next = None
    def __rR2(self,prev):
        if prev.next == None:
            self.head = prev
        else:
            self.__rR2(prev.next)
            prev.next.next = prev
    # Reverse the list in groups of k (default k=3); last partial group
    # is reversed as well.
    def kreverse(self,k=3):
        self.head = self.__kr(self.head,k)
    def __kr(self,cur,k):
        if cur == None:
            return None
        else:
            temp = cur
            s = k
            while temp != None and s >= 1:
                temp = temp.next
                s -= 1
            prev = self.__kr(temp,k)
            while cur!=temp:
                ahead = cur.next
                cur.next = prev
                prev = cur
                cur = ahead
            return prev
    def midNode(self):
        return self.__mid().data
    # Slow/fast pointers; for even lengths returns the first of the two
    # middle nodes.  Assumes a non-empty list.
    def __mid(self):
        slow = self.head
        fast = self.head
        while fast.next != None and fast.next.next != None:
            slow = slow.next
            fast = fast.next.next
        return slow
    # Stable merge of two sorted lists into a new list (values copied).
    def merge(self,l2):
        l3 = MyLinkedList()
        i = self.head
        j = l2.head
        while i!= None and j!= None:
            if i.data > j.data:
                l3.addLast(j.data)
                j = j.next
            else:
                l3.addLast(i.data)
                i = i.next
        while i != None:
            l3.addLast(i.data)
            i = i.next
        while j!= None:
            l3.addLast(j.data)
            j = j.next
        return l3
    # Top-down merge sort; splits around __mid and merges recursively.
    def mergeSort(self):
        l3 = self.__mergerSortHelper()
        self.head = l3.head
    def __mergerSortHelper(self):
        if self.head.next == None:
            # l3 = MyLinkedList()
            # l3.head = self.head
            return self
        else:
            midNode = self.__mid()
            # l1 = MyLinkedList()
            l2 = MyLinkedList()
            # l1.head = self.head
            l2.head = midNode.next
            midNode.next = None
            left = self.__mergerSortHelper()
            right = l2.__mergerSortHelper()
            return left.merge(right)
    # Hard-wired demo: two lists that share the tail starting at node 7.
    def DummyListForIntersection(self):
        n1= self.Node(1)
        n2= self.Node(2)
        n3= self.Node(3)
        n4= self.Node(4)
        n5= self.Node(5)
        n6= self.Node(6)
        n7= self.Node(7)
        n8= self.Node(8)
        n9= self.Node(9)
        n10= self.Node(10)
        n11= self.Node(11)
        n12= self.Node(12)
        n13= self.Node(13)
        n1.next = n2
        n2.next = n3
        n3.next = n4
        n4.next = n5
        n5.next = n6
        n6.next = n7
        n7.next = n8
        n8.next = n9
        n9.next = n10
        n10.next = None
        n11.next = n12
        n12.next = n13
        n13.next = n7
        self.Intersection(n1,n11)
    # Two-pointer intersection finder: each pointer switches to the other
    # head after falling off its list.  NOTE(review): unlike the textbook
    # version, it also advances right after switching, so it skips the
    # first node of the other list; it still meets for interior
    # intersections (both pointers skip one node) but would misbehave if
    # the intersection were a list head or absent -- verify before reuse.
    def Intersection(self,head1,head2):
        t1 = head1
        t2 = head2
        while t1 != t2:
            if t1 == None:
                t1 = head2
            if t2 == None:
                t2 = head1
            t1 = t1.next
            t2 = t2.next
        print("Intersection",t1.data)
    # Hard-wired demo: 1..8 with the tail looped back to node 3.
    def dummyListForCycle(self):
        n1= self.Node(1)
        n2= self.Node(2)
        n3= self.Node(3)
        n4= self.Node(4)
        n5= self.Node(5)
        n6= self.Node(6)
        n7= self.Node(7)
        n8= self.Node(8)
        n1.next = n2
        n2.next = n3
        n3.next = n4
        n4.next = n5
        n5.next = n6
        n6.next = n7
        n7.next = n8
        n8.next = n3
        self.head = n1
    # Floyd cycle detection, then removal: after slow/fast meet, advance a
    # pointer from head in lockstep until the nodes *before* the cycle
    # entry coincide, and cut the link there.  The while/else prints
    # "No Cycle" only when the loop exits without break.
    def cycleDetectionRemoval(self):
        slow = self.head
        fast = self.head
        while fast != None and fast.next != None:
            slow = slow.next
            fast = fast.next.next
            if slow == fast:
                # print("Cycle")
                t1 = self.head
                t2 = slow
                while t1.next != t2.next:
                    t1 = t1.next
                    t2 = t2.next
                t2.next = None
                print("Cycle Removed")
                break
        else:
            print("No Cycle")
    # Fold the list: split at the middle, reverse the second half, then
    # interleave (a1,an,a2,an-1,...), relinking nodes in place.
    def fold(self):
        l2 = MyLinkedList()
        mid = self.__mid()
        l2.head = mid.next
        mid.next = None
        l2.reverse()
        t1= self.head
        t2 = l2.head
        cur = self.Node(0)
        h = cur
        while t1 != None or t2 != None:
            if t1 != None:
                cur.next = t1
                cur = cur.next
                t1 = t1.next
            if t2 != None:
                cur.next = t2
                cur = cur.next
                t2 = t2.next
        self.head = h.next
# Demo driver: build 1..7, show it, fold it, show the folded order.
# The commented-out lines below exercise the other MyLinkedList methods.
ll = MyLinkedList()
for i in range(1,8):
    ll.addLast(i)
ll.display()
ll.fold()
ll.display()
# ll.dummyListForCycle()
# ll.display()
# ll.cycleDetectionRemoval()
# ll.display()
# ll2 = MyLinkedList()
# for i in range(1,6):
#     ll.addLast(i+1)
#     ll.addFirst(2*i+3)
# # l3 = ll.merge(ll2)
# ll.display()
# ll.mergeSort()
# ll.display()
# ll.display()
# ll.kreverse(5)
# ll.display()
# ll.addLast(5)
# ll.addLast(4)
# ll.addLast(3)
# ll.addLast(2)
# ll.addLast(1)
# ll.addAt(5,2)
# print(ll.getLast())
# print(ll.getFirst())
# print(ll.getAt(2))
# ll.display()
# ll.reverseR2()
# ll.display()
# print(ll.size())
| ShubhamSinghal12/PythonDSAClassroomApril2022 | Lec30/MyLinkedList.py | MyLinkedList.py | py | 8,298 | python | en | code | 1 | github-code | 90 |
86592232594 | import logging.config
from typhoon.core.logger import setup_logging
config = """
version: 1
root:
level: INFO
handlers: [console]
handlers:
console:
class: logging.StreamHandler
level: INFO
stream: ext://sys.stderr
"""
def test_logging_config(monkeypatch, tmp_path, capsys):
    # setup_logging should pick up $TYPHOON_HOME/logger_config.yml and
    # route root-logger INFO records to stderr with no extra formatting.
    (tmp_path / 'logger_config.yml').write_text(config)
    monkeypatch.setenv('TYPHOON_HOME', str(tmp_path))
    setup_logging()
    logging.info('Hello World!')
    captured = capsys.readouterr()
    # Bare StreamHandler: stderr carries exactly the message plus newline.
    assert captured.err == 'Hello World!\n'
| typhoon-data-org/typhoon-orchestrator | tests/unit/logging_test.py | logging_test.py | py | 539 | python | en | code | 29 | github-code | 90 |
5721191277 | import sys
# Read everything at once: the first line is the case count, each later
# line holds "a,b,c,d".
text = sys.stdin.read()
test_data = text.splitlines()
for case in test_data[1:]:
    a, b, c, d = (int(v) for v in case.split(','))
    # Brute-force every split of a into x + y and report those where
    # b*x + c*y hits the target d.
    for x in range(0, a):
        y = a - x
        if b * x + c * y == d:
            print(f'{x},{y}')
41036823928 | """The tasks API."""
from __future__ import annotations
from typing import Dict, List, Optional, Union
import validators # type: ignore # does not have types
from pydantic import BaseModel, root_validator, validator
from tenacity import RetryError
from . import FailedRequestError, _send_request
SANDBOX_URL = "https://api-sandbox.noonlight.com/tasks/v1/verifications"
PRODUCTION_URL = "https://api.noonlight.com/tasks/v1/verifications"
class PointOfInterest(BaseModel):
    """A point of interest in an image.
    The coordinate system for the points_of_interest field has the origin starting at the top left of the image.
    The positive x-axis moves right, and the positive y-axis moves down.
    Coordinates and distances must be non-negative integers.
    Args:
        x (int): The x coordinate, in pixels, for the top left corner of the bounding box. Must be a non-negative integer.
        dx (int): The distance from the x field, in pixels, for the bounding box. Must be a non-negative integer.
        y (int): The y coordinate, in pixels, for the top left corner of the bounding box. Must be a non-negative integer.
        dy (int): The distance from the y field, in pixels, for the bounding box. Must be a non-negative integer.
    """
    x: int
    dx: int
    y: int
    dy: int
    # Root validator: checks all four coordinates in one pass after the
    # per-field int coercion has run.
    @root_validator()
    def verify_values(cls, values: dict[str, int]) -> dict[str, int]:
        if any(elem < 0 for elem in values.values()):
            raise ValueError("all dictionary values must be non-negative")
        return values
class Image(BaseModel):
    """An image that is provided to the verifier
    Args:
        url (str): The URL of the image
        media_type (str): The media type of the image, must be one of image/jpeg, image/png, or image/jpg
        points_of_interest (list[PointOfInterest]): A list of `PointOfInterest` objects
    """
    url: str
    media_type: str
    points_of_interest: List[PointOfInterest]
    # The validators library signals failure via a ValidationFailure return
    # value rather than an exception.  NOTE(review): this URL validator is
    # duplicated in Video -- could be shared.
    @validator("url")
    def url_valid(cls, v: str) -> str:
        result = validators.url(v)
        if isinstance(result, validators.ValidationFailure):
            raise ValueError("must be a valid URL")
        return v
    # Restrict to the image MIME types the tasks API accepts.
    @validator("media_type")
    def media_type_valid(cls, v: str) -> str:
        if v in {"image/jpeg", "image/png", "image/jpg"}:
            return v
        else:
            raise ValueError("must be one of 'image/jpeg', 'image/png', 'image/jpg'")
class Video(BaseModel):
    """A video that is provided to the verifier
    Args:
        url (str): The URL of the video
        media_type (str): The media type of the video. For MP4 videos, the alllowed type is video/mp4. For HLS, use application/x-mpegURL.
    """
    url: str
    media_type: str
    # Same URL validation as Image (validators returns ValidationFailure
    # instead of raising).
    @validator("url")
    def url_valid(cls, v: str) -> str:
        result = validators.url(v)
        if isinstance(result, validators.ValidationFailure):
            raise ValueError("must be a valid URL")
        return v
    # MP4 or HLS playlist only.
    @validator("media_type")
    def media_type_valid(cls, v: str) -> str:
        if v in {"video/mp4", "application/x-mpegURL"}:
            return v
        else:
            raise ValueError("must be one of 'video/mp4', 'application/x-mpegURL'")
class VerificationData(BaseModel):
    """Data for the verifier
    Args:
        id (str, optional): The ID of the task. If not provided, it will be auto-generated.
        owner_id (str, optional): The end-user's account ID.
        location_id (str, optional): The location ID of the camera or device.
        device_id (str, optional): The device ID of the camera or device.
        prompt (str): The text displayed to the verifier. They will select `yes` or `no` in response to this prompt.
        expiration (int): The amount of time, in seconds, allotted to complete the verification task.
        attachments (Union[list[Image], Video]): The attachment shown to the verifier.
        webhook_url (str, optional): The webhook that will be invoked when the verification is complete. If none is provided, it will use the preconfigured webhook.
    """
    id: Optional[str] = None
    owner_id: Optional[str] = None
    location_id: Optional[str] = None
    device_id: Optional[str] = None
    prompt: str
    # Seconds; create_task wraps this into {"timeout": ...} for the API.
    expiration: int
    # Either a list of images or a single video -- never both.
    attachments: Union[List[Image], Video]
    webhook_url: Optional[str] = None
class TaskResponse(BaseModel):
    # Internal model used to deserialize the tasks API's creation response;
    # create_task only surfaces `id` to callers.
    id: str
    prompt: str
    expiration: Dict[str, int]
    attachments: Union[List[Dict[str, Union[str, Dict[str, float]]]], Dict[str, str]]
    webhook_url: str
async def create_task(
    data: VerificationData,
    server_token: str,
    sandbox: bool = True,
) -> str:
    """Create a verification request to verify a piece of media with a prompt

    Args:
        data (VerificationData): See VerificationData
        server_token (str): Your server token that matches the sandbox or prod environment
        sandbox (bool, optional): Set to False to target the production API
            instead of the sandbox. Defaults to True.

    Raises:
        FailedRequestError: Raised when the request to create the task fails
            after all retries.

    Returns:
        str: The task ID for the given task
    """
    url = SANDBOX_URL if sandbox else PRODUCTION_URL
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": f"Bearer {server_token}",
    }
    payload = data.dict()
    # The API expects expiration as an object, not the bare seconds int.
    payload["expiration"] = {"timeout": data.expiration}
    try:
        response = await _send_request(
            "POST", url=url, headers=headers, payload=payload, expected_code=201
        )
    except RetryError as e:
        # Tenacity exhausted its retries; surface the library's own error.
        raise FailedRequestError from e
    response_data = TaskResponse(**await response.json())
    return response_data.id
| IceBotYT/pynoonlight | src/pynoonlight/tasks.py | tasks.py | py | 5,916 | python | en | code | 1 | github-code | 90 |
9776578870 | import cv2
import numpy as np
img = cv2.imread('Image/Elon_Musk_1.jpg')
print(img.shape)
imgResize = cv2.resize(img, (480, 640))
imgCropped = img[0:200,200:500]
cv2.imshow('img', img)
cv2.imshow('imgResize', imgResize)
cv2.imshow('imgCropped', imgCropped)
cv2.waitKey(0)
cv2.destroyAllWindows() | shudeath/DATN | DATN/Face_recognition/Learned/L_Resize.py | L_Resize.py | py | 299 | python | en | code | 0 | github-code | 90 |
75166961256 | from xgboost import XGBClassifier, XGBRegressor
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import r2_score, accuracy_score
x, y = load_boston(return_X_y=True) # 데이터 바로 가져오기
x_train, x_test , y_train, y_test = train_test_split(x,y, train_size = 0.8, shuffle = True, random_state= 66)
model = XGBRegressor()
model.fit(x_train, y_train)
score = model.score(x_test, y_test)
print("R2 : ",score) # R2 : 0.9221188601856797
# print("weight : ",model.coef_) # AttributeError: Coefficients are not defined for Booster type None
# print("bias : " ,model.intercept_)
thresholds = np.sort(model.feature_importances_)
# [0.00134153 0.00363372 0.01203115 0.01220458 0.01447935 0.01479119
# 0.0175432 0.03041655 0.04246345 0.0518254 0.06949984 0.30128643
# 0.42848358] => 낮은 차순으로 정렬된다.
print(thresholds)
for thresh in thresholds :
selection = SelectFromModel(model, threshold=thresh, prefit = True)
select_x_train = selection.transform(x_train) # x_train을 select 모드에 맞게 바꿔주겠다.
print(select_x_train.shape)
selection_model = XGBRegressor(n_jobs=8)
selection_model.fit(select_x_train,y_train)
select_x_test = selection.transform(x_test)
y_predict = selection_model.predict(select_x_test)
score = r2_score(y_test, y_predict)
print("Thresh=%.3f, n=%d, R2 : %.2f%%" %(thresh, select_x_train.shape[1], score*100))
'''
(404, 13)
Thresh=0.001, n=13, R2 : 92.21%
(404, 12)
Thresh=0.004, n=12, R2 : 92.16%
(404, 11)
Thresh=0.012, n=11, R2 : 92.03%
(404, 10)
Thresh=0.012, n=10, R2 : 92.19%
(404, 9)
Thresh=0.014, n=9, R2 : 93.08%
(404, 8)
Thresh=0.015, n=8, R2 : 92.37%
(404, 7)
Thresh=0.018, n=7, R2 : 91.48%
(404, 6)
Thresh=0.030, n=6, R2 : 92.71%
(404, 5)
Thresh=0.042, n=5, R2 : 91.74%
(404, 4)
Thresh=0.052, n=4, R2 : 92.11%
(404, 3)
Thresh=0.069, n=3, R2 : 92.52%
(404, 2)
Thresh=0.301, n=2, R2 : 69.41%
(404, 1)
Thresh=0.428, n=1, R2 : 44.98%
feature importance가 가능한 머신러닝 계열에서 활용 가능.
'''
| lynhyul/AIA | ml/m43_SelectFromModel.py | m43_SelectFromModel.py | py | 2,168 | python | en | code | 3 | github-code | 90 |
30139970243 | # -*- coding: utf-8 -*-
class ChannelService(object):
    """LINE channel RPC calls built as hand-rolled byte arrays.

    Each method assembles a request frame (`sqrd`) byte by byte and posts
    it via postPackDataAndGetUnpackRespData (provided by a mixin/subclass,
    as are getStringBytes and LINE_CHANNEL_ENDPOINT).  NOTE(review): the
    framing looks like Thrift binary protocol ([128, 1, 0, 1] version/call
    header, 11 = string field, 15 = list field, trailing 0 = stop) --
    confirm against the transport before editing the byte layouts.
    """
    def __init__(self):
        pass
    def issueChannelToken(self, channelId="1341209950"):
        #sqrd = self.DummyProtocol("issueChannelToken", 3, {
        #    1: (11, channelId)
        #}).read()
        # Header + method name, then field 1 (string channelId), then stop.
        sqrd = [128, 1, 0, 1] + self.getStringBytes('issueChannelToken') + [0, 0, 0, 0]
        sqrd += [11, 0, 1] + self.getStringBytes(channelId)
        sqrd += [0]
        return self.postPackDataAndGetUnpackRespData(self.LINE_CHANNEL_ENDPOINT ,sqrd)['issueChannelToken']
    def approveChannelAndIssueChannelToken(self, channelId="1341209950"):
        sqrd = [128, 1, 0, 1] + self.getStringBytes('approveChannelAndIssueChannelToken') + [0, 0, 0, 0]
        sqrd += [11, 0, 1] + self.getStringBytes(channelId)
        sqrd += [0]
        return self.postPackDataAndGetUnpackRespData(self.LINE_CHANNEL_ENDPOINT ,sqrd)['approveChannelAndIssueChannelToken']
    def getChannelInfo(self, channelId):
        # Literal bytes spell "getChannelInfo" (length 14); channelId is
        # appended as raw character codes with a 4-byte length prefix.
        sqrd = [128, 1, 0, 1, 0, 0, 0, 14, 103, 101, 116, 67, 104, 97, 110, 110, 101, 108, 73, 110, 102, 111, 0, 0, 0, 0, 11, 0, 2, 0, 0, 0, len(channelId)]
        for value in str(channelId):
            sqrd.append(ord(value))
        sqrd += [0]
        return self.postPackDataAndGetUnpackRespData(self.LINE_CHANNEL_ENDPOINT ,sqrd)['getChannelInfo']
    def getCommonDomains(self, lastSynced=0):
        # "getCommonDomains" with no arguments; lastSynced is accepted but
        # not serialized -- TODO confirm whether the server expects it.
        sqrd = [128, 1, 0, 1, 0, 0, 0, 16, 103, 101, 116, 67, 111, 109, 109, 111, 110, 68, 111, 109, 97, 105, 110, 115, 0, 0, 0, 0, 0]
        return self.postPackDataAndGetUnpackRespData(self.LINE_CHANNEL_ENDPOINT ,sqrd)['getCommonDomains']
    def issueRequestTokenWithAuthScheme(self, channelId, otpId, authScheme, returnUrl):
        sqrd = [128, 1, 0, 1, 0, 0, 0, 31, 105, 115, 115, 117, 101, 82, 101, 113, 117, 101, 115, 116, 84, 111, 107, 101, 110, 87, 105, 116, 104, 65, 117, 116, 104, 83, 99, 104, 101, 109, 101, 0, 0, 0, 0]
        sqrd += [11, 0, 1, 0, 0, 0, len(channelId)]
        for value in channelId:
            sqrd.append(ord(value))
        sqrd += [11, 0, 2, 0, 0, 0, len(otpId)]
        for value in otpId:
            sqrd.append(ord(value))
        # Field 3: list (15) of strings (11), each length-prefixed.
        sqrd += [15, 0, 3, 11, 0, 0, 0, len(authScheme)]
        for mid in authScheme:
            sqrd += [0, 0, 0, len(mid)]
            for value in mid:
                sqrd.append(ord(value))
        sqrd += [11, 0, 4, 0, 0, 0, len(returnUrl)]
        for value in returnUrl:
            sqrd.append(ord(value))
        sqrd += [0]
        return self.postPackDataAndGetUnpackRespData(self.LINE_CHANNEL_ENDPOINT ,sqrd)['issueRequestTokenWithAuthScheme']
| alipbudiman/CHRLINE | CHRLINE/services/ChannelService.py | ChannelService.py | py | 2,643 | python | en | code | null | github-code | 90 |
10203500621 | # -*- coding: utf-8 -*-
import tornado.web
from constant.error import const
from constant.tag import const
from utility import auth, util
from utility.msg_pb2 import *
from model import user_data, global_data
class LibaoHandler(tornado.web.RequestHandler):
    """Redeem a gift-pack ("libao") code and grant its reward.

    Code layout (from the slicing below): chars [0:2] = batch id ("pici"),
    [2:3] = type flag ('0' = per-code redemption, otherwise one redemption
    per batch per user), [3:5] = reward id.
    """
    @auth.authenticated
    def post(self, cmsg):
        # Reject malformed codes up front.
        if not util.check_input(cmsg.code, 16, True, True):
            return auth.pack(None, const.ERROR_INPUT)
        try:
            libao_pici = cmsg.code[:2]
            libao_type = cmsg.code[2:3]
            libao_reward = cmsg.code[3:5]
        except:
            return auth.pack(None, const.ERROR_LIBAO)
        if libao_type == '0':
            # Type '0': each individual code is single-use per user.
            if user_data.has_libao(cmsg.common.userid, cmsg.code):
                return auth.pack(None, const.ERROR_LIBAO)
            key = const.TAG_GAME_LIBAO + libao_pici
            if not global_data.has_libao(key, cmsg.code):
                return auth.pack(None, const.ERROR_LIBAO)
        else:
            # Other types: a user may redeem at most one code per batch.
            if user_data.has_libao(cmsg.common.userid, libao_pici):
                return auth.pack(None, const.ERROR_LIBAO)
            key = const.TAG_GAME_LIBAO + libao_pici
            if not global_data.has_libao(key, cmsg.code):
                return auth.pack(None, const.ERROR_LIBAO)
        # Look up and grant the reward (only "life" is handled here).
        reward = global_data.get_libao_reward(const.TAG_GAME_LIBAO_REWARD, libao_reward)
        life = reward.get("life", 0) if reward else 0
        if life > 0:
            user_data.inc_user_attr(cmsg.common.userid, "life", life)
        # Record the redemption; non-'0' codes are also consumed globally.
        if libao_type == '0':
            user_data.add_libao(cmsg.common.userid, cmsg.code)
        else:
            user_data.add_libao(cmsg.common.userid, libao_pici)
            key = const.TAG_GAME_LIBAO + libao_pici
            global_data.remove_libao(key, cmsg.code)
        smsg = smsg_libao()
        smsg.life = life
        return auth.pack(smsg, 0, cmsg.common.userid)
| RickyTong1024/mario | trunk/soft/server/server/server/handler/huodong/libao_handler.py | libao_handler.py | py | 1,884 | python | en | code | 0 | github-code | 90 |
18068135336 | from flask import Flask
import pandas as pd
from skFunctions import cleaner, sankeyData, nodeNames, sankeyDiagram
from skFunctions import smallMultiples
import dash
import dash_core_components as dcc
import dash_html_components as html
import base64
server = Flask(__name__)
# Dash app
app = dash.Dash(name='DockerTestApp',
server=server,
csrf_protect=False)
colors = {
'background': '#111111',
'text': '#7FDBFF'
}
image_filename = 'ONPCLogo.png'
encoded_image = base64.b64encode(open(image_filename, 'rb').read())
b1Image = 'Ballot1.png'
b2Image = 'Ballot2.png'
b3Image = 'Ballot3.png'
encoded_imageB1 = base64.b64encode(open(b1Image, 'rb').read())
encoded_imageB2 = base64.b64encode(open(b2Image, 'rb').read())
encoded_imageB3 = base64.b64encode(open(b3Image, 'rb').read())
app.layout = html.Div([
html.Div([
html.Div([
html.Img(src='data:image/png;base64,{}'
.format(encoded_image.decode()))
], className='two columns'),
html.Div([
dcc.Markdown('''
## Ontario Provincial PC Leadership Race
After the dramatic resignation of the Ontario Progressive Conservative (PC)
party leader, Patrick Brown, the party ran a leadership contest.
There were four candidates on the final ballot. Ms Elliot, Ms Mulroney, Mr Ford
and Ms Alan.
The resulting leadership race saw a close finish between the top two
contenders as Mr Ford won more points (calculated by counting the number of
people in a riding up
to 100 and then assigning 100 to the result after that).
Ms Elliott ended with more of the popular votes but fewer points, leading to a
confusing and almost contested recount/challenge. [Pundits](http://www.cbc.ca/news/politics/grenier-pc-leadership-results-1.4571699)
have theorized that Ms Alan's voters delivered the victory to Mr Ford mostly because
Mr Ford mimicked her stance on sexual education in schools, taking a more
conservative position.
The Data was collected from [Wikipedia](https://en.wikipedia.org/wiki/ProgressiveConservative_Party_of_Ontario_leadership_election,_2018).
This data is interesting because Mr Ford ran on a populist agenda, similar to
Mr Trump's campaign. In fact, in Canada, Mr Trump was often compared to Mr
Ford's late brother and former mayor of Toronto, Rob Ford. This analysis gives insight into the alliance between religious and populist
candidates as well as highlighting the importance of differing election styles, such as
the points system used in the PC race.
'''),], className='ten columns'),
], className='row'),
html.Div([
html.H3('Chose Total Votes or Total Points'),
dcc.RadioItems(
id='VorR',
options=[
{'label': 'Votes Results', 'value': 'Votes'},
{'label': 'Points Results', 'value': 'Points'}
],
value='Points',),
]),
html.Div([
html.Div([
html.H3('Sankey Diagram of Voting Preferences'),
dcc.Graph(id='sankeyGraph')
], className="six columns"),
html.Div([
html.H3('By Round Results'),
dcc.Graph(id='smGraph')
], className="six columns"),
], className="row"),
html.Div([
html.Div([
html.Img(src='data:image/png;base64,{}'.format(encoded_imageB1.decode()))
], className='four columns'),
html.Div([
html.Img(src='data:image/png;base64,{}'.format(encoded_imageB2.decode()))
], className='four columns'),
html.Div([
html.Img(src='data:image/png;base64,{}'.format(encoded_imageB3.decode()))
], className='four columns'),
], className='row')
])
app.css.append_css({
'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'
})
@app.callback(
    dash.dependencies.Output('sankeyGraph', 'figure'),
    [dash.dependencies.Input('VorR', 'value')])
def update_graph(VorR):
    # Rebuild the Sankey figure whenever the Votes/Points radio changes.
    # Re-reads results.csv on every interaction (simple, but disk I/O per
    # callback).
    df = pd.read_csv('results.csv')
    SKL = nodeNames()
    df = cleaner(df, 'Raw', 'GL', VorR)
    df = sankeyData(df)
    # NOTE(review): "Cadidate" is a typo in the user-visible chart title;
    # left unchanged here because it is runtime output.
    fig = sankeyDiagram(df,
                        '{} Won by Cadidate in the 2018 PC Leadership Race'
                        .format(VorR),
                        SKL)
    return fig
@app.callback(
    dash.dependencies.Output('smGraph', 'figure'),
    [dash.dependencies.Input('VorR', 'value')])
def update_sm(VorR):
    # Rebuild the by-round small-multiples chart for the selected metric.
    df = pd.read_csv('results.csv')
    df = cleaner(df, 'Raw', 'GL', VorR)
    smg=smallMultiples(df, '{} Won by Round'.format(VorR))
    return smg
if __name__ == '__main__':
    # Development entry point; debug=True enables the reloader/debugger
    # and should not be used in production.
    app.run_server(debug=True)
| kailukowiak/DATA608 | Assignment6/dash_app/app.py | app.py | py | 4,918 | python | en | code | 0 | github-code | 90 |
2878576814 | # -*- encoding: utf-8 -*-
'''
@File : 69. x 的平方根.py
@Time : 2020/04/22 09:48:50
@Author : windmzx
@Version : 1.0
@Desc : For leetcode template
'''
# here put the import lib
from typing import List
import math
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x.

        Binary search on the answer using pure integer arithmetic.  The
        original computed the midpoint as math.ceil((left + right) / 2):
        that float division loses precision once left + right exceeds
        2**53, so very large x could yield a wrong root.  The integer
        midpoint (left + right + 1) // 2 is exact for any size.
        """
        if x == 0 or x == 1:
            return x
        left, right = 1, x // 2
        while left < right:
            # Round the midpoint UP so `left = mid` always makes progress.
            mid = (left + right + 1) // 2
            if mid * mid <= x:
                left = mid
            else:
                right = mid - 1
        return left
if __name__ == "__main__":
    # Quick manual check: floor(sqrt(14)) == 3.
    x=Solution()
    print(x.mySqrt(14))
26594773601 | # -*- coding: utf-8 -*-
from django.http import HttpResponse, Http404
from django.shortcuts import render, get_object_or_404, redirect
from mailinglist.models import MailingList
from mailinglist.forms import SubscribeForm
def index(request):
    # Plain-text landing page.  NOTE: the backslash continues the string
    # literal, so the leading whitespace of the next source line becomes
    # part of the response body (deliberately left as-is).
    return HttpResponse('Ahoj Svet.\
        Práve ste v mailinglistovom indexe.')
def mlist(request, list_id):
    # Detail page for one mailing list.  Uses a manual try/except instead
    # of get_object_or_404, presumably so the Http404 carries a custom
    # (Slovak) message.
    try:
        ml = MailingList.objects.get(pk=list_id)
    except MailingList.DoesNotExist:
        raise Http404('Mailinglist neexistuje!!!')
    return render(request, 'mailinglist/mlist.html', {'ml': ml})
def subscribe(request, list_id):
    """Show the subscribe form for a mailing list; on a valid POST, save
    the subscriber, attach it to the list, and redirect to the list page.
    Invalid POSTs re-render the bound form so validation errors show."""
    ml = get_object_or_404(MailingList, pk=list_id)
    form = SubscribeForm(request.POST) if request.method == "POST" else SubscribeForm()
    if form.is_bound and form.is_valid():
        subscriber = form.save()
        ml.subscriber.add(subscriber)
        return redirect('mlist', list_id=list_id)
    return render(request, 'mailinglist/subscribe.html',
                  {'ml': ml, 'form': form})
| ricco386/zaciname-s-djangom | konferencia/mailinglist/views.py | views.py | py | 1,058 | python | en | code | 5 | github-code | 90 |
70070842857 | def main():
# Import the library
import microdots as mdots
from microdots.mini_sequences import MNS, A1, A2
codec4x4 = mdots.AnotoCodec(
mns=MNS,
mns_order=4,
sns=[A1, A2],
pfactors=(3, 5),
delta_range=(1, 15),
)
print(len(MNS), len(A1), len(A2))
main()
| cheind/py-microdots | examples/hello_mini.py | hello_mini.py | py | 323 | python | en | code | 4 | github-code | 90 |
17882718862 | import json
import boto3
from json2html import *
def lambda_handler(event, context):
region = ''
if(event["queryStringParameters"] is not None and 'region' in event["queryStringParameters"] and event["queryStringParameters"]["region"] is not None):
region = event["queryStringParameters"]["region"]
else:
region = 'us-east-1'
#Client Configs
lambda_client = boto3.client("lambda",region_name=region)
ec2_client = boto3.client('ec2',region_name=region)
vpc_func_details = []
list_func = lambda_client.list_functions(FunctionVersion='ALL')
eni_share_report = []
#Paginated Query
while True:
#For each function in the list fetch all versions and check the VPC + Subnet + SGs combination
for i in list_func["Functions"]:
print(list_func)
func_details = lambda_client.get_function(FunctionName=i["FunctionArn"])["Configuration"]
func_arn = i["FunctionArn"]
func_name = i["FunctionName"]
func_version = i["Version"]
if func_details.get("VpcConfig") is not None and len(func_details["VpcConfig"]["VpcId"]) != 0:
eni_filter_node=[{'Name':'group-id', 'Values':[]}, {'Name':'subnet-id', 'Values':[]},{'Name':'vpc-id', 'Values':[]}]
vpc_id = func_details["VpcConfig"]["VpcId"]
vpc_config = func_details.get("VpcConfig")
subnet_level_vpcConfig = {}
for subnet in vpc_config["SubnetIds"]:
print('for each subnet')
#Creating my filter for each subnet and I am calling my ENIs based on the filter
eni_filter_node[0]['Values'].extend(vpc_config["SecurityGroupIds"])
eni_filter_node[1]['Values'].append(subnet)
eni_filter_node[2]['Values'].append(vpc_id)
enis= ec2_client.describe_network_interfaces(Filters=eni_filter_node)
if(enis['NetworkInterfaces'] is not None and len(enis['NetworkInterfaces']) != 0):
share_node ={}
share_node['Function Name'] = func_name
share_node['Version'] = func_version
share_node['VPC'] = vpc_id
share_node['Subnet'] = subnet
share_node['ENI'] = enis['NetworkInterfaces'][0]['NetworkInterfaceId']
share_node['Status'] = enis['NetworkInterfaces'][0]['Status'] #
share_node['Attachment Status'] = enis['NetworkInterfaces'][0]['Attachment']['Status'] #
eni_share_report.append(share_node)
if(list_func.get("NextMarker") is not None):
list_func = lambda_client.list_functions(FunctionVersion='ALL', Marker = list_func.get("NextMarker"))
else:
break
return {"statusCode": 200, "body": give_me_my_html(eni_share_report),"headers": {'Content-Type': 'text/html'}}
def give_me_my_html(eni_share_report_json):
html_string = ""
with open('/var/task/report.html', 'r') as file:
html_string = file.read()
table_body = json2html.convert(json = eni_share_report_json,table_attributes='id=\"myTable\",class=\"cell-border\"')
html_string = html_string.replace("MY_TABLE_MARKER",table_body)
return html_string | debongithub/ENIReporter | lambda_function.py | lambda_function.py | py | 3,417 | python | en | code | 0 | github-code | 90 |
21943507966 | import conf
from boltiot import Bolt
import json, time
mybolt = Bolt(conf.API_KEY, conf.DEVICE_ID)
def convert(sensor_value):
    """Map a 10-bit LDR reading (0..1024) to an inverted LED intensity.

    A bright environment (high reading) yields a low LED value and vice
    versa, so the LED brightens as the room darkens.
    """
    return 255 - sensor_value * 255 / 1024
# Poll the LDR every 5 seconds and drive the LED with the inverted value.
while True:
    print("Reading Sensor Value")
    # The Bolt cloud returns a JSON string like
    # '{"success": 1, "value": "512"}'.
    response_ldr = mybolt.analogRead('A0')
    # BUG FIX: previously parsed `response`, a name that was never
    # assigned, so the first iteration raised NameError.
    data = json.loads(response_ldr)
    print("Sensor value is: " + str(data['value']))
    try:
        sensor_value = int(data['value'])
        print("Calculating required Light Intensity for LED")
        led_value_float = convert(sensor_value)
        led_value = int(led_value_float)
        print(led_value)
        # Write the computed intensity to the LED on digital pin 1.
        mybolt.analogWrite('1', led_value)
    except Exception as e:
        print("Error occured: Below are the details")
        print(e)
    time.sleep(5)
| Rajeswari525/Automatic-Light-Controller | light_automation.py | light_automation.py | py | 848 | python | en | code | 1 | github-code | 90 |
18443578639 | import numpy as np
from functools import reduce
from math import gcd

# First line: element count (read only to consume the line); second line:
# the values themselves.
n = int(input())
a = list(map(int, input().split()))
# Fold gcd across the list with the standard library instead of calling
# np.gcd per element; reduce of a single-element list returns it unchanged.
print(reduce(gcd, a))
2392797718 | from collections import deque
def math_operations(*args, **kwargs):
    """Fold the positional numbers into the keyword accumulators.

    Keys select the operation: 'a' add, 's' subtract, 'd' divide,
    'm' multiply.  Numbers are consumed left to right, one per keyword
    per pass, until they run out.  A division by zero still consumes its
    number but leaves the accumulator unchanged.
    """
    operations = {
        'a': lambda acc, rhs: acc + rhs,
        's': lambda acc, rhs: acc - rhs,
        'd': lambda acc, rhs: acc / rhs,
        'm': lambda acc, rhs: acc * rhs,
    }
    numbers = deque(args)
    while numbers:
        for name in kwargs:
            if not numbers:
                break
            operand = numbers.popleft()
            try:
                kwargs[name] = operations[name](kwargs[name], operand)
            except ZeroDivisionError:
                continue
    return kwargs
print(math_operations(2, 12, 0, -3, 6, -20, -11, a=1, s=7, d=33, m=15))
# Demo calls; expected outputs:
# {'a': 9, 's': 15, 'd': -3.0, 'm': -45}
# {'a': 5, 's': 2, 'd': 0.0, 'm': 0}
# {'a': 6, 's': 0, 'd': 0, 'm': 0}
print(math_operations(2, 12, 0, -3, 6, -20, -11, a=1, s=7, d=33, m=15))
print(math_operations(-1, 0, 1, 0, 6, -2, 80, a=0, s=0, d=0, m=0))
print(math_operations(6, a=0, s=0, d=0, m=0))
18003758259 | n = int(input())
a = list(map(int,input().split()))
# Running prefix sums for the two candidate sign patterns.
S1 = 0
S2 = 0
# (translated) S1: case where odd-numbered prefix sums are positive;
# S2: case where they are negative instead.
cnt1 = 0
cnt2 = 0
for i,num in enumerate(a):
    S1 += num
    # Pattern 1: prefix sums must go +, -, +, -, ...; when a sum has the
    # wrong sign, pay |S|+1 operations to push it to the nearest legal
    # value (+1 or -1).
    if i % 2 == 0 and S1 <= 0:
        cnt1 += 1 - S1
        S1 = 1
    if i % 2 != 0 and S1 >= 0:
        cnt1 += 1 + S1
        S1 = -1
    S2 += num
    # Pattern 2: the opposite signs (-, +, -, +, ...).
    if i % 2 == 0 and S2 >= 0:
        cnt2 += 1 + S2
        S2 = -1
    if i % 2 != 0 and S2 <= 0:
        cnt2 += 1 - S2
        S2 = 1
# Answer: the cheaper of the two sign patterns.
print(cnt1 if cnt1 <= cnt2 else cnt2)
| Aasthaengg/IBMdataset | Python_codes/p03739/s169626438.py | s169626438.py | py | 528 | python | en | code | 0 | github-code | 90 |
24552953227 | from helper import *
@TestInstance
def test_atan2():
    # Tabulate f_atan2 over unit directions at every integer degree, then
    # verify each recovered angle is within about one degree of the input.
    atanarray = EUDArray(360)
    for angle in EUDLoopRange(360):
        x, y = f_lengthdir(1000, angle)
        atanarray[angle] = f_atan2(y, x)
    # Value of atan2 may vary by 1 due to rounding error.
    # Here we check similarity.
    # NOTE(review): EUD arithmetic appears unsigned, so the "+ 1" shift
    # keeps a result of angle-1 from underflowing -- confirm.
    test_assert(
        "atan2 test",
        [atanarray[angle] - angle + 1 <= 2 for angle in range(360)]
    )
test_operator("Square root", f_sqrt, lambda x: int(x ** 0.5))
| phu54321/eudplib | tests/unittests/testmath.py | testmath.py | py | 470 | python | en | code | 13 | github-code | 90 |
36910425795 | import unittest
from pychoco.model import Model
class TestBoolsIntChanneling(unittest.TestCase):
    """Tests for Model.bools_int_channeling (bools[i] <-> intvar == i)."""

    def testBoolsIntChanneling1(self):
        """Every solution sets exactly the bool selected by intvar."""
        m = Model()
        bools = m.boolvars(10)
        intvar = m.intvar(0, 9)
        m.bools_int_channeling(bools, intvar).post()
        while m.get_solver().solve():
            for b in range(0, 10):
                if b == intvar.get_value():
                    self.assertTrue(bools[b].get_value())
                else:
                    self.assertFalse(bools[b].get_value())
        # BUG FIX: assertTrue(x, 10) treats 10 as the failure *message*
        # and only checks truthiness; the intent is an equality check on
        # the solution count.
        self.assertEqual(m.get_solver().get_solution_count(), 10)

    def testBoolsIntChannelingFail(self):
        """Channeling is unsatisfiable when intvar cannot select any bool."""
        m = Model()
        bools = m.boolvars(10)
        intvar = m.intvar(10, 11)
        m.or_(bools).post()
        m.bools_int_channeling(bools, intvar).post()
        self.assertFalse(m.get_solver().solve())
| chocoteam/pychoco | tests/int_constraints/test_bools_int_channeling.py | test_bools_int_channeling.py | py | 859 | python | en | code | 9 | github-code | 90 |
42425353574 | """
https://codingbat.com/prob/p104029
"""
def stringClean(s):
    """Return s with runs of adjacent duplicate characters collapsed to one.

    e.g. "yyzzza" -> "yza".

    BUG FIX: the original reassigned s to a shorter string and then
    indexed s[1] again on the return line, so any input that shrank to a
    single character mid-call crashed -- stringClean("bb") raised
    IndexError.  The straightforward recursion below has no such re-index.
    """
    if len(s) < 2:
        return s
    if s[0] == s[1]:
        # Drop the duplicated first char; the survivor is handled by the
        # recursive call.
        return stringClean(s[1:])
    return s[0] + stringClean(s[1:])
print(stringClean("hello"))
| vijay2930/HackerrankAndLeetcode | com/CodingBat/recursion-1/stringClean.py | stringClean.py | py | 227 | python | en | code | 0 | github-code | 90 |
25871290029 | ## Spherical subdivision
import math
import bpy,bmesh
import random
## spherical coordinates
anglesub = 100      # angular subdivisions around the equator
Radius = .2         # sphere radius (Blender units)
Iterations = 1000
Height = .0001
# NOTE(review): the name suggests 1/theta-increment, i.e. anglesub/(2*pi),
# but the expression evaluates to anglesub/2 * pi -- confirm intent before
# relying on this value.
invthetainc = (anglesub/2*math.pi)
def distance(coord):
    """Euclidean length of a 3-component vector."""
    return sum(c * c for c in coord) ** .5
def dotproduct(c1, c2):
    """Dot product of two 3-component vectors."""
    return sum(a * b for a, b in zip(c1, c2))
def norm(coord):
    """Return coord scaled to unit length, as an (x, y, z) tuple."""
    x, y, z = coord
    # Inlined length computation (same formula as distance()).
    length = (x * x + y * y + z * z) ** .5
    return (x / length, y / length, z / length)
def addvec(v1, v2):
    """Componentwise sum of two 3-vectors, returned as a tuple."""
    return tuple(a + b for a, b in zip(v1, v2))
def scalemult(sc, v1):
    """Scale the 3-vector v1 by the scalar sc, returned as a tuple."""
    return tuple(sc * c for c in v1)
def spheretocoord(r, theta, phi):
    """Convert spherical coordinates (radius r, azimuth theta, polar
    angle phi measured from +z) to Cartesian (x, y, z)."""
    sin_phi = math.sin(phi)
    return (r * math.cos(theta) * sin_phi,
            r * math.sin(theta) * sin_phi,
            r * math.cos(phi))
def randomNormal():
    # Random unit vector from two uniform angles.
    # NOTE(review): sampling phi uniformly on [0, 2*pi) does not give a
    # uniform distribution over the sphere (that needs cos(phi) uniform),
    # and 0..2*pi double-covers the polar angle -- confirm this bias is
    # acceptable for how the normals are used.
    theta = random.uniform(0.0,2*math.pi)
    phi = random.uniform(0.0,2*math.pi)
    return spheretocoord(1.0,theta,phi)
def buildSphere(Radius, anglesub):
    """Build a UV-sphere tessellation of the given radius.

    Returns (vertices, faces, sphereproj, vcoordtovindex) where:
    - vertices: list of (x, y, z) tuples,
    - faces: quads as 4-tuples of vertex indices (pole quads degenerate),
    - sphereproj: maps a vertex coordinate to its (u, v) = (theta/2pi, phi/pi),
    - vcoordtovindex: maps a vertex coordinate to its index in `vertices`.
    anglesub rings are laid around the equator; anglesub/4 rings per
    hemisphere (assumes anglesub divisible by 4).
    """
    ## build equatorial subdivision
    pi2 = 2*math.pi
    pi = math.pi
    sphereproj = {}
    thetainc = 2*math.pi/anglesub
    vertices = []
    theta = 0.0
    phi = math.pi/2.0
    ## equator ring: anglesub+1 vertices (first/last coincide in angle)
    for i in range(anglesub+1):
        v = spheretocoord(Radius,theta,phi)
        vertices.append(v)
        sphereproj[v] = (theta/pi2,phi/pi)
        theta += thetainc
    ## now build positive hemisphere (rings of decreasing phi up to the pole)
    evertices = vertices[0:len(vertices)]
    theta = 0.0
    phi -= thetainc
    vertgroups = []
    ##vertgroups += [vertices]
    for i in range(int(anglesub/4+1)):
        theta = 0.0
        verts = [] ## we build a ordered list as we increment phi of vertices
        if i != int(anglesub/4 ):
            for j in range(anglesub+1):
                v = spheretocoord(Radius,theta,phi)
                verts.append(v)
                sphereproj[v] = (theta/pi2,phi/pi)
                theta += thetainc
            phi -= thetainc
            vertgroups.append(verts)
        else:
            ## last "ring" is the single north-pole vertex (phi == 0)
            v = spheretocoord(Radius,theta,0.0)
            verts.append(v)
            sphereproj[v] = (theta,0.0)
    ## build the faces and append the vertices
    faces = []
    vcoordtovindex = {}
    for i,vert in enumerate(vertices):
        vcoordtovindex[vert] = i
    for verts in vertgroups:
        for vert in verts:
            vertices.append(vert)
            vcoordtovindex[vert] = len(vertices)-1
    newvertgroups = [evertices]
    newvertgroups += vertgroups
    vertgroups = newvertgroups
    ##vertgroups += [vertices]
    ifacerow = 0 ## initial face row tracking
    ## stitch each ring to the next one (quad faces; pole faces degenerate)
    for vgindex,verts in enumerate(vertgroups):
        if vgindex == len(vertgroups) -1:
            break
        nvertices = vertgroups[vgindex+1]
        for vindex, vertex in enumerate(verts):
            face = []
            if vindex == len(verts)-1:
                ## wrap column: skipped (the +1th vertex duplicates column 0)
                nvindex = 0
            else:
                nvindex1 = vindex+1
                if vgindex+1 == len(vertgroups)-1:
                    nv1 = verts[nvindex1]
                    nv2 = nvertices[0]
                    nv3 = nvertices[0]
                else:
                    nv1 = verts[nvindex1]
                    nv2 = nvertices[nvindex1]
                    nv3 = nvertices[vindex]
                vi = vcoordtovindex[vertex]
                nv1i = vcoordtovindex[nv1]
                nv2i = vcoordtovindex[nv2]
                nv3i = vcoordtovindex[nv3]
                face = (vi,nv1i,nv2i,nv3i)
                if vgindex == 0:
                    ifacerow += 1
                faces.append(face)
                ##print(face)
    fvinc = len(vertices)
    ## build negative hemisphere (rings of increasing phi down to south pole)
    theta = 0.0
    phi = math.pi/2.0
    phi += thetainc
    vertgroups = []
    vertgroups += [evertices]
    for i in range(int(anglesub/4+1)):
        theta = 0.0
        verts = [] ## we build a ordered list as we increment phi of vertices
        if i != int(anglesub/4):
            for j in range(anglesub+1):
                v = spheretocoord(Radius,theta,phi)
                verts.append(v)
                vertices.append(v)
                sphereproj[v] = (theta/pi2,phi/pi)
                vcoordtovindex[v] = len(vertices)-1
                theta += thetainc
            phi += thetainc
            ## print(phi)
            ## vertices += verts[0:len(verts)]
            vertgroups += [verts]
        else:
            ##verts.append(spheretocoord(Radius,theta,phi))
            ## last "ring" is the single south-pole vertex (phi == pi)
            v = spheretocoord(Radius,theta,pi)
            vertices += [v]
            sphereproj[v] = (theta/pi2,pi/pi)
            vcoordtovindex[v] = len(vertices)-1
            vertgroups += [[v]]
    ## newfaces = []
    ## for fi, face in enumerate(faces):
    ## newface = []
    ## if fi <= ifacerow:
    ## v1,v2,v3,v4 = face
    ## nv1 = v1
    ## nv2 = v2
    ## nv3 = v3 + fvinc
    ## nv4 = v4 + fvinc
    ## else:
    ## nv1 = v1 + fvinc
    ## nv2 = v2 + fvinc
    ## nv3 = v3 + fvinc
    ## nv4 = v4 + fvinc
    ## newface = (nv1,nv2,nv3,nv4)
    ## newfaces.append(newface)
    ## faces += newfaces
    ## stitch the southern rings; winding reversed ((vi, nv3i, nv2i, nv1i))
    ## so southern faces keep outward-facing normals
    for vgindex,verts in enumerate(vertgroups):
        if vgindex == len(vertgroups) -1:
            break
        nvertices = vertgroups[vgindex+1]
        for vindex, vertex in enumerate(verts):
            face = []
            if vindex == len(verts)-1:
                nvindex = 0
            else:
                nvindex1 = vindex+1
                if vgindex+1 == len(vertgroups)-1:
                    nv1 = verts[nvindex1]
                    nv2 = nvertices[0]
                    nv3 = nvertices[0]
                else:
                    nv1 = verts[nvindex1]
                    nv2 = nvertices[nvindex1]
                    nv3 = nvertices[vindex]
                vi = vcoordtovindex[vertex]
                nv1i = vcoordtovindex[nv1]
                nv2i = vcoordtovindex[nv2]
                nv3i = vcoordtovindex[nv3]
                ##face = (vi,nv1i,nv2i,nv3i)
                face = (vi,nv3i,nv2i,nv1i)
                if vgindex == 0:
                    ifacerow += 1
                faces.append(face)
    return vertices, faces, sphereproj, vcoordtovindex
## Build the base sphere, then perturb it with random great-circle "faults":
## each iteration picks a random plane through the origin and pushes every
## vertex on the positive side outward and every vertex on the negative side
## inward by a random height along its own normal.
vertices, faces, sphereproj, vcoordtovindex = buildSphere(Radius, anglesub)
heightmap = {} ## vertex index keyed, heightmap valued
i = 0
minheight = float('inf') ## any height greater than minheight will initialize
maxheight = -1*float('inf') ## any height less than maxheight will initialize
## these are positive and negative infinity respectively
while i < Iterations:
    rN = randomNormal()
    height = Height*random.random()
    for vi, vert in enumerate(vertices):
        vN = norm(vert)
        if dotproduct(rN,vN) > 0:
            ## vertex is on the positive side of the fault plane: raise it
            aheight = height
            vheight = scalemult(height,vN)
            newvec = addvec(vert,vheight)
        else:
            ## negative side: lower it by the same amount
            aheight = -1.0*height
            vheight = scalemult(-1*height,vN)
            newvec = addvec(vert,vheight)
        if vi in heightmap:
            heightmap[vi] += aheight
        else:
            heightmap[vi] = aheight
        if heightmap[vi] > maxheight:
            maxheight = heightmap[vi]
        if heightmap[vi] < minheight:
            minheight = heightmap[vi]
        ## re-key the lookup tables from the old coordinate to the moved one,
        ## preserving the vertex's original UV projection
        del vcoordtovindex[vert]
        scoord = sphereproj[vert]
        sphereproj[newvec] = scoord
        del sphereproj[vert]
        vcoordtovindex[newvec] = vi
        vertices[vi] = newvec
    i += 1
## setreverse index mapping between coordinate to indices
vcoordtovindexrev = {}
for vcoord in vcoordtovindex:
    vi = vcoordtovindex[vcoord]
    vcoordtovindexrev[vi] = vcoord
## Create the Blender mesh/object at the 3D cursor and link it to the scene
## (pre-2.8 Blender API: scene.objects.link / cursor_location).
meshName = "Polygon"
obName = "PolygonObj"
me = bpy.data.meshes.new(meshName)
ob = bpy.data.objects.new(obName, me)
ob.location = bpy.context.scene.cursor_location
bpy.context.scene.objects.link(ob)
me.from_pydata(vertices,[],faces)
me.update(calc_edges=True)
## Select the new object in scene and set uvs
scn = bpy.context.scene
scn.objects.active = ob
ob.select = True
bpy.ops.object.mode_set(mode = 'EDIT')
bm = bmesh.from_edit_mesh(ob.data)
uv_layer = bm.loops.layers.uv.verify()
bm.faces.layers.tex.verify()
## assign each face-loop its vertex's spherical projection as UV coordinate
for f in bm.faces:
    for l in f.loops:
        luv = l[uv_layer]
        vind = l.vert.index
        vcoord = vcoordtovindexrev[vind]
        uvcoord = sphereproj[vcoord]
        luv.uv = tuple(uvcoord)
bmesh.update_edit_mesh(me)
bpy.ops.object.mode_set(mode='OBJECT')
| christophermoverton/PyAIRPG | sphericalheightmap.py | sphericalheightmap.py | py | 8,257 | python | en | code | 0 | github-code | 90 |
def bubbleSort(A, N):
    """Bubble-sort A (first N elements) in place, then print the sorted
    sequence and the number of swaps performed (AOJ ALDS1_2_A style)."""
    swaps = 0
    swapped = True
    while swapped:
        swapped = False
        # Sweep right-to-left so the smallest element bubbles to the front.
        for idx in range(N - 1, 0, -1):
            if A[idx - 1] > A[idx]:
                A[idx - 1], A[idx] = A[idx], A[idx - 1]
                swaps += 1
                swapped = True
    print(*A)
    print(swaps)
# Read the element count and the space-separated values from stdin, then run
# the sort (prints the sorted sequence followed by the swap count).
N = int(input())
A = list(map(int, input().split()))
bubbleSort(A, N)
| Aasthaengg/IBMdataset | Python_codes/p02259/s227152819.py | s227152819.py | py | 347 | python | en | code | 0 | github-code | 90 |
40418766834 | from abc import ABCMeta, abstractmethod
import numpy as np
import scipy
import scipy.optimize as opt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel
from .acquisition_functions import expected_improvement
def constraints_all_satisfied(constraints_values) -> bool:
    """Return True iff every constraint value is non-negative.

    Convention: a constraint evaluation >= 0 means "satisfied", < 0
    (e.g. -1) means "violated". Vacuously True for an empty sequence.
    """
    return all(c >= 0 for c in constraints_values)


class BOModule(metaclass=ABCMeta):
    """Base class for Bayesian-optimization loops over a black-box objective.

    Subclasses implement acquisition_maximization() to propose the next
    point to evaluate; minimize() runs the evaluate/record loop and tracks
    the best (feasible, when constraints are given) point seen so far.
    """

    def __init__(self, black_box_objective_function, bounds: scipy.optimize.Bounds, black_box_constraints=None):
        self._x_min = None  # best (feasible) point found so far
        self._f_min = None  # objective value at self._x_min
        self._X = None      # all evaluated points, shape (n, d)
        self._objective = black_box_objective_function
        self._objective_dataset = None
        self._bounds = bounds  # kept for subclasses; not used by the base loop
        self._n_constraints = 0
        if black_box_constraints is not None:
            self._n_constraints = len(black_box_constraints)
            self._constraints = black_box_constraints
        self._constraints_dataset = None

    def get_result(self, return_objective=False):
        """Return the best point found, optionally with its objective value."""
        if return_objective:
            return self._x_min, self._f_min
        return self._x_min

    @abstractmethod
    def acquisition_maximization(self):
        """Propose the next point to evaluate (implemented by subclasses)."""

    def minimize(self, X_init, objective_init_dataset, constraints_init_dataset=None, n_iter=100, verbose=True):
        """Run n_iter BO iterations starting from the initial design.

        X_init: (n, d) array of initial points; objective_init_dataset: the
        matching objective values; constraints_init_dataset: the matching
        (n, n_constraints) constraint values (only used when constraints
        were supplied at construction time).
        """
        self._X = X_init.copy()
        self._objective_dataset = objective_init_dataset.copy()
        if self._n_constraints > 0 and constraints_init_dataset is not None:
            self._constraints_dataset = constraints_init_dataset.copy()
        # update x_min and f_min from the initial design
        self.__find_minimum_from_dataset()
        # BO iterations: propose, evaluate, record, re-scan for the incumbent
        for it in range(n_iter):
            x_new = self.acquisition_maximization()
            f_new = self._objective(x_new)
            self._X = np.concatenate((self._X, [x_new]))
            self._objective_dataset = np.concatenate((self._objective_dataset, [f_new]))
            if self._n_constraints > 0:
                constraints_new = np.array([constraint(x_new) for constraint in self._constraints])
                self._constraints_dataset = np.concatenate((self._constraints_dataset, [constraints_new]))
            if verbose:
                print('iteration %d: new objective evaluation=%.4f' % (it + 1, f_new))
            self.__find_minimum_from_dataset()

    def __find_minimum_from_dataset(self):
        """Scan all recorded evaluations for the best (feasible) point.

        Bug fix: the original tested `self._x_min == None`; once _x_min holds
        a numpy array that comparison is element-wise, so for points of
        dimension > 1 the `or` raised "truth value of an array is ambiguous".
        Identity tests (`is None`) are used instead.
        """
        if self._objective_dataset is None:
            return None
        self._x_min = None
        self._f_min = None
        n = len(self._objective_dataset)
        if self._n_constraints > 0:
            # only feasible points are eligible as the incumbent
            for k in range(n):
                if constraints_all_satisfied(self._constraints_dataset[k, :]):
                    if self._x_min is None or (self._objective_dataset[k] < self._f_min):
                        self._x_min = self._X[k, :].copy()
                        self._f_min = self._objective_dataset[k]
        else:
            # unconstrained: plain arg-min over the dataset
            self._x_min = self._X[0, :].copy()
            self._f_min = self._objective_dataset[0]
            for k in range(1, n):
                if self._objective_dataset[k] < self._f_min:
                    self._x_min = self._X[k, :].copy()
                    self._f_min = self._objective_dataset[k]
class EIAlgorithm(BOModule):
    """Unconstrained BO using Expected Improvement over a GP surrogate.
    NOTE(review): the `bounds` argument is forwarded to the base class but
    never applied — the inner scipy minimize call below is unbounded, so the
    acquisition optimum can leave the search box; confirm intent.
    """
    def __init__(self, black_box_objective_function, bounds: scipy.optimize.Bounds):
        super().__init__(black_box_objective_function, bounds, None)
    def acquisition_maximization(self):
        # Fit a GP (constant * RBF + white noise kernel) to all evaluations,
        # then maximize EI by minimizing its negation, starting from the
        # current incumbent self._x_min.
        ker = ConstantKernel()*RBF() + WhiteKernel()
        gp = GaussianProcessRegressor(kernel=ker).fit(self._X, self._objective_dataset)
        def function_to_minimize(x):
            # negative EI of a single candidate point x under the GP posterior
            X_to_pred = np.array([x])
            mean, std = gp.predict(X_to_pred, return_std=True)
            return -expected_improvement(self._f_min, mean, std)
        res = opt.minimize(fun=function_to_minimize, x0=self._x_min)
        return res.x
    def minimize(self, X_init, objective_init_dataset, n_iter=100, verbose=True):
        # Unconstrained wrapper over BOModule.minimize (no constraint dataset).
        return super().minimize(X_init, objective_init_dataset, None, n_iter, verbose)
9085945580 | # -*- coding: utf-8 -*-
"""
Created on Sat Jul 27 12:32:15 2019
@author: Guy Mcbride (Keysight)
@author: B.Ann (TU-Delft)
"""
import sys
import numpy as np
import logging
import matplotlib.pyplot as plt
log = logging.getLogger(__name__)
sys.path.append(r'C:\Program Files (x86)\Keysight\SD1\Libraries\Python')
import keysightSD1 as key
# Module-level state: this module acts as a singleton driver for one digitizer.
__dig = key.SD_AIN()
# _channel = 1
_pointsPerCycle = 0  # samples per acquisition; set by open()
timeStamps = []      # sample instants of the capture window; set by open()
_SAMPLE_RATE = 500E+06  # digitizer sample rate in Hz
def timebase(start, stop, sample_rate):
    """Array of sample instants (seconds) covering [start, stop) at the
    given sample rate in Hz."""
    first_sample = int(start * sample_rate)
    last_sample = int(stop * sample_rate)
    return np.arange(first_sample, last_sample) / sample_rate
def open(chassis, slot, channel_I, channel_Q, captureTime):
    """Open and configure the digitizer for an I/Q capture of captureTime
    seconds; returns the module's SD_AIN handle.
    NOTE: shadows the builtin open() within this module; errors are only
    logged, not raised.
    """
    log.info("Configuring Digitizer...")
    global timeStamps, _pointsPerCycle, _channel_I, _channel_Q
    _channel_I = channel_I
    _channel_Q = channel_Q
    timeStamps = timebase(0, captureTime, _SAMPLE_RATE)
    _pointsPerCycle = len(timeStamps)
    error = __dig.openWithSlotCompatibility('', chassis, slot, key.SD_Compatibility.KEYSIGHT)
    if error < 0:
        log.info("Error Opening digitizer in slot #{}".format(slot))
    # drop any samples left over from a previous acquisition
    error = __dig.DAQflush(_channel_I)
    if error < 0:
        log.info("Error Flushing")
    error = __dig.DAQflush(_channel_Q)
    if error < 0:
        log.info("Error Flushing")
    # 2.0 V full scale, 50 Ohm input impedance, DC coupling on both channels
    error = __dig.channelInputConfig(_channel_I, 2.0, key.AIN_Impedance.AIN_IMPEDANCE_50,
                                     key.AIN_Coupling.AIN_COUPLING_DC)
    if error < 0:
        log.info("Error Configuring channel")
    error = __dig.channelInputConfig(_channel_Q, 2.0, key.AIN_Impedance.AIN_IMPEDANCE_50,
                                     key.AIN_Coupling.AIN_COUPLING_DC)
    if error < 0:
        log.info("Error Configuring channel")
    return (__dig)
def digitize(trigger_delay, number_of_pulses = 1):
    """Arm both channels for software/HVI-triggered acquisition.
    trigger_delay is given in seconds and converted to samples; errors are
    only logged, not raised.
    """
    trigger_delay = trigger_delay * _SAMPLE_RATE # expressed in samples
    trigger_delay = int(np.round(trigger_delay))
    error = __dig.DAQconfig(_channel_I, _pointsPerCycle, number_of_pulses, trigger_delay, key.SD_TriggerModes.SWHVITRIG)
    if error < 0:
        log.info("Error Configuring Acquisition")
    error = __dig.DAQstart(_channel_I)
    if error < 0:
        log.info("Error Starting Digitizer")
    error = __dig.DAQconfig(_channel_Q, _pointsPerCycle, number_of_pulses, trigger_delay, key.SD_TriggerModes.SWHVITRIG)
    if error < 0:
        log.info("Error Configuring Acquisition")
    error = __dig.DAQstart(_channel_Q)
    if error < 0:
        log.info("Error Starting Digitizer")
def get_data_I():
    """Read one capture from the I channel, scaled to a fraction of full
    scale (14-bit ADC codes * LSB).

    Blocks for up to TIMEOUT ms; a short read (fewer samples than
    configured) is logged as a warning but still returned.
    """
    _channel = _channel_I
    TIMEOUT = 10000
    LSB = 1 / 2**14  # one 14-bit ADC code as a fraction of full scale
    dataRead = __dig.DAQread(_channel, _pointsPerCycle, TIMEOUT)
    if len(dataRead) != _pointsPerCycle:
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        log.warning("Attempted to Read {} samples, actually read {} samples".format(_pointsPerCycle, len(dataRead)))
    return (dataRead * LSB)
def get_data_Q():
    """Read one capture from the Q channel, scaled to a fraction of full
    scale (14-bit ADC codes * LSB).

    Blocks for up to TIMEOUT ms; a short read (fewer samples than
    configured) is logged as a warning but still returned.
    """
    _channel = _channel_Q
    TIMEOUT = 10000
    LSB = 1 / 2**14  # one 14-bit ADC code as a fraction of full scale
    dataRead = __dig.DAQread(_channel, _pointsPerCycle, TIMEOUT)
    if len(dataRead) != _pointsPerCycle:
        # Fix: Logger.warn is a deprecated alias of Logger.warning.
        log.warning("Attempted to Read {} samples, actually read {} samples".format(_pointsPerCycle, len(dataRead)))
    return (dataRead * LSB)
def flush():
    """Discard any buffered samples on both channels (errors only logged)."""
    error = __dig.DAQflush(_channel_I)
    if error < 0:
        log.info("Error Flushing")
    error = __dig.DAQflush(_channel_Q)
    if error < 0:
        log.info("Error Flushing")
def close():
    """Release the digitizer hardware handle."""
    __dig.close()
1421000402 | # dumps() convert python object to json string
# dump() method is used for writing into json file
import json
# Serialize a plain mapping to a pretty-printed JSON string.
# Fix: the original named this mapping `dict`, shadowing the builtin type.
record = {
    "id": 1,
    "name": "Tom",
    "class": "10th",
}
# json.dumps() converts a Python object to a JSON string (4-space indent);
# json.dump() would write it straight into a file instead.
json_object = json.dumps(record, indent=4)
print(json_object)
5260552991 | from environment.simulation import Simulation
class MazeSim(Simulation):
    """
    Simulate a key-and-door maze environment. Inherits the run() function
    from the Simulation class. The agent must first find the key, then
    reach the door; each cell effectively exists in two situations
    (with-key / without-key), hence num_states is doubled.
    """
    def __init__(self, model):
        # model supplies the static maze description (cells, walls, key,
        # door, neighbour table, etc.)
        super().__init__(
            model.state_names, model.action_names)
        self.initial_state = model.initial
        self.num_states = model.num_states*2 # 2 situations per cell (with key / without key)
        self.shape = model.shape
        self.num_actions = model.num_actions
        self.maze = model.maze
        self.action = model.DIRECTIONS
        self.finish = False
        self.walls = model.walls
        self.door = model.door
        self.key = model.key
        self.states = model.states
        self.neighbours = model.neighbours
        self.is_finding_key = True
        self.state = self.reset()
    def reset(self):
        # Reset to the initial state, zero reward, key not yet found,
        # and return the starting state.
        self.state = self.initial_state
        self.reward = 0
        self.finish = False
        self.is_finding_key = True
        return self.state
    def next(self,action):
        """Apply one action; returns (next_state, cumulative_reward).
        Rewards: -0.1 per non-goal step (falling outside teleports back to
        the start), +1 for reaching the key, +1 for reaching the door with
        the key (which also ends the episode).
        NOTE(review): on the key-pickup step, the second branch below also
        subtracts 0.1 (key != door), so picking up the key nets +0.9 —
        confirm this is intended.
        """
        current = self.states.index(self.state)
        explorer = self.states[self.neighbours[current, action]]
        if self.fall_outside(self.state,action):
            self.state = self.initial_state
            self.reward -= 0.1
        elif explorer == self.state:
            # bumped into something; position unchanged
            self.reward -= 0.1
            self.state = explorer
        else:
            if self.is_finding_key:
                if explorer == self.key:
                    self.reward += 1.
                    self.is_finding_key = False
                else:
                    self.reward -= 0.1
            if not self.is_finding_key:
                if explorer == self.door:
                    self.reward += 1.
                    self.finish = True
                else:
                    self.reward -= 0.1
            self.state = explorer
        return self.state, self.reward #first return is next state
    def is_terminal(self):
        # Episode ends once the door has been reached with the key.
        return self.finish
    def fall_outside(self,loc,action):
        """True if taking `action` from cell `loc` leaves the grid or hits a
        wall. Actions: 0=up, 1=right, 2=down, 3=left.
        NOTE(review): the bounds test uses i>row / j>col rather than >= —
        for a (row, col) grid with 0-based indices this admits i==row and
        j==col; confirm the neighbours table makes this unreachable.
        """
        row,col = self.shape
        i,j = loc
        if action == 0:
            i-=1
        elif action == 1:
            j+=1
        elif action == 2:
            i+=1
        elif action == 3:
            j-=1
        if i<0 or j<0 or i>row or j>col or (i,j) in self.walls:
            return True
        return False
class MazeSim_Features(MazeSim):
    """MazeSim variant that returns feature vectors instead of raw states."""
    def __init__(self, model, feature=None):
        """
        Parameters
        ----------
        model : the maze grid holding the basic maze information; must also
            provide the get_feature_* factory methods used below.
        feature : one of "onehot", "xy", "tiling", "onehot_tiling";
            selects which state-representation mapping to use.
        Raises TypeError for any other value.
        """
        # choose the state-to-features mapping before initialising the
        # superclass (MazeSim.__init__ calls reset(), which uses it)
        if feature == "onehot":
            self.rep_function = model.get_feature_mapping_onehot()
        elif feature == "xy":
            self.rep_function = model.get_feature_mapping_xy()
        elif feature == "tiling":
            self.rep_function = model.get_feature_mapping_tiling()
        elif feature == "onehot_tiling":
            self.rep_function = model.get_feature_onehot_tiling()
        else:
            raise TypeError("unknown type of feature mapping")
        super().__init__(model)
        self.initial = self.reset()
    def reset(self):
        # initialise the underlying state ...
        _ = super().reset()
        # ... but return its feature representation rather than the state
        return self.rep_function(self.state, self.is_finding_key)
    def next(self, action):
        # evolve the system via the superclass next()
        next_state, reward = super().next(action)
        # states are exposed to the caller as feature vectors
        features = self.rep_function(next_state, self.is_finding_key)
        return features, reward
1790261928 | import base64
import os
import subprocess
import xlrd
import string
import json
from openpyxl import load_workbook,Workbook
import openpyxl as op
# Scratch path placeholder; appears unused in the visible part of this module.
workingPath = ""
# 批量生成用来修改的html文件对应的txt文件,之后用下一个函数将.html后缀改为.txt。
#用来生成之前的记录在表格中的内容
def generateHtmlold():
    """Batch-generate review pages (as .txt, later renamed to .html) for the
    patches listed in column 1 of sheet index 6 of newanalysis.xlsx,
    embedding each patch text and its GitHub commit link.
    (Older variant driven by the spreadsheet; paths are hard-coded.)
    """
    f1 = open("/home/liu/桌面/gumtree_tmp/special_html/1.txt", 'r', encoding='utf-8')
    content = f1.read()
    # pre-fill placeholder files 50..100 with the template content
    for i in range(50, 101):
        with open("/home/liu/桌面/gumtree_tmp/special_html/" + str(i) + ".txt", "w", encoding='utf-8') as f:
            f.write(content)
    # HTML fragments surrounding the commit link and the embedded patch
    stra = "<html>\n<body>\n<a href=\"https://github.com/"
    strb = "\" style=\"margin-left:50px;\">所在commit地址</a>\n<code>\n<pre style=\"font-size: 20px;font-family:'Times New Roman', Times, serif;color:brown;\">\n"
    strc = "\n</pre>\n</code>\n<p>message:</p>\n<p>李蓝天:改动分类:原因:</p>\n<p>刘志浩:改动分类:原因:</p>\n</body>\n</html>"
    data = xlrd.open_workbook("/home/liu/桌面/gumtree_tmp/newanalysis.xlsx")  # spreadsheet with the analysis results
    fspecial = open("/home/liu/PycharmProjects/pythonProject2/special1.txt", "r", encoding='utf-8')  # file listing commit links
    s1 = fspecial.readlines()  # one commit link per line
    table = data.sheets()[6]  # use the 7th sheet (index 6)
    rows = table.nrows  # number of rows
    cols = table.col_values(0)  # first column: patch file numbers
    # walk every row and build one page per referenced patch
    for i in range(0, rows):
        cols[i] = int(cols[i])  # cells are floats; convert to int indices
        fpatch = open("/home/liu/PycharmProjects/GenerateAST/special/" + str(cols[i]) + ".txt", 'r',
                      encoding='utf-8')  # the corresponding patch file
        pacth_content = fpatch.read()  # patch text to embed
        # drop the trailing sha so the link reads user/repo/commit/sha
        link1, link2, link3 = s1[cols[i]].rpartition('/')
        s1[cols[i]] = link1
        # stitch the page together
        strall = stra + s1[cols[i]] + strb + pacth_content + strc
        fsave = open("/home/liu/桌面/gumtree_tmp/special_html_test/" + str(i) + ".txt", "w", encoding="utf-8")  # write the page
        fsave.write(strall)
###### 批量修改文件名称(将.txt变为.html)
def txttohtml():
    """Batch-rename every .txt file in the hard-coded directory to .html."""
    path = "/home/liu/桌面/gumtree_tmp/special_html_test"
    os.chdir(path)
    files = os.listdir(path)
    for filename in files:
        portion = os.path.splitext(filename) # split base name and extension
        # only files with a .txt extension
        if portion[1] == '.txt':
            # recombine the base name with the new extension
            newname = portion[0] + '.html' # change to .html
            # newname = "ss.html"
            os.rename(filename, newname)
# 批量生成用来修改的html文件对应的txt文件,之后用下一个函数将.html后缀改为.txt。
def generateHtml(pathstr,fLinkstr):
    """For every numbered patch file in `pathstr`, emit a review page into a
    new `webpage/` subdirectory, embedding the patch and the commit link
    taken from line N of `fLinkstr` (N = the patch file's numeric name)."""
    path = pathstr
    os.chdir(path)
    files = os.listdir(path)
    fLink = open(fLinkstr,"r",encoding="utf-8")
    s1 = fLink.readlines()
    # HTML fragments surrounding the commit link and the embedded patch
    stra = "<html>\n<body>\n<a href=\""
    strb = "\" style=\"margin-left:50px;\">所在commit地址</a>\n<code>\n<pre style=\"font-size: 20px;font-family:'Times New Roman', Times, serif;color:brown;\">\n"
    strc = "\n</pre>\n</code>\n<p>message:</p>\n<p>李蓝天:改动分类:原因:</p>\n<p>梁叶剑:改动分类:原因:</p>\n<p>刘志浩:改动分类:原因:</p>\n</body>\n</html>"
    os.mkdir("webpage")
    for filename in files:
        # skip the output directory itself and the saved-links file
        if (filename == "webpage") or (filename == "saveLink.txt"):
            continue
        strall = ""
        portion = os.path.splitext(filename) # split base name and extension
        ftmp = open(filename,"r",encoding='utf-8')
        tmpnum = int(portion[0])
        strall = stra + s1[tmpnum] + strb + ftmp.read() + strc
        savefile = open(pathstr + "/webpage/" + str(tmpnum) + ".txt",'w',encoding='utf-8')
        savefile.write(strall)
# 批量生成网址
def generateLink():
    """Print the GitHub-Pages URL of each of the 222 review pages; each URL
    is followed by a blank line (the literal ends in a newline and print
    adds its own), matching the original output exactly."""
    template = "https://highbe.github.io/SolidityWorm/HtmlPlace/{}.html\n"
    for page in range(222):
        print(template.format(page))
#将当前文件夹下的文件对应的网址找到并存到一个文件中
def collectLink(pathstr,fLinkstr):
    """Collect the commit link for every numbered file in `pathstr` (a
    directory of classified patches) from the master link list `fLinkstr`,
    and write "<number> <link>" lines into saveLink.txt in that directory."""
    fLink = open(fLinkstr,'r',encoding='utf-8') # master file with all links
    s1 = fLink.readlines()
    path = pathstr # directory of classified files
    os.chdir(path)
    files = os.listdir(path)
    # sort by numeric file name; requires every name to be "<int>.<ext>"
    files.sort(key=lambda x:int(x.split('.')[0]))
    content = "" # accumulates all selected links
    for filename in files:
        portion = os.path.splitext(filename) # portion[0]=name, portion[1]=extension
        tmpnum = int(portion[0])
        content += portion[0] + " " + s1[tmpnum] + "\n"
    fsave = open("saveLink.txt","w",encoding="utf-8")
    fsave.write(content)
#统计单个文件中的行数
def countlines(path,count,emitcount):
    """Count code lines and emit statements in one Solidity file.

    count and emitcount are single-element list accumulators (mutated in
    place): count[0] += lines judged to be code (contain ';' or '{', or are
    emit statements); emitcount[0] += lines starting with 'emit '.
    Blank lines and '//' / '/* ... */' comments are skipped.
    """
    catalog = open(path,"r",encoding="utf-8",errors="ignore")
    lines = catalog.readlines()
    i = 0
    while i < len(lines):
        # line = lines[i].strip()
        lines[i] = lines[i].strip()
        # skip blank lines
        if lines[i] == "":
            i += 1
            continue
        # emit statement: bump both counters and skip the checks below
        if lines[i].startswith("emit "):
            emitcount[0] += 1
            count[0] += 1
            i += 1
            continue
        # comments: '//' skips the line; '/*' skips until the matching '*/'
        if lines[i].startswith("//"):
            i += 1
            continue
        if lines[i].startswith("/*"):
            rightzhushi = lines[i].find("*/")
            # advance to the line carrying the closing '*/'
            while rightzhushi == -1:
                i += 1
                if i >= len(lines): # guard against an unterminated comment
                    break
                rightzhushi = lines[i].find("*/")
            i += 1
            continue
        if lines[i].find(";") != -1 or lines[i].find("{") != -1:
            count[0] += 1
            i += 1
        else :
            i += 1
# 每个项目需要单独调用该函数 找到每个项目中的所有solidity文件
def traversal(path,count,emitcount):
    """Recursively walk one project directory and run countlines() on every
    .sol file, accumulating into the count/emitcount single-element lists.
    NOTE(review): os.path.islink(file) tests the bare name relative to the
    current working directory, not cur_path — probably should be
    os.path.islink(cur_path); the 'to_outside' special case papers over the
    one known symlinked project.
    """
    # list everything in the current directory
    file_list = os.listdir(path)
    # recurse into subdirectories, count .sol files, ignore everything else
    for file in file_list:
        # build the full path; otherwise only one level could be traversed
        cur_path = os.path.join(path, file)
        # directory?
        if os.path.isdir(cur_path):
            # skip symlinked dirs (could loop forever); see NOTE above
            if os.path.islink(file) or file == 'to_outside':
                continue
            traversal(cur_path,count,emitcount)
        # Solidity file? (name ends with ".sol")
        elif cur_path.find(".sol",len(cur_path)-4,len(cur_path)) != -1:
            # print("1" + cur_path)
            countlines(cur_path,count,emitcount)
        # neither a directory nor a Solidity file
        else:
            continue
#统计每个项目的star数量
def countstar(path):
    """Fill column 6 of the summary spreadsheet with each project's GitHub
    star count, read from the per-project JSON metadata files in `path`.
    Rows are matched by "user repo" (the '/' in full_name replaced by a
    space) against column 1, scanning rows 2..2949.
    """
    file_list = os.listdir(path)
    excel_path = "/home/liu/桌面/gumtree_tmp/统计数据/emitchange汇总.xlsx"
    wb = load_workbook(excel_path)
    ws = wb.active
    for file in file_list:
        cur_path = os.path.join(path,file)
        content = open(cur_path,"r")
        strings = content.read()
        jsonStr = json.loads(strings)
        jsonStr['stargazers_count'] # star count (NOTE: this bare expression is a no-op)
        namebefore = jsonStr['full_name'] # "user/repo"; '/' must become ' ' to match the sheet
        nameafter = namebefore.replace('/',' ')
        print(file + " " + nameafter)
        for i in range(2,2950):
            if nameafter == ws.cell(i,1).value: # openpyxl cells are 1-indexed
                ws.cell(i,6).value = jsonStr['stargazers_count']
                print(i)
                continue
    wb.save(excel_path)
#统计含有emit改变的commit占所有commit的比例
def changepercent(path):
    """For every project directory under `path`, count its commits (one JSON
    file per commit) and how many of them touch an emit statement, and write
    one spreadsheet row per project: name, total commits, emit commits, and
    their ratio (-1 when the project has no commits)."""
    all_dir = os.listdir(path)
    wb = op.Workbook()
    ws = wb['Sheet']
    ws.append(['项目名','总改动次数','emit改动次数','emit改动次数/总改动次数'])
    for dir in all_dir:
        count = 0 # total number of commits for this project
        changecount = 0 # commits containing an emit change
        tmp_path = os.path.join(path,dir)
        if os.path.isdir(tmp_path): # enter the project directory
            cur_path = os.path.join(path,dir)
            file_list = os.listdir(cur_path)
            for file in file_list:
                count += 1 # each file is one commit
                print(1)
                if(isEmitChange(tmp_path+"/"+file)): # commit modifies an emit
                    changecount += 1
        d = dir,count,changecount,((changecount/count) if (count!=0) else -1)
        ws.append(d)
    # destination of the generated spreadsheet
    wb.save("/home/liu/PycharmProjects/SolidityWorm/emitchange.xlsx")
#判断是否包含emit的改变
def isEmitChange(path):
    """Return True if the commit JSON at `path` contains any added or removed
    emit statement ('+emit' / '-emit' after stripping all spaces) in any
    file's patch, ignoring '//' and '/* ... */' comments.
    NOTE(review): the inner `i = 0` shadows the outer for-loop variable;
    harmless because `for` rebinds i each iteration, but easy to misread.
    """
    content = open(path, "r")
    strings = content.read()
    jsonStr = json.loads(strings)
    # no 'files' key means the commit changed nothing we can inspect
    if ('files' not in jsonStr):
        return False
    for i in range(len(jsonStr['files'])):
        if 'patch' not in jsonStr['files'][i]:
            continue
        # strip every space so "- emit" normalizes to "-emit"
        tmpStr = jsonStr['files'][i]['patch'].replace(' ', '')
        lines = tmpStr.split('\n') # the patch text, one diff line per entry
        # walk every line of this file's diff
        i = 0
        while i < len(lines):
            # '//' line comments (possibly behind a diff +/- marker)
            if lines[i].startswith("//") or lines[i].startswith("+//") or lines[i].startswith("-//"):
                i += 1
                continue
            # '/* ... */' block comments: skip to the closing '*/'
            if lines[i].startswith("/*") or lines[i].startswith("+/*") or lines[i].startswith("-/*"):
                rightzhushi = lines[i].find("*/")
                while rightzhushi == -1:
                    i += 1
                    if i >= len(lines): # guard against an unterminated comment
                        break
                    rightzhushi = lines[i].find("*/")
                i += 1
                continue
            # an added or removed emit statement: the commit qualifies
            if lines[i].startswith("+emit") or lines[i].startswith("-emit"):
                return True
            # nothing matched; next line
            i += 1
    # no emit modification found anywhere
    return False
#判断是否是相同emit的改变
def samechangeamount(path):
    """Return True if the commit JSON at `path` contains two identical emit
    modifications within the same file's patch (same sign and same statement
    text up to the ';'), ignoring comments.
    NOTE(review): the key includes the +/- marker, so an emit that is removed
    and re-added identically does NOT count as "same"; only duplicate
    additions or duplicate deletions do — confirm that is the intent.
    """
    content = open(path, "r")
    strings = content.read()
    jsonStr = json.loads(strings)
    # no 'files' key means there is nothing to inspect
    if ('files' not in jsonStr):
        return False
    for i in range(len(jsonStr['files'])):
        if 'patch' not in jsonStr['files'][i]:
            continue
        tmpStr = jsonStr['files'][i]['patch'].replace(' ', '')
        lines = tmpStr.split('\n') # this file's diff, one line per entry
        hashtable = dict()  # emit statements already seen in this file
        # walk every diff line (i is rebound by the outer for each iteration)
        i = 0
        while i < len(lines):
            # '//' line comments (possibly behind a diff +/- marker)
            if lines[i].startswith("//") or lines[i].startswith("+//") or lines[i].startswith("-//"):
                i += 1
                continue
            # '/* ... */' block comments: skip to the closing '*/'
            if lines[i].startswith("/*") or lines[i].startswith("+/*") or lines[i].startswith("-/*"):
                rightzhushi = lines[i].find("*/")
                while rightzhushi == -1:
                    i += 1
                    if i >= len(lines): # guard against an unterminated comment
                        break
                    rightzhushi = lines[i].find("*/")
                i += 1
                continue
            # on an emit modification, remember it and compare with earlier ones
            if lines[i].startswith("+emit") or lines[i].startswith("-emit"):
                emitStr = lines[i].split(";")[0] # full statement, dropping any trailing comment
                if emitStr in hashtable: # identical emit modification seen before
                    return True
                else: # first occurrence: record it
                    hashtable[emitStr] = 1
            # nothing matched; next line
            i += 1
    # no duplicated emit modification found
    return False
#统计相同emit改变占所有emit改变的比例
def samechangepercent(path):
    """For every project directory under `path`, count commits with emit
    changes and how many of those contain duplicated emit modifications, and
    write one spreadsheet row per project: name, emit-change commits,
    same-emit-change commits, and their ratio (-1 when there are none)."""
    all_dir = os.listdir(path)
    wb = op.Workbook()
    ws = wb['Sheet']
    ws.append(['项目名', 'emit改动次数','相同emit改动次数', '相同emit改动次数/emit改动次数'])
    for dir in all_dir:
        samecount = 0 # commits with a duplicated emit modification
        changecount = 0 # commits with any emit modification
        tmp_path = os.path.join(path, dir)
        if os.path.isdir(tmp_path): # enter the project directory
            cur_path = os.path.join(path, dir)
            file_list = os.listdir(cur_path)
            for file in file_list:
                print(1)
                if (isEmitChange(tmp_path + "/" + file)): # commit touches an emit
                    changecount += 1
                    if samechangeamount(tmp_path + "/" + file):
                        samecount += 1
        d = dir,changecount,samecount, ((samecount / changecount) if (changecount != 0) else -1)
        ws.append(d)
    # destination of the generated spreadsheet
    wb.save("/media/liu/02F8200EF81FFE93/Liu/data/sameEmitChange4.xlsx")
#利用修改后的文件和patch文件还原出修改前的文件
def rebackSrc(patchStr, dstStr):
    """Reconstruct the pre-change file text from a unified diff and the
    post-change file text.

    patchStr: GitHub-style unified diff ('@@ -a,b +c,d @@' hunk headers);
    dstStr: the modified file. Returns the original file as one string.
    NOTE(review): hunk positions are parsed from the '+c,d' side and matched
    against line numbers of the NEW file; '+' lines are skipped (they exist
    only in the new file) and '-' lines are re-inserted without advancing
    the new-file cursor.
    """
    lines = patchStr.split("\n")
    dst = dstStr.split("\n")
    taps = []      # 1-based start line (in the new file) of each hunk
    patchPos = []  # index in `lines` of each hunk's '@@' header
    for i in range(0, len(lines)):
        head = lines[i].find("@@")
        if head == 0:
            rear = lines[i][head + 2:].find("@@")
            # second field of the header, e.g. '+12,7' -> '12'
            tempstr = lines[i][head + 3:rear].split(" ")[1].split(",")[0][1:]
            taps.append(tempstr)
            patchPos.append(i)
    # walk the new file, splicing in hunk content at the recorded positions
    srcPos = 0
    tapsPos = 0
    newStream = []
    lines[len(lines) - 1] += "\n"
    while srcPos < len(dst):
        # all hunks consumed: copy the rest of the new file verbatim
        if tapsPos >= len(taps):
            newStream.append(dst[srcPos] + "\n")
            srcPos += 1
            continue
        if srcPos == int(taps[tapsPos]) - 1:
            # reached the next hunk: replay its body
            patchStart = patchPos[tapsPos] + 1
            patchEnd = len(lines)
            if tapsPos < len(taps) - 1:
                patchEnd = patchPos[tapsPos + 1]
            for i in range(patchStart, patchEnd):
                if lines[i] == '':
                    newStream.append(lines[i] + "\n")
                    srcPos += 1
                else:
                    if lines[i][0] == '+':
                        # added line: present only in the new file; skip it
                        srcPos += 1
                        continue
                    elif lines[i][0] == '-':
                        # deleted line: restore it to the original
                        newStream.append(lines[i][1:] + "\n")
                    else:
                        # context line: present in both versions
                        newStream.append(lines[i] + "\n")
                        srcPos += 1
            tapsPos += 1
        else:
            # between hunks: copy the new-file line unchanged
            newStream.append(dst[srcPos] + "\n")
            srcPos += 1
    # drop git's end-of-file marker if it ended up in the output
    if "No newline at end of file" in newStream[-1]:
        newStream.remove(newStream[-1])
    return "".join(newStream)
#统计单个文件中的emit行数
def countEmitLine(str):
    """Count 'emit ' statements in Solidity source text, skipping '//' line
    comments and '/* ... */' block comments that start a line.
    (The parameter name shadows the builtin `str`; kept for interface
    compatibility with existing callers.)
    """
    rows = str.split("\n")
    total = 0
    pos = 0
    while pos < len(rows):
        row = rows[pos].strip()
        # emit statement: count it and move on
        if row.startswith("emit "):
            total += 1
            pos += 1
            continue
        # single-line comment
        if row.startswith("//"):
            pos += 1
            continue
        # block comment: advance to the line holding '*/' (or to the end)
        if row.startswith("/*"):
            while pos < len(rows) and rows[pos].find("*/") == -1:
                pos += 1
            pos += 1
            continue
        pos += 1
    return total
def emitCodeChurn(path):
    """Compute per-project emit code churn and write it to a spreadsheet.

    For each project directory under `path` (one JSON file per commit), the
    corresponding git repository under objPath is checked out at the parent
    of each commit; total code lines and emit lines are counted via
    traversal(), and the commit's emit changes (with and without additions)
    and overall changes are normalized by those counts. Per-project averages
    go into one spreadsheet row each.
    NOTE(review): changecount is incremented twice per qualifying commit
    (once after traversal and again inside the emitcount[0] != 0 branch),
    and the bare `except: continue` silently swallows every error including
    git/IO failures — both look unintentional; confirm.
    """
    all_dir = os.listdir(path)
    wb = op.Workbook()
    ws = wb['Sheet']
    ws.append(['项目名', 'add','Code Churn','all'])
    objPath = "/home/yantong/Code/CodeLine/repos/" # directory holding the cloned repositories
    # Projects that are datasets or similar (not real contracts); skip them.
    notneed = ['tintinweb smart-contract-sanctuary-avalanche','tintinweb smart-contract-sanctuary-fantom','renardbebe Smart-Contract-Benchmark-Suites',
               'tintinweb smart-contract-sanctuary-arbitrum','tintinweb smart-contract-sanctuary-tron','xf97 HuangGai',
               'tintinweb smart-contract-sanctuary-optimism','gasgauge gasgauge.github.io','interfinetwork audited-codes',
               'giacomofi Neural_Smart_Ponzi_Recognition','solidproof projects','SoftSec-KAIST Smartian-Artifact',
               'bokkypoobah Tokens','makerdao spells-mainnet','Messi-Q GPSCVulDetector','kupl VeriSmart-benchmarks',
               'eff-kay solidity-nicad','ethereum solidity-underhanded-contest','Dapp-Learning-DAO Dapp-Learning']
    for j in range(0,len(all_dir)): # one iteration per project
        try:
            dir = all_dir[j]
            changecount = 0 # number of commits that contributed churn
            notmp = 0 # sum of per-commit emit churn WITHOUT additions
            intmp = 0 # sum of per-commit emit churn INCLUDING additions
            alltmp = 0 # sum of per-commit overall code churn
            cur_path = os.path.join(path, dir)
            if dir in notneed:
                continue
            if os.path.isdir(cur_path): # enter the project
                count = [0] # total code lines (accumulator for traversal)
                emitcount = [0] # total emit lines (accumulator for traversal)
                file_list = os.listdir(cur_path)
                for file in file_list: # `file` names the state before this commit
                    # check out the commit itself, then find its parent sha
                    os.chdir(objPath + dir)
                    nextsha = file[:-5]
                    command = "git checkout " + nextsha
                    os.system(command)
                    command = "git log"
                    returnStr = os.popen(command).read()
                    returnlines = returnStr.split("\n")
                    commitcount = 0
                    sha = ""
                    for line in returnlines:
                        if line.startswith("commit"):
                            commitcount += 1
                            if commitcount == 2:
                                sha = line[7:]
                    # only one commit in the log: this is the first commit,
                    # there is no "before" state to measure; skip it
                    if commitcount == 1:
                        continue
                    # force-switch the working tree to the parent commit
                    os.system("git reset --hard")
                    os.system("git clean -dfx") # forced cleanup, no prompts
                    os.system("git checkout " + sha) # switch to the parent sha
                    traversal(objPath + dir, count, emitcount) # count code/emit lines of the pre-commit tree
                    changecount += 1 # one more measured commit
                    # print(file)
                    file_path = os.path.join(cur_path,file)
                    content = open(file_path,"r")
                    strings = content.read()
                    jsonStr = json.loads(strings)
                    noCount = 0 # emit modifications excluding additions
                    inCount = 0 # emit modifications including additions
                    allcount = 0 # all code modifications
                    for i in range(len(jsonStr['files'])): # sum changes across the commit's files
                        if 'patch' not in jsonStr['files'][i]: # file had no textual change
                            continue
                        if jsonStr['files'][i]['filename'][-4:] != ".sol": # not a Solidity file
                            continue
                        noCount += noAddChange(jsonStr['files'][i]) # emit changes without additions
                        inCount += includeAddChange(jsonStr['files'][i]) # emit changes with additions
                        allcount += allchange(jsonStr['files'][i]) # all code changes
                    # print(str(file) + " " + str(inCount) + " " + str(noCount) + " " + str(allcount))
                    if emitcount[0] != 0:
                        changecount += 1
                        notmp += (noCount/emitcount[0]) # this commit's emit churn
                        intmp += (inCount/emitcount[0])
                        alltmp += (allcount/count[0])
            if changecount != 0: # average over measured commits
                notmp /= changecount
                intmp /= changecount
                alltmp /= changecount
            else:
                notmp = -1
                intmp = -1
                alltmp = -1
            # d = dir, intmp,notmp,alltmp
            print(str(j) + " " + str(intmp) + " " + str(notmp) + " " + str(alltmp))
            # ws.append(d)
            ws.cell(j+2,1).value = dir
            ws.cell(j+2,2).value = intmp
            ws.cell(j+2,3).value = notmp
            ws.cell(j+2,4).value = alltmp
            wb.save("/home/yantong/Zhihao/code_churn/codechurnLiang.xlsx")
        except:
            continue
# 统计emit修改的行数 不包含新增emit的修改
def noAddChange(jsonStr):
    """Count deleted emit statements in one file's diff.

    jsonStr is a single entry of a commit's ``files`` list (GitHub API);
    its ``patch`` text is scanned for lines removed by the commit that are
    emit statements (``-emit`` after all spaces are stripped), skipping
    ``//`` and ``/* ... */`` comments. Added ``+emit`` lines are deliberately
    ignored: an edited/moved emit counts once via its deleted side and
    brand-new emits are not counted ("no-add" churn).
    """
    changecount = 0
    # Strip every space so '- emit Foo(a, b);' normalizes to '-emitFoo(a,b);'.
    lines = jsonStr['patch'].replace(' ', '').split('\n')
    i = 0
    while i < len(lines):
        # Skip single-line comments (possibly behind a diff +/- marker).
        if lines[i].startswith(("//", "+//", "-//")):
            i += 1
            continue
        # Skip block comments: advance to the line containing '*/' (or EOF).
        if lines[i].startswith(("/*", "+/*", "-/*")):
            while i < len(lines) and lines[i].find("*/") == -1:
                i += 1
            i += 1
            continue
        if lines[i].startswith("-emit"):
            changecount += 1
        i += 1
    # (The original kept an unused pairing hashtable here; it was dead code
    # and has been removed.)
    return changecount
# Count emit-modified lines, including additions of new emit statements
def includeAddChange(jsonStr):
    """Count emit changes in one file's patch, counting added/removed emits.

    '+emit' and '-emit' lines are paired by the text before '(' so that a
    modified emit (one '-' plus one matching '+') counts once; unmatched
    lines count as pure additions/deletions via the '+'/'-' counters.

    NOTE(review): ``hashtable[key] = 1`` overwrites a duplicate pending key,
    so repeated identical emit lines may be under-counted -- confirm intended.

    :param jsonStr: per-file dict from the GitHub commit API with 'patch'.
    :return: paired modifications + leftover unmatched '+' and '-' emits.
    """
    # Pairing table: keys are emit signatures awaiting a partner; the '+'
    # and '-' entries are running counts of unmatched added/deleted emits.
    hashtable = {'+' : 0, '-' : 0} # records emit changes, used for pairing
    changecount = 0 # number of paired (modified) emit statements
    tmpStr = jsonStr['patch'].replace(' ', '')
    lines = tmpStr.split('\n') # the file's patch text, one entry per line
    # Walk every line of the patch
    i = 0
    while i < len(lines):
        # Skip comments: '//' lines entirely; for '/*' scan forward until the
        # matching '*/' and discard everything in between.
        if lines[i].startswith("//") or lines[i].startswith("+//") or lines[i].startswith("-//"):
            i += 1
            continue
        if lines[i].startswith("/*") or lines[i].startswith("+/*") or lines[i].startswith("-/*"):
            rightzhushi = lines[i].find("*/")
            # Find the other half of the comment
            while rightzhushi == -1:
                i += 1
                if i >= len(lines): # guard against an unterminated '/*'
                    break
                rightzhushi = lines[i].find("*/")
            i += 1
            continue
        # A '+emit' or '-emit' line is an emit change
        if lines[i].startswith("+emit") or lines[i].startswith("-emit"):
            if lines[i].startswith("+emit"):
                if lines[i].split("(")[0].replace("+","-") in hashtable: # a matching deletion is pending: pair them and count one modification
                    changecount += 1
                    del hashtable[lines[i].split("(")[0].replace("+","-")]
                    hashtable['-'] -= 1
                else:
                    hashtable[lines[i].split("(")[0]] = 1
                    hashtable['+'] += 1
            else:
                if lines[i].split("(")[0].replace("-","+") in hashtable: # a matching addition is pending: pair them and count one modification
                    changecount += 1
                    del hashtable[lines[i].split("(")[0].replace("-", "+")]
                    hashtable['+'] -= 1
                else:
                    hashtable[lines[i].split("(")[0]] = 1
                    hashtable['-'] += 1
        # Nothing matched -- move to the next line
        i += 1
    # paired modifications + pure deletions + pure additions
    return changecount + hashtable['-'] + hashtable['+']
# Count only the deletion-style modifications of a patch
def allchange(jsonStr):
    """Count deleted statements in one file's patch.

    A removed line (starting with '-') counts when it ends a statement
    (contains ';') or opens a block (contains '{'); comment lines are
    skipped, including '/* ... */' spans behind '+'/'-' diff markers.

    :param jsonStr: per-file dict from the GitHub commit API with 'patch'.
    :return: number of counted deletions.
    """
    rows = jsonStr['patch'].split('\n')  # patch text, one entry per line
    total = len(rows)
    deletions = 0
    idx = 0
    while idx < total:
        row = rows[idx]
        # Single-line comments (optionally behind a diff marker): skip.
        if row.startswith(("//", "+//", "-//")):
            idx += 1
            continue
        # Block comments: consume rows until the closing '*/' shows up
        # (tolerating a '/*' that is never closed).
        if row.startswith(("/*", "+/*", "-/*")):
            while rows[idx].find("*/") == -1:
                idx += 1
                if idx >= total:
                    break
            idx += 1
            continue
        # Deleted line: count it only when it is a complete statement.
        if row.startswith("-"):
            if row.find(';') == -1:
                # No semicolon: only an opening brace still counts.
                if row.find("{") != -1:
                    deletions += 1
                idx += 1
                continue
            deletions += 1
        # Nothing special happened -- next line.
        idx += 1
    # Total number of counted deletions.
    return deletions
# Generate a markdown table to publish in the GitHub repository
# path: location of the Excel workbook holding the data
def makeTable(path):
    """Render repository statistics from an Excel sheet as a markdown table.

    Writes 'message.txt' with an Index/Star/Name/EventUse table (the link
    table for 'address.txt' is kept commented out below).

    NOTE(review): columns are extracted by string-parsing ``str(cell)`` of
    xlrd cells (splitting on ':' and quotes), which is fragile across xlrd
    versions -- prefer ``cell.value`` if this is ever touched again.  The
    ``f``/``ff`` handles are never closed.
    """
    data_excel = xlrd.open_workbook(path)
    table = data_excel.sheets()[0]
    col_name = table.col(colx=0) # user and repository names
    col_event = table.col(colx=2) # event use
    col_event_loc = table.col(colx=3) # event use / LOC
    # col_emitcodechurn = table.col(colx=10) # event use code churn
    # col_entirecodechurn = table.col(colx=11) # entire code code churn
    col_star = table.col(colx=5)
    print("hhh")
    f = open("address.txt","w")
    ff = open("message.txt","w")
    # f holds the generated names and links of the 2915 repositories
    # ff holds the generated name, event use, event use/LOC, churn rates, etc.
    # f.write("|serial number| user name | repository name | link |\n| :---: | :---: | :---: | :--------: |\n")
    ff.write("|Index|Star| Repository Name | Event Use |Event Use/LOC|\n| ---- | ---- | ---- | ---- | ---- |\n")
    for i in range(len(col_name)):
        j = str(col_star[i]).split(':')[-1].split('.')[0]
        star = int(j)
        if star < 5:
            star = 5  # clamp: anything below 5 stars is reported as 5
        # print(str(col_name[i]).split(" ")[0].split('\'')[-1] + "|" + str(col_name[i]).split(" ")[-1].split('\'')[0])
        # f.write("|" + str(i) + "|" + str(col_name[i]).split(" ")[0].split('\'')[-1] + "|" + str(col_name[i]).split(" ")[-1].split('\'')[0] + "|" + "https://github.com/" + str(col_name[i]).split(" ")[0].split('\'')[-1] + "/" + str(col_name[i]).split(" ")[-1].split('\'')[0] + "\n")
        ff.write("|"+ str(i+1) + "|" + str(col_name[i]).split(" ")[0].split('\'')[-1] + "/" + str(col_name[i]).split(" ")[-1].split('\'')[0] + "|" +
                 str(star) + "|" + str(col_event[i]).split(':')[-1].split('.')[0] + '|' + str(col_event_loc[i]).split(":")[-1] +"\n")
def test(a,b):
    """Print a/b; on failure print 'except', otherwise also print 'else'.

    Small demo of try/except/else flow (e.g. ZeroDivisionError when b == 0).
    """
    try:
        print(a/b)
    except:
        # NOTE(review): bare except also swallows TypeError etc., not only
        # division errors -- consider narrowing to ZeroDivisionError.
        print("except")
    else :
        print("else")
print("nothing")
| HighBe/SolidityWorm | test/function.py | function.py | py | 30,482 | python | zh | code | 0 | github-code | 90 |
18349148879 | m,d=map(int,input().split())
# Count "Product Days": a day j whose ones digit da and tens digit db are
# both >= 2, falling in month i == da * db (months 1..m, days 1..d).
cnt=0
for i in range(1,m+1):
    for j in range(1,d+1):
        da=j%10  # ones digit of the day
        db=j//10  # tens digit of the day
        if da>=2 and db>=2:
            if i==da*db:
                cnt+=1
print(cnt) | Aasthaengg/IBMdataset | Python_codes/p02927/s039663546.py | s039663546.py | py | 204 | python | en | code | 0 | github-code | 90 |
2392192539 | ###===--- MesiSols NFT Minter ---===###
###===--- Imports ---===###
### General ###
import os, glob
from replit import db
import requests
import json
import re
### Algo API ###
from natsort import natsorted
from algosdk import mnemonic
from algosdk.v2client import algod
from algosdk.future.transaction import AssetConfigTxn
from minting.algo_utils import wait_for_confirmation, print_created_asset
###===--- Minting ---===###
# Set the paths for the images and metadata
images_path = 'image_maker/star_pngs'
meta_path = 'image_maker/star_json'
# Set the Pinata keys for IPFS and the minting account mnemonic.
# NOTE: os.environ[...] raises KeyError at import time when any of these
# environment variables is missing -- intentional fail-fast, but be aware.
api_key = os.environ['pinata_k']
api_secret = os.environ['pinata_s']
acc_mnemonic = os.environ['mesi_mnemonic']
# Going to be using several print statements for debugging/performance
def mint_collection(start_idx=0, end_idx=-1):
    '''
    Mint every NFT in the collection by calling mint() once per PNG.

    start_idx: index of the first image to mint (inclusive).
    end_idx: index one past the last image to mint; -1 (the default) means
        "through the end of the collection", preserving the historical
        behaviour from when this parameter was accepted but ignored.

    Returns: a minted collection of MesiSols on the Algorand Blockchain.
    '''
    # Set the data pathing.
    png_path = 'image_maker/star_pngs'
    stop = None if end_idx == -1 else end_idx
    # Mint each NFT in directory order within the requested slice.
    for index, _ in enumerate(sorted(os.listdir(png_path))[start_idx:stop]):
        mint(index + start_idx)
def mint(code, testnet=False):
    '''
    Mint a single MesiSol NFT on the Algorand blockchain.

    code: integer index of the star image/metadata to mint.
    testnet: when True, talk to the Algorand testnet node instead of mainnet.

    Side effects: uploads the PNG to IPFS via Pinata, submits an
    AssetConfigTxn, waits for confirmation, and records the asset id in the
    replit db keyed by the string form of *code*.
    '''
    ### Configuration ###
    # For debugging and minting purposes, convert the code int to str.
    str_code = str(code)
    print(f'--- Initializing creation of MesiSol #{str_code} ---')
    # Using Pinata link the image to an ipfs URL for upload.
    # Do some sorting in the image path to get the right files
    imgs = natsorted(glob.glob(os.path.join(images_path, "*.png")))
    # NOTE(review): this file handle is never closed explicitly.
    files = [('file', (str_code + ".png", open(imgs[code], "rb"))),]
    ipfs_url = "https://api.pinata.cloud/pinning/pinFileToIPFS"
    headers = {
        "pinata_api_key": api_key,
        "pinata_secret_api_key": api_secret
    }
    response: requests.Response = requests.post(url=ipfs_url, files=files, headers=headers)
    meta = response.json()
    # Sets the ipfs_cid hash
    ipfs_cid = meta['IpfsHash']
    # Find the attribute metadata of the star
    i_star_code = re.sub(r'\D+', '', imgs[code])
    meta_path = 'image_maker/star_json/'
    # NOTE(review): meta_file is never closed; a `with` block would be safer.
    meta_file = open(meta_path + f'MesiSolsMetadata{i_star_code}.json')
    meta_dict = json.load(meta_file)
    json_meta = json.dumps(meta_dict)
    ### Initizializing Blockchain Connection ###
    # Set the accounts for minting, setup using a dict because
    # multiple may be needed as the maximum amount of assets one
    # account can make is 1000.
    accounts = {}
    acc_counter = 1
    for m in [acc_mnemonic]:
        accounts[acc_counter] = {}
        accounts[acc_counter]['pk'] = mnemonic.to_public_key(m)
        accounts[acc_counter]['sk'] = mnemonic.to_private_key(m)
        acc_counter += 1
    # Check for Test Net and connect to client
    if testnet:
        algod_address = "https://api.testnet.algoexplorer.io"
    else:
        algod_address = "https://api.algoexplorer.io"
    algod_token = ""
    headers = {'User-Agent': 'py-algorand-sdk'}
    algod_client = algod.AlgodClient(algod_token, algod_address, headers);
    # `status` is unused; presumably this call doubles as a connectivity check.
    status = algod_client.status()
    # Create the asset and build the transaction for the mint.
    params = algod_client.suggested_params()
    params.fee = 1000
    params.flat_fee = True
    txn = AssetConfigTxn(
        sender = accounts[1]['pk'],
        sp = params,
        total = 1,
        default_frozen = False,
        asset_name = f'MesiSol #{str_code}',
        unit_name = f'MSOL{str_code}',
        manager = accounts[1]['pk'],
        reserve = accounts[1]['pk'],
        freeze = None,
        clawback = None,
        strict_empty_address_check=False,
        url = f'ipfs://{ipfs_cid}',
        metadata_hash = '',
        note = json_meta.encode(),
        decimals = 0)
    # Sign the transaction using the secret key for the account and
    # send it to the network gathering the transaction id.
    stxn = txn.sign(accounts[1]['sk'])
    txid = algod_client.send_transaction(stxn)
    print(f'--- {txid} is your transaction ID for MesiSol #{str_code} ---')
    wait_for_confirmation(algod_client, txid)
    try:
        # Pull successful transaction info.
        ptx = algod_client.pending_transaction_info(txid)
        asset_id = ptx["asset-index"]
        db[str_code] = asset_id
        print_created_asset(algod_client, accounts[1]['pk'], asset_id)
    except Exception as e:
        print(e)
| dbchristenson/mesisols | minting/mint.py | mint.py | py | 4,332 | python | en | code | 2 | github-code | 90 |
15566806562 | """Chaneg name
Revision ID: c327b22bdc90
Revises: 2d36a1563a4b
Create Date: 2021-12-09 15:13:17.020567
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c327b22bdc90'
down_revision = '2d36a1563a4b'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the misspelled 'profile_form' column with 'profile_from'.
    # NOTE(review): drop+add discards existing column data; use
    # alter_column(new_column_name=...) if the data must survive.
    op.add_column('profile', sa.Column('profile_from', sa.String(length=100), nullable=True))
    op.drop_column('profile', 'profile_form')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert: restore the old 'profile_form' column and drop 'profile_from'.
    op.add_column('profile', sa.Column('profile_form', sa.VARCHAR(length=100), nullable=True))
    op.drop_column('profile', 'profile_from')
    # ### end Alembic commands ###
| Sabuhi0/MyPortfolio | migrations/versions/c327b22bdc90_chaneg_name.py | c327b22bdc90_chaneg_name.py | py | 814 | python | en | code | 0 | github-code | 90 |
72764467177 | # This is a dice faking module
from random import randint
def roll(max):
    """Return one die roll: a random integer from 1 to *max* inclusive."""
    return randint(1, max)
def roll_a_bunch(max, numOfDice=3):
    """Return a list of *numOfDice* independent rolls of a *max*-sided die."""
    return [roll(max) for _ in range(numOfDice)]
def roll_distro(max, numOfDice=3):
    """Roll *numOfDice* dice with *max* sides and print how often each value came up.

    One line per distinct value, in order of first appearance, followed by
    a trailing newline from print().
    """
    rolls = roll_a_bunch(max, numOfDice)
    # Tally occurrences; dicts preserve first-seen (insertion) order.
    distribution = {}
    for value in rolls:
        distribution[value] = distribution.get(value, 0) + 1
    # The loop variable used to be named `roll`, shadowing the roll()
    # function above; renamed and switched to join() instead of += growth.
    report = [
        "Number " + str(value) + " was rolled " + str(count) + " times.\n"
        for value, count in distribution.items()
    ]
    print("".join(report))
| PDXDevCampJuly/atifar | die/die.py | die.py | py | 624 | python | en | code | 0 | github-code | 90 |
10938529593 | from fluent.migratetb.helpers import TERM_REFERENCE
from fluent.migratetb.helpers import transforms_from
# This can't just be a straight up literal dict (eg: {"a":"b"}) because the
# migration validator rejects a bare dict literal, so it is built through a
# dict(...) call instead.  Each TERM_REFERENCE maps a legacy DTD entity onto
# the corresponding Fluent term.
about_replacements = dict(
    {
        "&brandShorterName;": TERM_REFERENCE("brand-shorter-name"),
        "&brandShortName;": TERM_REFERENCE("brand-short-name"),
        "&vendorShortName;": TERM_REFERENCE("vendor-short-name"),
    }
)
def migrate(ctx):
    """Bug 1816532 - Migrate aboutDialog.dtd strings to Fluent, part {index}"""
    # NOTE: the docstring above doubles as the migration's description and
    # the {index} placeholder is filled in by the migration runner -- keep it.
    target = reference = "mail/messenger/aboutDialog.ftl"
    source = "mail/chrome/messenger/aboutDialog.dtd"
    # Each COPY/REPLACE below maps one legacy DTD string onto a Fluent
    # message; about_replacements swaps DTD entities for Fluent terms.
    # NOTE(review): 'update-downloading' closes with '</hspan>' -- looks like
    # a typo for '</span>'; confirm against the reference FTL before fixing.
    ctx.add_transforms(
        target,
        reference,
        transforms_from(
            """
release-notes-link = { COPY(source, "releaseNotes.link") }
update-check-for-updates-button = { COPY(source, "update.checkForUpdatesButton.label") }
    .accesskey = { COPY(source, "update.checkForUpdatesButton.accesskey") }
update-update-button = { REPLACE(source, "update.updateButton.label3", about_replacements) }
    .accesskey = { COPY(source, "update.updateButton.accesskey") }
update-checking-for-updates = { COPY(source, "update.checkingForUpdates") }
update-downloading-message = { COPY(source, "update.downloading.start") }<span data-l10n-name="download-status"></span>
update-applying = { COPY(source, "update.applying") }
update-downloading = <img data-l10n-name="icon"/>{ COPY(source, "update.downloading.start") }<span data-l10n-name="download-status"></hspan>
update-failed = { COPY(source, "update.failed.start") }<a data-l10n-name="failed-link">{ COPY(source, "update.failed.linkText") }</a>
update-admin-disabled = { COPY(source, "update.adminDisabled") }
update-no-updates-found = { REPLACE(source, "update.noUpdatesFound", about_replacements) }
update-other-instance-handling-updates = { REPLACE(source, "update.otherInstanceHandlingUpdates", about_replacements) }
update-unsupported = { COPY(source, "update.unsupported.start") }<a data-l10n-name="unsupported-link">{ COPY(source, "update.unsupported.linkText") }</a>
update-restarting = { COPY(source, "update.restarting") }
channel-description = { COPY(source, "channel.description.start") }<span data-l10n-name="current-channel">{ $channel }</span> { COPY(source, "channel.description.end", trim: "True") }
warning-desc-version = { REPLACE(source, "warningDesc.version", about_replacements) }
warning-desc-telemetry = { REPLACE(source, "warningDesc.telemetryDesc", about_replacements) }
community-exp = <a data-l10n-name="community-exp-mozilla-link">
    { REPLACE(source, "community.exp.mozillaLink", about_replacements) }</a>
    { COPY(source, "community.exp.middle") }<a data-l10n-name="community-exp-credits-link">
    { COPY(source, "community.exp.creditsLink") }</a>
    { COPY(source, "community.exp.end") }
community-2 = { REPLACE(source, "community.start2", about_replacements) }<a data-l10n-name="community-mozilla-link">
    { REPLACE(source, "community.mozillaLink", about_replacements) }</a>
    { COPY(source, "community.middle2") }<a data-l10n-name="community-credits-link">
    { COPY(source, "community.creditsLink") }</a>
    { COPY(source, "community.end3") }
about-helpus = { COPY(source, "helpus.start") }<a data-l10n-name="helpus-donate-link">
    { COPY(source, "helpus.donateLink") }</a> or <a data-l10n-name="helpus-get-involved-link">
    { COPY(source, "helpus.getInvolvedLink") }</a>
bottom-links-license = { COPY(source, "bottomLinks.license") }
bottom-links-rights = { COPY(source, "bottomLinks.rights") }
bottom-links-privacy = { COPY(source, "bottomLinks.privacy") }
cmd-close-mac-command-key =
    .key = { COPY(source, "cmdCloseMac.commandKey") }
""",
            source=source,
            about_replacements=about_replacements,
        ),
    )
| mozilla/releases-comm-central | python/l10n/tb_fluent_migrations/completed/bug_1816532_about_dialog_migration.py | bug_1816532_about_dialog_migration.py | py | 3,891 | python | en | code | 144 | github-code | 90 |
40573172911 | import logging
import os
import sys
from typing import Union
import colorlog
import colorlog.escape_codes
from trak._info import __version__
# Format used when ANSI colors are available (colorlog escape directives).
COLORLOG_FORMAT = '%(log_color)s%(bold)s%(levelname)s | %(asctime)s | %(name)s: %(thin)s%(message)s%(reset)s'
# Plain fallback format for terminals without color support.
# Fixed: the timestamp placeholder was written '$(asctime)s', which the
# logging %-style formatter would have emitted literally instead of
# substituting the record's timestamp.
UNCOLORED_FORMAT = '%(levelname)s %(asctime)s %(name)s: %(message)s'
def color_is_supported() -> bool:
    """Report whether stdout looks like a terminal that renders ANSI colors.

    On non-Windows platforms any TTY qualifies; on Windows the terminal must
    additionally be one known to understand ANSI escapes (ANSICON, Windows
    Terminal, or the VS Code integrated terminal).
    """
    tty = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
    if sys.platform == 'win32':
        return tty and (
            'ANSICON' in os.environ
            or 'WT_SESSION' in os.environ
            or os.environ.get('TERM_PROGRAM') == 'vscode'
        )
    return tty
def start_logging(level: Union[None, str, int]):
    """Configure root logging to stderr, colorized when the terminal allows it.

    Emits a welcome banner at INFO level once logging is set up.
    """
    common = dict(level=level, stream=sys.stderr)
    if color_is_supported():
        colorlog.basicConfig(format=COLORLOG_FORMAT, **common)
    else:
        logging.basicConfig(format=UNCOLORED_FORMAT, **common)
    logging.info(f'Welcome to Trakmod v{__version__}')
if __name__ == '__main__':
    # Manual smoke test: enable debug-level logging and emit one record.
    start_logging(logging.DEBUG)
    logging.critical('g')
| trakmod/trakmod | trak/internal/logs.py | logs.py | py | 1,029 | python | en | code | 3 | github-code | 90 |
33666888425 | """
Задача 8
Дан список кортежей
grades = [(‘Ann’, 9), (‘John’, 7), (‘Smith’, 5), (‘George’, 6)].
Вывести информацию об оценках по возрастанию в виде:
‘Hello Ann! Your grade is 9’
"""
# from operator import itemgetter
grades = [('Ann', 9), ('John', 7), ('Smith', 5), ('George', 6)]
# grades.sort(key=itemgetter(1))
# grades.sort(key=lambda q: q[1])
for index in range(len(grades)):
value = grades[index]
grades_reverse = (value[1], value[0])
grades[index] = grades_reverse
grades.sort()
for grade in grades:
print("Hello {}! Your grade is {}.".format(grade[1], grade[0]))
| AlesyaKovaleva/IT-Academy-tasks | tasks_4/tuple_8.py | tuple_8.py | py | 680 | python | en | code | 0 | github-code | 90 |
39835526409 | # -*- coding: utf-8 -*-
from xml.dom import ValidationErr
from odoo import api, fields, models
class Property(models.Model):
    """Real-estate property with owner, address, rent and sale information.

    NOTE(review): ``ValidationErr`` is imported from ``xml.dom`` at the top
    of this file; Odoo code normally raises
    ``odoo.exceptions.ValidationError`` so the UI shows a proper message --
    confirm this import is intentional.
    """
    _name = 'realestate.property'
    _description = 'Real Estate Property'
    # Fields
    name = fields.Char(string='Name', required=True)
    description = fields.Text(string='Description')
    # Multiple properties can be owned by one owner
    owner_id = fields.Many2one('realestate.owner', string='Owner', required=True)
    # Property
    type = fields.Selection([
        ('house', 'House'),
        ('apartment', 'Apartment'),
        ('land', 'Land'),
        ('other', 'Other'),
    ], string='Type', required=True)
    area = fields.Float(string='Area m2', required=True)
    status = fields.Selection([
        ('available', 'Available'),
        ('sold', 'Sold'),
        ('rented', 'Rented')
    ], string='Status', required=True)
    available_for_sale = fields.Boolean(string='Available for Sale')
    new_owner_id = fields.Many2one('realestate.owner', string='New Owner')
    available_for_rent = fields.Boolean(string='Available for Rent')
    renter_ids = fields.One2many('realestate.renter', 'property_id', string='Renters')
    # Address (Encapsulable)
    street = fields.Char(string='Street', required=True)
    street2 = fields.Char(string='Street2')
    city = fields.Char(string='City', required=True)
    state_id = fields.Char(string='Estate / Province', required=True)
    zip = fields.Char(string='Zip', required=True)
    country_id = fields.Char(string='Country', required=True)
    # ==== RENT ====
    # Rent the owner agreed
    rent_owner = fields.Float(string='Rent agreed with owner')
    # Total ammount of monthly rent paid by the renters (Calculated)
    rent_renters = fields.Float(string='Total rent paid by renters', compute='_compute_rent_renters')
    # ==== SALE ====
    # Sale owner ask
    sale_owner = fields.Float(string='Sale price asked by owner')
    # Sale new owner bid
    sale_new_owner = fields.Float(string='Sale price offered by new owner')
    # ==== CALCULATED ====
    # Total rent paid by renters: sum of the 'rent' field over all renters.
    @api.depends('renter_ids')
    def _compute_rent_renters(self):
        for record in self:
            record.rent_renters = sum(record.renter_ids.mapped('rent'))
    # ==== VALIDATIONS ====
    # If the property is sold:
    # available_for_sale must be True and owner_id, new_owner_id, sale_owner, sale_new_owner must be set
    # NOTE(review): the ``is None`` tests below compare Odoo field values
    # with identity; Float fields are never None and empty Many2one fields
    # are falsy recordsets, so these checks may never trigger -- verify.
    # The assignment before each raise is rolled back by the exception.
    @api.constrains('status', 'available_for_sale', 'owner_id', 'new_owner_id', 'sale_owner', 'sale_new_owner')
    def _check_sold_status(self):
        for record in self:
            if record.status == 'sold':
                if record.available_for_sale is False or record.owner_id is None or record.new_owner_id is None or record.sale_owner is None or record.sale_new_owner is None:
                    record.status = 'available'
                    raise ValidationErr('If the property is sold, available_for_sale, owner_id, new_owner_id, sale_owner, sale_new_owner must be set')
    # If the property is rented:
    # available_for_rent must be True and owner_id, renter_ids, rent_owner, rent_renters must be set
    @api.constrains('status', 'available_for_rent', 'owner_id', 'renter_ids', 'rent_owner', 'rent_renters')
    def _check_rented_status(self):
        for record in self:
            if record.status == 'rented':
                if record.available_for_rent is False or record.owner_id is None or record.renter_ids is None or record.rent_owner is None or record.rent_renters is None:
                    record.status = 'available'
                    raise ValidationErr('If the property is rented, available_for_rent, owner_id, renter_ids, rent_owner, rent_renters must be set')
    # All prices must be positive
    @api.constrains('rent_owner', 'sale_owner', 'sale_new_owner')
    def _check_prices(self):
        for record in self:
            if record.rent_owner < 0:
                record.rent_owner = 0
                raise ValidationErr('Rent asked by owner must be positive')
            if record.sale_owner < 0:
                record.sale_owner = 0
                raise ValidationErr('Sale price asked by owner must be positive')
            if record.sale_new_owner < 0:
                record.sale_new_owner = 0
                raise ValidationErr('Sale price paid by new owner must be positive')
| AlejandroBelloIglesias/odoo-model-realstate | models/property.py | property.py | py | 4,409 | python | en | code | 0 | github-code | 90 |
18061854189 | import sys
# Fast stdin helpers (competitive-programming boilerplate)
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60  # sentinel "infinity" (unused in this solution)
MOD = 1000000007  # common modulus (unused in this solution)
def main():
    """Read N, K, X, Y from stdin and print the minimum total cost.

    The first K items cost X each; every item beyond the K-th costs Y.
    """
    N, K, X, Y = map(int, read().split())
    total = X * N if N <= K else X * K + (N - K) * Y
    print(total)
    return
# Entry point: solve one test case from stdin when run as a script.
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p04011/s177244822.py | s177244822.py | py | 362 | python | en | code | 0 | github-code | 90 |
46022182170 | import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import glob # 用来读取文件夹中的所有文件
from sklearn.preprocessing import StandardScaler
from scipy.spatial.distance import cdist
from fastdtw import fastdtw
# 定义读取bvh文件中手臂部分数据的函数
def read_arm_data(filename):
    """Read the arm-joint channels from a BVH motion-capture file.

    Parses the MOTION section and returns the per-channel mean of the arm
    columns as this file's feature vector.

    :param filename: path to a .bvh file whose MOTION section is laid out
        as 'MOTION', 'Frames: N', 'Frame Time: T', then one
        whitespace-separated row of channel values per frame.
    :return: 1-D numpy array of length 24 (mean of each arm channel over
        all frames).
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    # Locate the MOTION header; the two bookkeeping lines after it
    # ('Frames:' and 'Frame Time:') are not needed for the feature vector.
    motion_index = lines.index('MOTION\n')
    data_lines = lines[motion_index + 3:]
    data = np.array([list(map(float, line.split())) for line in data_lines])
    # Arm channels occupy columns 27..50 (24 values) in this skeleton
    # layout -- assumed from the original code; confirm against the rig.
    arm_data = data[:, 27:27 + 24]
    # Average over frames -> one feature vector per file.
    return arm_data.mean(axis=0)
# Folder containing the motion-capture files
folder_path = 'D:/Study/NotCleaned/NotCleaned/AbeTomoaki/'
#D:\Dev\AbeTomoaki D:/Study/NotCleaned/NotCleaned/AbeTomoaki/
# Collect every .bvh file in the folder
file_names = glob.glob(folder_path + '*.bvh')
# Accumulate one feature vector per file
feature_vectors = []
# Iterate over all file names
for file_name in file_names:
    # Read the arm channels and reduce them to a per-file feature vector
    feature_vector = read_arm_data(file_name)
    # Append the feature vector to the list
    feature_vectors.append(feature_vector)
# Stack all feature vectors into a 2-D matrix (files x 24 channels)
feature_matrix = np.array(feature_vectors)
# Standardize the feature matrix (zero mean, unit variance per column)
scaler = StandardScaler()
feature_matrix_scaled = scaler.fit_transform(feature_matrix)
# Show the standardized feature matrix
print('Scaled feature matrix:')
print(feature_matrix_scaled)
# Build a k-means clusterer; 4 clusters is an assumption, adjust as needed
kmeans = KMeans(n_clusters=4)
# Fit on the standardized features and keep the per-file labels
cluster_result = kmeans.fit_predict(feature_matrix_scaled)
cluster_label = kmeans.labels_
# Report the clustering outcome
print('Cluster result:', cluster_result)
print('Cluster labels:', kmeans.labels_)
cluster_files = {}
for i, label in enumerate(cluster_label):
    if label not in cluster_files:
        cluster_files[label] = []
    cluster_files[label].append(file_names[i])
# Print the files in each cluster
for cluster, files in cluster_files.items():
    print(f"Files in Cluster {cluster}:")
    for file in files:
        print(file)
# Import matplotlib for plotting
import matplotlib.pyplot as plt
# Scatter plot: x = sample index, y = first feature, color = cluster label
plt.scatter(range(len(feature_matrix[:, 0])), feature_matrix[:, 0], c=cluster_result[:len(feature_matrix[:, 0])]) # keep the color array the same length as x/y
# Title and axis labels
plt.title('Clustering result')
plt.xlabel('Sample index')
plt.ylabel('Feature value')
# Render the figure
plt.show()
| panda697196/ArmsClustering | armcluster.py | armcluster.py | py | 3,557 | python | zh | code | 0 | github-code | 90 |
21331624431 | import pyautogui as p #controle de mouse e teclado
#usar print(p.position()) para encontrar a posição do mouse
# p.sleep(tempo) para dar 2 segundos para posicionar o mouse
# p.sleep(2)
# print(p.position())
# p.moveTo(x=710, y=1056, duration=1)
# p.sleep(1)
# p.click(x=13, y=1068)
p.hotkey('win','r') #combinação de teclas
p.sleep(1)
p.typewrite('notepad', .1)
p.sleep(2)
p.press('enter')
p.sleep(2)
p.typewrite('Oi!! Eu sou um Bot Synnex!', .1)
p.sleep(2)
window = p.getActiveWindow()
window.close()
p.press('right')
p.sleep(2)
p.press('enter')
| qmclouca/RPAPython | Robot01.py | Robot01.py | py | 554 | python | pt | code | 0 | github-code | 90 |
18358050089 | import collections
# Read the problem input: N vertices, M edges, payout P per edge traversal.
N, M, P = [int(_) for _ in input().split()]
ABC = [[int(_) for _ in input().split()] for _ in range(M)]
cd = collections.defaultdict
Ga = cd(set)  # forward adjacency: a -> {b}
Gb = cd(set)  # reverse adjacency: b -> {a}
G = []
ok = cd(int)  # ok[v] == 2 <=> reachable from vertex 1 AND co-reachable from N
for a, b, c in ABC:
    Ga[a].add(b)
    Gb[b].add(a)
#dfs
# One pass forward from vertex 1 and one backward from vertex N; a vertex
# visited by both passes ends up with ok[v] == 2.
for pair in [[1, Ga], [N, Gb]]:
    S = pair
    # NOTE: S aliases pair, so pop() leaves just the start vertex on the stack.
    Gn = pair.pop()
    visited = [0] * (N + 1)
    while S:
        s = S.pop()
        if not visited[s]:
            visited[s] = 1
            ok[s] += 1
            for ns in Gn[s]:
                S += [ns]
# Keep only edges lying on some 1 -> N path; weight P - c turns the
# maximum-score problem into a shortest-path problem.
for a, b, c in ABC:
    if ok[a] == ok[b] == 2:
        G += [(a, b, P - c)]
def bellman_ford(V, G, i):
    """Shortest distances from vertex *i* over the edge list *G*.

    :param V: number of vertices (0..V-1).
    :param G: iterable of (source, target, weight) edges.
    :param i: start vertex.
    :return: list of distances (float('inf') for unreachable vertices), or
        False when a reachable negative cycle exists (distances still
        improve on the final relaxation pass).
    """
    INF = float('inf')
    D = [INF] * V
    D[i] = 0
    for pass_no in range(V):
        update = False
        for s, t, d in G:
            if D[s] != INF and D[t] > D[s] + d:
                D[t] = D[s] + d
                update = True
        if not update:
            return D
        # Fixed: the original tested `_ == V`, which never holds inside
        # range(V), so negative cycles were only signalled implicitly.
        if pass_no == V - 1:
            return False
    return D
# The distance to N is the negated best score; clamp negatives to 0.
# bellman_ford returns a non-list when score is unbounded (negative cycle),
# which makes the indexing below raise -> the except branch prints -1.
a = bellman_ford(N + 1, G, 1)
try:
    print(max(-a[-1], 0))
except:
    print(-1)
| Aasthaengg/IBMdataset | Python_codes/p02949/s068138506.py | s068138506.py | py | 1,030 | python | en | code | 0 | github-code | 90 |
16151381216 | import json
import logging
import os
from collections import OrderedDict
import yaml
from timon.conf.grpby import cnvt_grpby_to_nested_dict
from timon.conf.grpby import cnvt_nested_grpby_to_lst_dict
logger = logging.getLogger(__name__)
# next two vars needed for ordering generated json
# Order in which top-level fields shall show up in the generated json file.
# NOTE: the '_orderered_' spelling (triple 'r') is the established name --
# keep it unless every reference is updated at once.
_orderered_fields = [
    'type',
    'version',
    'workdir',
    'statefile',
    'users',
    'hosts',
    'notifiers',
    'probes',
    'defaults',
    'default_params',
    'host_groups',
]
# Maps field name -> rank for the top-level ordering (unknown fields last).
_field_ord_dict = dict(
    (key, val) for val, key in enumerate(_orderered_fields))
# Sections whose entries get default values auto-completed.
_dict_fields = set(['users', 'hosts', 'notifiers', 'probes'])
class ConfigError(Exception):
    """ raised when the timon configuration is invalid (e.g. missing cert files) """
def mk_cert_info(cert_info):
    """Normalize certificate info.

    None / tuple / list values are returned untouched.  A string (or bytes)
    is treated as the path of a .crt file: if a sibling file with a .key
    suffix exists it is returned as a (crt, key) pair, otherwise the crt
    file is assumed to also contain the key and is returned alone.

    Raises ConfigError when the referenced file(s) do not exist.
    """
    if type(cert_info) not in (bytes, str):
        # Already normalized (or None): hand back unchanged.
        return cert_info
    crt_fname = cert_info
    stem, _ext = os.path.splitext(crt_fname)
    key_fname = stem + '.key'
    if not os.path.isfile(key_fname):
        # No separate key file: assume the cert file also holds the key.
        key_fname = crt_fname
    if not (os.path.isfile(crt_fname) and os.path.isfile(key_fname)):
        if key_fname == crt_fname:
            raise ConfigError("Cert file %r doesn't exist" % crt_fname)
        raise ConfigError("Cert file %r or key file %r doesn't exist"
                          % (crt_fname, key_fname))
    if key_fname == crt_fname:
        return crt_fname
    return (crt_fname, key_fname)
def complete_dflt_vals(cfg):
    """Fill per-entry defaults for every section listed in _dict_fields.

    For each such section (users/hosts/notifiers/probes), copy each value
    from cfg['default_params'][section] into entries that don't set it, and
    make sure every entry carries its own 'name'.  Only one level of
    defaults is applied.
    """
    all_defaults = cfg['default_params']
    for section, entries in cfg.items():
        if section not in _dict_fields:
            continue
        logger.debug("check for %s defaults", section)
        dflts = all_defaults.get(section, {})  # defaults for this section
        logger.info("set defaults for %s", section)
        if dflts:
            logger.debug("defaults %s", dflts)
        for entry_name, entry in sorted(entries.items()):
            logger.debug("%s:%s", section, entry_name)
            # Backfill the entry's name from its key when absent.
            if 'name' not in entry:
                logger.debug("NAME = %r", entry_name)
                entry['name'] = entry_name
            for dkey, dval in dflts.items():
                if dkey in entry:
                    continue
                entry[dkey] = dval
                logger.debug("%r = %r", dkey, dval)
def complete_schedules(cfg):
    """Store each schedule's dict key as its 'name' field."""
    for sched_name in cfg['schedules']:
        cfg['schedules'][sched_name]['name'] = sched_name
def complete_probes(cfg):
    """Complete every probe: set a missing 'probe' field from its key and
    backfill any default probe parameters that are not set explicitly."""
    defaults = cfg['default_params'].get('probes', {})
    for name, probe in cfg['probes'].items():
        probe.setdefault('probe', name)
        for key, val in defaults.items():
            probe.setdefault(key, val)
def complete_hosts(cfg):
    """ completes all potentially required params for each host:
        normalizes the host's probe list (missing -> defaults, single
        string -> one-element list, names -> probe dicts), gives every
        probe a host-unique name, merges in per-host probe_params, and
        normalizes the optional client_cert via mk_cert_info.
    """
    dflt = cfg.get('defaults', {})  # default inst params
    dflt_probes = dflt.get('probes', [])
    # dflt_schedule = dflt.get('schedule', None)
    # dflt_notifiers = dflt.get('notifiers', [])
    probes = dict(cfg['probes'])
    hosts = cfg['hosts']
    # schedules = cfg['schedules']
    for host in hosts.values():
        if 'probes' not in host:
            host['probes'] = list(dict(probe=probe) for probe in dflt_probes)
            logger.debug("no probes specified for host %s. will use %r",
                         host['name'], host['probes'])
        hprobes = host['probes']
        if hprobes is None:
            logger.debug("hprobes is None")
            hprobes = []
        if type(hprobes) in (str,):  # if only one probe conv to list of one
            hprobes = [hprobes]
        # if just names were include convert to dict
        # logger.debug("probes[%s]: %r", host['name'], hprobes)
        hprobes = [dict(probes[probe]) if type(probe) in (str,)
                   else probe for probe in hprobes]
        # logger.debug("probes[%s]: %r", host['name'], hprobes)
        # set unique name + add default values for non existing keys
        host_probe_params = host.get('probe_params') or {}
        for probe in hprobes:
            assert isinstance(probe, dict)
            probe_name = probe['probe']
            probe['name'] = host['name'] + "_" + probe_name
            # Merge order: base probe definition < host-level overrides
            # (the update/update dance keeps the host's values winning),
            # then per-host probe_params override everything.
            updated_probe = dict(probes[probe_name])
            updated_probe.update(probe)
            probe.update(updated_probe)
            probe_params = host_probe_params.get(probe_name) or {}
            probe.update(probe_params)
        logger.debug("probes[%s]: %r", host['name'], hprobes)
        host['probes'] = hprobes
        if 'client_cert' not in host:
            host['client_cert'] = None
        else:
            host['client_cert'] = mk_cert_info(host['client_cert'])
def mk_all_probes(cfg):
    """Collect every host's probe dicts into cfg['all_probes'], keyed by
    the probe's unique name.

    Host entries keep only the probe names; each probe dict gains a
    'host' back-reference.  Hosts are processed in sorted order so the
    registry order is deterministic.
    """
    registry = OrderedDict()
    cfg['all_probes'] = registry
    for host_name, host in sorted(cfg['hosts'].items()):
        probe_dicts = host['probes']
        host['probes'] = [p['name'] for p in probe_dicts]
        for probe in probe_dicts:
            probe['host'] = host_name
            registry[probe['name']] = probe
# TODO: remove: function seens used nowhere
def __setifunset(adict, key, val):
    """ sets adict[key] = val only when *key* is not present yet """
    # Fixed: the original used the literal string 'key' for both the
    # membership test and the assignment instead of the key parameter.
    if key not in adict:
        adict[key] = val
def mk_ordered_dict(adict):
    """Return a key-sorted OrderedDict copy of *adict*, recursing into any
    nested dict values so the whole tree is ordered."""
    ordered = OrderedDict()
    for key in sorted(adict):
        val = adict[key]
        ordered[key] = mk_ordered_dict(val) if isinstance(val, dict) else val
    return ordered
def order_cfg(cfg):
    """Return cfg as an OrderedDict with a stable, predictable layout.

    Sub-dicts are key-sorted in place; the top level follows the order of
    _orderered_fields (unknown keys sort last, alphabetically) so the
    generated config file diffs nicely.
    """
    # Sort the lower levels of the config by key.
    for key, val in cfg.items():
        if isinstance(val, dict):
            cfg[key] = mk_ordered_dict(val)

    # Rank top-level keys by the predefined field order, then name.
    def rank(item):
        key = item[0]
        return (_field_ord_dict.get(key, len(_field_ord_dict)), key)

    return OrderedDict(sorted(cfg.items(), key=rank))
def apply_config(options):
    """ applies the configuration.
        This is not much more than reading the yaml file,
        applying defaults and save it as json file
        However timon.config will have a config file, which is more
        uniform than the human written config file. Many default values
        are explicitely set and hopefully, this will accelerate and
        simplify the run code as it has to handle less
        exceptions / fallbacks to defaults

        :param options: parsed CLI options; fields used here are
            check, workdir, fname and statefile.
    """
    do_check = options.check
    workdir = options.workdir
    cfgname = os.path.join(workdir, options.fname)
    logger.debug('starting to read config from %s', cfgname)
    with open(cfgname) as fin:
        cfg = yaml.safe_load(fin)
    # NOTE(review): this uses the root logger (logging.info) while the rest
    # of the module uses the module logger -- probably unintentional.
    logging.info('read config from %s', cfgname)
    # determine workdir from config
    workdir = os.path.realpath(os.path.join(workdir, cfg.get('workdir', '.')))
    logger.debug("workdir: %r", workdir)
    cfg['workdir'] = workdir
    statefile = os.path.join(workdir, cfg.get('statefile', 'timon_state.json'))
    # CLI-provided statefile wins over the config's value.
    cfg['statefile'] = options.statefile or statefile
    if do_check:
        print("CHECK_CFG not implemented so far")
        return
    # Optional web-interface host grouping.
    if "webif" in cfg:
        if "group_by" in cfg["webif"]:
            rslt = cnvt_grpby_to_nested_dict(
                cfg["webif"]["group_by"], cfg["hosts"])
            rslt = cnvt_nested_grpby_to_lst_dict(
                rslt, cfg["webif"]["group_by"])
            cfg["host_group"] = rslt
    # set abspath for work dir
    int_conf_fname = os.path.join(workdir, 'timoncfg_state.json')
    # Expand all defaults so the runtime code sees a fully explicit config.
    complete_dflt_vals(cfg)
    complete_schedules(cfg)
    complete_probes(cfg)  # default probes
    complete_hosts(cfg)
    mk_all_probes(cfg)
    cfg = order_cfg(cfg)
    # dump to file
    with open(int_conf_fname, 'w') as fout:
        json.dump(cfg, fout, indent=1)
| feenes/timon | timon/configure.py | configure.py | py | 8,885 | python | en | code | 0 | github-code | 90 |
31951004757 |
import json
import re
from terminaltables import AsciiTable
# Global tally of how many times each hash value has been seen.
counts = {}


def do_inc(val):
    """Increment the global occurrence count for *val* (first sight -> 1)."""
    # dict.get collapses the original if/else; mutating the module-level
    # dict needs no `global` declaration.
    counts[val] = counts.get(val, 0) + 1
# Load the per-entry hash lists: {entry_path: [hash, ...]}.
with open('sbom-hashes.json') as fh:
    rpt = json.load(fh)

# Tally how often each hash appears across all entries.
for k in rpt.keys():
    v = rpt[k]
    for h in v:
        do_inc(h)

# Collect hashes that occur more than once (printed ascending by count).
fr = {}
for c in sorted(counts.items(), key=lambda x: x[1]):
    if c[1] > 1:
        fr[c[0]] = 1
        print("{}: {}".format(c[0], c[1]))

print(' -- unwind')
# Map each duplicated hash back to the entries that contain it.
newfr = {}
for x in rpt:
    for k in fr:
        # BUG FIX: the original tested `rpt[x] is not []`, which is always
        # true (identity comparison against a fresh list object); a plain
        # truthiness check expresses the intent and skips empty entries.
        if rpt[x]:
            if k in rpt[x]:
                if k not in newfr:
                    newfr[k] = []
                newfr[k].append(x)

print('Total hashes: {}'.format(len(counts)))
print(' -- unwind fr')
# Report hashes shared by more than one producer (inputs/<producer>/...).
for r in newfr:
    prodlist = newfr[r]
    npl = set()
    for p in prodlist:
        producer = re.match(r'inputs/(.+?)/', p)
        if producer:
            npl.add(producer.group(1))
    if len(npl) > 1:
        print("{}: {}".format(r, npl))
| mwhitecoverity/sbom-tools | sbom-hash-incidence.py | sbom-hash-incidence.py | py | 1,069 | python | en | code | 5 | github-code | 90 |
19858623398 | # main.py
import os
from config.logger_config import setup_logger
from input_module.epub import process_epub
from input_module.other import process_srt
# Configure the shared application logger.
logger = setup_logger()
def main():
    """Interactive entry point.

    Validates the (currently hard-coded) input file, asks the user which
    translation provider to use, and dispatches to the matching processor
    (EPUB or SRT).
    """
    # Input file is hard-coded for now (Traditional-Chinese test EPUB).
    epub_filename = r"test file/繁中调试文件.epub"
    file_type = os.path.splitext(epub_filename)[1].lstrip('.').lower()
    supported_file_types = ['epub', 'srt']
    if file_type not in supported_file_types:
        print(f"当前只支持 {', '.join(supported_file_types).upper()} 文件类型。")
        return
    if not os.path.exists(epub_filename):
        print("指定的文件不存在。")
        return
    # Traditional -> Simplified Chinese by default.
    source_lang = "zh-tw"
    target_lang = "zh-cn"
    # Ask the user which translation backend to use.
    print("请选择翻译服务提供商:")
    print("1. zhconv (繁体中文至简体中文)")
    print("2. OpenAI (多语言翻译)")
    print("3. 有道翻译 (多语言翻译)")
    provider_choice = input("请输入选项(1/2/3): ")
    if file_type == 'epub':
        process_epub(epub_filename, source_lang, target_lang, provider_choice)
    elif file_type == 'srt':
        process_srt(epub_filename, source_lang, target_lang, provider_choice)
if __name__ == "__main__":
    # Emit one message per level so the logger configuration can be
    # verified before the interactive workflow starts.
    logger.debug("调试信息")
    logger.info("普通输出信息")
    logger.warning("警告信息")
    logger.error("错误信息")
    main()
| Hellohistory/Machine_Translation_ebook | main.py | main.py | py | 1,427 | python | zh | code | 3 | github-code | 90 |
26852806310 | import os
import sys
import shutil
import re
from datetime import datetime
import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
import yaml
from astropy.io import fits
from astropy.wcs import WCS
from shutil import which
# Which data set to process; swap the comments to change it.
set_type ='evaluation'
#set_type ='development_small'
#set_type= 'debug'
# Whether to run the pyFAT kinematic-fitting stage after SoFiA.
run_fat = False
# Per-set directory and cube base name.
# NOTE(review): `type` shadows the builtin; renaming would touch every use
# below, so it is only flagged here.
type = {'evaluation': {'dir': 'evaluation', 'file': 'sky_eval'},
        'development_small': {'dir': 'development_small', 'file': 'sky_dev'},
        'debug': {'dir': 'debug', 'file': 'test'},
        }
main_dir = './'
data_parameters = './parameters/data.yml'
param_development_small = './parameters/sofia_dev_small.par'
# Preliminary cube path; overwritten below once data.yml has been read.
fitsfile = f'''{main_dir}/{type[set_type]['dir']}/{type[set_type]['file']}.fits'''
with open(data_parameters, "r") as f:
    data_yml = yaml.load(f, Loader=yaml.FullLoader)
fitsfile = f'''{main_dir}/{data_yml['data_path']}/{type[set_type]['dir']}/{type[set_type]['file']}.fits'''
data_path = f'''{main_dir}/{data_yml['data_path']}/{type[set_type]['dir']}'''
if not os.path.isdir(f'''{main_dir}/{data_yml['data_path']}'''):
    os.mkdir(f'''{main_dir}/{data_yml['data_path']}''')
results_path = f'''{main_dir}/results'''
dev_small_cat = f'''{results_path}/{set_type}_small_cat.txt'''
final_cat = f'''{results_path}/final_catalogue_{set_type}.csv'''
###### Constants to use
f0 = 1420405751.786  # HI rest frequency [Hz]
c = 299792.458  # speed of light [km/s] (converted to m/s where needed)
# Functions
def is_tool(name):
    """Return True when an executable called *name* can be found on PATH."""
    found = which(name)
    return found is not None
def download_data(data_parameters, type = 'debug', force=False):
    """Fetch the data files for the requested set type via wget.

    ``force=True`` wipes the previously downloaded data first; otherwise the
    download is skipped when the cube already exists.  Uses the module-level
    ``data_path``, ``fitsfile`` and ``data_yml`` globals.
    """
    if force == True:
        shutil.rmtree(data_path)
    if not os.path.isdir(data_path):
        os.mkdir(data_path)
    else:
        if os.path.isfile(fitsfile):
            print(f'There is no need to download {fitsfile} as it already exists')
            # This could be expanded to check for the readme and continuum
            return
    for filename in data_yml['download_locations'][type]['files']:
        pathname = data_yml['download_locations'][type]['path']
        # NOTE(review): "(unknown)" below looks like a redacted placeholder —
        # presumably it should interpolate {filename}; confirm before use.
        command = f'wget --no-check-certificate "{pathname}download?path=%2F&files=(unknown)" -O {data_path}/(unknown)'
        print(command)
        os.system(command)
def run_sofia(parameters, outputdir):
    """Run SoFiA with *parameters*, only if the output catalog does not exist.

    The parameter file is moved into the output directory afterwards so a
    later run can detect (and skip) the finished job.
    """
    # It makes sense to not run this when the results exist, but a check on
    # an existing catalog might be better.
    if not os.path.isfile(os.path.join(results_path, outputdir,f'{outputdir}_cat.txt')):
        if not os.path.isdir(os.path.join(results_path, outputdir)):
            os.mkdir(os.path.join(results_path, outputdir))
        # 'sofia2' vs 'sofia' covers dual installations of SoFiA versions;
        # a proper version check would be better.
        if is_tool('sofia2'):
            os.system(f"sofia2 {parameters}")
        elif is_tool('sofia'):
            os.system(f"sofia {parameters}")
        else:
            print('sofia not available. Please install Sofia-2')
            sys.exit(1)
        command = f'mv {parameters} {os.path.join(results_path, outputdir)}/sofia_input_parameters.par'
        print(command)
        os.system(command)
    else:
        print(f'''We have already found the catalogue {os.path.join(results_path, outputdir,f'{outputdir}_cat.txt')}, continuing to process.''' )
    return
def read_sofia_header(filename):
    """Return the column names from a SoFiA catalogue header.

    SoFiA writes the column names on line 11 (index 10) of the catalogue as
    ``#  name1  name2 ...``; the leading ``#`` token is dropped.

    :raises IndexError: if the file has fewer than 11 lines (same exception
        type the original ``readlines()[10]`` raised).
    """
    with open(filename, 'r') as f:
        # Stop at line 11 instead of slurping the whole file into memory.
        for lineno, line in enumerate(f):
            if lineno == 10:
                head_line = line
                break
        else:
            raise IndexError(f'{filename} has fewer than 11 lines')
    # Raw string for the regex; [1:] removes the leading '#' token.
    head = re.split(r'\s+', head_line.strip('\n'))[1:]
    return head
def sofia2cat(catalog):
    """Load a SoFiA catalogue file into a DataFrame.

    Rows are sorted by descending integrated flux and filtered to sources
    with a positive kinematic position angle.
    """
    head = read_sofia_header(catalog)
    raw_cat = pd.read_csv(catalog, delim_whitespace=True, header=None, names=head, comment='#')
    raw_cat.sort_values(by='f_sum', ascending=False, inplace=True)
    # kin_pa < 0 marks sources without a usable kinematic angle.
    raw_cat_filtered = raw_cat[raw_cat['kin_pa']>0]
    print('Sofia raw catalog filtered:')
    # The spectral column is 'freq' or 'v_app' depending on the cube's axis.
    if 'freq' in raw_cat_filtered:
        print(raw_cat_filtered[['x', 'y', 'ell_maj', 'ell_min', 'f_sum', 'freq', 'kin_pa', 'w20']])
    elif 'v_app' in raw_cat_filtered:
        print(raw_cat_filtered[['x', 'y', 'ell_maj', 'ell_min', 'f_sum', 'v_app', 'kin_pa', 'w20']])
    return raw_cat_filtered
def pix2coord(wcs, x, y):
    """Convert pixel coordinates to (RA, Dec) in degrees via *wcs*."""
    sky = wcs.pixel_to_world(x, y, 1)
    first = sky[0]
    return first.ra.deg, first.dec.deg
def compute_inclination(bmaj, bmin):
    """Return arctan2(bmin, bmaj) expressed in degrees.

    NOTE(review): for an intrinsically circular disc the textbook estimate
    would be arccos(bmin/bmaj); this arctan-based variant is kept unchanged
    to preserve existing behaviour — confirm which is intended.
    """
    return np.degrees(np.arctan2(bmin, bmaj))
def convert_units(raw_cat, fitsfile):
    """Convert catalogue pixel quantities into world units.

    :param raw_cat: SoFiA catalogue (needs 'x' and 'y' columns).
    :param fitsfile: cube whose WCS defines the conversion.
    :return: tuple ``(ra_deg, dec_deg, pix2arcsec, pix2freq)`` where
        ``pix2arcsec`` is the pixel size in arcsec and ``pix2freq`` the
        channel width (header CDELT3).
    """
    # BUG FIX: the original read f[0].header *after* f.close(); grab
    # everything while the file is open and let the context manager close it.
    with fits.open(fitsfile) as f:
        header = f[0].header
        wcs = WCS(header)
        pix2freq = header['CDELT3']
    # Convert x,y in pixels to R.A.,Dec. in deg
    ra_deg, dec_deg = pix2coord(wcs, raw_cat['x'], raw_cat['y'])
    # Pixel size in arcsec; assumes the same pixel size in both directions.
    pix2arcsec = wcs.wcs.get_cdelt()[1] * 3600.
    return ra_deg, dec_deg, pix2arcsec, pix2freq
def frequency_to_vel(freq, invert=False):
    """Relativistic HI frequency <-> velocity conversion.

    Forward (invert=False): frequency [Hz] -> velocity [km/s].
    Inverse (invert=True): velocity [km/s] -> frequency [Hz].
    Uses the module-level constants ``f0`` (HI rest frequency) and ``c``.
    """
    if invert:
        return f0 * np.sqrt((1 - freq / c) / (1 + freq / c))
    return c * ((f0 ** 2 - freq ** 2) / (f0 ** 2 + freq ** 2))
def convert_flux(flux,filename):
    """Convert a SoFiA summed flux (Jy/beam per pixel-channel) into an
    integrated flux.

    NOTE(review): despite the original claim of Jy*km/s elsewhere, the value
    returned here is in Jy*Hz — the channel width CDELT3 is taken straight
    from the header in Hz.
    """
    hdr = fits.getheader(filename)
    print(hdr['BMAJ'],hdr['BMIN'])
    # Gaussian beam area in deg^2, then pixels per beam.
    beamarea=(np.pi*abs(hdr['BMAJ']*hdr['BMIN']))/(4.*np.log(2.))
    pix_per_beam = beamarea/(abs(hdr['CDELT1'])*abs(hdr['CDELT2']))
    #cdelt_vel = abs(-c*float(hdr['CDELT3'])/f0)
    cdelt_hz = float(hdr['CDELT3'])
    return flux/pix_per_beam*cdelt_hz  # Jy * Hz
# Convert the frequency axis of a cube
def convert_frequency_axis(filename, outname, velocity_req = 'radio'):
    """Rewrite the spectral (frequency) axis of a FITS cube as velocity.

    ``velocity_req='radio'`` writes a linear VRAD axis; ``'relativistic'``
    writes a VELO axis (only valid for cubes with a small velocity range).
    The converted cube is written to *outname*; axis units are m/s.
    """
    c_ms = c * 1000.
    print(filename)
    with fits.open(filename) as cube:
        hdr = cube[0].header
        # Check we have a proper third axis
        if hdr['CTYPE3'].lower() != 'freq' or hdr['NAXIS'] < 3:
            print('We can not convert this axis as it is not a frequency axis')
            return
        # get central values
        crpix = float(hdr['CRPIX3'])
        crval = float(hdr['CRVAL3'])
        naxis_len = float(hdr['NAXIS3'])
        # Re-centre the reference pixel to limit conversion errors.
        # NOTE(review): this guard fires when CRPIX3 is *already* within 5
        # pixels of the centre, which looks inverted — confirm the intent.
        if naxis_len / 2. - 5 < crpix < naxis_len / 2. + 5:
            hdr_wcs = WCS(hdr)
            # BUG FIX: astropy's method is wcs_pix2world (`pix2world` raised
            # AttributeError) and `new_pix` was never assigned (NameError).
            new_pix = naxis_len / 2.
            centralx, centraly, new_freq = hdr_wcs.wcs_pix2world(
                [[hdr['CRPIX1'], hdr['CRPIX2'], new_pix]], 1)[0]
            hdr['CRPIX3'] = new_pix
            crval = new_freq
        # Now convert
        if velocity_req == 'radio':
            # Radio convention: v = c * (1 - f/f0), linear in frequency.
            cdelt_vel = -c_ms * float(hdr['CDELT3']) / f0
            crval_vel = c_ms * (1 - crval / f0)
            # https://fits.gsfc.nasa.gov/standard40/fits_standard40aa-le.pdf
            hdr['CTYPE3'] = 'VRAD'
        elif velocity_req == 'relativistic':
            # This should only ever be used for cubes with a small velocity
            # range: the relativistic mapping is non-linear, so one CDELT3 is
            # only approximately valid.
            crval_vel = frequency_to_vel(crval)
            freq_step = float(hdr['CDELT3'])
            # BUG FIX: the original mixed `freq_step` with an undefined
            # `freqstep`, raising NameError on this path.
            central_two = frequency_to_vel(crval + freq_step)
            lower_one = frequency_to_vel(crval - (naxis_len / 2.) * freq_step)
            lower_two = frequency_to_vel(crval - (naxis_len / 2. + 1) * freq_step)
            upper_one = frequency_to_vel(crval + (naxis_len / 2. - 1.) * freq_step)
            upper_two = frequency_to_vel(crval + (naxis_len / 2.) * freq_step)
            # Average slope over centre and both edges, converted km/s -> m/s.
            # NOTE(review): crval_vel is left in km/s here (no *1000.) while
            # cdelt_vel is m/s — kept as in the original; confirm units.
            cdelt_vel = np.mean([central_two - crval_vel,
                                 lower_two - lower_one,
                                 upper_two - upper_one]) * 1000.
            if cdelt_vel * naxis_len > 1e6:
                print('This cube is too big for a relativistic conversion')
                return
            hdr['CTYPE3'] = 'VELO'
        else:
            print('We dont do those things here.')
            return
        hdr['CDELT3'] = cdelt_vel
        hdr['CRVAL3'] = crval_vel
        if 'CUNIT3' in hdr:
            # delete cunit3 because we adopt the default units = m/s
            del hdr['CUNIT3']
        fits.writeto(outname, cube[0].data, hdr, overwrite = True)
def process_catalog(raw_cat, fitsfile):
    """Convert SoFiA pixel-based measurements into the final catalogue.

    Returns a DataFrame with the challenge columns: id, ra, dec, hi_size,
    line_flux_integral, central_freq, pa, i, w20.
    """
    # Unit conversion
    ra_deg, dec_deg, pix2arcsec,pix2vel = convert_units(raw_cat, fitsfile)
    hi_size = raw_cat['ell_maj']*pix2arcsec
    # Estimate inclination based on fitted ellipsoid, assuming the galaxy is intrinsically circular
    inclination = compute_inclination(raw_cat['ell_maj'], raw_cat['ell_min'])
    # Construct the output catalog
    processed_cat = pd.DataFrame()
    processed_cat['id'] = raw_cat['id']
    processed_cat['ra'] = ra_deg
    processed_cat['dec'] = dec_deg
    processed_cat['hi_size'] = hi_size
    processed_cat['line_flux_integral'] = convert_flux(raw_cat['f_sum'],fitsfile) # Now converted to Jy*km/s verifcation for developments needed
    # The spectral column depends on the cube's axis type.
    if 'freq' in raw_cat:
        processed_cat['central_freq'] = raw_cat['freq']
        #processed_cat['central_velocity'] = frequency_to_vel(raw_cat['freq'])
        processed_cat['w20'] = frequency_to_vel(raw_cat['freq']-raw_cat['w20']/2.*pix2vel)-frequency_to_vel(raw_cat['freq']+raw_cat['w20']/2.*pix2vel) # we need to clarify if the units and the definition is the same
    else:
        #processed_cat['central_velocity'] = raw_cat['v_app']
        processed_cat['central_freq'] = frequency_to_vel(raw_cat['v_app'],invert=True)
        processed_cat['w20'] = raw_cat['w20']*pix2vel
    # we need to clarify if what sofia gives is the central freq
    processed_cat['pa'] = raw_cat['kin_pa'] # we need to clarify if Sofia kinematic angle agrees with their P.A.
    processed_cat['i'] = inclination
    processed_cat.reset_index(drop=True, inplace=True)
    # This is just to set the right order of the output columns
    processed_cat = processed_cat[['id', 'ra', 'dec', 'hi_size', 'line_flux_integral', 'central_freq', 'pa', 'i', 'w20']]
    return processed_cat
def prepare_parameters(parameters=param_development_small, type ='debug'):
    """Build the SoFiA parameter file for this run.

    Reads the template at *parameters*, points input/output paths at the
    current cube and results directory, and writes the result to
    ``results/sofia_settings.par``.
    """
    # BUG FIX: the original ignored the `parameters` argument and always
    # read the module-level default template.
    parameters_in = read_sofia_parameters(parameters)
    parameters_in['input.data'] = f'{fitsfile}'
    parameters_in['output.directory'] = f'{results_path}/{type}'
    parameters_in['output.filename'] = f'{type}'
    if not os.path.isdir(results_path):
        os.mkdir(results_path)
    write_sofia_parameters(parameters_in, f'{results_path}/sofia_settings.par')
def write_sofia_parameters(template, name, debug = False):
    """Write a parameter dict (as built by read_sofia_parameters) to *name*.

    Keys named ``EMPTY<i>``/``HASH<i>`` hold verbatim blank/comment lines
    and are written back unchanged; every other key is written as
    ``key = value``.
    """
    with open(name, 'w') as file:
        for key in template:
            # BUG FIX: the original matched any key starting with 'E' or 'H',
            # which would corrupt real parameters with those initials; match
            # the sentinel prefixes explicitly instead.
            if key.startswith(('EMPTY', 'HASH')):
                file.write(template[key])
            else:
                file.write(f"{key} = {template[key]}\n")
def read_sofia_parameters(filename, debug = False):
    """Parse a SoFiA parameter file into a dict.

    Blank lines are stored under ``EMPTY<i>`` keys and comment lines under
    ``HASH<i>`` keys (verbatim, including the newline) so that the file can
    be round-tripped by write_sofia_parameters.
    """
    with open(filename, 'r') as f:
        lines = f.readlines()
    parsed = {}
    n_empty = 0
    n_hash = 0
    for line in lines:
        key = str(line.split('=')[0].strip())
        if key == '':
            parsed[f'EMPTY{n_empty}'] = line
            n_empty += 1
        elif key.startswith('#'):
            parsed[f'HASH{n_hash}'] = line
            n_hash += 1
        else:
            parsed[key] = str(line.split('=')[1].strip())
    return parsed
def organize_sofia(catalog,convert= True, type='debug'):
    """Stage SoFiA cubelets for pyFAT.

    Copies (or frequency->velocity converts, when ``convert`` is True) each
    source cubelet into ``interim/sofia_<id>/`` and writes the matching
    pyFAT input catalogue (pipe-separated: number|Distance|Directoryname|
    Cubename, with a header row).
    """
    fat_catalog = {'id': ['number'], 'dist': ['Distance'], 'dir': ['Directoryname'], 'cube': ['Cubename']}
    #sofia_output = ['spec.txt','chan.fits','mom0.fits','mom1.fits','mom2.fits','mask.fits','cube.fits']o
    sofia_output = ['cube.fits']
    for source in catalog['id']:
        if not os.path.isdir(f'{main_dir}/interim//sofia_{source}'):
            os.mkdir(f'{main_dir}/interim//sofia_{source}')
        # Move all sofia output to a proper directory
        for file in sofia_output:
            if convert:
                convert_frequency_axis(f'{results_path}/{type}/{type}_cubelets/{type}_{source}_{file}',\
                    f'{main_dir}/interim/sofia_{source}/{type}_{source}_{file}')
            else:
                command= f'cp -f {results_path}/{type}/{type}_cubelets/{type}_{source}_{file} {main_dir}/interim/sofia_{source}/{type}_{source}_{file}'
                os.system(command)
        # Distance -1 lets pyFAT derive it itself.
        fat_catalog['id'].append(source)
        fat_catalog['dist'].append('-1')
        fat_catalog['dir'].append(f'sofia_{source}')
        fat_catalog['cube'].append(f'{type}_{source}_cube')
    with open(f'{main_dir}/interim/fit_catalogue.txt','w') as f:
        for i in range(len(fat_catalog['id'])):
            f.write(f'''{fat_catalog['id'][i]}|{fat_catalog['dist'][i]}|{fat_catalog['dir'][i]}|{fat_catalog['cube'][i]}\n''')
def fat_configuration(filename,type='debug'):
    """Rewrite the pyFAT template config for this run.

    Reads the template at *filename*, repoints the catalogue/maindir/output
    settings at the interim directory and writes the result to
    ``interim/FAT_INPUT.config`` (header line first, then the edited body).
    """
    with open(filename,'r') as f:
        template = f.readlines()
    with open(f'{main_dir}/interim/FAT_INPUT.config','w') as f:
        f.write(f'#This is the configuration file for fit {type} at {datetime.now()} \n')
    with open(f'{main_dir}/interim/FAT_INPUT.config','a') as f:
        for line in template:
            setting = line.split('=')[0].strip()
            if setting == 'catalogue':
                line = f'catalogue = {main_dir}/interim/fit_catalogue.txt \n'
            elif setting == 'maindir':
                line = f'maindir = {main_dir}/interim/ \n'
            elif setting == 'outputcatalogue':
                line = f'outputcatalogue={main_dir}/interim/fat_results.txt \n'
            elif setting == 'outputlog':
                line = f'outputlog = log.txt \n'
            f.write(line)
def main():
    """End-to-end pipeline: download data, run SoFiA, post-process its
    catalogue into the final format and (optionally) launch pyFAT fitting."""
    download_data(data_parameters, type= set_type, force=False)
    prepare_parameters(parameters=param_development_small, type = set_type)
    run_sofia(parameters=f'{results_path}/sofia_settings.par',
              outputdir= set_type)
    raw_cat = sofia2cat(catalog=os.path.join(results_path, set_type,f'{set_type}_cat.txt'))
    processed_cat = process_catalog(raw_cat, fitsfile)
    print(processed_cat)
    print(f'This catalog is being saved in: {final_cat}')
    # One decimal place for the frequency column in the exported catalogue.
    processed_cat['central_freq'] = processed_cat['central_freq'].map('{:.1f}'.format)
    processed_cat.to_csv(final_cat, sep=' ', index=False)
    if run_fat:
        if not os.path.isdir(f'{main_dir}/interim'):
            os.mkdir(f'{main_dir}/interim')
        # Cubelets on a frequency axis need conversion for pyFAT.
        convert = False
        if 'freq' in raw_cat:
            convert = True
        organize_sofia(processed_cat,convert= convert, type=set_type)
        fat_configuration('./parameters/FAT_INPUT.config',type=set_type)
        command = f'pyFAT -t -c {main_dir}/interim/FAT_INPUT.config'
        print(command)
        os.system(command)

if __name__ == "__main__":
    main()
| jmoldon/verification_sdc2 | scripts/analysis.py | analysis.py | py | 14,308 | python | en | code | 0 | github-code | 90 |
72836187818 | import numpy as np
import os
import time
import h5py
import random
import matplotlib.pyplot as plt
import collections
import utils
def data_gen(config):
    """Yield validation batches from the HDF5 file at ``config.val_file``.

    Each yield is ``(audios, act, features, n_batches)`` where audios get a
    trailing channel axis, activations are shifted by 1e-15 and normalised
    by their per-track maximum, and features are scaled by ``max_feats``
    with columns 19-20 dropped.  Samples whose activation sums to zero are
    excluded.
    """
    hdf5_file = h5py.File(config.val_file, mode='r')
    audios = hdf5_file["waveform"]
    # The 'spec' model variant uses the re-derived activations.
    if config.model=="spec":
        act = hdf5_file["new_act"]
    else:
        act = hdf5_file["act"]
    features = hdf5_file["features"]
    # Precomputed per-feature maxima used for normalisation (33 values,
    # matching the feature columns before the 19:21 slice removal).
    max_feats = [ 1.       ,  1.       ,  1.       ,  1.       ,  1.       ,
        1.       ,  1.       ,  1.       ,  1.       ,  1.       ,
        1.       ,  1.       , 69.57681  , 67.66642  , 80.19115  ,
        71.689445 , 61.422714 , 100.      , 71.406494 , 32.789112 ,
        1.       , 85.24432  , 67.71172  ,  2.491137 ,  0.5797179,
        87.83693  , 69.738235 , 71.95989  , 82.336105 , 75.53646  ,
        71.00043  , 100.      , 81.7323   ]
    in_indecis = np.arange(len(features))
    # Drop samples whose activation is entirely zero.
    # NOTE(review): assumes act is 3-D (sample, time, channel) given the
    # double axis=1 sum and the axis=-2 max below — confirm against writer.
    act_sum = act[()].sum(axis=1).sum(axis=1)
    remove_indecis = np.argwhere(act_sum==0)
    in_indecis = np.array([x for x in np.arange(len(features)) if x not in remove_indecis])
    for i, idx_batch in enumerate(np.arange(int(len(in_indecis)/config.batch_size))):
        i_start = i * config.batch_size
        i_end = min([(i + 1) * config.batch_size, len(in_indecis)])
        indecis = in_indecis[i_start:i_end]
        # h5py fancy indexing needs a plain list of indices.
        indecis = [x for x in indecis]
        out_audios = audios[indecis]
        out_act = act[indecis]
        # Small offset avoids division by zero in the normalisation.
        out_act+=1e-15
        out_act = out_act/out_act.max(axis=-2)[:,np.newaxis,:]
        out_features = features[indecis]/max_feats
        # Drop feature columns 19 and 20.
        out_features = np.concatenate((out_features[:,:19], out_features[:,21:]), axis = 1)
        yield np.expand_dims(out_audios, -1), out_act, out_features, int(len(in_indecis)/config.batch_size)
| aframires/drum-loop-synthesis | data_pipeline.py | data_pipeline.py | py | 1,804 | python | en | code | 20 | github-code | 90 |
18655445578 | #! /usr/bin/env python3
import os
from enum import Enum, auto
import numpy as np
import hebi
import rospy
from rospy.timer import TimerEvent
from nav_msgs.msg import Odometry
from microstrain_inertial_msgs.msg import FilterHeading
from geometry_msgs.msg import Twist, PoseStamped
from std_srvs.srv import Trigger, TriggerRequest, SetBool
from std_msgs.msg import Float64, ColorRGBA, Bool
from gps_navigation.msg import Sample
from gps_navigation.srv import SetString, GetSamples, SaveSample, SaveNIRSample, GetNIRSample
from neospectra.msg import NirReading
# Auger "home" (fully retracted) z-offset in metres.
AUGER_HOME_Z = 0.2

def set_imu_zupts():
    # Placeholder: IMU zero-velocity-update configuration is not implemented
    # yet; intentionally a no-op.
    pass
def build_sample(location, sample_type, value=np.nan):
    """Create a time-stamped Sample message of *sample_type* at *location*."""
    msg = Sample()
    msg.header.stamp = rospy.Time.now()
    msg.location = location
    msg.sample_type = sample_type
    msg.scalar_value = value
    return msg
class RoutineStates(Enum):
    """Phases of the drilling / sample-collection routine state machine."""
    STARTUP = auto()
    INITIALIZE = auto()
    INACTIVE = auto()
    DRILLING = auto()
    RESET_DRILL = auto()
    STOP_DRILL = auto()
    CLEAN_DRILL = auto()
    POSITION_SENSOR = auto()
    DEPLOYING = auto()
    COLLECTING = auto()
    STOWING = auto()
class SampleTypes(Enum):
    """Kinds of sample the COLLECTING state can record."""
    DIRT = auto()
    VWC = auto()
    NIR = auto()
class RoutineManager:
    """State machine sequencing the drill, clean, reposition and
    sample-collection phases of the soil-sampling routine.

    NOTE(review): attribute names mix the spellings 'augur' and 'auger';
    kept as-is because the ROS callbacks in __main__ assign them by these
    exact names.
    """
    def __init__(self):
        self.state = RoutineStates.STARTUP
        # Auger/drill state (updated by ROS callbacks defined in __main__).
        # NOTE(review): update() reads self.auger_pos_err, but only
        # self.augur_pos_err is initialised here; auger_pos_err is created
        # by err_cb — before the first message it would raise AttributeError.
        self.augur_depth = np.nan
        self.augur_pos_err = 0.0
        self.auger_base_torque = 0.0
        self.depth = Float64(0.0)
        # Localisation / heading state.
        self.current_loc = PoseStamped()
        self.raw_heading = np.nan
        self.heading_offset = 0.0
        self.target_heading = np.nan
        self.heading_integral = 0.0
        self.vel_cmd = Twist()
        # Operator requests (set by the service handlers in __main__).
        self.start_drill = False
        self.cancel = False
        self.start_nir = False
        self.nir_spectrum = None
        self.start_vwc = False
        self.vwc_readings = []
        # Interpolation keyframes for the current depth trajectory.
        self.keyframes = None
    @property
    def curr_heading(self):
        # Continuous (unwrapped) heading: raw IMU heading plus the
        # accumulated +/-2*pi offsets added in heading_cb.
        return self.raw_heading + self.heading_offset
    def update(self):
        """Advance the state machine one tick; called from the main loop."""
        t = rospy.get_time()
        if self.state == RoutineStates.STARTUP:
            try:
                set_imu_zupts()
                #start_nir_background()
                self.transition_to(RoutineStates.INITIALIZE)
            except rospy.service.ServiceException:
                print('waiting for NIR background service to become available...')
                pass
        elif self.state == RoutineStates.INITIALIZE:
            if self.state_end_time < t:
                self.transition_to(RoutineStates.INACTIVE)
        # Plain `if` (not elif): after INITIALIZE -> INACTIVE the INACTIVE
        # branch also runs within the same tick.
        if self.state == RoutineStates.INACTIVE:
            if self.start_drill:
                self.start_drill = False
                self.transition_to(RoutineStates.DRILLING)
            if self.start_vwc:
                self.start_vwc = False
                self.transition_to(RoutineStates.COLLECTING, SampleTypes.VWC)
            if self.start_nir:
                self.start_nir = False
                self.transition_to(RoutineStates.COLLECTING, SampleTypes.NIR)
        elif self.state == RoutineStates.DRILLING:
            if self.cancel:
                self.cancel = False
                self.transition_to(RoutineStates.STOP_DRILL)
            elif self.auger_pos_err < -0.4 or self.auger_base_torque < -4.0:
                # Large position error / base torque: back the drill out and
                # retry from a retracted position.
                self.transition_to(RoutineStates.RESET_DRILL)
            elif t < self.keyframes['t'][-1]:
                # Follow the depth/speed keyframes by linear interpolation.
                self.depth.data = np.interp(t, self.keyframes['t'], self.keyframes['depth'])
                #depth_pub.publish(self.depth)
                drill_speed = np.interp(t, self.drill_keyframes['t'], self.drill_keyframes['speed'])
                drill_pub.publish(Float64(drill_speed))
            else:
                self.transition_to(RoutineStates.CLEAN_DRILL)
        elif self.state == RoutineStates.STOP_DRILL:
            if t < self.keyframes['t'][-1]:
                self.depth.data = np.interp(t, self.keyframes['t'], self.keyframes['depth'])
                depth_pub.publish(self.depth)
                drill_speed = np.interp(t, self.drill_keyframes['t'], self.drill_keyframes['speed'])
                drill_pub.publish(Float64(drill_speed))
            else:
                self.transition_to(RoutineStates.INACTIVE)
        elif self.state == RoutineStates.RESET_DRILL:
            if t < self.keyframes['t'][-1]:
                self.depth.data = np.interp(t, self.keyframes['t'], self.keyframes['depth'])
                depth_pub.publish(self.depth)
                drill_speed = np.interp(t, self.drill_keyframes['t'], self.drill_keyframes['speed'])
                drill_pub.publish(Float64(drill_speed))
            else:
                # Retraction finished: resume drilling with a fresh trajectory.
                self.transition_to(RoutineStates.DRILLING)
        elif self.state == RoutineStates.CLEAN_DRILL:
            if self.state_end_time < t:
                self.transition_to(RoutineStates.POSITION_SENSOR)
        elif self.state == RoutineStates.POSITION_SENSOR:
            # (heading-PID turn kept for reference, currently disabled)
            #err = self.curr_heading - self.target_heading
            #self.heading_integral += err
            #print(f'kp* err: {np.round(0.75 * err, 2)}, ki * integral: {np.round(0.001 * self.heading_integral, 2)}')
            #self.vel_cmd.angular.z = 0.75 * err + 0.001 * self.heading_integral
            # Drive forward at 0.5 for a fixed duration instead.
            self.vel_cmd.linear.x = 0.5
            twist_pub.publish(self.vel_cmd)
            if self.state_end_time < t:
                self.vel_cmd.linear.x = 0.0
                twist_pub.publish(self.vel_cmd)
                self.transition_to(RoutineStates.DEPLOYING)
        elif self.state == RoutineStates.DEPLOYING:
            self.transition_to(RoutineStates.INACTIVE)
        elif self.state == RoutineStates.COLLECTING:
            if self.sample_type == SampleTypes.DIRT:
                sample = build_sample(self.current_loc.pose.position, 'dirt')
                save_sample(sample)
                self.transition_to(RoutineStates.INACTIVE)
            elif self.sample_type == SampleTypes.VWC:
                # Average the readings accumulated during the dwell window.
                if self.state_end_time < t:
                    avg = np.mean(self.vwc_readings)
                    sample = build_sample(self.current_loc.pose.position, 'vwc', avg)
                    save_sample(sample)
                    self.transition_to(RoutineStates.INACTIVE)
            elif self.sample_type == SampleTypes.NIR:
                # Wait for both the dwell window and the NIR measurement.
                if self.state_end_time < t and self.nir_spectrum is not None:
                    sample = build_sample(self.current_loc.pose.position, 'nir')
                    save_nir_sample(sample, self.nir_spectrum)
                    self.transition_to(RoutineStates.INACTIVE)
    def transition_to(self, new_state, *args):
        """Perform entry/exit actions and switch to *new_state*.

        For COLLECTING, ``args[0]`` is the SampleTypes member to collect.
        """
        if new_state == self.state:
            return
        # if we are coming from inactive, set robot to blue
        if self.state == RoutineStates.INACTIVE:
            color_robot_blue()
        if new_state == RoutineStates.INITIALIZE:
            color_robot_blue()
            self.state_end_time = rospy.get_time() + 15
        # if we are going inactive, set robot to not blue
        elif new_state == RoutineStates.INACTIVE:
            clear_robot_color()
        elif new_state == RoutineStates.DRILLING:
            self.keyframes = self.build_drill_trajectory()
            self.drill_keyframes = self.build_speed_trajectory(0.0, 2.5)
        elif new_state == RoutineStates.STOP_DRILL:
            self.keyframes = self.build_reverse_trajectory()
            self.drill_keyframes = self.build_speed_trajectory(2.5, -2.5)
        elif new_state == RoutineStates.RESET_DRILL:
            self.keyframes = self.build_reverse_trajectory()
            self.drill_keyframes = self.build_speed_trajectory(2.5, -2.5)
        elif new_state == RoutineStates.CLEAN_DRILL:
            clean_drill()
            self.state_end_time = rospy.get_time() + 9
        elif new_state == RoutineStates.POSITION_SENSOR:
            drill_pub.publish(Float64(0.0))
            camera_goto('rear')
            self.state_end_time = rospy.get_time() + 3.2
            # (heading-PID set-up kept for reference, currently disabled)
            #self.heading_integral = 0.0
            # pick which way we turn to minimize heading windup
            #turn_angle = -np.pi
            #if self.curr_heading < 0:
            #    turn_angle *= -1
            #self.target_heading = self.curr_heading + turn_angle
        elif new_state == RoutineStates.DEPLOYING:
            self.vel_cmd.angular.z = 0.0
            twist_pub.publish(self.vel_cmd)
            deploy_scoop(True)
        elif new_state == RoutineStates.COLLECTING:
            print('transition to collecting')
            self.sample_type = args[0]
            self.state_end_time = rospy.get_time()
            if self.sample_type == SampleTypes.DIRT:
                pass
            elif self.sample_type == SampleTypes.VWC:
                # 2 s dwell while moisture readings accumulate.
                self.state_end_time += 2
                self.vwc_readings = []
            elif self.sample_type == SampleTypes.NIR:
                # 12 s dwell for the NIR measurement to complete.
                self.state_end_time += 12
                self.nir_spectrum = None
                start_nir_measurement()
        self.state = new_state
    def build_drill_trajectory(self):
        """Keyframes (time, auger depth) for the full drilling sequence."""
        start_time = rospy.get_time()
        # 0 to -.35
        # ground is at -0.05
        # (earlier depth profiles kept for reference)
        # -0.18, -0.18, 0.05, -0.18, -0.3, -0.3, 0.05
        # -0.09, -0.09, 0.05, -0.09, -0.15, -0.09, 0.05
        # -0.14, -0.14, 0.0, -0.14, -0.2, -0.2, 0.0
        #depths = [self.augur_depth, 0.0, -0.22, -0.22, 0.0, -0.22, -0.35, -0.35, 0.0]
        depths = [self.augur_depth, AUGER_HOME_Z, -0.18, -0.18, AUGER_HOME_Z, -0.18, -0.25, -0.25, AUGER_HOME_Z]
        # cumsum turns per-segment durations into absolute timestamps
        times = np.cumsum([start_time, 2.0, 3.0, 3.0, 2.0, 2.0, 6.0, 5.0, 5.0])
        #times = np.cumsum([start_time, 0.2, .50, .20, .20, .20, .100, .4, .50])
        return {'t': times, 'depth': depths}
    def build_reverse_trajectory(self):
        """Keyframes retracting the auger back to its home height over 7 s."""
        start_time = rospy.get_time()
        depths = [self.augur_depth, AUGER_HOME_Z]
        times = [start_time,start_time+7.0]
        return {'t': times, 'depth': depths}
    def build_speed_trajectory(self, v_from, v_to):
        """Two-second linear ramp of drill speed from *v_from* to *v_to*."""
        start_time = rospy.get_time()
        speeds = [v_from, v_to]
        times = [start_time,start_time+2.0]
        return {'t': times, 'speed': speeds}
if __name__ == '__main__':
    manager = RoutineManager()
    rospy.init_node('routine_manager')
    color_pub = rospy.Publisher('/robot_color', ColorRGBA, queue_size=5)
    def clear_robot_color():
        # All-zero RGBA releases the colour override.
        c = ColorRGBA()
        c.r = 0.0
        c.g = 0.0
        c.b = 0.0
        c.a = 0.0
        color_pub.publish(c)
    def color_robot_blue():
        c = ColorRGBA()
        c.r = 0.0
        c.g = 0.0
        c.b = 1.0
        c.a = 1.0
        color_pub.publish(c)
    drill_pub = rospy.Publisher('/auger_velocity', Float64, queue_size=5)
    depth_pub = rospy.Publisher('/auger_z_offset', Float64, queue_size=5)
    twist_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)
    save_sample = rospy.ServiceProxy('/sample_store/save_sample', SaveSample)
    save_nir_sample = rospy.ServiceProxy('/sample_store/save_nir_sample', SaveNIRSample)
    clean_drill = rospy.ServiceProxy('/auger_ctrl/clean', Trigger)
    # Teleop commands on the private topics are forwarded only while no
    # routine is running (manager INACTIVE).
    def dig_cb(msg):
        if manager.state == RoutineStates.INACTIVE:
            manager.augur_depth = msg.data
            depth_pub.publish(msg)
    def drill_cb(msg):
        if manager.state == RoutineStates.INACTIVE:
            drill_pub.publish(msg)
    def twist_cb(msg):
        if manager.state == RoutineStates.INACTIVE:
            twist_pub.publish(msg)
    def err_cb(msg):
        manager.auger_pos_err = msg.data
    def torque_cb(msg):
        manager.auger_base_torque = msg.data
    rospy.Subscriber('~auger_velocity', Float64, drill_cb)
    rospy.Subscriber('~auger_z_offset', Float64, dig_cb)
    rospy.Subscriber('/auger_ctrl/position_error', Float64, err_cb)
    rospy.Subscriber('/auger_ctrl/base_torque', Float64, torque_cb)
    rospy.Subscriber('~cmd_vel', Twist, twist_cb)
    def odom_cb(msg: Odometry):
        manager.current_loc.header = msg.header
        manager.current_loc.pose = msg.pose.pose
    rospy.Subscriber('/nav/odom', Odometry, odom_cb)
    def heading_cb(msg: FilterHeading):
        # Unwrap the heading so manager.curr_heading stays continuous.
        prev_heading = manager.raw_heading
        manager.raw_heading = msg.heading_rad
        # probably discontinuity
        if abs(manager.raw_heading - prev_heading) > 1:
            # goal is to keep the signal smooth for pid/etc.
            # 358 359 | 0 1
            # clockwise
            if manager.raw_heading < prev_heading:
                manager.heading_offset += 2*np.pi
            # counterclockwise
            else:
                manager.heading_offset -= 2*np.pi
    rospy.Subscriber('/nav/heading', FilterHeading, heading_cb)
    def vwc_cb(msg):
        manager.vwc_readings.append(msg.data)
    rospy.Subscriber('/moisture_sensor', Float64, vwc_cb)
    def nir_cb(msg):
        manager.nir_spectrum = msg
    rospy.Subscriber('/neospectra/measurement', NirReading, nir_cb)
    start_nir_measurement = rospy.ServiceProxy('/neospectra/take_reading', Trigger)
    start_nir_background = rospy.ServiceProxy('/neospectra/take_background', Trigger)
    deploy_scoop = rospy.ServiceProxy('/deploy_sample_arm', SetBool)
    camera_goto = rospy.ServiceProxy('/pan_tilt_ctrl/goto_pose', SetString)
    # take over drill control
    def drill_srv(req: TriggerRequest):
        if manager.state == RoutineStates.INACTIVE:
            manager.start_drill = True
        return []
    def cancel_srv(req: TriggerRequest):
        if manager.state == RoutineStates.DRILLING:
            manager.cancel = True
        return []
    rospy.Service('~drill', Trigger, drill_srv)
    rospy.Service('~cancel', Trigger, cancel_srv)
    def sample_nir_cb(req: TriggerRequest):
        if manager.state == RoutineStates.INACTIVE:
            manager.start_nir = True
        return []
    rospy.Service('~sample_nir', Trigger, sample_nir_cb)
    def sample_vwc_cb(req: TriggerRequest):
        if manager.state == RoutineStates.INACTIVE:
            manager.start_vwc = True
        return []
    rospy.Service('~sample_vwc', Trigger, sample_vwc_cb)
    status_pub = rospy.Publisher('~status', Bool, queue_size=5)
    def publish_status(evt: TimerEvent):
        # Publish "routine active" at 5 Hz (via the 0.2 s timer below).
        is_active = manager.state != RoutineStates.INACTIVE
        status_pub.publish(Bool(is_active))
    rospy.Timer(rospy.Duration.from_sec(0.2), publish_status)
    # Main loop: tick the state machine at ~100 Hz until shutdown.
    while not rospy.is_shutdown():
        manager.update()
        rospy.sleep(rospy.Duration(0.01))
| HebiRobotics/environmental_robots | gps_navigation/scripts/routine_manager.py | routine_manager.py | py | 14,681 | python | en | code | 0 | github-code | 90 |
29881391230 | import sys
import heapq
from collections import defaultdict
def huffman_encoding(data):
    """Huffman-encode *data*; return (bitstring, symbol->code table)."""
    if not data:
        return "", {}
    freq = defaultdict(int)
    for sym in data:
        freq[sym] += 1
    # Degenerate alphabet of one symbol: assign it the single code '0'.
    if len(freq) == 1:
        table = {sym: '0' for sym in freq}
        return "".join(table[sym] for sym in data), table
    # Each heap entry is [weight, [sym, code], [sym, code], ...].
    heap = [[count, [sym, ""]] for sym, count in freq.items()]
    heapq.heapify(heap)
    while len(heap) > 1:
        left = heapq.heappop(heap)
        right = heapq.heappop(heap)
        # Prefix '0' onto codes from the lighter subtree, '1' onto the other.
        for entry in left[1:]:
            entry[1] = '0' + entry[1]
        for entry in right[1:]:
            entry[1] = '1' + entry[1]
        heapq.heappush(heap, [left[0] + right[0]] + left[1:] + right[1:])
    table = {sym: code for sym, code in heap[0][1:]}
    return "".join(table[sym] for sym in data), table
def huffman_decoding(encoded_data, tree):
    """Decode a Huffman bitstring using the symbol->code *tree* mapping."""
    if not encoded_data or not tree:
        return ""
    code_to_symbol = {code: sym for sym, code in tree.items()}
    out = []
    buffer = ""
    for bit in encoded_data:
        buffer += bit
        sym = code_to_symbol.get(buffer)
        if sym is not None:
            out.append(sym)
            buffer = ""
    return "".join(out)
if __name__ == "__main__":
    codes = {}
    a_great_sentence = "The bird is the word"
    print ("The size of the data is: {}\n".format(sys.getsizeof(a_great_sentence)))
    print ("The content of the data is: {}\n".format(a_great_sentence))
    encoded_data, tree = huffman_encoding(a_great_sentence)
    # sys.getsizeof of the bitstring parsed as a base-2 int approximates
    # the compressed size.
    print ("The size of the encoded data is: {}\n".format(sys.getsizeof(int(encoded_data, base=2))))
    print ("The content of the encoded data is: {}\n".format(encoded_data))
    decoded_data = huffman_decoding(encoded_data, tree)
    print ("The size of the decoded data is: {}\n".format(sys.getsizeof(decoded_data)))
    print ("The content of the encoded data is: {}\n".format(decoded_data))
    # Round-trip test cases, including null/empty and very long inputs.
    # Test Case 1: empty string (edge case)
    data1 = ""
    encoded_data1, tree1 = huffman_encoding(data1)
    decoded_data1 = huffman_decoding(encoded_data1, tree1)
    assert data1 == decoded_data1, f"Excpected: {data1}, Received: {decoded_data1}"
    # Test Case 2: normal sentence
    data2 = "Huffman encoding and decoding example"
    encoded_data2, tree2 = huffman_encoding(data2)
    decoded_data2 = huffman_decoding(encoded_data2, tree2)
    assert data2 == decoded_data2, f"Esperado: {data2}, Obtido: {decoded_data2}"
    # Test Case 3: long string (edge case)
    data3 = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
    encoded_data3, tree3 = huffman_encoding(data3)
    decoded_data3 = huffman_decoding(encoded_data3, tree3)
    assert data3 == decoded_data3, f"Esperado: {data3}, Obtido: {decoded_data3}"
    # Test Case 4: single character (edge case)
    data4 = "a"
    encoded_data4, tree4 = huffman_encoding(data4)
    decoded_data4 = huffman_decoding(encoded_data4, tree4)
    assert data4 == decoded_data4, f"Esperado: {data4}, Obtido: {decoded_data4}"
    # Test Case 5: single repeated letter (edge case)
    data5 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    encoded_data5, tree5 = huffman_encoding(data5)
    decoded_data5 = huffman_decoding(encoded_data5, tree5)
    assert data5 == decoded_data5, f"Esperado: {data5}, Obtido: {decoded_data5}"
| lleonardogr/EstruturasDeDadosEAlgoritimos | DataStructures/Lesson3.py | Lesson3.py | py | 3,856 | python | en | code | 0 | github-code | 90 |
22761962069 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import os
from dragon.vm import torch
from seetadet.core.config import cfg
from seetadet.core.engine.build import build_lr_scheduler
from seetadet.core.engine.build import build_optimizer
from seetadet.core.engine.build import build_tensorboard
from seetadet.core.engine.utils import count_params
from seetadet.core.engine.utils import get_device
from seetadet.core.engine.utils import get_param_groups
from seetadet.core.engine.utils import load_weights
from seetadet.data.build import build_loader_train
from seetadet.models.build import build_detector
from seetadet.utils import logging
from seetadet.utils import profiler
class Trainer(object):
    """Schedule the iterative model training."""
    def __init__(self, coordinator, start_iter=0):
        """Build the data loader, detector, optimizer, scheduler and monitors.

        Args:
            coordinator: experiment coordinator providing output paths.
            start_iter (int): iteration to resume from (0 = fresh run).
        """
        # Build loader.
        self.loader = build_loader_train()
        # Build model.
        self.model = build_detector(training=True)
        # Weight loading is strict only when resuming from a checkpoint.
        load_weights(self.model, cfg.TRAIN.WEIGHTS, strict=start_iter > 0)
        self.model.to(device=get_device(cfg.GPU_ID))
        if cfg.MODEL.PRECISION.lower() == 'float16':
            self.model.half()
        # Build optimizer.
        self.loss_scale = cfg.SOLVER.LOSS_SCALE
        param_groups_getter = get_param_groups
        if cfg.SOLVER.LAYER_LR_DECAY < 1.0:
            # Layer-wise LR decay: each param group gets an 'lr_scale'
            # derived from its depth in the backbone.
            lr_scale_getter = functools.partial(
                self.model.backbone.get_lr_scale,
                decay=cfg.SOLVER.LAYER_LR_DECAY)
            param_groups_getter = functools.partial(
                param_groups_getter, lr_scale_getter=lr_scale_getter)
        self.optimizer = build_optimizer(param_groups_getter(self.model))
        self.scheduler = build_lr_scheduler()
        # Build monitor.
        self.coordinator = coordinator
        self.metrics = collections.OrderedDict()
        self.board = None
    @property
    def iter(self):
        # Current iteration index; the LR scheduler is the source of truth.
        return self.scheduler._step_count
    def snapshot(self):
        """Save the checkpoint of current iterative step."""
        f = cfg.SOLVER.SNAPSHOT_PREFIX
        f += '_iter_{}.pkl'.format(self.iter)
        f = os.path.join(self.coordinator.path_at('checkpoints'), f)
        # Only the root process writes; never overwrite an existing snapshot.
        if logging.is_root() and not os.path.exists(f):
            torch.save(self.model.state_dict(), f, pickle_protocol=4)
            logging.info('Wrote snapshot to: {:s}'.format(f))
    def add_metrics(self, stats):
        """Add or update the metrics."""
        for k, v in stats['metrics'].items():
            if k not in self.metrics:
                self.metrics[k] = profiler.SmoothedValue()
            self.metrics[k].update(v)
    def display_metrics(self, stats):
        """Send metrics to the monitor."""
        logging.info('Iteration %d, lr = %.8f, time = %.2fs'
                     % (stats['iter'], stats['lr'], stats['time']))
        for k, v in self.metrics.items():
            logging.info(' ' * 4 + 'Train net output({}): {:.4f} ({:.4f})'
                         .format(k, stats['metrics'][k], v.average()))
        if self.board is not None:
            self.board.scalar_summary('lr', stats['lr'], stats['iter'])
            self.board.scalar_summary('time', stats['time'], stats['iter'])
            for k, v in self.metrics.items():
                self.board.scalar_summary(k, v.average(), stats['iter'])
    def step(self):
        """Run one forward/backward/update step and return its statistics."""
        stats = {'iter': self.iter}
        metrics = collections.defaultdict(float)
        # Run forward.
        timer = profiler.Timer().tic()
        inputs = self.loader()
        outputs, losses = self.model(inputs), []
        for k, v in outputs.items():
            if 'loss' in k:
                if isinstance(v, (tuple, list)):
                    # Multi-stage loss: sum stages for backprop, but report
                    # each stage separately in the metrics.
                    losses.append(sum(v[1:], v[0]))
                    metrics.update(dict(('stage%d_' % (i + 1) + k, float(x))
                                        for i, x in enumerate(v)))
                else:
                    losses.append(v)
                    metrics[k] += float(v)
        # Run backward.
        losses = sum(losses[1:], losses[0])
        if self.loss_scale != 1.0:
            losses *= self.loss_scale
        losses.backward()
        # Apply update.
        stats['lr'] = self.scheduler.get_lr()
        for group in self.optimizer.param_groups:
            # Combine the scheduled LR with the per-group layer-decay scale.
            group['lr'] = stats['lr'] * group.get('lr_scale', 1.0)
        self.optimizer.step()
        self.scheduler.step()
        stats['time'] = timer.toc()
        stats['metrics'] = collections.OrderedDict(sorted(metrics.items()))
        return stats
    def train_model(self, start_iter=0):
        """Network training loop."""
        timer = profiler.Timer()
        max_steps = cfg.SOLVER.MAX_STEPS
        display_every = cfg.SOLVER.DISPLAY
        progress_every = 10 * display_every
        snapshot_every = cfg.SOLVER.SNAPSHOT_EVERY
        self.scheduler._step_count = start_iter
        while self.iter < max_steps:
            with timer.tic_and_toc():
                stats = self.step()
            self.add_metrics(stats)
            if stats['iter'] % display_every == 0:
                self.display_metrics(stats)
            if self.iter % progress_every == 0:
                logging.info(profiler.get_progress(timer, self.iter, max_steps))
            if self.iter % snapshot_every == 0:
                self.snapshot()
                # Reset smoothed metrics so each snapshot window starts fresh.
                self.metrics.clear()
def run_train(coordinator, start_iter=0, enable_tensorboard=False):
    """Run a complete training task: build a trainer, train, then snapshot.

    Args:
        coordinator: experiment coordinator providing output paths.
        start_iter (int): iteration to resume from (0 = fresh run).
        enable_tensorboard (bool): attach a tensorboard writer on the root rank.
    """
    task = Trainer(coordinator, start_iter=start_iter)
    # Only the root process writes tensorboard summaries.
    if enable_tensorboard and logging.is_root():
        log_dir = coordinator.path_at('logs')
        task.board = build_tensorboard(log_dir)
    logging.info('#Params: %.2fM' % count_params(task.model))
    logging.info('Start training...')
    task.train_model(start_iter)
    task.snapshot()
| seetaresearch/seetadet | seetadet/core/engine/train_engine.py | train_engine.py | py | 5,896 | python | en | code | 1 | github-code | 90 |
# Read an H x W character grid from stdin and print it surrounded by a
# one-cell '#' frame.
h, w = map(int, input().split())
framed_rows = ['#' + input() + '#' for _ in range(h)]
border = '#' * (w + 2)
print(border)
for row in framed_rows:
    print(row)
print(border)
41948841519 | from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_newsource,get_articles,search_article
from ..models import Source,Article
# Views
@main.route('/')
def index():
    '''
    View function for the root page: fetch headline sources for every
    category shown on the landing page and render the index template.
    '''
    # Template keyword names must match the category names used in index.html.
    categories = ('general', 'technology', 'entertainment',
                  'sports', 'business', 'science')
    sources = {category: get_newsource(category) for category in categories}
    return render_template('index.html', **sources)
@main.route('/articles/<source_id>')
def articles(source_id):
    '''
    View function that returns the articles for a given news source.
    Redirects to the search view when an 'article_query' parameter is present.
    '''
    # Getting articles
    article = get_articles(source_id)
    title = f'{source_id}'
    # Fix: the previous local name `search_article` shadowed the imported
    # `search_article` helper used by the search view; use a distinct name.
    article_query = request.args.get('article_query')
    if article_query:
        return redirect(url_for('.search', article_name=article_query))
    else:
        return render_template('article.html', title=title, articles=article)
@main.route('/search/<article_name>')
def search(article_name):
    '''
    View function to display search results for the given article name.
    '''
    # The news API expects spaces in the query to be joined with '+'.
    query = article_name.replace(" ", "+")
    results = search_article(query)
    title = f'search results for {article_name}'
    return render_template('search.html', title=title, articles=results)
41771624328 | #!/usr/bin/python3
from functools import wraps
from json import loads, load, dumps
from sys import argv, exit
from typing import Optional
def error_handler(exit_on_error=True):
    """Decorator factory: run the wrapped callable, reporting any exception.

    On failure the error is printed; when *exit_on_error* is true the
    process terminates, otherwise the wrapper returns ``None``.
    """
    def decorator(func):
        @wraps(func)  # preserve the wrapped function's name and docstring
        def main(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # OSError-style exceptions carry (errno, strerror); prefer
                # the readable message when more than one arg is present.
                print(f"Error : {e.args[1] if len(e.args)>1 else e}")
                if exit_on_error:
                    exit("Quitting")
        return main
    return decorator
@error_handler()
def get_raw_data(resp: Optional[dict] = None) -> dict:
    """Parse the cookie-export .json file named by ``args.file``.

    Returns a mapping of cookie name -> cookie value.  *resp* may be passed
    in to be updated in place; a fresh dict is created when it is omitted.
    """
    if resp is None:
        # Fix: the previous mutable default ``resp: dict = {}`` was shared
        # across calls and would accumulate cookies between invocations.
        resp = {}
    with open(args.file, encoding="utf-8") as fh:
        data = fh.read()
    # Wrap the exported list so it parses as the object {"cookies": [...]}.
    # (Renamed from ``sorted``, which shadowed the builtin.)
    wrapped = f'{{"cookies":{data} }}'
    for entry in loads(wrapped).get("cookies"):
        resp[entry["name"]] = entry["value"]
    return resp
@error_handler()
def get_key(key: str) -> str:
    """Map a friendly key name to the real cookie name.

    Built-in mappings may be extended via the ``--pre`` .json file; unknown
    keys are returned unchanged.
    """
    mappings = {
        "bard": "__Secure-1PSID",
        "bing": "_U",
    }
    if args.pre:
        with open(args.pre, encoding="utf-8") as fh:
            mappings.update(load(fh))
    return mappings.get(key, key)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Hunt cookies from `Json-to-Cookies` extension .json files",
exit_on_error=True,
)
parser.add_argument("file", help="Path to .json file", metavar="FILE-PATH")
parser.add_argument(
"-i",
"--indent",
help="Indent level while dumping json data - %(default)s",
metavar="n",
type=int,
default=7,
)
parser.add_argument(
"-g", "--get", help="Stdout the specific cookie value of the key", metavar="KEY"
)
parser.add_argument(
"-p", "--pre", help="Path to .json file containing key-mappings", metavar="PATH"
)
parser.add_argument(
"--zero-mapping",
action="store_true",
help="Disable key mappings - %(default)s",
)
args = parser.parse_args()
hunted = get_raw_data()
if args.get:
print(hunted.get(args.get if args.zero_mapping else get_key(args.get)))
else:
print(dumps(hunted, indent=args.indent))
# Dated Mon, 29 - May - 2023 1809hrs
| Simatwa/cookie-hunter | main.py | main.py | py | 2,139 | python | en | code | 0 | github-code | 90 |
14012053658 | from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
import torch.distributed as dist
from torch.distributed import ProcessGroup
from elixir.cuda import gpu_device
from elixir.parameter import FakeTensor
from .memory_pool import MemoryPool, PrivateBlock, PublicBlock, TensorBlock
from .states import TensorState, ts_update_sanity_check
class ChunkFullError(Exception):
pass
@dataclass
class TensorInfo:
state: TensorState
fake_data: FakeTensor
offset: int
end: int
class Chunk:
"""Chunk is a type of data structure to store tensors.
It allows us to store a sequence of tensors into one continuous memory block.
Moreover, Chunk manages the storage of tensors in a distributed way.
Normally, a chunk is scattered across its process group.
When a tensor in this chunk should be used later, the chunk can be gathered by access_chunk.
When the training is done, the chunk can be scattered by reduce_chunk.
args:
rcache: the memory pool to store replicated chunks
chunk_size: the size of the chunk
chunk_dtype: the dtype of the chunk
process_group: the torch communication group of the chunk
temp_device: the device to store the temporary chunk when initializing
shard_device: the device to store the shard of the scattered chunk
rcache_fused: whether this chunk is fused in rcache without eviction
cpu_pin_memory: whether this chunk use cpu pin memory for its shard
"""
total_count = 0
def __init__(
self,
rcache: MemoryPool,
chunk_size: int,
chunk_dtype: torch.dtype,
process_group: ProcessGroup,
temp_device: Optional[torch.device] = None,
shard_device: Optional[torch.device] = None,
rcache_fused: bool = False, # whether this chunk is used in ZeRO2
cpu_pin_memory: bool = False # whether this chunk has a permanent copy in cpu
) -> None:
self.chunk_id: int = Chunk.total_count
Chunk.total_count += 1
# set replicated cache pool
self.rcache: MemoryPool = rcache
self.chunk_size: int = chunk_size
self.chunk_dtype: torch.dtype = chunk_dtype
self.utilized_size: int = 0
self.torch_pg: ProcessGroup = process_group
self.pg_size: int = dist.get_world_size(self.torch_pg)
self.pg_rank: int = dist.get_rank(self.torch_pg)
# the chunk size should be divisible by the dp degree
assert chunk_size % self.pg_size == 0
self.shard_size: int = chunk_size // self.pg_size
self.shard_begin: int = self.shard_size * self.pg_rank
self.shard_end: int = self.shard_begin + self.shard_size
self.valid_end: int = self.shard_size + 1 # set to an illegal number
# notice: release blocks reserved by Pytorch
torch.cuda.empty_cache()
# rcache block, the global replicated chunk in R cache
self.rcb: Optional[TensorBlock] = None
self.rcache_fused: bool = rcache_fused
self._my_block = None
self.is_replica: bool = True
# allocate a private block for fused chunks
if self.rcache_fused:
self._my_block = rcache.get_private_block(chunk_size, chunk_dtype)
temp_device: torch.device = temp_device or gpu_device()
# chunk_temp is a global chunk, which only exists during building the chunks.
# keep all elements to zero
self.chunk_temp: Optional[torch.Tensor] = None
if rcache_fused:
self.chunk_temp = self._my_block.payload
torch.zero_(self.chunk_temp)
else:
self.chunk_temp = torch.zeros(chunk_size, dtype=chunk_dtype, device=temp_device)
# configure the init device of the shard
# no-offload default: fp16, fp32 -> CUDA
# offload default: fp16, fp32 -> CPU
shard_device: torch.device = shard_device or torch.device('cpu')
pin_flag: bool = cpu_pin_memory and shard_device.type == 'cpu'
# chunk.shard is a local chunk
# it is desinged to exist permanently
self.shard: torch.Tensor = torch.empty(self.shard_size,
dtype=chunk_dtype,
device=shard_device,
pin_memory=pin_flag)
# calculate the memory occupation of the chunk and the shard
self.chunk_memo: int = self.chunk_size * self.chunk_temp.element_size()
self.shard_memo: int = self.chunk_memo // self.pg_size
# each tensor is associated with a TensorInfo to track its meta info
# (state, shape, offset, end)
self.tensors_info: Dict[torch.Tensor, TensorInfo] = {}
# the total number of tensors in the chunk
self.num_tensors: int = 0
# Record the number of tensors in different states
self.tensor_state_cnter: Dict[TensorState, int] = dict()
for state in TensorState:
self.tensor_state_cnter[state] = 0
# we introduce the paired chunk here
# it refers to another chunk having the same parameters
# but with different dtype(such as fp16_chunk.paired_chunk -> fp32_chunk
self.paired_chunk = None
# if this chunk is synchronized with the optimizer, the flag is True
self.optim_sync_flag = True
# whether to record l2 norm for the gradient clipping calculation
self.l2_norm_flag = False
self.l2_norm = None
# whether it overflows after the reduction
self.overflow = False
@property
def prepared_block(self):
return self._my_block
@property
def is_init(self):
return self.chunk_temp is not None
@property
def in_rcache(self):
return self.rcb is not None
@property
def shard_device(self):
return self.shard.device
@property
def memory_usage(self) -> Dict[str, int]:
cuda_memory = 0
cpu_memory = 0
# this chunk is not closed
if self.is_init:
if self.chunk_temp.device.type == 'cuda':
cuda_memory += self.chunk_memo
else:
cpu_memory += self.chunk_memo
# this chunk is on the rcache
if self.in_rcache:
cuda_memory += self.rcb.memo_occ
# calculate the occupation of the chunk shard
if self.shard_device.type == 'cuda':
cuda_memory += self.shard_memo
elif self.shard_device.type == 'cpu':
cpu_memory += self.shard_memo
else:
raise NotImplementedError
return dict(cuda=cuda_memory, cpu=cpu_memory)
@property
def payload(self) -> torch.Tensor:
if self.is_init:
return self.chunk_temp
if self.in_rcache:
return self.rcb.payload
else:
return self.shard
@property
def shard_move_check(self) -> bool:
return not self.in_rcache
def _not_compute_number(self):
total = 0
state_list = [TensorState.HOLD, TensorState.HOLD_AFTER_BWD, TensorState.READY_FOR_REDUCE]
for state in state_list:
total += self.tensor_state_cnter[state]
return total
@property
def scatter_check(self) -> bool:
if self.rcache_fused:
return False
return self._not_compute_number() == self.num_tensors
@property
def reduce_check(self):
return self.tensor_state_cnter[TensorState.READY_FOR_REDUCE] == self.num_tensors
def set_overflow_flag(self, valid_tensor: torch.Tensor) -> None:
assert not self.overflow
self.overflow = torch.isinf(valid_tensor).any().item() | torch.isnan(valid_tensor).any().item()
def set_l2_norm(self, valid_tensor: torch.Tensor) -> None:
assert self.l2_norm is None, 'you are calculating the l2 norm twice'
chunk_l2_norm = valid_tensor.data.float().norm(2)
self.l2_norm = chunk_l2_norm.item()**2
def append_tensor(self, tensor: torch.Tensor):
# sanity check
assert self.is_init
assert tensor.dtype == self.chunk_dtype
new_utilized_size = self.utilized_size + tensor.numel()
# raise exception when the chunk size is exceeded
if new_utilized_size > self.chunk_size:
raise ChunkFullError
self.chunk_temp[self.utilized_size:new_utilized_size].copy_(tensor.data.flatten())
tensor.data = self.chunk_temp[self.utilized_size:new_utilized_size].view(tensor.shape)
fake_data = FakeTensor(tensor.data)
# record all the information about the tensor
self.num_tensors += 1
tensor_state = TensorState.HOLD
self.tensor_state_cnter[tensor_state] += 1
self.tensors_info[tensor] = TensorInfo(state=tensor_state,
fake_data=fake_data,
offset=self.utilized_size,
end=new_utilized_size)
self.utilized_size = new_utilized_size
def close_chunk(self):
# sanity check
assert self.is_init
# calculate the valid end for each shard
if self.utilized_size <= self.shard_begin:
self.valid_end = 0
elif self.utilized_size < self.shard_end:
self.valid_end = self.utilized_size - self.shard_begin
self.__remove_tensors_ptr()
self.__update_shard(self.chunk_temp, self.shard)
self.is_replica = False
self.chunk_temp = None
def replicate(self):
assert not self.is_replica
self.is_replica = True
this_shard = self.shard if self.optim_sync_flag else self.__paired_shard()
self.__update_replica(self.rcb.payload, this_shard)
self.__update_tensors_ptr()
def scatter(self):
assert not self.rcache_fused
assert self.is_replica
self.__remove_tensors_ptr()
if not self.optim_sync_flag:
self.__update_shard(self.rcb.payload, self.shard)
self.optim_sync_flag = True
self.is_replica = False
def reduce(self, always_fp32: bool = False):
assert self.is_replica
self.__remove_tensors_ptr()
if self.pg_size > 1:
cast_to_fp32 = False
if always_fp32 and self.chunk_dtype != torch.float:
cast_to_fp32 = True
# cast the payload to fp32
reduce_buffer = self.rcb.payload.to(dtype=torch.float)
else:
# otherwise, use the same payload
reduce_buffer = self.rcb.payload
# divide the reduce buffer by the size of the process group
reduce_buffer /= self.pg_size
# try to use inplace reduce scatter
# notice: pytorch does not allow true inplace reduce scatter
# because pytorch will allocate a continuous memory space for collective communications
shard_buffer = reduce_buffer[self.shard_begin:self.shard_end]
dist.reduce_scatter_tensor(shard_buffer, reduce_buffer, group=self.torch_pg)
# the result should be moved to payload for norm calculating
if cast_to_fp32:
calc_buffer = self.rcb.payload[self.shard_begin:self.shard_end]
calc_buffer.copy_(shard_buffer)
else:
# if process group size equals to 1, do not communicate
reduce_buffer = self.rcb.payload
self.__update_shard(reduce_buffer, self.shard)
self.is_replica = False
def access_chunk(self, block: Optional[TensorBlock] = None):
# sanity check
assert not self.is_init
assert not self.is_replica
if self.rcache_fused:
assert block is None
self.rcb = self._my_block
else:
assert block in self.rcache.public_used_blocks
assert self.rcb is None
self.rcb = block
self.replicate()
def release_chunk(self) -> TensorBlock:
# sanity check
assert not self.is_init
assert self.is_replica
if self.rcache_fused:
raise RuntimeError
self.scatter()
block = self.rcb
self.rcb = None
return block
def update_extra_reduce_info(self, block: Optional[TensorBlock]):
if self.rcache_fused:
assert block is None
block = self._my_block
else:
assert block is not None
buffer = block.payload[self.shard_begin:self.shard_end]
valid_tensor = buffer[:self.valid_end]
self.set_overflow_flag(valid_tensor)
if self.l2_norm_flag:
self.set_l2_norm(valid_tensor)
def reduce_chunk(self, always_fp32: bool = False, sync: bool = True) -> Optional[TensorBlock]:
"""Reduce scatter all the gradients. It's an operation done in CUDA.
"""
# sanity check
assert not self.is_init
assert self.is_replica
self.reduce(always_fp32=always_fp32)
self.__update_tensors_state(TensorState.HOLD)
# reset the rcb pointer
block = self.rcb
self.rcb = None
if self.rcache_fused:
block = None
if sync:
self.update_extra_reduce_info(block)
return block
def tensor_trans_state(self, tensor: torch.Tensor, tensor_state: TensorState) -> None:
prev_state = self.tensors_info[tensor].state
if prev_state == tensor_state:
return
if ts_update_sanity_check(prev_state, tensor_state):
self.__update_one_tensor_info(self.tensors_info[tensor], tensor_state)
def copy_tensor_to_chunk_slice(self, tensor: torch.Tensor, data_slice: torch.Tensor) -> None:
# sanity check
assert self.is_replica
info = self.tensors_info[tensor]
payload = self.rcb.payload
payload[info.offset:info.end].copy_(data_slice.data.flatten())
tensor.data = payload[info.offset:info.end].view(tensor.shape)
def init_pair(self, friend_chunk: 'Chunk') -> None:
if self.paired_chunk is None and friend_chunk.paired_chunk is None:
self.paired_chunk = friend_chunk
friend_chunk.paired_chunk = self
else:
assert self.paired_chunk is friend_chunk
assert friend_chunk.paired_chunk is self
def optim_update(self) -> None:
"""Update the fp16 chunks via their fp32 chunks. It's used by the optimizer.
"""
# sanity check
assert self.paired_chunk is not None
friend_chunk: Chunk = self.paired_chunk
assert not friend_chunk.is_replica
# gradient and optimizer should be on the same device
assert self.shard_device.type == friend_chunk.shard_device.type
if self.shard_device.type == 'cuda':
self.shard.copy_(friend_chunk.shard)
self.optim_sync_flag = True
elif self.shard_device.type == 'cpu':
# optim_sync_flag is set to False
# see shard_move function for more details
self.optim_sync_flag = False
else:
raise NotImplementedError
def get_tensors(self) -> List[torch.Tensor]:
return list(self.tensors_info.keys())
def get_cpu_copy(self, only_rank_0: bool = False) -> List[torch.Tensor]:
assert not self.is_init
if self.is_replica:
# use the payload directly when being replica
temp_buffer = self.rcb.payload
else:
# otherwise, create a temporary buffer
temp_buffer = torch.empty(self.chunk_size, dtype=self.chunk_dtype, device=gpu_device())
# cheat the assertion in __update_replica
self.is_replica = True
self.__update_replica(temp_buffer, self.shard)
self.is_replica = False
cpu_copys = [None] * self.num_tensors
if not only_rank_0 or self.pg_rank == 0:
for i, (t, info) in enumerate(self.tensors_info.items()):
t_copy = temp_buffer[info.offset:info.end].view(t.shape).cpu()
cpu_copys[i] = t_copy
# synchronize
dist.barrier()
return cpu_copys
def load_tensors(self, tensor_list: List[Optional[torch.Tensor]], only_rank_0: bool = False) -> bool:
assert not self.is_replica
assert not self.is_init
temp_buffer = torch.empty(self.chunk_size, dtype=self.chunk_dtype, device=gpu_device())
# cheat the assertion in __update_replica
self.is_replica = True
self.__update_replica(temp_buffer, self.shard)
self.is_replica = False
if not only_rank_0 or self.pg_rank == 0:
for (_, c_info), load_tensor in zip(self.tensors_info.items(), tensor_list):
if load_tensor is None:
continue
temp_buffer[c_info.offset:c_info.end].copy_(load_tensor.data.flatten())
# synchronize
dist.barrier()
if only_rank_0:
dist.broadcast(temp_buffer, src=0, group=self.torch_pg)
# cheat the assertion in __update_shard
self.is_replica = True
self.__update_shard(temp_buffer, self.shard)
self.is_replica = False
def __update_replica(self, replica: torch.Tensor, shard: torch.Tensor):
assert self.is_replica
assert replica.numel() == self.chunk_size
assert shard.numel() == self.shard_size
buffer = replica[self.shard_begin:self.shard_end]
buffer.copy_(shard)
dist.all_gather_into_tensor(replica, buffer, group=self.torch_pg)
def __update_shard(self, replica: torch.Tensor, shard: torch.Tensor):
assert self.is_replica
assert replica.numel() == self.chunk_size
assert shard.numel() == self.shard_size
shard.copy_(replica[self.shard_begin:self.shard_end])
def __paired_shard(self):
assert self.paired_chunk is not None, 'chunks should be paired before training'
optim_chunk: Chunk = self.paired_chunk
assert self.chunk_size == optim_chunk.chunk_size
# only be called when optimizer state is in CPU memory
# the grad and param should be in the same device
assert self.shard_device.type == 'cpu'
return optim_chunk.shard.to(gpu_device())
def __remove_tensors_ptr(self) -> None:
# sanity check
# each tensor should point to its fake data before scatter
assert self.is_replica
for tensor, info in self.tensors_info.items():
tensor.data = info.fake_data
def __update_tensors_ptr(self) -> None:
# sanity check
# the chunk should be replicated to get the correct pointer
assert self.is_replica
payload = self.rcb.payload
for tensor, info in self.tensors_info.items():
tensor.data = payload[info.offset:info.end].view(tensor.shape)
def __update_one_tensor_info(self, tensor_info: TensorInfo, next_state: TensorState):
self.tensor_state_cnter[tensor_info.state] -= 1
tensor_info.state = next_state
self.tensor_state_cnter[tensor_info.state] += 1
def __update_tensors_state(self, next_state: TensorState, prev_state: Optional[TensorState] = None):
for tensor_info in self.tensors_info.values():
if prev_state is None or tensor_info.state == prev_state:
self.__update_one_tensor_info(tensor_info, next_state)
def __hash__(self) -> int:
return self.chunk_id
def __lt__(self, other: object) -> bool:
return self.chunk_id < other.chunk_id
def __eq__(self, other: object) -> bool:
return self.chunk_id == other.chunk_id
def __repr__(self, detailed: bool = True):
if self.is_init:
state = 'initialization'
elif self.in_rcache:
state = 'replicated'
else:
state = 'scattered'
output = [
f'Chunk {self.chunk_id} details: state -> {state}\n',
f' length: {self.chunk_size}, dtype: {self.chunk_dtype}, group_size: {self.pg_size}, tensors: {self.num_tensors}\n'
f' utilized size: {self.utilized_size}, utilized percentage: {100 * (self.utilized_size / self.chunk_size):.0f}%\n'
]
memory_info = self.memory_usage
output.append(' memory usage: (cuda -> {}, cpu -> {})\n'.format(memory_info['cuda'], memory_info['cpu']))
def print_tensor(name, tensor, prefix=''):
output.append(f'{prefix}{name}: (shape={tensor.shape}, dtype={tensor.dtype}, device={tensor.device})\n')
if self.is_init:
print_tensor(name='temp', tensor=self.chunk_temp, prefix=' ')
if self.in_rcache:
print_tensor(name='block', tensor=self.rcb.payload, prefix=' ')
if self.shard is not None:
print_tensor(name='shard', tensor=self.shard, prefix=' ')
if detailed:
output.append(' tensor state monitor:\n')
for st in TensorState:
output.append(' # of {}: {}\n'.format(st, self.tensor_state_cnter[st]))
return ''.join(output)
| hpcaitech/Elixir | elixir/chunk/core/chunk.py | chunk.py | py | 21,263 | python | en | code | 8 | github-code | 90 |
37479444259 | import numpy as np
import theano
import theano.tensor as T
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from util import relu, error_rate, getKaggleMNIST, init_weights
def T_shared_zeros_like32(p):
    """Create a float32 Theano shared variable shaped like shared variable *p*."""
    zeros = np.zeros_like(p.get_value(), dtype=np.float32)
    return theano.shared(zeros)
def momentum_updates(cost, params, mu, learning_rate):
    """Build Theano update pairs implementing classical momentum SGD.

    For each parameter p with velocity v:
        v <- mu * v - learning_rate * grad(cost, p)
        p <- p + v
    """
    velocities = [T_shared_zeros_like32(p) for p in params]
    gradients = T.grad(cost, params)
    updates = []
    for param, velocity, grad in zip(params, velocities, gradients):
        new_velocity = mu * velocity - learning_rate * grad
        updates.append((velocity, new_velocity))
        updates.append((param, param + new_velocity))
    return updates
class AutoEncoder(object):
    """A single sigmoid autoencoder trained with momentum SGD in Theano.

    The encoder/decoder share one weight matrix W (decoder uses W.T).
    """
    def __init__(self, M, an_id):
        # M: hidden-layer size; an_id: id used to name the shared variables
        # so multiple stacked autoencoders do not collide.
        self.M = M
        self.id = an_id
    def fit(self, X, activation=relu, lr=0.5, epochs=1, mu=0.99, batch_sz=20, print_period=100, show_fig=False):
        """Train the autoencoder on data matrix X (N samples x D features)."""
        # X = X.astype(np.float32)
        mu = np.float32(mu)
        lr = np.float32(lr)
        # init hidden layers
        N, D = X.shape
        n_batches = N // batch_sz
        # Initialize the shared variables directly (instead of a HiddenLayer)
        # so their names carry this autoencoder's id.
        W0 = init_weights((D, self.M))
        self.W = theano.shared(W0, 'W_%s' % self.id)
        self.bh = theano.shared(np.zeros(self.M, dtype=np.float32), 'bh_%s' % self.id)
        self.bo = theano.shared(np.zeros(D, dtype=np.float32), 'bo_%s' % self.id)
        self.params = [self.W, self.bh, self.bo]
        self.forward_params = [self.W, self.bh]
        # momentum accumulators (velocity terms)
        # TODO: technically these should be reset before doing backprop
        self.dW = theano.shared(np.zeros(W0.shape), 'dW_%s' % self.id)
        self.dbh = theano.shared(np.zeros(self.M), 'dbh_%s' % self.id)
        self.dbo = theano.shared(np.zeros(D), 'dbo_%s' % self.id)
        self.dparams = [self.dW, self.dbh, self.dbo]
        self.forward_dparams = [self.dW, self.dbh]
        X_in = T.matrix('X_%s' % self.id)
        X_hat = self.forward_output(X_in)
        H = T.nnet.sigmoid(X_in.dot(self.W) + self.bh)
        # compiled op returning the hidden representation
        self.hidden_op = theano.function(
            inputs=[X_in],
            outputs=H,
        )
        # compiled op returning the reconstruction
        self.predict = theano.function(
            inputs=[X_in],
            outputs=X_hat,
        )
        # mse (alternative cost, kept for reference)
        # cost = ((X_in - X_hat) * (X_in - X_hat)).sum() / N #mean or sum and mse as cost function
        # cross entropy
        cost = -(X_in * T.log(X_hat) + (1 - X_in) * T.log(1 - X_hat)).flatten().mean()
        cost_op = theano.function(
            inputs=[X_in],
            outputs=cost,
        )
        # gradient descent with momentum updates
        updates = momentum_updates(cost, self.params, mu, lr)
        train_op = theano.function(
            inputs=[X_in],
            updates=updates,
        )
        costs = []
        print("training autoencoder: %s" % self.id)
        print("epochs to do:", epochs)
        for i in range(epochs):
            print("epoch:", i)
            X = shuffle(X)
            for j in range(n_batches):
                batch = X[j * batch_sz:(j * batch_sz + batch_sz)]
                train_op(batch)
                the_cost = cost_op(batch)    # technically we could also get the cost for Xtest here
                if j % 10 == 0:
                    print("j / n_batches:", j, "/", n_batches, "cost:", the_cost)
                costs.append(the_cost)
        if show_fig:
            plt.plot(costs)
            plt.show()
    def forward_hidden(self, X):
        """Symbolic encoder: hidden activation for input X."""
        Z = T.nnet.sigmoid(X.dot(self.W) + self.bh)
        return Z
    def forward_output(self, X):
        """Symbolic decoder: reconstruction of X (tied weights via W.T)."""
        Z = self.forward_hidden(X)
        Y = T.nnet.sigmoid(Z.dot(self.W.T) + self.bo)
        return Y
def main():
    """Placeholder entry point; the demo lives in test_single_autoencoder."""
    pass
def test_single_autoencoder():
    """Train one autoencoder on MNIST and interactively show reconstructions.

    After training, repeatedly display an original/reconstructed test image
    pair until the user answers 'n' (or 'N') to the prompt.
    """
    Xtrain, Ytrain, Xtest, Ytest = getKaggleMNIST()
    autoencoder = AutoEncoder(300, 0)
    autoencoder.fit(Xtrain, epochs=2, show_fig=True)
    done = False
    while not done:
        # Pick a random test image and reconstruct it.
        i = np.random.choice(len(Xtest))
        x = Xtest[i]
        y = autoencoder.predict([x])
        plt.subplot(1, 2, 1)
        plt.imshow(x.reshape(28, 28), cmap='gray')
        plt.title('Original')
        plt.subplot(1, 2, 2)
        plt.imshow(y.reshape(28, 28), cmap='gray')
        plt.title('Reconstructed')
        plt.show()
        ans = input("Generate another?")
        # Bug fix: the original test was ``in ('n' or 'N')``, which evaluates
        # to ``in 'n'`` -- so answering 'N' never exited the loop.
        if ans and ans[0] in ('n', 'N'):
            done = True
# Run the interactive single-autoencoder demo when executed as a script.
if __name__ == '__main__':
    test_single_autoencoder()
    # main()
| RKorzeniowski/Lazy_programmer_projects | unsupervised_ml/my_autoencoder.py | my_autoencoder.py | py | 4,546 | python | en | code | 0 | github-code | 90 |
18815199297 | from spatula import HtmlPage, XPath
from openstates.models import ScrapeCommittee
import re
leader_re = re.compile(r"(.+),\s+(.*Chairman)")
class Committees(HtmlPage):
    """Shared scraper: parse one chamber's committee-membership XML feed."""

    def process_page(self):
        # 'S' marks the Senate feed; anything else is treated as the House.
        chamber_code = XPath(".//house//text()").match(self.root)[0]
        for node in XPath(".//committee").match(self.root):
            committee_name = XPath(".//name//text()").match(node)[0]
            com = ScrapeCommittee(
                name=committee_name,
                chamber="upper" if chamber_code == "S" else "lower",
                classification="committee",
            )
            # Chair / vice-chair entries look like "Name, ... Chairman".
            for leader in (node.xpath(".//chair//text()"),
                           node.xpath(".//vice_chair//text()")):
                if leader:
                    member_name, role = leader_re.search(leader[0]).groups()
                    com.add_member(member_name, role)
            # Regular members are a single semicolon-separated text blob.
            roster = node.xpath(".//members//text()")[0]
            for member in (part.strip() for part in roster.split(";")):
                com.add_member(member, "Member")
            com.add_source(self.source.url, note="Committees List Page")
            com.add_link(self.source.url, note="homepage")
            yield com
class HouseComm(Committees):
    # House committee-membership feed.
    source = "http://billstatus.ls.state.ms.us/htms/h_cmtememb.xml"
class SenateComm(Committees):
    # Senate committee-membership feed.
    source = "http://billstatus.ls.state.ms.us/htms/s_cmtememb.xml"
| openstates/openstates-scrapers | scrapers_next/ms/committees.py | committees.py | py | 1,494 | python | en | code | 820 | github-code | 90 |
18303655599 | import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, radians
from itertools import accumulate, permutations, combinations, product
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left
from fractions import gcd
from heapq import heappush, heappop
from functools import reduce
def input(): return sys.stdin.readline().strip()  # NOTE: shadows the builtin input for fast reads
def INT(): return int(input())  # read one integer line
def MAP(): return map(int, input().split())  # read whitespace-separated ints (lazy)
def LIST(): return list(map(int, input().split()))  # read whitespace-separated ints (list)
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
mod = 10 ** 9 + 7
def dijkstra(E, start):
    """Single-source shortest paths over the adjacency list ``E``.

    ``E[v]`` is an iterable of ``(neighbor, edge_weight)`` pairs.
    Returns a list of distances from ``start``; unreachable vertices keep
    ``float('inf')``.
    """
    # Local constant instead of the module-level INF, so the function is
    # self-contained and reusable outside this script.
    inf = float('inf')
    n = len(E)
    dist = [inf] * n
    dist[start] = 0
    q = [(0, start)]
    while q:
        dist_v, v = heappop(q)
        if dist[v] != dist_v:
            # Stale heap entry: v was already finalized with a shorter path.
            continue
        for u, w in E[v]:
            cand = dist_v + w
            if cand < dist[u]:
                dist[u] = cand
                heappush(q, (cand, u))
    return dist
def main():
N, u, v = MAP()
tree = [[] for _ in range(N)]
for _ in range(N-1):
A, B = MAP()
tree[A-1].append((B-1, 1))
tree[B-1].append((A-1, 1))
d_t = dijkstra(tree, u-1)
d_a = dijkstra(tree, v-1)
# print(d_t, d_a)
ans = -1
for i in range(N):
if d_t[i] <= d_a[i]: # 青木くんより距離が近いか同じところで追いつかれる
ans = max(ans, d_a[i])
print(ans-1)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02834/s668196214.py | s668196214.py | py | 1,532 | python | en | code | 0 | github-code | 90 |
13648863460 | # coding:utf-8
import jieba
import xlrd
import numpy as np
import re
import string
#(Begin), I 表示内部(inside), O 表示外部(outside), E 表示这个词处于一个实体的结束为止, S 表示,这个词是自己就可以组成一个实体(Single)
def Creat_Txt():
f=open('../资料/全部/jieba_data.txt','a+',encoding='utf-8')
return f
def Add():
print("添加字典进入jieba\n--------")
# 将字典添加到jieba
jieba.load_userdict('../资料/全部/jieba_data.txt')
def Open_Data(boolean):
print("文件读入")
print('------------------------')
#打开文件
Workbook = xlrd.open_workbook('../资料/全部/pandas_excel.xlsx')
sheet = Workbook.sheet_by_index(0)
#获取药材名称
Medicinal_Name = sheet.col_values(1)
#print(Medicinal_Name)
#获取别名
Medicinal_Other_Name=sheet.col_values(3)
#print(Medicinal_Other_Name)
# 获取来源
Medicinal_From = sheet.col_values(4)
#print(Medicinal_From)
#获取性味
Medicinal_Taste=sheet.col_values(5)
#print(Medicinal_Taste)
#获取功能
Medicinal_Function=sheet.col_values(6)
#print(Medicinal_Function)
#获取用量
Medicinal_Use_Num=sheet.col_values(7)
#print(Medicinal_Use_Num)
print('读入完成')
print('---------------------')
#展示数量是否相同
print('数量展示\n')
print('Medicinal_Name',len(Medicinal_Name))
print('Medicinal_Use_Num',len(Medicinal_Use_Num))
print('Medicinal_Function',len(Medicinal_Function))
print('Medicinal_Taste',len(Medicinal_Taste))
print('Medicinal_From',len(Medicinal_From))
print('Medicinal_Other_Name',len(Medicinal_Other_Name),'\n-------------')
#根据药材名进行去重
Name=[]
Num=[]
for i in range(len(Medicinal_Name)):
if Medicinal_Name[i] not in Name:
Name.append(Medicinal_Name[i])
else: Num.append(i)
#for i in Num:
#print(i)
#print(Medicinal_Name[i])
#后面数据都是重复的,所以需要删除 7373-12163
del Medicinal_Name[7373:12163]
del Medicinal_Use_Num[7373:12163]
del Medicinal_Function[7373:12163]
del Medicinal_Taste[7373:12163]
del Medicinal_From[7373:12163]
del Medicinal_Other_Name[7373:12163]
#删除第一个
del Medicinal_Name[0]
del Medicinal_Use_Num[0]
del Medicinal_Function[0]
del Medicinal_Taste[0]
del Medicinal_From[0]
del Medicinal_Other_Name[0]
print('去重后数量展示\n')
print('Medicinal_Name',len(Medicinal_Name))
print('Medicinal_Use_Num',len(Medicinal_Use_Num))
print('Medicinal_Function',len(Medicinal_Function))
print('Medicinal_Taste',len(Medicinal_Taste))
print('Medicinal_From',len(Medicinal_From))
print('Medicinal_Other_Name',len(Medicinal_Other_Name),'\n--------------------------')
#对别名进行处理
#将别名分开,括号是别名的来源,去除括号中的内容
#展示顺序没变,先在这里输出一下
#for i in Medicinal_Other_Name:
#print(i)
#print(Medicinal_Other_Name)
Medicinal_Other_Name_1=[]
for i in Medicinal_Other_Name:
a=re.sub("\(.*?\)","",i)
#print(a)
Medicinal_Other_Name_1.append(a)
#print(Medicinal_Other_Name_1)
Medicinal_Other_Name=Medicinal_Other_Name_1
#建立药名-别名,使用list作为值
Medicinal_Other_Name_1=[]
for i in Medicinal_Other_Name:
a=list(re.split("、|,|。",i))
#过滤空字符
a=list(filter(None,a))
#print(a)
Medicinal_Other_Name_1.append(a)
#print(len(Medicinal_Name))
#print(Medicinal_Other_Name_1)
Medicinal_Other_Name_2=[]
#获取所有的别名,存的时候存这个
for i in range(len(Medicinal_Other_Name_1)):
for j in Medicinal_Other_Name_1[i]:
if j not in Medicinal_Other_Name_2:
Medicinal_Other_Name_2.append(j)
Name2OtherName={}
for i in range(len(Medicinal_Name)):
key=Medicinal_Name[i]
value=Medicinal_Other_Name_1[i]
Name2OtherName.setdefault(key,value)
#print(Name2OtherName)
#药材来源目前不做标注
#print(Medicinal_From)
Medicinal_Taste_1=[]
#这个是将所有的性味进行统计之后的,存入字典存入的是这个
for i in Medicinal_Taste:
a=re.sub("\《.*?\》",'',i)
a=re.sub("[A-Za-z0-9\!\%\[\]\,\:\"\①]","",a)
b=list(re.split("、|,|。|;",a))
# 过滤空字符
b = list(filter(None, b))
for j in b:
if j not in Medicinal_Taste_1:
Medicinal_Taste_1.append(j)
#print(len(Medicinal_Taste_1))
Medicinal_Function_1=[]
#功能
for i in Medicinal_Function:
a = re.sub("\《.*?\》", '', i)
a = re.sub("[A-Za-z0-9\!\%\[\]\,\:\"\①\(\)\“\”]", "", a)
b=list(re.split("、|,|。|;", a))
# 过滤空字符
b = list(filter(None, b))
for j in b:
if j not in Medicinal_Function_1:
Medicinal_Function_1.append(j)
#print(Medicinal_Function_1)
#print(Medicinal_Use_Num)
Medicinal_Use_Num_1=[]
for i in Medicinal_Use_Num:
a = re.sub("[A-Za-z0-9\!\%\[\]\,\"\①\(\)\“\”\~\-\.\钱\两]", "", i)
b = list(re.split(":|,|。|;|:|;|、", a))
# 过滤空字符
b = list(filter(None, b))
for j in b:
if j not in Medicinal_Use_Num_1:
Medicinal_Use_Num_1.append(j)
#print(Medicinal_Use_Num_1)
if boolean:
print("开始建立字典\n-------------")
# 创建自定义字典,也就是一个txt,里面每一行是词语、词频(可省略)、词性(可省略)
f = Creat_Txt()
for i in range(len(Medicinal_Name)):
f.write(Medicinal_Name[i] +'\n')
for i in range(len(Medicinal_Other_Name_2)):
f.write(Medicinal_Other_Name_2[i]+"\n")
for i in range(len(Medicinal_Taste_1)):
f.write(Medicinal_Taste_1[i] +"\n")
for i in range(len(Medicinal_Function_1)):
f.write(Medicinal_Function_1[i] + "\n")
for i in range(len(Medicinal_Use_Num_1)):
f.write(Medicinal_Use_Num_1[i] +"\n")
f.close()
print("建立完成\n----------")
boolean=False
Add()
print("拼接语料\n-----------")
Str=[]
w=0
for i in range(len(Medicinal_Name)):
w=w+1
print(w,'/',len(Medicinal_Name))
str=""
str=str+Medicinal_Name[i]+" "
str=str+Medicinal_Other_Name[i]+" "
str=str+Medicinal_Function[i]+" "
str=str+Medicinal_Taste[i]+" "
str=str+Medicinal_Use_Num[i]+" "
str=str+Medicinal_From[i]+" "
Str.append(str)
#print(str)
print('拼接完成')
return Str,Medicinal_Name,Medicinal_Other_Name_2,Medicinal_From,Medicinal_Function_1,Medicinal_Use_Num_1,Medicinal_Taste_1
def Numo(num):
str=''
for i in range(len(num)):
if i==0:
str=str+num[i]+" B-jiliang\n"
elif i==len(num):
str=str+num[i]+" E-jiliang\n"
else:
str=str+num[i]+" I-jiliang\n"
return str
def Chinese(Str,name):
a=''
if len(Str)==1:
a=Str+' S-'+name+'\n'
else:
for w in range(len(Str)):
if w==0:
a=a+Str[w]+' B-'+name+'\n'
elif w==len(Str)-1:
a=a+Str[w]+' E-'+name+'\n'
else:
a=a+Str[w]+' I-'+name+'\n'
return a
def Cut():
str,Medicinal_Name,Medicinal_Other_Name,Medicinal_From,Medicinal_Function,Medicinal_Use_Num,Medicinal_Taste=Open_Data(False)
#str="我是谁"
#str=['金银花,中药名。为忍冬科忍冬属植物忍冬Lonicera japonica Thunb.、华南忍冬Lonicera confusa (Sweet) DC.、菰腺忍冬Lonicera hypoglauca Miq.、黄褐毛忍冬Lonicera fulvotomentosa Hsu et S. C. Cheng的花蕾。植物忍冬多分布于华东、中南、西南及河北、山西、辽宁、陕西、甘肃等地;华南忍冬多分布于广东、广西、海南;菰腺忍冬分布于浙江、安徽、福建、江西、湖北、湖南、广东、广西、四川、贵州、云南、台湾等;黄褐毛忍冬分布于广西、贵州、云南。具有清热解毒之功效。主治外感风热或温病发热,中暑,热毒血痢,痈肿疔疮,喉痹,多种感染性疾病。']
print('获取完成')
Output=[]
#判断是否是中文
bo = re.compile(u'[\u4e00-\u9fa5]')
#判断是否是标点符号
punc=['、',',',':','。',' ']
#数字的处理,遇到非数字时将其输出
Isnum=False
Num=''
Isstr=False
Str=''
q=0
for i in str:
First=True
q=q+1
print(q,'/',7373)
a=(jieba.lcut(i))
for j in a:
d=bo.search(j)
#不是中文
if not d :
#是标点符号
if j in punc:
if Isnum:
p=Numo(Num)
Num=''
Isnum=False
Output.append(p)
Output.append(j+'\n')
#不是标点符号,就是英文或者数字
#是数字
elif j.isdigit():
Num=Num+j
Isnum=True
else:
if Isnum:
p = Numo(Num)
Num = ''
Isnum = False
Output.append(p)
Str=Str+'\n'+j
Isstr=True
#中文处理
else:
if Isnum:
p = Numo(Num)
Num = ''
Isnum = False
Output.append(p)
if First:
if j in Medicinal_Name:
p=Chinese(j,'Medicinal_Name')
Output.append(p)
First=False
elif j in Medicinal_Use_Num:
p=Chinese(j,'Medicinal_Use_Num')
Output.append(p)
elif j in Medicinal_Function:
p = Chinese(j, 'Medicinal_Function')
Output.append(p)
elif j in Medicinal_Taste:
p = Chinese(j, 'Medicinal_Taste')
Output.append(p)
elif j in Medicinal_From:
p = Chinese(j, 'Medicinal_From')
Output.append(p)
elif j in Medicinal_Other_Name:
p = Chinese(j, 'Medicinal_Other_Name')
Output.append(p)
else:
if len(j)==1:
p=j+' S\n'
Output.append(p)
else:
p=''
for w in range(len(j)):
if w==0:
p=p+j[w]+' B\n'
elif w==len(j)-1:
p=p+j[w]+' E\n'
else:
p=p+j[w]+' I\n'
Output.append(p)
print('进行写入')
f = open('../资料/全部/Data.txt', 'a+', encoding='utf-8')
for z in Output:
f.write(z)
print('写入完成')
Cut()
| srx-2000/traditional_Chinese_medicine | 实体标注/伪_实体标注.py | 伪_实体标注.py | py | 9,547 | python | en | code | 69 | github-code | 90 |
17990757489 | import sys
sys.setrecursionlimit(2147483647)
INF=float("inf")
MOD=10**9+7
input=lambda :sys.stdin.readline().rstrip()
def bisection(l,r,f,left=True,discrete=True):
eps=1 if discrete else 10**-12
if((not left)^f(r)): return r if left else r+1
elif(left^f(l)): return l-1 if left else l
while(r-l>eps):
h=(l+r)//2 if discrete else (l+r)/2
if((not left)^f(h)): l=h
else: r=h
return (l+r)/2 if not discrete else l if left else r
from math import ceil
def resolve():
n,a,b=map(int,input().split())
a=a-b
H=[int(input()) for _ in range(n)]
def check(x):
cnt=0
for i in range(n):
cnt+=ceil(max(0,H[i]-b*x)/a)
return cnt<=x
print(bisection(0,max(H),check,left=False))
resolve() | Aasthaengg/IBMdataset | Python_codes/p03700/s079932158.py | s079932158.py | py | 774 | python | en | code | 0 | github-code | 90 |
18107750969 | n = int(input())
*s1, = input().split()
s2 = s1[:]
def bubbleSort(s):
flag = True
while flag:
flag = False
for i in range(n-1):
if int(s[i][1]) > int(s[i+1][1]):
s[i],s[i+1] = s[i+1],s[i]
flag = True
return s
def selectionSort(s):
for i in range(n):
minj = i
for j in range(i+1,n):
if int(s[minj][1]) > int(s[j][1]):
minj = j
s[i],s[minj] = s[minj],s[i]
return s
print(' '.join(bubbleSort(s1)))
print("Stable")
print(' '.join(selectionSort(s2)))
if s1 == s2:
print("Stable")
else:
print("Not stable")
| Aasthaengg/IBMdataset | Python_codes/p02261/s905976896.py | s905976896.py | py | 645 | python | en | code | 0 | github-code | 90 |
29450560909 | #!/usr/bin/env python
from parseTweet import parse_tweets
from operator import itemgetter
import helper
from collections import defaultdict
"""
makes the various chunks, depending on number of chunks K
"""
def makeChunks(trainingExList, K):
numChunk = (len(trainingExList) / K)+1
dictChunk = []
chunkCounter = 0
for i in range(K):
dictChunk.append(trainingExList[chunkCounter:chunkCounter+numChunk])
chunkCounter+=numChunk
return dictChunk
"""
finds most common sentiment per chunk
"""
def mfsChunk(tweetData, dictChunk, conf):
mfsChunks = {}
for chunk in range(len(dictChunk)):
dictSent = helper.conflate(tweetData, dictChunk[chunk], conf)
mostFreq = helper.MFS(dictSent)
mfsChunks[chunk] = mostFreq
return mfsChunks
"""
Creates a 'test' and 'train' data from the chunks, saved in dictionary
"""
def createSet(i, dictChunk):
training = []
test = []
for j in range(len(dictChunk)):
if i != j:
training += dictChunk[j]
else:
test += dictChunk[i]
toReturn = {}
toReturn['train'] = training
toReturn['test'] = test
return toReturn
"""
Tests accuracy of most frequent sentiment
"""
def testAcc(dictChunk, K, tweetData, CONF):
accTest = {}
for i in range(K):
sets = createSet(i, dictChunk)
s = []
s.append(sets['train'])
trainMFS = mfsChunk(tweetData, s, CONF)
dictSent = helper.conflate(tweetData, sets['test'], CONF)
accTest[i] = dictSent[trainMFS[0]][1]
acc = 0.0
return accTest
| dyelsey/SemEval | crossval.py | crossval.py | py | 1,591 | python | en | code | 0 | github-code | 90 |
44354538268 | #!/usr/bin/python3
import pprint
import time
from datetime import datetime, date, timedelta
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
#############
## Globals ##
#############
pp = pprint.PrettyPrinter(indent=4, compact=False, width=80)
SCOPES = ['https://www.googleapis.com/auth/calendar.events']
CREDENTIALS_FILE = 'credentials.json'
GOOGLE_CALENDAR_ID = 'r9mdghs8cja9p9n5c2ntav7nqc@group.calendar.google.com'
MONDAY_MASK=0x01
TUESDAY_MASK=0x02
WEDNESDAY_MASK=0x04
THURSDAY_MASK=0x08
FRIDAY_MASK=0x10
SATURDAY_MASK=0x20
SUNDAY_MASK=0x40
BIGWEEKEND_MASK=0x61
NORMALDAY_MASK=0x1E
peopleDB = {
'alice': {
},
'bob': {
},
'carol': {
},
'dave': {
},
# 'joe': {
# },
# 'toto': {
# },
# 'titi': {
# },
# 'tata': {
# },
}
calendarDB = {
'currentBigWeekEndPerson': None,
'previousBigWeekEndPerson': None,
'currentWeekFridayPerson': None,
'currentWeekPreviousDayPerson': None
}
startDate = '2021-01-01'
startDateObj = None
endDate = '2021-12-31'
endDateObj = None
deltaDaysObj = None
curDateObj = None
weekNumber = 0
calendar = None
########################
## Internal functions ##
########################
def assign_people_to_support_day(dayNumber, date, dayOfWeekMask, calendarDB, peopleDB, settings):
global calendar
assignedPerson = None
if dayOfWeekMask & MONDAY_MASK:
for person in peopleDB.keys():
peopleDB[person]['supportDaysThisWeek'] = 0
if dayOfWeekMask & BIGWEEKEND_MASK:
if calendarDB['currentBigWeekEndPerson'] != None :
assignedPerson = calendarDB['currentBigWeekEndPerson']
else:
for person in sorted(peopleDB.keys()):
if (
person != calendarDB['currentWeekFridayPerson']
and (assignedPerson == None
or peopleDB[person]['bigWeekEndDays'] < peopleDB[assignedPerson]['bigWeekEndDays']
or peopleDB[person]['supportDaysThisWeek'] < peopleDB[assignedPerson]['supportDaysThisWeek']
)
):
assignedPerson = person
peopleDB[assignedPerson]['bigWeekEndPeriod'] += 1
peopleDB[assignedPerson]['bigWeekEndDays'] += 1
calendarDB['currentBigWeekEndPerson'] = assignedPerson
else:
for person in sorted(peopleDB.keys()):
if (person != calendarDB['currentWeekPreviousDayPerson']
and (dayOfWeekMask & ~TUESDAY_MASK or person != calendarDB['previousBigWeekEndPerson'])
and (assignedPerson == None
or ((dayOfWeekMask & ~FRIDAY_MASK
or peopleDB[person]['bigWeekEndDays'] > peopleDB[assignedPerson]['bigWeekEndDays']
or peopleDB[person]['supportFridayPeriod'] < peopleDB[assignedPerson]['supportFridayPeriod']
)
and peopleDB[person]['supportDaysPeriod'] < peopleDB[assignedPerson]['supportDaysPeriod']
)
)
):
assignedPerson = person
peopleDB[assignedPerson]['supportDaysThisWeek'] += 1
peopleDB[assignedPerson]['supportDaysPeriod'] += 1
calendarDB['currentWeekPreviousDayPerson'] = assignedPerson
print("- Person assigned to day n°%d ('%s') is '%s' [WEDays: %d, WE: %d, Friday: %d, supDayWeek: %d, supDayTotal: %d]"
% (dayNumber, date, assignedPerson,
peopleDB[assignedPerson]['bigWeekEndDays'],
peopleDB[assignedPerson]['bigWeekEndPeriod'],
peopleDB[assignedPerson]['supportFridayPeriod'],
peopleDB[assignedPerson]['supportDaysThisWeek'],
peopleDB[assignedPerson]['supportDaysPeriod']))
calendarBody = {
'summary': assignedPerson,
'start': {
'date': date
},
'end': {
'date': date
},
}
time.sleep(200/1000000.0) # Tempo to avoid Rate Limit Exceeded
event = calendar.events().insert(calendarId=GOOGLE_CALENDAR_ID,
body=calendarBody).execute()
if dayOfWeekMask & MONDAY_MASK:
calendarDB['previousBigWeekEndPerson'] = calendarDB['currentBigWeekEndPerson']
calendarDB['currentBigWeekEndPerson'] = calendarDB['currentWeekFridayPerson'] = None
if dayOfWeekMask & FRIDAY_MASK:
calendarDB['currentWeekFridayPerson'] = assignedPerson
peopleDB[assignedPerson]['supportFridayPeriod'] += 1
def get_calendar_service():
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS_FILE, SCOPES)
creds = flow.run_console()
# creds = flow.run_local_server(port=39666, host='localhost')
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
return service
##########
## MAIN ##
##########
def main():
global startDate
global startDateObj
global endDate
global endDateObj
global deltaDaysObj
global curDateObj
global weekNumber
global calendar
try:
curDateObj = startDateObj = date.fromisoformat(startDate)
except:
print("ERROR: Invalid Start Date: '%s' !" % startDate)
exit(-1)
try:
endDateObj = date.fromisoformat(endDate)
except:
print("ERROR: Invalid End Date: '%s' !" % endDate)
exit(-1)
deltaDaysObj = endDateObj - startDateObj
if deltaDaysObj.days <= 0:
print("ERROR: End Date '%s' is anterior or equal to Start Date '%s' !" % (endDate, startDate))
exit(-1)
if len(peopleDB.keys()) < 3:
print("ERROR: Minimum people in PeopleDB is 3, actual number is %d !" % len(peopleDB.keys()))
exit(-1)
for person in peopleDB.keys():
if ('supportDaysThisWeek' not in peopleDB[person]):
peopleDB[person]['supportDaysThisWeek'] = 0
if ('supportDaysPeriod' not in peopleDB[person]):
peopleDB[person]['supportDaysPeriod'] = 0
if ('bigWeekEndDays' not in peopleDB[person]):
peopleDB[person]['bigWeekEndDays'] = 0
if ('bigWeekEndPeriod' not in peopleDB[person]):
peopleDB[person]['bigWeekEndPeriod'] = 0
if ('supportFridayPeriod' not in peopleDB[person]):
peopleDB[person]['supportFridayPeriod'] = 0
# Get the Google Calendar Service (auth with Oauth2)
calendar = get_calendar_service()
# Call the Calendar API
# now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
# print('Getting the upcoming 10 events')
# events_result = calendar.events().list(calendarId=GOOGLE_CALENDAR_ID, timeMin=now,
# maxResults=10, singleEvents=True,
# orderBy='startTime').execute()
# events = events_result.get('items', [])
# if not events:
# print('No upcoming events found.')
# for event in events:
# start = event['start'].get('dateTime', event['start'].get('date'))
# print(start, event['summary'])
# exit()
for idx in range(deltaDaysObj.days+1):
dayOfWeek = curDateObj.isoweekday()-1
# print ("- Date n°%d: %d/%d/%d (dow: %d)" % (idx+1, curDateObj.year,
# curDateObj.month, curDateObj.day, dayOfWeek))
if dayOfWeek == 0:
weekNumber += 1
print("<====== We are start Week n°%d ======>" % weekNumber)
dayOfWeekMask = 0x01 << dayOfWeek
assign_people_to_support_day(idx+1, curDateObj.isoformat(), dayOfWeekMask, calendarDB, peopleDB, {})
curDateObj = curDateObj + timedelta(days=1)
print("[======================================]")
pp.pprint(peopleDB)
if __name__ == '__main__':
main() | ZeGhost/test-support-calendar | test.py | test.py | py | 8,526 | python | en | code | 0 | github-code | 90 |
575406817 | import models
import schemas
from typing import List
from database import engine, SessionLocal
from fastapi import FastAPI, Depends, status, Response, HTTPException
from sqlalchemy.orm import Session
from Hashing import Hash
app = FastAPI()
models.Base.metadata.create_all(bind=engine)
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
@app.post('/add_details', status_code=status.HTTP_201_CREATED, tags=['Blog'])
def add_blog(request: schemas.Blog, db: Session = Depends(get_db)):
new_blog = models.BlogTable(blog=request.blog, body=request.body, user_id=1)
db.add(new_blog)
db.commit()
db.refresh(new_blog)
return new_blog
@app.get('/get_details', tags=['Blog'], response_model=List[schemas.ShowBlog])
def get_details(db: Session = Depends(get_db)):
get_data = db.query(models.BlogTable).all()
return get_data
@app.get('/get_details_by_id/{id1}', response_model=schemas.ShowBlog, status_code=200, tags=['Blog'])
def get_by_id(id1, response: Response, db: Session = Depends(get_db)):
id_details = db.query(models.BlogTable).filter(models.BlogTable.id == id1).first()
if not id_details:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"The id: {id1} is not found.")
# response.status_code = status.HTTP_404_NOT_FOUND
# return f"The id: {id} is not found."
return id_details
@app.delete('/delete_by_id/{id1}', status_code=status.HTTP_201_CREATED, tags=['Blog'])
def delete_by_id(id1, db: Session = Depends(get_db)):
delete_id = db.query(models.BlogTable).filter(models.BlogTable.id == id1)
if not delete_id.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"This id {id1} is not present")
delete_id.delete(synchronize_session=False)
db.commit()
return "Deleted"
@app.put('/update/{id1}', status_code=status.HTTP_202_ACCEPTED, tags=['Blog'])
def update(id1, request: schemas.Blog, db: Session = Depends(get_db)):
update_username = db.query(models.BlogTable).filter(models.BlogTable.id == id1)
if not update_username.first():
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"This id {id1} is not present")
update_username.update({'blog': request.blog})
db.commit()
return "updated"
@app.post('/create_user', tags=['User'])
def create_user(request: schemas.User, db: Session = Depends(get_db)):
add_user = models.UserTable(username=request.username, email=request.email,
password=Hash.bcrpyt(request.password))
db.add(add_user)
db.commit()
db.refresh(add_user)
return "User added"
@app.get('/show_user', response_model=List[schemas.ShowUser], tags=['User'])
def show_user(db: Session = Depends(get_db)):
show_data = db.query(models.UserTable).all()
return show_data
| Pratik180198/fast-api-demo | New Project/demo.py | demo.py | py | 2,871 | python | en | code | 0 | github-code | 90 |
22811406879 | from synthnet import *
from simulation import *
from detcom import *
import matplotlib.pyplot as plt
import numpy as np
from networkx.generators.community import LFR_benchmark_graph
def af(G):
G = add_feature_vector(G, [('uniform', (0, 1))]*5)
def av(G, l=0, u=1):
G = add_vulnerability_of_node(G, 'uniform', l, u)
def aa(G):
G = add_age_to_edges(G)
def make_init_connections(G, threshold=0.5):
nodes = list(G.nodes)
for i in range(len(nodes)-1):
for j in range(i+1, len(nodes)):
x1, x2 = G.nodes[nodes[i]]['feature'], G.nodes[nodes[j]]['feature']
prod = (x1*x2)/(np.linalg.norm(x1)*np.linalg.norm(x2))
commonality = np.sum(prod)
if(commonality>threshold):
G.add_edge(nodes[i], nodes[j])
def show(G):
nx.draw(G, with_labels=True)
plt.show()
def get_angle(x, y):
return np.sum(x*y)/(np.linalg.norm(x)*np.linalg.norm(y))
def get_commonness(G, u, v):
return get_angle(G.nodes[u]['feature'], G.nodes[v]['feature'])
def get_empty_graph(n):
G = nx.Graph()
G.add_nodes_from(list(range(n)))
return G
def commonness_between(G, group1, group2):
ar = []
for u in group1:
for v in group2:
ar.append(get_commonness(G, u, v))
return ar
def analyse(G, clusters):
commonness = []
for i in range(len(clusters)):
ar = []
for j in range(len(clusters)):
if j < i:
ar.append(None)
else:
ar.append(commonness_between(G, clusters[i], clusters[j]))
commonness.append(ar)
return commonness
def ppanalyse(y, func):
n = len(y)
for i in range(-1, n):
if i<0:
print(" "*5, end=' ')
else:
print(f"{i}".center(5), end=' ')
print()
for i in range(n):
for j in range(-1, n):
if j<0:
print(f"{i}".rjust(5), end=' ')
else:
if y[i][j]:
print(f"{func(y[i][j])} "[:5], end=' ')
else:
print(" "*5, end=' ')
print()
| sagalpreet/Evolution-of-Clusters | Code/usagsp.py | usagsp.py | py | 2,125 | python | en | code | 3 | github-code | 90 |
15212740603 | import os
from waflib import Logs, Utils, Options, TaskGen, Task
from waflib.Errors import WafError
import wutils
def options(opt):
opt = opt.add_option_group ('ccnSIM Options')
opt.add_option('--enable-ccn-plugins',
help="""Enable CCN plugins (may require patching). topology plugin enabled by default""",
dest='enable_ccn_plugins')
opt.add_option('--disable-ccn-plugins',
help="""Enable CCN plugins (may require patching). topology plugin enabled by default""",
dest='disable_ccn_plugins')
opt.add_option('--pyccn-install-path', dest='pyccn_install_path',
help="""Installation path for PyCCN (by default: into standard location under PyCCN folder""")
REQUIRED_BOOST_LIBS = ['graph']
def required_boost_libs(conf):
conf.env.REQUIRED_BOOST_LIBS += REQUIRED_BOOST_LIBS
def configure(conf):
conf.env['ENABLE_CCNSIM']=False;
if not conf.env['LIB_BOOST']:
conf.report_optional_feature("ccnSIM", "ccnSIM", False,
"Required boost libraries not found")
Logs.error ("ccnSIM will not be build as it requires boost libraries of version at least 1.48")
conf.env['MODULES_NOT_BUILT'].append('ccnSIM')
return
else:
present_boost_libs = []
for boost_lib_name in conf.env['LIB_BOOST']:
if boost_lib_name.startswith("boost_"):
boost_lib_name = boost_lib_name[6:]
if boost_lib_name.endswith("-mt"):
boost_lib_name = boost_lib_name[:-3]
present_boost_libs.append(boost_lib_name)
missing_boost_libs = [lib for lib in REQUIRED_BOOST_LIBS if lib not in present_boost_libs]
if missing_boost_libs != []:
conf.report_optional_feature("ccnSIM", "ccnSIM", False,
"ccnSIM requires boost libraries: %s" % ' '.join(missing_boost_libs))
conf.env['MODULES_NOT_BUILT'].append('ccnSIM')
Logs.error ("ccnSIM will not be build as it requires boost libraries: %s" % ' '.join(missing_boost_libs))
Logs.error ("Please upgrade your distribution or install custom boost libraries (http://ccnsim.net/faq.html#boost-libraries)")
return
boost_version = conf.env.BOOST_VERSION.split('_')
if int(boost_version[0]) < 1 or int(boost_version[1]) < 48:
conf.report_optional_feature("ccnSIM", "ccnSIM", False,
"ccnSIM requires at least boost version 1.48")
conf.env['MODULES_NOT_BUILT'].append('ccnSIM')
Logs.error ("ccnSIM will not be build as it requires boost libraries of version at least 1.48")
Logs.error ("Please upgrade your distribution or install custom boost libraries (http://ccnsim.net/faq.html#boost-libraries)")
return
conf.env['CCN_plugins'] = ['topology', 'ip-faces']
if Options.options.enable_ccn_plugins:
conf.env['CCN_plugins'] = conf.env['CCN_plugins'] + Options.options.enable_ccn_plugins.split(',')
if Options.options.disable_ccn_plugins:
conf.env['CCN_plugins'] = conf.env['CCN_plugins'] - Options.options.disable_ccn_plugins.split(',')
if Options.options.pyccn_install_path:
conf.env['PyCCN_install_path'] = Options.options.pyccn_install_path
conf.env['ENABLE_CCNSIM']=True;
conf.env['MODULES_BUILT'].append('ccnSIM')
conf.report_optional_feature("ccnSIM", "ccnSIM", True, "")
def build(bld):
deps = ['core', 'network', 'point-to-point']
deps.append ('internet') # Until RttEstimator is moved to network module
if 'ns3-visualizer' in bld.env['NS3_ENABLED_MODULES']:
deps.append ('visualizer')
if 'topology' in bld.env['CCN_plugins']:
deps.append ('topology-read')
deps.append ('mobility')
if 'mobility' in bld.env['CCN_plugins']:
deps.append ('mobility')
module = bld.create_ns3_module ('ccnSIM', deps)
module.module = 'ccnSIM'
module.features += ' ns3fullmoduleheaders'
module.uselib = 'BOOST BOOST_IOSTREAMS'
headers = bld (features='ns3header')
headers.module = 'ccnSIM'
if not bld.env['ENABLE_CCNSIM']:
bld.env['MODULES_NOT_BUILT'].append('ccnSIM')
return
module.source = bld.path.ant_glob(['model/**/*.cc',
'model/cs/utility-caching/*.cc',
'apps/*.cc',
'utils/**/*.cc',
'helper/**/*.cc',
'ccn.cxx/**/*.cc',
])
module.full_headers = [p.path_from(bld.path) for p in bld.path.ant_glob([
'utils/**/*.h',
'model/**/*.h',
'model/cs/utility-caching/*.h',
'apps/**/*.h',
'helper/**/*.h',
'ccn.cxx/**/*.h',
])]
headers.source = [
"helper/ccn-stack-helper.h",
"helper/ccn-app-helper.h",
"helper/ccn-header-helper.h",
"helper/ccn-face-container.h",
"helper/ccn-global-routing-helper.h",
"helper/ccn-link-control-helper.h",
"apps/ccn-app.h",
"model/ccn-common.h",
"model/ccn-l3-protocol.h",
"model/ccn-face.h",
"model/ccn-app-face.h",
"model/ccn-net-device-face.h",
"model/ccn-common-object.h",
"model/ccn-interest.h",
"model/ccn-interestresponse.h",
"model/ccn-data.h",
"model/ccn-name-components.h",
"model/ccn-name.h",
"ccn.cxx/blob.h",
"ccn.cxx/name-component.h",
"ccn.cxx/name.h",
"ccn.cxx/exclude.h",
"ccn.cxx/ccn-api-face.h",
"model/cs/utility-caching/UtilityFunctions.h",
"model/cs/utility-caching/UtilityCachingHelper.h",
"model/cs/ccn-content-store.h",
"model/sm/ccn-security-module.h",
"model/cs/ccn-content-storage.h",
"model/fib/ccn-fib.h",
"model/fib/ccn-fib-entry.h",
"model/pit/ccn-pit.h",
"model/pit/ccn-pit-entry.h",
"model/pit/ccn-pit-entry-incoming-face.h",
"model/pit/ccn-pit-entry-outgoing-face.h",
"model/fw/ccn-forwarding-strategy.h",
"model/fw/ccn-fw-tag.h",
"model/wire/icnx/tlv_base.h",
"model/wire/icnx/tlv_endiandata.h",
"model/wire/icnx/tlv_rawdata.h",
"model/wire/icnx/tlv_container.h",
"model/wire/icnx/tlv_factory.h",
"model/wire/icnx/ccn-1.0_tlv-id.h",
"model/wire/icnx/RangeDataTemplate.h",
"model/wire/icnx/ccn-1.0_interest_tlv.h",
"model/wire/icnx/ccn-1.0_interestresponse_tlv.h",
"model/wire/icnx/ccn-1.0_interestresp_returncode_tlv.h",
"model/wire/icnx/ccn-1.0_content_tlv.h",
"model/wire/icnx/ccn-1.0_expiry_tlv.h",
"model/wire/icnx/ccn-1.0_cachetime_tlv.h",
"model/wire/icnx/ccn-1.0_intlife_tlv.h",
"model/wire/icnx/ccn-1.0_name_tlv.h",
"model/wire/icnx/ccn-1.0_payload_tlv.h",
"model/wire/icnx/ccn-1.0_interest_hop_tlv.h",
"model/wire/icnx/ccn-1.0_app_tlv.h",
"model/wire/icnx/ccn-1.0_mesg_tlv.h",
"model/wire/icnx/ccn-1.0_payldtype_tlv.h",
"model/wire/icnx/ccn-1.0_ipidm_tlv.h",
"model/wire/icnx/ccn-1.0_namesegment_tlv.h",
"model/wire/icnx/ccn-1.0_pad_tlv.h",
"model/wire/icnx/ccn-1.0_keyidrestr_tlv.h",
"model/wire/icnx/ccn-1.0_ipid_tlv.h",
"model/wire/icnx/ccn-1.0_flow_tlv.h",
"model/wire/icnx/ccn-1.0_objhashrestr_tlv.h",
"model/wire/icnx/ccn-1.0_cert_tlv.h",
"model/wire/icnx/ccn-1.0_crc32_tlv.h",
"model/wire/icnx/ccn-1.0_crc32c_tlv.h",
"model/wire/icnx/ccn-1.0_ec_secp256k1_tlv.h",
"model/wire/icnx/ccn-1.0_ec_secp384r1_tlv.h",
"model/wire/icnx/ccn-1.0_hmac_sha256_tlv.h",
"model/wire/icnx/ccn-1.0_keyname_tlv.h",
"model/wire/icnx/ccn-1.0_keyid_tlv.h",
"model/wire/icnx/ccn-1.0_link_tlv.h",
"model/wire/icnx/ccn-1.0_publickey_tlv.h",
"model/wire/icnx/ccn-1.0_publickeylocation_tlv.h",
"model/wire/icnx/ccn-1.0_rfc793_tlv.h",
"model/wire/icnx/ccn-1.0_rsa_sha256_tlv.h",
"model/wire/icnx/ccn-1.0_sigtime_tlv.h",
"model/wire/icnx/ccn-1.0_validationalg_tlv.h",
"model/wire/icnx/ccn-1.0_validationalgpayld_tlv.h",
"model/wire/icnx/ccn-1.0_vmac128_tlv.h",
"model/wire/ccn-wire.h",
"utils/ccn-limits.h",
"utils/ccn-rtt-estimator.h",
"model/wire/icnx/pugixml.h",
"model/wire/icnx/pugiconfig.h",
# "utils/tracers/ipv4-app-tracer.h",
# "utils/tracers/ipv4-l3-tracer.h",
# "utils/tracers/ipv4-rate-l3-tracer.h",
# "utils/tracers/ipv4-seqs-app-tracer.h",
"utils/tracers/l2-rate-tracer.h",
"utils/tracers/l2-tracer.h",
"utils/tracers/ccn-app-delay-tracer.h",
"utils/tracers/ccn-cs-tracer.h",
"utils/tracers/ccn-l3-aggregate-tracer.h",
"utils/tracers/ccn-l3-tracer.h",
"utils/tracers/ccn-l3-rate-tracer.h",
"apps/callback-based-app.h",
]
if 'topology' in bld.env['CCN_plugins']:
headers.source.extend ([
"plugins/topology/rocketfuel-weights-reader.h",
"plugins/topology/annotated-topology-reader.h",
])
module.source.extend (bld.path.ant_glob(['plugins/topology/*.cc']))
module.full_headers.extend ([p.path_from(bld.path) for p in bld.path.ant_glob(['plugins/topology/**/*.h'])])
if 'mobility' in bld.env['CCN_plugins']:
headers.source.extend ([
"plugins/mobility/spring-mobility-model.h",
"plugins/mobility/spring-mobility-helper.h",
])
module.source.extend (bld.path.ant_glob(['plugins/mobility/*.cc']))
module.full_headers.extend ([p.path_from(bld.path) for p in bld.path.ant_glob(['plugins/mobility/**/*.h'])])
if 'ip-faces' in bld.env['CCN_plugins']:
headers.source.extend ([
"plugins/ip-faces/ccn-ip-faces-helper.h",
])
module.source.extend (bld.path.ant_glob(['plugins/ip-faces/*.cc']))
module.full_headers.extend ([p.path_from(bld.path) for p in bld.path.ant_glob(['plugins/ip-faces/**/*.h'])])
# bld.install_files('${INCLUDEDIR}/%s%s/ns3/ccnSIM' % (wutils.APPNAME, wutils.VERSION), ccnSIM_headers, relative_trick=True)
# bld.install_files('$PREFIX/include', ccnSIM_headers)
tests = bld.create_ns3_module_test_library('ccnSIM')
tests.source = bld.path.ant_glob('test/*.cc')
if bld.env.ENABLE_EXAMPLES:
bld.recurse ('examples')
bld.recurse ('tools')
bld.ns3_python_bindings()
if bld.env['ENABLE_PYTHON_BINDINGS']:
if bld.env['PyCCN_install_path']:
bld (features = "py",
source = bld.path.ant_glob (["PyCCN/**/*.py"]),
install_from = "PyCCN",
install_path = bld.env['PyCCN_install_path'])
@TaskGen.feature('ns3fullmoduleheaders')
@TaskGen.after_method('process_rule')
def apply_ns3fullmoduleheaders(self):
    """Waf task generator: mirror every header listed in ``self.full_headers``
    into the build's ``ns3/`` include directory, preserving the path relative
    to ``src/``, and (in install mode) schedule it for installation under
    ``${INCLUDEDIR}/<app><version>/ns3/<relpath>``.
    """
    # Directory node for the aggregated ns3 headers inside the build tree.
    ns3_dir_node = self.bld.path.find_or_declare("ns3")
    mode = getattr(self, "mode", "install")  # NOTE(review): unused — task.mode is re-read below
    # set() deduplicates headers listed more than once.
    for filename in set(self.to_list(self.full_headers)):
        src_node = self.path.find_resource(filename)
        if src_node is None:
            raise WafError("source ns3 header file %s not found" % (filename,))
        # Destination mirrors the header's location relative to src/.
        dst_node = ns3_dir_node.find_or_declare(src_node.path_from(self.bld.path.find_dir('src')))
        assert dst_node is not None
        relpath = src_node.parent.path_from(self.bld.path.find_dir('src'))
        task = self.create_task('ns3header')
        task.mode = getattr(self, 'mode', 'install')
        if task.mode == 'install':
            self.bld.install_files('${INCLUDEDIR}/%s%s/ns3/%s' % (wutils.APPNAME, wutils.VERSION, relpath),
                                   [src_node])
            task.set_inputs([src_node])
            task.set_outputs([dst_node])
        else:
            # Uninstall/clean mode: tell the task which copied header to drop.
            task.header_to_remove = dst_node
| chris-wood/SCoNet | ns-3-dev/src/ccnSIM/wscript | wscript | 12,022 | python | en | code | 0 | github-code | 90 | |
42106072020 | import random
print("Let's play Rock Paper Scissors!")
play_again = "Y"
options = ['r','p','s']
options_dict = {'r':'rock', 'p':'paper', 's':'scissors'}
# Numeric ranks used by the win test below: r=1, p=2, s=3.
conditions_dict = {'r': 1, 'p': 2, 's': 3}
user_win_count = 0
computer_win_count = 0
# Main game loop: repeats until the player answers anything other than Y/y.
while play_again == "Y" or play_again == "y":
    selection = input("Make your Choice: (r)ock, (p)aper, or (s)cissors?\n")
    computer_selection = random.choice(options)
    # Re-prompt until the player types one of the three valid letters.
    while selection != 'r' and selection != 'p' and selection != 's':
        selection = input("Please choose from 'r', 'p', 's'\n")
    # Reveal both hands.
    print(f"You picked {options_dict[selection]}. The computer picked {options_dict[computer_selection]}.")
    # Decide the round: equal ranks tie; otherwise the player wins exactly when
    # the computer's rank is one below theirs modulo 3 (rock beats scissors,
    # paper beats rock, scissors beats paper).
    if conditions_dict[selection] == conditions_dict[computer_selection]:
        print("It's a tie!")
    elif (conditions_dict[selection] + 2) % 3 == conditions_dict[computer_selection] % 3:
        print("You won! Take that, computer!")
        user_win_count += 1
    else:
        print("You lost!")
        computer_win_count += 1
    print(f"You: {user_win_count} - Computer: {computer_win_count}")
    # Ask to play again; the answer is re-checked by the while condition.
play_again = input("Press [Y] to play again!\n") | DenverSherman/terminal_games | rock_paper_scissors/rock_paper_scissors.py | rock_paper_scissors.py | py | 1,210 | python | en | code | 0 | github-code | 90 |
6242569695 | from os import name
from seaborn.matrix import heatmap
import streamlit as st
import numpy as np
import pandas as pd
import pydeck as pdk
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans, DBSCAN
from urllib.error import URLError
from PIL import Image
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from multipage import MultiPage
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from multipage import MultiPage
from sklearn.metrics import classification_report, confusion_matrix, mean_squared_error
import plotly.figure_factory as ff
from tensorflow.keras.models import load_model
from sklearn.svm import SVR
from sklearn.feature_selection import SelectKBest, mutual_info_regression, RFE
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_squared_error,mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
from boruta import BorutaPy
from imblearn.over_sampling import SMOTE
# import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import roc_curve, auc
def app():
# =============================================================================
# LOADING DATA AND PREPROCESSING
# =============================================================================
cases_malaysia = pd.read_csv('./cases/epidemic/cases_malaysia.csv')
cases_state = pd.read_csv('./cases/epidemic/cases_state.csv')
clusters = pd.read_csv('./cases/epidemic/clusters.csv')
deaths_malaysia = pd.read_csv('./cases/epidemic/deaths_malaysia.csv')
deaths_state = pd.read_csv('./cases/epidemic/deaths_state.csv')
hospital = pd.read_csv('./cases/epidemic/hospital.csv')
icu = pd.read_csv('./cases/epidemic/icu.csv')
pkrc = pd.read_csv('./cases/epidemic/pkrc.csv')
tests_malaysia = pd.read_csv('./cases/epidemic/tests_malaysia.csv')
tests_state = pd.read_csv('./cases/epidemic/tests_state.csv')
vax_malaysia = pd.read_csv('./vaccination/vaccination/vax_malaysia.csv')
vax_state = pd.read_csv('./vaccination/vaccination/vax_state.csv')
vaxreg_malaysia = pd.read_csv('./vaccination/registration/vaxreg_malaysia.csv')
vaxreg_state = pd.read_csv('./vaccination/registration/vaxreg_state.csv')
population = pd.read_csv('./vaccination/static/population.csv')
checkins = pd.read_csv('./cases/mysejahtera/checkin_malaysia.csv')
income = pd.read_csv('./vaccination/static/income.csv')
global_datasets = pd.read_csv('./global_datasets/owid-covid-data.csv')
aefi = pd.read_csv('./cases/vaccination/aefi.csv')
trace_malaysia = pd.read_csv('./cases/mysejahtera/trace_malaysia.csv')
trace_malaysia.fillna(0,inplace=True)
trace_malaysia.drop_duplicates(inplace=True)
# cluster columns are irrelevant, remove them
cases_malaysia.drop(columns=['cluster_import', 'cluster_religious', 'cluster_community', 'cluster_highRisk', 'cluster_education', 'cluster_detentionCentre', 'cluster_workplace'], inplace=True)
# other dates with a null value, just drop that row
cases_malaysia.fillna(0, inplace=True)
# cases_malaysia.head()
cases_state.drop_duplicates(inplace=True)
cases_state.fillna(0, inplace=True)
cases_state_pivoted = cases_state.pivot(index='date', columns='state', values='cases_new')
clusters.drop_duplicates(inplace=True)
deaths_malaysia.drop_duplicates(inplace=True)
deaths_malaysia.drop(columns=['deaths_bid', 'deaths_new_dod', 'deaths_bid_dod', 'deaths_pvax', 'deaths_fvax', 'deaths_tat'], inplace=True)
deaths_state.drop_duplicates(inplace=True)
deaths_state_pivoted = deaths_state.pivot(index='date', columns='state', values='deaths_new')
hospital.drop_duplicates(inplace=True)
hospital.drop(columns=['beds', 'beds_noncrit', 'admitted_pui', 'admitted_total', 'discharged_pui', 'discharged_total','hosp_pui','hosp_noncovid'], inplace=True)
icu.drop_duplicates(inplace=True)
icu.drop(columns=['beds_icu', 'beds_icu_rep', 'beds_icu_total', 'vent', 'vent_port', 'icu_pui','icu_noncovid','vent_pui','vent_noncovid','vent_used','vent_port_used'], inplace=True)
pkrc.drop_duplicates(inplace=True)
pkrc.drop(columns=['beds', 'admitted_pui', 'admitted_total', 'discharge_pui', 'discharge_total', 'pkrc_pui','pkrc_noncovid'], inplace=True)
tests_malaysia.drop_duplicates(inplace=True)
tests_malaysia['total_testing'] = tests_malaysia['rtk-ag'] + tests_malaysia['pcr']
tests_malaysia.drop(columns=['rtk-ag', 'pcr'], inplace=True)
vax_malaysia.drop_duplicates(inplace=True)
vax_malaysia['cumul_vaccine'] = vax_malaysia['daily_full'].cumsum()
vax_malaysia_all_attributes = vax_malaysia.copy()
# total up first and second dose
vax_malaysia_all_attributes['pfizer'] = vax_malaysia_all_attributes['pfizer1'] + vax_malaysia_all_attributes['pfizer2']
vax_malaysia_all_attributes['astra'] = vax_malaysia_all_attributes['astra1'] + vax_malaysia_all_attributes['astra2']
vax_malaysia_all_attributes['sinovac'] = vax_malaysia_all_attributes['sinovac1'] + vax_malaysia_all_attributes['sinovac2']
vax_malaysia.drop(columns=['daily_partial_child','cumul_partial','cumul_full','cumul','cumul_partial_child','cumul_full_child','pfizer1','pfizer2','sinovac1','sinovac2','astra1','astra2','cansino','pending'], inplace=True)
vax_state.drop_duplicates(inplace=True)
vax_state.drop(columns=['daily_partial_child', 'daily_full_child','cumul_partial','cumul_full','cumul','cumul_partial_child','cumul_full_child','pfizer1','pfizer2','sinovac1','sinovac2','astra1','astra2','cansino','pending'], inplace=True)
vaxreg_malaysia.drop_duplicates(inplace=True)
vaxreg_malaysia.drop(columns=['phase2', 'mysj','call','web','children','elderly','comorb','oku'], inplace=True)
vaxreg_state.drop_duplicates(inplace=True)
vaxreg_state.drop(columns=['phase2', 'mysj','call','web','children','elderly','comorb','oku'], inplace=True)
population.drop(columns=['pop_18', 'pop_60'], inplace=True)
income = income[income['Year'] == 2020]
income.rename(columns={'Country/State': 'state', 'Mean Monthly Household Gross Income': 'income', 'Year': 'year'}, inplace=True)
global_datasets.fillna(0, inplace=True)
global_datasets.drop_duplicates(inplace=True)
global_datasets.drop(columns=['iso_code', 'continent','new_cases_smoothed','new_deaths_smoothed','new_cases_smoothed_per_million',
'new_deaths_smoothed_per_million','reproduction_rate','icu_patients','icu_patients_per_million','hosp_patients',
'hosp_patients_per_million','weekly_icu_admissions','weekly_icu_admissions_per_million','weekly_hosp_admissions',
'weekly_hosp_admissions_per_million','new_tests_smoothed','total_boosters','new_vaccinations_smoothed',
'total_boosters_per_hundred','new_vaccinations_smoothed_per_million','stringency_index','median_age',
'aged_65_older','aged_70_older','gdp_per_capita','extreme_poverty','cardiovasc_death_rate','diabetes_prevalence',
'female_smokers','male_smokers','handwashing_facilities','hospital_beds_per_thousand','life_expectancy','human_development_index',
'excess_mortality_cumulative_absolute','excess_mortality_cumulative','excess_mortality','excess_mortality_cumulative_per_million',
], inplace=True)
st.markdown('''
## Classification
''')
st.markdown('''
### Can we classify individual check-ins in Malaysia into groups (Low, Medium and High)?
We first do feature selection using Boruta, SMOTE the dataset then evaluate Random Forest Classifier, Logistic Regression and the Naive Bayes classifier.
''')
cases_testing_deaths_vax_checkins = cases_malaysia.merge(tests_malaysia, on='date')
cases_testing_deaths_vax_checkins = cases_testing_deaths_vax_checkins.merge(deaths_malaysia, on='date')
cases_testing_deaths_vax_checkins = cases_testing_deaths_vax_checkins.merge(vax_malaysia[['date', 'daily_full']], on='date')
cases_testing_deaths_vax_checkins = cases_testing_deaths_vax_checkins.merge(checkins[['date', 'unique_ind']], on='date')
cases_testing_deaths_vax_checkins['ind_checkins_class'] = pd.cut(cases_testing_deaths_vax_checkins['unique_ind'], 3, labels=['Low', 'Medium', 'High'])
cases_testing_deaths_vax_checkins.drop(['unique_ind'], axis=1, inplace=True)
X = cases_testing_deaths_vax_checkins.drop(columns=['date', 'ind_checkins_class'])
X_scaler = MinMaxScaler()
X_scaled = X_scaler.fit_transform(X)
y_encoder = LabelEncoder()
y = cases_testing_deaths_vax_checkins['ind_checkins_class']
y_encoded = y_encoder.fit_transform(y)
features = ["cases_new", "cases_import", "cases_recovered", "cases_active", "cases_cluster", "cases_pvax", "cases_fvax", "cases_child","cases_adolescent", "cases_adult", "cases_elderly", "total_testing", "deaths_new", "daily_full"]
feat_display = st.multiselect('Optimal Feature Set: ', features, default=features)
filtered = cases_testing_deaths_vax_checkins[features]
filtered['ind_checkins_class'] = y_encoded
filtered['date'] = cases_testing_deaths_vax_checkins['date']
filtered['date'] = pd.to_datetime(filtered['date'])
filtered.set_index('date', inplace=True)
# SMOTE dataset
# X_scaler = MinMaxScaler()
X = filtered.drop(columns=['ind_checkins_class'])
y = filtered['ind_checkins_class']
# train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
smt = SMOTE(random_state=42, k_neighbors=3)
X_smt, y_smt = smt.fit_resample(X_train, y_train)
classification_model = st.selectbox('Which classification model do you want to test?', ['Random Forest Classifier', 'Logistic Regression', 'Naive Bayes'])
if classification_model == 'Random Forest Classifier':
# Random Forest Classifier
best_params = {'criterion': 'entropy', 'max_depth': 7, 'max_features': 'auto', 'n_estimators': 100}
rf = RandomForestClassifier(**best_params)
rf.fit(X_smt, y_smt)
# get score
accuracy = rf.score(X_test, y_test)
# F1-Score
f1 = f1_score(y_test, rf.predict(X_test), average='weighted')
st.write(f"Accuracy of Random Forest: {accuracy}")
st.write(f"Weighted Averaged F1-Score of Random Forest: {f1}")
y_pred = rf.predict(X_test)
# plot confusion matrix with plotly
cf = ff.create_annotated_heatmap(z=confusion_matrix(y_test, y_pred).T, x=['High', 'Low', 'Medium'], y=['True High', 'True Low', 'True Medium'], annotation_text=confusion_matrix(y_test, y_pred).T, colorscale='Viridis', showscale=True)
st.plotly_chart(cf)
clf = OneVsRestClassifier(rf)
clf.fit(X_smt, y_smt)
pred = clf.predict(X_test)
pred_prob = clf.predict_proba(X_test)
fpr = {}
tpr = {}
thresh = {}
n_class = 3
for i in range(n_class):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test, pred_prob[:, i], pos_label=i)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='High')
ax.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Low')
ax.plot(fpr[2], tpr[2], linestyle='--',color='blue', label='Medium')
ax.legend()
st.pyplot(fig)
elif classification_model == 'Logistic Regression':
log = LogisticRegression()
log.fit(X_smt, y_smt)
accuracy = log.score(X_test, y_test)
f1 = f1_score(y_test, log.predict(X_test), average='weighted')
st.write(f"Accuracy of Logistic Regression: {accuracy}")
st.write(f"Weighted Averaged F1-Score of Logistic Regression: {f1}")
# classification report
y_pred = log.predict(X_test)
# plot confusion matrix with plotly
cf = ff.create_annotated_heatmap(z=confusion_matrix(y_test, y_pred).T, x=['High', 'Low', 'Medium'], y=['True High', 'True Low', 'True Medium'], annotation_text=confusion_matrix(y_test, y_pred).T, colorscale='Viridis', showscale=True)
st.plotly_chart(cf)
clf = OneVsRestClassifier(log)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
pred_prob = clf.predict_proba(X_test)
fpr = {}
tpr = {}
thresh = {}
n_class = 3
for i in range(n_class):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test, pred_prob[:, i], pos_label=i)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='High')
ax.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Low')
ax.plot(fpr[2], tpr[2], linestyle='--',color='blue', label='Medium')
ax.legend()
st.pyplot(fig)
else:
gnb = GaussianNB()
gnb.fit(X_smt, y_smt)
accuracy = gnb.score(X_test, y_test)
f1 = f1_score(y_test, gnb.predict(X_test), average='weighted')
st.write(f"Accuracy of Naive Bayes: {accuracy}")
st.write(f"Weighted Averaged F1-Score of Naive Bayes: {f1}")
# classification report
y_pred = gnb.predict(X_test)
# plot confusion matrix with plotly
cf = ff.create_annotated_heatmap(z=confusion_matrix(y_test, y_pred).T, x=['High', 'Low', 'Medium'], y=['True High', 'True Low', 'True Medium'], annotation_text=confusion_matrix(y_test, y_pred).T, colorscale='Viridis', showscale=True)
st.plotly_chart(cf)
clf = OneVsRestClassifier(gnb)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
pred_prob = clf.predict_proba(X_test)
fpr = {}
tpr = {}
thresh = {}
n_class = 3
for i in range(n_class):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test, pred_prob[:, i], pos_label=i)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='High')
ax.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Low')
ax.plot(fpr[2], tpr[2], linestyle='--',color='blue', label='Medium')
ax.legend()
st.pyplot(fig)
st.markdown('''
### Can we predict the type of vaccine based on the symptoms?
Some vaccines produce more of a certain symptom than others. Hence, would it be possible to predict whether the vaccine is Pfizer, Sinovac, Astra, etc. based purely on the symptoms reported each day.
We use self-reported symptoms for each vaccine daily as the training data. Appropriate hyperparameter tuning is done using GridSearchCV for the Random Forest Classifier. Both Logistic Regression and the Support Vector Classifier are evaluated for this question using the metrics accuracy and weighted averaged F1-Score. The training set is SMOTE-d.
Feature selection (symptoms) is done using Recursive Feature Elimination.''')
vaccine_prediction = aefi.copy()
vaccine_prediction['vaxtype_label'] = LabelEncoder().fit_transform(vaccine_prediction['vaxtype'])
vaccine_prediction.drop(columns=['daily_total'], inplace=True)
X_scaler = MinMaxScaler()
X = vaccine_prediction.drop(columns=['date', 'vaxtype', 'vaxtype_label'])
X_scaled = X_scaler.fit_transform(X)
y = vaccine_prediction['vaxtype_label']
logreg = LogisticRegression()
rfe = RFE(logreg, 20)
rfe = rfe.fit(X_scaled, y)
X_transformed = pd.DataFrame(rfe.transform(X_scaled), columns=X.columns[rfe.support_])
y_encoder = LabelEncoder()
X = vaccine_prediction.drop(columns=['date', 'vaxtype', 'vaxtype_label'])
y = y_encoder.fit_transform(vaccine_prediction['vaxtype'])
features = ['daily_nonserious_mysj', 'daily_nonserious_npra', 'daily_serious_npra', 'daily_nonserious_mysj_dose1', 'd1_site_pain', 'd1_site_swelling', 'd1_site_redness', 'd1_headache', 'd1_muscle_pain', 'd1_joint_pain', 'd1_weakness', 'd1_fever', 'd1_chills', 'd1_rash', 'd2_site_pain', 'd2_site_swelling', 'd2_headache', 'd2_joint_pain', 'd2_fever', 'd2_chills']
feat_display = st.multiselect('Optimal Feature Set: ', features, default=features)
X_transformed = X[features]
X_scaled = X_scaler.fit_transform(X_transformed)
# X_transformed = pd.DataFrame(rfe.transform(X_scaled), columns=X.columns[rfe.support_])
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
smt = SMOTE(random_state=42, k_neighbors=3)
X_smt, y_smt = smt.fit_resample(X_train, y_train)
classification_model2 = st.selectbox('Which classification model do you want to test?', ['Logistic Regression', 'Support Vector Classification'])
if classification_model2 == 'Logistic Regression':
logreg = LogisticRegression()
logreg.fit(X_smt, y_smt)
accuracy = logreg.score(X_test, y_test)
f1 = f1_score(y_test, logreg.predict(X_test), average='weighted')
st.write(f"Accuracy of Logistic Regression: {accuracy}")
st.write(f"Weighted Averaged F1-Score of Logistic Regression: {f1}")
y_pred = logreg.predict(X_test)
# confusion matrix
cf = ff.create_annotated_heatmap(z=confusion_matrix(y_test, y_pred).T, x=['Pfizer', 'Sinovac', 'Astrazeneca', 'Cansino'], y=['True Pfizer', 'True Sinovac', 'True Astrazeneca', 'True Cansino'], annotation_text=confusion_matrix(y_test, y_pred).T, colorscale='Viridis', showscale=True)
st.plotly_chart(cf)
clf = OneVsRestClassifier(logreg)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
pred_prob = clf.predict_proba(X_test)
fpr = {}
tpr = {}
thresh = {}
n_class = 4
for i in range(n_class):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test, pred_prob[:, i], pos_label=i)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='Astrazeneca')
ax.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Cansino')
ax.plot(fpr[2], tpr[2], linestyle='--',color='blue', label='Pfizer')
ax.plot(fpr[3], tpr[3], linestyle='--',color='red', label='Sinovac')
ax.legend()
st.pyplot(fig)
elif classification_model2 == 'Support Vector Classification':
# defining parameter range
best_params = {'C': 1000, 'gamma': 1, 'kernel': 'rbf'}
# fitting the model for grid search
svc = SVC(**best_params, probability=True)
svc.fit(X_smt, y_smt)
st.write(f'Best Model {svc}')
accuracy = svc.score(X_test, y_test)
f1 = f1_score(y_test, svc.predict(X_test), average='weighted')
st.write(f"Accuracy of Support Vector Regression: {accuracy}")
st.write(f"Weighted Averaged F1-Score of Support Vector Regression: {f1}")
y_pred = svc.predict(X_test)
# confusion matrix
cf = ff.create_annotated_heatmap(z=confusion_matrix(y_test, y_pred).T, x=['Pfizer', 'Sinovac', 'Astrazeneca', 'Cansino'], y=['True Pfizer', 'True Sinovac', 'True Astrazeneca', 'True Cansino'], annotation_text=confusion_matrix(y_test, y_pred).T, colorscale='Viridis', showscale=True)
st.plotly_chart(cf)
clf = OneVsRestClassifier(svc)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
pred_prob = clf.predict_proba(X_test)
fpr = {}
tpr = {}
thresh = {}
n_class = 4
for i in range(n_class):
fpr[i], tpr[i], thresh[i] = roc_curve(y_test, pred_prob[:, i], pos_label=i)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(fpr[0], tpr[0], linestyle='--',color='orange', label='Astrazeneca')
ax.plot(fpr[1], tpr[1], linestyle='--',color='green', label='Cansino')
ax.plot(fpr[2], tpr[2], linestyle='--',color='blue', label='Pfizer')
ax.plot(fpr[3], tpr[3], linestyle='--',color='red', label='Sinovac')
ax.legend()
st.pyplot(fig) | sidharrth2002/covid-streamlit-2 | pages/Classification.py | Classification.py | py | 20,753 | python | en | code | 2 | github-code | 90 |
13025006643 | # STATEMENT: https://www.codingame.com/training/medium/the-grand-festival---i
import sys
import math
# Total prize starting on day d with rest r.
def prize(d,r):
if(d == N):
return 0
if(r == 0):
return prize(d+1,R)
if memo[d+1][R] == -1:
prize_excluded = prize(d+1,R)
memo[d+1][R] = prize_excluded
else:
prize_excluded = memo[d+1][R]
if memo[d+1][r-1] == -1:
prize_included = p[d] + prize(d+1,r-1)
memo[d+1][r-1] = prize_included
else:
prize_included = memo[d+1][r-1]
if(prize_excluded < prize_included):
print(d, file=sys.stderr)
return max(prize_excluded, prize_included)
if __name__ == "__main__":
    # Puzzle input: N days, rest limit R, then one prize value per day.
    N = int(input())
    R = int(input())
    p = []
    for i in range(N):
        p.append(int(input()))
    # DP cache over (day, remaining plays); -1 marks "not computed yet".
    memo = [[-1] * (R+1) for _ in range(N+1)]
print(prize(0,R)) | gabrielrodcanal/codingame-sols | grand-festival-i.py | grand-festival-i.py | py | 893 | python | en | code | 1 | github-code | 90 |
6316396741 | import keras.backend as K
from utils import data_generator
from tcn import tcn
def get_activations(model, model_inputs, print_shape_only=False, layer_name=None):
    """Feed *model_inputs* through *model* and return a list of per-layer
    activation arrays (all layers, or only those named *layer_name*).

    Each layer output is printed as it is collected — only its shape when
    *print_shape_only* is set.  The learning phase is pinned to 0 (test
    mode: no dropout / batch-norm updates).
    """
    print('----- activations -----')

    raw_input = model.input
    has_multiple_inputs = isinstance(raw_input, list)
    # K.function wants a list of input tensors, so wrap a lone input.
    input_tensors = raw_input if has_multiple_inputs else [raw_input]

    selected_outputs = [
        layer.output
        for layer in model.layers
        if layer_name is None or layer.name == layer_name
    ]
    # One backend evaluation function per selected layer output.
    evaluators = [
        K.function(input_tensors + [K.learning_phase()], [out])
        for out in selected_outputs
    ]

    # Trailing 0. is the learning-phase flag (0 = test mode).
    if has_multiple_inputs:
        feed = list(model_inputs) + [0.]
    else:
        feed = [model_inputs, 0.]

    activations = []
    for evaluate in evaluators:
        layer_activations = evaluate(feed)[0]
        activations.append(layer_activations)
        print(layer_activations.shape if print_shape_only else layer_activations)
    return activations
# np.sum(activations[15].squeeze(), axis=1)
def run_task():
    """Train a dilated TCN on the 10-class pixel-sequence task from
    ``utils.data_generator`` and report validation accuracy each epoch."""
    (x_train, y_train), (x_test, y_test) = data_generator()
    # Build the network; each sample is a length-max_len sequence of one
    # scalar feature, classified into 10 classes.
    model, param_str = tcn.dilated_tcn(output_slice_index='last',  # try 'first'.
                                       num_feat=1,
                                       num_classes=10,
                                       nb_filters=64,
                                       kernel_size=8,
                                       dilatations=[1, 2, 4, 8],
                                       nb_stacks=8,
                                       max_len=x_train[0:1].shape[1],
                                       activation='norm_relu',
                                       use_skip_connections=False,
                                       return_param_str=True)
    print(f'x_train.shape = {x_train.shape}')
    print(f'y_train.shape = {y_train.shape}')
    print(f'x_test.shape = {x_test.shape}')
    print(f'y_test.shape = {y_test.shape}')
    model.summary()
    # squeeze().argmax(axis=1) converts the labels to integer class ids
    # (presumably from one-hot vectors — confirm against data_generator).
    model.fit(x_train, y_train.squeeze().argmax(axis=1), epochs=100,
              validation_data=(x_test, y_test.squeeze().argmax(axis=1)))
if __name__ == '__main__':
run_task()
| cxz/keras-tcn | mnist_pixel/main.py | main.py | py | 2,584 | python | en | code | null | github-code | 90 |
74736464937 | import os,time
pid = os.getpid()
import pyjit
#os.chdir("C:\\Dev\\PyJit\\buildtest")
srcs=["helloworld.cpp"]
#srcs=["*.cpp>[cppcompiler.cpp,buildsystem.cpp]"]
src =pyjit.expand(srcs)
target_name ="hello_pyjit"
output_dir ="."
target_type ="exe"
target_lang = "c++"
m1 = [("YAML_CPP_STATIC_DEFINE",None),("WIN32",1)]
def direct_build():
    """Compile and link the sources straight through distutils' compiler
    driver, bypassing pyjit.build — kept as a debugging alternative.

    Reads the module-level config (srcs, target_name, output_dir, m1).
    NOTE(review): distutils was removed from the stdlib in Python 3.12,
    and "/std:c++17" is an MSVC-style flag — this path is Windows-only.
    """
    from distutils import ccompiler
    compiler = ccompiler.new_compiler()
    from distutils import sysconfig
    sysconfig.customize_compiler(compiler)
    output_file ="C:\\Dev\\PyJit\\buildtest\\out\\"+target_name
    objs = compiler.compile(srcs,debug=True,output_dir=output_dir,macros = m1,extra_preargs=["/std:c++17"])
    # NOTE(review): ret is assigned but never checked.
    ret = compiler.link_executable(objs,output_file)
#direct_build()
pyjit.build(srcs=srcs,
target_name=target_name,
target_type = target_type,
target_lang = target_lang,
debug=True,
#macros = m1,
#link_extra_preargs=["-lstdc++"],
output_dir=output_dir)
print("end")
| Galaxy3DVision-Inc/PyJit | buildtest/simple.py | simple.py | py | 1,024 | python | en | code | 0 | github-code | 90 |
46181072400 | """Paranthesis checker."""
from stack import Stack
def paranthesis_checker(symbol_string):
    """Return True if all brackets in *symbol_string* are balanced.

    Supports the three pairs (), [] and {}; every other character is
    ignored.  A plain list serves as the stack of open brackets, replacing
    the hand-rolled Stack class and the index-and-flag loop of the
    original (same results, simpler and dependency-free).
    """
    closer_to_opener = {')': '(', ']': '[', '}': '{'}
    open_brackets = []
    for symbol in symbol_string:
        if symbol in "([{":
            open_brackets.append(symbol)
        elif symbol in ")]}":
            # A closer must match the most recent unmatched opener.
            if not open_brackets or open_brackets.pop() != closer_to_opener[symbol]:
                return False
    # Balanced only if every opener found its closer.
    return not open_brackets
def matches(open_paran, close_paran):
    """Return True when *close_paran* is the closing bracket that pairs
    with *open_paran* (the two alphabets are index-aligned)."""
    open_order = "([{".index(open_paran)
    close_order = ")]}".index(close_paran)
    return open_order == close_order
if __name__ == '__main__':
    # Quick demo: one balanced and one unbalanced sample string.
    st = '{{([][])}()}'
    print(f'Is {st} balanced?', paranthesis_checker(st))
    st = '[{()]'
    print(f'Is {st} balanced?', paranthesis_checker(st))
| dhanraju/python | data_structures/probl_sol_with_algs_and_ds/ch03_basic_ds/paranthesis_checker.py | paranthesis_checker.py | py | 1,618 | python | en | code | 0 | github-code | 90 |
13000161809 | ones = {
0: "",
1: "one",
2: "two",
3: "three",
4: "four",
5: "five",
6: "six",
7: "seven",
8: "eight",
9: "nine"
}
# Irregular words for 10-19, keyed by the ones digit (teens[3] -> "thirteen").
teens = {
    0: "ten",
    1: "eleven",
    2: "twelve",
    3: "thirteen",
    4: "fourteen",
    5: "fifteen",
    6: "sixteen",
    7: "seventeen",
    8: "eighteen",
    9: "nineteen"
}
# Whole-tens words; key 1 is deliberately absent (10-19 use `teens`) and
# key 0 maps to "" so a zero tens digit adds nothing.
tens = {
    0: "",
    2: "twenty",
    3: "thirty",
    4: "forty",
    5: "fifty",
    6: "sixty",
    7: "seventy",
    8: "eighty",
    9: "ninety"
}
def num_to_string(num):
    """Spell *num* (0-999) in English with no spaces, Project-Euler style.

    Uses the module-level ``ones``/``teens``/``tens`` word tables and
    inserts "and" between the hundreds part and a non-zero remainder
    (e.g. 342 -> "threehundredandfortytwo").
    """
    hundreds_digit, remainder = divmod(num, 100)
    tens_digit, ones_digit = divmod(remainder, 10)

    parts = []
    if hundreds_digit > 0:
        parts.append(ones[hundreds_digit] + "hundred")
        if remainder > 0:
            parts.append("and")
    if tens_digit == 1:
        # 10-19 have irregular words keyed by the ones digit.
        parts.append(teens[ones_digit])
    else:
        parts.append(tens[tens_digit])
        parts.append(ones[ones_digit])
    return "".join(parts)
# Project Euler 17: count the letters used to write out 1..1000.
# "onethousand" seeds the total; num_to_string covers 1-999 (no spaces/hyphens).
number_name = "onethousand"
for i in range(1, 1000):
    number_name += num_to_string(i)
print(len(number_name))
| jwmortensen/project-euler | 017/num_to_text.py | num_to_text.py | py | 1,013 | python | en | code | 0 | github-code | 90 |
17939534428 | from typing import List, Tuple
import numpy as np
import pandas as pd
# Numpy scalar types whose columns count as "continuous"; anything else
# (object, bool, datetime, categorical, ...) is treated as categorical.
NUMERIC_DTYPES = [
    np.float64, np.float32, np.float16,
    np.int64, np.int32, np.int16, np.int8,
    np.uint8, np.uint16, np.uint32, np.uint64,
    np.complex64, np.complex128,
]


def split_columns_types(data_frame: pd.DataFrame) -> Tuple[List[int], List[int]]:
    """Split the series of a pandas dataframe by kind (continuous/categorical).

    Arguments
    ---------
    data_frame:
        frame whose columns are classified by their numpy dtype

    Returns
    -------
    tuple:
        two lists of positional column indices, respectively for the
        continuous (numeric) and the categorical variables
    """
    continuous: List[int] = []
    categorical: List[int] = []
    for position, name in enumerate(data_frame.columns):
        bucket = continuous if data_frame[name].dtype in NUMERIC_DTYPES else categorical
        bucket.append(position)
    return continuous, categorical
| octopize/avatar-python | avatars/lib/split_columns_types.py | split_columns_types.py | py | 931 | python | en | code | 1 | github-code | 90 |
71546438377 | for _ in range(int(input())):
    # One test string per case: keep only the first occurrence of each char.
    s=input()
    arr=[0]*257  # seen-flags indexed by ord(ch); sized for 8-bit codes (257 = one spare slot)
    ans=""
    for i in range(len(s)):
        ind=ord(s[i])
        if(arr[ind]==0):
            # First time this character appears: mark it and keep it.
            arr[ind]=1
            ans+=s[i]
print(ans) | anirudhkannanvp/GeeksForGeeksSolutions | remove-duplicates.py | remove-duplicates.py | py | 205 | python | en | code | 0 | github-code | 90 |
18212749739 | import sys
# S: the current name; T: the proposed new ID.
S = input()
T = input()
# T is valid iff it is S with exactly one extra character appended,
# i.e. dropping T's last character recovers S.
if S == T[:-1]:
    print("Yes")
    sys.exit()
else:
print("No") | Aasthaengg/IBMdataset | Python_codes/p02681/s035110291.py | s035110291.py | py | 196 | python | ja | code | 0 | github-code | 90 |
25958502434 | import node
# Smoke-test for the project-local Node class (linked-list building block).
node1 = node.Node("John")
print(node1.get_name())
print(node1.get_next())  # fresh node: no successor linked yet (presumably None — verify node.py)
node2 = node.Node("Krish")
node1.set_next(node2)
print(node1.get_next().get_name())
node3 = node.Node("Anything")
node2.set_next(node3)
# Walk the chain John -> Krish -> Anything, collecting nodes named "Krish".
print("while loop")
new_node_list = []
n = node1
while n is not None:
    print(n.get_name())
    if n.get_name() == "Krish":
        new_node_list.append(n)
    n = n.next  # NOTE(review): bypasses get_next() used above — confirm .next is meant to be public
print ("print filter")
for x in new_node_list:
    print(x.get_name())
| krishras23/WebApp | testnode.py | testnode.py | py | 474 | python | en | code | 0 | github-code | 90 |
14796108109 | import os, random, torch
import argparse
import multiprocessing as mp
import numpy as np
import pickle
import shutil
from functools import partial
import lmdb
from tqdm.auto import tqdm
from utils.data import PDBProtein, parse_sdf_file
from scripts.binana_script.detect_interactions import run_binana_command
# Three-letter amino-acid code -> one-letter symbol (20 standard residues).
AA_NAME_SYM = {
    'ALA': 'A', 'CYS': 'C', 'ASP': 'D', 'GLU': 'E', 'PHE': 'F', 'GLY': 'G', 'HIS': 'H',
    'ILE': 'I', 'LYS': 'K', 'LEU': 'L', 'MET': 'M', 'ASN': 'N', 'PRO': 'P', 'GLN': 'Q',
    'ARG': 'R', 'SER': 'S', 'THR': 'T', 'VAL': 'V', 'TRP': 'W', 'TYR': 'Y',
}
# Index -> three-letter code, in AA_NAME_SYM insertion order (0 = ALA, ...).
AA_NUMBER = {i: k for i, (k, _) in enumerate(AA_NAME_SYM.items())}
# Atomic number -> element symbol for the ligand atoms handled here.
LIG_ATOM = {5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 17: 'Cl', 15: 'P', 16: 'S', 35: 'Br', 53: 'I'}
# binana interaction key -> short interaction name ('saltBridges' intentionally unmapped).
INTERACTION = {'cationPiInteractions': 'cation_pi', 'halogenBonds': 'halogen', 'hydrogenBonds': 'hydrogen', 'piPiStackingInteractions': 'pi-pi'}
# '<residue>_<interaction>' -> prompt id; 44 ('None') is the fallback bucket.
# NOTE(review): process_interactions builds keys from the short names above
# ('TYR_cation_pi', 'TYR_pi-pi'), which never match the 'TYR_caption'/'TYR_pi'
# spellings below — cation-pi and pi-pi hits therefore always fall back to 44.
# Confirm which spelling is intended.
interaction_prompt = {
    'ALA_halogen': 0, 'ALA_hydrogen': 1, 'ASP_halogen': 2, 'ASP_hydrogen': 3, 'GLU_halogen': 4, 'GLU_hydrogen': 5,
    'CYS_halogen': 6, 'CYS_hydrogen': 7, 'GLY_halogen': 8, 'GLY_hydrogen': 9, 'HIS_halogen': 10, 'HIS_hydrogen': 11,
    'ILE_halogen': 12, 'ILE_hydrogen': 13, 'LYS_halogen': 14, 'LYS_hydrogen': 15, 'LEU_halogen': 16, 'LEU_hydrogen': 17,
    'MET_halogen': 18, 'MET_hydrogen': 19, 'ASN_halogen': 20, 'ASN_hydrogen': 21, 'PRO_halogen': 22, 'PRO_hydrogen': 23,
    'GLN_halogen': 24, 'GLN_hydrogen': 25, 'ARG_halogen': 26, 'ARG_hydrogen': 27, 'SER_halogen': 28, 'SER_hydrogen': 29,
    'THR_halogen': 30, 'THR_hydrogen': 31, 'VAL_halogen': 32, 'VAL_hydrogen': 33, 'TRP_halogen': 34, 'TRP_hydrogen': 35,
    'TYR_caption': 36, 'TYR_halogen': 37, 'TYR_hydrogen': 38, 'TYR_pi': 39, 'PHE_caption': 40, 'PHE_halogen': 41, 'PHE_hydrogen': 42, 'PHE_pi': 43,
    'None': 44
}
def load_item(item, path):
    """Read one (protein, ligand) pair from disk.

    *item* is a sequence whose first two entries are the protein PDB and
    ligand SDF filenames relative to *path*; returns their full text
    contents as a ``(pdb_block, sdf_block)`` tuple.
    """
    def read_text(relative_name):
        # Resolve against the dataset root and slurp the whole file.
        with open(os.path.join(path, relative_name), 'r') as handle:
            return handle.read()

    return read_text(item[0]), read_text(item[1])
def process_interactions(data):
    """Map each interacting pocket residue to an interaction-prompt id.

    data: binana result dict keyed by interaction type, each entry a list of
    sub-interactions with 'receptorAtoms' (each atom: resID, resName).
    Returns {res_id: prompt_id}. Only sub-interactions whose receptor atoms
    all belong to a single residue are kept, and each residue keeps the
    first interaction type found (in the order listed below).
    """
    interactions = ['cationPiInteractions', 'halogenBonds', 'hydrogenBonds', 'piPiStackingInteractions', 'saltBridges']
    interactions = interactions[:-1]  # saltBridges dropped: no prompt ids defined for it
    all_residue_ids = set()
    residues = {}
    for inter in interactions:
        for sub_inter in data[inter]:
            # sub_inter keys: ligandAtoms, metrics, receptorAtoms
            res_ids_, res_names_ = [], []
            for res_atom in sub_inter['receptorAtoms']:
                res_ids_.append(res_atom['resID'])
                res_names_.append(res_atom['resName'])
            if len(np.unique(res_ids_)) == 1 and np.unique(res_ids_).item() not in all_residue_ids:
                res_id = np.unique(res_ids_).item()
                res_name = np.unique(res_names_).item()
                brief_inter = INTERACTION[inter]
                prompt_id = interaction_prompt.get(res_name + '_' + brief_inter, 44)
                residues[res_id] = prompt_id
                # BUG FIX: set.union() returns a new set and the original call
                # discarded it, so all_residue_ids stayed empty and later
                # interaction types silently overwrote earlier prompt ids.
                # add() mutates in place so the first interaction found wins.
                all_residue_ids.add(res_id)
    return residues
def process_item(item, args):
    """Extract the binding pocket for one (protein, ligand) index entry and
    detect protein-ligand interactions with binana.

    item: (i, index_record) -- index_record[0]/[1] are the protein PDB and
    ligand SDF paths relative to args.source_data_path; index_record[2] is
    the rmsd field carried through unchanged.
    Returns ((pocket_fn, ligand_fn, protein_fn, rmsd), payload). On success
    payload is (ligand_dict, selected_residues, interactions); on any
    failure pocket_fn is None and payload is None.
    """
    i, item = item
    try:
        pdb_block, sdf_block = load_item(item, args.source_data_path)
        protein = PDBProtein(pdb_block)
        ligand_dict = parse_sdf_file(os.path.join(args.source_data_path, item[1]))
        # Keep only residues within args.radius of the ligand and write them
        # out as the pocket PDB next to a copy of the ligand file.
        selected_residues = protein.query_residues_ligand_(ligand_dict, args.radius)
        pdb_block_pocket = protein.residues_to_pdb_block(selected_residues)
        ligand_fn = item[1]
        pocket_fn = ligand_fn[:-4] + '_pocket%d.pdb' % args.radius
        ligand_dest = os.path.join(args.save_pocket_path, ligand_fn)
        pocket_dest = os.path.join(args.save_pocket_path, pocket_fn)
        os.makedirs(os.path.dirname(ligand_dest), exist_ok=True)
        shutil.copyfile(
            src=os.path.join(args.source_data_path, ligand_fn),
            dst=os.path.join(args.save_pocket_path, ligand_fn)
        )
        with open(pocket_dest, 'w') as f:
            f.write(pdb_block_pocket)
        # detect interaction (str(i) keys the per-item binana temp files)
        interactions = run_binana_command((pocket_dest, ligand_dest, args.temp_path, str(i)))
        interactions = process_interactions(interactions)
        return (pocket_fn, ligand_fn, item[0], item[2]), (ligand_dict, selected_residues, interactions)  # item[0]: original protein filename; item[2]: rmsd.
    except Exception as e:
        # NOTE(review): every failure is swallowed and signalled only via
        # pocket_fn=None; the bound exception `e` is never logged. Consider
        # logging it when debugging missing dataset entries.
        return (None, item[1], item[0], item[2]), None
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--source_data_path', type=str, default='../interdiff_data/crossdocked_v1.1_rmsd1.0')
    parser.add_argument('--save_pocket_path', type=str, default='../interdiff_data/crossdocked_v1.1_rmsd1.0_pocket_')
    parser.add_argument('--temp_path', type=str, default='../interdiff_data/temp')
    parser.add_argument('--save_db_path', type=str, default='../interdiff_data/pocket_prompt_test.lmdb')
    parser.add_argument('--radius', type=int, default=8)
    parser.add_argument('--num_workers', type=int, default=16)
    args = parser.parse_args()
    os.makedirs(args.save_pocket_path, exist_ok=True)
    os.makedirs(args.temp_path, exist_ok=True)
    # index.pkl lists all (protein, ligand, rmsd, ...) records to process.
    with open(os.path.join(args.source_data_path, 'index.pkl'), 'rb') as f:
        index = pickle.load(f)
    db = lmdb.open(
        args.save_db_path,
        map_size=int(10 * (1024 * 1024 * 1024)),   # 10GB
        create=False,
        subdir=False,
        readonly=False, # Writable
        lock=False,
        readahead=False,
        meminit=False,
    )
    txn = db.begin(write=True, buffers=True)
    pool = mp.Pool(args.num_workers)
    index_pocket = []
    # Extract pockets in parallel; each worker returns the pocket metadata
    # plus the parsed dicts needed to assemble one LMDB record.
    for i, item_pockets in enumerate(tqdm(pool.imap_unordered(partial(process_item, args=args), enumerate(index)), total=len(index))):
        item_pocket, protein_ligand_dicts = item_pockets
        data = {}
        if protein_ligand_dicts:
            index_pocket.append(item_pocket)
            # added to lmdb
            ligand_dict, selected_residues, prompt = protein_ligand_dicts
            pocket_dict = PDBProtein.residues_to_dict_atom_(selected_residues)
            data['protein_filename'] = item_pocket[0]
            data['ligand_filename'] = item_pocket[1]
            if pocket_dict is not None:
                for key, value in pocket_dict.items():
                    data['protein_' + key] = value
            if ligand_dict is not None:
                for key, value in ligand_dict.items():
                    data['ligand_' + key] = value
            # add the interaction prompt to every protein atom (atoms of
            # non-interacting residues keep prompt id 0)
            interactions = np.zeros(len(data['protein_element']), dtype=np.int64)
            for res_id_orig, interactions_type in prompt.items():
                interactions = np.where(data['protein_res_id_orig'] == res_id_orig, interactions_type, interactions)
            data['interactions'] = interactions
            # save
            txn.put(
                key=str(i).encode(),
                value=pickle.dumps(data)
            )
    txn.commit()
    pool.close()
    index_path = os.path.join(args.save_pocket_path, 'index.pkl')
    with open(index_path, 'wb') as f:
        pickle.dump(index_pocket, f)
    print('Done. %d protein-ligand pairs in total.' % len(index_pocket))
    # split train/test data
    with db.begin() as txn:
        keys = list(txn.cursor().iternext(values=False))
    # NOTE(review): random.sample is unseeded in this file, so the saved
    # train/test split is not reproducible across runs -- confirm whether a
    # seed is set elsewhere before relying on it.
    train = random.sample(range(len(keys)), int(0.998 * len(keys)))
    test = list(set(range(len(keys))).difference(set(train)))
    # train nums: 165640 , test nums 332
    print('train nums: {} , test nums {}'.format(len(train), len(test)))
    torch.save({'train': train, 'test': test}, '../interdiff_data/prompt_split.pt')
    db.close()
| zephyrdhb/InterDiff | scripts/data_preparation/extract_pockets_prompts.py | extract_pockets_prompts.py | py | 8,008 | python | en | code | 3 | github-code | 90 |
23175961141 | import asyncio
from bergen.registries.ward import get_ward_registry
from bergen.wards.default import MainWard
from bergen.config.types import ArkitektConfig, HerreConfig
from bergen.schemas.herre.types import User
from bergen.schema import Transcript
from bergen.hookable.base import Hooks
from pydantic.main import BaseModel
from typing import Dict
from bergen.enums import ClientType, HostProtocol, PostmanProtocol, ProviderProtocol
from bergen.logging import setLogging
from bergen.auths.base import BaseAuthBackend
from bergen.wards.base import BaseWard
from bergen.postmans.base import BasePostman
import logging
from bergen.console import console
from rich.panel import Panel
from rich.table import Table
from rich import pretty
pretty.install()
from rich.traceback import install
install()
logger = logging.getLogger(__name__)
import os
class SafetyError(Exception):
    """Raised when a cancellation escapes up to the Bergen context manager
    instead of being handled inside the user's own code."""
class BaseBergen:
def __init__(self, auth: BaseAuthBackend, config: ArkitektConfig ,
bind=True,
log=logging.INFO,
client_type: ClientType = ClientType.CLIENT,
log_stream=False,
auto_connect=False,
capture_exceptions=False,
**kwargs) -> None:
setLogging(log, log_stream=log_stream)
if bind:
# We only import this here for typehints
from bergen.registries.client import set_current_client
set_current_client(self)
self.auth = auth
self.auth.login()
self.config = config
self.client_type = client_type
self.capture_exceptions=False
self.registered_hooks = Hooks()
self.host = config.host
self.port = config.port
self.is_iternal = config.internal
self.protocol = "https" if config.secure else "http"
self._transcript = None
self.identifierDataPointMap = {}
self.identifierWardMap: Dict[str, BaseWard] = {}
self._provider = None
self._entertainer = None
self.negotiate_additionals = {}
try:
self.loop = asyncio.get_event_loop()
except RuntimeError:
console.log("[yellow] Creating New EventLoop in This Thread")
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
if auto_connect:
self.negotiate()
@property
def transcript(self):
assert self._transcript is not None, "We have to negotiate first with our"
return self._transcript
def getWardForIdentifier(self, identifier):
if identifier in ["node","template","pod"]:
return self.main_ward
if self._transcript is None:
raise Exception("Not Negotiated yet!")
if identifier in self.identifierWardMap:
return self.identifierWardMap[identifier]
else:
raise Exception(f"Couldn't find a Ward/Datapoint for Model {identifier}, this mostly results from importing a schema that isn't part of your arkitekts configuration ..Check Documentaiton")
    def getPostmanFromSettings(self, transcript: Transcript):
        """Build the postman chosen by the negotiated transcript.

        Supports RabbitMQ (aio_pika) and websocket transports; the transport
        packages are imported lazily so missing optional dependencies only
        fail when actually selected.
        """
        settings = transcript.postman
        if settings.type == PostmanProtocol.RABBITMQ:
            try:
                from bergen.postmans.pika import PikaPostman
                postman = PikaPostman(self, **settings.kwargs, hooks=self.registered_hooks)
            except ImportError as e:
                logger.error("You cannot use the Pika Postman without installing aio_pika")
                raise e
        elif settings.type == PostmanProtocol.WEBSOCKET:
            try:
                from bergen.postmans.websocket import WebsocketPostman
                postman = WebsocketPostman(self, **settings.kwargs, hooks=self.registered_hooks)
            except ImportError as e:
                logger.error("You cannot use the Websocket Postman without installing websockets")
                raise e
        else:
            raise Exception(f"Postman couldn't be configured. No Postman for type {settings.type}")
        return postman

    def getProviderFromSettings(self, transcript: Transcript):
        """Build the provider client from the transcript (websocket only)."""
        settings = transcript.provider
        if settings.type == ProviderProtocol.WEBSOCKET:
            try:
                from bergen.provider.websocket import WebsocketProvider
                provider = WebsocketProvider(self, **settings.kwargs, hooks=self.registered_hooks)
            except ImportError as e:
                logger.error("You cannot use the Websocket Provider without installing websockets")
                raise e
        else:
            raise Exception(f"Provider couldn't be configured. No Provider for type {settings.type}")
        return provider

    def getEntertainerFromSettings(self, transcript: Transcript):
        """Build the entertainer (host-side) client from the transcript
        (websocket only)."""
        settings = transcript.host
        if settings.type == HostProtocol.WEBSOCKET:
            try:
                from bergen.entertainer.websocket import WebsocketEntertainer
                provider = WebsocketEntertainer(self, **settings.kwargs, hooks=self.registered_hooks)
            except ImportError as e:
                logger.error("You cannot use the Websocket Entertainer without installing websockets")
                raise e
        else:
            raise Exception(f"Entertainer couldn't be configured. No Entertainer for type {settings.type}")
        return provider
    async def negotiate_async(self):
        """Perform the Arkitekt handshake and wire up all sub-clients.

        Connects the main ward, runs the negotiate mutation, creates one
        ward per negotiated datapoint, registers the negotiated models, and
        then connects postman / entertainer / provider according to the
        client type. Finishes by printing the connection summary table.
        """
        from bergen.schemas.arkitekt.mutations.negotiate import NEGOTIATION_GQL
        # Instantiate our Main Ward, this is only for Nodes and Pods
        self.main_ward = MainWard(self)
        await self.main_ward.configure()
        # We resort escalating to the different client Type protocols
        self._transcript = await NEGOTIATION_GQL.run(ward=self.main_ward, variables={"clientType": self.client_type, "internal": self.is_iternal, **self.negotiate_additionals})
        #Lets create our different Wards
        assert self._transcript.models is not None, "We apparently didnt't get any points"
        assert self._transcript.wards is not None, "We apparently didnt't get any wards"
        ward_registry = get_ward_registry()
        ward_registry.set_base(self.main_ward)
        for ward in self._transcript.wards:
            ward_registry.create_ward(self, ward)
        for model in self._transcript.models:
            ward_registry.register_model(model)
        # Negotating Extensions for the Datapoints
        self._extensions = await ward_registry.configure_wards()
        self.postman = self.getPostmanFromSettings(self._transcript)
        await self.postman.connect()
        # Hosts and providers additionally need the entertainer channel;
        # providers also get the provider channel.
        if self.client_type in [ClientType.PROVIDER, ClientType.HOST]:
            self._entertainer = self.getEntertainerFromSettings(self._transcript)
            await self._entertainer.connect()
        if self.client_type == ClientType.PROVIDER:
            self._provider = self.getProviderFromSettings(self._transcript)
            await self._provider.connect()
        self.log_table()
    def log_table(self):
        """Pretty-print a connection summary (Arkitekt config, auth config
        minus secrets, and the negotiated per-service extensions)."""
        table = Table.grid()
        table.add_column()
        table.add_column()
        table.add_row()
        arkitekt_table = Table(title="Arkitekt", show_header=False)
        for key, value in self.config.dict().items():
            arkitekt_table.add_row(key,str(value))
        herre_table = Table(title="Herre", show_header=False)
        for key, value in self.auth.config.dict().items():
            if "secret" in key: continue  # never print client secrets
            herre_table.add_row(key, str(value))
        extensions_table = Table.grid()
        row = []
        # NOTE(review): the inner loop re-binds `key`/`value` while iterating
        # a value of the outer dict -- works because the outer bindings are
        # not used afterwards, but the shadowing is easy to trip over.
        for key, value in self._extensions.items():
            if isinstance(value, dict):
                extensions_table.add_column()
                sub_table = Table(title=key, show_header=False)
                for key, value in value.items():
                    sub_table.add_row(key, str(value))
                row.append(sub_table)
        extensions_table.add_row(*row)
        table.add_row("Welcome to Arnheim")
        table.add_row(herre_table, arkitekt_table)
        table.add_row()
        if self.client_type in [ClientType.PROVIDER, ClientType.HOST]:
            table.add_row("We are Connected as a [bold]Host[/bold]")
        if self.client_type in [ClientType.PROVIDER]:
            table.add_row("We are Connected as a [bold]Provider[/bold]")
        table.add_row()
        table.add_row("[green]Connected :pile_of_poo:")
        table.add_row(extensions_table)
        console.print(Panel(table, title="Arkitekt"))
async def disconnect_async(self, client_type=None):
await self.main_ward.disconnect()
if self.postman: await self.postman.disconnect()
if self._provider: await self._provider.disconnect()
if self._entertainer: await self._entertainer.disconnect()
ward_registry = get_ward_registry()
await asyncio.gather(*[ward.disconnect() for ward in ward_registry.wards])
print("Sucessfulyl disconnected")
def negotiate(self):
assert not self.loop.is_running(), "You cannot negotiate with an already running Event loop, please ue negotiate_async"
self.loop.run_until_complete(self.negotiate_async())
def getUser(self) -> User:
return self.auth.getUser()
def getExtensions(self, service: str) -> dict:
"""Returns the negotiated Extensions fromt he serive
Args:
service (str): The clearlabel Datapoint (e.g. Elements)
Returns:
dict: A dict of the extensions
"""
assert self._extensions[service] is not None, "There are no extensions registered for this Service and this App (see negotiate)"
return self._extensions[service]
def getWard(self) -> BaseWard:
return self.main_ward
def getPostman(self) -> BasePostman:
return self.postman
    def _repr_html_(self):
        """Rich Jupyter representation: a small status table once connected."""
        if not self._transcript: return """Unconnected Client"""
        return f"""
    <p> Arnheim Client <p>
    <table>
        <tr>
            <td> Connected to </td> <td> {self.main_ward.name} </td>
        </tr>
    </table>
        """

    async def __aenter__(self):
        """Async context-manager entry: negotiate and hand back the client."""
        await self.negotiate_async()
        return self
async def __aexit__(self, type, value, traceback):
try:
if value and isinstance(value, asyncio.CancelledError):
raise SafetyError("We caputered a Cancellation at the Bergen Context Level. Please make sure to capture it before in your code. See documentation!") from value#
except Exception as e:
await self.disconnect_async()
if not self.capture_exceptions: raise e
def disconnect(self):
self.loop.run_until_complete(self.disconnect_async())
def __enter__(self):
self.negotiate()
return self
def __exit__(self, *args, **kwargs):
self.disconnect()
@property
def provider(self):
if self._provider:
return self._provider
else:
raise Exception("We are not in Provider mode")
@property
def entertainer(self):
if self._entertainer:
return self._entertainer
else:
raise Exception("We are not in Enterainer mode")
def hook(self, hook: str, overwrite=False):
def real_decorator(function):
self.registered_hooks.addHook(hook, function, overwrite=overwrite)
return function
return real_decorator
def enable(self, *args, **kwargs):
if self._provider:
return self.provider.enable(*args, **kwargs)
else:
raise Exception("We are not in Provider Mode")
def template(self, *args, **kwargs):
if self._provider:
return self.provider.template(*args, **kwargs)
else:
raise Exception("We are not in Provider Mode")
async def provide_async(self):
return await self.provider.provide_async()
def provide(self):
return self.provider.provide()
| jhnnsrs/bergen | bergen/clients/base.py | base.py | py | 12,589 | python | en | code | 0 | github-code | 90 |
8131651935 | import streamlit as st
import os
def set_page_title(title):
    """Set the browser tab title of the hosting Streamlit page.

    Streamlit has no direct API for the tab title, so this injects a
    zero-height iframe whose script rewrites the parent document's <title>
    and installs a MutationObserver (replacing any previous one) so the
    title sticks even when Streamlit tries to reset it.
    """
    st.sidebar.markdown(unsafe_allow_html=True, body=f"""
        <iframe height=0 srcdoc="<script>
            const title = window.parent.document.querySelector('title') \
            const oldObserver = window.parent.titleObserver
            if (oldObserver) {{
                oldObserver.disconnect()
            }} \
            const newObserver = new MutationObserver(function(mutations) {{
                const target = mutations[0].target
                if (target.text !== '{title}') {{
                    target.text = '{title}'
                }}
            }}) \
            newObserver.observe(title, {{ childList: true }})
            window.parent.titleObserver = newObserver \
            title.text = '{title}'
        </script>" />
    """)
def read(path):
    """Return the full text content of *path*, creating an empty file first
    if it does not exist yet."""
    if not os.path.exists(path):
        write(path, '')
    # FIX: dropped a leftover debug print of the whole file content and a
    # redundant f.close() (the with-block already closes the handle).
    with open(path, 'r') as f:
        return f.read()
def add(path, str):
    """Append *str* to the file at *path* (created if missing).

    The parameter name `str` shadows the builtin but is kept for call
    compatibility.
    """
    # FIX: removed the redundant existence check + write('') pre-create and
    # the redundant f.close() -- open(..., 'a') already creates a missing
    # file, and the with-block closes the handle.
    with open(path, 'a') as f:
        f.write(str)
def clear(path):
    """Truncate the file at *path* to zero length (created if missing)."""
    # FIX: opening with 'w+' already truncates the file, so the explicit
    # truncate(0) and close() calls were redundant.
    with open(path, 'w+'):
        pass
def write(path,str):
    """Overwrite the file at *path* with *str* (created if missing)."""
    with open(path, 'w+') as f:
        f.write(str)
f.close() | cnsdqd-dyb/Yubo_Dong_Work_Share_Platform | scripts/st_temp_scripts.py | st_temp_scripts.py | py | 1,332 | python | en | code | 0 | github-code | 90 |
32888160254 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 17:33:58 2020
@author: Cheng Rong
"""
import numpy as np
import xlrd
import import_data
data=import_data.import_data()
import assignment
from assignment.assign import *
from assignment.line import *
from assignment.graph import *
import random
import copy
import time
import csv
def enumeration ():
    """Read every candidate design from enumeration.xlsx.

    Sheet 'lane' holds one row per candidate lane-label vector and sheet
    'station' one row per candidate station-label vector; both are returned
    as integer numpy arrays (lane, station).
    """
    book = xlrd.open_workbook("enumeration.xlsx")
    table1 = book.sheet_by_name("lane")
    table2 = book.sheet_by_name("station")
    row_Num1 = table1.nrows
    col_Num1 = table1.ncols
    row_Num2 = table2.nrows
    col_Num2 = table2.ncols
    # NOTE(review): np.int was removed in NumPy >= 1.24; this needs an older
    # numpy (or plain `int`) to run.
    lane = np.array(np.zeros((row_Num1,col_Num1)),dtype=np.int)
    station = np.array(np.zeros((row_Num2,col_Num2)),dtype=np.int)
    for i in range(row_Num1):
        for j in range(col_Num1):
            lane[i,j] = int(table1.cell(i,j).value)
    for i in range(row_Num2):
        for j in range(col_Num2):
            station[i,j] = int(table2.cell(i,j).value)
#    print(lane,station)
    return lane,station
def cal_new_cost(_label_station, _label_lane, _cost_station, _cost_lane, _lane, Budget, od_info, od_flow, nt_a, nt_b, UE_converge, sita, fy, demand):
    """Evaluate one candidate design (station + lane label vectors).

    Returns (total_cost, fixed_cost, FW_runtime_seconds, od_flow_bike).
    Designs whose construction cost exceeds Budget are given a huge penalty
    cost instead of running the (expensive) traffic assignment.
    """
    fixed_cost = 0
    _new_cost = 0
    once_FW_time = 0
    od_flow_bike = copy.deepcopy(od_flow)
#    print ("lane = ",_label_lane, " station=", _label_station)
    # Construction cost: cost of every opened station plus every built lane.
    for i in range(len(demand)):
        if _label_station[i] != 0:
            fixed_cost += _cost_station[i]
    for i in _lane:
        if _label_lane[i] != 0:
            fixed_cost += _cost_lane[i]
    if fixed_cost > Budget:
        # Infeasible design: large penalty keeps it out of any minimisation.
        _new_cost =1000000000000000+fixed_cost
    else:
        # time_cost
        time_cost = 0
        No_edge = len(_cost_lane)
        # Rebuild the auto and bike networks for this lane configuration.
        nt_a = data.read_network_auto(nt_a, _label_lane, No_edge)
        nt_b = data.read_network_bike(nt_b, _label_lane, No_edge)
        star_FW = time.time()
#        print("lane=",_label_lane,"station=",_label_station)
        # Frank-Wolfe traffic assignment for autos and bikes.
        vol_a, vol_b, time_cost, od_flow_bike = assignment.assign.FW_main(
            nt_a, nt_b, od_info, od_flow, _label_lane, _label_station, UE_converge, sita, fy, demand)
        # print("od_flow_bike=",od_flow_bike)
        end_FW = time.time()
        once_FW_time = end_FW-star_FW
        # print("fw time=", end_FW-star_FW)
        # if isOutPutDetail:
        #     print("*****motor vehicles*****")
        #     for link in vol_a.keys():
        #         print("{0},{1}".format(link,vol_a[link]))
        #     print("*****bikes*****")
        #     for link in vol_b.keys():
        #         print("{0},{1}".format(link,vol_b[link]))
        _new_cost = time_cost+fixed_cost
    return _new_cost, fixed_cost, once_FW_time, od_flow_bike
def set_Ex_ID(Ex_ID):
    """Look up the experiment configuration for experiment id *Ex_ID*.

    Returns (case_ID, demand_ID, Budget, fy, UE_converge).

    The original 190-line if-chain also assigned sita / Max_gen /
    isOutPutDetail locals in many branches, but those were never returned,
    so only the five returned settings are kept here.  Unknown ids raise
    KeyError (the original raised UnboundLocalError at the return).
    """
    # demand level per experiment: 0 = low, 1 = medium, 2 = high demand
    demand_by_ex = {
        7: 0, 8: 1, 9: 2,
        10: 2, 11: 2, 12: 1, 13: 1, 14: 0, 15: 0,
        16: 0, 17: 1, 18: 2, 19: 0, 20: 1, 21: 2,
        70: 0, 80: 1, 90: 2,
    }
    case_ID = 0          # every experiment uses network case 0
    fy = 2.5             # identical in every branch of the original chain
    UE_converge = 0.001  # identical in every branch of the original chain
    # Experiments 70/80/90 are the budget-constrained runs; all others use
    # an effectively unlimited budget.
    Budget = 1000000 if Ex_ID in (70, 80, 90) else 10000000000
    return case_ID, demand_by_ex[Ex_ID], Budget, fy, UE_converge
#
#def run_enumeration(Ex_ID):
# case_ID,demand_ID,Budget,fy,sita,UE_converge,isOutPutDetail,Max_gen = set_Ex_ID(Ex_ID)
#
#
# #.....................input od_demand, network
## data=import_data.import_data()
# od_info,od_flow=data.read_od(case_ID,demand_ID) #od_info list, od_demand dict
#
# station,lane,cost_lane,cost_station,time_station,demand = data.set_sta_lane(case_ID)
# nt_a,nt_b=data.set_network(case_ID)
# label_lane, label_station = enumeration()
# best_cost,fixcost,FW_time = cal_new_cost(label_station,label_lane,cost_station,cost_lane,lane,time_station,Budget,od_info,od_flow,nt_a,nt_b,UE_converge,sita,fy,demand)
# result = ["{0}{1}".format("Ex ",Ex_ID),best_cost,fixcost,(best_cost-fixcost)/20000,label_lane[i],label_station[j],FW_time]
#
# return result
# Full enumeration over all lane/station candidates, for each experiment in
# run_ex_ID and each dispersion parameter sita in 0.1 .. 5.0 (step 0.1).
# `a` collects every evaluated design, `d` the best design per (Ex_ID, sita).
a=[]
d = []
enum_time = []
run_ex_ID = [70,80,90]
for c in run_ex_ID:
    Ex_ID = c
    print("Ex_ID",Ex_ID)
    case_ID,demand_ID,Budget,fy,UE_converge = set_Ex_ID(Ex_ID)
    od_info,od_flow=data.read_od(case_ID,demand_ID) #od_info list, od_demand dict
    lane,cost_lane,cost_station,demand = data.set_sta_lane(case_ID)
    nt_a,nt_b,net_bike=data.set_network(case_ID)
    lane_set,station_set=enumeration()
    # NOTE(review): np.int was removed in NumPy >= 1.24.
    label_lane=np.array(np.zeros((6)),dtype=np.int)
    label_station = np.array(np.zeros((3)),dtype=np.int)
    start_time = time.time()
    sita = 0
    for k in range(1,51):
        sita = 0.1 * k
        b = []
        # Evaluate every (lane row, station row) combination.
        for i in range(len(lane_set)):
            for j in range(len(station_set)):
                for m in range(6):
                    label_lane[m] = lane_set[i,m]
                for n in range(3):
                    label_station[n] = station_set [j,n]
                best_cost,fixcost,FW_time, od_bike = cal_new_cost(label_station,label_lane,cost_station,cost_lane,lane,Budget,od_info,od_flow,nt_a,nt_b,UE_converge,sita,fy,demand)
                result = []
                result = ["{0}{1}".format("Ex ",Ex_ID),"{0}{1}".format("sita ",sita),best_cost,fixcost,(best_cost-fixcost)/20000,label_lane,label_station,FW_time,od_bike]
                result1 = copy.deepcopy(result)
                a.append(result1)
                b.append(result1)
        # Best design for this sita = minimum total cost (column 2).
        b.sort(key=lambda x:x[2])
#        print(b)
        c1 = []
        c1 = b[0]
        print(c1)
        # NOTE(review): `c` is re-bound here while also serving as the outer
        # run_ex_ID loop variable -- harmless (Ex_ID was saved above) but
        # easy to misread.
        c = copy.deepcopy(c1)
        d.append(c)
    end_time = time.time()
    enum_time.append(end_time-start_time)
# Dump every evaluated design plus the per-experiment wall times.
f = open('enum_solution.csv','w',newline='')
writer = csv.writer(f)
writer.writerow(["Ex_ID","sita","Best_cost","Constr_cost","Travel_time","Best_lane","Best_station","FW_time","Bike_flow"])
for i in range(len(a)):
    writer.writerow(a[i])
writer.writerow([enum_time])
f.close()
# Dump only the best design per (Ex_ID, sita).
f1 = open('sita_sum.csv','w',newline='')
writer1 = csv.writer(f1)
writer1.writerow(["Ex_ID","sita","Best_cost","Constr_cost","Travel_time","Best_lane","Best_station","FW_time","Bike_flow"])
for j in range(len(d)):
    writer1.writerow(d[j])
f1.close()
| hkujy/HHbike | enumeration.py | enumeration.py | py | 11,146 | python | en | code | 0 | github-code | 90 |
18158169639 | #!/usr/bin/python3
#coding: utf-8
S = int(input())
memo = {}
def rec(n):
    """Number of ordered ways to write n as a sum of parts >= 3,
    modulo 1e9+7, memoised in the module-level `memo` dict."""
    if n < 3:
        return 0
    if n in memo:
        return memo[n]
    total = 1  # n taken as a single part
    for remainder in range(1, n - 2):
        total = (total + rec(remainder)) % (10**9 + 7)
    memo[n] = total
    return total
print(rec(S)) | Aasthaengg/IBMdataset | Python_codes/p02555/s452158188.py | s452158188.py | py | 284 | python | en | code | 0 | github-code | 90 |
12181651376 | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 18 23:17:29 2021
@author: Harshu
"""
import requests
# Smoke test: POST to the locally served Flask prediction endpoint and show
# the JSON response.
url = 'http://localhost:5000/results'
r = requests.post(url)
# NOTE(review): no JSON payload is sent (the sample body in this file is
# commented out) -- confirm the endpoint tolerates an empty request body.
print(r.json())
#json={'rate':5, 'sales_in_first_month':200, 'sales_in_second_month':400 | Harshu2032000/Loan-prediction-web-app | request.py | request.py | py | 266 | python | en | code | 0 | github-code | 90 |
18306380569 | import numpy as np
# Read N and the list A of N integers.
N=int(input())
A=np.array([int(x) for x in input().split()])
ans=0
M=pow(10,9)+7
# Per-bit counting: for bit i, each (one, zero) pair of elements contributes
# 2**i to the pairwise XOR sum; accumulate mod 1e9+7.
for i in range(100):
    one=int(np.sum((A>>i)&1))
    zero=N-one
    ans+=(one*zero)*pow(2,i)
    ans%=M
    #print(one,zero)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02838/s066560585.py | s066560585.py | py | 229 | python | en | code | 0 | github-code | 90 |
21897323280 | import ecmcJinja2
import ecmcAxes
def main():
    """
    render axis configuration to `cli.outFile` based on yaml-config `cli.cfgFile`
    The script will lint the input and validate the axis against the configured type
    In case a PLC is defined within the axis config, the PLC will be validated and added to the product.
    """
    cli = ecmcJinja2.JinjaCli()  # parses cfgFile / outFile / templatedir from the CLI
    axis = ecmcAxes.EcmcAxis(cli.cfgFile, cli.templatedir)
    axis.create()
    axis.make()
    # write the rendered axis (plus optional PLC) to the requested output file
    axis.axisTemplate.writeProduct(cli.outFile)


if __name__ == '__main__':
    main()
| paulscherrerinstitute/ecmccfg | scripts/jinja2/axisYamlJinja2.py | axisYamlJinja2.py | py | 550 | python | en | code | 6 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.