hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c3d17483a3af8faee3d6fe2b256913e702cd42e8 | 1,236 | py | Python | LeetCode/1-1000/601-700/601-625/605. Can Place Flowers/solution-python3.py | adubois85/coding_challenge_websites | 7867a05847a216661eff3b24b1cb1480fb7d3030 | [
"Apache-2.0"
] | null | null | null | LeetCode/1-1000/601-700/601-625/605. Can Place Flowers/solution-python3.py | adubois85/coding_challenge_websites | 7867a05847a216661eff3b24b1cb1480fb7d3030 | [
"Apache-2.0"
] | null | null | null | LeetCode/1-1000/601-700/601-625/605. Can Place Flowers/solution-python3.py | adubois85/coding_challenge_websites | 7867a05847a216661eff3b24b1cb1480fb7d3030 | [
"Apache-2.0"
] | null | null | null | from typing import List
class Solution:
# copy / paste from the supposed "fastest" solution on LeetCode
# Sometimes the fastest isn't the best, though, as I find this significantly
# more difficult to parse what's going on than either of my solutions
# Also, submitting to LeetCode showed it wasn't actually faster than mine
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
index = 0
count = 0
while index < (end := len(flowerbed)):
if flowerbed[index] == 1:
index += 2
else:
canPut = False
if index - 1 >= 0 and flowerbed[index - 1] == 0 \
and index + 1 < end and flowerbed[index + 1] == 0:
count += 1
canPut = True
elif index == 0 and (index + 1 >= end or flowerbed[index + 1] == 0):
count += 1
canPut = True
elif index == end - 1 and flowerbed[index - 1] == 0:
count += 1
canPut = True
if canPut:
index += 2
else:
index += 1
return count >= n
| 38.625 | 84 | 0.478964 | 141 | 1,236 | 4.198582 | 0.475177 | 0.091216 | 0.126689 | 0.108108 | 0.277027 | 0.202703 | 0.202703 | 0.202703 | 0.202703 | 0.138514 | 0 | 0.033237 | 0.440129 | 1,236 | 31 | 85 | 39.870968 | 0.822254 | 0.223301 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3d242b3088a9e13dd0d9c61fc0a9a0586b1bef7 | 1,996 | py | Python | sfftkplus/unittests/test_schema.py | emdb-empiar/sfftk-plus | 7ceca24b78c540169bddb3fd433b4aed050f40ec | [
"Apache-2.0"
] | null | null | null | sfftkplus/unittests/test_schema.py | emdb-empiar/sfftk-plus | 7ceca24b78c540169bddb3fd433b4aed050f40ec | [
"Apache-2.0"
] | null | null | null | sfftkplus/unittests/test_schema.py | emdb-empiar/sfftk-plus | 7ceca24b78c540169bddb3fd433b4aed050f40ec | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# test_schema.py
import os
import shlex
import unittest
import h5py
from . import TEST_DATA_PATH
from ..core.parser import parse_args
from ..formats import vtkmesh
from ..schema import SFFPSegmentation
SCHEMA_VERSION = SFFPSegmentation().version
__author__ = "Paul K. Korir, PhD"
__email__ = "pkorir@ebi.ac.uk, paul.korir@gmail.com"
__date__ = "2017-08-17"
class TestSFFPSegmentation(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sff_file = os.path.join(TEST_DATA_PATH, 'sff', 'test_emd_1832.sff')
cls.hff_file = os.path.join(TEST_DATA_PATH, 'sff', 'test_emd_1832.hff')
cls.json_file = os.path.join(TEST_DATA_PATH, 'sff', 'test_emd_1832.json')
def test_read_sff(self):
"""Test that we can read an .sff file"""
sff_segmentation = SFFPSegmentation(self.sff_file)
self.assertEqual(sff_segmentation.version, '0.7.0.dev0')
self.assertEqual(len(sff_segmentation.segments), 6)
def test_read_hff(self):
"""Test that we can read an .hff file"""
# with h5py.File(self.hff_file, u'r') as h:
hff_segmentation = SFFPSegmentation.from_file(self.hff_file)
self.assertEqual(hff_segmentation.version, SCHEMA_VERSION)
self.assertEqual(len(hff_segmentation.segments), 6)
def test_read_json(self):
"""Test that we can read a .json file"""
json_segmentation = SFFPSegmentation.from_json(self.json_file)
self.assertEqual(json_segmentation.version, '0.7.0.dev0')
self.assertEqual(len(json_segmentation.segments), 6)
def test_as_vtk(self):
"""Test that we can convert to a VTK object"""
args, configs = parse_args('createroi file.sff -o file.roi', use_shlex=True)
sff_segmentation = SFFPSegmentation(self.sff_file)
vtk_segmentation = sff_segmentation.as_vtk(args, configs)
self.assertIsInstance(vtk_segmentation, vtkmesh.VTKSegmentation)
| 35.017544 | 84 | 0.688878 | 269 | 1,996 | 4.873606 | 0.312268 | 0.06865 | 0.036613 | 0.042715 | 0.356979 | 0.322654 | 0.193745 | 0.158658 | 0.158658 | 0.091533 | 0 | 0.021277 | 0.199399 | 1,996 | 56 | 85 | 35.642857 | 0.799124 | 0.112725 | 0 | 0.057143 | 0 | 0 | 0.101317 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.142857 | false | 0 | 0.228571 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3d57923087c2261e4fd447792f926798a4e982f | 10,806 | py | Python | Calculator.py | dabl03/python-advanced-calculator | d4d025b1bdea4d9be1cc47dfabc917f579fed3f8 | [
"Apache-2.0"
] | null | null | null | Calculator.py | dabl03/python-advanced-calculator | d4d025b1bdea4d9be1cc47dfabc917f579fed3f8 | [
"Apache-2.0"
] | null | null | null | Calculator.py | dabl03/python-advanced-calculator | d4d025b1bdea4d9be1cc47dfabc917f579fed3f8 | [
"Apache-2.0"
] | null | null | null | signos_admitidos=['+','-','*','/'];
def search(list_char,str_,start=0):
"""
---------------------------------------------------------
| Busca un char ingresado por list_char en una
| | cadena pasada por str_, Cuando lo consige
| | crea una lista con la ubicacion de todas
| | las coincidencia y lo guarda en una lista.
| |
| return: {char_1:[coincidencia],....}.
| |
| example:
| | >> search(['+','-','*','/'],"1-2-3+4+5*6*7*8+9*7");
| | {'+': [5, 7, 15], '-': [1, 3], '*': [9, 11, 13, 17], '/': None}
---------------------------------------------------------
"""
if not isinstance(str_,str):
raise ValueError("I do not pass a chain as an argument");
coincidence={};
for a in list_char:
coincidence.update({a:[]});
is_list_empit=True;
for b in range(start,len(str_)):
if a==str_[b]:
coincidence[a].append(b);
is_list_empit=False;
if is_list_empit:#To know if it is an empty list.
coincidence[a]=None;
return coincidence;
def search_parenthesis(str_,start=0,end=None):
"""
-------------------------------------------------------
| Busca el inicio y el fin de un parentesis,
| | innorando los que sean hijos o decen-
| | diente del parentesis origina: "(())"
| | retorna [0,3].
| Example:
| | >>> search_parenthesis("hola(como ((estas)) )");
| | [4, 20]
| Nota: Comprueba si el primer parentesis quedo
| | abierto, en ese caso retorna -1.
| | Example:
| | | >>> search_parenthesis("hola(como ((estas) )");
| | | -1
| | Pero no comprueba si se cierra mas
| | parentesis de los que se abre.
--------------------------------------------------------
"""
LEN=len(str_);#For higher speed.
#Comprobamos que ingreso un buen dato final.
if not isinstance(end,int):
end=LEN;
elif end > LEN or end<0:
end=LEN;
del LEN;#Desde aqui ya no se necesita.
i_parenthesis=0;
init_end=[0,0];
for i in range(start,end):
if str_[i]=='(' or str_[i]==')':
if str_[i]=='(':
if i_parenthesis==0:#Si es el inicio marcamos la ubicacion inicial.
init_end[0]=i;
i_parenthesis+=1;
else:
i_parenthesis-=1;
if i_parenthesis==0:#Si es el final marcamos esta ubicacion y retornamos las coordenadas.
init_end[1]=i;
return init_end;
return -1;#Si no se encontro el parentesis final entonces retornamos -1.
def get_num_of_str(str_) -> str:
"""
------------------------------------------
| Funcion que retorna el numero pasado
| | por la cadena, ya sea float o int.
| example:
| | >> get_num_of_str("12")
| | 12
| | >> get_num_of_str("12,3")
| | 12.3
| | >> get_num_of_str("12.3")
| | 12.3
--------------------------------------------
"""
l=search(['.',','],str_);
if l['.']!=None:
return float(str_);
elif l[',']!=None:
return float(str_[ : l[','][0] ]+'.'+str_[ l[','][0]+1 : ]);#Ajuro debemos convertir ese signo para que la funcion float no lo vea como un string.
else:
return int(str_);
def calculation(a,operator,b):#No tratare de tratar errores en esta funcion para asegurar velocidad.
"""
-----------------------------------------------------
| Funcion que toma 1 numero, un string y 1 numero.
| | con esos numeros usa el string para retorna
| | una operacion matematica.
| |
| Example:
| | >> calculation(2,'*',2);
| | 4
| Return: int or float or None.
-------------------------------------------------------
"""
if operator=='+':
return a+b;
elif operator=='-':
return a-b;
elif operator=='*':
return a*b;
elif operator=='/':
return a/b;
else:#No es necesario.
return None;
def Calculator(str_input) -> str:
"""
---------------------------------------------------------
| Motor para calculadora avanzadas, diseñada
| | para hacer operaciones dificiles
| | como: 1-2-3+4+5*6*7*8+9*7*(-1-3*4).
| | Esta diseñada para tratar errores como 1*
| | o 1************************
| | o 1*************************1
| | y sacar el resultado.
| |
| Example:
| | >> 1-2-3+4+5*6*7*8+9*7*(-1-3*4)
| | 861
| return: int or float.
| Nota: Todavia no saca potencia, raiz cuadrada y tam-
| | poco tiene contantes como PI.
| | para ver lo que puede hacer: ver la lista
| | de signos admitidos(signos_admitidos).
---------------------------------------------------------
"""
class Error_arg(Exception):
def __init__(self,msg="Error: The argument must only be str. Example: Cacular('1+1');"):
self.message=msg;
is_negative=False;#Solo afectara al primer numero convirtiendolo en negativo.
i=0;#indice, lo coloco aqui para poder cambiarlo con el elif.
if not isinstance(str_input,str):
raise Error_arg;
if len(str_input)==0:
return 0;
if str_input[0] in signos_admitidos:#Nos aseguramos de tratar como se debe al primer signo que introduce el usuario.
if str_input[0]=='/':
raise SyntaxError( "Operation not valid: '/' "+str_input[1:] );
elif str_input[0]=='-':
is_negative=True;
str_input=str_input[1:];
num=[];
operand=[];
str_="";
flags={"previous":False,"is_":""};
i=0;
MAX_NUM=0;
MAX_OPERAND=0;
STR_LEN=len(str_input);
while True:
if i>=STR_LEN:
if len(str_)>0:
num.append(get_num_of_str(str_));
MAX_NUM=len(num);
MAX_OPERAND=len(operand);
#Por si el usuario no ingreso numeros:
if MAX_NUM==0:
return 0;
elif MAX_NUM==1:#Por si el usuario ingreso 1 numero:
return num[0];
elif MAX_NUM==2:#Ingreso dos numeros.
if MAX_OPERAND==0:#Si estuvo un signo de multipricacion o division ya se habra eliminado, pero falta hacer la operacion.
return num[0]*num[1] if flags["is_"]=='*' else num[0]/num[1];
return calculation(num[0],operand[0],num[1]);
#Si el ultimo es multipricacion o division:
if not flags["is_"]=="":#Recuerda primero se hace la multipricacion.
num[-2]=num[-2]*num[-1] if flags["is_"]=='*' else num[-2]/num[-1];
del num[-1];
break;
char=str_input[i];
if char in signos_admitidos:
if flags["previous"]:#Normal: 2+2
if not flags["is_"]=="":#Recuerda primero se hace la multipricacion.
num_2=get_num_of_str(str_);
num[-1]=num[-1]*num_2 if flags["is_"]=='*' else num[-1]/num_2;
flags["is_"]="";
else:
num.append(get_num_of_str(str_));
#Recuerda que la multipricacion y division se hacen primeros que las sumas y restas.
#El flags es porque actualmente no conosco el numero, pero despues si lo conocere.
if char=='*' or char=='/':
flags["is_"]='*' if char=='*' else '/';
else:
operand.append(char);
else:
if char=='-' or char=='-':#Entoces el numero es negativo.
is_negative=(char=='-');
else:
#Cuando pase:
# --> 2** retorna: 2
# --> 2**2 retorna 2*2 o 4
# --> 2/*2 retorna 2*2 o 4
# --> 2*/2 retorna 2/2 o 1
#"""Se tomara el ultimo signo: Nota: Si quieres que pase el primer signo pon esto en comentario:{
if char=='*' or char=='/':
flags["is_"]='*' if char=='*' else '/';
#}"""
pass;
str_="";
flags["previous"]=False;
elif char=='(':
l=search_parenthesis(str_input,i);
if isinstance(l,int):#Si se retorno -1 entonces no se ha cerrado el parentesis.
num.append(Calculator( str_input[i+1:] ));
break;
str_=str( Calculator(str_input[ i+1:l[1] ]) );
#tambien es valido: num[a_or_b].append(Cacular( str_input[ l[0]+1:l[1] ] ));
i=l[1];
flags["previous"]=True;
#flags["pre_is_parenthesis"]=True;
else:
str_+=char;
flags["previous"]=True;
if is_negative:
str_='-'+str_;
is_negative=False;
i+=1;
# Creo que no es necesario, ¿sera que lo quito?:
del i , STR_LEN , str_ , str_input, is_negative, flags;
i_s=0;
result=num[0];
for i_n in range(1,len(num)):
n_2=num[i_n];
if i_s<MAX_OPERAND:
result=calculation(result,operand[i_s],n_2);
i_s+=1;
else:#Ocurrio un error inesperado.
print(f"num: {num}, operand: {operand}, i_s: {i_s}");
raise NameError("Error inesperado de la apricacion.");
return result;
if __name__=="__main__":
"""
Para saber si funciona la Calculadora comparamos su resultados con los resultados de python.
"""
from timeit import timeit;
input_='';
comparar=True;
while True:
input_=input(f"""
Ingrese 'q' para terminar.\n
Ingrese 'n' para calcular sin comparar con python.\n
Ingrese su operacion para sacar el calculo:
comparar={comparar}
--> """).lower();
if input_[0]=='q':
break;
elif input_[0]=='n':
comparar=False;
continue;
if comparar:
print("Operacion con python: ",end="");
timeit(f"print({input_});",number=1);
print("Operacion com mi calculadora: "+str(Calculator(input_)));
"""Nota: Si una formula se calculo mal por mi calculadora entonces por favor rellena este formulario y enviamelo:
ERROR: ?
Resultado obtenido: ?
Resultado deseado: ?
"""
input("Enter space for finish.");
#Nota: La calculadora de window al sacar esta cuenta: 2*2+2-4+6*7+8*6+3/2/3 me retorna 64.5, pero el shell de python y la calculadora de mi telefono me retorna 92.5, digo shell porque no saque la cuenta con mi calculadora.
| 38.592857 | 223 | 0.492782 | 1,324 | 10,806 | 3.905589 | 0.239426 | 0.024753 | 0.01083 | 0.014891 | 0.13247 | 0.106362 | 0.092052 | 0.067298 | 0.060723 | 0.049507 | 0 | 0.026831 | 0.323987 | 10,806 | 279 | 224 | 38.731183 | 0.680903 | 0.402832 | 0 | 0.215569 | 0 | 0 | 0.100484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035928 | false | 0.011976 | 0.005988 | 0 | 0.149701 | 0.023952 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3d5d0386568e2d8e4d24b1ddf0520a183df8e73 | 1,935 | py | Python | python/static-site/tests/test_stacks.py | jeffmaley/aws-cdk-examples | 86f10b0e1c21e5d6b943a02426d06e7a0f085681 | [
"Apache-2.0"
] | 2,941 | 2019-02-08T15:29:36.000Z | 2022-03-31T23:57:42.000Z | python/static-site/tests/test_stacks.py | jeffmaley/aws-cdk-examples | 86f10b0e1c21e5d6b943a02426d06e7a0f085681 | [
"Apache-2.0"
] | 558 | 2019-02-14T23:32:02.000Z | 2022-03-30T00:35:11.000Z | python/static-site/tests/test_stacks.py | jeffmaley/aws-cdk-examples | 86f10b0e1c21e5d6b943a02426d06e7a0f085681 | [
"Apache-2.0"
] | 1,409 | 2019-02-12T19:13:04.000Z | 2022-03-31T18:46:21.000Z | import pytest
from aws_cdk import core as cdk
from site_stack import StaticSiteStack
@pytest.fixture(scope="session")
def synth():
app = cdk.App(
context={
"namespace": "static-site",
"domain_name": "example.com",
"domain_certificate_arn": "arn:aws:acm:us-east-1:123456789012:certificate/abc",
"sub_domain_name": "blog",
"origin_custom_header_parameter_name": "/prod/static-site/referer",
"hosted_zone_id": "ZABCEF12345",
"hosted_zone_name": "example.com.",
}
)
props = {
"namespace": app.node.try_get_context("namespace"),
"domain_name": app.node.try_get_context("domain_name"),
"sub_domain_name": app.node.try_get_context("sub_domain_name"),
"domain_certificate_arn": app.node.try_get_context(
"domain_certificate_arn"
),
"enable_s3_website_endpoint": app.node.try_get_context(
"enable_s3_website_endpoint"
),
"origin_custom_header_parameter_name": app.node.try_get_context(
"origin_custom_header_parameter_name"
),
"hosted_zone_id": app.node.try_get_context("hosted_zone_id"),
"hosted_zone_name": app.node.try_get_context("hosted_zone_name"),
}
StaticSiteStack(
scope=app,
construct_id=props["namespace"],
props=props,
env={"account": "123456789012", "region": "us-east-1"},
)
return app.synth()
def get_buckets(stack):
return [
v
for k, v in stack.template["Resources"].items()
if v["Type"] == "AWS::S3::Bucket"
]
def test_created_stacks(synth):
assert {"static-site"} == {x.id for x in synth.stacks}
def test_site_bucket(synth):
stack = [x for x in synth.stacks if x.id == "static-site"][0]
buckets = get_buckets(stack)
assert buckets[0]["Properties"]["BucketName"] == "blog.example.com"
| 31.209677 | 91 | 0.626357 | 238 | 1,935 | 4.798319 | 0.315126 | 0.049037 | 0.070053 | 0.091068 | 0.30035 | 0.157618 | 0.105079 | 0 | 0 | 0 | 0 | 0.02439 | 0.237209 | 1,935 | 61 | 92 | 31.721311 | 0.749322 | 0 | 0 | 0.058824 | 0 | 0 | 0.350388 | 0.154005 | 0 | 0 | 0 | 0 | 0.039216 | 1 | 0.078431 | false | 0 | 0.058824 | 0.019608 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3d84d6d30fcdcb05b353fb7fabedea3353d5848 | 9,935 | py | Python | data_openml.py | Sergiodiaz53/saint | 4d0294ddae5dc79035c252e88fd176af5e417a8e | [
"Apache-2.0"
] | null | null | null | data_openml.py | Sergiodiaz53/saint | 4d0294ddae5dc79035c252e88fd176af5e417a8e | [
"Apache-2.0"
] | null | null | null | data_openml.py | Sergiodiaz53/saint | 4d0294ddae5dc79035c252e88fd176af5e417a8e | [
"Apache-2.0"
] | null | null | null | import openml
import numpy as np
from sklearn.preprocessing import LabelEncoder
import pandas as pd
from torch.utils.data import Dataset
def simple_lapsed_time(text, lapsed):
hours, rem = divmod(lapsed, 3600)
minutes, seconds = divmod(rem, 60)
print(text+": {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
def task_dset_ids(task):
dataset_ids = {
'binary': [1487,44,1590,42178,1111,31,42733,1494,1017,4134],
'multiclass': [188, 1596, 4541, 40664, 40685, 40687, 40975, 41166, 41169, 42734],
'regression':[541, 42726, 42727, 422, 42571, 42705, 42728, 42563, 42724, 42729]
}
return dataset_ids[task]
def concat_data(X,y):
# import ipdb; ipdb.set_trace()
return pd.concat([pd.DataFrame(X['data']), pd.DataFrame(y['data'][:,0].tolist(),columns=['target'])], axis=1)
def data_split(X,y,nan_mask,indices):
x_d = {
'data': X.values[indices],
'mask': nan_mask.values[indices]
}
if x_d['data'].shape != x_d['mask'].shape:
raise'Shape of data not same as that of nan mask!'
y_d = {
'data': y[indices].reshape(-1, 1)
}
return x_d, y_d
def data_prep_CBC(seed, task, datasplit=[.65, .15, .2]):
np.random.seed(seed)
#Load data
CBC_file_dir = "data/ProcessedData-2021-Filtrados.csv"
CBC = pd.read_csv(CBC_file_dir, error_bad_lines=True)
CBC = CBC[CBC['Clase'] != 2]
CBC['Clase'] = CBC['Clase'].replace(to_replace = 3, value = 2)
CBC['Clase'] = CBC['Clase'].replace(to_replace = 4, value = 2)
healthy = CBC.loc[CBC['Clase'] == 0]
thalassemias = CBC.loc[CBC['Clase'] == 1]
anemias = CBC.loc[CBC['Clase'] == 2]
CBC = pd.concat([healthy,thalassemias, anemias])
y = CBC['Clase']
CBC = CBC.drop('Clase', axis=1)
CBC = CBC.drop('TipoClase', axis=1)
X = CBC
categorical_indicator = []
for i in range(0, len(X.iloc[0])): categorical_indicator.append(False)
categorical_columns = X.columns[list(np.where(np.array(categorical_indicator)==True)[0])].tolist()
cont_columns = list(set(X.columns.tolist()) - set(categorical_columns))
CBC.reset_index(drop=True, inplace=True)
cat_idxs = list(np.where(np.array(categorical_indicator)==True)[0])
con_idxs = list(set(range(len(X.columns))) - set(cat_idxs))
for col in categorical_columns:
X[col] = X[col].astype("object")
X["Set"] = np.random.choice(["train", "valid", "test"], p = datasplit, size=(X.shape[0],))
train_indices = X[X.Set=="train"].index
valid_indices = X[X.Set=="valid"].index
test_indices = X[X.Set=="test"].index
X = X.drop(columns=['Set'])
temp = X.fillna("MissingValue")
nan_mask = temp.ne("MissingValue").astype(int)
cat_dims = []
for col in categorical_columns:
# X[col] = X[col].cat.add_categories("MissingValue")
X[col] = X[col].fillna("MissingValue")
l_enc = LabelEncoder()
X[col] = l_enc.fit_transform(X[col].values)
cat_dims.append(len(l_enc.classes_))
for col in cont_columns:
# X[col].fillna("MissingValue",inplace=True)
X.fillna(X.loc[train_indices, col].mean(), inplace=True)
y = y.values
X_train, y_train = data_split(X,y,nan_mask,train_indices)
X_valid, y_valid = data_split(X,y,nan_mask,valid_indices)
X_test, y_test = data_split(X,y,nan_mask,test_indices)
train_mean, train_std = np.array(X_train['data'][:,con_idxs],dtype=np.float32).mean(0), np.array(X_train['data'][:,con_idxs],dtype=np.float32).std(0)
train_std = np.where(train_std < 1e-6, 1e-6, train_std)
return cat_dims, cat_idxs, con_idxs, X_train, y_train, X_valid, y_valid, X_test, y_test, train_mean, train_std
def data_prep_FV(seed, task, datasplit=[.65, .15, .2]):
np.random.seed(seed)
X = np.load("data/freqvectors_hotspots-3k-polys-500chunk_with_reversed.npy")
X = np.delete(X, np.s_[512:1024], axis=1)
X = pd.DataFrame(data=X)
y = np.load("data/labels_hotspots-3k-list-500chunk_with_reversed.npy")
y = pd.DataFrame(data=y)
categorical_indicator = []
for i in range(0, len(X.iloc[0])): categorical_indicator.append(False)
categorical_columns = X.columns[list(np.where(np.array(categorical_indicator)==True)[0])].tolist()
cont_columns = list(set(X.columns.tolist()) - set(categorical_columns))
cat_idxs = list(np.where(np.array(categorical_indicator)==True)[0])
con_idxs = list(set(range(len(X.columns))) - set(cat_idxs))
for col in categorical_columns:
X[col] = X[col].astype("object")
X["Set"] = np.random.choice(["train", "valid", "test"], p = datasplit, size=(X.shape[0],))
train_indices = X[X.Set=="train"].index
valid_indices = X[X.Set=="valid"].index
test_indices = X[X.Set=="test"].index
X = X.drop(columns=['Set'])
temp = X.fillna("MissingValue")
nan_mask = temp.ne("MissingValue").astype(int)
cat_dims = []
for col in categorical_columns:
# X[col] = X[col].cat.add_categories("MissingValue")
X[col] = X[col].fillna("MissingValue")
l_enc = LabelEncoder()
X[col] = l_enc.fit_transform(X[col].values)
cat_dims.append(len(l_enc.classes_))
for col in cont_columns:
# X[col].fillna("MissingValue",inplace=True)
X.fillna(X.loc[train_indices, col].mean(), inplace=True)
y = y.values
X_train, y_train = data_split(X,y,nan_mask,train_indices)
X_valid, y_valid = data_split(X,y,nan_mask,valid_indices)
X_test, y_test = data_split(X,y,nan_mask,test_indices)
train_mean, train_std = np.array(X_train['data'][:,con_idxs],dtype=np.float32).mean(0), np.array(X_train['data'][:,con_idxs],dtype=np.float32).std(0)
train_std = np.where(train_std < 1e-6, 1e-6, train_std)
return cat_dims, cat_idxs, con_idxs, X_train, y_train, X_valid, y_valid, X_test, y_test, train_mean, train_std
def data_prep_openml(ds_id, seed, task, datasplit=[.65, .15, .2]):
np.random.seed(seed)
dataset = openml.datasets.get_dataset(ds_id)
X, y, categorical_indicator, attribute_names = dataset.get_data(dataset_format="dataframe", target=dataset.default_target_attribute)
if ds_id == 42178:
categorical_indicator = [True, False, True,True,False,True,True,True,True,True,True,True,True,True,True,True,True,False, False]
tmp = [x if (x != ' ') else '0' for x in X['TotalCharges'].tolist()]
X['TotalCharges'] = [float(i) for i in tmp ]
y = y[X.TotalCharges != 0]
X = X[X.TotalCharges != 0]
X.reset_index(drop=True, inplace=True)
print(y.shape, X.shape)
if ds_id in [42728,42705,42729,42571]:
# import ipdb; ipdb.set_trace()
X, y = X[:50000], y[:50000]
X.reset_index(drop=True, inplace=True)
categorical_columns = X.columns[list(np.where(np.array(categorical_indicator)==True)[0])].tolist()
cont_columns = list(set(X.columns.tolist()) - set(categorical_columns))
cat_idxs = list(np.where(np.array(categorical_indicator)==True)[0])
con_idxs = list(set(range(len(X.columns))) - set(cat_idxs))
for col in categorical_columns:
X[col] = X[col].astype("object")
X["Set"] = np.random.choice(["train", "valid", "test"], p = datasplit, size=(X.shape[0],))
train_indices = X[X.Set=="train"].index
valid_indices = X[X.Set=="valid"].index
test_indices = X[X.Set=="test"].index
X = X.drop(columns=['Set'])
temp = X.fillna("MissingValue")
nan_mask = temp.ne("MissingValue").astype(int)
cat_dims = []
for col in categorical_columns:
# X[col] = X[col].cat.add_categories("MissingValue")
X[col] = X[col].fillna("MissingValue")
l_enc = LabelEncoder()
X[col] = l_enc.fit_transform(X[col].values)
cat_dims.append(len(l_enc.classes_))
for col in cont_columns:
# X[col].fillna("MissingValue",inplace=True)
X.fillna(X.loc[train_indices, col].mean(), inplace=True)
y = y.values
if task != 'regression':
l_enc = LabelEncoder()
y = l_enc.fit_transform(y)
X_train, y_train = data_split(X,y,nan_mask,train_indices)
X_valid, y_valid = data_split(X,y,nan_mask,valid_indices)
X_test, y_test = data_split(X,y,nan_mask,test_indices)
train_mean, train_std = np.array(X_train['data'][:,con_idxs],dtype=np.float32).mean(0), np.array(X_train['data'][:,con_idxs],dtype=np.float32).std(0)
train_std = np.where(train_std < 1e-6, 1e-6, train_std)
# import ipdb; ipdb.set_trace()
return cat_dims, cat_idxs, con_idxs, X_train, y_train, X_valid, y_valid, X_test, y_test, train_mean, train_std
class DataSetCatCon(Dataset):
    """Torch Dataset serving categorical and continuous tabular features
    (plus their missing-value masks) as separate tensors per sample.

    Parameters
    ----------
    X : dict
        {'data': (n, d) array, 'mask': (n, d) 0/1 array of observed flags}.
    Y : dict
        {'data': (n, 1) array of targets} — assumes targets are column
        vectors so the CLS column concatenates cleanly (TODO confirm
        against the producing data-prep code).
    cat_cols : iterable of int
        Column indices holding categorical features; the rest are
        treated as continuous.
    task : str
        'clf' keeps labels as-is; anything else casts them to float32.
    continuous_mean_std : tuple or None
        Optional (mean, std) used to standardize the continuous block.
    """

    def __init__(self, X, Y, cat_cols, task='clf', continuous_mean_std=None):
        cat_cols = list(cat_cols)
        X_mask = X['mask'].copy()
        X = X['data'].copy()
        # Bug fix: sort the remaining indices.  Plain set iteration order is
        # not guaranteed, so the continuous-column order (and therefore the
        # meaning of continuous_mean_std) could silently change between runs.
        con_cols = sorted(set(range(X.shape[1])) - set(cat_cols))
        self.X1 = X[:, cat_cols].copy().astype(np.int64)        # categorical columns
        self.X2 = X[:, con_cols].copy().astype(np.float32)      # numerical columns
        self.X1_mask = X_mask[:, cat_cols].copy().astype(np.int64)   # categorical mask
        self.X2_mask = X_mask[:, con_cols].copy().astype(np.int64)   # numerical mask
        if task == 'clf':
            self.y = Y['data']  # keep integer labels for classification
        else:
            self.y = Y['data'].astype(np.float32)
        # CLS token column prepended to the categorical block in __getitem__.
        self.cls = np.zeros_like(self.y, dtype=int)
        self.cls_mask = np.ones_like(self.y, dtype=int)
        if continuous_mean_std is not None:
            mean, std = continuous_mean_std
            self.X2 = (self.X2 - mean) / std

    def __len__(self):
        return len(self.y)

    def __getitem__(self, idx):
        # X1 has categorical data, X2 has continuous.
        return (
            np.concatenate((self.cls[idx], self.X1[idx])),
            self.X2[idx],
            self.y[idx],
            np.concatenate((self.cls_mask[idx], self.X1_mask[idx])),
            self.X2_mask[idx],
        )
| 38.657588 | 162 | 0.644389 | 1,525 | 9,935 | 4.015738 | 0.146885 | 0.017636 | 0.016329 | 0.017962 | 0.658067 | 0.641901 | 0.626061 | 0.608099 | 0.597322 | 0.597322 | 0 | 0.036959 | 0.188425 | 9,935 | 256 | 163 | 38.808594 | 0.72256 | 0.054152 | 0 | 0.519337 | 0 | 0 | 0.070157 | 0.018659 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055249 | false | 0 | 0.027624 | 0.016575 | 0.132597 | 0.01105 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3d9223f50cffa1ff241a1b0548ffbf81ee7c5f8 | 6,086 | py | Python | ROS_ws/src/lab2/traj_planning_ros/src/iLQR/ellipsoid/dyn_sys.py | kenarvyas/ECE346 | f0229a9f3e03ca06e2d8fa74f9208fea5b2c29c7 | [
"MIT"
] | 4 | 2022-02-04T03:08:53.000Z | 2022-03-24T13:17:46.000Z | ROS_ws/src/lab2/traj_planning_ros/src/iLQR/ellipsoid/dyn_sys.py | kenarvyas/ECE346 | f0229a9f3e03ca06e2d8fa74f9208fea5b2c29c7 | [
"MIT"
] | null | null | null | ROS_ws/src/lab2/traj_planning_ros/src/iLQR/ellipsoid/dyn_sys.py | kenarvyas/ECE346 | f0229a9f3e03ca06e2d8fa74f9208fea5b2c29c7 | [
"MIT"
] | 12 | 2022-01-28T05:07:56.000Z | 2022-03-30T02:43:05.000Z | import numpy as np
class DynSys():
    """
    The dynamical system class.

    Author: Haimin Hu (haiminh@princeton.edu)
    Reference: Ellipsoidal Toolbox (MATLAB) by Dr. Alex Kurzhanskiy.

    Supports:
      DTLTI: Discrete-time linear time-invariant system.
             x[k+1] = A x[k] + B u[k] + c + G d[k]
      DTLTV: Discrete-time linear time-varying system.
             x[k+1] = A[k] x[k] + B[k] u[k] + c[k] + G[k] d[k]
      CTLTI / CTLTV / NNCS: not yet implemented.

    x - state, vector in R^n.       u - control, vector in R^m.
    c - constant offset in R^n.     d - disturbance, vector in R^l.
    A - system matrix (n, n).       B - control matrix (n, m).
    G - disturbance matrix (n, l).

    Todo list:
      - Account for output map and noise: y(t) = C(t) x(t) + w(t).
    """

    def __init__(self, sys_type, A, B, c=np.array([]), G=np.array([]), T=0):
        """
        Constructor for dynamical system object.

        Args:
            sys_type (str): system type ('DTLTI' or 'DTLTV').
            A (np.ndarray or list of np.ndarray): system matrix.
            B (np.ndarray or list of np.ndarray): control matrix
                (empty means autonomous).
            c (np.ndarray or list of np.ndarray, optional): offset vector.
            G (np.ndarray or list of np.ndarray, optional): disturbance
                matrix (empty means no disturbance).
            T (int): time horizon (for time-varying systems).

        Raises:
            ValueError: on unsupported type or inconsistent dimensions.
        """
        if sys_type == 'DTLTI':
            self._init_dtlti(A, B, c, G)
        elif sys_type == 'DTLTV':
            self._init_dtltv(A, B, c, G, T)
        else:
            raise ValueError("[ellReach-DynSys] Unsupported system type.")

    def _init_dtlti(self, A, B, c, G):
        """Validate and store matrices of a DTLTI system."""
        self.sys_type = 'DTLTI'
        # A matrix
        if not isinstance(A, np.ndarray):
            raise ValueError(
                "[ellReach-DynSys] A must be an np.ndarray for DTLTI systems."
            )
        n = A.shape[0]
        if n != A.shape[1]:
            raise ValueError("[ellReach-DynSys] A must be a square matrix.")
        self.A = A
        # B matrix
        if np.size(B) > 0:
            if not isinstance(B, np.ndarray):
                raise ValueError(
                    "[ellReach-DynSys] B must be an np.ndarray for DTLTI systems."
                )
            if n != B.shape[0]:
                raise ValueError(
                    "[ellReach-DynSys] Dimensions of A and B do not match."
                )
        # Bug fix: always store B and G (even when empty) so that
        # autonomous() / no_dstb() never hit an AttributeError.
        self.B = B
        # c vector
        if np.size(c) == 0:
            self.c = np.zeros((n, 1))
        else:
            if not isinstance(c, np.ndarray):
                raise ValueError(
                    "[ellReach-DynSys] c must be an np.ndarray for DTLTI systems."
                )
            if n != c.shape[0]:
                raise ValueError(
                    "[ellReach-DynSys] Dimensions of A and c do not match."
                )
            self.c = c
        # G matrix
        if np.size(G) > 0:
            if not isinstance(G, np.ndarray):
                raise ValueError(
                    "[ellReach-DynSys] G must be an np.ndarray for DTLTI systems."
                )
            if n != G.shape[0]:
                raise ValueError(
                    "[ellReach-DynSys] Dimensions of A and G do not match."
                )
        self.G = G

    def _init_dtltv(self, A, B, c, G, T):
        """Validate and store matrix lists of a DTLTV system over horizon T.

        All lists must contain exactly T-1 entries (one per transition step).
        """
        self.sys_type = 'DTLTV'
        if not isinstance(T, int) or not T > 0:
            raise ValueError("[ellReach-DynSys] T must be a positive integer.")
        self.T = T
        # A matrices
        if not isinstance(A, list):
            raise ValueError(
                "[ellReach-DynSys] A must be a list for DTLTV systems."
            )
        if len(A) != T - 1:
            raise ValueError("[ellReach-DynSys] T and length of A do not match.")
        n = A[0].shape[0]
        self.A = A
        # B matrices
        if np.size(B) > 0:
            if not isinstance(B, list):
                raise ValueError(
                    "[ellReach-DynSys] B must be a list for DTLTV systems."
                )
            if len(B) != T - 1:
                raise ValueError("[ellReach-DynSys] T and length of B do not match.")
        self.B = B
        # c vectors
        if np.size(c) == 0:
            # Bug fix: build T-1 default offset vectors to match the length
            # the validation below requires of A, B and G (the original
            # built T of them, one too many).
            self.c = [np.zeros((n, 1))] * (T - 1)
        else:
            if not isinstance(c, list):
                raise ValueError(
                    "[ellReach-DynSys] c must be a list for DTLTV systems."
                )
            if len(c) != T - 1:
                raise ValueError("[ellReach-DynSys] T and length of c do not match.")
            self.c = c
        # G matrices
        if np.size(G) > 0:
            if not isinstance(G, list):
                raise ValueError(
                    "[ellReach-DynSys] G must be a list for DTLTV systems."
                )
            if len(G) != T - 1:
                raise ValueError("[ellReach-DynSys] T and length of G do not match.")
        self.G = G

    def display(self):
        """
        Displays information of the DynSys object.
        """
        print("\n")
        print("System type: ", self.sys_type)
        if self.sys_type == 'DTLTI':
            print("A matrix: \n", self.A)
            if not self.autonomous():
                print("B matrix: \n", self.B)
            else:
                print("This is an autonomous system.")
            print("c vector: \n", self.c)
            if not self.no_dstb():
                print("G matrix: \n", self.G)
            else:
                print("This system has no disturbance.")
        elif self.sys_type == 'DTLTV':
            print("Horizon T =", self.T)
            if self.autonomous():
                print("This is an autonomous system.")
            if self.no_dstb():
                print("This system has no disturbance.")
        print("\n")

    def time_varying(self):
        """
        Check if the system is time-varying.
        """
        return self.sys_type == 'DTLTV' or self.sys_type == 'CTLTV'

    def autonomous(self):
        """
        Check if the system is autonomous (empty B matrix).
        """
        return np.size(self.B) == 0

    def no_dstb(self):
        """
        Check if the system has no disturbances (empty G matrix).
        """
        return np.size(self.G) == 0
| 32.545455 | 79 | 0.548143 | 870 | 6,086 | 3.812644 | 0.155172 | 0.081399 | 0.124812 | 0.157371 | 0.584564 | 0.520953 | 0.392222 | 0.301779 | 0.233946 | 0.144106 | 0 | 0.006102 | 0.326816 | 6,086 | 186 | 80 | 32.72043 | 0.803515 | 0.294611 | 0 | 0.368852 | 0 | 0 | 0.284953 | 0 | 0 | 0 | 0 | 0.005376 | 0 | 1 | 0.040984 | false | 0 | 0.008197 | 0 | 0.106557 | 0.098361 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3dd774e3fe02dfa112c698582886e4595fb2a9c | 1,667 | py | Python | phyluce/mafft.py | faircloth-lab/phyluce | ae6801a7e749be2fa38513db9846046241d0fd7a | [
"BSD-3-Clause"
] | 63 | 2015-03-16T15:10:17.000Z | 2022-02-16T12:36:23.000Z | phyluce/mafft.py | faircloth-lab/phyluce | ae6801a7e749be2fa38513db9846046241d0fd7a | [
"BSD-3-Clause"
] | 253 | 2015-01-26T13:03:23.000Z | 2022-03-15T19:03:05.000Z | phyluce/mafft.py | faircloth-lab/phyluce | ae6801a7e749be2fa38513db9846046241d0fd7a | [
"BSD-3-Clause"
] | 45 | 2015-01-26T13:09:50.000Z | 2021-05-24T04:20:30.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2015 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 0 March 2012 09:03 PST (-0800)
"""
import os
import tempfile
import subprocess
from Bio import AlignIO
from phyluce.pth import get_user_path
from phyluce.generic_align import GenericAlign
# import pdb
class Align(GenericAlign):
    """MAFFT alignment class. Subclass of GenericAlign which
    contains a majority of the alignment-related helper functions
    (trimming, etc.)"""

    def __init__(self, input):
        """Initialize, calling superclass __init__ also."""
        super(Align, self).__init__(input)

    def run_alignment(self, clean=True):
        """Run MAFFT on self.input, parse the FASTA output into
        self.alignment, and optionally remove the temporary file.

        Args:
            clean (bool): if True, delete the temp alignment file
                via the superclass's _clean helper.
        """
        # create results file
        fd, aln = tempfile.mkstemp(suffix=".mafft")
        os.close(fd)
        cmd = [
            get_user_path("binaries", "mafft"),
            "--adjustdirection",
            "--maxiterate",
            "1000",
            self.input,
        ]
        # run MAFFT on the temp file; the aligned FASTA goes to stdout,
        # which we redirect into the temp file.  Just pass all ENV params.
        with open(aln, "w") as aln_stdout:
            proc = subprocess.Popen(
                cmd, stderr=subprocess.PIPE, stdout=aln_stdout
            )
            proc.communicate()
        # Bug fix: the "rU" mode was removed in Python 3.11 ("U" was a
        # no-op since universal newlines became the default); also use a
        # context manager so the handle is closed (the original leaked it).
        with open(aln, "r") as handle:
            self.alignment = AlignIO.read(handle, "fasta")
        # we now need to set the molecule type for biopython
        # due to removal of seq.alphabet
        for seq in self.alignment:
            seq.annotations = {"molecule_type": "DNA"}
        if clean:
            self._clean(aln)
if __name__ == "__main__":
pass
| 26.046875 | 79 | 0.628674 | 207 | 1,667 | 4.913043 | 0.628019 | 0.026549 | 0.021632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018776 | 0.265147 | 1,667 | 63 | 80 | 26.460317 | 0.811429 | 0.362927 | 0 | 0 | 0 | 0 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.033333 | 0.2 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3de5aec1e9569212d47284ebab8aa408f948043 | 833 | py | Python | main.py | dunningjack/parrot | 1b3cf8b17a8d7880e1a6b2864366115ca8a1759f | [
"MIT"
] | null | null | null | main.py | dunningjack/parrot | 1b3cf8b17a8d7880e1a6b2864366115ca8a1759f | [
"MIT"
] | null | null | null | main.py | dunningjack/parrot | 1b3cf8b17a8d7880e1a6b2864366115ca8a1759f | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
from discord.ext import commands
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = os.getenv('DISCORD_GUILD')
client = commands.Bot(command_prefix='!p')
@client.event
async def on_ready():
    """Log the bot account once the client has connected to Discord."""
    print("We have logged in as {0.user}".format(client))
@client.command()
async def on_message(message):
    # NOTE(review): registering a *command* named "on_message" does not
    # override discord.py's on_message event; with prefix "!p" this becomes
    # the command "!pon_message", and the commands extension passes a
    # Context (not a Message) as the first argument.  Verify the intended
    # behavior against the discord.ext.commands documentation.
    if message.content.startswith("!p"):
        await parrot(message)
@client.event
async def parrot(message):
    """Repeat the comma-joined words of *message* several times.

    The second whitespace-separated token is read as the repeat count,
    e.g. "!p 3 hello" repeats three times.  NOTE(review): this helper is
    called with whatever on_message received; str.split assumes a plain
    string — confirm the caller passes one.
    """
    parts = message.split(" ")
    # Bug fix: the original tested `command[1].isdigit` (the bound method
    # object, which is always truthy) and then passed the *string* count
    # straight to range(); call the method and convert to int.
    if len(parts) > 1 and parts[1].isdigit():
        iterations = int(parts[1])
        # Hoisted out of the loop: the joined statement is loop-invariant
        # (the original rebuilt the same string on every iteration).
        statement = ', '.join(parts)
        for _ in range(1, iterations):
            await client.process_commands(statement)
    await client.process_commands(message)


client.run(TOKEN)
| 21.358974 | 57 | 0.669868 | 108 | 833 | 5.083333 | 0.462963 | 0.043716 | 0.054645 | 0.069217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.207683 | 833 | 38 | 58 | 21.921053 | 0.824242 | 0 | 0 | 0.076923 | 0 | 0 | 0.07443 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.115385 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3e0f5d4f905072bdceac9216392204ee8ebc83c | 7,885 | py | Python | elliptic_curve.py | eriktaubeneck/ellipic-curve | 1014c8dfad7917aaacd6cdf1811f0af3513d3e4c | [
"MIT"
] | null | null | null | elliptic_curve.py | eriktaubeneck/ellipic-curve | 1014c8dfad7917aaacd6cdf1811f0af3513d3e4c | [
"MIT"
] | null | null | null | elliptic_curve.py | eriktaubeneck/ellipic-curve | 1014c8dfad7917aaacd6cdf1811f0af3513d3e4c | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import Union, Tuple
import sys
import secrets
import hashlib
import math
import warnings
import base64
from algebra import ZModField, ZModElement
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.concatkdf import ConcatKDFHash
warning_message = """
▓█████▄ ▄▄▄ ███▄ █ ▄████ ▓█████ ██▀███
▒██▀ ██▌▒████▄ ██ ▀█ █ ██▒ ▀█▒▓█ ▀ ▓██ ▒ ██▒
░██ █▌▒██ ▀█▄ ▓██ ▀█ ██▒▒██░▄▄▄░▒███ ▓██ ░▄█ ▒
░▓█▄ ▌░██▄▄▄▄██ ▓██▒ ▐▌██▒░▓█ ██▓▒▓█ ▄ ▒██▀▀█▄
░▒████▓ ▓█ ▓██▒▒██░ ▓██░░▒▓███▀▒░▒████▒░██▓ ▒██▒
▒▒▓ ▒ ▒▒ ▓▒█░░ ▒░ ▒ ▒ ░▒ ▒ ░░ ▒░ ░░ ▒▓ ░▒▓░
░ ▒ ▒ ▒ ▒▒ ░░ ░░ ░ ▒░ ░ ░ ░ ░ ░ ░▒ ░ ▒░
░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░
░
This code is for DEMO and EDUCATIONAL purposes only. It is NOT SECURE!"""
warnings.warn(
warning_message +
"You've imported code that should not be used for cryptograpic purposes."
)
@dataclass(order=True, frozen=True)
class EC:
    """
    a simple EC of the form y^2 = x^3 + a*x + b
    within the field ZModField
    """
    field: ZModField
    a: ZModElement
    b: ZModElement

    @classmethod
    def gen_from_int(cls, field: ZModField, x: int, y: int) -> 'EC':
        """Build a curve whose coefficients are given as plain ints."""
        coeff_a = field.gen_element(x)
        coeff_b = field.gen_element(y)
        return cls(field, coeff_a, coeff_b)

    def gen_element(self, x: Union[int, ZModElement], y: Union[int, ZModElement]) -> 'ECElement':
        """Lift coordinates (ints or field elements) onto this curve."""
        lifted = [
            self.field.gen_element(v) if isinstance(v, int) else v
            for v in (x, y)
        ]
        return ECElement(lifted[0], lifted[1], self)

    def generate_elements(self):
        """Enumerate every affine point on the curve, plus infinity."""
        points = {self.infinity}
        residues = self.field.quadratic_residues
        for x in self.field.elements:
            rhs = x ** 3 + self.a * x + self.b
            # residues maps a square to its (possibly empty) set of roots
            for y in residues.get(rhs, None) or ():
                points.add(self.gen_element(x, y))
        return points

    @property
    def infinity(self) -> 'ECElement':
        """The point at infinity (the group identity), flagged explicitly."""
        origin = self.field.gen_element(0)
        return ECElement(origin, origin, self, True)
@dataclass(order=True, frozen=True)
class ECElement:
    """A point (x, y) on the elliptic curve `ec`; `infinity` marks the
    group identity, which carries dummy (0, 0) coordinates."""
    x: ZModElement
    y: ZModElement
    ec: 'EC' = field(repr=False)
    infinity: bool = False

    def __post_init__(self):
        # Reject coordinates that do not satisfy the curve equation
        # (the identity is exempt — its coordinates are placeholders).
        if not self.validate() and not self.infinity:
            raise Exception(f'{self} is not a point on {self.ec}')

    def __repr__(self):
        if self.infinity:
            return '(inf)'
        return f'({self.x.value},{self.y.value})'

    def validate(self) -> bool:
        """Check y^2 == x^3 + a*x + b in the underlying field."""
        return self.y ** 2 == self.x ** 3 + self.ec.a * self.x + self.ec.b

    def __add__(self, other: 'ECElement') -> 'ECElement':
        """Group law: chord-and-tangent point addition."""
        p = self
        q = other
        if p == self.ec.infinity:
            return q
        if q == self.ec.infinity:
            return p
        if p.x == q.x and p.y == -q.y:
            # vertical chord: P + (-P) = identity
            return self.ec.infinity
        elif p == q:
            # doubling: slope of the tangent line
            m = ((p.x ** 2) * 3 + self.ec.a) / (p.y * 2)
        else:
            m = (q.y - p.y) / (q.x - p.x)
        x = m**2 - p.x - q.x
        y = m*(p.x - x) - p.y
        return self.ec.gen_element(x, y)

    def __sub__(self, other: 'ECElement') -> 'ECElement':
        return self + -other

    def __mul__(self, other: int) -> 'ECElement':
        """Scalar multiplication via left-to-right double-and-add.

        Bug fix: the original returned `self` for scalar 0 (the result
        should be the identity) and produced wrong results for negative
        scalars; both edge cases are now handled explicitly.
        """
        if other == 0:
            return self.ec.infinity
        if other < 0:
            return -(self * -other)
        bits = f'{other:b}'  # binary expansion of the scalar, MSB first
        value = self
        for bit in bits[1:]:
            value = value + value  # double
            if bit == '1':
                value += self      # add
        return value

    def __neg__(self) -> 'ECElement':
        return self.ec.gen_element(self.x, -self.y)

    def generate(self):
        """Yield the cyclic subgroup generated by this point: P, 2P, 3P, ...
        stopping once a multiple repeats; yields only itself when this is
        the identity."""
        q = self
        seen = {q}
        yield self
        if q == self.ec.infinity:
            return
        multiple = q + q
        while multiple not in seen:
            seen.add(multiple)
            yield multiple
            multiple += q
@dataclass(frozen=True)
class ECC:
    """Demo elliptic-curve cryptosystem: ECDSA-style signatures and an
    ECIES-style hybrid encryption scheme.  NOT SECURE — demo only."""
    ec: EC
    size: int = field(repr=False)
    generator: ECElement = field(repr=False)
    field: ZModField = field(repr=False, init=False)

    def __post_init__(self):
        warnings.warn(
            warning_message +
            "You've initiated the ECC class that should not be used for cryptograpic purposes."
        )
        # frozen dataclass: bypass __setattr__ to attach the derived field
        object.__setattr__(self, 'field', ZModField(self.size))

    def generate_private_key(self) -> int:
        """Draw a uniformly random private key in [1, size).

        Bug fix: secrets.randbelow(size) can return 0, which makes the
        public key the identity and divides by zero inside sign(); 0 is
        now excluded from the range.
        """
        return 1 + secrets.randbelow(self.size - 1)

    def get_public_key(self, private_key: int) -> ECElement:
        """Public key = private_key * G."""
        return self.generator * private_key

    def generate_key_pair(self) -> Tuple[int, ECElement]:
        """Return a freshly generated (private, public) pair."""
        private_key = self.generate_private_key()
        return (private_key, self.get_public_key(private_key))

    def hash(self, message: bytes) -> ZModElement:
        """Hash the message into the scalar field using blake2b truncated
        to roughly log2(size) bits."""
        h = int.from_bytes(
            hashlib.blake2b(message, digest_size=math.ceil(math.log(self.size, 2)/8)).digest(),
            sys.byteorder
        )
        return self.field.gen_element(h)

    def sign(self, message: bytes, private_key: int) -> Tuple[ZModElement, ZModElement]:
        """ECDSA-style signature: returns (r, s) with r = (kG).x and
        s = (h + r*priv) / k for a fresh ephemeral k."""
        warnings.warn(
            warning_message +
            "You've signed a message with code that should not be used for cryptograpic purposes."
        )
        h: ZModElement = self.hash(message)
        k, kG = self.generate_key_pair()
        r: ZModElement = self.field.gen_element(kG.x.value)
        s: ZModElement = (h + r * private_key) / k
        return (r, s)

    def verify(
        self,
        message: bytes,
        signature: Tuple[ZModElement, ZModElement],
        public_key: ECElement
    ) -> bool:
        """Check an (r, s) signature: accept iff (u*G + v*Pub).x == r
        with u = h/s and v = r/s."""
        warnings.warn(
            warning_message +
            "You've verified a signature with code that should not be used "
            "for cryptograpic purposes."
        )
        r: ZModElement
        s: ZModElement
        r, s = signature
        h: ZModElement = self.hash(message)
        w: ZModElement = ~s          # modular inverse of s
        u: ZModElement = w * h
        v: ZModElement = w * r
        Q: ECElement = (self.generator*u.value) + (public_key*v.value)
        return self.field.gen_element(Q.x.value) == r

    def symetric_key_derivation_scheme(
        self,
        shared_secret: ECElement,
        iterations: int = 100000
    ) -> bytes:
        """Derive a urlsafe-base64 Fernet key from the shared point's x
        coordinate via ConcatKDF(SHA-256).

        NOTE(review): *iterations* is accepted for API compatibility but is
        never used — ConcatKDFHash has no iteration count, so encrypt()
        and decrypt() derive the same key despite passing different values.
        """
        kdf = ConcatKDFHash(
            algorithm=hashes.SHA256(),
            length=32,
            otherinfo=b'elliptic-curve-demo',
        )
        shared_secret_bytes: bytes = shared_secret.x.value.to_bytes(
            (shared_secret.x.value.bit_length() + 7) // 8,
            byteorder=sys.byteorder
        )
        key = base64.urlsafe_b64encode(kdf.derive(shared_secret_bytes))
        return key

    def encrypt(self, message: bytes, public_key: ECElement) -> Tuple[bytes, ECElement]:
        """ECIES-style encryption: returns (ciphertext, ephemeral public
        key); the receiver rebuilds the shared secret from the latter."""
        warnings.warn(
            warning_message +
            "You've encrypted a message with code that should not be used "
            "for cryptograpic purposes."
        )
        d: int = self.generate_private_key()
        ephemeral_key: ECElement = self.generator * d
        shared_secret: ECElement = public_key * d
        key = self.symetric_key_derivation_scheme(shared_secret)
        f = Fernet(key)
        token = f.encrypt(message)
        return (token, ephemeral_key)

    def decrypt(self, message: bytes, ephemeral_key: ECElement, private_key: int) -> bytes:
        """Reverse of encrypt(): rebuild the shared secret as
        private_key * ephemeral_key, derive the same Fernet key, decrypt."""
        warnings.warn(
            warning_message +
            "You've decrypted a token with code that should not be used "
            "for cryptograpic purposes."
        )
        shared_secret: ECElement = ephemeral_key * private_key
        key = self.symetric_key_derivation_scheme(shared_secret, iterations=1)
        f = Fernet(key)
        return f.decrypt(message)
| 32.315574 | 98 | 0.555485 | 1,033 | 7,885 | 4.375605 | 0.21394 | 0.009735 | 0.011947 | 0.012389 | 0.212389 | 0.160398 | 0.091372 | 0.089823 | 0.060177 | 0.047788 | 0 | 0.006461 | 0.312999 | 7,885 | 243 | 99 | 32.44856 | 0.779029 | 0.012809 | 0 | 0.131068 | 0 | 0 | 0.158037 | 0.006955 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106796 | false | 0 | 0.063107 | 0.029126 | 0.354369 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3e0f6e08ad291980fc7266ea3e2dbebcd6fc1e2 | 2,045 | py | Python | DuplicateFileDetector/Menu.py | aksheus/Duplicate-File-Detector | 2c096635b78e9617a65027a725952a8fdbb5a0f7 | [
"MIT"
] | null | null | null | DuplicateFileDetector/Menu.py | aksheus/Duplicate-File-Detector | 2c096635b78e9617a65027a725952a8fdbb5a0f7 | [
"MIT"
] | null | null | null | DuplicateFileDetector/Menu.py | aksheus/Duplicate-File-Detector | 2c096635b78e9617a65027a725952a8fdbb5a0f7 | [
"MIT"
] | null | null | null | from tkinter import filedialog,messagebox
from builtins import str
import tkinter
import os
class Menu:
    """Tk window offering two duplicate-file search actions."""

    def __init__(self, Title, Resolution):
        """Build the window, title label and buttons, then block in the
        Tk mainloop until the window is destroyed."""
        self.Root = tkinter.Tk()
        self.Root.title(Title)
        self.Root.geometry(Resolution)
        self.MenuTitle = tkinter.Label(self.Root, text="Find Duplicate Files", width=25, font=('Consolas', 16))
        self.MenuTitle.pack()
        self.MenuTitle.place(x=125, y=50)
        self.Root.configure(background="black")
        self.Buttons = []
        # [x, y] of the next button slot; y advances per added button.
        self.ButtonPositions = [225, 150]
        self.ChosenPath = ''
        self.ChosenFile = ''
        self.IsSingle = False
        self.SetupButtons()
        self.Root.mainloop()

    def GetSearchPath(self, IsSingle):
        """Prompt for the base directory (and, if IsSingle, a target
        file), confirm, and close the window.

        Bug fix: after showing an error dialog the original destroyed the
        window but kept executing, reusing the destroyed Tk root; it now
        returns immediately after each error.
        """
        self.ChosenPath = filedialog.askdirectory(parent=self.Root, initialdir="/", title="Please select base directory")
        assert isinstance(self.ChosenPath, str)
        if not os.path.exists(self.ChosenPath):
            messagebox.showerror('Error', 'Path Does not Exist')
            self.Root.destroy()
            return
        if IsSingle:
            self.ChosenFile = filedialog.askopenfilename(parent=self.Root, title='Choose a File')
            assert isinstance(self.ChosenFile, str)
            if not os.path.exists(self.ChosenFile):
                messagebox.showerror('Error', 'File Does Not Exist')
                self.Root.destroy()
                return
            self.IsSingle = True
        messagebox.showinfo('Execution', 'Csv of Duplicate Files Will Be Generated Shortly')
        self.Root.destroy()
        return

    def AddButton(self, Text, Action=None):
        """Create a button in the next slot of the left-hand column."""
        self.Buttons.append(tkinter.Button(self.Root, text=Text, command=Action))
        self.Buttons[-1].pack()
        self.Buttons[-1].place(x=self.ButtonPositions[0], y=self.ButtonPositions[1])
        self.ButtonPositions[1] += 60  # place the next button 60 px lower
        return

    def SetupButtons(self):
        """Register the two search actions as buttons."""
        self.AddButton(Text="Search All Duplicates", Action=lambda: self.GetSearchPath(False))
        self.AddButton(Text="Find Duplicate of File", Action=lambda: self.GetSearchPath(True))
        return
| 38.584906 | 117 | 0.654279 | 237 | 2,045 | 5.628692 | 0.396624 | 0.071964 | 0.033733 | 0.014993 | 0.076462 | 0.076462 | 0.035982 | 0 | 0 | 0 | 0 | 0.01388 | 0.224939 | 2,045 | 52 | 118 | 39.326923 | 0.82776 | 0 | 0 | 0.130435 | 0 | 0 | 0.1091 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 1 | 0.086957 | false | 0 | 0.086957 | 0 | 0.26087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3e2825a16a71ef4f94da2a5f0430d294e223568 | 974 | py | Python | b_tree/b_tree_2/balanced_b_tree.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | null | null | null | b_tree/b_tree_2/balanced_b_tree.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | 5 | 2021-03-10T11:49:39.000Z | 2022-02-27T01:35:59.000Z | b_tree/b_tree_2/balanced_b_tree.py | rjsnh1522/geeks-4-geeks-python | 9bea0ce4f3fae9b5f9e5952fb5b4b3a8c6186cf4 | [
"MIT"
] | null | null | null | # Definition for a binary tree node
class TreeNode:
    """A binary tree node holding a value and optional children."""

    def __init__(self, x, left=None, right=None):
        """Create a node with value *x*.

        *left* and *right* default to None, so the original one-argument
        form TreeNode(x) still works; they may now be supplied directly
        to build subtrees in one expression (backward-compatible
        generalization).
        """
        self.val = x
        self.left = left
        self.right = right
class Solution:
    """Decides whether a binary tree is height-balanced."""

    # @param A : root node of tree
    # @return an integer
    def isBalanced(self, A):
        """Return 1 if the tree rooted at A is height-balanced, else 0."""
        balanced, _ = self.height(A)
        return 1 if balanced else 0

    def height(self, A):
        """Return (is_balanced, height) for the subtree rooted at A.

        An empty subtree has height -1 and counts as balanced; a node is
        balanced when both children are and their heights differ by at
        most one.
        """
        if A is None:
            return True, -1
        left_ok, left_h = self.height(A.left)
        right_ok, right_h = self.height(A.right)
        node_height = max(left_h, right_h) + 1
        is_ok = left_ok and right_ok and abs(left_h - right_h) <= 1
        return is_ok, node_height
# Build the demo tree:
#        1
#       / \
#      2   3
#         /
#        4
#         \
#          5
#         /
#        6
#       /
#      7
root = TreeNode(1)
root.left = TreeNode(2)
right_child = TreeNode(3)
root.right = right_child
node4 = TreeNode(4)
right_child.left = node4
node5 = TreeNode(5)
node4.right = node5
node6 = TreeNode(6)
node5.left = node6
node6.left = TreeNode(7)
# The long right-hand chain makes the tree unbalanced, so this prints 0.
print(Solution().isBalanced(root))
| 19.48 | 43 | 0.540041 | 150 | 974 | 3.466667 | 0.36 | 0.057692 | 0.063462 | 0.023077 | 0.113462 | 0 | 0 | 0 | 0 | 0 | 0 | 0.060842 | 0.341889 | 974 | 49 | 44 | 19.877551 | 0.75039 | 0.084189 | 0 | 0.108108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0 | 0 | 0.27027 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3e5419e60dd7ed8ba59e0eb9fb45ebe1365558f | 472 | py | Python | datasets/tinyimagenet.py | SonyPony/shrinkbench | efe078569d6c91add40f14fa673c1fa7c9cde624 | [
"MIT"
] | null | null | null | datasets/tinyimagenet.py | SonyPony/shrinkbench | efe078569d6c91add40f14fa673c1fa7c9cde624 | [
"MIT"
] | null | null | null | datasets/tinyimagenet.py | SonyPony/shrinkbench | efe078569d6c91add40f14fa673c1fa7c9cde624 | [
"MIT"
] | null | null | null | # Authors: Son Hai Nguyen, Miroslav Karpíšek
# Logins: xnguye16, xkarpi05
# Project: Neural network pruning
# Course: Convolutional Neural Networks
# Year: 2021
import os
import torchvision.datasets as datasets
class TinyImageNet(datasets.ImageFolder):
    """ImageFolder over the Tiny-ImageNet directory layout, selecting the
    "train" or "test" sub-folder by the *train* flag."""

    # Tiny-ImageNet images are 64x64; 56 is presumably the post-crop size
    # used by the training pipeline — TODO confirm against the transforms.
    IMG_SIZE = 56

    def __init__(self, root: str, train=True, **kwargs):
        subdir = "train" if train else "test"
        data_root = os.path.join(root, subdir)
        super().__init__(data_root, **kwargs)
| 22.47619 | 63 | 0.661017 | 55 | 472 | 5.509091 | 0.781818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027933 | 0.241525 | 472 | 20 | 64 | 23.6 | 0.818436 | 0.317797 | 0 | 0 | 0 | 0 | 0.028481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3e61bd8ca1f42e3b9a657af37fc1b37d341004b | 6,937 | py | Python | tuun/probo/models/gp_simple.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 33 | 2020-08-30T16:22:35.000Z | 2022-02-26T13:48:32.000Z | tuun/probo/models/gp_simple.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2021-01-18T19:46:43.000Z | 2021-03-24T09:59:14.000Z | tuun/probo/models/gp_simple.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2020-08-25T17:02:15.000Z | 2021-04-21T16:40:44.000Z | """
Classes for simple GP models without external PPL backend.
"""
from argparse import Namespace
import copy
import numpy as np
from .gp.gp_utils import kern_exp_quad, sample_mvn, gp_post
from ..util.data_transform import DataTransformer
from ..util.misc_util import dict_to_namespace
class SimpleGp:
"""
Simple GP model without external PPL backend.
"""
def __init__(self, params=None, verbose=True):
"""
Parameters
----------
params : Namespace_or_dict
Namespace or dict of parameters for this model.
verbose : bool
If True, print description string.
"""
self.set_params(params)
if verbose:
self.print_str()
def set_params(self, params):
"""Set self.params, the parameters for this model."""
params = dict_to_namespace(params)
# Set self.params
self.params = Namespace()
self.params.ls = getattr(params, 'ls', 3.7)
self.params.alpha = getattr(params, 'alpha', 1.85)
self.params.sigma = getattr(params, 'sigma', 1e-5)
self.params.kernel = getattr(params, 'kernel', kern_exp_quad)
self.params.trans_x = getattr(params, 'trans_x', False)
def set_data(self, data):
"""Set self.data."""
self.data_init = copy.deepcopy(data)
self.data = copy.deepcopy(self.data_init)
# Transform data.x
self.data.x = self.transform_xin_list(self.data.x)
# Transform data.y
self.transform_data_y()
def transform_xin_list(self, xin_list):
"""Transform list of xin (e.g. in data.x)."""
# Ensure data.x is correct format (list of 1D numpy arrays)
xin_list = [np.array(xin).reshape(-1) for xin in xin_list]
if self.params.trans_x:
# apply transformation to xin_list
xin_list_trans = xin_list # TODO: define default transformation
else:
xin_list_trans = xin_list
return xin_list_trans
def transform_data_y(self):
"""Transform data.y using DataTransformer."""
self.dt = DataTransformer(self.data, False)
y_trans = self.dt.transform_y_data()
self.data = Namespace(x=self.data.x, y=y_trans)
def inf(self, data):
"""Set data, run inference, update self.sample_list."""
self.set_data(data)
self.sample_list = [
Namespace(
ls=self.params.ls, alpha=self.params.alpha, sigma=self.params.sigma
)
]
def post(self, s):
"""Return one posterior sample."""
return self.sample_list[0]
def gen_list(self, x_list, z, s, nsamp):
"""
Draw nsamp samples from generative process, given list of inputs
x_list, posterior sample z, and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(-1,).
z : Namespace
Namespace of GP hyperparameters.
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from generative process.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_pred(nsamp, x_list, z)
pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]
return pred_list
def postgen_list(self, x_list, s, nsamp):
"""
Draw nsamp samples from posterior predictive distribution, given list
of inputs x_list and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(-1,).
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from the posterior predictive
distribution.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
hp = self.sample_list[0]
pred_list = self.sample_gp_post_pred(nsamp, x_list, hp, full_cov=True)
pred_list = [self.dt.inv_transform_y_data(pr) for pr in pred_list]
return pred_list
def sample_gp_pred(self, nsamp, input_list, hp):
"""
Sample from GP predictive distribution given one posterior GP sample.
Parameters
----------
nsamp : int
Number of samples from predictive distribution.
input_list : list
A list of numpy ndarray shape=(-1, ).
hp : Namespace
Namespace of GP hyperparameters.
Returns
-------
list
A list of len=len(input_list) of numpy ndarrays shape=(nsamp, 1).
"""
postmu, postcov = gp_post(
self.data.x,
self.data.y,
input_list,
hp.ls,
hp.alpha,
hp.sigma,
self.params.kernel,
)
single_post_sample = sample_mvn(postmu, postcov, 1).reshape(-1)
pred_list = [
single_post_sample for _ in range(nsamp)
] #### TODO: instead of duplicating this TS,
#### sample nsamp times from generative
#### process (given/conditioned-on this TS)
return list(np.stack(pred_list).T)
def sample_gp_post_pred(self, nsamp, input_list, hp, full_cov=False):
"""
Sample from GP posterior predictive distribution.
Parameters
----------
nsamp : int
Number of samples from posterior predictive distribution.
input_list : list
A list of numpy ndarray shape=(-1, ).
hp : Namespace
Namespace of GP hyperparameters.
full_cov : bool
If True, return covariance matrix, else return diagonal only.
Returns
-------
list
A list of len=len(input_list) of numpy ndarrays shape=(nsamp, 1).
"""
postmu, postcov = gp_post(
self.data.x,
self.data.y,
input_list,
hp.ls,
hp.alpha,
hp.sigma,
self.params.kernel,
full_cov,
)
if full_cov:
ppred_list = list(sample_mvn(postmu, postcov, nsamp))
else:
ppred_list = list(
np.random.normal(
postmu.reshape(-1),
postcov.reshape(-1),
size=(nsamp, len(input_list)),
)
)
return list(np.stack(ppred_list).T)
def print_str(self):
"""Print a description string."""
print('*[INFO] ' + str(self))
def __str__(self):
return f'SimpleGp with params={self.params}'
| 30.831111 | 83 | 0.566095 | 849 | 6,937 | 4.467609 | 0.182568 | 0.044819 | 0.023201 | 0.030055 | 0.389402 | 0.348537 | 0.310572 | 0.274189 | 0.274189 | 0.274189 | 0 | 0.004552 | 0.335015 | 6,937 | 224 | 84 | 30.96875 | 0.817689 | 0.357071 | 0 | 0.270833 | 0 | 0 | 0.017753 | 0 | 0 | 0 | 0 | 0.008929 | 0 | 1 | 0.135417 | false | 0 | 0.0625 | 0.010417 | 0.28125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3e7b29b7add9b32e1e75aded94807db6fe3e477 | 2,882 | py | Python | alipay.py | nju161250102/NJUSystem_Server | 8b72fde8701d55e62ce8ed4bcb299ec8f0ab32fe | [
"MIT"
] | 1 | 2018-07-31T13:31:52.000Z | 2018-07-31T13:31:52.000Z | alipay.py | nju161250102/NJUSystem_Server | 8b72fde8701d55e62ce8ed4bcb299ec8f0ab32fe | [
"MIT"
] | null | null | null | alipay.py | nju161250102/NJUSystem_Server | 8b72fde8701d55e62ce8ed4bcb299ec8f0ab32fe | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import sqlite3
import pandas as pd
from model import AliPay
def classify(item: AliPay):
if item.money_state == "" or item.money_state == "冻结":
return "无效交易", ""
if "余额宝-" in item.name and "-收益发放" in item.name:
return "理财", "余额宝收益"
if "蚂蚁财富-" in item.target:
if item.flag == "收入":
return "理财", "基金购买"
else:
return "理财", "基金赎回"
if "博时黄金" in item.target:
if item.flag == "收入":
return "理财", "黄金购买"
else:
return "理财", "黄金卖出"
if item.target in ["中国工商银行", "网商银行"]:
return "转账", "转账到银行卡"
if "淘宝" in item.source:
return "消费", "淘宝"
if "中国铁路" in item.target:
return "消费", "火车票"
if "车巴达" in item.target:
return "消费", "汽车票"
if "定期理财" in item.name and item.flag == "支出":
return "理财", "定期购买"
if "理财赎回" in item.name and item.flag == "收入":
return "理财", "定期赎回"
if item.source == "支付宝网站" and item.type == "即时到账交易" and item.money_state != "资金转移":
if item.target in ["支付宝推荐赏金", "红包推荐奖励"]:
return "收入", "推荐奖励"
if item.target in ["蚂蚁财富", "支付宝五福的红包"]:
return "收入", "活动奖励"
if "自来水" in item.target:
return "消费", "水费"
if item.flag == "收入":
return "转账", "转入"
else:
return "转账", "转出"
if "其他" in item.source:
if "超市" in item.target or "超市" in item.name:
return "消费", "超市"
if "小天鹅" in item.target:
return "消费", "洗衣"
return "其他", ""
def main():
    """Rebuild the local sqlite table and import the Alipay CSV export.

    Side effects only: recreates the ``alipay`` table, then reads the CSV,
    classifies each row and persists it through the AliPay model.
    """
    # Recreate the target table from scratch.
    conn = sqlite3.connect('alipay.db')
    cursor = conn.cursor()
    cursor.execute("DROP TABLE IF EXISTS alipay;")
    cursor.execute('''
        CREATE TABLE alipay (
            trade_number TEXT DEFAULT NULL,
            order_number TEXT DEFAULT NULL,
            create_time TIMESTAMP DEFAULT NULL,
            pay_time TIMESTAMP DEFAULT NULL,
            modify_time TIMESTAMP DEFAULT NULL,
            source TEXT DEFAULT NULL,
            type TEXT DEFAULT NULL,
            target TEXT DEFAULT NULL,
            name TEXT DEFAULT NULL,
            money REAL DEFAULT NULL,
            flag INTEGER DEFAULT NULL,
            trade_state TEXT DEFAULT NULL,
            fee REAL DEFAULT NULL,
            money_back REAL DEFAULT NULL,
            remark TEXT DEFAULT NULL,
            money_state REAL DEFAULT NULL,
            first_type TEXT DEFAULT '',
            second_type TEXT DEFAULT '');
        ''')
    # Read the exported record file (Alipay exports use GBK encoding).
    df = pd.read_csv("D:/alipay_record.csv", encoding="gbk")
    # Strip trailing whitespace from the column names.
    df = df.rename(columns={col: col.strip() for col in df.columns})
    # Classify and persist every record via the AliPay model.
    for index, row in df.iterrows():
        print(row["交易号"])
        item = AliPay.from_row(row)
        item.first_type, item.second_type = classify(item)
        item.save()


if __name__ == '__main__':
    main()
| 29.408163 | 87 | 0.552047 | 376 | 2,882 | 4.154255 | 0.327128 | 0.112676 | 0.076825 | 0.040973 | 0.137004 | 0.067862 | 0.040973 | 0.040973 | 0.040973 | 0 | 0 | 0.001525 | 0.317488 | 2,882 | 97 | 88 | 29.71134 | 0.792578 | 0.009368 | 0 | 0.070588 | 0 | 0 | 0.343278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023529 | false | 0 | 0.047059 | 0 | 0.305882 | 0.011765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3ec7075da3ac0f6caa68b6294ec49080570e1a4 | 258 | py | Python | learning/challenge/counting.py | Nephrin/Tut | 9454be28fd37c155d0b4e97876196f8d33ccf8e5 | [
"Apache-2.0"
] | 2 | 2019-06-23T07:17:30.000Z | 2019-07-06T15:15:42.000Z | learning/challenge/counting.py | Nephrin/Tut | 9454be28fd37c155d0b4e97876196f8d33ccf8e5 | [
"Apache-2.0"
] | null | null | null | learning/challenge/counting.py | Nephrin/Tut | 9454be28fd37c155d0b4e97876196f8d33ccf8e5 | [
"Apache-2.0"
] | 1 | 2019-06-23T07:17:43.000Z | 2019-06-23T07:17:43.000Z | def main(inp):
print(count(inp))
def count(inp):
    """Return the English word for the last decimal digit of *inp*.

    For multiples of 10 the number itself is returned unchanged;
    otherwise the word for ``inp % 10`` ("one" .. "nine") is returned.

    Fix: the original printed the result and returned None, which made
    ``main``'s ``print(count(inp))`` emit an extra "None"; the value is
    now returned so the caller decides how to display it.
    """
    words = {
        1: "one", 2: "two", 3: "three", 4: "four", 5: "five",
        6: "six", 7: "seven", 8: "eight", 9: "nine",
    }
    last_digit = inp % 10
    if last_digit == 0:
        return inp
    return words[last_digit]
if __name__ == '__main__':
    # Quick demonstration when executed as a script (200 is a multiple of 10).
    main(200)
c3edee5be71eaec577fa2bdd9fc8bc030cfafc06 | 5,736 | py | Python | Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/syntax/_ruby.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | 11 | 2017-09-30T05:47:28.000Z | 2019-04-15T11:58:40.000Z | Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/syntax/_ruby.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/wx-2.8-msw-unicode/wx/tools/Editra/src/syntax/_ruby.py | ekkipermana/robotframework-test | 243ca26f69962f8cf20cd7d054e0ff3e709bc7f4 | [
"bzip2-1.0.6"
] | 7 | 2018-02-13T10:22:39.000Z | 2019-07-04T07:39:28.000Z | ###############################################################################
# Name: ruby.py #
# Purpose: Define Ruby syntax for highlighting and other features #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2007 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
FILE: ruby.py
AUTHOR: Cody Precord
@summary: Lexer configuration module for Ruby.
@todo: Default Style Refinement.
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: _ruby.py 64561 2010-06-12 01:49:05Z CJP $"
__revision__ = "$Revision: 64561 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
import re
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# Ruby Keywords
# NOTE: putting words with question marks in them causes an assertion to be
# raised when showing the list in the keyword helper! defined?
RUBY_KW = (0, "__FILE__ and def end in or self unless __LINE__ begin defined "
"ensure module redo super until BEGIN break do false next "
"require rescue then when END case else for nil retry true while "
"alias class elsif if not return undef yieldr puts raise "
"protected private")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_RB_BACKTICKS, 'scalar_style'),
(stc.STC_RB_CHARACTER, 'char_style'),
(stc.STC_RB_CLASSNAME, 'class_style'),
(stc.STC_RB_CLASS_VAR, 'default_style'), # STYLE ME
(stc.STC_RB_COMMENTLINE, 'comment_style'),
(stc.STC_RB_DATASECTION, 'default_style'), # STYLE ME
(stc.STC_RB_DEFAULT, 'default_style'),
(stc.STC_RB_DEFNAME, 'keyword3_style'), # STYLE ME
(stc.STC_RB_ERROR, 'error_style'),
(stc.STC_RB_GLOBAL, 'global_style'),
(stc.STC_RB_HERE_DELIM, 'default_style'), # STYLE ME
(stc.STC_RB_HERE_Q, 'here_style'),
(stc.STC_RB_HERE_QQ, 'here_style'),
(stc.STC_RB_HERE_QX, 'here_style'),
(stc.STC_RB_IDENTIFIER, 'default_style'),
(stc.STC_RB_INSTANCE_VAR, 'scalar2_style'),
(stc.STC_RB_MODULE_NAME, 'global_style'), # STYLE ME
(stc.STC_RB_NUMBER, 'number_style'),
(stc.STC_RB_OPERATOR, 'operator_style'),
(stc.STC_RB_POD, 'default_style'), # STYLE ME
(stc.STC_RB_REGEX, 'regex_style'), # STYLE ME
(stc.STC_RB_STDIN, 'default_style'), # STYLE ME
(stc.STC_RB_STDOUT, 'default_style'), # STYLE ME
(stc.STC_RB_STRING, 'string_style'),
(stc.STC_RB_STRING_Q, 'default_style'), # STYLE ME
(stc.STC_RB_STRING_QQ, 'default_style'), # STYLE ME
(stc.STC_RB_STRING_QR, 'default_style'), # STYLE ME
(stc.STC_RB_STRING_QW, 'default_style'), # STYLE ME
(stc.STC_RB_STRING_QX, 'default_style'), # STYLE ME
(stc.STC_RB_SYMBOL, 'default_style'), # STYLE ME
(stc.STC_RB_UPPER_BOUND, 'default_style'), # STYLE ME
(stc.STC_RB_WORD, 'keyword_style'),
(stc.STC_RB_WORD_DEMOTED, 'keyword2_style') ]
#---- Extra Properties ----#
FOLD = ("fold", "1")
TIMMY = ("fold.timmy.whinge.level", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
    """SyntaxData object for Ruby"""

    def __init__(self, langid):
        syndata.SyntaxDataBase.__init__(self, langid)

        # Use Scintilla's builtin Ruby lexer and hook up auto-indentation.
        self.SetLexer(stc.STC_LEX_RUBY)
        self.RegisterFeature(synglob.FEATURE_AUTOINDENT, AutoIndenter)

    def GetKeywords(self):
        """Returns the list of keyword specifications for Ruby """
        return [RUBY_KW]

    def GetSyntaxSpec(self):
        """Syntax Specifications: lexer token -> Editra style tag """
        return SYNTAX_ITEMS

    def GetProperties(self):
        """Returns a list of Extra Properties to set (code folding switches) """
        return [FOLD, TIMMY]

    def GetCommentPattern(self):
        """Returns a list of characters used to comment a block of code """
        return [u'#']
#-----------------------------------------------------------------------------#
def AutoIndenter(estc, pos, ichar):
    """Auto indent Ruby code.
    @param estc: EditraStyledTextCtrl
    @param pos: current carat position
    @param ichar: Indentation character

    Fixes: use floor division so the arithmetic also works on Python 3
    (``/`` yields a float there, and ``ichar * i_space`` would raise);
    make the regex a raw string; docstring said "cpp" for a Ruby module;
    removed a dead initial ``rtxt = u''`` assignment.
    """
    line = estc.GetCurrentLine()
    text = estc.GetTextRange(estc.PositionFromLine(line), pos)
    eolch = estc.GetEOLChar()
    indent = estc.GetLineIndentation(line)
    if ichar == u"\t":
        tabw = estc.GetTabWidth()
    else:
        tabw = estc.GetIndent()
    # Number of whole indentation units on the current line.
    i_space = indent // tabw
    ndent = eolch + ichar * i_space
    # Pad with spaces for any remainder that is not a whole unit.
    rtxt = ndent + ((indent - (tabw * i_space)) * u' ')
    # Open one more indentation level after a block opener or a class/def line.
    def_pat = re.compile(r'\s*(class|def)\s+[a-zA-Z_][a-zA-Z0-9_]*')
    text = text.strip()
    if text.endswith('{') or def_pat.match(text):
        rtxt += ichar
    # Put text in the buffer
    estc.AddText(rtxt)
#---- Syntax Modules Internal Functions ----#
def KeywordString(option=0):
    """Returns the specified Keyword String
    @note: not used by most modules
    """
    # Ruby has only a single keyword set, so *option* is ignored; the
    # second element of the (id, words) pair holds the actual words.
    keywords = RUBY_KW[1]
    return keywords
#---- End Syntax Modules Internal Functions ----#
| 38.24 | 80 | 0.540795 | 625 | 5,736 | 4.712 | 0.3728 | 0.06927 | 0.089643 | 0.070628 | 0.224448 | 0.187097 | 0.129372 | 0.056027 | 0 | 0 | 0 | 0.009074 | 0.269874 | 5,736 | 149 | 81 | 38.496644 | 0.694126 | 0.299512 | 0 | 0 | 0 | 0.0125 | 0.223649 | 0.022204 | 0 | 0 | 0 | 0.006711 | 0 | 1 | 0.0875 | false | 0 | 0.05 | 0 | 0.2125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3eef6f9729f8c1bef48244b9732c1b942b74194 | 4,270 | py | Python | dephell_venvs/_venv.py | dephell/dephell_venvs | 3d8487eae88cb8d644d0e9a5c36d5392214afdec | [
"MIT"
] | 3 | 2019-04-07T21:46:35.000Z | 2020-11-20T21:09:24.000Z | dephell_venvs/_venv.py | dephell/dephell_venvs | 3d8487eae88cb8d644d0e9a5c36d5392214afdec | [
"MIT"
] | 2 | 2019-07-18T15:20:50.000Z | 2020-11-11T10:42:05.000Z | dephell_venvs/_venv.py | dephell/dephell_venvs | 3d8487eae88cb8d644d0e9a5c36d5392214afdec | [
"MIT"
] | 1 | 2021-09-28T02:40:29.000Z | 2021-09-28T02:40:29.000Z | # built-in
import shutil
import sys
from itertools import chain
from pathlib import Path
from typing import Optional, Union
# external
import attr
from dephell_pythons import Python, Finder
# app
from ._constants import PYTHONS, IS_WINDOWS
from ._cached_property import cached_property
from ._builder import VEnvBuilder
@attr.s()
class VEnv:
    """A Python virtual environment rooted at *path*.

    ``project`` and ``env`` are optional labels used only to build the
    shell prompt. Path discovery (bin/lib/interpreter) is lazy and
    cached; the caches are dropped after create/destroy.
    """

    path = attr.ib(type=Path)  # root directory of the venv
    project = attr.ib(type=str, default=None)  # owning project name (optional)
    env = attr.ib(type=str, default=None)  # environment name (optional)

    def __attrs_post_init__(self) -> None:
        # `Path` as `converter` doesn't work for Python 3.5
        if type(self.path) is str:
            self.path = Path(self.path)

    # properties

    @property
    def name(self) -> str:
        """Directory name of the venv root."""
        return self.path.name

    @property
    def prompt(self) -> str:
        """Prompt label: "project/env", either part alone, or the dir name."""
        if self.project and self.env:
            return self.project + '/' + self.env
        if self.project:
            return self.project
        if self.env:
            return self.env
        return self.path.name

    @cached_property
    def bin_path(self) -> Optional[Path]:
        """Scripts dir ('Scripts' on Windows, else 'bin'), or None if missing."""
        if IS_WINDOWS:
            path = self.path / 'Scripts'
            if path.exists():
                return path
        path = self.path / 'bin'
        if path.exists():
            return path
        return None

    @cached_property
    def lib_path(self) -> Optional[Path]:
        """site-packages dir, trying several layouts in turn; None if absent."""
        # pypy
        path = self.path / 'site-packages'
        if path.exists():
            return path
        # win
        if IS_WINDOWS:
            path = self.path / 'Lib' / 'site-packages'
            if path.exists():
                return path
        # cpython unix
        if self.python_path is not None:
            path = self.path / 'lib' / self.python_path.name / 'site-packages'
            if path.exists():
                return path
        # cpython unix when python_path detected not so good
        path = self.path / 'lib'
        paths = list(path.glob('python*'))
        if not paths:
            return None
        path = paths[0] / 'site-packages'
        if path.exists():
            return path
        return None

    @cached_property
    def python_path(self) -> Optional[Path]:
        """Interpreter executable inside the venv, or None if not found."""
        if self.bin_path is None:
            return None
        executables = {path.name for path in self.bin_path.iterdir()}
        # Try the most specific names first: pypy/python + version suffix,
        # with and without '.exe', falling back to the bare name.
        for implementation in ('pypy', 'python'):
            for suffix in chain(PYTHONS, ['']):
                for ext in ('', '.exe'):
                    path = self.bin_path / (implementation + suffix)
                    if ext:
                        path = path.with_suffix(ext)
                    if path.name in executables:
                        return path
        return None

    @cached_property
    def python(self) -> Python:
        """A dephell_pythons.Python describing the venv's interpreter."""
        finder = Finder()
        python = Python(
            path=self.python_path,
            version=finder.get_version(path=self.python_path),
            implementation=finder.get_implementation(path=self.python_path),
        )
        python.lib_paths = [self.lib_path]
        return python

    # methods

    def exists(self) -> bool:
        """Returns true if venv already created and valid.

        It's a method like in `Path`.
        """
        return bool(self.bin_path)

    def create(self, python_path: Union[Path, str, None] = None) -> None:
        """Create the venv on disk (with pip); defaults to the current interpreter."""
        if python_path is None:
            python_path = sys.executable
        builder = VEnvBuilder(
            python=str(python_path),
            with_pip=True,
            prompt=self.prompt,
        )
        builder.create(str(self.path))
        self._clear_cache()

    def destroy(self) -> None:
        """Remove the venv directory tree."""
        shutil.rmtree(str(self.path))
        self._clear_cache()

    def clone(self, path: Path) -> 'VEnv':
        """Copy this venv to *path* and return a VEnv for the copy."""
        shutil.copytree(str(self.path), str(path), copy_function=shutil.copy)
        # TODO: fix executables
        # https://github.com/ofek/hatch/blob/master/hatch/venv.py
        # NOTE(review): the cloned venv's scripts still reference the old
        # path (see TODO above); treat clones as read-only until fixed.
        ...
        return type(self)(path=path)

    # private methods

    def _clear_cache(self):
        # Drop cached_property values so paths are re-resolved after create/destroy.
        if 'bin_path' in self.__dict__:
            del self.__dict__['bin_path']
        if 'lib_path' in self.__dict__:
            del self.__dict__['lib_path']
        if 'python_path' in self.__dict__:
            del self.__dict__['python_path']
| 27.908497 | 78 | 0.566745 | 512 | 4,270 | 4.572266 | 0.232422 | 0.054677 | 0.035882 | 0.046134 | 0.24434 | 0.217001 | 0.176848 | 0.101239 | 0.080308 | 0.041862 | 0 | 0.001053 | 0.332787 | 4,270 | 152 | 79 | 28.092105 | 0.820639 | 0.078923 | 0 | 0.27027 | 0 | 0 | 0.038738 | 0 | 0 | 0 | 0 | 0.006579 | 0 | 1 | 0.108108 | false | 0 | 0.09009 | 0.009009 | 0.414414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3f2cf04bce9bef9ea2b365012ef9d47d14ae3f2 | 447 | py | Python | apps/bc_scraper/actions/search.py | aurmeneta/ramos-uc | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | [
"MIT"
] | 7 | 2021-07-14T18:13:35.000Z | 2021-11-21T20:10:54.000Z | apps/bc_scraper/actions/search.py | aurmeneta/ramos-uc | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | [
"MIT"
] | 57 | 2021-07-10T01:31:56.000Z | 2022-01-14T02:02:58.000Z | apps/bc_scraper/actions/search.py | aurmeneta/ramos-uc | 364ab3c5a55032ab7ffc08665a2da4c5ff04ae58 | [
"MIT"
] | 4 | 2021-07-23T16:51:55.000Z | 2021-08-31T02:41:41.000Z | from ..scraper.search import bc_search
def search(initials, period):
"""Search for a initial and period in BuscaCursosUC.
Prints the results. Useful for testing only.
"""
print("Searching in BC:", initials)
courses = bc_search(initials, period)
for c in courses:
print(c["initials"], c["section"], c["name"], "-", c["teachers"])
if len(courses) >= 50:
print("> Some results may have been truncated.")
| 31.928571 | 73 | 0.642058 | 59 | 447 | 4.830508 | 0.59322 | 0.05614 | 0.140351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005714 | 0.217002 | 447 | 13 | 74 | 34.384615 | 0.808571 | 0.210291 | 0 | 0 | 0 | 0 | 0.245562 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3f344b0d44522013033f6e3330ba00bba1d717e | 30,559 | py | Python | wgpu/rs.py | Korijn/wgpu-py | 72f89be121ea6cd819a145ee3b037004211b3245 | [
"BSD-2-Clause"
] | null | null | null | wgpu/rs.py | Korijn/wgpu-py | 72f89be121ea6cd819a145ee3b037004211b3245 | [
"BSD-2-Clause"
] | null | null | null | wgpu/rs.py | Korijn/wgpu-py | 72f89be121ea6cd819a145ee3b037004211b3245 | [
"BSD-2-Clause"
] | null | null | null | """
WGPU backend implementation based on the wgpu library.
The Rust wgpu project (https://github.com/gfx-rs/wgpu) is a Rust library
based on gfx-hal, which wraps Metal, Vulkan, DX12 and more in the
future. It can compile into a dynamic library exposing a C-API,
accomanied by a C header file. We wrap this using cffi, which uses the
header file to do most type conversions for us.
"""
import os
import ctypes
from cffi import FFI
from . import classes
from . import _register_backend
from .utils import get_resource_dir
from ._mappings import cstructfield2enum, enummap
os.environ["RUST_BACKTRACE"] = "0"  # Set to 1 for more trace info

# Read header file and strip some stuff that cffi would stumble on.
# cffi's cdef() parses plain C declarations only, so preprocessor
# directives (#include, #define, #if/#endif) must be filtered out.
lines = []
with open(os.path.join(get_resource_dir(), "wgpu.h")) as f:
    for line in f.readlines():
        if not line.startswith(
            (
                "#include ",
                "#define WGPU_LOCAL",
                "#define WGPUColor",
                "#define WGPUOrigin3d_ZERO",
                "#if defined",
                "#endif",
            )
        ):
            lines.append(line)

# Configure cffi: declare all types/functions, but build no C extension.
ffi = FFI()
ffi.cdef("".join(lines))
ffi.set_source("wgpu.h", None)

# Load the dynamic library.
# NOTE(review): the DLL name is hard-coded for a Windows debug build —
# loading will fail on other platforms; confirm intended scope.
_lib = ffi.dlopen(os.path.join(get_resource_dir(), "wgpu_native-debug.dll"))
def new_struct(ctype, **kwargs):
    """ Create an ffi struct. Provides a flatter syntax and converts our
    string enums to int enums needed in C.
    """
    struct = ffi.new(ctype)
    for name, value in kwargs.items():
        # String values for int fields are our enum names and must be
        # translated to the corresponding C integer constants.
        needs_enum_lookup = isinstance(value, str) and isinstance(
            getattr(struct, name), int
        )
        if not needs_enum_lookup:
            setattr(struct, name, value)
            continue
        # "WGPUFoo *" -> "Foo"; then map "Foo.field" -> enum type name.
        fieldkey = ctype.strip(" *")[4:] + "." + name
        enum_typename = cstructfield2enum[fieldkey]
        setattr(struct, name, enummap[enum_typename + "." + value])
    return struct
# %% The API
# wgpu.help('requestadapter', 'RequestAdapterOptions', dev=True)
# IDL: Promise<GPUAdapter> requestAdapter(optional GPURequestAdapterOptions options = {});
async def requestAdapter(powerPreference: "enum PowerPreference"):
    """ Request an GPUAdapter, the object that represents the implementation of WGPU.
    This function uses the Rust WGPU library.

    Params:
        powerPreference(enum): "high-performance" or "low-power"
    """

    # Convert the descriptor
    struct = new_struct("WGPURequestAdapterOptions *", power_preference=powerPreference)

    # Select possible backends. This is not exposed in the WebGPU API
    # 1 => Backend::Empty,
    # 2 => Backend::Vulkan,
    # 4 => Backend::Metal,
    # 8 => Backend::Dx12,  (buggy)
    # 16 => Backend::Dx11,  (not implemented yet)
    # 32 => Backend::Gl,  (not implemented yet)
    backend_mask = 2 | 4  # Vulkan or Metal

    # Do the API call and get the adapter id

    adapter_id = None

    # The callback is bound to a local name so it stays referenced (and
    # thus alive) for the duration of the native call below.
    @ffi.callback("void(uint64_t, void *)")
    def _request_adapter_callback(received, userdata):
        nonlocal adapter_id
        adapter_id = received

    _lib.wgpu_request_adapter_async(
        struct, backend_mask, _request_adapter_callback, ffi.NULL
    )  # userdata, stub

    # For now, Rust will call the callback immediately
    # todo: when wgpu gets an event loop -> while run wgpu event loop or something
    assert adapter_id is not None

    extensions = []
    return GPUAdapter("WGPU", extensions, adapter_id)


# Mark as the backend on import time
_register_backend(requestAdapter)
class GPUAdapter(classes.GPUAdapter):
    """Adapter backed by the Rust wgpu-native library."""

    def __init__(self, name, extensions, id):
        super().__init__(name, extensions)
        self._id = id  # wgpu-native adapter id

    # wgpu.help('adapterrequestdevice', 'DeviceDescriptor', dev=True)
    # IDL: Promise<GPUDevice> requestDevice(optional GPUDeviceDescriptor descriptor = {});
    async def requestDevice(
        self,
        *,
        label="",
        extensions: "GPUExtensionName-list" = [],
        limits: "GPULimits" = {}
    ):
        """Async wrapper around requestDeviceSync (the native call is blocking)."""
        return self.requestDeviceSync(label=label, extensions=extensions, limits=limits)

    def requestDeviceSync(
        self,
        *,
        label="",
        extensions: "GPUExtensionName-list" = [],
        limits: "GPULimits" = {}
    ):
        """Create a GPUDevice (and its default queue) from this adapter.

        Note: the mutable defaults above are kept for interface
        compatibility; neither argument is ever mutated here.
        """
        extensions = tuple(extensions)
        c_extensions = new_struct(
            "WGPUExtensions *",
            anisotropic_filtering="anisotropicFiltering" in extensions,
        )
        # Fix: default maxBindGroups to 4 (the WebGPU spec default) so
        # calling without an explicit limits dict no longer raises KeyError.
        c_limits = new_struct(
            "WGPULimits *", max_bind_groups=limits.get("maxBindGroups", 4)
        )
        struct = new_struct(
            "WGPUDeviceDescriptor *", extensions=c_extensions[0], limits=c_limits[0]
        )
        id = _lib.wgpu_adapter_request_device(self._id, struct)
        queue_id = _lib.wgpu_device_get_queue(id)
        queue = GPUQueue("", queue_id, self)
        return GPUDevice(label, id, self, extensions, limits, queue)
class GPUDevice(classes.GPUDevice):
    """Device implementation on top of the Rust wgpu-native library.

    Each create* method translates a (dict-based) WebGPU descriptor into
    cffi structs and forwards it to the native library.
    """

    # wgpu.help('devicecreatebuffer', 'BufferDescriptor', dev=True)
    # IDL: GPUBuffer createBuffer(GPUBufferDescriptor descriptor);
    def createBuffer(
        self, *, label="", size: "GPUBufferSize", usage: "GPUBufferUsageFlags"
    ):
        """Create an unmapped GPUBuffer of *size* bytes."""
        size = int(size)
        struct = new_struct("WGPUBufferDescriptor *", size=size, usage=usage)
        # Fix: the original passed an undefined name `mem` as a third
        # argument (guaranteed NameError); per wgpu.h the native function
        # takes only the device id and the descriptor.
        id = _lib.wgpu_device_create_buffer(self._internal, struct)
        return GPUBuffer(label, id, self, size, usage, "unmapped", None)

    # wgpu.help('devicecreatebuffermapped', 'BufferDescriptor', dev=True)
    # IDL: GPUMappedBuffer createBufferMapped(GPUBufferDescriptor descriptor);
    def createBufferMapped(
        self, *, label="", size: "GPUBufferSize", usage: "GPUBufferUsageFlags"
    ):
        """Create a GPUBuffer whose memory is CPU-mapped for writing."""
        size = int(size)
        struct = new_struct("WGPUBufferDescriptor *", size=size, usage=usage)
        # Pointer that device_create_buffer_mapped sets, so that we can write stuff there
        buffer_memory_pointer = ffi.new("uint8_t * *")
        id = _lib.wgpu_device_create_buffer_mapped(
            self._internal, struct, buffer_memory_pointer
        )
        # Expose the mapped memory as a ctypes array (zero-copy view).
        pointer_as_int = int(ffi.cast("intptr_t", buffer_memory_pointer[0]))
        mem_as_ctypes = (ctypes.c_uint8 * size).from_address(pointer_as_int)
        # mem_as_numpy = np.frombuffer(mem_as_ctypes, np.uint8)
        return GPUBuffer(label, id, self, size, usage, "mapped", mem_as_ctypes)

    # wgpu.help('devicecreatebindgrouplayout', 'BindGroupLayoutDescriptor', dev=True)
    # IDL: GPUBindGroupLayout createBindGroupLayout(GPUBindGroupLayoutDescriptor descriptor);
    def createBindGroupLayout(
        self, *, label="", bindings: "GPUBindGroupLayoutBinding-list"
    ):
        """Create a bind group layout from binding description objects."""
        c_bindings_list = []
        for binding in bindings:
            # NOTE(review): `binding.BindingType` reads oddly capitalized
            # next to the other attributes — confirm against the binding
            # objects' actual attribute names.
            c_binding = new_struct(
                "WGPUBindGroupLayoutBinding *",
                binding=int(binding.binding),
                visibility=int(binding.visibility),  # WGPUShaderStage flags
                ty=binding.BindingType,
                texture_dimension=binding.textureDimension,
                multisampled=bool(binding.multisampled),
                dynamic=bool(binding.hasDynamicOffset),
            )
            c_bindings_list.append(c_binding)
        c_bindings_array = ffi.new("WGPUBindGroupLayoutBinding []", c_bindings_list)
        struct = new_struct(
            "WGPUBindGroupLayoutDescriptor *",
            bindings=c_bindings_array,
            bindings_length=len(c_bindings_list),
        )
        id = _lib.wgpu_device_create_bind_group_layout(self._internal, struct)
        return classes.GPUBindGroupLayout(label, id, self, bindings)

    # wgpu.help('devicecreatebindgroup', 'BindGroupDescriptor', dev=True)
    # IDL: GPUBindGroup createBindGroup(GPUBindGroupDescriptor descriptor);
    def createBindGroup(
        self,
        *,
        label="",
        layout: "GPUBindGroupLayout",
        bindings: "GPUBindGroupBinding-list"
    ):
        """Create a bind group that binds concrete resources to *layout*."""
        c_bindings_list = []
        for binding in bindings:
            c_binding = new_struct(
                "WGPUBindGroupBinding *",
                binding=int(binding.binding),
                resource=binding.resource,
            )  # todo: xxxx WGPUBindingResource
            c_bindings_list.append(c_binding)
        c_bindings_array = ffi.new("WGPUBindGroupBinding []", c_bindings_list)
        struct = new_struct(
            "WGPUBindGroupDescriptor *",
            layout=layout._internal,
            bindings=c_bindings_array,
            bindings_length=len(c_bindings_list),
        )  # noqa
        id = _lib.wgpu_device_create_bind_group(self._internal, struct)
        return classes.GPUBindGroup(label, id, self, bindings)

    # wgpu.help('devicecreatepipelinelayout', 'PipelineLayoutDescriptor', dev=True)
    # IDL: GPUPipelineLayout createPipelineLayout(GPUPipelineLayoutDescriptor descriptor);
    def createPipelineLayout(
        self, *, label="", bindGroupLayouts: "GPUBindGroupLayout-list"
    ):
        """Create a pipeline layout from a list of bind group layouts."""
        bindGroupLayouts_ids = [x._internal for x in bindGroupLayouts]  # noqa
        c_layout_array = ffi.new("WGPUBindGroupLayoutId []", bindGroupLayouts_ids)
        struct = new_struct(
            "WGPUPipelineLayoutDescriptor *",
            bind_group_layouts=c_layout_array,
            bind_group_layouts_length=len(bindGroupLayouts),
        )
        id = _lib.wgpu_device_create_pipeline_layout(self._internal, struct)
        return classes.GPUPipelineLayout(label, id, self, bindGroupLayouts)

    # wgpu.help('devicecreateshadermodule', 'ShaderModuleDescriptor', dev=True)
    # IDL: GPUShaderModule createShaderModule(GPUShaderModuleDescriptor descriptor);
    def createShaderModule(self, *, label="", code: "GPUShaderCode"):
        """Create a shader module from SpirV bytes (or an object exposing them).

        Raises:
            TypeError: if *code* is neither bytes nor has a
                ``to_spirv_bytes()`` method. (Fix: previously an
                unsupported type left ``data`` unbound -> NameError.)
        """
        if isinstance(code, bytes):
            data = code  # Assume it's Spirv
        elif hasattr(code, "to_spirv_bytes"):
            data = code.to_spirv_bytes()
        else:
            raise TypeError(
                "code must be bytes or have a to_spirv_bytes() method"
            )
        assert True  # todo: check on SpirV magic number
        # From bytes to WGPUU32Array
        data_u8 = ffi.new("uint8_t[]", data)
        data_u32 = ffi.cast("uint32_t *", data_u8)
        c_code = ffi.new(
            "WGPUU32Array *", {"bytes": data_u32, "length": len(data) // 4}
        )
        struct = new_struct("WGPUShaderModuleDescriptor *", code=c_code[0])
        id = _lib.wgpu_device_create_shader_module(self._internal, struct)
        return classes.GPUShaderModule(label, id, self)

    # wgpu.help('devicecreaterenderpipeline', 'RenderPipelineDescriptor', dev=True)
    # IDL: GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor);
    def createRenderPipeline(
        self,
        *,
        label="",
        layout: "GPUPipelineLayout",
        vertexStage: "GPUProgrammableStageDescriptor",
        fragmentStage: "GPUProgrammableStageDescriptor",
        primitiveTopology: "GPUPrimitiveTopology",
        rasterizationState: "GPURasterizationStateDescriptor" = {},
        colorStates: "GPUColorStateDescriptor-list",
        depthStencilState: "GPUDepthStencilStateDescriptor",
        vertexState: "GPUVertexStateDescriptor" = {},
        sampleCount: int = 1,
        sampleMask: int = 0xFFFFFFFF,
        alphaToCoverageEnabled: bool = False
    ):
        """Create a render pipeline from the WebGPU-style descriptor dicts.

        Depth/stencil state is not implemented yet (pass None).
        """
        refs = []  # to avoid premature gc collection of nested cffi objects
        c_vertex_stage = new_struct(
            "WGPUProgrammableStageDescriptor *",
            module=vertexStage["module"]._internal,
            entry_point=ffi.new("char []", vertexStage["entryPoint"].encode()),
        )
        c_fragment_stage = new_struct(
            "WGPUProgrammableStageDescriptor *",
            module=fragmentStage["module"]._internal,
            entry_point=ffi.new("char []", fragmentStage["entryPoint"].encode()),
        )
        c_rasterization_state = new_struct(
            "WGPURasterizationStateDescriptor *",
            front_face=rasterizationState["frontFace"],
            cull_mode=rasterizationState["cullMode"],
            depth_bias=rasterizationState["depthBias"],
            depth_bias_slope_scale=rasterizationState["depthBiasSlopeScale"],
            depth_bias_clamp=rasterizationState["depthBiasClamp"],
        )
        c_color_states_list = []
        for colorState in colorStates:
            alphaBlend = colorState["alphaBlend"]
            if not isinstance(alphaBlend, (list, tuple)):  # support dict and tuple
                alphaBlend = (
                    alphaBlend["srcFactor"],
                    alphaBlend["dstFactor"],
                    alphaBlend["operation"],
                )
            c_alpha_blend = new_struct(
                "WGPUBlendDescriptor *",
                src_factor=alphaBlend[0],
                dst_factor=alphaBlend[1],
                operation=alphaBlend[2],
            )
            colorBlend = colorState["colorBlend"]
            if not isinstance(colorBlend, (list, tuple)):  # support dict and tuple
                colorBlend = (
                    colorBlend["srcFactor"],
                    colorBlend["dstFactor"],
                    colorBlend["operation"],
                )
            c_color_blend = new_struct(
                "WGPUBlendDescriptor *",
                src_factor=colorBlend[0],
                dst_factor=colorBlend[1],
                operation=colorBlend[2],
            )
            c_color_state = new_struct(
                "WGPUColorStateDescriptor *",
                format=colorState["format"],
                alpha_blend=c_alpha_blend[0],
                color_blend=c_color_blend[0],
                write_mask=colorState["writeMask"],
            )  # enum
            refs.extend([c_alpha_blend, c_color_blend])
            c_color_states_list.append(c_color_state[0])
        c_color_states_array = ffi.new(
            "WGPUColorStateDescriptor []", c_color_states_list
        )
        if depthStencilState is None:
            c_depth_stencil_state = ffi.NULL
        else:
            raise NotImplementedError()
            # c_depth_stencil_state = new_struct(
            #     "WGPUDepthStencilStateDescriptor *",
            #     format=
            #     depth_write_enabled=
            #     depth_compare
            #     stencil_front
            #     stencil_back
            #     stencil_read_mask
            #     stencil_write_mask
            # )
        c_vertex_buffer_descriptors_list = []
        for buffer_des in vertexState["vertexBuffers"]:
            c_attributes_list = []
            for attribute in buffer_des["attributes"]:
                c_attribute = new_struct(
                    "WGPUVertexAttributeDescriptor *",
                    format=attribute["format"],
                    offset=attribute["offset"],
                    shader_location=attribute["shaderLocation"],
                )
                # NOTE(review): pointers (not dereferenced values) are
                # appended here, unlike the color states above — confirm
                # cffi coerces these in the array initializer.
                c_attributes_list.append(c_attribute)
            c_attributes_array = ffi.new(
                "WGPUVertexAttributeDescriptor []", c_attributes_list
            )
            c_vertex_buffer_descriptor = new_struct(
                "WGPUVertexBufferDescriptor *",
                stride=buffer_des["arrayStride"],
                step_mode=buffer_des["stepmode"],
                attributes=c_attributes_array,
                attributes_length=len(c_attributes_list),
            )
            refs.append(c_attributes_list)
            c_vertex_buffer_descriptors_list.append(c_vertex_buffer_descriptor)
        c_vertex_buffer_descriptors_array = ffi.new(
            "WGPUVertexBufferDescriptor []", c_vertex_buffer_descriptors_list
        )
        c_vertex_input = new_struct(
            "WGPUVertexInputDescriptor *",
            index_format=vertexState["indexFormat"],
            vertex_buffers=c_vertex_buffer_descriptors_array,
            vertex_buffers_length=len(c_vertex_buffer_descriptors_list),
        )
        struct = new_struct(
            "WGPURenderPipelineDescriptor *",
            layout=layout._internal,
            vertex_stage=c_vertex_stage[0],
            fragment_stage=c_fragment_stage,
            primitive_topology=primitiveTopology,
            rasterization_state=c_rasterization_state,
            color_states=c_color_states_array,
            color_states_length=len(c_color_states_list),
            depth_stencil_state=c_depth_stencil_state,
            vertex_input=c_vertex_input[0],
            sample_count=sampleCount,
            sample_mask=sampleMask,
            alpha_to_coverage_enabled=alphaToCoverageEnabled,
        )  # noqa # c-pointer # enum
        id = _lib.wgpu_device_create_render_pipeline(self._internal, struct)
        return classes.GPURenderPipeline(label, id, self)

    # wgpu.help('devicecreatecommandencoder', 'CommandEncoderDescriptor', dev=True)
    # IDL: GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {});
    def createCommandEncoder(self, *, label=""):
        """Create a GPUCommandEncoder for recording commands."""
        struct = new_struct("WGPUCommandEncoderDescriptor *", todo=0)
        id = _lib.wgpu_device_create_command_encoder(self._internal, struct)
        return GPUCommandEncoder(label, id, self)

    def configureSwapChainQt(self, *, label="", surface, format, usage):
        """ Get a swapchain object from a Qt widget.

        *surface* is a Qt widget; only Windows is implemented so far.
        """
        import sys

        if sys.platform.startswith("win"):
            # Use create_surface_from_windows_hwnd
            # todo: factor this line out into a gui.py or something
            hwnd = ffi.cast("void *", int(surface.winId()))
            hinstance = ffi.NULL
            surface_id = _lib.wgpu_create_surface_from_windows_hwnd(hinstance, hwnd)
        elif sys.platform.startswith("linux"):
            # Use create_surface_from_xlib
            raise NotImplementedError("Linux")
        elif sys.platform.startswith("darwin"):
            # Use create_surface_from_metal_layer
            raise NotImplementedError("OS-X")
        else:
            raise RuntimeError("Unsupported platform")
        struct = new_struct(
            "WGPUSwapChainDescriptor *",
            usage=usage,
            format=format,
            width=surface.width(),
            height=surface.height(),
            present_mode=1,
        )  # vsync or not vsync
        # todo: safe surface id somewhere
        # todo: maybe move this stuff into the swap chain class, so we can ce-create on resize and all that
        id = _lib.wgpu_device_create_swap_chain(self._internal, surface_id, struct)
        return GPUSwapChain(label, id, self)
class GPUBuffer(classes.GPUBuffer):
    """Buffer object backed by wgpu-native; tracks a small state machine
    ("mapped" / "unmapped" / "destroyed") so native calls are made once.
    """

    # wgpu.help('bufferunmap', dev=True)
    # IDL: void unmap();
    def unmap(self):
        """Unmap the buffer memory; a no-op unless currently mapped."""
        if self._state != "mapped":
            return
        _lib.wgpu_buffer_unmap(self._internal)
        self._state = "unmapped"

    # wgpu.help('bufferdestroy', dev=True)
    # IDL: void destroy();
    def destroy(self):
        """Destroy the native buffer; safe to call more than once."""
        if self._state == "destroyed":
            return
        self._state = "destroyed"
        _lib.wgpu_buffer_destroy(self._internal)
class GPUTexture(classes.GPUTexture):
    """Texture object backed by wgpu-native."""

    # wgpu.help('texturecreateview', 'TextureViewDescriptor', dev=True)
    # IDL: GPUTextureView createView(optional GPUTextureViewDescriptor descriptor = {});
    def createView(
        self,
        *,
        label="",
        format: "GPUTextureFormat",
        dimension: "GPUTextureViewDimension",
        aspect: "GPUTextureAspect" = "all",
        baseMipLevel: int = 0,
        mipLevelCount: int = 0,
        baseArrayLayer: int = 0,
        arrayLayerCount: int = 0
    ):
        """Create a GPUTextureView onto this texture."""
        # NOTE(review): the *format* argument is accepted but never passed
        # to the native descriptor below — confirm whether
        # WGPUTextureViewDescriptor has a format field that should be set.
        struct = new_struct(
            "WGPUTextureViewDescriptor *",
            dimension=dimension,
            aspect=aspect,
            base_mip_level=baseMipLevel,
            level_count=mipLevelCount,
            base_array_layer=baseArrayLayer,
            array_layer_count=arrayLayerCount,
        )
        id = _lib.wgpu_texture_create_view(self._internal, struct)
        return classes.GPUTextureView(label, id, self)

    # wgpu.help('texturedestroy', dev=True)
    # IDL: void destroy();
    def destroy(self):
        """Destroy the native texture object."""
        _lib.wgpu_texture_destroy(self._internal)
class GPUCommandEncoder(classes.GPUCommandEncoder):
    """Command encoder backed by wgpu-native."""

    # wgpu.help('commandencoderbeginrenderpass', 'RenderPassDescriptor', dev=True)
    # IDL: GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor descriptor);
    def beginRenderPass(
        self,
        *,
        label="",
        colorAttachments: "GPURenderPassColorAttachmentDescriptor-list",
        depthStencilAttachment: "GPURenderPassDepthStencilAttachmentDescriptor"
    ):
        """Begin a render pass with the given color attachments.

        Resolve targets and depth/stencil attachments are not implemented
        yet (pass None for those).
        """
        refs = []  # keep nested cffi objects alive until the native call below
        c_color_attachments_list = []
        for colorAttachment in colorAttachments:
            assert isinstance(colorAttachment["attachment"], classes.GPUTextureView)
            texture_view_id = colorAttachment["attachment"]._internal
            if colorAttachment["resolveTarget"] is None:
                c_resolve_target = ffi.NULL
            else:
                raise NotImplementedError()
            # loadValue is either the string "load" or a clear color
            # (dict or sequence of r, g, b, a).
            if isinstance(colorAttachment["loadValue"], str):
                assert colorAttachment["loadValue"] == "load"
                c_load_op = 1  # WGPULoadOp_Load
                c_clear_color = ffi.new("WGPUColor *", dict(r=0, g=0, b=0, a=0))
            else:
                c_load_op = 0  # WGPULoadOp_Clear
                clr = colorAttachment["loadValue"]
                if isinstance(clr, dict):
                    # NOTE(review): `*clr` unpacks the dict's *keys* as
                    # positional args — this probably should be just `clr`;
                    # confirm with a dict-valued loadValue.
                    c_clear_color = ffi.new("WGPUColor *", *clr)
                else:
                    c_clear_color = ffi.new(
                        "WGPUColor *", dict(r=clr[0], g=clr[1], b=clr[2], a=clr[3])
                    )
            c_attachment = new_struct(
                "WGPURenderPassColorAttachmentDescriptor *",
                attachment=texture_view_id,
                resolve_target=c_resolve_target,
                load_op=c_load_op,
                store_op=colorAttachment["storeOp"],
                clear_color=c_clear_color[0],
            )
            refs.append(c_clear_color)
            c_color_attachments_list.append(c_attachment[0])
        c_color_attachments_array = ffi.new(
            "WGPURenderPassColorAttachmentDescriptor []", c_color_attachments_list
        )
        c_depth_stencil_attachment = ffi.NULL
        if depthStencilAttachment is not None:
            raise NotImplementedError()
        struct = new_struct(
            "WGPURenderPassDescriptor *",
            color_attachments=c_color_attachments_array,
            color_attachments_length=len(c_color_attachments_list),
            depth_stencil_attachment=c_depth_stencil_attachment,
        )
        id = _lib.wgpu_command_encoder_begin_render_pass(self._internal, struct)
        return GPURenderPassEncoder(label, id, self)

    # wgpu.help('commandencoderfinish', 'CommandBufferDescriptor', dev=True)
    # IDL: GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {});
    def finish(self, *, label=""):
        """Finish recording and return a GPUCommandBuffer."""
        struct = new_struct("WGPUCommandBufferDescriptor *", todo=0)
        id = _lib.wgpu_command_encoder_finish(self._internal, struct)
        return classes.GPUCommandBuffer(label, id, self)
class GPUProgrammablePassEncoder(classes.GPUProgrammablePassEncoder):
# wgpu.help('programmablepassencodersetbindgroup', 'BindGroup', dev=True)
# IDL: void setBindGroup(unsigned long index, GPUBindGroup bindGroup, Uint32Array dynamicOffsetsData, unsigned long long dynamicOffsetsDataStart, unsigned long long dynamicOffsetsDataLength);
def setBindGroup(
self,
index,
bindGroup,
dynamicOffsetsData,
dynamicOffsetsDataStart,
dynamicOffsetsDataLength,
):
offsets = list(dynamicOffsetsData)
c_offsets = ffi.new("WGPUBufferAddress []", offsets)
bind_group_id = bindGroup._internal
if isinstance(self, GPUComputePassEncoder):
_lib.wgpu_compute_pass_set_bind_group(
self._internal, index, bind_group_id, c_offsets, len(offsets)
)
else:
_lib.wgpu_render_pass_set_bind_group(
self._internal, index, bind_group_id, c_offsets, len(offsets)
)
# wgpu.help('programmablepassencoderpushdebuggroup', dev=True)
# IDL: void pushDebugGroup(DOMString groupLabel);
def pushDebugGroup(self):
raise NotImplementedError()
# wgpu.help('programmablepassencoderpopdebuggroup', dev=True)
# IDL: void popDebugGroup();
def popDebugGroup(self):
raise NotImplementedError()
# wgpu.help('programmablepassencoderinsertdebugmarker', dev=True)
# IDL: void insertDebugMarker(DOMString markerLabel);
def insertDebugMarker(self):
raise NotImplementedError()
class GPUComputePassEncoder(GPUProgrammablePassEncoder):
"""
"""
# wgpu.help('computepassencodersetpipeline', 'ComputePipeline', dev=True)
# IDL: void setPipeline(GPUComputePipeline pipeline);
def setPipeline(self):
raise NotImplementedError()
# wgpu.help('computepassencoderdispatch', dev=True)
# IDL: void dispatch(unsigned long x, optional unsigned long y = 1, optional unsigned long z = 1);
def dispatch(self):
raise NotImplementedError()
# wgpu.help('computepassencoderdispatchindirect', 'Buffer', 'BufferSize', dev=True)
# IDL: void dispatchIndirect(GPUBuffer indirectBuffer, GPUBufferSize indirectOffset);
def dispatchIndirect(self):
raise NotImplementedError()
# wgpu.help('computepassencoderendpass', dev=True)
# IDL: void endPass();
def endPass(self):
raise NotImplementedError()
class GPURenderEncoderBase(GPUProgrammablePassEncoder):
"""
"""
# wgpu.help('renderencoderbasesetpipeline', 'RenderPipeline', dev=True)
# IDL: void setPipeline(GPURenderPipeline pipeline);
def setPipeline(self, pipeline):
pipeline_id = pipeline._internal # noqa
_lib.wgpu_render_pass_set_pipeline(self._internal, pipeline_id)
# wgpu.help('renderencoderbasesetindexbuffer', 'Buffer', 'BufferSize', dev=True)
# IDL: void setIndexBuffer(GPUBuffer buffer, optional GPUBufferSize offset = 0);
def setIndexBuffer(self):
raise NotImplementedError()
# wgpu.help('renderencoderbasesetvertexbuffer', 'Buffer', 'BufferSize', dev=True)
# IDL: void setVertexBuffer(unsigned long slot, GPUBuffer buffer, optional GPUBufferSize offset = 0);
def setVertexBuffer(self):
raise NotImplementedError()
# wgpu.help('renderencoderbasedraw', dev=True)
# IDL: void draw(unsigned long vertexCount, unsigned long instanceCount, unsigned long firstVertex, unsigned long firstInstance);
def draw(self, vertexCount, instanceCount, firstVertex, firstInstance):
_lib.wgpu_render_pass_draw(
self._internal, vertexCount, instanceCount, firstVertex, firstInstance
)
# wgpu.help('renderencoderbasedrawindirect', 'Buffer', 'BufferSize', dev=True)
# IDL: void drawIndirect(GPUBuffer indirectBuffer, GPUBufferSize indirectOffset);
def drawIndirect(self):
raise NotImplementedError()
# wgpu.help('renderencoderbasedrawindexed', dev=True)
# IDL: void drawIndexed(unsigned long indexCount, unsigned long instanceCount, unsigned long firstIndex, long baseVertex, unsigned long firstInstance);
def drawIndexed(self):
raise NotImplementedError()
# wgpu.help('renderencoderbasedrawindexedindirect', 'Buffer', 'BufferSize', dev=True)
# IDL: void drawIndexedIndirect(GPUBuffer indirectBuffer, GPUBufferSize indirectOffset);
def drawIndexedIndirect(self):
raise NotImplementedError()
# todo: this does not inherit from classes.GPURenderPassEncoder. Use multiple inheritance or leave it?
class GPURenderPassEncoder(GPURenderEncoderBase):
"""
"""
# wgpu.help('renderpassencodersetviewport', dev=True)
# IDL: void setViewport(float x, float y, float width, float height, float minDepth, float maxDepth);
def setViewport(self):
raise NotImplementedError()
# wgpu.help('renderpassencodersetscissorrect', dev=True)
# IDL: void setScissorRect(unsigned long x, unsigned long y, unsigned long width, unsigned long height);
def setScissorRect(self):
raise NotImplementedError()
# wgpu.help('renderpassencodersetblendcolor', 'Color', dev=True)
# IDL: void setBlendColor(GPUColor color);
def setBlendColor(self):
raise NotImplementedError()
# wgpu.help('renderpassencodersetstencilreference', dev=True)
# IDL: void setStencilReference(unsigned long reference);
def setStencilReference(self):
raise NotImplementedError()
# wgpu.help('renderpassencoderexecutebundles', dev=True)
# IDL: void executeBundles(sequence<GPURenderBundle> bundles);
def executeBundles(self):
raise NotImplementedError()
# wgpu.help('renderpassencoderendpass', dev=True)
# IDL: void endPass();
def endPass(self):
_lib.wgpu_render_pass_end_pass(self._internal)
class GPUQueue(classes.GPUQueue):
# wgpu.help('queuesubmit', dev=True)
# IDL: void submit(sequence<GPUCommandBuffer> commandBuffers);
def submit(self, commandBuffers):
command_buffer_ids = [cb._internal for cb in commandBuffers]
c_command_buffers = ffi.new("WGPUCommandBufferId []", command_buffer_ids)
_lib.wgpu_queue_submit(
self._internal, c_command_buffers, len(command_buffer_ids)
)
class GPUSwapChain(classes.GPUSwapChain):
def getCurrentTextureView(self):
# todo: should we cache instances (on their id)?
# otherwise we have multiple instances mapping to same internal texture
swapChainOutput = _lib.wgpu_swap_chain_get_next_texture(self._internal)
return classes.GPUTextureView("swapchain", swapChainOutput.view_id, self)
def _gui_present(self):
""" Present the current texture. This is not part of the public API,
instead, GUI backends should call this at the right moment.
"""
_lib.wgpu_swap_chain_present(self._internal)
# %%
def _copy_docstrings():
for ob in globals().values():
if not (isinstance(ob, type) and issubclass(ob, classes.GPUObject)):
continue
elif ob.__module__ != __name__:
continue
base = ob.mro()[1]
ob.__doc__ = base.__doc__
for name, attr in ob.__dict__.items():
if name.startswith("_") or not hasattr(attr, "__doc__"):
continue
base_attr = getattr(base, name, None)
if base_attr is not None:
attr.__doc__ = base_attr.__doc__
_copy_docstrings()
| 37.914392 | 198 | 0.6476 | 2,931 | 30,559 | 6.539065 | 0.232003 | 0.015861 | 0.019827 | 0.018262 | 0.186267 | 0.102682 | 0.068559 | 0.044871 | 0.034332 | 0.034332 | 0 | 0.003706 | 0.258222 | 30,559 | 805 | 199 | 37.961491 | 0.841803 | 0.253313 | 0 | 0.206963 | 0 | 0 | 0.105701 | 0.05084 | 0 | 0 | 0.000447 | 0.004969 | 0.007737 | 1 | 0.085106 | false | 0.038685 | 0.015474 | 0 | 0.154739 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3f55e5b9b4b2ea7da77a3255ee2851f0eb3612a | 980 | py | Python | py/examples/plot_line_annotation.py | swt2c/wave | 7fe897a34f4ac25157920132b2e873da755643a8 | [
"Apache-2.0"
] | 3,013 | 2020-12-15T15:53:23.000Z | 2022-03-31T00:21:06.000Z | py/examples/plot_line_annotation.py | swt2c/wave | 7fe897a34f4ac25157920132b2e873da755643a8 | [
"Apache-2.0"
] | 591 | 2020-12-15T15:54:42.000Z | 2022-03-31T12:51:19.000Z | py/examples/plot_line_annotation.py | swt2c/wave | 7fe897a34f4ac25157920132b2e873da755643a8 | [
"Apache-2.0"
] | 159 | 2020-12-15T16:34:43.000Z | 2022-03-31T07:27:16.000Z | # Plot / Line / Annotation
# Add annotations to a line #plot. #annotation
# ---
from synth import FakeTimeSeries
from h2o_wave import site, data, ui
page = site['/demo']
n = 50
f = FakeTimeSeries()
v = page.add('example', ui.plot_card(
box='1 1 4 5',
title='Time-Numeric',
data=data('date price', n),
plot=ui.plot([
ui.mark(type='line', x_scale='time', x='=date', y='=price', y_min=0, y_max=100),
ui.mark(x=50, y=50, label='point'),
ui.mark(x='2010-05-15T19:59:21.000000Z', label='vertical line'),
ui.mark(y=40, label='horizontal line'),
ui.mark(x='2010-05-24T19:59:21.000000Z', x0='2010-05-20T19:59:21.000000Z', label='vertical region'),
ui.mark(y=70, y0=60, label='horizontal region'),
ui.mark(x='2010-05-10T19:59:21.000000Z', x0='2010-05-05T19:59:21.000000Z', y=30, y0=20,
label='rectangular region')
])
))
v.data = [(t, x) for t, x, dx in [f.next() for _ in range(n)]]
page.save()
| 33.793103 | 108 | 0.604082 | 163 | 980 | 3.595092 | 0.441718 | 0.071672 | 0.093857 | 0.056314 | 0.213311 | 0.064846 | 0 | 0 | 0 | 0 | 0 | 0.163085 | 0.192857 | 980 | 28 | 109 | 35 | 0.57775 | 0.072449 | 0 | 0 | 0 | 0 | 0.307863 | 0.149502 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3f8493530d35d4422ed29b6391daa2f5bde737f | 3,412 | py | Python | meissner/naver.py | terrylove19/meissner | 14cedb73aa86172aac7af4031aa4670d26acc8ef | [
"MIT"
] | 1 | 2022-01-27T10:16:46.000Z | 2022-01-27T10:16:46.000Z | meissner/naver.py | terrylove19/meissner | 14cedb73aa86172aac7af4031aa4670d26acc8ef | [
"MIT"
] | null | null | null | meissner/naver.py | terrylove19/meissner | 14cedb73aa86172aac7af4031aa4670d26acc8ef | [
"MIT"
] | null | null | null | """
.@@#
(@&*%@@@/,%@@@# #&@@@@&. .@@# /&@@@@&* /&@@@@&* (@&*%@@@( *%@@@@&/ .@@&*&@.
(@@&((&@@@(/&@@, #@@#/(&@@. .@@# #@@(///(, .@@%////, (@@&(/#@@# #@@&//#@@( .@@@@@%.
(@@. /@@* ,@@/ .&@@%%%&@@* .@@# (@@&&%#* .@@@&%#/ (@@. .&@% &@@&%%%@@% .@@@
(@@. /@@, ,@@/ .&@%,,,,,, .@@# ./#%&@@&. ./(%&@@&. (@@. .&@% &@@/,,,,,. .@@@
(@@. /@@, ,@@/ #@@#////* .@@# ./////&@@. /////&@@. (@@. .&@% #@@&/////. .@@@
(@@. /@@, ,@@/ #&@@@@@% .@@# ,&@@@@@%. &@@@@@&. (@@. .&@% *%@@@@@&* .@@@
MIT License
Copyright (c) 2017 epsimatt (https://github.com/epsimatt/meissner)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from json.decoder import JSONDecodeError
import logging
import meissner.config
import requests
log = logging.getLogger(__name__)
config_mgr = meissner.config.ConfigManager()
client_id = config_mgr.get('naver_client_id')
client_secret = config_mgr.get('naver_client_secret')
# search_api_url = "https://openapi.naver.com/v1/search/webkr.json?"
papago_api_url = "https://openapi.naver.com/v1/papago/n2mt?"
def papago_translate(source: str, target: str, text: str) -> str:
"""
Translate a text using the NAVER Papago NMT API.
Supported languages: ko, en, zh-CN (ko <-> en / ko <-> zh-CN)
"""
req_vars = {
'source': source,
'target': target,
'text': text
}
response = requests.post(
papago_api_url,
req_vars,
headers={
'X-Naver-Client-Id': client_id,
'X-Naver-Client-Secret': client_secret
}
)
try:
raw_dict = response.json()
except JSONDecodeError: # Subclass of ValueError
log.error("Could not retrieve JSON model: Invalid JSON")
return ""
if not isinstance(raw_dict, dict) or 'message' not in raw_dict:
if 'errorCode' in raw_dict:
return raw_dict['errorCode']
else:
return 'HTTP_' + str(response.status_code)
message = raw_dict['message']
if 'result' not in message:
return ""
result = message['result']
if 'translatedText' not in result:
return ""
return result['translatedText']
| 35.915789 | 111 | 0.541911 | 361 | 3,412 | 5.033241 | 0.462604 | 0.048431 | 0.014309 | 0.018712 | 0.056136 | 0.03082 | 0.03082 | 0 | 0 | 0 | 0 | 0.002823 | 0.273154 | 3,412 | 94 | 112 | 36.297872 | 0.729839 | 0.580305 | 0 | 0.075 | 0 | 0 | 0.191686 | 0.016166 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.1 | 0 | 0.275 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3fb8295327161f394ebc1278a58504f74938869 | 1,460 | py | Python | benchmark/code/files.py | scailfin/rob-demo-top-tagger | d8a7d9faefd9822cd1e81e8b734158bb96263321 | [
"MIT"
] | null | null | null | benchmark/code/files.py | scailfin/rob-demo-top-tagger | d8a7d9faefd9822cd1e81e8b734158bb96263321 | [
"MIT"
] | 2 | 2020-02-22T18:37:38.000Z | 2020-08-26T02:12:17.000Z | benchmark/code/files.py | scailfin/rob-demo-top-tagger | d8a7d9faefd9822cd1e81e8b734158bb96263321 | [
"MIT"
] | 1 | 2021-05-06T15:21:16.000Z | 2021-05-06T15:21:16.000Z | # This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB) - Top Tagger Benchmark Demo.
#
# Copyright (C) [2019-2020] NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Definition of names for files that are generated by a workflow run."""
# -- Preprocessing ------------------------------------------------------------
"""Tree file generated by the first dataset preprocessing step."""
RAW_TREE_FILE = 'tree_test_jets.pkl'
"""Result file of the dataset preprocessing step."""
PROCESSED_TREE_FILE = 'processed_test_jets.pkl'
"""Additional pre-processing input files."""
CARD_FILE = 'jet_image_trim_pt800-900_card.dat'
TRANSFORMER_FILE = 'transformer.pkl'
# -- Evaluate -----------------------------------------------------------------
"""Name of file containing run parameter dictionary."""
PARAMS_FILE = 'params.json'
"""Prefix for run directories."""
RUN_DIR_PREFIX = 'run_'
"""Result files for each run."""
METRICS_FILE = 'metrics_test.json'
ROC_FILE = 'roc.pkl'
Y_PROB_TRUE_FILE = 'yProbTrue.pkl'
"""Result files for run summaries."""
Y_PROB_BEST_FILE = 'yProbBest.pkl'
RESULT_FILE = 'results.json'
# -- Logging ------------------------------------------------------------------
"""Logfile for dataset preprocessing step."""
ANALYZE_LOG_FILE = 'analyze.log'
EVAL_LOG_FILE = 'evaluate.log'
PREPROC_LOG_FILE = 'preproc.log'
| 37.435897 | 79 | 0.649315 | 185 | 1,460 | 4.940541 | 0.535135 | 0.016411 | 0.078775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010861 | 0.117123 | 1,460 | 38 | 80 | 38.421053 | 0.698216 | 0.399315 | 0 | 0 | 0 | 0 | 0.401606 | 0.11245 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
c3fc520d24937531ab79fae2872238dcf67090e9 | 1,668 | py | Python | language/python/string_handle.py | morenice/til | 9b73f54045dbd65e08df4538300dd12a4a087540 | [
"Apache-2.0"
] | null | null | null | language/python/string_handle.py | morenice/til | 9b73f54045dbd65e08df4538300dd12a4a087540 | [
"Apache-2.0"
] | 13 | 2020-02-11T23:33:22.000Z | 2021-06-10T21:17:23.000Z | language/python/string_handle.py | morenice/til | 9b73f54045dbd65e08df4538300dd12a4a087540 | [
"Apache-2.0"
] | null | null | null | import json
def basic():
len('aaaa')
str(1)
try:
a = 'aaa' + 2
except TypeError as e:
print('Type Error: {0}'.format(e))
def dict_to_str():
print('dict to str')
d1 = {'a': 1, 'b': 'string'}
d1_str = str(d1)
print(d1_str)
# This isn't secure because using eval function.
d2 = eval(d1_str)
if d1 == d2:
print('eval function')
def dict_to_str2():
print('dict to str 2')
d1 = {'a': 1, 'b': 'string'}
d1_str = json.dumps(d1)
print(d1_str)
d2 = json.loads(d1_str)
if d1 == d2:
print('json function')
def split():
str1 = 'Thu,1,10,except'
print('string split example: {0}'.format(str1))
# ',' : seperator
elements = str1.split(',')
for el in elements:
print(el)
def join():
list1 = ['1', 'in', 'out']
print('string join example: {0}'.format(':'.join(list1)))
def index():
str1 = '--; select * from ...'
print('string find and index example: {0}'.format(str1))
# find function will return index
if str1.find('--;') >= 0:
print('find it --;')
# index: 3 to end
print(str1[3:])
# index: end 3 character
print(str1[-3:])
def formating():
# python3: format
name = 'Roll'
age = 20
print('{0}: {1}'.format(name, age))
# python 3.6: f-string
name2 = 'Kell'
age2 = 40
print(f'{name2}: {age2}')
# python 3.6: f-string
name3 = 'Paul Kim 22'
print(f'{name3.split()}')
if __name__ == '__main__':
print('This is ' + 'string' + ' example')
basic()
dict_to_str()
dict_to_str2()
split()
join()
index()
formating()
| 18.130435 | 61 | 0.529976 | 230 | 1,668 | 3.747826 | 0.356522 | 0.041763 | 0.041763 | 0.032483 | 0.109049 | 0.074246 | 0.037123 | 0 | 0 | 0 | 0 | 0.05476 | 0.288369 | 1,668 | 91 | 62 | 18.32967 | 0.671441 | 0.115108 | 0 | 0.103448 | 0 | 0 | 0.215553 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12069 | false | 0 | 0.017241 | 0 | 0.137931 | 0.310345 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f006596b23afd581cdbe53ae8dc6b039339fb67 | 2,455 | py | Python | qiskit/aqua/circuits/fourier_transform_circuits.py | hushaohan/aqua | 8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d | [
"Apache-2.0"
] | 1 | 2020-07-14T15:32:42.000Z | 2020-07-14T15:32:42.000Z | qiskit/aqua/circuits/fourier_transform_circuits.py | hushaohan/aqua | 8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/circuits/fourier_transform_circuits.py | hushaohan/aqua | 8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d | [
"Apache-2.0"
] | 1 | 2022-01-25T07:09:10.000Z | 2022-01-25T07:09:10.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""DEPRECATED. Quantum Fourier Transform Circuit."""
import warnings
from qiskit.circuit.library import QFT
from qiskit.aqua import AquaError
class FourierTransformCircuits:
"""DEPRECATED. Quantum Fourier Transform Circuit."""
@staticmethod
def construct_circuit(
circuit=None,
qubits=None,
inverse=False,
approximation_degree=0,
do_swaps=True
):
"""Construct the circuit representing the desired state vector.
Args:
circuit (QuantumCircuit): The optional circuit to extend from.
qubits (Union(QuantumRegister, list[Qubit])): The optional qubits to construct
the circuit with.
approximation_degree (int): degree of approximation for the desired circuit
inverse (bool): Boolean flag to indicate Inverse Quantum Fourier Transform
do_swaps (bool): Boolean flag to specify if swaps should be included to align
the qubit order of
input and output. The output qubits would be in reversed order without the swaps.
Returns:
QuantumCircuit: quantum circuit
Raises:
AquaError: invalid input
"""
warnings.warn('The class FourierTransformCircuits is deprecated and will be removed '
'no earlier than 3 months after the release 0.7.0. You should use the '
'qiskit.circuit.library.QFT class instead.',
DeprecationWarning, stacklevel=2)
if circuit is None:
raise AquaError('Missing input QuantumCircuit.')
if qubits is None:
raise AquaError('Missing input qubits.')
qft = QFT(len(qubits), approximation_degree=approximation_degree, do_swaps=do_swaps)
if inverse:
qft = qft.inverse()
circuit.append(qft.to_instruction(), qubits)
return circuit
| 35.57971 | 97 | 0.654175 | 295 | 2,455 | 5.410169 | 0.477966 | 0.047619 | 0.043233 | 0.041353 | 0.090226 | 0.0401 | 0 | 0 | 0 | 0 | 0 | 0.010747 | 0.279837 | 2,455 | 68 | 98 | 36.102941 | 0.891968 | 0.514053 | 0 | 0 | 0 | 0 | 0.217474 | 0.047483 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.12 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f016cf0b2bfb7aa9dc572a51f61f1df8f05fc95 | 1,381 | py | Python | input_device_handler.py | wdomitrz/input_device_handler | 4c99f91a0ac805e559a4de497412717e15bfbe25 | [
"MIT"
] | null | null | null | input_device_handler.py | wdomitrz/input_device_handler | 4c99f91a0ac805e559a4de497412717e15bfbe25 | [
"MIT"
] | null | null | null | input_device_handler.py | wdomitrz/input_device_handler | 4c99f91a0ac805e559a4de497412717e15bfbe25 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
from os import path
from evdev import InputDevice, InputEvent, categorize, ecodes
from subprocess import Popen
CONFIG_FILE = path.expanduser('~/.config/input_device_handler/config.json')
class DeviceHandler:
def __init__(self, device_options: dict, bindings: dict):
self.device = InputDevice(device_options['path'])
self.bindings = bindings
if 'exclusive' in device_options and device_options['exclusive']:
self.device.grab()
def run(self):
for event in self.device.read_loop():
self.process_event(event)
def process_event(self, event: InputEvent):
if event.type != ecodes.EV_KEY:
return
event = categorize(event)
if event.keycode not in self.bindings:
return
action = self.bindings[event.keycode]
if event.keystate in [event.key_down, event.key_hold]:
self.perform_action(action)
def perform_action(self, action: dict):
cmd = None
if 'cmd' in action:
cmd = action['cmd']
elif 'key' in action:
cmd = ['xdotool', 'key'] + [action['key']]
if cmd is not None:
Popen(cmd)
if __name__ == '__main__':
with open(CONFIG_FILE, 'r') as config_file:
config = json.load(config_file)
DeviceHandler(**config).run()
| 27.62 | 75 | 0.62853 | 169 | 1,381 | 4.95858 | 0.378698 | 0.047733 | 0.026253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000984 | 0.264301 | 1,381 | 49 | 76 | 28.183673 | 0.823819 | 0.015206 | 0 | 0.057143 | 0 | 0 | 0.069904 | 0.030905 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.114286 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f02000898a95c1dd1e69a69da9e90223206f7da | 3,064 | py | Python | modules/plugin_dialog.py | jredrejo/sqlabs | 2cf39ff924579e72bc0092f2a6d65214dafd4bfe | [
"MIT"
] | 1 | 2017-12-01T22:46:33.000Z | 2017-12-01T22:46:33.000Z | modules/plugin_dialog.py | jredrejo/sqlabs | 2cf39ff924579e72bc0092f2a6d65214dafd4bfe | [
"MIT"
] | null | null | null | modules/plugin_dialog.py | jredrejo/sqlabs | 2cf39ff924579e72bc0092f2a6d65214dafd4bfe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This plugins is licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Authors: Kenji Hosoda <hosoda@s-cubism.jp>
from gluon import *
# For referencing static and views from other application
import os
APP = os.path.basename(os.path.dirname(os.path.dirname(__file__)))
class DIALOG(DIV):
def __init__(self, content, title=None, close_button=None,
width=90, height=80, onclose='', renderstyle=False, **attributes):
DIV.__init__(self, **attributes)
self.title, self.content, self.close_button, self.width, self.height, self.onclose = (
title, content, close_button, width, height, onclose)
self.attributes['_class'] = self.attributes.get('_class', 'dialog')
import uuid
self.attributes['_id'] = self.attributes.get('_id') or str(uuid.uuid4())
self.attributes['_style'] = self.attributes.get('_style',
'display:none; z-index:1001; position:fixed; top:0%;left:0%;width:100%;height:100%;')
if renderstyle:
_url = URL(APP, 'static', 'plugin_dialog/dialog.css')
if _url not in current.response.files:
current.response.files.append(_url)
def show(self, reload=False):
import gluon.contrib.simplejson as json
return ("""(function(){
var el = jQuery("#%(id)s");""" +
("""
el.remove(); el = [];
""" if reload else '') +
"""
if (el.length == 0) {
el = jQuery(%(xml)s); jQuery(document.body).append(el);
}
el.css('zIndex', (parseInt(el.css('zIndex')) || 1000) + 10);
el.show();})();""") % dict(id=self.attributes['_id'],
xml=json.dumps(self.xml().replace('<!--', '').replace('//-->', '')))
def close(self):
return '%s;jQuery("#%s").hide();' % (self.onclose, self.attributes['_id'])
def xml(self):
self.components += [
DIV(_style='width:100%;height:100%;',
_class='dialog-back',
_onclick='%s;return false;' % self.close()),
DIV(DIV(
SPAN(self.title, _style='font-weight:bold:font-size:18px;') if self.title else '',
SPAN('[', A(self.close_button, _href='#', _onclick='%s;return false;' % self.close()), ']',
_style='float:right'
) if self.close_button else '',
HR() if self.title else '',
self.content, _id='c%s' % self.attributes['_id'],
_style=("""
position:absolute;top:%(top)s%%;left:%(left)s%%;
width:%(width)s%%;height:%(height)s%%;
z-index:1100;overflow:auto;
""" % dict(left=(100 - self.width) / 2, top=(100 - self.height) / 2, width=self.width, height=self.height)),
_class='dialog-front',
_onclick="""
var e = arguments[0] || window.event;
if (jQuery(e.target).parent().attr('id') == "c%s") {%s;};
""" % (self.attributes['_id'], self.close())
),
)]
return DIV.xml(self)
| 42.555556 | 108 | 0.555157 | 367 | 3,064 | 4.520436 | 0.376022 | 0.092827 | 0.048222 | 0.024111 | 0.033755 | 0.033755 | 0 | 0 | 0 | 0 | 0 | 0.020087 | 0.252611 | 3,064 | 71 | 109 | 43.15493 | 0.704367 | 0.071475 | 0 | 0 | 0 | 0.037037 | 0.224841 | 0.108866 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0.018519 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f0d71457ff5029aa819b2d18c137d93a17d27a6 | 11,698 | py | Python | GnuPG-System_Pi-Version/util/_util.py | fabianHAW/GnuPG-Distributer-Mailing-System | 7b06ce99481528bdf742c3ee5fe348731daadcc4 | [
"MIT"
] | null | null | null | GnuPG-System_Pi-Version/util/_util.py | fabianHAW/GnuPG-Distributer-Mailing-System | 7b06ce99481528bdf742c3ee5fe348731daadcc4 | [
"MIT"
] | null | null | null | GnuPG-System_Pi-Version/util/_util.py | fabianHAW/GnuPG-Distributer-Mailing-System | 7b06ce99481528bdf742c3ee5fe348731daadcc4 | [
"MIT"
] | null | null | null | '''
Created on 13.06.2016
@author: Fabian Reiber
@version: 1.0
This helper-module offers some helpful methods for the GnuPG-System.
'''
import base64
from dns.resolver import NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers, \
NoMetaqueries, Timeout
import dns.resolver
from email.message import Message
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr
import netifaces
from utilException.InvalidKeyException import InvalidKeyException
from utilException.NoEncryptionPartException import NoEncryptionPartException
from utilException.NoMXRecordException import NoMXRecordException
from utilException.NoSignedPartException import NoSignedPartException
"""
The possible PGP-Hash-Algorithms.
"""
__GNUPG_ALGO = {
'1' : 'pgp-md5',
'2' : 'pgp-sha1',
'3' : 'pgp-ripemd160',
'8' : 'pgp-sha256',
'9' : 'pgp-sha384',
'10' : 'pgp-sha512',
'11' : 'pgp-sha224'
}
def extractKey(msgList):
    """
    Extract a PGP-key from a given MIME message.
    @param msgList: The messages in a List in MIME format.
    @raise InvalidKeyException: If there is no key in the MIME part.
    @return: The extracted key.
    """
    for elem in msgList:
        for subPart in elem.walk():
            key = subPart.get_payload(decode=True)
            if __checkIfKey(key):
                # Bugfix: return the matching key right away. The original
                # only broke out of the *inner* loop, so later messages in
                # msgList could overwrite ``key`` with non-key data before
                # the final ``return key`` was reached.
                return key
    raise InvalidKeyException('KEY INVALID')
def generateMIMEAttachmentPart(part, partTmp, filename):
"""
Generates the PGP-Attachment-MIME-Part.
@param part: The decrypted content of the attachment.
@param partTmp: The origin attachment in MIME format.
@param filename: The filename of the attachment.
@return: An attachment MIME part.
"""
attachment = Message()
attachment['Content-Type'] = partTmp.get_content_type() + filename
attachment['Content-Disposition'] = partTmp.get("Content-Disposition", None)
attachment['Content-Transfer-Encoding'] = 'base64'
attachment.set_payload(base64.b64encode(part))
return attachment
def generateMIMEEncryptionPart(enc):
    """
    Generates the encrypted-MIME-Part.
    @param enc: The encrypted message.
    @return: A pgp-encrypted MIME part.
    """
    octetPart = Message()
    # The ciphertext travels as an application/octet-stream attachment.
    for headerName, headerValue in (
            ('Content-Type', 'application/octet-stream; name="encrypted.asc"'),
            ('Content-Description', 'OpenPGP encrypted message')):
        octetPart[headerName] = headerValue
    octetPart.set_payload(str(enc))
    return octetPart
def generateMIMEKeyPart(key):
    """
    Generates the PGP-Key-MIME-Part.
    @param key: The key for the MIME part.
    @return: A pgp-keys MIME part.
    """
    keyPart = Message()
    keyPart.set_payload(str(key))
    # Declared as an attachment so mail clients offer it as a key file.
    keyPart['Content-Type'] = 'application/pgp-keys; name=keys.asc'
    keyPart['Content-Disposition'] = "attachment; filename='keys.asc'"
    return keyPart
def generateMIMEMsg(subtype, msg, signature, senderAddr, recipientAddr, subject, optinal=None):
    """
    Generates a MIME message depending on the subtype.
    @param subtype: The subtype of the MultipartMIME message
                    ('signed', 'encrypted', 'plain' or 'mixed').
    @param msg: The message part for the MIME message.
    @param signature: The signature of the given message part
                      (only used for subtype 'signed').
    @param senderAddr: The sender mail-address of that message.
    @param recipientAddr: The recipient mail-address of the message.
    @param subject: The subject for the message
    @param optinal: An additional message for the message, e.g. the
                    public-keys of a distributer (only used for 'mixed').
    @return: The MultipartMIME message, or a simple MIME text message.
    """
    multiMime = MIMEMultipart(_subtype=subtype)
    if subtype == 'signed':
        # micalg has to advertise the hash algorithm of the signature.
        sigAlg = __GNUPG_ALGO.get(signature.sig_hash_algo)
        sigPart = generateMIMESignaturePart(signature)
        multiMime.set_param(param='protocol', value='application/pgp-signature')
        multiMime.set_param(param='micalg', value=sigAlg)
        multiMime.attach(msg)
        multiMime.attach(sigPart)
        # Bugfix: compare by equality, not identity. "subject is ''" relied
        # on CPython string interning and is implementation-defined.
        if subject == '':
            # If a message exclusively is signed and not encrypted.
            multiMime.add_header('To', formataddr((recipientAddr, recipientAddr)))
            multiMime.add_header('From', formataddr((senderAddr, senderAddr)))
            multiMime.add_header('Subject', '')
    elif subtype == 'encrypted':
        firstPart = MIMEApplication(_data='', _subtype="pgp-encrypted")
        encPart = generateMIMEEncryptionPart(msg)
        multiMime.set_param(param='protocol', value='application/pgp-encrypted')
        multiMime.attach(firstPart)
        multiMime.attach(encPart)
        multiMime.add_header('To', formataddr((recipientAddr, recipientAddr)))
        multiMime.add_header('From', formataddr((senderAddr, senderAddr)))
        multiMime.add_header('Subject', subject)
    elif subtype == 'plain':
        textPart = MIMEText(msg)
        if (recipientAddr is not None) and (senderAddr is not None):
            # If the message will send without a signature.
            textPart.add_header('To', formataddr((recipientAddr, recipientAddr)))
            textPart.add_header('From', formataddr((senderAddr, senderAddr)))
            textPart.add_header('Subject', '')
        return textPart
    elif subtype == 'mixed':
        firstPart = MIMEText(optinal)
        keyPart = generateMIMEKeyPart(msg)
        multiMime.attach(firstPart)
        multiMime.attach(keyPart)
    return multiMime
def generateMIMESignaturePart(sig):
    """
    Builds the MIME part that carries a detached PGP signature.
    @param sig: The signature of a message.
    @return: A pgp-signature MIME part.
    """
    sigPart = Message()
    sigPart.add_header('Content-Type',
                       'application/pgp-signature; name="signature.asc"')
    sigPart.add_header('Content-Description', 'OpenPGP digital signature')
    # The armored signature itself becomes the payload, verbatim.
    sigPart.set_payload(str(sig))
    return sigPart
def getMIMEPartEnc(msg):
    """
    Separates a PGP-Mime-Encrypted or Inline-PGP message in the existing
    MIME-Parts.
    @param msg: The encrypted message in MIME-Format.
    @raise NoEncryptionPartException: If there is no encrypted part found.
    @return: 2-Tuple: 1.element := partType (mime or inline),
             2. element := the encrypted part of the MIME message.
    """
    encryptedParts = []
    sawControlPart = False
    # First pass: detect the PGP/MIME layout, where an
    # application/pgp-encrypted control part precedes the octet-stream
    # payload(s).
    for part in msg.walk():
        contentType = part.get_content_type()
        if contentType == 'application/pgp-encrypted':
            sawControlPart = True
        elif sawControlPart and contentType == 'application/octet-stream':
            encryptedParts.append(part.get_payload(decode=True))
    if encryptedParts:
        return ('mime', encryptedParts)
    # Fall back to Inline-PGP: the first non-multipart part must itself be a
    # PGP message block, otherwise the mail carries no encrypted data at all.
    foundFirst = False
    for part in msg.walk():
        if part.get_content_maintype() == 'multipart':
            continue
        if foundFirst:
            # Once one inline PGP part was seen, collect all remaining parts.
            encryptedParts.append(part)
        elif __checkIfPGPMsg(part.get_payload(decode=True)):
            encryptedParts.append(part)
            foundFirst = True
        else:
            break
    if not encryptedParts:
        raise NoEncryptionPartException('NO ENCRYPTED PART FOUND IN MAIL')
    return ('inline', encryptedParts)
def getMIMEPartsSig(msg):
    """
    Separates a PGP-Mime-Signature message in the existing MIME-Parts.
    @param msg: The signed message in MIME-Format.
    @raise NoSignedPartException: If there is no signed part found.
    @return: 2-Tuple: 1.element := partType (mime or inline),
             2. element := a message-dictionary: key := signed message; value := signature of message.
    """
    msgSig = None
    sig = None
    mailType = ''
    sigDict = {}
    if msg.get_content_type() == 'multipart/signed':
        #MIME-format
        for part in msg.walk():
            if part.is_multipart() and part.get_content_subtype() != 'signed':
                # A nested multipart container (anything but the outer
                # multipart/signed wrapper) is treated as the signed payload.
                msgSig = part
            elif part.get_content_type() == 'application/pgp-signature':
                sig = part.get_payload(decode=True)
            elif (msgSig is None) and (part.get_content_type() != 'multipart/signed') and (part.get_content_type() != 'application/pgp-signature'):
                #If part has another MIME-format as multipart/signed or application/pgp-signature
                # The first simple part becomes the signed payload when no
                # nested multipart container was found before it.
                msgSig = part
        if objectsNotNone(msgSig, sig):
            # Exactly one payload/signature pair is produced in this layout.
            sigDict[msgSig] = sig
            mailType = 'mime'
        else:
            raise NoSignedPartException('NO SIGNED PART FOUND IN MAIL')
    else:
        #Inline-format
        # Pairing rule for Inline-PGP: an attachment part is assumed to be
        # followed by its detached signature attachment; any non-attachment
        # part carries its signature inline, so its dict value stays None.
        attach = None
        mailType = 'inline'
        for part in msg.walk():
            if part.is_multipart():
                continue
            dispo = part.get('Content-Disposition', '')
            if dispo.startswith('attachment'):
                if attach is None:
                    #Set the signed part
                    attach = part
                    sigDict[attach] = None
                else:
                    #Set the signature for the signed part
                    sigDict[attach] = part
                    attach = None
            else:
                sigDict[part] = None
    return (mailType, sigDict)
def getIPAddress(interface):
    """
    Determine the IPv4-address of a given interface.
    @param interface: The given interface.
    @return: IP-address for the interface.
    """
    # netifaces.AF_INET names the IPv4 address family explicitly instead of
    # the magic index 2; [0] picks the first address on the interface.
    return netifaces.ifaddresses(interface)[netifaces.AF_INET][0]['addr']
def getMXRecords(userAddr):
    """
    Returns a list of all available MX-Records of a specific domain.
    @param userAddr: A mail-address to search the MX-records for.
    @raise NoMXRecordException: If there occurred a DNS specific error.
    @return: A list of all MX-Records for the given domain.
    """
    # Split only once from the right: the domain is everything after the
    # last '@' of the address.
    domain = userAddr.rsplit('@', 1)[-1]
    try:
        return dns.resolver.query(domain, 'MX')
    except (Timeout, NXDOMAIN, YXDOMAIN, NoAnswer, NoNameservers,
            NoMetaqueries) as e:
        # Wrap every dnspython error into the module's own exception type;
        # str(e) is the idiomatic spelling of e.__str__().
        raise NoMXRecordException(str(e))
def objectsNotNone(*objs):
    """
    Checks that none of the given objects is None.
    @param *objs: Tuple of objects to check.
    @return: False, if one object is None, else True.
    """
    # Identity comparison deliberately distinguishes None from other falsy
    # values such as '' or 0; an empty tuple yields True.
    return all(obj is not None for obj in objs)
def __checkIfKey(key):
    """
    Helper-Method: Check if a given key starts and ends with the specific
    PGP key block syntax.
    @param key: The key to check.
    @return: True if it is a valid PGP key, else False.
    """
    if key is None:
        return False
    # Strip once and reuse instead of stripping per check.
    stripped = key.strip()
    if not stripped.startswith(b'-----BEGIN PGP PUBLIC KEY BLOCK-----'):
        return False
    return stripped.endswith(b'-----END PGP PUBLIC KEY BLOCK-----')
def __checkIfPGPMsg(msg):
    """
    Helper-Method: Check if a given decoded message starts and ends with the
    specific PGP message block syntax.
    @param msg: The message to check.
    @return: True if it is a valid PGP message, else False.
    """
    # Strip once and reuse for both armor-line checks.
    body = msg.strip()
    if body.startswith(b'-----BEGIN PGP MESSAGE-----'):
        return body.endswith(b'-----END PGP MESSAGE-----')
    return False
| 38.354098 | 150 | 0.631646 | 1,311 | 11,698 | 5.588101 | 0.196796 | 0.016517 | 0.015288 | 0.017063 | 0.241469 | 0.149195 | 0.122577 | 0.102648 | 0.089271 | 0.060606 | 0 | 0.006206 | 0.269961 | 11,698 | 304 | 151 | 38.480263 | 0.851639 | 0.282698 | 0 | 0.174157 | 0 | 0 | 0.130446 | 0.032283 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073034 | false | 0 | 0.073034 | 0 | 0.247191 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f0d8bfebfcc1ee1973bd4edbe065b03d9b94a93 | 19,788 | py | Python | ceilometerclient/tests/v2/test_alarms.py | zqfan/python-ceilometerclient | 2d4c6446ff6985c3eb9c4742df1c8d0682dee6ea | [
"Apache-2.0"
] | null | null | null | ceilometerclient/tests/v2/test_alarms.py | zqfan/python-ceilometerclient | 2d4c6446ff6985c3eb9c4742df1c8d0682dee6ea | [
"Apache-2.0"
] | null | null | null | ceilometerclient/tests/v2/test_alarms.py | zqfan/python-ceilometerclient | 2d4c6446ff6985c3eb9c4742df1c8d0682dee6ea | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2013 Red Hat, Inc
#
# Author: Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import six
from six.moves import xrange # noqa
import testtools
from ceilometerclient import exc
from ceilometerclient.openstack.common.apiclient import client
from ceilometerclient.openstack.common.apiclient import fake_client
from ceilometerclient.v2 import alarms
# Canonical threshold-alarm fixture used throughout the tests below.  It
# carries a nested threshold_rule plus two named time constraints ('cons1',
# 'cons2') so that the time-constraint add/update/remove tests have data
# to modify.
AN_ALARM = {u'alarm_actions': [u'http://site:8000/alarm'],
            u'ok_actions': [u'http://site:8000/ok'],
            u'description': u'An alarm',
            u'type': u'threshold',
            u'threshold_rule': {
                u'meter_name': u'storage.objects',
                u'query': [{u'field': u'key_name',
                            u'op': u'eq',
                            u'value': u'key_value'}],
                u'evaluation_periods': 2,
                u'period': 240.0,
                u'statistic': u'avg',
                u'threshold': 200.0,
                u'comparison_operator': 'gt'},
            u'time_constraints': [
                {
                    u'name': u'cons1',
                    u'description': u'desc1',
                    u'start': u'0 11 * * *',
                    u'duration': 300,
                    u'timezone': u''},
                {
                    u'name': u'cons2',
                    u'description': u'desc2',
                    u'start': u'0 23 * * *',
                    u'duration': 600,
                    u'timezone': ''}],
            u'timestamp': u'2013-05-09T13:41:23.085000',
            u'enabled': True,
            u'alarm_id': u'alarm-id',
            u'state': u'ok',
            u'insufficient_data_actions': [u'http://site:8000/nodata'],
            u'user_id': u'user-id',
            u'project_id': u'project-id',
            u'state_timestamp': u'2013-05-09T13:41:23.085000',
            u'repeat_actions': False,
            u'name': 'SwiftObjectAlarm'}
# CREATE_ALARM mirrors AN_ALARM minus the server-assigned fields, i.e. the
# body a client would POST on creation.
CREATE_ALARM = copy.deepcopy(AN_ALARM)
del CREATE_ALARM['timestamp']
del CREATE_ALARM['state_timestamp']
del CREATE_ALARM['alarm_id']
CREATE_ALARM_WITHOUT_TC = copy.deepcopy(CREATE_ALARM)
del CREATE_ALARM_WITHOUT_TC['time_constraints']
# DELTA_ALARM holds only the fields a partial update changes; DELTA_ALARM_TC
# patches 'cons1' (new duration 500) while leaving its other keys intact.
DELTA_ALARM = {u'alarm_actions': ['url1', 'url2']}
DELTA_ALARM_RULE = {u'comparison_operator': u'lt',
                    u'threshold': 42.1,
                    u'meter_name': u'foobar',
                    u'query': [{u'field': u'key_name',
                                u'op': u'eq',
                                u'value': u'key_value'}]}
DELTA_ALARM_TC = [{u'name': u'cons1',
                   u'duration': 500}]
DELTA_ALARM['time_constraints'] = DELTA_ALARM_TC
# UPDATED_ALARM is the expected full alarm AFTER the delta is applied.  Note
# the ordering: 'remove_time_constraints' is added to DELTA_ALARM only after
# UPDATED_ALARM absorbed it, so that key never leaks into the expected body.
UPDATED_ALARM = copy.deepcopy(AN_ALARM)
UPDATED_ALARM.update(DELTA_ALARM)
UPDATED_ALARM['threshold_rule'].update(DELTA_ALARM_RULE)
DELTA_ALARM['remove_time_constraints'] = 'cons2'
# After the delta, only the patched 'cons1' remains ('cons2' was removed).
UPDATED_ALARM['time_constraints'] = [{u'name': u'cons1',
                                      u'description': u'desc1',
                                      u'start': u'0 11 * * *',
                                      u'duration': 500,
                                      u'timezone': u''}]
DELTA_ALARM['threshold_rule'] = DELTA_ALARM_RULE
# UPDATE_ALARM is the full-update request body: the updated alarm minus the
# read-only fields, plus the removal directive.
UPDATE_ALARM = copy.deepcopy(UPDATED_ALARM)
UPDATE_ALARM['remove_time_constraints'] = 'cons2'
del UPDATE_ALARM['user_id']
del UPDATE_ALARM['project_id']
del UPDATE_ALARM['name']
del UPDATE_ALARM['alarm_id']
del UPDATE_ALARM['timestamp']
del UPDATE_ALARM['state_timestamp']
# Legacy (flat, pre-threshold_rule) representation of the same alarm: the
# rule fields live at top level and 'matching_metadata' replaces 'query'.
AN_LEGACY_ALARM = {u'alarm_actions': [u'http://site:8000/alarm'],
                   u'ok_actions': [u'http://site:8000/ok'],
                   u'description': u'An alarm',
                   u'matching_metadata': {u'key_name': u'key_value'},
                   u'evaluation_periods': 2,
                   u'timestamp': u'2013-05-09T13:41:23.085000',
                   u'enabled': True,
                   u'meter_name': u'storage.objects',
                   u'period': 240.0,
                   u'alarm_id': u'alarm-id',
                   u'state': u'ok',
                   u'insufficient_data_actions': [u'http://site:8000/nodata'],
                   u'statistic': u'avg',
                   u'threshold': 200.0,
                   u'user_id': u'user-id',
                   u'project_id': u'project-id',
                   u'state_timestamp': u'2013-05-09T13:41:23.085000',
                   u'comparison_operator': 'gt',
                   u'repeat_actions': False,
                   u'name': 'SwiftObjectAlarm'}
# As above: server-assigned fields are dropped for the creation body.
CREATE_LEGACY_ALARM = copy.deepcopy(AN_LEGACY_ALARM)
del CREATE_LEGACY_ALARM['timestamp']
del CREATE_LEGACY_ALARM['state_timestamp']
del CREATE_LEGACY_ALARM['alarm_id']
DELTA_LEGACY_ALARM = {u'alarm_actions': ['url1', 'url2'],
                      u'comparison_operator': u'lt',
                      u'meter_name': u'foobar',
                      u'threshold': 42.1}
DELTA_LEGACY_ALARM['time_constraints'] = [{u'name': u'cons1',
                                           u'duration': 500}]
DELTA_LEGACY_ALARM['remove_time_constraints'] = 'cons2'
# Expected alarm after applying the legacy delta, and the corresponding
# full-update request body (read-only fields removed).
UPDATED_LEGACY_ALARM = copy.deepcopy(AN_LEGACY_ALARM)
UPDATED_LEGACY_ALARM.update(DELTA_LEGACY_ALARM)
UPDATE_LEGACY_ALARM = copy.deepcopy(UPDATED_LEGACY_ALARM)
del UPDATE_LEGACY_ALARM['user_id']
del UPDATE_LEGACY_ALARM['project_id']
del UPDATE_LEGACY_ALARM['name']
del UPDATE_LEGACY_ALARM['alarm_id']
del UPDATE_LEGACY_ALARM['timestamp']
del UPDATE_LEGACY_ALARM['state_timestamp']
# JSON blob stored in the 'detail' field of a 'creation' history event.
# NOTE(review): the "alarm_id" value below is missing its closing quote, so
# FULL_DETAIL is not valid JSON; the tests only compare it verbatim and
# never parse it, so this is harmless here — confirm before reusing.
FULL_DETAIL = ('{"alarm_actions": [], '
               '"user_id": "8185aa72421a4fd396d4122cba50e1b5", '
               '"name": "scombo", '
               '"timestamp": "2013-10-03T08:58:33.647912", '
               '"enabled": true, '
               '"state_timestamp": "2013-10-03T08:58:33.647912", '
               '"rule": {"operator": "or", "alarm_ids": '
               '["062cc907-3a9f-4867-ab3b-fa83212b39f7"]}, '
               '"alarm_id": "alarm-id, '
               '"state": "insufficient data", '
               '"insufficient_data_actions": [], '
               '"repeat_actions": false, '
               '"ok_actions": [], '
               '"project_id": "57d04f24d0824b78b1ea9bcecedbda8f", '
               '"type": "combination", '
               '"description": "Combined state of alarms '
               '062cc907-3a9f-4867-ab3b-fa83212b39f7"}')
# Three history events (newest first): a state transition, a rule change,
# and the original creation carrying FULL_DETAIL.
ALARM_HISTORY = [{'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                  'event_id': 'c74a8611-6553-4764-a860-c15a6aabb5d0',
                  'timestamp': '2013-10-03T08:59:28.326000',
                  'detail': '{"state": "alarm"}',
                  'alarm_id': 'alarm-id',
                  'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'type': 'state transition'},
                 {'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                  'event_id': 'c74a8611-6553-4764-a860-c15a6aabb5d0',
                  'timestamp': '2013-10-03T08:59:28.326000',
                  'detail': '{"description": "combination of one"}',
                  'alarm_id': 'alarm-id',
                  'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'type': 'rule change'},
                 {'on_behalf_of': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'user_id': '8185aa72421a4fd396d4122cba50e1b5',
                  'event_id': '4fd7df9e-190d-4471-8884-dc5a33d5d4bb',
                  'timestamp': '2013-10-03T08:58:33.647000',
                  'detail': FULL_DETAIL,
                  'alarm_id': 'alarm-id',
                  'project_id': '57d04f24d0824b78b1ea9bcecedbda8f',
                  'type': 'creation'}]
# Canned responses for FakeHTTPClient.  Layout:
#   url -> HTTP method -> (response headers, response body)
# URLs with encoded query strings must match the exact request the manager
# builds, including empty q.op/q.type values.
fixtures = {
    '/v2/alarms':
    {
        'GET': (
            {},
            [AN_ALARM],
        ),
        'POST': (
            {},
            CREATE_ALARM,
        ),
    },
    '/v2/alarms/alarm-id':
    {
        'GET': (
            {},
            AN_ALARM,
        ),
        'PUT': (
            {},
            UPDATED_ALARM,
        ),
        'DELETE': (
            {},
            None,
        ),
    },
    # Unknown alarm id: both lookups return an empty body.
    '/v2/alarms/unk-alarm-id':
    {
        'GET': (
            {},
            None,
        ),
        'PUT': (
            {},
            None,
        ),
    },
    '/v2/alarms/alarm-id/state':
    {
        'PUT': (
            {},
            {'alarm': 'alarm'}
        ),
        'GET': (
            {},
            {'alarm': 'alarm'}
        ),
    },
    '/v2/alarms?q.field=project_id&q.field=name&q.op=&q.op='
    '&q.type=&q.type=&q.value=project-id&q.value=SwiftObjectAlarm':
    {
        'GET': (
            {},
            [AN_ALARM],
        ),
    },
    '/v2/alarms/victim-id':
    {
        'DELETE': (
            {},
            None,
        ),
    },
    '/v2/alarms/alarm-id/history':
    {
        'GET': (
            {},
            ALARM_HISTORY,
        ),
    },
    '/v2/alarms/alarm-id/history?q.field=timestamp&q.op=&q.type=&q.value=NOW':
    {
        'GET': (
            {},
            ALARM_HISTORY,
        ),
    },
}
class AlarmManagerTest(testtools.TestCase):
    """CRUD, state and history tests for alarms.AlarmManager.

    Each test talks to a FakeHTTPClient primed with the module-level
    ``fixtures`` mapping and then verifies both the HTTP request that was
    issued and the deserialized result.
    """

    def setUp(self):
        super(AlarmManagerTest, self).setUp()
        self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
        self.api = client.BaseClient(self.http_client)
        self.mgr = alarms.AlarmManager(self.api)

    def test_list_all(self):
        # Named 'results' (not 'alarms') so the imported
        # ceilometerclient.v2.alarms module is not shadowed.
        results = list(self.mgr.list())
        expect = [
            'GET', '/v2/alarms'
        ]
        self.http_client.assert_called(*expect)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].alarm_id, 'alarm-id')

    def test_list_with_query(self):
        results = list(self.mgr.list(q=[{"field": "project_id",
                                         "value": "project-id"},
                                        {"field": "name",
                                         "value": "SwiftObjectAlarm"}]))
        expect = [
            'GET',
            '/v2/alarms?q.field=project_id&q.field=name&q.op=&q.op='
            '&q.type=&q.type=&q.value=project-id&q.value=SwiftObjectAlarm',
        ]
        self.http_client.assert_called(*expect)
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0].alarm_id, 'alarm-id')

    def test_get(self):
        alarm = self.mgr.get(alarm_id='alarm-id')
        expect = [
            'GET', '/v2/alarms/alarm-id'
        ]
        self.http_client.assert_called(*expect)
        self.assertIsNotNone(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        # The generic 'rule' attribute must alias the type-specific rule.
        self.assertEqual(alarm.rule, alarm.threshold_rule)

    def test_create(self):
        alarm = self.mgr.create(**CREATE_ALARM)
        expect = [
            'POST', '/v2/alarms'
        ]
        self.http_client.assert_called(*expect, body=CREATE_ALARM)
        self.assertIsNotNone(alarm)

    def test_update(self):
        alarm = self.mgr.update(alarm_id='alarm-id', **UPDATE_ALARM)
        # An update first GETs the current alarm, then PUTs the merge.
        expect_get = [
            'GET', '/v2/alarms/alarm-id'
        ]
        expect_put = [
            'PUT', '/v2/alarms/alarm-id', UPDATED_ALARM
        ]
        self.http_client.assert_called(*expect_get, pos=0)
        self.http_client.assert_called(*expect_put, pos=1)
        self.assertIsNotNone(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        for (key, value) in six.iteritems(UPDATED_ALARM):
            self.assertEqual(getattr(alarm, key), value)

    def test_update_delta(self):
        # A partial delta must produce the same PUT body as a full update.
        alarm = self.mgr.update(alarm_id='alarm-id', **DELTA_ALARM)
        expect_get = [
            'GET', '/v2/alarms/alarm-id'
        ]
        expect_put = [
            'PUT', '/v2/alarms/alarm-id', UPDATED_ALARM
        ]
        self.http_client.assert_called(*expect_get, pos=0)
        self.http_client.assert_called(*expect_put, pos=1)
        self.assertIsNotNone(alarm)
        self.assertEqual(alarm.alarm_id, 'alarm-id')
        for (key, value) in six.iteritems(UPDATED_ALARM):
            self.assertEqual(getattr(alarm, key), value)

    def test_set_state(self):
        state = self.mgr.set_state(alarm_id='alarm-id', state='alarm')
        expect = [
            'PUT', '/v2/alarms/alarm-id/state'
        ]
        self.http_client.assert_called(*expect, body='alarm')
        self.assertEqual(state, {'alarm': 'alarm'})

    def test_get_state(self):
        state = self.mgr.get_state(alarm_id='alarm-id')
        expect = [
            'GET', '/v2/alarms/alarm-id/state'
        ]
        self.http_client.assert_called(*expect)
        self.assertEqual(state, {'alarm': 'alarm'})

    def test_delete(self):
        deleted = self.mgr.delete(alarm_id='victim-id')
        expect = [
            'DELETE', '/v2/alarms/victim-id'
        ]
        self.http_client.assert_called(*expect)
        self.assertIsNone(deleted)

    def test_get_from_alarm_class(self):
        alarm = self.mgr.get(alarm_id='alarm-id')
        self.assertIsNotNone(alarm)
        # Refreshing via the resource object issues a second identical GET.
        alarm.get()
        expect = [
            'GET', '/v2/alarms/alarm-id'
        ]
        self.http_client.assert_called(*expect, pos=0)
        self.http_client.assert_called(*expect, pos=1)
        self.assertEqual('alarm-id', alarm.alarm_id)
        self.assertEqual(alarm.threshold_rule, alarm.rule)

    def test_get_state_from_alarm_class(self):
        alarm = self.mgr.get(alarm_id='alarm-id')
        self.assertIsNotNone(alarm)
        state = alarm.get_state()
        expect_get_1 = [
            'GET', '/v2/alarms/alarm-id'
        ]
        expect_get_2 = [
            'GET', '/v2/alarms/alarm-id/state'
        ]
        self.http_client.assert_called(*expect_get_1, pos=0)
        self.http_client.assert_called(*expect_get_2, pos=1)
        self.assertEqual('alarm', state)

    def test_update_missing(self):
        alarm = None
        try:
            alarm = self.mgr.update(alarm_id='unk-alarm-id', **UPDATE_ALARM)
        except exc.CommandError:
            pass
        # Either the update raised CommandError or returned nothing.
        self.assertIsNone(alarm)

    def test_delete_from_alarm_class(self):
        alarm = self.mgr.get(alarm_id='alarm-id')
        self.assertIsNotNone(alarm)
        deleted = alarm.delete()
        expect_get = [
            'GET', '/v2/alarms/alarm-id'
        ]
        expect_delete = [
            'DELETE', '/v2/alarms/alarm-id'
        ]
        self.http_client.assert_called(*expect_get, pos=0)
        self.http_client.assert_called(*expect_delete, pos=1)
        self.assertIsNone(deleted)

    def _do_test_get_history(self, q, url):
        """Fetch alarm history and compare it against ALARM_HISTORY."""
        history = self.mgr.get_history(q=q, alarm_id='alarm-id')
        expect = ['GET', url]
        self.http_client.assert_called(*expect)
        # enumerate() replaces the index-based xrange loop.
        for i, change in enumerate(history):
            self.assertIsInstance(change, alarms.AlarmChange)
            for k, v in six.iteritems(ALARM_HISTORY[i]):
                self.assertEqual(getattr(change, k), v)

    def test_get_all_history(self):
        url = '/v2/alarms/alarm-id/history'
        self._do_test_get_history(None, url)

    def test_get_constrained_history(self):
        q = [dict(field='timestamp', value='NOW')]
        url = ('/v2/alarms/alarm-id/history?q.field=timestamp'
               '&q.op=&q.type=&q.value=NOW')
        self._do_test_get_history(q, url)
class AlarmLegacyManagerTest(testtools.TestCase):
    """Covers the flat, pre-threshold_rule ("legacy") alarm format."""

    def setUp(self):
        super(AlarmLegacyManagerTest, self).setUp()
        self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
        self.api = client.BaseClient(self.http_client)
        self.mgr = alarms.AlarmManager(self.api)

    def test_create(self):
        created = self.mgr.create(**CREATE_LEGACY_ALARM)
        self.http_client.assert_called(
            'POST', '/v2/alarms', CREATE_ALARM_WITHOUT_TC)
        self.assertIsNotNone(created)

    def test_create_counter_name(self):
        # 'counter_name' is the deprecated spelling of 'meter_name'; the
        # manager must translate it transparently.
        request = dict(CREATE_LEGACY_ALARM)
        request['counter_name'] = request.pop('meter_name')
        created = self.mgr.create(**request)
        self.http_client.assert_called(
            'POST', '/v2/alarms', CREATE_ALARM_WITHOUT_TC)
        self.assertIsNotNone(created)

    def test_update(self):
        updated = self.mgr.update(alarm_id='alarm-id', **DELTA_LEGACY_ALARM)
        self.http_client.assert_called(
            'PUT', '/v2/alarms/alarm-id', UPDATED_ALARM)
        self.assertIsNotNone(updated)
        self.assertEqual(updated.alarm_id, 'alarm-id')
        for key, value in six.iteritems(UPDATED_ALARM):
            self.assertEqual(getattr(updated, key), value)

    def test_update_counter_name(self):
        # Same translation check on the update path.
        request = dict(UPDATE_LEGACY_ALARM)
        request['counter_name'] = request.pop('meter_name')
        updated = self.mgr.update(alarm_id='alarm-id', **request)
        self.http_client.assert_called(
            'PUT', '/v2/alarms/alarm-id', UPDATED_ALARM)
        self.assertIsNotNone(updated)
        self.assertEqual(updated.alarm_id, 'alarm-id')
        for key, value in six.iteritems(UPDATED_ALARM):
            self.assertEqual(getattr(updated, key), value)
class AlarmTimeConstraintTest(testtools.TestCase):
    """Verifies how partial updates merge into an alarm's time constraints."""

    def setUp(self):
        super(AlarmTimeConstraintTest, self).setUp()
        self.http_client = fake_client.FakeHTTPClient(fixtures=fixtures)
        self.api = client.BaseClient(self.http_client)
        self.mgr = alarms.AlarmManager(self.api)

    def test_add_new(self):
        # A constraint name the alarm does not know yet is appended.
        constraint = {'name': 'cons3', 'start': '0 0 * * *', 'duration': 500}
        self.mgr.update(alarm_id='alarm-id', time_constraints=[constraint])
        expected_body = copy.deepcopy(AN_ALARM)
        expected_body[u'time_constraints'] = (
            AN_ALARM[u'time_constraints'] + [constraint])
        self.http_client.assert_called(
            'PUT', '/v2/alarms/alarm-id', expected_body)

    def test_update_existing(self):
        # A known constraint name patches the stored constraint in place,
        # leaving untouched keys as they were.
        self.mgr.update(alarm_id='alarm-id',
                        time_constraints=[{'name': 'cons2', 'duration': 500}])
        expected_body = copy.deepcopy(AN_ALARM)
        expected_body[u'time_constraints'][1] = {'name': 'cons2',
                                                 'description': 'desc2',
                                                 'start': '0 23 * * *',
                                                 'duration': 500,
                                                 'timezone': ''}
        self.http_client.assert_called(
            'PUT', '/v2/alarms/alarm-id', expected_body)

    def test_remove(self):
        # remove_time_constraints drops the named constraint entirely.
        self.mgr.update(alarm_id='alarm-id',
                        remove_time_constraints=['cons2'])
        expected_body = copy.deepcopy(AN_ALARM)
        expected_body[u'time_constraints'] = AN_ALARM[u'time_constraints'][:1]
        self.http_client.assert_called(
            'PUT', '/v2/alarms/alarm-id', expected_body)
| 36.57671 | 78 | 0.553214 | 2,188 | 19,788 | 4.811243 | 0.120201 | 0.057851 | 0.041227 | 0.033248 | 0.680821 | 0.604636 | 0.531395 | 0.506887 | 0.446566 | 0.424052 | 0 | 0.047012 | 0.309885 | 19,788 | 540 | 79 | 36.644444 | 0.723858 | 0.031534 | 0 | 0.467909 | 0 | 0.010352 | 0.23629 | 0.077301 | 0 | 0 | 0 | 0 | 0.124224 | 1 | 0.05383 | false | 0.00207 | 0.016563 | 0 | 0.076605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f113383c517811a2fada36a91c193fbf6aadc4b | 1,642 | py | Python | source-code/Shortest Word Distance II 244.py | ttungl/Coding-Interview-Challenge | d80c3e15468d50b42ee53fcc73e9326c6c816495 | [
"MIT"
] | null | null | null | source-code/Shortest Word Distance II 244.py | ttungl/Coding-Interview-Challenge | d80c3e15468d50b42ee53fcc73e9326c6c816495 | [
"MIT"
] | null | null | null | source-code/Shortest Word Distance II 244.py | ttungl/Coding-Interview-Challenge | d80c3e15468d50b42ee53fcc73e9326c6c816495 | [
"MIT"
] | null | null | null | # 244. Shortest Word Distance II
# ttungl@gmail.com
# This is a follow up of Shortest Word Distance. The only difference is now you are given the list of words and your method will be called repeatedly many times with different parameters. How would you optimize it?
# Design a class which receives a list of words in the constructor, and implements a method that takes two words word1 and word2 and return the shortest distance between these two words in the list.
# For example,
# Assume that words = ["practice", "makes", "perfect", "coding", "makes"].
# Given word1 = “coding”, word2 = “practice”, return 3.
# Given word1 = "makes", word2 = "coding", return 1.
# Note:
# You may assume that word1 does not equal to word2, and word1 and word2 are both in the list.
# sol 1:
# runtime: 85ms
class WordDistance(object):
    """Precomputes per-word index lists so repeated distance queries are
    answered in O(len(l1) + len(l2)) with a two-pointer sweep."""

    def __init__(self, words):
        """
        :type words: List[str]
        """
        # Imported locally so the class is self-contained; the original
        # relied on a module-level import that is not present in this file.
        from collections import defaultdict
        self.words = words
        # Map each word to the ascending list of indices where it occurs.
        # (The original copied 'words' into a new list first — a needless
        # shallow copy mislabeled as "flatten".)
        self.d = defaultdict(list)
        for i, word in enumerate(words):
            self.d[word].append(i)

    def shortest(self, word1, word2):
        """
        :type word1: str
        :type word2: str
        :rtype: int
        """
        l1, l2 = self.d[word1], self.d[word2]
        i = j = 0
        res = len(self.words)
        # Both index lists are sorted, so advancing the pointer behind the
        # smaller index always moves toward the minimum gap.
        while i < len(l1) and j < len(l2):
            res = min(res, abs(l1[i] - l2[j]))
            if l1[i] < l2[j]:
                i += 1
            else:
                j += 1
        return res
# Your WordDistance object will be instantiated and called as such:
# obj = WordDistance(words)
# param_1 = obj.shortest(word1,word2) | 33.510204 | 214 | 0.622412 | 242 | 1,642 | 4.202479 | 0.442149 | 0.019666 | 0.039331 | 0.011799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031799 | 0.272229 | 1,642 | 49 | 215 | 33.510204 | 0.819247 | 0.596224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f127a6ef0e549684fc462a22aab994d7ae0ab9f | 10,674 | py | Python | tests/acceptance/fetch_test.py | benbariteau/fido | e6839917aaba5097af857b8e9086ca9b7f426621 | [
"Apache-2.0"
] | 23 | 2015-02-18T04:00:59.000Z | 2021-01-08T04:51:22.000Z | tests/acceptance/fetch_test.py | benbariteau/fido | e6839917aaba5097af857b8e9086ca9b7f426621 | [
"Apache-2.0"
] | 61 | 2015-03-05T23:42:02.000Z | 2021-03-02T01:53:33.000Z | tests/acceptance/fetch_test.py | benbariteau/fido | e6839917aaba5097af857b8e9086ca9b7f426621 | [
"Apache-2.0"
] | 21 | 2015-02-18T04:01:11.000Z | 2020-12-21T23:38:16.000Z | # -*- coding: utf-8 -*-
import logging
import time
import zlib
from multiprocessing import Process
import crochet
import pytest
from six.moves import BaseHTTPServer
from six.moves import socketserver as SocketServer
from yelp_bytes import to_bytes
import fido
from fido.fido import DEFAULT_USER_AGENT
from fido.fido import GZIP_WINDOW_SIZE
from fido.exceptions import TCPConnectionError
from fido.exceptions import HTTPTimeoutError
from fido.exceptions import GzipDecompressionError
SERVER_OVERHEAD_TIME = 2.0
TIMEOUT_TEST = 1.0
ECHO_URL = '/echo'
GZIP_URL = '/gzip'
def _compress_gzip(buffer):
compress_gzip = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED,
GZIP_WINDOW_SIZE,
)
return compress_gzip.compress(buffer) + compress_gzip.flush()
# Verifies that setting TCP_NODELAY does not affect
# the output of fido
@pytest.fixture(scope="module", params=[False, True])
def tcp_nodelay(request):
return request.param
@pytest.yield_fixture(scope="module")
def server_url():
"""Spin up a localhost web server for testing."""
# Surpress 'No handlers could be found for logger "twisted"' messages
logging.basicConfig()
logging.getLogger('twisted').setLevel(logging.CRITICAL)
class TestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def echo(self):
if 'slow' in self.path:
time.sleep(SERVER_OVERHEAD_TIME)
self.send_response(200)
for k, v in self.headers.items():
self.send_header(k, v)
self.end_headers()
content_length = int(self.headers.get('Content-Length', 0))
if content_length > 0:
self.wfile.write(self.rfile.read(content_length))
def gzip(self):
accept_encoding_headers = []
for k, v in self.headers.items():
if k.lower() == 'accept-encoding':
accept_encoding_headers.append(v)
if 'gzip' not in accept_encoding_headers:
self.send_response(500)
self.end_headers()
return
self.send_response(200)
self.send_header('Content-Encoding', 'gzip')
self.end_headers()
content_length = int(self.headers.get('Content-Length', 0))
if content_length > 0:
self.wfile.write(_compress_gzip(
self.rfile.read(content_length)))
def content_length(self):
"""Send back the content-length number as the response."""
self.send_response(200)
response = to_bytes(self.headers.get('Content-Length'))
content_length = len(response)
self.send_header('Content-Length', content_length)
self.end_headers()
self.wfile.write(response)
def do_GET(self):
if ECHO_URL in self.path:
self.echo()
elif GZIP_URL in self.path:
self.gzip()
def do_POST(self):
if 'content_length' in self.path:
self.content_length()
elif ECHO_URL in self.path:
self.echo()
class MultiThreadedHTTPServer(
SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer
):
request_queue_size = 1000
httpd = MultiThreadedHTTPServer(('localhost', 0), TestHandler)
web_service_process = Process(target=httpd.serve_forever)
try:
web_service_process.start()
server_address = 'http://{host}:{port}'.format(
host=httpd.server_address[0],
port=httpd.server_address[1],
)
yield server_address
finally:
web_service_process.terminate()
def test_fetch_basic(server_url):
response = fido.fetch(server_url + ECHO_URL).wait(timeout=1)
assert response.headers.get(b'User-Agent') == [
to_bytes(DEFAULT_USER_AGENT),
]
assert response.reason == b'OK'
assert response.code == 200
def test_eventual_result_timeout(server_url):
"""
Testing timeout on result retrieval
"""
# fetch without setting timeouts -> we could potentially wait forever
eventual_result = fido.fetch(server_url + ECHO_URL + '/slow')
# make sure no timeout error is thrown here but only on result retrieval
assert eventual_result.original_failure() is None
with pytest.raises(crochet.TimeoutError):
eventual_result.wait(timeout=TIMEOUT_TEST)
assert eventual_result.original_failure() is None
def test_agent_timeout(server_url, tcp_nodelay):
"""
Testing that we don't wait forever on the server sending back a response
"""
eventual_result = fido.fetch(
server_url + ECHO_URL + '/slow',
timeout=TIMEOUT_TEST,
tcp_nodelay=tcp_nodelay,
)
# wait for fido to estinguish the timeout and abort before test-assertions
time.sleep(2 * TIMEOUT_TEST)
# timeout errors were thrown and handled in the reactor thread.
# EventualResult stores them and re-raises on result retrieval
assert eventual_result.original_failure() is not None
with pytest.raises(HTTPTimeoutError) as excinfo:
eventual_result.wait(timeout=1)
assert (
"Connection was closed by fido because the server took "
"more than timeout={timeout} seconds to "
"send the response".format(timeout=TIMEOUT_TEST)
in str(excinfo.value)
)
def test_agent_connect_timeout(tcp_nodelay):
"""
Testing that we don't wait more than connect_timeout to establish a http
connection
"""
# google drops TCP SYN packets
eventual_result = fido.fetch(
"http://www.google.com:81",
connect_timeout=TIMEOUT_TEST,
tcp_nodelay=tcp_nodelay,
)
# wait enough for the connection to be dropped by Twisted Agent
time.sleep(3 * TIMEOUT_TEST)
# timeout errors were thrown and handled in the reactor thread.
# EventualResult stores them and re-raises on result retrieval
assert eventual_result.original_failure() is not None
with pytest.raises(TCPConnectionError) as excinfo:
eventual_result.wait(timeout=1)
assert (
"Connection was closed by Twisted Agent because there was "
"a problem establishing the connection or the "
"connect_timeout={connect_timeout} was reached."
.format(connect_timeout=TIMEOUT_TEST)
in str(excinfo.value)
)
def test_fetch_headers(server_url, tcp_nodelay):
headers = {'foo': ['bar']}
eventual_result = fido.fetch(
server_url + ECHO_URL,
headers=headers,
tcp_nodelay=tcp_nodelay,
)
actual_headers = eventual_result.wait(timeout=1).headers
assert actual_headers.get(b'Foo') == [b'bar']
def test_json_body(server_url, tcp_nodelay):
body = b'{"some_json_data": 30}'
eventual_result = fido.fetch(
server_url + ECHO_URL,
method='POST',
body=body,
tcp_nodelay=tcp_nodelay,
)
assert eventual_result.wait(timeout=1).json()['some_json_data'] == 30
def test_content_length_readded_by_twisted(server_url, tcp_nodelay):
headers = {'Content-Length': '250'}
body = b'{"some_json_data": 30}'
eventual_result = fido.fetch(
server_url + '/content_length',
method='POST',
headers=headers,
body=body,
tcp_nodelay=tcp_nodelay,
)
content_length = int(eventual_result.wait(timeout=1).body)
assert content_length == 22
def test_fetch_content_type(server_url, tcp_nodelay):
expected_content_type = b'text/html'
eventual_result = fido.fetch(
server_url + ECHO_URL,
headers={'Content-Type': expected_content_type},
tcp_nodelay=tcp_nodelay,
)
actual_content_type = eventual_result.wait(
timeout=1,
).headers.get(b'Content-Type')
assert [expected_content_type] == actual_content_type
@pytest.mark.parametrize(
'header_name', ('User-Agent', 'user-agent')
)
def test_fetch_user_agent(server_url, header_name, tcp_nodelay):
expected_user_agent = [b'skynet']
headers = {header_name: expected_user_agent}
eventual_result = fido.fetch(
server_url + ECHO_URL,
headers=headers,
tcp_nodelay=tcp_nodelay,
)
actual_user_agent = eventual_result.wait(
timeout=1,
).headers.get(b'User-Agent')
assert expected_user_agent == actual_user_agent
def test_fetch_body(server_url, tcp_nodelay):
expected_body = b'corpus'
eventual_result = fido.fetch(
server_url + ECHO_URL,
body=expected_body,
tcp_nodelay=tcp_nodelay,
)
actual_body = eventual_result.wait(timeout=1).body
assert expected_body == actual_body
def test_fido_request_no_timeout_when_header_value_not_list(tcp_nodelay):
fido.fetch(
'http://www.yelp.com',
headers={
'Accept-Charset': 'utf-8',
'Accept-Language': ['en-US']
},
tcp_nodelay=tcp_nodelay,
).wait(timeout=5)
def test_fido_request_decompress_gzip(server_url):
    """gzip decompression: invalid payloads raise, valid ones round-trip."""
    plaintext = b'hello world'
    # Trick the client into gunzipping plain text by echoing a gzip
    # Content-Encoding header; the failed decompression must raise a
    # GzipDecompressionError.
    with pytest.raises(GzipDecompressionError):
        fido.fetch(
            server_url + ECHO_URL,
            headers={'Content-Encoding': 'gzip'},
            body=plaintext,
            decompress_gzip=True,
        ).wait(timeout=1)
    # A genuinely gzipped response is transparently decompressed; fido is
    # also expected to append gzip to the Accept-Encoding list on its own.
    response = fido.fetch(
        server_url + GZIP_URL,
        headers={
            'Content-Encoding': 'gzip',
            'Accept-Encoding': 'deflate, br, identity',
        },
        body=plaintext,
        decompress_gzip=True,
    ).wait(timeout=1)
    assert response.code == 200
    assert response.body == plaintext
def test_fido_request_gzip_disabled(server_url):
    """With decompress_gzip=False a gzipped payload stays compressed."""
    plaintext = b'hello world'
    response = fido.fetch(
        server_url + GZIP_URL,
        body=plaintext,
        headers={'Accept-Encoding': 'gzip'},
        decompress_gzip=False,
    ).wait(timeout=1)
    assert response.code == 200
    assert response.body == _compress_gzip(plaintext)
| 30.410256 | 78 | 0.661514 | 1,305 | 10,674 | 5.196935 | 0.213793 | 0.04276 | 0.026541 | 0.031849 | 0.393542 | 0.341345 | 0.31967 | 0.273371 | 0.21616 | 0.151873 | 0 | 0.008218 | 0.247611 | 10,674 | 350 | 79 | 30.497143 | 0.836259 | 0.137624 | 0 | 0.338776 | 0 | 0 | 0.092743 | 0.003618 | 0 | 0 | 0 | 0 | 0.077551 | 1 | 0.085714 | false | 0 | 0.061224 | 0.004082 | 0.171429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f12be13b6d2453bad70a353595fcd008e638a77 | 14,028 | py | Python | ospy/helpers.py | teodoryantcheff/OSPy | 07f44e262383054b276e34d7fc16b3cc10a6d9cf | [
"CC-BY-3.0"
] | 1 | 2018-07-10T18:33:53.000Z | 2018-07-10T18:33:53.000Z | ospy/helpers.py | teodoryantcheff/OSPy | 07f44e262383054b276e34d7fc16b3cc10a6d9cf | [
"CC-BY-3.0"
] | null | null | null | ospy/helpers.py | teodoryantcheff/OSPy | 07f44e262383054b276e34d7fc16b3cc10a6d9cf | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Rimco'
# System imports
import datetime
import logging
import random
import time
import errno
from threading import Lock
BRUTEFORCE_LOCK = Lock()
def del_rw(action, name, exc):
    """shutil.rmtree onerror handler: clear read-only flag and delete *name*."""
    import os
    import stat
    if not os.path.exists(name):
        return
    os.chmod(name, stat.S_IWRITE)
    if os.path.isfile(name):
        os.remove(name)
    elif os.path.isdir(name):
        os.rmdir(name)
def now():
    """Current local time as a POSIX-style timestamp (UTC epoch + local offset)."""
    local_offset = (datetime.datetime.now() - datetime.datetime.utcnow()).total_seconds()
    return time.time() + local_offset
def try_float(val, default=0):
    """Coerce *val* to float, returning *default* when conversion fails.

    Also tolerates non-numeric types such as None (TypeError), not only
    unparseable strings (ValueError) — the original let TypeError escape,
    which contradicts the "try" contract of the helper.
    """
    try:
        return float(val)
    except (ValueError, TypeError):
        return default
def datetime_string(timestamp=None):
    """Format *timestamp* (datetime, struct_time, or None = now) as
    'YYYY-MM-DD HH:MM:SS'."""
    fmt = "%Y-%m-%d %H:%M:%S"
    if not timestamp:
        return time.strftime(fmt)
    if hasattr(timestamp, 'strftime'):
        return timestamp.strftime(fmt)
    return time.strftime(fmt, timestamp)
def two_digits(n):
    """Zero-pad *n* (int or numeric string) to at least two digits."""
    return '{:02d}'.format(int(n))
def program_delay(program):
    """Whole days until the next run of *program*.

    Walks the program start forward by its modulo period (given in
    minutes) until it is no longer before the start of today.
    """
    midnight = datetime.datetime.combine(datetime.date.today(), datetime.time.min)
    seconds = (program.start - midnight).total_seconds()
    period = program.modulo * 60
    while seconds < 0:
        seconds += period
    return int(seconds / (24 * 3600))
def formatTime(t):
    """Render an 'HH:MM...' string *t* per the configured clock style.

    Returns *t* untouched in 24-hour mode; otherwise converts the leading
    hour to 12-hour form and appends an am/pm suffix.
    """
    from options import options
    if options.time_format:
        return t
    hour = int(t[:2])
    if hour == 0:
        display_hour = 12
    elif hour > 12:
        display_hour = hour - 12
    else:
        display_hour = hour
    suffix = " am" if hour < 12 else " pm"
    return str(display_hour) + t[2:] + suffix
def themes():
    """List the available theme directories under static/themes."""
    import os
    theme_dir = os.path.join('static', 'themes')
    return os.listdir(theme_dir)
def determine_platform():
    """Best-effort hardware/OS detection.

    Returns 'pi' (Raspberry Pi), 'bo' (BeagleBone), 'nt' (Windows),
    or '' when nothing is recognized.
    """
    import os
    try:
        import RPi.GPIO  # noqa: F401 -- presence alone identifies a Pi
        return 'pi'
    except Exception:
        pass
    try:
        import Adafruit_BBIO.GPIO  # noqa: F401 -- presence identifies a BeagleBone
        return 'bo'
    except Exception:
        pass
    return 'nt' if os.name == 'nt' else ''
def get_rpi_revision():
    """Raspberry Pi board revision, or 0 when RPi.GPIO is unavailable."""
    try:
        from RPi import GPIO
        return GPIO.RPI_REVISION
    except ImportError:
        return 0
def reboot(wait=1, block=False):
    """Reboot the machine after shutting OSPy down cleanly.

    With ``block=False`` (default) the work is handed off to a background
    thread so the caller (e.g. a web handler) can return first; that
    thread re-enters this function with ``block=True``.

    :param wait: seconds to sleep after clearing stations, before rebooting
    :param block: perform the shutdown/reboot in the current thread
    """
    if block:
        # Stop the web server first:
        from ospy import server
        server.stop()
        # De-activate all stations before the power drops.
        from ospy.stations import stations
        stations.clear()
        time.sleep(wait)
        logging.info("Rebooting...")
        import subprocess
        if determine_platform() == 'nt':
            subprocess.Popen('shutdown /r /t 0'.split())
        else:
            subprocess.Popen(['reboot'])
    else:
        from threading import Thread
        # Non-daemon so the interpreter waits for the shutdown sequence.
        t = Thread(target=reboot, args=(wait, True))
        t.daemon = False
        t.start()
def poweroff(wait=1, block=False):
    """Power the machine off after shutting OSPy down cleanly.

    With ``block=False`` (default) the shutdown runs on a background
    thread so the caller can finish first; that thread re-enters with
    ``block=True``.

    :param wait: seconds to sleep after clearing stations, before power-off
    :param block: perform the shutdown in the current thread
    """
    if block:
        # Stop the web server first:
        from ospy import server
        server.stop()
        # De-activate all stations before the power drops.
        from ospy.stations import stations
        stations.clear()
        time.sleep(wait)
        logging.info("Powering off...")
        import subprocess
        if determine_platform() == 'nt':
            subprocess.Popen('shutdown /t 0'.split())
        else:
            subprocess.Popen(['poweroff'])
    else:
        from threading import Thread
        # Non-daemon so the interpreter waits for the shutdown sequence.
        t = Thread(target=poweroff, args=(wait, True))
        t.daemon = False
        t.start()
def restart(wait=1, block=False):
    """Restart the OSPy process itself (not the machine).

    With ``block=False`` (default) the restart is performed on a
    background thread; that thread re-enters with ``block=True``.

    :param wait: seconds to sleep after clearing stations, before re-exec
    :param block: perform the restart in the current thread
    """
    if block:
        # Stop the web server first:
        from ospy import server
        server.stop()
        # De-activate all stations before replacing the process.
        from ospy.stations import stations
        stations.clear()
        time.sleep(wait)
        logging.info("Restarting...")
        import sys
        if determine_platform() == 'nt':
            import subprocess
            # Use this weird construction to start a separate process that is not killed when we stop the current one
            subprocess.Popen(['cmd.exe', '/c', 'start', sys.executable] + sys.argv)
        else:
            import os
            # Replace the current process image with a fresh interpreter.
            os.execl(sys.executable, sys.executable, *sys.argv)
    else:
        from threading import Thread
        # Non-daemon so the interpreter waits for the restart sequence.
        t = Thread(target=restart, args=(wait, True))
        t.daemon = False
        t.start()
def uptime():
    """Human-readable system uptime from /proc/uptime, or 'Unknown'."""
    try:
        with open("/proc/uptime") as f:
            seconds = float(f.read().split()[0])
        # Drop the fractional-second tail of the timedelta repr.
        result = str(datetime.timedelta(seconds=seconds)).split('.')[0]
    except Exception:
        result = 'Unknown'
    return result
def get_ip():
    """Returns the IP address if available, else 'Unknown'."""
    try:
        import subprocess
        proc = subprocess.Popen('ip route list', shell=True, stdout=subprocess.PIPE)
        output = proc.communicate()[0]
        tokens = output.split()
        # The address follows the 'src' keyword in the route listing.
        return tokens[tokens.index('src') + 1]
    except Exception:
        return 'Unknown'
def get_mac():
    """Return the MAC address of eth0 read from sysfs, or 'Unknown'.

    Fix: the original left the file handle open (no close); reading via a
    ``with`` block releases it deterministically.
    """
    try:
        with open('/sys/class/net/eth0/address') as f:
            return str(f.read())
    except Exception:
        return 'Unknown'
def get_meminfo():
    """Return the information in /proc/meminfo as a dictionary.

    Falls back to an 'Unknown' MemTotal/MemFree pair when the file
    cannot be read or parsed.
    """
    try:
        info = {}
        with open('/proc/meminfo') as f:
            for line in f:
                parts = line.split(':')
                info[parts[0]] = parts[1].strip()
        return info
    except Exception:
        return {
            'MemTotal': 'Unknown',
            'MemFree': 'Unknown'
        }
def get_netdevs():
    """RX and TX totals (MiB) for each non-loopback network device."""
    mib = 1024.0 * 1024.0
    try:
        with open('/proc/net/dev') as f:
            dump = f.readlines()
        devices = {}
        # First two lines of /proc/net/dev are column headers.
        for line in dump[2:]:
            fields = line.split(':')
            name = fields[0].strip()
            if name == 'lo':
                continue
            counters = fields[1].split()
            devices[name] = {'rx': float(counters[0]) / mib,
                             'tx': float(counters[8]) / mib}
        return devices
    except Exception:
        return {}
def get_cpu_temp(unit=None):
    """CPU temperature as a string, Celsius by default.

    ``unit='F'`` converts to Fahrenheit; any other non-None unit returns
    the Celsius value forced through float().  Returns '!!' on failure.
    """
    import os
    try:
        platform = determine_platform()
        if platform == 'bo':
            # BeagleBone exposes millidegrees through hwmon.
            reading = os.popen('cat /sys/class/hwmon/hwmon0/device/temp1_input').readline()
            temp = str(int(float(reading) / 1000))
        elif platform == 'pi':
            reading = os.popen('vcgencmd measure_temp').readline()
            temp = reading.replace("temp=", "").replace("'C\n", "")
        else:
            temp = str(0)
        if unit == 'F':
            return str(9.0 / 5.0 * float(temp) + 32)
        if unit is not None:
            return str(float(temp))
        return temp
    except Exception:
        return '!!'
def mkdir_p(path):
    """Create *path* and parents; an already-existing directory is not an error."""
    import os
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def duration_str(total_seconds):
    """Format a second count as 'MM:SS'."""
    return '%02d:%02d' % divmod(total_seconds, 60)


def timedelta_duration_str(time_delta):
    """Format a datetime.timedelta as 'MM:SS'."""
    return duration_str(time_delta.total_seconds())
def timedelta_time_str(time_delta, with_seconds=False):
    """Clock-style 'HH:MM[:SS]' for a timedelta, wrapping at 24 hours."""
    _, within_day = divmod(time_delta.total_seconds(), 24 * 3600)
    hours, remainder = divmod(within_day, 3600)
    if hours == 24:
        hours = 0
    minutes, seconds = divmod(remainder, 60)
    text = '%02d:%02d' % (hours, minutes)
    if with_seconds:
        text += ':%02d' % seconds
    return text


def minute_time_str(minute_time, with_seconds=False):
    """Clock-style 'HH:MM[:SS]' for a minute-of-day count."""
    return timedelta_time_str(datetime.timedelta(minutes=minute_time), with_seconds)
def short_day(index):
    """Three-letter English weekday name, Monday == 0."""
    return ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[index]


def long_day(index):
    """Full English weekday name, Monday == 0."""
    return ("Monday", "Tuesday", "Wednesday", "Thursday",
            "Friday", "Saturday", "Sunday")[index]
def stop_onrain():
    """Stop stations that do not ignore rain."""
    from ospy.stations import stations
    for station in stations.get():
        if station.ignore_rain:
            continue
        station.activated = False
def save_to_options(qdict):
    """Persist submitted form values (``qdict``) into the global options.

    Walks every declared option and updates it from the matching query
    parameter, coercing the raw string to the type of the option's
    default value.  Checkbox semantics apply: a missing or "off" value
    means False, and a missing multi-select entry clears that choice.
    """
    from ospy.options import options
    for option in options.OPTIONS:
        key = option['key']
        multi_enum = option.get('multi_options')
        if 'category' in option:
            if key in qdict:
                value = qdict[key]
                if isinstance(option['default'], bool):
                    # Checkboxes post "off" (or nothing) when unchecked.
                    options[key] = True if value and value != "off" else False
                elif isinstance(option['default'], int) or isinstance(option['default'], float):
                    # Silently skip numeric submissions outside the declared range.
                    if 'min' in option and float(qdict[key]) < option['min']:
                        continue
                    if 'max' in option and float(qdict[key]) > option['max']:
                        continue
                    options[key] = type(option['default'])(qdict[key])
                else:
                    options[key] = qdict[key]
            elif multi_enum:
                # Multi-selects post one "<key>_<name>" parameter per chosen entry.
                if hasattr(multi_enum, '__call__'):
                    multi_enum = multi_enum()  # choices may be provided lazily
                value = []
                for name in multi_enum:
                    v_name = key + '_' + name
                    if v_name in qdict and qdict[v_name] and qdict[v_name] != "off":
                        value.append(name)
                options[key] = value
            else:
                if isinstance(option['default'], bool):
                    # Unchecked checkboxes are simply absent from qdict.
                    options[key] = False
########################
#### Login Handling ####
def password_salt():
    """Generate a 64-character random salt of printable-range ASCII.

    Fix: ``range`` works on both Python 2 and 3; the original ``xrange``
    is Python-2-only and raises NameError under Python 3.
    """
    return "".join(chr(random.randint(33, 127)) for _ in range(64))
def password_hash(password, salt):
    """SHA-1 hex digest of *password* concatenated with *salt*."""
    import hashlib
    return hashlib.sha1(password + salt).hexdigest()
def test_password(password):
    """Check *password* against the stored hash, with brute-force throttling.

    Each failed attempt adds one second (capped at 30) of delay applied to
    every subsequent attempt; a successful login resets the delay.  The
    module-level lock serializes checks so parallel requests cannot bypass
    the throttle.
    """
    from ospy.options import options
    # Brute-force protection:
    with BRUTEFORCE_LOCK:
        if options.password_time > 0:
            time.sleep(options.password_time)
        result = options.password_hash == password_hash(password, options.password_salt)
        if result:
            options.password_time = 0
        else:
            if options.password_time < 30:
                options.password_time += 1
    return result
def check_login(redirect=False):
    """Return True when the current request is authenticated.

    Accepts any of: password protection disabled, an already-validated
    session, or a correct ``pw`` query parameter.  With ``redirect=True``
    a failure raises a web.py response instead of returning False:
    401 for a wrong password, a redirect to /login otherwise.
    """
    from ospy import server
    import web
    from ospy.options import options
    qdict = web.input()
    try:
        if options.no_password:
            return True
        # KeyError below means no session has been established yet.
        if server.session.validated:
            return True
    except KeyError:
        pass
    if 'pw' in qdict:
        if test_password(qdict['pw']):
            return True
        if redirect:
            raise web.unauthorized()
        return False
    if redirect:
        raise web.seeother('/login', True)
    return False
def get_input(qdict, key, default=None, cast=None):
    """Fetch ``qdict[key]`` (optionally run through *cast*), else *default*."""
    if key not in qdict:
        return default
    value = qdict[key]
    return cast(value) if cast is not None else value
def template_globals():
    """Build the globals dictionary made available to web.py templates.

    Exposes a selection of builtins plus everything imported inside this
    function (options, stations, programs, ...) and everything in this
    module's global scope, so templates can call helpers directly.

    NOTE: the result depends on the exact local names bound by the
    imports below (captured via locals()) — do not rename them.
    """
    import json
    import plugins
    import urllib
    from web import ctx
    from ospy.inputs import inputs
    from ospy.log import log
    from ospy.options import level_adjustments, options, rain_blocks
    from ospy.programs import programs, ProgramType
    from ospy.runonce import run_once
    from ospy.stations import stations
    from ospy import version
    from ospy.server import session
    result = {
        'str': str,
        'bool': bool,
        'int': int,
        'round': round,
        'isinstance': isinstance,
        'sorted': sorted,
        'hasattr': hasattr,
        'now': now
    }
    result.update(globals())  # Everything in the global scope of this file will be available
    result.update(locals())  # Everything imported in this function will be available
    return result
def help_files_in_directory(docs_dir):
    """Collect (title, path) pairs for every .md file in *docs_dir*.

    Titles are derived from the file name: dots and underscores become
    spaces and the result is title-cased.  Returns [] when the directory
    does not exist.
    """
    import os
    if not os.path.isdir(docs_dir):
        return []
    entries = []
    for md_name in sorted(os.listdir(docs_dir)):
        if not md_name.endswith('.md'):
            continue
        title = os.path.splitext(os.path.basename(md_name))[0]
        title = title.replace('.', ' ').replace('_', ' ').title()
        entries.append((title, os.path.relpath(os.path.join(docs_dir, md_name))))
    return entries
def get_help_files():
    """Build the hierarchical help index as (level, title[, filename]) tuples.

    Level 1 entries are section headers (OSPy, API, Plug-ins); level 2/3
    entries point at markdown documents discovered on disk.
    """
    import os
    result = []
    result.append((1, 'OSPy'))
    result.append((2, 'Readme', 'README.md'))
    for doc in help_files_in_directory(os.path.join('ospy', 'docs')):
        result.append((2, doc[0], doc[1]))
    result.append((1, 'API'))
    result.append((2, 'Readme', os.path.join('api', 'README.md')))
    for doc in help_files_in_directory(os.path.join('api', 'docs')):
        result.append((2, doc[0], doc[1]))
    result.append((1, 'Plug-ins'))
    result.append((2, 'Readme', os.path.join('plugins', 'README.md')))
    from plugins import plugin_names, plugin_dir, plugin_docs_dir
    # Fix: dict.items() works on both Python 2 and 3; the original
    # .iteritems() is Python-2-only and raises AttributeError on Python 3.
    for module, name in plugin_names().items():
        readme_file = os.path.join(os.path.relpath(plugin_dir(module)), 'README.md')
        readme_exists = os.path.isfile(readme_file)
        docs = help_files_in_directory(plugin_docs_dir(module))
        if readme_exists or docs:
            if readme_exists:
                result.append((2, name, readme_file))
            else:
                result.append((2, name))
        for doc in docs:
            result.append((3, doc[0], doc[1]))
    return result
def get_help_file(id):
    """Render help document number *id* (an index into get_help_files()) as HTML.

    Returns '' for out-of-range ids, header-only entries (no filename),
    or any rendering failure (missing file, markdown error, ...).
    """
    import web
    try:
        id = int(id)
        docs = get_help_files()
        if 0 <= id < len(docs):
            option = docs[id]
            # Entries longer than 2 carry a filename; headers do not.
            if len(option) > 2:
                filename = option[2]
                with open(filename) as fh:
                    import markdown
                    # GitHub-flavored markdown with syntax highlighting.
                    converted = markdown.markdown(fh.read(), extensions=['partial_gfm', 'markdown.extensions.codehilite'])
                    # Run the converted HTML through the template engine so it
                    # shares the globals available to regular pages.
                    return web.template.Template(converted, globals=template_globals())()
    except Exception:
        pass
    return ''
| 26.82218 | 122 | 0.571072 | 1,691 | 14,028 | 4.649911 | 0.222945 | 0.019331 | 0.008902 | 0.01399 | 0.197253 | 0.167748 | 0.152486 | 0.125779 | 0.109755 | 0.081903 | 0 | 0.014506 | 0.3071 | 14,028 | 522 | 123 | 26.873563 | 0.794444 | 0.04876 | 0 | 0.301266 | 0 | 0 | 0.059852 | 0.007463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093671 | false | 0.043038 | 0.141772 | 0.017722 | 0.362025 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f14d80c30ce922024138d28c7ffb4f35b817d6e | 724 | py | Python | restaurant/migrations/0006_auto_20160707_2014.py | aggolb/dinnerpad | 57558908e10f218bae32d1f99d72b6aeca9c5836 | [
"MIT"
] | null | null | null | restaurant/migrations/0006_auto_20160707_2014.py | aggolb/dinnerpad | 57558908e10f218bae32d1f99d72b6aeca9c5836 | [
"MIT"
] | null | null | null | restaurant/migrations/0006_auto_20160707_2014.py | aggolb/dinnerpad | 57558908e10f218bae32d1f99d72b6aeca9c5836 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a 'category' choice field to MenuItem and name the reverse
    relation from Restaurant to its items ('menu_items')."""

    dependencies = [
        ('restaurant', '0005_auto_20160707_1939'),
    ]

    operations = [
        migrations.AddField(
            model_name='menuitem',
            name='category',
            # Optional 3-letter category code with a fixed choice set.
            field=models.CharField(blank=True, max_length=3, null=True, choices=[(b'STA', b'Starters'), (b'MAI', b'Main'), (b'DRI', b'Drinks'), (b'DES', b'Dessert')]),
        ),
        migrations.AlterField(
            model_name='menuitem',
            name='restaurant',
            # related_name enables restaurant.menu_items lookups.
            field=models.ForeignKey(related_name='menu_items', to='restaurant.Restaurant'),
        ),
    ]
| 28.96 | 167 | 0.598066 | 76 | 724 | 5.526316 | 0.657895 | 0.042857 | 0.080952 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.033088 | 0.248619 | 724 | 24 | 168 | 30.166667 | 0.738971 | 0.029006 | 0 | 0.222222 | 0 | 0 | 0.192582 | 0.062767 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f15c1992685c6fd92039732b2bac2299b5fda58 | 257 | py | Python | jp.atcoder/abc003/abc003_3/8763316.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc003/abc003_3/8763316.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc003/abc003_3/8763316.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
n, k, *r = map(int, sys.stdin.read().split())
r.sort()
cand = r[-k:]
def main():
    """ABC003 C: repeatedly average the rating with each better candidate,
    taken in ascending order, and print the result."""
    rating = 0
    for score in cand:
        if score > rating:
            rating = (rating + score) / 2
    print(rating)
if __name__ == "__main__":
main()
| 13.526316 | 46 | 0.451362 | 37 | 257 | 2.918919 | 0.621622 | 0.092593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012579 | 0.381323 | 257 | 18 | 47 | 14.277778 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0.033473 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f161c3e730e9a961c38fd31d9be347a04f925d2 | 1,580 | py | Python | day 02/Hans - Python/day2.py | AE-nv/aedvent-code-2021 | 7ce199d6be5f6cce2e61a9c0d26afd6d064a86a7 | [
"MIT"
] | 1 | 2021-12-02T12:09:11.000Z | 2021-12-02T12:09:11.000Z | day 02/Hans - Python/day2.py | AE-nv/aedvent-code-2021 | 7ce199d6be5f6cce2e61a9c0d26afd6d064a86a7 | [
"MIT"
] | null | null | null | day 02/Hans - Python/day2.py | AE-nv/aedvent-code-2021 | 7ce199d6be5f6cce2e61a9c0d26afd6d064a86a7 | [
"MIT"
] | 1 | 2021-12-01T21:14:41.000Z | 2021-12-01T21:14:41.000Z | from typing import List, Tuple
from collections import namedtuple
from enum import Enum
class Direction(Enum):
    """Submarine movement directions from the puzzle input."""
    FORWARD = "FORWARD"
    UP = "UP"
    DOWN = "DOWN"


# One parsed puzzle line: a Direction plus a unit count.
Instruction = namedtuple('Instruction', 'direction units')


def get_combined_directions(instructions_list: List[Instruction]) -> Tuple[int, int]:
    """Part 1: total (horizontal, depth) after applying all instructions."""
    totals = {direction: 0 for direction in Direction}
    for instruction in instructions_list:
        totals[instruction.direction] += int(instruction.units)
    depth = totals[Direction.DOWN] - totals[Direction.UP]
    return totals[Direction.FORWARD], depth


def get_combined_directions_aim(instructions_list: List[Instruction]) -> Tuple[int, int, int]:
    """Part 2: (horizontal, depth, aim) where up/down steer the aim."""
    aim = 0
    horizontal = 0
    depth = 0
    for instruction in instructions_list:
        units = int(instruction.units)
        if instruction.direction is Direction.DOWN:
            aim += units
        elif instruction.direction is Direction.UP:
            aim -= units
        elif instruction.direction is Direction.FORWARD:
            horizontal += units
            depth += aim * units
    return horizontal, depth, aim


def parse_input(input_str: str):
    """Turn a line like 'forward 5' into an Instruction."""
    direction, units = input_str.upper().split()
    return Instruction(Direction(direction), int(units))
if __name__ == '__main__':
    # Parse every line of the puzzle input into Instruction tuples.
    instructions = list(map(parse_input, open('./example.txt').readlines()))
    horizontal, vertical = get_combined_directions(instructions_list=instructions)
    print(f"Solution of part 1: {horizontal * vertical}")
    horizontal, vertical, aim = get_combined_directions_aim(instructions_list=instructions)
    print(f"Solution of part 2: {horizontal * vertical}")
7f16280a4ee127c66ec6881146bdacb0609fbd70 | 5,100 | py | Python | sopel_torrentinfo/providers.py | dgw/sopel-torrentinfo | 8e73324574b97b11e220a9dacf33f3f8e56ce758 | [
"EFL-2.0"
] | null | null | null | sopel_torrentinfo/providers.py | dgw/sopel-torrentinfo | 8e73324574b97b11e220a9dacf33f3f8e56ce758 | [
"EFL-2.0"
] | 3 | 2017-10-08T09:13:59.000Z | 2020-12-22T19:37:40.000Z | sopel_torrentinfo/providers.py | dgw/sopel-torrentinfo | 8e73324574b97b11e220a9dacf33f3f8e56ce758 | [
"EFL-2.0"
] | 2 | 2017-10-07T18:16:05.000Z | 2018-01-18T15:09:13.000Z | # coding=utf-8
"""Link handling provider logic."""
import abc
from collections import OrderedDict
import re
from lxml import etree
class ProviderManager:
    """Registry mapping torrent-site URL patterns to provider instances."""

    def __init__(self):
        # Compiled URL pattern -> provider instance, in registration order.
        # TODO: Can be a regular dict in py3.7+ only;
        # dict remembering insertion order is guaranteed as of py3.7
        self.providers = OrderedDict()

    def register_provider(self, provider):
        """Register *provider* (instance or instantiable class) under its pattern."""
        if not isinstance(provider, TorrentInfoProvider):
            try:
                provider = provider()
            except Exception:
                # doesn't matter what happened; bail if something's fucky
                raise ValueError("Not a TorrentInfoProvider subclass: %s" % provider)
        self.providers[provider.get_url_pattern()] = provider

    def remove_provider(self, provider):
        """Drop *provider* and its URL pattern from the registry."""
        pattern = provider.get_url_pattern()
        try:
            del self.providers[pattern]
        except KeyError:
            raise RuntimeError('Attempt to remove a provider that was not registered.')

    def map_url_to_provider(self, url):
        """Return the first registered provider whose pattern matches *url*."""
        for pattern, provider in self.providers.items():
            if pattern.match(url):
                return provider
        # No matching provider; explicit better than implicit.
        return None
class TorrentInfoProvider(abc.ABC):
    """Abstract base for torrent link info providers.

    Concrete providers define URL_PATTERN plus get_fetch_url()/parse()
    and are registered with the ProviderManager.
    """

    @property
    @abc.abstractmethod
    def URL_PATTERN(self):
        """Required URL pattern, as it would be passed to ``@plugin.url``."""

    def get_url_pattern(self):
        """Compiled form of URL_PATTERN, for Sopel's rule manager."""
        return re.compile(self.URL_PATTERN)

    @property
    def DISPLAY_NAME(self):
        """Human-readable provider name.

        For example, ``Nyaa`` or ``TokyoTosho``; defaults to the
        subclass's ``__name__`` when not overridden.
        """
        return type(self).__name__

    @abc.abstractmethod
    def get_fetch_url(self, trigger):
        """Return the URL to fetch, given a matching URL ``trigger``."""

    @abc.abstractmethod
    def parse(self, response, trigger):
        """Parse the fetched ``response`` (a ``requests.Response``).

        Must return an iterable of output pieces, for example::

            ['Title: 60th Annual Kohaku', 'Uploader: NHK Official',
             'Size: 420.69 GiB']

        which the plugin's output stage joins together, prefixed with a
        string built from ``DISPLAY_NAME``.
        """
class Nyaa(TorrentInfoProvider):
    """Handler for Nyaa.si links."""

    URL_PATTERN = r'https?:\/\/(?:www\.)?nyaa\.si\/(view|download)\/(\d+)'

    def get_fetch_url(self, trigger):
        """Always fetch the human-readable /view/ page for the torrent id."""
        return 'https://nyaa.si/view/%s' % trigger.group(2)

    def parse(self, response, trigger):
        """Pull title and summary pieces out of Nyaa's OpenGraph meta tags."""
        page = etree.HTML(response.content)
        title = page.cssselect('meta[property="og:title"]')[0].get('content')
        description = page.cssselect('meta[property="og:description"]')[0].get('content')
        # og:description is a pipe-delimited summary; the first three
        # segments hold the interesting details.
        segments = description.split("|")
        pieces = [title.replace(' :: Nyaa', '')]
        pieces.append(segments[0])
        pieces.append(segments[1])
        pieces.append(segments[2])
        if trigger.group(1) != 'view':
            # For /download/ links, also show the canonical view URL.
            pieces.append(self.get_fetch_url(trigger))
        return pieces
class TokyoTosho(TorrentInfoProvider):
    """Handler for TokyoTosho links."""

    URL_PATTERN = r'https?:\/\/(?:www\.)?tokyotosho\.info\/details\.php\?id=(\d+)'

    def get_fetch_url(self, trigger):
        """Canonical details page for the torrent id captured from the link."""
        return 'https://www.tokyotosho.info/details.php?id=%s' % trigger.group(1)

    def parse(self, response, trigger):
        """Extract title, category, size and submission info from the
        details block of the page."""
        details = etree.HTML(response.content).cssselect('div.details')[0]
        # Torrent title.
        title = details.xpath('//a[@type="application/x-bittorrent"]/text()[normalize-space()]')[0]
        # Category name.
        category = details.xpath('//li[contains(text(), "Torrent Type")]/following::li[1]/a/text()')[0]
        # File size.
        size = details.xpath('//li[contains(text(), "Filesize")]/following::li[1]/text()')[0]
        # Submitting user and submission timestamp.
        submitter = details.xpath('//li/em[contains(text(), "Submitter")]/following::li[1]/text()')[0].rstrip()
        submitted_at = details.xpath('//li[contains(text(), "Date Submitted")]/following::li[1]/text()')[0]
        return [
            title,
            category,
            size,
            "Submitted by {} at {}".format(submitter, submitted_at),
        ]
7f1a78253af63ce99cd33cff95dd1f2d2d7c9566 | 6,210 | py | Python | tools/dali.py | luzai/InsightFace_Pytorch | 2f3d865aa5fa14896df27fe9b43a5c4ceb02c7dd | [
"MIT"
] | 4 | 2019-01-24T03:43:36.000Z | 2020-10-24T08:36:28.000Z | tools/dali.py | luzai/InsightFace_Pytorch | 2f3d865aa5fa14896df27fe9b43a5c4ceb02c7dd | [
"MIT"
] | null | null | null | tools/dali.py | luzai/InsightFace_Pytorch | 2f3d865aa5fa14896df27fe9b43a5c4ceb02c7dd | [
"MIT"
] | null | null | null | from lz import *
from config import conf
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.plugin.pytorch import DALIGenericIterator
base = "/media/mem_data/" + conf.dataset_name + "/"
idx_files = [base + "train.tc.idx"]
rec_files = [base + "train.rec"]
class PlainMxnetDs(object):
    """Reads the MXNet .rec/.idx face dataset header and builds the
    identity/image-index bookkeeping used by the DALI pipelines.

    Side effect: sets ``conf.num_clss`` to the number of identities.
    """

    def __init__(self):
        from mxnet import recordio
        self.imgrec = recordio.MXIndexedRecordIO(
            base + "train.idx", rec_files[0],
            'r')
        s = self.imgrec.read_idx(0)
        header, _ = recordio.unpack(s)
        # A positive flag marks the extended header layout (ms1m/glint
        # style) where label = [first_identity, last_identity].
        assert header.flag > 0, 'ms1m or glint ...'
        logging.info(f'header0 label {header.label}')
        self.header0 = (int(header.label[0]), int(header.label[1]))
        self.id2range = {}  # identity -> (first_image_idx, last_image_idx)
        self.idx2id = {}    # image_idx -> identity
        self.imgidx = []    # all image record indices
        self.ids = []       # all identity record indices
        ids_shif = int(header.label[0])
        for identity in list(range(int(header.label[0]), int(header.label[1]))):
            # Each identity record's label holds its image index range.
            s = self.imgrec.read_idx(identity)
            header, _ = recordio.unpack(s)
            a, b = int(header.label[0]), int(header.label[1])
            self.id2range[identity] = (a, b)
            self.ids.append(identity)
            self.imgidx += list(range(a, b))
        self.ids = np.asarray(self.ids)
        self.num_classes = len(self.ids)
        # Remap raw identity ids onto a dense 0..num_classes-1 range.
        self.ids_map = {identity - ids_shif: id2 for identity, id2 in
                        zip(self.ids, range(self.num_classes))}  # now cutoff==0, this is identitical
        ids_map_tmp = {identity: id2 for identity, id2 in zip(self.ids, range(self.num_classes))}
        self.ids = np.asarray([ids_map_tmp[id_] for id_ in self.ids])
        self.id2range = {ids_map_tmp[id_]: range_ for id_, range_ in self.id2range.items()}
        for id_, range_ in self.id2range.items():
            for idx_ in range(range_[0], range_[1]):
                self.idx2id[idx_] = id_
        conf.num_clss = self.num_classes
plmxds = PlainMxnetDs()
# Let us define a simple pipeline that takes images stored in recordIO format, decodes them and prepares them for ingestion in DL framework (crop, normalize and NHWC -> NCHW conversion).
class RecordIOPipeline(Pipeline):
    """DALI pipeline: read the MXNet recordIO shard for this GPU, decode
    JPEGs, apply a random horizontal flip, and normalize to NCHW float
    (mean/std of 0.5*255 maps uint8 [0, 255] to roughly [-1, 1])."""

    def __init__(self, batch_size, device_id, num_gpus, num_threads=2):
        super(RecordIOPipeline, self).__init__(batch_size,
                                               num_threads,
                                               device_id,
                                               prefetch_queue_depth={"cpu_size": 6, "gpu_size": 2}
                                               )
        self.input = ops.MXNetReader(path=rec_files,
                                     index_path=idx_files,
                                     random_shuffle=True,
                                     shard_id=device_id,
                                     num_shards=num_gpus,
                                     # Shuffle buffer sized to the whole shard.
                                     initial_fill=len(plmxds.imgidx) // num_gpus,
                                     )
        # "mixed": JPEG parsing on CPU, decoding on GPU.
        self.decode = ops.ImageDecoder(device="mixed", output_type=types.RGB)
        self.cmnp = ops.CropMirrorNormalize(device="gpu",
                                            output_dtype=types.FLOAT,
                                            output_layout=types.NCHW,
                                            image_type=types.RGB,
                                            mean=[0.5 * 255., 0.5 * 255., 0.5 * 255.],
                                            std=[0.5 * 255., 0.5 * 255., 0.5 * 255.]
                                            )
        # Coin flip drives the random mirror in define_graph.
        self.coin = ops.CoinFlip(probability=0.5)
        self.iter = 0

    def define_graph(self):
        # reader -> decode -> (random mirror + normalize)
        inputs, labels = self.input(name="Reader")
        images = self.decode(inputs)
        rng = self.coin()
        images = self.cmnp(images, mirror=rng)
        return (images, labels)

    def iter_setup(self):
        # No per-iteration external feeding required.
        pass
num_gpus = conf.num_devs
batch_size = conf.batch_size // conf.num_devs
pipes = [RecordIOPipeline(batch_size=batch_size, device_id=device_id, num_gpus=num_gpus,
) for device_id in
range(num_gpus)]
pipes[0].build()
class FDALIGenericIterator(DALIGenericIterator):
    """DALIGenericIterator variant that merges the per-GPU output dicts
    into a single batch dict and adds a forced reset."""

    def __len__(self):
        # Batches per epoch: floor division plus one (may overcount by one
        # when the dataset size divides the batch size evenly).
        return (len(plmxds.imgidx) // conf.batch_size) + 1

    def force_reset(self):
        # Reset the epoch counter and every underlying pipeline, even
        # mid-epoch (unlike the base class's guarded reset()).
        if self._stop_at_epoch:
            self._counter = 0
        else:
            self._counter = self._counter % self._size
        for p in self._pipes:
            p.reset()

    def __next__(self):
        data = super(FDALIGenericIterator, self).__next__()
        if isinstance(data, list):
            # One dict per GPU: concatenate labels and move images to device 0.
            labels = []
            imgs = []
            for d in data:
                l = d["labels"]
                if len(d["labels"].shape) == 2:
                    # Labels may arrive as (batch, 1); flatten to (batch,).
                    l = l[:, 0]
                labels.append(l.long())
                imgs.append(d["imgs"].to(0))
            labels = torch.cat(labels)
            imgs = torch.cat(imgs)
            return {"imgs": imgs, "labels": labels, "labels_cpu": labels}
        else:
            return data
fdali_iter = FDALIGenericIterator(pipes, ['imgs', 'labels'],
pipes[0].epoch_size("Reader"))
def get_loader_enum(loader):
    """Return an enumerate() wrapper over *loader*.

    The original retry loop was vestigial: ``enumerate()`` never raises
    here and the success flag was set unconditionally, so the loop always
    ran exactly once.  Reduced to the single call it performed.
    """
    return enumerate(loader)
if __name__ == '__main__':
    loader_enum = get_loader_enum(fdali_iter)
    # Fix: defined up-front so the except branch cannot NameError when the
    # very first next() raises StopIteration (ind_data was previously
    # referenced in the log line before any assignment).
    ind_data = None
    while True:
        try:
            ind_data, data = next(loader_enum)
        except StopIteration as err:
            logging.info(f'one epoch finish err is {err}, {ind_data}')
            fdali_iter.reset()
            loader_enum = get_loader_enum(fdali_iter)
            ind_data, data = next(loader_enum)
        label = data["labels"]
        imgs = data["imgs"]
        print(ind_data, imgs.shape, label.shape, np.unique(label, return_counts=True)[1].mean())
        # plt_imshow(imgs[0].cpu() )
        plt_imshow_tensor(imgs[label == 12].cpu(), ncol=6)
        plt.show()
        # pipe_out = pipes[0].run()
        # images, labels = pipe_out
        # im1 = images.asCPU()
        # im2 = im1.as_array()
        # print(im2.shape)
        # plt_imshow(im2[0])
        # plt.show()
7f1c6481280e8c66c3c95bb553f4716418bdc89b | 3,663 | py | Python | integration/keeper_secrets_manager_cli/keeper_secrets_manager_cli/exec.py | inna-btc/secrets-manager | 5c65fea092e80b25d2466b395fa03eabd6a98f9b | [
"MIT"
] | null | null | null | integration/keeper_secrets_manager_cli/keeper_secrets_manager_cli/exec.py | inna-btc/secrets-manager | 5c65fea092e80b25d2466b395fa03eabd6a98f9b | [
"MIT"
] | null | null | null | integration/keeper_secrets_manager_cli/keeper_secrets_manager_cli/exec.py | inna-btc/secrets-manager | 5c65fea092e80b25d2466b395fa03eabd6a98f9b | [
"MIT"
] | 1 | 2021-12-18T03:15:54.000Z | 2021-12-18T03:15:54.000Z | # -*- coding: utf-8 -*-
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Secrets Manager
# Copyright 2021 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import os
import sys
import subprocess
from keeper_secrets_manager_cli.exception import KsmCliException
from keeper_secrets_manager_core.core import SecretsManager
import re
import json
class Exec:
    def __init__(self, cli):
        # CLI context providing access to the Secrets Manager client.
        self.cli = cli
        # Resolved-notation cache.
        # Since the cli is short lived, this won't stick around long.
        self.local_cache = {}
def _get_secret(self, notation):
# If not in the cache, go get the secret and then store it in the cache.
if notation not in self.local_cache:
value = self.cli.client.get_notation(notation)
if type(value) is dict or type(value) is list:
value = json.dumps(value)
self.local_cache[notation] = str(value)
return self.local_cache[notation]
def env_replace(self):
for env_key, env_value in list(os.environ.items()):
if env_value.startswith(SecretsManager.notation_prefix) is True:
os.environ["_" + env_key] = "_" + env_value
os.environ[env_key] = self._get_secret(env_value)
def inline_replace(self, cmd=None):
if cmd is None:
cmd = []
new_cmd = []
for item in cmd:
# Due to custom fields, that allow spaces in the label, we have not idea
# where the notation ends.
results = re.search(r'{}://.*?$'.format(SecretsManager.notation_prefix), item)
if results is not None:
env_value = results.group()
item = item.replace(env_value, self._get_secret(env_value))
new_cmd.append(item)
cmd = new_cmd
return cmd
def execute(self, cmd, capture_output=False, inline=False):
# Make a version of the command before replacing secrets. We don't want to expose them if
# there is error.
full_cmd = " ".join(cmd)
if len(cmd) == 0:
raise Exception("Cannot execute command, it's missing.")
else:
self.env_replace()
if inline is True:
cmd = self.inline_replace(cmd)
# Python 3.6's subprocess.run does not have a capture flag. Instead it used the PIPE with
# the stderr parameter.
kwargs = {}
if (sys.version_info[0] == 3 and sys.version_info[1] < 7) and capture_output is True:
kwargs["stdout"] = subprocess.PIPE
else:
kwargs["capture_output"] = capture_output
try:
completed = subprocess.run(cmd, **kwargs)
except OSError as err:
message = str(err)
if (re.search(r'WinError 193', message) is not None and
re.search(r'\.ps1', full_cmd, re.IGNORECASE) is not None):
raise KsmCliException("Cannot execute command. If this was a powershell script, please use"
" the command 'powershell {}'".format(full_cmd))
else:
raise KsmCliException("Cannot execute command: {}".format(message))
except Exception as err:
raise KsmCliException("Cannot execute command: {}".format(err))
if completed.returncode != 0:
raise KsmCliException("Return code was: " + str(completed.returncode))
if capture_output is True:
print(completed.stdout)
| 34.885714 | 111 | 0.577669 | 436 | 3,663 | 4.651376 | 0.366972 | 0.027613 | 0.027613 | 0.048817 | 0.085799 | 0.045365 | 0 | 0 | 0 | 0 | 0 | 0.006885 | 0.325962 | 3,663 | 104 | 112 | 35.221154 | 0.814095 | 0.180999 | 0 | 0.046875 | 0 | 0 | 0.083893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078125 | false | 0 | 0.109375 | 0 | 0.234375 | 0.015625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f213145b2e79b609f449693f2315622fd4c8619 | 11,729 | py | Python | notebooks/diffable_python/population_characteristics.py | opensafely/nhs-covid-vaccination-coverage | 61cfafdb9d023546af01ff91c0457f80a787ce7c | [
"MIT"
] | 12 | 2021-01-27T11:49:01.000Z | 2022-02-17T10:19:26.000Z | notebooks/diffable_python/population_characteristics.py | opensafely/nhs-covid-vaccination-coverage | 61cfafdb9d023546af01ff91c0457f80a787ce7c | [
"MIT"
] | 8 | 2021-02-02T16:00:55.000Z | 2022-02-15T14:44:26.000Z | notebooks/diffable_python/population_characteristics.py | opensafely/nhs-covid-vaccination-coverage | 61cfafdb9d023546af01ff91c0457f80a787ce7c | [
"MIT"
] | 6 | 2021-02-16T00:58:14.000Z | 2022-02-17T10:06:57.000Z | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: all
# notebook_metadata_filter: all,-language_info
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown]
# # Vaccines and patient characteristics
# -
# ### Import libraries and data
#
# The datasets used for this report are created using the study definition [`/analysis/study_definition.py`](../analysis/study_definition.py), using codelists referenced in [`/codelists/codelists.txt`](../codelists/codelists.txt).
# +
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import subprocess
from IPython.display import display, Markdown, HTML
import os
suffix = "_tpp"
# get current branch
current_branch = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"], capture_output=True).stdout.decode("utf8").strip()
# -
# ### Import our custom functions
# import custom functions from 'lib' folder
import sys
sys.path.append('../lib/')
from data_processing import load_data
from report_results import find_and_save_latest_date, create_output_dirs
# create output directories to save files into
savepath, savepath_figure_csvs, savepath_table_csvs = create_output_dirs()
# ### Load and Process the raw data
df = load_data()
latest_date, formatted_latest_date = find_and_save_latest_date(df, savepath=savepath)
print(f"Latest Date: {formatted_latest_date}")
# ### Summarise by group and demographics at latest date
# #### Calculate cumulative sums at each date and select latest date + previous figures for comparison
from report_results import cumulative_sums
# +
# population subgroups - in a dict to indicate which field to filter on
population_subgroups = {"80+":1,
"70-79":2,
"care home":3,
"shielding (aged 16-69)":4,
"65-69": 5,
"LD (aged 16-64)": 6,
"60-64": 7,
"55-59": 8,
"50-54": 9,
"40-49": 10,
"30-39": 11,
"18-29": 12,
"16-17": 0
# NB if the population denominator is not included for the final group (0), the key must contain phrase "not in other eligible groups" so that data is presented appropriately
}
groups = population_subgroups.keys()
# list demographic/clinical factors to include for given group
DEFAULT = ["sex","ageband_5yr","ethnicity_6_groups","ethnicity_16_groups", "imd_categories",
"bmi", "chronic_cardiac_disease", "current_copd", "dialysis", "dmards", "dementia",
"psychosis_schiz_bipolar","LD","ssri",
"chemo_or_radio", "lung_cancer", "cancer_excl_lung_and_haem", "haematological_cancer"]
#for specific age bands remove features which are included elsehwere or not prevalent
o65 = [d for d in DEFAULT if d not in ("ageband_5yr", "dialysis")]
o60 = [d for d in DEFAULT if d not in ("ageband_5yr", "dialysis", "LD")]
o50 = [d for d in DEFAULT if d not in ("ageband_5yr", "dialysis", "LD", "dementia",
"chemo_or_radio", "lung_cancer", "cancer_excl_lung_and_haem", "haematological_cancer"
)]
# under50s
u50 = ["sex", "ethnicity_6_groups", "ethnicity_16_groups","imd_categories"]
# dictionary mapping population subgroups to a list of demographic/clinical factors to include for that group
features_dict = {0: u50, ## patients not assigned to a priority group
"care home": ["sex", "ageband_5yr", "ethnicity_6_groups", "dementia"],
"shielding (aged 16-69)": ["newly_shielded_since_feb_15", "sex", "ageband", "ethnicity_6_groups", "imd_categories",
"LD"],
"65-69": o65,
"60-64": o60,
"55-59": o50,
"50-54": o50,
"40-49": u50,
"30-39": u50,
"18-29": u50,
"16-17": ["sex", "ethnicity_6_groups", "imd_categories"],
"LD (aged 16-64)": ["sex", "ageband_5yr", "ethnicity_6_groups"],
"DEFAULT": DEFAULT # other age groups
}
# -
df_dict_cum = cumulative_sums(df, groups_of_interest=population_subgroups, features_dict=features_dict, latest_date=latest_date)
# +
# for details on second/third doses, no need for breakdowns of any groups (only "overall" figures will be included)
second_dose_features = {}
for g in groups:
second_dose_features[g] = []
df_dict_cum_second_dose = cumulative_sums(df, groups_of_interest=population_subgroups, features_dict=second_dose_features,
latest_date=latest_date, reference_column_name="covid_vacc_second_dose_date")
df_dict_cum_third_dose = cumulative_sums(df, groups_of_interest=population_subgroups, features_dict=second_dose_features,
latest_date=latest_date, reference_column_name="covid_vacc_third_dose_date")
# -
# ### Cumulative vaccination figures - overall
from report_results import make_vaccine_graphs
make_vaccine_graphs(df, latest_date=latest_date, grouping="priority_status", savepath_figure_csvs=savepath_figure_csvs, savepath=savepath, suffix=suffix)
make_vaccine_graphs(df, latest_date=latest_date, include_total=False, savepath=savepath, savepath_figure_csvs=savepath_figure_csvs, suffix=suffix)
# ### Reports
from report_results import summarise_data_by_group
summarised_data_dict = summarise_data_by_group(df_dict_cum, latest_date=latest_date, groups=groups)
# +
summarised_data_dict_2nd_dose = summarise_data_by_group(df_dict_cum_second_dose, latest_date=latest_date, groups=groups)
summarised_data_dict_3rd_dose = summarise_data_by_group(df_dict_cum_third_dose, latest_date=latest_date, groups=groups)
# -
# ### Proportion of each eligible population vaccinated to date
from report_results import create_summary_stats, create_detailed_summary_uptake
summ_stat_results, additional_stats = create_summary_stats(df, summarised_data_dict, formatted_latest_date, groups=groups,
savepath=savepath, suffix=suffix)
# +
summ_stat_results_2nd_dose, _ = create_summary_stats(df, summarised_data_dict_2nd_dose, formatted_latest_date,
groups=groups, savepath=savepath,
vaccine_type="second_dose", suffix=suffix)
summ_stat_results_3rd_dose, _ = create_summary_stats(df, summarised_data_dict_3rd_dose, formatted_latest_date,
groups=groups, savepath=savepath,
vaccine_type="third_dose", suffix=suffix)
# -
# display the results of the summary stats on first and second doses
display(pd.DataFrame(summ_stat_results).join(pd.DataFrame(summ_stat_results_2nd_dose)).join(pd.DataFrame(summ_stat_results_3rd_dose)))
display(Markdown(f"*\n figures rounded to nearest 7"))
# +
# other information on vaccines
for x in additional_stats.keys():
display(Markdown(f"{x}: {additional_stats[x]}"))
display(Markdown(f"*\n figures rounded to nearest 7"))
# -
# # Detailed summary of coverage among population groups as at latest date
create_detailed_summary_uptake(summarised_data_dict, formatted_latest_date,
groups=population_subgroups.keys(),
savepath=savepath)
# # Demographics time trend charts
from report_results import plot_dem_charts
plot_dem_charts(summ_stat_results, df_dict_cum, formatted_latest_date, pop_subgroups=["80+", "70-79", "65-69","shielding (aged 16-69)", "60-64", "55-59", "50-54", "40-49", "30-39", "18-29"], groups_dict=features_dict,
groups_to_exclude=["ethnicity_16_groups", "current_copd", "chronic_cardiac_disease", "dmards", "chemo_or_radio", "lung_cancer", "cancer_excl_lung_and_haem", "haematological_cancer"],
savepath=savepath, savepath_figure_csvs=savepath_figure_csvs, suffix=suffix)
# ## Completeness of ethnicity recording
# +
from data_quality import *
ethnicity_completeness(df=df, groups_of_interest=population_subgroups)
# -
# # Second doses
# +
# only count second doses where the first dose was given at least 14 weeks ago
# to allow comparison of the first dose situation 14w ago with the second dose situation now
# otherwise bias could be introduced from any second doses given early in certain subgroups
date_14w = pd.to_datetime(df["covid_vacc_date"]).max() - timedelta(weeks=14)
date_14w = str(date_14w)[:10]
df_s = df.copy()
# replace any second doses not yet "due" with "0"
df_s.loc[(pd.to_datetime(df_s["covid_vacc_date"]) >= date_14w), "covid_vacc_second_dose_date"] = 0
# also ensure that first dose was dated after the start of the campaign, otherwise date is likely incorrect
# and due date for second dose cannot be calculated accurately
# this also excludes any second doses where first dose date = 0 (this should affect dummy data only!)
df_s.loc[(pd.to_datetime(df_s["covid_vacc_date"]) <= "2020-12-07"), "covid_vacc_second_dose_date"] = 0
formatted_date_14w = datetime.strptime(date_14w, "%Y-%m-%d").strftime("%d %b %Y")
with open(os.path.join(savepath["text"], f"latest_date_of_first_dose_for_due_second_doses.txt"), "w") as text_file:
text_file.write(formatted_date_14w)
display(Markdown(formatted_date_14w))
# +
# add "brand of first dose" to list of features to break down by
import copy
features_dict_2 = copy.deepcopy(features_dict)
for k in features_dict_2:
ls = list(features_dict_2[k])
ls.append("brand_of_first_dose")
features_dict_2[k] = ls
# +
df_dict_cum_second_dose = cumulative_sums(df_s, groups_of_interest=population_subgroups, features_dict=features_dict_2,
latest_date=latest_date, reference_column_name="covid_vacc_second_dose_date")
# -
second_dose_summarised_data_dict = summarise_data_by_group(df_dict_cum_second_dose, latest_date=latest_date, groups=groups)
create_detailed_summary_uptake(second_dose_summarised_data_dict, formatted_latest_date,
groups=groups,
savepath=savepath, vaccine_type="second_dose")
# ## For comparison look at first doses UP TO 14 WEEKS AGO
#
# +
# latest date of 14 weeks ago is entered as the latest_date when calculating cumulative sums below.
# Seperately, we also ensure that first dose was dated after the start of the campaign,
# to be consistent with the second doses due calculated above
df_14w = df.copy()
df_14w.loc[(pd.to_datetime(df_14w["covid_vacc_date"]) <= "2020-12-07"), "covid_vacc_date"] = 0
df_dict_cum_14w = cumulative_sums(
df_14w, groups_of_interest=population_subgroups, features_dict=features_dict_2,
latest_date=date_14w
)
summarised_data_dict_14w = summarise_data_by_group(
df_dict_cum_14w,
latest_date=date_14w,
groups=groups
)
create_detailed_summary_uptake(summarised_data_dict_14w, formatted_latest_date=date_14w,
groups=groups,
savepath=savepath, vaccine_type="first_dose_14w_ago")
| 40.725694 | 231 | 0.675335 | 1,528 | 11,729 | 4.884817 | 0.244764 | 0.05493 | 0.013264 | 0.026795 | 0.432342 | 0.387594 | 0.321543 | 0.294748 | 0.242765 | 0.221061 | 0 | 0.031665 | 0.229943 | 11,729 | 287 | 232 | 40.867596 | 0.79473 | 0.255435 | 0 | 0.061538 | 0 | 0 | 0.177884 | 0.053609 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.123077 | 0 | 0.123077 | 0.007692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2201229ba0be087f7e74b4e6169d1738424400 | 6,723 | py | Python | python/infinite_precision_arithmetic/Rational.py | quasarbright/quasarbright.github.io | 942710adf4a2531d033023a6f750efeddf3e9050 | [
"MIT"
] | 1 | 2021-01-23T13:50:34.000Z | 2021-01-23T13:50:34.000Z | python/infinite_precision_arithmetic/Rational.py | quasarbright/quasarbright.github.io | 942710adf4a2531d033023a6f750efeddf3e9050 | [
"MIT"
] | 40 | 2018-02-19T19:37:24.000Z | 2022-03-25T18:34:22.000Z | python/infinite_precision_arithmetic/Rational.py | quasarbright/quasarbright.github.io | 942710adf4a2531d033023a6f750efeddf3e9050 | [
"MIT"
] | 1 | 2018-12-07T03:07:21.000Z | 2018-12-07T03:07:21.000Z | from BigInt import *
class Rational:
    '''
    Immutable rational number built on BigInt.

    Stores the absolute numerator and denominator plus an explicit
    ``positive`` sign flag. Values are NOT automatically reduced;
    call simplify() to get the canonical form.
    '''
    def __init__(self, numerator, denominator=BigInt(1)):
        '''Construct numerator/denominator; raises on non-BigInt args or zero denominator.'''
        if type(numerator) is not type(BigInt(0)) or type(denominator) is not type(BigInt(0)):
            raise TypeError('{0}, {1}'.format(type(numerator), type(denominator)))
        if denominator == BigInt(0):
            raise ZeroDivisionError('Rational({0}, {1})'.format(numerator, denominator))
        else:
            self.numerator = abs(numerator)
            self.denominator = abs(denominator)
            # Positive iff numerator and denominator share the same sign.
            self.positive = numerator.positive == denominator.positive
            if numerator == BigInt(0):
                # Normalise zero to a positive sign.
                self.positive = True

    def __str__(self):
        if self.positive:
            return '{0} / {1}'.format(self.numerator, self.denominator)
        else:
            return '-{0} / {1}'.format(self.numerator, self.denominator)

    def __repr__(self):
        if self.positive:
            return 'Rational({0}, {1})'.format(self.numerator, self.denominator)
        else:
            return 'Rational(-{0}, {1})'.format(self.numerator, self.denominator)

    def __eq__(self, other):
        '''Value equality via cross-multiplication (no simplification needed).'''
        if type(self) is not type(other):
            return False
        if self.numerator == BigInt(0):
            return other.numerator == BigInt(0)
        else:
            # check signs and cross multiply
            return self.positive == other.positive and self.numerator * other.denominator == self.denominator * other.numerator

    def __lt__(self, other):
        if type(self) is not type(other):
            raise TypeError('{0} {1}'.format(type(self), type(other)))
        if self.positive and not other.positive:
            return False
        if other.positive and not self.positive:
            return True
        if self == other:
            return False
        # same sign and unequal: compare numerators over a common denominator
        s, o = self.lcd(other)
        if self.positive:
            return s.numerator < o.numerator
        else:
            # both negative: larger magnitude means smaller value
            return s.numerator > o.numerator

    def __le__(self, other):
        return self < other or self == other

    def __hash__(self):
        # Hash the simplified form so equal values hash equally.
        s = self.simplify()
        return hash((s.numerator, s.denominator, s.positive))

    def __abs__(self):
        ans = Rational(self.numerator, self.denominator)
        ans.positive = True
        return ans

    def __neg__(self):
        if self.numerator == BigInt(0):
            # -0 == 0; keep the normalised positive zero.
            return self
        else:
            ans = Rational(self.numerator, self.denominator)
            ans.positive = not self.positive
            return ans

    def __add__(self, other):
        if type(self) is not type(other):
            raise TypeError('{0} {1}'.format(type(self), type(other)))
        if self.numerator == BigInt(0):
            return other
        elif other.numerator == BigInt(0):
            return self
        elif self.positive == other.positive:
            # same sign: add numerators over the common denominator
            s, o = self.lcd(other)
            numerator = s.numerator + o.numerator
            denominator = s.denominator
            ans = Rational(numerator, denominator)
            ans.positive = s.positive
            return ans
        else:
            # mixed signs: delegate to subtraction
            return self - -other

    def __sub__(self, other):
        if type(self) is not type(other):
            raise TypeError('{0} {1}'.format(type(self), type(other)))
        if self.numerator == BigInt(0):
            return -other
        elif other.numerator == BigInt(0):
            return self
        elif self.positive == other.positive:
            s, o = self.lcd(other)
            numerator = s.numerator - o.numerator
            denominator = s.denominator
            ans = Rational(numerator, denominator)
            # sign flips when the subtraction crosses zero
            if numerator.positive:
                ans.positive = self.positive
            else:
                ans.positive = not self.positive
            return ans
        else:
            # mixed signs: delegate to addition
            return self + -other

    def __mul__(self, other):
        if type(self) is not type(other):
            raise TypeError('{0} {1}'.format(type(self), type(other)))
        ans = Rational(self.numerator * other.numerator, self.denominator * other.denominator)
        ans.positive = self.positive == other.positive
        if ans.numerator == BigInt(0):
            ans.positive = True
        return ans

    def __truediv__(self, other):
        if type(self) is not type(other):
            raise TypeError('{0} {1}'.format(type(self), type(other)))
        return self * other.inverse()

    def __pow__(self, other):
        '''Raise to an integer (BigInt) power; negative exponents invert first.'''
        if type(other) is not type(BigInt(1)):
            # Bug fix: the format index was {1} with a single argument, which
            # raised IndexError instead of the intended TypeError.
            raise TypeError('exponent must be a BigInt, got {0}'.format(type(other)))
        if not other.positive:
            return self.inverse() ** abs(other)  # may divide by zero
        if other == BigInt(0) or self == Rational(BigInt(1)):
            return Rational(BigInt(1))
        elif self.numerator == BigInt(0):
            return Rational(BigInt(0))
        elif other <= BigInt(10):
            # small exponents: plain repeated multiplication
            ans = self
            counter = BigInt(1)
            while counter < other:
                ans = ans * self
                counter = counter.add1()
            return ans
        else:
            # large exponents: decompose digit-by-digit in base 10
            ans = Rational(BigInt(1))
            pow = self
            for digit in other.digits[::-1]:
                ans = ans * (pow ** BigInt(digit))
                pow = pow ** BigInt(10)
            return ans

    def simplify(self):
        '''Return the reduced form (gcd divided out); sign is preserved.'''
        if self.numerator == BigInt(0):
            return Rational(BigInt(0), BigInt(1))
        else:
            gcd = self.numerator.gcd(self.denominator)
            numerator = self.numerator // gcd
            denominator = self.denominator // gcd
            ans = Rational(numerator, denominator)
            ans.positive = self.positive
            return ans

    def lcd(self, other):
        '''
        returns two rationals with the same, lowest common denominator (simplifies first)
        in order (self, other)
        maintains signs and numerical value
        1/2.lcd(-3/5) => (5/10, -6/10)
        '''
        if type(self) is not type(other):
            raise TypeError('{0} {1}'.format(type(self), type(other)))
        s = self.simplify()
        o = other.simplify()
        denominator = s.denominator.lcm(o.denominator)
        sn = s.numerator * (denominator // s.denominator)
        on = o.numerator * (denominator // o.denominator)
        s = Rational(sn, denominator)
        s.positive = self.positive
        o = Rational(on, denominator)
        o.positive = other.positive
        return s, o

    def inverse(self):
        '''Return the reciprocal; raises ZeroDivisionError for zero via __init__.'''
        ans = Rational(self.denominator, self.numerator)
        ans.positive = self.positive
        return ans
7f22ff6c6016d0aee4ec29ba1f292219db1a16c0 | 7,915 | py | Python | amazon.py | DhruvAwasthi/Scrape-Amazon_Reviews | 6cd02c23fa1a5aadf91c26c3b18e67b18be194a0 | [
"MIT"
] | 1 | 2021-02-03T12:59:46.000Z | 2021-02-03T12:59:46.000Z | amazon.py | DhruvAwasthi/Scraping-Amazon | 6cd02c23fa1a5aadf91c26c3b18e67b18be194a0 | [
"MIT"
] | null | null | null | amazon.py | DhruvAwasthi/Scraping-Amazon | 6cd02c23fa1a5aadf91c26c3b18e67b18be194a0 | [
"MIT"
] | null | null | null | import pandas as pd
from urllib import request
from bs4 import BeautifulSoup
# Maximum number of reviews to collect before stopping the crawl.
num_reviews = 100
# Directory intended for scraped review spreadsheets.
# NOTE(review): saveReviews currently writes to the working directory instead — confirm intent.
reviews_dir = 'data/'
base_url = 'https://www.amazon.in'
# Product page to scrape (Samsung Level bluetooth headphones).
product_url = 'https://www.amazon.in/Samsung-EO-BG920BBEGIN-Bluetooth-Headphones-Black-Sapphire/dp/B01A31SHF0/ref=sr_1_1?dchild=1&keywords=headphones+level&qid=1608538861&sr=8-1'
def getPage(url):
    """Fetch *url* with browser-like request headers and return the open response.

    The headers mimic a desktop Chrome browser so Amazon serves the normal
    HTML page rather than a bot-detection response.
    """
    browser_headers = {
        'authority': 'www.amazon.com',
        'pragma': 'no-cache',
        'cache-control': 'no-cache',
        'dnt': '1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'sec-fetch-site': 'none',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-dest': 'document',
        'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
    }
    page_request = request.Request(url, headers=browser_headers)
    return request.urlopen(page_request)
def getReviewsUrl(product_bsobj):
    """Return the 'see all reviews' href from a parsed product page.

    Returns False (and prints a notice) when the link cannot be found.
    """
    try:
        reviews_url = product_bsobj.find('a', {'class': 'a-link-emphasis a-text-bold', 'data-hook': 'see-all-reviews-link-foot'}).attrs.get('href', False)
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
        reviews_url = False
    if not reviews_url:
        print('No reviews present for this product')
    return reviews_url
def getKeywords(bsobj):
    """Return the list of review keyword strings, or [] when the section is absent."""
    try:
        res = list()
        keywords = bsobj.find(id='cr-lighthut-1-').findAll('span', {'class': 'a-declarative'})
        for keyword in keywords:
            res.append(keyword.a.getText().strip())
        return res
    except Exception:
        # Narrowed from a bare except; missing/changed page layout means "no keywords".
        return list()
def getOverallRating(bsobj):
    """Return the average-rating text (e.g. '4.1 out of 5'), or '' when absent."""
    try:
        return bsobj.find('div', {'class': 'a-fixed-left-grid AverageCustomerReviews a-spacing-small'}).span.getText()
    except Exception:
        # Narrowed from a bare except; treat any lookup failure as "no rating".
        return ''
def getPerStarAnaytics(bsobj):
    """Return the star-histogram row titles (one per star level), or [] when absent.

    Note: the misspelled name is kept for backward compatibility with callers.
    """
    try:
        res = list()
        rows = bsobj.find(id='histogramTable').findAll('tr')
        for row in rows:
            res.append(row.span.a.attrs['title'])
        return res
    except Exception:
        # Narrowed from a bare except; missing histogram means "no breakdown".
        return list()
def byFeatureRatings(bsobj):
    """Return deduplicated '<feature> have <n> stars' strings, or [] when absent.

    Only rows carrying an 'id' attribute are real feature entries.
    """
    try:
        res = list()
        rows = bsobj.find(id='cr-summarization-attributes-list').findAll('div')
        for row in rows:
            if 'id' in row.attrs:
                res.append(row.find('div', {'class': 'a-row'}).span.getText() + ' have ' + row.find('span', {'class': 'a-icon-alt'}).getText() + ' stars')
        # set() dedupes repeated feature rows; order is not significant here.
        return list(set(res))
    except Exception:
        # Narrowed from a bare except; missing section means "no feature ratings".
        return list()
def getProductIdentifier(bsobj):
    """Return the product's ASIN, or '' when it cannot be located.

    Tries the product-details table first, then falls back to the
    detail-bullets layout used by some product pages.
    """
    try:
        return bsobj.find(id='productDetails_detailBullets_sections1').td.getText().strip()
    except Exception:
        try:
            for req in bsobj.find(id='detailBullets_feature_div').ul.findAll('li'):
                if req.find('span', {'class': 'a-text-bold'}).getText().lower().split()[0].strip() == 'asin':
                    for count, j in enumerate(req.span.findAll('span')):
                        if count == 1:
                            return j.getText()
            # Bug fix: previously fell through and returned None here; return ''
            # for consistency with the other getters.
            return ''
        except Exception:
            # Narrowed from a bare except; any parsing failure means "unknown".
            return ''
def getNumberOfRatings(bsobj):
    """Return the ratings-count token (e.g. '1,234'), or '' when absent."""
    try:
        return bsobj.find('div', {'class': 'a-row a-spacing-medium averageStarRatingNumerical'}).span.getText().strip().split()[0]
    except Exception:
        # Narrowed from a bare except; treat any lookup failure as "unknown".
        return ''
def addRatingsAndReviewsNumber(bsobj):
    # Inserts the total ratings count and the number of scraped reviews as
    # columns 5 and 6 of the module-level reviews_df.
    # NOTE(review): single-value pd.Series fills only the first row; later rows
    # are NaN — presumably intentional for the spreadsheet layout, confirm.
    numRatings = getNumberOfRatings(bsobj)
    numReviews = len(reviews_df['Review'])
    reviews_df.insert(5, 'Number of Ratings', pd.Series(numRatings))
    reviews_df.insert(6, 'Number of Reviews', pd.Series(numReviews))
def getReviews(reviews_bsobj):
    # Parse one reviews page into rows of
    # [date, stars, title, helpful-count, text]; returns False when the page
    # yields no review elements.
    reviews = list()
    reviews_div = reviews_bsobj.find('div', {'id': 'cm_cr-review_list'})
    for i in reviews_div:
        # Only direct children with exactly this class list are review cards.
        if i.attrs.get('class') == ['a-section', 'review', 'aok-relative']:
            try:
                # Title is usually "4.0 out of 5 stars" -> take the integer part.
                review_stars = int(i.find('a', {'class': 'a-link-normal'}).attrs.get('title', 0).split('.')[0])
            except:
                # Fallback for an alternate title format with extra words.
                review_stars = int(i.find('a', {'class': 'a-link-normal'}).attrs.get('title', 0).split('.')[0].split()[0])
            review_date = i.find('span', {'class': 'a-size-base a-color-secondary review-date'}).getText().split('Reviewed in India on ')[-1]
            review_title = i.find('a', {'class': 'a-size-base a-link-normal review-title a-color-base review-title-content a-text-bold'}).getText().strip()
            review_text = i.find('span', {'class': 'a-size-base review-text review-text-content'}).getText().strip()
            try:
                review_useful = i.find('span', {'class': 'a-size-base a-color-tertiary cr-vote-text'}).getText().split()[0]
            except:
                # The helpful-vote element is absent when nobody voted yet.
                review_useful = '0'
            row = [review_date, review_stars, review_title, review_useful, review_text]
            reviews.append(row)
    if len(reviews) == 0:
        print('Error while finding reviews in this page')
        return False
    return reviews
def saveReviews(brand, product, reviews_dir):
    # Enrich the module-level reviews_df with analytics columns, add the
    # brand/product labels on the first row only, and write an .xlsx file.
    # NOTE(review): the reviews_dir parameter is accepted but never used —
    # the file is written to the current working directory; confirm intent.
    getAnalytics(product_bsobj)
    addRatingsAndReviewsNumber(product_bsobj)
    reviews_df.insert(0, 'Brand Name', pd.Series([brand] + [''] * (len(reviews_df) - 1)))
    reviews_df.insert(1, 'Product Name', pd.Series([product] + [''] * (len(reviews_df) - 1)))
    reviews_df.to_excel(brand + ' - ' + product + ' Reviews.xlsx', index=False)
    print(f'Reviews scraping done for product {product}')
def nextReviewPageUrl(brand, product, reviews_dir, reviews_bsobj):
    # Return the absolute URL of the next reviews page, or False when there
    # is no next page (in which case the collected reviews are saved).
    try:
        for i in reviews_bsobj.find(id='cm_cr-pagination_bar').children:
            try:
                next_review_page_url = i.find('li', {'class': 'a-last'}).a.attrs.get('href')
                review_url = base_url + next_review_page_url
                return review_url
            except:
                # No "next" link in this pagination child: treat as last page,
                # persist what we have and stop the crawl.
                print('You are visiting the last page of reviews for this product.')
                saveReviews(brand, product, reviews_dir)
                return False
    except:
        # Pagination bar missing entirely.
        print('Reviews are not present on this page')
        return False
def getAnalytics(product_bsobj):
    # Gather product-level analytics from the product page and insert them
    # as the first five columns of the module-level reviews_df.
    overall_rating = getOverallRating(product_bsobj)
    per_star_analytics = getPerStarAnaytics(product_bsobj)
    by_feature_ratings = byFeatureRatings(product_bsobj)
    keywords = getKeywords(product_bsobj)
    product_identifier = getProductIdentifier(product_bsobj)
    reviews_df.insert(0, 'ASIN', pd.Series(product_identifier)) # Amazon Standard Identification Number (ASIN)
    reviews_df.insert(1, 'Overall Rating', pd.Series(overall_rating))
    reviews_df.insert(2, 'Per Star Analytics', pd.Series(per_star_analytics))
    reviews_df.insert(3, 'By Feature Ratings', pd.Series(by_feature_ratings))
    reviews_df.insert(4, 'Keywords', pd.Series(keywords))
def fetchReviews(brand, product, product_url, num_reviews, reviews_dir):
    # Main crawl loop: fetch the product page, follow its reviews pages, and
    # accumulate rows in the module-level reviews_df until num_reviews are
    # collected or the pages run out; results are saved via saveReviews.
    global reviews_df, product_bsobj
    reviews_df = pd.DataFrame(columns=['Date', 'Stars', 'Title', 'People found this useful', 'Review'])
    product_page = getPage(product_url)
    product_bsobj = BeautifulSoup(product_page)
    reviews_url = getReviewsUrl(product_bsobj)
    if not reviews_url:
        # No reviews link: still save the (empty) frame with product analytics.
        saveReviews(brand, product, reviews_dir)
        return
    reviews_url = base_url + reviews_url
    while reviews_url:
        print(f'Reviews URL is:\n{reviews_url}')
        reviews_page = getPage(reviews_url)
        reviews_bsobj = BeautifulSoup(reviews_page)
        reviews = getReviews(reviews_bsobj)
        if not reviews:
            saveReviews(brand, product, reviews_dir)
            return
        reviews_df = reviews_df.append(pd.DataFrame(reviews, columns=reviews_df.columns), ignore_index=True)
        if len(reviews_df) >= num_reviews:
            # Collected enough reviews; stop early.
            saveReviews(brand, product, reviews_dir)
            return
        # nextReviewPageUrl saves and returns False on the last page.
        reviews_url = nextReviewPageUrl(brand, product, reviews_dir, reviews_bsobj)
fetchReviews('Samsung', 'Wireless Headphones', product_url, num_reviews, reviews_dir)
| 39.575 | 178 | 0.633986 | 995 | 7,915 | 4.919598 | 0.259296 | 0.034934 | 0.027579 | 0.031461 | 0.204903 | 0.169969 | 0.117875 | 0.092339 | 0.034729 | 0.02288 | 0 | 0.016074 | 0.221857 | 7,915 | 199 | 179 | 39.773869 | 0.778698 | 0.005559 | 0 | 0.295181 | 0 | 0.03012 | 0.243106 | 0.043589 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0 | 0.018072 | 0 | 0.240964 | 0.036145 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7f2404cb9daa894c644ef3b4e0780cecfc37e6cc | 2,252 | py | Python | tengp/genotype_factory.py | Jarino/tengp | 875ea583adf1194f1be4cb3dc25568f3859f9011 | [
"MIT"
] | 4 | 2018-10-17T21:46:40.000Z | 2021-11-09T06:17:05.000Z | tengp/genotype_factory.py | Jarino/tengp | 875ea583adf1194f1be4cb3dc25568f3859f9011 | [
"MIT"
] | 1 | 2018-10-18T08:42:27.000Z | 2018-10-18T08:42:27.000Z | tengp/genotype_factory.py | Jarino/tengp | 875ea583adf1194f1be4cb3dc25568f3859f9011 | [
"MIT"
] | 2 | 2018-10-18T00:36:51.000Z | 2018-12-12T03:52:14.000Z | from random import randint
from .utils import clamp_bottom
class GenotypeFactory():
    def __init__(self, parameters):
        """
        Initialize a genotype factory.

        Parameters
        ----------
        parameters: Parameters object
            object holding info about individuals
        """
        self.n_ins = parameters.n_inputs
        self.n_outs = parameters.n_outputs
        self.n_cols = parameters.n_columns
        self.n_rows = parameters.n_rows
        self.max_back = parameters.max_back
        self.funset = parameters.function_set
        self.arity = parameters.function_set.max_arity
        # derived sizes
        self.n_fun_nodes = self.n_cols * self.n_rows
        self.n_funs = len(self.funset)

    def create(self):
        """
        Create the integer genes of a random individual together with the
        per-gene bounds.

        Returns
        -------
        tuple
            (genes, (lower_bounds, upper_bounds))
        """
        genes, lowers, uppers = [], [], []

        fun_upper = self.n_funs - 1
        for node in range(self.n_fun_nodes):
            # function gene: any index into the function set
            genes.append(randint(0, fun_upper))
            uppers.append(fun_upper)
            lowers.append(0)

            # connection genes: may reference inputs or nodes of earlier
            # columns, limited by max_back
            column = node // self.n_rows
            in_upper = self.n_ins + column * self.n_rows - 1
            in_lower = clamp_bottom(in_upper - self.max_back + 1, 0)
            for _ in range(self.arity):
                uppers.append(in_upper)
                lowers.append(in_lower)
                genes.append(randint(in_lower, in_upper))

        # output genes: reference any input or function node within max_back
        out_upper = self.n_ins + self.n_fun_nodes - 1
        out_lower = clamp_bottom(out_upper - self.max_back + 1, 0)
        for _ in range(self.n_outs):
            uppers.append(out_upper)
            lowers.append(out_lower)
            genes.append(randint(out_lower, out_upper))

        return genes, (lowers, uppers)
| 32.637681 | 83 | 0.60524 | 284 | 2,252 | 4.489437 | 0.242958 | 0.07451 | 0.037647 | 0.037647 | 0.347451 | 0.196863 | 0.145882 | 0.112941 | 0.072157 | 0.072157 | 0 | 0.005837 | 0.315275 | 2,252 | 68 | 84 | 33.117647 | 0.821012 | 0.137211 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.05 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61303415b144b93a1348117d120e53ba1d088ec4 | 1,596 | py | Python | practical6a.py | himanshubalani/DSPDLab | 18ca05bc21c16025a9c87fcdf8de6dff6fe6218c | [
"MIT"
] | null | null | null | practical6a.py | himanshubalani/DSPDLab | 18ca05bc21c16025a9c87fcdf8de6dff6fe6218c | [
"MIT"
] | null | null | null | practical6a.py | himanshubalani/DSPDLab | 18ca05bc21c16025a9c87fcdf8de6dff6fe6218c | [
"MIT"
] | null | null | null | class Node:
def __init__(self, data):
self.data = data
self.next = None
self.prev = None
class LinkedList:
def __init__(self):
self.head = None
def push(self, newElement):
newNode = Node(newElement)
if(self.head == None):
self.head = newNode
return
else:
temp = self.head
while(temp.next != None):
temp = temp.next
temp.next = newNode
newNode.prev = temp
def push_at(self, newElement, position):
newNode = Node(newElement)
if(position < 1):
print("\nposition should be >= 1.")
elif (position == 1):
newNode.next = self.head
self.head.prev = newNode
self.head = newNode
else:
temp = self.head
for i in range(1, position-1):
if(temp != None):
temp = temp.next
if(temp != None):
newNode.next = temp.next
newNode.prev = temp
temp.next = newNode
if (newNode.next != None):
newNode.next.prev = newNode
else:
print("\nThe previous node is null.")
def PrintList(self):
temp = self.head
if(temp != None):
print("The list contains:", end=" ")
while (temp != None):
print(temp.data, end=" ")
temp = temp.next
print()
else:
print("The list is empty.")
# Demo: build the list 10 -> 20 -> 30 and exercise positional insertion.
dlllist = LinkedList()
dlllist.push(10)
dlllist.push(20)
dlllist.push(30)
dlllist.PrintList()
# Insert an element at position 2 (list becomes 10 100 20 30)
dlllist.push_at(100, 2)
dlllist.PrintList()
# Insert an element at position 1 (new head: 200 10 100 20 30)
dlllist.push_at(200, 1)
dlllist.PrintList() | 22.8 | 47 | 0.577068 | 201 | 1,596 | 4.527363 | 0.253731 | 0.079121 | 0.052747 | 0.050549 | 0.09011 | 0.09011 | 0.09011 | 0 | 0 | 0 | 0 | 0.018817 | 0.300752 | 1,596 | 70 | 48 | 22.8 | 0.796595 | 0.038847 | 0 | 0.40678 | 0 | 0 | 0.060013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0 | 0 | 0.135593 | 0.101695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61326e6043ecf9b6f3be3431593e534006fc31a8 | 22,334 | py | Python | ruuvigw_aioclient.py | hulttis/ruuvigw | 914eb657e3f2792cecf6848dfa7607ad45f17ab4 | [
"MIT"
] | 7 | 2019-11-08T07:30:05.000Z | 2022-02-20T21:58:44.000Z | ruuvigw_aioclient.py | hulttis/ruuvigw | 914eb657e3f2792cecf6848dfa7607ad45f17ab4 | [
"MIT"
] | null | null | null | ruuvigw_aioclient.py | hulttis/ruuvigw | 914eb657e3f2792cecf6848dfa7607ad45f17ab4 | [
"MIT"
] | 1 | 2021-06-19T16:52:55.000Z | 2021-06-19T16:52:55.000Z | # coding=utf-8
#-------------------------------------------------------------------------------
# Name: ruuvigw_aioclient.py
# Purpose: ruuvigw aioclient
# Copyright: (c) 2019 TK
# Licence: MIT
#-------------------------------------------------------------------------------
import logging
logger = logging.getLogger('ruuvi')
import asyncio
import time
import json
from datetime import datetime as _dt, timedelta as _td
from collections import defaultdict
from mixinQueue import mixinAioQueue as _mixinQueue
from mixinSchedulerEvent import mixinSchedulerEvent
from aioruuvitag.ruuvitag_misc import get_ms as _get_ms
from aioruuvitag.ruuvitag_calc import ruuvitag_calc as _tagcalc
import ruuvigw_defaults as _def
#===============================================================================
class ruuvi_aioclient(_mixinQueue, mixinSchedulerEvent):
    """Consumer of ruuvitag measurements.

    Reads JSON measurements from *inqueue*, filters them per measurement
    configuration (DELTA / MAXDELTA / max_interval rules) and forwards
    influx/mqtt-ready payloads to the configured *outqueues*.  A scheduled
    watchdog re-emits the last known data for silent tags.
    """
    QUEUE_PUT_TIMEOUT = 0.2        # seconds; presumably used by the queue mixin - TODO confirm
    SCHEDULER_MAX_INSTANCES = 5    # max concurrent instances of the scheduled job
    _func = 'execute_ruuvi'        # dispatch name embedded in every outgoing payload
    #-------------------------------------------------------------------------------
    def __init__(self, *,
        cfg,
        hostname,
        outqueues,
        inqueue,
        # fbqueue,
        loop,
        scheduler
    ):
        """
        cfg - ruuvi configuration
        hostname - name of the system
        outqueues - list of queues for outgoing data (influx(s) / mqtt(s))
        inqueue - incoming queue for data (ruuvitag)
        fbqueue - feedback queue for parent
        loop - asyncio loop
        scheduler - used scheduler for scheduled tasks
        """
        super().__init__()
        if not cfg:
            logger.error('cfg is required parameter and cannot be None')
            raise ValueError('cfg is required parameter and cannot be None')
        self._name = cfg.get('name', _def.RUUVI_NAME)
        logger.debug(f'{self._name } enter')
        self._cfg = cfg
        self._max_interval = (int(cfg.get('max_interval', _def.RUUVI_MAX_INTERVAL)) * 1000) # sec --> msec
        self._write_lastdata_int = int(self._cfg.get('write_lastdata_int', _def.RUUVI_WRITE_LASTDATA_INT))
        self._write_lastdata_cnt = int(self._cfg.get('write_lastdata_cnt', _def.RUUVI_WRITE_LASTDATA_CNT))
        if self._write_lastdata_int:
            # NOTE(review): _max_interval is in milliseconds while the log line
            # below labels write_lastdata_int with 's' - units look mixed; confirm
            self._write_lastdata_int = max(self._write_lastdata_int, (self._max_interval+_def.RUUVI_WRITE_LASTDATA_DIFF))
            logger.debug(f'{self._name} write_lastdata_int:{int(self._write_lastdata_int)}s')
            if self._write_lastdata_cnt:
                logger.debug(f'{self._name} write_lastdata_cnt:{int(self._write_lastdata_cnt)}')
            else:
                logger.debug(f'{self._name} write_lastdata_cnt unlimited')
        else:
            logger.debug(f'{self._name} write lastdata disabled')
        self._meas = cfg.get('MEASUREMENTS', [])
        logger.debug(f'{self._name} measurements:{self._meas}')
        self._outqueues = outqueues
        self._inqueue = inqueue
        self._loop = loop
        # NOTE(review): the explicit loop= argument to Event/Lock was removed
        # in Python 3.10 - this needs updating before running on newer Pythons
        self._stop_event = asyncio.Event(loop=loop)
        self._lastdata_lock = asyncio.Lock(loop=loop)
        self._scheduler = scheduler
        self._schedule(scheduler=scheduler)
        self._hostname = hostname
        # measurement name -> mac -> (time, datas, measur, reason, xcnt, ycnt)
        self._lastdata = defaultdict(dict)
        # measurement name -> mac -> number of forwarded updates
        self._cnt = defaultdict(dict)
        logger.info(f'{self._name} initialized')
    #-------------------------------------------------------------------------------
    def stop(self):
        """Signal run() to exit its receive loop."""
        self._stop_event.set()
    #-------------------------------------------------------------------------------
    def _schedule(self, *, scheduler):
        """Register the 1-second lastdata watchdog job when enabled in cfg."""
        logger.debug(f'{self._name} enter {type(scheduler)}')
        if self._write_lastdata_int:
            try:
                l_jobid = f'{self._name}_lastdata'
                scheduler.add_job(
                    self._check_lastdata,
                    'interval',
                    seconds = 1,
                    id = l_jobid,
                    replace_existing = True,
                    max_instances = self.SCHEDULER_MAX_INSTANCES,
                    coalesce = True,
                    next_run_time = _dt.now()+_td(seconds=_def.RUUVI_WRITE_LASTDATA_DELAY)
                )
                logger.info(f'{self._name} {l_jobid} scheduled')
            except:
                logger.exception(f'*** {self._name}')
    #-------------------------------------------------------------------------------
    async def run(self):
        """Main loop: consume JSON messages from the inqueue until stopped."""
        logger.info(f'{self._name} started')
        l_json = None
        if self._inqueue:
            while not self._stop_event.is_set():
                try:
                    l_json = await self.queue_get(inqueue=self._inqueue)
                    await self._handle_data(indata=l_json)
                except asyncio.CancelledError:
                    logger.warning(f'{self._name} CanceledError')
                    return
                except GeneratorExit:
                    logger.warning(f'GeneratorExit')
                    return
                except Exception:
                    # keep consuming: a bad message must not kill the client
                    logger.exception(f'*** {self._name}')
                    continue
        else:
            logger.critical(f'{self._name} FAILED TO START. NO QUEUE')
        # for l_mea in self._cnt:
        #     for l_mac in self._cnt[l_mea]:
        #         logger.info(f'{self._name} {l_mea} {l_mac} cnt:{self._cnt[l_mea][l_mac]}')
        logger.info(f'{self._name} completed')
    #-------------------------------------------------------------------------------
    def _update_cnt(self, *, measurname, mac):
        """Return the current per-tag update counter and post-increment it."""
        try:
            l_cnt = self._cnt[measurname][mac]
        except:
            l_cnt = 0
        self._cnt[measurname][mac] = (l_cnt + 1)
        return l_cnt
    #-------------------------------------------------------------------------------
    # xcnt ... how many lastdata updates
    # ycnt ... how many denied updates because of maxdelta
    async def _update_lastdata(self, *, measur, mac, xtime, datas, reason, xcnt=0, ycnt=0):
        """Record the latest accepted data for (measurement, mac) under the lock."""
        async with self._lastdata_lock:
            l_measurname = measur.get('name', _def.RUUVI_NAME)
            self._lastdata[l_measurname][mac] = (xtime, datas, measur, reason, xcnt, ycnt)
            # logger.debug(f'{self._name} {l_measurname} {mac} lastdata:{self._lastdata}')
    #-------------------------------------------------------------------------------
    async def _remove_lastdata(self, *, measur, mac):
        """Drop the lastdata entry for (measurement, mac); silent when absent."""
        async with self._lastdata_lock:
            l_measurname = measur.get('name', _def.RUUVI_NAME)
            try:
                del self._lastdata[l_measurname][mac]
            except:
                pass
    #-------------------------------------------------------------------------------
    async def _get_lastdata(self, *, measur, mac):
        """Return the stored 6-tuple for (measurement, mac), or all-Nones."""
        async with self._lastdata_lock:
            l_measurname = measur.get('name', _def.RUUVI_NAME)
            try:
                return self._lastdata[l_measurname][mac]
            except:
                pass
            return (None, None, None, None, None, None)
    #-------------------------------------------------------------------------------
    async def _get_lastdata_items(self):
        """Return the lastdata items view, taken under the lock."""
        async with self._lastdata_lock:
            return self._lastdata.items()
    #-------------------------------------------------------------------------------
    async def _check_lastdata(self):
        """
        scheduled task

        Re-emits the last known data for tags that have been silent longer
        than write_lastdata_int, at most write_lastdata_cnt times per tag.
        """
        l_now = _get_ms()
        # logger.debug(f'{self._name}')
        if self._write_lastdata_int:
            try:
                for l_measurname, l_tmp_measurdata in await self._get_lastdata_items():
                    # logger.debug(f'{self._name} measur:{l_measurname} data:{l_measurdata}')
                    # shallow copy: _update_lastdata/_remove_lastdata below
                    # mutate the underlying dict while we iterate
                    l_measurdata = {**l_tmp_measurdata}
                    for l_mac in l_measurdata.keys():
                        l_macdata = l_measurdata[l_mac]
                        # logger.debug(f'{self._name} mac:{l_mac} data:{l_macdata}')
                        (l_lasttime, l_datas, l_measur, _, l_xcnt, _) = l_macdata
                        l_tagname = l_datas['tagname']
                        if abs(l_now-l_lasttime) > self._write_lastdata_int:
                            if not self._write_lastdata_cnt or l_xcnt < self._write_lastdata_cnt:
                                # if write_lastdata_cnt forver or l_xcnt < write_lastdata_cnt
                                if 'time' in l_datas: # delete time from datas - will be set to utcnow byt _get_json
                                    del l_datas['time']
                                l_fdata = json.dumps({
                                    'func': self._func,
                                    'jobid': f'{l_measurname}_lastdata',
                                    'json': await self._get_json(measur=l_measur, mac=l_mac, datas=l_datas, reason='lastdata:'+str(l_xcnt), lasttime=l_lasttime)
                                })
                                await self._update_lastdata(measur=l_measur, mac=l_mac, xtime=(l_lasttime + self._write_lastdata_int), datas=l_datas, reason='lastdata', xcnt=(l_xcnt+1)) # ycnt=0
                                await self._queue_output(measur=l_measur, datas=l_fdata)
                                logger.debug(f'{self._name} {l_measurname} {l_mac} {l_tagname} cnt:{l_xcnt} data:{l_fdata}')
                            elif self._write_lastdata_cnt and l_xcnt >= self._write_lastdata_cnt:
                                # re-emit budget used up: forget the tag
                                await self._remove_lastdata(measur=l_measur, mac=l_mac)
                                logger.debug(f'{self._name} {l_measurname} {l_mac} {l_tagname} write_lastdata_cnt:{self._write_lastdata_cnt} reached')
            except Exception:
                logger.exception(f'*** {self._name}')
    #-------------------------------------------------------------------------------
    async def _check_delta(self, *, measur, mac, datas, field, delta):
        """Return False when *field* changed less than *delta* since last data."""
        l_measurname = measur.get('name', _def.RUUVI_NAME)
        l_tagname = datas['tagname']
        try:
            l_newvalue = datas.get(field, None)
            (_, l_olddata, _, _, _, _) = await self._get_lastdata(measur=measur, mac=mac)
            if l_olddata:
                l_oldvalue = l_olddata.get(field, None)
                if l_newvalue and l_oldvalue:
                    if abs(l_newvalue - l_oldvalue) < delta:
                        return False
                    else:
                        logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname:20s} {field:17s} old:{l_oldvalue:.2f} new:{l_newvalue:.2f} diff:{abs(l_newvalue-l_oldvalue):.2f} delta:{delta:.2f}')
        except:
            logger.exception(f'*** {self._name}')
        return True
    #-------------------------------------------------------------------------------
    async def _check_maxdelta(self, *, measur, mac, datas, field, maxdelta):
        """Return False when *field* jumped more than maxchange and the
        per-tag denial budget (maxcount) is not yet exhausted."""
        l_measurname = measur.get('name', _def.RUUVI_NAME)
        l_tagname = datas['tagname']
        l_maxchange = maxdelta.get('maxchange', None)
        l_maxcount = maxdelta.get('maxcount', None)
        if not l_maxchange or not l_maxcount:
            return True
        try:
            l_newvalue = datas.get(field, None)
            (_, l_olddata, _, _, _, l_ycnt) = await self._get_lastdata(measur=measur, mac=mac)
            l_oldvalue = l_olddata.get(field, None)
            if l_newvalue and l_oldvalue:
                if abs(l_newvalue-l_oldvalue) > l_maxchange:
                    # value is changed more than maxchange
                    logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname:20s} {field:17s} old:{l_oldvalue:.2f} new:{l_newvalue:.2f} diff:{abs(l_newvalue-l_oldvalue):.2f} maxchange:{l_maxchange:.2f} maxcount:{l_maxcount} ycnt:{l_ycnt}')
                    # Value has been > maxchange less than count times
                    if (l_ycnt < l_maxcount):
                        return False
        except:
            logger.exception(f'*** {self._name}')
        return True
    #-------------------------------------------------------------------------------
    async def _is_diff(self, *, measur, mac, datas):
        """Decide whether *datas* should be forwarded.

        Returns (status, reason, lasttime): status True when the data passes
        the first-seen / max_interval / maxdelta / delta rules.
        """
        l_now = _get_ms()
        l_measurname = measur.get('name', _def.RUUVI_NAME)
        l_tagname = datas['tagname']
        try:
            (l_lasttime, _, _, _, l_xcnt, l_ycnt) = await self._get_lastdata(measur=measur, mac=mac)
            if not l_lasttime:
                await self._update_lastdata(measur=measur, mac=mac, xtime=l_now, datas=datas, reason='first') # xcnt=0 ycnt=0
                return (True, 'first', 0)
            # check if max_interval has been passed
            if abs(l_now - l_lasttime) > self._max_interval:
                logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} max interval {self._max_interval/1000} sec passed')
                l_xtime = l_lasttime + self._max_interval
                if (l_xtime + self._max_interval) < l_now:
                    # more than two intervals behind: resynchronize to now
                    l_xtime = l_now
                    logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} lasttime adjusted now:{l_now}')
                await self._update_lastdata(measur=measur, mac=mac, xtime=l_xtime, datas=datas, reason='max_interval') # xcnt=0 ycnt=0
                return (True, 'max_interval', l_lasttime)
            # check maxdelta (maximum allowed value change)
            l_maxdeltas = measur.get('MAXDELTA', _def.RUUVI_MAXDELTA)
            for l_field in l_maxdeltas.keys():
                l_maxdelta = l_maxdeltas[l_field]
                if not await self._check_maxdelta(measur=measur, mac=mac, datas=datas, field=l_field, maxdelta=l_maxdelta):
                    await self._update_lastdata(measur=measur, mac=mac, xtime=l_now, datas=datas, reason='max_delta '+l_field, xcnt=l_xcnt, ycnt=(l_ycnt+1))
                    return (False, None, 0)
            # check delta (minimum change to trigger database update)
            l_deltas = measur.get('DELTA', _def.RUUVI_DELTA)
            for l_field in l_deltas.keys():
                l_delta = l_deltas[l_field]
                if not await self._check_delta(measur=measur, mac=mac, datas=datas, field=l_field, delta=l_delta):
                    return (False, None, 0)
            await self._update_lastdata(measur=measur, mac=mac, xtime=l_now, datas=datas, reason=l_field) # xcnt=0 ycnt=0
            return (True, l_field, l_lasttime)
        except:
            logger.exception(f'*** {self._name}')
        return (False, None, 0)
    #-------------------------------------------------------------------------------
    def _field_value(self, *, measur, field, datas):
        """Return datas[field] rounded per measurement ROUND config, or None."""
        try:
            l_precision = measur['ROUND'][field]
            # print(f'precision found: {field} {l_precision}')
        except:
            # print(f'precision not found: {field}')
            l_precision = _def.RUUVI_PRECISION
        try:
            return round(datas[field], l_precision)
        except:
            return None
    #-------------------------------------------------------------------------------
    async def _get_fields(self, *, measur, mac, datas):
        """Build the outgoing fields dict: configured FIELDS (renamed and
        rounded) or all truthy raw fields, plus a 'time' stamp."""
        # logger.debug(f'{self._name}')
        if not datas:
            return None
        l_measurname = measur.get('name', _def.RUUVI_NAME)
        l_tagname = datas['tagname']
        logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname}')
        l_fields = {}
        l_meafields = measur.get('FIELDS', None)
        if l_meafields:
            for l_field in l_meafields:
                l_value = self._field_value(measur=measur, field=l_field, datas=datas)
                if l_value:
                    l_fields[l_meafields[l_field]] = l_value
        else:
            for l_field in datas:
                if datas[l_field]:
                    l_fields[l_field] = datas[l_field]
        if len(l_fields):
            l_fields['time'] = datas['time'] if ('time' in datas) else _dt.utcnow().strftime(_def.RUUVI_TIMEFMT)
        else:
            logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} fields empty')
        logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} fields:{l_fields}')
        return (l_fields)
    #-------------------------------------------------------------------------------
    async def _get_debugs(self, *, measur, mac, reason, datas, tagdatas=None, lasttime=0):
        """Build optional debug* fields (reason, counters, intervals) when the
        measurement has debug enabled; {} otherwise or on error."""
        # logger.debug(f'{self._name}')
        try:
            l_measurname = measur.get('name', _def.RUUVI_NAME)
            l_tagname = datas['tagname']
            l_now = _get_ms()
            logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} {lasttime} {l_now}')
            l_debugs = {}
            if measur.get('debug', _def.RUUVI_DEBUG):
                l_debugs['debugReason'] = reason
                l_debugs['debugCount'] = int(self._update_cnt(measurname=l_measurname, mac=mac))
                l_debugs['debugInterval'] = int(l_now-lasttime) if ((lasttime<l_now) and lasttime) else 0 # ms
                # l_debugs['debugHost'] = self._cfg.get('hostname', _def.RUUVI_HOSTNAME)
                if tagdatas:
                    l_debugs['debugTagCount'] = int(tagdatas['count'])
                    l_debugs['debugTagInterval'] = int(tagdatas['interval']) # ms
                    l_debugs['debugTagRecvTime'] = int(tagdatas['recvtime']) # ms
                    # l_debugs['debugTagElapsed'] = int(tagdatas['elapsed']) # ms
            logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} debugs:{l_debugs}')
            return l_debugs
        except:
            logger.exception(f'*** {self._name}')
            return {}
    #-------------------------------------------------------------------------------
    async def _get_calcs(self, *, measur, mac, datas):
        """Build derived fields via ruuvitag_calc when the measurement has
        calcs enabled; {} otherwise or on error."""
        # logger.debug(f'{self._name}')
        try:
            l_measurname = measur.get('name', _def.RUUVI_NAME)
            l_tagname = datas['tagname']
            # logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname}')
            l_calcs = {}
            if measur.get('calcs', _def.RUUVI_CALCS):
                _tagcalc.calc(datas=datas, out=l_calcs)
            logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} calcs:{l_calcs}')
            return l_calcs
        except:
            logger.exception(f'*** {self._name}')
            return {}
    #-------------------------------------------------------------------------------
    async def _get_json(self, *, measur, mac, datas, reason, tagdatas=None, lasttime=0):
        """Assemble the influx-style payload (measurement + tags + fields);
        None when no fields survive filtering."""
        # logger.debug(f'{self._name}')
        if not datas:
            return None
        l_measurname = measur.get('name', _def.RUUVI_NAME)
        l_tagname = datas['tagname']
        # logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname}')
        l_fields = await self._get_fields(measur=measur, mac=mac, datas=datas)
        if not l_fields:
            logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} fields empty')
            return None
        l_debugs = await self._get_debugs(measur=measur, mac=mac, reason=reason, datas=datas, tagdatas=tagdatas, lasttime=lasttime)
        if l_debugs:
            l_fields = {**l_fields, **l_debugs}
        l_calcs = await self._get_calcs(measur=measur, mac=mac, datas=datas)
        if l_calcs:
            l_fields = {**l_fields, **l_calcs}
        l_json = [
            {
                "measurement": measur.get('name', _def.RUUVI_NAME),
                "tags": {
                    "mac": mac,
                    "name": l_tagname,
                    "dataFormat": str(self._field_value(measur=measur, field='_df', datas=datas)),
                    "hostname": self._hostname
                },
                "fields": l_fields
            }
        ]
        logger.debug(f'{self._name} {l_measurname} {mac} {l_tagname} json:{l_json}')
        return l_json
    #-------------------------------------------------------------------------------
    async def _handle_data(self, *, indata):
        """Parse one incoming JSON message and fan it out to every configured
        measurement whose diff rules accept it."""
        logger.debug(f'{self._name} {type(indata)} indata:{indata}')
        if not indata or not len(indata):
            return
        try:
            l_dict = json.loads(indata)
            l_mac = l_dict['mac']
            l_datas = l_dict['datas']
            l_tagdatas = l_dict.get('_aioruuvitag', None)
            l_tagname = l_datas['tagname']
            for l_measur in self._meas:
                l_measurname = l_measur.get('name', _def.RUUVI_NAME)
                logger.debug(f'{self._name} {l_measurname} {l_mac} {l_tagname} datas:{l_datas}')
                (l_status, l_reason, l_lasttime) = await self._is_diff(measur=l_measur, mac=l_mac, datas=l_datas)
                if l_status:
                    l_fdata = {
                        'func': self._func,
                        'jobid': l_measurname,
                        'json': await self._get_json(measur=l_measur, mac=l_mac, datas=l_datas, reason=l_reason, tagdatas=l_tagdatas, lasttime=l_lasttime)
                    }
                    await self._queue_output(measur=l_measur, datas=l_fdata)
                    logger.debug(f'{self._name} {l_measurname} {l_mac} {l_tagname} fdata:{l_fdata}')
                # else:
                #     logger.debug(f'{self._name} {l_measurname} {l_mac} {l_tagname} data ignored')
        except:
            logger.exception(f'*** {self._name}')
    #-------------------------------------------------------------------------------
    async def _queue_output(self, *, measur, datas):
        """Forward *datas* to the measurement's OUTPUT queues (dict form) or
        to the single shared queue; True on success."""
        try:
            if self._outqueues:
                if isinstance(self._outqueues, dict):
                    for l_out in measur.get('OUTPUT', []):
                        l_outqueue = self._outqueues.get(l_out, None)
                        if l_outqueue:
                            # print(f'out:{l_out} {l_outqueue}')
                            if not await self.queue_put(outqueue=l_outqueue, data=datas):
                                return False
                    return True
                else:
                    return await self.queue_put(outqueue=self._outqueues, data=datas)
        except:
            logger.exception(f'*** {self._name}')
        return False
| 45.86037 | 242 | 0.515313 | 2,418 | 22,334 | 4.466915 | 0.109181 | 0.039996 | 0.044163 | 0.051847 | 0.459309 | 0.404685 | 0.359689 | 0.317285 | 0.281826 | 0.250903 | 0 | 0.003397 | 0.301379 | 22,334 | 486 | 243 | 45.954733 | 0.688842 | 0.167099 | 0 | 0.318182 | 0 | 0.014205 | 0.143937 | 0.019677 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014205 | false | 0.008523 | 0.03125 | 0 | 0.150568 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6132d42fd438eecafe7615bca9c8d7ecfec60301 | 221 | py | Python | cosmicmatter/misc/basicloadtest.py | bclements/cosmicmatter | 93eeb09fff9db8e4bc2e3062b7d3de2aa5bab7b4 | [
"BSD-3-Clause"
] | 1 | 2022-03-24T22:55:56.000Z | 2022-03-24T22:55:56.000Z | cosmicmatter/misc/basicloadtest.py | SpaceElements/spacepy | dada27f8af142c839060d525b2ef653d07195ee3 | [
"BSD-3-Clause"
] | 2 | 2022-03-25T08:23:02.000Z | 2022-03-29T22:45:32.000Z | cosmicmatter/misc/basicloadtest.py | SpaceElements/spacepy | dada27f8af142c839060d525b2ef653d07195ee3 | [
"BSD-3-Clause"
] | 1 | 2022-03-25T07:14:53.000Z | 2022-03-25T07:14:53.000Z | # very basic load test
import webbrowser  # bug fix: this import was commented out, so webbrowser.open() below raised NameError
import time

# Hit the target URL 22 times, ~1.3 s apart, each in a new browser tab.
count = 0
while count < 22:
    print(count)
    webbrowser.open("https://www.someurlhere.com", new=2, autoraise=True)
    count += 1
    time.sleep(1.3)  # seconds between requests
| 20.090909 | 73 | 0.669683 | 32 | 221 | 4.625 | 0.78125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039773 | 0.20362 | 221 | 10 | 74 | 22.1 | 0.801136 | 0.208145 | 0 | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6133c29d8b9279ccf50193d636c2f1625a49fccd | 1,620 | py | Python | preprocess.py | ShiraStarL/Credit_Card_Detection | d01fd888e28efbda93f87f32cc8a7b0255067950 | [
"MIT"
] | 5 | 2020-09-02T09:44:15.000Z | 2020-09-02T11:43:31.000Z | preprocess.py | ShiraStarL/Credit_Card_Detection | d01fd888e28efbda93f87f32cc8a7b0255067950 | [
"MIT"
] | null | null | null | preprocess.py | ShiraStarL/Credit_Card_Detection | d01fd888e28efbda93f87f32cc8a7b0255067950 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
def preprocess(img, debug=True):
    """Locate and crop the credit-card region from a BGR input image.

    img   -- BGR image (numpy array), assumed roughly centred on the card
             -- TODO confirm with callers.
    debug -- when True, draw intermediate results and dump the final crop
             to debug/card_image.jpg.

    Returns the cropped card image, or the centre-cropped frame when no
    sufficiently large contour is found.
    """
    # image dimensions
    height, width = img.shape[:2]
    # crop a fixed-ratio rectangle (0.25 x 0.812 of the frame) around the
    # centre -- the expected card area; TODO confirm the magic ratios
    a = int(height * 0.125)
    b = int(width * 0.406)
    x1 = int(width / 2) - b
    x2 = int(width / 2) + b
    y1 = int(height / 2) - a
    y2 = int(height / 2) + a
    img = img[y1:y2, x1:x2].copy()
    height, width = img.shape[:2]
    img_size = height * width
    # convert BGR to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # threshold for binary image (black and white)
    ret, thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_BINARY)
    # morphological opening to clean noise in the background
    kernel = np.ones((5, 5), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    # find all contours in the image
    # NOTE(review): assumes the OpenCV 2.x/4.x two-value return of
    # findContours; OpenCV 3.x returns three values
    contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        # bug fix: max() below raised ValueError on an empty contour list
        return img
    if debug:
        # draw all contours
        counts = img.copy()
        cv2.drawContours(counts, contours, -1, (0, 255, 0), 3)
    # find the biggest contour (c) by the area
    c = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(c)
    if debug:
        # draw rectangle around contour
        card_rect = img.copy()
        cv2.rectangle(card_rect, (x, y), (x + w, y + h), (0, 255, 0), 2)
    # accept the contour as the card only if it covers most of the crop
    per = (h * w) / img_size
    if per > 0.5:
        card_image = img[y:y + h, x:x + w].copy()
        if debug:
            cv2.imwrite("debug/card_image.jpg", card_image)
        return card_image
    else:
        return img
| 27.457627 | 105 | 0.612346 | 249 | 1,620 | 3.927711 | 0.429719 | 0.03681 | 0.02863 | 0.038855 | 0.0409 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052233 | 0.267284 | 1,620 | 58 | 106 | 27.931034 | 0.771693 | 0.217284 | 0 | 0.147059 | 0 | 0 | 0.015924 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6133d641ec757816ea3c46a2cb3da107b1a91db7 | 16,690 | py | Python | app.py | jyio/jamwithfriends | 3727af6bf276d6366011cdaf784102b9d8d60484 | [
"MIT"
] | 6 | 2015-02-25T02:00:44.000Z | 2016-04-17T09:45:50.000Z | app.py | jyio/jamwithfriends | 3727af6bf276d6366011cdaf784102b9d8d60484 | [
"MIT"
] | null | null | null | app.py | jyio/jamwithfriends | 3727af6bf276d6366011cdaf784102b9d8d60484 | [
"MIT"
] | 2 | 2015-04-10T18:52:59.000Z | 2021-10-06T03:41:33.000Z | #!/usr/bin/env python
import re
import json
import math
import time
import random
import hashlib
import sqlite3
import weakref
from collections import Counter, deque
try:
import cPickle as pickle
except ImportError:
import pickle
import bottle
from bottle import Bottle, static_file
from socketio import socketio_manage
from socketio.namespace import BaseNamespace
from socketio.mixins import BroadcastMixin, RoomsMixin
import gevent.monkey
# Replace blocking stdlib socket/SSL with gevent's cooperative versions
# before youtube_dl is imported, so its network calls yield to the hub.
gevent.monkey.patch_socket()
gevent.monkey.patch_ssl()
import youtube_dl
# Shared extractor instance; callers pass download=False and only read metadata.
# NOTE(review): '%(id)s%(ext)s' lacks a dot before the extension -- harmless
# while nothing is downloaded, but looks like a typo for '%(id)s.%(ext)s'.
ydl = youtube_dl.YoutubeDL({'outtmpl': '%(id)s%(ext)s'})
ydl.add_default_info_extractors()
# Container extension -> MIME type handed to the client player.
# NOTE(review): 'audio/mp3' is non-standard ('audio/mpeg' is canonical) and
# m4a maps to 'video/mp4' -- confirm these are what the front end expects.
exttype = {
    'mp3': 'audio/mp3',
    'm4a': 'video/mp4',
    'webm': 'video/webm'
}
class memottl(object):
    """Decorator that memoizes a function's results with a time-to-live.

    ttl -- seconds a cached value stays fresh; values <= 0 never expire.
    Calls whose argument tuple is unhashable bypass the cache entirely.
    Keyword arguments are not supported (positional args only).
    """

    def __init__(self, ttl):
        self.cache = {}  # args tuple -> (value, timestamp)
        self.ttl = ttl

    def __call__(self, f):
        from functools import wraps  # local import: module dependencies unchanged

        @wraps(f)  # improvement: preserve the wrapped function's name/docstring
        def wrapped_f(*args):
            now = time.time()
            try:
                value, last_update = self.cache[args]
                if self.ttl > 0 and now - last_update > self.ttl:
                    raise AttributeError  # entry expired: force recompute below
                return value
            except (KeyError, AttributeError):
                value = f(*args)
                self.cache[args] = (value, now)
                return value
            except TypeError:
                # unhashable args: cannot be cached, just call through
                return f(*args)
        return wrapped_f
def baseencode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
    """Encode an integer in the given positional alphabet (default base 36).

    Negative numbers get a leading '-'.  Raises TypeError for non-integers.
    """
    import numbers
    # bug fix: the original `isinstance(number, (int, long))` raised NameError
    # on Python 3; numbers.Integral covers int and Python 2 long alike
    if not isinstance(number, numbers.Integral):
        raise TypeError('number must be an integer')
    result = ''
    sign = ''
    if number < 0:
        sign = '-'
        number = -number
    # single-digit fast path
    if 0 <= number < len(alphabet):
        return sign + alphabet[number]
    while number != 0:
        number, i = divmod(number, len(alphabet))
        result = alphabet[i] + result
    return sign + result
def base32encode(number):
    """Encode *number* in a Crockford-style base-32 alphabet (omits i, l, o, u)."""
    return baseencode(number, '0123456789abcdefghjkmnpqrstvwxyz')
def hashhash(s, times=8):
    """Iteratively SHA-1 the byte string *s* `times` times, salting each round
    with the original input, and return 'sha1.<times>.<base32 digest>'.

    NOTE(review): Python 2 only -- relies on buffer(), xrange() and
    str.encode('hex'); needs porting (memoryview/range/binascii.hexlify)
    before it can run under Python 3.  Output format must stay stable for
    existing stored hashes.
    """
    fn = hashlib.sha1
    salt = buffer(s)
    for i in xrange(times):
        s = fn(salt + s).digest()
    return 'sha1.' + str(times) + '.' + base32encode(int(s.encode('hex'), 16))
def filter_vidkey(vidkey):
    """Return a truthy value when *vidkey* ('service:key') looks usable.

    youtube keys must contain none of '#', '&', '?'; soundcloud keys may
    additionally contain one '/'.  Anything else yields a falsy result.
    """
    service, rest = vidkey.split(':', 1)
    if service == 'youtube':
        return bool(re.match(r'[^#\&\?]*$', rest))
    if service == 'soundcloud':
        slash_count = rest.count('/')
        if slash_count == 0:
            return bool(re.match(r'[^#\&\?]*$', rest))
        if slash_count == 1:
            return True
def denormalize(vidkey):
    """Expand a 'service:key' identifier back into a public URL.

    Unknown services yield None.
    """
    service, rest = vidkey.split(':', 1)
    if service == 'youtube':
        return 'http://www.youtube.com/watch?v=' + rest
    if service == 'soundcloud':
        base = 'http://soundcloud.com/' if '/' in rest else 'http://snd.sc/'
        return base + rest
@memottl(600)
def fetchdata(vidkey):
    """Resolve track metadata for *vidkey*, cached for ten minutes.

    Dispatches to the per-service fetcher; unknown services yield None.
    """
    service = vidkey.split(':', 1)[0]
    if service == 'youtube':
        return fetchdata_youtube(vidkey)
    if service == 'soundcloud':
        return fetchdata_soundcloud(vidkey)
def fetchdata_youtube(vidkey):
    """Fetch title and best audio-bearing stream URLs for a YouTube video.

    Returns {'vidkey', 'url', 'title', 'format'} where 'format' maps each
    known container extension to {ext, type, abr, url} (the highest-bitrate
    stream per extension), or None when extraction fails.
    """
    try:
        result = ydl.extract_info(denormalize(vidkey), download=False)
    except youtube_dl.utils.DownloadError:
        return None
    # playlists come back as an 'entries' list; use the first entry
    video = result['entries'][0] if 'entries' in result else result
    # collect (bitrate, url) per container, keeping only formats that carry
    # audio AND whose extension we can map to a MIME type
    # (bug fix: the original indexed exttype[k] unconditionally and raised
    # KeyError for audio formats with an unmapped extension)
    candidates = {}
    for fmt in video['formats']:
        if 'abr' in fmt and fmt['ext'] in exttype:
            candidates.setdefault(fmt['ext'], []).append((fmt['abr'], fmt['url']))
    formats = {}
    for ext, streams in candidates.items():
        abr, url = max(streams)  # highest bitrate wins (same pick as sorted()[-1])
        formats[ext] = {
            'ext': ext,
            'type': exttype[ext],
            'abr': abr,
            'url': url
        }
    return {
        'vidkey': vidkey,
        'url': denormalize(vidkey),
        'title': video['title'],
        'format': formats
    }
def fetchdata_soundcloud(vidkey):
    """Fetch title metadata for a SoundCloud track.

    Returns {'vidkey', 'url', 'title', 'format': {}} or None when
    extraction fails (no direct stream formats are exposed).
    """
    try:
        info = ydl.extract_info(denormalize(vidkey), download=False)
    except youtube_dl.utils.DownloadError:
        return None
    return {
        'vidkey': vidkey,
        'url': denormalize(vidkey),
        'title': info['title'],
        'format': {},
    }
def median(data):
    """Return the statistical median of a non-empty iterable of numbers.

    Odd-length input returns the middle element unchanged; even-length
    input returns the float mean of the two middle elements.
    """
    data = sorted(data)
    n = len(data)
    if n % 2:
        # bug fix: the original used '/', which yields a float index
        # (TypeError) on Python 3; '//' is identical on Python 2 ints
        return data[(n + 1) // 2 - 1]
    else:
        return (data[n // 2 - 1] + data[n // 2]) / 2.
class DataStore(object):
    """SQLite persistence layer for queue state, play history and chat logs.

    database -- sqlite3 connection string; defaults to a private in-memory DB.
    Rows are keyed by *dst* (the channel name).  Recall windows: queue state
    15 minutes, play history 7 days, chat 24 hours.
    """

    def __init__(self, database=':memory:'):
        self.db = db = sqlite3.connect(database)
        db.row_factory = sqlite3.Row  # allow row['column'] access
        with db:
            db.execute('CREATE TABLE IF NOT EXISTS queuestate (time REAL, dst TEXT, track TEXT, state BLOB)')
            db.execute('CREATE INDEX IF NOT EXISTS idx_queuestate ON queuestate (dst, time)')
            db.execute('CREATE UNIQUE INDEX IF NOT EXISTS idx_queuestate_unique ON queuestate (dst)')
            db.execute('CREATE TABLE IF NOT EXISTS play (time REAL, dst TEXT, track TEXT)')
            db.execute('CREATE INDEX IF NOT EXISTS idx_play ON play (dst, track, time)')
            db.execute('CREATE UNIQUE INDEX IF NOT EXISTS idx_play_unique ON play (dst, track)')
            db.execute('CREATE TABLE IF NOT EXISTS chat (time REAL, dst TEXT, src TEXT, snick TEXT, playing TEXT, body TEXT)')
            db.execute('CREATE INDEX IF NOT EXISTS idx_chat ON chat (dst, time)')

    def store_queuestate(self, dst, now, track, state):
        """Persist (replace) the pickled queue *state* for channel *dst*."""
        with self.db as db:
            db.execute('INSERT OR REPLACE INTO queuestate (time, dst, track, state) values (?, ?, ?, ?)', (
                now,
                dst,
                track,
                sqlite3.Binary(pickle.dumps(state, pickle.HIGHEST_PROTOCOL))
            ))

    def recall_queuestate(self, dst):
        """Return the queue state saved for *dst* within 15 minutes, else None."""
        r = self.db.cursor().execute('SELECT state FROM queuestate WHERE dst=? AND ?-time < 900 ORDER BY time DESC', (dst, time.time())).fetchone()
        if r is not None:
            # bug fix: str() over the BLOB broke unpickling on Python 3;
            # bytes() is correct there and is an alias of str on Python 2
            return pickle.loads(bytes(r['state']))

    def store_play(self, dst, track):
        """Record that *track* was played on channel *dst* (one row per pair)."""
        with self.db as db:
            db.execute('INSERT OR REPLACE INTO play (time, dst, track) values (?, ?, ?)', (
                time.time(),
                dst,
                track
            ))

    def recall_play(self, dst, limit=None):
        """Yield tracks played on *dst* within the last 7 days, newest first."""
        if limit is None:
            res = self.db.execute('SELECT * FROM play WHERE dst=? AND ?-time < 604800 ORDER BY time DESC', (dst, time.time()))
        else:
            res = self.db.execute('SELECT * FROM play WHERE dst=? AND ?-time < 604800 ORDER BY time DESC LIMIT ?', (dst, time.time(), limit))
        return (r['track'] for r in res)

    def random_play(self, dst):
        """Pick a random previously-played track for *dst*, biased to recency.

        Pass i widens the window to i weeks with a 50% chance per pass of
        sampling it; falls back to the whole history, and returns None when
        nothing has been played.
        """
        now = time.time()
        # bug fix: the original used xrange(), which raises NameError on
        # Python 3; range() behaves identically here on both versions
        for i in range(1, 5):
            if random.randint(0, 1):
                r = self.db.cursor().execute('SELECT track FROM play WHERE dst=? AND ?-time < ? ORDER BY RANDOM() LIMIT 1', (dst, now, i * 604800)).fetchone()
                if r is not None:
                    return r[0]
        r = self.db.cursor().execute('SELECT track FROM play WHERE dst=? ORDER BY RANDOM() LIMIT 1', (dst,)).fetchone()
        return None if r is None else r[0]

    def store_chat(self, payload):
        """Append one chat message; *payload* must carry time/dst/src/snick/playing/body keys."""
        with self.db as db:
            db.execute('INSERT INTO chat (time, dst, src, snick, playing, body) values (?, ?, ?, ?, ?, ?)', (
                payload['time'],
                payload['dst'],
                payload['src'],
                payload['snick'],
                payload['playing'],
                payload['body']
            ))

    def recall_chat(self, dst, limit=None):
        """Yield chat rows (as dicts) for *dst* from the last 24 h, newest first."""
        if limit is None:
            res = self.db.execute('SELECT * FROM chat WHERE dst=? AND ?-time < 86400 ORDER BY time DESC', (dst, time.time()))
        else:
            res = self.db.execute('SELECT * FROM chat WHERE dst=? AND ?-time < 86400 ORDER BY time DESC LIMIT ?', (dst, time.time(), limit))
        return (dict(r) for r in res)

    def recall_channel(self, limit=None):
        """Yield names of channels with plays in the last 7 days, newest first."""
        if limit is None:
            res = self.db.execute('SELECT DISTINCT dst FROM play WHERE ?-time < 604800 ORDER BY time DESC', (time.time(),))
        else:
            res = self.db.execute('SELECT DISTINCT dst FROM play WHERE ?-time < 604800 ORDER BY time DESC LIMIT ?', (time.time(), limit))
        return (r['dst'] for r in res)
class Playloop(object):
def __init__(self, datastore, name):
self.datastore = datastore
self.name = name
self.req = {}
self.count = Counter()
self.done = set()
self.queue = ()
self.threshold = 0
self.current = None
def __iter__(self):
return self
def store(self):
state = {
'current': self.current,
'done': self.done
}
self.datastore.store_queuestate(self.name, self.current['time'] if self.current is not None else time.time(), self.current['vidkey'] if self.current is not None else None, state)
def recall(self):
state = self.datastore.recall_queuestate(self.name)
if state is not None:
self.done = set(state['done'])
if state['current'] is not None:
self.current = dict(state['current'])
else:
self.current = None
def next(self):
for vidkey, freq in self.queue:
if freq < self.threshold:
continue
self.done.add(vidkey)
data = fetchdata(vidkey)
if data is not None:
self.rehash()
break
else:
vidkey = self.datastore.random_play(self.name)
if vidkey is None:
return
data = fetchdata(vidkey)
if data is None:
return
self.current = {
'vidkey': data['vidkey'],
'url': data['url'],
'title': data['title'],
'format': data['format'],
'requester': list(self.getkey(vidkey)),
'time': time.time(),
}
return self.current
def reset(self):
self.current = None
def rehash(self):
next = []
later = []
never = []
for f, h, k in sorted(((f, hash(k), k) for k, f in self.count.most_common()), reverse=True):
if f >= self.threshold:
if k in self.done:
later.append((k, f))
else:
next.append((k, f))
else:
never.append((k, f))
if len(next) < 1:
self.done.clear()
self.queue = tuple(next + later + never)
def request(self, key, value=None):
if value is not None:
value = set(value)
if key in self.req:
if self.req[key] != value:
self.count.subtract(self.req[key])
else:
return False
elif value is None:
return False
if value is not None:
self.count.update(value)
self.req[key] = value
else:
del self.req[key]
self.count += Counter()
self.threshold = median(i[1] for i in self.count.most_common()) if len(self.count) > 0 else 0
self.rehash()
return True
def getkey(self, value):
return (k for k, v in self.req.iteritems() if value in v)
class Channel(object):
def __init__(self, datastore, namespace, name):
self.datastore = datastore
self.namespace = namespace
self.name = name
self.sock = weakref.WeakSet()
self.participant = {}
self.nickname = {}
self.set_stopped = {}
self.quorum = 1
self.playloop = Playloop(self.datastore, self.name)
self.playloop.recall()
def request(self, sock=None, req=None):
current = self.playloop.current
if sock is not None and sock.session['userhash'] in self.participant:
if self.playloop.request(sock.session['userhash'], req):
self.emit('queue', {'queue': list(self.playloop.queue), 'threshold': self.playloop.threshold})
if current is None:
current = self.playloop.next()
if current is not None:
self.set_stopped.clear()
self.playloop.store()
self.emit('play', current)
self.emit('queue', {'queue': list(self.playloop.queue), 'threshold': self.playloop.threshold})
def stop(self, sock=None, vidkey=None, reason=None):
current = self.playloop.current
if sock is not None and current is not None and current['vidkey'] == vidkey and sock.session['userhash'] in self.participant:
self.set_stopped[sock.session['userhash']] = reason
if len(self.set_stopped) >= self.quorum:
if self.playloop.current is not None:
for i in self.set_stopped.itervalues():
if i == 'end':
self.datastore.store_play(self.name, current['vidkey'])
self.emit('played', current['vidkey'])
break
self.playloop.reset()
self.request()
def rehash_quorum(self):
try:
self.quorum = int(max(1, math.ceil(math.log(len(self.participant)))))
except ValueError:
self.quorum = 1
def join(self, sock):
if 'channel' in sock.session and sock.session['channel'] is not None:
sock.session['channel'].part(sock)
userhash = sock.session['userhash']
self.sock.add(sock)
if userhash not in self.participant:
self.participant[userhash] = weakref.WeakSet()
self.emit('join', {'id': userhash})
else:
self.emit_one(sock, 'join', {'id': userhash})
self.participant[userhash].add(sock)
self.rehash_quorum()
usernick = sock.session['usernick']
if userhash not in self.nickname or usernick != self.nickname[userhash]:
self.nickname[userhash] = usernick
self.emit('nick', {'id': userhash, 'nick': usernick})
else:
self.emit_one(sock, 'nick', {'id': userhash, 'nick': usernick})
sock.session['channel'] = self
self.emit_one(sock, 'nicks', self.nickname)
self.emit_one(sock, 'queue', {'queue': list(self.playloop.queue), 'threshold': self.playloop.threshold})
if self.playloop.current is not None:
self.emit_one(sock, 'play', self.playloop.current)
self.emit_one(sock, 'history', {
'play': list(self.datastore.recall_play(self.name, 16)),
'chat': list(self.datastore.recall_chat(self.name, 16))
})
return sock
def part(self, sock):
try:
self.sock.remove(sock)
sock.session['channel'] = None
userhash = sock.session['userhash']
try:
del self.set_stopped[userhash]
except KeyError:
pass
try:
self.participant[userhash].remove(sock)
if len(self.participant[userhash]) < 1:
del self.participant[userhash]
self.rehash_quorum()
del self.nickname[userhash]
self.emit('part', {'id': userhash})
if self.playloop.request(sock.session['userhash'], None):
self.emit('queue', {'queue': list(self.playloop.queue), 'threshold': self.playloop.threshold})
self.stop()
except KeyError:
pass
return sock
except KeyError:
return None
def nick(self, sock):
userhash = sock.session['userhash']
usernick = sock.session['usernick']
if userhash in self.participant and usernick != self.nickname[userhash]:
self.nickname[userhash] = usernick
self.emit('nick', {'id': userhash, 'nick': usernick})
def chat(self, sock, body):
if sock is not None and sock.session['userhash'] in self.participant:
userhash = sock.session['userhash']
msg = {'time': time.time(), 'dst': self.name, 'src': userhash, 'snick': self.nickname[userhash], 'playing': None if self.playloop.current is None else self.playloop.current['vidkey'], 'body': body}
self.emit('chat', msg)
self.datastore.store_chat(msg)
def emit(self, event, args):
pkt = {
'type': 'event',
'name': event,
'args': args,
'endpoint': self.namespace
}
for sock in self.sock:
sock.send_packet(pkt)
def emit_one(self, sock, event, args):
sock.send_packet({
'type': 'event',
'name': event,
'args': args,
'endpoint': self.namespace
})
class SocketManager(BaseNamespace):
datastore = DataStore('./data.sqlite')
channel = weakref.WeakValueDictionary()
def initialize(self):
self.session['channel'] = None
def channel_join(self, name):
channel = None
if name not in self.channel:
channel = self.channel[name] = Channel(self.datastore, self.ns_name, name)
self.channel[name].join(self.socket)
def channel_part(self):
try:
self.session['channel'].part(self.socket)
except (KeyError, AttributeError):
pass
def recv_disconnect(self):
self.channel_part()
def on_user(self, msg):
if 'userhash' not in self.session:
self.session['userhash'] = hashhash(msg['cid'])
self.session['usernick'] = msg['nick']
self.emit('user', {'id': self.session['userhash']})
def on_nick(self, msg):
if 'userhash' in self.session:
self.session['usernick'] = msg['nick']
try:
channel = self.session['channel']
if channel is None:
return
except KeyError:
return
channel.nick(self.socket)
def on_join(self, msg):
if 'userhash' in self.session:
self.channel_join(msg)
def on_tdelta(self, msg):
self.emit('tdelta', time.time() - msg)
def on_request(self, msg):
if 'userhash' in self.session:
try:
channel = self.session['channel']
if channel is None:
return
except KeyError:
return
req = set(vidkey for vidkey in msg if isinstance(vidkey, basestring) and filter_vidkey(vidkey))
channel.request(self.socket, req)
def on_stop(self, msg):
if 'userhash' in self.session:
try:
channel = self.session['channel']
if channel is None:
return
except KeyError:
return
channel.stop(self.socket, msg['vidkey'], msg['reason'])
def on_chat(self, msg):
if 'userhash' in self.session:
try:
channel = self.session['channel']
if channel is None:
return
except KeyError:
return
channel.chat(self.socket, msg['body'])
def appfactory():
app = Bottle()
app.debug = True
@app.route('/c/<channel>')
@app.route('/')
def cb(channel=None):
return static_file('index.htm', root='./www')
@app.get('/socket.io/socket.io.js')
def cb():
return static_file('socket.io/socket.io.js', root='./www')
@app.route('/a/recentchannels')
def cb():
return {
'channels': list(SocketManager.datastore.recall_channel(8))
}
@app.get('/socket.io')
@app.get('/socket.io/')
@app.get('/socket.io/<path:path>')
def cb(path=None):
socketio_manage(bottle.request.environ, {'/channel': SocketManager}, bottle.request)
@app.route('/<path:path>')
def cb(path):
return static_file(path, root='./www')
return app
if __name__ == "__main__":
bottle.run(
app=appfactory(),
host='',
port=8100,
server='geventSocketIO',
debug=True,
reloader=True,
)
| 30.235507 | 200 | 0.666028 | 2,359 | 16,690 | 4.662569 | 0.136499 | 0.024002 | 0.01391 | 0.009546 | 0.368761 | 0.306846 | 0.281662 | 0.237022 | 0.215292 | 0.199836 | 0 | 0.009499 | 0.179988 | 16,690 | 551 | 201 | 30.290381 | 0.794169 | 0.001198 | 0 | 0.309021 | 0 | 0.003839 | 0.165337 | 0.009359 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109405 | false | 0.005758 | 0.036468 | 0.013436 | 0.259117 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6136fde30ad02b90e794e3bd0cef50c81f080ec1 | 501 | py | Python | 8_2.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | [
"MIT"
] | null | null | null | 8_2.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | [
"MIT"
] | null | null | null | 8_2.py | rursvd/pynumerical2 | 4b2d33125b64a39099ac8eddef885e0ea11b237d | [
"MIT"
] | 1 | 2019-12-03T01:34:19.000Z | 2019-12-03T01:34:19.000Z | from numpy import zeros, sign
# Define bisection function
def bisection(f,a,b,n):
c = zeros(n)
for i in range(n):
c[i] = (a + b)/2.0
if sign(f(c[i])) == sign(f(a)):
a = c[i]
else:
b = c[i]
return c
# Define function
def f(x):
return -x**2 + 6.0 * x - 5.0
# Execute bisection function
a = -2.0
b = 3.0
n = 7
xb = bisection(f,a,b,n)
# Print results
print("%5s %8s" % ('k','c'))
for k in range(n):
print("%5d %9.4f" % (k+1,xb[k]))
| 17.892857 | 39 | 0.497006 | 95 | 501 | 2.621053 | 0.410526 | 0.032129 | 0.088353 | 0.096386 | 0.104418 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051724 | 0.305389 | 501 | 27 | 40 | 18.555556 | 0.663793 | 0.163673 | 0 | 0 | 0 | 0 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0.052632 | 0.263158 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61382bc4afdbca2e8ccf4130880a465632c1b519 | 1,457 | py | Python | asdc/main.py | jancervenka/airbus-ship-detection | 87cb1c786182afa248a324f65b23153aa998e6ae | [
"MIT"
] | 3 | 2020-06-02T11:46:24.000Z | 2020-12-15T23:30:51.000Z | asdc/main.py | jancervenka/airbus-ship-detection | 87cb1c786182afa248a324f65b23153aa998e6ae | [
"MIT"
] | 2 | 2021-08-25T14:50:24.000Z | 2021-11-10T19:57:14.000Z | asdc/main.py | jancervenka/airbus-ship-detection | 87cb1c786182afa248a324f65b23153aa998e6ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, Jan Cervenka
import argparse
import asdc.core
import asdc.service
from . import version
def _create_arg_parser():
"""
Creates the CLI argument parser.
"""
parser = argparse.ArgumentParser(conflict_handler='resolve',
description='asdc')
parser.add_argument('-v', '--version', action='version',
version='version: "{0}"'.format(version.__version__))
subparsers = parser.add_subparsers()
parser_service = subparsers.add_parser('service')
parser_service.add_argument('-d', '--debug', action='store_const',
default=False, const=True, dest='debug',
help='app debug mode')
parser_service.add_argument('-m', '--model', type=str, required=True,
dest='model', help='Path to the model h5 file.')
parser_training = subparsers.add_parser('training')
parser_training.add_argument('-c', '--config', type=str, required=True,
dest='config', help='path to the config file')
parser_service.set_defaults(func=asdc.service.run_service)
parser_training.set_defaults(func=asdc.core.run_training)
return parser
def main():
"""
Runs the ASDC system.
"""
args = _create_arg_parser().parse_args()
args.func(args)
if __name__ == '__main__':
main()
| 28.568627 | 80 | 0.602608 | 162 | 1,457 | 5.179012 | 0.438272 | 0.077473 | 0.035757 | 0.057211 | 0.054827 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006518 | 0.262869 | 1,457 | 50 | 81 | 29.14 | 0.774674 | 0.079616 | 0 | 0 | 0 | 0 | 0.140673 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.148148 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6138630a4dc2a9c9e1dbd63c0ab09714aa15f8bf | 1,612 | py | Python | service_lib/database.py | qwc-services/sogis-agdi | f278612c42f648da07448905f2b8021b279e66bc | [
"MIT"
] | null | null | null | service_lib/database.py | qwc-services/sogis-agdi | f278612c42f648da07448905f2b8021b279e66bc | [
"MIT"
] | null | null | null | service_lib/database.py | qwc-services/sogis-agdi | f278612c42f648da07448905f2b8021b279e66bc | [
"MIT"
] | 1 | 2021-04-15T09:54:49.000Z | 2021-04-15T09:54:49.000Z | from sqlalchemy import create_engine
import os
class DatabaseEngine():
"""Helper for database connections using SQLAlchemy engines"""
def __init__(self):
"""Constructor"""
self.engines = {}
def db_engine(self, conn_str, service_suffix=None):
"""Return engine."""
# conn_str:
# http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
if service_suffix:
# Append suffix to service name, e.g
# postgresql:///?service=sogis_services
# ->
# postgresql:///?service=sogis_services_write
conn_str += service_suffix
engine = self.engines.get(conn_str)
if not engine:
engine = create_engine(
conn_str, pool_pre_ping=True, echo=False)
self.engines[conn_str] = engine
return engine
def db_engine_env(self, env_name, default=None):
"""Return engine configured in environment variable."""
conn_str = os.environ.get(env_name, default)
if conn_str is None:
raise Exception(
'db_engine_env: Environment variable %s not set' % env_name)
return self.db_engine(conn_str)
def geo_db(self):
"""Return engine for default GeoDB."""
return self.db_engine_env('GEODB_URL',
'postgresql:///?service=sogis_services')
def config_db(self):
"""Return engine for default ConfigDB."""
return self.db_engine_env('CONFIGDB_URL',
'postgresql:///?service=soconfig_services')
| 33.583333 | 77 | 0.598015 | 181 | 1,612 | 5.093923 | 0.375691 | 0.06833 | 0.047722 | 0.097614 | 0.106291 | 0.060738 | 0 | 0 | 0 | 0 | 0 | 0 | 0.298387 | 1,612 | 47 | 78 | 34.297872 | 0.815208 | 0.246898 | 0 | 0 | 0 | 0 | 0.122345 | 0.065421 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.076923 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6139d3a7a69668275919aa5180c16fec38121a2e | 4,681 | py | Python | twitterbot/quoteproviders.py | HashCollision/TwitterBot | a5214cf7e607b633bf2757b0f95ecce722f84b71 | [
"Apache-2.0"
] | 2 | 2016-05-24T00:14:24.000Z | 2016-05-24T00:23:35.000Z | twitterbot/quoteproviders.py | HashCollision/TwitterBot | a5214cf7e607b633bf2757b0f95ecce722f84b71 | [
"Apache-2.0"
] | null | null | null | twitterbot/quoteproviders.py | HashCollision/TwitterBot | a5214cf7e607b633bf2757b0f95ecce722f84b71 | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
import requests, pickle
from abc import ABCMeta, abstractmethod
from bs4 import BeautifulSoup
from utilities import *
from random import randint
from threading import Thread
# Tuple object
Quote = namedtuple('Quote', ['text', 'author'])
class QuoteException(Exception):
def __init__(self, message):
super().__init__(self, message)
# Base abstract class
class QuoteProvider:
__metaclass__ = ABCMeta
def __init__(self, filename='quotes.txt', url=None):
self.url = url
self.filename = filename
self.quotes = list()
# Public API
def save(self, quote):
''' Saves a quote object in a pickle file '''
try:
with open(self.filename, 'ab') as quotefile:
pickle.dump(quote, quotefile)
return True
except Exception as err:
raise QuoteException("Could not save quote!\nErr: %s" % err)
return False
def exists(self, quote):
''' Checks if a quote object exists in a pickle file '''
try:
with open(self.filename, 'rb') as quotefile:
while True:
data = pickle.load(quotefile)
if quote == data:
return True
except:
pass
return False
def randomize(self):
''' Return a random quote from the list '''
if len(self.quotes) > 0:
number = randint(0, len(self.quotes) - 1)
return self.quotes[number]
@abstractmethod
def load(self):
''' Function that must be overwritten in sub-classes, it handles loading all the quotes into 'self.quotes' '''
return
# Private API
@abstractmethod
def __parse__(self, input):
''' Function that must be overwritten in sub-classes, it handles parsing the return output from 'self.html' '''
return
@abstractmethod
def __fetch__(self, url):
''' abstract method that handles fetching data and adding to 'self.quotes' '''
pass
def __request__(self, url):
''' Make a GET request on a specific uri and return all the response from said GET request. '''
url = url or self.url
if not url or not Utilities.validate_uri(url):
raise QuoteException("Url not valid!")
r = requests.get(url)
if r.status_code == 200:
return r.text
else:
raise QuoteException("%s could not return quotes!" % self.url)
def __html__(self, html):
''' Return a BeautifulSoup object from a given text string '''
if not html:
raise QuoteException("No html arg!")
try:
return BeautifulSoup(html)
except Exception as err:
raise QuoteException('Could not parse text into BeautifulSoup!')
# Subclass
class GoodreadQuote(QuoteProvider):
def __init__(self):
return super().__init__(url='')
def __parse__(self, input):
return
def load(self):
return
def __fetch__(self, url):
return
# Subclass
class BrainyQuote(QuoteProvider):
def __init__(self):
super().__init__(url='http://www.brainyquote.com/quotes/keywords/list%s.html')
# Overwritten
def __parse__(self, input):
try:
if not input:
raise QuoteException("Can't parse input!")
# find all divs with correct class
for div in [ x for x in input.find_all('div', attrs={'class': 'boxyPaddingBig'}) ]:
# get text and author
text, auth = [ y for y in div.text.split('\n') if y != '"' and y ]
yield (text, auth)
except Exception as err:
raise QuoteException("Can't parse input!\nErr: %s" % err)
def load(self):
''' Load all data in a multi threaded env '''
threads = []
for i in range(14): # 13 pages
url = self.url % ('_{0}'.format(i) if i > 0 else '')
t = Thread(target=self.__fetch__, args=(url,))
threads.append(t)
t.start()
for thread in threads:
thread.join()
def __fetch__(self, url):
''' Utilizes all methods to fetch the data from pre specfied configuration '''
# GET request for data
data = self.__request__(url)
# Change into HTML
html = self.__html__(data)
# Parse html and iterate
for data in self.__parse__(html):
text, auth = data
quote = Quote(text, auth)
self.quotes.append(quote)
| 28.369697 | 119 | 0.574023 | 549 | 4,681 | 4.741348 | 0.300546 | 0.021514 | 0.016904 | 0.02305 | 0.135229 | 0.135229 | 0.10219 | 0.10219 | 0.066078 | 0.038417 | 0 | 0.004149 | 0.330699 | 4,681 | 164 | 120 | 28.542683 | 0.826684 | 0.186712 | 0 | 0.316832 | 0 | 0 | 0.075027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178218 | false | 0.019802 | 0.069307 | 0.039604 | 0.425743 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
613b28a17ffdfc4fef538068198f63de8c5ff4b3 | 1,085 | py | Python | day9.py | dos1/AoC21 | 9095b96b831aac76cb9f0ce06e3f639db2da3977 | [
"MIT"
] | null | null | null | day9.py | dos1/AoC21 | 9095b96b831aac76cb9f0ce06e3f639db2da3977 | [
"MIT"
] | null | null | null | day9.py | dos1/AoC21 | 9095b96b831aac76cb9f0ce06e3f639db2da3977 | [
"MIT"
] | null | null | null | data = [list(map(int,line.strip())) for line in open("inputday9")]
def fieldVal(f):
i,j = f
return data[i][j]
def adjacent(field):
fields = []
i, j = field
if i > 0:
fields.append((i-1, j))
if j > 0:
fields.append((i, j-1))
if i < len(data) - 1:
fields.append((i+1, j))
if j < len(data[0]) - 1:
fields.append((i, j+1))
return fields
def minimum(fields):
result = (None, 10)
for field in fields:
val = fieldVal(field)
if val < result[1]:
result = (field, val)
return result[0]
def basin(field, encountered):
size = 1
encountered.append(field)
for adj in adjacent(field):
if adj not in encountered and fieldVal(adj) < 9:
size += basin(adj, encountered)
return size
lowpoints = []
for i in range(len(data)):
for j in range(len(data[i])):
field = (i,j)
if fieldVal(field) < fieldVal(minimum(adjacent(field))):
lowpoints.append(field)
basins = sorted([basin(field, []) for field in lowpoints])
print(sum([1 + fieldVal(field) for field in lowpoints]),
basins[-1] * basins[-2] * basins[-3])
| 23.085106 | 66 | 0.61106 | 169 | 1,085 | 3.923077 | 0.260355 | 0.0181 | 0.078431 | 0.042232 | 0.171946 | 0.054299 | 0.054299 | 0 | 0 | 0 | 0 | 0.023725 | 0.223041 | 1,085 | 46 | 67 | 23.586957 | 0.762752 | 0 | 0 | 0 | 0 | 0 | 0.008295 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0 | 0 | 0.205128 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
613ca0e7d669ae62d1b3a594a9cc5a7023782a4e | 8,879 | py | Python | Movement/Hexapod_orig.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | 7 | 2021-03-15T10:06:20.000Z | 2022-03-23T02:53:15.000Z | Movement/Hexapod_orig.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | null | null | null | Movement/Hexapod_orig.py | IanSMoyes/SpiderPi | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Copyright HiWonder.hk
# Further development by ians.moyes@gmail.com
# Translation by Google
# Library to control the hexapod using reverse kinematics of
# lower servo values on the port side lead to
# forward positions at the shoulder
# Lifting the knee
# Dropping the ankle
# Lower servo values on the starboard side lead to
# Rearward positions at the shoulder
# Dropping the knee
# Lifting the ankle
import math # Standard library of mathematical functions
import time # Standard library of time & date functions
from LegClass import Leg # Class to define & control a hexapod leg with 3 degrees of freedom
# import PTHeadCtrl as PTH # Library to define & control a Pan & Tilt Head
SpiderPi = ()
leg_names = ("Port rear", "Port centre", "Port front", "Starboard rear", "Starboard centre", "Starboard front")
# Define hexapod
def create_hexapod(): # Ceate the hexapod from 6 legs @ a head
global SpiderPi # Make sure you can access the hexapod from anywhere in the code
for id in range(6):
SpiderPi += (Leg(id),) # Add 6 legs. 0 indexed
PTH.inithead() # Create the pan & tilt head
time.sleep(1) # Pause until the hexapod finishes
# Stance variables
default_pos = (100,100,-70) # Initial stnding position
sit_pos = (100.0, 100.0, 20.0) # Belly flop
lift_pos = (100, 100, -40) # Legs lifted
tall_pos = (100, 100, -120) # Stand tall
def standby(leg, position, tim):
'''
输入腿的编号和腿的足端坐标,控制腿的运动
Enter the number of the leg and the coordinates of the foot to control the movement of the leg
param:leg: 0~5
param: position:数组,存放足端的坐标 Tuple to store the coordinates of the foot (X, Y, Z)
param: tim: 运行该动作的速度 time to destination
'''
global SpiderPi # Bring the hexapod with us
angle = () # Angles of the 3 elements of inverse kinematics
output = [] # new shoulder, knee & ankle servo positions
# Lengths of the 3 elements of inverse kinemetics
thigh = 44.60
calf = 75.00
foot = 126.50
factor = 180 / math.pi / 0.24 # Cnvert degrees to radians
angle += (math.atan(position[1]/position[0]),) # anti tangent (Y / X)
# Append shoulder joint angle
L = position[1] / math.sin(angle[0]) # Y / sine (shoulder angle)
temp = (position[2] ** 2) + ((L - thigh) ** 2) # Z squared + (L - thigh length ) squared
ft = math.sqrt(temp) # square root of temp
a = math.atan(position[2] / (L - thigh)) # anti tangent (Z / (L - thigh))
b = math.acos(((calf ** 2) + (ft ** 2) - (foot ** 2)) / (2 * calf * ft))
angle += ((a + b),) # Knee joint angle
# Ankle joint angle
angle += (math.acos(((ft ** 2) - (calf ** 2) - (foot ** 2)) / (2 * calf * foot)),)
if leg < 3: # Port side of the hexapod
output += (int(313 + angle[0] * factor), )
output += (int(500 - angle[1] * factor), )
output += (int(687 - angle[2] * factor - 5), )
else: # Starboard side of the hexapod
output += (int(687 - angle[0] * factor), )
output += (int(500 + angle[1] * factor), )
output += (int(313 + angle[2] * factor + 5),)
# Move each of the servos on this leg
SpiderPi[leg].standby_pos = (output, tim)
def trigger():
'''
Triggers hexapod movement
:param:
:return: True = success or error code
'''
oksofar = True
for id in range (6):
oksofar = SpiderPi[id].trigger
if oksofar != True: return oksofar
return oksofar
def pivot(angle, speed):
'''
Turn the hexapod on it's centre point. A static pivot.
param: angle: 为正时,右转, 为负时,左转 When +, turn right, -, turn left
一个完整的转向周期所旋转的角度是angle*2
The angle rotated by a complete turning cycle is angle*2
所以检测到的角度要先除以2再传入
So the detected angle must be divided by 2 before using it
param: speed: 完成转向所用的毫秒数,最快建议不要小于100ms
The number of milliseconds used to complete the turn,
the fastest suggestion is >=100ms
:return: True = success or error code
'''
if angle >= 23:
angle = 23
# print('R')
elif angle <= -23:
angle = -23
# print('L')
leg0 = toe_coord(0, angle)
leg1 = toe_coord(1, -angle)
leg2 = toe_coord(2, angle)
leg3 = toe_coord(3, -angle)
leg4 = toe_coord(4, angle)
leg5 = toe_coord(5, -angle)
standby(0, leg0, 2 * speed)
standby(1, lift_pos, speed)
standby(2, leg2_pos, 2 * speed)
standby(3, lift, speed)
standby(4, leg4, 2 * speed)
standby(5, lift_pos, speed)
trigger() # Trigger the movement
time.sleep(speed * 0.001)
standby(1, leg1, speed)
standby(3, leg3, speed)
standby(5, leg5, speed)
trigger() # Trigger the movement
time.sleep(speed * 0.001)
leg0 = toe_coord(0, -angle)
leg1 = toe_coord(1, angle)
leg2 = toe_coord(2, -angle)
leg3 = toe_coord(3, angle)
leg4 = toe_coord(4, -angle)
leg5 = toe_coord(5, angle)
standby(0, lift_pos, speed)
standby(1, leg1, 2 * speed)
standby(2, lift_pos, speed)
standby(3, leg3, 2 * speed)
standby(4, lift_pos, speed)
standby(5, leg5, 2 * speed)
trigger() # Trigger the movement
time.sleep(speed * 0.001)
standby(0, leg0, speed)
standby(2, leg2, speed)
standby(4, leg4, speed)
trigger() # Trigger the movement
time.sleep(speed * 0.001)
# angle:为正时,足端逆时针旋转 When +, the foot rotates counterclockwise
# 为负时,足端顺时针旋转 When -, the foot rotates clockwise
def toe_coord(leg, angle):
'''
Takes an angle in the X axis & returns X & Y coordinates for the toe
param:leg: 0~5. 0 indexed
param: angle: 为正时,足端逆时针旋转 为负时,足端顺时针旋转
turn angle + turn to port, - turn to starboard
'''
# Takes an angle in the X axis &
# converts it to X & Y coordinates a foot
angle = angle * math.pi / 180 # 角度制转弧度制 Angle to radians
R = 271.5
RM = 232.5 # Middle legs just pivot, corner legs step
base_angle_FB = 0.9465
base_angle_M = 0.7853
if leg == 0:
x = R * math.cos(base_angle_FB + angle) - 58.5
y = R * math.sin(base_angle_FB + angle) - 120.0
elif leg == 1:
x = RM * math.cos(base_angle_M + angle) - 64.70
y = RM * math.sin(base_angle_M + angle) - 64.70
elif leg == 2:
x = R * math.sin(base_angle_FB - angle) - 120.0
y = R * math.cos(base_angle_FB - angle) - 58.5
elif leg == 3:
x = R * math.cos(base_angle_FB - angle) - 58.5
y = R * math.sin(base_angle_FB - angle) - 120.0
elif leg == 4:
x = RM * math.cos(base_angle_M - angle) - 64.70
y = RM * math.sin(base_angle_M - angle) - 64.70
elif leg == 5:
x = R * math.sin(base_angle_FB + angle) - 120.0
y = R * math.cos(base_angle_FB + angle) - 58.5
else:
x = 100
y = 100
return [x, y, -70] # -70 is normal stance height
def init():
'''
Initialise the hexapod
param:
:return: True = success or error code
'''
for leg in range(6): # For all the legs
standby(leg, default_pos, 1000) # Move them to the default position
trigger() # Trigger the movement
return True
def sit():
'''Function causes the hexapod to withdraw it's legs and rest on it's belly
param:
return: True = complete
'''
for leg in range(6): # For all legs
standby(leg, sit_pos, 500) # Withdraw legs over 1 second
trigger() # Trigger the movement
time.sleep(0.5)
unload()
return True
def position(preset):
'''Function causes the hexapod to adopt a preset position
param:
return: True = complete
'''
for leg in range(6): # For all legs
standby_leg(leg, preset, 500) # Move legs over 0.5 seconds
trigger() # Trigger the movement
time.sleep(0.5)
return True
def unload():
'''
Unload all of the servos in the hexapod
param:
:return: True = success or error code
'''
global SpiderPi # Bring the hexapod with us
for leg in range(6): # For all the legs
SpiderPi[leg].unload # Unload the leg
time.sleep(0.5)
return
def diag():
global SpiderPi
for leg in range(6): # For all the legs
print(leg_names[leg], "leg")
print(SpiderPi[leg].offset) # report offsets
print(SpiderPi[leg].rotation_limits) # report rotation limits
print(SpiderPi[leg].pos) # report position
print(SpiderPi[leg].load_mode) # report loaded/unloaded
print(SpiderPi[leg].vin_limits) # report vin limits
print(SpiderPi[leg].vin) # report vin
print(SpiderPi[leg].temp_limit) # report temperature alarm limit
print(SpiderPi[leg].temp) # report temperature
print("Diagnostics complete")
return
if __name__ == '__main__':
create_hexapod()
print("Hexapod under test.")
diag()
| 31.154386 | 112 | 0.615159 | 1,293 | 8,879 | 4.169374 | 0.238206 | 0.031163 | 0.018364 | 0.023743 | 0.328881 | 0.282693 | 0.259692 | 0.232424 | 0.219069 | 0.188091 | 0 | 0.048452 | 0.272441 | 8,879 | 284 | 113 | 31.264085 | 0.786068 | 0.403762 | 0 | 0.201258 | 0 | 0 | 0.025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.062893 | false | 0 | 0.018868 | 0 | 0.125786 | 0.069182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
613de78cdbae49064c79b2114c044757966e3e48 | 3,474 | py | Python | services/keyphrase/s3io.py | etherlabsio/ai-engine | e73a4419a34db42a410e2a7e7629eb946b86f2c2 | [
"MIT"
] | null | null | null | services/keyphrase/s3io.py | etherlabsio/ai-engine | e73a4419a34db42a410e2a7e7629eb946b86f2c2 | [
"MIT"
] | null | null | null | services/keyphrase/s3io.py | etherlabsio/ai-engine | e73a4419a34db42a410e2a7e7629eb946b86f2c2 | [
"MIT"
] | 1 | 2020-04-19T11:07:42.000Z | 2020-04-19T11:07:42.000Z | import logging
from timeit import default_timer as timer
from pathlib import Path
import os
logger = logging.getLogger(__name__)
class S3IO(object):
# S3 storage utility functions
def __init__(self, s3_client, graph_utils_obj, utils):
self.s3_client = s3_client
self.gutils = graph_utils_obj
self.utils = utils
def upload_s3(
self, graph_obj, context_id, instance_id, s3_dir, file_format=".pickle"
):
graph_id = graph_obj.graph.get("graphId")
if graph_id == context_id + ":" + instance_id:
serialized_graph_string = self.gutils.write_to_pickle(graph_obj=graph_obj)
s3_key = context_id + s3_dir + graph_id + file_format
resp = self.s3_client.upload_object(
body=serialized_graph_string, s3_key=s3_key
)
if resp:
return True
else:
return False
else:
logger.error(
"graphId and context info not matching",
extra={
"graphId": graph_id,
"contextInfo": context_id + ":" + instance_id,
},
)
return False
def download_s3(self, context_id, instance_id, s3_dir, file_format=".pickle"):
start = timer()
graph_id = context_id + ":" + instance_id
s3_path = context_id + s3_dir + graph_id + file_format
file_obj = self.s3_client.download_file(file_name=s3_path)
file_obj_bytestring = file_obj["Body"].read()
graph_obj = self.gutils.load_graph_from_pickle(byte_string=file_obj_bytestring)
end = timer()
logger.info(
"Downloaded graph object from s3",
extra={
"graphId": graph_obj.graph.get("graphId"),
"nodes": graph_obj.number_of_nodes(),
"edges": graph_obj.number_of_edges(),
"responseTime": end - start,
},
)
return graph_obj
def upload_npz(self, context_id, instance_id, feature_dir, npz_file_name):
s3_path = (
context_id
+ feature_dir
+ instance_id
+ "/features/segments/"
+ npz_file_name
)
self.s3_client.upload_to_s3(file_name=npz_file_name, object_name=s3_path)
# Once uploading is successful, check if NPZ exists on disk and delete it
local_npz_path = Path(npz_file_name).absolute()
if os.path.exists(local_npz_path):
os.remove(local_npz_path)
return s3_path
def upload_validation(
self, context_id, instance_id, feature_dir, validation_file_name
):
s3_path = (
context_id
+ feature_dir
+ instance_id
+ "/validation/"
+ validation_file_name
)
self.s3_client.upload_to_s3(file_name=validation_file_name, object_name=s3_path)
# Once uploading is successful, check if NPZ exists on disk and delete it
local_path = Path(validation_file_name).absolute()
if os.path.exists(local_path):
os.remove(local_path)
return s3_path
def download_npz(self, npz_file_path):
    """Download an NPZ object from S3 and deserialize it.

    Parameters
    ----------
    npz_file_path : str
        full S3 key of the NPZ object

    Returns
    -------
    the deserialized NPZ content
    """
    response = self.s3_client.download_file(file_name=npz_file_path)
    raw_bytes = response["Body"].read()
    return self.utils.deserialize_from_npz(raw_bytes)
| 32.166667 | 88 | 0.603051 | 427 | 3,474 | 4.5363 | 0.208431 | 0.049561 | 0.043366 | 0.068663 | 0.447083 | 0.384099 | 0.357253 | 0.32318 | 0.255034 | 0.173464 | 0 | 0.012616 | 0.315486 | 3,474 | 107 | 89 | 32.46729 | 0.801934 | 0.049511 | 0 | 0.214286 | 0 | 0 | 0.056095 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.047619 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
614370770bf4f2125beed54d00eade84072999da | 5,242 | py | Python | tests/unit_tests/test_solute_tempering.py | MauriceKarrenbrock/HREMGromacs | 3741820bee466ae3b4a69a8241c0905b5beffe0c | [
"MIT"
] | null | null | null | tests/unit_tests/test_solute_tempering.py | MauriceKarrenbrock/HREMGromacs | 3741820bee466ae3b4a69a8241c0905b5beffe0c | [
"MIT"
] | null | null | null | tests/unit_tests/test_solute_tempering.py | MauriceKarrenbrock/HREMGromacs | 3741820bee466ae3b4a69a8241c0905b5beffe0c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=wildcard-import
# pylint: disable=unused-wildcard-import
# pylint: disable=no-self-use
#############################################################
# Copyright (c) 2021-2021 Maurice Karrenbrock #
# #
# This software is open-source and is distributed under the #
# MIT License #
#############################################################
from pathlib import Path
from unittest.mock import MagicMock
import pytest
import HREMGromacs.solute_tempering as _sol
class Testscale_topology_with_plumed():
    """Unit tests for ``solute_tempering.scale_topology_with_plumed``."""

    def test_ValueError(self):
        # Identical input/output topology paths must be rejected
        # before any external program is invoked.
        with pytest.raises(ValueError):
            _sol.scale_topology_with_plumed('input', 'input', 1.0)

    def test_RuntimeError(self, mocker, tmp_path):
        # Make the plumed executable resolve without touching the system.
        m_abspath = mocker.patch(
            'PythonAuxiliaryFunctions.path.absolute_programpath',
            return_value='plumed')

        # Fake subprocess result that should make the function raise.
        # NOTE(review): ``returncode`` on a subprocess result is an
        # attribute, not a callable; setting ``.return_value`` on the
        # MagicMock attribute only matters if the code under test calls
        # ``returncode()`` — confirm the intent.
        run_output = MagicMock(name='mock_output')
        run_output.returncode.return_value = 1
        m_run = mocker.patch('subprocess.run', return_value=run_output)

        # Real (distinct) files so the path validation passes.
        input_top = tmp_path / 'input.top'
        input_top.write_text('a')
        output_top = tmp_path / 'output.top'
        output_top.write_text('a')

        with pytest.raises(RuntimeError):
            _sol.scale_topology_with_plumed(input_top, output_top, 1.0)

        m_abspath.assert_called_once_with('plumed')
        m_run.assert_called_once()
class Testpreprocess_topology():
    """Unit tests for ``solute_tempering.preprocess_topology``."""

    def test_works(self, mocker):
        # Patch the subprocess helper and the gmx path resolution so
        # no real GROMACS installation is needed.
        m_run = mocker.patch('PythonAuxiliaryFunctions.run.subprocess_run')
        m_abspath = mocker.patch(
            'PythonAuxiliaryFunctions.path.absolute_programpath',
            return_value='gmx')

        _sol.preprocess_topology(input_top_file='i_top',
                                 output_top_file='o_top',
                                 gro_file='gro',
                                 mdp_file='mdp',
                                 gmx_path='gmx')

        # Expected grompp command line (-pp writes the preprocessed topology).
        commands = [
            'gmx', 'grompp', '-f', 'mdp', '-c', 'gro', '-p', 'i_top',
            '-maxwarn', '100', '-pp', 'o_top'
        ]

        m_abspath.assert_called_once_with('gmx')
        m_run.assert_called_once_with(commands,
                                      error_string='grompp -pp failed',
                                      shell=False)
class Testgeometrical_progression():
    """Unit tests for ``solute_tempering.geometrical_progression``."""

    def test_works(self):
        # The generator should yield a geometric ladder from 1 down to
        # ``basis`` in ``denom + 1`` values.
        generator = _sol.geometrical_progression(basis=0.2, denom=7)
        expected_output = (1., 0.7945974047018523, 0.6313850355589192,
                           0.5016969106227039, 0.3986470631277377,
                           0.3167639217533158, 0.25169979012836535, 0.2)
        # NOTE(review): exact ``==`` on floats assumes the generator
        # reproduces these exact binary values on every platform — it
        # works only if the same expression computes both sides; confirm.
        for expected in expected_output:
            assert next(generator) == expected
class Testprepare_topologies_for_hrem():
    """Unit tests for ``solute_tempering.prepare_topologies_for_hrem``."""

    def test_works(self, mocker):
        # Scaling factors the mocked geometrical progression will yield,
        # one per replica.
        scaling_values = (1., 0.5, 0.2)

        def _scaling_values_generator():
            for i in scaling_values:
                yield i

        scaling_values_generator = _scaling_values_generator()

        # Patch out every collaborator so that only the orchestration
        # logic of prepare_topologies_for_hrem is exercised.
        m_preprocess_topology = mocker.patch(
            'HREMGromacs.solute_tempering.preprocess_topology')

        m_edit_preprocessed_top = mocker.patch(
            'HREMGromacs.solute_tempering.edit_preprocessed_top')

        m_geometrical_progression = mocker.patch(
            'HREMGromacs.solute_tempering.geometrical_progression',
            return_value=scaling_values_generator)

        m_scale_topology_with_plumed = mocker.patch(
            'HREMGromacs.solute_tempering.scale_topology_with_plumed')

        input_top = 'test.top'

        # One scaled topology per replica is expected on output.
        expected_output = [
            Path(i).resolve() for i in
            ['test_scaled_0.top', 'test_scaled_1.top', 'test_scaled_2.top']
        ]

        output = _sol.prepare_topologies_for_hrem(top_file=input_top,
                                                  resSeq_to_scale=(1, 2, 3),
                                                  mdp_file='test.mdp',
                                                  gro_file='test.gro',
                                                  number_of_replicas=3)

        assert output == expected_output

        # The topology must first be preprocessed into a temporary file...
        m_preprocess_topology.assert_called_once_with(
            input_top_file=Path(input_top).resolve(),
            output_top_file=Path('TMP_elaborated_top.top').resolve(),
            gro_file=Path('test.gro').resolve(),
            mdp_file=Path('test.mdp').resolve(),
            gmx_path='gmx')

        # ...then edited in place for the residues to scale...
        m_edit_preprocessed_top.assert_called_once_with(
            input_top_file=Path('TMP_elaborated_top.top').resolve(),
            output_top_file=Path('TMP_elaborated_top.top').resolve(),
            resSeq_to_scale=(1, 2, 3))

        # denom is number_of_replicas - 1.
        m_geometrical_progression.assert_called_once_with(basis=0.2, denom=2)

        # ...and finally scaled once per replica with plumed.
        for n, i in enumerate(expected_output):
            m_scale_topology_with_plumed.assert_any_call(
                input_top=Path('TMP_elaborated_top.top').resolve(),
                output_top=i,
                scaling_value=scaling_values[n],
                plumed='plumed')
| 37.442857 | 77 | 0.579168 | 555 | 5,242 | 5.133333 | 0.254054 | 0.030888 | 0.039312 | 0.04212 | 0.298701 | 0.200421 | 0.135837 | 0.135837 | 0.09126 | 0.09126 | 0 | 0.039441 | 0.30351 | 5,242 | 139 | 78 | 37.71223 | 0.740893 | 0.072491 | 0 | 0.106383 | 0 | 0 | 0.147713 | 0.093202 | 0 | 0 | 0 | 0 | 0.106383 | 1 | 0.06383 | false | 0 | 0.042553 | 0 | 0.148936 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61444ae72f9460a2cd081ed2da2655c89e4a8499 | 55,822 | py | Python | rubin_sim/maf/utils/snNSNUtils.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/maf/utils/snNSNUtils.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | rubin_sim/maf/utils/snNSNUtils.py | RileyWClarke/flarubin | eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a | [
"MIT"
] | null | null | null | from rubin_sim.photUtils import SignalToNoise
from rubin_sim.photUtils import PhotometricParameters
from rubin_sim.photUtils import Bandpass, Sed
from rubin_sim.data import get_data_dir
import numpy as np
from scipy.constants import *
from functools import wraps
import os
import h5py
import multiprocessing
from astropy.table import Table
import pandas as pd
from scipy import interpolate
from scipy.interpolate import RegularGridInterpolator
from astropy.cosmology import FlatLambdaCDM
# Conversion factor from steradians to square degrees.
STERADIAN2SQDEG = 180.**2 / np.pi**2
# Mpc^3 -> Mpc^3/sr
norm = 1. / (4. * np.pi)

# Public API of this module (SN_Rate and CovColor are defined further down).
__all__ = ['LCfast', 'Throughputs', 'Telescope',
           'Load_Reference', 'GetReference', 'SN_Rate', 'CovColor']
class LCfast:
    """class to simulate supernovae light curves in a fast way
    The method relies on templates and broadcasting to increase speed

    Parameters
    ---------------
    reference_lc:
        reference light-curve templates (per-band interpolators)
    x1: float
        SN stretch
    color: float
        SN color
    telescope: Telescope()
        telescope for the study
    mjdCol: str, optional
        name of the MJD col in data to simulate (default: observationStartMJD)
    RACol: str, optional
        name of the RA col in data to simulate (default: fieldRA)
    DecCol: str, optional
        name of the Dec col in data to simulate (default: fieldDec)
    filterCol: str, optional
        name of the filter col in data to simulate (default: filter)
    exptimeCol: str, optional
        name of the exposure time col in data to simulate (default: visitExposureTime)
    m5Col: str, optional
        name of the fiveSigmaDepth col in data to simulate (default: fiveSigmaDepth)
    seasonCol: str, optional
        name of the season col in data to simulate (default: season)
    nexpCol: str, optional
        name of the number-of-exposures col (default: numExposures)
    snr_min: float, optional
        minimal Signal-to-Noise Ratio to apply on LC points (default: 5)
    """

    def __init__(self, reference_lc, x1, color,
                 telescope, mjdCol='observationStartMJD',
                 RACol='fieldRA', DecCol='fieldDec',
                 filterCol='filter', exptimeCol='visitExposureTime',
                 m5Col='fiveSigmaDepth', seasonCol='season',
                 nexpCol='numExposures',
                 snr_min=5.):

        # grab all column names
        self.RACol = RACol
        self.DecCol = DecCol
        self.filterCol = filterCol
        self.mjdCol = mjdCol
        self.m5Col = m5Col
        self.exptimeCol = exptimeCol
        self.seasonCol = seasonCol
        self.nexpCol = nexpCol
        self.x1 = x1
        self.color = color

        # Loading reference file
        self.reference_lc = reference_lc

        self.telescope = telescope

        # These cutoffs are used to select observations:
        # phase = (mjd - DayMax)/(1.+z)
        # selection: min_rf_phase < phase < max_rf_phase
        # and blue_cutoff < mean_rest_frame < red_cutoff
        # where mean_rest_frame = telescope.mean_wavelength/(1.+z)
        self.blue_cutoff = 380.
        self.red_cutoff = 800.

        # SN parameters for Fisher matrix estimation
        self.param_Fisher = ['x0', 'x1', 'daymax', 'color']

        self.snr_min = snr_min

        # getting the telescope zero points (per band)
        self.zp = {}
        for b in 'ugrizy':
            self.zp[b] = telescope.zp(b)

    def __call__(self, obs, gen_par=None, bands='grizy'):
        """ Simulation of the light curve

        Parameters
        ----------------
        obs: array
            array of observations
        gen_par: array, optional
            simulation parameters (default: None)
        bands: str, optional
            filters to consider for simulation (default: grizy)

        Returns
        ------------
        pandas DataFrame with:
        columns: band, flux, fluxerr, snr_m5, flux_e, zp, zpsys, time
        metadata : SNID, RA, Dec, DayMax, X1, Color, z
        """

        if len(obs) == 0:
            return None

        # pandas.DataFrame.append was removed in pandas 2.0; collect the
        # per-band frames and concatenate once (also avoids quadratic copies).
        frames = []
        for band in bands:
            idx = obs[self.filterCol] == band
            if len(obs[idx]) > 0:
                res = self.processBand(obs[idx], band, gen_par)
                if res is not None:
                    frames.append(res)

        # return produced LC (empty DataFrame when no band had data,
        # matching the historical behavior)
        if not frames:
            return pd.DataFrame()
        return pd.concat(frames, ignore_index=True)

    def processBand(self, sel_obs, band, gen_par, j=-1, output_q=None):
        """LC simulation of a set of obs corresponding to a band

        The idea is to use python broadcasting so as to estimate
        all the requested values (flux, flux error, Fisher components, ...)
        in a single path (i.e no loop!)

        Parameters
        ---------------
        sel_obs: array
            array of observations
        band: str
            band of observations
        gen_par: array
            simulation parameters
        j: int, optional
            index for multiprocessing (default: -1)
        output_q: multiprocessing.Queue(), optional
            queue for multiprocessing (default: None)

        Returns
        -------
        pandas DataFrame with fields corresponding to LC components
        """

        # interpolation over a regular (phase, z) grid
        interpType = 'regular'

        # if there are no observations in this filter: return None
        if len(sel_obs) == 0:
            if output_q is not None:
                output_q.put({j: None})
            else:
                return None

        # Get the fluxes (from the gridded reference templates)

        # xi = MJD-T0, broadcast over all simulated SNe
        xi = sel_obs[self.mjdCol]-gen_par['daymax'][:, np.newaxis]

        # yi = redshift simulated values
        # rounded to avoid interpolation problems near grid boundaries
        yi = np.round(gen_par['z'], 4)

        # p = phases of LC points = xi/(1.+z)
        p = xi/(1.+yi[:, np.newaxis])
        yi_arr = np.ones_like(p)*yi[:, np.newaxis]

        if interpType == 'regular':
            pts = (p, yi_arr)
            fluxes_obs = self.reference_lc.flux[band](pts)
            fluxes_obs_err = self.reference_lc.fluxerr[band](pts)

            # Fisher components estimation
            dFlux = {}

            # loop on Fisher parameters: flux derivative wrt each parameter
            for val in self.param_Fisher:
                dFlux[val] = self.reference_lc.param[band][val](pts)

        # replace non-physical fluxes by tiny dummy values
        fluxes_obs[fluxes_obs <= 0.] = 1.e-10
        fluxes_obs_err[fluxes_obs_err <= 0.] = 1.e-10

        # Fisher matrix components estimation
        # loop on SN parameters (x0,x1,color)
        # estimate: dF/dxi*dF/dxj/sigma_flux**2 (division by sigma**2 below)
        Derivative_for_Fisher = {}
        for ia, vala in enumerate(self.param_Fisher):
            for jb, valb in enumerate(self.param_Fisher):
                if jb >= ia:
                    Derivative_for_Fisher[vala +
                                          valb] = dFlux[vala] * dFlux[valb]

        # remove LC points outside the restframe phase range
        min_rf_phase = gen_par['min_rf_phase'][:, np.newaxis]
        max_rf_phase = gen_par['max_rf_phase'][:, np.newaxis]
        flag = (p >= min_rf_phase) & (p <= max_rf_phase)

        # remove LC points outside the (blue-red) restframe wavelength range
        mean_restframe_wavelength = np.array(
            [self.telescope.mean_wavelength[band]]*len(sel_obs))
        mean_restframe_wavelength = np.tile(
            mean_restframe_wavelength, (len(gen_par), 1))/(1.+gen_par['z'][:, np.newaxis])
        flag &= (mean_restframe_wavelength > self.blue_cutoff) & (
            mean_restframe_wavelength < self.red_cutoff)

        flag_idx = np.argwhere(flag)

        # Correct fluxes_err (m5 in generation probably different from m5 obs)
        gamma_obs = self.reference_lc.gamma[band](
            (sel_obs[self.m5Col], sel_obs[self.exptimeCol]/sel_obs[self.nexpCol], sel_obs[self.nexpCol]))

        mag_obs = -2.5*np.log10(fluxes_obs/3631.)

        m5 = np.asarray([self.reference_lc.m5_ref[band]]*len(sel_obs))
        gammaref = np.asarray([self.reference_lc.gamma_ref[band]]*len(sel_obs))

        m5_tile = np.tile(m5, (len(p), 1))

        srand_ref = self.srand(
            np.tile(gammaref, (len(p), 1)), mag_obs, m5_tile)

        srand_obs = self.srand(np.tile(gamma_obs, (len(p), 1)), mag_obs, np.tile(
            sel_obs[self.m5Col], (len(p), 1)))

        # ratio of reference to observed noise terms
        correct_m5 = srand_ref/srand_obs

        fluxes_obs_err = fluxes_obs_err/correct_m5

        # now apply the flag to select LC points
        fluxes = np.ma.array(fluxes_obs, mask=~flag)
        fluxes_err = np.ma.array(fluxes_obs_err, mask=~flag)
        phases = np.ma.array(p, mask=~flag)
        snr_m5 = np.ma.array(fluxes_obs/fluxes_obs_err, mask=~flag)

        nvals = len(phases)

        obs_time = np.ma.array(
            np.tile(sel_obs[self.mjdCol], (nvals, 1)), mask=~flag)
        seasons = np.ma.array(
            np.tile(sel_obs[self.seasonCol], (nvals, 1)), mask=~flag)

        z_vals = gen_par['z'][flag_idx[:, 0]]
        daymax_vals = gen_par['daymax'][flag_idx[:, 0]]

        mag_obs = np.ma.array(mag_obs, mask=~flag)

        Fisher_Mat = {}
        for key, vals in Derivative_for_Fisher.items():
            Fisher_Mat[key] = np.ma.array(vals, mask=~flag)

        # Store in a pandas dataframe
        lc = pd.DataFrame()

        ndata = len(fluxes_err[~fluxes_err.mask])

        if ndata > 0:
            lc['flux'] = fluxes[~fluxes.mask]
            lc['fluxerr'] = fluxes_err[~fluxes_err.mask]
            lc['phase'] = phases[~phases.mask]
            lc['snr_m5'] = snr_m5[~snr_m5.mask]
            lc['time'] = obs_time[~obs_time.mask]
            lc['mag'] = mag_obs[~mag_obs.mask]
            lc['band'] = ['LSST::'+band]*len(lc)
            lc.loc[:, 'zp'] = self.zp[band]
            lc['season'] = seasons[~seasons.mask]
            lc['season'] = lc['season'].astype(int)
            lc['z'] = z_vals
            lc['daymax'] = daymax_vals
            # Fisher elements: dF/dxi*dF/dxj / sigma_flux**2
            for key, vals in Fisher_Mat.items():
                lc.loc[:, 'F_{}'.format(
                    key)] = vals[~vals.mask]/(lc['fluxerr'].values**2)
            lc.loc[:, 'x1'] = self.x1
            lc.loc[:, 'color'] = self.color

            # counters: LC points before/after max with good SNR,
            # and points earlier than phase -5 / later than phase 20
            lc.loc[:, 'n_aft'] = (np.sign(lc['phase']) == 1) & (
                lc['snr_m5'] >= self.snr_min)
            lc.loc[:, 'n_bef'] = (np.sign(lc['phase'])
                                  == -1) & (lc['snr_m5'] >= self.snr_min)

            lc.loc[:, 'n_phmin'] = (lc['phase'] <= -5.)
            lc.loc[:, 'n_phmax'] = (lc['phase'] >= 20)

            # transform `bool` to int because of some problems in the sum()
            for colname in ['n_aft', 'n_bef', 'n_phmin', 'n_phmax']:
                lc.loc[:, colname] = lc[colname].astype(int)

        if output_q is not None:
            output_q.put({j: lc})
        else:
            return lc

    def srand(self, gamma, mag, m5):
        r"""Method to estimate :math:`srand=\sqrt((0.04-\gamma)*x+\gamma*x^2)`
        with :math:`x = 10^{0.4*(m-m_5)}`

        Parameters
        -----------
        gamma: float
            gamma value
        mag: float
            magnitude
        m5: float
            fiveSigmaDepth value

        Returns
        -------
        srand : `float`
            srand = np.sqrt((0.04-gamma)*x+gamma*x**2) with x = 10**(0.4*(mag-m5))
        """

        x = 10**(0.4*(mag-m5))
        return np.sqrt((0.04-gamma)*x+gamma*x**2)
class Throughputs(object):
    """ class to handle instrument throughput

    Parameters
    -------------
    through_dir : str, optional
        throughput directory. If None, uses $THROUGHPUTS_DIR/baseline
    atmos_dir : str, optional
        directory of atmos files. If None, uses $THROUGHPUTS_DIR
    telescope_files : list(str), optional
        list of of throughput files
        Default : ['detector.dat', 'lens1.dat','lens2.dat',
                   'lens3.dat','m1.dat', 'm2.dat', 'm3.dat']
    filterlist: list(str), optional
        list of filters to consider
        Default : 'ugrizy'
    wave_min : float, optional
        min wavelength for throughput
        Default : 300
    wave_max : float, optional
        max wavelength for throughput
        Default : 1150
    atmos : bool, optional
        to include atmosphere affects
        Default : True
    aerosol : bool, optional
        to include aerosol effects
        Default : True

    Returns
    ---------
    Accessible throughputs (per band):
    lsst_system: system throughput (lens+mirrors+filters)
    lsst_atmos: lsst_system+atmosphere
    lsst_atmos_aerosol: lsst_system+atmosphere+aerosol

    Note: I would like to see this replaced by a class in sims_photUtils instead. This does not belong in MAF.
    """

    def __init__(self, **kwargs):
        # Build the default configuration, then apply user overrides.
        params = {}
        params['through_dir'] = os.path.join(get_data_dir(), 'throughputs', 'baseline')
        params['atmos_dir'] = os.path.join(get_data_dir(), 'throughputs', 'atmos')
        params['atmos'] = True
        params['aerosol'] = True
        params['telescope_files'] = ['detector.dat', 'lens1.dat',
                                     'lens2.dat', 'lens3.dat',
                                     'm1.dat', 'm2.dat', 'm3.dat']
        params['filterlist'] = 'ugrizy'
        params['wave_min'] = 300.
        params['wave_max'] = 1150.
        # This lets a user override the atmosphere and throughputs directories.
        for par in ['through_dir', 'atmos_dir', 'atmos', 'aerosol',
                    'telescope_files', 'filterlist', 'wave_min', 'wave_max']:
            if par in kwargs.keys():
                params[par] = kwargs[par]

        self.atmosDir = params['atmos_dir']
        self.throughputsDir = params['through_dir']

        self.telescope_files = params['telescope_files']
        # one 'filter_<f>.dat' per requested band, unless explicitly given
        self.filter_files = ['filter_'+f+'.dat' for f in params['filterlist']]
        if 'filter_files' in kwargs.keys():
            self.filter_files = kwargs['filter_files']
        self.wave_min = params['wave_min']
        self.wave_max = params['wave_max']

        self.filterlist = params['filterlist']
        # per-band plotting colors
        self.filtercolors = {'u': 'b', 'g': 'c',
                             'r': 'g', 'i': 'y', 'z': 'r', 'y': 'm'}

        # per-band Bandpass containers, filled by the Load_* methods below
        self.lsst_std = {}
        self.lsst_system = {}
        self.mean_wavelength = {}
        self.lsst_detector = {}
        self.lsst_atmos = {}
        self.lsst_atmos_aerosol = {}
        # airmass < 0 means 'no atmosphere loaded yet'
        self.airmass = -1.
        self.aerosol_b = params['aerosol']
        self.Load_System()
        self.Load_DarkSky()

        if params['atmos']:
            self.Load_Atmosphere()
        else:
            # no atmosphere requested: atmos throughputs fall back to system
            for f in self.filterlist:
                self.lsst_atmos[f] = self.lsst_system[f]
                self.lsst_atmos_aerosol[f] = self.lsst_system[f]

        self.Mean_Wave()

    @property
    def system(self):
        # per-band system throughput (lens+mirrors+filters)
        return self.lsst_system

    @property
    def telescope(self):
        # NOTE(review): self.lsst_telescope is never assigned anywhere in
        # this class; accessing this property raises AttributeError — confirm.
        return self.lsst_telescope

    @property
    def atmosphere(self):
        # per-band system+atmosphere throughput
        return self.lsst_atmos

    @property
    def aerosol(self):
        # per-band system+atmosphere+aerosol throughput
        return self.lsst_atmos_aerosol

    def Load_System(self):
        """ Load files required to estimate throughputs
        """

        for f in self.filterlist:
            self.lsst_std[f] = Bandpass()
            self.lsst_system[f] = Bandpass()

            if len(self.telescope_files) > 0:
                # pick the filter file matching this band
                index = [i for i, x in enumerate(
                    self.filter_files) if f+'.dat' in x]
                telfiles = self.telescope_files+[self.filter_files[index[0]]]
            else:
                telfiles = self.filter_files
            self.lsst_system[f].readThroughputList(telfiles,
                                                   rootDir=self.throughputsDir,
                                                   wavelen_min=self.wave_min,
                                                   wavelen_max=self.wave_max)

    def Load_DarkSky(self):
        """ Load DarkSky SED from <throughputsDir>/darksky.dat
        """
        self.darksky = Sed()
        self.darksky.readSED_flambda(os.path.join(
            self.throughputsDir, 'darksky.dat'))

    def Load_Atmosphere(self, airmass=1.2):
        """ Load atmosphere files
        and convolve with transmissions

        Parameters
        --------------
        airmass : float, optional
            airmass value
            Default : 1.2
        """
        self.airmass = airmass
        if self.airmass > 0.:
            atmosphere = Bandpass()
            # atmosphere file encodes airmass*10 in its name; fall back to
            # the generic atmos.dat when that file is missing
            path_atmos = os.path.join(
                self.atmosDir, 'atmos_%d.dat' % (self.airmass*10))
            if os.path.exists(path_atmos):
                atmosphere.readThroughput(os.path.join(
                    self.atmosDir, 'atmos_%d.dat' % (self.airmass*10)))
            else:
                atmosphere.readThroughput(
                    os.path.join(self.atmosDir, 'atmos.dat'))
            self.atmos = Bandpass(wavelen=atmosphere.wavelen, sb=atmosphere.sb)

            # convolve system throughput with the atmosphere
            for f in self.filterlist:
                wavelen, sb = self.lsst_system[f].multiplyThroughputs(
                    atmosphere.wavelen, atmosphere.sb)
                self.lsst_atmos[f] = Bandpass(wavelen=wavelen, sb=sb)

            if self.aerosol_b:
                # same convolution, using the aerosol atmosphere file
                atmosphere_aero = Bandpass()
                atmosphere_aero.readThroughput(os.path.join(
                    self.atmosDir, 'atmos_%d_aerosol.dat' % (self.airmass*10)))
                self.atmos_aerosol = Bandpass(
                    wavelen=atmosphere_aero.wavelen, sb=atmosphere_aero.sb)

                for f in self.filterlist:
                    wavelen, sb = self.lsst_system[f].multiplyThroughputs(
                        atmosphere_aero.wavelen, atmosphere_aero.sb)
                    self.lsst_atmos_aerosol[f] = Bandpass(
                        wavelen=wavelen, sb=sb)
        else:
            # non-positive airmass: no atmosphere, fall back to system
            for f in self.filterlist:
                self.lsst_atmos[f] = self.lsst_system[f]
                self.lsst_atmos_aerosol[f] = self.lsst_system[f]

    def Mean_Wave(self):
        """Estimate the throughput-weighted mean wavelength per band
        """
        for band in self.filterlist:
            self.mean_wavelength[band] = np.sum(
                self.lsst_atmos[band].wavelen*self.lsst_atmos[band].sb)\
                / np.sum(self.lsst_atmos[band].sb)
# decorator to access parameters of the class
def get_val_decor(func):
    """Decorator for Telescope accessors: compute each value only once.

    Wraps ``func(theclass, what, x)`` so that, for every entry ``x`` of
    ``xlist``, ``func`` is invoked only when ``x`` is not yet cached
    under ``theclass.data[what]``. The wrapper returns nothing; ``func``
    is expected to fill the cache as a side effect.
    """
    @wraps(func)
    def func_deco(theclass, what, xlist):
        for item in xlist:
            already_cached = item in theclass.data[what].keys()
            if not already_cached:
                func(theclass, what, item)
    return func_deco
class Telescope(Throughputs):
    """Telescope class
    inherits from Throughputs
    estimate quantities defined in LSE-40

    The following quantities are accessible:

    mag_sky: sky magnitude
    m5: 5-sigma depth
    Sigmab: see eq. (36) of LSE-40
    zp: see eq. (43) of LSE-40
    counts_zp:
    Skyb: see eq. (40) of LSE-40
    flux_sky:

    Parameters
    -------------
    through_dir : str, optional
        throughput directory
        Default : LSST_THROUGHPUTS_BASELINE
    atmos_dir : str, optional
        directory of atmos files
        Default : THROUGHPUTS_DIR
    telescope_files : list(str), optional
        list of of throughput files
        Default : ['detector.dat', 'lens1.dat','lens2.dat',
                   'lens3.dat','m1.dat', 'm2.dat', 'm3.dat']
    filterlist: list(str), optional
        list of filters to consider
        Default : 'ugrizy'
    wave_min : float, optional
        min wavelength for throughput
        Default : 300
    wave_max : float, optional
        max wavelength for throughput
        Default : 1150
    atmos : bool, optional
        to include atmosphere affects
        Default : True
    aerosol : bool, optional
        to include aerosol effects
        Default : True
    airmass : float, optional
        airmass value
        Default : 1.

    Returns
    ---------
    Accessible throughputs (per band, from Throughput class):
    lsst_system: system throughput (lens+mirrors+filters)
    lsst_atmos: lsst_system+atmosphere
    lsst_atmos_aerosol: lsst_system+atmosphere+aerosol

    Note: I would like to see this replaced by a class in sims_photUtils instead. This does not belong in MAF.
    """

    def __init__(self, name='unknown', airmass=1., **kwargs):
        self.name = name
        super().__init__(**kwargs)

        params = ['mag_sky', 'm5', 'FWHMeff', 'Tb',
                  'Sigmab', 'zp', 'counts_zp', 'Skyb', 'flux_sky']

        # lazily-filled cache: self.data[param][band], consumed by the
        # get_val_decor-decorated accessors below
        self.data = {}
        for par in params:
            self.data[par] = {}

        # per-band effective FWHM (arcsec), fixed values
        self.data['FWHMeff'] = dict(
            zip('ugrizy', [0.92, 0.87, 0.83, 0.80, 0.78, 0.76]))

        # self.atmos = atmos

        self.Load_Atmosphere(airmass)

    @get_val_decor
    def get(self, what, band):
        """
        Compute and cache m5 and flux_sky for a band.

        Parameters
        ---------------
        what: str
            parameter to estimate
        band: str
            filter
        """
        filter_trans = self.system[band]
        wavelen_min, wavelen_max, wavelen_step = filter_trans.getWavelenLimits(
            None, None, None)

        bandpass = Bandpass(wavelen=filter_trans.wavelen, sb=filter_trans.sb)

        # flat SED normalised to the sky magnitude in this band
        flatSedb = Sed()
        flatSedb.setFlatSED(wavelen_min, wavelen_max, wavelen_step)
        flux0b = np.power(10., -0.4*self.mag_sky(band))
        flatSedb.multiplyFluxNorm(flux0b)
        photParams = PhotometricParameters(bandpass=band)
        # NOTE(review): this local 'norm' shadows the module-level 'norm'
        # constant defined at the top of the file.
        norm = photParams.platescale**2/2.*photParams.exptime/photParams.gain
        trans = filter_trans

        if self.atmos:
            trans = self.atmosphere[band]
        self.data['m5'][band] = SignalToNoise.calcM5(
            flatSedb, trans, filter_trans,
            photParams=photParams,
            FWHMeff=self.FWHMeff(band))
        adu_int = flatSedb.calcADU(bandpass=trans, photParams=photParams)
        self.data['flux_sky'][band] = adu_int*norm

    @get_val_decor
    def get_inputs(self, what, band):
        """
        Compute and cache Tb, Sigmab and mag_sky for a band.

        Parameters
        ---------------
        what: str
            parameter to estimate
        band: str
            filter
        """
        myup = self.Calc_Integ_Sed(self.darksky, self.system[band])
        self.data['Tb'][band] = self.Calc_Integ(self.atmosphere[band])
        self.data['Sigmab'][band] = self.Calc_Integ(self.system[band])
        self.data['mag_sky'][band] = -2.5 * \
            np.log10(myup/(3631.*self.Sigmab(band)))

    @get_val_decor
    def get_zp(self, what, band):
        """
        Compute and cache zero points (zp, counts_zp, Skyb) for a band.
        formula used here are extracted from LSE-40

        Parameters
        ---------------
        what: str
            parameter to estimate
        band: str
            filter
        """
        photParams = PhotometricParameters(bandpass=band)
        Diameter = 2.*np.sqrt(photParams.effarea*1.e-4 /
                              np.pi)  # diameter in meter
        # 'h' is Planck's constant, provided by the wildcard import of
        # scipy.constants at the top of the module.
        Cte = 3631.*np.pi*Diameter**2*2.*photParams.exptime/4/h/1.e36

        # NOTE(review): '10.**0.4*(25.-self.mag_sky(band))' multiplies
        # 10**0.4 by the magnitude difference; if the intent was
        # 10**(0.4*(25 - mag_sky)), parentheses are missing — confirm
        # against LSE-40 eq. (40) before relying on Skyb.
        self.data['Skyb'][band] = Cte*np.power(Diameter/6.5, 2.)\
            * np.power(2.*photParams.exptime/30., 2.)\
            * np.power(photParams.platescale, 2.)\
            * 10.**0.4*(25.-self.mag_sky(band))\
            * self.Sigmab(band)

        Zb = 181.8*np.power(Diameter/6.5, 2.)*self.Tb(band)
        mbZ = 25.+2.5*np.log10(Zb)
        filtre_trans = self.system[band]
        wavelen_min, wavelen_max, wavelen_step = filtre_trans.getWavelenLimits(
            None, None, None)
        bandpass = Bandpass(wavelen=filtre_trans.wavelen, sb=filtre_trans.sb)
        # flat SED normalised to the zero-point magnitude mbZ
        flatSed = Sed()
        flatSed.setFlatSED(wavelen_min, wavelen_max, wavelen_step)
        flux0 = np.power(10., -0.4*mbZ)
        flatSed.multiplyFluxNorm(flux0)
        photParams = PhotometricParameters(bandpass=band)
        # number of counts for exptime
        counts = flatSed.calcADU(bandpass, photParams=photParams)
        self.data['zp'][band] = mbZ
        self.data['counts_zp'][band] = counts/2.*photParams.exptime

    def return_value(self, what, band):
        """
        accessor: one band -> scalar, several bands -> whole dict

        Parameters
        ---------------
        what: str
            parameter to estimate
        band: str
            filter
        """
        if len(band) > 1:
            return self.data[what]
        else:
            return self.data[what][band]

    def m5(self, filtre):
        """m5 accessor (computes lazily on first access)
        """
        self.get('m5', filtre)
        return self.return_value('m5', filtre)

    def Tb(self, filtre):
        """Tb accessor (computes lazily on first access)
        """
        self.get_inputs('Tb', filtre)
        return self.return_value('Tb', filtre)

    def mag_sky(self, filtre):
        """mag_sky accessor (computes lazily on first access)
        """
        self.get_inputs('mag_sky', filtre)
        return self.return_value('mag_sky', filtre)

    def Sigmab(self, filtre):
        """
        Sigmab accessor (computes lazily on first access)

        Parameters
        ----------------
        filtre: str
            filter
        """
        self.get_inputs('Sigmab', filtre)
        return self.return_value('Sigmab', filtre)

    def zp(self, filtre):
        """
        zp accessor (computes lazily on first access)

        Parameters
        ----------------
        filtre: str
            filter
        """
        self.get_zp('zp', filtre)
        return self.return_value('zp', filtre)

    def FWHMeff(self, filtre):
        """
        FWHMeff accessor (values are fixed in __init__)

        Parameters
        ----------------
        filtre: str
            filter
        """
        return self.return_value('FWHMeff', filtre)

    def Calc_Integ(self, bandpass):
        """
        integration of Sb/lambda over the bandpass (trapezoid-like sum)

        Parameters
        --------------
        bandpass : `rubin_sim.photUtils.Bandpass`

        Returns
        ---------
        integration: `float`
        """
        resu = 0.
        dlam = 0
        for i, wave in enumerate(bandpass.wavelen):
            if i < len(bandpass.wavelen)-1:
                dlam = bandpass.wavelen[i+1]-wave
                resu += dlam*bandpass.sb[i]/wave

        return resu

    def Calc_Integ_Sed(self, sed, bandpass, wavelen=None, fnu=None):
        """
        SED integration over a bandpass (photon counting)

        Parameters
        --------------
        sed : Sed
            sed to integrate
        bandpass : Bandpass
            bandpass
        wavelen : float, optional
            wavelength values
            Default : None
        fnu : float, optional
            fnu values
            Default : None

        Returns
        ----------
        integrated sed over the bandpass
        """
        use_self = sed._checkUseSelf(wavelen, fnu)
        # Use self values if desired, otherwise use values passed to function.
        if use_self:
            # Calculate fnu if required.
            if sed.fnu is None:
                # If fnu not present, calculate. (does not regrid).
                sed.flambdaTofnu()
            wavelen = sed.wavelen
            fnu = sed.fnu
        # Make sure wavelen/fnu are on the same wavelength grid as bandpass.
        wavelen, fnu = sed.resampleSED(
            wavelen, fnu, wavelen_match=bandpass.wavelen)

        # Calculate the number of photons.
        nphoton = (fnu / wavelen * bandpass.sb).sum()
        dlambda = wavelen[1] - wavelen[0]
        return nphoton * dlambda

    def flux_to_mag(self, flux, band, zp=None):
        """
        Flux to magnitude conversion

        Parameters
        --------------
        flux : float
            input fluxes
        band : str
            input band
        zp : float, optional
            zeropoints
            Default : None

        Returns
        ---------
        magnitudes
        """
        if zp is None:
            zp = self.zero_points(band)
        # print 'zp',zp,band
        m = -2.5 * np.log10(flux) + zp
        return m

    def mag_to_flux(self, mag, band, zp=None):
        """
        Magnitude to flux conversion

        Parameters
        --------------
        mag : float
            input mags
        band : str
            input band
        zp : float, optional
            zeropoints
            Default : None

        Returns
        ---------
        fluxes
        """
        if zp is None:
            zp = self.zero_points(band)
        return np.power(10., -0.4 * (mag-zp))

    def zero_points(self, band):
        """
        Zero points estimation

        Parameters
        --------------
        band : `list` [`str`]
            list of bands

        Returns
        ---------
        array of zp
        """
        return np.asarray([self.zp[b] for b in band])

    def mag_to_flux_e_sec(self, mag, band, exptime):
        """
        Mag to flux (in photoelec/sec) conversion

        Parameters
        --------------
        mag : float
            input magnitudes
        band : str
            input bands
        exptime : float
            input exposure times

        Returns
        ----------
        counts : float
            number of ADU counts
        e_per_sec : float
            flux in photoelectron per sec.
        """
        if not hasattr(mag, '__iter__'):
            wavelen_min, wavelen_max, wavelen_step = self.atmosphere[band].getWavelenLimits(
                None, None, None)
            sed = Sed()
            sed.setFlatSED()
            # NOTE(review): dead assignment — overwritten by calcFluxNorm
            # on the next line.
            flux0 = 3631.*10**(-0.4*mag)  # flux in Jy
            flux0 = sed.calcFluxNorm(mag, self.atmosphere[band])
            sed.multiplyFluxNorm(flux0)
            photParams = PhotometricParameters(nexp=exptime/15.)
            counts = sed.calcADU(
                bandpass=self.atmosphere[band], photParams=photParams)
            e_per_sec = counts
            e_per_sec /= exptime/photParams.gain
            # print('hello',photParams.gain,exptime)
            return counts, e_per_sec
        else:
            # vectorized case: recurse element by element
            return np.asarray([self.mag_to_flux_e_sec(m, b, expt) for m, b, expt in zip(mag, band, exptime)])

    def gamma(self, mag, band, exptime):
        """
        gamma parameter estimation

        cf eq(5) of the paper LSST : from science drivers to reference design and anticipated data products
        with sigma_rand = 0.2 and m=m5

        Parameters
        --------------
        mag : float
            magnitudes
        band : str
            band
        exptime : float
            exposure time

        Returns
        ----------
        gamma: `float`
        """

        if not hasattr(mag, '__iter__'):
            photParams = PhotometricParameters(nexp=exptime/15.)
            counts, e_per_sec = self.mag_to_flux_e_sec(mag, band, exptime)
            return 0.04-1./(photParams.gain*counts)
        else:
            # vectorized case: recurse element by element
            return np.asarray([self.gamma(m, b, e) for m, b, e in zip(mag, band, exptime)])
class Load_Reference:
    """
    class to load template files requested for LCfast
    These files should be stored in a reference_files directory;
    any file missing from ``templateDir`` is downloaded from ``server``.

    Parameters
    ---------------
    server: str, optional
        where to get the files (default: https://me.lsst.eu/gris/DESC_SN_pipeline)
    templateDir: str, optional
        where to put the files (default: <rubin_sim data dir>/maf/SNe_data)
    """

    def __init__(self, server='https://me.lsst.eu/gris/DESC_SN_pipeline',
                 templateDir=None):

        if templateDir is None:
            sims_maf_contrib_dir = get_data_dir()
            templateDir = os.path.join(sims_maf_contrib_dir, 'maf/SNe_data')

        self.server = server
        # define instrument
        self.Instrument = {}
        self.Instrument['name'] = 'LSST'  # name of the telescope (internal)
        # dir of throughput
        self.Instrument['throughput_dir'] = os.path.join(get_data_dir(), 'throughputs', 'baseline')
        self.Instrument['atmos_dir'] = os.path.join(get_data_dir(), 'throughputs', 'atmos')
        self.Instrument['airmass'] = 1.2  # airmass value
        self.Instrument['atmos'] = True  # atmos
        self.Instrument['aerosol'] = False  # aerosol

        # (x1, color) pairs of the reference light curves to load
        x1_colors = [(-2.0, 0.2), (0.0, 0.0)]

        lc_reference = {}

        # create this directory if it does not exist
        # os.makedirs is safer than shelling out to mkdir: no shell
        # involved, intermediate directories are created, and an existing
        # directory is not an error.
        os.makedirs(templateDir, exist_ok=True)

        list_files = ['gamma.hdf5']
        for j in range(len(x1_colors)):
            x1 = x1_colors[j][0]
            color = x1_colors[j][1]
            fname = 'LC_{}_{}_380.0_800.0_ebvofMW_0.0_vstack.hdf5'.format(
                x1, color)
            list_files += [fname]

        # make sure that all the requested files are on disk (download if not)
        self.check_grab(templateDir, list_files)

        # gamma_reference
        self.gamma_reference = '{}/gamma.hdf5'.format(templateDir)

        # load one GetReference per (x1, color) pair
        resultdict = {}
        for j in range(len(x1_colors)):
            x1 = x1_colors[j][0]
            color = x1_colors[j][1]
            fname = '{}/LC_{}_{}_380.0_800.0_ebvofMW_0.0_vstack.hdf5'.format(
                templateDir, x1, color)
            resultdict[j] = self.load(fname)

        for j in range(len(x1_colors)):
            if resultdict[j] is not None:
                lc_reference[x1_colors[j]] = resultdict[j]

        # dict of reference LCs keyed by (x1, color)
        self.ref = lc_reference

    def load(self, fname):
        """
        Method to load reference files

        Parameters
        ---------------
        fname: str
            file name

        Returns
        -------
        GetReference
            the loaded reference light-curve container
        """
        lc_ref = GetReference(
            fname, self.gamma_reference, self.Instrument)

        return lc_ref

    def check_grab(self, templateDir, listfiles):
        """
        Method that check if files are on disk.
        If not: grab them from a server (self.server)

        Parameters
        ---------------
        templateDir: `str`
            directory where files are (or will be)
        listfiles: `list` [`str`]
            list of files that are (will be) in templateDir
        """
        for fi in listfiles:
            # check whether the file is available; if not-> get it!
            fname = '{}/{}'.format(templateDir, fi)
            if not os.path.isfile(fname):
                if 'gamma' in fname:
                    fullname = '{}/reference_files/{}'.format(self.server, fi)
                else:
                    fullname = '{}/Template_LC/{}'.format(self.server, fi)
                print('wget path:', fullname)
                # SECURITY NOTE: this builds a shell command from
                # self.server/templateDir; only use trusted values here.
                # A subprocess.run([...], shell=False) call would be safer.
                cmd = 'wget --no-clobber --no-verbose {} --directory-prefix {}'.format(
                    fullname, templateDir)
                os.system(cmd)
class GetReference:
    """
    Class to load reference data used for the fast SN simulator.

    Parameters
    ----------------
    lcName: str
        name of the reference file to load (lc)
    gammaName: str
        name of the reference file to load (gamma)
    tel_par: dict
        telescope parameters
    param_Fisher : sequence(str), optional
        list of SN parameters for Fisher estimation to consider
        (default: ('x0', 'x1', 'color', 'daymax'))

    Returns
    -----------
    The following dicts can be accessed:

    mag_to_flux_e_sec : Interp1D of mag to flux(e.sec-1) conversion
    flux : dict of RegularGridInterpolator of fluxes (key: filters, (x,y)=(phase, z), result=flux)
    fluxerr : dict of RegularGridInterpolator of flux errors (key: filters, (x,y)=(phase, z), result=fluxerr)
    param : dict of dict of RegularGridInterpolator of flux derivatives wrt SN parameters
            (key: filters plus param_Fisher parameters; (x,y)=(phase, z), result=flux derivatives)
    gamma : dict of RegularGridInterpolator of gamma values (key: filters)
    """

    def __init__(self, lcName, gammaName, tel_par,
                 param_Fisher=('x0', 'x1', 'color', 'daymax')):
        # NOTE: default changed from a mutable list to a tuple (same values);
        # avoids the shared-mutable-default pitfall.

        # Load the file - lc reference
        # (read via pandas, converted back to an astropy Table)
        lc_ref_tot = Table.from_pandas(pd.read_hdf(lcName))

        # drop (quasi-)zero redshifts
        idx = lc_ref_tot['z'] > 0.005
        lc_ref_tot = np.copy(lc_ref_tot[idx])

        # telescope requested
        telescope = Telescope(name=tel_par['name'],
                              throughput_dir=tel_par['throughput_dir'],
                              atmos_dir=tel_par['atmos_dir'],
                              atmos=tel_par['atmos'],
                              aerosol=tel_par['aerosol'],
                              airmass=tel_par['airmass'])

        # Load the file - gamma values; generate it first if missing
        if not os.path.exists(gammaName):
            # bug fix: the file name was missing from the message
            # (the original printed the literal '{}' placeholder)
            print('gamma file {} does not exist'.format(gammaName))
            print('will generate it - few minutes')
            mag_range = np.arange(15., 38., 1.)
            exptimes = np.arange(1., 3000., 10.)
            Gamma('ugrizy', telescope, gammaName,
                  mag_range=mag_range,
                  exptimes=exptimes)
            print('end of gamma estimation')

        # Load references needed for the following
        self.lc_ref = {}
        self.gamma_ref = {}
        self.gamma = {}
        self.m5_ref = {}
        self.mag_to_flux_e_sec = {}
        self.flux = {}
        self.fluxerr = {}
        self.param = {}

        bands = np.unique(lc_ref_tot['band'])
        mag_range = np.arange(10., 38., 0.01)
        method = 'linear'

        # for each band: load data to be used for interpolation
        for band in bands:
            idx = lc_ref_tot['band'] == band
            lc_sel = Table(lc_ref_tot[idx])

            lc_sel['z'] = lc_sel['z'].data.round(decimals=2)
            lc_sel['phase'] = lc_sel['phase'].data.round(decimals=1)

            # select phases between -20 and 50 only
            idx = lc_sel['phase'] < 50.
            idx &= lc_sel['phase'] > -20.
            lc_sel = lc_sel[idx]

            # mag -> flux (e/sec) interpolator (30 s reference exposure)
            fluxes_e_sec = telescope.mag_to_flux_e_sec(
                mag_range, [band] * len(mag_range), [30] * len(mag_range))
            self.mag_to_flux_e_sec[band] = interpolate.interp1d(
                mag_range, fluxes_e_sec[:, 1], fill_value=0., bounds_error=False)

            # these reference data will be used for griddata interp.
            self.lc_ref[band] = lc_sel
            self.gamma_ref[band] = lc_sel['gamma'][0]
            self.m5_ref[band] = np.unique(lc_sel['m5'])[0]

            # RegularGridInterpolator (faster than griddata) for fluxes/errors
            zmin, zmax, zstep, nz = self.limVals(lc_sel, 'z')
            phamin, phamax, phastep, npha = self.limVals(lc_sel, 'phase')

            zstep = np.round(zstep, 1)
            phastep = np.round(phastep, 1)

            zv = np.linspace(zmin, zmax, nz)
            phav = np.linspace(phamin, phamax, npha)

            print('Loading ', lcName, band, len(lc_sel), npha, nz)
            # sort so that the flat columns reshape into a (phase, z) grid
            index = np.lexsort((lc_sel['z'], lc_sel['phase']))
            flux = np.reshape(lc_sel[index]['flux'], (npha, nz))
            fluxerr = np.reshape(lc_sel[index]['fluxerr'], (npha, nz))

            self.flux[band] = RegularGridInterpolator(
                (phav, zv), flux, method=method, bounds_error=False, fill_value=0.)
            self.fluxerr[band] = RegularGridInterpolator(
                (phav, zv), fluxerr, method=method, bounds_error=False, fill_value=0.)

            # Flux derivatives wrt the SN parameters
            self.param[band] = {}
            for par in param_Fisher:
                valpar = np.reshape(
                    lc_sel[index]['d{}'.format(par)], (npha, nz))
                self.param[band][par] = RegularGridInterpolator(
                    (phav, zv), valpar, method=method, bounds_error=False, fill_value=0.)

            # gamma estimator: 3D interpolation in (mag, single exptime, nexp)
            rec = Table.read(gammaName, path='gamma_{}'.format(band))
            rec['mag'] = rec['mag'].data.round(decimals=4)
            rec['single_exptime'] = rec['single_exptime'].data.round(
                decimals=4)

            magmin, magmax, magstep, nmag = self.limVals(rec, 'mag')
            expmin, expmax, expstep, nexpo = self.limVals(
                rec, 'single_exptime')
            nexpmin, nexpmax, nexpstep, nnexp = self.limVals(rec, 'nexp')
            mag = np.linspace(magmin, magmax, nmag)
            exp = np.linspace(expmin, expmax, nexpo)
            nexp = np.linspace(nexpmin, nexpmax, nnexp)

            index = np.lexsort(
                (rec['nexp'], np.round(rec['single_exptime'], 4), rec['mag']))
            gammab = np.reshape(rec[index]['gamma'], (nmag, nexpo, nnexp))

            self.gamma[band] = RegularGridInterpolator(
                (mag, exp, nexp), gammab, method='linear', bounds_error=False, fill_value=0.)

    def limVals(self, lc, field):
        """ Get unique values of a field in a table.

        Parameters
        ----------
        lc: Table
            astropy Table (here probably a LC)
        field: str
            name of the field of interest

        Returns
        -------
        vmin: float
            min value of the field
        vmax: float
            max value of the field
        vstep: float
            step value for this field (median)
        nvals: int
            number of unique values
        """
        # NOTE: sorts the table in place (visible side effect on the caller)
        lc.sort(field)
        vals = np.unique(lc[field].data.round(decimals=4))
        vmin = np.min(vals)
        vmax = np.max(vals)
        # median spacing between consecutive unique values
        vstep = np.median(vals[1:] - vals[:-1])
        return vmin, vmax, vstep, len(vals)

    def Read_Ref(self, fi, j=-1, output_q=None):
        """ Load the reference file and
        make a single astropy Table from its sub-tables.

        Parameters
        ----------
        fi: str
            name of the file to be loaded
        j: int, optional
            tag used as key when results go through output_q (default: -1)
        output_q: multiprocessing.Queue, optional
            queue receiving {j: table}; if None the table is returned

        Returns
        -------
        tab_tot: astropy table
            single table = vstack of all the tables in fi.
        """
        tab_tot = Table()
        # only the key names are needed from h5py; close the handle promptly
        with h5py.File(fi, 'r') as f:
            keys = list(f.keys())

        for kk in keys:
            tab_b = Table.read(fi, path=kk)
            if tab_b is not None:
                tab_tot = vstack([tab_tot, tab_b], metadata_conflicts='silent')

        if output_q is not None:
            output_q.put({j: tab_tot})
        else:
            return tab_tot

    def Read_Multiproc(self, tab):
        """
        Multiprocessing method to read references.

        Parameters
        ---------------
        tab: astropy Table of data

        Returns
        -----------
        stacked astropy Table of data

        NOTE(review): Read_Ref opens its first argument as a file name,
        while this method passes it table slices -- confirm before relying
        on this code path.
        """
        nlc = len(tab)
        if nlc >= 8:
            n_multi = min(nlc, 8)
            # bug fix: integer division -- a float step makes range() raise
            nvals = nlc // n_multi
            batch = range(0, nlc, nvals)
            batch = np.append(batch, nlc)
        else:
            batch = range(0, nlc)

        result_queue = multiprocessing.Queue()
        for i in range(len(batch) - 1):
            ida = int(batch[i])
            idb = int(batch[i + 1])
            p = multiprocessing.Process(
                name='Subprocess_main-' + str(i), target=self.Read_Ref,
                args=(tab[ida:idb], i, result_queue))
            p.start()

        resultdict = {}
        for j in range(len(batch) - 1):
            resultdict.update(result_queue.get())

        for p in multiprocessing.active_children():
            p.join()

        tab_res = Table()
        for j in range(len(batch) - 1):
            if resultdict[j] is not None:
                tab_res = vstack([tab_res, resultdict[j]])
        return tab_res
class SN_Rate:
    r"""
    Estimate production rates of type Ia SN.

    Available rates: Ripoche, Perrett, Dilday

    Parameters
    ----------
    rate : str, optional
        type of rate chosen (Ripoche, Perrett, Dilday) (default : Perrett)
    H0 : float, optional
        Hubble constant value :math:`H_{0}` (default : 70.)
    Om0 : float, optional
        matter density value :math:`\Omega_{0}` (default : 0.25)
    min_rf_phase : float, optional
        min rest-frame phase (default : -15.)
    max_rf_phase : float, optional
        max rest-frame phase (default : 30.)
    """
    # NOTE: raw docstring fixes the invalid '\O' escape sequence.

    def __init__(self, rate='Perrett', H0=70, Om0=0.25,
                 min_rf_phase=-15., max_rf_phase=30.):
        self.astropy_cosmo = FlatLambdaCDM(H0=H0, Om0=Om0)
        self.rate = rate
        self.min_rf_phase = min_rf_phase
        self.max_rf_phase = max_rf_phase

    def __call__(self, zmin=0.1, zmax=0.2,
                 dz=0.01, survey_area=9.6,
                 bins=None, account_for_edges=False,
                 duration=140., duration_z=None):
        """
        Estimate the SN rate and number of SN per redshift bin.

        Parameters
        ----------------
        zmin : float, optional
            minimal redshift (default : 0.1)
        zmax : float, optional
            max redshift (default : 0.2)
        dz : float, optional
            redshift bin (default : 0.01)
        survey_area : float, optional
            area of the survey (deg^2) (default : 9.6 deg^2)
        bins : `list` [`float`], optional
            redshift bins (default : None)
        account_for_edges : bool
            to account for season edges.
            If true, duration of the survey will be reduced by
            (1+z)*(max_rf_phase-min_rf_phase)/365.25 (default : False)
        duration : float, optional
            survey duration (in days) (default : 140 days)
        duration_z : callable, optional
            survey duration (in days) as a function of z (default : None)

        Returns
        -----------
        zz : array
            redshift values
        rate : array
            production rate
        err_rate : array
            production rate error
        nsn : array
            number of SN
        err_nsn : array
            error on the number of SN
        """
        if bins is None:
            thebins = np.arange(zmin, zmax + dz, dz)
            # bin centres
            zz = 0.5 * (thebins[1:] + thebins[:-1])
        else:
            zz = bins
            thebins = bins

        rate, err_rate = self.SNRate(zz)

        area = survey_area / STERADIAN2SQDEG
        # or area = self.survey_area/41253.
        # comoving volume per redshift bin
        # NOTE(review): 'norm' is not defined in this class -- presumably a
        # module-level constant set elsewhere in this file; confirm.
        dvol = norm * self.astropy_cosmo.comoving_volume(thebins).value
        dvol = dvol[1:] - dvol[:-1]

        if account_for_edges:
            margin = (1. + zz) * (self.max_rf_phase - self.min_rf_phase) / 365.25
            effective_duration = duration / 365.25 - margin
            effective_duration[effective_duration <= 0.] = 0.
        else:
            # duration in days!
            effective_duration = duration / 365.25
            if duration_z is not None:
                effective_duration = duration_z(zz) / 365.25

        # time-dilation factor
        normz = (1. + zz)
        nsn = rate * area * dvol * effective_duration / normz
        err_nsn = err_rate * area * dvol * effective_duration / normz

        return zz, rate, err_rate, nsn, err_nsn

    def RipocheRate(self, z):
        """The SNLS SNIa rate according to the (unpublished) Ripoche et al study.

        Parameters
        --------------
        z : array
            redshift

        Returns
        ----------
        rate : array
        error_rate : array (20% relative error)
        """
        rate = 1.53e-4 * 0.343
        expn = 2.14
        my_z = np.copy(z)
        # rate is assumed constant above z = 1
        my_z[my_z > 1.] = 1.
        rate_sn = rate * np.power((1 + my_z) / 1.5, expn)
        return rate_sn, 0.2 * rate_sn

    def PerrettRate(self, z):
        """The SNLS SNIa rate according to (Perrett et al, 201?).

        Parameters
        --------------
        z : array
            redshift

        Returns
        ----------
        rate : array
        error_rate : array (propagated from rate and exponent errors)
        """
        rate = 0.17E-4
        expn = 2.11
        err_rate = 0.03E-4
        err_expn = 0.28
        my_z = np.copy(z)
        rate_sn = rate * np.power(1 + my_z, expn)
        # error propagation on both the amplitude and the exponent
        err_rate_sn = np.power(1 + my_z, 2. * expn) * np.power(err_rate, 2.)
        err_rate_sn += np.power(rate_sn * np.log(1 + my_z) * err_expn, 2.)
        return rate_sn, np.power(err_rate_sn, 0.5)

    def DildayRate(self, z):
        """The Dilday rate.

        Parameters
        --------------
        z : array
            redshift

        Returns
        ----------
        rate : array
        error_rate : array (from the exponent error only)
        """
        rate = 2.6e-5
        expn = 1.5
        err_expn = 0.6
        my_z = np.copy(z)
        # rate is assumed constant above z = 1
        my_z[my_z > 1.] = 1.
        rate_sn = rate * np.power(1 + my_z, expn)
        err_rate_sn = rate_sn * np.log(1 + my_z) * err_expn
        return rate_sn, err_rate_sn

    def SNRate(self, z):
        """SN rate estimation, dispatched on self.rate.

        Parameters
        --------------
        z : array
            redshift

        Returns
        ----------
        rate : array
        error_rate : array

        Raises
        ----------
        ValueError
            if self.rate is not one of the known rates
            (the original silently returned None).
        """
        rates = {'Ripoche': self.RipocheRate,
                 'Perrett': self.PerrettRate,
                 'Dilday': self.DildayRate}
        try:
            return rates[self.rate](z)
        except KeyError:
            raise ValueError('Unknown SN rate: {}'.format(self.rate)) from None

    def PlotNSN(self, zmin=0.1, zmax=0.2,
                dz=0.01, survey_area=9.6,
                bins=None, account_for_edges=False,
                duration=140., duration_z=None, norm=False):
        """ Plot integrated number of supernovae as a function of redshift;
        uses the __call__ function.

        Parameters
        --------------
        (same as __call__, plus)
        norm: bool, optional
            to normalise the results (default: False)
        """
        import pylab as plt
        # bug fix: duration_z was accepted but never forwarded to __call__
        zz, rate, err_rate, nsn, err_nsn = self.__call__(
            zmin=zmin, zmax=zmax, dz=dz, bins=bins,
            account_for_edges=account_for_edges,
            duration=duration, duration_z=duration_z,
            survey_area=survey_area)

        nsn_sum = np.cumsum(nsn)
        if norm is False:
            plt.errorbar(zz, nsn_sum, yerr=np.sqrt(np.cumsum(err_nsn ** 2)))
        else:
            plt.errorbar(zz, nsn_sum / nsn_sum[-1])
        plt.xlabel('z')
        plt.ylabel('N$_{SN}$ <')
        plt.grid()
class CovColor:
    """
    Class to estimate Cov(color, color) of a light curve from its
    Fisher-matrix elements.

    Parameters
    ---------------
    lc: pandas df
        lc to process. Should contain the Fisher matrix components,
        i.e. the sums of the flux derivatives wrt the SN parameters.
    """

    def __init__(self, lc):
        self.Cov_colorcolor = self.varColor(lc)

    def varColor(self, lc):
        """
        Estimate the color variance by inverting the 4x4 Fisher matrix
        (parameter order: x0, x1, daymax, color): the (color, color)
        element of the inverse is cofactor(F_colorcolor) / det(F).

        Parameters
        --------------
        lc: pandas df
            data containing the summed flux derivatives wrt SN parameters

        Returns
        ----------
        float: Cov_colorcolor
        """
        # independent elements of the symmetric Fisher matrix
        f00 = lc['F_x0x0']
        f01 = lc['F_x0x1']
        f02 = lc['F_x0daymax']
        f03 = lc['F_x0color']
        f11 = lc['F_x1x1']
        f12 = lc['F_x1daymax']
        f13 = lc['F_x1color']
        f22 = lc['F_daymaxdaymax']
        f23 = lc['F_daymaxcolor']
        f33 = lc['F_colorcolor']

        # determinant of the full 4x4 matrix, expanded along the first column
        detM = f00 * self.det(f11, f12, f13, f12, f22, f23, f13, f23, f33)
        detM -= f01 * self.det(f01, f02, f03, f12, f22, f23, f13, f23, f33)
        detM += f02 * self.det(f01, f02, f03, f11, f12, f13, f13, f23, f33)
        detM -= f03 * self.det(f01, f02, f03, f11, f12, f13, f12, f22, f23)

        # cofactor of F_colorcolor: determinant of the upper-left 3x3 minor
        minor = (-f02 * f11 * f02 + f01 * f12 * f02 + f02 * f01 * f12
                 - f00 * f12 * f12 - f01 * f01 * f22 + f00 * f11 * f22)
        return minor / detM

    def det(self, a1, a2, a3, b1, b2, b3, c1, c2, c3):
        """
        Determinant of the 3x3 matrix
            (a1 a2 a3)
            (b1 b2 b3)
            (c1 c2 c3)
        computed with the rule of Sarrus.

        Returns
        -----------
        det value
        """
        diagonals = a1 * b2 * c3 + b1 * c2 * a3 + c1 * a2 * b3
        anti_diagonals = a3 * b2 * c1 + b3 * c2 * a1 + c3 * a2 * b1
        return diagonals - anti_diagonals
| 32.229792 | 110 | 0.542958 | 6,690 | 55,822 | 4.400598 | 0.12003 | 0.012806 | 0.00574 | 0.004076 | 0.319429 | 0.249253 | 0.212874 | 0.19127 | 0.164063 | 0.141848 | 0 | 0.022242 | 0.333919 | 55,822 | 1,731 | 111 | 32.248411 | 0.769539 | 0.292501 | 0 | 0.114047 | 0 | 0 | 0.056707 | 0.003373 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069541 | false | 0.034771 | 0.022253 | 0.005563 | 0.157163 | 0.008345 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
614690b30fc8452b96340bd8f0827de2f164d10d | 6,818 | py | Python | teslabot/plugins/corecommands/corecommands.py | Anachronos/teslabot | 4e8b5fef42c8cc2ecb7a6bdb0a26d52a002501a0 | [
"MIT"
] | 2 | 2019-06-05T12:19:39.000Z | 2021-11-23T19:50:02.000Z | teslabot/plugins/corecommands/corecommands.py | Anachronos/teslabot | 4e8b5fef42c8cc2ecb7a6bdb0a26d52a002501a0 | [
"MIT"
] | null | null | null | teslabot/plugins/corecommands/corecommands.py | Anachronos/teslabot | 4e8b5fef42c8cc2ecb7a6bdb0a26d52a002501a0 | [
"MIT"
] | 3 | 2015-12-30T10:04:01.000Z | 2021-11-23T19:50:06.000Z | from pluginbase import PluginBase
import logging
class CoreCommands(PluginBase):
    """Provides basic bot-administration and channel-moderation commands."""

    def __init__(self):
        PluginBase.__init__(self)
        self.name = 'CoreCommands'
        self.logger = logging.getLogger('teslabot.plugin.corecommands')

        # channel-only commands
        self.set_cmd('kick', self.CMD_CHANNEL)
        self.set_cmd('kickban', self.CMD_CHANNEL)
        self.set_cmd('ban', self.CMD_CHANNEL)
        self.set_cmd('unban', self.CMD_CHANNEL)

        # commands hidden from non-admin users in command listings
        self.admin_commands = ['reload', 'say', 'action', 'join', 'leave', 'quit', 'nick', 'plugins']

        self.lang_001 = 'Plugins: {0}'
        self.lang_002 = 'Type \x0310{0}commands\x03 for a list of available commands. Type \x0310{0}(command) help\x03 ' \
                        'to view the help text of a specific command. Note that the parentheses should not be included.'
        self.lang_003 = 'Goodbye.'

        self.users = {}

    def command_plugins(self, user, dst, args):
        """Lists loaded plugins. Requires admin privileges."""
        if user.admin:
            self.irch.notice(self.lang_001.format(', '.join([p.name for p in self.irch._import_plugins()])), user.nick)
        else:
            raise self.InvalidPermission

    def command_help(self, user, dst, args):
        """Sends a short usage notice to the user."""
        self.irch.notice(self.lang_002.format(self.irch.trigger), user.nick)

    def command_commands(self, user, dst, args):
        """Displays a list of commands available through this medium."""
        pobjects = self.irch._import_plugins()

        cmds = []
        for p in pobjects:
            for cmd, ctype in p.chat_commands:
                # bug fix: admin-only commands were hidden from admins
                # instead of from regular users (condition was inverted)
                if cmd in p.admin_commands and not user.admin:
                    continue
                cmds.append(cmd)
        cmds = ', '.join(cmds)

        self.irch.notice('Commands: {0}'.format(cmds), user.nick)

    def command_reload(self, user, dst, args):
        """Reloads the plugin system. Requires admin privileges."""
        if user.admin:
            self.irch.reload_plugins()
            self.irch.say('Plugins reloaded.', dst)
        else:
            raise self.InvalidPermission

    def command_say(self, user, dst, args):
        """Syntax: {0}say <destination> <message>"""
        if user.admin:
            try:
                dst, msg = args.split(' ', 1)
                self.irch.say(msg, dst)
            except ValueError:
                raise self.InvalidSyntax
        else:
            raise self.InvalidPermission

    def command_action(self, user, dst, args):
        """Syntax: {0}action <destination> <message>"""
        if user.admin:
            # bug fix: a destination plus a one-word message is valid
            # (the original required at least a two-word message)
            if args and len(args.split()) > 1:
                dst, msg = args.split(' ', 1)
                self.irch.action(msg, dst)
            else:
                raise self.InvalidSyntax
        else:
            raise self.InvalidPermission

    def command_join(self, user, dst, args):
        """Syntax: {0}join <channel>"""
        if user.admin:
            if args:
                self.irch.join(args.split(' ', 1)[0])
            else:
                raise self.InvalidSyntax
        else:
            raise self.InvalidPermission

    def command_leave(self, user, dst, args):
        """Syntax: {0}leave <channel> [reason]"""
        if user.admin:
            arg_len = len(args.split())
            if arg_len < 1:
                raise self.InvalidSyntax
            else:
                if arg_len > 1:
                    # bug fix: split only once so multi-word reasons work
                    chan, reason = args.split(' ', 1)
                else:
                    chan = args
                    reason = self.lang_003
                self.irch.leave(chan, reason)
        else:
            raise self.InvalidPermission

    def command_quit(self, user, dst, args):
        """Shuts down the bot. Requires admin privileges."""
        if user.admin:
            reason = args if args else self.lang_003
            self.irch.quit(reason)
        else:
            raise self.InvalidPermission

    def command_nick(self, user, dst, args):
        """Changes the bot's nick. Requires admin privileges."""
        if user.admin:
            # bug fix: reject empty input instead of silently setting an
            # empty nick
            if not args or len(args.split()) > 1:
                raise self.InvalidSyntax
            self.irch.nick = args
        else:
            # bug fix: non-admins previously failed silently
            raise self.InvalidPermission

    def command_ban(self, user, dst, args):
        """Syntax: {0}ban <user>"""
        if user.modes.is_owner(dst) or user.admin:
            if self.irch.channels[dst].is_oper(user):
                self.irch.mode(dst, '+b', args)
        else:
            raise self.InvalidPermission

    def command_unban(self, user, dst, args):
        """Syntax: {0}unban <user>"""
        if user.modes.is_owner(dst) or user.admin:
            if self.irch.channels[dst].is_oper(user):
                self.irch.mode(dst, '-b', args)
        else:
            raise self.InvalidPermission

    def command_kick(self, user, dst, args):
        """Kicks a given user.

        Syntax: {0}kick <user> <reason>"""
        if user.modes.is_owner(dst) or user.admin:
            num = len(args.split())
            if num > 1:
                nick, reason = args.split(' ', 1)
            elif num == 1:
                nick = args
                reason = ''
            else:
                raise self.InvalidSyntax
            self.irch.kick(nick, dst, reason)
        else:
            raise self.InvalidPermission

    def command_kickban(self, user, dst, args):
        """Kicks and bans a given user.

        Syntax: {0}kickban <user> [reason] [duration]"""
        if user.modes.is_owner(dst) or user.admin:
            num = len(args.split())
            if num > 1:
                nick, reason = args.split(' ', 1)
            elif num == 1:
                nick = args
                reason = ''
            else:
                raise self.InvalidSyntax
            # ban first so the user cannot rejoin after the kick
            self.irch.mode(dst, '+b', nick)
            self.irch.kick(nick, dst, reason)
        else:
            raise self.InvalidPermission

    def command_deop(self, user, dst, args):
        """Removes operator status from a given user."""
        if user.modes.is_owner(dst) or user.admin:
            if len(args.split()) == 1:
                nick = args
            else:
                raise self.InvalidSyntax
            self.irch.mode(dst, '-o', nick)
        else:
            raise self.InvalidPermission

    def command_op(self, user, dst, args):
        """Grants operator status to a given user."""
        if user.modes.is_owner(dst) or user.admin:
            if len(args.split()) == 1:
                nick = args
            else:
                raise self.InvalidSyntax
            self.irch.mode(dst, '+o', nick)
        else:
            raise self.InvalidPermission
6147f22b0796759c5a32f0ffdb28f704d41d8247 | 2,587 | py | Python | unittest_reinvent/library_design/test_fragment_reactions.py | MolecularAI/reinvent-chemistry | bf0235bc2b1168b1db54c1e04bdba04b166ab7bf | [
"MIT"
] | null | null | null | unittest_reinvent/library_design/test_fragment_reactions.py | MolecularAI/reinvent-chemistry | bf0235bc2b1168b1db54c1e04bdba04b166ab7bf | [
"MIT"
] | null | null | null | unittest_reinvent/library_design/test_fragment_reactions.py | MolecularAI/reinvent-chemistry | bf0235bc2b1168b1db54c1e04bdba04b166ab7bf | [
"MIT"
] | 1 | 2022-03-22T15:24:13.000Z | 2022-03-22T15:24:13.000Z | import unittest
from reinvent_chemistry import Conversions
from reinvent_chemistry.library_design.fragment_reactions import FragmentReactions
from unittest_reinvent.library_design.fixtures import FRAGMENT_REACTION_SUZUKI, SCAFFOLD_SUZUKI
from unittest_reinvent.fixtures.test_data import CELECOXIB, ASPIRIN, CELECOXIB_FRAGMENT, METHYLPHEMYL_FRAGMENT
class TestFragmentReactions(unittest.TestCase):

    def setUp(self):
        self.reactions = FragmentReactions()
        self._suzuki_reaction_dto_list = self.reactions.create_reactions_from_smirks(FRAGMENT_REACTION_SUZUKI)
        self.suzuki_positive_smile = CELECOXIB
        self.suzuki_negative_smile = ASPIRIN
        self.suzuki_fragment = SCAFFOLD_SUZUKI
        self.chemistry = Conversions()

    def _slice_to_smiles(self, smile):
        """Slice a SMILES with the Suzuki reactions; return fragment-SMILES tuples.

        Extracted helper: the three tests below previously duplicated this
        slice-and-stringify loop verbatim.
        """
        molecule = self.chemistry.smile_to_mol(smile)
        all_fragment_pairs = self.reactions.slice_molecule_to_fragments(
            molecule, self._suzuki_reaction_dto_list)
        return [tuple(self.chemistry.mol_to_smiles(fragment) for fragment in pair)
                for pair in all_fragment_pairs]

    def test_slicing_molecule_to_fragments(self):
        smile_fragments = self._slice_to_smiles(self.suzuki_positive_smile)
        self.assertEqual(METHYLPHEMYL_FRAGMENT, smile_fragments[0][0])
        self.assertEqual(CELECOXIB_FRAGMENT, smile_fragments[0][1])

    def test_slicing_wrong_molecule_to_fragments(self):
        # a molecule without a Suzuki bond yields no fragments
        smile_fragments = self._slice_to_smiles(self.suzuki_negative_smile)
        self.assertEqual(0, len(smile_fragments))

    def test_slicing_suzuki_fragment(self):
        smile_fragments = self._slice_to_smiles(self.suzuki_fragment)
        self.assertEqual(2, len(smile_fragments))
| 45.385965 | 113 | 0.722845 | 299 | 2,587 | 5.882943 | 0.177258 | 0.05685 | 0.054576 | 0.047754 | 0.561683 | 0.54747 | 0.54747 | 0.54747 | 0.54747 | 0.521887 | 0 | 0.002933 | 0.209123 | 2,587 | 56 | 114 | 46.196429 | 0.856794 | 0 | 0 | 0.510638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 1 | 0.085106 | false | 0 | 0.106383 | 0 | 0.212766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61483b47ea13bcf3672a9c8fa8c62b5a43a043b2 | 4,998 | py | Python | plataformas.py | AlejandroArango/Plataforma | 590efe70e5fe87cc758f22984a3f7b1dab05b650 | [
"MIT"
] | 1 | 2016-05-10T13:10:38.000Z | 2016-05-10T13:10:38.000Z | plataformas.py | AlejandroArango/Plataforma | 590efe70e5fe87cc758f22984a3f7b1dab05b650 | [
"MIT"
] | 9 | 2016-05-10T13:13:30.000Z | 2016-06-13T17:27:54.000Z | plataformas.py | AlejandroArango/Plataforma | 590efe70e5fe87cc758f22984a3f7b1dab05b650 | [
"MIT"
] | null | null | null | """
administrar la plataforma, aca ira todo el diseño de obstaculos
"""
import pygame
from funcion_sprites import Sprite
# Sprite-sheet coordinates used to draw the platforms.
# Each tuple is (x, y, width, height) inside img/tiles_spritesheet.png.
# tile name            x    y   w   h
# ################################ level 1
suelo_inicio = (140, 0, 70, 40)            # starting-area ground
cuerda_a = (420, 70, 70, 70)               # rope tiles
cuerda_b = (350,840, 70, 70)
inicio_izq_a = (140,420, 70, 70)           # start platform, left / right edges (variant a)
inicio_der_a = (140,280, 70, 70)
inicio_izq_b = (140, 70, 70, 40)           # start platform, variant b (left / centre / right)
inicio_cen_b = (140, 0, 70, 40)
inicio_der_b = ( 70,840, 70, 40)
bloque_muro_a = (140,560, 70, 70)          # wall blocks
bloque_muro_b = (140,630, 70, 40)
bloque_muro_c = (140,600, 70, 30)
inicio_izquierdo = (140,350, 70, 70)       # full start platform (left / centre / right)
inicio_centro = ( 70,420, 70, 70)
inicio_derecho = (140,210, 70, 70)
inicio_plat_mov = (140,140, 70, 40)        # moving platform tile
# ################################ level 2
suelo_bosque = (560,280, 70, 40)           # forest ground
agua = (420,560, 70, 70)                   # water hazard
bosque_izq_a = (560,700, 70, 70)           # forest platform edges (variant a)
bosque_der_a = (560,560, 70, 70)
bosque_izq_b = (560,350, 70, 40)           # forest platform, variant b (left / centre / right)
bosque_cen_b = (560,280, 70, 40)
bosque_der_b = (560,210, 70, 40)
bloque_bosque_a = (560,840, 70, 70)        # forest blocks
bloque_bosque_b = (630, 0, 70, 40)
bloque_bosque_c = (560,840, 70, 30)
bosque_izquierdo = (560,630, 70, 70)       # full forest platform (left / centre / right)
bosque_centro = (490,560, 70, 70)
bosque_derecho = (560,490, 70, 70)
bosque_plat_mov = (560,420, 70, 40)        # moving platform tile
# ################################ level 3
suelo_desierto = (350,280, 70, 40)         # desert ground
lava = (420,770, 70, 70)                   # lava hazard
desierto_izq_curva_a = (350,700, 70, 70)   # desert curved edges (variants a and b)
desierto_der_curva_a = (350,560, 70, 70)
desierto_izq_curva_b = (350,630, 70, 70)
desierto_der_curva_b = (350,490, 70, 70)
desierto_centro = (280,560, 70, 70)        # centre tile; works for curve a or b
desierto_completo_izq = (280,530, 70, 70)  # full desert tiles
desierto_completo_der = (280,490, 70, 70)
desierto_izq_plano_a = (350,350, 70, 40)   # flat desert platform (left / centre / right)
desierto_cen_plano_a = (350,280, 70, 40)
desierto_der_plano_a = (350,210, 70, 40)
desierto_plat_madera = (280,740, 70, 30)   # wooden platform
desierto_plataforma = (350,420, 70, 40)
bloque_desierto = (560,840, 70, 70)        # desert blocks
bloque_desierto_arriba = (350,770, 70, 40)
desierto_esqui_izq = (350,350, 30, 40)     # narrow corner tiles
desierto_esqui_der = (390,210, 30, 40)
class Plataforma(pygame.sprite.Sprite):
    """A static platform the player can jump on.

    ``sprite_imagen_data`` is an (x, y, width, height) tuple pointing at
    the tile inside the shared sprite sheet.
    """

    def __init__(self, sprite_imagen_data):
        super().__init__()

        hoja_sprites = Sprite("img/tiles_spritesheet.png")
        # cut the platform tile out of the sprite sheet
        self.image = hoja_sprites.get_imagen(sprite_imagen_data[0],   # x
                                             sprite_imagen_data[1],   # y
                                             sprite_imagen_data[2],   # width
                                             sprite_imagen_data[3])   # height
        self.rect = self.image.get_rect()
class MovimientoPlataforma(Plataforma):
    """Platform that moves and pushes/carries the player on contact."""

    def __init__(self, sprite_imagen_data):
        super().__init__(sprite_imagen_data)

        # per-frame speed along each axis
        self.change_x = 0
        self.change_y = 0

        # movement limits; the platform bounces between them
        self.boundary_top = 0
        self.boundary_bottom = 0
        self.boundary_left = 0
        self.boundary_right = 0

        # set by the level code after construction
        self.level = None
        self.jugador = None

    def update(self):
        """Move the platform, pushing the player out of the way on contact."""
        # move left/right
        self.rect.x += self.change_x

        # did we collide with the player?
        hit = pygame.sprite.collide_rect(self, self.jugador)
        if hit:
            # moving left: push the player's right side against our left edge
            if self.change_x < 0:
                self.jugador.rect.right = self.rect.left
            else:
                # moving right: push from the other side
                self.jugador.rect.left = self.rect.right

        # move up/down
        self.rect.y += self.change_y

        # same as left/right, but restores the player's position by
        # displacing them above or below the platform
        hit = pygame.sprite.collide_rect(self, self.jugador)
        if hit:
            if self.change_y < 0:
                self.jugador.rect.bottom = self.rect.top
            else:
                self.jugador.rect.top = self.rect.bottom

        # reverse vertical direction at the boundaries
        if self.rect.bottom > self.boundary_bottom or self.rect.top < self.boundary_top:
            self.change_y *= -1

        # horizontal boundaries are in world coordinates, so compensate
        # for the level scroll offset before comparing
        cur_pos = self.rect.x - self.level.world_shift
        if cur_pos < self.boundary_left or cur_pos > self.boundary_right:
            self.change_x *= -1
6148e688ffc31bbe8333eace2095c1273f09545a | 1,781 | py | Python | nanitosbaby/store/models.py | Hector-hedb12/nanitosbaby | 86eff05157dab02a7daca61e1f70ec76bbf6cbdf | [
"MIT"
] | null | null | null | nanitosbaby/store/models.py | Hector-hedb12/nanitosbaby | 86eff05157dab02a7daca61e1f70ec76bbf6cbdf | [
"MIT"
] | null | null | null | nanitosbaby/store/models.py | Hector-hedb12/nanitosbaby | 86eff05157dab02a7daca61e1f70ec76bbf6cbdf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Category(models.Model):
    """Product category; groups Product rows for the store."""

    # display label is Spanish ("Categoría"); max 150 chars
    name = models.CharField(_(u'Categoría'), max_length=150)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Categoria'
        verbose_name_plural = 'Categorias'
@python_2_unicode_compatible
class Size(models.Model):
    """Garment size (e.g. "S", "M"); linked to products through ProductAmount."""
    # Short size code; max_length=5 keeps it to labels like "XS"/"XXL".
    name = models.CharField(_('Nombre'), max_length=5)
    def __str__(self):
        return self.name
    class Meta:
        # "Talla" is Spanish for size.
        verbose_name = 'Talla'
@python_2_unicode_compatible
class ProductAmount(models.Model):
    """Through model of Product<->Size holding the stock count per size."""
    product = models.ForeignKey('Product', models.CASCADE, verbose_name=_('Producto'))
    size = models.ForeignKey('Size', models.CASCADE, verbose_name=_('Talla'))
    # Units in stock for this (product, size) pair.
    amount = models.IntegerField(_('Cantidad'), default=0)
    def __str__(self):
        # e.g. "Shirt - M"
        return '{} - {}'.format(self.product, self.size)
    class Meta:
        verbose_name = 'Cantidad de Producto'
@python_2_unicode_compatible
class Product(models.Model):
    """A sellable item with price, image, category and per-size stock."""
    name = models.CharField(_('Nombre'), max_length=150)
    description = models.TextField(_(u'Descripción'), default='')
    # 12 digits / 2 decimals supports prices up to 9,999,999,999.99.
    price = models.DecimalField(_('Precio'), max_digits=12, decimal_places=2, default=0)
    # Uploads are sharded by date under MEDIA_ROOT/products/.
    image = models.ImageField(upload_to='products/%Y/%m/%d', blank=True)
    # SET_NULL keeps products when their category is deleted.
    category = models.ForeignKey('Category', models.SET_NULL, verbose_name=_(u'Categoría'), null=True)
    # Stock per size lives on the ProductAmount through model.
    sizes = models.ManyToManyField('Size', through='ProductAmount', verbose_name=_('Tallas'))
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'Producto'
| 29.683333 | 102 | 0.708029 | 214 | 1,781 | 5.584112 | 0.383178 | 0.082845 | 0.058577 | 0.100418 | 0.30795 | 0.185774 | 0.185774 | 0.185774 | 0.11046 | 0.11046 | 0 | 0.012129 | 0.16676 | 1,781 | 59 | 103 | 30.186441 | 0.793127 | 0.011791 | 0 | 0.375 | 0 | 0 | 0.105802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0.1 | 0.775 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6149697222d0da55c214eb02250d731291f31b6d | 2,357 | py | Python | danceschool/private_events/admin.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 32 | 2017-09-12T04:25:25.000Z | 2022-03-21T10:48:07.000Z | danceschool/private_events/admin.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 97 | 2017-09-01T02:43:08.000Z | 2022-01-03T18:20:34.000Z | danceschool/private_events/admin.py | django-danceschool/django-danceschool | 65ae09ffdcb0821e82df0e1f634fe13c0384a525 | [
"BSD-3-Clause"
] | 19 | 2017-09-26T13:34:46.000Z | 2022-03-21T10:48:10.000Z | from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from django.forms import ModelForm
from danceschool.core.admin import EventChildAdmin, EventOccurrenceInline
from danceschool.core.models import Event
from danceschool.core.forms import LocationWithDataWidget
from .models import PrivateEvent, PrivateEventCategory, EventReminder
class EventReminderInline(admin.StackedInline):
    """Stacked inline for editing EventReminder rows on the event admin page."""
    model = EventReminder
    extra = 0  # do not render blank reminder forms by default
class PrivateEventAdminForm(ModelForm):
    '''
    Custom form for private events is needed to include necessary
    Javascript for room selection, even though capacity is not
    an included field in this admin.
    '''
    class Meta:
        model = PrivateEvent
        # Hidden fields: timing/month/year look derived from occurrences, and
        # status/submissionUser are forced in PrivateEventAdmin.save_model.
        exclude = [
            'month', 'year', 'startTime', 'endTime', 'duration',
            'submissionUser', 'registrationOpen', 'capacity', 'status'
        ]
        widgets = {
            # Widget that carries location metadata for the room-selection JS.
            'location': LocationWithDataWidget,
        }
    class Media:
        # Scripts that react to location changes; presumably they refresh room
        # choices and capacity — confirm against the JS sources.
        js = ('js/serieslocation_capacity_change.js', 'js/location_related_objects_lookup.js')
class PrivateEventAdmin(EventChildAdmin):
    """Admin configuration for PrivateEvent, with occurrences and reminders inline."""
    base_model = PrivateEvent
    form = PrivateEventAdminForm
    show_in_index = True
    list_display = ('name', 'category', 'nextOccurrenceTime', 'firstOccurrenceTime', 'location_given', 'displayToGroup')
    list_filter = ('category', 'displayToGroup', 'location', 'locationString')
    search_fields = ('title', )
    ordering = ('-endTime', )
    inlines = [EventOccurrenceInline, EventReminderInline]
    fieldsets = (
        (None, {
            'fields': ('title', 'category', 'descriptionField', 'link')
        }),
        ('Location', {
            'fields': (('location', 'room'), 'locationString')
        }),
        ('Visibility', {
            'fields': ('displayToGroup', 'displayToUsers'),
        })
    )
    def location_given(self, obj):
        """List-column: "room, location", the location name, or the free-text string."""
        location = obj.location
        if location:
            if obj.room:
                return _('%s, %s' % (obj.room.name, location.name))
            return location.name
        return obj.locationString
    def save_model(self, request, obj, form, change):
        """Stamp the submitting user and force registration to disabled on save."""
        obj.submissionUser = request.user
        obj.status = Event.RegStatus.disabled
        obj.save()
# Register the private-event models with the default admin site.
admin.site.register(PrivateEvent, PrivateEventAdmin)
admin.site.register(PrivateEventCategory)
| 31.013158 | 120 | 0.662707 | 219 | 2,357 | 7.054795 | 0.502283 | 0.028479 | 0.036893 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000549 | 0.227832 | 2,357 | 75 | 121 | 31.426667 | 0.848352 | 0.064913 | 0 | 0.037037 | 0 | 0 | 0.193119 | 0.033486 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.12963 | 0 | 0.518519 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
614a153769e50dd807fe365bb1c9feed089ce54f | 8,124 | py | Python | build/lib/lsh_example/pos_analyzer.py | huangbeidan/activeLearner_autophrase | c7f1a4f1c7ea57a36c29c246ba393ba31e040353 | [
"MIT"
] | 1 | 2021-03-05T15:42:32.000Z | 2021-03-05T15:42:32.000Z | build/lib/lsh_example/pos_analyzer.py | huangbeidan/activeLearner_autophrase | c7f1a4f1c7ea57a36c29c246ba393ba31e040353 | [
"MIT"
] | null | null | null | build/lib/lsh_example/pos_analyzer.py | huangbeidan/activeLearner_autophrase | c7f1a4f1c7ea57a36c29c246ba393ba31e040353 | [
"MIT"
] | null | null | null | import itertools
import os
import pickle
import re
from collections import defaultdict
import matplotlib.pyplot as plt
plt.style.use('seaborn-white')
import numpy as np
import pandas as pd
import ast
import dill
from tqdm import tqdm
from active_learner.Phrases import Phrases
class PosTag_Query_Fetcher:
    """Selects phrases with suspicious part-of-speech (POS) tag patterns, to be
    prioritized for active-learning annotation.

    Works from AutoPhrase intermediate outputs: the tokenized training corpus
    and the POS tag assigned to each token (one tag per line).
    """
    def __init__(self, phrase_interface, tokenized_train_dir="input/tokenized_train.txt",
                 tokenized_postags_train_dir="input/pos_tags_tokenized_train.txt",
                 thres_unique_counts=5, thres_parent_chil_diff=0.1):
        """
        :param phrase_interface: Phrases object exposing ``phrases`` and ``token2word``
        :param tokenized_train_dir: tmp result from Autophrase
        :param tokenized_postags_train_dir: tmp result from Autophrase
        :param thres_unique_counts: threshold 1 — min distinct POS patterns per phrase
        :param thres_parent_chil_diff: threshold 2 — min parent/child quality gap
        Example files have been put under input/ directory
        """
        self.phrase_interface = phrase_interface
        self.phrases = self.phrase_interface.phrases
        self.token2phrase_dict = self.phrase_interface.token2word
        self.tokenized_train_dir = tokenized_train_dir
        self.tokenized_postags_train_dir = tokenized_postags_train_dir
        self.thres_unique_counts = thres_unique_counts
        self.thres_parent_chil_diff = thres_parent_chil_diff
    def get_all_tokens(self):
        """Read the tokenized corpus and return one flat list of tokens."""
        tokens = []
        with open(self.tokenized_train_dir) as content:
            for line in content:
                tokens += line.split(' ')
        print("total tokens: ", len(tokens))
        return tokens
    def get_all_tags(self):
        """Read the POS-tag file (one tag per line), aligned with get_all_tokens()."""
        tags = []
        with open(self.tokenized_postags_train_dir) as content:
            for line in content:
                tags.append(line.strip().replace("\n", ""))
        print("total tags: ", len(tags))
        return tags
    def build_index(self, plist):
        """Build an inverted index: item -> ascending list of positions in plist."""
        inverted = defaultdict(list)
        for idx, token in enumerate(plist):
            inverted[token].append(idx)
        return inverted
    def find_one_v2(self, target, inverted_idx, plist):
        """Find all start positions of the space-separated phrase ``target`` in
        ``plist``, seeding candidates from the inverted index of first tokens.

        Returns (list of start indices, number of tokens in target).
        """
        targets = target.split(" ")
        width = len(targets)
        corpus_len = len(plist)
        indices = []
        for pos in inverted_idx[targets[0]]:
            # BUG FIX: guard against matches that would run past the corpus end
            # (the original indexed plist[pos + j] unchecked -> IndexError).
            if pos + width > corpus_len:
                continue
            if all(plist[pos + j] == targets[j] for j in range(width)):
                indices.append(pos)
        return indices, width
    def find_one(self, target, plist):
        """Linear-scan variant of find_one_v2 (no index); non-overlapping matches."""
        targets = target.split(" ")
        width = len(targets)
        indices = []
        i = 0
        # Last start position at which the whole phrase can still fit.
        limit = len(plist) - width
        while i <= limit:
            if plist[i] == targets[0] and all(plist[i + j] == targets[j] for j in range(width)):
                indices.append(i)
                i += width  # skip past the match: matches do not overlap
            else:
                i += 1
        return indices, width
    def find_pos_tag_patterns(self):
        """For every phrase, collect the POS-tag pattern at each occurrence.

        Returns (phrase -> list of pattern strings, phrase -> quality score).
        """
        tokens = self.get_all_tokens()
        tags = self.get_all_tags()
        pos_tags_dict = defaultdict(list)
        scores_dict = {}
        inverted_idx = self.build_index(tokens)
        for phr_raw in tqdm(self.phrases):
            phr = phr_raw.tokens
            indices, len_target = self.find_one_v2(phr, inverted_idx, tokens)
            if len(indices) == 0:
                continue
            for idx in indices:
                # Join the tags covering this occurrence; keep the trailing-space
                # format since patterns are used as dict keys downstream.
                pattern = ""
                for l in range(len_target):
                    pattern += (tags[idx + l] + " ")
                pos_tags_dict[phr].append(pattern)
            if phr not in scores_dict:
                scores_dict[phr] = phr_raw.quality
        return pos_tags_dict, scores_dict
    def pos_pattern_generator(self):
        """Map each POS pattern -> list of quality scores of matching phrases.

        Expensive to build, so the result is cached on disk with dill under
        tmp/pos_tags_patterns_backup after the first run.
        """
        if os.path.isfile("tmp/pos_tags_patterns_backup"):
            # 2nd time and onwards, load from the cache (with-block avoids
            # leaking the file handle, unlike dill.load(open(...))).
            with open('tmp/pos_tags_patterns_backup', 'rb') as backup:
                pos_tags_patterns = dill.load(backup)
        else:
            pos_tags_dict, scores_dict = self.find_pos_tag_patterns()
            # pos_tags_patterns should be like: posTag : [score1, score2, ...]
            pos_tags_patterns = defaultdict(list)
            for phr in tqdm(pos_tags_dict):
                score = scores_dict[phr]
                for pos in pos_tags_dict[phr]:
                    pos_tags_patterns[pos].append(score)
            with open('tmp/pos_tags_patterns_backup', 'wb') as backup:
                dill.dump(pos_tags_patterns, backup)
        return pos_tags_patterns
    def analyzer(self):
        """Compute per-pattern score statistics and write tmp/pos_tags_statistics.csv."""
        pos_tags_statistics = dict()
        pos_tags_patterns = self.pos_pattern_generator()
        for pattern in pos_tags_patterns:
            scores = list(map(float, pos_tags_patterns[pattern]))
            pos_tags_statistics[pattern] = [np.mean(scores), len(scores),
                                            min(scores), max(scores), np.std(scores)]
        pos_tags_statistics_df = pd.DataFrame.from_dict(pos_tags_statistics, orient='index')
        pos_tags_statistics_df.columns = ['weighted_mean', 'freq', 'min', 'max', 'std']
        pos_tags_statistics_df.to_csv('tmp/pos_tags_statistics.csv', index=True)
        return pos_tags_patterns
    def get_pos_tag_unique_count(self):
        """Return phrase -> number of distinct POS patterns observed for it."""
        pos_tags_dict, scores_dict = self.find_pos_tag_patterns()
        unique_set = dict()
        for phr in pos_tags_dict:
            unique_set[phr] = len(set(pos_tags_dict[phr]))
        return unique_set
    def query_pos_tags_1(self):
        """Phrases whose occurrences span more than thres_unique_counts distinct
        POS patterns — likely ambiguous extractions worth re-labeling."""
        unique_set = self.get_pos_tag_unique_count()
        ranked = sorted(unique_set.items(), key=lambda item: item[1], reverse=True)
        # THRESHOLD1: keep only phrases above the configured pattern-count cutoff.
        return [phr for phr, count in ranked if count > self.thres_unique_counts]
    def query_pos_tags_2(self):
        """Find groups of nested phrases (sub-chunks) whose quality score differs
        a lot from their longest parent's.

        Returns groups of Phrase objects ordered by score gap, descending.
        """
        tokens = [p.tokens for p in self.phrases]
        tokens.sort()
        i = 0
        res = defaultdict(lambda: 0)
        while i < len(tokens):
            j = i
            tmp = []
            # Chain consecutive (sorted) token strings where the anchor is a
            # substring of each successor, i.e. a nesting chain of phrases.
            if i < len(tokens) - 1 and tokens[j] in tokens[i + 1]:
                tmp.append(tokens[j])
                while i < len(tokens) - 1 and tokens[j] in tokens[i + 1]:
                    tmp.append(tokens[i + 1])
                    i += 1
            if len(tmp) > 0:
                diff = abs(
                    float(self.token2phrase_dict[tmp[0]].quality) - float(
                        self.token2phrase_dict[tmp[-1]].quality))
                # Threshold can be tuned here.
                if diff > self.thres_parent_chil_diff:
                    res[str(tmp)] = diff
            i += 1
        # BUG FIX: the original sorted(res, key=lambda x: x[1]) iterated dict
        # *keys* and ordered the stringified groups by their second character;
        # order by the recorded score gap instead.
        ordered = sorted(res, key=res.get, reverse=True)
        # Convert the stringified groups back to lists of Phrase objects.
        groups = [ast.literal_eval(phr) for phr in ordered]
        return [[self.token2phrase_dict[token] for token in group] for group in groups]
if __name__ == "__main__":
token_mapping_dir = "input/token_mapping.txt"
intermediate_labels_dir = "input/intermediate_labels.txt"
phrases_interface = Phrases(token_mapping_dir, intermediate_labels_dir)
pos_interface = PosTag_Query_Fetcher(phrases_interface)
# pos_tags_patterns_backup = pos_interface.analyzer()
# query_pos_tags_1()
# res1 = pos_interface.query_pos_tags_2()
res2 = pos_interface.query_pos_tags_1()
print("hello")
| 33.295082 | 94 | 0.588996 | 1,000 | 8,124 | 4.536 | 0.208 | 0.055556 | 0.046296 | 0.026455 | 0.219136 | 0.12522 | 0.099647 | 0.073633 | 0.061287 | 0.061287 | 0 | 0.009586 | 0.319424 | 8,124 | 243 | 95 | 33.432099 | 0.810816 | 0.104874 | 0 | 0.196429 | 0 | 0 | 0.044475 | 0.030855 | 0 | 0 | 0 | 0.004115 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.214286 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
615107e9730f254889331e6cdcc60a9ff7200755 | 5,569 | py | Python | circletracking/fluctuation.py | caspervdw/circletracking | 2d981a1bd3f2982d5d36932d7d5a38e912fcdba3 | [
"BSD-3-Clause"
] | 4 | 2016-03-08T14:10:05.000Z | 2022-02-03T21:26:51.000Z | circletracking/fluctuation.py | caspervdw/circletracking | 2d981a1bd3f2982d5d36932d7d5a38e912fcdba3 | [
"BSD-3-Clause"
] | 2 | 2016-03-29T12:40:28.000Z | 2016-04-01T09:36:58.000Z | circletracking/fluctuation.py | caspervdw/circletracking | 2d981a1bd3f2982d5d36932d7d5a38e912fcdba3 | [
"BSD-3-Clause"
] | 2 | 2016-03-08T14:10:48.000Z | 2020-12-23T05:59:26.000Z | from __future__ import (division, unicode_literals)
import numpy as np
from .algebraic import fit_ellipse
import matplotlib.pyplot as plt
def circle_deviation(coords):
    """Algebraically fit a circle to given coordinates and return the radial
    deviations from that circle, ordered by polar angle.

    Parameters
    ----------
    coords :
        (n, 2) array of (y, x) coordinates

    Returns
    -------
    tuple of center, radius, array of theta, array of deviatory radius
    """
    # Algebraic fit over all points; outliers are not rejected here because
    # refine_ellipse only searches a small deviatory-radius interval and has
    # already dropped radii too close to the interval boundary.
    (r, _), (yc, xc), _ = fit_ellipse(coords.T, mode='circle')
    # Polar coordinates of each point relative to the fitted center.
    dy = coords[:, 0] - yc
    dx = coords[:, 1] - xc
    r_dev = np.sqrt(dy**2 + dx**2) - r
    theta = np.arctan2(dy, dx)
    # Order both arrays by angle (theta runs from -pi to +pi).
    order = np.argsort(theta)
    return (yc, xc), r, theta[order], r_dev[order]
def power_spectrum(theta, r_dev, r, modes=None, part=None):
    """Power spectrum of radial fluctuations up to the requested modes.

    A direct DFT is used (not FFT) because the theta samples may be unevenly
    spaced. Theta must be sorted; the phase origin is irrelevant since only
    the squared magnitude is kept. With ``part > 2``, two angular windows of
    half-width pi/part around +pi/2 and -pi/2 are transformed separately and
    their power spectra averaged.

    Parameters
    ----------
    theta :
        array of angles, in radians, sorted, ranging from -pi to pi
    r_dev :
        array of deviatory radius, in um, matching the theta values
    r :
        average radius, in um
    modes :
        array of integers; the modes for which the DFT is done (default 1..100)

    Returns
    -------
    array of numbers: power spectrum of the DFT, rescaled by the circle
    circumference. Wavenumbers are mode / <R> with <R> the average radius.
    """
    if modes is None:
        modes = np.arange(1, 101)
    if part is None:
        part = 1

    def _dft_power(angles, values, mode_scale):
        # |DFT coefficient|^2, with the mean taken over the sample count.
        phase = np.exp(-1j * modes[:, np.newaxis] * mode_scale * angles[np.newaxis, :])
        coeff = np.sum(values[np.newaxis, :] * phase, axis=1) / len(angles)
        return np.abs(coeff) ** 2

    if part <= 2:
        powersp = _dft_power(theta, r_dev, 1)
    else:
        half_angle = np.pi / part
        partials = []
        for center in (np.pi / 2, -np.pi / 2):
            window = ((theta >= (center - half_angle)) *
                      (theta < (center + half_angle)))
            partials.append(_dft_power(theta[window], r_dev[window], part))
        powersp = (partials[0] + partials[1]) / 2
    return 2 * np.pi * r * powersp  # rescale with circumference
def epower_spectrum(coords, max_mode, max_r_dev=0.1, mpp=1., part=1,
                    minpx_fullwave=None, show=False):
    """ From an iterable of coordinates, calculates average DFT powerspectrum
    of fluctuations around a circle.
    Parameters
    ----------
    coords :
        iterable of (n, 2) arrays of (y, x) coordinates in pixels
    max_mode :
        fluctuation upto this mode are calculated
    max_r_dev :
        circlefits with with a circle radius that differ more than max_r_dev
        from the ensemble median, are dropped.
    mpp :
        microns per pixel
    minpx_fullwave :
        truncates the produced fft so that each full wave has given minimum of
        pixels, as well as in the original picture and as in the sampling
    Returns
    -------
    qx : wavenumbers in 1 / um
    fft2 : powerspectrum in um^(3/2)
    """
    frame_count = len(coords)
    if minpx_fullwave is not None:
        # Truncate max_mode so each full wave spans at least minpx_fullwave
        # pixels, both in the original image and in the angular sampling.
        _, r, theta, _ = circle_deviation(coords[0])
        spacing = np.median(np.diff(theta))
        # pixels in original picture
        maxmode1 = round(2*np.pi*r / minpx_fullwave)
        # sampled pixels
        maxmode2 = round(2*np.pi / spacing / minpx_fullwave)
        max_mode = min(max_mode, maxmode1, maxmode2)
    modes = np.arange(1, max_mode+1)
    # BUG FIX: np.float was deprecated and removed in NumPy 1.24; the builtin
    # float is the documented equivalent (float64) on all NumPy versions.
    fft2 = np.empty((frame_count, max_mode), dtype=float)
    radii = np.empty(frame_count, dtype=float)
    for i, coord in enumerate(coords):
        _, r, theta, r_dev = circle_deviation(coord)
        fft2[i] = power_spectrum(theta, r_dev*mpp, r*mpp, modes, part)
        radii[i] = r*mpp
    # Drop frames whose fitted radius deviates more than max_r_dev (relative)
    # from the ensemble median, then average the remaining spectra.
    avrad = np.median(radii)
    mask = ((radii > (avrad * (1 - max_r_dev))) &
            (radii < (avrad * (1 + max_r_dev))))
    avrad = np.average(radii[mask])
    qx = modes / avrad
    if part > 2:
        # Partial-circle mode m corresponds to wavenumber m * part / <R>.
        qx *= part
    fft2 = np.average(fft2[mask], axis=0)
    if show:
        plt.plot(qx, fft2, marker='.')
        plt.xlabel(r'$q_x [\mu m^{-1}]$')
        plt.ylabel(r'$L \langle|u(q_x)|^2\rangle [\mu m^{3}]$')
        plt.ylim(0, np.max(fft2[6:]))
        plt.grid()
        plt.show()
    return qx, fft2
| 35.025157 | 79 | 0.601724 | 794 | 5,569 | 4.13728 | 0.29471 | 0.019483 | 0.010654 | 0.012177 | 0.139726 | 0.114764 | 0.091933 | 0.074277 | 0.074277 | 0.074277 | 0 | 0.015805 | 0.284252 | 5,569 | 158 | 80 | 35.246835 | 0.808329 | 0.415874 | 0 | 0.056338 | 0 | 0 | 0.021516 | 0.007944 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042254 | false | 0 | 0.056338 | 0 | 0.140845 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
615208940aacf2ea5bf13b4e47d62132c7e2ae78 | 2,083 | py | Python | Demos/collindemo.py | ToyVo/PygamePhysics | 0645c0d5965f042c13e74df4e07d9147a936a482 | [
"MIT"
] | null | null | null | Demos/collindemo.py | ToyVo/PygamePhysics | 0645c0d5965f042c13e74df4e07d9147a936a482 | [
"MIT"
] | null | null | null | Demos/collindemo.py | ToyVo/PygamePhysics | 0645c0d5965f042c13e74df4e07d9147a936a482 | [
"MIT"
] | null | null | null | from typing import Tuple, List
import pygame
from Contact.Contact import handle_contact
from Forces.Bond import Bond
from Forces.Gravity import Gravity
from Forces.PairForce import PairForce
from Forces.SingleForce import SingleForce
from Objects.Circle import Circle
from Objects.Particle import Particle
from Vec2 import Vec2
def main() -> None:
    """Run the physics demo: four gravity-bound circles; left-click spawns more.

    Exits on window close or the Escape key.
    """
    bg_color: Tuple[int, int, int] = 0, 0, 0
    screen_size: Tuple[int, int] = 1200, 800
    pygame.init()
    screen = pygame.display.set_mode(size=screen_size)
    screen.fill(bg_color)
    objects: List[Particle] = [Circle(radius=100, color=(255, 0, 0), pos=Vec2(100, 300), mass=1, vel=Vec2(0, 0)),
                               Circle(radius=100, color=(0, 255, 0), pos=Vec2(300, 300), mass=1, vel=Vec2(0, 0)),
                               Circle(radius=100, color=(0, 0, 255), pos=Vec2(500, 300), mass=1, vel=Vec2(0, 0)),
                               Circle(radius=100, color=(255, 255, 255), pos=Vec2(700, 300), mass=1, vel=Vec2(0, 0))]
    # BUG FIX: `List[SingleForce or PairForce or Bond]` evaluated to
    # List[SingleForce] at runtime (`or` on classes keeps the first truthy
    # operand); Union expresses the intended annotation.
    forces: List[Union[SingleForce, PairForce, Bond]] = []
    gravity: Gravity = Gravity(objects, Vec2(0, 10))
    forces.append(gravity)
    # Game loop
    running: bool = True
    fps: float = 60
    dt: float = 1 / fps  # fixed physics timestep, one frame at target fps
    clock = pygame.time.Clock()
    while running:
        # Add force and update objects
        for o in objects:
            o.clear_force()
        for f in forces:
            f.apply()
        for o in objects:
            o.update(dt)
        # Redraw
        screen.fill(bg_color)
        for o in objects:
            o.draw(screen)
        # Show what we have drawn
        pygame.display.flip()
        clock.tick(fps)
        handle_contact(objects)
        # Event Loop
        for e in pygame.event.get():
            if e.type == pygame.QUIT or e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE:
                running = False
            elif e.type == pygame.MOUSEBUTTONDOWN and e.button == 1:
                # Spawn a circle at the click position (default color/velocity).
                objects.append(Circle(radius=100, pos=Vec2(e.pos), mass=1))
    # Shut down pygame
    pygame.quit()
if __name__ == "__main__":
try:
main()
finally:
pygame.quit()
| 28.534247 | 113 | 0.610178 | 296 | 2,083 | 4.233108 | 0.324324 | 0.012769 | 0.059856 | 0.063847 | 0.158021 | 0.103751 | 0.103751 | 0.090184 | 0.090184 | 0.090184 | 0 | 0.068988 | 0.269323 | 2,083 | 72 | 114 | 28.930556 | 0.754271 | 0.046567 | 0 | 0.137255 | 0 | 0 | 0.004042 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.196078 | 0 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61528e35d9ee5f29afe80a029bf86e560218db6f | 10,455 | py | Python | student_teacher_trainer.py | ashwinpn/Computer-Vision | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | [
"MIT"
] | 1 | 2021-03-26T14:35:21.000Z | 2021-03-26T14:35:21.000Z | student_teacher_trainer.py | ashwinpn/Computer-Vision | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | [
"MIT"
] | null | null | null | student_teacher_trainer.py | ashwinpn/Computer-Vision | 9dc3abfe416385171b76e2bad6872e10f36a12b4 | [
"MIT"
] | null | null | null | import torch
import torch.nn
import torch.nn.functional as F
from tqdm import tqdm
import gc
from run_nerf_helpers import *
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
import argparse
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def save_model(global_step, student_model, student_model_fine, student_optim, model_save_path):
    """Checkpoint both student NeRFs and the shared optimizer to *model_save_path*."""
    checkpoint = {
        'global_step': global_step,
        'network_fn_state_dict': student_model.state_dict(),
        'network_fine_state_dict': student_model_fine.state_dict(),
        'optimizer_state_dict': student_optim.state_dict(),
    }
    torch.save(checkpoint, model_save_path)
    print("saved to", model_save_path)
# Command-line interface for the teacher->student NeRF distillation script.
parser = argparse.ArgumentParser()
parser.add_argument(dest="nerf_path", type=str, help="Path to NeRF file")
parser.add_argument(dest="nerf_path2", type=str, help="Path to NeRF file 2")
parser.add_argument("--t_depth", type=int, default=8, help="Depth of teacher NeRF")
parser.add_argument("--t_width", type=int, default=256, help="Width of teacher NeRF")
parser.add_argument("--s_depth", type=int, default=8, help="Depth of student NeRF")
parser.add_argument("--s_width", type=int, default=256, help="Width of student NeRF")
parser.add_argument("--s_skips", nargs='+', default=[4], type=int, help="Skip connections to be used in student")
parser.add_argument("--input_ch", type=int, default=63, help="Number of input channels, after positional enocding")
parser.add_argument("--input_ch_views", type=int, default=27, help="Number of input channels (views), after positional encoding")
parser.add_argument("--log_freq", type=int, default=1000, help="Frequency to log statisitics during training")
parser.add_argument("--status_freq", type=int, default=1000, help="Frequency to output status during training")
parser.add_argument("--lr", type=float, default=5e-4, help="Initial learning rate for distillation")
parser.add_argument("--loss_thresh", type=float, default=.2, help="Active layers are done training when total loss is below this amount")
parser.add_argument("--max_epochs", type=int, default=500000, help="Number of epochs to train for")
parser.add_argument("--layer_queue", type=str, default="0,0|1,1|2,2|3,3|4,4|5,5|6,6|7,7|9,9|10,10|O,O", help="Layers to be compared during distillation")
parser.add_argument("--plot_path", type=str, default="./layer_{}_.png", help="Path to save plots to, include {} for layer number")
parser.add_argument("--save_path", type=str, default="./student_model_{}.tar", help="Path to save student models to, include {} for later formatting")
parser.add_argument("--save_freq", type=int, default=50000, help="Frequency of saving (regardless of layer progress)")
parser.add_argument("--hyper", action='store_true', help="Use HyperNeRF as model")
parser.add_argument("--aug", action='store_true', help="Use AugNeRF as model")
args = parser.parse_args()
# Parse the "s,t|s,t|..." layer spec into a deque of (student, teacher) pairs;
# numeric layer indices become ints, the literal 'O' stays a string (output).
tmp = []
for pair in args.layer_queue.split('|'):
    s,t = pair.split(',')
    try:
        tmp.append((int(s),int(t)))
    except ValueError:
        tmp.append((s,t))
args.layer_queue = deque(tmp)
del tmp
# Echo the effective configuration for the training log.
print("Arguments received:")
for arg in args.__dict__:
    print(arg, '=', getattr(args, arg))
print("")
# Load the two pretrained teacher NeRF pairs (coarse + fine) and accumulate
# the global per-channel input min/max across both checkpoints.
teacher_models = []
teacher_models_fine = []
paths = [args.nerf_path, args.nerf_path2]
mins = None
maxes = None
for path in paths:
    # Load pretrained "teacher" NeRF models
    saved = torch.load(path)
    teacher_model = NeRF(D=args.t_depth, W=args.t_width, input_ch=args.input_ch, input_ch_views=args.input_ch_views, use_viewdirs=True)
    teacher_model.load_state_dict(saved['network_fn_state_dict'])
    teacher_model.eval()
    teacher_model_fine = NeRF(D=args.t_depth, W=args.t_width, input_ch=args.input_ch, input_ch_views=args.input_ch_views, use_viewdirs=True)
    teacher_model_fine.load_state_dict(saved['network_fine_state_dict'])
    teacher_model_fine.eval()
    print("Teacher model =", teacher_model)
    teacher_models.append(teacher_model)
    teacher_models_fine.append(teacher_model_fine)
    # NeRF class has been modified to track mins and maxes for all input
    # (elementwise union of the ranges seen by each teacher).
    # NOTE(review): `!= None` / `== None` would normally be `is not None`.
    if maxes != None:
        maxes = torch.max(maxes, saved['maxes'].to(device)).to(device)
    else:
        maxes = saved['maxes'].to(device)
    print("maxes =", maxes)
    if mins != None:
        mins = torch.min(mins, saved['mins'].to(device)).to(device)
    else:
        mins = saved['mins'].to(device)
    print("mins =", mins)
del teacher_model, teacher_model_fine
# One-hot class vectors, one per teacher, used to condition the student.
class_vectors = []
class_vectors.append(torch.tensor([1, 0], dtype=torch.float32, requires_grad=False).to(device))
class_vectors.append(torch.tensor([0, 1], dtype=torch.float32, requires_grad=False).to(device))
# Instantiate student models
# NOTE(review): --hyper is never checked here; HyperNeRF is the default branch.
if args.aug:
    student_model = AugNeRF(D=args.s_depth, W=args.s_width, input_ch=args.input_ch, input_ch_views=args.input_ch_views, skips=args.s_skips, use_viewdirs=True, dev=device)
    student_model_fine = AugNeRF(D=args.s_depth, W=args.s_width, input_ch=args.input_ch, input_ch_views=args.input_ch_views, skips=args.s_skips, use_viewdirs=True, dev=device)
else:
    student_model = HyperNeRF(NeRF(D=args.s_depth, W=args.s_width, input_ch=args.input_ch, input_ch_views=args.input_ch_views, skips=args.s_skips, use_viewdirs=True))
    student_model_fine = HyperNeRF(NeRF(D=args.s_depth, W=args.s_width, input_ch=args.input_ch, input_ch_views=args.input_ch_views, skips=args.s_skips, use_viewdirs=True))
print("Student model =", student_model)
# Report the compression ratio of student vs. teacher parameter counts.
num_params_teacher = 0
for param in teacher_models[0].parameters():
    num_params_teacher += param.numel()
num_params_student = 0
for param in student_model.parameters():
    num_params_student += param.numel()
print("Number of parameters in teacher network:", num_params_teacher, "\nNumber of parameters in student network:", num_params_student)
print("Size of student model: {:.2f}% of teacher model.".format((num_params_student/num_params_teacher)*100))
# Start of network distillation code
OUTPUT = 'O'
active_layers = [args.layer_queue.popleft()]
loss_over_time = []
# Send all models to device
for model in teacher_models:
    model.to(device)
student_model.to(device)
for model_fine in teacher_models_fine:
    model_fine.to(device)
student_model_fine.to(device)
# Use same optimizer for both student models
student_optim = torch.optim.Adam(list(student_model.parameters()) + list(student_model_fine.parameters()), lr=args.lr)
for _ in tqdm(range(args.max_epochs//args.status_freq), desc='Total progress'):
for epoch in range(args.status_freq): #tqdm(range(args.status_freq), bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'):
# Generate random input
rand_input = torch.rand(int(1024*64), args.input_ch + args.input_ch_views).to(device)
rand_input = (maxes - mins) * rand_input + mins
rand_input = rand_input.to(device)
# Compute a forward pass
# Do student first
loss = torch.zeros(1).to(device)
# Now iterate through teachers
for teacher_model, teacher_model_fine, c in zip(teacher_models, teacher_models_fine, class_vectors):
student_model.Class = c
student_model_fine.Class = c
student_out = student_model(rand_input, track_values=False)
student_fine_out = student_model_fine(rand_input, track_values=False)
student_out_hidden = student_model.hidden_states
student_fine_out_hidden = student_model_fine.hidden_states
teacher_out = teacher_model(rand_input, track_values=False)
teacher_fine_out = teacher_model_fine(rand_input, track_values=False)
teacher_out_hidden = teacher_model.hidden_states
teacher_fine_out_hidden = teacher_model_fine.hidden_states
# Compute loss as mse between active layers in both student and teacher models
for layer_tuple in active_layers:
if layer_tuple[0] == OUTPUT:
student_layer = student_out
student_fine_layer = student_fine_out
else:
student_layer = student_out_hidden[layer_tuple[0]]
student_fine_layer = student_fine_out_hidden[layer_tuple[0]]
if layer_tuple[1] == OUTPUT:
teacher_layer = teacher_out
teacher_fine_layer = teacher_fine_out
else:
teacher_layer = teacher_out_hidden[layer_tuple[1]]
teacher_fine_layer = teacher_fine_out_hidden[layer_tuple[1]]
loss += F.mse_loss(student_layer, teacher_layer) + F.mse_loss(student_fine_layer, teacher_fine_layer)
# Backprop
student_optim.zero_grad()
loss.backward()
student_optim.step()
# Check to see if current active layers are within threshold
if loss < args.loss_thresh:
print("")
print("Completed layers: ", active_layers)
# Plot loss to file
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.plot(loss_over_time)
ax.set_yscale('log')
plot_path = args.plot_path.format(active_layers[-1][0])
fig.savefig(plot_path)
plt.close(fig)
print("Plotted to", plot_path)
# Save weights after each layer is finished
model_save_path = args.save_path.format("layer_" + str(active_layers[-1][0]))
save_model(saved['global_step'], student_model, student_model_fine, student_optim, model_save_path)
# Get next layer from queue, unless done!
if args.layer_queue:
active_layers.append(args.layer_queue.popleft())
#active_layers = [args.layer_queue.popleft()]
else:
#active_layers = []
done = True
# Record loss according to log frequency
if (total_epochs + epoch) % args.log_freq == 0:
loss_over_time.append(loss)
# end for epoch in tqdm
total_epochs += epoch + 1
# Print out a status according to frequency
print("")
print("Epoch: {}, Loss: {}".format(total_epochs, loss.item()))
print("Active layer:", active_layers)
print("Layers in queue:", args.layer_queue)
if total_epochs % args.save_freq == 0:
model_save_path = args.save_path.format(str(total_epochs) + "_epochs")
save_model(saved['global_step'], student_model, student_model_fine, student_optim, model_save_path)
if done:
break
# end while total_epochs < args.max_epochs and active_layers != []:
# Saving weights after each layer is finished
model_save_path = args.save_path.format(str(total_epochs) + "_epochs")
save_model(saved['global_step'], student_model, student_model_fine, student_optim, model_save_path) | 45.064655 | 175 | 0.726351 | 1,563 | 10,455 | 4.600768 | 0.174024 | 0.048394 | 0.047281 | 0.015575 | 0.363232 | 0.268252 | 0.222361 | 0.197886 | 0.158114 | 0.158114 | 0 | 0.011641 | 0.153706 | 10,455 | 232 | 176 | 45.064655 | 0.801085 | 0.088283 | 0 | 0.08 | 0 | 0.005714 | 0.161338 | 0.016302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005714 | false | 0 | 0.057143 | 0 | 0.062857 | 0.097143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61539c546a71ee8c53ffbc384e4ce521dc6eee99 | 4,864 | py | Python | generatePassionlyUser.py | killax-d/Passionly-API | 8f6df152a2d05ea3cc9aa97bc0cb369b3881b51e | [
"MIT"
] | null | null | null | generatePassionlyUser.py | killax-d/Passionly-API | 8f6df152a2d05ea3cc9aa97bc0cb369b3881b51e | [
"MIT"
] | null | null | null | generatePassionlyUser.py | killax-d/Passionly-API | 8f6df152a2d05ea3cc9aa97bc0cb369b3881b51e | [
"MIT"
] | null | null | null | import random
# Interactive configuration: how many users to generate and the ids to
# continue from in the existing database.
N = int(input("Number of users needed : "))
last_address_id = int(input("Last address id : "))
last_user_id = int(input("Last user id : "))
file = open('users.sql', 'w+')
# Survey question specs: 'random' picks from answer ids (unique => exactly
# one answer), 'range' draws a [min, max] pair from ranges, 'slide' draws a
# max snapped down to the step (third value of ranges).
questions = {
    1: {'unique': True, 'criteria': 'random', 'answers': [1, 2, 3]},
    2: {'unique': True, 'criteria': 'random', 'answers': [4, 5, 6]},
    3: {'unique': True, 'criteria': 'random', 'answers': [7, 8]},
    4: {'unique': True, 'criteria': 'random', 'answers': [9, 10, 11]},
    5: {'unique': True, 'criteria': 'random', 'answers': [12, 13, 14, 15]},
    6: {'unique': False, 'criteria': 'random', 'answers': [16, 17, 18, 19, 20]},
    7: {'unique': True, 'criteria': 'range', 'ranges': [18, 100, 1]},
    8: {'unique': True, 'criteria': 'slide', 'ranges': [0, 1000, 5]},
    9: {'unique': True, 'criteria': 'random', 'answers': [23, 24, 25]}
}
# SQL VALUES templates for the generated rows of each table.
ADDRESS_FORMAT = "({0}, '{1}', '{2}', {3})"
USER_FORMAT = "('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', {7}, {8}, {9}, '{10}')"
SURVEY_FORMAT = "({0}, {1}, {2}, {3}, {4})"
SUBSCRIPTION_FORMAT = "({0}, 2, 999)"
# bcrypt hash shared by all generated accounts; per the original inline
# comment, this is presumably the hash of the literal string "password".
password = "$2a$10$dnDZhS4hqAR1rWnAwiMi/uqQfulrQbP9jO.d4h.v3fyQbyaIZMpGW" # password
# Pools for randomized user fields: cities, [male, female] first names, locales.
cities = ['Marseille', 'Douai', 'Lens', 'Paris', 'Bordeaux', 'Auby', 'Nantes', 'Montpellier', 'Lille', 'Reims', 'Angers', 'Dunkerque']
firstname = [['John', 'Dylan', 'Jason', 'Maxime', 'Kévin', 'Aurélien', 'Quentin', 'Aymeric'], ['Amélie', 'Aurélie', 'Carla', 'Jessie', 'Noémie', 'Ambre', 'Rose', 'Chloé']]
langs = ['fr', 'en']
def generateAnswers(user, question):
    """Build the SQL VALUES row(s) answering `question` for user id `user`.

    Uses the module-level ``questions`` spec and ``SURVEY_FORMAT`` template.
    Returns a single formatted row, or a comma-joined string of rows for
    multi-answer questions.
    """
    spec = questions[question]
    criteria = spec['criteria']
    if criteria == 'random':
        if spec['unique']:
            # Exactly one answer for this question.
            return SURVEY_FORMAT.format(user, question, random.choice(spec['answers']), 'NULL', 'NULL')
        # Non-unique: pick a proper subset (never all answers at once).
        picked = random.sample(spec['answers'], random.randint(1, len(spec['answers']) - 1))
        rows = [SURVEY_FORMAT.format(user, question, answer, 'NULL', 'NULL') for answer in picked]
        return ', '.join(rows)
    if criteria == 'range':
        # Draw two endpoints and order them as (min, max).
        lo = random.randint(spec['ranges'][0], spec['ranges'][1])
        hi = random.randint(spec['ranges'][0], spec['ranges'][1])
        if lo > hi:
            lo, hi = hi, lo
        return SURVEY_FORMAT.format(user, question, 'NULL', lo, hi)
    if criteria == 'slide':
        # Max value snapped down to the slider step (ranges[2]).
        top = random.randint(spec['ranges'][0], spec['ranges'][1])
        top -= top % spec['ranges'][2]
        return SURVEY_FORMAT.format(user, question, 'NULL', spec['ranges'][0], top)
def generateAddress():
    """Return (address_id, SQL VALUES row) for a new fake address.

    Reads the module-level counter ``last_address_id``; the main loop is
    expected to increment it before each call. The original built an unused
    ``address`` list whose extra ``random.choice(cities)`` draw picked a city
    different from the one actually written — that dead code is removed.
    """
    address_id = last_address_id
    return (address_id, ADDRESS_FORMAT.format(address_id, 'Rue Lambda', random.choice(cities), 1))
def generatePhoneNumber():
    """Return a fake French mobile number: '06' followed by 8 random digits."""
    suffix = random.randint(10000000, 99999999)
    return '06' + str(suffix)
def generateBirthdate():
    """Return a random 'YYYY-M-D' date string (no zero padding).

    Day is capped at 28 so every month/year combination is valid.
    """
    year = random.randint(1920, 2004)
    month = random.randint(1, 12)
    day = random.randint(1, 28)
    return '-'.join(str(part) for part in (year, month, day))
# Returns a Postgres expression, not a plain timestamp string.
def generateLastActivity():
    """Return a Postgres interval expression 1-35 days in the past."""
    days_ago = random.randint(1, 35)
    return "(NOW() - '{0} days'::interval)".format(days_ago)
def generateUser(id):
    """Assemble one fake user.

    Returns a 3-tuple: (list of user column values, the address SQL VALUES
    row, list of survey SQL VALUES rows).
    """
    gender = random.choice([0, 1])
    address = generateAddress()  # (address_id, formatted SQL row)
    user = [
        'generated-{0}'.format(id),
        password,
        random.choice(firstname[gender]),
        '{0}-lastname'.format('Male' if gender == 0 else 'Female'),
        'generated-{0}@mail.fr'.format(id),
        generatePhoneNumber(),
        generateBirthdate(),
        gender,
        address[0],
        generateLastActivity(),
        random.choice(langs),
    ]
    survey = [generateAnswers(id, question) for question in questions.keys()]
    return user, address[1], survey
# Generate N users, advancing the id counters before each one.
generated_users = []
for i in range(N):
    last_address_id += 1
    last_user_id += 1
    user = generateUser(last_user_id)  # (user fields, address row, survey rows)
    generated_users.append({'user': user[0], 'address': user[1], 'survey': user[2], 'subscription': SUBSCRIPTION_FORMAT.format(last_user_id)})

# Turn the Python list/str representations into SQL VALUES rows: square
# brackets become parentheses and unwanted quote characters are stripped.
users = []
addresses = []
surveys = []
subscriptions = []
for user in generated_users:
    users.append(str(user['user']).replace("[", "(").replace("]", ")").replace('"', ''))
    addresses.append(str(user['address']).replace("[", "(").replace("]", ")"))
    surveys.append(", ".join(user['survey']).replace("[", "(").replace("]", ")").replace("'", ""))
    subscriptions.append(str(user['subscription']).replace("[", "(").replace("]", ")"))

# Join the rows for each table and terminate each statement with ';'.
users = ",\n".join(users) + ";"
addresses = ",\n".join(addresses) + ";"
surveys = ",\n".join(surveys) + ";"
subscriptions = ",\n".join(subscriptions) + ";"

# Write the four INSERT statements to users.sql.
file.write(
    """-- ADDRESSES : --
INSERT INTO addresses (street_nr, street, city, fk_country) VALUES
{0}
-- USERS : --
INSERT INTO users (username, password, firstname, lastname, email, phone, birthdate, gender, fk_address, last_login, language) VALUES
{1}
-- SURVEYS : --
INSERT INTO surveys (fk_userid, fk_questionid, fk_answerid, min, max) VALUES
{2}
-- SUBSCRIPTION : --
INSERT INTO subscription (fk_userid, gender_desired, match_left) VALUES
{3}
"""
    .format(addresses, users, surveys, subscriptions));
file.close() | 38 | 286 | 0.632813 | 621 | 4,864 | 4.890499 | 0.280193 | 0.023049 | 0.047415 | 0.047415 | 0.180771 | 0.106355 | 0.081989 | 0.055647 | 0.021732 | 0 | 0 | 0.038407 | 0.127467 | 4,864 | 128 | 287 | 38 | 0.677191 | 0.019326 | 0 | 0 | 0 | 0.011494 | 0.238768 | 0.018759 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0.022989 | 0.011494 | 0.011494 | 0.183908 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61573280417ff62fd3e211db5ccac015351a1d2f | 614 | py | Python | whisper/templatetags/chat.py | PragmaticMates/django-whisper | 2cf94d5adcc5d897502b4a379034184c8ec833a1 | [
"Apache-2.0"
] | 4 | 2019-02-15T16:37:18.000Z | 2021-12-01T04:10:09.000Z | whisper/templatetags/chat.py | PragmaticMates/django-whisper | 2cf94d5adcc5d897502b4a379034184c8ec833a1 | [
"Apache-2.0"
] | null | null | null | whisper/templatetags/chat.py | PragmaticMates/django-whisper | 2cf94d5adcc5d897502b4a379034184c8ec833a1 | [
"Apache-2.0"
] | null | null | null | from django import template
from django.contrib.auth import get_user_model
register = template.Library()
@register.simple_tag(takes_context=True)
def room_slug(context, subject):
    """Template tag: derive a chat-room slug for `subject`.

    A user subject yields a direct-message slug built from both user pks
    (sorted so the same pair always maps to the same room); any other model
    instance yields a per-object slug. Returns None for anonymous visitors
    requesting a user room.
    """
    request = context['request']

    if isinstance(subject, get_user_model()):
        if not request.user.is_authenticated:
            return None
        pks = sorted([subject.pk, request.user.pk])
        return 'users-{}'.format('-'.join(str(pk) for pk in pks))

    return f'{subject.__class__.__name__.lower()}-{subject.pk}'
| 27.909091 | 53 | 0.688925 | 80 | 614 | 4.9875 | 0.5125 | 0.120301 | 0.06015 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.192182 | 614 | 21 | 54 | 29.238095 | 0.804435 | 0 | 0 | 0 | 0 | 0 | 0.066775 | 0.040717 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6157c056f3cdc35a654509bab1d1b88e2a48945a | 6,919 | py | Python | chement/core.py | NaleRaphael/chement | fcd32c60107f2cdb15abda1c858e1444fc6ff6cc | [
"MIT"
] | null | null | null | chement/core.py | NaleRaphael/chement | fcd32c60107f2cdb15abda1c858e1444fc6ff6cc | [
"MIT"
] | null | null | null | chement/core.py | NaleRaphael/chement | fcd32c60107f2cdb15abda1c858e1444fc6ff6cc | [
"MIT"
] | null | null | null | import logging
import requests
from .config import MeshConfiguration, ChebiConfiguration, EntrezConfiguration
from .parser import BasicParser, ChebiObjectParser
from .objbase import MeshObject
from .sparql import SparqlQuery, QueryTerm2UI
# Public API of this module. Fixed: the original listed 'ChebiRequest',
# which does not exist (the class is ChebiSearchRequest), so
# `from chement.core import *` raised AttributeError.
__all__ = ['MeshURI', 'MeshRDFRequest', 'MeshRDFResponse', 'MeshSearchRequest', 'MeshSearchResponse',
           'ChebiSearchRequest', 'ChebiSearchResponse', 'EntrezSearchRequest', 'EntrezSearchResponse',
           'MeshESearchRequest', 'MeshESearchResponse']

# Module-level configuration singletons, loaded once at import time.
MeshConfig = MeshConfiguration().load()
ChebiConfig = ChebiConfiguration().load()
EntrezConfig = EntrezConfiguration().load()
class BaseRequest(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError('This method should be implemented by child class.')
def get_response(self):
raise NotImplementedError('This method should be implemented by child class.')
class BaseResponse(object):
    """Wraps a ``requests.Response`` and eagerly parses it on construction.

    ``self.content`` holds the parsed payload, or ``[]`` when parsing fails.
    """

    def __init__(self, response, parser, **kwargs):
        if not isinstance(response, requests.Response):
            raise TypeError('Type of given response should be `requests.Response`.')
        self.response = response
        self.parser = parser()  # `parser` is a class; instantiate it here
        self.content = None
        self.parse()

    def parse(self):
        """Parse ``self.response``; on any failure log it and fall back to []."""
        try:
            parsed = self.parser.parse(self.response)
        except Exception as ex:
            print(self.response.url)
            logging.exception(ex)
            self.content = []
        else:
            self.content = parsed
class MeshURI(object):
    """Builds a request URL for the MeSH SPARQL endpoint."""

    @classmethod
    def build(cls, limit, year, format, inference, query, offset):
        """Return the endpoint URL with all query parameters.

        Fixed: the original concatenated the parameters with no '&'
        separators (producing ``?query=...limit=...``) and silently dropped
        the ``format`` argument.

        NOTE(review): values are not URL-encoded here; `query` is assumed to
        be pre-encoded by the caller — confirm before passing raw text.
        """
        params = [
            ('query', query),
            ('limit', limit),
            ('year', year),
            ('format', format),
            ('inference', inference),
            ('offset', offset),
        ]
        return MeshConfig.sparql.base_url + '?' + '&'.join(
            '{}={}'.format(key, value) for key, value in params)
class MeshRDFRequest(BaseRequest):
    """Request against the MeSH SPARQL (RDF) endpoint."""

    def __init__(self, fmt='json', inference=True, limit=10,
                 offset=0, query='', year='current'):
        if not isinstance(query, SparqlQuery):
            raise TypeError('Given `query` should be an instance of `SparqlQuery`.')
        self.fmt = fmt
        # The endpoint expects the literal strings 'true'/'false'.
        # (The original assigned self.inference and self.limit twice.)
        self.inference = 'true' if inference else 'false'
        self.limit = limit
        self.offset = offset
        self.query = query
        self.year = year

    def get_response(self):
        """Issue the GET request; return the Response, or None on a network error.

        Fixed: the original ``except ex:`` referenced an undefined name, so
        any request failure raised NameError which was then swallowed by a
        ``return`` inside ``finally``; it also appended a stray '?' to the
        URL even though `requests` adds the query string itself.
        """
        uri = MeshConfig.sparql.base_url
        payload = {
            'query': self.query,
            'limit': self.limit,
            'year': self.year,
            'format': self.fmt,
            'inference': self.inference,
            'offset': self.offset
        }
        try:
            return requests.get(uri, params=payload)
        except requests.RequestException as ex:
            logging.exception(ex)
            return None
class MeshRDFResponse(BaseResponse):
    """Parsed response from the MeSH SPARQL (RDF) endpoint."""

    def __init__(self, response, parser):
        # Same as BaseResponse, but with an explicit (response, parser) signature.
        super(MeshRDFResponse, self).__init__(response, parser)
class MeshSearchRequest(BaseRequest):
    """Request against the MeSH search API."""

    def __init__(self, query='', exact=True):
        self.query = query
        self.exact = exact

    def get_response(self):
        """Issue the GET request; return the Response, or None on a network error.

        Fixed: the original ``except ex:`` referenced an undefined name, so
        any request failure raised NameError (then got swallowed by a
        ``return`` inside ``finally``).
        """
        d = MeshConfig.search.option
        uri = MeshConfig.search.base_url + '?' + MeshConfig.search.query.format(query=self.query)
        payload = {
            "searchInField": d.searchInField.terms[1],  # "termDescriptor"
            "size": d.size,
            "searchType": d.searchType[0] if self.exact else d.searchType[2],
            "searchMethod": d.searchMethod[0],  # "FullWord"
            "sort": d.sort.Relevance
        }
        try:
            return requests.get(uri, params=payload)
        except requests.RequestException as ex:
            logging.exception(ex)
            return None
class MeshSearchResponse(BaseResponse):
    """Parsed response from the MeSH search API."""

    def __init__(self, response, parser):
        # Same as BaseResponse, but with an explicit (response, parser) signature.
        super(MeshSearchResponse, self).__init__(response, parser)
class MeshESearchRequest(BaseRequest):
    """Request against the MeSH esearch API (NCBI E-utilities style)."""

    def __init__(self, query='', exact=True, api_key=None):
        self.query = query
        self.exact = exact
        self.api_key = MeshConfig.api_key if api_key is None else api_key

    def get_response(self):
        """Issue the GET request; return the Response, or None on a network error.

        Fixed two bugs: ``except ex:`` referenced an undefined name, and the
        handler called the nonexistent ``logging.exceotion``.
        """
        d = MeshConfig.esearch.option
        uri = MeshConfig.esearch.base_url + '?' + MeshConfig.esearch.query.format(query=self.query)
        if self.exact:
            # NOTE(review): 'exact' appends the orgn_human condition, mirroring
            # EntrezSearchRequest — confirm this is the intended semantics.
            uri += '+AND+{}'.format(MeshConfig.esearch.cond.orgn_human)
        payload = {k: d.get(k) for k in d.keys()}
        if self.api_key is not None:
            payload['api_key'] = self.api_key
        try:
            return requests.get(uri, params=payload)
        except requests.RequestException as ex:
            logging.exception(ex)
            return None
class MeshESearchResponse(BaseResponse):
    """Parsed response from the MeSH esearch API."""

    def __init__(self, response, parser):
        # Same as BaseResponse, but with an explicit (response, parser) signature.
        super(MeshESearchResponse, self).__init__(response, parser)
class ChebiSearchRequest(BaseRequest):
    """Request against the ChEBI search API."""

    def __init__(self, query='', exact=True):
        self.query = query
        self.exact = exact

    def get_response(self):
        """Issue the GET request; return the Response, or None on a network error.

        Fixed: the original ``except ex:`` referenced an undefined name, so
        any request failure raised NameError (then got swallowed by a
        ``return`` inside ``finally``).
        """
        d = ChebiConfig.search.option
        uri = ChebiConfig.search.base_url + '?' + ChebiConfig.search.query.format(query=self.query)
        payload = {k: d.get(k) for k in d.keys()}
        # ChEBI expects the literal strings 'true'/'false'.
        payload['exact'] = 'true' if self.exact else 'false'
        try:
            return requests.get(uri, params=payload)
        except requests.RequestException as ex:
            logging.exception(ex)
            return None
class ChebiSearchResponse(BaseResponse):
    """Parsed response from the ChEBI search API."""

    def __init__(self, response, parser):
        # Same as BaseResponse, but with an explicit (response, parser) signature.
        super(ChebiSearchResponse, self).__init__(response, parser)
class EntrezSearchRequest(BaseRequest):
    """Request against the Entrez esearch API (gene search)."""

    def __init__(self, query='', api_key=None, human_only=True):
        self.query = query
        self.api_key = EntrezConfig.api_key if api_key is None else api_key
        self.human_only = human_only

    def get_response(self):
        """Issue the GET request; return the Response, or None on a network error.

        Fixed: the original ``except ex:`` referenced an undefined name, so
        any request failure raised NameError (then got swallowed by a
        ``return`` inside ``finally``).
        """
        d = EntrezConfig.esearch.option
        uri = EntrezConfig.esearch.base_url + '?' + EntrezConfig.esearch.query.format(query=self.query)
        # Restrict the search to genes found in human.
        if self.human_only:
            uri += '+AND+{}'.format(EntrezConfig.esearch.cond.orgn_human)
        payload = {k: d.get(k) for k in d.keys()}
        if self.api_key is not None:
            payload['api_key'] = self.api_key
        try:
            return requests.get(uri, params=payload)
        except requests.RequestException as ex:
            logging.exception(ex)
            return None
class EntrezSearchResponse(BaseResponse):
    """Parsed response from the Entrez esearch API."""

    def __init__(self, response, parser):
        # Same as BaseResponse, but with an explicit (response, parser) signature.
        super(EntrezSearchResponse, self).__init__(response, parser)
if __name__ == '__main__':
    # Ad-hoc smoke test for the Entrez search wrapper.
    query = 'Rab10'
    req = EntrezSearchRequest(query)
    # Fixed: the raw response must be wrapped in EntrezSearchResponse, not a
    # second EntrezSearchRequest (which would map the response onto `query`
    # and the parser class onto `api_key`). Also removed a leftover
    # `pdb.set_trace()` debugging breakpoint.
    resp = EntrezSearchResponse(req.get_response(), ChebiObjectParser)
    print(resp)
| 31.738532 | 103 | 0.6222 | 741 | 6,919 | 5.653171 | 0.182186 | 0.022917 | 0.031511 | 0.025782 | 0.40487 | 0.338744 | 0.307472 | 0.232275 | 0.232275 | 0.232275 | 0 | 0.001971 | 0.266802 | 6,919 | 217 | 104 | 31.884793 | 0.823773 | 0.008527 | 0 | 0.423529 | 0 | 0 | 0.085327 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.041176 | 0 | 0.270588 | 0.011765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
615842aa488c11ca9578f55dc30c65b00ee3a393 | 5,815 | py | Python | deeptables/fe/dae.py | daBawse167/deeptables | 74254d451107e567b4497e0fe81ac484201713ec | [
"Apache-2.0"
] | 828 | 2020-05-24T02:42:33.000Z | 2022-03-31T01:37:36.000Z | deeptables/fe/dae.py | daBawse167/deeptables | 74254d451107e567b4497e0fe81ac484201713ec | [
"Apache-2.0"
] | 36 | 2020-06-02T14:20:20.000Z | 2022-02-23T11:05:09.000Z | deeptables/fe/dae.py | daBawse167/deeptables | 74254d451107e567b4497e0fe81ac484201713ec | [
"Apache-2.0"
] | 170 | 2020-05-26T15:43:13.000Z | 2022-03-25T06:35:37.000Z | # -*- coding:utf-8 -*-
__author__ = 'yangjian'
"""
Denoise Auto-encoder
Denosing auto encoders are an important and crucial tools for feature selection and extraction.
"""
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
class DAE:
    """Denoising auto-encoder for feature extraction.

    Trains an encoder/decoder on X -> X reconstruction (optionally with
    value-swap noise on the inputs) and extracts the hidden
    'feature_layer' activations as features.
    """

    def __init__(self, encoder_units=(500, 500), feature_units=20, activation='relu',
                 kernel_initializer='glorot_uniform', optimizer=None, noise_rate=0):
        """
        :param encoder_units: layer widths of the encoder (mirrored in decoder).
        :param feature_units: width of the hidden feature layer.
        :param activation: activation for encoder/decoder/output layers.
        :param kernel_initializer: Keras initializer name for Dense kernels.
        :param optimizer: Keras optimizer; defaults to a fresh Adam(lr=0.001).
        :param noise_rate: fraction of feature values swapped between rows;
            <= 0 disables the denoising noise.
        """
        self.encoder_units = encoder_units
        self.feature_units = feature_units
        self.activate = activation
        self.kernel_initializer = kernel_initializer
        # Build a fresh optimizer per instance: a default `Adam(...)` in the
        # signature is created once at class definition and would be shared
        # (including its internal state) by every DAE instance.
        self.optimizer = Adam(learning_rate=0.001) if optimizer is None else optimizer
        self.noise_rate = noise_rate

    def build_dae2(self, X):
        """Small fixed-size (100-20-100) autoencoder variant (uncompiled, unused by fit)."""
        inputs = Input((X.shape[1],))
        x = Dense(100, activation='relu')(inputs)
        x = Dense(20, activation='relu', name="feature_layer")(x)
        x = Dense(100, activation='relu')(x)
        outputs = Dense(X.shape[1], activation='relu')(x)
        model = Model(inputs=inputs, outputs=outputs)
        return model

    def build_dae(self, X):
        """Build the symmetric denoising autoencoder (uncompiled Keras Model)."""
        inputs = Input((X.shape[1],), name='input_layer')
        n_stacks = len(self.encoder_units) - 1
        x = inputs
        # Internal layers in encoder.
        for i in range(n_stacks):
            x = Dense(self.encoder_units[i + 1], activation=self.activate, kernel_initializer=self.kernel_initializer,
                      name='encoder_%d' % i)(x)
        # Hidden layer: features are extracted from here (no activation).
        x = Dense(self.feature_units, kernel_initializer=self.kernel_initializer,
                  name='feature_layer')(x)
        # Internal layers in decoder (mirror of the encoder).
        for i in range(n_stacks, 0, -1):
            x = Dense(self.encoder_units[i], activation=self.activate, kernel_initializer=self.kernel_initializer,
                      name='decoder_%d' % i)(x)
        # Output layer reconstructs the input dimensionality.
        x = Dense(X.shape[1], activation=self.activate, kernel_initializer=self.kernel_initializer,
                  name='output_layer')(x)
        output = x
        return Model(inputs=inputs, outputs=output, name='AE')

    def fit(self, X, batch_size=128, epochs=1000):
        """Compile and train the autoencoder on X; returns the trained model."""
        es = EarlyStopping(monitor='mse', min_delta=0.001, patience=5,
                           verbose=1, mode='min', baseline=None, restore_best_weights=True)
        rlr = ReduceLROnPlateau(monitor='mse', factor=0.5,
                                patience=3, min_lr=1e-6, mode='min', verbose=1)
        autoencoder = self.build_dae(X)
        autoencoder.compile(optimizer=self.optimizer, loss='mse', metrics=['mse'])
        if self.noise_rate <= 0:
            # Plain autoencoder: reconstruct clean inputs from clean inputs.
            print('no noise.')
            autoencoder.fit(X, X, batch_size=batch_size, epochs=epochs, callbacks=[es, rlr])
        else:
            # Denoising: reconstruct clean targets from noised inputs.
            print(f'noise rate:{self.noise_rate}')
            gen = self.mix_generator(X, batch_size, swaprate=self.noise_rate)
            autoencoder.fit_generator(generator=gen,
                                      steps_per_epoch=np.ceil(X.shape[0] / batch_size),
                                      epochs=epochs,
                                      callbacks=[es, rlr],
                                      verbose=1,
                                      )
        return autoencoder

    def fit_transform(self, X, batch_size=128, epochs=1000):
        """Train on X, then return the 'feature_layer' activations for X."""
        ae = self.fit(X, batch_size, epochs)
        # Renamed from the original typo `__buld_proxy_model`.
        proxy_model = self.__build_proxy_model(ae, 'feature_layer')
        features = proxy_model.predict(X, batch_size=batch_size)
        return features

    def __build_proxy_model(self, model, output_layer):
        """Return a frozen sub-model that outputs `output_layer`'s activations."""
        model.trainable = False
        output = model.get_layer(output_layer).output
        proxy = Model(inputs=model.input, outputs=output)
        return proxy

    def x_generator(self, x, batch_size, shuffle=True):
        """Endless batch generator over rows of x (reshuffled every epoch)."""
        batch_index = 0
        n = x.shape[0]
        while True:
            if batch_index == 0:
                index_array = np.arange(n)
                if shuffle:
                    index_array = np.random.permutation(n)
            current_index = (batch_index * batch_size) % n
            if n >= current_index + batch_size:
                current_batch_size = batch_size
                batch_index += 1
            else:
                # Short final batch of the epoch; restart next iteration.
                current_batch_size = n - current_index
                batch_index = 0
            batch_x = x[index_array[current_index: current_index + current_batch_size]]
            yield batch_x

    def mix_generator(self, x, batch_size, swaprate=0.15, shuffle=True):
        """Endless generator of (noised input, clean target) batch pairs.

        Noise model: in every row, `swaprate` of the feature values are
        replaced by values from the corresponding row of an independently
        shuffled batch.
        """
        num_value = x.shape[1]
        num_swap = int(num_value * swaprate)
        gen1 = self.x_generator(x, batch_size, shuffle)
        gen2 = self.x_generator(x, batch_size, shuffle)
        while True:
            batch1 = next(gen1)
            batch2 = next(gen2)
            new_batch = batch1.copy()
            for i in range(batch1.shape[0]):
                swap_idx = np.random.choice(num_value, num_swap, replace=False)
                new_batch[i, swap_idx] = batch2[i, swap_idx]
            yield (new_batch, batch1)
| 41.241135 | 118 | 0.601204 | 710 | 5,815 | 4.74507 | 0.240845 | 0.050757 | 0.029682 | 0.032057 | 0.261799 | 0.191748 | 0.146631 | 0.091422 | 0.091422 | 0.038587 | 0 | 0.025255 | 0.291831 | 5,815 | 140 | 119 | 41.535714 | 0.792861 | 0.103869 | 0 | 0.06 | 0 | 0 | 0.035721 | 0.004342 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.05 | 0 | 0.2 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
615a9a697c37336d51a7cdc176a1e9ceeb62b8d2 | 1,133 | py | Python | Tree/leetcode_tree/medium/94.py | catdog001/leetcode_python | b70588121ef2ce5c27ff4cb610c81c33c961a3db | [
"Apache-2.0"
] | null | null | null | Tree/leetcode_tree/medium/94.py | catdog001/leetcode_python | b70588121ef2ce5c27ff4cb610c81c33c961a3db | [
"Apache-2.0"
] | null | null | null | Tree/leetcode_tree/medium/94.py | catdog001/leetcode_python | b70588121ef2ce5c27ff4cb610c81c33c961a3db | [
"Apache-2.0"
] | null | null | null | """
给定一个二叉树,返回它的中序 遍历。
示例:
输入: [1,null,2,3]
1
\
2
/
3
输出: [1,3,2]
进阶: 递归算法很简单,你可以通过迭代算法完成吗?
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/binary-tree-inorder-traversal
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
"""
非递归方法进行中序遍历,因为递归可以用栈来进行模拟,故而需要借助于数据结构栈。
左右孩子节点需要以根节点为基础,在知道根节点的情况下才可以访问左右孩子节点。故而当访问根节点时需要将根节点入栈保存。同时将左孩子节点赋值给根节点(记为p)
直到某一个根节点的左孩子为空,此时将栈中元素出栈(此时出栈的为根节点元素)即为q,然后将该节点的右孩子节点赋值给根节点(p = q.right)。
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def inorderTraversal(self, root):
        """Return the in-order traversal of a binary tree, iteratively.

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        visited = []   # values in in-order
        pending = []   # stack of nodes whose left subtree is being explored
        node = root
        while node or pending:
            if node:
                pending.append(node)
                node = node.left
            else:
                node = pending.pop()
                visited.append(node.val)
                node = node.right
        return visited
| 20.981481 | 77 | 0.560459 | 121 | 1,133 | 5.214876 | 0.661157 | 0.006339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01297 | 0.319506 | 1,133 | 53 | 78 | 21.377358 | 0.805447 | 0.381289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
615b87236ff51760bc21a84e0a677faa94a2d249 | 690 | py | Python | programmers/42840/python/krwns97.py | algorithm-everyday/algorithm-everyday | b79a34b4db626c15b540443b8c929edc21992e14 | [
"MIT"
] | 2 | 2021-03-29T14:30:39.000Z | 2021-03-29T15:08:55.000Z | programmers/42840/python/krwns97.py | algorithm-everyday/algorithm-everyday | b79a34b4db626c15b540443b8c929edc21992e14 | [
"MIT"
] | 50 | 2021-02-16T13:50:33.000Z | 2021-06-15T04:33:46.000Z | programmers/42840/python/krwns97.py | gon125/algorithm-everyday | b79a34b4db626c15b540443b8c929edc21992e14 | [
"MIT"
] | 5 | 2021-02-08T14:12:10.000Z | 2021-02-24T13:21:22.000Z | def calculate_result(p_answer,answer):
result=0
for i in range(len(answer)):
if p_answer[i%len(p_answer)] == answer[i]:
result+=1
return result
def solution(answers):
    """Return the 1-based ids of the pattern-guessers with the top score.

    Ties are all included, in ascending id order. Equivalent to the original
    pairwise `>= both others` checks: a guesser qualifies exactly when its
    score equals the maximum.
    """
    patterns = [
        [1, 2, 3, 4, 5],
        [2, 1, 2, 3, 2, 4, 2, 5],
        [3, 3, 1, 1, 2, 2, 4, 4, 5, 5],
    ]
    scores = [calculate_result(pattern, answers) for pattern in patterns]
    best = max(scores)
    return [i + 1 for i, score in enumerate(scores) if score == best]
615c5121211df490e4956df5cee84fd40785167a | 1,154 | py | Python | bqq/data/schemas.py | martintupy/bqq | e07bc021ffaa6c4fa9aa51a77492003aebabd54d | [
"Apache-2.0"
] | null | null | null | bqq/data/schemas.py | martintupy/bqq | e07bc021ffaa6c4fa9aa51a77492003aebabd54d | [
"Apache-2.0"
] | null | null | null | bqq/data/schemas.py | martintupy/bqq | e07bc021ffaa6c4fa9aa51a77492003aebabd54d | [
"Apache-2.0"
] | null | null | null | import glob
import json
import os
import shutil
from pathlib import Path
from typing import List
from bqq import const
from bqq.types import JobInfo
from google.cloud.bigquery.schema import SchemaField
class Schemas:
    """Persists BigQuery job schemas as JSON files under BQQ_SCHEMAS.

    Layout: <path>/<project>/<job_id>.json, each file holding the list of
    SchemaField API representations.
    """

    def __init__(self):
        self.path = const.BQQ_SCHEMAS

    def clear(self):
        """Delete every cached project directory."""
        # Renamed the loop variable from `dir`, which shadowed the builtin.
        for project_dir in glob.glob(f"{self.path}/*"):
            shutil.rmtree(project_dir)

    def write(self, project: str, id: str, schema: List[SchemaField]):
        """Serialize `schema` for job `id` of `project` to disk."""
        project_dir = f"{self.path}/{project}"
        Path(project_dir).mkdir(exist_ok=True)
        filename = f"{self.path}/{project}/{id}.json"
        with open(filename, "w") as f:
            f.write(json.dumps([field.to_api_repr() for field in schema]))

    def read(self, job_info: JobInfo) -> List[SchemaField]:
        """Load the schema cached for `job_info`; returns [] when not cached."""
        filename = f"{self.path}/{job_info.project}/{job_info.job_id}.json"
        if not os.path.isfile(filename):
            return []
        with open(filename) as f:
            columns = json.load(f)
        return [SchemaField.from_api_repr(col) for col in columns]
| 28.146341 | 75 | 0.607452 | 154 | 1,154 | 4.461039 | 0.383117 | 0.058224 | 0.052402 | 0.046579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.278163 | 1,154 | 40 | 76 | 28.85 | 0.82473 | 0 | 0 | 0 | 0 | 0 | 0.10312 | 0.090988 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.28125 | 0 | 0.46875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
615e397ada402e05ece5e8287e4aabf1e19a3939 | 5,929 | py | Python | apis/posts_api/tests/test_posts_utils.py | MasterBDX/find-your-api | 23e22093df36574cb81703f315da5bfdf1efc51e | [
"MIT"
] | 2 | 2020-12-31T10:32:18.000Z | 2021-01-01T03:15:30.000Z | apis/posts_api/tests/test_posts_utils.py | MasterBDX/find-your-api | 23e22093df36574cb81703f315da5bfdf1efc51e | [
"MIT"
] | null | null | null | apis/posts_api/tests/test_posts_utils.py | MasterBDX/find-your-api | 23e22093df36574cb81703f315da5bfdf1efc51e | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from apis.models import UserApiModel,PostApiModel
from ..utils import (get_new_post,
get_serialized_data,
create_api_posts)
from ..serializers import PostApiSerializer
from datetime import date
class TestMethods(TestCase):
    '''Test utils file methods for the posts API module.'''

    def setUp(self):
        self.today = date.today()
        # This user is not from the main User model but from the fake
        # UserApiModel used by the API fixtures.
        self.user = UserApiModel.objects.create(first_name='test',
                                                last_name='bdx',
                                                gender='male',
                                                birthday=self.today,
                                                birth_place='test place',
                                                email='test@email.com',
                                                phone_number='218931239763',
                                                address='Test Street',
                                                )
        self.data = {'title': 'Test Post',
                     'overview': 'test overview',
                     'content': 'Test Content',
                     'author_id': self.user.id,
                     'published_at': self.today}
        self.updated_data = {'title': 'Test Post Updated',
                             'overview': 'test overview Updated',
                             'content': 'Test Content Updated',
                             'author_id': self.user.id,
                             'published_at': self.today}
        self.post = PostApiModel.objects.create(
            title=self.data['title'],
            overview=self.data['overview'],
            content=self.data['content'],
            author_id=self.data['author_id'],
            published_at=self.data['published_at'])

    def test_new_post_method(self):
        '''Test the get_new_post method.'''
        obj = get_new_post(self.data)
        self.assertEqual(obj.title, self.data['title'])
        self.assertEqual(obj.overview, self.data['overview'])
        self.assertEqual(obj.content, self.data['content'])
        self.assertEqual(obj.author_id, self.user.id)
        self.assertEqual(obj.published_at, self.today)
        self.assertTrue(type(obj.id) == int)

    def test_serialized_data_method(self):
        '''Test get_serialized_data without passing any data.'''
        pk = self.post.pk
        serilaizer_data = PostApiSerializer(self.post).data
        data, status_code = get_serialized_data(pk)
        self.assertEqual(data, serilaizer_data)
        self.assertEqual(status_code, status.HTTP_200_OK)

    def test_serialized_data_method2(self):
        '''Test get_serialized_data with the data kwarg but without the
        partial kwarg: incomplete data must fail validation (400).
        '''
        pk = self.post.pk
        serilaizer_data = PostApiSerializer(self.post).data
        data, status_code = get_serialized_data(pk, self.updated_data)
        data2, status_code2 = get_serialized_data(pk, {'title': 'Test Title 2'})
        self.assertEqual(self.updated_data['title'], data['title'])
        self.assertNotEqual(data, serilaizer_data)
        self.assertEqual(status_code, status.HTTP_200_OK)
        self.assertEqual(status_code2, status.HTTP_400_BAD_REQUEST)

    # Renamed from a duplicate `test_serialized_data_method2`: the second
    # definition silently replaced the first one in the class namespace, so
    # the bad-request test above never ran.
    def test_serialized_data_method3(self):
        '''Test get_serialized_data with the data kwarg and partial=True.'''
        pk = self.post.pk
        serilaizer_data = PostApiSerializer(self.post).data
        data, status_code = get_serialized_data(pk, {'title': 'Test Title 2'}, partial=True)
        self.assertEqual('Test Title 2', data.get('title', None))
        self.assertNotEqual(data, serilaizer_data)
        self.assertEqual(status_code, status.HTTP_200_OK)
class TestMethods2(TestCase):
    """Tests for the bulk post-creation helper."""

    def setUp(self):
        self.today = date.today()
        # Shared field values; only first_name/email vary per user.
        shared = dict(last_name='bdx',
                      gender='male',
                      birthday=self.today,
                      birth_place='test place',
                      phone_number='218931239763',
                      address='Test Street')
        self.user = UserApiModel.objects.create(first_name='test',
                                                email='test@email.com',
                                                **shared)
        self.user2 = UserApiModel.objects.create(first_name='test2',
                                                 email='test2@email.com',
                                                 **shared)
        self.user3 = UserApiModel.objects.create(first_name='test3',
                                                 email='test3@email.com',
                                                 **shared)

    def test_posts_creator_method(self):
        created = create_api_posts(num=3)
        posts_num = PostApiModel.objects.count()
        self.assertEqual(posts_num, 9)
        self.assertTrue(created)
| 43.277372 | 88 | 0.508517 | 560 | 5,929 | 5.205357 | 0.203571 | 0.066895 | 0.046655 | 0.041166 | 0.521441 | 0.483705 | 0.47307 | 0.452487 | 0.419211 | 0.401372 | 0 | 0.021324 | 0.39096 | 5,929 | 136 | 89 | 43.595588 | 0.785932 | 0.075561 | 0 | 0.48 | 0 | 0 | 0.098519 | 0 | 0 | 0 | 0 | 0 | 0.17 | 1 | 0.07 | false | 0 | 0.08 | 0 | 0.17 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6163a41389d6c9447247605bd31c2fde594d5dae | 748 | py | Python | mintohours_norm.py | GiacomoCrevani/CESP_MSc_thesis | bce0b6ccee06630a04c5590caf0b4ed42b359c90 | [
"CC0-1.0"
] | null | null | null | mintohours_norm.py | GiacomoCrevani/CESP_MSc_thesis | bce0b6ccee06630a04c5590caf0b4ed42b359c90 | [
"CC0-1.0"
] | null | null | null | mintohours_norm.py | GiacomoCrevani/CESP_MSc_thesis | bce0b6ccee06630a04c5590caf0b4ed42b359c90 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 10 10:18:55 2021
@author: giaco
"""
# Downsamples a minute-resolution yearly load profile (RAMP output, CSV) to
# hourly resolution so it can be fed to MicrogridsPy as load demand input.
# A full-year (non-leap) profile is required: 525,600 minute rows -> 8,760 hours.
import numpy as np
import pandas as pd

# Index covering every minute of 2021, and a simple 1..8760 hour index for output.
minute_index = pd.date_range("2021-01-01 00:00:00", "2021-12-31 23:59:00", freq="1min")
hour_index = np.linspace(1,8760,8760,dtype=int)

# Load the minute-level demand (column '0' of the RAMP CSV), attach the
# minute timestamps, then average each hour's 60 samples.
load=pd.read_csv('#INSERT THE PATH TO THE .csv FILE FROM RAMP OUTPUT#',usecols=['0'])
load.index = minute_index
loadH = load.resample('H').mean()
loadH.index = hour_index

# Export the hourly profile as CSV.
loadH.to_csv('#XXX.csv TO NAME THE FILE OF OUTPUT IN HOURLY RESOLUTION#')
| 27.703704 | 119 | 0.695187 | 130 | 748 | 3.946154 | 0.607692 | 0.023392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 0.183155 | 748 | 26 | 120 | 28.769231 | 0.754501 | 0.364973 | 0 | 0 | 0 | 0 | 0.351039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
616495d99eaadbc172ec861f51af65378cdd26cd | 2,314 | py | Python | Git-Gamer.py | nikilson/git-gamer | d0862f8d2cbbc47d057975b9af87fdebece9132f | [
"MIT"
] | null | null | null | Git-Gamer.py | nikilson/git-gamer | d0862f8d2cbbc47d057975b9af87fdebece9132f | [
"MIT"
] | null | null | null | Git-Gamer.py | nikilson/git-gamer | d0862f8d2cbbc47d057975b9af87fdebece9132f | [
"MIT"
] | null | null | null | import os
from os import path
from datetime import datetime
import subprocess
# Locate (or create) the local GitGamer repository in the user's home folder.
home = (path.expanduser("~"))
git_gamer_location = path.join(home, "GitGamer")
current_cwd = os.getcwd()

# Optionally hide the console window for subprocess calls (Windows only).
# si = subprocess.STARTUPINFO()
# si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
""" passing starttupinfo=si argument into subprocess.call function"""

if not (path.isdir(git_gamer_location)):
    # First run: create the folder, init a git repo on branch 'main' and
    # register the remote URL read from git-repo-url.txt.
    os.mkdir(git_gamer_location)
    os.chdir(git_gamer_location)
    subprocess.call('git init -b "main"', shell=True)
    os.chdir(current_cwd)
    with open("git-repo-url.txt", 'r') as url_file:
        remote_url = url_file.readline()
        os.chdir(git_gamer_location)
        subprocess.call(f"git remote add origin {remote_url}", shell=True)
        os.chdir(current_cwd)
        url_file.close()
else:
    # Repo already exists: sync it before copying new save files.
    os.chdir(git_gamer_location)
    subprocess.call("git pull origin", shell=True)
    os.chdir(current_cwd)

# Read the save-location / game-name configuration files next.
def remove_lines(list_path):
    """Return a copy of `list_path` with newline characters removed from each entry.

    Replaces the manual append loop (with its redundant '\\n' membership
    check) by a list comprehension; `str.replace` is already a no-op when
    the substring is absent.
    """
    return [line.replace('\n', '') for line in list_path]
# Each line of save-location.txt is a game's save folder; the matching line
# of game-name.txt names the subfolder to mirror it into inside GitGamer.
save_location_file = open("save-location.txt", 'r')
game_name_file = open("game-name.txt", 'r')
save_location_list = save_location_file.readlines()
game_name_list = game_name_file.readlines()
save_location_list = remove_lines(save_location_list)
game_name_list = remove_lines(game_name_list)
for num1 in range(len(save_location_list)):
    temp_loc = save_location_list[num1]
    temp_name = game_name_list[num1]
    temp_path = path.join(git_gamer_location, temp_name)
    if not (path.isdir(temp_path)):
        os.mkdir(temp_path)
    # Windows wildcard pattern matching every file in the save folder.
    temp_loc = temp_loc + "\\*.*"
    # print(f'copy "{temp_loc}" "{temp_path}"')
    # Wipe the previous mirror, then copy the current saves into it
    # (Windows `del`/`xcopy`; requires shell=True).
    subprocess.call(f'del /Q /S /F "{temp_path}" | cls', shell=True)
    subprocess.call(f'xcopy /S /Q /F /Y "{temp_loc}" "{temp_path}"', shell=True)
# Stage, commit (message suffixed with a timestamp) and push the mirror.
os.chdir(git_gamer_location)
subprocess.call("git add .", shell=True)
comment = input("Please enter a commit message : ")
date_time = datetime.now().strftime("%H-%M:%d-%m-%Y")
subprocess.call(f"git commit -m '{comment}-{date_time}'", shell=True)
subprocess.call("git push -u origin main", shell=True)
print("The repository is updated sucessfully!!!")
6164c0914aca12f9a4b8a2c1a13e7d76578e7120 | 2,564 | py | Python | src/objective.py | azane/chomp | 843ce07148389a63f46091daed12c83c8a3ac52c | [
"MIT"
] | 1 | 2022-03-27T11:02:54.000Z | 2022-03-27T11:02:54.000Z | src/objective.py | azane/chomp | 843ce07148389a63f46091daed12c83c8a3ac52c | [
"MIT"
] | null | null | null | src/objective.py | azane/chomp | 843ce07148389a63f46091daed12c83c8a3ac52c | [
"MIT"
] | 1 | 2022-03-27T11:03:22.000Z | 2022-03-27T11:03:22.000Z | import numpy as np
import theano as th
import sympy as sm
import theano.tensor as tt
from typing import *
def slow_fdiff_1(n: int) -> np.ndarray:
    """Build the (n+1) x n first-order finite-difference matrix.

    Row 0 keeps the first point (1 on the diagonal); the remaining rows
    form -I with ones on the superdiagonal, so each row computes a
    forward difference of the trajectory points.
    """
    body = np.diag(np.full(n, -1.0)) + np.diag(np.ones(n - 1), 1)
    mat = np.vstack((np.zeros(n), body))
    mat[0, 0] = 1.0
    mat[-1, -1] = -1.0
    return mat
def slow_naive_prior(q: np.ndarray) -> float:
    """Half the sum of squared differences between consecutive rows of q."""
    assert q.ndim == 2
    total = 0.
    for prev_pt, next_pt in zip(q[:-1], q[1:]):
        step = next_pt - prev_pt
        total += np.inner(step, step)
    return .5 * total
def slow_fdmat_prior(q: np.ndarray) -> float:
    """Smoothness prior computed via the finite-difference matrix form.

    Equivalent to slow_naive_prior, but expressed as K q + e where K is
    the difference matrix over the interior points and e carries the
    fixed boundary configurations.
    """
    assert q.ndim == 2
    # Boundary-condition vector: first/last rows inject the endpoints.
    bc = np.zeros(q[1:].shape)
    bc[0] = -q[0]
    bc[-1] = q[-1]
    # Difference operator over the interior (all but the boundaries).
    diff_mat = slow_fdiff_1(len(q) - 2)
    residual = diff_mat.dot(q[1:-1]) + bc
    return .5 * np.tensordot(residual, residual)
def th_smoothness(q: tt.TensorVariable=None, w: tt.TensorConstant=None):
    """Symbolic smoothness cost: half the squared (optionally weighted)
    backward differences of the trajectory q.

    Returns the cost expression and the q variable it is built from.
    """
    if q is None:
        q = tt.dmatrix("q")  # type: tt.TensorVariable
    steps = abs(q[1:] - q[:-1])
    if w is not None:
        # Broadcast the per-dimension weights across all time steps.
        steps = steps * w.dimshuffle('x', 0)
    return .5 * tt.tensordot(steps, steps), q
def ffp_smoothness(q: tt.TensorVariable=None):
    """Compile the smoothness objective and its gradient as callables."""
    cost, q = th_smoothness(q)
    f = th.function(inputs=[q], outputs=cost)
    grad_expr = th.grad(cost=cost, wrt=q)
    fp = th.function(inputs=[q], outputs=grad_expr)
    return f, fp, q
def th_obstacle(q: tt.TensorVariable, u: tt.TensorConstant,
                xf: Callable[[tt.TensorVariable, tt.TensorConstant], tt.Tensor],
                cf: Callable[[tt.Tensor], tt.Tensor]):
    """
    Symbolic obstacle cost of a trajectory.

    :param q: The configurations over the trajectory, in order of time.
    :param u: Points on the discretized robot body.
    :param xf: A function mapping workspace config and body to workspace.
    :param cf: A function mapping workspace to obstacle costs.
    """
    ws = xf(q, u)  # workspace coords, shape (Q, U, D)
    ws_cost = cf(ws)  # obstacle cost per body point, shape (Q, U)
    # Average cost of adjacent time steps per body element, shape (Q-1, U).
    mid_cost = .5 * (ws_cost[1:, :] + ws_cost[:-1, :])
    # Backward differences of the workspace coords, shape (Q-1, U, D).
    ws_step = ws[1:] - ws[:-1]
    return tt.sum(mid_cost.dimshuffle(0, 1, 'x') * ws_step), q
def ffp_obstacle(q, *args, **kwargs):
    """Compile the obstacle objective and its gradient as callables."""
    cost, q = th_obstacle(q, *args, **kwargs)
    f = th.function(inputs=[q], outputs=cost)
    grad_expr = th.grad(cost=cost, wrt=q)
    fp = th.function(inputs=[q], outputs=grad_expr)
    return f, fp, q
6166fba4b94593c2647210c0e98e1420a4c404f3 | 649 | py | Python | regex-improve.py | tylerjereddy/regex-improve | 526ac8ae1bb97bbc5401f3e1796a065ca6d30d98 | [
"MIT"
] | null | null | null | regex-improve.py | tylerjereddy/regex-improve | 526ac8ae1bb97bbc5401f3e1796a065ca6d30d98 | [
"MIT"
] | 1 | 2020-12-28T23:01:49.000Z | 2020-12-29T15:47:39.000Z | regex-improve.py | tylerjereddy/regex-improve | 526ac8ae1bb97bbc5401f3e1796a065ca6d30d98 | [
"MIT"
] | null | null | null | import lib
from lib import extra_char_class, general
import argparse
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-d",
                        help="target directory")
    parser.add_argument("-n",
                        help="name of regex offense")
    args = parser.parse_args()

    if args.n == 'extra_char_class':
        operator_instance = lib.extra_char_class.FileOperatorExtraCharClass()
    else:
        # Fail fast with a clear message; the original fell through to a
        # NameError on operator_instance for any other -n value.
        parser.error('unknown regex offense: {}'.format(args.n))

    # adjust the source code using the general walk + operator_instance
    lib.general.walk_replace(rootdir=args.d,
                             operator_instance=operator_instance)
| 30.904762 | 77 | 0.644068 | 72 | 649 | 5.5 | 0.527778 | 0.161616 | 0.106061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.268105 | 649 | 20 | 78 | 32.45 | 0.833684 | 0.11094 | 0 | 0 | 0 | 0 | 0.113438 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61678ae5338640a2f904c427e986cfa9ce80ea04 | 2,200 | py | Python | radarly/user.py | gvannest/radarly-py | fba9a575b0d1e4d6a2f1041506ece55298b17e0f | [
"Apache-2.0"
] | 6 | 2018-04-27T10:01:46.000Z | 2019-02-19T13:14:16.000Z | radarly/user.py | gvannest/radarly-py | fba9a575b0d1e4d6a2f1041506ece55298b17e0f | [
"Apache-2.0"
] | 1 | 2021-02-16T04:12:44.000Z | 2021-03-19T08:05:10.000Z | radarly/user.py | gvannest/radarly-py | fba9a575b0d1e4d6a2f1041506ece55298b17e0f | [
"Apache-2.0"
] | 5 | 2019-02-04T17:11:06.000Z | 2021-11-12T05:07:43.000Z | """
This module defines objects used to explore information about a Radarly User.
For security reasons, you can only retrieve information about the
current user of the API.
"""
from .api import RadarlyApi
from .model import SourceModel
from .project import InfoProject
class User(SourceModel):
    """Explore information about a Radarly user returned by the API.

    Inherits ``SourceModel``, so the object's structure can be drawn with
    ``draw_structure`` and its fields listed with ``keys()``.

    Examples:
        >>> user = User.find(uid='me')
        >>> user
        <User.id=1234.email='john.doe@linkfluence.com'>

    Args:
        id (int): unique identifier of the user
        name (str): registered name of the user
        email (str): registered email of the user
        projects (list[InfoProject]): summary information for each project
            (full project data is not stored here)
        created (datetime.datetime): creation datetime of the user
    """

    def __init__(self, data):
        super().__init__()
        # Convert the raw project payloads into InfoProject instances.
        converters = dict(projects=InfoProject._builder)
        super().add_data(data, converters)

    def __repr__(self):
        return "<User.id={}.email='{}'>".format(self['id'], self['email'])

    @classmethod
    def find(cls, uid, api=None):
        """Get information about a user.

        Args:
            uid (str): for security reasons only the current user is
                exposed, so this must be ``'me'``
            api (RadarlyApi, optional): API used to make the request;
                defaults to the globally configured API.

        Returns:
            User: user information retrieved from the API

        Raises:
            ValueError: if ``uid`` is not ``'me'``
        """
        api = api or RadarlyApi.get_default_api()
        if uid == 'me':
            user_data = api.get(api.router.user['me'])
            return cls(user_data)
        raise ValueError("The 'uid' argument must be set to 'me'.")
| 34.375 | 83 | 0.612727 | 277 | 2,200 | 4.787004 | 0.415162 | 0.022624 | 0.027149 | 0.036199 | 0.031674 | 0.031674 | 0 | 0 | 0 | 0 | 0 | 0.002558 | 0.289091 | 2,200 | 63 | 84 | 34.920635 | 0.845269 | 0.591364 | 0 | 0 | 0 | 0 | 0.1 | 0.031507 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.15 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61723a1b20b4237f505c9fd4feae8442da74d3da | 1,430 | py | Python | utils/reshuffle.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | 196 | 2020-05-05T01:29:52.000Z | 2022-03-29T04:07:54.000Z | utils/reshuffle.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | 15 | 2020-06-29T13:48:21.000Z | 2022-03-30T06:51:02.000Z | utils/reshuffle.py | Longday0923/CODAH_Baseline | e9e331452a12c85e35969833cbfc824d6c0256c1 | [
"MIT"
] | 48 | 2020-05-07T12:11:07.000Z | 2022-03-18T05:28:08.000Z | import argparse
import json
from tqdm import tqdm
import numpy as np
def read_file(filename):
    """Load a JSON-lines file.

    Returns a tuple ``(records, count)`` where records is the list of
    parsed JSON objects, one per line.
    """
    # Count rows first so tqdm can show real progress; use a context
    # manager so the handle is closed (the original leaked this open()).
    with open(filename, 'r') as fin:
        nrow = sum(1 for _ in fin)
    records = []
    with open(filename, 'r') as fin:
        for line in tqdm(fin, total=nrow):
            records.append(json.loads(line))
    return records, len(records)
# Pool the records of both splits, shuffle one global permutation, then
# re-cut into splits of the original sizes and write them back out.
# (Renamed from `all`/`cnt`: `all` shadowed the builtin.)
records = []
split_sizes = []
for split in ['train', 'dev']:
    li, length = read_file(params['src'][split].format(ds=args.ds))
    records.extend(li)
    split_sizes.append(length)

idxs = np.arange(len(records))
np.random.shuffle(idxs)
res = []
for length in split_sizes:
    res.append([records[idx] for idx in idxs[:length]])
    idxs = idxs[length:]

# Pair each split with its shuffled slice instead of res.pop(0).
for split, items in zip(['train', 'dev'], res):
    with open(params['tgt'][split].format(ds=args.ds), 'w') as fout:
        for item in tqdm(items, total=len(items)):
            fout.write(json.dumps(item) + '\n')
| 22.698413 | 82 | 0.603497 | 203 | 1,430 | 4.20197 | 0.35468 | 0.028136 | 0.07034 | 0.046893 | 0.260258 | 0.173505 | 0.173505 | 0.173505 | 0.173505 | 0.173505 | 0 | 0.004401 | 0.205594 | 1,430 | 62 | 83 | 23.064516 | 0.746479 | 0 | 0 | 0.043478 | 0 | 0 | 0.175562 | 0.119382 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021739 | false | 0 | 0.086957 | 0 | 0.130435 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6174f1287314ede549a08cfd2e4fe1fae8d82fdd | 1,813 | py | Python | solar/orchestration/executor.py | Mirantis/solar | 7d12e56d403d70a923cd1caa9c7e3c8cf6fc57aa | [
"Apache-2.0"
] | 7 | 2015-09-07T22:52:32.000Z | 2016-01-14T09:27:09.000Z | solar/orchestration/executor.py | Mirantis/solar | 7d12e56d403d70a923cd1caa9c7e3c8cf6fc57aa | [
"Apache-2.0"
] | 117 | 2015-09-08T05:46:16.000Z | 2016-04-14T16:46:33.000Z | solar/orchestration/executor.py | Mirantis/solar | 7d12e56d403d70a923cd1caa9c7e3c8cf6fc57aa | [
"Apache-2.0"
] | 21 | 2015-09-08T06:34:50.000Z | 2015-12-09T09:14:24.000Z | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from celery import group
from solar.orchestration.runner import app
def celery_executor(dg, tasks, control_tasks=()):
    """Schedule the given graph tasks for execution as one celery group.

    Marks each scheduled node INPROGRESS and stamps its start time.
    """
    scheduled = []
    for task_name in tasks:
        # task_id needs to be unique, so combine the generated uid of
        # this plan with the task name.
        task_id = '{}:{}'.format(dg.graph['uid'], task_name)
        celery_task = app.tasks[dg.node[task_name]['type']]
        dg.node[task_name]['status'] = 'INPROGRESS'
        dg.node[task_name]['start_time'] = time.time()
        scheduled.extend(generate_task(celery_task, dg.node[task_name], task_id))
    return group(scheduled)
def generate_task(task, data, task_id):
    """Yield a celery subtask configured from the node's data dict.

    NOTE(dshulyak): celery won't be installed on every slave; the
    transport/target is chosen later in the handler, so no queue is set
    here.
    """
    yield task.subtask(
        data['args'], task_id=task_id,
        time_limit=data.get('time_limit', None),
        soft_time_limit=data.get('soft_time_limit', None))
def all_success(dg, nodes):
    """Return True when every listed node finished with SUCCESS status."""
    for name in nodes:
        if dg.node[name]['status'] != 'SUCCESS':
            return False
    return True
| 32.375 | 78 | 0.675124 | 269 | 1,813 | 4.446097 | 0.483271 | 0.046823 | 0.033445 | 0.046823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005662 | 0.220629 | 1,813 | 55 | 79 | 32.963636 | 0.840764 | 0.478764 | 0 | 0 | 0 | 0 | 0.086768 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0.047619 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
617a966d2ec2515454023b0e3b38beba6de4a70c | 3,164 | py | Python | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Optimization/line_plot.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Optimization/line_plot.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | SUAVE/SUAVE-2.5.0/trunk/SUAVE/Optimization/line_plot.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | ## @ingroup Optimization
# line_plot.py
#
# Created: Oct 2017, M. Vegh
# Modified: Nov 2017, M. Vegh
# May 2021, E. Botero
# ----------------------------------------------------------------------
# Imports
# -------------------------------------------
from SUAVE.Core import Data
import numpy as np
import matplotlib.pyplot as plt
# ----------------------------------------------------------------------
# line_plot
# ----------------------------------------------------------------------
def line_plot(problem, number_of_points, plot_obj=1, plot_const=1, sweep_index=0):
    """
    Sweep one design variable of an optimization problem and plot the
    objective and constraints along the sweep. sweep_index selects the
    variable (0 = first variable, 4 = fifth variable, ...).

    Assumptions:
    N/A

    Source:
    N/A

    Inputs:
    problem            [Nexus Class]
    number_of_points   [int]
    plot_obj           [int]
    plot_const         [int]
    sweep_index        [int]

    Outputs:
    outputs.inputs         [array]
    outputs.objective      [array]
    outputs.constraint_val [array]

    Properties Used:
    N/A
    """
    var_idx = sweep_index  # local alias
    opt_prob = problem.optimization_problem
    base_inputs = opt_prob.inputs
    names = base_inputs[:, 0]   # variable names
    bndl = base_inputs[:, 2]    # lower bounds
    bndu = base_inputs[:, 3]    # upper bounds
    base_objective = opt_prob.objective
    obj_name = base_objective[0][0]     # objective name (used for labels)
    obj_scaling = base_objective[0][1]
    base_constraints = opt_prob.constraints
    constraint_names = base_constraints[:, 0]

    # Allocate the sweep inputs, objective, and constraint storage.
    inputs = np.zeros([2, number_of_points])
    obj = np.zeros([number_of_points])
    constraint_num = np.shape(base_constraints)[0]  # number of constraints
    constraint_val = np.zeros([constraint_num, number_of_points])

    # Sweep the chosen variable linearly between its bounds.
    inputs[0, :] = np.linspace(bndl[var_idx], bndu[var_idx], number_of_points)

    for i in range(0, number_of_points):
        opt_prob.inputs[:, 1][var_idx] = inputs[0, i]
        obj[i] = problem.objective() * obj_scaling
        constraint_val[:, i] = problem.all_constraints().tolist()

    if plot_obj == 1:
        plt.figure(0)
        plt.plot(inputs[0, :], obj, lw=2)
        plt.xlabel(names[var_idx])
        plt.ylabel(obj_name)
    if plot_const == 1:
        for i in range(0, constraint_num):
            plt.figure(i + 1)
            plt.plot(inputs[0, :], constraint_val[i, :], lw=2)
            plt.xlabel(names[var_idx])
            plt.ylabel(constraint_names[i])
    plt.show(block=True)

    # Pack the swept data for the caller.
    outputs = Data()
    outputs.inputs = inputs
    outputs.objective = obj
    outputs.constraint_val = constraint_val
    return outputs
| 28.763636 | 116 | 0.523704 | 353 | 3,164 | 4.524079 | 0.31728 | 0.035066 | 0.061365 | 0.013776 | 0.052599 | 0.03757 | 0.03757 | 0.03757 | 0 | 0 | 0 | 0.02153 | 0.310051 | 3,164 | 110 | 117 | 28.763636 | 0.710032 | 0.378003 | 0 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.073171 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
617be70d6fecc6506f598c0678a0433f7d103f61 | 1,636 | py | Python | tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py | uve/tensorflow | e08079463bf43e5963acc41da1f57e95603f8080 | [
"Apache-2.0"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2021-05-20T00:58:04.000Z | 2021-05-20T00:58:04.000Z | Lib/site-packages/tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the layer abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as framework_variables
class HybridLayer(object):
"""Layers are building blocks for hybrid models."""
def _define_vars(self,
params,
**kwargs):
"""Override to define the TensorFlow variables for the layer."""
raise NotImplementedError
# pylint: disable=unused-argument
def __init__(self, params, layer_num, device_assigner, *args, **kwargs):
self.layer_num = layer_num
self.device_assigner = (
device_assigner or framework_variables.VariableDeviceChooser())
self.params = params
self._define_vars(params, **kwargs)
def inference_graph(self, data, data_spec=None):
raise NotImplementedError
| 38.952381 | 85 | 0.690709 | 199 | 1,636 | 5.517588 | 0.572864 | 0.054645 | 0.043716 | 0.029144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006056 | 0.192543 | 1,636 | 41 | 86 | 39.902439 | 0.825132 | 0.517726 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.235294 | 0 | 0.470588 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
617d27d4548b76f46d756f8b92e2089d0b92bead | 2,553 | py | Python | src/webpubsub/azext_webpubsub/commands.py | sanmishra18/azure-cli-extensions | 05499b7931a1fe4cd4536a6b83fa4f8f13663996 | [
"MIT"
] | null | null | null | src/webpubsub/azext_webpubsub/commands.py | sanmishra18/azure-cli-extensions | 05499b7931a1fe4cd4536a6b83fa4f8f13663996 | [
"MIT"
] | 1 | 2020-07-30T06:44:01.000Z | 2020-07-30T06:44:01.000Z | src/webpubsub/azext_webpubsub/commands.py | Juliehzl/azure-cli-extensions | b0b33f4d45c2e4c50ece782851291d967e1f36e2 | [
"MIT"
] | 1 | 2020-11-09T17:17:42.000Z | 2020-11-09T17:17:42.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.commands import CliCommandType
from azure.cli.core.util import empty_on_404
from ._client_factory import cf_webpubsub
def load_command_table(self, _):
webpubsub_general_utils = CliCommandType(
operations_tmpl='azext_webpubsub.custom#{}',
client_factory=cf_webpubsub
)
webpubsub_key_utils = CliCommandType(
operations_tmpl='azext_webpubsub.key#{}',
client_factory=cf_webpubsub
)
webpubsub_network_utils = CliCommandType(
operations_tmpl='azext_webpubsub.network#{}',
client_factory=cf_webpubsub
)
webpubsub_eventhandler_utils = CliCommandType(
operations_tmpl='azext_webpubsub.eventhandler#{}',
client_factory=cf_webpubsub
)
with self.command_group('webpubsub', webpubsub_general_utils, is_preview=True) as g:
g.command('create', 'webpubsub_create')
g.command('delete', 'webpubsub_delete')
g.command('list', 'webpubsub_list')
g.show_command('show', 'webpubsub_show', exception_handler=empty_on_404)
g.command('restart', 'webpubsub_restart', exception_handler=empty_on_404)
g.generic_update_command('update', getter_name='webpubsub_get',
setter_name='webpubsub_set',
custom_func_name='update_webpubsub')
with self.command_group('webpubsub key', webpubsub_key_utils) as g:
g.show_command('show', 'webpubsub_key_list')
g.command('regenerate', 'webpubsub_key_regenerate')
with self.command_group('webpubsub network-rule', webpubsub_network_utils) as g:
g.show_command('show', 'list_network_rules')
g.command('update', 'update_network_rules')
with self.command_group('webpubsub event-handler', webpubsub_eventhandler_utils) as g:
g.show_command('show', 'event_handler_list')
g.command('update', 'event_handler_update')
g.command('clear', 'event_handler_clear')
with self.command_group('webpubsub event-handler hub', webpubsub_eventhandler_utils) as g:
g.command('remove', 'event_handler_hub_remove')
g.command('update', 'event_handler_hub_update')
| 42.55 | 94 | 0.655307 | 280 | 2,553 | 5.646429 | 0.292857 | 0.050601 | 0.047438 | 0.063251 | 0.466161 | 0.330803 | 0.097407 | 0 | 0 | 0 | 0 | 0.004276 | 0.17548 | 2,553 | 59 | 95 | 43.271186 | 0.746793 | 0.143361 | 0 | 0.095238 | 0 | 0 | 0.268684 | 0.080697 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.071429 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
617ec542b17aa0614e17eb7e4ef071d83563f0fa | 5,730 | py | Python | wte/src/ebook.py | webalorn/web-to-ebook | 19ca8810ad1da04dda7e348c709fa93e56f48377 | [
"MIT"
] | null | null | null | wte/src/ebook.py | webalorn/web-to-ebook | 19ca8810ad1da04dda7e348c709fa93e56f48377 | [
"MIT"
] | null | null | null | wte/src/ebook.py | webalorn/web-to-ebook | 19ca8810ad1da04dda7e348c709fa93e56f48377 | [
"MIT"
] | null | null | null | import json
import datetime
import imghdr
import os
from ebooklib import epub
from .util import Log, do_hash, downoad_image, format_filename
from .util import ImageData, TmpImageData
class Chapter:
    """A single ebook chapter: a title plus raw HTML content."""

    def __init__(self, title='', content=''):
        self.title = title
        self.content = content
        self.filename = None  # set externally; falls back to a title hash

    def get_filename(self):
        """Return this chapter's xhtml file name inside the epub."""
        stem = str(self.filename or do_hash(self.title))
        return 'index_{}.xhtml'.format(stem)

    def get_real_content(self):
        """Return the HTML body with the title rendered as a heading."""
        return '<h2>{}</h2>{}'.format(self.title, self.content)

    def to_epub(self):
        """Build the epub.EpubHtml item for this chapter."""
        item = epub.EpubHtml(
            title=self.title,
            file_name=self.get_filename(),
        )
        item.set_content(self.get_real_content())
        return item
class FrontPageChapter(Chapter):
    """Front-page chapter rendering the book's metadata summary."""

    def __init__(self, title='', content='', tags=(), author=None, source=None,
                 date=True, status=None):
        # Bug fix: store the title. The inherited Chapter.to_epub() reads
        # self.title, which the original __init__ never set, causing an
        # AttributeError when the book was built.
        self.title = title
        self.filename = None
        parts = []
        if tags:
            parts.append(('Tags', ', '.join(tags)))
        if author:
            parts.append(('Author', author))
        if source:
            parts.append(('From', source))
        if status:
            parts.append(('Status', status))
        if date:
            if date is True:
                # date=True means "stamp with today's date".
                now = datetime.date.today()
                date = now.strftime("%d/%m/%Y")
            parts.append(('Date', date))
        # Bug fix: close the <strong> tag (was '<strong>' twice).
        parts = [f'<li><strong>{a}:</strong> {b}\n' for a, b in parts]
        content = content.replace("\n", "<br/>")
        self.content = f"""
        <h1>{title}</h1>
        <p style='font-style:italic;'>
            {content}
        </p>
        <ul>{''.join(parts)}</ul>
        """

    def get_filename(self):
        # Fixed name: there is only one front page per book.
        return 'index_frontpage.xhtml'

    def get_real_content(self):
        # Unlike Chapter, the title is already embedded in the content.
        return self.content
class Book:
    """An ebook under construction: metadata, chapters, cover and images."""

    def __init__(self, title='', identifier=None, cover=None, author=None, status=None,
                 source=None, lang='en', description=None, date=None, tags=[]):
        self.title = title
        self.chapters = []
        self._cover = None
        self.set_cover(cover)
        self.author = author
        self.source = source
        self.identifier = identifier
        self.lang = lang
        self.description = description
        self.date = date
        self.tags = tags
        self.images = []
        self.status = status
        # Default the date to today in ISO format.
        if not self.date:
            self.date = datetime.date.today().strftime("%Y-%m-%d")

    def add_chapter(self, *kargs, **kwargs):
        """Append a new Chapter built from the given arguments."""
        self.chapters.append(Chapter(*kargs, **kwargs))

    def cover_from_url(self, url):
        """Download the cover image from *url* (None clears the cover)."""
        self._cover = None
        if url is not None:
            self._cover = TmpImageData(downoad_image(url), 'cover')

    def set_cover(self, path):
        """Use a local image file as the cover (None clears the cover)."""
        self._cover = None
        if path is not None:
            self._cover = ImageData(path, 'cover')

    def epub_name(self):
        """Build a filesystem-safe '.epub' file name from the metadata."""
        pieces = self.title.replace("'", " ").lower().split()
        if self.source:
            pieces.append(self.source)
        if self.identifier:
            pieces.append(self.identifier)
        return format_filename('-'.join(pieces)) + '.epub'

    def get_front_page(self):
        """Return the metadata front page, or None without a description."""
        if self.description is None:
            return None
        return FrontPageChapter(
            title=self.title,
            content=self.description,
            tags=self.tags,
            author=self.author,
            source=self.source,
            status=self.status,
        )

    def to_epub(self, dest_path=None):
        """Assemble the epub; optionally write it to *dest_path*.

        Returns the ebooklib EpubBook object in all cases.
        """
        book = epub.EpubBook()
        identifier = self.identifier or do_hash(self.title)
        if self.source is not None:
            identifier = str(self.source) + '-' + identifier

        # Core metadata.
        book.set_identifier(identifier)
        book.set_title(self.title)
        book.set_language(self.lang)
        if self.author is not None:
            authors = self.author if isinstance(
                self.author, list) else [self.author]
            for auth in authors:
                book.add_author(auth)
        if self.description:
            book.add_metadata('DC', 'description', self.description)
        if self.source:
            book.add_metadata('DC', 'publisher', self.source)

        # Cover and embedded images.
        if self._cover:
            cover_bytes = self._cover.read()
            book.set_cover(self._cover.epub_location(), cover_bytes, True)
        for img in self.images:
            book.add_item(img.to_epub())

        # Number the chapters, then prepend the front page if any.
        for i_chap, chap in enumerate(self.chapters):
            chap.filename = f'chapter_{i_chap+1}'
        chapters = self.chapters
        front_page = self.get_front_page()
        if front_page:
            chapters = [front_page] + chapters

        book_parts = []
        for chap in chapters:
            epub_chap = chap.to_epub()
            book.add_item(epub_chap)
            book_parts.append(epub_chap)
        book.toc = tuple(book_parts)
        book.spine = ['nav'] + book_parts
        book.add_item(epub.EpubNcx())
        book.add_item(epub.EpubNav())

        # Write the ebook, asking before overwriting an existing file.
        if dest_path is not None:
            dest_path = str(dest_path)
            if os.path.exists(dest_path):
                if not Log.confirm(f"The file {dest_path} already exists. Overwride ?"):
                    Log.warning(
                        f"The epub file was not written because {dest_path} already exists")
                    dest_path = None
            if dest_path:
                epub.write_epub(dest_path, book)
                Log.success(
                    f"The file {dest_path} has been successfully written")
        return book
| 29.536082 | 92 | 0.549215 | 671 | 5,730 | 4.554396 | 0.205663 | 0.04123 | 0.014725 | 0.015707 | 0.073953 | 0.040576 | 0.020942 | 0 | 0 | 0 | 0 | 0.001313 | 0.335602 | 5,730 | 193 | 93 | 29.689119 | 0.801418 | 0.011518 | 0 | 0.104575 | 0 | 0 | 0.092435 | 0.020855 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091503 | false | 0 | 0.045752 | 0.019608 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
617f4db708e6677801930a8cb78c9f9531107ac6 | 537 | py | Python | Curso_Python/funcoes.py | FranciscoCabrita1/Cabrita | af9dfb12dbc64cf6181d4e906156170c5449877e | [
"MIT"
] | 5 | 2020-08-24T23:29:58.000Z | 2022-02-07T19:58:07.000Z | Curso_Python/funcoes.py | lulavalenca/Curso-Completo-de-Python-no-Youtube | af9dfb12dbc64cf6181d4e906156170c5449877e | [
"MIT"
] | null | null | null | Curso_Python/funcoes.py | lulavalenca/Curso-Completo-de-Python-no-Youtube | af9dfb12dbc64cf6181d4e906156170c5449877e | [
"MIT"
def soma(num1, num2):
    """Return the sum of num1 and num2."""
    # Removed the redundant temp variables; return the result directly.
    return num1 + num2


def sub(num1, num2):
    """Return num1 minus num2."""
    return num1 - num2


def mult(num1, num2):
    """Return the product of num1 and num2."""
    return num1 * num2


def div(num1, num2):
    """Return num1 divided by num2 (true division)."""
    return num1 / num2
# Use distinct names for the results: the original rebound each function
# name to its own result (soma = soma(5,3)), making the functions
# uncallable afterwards.
soma_res = soma(5, 3)
sub_res = sub(10, 7)
mult_res = mult(2, 5)
div_res = div(20, 4)
conta_final = mult_res + sub_res
print(conta_final)
| 15.794118 | 30 | 0.621974 | 83 | 537 | 3.903614 | 0.325301 | 0.098765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108808 | 0.281192 | 537 | 33 | 31 | 16.272727 | 0.73057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.307692 | 0.038462 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
617fe9d2491f54f1bdb39bd1f350bb89243e2b5c | 2,510 | py | Python | setup.py | all-in-one-of/vfxwindow | 247571512ffbfd4deb2ba505a75c5de658efffbd | [
"MIT"
] | null | null | null | setup.py | all-in-one-of/vfxwindow | 247571512ffbfd4deb2ba505a75c5de658efffbd | [
"MIT"
] | null | null | null | setup.py | all-in-one-of/vfxwindow | 247571512ffbfd4deb2ba505a75c5de658efffbd | [
"MIT"
] | 1 | 2021-02-17T00:00:10.000Z | 2021-02-17T00:00:10.000Z | import os
from setuptools import setup, find_packages
import ast

# Get the README.md text for the long description.
with open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r') as f:
    readme = f.read()

# Parse vfxwindow/__init__.py for a version.
with open(os.path.join(os.path.dirname(__file__), 'vfxwindow/__init__.py'), 'r') as f:
    for line in f:
        if line.startswith('__version__'):
            # literal_eval only accepts plain literals, unlike the
            # original eval(), which would execute arbitrary code.
            version = ast.literal_eval(line.split('=')[1].strip())
            break
    else:
        raise RuntimeError('no version found')
setup(
    name = 'vfxwindow',
    packages = find_packages(),
    version = version,
    license='MIT',
    description = 'Qt window class for designing tools to be compatible between multiple VFX programs.',
    long_description=readme,
    long_description_content_type='text/markdown',
    author = 'Peter Hunt',
    author_email='peterh@blue-zoo.co.uk',
    url = 'https://github.com/Peter92/vfxwindow',
    download_url = 'https://github.com/Peter92/vfxwindow/archive/{}.tar.gz'.format(version),
    project_urls={
        'Documentation': 'https://github.com/Peter92/vfxwindow/wiki',
        'Source': 'https://github.com/Peter92/vfxwindow',
        'Issues': 'https://github.com/Peter92/vfxwindow/issues',
    },
    # PyPI search keywords: Qt bindings and the host applications supported.
    keywords = [
        'qt', 'pyside', 'pyside2', 'pyqt', 'pyqt4', 'pyqt5', 'gui', 'window',
        'maya', 'mayapy', 'nuke', 'nukescripts', 'houdini', 'unreal', 'ue4', 'blender', '3dsmax', '3ds',
        'vfx', 'visualfx', 'fx', 'cgi', '3d',
    ],
    # Ship the bundled colour palettes alongside the package code.
    package_data={'vfxwindow': ['palettes/*.json']},
    install_requires=[],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Multimedia :: Graphics :: 3D Modeling',
        'Topic :: Multimedia :: Graphics :: 3D Rendering',
        'Topic :: Software Development :: User Interfaces',
    ],
    include_package_data=True,
    python_requires=('>=2.7, !=3.0.*, !=3.1.*, !=3.2.*')
)
| 39.21875 | 104 | 0.604781 | 278 | 2,510 | 5.33813 | 0.543165 | 0.115229 | 0.151617 | 0.105121 | 0.196766 | 0.140162 | 0.04717 | 0.04717 | 0.04717 | 0 | 0 | 0.022063 | 0.223506 | 2,510 | 63 | 105 | 39.84127 | 0.739354 | 0.025498 | 0 | 0.035088 | 0 | 0 | 0.507163 | 0.017192 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.035088 | 0 | 0.035088 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
618125d2be7dce4de9f6012471e1a0187416e62e | 1,085 | py | Python | tests/tamr_client/dataset/test_unified.py | ianbakst/tamr-client | ae7a6190a2251d31f973f9d5a7170ac5dc097f97 | [
"Apache-2.0"
] | 9 | 2019-08-13T11:07:06.000Z | 2022-01-14T18:15:13.000Z | tests/tamr_client/dataset/test_unified.py | ianbakst/tamr-client | ae7a6190a2251d31f973f9d5a7170ac5dc097f97 | [
"Apache-2.0"
] | 166 | 2019-08-09T18:51:05.000Z | 2021-12-02T15:24:15.000Z | tests/tamr_client/dataset/test_unified.py | ianbakst/tamr-client | ae7a6190a2251d31f973f9d5a7170ac5dc097f97 | [
"Apache-2.0"
] | 21 | 2019-08-12T15:37:31.000Z | 2021-06-15T14:06:23.000Z | import pytest
import tamr_client as tc
from tests.tamr_client import fake
@fake.json
def test_from_project():
    """Fetching a mastering project's unified dataset populates its metadata."""
    session = fake.session()
    mastering_project = fake.mastering_project()
    dataset = tc.dataset.unified.from_project(session, mastering_project)
    assert dataset.key_attribute_names == ("tamr_id",)
    assert dataset.description == "dataset 1 description"
    assert dataset.name == "dataset 1 name"
@fake.json
def test_from_project_dataset_not_found():
    """A missing unified dataset surfaces as tc.dataset.unified.NotFound."""
    session = fake.session()
    mastering_project = fake.mastering_project()
    with pytest.raises(tc.dataset.unified.NotFound):
        tc.dataset.unified.from_project(session, mastering_project)
@fake.json
def test_apply_changes_async():
    """Applying staged changes starts a Spark operation in the PENDING state."""
    session = fake.session()
    dataset = fake.unified_dataset()
    op = tc.dataset.unified._apply_changes_async(session, dataset)
    expected_status = {
        "state": "PENDING",
        "startTime": "",
        "endTime": "",
        "message": "Job has not yet been submitted to Spark",
    }
    assert op.status == expected_status
    assert op.description == "operation 1 description"
    assert op.type == "SPARK"
| 26.463415 | 68 | 0.693088 | 138 | 1,085 | 5.23913 | 0.362319 | 0.135546 | 0.08852 | 0.062241 | 0.276625 | 0.276625 | 0.204703 | 0 | 0 | 0 | 0 | 0.003425 | 0.192627 | 1,085 | 40 | 69 | 27.125 | 0.821918 | 0 | 0 | 0.266667 | 0 | 0 | 0.132719 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6182c969ad7fb85cbef2147f3700b1928d79dc52 | 827 | py | Python | benchmarks/cof/benchmark_kagome.py | andrewtarzia/stk | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | benchmarks/cof/benchmark_kagome.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | benchmarks/cof/benchmark_kagome.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | from __future__ import annotations
import pytest
import stk
def build_kagome(
    lattice_size: tuple[int, int, int],
) -> stk.ConstructedMolecule:
    """Construct a Kagome COF of the given lattice size from two Br-capped building blocks."""
    ditopic = stk.BuildingBlock('BrCCBr', [stk.BromoFactory()])
    tetratopic = stk.BuildingBlock(
        smiles='BrC1C(Br)CC(Br)C(Br)C1',
        functional_groups=[stk.BromoFactory()],
    )
    topology = stk.cof.Kagome(
        building_blocks=(ditopic, tetratopic),
        lattice_size=lattice_size,
    )
    return stk.ConstructedMolecule(topology_graph=topology)
@pytest.fixture(
    params=tuple((n, n, n) for n in (1, 2, 4)),
)
def lattice_size(request) -> tuple[int, int, int]:
    """Parametrized cubic lattice sizes: 1x1x1, 2x2x2 and 4x4x4."""
    return request.param
def benchmark_kagome(
    benchmark,
    lattice_size: tuple[int, int, int],
) -> None:
    # Time the construction of a Kagome COF for each parametrized lattice size.
    benchmark(build_kagome, lattice_size)
| 20.170732 | 59 | 0.61185 | 97 | 827 | 5.051546 | 0.43299 | 0.134694 | 0.067347 | 0.085714 | 0.102041 | 0.102041 | 0 | 0 | 0 | 0 | 0 | 0.024351 | 0.255139 | 827 | 40 | 60 | 20.675 | 0.771104 | 0 | 0 | 0.125 | 0 | 0 | 0.033857 | 0.026602 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.09375 | 0.03125 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61844ed5b13cdfaa913f4dc893bd0de862df86b6 | 4,538 | py | Python | model_compress/distil/theseus/classifier.py | Oneflow-Inc/Oneflow-Model-Compression | 1a346fa0a586ee0277814ecb56e5bee772d9bb05 | [
"Apache-2.0"
] | 4 | 2021-03-19T02:40:41.000Z | 2022-01-10T15:25:47.000Z | model_compress/distil/theseus/classifier.py | Oneflow-Inc/Oneflow-Model-Compression | 1a346fa0a586ee0277814ecb56e5bee772d9bb05 | [
"Apache-2.0"
] | 1 | 2022-03-04T07:19:43.000Z | 2022-03-04T07:19:43.000Z | model_compress/distil/theseus/classifier.py | Oneflow-Inc/Oneflow-Model-Compression | 1a346fa0a586ee0277814ecb56e5bee772d9bb05 | [
"Apache-2.0"
] | 3 | 2021-03-19T02:40:46.000Z | 2021-08-10T06:42:17.000Z | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import bert as bert_util
import bert_theseus as bert_theseus_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
def GlueBERT(
    input_ids_blob,
    input_mask_blob,
    token_type_ids_blob,
    label_blob,
    vocab_size,
    seq_length=512,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    initializer_range=0.02,
    label_num=2,
    replace_prob=0.0,
    compress_ratio=1
):
    """BERT-of-Theseus classifier graph for GLUE-style tasks.

    Runs the (partially module-replaced) BERT backbone, pools the first token,
    and attaches a classification head (evaluation mode: is_train=False).
    Returns (loss, logit_blob).
    """
    backbone_kwargs = dict(
        input_ids_blob=input_ids_blob,
        input_mask_blob=input_mask_blob,
        token_type_ids_blob=token_type_ids_blob,
        vocab_size=vocab_size,
        seq_length=seq_length,
        hidden_size=hidden_size,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
        intermediate_size=intermediate_size,
        hidden_act=hidden_act,
        hidden_dropout_prob=hidden_dropout_prob,
        attention_probs_dropout_prob=attention_probs_dropout_prob,
        max_position_embeddings=max_position_embeddings,
        type_vocab_size=type_vocab_size,
        initializer_range=initializer_range,
        replace_prob=replace_prob,
        compress_ratio=compress_ratio,
    )
    backbone = bert_theseus_util.BertTheseusBackbone(**backbone_kwargs)

    # Pool the [CLS] position of the backbone's final sequence output.
    pooled = PooledOutput(
        sequence_output=backbone.sequence_output(),
        hidden_size=hidden_size,
        initializer_range=initializer_range,
        is_train=False
    )

    loss, _, logit_blob = _AddClassficationLoss(
        input_blob=pooled,
        label_blob=label_blob,
        hidden_size=hidden_size,
        label_num=label_num,
        initializer_range=initializer_range,
        scope_name='classification',
        is_train=False
    )
    return loss, logit_blob
def PooledOutput(sequence_output, hidden_size, initializer_range, is_train=True):
    """Pool the first (CLS) token of `sequence_output` through a dense + tanh layer."""
    with flow.scope.namespace("bert-pooler"):
        # Slice out the first token of every sequence, then flatten to (N, hidden).
        cls_token = flow.slice(
            sequence_output, [None, 0, 0], [None, 1, -1])
        cls_token = flow.reshape(
            cls_token, [-1, hidden_size])
        dense_out = bert_util._FullyConnected(
            cls_token,
            input_size=hidden_size,
            units=hidden_size,
            weight_initializer=bert_util.CreateInitializer(initializer_range),
            name="dense",
            is_train=is_train
        )
        return flow.math.tanh(dense_out)
def _AddClassficationLoss(input_blob, label_blob, hidden_size, label_num, initializer_range,
                          scope_name='classification', is_train=True):
    """Linear classification head + softmax cross-entropy.

    Returns (loss, per-example loss, logits); loss is the un-reduced
    per-example cross-entropy.
    """
    with flow.scope.namespace(scope_name):
        weights = flow.get_variable(
            name="output_weights",
            shape=[label_num, hidden_size],
            dtype=input_blob.dtype,
            initializer=flow.random_normal_initializer(
                mean=0.0, stddev=initializer_range, seed=None, dtype=None),
            trainable=is_train
        )
        bias = flow.get_variable(
            name="output_bias",
            shape=[label_num],
            dtype=input_blob.dtype,
            initializer=flow.constant_initializer(0.0),
            trainable=is_train
        )
        # logits = input @ W^T + b
        logits = flow.matmul(
            input_blob, weights, transpose_b=True)
        logits = flow.nn.bias_add(logits, bias)
        per_example_loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=label_blob
        )
        return per_example_loss, per_example_loss, logits
| 35.732283 | 92 | 0.697223 | 569 | 4,538 | 5.198594 | 0.304042 | 0.043949 | 0.018932 | 0.017241 | 0.220419 | 0.158553 | 0.07336 | 0.022312 | 0 | 0 | 0 | 0.01375 | 0.230718 | 4,538 | 126 | 93 | 36.015873 | 0.833572 | 0.160423 | 0 | 0.117647 | 0 | 0 | 0.019205 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.039216 | 0 | 0.098039 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
618468ad6f14a6779b255dc46c34d2d8d1593c9f | 3,027 | py | Python | learned_control.py | roatienza/gym-miniworld | 2c377975e34ab588900a3c019ade126f14336610 | [
"Apache-2.0"
] | null | null | null | learned_control.py | roatienza/gym-miniworld | 2c377975e34ab588900a3c019ade126f14336610 | [
"Apache-2.0"
] | null | null | null | learned_control.py | roatienza/gym-miniworld | 2c377975e34ab588900a3c019ade126f14336610 | [
"Apache-2.0"
] | null | null | null |
"""
This script allows a trained policy to control the simulator.
Usage:
"""
import sys
import argparse
import pyglet
import math
from pyglet import clock
import numpy as np
import gym
import gym_miniworld
import torch
from policy import DDPGActor
import os
# Work around duplicate OpenMP runtime loading on some platforms.
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# Run the actor on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def step(action):
    """Advance the global env by one action, log the observation/reward,
    reset on episode end, re-render, and return the new observation."""
    print('step {}/{}: {}'.format(env.step_count+1, env.max_episode_steps, env.actions(action).name))
    obs, reward, done, info = env.step(action)
    if env.is_render_depth:
        # Depth observations are printed raw.
        print("next state", obs)
        #for i in range(len(obs)):
        #    print(obs[i])
    else:
        print("obs shape: ", obs.shape)
        print('min: %f, max: %f' % (np.amin(obs), np.amax(obs)))
    print('reward={:.2f}'.format(reward))
    if done:
        print('done!')
        obs = env.reset()
    # Redraw using the globally selected view mode ('agent' or 'top').
    env.render('pyglet', view=view_mode)
    return obs
#if __name__ == "__main__":
# --- Command-line arguments ---
parser = argparse.ArgumentParser()
parser.add_argument('--env-name', default='MiniWorld-Hallway-v0')
parser.add_argument("--hidden-dim", default=32, type=int, help="Actor MLP hidden dim")
parser.add_argument('--checkpoint', default='results/DDPG-best_reward.pth')
parser.add_argument('--no-time-limit', action='store_true', help='ignore time step limits')
parser.add_argument('--agent-view', action='store_true', help='show the agent view instead of the top view')
args = parser.parse_args()
# --- Environment setup ---
env = gym.make(args.env_name)
if args.no_time_limit:
    env.max_episode_steps = math.inf
view_mode = 'agent' if args.agent_view else 'top'
state = env.reset()
# Create the display window
env.render('pyglet', view=view_mode)
# --- Trained actor (policy) setup ---
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
max_action = 1.
kwargs = {
    "state_dim": state_dim,
    "action_dim": action_dim,
    "max_action": max_action,
    "hidden_dim": args.hidden_dim
}
actor = DDPGActor(**kwargs).to(device)
print("Loading checkpoint: ", args.checkpoint)
# Load weights on CPU; the module itself was already moved to `device`.
actor.load_state_dict(torch.load(args.checkpoint, map_location=torch.device('cpu')))
actor.eval()
@env.unwrapped.window.event
def on_key_press(symbol, modifiers):
    """Spacebar ends the episode and quits; any other key queries the actor."""
    global state
    print("Symbol: ", symbol)
    if symbol == 32:
        # Keycode 32 = spacebar: send the 'done' action and exit the app.
        step(env.actions.done)
        pyglet.app.exit()
    else:
        with torch.no_grad():
            state = torch.FloatTensor(state.reshape(1, -1)).to(device)
            print("State:", state)
            action = actor(state).cpu().data.numpy().flatten()
            print("Action:", action)
            # Discrete action = argmax over the actor's output vector.
            action = np.argmax(action)
            print("Action:", action)
            state = step(action)
@env.unwrapped.window.event
def on_key_release(symbol, modifiers):
    # Key releases are intentionally ignored.
    pass
@env.unwrapped.window.event
def on_draw():
    # Redraw with the user-selected view so the --agent-view flag is honored;
    # previously hard-coded to 'top', which ignored view_mode (the initial
    # render and step() both use view_mode).
    env.render('pyglet', view=view_mode)
@env.unwrapped.window.event
def on_close():
    # Stop the pyglet event loop when the window is closed.
    pyglet.app.exit()
# Enter main event loop (blocks until pyglet.app.exit() is called)
pyglet.app.run()
# Release the environment's resources once the window closes
env.close()
61868506a540e6d64a7676c2f81831e96a954fd0 | 7,000 | py | Python | welib/beams/theory.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | 24 | 2019-07-24T23:37:10.000Z | 2022-03-30T20:40:40.000Z | welib/beams/theory.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | null | null | null | welib/beams/theory.py | moonieann/welib | 0e430ad3ca034d0d2d60bdb7bbe06c947ce08f52 | [
"MIT"
] | 11 | 2019-03-14T13:47:04.000Z | 2022-03-31T15:47:27.000Z | import numpy as np
import scipy.optimize as sciopt
def UniformBeamBendingModes(Type,EI,rho,A,L,w=None,x=None,Mtop=0,norm='tip_norm',nModes=4):
    """
    Return mode shapes and frequencies for a uniform beam in bending.

    INPUTS:
        Type : beam/boundary-condition kind: 'unloaded-clamped-free',
               'unloaded-topmass-clamped-free' or 'loaded-clamped-free'
        EI   : bending stiffness
        rho  : material density
        A    : cross-section area
        L    : beam length
        w    : distributed load, used by the 'loaded-*' types (defaults to rho*A)
        x    : spanwise positions; max(x) must equal L (default: 101 points on [0, L])
        Mtop : lumped top mass, used by 'unloaded-topmass-clamped-free'
        norm : mode-shape normalization; only 'tip_norm' is implemented
        nModes : number of modes returned
    OUTPUTS:
        freq   : natural frequencies [Hz]
        x      : spanwise positions
        ModesU : deflection mode shapes (nModes x len(x))
        ModesV : slope mode shapes
        ModesK : curvature mode shapes
    References:
        Inman: Engineering Vibration
    Author: E. Branlard"""
    if x is None or len(x)==0:
        x = np.linspace(0,L,101)
    if np.amax(x) != L:
        raise Exception('Max of x should be equal to L')
    # Dimensionless spanwise position
    x0 = x / L
    s = Type.split('-')
    if s[0].lower()=='unloaded':
        # --- "Theory" (clamped-free, vertical, no weight)
        # See Inman, p.335 or Nielsen1 p129
        if 'unloaded-clamped-free' == (Type.lower()):
            # B are the roots of the characteristic equation cosh(B)*cos(B) = -1.
            # For j>5 a good approximation is B(j) = (2*j-1)*pi/2 (used as the
            # initial guess for fsolve).
            B = np.zeros(nModes)
            for i in np.arange(nModes):
                B[i] = sciopt.fsolve(lambda x: 1 + np.cosh(x) * np.cos(x), (2*(i+1)-1)*np.pi/2)
        elif 'unloaded-topmass-clamped-free' == (Type.lower()):
            # The geometrical stiffening is not accounted for here
            if Mtop is None:
                # fixed: format the message (was Exception('... %s', Type))
                raise Exception('Please specify value for Mtop for %s' % Type)
            M = rho * A * L
            B = np.zeros(nModes)
            for i in np.arange(nModes):
                B[i] = sciopt.fsolve(lambda x: 1+np.cosh(x)*np.cos(x)-x*Mtop/M*(np.sin(x)*np.cosh(x)-np.cos(x)*np.sinh(x)),(2*(i+1)-1)*np.pi/2)
        else:
            raise Exception('unknown type %s' % Type)
        SS = np.sinh(B) + np.sin(B)
        CC = np.cosh(B) + np.cos(B)
        # Frequency
        freq = (B / L) ** 2 / (2 * np.pi) * np.sqrt(EI / (rho * A))
        # --- Mode shapes (deflection U, slope V, curvature K)
        ModesU = np.zeros((len(B),len(x0)))
        ModesV = np.zeros((len(B),len(x0)))
        ModesK = np.zeros((len(B),len(x0)))
        for i in np.arange(nModes):
            ModesU[i,:] = SS[i] * (np.cosh(B[i]*x0) - np.cos(B[i] * x0)) - CC[i] * (np.sinh(B[i] * x0) - np.sin(B[i] * x0))
            ModesV[i,:] = B[i] * (SS[i] * (np.sinh(B[i]*x0) + np.sin(B[i] * x0)) - CC[i] * (np.cosh(B[i] * x0) - np.cos(B[i] * x0)))
            ModesK[i,:] = B[i]**2 * (SS[i] * (np.cosh(B[i]*x0) + np.cos(B[i] * x0)) - CC[i] * (np.sinh(B[i] * x0) + np.sin(B[i] * x0)))
    elif s[0].lower()=='loaded':
        if 'loaded-clamped-free' == (Type.lower()):
            if w is None:
                w = A * rho
            if L==0:
                raise Exception('Please specify value for L for %s' % Type)
            # First two roots of the clamped-free characteristic equation
            B = np.array([1.875,4.694])
            freq = (B / L) ** 2 / (2 * np.pi) * np.sqrt(EI / w)
            # NOTE(review): mode shapes (ModesU/V/K) are not computed for this
            # type, so the code below raises NameError — behavior kept as-is.
        else:
            raise Exception('unknown type %s' % Type)
    else:
        # fixed: was "'Unknown %s'^Type" (bitwise-xor on a str -> TypeError)
        raise Exception('Unknown %s' % Type)
    ## Going back to physical dimension
    x = x0 * L
    ModesV = ModesV/L
    ModesK = ModesK/L**2
    ## Normalization of modes
    if norm=='tip_norm':
        # Scale each mode so the tip deflection equals one
        for i in np.arange(nModes):
            fact = 1 / ModesU[i,-1]
            ModesU[i,:] = ModesU[i,:] * fact
            ModesV[i,:] = ModesV[i,:] * fact
            ModesK[i,:] = ModesK[i,:] * fact
    else:
        raise Exception('Norm not implemented or incorrect: `%s`'%norm)
    return freq,x,ModesU,ModesV,ModesK
# --------------------------------------------------------------------------------}
# --- Longitudinal modes
# --------------------------------------------------------------------------------{
def UniformBeamLongiModes(Type,E,rho,A,L,x=None,nModes=4,norm='tip_norm'):
    """Return longitudinal mode frequencies and shapes for a uniform beam.

    Only 'unloaded-clamped-free' is supported. Returns (freq, x, ModesU);
    if `norm` is not 'tip_norm' the modes are returned un-normalized.
    """
    if x is None:
        x = np.linspace(0, L, 101)
    if np.amax(x) != L:
        raise Exception('Max of x should be equal to L')
    if Type.lower() != 'unloaded-clamped-free':
        raise Exception('Unknown %s' % Type)
    # Wave speed; clamped-free eigenfrequencies omega_j = c/L * (pi/2 + j*pi)
    c = np.sqrt(E / rho)
    omega = c / L * (np.pi / 2 + np.arange(nModes) * np.pi)
    freq = omega / (2 * np.pi)
    # Mode shapes sin(omega_j/c * x), one row per mode
    ModesU = np.sin(np.outer(omega / c, x))
    if norm == 'tip_norm':
        # Scale each mode so the tip value equals one
        fact = 1 / ModesU[:, -1]
        ModesU = ModesU * fact[:, None]
    return freq, x, ModesU
def UniformBeamTorsionModes(Type,G,Kt,Ip,rho,A,L,x=None,nModes=4,norm='tip_norm'):
    """Return torsional mode frequencies and shapes for a uniform beam.

    Only 'unloaded-clamped-free' is supported. Returns (freq, x, ModesV, ModesK)
    where ModesK is an empty list (twist-rate derivatives are not computed).
    """
    if x is None:
        x = np.linspace(0, L, 101)
    if np.amax(x) != L:
        raise Exception('Max of x should be equal to L')
    if Type.lower() != 'unloaded-clamped-free':
        raise Exception('Unknown %s' % Type)
    # Torsional wave speed; eigenfrequencies have the same form as the
    # longitudinal clamped-free case: omega_j = c/L * (pi/2 + j*pi)
    c = np.sqrt(G * Kt / (rho * Ip))
    omega = c / L * (np.pi / 2 + np.arange(nModes) * np.pi)
    freq = omega / (2 * np.pi)
    ModesV = np.sin(np.outer(omega / c, x))
    if norm == 'tip_norm':
        # Scale each mode so the tip value equals one
        fact = 1 / ModesV[:, -1]
        ModesV = ModesV * fact[:, None]
    # Derivatives are not computed (would require a gradient helper)
    ModesK = []
    return freq, x, ModesV, ModesK
# Library module: no command-line entry point.
if __name__=='__main__':
    pass
| 40.462428 | 143 | 0.503286 | 1,086 | 7,000 | 3.21547 | 0.162983 | 0.0252 | 0.041237 | 0.020619 | 0.597938 | 0.583047 | 0.520905 | 0.461913 | 0.453895 | 0.451031 | 0 | 0.045782 | 0.285429 | 7,000 | 172 | 144 | 40.697674 | 0.652339 | 0.320143 | 0 | 0.5 | 0 | 0 | 0.093938 | 0.019776 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0.009804 | 0.019608 | 0 | 0.078431 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6189d5672ebf077fa41e985fa4017fb1eed01eb4 | 2,095 | py | Python | Simulacoes_Mdf/calor1d-mdf.py | Devsart/MecFlu-TransCal-Comp-EM | 65997ad52decbd18ed9f2cba24773831a60821cd | [
"MIT"
] | 2 | 2021-11-17T15:36:11.000Z | 2022-03-22T03:10:34.000Z | Simulacoes_Mdf/calor1d-mdf.py | Devsart/MecFlu-TransCal-Comp-EM | 65997ad52decbd18ed9f2cba24773831a60821cd | [
"MIT"
] | null | null | null | Simulacoes_Mdf/calor1d-mdf.py | Devsart/MecFlu-TransCal-Comp-EM | 65997ad52decbd18ed9f2cba24773831a60821cd | [
"MIT"
] | null | null | null | ## =================================================================== ##
# this is file calor1d-mdf.py, created at 10-Aug-2021 #
# maintained by Gustavo Rabello dos Anjos #
# e-mail: gustavo.rabello@gmail.com #
## =================================================================== ##
import numpy as np
import matplotlib.pyplot as plt

# 1-D steady-state heat conduction on a bar, solved with finite differences:
# d/dx(k dT/dx) = -Q with Dirichlet temperatures at both ends.

# simulation parameters
L = 1.0
npoints = 50
ne = npoints-1
dx = L/ne
#Q = 0.0 # heat source
#k = 1.0 # thermal conductivity of the material

# spatially varying thermal conductivity and heat source
k = np.ones( (npoints),dtype='float' )
Q = np.ones( (npoints),dtype='float' )
X = np.linspace(0,L,npoints)
#--------------------------------------------------
# conductivity drops after x=0.3; source increases after x=0.5
for i in range(0,npoints):
    if X[i] > 0.3:
        k[i] = 0.01
    if X[i] > 0.5:
        Q[i] = 3.0
#--------------------------------------------------

# Dirichlet boundary conditions (left and right end temperatures)
Te = 1.0
Td = 0.0

# mesh point generation
# element connectivity matrix
IEN = np.zeros( (ne,2),dtype='int' )
for e in range(0,ne):
    IEN[e] = [e,e+1]

# boundary node indices
cc = [0,npoints-1]

# boundary values
bcc = np.zeros( (npoints),dtype='float' )
bcc[0] = Te
bcc[-1] = Td

#--------------------------------------------------
# plt.plot(X,0*X,'ko-')
# plt.plot(X[cc],0*X[cc],'ro')
# plt.show()
#--------------------------------------------------

# initialize matrix A and right-hand-side vector b
A = np.zeros( (npoints,npoints),dtype='float' )
b = np.zeros( (npoints),dtype='float' )

# interior points: central second-difference of the Laplacian
for i in range(1,npoints-1):
    A[i,i] = -2.0/(dx*dx) # main diagonal
    A[i,i-1] = 1.0/(dx*dx) # lower diagonal
    A[i,i+1] = 1.0/(dx*dx) # upper diagonal
    b[i] = -Q[i]/k[i]

# boundary points: identity rows enforcing the Dirichlet values
for i in cc:
    A[i,i] = 1.0
    b[i] = bcc[i]

# solve the linear system A T = b
#Ainv = np.linalg.inv(A)
#T = Ainv@b
T = np.linalg.solve(A,b)

# plot the temperature profile (axis labels kept in Portuguese)
plt.plot(X,T,'ko-')
plt.xlabel('comprimento da barra [m]')
plt.ylabel('temperatura [oC]')
plt.show()
618bc5da5028f8a5ae914bfcd162c1293c24f9e3 | 1,130 | py | Python | scripts/sup13b.py | seqcode/multimds | 8dbda98675a6d20bcecb76ab85ea8fd1571b4da3 | [
"MIT"
] | 1 | 2019-10-29T12:33:57.000Z | 2019-10-29T12:33:57.000Z | scripts/sup13b.py | seqcode/multimds | 8dbda98675a6d20bcecb76ab85ea8fd1571b4da3 | [
"MIT"
] | null | null | null | scripts/sup13b.py | seqcode/multimds | 8dbda98675a6d20bcecb76ab85ea8fd1571b4da3 | [
"MIT"
] | null | null | null | from matplotlib import pyplot as plt
import numpy as np
# Genes of interest (display names) and their SGD systematic IDs, same order
gene_names = ("Hxt1", "Has1", "Tda1", "Gal1", "Gal7", "Gal10", "Gal3", "Gal4", "Gal2")
sgds = np.array(["YHR094C", "YMR290C", "YMR291W", "YBR020W", "YBR018C", "YBR019C", "YDR009W", "YPL248C", "YLR081W"])

# Log fold-change per gene: mean of count columns 4-6 over mean of columns 1-3
logfcs = np.zeros((len(sgds), 1))
with open("rnaseq_counts.tsv") as infile:
    for line in infile:
        line = line.strip().split()
        if line[0] in sgds:
            index = np.where(sgds == line[0])[0][0]
            fc = np.mean([float(line[i]) for i in range(4,7)])/np.mean([float(line[i]) for i in range(1,4)])
            logfcs[index][0] = np.log(fc)
# NOTE: the with-block already closed the file; this close() is redundant.
infile.close()

#no need to do anything fancy when defining our figure
fig, ax = plt.subplots()
plt.subplot2grid((10,10), (0,0), 10, 5, frameon=False)
# heatmap of log fold-changes with a symmetric color scale
plt.pcolor(logfcs, cmap=plt.cm.coolwarm, vmin=-8, vmax=8)

#plot ticks
indices = np.arange(len(logfcs)) + 0.5
labels = gene_names
plt.yticks(indices, labels)
plt.xticks(indices, [])
plt.tick_params(top=False, right=False, left=False, bottom=False, labelsize=12) #don't want any ticks showing

# colorbar placed in its own axes
cbaxes = fig.add_axes([0.3, 0.1, 0.02, 0.4])
plt.colorbar(cax=cbaxes)
plt.savefig("sup13b.svg")
| 35.3125 | 116 | 0.669912 | 191 | 1,130 | 3.937173 | 0.591623 | 0.007979 | 0.029255 | 0.039894 | 0.071809 | 0.071809 | 0.071809 | 0.071809 | 0.071809 | 0 | 0 | 0.075356 | 0.130973 | 1,130 | 31 | 117 | 36.451613 | 0.690428 | 0.080531 | 0 | 0 | 0 | 0 | 0.122587 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
618cb0aba3761441ae45aecbf80394c671104e77 | 1,221 | py | Python | NBABet/Api.py | davideganna/NBA_Bet | dba00542b8ed63a5a7290f25209270b32d18fb86 | [
"MIT"
] | 4 | 2021-08-02T07:49:51.000Z | 2021-12-14T18:49:27.000Z | NBABet/Api.py | davideganna/NBA_Bet | dba00542b8ed63a5a7290f25209270b32d18fb86 | [
"MIT"
] | 1 | 2021-08-03T14:55:13.000Z | 2021-08-03T14:55:13.000Z | NBABet/Api.py | davideganna/NBA_Bet | dba00542b8ed63a5a7290f25209270b32d18fb86 | [
"MIT"
] | null | null | null | from datetime import datetime, time, timedelta
import requests
class Api:
    """
    Base class for interfacing with the Basketball API.
    """
    def __init__(self):
        """Read the API key from disk and prepare the request defaults."""
        self.url = 'https://v1.basketball.api-sports.io/'
        with open('secrets/api_key') as key_file:
            self.api_key = key_file.readline()
        self.league = '12' # NBA League
        self.season = '2021-2022'
        # NOTE(review): the host header is set to the full URL (with scheme);
        # confirm the provider does not expect the bare hostname instead.
        self.headers = {
            'x-rapidapi-key' : self.api_key,
            'x-rapidapi-host' : self.url
        }

    def get_tonights_games(self):
        """Return a {home_team: away_team} dict for the next day's games."""
        # NOTE(review): "tonight" is computed as today + 1 day; verify this
        # matches the API's date/time-zone semantics.
        date = 'date=' + (datetime.today() + timedelta(1)).strftime('%Y-%m-%d')
        endpoint = 'games?' + date + '&league=' + self.league + '&season=' + self.season
        response = requests.request(
            "GET", self.url + endpoint, headers=self.headers, data={}
        ).json()
        # Map each home team to its away opponent
        next_games = {}
        for match in response['response']:
            teams = match['teams']
            next_games[teams['home']['name']] = teams['away']['name']
        return next_games
| 33 | 92 | 0.568387 | 142 | 1,221 | 4.774648 | 0.5 | 0.053097 | 0.029499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013857 | 0.290745 | 1,221 | 36 | 93 | 33.916667 | 0.769053 | 0.116298 | 0 | 0 | 0 | 0 | 0.153484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.08 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6190b9256d260af348eecbf353cef77fea75bac1 | 18,619 | py | Python | lib/data/PTFTrainDataset.py | KORguy/PIFu_Part | bd199d439a94f8bc8b4036898b0f1ec01e56ab9e | [
"MIT"
] | null | null | null | lib/data/PTFTrainDataset.py | KORguy/PIFu_Part | bd199d439a94f8bc8b4036898b0f1ec01e56ab9e | [
"MIT"
] | null | null | null | lib/data/PTFTrainDataset.py | KORguy/PIFu_Part | bd199d439a94f8bc8b4036898b0f1ec01e56ab9e | [
"MIT"
] | null | null | null | from torch.utils.data import Dataset
import numpy as np
import os
import random
import torchvision.transforms as transforms
from PIL import Image, ImageOps
import cv2
import torch
from PIL.ImageFilter import GaussianBlur
import trimesh
import logging
import json
from math import sqrt
import datetime
log = logging.getLogger('trimesh')
log.setLevel(40)
def get_part(file, vertices, points, body_parts):
    """Assign each query point a one-hot body-part label.

    For every point, the nearest vertex among ``vertices[::5]`` (the vertex
    list is subsampled 5x for speed) is found, and its part name — looked up
    in ``file`` — selects the hot entry.

    Parameters:
        file       : dict mapping vertex index (as a string) to a part name.
                     NOTE(review): the index used is into the *subsampled*
                     vertex list; confirm the JSON was built with the same
                     ::5 stride.
        vertices   : (n, 3) array-like of mesh vertex positions
        points     : (m, 3) array-like of query points
        body_parts : ordered list of part names; the one-hot index is the
                     part's position in this list (must be < 20)

    Returns:
        (m, 20) integer ndarray of one-hot rows.
    """
    # Vectorized nearest-neighbour search replaces the original O(m*n)
    # pure-Python double loop; only the first 3 coordinates are compared,
    # matching the original distance function.
    sub_vertices = np.asarray(vertices, dtype=float)[::5, :3]
    query = np.asarray(points, dtype=float)[:, :3]
    dists = np.linalg.norm(query[:, None, :] - sub_vertices[None, :, :], axis=2)
    # argmin returns the first minimum, matching the original strict-< update.
    nearest = dists.argmin(axis=1)
    part = np.zeros((len(query), 20), dtype=int)
    for row, vertex_idx in enumerate(nearest):
        part[row, body_parts.index(file[str(vertex_idx)])] = 1
    return part
def load_trimesh(root_dir):
    """Load every subject's posed OBJ under root_dir into a dict keyed by subject name."""
    meshes = {}
    for subject in os.listdir(root_dir):
        obj_path = os.path.join(root_dir, subject, '%s_posed.obj' % subject)
        # process=False / maintain_order=True keep vertex indexing identical
        # to the file so per-vertex part labels stay aligned.
        meshes[subject] = trimesh.load(
            obj_path, process=False, maintain_order=True, skip_uv=True
        )
    return meshes
class PTFTrainDataset(Dataset):
    @staticmethod
    def modify_commandline_options(parser, is_train):
        # Hook for dataset-specific CLI options; this dataset adds none.
        return parser
    def __init__(self, opt, phase='train'):
        """Set up data directories, sampling bounds and image transforms.

        opt   : experiment options namespace (paths, sizes, sampling counts)
        phase : 'train' enables augmentation and the training subject split
        """
        self.opt = opt
        self.projection_mode = 'orthogonal'

        # Path setup: per-modality subdirectories under the data root
        self.root = self.opt.dataroot
        self.RENDER = os.path.join(self.root, 'RENDER')
        self.PART = os.path.join(self.root, 'PART')
        self.MASK = os.path.join(self.root, 'MASK')
        self.PARAM = os.path.join(self.root, 'PARAM')
        self.UV_MASK = os.path.join(self.root, 'UV_MASK')
        self.UV_NORMAL = os.path.join(self.root, 'UV_NORMAL')
        self.UV_RENDER = os.path.join(self.root, 'UV_RENDER')
        self.UV_POS = os.path.join(self.root, 'UV_POS')
        self.OBJ = os.path.join(self.root, 'GEO', 'OBJ')
        self.T_OBJ = os.path.join(self.root, 'GEO', 'T')
        self.BG = self.opt.bg_path

        # Optional random-background compositing: pre-list background images
        self.bg_img_list = []
        if self.opt.random_bg:
            self.bg_img_list = [os.path.join(self.BG, x) for x in os.listdir(self.BG)]
            self.bg_img_list.sort()

        # Spatial sampling bounding box, normalized by 128 (model units / 128)
        self.B_MIN = np.array([-128, -28, -128]) / 128
        self.B_MAX = np.array([128, 228, 128]) / 128

        self.num_views = 1

        self.is_train = (phase == 'train')
        self.load_size = self.opt.loadSizeSmall

        self.num_sample_inout = self.opt.num_sample_inout
        self.num_sample_color = self.opt.num_sample_color

        # Render views: one yaw per degree, a single pitch
        self.yaw_list = list(range(0,360,1))
        self.pitch_list = [0]
        self.subjects = self.get_subjects()

        # PIL to tensor (resize, convert, normalize to [-1, 1])
        self.to_tensor = transforms.Compose([
            transforms.Resize(self.load_size),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        # augmentation: photometric jitter applied to the render image
        self.aug_trans = transforms.Compose([
            transforms.ColorJitter(brightness=opt.aug_bri, contrast=opt.aug_con, saturation=opt.aug_sat,
                                   hue=opt.aug_hue)
        ])

        # Pre-load posed and T-pose meshes for every subject (memory-heavy)
        self.mesh_dic = load_trimesh(self.OBJ)
        self.t_mesh_dic = load_trimesh(self.T_OBJ)
def get_subjects(self):
all_subjects = os.listdir(self.RENDER)
var_subjects = np.loadtxt(os.path.join(self.root, 'val.txt'), dtype=str)
if len(var_subjects) == 0:
return all_subjects
if self.is_train:
return sorted(list(set(all_subjects) - set(var_subjects)))
else:
return sorted(list(var_subjects))
def __len__(self):
return len(self.subjects) * len(self.yaw_list) * len(self.pitch_list)
def get_render(self, subject, num_views, yid=0, pid=0, random_sample=False):
'''
Return the render data
:param subject: subject name
:param num_views: how many views to return
:param view_id: the first view_id. If None, select a random one.
:return:
'img': [num_views, C, W, H] images
'calib': [num_views, 4, 4] calibration matrix
'extrinsic': [num_views, 4, 4] extrinsic matrix
'mask': [num_views, 1, W, H] masks
'''
pitch = self.pitch_list[pid]
# The ids are an even distribution of num_views around view_id
view_ids = [self.yaw_list[(yid + len(self.yaw_list) // num_views * offset) % len(self.yaw_list)]
for offset in range(num_views)]
if random_sample:
view_ids = np.random.choice(self.yaw_list, num_views, replace=False)
calib_list = []
render_list = []
mask_list = []
extrinsic_list = []
vid = 0
param_path = os.path.join(self.PARAM, subject, '%d_%d_%02d.npy' % (vid, pitch, 0))
render_path = os.path.join(self.RENDER, subject, '%d_%d_%02d.jpg' % (vid, pitch, 0))
mask_path = os.path.join(self.MASK, subject, '%d_%d_%02d.png' % (vid, pitch, 0))
# loading calibration data
param = np.load(param_path, allow_pickle=True)
# pixel unit / world unit
ortho_ratio = param.item().get('ortho_ratio')
# world unit / model unit
scale = param.item().get('scale')
# camera center world coordinate
center = param.item().get('center')
# model rotation
R = param.item().get('R')
translate = -np.matmul(R, center).reshape(3, 1)
extrinsic = np.concatenate([R, translate], axis=1)
extrinsic = np.concatenate([extrinsic, np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
# Match camera space to image pixel space
scale_intrinsic = np.identity(4)
scale_intrinsic[0, 0] = scale / ortho_ratio
scale_intrinsic[1, 1] = -scale / ortho_ratio
scale_intrinsic[2, 2] = scale / ortho_ratio
# Match image pixel space to image uv space
uv_intrinsic = np.identity(4)
uv_intrinsic[0, 0] = 1.0 / float(self.load_size // 2)
uv_intrinsic[1, 1] = 1.0 / float(self.load_size // 2)
uv_intrinsic[2, 2] = 1.0 / float(self.load_size // 2)
# Transform under image pixel space
trans_intrinsic = np.identity(4)
mask = Image.open(mask_path).convert('L')
render = Image.open(render_path).convert('RGB')
if self.is_train:
# Pad images
pad_size = int(0.1 * self.load_size)
render = ImageOps.expand(render, pad_size, fill=0)
mask = ImageOps.expand(mask, pad_size, fill=0)
w, h = render.size
th, tw = self.load_size, self.load_size
# random flip
if self.opt.random_flip and np.random.rand() > 0.5:
scale_intrinsic[0, 0] *= -1
render = transforms.RandomHorizontalFlip(p=1.0)(render)
mask = transforms.RandomHorizontalFlip(p=1.0)(mask)
# random scale
if self.opt.random_scale:
rand_scale = random.uniform(0.9, 1.1)
w = int(rand_scale * w)
h = int(rand_scale * h)
render = render.resize((w, h), Image.BILINEAR)
mask = mask.resize((w, h), Image.NEAREST)
scale_intrinsic *= rand_scale
scale_intrinsic[3, 3] = 1
# random translate in the pixel space
if self.opt.random_trans:
dx = random.randint(-int(round((w - tw) / 10.)),
int(round((w - tw) / 10.)))
dy = random.randint(-int(round((h - th) / 10.)),
int(round((h - th) / 10.)))
else:
dx = 0
dy = 0
trans_intrinsic[0, 3] = -dx / float(self.load_size // 2)
trans_intrinsic[1, 3] = -dy / float(self.load_size // 2)
x1 = int(round((w - tw) / 2.)) + dx
y1 = int(round((h - th) / 2.)) + dy
render = render.crop((x1, y1, x1 + tw, y1 + th))
mask = mask.crop((x1, y1, x1 + tw, y1 + th))
render = self.aug_trans(render)
# random blur
if self.opt.aug_blur > 0.00001:
blur = GaussianBlur(np.random.uniform(0, self.opt.aug_blur))
render = render.filter(blur)
intrinsic = np.matmul(trans_intrinsic, np.matmul(uv_intrinsic, scale_intrinsic))
calib = torch.Tensor(np.matmul(intrinsic, extrinsic)).float()
extrinsic = torch.Tensor(extrinsic).float()
mask = transforms.Resize(self.load_size)(mask)
mask = transforms.ToTensor()(mask).float()
mask_list.append(mask)
render = self.to_tensor(render)
render = mask.expand_as(render) * render
render_list.append(render)
calib_list.append(calib)
extrinsic_list.append(extrinsic)
if self.opt.random_bg: # background에도 augmentation 추가하기
bg_path = self.bg_img_list[np.random.randint(len(self.bg_img_list))]
bg = Image.open(bg_path).convert('RGB').resize((self.load_size, self.load_size), Image.NEAREST)
bg = self.to_tensor(bg)
render = (1-mask).expand_as(render) * bg + render
return {
'img': render_list[0],
'calib': calib_list[0],
'extrinsic': extrinsic_list[0],
'mask': mask_list[0]
}
def select_sampling_method(self, subject):
    """Sample inside/outside query points for *subject* plus per-point
    one-hot body-part labels.

    Returns a dict with 'samples' [3, N], 'labels' [1, N] (1 = inside the
    mesh, 0 = outside), 'parts' [20, N] one-hot body-part vectors, and
    'correspondences'.
    """
    # Fix all RNG seeds at evaluation time so sampling is reproducible.
    if not self.is_train:
        random.seed(1997)
        np.random.seed(1997)
        torch.manual_seed(1997)
    mesh = self.mesh_dic[subject]
    t_mesh = self.t_mesh_dic[subject]
    # Oversample the surface (4x requested count), then jitter the points
    # with Gaussian noise; sigma is scaled for the normalized mesh space.
    surface_points, surface_points_face_indices = trimesh.sample.sample_surface(mesh, 4 * self.num_sample_inout)
    sample_points = surface_points + np.random.normal(scale=(self.opt.sigma / 128.), size=surface_points.shape)
    # 20 body-part names; index into this list defines the one-hot slot.
    body_parts = [ 'head', 'neck','spine', 'hip',
                   'shoulder_l', 'upperarm_l', 'lowerarm_l', 'hand_l', 'finger_l',
                   'shoulder_r', 'upperarm_r', 'lowerarm_r', 'hand_r', 'finger_r',
                   'upperleg_l', 'lowerleg_l', 'foot_l',
                   'upperleg_r', 'lowerleg_r', 'foot_r' ]
    # one-hot vectors of the sampled points
    surface_points_body_parts = []
    # Per-vertex body-part assignment, e.g. PART/<subject>/<name>_part.json
    with open(os.path.join(self.PART, subject, "%s_part.json" % subject.split('_')[0])) as f:
        json_data = json.load(f)
    surface_points_faces = mesh.faces[surface_points_face_indices]
    surface_points_vertices_indices = []
    correspondences = []
    # NOTE(review): the return value of get_correspondences() is discarded,
    # so 'correspondences' is always returned empty — confirm whether this
    # call was meant to populate/assign it (also note the dangling comma).
    get_correspondences(surface_points, surface_points_faces, )
    # Map each sampled point to one representative vertex of its face
    # (the smallest vertex index) for the body-part lookup below.
    for single_face in surface_points_faces:
        surface_points_vertices_indices.append(min(single_face))
    ## get correspondences
    surface_points_correspondences = []
    for idx_num in surface_points_vertices_indices:
        idx = str(idx_num)
        temp = [0 for i in range(20)]  # 20 == len(body_parts)
        temp[ body_parts.index(json_data[idx]) ] = 1 # one-hot vector making
        surface_points_body_parts.append(temp)
    # add random points within image space
    length = self.B_MAX - self.B_MIN
    ### New
    random_points = np.random.rand(self.num_sample_inout // 4, 3) * length + self.B_MIN
    sample_points = np.concatenate([sample_points, random_points], 0)
    # Random (off-surface) points also get a part label via nearest lookup.
    random_parts = get_part(json_data, mesh.vertices, random_points, body_parts)
    surface_points_body_parts = np.concatenate([surface_points_body_parts, random_parts], 0)
    ###
    # for i in range(0, self.num_sample_inout // 4):
    #     surface_points_body_parts.append([0 for i in range(20)]) # append zero vectors [0, 0, 0, ... , 0]
    # Shuffle points and their labels with the same permutation.
    s = np.arange(sample_points.shape[0])
    np.random.shuffle(s)
    sample_points = sample_points[s]
    surface_points_body_parts = np.array(surface_points_body_parts)[s]
    #np.random.shuffle(sample_points)
    # Split into inside/outside by mesh containment.
    inside = mesh.contains(sample_points)
    inside_points = sample_points[inside]
    inside_parts = surface_points_body_parts[inside]
    outside_points = sample_points[np.logical_not(inside)]
    outside_parts = surface_points_body_parts[np.logical_not(inside)]
    # Balance the two sets: cap each at num_sample_inout // 2; when there are
    # too few inside points, top up from the outside points instead.
    nin = inside_points.shape[0]
    inside_points = inside_points[
        :self.num_sample_inout // 2] if nin > self.num_sample_inout // 2 else inside_points
    inside_parts = inside_parts[
        :self.num_sample_inout // 2] if nin > self.num_sample_inout // 2 else inside_parts
    outside_points = outside_points[
        :self.num_sample_inout // 2] if nin > self.num_sample_inout // 2 else outside_points[
        :(self.num_sample_inout - nin)]
    outside_parts = outside_parts[
        :self.num_sample_inout // 2] if nin > self.num_sample_inout // 2 else outside_parts[
        :(self.num_sample_inout - nin)]
    # Transpose to channel-first layout expected downstream.
    samples = np.concatenate([inside_points, outside_points], 0).T
    labels = np.concatenate([np.ones((1, inside_points.shape[0])), np.zeros((1, outside_points.shape[0]))], 1)
    parts = np.concatenate([inside_parts, outside_parts], 0).T
    # save_samples_truncted_prob('./out.ply', samples.T, labels.T)
    # exit()
    # save_samples_truncated_part('./part.ply', samples.T, parts.T)
    # exit()
    samples = torch.Tensor(samples).float()
    labels = torch.Tensor(labels).float()
    parts = torch.Tensor(parts).float()
    # Only drops the local names; the meshes stay cached in self.*_dic.
    del mesh
    del t_mesh
    return {
        'samples': samples,
        'labels': labels,
        'parts': parts,
        'correspondences': correspondences
    }
def get_color_sampling(self, subject, yid, pid=0):
    """Sample colored surface points for color (texture) training.

    Reads the UV-space renders of *subject* for the given yaw/pitch index,
    keeps the pixels covered by the UV mask, and returns a random subset of
    surface points jittered along their normals together with their colors.

    Parameters
    ----------
    subject : str
        Subject directory name.
    yid : int
        Index into ``self.yaw_list``.
    pid : int
        Index into ``self.pitch_list`` (default first pitch).

    Returns
    -------
    dict
        'color_samples': [3, num_sample_color] sample positions,
        'rgbs': [3, num_sample_color] colors normalized to [-1, 1].
    """
    yaw = self.yaw_list[yid]
    pitch = self.pitch_list[pid]
    uv_render_path = os.path.join(self.UV_RENDER, subject, '%d_%d_%02d.jpg' % (yaw, pitch, 0))
    uv_mask_path = os.path.join(self.UV_MASK, subject, '%02d.png' % (0))
    uv_pos_path = os.path.join(self.UV_POS, subject, '%02d.exr' % (0))
    uv_normal_path = os.path.join(self.UV_NORMAL, subject, '%02d.png' % (0))
    # Segmentation mask for the uv render: [H, W] bool
    uv_mask = cv2.imread(uv_mask_path)
    uv_mask = uv_mask[:, :, 0] != 0
    # UV render; each pixel is the color of the point: [H, W, 3], 0 ~ 1 float
    uv_render = cv2.imread(uv_render_path)
    uv_render = cv2.cvtColor(uv_render, cv2.COLOR_BGR2RGB) / 255.0
    # Normal render; each pixel is the surface normal: [H, W, 3], -1 ~ 1 float
    uv_normal = cv2.imread(uv_normal_path)
    uv_normal = cv2.cvtColor(uv_normal, cv2.COLOR_BGR2RGB) / 255.0
    uv_normal = 2.0 * uv_normal - 1.0
    # Position render; each pixel holds the xyz coordinates of the point.
    # 2 | 4 == cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR (required for .exr);
    # [:, :, ::-1] swaps BGR -> RGB channel order.
    uv_pos = cv2.imread(uv_pos_path, 2 | 4)[:, :, ::-1]
    # Flatten mask, positions, colors and normals into per-pixel rows.
    uv_mask = uv_mask.reshape((-1))
    uv_pos = uv_pos.reshape((-1, 3))
    uv_render = uv_render.reshape((-1, 3))
    uv_normal = uv_normal.reshape((-1, 3))
    surface_points = uv_pos[uv_mask]
    surface_colors = uv_render[uv_mask]
    surface_normal = uv_normal[uv_mask]
    if self.num_sample_color:
        # BUGFIX: range(0, N - 1) excluded index N-1, so the last masked
        # surface point could never be sampled; sample over all N points.
        sample_list = random.sample(range(surface_points.shape[0]), self.num_sample_color)
        surface_points = surface_points[sample_list].T
        surface_colors = surface_colors[sample_list].T
        surface_normal = surface_normal[sample_list].T
    # Samples are placed near the true surface: a per-point Gaussian offset
    # (std = opt.sigma) applied along the surface normal.
    normal = torch.Tensor(surface_normal).float()
    samples = torch.Tensor(surface_points).float() \
        + torch.normal(mean=torch.zeros((1, normal.size(1))), std=self.opt.sigma).expand_as(normal) * normal
    # Colors normalized to [-1, 1]
    rgbs_color = 2.0 * torch.Tensor(surface_colors).float() - 1.0
    return {
        'color_samples': samples,
        'rgbs': rgbs_color
    }
def get_item(self, index):
    """Assemble one dataset sample for the flat *index*.

    The flat index enumerates (subject, yaw, pitch) combinations; it is
    decoded into the three component indices, then the render data and —
    depending on the options — spatial samples and color samples are merged
    into a single result dict.
    """
    # Decode the flat index: subjects vary fastest, then yaw, then pitch.
    view_index, sid = divmod(index, len(self.subjects))
    pid, yid = divmod(view_index, len(self.yaw_list))
    # name of the subject, e.g. 'rp_xxxx_xxx'
    subject = self.subjects[sid]
    res = {
        'name': subject,
        'mesh_path': os.path.join(self.OBJ, subject + '.obj'),
        'sid': sid,
        'yid': yid,
        'pid': pid,
        'b_min': self.B_MIN,
        'b_max': self.B_MAX,
    }
    res.update(self.get_render(subject, num_views=self.num_views, yid=yid, pid=pid))
    if self.opt.num_sample_inout:
        res.update(self.select_sampling_method(subject))
    if self.num_sample_color:
        res.update(self.get_color_sampling(subject, yid=yid, pid=pid))
    return res
def __getitem__(self, index):
    """Standard Dataset indexing entry point; delegates to :meth:`get_item`."""
    return self.get_item(index)
| 40.127155 | 128 | 0.576669 | 2,455 | 18,619 | 4.173523 | 0.150306 | 0.034257 | 0.021472 | 0.028694 | 0.211009 | 0.116826 | 0.047628 | 0.037868 | 0.030256 | 0.024205 | 0 | 0.026088 | 0.300016 | 18,619 | 463 | 129 | 40.213823 | 0.760071 | 0.132982 | 0 | 0.053797 | 0 | 0 | 0.030965 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037975 | false | 0 | 0.044304 | 0.012658 | 0.126582 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61931b1ad82c48c08668bff66dafa37dd46511d7 | 1,452 | py | Python | corehq/apps/app_manager/management/commands/migrate_template_apps_form_ids.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/app_manager/management/commands/migrate_template_apps_form_ids.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | 1 | 2022-03-12T01:03:25.000Z | 2022-03-12T01:03:25.000Z | corehq/apps/app_manager/management/commands/migrate_template_apps_form_ids.py | johan--/commcare-hq | 86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd | [
"BSD-3-Clause"
] | null | null | null | import re
from corehq.apps.app_manager.management.commands.helpers import AppMigrationCommandBase
from corehq.apps.app_manager.models import Application, load_app_template, ATTACHMENT_REGEX
from corehq.apps.app_manager.util import update_unique_ids
from corehq.apps.es import AppES
def _get_first_form_id(app):
return app['modules'][0]['forms'][0]['unique_id']
class Command(AppMigrationCommandBase):
    """Management command fixing form IDs of apps copied from template apps.

    Apps created from a template initially share the template's form
    unique_ids; this migration regenerates them so they are unique.
    """

    help = "Migrate apps that have been created from template apps " \
           "to make sure that their form ID's are unique."

    # Only migrate current app documents, not saved builds.
    include_builds = False

    def migrate_app(self, app_doc):
        """Return a migrated Application, or None when no change is needed."""
        should_save = False
        template_slug = app_doc['created_from_template']
        template = load_app_template(template_slug)
        # If the first form still carries the template's ID, the app was
        # never re-keyed and every unique_id must be regenerated.
        if _get_first_form_id(app_doc) == _get_first_form_id(template):
            should_save = True
            app = Application.wrap(app_doc)
            # Preserve real attachments (matching ATTACHMENT_REGEX) by
            # fetching their content before the doc is rewritten.
            _attachments = {}
            for name in app_doc.get('_attachments', {}):
                if re.match(ATTACHMENT_REGEX, name):
                    _attachments[name] = app.fetch_attachment(name)
            app_doc['_attachments'] = _attachments
            app_doc = update_unique_ids(app_doc)
        # None tells the base command that this app needs no save.
        return Application.wrap(app_doc) if should_save else None

    def get_app_ids(self):
        """Return IDs of apps created from a template (excluding builds)."""
        q = AppES().created_from_template(True).is_build(False).fields('_id')
        results = q.run()
        return [app['_id'] for app in results.hits]
| 33.767442 | 91 | 0.681818 | 191 | 1,452 | 4.884817 | 0.376963 | 0.057878 | 0.060021 | 0.054662 | 0.113612 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001781 | 0.226584 | 1,452 | 42 | 92 | 34.571429 | 0.829029 | 0 | 0 | 0 | 0 | 0 | 0.118539 | 0.014473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.172414 | 0.034483 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6196974d359ee517dc4c5d99b45a48b0d864e2dc | 3,131 | py | Python | vouchers/forms.py | mnalam30/django-online-shopping | e6ee11b79cb70390e55063624de5afe5a55dfb6c | [
"Apache-2.0"
] | 1 | 2021-03-04T13:41:46.000Z | 2021-03-04T13:41:46.000Z | vouchers/forms.py | mnalam30/django-online-shopping | e6ee11b79cb70390e55063624de5afe5a55dfb6c | [
"Apache-2.0"
] | null | null | null | vouchers/forms.py | mnalam30/django-online-shopping | e6ee11b79cb70390e55063624de5afe5a55dfb6c | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Voucher, VoucherUser, Campaign
from .settings import VOUCHER_TYPES
class VoucherGenerationForm(forms.Form):
    """Admin-side form describing a batch of voucher codes to generate."""

    # Number of voucher codes to create in this batch.
    quantity = forms.IntegerField(label=_("Quantity"))
    # Worth of each voucher. NOTE(review): the label reads "Voucher Code"
    # although the field holds the voucher's value — confirm the label.
    value = forms.IntegerField(label=_("Voucher Code"))
    # Voucher category; one of the configured VOUCHER_TYPES choices.
    type = forms.ChoiceField(label=_("Type"), choices=VOUCHER_TYPES)
    # Optional expiry timestamp; empty means the vouchers never expire.
    valid_until = forms.SplitDateTimeField(
        label=_("Valid until"), required=False,
        help_text=_("Note: Leave empty for vouchers that never expire")
    )
    # Optional text prepended to every generated code.
    prefix = forms.CharField(label="Prefix", required=False)
    # Optional campaign the generated vouchers belong to.
    campaign = forms.ModelChoiceField(
        label=_("Campaign"), queryset=Campaign.objects.all(), required=False
    )
class VoucherForm(forms.Form):
    """Form through which an end user redeems a voucher code.

    Optional constructor kwargs (popped before the base Form sees them):
    ``user`` — the account attempting redemption (required for multi-use
    vouchers), ``types`` — iterable of voucher types accepted here.
    """

    code = forms.CharField(label=_("Voucher code"))

    def __init__(self, *args, **kwargs):
        self.user = None
        self.types = None
        if 'user' in kwargs:
            self.user = kwargs['user']
            del kwargs['user']
        if 'types' in kwargs:
            self.types = kwargs['types']
            del kwargs['types']
        super(VoucherForm, self).__init__(*args, **kwargs)

    def clean_code(self):
        """Validate the submitted code; on success stores ``self.voucher``.

        Raises ``forms.ValidationError`` for unknown, redeemed, expired,
        wrong-type, or per-user-exhausted codes.
        """
        code = self.cleaned_data['code']
        try:
            voucher = Voucher.objects.get(code=code)
        except Voucher.DoesNotExist:
            raise forms.ValidationError(_("This code is not valid."))
        self.voucher = voucher
        # BUGFIX: integer comparisons below previously used 'is' / 'is not',
        # which tests object identity and only happens to work for small
        # CPython-cached ints; use ==/!= for value comparison.
        if self.user is None and voucher.user_limit != 1:
            # vouchers with can be used only once can be used without tracking the user, otherwise there is no chance
            # of excluding an unknown user from multiple usages.
            raise forms.ValidationError(_(
                "The server must provide an user to this form to allow you to use this code. Maybe you need to sign in?"
            ))
        if voucher.is_redeemed:
            raise forms.ValidationError(_("This code has already been used."))
        try:  # check if there is a user bound voucher existing
            user_voucher = voucher.users.get(user=self.user)
            if user_voucher.redeemed_at is not None:
                raise forms.ValidationError(_("This code has already been used by your account."))
        except VoucherUser.DoesNotExist:
            if voucher.user_limit != 0:  # zero means no limit of user count
                # only user bound vouchers left and you don't have one
                if voucher.user_limit == voucher.users.filter(user__isnull=False).count():
                    raise forms.ValidationError(_("This code is not valid for your account."))
                if voucher.user_limit == voucher.users.filter(redeemed_at__isnull=False).count():  # all vouchers redeemed
                    raise forms.ValidationError(_("This code has already been used."))
        if self.types is not None and voucher.type not in self.types:
            raise forms.ValidationError(_("This code is not meant to be used here."))
        if voucher.expired():
            raise forms.ValidationError(_("This code is expired."))
        return code
| 46.731343 | 122 | 0.647716 | 390 | 3,131 | 5.087179 | 0.335897 | 0.040323 | 0.100806 | 0.102319 | 0.225806 | 0.203629 | 0.185988 | 0.166835 | 0.085181 | 0.059476 | 0 | 0.000865 | 0.261258 | 3,131 | 66 | 123 | 47.439394 | 0.856896 | 0.099329 | 0 | 0.071429 | 0 | 0.017857 | 0.16957 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61970ad9c7f8e5b9b733cf5f5d1fca63b77e8dc8 | 603 | py | Python | alembic/versions/5a99c71e171_position.py | wenbs/mptracker | e011ab11954bbf785ae11fea7ed977440df2284a | [
"MIT"
] | 4 | 2015-01-20T15:03:15.000Z | 2017-03-15T09:56:07.000Z | alembic/versions/5a99c71e171_position.py | wenbs/mptracker | e011ab11954bbf785ae11fea7ed977440df2284a | [
"MIT"
] | 3 | 2021-03-31T18:53:12.000Z | 2022-03-21T22:16:35.000Z | alembic/versions/5a99c71e171_position.py | wenbs/mptracker | e011ab11954bbf785ae11fea7ed977440df2284a | [
"MIT"
] | 6 | 2015-12-13T08:56:49.000Z | 2021-08-07T20:36:29.000Z | revision = '5a99c71e171'
down_revision = '1b8378b8914'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Create the ``position`` table (title + date range, linked to person)."""
    columns = [
        sa.Column('id', postgresql.UUID(), nullable=False),
        sa.Column('title', sa.Text(), nullable=True),
        sa.Column('interval', postgresql.DATERANGE(), nullable=True),
        sa.Column('person_id', postgresql.UUID(), nullable=False),
    ]
    constraints = [
        sa.ForeignKeyConstraint(['person_id'], ['person.id']),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('position', *(columns + constraints))
def downgrade():
    """Drop the ``position`` table created by :func:`upgrade`."""
    op.drop_table('position')
| 27.409091 | 69 | 0.6733 | 68 | 603 | 5.897059 | 0.485294 | 0.079801 | 0.079801 | 0.119701 | 0.154613 | 0.154613 | 0 | 0 | 0 | 0 | 0 | 0.034137 | 0.174129 | 603 | 21 | 70 | 28.714286 | 0.771084 | 0 | 0 | 0 | 0 | 0 | 0.135987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61975dd02f9311f448952592cb72dfbe22211c3e | 3,401 | py | Python | ipfshttpclient/client/p2p.py | emendir/py-ipfs-http-client | 2d7c113a14841295059e098836ab29f8cadc6b88 | [
"MIT"
] | null | null | null | ipfshttpclient/client/p2p.py | emendir/py-ipfs-http-client | 2d7c113a14841295059e098836ab29f8cadc6b88 | [
"MIT"
] | null | null | null | ipfshttpclient/client/p2p.py | emendir/py-ipfs-http-client | 2d7c113a14841295059e098836ab29f8cadc6b88 | [
"MIT"
] | null | null | null | import typing as ty
from . import base
class Section(base.SectionBase):
    """``/p2p/*`` API section: tunnel arbitrary connections over libp2p."""

    @base.returns_no_item
    def forward(self, protocol: str, peer_id: str, port: str, **kwargs: base.CommonArgs):
        """Forward connections to libp2p service

        Forward connections made to the specified port to another IPFS node.

        .. code-block:: python

            # forwards connections made to port 8888 to 'QmHash' as protocol '/x/testproto'
            >>> client.p2p.forward('/x/testproto', 'QmHash', 8888)
            []

        Parameters
        ----------
        protocol
            specifies the libp2p protocol name to use for libp2p connections
            and/or handlers. It must be prefixed with '/x/'.
        peer_id
            Target endpoint (the peer to forward to)
        port
            Listening endpoint

        Returns
        -------
        list
            An empty list
        """
        args = (protocol, peer_id, port)
        return self._client.request('/p2p/forward', args, decoder='json', **kwargs)

    @base.returns_no_item
    def listen(self, protocol: str, port: str, **kwargs: base.CommonArgs):
        """Create libp2p service to forward IPFS connections to port

        Creates a libp2p service that forwards IPFS connections made to it
        to the specified port on the local computer.

        .. code-block:: python

            # listens for connections of protocol '/x/testproto' and forwards them to port 8888
            >>> client.p2p.listen('/x/testproto', 8888)
            []

        Parameters
        ----------
        protocol
            specifies the libp2p handler name. It must be prefixed with '/x/'.
        port
            Listener port to which to forward incoming connections

        Returns
        -------
        list
            An empty list
        """
        args = (protocol, port)
        return self._client.request('/p2p/listen', args, decoder='json', **kwargs)

    def close(self, all: bool = False, protocol: ty.Optional[str] = None,
              listenaddress: ty.Optional[str] = None,
              targetaddress: ty.Optional[str] = None,
              **kwargs: base.CommonArgs):
        """Stop listening for new connections to forward.

        Stops all forwarding and listening libp2p services that match the
        given filters; options are sent to the daemon as query options
        rather than positional arguments.

        .. code-block:: python

            # Close listening and forwarding connections of protocol '/x/testproto'.
            >>> client.p2p.close(protocol='/x/testproto')
            []

        Parameters
        ----------
        all
            Close all listeners, ignoring the other filters
            (``all`` intentionally shadows the builtin to mirror the CLI flag)
        protocol
            Match the libp2p handler name; must be prefixed with '/x/'.
        listenaddress
            Match the listening multiaddress
        targetaddress
            Match the target multiaddress

        Returns
        -------
        list
            An empty list
        """
        opts = {}
        # NOTE(review): 'all' defaults to False (never None), so the "all"
        # option is always sent; confirm the daemon treats all=False as
        # "not set" before changing this.
        if all is not None:
            opts.update({"all": all})
        if protocol is not None:
            opts.update({"protocol": str(protocol)})
        if listenaddress is not None:
            opts.update({"listen-address": str(listenaddress)})
        if targetaddress is not None:
            opts.update({"target-address": str(targetaddress)})
        kwargs.setdefault("opts", {}).update(opts)
        # BUGFIX: removed dead 'args = (all,)' — the tuple was never passed
        # to the request; all parameters travel via the opts mapping.
        return self._client.request('/p2p/close', decoder='json', **kwargs)
| 32.084906 | 142 | 0.57042 | 373 | 3,401 | 5.171582 | 0.286863 | 0.031104 | 0.023328 | 0.026957 | 0.414204 | 0.336444 | 0.174184 | 0.150337 | 0.150337 | 0.150337 | 0 | 0.016551 | 0.324904 | 3,401 | 105 | 143 | 32.390476 | 0.823606 | 0.510438 | 0 | 0.083333 | 0 | 0 | 0.071197 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.083333 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
61976d319862419967d46e66ec95fec2253bac78 | 2,562 | py | Python | teek/_timeouts.py | Akuli/tkinder | c360fbfe086ca09cdd856a8636de05b24e1b7093 | [
"MIT"
] | 23 | 2019-01-15T00:07:30.000Z | 2022-01-18T06:19:18.000Z | teek/_timeouts.py | Akuli/tkinder | c360fbfe086ca09cdd856a8636de05b24e1b7093 | [
"MIT"
] | 12 | 2019-01-13T19:51:52.000Z | 2021-05-17T17:55:51.000Z | teek/_timeouts.py | Akuli/pythotk | c360fbfe086ca09cdd856a8636de05b24e1b7093 | [
"MIT"
] | 7 | 2019-01-13T19:48:26.000Z | 2021-04-21T13:30:21.000Z | import teek
from teek._tcl_calls import make_thread_safe
# there's no after_info because i don't see how it would be useful in
# teek
class _Timeout:
    """A scheduled one-shot callback backed by Tcl's ``after`` command.

    Instances are created by :func:`after` / :func:`after_idle`; the only
    public operation is :meth:`cancel`.
    """

    def __init__(self, after_what, callback, args, kwargs):
        # after_what is either a millisecond count or the string 'idle',
        # passed straight through to Tcl's 'after'.
        if kwargs is None:
            kwargs = {}
        self._callback = callback
        self._args = args
        self._kwargs = kwargs
        self._state = 'pending'  # just for __repr__ and error messages
        # Register _run as a Tcl command and schedule it.
        self._tcl_command = teek.create_command(self._run)
        self._id = teek.tcl_call(str, 'after', after_what, self._tcl_command)

    def __repr__(self):
        name = getattr(self._callback, '__name__', self._callback)
        return '<%s %r timeout %r>' % (self._state, name, self._id)

    def _run(self):
        """Invoked by Tcl when the timeout fires; runs the user callback."""
        needs_cleanup = True

        # this is important, thread tests freeze without this special
        # case for some reason: if the app quits while the callback runs,
        # skip deleting the Tcl command afterwards.
        def quit_callback():
            nonlocal needs_cleanup
            needs_cleanup = False
        teek.before_quit.connect(quit_callback)

        try:
            self._callback(*self._args, **self._kwargs)
            self._state = 'successfully completed'
        except Exception as e:
            self._state = 'failed'
            raise e
        finally:
            teek.before_quit.disconnect(quit_callback)
            if needs_cleanup:
                teek.delete_command(self._tcl_command)

    @make_thread_safe
    def cancel(self):
        """Prevent this timeout from running as scheduled.

        :exc:`RuntimeError` is raised if the timeout has already ran or
        it has been cancelled.

        There is example code in :source:`examples/timeout.py`.
        """
        if self._state != 'pending':
            raise RuntimeError("cannot cancel a %s timeout" % self._state)
        teek.tcl_call(None, 'after', 'cancel', self._id)
        self._state = 'cancelled'
        # The Tcl command will never fire now, so free it immediately.
        teek.delete_command(self._tcl_command)
@make_thread_safe
def after(ms, callback, args=(), kwargs=None):
    """Run ``callback(*args, **kwargs)`` after waiting for the given time.

    The *ms* argument should be a waiting time in milliseconds, and
    *kwargs* defaults to ``{}``. This returns a timeout object with a
    ``cancel()`` method that takes no arguments; you can use that to
    cancel the timeout before it runs.
    """
    # ms is forwarded unchanged to Tcl's 'after' command.
    return _Timeout(ms, callback, args, kwargs)
@make_thread_safe
def after_idle(callback, args=(), kwargs=None):
    """Like :func:`after`, but runs the timeout as soon as possible."""
    # 'idle' schedules the callback on the Tcl event loop's next idle moment.
    return _Timeout('idle', callback, args, kwargs)
| 32.025 | 77 | 0.635441 | 328 | 2,562 | 4.734756 | 0.396341 | 0.040567 | 0.069543 | 0.03284 | 0.079202 | 0.061816 | 0.061816 | 0.061816 | 0.061816 | 0.061816 | 0 | 0 | 0.266198 | 2,562 | 79 | 78 | 32.43038 | 0.826064 | 0.290788 | 0 | 0.113636 | 0 | 0 | 0.070568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159091 | false | 0 | 0.045455 | 0 | 0.295455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6198d569648753c3575588522b2c2f89e1b9135e | 4,173 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/example/microblog/microblog/web/application.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/example/microblog/microblog/web/application.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/headstock/example/microblog/microblog/web/application.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | # -*- coding: utf-8 -*-
import cherrypy
from microblog.web import MICROBLOG_SESSION_PROFILE
from microblog.web.oidtool import DEFAULT_SESSION_NAME
from microblog.profile.manager import ProfileManager
from microblog.profile.user import EmptyUserProfile, UserProfile
from microblog.web.speakup import SpeakUpWebApplication
from microblog.web.atompub import CollectionHandler, CollectionPagingHandler,\
CollectionTagingHandler
__all__ = ['WebApplication']
class WebApplication(object):
    """CherryPy controller for the microblog site: sign in/up/out pages and
    per-profile AtomPub collection routing."""

    def __init__(self, base_dir, atompub, tpl_lookup):
        self.base_dir = base_dir
        self.atompub = atompub
        self.tpl_lookup = tpl_lookup  # Mako template lookup
        # Set by the application bootstrap after construction; used when a
        # new profile is created. NOTE(review): assignment happens outside
        # this class — confirm they are set before signup_complete runs.
        self.new_profiles_atompub_app = None
        self.profiles_atompub_app = None

    def index(self):
        """Render the welcome page, or the user's home page when signed in."""
        profile = cherrypy.session.get(MICROBLOG_SESSION_PROFILE, None)
        if not profile:
            tpl = self.tpl_lookup.get_template('welcome.mako')
            return tpl.render()
        # Re-store the profile to refresh the session entry.
        cherrypy.session[MICROBLOG_SESSION_PROFILE] = profile
        tpl = self.tpl_lookup.get_template('index.mako')
        return tpl.render(profile=profile)

    def help(self):
        """Render the static help page."""
        tpl = self.tpl_lookup.get_template('help.mako')
        return tpl.render()

    def signin(self):
        """Render the sign-in (OpenID) page."""
        tpl = self.tpl_lookup.get_template('signin.mako')
        return tpl.render()

    @cherrypy.tools.profile_required()
    def signout(self):
        """Clear the session profile and OpenID state, then redirect home."""
        del cherrypy.session[MICROBLOG_SESSION_PROFILE]
        if DEFAULT_SESSION_NAME in cherrypy.session:
            del cherrypy.session[DEFAULT_SESSION_NAME]
        cherrypy.request.openid = None
        if hasattr(cherrypy.request, 'microblog'):
            delattr(cherrypy.request, 'microblog')
        raise cherrypy.HTTPRedirect('/')

    def signup(self):
        """Start account creation: prefill username from OpenID SReg data."""
        username = cherrypy.request.openid.sreg.get('nickname', '')
        username = username.strip()
        # Flag the session so later requests know a signup is in progress.
        cherrypy.session['creationprocess'] = True
        tpl = self.tpl_lookup.get_template('newaccount_step2.mako')
        return tpl.render(username=username)

    def signup_complete(self, username):
        """Finish account creation: validate the username, create the
        profile and its AtomPub workspace/collection, and mount its routes."""
        username = username.strip()
        valid = True
        if not username:
            valid = False
        if ProfileManager.has_profile(self.atompub, username):
            valid = False
        if not valid:
            # NOTE(review): an empty username also yields the
            # "Username already taken" message — confirm this is intended.
            tpl = self.tpl_lookup.get_template('newaccount_step2.mako')
            return tpl.render(username=username, error="Username already taken")
        profile = UserProfile(username)
        profile.fill(nickname=username)
        ProfileManager.store_profile(self.atompub, profile)
        cherrypy.session[MICROBLOG_SESSION_PROFILE] = profile
        if cherrypy.request.openid:
            # Remember the profile under the OpenID identity URL too.
            oid = cherrypy.request.openid.info.identity_url
            cherrypy.session[oid] = profile
        # Give the new user their own AtomPub workspace and collection.
        w = self.atompub.add_workspace(profile.username)
        c = self.atompub.add_collection(w, profile.username)
        self.atompub.save_service()
        self.attach_serving_collection_application(c, profile,
                                                   d=cherrypy.request.dispatch)
        self.new_profiles_atompub_app.add_profile(profile)
        self.profiles_atompub_app.add_profile(profile)
        raise cherrypy.HTTPRedirect('/%s' % profile.username)

    def attach_serving_collection_application(self, c, profile, d):
        """Register the URL routes serving *profile*'s collection *c* on
        dispatcher *d* (paging, tags, feed, CRUD and the SpeakUp UI)."""
        profile_name = profile.username
        route = '/%s' % profile_name.encode('utf-8')
        controller = CollectionPagingHandler(c)
        d.add('%s/paging[/{start:digits}]' % route, GET=controller.GET)
        controller = CollectionTagingHandler(c)
        d.add('%s/tag/{tag}' % route, GET=controller.index)
        controller = CollectionHandler(c)
        speakup = SpeakUpWebApplication(self.base_dir, self.atompub,
                                        self.tpl_lookup, profile, controller)
        d.add('%s[/]' % route, GET=speakup.index,
              POST=controller.create)
        d.add('%s/feed' % route, GET=controller.feed)
        d.add('%s/{id:any}' % route, GET=controller.retrieve,
              PUT=controller.replace,
              DELETE=controller.remove)
| 37.594595 | 81 | 0.654445 | 452 | 4,173 | 5.878319 | 0.25 | 0.033873 | 0.039142 | 0.036131 | 0.208129 | 0.164471 | 0.106887 | 0.05796 | 0.05796 | 0.05796 | 0 | 0.001273 | 0.247304 | 4,173 | 110 | 82 | 37.936364 | 0.844635 | 0.005032 | 0 | 0.127907 | 0 | 0 | 0.056386 | 0.016386 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093023 | false | 0 | 0.081395 | 0 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |