hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b6670f5cb5c8d632fc48bea8d155de2c30d4d414 | 1,014 | py | Python | tests/test_geometry.py | Zepmanbc/cps_workflow | 0926a01c3ef1f163b43edcdae84cc77a1842f3b0 | [
"MIT"
] | 35 | 2019-07-03T16:45:47.000Z | 2022-03-31T16:08:35.000Z | tests/test_geometry.py | Zepmanbc/cps_workflow | 0926a01c3ef1f163b43edcdae84cc77a1842f3b0 | [
"MIT"
] | 42 | 2019-07-03T17:12:34.000Z | 2022-03-17T12:46:40.000Z | tests/test_geometry.py | Zepmanbc/cps_workflow | 0926a01c3ef1f163b43edcdae84cc77a1842f3b0 | [
"MIT"
] | null | null | null | """Geometry testing."""
import creopyson
from .fixtures import mk_creoson_post_dict, mk_creoson_post_None, mk_getactivefile
def test_geometry_bound_box(mk_creoson_post_dict, mk_getactivefile):
"""Test bound_box."""
c = creopyson.Client()
result = c.geometry_bound_box(file_="file")
assert isinstance(result, (dict))
result = c.geometry_bound_box()
assert isinstance(result, (dict))
def test_geometry_get_edges(mk_creoson_post_dict, mk_getactivefile):
"""Test get_edges."""
c = creopyson.Client()
result = c.geometry_get_edges(["12", "34"], file_="file")
assert isinstance(result, (list))
result = c.geometry_get_edges(["12", "34"])
assert isinstance(result, (list))
def test_geometry_get_surfaces(mk_creoson_post_dict, mk_getactivefile):
"""Test get_surfaces."""
c = creopyson.Client()
result = c.geometry_get_surfaces(file_="file")
assert isinstance(result, (list))
result = c.geometry_get_surfaces()
assert isinstance(result, (list))
| 32.709677 | 82 | 0.719921 | 132 | 1,014 | 5.181818 | 0.212121 | 0.061404 | 0.131579 | 0.099415 | 0.630117 | 0.489766 | 0.444444 | 0.266082 | 0.152047 | 0.152047 | 0 | 0.00927 | 0.148915 | 1,014 | 30 | 83 | 33.8 | 0.783314 | 0.067061 | 0 | 0.45 | 0 | 0 | 0.021622 | 0 | 0 | 0 | 0 | 0 | 0.3 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b667207fcb57bd4ec887cd84ed8d9aa150c17bab | 356 | py | Python | answers/Ananya Chandra/day4/question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Ananya Chandra/day4/question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Ananya Chandra/day4/question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | #print prime factorisation of a number
n= int(input("enter the number \n"))
def prime(t, j):
if(j<t):
if(t%j!=0):
return prime(t, j=j + 1)
else:
return 0
else:
return 1
i=2
while(n>1):
if (prime(i, 2) == 1):
while n % i == 0:
print(i, ",",end="")
n=n/i
i+=1
| 17.8 | 38 | 0.429775 | 58 | 356 | 2.637931 | 0.396552 | 0.039216 | 0.091503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046729 | 0.398876 | 356 | 19 | 39 | 18.736842 | 0.668224 | 0.103933 | 0 | 0.125 | 0 | 0 | 0.062893 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b66835431fc34e47226d3889e3140fdd215f8887 | 1,865 | py | Python | awx/main/tests/functional/api/test_project.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 2 | 2018-11-12T18:52:24.000Z | 2020-05-22T18:41:21.000Z | awx/main/tests/functional/api/test_project.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 4 | 2022-02-15T01:33:35.000Z | 2022-03-02T12:47:41.000Z | awx/main/tests/functional/api/test_project.py | gitEdouble/awx | 5885654405ccaf465f08df4db998a6dafebd9b4d | [
"Apache-2.0"
] | 9 | 2019-05-11T00:03:30.000Z | 2021-07-07T16:09:17.000Z | import os
from backports.tempfile import TemporaryDirectory
from django.conf import settings
import pytest
from awx.api.versioning import reverse
@pytest.mark.django_db
class TestInsightsCredential:
def test_insights_credential(self, patch, insights_project, admin_user, insights_credential):
patch(insights_project.get_absolute_url(),
{'credential': insights_credential.id}, admin_user,
expect=200)
def test_non_insights_credential(self, patch, insights_project, admin_user, scm_credential):
patch(insights_project.get_absolute_url(),
{'credential': scm_credential.id}, admin_user,
expect=400)
@pytest.mark.django_db
def test_project_custom_virtualenv(get, patch, project, admin):
with TemporaryDirectory(dir=settings.BASE_VENV_PATH) as temp_dir:
os.makedirs(os.path.join(temp_dir, 'bin', 'activate'))
url = reverse('api:project_detail', kwargs={'pk': project.id})
patch(url, {'custom_virtualenv': temp_dir}, user=admin, expect=200)
assert get(url, user=admin).data['custom_virtualenv'] == os.path.join(temp_dir, '')
@pytest.mark.django_db
def test_project_invalid_custom_virtualenv(get, patch, project, admin):
url = reverse('api:project_detail', kwargs={'pk': project.id})
resp = patch(url, {'custom_virtualenv': '/foo/bar'}, user=admin, expect=400)
assert resp.data['custom_virtualenv'] == [
'/foo/bar is not a valid virtualenv in {}'.format(settings.BASE_VENV_PATH)
]
@pytest.mark.django_db
@pytest.mark.parametrize('value', ["", None])
def test_project_unset_custom_virtualenv(get, patch, project, admin, value):
url = reverse('api:project_detail', kwargs={'pk': project.id})
resp = patch(url, {'custom_virtualenv': value}, user=admin, expect=200)
assert resp.data['custom_virtualenv'] is None
| 39.680851 | 97 | 0.716354 | 241 | 1,865 | 5.323651 | 0.282158 | 0.112237 | 0.049883 | 0.056118 | 0.590023 | 0.441933 | 0.357755 | 0.307872 | 0.144193 | 0.110678 | 0 | 0.009536 | 0.156568 | 1,865 | 46 | 98 | 40.543478 | 0.806103 | 0 | 0 | 0.257143 | 0 | 0 | 0.131903 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6688604687d78d27bb9bf2999491a71b429148b | 761 | py | Python | download codigo fontes/PythonExercicios/ex058.py | tidesjunior2018/Exercicios-da-linguagem-pyhton | 82bb9e7a121ce9fcb12591615120dd9f3a493555 | [
"MIT"
] | null | null | null | download codigo fontes/PythonExercicios/ex058.py | tidesjunior2018/Exercicios-da-linguagem-pyhton | 82bb9e7a121ce9fcb12591615120dd9f3a493555 | [
"MIT"
] | null | null | null | download codigo fontes/PythonExercicios/ex058.py | tidesjunior2018/Exercicios-da-linguagem-pyhton | 82bb9e7a121ce9fcb12591615120dd9f3a493555 | [
"MIT"
] | 1 | 2021-03-13T18:26:50.000Z | 2021-03-13T18:26:50.000Z | '''
58-Melhore o jogo do desafio 028 onde o computador vai "pensar" em um numero
entre 0 e 10.Só que agora vai tentar advinhar até acertar mostrando no final
quantos palpites foram necessários até vencer.
'''
import random
palpite=0
print('\033[33m{:=^40}'.format('JOGO DA ADVINHAÇÂO 2.0'))
print('\033[m')
numerosorteado=random.randint(0,10)
print(numerosorteado)
acertou=False
while not acertou:
numero=int(input('Digite o valor entre 0 e 10: '))
palpite+=1
if numero == numerosorteado:
acertou=True
else:
if numero < numerosorteado:
print('É maior.Tente mais uma vez!')
elif numero > numerosorteado:
print('É menos.Tente mais uma vez!')
print('Você acertou com {} palpites.'.format(palpite))
| 29.269231 | 76 | 0.68594 | 110 | 761 | 4.745455 | 0.627273 | 0.114943 | 0.02682 | 0.034483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046129 | 0.202365 | 761 | 25 | 77 | 30.44 | 0.813839 | 0.262812 | 0 | 0 | 0 | 0 | 0.280289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b66b180fc94b3f3077d165f9eba30fc51374504c | 20,727 | py | Python | main.py | HeloiseKatharine/Analise-de-dados | e05e337c0ef03ef68815aa684bdf4466226e02cb | [
"MIT"
] | null | null | null | main.py | HeloiseKatharine/Analise-de-dados | e05e337c0ef03ef68815aa684bdf4466226e02cb | [
"MIT"
] | null | null | null | main.py | HeloiseKatharine/Analise-de-dados | e05e337c0ef03ef68815aa684bdf4466226e02cb | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
from pymongo import MongoClient
def get_database():
from pymongo import MongoClient
CONNECTION_STRING = "#"
from pymongo import MongoClient
client = MongoClient(CONNECTION_STRING) #conexão com o cliente
return client["socioeconomico"] #base de dados
dbname = get_database()
collection_name = dbname["venezuela2021"]
detalhes_itens = collection_name.find()
# consulta o db no Mongo, coloca todos os dados do database nessa variavel.
df = pd.DataFrame(list(detalhes_itens)) #criei um df com o banco de dados
perfil = df[["gender", "age", "geography", "financial_situation"]] # criando um df apenas com as chaves interessantes para o perfil
#########################################################################################
###Criar gráfico que mostra a quantidade de pessoas entrevistadas em cada faixa etária
def faixaetaria(perfil):
age_qtd = (perfil["age"]).value_counts() # crio uma variavel em que quantifica cada faixa etaria obtida na pesquisa
print(age_qtd) # exemplo: 1304 pessoas tem idade de 26 a 35 anos
plt.style.use("ggplot")
age_qtd.plot.barh() # defino o tipo de grafico
plt.title("Número de pessoas entrevistadas por faixa etária") # adiciona titulo
plt.xlabel("Número de pessoas") # nomeia eixo x
plt.ylabel("Faixa etária") # nomeia eixo y
plt.show() # exibe o grafico
###Criar grafico com gênero e idade no formato barras
def generoidade(df):
perfil2 = df[["gender", "age"]]
print(perfil2)
plt.style.use("ggplot")
graf = (perfil2).value_counts()
print(graf)
graf.plot.barh()
plt.title("Número de pessoas entrevistadas por gênero e faixa etária") # adiciona titulo
plt.xlabel("Número de pessoas") # nomeia eixo x
plt.ylabel("Gênero e Faixa etária") # nomeia eixo y
plt.show() # exibe o grafico
###Criar grafico de Gênero no formato Pizza:
def genero(perfil):
gen_qtd = (perfil["gender"]).value_counts() # crio uma variavel em que quantifica cada genero obtido na pesquisa
print(gen_qtd)
df1 = gen_qtd.iloc[[2,3,4]]
df2=gen_qtd.drop(gen_qtd.index[[2,3,4]]) #aqui estou eliminando essas linhas para colocar o resultado da soma em uma linha só
print(df2) #só tem genero female e male
df2.loc['Others: Non-Binary, Non Available, Prefer not to Answer'] = sum(df1) # acrescento uma nova linha com index others e o valor da soma.
plt.style.use("ggplot")
df2.plot.pie(ylabel='',autopct='%1.1f%%',startangle = 90) # defino o tipo de grafico
plt.title("Número de pessoas entrevistadas por gênero") # adiciona titulo
plt.show() # exibe o grafico
###Criar gráfico das situações financeiras da pessoas entrevistadas pela pesquisa
def sitfin(perfil):
sitfin = (perfil["financial_situation"]).value_counts() # crio uma variavel em que quantifica a sit financeira de cada pessoa da pesquisa
print(sitfin) # exemplo: 1445 só conseguem custear comida e nada mais
plt.style.use("ggplot")
sitfin.plot.pie(autopct = "%1.1f%%", ylabel='')
plt.title("Situação financeira das pessoas entrevistadas") # adiciona titulo
plt.show() # exibe o grafico
###Criar gráfico que mostra a quantidade de pessoas entrevistadas por cada região em que vivem
def geografia(perfil):
geography = (perfil["geography"]).value_counts() # crio uma variavel em que quantifica cada faixa etaria obtida na pesquisa
print(geography) # exemplo: 1304 pessoas tem idade de 26 a 35 anos
plt.style.use("ggplot")
geography.plot.barh(color = "lightsalmon") # defino o tipo de grafico
plt.title("Número de pessoas entrevistadas por região em que vivem") # adiciona titulo
plt.xlabel("Número de pessoas") # nomeia eixo x
plt.ylabel("Região") # nomeia eixo y
plt.show() # exibe o grafico
###Criar um grafico em que mostra a relação entre a região e as pessoas que são muito vulneraveis financeiramente
def relregiaositfin(df):
perfil3 = df[["geography", "financial_situation"]] #cria dataframe com as chaves de interesse
aux = perfil3[(perfil3['financial_situation'] == 'I cannot afford enough food for my family')] #crio uma variavel em que recebe a sit fin desejada
print(aux.groupby('geography').count()) #relaciona a sit financeira desejada com a geografia e faz a contagem do num de pessoas.
graf1 = aux.groupby('geography').count() #crio uma variavel que relaciona a auxiliar (sit fin) com a geografia e quantifica
graf1.plot()
plt.title("Região em que vivem as pessoas que não conseguem comprar comida suficiente para a família") #Não consigo comprar comida suficiente para a minha família.
plt.ylabel("Número de pessoas")
plt.xlabel("Geografia")
plt.show()
def favoVulne(df):
#perfil de pessoa na situação mais confortavel, universidade/faculdade/pos graduação completa ou nao e criança com acesso a internet.
df_fvvn = df[['_id', 'financial_situation', 'education', 'do_children_have_internet_connection']]
docTotais = 4436
favo1 = len(df_fvvn[(df_fvvn['financial_situation'] == "I can comfortably afford food, clothes, and furniture, and I have savings") & (df_fvvn['education'] == "University or college degree completed") & (df_fvvn['do_children_have_internet_connection'] == '1')])
favoPorcent1 = (favo1 * 100) / docTotais
favo2 = len(df_fvvn[(df_fvvn['financial_situation'] == "I can comfortably afford food, clothes, and furniture, and I have savings") & (df_fvvn['education'] == "Some university or college") & (df_fvvn['do_children_have_internet_connection'] == '1')])
favoPorcent2 = (favo2 * 100) / docTotais
favo3 = len(df_fvvn[(df_fvvn['financial_situation'] == "I can comfortably afford food, clothes, and furniture, and I have savings") & (df_fvvn['education'] == "Post-graduate education") & (df_fvvn['do_children_have_internet_connection'] == '1')])
favoPorcent3 = (favo3 * 100) / docTotais
favo4 = len(df_fvvn[(df_fvvn['financial_situation'] == "I can comfortably afford food, clothes, and furniture, and I have savings") & (df_fvvn['education'] == "Post graduate") & (df_fvvn['do_children_have_internet_connection'] == '1')])
favoPorcent4 = (favo4 * 100) / docTotais
pessoasFavoravel = favo1 + favo2 + favo3 + favo4
pessoasFavoravelPorc = favoPorcent1 + favoPorcent2 + favoPorcent3 + favoPorcent4
print(f"{pessoasFavoravel} documentos apontaram que tem condições financeiras confortaveis, alto nivel educacional e criança com acesso a internet \nIsso representa {pessoasFavoravelPorc} % da amostra total\n")
#perfil de pessoa na situação mais vulneravel, baixo nivel educacional e criança sem acesso a internet
vulne1 = len(df_fvvn[(df_fvvn['financial_situation'] == "I cannot afford enough food for my family") & (df_fvvn['education'] == "No formal education") & (df_fvvn['do_children_have_internet_connection'] == '0')])
vulnePorcent1 = (vulne1 * 100) / docTotais
vulne2 = len(df_fvvn[(df_fvvn['financial_situation'] == "I cannot afford enough food for my family") & (df_fvvn['education'] == "Some primary education") & (df_fvvn['do_children_have_internet_connection'] == '0')])
vulnePorcent2 = (vulne2 * 100) / docTotais
vulne3 = len(df_fvvn[(df_fvvn['financial_situation'] == "I cannot afford enough food for my family") & (df_fvvn['education'] == "Primary school completed") & (df_fvvn['do_children_have_internet_connection'] == '0')])
vulnePorcent3 = (vulne3 * 100) / docTotais
pessoasVulneraveis = vulne1 + vulne2 + vulne3
pessoasVulneraveisPorc = vulnePorcent1 + vulnePorcent2 + vulnePorcent3
print(f"{pessoasVulneraveis} documentos apontaram que não tem condições de custear alimentação suficiente, tem baixo nivel educacional e criança sem acesso a internet \nIsso representa {pessoasVulneraveisPorc} % da amostra total\n")
grupos = ['Condição Mais \n Favorável', 'Condição Menos \n Favorável']
valores = [pessoasFavoravel, pessoasVulneraveis]
plt.title('OS DOIS PERFIS EXTREMOS')
plt.ylabel('Numero de formularios')
plt.bar(grupos, valores)
plt.show()
def desfavoravel(df):
df_vul = df[['_id', 'financial_situation', 'education', 'do_children_have_internet_connection']]
docTotais = 4436
semAlimentacao = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family")])
#perfil de pessoa na situação mais vulneravel, universidade/faculdade/pos graduação completa ou nao e criança sem acesso a internet
alto1 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "University or college degree completed") & (df_vul['do_children_have_internet_connection'] == '0')])
altoPorcent1 = (alto1 * 100) / docTotais
alto2 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Some university or college") & (df_vul['do_children_have_internet_connection'] == '0')])
altoPorcent2 = (alto2 * 100) / docTotais
alto3 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Post-graduate education") & (df_vul['do_children_have_internet_connection'] == '0')])
altoPorcent3 = (alto3 * 100) / docTotais
alto4 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Post graduate") & (df_vul['do_children_have_internet_connection'] == '0')])
altoPorcent4 = (alto4 * 100) / docTotais
pessoasEducAlta = alto1 + alto2 + alto3 + alto4
educAlta = altoPorcent1 + altoPorcent2 + altoPorcent3 + altoPorcent4
print(f"{pessoasEducAlta} documentos apontaram não ter condições de custear alimentação suficiente, tem alto nivel educacional e criança sem acesso a internet \nIsso representa {educAlta} % da amostra total\n")
#perfil de pessoa na situação mais vulneravel, educação secundaria e criança sem acesso a internet
med1 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Secondary school/ high school completed") & (df_vul['do_children_have_internet_connection'] == '0')])
medPorcent1 = (med1 * 100) / docTotais
med2 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Some secondary school / high school") & (df_vul['do_children_have_internet_connection'] == '0')])
medPorcent2 = (med2 * 100) / docTotais
med3 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Secondary/high school") & (df_vul['do_children_have_internet_connection'] == '0')])
medPorcent3 = (med3 * 100) / docTotais
pessoasEducMedia = med1 + med2 + med3
educMedia = medPorcent1 + medPorcent2 + medPorcent3
print(f"{pessoasEducMedia} documentos apontaram não ter condições de custear alimentação suficiente, tem medio nivel educacional e criança sem acesso a internet \nIsso representa {educMedia} % da amostra total\n")
#perfil de pessoa na situação mais vulneravel, educação tecnica completa ou nao (agrupadas) e criança sem acesso a internet
tec1 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Technical school diploma or degree completed") & (df_vul['do_children_have_internet_connection'] == '0')])
tecPorcent1 = (tec1 * 100) / docTotais
tec2 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Some technical education (e.g polytechnic school") & (df_vul['do_children_have_internet_connection'] == '0')])
tecPorcent2 = (tec2 * 100) / docTotais
tec3 = len(df_vul[(df_vul['financial_situation'] == "I cannot afford enough food for my family") & (df_vul['education'] == "Technical school") & (df_vul['do_children_have_internet_connection'] == '0')])
tecPorcent3 = (tec3 * 100) / docTotais
pessoasEducTecnica = tec1 + tec2 + tec3
educTecnica = tecPorcent1 + tecPorcent2 + tecPorcent3
print(f"{pessoasEducTecnica} documentos apontaram não ter condições de custear alimentação suficiente para a familia, tem nivel educacional técnico e criança sem acesso a internet \nIsso representa {educTecnica} % da amostra total\n")
grupos = ['Não conseguem \nCustear alimentação', 'Ensino Superior', 'Ensino Médio', 'Ensino Tecnico']
valores = [semAlimentacao, pessoasEducAlta, pessoasEducMedia, pessoasEducTecnica]
plt.title('RELAÇÃO VULNERABILIDADE X NIVEL EDUCACIONAL')
plt.ylabel('Numero de formularios')
plt.bar(grupos, valores)
plt.show()
def intAcess1(df):
#se a criança tem acesso a internet e tem energia eletrica consistentes, se perde aula. se nao tem acesso, esta com aula presencial
df_vul = df[['_id', 'do_children_have_internet_connection', 'does_home_shows_severe_deficit_of_electricity', 'does_home_shows_severe_deficit_of_internet', 'do_children_3_to_17_yrs_miss_virtual_class_due_to_lack_of_electricity', 'are_children_attending_face_to_face_classes', 'are_children_being_teached_by_unqualified_people']]
docTotais = 4436
perfil1 = len(df_vul[(df_vul['does_home_shows_severe_deficit_of_electricity'] == '0') & (df_vul['does_home_shows_severe_deficit_of_internet'] == '0') & (df_vul['do_children_have_internet_connection'] == '1') & (df_vul['do_children_3_to_17_yrs_miss_virtual_class_due_to_lack_of_electricity'] == '0')])
porcentagem1 = (perfil1 * 100) / docTotais
print(f"{perfil1} documentos apontaram que há crianças sem problemas de conexão com internet ou falta de energia eletrica e não perdem aulas por estes motivos.\nIsso representa {porcentagem1} % da amostra total\n")
perfil2 = len(df_vul[(df_vul['does_home_shows_severe_deficit_of_electricity'] == '1') | (df_vul['does_home_shows_severe_deficit_of_internet'] == '1') & (df_vul['do_children_have_internet_connection'] == '1') & (df_vul['do_children_3_to_17_yrs_miss_virtual_class_due_to_lack_of_electricity'] == '1')])
porcentagem2 = (perfil2 * 100) / docTotais
print(f"{perfil2} documentos apontaram que há crianças com problemas de conexão com internet ou falta de energia eletrica e perdem aulas por estes motivos.\nIsso representa {porcentagem2} % da amostra total\n")
perfil3 = len(df_vul[(df_vul['are_children_attending_face_to_face_classes'] == '1') | (df_vul['does_home_shows_severe_deficit_of_internet'] == '1') & (df_vul['do_children_have_internet_connection'] == '0')])
porcentagem3 = (perfil3 * 100) / docTotais
print(f"{perfil3} documentos apontaram que há crianças sem acesso a internet ou tem problemas de conexão e estão tendo aulas presenciais.\nIsso representa {porcentagem3} % da amostra total\n")
perfil4 = len(df_vul[(df_vul['are_children_attending_face_to_face_classes'] == '0') & (df_vul['does_home_shows_severe_deficit_of_internet'] == '1') & (df_vul['do_children_have_internet_connection'] == '0')])
porcentagem4 = (perfil4 * 100) / docTotais
print(f"{perfil4} documentos apontaram que há crianças sem acesso a internet ou tem problemas de conexão e não estão tendo aulas presenciais.\nIsso representa {porcentagem4} % da amostra total\n")
perfil5 = len(df_vul[(df_vul['are_children_attending_face_to_face_classes'] == '0') & (df_vul['does_home_shows_severe_deficit_of_internet'] == '1') & (df_vul['do_children_have_internet_connection'] == '0') & (df_vul['are_children_being_teached_by_unqualified_people'] == '1')])
porcentagem5 = (perfil5 * 100) / docTotais
print(f"{perfil5} documentos apontaram que há crianças sem acesso a internet ou tem problemas de conexão e não estão tendo aulas presenciais \n e estão sendo ensinadas por pessoas sem qualificação.Isso representa {porcentagem5} % da amostra total\n")
grupos = ['Não perdem \naula virtual', 'Problemas técnicos \nPerdem aula virtual', 'Sem acesso \nAula presencial', 'Sem aula virtual\n nem presencial', 'Aula com pessoas\n não qualificadas']
valores = [perfil1, perfil2, perfil3, perfil4, perfil5]
plt.title('RELAÇÃO ACESSOS A INTERNET E ENERGIA x AULA VIRTUAL/PRESENCIAL')
plt.ylabel(' Numero de formularios')
plt.bar(grupos, valores)
plt.show()
def inseg(df):#Grafico barra alimentação
data=df[["financial_situation","do_children_3_and_17_yrs_receive_regular_school_meals"]]
aux = data[(data['do_children_3_and_17_yrs_receive_regular_school_meals'] == "No")]
graf = aux.groupby('financial_situation').count()
graf.plot.barh()
plt.title("Insegurança alimentar x Situação Financeira")
plt.ylabel("")
L=plt.legend(bbox_to_anchor=(1.1,1.1),\
bbox_transform=plt.gcf().transFigure)
L.get_texts()[0].set_text('Crianças que recebem comida na escola')
plt.savefig('temp.png')
plt.show()
def evesao(df): #Grafico barra para evasão escolar
data=df[["education","were_children_3_to_17_yrs_enrolled_and_did_not_return_to_school"]]
aux = data[(data['were_children_3_to_17_yrs_enrolled_and_did_not_return_to_school'] == "0")]
graf = aux.groupby('education').count()
graf.plot.barh()
plt.title("Evasão escolar x nível educacional do responsável")
plt.ylabel("")
L=plt.legend(bbox_to_anchor=(1.1,1.1),\
bbox_transform=plt.gcf().transFigure)
L.get_texts()[0].set_text('Crianças que não retornaram a escola')
plt.savefig('temp.png')
plt.show()
#retorna um gráfico do grau de escolaridade das pessoas que responderam o questionário
def educacao(df):
df_new = df[['education']]
#destaca a coluna com o maior valor
explode = (0.1, 0, 0, 0, 0, 0, 0, 0)
colors = ['#FFFF00', '#800080','#B22222','#483D8B','#FA8072','#CD853F','#2E8B57', '#FF4500']
labels = ['Graduação em faculdade completa','Segundo grau (Ensino médio) completo', 'Diploma de escola técnica ou algum título completo','Possui alguma educação universitária','Possui alguma educação técnica','Possui alguma educação secundária/ensino médio', 'Pós-graduação completa','Outros']
#gráfico de pizza da educação
graf = (df_new["education"]).value_counts()
# autopct = rotular as fatias com seu valor numérico
# shadow = sombra
graf2 = graf
soma = sum(graf2.iloc[[12, 13, 14, 15, 8, 11, 10, 7, 9]])
graf2 = graf2.drop(graf2.index[[12, 13, 14, 15, 8, 11, 10, 7, 9]])
graf2.loc['Others'] = soma
graf2.plot.pie(autopct='%1.1f%%', explode= explode, shadow=True, startangle = 90, labels=labels, ylabel='', colors = colors)
print(graf2)
plt.title('Educação na Venezuela')
plt.show()
#retorna um gráfico da situação financeira em relação a educação de pessoas que possuem algum diploma
def financial_situation_education(df):
colunas = ['financial_situation', 'education']
df_new = df.filter(items = colunas)
aux = df_new[(df_new['education'] == 'University or college degree completed') | (df_new['education'] == 'Secondary school/ high school completed') | (df_new['education'] == 'Technical school diploma or degree completed')]
graf = aux.groupby('financial_situation').count()
graf.plot.pie(autopct='%1.1f%%', shadow=True, startangle = 90, subplots=True, ylabel='')
L = plt.legend(bbox_to_anchor=(1.9, 1.1))
print(graf)
plt.savefig('temp.png')
plt.title('Situação finaceira X Educação')
plt.show()
#Calling the functions
#faixaetaria(perfil) #chart showing the number of interviewees in each age group
#generoidade(df) #bar chart showing gender and age of the interviewees
#genero(perfil) #pie chart showing the number of people per gender
#sitfin(perfil) #chart of the financial situations of the survey respondents
#geografia(perfil) #chart showing the number of interviewees per region where they live
#relregiaositfin(df) #chart showing the relation between region and financially vulnerable people
#favoVulne(df) #presents opposite profiles: most unfavourable and most favourable conditions
#intAcess1(df) #relates internet access and class modality
#desfavoravel(df) #relates people who cannot afford food with high and medium education levels
#inseg(df) #chart comparing financial situation with food at school
#evesao(df) #chart comparing school dropout with the guardian's education level
#educacao(df) #shows the education chart
#financial_situation_education(df) #shows the financial situation vs. education chart
| 58.716714 | 331 | 0.726444 | 2,819 | 20,727 | 5.184108 | 0.181979 | 0.022923 | 0.02395 | 0.037635 | 0.548241 | 0.523402 | 0.495415 | 0.455522 | 0.415287 | 0.372041 | 0 | 0.023087 | 0.157813 | 20,727 | 352 | 332 | 58.883523 | 0.814104 | 0.200174 | 0 | 0.230047 | 0 | 0.046948 | 0.493564 | 0.124459 | 0 | 0 | 0 | 0.002841 | 0 | 1 | 0.065728 | false | 0 | 0.028169 | 0 | 0.098592 | 0.093897 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b66d2d5519a0ef3b8c25d34eaafb9a51b73e5edc | 2,235 | py | Python | utils/preprocess_word_embedding.py | JiaqiYao/dynamic_multi_label | dae8b34349f6da80e962fefd5349a29a0f5630f1 | [
"MIT"
] | 2 | 2020-11-29T07:09:29.000Z | 2020-12-22T07:40:24.000Z | utils/preprocess_word_embedding.py | JiaqiYao/dynamic_multi_label | dae8b34349f6da80e962fefd5349a29a0f5630f1 | [
"MIT"
] | null | null | null | utils/preprocess_word_embedding.py | JiaqiYao/dynamic_multi_label | dae8b34349f6da80e962fefd5349a29a0f5630f1 | [
"MIT"
] | null | null | null | import os
import json
import pickle

from tqdm import tqdm
def build_vocabulary(data_dir):
    """Collect the vocabulary of the training corpus and pickle it.

    Reads the tokenised training texts (JSON, ``<data_dir>/train_texts.txt``)
    and the pickled key-word lists (``<data_dir>/Telegram/train_key_words.dat``),
    unions every token into one set, and writes the set to
    ``<data_dir>/Telegram/words.dat`` with pickle.

    Note: the original code called ``pickle.load``/``pickle.dump`` without
    importing pickle, which raised NameError at runtime; the import is now
    provided at module level.
    """
    with open(os.path.join(data_dir, 'train_texts.txt'), 'rt') as fin:
        train_texts = json.load(fin)
    print("train text cuts load done")
    with open(os.path.join(data_dir, "Telegram", 'train_key_words.dat'), 'rb') as fin:
        train_key_words = pickle.load(fin)
    print("train key_words load done")
    words = set()
    # set.update replaces the manual inner word loops.
    for train_text in tqdm(train_texts, miniters=1000):
        words.update(train_text)
    for key_word in tqdm(train_key_words, miniters=1000):
        words.update(key_word)
    with open(os.path.join(data_dir, "Telegram", "words.dat"), 'wb') as fout:
        pickle.dump(words, fout)
    print("Build Vocabulary Done!!!")
def get_word_embedding(data_home, word2vec_name):
    """Filter a pretrained text-format word2vec file down to the vocabulary.

    Loads the pickled vocabulary from ``<data_home>/Telegram/words.dat``,
    scans the word2vec file ``<data_home>/word_embedding/<word2vec_name>``
    (first line: "<word count> <embedding size>", then one "<word> <floats...>"
    per line), keeps only vectors for in-vocabulary words, and pickles the
    resulting dict to ``<data_home>/word_embedding/telegram_word_embedding.dat``.

    Note: the original code used pickle without importing it (NameError at
    runtime); the import is now provided at module level.
    """
    with open(os.path.join(data_home, "Telegram", "words.dat"), 'rb') as fin:
        words = pickle.load(fin)
    telegram_word_embeddings = dict()
    print("The number of words is {}".format(len(words)))
    word2vec_path = os.path.join(data_home, "word_embedding", word2vec_name)
    with open(word2vec_path, 'rt') as fin:
        line = fin.readline()
        words_num, embed_size = line.split()
        print("The number of words is {}, the embedding size is {}".format(words_num, embed_size))
        for line in tqdm(fin, miniters=5000):
            word, embed = line.split(maxsplit=1)
            if word in words:
                try:
                    telegram_word_embeddings[word] = [float(vec) for vec in embed.split()]
                except ValueError as e:
                    # Only float() parsing can fail here; report and skip the line.
                    print(e)
                    print(line)
    with open(os.path.join(data_home, "word_embedding", "telegram_word_embedding.dat"), 'wb') as fout:
        pickle.dump(telegram_word_embeddings, fout)
    print("done!!!")
if __name__ == "__main__":
    # Script entry point: build the vocabulary, then filter the embeddings.
    data_dir = r'/home/yaojq/data/text/reuters'
    # NOTE(review): an absolute path is passed as `word2vec_name`; os.path.join
    # discards the earlier components when the second argument is absolute, so
    # this happens to resolve to the path itself on POSIX.  Also, this file is
    # the *binary* GoogleNews model while get_word_embedding parses the text
    # word2vec format — confirm the intended input file.
    word2vec_path = "/home/yaojq/data/word_embedding/GoogleNews-vectors-negative300.bin"
    print("build vocabulary")
    build_vocabulary(data_dir)
    get_word_embedding(data_dir, word2vec_path)
| 39.910714 | 103 | 0.632215 | 305 | 2,235 | 4.42623 | 0.259016 | 0.036296 | 0.044444 | 0.062222 | 0.257037 | 0.203704 | 0.138519 | 0.048889 | 0 | 0 | 0 | 0.013095 | 0.248322 | 2,235 | 55 | 104 | 40.636364 | 0.790476 | 0 | 0 | 0.041667 | 0 | 0 | 0.19229 | 0.055989 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.0625 | 0 | 0.104167 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b66d60f785036a36c6d1ed53d8206d68665248c2 | 2,797 | py | Python | stories.py | grantat/news-similarity-core | 278399bb215954510fa265ba4bc5b28f7f02e1ee | [
"MIT"
] | null | null | null | stories.py | grantat/news-similarity-core | 278399bb215954510fa265ba4bc5b28f7f02e1ee | [
"MIT"
] | null | null | null | stories.py | grantat/news-similarity-core | 278399bb215954510fa265ba4bc5b28f7f02e1ee | [
"MIT"
] | null | null | null | import requests
import json
import hashlib
import os
import argparse
def load_links(filename):
    """Parse *filename* as JSON and return the resulting object."""
    with open(filename) as handle:
        return json.load(handle)
def get_story(session, uri):
    """Fetch *uri* with the given requests session (TLS verification off).

    Returns the full response object, or None if the request raised.
    """
    print(uri)
    try:
        return session.get(uri, verify=False)
    except Exception as err:
        print("Failed with error", err)
        return None
if __name__ == "__main__":
    # Download the pages listed in the per-day link files for these months,
    # saving each page under data/stories/ keyed by the MD5 of its URI.
    # months to download stories from
    months = ["2016_12", "2017_01"]
    parser = argparse.ArgumentParser()
    # parser.add_argument("links_json", type=str,
    # help="Links per day JSON file to iterate upon")
    parser.add_argument("--kval", type=str,
                        help="Links per day JSON file to iterate upon")
    args = parser.parse_args()
    # One shared session so connections and headers are reused across requests.
    session = requests.Session()
    session.headers = headers = {
        'user-agent': 'Web Science and Digital Libraries (@WebSciDL) '
                      '<gatki001@odu.edu>'}
    session.max_redirects = 100
    for mo in months:
        print("Month {}".format(mo))
        links_by_day = load_links(
            "data/links_per_day/{}/links_per_day_{}.json".format(mo,
                                                                 args.kval))
        # Failed downloads for this month are appended here; the file is
        # removed at the end if nothing failed.
        error_file = "data/errors/links_{}.txt".format(mo)
        with open(error_file, 'w') as err_out:
            for day in links_by_day:
                links = links_by_day[day]
                print("Day {}".format(day))
                for uri in links:
                    directory = "./data/stories/if_/{}/{}/".format(mo, day)
                    # File name is the MD5 of the URI, so reruns can detect
                    # already-downloaded pages and skip them.
                    link_hash = hashlib.md5(uri.encode()).hexdigest()
                    outfile = directory + link_hash + ".html"
                    if not os.path.exists(directory):
                        os.makedirs(directory)
                    if os.path.exists(outfile):
                        continue
                    resp = get_story(session, uri)
                    if not resp:
                        # get_story returned None: the request itself failed.
                        print("Error with response:", resp)
                        print("{}\nError with response: {}".format(
                            uri, resp), file=err_out)
                        continue
                    if resp.status_code == 200:
                        with open(outfile, "w") as out:
                            out.write(resp.text)
                    else:
                        # Non-200: log the redirect history and the status.
                        print(resp.history)
                        print("ERR::{} response code".format(resp.status_code))
                        print("{}\nError with response code: {}".format(
                            uri, resp.status_code), file=err_out)
        # Drop the error log if the whole month downloaded cleanly.
        if os.path.getsize(error_file) == 0:
            os.remove(error_file)
| 34.530864 | 79 | 0.505899 | 302 | 2,797 | 4.539735 | 0.377483 | 0.023341 | 0.032093 | 0.032823 | 0.062728 | 0.062728 | 0.062728 | 0.062728 | 0.062728 | 0.062728 | 0 | 0.013411 | 0.386843 | 2,797 | 80 | 80 | 34.9625 | 0.786006 | 0.076153 | 0 | 0.032258 | 0 | 0 | 0.144134 | 0.035742 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.080645 | 0 | 0.16129 | 0.145161 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b66d7aa9b11fd773a698acfd6bb25cc487a20615 | 3,564 | py | Python | python/downsample_images_in_rosbag.py | JzHuai0108/vio_common | 2d9c5fce761034cb4e55b3395d259ce392da8ee6 | [
"BSD-3-Clause"
] | 16 | 2017-06-02T07:22:31.000Z | 2022-03-23T02:39:39.000Z | python/downsample_images_in_rosbag.py | JzHuai0108/vio_common | 2d9c5fce761034cb4e55b3395d259ce392da8ee6 | [
"BSD-3-Clause"
] | 2 | 2020-08-10T04:01:35.000Z | 2021-01-18T08:21:17.000Z | python/downsample_images_in_rosbag.py | JzHuai0108/vio_common | 2d9c5fce761034cb4e55b3395d259ce392da8ee6 | [
"BSD-3-Clause"
] | 19 | 2017-08-03T02:23:11.000Z | 2021-09-22T02:17:46.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import argparse
import rosbag
import rospy
from cv_bridge import CvBridge
import cv2
import play_images_in_rosbag
def decide_output_encoding(cv_img):
    """Pick the ROS image encoding for republishing *cv_img*.

    16UC1 input images must be written back with the 'mono16' encoding; every
    other dtype can simply pass through unchanged.
    see http://library.isr.ist.utl.pt/docs/roswiki/cv_bridge(2f)Tutorials(2f)
    UsingCvBridgeToConvertBetweenROSImagesAndOpenCVImages.html
    :param cv_img: OpenCV image (numpy array).
    :return: encoding string for cv2_to_imgmsg.
    """
    return 'mono16' if cv_img.dtype == 'uint16' else 'passthrough'
def main():
    """Downsample every camera image in a ROS bag by half and shift all
    timestamps so header stamps match (or are offset from) bag receipt time.

    Reads '/imu0', '/cam0/image_raw' and '/cam1/image_raw' from the input bag
    and writes the adjusted messages to a new bag (default: <input>_half.bag).
    """
    parser = argparse.ArgumentParser(
        description=("Downscale images and shift timestamps for sensor messages in "
                     "a ROS bag with topics '/cam0/image_raw', '/cam1/image_raw', '/imu0'."))
    parser.add_argument("bag_file", help="Input ROS bag.")
    parser.add_argument(
        '--time_delay',
        help="unit nanoseconds, time delay + original.header.stamp = "
        "shifted.header.stamp. If not provided, time delay will set as "
        "ros message time - message[0].header.stamp",
        type=int,
        default=None)
    parser.add_argument("--out_bag_file",
                        help="Output ROS bag file.",
                        default=None)
    args = parser.parse_args()

    out_bag_file = args.out_bag_file
    if args.out_bag_file is None:
        out_bag_file = os.path.join(
            os.path.splitext(args.bag_file)[0] + '_half.bag')
    in_bag = rosbag.Bag(args.bag_file, "r")
    out_bag = rosbag.Bag(out_bag_file, 'w')

    time_shift = None
    if args.time_delay is not None:
        # Split the nanosecond delay into (secs, nsecs) for rospy.Duration.
        time_shift = rospy.Duration(args.time_delay // 1000000000,
                                    args.time_delay % 1000000000)
        print('Raw message time offset set to {}'.format(time_shift))

    # Copy IMU messages, shifting their header stamps.  If no explicit delay
    # was given, derive the shift from the first IMU message (bag receipt
    # time minus header stamp).
    count = 0
    for topic, msg, t in in_bag.read_messages(topics=['/imu0']):
        if time_shift is None:
            time_shift = t - msg.header.stamp
            print('Raw message time offset set to {}'.format(time_shift))
        msg.header.stamp = time_shift + msg.header.stamp
        out_bag.write(topic, msg, msg.header.stamp)
        count += 1
    print('Saved {} messages on topic /imu0'.format(count))

    bridge = CvBridge()
    for k in range(2):
        count = 0
        image_topic = '/cam{}/image_raw'.format(k)
        encoding = ''
        for _, msg, t in in_bag.read_messages(topics=[image_topic]):
            cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
            h, w = cv_img.shape[:2]
            # Gaussian-pyramid downsampling to half resolution.
            cv_half_img = cv2.pyrDown(cv_img, dstsize=(w // 2, h // 2))
            if count == 0:
                # Decide the output encoding once, from the first frame.
                print('Image info before and after half sampling:')
                play_images_in_rosbag.print_image_info(cv_img)
                play_images_in_rosbag.print_image_info(cv_half_img)
                encoding = decide_output_encoding(cv_img)
            cv2.imshow('Downsampled frame', cv_half_img)
            # Pressing 'q' aborts the current topic's conversion.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            count += 1
            rosimage = bridge.cv2_to_imgmsg(cv_half_img, encoding=encoding)
            rosimage.header.stamp = time_shift + msg.header.stamp
            out_bag.write(image_topic, rosimage, rosimage.header.stamp)
        print('Saved {} images on topic {}'.format(count, image_topic))
    cv2.destroyAllWindows()
    out_bag.close()
    in_bag.close()
    print("Output bag: {}".format(out_bag_file))
if __name__ == '__main__':
    # CLI entry point; all arguments are parsed inside main().
    main()
| 33.942857 | 84 | 0.623457 | 471 | 3,564 | 4.494692 | 0.314225 | 0.036372 | 0.033066 | 0.025508 | 0.16958 | 0.144544 | 0.144544 | 0.144544 | 0.085026 | 0.085026 | 0 | 0.021714 | 0.263468 | 3,564 | 104 | 85 | 34.269231 | 0.784762 | 0.076038 | 0 | 0.103896 | 0 | 0 | 0.192945 | 0.019939 | 0 | 0 | 0.001227 | 0 | 0 | 1 | 0.025974 | false | 0.025974 | 0.103896 | 0 | 0.142857 | 0.116883 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b66e79df977dcaa16a976367fddaca5f9d45f186 | 1,770 | py | Python | gen_dataset.py | mantydze/knapsack-problem-py | 15caf65c8abde693f04eae0291933fa426aad1d9 | [
"MIT"
] | null | null | null | gen_dataset.py | mantydze/knapsack-problem-py | 15caf65c8abde693f04eae0291933fa426aad1d9 | [
"MIT"
] | null | null | null | gen_dataset.py | mantydze/knapsack-problem-py | 15caf65c8abde693f04eae0291933fa426aad1d9 | [
"MIT"
] | null | null | null | import json
import random
import sys
import time
# ks() recurses one level per item (up to ~2001 items below), which exceeds
# Python's default recursion limit of 1000.
sys.setrecursionlimit(2500)
# Memoisation table shared with ks(); the generator loop below resets it
# after each problem instance.
memo = {}
def ks(capacity_left, n):
    """Solve the 0/1 knapsack recursion with memoisation.

    capacity_left(int): remaining storage capacity of the bag
    n(int): index of the item currently considered (scans right to left)

    Uses the module-level `weights`, `values` and `memo`.  Returns the best
    total value achievable with items 0..n under the given capacity.
    """
    if n == -1 or capacity_left == 0:
        # No more items to consider, or no room left.
        return 0

    # Memo key.  The previous integer key `capacity_left * 2000 + n` relied on
    # n < 2000, but the generator below creates up to 2001 items, so
    # h(c, 2000) == h(c + 1, 0) — a latent collision hazard.  A tuple key is
    # collision-free regardless of item count.
    h = (capacity_left, n)
    if h in memo:
        return memo[h]

    if weights[n] > capacity_left:
        # Current item is too heavy for the remaining capacity; skip it.
        return ks(capacity_left, n - 1)

    # Best of leaving item n out vs. putting it in the bag.
    _without = ks(capacity_left, n - 1)
    _with = values[n] + ks(capacity_left - weights[n], n - 1)
    val = max(_with, _without)
    memo[h] = val
    return val
# Generate 2001 random knapsack instances of growing size, solving each one
# exactly with ks(), and dump the whole dataset (items + optimal values) to
# dataset.json.
weights = []
values = []
capacities = []
bests = []
capacity = 0

for i in range(2001):
    begin = time.time()
    # Add one random item and grow the capacity, then re-solve the instance.
    weights.append(random.randint(0, 100))
    values.append(random.randint(0, 100))
    capacity += random.randint(0, 25)
    capacities.append(capacity)
    best = ks(capacity, len(weights)-1)
    bests.append(best)
    # Reset the memo table: capacity changed, so cached values are stale.
    memo = {}
    end = time.time()
    seconds = end - begin
    print("Items", i)
    # print(weights)
    # print(values)
    print("Capacity:", capacity)
    print("Best:", best)
    print("Seconds:", seconds)
    print("*"*40)

with open("dataset.json", "w+") as f:
    ds = {
        "values": values,
        "weights": weights,
        "capacities": capacities,
        "bests": bests
    }
    json.dump(ds, f, indent=4)
b66f2f7d9d9618cebbca9ca3940382b757594d51 | 15,629 | py | Python | Juris_Cam.py | eugeniu1994/Stereo-Camera-LiDAR-calibration | 54eec1b911f78ca6b66c35803c47d016b7069499 | [
"Unlicense"
] | 6 | 2021-06-02T03:42:11.000Z | 2022-02-17T12:30:00.000Z | Juris_Cam.py | eugeniu1994/Stereo-Camera-LiDAR-calibration | 54eec1b911f78ca6b66c35803c47d016b7069499 | [
"Unlicense"
] | 1 | 2021-06-09T07:16:09.000Z | 2021-06-09T07:16:09.000Z | Juris_Cam.py | eugeniu1994/Stereo-Camera-LiDAR-calibration | 54eec1b911f78ca6b66c35803c47d016b7069499 | [
"Unlicense"
] | 1 | 2021-08-13T05:20:19.000Z | 2021-08-13T05:20:19.000Z | ''' CONFIDENTIAL
Copyright (c) 2021 Eugeniu Vezeteu,
Department of Remote Sensing and Photogrammetry,
Finnish Geospatial Research Institute (FGI), National Land Survey of Finland (NLS)
PERMISSION IS HEREBY LIMITED TO FGI'S INTERNAL USE ONLY. THE CODE
MAY BE RE-LICENSED, SHARED, OR TAKEN INTO OTHER USE ONLY WITH
A WRITTEN CONSENT FROM THE HEAD OF THE DEPARTMENT.
The software is provided "as is", without warranty of any kind, express or
implied, including but not limited to the warranties of merchantability,
fitness for a particular purpose and noninfringement. In no event shall the
authors or copyright holders be liable for any claim, damages or other
liability, whether in an action of contract, tort or otherwise, arising from,
out of or in connection with the software or the use or other dealings in the
software.
'''
import numpy as np
import cv2
import glob
import pickle
np.set_printoptions(suppress=True)
from sympy import *
class StereoChess_Calibrator(object):
    """Calibrates a thermal / RGB / monochrome camera rig from chessboard
    images: per-camera intrinsics, then thermal->mono and thermal->rgb
    extrinsics, saved to calib_data.pkl; testCalibration() reprojects the
    board between cameras to sanity-check the result.
    """

    def __init__(self, path):
        # Termination criteria for corner refinement / calibration solvers.
        self.term_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 1000, 0.0001)
        self.square = 0.1  # m (the size of each chessboard square is 10cm)
        self.objp = np.zeros((10 * 7, 3), np.float32)  # chessboard is 7x10
        self.objp[:, :2] = np.mgrid[0:10, 0:7].T.reshape(-1, 2) * self.square
        self.see = True  # live preview toggle while extracting corners
        self.path = path
        self.thermaImg, self.rgbImg, self.monoImg = [], [], []
        # World-frame axis endpoints (origin, X, Y, Z) used to draw the board pose.
        self.axis = np.float32([[0,0,0], [9,0,0], [0,7,0], [0,0,-5]]).reshape(-1,3)*self.square

    def draw(self, img, corners, imgpts):
        """Draw the three projected axis lines from the first board corner."""
        corner = tuple(corners[0])
        img = cv2.line(img, corner, tuple(imgpts[0]), (255, 0, 0), 5)
        img = cv2.line(img, corner, tuple(imgpts[1]), (0, 255, 0), 5)
        img = cv2.line(img, corner, tuple(imgpts[2]), (0, 0, 255), 5)
        return img

    def read_images(self):
        '''
        read all camera images (thermal, monochrome and rgb)
        '''
        # NOTE(review): pattern says 'themal_image_*' (sic) — presumably it
        # matches the actual capture filenames; verify against the data dir.
        thermal = glob.glob(self.path + '/themal_image_*.png')
        rgb = glob.glob(self.path + '/rgb_image_*.png')
        mono = glob.glob(self.path + '/monochrome_image_*.png')
        # Sort so index i refers to the same capture instant in all three lists.
        thermal.sort()
        rgb.sort()
        mono.sort()
        for i, fname in enumerate(thermal):
            thermal_img = cv2.imread(thermal[i])
            rgb_img = cv2.imread(rgb[i])
            mono_img = cv2.imread(mono[i])
            self.thermaImg.append(thermal_img)
            self.rgbImg.append(rgb_img)
            self.monoImg.append(mono_img)
        self.thermaImg, self.rgbImg, self.monoImg = np.array(self.thermaImg), np.array(self.rgbImg), np.array(
            self.monoImg)
        print('read_images: thermaImg->{}, rgbImg->{}, monoImg->{} '.format(np.shape(self.thermaImg),
                                                                            np.shape(self.rgbImg),
                                                                            np.shape(self.monoImg)))

    def read_points(self, camera=None):  # camera in [mono, rgb, thermal]
        '''
        extract chessboard corner points from the given camera's images
        (thermal, monochrome or rgb); returns (objpoints, imgpoints, img_shape)
        '''
        self.see = True
        wait = 0  # cv2.waitKey delay: 0 = block until a key is pressed
        if camera == 'mono':
            print('Mono camera calibration')
            images = self.monoImg.copy()
        elif camera == 'rgb':
            print('RGB camera calibration')
            images = self.rgbImg.copy()
        elif camera == 'thermal':
            print('Thermal camera calibration')
            images = self.thermaImg.copy()
        else:
            # NOTE(review): with an unknown camera name, `images` is never
            # bound and the next line raises NameError — consider raising
            # ValueError here instead.
            print('Add right camera')
        print('images -> {}'.format(np.shape(images)))
        objpoints, imgpoints, img_shape = [], [], 0
        # extract points
        for i, fname in enumerate(images):
            img = images[i]
            if camera == 'thermal':  # invert the thermal camera
                img = np.array(256 - img, dtype='uint8')
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            ret, corners = cv2.findChessboardCorners(gray, (10, 7), flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
            if ret:
                corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), self.term_criteria)
                cv2.drawChessboardCorners(img, (10, 7), corners2, ret)
                objpoints.append(self.objp)
                imgpoints.append(corners2)
            # else:
            #     print('No board at {}'.format(i))
            if self.see:
                if camera == 'thermal':
                    cv2.imshow('Image', img)
                else:
                    cv2.imshow('Image', cv2.resize(img, None, fx=.4, fy=.4))
                k = cv2.waitKey(wait)
                if k % 256 == 32:  # pressed space: stop previewing
                    self.see = False
                    cv2.destroyAllWindows()
            img_shape = gray.shape[::-1]
        print('Camera {} objpoints->{},imgpoints->{}, img_shape->{}'.format(camera, np.shape(objpoints),
                                                                            np.shape(imgpoints), img_shape))
        return objpoints, imgpoints, img_shape

    def calibrate(self, camera=None):
        '''
        perform internal (intrinsic) calibration for the given camera;
        returns the camera matrix K and distortion coefficients D
        '''
        objpoints, imgpoints, img_shape = self.read_points(camera)
        rms, K, D, _, _ = cv2.calibrateCamera(
            objectPoints=objpoints,
            imagePoints=imgpoints,
            imageSize=img_shape,
            cameraMatrix=None, distCoeffs=None,
            flags=0, criteria=self.term_criteria)
        print('{} camera calibration done with RMS:{}'.format(camera, rms))
        print('K')
        print(K)
        print('D')
        print(D)
        return K, D

    def stereoCalibrate(self, K_thermal, D_thermal, K, D, camera):  # camera in [mono, rgb]
        '''
        perform stereo calibration between the thermal camera and the given
        camera (mono or rgb); intrinsics are kept fixed.
        Returns (R, T, E, F): rotation, translation, essential and
        fundamental matrices from thermal to the second camera.
        '''
        objpoints = []  # 3d point in real world space
        imgpoints_l = []  # 2d points in image plane. - thermal camera
        imgpoints_r = []  # 2d points in image plane. - mono or rgb camera
        if camera == 'mono':
            Second_images = self.monoImg.copy()
        elif camera == 'rgb':
            Second_images = self.rgbImg.copy()
        images = self.thermaImg.copy()
        # extract points; only frames where BOTH cameras see the board are used
        for i, fname in enumerate(images):
            thermal_img = np.array(256 - images[i], dtype='uint8')
            thermal_gray = cv2.cvtColor(thermal_img, cv2.COLOR_BGR2GRAY)
            self.img_shape = thermal_gray.shape[::-1]
            thermal_ret, thermal_corners = cv2.findChessboardCorners(thermal_gray, (10, 7),
                                                                     flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
            img = Second_images[i]
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            self.second_img_shape = gray.shape[::-1]
            ret, corners = cv2.findChessboardCorners(gray, (10, 7), flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
            if thermal_ret and ret:
                objpoints.append(self.objp)
                imgpoints_l.append(thermal_corners)
                imgpoints_r.append(corners)
        print('Thermal -> {} cam, {}-poses'.format(camera, len(objpoints)))
        flags = cv2.CALIB_FIX_INTRINSIC
        rms_stereo, _, _, _, _, R, T, E, F = cv2.stereoCalibrate(
            objpoints, imgpoints_l, imgpoints_r, K_thermal, D_thermal, K, D, imageSize=None, criteria=self.term_criteria, flags=flags)
        print('Stereo calibraion Therma-{} done'.format(camera))
        print('rms_stereo:{}'.format(rms_stereo))
        print('Rotation R')
        print(R)
        print('Translation T')
        print(T)
        return R, T, E, F

    def doStuff(self):
        '''
        -Read all images for all cameras
        -Do internal calibration for each cam
        -Estimate R rotation and T translation between thermal cam and mono cam
        -Estimate R rotation and T translation between thermal cam and rgb cam
        -Save the data
        '''
        # Read all images
        self.read_images()
        # NOTE(review): the three calibrate() calls below go through the
        # module-level `calib` instance instead of `self` — works only when
        # run from the __main__ block; should probably be self.calibrate().
        # Calibrate mono camera
        K_mono, D_mono = calib.calibrate(camera='mono')
        # Calibrate rgb camera
        K_rgb, D_rgb = calib.calibrate(camera='rgb')
        # Calibrate thermal camera
        K_thermal, D_thermal = calib.calibrate(camera='thermal')
        # Stereo calibrate between Thermal and Mono camera
        R_th_mono, T_th_mono, E_th_mono, F_th_mono = self.stereoCalibrate(K_thermal, D_thermal, K_mono, D_mono, camera='mono')
        # Stereo calibrate between Thermal and Rgb camera
        R_th_rgb, T_th_rgb, E_th_rgb, F_th_rgb = self.stereoCalibrate(K_thermal, D_thermal, K_rgb, D_rgb, camera='rgb')

        calib_data = dict([('K_mono', K_mono), ('D_mono', D_mono),
                           ('K_rgb', K_rgb), ('D_rgb', D_rgb),
                           ('K_thermal', K_thermal), ('D_thermal', D_thermal),
                           ('R_th_mono', R_th_mono), ('T_th_mono', T_th_mono), ('E_th_mono', E_th_mono), ('F_th_mono', F_th_mono),
                           ('R_th_rgb', R_th_rgb), ('T_th_rgb', T_th_rgb), ('E_th_rgb', E_th_rgb), ('F_th_rgb', F_th_rgb),
                           ])
        # protocol=2 keeps the pickle readable from Python 2 tooling.
        with open('calib_data.pkl', 'wb') as f:
            pickle.dump(calib_data, f, protocol=2)
        print('calib_data.pkl Object saved')

    def testCalibration(self):
        '''
        -loads images
        -load the calibration data
        -check if patter is visible in all 3 images:
            -Estimate the extrinsic R,T from world to thermal camera
            -Use estimated R,T and reproject pixels from thermal camera to mono and rgb cam
        '''
        self.thermaImg, self.rgbImg, self.monoImg = [], [], []
        # Read all images
        self.read_images()
        with open('calib_data.pkl', 'rb') as f:
            calib_data = pickle.load(f)
        K_mono = calib_data['K_mono']
        D_mono = calib_data['D_mono']
        K_rgb = calib_data['K_rgb']
        D_rgb = calib_data['D_rgb']
        K_thermal = calib_data['K_thermal']
        D_thermal = calib_data['D_thermal']
        R_th_mono = calib_data['R_th_mono']
        T_th_mono = calib_data['T_th_mono']
        R_th_rgb = calib_data['R_th_rgb']
        T_th_rgb = calib_data['T_th_rgb']
        F = calib_data['F_th_rgb']
        # Define test the calibration-----------------------
        for i, fname in enumerate(self.thermaImg):
            thermal_img = np.array(256 - self.thermaImg[i], dtype='uint8')
            thermal_gray = cv2.cvtColor(thermal_img, cv2.COLOR_BGR2GRAY)
            thermal_ret, thermal_corners = cv2.findChessboardCorners(thermal_gray, (10, 7),
                                                                     flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
            mono_img = self.monoImg[i]
            mono_gray = cv2.cvtColor(mono_img, cv2.COLOR_BGR2GRAY)
            mono_ret, mono_corners = cv2.findChessboardCorners(mono_gray, (10, 7), flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
            rgb_img = self.rgbImg[i]
            rgb_gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
            rgb_ret, _ = cv2.findChessboardCorners(rgb_gray, (10, 7), flags=cv2.CALIB_CB_ADAPTIVE_THRESH)
            if thermal_ret and rgb_ret and mono_ret:
                thermal_corners2 = cv2.cornerSubPix(thermal_gray, thermal_corners, (11, 11), (-1, -1),
                                                    self.term_criteria)
                # Find the rotation and translation vectors.
                ret, rvecs, tvecs = cv2.solvePnP(self.objp, thermal_corners2, K_thermal, D_thermal)
                # project 3D points to thermal image plane
                imgpts_thermal, jac = cv2.projectPoints(self.axis[1:], rvecs, tvecs, K_thermal,
                                                        D_thermal)  # thermal camera frame
                thermaImg = self.draw(thermal_img, np.asarray(thermal_corners2).squeeze(),
                                      np.asarray(imgpts_thermal).squeeze())
                T_01 = np.vstack(
                    (np.hstack((cv2.Rodrigues(rvecs)[0], tvecs)), [0, 0, 0, 1]))  # from world to thermal camera
                # project thermal to rgb --------------------------------------------------------------------------------------
                T_12 = np.vstack((np.hstack((R_th_rgb, T_th_rgb)), [0, 0, 0, 1]))  # from thermal cam to rgb cam
                T = np.dot(T_12, T_01)  # world to rgb cam
                rotation, translation = T[:3, :3], T[:3, -1]
                imgpts_rgb, _ = cv2.projectPoints(self.axis, rotation, translation, K_rgb, D_rgb)
                imgpts_rgb = np.array(imgpts_rgb).squeeze()
                rgbImg = self.draw(rgb_img, [imgpts_rgb[0]], imgpts_rgb[1:])
                # project thermal to mono ------------------------------------------------------------------------------------
                '''T_12 = np.vstack((np.hstack((R_th_mono, T_th_mono)), [0, 0, 0, 1]))  # from thermal cam to mono cam
                T = np.dot(T_12, T_01)  # world to mono cam
                rotation, translation = T[:3, :3], T[:3, -1]
                imgpts_mono, _ = cv2.projectPoints(self.axis, rotation, translation, K_mono, D_mono)
                imgpts_mono = np.array(imgpts_mono).squeeze()
                monoImg = self.draw(mono_img, [imgpts_mono[0]], imgpts_mono[1:])'''
                thermal_corners2 = np.array(thermal_corners2).squeeze()
                x_1 = thermal_corners2[0]  # pixel in thermal camera
                x_1 = np.array([x_1[0], x_1[1], 1])
                print(x_1)
                '''Z = 1
                Z = tvecs[-1]
                print('tvecs -> {}, Z:{}'.format(tvecs,Z))
                x_1 = x_1*Z
                X_cam1 = np.linalg.inv(K_thermal).dot(x_1)
                X_cam1 = np.array([X_cam1[0],X_cam1[1],X_cam1[2],1])
                print('X_cam1 -> {}'.format(X_cam1))
                P = np.hstack((R_th_rgb, T_th_rgb))  # from thermal cam to rgb cam
                print(P)
                x_2 = K_rgb.dot(P) @ X_cam1
                print('x_2 -> {}'.format(x_2))
                x_2 = np.array([x_2[0]/x_2[-1],x_2[1]/x_2[-1]]).astype(int)
                print('x_2 -> {}'.format(x_2))
                print('rgbImg -> {}'.format(np.shape(rgbImg)))
                cv2.circle(rgbImg, (x_2[0], x_2[1]), 12, (0, 255, 0), 12)
                cv2.circle(thermaImg, (thermal_corners2[0][0], thermal_corners2[0][1]), 6, (0, 255, 0), 6)'''
                print('F')
                print(F)
                # x_1 * F * x_2 = 0
                # NOTE(review): x1/x2 below are the full images reshaped to
                # (H*W, 3), not homogeneous corner pixels — the epipolar
                # constraint check x1·F·x2^T as written allocates a huge
                # matrix and does not test the calibration; likely leftover
                # debugging code to be revisited.
                x1 = np.asarray(thermaImg).reshape(-1, 3)
                x2 = np.asarray(rgbImg).reshape(-1, 3)
                print('x1:{}, F:{}, x2:{}'.format(np.shape(x1), np.shape(F), np.shape(x2)))
                x1F = x1 @ F
                print('x1 * F = {}'.format(np.shape(x1F)))
                x1Fx2 = x1F.dot(x2.T)
                print('x1Fx2= {}'.format(np.shape(x1Fx2)))
                cv2.imshow('thermal_img', thermaImg)
                # cv2.imshow('monoImg', cv2.resize(monoImg, None, fx=.4, fy=.4))
                cv2.imshow('rgbImg', cv2.resize(rgbImg, None, fx=.3, fy=.3))
                cv2.waitKey(0)
                cv2.destroyAllWindows()
if __name__ == '__main__':
    # Directory containing the captured themal/rgb/monochrome PNG sequences.
    path = '/home/eugeniu/cool'
    calib = StereoChess_Calibrator(path)

    # calib.doStuff()  # this function loads the data, does internal and stereo calibration -> saves the data
    calib.testCalibration()
| 44.274788 | 134 | 0.550259 | 1,950 | 15,629 | 4.236923 | 0.161538 | 0.012709 | 0.018155 | 0.017429 | 0.298959 | 0.230816 | 0.183733 | 0.144275 | 0.10046 | 0.088356 | 0 | 0.032379 | 0.318255 | 15,629 | 352 | 135 | 44.400568 | 0.743031 | 0.173524 | 0 | 0.15311 | 0 | 0 | 0.072117 | 0.004552 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038278 | false | 0 | 0.023923 | 0 | 0.086124 | 0.129187 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6726617c09fe5e7abbd64cdec30ed1653450f24 | 4,851 | py | Python | process_design.py | Multiscale-Genomics/C-HiC | 65e189acc79f5420a276a2f7fd740cb2a3ae8e27 | [
"Apache-2.0"
] | null | null | null | process_design.py | Multiscale-Genomics/C-HiC | 65e189acc79f5420a276a2f7fd740cb2a3ae8e27 | [
"Apache-2.0"
] | 1 | 2018-09-06T12:27:49.000Z | 2018-09-06T12:27:49.000Z | process_design.py | Multiscale-Genomics/CHi-C | 65e189acc79f5420a276a2f7fd740cb2a3ae8e27 | [
"Apache-2.0"
] | 1 | 2021-01-28T23:44:37.000Z | 2021-01-28T23:44:37.000Z | #!/usr/bin/env python
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import argparse
from basic_modules.workflow import Workflow
from utils import logger
from CHiC.tool.makeDesignFiles import makeDesignFilesTool
#####################################################
class process_design(Workflow):
    """
    This class generates the design files that CHiCAGO takes as input,
    starting from .rmap and .baitmap files.
    """

    def __init__(self, configuration=None):
        """
        Initiate the class

        Parameters
        ----------
        configuration: dict
            dictionary with parameters for different tools from the class
            indicating how to run each of them.
        """
        logger.info("Generating CHiCAGO input Design files")
        if configuration is None:
            configuration = {}
        # NOTE(review): self.configuration is never initialised here —
        # presumably the Workflow base class provides it; verify.
        self.configuration.update(configuration)

    def run(self, input_files, metadata, output_files):
        """
        Main function to run the makeDesignFiles tool.

        Parameters
        ----------
        input_files: dict
            RMAP: path to the .rmap file
            BAITMAP: path to the .baitmap file
        metadata: dict
            input metadata for RMAP and BAITMAP
        output_files: dict
            nbpb/npb/poe: paths for the generated design files

        Returns
        -------
        design_out: dict
            output files produced by the tool
        design_meta: dict
            output metadata
        """
        try:
            design_caller = makeDesignFilesTool(self.configuration)
            design_out, design_meta = design_caller.run(
                {
                    "RMAP" : input_files["RMAP"],
                    "BAITMAP": input_files["BAITMAP"]
                },
                {
                    "RMAP" : metadata["RMAP"],
                    "BAITMAP" : metadata["BAITMAP"]
                },
                {
                    "nbpb" : output_files["nbpb"],
                    "npb" : output_files["npb"],
                    "poe" : output_files["poe"]
                }
            )

            logger.info("design files succesfully generated =)")
            return design_out, design_meta

        except IOError:
            # NOTE(review): the concatenated message is missing a space
            # ("...failed togenerate..."); also nothing is returned on this
            # path, so callers receive None — confirm intended behaviour.
            logger.fatal("process_makeDesign failed to" +
                         "generate design files")
#############################################################
def main_json(config, in_metadata, out_metadata):
    """Launch the process_design pipeline via the JSON-driven app wrapper.

    The three arguments are paths to the JSON configuration file and the
    input/output metadata files; returns whatever JSONApp.launch produces.
    """
    from apps.jsonapp import JSONApp

    # 1. Instantiate and launch the app
    print("Instantiate and launch the App")
    launcher = JSONApp()
    outcome = launcher.launch(process_design,
                              config,
                              in_metadata,
                              out_metadata)

    # 2. The app has finished
    print("2. Execution finished: see " + out_metadata)
    print(outcome)

    return outcome
#########################################################
if __name__ == "__main__":
    # set up the command line parameters
    # NOTE(review): the description says ".baitmap file" but this pipeline
    # generates CHiCAGO design files — confirm and update the help text.
    PARSER = argparse.ArgumentParser(
        description="Pipeline to generate .baitmap file")
    PARSER.add_argument("--config", help="Configuration file")
    PARSER.add_argument(
        "--in_metadata", help="Location of metadata file")
    PARSER.add_argument(
        "--out_metadata", help="Location of output metadata file")
    PARSER.add_argument(
        "--local", action="store_const", const=True, default=False)

    # Get matching parameters from the command line
    ARGS = PARSER.parse_args()

    CONFIG = ARGS.config
    IN_METADATA = ARGS.in_metadata
    OUT_METADATA = ARGS.out_metadata
    LOCAL = ARGS.local

    if LOCAL:
        import sys
        sys._run_from_cmdl = True  # pylint: disable=protected-access

    RESULTS = main_json(CONFIG, IN_METADATA, OUT_METADATA)

    print(RESULTS)
| 29.760736 | 76 | 0.59328 | 525 | 4,851 | 5.36 | 0.384762 | 0.027363 | 0.012793 | 0.029851 | 0.084932 | 0.024876 | 0.024876 | 0 | 0 | 0 | 0 | 0.002351 | 0.298495 | 4,851 | 162 | 77 | 29.944444 | 0.824567 | 0.37518 | 0 | 0.046154 | 0 | 0 | 0.161027 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046154 | false | 0 | 0.107692 | 0 | 0.2 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6746a5b4942abdb9bcb80fa96fed7162930c929 | 5,136 | py | Python | em2/utils/web_push.py | samuelcolvin/em2 | a587eaa80c09a2b44d9c221d09a563aad5b05d78 | [
"MIT"
] | 5 | 2019-03-20T19:07:45.000Z | 2020-10-03T01:16:05.000Z | em2/utils/web_push.py | samuelcolvin/em2 | a587eaa80c09a2b44d9c221d09a563aad5b05d78 | [
"MIT"
] | 51 | 2019-03-12T16:19:46.000Z | 2021-03-09T00:52:24.000Z | em2/utils/web_push.py | samuelcolvin/em2 | a587eaa80c09a2b44d9c221d09a563aad5b05d78 | [
"MIT"
] | 1 | 2019-05-31T14:41:18.000Z | 2019-05-31T14:41:18.000Z | import asyncio
import base64
import hashlib
import logging
import re
import time
from typing import Optional
import http_ece
import ujson
from aiohttp import ClientSession
from arq import ArqRedis
from atoolbox import JsonErrors, RequestError
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import ec
from py_vapid import Vapid02 as Vapid
from pydantic import BaseModel, HttpUrl
from em2.core import get_flag_counts
from em2.settings import Settings
from em2.utils.db import Connections
logger = logging.getLogger('em2.web_push')
def web_push_user_key_prefix(user_id):
    """Return the redis key prefix under which a user's subscriptions live."""
    return 'web-push-subs:{}:'.format(user_id)
class SubscriptionModel(BaseModel):
    """
    A browser push subscription, as produced by PushSubscription.toJSON():
    https://developer.mozilla.org/en-US/docs/Web/API/PushSubscription/toJSON
    """

    endpoint: HttpUrl
    expirationTime: Optional[int]

    class SubKeys(BaseModel):
        p256dh: bytes
        auth: bytes

    keys: SubKeys

    def hash(self):
        # Stable fingerprint of the subscription (endpoint + both keys),
        # used as the per-user redis key suffix.
        fingerprint = self.endpoint.encode() + b'|' + self.keys.p256dh + b'|' + self.keys.auth
        return hashlib.md5(fingerprint).hexdigest()
async def subscribe(conns: Connections, client_session: ClientSession, sub: SubscriptionModel, user_id):
    """Store a new push subscription and post the user's state to it.

    The subscription is kept in redis for 24h; the client is expected to
    re-subscribe.  Raises HTTPUnauthorized if the user does not exist.
    """
    redis_key = web_push_user_key_prefix(user_id) + sub.hash()
    # we could use expirationTime here, but it seems to generally be null
    await conns.redis.setex(redis_key, 86400, sub.json())
    msg = await conns.main.fetchval(
        """
        select json_build_object('user_v', v, 'user_id', id)
        from users where id=$1
        """,
        user_id,
    )
    if not msg:
        raise JsonErrors.HTTPUnauthorized('user not found')
    await _sub_post(conns, client_session, sub, user_id, msg)
async def unsubscribe(conns: Connections, sub: SubscriptionModel, user_id):
    """Forget the stored subscription for this user/endpoint, if any."""
    await conns.redis.delete(web_push_user_key_prefix(user_id) + sub.hash())
async def web_push(ctx, actions_data: str):
    """Fan a batch of actions out to every participant's push subscriptions.

    Returns the total number of pushes sent, or a short message when web
    push is not configured.
    """
    settings: Settings = ctx['settings']
    if not settings.vapid_private_key or not settings.vapid_sub_email:
        return 'web push not configured'

    session: ClientSession = ctx['client_session']
    data = ujson.loads(actions_data)
    participants = data.pop('participants')
    # hack to avoid building json for every user, remove the ending "}" so extra json can be appended
    msg_json_chunk = ujson.dumps(data)[:-1]
    counts = await asyncio.gather(
        *(_user_web_push(ctx, session, p, msg_json_chunk) for p in participants)
    )
    return sum(counts)
async def _user_web_push(ctx, session: ClientSession, participant: dict, msg_json_chunk: str):
    """Post the (almost-built) push message to every subscription of one user.

    Returns the number of subscriptions the message was posted to.
    """
    user_id = participant['user_id']
    match = web_push_user_key_prefix(user_id) + '*'
    subs = []
    redis: ArqRedis = ctx['redis']
    # SCAN through this user's subscription keys; the cursor starts at b'0'
    # and becomes falsy once the whole keyspace has been walked.
    with await redis as conn:
        cur = b'0'
        while cur:
            cur, keys = await conn.scan(cur, match=match)
            for key in keys:
                subs.append(await conn.get(key))
    if subs:
        async with ctx['pg'].acquire() as conn:
            conns = Connections(conn, redis, ctx['settings'])
            participant['flags'] = await get_flag_counts(conns, user_id)
            # Complete the pre-built json chunk (its closing "}" was removed)
            # with this participant's own fields.
            msg = msg_json_chunk + ',' + ujson.dumps(participant)[1:]
            subs = [SubscriptionModel(**ujson.loads(s)) for s in subs]
            await asyncio.gather(*[_sub_post(conns, session, s, user_id, msg) for s in subs])
            return len(subs)
    else:
        return 0
async def _sub_post(conns: Connections, session: ClientSession, sub: SubscriptionModel, user_id: int, msg: str):
    """Encrypt *msg* and POST it to a single push-service endpoint.

    Unsubscribes on 410 (subscription gone) and on the known
    'invalid JWT' 403 response; raises RequestError for any other
    non-201 status.
    """
    # Encrypt the payload with a fresh P-256 key (aes128gcm content encoding).
    body = http_ece.encrypt(
        msg.encode(),
        private_key=ec.generate_private_key(ec.SECP256R1, default_backend()),
        dh=_prepare_vapid_key(sub.keys.p256dh),
        auth_secret=_prepare_vapid_key(sub.keys.auth),
        version=vapid_encoding,
    )
    async with session.post(sub.endpoint, data=body, headers=_vapid_headers(sub, conns.settings)) as r:
        text = await r.text()
        if r.status == 410:
            # subscription has expired or been revoked, forget it
            await unsubscribe(conns, sub, user_id)
        elif r.status == 403 and text == 'invalid JWT provided\n':
            # seems to happen with https://fcm.googleapis.com/fcm/send/...
            await unsubscribe(conns, sub, user_id)
        elif r.status != 201:
            logger.error(
                # fix: was a spurious f-string; logging uses lazy %-style args
                'unexpected response from webpush %s: %s',
                r.status,
                repr(text[:100]),
                extra={'headers': dict(r.headers), 'text': text, 'url': sub.endpoint},
            )
            raise RequestError(r.status, sub.endpoint, text=text)
# Content encoding used for both payload encryption and the push request headers.
vapid_encoding = 'aes128gcm'
# Extracts the scheme + host of a subscription endpoint for the JWT "aud" claim.
aud_re = re.compile('https?://[^/]+')
def _vapid_headers(sub: SubscriptionModel, settings: Settings):
    """Build the signed VAPID headers (RFC 8292) for a push request."""
    vapid_claims = {
        # audience is the push service's origin (scheme + host only)
        'aud': aud_re.match(sub.endpoint).group(0),
        'sub': 'mailto:' + settings.vapid_sub_email,
        # fix: the JWT expiry claim is "exp" (RFC 8292), not "ext" — with the
        # typo, py_vapid silently used its own default expiry instead of 300s
        'exp': int(time.time()) + 300,
    }
    return {
        'ttl': '60',
        'content-encoding': vapid_encoding,
        **Vapid.from_string(private_key=settings.vapid_private_key).sign(vapid_claims),
    }
def _prepare_vapid_key(data: bytes) -> bytes:
return base64.urlsafe_b64decode(data + b'===='[: len(data) % 4])
| 33.350649 | 112 | 0.672702 | 681 | 5,136 | 4.9163 | 0.325991 | 0.030466 | 0.013142 | 0.016726 | 0.135603 | 0.096774 | 0.077061 | 0.061529 | 0.061529 | 0.037037 | 0 | 0.014353 | 0.213201 | 5,136 | 153 | 113 | 33.568627 | 0.814155 | 0.067757 | 0 | 0.035398 | 0 | 0 | 0.059455 | 0.005151 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035398 | false | 0 | 0.168142 | 0.026549 | 0.318584 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b677bba2bd9632d1c8e6d24dee8f3549fac2379b | 14,928 | py | Python | wrapper/utils.py | DaWeSearch/backend | 809e575ed730fce55d0e89a2fbc2031ba116f5e0 | [
"MIT"
] | 1 | 2021-02-15T01:05:22.000Z | 2021-02-15T01:05:22.000Z | wrapper/utils.py | DaWeSearch/backend | 809e575ed730fce55d0e89a2fbc2031ba116f5e0 | [
"MIT"
] | null | null | null | wrapper/utils.py | DaWeSearch/backend | 809e575ed730fce55d0e89a2fbc2031ba116f5e0 | [
"MIT"
] | null | null | null | """Helper functions useful for all wrapper classes."""
import re
from typing import Callable, Optional, Union
from urllib.parse import quote_plus
from requests import exceptions, Response
from .output_format import OUTPUT_FORMAT
def get(nest: Union[dict, list, str], *args, default=None):
    """Safely walk a chain of keys/indices through nested containers.

    Args:
        nest: The object containing nested mappings and sequences.
        *args: The keys/indices to follow, in order.
        default: Value returned when any lookup fails (missing key, index
            out of range, non-subscriptable value) or when `nest`/`args`
            is empty.  Defaults to `None`.

    Returns:
        The value reached by following all of `args` through `nest`,
        or `default` when that is not possible.

    Examples:
        >>> utils.get({"foo": {"bar": [1,2,3]}}, "foo", "bar", 2)
        3
        >>> utils.get("foobar", 3)
        'b'
        >>> utils.get([1,2,3], 4, default=-1)
        -1
    """
    if not nest or not args:
        return default
    current = nest
    for step in args:
        try:
            current = current[step]
        except (TypeError, IndexError, KeyError):
            return default
    return current
def build_group(items: [str], match: str, match_pad: str = " ", negater: str = "NOT ") -> str:
    """Join *items* into a parenthesized search group connected by *match*.

    Args:
        items: The search terms/sub-groups to connect.
        match: One of "AND", "OR", "NOT".  "NOT" connects with "OR" and
            prefixes the whole group with *negater*.
        match_pad: Padding characters placed around *match*.
        negater: Characters used to negate a group.

    Returns:
        The assembled search group string.

    Raises:
        ValueError: When *match* is not one of the supported connectors.

    Examples:
        >>> print(build_group(["foo", "bar", "baz"], "AND", match_pad="_"))
        (foo_AND_bar_AND_baz)
        >>> print(build_group(["foo", "bar", "baz"], "NOT", negater="-"))
        -(foo OR bar OR baz)
    """
    if match not in ("AND", "OR", "NOT"):
        raise ValueError("Unknown match.")

    prefix = ""
    if match == "NOT":
        # A negated group is an OR group with the negater in front.
        prefix = negater
        match = "OR"

    separator = match_pad + match + match_pad
    return prefix + "(" + separator.join(items) + ")"
def clean_output(out: dict, format_dict: dict = OUTPUT_FORMAT):
    """Delete fields from the returned JSON that the output format does not define.

    Args:
        out: The returned JSON; modified in place.
        format_dict: Override the output format.
    """
    # Materialize the key list first: deleting while iterating a dict raises
    # "RuntimeError: dictionary changed size during iteration".
    for key in list(out):
        if key not in format_dict:
            del out[key]
def invalid_output(
        query: dict, db_query: Union[str, dict], api_key: str, error: str, start_record: int,
        page_length: int) -> dict:
    """Create and return the output for a failed request.

    Args:
        query: The query in the format defined in wrapper/input_format.py.
        db_query: The query that was sent to the API in its language.
        api_key: The key used for the request.
        error: The error message returned.
        start_record: The index of the first record requested.
        page_length: The page length requested.

    Returns:
        A dict containing the passed values, with "-1" as the total where
        necessary to remain compliant with wrapper/output_format.py.
    """
    return {
        "query": query,
        "dbQuery": db_query,
        "apiKey": api_key,
        "error": error,
        # Sentinel counts mark the result set as unavailable.
        "result": {
            "total": "-1",
            "start": str(start_record),
            "pageLength": str(page_length),
            "recordsDisplayed": "0",
        },
        "records": [],
    }
def request_error_handling(req_func: Callable[..., Response], req_kwargs: dict, max_retries: int,
                           invalid: dict) -> Optional[Response]:
    """Make an HTTP request and handle errors that may occur.

    Args:
        req_func: The function that makes the HTTP request.
            For example `requests.put`.
        req_kwargs: The arguments that will be unpacked and passed to `req_func`.
        max_retries: Number of additional attempts made after a timeout
            before giving up.
        invalid: A dictionary conforming to wrapper/output_format.py. It will be modified if an
            error occurs ("error" field will be set).

    Returns:
        If no errors occur, the return of `req_func` will be returned. Otherwise `None` will be
        returned and `invalid` modified.
    """
    for i in range(max_retries + 1):
        try:
            response = req_func(**req_kwargs)
            # Raise an HTTP error if there were any
            response.raise_for_status()
        except exceptions.HTTPError as err:
            invalid["error"] = "HTTP error: " + str(err)
            return None
        except exceptions.ConnectionError as err:
            # Connection failures are not retried, only timeouts are.
            invalid["error"] = "Connection error: Failed to establish a connection: " \
                               "Name or service not known."
            return None
        except exceptions.Timeout as err:
            if i < max_retries:
                # Try again
                continue
            # Too many failed attempts
            invalid["error"] = "Connection error: Failed to establish a connection: Timeout."
            return None
        except exceptions.RequestException as err:
            # Catch-all for other requests failures; must come last because
            # the exceptions above are subclasses of RequestException.
            invalid["error"] = "Request error: " + str(err)
            return None
        # request successful
        break
    return response
def translate_get_query(query: dict, match_pad: str, negater: str, connector: str) -> str:
    """Translate a GET query.

    Translate a query in format `wrapper/input_format.py` into a string that can
    be used in the query part of the url of GET requests.

    Args:
        query: The query complying to `wrapper/input_format.py`.
            NOTE: this dict is modified in place — search terms are quoted
            and url-encoded, and each group is collapsed into its built
            string.
        match_pad: The padding around the match values.
        negater: The negater used for negating a search group.
        connector: The connector between the different parameters.

    Returns:
        The translated query.

    Raises:
        ValueError: When a group uses "NOT" while the query-level match is
            "OR" (only AND NOT is supported).
    """
    # NOTE(review): despite the original comment claiming a deep copy, none
    # is made — the caller's query dict is mutated in place when the terms
    # are quoted and url-encoded below.
    groups = query.get("search_groups", [])
    for i in range(len(groups)):
        if groups[i].get("match") == "NOT" and query["match"] == "OR":
            raise ValueError("Only AND NOT supported.")
        for j in range(len(groups[i].get("search_terms", []))):
            term = groups[i].get("search_terms")[j]
            # Enclose search term in quotes if it contains a space and is not
            # quoted already to prevent splitting.
            if " " in term:
                if term[0] != '"':
                    term = '"' + term
                if term[-1] != '"':
                    term += '"'
            # Urlencode search term
            groups[i].get("search_terms")[j] = quote_plus(term)
        # Collapse this group's terms into one parenthesized expression.
        groups[i] = build_group(
            groups[i].get("search_terms", []), groups[i].get("match"), match_pad, negater
        )
    # Combine all groups under the query-level match.
    search_terms = build_group(groups, query.get("match"), match_pad, negater)
    query_str = ""
    for field in query.get("fields") or []:
        query_str += field + search_terms + connector
    # Drop the trailing connector.
    return query_str[:-len(connector)]
def build_get_query(params: dict, delim: str, connector: str) -> str:
    """Build a manual GET query from set parameters.

    Build a string that can be used in the query part of the url of a GET
    request from a dictionary containing the search parameters.

    Args:
        params: Dictionary of key, value pairs.
        delim: Delimiter between key and value.
        connector: Connector between different pairs.

    Returns:
        Built query.
    """
    pairs = []
    for key, value in params.items():
        # Enclose value in quotes if it contains a space and is not quoted
        # already to prevent splitting.
        if " " in value:
            if value[0] != '"':
                value = '"' + value
            if value[-1] != '"':
                value += '"'
        # Url encode the value and add the pair
        pairs.append(key + delim + quote_plus(value))
    # fix: joining avoids the trailing-slice `url[:-len(connector)]`, which
    # returned '' for the whole query whenever connector was empty
    # (s[:-0] == '').
    return connector.join(pairs)
# English stop words excluded from keyword counting.  Based on the Glasgow IR
# stop-word list (http://ir.dcs.gla.ac.uk/resources/linguistic_utils/stop_words),
# with "did" added.
STOP_WORDS = [
    'a', 'about', 'above', 'across', 'after', 'afterwards', 'again', 'against',
    'all', 'almost', 'alone', 'along', 'already', 'also', 'although', 'always',
    'am', 'among', 'amongst', 'amoungst', 'amount', 'an', 'and', 'another',
    'any', 'anyhow', 'anyone', 'anything', 'anyway', 'anywhere', 'are',
    'around', 'as', 'at', 'back', 'be', 'became', 'because', 'become',
    'becomes', 'becoming', 'been', 'before', 'beforehand', 'behind', 'being',
    'below', 'beside', 'besides', 'between', 'beyond', 'bill', 'both', 'bottom',
    'but', 'by', 'call', 'can', 'cannot', 'cant', 'co', 'computer', 'con',
    'could', 'couldnt', 'cry', 'de', 'describe', 'detail', 'did', 'do', 'done',
    'down', 'due', 'during', 'each', 'eg', 'eight', 'either', 'eleven', 'else',
    'elsewhere', 'empty', 'enough', 'etc', 'even', 'ever', 'every', 'everyone',
    'everything', 'everywhere', 'except', 'few', 'fifteen', 'fify', 'fill',
    'find', 'fire', 'first', 'five', 'for', 'former', 'formerly', 'forty',
    'found', 'four', 'from', 'front', 'full', 'further', 'get', 'give', 'go',
    'had', 'has', 'hasnt', 'have', 'he', 'hence', 'her', 'here', 'hereafter',
    'hereby', 'herein', 'hereupon', 'hers', 'herself', 'him', 'himself', 'his',
    'how', 'however', 'hundred', 'i', 'ie', 'if', 'in', 'inc', 'indeed',
    'interest', 'into', 'is', 'it', 'its', 'itself', 'keep', 'last', 'latter',
    'latterly', 'least', 'less', 'ltd', 'made', 'many', 'may', 'me',
    'meanwhile', 'might', 'mill', 'mine', 'more', 'moreover', 'most', 'mostly',
    'move', 'much', 'must', 'my', 'myself', 'name', 'namely', 'neither',
    'never', 'nevertheless', 'next', 'nine', 'no', 'nobody', 'none', 'noone',
    'nor', 'not', 'nothing', 'now', 'nowhere', 'of', 'off', 'often', 'on',
    'once', 'one', 'only', 'onto', 'or', 'other', 'others', 'otherwise', 'our',
    'ours', 'ourselves', 'out', 'over', 'own', 'part', 'per', 'perhaps',
    'please', 'put', 'rather', 're', 'same', 'see', 'seem', 'seemed', 'seeming',
    'seems', 'serious', 'several', 'she', 'should', 'show', 'side', 'since',
    'sincere', 'six', 'sixty', 'so', 'some', 'somehow', 'someone', 'something',
    'sometime', 'sometimes', 'somewhere', 'still', 'such', 'system', 'take',
    'ten', 'than', 'that', 'the', 'their', 'them', 'themselves', 'then',
    'thence', 'there', 'thereafter', 'thereby', 'therefore', 'therein',
    'thereupon', 'these', 'they', 'thick', 'thin', 'third', 'this', 'those',
    'though', 'three', 'through', 'throughout', 'thru', 'thus', 'to',
    'together', 'too', 'top', 'toward', 'towards', 'twelve', 'twenty', 'two',
    'un', 'under', 'until', 'up', 'upon', 'us', 'very', 'via', 'was', 'we',
    'well', 'were', 'what', 'whatever', 'when', 'whence', 'whenever', 'where',
    'whereafter', 'whereas', 'whereby', 'wherein', 'whereupon', 'wherever',
    'whether', 'which', 'while', 'whither', 'who', 'whoever', 'whole', 'whom',
    'whose', 'why', 'will', 'with', 'within', 'without', 'would', 'yet', 'you',
    'your', 'yours', 'yourself', 'yourselves',
]
def into_keywords_format(keywords: dict) -> list:
    """Convert a keyword -> count mapping into the output-format list.

    Args:
        keywords: A dictionary that contains a counter for every keyword.

    Returns:
        A list of {"text": keyword, "value": count} dicts, as specified in
        wrapper/output_format.py.
    """
    return [{"text": word, "value": count} for word, count in keywords.items()]
def from_keywords_format(keywords: list) -> dict:
    """Convert a list of keywords in the output format back into a dictionary.

    Args:
        keywords: A list in the format specified in wrapper/output_format.py.

    Returns:
        Mapping from keyword ("text", default "Unknown") to its counter
        ("value", default 0).
    """
    return {kw.get("text", "Unknown"): kw.get("value", 0) for kw in keywords}
def titles_to_keywords(titles: str) -> list:
    """Count word frequencies in the concatenated titles and format them.

    Args:
        titles: A string containing all titles concatenated.

    Returns:
        A list in the format specified in ["facets"]["keywords"] in
        wrapper/output_format.py.
    """
    # Keep only alphanumerics and spaces, lowercase, then split on spaces.
    cleaned = re.sub(r"[^a-zA-Z0-9 ]+", "", titles).lower()
    freqs = {}
    for word in cleaned.split(" "):
        # Skip stop words; everything else is tallied.
        if word in STOP_WORDS:
            continue
        freqs[word] = freqs.get(word, 0) + 1
    # Convert into the output format.
    return into_keywords_format(freqs)
def combine_facets(facets: list) -> dict:
    """Combine facets.

    Combine the facet counters ("countries" and "keywords") of different
    wrappers into a single tally.

    Args:
        facets: List of the facets dictionaries.
            NOTE: The first element will be modified! (its "countries"
            mapping is reused as the accumulator, not copied)

    Returns:
        The combined facets: a dict with a "countries" mapping and a
        "keywords" list in the into_keywords_format() shape.
    """
    total = {
        "countries": {},
        "keywords": {},
    }
    # Save one iteration.
    if len(facets) == 0:
        return total
    # Seed the totals with the first wrapper's facets; "keywords" is kept
    # as a word -> count dict while accumulating.
    total["countries"] = get(facets, 0, "countries", default={})
    total["keywords"] = from_keywords_format(get(facets, 0, "keywords", default=[]))
    # Combine the rest.
    for i in range(1, len(facets)):
        if not isinstance(facets[i], dict):
            continue
        for category in facets[i]:
            if category not in total:
                continue
            for facet in get(facets, i, category, default=[]):
                if category == "countries":
                    # presumably a name -> count mapping; a missing count
                    # defaults to 1 -- confirm against the wrapper outputs
                    key = facet
                    value = get(facets, i, category, facet, default=1)
                elif category == "keywords":
                    # Bring in dict format
                    key, value = list(from_keywords_format([facet]).items())[0]
                else:
                    continue
                if key in total[category]:
                    total[category][key] += int(value)
                else:
                    total[category][key] = int(value)
    # Convert the accumulated keyword counts back to the list output format.
    total["keywords"] = into_keywords_format(total["keywords"])
    return total
b6795fc3685731a886d5d284ea5740aaa1d445a0 | 1,179 | py | Python | serverdensity/proxy/runserver.py | serverdensity/sd-proxy | 3726b391e0e40258a3e58004568c9737898f4b01 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2016-08-12T17:49:23.000Z | 2016-08-12T17:49:23.000Z | serverdensity/proxy/runserver.py | serverdensity/sd-proxy | 3726b391e0e40258a3e58004568c9737898f4b01 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | serverdensity/proxy/runserver.py | serverdensity/sd-proxy | 3726b391e0e40258a3e58004568c9737898f4b01 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """Main WSGI server runner for sd-proxy
"""
import os
import logging
from sys import argv, path, stderr, exit
from gevent.wsgi import WSGIServer
class VersionedWSGIServer(WSGIServer):
    """WSGIServer that advertises a custom server version string.

    The version ends up in the SERVER_SOFTWARE entry of each request's
    WSGI environment.
    """
    def __init__(self, server_version, *args, **kwargs):
        # NOTE(review): base_env looks like a class-level dict on gevent's
        # WSGIServer, so this presumably mutates state shared by all
        # instances — harmless with a single server, but confirm before
        # creating several with different versions.
        self.base_env['SERVER_SOFTWARE'] = server_version
        super(VersionedWSGIServer, self).__init__(*args, **kwargs)
def run(app, port=8889, listener=None):
    """Serve *app* forever on *listener* (or on all interfaces at *port*).

    The server identifies itself as 'sd-proxy/<app version>'.
    """
    bind = listener if listener is not None else ('', port)
    server_version = 'sd-proxy/%s' % (app._version,)
    VersionedWSGIServer(server_version, bind, app).serve_forever()
def main():
    """Entry point: read the config path from argv and start the server.

    Returns 0 on success, 1 on usage error.
    """
    # fix: argv[0] is always the program name, so the old `len(argv) < 1`
    # check could never trigger and a missing argument crashed with an
    # IndexError at argv[1]; a config path requires len(argv) >= 2.
    if len(argv) < 2:
        # stderr.write works on both Python 2 and 3, unlike `print >>`.
        stderr.write('Please provide a path to your config file.\n')
        return 1
    os.environ['SD_PROXY_CONFIG'] = argv[1]
    from serverdensity.proxy import settings, setup_logging
    from serverdensity.proxy.app import app
    setup_logging(app)
    app.debug = settings.debug
    app.logger.info('Starting sd-proxy on port %s..' % (settings.port,))
    run(app, settings.port)
    return 0
if __name__ == '__main__':
    # Running from a source checkout: make the package importable by
    # prepending the project root (three directories up) to sys.path.
    path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
    exit(main())
| 24.061224 | 79 | 0.679389 | 155 | 1,179 | 4.96129 | 0.425806 | 0.036411 | 0.050715 | 0.039012 | 0.050715 | 0.050715 | 0 | 0 | 0 | 0 | 0 | 0.009504 | 0.196777 | 1,179 | 48 | 80 | 24.5625 | 0.802534 | 0.030534 | 0 | 0 | 0 | 0 | 0.106514 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.206897 | 0 | 0.413793 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b679e07e4853c1ac8702e21a433f7100a58636c7 | 1,553 | py | Python | dblp/python/citations.py | DocSeven/spark | a88330f554a4afc70696dac8d00bcf4d2f512acf | [
"Apache-2.0"
] | null | null | null | dblp/python/citations.py | DocSeven/spark | a88330f554a4afc70696dac8d00bcf4d2f512acf | [
"Apache-2.0"
] | null | null | null | dblp/python/citations.py | DocSeven/spark | a88330f554a4afc70696dac8d00bcf4d2f512acf | [
"Apache-2.0"
] | 1 | 2019-11-06T11:29:31.000Z | 2019-11-06T11:29:31.000Z | import citationsCommon
def countByIdAndYear(rdd):
docsplit = rdd.flatMap(lambda row:
[('{}.{}'.format(ref, row[2]), 1) for ref in row[1]])
return docsplit.reduceByKey(lambda c, d: c + d)
def joinIdYearAge(idYearCount, ddpairs):
# idYear: id, year cited
idYear = idYearCount.map(lambda row: (row[0][:-5], int(row[0][-4:])))
# ddpairs is expected to be: id, year published
# idYearAge: id, year cited - year published
return idYear.join(ddpairs).filter(lambda row: (row[1][0] - row[1][1] >= -2)).map(
lambda row: ('{}.{}'.format(row[0], row[1][0]), (row[1][0] - row[1][1])))
def citationCountArrays(idYearAge, idYearCount):
p2Afunc = citationsCommon.pairsToArrayHelper.pairsToArray
return idYearAge.join(idYearCount).map(
lambda row: (row[0][:-5], [(row[1][0], row[1][1])])).reduceByKey(
lambda c, d: c + d).mapValues(lambda x: p2Afunc(x))
# df is the dataframe read from json before we've filtered out rows where
# references is NULL
# partitionCount says how many partitions to coalesce the intermediate
# data to.
def citationCountsE2E(df, partitionCount=34):
dd = df.select("id", "references", "year").filter("references is not NULL").rdd
idYearCount = countByIdAndYear(dd)
# For publication dates, include publications with no references.
idYearAge = joinIdYearAge(idYearCount, df.select("id", "year").rdd)
citCountArrays = citationCountArrays(idYearAge.coalesce(partitionCount),
idYearCount)
return citCountArrays
| 41.972973 | 86 | 0.666452 | 198 | 1,553 | 5.227273 | 0.40404 | 0.030918 | 0.024155 | 0.030918 | 0.129469 | 0.129469 | 0.054106 | 0 | 0 | 0 | 0 | 0.023734 | 0.186091 | 1,553 | 36 | 87 | 43.138889 | 0.795095 | 0.221507 | 0 | 0 | 0 | 0 | 0.045075 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.190476 | false | 0 | 0.047619 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b67a5113b21316f83812bfead9269a89744903e8 | 8,727 | py | Python | src/ext/cstruct.py | X-EcutiOnner/fileobj | 7e4120759450bbdd1eee4ec26c8a757a8af48093 | [
"BSD-2-Clause"
] | 17 | 2015-05-23T11:09:46.000Z | 2021-12-10T14:28:01.000Z | src/ext/cstruct.py | X-EcutiOnner/fileobj | 7e4120759450bbdd1eee4ec26c8a757a8af48093 | [
"BSD-2-Clause"
] | 3 | 2015-03-23T04:35:25.000Z | 2017-09-15T07:12:15.000Z | src/ext/cstruct.py | X-EcutiOnner/fileobj | 7e4120759450bbdd1eee4ec26c8a757a8af48093 | [
"BSD-2-Clause"
] | 2 | 2016-01-07T00:38:13.000Z | 2020-12-02T08:27:28.000Z | # Copyright (c) 2009, Tomohiro Kusumi
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division
import os
import re
import sys
from .. import extension
from .. import filebytes
from .. import kernel
from .. import libc
from .. import path
from .. import screen
from .. import setting
from .. import util
def I(x):
return ' ' * 4 * x
class _node (object):
def __init__(self, type):
self.type = type
def get_size(self):
return 0
def get_repr(self, buf, name, indent):
return []
class _builtin (_node):
def __init__(self):
super(_builtin, self).__init__(util.get_class_name(self))
def get_repr(self, buf, name, indent):
s = "{0}{1} {2};".format(I(indent), self.type, name)
if len(buf) == self.get_size():
v = self.__get_value_expr(buf)
a = ''.join(["\\x{0:02X}".format(x) for x in
filebytes.iter_ords(buf)])
b = ''.join([screen.chr_repr[x] for x in filebytes.iter_ords(buf)])
s += " {0} {1} [{2}]".format(v, a, b)
return [s]
def __get_value_expr(self, buf):
n = self.to_int(buf)
m = _builtin_xtype_regex.match(self.type)
if m:
siz = builtin_int(m.group(1))
siz //= 4 # string size in hex
fmt = "0x{0:0" + str(siz) + "X}"
return fmt.format(n)
else:
return str(n)
_toplevel_regex = re.compile(r"\s*struct\s+(\S+)\s*{([\s\S]+?)}\s*;")
_struct_member_regex = re.compile(r"^(\S+)\[([0-9]+)\]$")
_builtin_type_regex = re.compile(r"^(u|s|x)(8|16|32|64)(le|be)$")
_builtin_xtype_regex = re.compile(r"^x(8|16|32|64)") # only to detect x
# XXX
# This is necessary as this module uses int()
# while __create_builtin_class() overwrites int.
builtin_int = util.get_builtin("int")
_classes = []
def __create_builtin_class(name, size):
def get_size(self):
return size
sign = (name[0] == 's')
m = _builtin_type_regex.match(name)
if not m:
def to_int(self, b):
return util.host_to_int(b, sign)
elif m.group(3) == "le":
def to_int(self, b):
return util.le_to_int(b, sign)
elif m.group(3) == "be":
def to_int(self, b):
return util.be_to_int(b, sign)
else:
assert False, m.group(0)
cls = type(name, (_builtin,), dict(get_size=get_size, to_int=to_int,),)
assert cls not in _classes
_classes.append(cls)
setattr(sys.modules[__name__], name, cls)
def __init_class():
for x in util.get_xrange(4):
size = 2 ** x
for sign in "usx":
for suffix in ("", "le", "be"):
name = "{0}{1}{2}".format(sign, size * 8, suffix)
__create_builtin_class(name, size)
for name, func_name, fn in libc.iter_defined_type():
__create_builtin_class(name, fn())
# A node for this class can't be added on import
class _string (_node):
def __init__(self, size):
self.__size = size
super(_string, self).__init__(_string_type(self.__size))
def get_size(self):
return self.__size
def get_repr(self, buf, name, indent):
i = buf.find(filebytes.ZERO)
b = filebytes.str(buf[:i])
s = "{0}string {1}; \"{2}\"".format(I(indent), name, b)
return [s]
def _string_type(n):
return "string{0}".format(n)
class _struct (_node):
def __init__(self, type, defs):
super(_struct, self).__init__(type)
self.__member = []
for type, name in self.__iter_member(defs):
o = get_node(type)
if not o:
extension.fail(type + " not defined yet")
self.__member.append((o, name))
def get_size(self):
return sum(_[0].get_size() for _ in self.__member)
def get_repr(self, buf, name, indent):
l = ["{0}struct {1} {{".format(I(indent), self.type)]
for _ in self.__member:
n = _[0].get_size()
l.extend(_[0].get_repr(buf[:n], _[1], indent+1))
buf = buf[n:]
x = " " + name
l.append("{0}}}{1};".format(I(indent), x.rstrip()))
return l
def __iter_member(self, defs):
for s in [x.strip() for x in defs.split(';')]:
l = s.split()
if l:
if l[0] == "struct":
l = l[1:]
if len(l) != 2:
extension.fail("Invalid syntax: {0}".format(l))
type, name = l
if type == "string":
yield self.__scan_string_type(type, name)
else:
# anything but string, including struct
m = _struct_member_regex.match(name)
if m:
var = m.group(1)
num = builtin_int(m.group(2))
for i in util.get_xrange(num):
yield type, "{0}[{1}]".format(var, i)
else:
yield type, name
def __scan_string_type(self, type, name):
m = _struct_member_regex.match(name)
if m:
var = m.group(1)
num = builtin_int(m.group(2))
else:
var = name
num = 1 # force "[1]"
type = _string_type(num)
if not get_node(type):
add_node(_string(num))
return type, "{0}[{1}]".format(var, num)
_nodes = []
def init_node():
global _nodes
_nodes = [cls() for cls in _classes]
def get_node(s):
for o in _nodes:
if o.type == s:
return o
def add_node(o):
while True:
x = get_node(o.type)
if x:
del _nodes[_nodes.index(x)]
else:
_nodes.append(o)
break
def get_text(co, fo, args):
pos = args.pop()
if not args:
return "No struct name"
f = path.get_path(args[0])
if os.path.exists(f):
args = args[1:]
if not args:
return "No struct name"
else:
f = setting.get_ext_path("cstruct")
if path.is_noent(f):
return "Need {0} with struct definition".format(f)
if not os.path.isfile(f):
return "Can not read " + f
try:
l = kernel.fopen_text(f).readlines()
except Exception as e:
return str(e)
l = [x.strip() for x in l] # strip whitespaces and tabs first
l = [x for x in l if not x.startswith('#')] # then ignore comments
s = ''.join([x for x in l if x])
s = re.sub(r"\s{1,}", ' ', s)
init_node()
while True:
m = _toplevel_regex.match(s)
if m:
s = s[m.end():]
add_node(_struct(*m.groups()))
else:
break
l = []
for x in args:
o = get_node(x)
if o:
buf = fo.read(pos, o.get_size())
l.extend(o.get_repr(buf, '', 0))
else:
l.append("struct {0} is not defined in {1}".format(x, f))
l.append('')
return l
def init():
setting.ext_add_name("path_cstruct", "cstruct",
"Set configuration file path for :cstruct. "
"Defaults to ~/.fileobj/cstruct if undefined.")
__init_class()
# create an empty file
f = setting.get_ext_path("cstruct")
if not os.path.exists(f):
try:
kernel.fcreat_text(f)
except Exception:
pass # ignore
def cleanup():
setting.ext_delete("path_cstruct")
init()
| 31.619565 | 81 | 0.569153 | 1,230 | 8,727 | 3.857724 | 0.229268 | 0.013909 | 0.010116 | 0.011802 | 0.214963 | 0.148577 | 0.134247 | 0.074183 | 0.053952 | 0.053952 | 0 | 0.013489 | 0.303426 | 8,727 | 275 | 82 | 31.734545 | 0.767067 | 0.184141 | 0 | 0.214953 | 0 | 0 | 0.073818 | 0.009033 | 0 | 0 | 0 | 0 | 0.009346 | 1 | 0.130841 | false | 0.004673 | 0.056075 | 0.046729 | 0.313084 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b67bd574fe892b817e3e205848dedd222bd9824b | 1,493 | py | Python | agent/nets/GraphConvNet.py | JosepLeder/RL-Graph-Matching | 5ea6b3beaf2a2f8d3739f64e7172a566d59d5468 | [
"MIT"
] | null | null | null | agent/nets/GraphConvNet.py | JosepLeder/RL-Graph-Matching | 5ea6b3beaf2a2f8d3739f64e7172a566d59d5468 | [
"MIT"
] | null | null | null | agent/nets/GraphConvNet.py | JosepLeder/RL-Graph-Matching | 5ea6b3beaf2a2f8d3739f64e7172a566d59d5468 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_mean
from torch.nn import Linear, ReLU
from torch_geometric.nn import GraphConv
class GraphConvNet(nn.Module):
def __init__(self, n_feat, n_hid, n_out):
super(GraphConvNet).__init__()
self.conv1 = GraphConv(n_feat, n_hid)
self.conv2 = GraphConv(n_hid, n_hid * 2)
self.conv3 = GraphConv(n_hid * 2, n_out)
def forward(self, data):
data.x = F.elu(self.conv1(data.x, data.edge_index))
data.x = F.elu(self.conv2(data.x, data.edge_index))
data.x = F.elu(self.conv3(data.x, data.edge_index))
x_1 = scatter_mean(data.x, data.batch, dim=0)
x = x_1
return x
class DoubleGraphConvNet(nn.Module):
def __init__(self, graph, subgraph, point):
super(DoubleGraphConvNet).__init__()
self.graph_conv = GraphConvNet(graph.n_feat, graph.n_feat * 2, graph.n_feat * 3)
self.subgraph_conv = GraphConvNet(subgraph.n_feat, subgraph.n_feat * 2, subgraph.n_feat * 3)
self.l1 = Linear(graph.n_feat * 3 + subgraph.n_feat * 3 + point, 600)
self.l2 = Linear(600, 256)
self.l3 = Linear(256, graph.n_feat)
def forward(self, graph, subgraph, point):
x1 = self.graph_conv(graph)
x2 = self.subgraph_conv(subgraph)
x = torch.cat([x1, x2, point])
x = ReLU(self.l1(x))
x = ReLU(self.l2(x))
x = self.l3(x)
return x
| 28.169811 | 100 | 0.636303 | 228 | 1,493 | 3.960526 | 0.232456 | 0.060908 | 0.055371 | 0.0299 | 0.145072 | 0.06866 | 0.06866 | 0.06866 | 0.06866 | 0.06866 | 0 | 0.034483 | 0.242465 | 1,493 | 52 | 101 | 28.711538 | 0.763926 | 0 | 0 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.171429 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b67e4db8f00b585b7a13ade5c1abf4013518f113 | 7,437 | py | Python | skoleintra/__init__.py | jona799t/skoleintra-api | b6a73aa50b53415aba7aa2249c45771b931992a9 | [
"Apache-2.0"
] | null | null | null | skoleintra/__init__.py | jona799t/skoleintra-api | b6a73aa50b53415aba7aa2249c45771b931992a9 | [
"Apache-2.0"
] | null | null | null | skoleintra/__init__.py | jona799t/skoleintra-api | b6a73aa50b53415aba7aa2249c45771b931992a9 | [
"Apache-2.0"
] | null | null | null | import json
import httpx
import requests
import urllib
import ssl
from urllib3 import poolmanager
from bs4 import BeautifulSoup
from unilogin import Unilogin
class TLSAdapter(requests.adapters.HTTPAdapter): #https://stackoverflow.com/questions/61631955/python-requests-ssl-error-during-requests
def init_poolmanager(self, connections, maxsize, block=False):
"""Create and initialize the urllib3 PoolManager."""
ctx = ssl.create_default_context()
ctx.set_ciphers('DEFAULT@SECLEVEL=1')
self.poolmanager = poolmanager.PoolManager(
num_pools=connections,
maxsize=maxsize,
block=block,
ssl_version=ssl.PROTOCOL_TLS,
ssl_context=ctx)
class Skoleintra:
def __init__(self, url, type="elev", brugernavn="", adgangskode=""):
self.success = False
self.session = requests.session()
self.session.mount('https://', TLSAdapter())
self.uniloginClient = Unilogin(brugernavn=brugernavn, adgangskode=adgangskode)
self.defaultHeaders = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36"
}
if url[-1] == "/":
url = url[:-1]
if "https://" not in url and "http://" not in url:
url = "https://" + url
baseUrl = url.split("://")[1].split("/")[0]
if type == "elev":
url = f"{url}/Account/IdpLogin?role=Student&partnerSp=urn%3Aitslearning%3Ansi%3Asaml%3A2.0%3A{baseUrl}"
resp = self.session.get(url, headers=self.defaultHeaders, allow_redirects=False)
cookies = {"Pool": resp.cookies["Pool"], "SsoSessionId": resp.cookies["SsoSessionId"], "__RequestVerificationToken": resp.cookies["__RequestVerificationToken"]} #, "HasPendingSSO": resp.cookies["HasPendingSSO"]
href = f"https://{baseUrl}" + BeautifulSoup(resp.text, 'html.parser').find("a", {"class": "ccl-button sk-button-light-green sk-font-icon sk-button-text-only sk-uni-login-button"}).get("href")
headers = self.defaultHeaders
headers["cookie"] = f"Pool={cookies['Pool']}; SsoSessionId={cookies['SsoSessionId']}; __RequestVerificationToken={cookies['__RequestVerificationToken']}"
resp = self.session.get(href, headers=headers, allow_redirects=False)
location = resp.headers["location"]
authUrl = self.uniloginClient.login(href=location, referer=baseUrl)
resp = self.session.get(authUrl, headers=self.defaultHeaders, allow_redirects=False)
cookies["SsoSelectedSchool"] = resp.cookies["SsoSelectedSchool"]
cookies["UserRole"] = resp.cookies["UserRole"]
cookies["Language"] = resp.cookies["Language"]
cookies[".AspNet.SSO.ApplicationCookie"] = resp.cookies[".AspNet.SSO.ApplicationCookie"]
location = resp.headers["location"]
headers = self.defaultHeaders
headers["cookie"] = f"SsoSelectedSchool={cookies['SsoSelectedSchool']}; Language={cookies['Language']}; .AspNet.SSO.ApplicationCookie={cookies['.AspNet.SSO.ApplicationCookie']}"
resp = self.session.get(location, headers=headers, allow_redirects=False)
html = BeautifulSoup(resp.text, 'html.parser')
href = html.find('form').get('action')
samlResponse = [html.find("input", {"name": "SAMLResponse"}).get("name"), html.find("input", {"name": "SAMLResponse"}).get("value")]
replayState = [html.find("input", {"name": "RelayState"}).get("name"), html.find("input", {"name": "RelayState"}).get("value")]
payload = f"{samlResponse[0]}={urllib.parse.quote_plus(samlResponse[1])}&{replayState[0]}={urllib.parse.quote_plus(replayState[1])}"
headers = self.defaultHeaders
headers["content-length"] = str(len(payload))
headers["content-type"] = "application/x-www-form-urlencoded"
headers["cookie"] = f"Pool={cookies['Pool']}; SsoSessionId={cookies['SsoSessionId']}; __RequestVerificationToken={cookies['__RequestVerificationToken']}; SsoSelectedSchool={cookies['SsoSelectedSchool']}; UserRole={cookies['UserRole']}; Language={cookies['Language']}; .AspNet.SSO.ApplicationCookie={cookies['.AspNet.SSO.ApplicationCookie']}"
resp = self.session.post(href, headers=headers, data=payload, allow_redirects=False)
cookies[".AspNet.ApplicationCookie"] = resp.cookies[".AspNet.ApplicationCookie"]
self.cookies = cookies
self.success = True
def getWeeklyplans(self, week, year):
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
"cookie": f"Pool={self.cookies['Pool']}; SsoSessionId={self.cookies['SsoSessionId']}; __RequestVerificationToken={self.cookies['__RequestVerificationToken']}; SsoSelectedSchool={self.cookies['SsoSelectedSchool']}; UserRole={self.cookies['UserRole']}; Language={self.cookies['Language']}; .AspNet.SSO.ApplicationCookie={self.cookies['.AspNet.SSO.ApplicationCookie']}; .AspNet.ApplicationCookie={self.cookies['.AspNet.ApplicationCookie']}",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
}
resp = self.session.get(f"https://{self.cookies['SsoSelectedSchool']}/student/weeklyplans/list/item/class/{week}-{year}", headers=headers)
weeklyplan = json.loads(BeautifulSoup(resp.text, 'html.parser').find("div", {"id": "root"}).get("data-clientlogic-settings-weeklyplansapp"))
return weeklyplan
async def getWeeklyplansAsync(self, week, year):
if len(str(week)) == 1:
week = f"0{week}"
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "da-DK,da;q=0.9,en-US;q=0.8,en;q=0.7",
"cookie": f"Pool={self.cookies['Pool']}; SsoSessionId={self.cookies['SsoSessionId']}; __RequestVerificationToken={self.cookies['__RequestVerificationToken']}; SsoSelectedSchool={self.cookies['SsoSelectedSchool']}; UserRole={self.cookies['UserRole']}; Language={self.cookies['Language']}; .AspNet.SSO.ApplicationCookie={self.cookies['.AspNet.SSO.ApplicationCookie']}; .AspNet.ApplicationCookie={self.cookies['.AspNet.ApplicationCookie']}",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
}
async with httpx.AsyncClient() as client:
resp = await client.get(f"https://{self.cookies['SsoSelectedSchool']}/student/weeklyplans/list/item/class/{week}-{year}", headers=headers)
weeklyplan = json.loads(BeautifulSoup(resp.text, 'html.parser').find("div", {"id": "root"}).get("data-clientlogic-settings-weeklyplansapp"))
return weeklyplan | 67.609091 | 450 | 0.671238 | 856 | 7,437 | 5.785047 | 0.242991 | 0.042205 | 0.052504 | 0.039984 | 0.576333 | 0.52706 | 0.480412 | 0.452544 | 0.452544 | 0.452544 | 0 | 0.024204 | 0.155573 | 7,437 | 110 | 451 | 67.609091 | 0.764331 | 0.024338 | 0 | 0.26087 | 0 | 0.195652 | 0.506897 | 0.336966 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032609 | false | 0 | 0.086957 | 0 | 0.163043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b67e8c2b0c5ddd808117b7d17c7cf6d08076154f | 1,433 | py | Python | commands/leaderboad.py | classAndrew/valor | b68a72b76c111e22d8df8d56a2923185f057fc2a | [
"MIT"
] | null | null | null | commands/leaderboad.py | classAndrew/valor | b68a72b76c111e22d8df8d56a2923185f057fc2a | [
"MIT"
] | null | null | null | commands/leaderboad.py | classAndrew/valor | b68a72b76c111e22d8df8d56a2923185f057fc2a | [
"MIT"
] | 1 | 2021-11-28T00:45:25.000Z | 2021-11-28T00:45:25.000Z | from valor import Valor
from discord.ext.commands import Context
from util import ErrorEmbed, LongTextEmbed, LongFieldEmbed, guild_name_from_tag
import random
from datetime import datetime
import requests
from sql import ValorSQL
from commands.common import get_uuid, from_uuid
async def _register_leaderboard(valor: Valor):
desc = "The leaderboard"
@valor.command()
async def leaderboard(ctx: Context):
res = await ValorSQL._execute("SELECT uuid_name.name, uuid_name.uuid, player_stats.galleons_graveyard FROM player_stats LEFT JOIN uuid_name ON uuid_name.uuid=player_stats.uuid ORDER BY galleons_graveyard DESC LIMIT 50")
stats = []
for m in res:
if not m[0] and m[1]:
stats.append((await from_uuid(m[1]), m[2]))
else:
stats.append((m[0] if m[0] else "can't find name", m[2]))
table = "```\n"+'\n'.join("%3d. %24s %5d" % (i+1, stats[i][0], stats[i][1]) for i in range(len(stats)))+"\n```"
await LongTextEmbed.send_message(valor, ctx, "Galleon's Graveyard", content=table, color=0x11FFBB)
@leaderboard.error
async def cmd_error(ctx, error: Exception):
await ctx.send(embed=ErrorEmbed())
raise error
@valor.help_override.command()
async def leaderboard(ctx: Context):
await LongTextEmbed.send_message(valor, ctx, "Leaderboard", desc, color=0xFF00)
| 37.710526 | 227 | 0.667132 | 198 | 1,433 | 4.717172 | 0.414141 | 0.034261 | 0.03212 | 0.055675 | 0.205567 | 0.156317 | 0 | 0 | 0 | 0 | 0 | 0.019678 | 0.219819 | 1,433 | 38 | 228 | 37.710526 | 0.815742 | 0 | 0 | 0.071429 | 0 | 0.035714 | 0.188982 | 0.043933 | 0.035714 | 0 | 0.009763 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6810fce8cb140c6eacc9841d12f9fa404ed0bac | 3,388 | py | Python | game/alfa_beta.py | YuseqYaseq/gry-kombinatoryczne | 15e5d857cdbc6ec447c0028a90c4354ba25fc553 | [
"MIT"
] | 2 | 2020-04-26T16:57:37.000Z | 2020-04-26T16:57:40.000Z | game/alfa_beta.py | YuseqYaseq/gry-kombinatoryczne | 15e5d857cdbc6ec447c0028a90c4354ba25fc553 | [
"MIT"
] | null | null | null | game/alfa_beta.py | YuseqYaseq/gry-kombinatoryczne | 15e5d857cdbc6ec447c0028a90c4354ba25fc553 | [
"MIT"
] | null | null | null | from game.sequence import Sequence
max_value = 999888777666555
class AlfaBeta:
def __init__(self, values, state, k, player, enemy, max_deepth = None):
self.values = values
self.state = state
self.k = k
self.player = player
self.enemy = enemy
if max_deepth is None:
self.max_deepth = k
else:
self.max_deepth = max_deepth
def get_move(self):
alfa = float('-inf')
beta = float('inf')
move = None
for i in range(0, len(self.state)):
if self.state[i] == 0:
self.state[i] = self.player
child_alfa = self.alfa_beta(self.max_deepth - 1, alfa, beta, self.enemy)
if alfa < child_alfa:
alfa = child_alfa
move = i
self.state[i] = 0
if alfa >= beta:
break
return move
def alfa_beta(self, deepth, alfa, beta, current_player):
terminal_value = self.calculate_terminal_node_value(current_player)
if terminal_value is not None:
return terminal_value
if deepth == 0:
return self.evaluate_node(current_player)
if current_player != self.player:
return self.enemy_visits_children(deepth, alfa, beta, current_player)
else:
return self.visit_children(deepth, alfa, beta, current_player)
def visit_children(self, deepth, alfa, beta, current_player):
for i in range(0, len(self.state)):
if self.state[i] == 0:
self.state[i] = current_player
child_alfa = self.alfa_beta(deepth - 1, alfa, beta, self.enemy)
alfa = max(alfa, child_alfa)
self.state[i] = 0
if alfa >= beta:
break
return alfa
def enemy_visits_children(self, deepth, alfa, beta, current_player):
for i in range(0, len(self.state)):
if self.state[i] == 0:
self.state[i] = current_player
child_beta = self.alfa_beta(deepth - 1, alfa, beta, self.player)
beta = min(beta, child_beta)
self.state[i] = 0
if alfa >= beta:
break
return beta
def evaluate_node(self, current_player):
sequence = self.create_evalute_sequence(current_player)
value = sequence.evaluate()
if current_player == self.player:
return value
else:
return -value
def calculate_terminal_node_value(self, current_player):
sequence = self.create_terminal_sequence(current_player)
is_term = sequence.is_term()
if is_term:
if current_player == self.player:
return max_value
else:
return -max_value
return None
def create_terminal_sequence(self, player):
return self.create_sequence(lambda el: el == player)
def create_evalute_sequence(self, player):
return self.create_sequence(lambda el: el == player or el == 0)
def create_sequence(self, element_pred):
elements = []
for i in range(0, len(self.state)):
if element_pred(self.state[i]):
elements.append(self.values[i])
return Sequence(elements, self.k)
| 30.8 | 88 | 0.563164 | 406 | 3,388 | 4.517241 | 0.142857 | 0.113413 | 0.054526 | 0.035987 | 0.47928 | 0.47928 | 0.302617 | 0.302617 | 0.268811 | 0.20229 | 0 | 0.013618 | 0.349764 | 3,388 | 109 | 89 | 31.082569 | 0.818883 | 0 | 0 | 0.285714 | 0 | 0 | 0.002066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.119048 | false | 0 | 0.011905 | 0.02381 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b68116a66a4de7ee10965623e6d1922379799cea | 4,620 | py | Python | app.py | j33mk/randomnews | 705ecde11f8097a8348a9abae384bd09031cb2ca | [
"Apache-2.0"
] | null | null | null | app.py | j33mk/randomnews | 705ecde11f8097a8348a9abae384bd09031cb2ca | [
"Apache-2.0"
] | null | null | null | app.py | j33mk/randomnews | 705ecde11f8097a8348a9abae384bd09031cb2ca | [
"Apache-2.0"
] | null | null | null | #----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request,jsonify
import random
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
import os
import requests
from bs4 import BeautifulSoup
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
# @app.route('/')
# def home():
# return render_template('pages/placeholder.home.html')
@app.route('/')
def news():
return render_template('pages/placeholder.news.html')
@app.route('/randomnews')
def randomnews():
html = requests.get('http://www.dawn.com')
soup = BeautifulSoup(html.text, 'html5lib')
h2 = soup.find_all('h2', {'data-layout': 'story'})
news = []
for link in h2:
mylink = BeautifulSoup(str(link), 'html.parser')
gettinglink = mylink.find('a', href=True)
newsarray = []
newsarray.append(str(gettinglink.find(text=True)))
newsarray.append(str(gettinglink['href']))
news.append(newsarray)
response = jsonify({
'data':random.choice(news),
'status':'awesome'
})
response.headers.add('Access-Control-Allow-Origin','*')
return response,200
@app.route('/fortune', methods=['GET'])
def fortune():
fortunes = [
'A feather in the hand is better than a bird in the air. ',
'A golden egg of opportunity falls into your lap this month.',
'Bide your time, for success is near.',
'Curiosity kills boredom. Nothing can kill curiosity.',
'Disbelief destroys the magic.',
'Dont just spend time. Invest it.',
'Every wise man started out by asking many questions.',
'Fortune Not Found: Abort, Retry, Ignore?',
'Good to begin well, better to end well.',
'How many of you believe in psycho-kinesis? Raise my hand.',
'Imagination rules the world.',
'Keep your face to the sunshine and you will never see shadows.',
'Listen to everyone. Ideas come from everywhere.',
'Man is born to live and not prepared to live.',
'No one can walk backwards into the future.',
'One of the first things you should look for in a problem is its positive side.',
'Pick battles big enough to matter, small enough to win.',
'Remember the birthday but never the age.',
'Success is failure turned inside out.',
'The harder you work, the luckier you get.',
'Use your eloquence where it will do the most good.',
'What is hidden in an empty box?',
'Your reputation is your wealth.'
]
response = jsonify({
'data':random.choice(fortunes),
'status':'awesome'
})
response.headers.add('Access-Control-Allow-Origin', '*')
return response,200
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
#db_session.rollback()
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
| 32.083333 | 88 | 0.5671 | 521 | 4,620 | 4.950096 | 0.493282 | 0.027142 | 0.03102 | 0.019387 | 0.107794 | 0.055836 | 0.055836 | 0.055836 | 0.055836 | 0.055836 | 0 | 0.009814 | 0.183983 | 4,620 | 143 | 89 | 32.307692 | 0.674271 | 0.20303 | 0 | 0.123457 | 0 | 0.012346 | 0.431868 | 0.033196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0 | 0.08642 | 0.037037 | 0.209877 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6833905716414b7fc7d6f77dff5aa4d0c6d4d14 | 9,836 | py | Python | scripts/dfuse-pack.py | qiuchengxuan/rs-flight | 66a09afe4e24f8b49c6445f9048172e46e6a0f03 | [
"MIT"
] | 1 | 2020-09-01T08:49:24.000Z | 2020-09-01T08:49:24.000Z | scripts/dfuse-pack.py | qiuchengxuan/rs-flight | 66a09afe4e24f8b49c6445f9048172e46e6a0f03 | [
"MIT"
] | null | null | null | scripts/dfuse-pack.py | qiuchengxuan/rs-flight | 66a09afe4e24f8b49c6445f9048172e46e6a0f03 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Written by Antonio Galea - 2010/11/18
# Distributed under Gnu LGPL 3.0
# see http://www.gnu.org/licenses/lgpl-3.0.txt
import binascii
import os
import struct
import sys
import zlib
from optparse import OptionParser
try:
from intelhex import IntelHex
except ImportError:
IntelHex = None
DEFAULT_DEVICE = "0x0483:0xdf11"
DEFAULT_NAME = b'ST...'
def named(tuple, names):
return dict(list(zip(names.split(), tuple)))
def consume(fmt, data, names):
n = struct.calcsize(fmt)
return named(struct.unpack(fmt, data[:n]), names), data[n:]
def cstring(bytestring):
return bytestring.partition(b'\0')[0]
def compute_crc(data):
return 0xFFFFFFFF & -zlib.crc32(data) - 1
def parse(file, dump_images=False):
print('File: "%s"' % file)
data = open(file, 'rb').read()
crc = compute_crc(data[:-4])
prefix, data = consume('<5sBIB', data, 'signature version size targets')
print('%(signature)s v%(version)d, image size: %(size)d, targets: %(targets)d' % prefix)
for t in range(prefix['targets']):
tprefix, data = consume(
'<6sBI255s2I', data, 'signature altsetting named name size elements'
)
tprefix['num'] = t
if tprefix['named']:
tprefix['name'] = cstring(tprefix['name'])
else:
tprefix['name'] = ''
print(
'%(signature)s %(num)d, alt setting: %(altsetting)s, name: "%(name)s", size: %(size)d, elements: %(elements)d'
% tprefix
)
tsize = tprefix['size']
target, data = data[:tsize], data[tsize:]
for e in range(tprefix['elements']):
eprefix, target = consume('<2I', target, 'address size')
eprefix['num'] = e
print(' %(num)d, address: 0x%(address)08x, size: %(size)d' % eprefix)
esize = eprefix['size']
image, target = target[:esize], target[esize:]
if dump_images:
out = '%s.target%d.image%d.bin' % (file, t, e)
open(out, 'wb').write(image)
print(' DUMPED IMAGE TO "%s"' % out)
if len(target):
print("target %d: PARSE ERROR" % t)
suffix = named(struct.unpack('<4H3sBI', data[:16]), 'device product vendor dfu ufd len crc')
print(
'usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x'
% suffix
)
if crc != suffix['crc']:
print("CRC ERROR: computed crc32 is 0x%08x" % crc)
data = data[16:]
if data:
print("PARSE ERROR")
def checkbin(binfile):
data = open(binfile, 'rb').read()
if (len(data) < 16):
return
crc = compute_crc(data[:-4])
suffix = named(struct.unpack('<4H3sBI', data[-16:]), 'device product vendor dfu ufd len crc')
if crc == suffix['crc'] and suffix['ufd'] == b'UFD':
print(
'usb: %(vendor)04x:%(product)04x, device: 0x%(device)04x, dfu: 0x%(dfu)04x, %(ufd)s, %(len)d, 0x%(crc)08x'
% suffix
)
print("It looks like the file %s has a DFU suffix!" % binfile)
print("Please remove any DFU suffix and retry.")
sys.exit(1)
def build(file, targets, name=DEFAULT_NAME, device=DEFAULT_DEVICE):
data = b''
for t, target in enumerate(targets):
tdata = b''
for image in target:
tdata += struct.pack('<2I', image['address'], len(image['data'])) + image['data']
tdata = struct.pack('<6sBI255s2I', b'Target', 0, 1, name, len(tdata), len(target)) + tdata
data += tdata
data = struct.pack('<5sBIB', b'DfuSe', 1, len(data) + 11, len(targets)) + data
v, d = [int(x, 0) & 0xFFFF for x in device.split(':', 1)]
data += struct.pack('<4H3sB', 0, d, v, 0x011a, b'UFD', 16)
crc = compute_crc(data)
data += struct.pack('<I', crc)
open(file, 'wb').write(data)
if __name__ == "__main__":
usage = """
%prog [-d|--dump] infile.dfu
%prog {-b|--build} address:file.bin [-b address:file.bin ...] [{-D|--device}=vendor:device] outfile.dfu
%prog {-s|--build-s19} file.s19 [{-D|--device}=vendor:device] outfile.dfu
%prog {-i|--build-ihex} file.hex [-i file.hex ...] [{-D|--device}=vendor:device] outfile.dfu"""
parser = OptionParser(usage=usage)
parser.add_option(
"-b",
"--build",
action="append",
dest="binfiles",
help=
"build a DFU file from given BINFILES. Note that the BINFILES must not have any DFU suffix!",
metavar="BINFILES"
)
parser.add_option(
"-i",
"--build-ihex",
action="append",
dest="hexfiles",
help="build a DFU file from given Intel HEX HEXFILES",
metavar="HEXFILES"
)
parser.add_option(
"-s",
"--build-s19",
type="string",
dest="s19files",
help="build a DFU file from given S19 S-record S19FILE",
metavar="S19FILE"
)
parser.add_option(
"-D",
"--device",
action="store",
dest="device",
help="build for DEVICE, defaults to %s" % DEFAULT_DEVICE,
metavar="DEVICE"
)
parser.add_option(
"-d",
"--dump",
action="store_true",
dest="dump_images",
default=False,
help="dump contained images to current directory"
)
(options, args) = parser.parse_args()
if (options.binfiles or options.hexfiles) and len(args) == 1:
target = []
if options.binfiles:
for arg in options.binfiles:
try:
address, binfile = arg.split(':', 1)
except ValueError:
print("Address:file couple '%s' invalid." % arg)
sys.exit(1)
try:
address = int(address, 0) & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
if not os.path.isfile(binfile):
print("Unreadable file '%s'." % binfile)
sys.exit(1)
checkbin(binfile)
target.append({'address': address, 'data': open(binfile, 'rb').read()})
if options.hexfiles:
if not IntelHex:
print("Error: IntelHex python module could not be found")
sys.exit(1)
for hex in options.hexfiles:
ih = IntelHex(hex)
for (address, end) in ih.segments():
try:
address = address & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
target.append({
'address': address,
'data': ih.tobinstr(start=address, end=end - 1)
})
outfile = args[0]
device = DEFAULT_DEVICE
if options.device:
device = options.device
try:
v, d = [int(x, 0) & 0xFFFF for x in device.split(':', 1)]
except:
print("Invalid device '%s'." % device)
sys.exit(1)
build(outfile, [target], DEFAULT_NAME, device)
elif options.s19files and len(args) == 1:
address = 0
data = ""
target = []
name = DEFAULT_NAME
with open(options.s19files) as f:
lines = f.readlines()
for line in lines:
curaddress = 0
curdata = ""
line = line.rstrip()
if line.startswith("S0"):
name = binascii.a2b_hex(line[8:len(line) - 2]).replace(".s19", "")
elif line.startswith("S3"):
try:
curaddress = int(line[4:12], 16) & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
curdata = binascii.unhexlify(line[12:-2])
elif line.startswith("S2"):
try:
curaddress = int(line[4:10], 16) & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
curdata = binascii.unhexlify(line[10:-2])
elif line.startswith("S1"):
try:
curaddress = int(line[4:8], 16) & 0xFFFFFFFF
except ValueError:
print("Address %s invalid." % address)
sys.exit(1)
curdata = binascii.unhexlify(line[8:-2])
if address == 0:
address = curaddress
data = curdata
elif address + len(data) != curaddress:
target.append({'address': address, 'data': data})
address = curaddress
data = curdata
else:
data += curdata
outfile = args[0]
device = DEFAULT_DEVICE
if options.device:
device = options.device
try:
v, d = [int(x, 0) & 0xFFFF for x in device.split(':', 1)]
except:
print("Invalid device '%s'." % device)
sys.exit(1)
build(outfile, [target], name, device)
elif len(args) == 1:
infile = args[0]
if not os.path.isfile(infile):
print("Unreadable file '%s'." % infile)
sys.exit(1)
parse(infile, dump_images=options.dump_images)
else:
parser.print_help()
if not IntelHex:
print("Note: Intel hex files support requires the IntelHex python module")
sys.exit(1)
| 35.25448 | 122 | 0.51281 | 1,118 | 9,836 | 4.480322 | 0.208408 | 0.018167 | 0.020763 | 0.03354 | 0.291875 | 0.248153 | 0.23318 | 0.204432 | 0.204432 | 0.204432 | 0 | 0.029746 | 0.347194 | 9,836 | 278 | 123 | 35.381295 | 0.75035 | 0.013725 | 0 | 0.3 | 0 | 0.028 | 0.214396 | 0.016912 | 0 | 0 | 0.0099 | 0 | 0 | 1 | 0.028 | false | 0 | 0.032 | 0.012 | 0.08 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6857c3a9f8eac380eda21da26fac71984173300 | 12,313 | py | Python | AI.py | alexrockhill/chess | 63d6691912c14ff4c2b0bf4ad9e73a17edec2f70 | [
"MIT"
] | null | null | null | AI.py | alexrockhill/chess | 63d6691912c14ff4c2b0bf4ad9e73a17edec2f70 | [
"MIT"
] | null | null | null | AI.py | alexrockhill/chess | 63d6691912c14ff4c2b0bf4ad9e73a17edec2f70 | [
"MIT"
] | null | null | null | import numpy as np
import os
import os.path as op
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
from Board import Board
from func import opposite_color, loc2int, int2color, is_last_rank
LETTERS = [chr(i) for i in range(65, 65+26)]  # uppercase ASCII letters 'A'..'Z', used to build genome names
BOARD_DIM = 8  # squares per side of the chess board
N_PIECES = 6  # number of distinct piece types (matches Network.piece_dict below)
MAX_MOVES = 100  # cap on board.move before a tournament game is stopped and scored
def logistic(x):
    """Squash ``x`` (scalar or numpy array) into the open interval (-1, 1).

    This is a rescaled logistic sigmoid, mathematically equal to tanh(x / 2).
    """
    sigmoid = 1. / (1 + np.exp(-x))
    return 2. * sigmoid - 1
class AI:
    """Chess-playing agent backed by a pickled Network.

    If no saved network exists for ``name`` under ``networks/``, a new one is
    evolved from random genomes via a single-elimination tournament.
    """

    def __init__(self, color, name='rock', show=False):
        """Load (or first train) the network named ``name`` for ``color``.

        Parameters
        ----------
        color : str
            'white' or 'black' -- presumably; confirm against Board usage.
        name : str
            File stem of the pickled network under ``networks/``.
        show : bool
            Forwarded to the network's ``show`` attribute.
        """
        self.color = color
        self.name = name
        # Train from scratch only when no saved network exists for this name.
        if not op.isfile(op.join('networks', name + 'net.pkl')):
            self.train_random()
        self.network = load_network(name)
        self.network.show = show

    def make_decision(self, board):
        """Delegate move selection for this AI's color to the network."""
        self.network.make_decision(board, self.color)

    def get_promotion(self, board, loc):
        """Return the network's choice of promotion piece for the pawn at ``loc``."""
        return self.network.get_promotion(board, loc)

    def train_random(self, exp_n=2):
        """Evolve a network from 2**exp_n random genomes by tournament.

        The surviving genome (and its network) is renamed to ``self.name``
        and saved to disk.
        """
        genomes = [Genome(name=''.join([LETTERS[i] for i, b in enumerate(format(1023, '026b')) if b == '1']),
                          seed=(4334 * i)) for i in tqdm(range(2**exp_n))]
        # Halve the field each round until one genome remains.
        while len(genomes) > 1:
            genomes = self.genome_tournament(genomes)
        genomes[0].network.name = self.name
        genomes[0].name = self.name
        genomes[0].network.save()
        genomes[0].save()

    def train_offspring(self, exp_n=8):
        # Not implemented yet; placeholder for evolving offspring of a
        # previously trained genome.
        pass

    def genome_tournament(self, genomes):
        """Play one elimination round and return the surviving genomes.

        Genome ``i`` is paired against genome ``len - i - 1`` with randomized
        colors; the winner (by checkmate, or by material score after
        MAX_MOVES) advances.  Raises ValueError for an odd-sized field or an
        unrecognized game outcome.
        """
        if len(genomes) % 2:
            raise ValueError('Must have an even number of genomes for tournament')
        keep_indices = []
        for i in tqdm(range(len(genomes) // 2)):
            board = Board()
            order = [i, len(genomes) - i - 1]
            np.random.shuffle(order)  # randomize who plays white
            players = {'white': genomes[order[0]], 'black': genomes[order[1]]}
            while not board.game_over and board.move < MAX_MOVES:
                color = int2color(board.move)
                players[color].network.make_decision(board, color)
            outcome = board.check_check_mate()
            position_difference = board.score_position('white') - board.score_position('black')
            print(outcome, position_difference)
            if (outcome and 'Draw' in outcome) or position_difference == 0:
                # BUG FIX: on a draw, keep one of the two actual contestants.
                # The old code used np.random.choice([i, -i]), where -i
                # indexes genomes[len - i] (an unrelated genome) and for i == 0
                # always kept index 0.
                keep_indices.append(int(np.random.choice(order)))
            elif outcome == 'Check mate white' or position_difference > 0:
                keep_indices.append(order[0])
            elif outcome == 'Check mate black' or position_difference < 0:
                keep_indices.append(order[1])
            else:
                raise ValueError('Unrecognized outcome %s' % outcome)
        return [genomes[i] for i in keep_indices]
class ConnectionWeight:
def __init__(self, weight):
self.weight = weight
class Node:
def __init__(self, loc):
self.loc = loc
self.activity = 0
self.next_nodes = dict()
self.previous_nodes = dict()
def connect(self, node, weight):
cw = ConnectionWeight(weight)
self.next_nodes[node.loc] = cw
node.previous_nodes[self.loc] = cw
class Network:
piece_dict = {'pawn': 0, 'rook': 1, 'knight': 2, 'bishop': 3,
'queen': 4, 'king': 5}
promotion_pieces = ['rook', 'knight', 'bishop', 'queen']
def __init__(self, layer_dims, tms, name='rock', seed=12, delta=0.1,
show=True):
np.random.seed(seed)
self.name = name
self.delta = delta # for backpropagation (depreciated)
self.show = show
self.input_layer = self.make_layer(layer_dims[0])
self.hidden_layers = []
if len(layer_dims) > 2:
hidden_layer = self.make_layer(layer_dims[1])
self.connect_layers(self.input_layer, hidden_layer, tms[0])
self.hidden_layers.append(hidden_layer)
for i, (hidden_dim, tm) in enumerate(zip(layer_dims[2:-1], tms[1:-1])):
hidden_layer = self.make_layer(hidden_dim)
if i < len(layer_dims) - 1:
self.connect_layers(self.hidden_layers[-1], hidden_layer, tm)
self.hidden_layers.append(hidden_layer)
self.output_layer = self.make_layer(layer_dims[-1])
self.connect_layers(hidden_layer, self.output_layer, tms[-1])
else:
self.hidden_layers = []
self.output_layer = self.make_layer(layer_dims[-1])
self.connect_layers(self.input_layer, self.output_layer, tms[-1])
def make_layer(self, shape):
layer = np.empty(shape=shape, dtype=object).flatten()
for i in range(layer.size):
layer[i] = Node(loc=i)
return layer.reshape(shape)
def connect_layers(self, layer, next_layer, tm):
tm = tm.flatten()
for i, node in enumerate(layer.flatten()):
for j, next_node in enumerate(next_layer.flatten()):
node.connect(next_node, tm[i * j])
def save(self):
with open(op.join('networks', self.name + 'net.pkl'), 'wb') as f:
pickle.dump(self, f)
def propagate(self, input_activity):
if input_activity.shape != self.input_layer.shape:
raise ValueError('Input activity dimension mismatch')
input_activity = input_activity.flatten()
for i, node in enumerate(self.input_layer.flatten()):
node.activity = input_activity[i]
if self.hidden_layers:
self.propagate_layer(self.input_layer, self.hidden_layers[0])
for i, hidden_layer in enumerate(self.hidden_layers[1:]):
self.propagate_layer(self.hidden_layers[i], hidden_layer)
self.propagate_layer(self.hidden_layers[-1], self.output_layer)
else:
self.propagate_layer(self.input_layer, self.output_layer)
if self.show:
self.show_activity()
def propagate_layer(self, layer, next_layer):
update_mat = np.zeros(shape=next_layer.shape).flatten()
for node in layer.flatten():
for loc, weight in node.next_nodes.items():
update_mat[loc] += node.activity * weight.weight
for i, node in enumerate(next_layer.flatten()):
node.activity = logistic(update_mat[i])
def show_activity(self):
input_fig, input_axes = plt.subplots(self.input_layer.shape[0])
for section, ax in zip(self.input_layer, input_axes):
self.plot_section(section, ax)
if self.hidden_layers:
hidden_fig, hidden_axes = plt.subplots(len(self.hidden_layers))
hidden_axes = hidden_axes if isinstance(hidden_axes, np.ndarray) else np.array([hidden_axes])
for hidden_layer, ax in zip(self.hidden_layers, hidden_axes):
self.plot_section(hidden_layer, ax)
output_fig, output_axes = plt.subplots(self.output_layer.shape[0])
for section, ax in zip(self.output_layer, output_axes):
self.plot_section(section, ax)
plt.show()
def plot_section(self, section, ax):
section_shape = section.shape
ax.axis('off')
activity_mat = np.zeros(section_shape).flatten()
for i, node in enumerate(section.flatten()):
activity_mat[i] = node.activity
ax.imshow(activity_mat.reshape(section_shape))
def train_king_hunt(self, n_games=1000):
for n in tqdm(range(n_games)):
board = Board()
while not board.game_over and board.move < MAX_MOVES:
color = int2color(board.move)
activity_mat = self.pieces2activity_mat(board.pieces[color], board.pieces[opposite_color(color)])
self.propagate(activity_mat)
output_activity_mat = self.layer2activity_mat(self.output_layer)
piece, move = self.activity_mat2move(output_activity_mat, board, board.pieces[color])
print(piece.name, piece.square.loc, move)
board.make_move(piece, move)
score = board.score_position(color)
output_loc = self.piece2output_layer(piece)
self.back_propagate(self.output_layer[output_loc], score, 0)
def back_propagate(self, node, score, i):
if score == 0 or i == len(self.hidden_layers) + 2:
return
if self.hidden_layers:
layer = self.hidden_layers[-i] if i < len(self.hidden_layers) else self.input_layer
else:
layer = self.input_layer
for loc, weight in node.previous_nodes.items():
node.previous_nodes[loc].weight = weight.weight + logistic(score)*self.delta
self.back_propagate(layer.flatten()[loc], score / 2, i + 1)
def make_decision(self, board, color):
activity_mat = self.pieces2activity_mat(board.pieces[color], board.pieces[opposite_color(color)])
self.propagate(activity_mat)
output_activity_mat = self.layer2activity_mat(self.output_layer)
piece, move = self.activity_mat2move(output_activity_mat, board, board.pieces[color])
board.make_move(piece, move)
self.check_promotion_or_game_end(board)
def get_promotion(self, board, loc):
output_activity_mat = self.layer2activity_mat(self.output_layer)
return self.activity_mat2promotion(output_activity_mat, loc)
def check_promotion_or_game_end(self, board):
piece, loc = board.moves[-1]
if piece.name == 'pawn' and is_last_rank(int2color(board.move - 1), loc):
name = self.get_promotion(board, loc)
board.take_piece(piece)
board.make_piece(name, piece.color, loc)
board.check_check_mate()
def pieces2activity_mat(self, my_pieces, other_pieces):
activity_mat = np.zeros(self.input_layer.shape)
for i, pieces in enumerate([my_pieces, other_pieces]):
for name in pieces:
for piece in pieces[name]:
column, row = piece.square.loc
column, row = loc2int(column, row)
activity_mat[i, self.piece_dict[name], column, row] = 1 # output_layer.shape == n_pieces
return activity_mat
def layer2activity_mat(self, layer):
activity_mat = np.zeros(layer.shape).flatten()
for i, node in enumerate(layer.flatten()):
activity_mat[i] = node.activity
return activity_mat.reshape(layer.shape)
def activity_mat2move(self, activity_mat, board, pieces):
best_move, best_score = None, -1
for name in pieces:
for piece in pieces[name]:
column, row = piece.square.loc
start_column, start_row = loc2int(column, row)
for move in board.get_moves(piece):
column, row = move
end_column, end_row = loc2int(column, row)
score = activity_mat[self.piece_dict[piece.name], start_column, start_row, end_column, end_row]
if score > best_score:
best_score = score
best_move = (piece, move)
return best_move
def activity_mat2promotion(self, activity_mat, loc):
column, row = loc
column, row = loc2int(column, row)
return self.promotion_pieces[int(np.argmax(activity_mat[1:4, column, row, column, row]))]
def piece2output_layer(self, piece):
column, row = piece.square.loc
column, row = loc2int(column, row)
return self.piece_dict[piece.name], column, row
def load_network(name):
if op.isfile(op.join('networks', name + 'net.pkl')):
with open(op.join('networks', name + 'net.pkl'), 'rb') as f:
network = pickle.load(f)
else:
raise ValueError('%s network does not exist' % name)
return network
class Genome:
DEPTH = 8
LENGTH = int(1e6)
MAX_LAYERS = 10
MAX_DIMS = 5
def __init__(self, name='rock', seed=12):
'''
name: String
for versioning
genome: String 'random' or 'load'
'random' generates a new genome, 'load' loads previously trained genome
seed: int
seed for numpy random number generator
'''
np.random.seed(seed)
self.name = name
self.i = 0
if op.isfile(op.join('genomes', self.name + 'gen.txt')):
self.load()
else:
self.genome = ''.join([format(np.random.randint(2**self.DEPTH), '0%ib' % self.DEPTH)
for _ in range(self.LENGTH)])
if op.isfile(op.join('networks', self.name + 'net.pkl')):
self.load_network()
else:
self.make_network()
def save(self):
with open(op.join('genomes', self.name + 'gen.txt'), 'w') as f:
f.write(self.genome)
def load(self):
with open(op.join('genomes', self.name + 'gen.txt'), 'r') as f:
self.genome = f.readline()
def delete(self):
os.remove(op.join('genomes', self.name + 'gen.txt'))
def make_network(self):
n_layers = max([(self.next_int() % self.MAX_LAYERS) + 1, 3])
tms = [] # transition matrices
input_dim = (2, N_PIECES, BOARD_DIM, BOARD_DIM)
layer_dims = [input_dim]
for n in range(n_layers - 2):
layer_dims.append(self.new_layer())
tms.append(self.generate_tm(layer_dims[-2], layer_dims[-1]))
output_dim = (N_PIECES, BOARD_DIM, BOARD_DIM, BOARD_DIM, BOARD_DIM)
layer_dims.append(output_dim)
tms.append(self.generate_tm(layer_dims[-2], layer_dims[-1]))
self.network = Network(layer_dims, tms, name=self.name, show=False)
def load_network(self):
self.network = load_network(self.name)
def next_int(self):
self.i += self.DEPTH
if self.i >= len(self.genome):
raise ValueError('Genome length exceeded')
return int(self.genome[self.i-self.DEPTH: self.i], base=2)
def generate_tm(self, dim0, dim1):
tm = np.zeros(dim0 + dim1).flatten()
for i in range(tm.size):
tm[i] = (self.next_int() - 2**(self.DEPTH - 1)) / 2**(self.DEPTH - 1)
return tm.reshape(dim0 + dim1)
def new_layer(self):
n_dims = (self.next_int() % self.MAX_DIMS) + 1
layer_dim = tuple((self.next_int() % BOARD_DIM) + 1 for _ in range(n_dims))
return layer_dim
if __name__ == '__main__':
ai = AI('white')
| 34.013812 | 103 | 0.70925 | 1,880 | 12,313 | 4.465426 | 0.128191 | 0.030137 | 0.0324 | 0.010721 | 0.359619 | 0.293985 | 0.231805 | 0.186182 | 0.14592 | 0.131983 | 0 | 0.013933 | 0.154796 | 12,313 | 361 | 104 | 34.108033 | 0.792736 | 0.023715 | 0 | 0.202703 | 0 | 0 | 0.036365 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128378 | false | 0.003378 | 0.027027 | 0.006757 | 0.243243 | 0.006757 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b686d0afc43a520dbac2ae42822b469d33f9f3d0 | 4,901 | py | Python | python/caffe/custom_layers/adaptive_weighting_loss_layer.py | asmorkalov/training_toolbox_caffe | 08716d344da7d78cb7ede4646467c15e86852ae7 | [
"Apache-2.0"
] | null | null | null | python/caffe/custom_layers/adaptive_weighting_loss_layer.py | asmorkalov/training_toolbox_caffe | 08716d344da7d78cb7ede4646467c15e86852ae7 | [
"Apache-2.0"
] | null | null | null | python/caffe/custom_layers/adaptive_weighting_loss_layer.py | asmorkalov/training_toolbox_caffe | 08716d344da7d78cb7ede4646467c15e86852ae7 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
import numpy as np
from caffe._caffe import log as LOG
from caffe._caffe import Layer as BaseLayer
class AdaptiveWeightingLossLayer(BaseLayer):
"""Layer for adaptive weighting between the input losses."""
def _load_params(self, param_str, num_variables):
"""Loads layer parameters.
:param param_str: Input str of parameters
"""
layer_params = eval(param_str)
self._scale = float(layer_params['scale']) if 'scale' in layer_params else 1.0
self._init = layer_params['init'] if 'init' in layer_params else 0.0
self._weights = layer_params['weights'] if 'weights' in layer_params else None
if self._weights is None:
self._weights = np.ones([num_variables], dtype=np.float32)
else:
assert len(self._weights) == num_variables
assert np.all([w > 0.0 for w in self._weights])
def _create_variables(self, num_params, init_value):
"""Initializes internal state"""
self.blobs.add_blob(num_params)
self.blobs[0].data[...] = init_value
def setup(self, bottom, top):
"""Initializes layer.
:param bottom: List of bottom blobs
:param top: List of top blobs
"""
try:
self._load_params(self.param_str, num_variables=len(bottom))
num_variables = len(bottom)
self._create_variables(num_variables, self._init)
except Exception:
LOG('AdaptiveWeightingLossLayer setup exception: {}'.format(traceback.format_exc()))
exit()
def forward(self, bottom, top):
"""Carry out forward pass.
:param bottom: List of bottom blobs
:param top: List of top blobs
"""
try:
num_variables = len(bottom)
assert num_variables > 0
assert len(top) == 1 or len(top) == 1 + num_variables
samples = []
losses = []
for i in xrange(num_variables):
loss_value = np.array(bottom[i].data, dtype=np.float32).reshape([-1])
assert len(loss_value) == 1
loss_value = loss_value[0]
if loss_value > 0.0:
param_value = self.blobs[0].data[i]
loss_factor = np.exp(-param_value)
new_loss_value = param_value + self._scale * loss_factor * loss_value
samples.append((i, self._scale * loss_factor, self._scale * loss_factor * loss_value))
losses.append(self._weights[i] * new_loss_value)
top[0].data[...] = np.sum(losses) if len(losses) > 0 else 0.0
if len(top) == 1 + num_variables:
for i in xrange(num_variables):
top[i + 1].data[...] = np.copy(bottom[i].data)
self._samples = samples
except Exception:
LOG('AdaptiveWeightingLossLayer forward pass exception: {}'.format(traceback.format_exc()))
exit()
def backward(self, top, propagate_down, bottom):
"""Carry out backward pass.
:param top: List of top blobs
:param propagate_down: List of indicators to carry out back-propagation for
the specified bottom blob
:param bottom: List of bottom blobs
"""
try:
num_variables = len(bottom)
for i in xrange(num_variables):
bottom[i].diff[...] = 0.0
top_diff_value = top[0].diff[0]
for i, loss_scale, var_scale in self._samples:
if propagate_down[i]:
bottom[i].diff[...] = self._weights[i] * loss_scale * top_diff_value
self.blobs[0].diff[i] += self._weights[i] * (1.0 - var_scale) * top_diff_value
except Exception:
LOG('AdaptiveWeightingLossLayer backward pass exception: {}'.format(traceback.format_exc()))
exit()
def reshape(self, bottom, top):
"""Carry out blob reshaping.
:param bottom: List of bottom blobs
:param top: List of top blobs
"""
top[0].reshape(1)
num_variables = len(bottom)
if len(top) == 1 + num_variables:
for i in xrange(num_variables):
top[i + 1].reshape(1)
| 34.758865 | 106 | 0.598653 | 619 | 4,901 | 4.589661 | 0.240711 | 0.071806 | 0.026399 | 0.036959 | 0.250616 | 0.229145 | 0.159099 | 0.121084 | 0.090109 | 0.090109 | 0 | 0.013454 | 0.302387 | 4,901 | 140 | 107 | 35.007143 | 0.81749 | 0.243828 | 0 | 0.271429 | 0 | 0 | 0.052557 | 0.022159 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.085714 | false | 0.028571 | 0.057143 | 0 | 0.157143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6871d09585056a5b2254aec8a95efcb0fbeee1d | 1,843 | py | Python | src/cgr_gwas_qc/cluster_profiles/biowulf/status.py | Monia234/NCI-GwasQc | 9e3ca52085c891e1d4d7972e5337c4a1888f992c | [
"MIT"
] | null | null | null | src/cgr_gwas_qc/cluster_profiles/biowulf/status.py | Monia234/NCI-GwasQc | 9e3ca52085c891e1d4d7972e5337c4a1888f992c | [
"MIT"
] | 43 | 2021-03-02T04:10:01.000Z | 2022-03-16T20:26:55.000Z | src/cgr_gwas_qc/cluster_profiles/biowulf/status.py | Monia234/NCI-GwasQc | 9e3ca52085c891e1d4d7972e5337c4a1888f992c | [
"MIT"
] | 2 | 2021-03-02T12:27:00.000Z | 2021-12-16T03:22:20.000Z | #!/usr/bin/env python3
import logging
import re
import shlex
import subprocess as sp
import sys
import time
from typing import Optional
logger = logging.getLogger("__name__")
logger.setLevel(40)
MAX_STATUS_ATTEMPTS = 20
STATUS_CODES = {
"BOOT_FAIL": "failed",
"CANCELLED": "failed",
"COMPLETED": "success",
"DEADLINE": "failed",
"FAILED": "failed",
"NODE_FAIL": "failed",
"OUT_OF_MEMORY": "failed",
"PENDING": "running",
"PREEMPTED": "failed",
"RUNNING": "running",
"REQUEUED": "running",
"RESIZING": "running",
"REVOKED": "running",
"SUSPENDED": "failed",
"TIMEOUT": "failed",
}
def main():
job_id = int(sys.argv[1])
for _ in range(MAX_STATUS_ATTEMPTS):
job_status = check_sacct(job_id) or check_scontrol(job_id)
if job_status:
break
time.sleep(5)
print(job_status or "failed")
def check_sacct(job_id: int) -> Optional[str]:
try:
job_info = sp.check_output(shlex.split(f"sacct -P -b -j {job_id} -n"))
except sp.CalledProcessError as err:
logger.error("sacct process error")
logger.error(err)
return None
try:
status = {x.split("|")[0]: x.split("|")[1] for x in job_info.decode().strip().split("\n")}
return STATUS_CODES.get(status[f"{job_id}"], None)
except IndexError:
return None
def check_scontrol(job_id: int) -> Optional[str]:
try:
job_info = sp.check_output(shlex.split(f"scontrol -o show job {job_id}"))
except sp.CalledProcessError as err:
logger.error("scontrol process error")
logger.error(err)
return None
m = re.search(r"JobState=(\w+)", job_info.decode())
status = {job_id: m.group(1)} if m else {}
return STATUS_CODES.get(status[job_id], None)
if __name__ == "__main__":
main()
| 24.573333 | 98 | 0.62344 | 243 | 1,843 | 4.534979 | 0.395062 | 0.045372 | 0.021779 | 0.027223 | 0.284936 | 0.23775 | 0.23775 | 0.096189 | 0.096189 | 0.096189 | 0 | 0.007003 | 0.225176 | 1,843 | 74 | 99 | 24.905405 | 0.764706 | 0.011394 | 0 | 0.169492 | 0 | 0 | 0.200439 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.118644 | 0 | 0.254237 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b687fc10ba01cb9f0419af4e607ce6c85279c90f | 49,769 | py | Python | editor.py | azagoruyko/rigBuilder | bd744704d3fee1ab7cd85a08c735e6bf044fd27e | [
"Apache-2.0"
] | 2 | 2022-03-20T03:13:24.000Z | 2022-03-20T03:14:11.000Z | editor.py | azagoruyko/rigBuilder | bd744704d3fee1ab7cd85a08c735e6bf044fd27e | [
"Apache-2.0"
] | null | null | null | editor.py | azagoruyko/rigBuilder | bd744704d3fee1ab7cd85a08c735e6bf044fd27e | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from Qt.QtGui import *
from Qt.QtCore import *
from Qt.QtWidgets import *
import re
def clamp(mn, mx, val):
    """Return *val* limited to the inclusive range [mn, mx]."""
    return max(mn, min(mx, val))
def highlightLine(widget, line=-1, clear=False):
    """Paint (or, with clear=True, erase) a background highlight on one line.

    line == -1 targets the line under the widget's current text cursor; any
    other value addresses that block number in the document. As a side effect
    the widget's cursor is moved to the start of the highlighted line.
    """
    if line == -1:
        textBlock = widget.textCursor().block()
    else:
        textBlock = widget.document().findBlockByLineNumber(line)

    if not textBlock.isValid():
        return

    # A default-constructed char format clears the highlight; a colored
    # background paints it.
    lineFormat = QTextCharFormat()
    if not clear:
        lineFormat.setBackground(QColor(50, 80, 100))

    lineCursor = widget.textCursor()
    lineCursor.setPosition(textBlock.position())
    lineCursor.select(QTextCursor.LineUnderCursor)
    lineCursor.setCharFormat(lineFormat)
    lineCursor.clearSelection()
    lineCursor.movePosition(QTextCursor.StartOfLine)
    widget.setTextCursor(lineCursor)
class PythonHighlighter(QSyntaxHighlighter):
    """Syntax highlighter for Python source in the editor widget.

    Highlighting is rule based: ``highlightingRules`` holds
    (QRegExp, QTextCharFormat) pairs applied independently to each text
    block, plus a stateful pass (``match_multiline``) for triple-quoted
    strings and an optional ``highlightedWordRegexp`` set externally to
    mark search hits.
    """

    def __init__(self, parent=None):
        super(PythonHighlighter, self).__init__(parent)

        self.highlightingRules = []

        # Assignment targets: a name followed (lookahead) by =, +=, -=, *= or /=.
        assignFormat = QTextCharFormat()
        assignFormat.setForeground(QColor(200, 150, 100))
        assignRegexp = QRegExp("\\b(\\w+)\\s*(?=[-+*/]*=)")
        assignRegexp.setMinimal(True)
        self.highlightingRules.append((assignRegexp, assignFormat))

        # Numeric literals: hex (0x...) or decimal/float (optionally 'f'-suffixed).
        numFormat = QTextCharFormat()
        numFormat.setForeground(QColor(150, 200, 150))
        self.highlightingRules.append((QRegExp("\\b(0x[0-9]+)\\b|\\b[0-9\\.]+f*\\b"), numFormat))

        # Call sites: any identifier immediately followed by '('.
        functionFormat = QTextCharFormat()
        functionFormat.setForeground(QColor(100, 150, 200))
        self.highlightingRules.append((QRegExp("\\b\\w+(?=\\s*\\()"), functionFormat))

        # Python keywords (plus 'print'/'exec'/'eval'/'self'), word-bounded.
        keywordFormat = QTextCharFormat()
        keywordFormat.setForeground(QColor(150, 130, 200))
        keywords = ["\\b%s\\b"%k for k in ["False", "await", "else", "import", "pass",
                                           "None", "break", "except", "in", "raise",
                                           "True", "class", "finally", "is", "return",
                                           "and", "continue", "for", "lambda", "try",
                                           "as", "def", "from", "nonlocal", "while","exec", "eval",
                                           "assert", "del", "global", "not", "with",
                                           "async", "elif", "if", "or", "yield", "print", "self"]]
        self.highlightingRules += [(QRegExp(pattern), keywordFormat) for pattern in keywords]

        # True/False/None override the generic keyword color (rule applied later wins).
        boolFormat = QTextCharFormat()
        boolFormat.setForeground(QColor(200, 100, 50))
        self.highlightingRules.append((QRegExp("\\bTrue\\b|\\bFalse\\b|\\bNone\\b"), boolFormat))

        # Decorators: @name.
        attrFormat = QTextCharFormat()
        attrFormat.setForeground(QColor(100, 180, 180))
        self.highlightingRules.append((QRegExp("@\\b\\w+\\b"), attrFormat))

        # Single- and double-quoted string literals, allowing escaped quotes.
        self.quotationFormat = QTextCharFormat()
        self.quotationFormat.setForeground(QColor(130, 200, 130))
        self.highlightingRules.append((QRegExp("(\"(\\\\\"|[^\"])*\")|(\'(\\\\\'|[^\'])*\')"), self.quotationFormat))

        # '#' comments to end of line.
        singleLineCommentFormat = QTextCharFormat()
        singleLineCommentFormat.setForeground(QColor(90, 90, 90))
        self.highlightingRules.append((QRegExp("#[^\\n]*"), singleLineCommentFormat))

        # Format used by the stateful triple-quote pass (not in the rule list).
        self.multiLineCommentFormat = QTextCharFormat()
        self.multiLineCommentFormat.setForeground(QColor(170, 170, 100))

        # Format and pattern for externally-requested word highlighting;
        # highlightedWordRegexp stays None until a caller assigns it.
        self.highlightedWordFormat = QTextCharFormat()
        self.highlightedWordFormat.setForeground(QColor(200, 200, 200))
        self.highlightedWordFormat.setBackground(QBrush(QColor(100, 55, 170)))
        self.highlightedWordRegexp = None

    def highlightBlock(self, text):
        """Apply all regex rules, then the triple-quote pass, then the
        optional highlighted-word pattern to one text block."""
        for pattern, format in self.highlightingRules:
            if not pattern:
                continue
            expression = QRegExp(pattern)
            index = expression.indexIn(text)
            # Format every non-overlapping occurrence on this line.
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)

        self.setCurrentBlockState(0)

        # Do multi-line strings
        # State 1 tracks ''' strings, state 2 tracks """ strings.
        in_multiline = self.match_multiline(text, QRegExp("'''"), 1, self.multiLineCommentFormat)
        if not in_multiline:
            in_multiline = self.match_multiline(text, QRegExp('"""'), 2, self.multiLineCommentFormat)

        # Overlay the externally-set search highlight last so it wins.
        if self.highlightedWordRegexp:
            expression = QRegExp(self.highlightedWordRegexp)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, self.highlightedWordFormat)
                index = expression.indexIn(text, index + length)

    def match_multiline(self, text, delimiter, in_state, style):
        """Do highlighting of multi-line strings. ``delimiter`` should be a
        ``QRegExp`` for triple-single-quotes or triple-double-quotes, and
        ``in_state`` should be a unique integer to represent the corresponding
        state changes when inside those strings. Returns True if we're still
        inside a multi-line string when this function is finished.
        """
        # If inside triple-single quotes, start at 0
        if self.previousBlockState() == in_state:
            start = 0
            add = 0
        # Otherwise, look for the delimiter on this line
        else:
            start = delimiter.indexIn(text)
            # Move past this match
            add = delimiter.matchedLength()

        # As long as there's a delimiter match on this line...
        while start >= 0:
            # Look for the ending delimiter
            end = delimiter.indexIn(text, start + add)
            # Ending delimiter on this line?
            if end >= add:
                length = end - start + add + delimiter.matchedLength()
                self.setCurrentBlockState(0)
            # No; multi-line string
            else:
                self.setCurrentBlockState(in_state)
                length = len(text) - start + add
            # Apply formatting
            self.setFormat(start, length, style)
            # Look for the next match
            start = delimiter.indexIn(text, start + length)

        # Return True if still inside a multi-line string, False otherwise
        if self.currentBlockState() == in_state:
            return True
        else:
            return False
class SwoopHighligher(QSyntaxHighlighter):
    """Highlighter for the Swoop results pane.

    Colors leading line numbers, header-like lines, [sub headers] and
    ``//`` comments. The final rule is a placeholder whose pattern is
    injected at runtime by the search dialog to mark the active matches.
    """

    def __init__(self, parent=None):
        super(SwoopHighligher, self).__init__(parent)

        def makeFormat(foreground, background=None, bold=False):
            # Small factory so each rule is a one-liner below.
            fmt = QTextCharFormat()
            fmt.setForeground(foreground)
            if background is not None:
                fmt.setBackground(QBrush(background))
            if bold:
                fmt.setFontWeight(QFont.Bold)
            return fmt

        self.highlightingRules = [
            # leading line numbers
            (QRegExp("^\\s*\\d+\\s+"), makeFormat(QColor(180, 100, 120))),
            # header lines starting with a letter
            (QRegExp("^[a-zA-Z][\\w -]*"), makeFormat(QColor(120, 100, 180), bold=True)),
            # [sub headers] at end of line
            (QRegExp("\\[[\\w ]+\\]$"), makeFormat(QColor(120, 180, 120))),
            # // comments
            (QRegExp("//.*$"), makeFormat(QColor(90, 90, 90))),
            # placeholder slot: pattern is filled in later for the active search
            (None, makeFormat(QColor(200, 200, 200), background=QColor(100, 55, 170))),
        ]

    def highlightBlock(self, text):
        """Apply every rule with a non-empty pattern to one text block."""
        for pattern, fmt in self.highlightingRules:
            # 'not pattern' also skips an injected empty-string pattern,
            # which would otherwise match endlessly at length 0.
            if not pattern:
                continue
            expression = QRegExp(pattern)
            pos = expression.indexIn(text)
            while pos >= 0:
                matchedLength = expression.matchedLength()
                self.setFormat(pos, matchedLength, fmt)
                pos = expression.indexIn(text, pos + matchedLength)
        self.setCurrentBlockState(0)
class SwoopSearchDialog(QDialog):
def __init__(self, edit, **kwargs):
super(SwoopSearchDialog, self).__init__(**kwargs)
self.edit = edit
self.setWindowFlags(Qt.FramelessWindowHint)
self.setWindowTitle("Swoop")
layout = QVBoxLayout()
self.setLayout(layout)
self.filterWidget = QLineEdit()
self.filterWidget.setToolTip("Ctrl-C - case sensitive<br>Ctrl-W - word boundary<br>Ctrl-B - find inside brackets<br>Ctrl-D - down only<br>Ctrl-R - replace mode")
self.filterWidget.textChanged.connect(lambda:self.filterTextChanged())
self.filterWidget.keyPressEvent = self.filterKeyPressEvent
self.resultsWidget = QTextEdit()
self.resultsWidget.setReadOnly(True)
self.resultsWidget.setWordWrapMode(QTextOption.NoWrap)
self.resultsWidget.syntax = SwoopHighligher(self.resultsWidget.document())
self.resultsWidget.mousePressEvent = self.resultsMousePressEvent
self.resultsWidget.keyPressEvent = self.filterWidget.keyPressEvent
self.statusWidget = QLabel()
self.statusWidget.hide()
layout.addWidget(self.filterWidget)
layout.addWidget(self.resultsWidget)
layout.addWidget(self.statusWidget)
self.rejected.connect(self.whenRejected)
self.initialize()
    def initialize(self):
        """Reset all search flags and capture the editor's current state.

        Seeds the filter with the editor's selection (or the word under the
        cursor) and hides the dialog until it is shown again.
        """
        self.useWordBoundary = False
        self.findInsideBrackets = False
        self.caseSensitive = True
        self.downOnly = False
        self.replaceMode = False
        self.numberSeparator = " "  # separates line number from text in result lines
        self.previousPattern = None
        self.previousLines = []
        self.savedSettings = {}
        # self.text must be assigned before updateSavedCursor(), which reads it.
        self.text = unicode(self.edit.toPlainText())
        lines = self.text.split("\n")
        cursor = self.edit.textCursor()
        self.updateSavedCursor()
        self.savedSettings["lines"] = lines
        # Prefer the current selection as the initial filter; fall back to
        # the word under the cursor (wordAtCursor is defined elsewhere in the file).
        findText = unicode(cursor.selectedText())
        if not findText:
            findText = wordAtCursor(cursor)[0]
        self.filterWidget.setText(findText)
        self.filterWidget.setStyleSheet("")  # drop any leftover replace-mode tint
        self.hide()
def updateSavedCursor(self):
cursor = self.edit.textCursor()
brackets = findBracketSpans(self.text, cursor.position(), brackets="{")
self.savedSettings["cursor"] = cursor
self.savedSettings["scroll"] = self.edit.verticalScrollBar().value()
self.savedSettings["brackets"] = brackets
self.findInsideBrackets = brackets[0] and self.findInsideBrackets
    def showEvent(self, event):
        # Re-capture the editor state each time the popup appears, position
        # the dialog under the caret, then focus the filter field.
        self.updateSavedCursor()
        self.reposition()
        self.filterWidget.setFocus()
    def update(self):
        # NOTE(review): shadows QWidget.update(). Here it re-reads the editor
        # text and refreshes the status label and the result list.
        self.initialize()
        self.updateStatus()
        self.filterTextChanged()
def resultsMousePressEvent(self, event):
cursor = self.resultsWidget.cursorForPosition(event.pos())
highlightLine(self.resultsWidget, clear=True)
highlightLine(self.resultsWidget, cursor.block().blockNumber())
self.resultsLineChanged()
def reposition(self):
c = self.edit.cursorRect().topLeft()
w = self.resultsWidget.document().idealWidth() + 30
h = self.resultsWidget.document().blockCount()*self.resultsWidget.cursorRect().height() + 110
self.setGeometry(c.x(), c.y() + 22, clamp(0, 500, w), clamp(0, 400, h))
def resultsLineChanged(self):
if self.replaceMode:
return
cursor = self.resultsWidget.textCursor()
cursor.select(QTextCursor.LineUnderCursor)
line = unicode(cursor.selectedText())
if not line:
return
lineNumber, text = re.search("^([0-9]+)\\s-*(.*)$", line).groups("")
self.edit.gotoLine(int(lineNumber))
currentFilter = self.getFilterPattern()
r = re.search(currentFilter, text, re.IGNORECASE if not self.caseSensitive else 0)
if r:
cursor = self.edit.textCursor()
pos = cursor.block().position() + r.start() - 1
if pos >- 0:
cursor.setPosition(pos)
self.edit.setTextCursor(cursor)
cursorY = self.edit.cursorRect().top()
scrollBar = self.edit.verticalScrollBar()
scrollBar.setValue(scrollBar.value() + cursorY - self.edit.geometry().height()/2)
self.reposition()
def updateStatus(self):
items = []
if self.useWordBoundary:
items.append("[word]")
if self.caseSensitive:
items.append("[case]")
if self.findInsideBrackets:
items.append("[brackets]")
if self.downOnly:
items.append("[down]")
if self.replaceMode:
items.append("[REPLACE '%s']"%self.previousPattern)
if items:
self.statusWidget.setText(" ".join(items))
self.statusWidget.show()
else:
self.statusWidget.hide()
    def filterKeyPressEvent(self, event):
        """Key handler shared by the filter line and the results pane.

        Up/Down/PageUp/PageDown move the highlighted result (page keys jump
        5 lines); Ctrl-W/B/D/C toggle the word-boundary, bracket-scope,
        down-only and case-sensitivity flags (disabled in replace mode);
        Ctrl-R toggles replace mode; Return applies replacements when in
        replace mode; all other keys fall through to the filter line edit.
        """
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        rw = self.resultsWidget
        line = rw.textCursor().block().blockNumber()
        lineCount = rw.document().blockCount()-1  # index of the last result line
        if event.key() in [Qt.Key_Down, Qt.Key_Up, Qt.Key_PageDown, Qt.Key_PageUp]:
            # Clear the old highlight, paint the new one (clamped to the list).
            if event.key() == Qt.Key_Down:
                highlightLine(rw, clamp(0, lineCount, line), clear=True)
                highlightLine(rw, clamp(0, lineCount, line+1))
            elif event.key() == Qt.Key_Up:
                highlightLine(rw, clamp(0, lineCount, line), clear=True)
                highlightLine(rw, clamp(0, lineCount, line-1))
            elif event.key() == Qt.Key_PageDown:
                highlightLine(rw, clamp(0, lineCount, line), clear=True)
                highlightLine(rw, clamp(0, lineCount, line+5))
            elif event.key() == Qt.Key_PageUp:
                highlightLine(rw, clamp(0, lineCount, line), clear=True)
                highlightLine(rw, clamp(0, lineCount, line-5))
            self.resultsLineChanged()
        elif ctrl and event.key() == Qt.Key_W: # use word boundary
            if not self.replaceMode:
                self.useWordBoundary = not self.useWordBoundary
                self.updateStatus()
                self.filterTextChanged()
        elif ctrl and event.key() == Qt.Key_B: # find inside brackets
            if not self.replaceMode:
                self.findInsideBrackets = not self.findInsideBrackets
                self.updateSavedCursor()
                self.updateStatus()
                self.filterTextChanged()
        elif ctrl and event.key() == Qt.Key_D: # down only
            if not self.replaceMode:
                self.downOnly = not self.downOnly
                self.updateSavedCursor()
                self.updateStatus()
                self.filterTextChanged()
        elif ctrl and event.key() == Qt.Key_C: # case sensitive
            # Ctrl-C keeps its usual meaning when text is selected: copy.
            if self.filterWidget.selectedText():
                self.filterWidget.copy()
            else:
                if not self.replaceMode:
                    self.caseSensitive = not self.caseSensitive
                    self.updateStatus()
                    self.filterTextChanged()
        elif ctrl and event.key() == Qt.Key_R: # replace mode
            self.replaceMode = not self.replaceMode
            if self.replaceMode:
                # Tint the filter line and freeze the current pattern as the
                # search side of the replacement.
                self.filterWidget.setStyleSheet("background-color: #433567")
                self.previousPattern = self.getFilterPattern()
            else:
                self.filterWidget.setStyleSheet("")
                self.filterTextChanged()
            self.updateStatus()
        elif event.key() == Qt.Key_F3:
            self.accept()
        elif event.key() == Qt.Key_Return: # accept
            if self.replaceMode:
                # Write every previewed line from the results pane back into
                # the editor document as a single undo step.
                cursor = self.edit.textCursor()
                savedBlock = self.savedSettings["cursor"].block()
                savedColumn = self.savedSettings["cursor"].positionInBlock()
                doc = self.edit.document()
                cursor.beginEditBlock()
                lines = unicode(self.resultsWidget.toPlainText()).split("\n")
                for line in lines:
                    if not line.strip():
                        continue
                    lineNumber, text = re.search("^([0-9]+)%s(.*)$"%self.numberSeparator, line).groups("")
                    lineNumber = int(lineNumber)
                    blockPos = doc.findBlockByLineNumber(lineNumber-1).position()
                    cursor.setPosition(blockPos)
                    cursor.select(QTextCursor.LineUnderCursor)
                    cursor.removeSelectedText()
                    cursor.insertText(text)
                cursor.endEditBlock()
                # Restore the pre-dialog cursor and scroll position.
                cursor.setPosition(savedBlock.position() + savedColumn)
                self.edit.setTextCursor(cursor)
                self.edit.verticalScrollBar().setValue(self.savedSettings["scroll"])
                self.edit.setFocus()
                self.accept()
        else:
            # Everything else is normal text entry into the filter line.
            QLineEdit.keyPressEvent(self.filterWidget, event)
    def whenRejected(self):
        # Dialog cancelled: restore the editor cursor and scroll position
        # captured when the dialog opened, and hand focus back to the editor.
        self.edit.setTextCursor(self.savedSettings["cursor"])
        self.edit.verticalScrollBar().setValue(self.savedSettings["scroll"])
        self.edit.setFocus()
def getFilterPattern(self):
currentFilter = re.escape(unicode(self.filterWidget.text()))
if not currentFilter:
return ""
if self.useWordBoundary:
currentFilter = "\\b" + currentFilter + "\\b"
return currentFilter
def filterTextChanged(self):
    """Rebuild the results pane whenever the filter text changes.

    In replace mode, every previously matched line is shown with the old
    pattern substituted by the new filter text.  In search mode, the saved
    document lines are scanned with the (escaped) filter regex and the
    matches are listed as "<lineno><sep><text>" entries.
    """
    self.resultsWidget.clear()
    self.resultsWidget.setCurrentCharFormat(QTextCharFormat())
    if self.replaceMode: # replace mode
        # Backslashes doubled so the typed text is a literal replacement
        # string for re.sub, not an escape sequence.
        subStr = unicode(self.filterWidget.text()).replace("\\", "\\\\")
        pattern = self.getFilterPattern()
        lines = []
        for line in self.previousLines:
            # Each stored line is "<number><separator><text>".
            n, text = re.search("^([0-9]+)%s(.*)$"%self.numberSeparator, line).groups("")
            text = re.sub(self.previousPattern, subStr, text, 0, re.IGNORECASE if not self.caseSensitive else 0)
            newLine = "%s%s%s"%(n, self.numberSeparator, text)
            lines.append(newLine)
        self.resultsWidget.setText("\n".join(lines))
        # The last highlighting rule is the "current match" rule; swap in
        # the new pattern and re-run the highlighter.
        self.resultsWidget.syntax.highlightingRules[-1] = (pattern, self.resultsWidget.syntax.highlightingRules[-1][1])
        self.resultsWidget.syntax.rehighlight()
    else: # search mode
        startBlock, endBlock = 0, 0
        if self.findInsideBrackets:
            # Restrict the scan to the block range of the saved bracket span.
            cursor = QTextCursor(self.savedSettings["cursor"])
            cursor.setPosition(self.savedSettings["brackets"][1])
            startBlock = cursor.block().blockNumber()
            cursor.setPosition(self.savedSettings["brackets"][2])
            endBlock = cursor.block().blockNumber()
        if self.downOnly:
            # Only search from the saved cursor position downwards.
            cursor = QTextCursor(self.savedSettings["cursor"])
            startBlock = cursor.block().blockNumber()
        currentFilter = self.getFilterPattern()
        currentBlock = self.edit.textCursor().block().blockNumber()
        self.previousLines = []
        # NOTE(review): currentFilterText is computed but never used below.
        currentFilterText = unicode(self.filterWidget.text()).replace("\\", "\\\\")
        counter = 0
        currentIndex = 0
        for i, line in enumerate(self.savedSettings["lines"]):
            if not line.strip():
                continue
            if self.findInsideBrackets and (i < startBlock or i > endBlock):
                continue
            if self.downOnly and i < startBlock:
                continue
            if i == currentBlock:
                # Remember which result row corresponds to the editor's line.
                currentIndex = counter
            r = re.search(currentFilter, line, re.IGNORECASE if not self.caseSensitive else 0)
            if r:
                item = "%s%s%s"%(i+1, self.numberSeparator, line)
                self.previousLines.append(item)
                counter += 1
        self.resultsWidget.setText("\n".join(self.previousLines))
        self.resultsWidget.syntax.highlightingRules[-1] = (currentFilter, self.resultsWidget.syntax.highlightingRules[-1][1])
        self.resultsWidget.syntax.rehighlight()
        highlightLine(self.resultsWidget, currentIndex)
    # Sync the editor view with the currently highlighted result row.
    self.resultsLineChanged()
class CodeEditorWidget(QTextEdit):
    """A QTextEdit-based code editor with bookmarks, swoop search,
    auto-indentation, tab/backtab block indent, auto-completion popup,
    line moving/duplication and comment toggling.

    Per-preset cursor/scroll/bookmark state is kept in the class-level
    ``editorState`` dict (shared by all instances on purpose).
    """
    editorState = {}        # preset name -> {"cursor", "scroll", "bookmarks"}
    TabSpaces = 4           # number of spaces inserted per Tab press
    def __init__(self, **kwargs):
        super(CodeEditorWidget, self).__init__(**kwargs)
        self.formatFunction = None          # optional callable: text -> formatted text
        self.preset = "default"             # key into editorState
        self.lastSearch = ""
        self.lastReplace = ""
        self.thread = None
        self.canShowCompletions = True
        self.currentFontPointSize = 16
        self.words = []                     # extra words offered by the completer
        self.currentWord = ("", 0, 0)       # (word, start, end) at the cursor
        self.searchStartWord = ("", 0, 0)
        self.prevCursorPosition = 0
        self.swoopSearchDialog = SwoopSearchDialog(self, parent=self)
        self.setContextMenuPolicy(Qt.DefaultContextMenu)
        self.completionWidget = CompletionWidget([], parent=self)
        self.completionWidget.hide()
        self.setTabStopWidth(32)
        self.setAcceptRichText(False)
        self.setWordWrapMode(QTextOption.NoWrap)
        self.cursorPositionChanged.connect(self.editorCursorPositionChanged)
        # Scrolling only persists the scroll value, not cursor/bookmarks.
        self.verticalScrollBar().valueChanged.connect(lambda _: self.saveState(cursor=False, scroll=True, bookmarks=False))
        self.textChanged.connect(self.editorTextChanged)
    def event(self, event):
        """Intercept Tab before Qt's focus handling: insert spaces, or
        indent every line of the current selection."""
        if event.type() == QEvent.KeyPress:
            if event.key() == Qt.Key_Tab:
                cursor = self.textCursor()
                tabSpaces = " "*CodeEditorWidget.TabSpaces
                start = cursor.selectionStart()
                end = cursor.selectionEnd()
                cursor.beginEditBlock()
                if end == start:
                    cursor.insertText(tabSpaces)
                else:
                    cursor.clearSelection()
                    cursor.setPosition(start)
                    while cursor.position() < end:
                        cursor.movePosition(QTextCursor.StartOfLine)
                        cursor.insertText(tabSpaces)
                        if not cursor.movePosition(QTextCursor.Down):
                            break
                        # Each insertion shifts the selection end rightwards.
                        end += len(tabSpaces)
                cursor.endEditBlock()
                event.accept()
                return True
        return super(CodeEditorWidget, self).event(event)
    def setBookmark(self, line=-1):
        """Toggle a bookmark on the given line (current line when -1)."""
        if line == -1:
            block = self.textCursor().block()
        else:
            block = self.document().findBlockByNumber(line)
        blockData = block.userData()
        if not blockData:
            blockData = TextBlockData()
            blockData.hasBookmark = True
        else:
            blockData.hasBookmark = not blockData.hasBookmark
        if isinstance(self.parent(), CodeEditorWithNumbersWidget):
            # Repaint the number bar so the bookmark marker shows up.
            self.parent().numberBarWidget.update()
        block.setUserData(blockData)
        self.saveState(cursor=False, scroll=False, bookmarks=True)
    def gotoNextBookmark(self, start=-1):
        """Jump to the first bookmarked line after ``start`` (defaults to
        the line below the cursor); does nothing when none is found."""
        doc = self.document()
        if start == -1:
            start = self.textCursor().block().blockNumber()+1
        for i in range(start, doc.blockCount()):
            b = doc.findBlockByNumber(i)
            blockData = b.userData()
            if blockData and blockData.hasBookmark:
                self.setTextCursor(QTextCursor(b))
                self.centerLine()
                break
    def loadState(self, cursor=True, scroll=True, bookmarks=True):
        """Restore cursor/scroll/bookmarks for the current preset; falls
        back to top-of-document when no state was saved."""
        scrollBar = self.verticalScrollBar()
        # Block signals so restoring state does not trigger saveState again.
        self.blockSignals(True)
        scrollBar.blockSignals(True)
        if not self.preset or not self.editorState.get(self.preset):
            c = self.textCursor()
            c.setPosition(0)
            self.setTextCursor(c)
            scrollBar.setValue(0)
        else:
            state = self.editorState[self.preset]
            if cursor:
                c = self.textCursor()
                c.setPosition(state["cursor"])
                self.setTextCursor(c)
            if scroll:
                scrollBar = self.verticalScrollBar()
                scrollBar.setValue(state["scroll"])
            if bookmarks:
                doc = self.document()
                for i in state.get("bookmarks", []):
                    # NOTE(review): `b` is unused; setBookmark(i) re-resolves the block.
                    b = doc.findBlockByNumber(i)
                    self.setBookmark(i)
        self.blockSignals(False)
        scrollBar.blockSignals(False)
    def saveState(self, cursor=True, scroll=True, bookmarks=False):
        """Persist the selected pieces of editor state for this preset."""
        if not self.preset:
            return
        if not self.editorState.get(self.preset):
            self.editorState[self.preset] = {"cursor": 0, "scroll": 0, "bookmarks": []}
        state = self.editorState[self.preset]
        if cursor:
            state["cursor"] = self.textCursor().position()
        if scroll:
            state["scroll"] = self.verticalScrollBar().value()
        if bookmarks:
            doc = self.document()
            state["bookmarks"] = []
            for i in range(doc.blockCount()):
                b = doc.findBlockByNumber(i)
                data = b.userData()
                if data and data.hasBookmark:
                    state["bookmarks"].append(i)
    def contextMenuEvent(self, event):
        """Build the right-click menu (format, swoop, goto line, select all)."""
        menu = QMenu(self)
        if callable(self.formatFunction):
            formatAction = QAction("Format\tALT-SHIFT-F", self)
            formatAction.triggered.connect(lambda: self.setTextSafe((self.formatFunction(unicode(self.toPlainText())))))
            menu.addAction(formatAction)
        swoopAction = QAction("Swoop search\tF3", self)
        swoopAction.triggered.connect(lambda: self.swoopSearch(True))
        menu.addAction(swoopAction)
        gotoLineAction = QAction("Goto line\tCtrl-G", self)
        gotoLineAction.triggered.connect(self.gotoLine)
        menu.addAction(gotoLineAction)
        selectAllAction = QAction("Select All", self)
        selectAllAction.triggered.connect(self.selectAll)
        menu.addAction(selectAllAction)
        menu.popup(event.globalPos())
    def wheelEvent(self, event):
        """Ctrl+wheel zooms the font (clamped to 8..20 px); otherwise
        default scrolling."""
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        if ctrl:
            # NOTE(review): divides by abs(delta) to get the sign; a zero
            # delta (possible for some touchpad events) would raise here.
            d = event.delta() / abs(event.delta())
            self.currentFontPointSize = clamp(8, 20, self.currentFontPointSize + d)
            self.setStyleSheet("font-size: %dpx;"%self.currentFontPointSize)
        else:
            QTextEdit.wheelEvent(self, event)
    def setTextSafe(self, text, withUndo=True):
        """Replace the whole document while preserving cursor position and
        scroll value; with ``withUndo`` the change stays on the undo stack."""
        scrollBar = self.verticalScrollBar()
        self.blockSignals(True)
        scrollBar.blockSignals(True)
        scroll = scrollBar.value()
        cursor = self.textCursor()
        pos = cursor.position()
        if withUndo:
            # Replace via the cursor so the edit is undoable as one step.
            cursor.select(QTextCursor.Document)
            cursor.beginEditBlock()
            cursor.removeSelectedText()
            cursor.insertText(text)
            cursor.endEditBlock()
        else:
            self.setText(text)
        if pos < len(text):
            cursor.setPosition(pos)
        self.setTextCursor(cursor)
        scrollBar.setValue(scroll)
        self.blockSignals(False)
        scrollBar.blockSignals(False)
    def keyPressEvent(self, event):
        """Dispatch the editor's keyboard shortcuts; unhandled keys fall
        through to QTextEdit."""
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        key = event.key()
        if alt and shift and key == Qt.Key_F:
            if callable(self.formatFunction):
                self.setTextSafe((self.formatFunction(unicode(self.toPlainText()))))
        elif alt and key == Qt.Key_M: # back to indentation
            cursor = self.textCursor()
            linePos = cursor.block().position()
            cursor.select(QTextCursor.LineUnderCursor)
            text = cursor.selectedText()
            cursor.clearSelection()
            found = re.findall("^\\s*", unicode(text))
            offset = len(found[0]) if found else 0
            cursor.setPosition(linePos + offset)
            self.setTextCursor(cursor)
        elif ctrl and key == Qt.Key_H: # highlight selected
            self.highlightSelected()
        elif ctrl and alt and key == Qt.Key_Space:
            # Select the contents of the enclosing bracket pair.
            cursor = self.textCursor()
            pos = cursor.position()
            _, start, end = findBracketSpans(unicode(self.toPlainText()), pos)
            if start != end:
                cursor.setPosition(start+1)
                cursor.setPosition(end, QTextCursor.KeepAnchor)
                self.setTextCursor(cursor)
        elif key in [Qt.Key_Left, Qt.Key_Right]:
            QTextEdit.keyPressEvent(self, event)
            self.completionWidget.hide()
        elif key == Qt.Key_F12: # full screen editor mode
            pass
        elif alt and key == Qt.Key_F2: # set bookmark
            self.setBookmark()
        elif key == Qt.Key_F2: # next bookmark
            n = self.textCursor().block().blockNumber()
            self.gotoNextBookmark()
            if self.textCursor().block().blockNumber() == n:
                # No bookmark below the cursor: wrap to the top.
                self.gotoNextBookmark(0)
        elif key == Qt.Key_F3: # emacs swoop
            self.swoopSearch(not ctrl)
        elif ctrl and key == Qt.Key_G: # goto line
            self.gotoLine()
        elif key == Qt.Key_Escape:
            self.completionWidget.hide()
        elif key == Qt.Key_Return:
            if self.completionWidget.isVisible():
                self.replaceWithAutoCompletion()
                self.completionWidget.hide()
            else:
                # Auto-indent: repeat the current line's leading whitespace.
                cursor = self.textCursor()
                block = unicode(cursor.block().text())
                spc = re.search("^(\\s*)", block).groups("")[0]
                QTextEdit.keyPressEvent(self, event)
                if spc:
                    cursor.insertText(spc)
                    self.setTextCursor(cursor)
        elif key == Qt.Key_Backtab:
            # Shift-Tab: unindent every line in the selection by one tab stop.
            cursor = self.textCursor()
            tabSpaces = " "*CodeEditorWidget.TabSpaces
            start, end = cursor.selectionStart(), cursor.selectionEnd()
            cursor.clearSelection()
            cursor.setPosition(start)
            cursor.beginEditBlock()
            while cursor.position() < end:
                cursor.movePosition(QTextCursor.StartOfLine)
                cursor.movePosition(QTextCursor.NextWord, QTextCursor.KeepAnchor)
                selText = cursor.selectedText()
                # if the text starts with the tab_char, replace it
                if selText.startswith(tabSpaces):
                    text = selText.replace(tabSpaces, "", 1)
                    end -= len(tabSpaces)
                    cursor.insertText(text)
                if not cursor.movePosition(QTextCursor.Down):
                    break
            cursor.endEditBlock()
        elif alt and key == Qt.Key_Up: # move line up
            self.moveLineUp()
        elif alt and key == Qt.Key_Down: # move line down
            self.moveLineDown()
        elif key in [Qt.Key_Up, Qt.Key_Down, Qt.Key_PageDown, Qt.Key_PageUp]:
            if self.completionWidget.isVisible():
                # Navigate inside the completion popup instead of the editor.
                if key == Qt.Key_Down:
                    d = 1
                elif key == Qt.Key_Up:
                    d = -1
                elif key == Qt.Key_PageDown:
                    d = 10
                elif key == Qt.Key_PageUp:
                    d = -10
                line = self.completionWidget.currentLine()
                highlightLine(self.completionWidget, line, clear=True)
                highlightLine(self.completionWidget, clamp(0, self.completionWidget.lineCount()-1, line+d))
            else:
                QTextEdit.keyPressEvent(self, event)
        elif ctrl and key == Qt.Key_L: # center line
            self.centerLine()
        elif ctrl and key == Qt.Key_K: # kill line
            self.killLine()
        elif ctrl and key == Qt.Key_O: # remove redundant lines
            cursor = self.textCursor()
            cursor.beginEditBlock()
            if not unicode(cursor.block().text()).strip():
                cursor.movePosition(QTextCursor.StartOfBlock)
                cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
                cursor.removeSelectedText()
                cursor.movePosition(QTextCursor.Up)
            while not unicode(cursor.block().text()).strip() and not cursor.atStart(): # remove empty lines but last one
                if unicode(cursor.block().previous().text()):
                    break
                cursor.movePosition(QTextCursor.StartOfBlock)
                cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
                cursor.removeSelectedText()
                cursor.movePosition(QTextCursor.Up)
            cursor.endEditBlock()
            self.setTextCursor(cursor)
        elif ctrl and key in [Qt.Key_BracketLeft, Qt.Key_BracketRight]:
            # Jump to the opening/closing bracket of the enclosing pair.
            cursor = self.textCursor()
            pos = cursor.position()
            _, start, end = findBracketSpans(unicode(self.toPlainText()), pos)
            if start != end:
                cursor.setPosition(start if key == Qt.Key_BracketLeft else end)
                self.setTextCursor(cursor)
        elif ctrl and key == Qt.Key_D: # duplicate line
            cursor = self.textCursor()
            line = cursor.block().text()
            cursor.movePosition(QTextCursor.EndOfBlock)
            cursor.beginEditBlock()
            cursor.insertBlock()
            cursor.insertText(line)
            cursor.endEditBlock()
            self.setTextCursor(cursor)
        elif ctrl and key == Qt.Key_Semicolon: # comment
            cursor = self.textCursor()
            if cursor.selectedText():
                self.toggleCommentBlock()
            else:
                self.toggleCommentLine()
        else:
            QTextEdit.keyPressEvent(self, event)
    def swoopSearch(self, update=True):
        """Open the modal swoop search dialog; ``update`` refreshes its
        snapshot of the document first."""
        if update:
            self.swoopSearchDialog.update()
        self.swoopSearchDialog.exec_()
    def moveLineUp(self):
        """Swap the current line with the one above, keeping the column."""
        cursor = self.textCursor()
        # No-op on the first line or when a selection exists.
        if not cursor.block().previous().isValid() or cursor.selectedText():
            return
        text = cursor.block().text()
        pos = cursor.positionInBlock()
        cursor.beginEditBlock()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        cursor.deletePreviousChar()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.insertText(text)
        cursor.insertBlock()
        cursor.endEditBlock()
        cursor.movePosition(QTextCursor.Up)
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.Right, n=pos)
        self.setTextCursor(cursor)
    def moveLineDown(self):
        """Swap the current line with the one below, keeping the column."""
        cursor = self.textCursor()
        if not cursor.block().next().isValid() or cursor.selectedText():
            return
        text = cursor.block().text()
        pos = cursor.positionInBlock()
        cursor.beginEditBlock()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        cursor.deleteChar()
        cursor.movePosition(QTextCursor.EndOfBlock)
        cursor.insertBlock()
        cursor.insertText(text)
        cursor.endEditBlock()
        cursor.movePosition(QTextCursor.StartOfBlock)
        cursor.movePosition(QTextCursor.Right, n=pos)
        self.setTextCursor(cursor)
    def centerLine(self):
        """Scroll so the cursor's line sits vertically centered."""
        cursorY = self.cursorRect().top()
        scrollBar = self.verticalScrollBar()
        scrollBar.setValue(scrollBar.value() + cursorY - self.geometry().height()/2)
    def killLine(self):
        """Emacs-style kill: delete to end of line, or remove the whole
        line when it is already empty."""
        cursor = self.textCursor()
        if not cursor.block().text():
            cursor.movePosition(QTextCursor.StartOfBlock)
            cursor.movePosition(QTextCursor.NextBlock, QTextCursor.KeepAnchor)
        else:
            cursor.movePosition(QTextCursor.EndOfBlock, QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        self.setTextCursor(cursor)
    def toggleCommentLine(self):
        """Insert or remove a "# " comment prefix at the line's first
        non-whitespace column, shifting the cursor accordingly."""
        comment = "# "
        commentSize = len(comment)
        cursor = self.textCursor()
        pos = cursor.position()
        linePos = cursor.block().position()
        cursor.select(QTextCursor.LineUnderCursor)
        lineText = cursor.selectedText()
        cursor.clearSelection()
        found = re.findall("^\\s*", unicode(lineText))
        offset = len(found[0]) if found else 0
        cursor.setPosition(linePos + offset)
        newPos = pos + commentSize
        cursor.beginEditBlock()
        if not re.match("^\\s*%s"%comment, lineText):
            cursor.insertText(comment)
        else:
            for i in range(len(comment)):
                cursor.deleteChar()
            newPos = pos - commentSize
        cursor.endEditBlock()
        cursor.setPosition(newPos)
        self.setTextCursor(cursor)
    def gotoLine(self, line=-1):
        """Move the cursor to the given 1-based line; prompt when -1."""
        if line == -1:
            cursor = self.textCursor()
            currentLine = cursor.blockNumber()+1
            maxLine = self.document().lineCount()
            line, ok = QInputDialog.getInt(self, "Editor", "Goto line number", currentLine, 1, maxLine)
            if not ok:
                return
        self.setTextCursor(QTextCursor(self.document().findBlockByLineNumber(line-1)))
    def replaceWithAutoCompletion(self):
        """Replace the word under the cursor with the completion currently
        selected in the popup (Ctrl takes the whole popup line)."""
        if self.completionWidget.lineCount() == 0:
            return
        modifiers = QApplication.queryKeyboardModifiers()
        # NOTE(review): shift/alt/row are computed but unused below.
        shift = modifiers & Qt.ShiftModifier
        ctrl = modifiers & Qt.ControlModifier
        alt = modifiers & Qt.AltModifier
        block = self.completionWidget.textCursor().block()
        row = block.blockNumber() if block.isValid() else 0
        if ctrl:
            word = unicode(block.text())
        else:
            word = re.split("\\s*", unicode(block.text()))[0]
        cursor = self.textCursor()
        cursor.setPosition(self.currentWord[1])
        cursor.setPosition(self.currentWord[2], QTextCursor.KeepAnchor)
        cursor.removeSelectedText()
        cursor.insertText(word)
        self.setTextCursor(cursor)
        self.canShowCompletions = False
    def highlightSelected(self):
        """Highlight all occurrences of the selection (or of the word under
        the cursor) via the syntax highlighter's word regexp."""
        cursor = self.textCursor()
        sel = cursor.selectedText()
        reg = None
        if sel:
            reg = QRegExp("%s"%QRegExp.escape(sel))
        else:
            word, _,_ = wordAtCursor(cursor)
            if word:
                if word.startswith("@"):
                    # "@name" tokens keep their sigil outside the word boundary.
                    reg = QRegExp("@\\b%s\\b"%QRegExp.escape(word[1:]))
                else:
                    reg = QRegExp("\\b%s\\b"%QRegExp.escape(word))
        self.syntax.highlightedWordRegexp = reg
        self.blockSignals(True)
        self.syntax.rehighlight()
        self.blockSignals(False)
    def editorCursorPositionChanged(self):
        """Track cursor moves: hide the completer on jumps, persist the
        cursor, and highlight the enclosing bracket pair."""
        cursor = self.textCursor()
        pos = cursor.position()
        if abs(pos - self.prevCursorPosition) > 1:
            # A jump larger than one character cancels completion.
            self.completionWidget.hide()
        if cursor.selectedText():
            self.setExtraSelections([])
            return
        self.saveState(cursor=True, scroll=False, bookmarks=False)
        self.prevCursorPosition = pos
        text, start, end = findBracketSpans(unicode(self.toPlainText()), pos)
        extra = []
        if start != end:
            for pos in [start, end]:
                cursor = self.textCursor()
                cursor.setPosition(pos)
                cursor.setPosition(pos+1, QTextCursor.KeepAnchor)
                es = QTextEdit.ExtraSelection()
                es.cursor = cursor
                es.format.setForeground(QColor(0, 0, 0))
                es.format.setBackground(QBrush(QColor(70, 130, 140)))
                extra.append(es)
        self.setExtraSelections(extra)
    def editorTextChanged(self):
        """Refresh the auto-completion popup from the words present in the
        document plus ``self.words``."""
        text = unicode(self.toPlainText())
        cursor = self.textCursor()
        pos = cursor.position()
        self.currentWord = wordAtCursor(cursor)
        currentWord, start, end = self.currentWord
        if start == 0 and end - start <= 1:
            return
        words = set(self.words)
        words |= set(re.split("[^\\w@]+", text))
        # Don't offer the word being typed as its own completion.
        words -= set([currentWord])
        if currentWord:
            self.searchStartWord = self.currentWord
            items = [w for w in words if re.match(currentWord, w, re.IGNORECASE)]
            if items and cursor.position() == end:
                self.showCompletions(items)
            else:
                self.completionWidget.hide()
        else:
            self.completionWidget.hide()
    def showCompletions(self, items):
        """Position the completion popup just under the cursor and fill it."""
        rect = self.cursorRect()
        c = rect.center()
        self.completionWidget.setGeometry(c.x(), c.y()+10, 200, 200)
        if items:
            self.completionWidget.update(items)
            self.completionWidget.show()
def findBracketSpans(text, pos, brackets="([{"):
    """Find the innermost bracket pair enclosing character index ``pos``.

    Returns ``(span_text, start, end)`` where ``start`` is the index of the
    opening bracket and ``end`` the index of its matching closing bracket,
    or ``("", 0, 0)`` when no enclosing pair is found.  ``brackets``
    restricts which opening characters count.
    """
    if not text:
        return ("", 0, 0)
    textLen = len(text)
    # when no spaces at the current line then do nothing
    start = pos-1
    while start > 0 and text[start] != "\n":
        start -= 1
    # Require leading whitespace or an opening bracket between the line
    # start and the cursor, otherwise bail out early.
    if not re.search("^\\s+|[{\\(\\[]+", text[start+1:pos]):
        return ("", 0, 0)
    start = pos-1
    end = pos
    # Per-kind balance counters; a counter going negative while scanning
    # backwards means we hit an unmatched opening bracket.
    bracketDict = {"(":0, "[": 0, "{": 0}
    bracketChar = ""
    ok = False
    while True:
        if (bracketDict["("] < 0 and "(" in brackets) or\
           (bracketDict["["] < 0 and "[" in brackets) or\
           (bracketDict["{"] < 0 and "{" in brackets):
            ok = True
            break
        if start < 0:
            break
        ch = text[start]
        if ch in ["(", ")", "{", "}", "[", "]"]:
            # Remember the most recent bracket seen; when the loop exits
            # with ok=True this is the unmatched opening bracket.
            bracketChar = str(ch)
            if ch == ")": bracketDict["("] += 1
            elif ch == "(": bracketDict["("] -= 1
            elif ch == "]": bracketDict["["] += 1
            elif ch == "[": bracketDict["["] -= 1
            elif ch == "}": bracketDict["{"] += 1
            elif ch == "{": bracketDict["{"] -= 1
        start -= 1
    # Loop overshoots by one position; step back onto the bracket.
    start += 1
    if ok:
        # Scan forwards for the matching closer of the found opener.
        bracketDict = {"(":0, "[": 0, "{": 0}
        ok = False
        while True:
            if bracketDict[bracketChar] < 0:
                ok = True
                break
            if end >= textLen:
                break
            ch = text[end]
            if ch in ["(", ")", "{", "}", "[", "]"]:
                if ch == "(": bracketDict["("] += 1
                elif ch == ")": bracketDict["("] -= 1
                if ch == "[": bracketDict["["] += 1
                elif ch == "]": bracketDict["["] -= 1
                if ch == "{": bracketDict["{"] += 1
                elif ch == "}": bracketDict["{"] -= 1
            end += 1
        end -= 1
        if ok:
            return (text[start:end], start, end)
    return ("", 0, 0)
def wordAtCursor(cursor):
    """Return ``(word, start, end)`` for the identifier under ``cursor``.

    Scans left and right from the cursor position over word characters;
    an ``@`` is accepted only as the first character of the word.  The
    returned ``start``/``end`` are document character positions.
    """
    # Work on a copy so the caller's cursor is untouched.
    cursor = QTextCursor(cursor)
    pos = cursor.position()
    lpart = ""
    start = pos-1
    ch = unicode(cursor.document().characterAt(start))
    while ch and re.match("[@\\w]", ch):
        lpart += ch
        start -= 1
        if ch == "@": # @ can be the first character only
            break
        ch = unicode(cursor.document().characterAt(start))
    rpart = ""
    end = pos
    ch = unicode(cursor.document().characterAt(end))
    while ch and re.match("[\\w]", ch):
        rpart += ch
        end += 1
        ch = unicode(cursor.document().characterAt(end))
    # lpart was collected right-to-left, so reverse it before joining.
    return (lpart[::-1]+rpart, start+1, end)
class CompletionWidget(QTextEdit):
    """Frameless read-only popup that lists completion candidates, one per
    line, with the current candidate shown via highlightLine()."""
    def __init__(self, items, **kwargs):
        super(CompletionWidget, self).__init__(**kwargs)
        self.setWindowFlags(Qt.FramelessWindowHint)
        # Keep keyboard focus in the editor while the popup is visible.
        self.setAttribute(Qt.WA_ShowWithoutActivating)
        self.setReadOnly(True)
        self.setWordWrapMode(QTextOption.NoWrap)
        self.update([])
    def lineCount(self):
        """Number of candidate lines currently shown."""
        return self.document().blockCount()
    def currentLine(self):
        """Index of the line holding the text cursor (current candidate)."""
        return self.textCursor().block().blockNumber()
    def mousePressEvent(self, event):
        # Clicks give focus back to the editor instead of the popup.
        self.parent().setFocus()
        event.accept()
    def keyPressEvent(self, event):
        """Navigate candidates with Up/Down/PageUp/PageDown; Return is
        swallowed (acceptance is handled by the editor)."""
        shift = event.modifiers() & Qt.ShiftModifier
        ctrl = event.modifiers() & Qt.ControlModifier
        alt = event.modifiers() & Qt.AltModifier
        line = self.textCursor().block().blockNumber()
        lineCount = self.document().blockCount()-1
        if event.key() == Qt.Key_Down:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line+1))
        elif event.key() == Qt.Key_Up:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line-1))
        elif event.key() == Qt.Key_PageDown:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line+5))
        elif event.key() == Qt.Key_PageUp:
            highlightLine(self, clamp(0, lineCount, line), clear=True)
            highlightLine(self, clamp(0, lineCount, line-5))
        elif event.key() == Qt.Key_Return: # accept
            pass
        else:
            QTextEdit.keyPressEvent(self, event)
    def update(self, items):
        """Fill the popup with ``items`` (one per line) and resize.

        NOTE(review): shadows QWidget.update(); empty ``items`` returns
        without clearing the previous contents.
        """
        if not items:
            return
        self.clear()
        self.setCurrentCharFormat(QTextCharFormat())
        lines = []
        for line in items:
            lines.append(line)
        self.setText("\n".join(lines))
        highlightLine(self, 0)
        self.autoResize()
    def autoResize(self):
        """Size the popup to its content, capped at 500x400 and at the
        space left below the editor's cursor."""
        w = self.document().idealWidth() + 10
        h = self.document().blockCount()*self.cursorRect().height() + 30
        maxHeight = clamp(0, 400, self.parent().height() - self.parent().cursorRect().top() - 30)
        self.setFixedSize(clamp(0, 500, w), clamp(0, maxHeight, h))
    def showEvent(self, event):
        self.autoResize()
class NumberBarWidget(QWidget):
    """Gutter widget painting line numbers (and bookmark markers) for an
    associated text edit widget."""
    def __init__(self, edit, *args):
        # Fix: the varargs parameter was named ``*kwargs`` although a
        # single star captures *positional* arguments; renamed to ``*args``.
        # Callers are unaffected (the parameter is positional-variadic).
        super(NumberBarWidget, self).__init__(*args)
        self.edit = edit            # the text edit this bar numbers
        self.highest_line = 0       # last known line count, sets bar width
    def update(self, *args):
        """Resize the bar to fit the widest line number, then repaint.

        NOTE: intentionally shadows QWidget.update() so every repaint
        request first syncs width and style with the editor.
        """
        self.setStyleSheet(self.edit.styleSheet())
        width = self.fontMetrics().width(str(self.highest_line)) + 19
        self.setFixedWidth(width)
        QWidget.update(self, *args)
    def paintEvent(self, event):
        """Draw right-aligned line numbers for the visible blocks and a
        marker for bookmarked lines."""
        contents_y = self.edit.verticalScrollBar().value()
        page_bottom = contents_y + self.edit.viewport().height()
        font_metrics = self.fontMetrics()
        current_block = self.edit.document().findBlock(self.edit.textCursor().position())
        painter = QPainter(self)
        line_count = 0
        # Iterate over all text blocks in the document.
        block = self.edit.document().begin()
        while block.isValid():
            line_count += 1
            # The top left position of the block in the document
            position = self.edit.document().documentLayout().blockBoundingRect(block).topLeft()
            # Check if the position of the block is out side of the visible
            # area.
            if position.y() > page_bottom:
                break
            # Draw the line number right justified at the y position of the
            # line. 3 is a magic padding number. drawText(x, y, text).
            painter.drawText(self.width() - font_metrics.width(str(line_count)) - 3, round(position.y()) - contents_y + font_metrics.ascent(), str(line_count))
            data = block.userData()
            if data and data.hasBookmark:
                painter.drawText(3, round(position.y()) - contents_y + font_metrics.ascent(), u"►")
            block = block.next()
        self.highest_line = self.edit.document().blockCount()
        painter.end()
        QWidget.paintEvent(self, event)
class TextBlockData(QTextBlockUserData):
    """Per-block user data attached to text blocks; currently only stores
    whether the line carries a bookmark."""
    def __init__(self):
        super(TextBlockData, self).__init__()
        self.hasBookmark = False  # toggled by CodeEditorWidget.setBookmark
class CodeEditorWithNumbersWidget(QWidget):
    """Composite widget: a CodeEditorWidget with a NumberBarWidget gutter
    on its left, kept in sync on edits and scrolling."""
    def __init__(self, **kwargs):
        super(CodeEditorWithNumbersWidget, self).__init__(**kwargs)
        self.editorWidget = CodeEditorWidget()
        self.numberBarWidget = NumberBarWidget(self.editorWidget)
        # Repaint the gutter whenever line count, layout or scroll changes.
        self.editorWidget.document().blockCountChanged.connect(lambda _: self.numberBarWidget.update())
        self.editorWidget.document().documentLayoutChanged.connect(self.numberBarWidget.update)
        self.editorWidget.verticalScrollBar().valueChanged.connect(lambda _: self.numberBarWidget.update())
        hlayout = QHBoxLayout()
        hlayout.setContentsMargins(0, 0, 0, 0)
        hlayout.addWidget(self.numberBarWidget)
        hlayout.addWidget(self.editorWidget)
        self.setLayout(hlayout)
'''
Usage example:
app = QApplication([])
e = CodeEditorWithNumbersWidget()
e.show()
app.exec_()
'''
b68866458224b910c1e4822374a14bdc98c7fe27 | 5,523 | py | Python | research/Issue2/purifier/text_extractor.py | johnklee/ff_crawler | 53b056bd94ccf55388d12c7f70460d280964f45f | [
"MIT"
] | null | null | null | research/Issue2/purifier/text_extractor.py | johnklee/ff_crawler | 53b056bd94ccf55388d12c7f70460d280964f45f | [
"MIT"
] | 4 | 2021-04-09T02:05:42.000Z | 2021-07-04T07:42:15.000Z | research/Issue2/purifier/text_extractor.py | johnklee/ff_crawler | 53b056bd94ccf55388d12c7f70460d280964f45f | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import importlib
import os
import inspect
from importlib import util as importlib_util
from .logb import getLogger
# from .pdf2text import simple_fact as pdf_sfact
from .html2text import simple_fact as html_sfact
from .plain2text import simple_fact as pln_sfact
################################
# Constants
################################
MODU_PATH = os.path.dirname(__file__) if os.path.dirname(__file__) else './'
''' Path of current module '''
################################
# Class Definition
################################
class TEAgent:
    """Text-extraction agent: dispatches raw content to a MIME-specific
    handler and optionally augments handlers with URL-pattern policies
    loaded from a policy folder."""
    ERR_MSG_MTYPE_NOT_SUPPORT = 'Content type={mtype} is not supported yet!'
    ''' Error message for unsupported MIME'''
    # Fallback result returned when extraction is impossible or fails.
    DEFAULT_RST = {'title': '', 'text': '', 'te_suc': False}
    def __init__(self, ext_title=False, disable_policy=False, policy_path=None):
        r'''
        Constructor
        :param ext_title: True to extract title; False otherwise
        :param disable_policy: True to disable loading policy
        :param policy_path: Folder holding policy modules; defaults to
                            the package-local "policy" directory.
        '''
        self.logger = getLogger(os.path.basename(__file__))
        self.handlers = {
            'text/html': html_sfact(ext_title=ext_title),
            # 'application/pdf': pdf_sfact(ext_title=ext_title),
            'text/plain': pln_sfact(ext_title=ext_title)
        } # key as Media type; value as corresponding handler
        if not disable_policy:
            if policy_path is None:
                policy_path = os.path.join(os.path.abspath(MODU_PATH), 'policy')
            self.load_policy(policy_path)
    def load_policy(self, policy_path, namespace=None, target_policy_names=None):
        r'''
        Loading policy stored in a given folder
        :param policy_path: Path of folder to store policy file
        :param namespace: Namespace used to control the import path
        :param target_policy_names: If given, only the policy module name exist in here will be loaded.
        :return:
            Number of policy file being loaded
        '''
        if os.path.isdir(policy_path):
            pc = 0
            # Only files named "policy*.py" are considered policy modules.
            for pf in list(filter(lambda f: f.endswith('.py') and f.startswith('policy'), os.listdir(policy_path))):
                if target_policy_names and pf.split('.')[0] not in target_policy_names:
                    self.logger.warning('Ignore {}!'.format(pf))
                    continue
                self.logger.debug('Loading {}...'.format(pf))
                try:
                    module_name = 'purifier.policy{}.{}'.format('' if namespace is None else ".{}".format(namespace), pf.split('.')[0])
                    spec = importlib_util.spec_from_file_location(module_name, os.path.join(policy_path, pf))
                    module = importlib_util.module_from_spec(spec)
                    spec.loader.exec_module(module)
                    # Collect the module's callables (excluding classes) and
                    # register those tagged with a url_ptn attribute.
                    for po, pn in list(filter(lambda t: callable(t[0]) and not inspect.isclass(t[0]), list(map(lambda n: (getattr(module, n), n), dir(module))))):
                        if hasattr(po, 'url_ptn'):
                            self.logger.debug('\tRegister {}'.format(po.url_ptn))
                            po.module_name = module_name
                            po.policy_name = pn
                            self.handlers[po.mime].regr(po.url_ptn, po)
                            # NOTE(review): counts registered policies, not
                            # files, despite the docstring's wording.
                            pc += 1
                except:
                    # NOTE(review): bare except also swallows SystemExit /
                    # KeyboardInterrupt; consider "except Exception".
                    self.logger.exception('Fail to load policy from {}!'.format(pf))
            return pc
        else:
            # NOTE(review): logger.warn is deprecated in favor of warning().
            self.logger.warn('Policy folder={} does not exist!'.format(policy_path))
            return -1
    def parse(self, mtype, url, content, do_ext_link=False):
        r'''
        Parse the given content to do text extraction
        :param mtype: Content type in string. e.g.: 'text/html'.
        :param url: The source URL
        :param content: The corresponding content.
        :param do_ext_link: True to extract URL link from content (default:False)
        :return
            tuple(is_success, extraction result, reason)
        '''
        try:
            # Strip charset suffix, e.g. "text/html; charset=utf-8".
            mtype = mtype.split(';')[0].strip()
            handler = self.handlers.get(mtype, None)
            if handler:
                try:
                    extract_rst = handler(url, content, do_ext_link)
                except:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    return (False, TEAgent.DEFAULT_RST, {'reason': handler.reason(), 'err': "{}: {}".format(exc_type, exc_value)})
                if isinstance(extract_rst, dict) and 'title' not in extract_rst:
                    extract_rst['title'] = ''
                # Success when a dict result reports te_suc (default True)
                # or a string result is non-empty.
                if (isinstance(extract_rst, dict) and extract_rst.get('te_suc', True)) or (isinstance(extract_rst, str) and extract_rst):
                    return (True, extract_rst, {'reason': handler.reason()})
                else:
                    return (False, extract_rst, {'reason': handler.reason(), 'err': 'Empty TE' if not handler.err_msg else handler.err_msg})
            else:
                self.logger.info("Use default agent...")
                return (False, TEAgent.DEFAULT_RST, {'reason': '?', 'err': TEAgent.ERR_MSG_MTYPE_NOT_SUPPORT.format(mtype=mtype, url=url)})
        except:
            self.logger.exception('Fail to parse content from URL={}!'.format(url))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            return (False, TEAgent.DEFAULT_RST, {'reason': '?', 'err': "{}: {}".format(exc_type, exc_value)})
| 43.833333 | 162 | 0.570704 | 658 | 5,523 | 4.607903 | 0.267477 | 0.032982 | 0.022427 | 0.019789 | 0.190633 | 0.134894 | 0.076847 | 0.046834 | 0.046834 | 0.046834 | 0 | 0.003083 | 0.295311 | 5,523 | 125 | 163 | 44.184 | 0.775951 | 0.1673 | 0 | 0.148649 | 0 | 0 | 0.08434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.135135 | 0 | 0.310811 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b68b69b099d27f3f50d04d0c00314efb72f42971 | 21,664 | py | Python | tests/util.py | wayneweiqiang/GaMMA | 8dcc94088e462d386e10fa7c9a2be06d646ba825 | [
"MIT"
] | 9 | 2021-11-15T10:13:05.000Z | 2022-03-03T13:41:46.000Z | tests/util.py | wayneweiqiang/GaMMA | 8dcc94088e462d386e10fa7c9a2be06d646ba825 | [
"MIT"
] | null | null | null | tests/util.py | wayneweiqiang/GaMMA | 8dcc94088e462d386e10fa7c9a2be06d646ba825 | [
"MIT"
] | 1 | 2021-11-25T05:33:11.000Z | 2021-11-25T05:33:11.000Z | import os
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from collections import defaultdict
# import fire
timestamp = lambda dt: (dt - datetime(2019, 1, 1)).total_seconds()
## ridgecrest
class Config:
    """Geometry of the Ridgecrest study region used to convert between
    local km coordinates and geographic degrees."""
    degree2km = np.pi * 6371 / 180   # km per degree of latitude (Earth radius 6371 km)
    center = (35.705, -117.504)      # region center: (latitude, longitude)
    horizontal = 0.5                 # half-width of the region in degrees of longitude
    vertical = 0.5                   # half-height of the region in degrees of latitude
def load_eqnet_catalog(fname, config=Config()):
    """Load an EQNet catalog from a tab-separated file and add the
    normalized columns (date, X/Y/Z in km, longitude/latitude in degrees,
    depth(m)); magnitude is not provided by EQNet and is set to 0."""
    df = pd.read_csv(fname, sep="\t", parse_dates=['time'])
    df["date"] = df["time"]
    # Mirror the km columns under the short axis names.
    for axis, column in (("X", "x(km)"), ("Y", "y(km)"), ("Z", "z(km)")):
        df[axis] = df[column]
    df["time"] = df["date"]
    df["magnitude"] = 0.0
    # Convert local km offsets to degrees relative to the region's
    # lower-left corner.
    lon0 = config.center[1] - config.horizontal
    lat0 = config.center[0] - config.vertical
    df["longitude"] = df["X"] / config.degree2km + lon0
    df["latitude"] = df["Y"] / config.degree2km + lat0
    df["depth(m)"] = df["Z"] * 1e3
    return df
def load_scsn(config=Config()):
    """Download (if missing) and parse the SCSN 2019 fixed-width catalog.

    Returns a DataFrame with the raw fixed-width columns plus the canonical
    time/magnitude/latitude/longitude/X/Y/Z/depth(m) fields used elsewhere
    in this module.
    """
    if not os.path.exists("2019.catalog"):
        os.system("wget https://raw.githubusercontent.com/SCEDC/SCEDC-catalogs/master/SCSN/2019.catalog")
    catalog = defaultdict(list)
    with open("2019.catalog", 'r') as fp:
        for line in fp:
            # Skip comment lines and blank lines.
            if line[0] in ['#', '\n', '\r\n']:
                continue
            # Fixed-width column slices (byte offsets per the SCSN format).
            catalog["YYY"].append(line[0:4].strip())
            catalog["MM"].append(line[4:7].strip())
            catalog["DD"].append(line[7:10].strip())
            catalog["HH"].append(line[10:14].strip())
            catalog["mm"].append(line[14:17].strip())
            catalog["SS.ss"].append(line[17:23].strip())
            catalog["LAT-deg"].append(line[23:27].strip())
            catalog["LAT-sec"].append(line[27:33].strip())
            catalog["LON-deg"].append(line[33:37].strip())
            catalog["LON-sec"].append(line[37:43].strip())
            catalog["Q"].append(line[43:45].strip())
            catalog["MAG"].append(line[45:49].strip())
            catalog["DEPTH"].append(line[49:59].strip())
            catalog["NPH"].append(line[59:62].strip())
            catalog["RMS"].append(line[62:71].strip())
            catalog["EVID"].append(line[71:80].strip())
    catalog = pd.DataFrame.from_dict(catalog)
    # Coordinates are stored as degrees + decimal minutes; longitudes in this
    # region are western (negative).
    catalog["LON"] = -(-catalog["LON-deg"].astype('float') + catalog["LON-sec"].astype('float') / 60)
    catalog["LAT"] = catalog["LAT-deg"].astype('float').abs() + catalog["LAT-sec"].astype('float') / 60
    catalog['DEPTH'] = catalog['DEPTH'].astype('float')
    # Assemble an ISO-8601 timestamp string from the date/time fields.
    # NOTE(review): the trailing "0" appears to pad the seconds field so that
    # datetime.fromisoformat accepts it — confirm against the raw file format.
    catalog["date"] = (
        catalog["YYY"]
        + "-"
        + catalog["MM"]
        + "-"
        + catalog["DD"]
        + "T"
        + catalog["HH"]
        + ":"
        + catalog["mm"]
        + ":"
        + catalog["SS.ss"]
        + "0"
    )
    catalog["date"] = catalog["date"].map(datetime.fromisoformat)
    # Project lon/lat to km offsets relative to the region origin.
    catalog["X"] = (catalog["LON"].map(float) - (config.center[1] - config.horizontal)) * config.degree2km
    catalog["Y"] = (catalog["LAT"].map(float) - (config.center[0] - config.vertical)) * config.degree2km
    catalog["Z"] = catalog['DEPTH'].map(float)
    catalog["mag"] = catalog["MAG"].map(float)
    # Canonical column aliases shared by the other loaders.
    catalog["time"] = catalog["date"]
    catalog["magnitude"] = catalog["mag"]
    catalog["latitude"] = catalog["LAT"]
    catalog["longitude"] = catalog["LON"]
    catalog["depth(m)"] = catalog["Z"]*1e3
    return catalog
def load_Ross2019(config=Config()):
    """Download (if missing) and load the Ross et al. (2019) QTM Ridgecrest catalog.

    Adds the canonical date/X/Y/Z/time/magnitude/latitude/longitude columns.
    """
    if not os.path.exists("Ross2019.txt"):
        os.system("wget https://service.scedc.caltech.edu/ftp/QTMcatalog-ridgecrest/ridgecrest_qtm.tar.gz")
        os.system("tar -xzf ridgecrest_qtm.tar.gz")
        os.system("rm ridgecrest_qtm.tar.gz")
        os.system("mv ridgecrest_qtm.cat Ross2019.txt")
    catalog = pd.read_csv(
        "Ross2019.txt",
        sep='\s+',
        header=0,
        names=[
            "yr",
            "mon",
            "day",
            "hr",
            "min",
            "sec",
            "eID",
            "latR",
            "lonR",
            "depR",
            "mag",
            "qID",
            "cID",
            "nbranch",
            "qnpair",
            "qndiffP",
            "qndiffS",
            "rmsP",
            "rmsS",
            "eh",
            "ez",
            "et",
            "latC",
            "lonC",
            "depC",
        ],
        dtype={
            "yr": int,
            "mon": int,
            "day": int,
            "hr": int,
            "min": int,
            "sec": float,
            "eID": int,
            "latR": float,
            "lonR": float,
            "depR": float,
            "mag": float,
        },
    )
    # Build zero-padded ISO-8601 strings, then parse to datetime objects.
    catalog["date"] = (
        catalog["yr"].map("{:04d}".format)
        + "-"
        + catalog["mon"].map("{:02d}".format)
        + "-"
        + catalog["day"].map("{:02d}".format)
        + "T"
        + catalog["hr"].map("{:02d}".format)
        + ":"
        + catalog["min"].map("{:02d}".format)
        + ":"
        + catalog["sec"].map("{:06.3f}".format)
    )
    catalog["date"] = catalog["date"].map(datetime.fromisoformat)
    # Project lon/lat to km offsets relative to the region origin.
    catalog["X"] = (catalog["lonR"] - (config.center[1] - config.horizontal)) * config.degree2km
    catalog["Y"] = (catalog["latR"] - (config.center[0] - config.vertical)) * config.degree2km
    catalog["Z"] = catalog['depR']
    catalog["time"] = catalog["date"]
    catalog["magnitude"] = catalog["mag"]
    catalog["latitude"] = catalog["latR"]
    catalog["longitude"] = catalog["lonR"]
    return catalog
def load_Shelly2020(config=Config()):
    """Download (if missing) and load the Shelly (2020) Ridgecrest catalog.

    All columns are read as strings (dtype=str) and converted to float where
    needed; adds the canonical date/X/Y/Z/time/magnitude/lat/lon columns.
    """
    if not os.path.exists("Shelly2020.txt"):
        os.system(
            "wget -O Shelly2020.txt https://www.sciencebase.gov/catalog/file/get/5dd715f3e4b0695797650d18?f=__disk__db%2F88%2Fa1%2Fdb88a1f6754843800f25bd63712ed438dfa7699f"
        )
    catalog = pd.read_csv(
        "Shelly2020.txt",
        sep='\s+',
        header=25,
        names=["yr", "mon", "day", "hr", "min", "sec", "lat", "lon", "dep", "mag", "ID"],
        dtype=str,
    )
    # Date/time fields are already zero-padded strings; just join them into
    # an ISO-8601 timestamp and parse.
    catalog["date"] = (
        catalog["yr"]
        + "-"
        + catalog["mon"]
        + "-"
        + catalog["day"]
        + "T"
        + catalog["hr"]
        + ":"
        + catalog["min"]
        + ":"
        + catalog["sec"]
    )
    catalog["date"] = catalog["date"].map(datetime.fromisoformat)
    # Project lon/lat to km offsets relative to the region origin.
    catalog["X"] = (catalog["lon"].map(float) - (config.center[1] - config.horizontal)) * config.degree2km
    catalog["Y"] = (catalog["lat"].map(float) - (config.center[0] - config.vertical)) * config.degree2km
    catalog["Z"] = catalog['dep'].map(float)
    catalog["mag"] = catalog["mag"].map(float)
    catalog["time"] = catalog["date"]
    catalog["magnitude"] = catalog["mag"]
    # NOTE(review): latitude/longitude keep the original string dtype here,
    # unlike the other loaders which store floats — confirm downstream usage.
    catalog["latitude"] = catalog["lat"]
    catalog["longitude"] = catalog["lon"]
    return catalog
def load_Liu2020(config=Config()):
    """Download (if missing) and load the Liu et al. (2020) catalog (GRL supplement).

    Adds the canonical date/X/Y/Z/time/magnitude/latitude/longitude columns.
    """
    if not os.path.exists("Liu2020.txt"):
        os.system(
            "wget -O Liu2020.txt https://agupubs.onlinelibrary.wiley.com/action/downloadSupplement\?doi\=10.1029%2F2019GL086189\&file\=grl60250-sup-0002-2019GL086189-ts01.txt"
        )
    catalog = pd.read_csv(
        "Liu2020.txt",
        sep='\s+',
        header=1,
        names=["yr", "mon", "day", "hr", "min", "sec", "lat", "lon", "dep", "mag"],
        dtype={
            "yr": int,
            "mon": int,
            "day": int,
            "hr": int,
            "min": int,
            "sec": float,
            "lat": float,
            "lon": float,
            "dep": float,
            "mag": float,
        },
    )
    # Build zero-padded ISO-8601 strings, then parse to datetime objects.
    catalog["date"] = (
        catalog["yr"].map("{:04d}".format)
        + "-"
        + catalog["mon"].map("{:02d}".format)
        + "-"
        + catalog["day"].map("{:02d}".format)
        + "T"
        + catalog["hr"].map("{:02d}".format)
        + ":"
        + catalog["min"].map("{:02d}".format)
        + ":"
        + catalog["sec"].map("{:06.3f}".format)
    )
    catalog["date"] = catalog["date"].map(datetime.fromisoformat)
    # Project lon/lat to km offsets relative to the region origin.
    catalog["X"] = (catalog["lon"] - (config.center[1] - config.horizontal)) * config.degree2km
    catalog["Y"] = (catalog["lat"] - (config.center[0] - config.vertical)) * config.degree2km
    catalog["Z"] = catalog['dep']
    catalog["time"] = catalog["date"]
    catalog["magnitude"] = catalog["mag"]
    catalog["latitude"] = catalog["lat"]
    catalog["longitude"] = catalog["lon"]
    return catalog
def load_GaMMA_catalog(fname, config=Config()):
    """Load a GaMMA catalog TSV and derive X/Y/Z (km) from lon/lat/depth(m)."""
    df = pd.read_csv(fname, sep='\t',)
    df["date"] = df["time"].map(datetime.fromisoformat)
    # Region origin: center shifted by the half-extents.
    origin_lon = config.center[1] - config.horizontal
    origin_lat = config.center[0] - config.vertical
    df["X"] = (df["longitude"].map(float) - origin_lon) * config.degree2km
    df["Y"] = (df["latitude"].map(float) - origin_lat) * config.degree2km
    df["Z"] = df['depth(m)'].map(float) / 1e3
    df["mag"] = df["magnitude"]
    return df
def filter_catalog(catalog, start_datetime, end_datetime, xmin, xmax, ymin, ymax, config=Config()):
    """Select events inside a time window and an X/Y bounding box.

    Returns (t_event, xyz_event, mag_event, selected_catalog):
      t_event    -- numpy array of seconds since the reference epoch
      xyz_event  -- numpy array of shape (n, 3) with X/Y/Z in km
      mag_event  -- numpy array of magnitudes (empty when no "mag" column)
      selected_catalog -- the filtered DataFrame itself
    """
    selected_catalog = catalog[
        (catalog["date"] >= start_datetime)
        & (catalog["date"] <= end_datetime)
        & (catalog['X'] >= xmin)
        & (catalog['X'] <= xmax)
        & (catalog['Y'] >= ymin)
        & (catalog['Y'] <= ymax)
    ]
    print(f"Filtered catalog {start_datetime}-{end_datetime}: {len(selected_catalog)} events")
    # Column-wise extraction replaces the original O(n) per-row iterrows loop.
    t_event = np.array([timestamp(d) for d in selected_catalog["date"]])
    xyz_event = selected_catalog[['X', 'Y', 'Z']].to_numpy()
    if "mag" in selected_catalog.columns:
        mag_event = selected_catalog["mag"].to_numpy()
    else:
        mag_event = np.array([])
    return t_event, xyz_event, mag_event, selected_catalog
def calc_detection_performance(t_pred, t_true, time_accuracy_threshold=3):
    """Compute recall, precision and F1 of predicted event times.

    A prediction matches a true event when their times differ by less than
    ``time_accuracy_threshold`` seconds.

    Returns (recall, precision, f1). All three are 0.0 when either input is
    empty or nothing matches — the original raised UnboundLocalError for
    empty inputs and ZeroDivisionError when recall + precision == 0.
    """
    # rows = true events, cols = predictions; True where |dt| < threshold
    evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold  # s
    num_recall = np.sum(np.sum(evaluation_matrix, axis=1) > 0)
    num_precision = np.sum(np.sum(evaluation_matrix, axis=0) > 0)
    recall = precision = f1 = 0.0
    if (len(t_true) > 0) and (len(t_pred) > 0):
        recall = num_recall / len(t_true)
        precision = num_precision / len(t_pred)
        if recall + precision > 0:
            f1 = 2 * recall * precision / (recall + precision)
    return recall, precision, f1
def calc_time_loc_error(t_pred, xyz_pred, t_true, xyz_true, time_accuracy_threshold):
    """Compute time and location errors for true events matched to predictions.

    A true event is "recalled" when some prediction lies within
    ``time_accuracy_threshold`` seconds of it; each recalled event is paired
    with its closest-in-time prediction.

    Returns numpy arrays (err_time, err_xyz, err_xy, err_z, err_loc, t):
    signed pred-minus-true differences for time / per-axis / depth, Euclidean
    norms for horizontal (err_xy) and 3-D (err_loc), and the true times t of
    the recalled events.
    """
    # rows = true events, cols = predictions; True where |dt| < threshold
    evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold  # s
    diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
    # Index of the closest prediction, restricted to recalled true events.
    matched_idx = np.argmin(np.abs(diff_time), axis=1)[np.sum(evaluation_matrix, axis=1) > 0]
    recalled_idx = np.arange(xyz_true.shape[0])[np.sum(evaluation_matrix, axis=1) > 0]
    # Signed time error of the closest prediction, for recalled events only.
    err_time = diff_time[np.arange(diff_time.shape[0]), np.argmin(np.abs(diff_time), axis=1)][
        np.sum(evaluation_matrix, axis=1) > 0
    ]
    err_z = []
    err_xy = []
    err_xyz = []
    err_loc = []
    t = []
    for i in range(len(recalled_idx)):
        # tmp_z = np.abs(xyz_pred[matched_idx[i], 2] - xyz_true[recalled_idx[i], 2])
        tmp_z = xyz_pred[matched_idx[i], 2] - xyz_true[recalled_idx[i], 2]
        tmp_xy = np.linalg.norm(xyz_pred[matched_idx[i], 0:2] - xyz_true[recalled_idx[i], 0:2])
        tmp_xyz = xyz_pred[matched_idx[i], :] - xyz_true[recalled_idx[i], :]
        tmp_loc = np.linalg.norm(xyz_pred[matched_idx[i], 0:3] - xyz_true[recalled_idx[i], 0:3])
        err_z.append(tmp_z)
        err_xy.append(tmp_xy)
        err_xyz.append(tmp_xyz)
        err_loc.append(tmp_loc)
        t.append(t_true[recalled_idx[i]])
    return np.array(err_time), np.array(err_xyz), np.array(err_xy), np.array(err_z), np.array(err_loc), np.array(t)
def calc_time_mag_error(t_pred, mag_pred, t_true, mag_true, time_accuracy_threshold):
    """Compute time and magnitude errors for true events matched to predictions.

    Matching follows the same closest-in-time rule as calc_time_loc_error.
    Returns numpy arrays (err_time, err_mag, t, mag): signed pred-minus-true
    differences, the matched predicted times t, and the true magnitudes mag
    of the recalled events.
    """
    # rows = true events, cols = predictions; True where |dt| < threshold
    evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold  # s
    diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
    # Index of the closest prediction, restricted to recalled true events.
    matched_idx = np.argmin(np.abs(diff_time), axis=1)[np.sum(evaluation_matrix, axis=1) > 0]
    recalled_idx = np.arange(mag_true.shape[0])[np.sum(evaluation_matrix, axis=1) > 0]
    err_time = diff_time[np.arange(diff_time.shape[0]), np.argmin(np.abs(diff_time), axis=1)][
        np.sum(evaluation_matrix, axis=1) > 0
    ]
    err_mag = []
    t = []
    mag = []
    for i in range(len(recalled_idx)):
        tmp_mag = mag_pred[matched_idx[i]] - mag_true[recalled_idx[i]]
        err_mag.append(tmp_mag)
        t.append(t_pred[matched_idx[i]])
        mag.append(mag_true[recalled_idx[i]])
    return np.array(err_time), np.array(err_mag), np.array(t), np.array(mag)
def plot_loc_error(
    t_pred, xyz_pred, t_true, xyz_true, time_accuracy_threshold, fig_name, xlim=None, ylim=None, station_locs=None
):
    """Scatter-plot matched catalog vs. predicted epicenters; save to <fig_name>.png."""
    # rows = true events, cols = predictions; True where |dt| < threshold
    evaluation_matrix = np.abs(t_pred[np.newaxis, :] - t_true[:, np.newaxis]) < time_accuracy_threshold  # s
    diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
    matched_idx = np.argmin(np.abs(diff_time), axis=1)[np.sum(evaluation_matrix, axis=1) > 0]
    recalled_idx = np.arange(xyz_true.shape[0])[np.sum(evaluation_matrix, axis=1) > 0]
    # err_time = diff_time[np.arange(diff_time.shape[0]), np.argmin(np.abs(diff_time), axis=1)][
    #     np.sum(evaluation_matrix, axis=1) > 0
    # ]
    plt.figure()
    # plt.scatter(xyz_true[recalled_idx,0], xyz_true[recalled_idx,1], s=2, c="C3", alpha=0.8, label="SCSN")
    # plt.scatter(xyz_pred[matched_idx, 0], xyz_pred[matched_idx, 1], s=2, c="C0", marker="x", alpha=0.8, label="End2End")
    plt.plot(xyz_true[recalled_idx, 0], xyz_true[recalled_idx, 1], ".", color="C3", markersize=2, alpha=0.8)
    plt.plot(xyz_pred[matched_idx, 0], xyz_pred[matched_idx, 1], ".", color="C0", markersize=2, alpha=0.8)
    # Off-screen dummy points create larger legend markers than the data dots.
    plt.plot(-100, -100, ".", color="C3", markersize=10, alpha=0.5, label="SCSN")
    plt.plot(-100, -100, ".", color="C0", markersize=10, alpha=0.5, label="End2End")
    if station_locs is not None:
        plt.scatter(station_locs[:, 0], station_locs[:, 1], color="k", marker="^", label="Station")
    plt.axis("scaled")
    if xlim is not None:
        plt.xlim(xlim)
    if ylim is not None:
        plt.ylim(ylim)
    plt.xlabel("X (km)")
    plt.ylabel("Y (km)")
    plt.legend()
    # plt.title("Earthquake locati")
    # for i in range(len(recalled_idx)):
    #     plt.plot([xyz_true[recalled_idx[i],0], xyz_pred[matched_idx[i], 0]], [xyz_true[recalled_idx[i],1], xyz_pred[matched_idx[i], 1]], '--')
    # plt.plot([10,40], [10, 40], 'r-')
    plt.savefig(fig_name + ".png", bbox_inches="tight")
    # plt.savefig(fig_name + ".pdf", bbox_inches="tight")
def plot_waveform(
    t_plot, xyz_plot, t_pred, t_true, station_locs, waveform, time, fig_dir, num_plot=50, type="pred", vp=6.0
):
    """Plot per-event record sections (traces offset by station distance).

    For each of the first ``num_plot`` events in t_plot, plot a [-10s, +35s]
    window of every station's last waveform channel, mark catalog (dashed C1)
    and predicted (solid C0) event times, and overlay a P-wave moveout line
    (slope vp km/s) anchored at the event given by ``type``.
    Saves one PNG per event into fig_dir.

    NOTE(review): the parameter name ``type`` shadows the builtin; kept for
    interface compatibility.
    """
    dt = 0.01  # sample interval in seconds
    for i in tqdm(range(min(len(t_plot), num_plot))):
        # Plot window around the event time, in seconds.
        t = [int(t_plot[i]) - 10, int(t_plot[i]) + 35]
        dist = np.linalg.norm(xyz_plot[i] - station_locs, axis=1)
        plt.figure(figsize=(15, 6))
        for j in range(waveform.shape[0]):
            plt.plot(
                time[max([int(t[0] / dt), 0]) : int(t[1] / dt)],
                waveform[j, -1, max([int(t[0] / dt), 0]) : int(t[1] / dt)] * 3 + dist[j],
                linewidth=0.5,
                color="k",
            )
        plt.xlim(t)
        ylim = plt.gca().get_ylim()
        # Catalog (true) event markers near the window.
        t_selected = t_true[(t[0] - 30 < t_true) & (t_true < t[1] + 30)]
        for j in range(len(t_selected)):
            if j == 0:
                label = "Catalog"
            else:
                label = ""
            (tmp,) = plt.plot([t_selected[j], t_selected[j]], ylim, "--", color="C1", linewidth=2, label=label)
        if type == "true":
            plt.plot(
                time[max([int(t[0] / dt), 0]) : int(t[1] / dt)],
                (time[max([int(t[0] / dt), 0]) : int(t[1] / dt)] - t_true[i]) * vp,
                ":",
                color="C1",
            )
        # Predicted event markers near the window.
        t_selected = t_pred[(t[0] - 30 < t_pred) & (t_pred < t[1] + 30)]
        for j in range(len(t_selected)):
            if j == 0:
                label = "End2End"
            else:
                label = ""
            (tmp,) = plt.plot([t_selected[j], t_selected[j]], ylim, "-", color="C0", linewidth=2, label=label)
        if type == "pred":
            plt.plot(
                time[max([int(t[0] / dt), 0]) : int(t[1] / dt)],
                (time[max([int(t[0] / dt), 0]) : int(t[1] / dt)] - t_pred[i]) * vp,
                ":",
                color="C0",
            )
        plt.ylim(ylim)
        plt.legend(loc="lower right")
        plt.ylabel("Distance (km)")
        plt.xlabel("Time (s)")
        plt.savefig(os.path.join(fig_dir, f"{i:04d}.png"))
        plt.close()
def plot_true_positive(
    t_pred,
    t_true,
    threshold,
    xyz_pred,
    date,
    fig_dir,
    data_dir=None,
    waveform=None,
    station_locs=None,
    num_plot=50,
    vp=6.0,
):
    """Plot waveforms for true-positive detections (predictions matched in time).

    delta_time = [[pred1-true1, pred2-true1, pred3-true1, ...]
                  [pred1-true2, pred2-true2, pred3-true2, ...]
                  [pred1-true3, pred2-true3, pred3-true3, ...]
                  ...]
    """
    dt = 0.01  # sample interval in seconds
    ## load station locations and hourly waveform tensors
    if (waveform is None) and (data_dir is not None):
        station_locs = torch.load(os.path.join(data_dir, 'stations.pt'))[1]
        waveform = []
        for hour in tqdm(range(24), desc="Hour"):
            tmp = torch.load(os.path.join(data_dir, f"{date}/{hour:02d}.pt"))
            # NOTE(review): log_transform is not defined in this module —
            # presumably imported elsewhere via peeps/GaMMA utilities; confirm.
            tmp = log_transform(tmp.type(torch.DoubleTensor))
            waveform.append(tmp)
        waveform = np.concatenate(waveform, axis=2)
        np.nan_to_num(waveform, copy=False)
    time = np.arange(waveform.shape[-1]) * dt
    ## find true positives: predictions within `threshold` s of a true event
    diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
    evaluation_matrix = np.abs(diff_time) < threshold  # s
    tp_idx = np.sum(evaluation_matrix, axis=0) > 0
    t_tp = t_pred[tp_idx]
    xyz_tp = xyz_pred[tp_idx]
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir, exist_ok=True)
    np.seterr("ignore")
    ## plot true positives
    plot_waveform(t_tp, xyz_tp, t_tp, t_true, station_locs, waveform, time, fig_dir, type="pred")
def plot_false_positive(
    t_pred,
    t_true,
    threshold,
    xyz_pred,
    date,
    fig_dir,
    data_dir=None,
    waveform=None,
    station_locs=None,
    num_plot=50,
    vp=6.0,
):
    """Plot waveforms for false-positive detections (predictions with no time match).

    delta_time = [[pred1-true1, pred2-true1, pred3-true1, ...]
                  [pred1-true2, pred2-true2, pred3-true2, ...]
                  [pred1-true3, pred2-true3, pred3-true3, ...]
                  ...]
    """
    dt = 0.01  # sample interval in seconds
    ## load station locations and hourly waveform tensors
    if (waveform is None) and (data_dir is not None):
        station_locs = torch.load(os.path.join(data_dir, 'stations.pt'))[1]
        waveform = []
        for hour in tqdm(range(24), desc="Hour"):
            tmp = torch.load(os.path.join(data_dir, f"{date}/{hour:02d}.pt"))
            # NOTE(review): log_transform is not defined in this module — confirm import.
            tmp = log_transform(tmp.type(torch.DoubleTensor))
            waveform.append(tmp)
        waveform = np.concatenate(waveform, axis=2)
        np.nan_to_num(waveform, copy=False)
    time = np.arange(waveform.shape[-1]) * dt
    ## find false positives: predictions with no true event within `threshold` s
    diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
    evaluation_matrix = np.abs(diff_time) < threshold  # s
    fp_idx = np.sum(evaluation_matrix, axis=0) == 0
    t_fp = t_pred[fp_idx]
    xyz_fp = xyz_pred[fp_idx]
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir, exist_ok=True)
    np.seterr("ignore")
    ## plot false positives
    plot_waveform(t_fp, xyz_fp, t_fp, t_true, station_locs, waveform, time, fig_dir, type="pred")
def plot_false_negative(
    t_pred,
    t_true,
    threshold,
    xyz_true,
    date,
    fig_dir,
    data_dir=None,
    waveform=None,
    station_locs=None,
    num_plot=50,
    vp=6.0,
):
    """Plot waveforms for false negatives (true events missed by all predictions).

    delta_time = [[pred1-true1, pred2-true1, pred3-true1, ...]
                  [pred1-true2, pred2-true2, pred3-true2, ...]
                  [pred1-true3, pred2-true3, pred3-true3, ...]
                  ...]
    """
    dt = 0.01  # sample interval in seconds
    ## load station locations and hourly waveform tensors
    if (waveform is None) and (data_dir is not None):
        station_locs = torch.load(os.path.join(data_dir, 'stations.pt'))[1]
        waveform = []
        for hour in tqdm(range(24), desc="Hour"):
            tmp = torch.load(os.path.join(data_dir, f"{date}/{hour:02d}.pt"))
            # NOTE(review): log_transform is not defined in this module — confirm import.
            tmp = log_transform(tmp.type(torch.DoubleTensor))
            waveform.append(tmp)
        waveform = np.concatenate(waveform, axis=2)
        np.nan_to_num(waveform, copy=False)
    time = np.arange(waveform.shape[-1]) * dt
    ## find false negatives: true events with no prediction within `threshold` s
    diff_time = t_pred[np.newaxis, :] - t_true[:, np.newaxis]
    evaluation_matrix = np.abs(diff_time) < threshold  # s
    fn_idx = np.sum(evaluation_matrix, axis=1) == 0
    t_fn = t_true[fn_idx]
    xyz_fn = xyz_true[fn_idx]
    if not os.path.exists(fig_dir):
        os.makedirs(fig_dir, exist_ok=True)
    np.seterr("ignore")
    ## plot false negatives
    plot_waveform(t_fn, xyz_fn, t_pred, t_fn, station_locs, waveform, time, fig_dir, type="true")
if __name__ == "__main__":
    # Local import: the module-level `import fire` is commented out, so
    # calling fire.Fire here previously raised NameError. Importing inside
    # the CLI guard keeps the module importable without the dependency.
    import fire

    # Expose load_GaMMA_catalog as a command-line tool.
    fire.Fire(load_GaMMA_catalog)
| 34.332805 | 175 | 0.565962 | 2,885 | 21,664 | 4.10364 | 0.125477 | 0.011825 | 0.022806 | 0.024833 | 0.629614 | 0.586029 | 0.550722 | 0.523186 | 0.515584 | 0.50511 | 0 | 0.038654 | 0.252446 | 21,664 | 630 | 176 | 34.387302 | 0.692374 | 0.086088 | 0 | 0.428279 | 0 | 0.008197 | 0.102242 | 0.004932 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030738 | false | 0 | 0.016393 | 0 | 0.077869 | 0.002049 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b68c42c88331b8f4284ebd3949497dbb9ed4959f | 869 | py | Python | datasetinsights/constants.py | 86sanj/datasetinsights | 8b34c434fc841ccb20f3ad06985f82dfe3829d02 | [
"Apache-2.0"
] | null | null | null | datasetinsights/constants.py | 86sanj/datasetinsights | 8b34c434fc841ccb20f3ad06985f82dfe3829d02 | [
"Apache-2.0"
] | null | null | null | datasetinsights/constants.py | 86sanj/datasetinsights | 8b34c434fc841ccb20f3ad06985f82dfe3829d02 | [
"Apache-2.0"
] | null | null | null | import os
from datetime import datetime
# Timestamp suffix (e.g. "20240101-120000") for naming runs/artifacts.
TIMESTAMP_SUFFIX = datetime.now().strftime("%Y%m%d-%H%M%S")
# Parent directory of the package containing this file.
PROJECT_ROOT = os.path.dirname(os.path.dirname(__file__))
# URI scheme prefixes used to recognize storage locations.
GCS_BASE_STR = "gs://"
HTTP_URL_BASE_STR = "http://"
HTTPS_URL_BASE_STR = "https://"
LOCAL_FILE_BASE_STR = "file://"
# String sentinel representing a missing/None value.
NULL_STRING = "None"
DEFAULT_DATA_ROOT = "/data"
SYNTHETIC_SUBFOLDER = "synthetic"
# Default Unity Project ID where USim jobs was executed
DEFAULT_PROJECT_ID = "474ba200-4dcc-4976-818e-0efd28efed30"
USIM_API_ENDPOINT = "https://api.simulation.unity3d.com"
# Default Timing text for codetiming.Timer decorator
TIMING_TEXT = "[{name}] elapsed time: {:0.4f} seconds."
# Click CLI context settings
CONTEXT_SETTINGS = {
    "help_option_names": ["-h", "--help"],
    "show_default": True,
    "ignore_unknown_options": True,
    "allow_extra_args": True,
}
DEFAULT_DATASET_VERSION = "latest"
| 27.15625 | 59 | 0.734177 | 121 | 869 | 4.975207 | 0.661157 | 0.046512 | 0.043189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028871 | 0.12313 | 869 | 31 | 60 | 28.032258 | 0.761155 | 0.150748 | 0 | 0 | 0 | 0 | 0.337875 | 0.079019 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b68d1dbf0c5ca4f1cf1c2b340f13d2c57fe8602c | 7,829 | py | Python | server.py | danielemoro/PeepsFinder | 423e95526900aa6c932306a9670c06e41189e27b | [
"Apache-2.0"
] | 2 | 2019-07-17T22:04:25.000Z | 2021-03-03T17:41:07.000Z | server.py | danielemoro/PeepsFinder | 423e95526900aa6c932306a9670c06e41189e27b | [
"Apache-2.0"
] | 19 | 2019-12-26T17:23:33.000Z | 2022-03-21T22:19:06.000Z | server.py | danielemoro/PeepsFinder | 423e95526900aa6c932306a9670c06e41189e27b | [
"Apache-2.0"
] | null | null | null | # IMPORTANT: Change these file paths to be in the same repository as the webs server running Peeps.
# You can find the Peeps web server repository here: https://github.com/danielemoro/peeps/tree/peeps_finder
# Polled communication channels shared with the Peeps web server:
# the server appends user input lines to input_file; this script appends
# HTML-ish output lines to output_file.
input_file = "D:/Google Drive/BSU/BSU 2018 Fall/CS401/website/peeps_finder_in.txt"
output_file = "D:/Google Drive/BSU/BSU 2018 Fall/CS401/website/peeps_finder_out.txt"
import collections
from collections import Counter
from peeps_finder import *
import re
import json
import time
from textblob import TextBlob
# Attributes presented first during validation, in this order.
import_attr = ['email', 'phone', 'occupation', 'position held', 'organization',
               'educated at', 'known for', 'knows', 'country', 'keyword']
# Attributes never shown to the user.
blacklist_attr = ['number', 'important date', 'important time', 'family name']
# These words specify that the user is done validating information.
# Type these instead of a number to skip the validation step.
end_words = ['end', 'stop', 'done', 'exit']
def print_attr(name, values, attr_max_len=50):
    """Print a titled, 1-indexed list of (value, confidence) pairs.

    Each value is truncated to ``attr_max_len`` characters. The final replace
    substitutes spaces for display in the web output.
    NOTE(review): the replacement character looks like a (possibly
    non-breaking) space — confirm against the original source.
    """
    user_print(str(name).title())
    for i, v in enumerate(values):
        user_print((('[{0:2}] {1:' + str(attr_max_len) + '} {2:10}').format(i + 1, v[0].strip()[:attr_max_len],
                                                                            v[1].strip())).replace(" ", " "))
def clean_info(info):
    """Convert raw scraped info into {attribute: [(value, confidence_label)]}.

    Emails and phones are ranked by frequency (top 5), relation extractions
    are high confidence, named entities are labeled by how often they were
    seen, and the top 20 keywords come from noun phrases plus tf-idf terms.
    """
    pdata = collections.defaultdict(list)

    def ranked_counts(values, limit):
        # Most frequent first; ties keep first-seen order (stable sort).
        return sorted(Counter(values).items(), key=lambda item: item[1], reverse=True)[:limit]

    pdata['email'] = [(value, 'Medium confidence (seen {} times)'.format(count))
                      for value, count in ranked_counts(info['email'], 5)]
    pdata['phone'] = [(value, 'Medium confidence (seen {} times)'.format(count))
                      for value, count in ranked_counts(info['phone'], 5)]

    for rel in info['rel_extr']:
        pdata[rel[0]].append((rel[1], 'High confidence'))

    for ent in info['named_entities']:
        confidence = ('High confidence (seen {} times)'.format(ent[2]) if ent[2] > 3
                      else 'Medium confidence (seen {} times)'.format(ent[2]))
        pdata[ent[0]].append((ent[1], confidence))

    pdata['keyword'] = [
        (word, 'Medium confidence (seen {} times)'.format(count) if count > 1
         else 'Low confidence (seen 1 times)')
        for word, count in ranked_counts(info['noun_phrases'] + info['tfidf'], 20)
    ]
    return pdata
def print_all_info(info):
    """Print important attributes first, then any remaining non-blacklisted ones."""
    for important in import_attr:
        if important in info:
            print_attr(important, info[important])
    hidden = import_attr + blacklist_attr
    for attr in info.keys():
        if attr is not None and attr not in hidden:
            print_attr(attr, info[attr])
def user_print(string=''):
    """Echo a message to stdout and append it to the web-server output channel.

    Empty strings print a blank line but are not written to the file.
    """
    message = string + "\n"
    print(message)
    if not string:
        return
    with open(output_file, 'a') as out:
        out.write(message + "</br>")
def user_received_output():
    """Block until the partner web server has consumed (emptied) the output file.

    Busy-waits with a short sleep; the server signals receipt by truncating
    output_file to zero lines.
    """
    done = False
    while not done:
        with open(output_file, 'r') as f:
            lines = f.readlines()
        if len(lines) == 0:
            done = True
        else:
            time.sleep(0.01)  # poll until the file is cleared
def user_input(string=''):
    """Prompt via user_print, then block until a new line appears in input_file.

    The web server appends one line per user submission; the module-level
    global ``last_len`` tracks how many lines were already consumed, so a
    growing line count means fresh input. Returns the latest line, stripped.
    """
    user_print(string)
    done = False
    while not done:
        with open(input_file, 'r') as f:
            lines = f.readlines()
        curr_input = lines[-1].strip() if len(lines) >= 1 else ''
        global last_len
        if len(lines) > last_len:
            done = True
            last_len = len(lines)
        else:
            time.sleep(0.1)  # poll until a new line arrives
    return curr_input
def extract_nums(string_input, max_num):
    """Parse 1-based numbers (space/comma/end separated) into 0-based indices.

    Values greater than ``max_num`` are dropped.
    """
    picked = []
    for token, _sep in re.findall(r"([\d]+(\s|\,|$)){1}", string_input):
        value = int(token.replace(',', ''))
        if value <= max_num:
            picked.append(value - 1)
    return picked
def user_search(peeps_finder, name=None, search_term=None, topn=20):
    """Ask for a person's name (if not given), run a search, return (info, name).

    Recurses until the user enters a valid two-word alphabetic name. ``info``
    is the cleaned attribute dict from clean_info.
    """
    if name is None:
        name = user_input("Who would you like to search for? ").strip()
    # Require exactly two space-separated alphabetic words.
    name_check = re.match(r"([a-zA-Z]+(\s|$)){2}", name)
    if name_check is None or name_check.group() != name:
        user_print("I'm sorry, I didn't get that. Please enter a name consisting of two words separated by a space")
        return user_search(peeps_finder)
    user_print("\nSearching for {} ... please wait ...".format(name if search_term is None else search_term))
    info = peeps_finder.retrieve_person_data(name, search=search_term, topn=topn)
    info = clean_info(info)
    user_print("Found some information</br>")
    return info, name
def user_validation(info):
    """Walk the user through each attribute; return kept (attr, values) pairs.

    For each attribute the user types index numbers to keep, or one of
    ``end_words`` to stop early. Kept values are truncated to 50 characters
    and joined with commas.
    """
    user_print("Please validate the following information. Type 'done' when done.<hr>")
    # Ask about the important attributes first, then the rest (minus blacklist).
    attrs_to_ask = []
    for attr in import_attr:
        if attr in info:
            attrs_to_ask.append(attr)
    for attr in info.keys():
        if attr not in import_attr + blacklist_attr and attr is not None:
            attrs_to_ask.append(attr)
    keep = []
    for attr in attrs_to_ask:
        print_attr("<div class=\".h3c\">" + attr + "</div>", info[attr])
        num_input = user_input('\n</br>What number(s) would you like to keep? ')
        if num_input.lower().strip() in end_words: break
        nums = extract_nums(num_input, len(info[attr]))
        if len(nums) > 0:
            combined_values = ", ".join([str(info[attr][n][0])[:50] for n in nums])
            user_print("\t{}: {}".format(attr, combined_values))
            keep.append((attr, combined_values))
        else:
            user_print('\tNot keeping any {} values'.format(attr))
    user_received_output()
    user_print("Validation of collected information is complete!\n")
    user_print("I am recording the following data:<hr>")
    for i in keep:
        user_print("    {:25}: {:100}".format(i[0], i[1]))
    user_print()
    return keep
def user_get_feedback(name, keep):
    """Ask the user to rate the collected data; offer a refined search when negative.

    Returns (search_term, sentiment, feedback) where search_term is a new
    query string to retry with, or False when the user is satisfied.
    """
    feedback = user_input("</br>How do you rate the collected data (great, ok, bad, etc)? ")
    # TextBlob polarity is in [-1, 1]; below 0.5 is treated as "not satisfied".
    sentiment = TextBlob(feedback).sentiment.polarity
    if sentiment < 0.5:
        if user_input("Would you like to make a better search?").lower().strip() in ['yes', 'sure', 'ok', 'yep', 'y']:
            user_print("Please select a new search term or provide your own")
            # Offer "<name> <kept value>" combinations as candidate queries.
            for i, a in enumerate(keep):
                user_print("    [{:2}]: {} {}".format(i + 1, name, a[1]))
            redo = user_input()
            nums = extract_nums(redo, len(keep))
            search_term = str(name) + ' ' + keep[nums[0]][1] if len(nums) > 0 else str(redo)
            user_print("Redoing search with the phrase {}\n".format(search_term))
            return (search_term, sentiment, feedback)
    return (False, sentiment, feedback)
def run_session(peeps_finder):
    """Drive one full search/validate/feedback loop, then persist the results.

    Repeats search + validation with refined search terms until the user is
    satisfied, writes the kept data as JSON to the output channel, and logs
    all feedback to logfile.json. Blocks at the end waiting for 'END'.
    """
    keep = None
    feedbacks = []
    keep_going = True
    search_term = None
    name = None
    while keep_going:
        info, name = user_search(peeps_finder, name=name, search_term=search_term)
        keep = user_validation(info)
        search_term, sentiment, feedback = user_get_feedback(name, keep)
        feedbacks.append((feedback, sentiment, str(keep), name))
        if not search_term:
            keep_going = False
    keep.insert(0, ('name', name))
    # Deliver the final result to the web server as JSON.
    with open(output_file, 'a') as f:
        json.dump(keep, f)
    print(keep)
    # Log session feedback for later analysis.
    with open('logfile.json', 'a') as f:
        json.dump(feedbacks, f)
    if user_input() == 'END':
        print("SESSION ENDED")
        return
if __name__ == "__main__":
    # Number of input-file lines already consumed (shared with user_input).
    last_len = 0
    peeps_finder = PeepsFinder()
    # Run indefinitely, as long as the partner web server is running
    while True:
        # Clear communications channels
        last_len = 0
        with open(output_file, 'w') as f:
            f.write("")
        with open(input_file, 'w') as f:
            f.write("")
        print("WAITING FOR NEW SESSION")
        # The web server writes 'start' to input_file to begin a session.
        if user_input().strip().lower() == 'start':
            print("STARTING SESSION")
            run_session(peeps_finder)
        else:
            print("Error: unexpected input")
| 36.584112 | 120 | 0.609401 | 1,103 | 7,829 | 4.194923 | 0.24388 | 0.033067 | 0.009077 | 0.027015 | 0.229738 | 0.177005 | 0.160147 | 0.128161 | 0.109574 | 0.08861 | 0 | 0.014763 | 0.247286 | 7,829 | 213 | 121 | 36.755869 | 0.770406 | 0.059522 | 0 | 0.184049 | 0 | 0.006135 | 0.193908 | 0.01047 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067485 | false | 0 | 0.079755 | 0.006135 | 0.202454 | 0.171779 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b68d3ffb40a4006bf5b8a5543641f9a3020929bc | 6,497 | py | Python | python/oak/vio_record.py | SpectacularAI/sdk-examples | 70840f7a68d9536ce473c4d20e224ecfa90284ce | [
"Apache-2.0"
] | 18 | 2021-11-02T09:32:11.000Z | 2022-03-29T17:04:32.000Z | python/oak/vio_record.py | SpectacularAI/sdk-examples | 70840f7a68d9536ce473c4d20e224ecfa90284ce | [
"Apache-2.0"
] | 4 | 2021-11-02T05:19:11.000Z | 2022-03-28T08:53:27.000Z | python/oak/vio_record.py | SpectacularAI/sdk-examples | 70840f7a68d9536ce473c4d20e224ecfa90284ce | [
"Apache-2.0"
] | 2 | 2022-02-25T14:28:45.000Z | 2022-03-05T14:14:55.000Z | """
Record data for later playback
Requirements:
ffmpeg must be installed.
On Linux you can install it with package manager
of your choise. For example with
ap-get: sudo apt-get install ffmpeg
yuM: sudo yum install ffmpeg
On Windows, you must download and install it from https://www.ffmpeg.org and
then update your environment Path variable to contain the binary path. To do
this, press Windows Key, type Path and press Enter. Open Environment Settings,
edit the row named Path and add location of the ffmpeg bin folder to the list,
for example: "C:\Program Files\ffmpeg\bin". To check that it works, open
command prompt and type ffmpeg, you should see version information.
To view the depth video file, you must use ffplay, because normal video players
cannot play 16bit grayscale video.
Plug in the OAK-D and run:
python examples/vio_record.py
"""
import depthai
import spectacularAI
import signal
import sys
import argparse
import subprocess
import os
import json
import threading
# SDK configuration object; its defaults seed the CLI defaults below.
config = spectacularAI.depthai.Configuration()

# Command-line options; by default everything is recorded into ./data.
p = argparse.ArgumentParser(__doc__)
p.add_argument("--output", help="Recording output folder", default="data")
p.add_argument("--no_rgb", help="Disable recording RGB video feed", action="store_true")
p.add_argument("--no_inputs", help="Disable recording JSONL and depth", action="store_true")
p.add_argument("--gray", help="Record (rectified) gray video data", action="store_true")
p.add_argument("--no_convert", help="Skip converting h265 video file", action="store_true")
p.add_argument('--no_preview', help='Do not show a live preview', action="store_true")
p.add_argument('--slam', help='Record SLAM map', action="store_true")
p.add_argument('--no_feature_tracker', help='Disable on-device feature tracking', action="store_true")
p.add_argument("--resolution", help="Gray input resolution (gray)",
    default=config.inputResolution,
    choices=['400p', '800p'])
args = p.parse_args()
pipeline = depthai.Pipeline()

# Apply CLI options to the SDK configuration.
config.inputResolution = args.resolution
if not args.no_inputs:
    config.recordingFolder = args.output
if args.slam:
    config.useSlam = True
    try: os.makedirs(args.output) # SLAM only
    except: pass
    config.mapSavePath = os.path.join(args.output, 'slam_map._')
if args.no_feature_tracker:
    config.useFeatureTracker = False

# Enable recording by setting the recordingFolder option above.
vio_pipeline = spectacularAI.depthai.Pipeline(pipeline, config)

# Optionally also record other video streams not used by the Spectacular AI SDK, these
# can be used for example to render AR content or for debugging.
if not args.no_rgb:
    camRgb = pipeline.create(depthai.node.ColorCamera)
    videoEnc = pipeline.create(depthai.node.VideoEncoder)
    xout = pipeline.create(depthai.node.XLinkOut)
    xout.setStreamName("h265-rgb")
    camRgb.setBoardSocket(depthai.CameraBoardSocket.RGB)
    camRgb.setResolution(depthai.ColorCameraProperties.SensorResolution.THE_1080_P)
    # no need to set input resolution anymore (update your depthai package if this does not work)
    videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H265_MAIN)
    camRgb.video.link(videoEnc.input)
    videoEnc.bitstream.link(xout.input)

if args.gray:
    def create_gray_encoder(node, name):
        """Attach an H.264 encoder + XLink output to a rectified gray stream."""
        videoEnc = pipeline.create(depthai.node.VideoEncoder)
        xout = pipeline.create(depthai.node.XLinkOut)
        xout.setStreamName("h264-" + name)
        videoEnc.setDefaultProfilePreset(30, depthai.VideoEncoderProperties.Profile.H264_MAIN)
        node.link(videoEnc.input)
        videoEnc.bitstream.link(xout.input)
    create_gray_encoder(vio_pipeline.stereo.rectifiedLeft, 'left')
    create_gray_encoder(vio_pipeline.stereo.rectifiedRight, 'right')
# Set by the main thread to stop the recording loop.
should_quit = False
def main_loop(plotter=None):
    """Run the recording session until should_quit or the preview closes.

    Pulls RGB/gray frames to disk and VIO outputs from the device. When
    ``plotter`` is given, each VIO output (as parsed JSON) is fed to it and
    recording stops when it returns False. Afterwards recorded h264/h265
    streams are converted to mp4 with ffmpeg unless --no_convert was given.
    """
    frame_number = 1
    with depthai.Device(pipeline) as device, \
        vio_pipeline.startSession(device) as vio_session:

        def open_gray_video(name):
            """Open the output file and device queue for one gray stream."""
            grayVideoFile = open(args.output + '/rectified_' + name + '.h264', 'wb')
            queue = device.getOutputQueue(name='h264-' + name, maxSize=10, blocking=False)
            return (queue, grayVideoFile)

        grayVideos = []
        if args.gray:
            grayVideos = [
                open_gray_video('left'),
                open_gray_video('right')
            ]

        if not args.no_rgb:
            videoFile = open(args.output + "/rgb_video.h265", "wb")
            rgbQueue = device.getOutputQueue(name="h265-rgb", maxSize=30, blocking=False)

        print("Recording!")
        print("")
        if plotter is not None:
            print("Close the visualization window to stop recording")

        while not should_quit:
            if not args.no_rgb:
                # Drain the RGB queue; tag each frame in the VIO session so the
                # recording can be synchronized with the video later.
                while rgbQueue.has():
                    frame = rgbQueue.get()
                    vio_session.addTrigger(frame.getTimestamp().total_seconds(), frame_number)
                    frame.getData().tofile(videoFile)
                    frame_number += 1

            for (grayQueue, grayVideoFile) in grayVideos:
                if grayQueue.has():
                    grayQueue.get().getData().tofile(grayVideoFile)

            out = vio_session.waitForOutput()
            if plotter is not None:
                if not plotter(json.loads(out.asJson())): break

    # Close all video files and remember their names for conversion.
    videoFileNames = []
    if not args.no_rgb:
        videoFileNames.append(videoFile.name)
        videoFile.close()
    for (_, grayVideoFile) in grayVideos:
        videoFileNames.append(grayVideoFile.name)
        grayVideoFile.close()

    for fn in videoFileNames:
        if not args.no_convert:
            # Remux (no re-encode) into an mp4 container; delete the raw
            # stream on success, otherwise tell the user how to do it manually.
            withoutExt = fn.rpartition('.')[0]
            ffmpegCommand = "ffmpeg -framerate 30 -y -i {} -avoid_negative_ts make_zero -c copy {}.mp4".format(fn, withoutExt)
            result = subprocess.run(ffmpegCommand, shell=True)
            if result.returncode == 0:
                os.remove(fn)
            else:
                print('')
                print("Use ffmpeg to convert video into a viewable format:")
                print("    " + ffmpegCommand)
# Entry point: the reader thread records; the main thread blocks either on
# ENTER (no preview) or on the matplotlib window, then lets the reader finish.
if args.no_preview:
    plotter = None
else:
    from vio_visu import make_plotter
    import matplotlib.pyplot as plt
    # NOTE(review): `anim` is presumably kept alive so the matplotlib
    # animation is not garbage-collected -- confirm against vio_visu.
    plotter, anim = make_plotter()
reader_thread = threading.Thread(target = lambda: main_loop(plotter))
reader_thread.start()
if plotter is None:
    input("---- Press ENTER to stop recording ----")
    should_quit = True  # observed by main_loop's while-loop
else:
    # Blocks until the window is closed; main_loop then stops because the
    # plotter callback returns falsy.
    plt.show()
reader_thread.join()
| 36.094444 | 126 | 0.687548 | 811 | 6,497 | 5.405672 | 0.360049 | 0.008212 | 0.024635 | 0.025547 | 0.188412 | 0.158531 | 0.093066 | 0.066606 | 0.045164 | 0.045164 | 0 | 0.010547 | 0.211944 | 6,497 | 179 | 127 | 36.296089 | 0.845703 | 0.1827 | 0 | 0.139344 | 0 | 0 | 0.141265 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02459 | false | 0.008197 | 0.090164 | 0 | 0.122951 | 0.04918 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b68e1a9a9505bdaff392741746a2cc8527c4f6b6 | 311 | py | Python | src/fileIOTest.py | justinsmits/dlm | 39281701f4512cfc34dede0141d83b7cd8e247f4 | [
"MIT"
] | null | null | null | src/fileIOTest.py | justinsmits/dlm | 39281701f4512cfc34dede0141d83b7cd8e247f4 | [
"MIT"
] | null | null | null | src/fileIOTest.py | justinsmits/dlm | 39281701f4512cfc34dede0141d83b7cd8e247f4 | [
"MIT"
] | null | null | null | import os
def fileTest():
    """Print this script's directory and dump ../FileTest/data.txt line by line."""
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print(dir_path)
    data_path = os.path.join(dir_path, '../FileTest/data.txt')
    print(data_path)
    # Use a context manager so the handle is closed even if printing fails;
    # the original opened the file and never called close().
    with open(data_path, 'r') as file:
        for line in file:
            print(line)


if __name__ == '__main__':
    fileTest()
b6908e28349ce2d7d9f38b20eac30f1523c0d324 | 501 | py | Python | OSGI/predictive-python/python model/test.py | davinder2385/iot-edge-samples | e6667947440f3eb0781ab4fe22281f4c1d79f376 | [
"Apache-2.0"
] | 7 | 2019-12-03T10:05:31.000Z | 2021-01-21T19:05:55.000Z | OSGI/predictive-python/python model/test.py | davinder2385/iot-edge-samples | e6667947440f3eb0781ab4fe22281f4c1d79f376 | [
"Apache-2.0"
] | 8 | 2020-01-08T08:03:21.000Z | 2020-09-04T18:25:56.000Z | OSGI/predictive-python/python model/test.py | davinder2385/iot-edge-samples | e6667947440f3eb0781ab4fe22281f4c1d79f376 | [
"Apache-2.0"
] | 11 | 2021-06-16T15:48:33.000Z | 2022-02-13T13:05:52.000Z | import json
import os.path
import time
# Third-party libraries
import zmq
# REQ (request) socket: strictly alternates send() -> recv() with the server.
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")

# Initial handshake: send a greeting and wait for the server's reply.
socket.send(b"hello")
message = socket.recv()
print(message)

while True:
    # Wait for next request from client
    # Fixed JSON payload with three colour-channel measures (R/G/B).
    jsonStr = '{"measures":{"R":255.0, "G":125.0, "B":64}}'
    socket.send(jsonStr.encode('ascii'))
    # Do some 'work'
    time.sleep(1)
    message = socket.recv()
    print("Received request: %s" % message)
b692e177fac102ebbcfa5f07692d3af8e28cb595 | 2,172 | py | Python | source/group.py | AaronDavidSchneider/CyclingDinner | 40521e7ffe4d11b91cca59733f3768ebf456b1af | [
"MIT"
] | null | null | null | source/group.py | AaronDavidSchneider/CyclingDinner | 40521e7ffe4d11b91cca59733f3768ebf456b1af | [
"MIT"
] | null | null | null | source/group.py | AaronDavidSchneider/CyclingDinner | 40521e7ffe4d11b91cca59733f3768ebf456b1af | [
"MIT"
] | null | null | null | from source.couple import couple
import numpy as np
import requests
import json
import source.config as c
import googlemaps
from geopy.distance import geodesic
import itertools
# Course-code lookup tables; the letters come from source.config --
# NOTE(review): presumably H/V/N name the three courses, confirm in config.
gd = {"H": 1, "V": 0, "N": 2}
gd_inv = {1: "H", 0: "V", 2: "N"}

# CONVERT TIMES TO POSIX TIME
from datetime import timezone, datetime, timedelta

# Map each course code ("V"/"H"/"N") to the POSIX timestamp of its dinner
# time on the configured day, in the configured timezone offset.
dinner_time = {}
for t in range(len(c.TIMES)):
    h = int(c.TIMES[t][:2])
    m = int(c.TIMES[t][3:5])
    dt = datetime(int(c.YEAR), int(c.MONTH), int(c.DAY), h, m,
                  tzinfo=timezone(timedelta(hours=c.TIMEZONE)))
    # timestamp() is the documented way to get POSIX time for an aware
    # datetime; the original strftime("%s") is platform-specific (not
    # available on Windows) and may silently ignore tzinfo.
    dinner_time[gd_inv[t]] = int(dt.timestamp())
class group:
    """One dinner group: a list of couples eating one course at the host couple's place."""

    def __init__(self, couples, host):
        """*couples*: the couple objects of this group; *host*: index into
        *couples* of the hosting couple."""
        self.couples = couples
        self.dist = np.zeros((3, 3))
        self.group_loss = 0
        self.gmaps_client = googlemaps.Client(key=c.API_KEY)
        self.host = host

    def get_dist(self, A, gmaps=False):
        """Travel time from couple *A*'s address to the host's address.

        With gmaps=True the Google Directions API is queried ("transit"
        queries are anchored to the course's dinner time) and the API's
        duration value is returned. Otherwise the geodesic distance (km) is
        divided by a per-mode average speed in km/min, yielding minutes.
        NOTE(review): the two modes may therefore return different units
        (API duration vs. minutes) -- confirm before mixing them.
        """
        if gmaps:
            if A.transp == "transit":
                x = self.gmaps_client.directions(
                    A.address, self.couples[self.host].address, mode=A.transp,
                    arrival_time=dinner_time[self.couples[self.host].food])
            else:
                x = self.gmaps_client.directions(
                    A.address, self.couples[self.host].address, mode=A.transp)
            d = x[0]["legs"][0]["distance"]["value"]
            d_min = x[0]["legs"][0]["duration"]["value"]
        else:
            d = geodesic(A.location.point, self.couples[self.host].location.point).km
            if A.transp == "bicycling":
                v = 15 / 60  # km/min
            elif A.transp == "driving":
                v = 50 / 60  # km/min
            elif A.transp == "transit":
                v = 5 / 60  # km/min
            else:
                # The original only printed an error here and then crashed
                # with a NameError on the undefined speed `v`; fail loudly
                # with a meaningful exception instead.
                raise ValueError("false transportation was chosen: %r" % (A.transp,))
            d_min = d / v
        return d_min

    def calc_group_loss(self, gmaps=False):
        """Root-sum-square of all couples' travel times to the host."""
        dist = 0
        for A in self.couples:
            dist += np.square(self.get_dist(A.pre, gmaps))
        return np.sqrt(dist)

    def get_loss(self, gmaps=False):
        """Group loss; defined as 0 when the host cooks course "V"."""
        if self.couples[self.host].food == "V":
            self.group_loss = 0
        else:
            self.group_loss = self.calc_group_loss(gmaps)
        return self.group_loss
| 34.47619 | 160 | 0.587017 | 315 | 2,172 | 3.95873 | 0.320635 | 0.070569 | 0.060144 | 0.076183 | 0.179631 | 0.142743 | 0.113873 | 0.113873 | 0.113873 | 0.113873 | 0 | 0.018239 | 0.267956 | 2,172 | 62 | 161 | 35.032258 | 0.766038 | 0.032228 | 0 | 0.111111 | 0 | 0 | 0.053034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.166667 | 0 | 0.314815 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b69435cd91d1a650c1244efbc456a6f87305ba23 | 8,616 | py | Python | text_importer/importers/olive/parsers.py | aflueckiger/impresso-text-acquisition | ed8f0586ed6a4f7de94b1504b292570bce1f51c5 | [
"MIT"
] | null | null | null | text_importer/importers/olive/parsers.py | aflueckiger/impresso-text-acquisition | ed8f0586ed6a4f7de94b1504b292570bce1f51c5 | [
"MIT"
] | null | null | null | text_importer/importers/olive/parsers.py | aflueckiger/impresso-text-acquisition | ed8f0586ed6a4f7de94b1504b292570bce1f51c5 | [
"MIT"
] | null | null | null | """Functions to parse Olive XML data."""
import codecs
import copy
import re
from typing import List, Optional
from bs4 import BeautifulSoup
from impresso_commons.path.path_fs import canonical_path, IssueDir
from text_importer.importers.olive.helpers import (normalize_language,
normalize_line)
def parse_styles(text: str) -> List[dict]:
    """Turn Olive style file into a dictionary.

    Style IDs may be referred to within the ``s`` property of token elements
    as defined in the impresso JSON schema for newspaper pages (see
    `documentation <https://github.com/impresso/impresso-schemas/blob/master/docs/page.schema.md>`__).

    :param str text: textual content of file `styleGallery.txt`
    :return: A list of styles; each style has ID, font, font size, color (rgb).
    :rtype: List[dict]
    """
    styles = []
    # One style per line: NNN="font",size,(r,g,b)
    # `\d+(?:\.\d+)?` also accepts single-digit / integer font sizes such as
    # "9", which the original `\d+\.?\d+` rejected (it needed >= 2 digits).
    regex = r'(\d{3})=(".*?"),(\d+(?:\.\d+)?),(\(.*?\))'
    # splitlines() handles both CRLF and LF endings; the original split only
    # on "\r\n" and silently mis-parsed LF-only files.
    for line in text.splitlines():
        if line == "":
            continue
        n, font, font_size, color = re.match(regex, line).groups()
        styles.append(
            {
                "id": int(n),
                "f": font.replace('"', ""),
                "fs": float(font_size),
                "rgb": [
                    int(i)
                    for i in color.replace("(", "")
                    .replace(")", "").split(",")]
            }
        )
    return styles
def olive_image_parser(text: bytes) -> Optional[dict]:
    """Parse the Olive XML file containing image metadata.

    :param bytes text: Content of the XML file to parse.
    :return: A dictionary of image metadata, or ``None`` when the document
        has no ``<xmd-entity>`` root element.
    :rtype: Optional[dict]
    """
    soup = BeautifulSoup(text, "lxml")
    root = soup.find("xmd-entity")

    # Explicit check instead of the original try/assert/except AssertionError:
    # `assert` is stripped under `python -O`, which would have turned the
    # intended `None` return into an AttributeError further down.
    if root is None:
        return None

    return {
        'id': root.get('id'),
        'coords': root.img.get('box').split(),
        'name': root.meta.get('name'),
        'resolution': root.meta.get('images_resolution'),
        'filepath': root.img.get('href')
    }
def olive_toc_parser(
        toc_path: str,
        issue_dir: IssueDir,
        encoding: str = "windows-1252"
) -> dict:
    """Parse the TOC.xml file (Olive format).

    :param str toc_path: Path to the ToC XML file.
    :param IssueDir issue_dir: Corresponding ``IssueDir`` object.
    :param str encoding: XML file encoding.
    :return: A dictionary where keys are content item IDs and values their
        metadata.
    :rtype: dict
    """
    with codecs.open(toc_path, 'r', encoding) as f:
        text = f.read()

    toc_data = {}
    # Content items are numbered sequentially across the whole issue (not per
    # page); the counter feeds the zero-padded "i0001"-style name below.
    global_counter = 0
    for page in BeautifulSoup(text, 'lxml').find_all('page'):
        page_data = {}
        for n, entity in enumerate(page.find_all("entity")):
            global_counter += 1
            item_legacy_id = entity.get("id")
            item = {
                "legacy_id": item_legacy_id,
                # canonical impresso content-item ID built from the issue
                "id": canonical_path(
                    issue_dir,
                    name=f"i{str(global_counter).zfill(4)}",
                    extension=""
                ),
                "type": entity.get("entity_type"),
                # 1-based position of the item on its page
                "seq": n + 1
            }
            # if it's a picture we want to get also the article into which
            # the image is embedded
            if item['type'].lower() == "picture":
                if entity.get("embedded_into") is not None:
                    item['embedded_into'] = entity.get("embedded_into")
            page_data[item_legacy_id] = item
        toc_data[int(page.get('page_no'))] = page_data

    # gather the IDs of all content items in the issue
    ids = [
        toc_data[page][item]["id"]
        for page in toc_data
        for item in toc_data[page]
    ]
    # check that these IDs are unique within the issue
    # (sanity check only; note that `assert` is skipped under `python -O`)
    assert len(ids) == len(list(set(ids)))

    return toc_data
def olive_parser(text: str) -> dict:
    """Parse an Olive XML file (e.g. from Le Temps corpus).

    The main logic implemented here was derived from
    <https://github.com/dhlab-epfl/LeTemps-preprocessing/>. Each XML file
    corresponds to one article, as detected by Olive.

    :param text: content of the xml file to parse
    :type text: string
    :return: A dictionary with keys: ``meta``, ``r``, ``stats``, ``legacy``.
    :rtype: dict
    """
    soup = BeautifulSoup(text, "lxml")
    root = soup.find("xmd-entity")

    # Article-level attributes of the <xmd-entity> root element.
    page_no = root['page_no']
    identifier = root['id']
    language = root['language']
    title = soup.meta['name']
    entity_type = root['entity_type']
    issue_date = soup.meta['issue_date']

    # Output skeleton: "r" collects regions (coordinates "c" + paragraphs
    # "p"); "legacy" keeps Olive's own linking metadata.
    out = {
        "meta": {
            "language": None,
            "type": {}
        },
        "r": [],
        "stats": {},
        "legacy": {"continuation_from": None, "continuation_to": None},
    }
    out["meta"]["title"] = title
    out["meta"]["page_no"] = [int(page_no)]
    out["meta"]["language"] = normalize_language(language)
    out["meta"]["type"]["raw"] = entity_type
    out["meta"]["issue_date"] = issue_date

    # Templates that are deep-copied whenever a new region / paragraph /
    # line / token is started.
    new_region = {
        "c": [],
        "p": []
    }
    new_paragraph = {
        "l": []
    }
    new_line = {
        "c": [],
        "t": []
    }
    new_token = {
        "c": [],
        "tx": ""
    }

    for primitive in soup.find_all("primitive"):
        # store coordinate of text areas (boxes) by page
        # 1) page number, 2) coordinate list
        region = copy.deepcopy(new_region)
        region["c"] = [int(i) for i in primitive.get('box').split(" ")]
        para = None
        line = None
        line_counter = 0
        for tag in primitive.find_all(recursive=False):
            # <l> starts a new physical line; flush the previous one first.
            if tag.name == "l":
                if para is None and line is None:
                    para = copy.deepcopy(new_paragraph)
                    line = copy.deepcopy(new_line)
                # Close the previous line before starting the next.
                if line_counter > 0 and line is not None:
                    line = normalize_line(line, out["meta"]["language"])
                    para["l"].append(line)
                # p="S"/"SA" marks a paragraph start: close the current
                # paragraph and begin a fresh one.
                if tag.get("p") in ["S", "SA"] and line_counter > 0:
                    region["p"].append(para)
                    para = copy.deepcopy(new_paragraph)
                line = copy.deepcopy(new_line)
                line["c"] = [
                    int(i)
                    for i in tag.get('box').split(" ")
                ]
                line_counter += 1
            # <w> tokens are plain words; <q> tokens additionally reference a
            # <qw> element (via qid) that holds a normalized form.
            if tag.name in ["w", "q"]:
                # store coordinates of each token
                # 1) token, 2) page number, 3) coordinate list
                t = copy.deepcopy(new_token)
                t["c"] = [int(i) for i in tag.get('box').split(" ")]
                t["tx"] = tag.string
                t["s"] = int(tag.get('style_ref'))
                if tag.name == "q" and tag.get('qid') is not None:
                    qid = tag.get('qid')
                    normalized_form = soup.find('qw', qid=qid).text
                    t["nf"] = normalized_form
                    t["qid"] = qid
                # append the token to the line
                line["t"].append(t)
        # append orphan lines
        if line is not None:
            line = normalize_line(line, out["meta"]["language"])
            para["l"].append(line)
            region["p"].append(para)
        if para is not None:
            out["r"].append(region)

    # Olive's legacy linking metadata: the article's own id plus pointers to
    # neighbouring and continuation parts.
    out["legacy"]["id"] = identifier
    out["legacy"]["source"] = soup.link['source']
    """
    # I suspect this could be deleted
    out["legacy"]["word_count"] = int(soup.meta['wordcnt'])
    out["legacy"]["chars_count"] = int(soup.meta['total_chars_count'])
    suspicious_chars_count = int(soup.meta['suspicious_chars_count'])
    out["legacy"]["suspicious_chars_count"] = int(suspicious_chars_count)
    """
    out["legacy"]["first_id"] = soup.link['first_id']
    out["legacy"]["last_id"] = soup.link['last_id']
    out["legacy"]["next_id"] = soup.link['next_id']
    out["legacy"]["prev_id"] = soup.link['prev_id']

    if root.has_attr('continuation_from'):
        out["legacy"]["continuation_from"] = root['continuation_from']
    if root.has_attr('continuation_to'):
        out["legacy"]["continuation_to"] = root['continuation_to']

    return out
| 31.217391 | 102 | 0.515901 | 1,008 | 8,616 | 4.297619 | 0.250992 | 0.022853 | 0.012465 | 0.007387 | 0.141043 | 0.104109 | 0.10157 | 0.10157 | 0.087719 | 0.087719 | 0 | 0.003374 | 0.346448 | 8,616 | 275 | 103 | 31.330909 | 0.765939 | 0.196147 | 0 | 0.113772 | 0 | 0 | 0.117547 | 0.010531 | 0 | 0 | 0 | 0 | 0.017964 | 1 | 0.023952 | false | 0 | 0.041916 | 0 | 0.095808 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b69613a7512da774d1b23dd4f974de878107a0bc | 550 | py | Python | comsetformat.py | paqs2020/paqs2020 | 28377f3e0aa3d3dd2885baf2b339ae3228c68192 | [
"MIT"
] | 1 | 2021-07-29T10:47:52.000Z | 2021-07-29T10:47:52.000Z | comsetformat.py | paqs2020/paqs2020 | 28377f3e0aa3d3dd2885baf2b339ae3228c68192 | [
"MIT"
] | 1 | 2021-07-29T11:04:47.000Z | 2021-07-29T11:04:47.000Z | comsetformat.py | paqs2020/paqs2020 | 28377f3e0aa3d3dd2885baf2b339ae3228c68192 | [
"MIT"
] | 2 | 2021-05-06T18:33:01.000Z | 2021-08-01T10:21:46.000Z | import pickle
databox = "/nfs/projects/paqs/qadatasetAstudy"

# NOTE: pickle.load can execute arbitrary code from the file -- only ever
# feed it the trusted dataset dump.
# The original opened the pickle inline and leaked the handle.
with open(databox + "/val.pkl", "rb") as pkl_file:
    source = pickle.load(pkl_file)

questions = databox + "/output/ques.val"
answers = databox + "/output/ans.val"

# Context managers guarantee both output files are flushed and closed even
# if a write fails part-way through.
with open(questions, 'w') as fqes, open(answers, 'w') as fans:
    for fid, value in source.items():
        for sid, sentence in value.items():
            # Sentence ids containing "Q" go to the questions file, those
            # containing "A" to the answers file; others are skipped
            # (unchanged from the original).
            if "Q" in sid:
                fqes.write('{},{}, <s> {} </s>\n'.format(fid, sid, sentence))
            elif "A" in sid:
                fans.write('{},{}, <s> {} </s>\n'.format(fid, sid, sentence))
| 25 | 73 | 0.587273 | 75 | 550 | 4.306667 | 0.493333 | 0.102167 | 0.043344 | 0.049536 | 0.173375 | 0.173375 | 0.173375 | 0.173375 | 0 | 0 | 0 | 0 | 0.2 | 550 | 21 | 74 | 26.190476 | 0.734091 | 0 | 0 | 0 | 0 | 0 | 0.216758 | 0.061931 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b697d352270f4d5af09a28c0d438435fc0bb409c | 15,849 | py | Python | app.py | ruianfrp/Yolov3 | 8449b37e0ee5bffeacea555036ca52e3b4ae1531 | [
"MIT"
] | 2 | 2020-04-04T08:53:25.000Z | 2020-06-14T09:13:39.000Z | app.py | ruianfrp/Yolov3 | 8449b37e0ee5bffeacea555036ca52e3b4ae1531 | [
"MIT"
] | null | null | null | app.py | ruianfrp/Yolov3 | 8449b37e0ee5bffeacea555036ca52e3b4ae1531 | [
"MIT"
] | null | null | null | import keras
from PIL import Image
from flask import Flask, request, jsonify
from flask_cors import CORS
from itsdangerous import Serializer
from concurrent.futures import ThreadPoolExecutor
from flask_apscheduler import APScheduler
import token_authorization
import AesCipher
import mysql
import functools
from yolo import YOLO
# Scheduled-job configuration class for Flask-APScheduler.
class SchedulerConfig(object):
    # JOBS is read by APScheduler from app.config: one cron job that runs
    # automatic_seat() every day at 01:00.
    JOBS = [
        {
            'id': 'automatic_seat',
            # callable to execute, as "module:function"
            'func': '__main__:automatic_seat',
            # positional arguments for the callable
            'args': None,
            # trigger type: cron schedule
            'trigger': 'cron',
            'hour': 1,
            'minute': 0
        }
    ]
# Body of the scheduled job.
def automatic_seat():
    """Daily job: let mysql.appointment_automatic() materialise due seat bookings."""
    print("座位预约自动实现!")
    result = mysql.appointment_automatic()
    # The mysql helper signals its outcome with the strings 'True'/'False';
    # any other value means there was nothing to process.
    if result == 'True':
        app.logger.info("座位预约自动实现成功!")
    elif result == 'False':
        app.logger.error("数据库操作错误!")
    else:
        app.logger.warn("无需操作的数据!")
# Thread pool for slow background tasks (e.g. real_seat).
executor = ThreadPoolExecutor(10)

app = Flask(__name__)
app.config.from_object(SchedulerConfig())
scheduler = APScheduler()  # instantiate APScheduler
scheduler.init_app(app)  # load the job list into the Flask app
scheduler.start()  # start the schedule
CORS(app, supports_credentials=True)
# Real-time seat detection (slow task; intended to run on the executor).
def real_seat(classroom_id):
    """Run YOLO detection on the classroom snapshot for *classroom_id*.

    Opens D:/SourceTree/yolov3/img/<classroom_id>.jpg and hands it to
    YOLO.detect_image, which is expected to refresh that classroom's seat
    occupancy data.
    """
    keras.backend.clear_session()
    yolo = YOLO()
    try:
        image = Image.open("D:/SourceTree/yolov3/img/" + str(classroom_id) + ".jpg")
    except OSError:
        # Missing/unreadable/corrupt image (PIL raises OSError subclasses).
        # The original used a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        app.logger.error("图片打开失败!")
    else:
        yolo.detect_image(image, classroom_id)
        app.logger.info("座位实时获取成功!")
# Decorator: require a valid token in the Authorization header.
def login_required(view_func):
    """Wrap *view_func* to answer 401 unless a valid, unexpired token is sent."""
    @functools.wraps(view_func)
    def verify_token(*args, **kwargs):
        try:
            # take the token from the request header
            token = request.headers["Authorization"]
        except KeyError:
            # Narrowed from `except Exception`: a missing header raises KeyError.
            return jsonify(code=401, msg='缺少参数token')
        # NOTE(review): the serializer secret "classroom" is hard-coded; it
        # must match the secret used when the token was issued.
        s = Serializer("classroom")
        try:
            s.loads(token)
        except Exception:
            return jsonify(code=401, msg="登录已过期")
        return view_func(*args, **kwargs)
    return verify_token
# Log in
@app.route('/login', methods=['POST'])
def login():
    """Check username/password; return a token and basic user info on success."""
    if request.get_json().get('username') != 'null' and request.get_json().get('password') != 'null':
        username = request.get_json().get('username')
        pwd = request.get_json().get('password')
        result = mysql.user_select(username)
        # Guard added: an unknown username yields no row, and the original
        # then crashed with a TypeError on result[2].
        if not result:
            error = '用户不存在!'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        # Passwords are compared in their AES-encrypted form.
        password = str(AesCipher.encryption(pwd), 'utf-8')
        if password != result[2]:
            error = '密码错误!'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        else:
            info = "登陆成功!"
            app.logger.info(info)
            tk = token_authorization.create_token(username)
            data = {}
            user = {
                'id': result[0],
                'userName': username,
                'userRole': result[3]
            }
            data['userInfo'] = user
            data['token'] = tk
            return jsonify({"code": 200, "data": data, "info": info}), 200
    else:
        error = '请填写完整信息!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error}), 403
# Register
@app.route('/register', methods=['POST'])
def register():
    """Create a user; mysql.user_insert returns 0 for a duplicate username
    and an error string on database failure."""
    if request.get_json().get('username') != 'null' and request.get_json().get('password') != 'null':
        username = request.get_json().get('username')
        password = request.get_json().get('password')
        return_id = mysql.user_insert(username, password)
        if return_id == 0:
            error = '已存在此用户'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        elif isinstance(return_id, str):
            # Bug fix: the original tested `return_id is str`, which compares
            # the value's identity to the `str` type and is always False, so
            # database errors fell through to the success branch.
            error = return_id
            app.logger.error(error)
            return jsonify({"code": 403, "error": error}), 403
        else:
            info = '注册成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info}), 200
    else:
        # Added: the original returned None here (a 500 in Flask); answer
        # like the sibling /login route instead.
        error = '请填写完整信息!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error}), 403
# Add a classroom
@app.route('/classroom_insert', methods=['POST'])
def insert_classroom():
    """Create a classroom; mysql.classroom_insert returns None on DB error
    and 0 when the classroom already exists."""
    if request.get_json().get('classroomName') is not None and \
            request.get_json().get('seatNums') is not None and \
            request.get_json().get('classroomInfo') is not None:
        classroom_name = request.get_json().get('classroomName')
        seat_nums = request.get_json().get('seatNums')
        classroom_info = request.get_json().get('classroomInfo')
        result = mysql.classroom_insert(classroom_name, seat_nums, classroom_info)
        if result is None:
            error = '数据库操作错误!'
            # Log failures at error level (the original used logger.info for
            # every branch).
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        elif result == 0:
            error = '该教室已存在!'
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        else:
            info = classroom_name + '教室添加成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info})
    else:
        error = '教室信息不得为空!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Delete a classroom
@app.route('/classroom_delete', methods=['POST'])
def delete_classroom():
    """Delete a classroom by id; mysql.classroom_delete answers 'False' on DB error."""
    if request.get_json().get('id') != 'null':
        classroom_id = request.get_json().get('id')
        result = mysql.classroom_delete(classroom_id)
        if result == 'False':
            error = '数据库操作错误!'
            # Log failures at error level (the original used logger.info).
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        else:
            info = '教室删除成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info})
    else:
        error = '教室id返回为空!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Edit classroom information
@app.route('/classroom_update', methods=['POST'])
def update_classroom():
    """Update a classroom's seat count and/or description (at least one required)."""
    if request.get_json().get('seatNums') is not None or request.get_json().get('classroomInfo') is not None:
        seat_num = request.get_json().get('seatNums')
        classroom_info = request.get_json().get('classroomInfo')
        classroom_id = request.get_json().get('id')
        result = mysql.classroom_update(seat_num, classroom_info, classroom_id)
        if result == 'False':
            error = '数据库操作错误!'
            # Log failures at error level (the original used logger.info).
            app.logger.error(error)
            return jsonify({"code": 403, "error": error})
        else:
            info = '教室信息修改成功!'
            app.logger.info(info)
            return jsonify({"code": 200, "info": info})
    else:
        error = '返回参数不得全为空!'
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Classroom list
@app.route('/classroom_show', methods=['GET'])
def get_classroom_info():
    """Return every classroom together with its seat counts."""
    result = mysql.classroom_select()
    if result is None:
        app.logger.error("数据库操作异常!")
        return jsonify({"code": 403, "error": "数据库操作异常!"})
    if len(result) == 0:
        app.logger.error("搜索数据为空!")
        return jsonify({"code": 403, "error": "搜索数据为空!"})
    classrooms = [
        {
            'id': row[0],
            'classroomName': row[1],
            'seatNum': row[2],
            'freeSeatNum': row[3],
            'placeFreeSeat': 0,
            'classroomInfo': row[4]
        }
        for row in result
    ]
    data = {'classrooms': classrooms}
    app.logger.info("教室信息返回成功!")
    return jsonify({"code": 200, "data": data, "info": "教室信息返回成功!"})
# Seat counts per seat-position category
@app.route('/seat_num_get', methods=['get'])
def seat_num_get():
    """Return the number of seats per position category plus the grand total."""
    result1, result2, result3, result4 = mysql.count_seat_select()
    if result1 is None or result2 is None or result3 is None or result4 is None:
        app.logger.error("数据库操作异常!")
        return jsonify({"code": 403, "error": "数据库操作异常!"})
    else:
        # (category number, label, count) -- replaces the three copy-pasted
        # dict/append blocks of the original; output order is unchanged.
        seat_nums = [
            {'seatPlaceNo': no, 'seatPlace': place, 'counts': res[0]}
            for no, place, res in (
                (0, '普通', result1),
                (1, '靠窗', result2),
                (2, '靠门', result3),
            )
        ]
        data = {}
        data['allSeatNum'] = result4[0]
        data['seatNums'] = seat_nums
        app.logger.info("座位位置及数量返回成功!")
        return jsonify({"code": 200, "data": data, "info": "座位位置及数量返回成功!"})
# Real-time seat map of one classroom
@app.route('/seat_real', methods=['POST'])
def get_real_seat_info():
    """Return the classroom's seat matrix (row x col) with per-seat states."""
    if request.get_json().get('classroomId') != 'null':
        classroom_id = request.get_json().get('classroomId')
        # async (currently disabled) -- NOTE(review): as written this would
        # call real_seat synchronously; executor.submit(real_seat, classroom_id)
        # is the asynchronous form.
        # executor.submit(real_seat(classroom_id))
        result_max = mysql.seat_max_select(classroom_id)  # (row count, col count)
        result = mysql.seat_real_select(classroom_id)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        elif result.__len__() == 0:
            app.logger.error("搜索数据为空!")
            return jsonify({"code": 403, "error": "搜索数据为空!"})
        else:
            data = {}
            # Default every cell to 2 (meaning TODO: confirm -- presumably
            # "no seat"), then overwrite existing seats with their state
            # r[3]; r[1]/r[2] are 1-based row/column indices.
            seats = [[2 for i in range(result_max[1])] for j in range(result_max[0])]
            for r in result:
                seats[r[1]-1][r[2]-1] = r[3]
            data['seats'] = seats
            data['row'] = result_max[0]
            data['col'] = result_max[1]
            app.logger.info("座位信息返回成功!")
            return jsonify({"code": 200, "data": data, "info": "座位信息返回成功!"})
    else:
        error = "返回教室id为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Classroom search by special seat position (classroom page)
@app.route('/classroom_special', methods=['POST'])
def get_special_classroom_info():
    """List classrooms that still have free seats of the requested position type."""
    if request.get_json().get('seatPlace') != 'null':
        seat_place = request.get_json().get('seatPlace')
        result = mysql.classroom_special_select(seat_place)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        else:
            data = {}
            classrooms = []
            for r in result:
                # r[4] holds the free-seat count of the requested type;
                # classrooms with none left are filtered out.
                if r[4] != 0:
                    classroom = {
                        'id': r[0],
                        'classroomName': r[1],
                        'seatNum': r[2],
                        'freeSeatNum': r[3],
                        'placeFreeSeat': r[4],
                        'classroomInfo': r[5]
                    }
                    classrooms.append(classroom)
            # All classrooms are out of this seat type -> code 400.
            if len(classrooms) == 0:
                app.logger.info("所有教室已无此类型座位!")
                return jsonify({"code": 400, "info": "所有教室已无此类型座位!"})
            data['classrooms'] = classrooms
            app.logger.info("位置推荐返回成功!")
            return jsonify({"code": 200, "data": data, "info": "位置推荐返回成功!"})
    else:
        error = "特殊位置类型返回为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Single classroom details
@app.route('/get_classInfo_by_id', methods=['POST'])
def get_class_info_by_id():
    """Return one classroom's details, looked up by its id."""
    if request.get_json().get('classroomId') == 'null':
        error = "返回教室id为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
    classroom_id = request.get_json().get('classroomId')
    result = mysql.get_class_info_by_id(classroom_id)
    if result is None:
        app.logger.error("数据库操作异常!")
        return jsonify({"code": 403, "error": "数据库操作异常!"})
    data = {
        'classroom': {
            'id': result[0],
            'classroomName': result[1],
            'seatNum': result[2],
            'freeSeatNum': result[3],
            'classroomInfo': result[4]
        }
    }
    app.logger.info("教室信息返回成功!")
    return jsonify({"code": 200, "data": data, "info": "教室信息返回成功!"})
# Book a seat
@app.route('/seat_appointment', methods=['POST'])
def appointment_seat():
    """Book a seat; requires classroomId, seatX, seatY, startTime and userNo.

    mysql.appointment signals its outcome with sentinels: None for a DB
    error, "OUT" when the user already holds 5 bookings, 'False' when the
    seat is taken on that date, 'Obsolete' for a date in the past.
    """
    if request.get_json().get('classroomId') != 'null' and request.get_json().get('seatX') != 'null' and \
            request.get_json().get('seatY') != 'null' and request.get_json().get('startTime') != 'null' and \
            request.get_json().get('userNo') != 'null':
        classroom_id = request.get_json().get('classroomId')
        seat_x = request.get_json().get('seatX')
        seat_y = request.get_json().get('seatY')
        start_time = request.get_json().get('startTime')
        user_no = request.get_json().get('userNo')
        result = mysql.appointment(start_time, classroom_id, seat_x, seat_y, user_no)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        elif result == "OUT":
            app.logger.error("预约已满5次!")
            return jsonify({"code": 403, "error": "预约已满5次!"})
        elif result == 'False':
            app.logger.error("该座位该日期已被预约,请更换日期!")
            return jsonify({"code": 403, "error": "该座位该日期已被预约,请更换日期!"})
        elif result == 'Obsolete':
            app.logger.error("预约日期不得小于当前日期!")
            return jsonify({"code": 403, "error": "预约日期不得小于当前日期!"})
        else:
            app.logger.info("预约成功!")
            return jsonify({"code": 200, "info": "预约成功!"})
    else:
        error = "返回数据为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Current seat appointments of a user
@app.route('/currently_appointment', methods=['POST'])
def get_currently_appointment():
    """List the requesting user's current appointments (seat, classroom, date)."""
    if request.get_json().get('userNo') != 'null':
        user_no = request.get_json().get('userNo')
        result = mysql.currently_appointment(user_no)
        if result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        elif result == 'False':
            # 'False' is the helper's "no rows" sentinel, answered with code 300.
            app.logger.warn("当前无预约记录!")
            return jsonify({"code": 300, "warn": "当前无预约记录!"})
        else:
            data = {}
            appointments = []
            for r in result:
                # NOTE(review): r[1] is rendered as the row number and r[0]
                # as the seat number -- confirm the column order against
                # mysql.currently_appointment.
                seat = "第 " + str(r[1]) + " 排 第 " + str(r[0]) + " 座"
                appointment = {
                    'seat': seat,
                    'classroomId': r[2],
                    'classroomName': r[3],
                    'startTime': r[4]
                }
                appointments.append(appointment)
            data['appointments'] = appointments
            app.logger.info("当前预约记录返回成功!")
            return jsonify({"code": 200, "data": data, "info": "当前预约记录返回成功!"})
    else:
        error = "返回数据为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
# Bulk seat creation for a classroom
@app.route('/seat_insert', methods=['POST'])
def seat_insert():
    """Insert a batch of seat descriptors for one classroom."""
    if request.get_json().get('seatData') != 'null':
        seat_data = request.get_json().get('seatData')
        all_data = []
        # All descriptors belong to one classroom; take its id from the first.
        classroom_id = seat_data[0].get('classroomId')
        for seat in seat_data:
            fk_classroom_id = seat.get('classroomId')
            seat_x = seat.get('seatX')
            seat_y = seat.get('seatY')
            seat_state = seat.get('seatState')
            seat_place = seat.get('seatPlace')
            data = (fk_classroom_id, seat_x, seat_y, seat_state, seat_place)
            all_data.append(data)
        # bulk-insert the seat rows
        result = mysql.seat_insert_many(classroom_id, all_data)
        if result == 'True':
            # Success is an info-level event; the original logged it with the
            # deprecated logger.warn.
            app.logger.info("座位添加成功!")
            return jsonify({"code": 200, "info": "座位添加成功!"})
        elif result is None:
            app.logger.error("数据库操作异常!")
            return jsonify({"code": 403, "error": "数据库操作异常!"})
        else:
            app.logger.error("返回数据为空!")
            return jsonify({"code": 403, "error": "返回数据为空!"})
    else:
        # Added: the original returned None here (a 500 in Flask); answer
        # like the sibling routes instead.
        error = "返回数据为空!"
        app.logger.error(error)
        return jsonify({"code": 403, "error": error})
if __name__ == '__main__':
    # Development server; threaded=True serves requests concurrently.
    # debug=True must not be enabled in production.
    app.run(threaded=True, debug=True)
| 34.37961 | 109 | 0.546659 | 1,711 | 15,849 | 4.934541 | 0.150205 | 0.053299 | 0.094635 | 0.082554 | 0.513443 | 0.443207 | 0.3982 | 0.360535 | 0.336373 | 0.326898 | 0 | 0.021198 | 0.300524 | 15,849 | 460 | 110 | 34.454348 | 0.740393 | 0.015585 | 0 | 0.408629 | 0 | 0 | 0.149608 | 0.004495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043147 | false | 0.017767 | 0.030457 | 0 | 0.203046 | 0.002538 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6980fb0608d22d689e43b123d9af449a3000f0b | 1,350 | py | Python | 1d_unconstrained_optimization/golden_section_algorithm.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | null | null | null | 1d_unconstrained_optimization/golden_section_algorithm.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | 1 | 2021-06-02T10:07:26.000Z | 2021-06-03T10:23:46.000Z | 1d_unconstrained_optimization/golden_section_algorithm.py | almostdutch/numerical-optimization-algorithms | cd6c1306cb04eccce62a74420323bda83058c1d6 | [
"MIT"
] | null | null | null | """
golden_section_algorithm.py
Returns the reduced uncertainty interval containing the minimizer of the function
func - anonimous function
interval0 - initial uncertainty interval
N_iter - number of iterations
"""
import math
import numpy as np
def golden_section_algorithm_calc_N_iter(interval0, uncertainty_range_desired):
    """Number of golden-section iterations needed to shrink *interval0* down
    to *uncertainty_range_desired* (each iteration keeps ~0.618 of the range).

    interval0 - initial uncertainty interval (left, right)
    uncertainty_range_desired - target width of the final interval
    """
    # reduction ratio per iteration is 0.618, hence a base-0.618 logarithm
    ratio = uncertainty_range_desired / (interval0[1] - interval0[0])
    return math.ceil(math.log(ratio, 0.618))
def golden_section_algorithm(func, interval0, N_iter):
    """Golden-section search for the minimizer of a unimodal function.

    Runs ``N_iter`` iterations, shrinking the uncertainty interval by a
    factor of ~0.618 per iteration while reusing one of the two interior
    function evaluations from the previous iteration.

    func -- objective function of one variable (assumed unimodal)
    interval0 -- (left, right) initial uncertainty interval
    N_iter -- number of iterations to perform
    Returns the reduced (left, right) uncertainty interval as a tuple.
    """
    rho = (3 - np.sqrt(5)) / 2
    left_limit = interval0[0]
    right_limit = interval0[1]

    # 'smaller' records which of the two interior points survived the last
    # interval reduction, so its function value can be reused:
    # 'a' -> the surviving point becomes the new right interior point c;
    # 'c' -> the surviving point becomes the new left interior point a.
    smaller = 'a'
    a = left_limit + (1 - rho) * (right_limit - left_limit)
    f_at_a = func(a)

    for iter_no in range(N_iter):
        if smaller == 'a':
            # Reuse the previous a as the right interior point c and
            # evaluate a fresh left interior point.
            c = a
            f_at_c = f_at_a
            a = left_limit + rho * (right_limit - left_limit)
            f_at_a = func(a)
        else:
            # Reuse the previous c as the left interior point a and
            # evaluate a fresh right interior point.
            a = c
            f_at_a = f_at_c
            c = left_limit + (1 - rho) * (right_limit - left_limit)
            f_at_c = func(c)

        if f_at_a < f_at_c:
            # Minimum lies in [left_limit, c].
            right_limit = c
            smaller = 'a'
        else:
            # Minimum lies in [a, right_limit].
            left_limit = a
            smaller = 'c'

    interval = (left_limit, right_limit)
    return interval
| 27 | 100 | 0.56963 | 179 | 1,350 | 4.005587 | 0.301676 | 0.112971 | 0.027894 | 0.07113 | 0.171548 | 0.171548 | 0.149233 | 0.149233 | 0.149233 | 0.149233 | 0 | 0.022002 | 0.326667 | 1,350 | 49 | 101 | 27.55102 | 0.766777 | 0.153333 | 0 | 0.193548 | 0 | 0 | 0.003524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b699c359961ff4c5fe1e7486390fb0c95dd99241 | 1,132 | py | Python | constants.py | JNPRAutomate/unicast2multicast-translator | d400d71745eec8f5ae7933be54a5505460173dea | [
"MIT"
] | 3 | 2021-09-30T18:07:54.000Z | 2021-10-03T01:48:17.000Z | constants.py | JNPRAutomate/unicast2multicast-translator | d400d71745eec8f5ae7933be54a5505460173dea | [
"MIT"
] | 1 | 2021-09-20T21:08:51.000Z | 2021-09-20T21:08:51.000Z | constants.py | JNPRAutomate/unicast2multicast-translator | d400d71745eec8f5ae7933be54a5505460173dea | [
"MIT"
] | null | null | null | import ipaddress
# =============================================== DEFAULT CONFIGURATION ================================================

# Default port to bind the translator's unicast server socket to.
DEFAULT_UNICAST_SRV_PORT = 9001

# Default address space to pick multicast destination addresses (groups) from for the translated unicast streams.
# 232.0.0.0/8 is the IPv4 source-specific multicast (SSM) block.
DEFAULT_MULTICAST_ADDR_SPACE = ipaddress.IPv4Network('232.0.0.0/8')

# Default port to use when forwarding payload received on the translator's unicast server socket as multicast.
DEFAULT_MULTICAST_PORT = 9002

# URL to use when submitting stream information to the Multicast Menu.
MULTICASTMENU_ADD_URL = 'https://multicastmenu.herokuapp.com/add/'

# Email address to use when submitting stream information to the Multicast Menu. Lenny has OK'ed using his email address
# until we have a group email.
MULTICASTMENU_EMAIL = 'lenny@juniper.net'

# Number of worker threads dedicated to submitting stream information to the Multicast Menu.
MULTICASTMENU_THREADS = 10

# ======================================================================================================================
| 62.888889 | 120 | 0.659011 | 133 | 1,132 | 5.518797 | 0.518797 | 0.020436 | 0.036785 | 0.118529 | 0.333787 | 0.333787 | 0.243869 | 0.243869 | 0.147139 | 0.147139 | 0 | 0.017875 | 0.110424 | 1,132 | 17 | 121 | 66.588235 | 0.711023 | 0.732332 | 0 | 0 | 0 | 0 | 0.232082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b69b9ca24f2fb2208c924db77c7c45f56668f4d1 | 19,059 | py | Python | src/Tools/CodeGenerator/Plugins/SharedLibraryPluginImpl/VectorTypeInfo.py | Bhaskers-Blu-Org2/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 15 | 2019-12-14T07:54:18.000Z | 2021-03-14T14:53:28.000Z | src/Tools/CodeGenerator/Plugins/SharedLibraryPluginImpl/VectorTypeInfo.py | Bhaskers-Blu-Org2/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 30 | 2019-12-03T20:58:56.000Z | 2020-04-21T23:34:39.000Z | src/Tools/CodeGenerator/Plugins/SharedLibraryPluginImpl/VectorTypeInfo.py | microsoft/FeaturizersLibrary | 229ae38ea233bfb02a6ff92ec3a67c1751c58005 | [
"MIT"
] | 13 | 2020-01-23T00:18:47.000Z | 2021-10-04T17:46:45.000Z | # ----------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License
# ----------------------------------------------------------------------
"""Contains the VectorTypeInfo object"""
import os
import re
import textwrap
import six
import CommonEnvironment
from CommonEnvironment import Interface
from CommonEnvironment import StringHelpers
from Plugins.SharedLibraryPluginImpl.TypeInfo import TypeInfo
# ----------------------------------------------------------------------
# Absolute path of this script and its (directory, filename) split --
# standard boilerplate shared by the CodeGenerator plugin modules.
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
@Interface.staticderived
class VectorTypeInfo(TypeInfo):
    """Type information for C++ ``std::vector<T>`` members.

    Generates the shared-library glue code (parameter lists, validation
    statements, and invocation snippets) needed to marshal vectors across
    the C API boundary.  A vector's content is represented as a
    (begin, end) pair of pointers (``std::tuple<T const *, T const *>``),
    built from the element type's own TypeInfo.
    """

    # ----------------------------------------------------------------------
    # |
    # |  Public Types
    # |
    # ----------------------------------------------------------------------
    TypeName = Interface.DerivedProperty(re.compile(r"vector\<(?P<type>.+)\>"))
    CppType = Interface.DerivedProperty(None)

    # ----------------------------------------------------------------------
    # |
    # |  Public Methods
    # |
    # ----------------------------------------------------------------------
    def __init__(
        self,
        *args,
        member_type=None,
        create_type_info_func=None,
        **kwargs
    ):
        """Build the vector TypeInfo for *member_type* (e.g. "vector<int8>").

        member_type -- type string matched against ``TypeName``;
            when None, __init__ returns immediately without calling
            super().__init__, leaving the instance uninitialized
            (presumably a factory-probing convention -- TODO confirm).
        create_type_info_func -- callable producing the TypeInfo for the
            vector's element type.
        """
        if member_type is None:
            return

        assert create_type_info_func is not None

        super(VectorTypeInfo, self).__init__(*args, **kwargs)

        # Optional vectors would need extra null handling that is not
        # implemented by the generated glue code below.
        if self.IsOptional:
            raise NotImplementedError("Optional vectors are not supported at this time")

        match = self.TypeName.match(member_type)
        assert match, member_type

        the_type = match.group("type")

        type_info = create_type_info_func(the_type)
        assert type_info, the_type

        # The content is expressed by a range of pointers.
        self._type_info = type_info
        self.CppType = "std::tuple<{type} const *, {type} const *>".format(
            type=self._type_info.CppType,
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetInputInfo(self, arg_name, invocation_template):
        """Return Result describing how a single vector input is passed.

        Delegates to the element type's buffer info and wraps the resulting
        (pointer, count) pair into a ``std::make_tuple(ptr, ptr + size)``
        range expression.
        """
        result = self._type_info.GetInputBufferInfo(arg_name, self._InvocationTemplate)
        assert result.InputBufferType is not None, self._type_info

        invocation_statements, invocation_tuple = self._ExtractDecoratedInvocationStatements(result.InvocationStatements)
        # The decorated statements must yield exactly (pointer, size).
        assert len(invocation_tuple) == 2, invocation_tuple

        return self.Result(
            result.Parameters,
            result.ValidationStatements,
            "{}{}".format(
                "{}\n\n".format(invocation_statements.rstrip()) if invocation_statements else "",
                invocation_template.format(
                    "std::make_tuple({ptr}, {ptr} + {size})".format(
                        ptr=invocation_tuple[0],
                        size=invocation_tuple[1],
                    ),
                ),
            ),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetInputBufferInfo(
        self,
        arg_name,
        invocation_template,
        items_var_name=None,
    ):
        """Return Result describing how a buffer (array) of vectors is passed.

        Generates C++ that walks parallel input-pointer arrays, validates
        each element, and accumulates a ``std::vector`` of (begin, end)
        tuples that is handed to the featurizer.
        """
        # Don't reuse the items var (if it exists)
        items_var_name = "{}_items".format(arg_name)

        result = self._type_info.GetInputBufferInfo(
            "{}_item".format(arg_name),
            self._InvocationTemplate,
            items_var_name=items_var_name,
        )
        assert result.InputBufferType is not None, self._type_info

        # One pointer parameter per element-type parameter; each is advanced
        # in lockstep inside the generated while loop below.
        input_parameters = [self.Type("{} const *".format(p.Type), "{}_ptr".format(p.Name)) for p in result.Parameters]

        invocation_statements, invocation_tuple = self._ExtractDecoratedInvocationStatements(result.InvocationStatements)
        assert not invocation_statements, invocation_statements

        # If the input buffer type is a pointer, it means that we don't
        # have to transform the input prior to passing it on. If it is not
        # a pointer, transformation is required.
        if self._IsPointer(result.InputBufferType.Type):
            # No transformation is required
            buffer_type = self.Type(
                "std::vector<std::tuple<{type}, {type}>>".format(
                    type=result.InputBufferType.Type,
                ),
                "{}_buffer".format(arg_name),
            )

            buffer_assignment = "{name}_buffer.emplace_back({invocation_ptr}, {invocation_ptr} + {invocation_size});".format(
                name=arg_name,
                invocation_ptr=invocation_tuple[0],
                invocation_size=invocation_tuple[1],
            )

            validation_suffix = ""
        else:
            # Transformation is required
            buffer_type = self.Type(
                "std::vector<{}>".format(result.InputBufferType.Type),
                "{}_temp_buffer".format(arg_name),
            )

            buffer_assignment = "{buffer_name}.emplace_back(std::move({item}));".format(
                buffer_name=buffer_type.Name,
                item=result.InputBufferType.Name,
            )

            # We have a vector of the concrete types, but need to pass a vector of tuples
            # to the featurizer itself. Create a new vector that has that info.
            validation_suffix = textwrap.dedent(
                """\
                std::vector<std::tuple<{type}, {type}>> {name}_buffer;
                {name}_buffer.reserve({temp_buffer}.size());
                for(auto const & {temp_buffer}_item : {temp_buffer})
                    {name}_buffer.emplace_back({temp_buffer}_item.data(), {temp_buffer}_item.data() + {temp_buffer}_item.size());
                """,
            ).format(
                name=arg_name,
                type="typename {}::const_pointer".format(result.InputBufferType.Type),
                temp_buffer=buffer_type.Name,
            )

        validation_statements = textwrap.dedent(
            """\
            {parameter_validation}
            if({items_var_name} == 0) throw std::invalid_argument("'{items_var_name}' is 0");
            {buffer_type} {buffer_name};
            {buffer_name}.reserve({items_var_name});
            while({buffer_name}.size() < {items_var_name}) {{
                {references}
                {validation_statements}
                {invocation_statements}
                {buffer_assignment}
                {increment_pointers}
            }}{validation_suffix}
            """,
        ).format(
            parameter_validation="\n".join(
                [
                    """if({name} == nullptr) throw std::invalid_argument("'{name}' is null");""".format(
                        name=p.Name,
                    )
                    for p in input_parameters
                ]
            ),
            name=arg_name,
            items_var_name=items_var_name,
            buffer_type=buffer_type.Type,
            buffer_name=buffer_type.Name,
            references=StringHelpers.LeftJustify(
                "\n".join(
                    [
                        "{type}{const_and_ref}{name}(*{name}_ptr);".format(
                            type=p.Type,
                            name=p.Name,
                            const_and_ref=" const &" if not self._IsPointer(p.Type) else "",
                        )
                        for p in result.Parameters
                    ]
                ),
                4,
            ),
            validation_statements=StringHelpers.LeftJustify(
                result.ValidationStatements.rstrip(),
                4,
            ),
            invocation_statements=StringHelpers.LeftJustify(
                invocation_statements.rstrip(),
                4,
            ),
            buffer_assignment=buffer_assignment,
            increment_pointers=StringHelpers.LeftJustify(
                "\n".join(["++{};".format(p.Name) for p in input_parameters]),
                4,
            ),
            validation_suffix="" if not validation_suffix else "\n\n{}".format(validation_suffix),
        )

        return self.Result(
            input_parameters + [self.Type("size_t", items_var_name)],
            validation_statements,
            invocation_template.format(
                "{name}_buffer.data(), {name}_buffer.size()".format(
                    name=arg_name,
                ),
            ),
            # NOTE(review): passes the Type object itself (buffer_type) as the
            # type argument rather than buffer_type.Type, unlike the other
            # self.Type(...) calls in this class -- confirm consumers expect this.
            input_buffer_type=self.Type(buffer_type, "{}_buffer".format(arg_name)),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetOutputInfo(
        self,
        arg_name,
        result_name="result",
        suppress_pointer=False,
    ):
        """Return Result describing how a vector output is returned.

        Generates C++ that allocates one output array per element-type
        parameter (``new T[result.size()]``), copies each element of the
        result vector into those arrays, and writes the element count to
        ``*<arg_name>_items``.

        suppress_pointer -- accepted for interface compatibility; not used
            by this implementation.
        """
        result = self._type_info.GetOutputInfo(
            "{}_item".format(arg_name),
            result_name="{}_item".format(result_name),
        )

        input_parameters = [self.Type("{}*".format(p.Type), "{}_ptr".format(p.Name)) for p in result.Parameters]

        if len(result.Parameters) == 1 and result.Parameters[0].Type == "bool *":
            # We can't take a reference to bools within a vector, as the values are stored as bits rather than
            # bool types.
            for_loop = "for(bool {result_name}_item : {result_name})".format(
                result_name=result_name,
            )
        else:
            for_loop = "for(auto const & {result_name}_item : {result_name})".format(
                result_name=result_name,
            )

        return self.Result(
            input_parameters + [self.Type("size_t *", "{}_items".format(arg_name))],
            textwrap.dedent(
                """\
                {statements}
                if({name}_items == nullptr) throw std::invalid_argument("'{name}_items' is null");
                """,
            ).format(
                statements="\n".join(
                    [
                        """if({name} == nullptr) throw std::invalid_argument("'{name}' is null");""".format(
                            name=p.Name,
                        )
                        for p in input_parameters
                    ]
                ),
                name=arg_name,
            ),
            textwrap.dedent(
                """\
                if({result_name}.empty()) {{
                    {empty_allocations}
                }}
                else {{
                    // TODO: There are potential memory leaks if allocation fails
                    {allocations}
                    {initial_assignments}
                    {for_loop} {{
                        {validations}
                        {statements}
                        {ptr_increments}
                    }}
                }}
                *{name}_items = {result_name}.size();
                """,
            ).format(
                name=arg_name,
                result_name=result_name,
                empty_allocations=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "*{}_ptr = nullptr;".format(p.Name)
                            for p in result.Parameters
                        ]
                    ),
                    4,
                ),
                allocations=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "*{name}_ptr = new {type}[{result_name}.size()];".format(
                                name=p.Name,
                                type=self._StripPointer(p.Type),
                                result_name=result_name,
                            )
                            for p in result.Parameters
                        ]
                    ),
                    4,
                ),
                initial_assignments=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "{type} {name}(*{name}_ptr);".format(
                                name=p.Name,
                                type=p.Type,
                            )
                            for p in result.Parameters
                        ]
                    ),
                    4,
                ),
                for_loop=for_loop,
                validations=StringHelpers.LeftJustify(result.ValidationStatements, 8).rstrip(),
                statements=StringHelpers.LeftJustify(result.InvocationStatements, 8).rstrip(),
                ptr_increments=StringHelpers.LeftJustify(
                    "\n".join(["++{};".format(p.Name) for p in result.Parameters]),
                    8,
                ),
            ),
        )

    # ----------------------------------------------------------------------
    @Interface.override
    def GetDestroyOutputInfo(
        self,
        arg_name="result",
    ):
        """Return Result describing how the output of GetOutputInfo is freed.

        Generates C++ that validates internal consistency of the pointers,
        per-element destroy statements (when the element type needs them),
        and ``delete []`` for each allocated array.
        """
        result = self.GetOutputInfo(
            arg_name,
        )

        # Strip one level of pointer: the destroy API receives the arrays
        # by value rather than through out-parameters.
        input_parameters = [self.Type(self._StripPointer(p.Type), p.Name) for p in result.Parameters]

        # The trailing parameter produced by GetOutputInfo is the item count.
        assert input_parameters[-1].Type == "size_t", input_parameters[-1].Type
        assert input_parameters[-1].Name.endswith("_items"), input_parameters[-1].Name

        pointer_parameters = input_parameters[:-1]

        # Create the destroy statements
        destroy_result = self._type_info.GetDestroyOutputInfo("{}_destroy_item".format(arg_name))
        if destroy_result is not None:
            assert len(destroy_result.Parameters) == len(result.Parameters) - 1

            destroy_statements = textwrap.dedent(
                """\
                {variable_statements}
                while({name}_items--) {{
                    {assignment_statements}
                    {delete_statements}
                    {increment_statements}
                }}
                """,
            ).format(
                name=arg_name,
                variable_statements="\n".join(
                    [
                        "{type} this_{name}({name});".format(
                            type=p.Type,
                            name=p.Name,
                        )
                        for p in pointer_parameters
                    ],
                ),
                assignment_statements=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            """{destroy_type} const & {destroy_name}(*this_{parameter_name});""".format(
                                destroy_type=destroy_p.Type,
                                destroy_name=destroy_p.Name,
                                parameter_name=standard_p.Name,
                            )
                            for destroy_p, standard_p in zip(destroy_result.Parameters, pointer_parameters)
                        ]
                    ),
                    4,
                ),
                delete_statements=StringHelpers.LeftJustify(
                    textwrap.dedent(
                        """\
                        {}
                        {}
                        """,
                    ).format(
                        destroy_result.ValidationStatements.rstrip() if destroy_result.ValidationStatements else "// No validation statements",
                        destroy_result.InvocationStatements.rstrip(),
                    ),
                    4,
                ),
                increment_statements=StringHelpers.LeftJustify(
                    "\n".join([ "++this_{};".format(p.Name) for p in pointer_parameters]),
                    4,
                ),
            )
        else:
            destroy_statements = "// No destroy statements"

        return self.Result(
            input_parameters,
            textwrap.dedent(
                """\
                if({initial_ptr_name} != nullptr && {name}_items == 0) throw std::invalid_argument("'{name}_items' is 0");
                if({initial_ptr_name} == nullptr && {name}_items != 0) throw std::invalid_argument("'{name}_items' is not 0");
                {ptr_validations}
                """,
            ).format(
                initial_ptr_name=input_parameters[0].Name,
                name=arg_name,
                ptr_validations="\n".join(
                    [
                        """if(bool({name}) != bool({initial_ptr_name})) throw std::invalid_argument("'{name}' is not internally consistent");""".format(
                            initial_ptr_name=input_parameters[0].Name,
                            name=p.Name,
                        )
                        for p in input_parameters[1:]
                    ]
                ),
            ),
            textwrap.dedent(
                """\
                if({initial_ptr_name} != nullptr) {{
                    {statements}
                    {delete_ptrs}
                }}
                """,
            ).format(
                initial_ptr_name=input_parameters[0].Name,
                statements=StringHelpers.LeftJustify(destroy_statements, 4).rstrip(),
                delete_ptrs=StringHelpers.LeftJustify(
                    "\n".join(
                        [
                            "delete [] {};".format(p.Name)
                            for p in pointer_parameters
                        ]
                    ),
                    4,
                ),
            ),
        )

    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    # ----------------------------------------------------------------------
    @staticmethod
    def _StripPointer(value):
        """Remove one trailing '*' (and an optional trailing 'const') from a
        C++ type string; asserts that the value is in fact a pointer type."""
        value = value.strip()
        if value.endswith("const"):
            value = value[:-len("const")].rstrip()

        assert value.endswith("*"), value
        return value[:-1].rstrip()

    # ----------------------------------------------------------------------
    @staticmethod
    def _IsPointer(value):
        """Return True if the C++ type string is a pointer type (optionally
        'const'-qualified at the end)."""
        value = value.strip()
        if value.endswith("const"):
            value = value[:-len("const")].rstrip()

        return value.endswith("*")
| 38.659229 | 153 | 0.4389 | 1,460 | 19,059 | 5.500685 | 0.14589 | 0.019176 | 0.011207 | 0.016187 | 0.359233 | 0.263977 | 0.224505 | 0.190138 | 0.175819 | 0.104844 | 0 | 0.003434 | 0.404061 | 19,059 | 492 | 154 | 38.737805 | 0.703645 | 0.103153 | 0 | 0.423529 | 0 | 0 | 0.070156 | 0.018462 | 0 | 0 | 0 | 0.002033 | 0.032353 | 1 | 0.020588 | false | 0 | 0.023529 | 0 | 0.073529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6a1c2ab8cd5b17863bf873dabf0a5085e350658 | 2,492 | py | Python | reque.py | parserpp/ppppppppppp | a1c1ef1d252e7cf652e90465649483b728bf9839 | [
"MIT"
] | null | null | null | reque.py | parserpp/ppppppppppp | a1c1ef1d252e7cf652e90465649483b728bf9839 | [
"MIT"
] | null | null | null | reque.py | parserpp/ppppppppppp | a1c1ef1d252e7cf652e90465649483b728bf9839 | [
"MIT"
] | null | null | null | import time
import requests
import urllib3
from lxml import etree
from requests.models import Response
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Suppress the InsecureRequestWarning spam caused by issuing every request
# with verify=False below.  The three calls are redundant variants of the
# same suppression, kept as-is.
urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings()
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

from fake_useragent import UserAgent

# Shared User-Agent randomizer used to build request headers.
ua = UserAgent()
class WebRequest(object):
    """Thin wrapper around ``requests.get`` with retries and lazy parsing.

    ``get()`` returns ``self`` so the ``tree`` / ``text`` / ``json``
    properties can be chained.  TLS verification is disabled for every
    request (``verify=False``).
    """

    name = "web_request"

    def __init__(self, *args, **kwargs):
        # Placeholder response; replaced on a successful get().  Its
        # status_code is None, so the properties below fall back to their
        # empty defaults until a real response arrives.
        self.response = Response()

    def req_header(self):
        """Build the default request headers with a randomized User-Agent."""
        _header = {'User-Agent': ua.random,
                   'Accept': '*/*',
                   'Connection': 'keep-alive',
                   'Accept-Language': 'zh-CN,zh;q=0.8'}
        return _header

    def get(self, url, header=None, retry_time=3, retry_interval=5, timeout=10, *args, **kwargs):
        """GET *url*, retrying on any exception.

        :param url: target url
        :param header: extra headers merged over the defaults
        :param retry_time: number of attempts before giving up
        :param retry_interval: seconds to sleep between attempts
        :param timeout: network timeout in seconds
        :return: self (for property chaining)
        """
        headers = self.req_header()
        if header and isinstance(header, dict):
            headers.update(header)
        while True:
            try:
                self.response = requests.get(
                    url
                    , headers=headers
                    , timeout=timeout
                    , verify=False
                    , *args
                    , **kwargs
                )
                return self
            except Exception:
                retry_time -= 1
                if retry_time <= 0:
                    # Out of retries: give up.  self.response keeps the
                    # placeholder from __init__, so tree/text/json return
                    # their empty defaults.  (The original built a local
                    # Response with status_code 200 here and discarded it
                    # -- dead code, removed.)
                    return self
                time.sleep(retry_interval)

    @property
    def tree(self):
        """lxml element tree of the response body, or "" if no 200 response."""
        if self.response.status_code == 200:
            return etree.HTML(self.response.content)
        else:
            return ""

    @property
    def text(self):
        """Decoded response body, or "" if no 200 response."""
        if self.response.status_code == 200:
            return self.response.text
        else:
            return ""

    @property
    def json(self):
        """Parsed JSON body; "" if no 200 response, {} on any parse error.

        NOTE: the mixed ""/{} failure values are preserved for backward
        compatibility with existing callers.
        """
        try:
            if self.response.status_code == 200:
                return self.response.json()
            else:
                return ""
        except Exception:
            return {}
| 28 | 97 | 0.532905 | 249 | 2,492 | 5.232932 | 0.369478 | 0.073676 | 0.039908 | 0.058327 | 0.1934 | 0.175748 | 0.175748 | 0.175748 | 0.069071 | 0 | 0 | 0.016026 | 0.373997 | 2,492 | 88 | 98 | 28.318182 | 0.819231 | 0.111958 | 0 | 0.28125 | 0 | 0 | 0.036933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.109375 | 0 | 0.390625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6a456d569704179a69c2734a46c283edcb8c45d | 1,306 | py | Python | spellvardetection/webapi/resources.py | fab-bar/SpellvarDetection | 624f472f9eec9636650bace9c091ba1fe9cda313 | [
"MIT"
] | 1 | 2019-11-08T08:02:21.000Z | 2019-11-08T08:02:21.000Z | spellvardetection/webapi/resources.py | fab-bar/SpellvarDetection | 624f472f9eec9636650bace9c091ba1fe9cda313 | [
"MIT"
] | null | null | null | spellvardetection/webapi/resources.py | fab-bar/SpellvarDetection | 624f472f9eec9636650bace9c091ba1fe9cda313 | [
"MIT"
] | null | null | null | import os
import shutil
import click
from flask import current_app
from flask.cli import AppGroup
### CLI for management of additional resources (registered on the Flask app
### via init_app; invoked as `flask resources <command>`).
res_cli = AppGroup('resources', short_help='Manage additional resources used by SpellvarDetection.')
@res_cli.command('list')
def list_resources():
    "List existing resources."
    # Print one resource filename per line.
    resource_dir = current_app.config['RESOURCES_PATH']
    click.echo('\n'.join(os.listdir(resource_dir)))
@res_cli.command('add')
@click.argument('filename')
def add_resource(filename):
    "Add a resource."
    # Renamed from `list_resources` -- the original module defined three
    # functions under that one name, each definition shadowing the previous.
    # The CLI command name ('add') is unaffected by the function name.
    # Refuse to overwrite an existing resource of the same basename.
    if os.path.exists(os.path.join(current_app.config['RESOURCES_PATH'], os.path.basename(filename))):
        click.echo('File does already exist in resource folder.')
    else:
        try:
            newname = shutil.copy(filename, current_app.config['RESOURCES_PATH'])
        except IOError as e:
            print(e)
        else:
            click.echo('Added ' + os.path.basename(newname) + ' to the resources.')
@res_cli.command('remove')
@click.argument('filename')
def remove_resource(filename):
    "Remove a resource."
    # Renamed from `list_resources` (name was shared by three functions);
    # help-text typo "Remove a resources." also fixed.
    try:
        os.remove(os.path.join(current_app.config['RESOURCES_PATH'], filename))
    except IOError as e:
        print(e)
    else:
        click.echo('Removed ' + filename + ' from the resources.')
def init_app(app):
    """Register the resource-management CLI group on the Flask app."""
    app.cli.add_command(res_cli)
| 27.787234 | 102 | 0.683767 | 172 | 1,306 | 5.075581 | 0.354651 | 0.057274 | 0.07331 | 0.114548 | 0.339061 | 0.272623 | 0.272623 | 0.16953 | 0.080183 | 0 | 0 | 0 | 0.18683 | 1,306 | 46 | 103 | 28.391304 | 0.822034 | 0.079632 | 0 | 0.371429 | 0 | 0 | 0.240476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.142857 | 0 | 0.257143 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6acc8f967fad42f11b205b325094dc3299810f3 | 1,759 | py | Python | ocr-server/ocr_server/lines.py | twerkmeister/table-annotator | 11bce00f28411a1ad047ba673d3e713060076943 | [
"MIT"
] | null | null | null | ocr-server/ocr_server/lines.py | twerkmeister/table-annotator | 11bce00f28411a1ad047ba673d3e713060076943 | [
"MIT"
] | null | null | null | ocr-server/ocr_server/lines.py | twerkmeister/table-annotator | 11bce00f28411a1ad047ba673d3e713060076943 | [
"MIT"
] | null | null | null | from typing import List
import numpy as np
import cv2
import scipy.signal
def find_line(image: np.ndarray, window_size: int = 30) -> np.ndarray:
    """Extract the single densest horizontal text band from *image*.

    The image is bit-inverted (so darker pixels contribute larger values
    for the typical 8-bit grayscale input), each row is summed, and every
    window of ``window_size`` consecutive rows is scored by its total.
    The slice of the original image under the best-scoring window is
    returned.
    """
    inverted = cv2.bitwise_not(image)
    row_mass = np.sum(inverted, axis=1)
    num_rows = row_mass.shape[0]

    # Score each candidate window start (windows near the bottom are
    # implicitly truncated by the slice).
    scores = []
    for offset in range(num_rows):
        scores.append(np.sum(row_mass[offset:offset + window_size]))

    best = np.argmax(scores)
    top = max(best, 0)
    bottom = min(best + window_size, num_rows)
    return image[top:bottom]
def find_lines(image: np.ndarray) -> List[np.ndarray]:
    """Split a cell image into multiple text lines.

    Builds a per-row "text energy" profile (row sums of the inverted,
    median-blurred image plus squared horizontal pixel differences),
    smooths it with a Gaussian window, and cuts a fixed-height band
    around every local maximum of the smoothed profile.
    """
    window_size = 30
    # Median blur suppresses salt-and-pepper noise before inversion.
    image_blurred = cv2.medianBlur(image, 5)
    image_inverted = cv2.bitwise_not(image_blurred)
    # Row-wise mass of the inverted image.
    image_squeezed = np.sum(image_inverted, axis=1)
    # Squared horizontal gradients emphasize rows with many dark/light
    # transitions (i.e. rows containing glyph strokes).
    image_horizontal_squared_diffs = \
        np.sum(np.square(np.diff(image_inverted, axis=1)), axis=1)
    image_squeezed = image_squeezed + image_horizontal_squared_diffs
    # Smooth the profile with a Gaussian (std 5) so each text line yields
    # a single broad peak.
    gaussian_window = scipy.signal.windows.gaussian(window_size, 5)
    values = np.convolve(image_squeezed, gaussian_window, 'same')
    # Local maxima are where the discrete derivative's sign flips from
    # +1 to -1 (sign difference of -2).
    value_diffs = np.diff(values)
    diff_signs = np.sign(value_diffs)
    sign_diffs = np.diff(diff_signs)
    local_maxima = [i for i, sign_diff in enumerate(sign_diffs) if sign_diff == -2]
    # Drop maxima within 8 rows of the image border.
    local_maxima = [local_maximum for local_maximum in local_maxima
                    if local_maximum > 8 and local_maximum + 8 < image.shape[0]]
    # Cut a band of up to 30 rows (15 above, 15 below) around each peak.
    lines = []
    for local_maximum in local_maxima:
        start = max(local_maximum - 15, 0)
        end = min(local_maximum + 15, image.shape[0])
        lines.append(image[start:end])
    return lines
| 36.645833 | 83 | 0.699261 | 259 | 1,759 | 4.505792 | 0.297297 | 0.071979 | 0.044559 | 0.046272 | 0.173093 | 0.140531 | 0 | 0 | 0 | 0 | 0 | 0.019149 | 0.198408 | 1,759 | 47 | 84 | 37.425532 | 0.808511 | 0.046617 | 0 | 0 | 0 | 0 | 0.002401 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.114286 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6ae1b0f0d22088c98982c121eae8e4affeac754 | 4,406 | py | Python | model.py | shaldev/java-builder-generator | 22c47067a058d6708910c869e41e1cfd66da9435 | [
"MIT"
] | null | null | null | model.py | shaldev/java-builder-generator | 22c47067a058d6708910c869e41e1cfd66da9435 | [
"MIT"
] | null | null | null | model.py | shaldev/java-builder-generator | 22c47067a058d6708910c869e41e1cfd66da9435 | [
"MIT"
] | null | null | null | package = 'org.bonadza.openrtb'
# OpenRTB bid request/response schema description consumed by the builder
# generator.  Leaf values name the generated Java field type ('str', 'int',
# 'float', array forms 'str[]'/'int[]', or 'ext' for an extension object);
# nested dicts describe nested objects and single-element lists describe
# arrays of objects.
# Fix: the device object previously declared 'osv' twice; Python dict
# literals silently keep only the last occurrence, so the duplicate entry
# was removed.
model = {
    'bidResponse': {
        'id': 'str',
        'seatbid': [{
            'bid': [{
                'id': 'str',
                'impid': 'str',
                'price': 'float',
                'adid': 'str',
                'nurl': 'str',
                'adm': 'str',
                'adomain': 'str[]',
                'bundle': 'str',
                'iurl': 'str',
                'cid': 'str',
                'crid': 'str',
                'cat': 'str[]',
                'attr': 'int[]',
                'dealid': 'str',
                'w': 'int',
                'h': 'int',
                'ext': 'ext'
            }],
            'seat': 'str',
            'group': 'int',
            'ext': 'ext'
        }],
        'bidid': 'str',
        'cur': 'str',
        'customdata': 'str',
        'nbr': 'int',
        'ext': 'ext'
    },
    'bidRequest': {
        'id': 'str',
        'imp': [{
            'id': 'str',
            'banner': {
                'w': 'int',
                'h': 'int',
                'wmax': 'int',
                'hmax': 'int',
                'wmin': 'int',
                'hmin': 'int',
                'id': 'str',
                'btype': 'int[]',
                'battr': 'int[]',
                'pos': 'int',
                'mimes': 'str[]',
                'topframe': 'int',
                'expdir': 'int[]',
                'api': 'int[]',
                'ext': 'ext',
            },
            'video': {
                'mimes': 'str[]',
                'minduration': 'int',
                'maxduration': 'int',
                'protocol': 'int',
                'protocols': 'int[]',
                'w': 'int',
                'h': 'int',
                'startdelay': 'int',
                'linearity': 'int',
                'sequence': 'int',
                'battr': 'int[]',
                'maxextended': 'int',
                'minbitrate': 'int',
                'maxbitrate': 'int',
                'boxingallowed': 'int',
                'playbackmethod': 'int[]',
                'delivery': 'int[]',
                'pos': 'int',
                # 'companionad' : ''
                'api': 'int[]',
                'ext': 'ext',
                'companiontype': 'int[]'
            },
            'native': {
                'request': 'str[]',
                'ver': 'str',
                'api': 'int[]',
                'battr': 'int[]',
                'ext': 'ext'
            },
            'displaymanager': 'str',
            'displaymanagerver': 'str',
            'instl': 'int',
            'tagid': 'str',
            'bidfloor': 'float',
            'bidfloorcur': 'str',
            'secure': 'int',
            'iframebuster': 'str[]',
            'pmp': {
                'private_auction': 'int',
                'deals': [{
                    'id': 'str',
                    'bidfloor': 'float',
                    'bidfloorcur': 'str',
                    'at': 'int',
                    'wseat': 'str[]',
                    'wadomain': 'str[]',
                    'ext': 'ext'
                }],
                'ext': 'ext'
            },
            'ext': 'ext'
        }],
        'site': {
            'id': 'str',
            'name': 'str',
            'domain': 'str',
            'cat': 'str[]',
            'sectioncat': 'str[]',
            'pagecat': 'str[]',
            'page': 'str',
            'ref': 'str',
            'search': 'str',
            'mobile': 'int',
            'privacypolicy': 'int',
            'publisher': {
                'id': 'str',
                'name': 'str',
                'cat': 'str[]',
                'domain': 'str',
                'ext': 'ext'
            },
            'content': {
            },
            'keywords': 'str',
            'ext': 'ext'
        },
        'app': {
            'id': 'str',
            'name': 'str',
            'bundle': 'str',
            'domain': 'str',
            'storeurl': 'str',
            'cat': 'str[]',
            'sectioncat': 'str[]',
            'pagecat': 'str[]',
            'ver': 'str',
            'privacypolicy': 'int',
            'paid': 'int',
            'publisher': {},
            'content': {},
            'keywords': 'str',
            'ext': 'ext',
        },
        'device': {
            'ua': 'str',
            'geo': {
                'lat': 'float',
                'lon': 'float',
                'type': 'int',
                'country': 'str',
                'region': 'str',
                'regionfips104': 'str',
                'metro': 'str',
                'city': 'str',
                'zip': 'str',
                'utcoffset': 'int',
                'ext': 'ext',
            },
            'dnt': 'int',
            'lmt': 'int',
            'ip': 'str',
            'ipv6': 'str',
            'devicetype': 'int',
            'make': 'str',
            'model': 'str',
            'os': 'str',
            'osv': 'str',
            'hwv': 'str',
            'w': 'int',
            'h': 'int',
            'js': 'int',
            'ppi': 'int',
            'pxratio': 'float',
            'flashver': 'str',
            'language': 'str',
            'carrier': 'str',
            'connectiontype': 'int',
            'ifa': 'str',
            'didsha1': 'str',
            'didmd5': 'str',
            'dpidsha1': 'str',
            'dpidmd5': 'str',
            'macsha1': 'str',
            'macmd5': 'str',
            'ext': 'ext',
        },
        'user': {
            'id': 'str',
            'buyeruid': 'str',
            'yob': 'int',
            'gender': 'str',
            'keywords': 'str',
            'customdata': 'str',
            'geo': {},
            'data': [{
                'id': 'str',
                'name': 'str',
                'segment': [{
                    'id': 'str',
                    'name': 'str',
                    'value': 'str',
                    'ext': 'ext'
                }],
                'ext': 'ext'
            }],
            'ext': 'ext',
        },
        'test': 'int',
        'at': 'int',
        'tmax': 'int',
        'wseat': 'str[]',
        'allimps': 'int',
        'cur': 'str[]',
        'bcat': 'str[]',
        'badv': 'str[]',
        'regs': {
            'coppa': 'int',
            'ext': 'ext'
        },
        'ext': 'ext'
    }
}
b6b0d58640e8ab5ad641a791c0e3af73049572b4 | 760 | py | Python | scripts/preprocess_glsl.py | Pandinosaurus/geojs | c38b3c91a597db84bbc74c2c915bb525a82aedc1 | [
"Apache-2.0"
] | 365 | 2015-01-28T12:07:22.000Z | 2022-03-27T14:17:10.000Z | scripts/preprocess_glsl.py | Pandinosaurus/geojs | c38b3c91a597db84bbc74c2c915bb525a82aedc1 | [
"Apache-2.0"
] | 699 | 2015-01-05T21:22:40.000Z | 2022-03-30T15:58:55.000Z | scripts/preprocess_glsl.py | manthey/geojs | 9f36165133f07c8fb08102e0b3459369a052f6a3 | [
"Apache-2.0"
] | 74 | 2015-02-23T14:08:13.000Z | 2022-03-17T23:37:05.000Z | #!/usr/bin/env python3
import argparse
import os
import re
import sys
def readSource(source):
    """Read *source*, recursively expanding ``$name`` include tokens.

    Each token of the form ``$name`` is replaced with the (recursively
    expanded) contents of ``name.glsl`` located next to *source*,
    mirroring the include behavior of shader-loader.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(source).read()` left closing to the GC).
    with open(source) as f:
        data = f.read()
    parts = re.split('(\\$[-.\\w]+)', data)
    for idx, chunk in enumerate(parts):
        if chunk.startswith('$') and len(chunk) > 1:
            parts[idx] = readSource(os.path.join(os.path.dirname(source), chunk[1:] + '.glsl'))
    return ''.join(parts)
if __name__ == '__main__':
    # Command-line entry point: expand includes in the given file and emit
    # the result on stdout (suitable for piping to glslangValidator).
    # Fix: help-text typo "can sent" -> "can be sent".
    parser = argparse.ArgumentParser(
        description='Preprocess glsl files to handle includes in the same way '
        'as shader-loader. The output of this can be sent to glslangValidator.')
    parser.add_argument('source', help='Source file')
    args = parser.parse_args()
    data = readSource(args.source)
    sys.stdout.write(data)
| 29.230769 | 95 | 0.651316 | 100 | 760 | 4.85 | 0.63 | 0.028866 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004951 | 0.202632 | 760 | 25 | 96 | 30.4 | 0.79538 | 0.027632 | 0 | 0 | 0 | 0 | 0.227642 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.210526 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6b0f5fa7387a72c8d23e89c64d1524c5f61c96b | 286 | py | Python | test/task_1/fixtures/__init__.py | Quinlys/-Yakymiv_Igor--tasks | 4992ca5fd050ed35f060b5b22ed05133be5c1d5a | [
"MIT"
] | 2 | 2018-06-15T08:06:09.000Z | 2018-06-24T12:28:07.000Z | test/task_1/fixtures/__init__.py | Quinlys/-Yakymiv_Igor--tasks | 4992ca5fd050ed35f060b5b22ed05133be5c1d5a | [
"MIT"
] | null | null | null | test/task_1/fixtures/__init__.py | Quinlys/-Yakymiv_Igor--tasks | 4992ca5fd050ed35f060b5b22ed05133be5c1d5a | [
"MIT"
] | 1 | 2018-06-15T14:41:23.000Z | 2018-06-15T14:41:23.000Z | import os
import json
# Directory containing this package; resolved via __file__ so the bundled
# JSON fixture files are found regardless of the current working directory.
_location = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))

# Load the small fixture set at import time ...
with open(os.path.join(_location, 'small.json')) as f:
    fixtures = json.load(f)

# ... then merge the 1000-record set into it under the key '1000'.
with open(os.path.join(_location, '1000.json')) as f:
    fixtures['1000'] = json.load(f)
| 23.833333 | 57 | 0.681818 | 46 | 286 | 4.086957 | 0.391304 | 0.159574 | 0.159574 | 0.148936 | 0.276596 | 0.276596 | 0 | 0 | 0 | 0 | 0 | 0.03252 | 0.13986 | 286 | 11 | 58 | 26 | 0.731707 | 0 | 0 | 0 | 0 | 0 | 0.08042 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6b2392dc515104106ac7e7d4892cd8bdc2834dc | 4,102 | py | Python | views.py | rajeev/lifeflow | b1de2a7b5b8a89042c1440b3e38092ef1241b9ca | [
"MIT"
] | 2 | 2015-11-24T08:51:46.000Z | 2016-05-08T10:24:42.000Z | views.py | rajeev/lifeflow | b1de2a7b5b8a89042c1440b3e38092ef1241b9ca | [
"MIT"
] | null | null | null | views.py | rajeev/lifeflow | b1de2a7b5b8a89042c1440b3e38092ef1241b9ca | [
"MIT"
] | null | null | null | """
Views.py
Author: Will Larson
Contact: lethain@gmail.com
Contains one custom view for displaying articles.
Mostly necessary to presort the articles in order
of descending size.
"""
import datetime, time, random, cgi, md5
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.http import Http404, HttpResponseRedirect
from django.conf import settings
from django.core.paginator import QuerySetPaginator
from lifeflow.models import Series, Flow, Entry, Comment
from lifeflow.forms import CommentForm
def server_error(request):
    """Render the custom 500 (server error) page."""
    context = RequestContext(request, {})
    return render_to_response('500.html', {}, context)
def articles(request):
    """List every Series on the articles page."""
    series_list = Series.objects.all()
    return render_to_response('lifeflow/articles.html',
                              {'object_list': series_list},
                              RequestContext(request, {}))
def comments(request, entry_id=None, parent_id=None):
    """Display the comment form for an Entry, or accept a submission.

    GET (or a preview POST) renders lifeflow/comment.html with the bound
    form; a POST carrying 'submit' validates the anti-spam checks, saves
    the Comment and redirects to its anchor on the entry page.

    NOTE(review): legacy Python 2 code throughout -- ``has_key``,
    ``xrange``, ``unicode`` and the ``md5`` module are all Python 2 only.
    """
    def make_identifier(id, time):
        # Anti-spam token: md5 of a fixed prefix, the entry id, the
        # truncated timestamp and the site secret.  ``time`` here is a
        # string and shadows the stdlib module.
        secret = getattr(settings, 'SECRET_KEY')
        time = time[:-4]
        data = "%s%s%s%s" % ("lifeflow", id, time, secret)
        return md5.md5(data).hexdigest()
    # if an entry ID has been posted, use that
    if request.POST.has_key('entry_id'):
        id = int(request.POST['entry_id'])
    # otherwise use the parameter
    else:
        id = int(entry_id)
    # TODO: validate ID, throw 500 otherwise
    entry = Entry.objects.get(pk=id)
    # Resolve the parent comment (threaded replies); POST data wins over
    # the URL parameter.
    if request.POST.has_key('parent_id') and request.POST['parent_id'] != u"":
        parent_id = int(request.POST['parent_id'])
        parent = Comment.objects.get(pk=parent_id)
    elif parent_id is None:
        parent = None
    else:
        parent_id = int(parent_id)
        parent = Comment.objects.get(pk=parent_id)
    # add an identifier to the post, part of the
    # anti-spam implementation
    if request.POST.has_key('identifier') is False:
        now = unicode(time.time()).split('.')[0]
        identifier = make_identifier(id, now)
    # or make a new identifier
    else:
        identifier = request.POST['identifier']
        now = request.POST['time']
    form = CommentForm(request.POST)
    form.is_valid()
    # Initial submission from entry_detail.html
    if request.POST.has_key('submit'):
        # Honeypot fields: hidden inputs honey5..honey7 must stay empty,
        # otherwise the submitter is assumed to be a bot.
        for i in xrange(5,8):
            name = u"honey%s" % i
            value = request.POST[name]
            if value != u"":
                raise Http404
        # Reject forms older than one hour or with a forged identifier.
        if time.time() - int(now) > 3600:
            raise Http404
        if identifier != make_identifier(id, now):
            raise Http404
        name = form.cleaned_data['name']
        email = form.cleaned_data['email']
        webpage = form.cleaned_data['webpage']
        rendered = form.cleaned_data['rendered']
        body = form.cleaned_data['body']
        c = Comment(entry=entry,parent=parent,name=name,email=email,
                    webpage=webpage,body=body,html=rendered)
        c.save()
        # Redirect to the new comment's anchor on the entry page.
        url = u"%s#comment_%s" % (entry.get_absolute_url(), c.pk)
        return HttpResponseRedirect(url)
    return render_to_response(
        'lifeflow/comment.html',
        {'object':entry,'parent':parent,'identifier':identifier,'time':now,'form':form},
        RequestContext(request, {}))
def flow(request, slug):
    """Display a paginated list of entries for a single Flow.

    Raises Http404 when no Flow with the given slug exists.  The page
    number comes from the ``page`` GET parameter, defaulting to 1 when
    missing or non-numeric.  Five entries are shown per page.
    """
    try:
        flow = Flow.objects.get(slug=slug)
    except Flow.DoesNotExist:
        raise Http404
    try:
        page = int(request.GET["page"])
    except (KeyError, ValueError):
        # Narrowed from a bare except: only a missing or non-numeric
        # parameter should fall back to page one.
        page = 1
    # Reuse the flow fetched above instead of running the query again.
    page = QuerySetPaginator(flow.entry_set.all(), 5).page(page)
    return render_to_response('lifeflow/flow_detail.html',
                              {'object': flow, 'page': page,},
                              RequestContext(request, {}))
def front(request):
    """Display the paginated front page of current entries.

    The page number comes from the ``page`` GET parameter, defaulting to
    1 when missing or non-numeric.  Three entries are shown per page.
    """
    try:
        page = int(request.GET["page"])
    except (KeyError, ValueError):
        # Narrowed from a bare except: only a missing or non-numeric
        # parameter should fall back to page one.
        page = 1
    page = QuerySetPaginator(Entry.current.all(), 3).page(page)
    return render_to_response('lifeflow/front.html', {'page':page}, RequestContext(request, {}))
def rss(request):
    """Render the meta RSS page listing every Flow."""
    all_flows = Flow.objects.all()
    context = RequestContext(request, {})
    return render_to_response('lifeflow/meta_rss.html', {'flows' : all_flows }, context)
| 32.555556 | 114 | 0.638957 | 512 | 4,102 | 5.015625 | 0.292969 | 0.047118 | 0.043614 | 0.051402 | 0.242212 | 0.136293 | 0.136293 | 0.075545 | 0.075545 | 0.043614 | 0 | 0.011498 | 0.236714 | 4,102 | 125 | 115 | 32.816 | 0.808687 | 0.102877 | 0 | 0.229885 | 0 | 0 | 0.090859 | 0.024557 | 0 | 0 | 0 | 0.008 | 0 | 1 | 0.08046 | false | 0 | 0.091954 | 0.011494 | 0.264368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6b24cefc57188b709e820b0bf006c36f34376a7 | 20,566 | py | Python | singlecelltool.py | MattiazziLab/singlecelltool | 0084709448ab851010ba488e3e4cc1d95422862e | [
"MIT"
] | null | null | null | singlecelltool.py | MattiazziLab/singlecelltool | 0084709448ab851010ba488e3e4cc1d95422862e | [
"MIT"
] | null | null | null | singlecelltool.py | MattiazziLab/singlecelltool | 0084709448ab851010ba488e3e4cc1d95422862e | [
"MIT"
] | 1 | 2021-06-09T23:37:03.000Z | 2021-06-09T23:37:03.000Z | from PIL import ImageTk, Image
from tkinter import filedialog, messagebox
import tkinter as tk
import pandas as pd
import numpy as np
import platform
import math
import os
import traceback
class Menu:
    """Tkinter application for manually labelling single-cell images.

    The user uploads a coordinates file (csv/xls/xlsx) and a phenotype
    list (txt), then pages through cropped cell images and assigns a
    phenotype label to each, which can be exported back to csv.
    """

    def __init__(self, main):
        """Build the initial upload/configuration screen.

        Parameters: ``main`` is the Tk root window.
        """
        self.main = main
        self.main.title("Single Cell Labelling Tool")
        self.main.geometry("1050x600")

        # Declare global variables
        self.os = platform.system()
        self.homepath = os.path.expanduser('~')
        self.global_coordfilename = tk.StringVar()
        self.global_ptypefilename = tk.StringVar()
        self.global_stats = tk.StringVar()
        self.global_labeledcellcnt = tk.IntVar()
        self.global_currentpage = tk.IntVar()
        self.global_displaycellcnt = tk.IntVar()
        self.global_cropsize = tk.IntVar()
        self.global_limitcell = tk.StringVar()
        self.global_limitmax = tk.StringVar()
        self.global_colcount = tk.IntVar()
        self.global_cid_input = tk.IntVar()
        # Accepted file extensions for the two upload slots.
        self.global_coordext = ['csv', 'xls', 'xlsx']
        self.global_ptypeext = ['txt']

        # Initialization
        self.initialize()

        # Initial Frame - Widgets
        self.frame_initial = tk.Frame(self.main)
        self.label_coordfile = tk.Label(self.frame_initial, text="Cell data file", width=13, anchor="w")
        self.label_ptypefile = tk.Label(self.frame_initial, text="Phenotype list", width=13, anchor="w")
        self.label_uploadedcoord = tk.Label(self.frame_initial, textvariable=self.global_coordfilename,
                                            anchor="w", wraplength=600)
        self.label_uploadedptype = tk.Label(self.frame_initial, textvariable=self.global_ptypefilename,
                                            anchor="w", wraplength=600)
        self.label_limitcell = tk.Label(self.frame_initial, text="Index minimum", width=13, anchor="w")
        self.entry_limitcell = tk.Entry(self.frame_initial, textvariable=self.global_limitcell, width=12)
        self.label_defaultlimitcell = tk.Label(self.frame_initial, text="Index of the first cell to be processed."
                                                                        "This is optional. "
                                                                        "By default, minimum is set to 1.")
        self.label_limitmax = tk.Label(self.frame_initial, text="Index maximum", width=13, anchor="w")
        self.entry_limitmax = tk.Entry(self.frame_initial, textvariable=self.global_limitmax, width=12)
        self.label_defaultlimitmax = tk.Label(self.frame_initial, text="Index of the last cell to be processed. This "
                                                                      "is optional. By default, maximum is set to "
                                                                      "total number of cells on the file.")
        self.label_displaycell = tk.Label(self.frame_initial, text="Display limit", width=13, anchor="w")
        self.entry_displaycell = tk.Entry(self.frame_initial, textvariable=self.global_displaycellcnt, width=12)
        self.label_defaultdisplaycell = tk.Label(self.frame_initial, text="Number of cells to be displayed on a "
                                                                         "single page. The default is 20.")
        self.label_cropsize = tk.Label(self.frame_initial, text="Crop size", width=13, anchor="w")
        self.entry_cropsize = tk.Entry(self.frame_initial, textvariable=self.global_cropsize, width=12)
        # NOTE(review): this label says the default crop size is 50, but
        # initialize() sets global_cropsize to 64 -- confirm which is intended.
        self.label_defaultcropsize = tk.Label(self.frame_initial, text="Pixel size to be used in cropping cells "
                                                                      "from the image. The default is 50.")
        self.checkbox_cid_input = tk.Checkbutton(self.frame_initial, text="Cell ID", variable=self.global_cid_input,
                                                 onvalue=1, offvalue=0, width=13, anchor="w")
        self.label_cid_input = tk.Label(self.frame_initial, text="Check this box if 'Cell ID' information is included "
                                                                "in the input file")
        self.button_coordfile = tk.Button(self.frame_initial, text="Choose file", anchor="w", command=self.coordfile)
        self.button_ptypefile = tk.Button(self.frame_initial, text="Choose file", anchor="w", command=self.ptypefile)
        self.button_start = tk.Button(self.frame_initial, text="START", state="disabled", command=self.start)

        # Initial Frame - Layout
        self.frame_initial.pack(fill='both', expand=True)
        self.label_coordfile.grid(row=0, column=0, padx=5, pady=5)
        self.button_coordfile.grid(row=0, column=1, padx=5, pady=5, sticky="w")
        self.label_uploadedcoord.grid(row=0, column=2, padx=5, pady=5, sticky="w")
        self.label_ptypefile.grid(row=1, column=0, padx=5, pady=5)
        self.button_ptypefile.grid(row=1, column=1, padx=5, pady=5, sticky="w")
        self.label_uploadedptype.grid(row=1, column=2, padx=5, pady=5, sticky="w")
        self.label_limitcell.grid(row=2, column=0, padx=5, pady=5)
        self.entry_limitcell.grid(row=2, column=1, padx=5, pady=5)
        self.label_defaultlimitcell.grid(row=2, column=2, padx=5, pady=5, sticky="w")
        self.label_limitmax.grid(row=3, column=0, padx=5, pady=5)
        self.entry_limitmax.grid(row=3, column=1, padx=5, pady=5)
        self.label_defaultlimitmax.grid(row=3, column=2, padx=5, pady=5, sticky="w")
        self.label_displaycell.grid(row=4, column=0, padx=5, pady=5)
        self.entry_displaycell.grid(row=4, column=1, padx=5, pady=5)
        self.label_defaultdisplaycell.grid(row=4, column=2, padx=5, pady=5, sticky="w")
        self.label_cropsize.grid(row=5, column=0, padx=5, pady=5)
        self.entry_cropsize.grid(row=5, column=1, padx=5, pady=5)
        self.label_defaultcropsize.grid(row=5, column=2, padx=5, pady=5, sticky="w")
        self.checkbox_cid_input.grid(row=6, column=0, padx=5, pady=5)
        self.label_cid_input.grid(row=6, column=2, padx=5, pady=5, sticky="w")
        self.button_start.grid(row=8, column=0, padx=5, pady=15, sticky="w")

    def check_uploads(self):
        """Enable START only when both files are chosen with valid extensions."""
        if (self.global_coordfilename.get() != "No file chosen") \
                and (self.global_ptypefilename.get() != "No file chosen"):
            # NOTE(review): split('.')[1] assumes exactly one dot in the
            # full path; a dotted directory name would break this.
            self.coord_ext = self.global_coordfilename.get().split('.')[1]
            ptype_ext = self.global_ptypefilename.get().split('.')[1]
            if (self.coord_ext in self.global_coordext) and (ptype_ext in self.global_ptypeext):
                self.button_start.config(state="normal")
            else:
                self.button_start.config(state="disabled")
        else:
            self.button_start.config(state="disabled")

    def coordfile(self):
        """Open a file dialog to choose the coordinates file."""
        coord_filename = filedialog.askopenfilename(initialdir="/home/myra/phenomics/apps/singlecell",  # self.homepath
                                                    title="Select coordinates file",
                                                    filetypes=(("CSV files", "*.csv"),
                                                               ("Excel files", "*.xls*"),
                                                               ("All files", "*.*")))
        self.global_coordfilename.set(coord_filename)
        self.check_uploads()

    def ptypefile(self):
        """Open a file dialog to choose the phenotype list file."""
        ptype_filename = filedialog.askopenfilename(initialdir="/home/myra/phenomics/apps/singlecell",
                                                    title="Select phenotype list file",
                                                    filetypes=(("Text files", "*.txt"),
                                                               ("All files", "*.*")))
        self.global_ptypefilename.set(ptype_filename)
        self.check_uploads()

    def start(self):
        """Load both input files and switch to the labelling canvas."""
        self.frame_initial.pack_forget()

        # Process phenotype list
        ptypefile = open(self.global_ptypefilename.get(), 'r')
        self.phenotypes = [p.strip() for p in ptypefile.readlines()]

        # Main canvas display
        self.canvas_display = tk.Canvas(self.main)
        self.scroll_vertical = tk.Scrollbar(self.main, orient='vertical', command=self.canvas_display.yview)
        self.canvas_display.pack(expand='yes', fill='both', side='left')
        self.scroll_vertical.pack(fill='y', side='right')
        self.canvas_display.configure(yscrollcommand=self.scroll_vertical.set)
        # X11 reports wheel events as Button-4/5; other platforms use
        # <MouseWheel>.
        if self.os == 'Linux':
            self.canvas_display.bind_all("<4>", self.on_mousewheel)
            self.canvas_display.bind_all("<5>", self.on_mousewheel)
        else:
            self.canvas_display.bind_all("<MouseWheel>", self.on_mousewheel)

        # Initialize frame display map
        self.frame_alldisplay = {}
        self.canvas_allframes = {}

        # Inside the canvas
        self.button_restart = tk.Button(self.canvas_display, text="HOME", command=self.restart)
        self.button_export = tk.Button(self.canvas_display, text="Export labeled data", command=self.exportdata)
        self.label_stats = tk.Label(self.canvas_display, textvariable=self.global_stats)
        self.canvas_display.create_window(10, 10, window=self.button_restart, anchor='nw')
        self.canvas_display.create_window(80, 10, window=self.button_export, anchor='nw')
        self.canvas_display.create_window(700, 10, window=self.label_stats, anchor='nw')

        # Process coordinates file
        if self.coord_ext == 'csv':
            self.coord_df = pd.read_csv(self.global_coordfilename.get())
        else:
            self.coord_df = pd.read_excel(self.global_coordfilename.get())
        self.is_cid = self.global_cid_input.get()
        # Optional index limits; fall back to the full range on blank or
        # non-numeric entry values.
        try:
            self.cellcnt_min = int(self.global_limitcell.get()) - 1
        except ValueError:
            self.cellcnt_min = 0
        try:
            self.cellcnt_max = int(self.global_limitmax.get())
        except ValueError:
            self.cellcnt_max = self.coord_df.shape[0]
        self.total_cellcnt = self.cellcnt_max - self.cellcnt_min
        self.coord_df = self.coord_df[self.cellcnt_min:self.cellcnt_max]
        self.global_colcount.set(self.coord_df.shape[1])
        self.total_batchpage = int(math.ceil(self.total_cellcnt / self.global_displaycellcnt.get()))
        self.global_stats.set("Label count: %d out of %d" %(self.global_labeledcellcnt.get(), self.total_cellcnt))
        # self.testdf = self.coord_df[:self.global_displaycellcnt.get()]
        # One label slot and one dropdown variable per cell in range.
        self.coord_df['Saved Label'] = [None for _i in range(self.total_cellcnt)]
        self.selected_options = [tk.StringVar(value=self.phenotypes[0]) for _i in range(self.total_cellcnt)]
        self.create_cellframes(self.coord_df, self.global_currentpage.get())  # create frame for each cell

    def create_cellframes(self, dataframe, currentpage):
        """Build the two-column grid of cell crops for one batch page."""
        # Create new frame display
        self.frame_display = tk.Frame(self.canvas_display)
        self.frame_alldisplay[currentpage] = self.frame_display
        self.canvas_allframes[currentpage] = self.canvas_display.create_window(0, 50, window=self.frame_display,
                                                                              anchor='nw')
        start = (currentpage-1)*self.global_displaycellcnt.get()
        end = currentpage*self.global_displaycellcnt.get()
        currentbatch_df = dataframe[start:end]
        pos = 1
        # for idx, path, center_x, center_y in currentbatch_df.iloc[:,:3].itertuples():
        for data in currentbatch_df.iterrows():
            idx = data[0]
            alldata = data[1]
            # Column layout: [Cell ID,] path, x, y[, initial label].
            if self.global_cid_input.get() == 0:
                info_startid = 0
            else:
                info_startid = 1
            path = alldata[info_startid]
            center_x = alldata[info_startid+1]
            center_y = alldata[info_startid+2]
            # Alternate cells between the two grid columns.
            modpos = pos % 2
            if modpos == 0:
                row = int(pos/2) - 1
                col = 1
            else:
                row = int(pos/2)
                col = 0
            # print('\tINDEX: %d - POSITION: %s - COORDINATE: %d,%d' %(idx, pos, row, col))
            pos += 1
            cell = self.imagecrop(path, int(center_x), int(center_y))
            cellimage = ImageTk.PhotoImage(cell)
            self.labelframe_cell = tk.LabelFrame(self.frame_display, text="%d" %(idx+1), bd=3)
            self.labelframe_cell.grid(row=row, column=col, padx=10, pady=20)
            self.label_cellimage = tk.Label(self.labelframe_cell, image=cellimage)
            # Keep a reference so the PhotoImage is not garbage-collected.
            self.label_cellimage.image = cellimage
            self.label_cellimage.grid(row=0, column=0, sticky="nw", rowspan=5)
            self.label_cellpath = tk.Label(self.labelframe_cell, text="%s" % os.path.basename(path).split('.')[0])
            self.label_cellcoord = tk.Label(self.labelframe_cell, text="x=%s, y=%s" % (center_x, center_y))
            # self.optionmenu = tk.OptionMenu(self.labelframe_cell, self.selected_options[idx%self.total_cellcnt], *self.phenotypes)
            # Map the dataframe index onto the 0-based per-session index.
            try:
                self.curidx = idx + (self.total_cellcnt - int(self.global_limitmax.get()))
            except ValueError:
                self.curidx = idx
            # Pick up a pre-existing label column when one is present
            # (column 4 without Cell ID, column 5 with it).
            # NOTE(review): DataFrame.ix is deprecated and removed in
            # pandas >= 1.0 -- would need .iloc here to run on modern pandas.
            initlabel = None
            if info_startid == 0:
                if (self.global_colcount.get() == 4):
                    initlabel = self.coord_df.ix[:,3].values[self.curidx]
                    # A float here is pandas NaN, i.e. no initial label.
                    if isinstance(initlabel, float):
                        initlabel = None
            else:
                if (self.global_colcount.get() == 5):
                    initlabel = self.coord_df.ix[:, 4].values[self.curidx]
                    if isinstance(initlabel, float):
                        initlabel = None
            self.optionmenu = tk.OptionMenu(self.labelframe_cell, self.selected_options[self.curidx], *self.phenotypes)
            self.optionmenu.config(width=20)
            self.button_saveptype = tk.Button(self.labelframe_cell, text="Save", name="%s" % str(idx+1))
            # Bind loop variables as lambda defaults so each Save button
            # captures its own cell rather than the last iteration's.
            self.button_saveptype.configure(command=lambda bid=self.curidx, bsave=self.button_saveptype,
                                            opts=self.optionmenu: self.save_phenotype(bid, bsave, opts))
            self.label_cellpath.grid(row=0, column=1, sticky="w", padx=5, pady=(20,0))
            self.label_cellcoord.grid(row=1, column=1, sticky="w", padx=5, pady=0)
            if initlabel:
                self.label_initiallabel = tk.Label(self.labelframe_cell, wraplength=200,
                                                   text="Initial label: %s" % initlabel)
                self.label_initiallabel.grid(row=2, column=1, sticky="w", padx=5, pady=0)
            self.optionmenu.grid(row=3, column=1, padx=5, pady=(20, 0))
            self.button_saveptype.grid(row=4, column=1, padx=5, pady=0)

        # LabelFrame for next button/batch
        self.labelframe_cell = tk.LabelFrame(self.frame_display, text="", bd=0)
        self.labelframe_cell.grid(row=row+1, column=0, columnspan=2, pady=15)
        if self.total_batchpage > 1:
            self.button_prevbatch = tk.Button(self.labelframe_cell, text="Prev",
                                              command=lambda type='prev': self.prevnextbatch(type))
            self.button_nextbatch = tk.Button(self.labelframe_cell, text="Next",
                                              command=lambda type='next': self.prevnextbatch(type))
            self.label_batchpage = tk.Label(self.labelframe_cell, text="Batch %d of %d" % (currentpage,
                                                                                           self.total_batchpage))
            self.button_nextbatch.pack(side='right')
            self.button_prevbatch.pack(side='right')
            self.label_batchpage.pack(side='left')

        # Setup canvas scroll region
        self.frame_display.update_idletasks()
        self.canvas_display.yview_moveto(0)
        self.canvas_display.configure(scrollregion=(0, 0, self.frame_display.winfo_width(),
                                                    self.labelframe_cell.winfo_y() + 90))

    def prevnextbatch(self, type):
        """Switch to the previous/next batch page, wrapping at the ends.

        Previously-built pages are re-shown; unseen pages are built lazily.
        """
        if type == 'next':
            if self.global_currentpage.get() != self.total_batchpage:
                page = self.global_currentpage.get() + 1
            else:
                page = 1
        else:
            if self.global_currentpage.get() != 1:
                page = self.global_currentpage.get() - 1
            else:
                page = self.total_batchpage
        self.global_currentpage.set(page)
        if page in self.frame_alldisplay.keys():
            # Page already built: raise it and hide all the others.
            self.canvas_display.yview_moveto(0)
            self.canvas_display.configure(scrollregion=(0, 0, self.frame_alldisplay[page].winfo_width(),
                                                        self.frame_alldisplay[page].winfo_height() + 45))
            self.frame_alldisplay[page].tkraise()
            self.canvas_display.itemconfigure(self.canvas_allframes[page], state='normal')
            for p in self.frame_alldisplay.keys():
                if p != page:
                    self.canvas_display.itemconfigure(self.canvas_allframes[p], state='hidden')
        else:
            # First visit: hide existing pages, then build this one.
            for p in self.frame_alldisplay.keys():
                if p != page:
                    self.canvas_display.itemconfigure(self.canvas_allframes[p], state='hidden')
            self.create_cellframes(self.coord_df, page)

    def initialize(self):
        """Reset all Tk variables to their default values."""
        self.global_coordfilename.set("No file chosen")
        self.global_ptypefilename.set("No file chosen")
        self.global_labeledcellcnt.set(0)
        self.global_currentpage.set(1)
        self.global_displaycellcnt.set(20)
        self.global_cropsize.set(64)
        self.global_limitcell.set("")
        self.global_limitmax.set("")
        self.global_colcount.set(0)
        self.global_cid_input.set(0)

    def restart(self):
        """Tear down the labelling canvas and return to the upload screen."""
        self.canvas_display.delete('all')
        self.canvas_display.pack_forget()
        self.scroll_vertical.pack_forget()
        self.frame_initial.pack(fill=tk.BOTH, expand=True)
        self.initialize()
        # self.frame_alldisplay = {}
        self.check_uploads()

    def exportdata(self):
        """Write the rows that have a saved label to a user-chosen CSV."""
        outpath = filedialog.asksaveasfilename(initialdir="/home/myra/phenomics/apps/singlecell",
                                               title="Select output folder and filename")
        # Normalize the chosen path to end in .csv.
        if outpath.endswith('.csv'):
            outpath = outpath.split('.')[0] + '.csv'
        else:
            # NOTE(review): when the name contains a dot this appends
            # '.csv' twice (name.csv.csv) -- confirm intended behavior.
            if '.' in outpath:
                outpath = outpath.split('.')[0] + '.csv'
            outpath = outpath + '.csv'
        save_df = self.coord_df.dropna(subset=['Saved Label'])
        save_df.to_csv(outpath, index=False)

    def save_phenotype(self, bid, bsave, opts):
        """Persist the selected phenotype for cell ``bid`` and lock its widgets."""
        self.coord_df.iloc[bid, self.global_colcount.get()] = self.selected_options[bid].get()
        self.global_labeledcellcnt.set(self.global_labeledcellcnt.get() + 1)
        self.global_stats.set("Label count: %d out of %d" % (self.global_labeledcellcnt.get(), self.total_cellcnt))
        bsave.config(state="disabled", text="Saved")
        opts.config(state="disabled")

    def imagecrop(self, imagepath, center_x, center_y):
        """Crop a square around (center_x, center_y), rescale intensities
        to the full 0-255 range, and return a 200x200 PIL image."""
        loc_left = center_x - self.global_cropsize.get()/2
        loc_upper = center_y - self.global_cropsize.get()/2
        loc_right = center_x + self.global_cropsize.get()/2
        loc_lower = center_y + self.global_cropsize.get()/2
        image = Image.open(imagepath)
        # Normalize by the image maximum so dim microscopy images are visible.
        im_arr = np.array(image).astype(float)
        im_scale = 1 / im_arr.max()
        im_new = ((im_arr * im_scale) * 255).round().astype(np.uint8)
        image = Image.fromarray(im_new)
        return image.crop((loc_left, loc_upper, loc_right, loc_lower)).resize((200, 200), Image.LANCZOS)

    def on_mousewheel(self, event):
        """Scroll the main canvas; wheel event semantics differ per platform."""
        if self.os == 'Linux':
            scroll = -1 if event.delta > 0 else 1
            if event.num == 4:
                scroll = scroll * -1
        elif self.os == 'Windows':
            scroll = (-1) * int((event.delta / 120) * 1)
        elif self.os == 'Darwin':
            scroll = event.delta
        else:
            scroll = 1
        self.canvas_display.yview_scroll(scroll, "units")

    def show_error(self, *args):
        """Show an uncaught callback exception in a message box."""
        err = traceback.format_exception(*args)
        messagebox.showerror('Exception', err)

    # catch errors and show message to user
    tk.Tk.report_callback_exception = show_error
if __name__ == "__main__":
root = tk.Tk()
menu = Menu(root)
root.mainloop()
| 51.159204 | 132 | 0.596616 | 2,475 | 20,566 | 4.803232 | 0.147879 | 0.059724 | 0.019684 | 0.016824 | 0.408059 | 0.318977 | 0.244196 | 0.213493 | 0.129795 | 0.082015 | 0 | 0.018561 | 0.284839 | 20,566 | 401 | 133 | 51.286783 | 0.789706 | 0.035495 | 0 | 0.150769 | 0 | 0 | 0.065657 | 0.00545 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043077 | false | 0 | 0.027692 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6b326441a9a486ddfcf807ab05bc6fc1032ac9c | 761 | py | Python | rss_temple/api/migrations/0006_dedup_feedentries_20210401.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | rss_temple/api/migrations/0006_dedup_feedentries_20210401.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | 8 | 2019-12-04T21:58:35.000Z | 2021-12-15T02:29:49.000Z | rss_temple/api/migrations/0006_dedup_feedentries_20210401.py | murrple-1/rss_temple | 289197923b1e7d1213f1673d164337df17d7269b | [
"MIT"
] | null | null | null | from django.db import migrations
def _forward_func_deduplication_feed_entries(apps, schema_editor):
    """Delete FeedEntry rows that duplicate (feed_id, url, updated_at).

    The first row seen for each key is kept; later duplicates are removed.
    """
    FeedEntry = apps.get_model('api', 'FeedEntry')

    seen = set()
    duplicates = []
    for entry in FeedEntry.objects.all():
        key = (entry.feed_id, entry.url, entry.updated_at)
        if key in seen:
            duplicates.append(entry)
        else:
            seen.add(key)

    for duplicate in duplicates:
        duplicate.delete()
class Migration(migrations.Migration):
    # Runs after the previous auto-generated migration of the 'api' app.
    dependencies = [
        ('api', '0005_auto_20210331_1716'),
    ]

    # Data-only migration: removes duplicate FeedEntry rows.  No reverse
    # operation is provided, so this migration is irreversible.
    operations = [
        migrations.RunPython(_forward_func_deduplication_feed_entries),
    ]
| 24.548387 | 71 | 0.660972 | 89 | 761 | 5.280899 | 0.505618 | 0.134043 | 0.102128 | 0.119149 | 0.148936 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02807 | 0.250986 | 761 | 30 | 72 | 25.366667 | 0.796491 | 0 | 0 | 0 | 0 | 0 | 0.049934 | 0.030223 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
b6b792498459995b89df7fac86e7a4c838ae82e7 | 6,200 | py | Python | setup.py | bark-simulator/bark-ml | 68a4244e91779667c98396c51ee713676bf1dfea | [
"MIT"
] | 58 | 2019-10-07T12:10:27.000Z | 2022-03-01T08:08:47.000Z | setup.py | bark-simulator/bark-ml | 68a4244e91779667c98396c51ee713676bf1dfea | [
"MIT"
] | 31 | 2019-09-10T15:33:20.000Z | 2022-03-30T08:52:08.000Z | setup.py | bark-simulator/bark-ml | 68a4244e91779667c98396c51ee713676bf1dfea | [
"MIT"
] | 14 | 2019-10-01T08:23:37.000Z | 2021-12-16T15:55:38.000Z | from setuptools import setup, find_packages, Extension
import os
from setuptools import setup, find_packages, Extension
import os,sys
import os
import shlex
import shutil
import setuptools.command.build_ext
import setuptools.command.build_py
import setuptools.command.install
import setuptools.command.sdist
import setuptools.dist
from setuptools import dist
from setuptools.command.install import install
import sysconfig
import tempfile
import pkg_resources
from distutils.command.build import build
with open("Readme.md", "r") as fh:
long_description = fh.read()
def _configure_macos_deployment_target():
    """Raise MACOSX_DEPLOYMENT_TARGET to at least 10.14 and export it.

    TensorStore requires MACOSX_DEPLOYMENT_TARGET >= 10.14 in order to
    support sized/aligned operator new/delete.  The value is exported in
    the environment because the `wheel` package reads it from there;
    Bazel receives the version via a command-line option instead.

    Returns the deployment target string that was exported.
    """
    minimum_target = '10.14'
    env_key = 'MACOSX_DEPLOYMENT_TARGET'
    # Start from the target the current Python interpreter was built with.
    target = str(sysconfig.get_config_var(env_key))
    too_old = target and (pkg_resources.parse_version(target) <
                          pkg_resources.parse_version(minimum_target))
    if too_old:
        target = minimum_target
    os.environ[env_key] = target
    return target
# On macOS, compute the deployment target once at import time so both the
# wheel machinery and the Bazel invocation can use the same value.
if 'darwin' in sys.platform:
    _macos_deployment_target = _configure_macos_deployment_target()
class CustomBuild(build):
    # Redirect distutils build artifacts into the `_build` directory.
    def run(self):
        # NOTE(review): this only sets the output directory and never calls
        # build.run(self); presumably the actual compilation happens through
        # Bazel in BuildExtCommand -- confirm skipping the default build
        # steps here is intentional.
        self.build_lib = '_build'
# The `wheel` package is optional: when available, subclass bdist_wheel so
# the produced wheel is marked platform-specific (it bundles native code).
try:
    from wheel.bdist_wheel import bdist_wheel as _bdist_wheel

    class bdist_wheel(_bdist_wheel):
        def finalize_options(self):
            _bdist_wheel.finalize_options(self)
            # The wheel ships a compiled extension, so it is not pure Python.
            self.root_is_pure = False
except ImportError:
    # wheel is unavailable; setup() receives None and skips the command.
    bdist_wheel = None
class BinaryDistribution(dist.Distribution):
    """Distribution that always reports bundled native extension modules.

    Forces setuptools to treat the package as platform-specific even though
    no ext_modules are declared in the setup() call (Bazel builds them).
    """

    def is_pure(self):
        """The package ships compiled code, so it is never pure Python."""
        return False

    def has_ext_modules(self):
        """Report extension modules despite none being declared to setup()."""
        return True
class InstallPlatlib(install):
    # Install into the platform-specific (platlib) directory whenever the
    # distribution reports extension modules, matching how binary wheels
    # are laid out.
    def finalize_options(self):
        install.finalize_options(self)
        if self.distribution.has_ext_modules():
            self.install_lib = self.install_platlib
class BuildExtCommand(setuptools.command.build_ext.build_ext):
    """Overrides default build_ext command to invoke bazel."""

    def run(self):
        """Build the //bark_ml:pip_package target via bazelisk and copy the
        resulting extension module into the source tree.

        Environment overrides: BARK_ML_PREBUILT_DIR skips the Bazel build,
        BARK_ML_BAZELISK names the bazelisk launcher, and
        BARK_ML_BAZEL_COMPILATION_MODE forces a compilation mode.
        """
        if not self.dry_run:
            prebuilt_path = os.getenv('BARK_ML_PREBUILT_DIR')
            if not prebuilt_path:
                # Ensure python_configure.bzl finds the correct Python version.
                os.environ['PYTHON_BIN_PATH'] = sys.executable
                bazelisk = os.getenv('BARK_ML_BAZELISK', 'bazelisk.py')
                # Controlled via `setup.py build_ext --debug` flag.
                default_compilation_mode = 'dbg' if self.debug else 'opt'
                compilation_mode = os.getenv('BARK_ML_BAZEL_COMPILATION_MODE',
                                             default_compilation_mode)
                build_command = [sys.executable, '-u', bazelisk] + [
                    'build',
                    '-c',
                    compilation_mode,
                    '//bark_ml:pip_package',
                    '--verbose_failures'
                ]
                if 'darwin' in sys.platform:
                    # Note: Bazel does not use the MACOSX_DEPLOYMENT_TARGET environment
                    # variable.
                    build_command += ['--macos_minimum_os=%s' % _macos_deployment_target]
                    build_command += ['--define=build_platform=macos']
                if sys.platform == 'win32':
                    # Disable newer exception handling from Visual Studio 2019, since it
                    # requires a newer C++ runtime than shipped with Python.
                    #
                    # https://cibuildwheel.readthedocs.io/en/stable/faq/#importerror-dll-load-failed-the-specific-module-could-not-be-found-error-on-windows
                    build_command += ['--copt=/d2FH4-']
                self.spawn(build_command)
            # Copy the built extension next to the package sources.
            # NOTE(review): built_ext_path always points at bazel-bin and the
            # destination is always core.so even on Windows -- confirm the
            # prebuilt-dir path and the .pyd case are handled elsewhere.
            suffix = '.pyd' if os.name == 'nt' else '.so'
            built_ext_path = os.path.join(
                'bazel-bin/bark_ml/pip_package.runfiles/bark_ml/bark_ml/core' + suffix)
            # os.makedirs(os.path.dirname(ext_full_path), exist_ok=True)
            copy_to = os.path.dirname(os.path.abspath(__file__)) + "/bark_ml/core.so"
            copy_to_manifest = os.path.dirname(os.path.abspath(__file__)) + "/MANIFEST.in"
            print('Copying extension %s -> %s' % (
                built_ext_path,
                copy_to
            ))
            shutil.copyfile(built_ext_path, copy_to)
# Package metadata and build wiring.  BinaryDistribution plus the custom
# cmdclass entries make setuptools treat this as a platform wheel whose
# extension module is produced by Bazel rather than by distutils.
setup(
    name = "bark-ml",
    version = "0.4.29",
    description = "Gym Environments and Agents for Autonomous Driving",
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers = ["Development Status :: 4 - Beta",
                   "Intended Audience :: Science/Research",
                   "License :: OSI Approved :: MIT License",
                   "Operating System :: OS Independent",
                   "Programming Language :: Python :: 3.7",
                   "Programming Language :: Python :: 3.8"],
    keywords = "gym environments, reinforcement learning, autonomous driving, machine learning",
    url = "https://github.com/bark-simulator/bark-ml",
    author = "Patrick Hart, Julian Bernhard, Klemens Esterle, Tobias Kessler",
    author_email = "patrickhart.1990@gmail.com",
    license = "MIT",
    packages=find_packages(),
    install_requires=[
        'pygame>=1.9.6',
        'gym>=0.17.2',
        'tensorflow>=2.2.0',
        'tensorboard>=2.2.2',
        'tf-agents>=0.5.0',
        'tensorflow-probability>=0.10.0',
        'bark-simulator>=1.4.8',
        'graph-nets>=1.1.0'
    ],
    # Route build/install commands through the Bazel-aware subclasses above.
    cmdclass={
        'bdist_wheel': bdist_wheel,
        'build_ext': BuildExtCommand,
        'install': InstallPlatlib,
        'build': CustomBuild
    },
    test_suite='nose.collector',
    tests_require=['nose'],
    include_package_data=True,
    zip_safe=False,
    distclass=BinaryDistribution,
    python_requires='>=3.7',
)
| 36.904762 | 146 | 0.67629 | 771 | 6,200 | 5.211414 | 0.372244 | 0.054754 | 0.023644 | 0.023892 | 0.123196 | 0.067944 | 0.041812 | 0.026879 | 0.026879 | 0 | 0 | 0.011944 | 0.216774 | 6,200 | 167 | 147 | 37.125749 | 0.815486 | 0.208226 | 0 | 0.07874 | 0 | 0 | 0.223611 | 0.053495 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055118 | false | 0 | 0.15748 | 0.015748 | 0.275591 | 0.007874 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcab1ef55255f164328a1ed4e5f9679cb1a3f825 | 8,198 | py | Python | ml_aos/david_net.py | jfcrenshaw/ml-aos | 762509a77d809954749aea9b2b4e594951255c47 | [
"MIT"
] | null | null | null | ml_aos/david_net.py | jfcrenshaw/ml-aos | 762509a77d809954749aea9b2b4e594951255c47 | [
"MIT"
] | null | null | null | ml_aos/david_net.py | jfcrenshaw/ml-aos | 762509a77d809954749aea9b2b4e594951255c47 | [
"MIT"
] | null | null | null | """Pytorch neural network to predict zernike coefficients from donut images.
My implementation of the network presented in David Thomas's PhD Thesis
at Stanford.
"""
import numpy as np
import torch
from torch import nn
class DavidNet(nn.Module):
    """Network to predict wavefront Zernike coefficients from donut images.

    Consists of a DonutNet that creates image features from the donut image.
    These are concatenated with a set of meta parameters (usually the donut's
    location on the focal plane), which is then passed to the MetaNet, which
    predicts a set of Zernike coefficients.
    """

    def __init__(self, n_meta_layers: int) -> None:
        """Create a WaveNet to predict Zernike coefficients for donut images.

        Parameters
        ----------
        n_meta_layers: int
            Number of fully connected layers in the MetaNet.
        """
        super().__init__()
        self.donut_net = DonutNet()
        self.meta_net = MetaNet(n_meta_layers)

    def forward(
        self,
        image: torch.Tensor,
        fx: torch.Tensor,
        fy: torch.Tensor,
        intra: torch.Tensor,
    ) -> torch.Tensor:
        """Predict Zernike coefficients for the donut image.

        Parameters
        ----------
        image: torch.Tensor
            The donut image
        fx: torch.Tensor
            The x angle of the source with respect to the optic axis
        fy: torch.Tensor
            The y angle of the source with respect to the optic axis
        intra: torch.Tensor
            Boolean indicating whether the donut is intra or extra focal

        Returns
        -------
        torch.Tensor
            Array of Zernike coefficients
        """
        image_features = self.donut_net(image)
        # Use torch's native `dim` keyword instead of the NumPy-style
        # `axis` alias; behavior is identical.
        features = torch.cat([image_features, fx, fy, intra], dim=1)
        return self.meta_net(features)
class DonutNet(nn.Module):
    """Encode a donut image as a flat 1024-dimensional feature vector.

    Takes batches of 1x256x256 donut images and produces a
    (batch x 1024) representation.
    """

    def __init__(self) -> None:
        """Create the donut encoder network."""
        super().__init__()
        # Initial convolution: keeps the 256x256 spatial size but expands
        # the channel count from 1 to 8.
        modules = [
            nn.Conv2d(1, 8, 3, stride=1, padding=1),
            nn.BatchNorm2d(8),
            nn.ReLU(inplace=True),
        ]
        # Seven DownBlocks: each doubles the channels (8 -> 1024) while
        # halving the height and width.
        modules += [DownBlock(2 ** (i + 3), 2 ** (i + 4)) for i in range(7)]
        # A final DownBlock that halves the spatial size only (channels
        # stay at 1024), then flatten to a vector.
        modules.append(DownBlock(2 ** 10, 2 ** 10))
        modules.append(nn.Flatten())
        self.layers = nn.ModuleList(modules)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return the latent encoding of the donut image.

        Parameters
        ----------
        x: torch.Tensor
            Input images of shape (batch x 256 x 256)

        Returns
        -------
        torch.Tensor
            Latent space encoding of shape (batch x 1024)
        """
        out = x
        for module in self.layers:
            out = module(out)
        return out
class DownBlock(nn.Module):
    """Convolutional block that halves the image height and width.

    A residual (skip) convolution that preserves dimensions, followed by a
    strided convolution that downsamples both spatial dimensions by 2.
    """

    def __init__(self, in_channels: int, out_channels: int) -> None:
        """Create a downblock that reduces image dimensions.

        Parameters
        ----------
        in_channels: int
            The number of input channels
        out_channels: int
            The number of output channels
        """
        super().__init__()
        self.layers = nn.ModuleList(
            [
                # dimension-preserving residual convolution
                SkipBlock(in_channels),
                # stride-2 convolution: halves height and width
                nn.Conv2d(in_channels, out_channels, 3, stride=2, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return a convolved image with half the height and width.

        Parameters
        ----------
        x: torch.Tensor
            Input image of shape (batch x in_channels x height x width)

        Returns
        -------
        torch.Tensor
            Output image of shape (batch x out_channels x height/2 x width/2)
        """
        out = x
        for module in self.layers:
            out = module(out)
        return out
class SkipBlock(nn.Module):
    """Convolution with a residual connection: output = input + conv(input)."""

    def __init__(self, channels: int) -> None:
        """Create a convolution layer with a skip connection.

        Parameters
        ----------
        channels: int
            The number of input and output channels for the convolution.
        """
        super().__init__()
        # residual branch: conv -> batchnorm -> relu, all dimension-preserving
        self.layers = nn.Sequential(
            nn.Conv2d(channels, channels, 3, stride=1, padding="same"),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Convolve the image and add the result to the original input.

        Parameters
        ----------
        x: torch.Tensor
            Input image of shape (batch x channels x height x width)

        Returns
        -------
        torch.Tensor
            Output image of shape (batch x channels x height x width)
        """
        return x + self.layers(x)
class MetaNet(nn.Module):
    """Map image features plus meta parameters onto Zernike coefficients.

    A stack of fully connected layers whose widths shrink geometrically
    from the input dimension down to N_ZERNIKES.
    """

    # number of Zernike coefficients to predict
    N_ZERNIKES = 18
    # number of meta parameters to use in prediction
    N_METAS = 3
    # dimension of the image features, as produced by DonutNet
    IMAGE_DIM = 1024

    def __init__(self, n_layers: int) -> None:
        """Create a MetaNet to map image features and meta params to Zernikes.

        Parameters
        ----------
        n_layers: int
            The number of layers in the MetaNet.
        """
        super().__init__()
        # layer widths follow a geometric series from input to output size
        widths = np.geomspace(
            self.IMAGE_DIM + self.N_METAS,
            self.N_ZERNIKES,
            n_layers + 1,
            dtype=int,
        )
        self.layers = nn.ModuleList()
        for i, (n_in, n_out) in enumerate(zip(widths[:-2], widths[1:-1])):
            # hidden layers: Linear -> BatchNorm -> ReLU
            self.layers.append(nn.Linear(n_in, n_out))
            self.layers.append(nn.BatchNorm1d(n_out))
            self.layers.append(nn.ReLU(inplace=True))
            if i == 0:
                # dropout after the first hidden layer for regularization
                self.layers.append(nn.Dropout(0.1))
        # output layer: plain Linear, no BatchNorm or ReLU
        self.layers.append(nn.Linear(widths[-2], widths[-1]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map image features and meta parameters onto Zernikes.

        Parameters
        ----------
        x: torch.Tensor
            Input vector of image features and meta parameters.

        Returns
        -------
        torch.Tensor
            Array of Zernike coefficients. Size = cls.N_ZERNIKES
        """
        out = x
        for module in self.layers:
            out = module(out)
        return out
| 30.475836 | 78 | 0.580385 | 1,001 | 8,198 | 4.669331 | 0.216783 | 0.06119 | 0.020539 | 0.016688 | 0.284339 | 0.211596 | 0.18849 | 0.15169 | 0.129226 | 0.118742 | 0 | 0.014265 | 0.333008 | 8,198 | 268 | 79 | 30.589552 | 0.840527 | 0.491095 | 0 | 0.26087 | 0 | 0 | 0.001169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.032609 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcacbde3bba335a6b61bda2101f46d6c3839fe23 | 26,845 | py | Python | FastAutoAugment/utils/utils.py | ironluffy/fast-autoaugment | eaae5a6172afe28ba3053021c97e2cb09d170969 | [
"MIT"
] | null | null | null | FastAutoAugment/utils/utils.py | ironluffy/fast-autoaugment | eaae5a6172afe28ba3053021c97e2cb09d170969 | [
"MIT"
] | 12 | 2020-11-08T16:51:28.000Z | 2020-11-15T16:31:57.000Z | FastAutoAugment/utils/utils.py | ironluffy/fast-autoaugment | eaae5a6172afe28ba3053021c97e2cb09d170969 | [
"MIT"
] | null | null | null | import torch
import open3d
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import itertools
import imageio
from torch.autograd import Variable
from src.utils import point_cloud_utils as pcu
from sklearn.metrics import confusion_matrix
def weights_init(m):
    """Initialize Conv1d/Linear modules (intended for use with Module.apply).

    Weights get Kaiming-normal initialization with ReLU gain; biases, when
    present, are zeroed.  Every other module type is left untouched.
    """
    if m.__class__.__name__ not in ('Conv1d', 'Linear'):
        return
    torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
    if m.bias is not None:
        torch.nn.init.constant_(m.bias, 0)
def pc_to_grid(point_cloud, grid_rate):
    """Reorder a (B, C, N) point cloud into a coordinate-sorted grid layout.

    For each channel in turn, sorts the points along that channel and then
    reshapes so axis 2 grows to ``grid_rate**(c+1)``.

    NOTE(review): after the first iteration ``grid_pc`` is 4-D, so the
    ``[:, c, :]`` indexing and gather on later passes operate on the
    reshaped tensor -- confirm this interleaving is intentional.  The
    ``* 3`` stack also assumes C == 3.
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    grid_pc = point_cloud.to(device)  # input is already on `device`; effectively a handle copy
    for c in range(C):
        point_matrix = grid_pc[:, c, :]  # (B, N)
        sorted_matrix = torch.sort(point_matrix, dim=-1)  # (B, N)
        # replicate the sort indices across the (assumed 3) channels
        indices_matrix = torch.stack([sorted_matrix[1]] * 3, dim=1)
        grid_pc = torch.gather(grid_pc, -1, indices_matrix).view(B, C, pow(grid_rate, c + 1), -1)
    return grid_pc
def pc_to_regular_grid(point_cloud, grid_rate):
    """Summarize a point cloud over a regular grid of grid_rate**3 voxels.

    For every voxel of the [-1, 1]^3 cube, computes the mean and std of the
    zero-masked coordinates, then returns pairwise differences between voxel
    statistics, stacked as a (B, 2, V, V) tensor with V = grid_rate**3.

    NOTE(review): the masking multiplies coordinates by a boolean mask, so
    points outside a voxel contribute zeros to that voxel's mean/std rather
    than being excluded -- confirm this is intended.
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # voxel boundary positions, one row per axis (assumes 3 channels)
    grid_scale = torch.stack([torch.linspace(-1, 1, grid_rate + 1)] * 3, dim=0).to(device)
    seg_masks = torch.zeros([0], dtype=torch.bool).to(device)  # currently unused (cat below is commented out)
    seg_mean = torch.zeros([0], dtype=torch.float).to(device)
    seg_std = torch.zeros([0], dtype=torch.float).to(device)
    # iterate over every voxel index (ix, iy, iz)
    for idx in list(itertools.product(list(range(grid_rate)), repeat=3)):
        idx = torch.tensor(idx).unsqueeze(1).to(device)
        # points strictly above the lower bound and at/below the upper bound
        seg_mask = (torch.gather(grid_scale, 1, idx) < point_cloud) * \
                   (torch.gather(grid_scale, 1, idx + 1) >= point_cloud)
        # seg_masks = torch.cat([seg_masks, seg_mask], dim=1)
        seg_mean = torch.cat([seg_mean, (point_cloud * seg_mask).mean(-1).unsqueeze(-1)], dim=2)
        seg_std = torch.cat([seg_std, (point_cloud * seg_mask).std(-1).unsqueeze(-1)], dim=2)
    # pairwise differences between per-voxel statistics (V x V matrices)
    seg_mean = torch.stack([seg_mean.mean(dim=1)] * seg_mean.size(2), dim=1) \
               - torch.stack([seg_mean.mean(dim=1)] * seg_mean.size(2), dim=2)
    seg_std = torch.stack([seg_std.mean(dim=1)] * seg_std.size(2), dim=1) \
              - torch.stack([seg_std.mean(dim=1)] * seg_std.size(2), dim=2)
    return torch.stack([seg_mean, seg_std], dim=1)
def plot_3d_point_cloud(x, y, z, show=True, show_axis=True, in_u_sphere=False,
                        marker='.', s=8, alpha=.8, figsize=(5, 5), elev=10,
                        azim=240, axis=None, title=None, *args, **kwargs):
    """Scatter-plot a 3D point cloud and return the matplotlib figure.

    x, y, z are per-axis coordinate sequences.  When ``axis`` is given the
    points are drawn on it; otherwise a new 3D figure is created.  With
    ``in_u_sphere`` the limits are fixed to [-0.5, 0.5]; otherwise they are
    derived from the data.  Extra *args/**kwargs are forwarded to scatter
    (a ``c`` kwarg additionally triggers a colorbar).
    """
    plt.switch_backend('agg')  # non-interactive backend; plt.show() below becomes a no-op
    if axis is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')
    else:
        ax = axis
        fig = axis
    if title is not None:
        plt.title(title)
    sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, *args, **kwargs)
    ax.view_init(elev=elev, azim=azim)
    if in_u_sphere:
        ax.set_xlim3d(-0.5, 0.5)
        ax.set_ylim3d(-0.5, 0.5)
        ax.set_zlim3d(-0.5, 0.5)
    else:
        # Multiply with 0.7 to squeeze free-space.
        miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)])
        mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)])
        ax.set_xlim(miv, mav)
        ax.set_ylim(miv, mav)
        ax.set_zlim(miv, mav)
    plt.tight_layout()
    if not show_axis:
        plt.axis('off')
    if 'c' in kwargs:
        plt.colorbar(sc)
    if show:
        plt.show()
    return fig
def plot_3d_colormap(point_clouds, max_points, max_count, show_axis=True, in_u_sphere=False,
                     marker='.', s=10, alpha=.8, figsize=(10, 10), elev=10,
                     azim=240, axis=None, title=None, *args, **kwargs):
    """Plot a point cloud in gray with a rainbow-colored highlighted subset.

    ``point_clouds`` and ``max_points`` are (x, y, z) coordinate triples;
    ``max_count`` colors the highlighted points (shown with a colorbar).
    Returns the matplotlib figure.
    """
    plt.switch_backend('agg')  # non-interactive backend
    x, y, z = point_clouds
    m_x, m_y, m_z = max_points
    if axis is None:
        fig = plt.figure(figsize=figsize)
        ax = fig.add_subplot(111, projection='3d')
        # ax2 = fig.add_subplot(122, projection='3d')
    else:
        ax = axis
        fig = axis
    if title is not None:
        plt.title(title)
    # background cloud in light gray, highlighted points colored by max_count
    sc_pc = ax.scatter(x, y, z, marker=marker, c='lightgray', s=s, alpha=alpha)
    sc_max_pc = ax.scatter(m_x, m_y, m_z, marker=marker, c=max_count, cmap='rainbow', s=s, alpha=alpha)
    plt.colorbar(sc_max_pc, label='max_count')
    ax.view_init(elev=elev, azim=azim)
    if in_u_sphere:
        ax.set_xlim3d(-0.5, 0.5)
        ax.set_ylim3d(-0.5, 0.5)
        ax.set_zlim3d(-0.5, 0.5)
    else:
        # Multiply with 0.7 to squeeze free-space.
        miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)])
        mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)])
        ax.set_xlim(miv, mav)
        ax.set_ylim(miv, mav)
        ax.set_zlim(miv, mav)
    plt.tight_layout()
    if not show_axis:
        plt.axis('off')
    return fig
def colormap_save(dataloader, model, device, domain, save_dir, num_class, max_num_sample, target_domain=None):
    """Save per-class colormap images of model max-activation points.

    Runs ``model`` over ``dataloader`` (in eval mode, no grad) and, for up to
    ``max_num_sample`` samples per class, saves a colormap figure of the
    points that were selected as max indices.  ``domain`` chooses the output
    subfolder and filtering: 'source' keeps only correctly-classified
    samples; 'target' groups by predicted label instead.
    """
    # fixed label mapping for the 10-class point-cloud dataset
    idx_to_label = {0: "bathtub", 1: "bed", 2: "bookshelf", 3: "cabinet",
                    4: "chair", 5: "lamp", 6: "monitor",
                    7: "plant", 8: "sofa", 9: "table"}
    # per-class counter of how many images have been written so far
    sample_num = torch.zeros([num_class], dtype=torch.int).to(device)
    with torch.no_grad():
        model.eval()
        for data in dataloader:
            point_clouds = data['point_cloud'].to(device)
            labels = data['label'].to(device)
            # model is expected to return (logits, max-activation indices)
            pred, max_idx = model(point_clouds)
            if domain == 'source':
                save_path = os.path.join(save_dir, 'src')
                # keep only samples the model classified correctly
                mask = (labels == pred.max(dim=1)[1])
                point_clouds = point_clouds[mask, :]
                labels = labels[mask]
                max_idx = max_idx[mask, :]
            elif domain == 'target':
                save_path = os.path.join(save_dir, 'trg_{}'.format(target_domain))
                pred_labels = pred.max(dim=1)[1]
            else:
                raise NotImplementedError
            point_clouds = point_clouds.cpu()
            for k in range(point_clouds.size(0)):
                class_idx = int(labels[k])
                if domain == 'target': class_idx = int(pred_labels[k])
                if sample_num[class_idx] == max_num_sample: continue
                sample_num[class_idx] += 1
                class_label = idx_to_label[class_idx]
                image_path = os.path.join(save_path, '{}'.format(class_label))
                os.makedirs(image_path, exist_ok=True)
                # frequency of each point index among the max activations,
                # min-max normalized to [0, 1] for the colormap
                max_list, max_count = np.unique(max_idx[k].cpu(), return_counts=True)
                max_list = torch.tensor(max_list)
                max_count = (max_count - max_count.min()) / (max_count.max() - max_count.min())
                max_pc = torch.gather(point_clouds[k, :, :], 1,
                                      torch.stack([max_list] * 3, dim=0))
                # Colormap
                if domain == 'source':
                    img_title = '{}'.format(class_label)
                else:
                    true_label = idx_to_label[int(labels[k])]
                    img_title = 'true label : {}\npred label : {}'.format(true_label, class_label)
                fig = plot_3d_colormap(point_clouds[k, :, :], max_pc, max_count,
                                       in_u_sphere=True, show=False, title=img_title)
                fig.savefig(os.path.join(image_path, '{}.png'.format(sample_num[class_idx])))
                plt.close(fig)
            # stop once every class has max_num_sample saved images
            if sample_num.sum() == max_num_sample * num_class: break
def image_save(point_cloud, save_dir, save_folder, save_name, img_title, batch_idx=0, folder_numbering=True):
    """Render each point cloud in the batch to a PNG under save_dir.

    With ``folder_numbering`` each sample gets its own numbered folder
    (save_folder_<index>/save_name.png); otherwise all images go into one
    folder with numbered filenames (save_folder/save_name_<index>.png).
    """
    for k in range(point_cloud.size(0)):
        fig = plot_3d_point_cloud(point_cloud[k][0], point_cloud[k][1], point_cloud[k][2],
                                  in_u_sphere=True, show=False,
                                  title='{}'.format(img_title))
        if folder_numbering:
            save_path = os.path.join(save_dir, '{}_{}'.format(save_folder, batch_idx * point_cloud.size(0) + k))
            os.makedirs(save_path, exist_ok=True)
            fig.savefig(os.path.join(save_path, '{}.png'.format(save_name)))
        else:
            save_path = os.path.join(save_dir, '{}'.format(save_folder))
            os.makedirs(save_path, exist_ok=True)
            fig.savefig(
                os.path.join(save_path, '{}_{}.png'.format(save_name, batch_idx * point_cloud.size(0) + k)))
        # close the figure to avoid accumulating open matplotlib figures
        plt.close(fig)
def make_training_sample(point_cloud):
    """Build a noisy sample set for a (B, C, N) cloud and its distances.

    The sample is N points: N/4 pure Gaussian noise plus three N/4 subsets
    jittered with increasing sigma/clip.  Returns (sample, sample_dist)
    where sample_dist is the cross-product tangent distance of each sample
    point to the original cloud.
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # first quarter: pure Gaussian noise
    sample = torch.randn(B, C, int(N / 4)).to(device)
    # progressively stronger jitter for the remaining three quarters
    sigma = [0.1, 0.15, 0.2]
    clip = [0.2, 0.3, 0.4]
    for i in range(3):
        # jitter the full cloud, then keep a random quarter of the points
        jittering_sample = pcu.jitter(point_cloud, sigma=sigma[i], clip=clip[i])[:, :, torch.randperm(N)[:int(N / 4)]]
        sample = torch.cat([sample, jittering_sample], dim=2)
    sample_dist = point_cloud_distance_cp(point_cloud, sample, sampling=True).squeeze(dim=-1)
    return sample, sample_dist
def knn_point_sampling(point_cloud, target_points, sample_num):
    """For every target point, gather its sample_num nearest cloud points.

    Parameters
    ----------
    point_cloud : (B, C, N) tensor of candidate points
    target_points : (B, C, M) tensor of query points
    sample_num : number of neighbours to keep per query point

    Returns
    -------
    (B, C, M, sample_num) tensor of nearest neighbours per query point,
    ordered by increasing Euclidean distance.
    """
    dev = point_cloud.device
    num_points = point_cloud.shape[2]
    num_queries = target_points.shape[2]
    # replicate both tensors to a common (B, C, M, N) pairwise layout
    cloud_rep = torch.stack([point_cloud] * num_queries, dim=2)
    query_rep = torch.stack([target_points] * num_points, dim=3)
    # Euclidean distance between every query/cloud pair: (B, M, N)
    pair_dist = (cloud_rep - query_rep).pow(2).sum(dim=1).sqrt().to(dev)
    # indices of the sample_num closest cloud points per query
    _, nn_idx = torch.topk(pair_dist, sample_num, largest=False)
    idx_rep = torch.stack([nn_idx] * 3, dim=1)
    return torch.gather(cloud_rep, 3, idx_rep)
def point_cloud_distance_svd(point_cloud, target_points, k=5, p=0.01, sampling=False):
    """Tangent-plane distance between a cloud and target points via local SVD.

    For each target point, fits a plane to its k nearest cloud points (the
    smallest singular vector of the local covariance is the plane normal)
    and measures the distance to that plane.  With ``sampling`` the raw
    per-point distances are returned; otherwise a scalar loss with a
    density regularization term is returned.
    """
    if point_cloud.shape != target_points.shape:
        raise NotImplementedError
    device = point_cloud.device
    B, C, N = point_cloud.shape
    knn_points_matrix = knn_point_sampling(point_cloud, target_points, k)
    # center the k neighbours of each target point
    p_hat_matrix = torch.mean(knn_points_matrix, dim=3)
    p_matrix = (knn_points_matrix - p_hat_matrix.unsqueeze(dim=3))
    # local covariance (B, N, C, C); its smallest singular vector is the normal
    M_matrix = torch.matmul(p_matrix.permute(0, 2, 1, 3), p_matrix.permute(0, 2, 3, 1)) / k
    U_matrix, S_matrix, V_matrix = torch.svd(M_matrix)
    norm_matrix = U_matrix[:, :, :, 2]
    # pick one of the k neighbours at random as a point on the plane
    # (a full-shape randint is generated then sliced -- wasteful but correct)
    random_point_matrix = torch.gather(knn_points_matrix, 3,
                                       torch.randint(k, knn_points_matrix.shape)[:, :, :, 0:1].to(device)).squeeze()
    # |normal . (target - plane_point)| = distance to the tangent plane
    tangent_dist_matrix = torch.abs(torch.matmul(norm_matrix.unsqueeze(dim=2),
                                                 (target_points - random_point_matrix).permute(0, 2, 1).unsqueeze(3)))
    # regularize
    if sampling:
        return tangent_dist_matrix
    else:
        # O(N^2) self-distance used to estimate the cloud's local density
        point_cloud_matrix = torch.stack([point_cloud] * N, dim=2)
        points_matrix = torch.stack([point_cloud] * N, dim=3)
        self_dist_matrix = (point_cloud_matrix - points_matrix).pow(2).sum(dim=1).sqrt()
        knn_matrix = torch.topk(self_dist_matrix, k, largest=False, sorted=True)
        reg = torch.clamp(torch.mean(knn_matrix[0]), min=0.1)
        loss = tangent_dist_matrix.mean() + (1 / reg) * p
        return loss
def point_cloud_distance_cp(point_cloud, target, k=3, sampling=False):
    """Tangent-plane distance using a cross-product plane normal.

    For each target point, takes its 3 nearest cloud points, builds the
    plane normal from the cross product of two edge vectors, and measures
    the distance of the target to that plane.  With ``sampling`` the raw
    per-point distances are returned; otherwise their mean.
    """
    if point_cloud.shape != target.shape:
        raise NotImplementedError
    knn_points_matrix = knn_point_sampling(point_cloud, target, k)
    # Cross product
    ref_point = knn_points_matrix[:, :, :, 0]
    # normal of the plane through the three nearest neighbours
    cross_norm_matrix = torch.cross((ref_point - knn_points_matrix[:, :, :, 1]).transpose(2, 1),
                                    (ref_point - knn_points_matrix[:, :, :, 2]).transpose(2, 1))
    # normalize to unit length (axis= is torch's numpy-style alias for dim=)
    normalize_norm = torch.mul(cross_norm_matrix,
                               1 / torch.stack([cross_norm_matrix.pow(2).sum(axis=2).sqrt()] * 3, dim=2))
    # |unit_normal . (target - ref_point)| = distance to the plane
    cross_tangent_dist_matrix = torch.abs(torch.matmul(normalize_norm.unsqueeze(dim=2),
                                                       (target - ref_point).transpose(2, 1).unsqueeze(dim=3)))
    if sampling:
        return cross_tangent_dist_matrix
    else:
        loss2 = cross_tangent_dist_matrix.mean()
        return loss2
def point_cloud_segmentation_tangent_loss(point_clouds, pred, knn_num, device):
    """Weighted tangent-plane loss over predicted segmentation parts.

    For each segmentation class, takes the knn_num points with the highest
    softmax weight, fits a plane via SVD of the weighted covariance of the
    centered points, and penalizes their distance from that plane.

    Parameters
    ----------
    point_clouds : (B, 3, N) tensor of points
    pred : (B, S, N) raw segmentation logits
    knn_num : number of top-weighted points per class
    device : torch device for the accumulated outputs

    Returns
    -------
    (tangent_loss, part_mean_points, weighted_part_mean_points)
        scalar loss, (B, S, 3) per-part mean points, and (B, S, 3)
        weight-scaled per-part mean points.
    """
    tangent_loss_sum = 0.0
    num_seg_class = pred.size(1)
    part_mean_points = torch.zeros([0], dtype=torch.float).to(device)
    weighted_part_mean_points = torch.zeros([0], dtype=torch.float).to(device)
    for seg_class in range(num_seg_class):
        weight = torch.softmax(pred, dim=1)[:, seg_class, :]
        # top knn_num points most strongly assigned to this class
        weight, weight_index = torch.topk(weight, k=knn_num)
        part_pc = torch.gather(point_clouds, 2, torch.stack([weight_index] * 3, dim=1))
        weight_part_pc = part_pc * torch.stack([weight] * 3, dim=1)
        # center the part points and apply the soft weights
        p_matrix = (part_pc - part_pc.mean(dim=2).unsqueeze(-1)) * torch.stack([weight] * 3, dim=1)
        # TODO(review): the hard-coded [:, :, :17] caps the covariance at 17
        # points while still normalizing by knn_num -- confirm intended.
        cov_matrix = torch.matmul(p_matrix[:, :, :17], p_matrix[:, :, :17].transpose(2, 1)) / knn_num
        # SVD runs on CPU; raise a descriptive error instead of dropping
        # into a debugger (the bare except + ipdb.set_trace() here was a
        # leftover debugging hook and would hang non-interactive runs).
        try:
            U, S, V = torch.svd(cov_matrix.cpu())
        except RuntimeError as exc:
            raise RuntimeError(
                'SVD failed on the covariance of segmentation class {}'.format(seg_class)
            ) from exc
        # smallest singular vector = plane normal; distance of each
        # weighted point to the fitted plane
        U_matrix = torch.stack([U[:, :, 2].to(device)] * knn_num, dim=1).unsqueeze(2)
        tangent_dist = torch.abs(torch.matmul(U_matrix, p_matrix.transpose(2, 1).unsqueeze(-1)))
        tangent_loss_sum += tangent_dist.mean()
        part_mean_points = torch.cat([part_mean_points, part_pc.mean(dim=2).unsqueeze(1)], dim=1)
        weighted_part_mean_points = torch.cat([weighted_part_mean_points, weight_part_pc.mean(dim=2).unsqueeze(1)],
                                              dim=1)
    tangent_loss = tangent_loss_sum / num_seg_class
    return tangent_loss, part_mean_points, weighted_part_mean_points
def point_cloud_segmentation_std_loss(point_clouds, part_mean_points, pred):
    """Average weighted spread of points around their part centers.

    Parameters
    ----------
    point_clouds : (B, 3, N) tensor of points
    part_mean_points : (B, S, 3) per-part center points
    pred : (B, S, N) raw segmentation logits (softmaxed internally)

    Returns
    -------
    Scalar tensor: mean (over classes and batch) of the soft-weighted
    average distance to the class center, normalized by the class's
    total weight mass.
    """
    soft_weights = torch.softmax(pred, dim=1)
    n_classes = pred.size(1)
    total = 0.0
    for cls in range(n_classes):
        center = part_mean_points[:, cls, :].unsqueeze(-1)  # (B, 3, 1)
        dist = (point_clouds - center).pow(2).sum(dim=1).sqrt()  # (B, N)
        cls_weight = soft_weights[:, cls, :]
        # weighted mean distance, normalized by the class weight mass
        normed = (cls_weight * dist).mean(dim=1) / cls_weight.mean(dim=1)
        total += normed.mean()
    return total / n_classes
def point_cloud_segmentation_contrastive_loss(point_clouds, pred, theta_regressor, emd_loss, device):
    """Contrastive loss over segmentation weights after EMD alignment.

    Aligns every cloud to the first one via a regressed z-rotation, matches
    each cloud against a shuffled batch member with EMD, then contrasts
    same-class weight overlap (positive) against cross-class overlap
    (negative) over the top-50 highest-weight matched points.

    NOTE(review): the ``emd_loss`` parameter is rebound to its own output
    (per-point loss tensor) on the call below; rename would clarify.
    """
    num_seg_class = pred.size(1)
    random_idx = torch.randperm(point_clouds.size(0)).to(device)
    # align every cloud to the first cloud in the batch via a regressed rotation
    target_point_cloud = torch.stack([point_clouds[0, :, :]] * point_clouds.size(0), dim=0)
    theta = theta_regressor(torch.cat([point_clouds, target_point_cloud], dim=1))
    aligned_point_clouds = pcu.rotate_shape(point_clouds, 'z', theta)
    shuffled_point_clouds = aligned_point_clouds[random_idx, :, :].to(device)
    # EMD matching between each aligned cloud and its shuffled partner
    emd_loss, emd_matching_idx = emd_loss(aligned_point_clouds.permute(0, 2, 1),
                                          shuffled_point_clouds.permute(0, 2, 1), 0.05, 3000)
    emd_matching_idx = emd_matching_idx.type(torch.LongTensor).to(device)
    pos_loss = 0.0
    neg_loss = 0.0
    for seg_class in range(num_seg_class):
        softmax_weight = torch.softmax(pred, dim=1)[:, seg_class, :]
        for sc in range(num_seg_class):
            # weights of class sc in the shuffled partner, reordered by the EMD matching
            shuffled_weight = torch.softmax(pred, dim=1)[:, sc, :][random_idx, :]
            shuffled_weight = torch.gather(shuffled_weight, 1, emd_matching_idx)
            max_weight = torch.cat([softmax_weight.unsqueeze(0), shuffled_weight.unsqueeze(0)],
                                   dim=0).max(dim=0)[0]
            # keep only the 50 most confidently matched points
            max_weight, seg_class_idx = torch.topk(max_weight, 50, dim=1)
            if sc == seg_class:
                pos_loss += (max_weight * torch.gather(emd_loss, 1, seg_class_idx)).mean()
            else:
                neg_loss += (max_weight * torch.gather(emd_loss, 1, seg_class_idx)).mean()
    pos_loss = pos_loss / num_seg_class
    neg_loss = neg_loss / (num_seg_class * num_seg_class - num_seg_class)
    return pos_loss, neg_loss
def segmentation_cosine_similarity_contrastive_loss(point_clouds, pred, sim_feature_extractor, device, tau=1.0):
    """Cosine-similarity contrastive loss over per-class segment features.

    Extracts one feature per segmentation class (via the provided
    ``sim_feature_extractor`` on the hard class mask), then compares each
    class feature against a batch-shuffled partner: same class index is the
    positive pair, a derangement of class indices gives the negatives.

    NOTE(review): the rejection-sampling loop below never terminates when
    num_seg_class == 1 (a 1-element permutation is always the identity).
    """
    cosine_similarity_loss = torch.nn.CosineSimilarity(dim=-1)
    seg_mask = torch.zeros([0], dtype=torch.bool).to(device)
    sim_feature = torch.zeros([0], dtype=torch.float).to(device)
    # hard class assignment per point
    segmentation_label = torch.max(pred, dim=1)[1]  # (B, N)
    num_seg_class = pred.size(1)
    softmax_layer = torch.nn.Softmax(dim=1)
    pred = softmax_layer(pred)
    for seg_class in range(num_seg_class):
        seg_class_mask = (segmentation_label == seg_class).unsqueeze(1)
        seg_class_sim_feature = sim_feature_extractor(point_clouds, seg_class_mask).unsqueeze(1)
        seg_mask = torch.cat([seg_mask, seg_class_mask], dim=1)
        sim_feature = torch.cat([sim_feature, seg_class_sim_feature], dim=1)
    # rejection-sample a derangement of the class indices (no fixed points)
    while 1:
        rand_seg_idx = torch.randperm(num_seg_class)
        if torch.sum(rand_seg_idx == torch.tensor(list(range(num_seg_class)))) == 0: break
    rand_batch_idx = torch.randperm(pred.size(0))
    rand_sim_feature = sim_feature[rand_batch_idx]
    # positive: same class across shuffled batch; negative: deranged classes
    pos_loss = cosine_similarity_loss(sim_feature, rand_sim_feature) / tau
    neg_loss = cosine_similarity_loss(sim_feature, rand_sim_feature[:, rand_seg_idx, :]) / tau
    return pos_loss.mean(), neg_loss.mean()
def cosine_sim_loss(pred, labels, criterion, tau):
    """InfoNCE-style cosine-similarity loss over a batch of embeddings.

    For each sample, draws one positive (same label) and up to B//4 - 1
    negatives (different labels) from a random permutation of the batch,
    then scores the anchor against them with temperature-scaled cosine
    similarity; the positive always occupies index 0, which is the
    criterion's target class.

    Parameters
    ----------
    pred : (B, D) embedding tensor
    labels : (B,) label tensor
    criterion : classification loss (e.g. CrossEntropyLoss) applied with
        target class 0 (the positive slot)
    tau : similarity temperature

    Returns
    -------
    (positives, negatives, loss): mean positive similarity, mean negative
    similarity, and the criterion loss.
    """
    B = pred.size(0)
    device = pred.device
    pos_pred = torch.zeros([0], dtype=torch.float).to(device)
    neg_pred = torch.zeros([0], dtype=torch.float).to(device)
    for b in range(B):
        rand_idx = torch.randperm(B).to(device)
        # first same-label sample in the permutation (may be b itself)
        pos_idx = (labels[rand_idx] == labels[b]).nonzero()[0]
        neg_idx = (labels[rand_idx] != labels[b]).nonzero()[:int(B / 4) - 1].squeeze(-1)
        pos_pred = torch.cat([pos_pred, pred[rand_idx, :][pos_idx, :]], dim=0)
        # A sample with too few negatives yields a shape mismatch in this
        # cat; skip it.  (Narrowed from a bare `except:`, which also
        # swallowed KeyboardInterrupt and genuine bugs.)
        try:
            neg_pred = torch.cat([neg_pred, pred[rand_idx, :][neg_idx, :].unsqueeze(0)], dim=0)
        except RuntimeError:
            continue
    sample_pred = torch.cat([pos_pred.unsqueeze(dim=1), neg_pred], dim=1)
    similarity_matrix = torch.nn.CosineSimilarity(dim=-1)(torch.stack([pred] * sample_pred.size(1), dim=1),
                                                          sample_pred) / tau
    # target class 0 == the positive slot
    sim_labels = torch.zeros(similarity_matrix.size(0), dtype=torch.long).to(device)
    loss = criterion(similarity_matrix, sim_labels)
    positives = similarity_matrix[:, 0].mean()
    negatives = similarity_matrix[:, 1:].mean()
    return positives, negatives, loss
def grid_colormap(point_grid, color, save_dir):
    """Scatter-plot a (B, 3, V) point grid colored by ``color``, saved to save_dir.

    NOTE(review): switches the global matplotlib backend to TkAgg as a side
    effect, while the other plotting helpers in this module force 'agg'.
    """
    matplotlib.use('TkAgg')
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    point_grid = point_grid.cpu()
    x = point_grid[:, 0, :]
    y = point_grid[:, 1, :]
    z = point_grid[:, 2, :]
    c = color.cpu()
    img = ax.scatter(x, y, z, s=1.5, c=c, cmap=plt.hot())
    fig.colorbar(img)
    plt.savefig(save_dir)
def optimize_visualize(point_cloud, encoder, decoder, learning_rate, num_epoch, knn_num, save_dir, batch_idx=0):
    """Optimize a random point set toward the decoded surface and save snapshots.

    Starts from Gaussian points z, repeatedly evaluates the decoder's
    distance field at z, and descends along the radial component of the
    gradient.  Every 100 epochs a rendering of z is written under
    ``save_dir/optimize_visualize``.

    Parameters
    ----------
    point_cloud : (B, C, N) reference cloud (sets device and z's shape)
    encoder / decoder : callables producing the latent vector / distances
    learning_rate : initial step size (annealed as the loss shrinks)
    num_epoch : number of optimization steps
    knn_num : neighbours used by knn_point_sampling
    save_dir : base directory for snapshot images
    batch_idx : offset forwarded to image_save folder numbering
    """
    B, C, N = point_cloud.shape
    device = point_cloud.device
    # Create z as a leaf tensor directly on the input's device.  The old
    # Variable(torch.randn(...).cuda(), requires_grad=True).to(device)
    # form required CUDA and, whenever .to() produced a copy, yielded a
    # non-leaf tensor whose .grad stayed None (crashing z.grad.zero_()).
    z = torch.randn(B, C, N, device=device, requires_grad=True)
    save_dir = os.path.join(save_dir, 'optimize_visualize')
    for epoch in range(num_epoch):
        knn_sampling = knn_point_sampling(point_cloud, z, knn_num)
        source_latent_vector = encoder(knn_sampling)
        loss = torch.abs(decoder(z, source_latent_vector)).mean()
        loss.backward()
        # Anneal the step size; test the smallest threshold first (the old
        # ordering checked < 0.05 first, so the < 0.01 and < 0.001
        # branches were unreachable).
        if loss < 0.001:
            learning_rate = 0.01
        elif loss < 0.01:
            learning_rate = 0.1
        elif loss < 0.05:
            learning_rate = 1
        with torch.no_grad():
            # project the gradient onto the radial (unit) direction of each point
            my_vector_size = torch.stack([z.pow(2).sum(axis=1).sqrt()] * 3, dim=1)
            my_norm = z / my_vector_size
            my_grad = (z.grad * my_norm).sum(axis=1)
            my_grad = my_norm * torch.stack([my_grad] * 3, dim=1)
            z -= my_grad * learning_rate
            z.grad.zero_()
        if epoch % 100 == 0:
            image_save(z.detach().cpu(), save_dir, 'test', 'epoch_{}'.format(epoch), 'epoch : {}'.format(epoch),
                       batch_idx=batch_idx)
def grid_visualize(point_clouds, encoder, decoder, grid_scale, threshold, knn_num, save_dir, batch_idx=0):
    """Extract and save the decoder's implicit surface on a regular grid.

    Evaluates the decoder on a grid_scale^3 lattice over [-1, 1]^3 (in
    chunks of 100 points), keeps grid points whose predicted distance is
    below ``threshold``, resamples to N points, and saves an image per
    batch element under save_dir/grid_visualize.
    """
    B, C, N = point_clouds.shape
    device = point_clouds.device
    with torch.no_grad():
        scale = torch.linspace(-1.0, 1.0, grid_scale)
        # (B, 3, grid_scale^3) lattice, replicated across the batch
        point_grid = torch.stack([torch.cartesian_prod(scale, scale, scale).transpose(1, 0)] * B, dim=0).to(device)
        partial_size = 100
        test_pred = torch.Tensor([]).to(device)
        # evaluate the grid in chunks to bound peak memory
        for i in range(int((grid_scale ** 3) / partial_size)):
            partial_point_grid = point_grid[:, :, i * partial_size:(i + 1) * partial_size]
            temp_latent_vector = encoder(knn_point_sampling(point_clouds, partial_point_grid, knn_num))
            test_pred = torch.cat([test_pred, decoder(partial_point_grid, temp_latent_vector).squeeze(dim=-1)
                                   ], dim=2)
        for b in range(B):
            test_pred_sample = test_pred[b, :, :]
            # keep grid points predicted to lie on/inside the surface
            masked_index = (test_pred_sample.squeeze() < threshold).nonzero()
            pred_pc = torch.gather(point_grid[b, :, :], 1, torch.stack([masked_index.squeeze()] * 3, dim=0)) \
                .unsqueeze(dim=0)
            # resample to exactly N points: subsample if too many, pad with
            # jittered copies if too few
            if pred_pc.size(2) > N:
                pred_pc, _ = pcu.random_point_sample(pred_pc, N)
            elif pred_pc.size(2) < N:
                new_pred_pc = pred_pc
                while new_pred_pc.size(2) < N:
                    new_pred_pc = torch.cat([new_pred_pc, pcu.jitter(pred_pc)], dim=2)
                pred_pc, _ = pcu.random_point_sample(new_pred_pc, N)
            # pcu.visualize(point_clouds)
            # pcu.visualize(pred_pc)
            image_save(pred_pc.detach().cpu(), save_dir, 'grid_visualize', 'prediction', 'predict_pc',
                       batch_idx=batch_idx * B + b, folder_numbering=False)
def visualize_animation(point_cloud):
    """Open an interactive open3d window showing a single rotating cloud.

    Accepts only a batch of exactly one (1, 3, N) cloud; the y/z axes are
    swapped before display.  Blocks until the open3d window is closed.
    """
    if point_cloud.size(0) != 1:
        raise NotImplementedError
    pcd = open3d.geometry.PointCloud()
    # swap y and z axes for display
    permute = [0, 2, 1]
    point_cloud = point_cloud[:, permute, :]
    # NOTE(review): squeeze(axis=0) relies on torch accepting numpy-style
    # axis=; older torch versions only accept dim= -- confirm.
    pcd.points = open3d.utility.Vector3dVector(np.array(point_cloud.squeeze(axis=0).permute(1, 0).cpu()))
    # def capture_image(vis):
    #     image = vis.capture_screen_float_buffer()
    #     plt.imsave(os.path.join(save_dir, '{}_{}.png'.format(save_name, len(os.listdir(save_dir)))),
    #                np.asarray(image))
    #     return False
    def rotate_view(vis):
        # per-frame callback: rotate the camera a small step
        ctr = vis.get_view_control()
        ctr.rotate(10.0, 0.0)
        # capture_image(vis)
        return False
    open3d.visualization.draw_geometries_with_animation_callback([pcd], rotate_view)
def save_gif(point_cloud, save_name, save_path, save_num=1):
    """Render each cloud as a 20-frame rotating GIF under save_path.

    Each frame rotates the cloud 18 degrees about z (a full turn over 20
    frames).  Intermediate PNG frames are written then deleted; the GIF is
    saved as ``save_name_<k>.gif``.  Raises if the batch exceeds save_num.
    """
    if point_cloud.size(0) > save_num:
        raise NotImplementedError
    for k in range(point_cloud.size(0)):
        img_list = []
        img_path_list = []
        point_cloud_sample = point_cloud[k, :, :].unsqueeze(0)
        for i in range(20):
            point_cloud_sample = point_cloud_sample.cpu()
            fig = plot_3d_point_cloud(point_cloud_sample[0][0], point_cloud_sample[0][1], point_cloud_sample[0][2],
                                      in_u_sphere=True, show=False, show_axis=False)
            # rotate 18 degrees per frame -> full revolution in 20 frames
            point_cloud_sample = pcu.rotate_shape(point_cloud_sample, 'z', rotation_angle=18 * np.pi / 180)
            img_path = os.path.join(save_path, '{}.png'.format(i))
            fig.savefig(img_path)
            img_path_list.append(img_path)
            img_list.append(imageio.imread(img_path))
            plt.close(fig)
        imageio.mimsave(os.path.join(save_path, '{}_{}.gif'.format(save_name, str(k))), img_list, fps=7)
        # remove the temporary per-frame PNGs
        for img_file in img_path_list:
            if os.path.exists(img_file):
                os.remove(img_file)
def save_confusion_matrix(pred_list, labels_list, num_class, save_path, save_name, cmap=None, title=None,
                          normalize=True):
    """Render a confusion matrix of predictions vs. labels to a PNG.

    pred_list / labels_list are tensors of predicted and true class
    indices; accuracy and misclassification rate are shown in the x-label.
    With ``normalize`` each row is divided by its total.
    """
    plt.switch_backend('agg')
    cm = confusion_matrix(labels_list.cpu(), pred_list.cpu())
    # overall accuracy = trace / total count
    accuracy = np.trace(cm) / float(np.sum(cm))
    mis_class = 1 - accuracy
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    if title is None:
        title = 'Confusion matrix'
    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.xticks(np.arange(num_class))
    plt.yticks(np.arange(num_class))
    if normalize:
        # row-normalize: each true class sums to 1
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # threshold that flips the cell-text color from black to white
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, mis_class))
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, '{}.png'.format(save_name)))
    plt.close()
def save_cos_sim_confusion_matrix(sim_confusion_matrix, num_class, save_path, save_name, cmap=None, title=None,
                                  normalize=False):
    """Render a precomputed cosine-similarity confusion matrix to a PNG.

    Unlike save_confusion_matrix, the matrix of (float) similarity values
    is supplied directly.  With ``normalize`` each row is divided by its
    total.
    """
    plt.switch_backend('agg')
    if cmap is None:
        cmap = plt.get_cmap('Blues')
    if title is None:
        title = 'Confusion matrix'
    plt.figure(figsize=(8, 6))
    plt.imshow(sim_confusion_matrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.xticks(np.arange(num_class))
    plt.yticks(np.arange(num_class))
    if normalize:
        # row-normalize: each row sums to 1
        sim_confusion_matrix = sim_confusion_matrix.type(torch.float) / sim_confusion_matrix.sum(axis=1)[:, np.newaxis]
    # threshold that flips the cell-text color from black to white
    thresh = sim_confusion_matrix.max() / 1.5 if normalize else sim_confusion_matrix.max() / 2
    for i, j in itertools.product(range(sim_confusion_matrix.shape[0]), range(sim_confusion_matrix.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(sim_confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if sim_confusion_matrix[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:0.4f}".format(sim_confusion_matrix[i, j]),
                     horizontalalignment="center",
                     color="white" if sim_confusion_matrix[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label\nsimilarity value')
    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, '{}.png'.format(save_name)))
    plt.close()
| 47.597518 | 119 | 0.620898 | 3,872 | 26,845 | 4.05501 | 0.100723 | 0.048405 | 0.01191 | 0.011592 | 0.404433 | 0.342526 | 0.295523 | 0.249729 | 0.220241 | 0.188714 | 0 | 0.022599 | 0.241758 | 26,845 | 563 | 120 | 47.68206 | 0.74876 | 0.01937 | 0 | 0.292 | 0 | 0 | 0.022847 | 0.000912 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048 | false | 0 | 0.024 | 0 | 0.104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcaccf9b0138aa7d34d6785bbb98eec7ec092b2d | 811 | py | Python | utils/config.py | wuyue92tree/nice-you-get | 5c4962d27eb23656c14992e260ba94094a9728e6 | [
"MIT"
] | null | null | null | utils/config.py | wuyue92tree/nice-you-get | 5c4962d27eb23656c14992e260ba94094a9728e6 | [
"MIT"
] | null | null | null | utils/config.py | wuyue92tree/nice-you-get | 5c4962d27eb23656c14992e260ba94094a9728e6 | [
"MIT"
] | null | null | null | import os
import json
from conf.settings import CONFIG_PATH, HOME_DIR
class Config(object):
    """Load and save the application's JSON config file at CONFIG_PATH.

    When no config file exists yet, ``load`` falls back to a copy of the
    built-in defaults.
    """

    def __init__(self) -> None:
        super().__init__()
        # defaults used until a config file has been written to disk
        self.default_config = {
            'save_path': os.path.join(HOME_DIR, 'media'),
            'insecure': 0,
            'merge': 0,
            'caption': 0
        }

    def load(self) -> dict:
        """Return the stored config, or the defaults when no file exists."""
        # `not os.path.exists(...)` instead of `... is False`; also return a
        # copy so callers (e.g. save()) cannot mutate the shared defaults.
        if not os.path.exists(CONFIG_PATH):
            return dict(self.default_config)
        with open(CONFIG_PATH, 'r', encoding='utf-8') as f:
            return json.loads(f.read())

    def save(self, **kwargs) -> None:
        """Merge ``kwargs`` into the stored config and write it back to disk."""
        config = self.load()
        for k, v in kwargs.items():
            config[k] = v
        with open(CONFIG_PATH, 'w', encoding='utf-8') as f:
            f.write(json.dumps(config, ensure_ascii=False, indent=4))


# module-level singleton used by the rest of the application
config = Config()
fcad268505923f6abed1ba916aa0220d82115bca | 9,807 | py | Python | _modules/nexus3_privileges.py | jsandas/saltstack-nexus3-module | e090dfe18cd3b5d90d1c71b0747ff150eb96e328 | [
"MIT"
] | 1 | 2020-11-15T00:18:55.000Z | 2020-11-15T00:18:55.000Z | _modules/nexus3_privileges.py | jsandas/saltstack-nexus3-module | e090dfe18cd3b5d90d1c71b0747ff150eb96e328 | [
"MIT"
] | 1 | 2020-11-21T19:08:07.000Z | 2020-11-21T19:14:37.000Z | _modules/nexus3_privileges.py | jsandas/saltstack-nexus3-module | e090dfe18cd3b5d90d1c71b0747ff150eb96e328 | [
"MIT"
] | null | null | null | ''''
execution module for Nexus 3 security privileges
:version: v0.2.1
:configuration: In order to connect to Nexus 3, certain configuration is required
in /etc/salt/minion on the relevant minions.
Example:
nexus3:
hostname: '127.0.0.1:8081'
username: 'admin'
password: 'admin123'
'''
import json
import logging
import nexus3
# module-level logger for this Salt execution module
log = logging.getLogger(__name__)

# Salt outputter mapping: render these functions' returns as highstate output
__outputter__ = {
    'sls': 'highstate',
    'apply_': 'highstate',
    'highstate': 'highstate',
}

# base REST path of the Nexus 3 security-privileges API
privileges_path = 'v1/security/privileges'
def create(name,
           type,
           actions=None,
           contentSelector=None,
           description='New Nexus privilege',
           domain=None,
           format=None,
           pattern=None,
           repository=None,
           scriptName=None):
    '''
    Create a Nexus 3 security privilege.

    name (str):
        privilege name
    type (str):
        privilege type [application|repository-admin|repository-content-selector|repository-view|script|wildcard]
    actions (list):
        list of actions [ADD|ALL|BROWSE|CREATE|DELETE|EDIT|READ|UPDATE] (Default: [])
    contentSelector (str):
        name of content selector (Default: None)
        .. note::
            required for repository-content-selector privilege type
            content selector must exist before assigning privileges
    description (str):
        description of privilege (Default: 'New Nexus privilege')
    domain (str):
        domain of privilege [roles|scripts|search|selectors|settings|ssl-truststore|tasks|users|userschangepw] (Default: None)
        .. note::
            required for application privilege type
    format (str):
        repository format [bower|cocoapads|conan|docker|etc.] (Default: None)
        .. note::
            required for repository-admin, repository-content-selector, and repository-view privilege types
    pattern (regex):
        regex pattern to group other privileges (Default: None)
        .. note::
            required for wildcard privilege type
    repository (str):
        repository name (Default: None)
        .. note::
            required for repository-admin, repository-content-selector, and repository-view privilege types
    scriptName (str):
        script name (Default: None)
        .. note::
            required for script privilege type

    CLI Example::
    .. code-block:: bash
        salt myminion nexus3_privileges.create name=nx-userschangepw actions="['ADD','READ']" description='Change password permission' domain=userschangepw type=application
        salt myminion nexus3_privileges.create name=nx-repository-view-nuget-nuget-hosted-browse actions=['BROWSE'] description='Browse privilege for nuget-hosted repository views' format=nuget repository=nuget-hosted type=repository-view
    '''
    ret = {
        'privilege': {}
    }

    # Avoid a shared mutable default argument.
    if actions is None:
        actions = []

    # Fields common to every privilege type.
    payload = {
        'name': name,
        'description': description,
        'actions': actions,
    }

    # Validate before building the request; each branch adds the
    # type-specific fields. Note: locals no longer shadow the `repository`
    # parameter (the original clobbered it with a dict, which broke the
    # repository-content-selector payload and its None-checks).
    if type == 'application':
        if domain is None:
            ret['comment'] = 'domain cannot be None for type {}'.format(type)
            return ret
        payload['domain'] = domain
    elif type in ['repository-admin', 'repository-view']:
        if format is None or repository is None:
            ret['comment'] = 'format and repository cannot be None for type {}'.format(type)
            return ret
        payload.update({'format': format, 'repository': repository})
    elif type == 'repository-content-selector':
        if format is None or repository is None or contentSelector is None:
            ret['comment'] = 'format, contentSelector, and repository cannot be None for type {}'.format(type)
            return ret
        payload.update({'format': format,
                        'repository': repository,
                        'contentSelector': contentSelector})
    elif type == 'script':
        # The original compared against 'scripts' (never matched the
        # documented 'script' type) and tested a dict that was never None.
        if scriptName is None:
            ret['comment'] = 'scriptName cannot be None for type {}'.format(type)
            return ret
        payload['scriptName'] = scriptName
    elif type == 'wildcard':
        if pattern is None:
            ret['comment'] = 'pattern cannot be None for type {}'.format(type)
            return ret
        # Wildcard privileges carry no actions list.
        payload = {'name': name, 'description': description, 'pattern': pattern}

    path = privileges_path + '/' + type
    nc = nexus3.NexusClient()
    resp = nc.post(path, payload)

    if resp['status'] == 201:
        ret['comment'] = 'privilege {} created.'.format(name)
        ret['privilege'] = describe(name)['privilege']
    else:
        ret['comment'] = 'could not create privilege {}.'.format(name)
        ret['error'] = {
            'code': resp['status'],
            'msg': resp['body']
        }
    return ret
def delete(name):
    '''
    Delete a Nexus 3 security privilege.

    name (str):
        privilege name

    CLI Example::
    .. code-block:: bash
        salt myminion nexus3_privileges.delete nx-analytics-all
    '''
    ret = {}

    path = privileges_path + '/' + name

    nc = nexus3.NexusClient()
    resp = nc.delete(path)

    # The Nexus API answers 204 No Content on a successful delete.
    if resp['status'] == 204:
        # Fixed message: original said "delete." instead of "deleted."
        ret['comment'] = 'privilege {} deleted.'.format(name)
    else:
        ret['comment'] = 'could not delete privilege {}.'.format(name)
        ret['error'] = {
            'code': resp['status'],
            'msg': resp['body']
        }
    return ret
def describe(name):
    '''
    Fetch a single privilege definition by name.

    name (str):
        privilege name

    CLI Example::
    .. code-block:: bash
        salt myminion nexus3_privileges.describe nx-analytics-all
    '''
    ret = {
        'privilege': {},
    }

    client = nexus3.NexusClient()
    response = client.get(privileges_path + '/' + name)

    # Anything other than 200 OK is reported as an error payload.
    if response['status'] != 200:
        ret['comment'] = 'could not retrieve privilege {}.'.format(name)
        ret['error'] = {
            'code': response['status'],
            'msg': response['body']
        }
        return ret

    ret['privilege'] = json.loads(response['body'])
    return ret
def list_all():
    '''
    List every privilege defined on the Nexus server.

    CLI Example::
    .. code-block:: bash
        salt myminion nexus3_privileges.list_all
    '''
    ret = {
        'privileges': {},
    }

    client = nexus3.NexusClient()
    response = client.get(privileges_path)

    # Anything other than 200 OK is reported as an error payload.
    if response['status'] != 200:
        ret['comment'] = 'could not retrieve available privileges.'
        ret['error'] = {
            'code': response['status'],
            'msg': response['body']
        }
        return ret

    ret['privileges'] = json.loads(response['body'])
    return ret
def update(name,
           actions=None,
           contentSelector=None,
           description=None,
           domain=None,
           format=None,
           pattern=None,
           repository=None,
           scriptName=None):
    '''
    Update an existing Nexus 3 security privilege. Only the arguments that
    are not None are changed; type-specific fields are only applied when the
    existing privilege definition actually has them.

    name (str):
        privilege name
    actions (list):
        list of actions [ADD|ALL|CREATE|DELETE|EDIT|READ|UPDATE] (Default: None)
    contentSelector (str):
        name of content selector (Default: None)
        .. note::
            content selector must exist before assigning privileges
    description (str):
        description of privilege (Default: None)
    domain (str):
        domain of privilege [roles|scripts|search|selectors|settings|ssl-truststore|tasks|users|userschangepw] (Default: None)
        .. note::
            required for application privilege type
    format (str):
        repository format [bower|cocoapads|conan|docker|etc.] (Default: None)
        .. note::
            required for repository-admin, repository-content-selector, and repository-view privilege types
    pattern (regex):
        regex pattern to group other privileges (Default: None)
        .. note::
            required for wildcard privilege type
    repository (str):
        repository name (Default: None)
        .. note::
            required for repository-admin, repository-content-selector, and repository-view privilege types
    scriptName (str):
        script name (Default: None)

    CLI Example::
    .. code-block:: bash
        salt myminion nexus3_privileges.update name=testing actions="['ADD','READ']" description='Change password permission' domain=userschangepw type=application
    '''
    ret = {
        'privilege': {}
    }

    # Fetch the current definition; the PUT must send the full document.
    priv_description = describe(name)
    if 'error' in priv_description.keys():
        ret['comment'] = 'failed to update privilege.'
        ret['error'] = priv_description['error']
        return ret

    meta = priv_description['privilege']
    path = privileges_path + '/' + meta['type'] + '/' + name

    # actions and description exist on every privilege type.
    if actions is not None:
        meta['actions'] = actions
    if description is not None:
        meta['description'] = description

    # Type-specific fields: only overwrite keys the definition already has.
    # (Fixes the original 'contentSelctor' typo, which silently wrote
    # contentSelector updates to a bogus key.)
    optional_fields = {
        'contentSelector': contentSelector,
        'domain': domain,
        'format': format,
        'repository': repository,
        'pattern': pattern,
        'scriptName': scriptName,
    }
    for field, value in optional_fields.items():
        if value is not None and field in meta.keys():
            meta[field] = value

    nc = nexus3.NexusClient()
    resp = nc.put(path, meta)

    if resp['status'] == 204:
        ret['comment'] = 'updated privilege {}.'.format(name)
        ret['privilege'] = describe(name)['privilege']
    else:
        ret['comment'] = 'could not update privilege {}.'.format(name)
        ret['error'] = {
            'code': resp['status'],
            'msg': resp['body']
        }
    return ret
fcaf861bcc515b67dfa9fe42cad17a8ff98ba4be | 1,708 | py | Python | Least_Common_Ancestor.py | cjdekker/Tree_Exercises | 30350626bfb146dc5affb51f6ab4f2a067832d4b | [
"MIT"
] | null | null | null | Least_Common_Ancestor.py | cjdekker/Tree_Exercises | 30350626bfb146dc5affb51f6ab4f2a067832d4b | [
"MIT"
] | null | null | null | Least_Common_Ancestor.py | cjdekker/Tree_Exercises | 30350626bfb146dc5affb51f6ab4f2a067832d4b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[2]:
def find_LCAs(parent):
    """Populate and return a nested dict LCA[u][v] -> set of common ancestors
    of u and v that are not the parent of another common ancestor.

    `parent` presumably maps each node to a collection of its parent nodes
    (an ancestry DAG given bottom-up) -- TODO confirm against the caller.
    """
    LCA = dict() # This is the nested dictionary: LCA[u][v] holds the result for the pair

    def lca(u, v):
        # Already computed for this ordered pair -> nothing to do.
        if u in list(LCA.keys()):
            if v in list(LCA[u].keys()):
                return
        # Recurse over u's parents first so their pair entries exist too.
        for i in list(parent[u]):
            lca(i, v)
        # ul accumulates u plus ancestors of u; isu appends every node that
        # appears among the parents of its argument.
        ul = [u]

        def isu(u):
            for i in list(parent.keys()):
                if i in parent[u]:
                    ul.append(i)

        isu(u)
        # NOTE(review): `ul` grows while being iterated and u's parents get
        # appended twice (isu(u) runs again for i == u) -- presumably meant as
        # a transitive ancestor walk; verify.
        for i in ul:
            isu(i)
        # NOTE(review): the loop variable `i` is unused below, and `set(v)`
        # builds a set of the *characters* of the label v -- this looks like a
        # placeholder entry that the final assignment overwrites; confirm.
        for i in ul:
            if u in LCA.keys():
                LCA[u].update({v : set(v)})
            else:
                LCA[u] = ({v : set(v)})
        # Same ancestor walk for v.
        vl = [v]

        def isv(v):
            for i in list(parent.keys()):
                if i in parent[v]:
                    vl.append(i)

        isv(v)
        for i in vl:
            isv(i)
        for i in vl:
            if v in LCA.keys():
                LCA[v].update({u : set(u)})
            else:
                LCA[v] = ({u : set(u)})
        # cal: ancestors common to both u and v.
        cal = list((set(ul) & set(vl)))
        # sl: every parent of a common ancestor.
        sl = []
        for i in cal:
            sl.extend(parent[i])
        # fl: common ancestors that are not the parent of another common
        # ancestor -- i.e. the "least" ones.
        fl = []
        for i in cal:
            if i not in sl:
                fl.append(i)
        # Record the result symmetrically for (u, v) and (v, u).
        if u in LCA.keys():
            LCA[u].update({v : set(fl)})
        else:
            LCA[u] = ({v : set(fl)})
        if v in LCA.keys():
            LCA[v].update({u : set(fl)})
        else:
            LCA[v] = ({u : set(fl)})

    # This calls the recursive "lca" function on all pairs of nodes to populate the "LCA" dictionary
    for u in parent:
        for v in parent:
            lca(u,v)
    return LCA
# In[ ]:
| 23.39726 | 100 | 0.379977 | 235 | 1,708 | 2.757447 | 0.217021 | 0.050926 | 0.083333 | 0.074074 | 0.398148 | 0.256173 | 0.256173 | 0.256173 | 0.256173 | 0.256173 | 0 | 0.002265 | 0.483021 | 1,708 | 72 | 101 | 23.722222 | 0.731597 | 0.101288 | 0 | 0.296296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb0aac9f660d4912d9fc1b07d9b54eabce822d8 | 290 | py | Python | samples/takePicture.py | windriver-codecamp/alpha_drone | 2845784b93296f1ff8d259418208d24202f05c5d | [
"MIT"
] | null | null | null | samples/takePicture.py | windriver-codecamp/alpha_drone | 2845784b93296f1ff8d259418208d24202f05c5d | [
"MIT"
] | null | null | null | samples/takePicture.py | windriver-codecamp/alpha_drone | 2845784b93296f1ff8d259418208d24202f05c5d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import time
from djitellopy import Tello
# Connect to a DJI Tello drone, grab one frame from its video stream and
# save it to disk. Take-off/landing are left commented out so the snapshot
# can be tested with the drone on the ground.
tello = Tello()
tello.connect()
tello.streamon()
frame_read = tello.get_frame_read()
#tello.takeoff()
# Give the stream a moment to deliver a frame before grabbing it.
time.sleep(1)
cv2.imwrite("picture.png", frame_read.frame)
#tello.land()
tello.end()
fcb2a18761108cac8ef931cd2742ce63d5c4b447 | 1,167 | py | Python | terrapower/physics/neutronics/dragon/tests/dragonTestingApp.py | ntouran/dragon-armi-plugin | c43e39891f9c99b87ff8ff82bd2424acbe6afec0 | [
"Apache-2.0"
] | null | null | null | terrapower/physics/neutronics/dragon/tests/dragonTestingApp.py | ntouran/dragon-armi-plugin | c43e39891f9c99b87ff8ff82bd2424acbe6afec0 | [
"Apache-2.0"
] | 9 | 2019-11-16T01:17:41.000Z | 2021-11-22T15:47:19.000Z | terrapower/physics/neutronics/dragon/tests/dragonTestingApp.py | ntouran/dragon-armi-plugin | c43e39891f9c99b87ff8ff82bd2424acbe6afec0 | [
"Apache-2.0"
] | 2 | 2019-11-18T15:13:46.000Z | 2021-07-30T18:01:40.000Z | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App for testing the DRAGON plugin."""
import armi
class DragonTestingApp(armi.apps.App):
    """ARMI app that registers only the DRAGON plugin, for testing purposes."""

    def __init__(self):
        super().__init__()
        # DRAGON is the only plugin registered; the import is kept local so
        # the plugin package is only needed when this test app is built.
        from terrapower.physics.neutronics.dragon.plugin import DragonPlugin

        self._pm.register(DragonPlugin)

    @property
    def splashText(self):
        """Banner shown when the application starts."""
        return """
================================
== DRAGON Testing Application ==
================================
"""
| 30.710526 | 76 | 0.672665 | 149 | 1,167 | 5.208054 | 0.61745 | 0.07732 | 0.033505 | 0.041237 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00852 | 0.195373 | 1,167 | 37 | 77 | 31.540541 | 0.817891 | 0.600686 | 0 | 0.153846 | 0 | 0 | 0.260181 | 0.144796 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0.076923 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb31fbe47519c56c2575ea13372b07701ee10b4 | 510 | py | Python | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/04-Text-Filter.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/04-Text-Filter.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 2-Python-Fundamentals (Jan 2021)/Course-Exercises-and-Exams/08-Text-Processing/01_Lab/04-Text-Filter.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | # 4. Text Filter
# Write a program that takes a text and a string of banned words.
# All words included in the ban list should be replaced with asterisks "*", equal to the word's length.
# The entries in the ban list will be separated by a comma and space ", ".
# The ban list should be entered on the first input line and the text on the second input line.
# Read the ban list and the text, then mask each banned word with asterisks.
ban_list = input().split(', ')
message = input()
for banned in ban_list:
    message = message.replace(banned, len(banned) * '*')
print(message)
| 34 | 107 | 0.692157 | 86 | 510 | 4.081395 | 0.534884 | 0.094017 | 0.08547 | 0.068376 | 0.102564 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0025 | 0.215686 | 510 | 14 | 108 | 36.428571 | 0.875 | 0.705882 | 0 | 0 | 0 | 0 | 0.020833 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb3567798e88484d6775c6425b603d48c728543 | 18,009 | py | Python | main.py | ghurone/tamacat | 70f0e4bb6d21cde993caa38ef7e047187b306d3d | [
"MIT"
] | null | null | null | main.py | ghurone/tamacat | 70f0e4bb6d21cde993caa38ef7e047187b306d3d | [
"MIT"
] | null | null | null | main.py | ghurone/tamacat | 70f0e4bb6d21cde993caa38ef7e047187b306d3d | [
"MIT"
] | null | null | null | import config.funcoes as cfunc
import config.saveload as csave
import config.janela as cjane
import config.gatos_ascii as cga
import objs.gatinho as ogato
import objs.geladeira as ogela
import objs.bau as obau
from random import randint, choice
from time import sleep
humores = ['feliz', 'triste', 'quieto', 'brincalhão', 'carinhoso', 'assustado', 'irritado']
class Main:
    """Top-level game controller.

    Builds the game state (cat, fridge, toy chest) from the title-screen
    choice and then runs the interactive menu loop.
    """

    def __init__(self):
        cfunc.ajustes_iniciais()
        op = self.tela_inicial()
        self.voltar = False
        # op '1' = new game, '2' = load game, '3' = quit (exit() below, so
        # `objs` is never read unbound).
        if op == '1':
            objs = self.novo_gato()
        elif op == '2':
            objs = self.tela_carregar_gato()
        elif op == '3':
            exit()
        if objs:
            self.gato, self.gela, self.bau = objs
            csave.salvar_jogo([self.gato, self.gela, self.bau])
        else:
            # The player backed out of creation/loading; the caller re-runs
            # construction while `voltar` is True.
            self.voltar = True
        self.salvo = True

    @staticmethod
    def tela_inicial():
        """Print the title screen and return the chosen menu option ('1'-'3')."""
        fonte = [' /) ',
                 ' |\\---/|(( ',
                 " | ° ° | )) ",
                 ' \\_T_/_// ',
                 ' ________ _______ _____ _{_}_ {_}____ ______ _______ ________ ',
                 '|_ _|| _ || | | || _ || ___|| _ ||_ _|',
                 ' | | | |_| || - || |_| || | | |_| | | | ',
                 ' | | | _ || _ _ || _ || |___ | _ | | | ',
                 ' |____| |__| |__||__| |_| |__||__| |__||______||__| |__| |____| ']
        botao = ['.-----------------------------.',
                 '| Aperte ENTER para jogar! |',
                 "'-----------------------------'"]
        janela = cjane.Janela()
        for i in range(len(fonte)):
            janela.muda_linha(i + 1, fonte[i])
            # The button art has fewer lines than the logo; stop silently.
            try:
                janela.muda_linha(i + 15, botao[i])
            except IndexError:
                pass
        janela.muda_linha(11, 'O MELHOR JOGO DO MUNDO!')
        janela.muda_linha(21, '© RaGhu 2021 ', alin='rjust')
        print(janela)
        input()  # plain input() so only ENTER advances past the splash
        janela.muda_linha(15, '(1) Novo Jogo ')
        janela.muda_linha(16, '(2) Carregar Jogo ')
        janela.muda_linha(17, '(3) Sair ')
        print(janela)
        op = input('Digite a opção desejada: ')
        while op not in ['1', '2', '3']:
            print(janela)
            op = input('Digite uma opção válida: ')
        return op

    def tela_carregar_gato(self):
        """Load-game screen; returns (gato, geladeira, bau) or None to go back."""
        gatos = csave.listar_saves()
        if len(gatos) == 0:
            # No saves yet: offer to create a new cat instead.
            janela = cjane.Janela()
            janela.muda_linha(11, 'Você não possui nenhum gato, deseja criar um? (S)im ou (N)ão')
            print(janela)
            esc = input('>>> ').lower()
            while esc != 's' and esc != 'n' and esc != 'sim' and esc != 'não' and esc != 'nao':
                janela.muda_linha(12, 'Digite uma opção válida!')
                print(janela)
                esc = input('>>> ').lower()
            if 's' in esc:
                return self.novo_gato()
            # NOTE(review): `elif 'n':` is always truthy -- presumably meant
            # `elif 'n' in esc:`; harmless only because the while-loop above
            # restricts the answers.
            elif 'n':
                return None
        elif len(gatos) == 1:
            # Exactly one save: load it directly, no picker needed.
            save = csave.carregar_jogo(gatos[0].split(".")[0])
            return save
        elif len(gatos) > 1:
            # Several saves: show a table and let the player pick by number.
            janela = cjane.JanelaTable({'##': 4, 'Gato': 54, 'Idade': 18})
            gatitos = []
            for i in range(len(gatos)):
                ga, ge, ba = csave.carregar_jogo(gatos[i].split(".")[0])
                gatitos.append([ga, ge, ba])
                janela.add_linha([i+1, ga.nome, ga.mostrar_idade()])
            janela.mostrar_janela(False)
            esc = input('Digite o número do gato para carregar (ENTER para voltar): ').lower()
            while esc != '' and (not esc.isnumeric() or int(esc) not in range(1, len(gatos)+1)):
                janela.mostrar_janela(False)
                esc = input('Digite uma opção válida: ').lower()
            if esc != '':
                return gatitos[int(esc)-1]
            else:
                return None

    @staticmethod
    def novo_gato():
        """Run the new-cat flow and return an initial (Gatinho, Geladeira, Bau)."""
        # Pre-roll the gender of the "buy" and "rescue" candidate cats; the
        # *_c / *_r locals hold the Portuguese gender suffixes used to build
        # the story text (a/'' article, a/o ending, a/e pronoun).
        gen_c = choice(['F', 'M'])
        gen_r = choice(['F', 'M'])
        if gen_c == 'F':
            um_c = 'a'
            letra_c = um_c
            pron_c = um_c
        else:
            um_c = ''
            letra_c = 'o'
            pron_c = 'e'
        if gen_r == 'F':
            um_r = 'a'
            letra_r = um_r
            pron_r = um_r
        else:
            um_r = ''
            letra_r = 'o'
            pron_r = 'e'
        textos1 = [' Você está pensando em ter um gato.',
                   f' Um amigo seu conhece alguém que está vendendo um{um_c} gat{letra_c} bonitinh{letra_c}.',
                   f' Mas também tem um{um_r} gat{letra_r} que sempre têm andado pela vizinhança,',
                   f' e el{pron_r} parece muito simpátic{letra_r}.',
                   ' Por outro lado, também existe um abrigo de gatos perto da sua casa.']
        cfunc.limpar_tela()
        janela = cjane.Janela()
        # Show the intro texts one screen at a time; texts 2 and 3 share a
        # screen, hence the extra line and the double index step at i == 2.
        j = 1
        i = 0
        while i < len(textos1):
            janela.muda_linha(j, textos1[i], 'ljust')
            if i == 2:
                j += 1
                janela.muda_linha(j, textos1[i+1], 'ljust')
            print(janela)
            input('(Aperte ENTER para continuar...)')
            j += 2
            i += 1 if i != 2 else 2
        janela.muda_linha(10, ' Você deseja (C)omprar, (R)esgatar ou (A)dotar o gato?', 'ljust')
        print(janela)
        escolha = input('>>> ')
        while escolha.lower() != 'c' and escolha.lower() != 'r' and escolha.lower() != 'a' \
                and escolha.lower() != 'comprar' and escolha.lower() != 'resgatar' and escolha.lower() != 'adotar':
            janela.muda_linha(11, ' Digite uma opção válida!', 'ljust')
            print(janela)
            escolha = input('>>> ')
        janela.limpar_janela()
        # v offsets later screen lines by one when the longer adoption flow ran.
        v = 0
        if escolha[0] in 'Cc':
            # Bought cat: young, vaccinated, full stats.
            janela.muda_linha(1, f' Você conversou com o conhecido do seu amigo e comprou {letra_c} gatinh{letra_c}!', 'ljust')
            idade = randint(2, 12)
            fome = 100
            energia = randint(75, 100)
            saude = 100
            feliz = randint(80, 100)
            vac = True
            ga = ogato.Comprado('', idade, fome, energia, saude, feliz, gen_c, vac)
        elif escolha[0] in 'Rr':
            # Rescued cat: random age, unvaccinated, low health.
            janela.muda_linha(1, f' Você resgatou {letra_r} gatinh{letra_r}. Agora el{pron_r} tem um dono!', 'ljust')
            idade = randint(0, 180)
            fome = randint(10, 100)
            energia = randint(10, 90)
            saude = randint(10, 50)
            feliz = randint(10, 90)
            vac = False
            ga = ogato.Resgatado('', idade, fome, energia, saude, feliz, gen_r, vac)
        else:
            # Adoption flow: the player chooses gender and age group.
            v = 1
            janela.muda_linha(1, ' Você quer adotar um gatinh(o) ou uma gatinh(a)?', 'ljust')
            print(janela)
            i = input('>>> ')
            while i.lower() != 'o' and i.lower() != 'a' and i.lower() != 'gatinho' and i.lower() != 'gatinha':
                janela.muda_linha(2, ' Digite uma opção válida!', 'ljust')
                print(janela)
                i = input('>>> ')
            # The loop guarantees i ends in 'a' or 'o', so gen_a is always set.
            if i[-1].lower() == 'a':
                gen_a = 'F'
                um_a = 'a'
                letra_a = um_a
                pron_a = um_a
            elif i[-1].lower() == 'o':
                gen_a = 'M'
                um_a = ''
                letra_a = 'o'
                pron_a = 'e'
            janela.muda_linha(2, f' - Gatinh{letra_a}', 'ljust')
            print(janela)
            sleep(1)
            janela.muda_linha(4, f' Você vai adotar um{um_a} gat{letra_a} (F)ilhote, (A)dult{letra_a} ou (I)dos{letra_a}?', 'ljust')
            print(janela)
            i = input('>>> ')
            while i.lower() != 'f' and i.lower() != 'a' and i.lower() != 'i' \
                    and i.lower() != 'filhote' and i.lower() != 'adulto' and i.lower() != 'idoso':
                janela.muda_linha(5, ' Digite uma opção válida!', 'ljust')
                print(janela)
                i = input('>>>')
            # Age in months per chosen age group.
            if i[0].lower() == 'f':
                idade = randint(3, 12)
                janela.muda_linha(5, ' - Filhote', 'ljust')
            elif i[0].lower() == 'a':
                idade = randint(13, 84)
                janela.muda_linha(5, f' - Adult{letra_a}', 'ljust')
            elif i[0].lower() == 'i':
                idade = randint(85, 180)
                janela.muda_linha(5, f' - Idos{letra_a}', 'ljust')
            print(janela)
            sleep(2)
            janela.limpar_janela()
            janela.muda_linha(1, f' Você foi até o abrigo e escolheu um{um_a} gatinh{letra_a}.', 'ljust')
            janela.muda_linha(2, f' Ou será que foi el{pron_a} quem te escolheu?', 'ljust')
            fome = randint(60, 100)
            energia = randint(70, 100)
            saude = randint(70, 90)
            feliz = randint(80, 100)
            vac = choice([True, True, True, False, False])  # True: 60%, False: 40%
            ga = ogato.Adotado('', idade, fome, energia, saude, feliz, gen_a, vac)
        print(janela)
        input('(Aperte ENTER para continuar...)')
        # Gender suffixes of the cat that was actually created.
        l = ga.gens['letra']
        p = ga.gens['pron']
        janela.muda_linha(3+v, f' Hora de uma decisão difícil... Qual vai ser o nome del{p}?', 'ljust')
        print(janela)
        nome = input('>>> ')
        # Keep asking until the name is valid and not taken by an existing save.
        while not cfunc.verificar_nome(nome):
            if cfunc.existe_save(nome):
                gatolino = csave.carregar_jogo(nome)[0]
                l_antigo = gatolino.gens['letra']
                p_antigo = gatolino.gens['pron']
                janela.muda_linha(4+v, f' Ess{p_antigo} gatinh{l_antigo} já existe! Escolha outro nome.', 'ljust')
            else:
                janela.muda_linha(4+v, ' Digite um nome válido (e com tamanho menor que 32)!', 'ljust')
            print(janela)
            nome = input('>>> ')
        ga.nome = nome
        # New games start with an empty fridge and toy chest.
        ge = ogela.Geladeira()
        ba = obau.Bau()
        return ga, ge, ba

    def menu(self, gato_img):
        """Print the main menu with the cat's picture and its stats."""
        acoes = ['', 'Ver geladeira',
                 'Comer', '',
                 'Ver baú',
                 'Brincar'
                 ]
        acoes_jogo = ['Salvar o jogo',
                      f'Abandonar {self.gato.gens["letra"]} gat{self.gato.gens["letra"]} :(',
                      'Sair'
                      ]
        janela = cjane.JanelaMenu(gato_img, acoes, acoes_jogo, self.gato)
        print(janela)

    def mostra_gela(self):
        """Show every food in the fridge, sorted (via comidasort) by hunger value."""
        cfunc.mudar_titulo('Geladeira')
        janela = cjane.JanelaTable({'QTE.': 6, 'Nome': 36, 'Tipo': 15, 'Fome': 8, 'Saúde': 9})
        for comida in self.gela.comidasort():
            # self.gela[nome][1] holds the quantity of that food.
            linha = [self.gela[comida.nome][1], comida.nome, comida.__class__.__name__,
                     comida.saciar, comida.saude]
            janela.add_linha(linha)
        janela.mostrar_janela()

    def mostrar_bau(self):
        """Show every toy in the chest.

        Different types: descending by happiness (brinquedosort).
        Same type: ascending by durability (sorted).
        """
        cfunc.mudar_titulo('Baú')
        janela = cjane.JanelaTable({'Nome': 32, 'Felicidade': 22, 'Usos restantes': 22})
        for brinquedo in self.bau.brinquedosort():
            for brinqs in sorted(self.bau[brinquedo.nome]):
                brinq = [brinqs.nome, brinqs.feliz, brinqs.dura]
                janela.add_linha(brinq)
        janela.mostrar_janela()

    def brincar(self):
        """Play-with-toy flow; returns True if the cat played (state changed)."""
        cfunc.mudar_titulo('Escolher brinquedo')
        janela = cjane.JanelaTable({'##': 4, 'Nome': 58, 'Felicidade': 14})
        # List the available toys, ordered by happiness.
        brinqs = self.bau.brinquedosort()
        for i in range(len(brinqs)):
            janela.add_linha([i+1, brinqs[i].nome, brinqs[i].feliz])
        janela.mostrar_janela(show_input=False)
        brinq = input('Digite o número do brinquedo para jogar (ENTER para voltar): ')
        while brinq != '' and (not brinq.isnumeric() or int(brinq) not in range(1, len(brinqs)+1)):
            janela.mostrar_janela(show_input=False)
            if not brinq.isnumeric():
                brinq = input('Digite um valor numérico (ENTER para voltar): ')
            else:
                brinq = input('Digite um número válido (ENTER para voltar): ')
        if brinq != '':
            # Play with the least-durable toy of the chosen type.
            brinq_nome = brinqs[int(brinq) - 1].nome
            menor_dura = min(self.bau[brinq_nome])
            cfunc.mudar_titulo(f'Brincando com {brinq_nome}')
            self.gato.brincar(self.bau, menor_dura)
            return True
        else:
            return False

    def comer(self):
        """Feed-the-cat flow; returns True if the cat ate (state changed)."""
        cfunc.mudar_titulo('Escolher comida')
        # First screen: pick a food category.
        comidas_tipos = self.gela.comida_por_classe()
        tipos = list(comidas_tipos.keys())
        janela_tipos = cjane.JanelaTable({'##': 4, 'Tipo': 73})
        for i in range(len(tipos)):
            janela_tipos.add_linha([i+1, tipos[i]])
        janela_tipos.mostrar_janela(show_input=False)
        tipo_index = input('Digite o número do tipo de comida para comer (ENTER para voltar): ')
        while tipo_index != '' and (not tipo_index.isnumeric() or int(tipo_index) not in range(1, len(tipos)+1)):
            janela_tipos.mostrar_janela(show_input=False)
            if not tipo_index.isnumeric():
                tipo_index = input('Digite um valor numérico (ENTER para voltar): ')
            else:
                tipo_index = input('Digite um número válido (ENTER para voltar): ')
        if tipo_index != '':
            # Second screen: pick a food within the chosen category.
            tipo = tipos[int(tipo_index)-1]
            comidas = comidas_tipos[tipo]
            janela = cjane.JanelaTable({'##': 4, 'Nome': 50, 'Fome': 10, 'Saúde': 11})
            for i in range(len(comidas)):
                janela.add_linha([i+1, comidas[i].nome, comidas[i].saciar, comidas[i].saude])
            janela.mostrar_janela(show_input=False)
            comida_index = input('Digite o número da comida para comer (ENTER para voltar ao menu): ')
            while comida_index != '' and (not comida_index.isnumeric() or int(comida_index) not in range(1, len(comidas)+1)):
                janela_tipos.mostrar_janela(show_input=False)
                if not comida_index.isnumeric():
                    comida_index = input('Digite um valor numérico (ENTER para voltar ao menu): ')
                else:
                    comida_index = input('Digite um número válido (ENTER para voltar ao menu): ')
            if comida_index != '':
                comida = comidas[int(comida_index)-1]
                cfunc.mudar_titulo(f'Comendo {comida.nome}')
                self.gato.comer(self.gela, comida)
                return True
            else:
                return False
        else:
            return False

    def run_game(self):
        """Main interactive loop: dispatch on the menu option until quit/abandon."""
        while True:
            cfunc.mudar_titulo('Menu')
            cfunc.limpar_tela()
            self.menu(gato_img=cga.gatitos['Padrão'])
            esc = input('>>> ')
            if esc == '1':
                # Show fridge contents.
                cfunc.limpar_tela()
                self.mostra_gela()
            elif esc == '2':
                # Feed the cat; mark the game as unsaved if it ate.
                cfunc.limpar_tela()
                if self.comer():
                    self.salvo = False
            elif esc == '3':
                # Show toy chest.
                cfunc.limpar_tela()
                self.mostrar_bau()
            elif esc == '4':
                # Play; mark the game as unsaved if the cat played.
                cfunc.limpar_tela()
                if self.brincar():
                    self.salvo = False
            elif esc == '5':
                # Save game.
                cfunc.limpar_tela()
                csave.salvar_jogo([self.gato, self.gela, self.bau])
                self.salvo = True
                cfunc.janela_salvar()
                sleep(1)
            elif esc == '6':
                # Delete save (abandon the cat) after confirmation.
                cfunc.limpar_tela()
                if cfunc.janela_deletar():
                    break
            elif esc == '7':
                # Quit (janela_sair may prompt to save unsaved progress).
                cfunc.limpar_tela()
                if cfunc.janela_sair(self.salvo, self.gato, self.gela, self.bau):
                    break
            elif esc.lower() == 'creditos' or esc.lower() == 'créditos':
                cfunc.limpar_tela()
                cfunc.janela_creditos()
            else:
                continue
if __name__ == '__main__':
    # Re-run the setup flow until it yields a playable game; Main sets
    # `voltar` when the player backed out of loading/creation.
    while True:
        game = Main()
        if not game.voltar:
            break
    game.run_game()
| 35.105263 | 133 | 0.470931 | 1,963 | 18,009 | 4.15894 | 0.180846 | 0.034297 | 0.051445 | 0.014699 | 0.28099 | 0.174057 | 0.109873 | 0.073126 | 0.073126 | 0.021803 | 0 | 0.019961 | 0.40191 | 18,009 | 512 | 134 | 35.173828 | 0.737722 | 0.038925 | 0 | 0.25 | 0 | 0.010989 | 0.193184 | 0.006666 | 0 | 0 | 0 | 0.001953 | 0 | 1 | 0.027473 | false | 0.002747 | 0.024725 | 0 | 0.087912 | 0.049451 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb3f35344db31559c493891a3016def67e71ee1 | 3,837 | py | Python | src/pyluxcoretools/pyluxcoretools/utils/netbeacon.py | OmidGhotbi/LuxCore | e83fb6bf2e2c0254e3c769ffc8e5546eb71f576a | [
"Apache-2.0"
] | 826 | 2017-12-12T15:38:16.000Z | 2022-03-28T07:12:40.000Z | src/pyluxcoretools/pyluxcoretools/utils/netbeacon.py | OmidGhotbi/LuxCore | e83fb6bf2e2c0254e3c769ffc8e5546eb71f576a | [
"Apache-2.0"
] | 531 | 2017-12-03T17:21:06.000Z | 2022-03-20T19:22:11.000Z | src/pyluxcoretools/pyluxcoretools/utils/netbeacon.py | OmidGhotbi/LuxCore | e83fb6bf2e2c0254e3c769ffc8e5546eb71f576a | [
"Apache-2.0"
] | 133 | 2017-12-13T18:46:10.000Z | 2022-03-27T16:21:00.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
# Copyright 1998-2018 by authors (see AUTHORS.txt)
#
# This file is part of LuxCoreRender.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import socket
import threading
import functools
import pyluxcoretools.utils.loghandler as loghandler
# Child logger under the shared pyluxcoretools logger name.
logger = logging.getLogger(loghandler.loggerName + ".netbeacon")

# UDP port used both to broadcast and to listen for discovery beacons.
BROADCAST_PORT = 18019
class NetBeaconSender:
    """Periodically broadcasts a LUXNETPING datagram advertising an
    ip/port pair on the local network."""

    def __init__(self, ipAddress, port, broadCastAddress, period=3.0):
        self.socket = None
        self.thread = None
        self.ipAddress = ipAddress
        self.port = port
        self.broadCastAddress = broadCastAddress
        self.period = period

    def Start(self):
        """Open the broadcast UDP socket and launch the beacon thread."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.socket = sock

        self.stopEvent = threading.Event()
        self.thread = threading.Thread(target=self.__BeaconThread,
                                       name="NetBeaconSenderThread")
        self.thread.start()

    def Stop(self):
        """Signal the beacon thread to stop, wait up to 5s, close the socket."""
        self.stopEvent.set()
        self.thread.join(5.0)
        self.socket.close()

    def __BeaconThread(self):
        logger.info("NetBeaconSender thread started.")
        # Beacon payload: tag plus the advertised address and port.
        pingMsg = bytearray(
            ("LUXNETPING\n" + str(self.ipAddress) + "\n" + str(self.port) + "\n").encode("utf-8"))
        while not self.stopEvent.is_set():
            logger.debug("NetBeaconSender LUXNETPING sent: " + str(pingMsg))
            self.socket.sendto(pingMsg, (self.broadCastAddress, BROADCAST_PORT))
            # wait() doubles as the inter-beacon sleep and the stop signal.
            self.stopEvent.wait(self.period)
        logger.info("NetBeaconSender thread done.")
class NetBeaconReceiver:
    """Listens for LUXNETPING broadcast datagrams and reports each sender's
    advertised address/port through a callback."""

    def __init__(self, callBack):
        self.socket = None
        self.thread = None
        self.callBack = callBack

    def Start(self):
        """Bind the broadcast UDP socket and launch the listener thread."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # Short timeout so the thread can periodically re-check the stop flag.
        sock.settimeout(1)
        sock.bind(('', BROADCAST_PORT))
        self.socket = sock

        self.stopEvent = threading.Event()
        self.thread = threading.Thread(target=self.__BeaconThread,
                                       name="NetBeaconReceiverThread")
        self.thread.start()

    def Stop(self):
        """Signal the listener thread to stop, wait for it, close the socket."""
        self.stopEvent.set()
        self.thread.join()
        # Shutdown can not be used with UDP sockets so I can not wakeup
        # the thread form the socket.recvfrom()
        #self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()

    def __BeaconThread(self):
        logger.info("NetBeaconReceiver thread started.")
        try:
            while not self.stopEvent.is_set():
                try:
                    data, whereFrom = self.socket.recvfrom(4096)
                except socket.timeout:
                    # No datagram within the timeout; loop and re-check stop.
                    continue
                if not data:
                    break
                logger.debug("NetBeaconReceiver LUXNETPING received from " + str(whereFrom) + ": " + str(data))
                tag, ipAddress, port, _ = data.decode("utf-8").split("\n")
                if tag != "LUXNETPING":
                    continue
                if ipAddress == "":
                    # Sender did not advertise an address: use the datagram's
                    # source address instead.
                    ipAddress = str(whereFrom[0])
                self.callBack(ipAddress, int(port))
        except Exception as e:
            logger.info("BeaconThread exception:")
            logger.exception(e)
        logger.info("NetBeaconReceiver thread done.")
| 28.849624 | 99 | 0.691426 | 473 | 3,837 | 5.54334 | 0.35518 | 0.04958 | 0.02746 | 0.012204 | 0.318841 | 0.295957 | 0.276125 | 0.251716 | 0.218154 | 0.218154 | 0 | 0.009886 | 0.156372 | 3,837 | 132 | 100 | 29.068182 | 0.800124 | 0.23117 | 0 | 0.363636 | 0 | 0 | 0.113965 | 0.015919 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103896 | false | 0 | 0.064935 | 0 | 0.194805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb43a9aaebf1ad8cae7c5ffcb6ef4a11fa19aa8 | 389 | py | Python | api/main.py | debbie-chan/SPM | f84e62779347579287aee8a2e832f72dcc53b8dd | [
"MIT"
] | null | null | null | api/main.py | debbie-chan/SPM | f84e62779347579287aee8a2e832f72dcc53b8dd | [
"MIT"
] | null | null | null | api/main.py | debbie-chan/SPM | f84e62779347579287aee8a2e832f72dcc53b8dd | [
"MIT"
] | null | null | null | from flask import render_template
from .src.app import create_app
db_uri = (
"mongodb+srv://dbAdmin:Ve08ByJJOk5RNhWK@clusterlms.k10xd.mongodb.net/lms"
)
app = create_app(db_uri)
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
def index(path):
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
| 21.611111 | 77 | 0.691517 | 57 | 389 | 4.473684 | 0.614035 | 0.023529 | 0.086275 | 0.109804 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038123 | 0.123393 | 389 | 17 | 78 | 22.882353 | 0.709677 | 0 | 0 | 0 | 0 | 0 | 0.290488 | 0.182519 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0.083333 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb44301153a2a318071b686264e0d081c08940a | 2,189 | py | Python | inference.py | SolomidHero/voice-conversion-flask-heroku | 2b27f1e92dcd2d06723ab39382389fbe722c843d | [
"MIT"
] | null | null | null | inference.py | SolomidHero/voice-conversion-flask-heroku | 2b27f1e92dcd2d06723ab39382389fbe722c843d | [
"MIT"
] | null | null | null | inference.py | SolomidHero/voice-conversion-flask-heroku | 2b27f1e92dcd2d06723ab39382389fbe722c843d | [
"MIT"
] | null | null | null | import json
import torch
import sys
from common_utils import transform_audio
from engine.data import load_wav, log_mel_spectrogram, plot_mel, plot_attn
from engine.models import load_pretrained_wav2vec
from vocoder.env import AttrDict
sys.path.append("./vocoder")
from vocoder.models import Generator
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Checkpoint locations for the converter, feature extractor and vocoder.
ckpt_path = "./fragmentvc.pt"
wav2vec_path = "facebook/wav2vec2-base"
vocoder_path = "./generator.pt"
vocoder_config_path = "./generator_config.json"

# Log-mel spectrogram extraction parameters (16 kHz audio).
preemph = 0.97
sample_rate = 16000
n_mels = 80
n_fft = 1280
hop_len = 320
win_len = 1280
f_min = 50
f_max = None
def convert(src_wav, tgt_wav):
    """Convert `src_wav` speech into the target speaker's voice.

    Parameters: `src_wav` is the source waveform (1-D array-like); `tgt_wav`
    is the target-speaker waveform used to compute a reference mel spectrogram.
    Returns the converted waveform as a numpy array.

    NOTE(review): models are (re)loaded on every call — hoist loading if this
    is called repeatedly.
    """
    wav2vec = load_pretrained_wav2vec(wav2vec_path).to(device)
    print("[INFO] Wav2Vec is loaded from", wav2vec_path)

    model = torch.jit.load(ckpt_path).to(device).eval()
    print("[INFO] FragmentVC is loaded from", ckpt_path)

    # Fix: the config file handle was previously leaked (open(...).read()).
    with open(vocoder_config_path) as config_file:
        vocoder_config = json.load(config_file)
    vocoder = Generator(AttrDict(vocoder_config)).to(device).eval()
    vocoder_state_dict = torch.load(vocoder_path, map_location=device)
    vocoder.load_state_dict(vocoder_state_dict['generator'])
    print("[INFO] Vocoder is loaded from", vocoder_path)

    # Add a batch dimension: (samples,) -> (1, samples).
    src_wav = torch.FloatTensor(src_wav).unsqueeze(0).to(device)
    print("[INFO] source waveform shape:", src_wav.shape)

    tgt_mel = log_mel_spectrogram(
        tgt_wav, preemph, sample_rate, n_mels, n_fft, hop_len, win_len, f_min, f_max
    )
    # Transpose to (n_mels, frames) and add a batch dimension.
    tgt_mel = torch.FloatTensor(tgt_mel.T).unsqueeze(0).to(device)
    print("[INFO] target spectrograms shape:", tgt_mel.shape)

    with torch.no_grad():
        src_feat = wav2vec.extract_features(src_wav, None)[0]
        print("[INFO] source Wav2Vec feature shape:", src_feat.shape)
        out_mel, _ = model(src_feat, tgt_mel)
        print("[INFO] converted spectrogram shape:", out_mel.shape)
        out_wav = vocoder(out_mel).squeeze()
        out_wav = out_wav.cpu().numpy()
    print("[INFO] generated waveform shape:", out_wav.shape)
    return out_wav
def get_prediction(src, tgt):
    """Convert `src` audio to the voice of `tgt`; thin wrapper around convert().

    Exceptions propagate to the caller (an earlier revision swallowed them and
    returned `0, 'error'`; the commented-out code has been removed).
    """
    return convert(src, tgt)
| 29.581081 | 80 | 0.740978 | 327 | 2,189 | 4.712538 | 0.327217 | 0.046723 | 0.025308 | 0.033095 | 0.063595 | 0.035042 | 0 | 0 | 0 | 0 | 0 | 0.020148 | 0.138419 | 2,189 | 73 | 81 | 29.986301 | 0.796925 | 0.042942 | 0 | 0 | 0 | 0 | 0.169459 | 0.021541 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.156863 | 0 | 0.235294 | 0.156863 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb77d0da70d5d72ccf79ec2127cc2c4373c050d | 5,412 | py | Python | suica.py | hsgwa/nfcpy-suica-sample | 7903ec3546c3e11fef0c82b6316b357a7a4d585d | [
"MIT"
] | null | null | null | suica.py | hsgwa/nfcpy-suica-sample | 7903ec3546c3e11fef0c82b6316b357a7a4d585d | [
"MIT"
] | null | null | null | suica.py | hsgwa/nfcpy-suica-sample | 7903ec3546c3e11fef0c82b6316b357a7a4d585d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import binascii
import csv
import datetime
import os
import struct
import sys
import nfc
# NOTE(review): this appends the bundled nfcpy checkout *after* `import nfc`
# above, so it only affects imports performed later — confirm the intended order.
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/nfcpy')

# Number of 16-byte history blocks to read from the card.
num_blocks = 20
# FeliCa service code used for the history read — presumably the Suica
# usage-history service; verify against the card documentation.
service_code = 0x090f
class StationRecord(object):
    """One row of the station-code CSV: numeric keys plus display names."""

    # Class-level cache of parsed rows, filled lazily by get_db().
    db = None

    def __init__(self, row):
        """Parse one CSV row: [area, line, station, company, line name, station name]."""
        self.area_key = int(row[0], 10)
        self.line_key = int(row[1], 10)
        self.station_key = int(row[2], 10)
        self.company_value = row[3]
        self.line_value = row[4]
        self.station_value = row[5]

    @classmethod
    def get_none(cls):
        """Placeholder record returned when a station lookup fails."""
        return cls(["0", "0", "0", "None", "None", "None"])

    @classmethod
    def get_db(cls, filename):
        """Read and cache the station-code CSV on first use."""
        if cls.db is None:  # was `== None`; identity test is the idiom
            cls.db = []
            # Fix: 'rU' mode was removed in Python 3.11, and the handle was
            # never closed. The csv docs recommend newline='' for csv.reader.
            with open(filename, 'r', newline='') as csvfile:
                for row in csv.reader(csvfile,
                                      delimiter=',',
                                      dialect=csv.excel_tab):
                    cls.db.append(cls(row))
        return cls.db

    @classmethod
    def get_station(cls, line_key, station_key):
        """Return the StationRecord matching the line/station codes, or a stub."""
        # `os` is already imported at module level; the redundant local import
        # was removed.
        station_code_path = os.path.dirname(os.path.abspath(__file__)) + "/StationCode.csv"
        for station in cls.get_db(station_code_path):
            if station.line_key == line_key and station.station_key == station_key:
                return station
        return cls.get_none()
class HistoryRecord(object):
    """One 16-byte Suica usage-history block decoded into named fields."""

    def __init__(self, data):
        """Decode the raw block; station codes are resolved via StationRecord."""
        # The same bytes are unpacked twice: most fields are big-endian,
        # but the balance field is little-endian.
        big = struct.unpack('>2B2H4BH4B', data)
        little = struct.unpack('<2B2H4BH4B', data)

        self.db = None
        self.console = self.get_console(big[0])
        self.process = self.get_process(big[1])
        date_word = big[3]
        self.year = self.get_year(date_word) + 2000
        self.month = self.get_month(date_word)
        self.day = self.get_day(date_word)
        self.balance = little[8]
        self.in_station = StationRecord.get_station(big[4], big[5])
        self.out_station = StationRecord.get_station(big[6], big[7])

    @classmethod
    def get_console(cls, key):
        """Map a terminal-type byte to its Japanese name (None if unmapped)."""
        consoles = {
            0x03: "精算機",
            0x04: "携帯型端末",
            0x05: "車載端末",
            0x12: "券売機",
            0x16: "改札機",
            0x1c: "乗継精算機",
            0xc8: "自販機",
        }
        return consoles.get(key)

    @classmethod
    def get_process(cls, key):
        """Map a process-type byte to its Japanese name (None if unmapped)."""
        processes = {
            0x01: "運賃支払",
            0x14: "運賃支払(入場時オートチャージ)",
            0x15: "運賃支払(退場時オートチャージ)",
            0x02: "チャージ",
            0x0f: "バス",
            0x46: "物販",
        }
        return processes.get(key)

    # The date is packed into one 16-bit word: 7 bits year, 4 bits month,
    # 5 bits day.
    @classmethod
    def get_year(cls, date):
        return (date >> 9) & 0x7f

    @classmethod
    def get_month(cls, date):
        return (date >> 5) & 0x0f

    @classmethod
    def get_day(cls, date):
        return (date >> 0) & 0x1f
class Station():
    """Plain value object: a station name with its operating company and line."""

    def __init__(self, station, company, line):
        self.station, self.company, self.line = station, company, line
class SuicaRecord():
    """User-facing view of one HistoryRecord: date, stations, balance, payment."""

    def __init__(self, history):
        """Copy fields from a HistoryRecord into display-ready attributes."""
        self.console = history.console
        self.process = history.process
        self.date = datetime.datetime(history.year, history.month, history.day)
        self.in_station = Station(history.in_station.station_value,
                                  history.in_station.company_value,
                                  history.in_station.line_value)
        self.out_station = Station(history.out_station.station_value,
                                   history.out_station.company_value,
                                   history.out_station.line_value)
        self.balance = history.balance
        # Filled in later by Suica.__calculate_payment (balance delta between
        # consecutive records).
        self.payment = 0
class Suica():
    """Reads the usage history from a Suica card via an NFC reader on USB."""

    def __init__(self):
        """Connect to the reader, read history blocks, and compute payments."""
        clf = nfc.ContactlessFrontend('usb')
        self.data = []
        # Blocks until a card is presented; __connected fills self.data.
        clf.connect(rdwr={'on-connect': self.__connected})
        self.__calculate_payment()
        # Drop the first record and reverse — presumably the card returns
        # newest-first and the first delta is unusable; confirm against reader
        # output.
        self.data = self.data[1:]
        self.data = self.data[::-1]

    def __calculate_payment(self):
        """Set each record's payment to the balance delta vs. its neighbour."""
        for record_, record in zip(self.data[:-1], self.data[1:]):
            record.payment = record.balance - record_.balance

    def __connected(self, tag):
        """nfcpy on-connect callback: read `num_blocks` history blocks."""
        if not isinstance(tag, nfc.tag.tt3.Type3Tag):
            print("error: tag isn't Type3Tag")
            return
        try:
            sc = nfc.tag.tt3.ServiceCode(service_code >> 6,
                                         service_code & 0x3f)
            for i in range(num_blocks):
                bc = nfc.tag.tt3.BlockCode(i, service=0)
                data = tag.read_without_encryption([sc], [bc])
                history = HistoryRecord(bytes(data))
                self.data.append(SuicaRecord(history))
        except Exception as e:
            # Best-effort read: report and keep whatever was read so far.
            print("error: %s" % e)
if __name__ == "__main__":
suica = Suica()
for d in suica.data:
print()
print("支払い: %s円" % d.payment)
print("端末種: %s" % d.console)
print("処理: %s" % d.process)
print("日付: %02d-%02d-%02d" % (d.date.year, d.date.month, d.date.day))
print("入線区: %s-%s" % (d.in_station.company, d.in_station.line))
print("入駅順: %s" % d.in_station.station)
print("出線区: %s-%s" % (d.out_station.company, d.out_station.line))
print("出駅順: %s" % d.out_station.station)
print("残高: %d" % d.balance)
| 29.736264 | 91 | 0.55765 | 644 | 5,412 | 4.495342 | 0.268634 | 0.017271 | 0.046978 | 0.017617 | 0.078411 | 0.044905 | 0.020725 | 0 | 0 | 0 | 0 | 0.029459 | 0.316334 | 5,412 | 181 | 92 | 29.900552 | 0.752973 | 0.034183 | 0 | 0.101449 | 0 | 0 | 0.050594 | 0 | 0 | 0 | 0.014182 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.057971 | 0.043478 | 0.282609 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcb7d3280cecce6265b2df4ab52529db9861f61d | 1,760 | py | Python | raekwon/db.py | metheoryt/raekwon | 3330559a2b655436520fad3d7edf6c871d6e8460 | [
"MIT"
] | null | null | null | raekwon/db.py | metheoryt/raekwon | 3330559a2b655436520fad3d7edf6c871d6e8460 | [
"MIT"
] | null | null | null | raekwon/db.py | metheoryt/raekwon | 3330559a2b655436520fad3d7edf6c871d6e8460 | [
"MIT"
] | null | null | null | from datetime import datetime
import marshmallow as ma
import sqlalchemy as sa
from marshmallow import fields as f
from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, scoped_session
# Shared SQLAlchemy objects: one MetaData registry for all reflected tables,
# a thread-local session factory, and the declarative base bound to the same
# metadata.
metadata = MetaData()
Session = scoped_session(sessionmaker())
Base = declarative_base(metadata=metadata)
def reflect_field_to_column(fd: f.Field):
    """Build a SQLAlchemy Column mirroring a marshmallow field.

    The column type follows the field's type; nullability follows `required`
    and the default is carried over. Unknown field kinds fall back to String.
    """
    # Checked in order; the first matching marshmallow type wins.
    dispatch = (
        (f.Decimal, lambda: sa.Numeric(scale=fd.places,
                                       decimal_return_scale=fd.places)),
        (f.Bool, sa.Boolean),
        (f.DateTime, sa.DateTime),
        (f.Date, sa.Date),
        (f.Float, sa.Float),
        (f.Int, sa.Integer),
    )
    for field_cls, column_factory in dispatch:
        if isinstance(fd, field_cls):
            ct = column_factory()
            break
    else:
        ct = sa.String()
    return sa.Column(fd.name, ct, nullable=not fd.required, default=fd.default)
def extract_columns_from_schema(schema: ma.Schema):
    """Reflect every field of *schema* into a list of SQLAlchemy columns."""
    return [reflect_field_to_column(field) for field in schema.fields.values()]
def make_table_from_schema(name, schema: ma.Schema):
    """Create a Table named *name* with bookkeeping columns plus the schema's fields."""
    basic_model_columns = (
        sa.Column('pk', sa.String(), primary_key=True, nullable=False, unique=True),
        # If the creation date and the last-update date differ, the operation
        # should be flagged in the reconciliation results.
        sa.Column('date_create', sa.DateTime, default=datetime.now),
        sa.Column('last_update', sa.DateTime, onupdate=datetime.now),
    )
    additional_cols = extract_columns_from_schema(schema)
    table = sa.Table(name, metadata, *basic_model_columns, *additional_cols)
    return table
| 28.387097 | 84 | 0.694318 | 235 | 1,760 | 5.07234 | 0.365957 | 0.017617 | 0.065436 | 0.071309 | 0.050336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.198864 | 1,760 | 61 | 85 | 28.852459 | 0.84539 | 0.053409 | 0 | 0 | 0 | 0 | 0.014661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.170732 | 0 | 0.317073 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcbb97830b78a5ce1b4a5754ca7c5afd3e05b7f0 | 3,963 | py | Python | app/FrontendMicroservice/main_frontend.py | AdavisSnakes/GrocerySaaS | 1d0b50d1c0d2e53b1bdb9fe57e94c6168b7e4c84 | [
"MIT"
] | null | null | null | app/FrontendMicroservice/main_frontend.py | AdavisSnakes/GrocerySaaS | 1d0b50d1c0d2e53b1bdb9fe57e94c6168b7e4c84 | [
"MIT"
] | null | null | null | app/FrontendMicroservice/main_frontend.py | AdavisSnakes/GrocerySaaS | 1d0b50d1c0d2e53b1bdb9fe57e94c6168b7e4c84 | [
"MIT"
] | null | null | null | #!/usr/bin/python3.7
import sys, json, os, stripe
from datetime import timedelta, datetime
from flask import Flask, render_template, redirect, request, escape, jsonify, flash, current_app
from flask_login import LoginManager, UserMixin, login_required, login_user, logout_user, current_user
from flask_wtf import CSRFProtect
# Import all the things
from setup_app import app
from frontend_action import FrontendAction
from service_calls.call_notifications_service import notification_api
from service_calls.call_user_service import user_api
from service_calls.call_stripe_service import stripe_api
# Enable CSRF protection and mount the per-service API blueprints, then build
# the action helper that the route handlers below delegate to.
csrf = CSRFProtect(app)
app.register_blueprint(notification_api)
app.register_blueprint(user_api)
app.register_blueprint(stripe_api)
action = FrontendAction(app)
@app.route("/")
def home():
variables = dict(is_authenticated=current_user.is_authenticated)
return render_template('index.html', **variables)
@app.route("/login_page")
def login_page():
if current_user.is_authenticated:
return redirect('/dashboard', code=302)
return render_template('login_page.html')
@app.route("/dashboard")
@login_required
def dashboard():
trial_period = timedelta(days=app.config['TRIAL_LENGTH_DAYS'])
sub_active = action.is_user_subscription_active(False)
notifications, notifications_for_display = action.get_unread_notifications(current_user.id)
variables = dict(name=current_user.name,
expire_date=current_user.created_date + trial_period,
user_is_paying=sub_active,
notifications=notifications_for_display,
n_messages=len(notifications))
return render_template('dashboard.html', **variables)
@app.route("/billing")
@login_required
def billing():
sub_active, show_reactivate, sub_cancelled_at = action.is_user_subscription_active()
stripe_objs = action.get_all_stripe_subscriptions_by_user_id(current_user.id)
sub_dict = action.subscriptions_to_json(stripe_objs)
notifications, notifications_for_display = action.get_unread_notifications(current_user.id)
variables = dict(subscription_active=sub_active,
name=current_user.name,
show_reactivate=show_reactivate,
subscription_cancelled_at=sub_cancelled_at,
subscription_data=sub_dict,
notifications=notifications_for_display,
n_messages=len(notifications))
return render_template('billing.html', **variables)
@app.route("/notifications")
@login_required
def notifications_center():
all_notifications = action.get_all_notifications_by_user_id(current_user.id)
notifications, notifications_for_display = action.get_unread_notifications(current_user.id)
variables = dict(name=current_user.name,
notifications=notifications_for_display,
all_notifications=all_notifications,
n_messages=len(notifications))
return render_template('notifications.html', **variables)
@app.route("/tos")
def terms_of_service():
variables = dict(is_authenticated=current_user.is_authenticated)
return render_template('terms_of_service.html', **variables)
@app.route("/logout")
def logout():
if current_user.is_authenticated == True:
current_user.is_authenticated = False
logout_user()
return redirect('/', code=302)
@app.errorhandler(401)
def not_logged_in(e):
    """401 handler: send the visitor to the login form with a prompt."""
    return render_template('login_page.html',
                           message='Please login first')
@app.errorhandler(404)
def not_found(e):
    """404 handler: render the shared error page with the exception text."""
    return render_template(
        'error.html',
        is_authenticated=current_user.is_authenticated,
        message='404 Page Not Found',
        stacktrace=str(e))
if __name__ == '__main__':
    # Listen on all interfaces; the port comes from the app configuration.
    app.run(host='0.0.0.0', port=app.config['FRONTEND_PORT'])
fcbc22574e8ecb6bb9fb93418299a40102fc9634 | 483 | py | Python | examples/decoupledibpm/sphere3d/Re350/scripts/create_body.py | barbagroup/petibm-examples | 794de3613967c14750c750aed386602c988cff05 | [
"BSD-3-Clause"
] | 2 | 2020-08-08T13:37:32.000Z | 2021-12-01T03:22:32.000Z | examples/decoupledibpm/sphere3d/Re350/scripts/create_body.py | barbagroup/petibm-examples | 794de3613967c14750c750aed386602c988cff05 | [
"BSD-3-Clause"
] | null | null | null | examples/decoupledibpm/sphere3d/Re350/scripts/create_body.py | barbagroup/petibm-examples | 794de3613967c14750c750aed386602c988cff05 | [
"BSD-3-Clause"
] | 2 | 2019-12-22T08:49:01.000Z | 2021-12-01T03:22:44.000Z | """Create a sphere."""
import pathlib
import sys
import petibmpy
rootdir = pathlib.Path(__file__).absolute().parents[5]
sys.path.insert(0, str(rootdir / 'misc'))
import icosphere
R = 0.5
sphere = icosphere.create_icosphere(25)
sphere.vertices *= R
sphere.print_info()
x, y, z = sphere.vertices.T
# Center the sphere at (-5.0, 0.0, 0.0)
x += -5.0
simudir = pathlib.Path(__file__).absolute().parents[1]
filepath = simudir / 'sphere.body'
petibmpy.write_body(filepath, x, y, z)
| 18.576923 | 54 | 0.706004 | 76 | 483 | 4.342105 | 0.460526 | 0.024242 | 0.027273 | 0.139394 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0358 | 0.132505 | 483 | 25 | 55 | 19.32 | 0.75179 | 0.113872 | 0 | 0 | 0 | 0 | 0.035545 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcbc3d99f59af651cbfa762d00ff42cd63d6d739 | 1,129 | py | Python | examples/plot_therm.py | xavigisbeg/handy_plotter | 048a38dc8510b81df01348bf4e756fa846a977be | [
"MIT"
] | null | null | null | examples/plot_therm.py | xavigisbeg/handy_plotter | 048a38dc8510b81df01348bf4e756fa846a977be | [
"MIT"
] | null | null | null | examples/plot_therm.py | xavigisbeg/handy_plotter | 048a38dc8510b81df01348bf4e756fa846a977be | [
"MIT"
] | null | null | null | import context as HP
import os
OT = '2018_1101_B'
if (os.name == 'nt'):
pathData = '{}{}/{}'.format(
'//SERVIDORSQL/Datos/Desarrollos y pruebas/',
'Automatitzacio/Dades Proves/Termoparell',
OT)
pathPlot = '{}{}/{}'.format(
'//SERVIDORSQL/Datos/Desarrollos y pruebas/',
'Automatitzacio/Dades Proves/Termoparell',
OT)
else:
pathData = '/home/pi/results/therm/{}'.format(OT)
pathPlot = '/home/pi/results/plots/{}'.format(OT)
try:
plotter = HP.HandyPlotter()
allPlots = True
whatTc = {
'L1': 'Termopar A',
}
if (not allPlots):
plotter.plot_all(
pathData=pathData,
pathPlot=pathPlot,
find=whatTc['A'],
)
else:
find = {
'2018_1101_B': 'Primera Hornada Ejercicio 2018 Orden Trabajo 1101 (B)',
}
for i in find:
plotter.plot_all(
pathData=pathData,
pathPlot=pathPlot,
find={'tag': i, 'title': find[i]},
naming='column',
xPos=1,
yPos=[i for i in range(2, 14)], # [2, 5, 8, 12],
xLabel='Tiempo [min]',
yLabel='Temperatura [ºC]',
yLim=(0, 180.05),
xTicks=(0, 300.05, 20),
yTicks=(0, 180.05, 10),
)
except KeyboardInterrupt:
print('Cancel')
| 22.137255 | 74 | 0.622675 | 146 | 1,129 | 4.773973 | 0.568493 | 0.021521 | 0.025825 | 0.094692 | 0.370158 | 0.370158 | 0.370158 | 0.370158 | 0.226686 | 0.226686 | 0 | 0.061269 | 0.190434 | 1,129 | 50 | 75 | 22.58 | 0.701313 | 0.0124 | 0 | 0.297872 | 0 | 0 | 0.327044 | 0.100629 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.042553 | 0 | 0.042553 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcbd2a883473757a97803fed690a067d5c7f7016 | 7,526 | py | Python | sammba/registration/tests/test_template_registrator.py | salma1601/sammba-mri | c3c79ed806a4e5ce3524bc6053bf0c3ff1444113 | [
"CECILL-B"
] | null | null | null | sammba/registration/tests/test_template_registrator.py | salma1601/sammba-mri | c3c79ed806a4e5ce3524bc6053bf0c3ff1444113 | [
"CECILL-B"
] | null | null | null | sammba/registration/tests/test_template_registrator.py | salma1601/sammba-mri | c3c79ed806a4e5ce3524bc6053bf0c3ff1444113 | [
"CECILL-B"
] | null | null | null | import os
from nose import with_setup
from nose.tools import assert_true
import numpy as np
import nibabel
from nilearn.datasets.tests import test_utils as tst
from nilearn._utils.testing import assert_raises_regex
from nilearn._utils.niimg_conversions import _check_same_fov
from sammba import testing_data
from sammba.registration.template_registrator import TemplateRegistrator
def crop_and_oblique(in_file, out_file):
    """Save a copy of *in_file* with the first slice dropped and a slightly
    oblique .2 mm isotropic affine (small off-diagonal xy terms)."""
    img = nibabel.load(in_file)
    oblique_affine = np.array([
        [.2, .01, 0., 0.],
        [.01, .2, 0., 0.],
        [0., 0., .2, 0.],
        [0., 0., 0., 1.],
    ])
    cropped_data = img.get_data()[1:]
    nibabel.Nifti1Image(cropped_data, oblique_affine).to_filename(out_file)
def empty_img_like(in_file, out_file):
    """Save an all-zero image sharing *in_file*'s shape and affine."""
    source = nibabel.load(in_file)
    zeros = np.zeros(source.get_data().shape)
    nibabel.Nifti1Image(zeros, source.affine).to_filename(out_file)
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_segment():
    """Smoke test: TemplateRegistrator.segment produces a brain file on disk."""
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    registrator = TemplateRegistrator(anat_file, 400, output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False)
    # (a duplicate recomputation of anat_file with the identical expression
    # was removed here)
    _, brain_file = registrator.segment(anat_file)
    assert_true(os.path.isfile(brain_file))
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fit_anat_and_transform_anat_like():
    """fit_anat aligns to the template, and transform_anat_like follows it."""
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    template_file = os.path.join(tst.tmpdir, 'template.nii.gz')

    # Create template
    crop_and_oblique(anat_file, template_file)
    registrator = TemplateRegistrator(template_file, 400,
                                      output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False,
                                      registration_kind='affine')
    # Transforming before fitting must raise.
    assert_raises_regex(
        ValueError, 'has not been anat fitted',
        registrator.transform_anat_like, anat_file)

    # test fit_anat
    registrator.fit_anat(anat_file)
    assert_true(_check_same_fov(nibabel.load(registrator.registered_anat_),
                                nibabel.load(template_file)))

    # test transform_anat_like
    anat_like_file = os.path.join(tst.tmpdir, 'anat_like.nii.gz')
    empty_img_like(anat_file, anat_like_file)
    # NOTE(review): this second fit_anat re-fits with the same input and looks
    # redundant — confirm whether it is intentional.
    registrator.fit_anat(anat_file)
    transformed_file = registrator.transform_anat_like(anat_like_file)
    assert_true(_check_same_fov(nibabel.load(transformed_file),
                                nibabel.load(template_file)))
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fit_transform_and_inverse_modality_with_func():
    """Functional fit, transform, and round-trip inverse-transform checks."""
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    func_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'func.nii.gz')
    template_file = os.path.join(tst.tmpdir, 'template.nii.gz')
    crop_and_oblique(anat_file, template_file)
    registrator = TemplateRegistrator(template_file, 400,
                                      output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False,
                                      registration_kind='affine')
    registrator.fit_anat(anat_file)
    # Invalid modality, missing t_r, and transform-before-fit must all raise.
    assert_raises_regex(
        ValueError, "Only 'func' and 'perf' ", registrator.fit_modality,
        func_file, 'diffusion')
    assert_raises_regex(
        ValueError, "'t_r' is needed for slice ", registrator.fit_modality,
        func_file, 'func')
    assert_raises_regex(
        ValueError, 'has not been func fitted',
        registrator.transform_modality_like, func_file, 'func')

    # test fit_modality for func
    registrator.fit_modality(func_file, 'func', slice_timing=False)
    registered_func_img = nibabel.load(registrator.registered_func_)
    template_img = nibabel.load(template_file)
    np.testing.assert_array_almost_equal(registered_func_img.affine,
                                         template_img.affine)
    # The registered 4D image must match the template's 3D grid.
    np.testing.assert_array_equal(registered_func_img.shape[:-1],
                                  template_img.shape)

    # test transform_modality for func
    func_like_file = os.path.join(tst.tmpdir, 'func_like.nii.gz')
    empty_img_like(func_file, func_like_file)
    transformed_file = registrator.transform_modality_like(func_like_file,
                                                           'func')
    transformed_img = nibabel.load(transformed_file)
    assert_true(_check_same_fov(transformed_img, nibabel.load(template_file)))

    # test transform then inverse transform brings back to the original image
    inverse_transformed_file = registrator.inverse_transform_towards_modality(
        transformed_file, 'func')
    inverse_transformed_img = nibabel.load(inverse_transformed_file)
    func_like_img = nibabel.load(func_like_file)
    assert_true(_check_same_fov(inverse_transformed_img, func_like_img))
    np.testing.assert_array_equal(inverse_transformed_img.get_data(),
                                  func_like_img.get_data())

    # test inverse transform then transform brings back to the original image
    transformed_file2 = registrator.transform_modality_like(
        inverse_transformed_file, 'func')
    transformed_img2 = nibabel.load(transformed_file2)
    assert_true(_check_same_fov(transformed_img2,
                                transformed_img))
    np.testing.assert_array_equal(transformed_img2.get_data(),
                                  transformed_img.get_data())
@with_setup(tst.setup_tmpdata, tst.teardown_tmpdata)
def test_fit_and_transform_modality_with_perf():
    """Perfusion fit and transform: uses the first func volume as an M0 image."""
    anat_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'anat.nii.gz')
    func_file = os.path.join(os.path.dirname(testing_data.__file__),
                             'func.nii.gz')
    template_file = os.path.join(tst.tmpdir, 'template.nii.gz')
    crop_and_oblique(anat_file, template_file)
    registrator = TemplateRegistrator(template_file, 400,
                                      output_dir=tst.tmpdir,
                                      use_rats_tool=False, verbose=False,
                                      registration_kind='affine')
    registrator.fit_anat(anat_file)
    # Transforming before the perf fit must raise.
    assert_raises_regex(
        ValueError, 'has not been perf fitted',
        registrator.transform_modality_like, func_file, 'perf')

    # Build a 3D M0 image from the first volume of the functional series.
    func_img = nibabel.load(func_file)
    m0_img = nibabel.Nifti1Image(func_img.get_data()[..., 0], func_img.affine)
    m0_file = os.path.join(tst.tmpdir, 'm0.nii.gz')
    m0_img.to_filename(m0_file)

    # test fit_modality for perf
    registrator.fit_modality(m0_file, 'perf')
    assert_true(_check_same_fov(nibabel.load(registrator.registered_perf_),
                                nibabel.load(template_file)))

    # test transform_modality for perf
    m0_like_file = os.path.join(tst.tmpdir, 'm0_like.nii.gz')
    empty_img_like(m0_file, m0_like_file)
    transformed_file = registrator.transform_modality_like(m0_like_file,
                                                           'perf')
    assert_true(_check_same_fov(nibabel.load(transformed_file),
                                nibabel.load(template_file)))
| 44.797619 | 78 | 0.662769 | 921 | 7,526 | 5.040174 | 0.12595 | 0.028436 | 0.030159 | 0.042223 | 0.607712 | 0.55838 | 0.467902 | 0.383455 | 0.358035 | 0.317105 | 0 | 0.008329 | 0.250199 | 7,526 | 167 | 79 | 45.065868 | 0.814283 | 0.042386 | 0 | 0.395522 | 0 | 0 | 0.050188 | 0 | 0 | 0 | 0 | 0 | 0.141791 | 1 | 0.044776 | false | 0 | 0.074627 | 0 | 0.119403 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcbe57aa0aba088b6e7c1f10ce1907df3fa001fe | 8,939 | py | Python | main.py | RabbitFored/responseJSON-bot | 45364296fe63b6db3a89003339d787d0966ae037 | [
"MIT"
] | 5 | 2021-08-11T18:24:53.000Z | 2021-11-16T13:01:41.000Z | main.py | RabbitFored/responseJSON-bot | 45364296fe63b6db3a89003339d787d0966ae037 | [
"MIT"
] | null | null | null | main.py | RabbitFored/responseJSON-bot | 45364296fe63b6db3a89003339d787d0966ae037 | [
"MIT"
] | 2 | 2021-08-10T05:42:14.000Z | 2021-08-12T14:00:24.000Z | from multiprocessing import Process
import botapi
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.handlers import MessageHandler
import json
import database
from pyrogram.errors import (PeerIdInvalid, UserIsBlocked, MessageTooLong)
from pyrogram.types import (InlineQueryResultArticle, InputTextMessageContent,
InlineKeyboardMarkup, InlineKeyboardButton)
from config import apiID, apiHASH, botTOKEN
from pyrogram import filters
async def func(_, __, m):
    """Pyrogram filter: register unseen chats and pass only MTProto-mode users.

    Returns True when the update's user has mode 'mtproto' in the database.
    """
    if m.from_user.is_self:
        return False
    # The update serializes itself to JSON; "_" names the update class.
    json_object = json.loads(f"{m}")
    instance = json_object["_"]
    if instance == "Message":
        user = m.chat.id
        chattype = m.chat.type
    elif instance == "CallbackQuery":
        user = m.message.chat.id
        chattype = m.message.chat.type
    elif instance == "InlineQuery":
        user = m.from_user.id
        chattype = "private"
    else:
        # Fix: previously `user`/`chattype` stayed unbound here and the code
        # below raised NameError. Unknown update types are now rejected.
        print(instance)
        return False
    if not database.user_exist(user, chattype):
        database.scrape(m)
    mode = database.find_mode(user)
    return mode == "mtproto"
# Filter wrapping func(); handlers below only fire for MTProto-mode users.
mode_filter = filters.create(func)
# The bot client itself; credentials come from config.py.
ostrich = Client("ostrich", api_id=apiID, api_hash=apiHASH, bot_token=botTOKEN)
@ostrich.on_message(filters.command(["button"]) & mode_filter)
async def buttons(client, message):
    """/button: reply with a two-row sample inline keyboard."""
    await message.reply_text(
        text=f'''
**Sample Inline buttons:
**''',
        disable_web_page_preview=True,
        reply_markup=InlineKeyboardMarkup([
            [
                InlineKeyboardButton("Button1", callback_data="Button1"),
            ],
            [
                InlineKeyboardButton("Button2", callback_data="Button2"),
            ],
        ]),
        reply_to_message_id=message.message_id)
@ostrich.on_message(filters.command(["help"]) & mode_filter)
async def help(client, message):
    """/help: reply with the usage guide and a support-chat button."""
    await message.reply_text(text=f'''
Here is a detailed guide to use me.
You can use me to get JSON responses of your messages.
**Supports:**
- `Messages`
- `Inline Query`
- `Callback Query`
Use /set to switch between `bot API` and `MTProto` mode and /button to generate sample inline keyboard buttons.''',
                             disable_web_page_preview=True,
                             reply_markup=InlineKeyboardMarkup([[
                                 InlineKeyboardButton(
                                     "Get Help",
                                     url="https://t.me/ostrichdiscussion/"),
                             ]]),
                             reply_to_message_id=message.message_id)
@ostrich.on_message(filters.command(["start"]) & mode_filter)
async def start(client, message):
    """/start: greet the user, offer a help button, and record the chat."""
    await message.reply_text(text=f'''
**Hi {message.from_user.mention}!
I return JSON responses of both bot api and MTProto for your messages.
Hit help to know more about how to use me.
**''',
                             disable_web_page_preview=True,
                             reply_markup=InlineKeyboardMarkup([[
                                 InlineKeyboardButton("HELP",
                                                      callback_data="getHELP"),
                             ]]),
                             reply_to_message_id=message.message_id)
    # Record this chat/user in the database.
    database.scrape(message)
@ostrich.on_message(filters.command(["copy"]))
async def copy(client, message):
    """/copy: re-send (copy) the replied-to message into the same chat."""
    replied = message.reply_to_message
    if replied is None:
        # Fix: /copy sent without replying to a message previously raised
        # AttributeError on `None.chat`; ignore such commands instead.
        return
    await client.copy_message(message.chat.id,
                              replied.chat.id,
                              replied.message_id)
# NOTE: the handler name shadows the built-in `set`; harmless here since the
# module does not use the builtin.
@ostrich.on_message(filters.command(["set"]) & mode_filter)
async def set(client, message):
    """/set: offer buttons to switch between bot API and MTProto modes."""
    await message.reply_text(
        text=f"**Select an option**",
        disable_web_page_preview=True,
        reply_markup=InlineKeyboardMarkup([[
            InlineKeyboardButton("bot API", callback_data="set_botapi"),
        ], [
            InlineKeyboardButton("MTProto", callback_data="set_mtproto"),
        ]]),
        reply_to_message_id=message.message_id)
@ostrich.on_message(mode_filter)
async def new_message(client, message):
    """Reply with the message's own MTProto JSON; upload a file when too long."""
    json_object = json.loads(f"{message}")
    formatted = json.dumps(json_object, indent=4)
    try:
        await message.reply_text(
            f"```{formatted}```",
            disable_web_page_preview=True,
            disable_notification=True,
        )
    except MessageTooLong:
        # Telegram rejects over-long messages; ship the JSON as a document.
        # Fix: the file handle was previously left open (no close on the
        # success path until after send); `with` guarantees it is closed.
        with open("json.txt", "w+") as file:
            file.write(formatted)
        await client.send_document(message.chat.id,
                                   document="json.txt",
                                   caption="responseJSONbot",
                                   disable_notification=True)
@ostrich.on_chosen_inline_result(mode_filter)
async def inline_result(client, inline_query):
    """PM the chosen-inline-result JSON to users who are in MTProto mode."""
    mode = database.find_mode(inline_query.from_user.id)
    if mode != "mtproto":
        # Bug fix: the original logged `.id.first_name`, which raised
        # AttributeError (ints have no first_name) whenever this ran.
        print(
            f"ignoring non mtproto request by user {inline_query.from_user.id}"
        )
        return
    json_object = json.loads(f"{inline_query}")
    formatted = json.dumps(json_object, indent=4)
    try:
        await client.send_message(
            chat_id=inline_query.from_user.id,
            text=f"```{formatted}```",
            # parse_mode=,
            disable_web_page_preview=True,
            disable_notification=True,
            # reply_to_message_id=,
        )
    except MessageTooLong:
        with open("json.txt", "w+") as json_file:
            json_file.write(formatted)
        # Bug fix: Client.send_document requires a chat_id (the original
        # omitted it) and does not accept `quote`, which is an argument of
        # Message.reply_* helpers, not of Client methods.
        await client.send_document(inline_query.from_user.id,
                                   document="json.txt",
                                   caption="responseJSONbot",
                                   disable_notification=True)
@ostrich.on_inline_query(mode_filter)
async def inline_query(client, inline_query):
    """Answer inline queries with the raw query JSON plus an About card."""
    json_article = InlineQueryResultArticle(
        title="MTProto API response",
        input_message_content=InputTextMessageContent(f"{inline_query}"),
        description="@responseJSONbot",
        thumb_url="https://i.imgur.com/JyxrStE.png")
    about_article = InlineQueryResultArticle(
        title="About",
        input_message_content=InputTextMessageContent(
            "**Response JSON BOT - @ theostrich**"),
        url="https://t.me/theostrich",
        description="About bot",
        thumb_url="https://imgur.com/DBwZ2y9.png",
        reply_markup=InlineKeyboardMarkup([[
            InlineKeyboardButton(
                "Updates",
                url="https://t.me/ostrichdiscussion")
        ]]))
    await inline_query.answer(results=[json_article, about_article])
@ostrich.on_callback_query(mode_filter)
async def cb_handler(client, query):
    """Route callback queries: mode switch, help screen, close, or echo JSON.

    Any callback data that is not one of the known actions is echoed back
    to the user as pretty-printed JSON (document fallback when too long).
    """
    if query.data.startswith('set'):
        await query.answer()
        user = query.message.reply_to_message.chat.id
        mode = query.data.split("_")[1]
        database.set_mode(user, mode)
        await query.message.reply_text(
            text=f"**Mode set to {mode} successfully**")
    elif query.data == "getHELP":
        await query.answer()
        # Bug fix: the original text contained the broken markdown span
        # "``|bot API```"; it now renders as a proper ```bot API``` span.
        await query.message.edit_text(
            text=f'''
Here is a detailed guide to use me.
You can use me to get JSON responses of your messages.
**Supports:**
- ```Messages```
- ```Inline Query```
- ```Callback Query```
Use /set to switch between ```bot API``` and ```MTProto``` mode and /button to generate sample inline keyboard buttons.
''',
            reply_markup=InlineKeyboardMarkup([[
                InlineKeyboardButton("Get Help",
                                     url="https://t.me/ostrichdiscussion"),
            ]]),
            disable_web_page_preview=True)
    elif query.data == "closeInline":
        await query.answer("done")
        await query.message.delete()
    else:
        await query.answer()
        # Callback queries fired from inline messages carry no `message`.
        if query.message:
            user = query.message.chat.id
        else:
            user = query.from_user.id
        json_object = json.loads(f"{query}")
        formatted = json.dumps(json_object, indent=4)
        try:
            await client.send_message(user, text=f"```{formatted}```")
        except MessageTooLong:
            # `with` closes the handle even on failure; also avoids
            # shadowing the `file` builtin.
            with open("json.txt", "w+") as json_file:
                json_file.write(formatted)
            await client.send_document(
                user,
                document="json.txt",
                caption="responseJSONbot",
                disable_notification=True,
            )
if __name__ == '__main__':
    # Run the pyrogram (MTProto) bot and the bot-API bot in parallel
    # processes; start both, then wait for both to finish.
    workers = [Process(target=ostrich.run), Process(target=botapi.main)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
| 33.859848 | 119 | 0.571205 | 900 | 8,939 | 5.498889 | 0.204444 | 0.026672 | 0.024247 | 0.029097 | 0.481309 | 0.409982 | 0.404526 | 0.387149 | 0.292382 | 0.25298 | 0 | 0.001655 | 0.323974 | 8,939 | 263 | 120 | 33.988593 | 0.817309 | 0.003804 | 0 | 0.337963 | 0 | 0.009259 | 0.175466 | 0.007414 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.050926 | 0 | 0.064815 | 0.009259 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcc07acfd0ff24e49be1cb1b14f2f8ee025b2537 | 4,301 | py | Python | datastax/trees/heap_tree.py | warmachine028/datastax | 2898b517dee471a240a10e81bcfafee5dce615ca | [
"MIT"
] | 5 | 2021-12-25T17:08:39.000Z | 2022-03-18T16:22:57.000Z | datastax/trees/heap_tree.py | warmachine028/datastax | 2898b517dee471a240a10e81bcfafee5dce615ca | [
"MIT"
] | 1 | 2021-12-28T05:45:34.000Z | 2021-12-28T21:31:50.000Z | datastax/trees/heap_tree.py | warmachine028/datastax | 2898b517dee471a240a10e81bcfafee5dce615ca | [
"MIT"
] | null | null | null | # Heap Tree Implementation
from __future__ import annotations
import warnings
from typing import Optional, Any
from datastax.errors import DeletionFromEmptyTreeWarning
from datastax.trees.private_trees.binary_tree import BinaryTree, TreeNode
class HeapNode(TreeNode):
    """Binary-tree node extended with parent and previous-leaf links.

    ``parent`` lets `HeapTree._heapify` bubble values upward;
    ``prev_leaf`` lets `HeapTree.heappop` step back to the previous
    insertion point when a level of the tree is exhausted.
    """

    def __init__(self, data: Any,
                 left: Optional[HeapNode] = None,
                 right: Optional[HeapNode] = None):
        super().__init__(data, left, right)
        self.parent: Optional[HeapNode] = None  # set by HeapTree.heappush
        self.prev_leaf: Optional[HeapNode] = None  # set by HeapTree.heappush
class HeapTree(BinaryTree):
    """Max-heap implemented as a linked binary tree (not an array).

    ``_leaf`` tracks the insertion frontier: the node whose free child
    slot receives the next pushed value.
    """

    def __init__(self, array: list[Any] = None, root: HeapNode = None):
        self._root: Optional[HeapNode] = root
        self._leaf: Optional[HeapNode] = root
        super().__init__(array, root)

    def _construct(self, array: list[Any] = None) -> Optional[HeapTree]:
        # Build the heap by pushing items one at a time; empty input or a
        # leading None yields no tree at all.
        if not array or array[0] is None:
            return None
        for item in array:
            try:
                self.heappush(item)
            except TypeError as error:
                raise error
        return self

    @property
    def leaf(self):
        # Current insertion point (parent of the next pushed node).
        return self._leaf

    # Function to push an element inside a tree
    def heappush(self, data: Any) -> None:
        root = self.root
        if data is None:
            return
        node = HeapNode(data)
        if root is None:  # Heap Tree is Empty
            self._root = self._leaf = node
        # Heap tree has nodes. So inserting new node
        # in the left of leftmost leaf node
        elif self.leaf and self.leaf.left is None:
            self.leaf.left = node
            node.parent = self.leaf
        else:
            if not self.leaf:
                return
            self.leaf.right = node
            previous_leaf = self.leaf
            node.parent = self.leaf
            # Both child slots are now full: advance the insertion frontier
            # and remember where it came from for heappop's backtracking.
            self._update_leaf(self.leaf)
            self.leaf.prev_leaf = previous_leaf
        self._heapify(node)

    # Private function to convert a subtree to heap
    def _heapify(self, node: HeapNode) -> None:
        # Bubble the node's value upward while it exceeds its parent's.
        if node.parent and node.parent.data < node.data:
            node.parent.data, node.data = node.data, node.parent.data
            self._heapify(node.parent)

    # Private Helper method of heappush function to
    # update rightmost node in deepest level
    def _update_leaf(self, node: HeapNode) -> None:
        # reach extreme left of next level if current level is full
        if node.parent is None:
            self._leaf = node
        elif node.parent.left is node:
            self._leaf = node.parent.right
        elif node.parent.right is node:
            self._update_leaf(node.parent)
        while self.leaf and self.leaf.left:
            self._leaf = self.leaf.left

    # Function to pop the largest element in the tree
    def heappop(self) -> Optional[Any]:
        if not self.root:
            # NOTE(review): the two adjacent literals concatenate with no
            # space ("...whentree is..."); left untouched, it is runtime text.
            warnings.warn(
                "Deletion Unsuccessful. Can't delete when"
                "tree is Already Empty", DeletionFromEmptyTreeWarning
            )
            return None
        deleted_data = self.root.data
        if self.root is self.leaf and not any(
                [self.leaf.left, self.leaf.right]):
            # Last remaining node: the tree becomes empty.
            self._root = self._leaf = None
        else:
            # Replace the root's value with the newest (rightmost) leaf
            # value, delete that leaf, then restore the heap property.
            if self.leaf.right and self.root:
                self.root.data = self.leaf.right.data
                self.leaf.right = None
                self._shift_up(self.root)
            elif self.leaf.left and self.root:
                self.root.data = self.leaf.left.data
                self.leaf.left = None
                self._shift_up(self.root)
            else:  # We have reached the end of a level
                self._leaf = self.leaf.prev_leaf
                return self.heappop()
        return deleted_data

    # Private helper method of heappop function
    def _shift_up(self, node: HeapNode) -> None:
        # Sift the (possibly too-small) value at ``node`` downward by
        # swapping it with its largest child until order is restored.
        root = node
        left_child = root.left
        right_child = root.right
        if left_child and left_child.data > root.data:
            root = left_child
        if right_child and right_child.data > root.data:
            root = right_child
        if root is node:
            return
        root.data, node.data = node.data, root.data
        self._shift_up(root)

    def insert(self, item: Any):
        # Alias kept for BinaryTree's generic insertion interface.
        self.heappush(item)
| 34.408 | 73 | 0.589863 | 538 | 4,301 | 4.592937 | 0.204461 | 0.100364 | 0.038851 | 0.0259 | 0.161068 | 0.108458 | 0.025091 | 0.025091 | 0 | 0 | 0 | 0.000348 | 0.332016 | 4,301 | 124 | 74 | 34.685484 | 0.859729 | 0.110672 | 0 | 0.141414 | 0 | 0 | 0.016002 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.10101 | false | 0 | 0.050505 | 0.010101 | 0.262626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcc09e2543fdf968d7d7db4fd1316f5b3e4dc932 | 7,443 | py | Python | pretix_mpesa/payment.py | enyachoke/pretix-mpesa | 615368b04226e72cea3b1a16002001e32d0435bb | [
"Apache-2.0"
] | 2 | 2018-07-10T15:55:47.000Z | 2020-11-28T20:42:53.000Z | pretix_mpesa/payment.py | enyachoke/pretix-mpesa | 615368b04226e72cea3b1a16002001e32d0435bb | [
"Apache-2.0"
] | null | null | null | pretix_mpesa/payment.py | enyachoke/pretix-mpesa | 615368b04226e72cea3b1a16002001e32d0435bb | [
"Apache-2.0"
] | 1 | 2021-05-27T15:20:18.000Z | 2021-05-27T15:20:18.000Z | import json
import logging
import urllib.parse
import phonenumbers
import math
from pympesa import Pympesa
from django import forms
from django.contrib import messages
from django.core import signing
from django.template.loader import get_template
from django.utils.translation import ugettext as __, ugettext_lazy as _
from django.utils.functional import cached_property
from collections import OrderedDict
from django.http import HttpRequest
from pretix.base.decimal import round_decimal
from pretix.base.models import Order, Quota, RequiredAction,OrderPayment, OrderRefund
from pretix.base.payment import BasePaymentProvider, PaymentException
from pretix.base.services.mail import SendMailException
from pretix.base.services.orders import mark_order_paid, mark_order_refunded
from pretix.helpers.urls import build_absolute_uri as build_global_uri
from pretix.multidomain.urlreverse import build_absolute_uri
from pretix.plugins.paypal.models import ReferencedPayPalObject
from pretix.presale.views.cart import (
cart_session, create_empty_cart_id, get_or_create_cart_id,
)
from .tasks import send_stk
logger = logging.getLogger('pretix.plugins.mpesa')
class Mpesa(BasePaymentProvider):
    """Pretix payment provider charging orders via Safaricom M-Pesa STK push."""

    identifier = 'mpesa'
    verbose_name = _('Mpesa')
    payment_form_fields = OrderedDict([
    ])

    @property
    def abort_pending_allowed(self):
        # Pending M-Pesa payments cannot be aborted by the customer.
        return False

    @cached_property
    def cart_session(self):
        # NOTE(review): relies on self.request having been assigned first
        # (done in checkout_prepare) -- confirm no other call path hits this.
        return cart_session(self.request)

    @property
    def settings_form_fields(self):
        """Organizer settings: Safaricom credentials, shortcode, callback URL."""
        d = OrderedDict(
            [
                ('endpoint',
                 forms.ChoiceField(
                     label=_('Endpoint'),
                     initial='sandbox',
                     choices=(
                         ('production', 'Live'),
                         ('sandbox', 'Sandbox'),
                     ),
                 )),
                ('safaricom_consumer_key',
                 forms.CharField(
                     label=_('Safaricom Consumer Key'),
                     required=True,
                     help_text=_('<a target="_blank" rel="noopener" href="{docs_url}">{text}</a>').format(
                         text=_('Go to the safaricom developer portal to obtain developer keys a get guidance on going live'),
                         docs_url='https://developer.safaricom.co.ke'
                     )
                 )),
                ('safaricom_consumer_secret',
                 forms.CharField(
                     label=_('Safaricom Consumer Secret'),
                     required=True,
                 )),
                ('mpesa_shortcode',
                 forms.CharField(
                     label=_('Lipa na Mpesa Online shortcode'),
                     required=True,
                     help_text=_('Apply for this from safaricom')
                 )),
                ('encryption_password',
                 forms.CharField(
                     label=_('Encription Password'),
                     required=True,
                     help_text=_('The password for encrypting the request')
                 )),
                ('stk_callback_url',
                 forms.CharField(
                     label=_('Mpesa STK Callback'),
                     required=True,
                     help_text=_('This is the callback url for mpesa stk')
                 )),
                ('mpesa_phone_number_field_required',
                 forms.BooleanField(
                     label=_('Will the mpesa phone number be required to place an order'),
                     help_text=_("If this is not checked, entering a mpesa phone number is optional and the mpesa payment my not work."),
                     required=False,
                 )),
            ] + list(super().settings_form_fields.items())
        )
        return d

    def checkout_confirm_render(self, request) -> str:
        """
        Returns the HTML that should be displayed when the user selected this provider
        on the 'confirm order' page.
        """
        template = get_template('pretix_mpesa/checkout_payment_confirm.html')
        ctx = {'request': request, 'event': self.event, 'settings': self.settings}
        return template.render(ctx)

    def order_pending_render(self, request, order) -> str:
        # HTML shown while the STK-push payment is still pending.
        template = get_template('pretix_mpesa/pending.html')
        ctx = {'request': request, 'event': self.event, 'settings': self.settings, 'order': order}
        return template.render(ctx)

    def payment_form_render(self, request) -> str:
        # HTML for the payment-method selection step.
        template = get_template('pretix_mpesa/checkout_payment_form.html')
        ctx = {'request': request, 'event': self.event, 'settings': self.settings}
        return template.render(ctx)

    def checkout_prepare(self, request, cart):
        """Validate the M-Pesa number and stash it (as 254...) in the session."""
        self.request = request
        mpesa_phone_number = self.cart_session.get('contact_form_data', {}).get('mpesa_phone_number', '')
        try:
            # Numbers are parsed against the Kenyan (KE) region.
            parsed_num = phonenumbers.parse(mpesa_phone_number, 'KE')
        except phonenumbers.NumberParseException:
            messages.error(request, _('Please check to confirm that you entered the mpesa phone number and that it was a valid phone number'))
            return False
        else:
            if phonenumbers.is_valid_number(parsed_num):
                # Normalize to international format without the leading '+'.
                request.session['mpesa_phone_number'] = '254' + str(parsed_num.national_number)
                return True
            else:
                messages.error(request, _('The Mpesa number is not a valid phone number'))
                return False

    def payment_is_valid_session(self, request):
        return True

    def order_can_retry(self, order):
        return self._is_still_available(order=order)

    def execute_payment(self, request: HttpRequest, payment: OrderPayment):
        """
        Will be called if the user submitted his order successfully to initiate the
        payment process.
        It should return a custom redirct URL, if you need special behavior, or None to
        continue with default behavior.
        On errors, it should use Django's message framework to display an error message
        to the user (or the normal form validation error messages).
        :param payment: the OrderPayment whose amount is charged via STK push
        """
        kwargs = {}
        if request.resolver_match and 'cart_namespace' in request.resolver_match.kwargs:
            kwargs['cart_namespace'] = request.resolver_match.kwargs['cart_namespace']
        parsed_num = request.session.get('mpesa_phone_number', '')
        logger.debug(parsed_num)
        mode = self.settings.get('endpoint')
        consumer_key = self.settings.get('safaricom_consumer_key')
        consumer_secret = self.settings.get('safaricom_consumer_secret')
        business_short_code = self.settings.get('mpesa_shortcode')
        password = self.settings.get('encryption_password')
        # M-Pesa amounts are whole shillings; round up so we never undercharge.
        amount = math.ceil(payment.amount)
        callback_url = self.settings.get('stk_callback_url')
        logger.debug(amount)
        logger.debug(callback_url)
        # Fire the STK push asynchronously via Celery; the callback URL
        # receives the payment result later.
        send_stk.apply_async(kwargs={'consumer_key': consumer_key, 'consumer_secret': consumer_secret,
                                     'business_short_code': business_short_code,
                                     'password': password, 'amount': str(amount), 'phone': parsed_num, 'order_number': str(payment.id),
                                     'callback_url': callback_url, 'mode': mode})
        return None
| 42.775862 | 142 | 0.616015 | 800 | 7,443 | 5.5375 | 0.3 | 0.027314 | 0.032506 | 0.018059 | 0.138826 | 0.090068 | 0.0693 | 0.048984 | 0.048984 | 0.048984 | 0 | 0.000573 | 0.296655 | 7,443 | 173 | 143 | 43.023121 | 0.845654 | 0.065296 | 0 | 0.216783 | 0 | 0.006993 | 0.204844 | 0.03808 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06993 | false | 0.034965 | 0.167832 | 0.027972 | 0.34965 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcc1840932d14855ac81775c732f4766b613aa60 | 1,927 | py | Python | tests/test_api_bot_snake.py | gatarelib/site | 81c71a58b949cb346e6af95d2cc3a7c4a71f36fe | [
"MIT"
] | null | null | null | tests/test_api_bot_snake.py | gatarelib/site | 81c71a58b949cb346e6af95d2cc3a7c4a71f36fe | [
"MIT"
] | null | null | null | tests/test_api_bot_snake.py | gatarelib/site | 81c71a58b949cb346e6af95d2cc3a7c4a71f36fe | [
"MIT"
] | null | null | null | """Tests the `/api/bot/snake_` endpoints."""
from tests import SiteTest, app
class TestSnakeFactsAPI(SiteTest):
    """GET method - get snake fact"""

    def test_snake_facts(self):
        # A fact is served with HTTP 200 and a plain string payload.
        subdomain = app.config['API_SUBDOMAIN']
        headers = app.config['TEST_HEADER']
        response = self.client.get('/bot/snake_facts', subdomain, headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(type(response.json), str)
class TestSnakeIdiomAPI(SiteTest):
    """GET method - get snake idiom"""

    def test_snake_idiom(self):
        # An idiom is served with HTTP 200 and a plain string payload.
        subdomain = app.config['API_SUBDOMAIN']
        headers = app.config['TEST_HEADER']
        response = self.client.get('/bot/snake_idioms', subdomain, headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(type(response.json), str)
class TestSnakeQuizAPI(SiteTest):
    """GET method - get snake quiz"""

    def test_snake_quiz(self):
        # A quiz is served with HTTP 200 and a JSON object payload.
        subdomain = app.config['API_SUBDOMAIN']
        headers = app.config['TEST_HEADER']
        response = self.client.get('/bot/snake_quiz', subdomain, headers=headers)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(type(response.json), dict)
class TestSnakeNameAPI(SiteTest):
    """GET method - get a single snake name, or all of them."""

    def _fetch(self, path):
        # Shared request helper for both variants of the endpoint.
        return self.client.get(
            path,
            app.config['API_SUBDOMAIN'],
            headers=app.config['TEST_HEADER'],
        )

    def test_snake_names(self):
        # Single-name variant returns a JSON object.
        response = self._fetch('/bot/snake_names')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(type(response.json), dict)

    def test_snake_names_all(self):
        # get_all=True variant returns a JSON list of names.
        response = self._fetch('/bot/snake_names?get_all=True')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(type(response.json), list)
| 29.646154 | 63 | 0.613908 | 218 | 1,927 | 5.275229 | 0.224771 | 0.078261 | 0.052174 | 0.095652 | 0.733913 | 0.668696 | 0.668696 | 0.582609 | 0.516522 | 0.516522 | 0 | 0.010526 | 0.260509 | 1,927 | 64 | 64 | 30.109375 | 0.796491 | 0.091853 | 0 | 0.533333 | 0 | 0 | 0.12355 | 0.016821 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.111111 | false | 0 | 0.022222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcc6c86765fcf633c28691ba399ab19c5fd57779 | 461 | py | Python | Addresses/checkboxes.py | bruceqqqqq/Tkinter-Course | 5c4260a1b6aa45c5a7c406f940a57d778afce20b | [
"Apache-2.0"
] | null | null | null | Addresses/checkboxes.py | bruceqqqqq/Tkinter-Course | 5c4260a1b6aa45c5a7c406f940a57d778afce20b | [
"Apache-2.0"
] | null | null | null | Addresses/checkboxes.py | bruceqqqqq/Tkinter-Course | 5c4260a1b6aa45c5a7c406f940a57d778afce20b | [
"Apache-2.0"
] | null | null | null | from tkinter import *
from PIL import ImageTk, Image
# Demo window: a Checkbutton whose On/Off value is appended as a new
# label every time the button is pressed.
root = Tk()
root.title('Batman')
root.iconbitmap('images/batman.ico')
root.geometry('400x400')

checkbox_state = StringVar()


def show():
    # Append a label showing the checkbutton's current value.
    Label(root, text=checkbox_state.get()).pack()


check = Checkbutton(root, text='Check this box', variable=checkbox_state,
                    onvalue='On', offvalue='Off')
check.deselect()
check.pack()

Button(root, text="Show Selection", command=show).pack()

root.mainloop()
| 20.043478 | 92 | 0.707158 | 62 | 461 | 5.258065 | 0.629032 | 0.07362 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014888 | 0.125813 | 461 | 22 | 93 | 20.954545 | 0.794045 | 0 | 0 | 0 | 0 | 0 | 0.136659 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcc79406b9832714fea2809a9345d147398abb57 | 38,295 | py | Python | model/interact/modeling.py | shanestorks/piglet | be8ab482503a94a61667bef66e8aac8584ee5a9c | [
"MIT"
] | 48 | 2021-06-02T01:44:06.000Z | 2022-01-19T05:10:54.000Z | model/interact/modeling.py | shanestorks/piglet | be8ab482503a94a61667bef66e8aac8584ee5a9c | [
"MIT"
] | 2 | 2021-07-07T21:53:37.000Z | 2022-01-11T20:46:31.000Z | model/interact/modeling.py | shanestorks/piglet | be8ab482503a94a61667bef66e8aac8584ee5a9c | [
"MIT"
] | 7 | 2021-06-02T14:41:05.000Z | 2022-03-11T21:37:01.000Z | import tensorflow as tf
import sys
sys.path.append('../../')
from model.model_utils import bfloat16_getter, get_shape_list, gelu, dropout, layer_norm, sequence_xe_loss, \
construct_host_call, get_assignment_map_from_checkpoint, _sequence_xe_loss_noreduce, stack_jagged, \
get_ltr_attention_mask
from data.thor_constants import THOR_OBJECT_TYPE_TO_IND, THOR_AFFORDANCES, THOR_ACTION_TYPE_TO_IND, \
load_instance_attribute_weights
from model.transformer import attention_layer, residual_mlp_layer, _argmax_sample, residual_mlp
import math
from model import optimization
from model.neat_config import NeatConfig
from model.interact.dataloader import names_and_arities
def embed_with_embedding_table(x, embedding_table, flatten=False):
    """
    Look up embeddings for an int tensor via a one-hot matmul.

    Out-of-range ids (e.g. -1) one-hot to all zeros and therefore embed
    to zero vectors, so they are effectively ignored.
    :param x: int id tensor of arbitrary shape.
    :param embedding_table: [vocab_size, embedding_dim] table.
    :param flatten: if True, return [prod(x.shape), embedding_dim] rather
        than reshaping back to x.shape + [embedding_dim].
    :return: the embedded tensor.
    """
    orig_shape = get_shape_list(x)
    vocab_size, embedding_dim = get_shape_list(embedding_table, 2)

    # tf.float32_ref tables exist, so only trust float32/bfloat16 dtypes;
    # anything else falls back to float32 for the one-hot.
    oh_dtype = embedding_table.dtype if embedding_table.dtype in (
        tf.float32, tf.bfloat16) else tf.float32
    flat_ids = tf.reshape(x, [-1])
    embedded = tf.matmul(tf.one_hot(flat_ids, dtype=oh_dtype, depth=vocab_size),
                         embedding_table)

    if flatten:
        return embedded
    return tf.reshape(embedded, orig_shape + [embedding_dim])
def embed_2d_with_embedding_table(x, embedding_table, flatten=False):
    """
    Embed each affordance slot using its own per-affordance vocabulary.

    :param x: [..., num_affordances] int ids.
    :param embedding_table: [num_affordances, vocab_size, hidden_size]
        stacked per-affordance embedding tables.
    :param flatten: if True, return [-1, num_affordances, hidden_size]
        rather than restoring x's leading dimensions.
    :return: the embedded tensor.
    """
    orig_shape = get_shape_list(x)
    num_affordances, vocab_size, hidden_size = get_shape_list(embedding_table, 3)

    flat_oh = tf.one_hot(tf.reshape(x, [-1, num_affordances]),
                         depth=vocab_size, dtype=tf.float32)
    # Per-affordance lookup: slot a of each example uses table slice a.
    embedded = tf.einsum('bav,avh->bah', flat_oh, embedding_table)

    if flatten:
        return embedded
    return tf.reshape(embedded, orig_shape + [hidden_size])
def summarize_transformer(object_embs, gt_affordances_embed, affordance_name_embed, num_layers=3,
                          dropout_prob=0.1, initializer_range=0.02):
    """
    Use a transformer to summarize the delta between the GT affordances and the prototype that we'd expect from the object
    :param object_embs: [batch_size, h]
    :param gt_affordances_embed: [batch_size, num_affordances, h]
    :param affordance_name_embed: [num_affordances, h]
    :param num_layers: number of self-attention layers
    :param dropout_prob: hidden/attention dropout probability
    :param initializer_range: stddev for weight initialization
    :return: [batch_size, h] fixed-size representations for each of the objects!
    """
    batch_size, hidden_size = get_shape_list(object_embs, 2)
    batch_size2, num_affordances, h2 = get_shape_list(gt_affordances_embed, 3)
    num_affordances3, h3 = get_shape_list(affordance_name_embed, 2)
    # hidden_size must be divisible by the 64-dim attention heads below.
    assert hidden_size % 64 == 0
    assert hidden_size == h2
    assert h2 == h3

    # [POOL_IDX, OBJECT_NAME, ... attrs ... ]
    seq_length = num_affordances + 1

    with tf.variable_scope("summarize_transformer"):
        with tf.variable_scope('embeddings'):
            # starting_embed = tf.get_variable(
            #     name='pooler',
            #     shape=[hidden_size],
            #     initializer=tf.truncated_normal_initializer(stddev=initializer_range),
            # )
            # Sequence = [object embedding] + [affordance value + name embeds].
            ctx = layer_norm(tf.concat([
                # tf.tile(starting_embed[None, None], [batch_size, 1, 1]),
                object_embs[:, None],
                gt_affordances_embed + affordance_name_embed[None],
            ], 1), name='embed_norm')

        hidden_state = tf.reshape(ctx, [batch_size * seq_length, -1])

        # No masks bc all embeddings are used
        mask = tf.ones((seq_length, seq_length), dtype=tf.float32)
        for layer_idx in range(num_layers):
            with tf.variable_scope(f'layer{layer_idx:02d}'):
                # [batch_size * seq_length, hidden_size]
                attention_output, _ = attention_layer(
                    hidden_state,
                    mask,
                    batch_size=batch_size,
                    seq_length=seq_length,
                    size_per_head=64,
                    num_attention_heads=hidden_size // 64,
                    initializer_range=initializer_range,
                    hidden_dropout_prob=dropout_prob,
                    attention_probs_dropout_prob=dropout_prob,
                )
                hidden_state = residual_mlp_layer(hidden_state + attention_output,
                                                  intermediate_size=hidden_size * 4,
                                                  hidden_dropout_prob=dropout_prob)

        # Pool by taking position 0 (the object-embedding slot).
        h0 = tf.reshape(hidden_state, [batch_size, seq_length, -1])[:, 0]
    return h0
def expand_transformer(object_full_state, gt_affordances_embed, affordance_ctx_name_embed,
                       affordance_trg_name_embed, num_layers=3, dropout_prob=0.1, initializer_range=0.02,
                       random_perms=True, reuse=False, layer_cache=None):
    """
    Use a transformer to predict what the actual affordances of the object are, from the state
    # The order will be
    (object hidden state)
    (nullctx, nullctxname, pred0name) -> pred0
    (gt0, gt0name, pred1name) -> pred1
    ...
    (gt{n-1}, gt{n-1}name, predNname) -> predN
    :param object_full_state: [batch_size, h]
    :param gt_affordances_embed: [batch_size, num_affordances, h]
    :param affordance_ctx_name_embed: [num_affordances, h]
    :param affordance_trg_name_embed: [num_affordances, h]
    :param num_layers: number of left-to-right attention layers
    :param random_perms: Randomly permute
    :param reuse: reuse the variable scope (for decoding steps)
    :param layer_cache: cached per-layer key/values for incremental decoding;
        when given, only the next single position is processed.
    :return: hidden size of [batch_size, num_affordances, h]
    """
    batch_size, hidden_size = get_shape_list(object_full_state, 2)
    batch_size2, num_affordances, h2 = get_shape_list(gt_affordances_embed, 3)
    num_affordances3, h3 = get_shape_list(affordance_ctx_name_embed, 2)
    num_affordances4, h4 = get_shape_list(affordance_trg_name_embed, 2)
    assert hidden_size % 64 == 0
    assert hidden_size == h2
    assert h2 == h3

    # [OBJECT_NAME, ... attrs ... ]
    seq_length = num_affordances + 1
    with tf.variable_scope("expand_transformer", reuse=reuse):
        # Random permutation of the attribute prediction order (XLNet-style);
        # otherwise use the canonical attribute order.
        if random_perms:
            idxs = tf.argsort(tf.random.normal((batch_size, num_affordances)), 1)
        else:
            idxs = tf.tile(tf.range(num_affordances, dtype=tf.int32)[None], [batch_size, 1])

        with tf.variable_scope('embeddings'):
            null_ctx_embed = tf.get_variable(
                name='nullctx',
                shape=[hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=initializer_range),
            )
            # Context = null placeholder followed by the gt affordances,
            # shifted right so position i only sees predictions < i.
            ctx_embeds = tf.concat([
                tf.tile(null_ctx_embed[None, None], [batch_size, 1, 1]),
                tf.gather(gt_affordances_embed + affordance_ctx_name_embed[None], idxs[:, :-1], batch_dims=1),
            ], 1)
            trg_name_embeds = tf.gather(tf.tile(affordance_trg_name_embed[None], [batch_size, 1, 1]),
                                        idxs, batch_dims=1)
            ctx = layer_norm(tf.concat([
                object_full_state[:, None],
                ctx_embeds + trg_name_embeds,
            ], 1), name='embed_norm')

        # don't forget to wear a mask when you go outside!
        if layer_cache is not None:
            # Shrink hidden state and mask accordingly
            cache_length = get_shape_list(layer_cache, expected_rank=6)[-2]
            seq_length = 1
            ctx = ctx[:, -seq_length:]
            mask = get_ltr_attention_mask(1, 1 + cache_length, dtype=ctx.dtype)
        else:
            mask = get_ltr_attention_mask(seq_length, seq_length, dtype=ctx.dtype)

        hidden_state = tf.reshape(ctx, [batch_size * seq_length, -1])
        new_kvs = []
        for layer_idx in range(num_layers):
            with tf.variable_scope(f'layer{layer_idx:02d}'):
                # [batch_size * seq_length, hidden_size]
                attention_output, new_kv = attention_layer(
                    hidden_state,
                    mask,
                    batch_size=batch_size,
                    seq_length=seq_length,
                    size_per_head=64,
                    num_attention_heads=hidden_size // 64,
                    initializer_range=initializer_range,
                    hidden_dropout_prob=dropout_prob,
                    attention_probs_dropout_prob=dropout_prob,
                    do_cache=True,
                    cache=layer_cache[:, layer_idx] if layer_cache is not None else None,
                )
                new_kvs.append(new_kv)
                hidden_state = residual_mlp_layer(hidden_state + attention_output,
                                                  intermediate_size=hidden_size * 4,
                                                  hidden_dropout_prob=dropout_prob)

        # [batch_size, num_attributes, H]
        if layer_cache is None:
            # Undo the permutation (and drop position 0, the object slot) so
            # outputs line up with the canonical attribute order.
            hidden_states_per_attr = tf.gather(tf.reshape(hidden_state, [batch_size, seq_length, -1]),
                                               tf.argsort(idxs, 1) + 1, batch_dims=1)
        else:
            hidden_states_per_attr = hidden_state[:, None]
    return hidden_states_per_attr, tf.stack(new_kvs, axis=1)
class StateChangePredictModel(object):
    def __init__(self, config: NeatConfig, is_training, object_types):
        """
        A model to predict what happens to some objects when you apply an action
        :param config: NeatConfig; ``config.model`` supplies hidden_size,
            dropout_prob, activation and other hyperparameters.
        :param is_training: bool; enables dropout when True.
        :param object_types: [batch_size, num_objects, (pre,post) aka 2]
            (already flattened to [batch_size, 4] by the data pipeline).
        """
        self.config = config
        self.hidden_size = config.model['hidden_size']
        self.is_training = is_training
        if is_training:
            self.dropout_prob = config.model.get('dropout_prob', 0.1)
            tf.logging.info("Is training -> dropout={:.3f}".format(self.dropout_prob))
        else:
            # No dropout at eval/inference time.
            self.dropout_prob = 0.0
        self.activation_fn = tf.nn.tanh if config.model.get('activation', 'tanh') == 'tanh' else tf.identity

        # First embed everything, some of these are static.
        with tf.variable_scope('embeddings'):
            # 1. Embed everything
            object_embedding_table = tf.get_variable(
                name='object_embs',
                shape=[len(THOR_OBJECT_TYPE_TO_IND), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )

            # Technically we assume as input
            # [batch_size, num_objects (2), pre post (2)]
            # However those last two dimensions were flattened into [batch_size, 4]
            # Now we're flattening into [batch_size * 4]
            self.batch_size, self.num_objects = get_shape_list(object_types, 2)
            assert self.num_objects == 4
            self.object_embed = embed_with_embedding_table(object_types, object_embedding_table,
                                                           flatten=True)

            # One embedding table per affordance; affordances whose arity
            # equals the object vocabulary reuse the object table.
            affordance_embed_table = []
            for i, (affordance_name, a) in enumerate(names_and_arities):
                if a == len(THOR_OBJECT_TYPE_TO_IND):
                    tf.logging.info(f"For {affordance_name}: i'm copying the object embedding table")
                    affordance_embed_table.append(object_embedding_table)
                else:
                    affordance_embed_table.append(tf.get_variable(
                        name=f'{affordance_name}',
                        shape=[max(a, 2), self.hidden_size],
                        initializer=tf.truncated_normal_initializer(stddev=0.02),
                    ))
            # [num_affordances, vocab_size, hidden_size]
            self.affordance_embed_table, self.affordance_embed_table_mask = stack_jagged(affordance_embed_table, 0)
            self.num_affordances, self.affordance_vocab_size, _hsz = get_shape_list(self.affordance_embed_table, 3)
            tf.logging.info(f"Affordance embed table: ({self.num_affordances},{self.affordance_vocab_size},{_hsz})")

            # Per-affordance "name" embeddings, separate for the decoder
            # target slots and the encoder/context slots.
            self.affordance_emb_trg = tf.get_variable(
                name='affordance_embs_trg',
                shape=[len(names_and_arities), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
            self.affordance_emb_ctx = tf.get_variable(
                name='affordance_embs_ctx',
                shape=[len(names_and_arities), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
    def encode_affordances(self, object_states):
        """
        Summarize each object's ground-truth affordances into one vector.
        :param object_states: [batch_size, num_objects, num_affordances]
        :return: encoded hidden size. [batch_size, num_objects, hidden_size]
        """
        #######################################################
        # 2. Encoder side
        with tf.variable_scope('encode_affordances'):
            # [batch_size * num_objects, hidden_size]
            gt_affordances_embed_encoder = embed_2d_with_embedding_table(object_states,
                                                                         embedding_table=self.affordance_embed_table,
                                                                         flatten=True)
            gt_affordances_embed_encoder = dropout(gt_affordances_embed_encoder, dropout_prob=self.dropout_prob)
            encoded_h = summarize_transformer(self.object_embed, gt_affordances_embed_encoder,
                                              self.affordance_emb_ctx,
                                              dropout_prob=self.dropout_prob)
            encoded_h = tf.layers.dense(encoded_h, self.hidden_size,
                                        kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
                                        name='final_proj_without_ln')
            encoded_h = tf.reshape(encoded_h, [self.batch_size, self.num_objects, self.hidden_size])
        return self.activation_fn(encoded_h)
    def encode_action(self, action_id, action_args):
        """
        Encode the action using a representation of IT as well as a representation of the embedded objects
        :param action_id: [batch_size]
        :param action_args: [batch_size, 2] -- indices into
            [null, object0, object1] for the action's object/receptacle args.
        :return: action embed [batch_size, hidden_size]
        """
        batch_size, two_ = get_shape_list(action_args, 2)
        assert two_ == 2
        assert batch_size == self.batch_size

        # Pre and post are the same so just extract pre, doesnt matter
        object_embeds = tf.reshape(self.object_embed, [self.batch_size, 2, 2, self.hidden_size])[:, :, 0]

        with tf.variable_scope('encode_action'):
            # Encode action
            action_embedding_table = tf.get_variable(
                name='action_embs',
                shape=[len(THOR_ACTION_TYPE_TO_IND), self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )
            self.action_embedding_table = action_embedding_table
            action_embed = embed_with_embedding_table(action_id, action_embedding_table)

            # I originally got action args from
            # action_args = []
            # for k in ['object_name', 'receptacle_name']:
            #     ok = item['action'][k]
            #
            #     if ok is None:
            #         action_args.append(0)
            #
            #     elif ok == item['pre'][0]['index']:
            #         action_args.append(1)
            #     elif ok == item['pre'][1]['index']:
            #         action_args.append(2)
            #     else:
            #         import ipdb
            #         ipdb.set_trace()
            # Index 0 selects this learned "no object" placeholder.
            nullctx = tf.tile(tf.get_variable(
                name='nullobj',
                shape=[self.hidden_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02),
            )[None, None], [self.batch_size, 1, 1])
            encoded_pre_and_zero = tf.concat([nullctx, object_embeds], 1)
            object_repr_and_receptacle_repr = tf.gather(encoded_pre_and_zero, action_args, batch_dims=1)
            object_repr_and_receptacle_repr = tf.reshape(object_repr_and_receptacle_repr,
                                                         [self.batch_size, 2 * self.hidden_size])
            # Fuse [action type; object arg; receptacle arg] through an MLP.
            action_embed0 = tf.concat([action_embed, object_repr_and_receptacle_repr], 1)
            return self.activation_fn(residual_mlp(action_embed0,
                                                   hidden_size=self.hidden_size,
                                                   final_size=self.hidden_size,
                                                   num_layers=2,
                                                   hidden_dropout_prob=self.dropout_prob
                                                   ))
def apply_action_mlp(self, action_embed, encoded_h_pre):
    """
    Predict the post-action hidden states from the pre-action states and the action.

    :param action_embed: [batch_size, h]
    :param encoded_h_pre: [batch_size, num_objs (probably 2), h] -- one per thing we will predict.
        We can model this JOINTLY (fuse_action=True: one MLP over the concatenation of all
        object states) or applying the model to EACH THING (fuse_action=False: the same MLP
        applied per object).
    :return: [batch_size, num_objs, hidden_size] predicted post-action states.
    """
    batch_size, num_objs_to_apply, hidden_size = get_shape_list(encoded_h_pre, 3)
    assert batch_size == self.batch_size
    assert hidden_size == self.hidden_size
    if self.config.model.get('fuse_action', True):
        tf.logging.info("Apply action MLP -> Fuse action!")
        # 3. Change the hidden state -- jointly over all objects.
        with tf.variable_scope('apply_action_mlp'):
            # [batch_size, h + num_objs * h]
            mlp_h = tf.concat([action_embed, tf.reshape(encoded_h_pre, [self.batch_size, -1])], 1)
            encoded_h_post_pred = residual_mlp(mlp_h,
                                               initial_proj=False,
                                               num_layers=2,
                                               hidden_size=3*self.hidden_size,
                                               final_size=num_objs_to_apply * self.hidden_size,
                                               hidden_dropout_prob=self.dropout_prob)
            encoded_h_post_pred = tf.reshape(encoded_h_post_pred, [self.batch_size, num_objs_to_apply, self.hidden_size])
            return self.activation_fn(encoded_h_post_pred)
    else:
        # 3. Change the hidden state -- per object, sharing the MLP weights.
        with tf.variable_scope('apply_action_mlp'):
            # Tile the action embedding so each object sees the same action context.
            mlp_h = tf.concat([tf.tile(action_embed[:, None], [1, num_objs_to_apply, 1]), encoded_h_pre], 2)
            # Fold objects into the batch dimension so one MLP serves them all.
            mlp_h_2d = tf.reshape(mlp_h, [self.batch_size * num_objs_to_apply, self.hidden_size + hidden_size])
            encoded_h_post_pred = residual_mlp(mlp_h_2d, hidden_size=self.hidden_size, final_size=self.hidden_size,
                                               hidden_dropout_prob=self.dropout_prob)
            encoded_h_post_pred = tf.reshape(encoded_h_post_pred, [self.batch_size, num_objs_to_apply, self.hidden_size])
            return self.activation_fn(encoded_h_post_pred)
def decode_affordances_when_gt_is_provided(self, all_encoded_h, gt_affordances_decoded):
    """
    Teacher-forced decoding: predict every affordance given the ground-truth context.

    :param all_encoded_h: [batch_size, num_objs, hidden_size]
    :param gt_affordances_decoded: [batch_size, num_objs, num_affordances]
    :return: [batch_size, num_objs, num_affordances, vocab_size_for_affordances]
    """
    # 4. Predict the states!
    with tf.variable_scope('decoder'):
        batch_size, num_duplicates_x_num_objs, hidden_size = get_shape_list(all_encoded_h, 3)
        assert batch_size == self.batch_size
        # assert num_duplicates_x_num_objs == 6
        assert hidden_size == self.hidden_size
        batch_size_, num_duplicates_x_num_objs_, num_affordances = get_shape_list(gt_affordances_decoded, 3)
        assert num_duplicates_x_num_objs_ == num_duplicates_x_num_objs
        assert batch_size_ == self.batch_size
        # Flatten objects into the batch dim; dropout on the object states.
        all_encoded_h = dropout(tf.reshape(all_encoded_h, [-1, self.hidden_size]), dropout_prob=self.dropout_prob)
        # Get GT affordances -- slightly different because we duplicated the postconditions for 2 losses
        gt_affordances_decoder_embed = embed_2d_with_embedding_table(gt_affordances_decoded,
                                                                    self.affordance_embed_table,
                                                                    flatten=True)
        # [batch_size, num_affordances, hidden_size]
        hidden_states_per_attr, _ = expand_transformer(
            object_full_state=all_encoded_h,
            gt_affordances_embed=gt_affordances_decoder_embed,
            affordance_ctx_name_embed=self.affordance_emb_ctx,
            affordance_trg_name_embed=self.affordance_emb_trg,
            dropout_prob=self.dropout_prob,
            # Permute decoding order only during training (and if configured).
            random_perms=self.is_training and self.config.data.get('random_perms', False),
        )
        # GET the predictions: logits via inner product with the (shared) embedding
        # table, treated here as [num_affordances, vocab, hidden].
        affordances_pred = tf.einsum('bah,avh->bav', hidden_states_per_attr, self.affordance_embed_table)
        # Per-(affordance, value) output bias; reused by name in sample_step.
        apb = tf.get_variable(
            name='affordance_pred_bias',
            shape=[len(names_and_arities), len(THOR_OBJECT_TYPE_TO_IND)],
            initializer=tf.truncated_normal_initializer(stddev=0.02),
        )
        affordances_pred += apb[None]
        affordance_pred_by_type = tf.reshape(affordances_pred,
                                             [batch_size, num_duplicates_x_num_objs,
                                              len(names_and_arities), len(THOR_OBJECT_TYPE_TO_IND)])
        return affordance_pred_by_type
def sample_step(self, encoded_h_flat, prev_affordances=None, cache=None, p=0.95):
    """
    Sample the next affordance value, conditioned on everything sampled so far.

    :param encoded_h_flat: [Batch_size * num_objs, hidden_size]
    :param prev_affordances: [batch_size * num_objs, num_affordances up until now] (maybe None
        on the first step).
    :param cache: transformer key/value cache from the previous step (or None).
    :param p: nucleus-sampling threshold -- currently UNUSED because the top-p call below is
        commented out in favour of argmax decoding.
    :return: dict with 'new_tokens' [b], 'new_probs' [b], and 'new_cache'.
    """
    with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
        batch_size, hidden_size = get_shape_list(encoded_h_flat, 2)
        if prev_affordances is None:
            num_affordances_to_now = 0
            prev_affordances_embed = tf.zeros((batch_size, 0, self.hidden_size))
        else:
            batch_size, num_affordances_to_now = get_shape_list(prev_affordances, 2)
            # Embed each already-sampled value with its own affordance's table slice.
            prev_affordances_embed = embed_2d_with_embedding_table(prev_affordances,
                                                                   self.affordance_embed_table[
                                                                       :num_affordances_to_now],
                                                                   flatten=True)
        # Append a zero placeholder slot for the affordance being predicted now.
        prev_affordances_embed = tf.concat([prev_affordances_embed, tf.zeros((batch_size, 1, self.hidden_size))], 1)
        hidden_states_per_attr, new_kvs = expand_transformer(
            object_full_state=encoded_h_flat,
            gt_affordances_embed=prev_affordances_embed,
            affordance_ctx_name_embed=self.affordance_emb_ctx[:num_affordances_to_now + 1],
            affordance_trg_name_embed=self.affordance_emb_trg[:num_affordances_to_now + 1],
            dropout_prob=self.dropout_prob,
            random_perms=False,
            reuse=tf.AUTO_REUSE,
            layer_cache=cache
        )
        # Logits for the current affordance only (last position, its table row).
        logits = tf.einsum('bh,vh->bv', hidden_states_per_attr[:, -1],
                           self.affordance_embed_table[num_affordances_to_now])
        # Same bias variable (by name/scope) as in decode_affordances_when_gt_is_provided.
        apb = tf.get_variable(
            name='affordance_pred_bias',
            shape=[len(names_and_arities), len(THOR_OBJECT_TYPE_TO_IND)],
            initializer=tf.truncated_normal_initializer(stddev=0.02),
        )[num_affordances_to_now]
        logits += apb[None]
        # Mask logits down to this affordance's arity (minimum 2 classes).
        cur_name, cur_arity = names_and_arities[num_affordances_to_now]
        logits_mask = tf.cast(tf.less(tf.range(len(THOR_OBJECT_TYPE_TO_IND)), max(cur_arity, 2)), dtype=tf.float32)
        logits = logits * logits_mask - 1e10 * (1.0 - logits_mask)

        # sample_info = _top_p_sample(logits, num_samples=1, p=p)
        sample_info = _argmax_sample(logits)

        new_tokens = tf.squeeze(sample_info['sample'], 1)
        new_probs = tf.squeeze(tf.batch_gather(sample_info['probs'], sample_info['sample']), 1)
        return {
            'new_tokens': new_tokens,
            'new_probs': new_probs,
            'new_cache': new_kvs
        }
def sample(self, encoded_h):
    """
    Decode into actual affordances, one affordance at a time (greedy, via sample_step).

    :param encoded_h: [batch_size, num_objects, hidden_size]
    :return: dict with 'tokens' and 'probs', each [batch_size, num_objects, num_affordances].
    """
    bsize0, num_objs0, hidden_size = get_shape_list(encoded_h, 3)
    # Fold objects into the batch dimension; each object decodes independently.
    encoded_h_flat = tf.reshape(encoded_h, [-1, self.hidden_size])
    batch_size = get_shape_list(encoded_h_flat, 2)[0]  # NOTE(review): unused below
    with tf.name_scope('sample'):
        # First step has no context.
        h0 = self.sample_step(encoded_h_flat)
        ctx = h0['new_tokens'][:, None]
        cache = h0['new_cache']
        probs = h0['new_probs'][:, None]
        # Technically we don't need tf.while_loop here bc always doing it for the same number of steps
        for t in range(len(names_and_arities) - 1):
            next_outputs = self.sample_step(encoded_h_flat, prev_affordances=ctx, cache=cache)
            # Update everything
            # NOTE(review): cache is grown along axis -2 -- presumably the key/value
            # sequence axis of expand_transformer's layer cache; confirm.
            cache = tf.concat([cache, next_outputs['new_cache']], axis=-2)
            ctx = tf.concat([ctx, next_outputs['new_tokens'][:, None]], axis=1)
            probs = tf.concat([probs, next_outputs['new_probs'][:, None]], axis=1)
        return {
            'tokens': tf.reshape(ctx, [bsize0, num_objs0, -1]),
            'probs': tf.reshape(probs, [bsize0, num_objs0, -1]),
        }
def compute_losses(self, object_states, isvalid_by_type_o1o2,
                   encoded_h_pre,
                   encoded_h_post_gt,
                   encoded_h_post_pred,
                   affordance_pred_by_type,
                   gt_affordances_decoder,
                   isvalid_by_type):
    """
    Compute training losses plus diagnostic norms.

    The "duplicated" axis (size 6) follows the construction in model_fn:
    indices [0:2] = pre states, [2:4] = ground-truth post states,
    [4:6] = predicted post states.

    :param object_states: [batch_size, 4, len(names_and_arities)]
    :param isvalid_by_type_o1o2: whether the first two objs are valid [batch_size, 2]
    :param encoded_h_pre / encoded_h_post_gt / encoded_h_post_pred: [batch_size, 2, hidden_size]
    :param affordance_pred_by_type: [batch_size, 6, num_affordances, vocab]
    :param gt_affordances_decoder: [batch_size, 6, num_affordances]
    :param isvalid_by_type: [batch_size, 6]
    :return: (losses dict, norms dict) -- norms are diagnostics, not all are trained on.
    """
    batch_size, num_duplicates_x_num_objs, nlen_names_and_arities = get_shape_list(object_states, 3)

    # MAGNITUDE LOSSES
    ###################
    # Check if anything changed
    norms = {}
    losses = {}
    # Split [batch, 4, A] into pre/post per object: [batch, 2, A] each.
    pre_states, post_states = tf.unstack(
        tf.reshape(object_states, [batch_size, 2, 2, len(names_and_arities)]), axis=2)
    did_change = tf.not_equal(pre_states, post_states)
    # Per-object weights restricted to valid objects.
    didchange_weight = tf.cast(tf.reduce_any(did_change, -1), dtype=tf.float32) * isvalid_by_type_o1o2
    nochange_weight = (1.0 - tf.cast(tf.reduce_any(did_change, -1), dtype=tf.float32)) * isvalid_by_type_o1o2

    ### How much did things change
    ###############
    encoded_h_delta = encoded_h_post_pred - encoded_h_pre
    encoded_h_delta_l2 = tf.sqrt(tf.reduce_mean(tf.square(encoded_h_delta), -1))
    # Average hidden-state movement, split by whether the object actually changed.
    norms['didchange_hdelta_l2'] = tf.reduce_sum(encoded_h_delta_l2 * didchange_weight) / (tf.reduce_sum(
        didchange_weight) + 1e-5)
    norms['nochange_hdelta_l2'] = tf.reduce_sum(encoded_h_delta_l2 * nochange_weight) / (tf.reduce_sum(
        nochange_weight) + 1e-5)

    # Delta between pred and GT
    ###
    # gt_mu = tf.stop_gradient(encoded_h_post_gt[:, :, :self.hidden_size])
    # pred_mu = encoded_h_post_pred[:, :, :self.hidden_size]
    #
    # #########################################
    # # VAE loss
    # all_mu, all_logvar = tf.split(tf.reshape(tf.concat([encoded_h_pre,
    #                                                     encoded_h_post_gt,
    #                                                     encoded_h_post_pred], 1),
    #                                          [-1, self.hidden_size * 2]), [self.hidden_size, self.hidden_size],
    #                               axis=-1)
    # kld = -0.5 * tf.reduce_mean(1.0 + all_logvar - tf.square(all_mu) - tf.exp(all_logvar))
    # losses['kld'] = kld
    #########################################
    # Only the prediction side receives gradient from the matching penalty.
    gt_stop = tf.stop_gradient(encoded_h_post_gt)
    hidden_state_diff_l2 = tf.sqrt(tf.reduce_mean(tf.square(encoded_h_post_pred - gt_stop), -1))
    hidden_state_diff_l1 = tf.reduce_mean(tf.abs(encoded_h_post_pred - gt_stop), -1)
    norms['hidden_state_diff_l2'] = tf.reduce_sum(hidden_state_diff_l2 * isvalid_by_type_o1o2) / (
            tf.reduce_sum(isvalid_by_type_o1o2) + 1e-5)
    norms['hidden_state_diff_l1'] = tf.reduce_sum(hidden_state_diff_l1 * isvalid_by_type_o1o2) / (
            tf.reduce_sum(isvalid_by_type_o1o2) + 1e-5)
    hidden_state_magn_l2 = tf.sqrt(tf.reduce_mean(tf.square(gt_stop), -1))
    norms['hidden_state_magn_l2'] = tf.reduce_sum(hidden_state_magn_l2 * isvalid_by_type_o1o2) / (
            tf.reduce_sum(isvalid_by_type_o1o2) + 1e-5)

    # Upweight changed losses
    # did change: [batch_size, num_objs, num_affordances]
    for i, (affordance_name, arity_) in enumerate(names_and_arities):
        arity = max(arity_, 2)
        # Slices follow the [pre, pre, gt-post, gt-post, pred-post, pred-post]
        # duplication order: 4: = predicted-post, 0:2 = pre, 2:4 = gt-post.
        losses[f'state/{affordance_name}_post'] = sequence_xe_loss(
            affordance_pred_by_type[:, 4:, i, :arity],
            gt_affordances_decoder[:, 4:, i],
            label_weights=isvalid_by_type[:, 4:],
        )
        losses[f'state/{affordance_name}_pre'] = sequence_xe_loss(
            affordance_pred_by_type[:, 0:2, i, :arity],
            gt_affordances_decoder[:, 0:2, i],
            label_weights=isvalid_by_type[:, 0:2],  # + tf.cast(did_change[:, :, i], dtype=tf.float32) * 100.0,
        )
        losses[f'state/{affordance_name}_postgt'] = sequence_xe_loss(
            affordance_pred_by_type[:, 2:4, i, :arity],
            gt_affordances_decoder[:, 2:4, i],
            label_weights=isvalid_by_type[:, 2:4],  # + tf.cast(did_change[:, :, i], dtype=tf.float32) * 100.0,
        )
    # # Another way for losses
    # losses_all = _sequence_xe_loss_noreduce(affordance_pred_by_type, gt_affordances_decoder)
    # loss_mask = tf.reshape(tf.tile(isvalid_by_type[:, :, None], [1, 1, len(names_and_arities)]), [-1])
    # losses['state/all'] = tf.reduce_sum(losses_all * loss_mask) / (tf.reduce_sum(loss_mask) + 1e-5)
    return losses, norms
def model_fn_builder(config: NeatConfig):
    """Returns `model_fn` closure for TPUEstimator."""

    def model_fn(features, labels, mode, params):
        """The `model_fn` for TPUEstimator.

        Builds the state-change prediction graph: encode affordances, encode
        the action, predict post-action states, decode affordances, and (in
        TRAIN mode) assemble losses, the optimizer, and the TPU host call.
        """
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        # NOTE(review): batch_size / hidden_size are computed but unused below.
        batch_size = get_shape_list(features['actions/action_id'], expected_rank=1)[0]
        hidden_size = config.model['hidden_size']
        # activation_fn = tf.nn.tanh if config.model.get('activation', 'tanh') == 'tanh' else tf.identity

        scp_model = StateChangePredictModel(config,
                                            is_training=is_training,
                                            object_types=features['objects/object_types'],
                                            )
        # object_states carries [pre_o1, post_o1, pre_o2, post_o2] along axis 1;
        # the gathers below separate the pre and ground-truth-post slices.
        encoded_h = scp_model.encode_affordances(features['objects/object_states'])
        encoded_h_pre = tf.gather(encoded_h, [0, 2], axis=1)
        encoded_h_post_gt = tf.gather(encoded_h, [1, 3], axis=1)
        action_embed = scp_model.encode_action(features['actions/action_id'], action_args=features['actions/action_args'])
        encoded_h_post_pred = scp_model.apply_action_mlp(action_embed, encoded_h_pre)

        #############################################################
        # Now construct a decoder
        # [batch_size, 3, #objs, hidden_size] -> [batch_size, 3 * objs, hidden_size]
        all_encoded_h = tf.concat([
            encoded_h_pre,  # [0, 2]
            encoded_h_post_gt,  # [1, 3]
            encoded_h_post_pred,  # [1, 3]
        ], 1)
        # Duplicate ground-truth states to line up with [pre, gt-post, pred-post].
        gt_affordances_decoder = tf.gather(features['objects/object_states'], [0, 2, 1, 3, 1, 3], axis=1)
        isvalid_by_type = tf.cast(tf.gather(features['objects/is_valid'], [0, 2, 1, 3, 1, 3], axis=1), dtype=tf.float32)

        if mode == tf.estimator.ModeKeys.PREDICT:
            # Inference path: autoregressively sample affordances, echo features back.
            predictions = scp_model.sample(all_encoded_h)
            predictions.update(**features)
            return tf.contrib.tpu.TPUEstimatorSpec(mode=tf.estimator.ModeKeys.PREDICT,
                                                   predictions=predictions)

        affordance_pred_by_type = scp_model.decode_affordances_when_gt_is_provided(all_encoded_h,
                                                                                  gt_affordances_decoder)
        ######################
        # For losses
        # action_logits = action_result['action_logits']
        ############################################
        # if params.get('demomode', False):
        #     action_logits['affordances_pred'] = affordance_pred_by_type[:, 4:]
        #     for k in action_logits:
        #         action_logits[k] = tf.nn.softmax(action_logits[k], axis=-1)
        #     return action_logits

        losses, norms = scp_model.compute_losses(
            object_states=features['objects/object_states'],
            isvalid_by_type_o1o2=isvalid_by_type[:, :2],
            encoded_h_pre=encoded_h_pre,
            encoded_h_post_gt=encoded_h_post_gt,
            encoded_h_post_pred=encoded_h_post_pred,
            affordance_pred_by_type=affordance_pred_by_type,
            gt_affordances_decoder=gt_affordances_decoder,
            isvalid_by_type=isvalid_by_type)

        # losses['action_success'] = sequence_xe_loss(action_logits['action_success'], features['actions/action_success'])
        loss = tf.add_n([x for x in losses.values()])
        # Norms are logged alongside losses; only the two diff norms are trained on.
        for k, v in norms.items():
            losses[f'norms/{k}'] = v
        loss += 0.1 * norms['hidden_state_diff_l2']
        loss += 0.1 * norms['hidden_state_diff_l1']

        if is_training:
            tvars = [x for x in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) if 'global_step' not in x.name]
        else:
            tvars = tf.trainable_variables()

        # ckpt_to_assignment_map = {}
        # initialized_variable_names = {}
        # init_checkpoint = config.model.get('init_checkpoint', None)
        # if init_checkpoint:
        #     regular_assignment_map, regular_initialized_variable_names = get_assignment_map_from_checkpoint(
        #         tvars, init_checkpoint=init_checkpoint
        #     )
        #
        #     # If you need to disable loading certain variables, comment something like this in
        #     # regular_assignment_map = {k: v for k, v in regular_assignment_map.items() if
        #     #                           all([x not in k for x in ('temporal_predict',
        #     #                                                     'roi_language_predict',
        #     #                                                     'roi_pool/pool_c5',
        #     #                                                     'aux_roi',
        #     #                                                     'second_fpn',
        #     #                                                     'img_mask',
        #     #                                                     'roi_pool/box_feats_proj/kernel')])}
        #
        #     ckpt_to_assignment_map['regular'] = regular_assignment_map
        #     initialized_variable_names.update(regular_initialized_variable_names)
        #
        # def scaffold_fn():
        #     """Loads pretrained model through scaffold function."""
        #     # ORDER BY PRIORITY
        #     return tf.train.Scaffold()

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            # if var.name in initialized_variable_names:
            #     init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)

        train_op, train_metrics = optimization.build_optimizer_from_config(
            loss=loss,
            optimizer_config=config.optimizer,
            device_config=config.device,
        )
        train_metrics.update(losses)
        # for k, v in affordance_loss_metrics.items():
        #     train_metrics[f'affordance_metrics/{k}'] = v
        host_call = construct_host_call(scalars_to_log=train_metrics,
                                        model_dir=config.device['output_dir'],
                                        iterations_per_loop=config.device.get('iterations_per_loop', 1000))
        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metrics=None,
            # scaffold_fn=scaffold_fn,
            host_call=host_call)

    return model_fn
if __name__ == '__main__':
    # Local debugging harness: pull one batch from the dataloader and run the
    # model_fn once in TRAIN mode.
    from model.interact import dataloader

    # NOTE(review): enabling eager execution while exercising a graph-mode
    # TPUEstimator model_fn is unusual -- confirm this debug path still runs.
    tf.compat.v1.enable_eager_execution()
    batch_size = 8
    config = NeatConfig.from_yaml('configs/local_debug.yaml')
    input_fn = dataloader.input_fn_builder(config, is_training=True)
    features, labels = input_fn(params={'batch_size': batch_size}).make_one_shot_iterator().get_next()
    lol = model_fn_builder(config)(features, labels, tf.estimator.ModeKeys.TRAIN, {'batch_size': batch_size})
    # model = TrajectoryMLP(is_training=True,
    #                       features=features,
    #                       hidden_size=config.model['hidden_size'],
    #                       )
| 48.352273 | 125 | 0.589215 | 4,526 | 38,295 | 4.623288 | 0.115113 | 0.035699 | 0.020741 | 0.013763 | 0.479092 | 0.386284 | 0.293763 | 0.238949 | 0.19871 | 0.179881 | 0 | 0.015195 | 0.31085 | 38,295 | 791 | 126 | 48.413401 | 0.777689 | 0.212665 | 0 | 0.248918 | 0 | 0 | 0.046717 | 0.009385 | 0 | 0 | 0 | 0 | 0.032468 | 1 | 0.030303 | false | 0 | 0.021645 | 0 | 0.08658 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcc845234e6840c99dfb161a50d8b03c190825b7 | 16,564 | py | Python | tests/docker/test_docker.py | radiocosmology/alpenhorn | a91df899d864e04699b6f49c33f473383ee10cfc | [
"MIT"
] | 3 | 2017-03-10T00:51:01.000Z | 2018-04-25T16:50:13.000Z | tests/docker/test_docker.py | radiocosmology/alpenhorn | a91df899d864e04699b6f49c33f473383ee10cfc | [
"MIT"
] | 106 | 2017-03-16T23:57:10.000Z | 2021-10-05T23:44:48.000Z | tests/docker/test_docker.py | radiocosmology/alpenhorn | a91df899d864e04699b6f49c33f473383ee10cfc | [
"MIT"
] | 1 | 2018-07-26T23:32:13.000Z | 2018-07-26T23:32:13.000Z | import os
import re
import time
from os.path import dirname, exists, join
import pytest
import yaml
from alpenhorn import acquisition as ac
from alpenhorn import archive as ar
from alpenhorn import storage as st
# These integration tests are slow and require a working Docker daemon, so
# they are opt-in: set RUN_DOCKER_TESTS (or PLAYGROUND) to enable them.
if ("RUN_DOCKER_TESTS" not in os.environ) and ("PLAYGROUND" not in os.environ):
    pytestmark = pytest.mark.skip(
        reason=(
            "Docker tests must be enabled by setting the RUN_DOCKER_TESTS environment "
            "variable"
        ),
    )
else:
    # Try and import docker.
    try:
        import docker

        # Module-level client used by every fixture and test below.
        client = docker.from_env()
    except Exception:
        # Broad on purpose: any failure (missing package, daemon down,
        # permission error) should skip the suite rather than error out.
        pytestmark = pytest.mark.skip(
            reason=(
                "Docker tests are enabled, but docker doesn't seem to be installed, or"
                "running."
            ),
        )
# ====== Fixtures for controlling Docker ======
@pytest.fixture(scope="module")
def images():
    """Build the images for the tests.

    Builds the "alpenhorn" image from the repository root (two levels up
    from this test file), once per test module.
    """
    import os.path

    context = os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))
    print("Building docker images from location %s..." % context)

    # Build alpenhorn image; rm/forcerm clean up intermediate containers.
    client.images.build(
        path=context,
        tag="alpenhorn",
        rm=True,
        forcerm=True,
        dockerfile="tests/docker/Dockerfile.alpenhorn",
    )
@pytest.fixture(scope="module")
def network():
    """Set up the network.

    Yields the bridge network's name and removes the network on teardown.
    """
    # Note to connect to this network you need to pass network_mode=networks to
    # .run(). See https://github.com/docker/docker-py/issues/1433
    print("Setting up the network...")
    network = client.networks.create("alpenhorn-net", driver="bridge")
    yield network.name
    network.remove()
@pytest.fixture(scope="module")
def db(network, images):
    """Set up the database and create the tables for alpenhorn.

    Also connect peewee to this database, so we can query its state."""
    from alpenhorn import db

    print("Creating the database...")
    # Create the database container; 3306 is published on host port 63306 so
    # the test process itself can connect via peewee below.
    db_container = client.containers.run(
        "mysql:5.7",
        name="db",
        detach=True,
        network_mode=network,
        ports={"3306/tcp": 63306},
        environment={"MYSQL_ALLOW_EMPTY_PASSWORD": "yes"},
    )

    # Wait until the MySQL instance is properly up
    # NOTE(review): this and the next two runs pass `network=` while the
    # container above passes `network_mode=` -- both reach the same network,
    # but confirm the intent (see docker-py issue 1433 referenced above).
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network=network,
        command="bash -c 'while ! mysqladmin ping -h db --silent; do sleep 3; done'",
    )

    # Create the database
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network=network,
        command="mysql -h db -e 'CREATE DATABASE alpenhorn_db'",
    )

    print("Creating the tables...")
    # Initialise alpenhorn
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network=network,
        command="alpenhorn init",
    )

    # Connect our peewee models to the database
    db._connect(url="mysql://root@127.0.0.1:63306/alpenhorn_db")

    yield db_container

    # Take down the peewee connection
    db.database_proxy.close()

    print("Cleaning up db container...")
    _stop_or_kill(db_container)
    db_container.remove()
@pytest.fixture(scope="module")
def workers(db, network, images, tmpdir_factory):
    """Create a group of alpenhorn entries.

    Spins up three alpenhorn containers ("container-0".."container-2"), each
    with its own StorageGroup/StorageNode DB entry and a host tmpdir mounted
    at /data.  Only node_0 has auto_import enabled.  Yields a list of dicts
    with keys "node", "container" and "dir".
    """
    workers = []
    for i in range(3):
        hostname = "container-%i" % i
        print("Creating alpenhorn container %s" % hostname)

        # Create db entries for the alpenhorn instance
        group = st.StorageGroup.create(name=("group_%i" % i))
        node = st.StorageNode.create(
            name=("node_%i" % i),
            root="/data",
            username="root",
            group=group,
            host=hostname,
            address=hostname,
            active=True,
            auto_import=(i == 0),
            min_avail_gb=0.0,
        )

        # Create a temporary directory on the host to store the data, which will
        # get mounted into the container
        data_dir = str(tmpdir_factory.mktemp(hostname))
        print("Node directory (on host): %s" % str(data_dir))

        # The ALPENHORN_NODE file marks the directory as belonging to this node.
        with open(str(data_dir) + "/ALPENHORN_NODE", "w") as f:
            f.write(node.name)
        print("Created ALPENHORN_NODE file on host: %s" % str(data_dir))

        container = client.containers.run(
            "alpenhorn",
            name=hostname,
            hostname=hostname,
            network_mode=network,
            detach=True,
            volumes={data_dir: {"bind": "/data", "mode": "rw"}},
        )
        workers.append({"node": node, "container": container, "dir": data_dir})

    yield workers

    # Cleanup
    for worker in workers:
        container = worker["container"]
        print("Stopping and removing alpenhorn container %s" % container.name)
        _stop_or_kill(container, timeout=1)
        container.remove()
def _stop_or_kill(container, timeout=10):
    """Stop *container* gracefully, hard-killing it if the stop times out.

    Works around https://github.com/docker/docker-py/issues/1374, where
    ``Container.stop`` can raise a read timeout instead of returning.
    """
    from requests.exceptions import ReadTimeout

    try:
        container.stop(timeout=timeout)
    except ReadTimeout:
        container.kill()
# ====== Fixtures for generating test files ======
@pytest.fixture(scope="module")
def test_files():
    """Get a set of test files.

    Read the test files config, and structure it into acquisitions and files,
    labelling each with their respective types.
    """
    # Fixture description lives in tests/fixtures/files.yml.
    files = os.path.normpath(
        os.path.join(os.path.dirname(__file__), "..", "fixtures", "files.yml")
    )

    with open(files, "r") as f:
        fs = yaml.safe_load(f.read())

    acqs = _recurse_acq(fs)
    return acqs
def _recurse_acq(f, root=""):
    """Walk the dictionary tree *f* and collect acquisition descriptions.

    A directory counts as an acquisition as soon as its path matches a known
    acquisition type; any other directory is descended into recursively.
    Returns a list of dicts with keys "name", "type" and "files".
    """

    def _type(path):
        # Acquisition type is inferred from the path name.
        if "zab" in path:
            return "zab"
        if "quux" in path or path == "x":
            return "quux"
        return None

    found = []
    for name, subtree in f.items():
        path = join(root, name)
        acq_type = _type(path)
        if acq_type is None:
            found.extend(_recurse_acq(subtree, root=path))
        else:
            found.append(
                {
                    "name": path,
                    "type": acq_type,
                    "files": _recurse_files(subtree),
                }
            )
    return found
def _recurse_files(f, root=""):
"""Recurse over a dictionary tree at the acq root, and get the files."""
def _type(x):
if x[-4:] == ".log":
return "log"
elif x[-4:] == ".zxc" or x == "jim":
return "zxc"
elif x[-5:] == ".lock":
return "lock"
filelist = []
for name, sub in f.items():
new_root = join(root, name)
if "md5" in sub:
fileprop = {"name": new_root, "type": _type(new_root)}
fileprop.update(sub)
filelist.append(fileprop)
else:
filelist += _recurse_files(sub, root=new_root)
return filelist
def _make_files(acqs, base, skip_lock=True):
for acq in acqs:
for file_ in acq["files"]:
path = join(base, acq["name"], file_["name"])
if not exists(dirname(path)):
os.makedirs(dirname(path))
if not skip_lock or file_["type"] != "lock":
with open(path, "w") as fh:
fh.write(file_["contents"])
# ====== Helper routines for checking the database ======
def _verify_db(acqs, copies_on_node=None, wants_on_node="Y", has_on_node="Y"):
    """Verify that files are in the database.

    Parameters
    ----------
    acqs : dict
        Set of acquisitions and files as output by test_files.
    copies_on_node : StorageNode, optional
        Verify that what the database believes is on this node. If
        `None` skip this test.
    has_on_node : str, optional
        'Has' state of files to check for. Default 'Y'.
        `None` to skip test.
    wants_on_node : str, optional
        'Wants' state of files to check for. Default 'Y'.
        `None` to skip test.
    """
    # Loop over all acquisitions and files and check that they have been
    # correctly added to the database
    for acq in acqs:
        # Test that the acquisition exists
        acq_query = ac.ArchiveAcq.select().where(ac.ArchiveAcq.name == acq["name"])
        assert acq_query.count() == 1
        acq_obj = acq_query.get()

        # Test that it has the correct type
        assert acq_obj.type.name == acq["type"]

        for file_ in acq["files"]:
            # Test that the file exists
            file_query = ac.ArchiveFile.select().where(
                ac.ArchiveFile.acq == acq_obj, ac.ArchiveFile.name == file_["name"]
            )

            # Check that we haven't imported types we don't want
            # (untyped files and lock files must NOT be in the database).
            if file_["type"] in [None, "lock"]:
                assert file_query.count() == 0
                continue

            assert file_query.count() == 1
            file_obj = file_query.get()

            # Test that it has the correct type
            assert file_obj.type.name == file_["type"]

            if copies_on_node is not None:
                # Test that this node has exactly one copy record for the file
                copy_query = ar.ArchiveFileCopy.select().where(
                    ar.ArchiveFileCopy.file == file_obj,
                    ar.ArchiveFileCopy.node == copies_on_node,
                )
                assert copy_query.count() == 1
                copy_obj = copy_query.get()

                # Check the copy's has/wants flags if requested.
                if has_on_node is not None:
                    assert copy_obj.has_file == has_on_node

                if wants_on_node is not None:
                    assert copy_obj.wants_file == wants_on_node
def _verify_files(worker):
    """Verify the files are in place using the alpenhorn verify command.

    Runs ``alpenhorn node verify`` inside the worker's container and asserts
    it reported success.
    """
    # The command discards verify's output and echoes its exit status.  Because
    # of the trailing `echo $?`, the *shell* always exits 0, so the ExecResult
    # exit_code alone can never detect a verify failure -- the echoed status
    # must be read from the command output as well.
    output = worker["container"].exec_run(
        "bash -c 'alpenhorn node verify %s &> /dev/null; echo $?'" % worker["node"].name
    )

    # The exec itself must have run cleanly...
    assert not output.exit_code
    # ...and the echoed exit status of `alpenhorn node verify` must be 0.
    assert int(output.output.decode().strip()) == 0
# ====== Test the auto_import behaviour ======
def test_import(workers, test_files):
    """Files dropped on the auto_import node end up in the DB and on disk."""
    # Add a bunch of files onto node_0, wait for them to be picked up by the
    # auto_import, and then verify that they all got imported to the db
    # correctly.

    # Create the files (lock files deliberately omitted)
    _make_files(test_files, workers[0]["dir"], skip_lock=True)

    # Wait for the auto_import to catch them (it polls at 30s intervals)
    # NOTE(review): the original comment says 30s polling but only 3s is
    # slept -- presumably inotify-based import is expected; confirm.
    time.sleep(3)

    node = workers[0]["node"]
    _verify_db(test_files, copies_on_node=node)
    _verify_files(workers[0])
def test_status(workers, network):
    """Check for #109, `alpenhorn status` failing with MySQL storage"""
    status = client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command="alpenhorn status",
    ).decode()

    # node_0 holds the 9 auto-imported files; the other nodes are still empty.
    assert re.search(
        r"^node_0\s+9\s+0.0\s+100\.0\s+100\.0\s+container-0:/data$",
        status,
        re.MULTILINE,
    )
    assert re.search(r"^node_1\s+0\s+0.0\s+container-1:/data$", status, re.MULTILINE)
    assert re.search(r"^node_2\s+0\s+0.0\s+container-2:/data$", status, re.MULTILINE)
# ====== Test that the sync between nodes works ======
def test_sync_all(workers, network, test_files):
    """Sync everything from node_0 into group_1 and verify it arrived."""
    # Request sync onto a different node
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command="alpenhorn sync -f node_0 group_1",
    )
    time.sleep(3)

    # node_1 is the sole member of group_1.
    _verify_db(test_files, copies_on_node=workers[1]["node"])
    _verify_files(workers[1])
def test_sync_acq(workers, network, test_files):
    """Sync acquisitions one at a time from node_0 into group_2.

    Exercises the ``--acq`` filter of ``alpenhorn sync``.
    """
    for acq in test_files:
        # Request sync of a single acq onto a different node
        client.containers.run(
            "alpenhorn",
            remove=True,
            detach=False,
            network_mode=network,
            command=("alpenhorn sync -f node_0 group_2 --acq=%s" % acq["name"]),
        )

    time.sleep(3)

    # Verify that the requested files have been copied to node_2, the sole
    # member of group_2.  (The original checked workers[1] here, but node_1
    # was already populated by test_sync_all, making that check vacuous.)
    for acq in test_files:
        _verify_db([acq], copies_on_node=workers[2]["node"])

    _verify_files(workers[2])
# ====== Test that the clean command works ======
def _verify_clean(acqs, worker, unclean=False, check_empty=False):
    """Test the clean command.

    Check the command has been executed as expected on the node associated with
    'worker'. If 'unclean' is set to True, check that files are not wanted but still
    present (until additional copies on other archive nodes are found).
    If 'check_empty' is set (and not 'unclean'), additionally check that the
    node root contains nothing but the ALPENHORN_NODE marker.
    """
    # Check files are set to deleted / not deleted but not wanted in database
    for acq in acqs:
        if unclean:
            _verify_db(
                [acq], copies_on_node=worker["node"], has_on_node="Y", wants_on_node="N"
            )
        else:
            _verify_db(
                [acq], copies_on_node=worker["node"], has_on_node="N", wants_on_node="N"
            )

    # Check files are in fact gone / still there
    for acq in acqs:
        for f in acq["files"]:
            # Ignore files not tracked by the database
            if f["type"] is not None and f["type"] != "lock":
                file_exists = os.path.exists(
                    os.path.join(worker["dir"], acq["name"], f["name"])
                )
                # present iff we expect the node to still be "unclean"
                assert (file_exists and unclean) or (not file_exists and not unclean)

    # If specified, check no files or directories are left over other than the
    # ALPENHORN_NODE file
    if not unclean and check_empty:
        files = os.listdir(worker["dir"])
        assert "ALPENHORN_NODE" in files
        assert len(files) == 1
def test_clean(workers, network, test_files):
    """Exercise `alpenhorn node clean`: mark, force-delete, and refuse-to-delete."""
    # Simplest clean request
    node_to_clean = workers[1]["node"]
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command=("alpenhorn node clean -f {}".format(node_to_clean.name)),
    )

    # Check files set to 'M' (marked for eventual removal, still on disk)
    for acq in test_files:
        _verify_db(
            [acq], copies_on_node=node_to_clean, has_on_node="Y", wants_on_node="M"
        )

    # Changed my mind, delete them NOW (-n = now)
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command=("alpenhorn node clean -nf {}".format(node_to_clean.name)),
    )

    # Check files have been deleted
    time.sleep(3)
    _verify_clean(test_files, workers[1])

    # Since no untracked files should be present, check root is empty
    _verify_clean(test_files, workers[1], check_empty=True)

    # Request clean on a node when only one other archive node has a copy
    # Files should not be deleted
    node_to_clean = workers[2]["node"]
    client.containers.run(
        "alpenhorn",
        remove=True,
        detach=False,
        network_mode=network,
        command=("alpenhorn node clean -nf {}".format(node_to_clean.name)),
    )

    # Check files are still present (unwanted but not removed)
    time.sleep(3)
    _verify_clean(test_files, workers[2], unclean=True)
# === Test that the node file is being checked successfully
def test_node_active(workers):
    """Renaming the ALPENHORN_NODE marker deactivates the node; restoring it
    (plus re-flagging active in the DB) brings the node back."""
    # NOTE(review): despite the `node_0` local names, this test operates on
    # workers[1] -- confirm that is intentional.
    data_dir0 = workers[1]["dir"]
    os.rename(data_dir0 + "/ALPENHORN_NODE", data_dir0 + "/DIFFERENT_NAME")
    print("Changed name of ALPENHORN_NODE file in directory", data_dir0)
    this_node = workers[1]["node"]

    # Give the daemon a moment to notice the marker is gone.
    time.sleep(3)
    node_0 = st.StorageNode.get(name=this_node.name)
    assert not node_0.active

    # Restore the marker and re-activate the node record.
    os.rename(data_dir0 + "/DIFFERENT_NAME", data_dir0 + "/ALPENHORN_NODE")
    node_0 = st.StorageNode.get(name=this_node.name)
    node_0.active = True
    node_0.save(only=node_0.dirty_fields)

    time.sleep(3)
    node_0 = st.StorageNode.get(name=this_node.name)
    assert node_0.active
@pytest.mark.skipif(
    "PLAYGROUND" not in os.environ,
    reason=("Set PLAYGROUND to leave alpenhorn alive for interactive fun."),
)
def test_playground(workers):
    """Interactive mode: block on stdin so the containers stay up for manual
    poking; only runs when the PLAYGROUND env variable is set."""
    print(
        """
To connect the alpenhorn database to this instance run:

>>> from alpenhorn import db
>>> db._connect(url='mysql://root@127.0.0.1:63306/alpenhorn_db')

To interact with the individual alpenhorn instances use docker exec, e.g.

$ docker exec container_0 alpenhorn status

When you are finished playing, press enter to close the docker containers and
clean up everything."""
    )
    # Block until the user presses enter; fixture teardown then cleans up.
    input("")
| 27.745394 | 90 | 0.604262 | 2,153 | 16,564 | 4.517882 | 0.194612 | 0.014804 | 0.021487 | 0.028786 | 0.287961 | 0.24725 | 0.228436 | 0.193276 | 0.179912 | 0.160481 | 0 | 0.010692 | 0.282903 | 16,564 | 596 | 91 | 27.791946 | 0.808217 | 0.218908 | 0 | 0.284457 | 0 | 0.008798 | 0.141078 | 0.018767 | 0 | 0 | 0 | 0 | 0.049853 | 1 | 0.061584 | false | 0.002933 | 0.043988 | 0 | 0.131965 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fccb331c2eb46090371d60f7632d229d2233551e | 10,172 | py | Python | SparseSC/optimizers/cd_line_search.py | cclauss/SparseSC | bd5c65f162a5431f92ed957df3385c803f2d3365 | [
"MIT"
] | null | null | null | SparseSC/optimizers/cd_line_search.py | cclauss/SparseSC | bd5c65f162a5431f92ed957df3385c803f2d3365 | [
"MIT"
] | null | null | null | SparseSC/optimizers/cd_line_search.py | cclauss/SparseSC | bd5c65f162a5431f92ed957df3385c803f2d3365 | [
"MIT"
] | null | null | null | import numpy as np
from scipy.optimize import line_search
import locale
locale.setlocale(locale.LC_ALL, '')
class cd_res(object):
    """Minimal optimization-result container.

    Mirrors the two attributes of ``scipy.optimize.OptimizeResult`` that the
    coordinate-descent routines use: ``x`` is the solution point and ``fun``
    is the objective value at ``x``.
    """

    def __init__(self, x, fun):
        self.x = x
        self.fun = fun

    def __repr__(self):
        # Added for debuggability; does not change the original interface.
        return "%s(x=%r, fun=%r)" % (type(self).__name__, self.x, self.fun)
# Verbosity flag: when truthy, the search routines print the reason iteration
# stopped (presumably consumed by cdl_search below -- confirm).
print_stop_iteration = 1
def cdl_step(score,
             guess,
             jac,
             val=None,
             aggressiveness=0.1,
             zero_eps=1e2 * np.finfo(float).eps,
             print_path=True,
             decrement=1e-1):
    """Force a single descent step away from the current guess.

    Used to escape the origin, where the strong Wolfe conditions of the
    regular line search tend to fail. A step targeting a reduction of
    ``aggressiveness * val`` is tried in the steepest feasible direction and
    repeatedly shrunk by ``decrement`` until the score improves.

    Parameters
    ----------
    score : callable
        Objective function; maps a parameter vector to a scalar.
    guess : numpy.ndarray
        Current parameter vector (assumed to lie in the positive orthant).
    jac : callable
        Gradient of ``score``.
    val : float, optional
        Pre-computed ``score(guess)``; computed here when None.
    aggressiveness : float
        Fraction of ``val`` targeted by the initial step; must be in (0, 1).
    zero_eps : float
        Threshold below which a shrunken step is considered to have vanished.
    print_path : bool
        Unused here; kept for signature compatibility with the call in
        ``cdl_search``.
    decrement : float
        Shrink factor applied when a trial step does not improve the score.

    Returns
    -------
    tuple
        ``(direction, new_val)``: the accepted step and its improved score.

    Raises
    ------
    RuntimeError
        When no feasible improving step can be found.
    """
    print("[FORCING FIRST STEP]")
    assert 0 < aggressiveness < 1
    assert 0 < decrement < 1
    if val is None:
        val = score(guess)
    # Copy before mutating: `jac` may hand back a reference to an array the
    # caller still owns.
    grad = jac(guess).copy()
    # Constrain to the positive orthant: drop components that point "up".
    grad[grad > 0] = 0
    if (grad >= 0).all():
        # No usable descent component remains (e.g. stuck at the origin
        # with the gradient pointing entirely out of the feasible region).
        # BUG FIX: this previously raised the undefined name `runtime`
        # (a NameError at runtime); RuntimeError was clearly intended, as
        # it is the exception used everywhere else in this module. The
        # unreachable `return guess, val` that followed has been removed.
        raise RuntimeError("Failed to take a step")
    direction = - (aggressiveness * val * grad) / grad.dot(grad.T)
    # THE ABOVE IS EQUIVALENT TO :
    #     step_magnitude = aggressiveness*val/np.linalg.norm(grad)
    #     direction = -step_magnitude * (grad / np.linalg.norm(grad))
    while True:
        # NOTE(review): the trial point is `direction` itself rather than
        # `guess + direction`, i.e. absolute positions are scored -- this is
        # consistent with the function being invoked when stuck at the
        # origin; confirm if calling it from elsewhere.
        new_val = score(direction)
        if new_val < val:
            return direction, new_val
        direction *= decrement
        if sum(direction) < zero_eps:
            raise RuntimeError("Failed to take a step")
def cdl_search(score,
               guess,
               jac,
               tol = 1e-4,
               aggressiveness = 0.1,# aggressiveness
               alpha_mult = .9,
               max_iter = 3000,
               min_iter = 3,
               # TODO: this is a stupid default (I'm using it out of laziness)
               zero_eps = 1e2 * np.finfo(float).eps,
               print_path = True,
               print_path_verbose = False,
               preserve_angle = False):
    '''
    Implements coordinate descent with line search with the strong wolf
    conditions. Note, this tends to give nearly identical results as L-BFGS-B,
    and is *much* slower than that the super-fast 40 year old Fortran code
    wrapped by SciPy.

    Parameters
    ----------
    score : callable
        Objective function; maps a parameter vector to a scalar.
    guess : numpy.ndarray
        Starting point; must lie in the closed positive orthant.
    jac : callable
        Gradient of `score`.
    tol : float
        Relative-improvement stopping threshold (only applied after
        `min_iter` iterations).
    aggressiveness : float
        Fraction of the current objective value targeted by each raw step.
    alpha_mult : float
        Base of the adaptive step-size multiplier (exponent `alpha_t`).
    max_iter, min_iter : int
        Upper bound on iterations / minimum iterations before the `tol`
        stopping rule applies.
    zero_eps : float
        Values closer to zero than this are treated as zero.
    print_path, print_path_verbose : bool
        Console-logging verbosity flags.
    preserve_angle : bool
        When True, constrained steps are rescaled (rather than clipped) so
        the step direction is preserved.

    Returns
    -------
    cd_res
        Final parameter vector (`x`) and objective value (`fun`).

    Raises
    ------
    RuntimeError
        If no convergence within `max_iter` iterations, or if the iterate
        ever leaves the positive orthant.
    '''
    assert 0 < aggressiveness < 1
    assert 0 < alpha_mult < 1
    assert (guess >=0).all(), "Initial guess (`guess`) should be in the closed positive orthant"

    val_old = None
    grad = None
    x_curr = guess
    alpha_t = 0  # exponent of the adaptive step-size multiplier
    val = score(x_curr)
    # val0 is the objective at the origin; it is only used to report
    # R^2-style progress in the [Path] log line below.
    if (x_curr == np.zeros(x_curr.shape[0])).all():
        val0 = val
    else:
        val0 = score(np.zeros(x_curr.shape[0]))

    #-- if (x_curr == 0).all():
    #--     # Force a single step away form the origin if it is at least a little
    #--     # useful. Intuition: the curvature at the origin is typically
    #--     # exceedingly sharp (becasue we're going from a state with "no
    #--     # information" to "some information" in the covariate space, and as
    #--     # result the strong wolf conditions will have a strong tendency to
    #--     # fail. However, the origin is rarely optimal so forcing a step away
    #--     # form the origin will be necessary in most cases.
    #--     x_curr, val = cdl_step (score, guess, jac, val, aggressiveness, zero_eps, print_path)

    for _i in range(max_iter):

        if grad is None:
            # (this happens when `constrained == True` or the next point falls beyond zero due to rounding error)
            if print_path_verbose:
                print("[INITIALIZING GRADIENT]")
            grad = jac(x_curr)

        # Components that point "up" on axes already pinned at zero cannot
        # be followed without leaving the positive orthant.
        invalid_directions = np.logical_and(grad > 0,x_curr == 0)

        if (grad[np.logical_not(invalid_directions)] == 0).all():
            # this happens when we're stuck at the origin and the gradient is
            # pointing in the all-negative direction
            if print_stop_iteration:
                print("[STOP ITERATION: gradient is zero] i: %s" % (_i,))
            return cd_res(x_curr, val)

        # constrain to the positive orthant
        grad[invalid_directions] = 0

        direction = - (aggressiveness * val * grad) / grad.dot(grad.T)
        # THE ABOVE IS EQUIVALENT TO :
        #     step_magnitude = aggressiveness*val/np.linalg.norm(grad)
        #     direction = -step_magnitude * (grad / np.linalg.norm(grad))

        # adaptively adjust the step size:
        direction *= (alpha_mult ** alpha_t)

        # constrain the gradient to being non-negative on axis where the
        # current guess is already zero
        if (direction<0).any() and preserve_angle:
            constrained = True
            # Ratio by which each negative component would overshoot zero;
            # dividing the step by the largest ratio keeps x non-negative
            # while preserving the step direction.
            alpha_ratios = - direction[ direction <0 ] / x_curr[ direction <0 ]
            if (alpha_ratios > 1).any():
                max_alpha = alpha_ratios.max()
            else:
                max_alpha = 1
        else:
            constrained = False
            max_alpha = 1

        if print_path_verbose:
            print("[STARTING LINE SEARCH]")
        res = line_search(f=zed_wrapper(score), myfprime=zed_wrapper(jac), xk=x_curr, pk= direction/max_alpha, gfk= grad, old_fval=val,old_old_fval=val_old) #
        if print_path_verbose:
            print("[FINISHED LINE SEARCH]")
        # NOTE(review): res is the scipy line_search result tuple; res[3]
        # and res[5] are reused below as the new objective value and
        # gradient information -- confirm against the scipy version in use.
        alpha, _, _, _, _, _ = res

        if alpha is not None:
            # adjust the future step size
            if alpha >= 1:
                alpha_t -= 1
            else:
                alpha_t += 1
        elif constrained:
            for j in range(5): # formerly range(17), but that was excessive,
                # in general, this succeeds happens when alpha >= 0.1 (super helpful) or alpha <= 1e-14 (super useless)
                if score(x_curr - (.3**j)*grad/max_alpha) < val:
                    # This can occur when the strong wolf condition insists that the
                    # current step size is too small (i.e. the gradient is too
                    # consistent with the function to think that a small step is
                    # optimal for a global (unconstrained) optimization.
                    alpha = (.3**j)
                    # i secretly think this is stupid.
                    if print_stop_iteration:
                        print("[STOP ITERATION: simple line search worked :)] i: %s, alpha: 1e-%s" % (_i,j))
                    break
            else:
                # moving in the direction of the gradient yielded no improvement: stop
                if print_stop_iteration:
                    print("[STOP ITERATION: simple line search failed] i: %s" % (_i,))
                return cd_res(x_curr, val)
        else:
            # moving in the direction of the gradient yielded no improvement: stop
            if print_stop_iteration:
                print("[STOP ITERATION: alpha is None] i: %s, grad: %s, step: %s" % (_i, grad, direction/max_alpha, ))
            return cd_res(x_curr, val)

        # iterate
        if constrained:
            x_next = x_curr + min(1, alpha)*direction/max_alpha
            # grad is reset to None so it gets re-initialized at the top of
            # the next iteration.
            x_old, x_curr, val_old, val, grad, old_grad = x_curr, x_next, val, score(x_next), None, grad
        else:
            #x_next = x_curr + alpha *direction/max_alpha
            x_next = np.maximum(x_curr + alpha *direction/max_alpha,0)
            x_old, x_curr, val_old, val, grad, old_grad = x_curr, x_next, val, res[3], res[5], grad
        val_diff = val_old - val

        # rounding error can get us really close or even across the coordinate plane.
        # NOT SURE IF THIS IS NECESSARY NOW THAT THE GRAD IS WRAPPED IN ZED_WRAPPER
        # NOT SURE IF THIS IS NECESSARY NOW THAT THE GRAD IS WRAPPED IN ZED_WRAPPER
        #-- xtmp = x_curr.copy()
        #-- x_curr[abs(x_curr) < zero_eps] = 0
        #-- x_curr[x_curr < zero_eps] = 0
        #-- if (xtmp != x_curr).any():
        #--     if print_path_verbose:
        #--         print('[CLEARING GRADIENT]')
        #--     grad = None
        # NOT SURE IF THIS IS NECESSARY NOW THAT THE GRAD IS WRAPPED IN ZED_WRAPPER
        # NOT SURE IF THIS IS NECESSARY NOW THAT THE GRAD IS WRAPPED IN ZED_WRAPPER

        if print_path:
            print("[Path] i: %s, In Sample R^2: %0.6f, incremental R^2:: %0.6f, learning rate: %0.5f, alpha: %0.5f, zeros: %s" %
                  (_i, 1- val / val0, (val_diff/ val0), aggressiveness * (alpha_mult ** alpha_t), alpha, sum( x_curr == 0)))
            if print_path_verbose:
                print("old_grad: %s,x_curr %s" % (old_grad, x_curr, ))

        if (x_curr == 0).all() and (x_old == 0).all():
            # this happens when we were at the origin and the gradient didn't
            # take us out of the range of zero_eps
            if _i == 0:
                # NOTE(review): on the first iteration a step away from the
                # origin is forced (via cdl_step) before giving up -- confirm
                # this is still the intended recovery path.
                x_curr, val = cdl_step (score, guess, jac, val, aggressiveness, zero_eps, print_path)
                if (x_curr == 0).all():
                    if print_stop_iteration:
                        print("[STOP ITERATION: Stuck at the origin] iteration: %s"% (_i,))
            if (x_curr == 0).all():
                if print_stop_iteration:
                    print("[STOP ITERATION: Stuck at the origin] iteration: %s"% (_i,))
                return cd_res(x_curr, score(x_curr)) # tricky tricky...

        if (x_curr < 0).any():
            # This shouldn't ever happen if max_alpha is specified properly
            raise RuntimeError("An internal Error Occured: (x_curr < 0).any()")

        if val_diff/val < tol:
            # this a heuristic rule, to be sure, but seems to be useful.
            # TODO: this is kinda stupid without a minimum on the learning rate (i.e. `aggressiveness`).
            if _i > min_iter:
                if print_stop_iteration:
                    # this is kida stupid
                    print("[STOP ITERATION: val_diff/val < tol] i: %s, val: %s, val_diff: %s" % (_i, val, val_diff, ))
                return cd_res(x_curr, val)

    # returns solution in for loop if successfully converges
    raise RuntimeError('Solution did not converge to default tolerance')
def zed_wrapper(fun):
    """Wrap ``fun`` so it is always evaluated on the positive orthant:
    every input is clipped element-wise at zero before being passed on."""
    def clamped(x, *args, **kwargs):
        # Project x onto the positive orthant, then delegate unchanged.
        return fun(np.maximum(0, x), *args, **kwargs)
    return clamped
| 42.383333 | 159 | 0.567735 | 1,353 | 10,172 | 4.128603 | 0.230599 | 0.035804 | 0.048335 | 0.025063 | 0.383101 | 0.339241 | 0.281776 | 0.261726 | 0.261726 | 0.253849 | 0 | 0.013481 | 0.336414 | 10,172 | 239 | 160 | 42.560669 | 0.814074 | 0.341329 | 0 | 0.307692 | 0 | 0.020979 | 0.119927 | 0 | 0 | 0 | 0 | 0.008368 | 0.034965 | 1 | 0.034965 | false | 0 | 0.020979 | 0.006993 | 0.125874 | 0.20979 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fccf49b1abdfc57fe1c9a3e98dcfeba511b3cef0 | 2,495 | py | Python | runOgre.py | songsiwei/Ogre_r_sv | b31dc64133f082ae395196ebedf41c8cc825ebfd | [
"BSD-3-Clause"
] | null | null | null | runOgre.py | songsiwei/Ogre_r_sv | b31dc64133f082ae395196ebedf41c8cc825ebfd | [
"BSD-3-Clause"
] | null | null | null | runOgre.py | songsiwei/Ogre_r_sv | b31dc64133f082ae395196ebedf41c8cc825ebfd | [
"BSD-3-Clause"
] | null | null | null | from ogre import generators
import argparse
from configparser import ConfigParser
from ase.io import read, write
import os
from ogre.utils.utils import print_run_time
#print('################')
#teat_a = read('TETCEN.cif', format= 'cif')
#print(teat_a)
def parse_arguments():
    """Parse the command line for this script.

    Returns an argparse namespace with a single ``filename`` attribute,
    defaulting to ``ogre.config``.
    """
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument(
        '--filename', dest='filename', type=str, default='ogre.config')
    return cli_parser.parse_args()
@print_run_time
def main():
    """Read the ogre config file and cleave crystal surfaces accordingly.

    Config sections used:
      [io]         structure_path / structure_name / format
      [parameters] layers, miller_index, highest_index, vacuum_size,
                   supercell_size, desired_num_of_molecules_oneLayer
      [methods]    cleave_option (0 = single surface, 1 = surface energies)

    Output is written below a directory named after ``structure_name``
    (created if missing).
    """
    args = parse_arguments()
    filename = args.filename
    config = ConfigParser()
    config.read(filename, encoding='UTF-8')
    io = config['io']
    parameters = config['parameters']
    methods = config['methods']
    structure_path = io['structure_path']
    structure_name = io['structure_name']
    format_string = io['format']
    cleave_option = int(methods['cleave_option'])
    layers_string = parameters['layers']
    miller_index = [int(x) for x in parameters['miller_index'].split(" ")]
    # Parse the layers spec: space-separated entries, each either a single
    # integer or an inclusive range such as "3-5".
    list_of_layers = []
    for item in layers_string.split(' '):
        if item:
            if '-' in item:
                # BUG FIX: this split statement was duplicated verbatim in
                # the original (harmless but redundant).
                start, end = item.split('-')
                list_of_layers.extend(list(range(int(start), int(end) + 1)))
            else:
                list_of_layers.append(int(item))
    highest_index = int(parameters['highest_index'])
    vacuum_size = int(parameters['vacuum_size'])
    # supercell_size needs three components; anything shorter means "unset".
    supercell_size = parameters['supercell_size'].split(' ')
    supercell_size = None if len(supercell_size) < 3 else [
        int(x) for x in supercell_size]
    desired_num_of_molecules_oneLayer = int(
        parameters['desired_num_of_molecules_oneLayer'])
    if not os.path.isdir(structure_name):
        os.mkdir(structure_name)
    initial_structure = read(structure_path, format= 'cif')
    if cleave_option == 0:
        print("Cleave single surface")
        generators.atomic_task(structure_name, initial_structure, miller_index,
                               list_of_layers, vacuum_size, supercell_size,
                               format_string, desired_num_of_molecules_oneLayer)
    elif cleave_option == 1:
        print("Cleave surfaces for surface energy calculations")
        generators.cleave_for_surface_energies(
            structure_path, structure_name, vacuum_size, list_of_layers,
            highest_index, supercell_size, format_string,
            desired_num_of_molecules_oneLayer)
if __name__ == "__main__":
main()
| 38.384615 | 92 | 0.662926 | 299 | 2,495 | 5.240803 | 0.304348 | 0.058073 | 0.03829 | 0.053606 | 0.121251 | 0.089981 | 0.068922 | 0.068922 | 0.068922 | 0 | 0 | 0.002572 | 0.220842 | 2,495 | 64 | 93 | 38.984375 | 0.803498 | 0.034068 | 0 | 0.035714 | 0 | 0 | 0.114644 | 0.013808 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.107143 | 0 | 0.160714 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcd93a3e56808b06287beaf9ab650f7bd54032be | 13,058 | py | Python | quantile_ml/utils_scoring.py | doordash/auto_ml | 7e6873265c4c2c0a03946c3f68a954930cda3bb2 | [
"MIT"
] | 17 | 2016-09-23T03:29:23.000Z | 2022-03-22T10:42:34.000Z | quantile_ml/utils_scoring.py | doordash/auto_ml | 7e6873265c4c2c0a03946c3f68a954930cda3bb2 | [
"MIT"
] | 3 | 2016-09-21T23:12:51.000Z | 2016-12-01T19:17:18.000Z | quantile_ml/utils_scoring.py | doordash/auto_ml | 7e6873265c4c2c0a03946c3f68a954930cda3bb2 | [
"MIT"
] | 3 | 2017-05-30T17:30:30.000Z | 2020-03-26T09:43:24.000Z | from collections import OrderedDict
import math
from quantile_ml import utils
import pandas as pd
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.metrics import mean_squared_error, make_scorer, brier_score_loss, accuracy_score, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score, log_loss, roc_auc_score
import numpy as np
# String renderings of missing / non-finite values. A y value whose str()
# form appears in this set is treated as null/infinite and filtered out
# before re-scoring (see the ValueError fallbacks in the scorer classes).
bad_vals_as_strings = set([str(float('nan')), str(float('inf')), str(float('-inf')), 'None', 'none', 'NaN', 'NAN', 'nan', 'NULL', 'null', '', 'inf', '-inf', 'np.nan', 'numpy.nan'])
def advanced_scoring_classifiers(probas, actuals, name=None):
    """Print a diagnostic report for classifier probability predictions.

    Reports the brier score loss, overall accuracy at a 0.5 threshold, a
    confusion matrix, and the observed label rate within each 10%-wide band
    of predicted probability. Output goes to stdout; nothing is returned.

    Parameters
    ----------
    probas : list
        Predicted probabilities, either flat (positive-class probability per
        sample) or nested ([negative, positive] pairs per sample).
    actuals : list
        True binary labels, aligned with `probas`.
    name : str, optional
        A label printed near the top of the report.
    """
    # pandas Series don't play nice here. Make sure our actuals list is indeed a list
    actuals = list(actuals)
    predictions = list(probas)

    print('Here is our brier-score-loss, which is the default value we optimized for while training, and is the value returned from .score() unless you requested a custom scoring metric')
    print('It is a measure of how close the PROBABILITY predictions are.')
    if name != None:
        print(name)

    # Sometimes we will be given "flattened" probabilities (only the probability of our positive label), while other times we might be given "nested" probabilities (probabilities of both positive and negative, in a list, for each item).
    try:
        probas = [proba[1] for proba in probas]
    except:
        # Already flattened (floats are not subscriptable); keep as-is.
        pass

    print(format(brier_score_loss(actuals, probas), '.4f'))

    print('\nHere is the trained estimator\'s overall accuracy (when it predicts a label, how frequently is that the correct label?)')
    predicted_labels = []
    for pred in probas:
        # Hard labels via a fixed 0.5 decision threshold.
        if pred >= 0.5:
            predicted_labels.append(1)
        else:
            predicted_labels.append(0)
    print(format(accuracy_score(y_true=actuals, y_pred=predicted_labels) * 100, '.1f') + '%')

    print('\nHere is a confusion matrix showing predictions and actuals by label')
    #it would make sense to use sklearn's confusion_matrix here but it apparently has no labels
    #took this idea instead from: http://stats.stackexchange.com/a/109015
    conf = pd.crosstab(pd.Series(actuals), pd.Series(predicted_labels), rownames=['v Actual v'], colnames=['Predicted >'], margins=True)
    print(conf)

    print('Here is the accuracy of our trained estimator at each level of predicted probabilities')
    # create summary dict: one bucket (of actual labels) per 10% band,
    # keyed 0, 10, ..., 100.
    summary_dict = OrderedDict()
    for num in range(0, 110, 10):
        summary_dict[num] = []

    for idx, proba in enumerate(probas):
        # Bucket by flooring the probability to its 10% band (1.0 -> 100).
        proba = math.floor(int(proba * 100) / 10) * 10
        summary_dict[proba].append(actuals[idx])

    for k, v in summary_dict.items():
        if len(v) > 0:
            print('Predicted probability: ' + str(k) + '%')
            # Mean of the binary labels == observed positive rate in bucket.
            actual = sum(v) * 1.0 / len(v)

            # Format into a prettier number
            actual = round(actual * 100, 0)
            print('Actual: ' + str(actual) + '%')
            print('# preds: ' + str(len(v)) + '\n')

    print('\n\n')
def calculate_and_print_differences(predictions, actuals, name=None):
    """Report how many predictions over/under-shoot their actuals, and the
    average magnitude of each kind of miss.

    Exact matches (difference of zero) are deliberately ignored. Output goes
    to stdout; nothing is returned.
    """
    overshoots, undershoots = [], []
    for idx, predicted in enumerate(predictions):
        delta = predicted - actuals[idx]
        if delta > 0:
            overshoots.append(delta)
        elif delta < 0:
            undershoots.append(delta)

    if name != None:
        print(name)
    print('Count of positive differences (prediction > actual):')
    print(len(overshoots))
    print('Count of negative differences:')
    print(len(undershoots))
    if len(overshoots) > 0:
        print('Average positive difference:')
        print(sum(overshoots) * 1.0 / len(overshoots))
    if len(undershoots) > 0:
        print('Average negative difference:')
        print(sum(undershoots) * 1.0 / len(undershoots))
def advanced_scoring_regressors(predictions, actuals, verbose=2, name=None):
    """Print a diagnostic report for regression predictions.

    Reports RMSE, mean/median of predictions and actuals, MAE, median
    absolute error, explained variance, R^2 and over/under-shoot summaries;
    with ``verbose > 2`` the same stats are also printed per decile of the
    predictions (sorted by predicted value). Output goes to stdout; nothing
    is returned.
    """
    # pandas Series don't play nice here. Make sure our actuals list is indeed a list
    actuals = list(actuals)
    predictions = list(predictions)

    print('\n\n***********************************************')
    if name != None:
        print(name)
    print('Advanced scoring metrics for the trained regression model on this particular dataset:\n')

    # 1. overall RMSE
    print('Here is the overall RMSE for these predictions:')
    print(mean_squared_error(actuals, predictions)**0.5)

    # 2. overall avg predictions
    print('\nHere is the average of the predictions:')
    print(sum(predictions) * 1.0 / len(predictions))

    # 3. overall avg actuals
    print('\nHere is the average actual value on this validation set:')
    print(sum(actuals) * 1.0 / len(actuals))

    # 2(a). median predictions
    print('\nHere is the median prediction:')
    print(np.median(predictions))

    # 3(a). median actuals
    print('\nHere is the median actual value:')
    print(np.median(actuals))

    # 4. avg differences (not RMSE)
    print('\nHere is the mean absolute error:')
    print(mean_absolute_error(actuals, predictions))

    print('\nHere is the median absolute error (robust to outliers):')
    print(median_absolute_error(actuals, predictions))

    print('\nHere is the explained variance:')
    print(explained_variance_score(actuals, predictions))

    print('\nHere is the R-squared value:')
    print(r2_score(actuals, predictions))

    # 5. pos and neg differences
    calculate_and_print_differences(predictions=predictions, actuals=actuals, name=name)

    actuals_preds = list(zip(actuals, predictions))
    # Sort by PREDICTED value, since this is what what we will know at the time we make a prediction
    actuals_preds.sort(key=lambda pair: pair[1])
    actuals_sorted = [act for act, pred in actuals_preds]
    predictions_sorted = [pred for act, pred in actuals_preds]

    if verbose > 2:
        print('Here\'s how the trained predictor did on each successive decile (ten percent chunk) of the predictions:')
        for i in range(1,10):
            print('\n**************')
            print('Bucket number:')
            print(i)
            # There's probably some fenceposting error here
            min_idx = int((i - 1) / 10.0 * len(actuals_sorted))
            max_idx = int(i / 10.0 * len(actuals_sorted))
            actuals_for_this_decile = actuals_sorted[min_idx:max_idx]
            predictions_for_this_decile = predictions_sorted[min_idx:max_idx]

            print('Avg predicted val in this bucket')
            print(sum(predictions_for_this_decile) * 1.0 / len(predictions_for_this_decile))
            print('Avg actual val in this bucket')
            print(sum(actuals_for_this_decile) * 1.0 / len(actuals_for_this_decile))
            print('RMSE for this bucket')
            print(mean_squared_error(actuals_for_this_decile, predictions_for_this_decile)**0.5)
            calculate_and_print_differences(predictions_for_this_decile, actuals_for_this_decile)

    print('')
    print('\n***********************************************\n\n')
def rmse_func(y, predictions):
    """Root-mean-squared error between ``y`` and ``predictions``."""
    return math.sqrt(mean_squared_error(y, predictions))
# Maps the scoring-method names accepted by the scorer classes below to the
# metric callables they resolve to; each callable takes (y_true, y_pred).
scoring_name_function_map = {
    'rmse': rmse_func
    , 'median_absolute_error': median_absolute_error
    , 'r2': r2_score
    , 'r-squared': r2_score
    , 'mean_absolute_error': mean_absolute_error
    , 'accuracy': accuracy_score
    , 'accuracy_score': accuracy_score
    , 'log_loss': log_loss
    , 'roc_auc': roc_auc_score
    , 'brier_score_loss': brier_score_loss
}
class RegressionScorer(object):
    """Scorer for regression estimators.

    Wraps a metric (looked up by name in ``scoring_name_function_map``, or
    supplied directly as a callable) and exposes ``.score()`` in the
    maximization convention: the metric value is negated so that a higher
    score is always better.
    """

    def __init__(self, scoring_method=None):
        # Default metric is RMSE.
        if scoring_method is None:
            scoring_method = 'rmse'

        self.scoring_method = scoring_method

        if callable(scoring_method):
            self.scoring_func = scoring_method
        else:
            self.scoring_func = scoring_name_function_map[scoring_method]
        # NOTE(review): this re-assignment is redundant (set above already).
        self.scoring_method = scoring_method

    def get(self, prop_name, default=None):
        # dict.get()-style attribute access with a fallback default.
        try:
            return getattr(self, prop_name)
        except AttributeError:
            return default

    def score(self, estimator, X, y, took_log_of_y=False, advanced_scoring=False, verbose=2, name=None):
        """Score ``estimator`` on (X, y).

        Rows with missing y are dropped first; if the metric still raises
        ValueError, rows whose y renders as a null/infinite string are
        filtered and scoring is retried. Returns ``-1 * metric`` so larger
        is better; optionally prints a full diagnostic report.
        """
        X, y = utils.drop_missing_y_vals(X, y, output_column=None)

        # GradientBoosting needs dense input.
        if isinstance(estimator, GradientBoostingRegressor):
            X = X.toarray()

        predictions = estimator.predict(X)

        if took_log_of_y:
            # Undo the log-transform that was applied to y during training.
            for idx, val in enumerate(predictions):
                predictions[idx] = math.exp(val)

        try:
            score = self.scoring_func(y, predictions)
        except ValueError:
            # The metric choked, most likely on null/infinite y values:
            # drop those rows and re-score on the remainder.
            bad_val_indices = []
            for idx, val in enumerate(y):
                if str(val) in bad_vals_as_strings:
                    bad_val_indices.append(idx)

            predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
            y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]

            print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the y values. We will ignore these, and report the score on the rest of the dataset')
            score = self.scoring_func(y, predictions)

        if advanced_scoring == True:
            if hasattr(estimator, 'name'):
                print(estimator.name)
            advanced_scoring_regressors(predictions, y, verbose=verbose, name=name)
        return - 1 * score
class ClassificationScorer(object):
    """Scorer for classification estimators.

    Scores the estimator's predicted probabilities with the configured
    metric (default: brier score loss) and returns ``-1 * metric`` so that
    a higher score is always better.
    """

    def __init__(self, scoring_method=None):
        # Default metric is the brier score loss on predicted probabilities.
        if scoring_method is None:
            scoring_method = 'brier_score_loss'

        self.scoring_method = scoring_method

        if callable(scoring_method):
            self.scoring_func = scoring_method
        else:
            self.scoring_func = scoring_name_function_map[scoring_method]

    def get(self, prop_name, default=None):
        # dict.get()-style attribute access with a fallback default.
        try:
            return getattr(self, prop_name)
        except AttributeError:
            return default

    def clean_probas(self, probas):
        """Clip predicted probabilities into [0, 1], warning the user.

        Handles both flat lists of positive-class probabilities and nested
        per-class probability lists.
        """
        print('Warning: We have found some values in the predicted probabilities that fall outside the range {0, 1}')
        print('This is likely the result of a model being trained on too little data, or with a bad set of hyperparameters. If you get this warning while doing a hyperparameter search, for instance, you can probably safely ignore it')
        print('We will cap those values at 0 or 1 for the purposes of scoring, but you should be careful to have similar safeguards in place in prod if you use this model')
        if not isinstance(probas[0], list):
            probas = [min(max(pred, 0), 1) for pred in probas]
            return probas
        else:
            cleaned_probas = []
            for proba_tuple in probas:
                cleaned_tuple = []
                for item in proba_tuple:
                    cleaned_tuple.append(max(min(item, 1), 0))
                cleaned_probas.append(cleaned_tuple)
            return cleaned_probas

    def score(self, estimator, X, y, advanced_scoring=False):
        """Score ``estimator`` on (X, y) via its predicted probabilities.

        Returns ``-1 * metric`` (or ``(score, predictions)`` when
        ``advanced_scoring`` is truthy). Falls back twice on ValueError:
        first dropping null/infinite y rows, then clipping out-of-range
        probabilities via :meth:`clean_probas`.
        """
        X, y = utils.drop_missing_y_vals(X, y, output_column=None)

        # GradientBoosting needs dense input.
        if isinstance(estimator, GradientBoostingClassifier):
            X = X.toarray()

        predictions = estimator.predict_proba(X)

        if self.scoring_method == 'brier_score_loss':
            # At the moment, Microsoft's LightGBM returns probabilities > 1 and < 0, which can break some scoring functions. So we have to take the max of 1 and the pred, and the min of 0 and the pred.
            probas = [max(min(row[1], 1), 0) for row in predictions]
            predictions = probas

        try:
            score = self.scoring_func(y, predictions)
        except ValueError as e:
            # First fallback: drop rows whose y value renders as a
            # null/infinite string and re-score on the remainder.
            bad_val_indices = []
            for idx, val in enumerate(y):
                if str(val) in bad_vals_as_strings:
                    bad_val_indices.append(idx)

            predictions = [val for idx, val in enumerate(predictions) if idx not in bad_val_indices]
            y = [val for idx, val in enumerate(y) if idx not in bad_val_indices]

            print('Found ' + str(len(bad_val_indices)) + ' null or infinity values in the y values. We will ignore these, and report the score on the rest of the dataset')
            try:
                score = self.scoring_func(y, predictions)
            except ValueError:
                # Sometimes, particularly for a badly fit model using either too little data, or a really bad set of hyperparameters during a grid search, we can predict probas that are > 1 or < 0. We'll cap those here, while warning the user about them, because they're unlikely to occur in a model that's properly trained with enough data and reasonable params
                predictions = self.clean_probas(predictions)
                score = self.scoring_func(y, predictions)

        if advanced_scoring:
            return (-1 * score, predictions)
        else:
            return -1 * score
| 40.055215 | 362 | 0.652856 | 1,735 | 13,058 | 4.767723 | 0.197695 | 0.02986 | 0.014507 | 0.01632 | 0.342722 | 0.28663 | 0.230658 | 0.230658 | 0.219536 | 0.189192 | 0 | 0.010702 | 0.24866 | 13,058 | 325 | 363 | 40.178462 | 0.832433 | 0.114566 | 0 | 0.297778 | 0 | 0.026667 | 0.192341 | 0.01083 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048889 | false | 0.004444 | 0.031111 | 0.004444 | 0.133333 | 0.284444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcda80063c8aaa6e840fe53a93438e5045c6f364 | 1,150 | py | Python | setup.py | icon-project/loopchain_tools | 3441f0ade654bbaedc382d94230f526b14baea1a | [
"Apache-2.0"
] | 1 | 2020-08-15T16:15:03.000Z | 2020-08-15T16:15:03.000Z | setup.py | icon-project/loopchain_tools | 3441f0ade654bbaedc382d94230f526b14baea1a | [
"Apache-2.0"
] | null | null | null | setup.py | icon-project/loopchain_tools | 3441f0ade654bbaedc382d94230f526b14baea1a | [
"Apache-2.0"
] | 2 | 2021-06-02T07:50:57.000Z | 2021-12-01T23:35:10.000Z | import os
from setuptools import setup, find_packages
version = os.environ.get('VERSION')
if version is None:
with open(os.path.join('.', 'VERSION')) as version_file:
version = version_file.read().strip()
setup_options = {
'name': 'loopchain tools',
'description': 'CLI tools for loopchain',
'long_description': open('README.md').read(),
'long_description_content_type': 'text/markdown',
'url': 'https://github.com/icon-project/loopchain_tools',
'version': version,
'author': 'ICON foundation',
'author_email': 't_core@iconloop.com',
'packages': find_packages(),
'license': "Apache License 2.0",
'install_requires': list(open('requirements.txt')),
'classifiers': [
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only'
]
}
setup(**setup_options)
| 31.944444 | 61 | 0.64087 | 123 | 1,150 | 5.878049 | 0.626016 | 0.078838 | 0.103734 | 0.071923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006572 | 0.206087 | 1,150 | 35 | 62 | 32.857143 | 0.785323 | 0 | 0 | 0 | 0 | 0 | 0.533913 | 0.025217 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcdb510122b20178c2555bc294630f212df04c12 | 1,315 | py | Python | lecture_6/stack_4.py | Willianan/Interview_Book | bde1a975bcffdb1179f234cb3380f51d4c800b12 | [
"Apache-2.0"
] | 2 | 2019-11-12T09:07:56.000Z | 2020-07-15T06:28:58.000Z | lecture_6/stack_4.py | Willianan/Interview_Book | bde1a975bcffdb1179f234cb3380f51d4c800b12 | [
"Apache-2.0"
] | null | null | null | lecture_6/stack_4.py | Willianan/Interview_Book | bde1a975bcffdb1179f234cb3380f51d4c800b12 | [
"Apache-2.0"
] | 1 | 2020-05-04T13:46:38.000Z | 2020-05-04T13:46:38.000Z | # -*- coding:utf-8 -*-
"""
@Author:Charles Van
@E-mail: williananjhon@hotmail.com
@Time:2019-08-13 15:52
@Project:InterView_Book
@Filename:stack_4.py
@description:
使用堆栈解决汉诺塔问题
"""
'''
题目描述:
有3根杆子,其中一根上有n快铁饼,铁饼由小到大依次从上往下排列,要求把杆1上的铁饼挪到杆2上,
杆3可以作为铁饼转移的中转站。当转移铁饼时,必须保证小铁饼只能放到大铁饼的上头,请给出移动步骤。
'''
class HanoiMove:
    """Solve the Tower of Hanoi for ``stackNum`` rings.

    The full move sequence (moving all rings from peg ``stackFrom`` to peg
    ``stackTo``, using the remaining peg as spare) is generated eagerly in
    ``__init__`` and stored in execution order in ``self.hanoiMove``;
    ``printMoveSteps`` prints it.

    Raises RuntimeError for a non-positive ring count, identical source and
    target pegs, or negative peg numbers.
    """

    def __init__(self, stackNum, stackFrom, stackTo):
        if stackNum <= 0 or stackFrom == stackTo or stackFrom < 0 or stackTo < 0:
            raise RuntimeError("Invalid parameters")
        self.stackFrom = stackFrom
        self.stackTo = stackTo
        self.hanoiMove = []  # move descriptions, in execution order
        self.moveHanoiStack(self.stackFrom, self.stackTo, 1, stackNum)

    def printMoveSteps(self):
        """Print the move steps, one per line, in execution order.

        BUG FIX: the original implementation popped the list and recursed
        once per move (2**n - 1 stack frames), which exceeds Python's
        default recursion limit for modest ring counts, and it destroyed
        the move list so the steps could only be printed once. Iterating
        produces identical output, scales to any n, and leaves
        ``self.hanoiMove`` intact for repeated use.
        """
        for step in self.hanoiMove:
            print(step)

    def moveHanoiStack(self, stackFrom, stackTo, top, bottom):
        """Append the moves shifting rings ``top..bottom`` from
        ``stackFrom`` to ``stackTo`` (classic recursive decomposition)."""
        s = "Moving ring " + str(bottom) + " from stack " + str(stackFrom) + " to " + str(stackTo)
        if bottom - top == 0:
            # Single ring left: move it directly.
            self.hanoiMove.append(s)
            return
        # Find the spare peg: the one in {1, 2, 3} that is neither the
        # source nor the target.
        other = stackFrom
        for i in range(1, 4):
            if i != stackFrom and i != stackTo:
                other = i
                break
        # Move the smaller rings out of the way, move ring `bottom`, then
        # bring the smaller rings back on top of it.
        self.moveHanoiStack(stackFrom, other, top, bottom - 1)
        self.hanoiMove.append(s)
        self.moveHanoiStack(other, stackTo, top, bottom - 1)
if __name__ == "__main__":
hm = HanoiMove(3,1,2)
hm.printMoveSteps() | 23.909091 | 92 | 0.698859 | 169 | 1,315 | 5.35503 | 0.461538 | 0.086188 | 0.044199 | 0.044199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028131 | 0.161977 | 1,315 | 55 | 93 | 23.909091 | 0.793103 | 0.13308 | 0 | 0.129032 | 0 | 0 | 0.052889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0 | 0 | 0.193548 | 0.16129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcdc751153a77f14d59683f2518a461ca9f696b9 | 34,911 | py | Python | src/genopandas/core/matrix.py | jrderuiter/genopandas | 8d4bd46cfcd3a9c27c1f561ed25b3e5c3ff4b8a5 | [
"MIT"
] | null | null | null | src/genopandas/core/matrix.py | jrderuiter/genopandas | 8d4bd46cfcd3a9c27c1f561ed25b3e5c3ff4b8a5 | [
"MIT"
] | null | null | null | src/genopandas/core/matrix.py | jrderuiter/genopandas | 8d4bd46cfcd3a9c27c1f561ed25b3e5c3ff4b8a5 | [
"MIT"
] | null | null | null | import functools
import itertools
import operator
import re
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
import toolz
from genopandas import plotting as gplot
from genopandas.util.pandas_ import DfWrapper
from .frame import GenomicDataFrame, GenomicSlice
RANGED_REGEX = r'(?P<chromosome>\w+):(?P<start>\d+)-(?P<end>\d+)'
POSITIONED_REGEX = r'(?P<chromosome>\w+):(?P<position>\d+)'
class AnnotatedMatrix(DfWrapper):
"""AnnotatedMatrix class.
Annotated matrix classes respresent 2D numeric feature-by-sample matrices
(with 'features' along the rows and samples along the columns), which can
be annotated with optional sample_data and feature_data frames that
describe the samples. The type of feature varies between different
sub-classes, examples being genes (for gene expression matrices) and
region-based bins (for copy-number data).
This (base) class mainly contains a variety of methods for querying,
subsetting and combining different annotation matrices. General plotting
methods are also provided (``plot_heatmap``).
Note that the class follows the feature-by-sample convention that is
typically followed in biological packages, rather than the sample-by-feature
orientation. This has the additional advantage of allowing more complex
indices (such as a region-based MultiIndex) for the features, which are
more difficult to use for DataFrame columns than for rows.
Attributes
----------
values : pd.DataFrame or AnnotatedMatrix
Matrix values.
sample_data : pd.DataFrame
DataFrame containing sample annotations, whose index corresponds
with the columns of the matrix.
feature_data : pd.DataFrame
DataFrame containing feature annotations, whose index corresponds
with the rows of the matrix.
"""
def __init__(self, values, sample_data=None, feature_data=None):
if isinstance(values, AnnotatedMatrix):
# Copy values from existing matrix (only copies sample/feature
# data if these are not given explictly).
sample_data = sample_data or values.sample_data
feature_data = feature_data or values.feature_data
values = values.values.copy()
else:
# Create empty annotations if none given.
if sample_data is None:
sample_data = pd.DataFrame({}, index=values.columns)
if feature_data is None:
feature_data = pd.DataFrame({}, index=values.index)
# Check {sample,feature}_data.
# assert (values.shape[1] == sample_data.shape[0]
# and all(values.columns == sample_data.index))
# assert (values.shape[0] == feature_data.shape[0]
# and all(values.index == feature_data.index))
# Check if all matrix columns are numeric.
for col_name, col_values in values.items():
if not is_numeric_dtype(col_values):
raise ValueError(
'Column {} is not numeric'.format(col_name))
super().__init__(values)
self._sample_data = sample_data.reindex(index=values.columns)
self._feature_data = feature_data.reindex(index=values.index)
def _constructor(self, values):
"""Constructor that attempts to build new instance
from given values."""
if isinstance(values, pd.DataFrame):
return self.__class__(
values.copy(),
sample_data=self._sample_data,
feature_data=self._feature_data)
return values
    @property
    def feature_data(self):
        # Feature annotation frame (rows aligned with the matrix index).
        return self._feature_data

    @feature_data.setter
    def feature_data(self, value):
        # Align the new annotations to the matrix rows before storing;
        # features missing from `value` become NaN rows.
        value = value.reindex(index=self._values.index)
        self._feature_data = value

    @property
    def sample_data(self):
        # Sample annotation frame (rows aligned with the matrix columns).
        return self._sample_data

    @sample_data.setter
    def sample_data(self, value):
        # Align the new annotations to the matrix columns before storing;
        # samples missing from `value` become NaN rows.
        value = value.reindex(index=self._values.columns)
        self._sample_data = value
    @classmethod
    def from_csv(cls,
                 file_path,
                 sample_data=None,
                 feature_data=None,
                 sample_mapping=None,
                 feature_mapping=None,
                 drop_cols=None,
                 read_data_kws=None,
                 **kwargs):
        """Read an annotated matrix from a csv file.

        Parameters
        ----------
        file_path : str
            Path of the csv file containing the matrix values; the first
            column is used as (feature) index by default.
        sample_data, feature_data : pd.DataFrame or file path, optional
            Sample/feature annotations. Anything that is not already a
            DataFrame is assumed to be a path/buffer and read via
            ``pd.read_csv``.
        sample_mapping, feature_mapping : dict, optional
            Renaming maps applied to the matrix columns/rows.
        drop_cols : list, optional
            Columns to drop from the value matrix before alignment.
        read_data_kws : dict, optional
            Extra keyword arguments used when reading the annotation files.
        **kwargs
            Passed through to ``pd.read_csv`` for the value matrix.

        Returns
        -------
        A new instance of ``cls``.
        """
        default_kwargs = {'index_col': 0}
        kwargs = toolz.merge(default_kwargs, kwargs)

        values = pd.read_csv(str(file_path), **kwargs)

        # If sample/feature_data are not dataframes, assume they are
        # file paths or objects and try to read from them.
        # The annotation files default to the same separator as the matrix;
        # popping `sep` is safe since `values` has already been read.
        read_data_kws_default = {
            'sep': kwargs.pop('sep', None),
            'index_col': 0
        }
        read_data_kws = toolz.merge(read_data_kws_default, read_data_kws or {})

        if not (sample_data is None or isinstance(sample_data, pd.DataFrame)):
            sample_data = pd.read_csv(sample_data, **read_data_kws)

        if not (feature_data is None
                or isinstance(feature_data, pd.DataFrame)):
            feature_data = pd.read_csv(feature_data, **read_data_kws)

        # Drop/rename/reorder the raw values to match the annotations.
        values = cls._preprocess_values(
            values,
            sample_data=sample_data,
            feature_data=feature_data,
            sample_mapping=sample_mapping,
            feature_mapping=feature_mapping,
            drop_cols=drop_cols)

        return cls(values, sample_data=sample_data, feature_data=feature_data)
@classmethod
def _preprocess_values(cls,
values,
sample_data=None,
feature_data=None,
sample_mapping=None,
feature_mapping=None,
drop_cols=None):
"""Preprocesses matrix to match given sample/feature data."""
# Drop extra columns (if needed).
if drop_cols is not None:
values = values.drop(drop_cols, axis=1)
# Rename samples/features using mappings (if given).
if sample_mapping is not None or feature_mapping is not None:
values = values.rename(
columns=sample_mapping, index=feature_mapping)
# Reorder values to match annotations.
sample_order = None if sample_data is None else sample_data.index
feat_order = None if feature_data is None else feature_data.index
values = values.reindex(
columns=sample_order, index=feat_order, copy=False)
return values
def to_csv(self,
file_path,
sample_data_path=None,
feature_data_path=None,
**kwargs):
"""Writes matrix values to a csv file, using pandas' to_csv method."""
# Write matrix values.
self._values.to_csv(file_path, **kwargs)
# Write sample/feature data if paths given.
if sample_data_path is not None:
self._sample_data.to_csv(
sample_data_path, sep=kwargs.pop('sep', None), index=True)
if feature_data_path is not None:
self._feature_data.to_csv(
feature_data_path, sep=kwargs.pop('sep', None), index=True)
def rename(self, index=None, columns=None):
"""Rename samples/features in the matrix."""
renamed = self._values.rename(index=index, columns=columns)
if index is not None:
feature_data = self._feature_data.rename(index=index)
else:
feature_data = self._feature_data
if columns is not None:
sample_data = self._sample_data.rename(index=columns)
else:
sample_data = self._sample_data
return self.__class__(
renamed, feature_data=feature_data, sample_data=sample_data)
def melt(self,
with_sample_data=False,
with_feature_data=False,
value_name='value'):
"""Melts values into 'tidy' format, optionally including annotation."""
feat_col = self._feature_data.index.name or 'feature'
sample_col = self._sample_data.index.name or 'sample'
values_long = pd.melt(
self._values.rename_axis(feat_col).reset_index(),
id_vars=feat_col,
var_name=sample_col,
value_name=value_name)
if with_sample_data and self._sample_data.shape[1] > 0:
sample_data = (self._sample_data.rename_axis(sample_col)
.reset_index())
values_long = pd.merge(
values_long, sample_data, how='left', on=sample_col)
if with_feature_data and self._feature_data.shape[1] > 0:
feature_data = (self._feature_data.rename_axis(feat_col)
.reset_index())
# Merge with annotation.
values_long = pd.merge(
values_long, feature_data, how='left', on=feat_col)
return values_long
def query_samples(self, expr):
"""Subsets samples in matrix by querying sample_data with expression.
Similar to the pandas ``query`` method, this method queries the sample
data of the matrix with the given boolean expression. Any samples for
which the expression evaluates to True are returned in the resulting
AnnotatedMatrix.
Parameters
----------
expr : str
The query string to evaluate. You can refer to variables in the
environment by prefixing them with an ‘@’ character like @a + b.
Returns
-------
AnnotatedMatrix
Subsetted matrix, containing only the samples for which ``expr``
evaluates to True.
"""
sample_data = self._sample_data.query(expr)
values = self._values.reindex(columns=sample_data.index)
return self.__class__(
values, sample_data=sample_data, feature_data=self._feature_data)
def dropna_samples(self, subset=None, how='any', thresh=None):
"""Drops samples with NAs in sample_data."""
sample_data = self._sample_data.dropna(
subset=subset, how=how, thresh=thresh)
values = self._values.reindex(columns=sample_data.index)
return self.__class__(
values, sample_data=sample_data, feature_data=self._feature_data)
def __eq__(self, other):
if not isinstance(other, AnnotatedMatrix):
return False
return all(self.values == other.values) and \
all(self.sample_data == other.sample_data) and \
all(self.feature_data == other.feature_data)
def plot_heatmap(
self,
cmap='RdBu_r',
sample_cols=None,
sample_colors=None,
feature_cols=None,
feature_colors=None,
metric='euclidean',
method='complete',
transpose=False,
# legend_kws=None,
**kwargs):
"""Plots clustered heatmap of matrix values."""
import matplotlib.pyplot as plt
import seaborn as sns
if sample_cols is not None:
sample_annot, _ = gplot.color_annotation(
self._sample_data[sample_cols], colors=sample_colors)
else:
sample_annot, _ = None, None
if feature_cols is not None:
feature_annot, _ = gplot.color_annotation(
self._feature_data[feature_cols], colors=feature_colors)
else:
feature_annot, _ = None, None
clustermap_kws = dict(kwargs)
if transpose:
values = self._values.T
clustermap_kws['row_colors'] = sample_annot
clustermap_kws['col_colors'] = feature_annot
xlabel, ylabel = 'Features', 'Samples'
else:
values = self._values
clustermap_kws['col_colors'] = sample_annot
clustermap_kws['row_colors'] = feature_annot
xlabel, ylabel = 'Samples', 'Features'
cm = sns.clustermap(
values, cmap=cmap, metric=metric, method=method, **clustermap_kws)
plt.setp(cm.ax_heatmap.get_yticklabels(), rotation=0)
cm.ax_heatmap.set_xlabel(xlabel)
cm.ax_heatmap.set_ylabel(ylabel)
# TODO: handle legend drawing.
#if annot_cmap is not None:
# draw_legends(cm, annot_cmap, **(legend_kws or {}))
return cm
def pca(self,
n_components=None,
axis='columns',
transform=False,
with_annotation=False):
"""Performs PCA on matrix."""
try:
from sklearn.decomposition import PCA
except ImportError:
raise ImportError('Scikit-learn must be installed to '
'perform PCA analyses')
# Fit PCA and transform expression.
pca = PCA(n_components=n_components)
if axis in {1, 'columns', 'samples'}:
values = self._values.T
annotation = self._sample_data
elif axis in {0, 'index', 'features'}:
values = self._values
annotation = self._feature_data
else:
raise ValueError('Unknown value for axis')
pca.fit(values.values)
if transform:
transformed = pca.transform(values.values)
n_components = transformed.shape[1]
transformed = pd.DataFrame(
transformed,
columns=['pca_{}'.format(i + 1) for i in range(n_components)],
index=values.index)
if with_annotation:
transformed = pd.concat([transformed, annotation], axis=1)
return pca, transformed
else:
return pca
def plot_pca(self, components=(1, 2), axis='columns', ax=None, **kwargs):
"""Plots PCA of samples."""
pca, transformed = self.pca(
n_components=max(components),
axis=axis,
transform=True,
with_annotation=True)
# Draw using lmplot.
pca_x, pca_y = ['pca_{}'.format(c) for c in components]
ax = gplot.scatter_plot(
data=transformed, x=pca_x, y=pca_y, ax=ax, **kwargs)
var = pca.explained_variance_ratio_[components[0] - 1] * 100
ax.set_xlabel('Component {} ({:3.1f}%)'.format(components[0], var))
var = pca.explained_variance_ratio_[components[1] - 1] * 100
ax.set_ylabel('Component {} ({:3.1f}%)'.format(components[1], var))
return ax
def plot_pca_variance(self, n_components=None, axis='columns', ax=None):
"""Plots variance explained by PCA components."""
import matplotlib.pyplot as plt
import seaborn as sns
pca = self.pca(n_components=n_components, axis=axis, transform=False)
if ax is None:
_, ax = plt.subplots()
x = np.arange(pca.n_components_) + 1
y = pca.explained_variance_ratio_
ax.plot(x[:len(y)], y)
ax.set_xlabel('Component')
ax.set_ylabel('Explained variance')
sns.despine(ax=ax)
return ax
def plot_feature(self, feature, group=None, kind='box', ax=None, **kwargs):
"""Plots distribution of expression for given feature."""
import seaborn as sns
if group is not None and self._sample_data.shape[1] == 0:
raise ValueError('Grouping not possible without sample data')
# Determine plot type.
plot_funcs = {
'box': sns.boxplot,
'swarm': sns.swarmplot,
'violin': sns.violinplot
}
try:
plot_func = plot_funcs[kind]
except KeyError:
raise ValueError('Unknown plot type {!r}'.format(kind))
# Assemble plot data (sample_data + expression values).
values = self._values.loc[feature].to_frame(name='value')
plot_data = pd.concat([values, self._sample_data], axis=1)
# Plot expression.
ax = plot_func(data=plot_data, x=group, y='value', ax=ax, **kwargs)
ax.set_title(feature)
ax.set_ylabel('Value')
return ax
@classmethod
def concat(cls, matrices, axis):
"""Concatenates matrices along given axis."""
# Collect value/sample/feature data.
tuples = ((mat.values, mat.sample_data, mat.feature_data)
for mat in matrices)
value_list, sample_list, feat_list = zip(*tuples)
# Merge values.
values = pd.concat(value_list, axis=axis)
# Merge sample/feature data.
if axis == 'index' or axis == 0:
sample_data = pd.concat(sample_list, axis='columns')
feature_data = pd.concat(feat_list, axis='index')
elif axis == 'columns' or axis == 1:
sample_data = pd.concat(sample_list, axis='index')
feature_data = pd.concat(feat_list, axis='columns')
else:
raise ValueError('Unknown value for axis')
return cls(values, sample_data=sample_data, feature_data=feature_data)
def drop_duplicate_indices(self, axis='index', keep='first'):
"""Drops duplicate indices along given axis."""
if axis == 'index':
mask = ~self._values.index.duplicated(keep=keep)
values = self._values.loc[mask]
sample_data = self._sample_data
feature_data = self._feature_data.loc[mask]
elif axis == 'columns':
mask = ~self._values.columns.duplicated(keep=keep)
values = self._values.loc[:, mask]
sample_data = self._sample_data.loc[mask]
feature_data = self._feature_data
else:
raise ValueError('Unknown value for axis')
return self.__class__(
values.copy(), sample_data=sample_data, feature_data=feature_data)
class GenomicMatrix(AnnotatedMatrix):
"""Class respresenting matrices indexed by genomic positions."""
def __init__(self, values, sample_data=None, feature_data=None):
if not isinstance(values, GenomicDataFrame):
values = GenomicDataFrame(values)
super().__init__(
values, sample_data=sample_data, feature_data=feature_data)
@classmethod
def from_df(cls, values, chrom_lengths=None, **kwargs):
"""Constructs a genomic matrix from the given DataFrame."""
if not isinstance(values, GenomicDataFrame):
values = GenomicDataFrame.from_df(
values, chrom_lengths=chrom_lengths)
return cls(values, **kwargs)
@classmethod
def from_csv(cls,
file_path,
index_col,
sample_data=None,
feature_data=None,
sample_mapping=None,
feature_mapping=None,
drop_cols=None,
chrom_lengths=None,
read_data_kws=None,
**kwargs):
"""Reads values from a csv file."""
if not 2 <= len(index_col) <= 3:
raise ValueError('index_col should contain 2 entries'
' (for positioned data or 3 entries'
' (for ranged data)')
default_dtype = {index_col[0]: str}
dtype = toolz.merge(default_dtype, kwargs.pop('dtype', {}))
values = pd.read_csv(file_path, dtype=dtype, **kwargs)
values = values.set_index(index_col)
# If sample/feature_data are not dataframes, assume they are
# file paths or objects and try to read from them.
read_data_kws_default = {
'sep': kwargs.pop('sep', None),
'index_col': 0
}
read_data_kws = toolz.merge(read_data_kws_default, read_data_kws or {})
if not (sample_data is None or isinstance(sample_data, pd.DataFrame)):
sample_data = pd.read_csv(sample_data, **read_data_kws)
if not (feature_data is None
or isinstance(feature_data, pd.DataFrame)):
feature_data = pd.read_csv(feature_data, **read_data_kws)
values = cls._preprocess_values(
values,
sample_data=sample_data,
feature_data=feature_data,
sample_mapping=sample_mapping,
feature_mapping=feature_mapping,
drop_cols=drop_cols)
return cls.from_df(
values,
sample_data=sample_data,
feature_data=feature_data,
chrom_lengths=chrom_lengths)
@classmethod
def from_csv_condensed(cls,
file_path,
index_col=0,
sample_data=None,
feature_data=None,
sample_mapping=None,
feature_mapping=None,
drop_cols=None,
chrom_lengths=None,
index_regex=RANGED_REGEX,
is_one_based=False,
is_inclusive=False,
read_data_kws=None,
**kwargs):
"""Reads values from a csv file with a condensed index."""
values = pd.read_csv(file_path, index_col=index_col, **kwargs)
values.index = cls._expand_condensed_index(
values.index,
index_regex,
is_one_based=is_one_based,
is_inclusive=is_inclusive)
# If sample/feature_data are not dataframes, assume they are
# file paths or objects and try to read from them.
read_data_kws_default = {
'sep': kwargs.pop('sep', None),
'index_col': 0
}
read_data_kws = toolz.merge(read_data_kws_default, read_data_kws or {})
if not (sample_data is None or isinstance(sample_data, pd.DataFrame)):
sample_data = pd.read_csv(sample_data, **read_data_kws)
if not (feature_data is None
or isinstance(feature_data, pd.DataFrame)):
feature_data = pd.read_csv(feature_data, **read_data_kws)
values = cls._preprocess_values(
values,
sample_data=sample_data,
feature_data=feature_data,
sample_mapping=sample_mapping,
feature_mapping=feature_mapping,
drop_cols=drop_cols)
return cls.from_df(
values,
sample_data=sample_data,
feature_data=feature_data,
chrom_lengths=chrom_lengths)
@classmethod
def _expand_condensed_index(cls,
index,
regex_expr,
is_one_based=False,
is_inclusive=False):
"""Expands condensed index into a MultiIndex."""
# Parse entries.
regex = re.compile(regex_expr)
group_dicts = (regex.match(el).groupdict() for el in index)
# Extract chromosome, start, end positions.
if regex.groups == 3:
tups = ((grp['chromosome'], int(grp['start']), int(grp['end']))
for grp in group_dicts)
chrom, starts, ends = zip(*tups)
elif regex.groups == 2:
tups = ((grp['chromosome'], int(grp['position']))
for grp in group_dicts)
chrom, starts = zip(*tups)
ends = None
else:
raise ValueError('Regex should have two or three groups '
'(for positioned/ranged data, respectively)')
# Correct for one-base and inclusive-ness to match Python conventions.
starts = np.array(starts)
if is_one_based:
starts -= 1
if ends is not None and is_inclusive:
ends = np.array(ends)
ends += 1
# Build index.
if ends is None:
index = pd.MultiIndex.from_arrays(
[chrom, starts], names=['chromosome', 'position'])
else:
index = pd.MultiIndex.from_arrays(
[chrom, starts, ends], names=['chromosome', 'start', 'end'])
return index
@property
def gloc(self):
"""Genomic-position indexer.
Used to select rows from the matrix by their genomic position.
Interface is the same as for the GenomicDataFrame gloc property
(which this method delegates to).
"""
return GLocWrapper(self._values.gloc, self._gloc_constructor)
def _gloc_constructor(self, values):
"""Constructor that attempts to build new instance
from given values."""
if isinstance(values, GenomicDataFrame):
sample_data = self._sample_data.reindex(index=values.columns)
feature_data = self._feature_data.reindex(index=values.index)
return self.__class__(
values.copy(),
sample_data=sample_data,
feature_data=feature_data)
return values
def expand(self):
"""Expands matrix to include values from missing bins.
Assumes rows are regularly spaced with a fixed bin size.
"""
expanded = self._expand(self._values)
feature_data = self._feature_data.reindex(index=expanded.index)
return self.__class__(
expanded, sample_data=self._sample_data, feature_data=feature_data)
@staticmethod
def _expand(values):
def _bin_indices(grp, bin_size):
chrom = grp.index[0][0]
start = grp.index.get_level_values(1).min()
end = grp.index.get_level_values(2).max()
bins = np.arange(start, end + 1, step=bin_size)
return zip(itertools.cycle([chrom]), bins[:-1], bins[1:])
bin_size = values.index[0][2] - values.index[0][1]
# TODO: Warn if bin_size is 1? (Probably positioned data).
# Check inferred bin size.
starts = values.index.get_level_values(1)
ends = values.index.get_level_values(2)
diffs = ends - starts
if not all(diffs == bin_size):
raise ValueError('Bins do not match inferred bin size')
# Check if following bins match inferred bin size.
if not all(np.mod(np.diff(starts), bin_size) == 0):
raise ValueError('Following bins do not match inferred bin size')
indices = list(
itertools.chain.from_iterable(
_bin_indices(grp, bin_size=bin_size)
for _, grp in values.groupby(level=0)))
return values.reindex(index=indices)
def impute(self, window=11, min_probes=5, expand=True):
"""Imputes nan values from neighboring bins."""
if expand:
values = self._expand(self._values)
else:
values = self._values
# Calculate median value within window (allowing for
# window - min_probes number of NAs within the window).
rolling = values.rolling(
window=window, min_periods=min_probes, center=True)
avg_values = rolling.median()
# Copy over values for null rows for the imputation.
imputed = values.copy()
mask = imputed.isnull().all(axis=1)
imputed.loc[mask] = avg_values.loc[mask]
# Match feature data to new values.
feature_data = self._feature_data.reindex(index=imputed.index)
return self.__class__(
imputed, sample_data=self._sample_data, feature_data=feature_data)
def resample(self, bin_size, start=None, agg='mean'):
"""Resamples values at given interval by binning."""
# Perform resampling per chromosome.
resampled = pd.concat(
(self._resample_chromosome(
grp, bin_size=bin_size, agg=agg, start=start)
for _, grp in self._values.groupby(level=0)),
axis=0) # yapf: disable
# Restore original index order.
resampled = resampled.reindex(self._values.gloc.chromosomes, level=0)
return self.__class__(
GenomicDataFrame(
resampled, chrom_lengths=self._values.chromosome_lengths),
sample_data=self._sample_data)
@staticmethod
def _resample_chromosome(values, bin_size, start=None, agg='mean'):
# Bin rows by their centre positions.
starts = values.index.get_level_values(1)
ends = values.index.get_level_values(2)
positions = (starts + ends) // 2
range_start = starts.min() if start is None else start
range_end = ends.max() + bin_size
bins = np.arange(range_start, range_end, bin_size)
if len(bins) < 2:
raise ValueError('No bins in range ({}, {}) with bin_size {}'.
format(range_start, ends.max(), bin_size))
binned = pd.cut(positions, bins=bins)
# Resample.
resampled = values.groupby(binned).agg(agg)
resampled.index = pd.MultiIndex.from_arrays(
[[values.index[0][0]] * (len(bins) - 1), bins[:-1], bins[1:]],
names=values.index.names)
return resampled
def rename_chromosomes(self, mapping):
"""Returns copy of matrix with renamed chromosomes."""
return self.__class__(
values=self._values.rename_chromosomes(mapping),
sample_data=self.sample_data,
feature_data=self.feature_data)
def annotate(self, features, feature_id='gene_id'):
"""Annotates values for given features."""
# Calculate calls.
get_id = operator.attrgetter(feature_id)
annotated_calls = {}
for feature in features.itertuples():
try:
chrom, start, end = feature.Index
overlap = self._values.gloc.search(chrom, start, end)
annotated_calls[get_id(feature)] = overlap.median()
except KeyError:
pass
# Assemble into dataframe.
annotated = pd.DataFrame.from_records(annotated_calls).T
annotated.index.name = feature_id
return AnnotatedMatrix(annotated, sample_data=self._sample_data)
def plot_sample(self, sample, ax=None, **kwargs):
"""Plots values for given sample along genomic axis."""
ax = gplot.genomic_scatter_plot(
self._values, y=sample, ax=ax, **kwargs)
return ax
def plot_heatmap(self,
cmap='RdBu_r',
sample_cols=None,
sample_colors=None,
metric='euclidean',
method='complete',
transpose=True,
cluster=True,
**kwargs):
"""Plots heatmap of gene expression over samples."""
if 'row_cluster' in kwargs or 'col_cluster' in kwargs:
raise ValueError(
'GenomicMatrices only supports clustering by samples. '
'Use the \'cluster\' argument to specify whether '
'clustering should be performed.')
if cluster:
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage
# Do clustering on matrix with only finite values.
values_clust = self._values.replace([np.inf, -np.inf], np.nan)
values_clust = values_clust.dropna()
dist = pdist(values_clust.T, metric=metric)
sample_linkage = linkage(dist, method=method)
else:
sample_linkage = None
# Draw heatmap.
heatmap_kws = dict(kwargs)
if transpose:
heatmap_kws.update({
'row_cluster': sample_linkage is not None,
'row_linkage': sample_linkage,
'col_cluster': False
})
else:
heatmap_kws.update({
'col_cluster': sample_linkage is not None,
'col_linkage': sample_linkage,
'row_cluster': False
})
cm = super().plot_heatmap(
sample_cols=sample_cols,
sample_colors=sample_colors,
cmap=cmap,
metric=metric,
method=method,
transpose=transpose,
**heatmap_kws)
self._style_heatmap(cm, transpose=transpose)
return cm
def _style_heatmap(self, cm, transpose):
chrom_breaks = self._values.groupby(level=0).size().cumsum()
chrom_labels = self._values.gloc.chromosomes
chrom_label_pos = np.concatenate([[0], chrom_breaks])
chrom_label_pos = (chrom_label_pos[:-1] + chrom_label_pos[1:]) / 2
if transpose:
cm.ax_heatmap.set_xticks([])
for loc in chrom_breaks[:-1]:
cm.ax_heatmap.axvline(loc, color='grey', lw=1)
cm.ax_heatmap.set_xticks(chrom_label_pos)
cm.ax_heatmap.set_xticklabels(chrom_labels, rotation=0)
cm.ax_heatmap.set_xlabel('Genomic position')
cm.ax_heatmap.set_ylabel('Samples')
else:
cm.ax_heatmap.set_yticks([])
for loc in chrom_breaks[:-1]:
cm.ax_heatmap.axhline(loc, color='grey', lw=1)
cm.ax_heatmap.set_yticks(chrom_label_pos)
cm.ax_heatmap.set_yticklabels(chrom_labels, rotation=0)
cm.ax_heatmap.set_xlabel('Samples')
cm.ax_heatmap.set_ylabel('Genomic position')
return cm
class GLocWrapper(object):
"""Wrapper class that wraps gloc indexer from given object."""
def __init__(self, gloc, constructor):
self._gloc = gloc
self._constructor = constructor
def __getattr__(self, name):
attr = getattr(self._gloc, name)
if callable(attr):
return self._wrap_function(attr)
return attr
def __getitem__(self, item):
result = self._gloc[item]
if isinstance(result, GenomicSlice):
result = GLocSliceWrapper(
self._gloc, chromosome=item, constructor=self._constructor)
else:
result = self._constructor(result)
return result
def _wrap_function(self, func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""Wrapper that calls _constructor on returned result."""
result = func(*args, **kwargs)
return self._constructor(result)
return wrapper
class GLocSliceWrapper(object):
"""Wrapper class that wraps slice from gloc indexer on given object."""
def __init__(self, gloc, chromosome, constructor):
self._gloc = gloc
self._chromosome = chromosome
self._constructor = constructor
def __getitem__(self, item):
result = self._gloc[self._chromosome][item]
return self._constructor(result)
| 34.462981 | 80 | 0.595085 | 4,019 | 34,911 | 4.950485 | 0.132122 | 0.057801 | 0.025633 | 0.018999 | 0.379674 | 0.312123 | 0.261962 | 0.215923 | 0.209439 | 0.182097 | 0 | 0.004052 | 0.314228 | 34,911 | 1,012 | 81 | 34.497036 | 0.826991 | 0.15826 | 0 | 0.345257 | 0 | 0 | 0.0472 | 0.002909 | 0 | 0 | 0 | 0.001976 | 0 | 1 | 0.07154 | false | 0.001555 | 0.032659 | 0.00311 | 0.178849 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcdcb04e7d7ccb6b7e169672c64a338e2a347415 | 936 | py | Python | GUI/Generic/GScrollableBases.py | gcewing/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 9 | 2019-07-15T19:03:27.000Z | 2021-11-24T19:50:02.000Z | GUI/Generic/GScrollableBases.py | mnabeelp/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 3 | 2019-09-11T13:22:10.000Z | 2020-08-19T20:13:00.000Z | GUI/Generic/GScrollableBases.py | mnabeelp/PyGUI | 58c6c38ccb8e66acdf98dea6b24bef1d9a03147c | [
"MIT"
] | 4 | 2020-02-23T16:50:06.000Z | 2022-02-10T07:15:35.000Z | #-------------------------------------------------------------------------------
#
# Python GUI - Scrollable objects mixin - Generic
#
#-------------------------------------------------------------------------------
from GUI.Properties import overridable_property
class ScrollableBase(object):
    """Mixin adding scroll-bar configuration to components.

    Exposes horizontal/vertical scrolling as individual boolean
    properties plus a combined string form ('h', 'v', 'hv' or '')."""

    scrolling = overridable_property('scrolling',
        "String containing 'h' for horizontal and 'v' for vertical scrolling.")
    hscrolling = overridable_property('hscrolling',
        "True if horizontal scrolling is enabled.")
    vscrolling = overridable_property('vscrolling',
        "True if vertical scrolling is enabled.")

    def get_scrolling(self):
        # Build the combined flag string from the two boolean properties.
        result = ''
        if self.hscrolling:
            result += 'h'
        if self.vscrolling:
            result += 'v'
        return result

    def set_scrolling(self, value):
        # Each axis is enabled iff its flag character appears in the string.
        self.hscrolling = 'h' in value
        self.vscrolling = 'v' in value
| 28.363636 | 80 | 0.600427 | 95 | 936 | 5.852632 | 0.505263 | 0.136691 | 0.064748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 936 | 32 | 81 | 29.25 | 0.689826 | 0.290598 | 0 | 0 | 0 | 0 | 0.274119 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.055556 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcdf6498cef441e9307973c5719c094eefe1d7d2 | 5,047 | py | Python | lap/create_lap_dataset.py | saadyousuf45/age-gender-estimation | 50ff4bce716e8eba717c53009c2e0fa6feaae03c | [
"MIT"
] | 1,437 | 2017-04-10T04:55:35.000Z | 2022-03-31T04:49:26.000Z | lap/create_lap_dataset.py | saadyousuf45/age-gender-estimation | 50ff4bce716e8eba717c53009c2e0fa6feaae03c | [
"MIT"
] | 130 | 2017-05-19T02:39:38.000Z | 2022-03-21T14:10:35.000Z | lap/create_lap_dataset.py | saadyousuf45/age-gender-estimation | 50ff4bce716e8eba717c53009c2e0fa6feaae03c | [
"MIT"
] | 542 | 2017-05-10T10:18:15.000Z | 2022-03-21T05:52:35.000Z | import argparse
import better_exceptions
import sys
import time
from pathlib import Path
import zipfile
import bz2
import urllib.request
import dlib
import cv2
# Names of the LAP dataset archives this script downloads/extracts.
zip_names = ["train_1.zip", "train_2.zip", "train_gt.zip", "valid.zip", "valid_gt.zip"]
# Download URLs, one per archive above (same order; hosts are placeholders).
urls = ["http://***/train_1.zip",
        "http://***/train_2.zip",
        "http://***/train_gt.zip",
        "http://***/valid.zip",
        "http://***/valid_gt.zip"]
# Password for the protected validation ground-truth zip (placeholder value).
gt_pwd = b"***"
# Directory layout, rooted next to this script.
dataset_root = Path(__file__).resolve().parent.joinpath("dataset")
model_root = Path(__file__).resolve().parent.joinpath("model")
# Raw images extracted from the archives.
train_image_dir = dataset_root.joinpath("train_images")
validation_image_dir = dataset_root.joinpath("validation_images")
# Output directories for cropped face images.
train_crop_dir = dataset_root.joinpath("train_crop")
validation_crop_dir = dataset_root.joinpath("validation_crop")
def get_args(argv=None):
    """Build the command-line parser and parse the given arguments.

    Parameters
    ----------
    argv : list of str, optional
        Arguments to parse; defaults to ``sys.argv[1:]`` when None
        (same behavior as before this parameter existed).

    Returns
    -------
    tuple
        ``(parser, args)`` — the parser itself (so callers can invoke
        ``print_help``) and the parsed ``argparse.Namespace``.
    """
    parser = argparse.ArgumentParser(description="This script downloads the LAP dataset "
                                                 "and preprocesses it for training and evaluation",
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    subparsers = parser.add_subparsers(help="subcommands", dest="subcommand")
    # Fixed typo in help text: "Downdload" -> "Download".
    subparsers.add_parser("download", help="Download the LAP dataset")
    subparsers.add_parser("extract", help="Unzip the LAP dataset")
    subparsers.add_parser("crop", help="Crop face regions using dlib")
    args = parser.parse_args(argv)
    return parser, args
def reporthook(count, block_size, total_size):
    """Progress callback for ``urllib.request.urlretrieve``.

    Prints a single carriage-return-updated line with the percentage,
    transferred/total size in MB, speed, elapsed and estimated remaining
    time.

    Parameters
    ----------
    count : int
        Number of blocks transferred so far (0 on the first call).
    block_size : int
        Size of each transferred block in bytes.
    total_size : int
        Total download size in bytes.
    """
    global start_time
    if count == 0:
        # First call: remember when the transfer started, nothing to print yet.
        start_time = time.time()
        return
    duration = int(time.time() - start_time)
    current_size = count * block_size
    # Fix: the final block can overshoot total_size, which previously made
    # remaining_size (and hence remaining_time) negative. Clamp at zero.
    remaining_size = max(total_size - current_size, 0)
    speed = int(current_size / (1024 * duration + 1))  # +1 avoids div-by-zero at t=0
    percent = min(int(count * block_size * 100 / total_size), 100)
    remaining_time = int(duration * (remaining_size / current_size))
    sys.stdout.write("\r{}%, {:6.2f}/{:6.2f}MB, {}KB/s, passed: {}s, remaining: {}s".format(
        percent, current_size / (1024 * 1024), total_size / (1024 * 1024), speed, duration, remaining_time))
    sys.stdout.flush()
def download():
    """Fetches every LAP dataset archive into the dataset directory."""
    # Make sure the target directory exists before fetching anything.
    dataset_root.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above

    for archive, source_url in zip(zip_names, urls):
        print("downloading {}".format(archive))
        destination = dataset_root.joinpath(archive)
        urllib.request.urlretrieve(source_url, str(destination), reporthook)
def crop():
    """Runs dlib's CNN face detector over all LAP images.

    Downloads the detector weights on first use, then reports the detected
    face rectangles per image (prints a warning when the count is not 1).
    """
    model_path = model_root.joinpath("mmod_human_face_detector.dat")

    def fetch_detector_model():
        # Download and decompress the dlib CNN detector weights.
        model_root.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above
        url = "http://dlib.net/files/mmod_human_face_detector.dat.bz2"
        compressed = str(model_path) + ".bz2"
        print("downloading {}".format(model_path.name))
        urllib.request.urlretrieve(url, compressed, reporthook)
        with open(compressed, "rb") as source, open(str(model_path), "wb") as dest:
            dest.write(bz2.decompress(source.read()))

    if not model_path.is_file():
        fetch_detector_model()

    detector = dlib.cnn_face_detection_model_v1(str(model_path))
    dir_pairs = [[train_image_dir, train_crop_dir], [validation_image_dir, validation_crop_dir]]

    for source_dir, crop_dir in dir_pairs:
        for jpg_path in source_dir.glob("*.jpg"):
            image = cv2.imread(str(jpg_path))
            height, width, _ = image.shape
            # Scale so the longer side becomes 800 pixels.
            scale = 800 / max(height, width)
            resized = cv2.resize(image, None, fx=scale, fy=scale)
            # dlib expects RGB, OpenCV loads BGR.
            rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
            detections = detector(rgb, 1)

            if len(detections) != 1:
                print("{} faces were detected for {}".format(len(detections), jpg_path.name))

            boxes = [[d.rect.left(), d.rect.right(), d.rect.top(), d.rect.bottom()] for d in detections]
            print(boxes)
def extract():
    """Unzips every downloaded LAP archive into its target directory.

    Raises
    ------
    RuntimeError
        If any expected archive is missing from the dataset directory.
    """
    for archive in zip_names:
        archive_path = dataset_root.joinpath(archive)

        if not archive_path.is_file():
            raise RuntimeError("{} was not found. Please download the LAP dataset.".format(archive))

        # Training zips go to the train image dir, the validation zip to the
        # validation image dir, everything else next to the archives.
        if archive in ["train_1.zip", "train_2.zip"]:
            target = train_image_dir
        elif archive == "valid.zip":
            target = validation_image_dir
        else:
            target = dataset_root

        # Only the validation ground truth archive is password protected.
        password = gt_pwd if archive == "valid_gt.zip" else None

        with zipfile.ZipFile(str(archive_path), "r") as archive_file:
            target.mkdir(parents=True, exist_ok=True)  # requires Python 3.5 or above
            archive_file.extractall(path=str(target), pwd=password)
def main():
    """Entry point: dispatches to the selected subcommand."""
    parser, args = get_args()
    handlers = {"download": download, "extract": extract, "crop": crop}
    handler = handlers.get(args.subcommand)

    if handler is None:
        # No (or unrecognized) subcommand: show usage instead.
        parser.print_help()
    else:
        handler()
# Run the command-line interface only when executed as a script.
if __name__ == '__main__':
    main()
| 36.572464 | 112 | 0.650684 | 657 | 5,047 | 4.741248 | 0.277017 | 0.045907 | 0.036597 | 0.02825 | 0.19069 | 0.123274 | 0.050722 | 0.050722 | 0.050722 | 0.050722 | 0 | 0.016096 | 0.22449 | 5,047 | 137 | 113 | 36.839416 | 0.779765 | 0.01704 | 0 | 0.019048 | 0 | 0.009524 | 0.153722 | 0.005649 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0.038095 | 0.095238 | 0 | 0.171429 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fce0375babdffaec108464100e4d42cc1e0f5f14 | 364 | py | Python | FastSimulation/CTPPSSimHitProducer/python/CTPPSSimHitProducer_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | FastSimulation/CTPPSSimHitProducer/python/CTPPSSimHitProducer_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | FastSimulation/CTPPSSimHitProducer/python/CTPPSSimHitProducer_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
CTPPSSimHits = cms.EDProducer('CTPPSSimHitProducer',
MCEvent = cms.untracked.InputTag("LHCTransport"),
Z_Tracker1 = cms.double(203.827),# first tracker z position in m
Z_Tracker2 = cms.double(212.550),# second tracker z position in m
Z_Timing = cms.double(215.700) # timing detector z position in m
)
| 33.090909 | 71 | 0.730769 | 51 | 364 | 5.156863 | 0.607843 | 0.102662 | 0.125475 | 0.136882 | 0.152091 | 0.152091 | 0 | 0 | 0 | 0 | 0 | 0.066007 | 0.167582 | 364 | 10 | 72 | 36.4 | 0.80198 | 0.258242 | 0 | 0 | 0 | 0 | 0.116541 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fce41f5398739ee51c0146bd32f70e90c45cd487 | 10,999 | py | Python | app/api/v1/resources/deregdevice.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | [
"Unlicense"
] | 6 | 2018-11-07T12:41:30.000Z | 2020-04-12T18:07:03.000Z | app/api/v1/resources/deregdevice.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | [
"Unlicense"
] | 1 | 2020-10-20T12:33:18.000Z | 2020-10-20T12:33:18.000Z | app/api/v1/resources/deregdevice.py | a-wakeel/Device-Registration-Subsystem | dd9fa387e2087a6ccea9676303debe640bd99422 | [
"Unlicense"
] | 10 | 2018-11-12T06:15:19.000Z | 2021-11-18T05:45:12.000Z | """
DRS De-Registration device resource package.
Copyright (c) 2018-2020 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment is required by displaying the trademark/log as per the details provided here: https://www.qualcomm.com/documents/dirbs-logo-and-brand-guidelines
Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
This notice may not be removed or altered from any source distribution.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import json
from flask import Response, request
from flask_restful import Resource
from marshmallow import ValidationError
from flask_babel import lazy_gettext as _
from app import app, db
from app.api.v1.helpers.error_handlers import DEREG_NOT_FOUND_MSG
from app.api.v1.helpers.response import MIME_TYPES, CODES
from app.api.v1.helpers.utilities import Utilities
from app.api.v1.models.deregdetails import DeRegDetails
from app.api.v1.models.deregdevice import DeRegDevice
from app.api.v1.models.status import Status
from app.api.v1.schema.deregdevice import DeRegRequestSchema, DeRegDeviceSchema, DeRegRequestUpdateSchema
from app.api.v1.models.eslog import EsLog
class DeRegDeviceRoutes(Resource):
    """Flask-RESTful resource for De-Registration Device routes.

    GET returns the devices of an existing de-registration request,
    POST attaches new devices to a request, and PUT re-submits/updates
    the devices of a request.  Every handler returns a JSON ``Response``
    with an explicit status code and closes the SQLAlchemy session in a
    ``finally`` block.
    """

    @staticmethod
    def get(dereg_id):
        """GET method handler, returns devices of a request.

        :param dereg_id: de-registration request id from the URL
            (arrives as a string, hence the ``isdigit`` check).
        """
        # 422 when the id is non-numeric or no such request exists.
        if not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):
            return Response(app.json_encoder.encode(DEREG_NOT_FOUND_MSG), status=CODES.get("UNPROCESSABLE_ENTITY"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        try:
            schema = DeRegDeviceSchema()
            dereg_devices = DeRegDevice.get_devices_by_dereg_id(dereg_id)
            # marshmallow 2.x: dump() returns a MarshalResult; payload is in .data
            response = schema.dump(dereg_devices, many=True).data
            return Response(json.dumps(response), status=CODES.get("OK"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        except Exception as e:  # pragma: no cover
            app.logger.exception(e)
            error = {
                'message': [_('Failed to retrieve response, please try later')]
            }
            return Response(app.json_encoder.encode(error), status=CODES.get('INTERNAL_SERVER_ERROR'),
                            mimetype=MIME_TYPES.get('APPLICATION_JSON'))
        finally:
            # Release the DB session whether we succeeded or not.
            db.session.close()

    @staticmethod
    def post():
        """POST method handler, creates new devices for a request.

        Expects form data containing ``dereg_id`` plus the fields
        required by ``DeRegRequestSchema``.  Rejects the whole batch
        with 422 when any IMEI is not registered.
        """
        dereg_id = request.form.to_dict().get('dereg_id', None)
        if not dereg_id or not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):
            return Response(app.json_encoder.encode(DEREG_NOT_FOUND_MSG), status=CODES.get("UNPROCESSABLE_ENTITY"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        try:
            schema_request = DeRegRequestSchema()
            device_schema = DeRegDeviceSchema()
            dereg = DeRegDetails.get_by_id(dereg_id)
            args = request.form.to_dict()
            args = DeRegDevice.curate_args(args, dereg)
            validation_errors = schema_request.validate(args)
            if validation_errors:
                return Response(app.json_encoder.encode(validation_errors),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            imei_tac_map = Utilities.extract_imeis_tac_map(args, dereg)
            imeis_list = Utilities.extract_imeis(imei_tac_map)
            not_registered_imeis = Utilities.get_not_registered_imeis(imeis_list)
            # Any unregistered IMEI fails the whole request.
            if not_registered_imeis:
                error = {
                    'not_registered_imeis': not_registered_imeis
                }
                return Response(json.dumps(error),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            else:
                # Remember the pre-existing device ids before creating
                # the new ones, then persist and log the change.
                old_devices = list(map(lambda x: x.id, dereg.devices))
                created = DeRegDevice.bulk_create(args, dereg)
                device_id_tac_map = Utilities.get_id_tac_map(created)
                devices = device_schema.dump(created, many=True)
                dereg_status = 'Pending Review' if app.config['AUTOMATE_IMEI_CHECK'] else 'Awaiting Documents'
                dereg.update_status(dereg_status)
                db.session.commit()
                log = EsLog.new_device_serialize(devices.data, 'Device Deregistration Request', regdetails=dereg,
                                                 imeis=imeis_list, reg_status=dereg_status, method='Post', dereg=True)
                EsLog.insert_log(log)
                DeRegDevice.bulk_insert_imeis(device_id_tac_map, imei_tac_map, old_devices, imeis_list, dereg)
                response = {'devices': devices.data, 'dreg_id': dereg.id}
                return Response(json.dumps(response), status=CODES.get("OK"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        except Exception as e:  # pragma: no cover
            app.logger.exception(e)
            error = {
                'message': [_('Failed to retrieve response, please try later')]
            }
            return Response(app.json_encoder.encode(error), status=CODES.get('INTERNAL_SERVER_ERROR'),
                            mimetype=MIME_TYPES.get('APPLICATION_JSON'))
        finally:
            db.session.close()

    @staticmethod
    def put():
        """PUT method handler, updates devices of the request.

        Devices are only re-created when the request's processing or
        report status indicates another processing round is required;
        otherwise an empty device list is returned.
        """
        dereg_id = request.form.to_dict().get('dereg_id', None)
        if not dereg_id or not dereg_id.isdigit() or not DeRegDetails.exists(dereg_id):
            return Response(app.json_encoder.encode(DEREG_NOT_FOUND_MSG), status=CODES.get("UNPROCESSABLE_ENTITY"),
                            mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        try:
            schema_request = DeRegRequestUpdateSchema()
            device_schema = DeRegDeviceSchema()
            dereg = DeRegDetails.get_by_id(dereg_id)
            args = request.form.to_dict()
            args = DeRegDevice.curate_args(args, dereg)
            validation_errors = schema_request.validate(args)
            if validation_errors:
                return Response(app.json_encoder.encode(validation_errors),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            imei_tac_map = Utilities.extract_imeis_tac_map(args, dereg)
            imeis_list = Utilities.extract_imeis(imei_tac_map)
            not_registered_imeis = Utilities.get_not_registered_imeis(imeis_list)
            if not_registered_imeis:
                error = {
                    'not_registered_imeis': not_registered_imeis
                }
                return Response(json.dumps(error),
                                status=CODES.get("UNPROCESSABLE_ENTITY"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
            else:
                # Re-processing is needed when processing previously
                # failed/never completed, or when the report failed.
                processing_failed = dereg.processing_status in [Status.get_status_id('Failed'),
                                                                Status.get_status_id('New Request'),
                                                                Status.get_status_id('Pending Review')]
                report_failed = dereg.report_status == Status.get_status_id('Failed')
                processing_required = processing_failed or report_failed
                if processing_required:
                    old_devices = list(map(lambda x: x.id, dereg.devices))
                    created = DeRegDevice.bulk_create(args, dereg)
                    device_id_tac_map = Utilities.get_id_tac_map(created)
                    devices = device_schema.dump(created, many=True)
                    status = Status.get_status_type(dereg.status)
                    db.session.commit()
                    log = EsLog.new_device_serialize(devices.data, 'Update Device Deregistration Request',
                                                     regdetails=dereg,
                                                     imeis=imeis_list, method='Put',
                                                     dereg=True, reg_status=status)
                    EsLog.insert_log(log)
                    DeRegDevice.bulk_insert_imeis(device_id_tac_map, imei_tac_map, old_devices, imeis_list, dereg)
                    response = {'devices': devices.data, 'dreg_id': dereg.id}
                else:
                    # Nothing to re-process: respond with an empty list.
                    response = {'devices': [], 'dreg_id': dereg.id}
                return Response(json.dumps(response), status=CODES.get("OK"),
                                mimetype=MIME_TYPES.get("APPLICATION_JSON"))
        except Exception as e:  # pragma: no cover
            app.logger.exception(e)
            error = {
                'message': [_('Failed to retrieve response, please try later')]
            }
            return Response(app.json_encoder.encode(error), status=CODES.get('INTERNAL_SERVER_ERROR'),
                            mimetype=MIME_TYPES.get('APPLICATION_JSON'))
        finally:
            db.session.close()
| 62.851429 | 844 | 0.638967 | 1,268 | 10,999 | 5.363565 | 0.227918 | 0.020585 | 0.026761 | 0.03823 | 0.58609 | 0.553301 | 0.553301 | 0.553301 | 0.536833 | 0.536833 | 0 | 0.002033 | 0.284389 | 10,999 | 174 | 845 | 63.212644 | 0.862025 | 0.224566 | 0 | 0.691781 | 0 | 0 | 0.097776 | 0.007413 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020548 | false | 0 | 0.09589 | 0 | 0.212329 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fce42dcc84df1e368c75a961a2e92119ffaf9405 | 3,193 | py | Python | fixedfield-to-csv.py | curtisalexander/fixedfield-to-csv | 57ef87ed698321bb700ed2a2f4dafe07d6a7213d | [
"MIT"
] | null | null | null | fixedfield-to-csv.py | curtisalexander/fixedfield-to-csv | 57ef87ed698321bb700ed2a2f4dafe07d6a7213d | [
"MIT"
] | null | null | null | fixedfield-to-csv.py | curtisalexander/fixedfield-to-csv | 57ef87ed698321bb700ed2a2f4dafe07d6a7213d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import csv
from itertools import compress
from struct import Struct
def check_ctl(incsv):
    """Validate the Length column of the control file.

    Asserts ``Length == End - Start + 1`` for every row.

    :param incsv: path to the control CSV with ``Start``, ``End`` and
        ``Length`` columns.
    :raises AssertionError: when any row's Length is inconsistent.
    """
    # Fix: mode 'rU' was removed in Python 3.11; newline='' is the
    # csv-module idiom for universal-newline handling.
    with open(incsv, 'r', newline='') as f:
        csv_reader = csv.DictReader(f)
        for row in csv_reader:
            assert (int(row['End']) - int(row['Start']) + 1) == int(row['Length'])
def import_ctl(incsv):
    """Import the control file describing the fixed-width layout.

    File is structured as:
        Field_Name  Start  End  Length  Format  Keep  Notes
        field1      1      12   12      A       1     field 1
        field2      13     14   2       A       0     field 2
        field3      15     19   5       N       1     field 3

    :param incsv: path to the control CSV.
    :returns: ``(field_widths, keep_fields)`` — parallel lists of the
        ``Length`` and ``Keep`` columns as ints.
    """
    # Fixes two bugs in the original:
    # 1. 'rU' mode was removed in Python 3.11.
    # 2. ``field_widths = [], keep_fields = []`` parsed as unpacking
    #    the empty list into two targets -> ValueError at runtime.
    field_widths = []
    keep_fields = []
    with open(incsv, 'r', newline='') as f:
        csv_reader = csv.DictReader(f)
        for fw in csv_reader:
            field_widths.append(int(fw['Length']))
            keep_fields.append(int(fw['Keep']))
    return field_widths, keep_fields
def create_fmt(field_widths, keep_fields):
    """Build a ``struct`` format string from widths and keep flags.

    :param field_widths: field widths in bytes, in file order.
    :param keep_fields: parallel list of 1 (keep) / 0 (drop) flags.
    :returns: format string where kept fields become ``Ns`` (bytes)
        and dropped fields become ``Nx`` (pad bytes, no value).

    Field Widths - https://docs.python.org/3.4/library/struct.html
        Format  C Type      Python Type         Standard Size
        x       pad byte    no value
        c       char        bytes of length 1   1
        s       char[]      bytes
    """
    # Dropped fields are marked with a negative width.
    keep_fields_pos_neg = [-1 if keep == 0 else keep for keep in keep_fields]
    field_widths_pos_neg = [fw * keep for fw, keep in zip(field_widths, keep_fields_pos_neg)]
    # Fix: the original tested ``fw == 0`` so dropped (negative-width)
    # fields were emitted as 's' instead of pad bytes 'x', which made
    # unpack return the dropped fields anyway.
    fmt_string = ''.join('{}{}'.format(abs(fw), 'x' if fw < 0 else 's')
                         for fw in field_widths_pos_neg)
    return fmt_string
def read_records(record_struct, f):
    """Yield one decoded record at a time from a binary file handle.

    :param record_struct: a compiled ``struct.Struct`` for one record.
    :param f: binary file object positioned at the first record.
    :yields: tuple of decoded string fields for each record.
    """
    while True:
        line = f.read(record_struct.size)
        if line == b'':
            break
        # Fix: the original called decode_record(record_struct, line),
        # but decode_record takes a single argument; the two-argument
        # helper is _decode_record, which unpacks AND decodes.
        yield _decode_record(record_struct, line)
def _decode_record(record_struct, line):
return tuple(s.decode() for s in record_struct.unpack_from(line))
def decode_record(rec):
    """Decode each bytes field in *rec*, returning a tuple of str."""
    decoded = []
    for field in rec:
        decoded.append(field.decode())
    return tuple(decoded)
if __name__ == '__main__':
    # Will throw an AssertionError if the Length variable within the control file is wrong
    check_ctl('/some/dir/to/keep.csv')
    field_widths, keep_fields = import_ctl('/some/dir/to/keep.csv')
    # Compile the fixed-width layout into a reusable Struct.
    fmt_string = create_fmt(field_widths, keep_fields)
    record_struct = Struct(fmt_string)
    with open('/some/dir/to/fixedfield/split1_sample', 'rb') as infile:
        with open('/some/dir/to/fixedfield/split1_sample.csv', 'w', newline='') as outfile:
            csv_writer = csv.writer(outfile, delimiter=',')
            # NOTE(review): reads only the first 10 records
            # (record_struct.size * 10 bytes); the commented-out
            # read_records loop below streams the whole file — confirm
            # which behaviour is intended.
            for rec in record_struct.iter_unpack(infile.read(record_struct.size*10)):
                # for rec in read_records(record_struct, infile):
                csv_writer.writerow(decode_record(rec))
| 37.127907 | 108 | 0.606953 | 448 | 3,193 | 4.167411 | 0.332589 | 0.06481 | 0.048206 | 0.067488 | 0.23353 | 0.203535 | 0.151044 | 0.085699 | 0.043921 | 0.043921 | 0 | 0.016927 | 0.296899 | 3,193 | 85 | 109 | 37.564706 | 0.814699 | 0.347635 | 0 | 0.095238 | 0 | 0 | 0.083881 | 0.060637 | 0 | 0 | 0 | 0 | 0.02381 | 1 | 0.142857 | false | 0 | 0.119048 | 0.047619 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fce46c304276b9b24990e4e0366dffce626e8138 | 1,515 | py | Python | bumblebee/core/respadapter.py | nosoyyo/bumblebee | 60452b03b2cd255ae4582a830b463fe7183e209e | [
"Apache-2.0"
] | null | null | null | bumblebee/core/respadapter.py | nosoyyo/bumblebee | 60452b03b2cd255ae4582a830b463fe7183e209e | [
"Apache-2.0"
] | null | null | null | bumblebee/core/respadapter.py | nosoyyo/bumblebee | 60452b03b2cd255ae4582a830b463fe7183e209e | [
"Apache-2.0"
] | null | null | null | __doc__ = 'for adapting 3 main types of resp: requests, baidubce & bumblebee'
import json
from requests import Response as RequestsResponse
from utils.bce import BceResponse
from utils import SelfAssemblingClass
class GeneralResp():
    '''
    Adapter normalising the three response flavours used here
    (requests.Response, BceResponse, or an already-adapted object)
    into a SelfAssemblingClass.

    0. if everything is ok, directly access this for json
    1. if something wrong, giving out the raw resp for debugging
       thus: not isinstance(this, dict)
    '''

    def __new__(self, resp):
        '''
        :param resp: one of the three type of resps

        NOTE(review): __new__ acts as a factory and returns a
        SelfAssemblingClass (or the input object) rather than a
        GeneralResp instance; the unknown-type branch only prints and
        implicitly returns None — confirm that is intended.
        '''
        if isinstance(resp, SelfAssemblingClass) or isinstance(resp,
                                                               GeneralResp):
            # Already adapted — pass through unchanged.
            return resp
        elif isinstance(resp, BceResponse):
            print(f'assebmling a BceResponse .obj..')
            # BCE responses carry their payload on .metadata.
            return SelfAssemblingClass(resp.metadata.__dict__)
        elif isinstance(resp, RequestsResponse):
            print(f'assembling a requests.Response obj...')
            try:
                # Prefer the body when it parses as JSON.
                doc = json.loads(resp.text)
                print('doc seems ok, pass it for assembling...')
                return SelfAssemblingClass(doc)
            except Exception:
                # Body was not valid JSON — fall back to raw attributes.
                print('assembling a requests.Response')
                return SelfAssemblingClass(resp.__dict__)
        else:
            print(
                f'respadapter.GeneralResp: input must be some Response <obj>,\
                got a {type(resp)}')

    def __contains__(self, item):
        # NOTE(review): likely unreachable in practice because __new__
        # never returns a GeneralResp instance — confirm.
        return item in self.__dict__.keys()
| 34.431818 | 78 | 0.590759 | 158 | 1,515 | 5.512658 | 0.506329 | 0.064294 | 0.041332 | 0.061998 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002962 | 0.331353 | 1,515 | 43 | 79 | 35.232558 | 0.856861 | 0.128713 | 0 | 0 | 0 | 0 | 0.158805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.035714 | 0.142857 | 0.035714 | 0.428571 | 0.178571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fce6cd181284a407deac3e65af5c7526a1141077 | 698 | py | Python | scripts/hotandcold.py | jkutner/minecraft-python-server | 9c09ffb6fb0001d89100d7f5b55c5a682a7ff81f | [
"MIT"
] | null | null | null | scripts/hotandcold.py | jkutner/minecraft-python-server | 9c09ffb6fb0001d89100d7f5b55c5a682a7ff81f | [
"MIT"
] | null | null | null | scripts/hotandcold.py | jkutner/minecraft-python-server | 9c09ffb6fb0001d89100d7f5b55c5a682a7ff81f | [
"MIT"
] | 1 | 2021-01-02T16:16:56.000Z | 2021-01-02T16:16:56.000Z | from mcpi.minecraft import Minecraft
import math
import time
import random
import pycraft
# Connect to the running Minecraft server via the pycraft helper.
mc = pycraft.new_minecraft()

# Hide a marker block (block id 57) at a random x/z on the map, placed
# on the surface via getHeight.
destX = random.randint(-127, 127)
destZ = random.randint(-127, 127)
destY = mc.getHeight(destX, destZ)
block = 57
mc.setBlock(destX, destY, destZ, block)
mc.postToChat("Block set")

# Hot-and-cold game loop: post a temperature hint based on the
# player's horizontal (x/z) distance to the hidden block.
# NOTE(review): distance is a float, so the `== 0` win condition only
# fires when the player stands exactly on the block's x/z; distances
# in (0, 6] produce no message, and there is no sleep so chat is
# spammed every iteration — confirm this is intended.
while True:
    pos = mc.player.getPos()
    distance = math.sqrt((pos.x - destX) ** 2 + (pos.z - destZ) ** 2)
    if distance > 100:
        mc.postToChat("Freezing")
    elif distance > 50:
        mc.postToChat("Cold")
    elif distance > 25:
        mc.postToChat("Warm")
    elif distance > 12:
        mc.postToChat("Boiling")
    elif distance > 6:
        mc.postToChat("On fire")
    elif distance == 0:
        mc.postToChat("Found it")
        break
fce82e902ad945e4eaa9e4c10493543217fafc73 | 2,368 | py | Python | fuzzinator/tracker/bugzilla.py | elecro/fuzzinator | 2ed30127c364d50af960ad9f5cecbbae5cde2381 | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/tracker/bugzilla.py | elecro/fuzzinator | 2ed30127c364d50af960ad9f5cecbbae5cde2381 | [
"BSD-3-Clause"
] | null | null | null | fuzzinator/tracker/bugzilla.py | elecro/fuzzinator | 2ed30127c364d50af960ad9f5cecbbae5cde2381 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
from bugzilla import *
from .base import BaseTracker
class BugzillaTracker(BaseTracker):
    """Issue tracker backend talking to a Bugzilla instance.

    Wraps the ``bugzilla`` package's ``Bugzilla`` client to search,
    create and attach test cases to bugs for a fixed ``product``.
    """

    def __init__(self, product, url):
        """
        :param product: Bugzilla product name used for queries/reports.
        :param url: base URL of the Bugzilla instance.
        """
        self.product = product
        self.bzapi = Bugzilla(url)
        # Remove old token and cookie files since they may be outdated.
        if os.path.exists(self.bzapi.tokenfile):
            os.remove(self.bzapi.tokenfile)
        if os.path.exists(self.bzapi.cookiefile):
            os.remove(self.bzapi.cookiefile)

    @property
    def logged_in(self):
        # Truthy once a user session exists on the client.
        return self.bzapi.user

    def login(self, username, pwd):
        """Log in to Bugzilla; return True on success, False on failure."""
        try:
            self.bzapi.login(user=username, password=pwd)
            return True
        except BugzillaError:
            return False

    def find_issue(self, query):
        """Return open bugs of ``self.product`` whose summary matches *query*."""
        return self.bzapi.query(self.bzapi.build_query(product=self.product,
                                                       status=['NEW', 'REOPENED', 'ASSIGNED'],
                                                       short_desc=query,
                                                       include_fields=['id', 'summary', 'weburl']))

    def report_issue(self, report_details, test, extension):
        """File a new bug and attach the failing test to it.

        :param report_details: dict with product, component, summary,
            version, description and blocks keys.
        :param test: test content as bytes, written to a temp file.
        :param extension: file extension for the attachment name.
        :returns: the created bug object.
        """
        create_info = self.bzapi.build_createbug(product=report_details['product'],
                                                 component=report_details['component'],
                                                 summary=report_details['summary'],
                                                 version=report_details['version'],
                                                 description=report_details['description'],
                                                 blocks=report_details['blocks'])
        bug = self.bzapi.createbug(create_info)
        # NOTE(review): the temp file is not cleaned up if attachfile
        # raises; consider try/finally or tempfile — confirm.
        test_file = 'test.{ext}'.format(ext=extension)
        with open(test_file, 'wb') as f:
            f.write(test)
        self.bzapi.attachfile(idlist=bug.bug_id, attachfile=test_file, description='Test', is_patch=False)
        os.remove(test_file)
        return bug

    def __call__(self, issue):
        # Intentionally a no-op for this backend.
        pass

    def issue_url(self, issue):
        """Return the web URL of a bug returned by ``find_issue``."""
        return issue.weburl
| 36.430769 | 106 | 0.559122 | 249 | 2,368 | 5.192771 | 0.465863 | 0.083527 | 0.015468 | 0.021655 | 0.035576 | 0.035576 | 0 | 0 | 0 | 0 | 0 | 0.006456 | 0.345861 | 2,368 | 64 | 107 | 37 | 0.828276 | 0.127534 | 0 | 0 | 0 | 0 | 0.047133 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162791 | false | 0.046512 | 0.069767 | 0.069767 | 0.395349 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fceb36dd10a7c2e8d6eea587eff7fe47021932ef | 3,741 | py | Python | HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/HT_natural_convection.py | CarlGriffinsteed/UVM-ME144-Heat-Transfer | 9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca | [
"CC-BY-3.0"
] | 7 | 2017-06-02T20:31:22.000Z | 2021-04-05T13:52:33.000Z | HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/HT_natural_convection.py | CarlGriffinsteed/UVM-ME144-Heat-Transfer | 9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca | [
"CC-BY-3.0"
] | null | null | null | HW2/gradebook_202101-14765_HW20220Assignment_2021-03-11-18-59-42/Libraries/HT_natural_convection.py | CarlGriffinsteed/UVM-ME144-Heat-Transfer | 9c477449d6ba5d6a9ee7c57f1c0ed4aab0ce4cca | [
"CC-BY-3.0"
] | 9 | 2019-01-24T17:43:41.000Z | 2021-07-25T18:08:34.000Z | """
Object name: HorizontalCylinder
Functions: Gr(g,beta,DT,D,nu) gives the Grashoff number based on:
gravity g, thermal expansion coefficient beta, Temperature difference DT,
length scale D, viscosity nu
Ra(g,beta,DT,D,nu,alpha) gives the Rayleigh number where alpha is the thermal conductivity.
"""
import numpy as np
import scipy
import scipy.optimize
class HorizontalCylinder(object):
    """Natural convection about a horizontal cylinder.

    from NewLibraries import HT_natural_convection as natconv
    cyl = natconv.HorizontalCylinder(correlation, Ra, Pr = 0.0)
    where correlation is "Morgan" or "Churchill-Chu"
    cyl = natconv.HorizontalCylinder("Morgan", Ra)
    cyl = natconv.HorizontalCylinder("Churchill-Chu", Ra, Pr = xx)

    Sets ``self.Nu`` (Nusselt number).  When the inputs are outside the
    chosen correlation's validity, a warning is printed and ``Nu`` is
    left unset (matching the original behaviour for bad inputs).
    """

    # Morgan coefficient table: (upper Ra bound, C, n) with Nu = C*Ra**n.
    _MORGAN_TABLE = (
        (1e-2, 0.675, 0.058),
        (1e2, 1.02, 0.148),
        (1e4, 0.85, 0.188),
        (1e7, 0.480, 0.250),
        (1e12, 0.125, 0.333),
    )

    def __init__(self, correlation="Morgan", Ra=0.0, Pr=0.0):
        self.correlation = correlation
        self.Ra = Ra
        if correlation == "Morgan":
            for upper, C, n in self._MORGAN_TABLE:
                if Ra <= upper:
                    self.Nu = C * Ra ** n
                    break
            else:
                # Bug fix: the original fell through with C and n
                # undefined for Ra > 1e12 and crashed with NameError;
                # warn like the other invalid-input branches instead.
                print("Warning Ra is out of range for the Morgan correlation")
        elif correlation == "Churchill-Chu":
            if Pr == 0.:
                print("Warning you must specify Pr for Churchill and Chu correlation")
            else:
                self.Nu = (0.60+(0.387*Ra**(1./6.))/(1.+(0.559/Pr)**(9./16.))**(8./27.))**2
        else:
            print("Warning wrong correlation name")
class VerticalEnclosure(object):
    """Natural convection inside a vertical rectangular enclosure.

    from NewLibraries import HT_natural_convection as natconv
    enc = natconv.VerticalEnclosure(Ra, Pr, H, L)

    Selects a Nusselt-number correlation based on the aspect ratio
    ``H/L``, the Rayleigh number ``Ra`` and the Prandtl number ``Pr``.
    ``self.Nu`` is set to ``np.inf`` (after printing a warning) when
    the inputs fall outside every correlation's validity range.
    """

    def __init__(self, Ra, Pr, H, L):
        self.Ra = Ra
        self.Pr = Pr
        self.H = H
        self.L = L
        # Bug fix: the original wrapped this logic in
        # ``if correlation == "Morgan":`` where ``correlation`` was
        # never defined, so __init__ always raised NameError.
        if (H / L) < 2.:
            if Ra * Pr / (0.2 + Pr) > 1.e3:
                self.Nu = 0.18 * (Pr / (0.2 + Pr) * Ra) ** 0.29
            else:
                print('Ra is too low for this correlation')
                self.Nu = np.inf
        elif H / L < 10:
            if Ra < 1e10:
                self.Nu = 0.22 * (Pr / (0.2 + Pr) * Ra) ** 0.28 * (H / L) ** (-0.25)
            else:
                print('Ra is too high for this correlation')
                self.Nu = np.inf
        elif Ra < 1e4:
            print('Ra is too low for this correlation')
            self.Nu = np.inf
        elif Ra < 1e7:
            if Pr > 0.6 and Pr < 2e4:
                print('ok')
                self.Nu = 0.42 * Ra ** 0.25 * Pr ** 0.012 * (H / L) ** (-0.3)
            else:
                print('Pr is out of bounds for this correlation')
                self.Nu = np.inf
        elif Ra < 1e9:
            if Pr > 0.6 and Pr < 20.:
                self.Nu = 0.46 * Ra ** (1. / 3.)
            else:
                print('Pr is out of bounds for this correlation')
                self.Nu = np.inf
        else:
            print('Ra is too high, got nothing for you')
            self.Nu = np.inf
def Gr(g=9.81, beta=0.0, DT=0.0, D=0.0, nu=1.0):
    """Grashof number Gr = g*beta*DT*D^3 / nu^2.

    g: gravity, beta: thermal expansion coefficient, DT: temperature
    difference, D: length scale, nu: kinematic viscosity.
    """
    buoyancy = g * beta * DT * D ** 3
    return buoyancy / (nu * nu)
def Ra(g=9.81, beta=0.0, DT=0.0, D=0.0, nu=1.0, alpha=1.0):
    """Rayleigh number Ra = g*beta*DT*D^3 / (nu*alpha).

    Same parameters as Gr, plus alpha: thermal diffusivity.
    """
    numerator = g * beta * DT * D ** 3
    denominator = nu * alpha
    return numerator / denominator
| 35.971154 | 102 | 0.493718 | 506 | 3,741 | 3.626482 | 0.254941 | 0.039237 | 0.091553 | 0.035967 | 0.531335 | 0.518256 | 0.474659 | 0.474659 | 0.474659 | 0.431608 | 0 | 0.071336 | 0.377974 | 3,741 | 103 | 103 | 36.320388 | 0.717232 | 0.260893 | 0 | 0.287671 | 0 | 0 | 0.129008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054795 | false | 0 | 0.041096 | 0.027397 | 0.150685 | 0.123288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fced7903bc90aa448acbe8f9d949ea135dc54660 | 3,765 | py | Python | bdd/contact_steps.py | Nadezhda-Sokolova/Python_training_from_software-testing.ru | 5ce65cee1d538dc5d0b25beba67547b32e612a10 | [
"Apache-2.0"
] | null | null | null | bdd/contact_steps.py | Nadezhda-Sokolova/Python_training_from_software-testing.ru | 5ce65cee1d538dc5d0b25beba67547b32e612a10 | [
"Apache-2.0"
] | null | null | null | bdd/contact_steps.py | Nadezhda-Sokolova/Python_training_from_software-testing.ru | 5ce65cee1d538dc5d0b25beba67547b32e612a10 | [
"Apache-2.0"
] | null | null | null | from pytest_bdd import given, when, then
from model.contact import Contact
import random
@given('a contact list')
def contact_list(db):
    """BDD fixture: snapshot of the current contact list from the DB."""
    return db.get_contacts_list()
@given('a contact with <first_name>, <last_name>, <address>, <home_phone>, <work_phone>, <mobile_phone>, <fax>, <mail_1>, <mail_2> and <mail_3>')
def new_contact(first_name, last_name, address, home_phone, work_phone, mobile_phone, fax, mail_1, mail_2, mail_3):
    """Build a Contact model from the scenario-outline parameters."""
    return Contact(first_name=first_name, last_name=last_name, address=address,
                   home_phone=home_phone, mobile_phone=mobile_phone, work_phone=work_phone, fax=fax, mail_1=mail_1, mail_2=mail_2, mail_3=mail_3 )
@when('I add a new contact to the list')
def add_new_contact(app, new_contact):
    """Create the contact through the UI: open form, fill, submit, return home."""
    app.contacts.New_contact_form()
    app.contacts.Filling_information_form(new_contact)
    app.contacts.Submit_new_contact_creation()
    app.contacts.Open_home_page()
@then('the new contact list is equal to the old list with the added contact')
def verify_contact_added(db, contact_list, new_contact, app):
    """Check the UI count grew by one and the DB matches old list + new contact."""
    assert len(contact_list) + 1 == app.contacts.Count()
    new_contacts = db.get_contacts_list()
    contact_list.append(new_contact)
    # Compare order-independently by sorting both sides on id.
    assert sorted(contact_list, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
@given('a non-empty contact list')
def non_empty_contact_list(db, app):
    """Ensure at least one contact exists, creating one via the UI if the DB is empty."""
    if len(db.get_contacts_list()) == 0:
        app.contacts.New_contact_form()
        app.contacts.Filling_information_form(Contact(first_name="Edited first name", last_name="Edited last name", address="Nizhny_Novgorod",
                                                      home_phone="111", work_phone="222", mobile_phone="333", fax = "0000",
                                                      mail_1="ddd@ya.by", mail_2='fff@wer.us', mail_3="kol@gmail.com"))
        app.contacts.Submit_new_contact_creation()
    return db.get_contacts_list()
@given('a random contact from the list')
def random_contact(non_empty_contact_list):
    """Pick one contact at random from the guaranteed-non-empty list."""
    return random.choice(non_empty_contact_list)
@when('I delete the contact from list')
def delete_contact(app, random_contact):
    """Delete the chosen contact through the UI by its id."""
    app.contacts.delete_contact_by_id(random_contact.id)
@then('the new contact list is equal to the old list without the delete contact')
def verify_contact_delete(db, non_empty_contact_list, random_contact, app, check_ui):
    """Check the UI count dropped by one; optionally compare the UI list to the DB."""
    old_contacts = non_empty_contact_list
    assert len(old_contacts) - 1 == app.contacts.Count()
    new_contacts = db.get_contacts_list()
    old_contacts.remove(random_contact)
    # The slower UI-vs-DB comparison runs only with --check_ui.
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contacts.get_contacts_list(),
                                                                     key=Contact.id_or_max)
@when('I modify the contact from list')
def contact_list_modification(app, random_contact):
    """Overwrite the chosen contact's fields with fixed test data via the UI."""
    new_for_contact = Contact(first_name="Modify first name", last_name="Edited last name", address="Nizhny_Novgorod",
                              home_phone="111", work_phone="222", mobile_phone="333", fax="0000",
                              mail_1="ddd@ya.by", mail_2='fff@wer.us', mail_3="kol@gmail.com")
    app.contacts.edit_contact_by_id(random_contact.id)
    app.contacts.Filling_information_form(new_for_contact)
    app.contacts.Submit_updating_form()
@then('the new contact list is equal to the old list')
def verification_list_groups_are_the_same(db, non_empty_contact_list, check_ui, app):
    """Check modification kept the contact count; optionally compare UI to DB.

    NOTE(review): the function name mentions "groups" but this step
    operates on contacts — likely a copy-paste from the group steps.
    """
    new_contacts = db.get_contacts_list()
    assert len(non_empty_contact_list) == app.contacts.Count()
    app.contacts.Open_home_page()
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contacts.get_contacts_list(),
                                                                     key=Contact.id_or_max)
| 46.481481 | 160 | 0.705976 | 558 | 3,765 | 4.448029 | 0.168459 | 0.079774 | 0.048348 | 0.061241 | 0.560435 | 0.500806 | 0.420629 | 0.39726 | 0.383562 | 0.383562 | 0 | 0.015324 | 0.185392 | 3,765 | 80 | 161 | 47.0625 | 0.793935 | 0 | 0 | 0.306452 | 0 | 0.016129 | 0.176674 | 0 | 0 | 0 | 0 | 0 | 0.096774 | 1 | 0.16129 | false | 0 | 0.048387 | 0.048387 | 0.274194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fceeb5c97dcacc7efd221c426aaa56e4566fd2c1 | 3,050 | py | Python | tests/validate_mcts.py | hdelecki/AdaptiveStressTestingToolbox | 184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed | [
"MIT"
] | 29 | 2019-01-09T23:56:35.000Z | 2022-03-18T03:41:10.000Z | tests/validate_mcts.py | hdelecki/AdaptiveStressTestingToolbox | 184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed | [
"MIT"
] | 39 | 2019-01-10T00:32:26.000Z | 2022-03-12T00:29:05.000Z | tests/validate_mcts.py | hdelecki/AdaptiveStressTestingToolbox | 184d7d7f1b4acb65eecb749e3c3a78cbcfc3c4ed | [
"MIT"
] | 11 | 2019-01-10T08:11:47.000Z | 2021-12-28T15:56:02.000Z | from examples.AV.example_runner_mcts_av import runner as mcts_runner
def validate_mcts():
    """Smoke-test the MCTS AST runner.

    Runs one tiny epoch of each MCTS variant ('mcts', 'mctsbv',
    'mctsrs') in both stress-test modes against the AV example
    environment, logging under ./data/mcts.

    :returns: True when every run completed without raising.
    """
    # Overall settings (the original re-assigned max_path_length, s_0
    # and several dict entries a second time with identical values;
    # that dead duplication has been removed).
    max_path_length = 50
    s_0 = [0.0, -4.0, 1.0, 11.17, -35.0]
    base_log_dir = './data'

    # experiment settings
    run_experiment_args = {'snapshot_mode': 'last',
                           'snapshot_gap': 1,
                           'log_dir': base_log_dir + '/mcts',
                           'exp_name': 'mcts',
                           'seed': 0,
                           'n_parallel': 1,
                           'tabular_log_file': 'progress.csv'
                           }

    # runner settings: a single small epoch keeps validation fast
    runner_args = {'n_epochs': 1,
                   'batch_size': 500,
                   'plot': False
                   }

    # env settings
    env_args = {'id': 'ast_toolbox:GoExploreAST-v1',
                'blackbox_sim_state': True,
                'open_loop': False,
                'fixed_init_state': True,
                's_0': s_0,
                }

    # simulation settings
    sim_args = {'blackbox_sim_state': True,
                'open_loop': False,
                'fixed_initial_state': True,
                'max_path_length': max_path_length
                }

    # reward settings
    reward_args = {'use_heuristic': True}

    # spaces settings
    spaces_args = {}

    sampler_args = {'n_envs': 1,
                    'open_loop': False}

    # MCTS Settings
    mcts_bpq_args = {'N': 10}

    # Exercise every MCTS flavour in both stress-test modes.
    for mcts_type in ['mcts', 'mctsbv', 'mctsrs']:
        for stress_test_mode in [1, 2]:
            mcts_algo_args = {'max_path_length': max_path_length,
                              'stress_test_mode': stress_test_mode,
                              'ec': 100.0,
                              'n_itr': 1,
                              'k': 0.5,
                              'alpha': 0.5,
                              'clear_nodes': True,
                              'log_interval': 500,
                              'plot_tree': True,
                              'plot_path': run_experiment_args['log_dir'] + '/' + mcts_type + '_tree',
                              'log_dir': run_experiment_args['log_dir'],
                              }

            mcts_runner(
                mcts_type=mcts_type,
                env_args=env_args,
                run_experiment_args=run_experiment_args,
                sim_args=sim_args,
                reward_args=reward_args,
                spaces_args=spaces_args,
                algo_args=mcts_algo_args,
                bpq_args=mcts_bpq_args,
                runner_args=runner_args,
                sampler_args=sampler_args
            )
    return True
# Allow the validation to be executed directly as a script.
if __name__ == '__main__':
    validate_mcts()
| 31.770833 | 102 | 0.475738 | 325 | 3,050 | 4.049231 | 0.295385 | 0.041033 | 0.079027 | 0.036474 | 0.266717 | 0.209726 | 0.148936 | 0.103343 | 0.045593 | 0.045593 | 0 | 0.034659 | 0.422951 | 3,050 | 95 | 103 | 32.105263 | 0.713068 | 0.047541 | 0 | 0.085714 | 0 | 0 | 0.162349 | 0.009326 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.014286 | 0 | 0.042857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcf41308fbf0ea2ddf14fd935e69cbfa21da77dc | 1,274 | py | Python | SocialDistanceDetector/loggingModule.py | Rankush888/Social-Distancing-Detector | e4a73ad84c63a6152fefdf9606ccf8850be7d629 | [
"MIT"
] | null | null | null | SocialDistanceDetector/loggingModule.py | Rankush888/Social-Distancing-Detector | e4a73ad84c63a6152fefdf9606ccf8850be7d629 | [
"MIT"
] | null | null | null | SocialDistanceDetector/loggingModule.py | Rankush888/Social-Distancing-Detector | e4a73ad84c63a6152fefdf9606ccf8850be7d629 | [
"MIT"
] | null | null | null | try:
import logging
import os
except BaseException:
print('Exception got in importing the module.')
class makeLog:
    """Thin wrapper around :mod:`logging` writing to log_files/logfile.log.

    On construction, creates a ``log_files`` directory under the current
    working directory (if missing) and configures the root logger to
    write there at DEBUG level, truncating any previous log file.
    """

    def __init__(self):
        # Fix: the original joined 'log_files\\' with a hard-coded
        # Windows separator, which on POSIX created a directory literally
        # named 'log_files\' and never matched the listdir() check.
        # os.makedirs(exist_ok=True) also replaces the duplicated
        # if/else branches that differed only by the mkdir call.
        current_dir = os.getcwd()
        path = os.path.join(current_dir, 'log_files')
        os.makedirs(path, exist_ok=True)
        file_path = os.path.join(path, 'logfile.log')
        logging.basicConfig(filename=file_path,
                            format='%(asctime)s %(message)s',
                            filemode='w')
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)

    def debug(self, string):
        # Fix: a second method also named ``debug`` (calling
        # logger.critical) shadowed this one, so debug() actually
        # logged at CRITICAL and no critical() method existed.
        self.logger.debug(string)

    def info(self, string):
        self.logger.info(string)

    def warning(self, string):
        self.logger.warning(string)

    def error(self, string):
        self.logger.error(string)

    def critical(self, string):
        self.logger.critical(string)
| 28.954545 | 66 | 0.530612 | 134 | 1,274 | 4.932836 | 0.358209 | 0.1059 | 0.1059 | 0.151286 | 0.444781 | 0.444781 | 0.360061 | 0.360061 | 0.263238 | 0.263238 | 0 | 0 | 0.358713 | 1,274 | 43 | 67 | 29.627907 | 0.809058 | 0 | 0 | 0.363636 | 0 | 0 | 0.112916 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.30303 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcf6780f79287563663f060074514f87c285483f | 1,618 | py | Python | modelzoo/SOK/DLRM/data/bin2bin.py | aalbersk/DeepRec | f673a950780959b44dcda99398880a1d883ab338 | [
"Apache-2.0"
] | 292 | 2021-12-24T03:24:33.000Z | 2022-03-31T15:41:05.000Z | modelzoo/SOK/DLRM/data/bin2bin.py | aalbersk/DeepRec | f673a950780959b44dcda99398880a1d883ab338 | [
"Apache-2.0"
] | 54 | 2021-12-24T06:40:09.000Z | 2022-03-30T07:57:24.000Z | modelzoo/SOK/DLRM/data/bin2bin.py | aalbersk/DeepRec | f673a950780959b44dcda99398880a1d883ab338 | [
"Apache-2.0"
] | 75 | 2021-12-24T04:48:21.000Z | 2022-03-29T10:13:39.000Z | import os
import argparse
import time
if __name__ == '__main__':
    # Split a flat binary dataset of fixed 160-byte samples into three
    # column files: label.bin (bytes 0-3), dense.bin (bytes 4-55) and
    # category.bin (bytes 56-159).
    parser = argparse.ArgumentParser()
    parser.add_argument('input', type=str)
    parser.add_argument('output', type=str)
    args = parser.parse_args()

    # Output directory must not already exist (os.makedirs raises otherwise).
    os.makedirs(args.output)
    size = os.path.getsize(args.input)
    # Input must be an exact multiple of the 160-byte record size.
    assert(size % 160 == 0)
    num_samples = size // 160
    # Process 1M samples (160 MB of input) per chunk.
    chunk_size = 1024 * 1024
    inp_f = open(args.input, 'rb')
    label_f = open(os.path.join(args.output, 'label.bin'), 'wb')
    dense_f = open(os.path.join(args.output, 'dense.bin'), 'wb')
    category_f = open(os.path.join(args.output, 'category.bin'), 'wb')
    num_loops = num_samples // chunk_size + 1
    start_time = time.time()
    for i in range(num_loops):
        t = time.time()
        if i == (num_loops - 1):
            # Last chunk holds the remainder; it may be empty when the
            # total is an exact multiple of chunk_size.
            batch = min(chunk_size, num_samples % chunk_size)
            if batch == 0:
                break
        else:
            batch = chunk_size
        raw_buffer = inp_f.read(160 * batch)
        # Slice each 160-byte record into its three column segments.
        for j in range(batch):
            label_buffer = raw_buffer[j*160: j*160+4]
            dense_buffer = raw_buffer[j*160+4: j*160+56]
            category_buffer = raw_buffer[j*160+56: j*160+160]
            label_f.write(label_buffer)
            dense_f.write(dense_buffer)
            category_f.write(category_buffer)
        # Per-chunk progress with a linear remaining-time estimate.
        print('%d/%d batch finished. write %d samples, time: %.2fms, remaining time: %.2f min'%(
            i+1, num_loops, batch, (time.time() - t)*1000, ((time.time() - start_time) / 60) * (num_loops / (i+1) - 1)))
    inp_f.close()
    label_f.close()
    dense_f.close()
    category_f.close()
fcf7b3bbe71b81f791bb6c3c67628d01ea766d4a | 836 | py | Python | aes_classifier/helpers/database_importer.py | tinenbruno/aes_classifier | 31d358c4e34a056cf67a5e602ad945011283a6ed | [
"MIT"
] | null | null | null | aes_classifier/helpers/database_importer.py | tinenbruno/aes_classifier | 31d358c4e34a056cf67a5e602ad945011283a6ed | [
"MIT"
] | null | null | null | aes_classifier/helpers/database_importer.py | tinenbruno/aes_classifier | 31d358c4e34a056cf67a5e602ad945011283a6ed | [
"MIT"
] | null | null | null | from ml_buff.models.input_data import InputData
from ml_buff.database_helper import create_tables, drop_tables
from ml_buff.models.base_model import database
# Connection settings for the local ml_buff PostgreSQL database.
DATABASE = {
    'drivername': 'postgresql',
    'host': 'localhost',
    'port': '5432',
    'username': 'postgres',
    'password': 'postgres',
    'database': 'ml_buff'
}

# Path (relative to this script) of the AVA dataset index file; each line is
# space-separated and column 1 holds the image's external id.
DATASET_DEFINITIONS = r'../../AVA_dataset/AVA.txt'

# Rebuild the schema from scratch before importing.
drop_tables()
create_tables()

data_source = []
# Use a context manager so the definitions file is closed even on error
# (the original opened it without ever closing it).
with open(DATASET_DEFINITIONS) as definitions_file:
    for line in definitions_file:
        line = line.strip().split(' ')
        data_source.append({ 'external_id': line[1], 'dataset_name': 'AVA' })

print('datasource built with {0} entries'.format(len(data_source)))
# Bulk-insert in batches of 100 inside a single transaction.
with database.atomic():
    for idx in range(0, len(data_source), 100):
        InputData.insert_many(data_source[idx:idx+100]).execute()
| 26.125 | 73 | 0.671053 | 106 | 836 | 5.084906 | 0.537736 | 0.092764 | 0.055659 | 0.059369 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019006 | 0.181818 | 836 | 31 | 74 | 26.967742 | 0.769006 | 0 | 0 | 0 | 0 | 0 | 0.206938 | 0.029904 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.043478 | 0.130435 | 0 | 0.130435 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcf7d278277033217c65b945fa153e84b5082b8a | 9,050 | py | Python | title_screen.py | matthew-sirman/mongoose-game | 02751a85c3849ddb9da48414e165363a6d8a931e | [
"Unlicense"
] | null | null | null | title_screen.py | matthew-sirman/mongoose-game | 02751a85c3849ddb9da48414e165363a6d8a931e | [
"Unlicense"
] | null | null | null | title_screen.py | matthew-sirman/mongoose-game | 02751a85c3849ddb9da48414e165363a6d8a931e | [
"Unlicense"
] | null | null | null | import pygame
import socket
import errno
import threading
from button import Button
from text import Text, TextFeed
from textbox import TextBox
from message import Message
from instructions import Instruction
from cards import Deck, Card
class TitleScreen:
    """Pre-game lobby screen: collects name/IP/port, connects to the game
    server and waits until the server starts a game.

    run() blocks until a game package is received from the server and then
    returns it as [active_id, players, client_socket, sync_deck].
    """

    # NOTE(review): declared but not referenced anywhere in this class.
    UPDATE_FREQUENCY = 1000

    def __init__(self, screen_size=(1280, 720), title="Mongoose", clear_colour=(66, 135, 245)):
        self.screen_size = screen_size
        self.title = title
        self.clear_colour = clear_colour
        pygame.init()
        self.screen = pygame.display.set_mode(screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
        pygame.display.set_caption(title)
        self.clock = pygame.time.Clock()
        # Widgets are registered under the "title_screen" group so the
        # Button/TextBox class-level helpers can update/render them together.
        self.__title_text = Text(title, 64, text_colour=(255, 255, 255))
        self.__name_input = TextBox((0.5, 0.4), (0.4, 0.06),
                                    Text(font_size=32, font_hierarchy=["Verdana"]),
                                    Text("Name", font_size=32, font_hierarchy=["Verdana"], text_colour=(64, 64, 64)),
                                    register_group="title_screen")
        self.__ip_input = TextBox((0.5, 0.5), (0.4, 0.06),
                                  Text(font_size=32, font_hierarchy=["Verdana"]),
                                  Text("IP Address", font_size=32, font_hierarchy=["Verdana"],
                                       text_colour=(64, 64, 64)),
                                  register_group="title_screen")
        self.__port_input = TextBox((0.5, 0.6), (0.4, 0.06),
                                    Text(font_size=32, font_hierarchy=["Verdana"]),
                                    Text("Port", font_size=32, font_hierarchy=["Verdana"], text_colour=(64, 64, 64)),
                                    register_group="title_screen")
        self.__join_button = Button("Join", (0.5, 0.8), (0.1, 0.08), register_group="title_screen")
        self.__join_button.subscribe_event(self.join_game)
        self.__status_text = Text("Status: Not connected", font_size=28,
                                  font_hierarchy=["Verdana"], text_colour=(255, 0, 0))
        self.__info_feed = TextFeed((0.85, 0.5), (0.3, 0.3))
        self.client_socket = None
        self.__connected_to_server = False
        # self.__server_handling_thread = threading.Thread(target=self.handle_server_io, daemon=True)
        # self.__server_handling_thread.start()
        self.__sync_deck = None        # Deck sent by the server before game start
        self.__game_package = []       # filled by start_game(); run() exits once non-empty
        self.__join_game_thread = None

    def run(self):
        """Run the lobby loop at 60 FPS; return the game package once a game starts."""
        while not self.__game_package:
            pygame.event.pump()
            for event in pygame.event.get():
                if event.type == pygame.VIDEORESIZE:
                    self.screen_size = (event.w, event.h)
                    self.screen = pygame.display.set_mode(self.screen_size, pygame.DOUBLEBUF | pygame.RESIZABLE)
                if event.type == pygame.QUIT:
                    self.quit()
                TextBox.update_all("title_screen", self.screen_size, event)
            mouse_pos = pygame.mouse.get_pos()
            mouse_pressed = pygame.mouse.get_pressed()
            Button.update_all("title_screen", self.screen_size, mouse_pos, mouse_pressed)
            self.render()
            # Poll the server socket once per frame (non-blocking).
            self.handle_server_io()
            self.clock.tick(60)
        return self.__game_package

    def render(self):
        """Draw every lobby widget and flip the display buffer."""
        self.screen.fill(self.clear_colour)
        self.__title_text.render(self.screen, (0.5, 0.2))
        Button.render_all("title_screen", self.screen)
        TextBox.render_all("title_screen", self.screen)
        self.__status_text.render_from_corner(self.screen, (0.1 * self.screen_size[0], 0.8 * self.screen_size[1]))
        self.__info_feed.render(self.screen)
        pygame.display.flip()

    def join_game(self):
        """Join-button callback: start an async connect unless one is in flight."""
        if self.__join_game_thread is not None:
            if self.__join_game_thread.is_alive():
                return
        self.__join_game_thread = threading.Thread(target=self.join_game_async)
        self.__join_game_thread.start()

    def join_game_async(self):
        """Connect to the server (runs on a worker thread) and send our name."""
        if not self.__port_input.text.isnumeric() or self.__connected_to_server:
            return
        ip = self.__ip_input.text
        port = int(self.__port_input.text)
        try:
            self.__status_text.text = f"Status: Connecting to server..."
            self.__status_text.text_colour = (255, 170, 0)
            self.__status_text.update()
            self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.client_socket.settimeout(10)
            self.client_socket.connect((ip, port))
            # Switch to non-blocking mode; handle_server_io polls each frame.
            self.client_socket.setblocking(False)
            self.__status_text.text = f"Status: Connected to {ip}:{port}. Waiting for game..."
            self.__status_text.text_colour = (0, 255, 0)
            self.__status_text.update()
            name_message = Message.new_send_message(
                f"{Instruction.SET_PROPERTY}:'name':'{self.__name_input.text}'".encode("utf-8")
            )
            self.client_socket.sendall(name_message.encode())
            self.__connected_to_server = True
        except ConnectionRefusedError:
            self.__status_text.text = f"Status: Connection to {ip}:{port} failed."
            self.__status_text.text_colour = (255, 0, 0)
            self.__status_text.update()
        except socket.timeout:
            self.__status_text.text = f"Status: Connection to {ip}:{port} timed out."
            self.__status_text.text_colour = (255, 0, 0)
            self.__status_text.update()

    def handle_server_io(self):
        """Poll the non-blocking socket once; dispatch any complete message."""
        if not self.__connected_to_server:
            return
        try:
            message = Message.new_recv_message()
            buffer = self.client_socket.recv(Message.BUFFER_SIZE)
            if not buffer:
                # Empty read => orderly shutdown by the server.
                self.__status_text.text = f"Status: Lost connection to server."
                self.__status_text.text_colour = (255, 0, 0)
                self.__status_text.update()
                self.client_socket.close()
                self.__connected_to_server = False
            # Keep reading until Message reports a fully decoded payload.
            while not message.decode(buffer):
                buffer = self.client_socket.recv(Message.BUFFER_SIZE)
            self.decode_instruction(message.message.decode("utf-8"))
        except IOError as e:
            # EAGAIN/EWOULDBLOCK only means "nothing to read" on a
            # non-blocking socket; anything else is a real failure.
            if e.errno != errno.EAGAIN and e.errno != errno.EWOULDBLOCK:
                self.__status_text.text = f"Error: {e}"
                self.__status_text.text_colour = (255, 0, 0)
                self.__status_text.update()
                self.client_socket.close()
                self.__connected_to_server = False

    def decode_instruction(self, message):
        """Parse "INSTRUCTION:'op1':'op2':..." and act on the instruction."""
        operands = []
        if ":" in message:
            instruction, operand = message.split(":", 1)
            # Operands are single-quoted and colon-separated; a quoted operand
            # may itself contain colons, so track quoting state per character.
            in_string = False
            cur_operand = ""
            for c in operand:
                if c == "'":
                    in_string = not in_string
                else:
                    if in_string:
                        cur_operand += c
                    elif c == ":":
                        operands.append(cur_operand)
                        cur_operand = ""
            operands.append(cur_operand)
        else:
            instruction = message
        if instruction == Instruction.Update.GAME_RUNNING:
            self.__status_text.text = f"Status: Game already running on server."
            self.__status_text.text_colour = (255, 170, 0)
            self.__status_text.update()
            self.client_socket.close()
            self.__connected_to_server = False
        if instruction == Instruction.START_GAME:
            # Operands: active player id, then alternating (name, id) pairs.
            active_id = int(operands[0])
            players = []
            _p = []
            for i, o in enumerate(operands[1:]):
                # even: name, odd: id
                if i % 2 == 0:
                    _p = [o]
                else:
                    _p.append(int(o))
                    players.append(_p)
            self.start_game(active_id, sorted(players, key=lambda x: x[1]))
        if instruction == Instruction.Update.PLAYER_JOINED:
            assert len(operands) == 1
            self.__info_feed.add_line(f"Player {operands[0]} joined the game.")
        if instruction == Instruction.Game.SEND_DECK:
            # 52 operands of the form "<suit digit>-<value>".
            assert len(operands) == 52
            suit_map = {"0": "Spades", "1": "Diamonds", "2": "Clubs", "3": "Hearts"}
            cards = []
            for card in operands:
                s, v = card.split("-")
                cards.append(Card(suit_map[s], int(v)))
            self.__sync_deck = Deck(cards)

    def start_game(self, active_id, players):
        """Record the data run() should return; this ends the lobby loop."""
        self.__game_package = [active_id, players, self.client_socket, self.__sync_deck]

    def quit(self):
        """Notify the server (if connected), then shut pygame and the process down."""
        if self.__connected_to_server:
            self.client_socket.sendall(Message.new_send_message(Instruction.Update.QUIT_GAME.encode("utf-8")).encode())
            # self.__server_handling_thread.join(0.5)
        pygame.quit()
        quit()
| 35.351563 | 119 | 0.572376 | 1,062 | 9,050 | 4.560264 | 0.188324 | 0.047491 | 0.066488 | 0.055751 | 0.367128 | 0.305389 | 0.230436 | 0.206484 | 0.186661 | 0.186661 | 0 | 0.029812 | 0.318011 | 9,050 | 255 | 120 | 35.490196 | 0.754861 | 0.020884 | 0 | 0.223464 | 0 | 0 | 0.067073 | 0.006775 | 0 | 0 | 0 | 0 | 0.011173 | 1 | 0.050279 | false | 0 | 0.055866 | 0 | 0.139665 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcf84360ff359b57ca0fd18707821b983a769abc | 1,357 | py | Python | tests/tests.py | CoderOJ/RLJ | 20bb7ffd1581bf8cf820e0f594b63d2760ea721c | [
"MIT"
] | 45 | 2017-11-19T06:54:07.000Z | 2022-03-21T14:44:42.000Z | tests/tests.py | CoderOJ/RLJ | 20bb7ffd1581bf8cf820e0f594b63d2760ea721c | [
"MIT"
] | 6 | 2017-12-12T05:33:16.000Z | 2018-08-28T12:30:50.000Z | tests/tests.py | CoderOJ/RLJ | 20bb7ffd1581bf8cf820e0f594b63d2760ea721c | [
"MIT"
] | 12 | 2017-12-12T04:43:04.000Z | 2022-02-23T00:13:47.000Z | # import pytest
from rlj import Judge, JudgeStatus, Config, makeConfig
import os
# Default docopt-style CLI argument map passed to makeConfig(); the tests copy
# it and fill in '-j' with the solution file to judge.
arguments = {
    '--O2': False,
    '--delete': False,
    '--genConfig': False,
    '--help': False,
    '--silent': False,
    '--version': False,
    '-c': 'config.yml',
    '-j': None,
    'FILE': None
}
def getConfig(st):
    """Build a judge Config for the solution file ``<st>.cpp``."""
    cli_args = dict(arguments)
    cli_args['-j'] = st + '.cpp'
    return makeConfig('config.yml', cli_args)
def runTest1(st):
    """Judge <st>.cpp and assert compile success plus the expected per-case verdicts."""
    result = list(Judge(getConfig(st)).judge())
    # First yielded item is the compilation status tuple.
    compile_status = result[0]
    print(result)
    print(compile_status)
    assert compile_status[0] == 'DONE'
    assert compile_status[1] == '编译成功'  # "compilation succeeded"
    # Remaining items: (case number, (input file, answer file), JudgeStatus).
    # JudgeStatus args are presumably (verdict, time limit, memory?, score) --
    # TODO confirm against rlj.JudgeStatus.
    assert result[1] == (1, ('data/test1.in', 'data/test1.ans'),
                         JudgeStatus(st, 2, 0.5, 0))
    assert result[2] == (2, ('data/test2.in', 'data/test2.ans'),
                         JudgeStatus(st, 2, 0.5, 0))
def test_1():
    """Each canonical run verdict should be reproduced by the judge."""
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    for verdict in ('AC', 'WA', 'TLE', 'MLE', 'RE'):
        runTest1(verdict)
def runTest2(st, chn):
    """Judge <st>.cpp and expect compile status ``st`` with description ``chn``."""
    result = list(Judge(getConfig(st)).judge())
    # Only the compilation status matters here; no run results expected.
    compile_status = result[0]
    assert compile_status[0] == st
    assert compile_status[1] == chn
def test_2():
    """Compilation-failure verdicts and their localized descriptions."""
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    cases = (('ERROR', '编译错误'), ('CTLE', '编译超时'))
    for verdict, description in cases:
        runTest2(verdict, description)
| 23.396552 | 64 | 0.585851 | 173 | 1,357 | 4.479769 | 0.369942 | 0.117419 | 0.098065 | 0.061935 | 0.28129 | 0.28129 | 0.28129 | 0.229677 | 0.229677 | 0.131613 | 0 | 0.032505 | 0.229182 | 1,357 | 57 | 65 | 23.807018 | 0.708413 | 0.00958 | 0 | 0.181818 | 0 | 0 | 0.127422 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 1 | 0.113636 | false | 0 | 0.045455 | 0 | 0.181818 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcfade839bbd2f518955cd01678b3f38bc653fa2 | 18,424 | py | Python | adventure.py | brianquinlan/2016-christmas-adventure | 40558968765a585414bb1c5b284e716dd4981957 | [
"MIT"
] | null | null | null | adventure.py | brianquinlan/2016-christmas-adventure | 40558968765a585414bb1c5b284e716dd4981957 | [
"MIT"
] | null | null | null | adventure.py | brianquinlan/2016-christmas-adventure | 40558968765a585414bb1c5b284e716dd4981957 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import itertools
import random
import subprocess
import sys
WELCOME_TEXT = """Welcome {name} to the land of Pavlisha.
Many have entered this land but few have returned.
Your quest is to slay the Bad King, who has stolen the Clock of Time. Without
the Clock of Time it will be 2016 forever and there will never be another
another Christmas, birthday or holiday again!
"""
ENDING = """
You have defeated the Bad King and recovered the Clock of Time.
You have saved all future Christmases.
Merry Christmas {name}!
Love,
Brian, Kevin, Sophie, Pavel and Alex.
"""
DIRECTION_CHOICE = """This place seems very familiar to another but you can't
put your finger on how...
You are standing in a snow-covered plain. In every direction stretches untracked
miles of treacherous wilderness. Your blood chills at the thought of entering any
of these foreboding landscapes - but enter you must!
To your East lies Mount Doom - a volcano covered in lava and burning embers.
You can smell the sulfur even from here.
To your South lies a nameless forest. You can hear whispers calling you to
enter. They are not kind voices.
To your West lies Swamp Putrid. Its name is well deserved as you can smell the
decaying remains of those who entered before you.
To your feet lies a cave so dark that you can't see into it more than a
sword-length.
Wait...to your North lies a beautiful meadow with a path that winds away from
the terrible danger.
You take a few minutes to rest and then make your choice.
"""
NORTH_TEXT = """You walk north on the idyllic path. You hear bird song, smell the sweet
flowers and see multi-coloured butterflies. The sun is warm and life is good.
Or is it...
"""
NORTH_CONTINUE_TEXT = """Now that the fight is behind you, you continue on the path.
If anything, the flowers smell even sweater than before. Life is great.
Or is it...
"""
EAST_TEXT = """You walk east towards the hellish fires of Mount Doom.
The air reeks of sulfur and you can feel the heat of the lava as you approach.
Occasional pyroclastic blocks fly from the volcano.
"""
EAST_TREE = """At the peak of the volcano you see a single tree. You wonder how it
managed to survive up here.
As your approach, you see that its huge branches have been charred and covered
with a red film. It radiates a sense of potent malevolence.
Just as you are deciding whether to run of not, it charges you and attempts to
crush you with its powerful branches.
"""
EAST_TREE_WIN = """At the base of the tree you spot a golden ingot and a potion.
You put the ingot in your pocket but you aren't sure what to do with the potion.
Oh heck, you are an adventurer, aren't you? You sip the potion and suddenly
feel a bit stronger.
"""
SOUTH_TEXT = """You enter the dark forest.
Your sense of foreboding lessens briefly when you see five small pigs playing
with each other and eating truffles.
Suddenly lightning flashes from the sky and hits the ground near the pigs. The
sight is horrible but it gets even worse as they change before your eyes into
horrible Zombie Pigmen.
They moan their hatred of life in general (and you in particular) and move
towards you to attack.
Fortunately, the forest restricts their movements so that they can only attack
you one at a time.
"""
SOUTH_ALREADY_DONE = """You wonder around the forest for a while but don't
find anything interesting.
You return to the snowy clearing.
"""
SOUTH_END = """You catch your breath amongst the remains of the Zombie Pigmen.
Suddenly, in the corner of your eye, you see a potion laying next to one of
the zombified pigs.
You read the label and it says "Potion of Invisibility". You hide it in your
pack and return to the clearing.
"""
WEST_TEXT = """You walk into the dank swamp hoping not to vomit from the terrible smells.
In the distance you see a huge giant - maybe the smell of decay is coming from
its victims?
As you get closer, you see the Giant is smiling and realize that it is a
Friendly Giant.
You also see that, behind the giant, there is a Crafting Table and various
magical components! If only you could make use of it for a while...
The giant greats you with a wave and says: "Answer my riddle and the Crafting
Table is yours to use. What gets wetter as it dries?".
"""
WEST_COMPLETED = """You inspect the crafting table and realize that you can use it to
make magical armor and weapons.
You start to work immediately.
After some days, you finish your work and your weapon and armor glow brightly
with their new enchantment!
You walk back to the snowy clearing feeling that there is nothing that you
cannot do with your new magical tools.
Certainly you couldn't be crushed by flying rocks.
"""
WEST_ALREADY_COMPLETED = """You wander around the swamp until the smell overwhelms you.
You return to the snowy clearing.
"""
DOWN_COMMON = """You descend into the dark cave.
There is no light at all except for the faint glow coming from your enchanted
armor. You proceed cautiously, the air chilling you to the bone.
Ahead, you see a massive rock chamber. As you approach, you see that it is so
large that it could contain a huge tower. And it does! Guarding the tower is a
nearly infinite number of soldiers.
"""
DOWN_VISIBLE = DOWN_COMMON + """You carefully sneak towards the tower, trying to avoid
the attention of the guards.
"""
DOWN_INVISIBLE = DOWN_COMMON + """You drink your Potion of Invisibility and race towards
the tower. You make it inside just as it wares off!
You climb the circular stairs until the top of the tower. At the top of the
tower you see a medium-sized man sitting in a throne. It is the Bad King!
"Welcome to my tower, {name}." says the Bad King, "I hope that you are
ready to die."
With those words, he picks up his staff and charges towards you.
"""
EAST_AREA = 'east'
SOUTH_AREA = 'south'
WEST_AREA = 'west'
CAVE_AREA = 'cave'
# Py2/Py3 compatibility: on Python 2, input() evaluates the typed text, so use
# raw_input there; Python 3's input() already returns the raw string.
if sys.version_info[0] < 3:
    non_clearing_input = raw_input
else:
    non_clearing_input = input
def clear():
    """Clear the terminal screen via the system ``clear`` command."""
    try:
        # No shell is needed to run a single argv-style command; dropping
        # shell=True avoids a pointless /bin/sh fork.
        subprocess.call(['clear'])
    except OSError:
        # 'clear' not on PATH (e.g. a bare Windows shell) -- just skip.
        pass
def my_input(*args, **kwargs):
    """Prompt like the built-in input(), then clear the screen."""
    x = non_clearing_input(*args, **kwargs)
    clear()
    return x


# Deliberately shadow the builtin so every prompt in the game clears the
# screen after the player answers.
input = my_input
class Character:
    """The player character: combat stats, equipment and quest progress."""

    def __init__(self, name, race, dexterity, strength, max_hitpoints):
        self.name = name
        self.race = race
        self.dexterity = dexterity      # dodge stat: attacker's roll must reach this to hit
        self.strength = strength        # scales melee damage in get_damage()
        self.max_hitpoints = max_hitpoints
        self.hitpoints = max_hitpoints  # current HP; refilled each visit to the clearing
        self.weapon = 'Sword'
        self.armor = 'Chain Mail'
        self.completed_areas = set()    # area tags (e.g. EAST_AREA) already beaten
        self.inventory = set()          # item names, e.g. 'Invisibility Potion'

    def get_damage(self):
        """Roll melee damage: a die scaled by (strength + 50) / 100; the
        enchanted sword rolls a bigger die (2-20 instead of 1-10)."""
        if self.weapon == 'Enchanted Sword':
            return int(random.randint(2, 20) * (self.strength + 50) / 100)
        else:
            return int(random.randint(1, 10) * (self.strength + 50) / 100)

    def __str__(self):
        # Character sheet shown by the (P)rint command in select_path().
        return """{} the {}
Dexterity: {}
Strength: {}
Hitpoints: {} (of {})
Armor: {}
Weapon: {}
Other Items: {}
""".format(self.name, self.race, self.dexterity, self.strength, self.hitpoints,
           self.max_hitpoints, self.armor, self.weapon,
           ', '.join(sorted(self.inventory)) or '<none>')
class CharacterDeadException(BaseException):
    """Raised when the player's hitpoints drop to zero; unwinds to main()."""

    def __init__(self, character):
        # Keep exception args consistent with construction and retain the
        # dead character so handlers can inspect it (the original discarded
        # the argument entirely).
        super().__init__(character)
        self.character = character
def select_character():
    """Interactively pick a race, roll its stats and return a new Character.

    Stat ranges per race:
      Elf:   dex 75-100, str 25-50,  hp 50-100
      Human: dex 25-75,  str 25-75,  hp 100-150
      Orc:   dex 25-50,  str 75-100, hp 150-200
    """
    print('What race do you want to be?')
    print('')
    print('Elf - Fast but not very strong')
    print('Human - Jack of all trades, master of none')
    print('Orc - Strong but slow')
    print('')
    r = ''
    # Re-prompt until the answer starts with a recognised race letter.
    while not r or r[0] not in 'EHO':
        r = input('Enter (E)lf, (H)uman or (O)rc: ').upper().strip()
    if r[0] == 'E':
        race = 'Elf'
        dexterity = random.randint(75, 100)
        strength = random.randint(25, 50)
        hitpoints = random.randint(50, 100)
        name = input('What is your name, wise Elf? ')
    elif r[0] == 'H':
        race = 'Human'
        dexterity = random.randint(25, 75)
        strength = random.randint(25, 75)
        hitpoints = random.randint(100, 150)
        name = input('What is your name, bold Human? ')
    else:
        race = 'Orc'
        dexterity = random.randint(25, 50)
        strength = random.randint(75, 100)
        hitpoints = random.randint(150, 200)
        name = input('What is your name, strong Orc? ')
    character = Character(name, race, dexterity, strength, hitpoints)
    print('')
    print(character)
    print('')
    return character
class Monster:
    """A combat opponent: flavour text plus the numbers the fight loop uses.

    ``hitname``/``missname`` are the verb phrases printed when the monster
    lands or misses an attack; damage on a hit is rolled uniformly in
    [attack_min_damage, attack_max_damage].
    """

    def __init__(self, name, hitpoints, dexterity, hitname, missname,
                 attack_min_damage, attack_max_damage):
        vars(self).update(
            name=name,
            hitpoints=hitpoints,
            dexterity=dexterity,
            hitname=hitname,
            missname=missname,
            attack_min_damage=attack_min_damage,
            attack_max_damage=attack_max_damage,
        )
def generate_hit_roll():
    """Roll the to-hit die: a uniform value in [20, 120]."""
    base = random.randint(0, 100)
    return base + 20
def proceed_after_fight(character, monster):
    """Run one fight to its end.

    Returns True if the monster dies, False if the player flees; raises
    CharacterDeadException if the player's hitpoints reach zero.  Each round
    the monster strikes first, then the player is prompted to attack or flee.
    """
    while True:
        print('')
        # Monster's attack: it hits when its roll reaches the player's dexterity.
        hit = generate_hit_roll()
        if hit >= character.dexterity:
            damage = random.randint(monster.attack_min_damage,
                                    monster.attack_max_damage)
            character.hitpoints -= damage
            print('The {} {} for {} damage. You have {} hitpoints remaining.'.format(
                monster.name, monster.hitname, damage, character.hitpoints))
        else:
            print('The {} {} - but you dodge away!'.format(monster.name,
                                                           monster.missname))
        if character.hitpoints <= 0:
            raise CharacterDeadException(character)
        c = ''
        while not c or c[0] not in 'AF':
            c = input('What do you want to do? (A)ttack or (F)lee? ').strip().upper()
        if c[0] == 'F':
            print('You cowardly run back to the snowy plains.')
            print('')
            return False
        # Player's attack: hits when the roll reaches the monster's dexterity.
        hit = generate_hit_roll()
        if hit >= monster.dexterity:
            damage = character.get_damage()
            monster.hitpoints -= damage
            if monster.hitpoints > 0:
                print('You swing your {} at the {} and hit it for {} damage. It has {} '
                      'hitpoints remaining.'.format(character.weapon, monster.name,
                                                    damage, monster.hitpoints))
            else:
                print("You swing your mighty {} at the {}. It's body will lay as an "
                      'example to others who dare to confront you.'.format(
                          character.weapon, monster.name))
                return True
        else:
            print('You swing your mighty {} at the {} but hit nothing but air! Maybe '
                  "you aren't cut out for adventuring..."
                  .format(character.weapon, monster.name))
def proceed_after_random_fight(character):
    """Pick a random wandering monster and fight it; see proceed_after_fight
    for the meaning of the return value."""
    monster = random.choice([
        Monster('Giant Snake',
                random.randint(5, 20),
                random.randint(10, 50), 'slashes you with its giant fangs',
                'strikes at you with its giant fangs', 1, 5),
        Monster('Giant Spider',
                random.randint(1, 10),
                random.randint(1, 10), 'bites you with its poisonous fangs',
                'jumpes to bit you', 5, 20),
        Monster('Skeleton',
                random.randint(1, 10),
                random.randint(10, 20), 'stabs you with its ice sword',
                'swings at you with its ice sword', 2, 10),
        Monster('Zombie',
                random.randint(1, 10),
                random.randint(10, 20), 'cruches you with its decaying arms',
                'tries to grab you with its decaying arms', 2, 10),
        Monster('Orc',
                random.randint(2, 50),
                random.randint(10, 20), 'smashes you with its mace',
                'swings at you with its mace', 20, 50),
    ])
    print('You are attacked by a {}!'.format(monster.name))
    return proceed_after_fight(character, monster)
def go_north(character):
    """Beautiful Meadow: an endless chain of random fights -- the only exits
    are fleeing (returns to the clearing) or dying."""
    print(NORTH_TEXT)
    while proceed_after_random_fight(character):
        print('')
        print(NORTH_CONTINUE_TEXT)
def go_east(character):
    """Mount Doom: dodge up to six debris volleys, then fight the Evil Tree
    for a Golden Ingot and a strength potion (one-time reward)."""
    print(EAST_TEXT)
    # Each step up the volcano makes the debris harder to dodge
    # (roll range grows: 0, 0-25, 0-50, ... vs the player's dexterity).
    for i in range(0, 150, 25):
        if random.randint(0, i) > character.dexterity:
            print('A block of pyroclastic debris flies towards you. You attempt to '
                  'dodge but are\ntoo slow.')
            print('')
            if 'Enchanted' in character.armor:
                # Enchanted armor (from the swamp) makes the climb trivial.
                print('The debris hits your {} and bounces off harmlessly.'.format(
                    character.armor))
                print('')
                break
            else:
                print(
                    'It crushes you into a smoldering pile of bones and burned flesh.')
                print('')
                raise CharacterDeadException(character)
        else:
            print('A block of pyroclastic debris flies towards you but you manage to '
                  'dodge out\nof the way.')
        c = ''
        while not c or c[0] not in 'CF':
            c = input('Do you go (C)ontinue of (F)lee? ').upper().strip()
        if c[0] == 'F':
            print('You cowardly run back to the snowy plains.')
            return
    if EAST_AREA in character.completed_areas:
        print(
            'At the peak of the volcano, you see the evil tree that you previously'
            ' defeated. You walk back to the snowy clearing.')
        print('')
        return
    print('')
    print(EAST_TREE)
    evil_tree = Monster('Evil Tree',
                        random.randint(50, 100),
                        random.randint(0, 5),
                        'cruches you with its huge branches',
                        'swings its huge branches towards you', 5, 15)
    if proceed_after_fight(character, evil_tree):
        print(EAST_TREE_WIN)
        character.inventory.add('Golden Ingot')
        # NOTE(review): the "gain" includes the current strength, so drinking
        # the potion more than doubles strength (old + (old + rand)).  The
        # printed message is internally consistent with this, but a plain
        # random.randint(10, 50) gain may have been intended -- confirm.
        strength = character.strength + random.randint(10, 50)
        character.strength += strength
        print(
            'You finish drinking the potion of strength and gain {} strength. You '
            'now have {} strength.'.format(strength, character.strength))
        print('')
        print('You feel like a titan!')
        print('')
        print('You walk back to the snowly clearing')
        print('')
        character.completed_areas.add(EAST_AREA)
def go_south(character):
    """Forest: defeat five increasingly strong Zombie Pigmen to win the
    Invisibility Potion (needed later in the cave); one-time reward."""
    if 'Invisibility Potion' in character.inventory:
        print(SOUTH_ALREADY_DONE)
        return
    print(SOUTH_TEXT)
    # Pigman #i scales with i: HP 5i-10i, damage i to 5i.
    for i in range(1, 6):
        zombie = Monster('Zombie Pigman #{}'.format(i),
                         random.randint(i * 5, i * 10),
                         random.randint(25, 75), 'stabs you with its wicked sword',
                         'swings its sword at you', i, i * 5)
        if not proceed_after_fight(character, zombie):
            # Player fled; no reward.
            return
    print(SOUTH_END)
    character.inventory.add('Invisibility Potion')
def go_west(character):
    """Swamp: answer the giant's riddle (or defeat him) to enchant the
    player's armor and weapon; one-time reward."""
    if WEST_AREA in character.completed_areas:
        print(WEST_ALREADY_COMPLETED)
        print('')
        return
    print(WEST_TEXT)
    answer = input('What gets wetter as it dries? ').strip()
    # A wrong answer triggers a tough fight; winning it still earns the reward.
    if 'towel' not in answer.lower() and 'sponge' not in answer.lower():
        print('"{0}"? "{0}"?! screams the giant. I will smash you into paste!'.
              format(answer))
        print('')
        giant = Monster('Friendly Giant', 500, 50, 'smashes you with a giant fist',
                        'tries to step on you', 15, 50)
        if not proceed_after_fight(character, giant):
            return
    else:
        print(
            """Yes, towels (and sponges) get wetter as they dry, smiles the giant. He walks away humming."""
        )
    print('')
    print(WEST_COMPLETED)
    # The 'Enchanted' prefix is what go_east/go_down check for.
    character.armor = 'Enchanted ' + character.armor
    character.weapon = 'Enchanted ' + character.weapon
    character.completed_areas.add(WEST_AREA)
def go_down(character):
    """Cave: the endgame.  Requires enchanted armor to traverse the dark and
    the Invisibility Potion to reach the Bad King; beating him ends the game
    via sys.exit(0)."""
    if 'Enchanted' not in character.armor:
        # Without the glow of enchanted armor the player takes 5 damage per
        # step forever -- the only ways out are fleeing or dying.
        while True:
            character.hitpoints -= 5
            print(
                'The cave is dark and you stubble around until you bump you head on '
                'the ceiling.')
            print('You take {} damage. You have {} hitpoints remaining.'.format(
                5, character.hitpoints))
            if character.hitpoints <= 0:
                raise CharacterDeadException(character)
            c = ''
            while not c or c[0] not in 'CF':
                c = input('Do you want to (C)ontinue or (F)lee? ').upper().strip()
            if c and c[0] == 'F':
                print(
                    'You cowardly run back to the snowy plains after a little bump on '
                    'the head.')
                print('')
                return
    if 'Invisibility Potion' not in character.inventory:
        # Without the potion the guards never stop coming (itertools.repeat),
        # so this branch can only end in fleeing or death.
        print(DOWN_VISIBLE)
        for guard_name in itertools.chain(['Guard', 'Guard', 'Strong Guard'],
                                          itertools.repeat('Elite Guard')):
            print(
                'You are spotted by a {} who immediately rushes to defend his king!'.
                format(guard_name))
            if guard_name == 'Guard':
                guard = Monster(guard_name,
                                random.randint(1, 10),
                                random.randint(25, 50), 'stabs you with his spear',
                                'stabs at you with his spear', 1, 10)
            elif guard_name == 'Strong Guard':
                guard = Monster(guard_name,
                                random.randint(10, 20),
                                random.randint(25, 50), 'hits you with his battle axe',
                                'swings his battle axe at you', 2, 20)
            else:
                guard = Monster(guard_name,
                                random.randint(40, 80),
                                random.randint(50, 100),
                                'smashes you with his war hammer',
                                'swings his war hammer at you', 5, 50)
            if not proceed_after_fight(character, guard):
                return
    # Reached only with the potion in inventory; it is consumed here.
    character.inventory.remove('Invisibility Potion')
    print(DOWN_INVISIBLE.format(name=character.name))
    evil_king = Monster('Bad King', 100, 50, 'hits you with his enchanted staff',
                        'swings at you with his enchanted staff', 5, 10)
    if not proceed_after_fight(character, evil_king):
        return
    print(ENDING.format(name=character.name))
    sys.exit(0)
def select_path(character):
    """Main game loop: heal the player, prompt for a direction and dispatch
    to the matching area handler until the game ends (death or victory)."""
    while True:
        print(DIRECTION_CHOICE)
        # Returning to the clearing fully heals the player.
        character.hitpoints = character.max_hitpoints
        c = ''
        while not c or c[0] not in 'NESWDP':
            c = input('Do you go (N)orth (E)ast (S)outh (W)est (D)own '
                      'or (P)rint Character Information? ').upper().strip()
        if c[0] == 'N':
            go_north(character)
        elif c[0] == 'E':
            go_east(character)
        elif c[0] == 'S':
            go_south(character)
        elif c[0] == 'W':
            go_west(character)
        elif c[0] == 'D':
            go_down(character)
        elif c[0] == 'P':
            print(character)
            print('')
def main():
    """Entry point: create a character and play until victory or death."""
    try:
        character = select_character()
        print(WELCOME_TEXT.format(name=character.name))
        select_path(character)
    except CharacterDeadException:
        # Death anywhere in the game unwinds to here.
        print("You died. Try again and maybe you'll get lucky.")
# Script entry point: start from a cleared screen.
if __name__ == '__main__':
    clear()
    main()
| 31.174281 | 104 | 0.651162 | 2,644 | 18,424 | 4.475794 | 0.238654 | 0.038449 | 0.01014 | 0.015379 | 0.163427 | 0.114163 | 0.069461 | 0.052391 | 0.036843 | 0.027632 | 0 | 0.016367 | 0.250543 | 18,424 | 590 | 105 | 31.227119 | 0.840672 | 0.003908 | 0 | 0.229581 | 0 | 0.002208 | 0.458859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039735 | false | 0.002208 | 0.00883 | 0.004415 | 0.094923 | 0.143488 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcfb1989c996cbe167fe37e33ab960b7feff5dd5 | 7,649 | py | Python | wax/modules/online_supervised_learner_test.py | eserie/wax-ml | 9cf92ff5c41ea681fd3eaaf4560b3380f986ee1e | [
"MIT",
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 42 | 2021-06-14T16:27:54.000Z | 2022-03-23T09:51:42.000Z | wax/modules/online_supervised_learner_test.py | eserie/wax-ml | 9cf92ff5c41ea681fd3eaaf4560b3380f986ee1e | [
"MIT",
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-10-01T12:45:29.000Z | 2021-10-03T18:06:39.000Z | wax/modules/online_supervised_learner_test.py | eserie/wax-ml | 9cf92ff5c41ea681fd3eaaf4560b3380f986ee1e | [
"MIT",
"ECL-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2021-06-11T12:32:41.000Z | 2022-02-17T16:13:15.000Z | # Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""We implement an online learning non-stationary linear regression problem.
We go there progressively by showing how a linear regression problem can be cast
into an online learning problem thanks to the `OnlineSupervisedLearner` module.
Then, in order to tackle a non-stationary linear regression problem (i.e. with a weight that can vary in time)
we reformulate the problem into a reinforcement learning problem that we implement with the `GymFeedBack` module of WAX-ML.
We then need to define an "agent" and an "environment" using simple functions or modules:
- The agent is responsible for learning the weights of its internal linear model.
- The environment is responsible for generating labels and evaluating the agent's reward metric.
We experiment with a non-stationary environment that returns the sign of the linear regression parameters at a given time step,
known only to the environment.
We will see that doing this is very simple with the WAX-ML tools and that the functional workflow it adopts
allows us, each time we increase the complexity, to reuse the previously implemented transformations.
In this journey, we will use:
- Haiku basic linear module `hk.Linear`.
- Optax stochastic gradient descent optimizer: `sgd`.
- WAX-ML modules: `OnlineSupervisedLearner`, `Lag`, `GymFeedBack`
- WAX-ML helper functions: `dynamic_unroll`, `jit_init_apply`
"""
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from jax.tree_util import tree_map
from wax.compile import jit_init_apply
from wax.modules import GymFeedback, Lag, OnlineSupervisedLearner
from wax.unroll import unroll
@jit_init_apply
@hk.transform_with_state
def linear_model(x):
    """Single linear layer without bias (y = x @ w), as a jitted Haiku transform."""
    return hk.Linear(output_size=1, with_bias=False)(x)
def test_static_model():
    """Fit nothing -- just check linear_model unrolls over a batch and that
    an (untrained) model has non-zero loss."""
    # First let's implement a simple linear regression
    # Let's generate a batch of data:
    seq = hk.PRNGSequence(42)
    T = 100
    N = 3
    X = jax.random.normal(next(seq), (T, N))
    w_true = jnp.ones(N)

    params, state = linear_model.init(next(seq), X[0])
    linear_model.apply(params, state, None, X[0])

    Y_pred = unroll(linear_model, rng=next(seq))(X)
    assert Y_pred.shape == (T, 1)

    noise = jax.random.normal(next(seq), (T,))
    Y = X.dot(w_true) + noise
    # NOTE(review): Y has shape (T,) while Y_pred is (T, 1), so Y - Y_pred
    # broadcasts to (T, T); Y_pred[:, 0] was probably intended.  The
    # positivity assertion below holds either way -- confirm intent.
    mean_loss = ((Y - Y_pred) ** 2).sum(axis=1).mean()
    assert mean_loss > 0
def generate_many_observations(T=300, sigma=1.0e-2, rng=None):
    """Generate a batch of linear-regression observations.

    Targets follow ``Y = X @ w_true + noise`` with ``w_true = ones(3)``.

    Args:
        T: number of time steps (rows).
        sigma: standard deviation of the additive Gaussian noise.
        rng: optional JAX PRNG key; defaults to ``PRNGKey(42)``.

    Returns:
        Tuple ``(X, Y)`` with shapes ``(T, 3)`` and ``(T,)``.
    """
    rng = jax.random.PRNGKey(42) if rng is None else rng
    # Split the key so features and noise are independent draws (the
    # original reused the same key for both, correlating them) and drop
    # the duplicated noise computation.
    rng_x, rng_noise = jax.random.split(rng)
    X = jax.random.normal(rng_x, (T, 3))
    noise = sigma * jax.random.normal(rng_noise, (T,))
    w_true = jnp.ones(3)
    Y = X.dot(w_true) + noise
    return (X, Y)
def test_online_model():
    """Online (step-by-step) training of the linear model with SGD."""
    opt = optax.sgd(1e-3)

    @jax.jit
    def loss(y_pred, y):
        # Mean squared error between prediction and target.
        return jnp.mean(jnp.square(y_pred - y))

    @jit_init_apply
    @hk.transform_with_state
    def learner(x, y):
        return OnlineSupervisedLearner(linear_model, opt, loss)(x, y)

    seq = hk.PRNGSequence(42)

    # Generate data.
    T = 300
    X, Y = generate_many_observations(T)

    # Unroll the learner over the whole sequence.
    # (Removed dead code: an unused `x0, y0 = tree_map(...)` extraction
    # of the first observation.)
    (output, info) = unroll(learner, rng=next(seq))(X, Y)

    # One loss value per time step; parameter history is non-empty.
    assert len(info.loss) == T
    assert len(info.params["linear"]["w"])
def linear_regression_agent(obs):
    """Agent half of the Gym feedback loop.

    Unpacks an ``(x, y)`` observation and feeds it to an online
    supervised learner wrapping the shared ``linear_model``.
    """
    features, target = obs

    optimizer = optax.sgd(1e-3)

    @jax.jit
    def squared_error(y_pred, y):
        return jnp.mean(jnp.square(y_pred - y))

    learner = OnlineSupervisedLearner(linear_model, optimizer, squared_error)
    return learner(features, target)
def stationary_linear_regression_env(y_pred, raw_obs):
    """Environment half of the Gym feedback loop (stationary case).

    Builds targets from fixed true parameters ``w_true = -ones(3)`` and
    rewards the agent with the squared error of its prediction against
    the lagged (previous-step) target.
    """
    # Only the environment knows the true value of the parameters.
    true_weights = -jnp.ones(3)

    # The environment has its own loss definition.
    @jax.jit
    def env_loss(y_pred, y):
        return jnp.mean(jnp.square(y_pred - y))

    # The raw observation contains the features and the generative noise.
    features, noise = raw_obs

    # Generate the targets and the observation passed back to the agent.
    targets = features @ true_weights + noise
    obs = (features, targets)

    # Compare the agent's prediction with the previous target value.
    lagged_target = Lag(1)(targets)
    reward = env_loss(y_pred, lagged_target)

    info = {}
    return reward, obs, info
def generate_many_raw_observations(T=300, sigma=1.0e-2, rng=None):
    """Generate raw observations ``(X, noise)`` for the Gym environments.

    Unlike ``generate_many_observations``, the targets are *not* computed
    here: the environment builds them from its own (hidden) parameters.

    Args:
        T: number of time steps.
        sigma: standard deviation of the generative noise.
        rng: optional JAX PRNG key; defaults to ``PRNGKey(42)``.

    Returns:
        Tuple ``(X, noise)`` with shapes ``(T, 3)`` and ``(T,)``.
    """
    rng = jax.random.PRNGKey(42) if rng is None else rng
    # Use independent keys for features and noise (the original reused the
    # same key for both, making the two draws statistically dependent).
    rng_x, rng_noise = jax.random.split(rng)
    X = jax.random.normal(rng_x, (T, 3))
    noise = sigma * jax.random.normal(rng_noise, (T,))
    return (X, noise)
def test_online_recast_as_reinforcement_learning_pb():
    """Recast online supervised learning as a Gym feedback problem.

    Observations ``obs = (x, y)`` are (features, target) tuples, while the
    raw observations ``raw_obs = (x, noise)`` hold the features and the
    generative noise consumed by the environment.
    """

    @hk.transform_with_state
    def gym_fun(raw_obs):
        return GymFeedback(
            linear_regression_agent,
            stationary_linear_regression_env,
            return_action=True,
        )(raw_obs)

    n_steps = 300
    raw_observations = generate_many_raw_observations(n_steps)
    rng = jax.random.PRNGKey(42)

    # Unroll the feedback loop; `skip_first=True` drops the first step,
    # hence n_steps - 1 outputs below.
    gym_output, gym_info = unroll(gym_fun, rng=rng, skip_first=True)(
        raw_observations,
    )

    assert len(gym_output.reward) == n_steps - 1
    assert len(gym_info.agent.loss) == n_steps - 1
    assert len(gym_info.agent.params["linear"]["w"]) == n_steps - 1
class NonStationaryEnvironment(hk.Module):
    """Environment whose true regression parameters flip sign over time.

    Keeps a ``step`` counter in Haiku state; while ``step < 2000`` the true
    weights are ``-ones(3)``, afterwards they are ``+ones(3)``.
    """

    def __call__(self, action, raw_obs):
        # Persistent step counter, initialised to 0 on the first call.
        step = hk.get_state("step", [], init=lambda *_: 0)
        # Only the environment knows the true value of the parameters.
        # At step 2000 we flip the sign of the true parameters!
        # NOTE(review): this uses the operand-interleaved `hk.cond` calling
        # convention (pred, true_operand, true_fun, false_operand,
        # false_fun) — confirm it matches the installed Haiku version.
        w_true = hk.cond(
            step < 2000,
            step,
            lambda step: -jnp.ones(3),
            step,
            lambda step: jnp.ones(3),
        )

        # The environment has its own loss definition.
        @jax.jit
        def loss(y_pred, y):
            return jnp.mean(jnp.square(y_pred - y))

        # The raw observation contains the features and the generative noise.
        x, noise = raw_obs
        # Generate targets from the (hidden) time-varying parameters.
        y = x @ w_true + noise
        obs = (x, y)
        # Evaluate the agent's prediction against the lagged
        # (previous-step) target.
        y_previous = Lag(1)(y)
        y_pred = action
        reward = loss(y_pred, y_previous)
        # Advance and persist the step counter.
        step += 1
        hk.set_state("step", step)
        info = {}
        return reward, obs, info
def test_non_stationary_environement():
    """Run the Gym feedback loop against the non-stationary environment.

    The agent must adapt online to the environment's change of parameters.
    """

    @hk.transform_with_state
    def gym_fun(raw_obs):
        return GymFeedback(
            linear_regression_agent, NonStationaryEnvironment(), return_action=True
        )(raw_obs)

    n_steps = 300
    raw_observations = generate_many_raw_observations(n_steps)
    rng = jax.random.PRNGKey(42)

    # Unroll the simulation, also returning the final Haiku state.
    (gym_output, gym_info), final_state = unroll(
        gym_fun, return_final_state=True, skip_first=True, rng=rng
    )(raw_observations)

    # `skip_first=True` drops the first step, hence n_steps - 1 outputs.
    assert len(gym_output.reward) == n_steps - 1
    assert len(gym_info.agent.loss) == n_steps - 1
    assert len(gym_info.agent.params["linear"]["w"]) == n_steps - 1
| 31.093496 | 127 | 0.676298 | 1,142 | 7,649 | 4.410683 | 0.236427 | 0.01489 | 0.011912 | 0.011912 | 0.415922 | 0.389915 | 0.341672 | 0.335517 | 0.302164 | 0.302164 | 0 | 0.01403 | 0.226566 | 7,649 | 245 | 128 | 31.220408 | 0.83739 | 0.377173 | 0 | 0.534351 | 0 | 0 | 0.006145 | 0 | 0 | 0 | 0 | 0 | 0.076336 | 1 | 0.137405 | false | 0 | 0.061069 | 0.068702 | 0.312977 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
fcfc2b1fb4d4ad2e27520f6caefbdcadcd160c6c | 2,814 | py | Python | src/cvhelpers/visualization/objects.py | yewzijian/RegTR | 64e5b3f0ccc1e1a11b514eb22734959d32e0cec6 | [
"MIT"
] | 25 | 2022-03-28T06:26:16.000Z | 2022-03-30T14:21:24.000Z | src/cvhelpers/visualization/objects.py | yewzijian/RegTR | 64e5b3f0ccc1e1a11b514eb22734959d32e0cec6 | [
"MIT"
] | null | null | null | src/cvhelpers/visualization/objects.py | yewzijian/RegTR | 64e5b3f0ccc1e1a11b514eb22734959d32e0cec6 | [
"MIT"
] | 2 | 2022-03-29T09:37:50.000Z | 2022-03-30T06:26:35.000Z | """Functions to create objects to add to the visualizer"""
import numpy as np
import torch
from .vtk_object import VTKObject
def _convert_torch_to_numpy(arr):
"""If arr is torch.Tensor, return the numpy equivalent, else return arr
as it is"""
if isinstance(arr, torch.Tensor):
arr = arr.detach().cpu().numpy()
return arr
def create_point_cloud(xyz: np.ndarray, colors=None, cmap=None, color_norm=None,
                       pt_size=1.0, alpha=1.0):
    """Create a point-cloud object from an array of coordinates.

    Args:
        xyz: array (NumPy or torch) whose first three columns are the
            X, Y, Z coordinates; any extra columns are ignored here.
        colors: optional per-point colors, forwarded to ``SetColors``.
        cmap: optional colormap used when mapping ``colors``.
        color_norm: optional color normalization for the colormap.
        pt_size: rendered point size.
        alpha: opacity in [0, 1]; values below 1 enable transparency.

    Returns:
        VTKObject which encapsulates the point sources and actors.
    """
    points = _convert_torch_to_numpy(xyz)

    obj = VTKObject()
    obj.CreateFromArray(points[:, :3])
    if colors is not None:
        obj.SetColors(colors, cmap, color_norm)
    if alpha < 1.0:
        obj.actor.GetProperty().SetOpacity(alpha)
    obj.actor.GetProperty().SetPointSize(pt_size)
    return obj
def create_hedgehog_actor(xyz, normals, scale=1.0):
    """Create a hedgehog actor: line glyphs along per-point normals.

    Args:
        xyz: array of point coordinates (NumPy or torch).
        normals: array of per-point normal vectors (NumPy or torch).
        scale: length scale applied to the normal glyphs.

    Returns:
        VTKObject with the hedgehog pipeline set up.
    """
    # Accept torch tensors, like the other constructors in this module
    # (the original only handled NumPy input here).
    xyz = _convert_torch_to_numpy(xyz)
    normals = _convert_torch_to_numpy(normals)
    obj = VTKObject()
    obj.CreateFromArray(xyz)
    obj.AddNormals(normals)
    obj.SetupPipelineHedgeHog(scale)
    return obj
def create_axes(length):
    """Create coordinate-system axes of the specified length."""
    axes = VTKObject()
    axes.CreateAxes(length)
    return axes
def create_sphere(origin, r=1.0, color=None):
    """Create a sphere centered at *origin* = (x, y, z) with radius *r*.

    ``origin`` may be a torch tensor; it is converted to NumPy first.
    """
    center = _convert_torch_to_numpy(origin)
    sphere = VTKObject()
    sphere.CreateSphere(center, r, color)
    return sphere
def create_cylinder(origin, r=1.0, h=1.0):
    """Create a cylinder with given origin (x, y, z), radius r and height h.

    Returns:
        VTKObject wrapping the cylinder source and actor.
    """
    # Accept torch tensors for the origin, matching create_sphere
    # (the original only handled NumPy input here).
    origin = _convert_torch_to_numpy(origin)
    obj = VTKObject()
    obj.CreateCylinder(origin, r, h)
    return obj
def create_plane(normal=None, origin=None):
    """Create a plane, optionally oriented by *normal* and placed at *origin*.

    Note: ``SetActorScale`` can be used to scale the extent of the plane.
    """
    plane = VTKObject()
    plane.CreatePlane(normal, origin)
    return plane
def create_box(bounds):
    """Create a box with the given bounds = [xmin, xmax, ymin, ymax, zmin, zmax]."""
    box = VTKObject()
    box.CreateBox(bounds)
    return box
def create_line(p1, p2):
    """Create a 3D line segment from p1 = [x1, y1, z1] to p2 = [x2, y2, z2]."""
    line = VTKObject()
    line.CreateLine(p1, p2)
    return line
def create_lines(lines, line_color=(1.0, 1.0, 1.0), line_width=1):
    """Create multiple 3D line segments.

    Args:
        lines: list/array of lines (NumPy or torch), each element being
            ``[x1, y1, z1, x2, y2, z2]``.
        line_color: RGB color shared by all the lines.
        line_width: rendered width of the lines.
    """
    segments = _convert_torch_to_numpy(lines)
    obj = VTKObject()
    obj.CreateLines(segments, line_color, line_width)
    return obj
| 26.299065 | 80 | 0.672708 | 417 | 2,814 | 4.453237 | 0.33813 | 0.01077 | 0.072698 | 0.077544 | 0.054927 | 0.019386 | 0 | 0 | 0 | 0 | 0 | 0.021838 | 0.218905 | 2,814 | 106 | 81 | 26.54717 | 0.823021 | 0.344705 | 0 | 0.333333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.185185 | false | 0 | 0.055556 | 0 | 0.425926 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |