Schema (column | dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
---
hexsha `c6934a03c692a0936dbdefc83d05a9252b05f1c4` | size 6,795 | ext py | lang Python
max_stars: `files/area.py` @ joaovpassos/USP-Programs (head `09ddb8aed238df1f1a2e80afdc202ac4538daf41`), licenses ["MIT"], count 2, 2021-05-26T19:14:16.000Z → 2021-05-27T21:14:24.000Z
max_issues: `files/area.py` @ joaovpassos/USP-Programs (head `09ddb8aed238df1f1a2e80afdc202ac4538daf41`), licenses ["MIT"], count null, null → null
max_forks: `files/area.py` @ joaovpassos/USP-Programs (head `09ddb8aed238df1f1a2e80afdc202ac4538daf41`), licenses ["MIT"], count null, null → null
content:
```python
# -*- coding: utf-8 -*-
#------------------------------------------------------------------
# Constants you may use in this exercise
# In scientific notation, 1.0e-6 is the same as 0.000001 (10 to the power -6)
EPSILON = 1.0e-6

#------------------------------------------------------------------
# The import below lets the program use every function from the math module,
# for example math.exp and math.sin.
import math

#------------------------------------------------------------------
def main():
    '''() -> None
    Modify this function by writing other tests.
    '''
    # pick whichever function you want and assign it to f_x
    f_x = math.cos
    # f_x = math.sin
    # f_x = math.exp        # etc., to integrate other functions.
    # f_x = identidade      # identidade() is defined further below
    # f_x = circunferencia  # circunferencia() is defined further below
    # f_x = exp             # exp() is defined further below
    print("Start of tests.")
    # Tests of f_x
    nome = f_x.__name__  # name of the f_x in use
    print(f"The f_x function used in the tests is {nome}()")
    print(f"Value of f_x(0.0) = {f_x(0.0)}")
    print(f"Value of f_x(0.5) = {f_x(0.5)}")
    print(f"Value of f_x(1.0) = {f_x(1.0)}")
    # tests of the area_por_retangulos function
    print()
    print("Area by rectangles:")
    a, b = 0, 1  # interval [a,b]
    k = 1        # number of rectangles
    n = 3        # number of iterations
    i = 0
    while i < n:
        print(f"test {i+1}: for {k} rectangles over the interval [{a}, {b}]:")
        print(f"    approximate area = {area_por_retangulos(f_x, a, b, k):g}")
        k *= 10
        i += 1
    # tests of the area_aproximada function
    print()
    print("Approximate area:")
    a, b = 0, 1  # interval
    k, area = area_aproximada(f_x, a, b)  # number of rectangles and approximation
    print(f"test 1: for eps = {EPSILON:g} and interval [{a}, {b}]:")
    print(f"    with {k} rectangles the area is approximately = {area:g}")
    eps = 1e-6  # acceptable relative error
    i = 1
    n = 4
    while i < n:
        eps *= 10  # loosen the acceptable relative error
        k, area = area_aproximada(f_x, a, b, eps)
        print(f"test {i+1}: for eps = {eps:g} and interval [{a}, {b}]:")
        print(f"    with {k} rectangles the area is approximately = {area:g}")
        i += 1
    print("End of tests.")

#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = x
def identidade(x):
    ''' (float) -> float
    RECEIVES a value x.
    RETURNS the value received.
    EXAMPLES:
    In [6]: identidade(3.14)
    Out[6]: 3.14
    In [7]: identidade(1)
    Out[7]: 1
    In [8]: identidade(-3)
    Out[8]: -3
    '''
    return x

#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = sqrt(1 - x*x)
def circunferencia(x):
    ''' (float) -> float
    RECEIVES a value x.
    RETURNS a value y >= 0 such that (x,y) is a point on the circle of
    radius 1 centered at (0,0).
    PRE-CONDITION: the function assumes x is a value with -1 <= x <= 1.
    EXAMPLES:
    In [9]: circunferencia(-1)
    Out[9]: 0.0
    In [10]: circunferencia(0)
    Out[10]: 1.0
    In [11]: circunferencia(1)
    Out[11]: 0.0
    '''
    y = math.sqrt(1 - x*x)
    return y

#------------------------------------------------------------------
# HELPER FUNCTION FOR TESTING: the function f(x) = e^x
def exp(x):
    ''' (float) -> float
    RECEIVES a value x.
    RETURNS (an approximation of) exp(x).
    EXAMPLES:
    In [12]: exp(1)
    Out[12]: 2.718281828459045
    In [13]: exp(0)
    Out[13]: 1.0
    In [14]: exp(-1)
    Out[14]: 0.36787944117144233
    '''
    y = math.exp(x)
    return y  # return math.exp(x)

#------------------------------------------------------------------
#
def erro_rel(y, x):
    ''' (float, float) -> float
    RECEIVES two numbers x and y.
    RETURNS the relative error between them.
    EXAMPLES:
    In [1]: erro_rel(0, 0)
    Out[1]: 0.0
    In [2]: erro_rel(0.01, 0)
    Out[2]: 1.0
    In [3]: erro_rel(1.01, 1.0)
    Out[3]: 0.01
    '''
    if x == 0 and y == 0:
        return 0.0
    elif x == 0:
        return 1.0
    erro = (y-x)/x
    if erro < 0:
        return -erro
    return erro

#------------------------------------------------------------------
def area_por_retangulos(f, a, b, k):
    '''(function, float, float, int) -> float
    RECEIVES a function f, two numbers a and b, and an integer k.
    RETURNS an approximation of the area under f over the interval [a,b]
    using k rectangles.
    PRE-CONDITION: the function assumes f is continuous on [a,b] and that
    f(x) >= 0 for every x with a <= x <= b.
    EXAMPLES:
    In [15]: area_por_retangulos(identidade, 0, 1, 1)
    Out[15]: 0.5
    In [16]: area_por_retangulos(circunferencia, -1, 0, 1)
    Out[16]: 0.8660254037844386
    '''
    # write your solution below
    # remove or modify the line below as you wish
    base = (b-a)/k
    i = 0
    x_meio = ((b-a)/(2*k)) + a
    soma = 0
    while i < k:
        area = f(x_meio)*base
        x_meio += base
        i += 1
        soma += area
    return soma

#------------------------------------------------------------------
def area_aproximada(f, a, b, eps=EPSILON):
    '''(function, float, float, float) -> int, float
    RECEIVES a function f and three numbers a, b, eps.
    RETURNS an integer k and an approximation of the area under f over the
    interval [a,b] using k rectangles.
    The value of k must be the __smallest power__ of 2 such that the relative
    error of the returned approximation is smaller than eps.
    Hence the possible values of k are 1, 2, 4, 8, 16, 32, 64, ...
    PRE-CONDITION: the function assumes f is continuous on [a,b] and that
    f(x) >= 0 for every x with a <= x <= b.
    EXAMPLES:
    In [22]: area_aproximada(identidade, 1, 2)
    Out[22]: (2, 1.5)
    In [23]: area_aproximada(exp, 1, 2, 16)
    Out[23]: (2, 4.6224728167337865)
    '''
    # write the body of the function
    # remove or modify the line below as you wish
    k = 1
    sub = eps + 1
    while sub >= eps:
        sub = erro_rel(area_por_retangulos(f, a, b, k*2), area_por_retangulos(f, a, b, k))
        k *= 2
    return k, area_por_retangulos(f, a, b, k)  # to return an int and a float,
                                               # just separate them with a comma

#######################################################
###                      END                        ###
#######################################################
#
# DO NOT MODIFY THE LINES BELOW
#
# This if runs main() only when this is the module from
# which execution was started.
if __name__ == '__main__':
    main()
```
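For reference, a minimal standalone sketch of the same midpoint-rectangle idea, independent of the exercise file above; the test function and tolerance here are illustrative assumptions:

```python
import math

def midpoint_area(f, a, b, k):
    # Sum k midpoint-rectangle areas over [a, b].
    base = (b - a) / k
    return sum(f(a + base * (i + 0.5)) * base for i in range(k))

def converge_area(f, a, b, eps=1e-6):
    # Double the rectangle count until two successive estimates agree to eps.
    k, prev = 1, midpoint_area(f, a, b, 1)
    while True:
        k *= 2
        cur = midpoint_area(f, a, b, k)
        if prev != 0 and abs((cur - prev) / prev) < eps:
            return k, cur
        prev = cur

print(converge_area(math.sin, 0.0, math.pi))  # area under sin on [0, pi] is 2
```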
avg_line_length 31.901408 | max_line_length 98 | alphanum_fraction 0.512288
qsc_code quality signals: num_words 990, num_chars 6,795, mean_word_length 3.449495, frac_words_unique 0.206061, frac_chars_top_2grams 0.015227, top_3grams 0.025769, top_4grams 0.019034, frac_chars_dupe_5grams 0.296047, dupe_6grams 0.283455, dupe_7grams 0.246266, dupe_8grams 0.185066, dupe_9grams 0.120059, dupe_10grams 0.106589, frac_chars_replacement_symbols 0, frac_chars_digital 0.053119, frac_chars_whitespace 0.26858, size_file_byte 6,795, num_lines 212, num_chars_line_max 99, num_chars_line_mean 32.051887, frac_chars_alphabet 0.634004, frac_chars_comments 0.60574, cate_xml_start 0, frac_lines_dupe_lines 0.202703, cate_autogen 0, frac_lines_long_string 0.013514, frac_chars_string_length 0.252511, frac_chars_long_word_length 0.011416, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0.009434, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.094595, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.013514, frac_lines_simplefunc 0, score_lines_no_logic 0.22973, frac_lines_print 0.216216
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c693df3548964a87b3411b88e56a453a7a597f59` | size 4,421 | ext py | lang Python
max_stars: `gribmagic/unity/download/engine.py` @ earthobservations/gribmagic (head `59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e`), licenses ["MIT"], count 9, 2020-12-18T13:26:45.000Z → 2022-03-03T16:46:33.000Z
max_issues: `gribmagic/unity/download/engine.py` @ earthobservations/gribmagic (head `59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e`), licenses ["MIT"], count 12, 2020-12-19T18:32:51.000Z → 2021-10-30T17:48:35.000Z
max_forks: `gribmagic/unity/download/engine.py` @ earthobservations/gribmagic (head `59c647d3ca3ecaf2d720837ba0cec9cc2aa2546e`), licenses ["MIT"], count 2, 2020-12-19T08:02:03.000Z → 2021-10-30T16:01:02.000Z
content:
"""
Handle download of NWP data from remote servers.
"""
import logging
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, List
import requests
from gribmagic.unity.configuration.constants import (
KEY_COMPRESSION,
KEY_LOCAL_FILE_PATHS,
KEY_REMOTE_FILE_PATHS,
)
from gribmagic.unity.configuration.model import WeatherModelSettings
from gribmagic.unity.download.decoder import (
decode_bunzip,
decode_identity,
decode_tarfile,
)
from gribmagic.unity.enumerations import WeatherModel
from gribmagic.unity.model import DownloadItem
session = requests.Session()
logger = logging.getLogger(__name__)
DEFAULT_NUMBER_OF_PARALLEL_PROCESSES = 4
def run_download(
weather_model: WeatherModel,
model_file_lists: Dict[str, List[str]],
parallel_download: bool = False,
n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES,
) -> None:
"""
Download weather forecasts data.
"""
model = WeatherModelSettings(weather_model)
if model.info[KEY_COMPRESSION] == "tar":
return __download_tar_file(
weather_model,
model_file_lists[KEY_REMOTE_FILE_PATHS][0],
model_file_lists[KEY_LOCAL_FILE_PATHS],
)
if parallel_download:
download_specifications = [
DownloadItem(model=weather_model, local_file=local_file_path, remote_url=remote_file)
for remote_file, local_file_path in zip(
model_file_lists[KEY_REMOTE_FILE_PATHS],
model_file_lists[KEY_LOCAL_FILE_PATHS],
)
]
return __download_parallel(download_specifications, n_processes)
else:
results = []
for remote_file, local_file_path in zip(
model_file_lists[KEY_REMOTE_FILE_PATHS],
model_file_lists[KEY_LOCAL_FILE_PATHS],
):
item = DownloadItem(
model=weather_model, local_file=local_file_path, remote_url=remote_file
)
results.append(__download(item))
return results
def __download(item: DownloadItem) -> None:
"""
base download function to manage single file download
Args:
download_specification: Tuple with
- WeatherModel
- local_file_path
- remote_file_path
Returns:
Stores a file in temporary directory
"""
model = WeatherModelSettings(item.model)
# Compute source URL and target file.
url = item.remote_url
target_file = Path(item.local_file)
if target_file.exists():
logger.info(f"Skipping existing file {target_file}")
return target_file
logger.info(f"Downloading {url} to {target_file}")
try:
response = session.get(url, stream=True)
response.raise_for_status()
except Exception as ex:
logger.warning(f"Failed accessing resource {url}: {ex}")
return
if not target_file.parent.is_dir():
target_file.parent.mkdir(exist_ok=True)
if model.info[KEY_COMPRESSION] == "bz2":
decode_bunzip(response.raw, target_file)
else:
decode_identity(response.raw, target_file)
return target_file
def __download_parallel(
download_specifications: List[DownloadItem],
n_processes: int = DEFAULT_NUMBER_OF_PARALLEL_PROCESSES,
) -> None:
"""
Script to run download in parallel
Args:
download_specifications: List of Tuple with
- WeatherModel
- local_file_path
- remote_file_path
n_processes: Number of parallel processes used for download
Returns:
None
"""
with ThreadPoolExecutor(max_workers=n_processes) as executor:
results = executor.map(__download, download_specifications)
executor.shutdown(wait=True)
return results
def __download_tar_file(
weather_model: WeatherModel, url: str, local_file_list: List[Path]
) -> None:
"""
Downloads a weather forecast package with one tar archive
Args:
weather_model:
remote_file:
local_file_list:
Returns:
"""
model = WeatherModelSettings(weather_model)
try:
response = session.get(url, stream=True)
response.raise_for_status()
except Exception as ex:
logger.warning(f"Failed accessing resource {url}: {ex}")
return
return decode_tarfile(response.raw, local_file_list)
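The module's parallel path is a standard ThreadPoolExecutor fan-out over a download function. A self-contained sketch of the same pattern, with hypothetical URLs and no gribmagic dependencies:

```python
import logging
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

import requests

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
session = requests.Session()

def download(spec):
    # spec is a (url, target_path) pair; skip work if the target exists.
    url, target = spec
    target = Path(target)
    if target.exists():
        return target
    try:
        response = session.get(url, stream=True, timeout=30)
        response.raise_for_status()
    except Exception as ex:
        logger.warning("Failed accessing resource %s: %s", url, ex)
        return None
    target.parent.mkdir(parents=True, exist_ok=True)
    with open(target, "wb") as f:
        for chunk in response.iter_content(chunk_size=1 << 16):
            f.write(chunk)
    return target

specs = [
    ("https://example.org/a.grib2", "/tmp/a.grib2"),  # hypothetical URLs
    ("https://example.org/b.grib2", "/tmp/b.grib2"),
]
with ThreadPoolExecutor(max_workers=4) as executor:
    # Materialize inside the with-block so all futures resolve before shutdown.
    results = list(executor.map(download, specs))
print(results)
```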
avg_line_length 27.459627 | max_line_length 97 | alphanum_fraction 0.680615
qsc_code quality signals: num_words 509, num_chars 4,421, mean_word_length 5.607073, frac_words_unique 0.259332, frac_chars_top_2grams 0.050456, top_3grams 0.034338, top_4grams 0.035739, frac_chars_dupe_5grams 0.357043, dupe_6grams 0.29117, dupe_7grams 0.29117, dupe_8grams 0.269096, dupe_9grams 0.269096, dupe_10grams 0.201121, frac_chars_replacement_symbols 0, frac_chars_digital 0.000899, frac_chars_whitespace 0.24542, size_file_byte 4,421, num_lines 160, num_chars_line_max 98, num_chars_line_mean 27.63125, frac_chars_alphabet 0.854616, frac_chars_comments 0.163538, cate_xml_start 0, frac_lines_dupe_lines 0.340426, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.042254, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.042553, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.106383, frac_lines_simplefunc 0, score_lines_no_logic 0.234043, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c696cbe9a74a6a3f3db61104f5e94acb0ded96e3` | size 2,195 | ext py | lang Python
max_stars: `tests/main.py` @ Antojitos/guacamole (head `50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152`), licenses ["MIT"], count 3, 2015-10-30T13:09:13.000Z → 2021-02-17T19:12:37.000Z
max_issues: `tests/main.py` @ amessinger/guacamole (head `50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152`), licenses ["MIT"], count 5, 2015-10-30T12:53:05.000Z → 2015-12-14T15:20:04.000Z
max_forks: `tests/main.py` @ Antojitos/guacamole (head `50b4da41a45b2b4dd4f63f6c6cc68bfcf8563152`), licenses ["MIT"], count 1, 2015-10-28T08:44:48.000Z → 2015-10-28T08:44:48.000Z
content:
```python
import sys
import os
import shutil
import filecmp
import json
import unittest

# Path hack. http://stackoverflow.com/questions/6323860/sibling-package-imports
sys.path.insert(0, os.path.abspath('../guacamole'))
import guacamole


class GuacamoleTestCase(unittest.TestCase):
    def setUp(self):
        guacamole.app.config['TESTING'] = True
        self.app = guacamole.app.test_client()
        self.original_file_name = 'image.jpg'
        self.original_file_path = os.path.join('tests/fixtures', self.original_file_name)
        # Open the binary fixture in 'rb' mode; text mode would break on JPEG bytes.
        self.original_file = open(self.original_file_path, 'rb')
        self.original_file_tags = 'Mexican, food,fiesta'
        if not os.path.exists('files'):
            os.makedirs('files')

    def tearDown(self):
        shutil.rmtree('files')

    def test_post_file(self):
        """Testing file upload"""
        response = self.app.post('/files/',
                                 buffered=True,
                                 content_type='multipart/form-data',
                                 data={
                                     'file': (self.original_file, self.original_file_name)
                                 })
        uploaded_file_meta = json.loads(response.data)
        uploaded_file_path = os.path.join('files', uploaded_file_meta['uri'])
        assert '200' in response.status
        assert os.path.isfile(uploaded_file_path)
        assert filecmp.cmp(self.original_file_path, uploaded_file_path)

    def test_post_file_with_tags(self):
        """Testing file upload with tags"""
        response = self.app.post('/files/',
                                 buffered=True,
                                 content_type='multipart/form-data',
                                 data={
                                     'file': (self.original_file, self.original_file_name),
                                     'tags': self.original_file_tags
                                 })
        uploaded_file_meta = json.loads(response.data)
        uploaded_file_path = os.path.join('files', uploaded_file_meta['uri'])
        assert '200' in response.status
        assert '["mexican", "food", "fiesta"]' in response.data
        assert os.path.isfile(uploaded_file_path)
        assert filecmp.cmp(self.original_file_path, uploaded_file_path)


if __name__ == '__main__':
    unittest.main()
```
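The multipart-upload pattern used here can be demonstrated without the guacamole package; a minimal sketch assuming only Flask is installed (the `/files/` endpoint below is a stand-in, not the guacamole API):

```python
import io
import unittest

from flask import Flask, request, jsonify

# A stand-in app; the real tests above exercise the guacamole package instead.
app = Flask(__name__)

@app.route('/files/', methods=['POST'])
def upload():
    f = request.files['file']
    return jsonify({'uri': f.filename})

class UploadTestCase(unittest.TestCase):
    def setUp(self):
        app.config['TESTING'] = True
        self.client = app.test_client()

    def test_post_file(self):
        # Werkzeug accepts a (stream, filename) tuple for multipart uploads.
        data = {'file': (io.BytesIO(b'\xff\xd8fake-jpeg'), 'image.jpg')}
        response = self.client.post('/files/', data=data,
                                    content_type='multipart/form-data')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'image.jpg', response.data)

if __name__ == '__main__':
    unittest.main()
```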
avg_line_length 32.279412 | max_line_length 89 | alphanum_fraction 0.626424
qsc_code quality signals: num_words 260, num_chars 2,195, mean_word_length 5.057692, frac_words_unique 0.292308, frac_chars_top_2grams 0.118631, top_3grams 0.158175, top_4grams 0.060837, frac_chars_dupe_5grams 0.48365, dupe_6grams 0.469962, dupe_7grams 0.469962, dupe_8grams 0.469962, dupe_9grams 0.469962, dupe_10grams 0.469962, frac_chars_replacement_symbols 0, frac_chars_digital 0.008589, frac_chars_whitespace 0.257403, size_file_byte 2,195, num_lines 68, num_chars_line_max 90, num_chars_line_mean 32.279412, frac_chars_alphabet 0.79816, frac_chars_comments 0.058314, cate_xml_start 0, frac_lines_dupe_lines 0.408163, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.097715, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.142857
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.081633, cate_var_zero false, frac_lines_pass 0.020408, frac_lines_import 0.142857, frac_lines_simplefunc 0, score_lines_no_logic 0.244898, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c697934e43005813bbf25f5936b378004c77b6ac` | size 324 | ext py | lang Python
max_stars: `settings.py` @ musahibrahimali/flasket-api (head `d212cb84817dee90e9a53015b2811468a4db75ff`), licenses ["MIT"], count 7, 2018-02-23T17:41:04.000Z → 2022-03-09T12:20:56.000Z
max_issues: `settings.py` @ musahibrahimali/flasket-api (head `d212cb84817dee90e9a53015b2811468a4db75ff`), licenses ["MIT"], count null, null → null
max_forks: `settings.py` @ musahibrahimali/flasket-api (head `d212cb84817dee90e9a53015b2811468a4db75ff`), licenses ["MIT"], count 1, 2021-06-02T17:23:45.000Z → 2021-06-02T17:23:45.000Z
content:
```python
# Flask settings
FLASK_DEBUG = True # Do not use debug mode in production
# SQLAlchemy settings
SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = True
# Flask-Restplus settings
SWAGGER_UI_DOC_EXPANSION = 'list'
RESTPLUS_VALIDATE = True
RESTPLUS_MASK_SWAGGER = False
ERROR_404_HELP = False
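A module like this is typically handed to Flask via `app.config`; a minimal sketch of one common loading pattern (the app object and import path here are assumptions, not part of the file above):

```python
from flask import Flask

app = Flask(__name__)
# Pull the uppercase names from the settings module into app.config.
app.config.from_object('settings')

print(app.config['SQLALCHEMY_DATABASE_URI'])  # sqlite:///db.sqlite
print(app.config['RESTPLUS_VALIDATE'])        # True
```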
```
avg_line_length 23.142857 | max_line_length 57 | alphanum_fraction 0.805556
qsc_code quality signals: num_words 43, num_chars 324, mean_word_length 5.767442, frac_words_unique 0.674419, frac_chars_top_2grams 0, top_3grams 0, top_4grams 0, frac_chars_dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.010638, frac_chars_whitespace 0.12963, size_file_byte 324, num_lines 13, num_chars_line_max 58, num_chars_line_mean 24.923077, frac_chars_alphabet 0.868794, frac_chars_comments 0.290123, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.102222, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0, frac_lines_simplefunc 0, score_lines_no_logic 0, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c69d613e92541912c5d1aa1169340677fbcf4a96` | size 5,437 | ext py | lang Python
max_stars: `mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py` @ mlpiper/mlpiper (head `0fd2b6773f970c831038db47bf4920ada21a5f51`), licenses ["Apache-2.0"], count 7, 2019-04-08T02:31:55.000Z → 2021-11-15T14:40:49.000Z
max_issues: `mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py` @ mlpiper/mlpiper (head `0fd2b6773f970c831038db47bf4920ada21a5f51`), licenses ["Apache-2.0"], count 31, 2019-02-22T22:23:26.000Z → 2021-08-02T17:17:06.000Z
max_forks: `mlops/parallelm/mlops/ml_metrics_stat/ml_stat_object_creator.py` @ mlpiper/mlpiper (head `0fd2b6773f970c831038db47bf4920ada21a5f51`), licenses ["Apache-2.0"], count 8, 2019-03-15T23:46:08.000Z → 2020-02-06T09:16:02.000Z
content:
```python
import numpy as np

from parallelm.mlops.mlops_exception import MLOpsStatisticsException
from parallelm.mlops.stats.graph import Graph
from parallelm.mlops.stats.multi_line_graph import MultiLineGraph
from parallelm.mlops.stats.single_value import SingleValue
from parallelm.mlops.stats.table import Table
from parallelm.mlops.stats_category import StatCategory


class MLStatObjectCreator(object):

    @staticmethod
    def get_single_value_stat_object(name, single_value):
        """
        Create a Single Value stat object from a numerical value.
        :param name: Name of stat
        :param single_value: single numeric value
        :return: MLOps Single Value object, time series category
        """
        if isinstance(single_value, (int, float)):
            category = StatCategory.TIME_SERIES
            single_value = \
                SingleValue() \
                    .name(name) \
                    .value(single_value) \
                    .mode(category)
            return single_value, category
        else:
            raise MLOpsStatisticsException \
                ("For outputting {}, {} should be of type numeric but got {}."
                 .format(name, single_value, type(single_value)))

    @staticmethod
    def get_table_value_stat_object(name, list_2d, match_header_pattern=None):
        """
        Create a Table Value stat object from a list of lists, where the first
        inner list is the header and the first element of each remaining list
        is that row's header.
        :param name: Name of stat
        :param list_2d: 2d representation of the table to output
        :param match_header_pattern: If not None, the table header must match the pattern provided
        :return: MLOps Table Value object, general stat category
        """
        category = StatCategory.GENERAL
        try:
            header = list(map(lambda x: str(x).strip(), list_2d[0]))
            if match_header_pattern is not None:
                assert header == match_header_pattern, \
                    "headers {} is not matching expected headers pattern {}" \
                        .format(header, match_header_pattern)
            len_of_header = len(header)
            table_object = Table().name(name).cols(header)
            for index in range(1, len(list_2d)):
                assert len(list_2d[index]) - 1 == len_of_header, \
                    "length of row value does not match with headers length"
                row_title = str(list_2d[index][0]).strip()
                row_value = list(map(lambda x: str(x).strip(), list_2d[index][1:]))
                table_object.add_row(row_title, row_value)
            return table_object, category
        except Exception as e:
            raise MLOpsStatisticsException \
                ("error happened while outputting table object from list_2d: {}. error: {}".format(list_2d, e))

    @staticmethod
    def get_graph_value_stat_object(name, x_data, y_data, x_title, y_title, legend):
        """
        Create a graph object from the given data.
        :param name: Name of stat
        :param x_data: X axis data. It has to be a numeric list.
        :param y_data: Y axis data. It has to be a numeric list.
        :param x_title: X axis title
        :param y_title: Y axis title
        :param legend: Legend of the Y axis
        :return: MLOps Graph Value object, general stat category
        """
        category = StatCategory.GENERAL
        if legend is None:
            legend = "{} vs {}".format(y_title, x_title)
        try:
            graph_object = Graph() \
                .name(name) \
                .set_x_series(list(x_data)) \
                .add_y_series(label=legend, data=list(y_data))
            graph_object.x_title(x_title)
            graph_object.y_title(y_title)
            return graph_object, category
        except Exception as e:
            raise MLOpsStatisticsException \
                ("error happened while outputting graph object. error: {}".format(e))

    @staticmethod
    def get_multiline_stat_object(name, list_value, labels=None):
        """
        Create a multiline object from a list of values. If labels are not
        provided, the legend is the index of each value, i.e. 0, 1, ...
        :param name: Name of stat
        :param list_value: list of values to embed in the multiline value.
        :param labels: optional labels, one per value.
        :return: MLOps Multiline Value object, time series stat category
        """
        if isinstance(list_value, list) or isinstance(list_value, np.ndarray):
            category = StatCategory.TIME_SERIES
            # if labels are not provided they will be 0, 1, ..., len(list) - 1
            if labels is None:
                labels = range(len(list_value))
            labels = list(map(lambda x: str(x).strip(), labels))
            if len(labels) == len(list_value):
                multiline_object = MultiLineGraph() \
                    .name(name) \
                    .labels(labels)
                multiline_object.data(list(list_value))
                return multiline_object, category
            else:
                raise MLOpsStatisticsException(
                    "size of labels associated with list of values to get does not match. {}!={}"
                    .format(len(labels), len(list_value)))
        else:
            raise MLOpsStatisticsException(
                "list_value has to be of type list or nd array but got {}".format(type(list_value)))
```
avg_line_length 41.823077 | max_line_length 161 | alphanum_fraction 0.609343
qsc_code quality signals: num_words 659, num_chars 5,437, mean_word_length 4.878604, frac_words_unique 0.194234, frac_chars_top_2grams 0.041058, top_3grams 0.033593, top_4grams 0.03577, frac_chars_dupe_5grams 0.18196, dupe_6grams 0.168896, dupe_7grams 0.153966, dupe_8grams 0.129393, dupe_9grams 0.093935, dupe_10grams 0.055365, frac_chars_replacement_symbols 0, frac_chars_digital 0.0056, frac_chars_whitespace 0.310281, size_file_byte 5,437, num_lines 129, num_chars_line_max 162, num_chars_line_mean 42.147287, frac_chars_alphabet 0.851733, frac_chars_comments 0.227699, cate_xml_start 0, frac_lines_dupe_lines 0.291139, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.109068, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.025316
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.050633, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.088608, frac_lines_simplefunc 0, score_lines_no_logic 0.202532, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c69fe4b03acf538832512321d83a32c7f8cc326f` | size 480 | ext py | lang Python
max_stars: `awsflow/lambdas/demo.py` @ algorithmia-algorithms/awsflow (head `927698c27e57377dbe8094c71d5b0c36548b0937`), licenses ["MIT"], count 12, 2019-04-06T14:59:29.000Z → 2020-04-14T21:02:23.000Z
max_issues: `awsflow/lambdas/demo.py` @ vaquarkhan/awsflow (head `59f9001972aec2bac60a97d174b97f96689360ce`), licenses ["MIT"], count null, null → null
max_forks: `awsflow/lambdas/demo.py` @ vaquarkhan/awsflow (head `59f9001972aec2bac60a97d174b97f96689360ce`), licenses ["MIT"], count 3, 2019-07-30T17:11:14.000Z → 2020-02-17T20:39:25.000Z
content:
```python
from awsflow.tools.emr import logging
from awsflow.version import __version__


def hello_world(event, context):
    """
    Test function, does nothing.
    :param event: AWS Lambda function event
    :param context: AWS Lambda function context
    :return: dict echoing the parameters and the awsflow version
    """
    message = 'event={} context={}'.format(event, context)
    logging.info('Hello World! Message is {}'.format(message))
    return {
        'parameters': message,
        'awsflow-version': __version__
    }
```
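Lambda handlers like this can be exercised locally by passing plain stand-ins for event and context; a minimal sketch (the handler is inlined here so the snippet runs without the awsflow package):

```python
import logging

logging.basicConfig(level=logging.INFO)
__version__ = "0.0.0-local"  # stand-in for awsflow.version.__version__

def hello_world(event, context):
    # Mirrors the handler above, minus the awsflow imports.
    message = 'event={} context={}'.format(event, context)
    logging.info('Hello World! Message is {}'.format(message))
    return {'parameters': message, 'awsflow-version': __version__}

result = hello_world({'key': 'value'}, None)  # dummy event, no context
print(result)
```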
avg_line_length 25.263158 | max_line_length 62 | alphanum_fraction 0.666667
qsc_code quality signals: num_words 53, num_chars 480, mean_word_length 5.867925, frac_words_unique 0.471698, frac_chars_top_2grams 0.115756, top_3grams 0.115756, top_4grams 0, frac_chars_dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0, frac_chars_whitespace 0.220833, size_file_byte 480, num_lines 18, num_chars_line_max 63, num_chars_line_mean 26.666667, frac_chars_alphabet 0.831551, frac_chars_comments 0.254167, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.212121, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.111111, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.222222, frac_lines_simplefunc 0, score_lines_no_logic 0.444444, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c6a0b2e6f13cc83e001ace2dc43eeb51890ba31f` | size 1,074 | ext py | lang Python
max_stars: `weather/tools.py` @ yulinliu101/DeepTP (head `bc4f9adad6dda6c32e58026dda7863e0cb2a6072`), licenses ["MIT"], count 46, 2018-09-23T02:08:02.000Z → 2022-03-19T15:56:15.000Z
max_issues: `weather/tools.py` @ yulinliu101/DeepTP (head `bc4f9adad6dda6c32e58026dda7863e0cb2a6072`), licenses ["MIT"], count 6, 2018-12-02T09:04:56.000Z → 2021-09-30T12:14:53.000Z
max_forks: `weather/tools.py` @ yulinliu101/DeepTP (head `bc4f9adad6dda6c32e58026dda7863e0cb2a6072`), licenses ["MIT"], count 27, 2018-11-19T18:17:07.000Z → 2021-08-28T17:07:11.000Z
content:
```python
'''
Module author: Jarry Gabriel
Date: June, July 2016

Some algorithms were written by: Malivai Luce, Helene Piquet

This module provides various tools.
'''

from pyproj import Proj, Geod
import numpy as np

# Projections
wgs84 = Proj("+init=EPSG:4326")
epsg3857 = Proj("+init=EPSG:3857")
g = Geod(ellps='WGS84')


# Returns pressure from altitude (ft)
def press(alt):
    z = alt / 3.28084
    return 1013.25 * (1 - (0.0065 * z) / 288.15) ** 5.255


# Returns the closest lvl from levels with altitude (alt)
def proxilvl(alt, lvls):
    p = press(alt)
    levels = np.array(sorted(lvls.keys()))
    return levels[np.abs(levels - p).argmin()]

# def proxy(val, lvl1, lvl2):
#     if (abs(val - lvl1) < abs(val - lvl2)):
#         return lvl1
#     else:
#         return lvl2
#     p = press(alt)
#     levels = sorted(lvls.keys())
#     if p < levels[0]:
#         return levels[0]
#     else:
#         for i, el in enumerate(levels[1:]):
#             if p < el:
#                 return proxy(p, levels[i-1], el)
#         return levels[-1]
```
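press() is the ICAO standard-atmosphere formula: feet are converted to metres, then the pressure 1013.25 * (1 - 0.0065*z/288.15)^5.255 hPa is returned. A dependency-free sketch with an assumed pressure-level table:

```python
def press(alt_ft):
    # ICAO standard atmosphere: altitude in feet -> pressure in hPa.
    z = alt_ft / 3.28084
    return 1013.25 * (1 - (0.0065 * z) / 288.15) ** 5.255

def proxilvl(alt_ft, lvls):
    # Pick the pressure level closest to the pressure at this altitude.
    p = press(alt_ft)
    return min(lvls, key=lambda level: abs(level - p))

levels = {250: None, 500: None, 700: None, 850: None}  # assumed level set (hPa)
print(round(press(35000.0), 1))   # ~238.4 hPa at 35,000 ft
print(proxilvl(35000.0, levels))  # -> 250
```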
avg_line_length 25.571429 | max_line_length 58 | alphanum_fraction 0.57635
qsc_code quality signals: num_words 151, num_chars 1,074, mean_word_length 4.099338, frac_words_unique 0.556291, frac_chars_top_2grams 0.038772, top_3grams 0.038772, top_4grams 0.048465, frac_chars_dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.075227, frac_chars_whitespace 0.282123, size_file_byte 1,074, num_lines 42, num_chars_line_max 59, num_chars_line_mean 25.571429, frac_chars_alphabet 0.727626, frac_chars_comments 0.58473, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.091623, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.166667, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.166667, frac_lines_simplefunc 0, score_lines_no_logic 0.5, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c6a0ccd39f3cb516016d54f1a50913914e43bf5d` | size 1,315 | ext py | lang Python
max_stars: `src/database/report.py` @ moevm/nosql1h19-report-stats (head `ab1dc80858df2d8b44489dc7ca900371b1fcc80f`), licenses ["MIT"], count null, null → null
max_issues: `src/database/report.py` @ moevm/nosql1h19-report-stats (head `ab1dc80858df2d8b44489dc7ca900371b1fcc80f`), licenses ["MIT"], count null, null → null
max_forks: `src/database/report.py` @ moevm/nosql1h19-report-stats (head `ab1dc80858df2d8b44489dc7ca900371b1fcc80f`), licenses ["MIT"], count null, null → null
content:
```python
from docx import Document


class Report:
    def __init__(self, docx_text, meta, text_processor):
        self.document = Document(docx_text)
        self.date = self.document.core_properties.modified
        self.title = meta['title']
        self.author = meta['author']
        self.group = int(meta['group'])
        self.department = meta['department']
        self.course = int(meta['course'])
        self.faculty = meta['faculty']
        raw_text = ' '.join([par.text for par in self.document.paragraphs])
        processed_text = text_processor.process(raw_text)
        self.text = processed_text['text']
        self.text.pop('clean_text', None)  # Do not store the cleaned text
        self.words = processed_text['words']
        self.words.pop('words', None)  # Do not store all the words
        self.symbols = processed_text['symbols']

    def serialize_db(self):
        serialized_document = {
            'title': self.title,
            'date': self.date,
            'author': self.author,
            'group': self.group,
            'department': self.department,
            'course': self.course,
            'faculty': self.faculty,
            'text': self.text,
            'words': self.words,
            'symbols': self.symbols
        }
        return serialized_document
```
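Report only needs an object with a process(raw_text) method returning the three expected keys, so it can be exercised with a stub. A sketch to run in the same module as the class above, assuming python-docx is installed (Document(None) appears to open a blank default document; the stub and meta values are made up):

```python
class StubProcessor:
    def process(self, raw_text):
        # Minimal shape expected by Report: three dicts under fixed keys.
        words = raw_text.split()
        return {
            'text': {'clean_text': raw_text, 'n_paragraphs': 1},
            'words': {'words': words, 'n_words': len(words)},
            'symbols': {'n_symbols': len(raw_text)},
        }

meta = {'title': 'Demo', 'author': 'A. Student', 'group': '1234',
        'department': 'CS', 'course': '3', 'faculty': 'FCST'}

report = Report(None, meta, StubProcessor())
print(report.serialize_db())
```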
avg_line_length 32.073171 | max_line_length 75 | alphanum_fraction 0.579468
qsc_code quality signals: num_words 143, num_chars 1,315, mean_word_length 5.195804, frac_words_unique 0.321678, frac_chars_top_2grams 0.043069, top_3grams 0.048452, top_4grams 0.048452, frac_chars_dupe_5grams 0, dupe_6grams 0, dupe_7grams 0, dupe_8grams 0, dupe_9grams 0, dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0, frac_chars_whitespace 0.298859, size_file_byte 1,315, num_lines 40, num_chars_line_max 76, num_chars_line_mean 32.875, frac_chars_alphabet 0.805857, frac_chars_comments 0.034221, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.102605, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.0625, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.03125, frac_lines_simplefunc 0, score_lines_no_logic 0.15625, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c6a371ecbe5a163fba368a97852b226ecc2b76c6` | size 19,724 | ext py | lang Python
max_stars: `transmission/PFM_v24.py` @ zarppy/MUREIL_2014 (head `25ba16554ce8f614b9337e0fffce75da3fa259a4`), licenses ["MIT"], count null, null → null
max_issues: `transmission/PFM_v24.py` @ zarppy/MUREIL_2014 (head `25ba16554ce8f614b9337e0fffce75da3fa259a4`), licenses ["MIT"], count null, null → null
max_forks: `transmission/PFM_v24.py` @ zarppy/MUREIL_2014 (head `25ba16554ce8f614b9337e0fffce75da3fa259a4`), licenses ["MIT"], count null, null → null
content:
```python
# -*- coding: utf-8 -*-
#
# Copyright (C) University of Melbourne 2012
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#

import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
import math


class PowerFlow():
    """The power flow class, which can serve as a transmission model for
    an energy system model. In the current version it can return the amount
    of failed transmission. It will also gain the ability to be updated
    via a function, in order to introduce changeability.
    """

    def __init__(self):
        """Initiates a class member of the power flow class.
        """
        self.b_inverse_matrix = np.matrix(1)
        self.a_d_matrix = np.matrix(1)
        self.no_edges = 0
        self.total_unresolved_flow = 0
        self.flow_series = []
        self.line_dictionary = {}
        self.node_dictonary = {}
        # Maybe expendable; right now used for the update method
        self.y_bus = []
        self.a_matrix = []
        self.capacity_matrix = []
        self.no_nodes = 0

    def calculate_flow(self, supply):
        """Calculates the power flow for the current supply set, which is
        provided by the txmultigenerator. The method
        create_transmission_network needs to be run before calculating the
        flow. No output is returned, but the total_unresolved_flow is changed.

        Inputs:
            supply: a timeseries of supply vectors
        Output:
            none
        """
        # Loop through the full time period
        t = 0
        while t < len(supply):
            supply_vector = np.matrix(np.array(supply[t])[1:])
            # Calculate the nodal phase angles
            phase_angle_vector = self.b_inverse_matrix * supply_vector.T
            # Calculate the line flows
            flow_vector = self.a_d_matrix * phase_angle_vector
            # Save flow in timeseries for later evaluation
            self.flow_series.append(flow_vector)
            t += 1

    def analyse_network(self):
        """Analysis of the network. Returns the maximum flows that were assigned
        to the lines and a capacity that would be sufficient to transport 90%
        of the flows. These values can later be used to see where capacity
        was exceeded, to recalculate the dispatch and eventually make network
        updates.

        Input:
            None, uses self.flow_series as basis of calculation
        Output:
            line_maxLoad_in: maximum flow in timeseries in defined direction on line
            line_maxLoad_ag: maximum flow in timeseries against defined direction on line
            line_load90_in: 90% percentile flow in timeseries in defined direction on line
            line_load90_ag: 90% percentile flow in timeseries against defined direction on line
        """
        # Divide flow_array into one with the positive and one with the negative values
        flow_array_pos = np.clip(np.array(self.flow_series), 0, np.Infinity)
        flow_array_neg = -1 * (np.clip(np.array(self.flow_series), -np.Infinity, 0))
        # Calculate the max load that occurred on the transmission line in the timeseries
        line_maxLoad_in = flow_array_pos.max(axis=0)
        line_maxLoad_ag = flow_array_neg.max(axis=0)
        # Calculate the capacity that would be sufficient for 90% of the loads
        # on that line for the loads of that timeseries
        line_load90_in = np.percentile(flow_array_pos, 90, axis=0)
        line_load90_ag = np.percentile(flow_array_neg, 90, axis=0)
        return line_maxLoad_in, line_maxLoad_ag, line_load90_in, line_load90_ag

    def create_transmission_network(self, y_bus, a_matrix, capacity_matrix):
        """Prepares the transmission network for the flow calculation. Sets
        up the matrices needed for the flow calculation, namely b_inverse_matrix
        and the a_d_matrix. Further creates a line_dictionary with information
        about origin node, destination node, capacity and admittance value for
        each line.

        N: number of nodes
        M: number of lines

        Input:
            y_bus: (NxN) nodal admittance matrix with
                y-bus(i,j) = -Y(i,j) for non-diagonal values and
                y-bus(i,i) = Y(i,i) + sum(Y(i,j): for j:(1,N) & j != i)
                In this simple DC power flow model the resistance is
                neglected, therefore the admittance y = -j * b with b
                being the susceptance.
            a_matrix: (MxN) node-arc incidence matrix, with
                a(m,n) = 1 if arc m has its starting point in node n
                a(m,n) = -1 if arc m has its end point in node n
                a(m,n) = 0 otherwise
            capacity_matrix: (NxN) matrix of the line capacities
                capacity(i,j) = transfer capacity between node i and node j
                (note: capacity(i,j) can be different from capacity(j,i))
        Output:
            none, but saves the mentioned results in self. variables
        """
        self.no_edges = len(a_matrix)
        self.no_nodes = len(a_matrix[1])
        self.y_bus = y_bus
        self.a_matrix = a_matrix
        self.capacity_matrix = capacity_matrix
        # Calculate b_inverse_matrix
        # first calculate b_prime_matrix, which is the negative of the y-bus,
        # but the diagonal elements are replaced by the sum of the b-values
        # in the row of the respective element.
        # shape: (N-1) x (N-1)
        b_prime_matrix = -1 * y_bus[1:, 1:]
        for i, row in enumerate(b_prime_matrix):
            # replace diagonal elements with the sum of all other elements of the row
            b_prime_matrix[i][i] = sum(y_bus[i+1]) - y_bus[i+1][i+1]
        self.b_inverse_matrix = np.linalg.inv(b_prime_matrix)
        # Calculate D-matrix and capacity_vector and create line_dictionary
        d_matrix = np.zeros((self.no_edges, self.no_edges))
        i = 0
        while i < self.no_edges:
            row = list(a_matrix[i])
            orig_id = row.index(1)
            dest_id = row.index(-1)
            d_matrix[i][i] = y_bus[orig_id][dest_id]
            self.line_dictionary[i] = {'origin': orig_id, 'destination': dest_id,
                                       'capacity_in': capacity_matrix[orig_id][dest_id],
                                       'capacity_ag': capacity_matrix[dest_id][orig_id],
                                       'Y': y_bus[orig_id][dest_id]}
            i = i + 1
        # Calculate a_d_matrix
        # := transfer admittance matrix
        # (M x N-1)
        # with a_d(line i, node j) := -b(i) if j is end node of line
        #                              b(i) if j is start node of line
        self.a_d_matrix = np.matrix(d_matrix) * np.matrix(a_matrix)[:, 1:]

    def update_transmission_network(self, origin_id, dest_id, cap_incr_in,
                                    cap_incr_ag, new_y):
        """Updates the capacity and y-bus of the transmission network
        according to the input values and returns a cost value.

        ### PRELIMINARY VERSION ###
        to do:
            - better cost calculation, based on different types of updates,
              maybe just 2 or 3 different options with a fixed capacity increase
            - ...

        Inputs:
            origin_id: id of starting node
            dest_id: id of end node
            cap_incr_in: capacity update in direction of line
            cap_incr_ag: capacity update against direction of line
            new_y: new admittance value for y_bus
        Output:
            cost: investment cost for capacity increase
        """
        cost = 0
        new_capacity_matrix = self.capacity_matrix
        new_y_bus = self.y_bus
        new_a_matrix = self.a_matrix
        # Check if nodes existed before
        if origin_id < self.no_nodes and dest_id < self.no_nodes:
            # Calculate distance for cost calculation with the Haversine formula
            lat1, lat2, lon1, lon2 = map(math.radians,
                                         [self.node_dictonary[dest_id]['y_loc'],
                                          self.node_dictonary[origin_id]['y_loc'],
                                          self.node_dictonary[origin_id]['x_loc'],
                                          self.node_dictonary[dest_id]['x_loc']])
            dlon = abs(lon1 - lon2)
            dlat = abs(lat1 - lat2)
            a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) \
                * (math.sin(dlon/2))**2
            c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
            distance = 6373 * c
            # Check further if the connection existed before
            if self.capacity_matrix[origin_id][dest_id] != 0 or \
                    self.capacity_matrix[dest_id][origin_id] != 0:
                # Simple case: increase capacity and update Y
                new_capacity_matrix[origin_id][dest_id] += cap_incr_in
                new_capacity_matrix[dest_id][origin_id] += cap_incr_ag
                new_y_bus[origin_id][dest_id] = new_y
                new_y_bus[dest_id][origin_id] = new_y
                cost = 1.4 * distance
            else:
                # New line, but existing nodes
                new_capacity_matrix[origin_id][dest_id] += cap_incr_in
                new_capacity_matrix[dest_id][origin_id] += cap_incr_ag
                new_y_bus[origin_id][dest_id] = new_y
                new_y_bus[dest_id][origin_id] = new_y
                cost = 1.4 * distance
                # Update a_matrix
                a_row = [0] * self.no_nodes
                a_row[origin_id] = 1
                a_row[dest_id] = -1
                new_a_matrix.append(a_row)
                # Calculate costs
                cost = max(cap_incr_in, cap_incr_ag) * 1.5
        else:
            # New nodes must be added.
            # supply vector length must be adjusted
            cost = 1
        self.create_transmission_network(new_y_bus, new_a_matrix, new_capacity_matrix)
        return cost

    def draw_network(self, flow_vector, supply, filename):
        """Creates a plot of the network with the flows using NetworkX.
        """
        g = nx.DiGraph()
        label1 = {}       # node label
        label_node2 = {}
        label2 = {}       # line label
        pos1 = {}
        line_attributes = {}
        # Preparing the nodes
        for node in self.node_dictonary:
            g.add_node(node)
            pos1[node] = (self.node_dictonary[node]["x_loc"],
                          self.node_dictonary[node]["y_loc"])
            label1[node] = self.node_dictonary[node]["name"][:3]
            node += 1
        # Adjusting positions to improve readability
        # the if below is an easy way to only adjust node positions if the NEM
        # network is used, otherwise leave them as they are
        if self.node_dictonary[0]['name'] == "MELBOURNE":
            pos1[1] = (pos1[1][0], pos1[1][1]-1)           # LATROBE
            pos1[2] = (pos1[2][0]-0.1, pos1[2][1]+0.4)     # CVIC
            pos1[5] = (pos1[5][0]-1.3, pos1[5][1]-1)       # GEELONG
            pos1[6] = (pos1[6][0]-0.9, pos1[6][1]-0.4)     # SWVIC
            pos1[8] = (pos1[8][0]+0.7, pos1[8][1])         # SYDNEY
            pos1[10] = (pos1[10][0]-1, pos1[10][1]+0.3)    # DARPOINT
            pos1[11] = (pos1[11][0], pos1[11][1]+1)        # WAGGA
            pos1[12] = (pos1[12][0]+0.8, pos1[12][1])      # CANBERRA
            pos1[13] = (pos1[13][0]-0.8, pos1[13][1]+0.2)  # MTPIPER
            pos1[14] = (pos1[14][0]-0.7, pos1[14][1]+1.5)  # BAYSWATER
            pos1[15] = (pos1[15][0], pos1[15][1]+1.5)      # ARMIDALE
            pos1[16] = (pos1[16][0]+0.7, pos1[16][1]+1.3)  # ERARING
            pos1[17] = (pos1[17][0]+0.6, pos1[17][1]+0.9)  # BRISBANE
            pos1[18] = (pos1[18][0]-0.5, pos1[18][1]+0.3)  # TARONG
            pos1[19] = (pos1[19][0]-0.8, pos1[19][1])      # ROMA
        for node in self.node_dictonary:
            if supply[0][node] != 0:
                label_node2[node] = round(supply[0][node], 1)
        # Preparing the lines
        for line in self.line_dictionary:
            origin = self.line_dictionary[line]["origin"]
            dest = self.line_dictionary[line]["destination"]
            g.add_edge(origin, dest)
            line_tuppel = ((origin, dest))
            line_attributes[line_tuppel] = {}
            # Attributes
            # ---width
            if self.line_dictionary[line]['capacity_in'] > 10000:
                line_attributes[line_tuppel]['width'] = 20
            elif self.line_dictionary[line]['capacity_in'] > 6000:
                line_attributes[line_tuppel]['width'] = 15
            elif self.line_dictionary[line]['capacity_in'] > 2000:
                line_attributes[line_tuppel]['width'] = 11
            elif self.line_dictionary[line]['capacity_in'] > 500:
                line_attributes[line_tuppel]['width'] = 8
            else:
                line_attributes[line_tuppel]['width'] = 4
            # ---color & style
            if abs(flow_vector.item(line)) > 0.01:
                if abs(flow_vector.item(line))/self.line_dictionary[line]['capacity_in'] > 1.0:
                    line_attributes[line_tuppel]['color'] = 'red'
                    line_attributes[line_tuppel]['style'] = 'solid'
                elif abs(flow_vector.item(line))/self.line_dictionary[line]['capacity_in'] > 0.8:
                    line_attributes[line_tuppel]['color'] = 'orange'
                    line_attributes[line_tuppel]['style'] = 'solid'
                else:
                    line_attributes[line_tuppel]['color'] = 'green'
                    line_attributes[line_tuppel]['style'] = 'solid'
            else:
                line_attributes[line_tuppel]['color'] = 'black'
                line_attributes[line_tuppel]['style'] = 'dotted'
            # label with arrows for direction...
            if pos1[origin][0] < pos1[dest][0]:
                if flow_vector.item(line) > 0.001:
                    label2[(origin, dest)] = \
                        str(abs(round(flow_vector.item(line), 1))) + " >>" + \
                        "\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
                        ", " + str(self.line_dictionary[line]['Y']) + ")"
                elif flow_vector.item(line) < -0.001:
                    label2[(origin, dest)] = "<< " + \
                        str(abs(round(flow_vector.item(line), 1))) + \
                        "\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_ag']) + \
                        ", " + str(self.line_dictionary[line]['Y']) + ")"
                else:
                    label2[(origin, dest)] = str(abs(round(flow_vector.item(line), 1))) + \
                        "\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
                        ", " + str(self.line_dictionary[line]['Y']) + ")"
            else:
                if flow_vector.item(line) > 0.001:
                    label2[(origin, dest)] = "<< " + \
                        str(abs(round(flow_vector.item(line), 1))) + \
                        "\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
                        ", " + str(self.line_dictionary[line]['Y']) + ")"
                elif flow_vector.item(line) < -0.001:
                    label2[(origin, dest)] = \
                        str(abs(round(flow_vector.item(line), 1))) + " >>" + \
                        "\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_ag']) + \
                        ", " + str(self.line_dictionary[line]['Y']) + ")"
                else:
                    label2[(origin, dest)] = str(abs(round(flow_vector.item(line), 1))) + \
                        "\n"+str(line)+":(" + str(self.line_dictionary[line]['capacity_in']) + \
                        ", " + str(self.line_dictionary[line]['Y']) + ")"
        # draw graph
        plt.figure(1, figsize=(20, 25))
        nx.draw_networkx_nodes(g, pos=pos1,
                               with_labels=False,
                               node_color=(0, 0, 0.4),
                               node_size=1000)
        nx.draw_networkx_labels(g, pos=pos1,
                                labels=label1,
                                font_size=9,
                                font_color='white',
                                font_weight='bold')
        # Supply values as a box next to the node
        for node in label_node2:
            if label_node2[node] > 0:
                plt.text(pos1[node][0]-0.5, pos1[node][1]+0.3,
                         str(label_node2[node]),
                         size=10, weight='bold', stretch='condensed',
                         color='black', bbox=dict(facecolor='lightblue')
                         )
            else:
                plt.text(pos1[node][0]-0.4, pos1[node][1]+0.3,
                         str(label_node2[node]),
                         size=10, weight='bold', stretch='condensed',
                         color='black', bbox=dict(facecolor='orange')
                         )
        for edge in g.edges():
            nx.draw_networkx_edges(g, edgelist=[edge],
                                   pos=pos1,
                                   arrows=False,
                                   width=line_attributes[edge]['width'],
                                   edge_color=line_attributes[edge]['color'],
                                   style=line_attributes[edge]['style'])
        nx.draw_networkx_edge_labels(g, pos=pos1,
                                     edge_labels=label2,
                                     edge_text_pos=0.5,
                                     font_size=6,
                                     font_weight='bold')
        plt.savefig(filename + ".pdf")
```
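The flow calculation above is plain DC load flow: solve B'·θ = P for the phase angles at all nodes except the slack, then the line flows are D·A·θ. A self-contained sketch on an assumed 3-node ring (all susceptances 1 p.u., node 0 as slack):

```python
import numpy as np

# Assumed 3-node ring: lines 0-1, 1-2, 0-2, susceptance b = 1 p.u. each.
a_matrix = np.array([[1, -1, 0],   # node-arc incidence: +1 start, -1 end
                     [0, 1, -1],
                     [1, 0, -1]])
b_line = np.array([1.0, 1.0, 1.0])

# B' over the non-slack nodes (1 and 2): diagonal = sum of susceptances at
# the node, off-diagonal = -b(i,j).
b_prime = np.array([[2.0, -1.0],
                    [-1.0, 2.0]])

supply = np.array([-1.0, 0.4, 0.6])  # injections; the slack absorbs the balance

theta = np.linalg.solve(b_prime, supply[1:])      # phase angles at nodes 1, 2
flows = (np.diag(b_line) @ a_matrix[:, 1:]) @ theta

print(theta)  # [0.4667 0.5333]
print(flows)  # negative values flow against the line's defined direction
```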
avg_line_length 45.657407 | max_line_length 97 | alphanum_fraction 0.523575
qsc_code quality signals: num_words 2,400, num_chars 19,724, mean_word_length 4.143333, frac_words_unique 0.195417, frac_chars_top_2grams 0.035197, top_3grams 0.041633, top_4grams 0.044248, frac_chars_dupe_5grams 0.29998, dupe_6grams 0.238737, dupe_7grams 0.199718, dupe_8grams 0.177997, dupe_9grams 0.162912, dupe_10grams 0.150845, frac_chars_replacement_symbols 0, frac_chars_digital 0.03386, frac_chars_whitespace 0.374113, size_file_byte 19,724, num_lines 432, num_chars_line_max 98, num_chars_line_mean 45.657407, frac_chars_alphabet 0.771648, frac_chars_comments 0.311042, cate_xml_start 0, frac_lines_dupe_lines 0.251121, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.037636, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
qsc_codepython quality signals: cate_ast 1, frac_lines_func_ratio 0.026906, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.017937, frac_lines_simplefunc 0, score_lines_no_logic 0.058296, frac_lines_print 0
raw qsc_code_* / qsc_codepython_* columns: all 0 (frac_words_unique and frac_lines_string_concat null) | effective 1 | hits 0
---
hexsha `c6a5691106c675b51a0898624e8d7f4af7a6316d` | size 11,893 | ext py | lang Python
max_stars: `ecl/tests/unit/compute/v2/test_server.py` @ keiichi-hikita/eclsdk (head `c43afb982fd54eb1875cdc22d46044644d804c4a`), licenses ["Apache-2.0"], count 5, 2017-04-07T06:23:04.000Z → 2019-11-19T00:52:34.000Z
max_issues: `ecl/tests/unit/compute/v2/test_server.py` @ keiichi-hikita/eclsdk (head `c43afb982fd54eb1875cdc22d46044644d804c4a`), licenses ["Apache-2.0"], count 16, 2018-09-12T11:14:40.000Z → 2021-04-19T09:02:44.000Z
max_forks: `ecl/tests/unit/compute/v2/test_server.py` @ keiichi-hikita/eclsdk (head `c43afb982fd54eb1875cdc22d46044644d804c4a`), licenses ["Apache-2.0"], count 14, 2017-05-11T14:26:26.000Z → 2021-07-14T14:00:06.000Z
content:
```python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock
import testtools

from ecl.compute.v2 import server

IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
    'accessIPv4': '1',
    'accessIPv6': '2',
    'addresses': {'region': '3'},
    'created': '2015-03-09T12:14:57.233772',
    'flavorRef': '5',
    'flavor': {'id': 'FLAVOR_ID', 'links': {}},
    'hostId': '6',
    'id': IDENTIFIER,
    'imageRef': '8',
    'image': {'id': 'IMAGE_ID', 'links': {}},
    'links': '9',
    'metadata': {'key': '10'},
    'name': '11',
    'progress': 12,
    'tenant_id': '13',
    'status': '14',
    'updated': '2015-03-09T12:15:57.233772',
    'user_id': '16',
    'key_name': '17',
    'OS-DCF:diskConfig': '18',
    'OS-EXT-AZ:availability_zone': '19',
    'OS-EXT-STS:power_state': '20',
    'OS-EXT-STS:task_state': '21',
    'OS-EXT-STS:vm_state': '22',
    'os-extended-volumes:volumes_attached': '23',
    'OS-SRV-USG:launched_at': '2015-03-09T12:15:57.233772',
    'OS-SRV-USG:terminated_at': '2015-03-09T12:15:57.233772',
    'security_groups': '26',
    'adminPass': '27',
    'personality': '28',
    'block_device_mapping_v2': {'key': '29'},
    'os:scheduler_hints': {'key': '30'},
    'user_data': '31'
}


class TestServer(testtools.TestCase):

    def setUp(self):
        super(TestServer, self).setUp()
        self.resp = mock.Mock()
        self.resp.body = None
        self.resp.json = mock.Mock(return_value=self.resp.body)
        self.sess = mock.Mock()
        self.sess.post = mock.Mock(return_value=self.resp)

    def test_basic(self):
        sot = server.Server()
        self.assertEqual('server', sot.resource_key)
        self.assertEqual('servers', sot.resources_key)
        self.assertEqual('/servers', sot.base_path)
        self.assertEqual('compute', sot.service.service_type)
        self.assertTrue(sot.allow_create)
        self.assertTrue(sot.allow_get)
        self.assertTrue(sot.allow_update)
        self.assertTrue(sot.allow_delete)
        self.assertTrue(sot.allow_list)

        self.assertDictEqual({"image": "image",
                              "flavor": "flavor",
                              "name": "name",
                              "status": "status",
                              "host": "host",
                              "changes_since": "changes-since"},
                             sot._query_mapping._mapping)

    def test_make_it(self):
        sot = server.Server(**EXAMPLE)
        self.assertEqual(EXAMPLE['accessIPv4'], sot.access_ipv4)
        self.assertEqual(EXAMPLE['accessIPv6'], sot.access_ipv6)
        self.assertEqual(EXAMPLE['addresses'], sot.addresses)
        self.assertEqual(EXAMPLE['created'], sot.created_at)
        self.assertEqual(EXAMPLE['flavorRef'], sot.flavor_id)
        self.assertEqual(EXAMPLE['flavor'], sot.flavor)
        self.assertEqual(EXAMPLE['hostId'], sot.host_id)
        self.assertEqual(EXAMPLE['id'], sot.id)
        self.assertEqual(EXAMPLE['imageRef'], sot.image_id)
        self.assertEqual(EXAMPLE['image'], sot.image)
        self.assertEqual(EXAMPLE['links'], sot.links)
        self.assertEqual(EXAMPLE['metadata'], sot.metadata)
        self.assertEqual(EXAMPLE['name'], sot.name)
        self.assertEqual(EXAMPLE['progress'], sot.progress)
        self.assertEqual(EXAMPLE['tenant_id'], sot.project_id)
        self.assertEqual(EXAMPLE['status'], sot.status)
        self.assertEqual(EXAMPLE['updated'], sot.updated_at)
        self.assertEqual(EXAMPLE['user_id'], sot.user_id)
        self.assertEqual(EXAMPLE['key_name'], sot.key_name)
        self.assertEqual(EXAMPLE['OS-DCF:diskConfig'], sot.disk_config)
        self.assertEqual(EXAMPLE['OS-EXT-AZ:availability_zone'],
                         sot.availability_zone)
        self.assertEqual(EXAMPLE['OS-EXT-STS:power_state'], sot.power_state)
        self.assertEqual(EXAMPLE['OS-EXT-STS:task_state'], sot.task_state)
        self.assertEqual(EXAMPLE['OS-EXT-STS:vm_state'], sot.vm_state)
        self.assertEqual(EXAMPLE['os-extended-volumes:volumes_attached'],
                         sot.attached_volumes)
        self.assertEqual(EXAMPLE['OS-SRV-USG:launched_at'], sot.launched_at)
        self.assertEqual(EXAMPLE['OS-SRV-USG:terminated_at'],
                         sot.terminated_at)
        self.assertEqual(EXAMPLE['security_groups'], sot.security_groups)
        self.assertEqual(EXAMPLE['adminPass'], sot.admin_pass)
        self.assertEqual(EXAMPLE['adminPass'], sot.adminPass)
        self.assertEqual(EXAMPLE['personality'], sot.personality)
        self.assertEqual(EXAMPLE['block_device_mapping_v2'],
                         sot.block_device_mapping_v2)
        self.assertEqual(EXAMPLE['os:scheduler_hints'], sot.scheduler_hints)
        self.assertEqual(EXAMPLE['user_data'], sot.user_data)

    def test_detail(self):
        sot = server.ServerDetail()
        self.assertEqual('server', sot.resource_key)
        self.assertEqual('servers', sot.resources_key)
        self.assertEqual('/servers/detail', sot.base_path)
        self.assertEqual('compute', sot.service.service_type)
        self.assertFalse(sot.allow_create)
        self.assertFalse(sot.allow_get)
        self.assertFalse(sot.allow_update)
        self.assertFalse(sot.allow_delete)
        self.assertTrue(sot.allow_list)

    def test_change_password(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.change_password(self.sess, 'a'))
        url = 'servers/IDENTIFIER/action'
        body = {"changePassword": {"adminPass": "a"}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_reboot(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.reboot(self.sess, 'HARD'))
        url = 'servers/IDENTIFIER/action'
        body = {"reboot": {"type": "HARD"}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_force_delete(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.force_delete(self.sess))
        url = 'servers/IDENTIFIER/action'
        body = {'forceDelete': None}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_rebuild(self):
        sot = server.Server(**EXAMPLE)
        # Let the translate pass through, that portion is tested elsewhere
        sot._translate_response = lambda arg: arg
        result = sot.rebuild(self.sess, name='noo', admin_password='seekr3t',
                             image='http://image/1', access_ipv4="12.34.56.78",
                             access_ipv6="fe80::100",
                             metadata={"meta var": "meta val"},
                             personality=[{"path": "/etc/motd",
                                           "contents": "foo"}])
        self.assertIsInstance(result, server.Server)
        url = 'servers/IDENTIFIER/action'
        body = {
            "rebuild": {
                "name": "noo",
                "imageRef": "http://image/1",
                "adminPass": "seekr3t",
                "accessIPv4": "12.34.56.78",
                "accessIPv6": "fe80::100",
                "metadata": {"meta var": "meta val"},
                "personality": [{"path": "/etc/motd", "contents": "foo"}],
                "preserve_ephemeral": False
            }
        }
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_rebuild_minimal(self):
        sot = server.Server(**EXAMPLE)
        # Let the translate pass through, that portion is tested elsewhere
        sot._translate_response = lambda arg: arg
        result = sot.rebuild(self.sess, name='nootoo',
                             admin_password='seekr3two',
                             image='http://image/2')
        self.assertIsInstance(result, server.Server)
        url = 'servers/IDENTIFIER/action'
        body = {
            "rebuild": {
                "name": "nootoo",
                "imageRef": "http://image/2",
                "adminPass": "seekr3two",
                "preserve_ephemeral": False
            }
        }
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_resize(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.resize(self.sess, '2'))
        url = 'servers/IDENTIFIER/action'
        body = {"resize": {"flavorRef": "2",
                           "OS-DCF:diskConfig": "AUTO"}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_confirm_resize(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.confirm_resize(self.sess))
        url = 'servers/IDENTIFIER/action'
        body = {"confirmResize": None}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_revert_resize(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.revert_resize(self.sess))
        url = 'servers/IDENTIFIER/action'
        body = {"revertResize": None}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_create_image(self):
        sot = server.Server(**EXAMPLE)
        name = 'noo'
        metadata = {'nu': 'image', 'created': 'today'}
        self.assertIsNotNone(sot.create_image(self.sess, name, metadata))
        url = 'servers/IDENTIFIER/action'
        body = {"createImage": {'name': name, 'metadata': metadata}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_create_image_minimal(self):
        sot = server.Server(**EXAMPLE)
        name = 'noo'
        self.assertIsNone(self.resp.body, sot.create_image(self.sess, name))
        url = 'servers/IDENTIFIER/action'
        body = {"createImage": {'name': name}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=dict(sot.service), json=body, headers=headers)

    def test_add_security_group(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.add_security_group(self.sess, "group"))
        url = 'servers/IDENTIFIER/action'
        body = {"addSecurityGroup": {"name": "group"}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)

    def test_remove_security_group(self):
        sot = server.Server(**EXAMPLE)
        self.assertIsNone(sot.remove_security_group(self.sess, "group"))
        url = 'servers/IDENTIFIER/action'
        body = {"removeSecurityGroup": {"name": "group"}}
        headers = {'Accept': ''}
        self.sess.post.assert_called_with(
            url, endpoint_filter=sot.service, json=body, headers=headers)
```
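All of these tests follow one pattern: stub the transport with mock.Mock(), call the method under test, then assert on the recorded call. The pattern in isolation, standard-library only (the session and action names here are illustrative, not the eclsdk API):

```python
from unittest import mock

session = mock.Mock()
session.post = mock.Mock(return_value=mock.Mock(status_code=200))

def reboot(sess, server_id, reboot_type):
    # Unit under test: issues a server action over the injected session.
    url = 'servers/{}/action'.format(server_id)
    body = {'reboot': {'type': reboot_type}}
    return sess.post(url, json=body, headers={'Accept': ''})

reboot(session, 'IDENTIFIER', 'HARD')

# The mock records the call, so the exact request can be asserted afterwards.
session.post.assert_called_with('servers/IDENTIFIER/action',
                                json={'reboot': {'type': 'HARD'}},
                                headers={'Accept': ''})
print('assertion passed')
```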
| 38.739414
| 79
| 0.600858
| 1,320
| 11,893
| 5.287121
| 0.201515
| 0.090271
| 0.107179
| 0.038114
| 0.550939
| 0.486603
| 0.449205
| 0.417682
| 0.354635
| 0.332712
| 0
| 0.020192
| 0.254604
| 11,893
| 306
| 80
| 38.866013
| 0.767061
| 0.054822
| 0
| 0.326531
| 0
| 0
| 0.190345
| 0.06716
| 0
| 0
| 0
| 0
| 0.314286
| 1
| 0.065306
| false
| 0.040816
| 0.012245
| 0
| 0.081633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6a5791901b1fc6361134fdaba0ad7eda0768c85
| 1,577
|
py
|
Python
|
packages/diana/diana/connect/utils/orth_fiq.py
|
derekmerck/diana-star
|
78aa7badb27677a1f5c83d744852f659e2541567
|
[
"MIT"
] | null | null | null |
packages/diana/diana/connect/utils/orth_fiq.py
|
derekmerck/diana-star
|
78aa7badb27677a1f5c83d744852f659e2541567
|
[
"MIT"
] | null | null | null |
packages/diana/diana/connect/utils/orth_fiq.py
|
derekmerck/diana-star
|
78aa7badb27677a1f5c83d744852f659e2541567
|
[
"MIT"
] | null | null | null |
# import logging
# from pprint import pformat
from diana.utils.dicom import DicomLevel
def find_item_query(item):
"""
Have some information about the dixel, want to find the STUID, SERUID, INSTUID
Returns a _list_ of dictionaries with matches, retrieves any if "retrieve" flag
"""
q = {}
keys = {}
# All levels have these
keys[DicomLevel.STUDIES] = ['PatientID',
'PatientName',
'PatientBirthDate',
'PatientSex',
'StudyInstanceUID',
'StudyDate',
'StudyTime',
'AccessionNumber']
# Series level has these
keys[DicomLevel.SERIES] = keys[DicomLevel.STUDIES] + \
['SeriesInstanceUID',
'SeriesDescription',
'ProtocolName',
'SeriesNumber',
'NumberOfSeriesRelatedInstances',
'Modality']
# For instance level, use the minimum
keys[DicomLevel.INSTANCES] = ['SOPInstanceUID', 'SeriesInstanceUID']
def add_key(q, key, dixel):
q[key] = dixel.meta.get(key, '')
return q
for k in keys[item.level]:
q = add_key(q, k, item)
if item.level == DicomLevel.STUDIES and item.meta.get('Modality'):
q['ModalitiesInStudy'] = item.meta.get('Modality')
# logging.debug(pformat(q))
query = {'Level': str(item.level),
'Query': q}
return query
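# A minimal usage sketch of find_item_query. FakeDixel is a hypothetical
# stand-in: the function only assumes an object exposing `.level` and a
# `.meta` mapping, so any dixel-like object works.
class FakeDixel:
    def __init__(self, level, meta):
        self.level = level
        self.meta = meta

item = FakeDixel(DicomLevel.STUDIES,
                 {'PatientID': '12345',
                  'AccessionNumber': 'A001',
                  'Modality': 'CT'})
# Yields {'Level': str(DicomLevel.STUDIES), 'Query': {...}}, with missing
# study keys defaulting to '' and ModalitiesInStudy copied from Modality.
print(find_item_query(item))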
| 30.326923
| 83
| 0.521877
| 141
| 1,577
| 5.794326
| 0.539007
| 0.068543
| 0.046512
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.375396
| 1,577
| 52
| 84
| 30.326923
| 0.829442
| 0.195308
| 0
| 0
| 0
| 0
| 0.212851
| 0.024096
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.033333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6acd732e85ef3e6872505baf917d917ef7c0ec1
| 8,045
|
py
|
Python
|
nisse/routes/slack/command_handlers/report_command_handler.py
|
nexocodecom/nisse.io
|
58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb
|
[
"MIT"
] | null | null | null |
nisse/routes/slack/command_handlers/report_command_handler.py
|
nexocodecom/nisse.io
|
58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb
|
[
"MIT"
] | 42
|
2018-07-20T14:15:48.000Z
|
2019-09-26T05:44:21.000Z
|
nisse/routes/slack/command_handlers/report_command_handler.py
|
nexocodecom/nisse.io
|
58a64072bc8dad87fbb1f54dabc93fd2d4cff6eb
|
[
"MIT"
] | null | null | null |
import logging
import os
import uuid
from typing import List
from flask import current_app
from flask.config import Config
from flask_injector import inject
from slackclient import SlackClient
from werkzeug.utils import secure_filename
from nisse.models.DTO import PrintParametersDto
from nisse.models.slack.common import ActionType
from nisse.models.slack.common import LabelSelectOption
from nisse.models.slack.dialog import Element, Dialog
from nisse.models.slack.message import Attachment, Message, Action, TextSelectOption
from nisse.models.slack.payload import ReportGenerateFormPayload
from nisse.routes.slack.command_handlers.slack_command_handler import SlackCommandHandler
from nisse.services.project_service import ProjectService
from nisse.services.reminder_service import ReminderService
from nisse.services.report_service import ReportService
from nisse.services.user_service import UserService
from nisse.services.xlsx_document_service import XlsxDocumentService
from nisse.utils import string_helper
from nisse.utils.date_helper import TimeRanges
from nisse.utils.date_helper import get_start_end_date
from nisse.utils.validation_helper import list_find
class ReportCommandHandler(SlackCommandHandler):
@inject
def __init__(self, config: Config, logger: logging.Logger, user_service: UserService,
slack_client: SlackClient, project_service: ProjectService,
reminder_service: ReminderService, report_service: ReportService, sheet_generator: XlsxDocumentService):
super().__init__(config, logger, user_service, slack_client, project_service, reminder_service)
self.report_service = report_service
self.sheet_generator = sheet_generator
def handle(self, payload: ReportGenerateFormPayload):
if payload.submission:
date_to = payload.submission.day_to
date_from = payload.submission.day_from
selected_user_id = None
if hasattr(payload.submission, 'user'):
selected_user_id = payload.submission.user
project_id = payload.submission.project
print_param = PrintParametersDto()
print_param.date_to = date_to
print_param.date_from = date_from
print_param.project_id = project_id
# todo cache projects globally e.g. Flask-Cache
projects = self.project_service.get_projects()
selected_project = list_find(lambda p: str(p.project_id) == print_param.project_id, projects)
user = self.get_user_by_slack_user_id(payload.user.id)
selected_user = None
if user.role.role != 'admin':
print_param.user_id = user.user_id
# if admin select proper user
elif selected_user_id is not None:
print_param.user_id = selected_user_id
selected_user = self.user_service.get_user_by_id(selected_user_id)
# generate report
path_for_report = os.path.join(current_app.instance_path, current_app.config["REPORT_PATH"],
secure_filename(str(uuid.uuid4())) + ".xlsx")
load_data = self.report_service.load_report_data(print_param)
self.sheet_generator.save_report(path_for_report, print_param.date_from, print_param.date_to, load_data)
im_channel = self.slack_client.api_call("im.open", user=payload.user.id)
if not im_channel["ok"]:
self.logger.error("Can't open im channel for: " + str(selected_user_id) + '. ' + im_channel["error"])
selected_project_name = "all projects"
if selected_project is not None:
selected_project_name = selected_project.name
resp = self.slack_client.api_call(
"files.upload",
channels=im_channel['channel']['id'],
file=open(path_for_report, 'rb'),
title=string_helper.generate_xlsx_title(selected_user, selected_project_name, print_param.date_from,
print_param.date_to),
filetype="xlsx",
filename=string_helper.generate_xlsx_file_name(selected_user, selected_project_name,
print_param.date_from,
print_param.date_to)
)
try:
os.remove(path_for_report)
except OSError as err:
self.logger.error("Cannot delete report file {0}".format(err))
if not resp["ok"]:
self.logger.error("Can't send report: " + resp.get("error"))
else:
self.show_dialog({'trigger_id': payload.trigger_id}, None, next(iter(payload.actions.values())))
def create_dialog(self, command_body, argument, action) -> Dialog:
selected_period = None
if action and len(action.selected_options):
selected_period = next(iter(action.selected_options), None).value
start_end = get_start_end_date(selected_period)
# todo cache it globally e.g. Flask-Cache
projects = self.project_service.get_projects()
project_options_list: List[LabelSelectOption] = [LabelSelectOption(label=p.name, value=p.project_id) for p in
projects]
# admin see users list
user = self.get_user_by_slack_user_id(action.name)
elements: Element = [
Element(label="Date from", type="text", name='day_from', placeholder="Specify date", value=start_end[0]),
Element(label="Date to", type="text", name='day_to', placeholder="Specify date", value=start_end[1]),
Element(label="Project", type="select", name='project', optional='true', placeholder="Select a project",
options=project_options_list)
]
dialog: Dialog = Dialog(title="Generate report", submit_label="Generate",
callback_id=string_helper.get_full_class_name(ReportGenerateFormPayload), elements=elements)
if action.name:
prompted_user = self.get_user_by_slack_user_id(action.name)
if user.role.role == 'admin':
users = self.user_service.get_users()
user_options_list = [LabelSelectOption(label=string_helper.get_user_name(p), value=p.user_id) for p in
users]
dialog.elements.append(
Element(label="User", value=(prompted_user.user_id if prompted_user else None),
optional='true', type="select", name='user', placeholder="Select user",
options=user_options_list))
return dialog
def report_pre_dialog(self, command_body, arguments, action):
message_text = "I'm going to generate report..."
inner_user_id = None
if len(arguments):
user = arguments[0]
inner_user_id = self.extract_slack_user_id(user)
self.get_user_by_slack_user_id(inner_user_id)
actions = [
Action(
name=inner_user_id if inner_user_id is not None else command_body['user_id'],
text="Select time range...",
type=ActionType.SELECT.value,
options=[TextSelectOption(text=tr.value, value=tr.value) for tr in TimeRanges]
)
]
attachments = [
Attachment(
text="Generate report for",
fallback="Select time range to report",
color="#3AA3E3",
attachment_type="default",
callback_id=string_helper.get_full_class_name(ReportGenerateFormPayload),
actions=actions
)
]
return Message(
text=message_text,
response_type="ephemeral",
mrkdwn=True,
attachments=attachments
).dump()
| 43.252688
| 121
| 0.640895
| 920
| 8,045
| 5.344565
| 0.208696
| 0.029286
| 0.022778
| 0.020338
| 0.179784
| 0.157006
| 0.10901
| 0.10901
| 0.090706
| 0.06508
| 0
| 0.001377
| 0.277688
| 8,045
| 185
| 122
| 43.486486
| 0.844777
| 0.018645
| 0
| 0.014085
| 0
| 0
| 0.058056
| 0
| 0
| 0
| 0
| 0.005405
| 0
| 1
| 0.028169
| false
| 0
| 0.176056
| 0
| 0.225352
| 0.091549
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6acd7e0d4951d5c3034a6f821df7b9a82c0e2f9
| 369
|
py
|
Python
|
days/day01/part1.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
days/day01/part1.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
days/day01/part1.py
|
jaredbancroft/aoc2021
|
4eaf339cc0c8566da2af13f7cb9cf6fe87355aac
|
[
"MIT"
] | null | null | null |
from helpers import inputs
def solution(day):
depths = inputs.read_to_list(f"inputs/{day}.txt")
part1_total = 0
for index, depth in enumerate(depths):
if index - 1 >= 0:
diff = int(depth) - int(depths[index - 1])
if diff > 0:
part1_total += 1
return f"Day 01 Part 1 Total Depth Increase: {part1_total}"
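# A quick illustrative check of the increase-counting logic, using the
# sample depth values from the puzzle statement (numbers assumed here).
sample = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
increases = sum(1 for prev, cur in zip(sample, sample[1:]) if cur > prev)
print(increases)  # 7 pairwise increases, the same count solution() reports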
| 28.384615
| 63
| 0.588076
| 53
| 369
| 4
| 0.54717
| 0.141509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046693
| 0.303523
| 369
| 12
| 64
| 30.75
| 0.77821
| 0
| 0
| 0
| 0
| 0
| 0.176152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6b79f701bcc0df19eeeaf217d68d4ce14a63d1a
| 251
|
py
|
Python
|
bot.py
|
White-ZacK/HLavalink
|
917a2a5abf3df2b2fbdff93709b9eb9e47c033aa
|
[
"MIT"
] | null | null | null |
bot.py
|
White-ZacK/HLavalink
|
917a2a5abf3df2b2fbdff93709b9eb9e47c033aa
|
[
"MIT"
] | null | null | null |
bot.py
|
White-ZacK/HLavalink
|
917a2a5abf3df2b2fbdff93709b9eb9e47c033aa
|
[
"MIT"
] | null | null | null |
import discord
import os
from discord.ext import commands
bot = commands.Bot(command_prefix=">")
TOKEN = os.environ.get('TOKEN')
@bot.event
async def on_ready():
print(f'{bot.user} has logged in.')
bot.load_extension('cogs.WVL')
bot.run(TOKEN)
| 17.928571
| 38
| 0.7251
| 40
| 251
| 4.475
| 0.7
| 0.122905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123506
| 251
| 13
| 39
| 19.307692
| 0.813636
| 0
| 0
| 0
| 0
| 0
| 0.155378
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6badd66c9c53436c0cfcf31174d258e7727a76d
| 795
|
py
|
Python
|
test.py
|
Roulbac/GanSeg
|
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
|
[
"MIT"
] | 20
|
2019-04-13T07:07:49.000Z
|
2022-02-23T03:10:40.000Z
|
test.py
|
Roulbac/GanSeg
|
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
|
[
"MIT"
] | null | null | null |
test.py
|
Roulbac/GanSeg
|
78f354da5d724b93ead3ac6c2b15ae18d3ac0aea
|
[
"MIT"
] | 4
|
2019-04-13T13:50:39.000Z
|
2020-11-08T03:50:54.000Z
|
from options.test_parser import TestParser
from models import create_model, get_model_parsing_modifier
from datasets import create_dataset, get_dataset_parsing_modifier
parser = TestParser()
model_name = parser.get_model_name()
dataset_name = parser.get_dataset_name()
print('Model name: {}'.format(model_name))
print('Dataset name: {}'.format(dataset_name))
model_parser_modifier = get_model_parsing_modifier(model_name)
model_parser_modifier(parser, is_train=False)
dataset_parser_modifier = get_dataset_parsing_modifier(dataset_name)
dataset_parser_modifier(parser, is_train=False)
opts, _ = parser.parse_options()
opts_str = parser.make_opts_string(opts, verbose=True)
model = create_model(opts)
dataset = create_dataset(opts)
if opts.eval:
model.set_eval()
model.test(dataset)
| 27.413793
| 68
| 0.820126
| 112
| 795
| 5.4375
| 0.267857
| 0.073892
| 0.049261
| 0.075534
| 0.10509
| 0.10509
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08805
| 795
| 28
| 69
| 28.392857
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.157895
| 0
| 0.157895
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6bbf866443aff7a6fcd220b4ae5ee2ac61f6a5c
| 353
|
py
|
Python
|
2018-12-31.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
2018-12-31.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
2018-12-31.py
|
shangpf1/python_study
|
6730519ce7b5cf4612e1c778ae5876cfbb748a4f
|
[
"MIT"
] | null | null | null |
# Maximize the browser window and take a screenshot
from selenium import webdriver
from os import path
driver = webdriver.Chrome()
d = path.dirname(__file__)
index = path.join(d, 'index.png')
driver.get("https://www.baidu.com/")
# Maximize the window
driver.maximize_window()
# Take a screenshot
driver.save_screenshot(index)
# Navigate back
driver.back()
# Navigate forward
driver.forward()
# Refresh the page
driver.refresh()
driver.quit()
| 12.172414
| 36
| 0.716714
| 49
| 353
| 5.040816
| 0.673469
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130312
| 353
| 29
| 37
| 12.172414
| 0.80456
| 0.09915
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6bcdd4e1b6e9560584746d256ad5769eed1114e
| 4,016
|
py
|
Python
|
flask_webapi/exceptions.py
|
viniciuschiele/flask-webapi
|
4901c0b78fc61b8db18c211c5858b84901d0f4ab
|
[
"MIT"
] | null | null | null |
flask_webapi/exceptions.py
|
viniciuschiele/flask-webapi
|
4901c0b78fc61b8db18c211c5858b84901d0f4ab
|
[
"MIT"
] | null | null | null |
flask_webapi/exceptions.py
|
viniciuschiele/flask-webapi
|
4901c0b78fc61b8db18c211c5858b84901d0f4ab
|
[
"MIT"
] | null | null | null |
"""
Handles exceptions raised by Flask WebAPI.
"""
from . import status
class APIException(Exception):
"""
Base class for Flask WebAPI exceptions.
Subclasses should provide `.status_code` and `.default_message` properties.
:param str message: The actual message.
:param kwargs: The extra attributes.
"""
status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
default_message = 'A server error occurred.'
def __init__(self, message=None, **kwargs):
if message is not None:
self.message = str(message)
else:
self.message = str(self.default_message)
self.kwargs = kwargs
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return str(self.message)
def denormalize(self, message_key_name='message', field_key_name='field'):
"""
Turns all `APIException` instances into `dict` and
returns a unique level of errors.
:param message_key_name: The key name used for the message item.
:param field_key_name: The key name used for the field item.
:return: A list of errors.
"""
errors = []
self._denormalize(errors, self, message_key_name=message_key_name, field_key_name=field_key_name)
return errors
def _denormalize(self, errors, message, field=None, message_key_name='message', field_key_name='field'):
kwargs = None
if isinstance(message, APIException):
kwargs = message.kwargs
message = message.message
if isinstance(message, dict):
for f, messages in message.items():
f = field + '.' + f if field else f
self._denormalize(errors, messages, f, message_key_name, field_key_name)
elif isinstance(message, list):
for message in message:
self._denormalize(errors, message, field, message_key_name, field_key_name)
else:
data = {message_key_name: message}
if kwargs:
data.update(kwargs)
if field:
data.update({field_key_name: field})
errors.append(data)
return errors
class ValidationError(APIException):
status_code = status.HTTP_400_BAD_REQUEST
def __init__(self, message, **kwargs):
# if `message` is a dict the key is
# the name of the field and the value is
# actual message.
if isinstance(message, dict):
result = {}
for field, messages in message.items():
if not isinstance(messages, ValidationError):
messages = ValidationError(messages)
if isinstance(messages.message, str):
result[field] = [messages]
else:
result[field] = messages.message
self.message = result
self.kwargs = {}
elif isinstance(message, list):
result = []
for msg in message:
if not isinstance(msg, ValidationError):
if isinstance(msg, dict):
msg = ValidationError(**msg)
else:
msg = ValidationError(msg)
result.append(msg)
if len(result) == 1:
self.message = result[0].message
self.kwargs = result[0].kwargs
else:
self.message = result
self.kwargs = {}
else:
self.message = str(message)
self.kwargs = kwargs
class UnsupportedMediaType(Exception):
default_message = 'Unsupported media type "{mimetype}" in request.'
def __init__(self, mimetype, message=None):
if message is None:
message = self.default_message.format(mimetype=mimetype)
self.message = message
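# A short sketch of how ValidationError.denormalize flattens nested
# per-field errors; the field names and messages here are made up.
err = ValidationError({'user': {'email': ['Invalid address.',
                                          'Domain not allowed.']}})
for entry in err.denormalize():
    print(entry)
# {'message': 'Invalid address.', 'field': 'user.email'}
# {'message': 'Domain not allowed.', 'field': 'user.email'}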
| 30.656489
| 108
| 0.586404
| 435
| 4,016
| 5.204598
| 0.206897
| 0.055654
| 0.04947
| 0.037102
| 0.157244
| 0.091873
| 0.05742
| 0.05742
| 0
| 0
| 0
| 0.003336
| 0.328187
| 4,016
| 130
| 109
| 30.892308
| 0.835804
| 0.140189
| 0
| 0.25641
| 0
| 0
| 0.028563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.012821
| 0.038462
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6bec2b7b19f2adc7fd34bc6ce05b27edb1743ba
| 5,133
|
py
|
Python
|
plugins/module_utils/fortiwebcloud/request.py
|
fortinet/fortiwebcloud-ansible
|
4a6a2b139b88d6428494ca87d570a0a09988b15d
|
[
"MIT"
] | 5
|
2021-01-09T23:09:22.000Z
|
2022-01-22T12:34:25.000Z
|
plugins/module_utils/fortiwebcloud/request.py
|
fortinet/fortiwebcloud-ansible
|
4a6a2b139b88d6428494ca87d570a0a09988b15d
|
[
"MIT"
] | 2
|
2021-01-19T03:46:53.000Z
|
2021-06-28T15:19:24.000Z
|
plugins/module_utils/fortiwebcloud/request.py
|
fortinet/fortiwebcloud-ansible
|
4a6a2b139b88d6428494ca87d570a0a09988b15d
|
[
"MIT"
] | 2
|
2021-09-17T11:13:31.000Z
|
2021-11-30T10:53:49.000Z
|
#!/usr/bin/python
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2020 Fortinet, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import json
import time
import threading
import urllib.parse
from ansible.plugins.httpapi import HttpApiBase
from ansible.module_utils.basic import to_text
from ansible.module_utils.six.moves import urllib
from ansible_collections.fortinet.fortiwebcloud.plugins.module_utils.fortiwebcloud.settings import (API_VER, DOMAIN)
# Global FWB REST connection session
class RequestBase(object):
def __init__(self, method='GET', path="", query='', data={}, files=None, handler=None, timeout=60, **kargs):
self.method = method
self.data = data
self.files = files
self.timeout = timeout
        # Accept either a pre-built query string or a mapping of parameters.
        if isinstance(query, str):
            self.query = query
        else:
            self.query = urllib.parse.urlencode(query)
self.api_ver = API_VER
self.domain = DOMAIN
self.path = path
self.url = self._set_url()
self.headers = dict()
self.set_headers('Content-Type', 'application/json')
self.set_headers('Accept', 'text/plain')
self.handler = handler
@staticmethod
def _format_path(path):
return '/'.join([seg for seg in path.split('/') if len(seg)])
def _set_url(self):
ulist = []
ulist.append(self.api_ver)
ulist.append(self.path)
url = "/".join(ulist)
if self.query:
query_str = self.query if self.query.startswith('?') else '?' + self.query
url = url + query_str
return "/" + url
def set_headers(self, key, value):
self.headers[key] = value
def validate(self):
"""
Validate the setup of rest api
"""
        if self.method not in ('GET', 'POST', 'PUT', 'DELETE'):
raise Exception("REST API method %s not supported." % self.method)
def get(self, data={}):
status, res = self.handler.send_req(self.url, headers=self.headers, method="GET")
return res
def delete(self, data={}):
status, res = self.handler.send_req(self.url, headers=self.headers, method="DELETE")
return res
def put(self, data={}, files=None):
status, res = self.handler.send_req(
self.url, headers=self.headers,
data=json.dumps(data), files=files, method="PUT")
return res
def post(self, data={}):
_, res = self.handler.send_req(
self.url, headers=self.headers,
data=json.dumps(data), method="POST")
return res
def send(self, data=None, files=None):
"""
Send rest api, and wait its return.
"""
self.validate()
try:
ts = time.time()
method_val = getattr(self, self.method.lower(), self.get)
d = data or self.data
print(f"send data {d}")
f = files or self.files
print(f"send files {f}")
if f:
response = method_val(data=d, files=f)
else:
response = method_val(data=d)
try:
response = json.loads(response)
except Exception as e:
raise Exception(f"Get response json content failed for {e}.")
duration = time.time() - ts
print(f"URL:{self.url}, method:{self.method} finished, duration:{duration}.")
return response
except Exception as e:
raise Exception("Failed to connect to %s: %s." % (self.url, e))
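# A hedged usage sketch: RequestBase only assembles the URL and headers
# and delegates transport to the injected handler, so the stub below is
# purely hypothetical.
class StubHandler:
    def send_req(self, url, headers=None, data=None, files=None,
                 method='GET'):
        return 200, '{"ok": true}'

req = RequestBase(method='GET', path='application', query={'cursor': 0},
                  handler=StubHandler())
print(req.url)     # "/" + API_VER + "/application?cursor=0"
print(req.send())  # {'ok': True}, parsed from the stub's JSON payload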
| 36.664286
| 116
| 0.643678
| 673
| 5,133
| 4.861813
| 0.355126
| 0.017115
| 0.017115
| 0.022005
| 0.174817
| 0.146699
| 0.146699
| 0.122249
| 0.122249
| 0.122249
| 0
| 0.001576
| 0.258523
| 5,133
| 139
| 117
| 36.928058
| 0.858119
| 0.336645
| 0
| 0.144578
| 0
| 0
| 0.086186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120482
| false
| 0
| 0.120482
| 0.012048
| 0.337349
| 0.036145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6bf070a0e1401995e4a06960552d64f43d04d96
| 497
|
py
|
Python
|
tests/test_account.py
|
thangduong/lendingclub2
|
b16552807b69b81804369fd1a9058fa8f89ce1ef
|
[
"MIT"
] | null | null | null |
tests/test_account.py
|
thangduong/lendingclub2
|
b16552807b69b81804369fd1a9058fa8f89ce1ef
|
[
"MIT"
] | null | null | null |
tests/test_account.py
|
thangduong/lendingclub2
|
b16552807b69b81804369fd1a9058fa8f89ce1ef
|
[
"MIT"
] | null | null | null |
# Filename: test_account.py
"""
Test the lendingclub2.accountmodule
"""
# PyTest
import pytest
# lendingclub2
from lendingclub2.account import InvestorAccount
from lendingclub2.error import LCError
class TestInvestorAccount:
def test_properties(self):
try:
investor = InvestorAccount()
except LCError:
pytest.skip("skip because cannot find account ID")
assert investor.available_balance >= 0.0
assert investor.total_balance >= 0.0
| 20.708333
| 62
| 0.702213
| 53
| 497
| 6.509434
| 0.584906
| 0.092754
| 0.052174
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020779
| 0.225352
| 497
| 23
| 63
| 21.608696
| 0.875325
| 0.16499
| 0
| 0
| 0
| 0
| 0.08642
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6c3cf7f18578ef4fee0cf3ceb347dcb151e1993
| 3,827
|
py
|
Python
|
Lib/corpuscrawler/crawl_pl.py
|
cash/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 95
|
2019-06-13T23:34:21.000Z
|
2022-03-12T05:22:49.000Z
|
Lib/corpuscrawler/crawl_pl.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 31
|
2019-06-02T18:56:53.000Z
|
2021-08-10T20:16:02.000Z
|
Lib/corpuscrawler/crawl_pl.py
|
sahwar/corpuscrawler
|
8913fe1fb2b6bfdfbf2ba01d2ce88057b3b5ba3d
|
[
"Apache-2.0"
] | 35
|
2019-06-18T08:26:24.000Z
|
2022-01-11T13:59:40.000Z
|
# coding: utf-8
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function, unicode_literals
import re
from corpuscrawler.util import (
crawl_deutsche_welle, crawl_udhr, extract, cleantext, clean_paragraphs, urlpath
)
def crawl(crawler):
out = crawler.get_output(language='pl')
crawl_udhr(crawler, out, filename='udhr_pol.txt')
crawl_deutsche_welle(crawler, out, prefix='/pl/')
crawl_pl_usembassy_gov(crawler, out)
def _pl_usembassy_gov_path(url):
    path = urlpath(url)
    return (path.startswith('/pl/')
            and path != '/pl/'
            and not path.startswith('/pl/category/')
            and not path.startswith('/pl/tag/'))
def crawl_pl_usembassy_gov(crawler, out):
sitemap = crawler.fetch_sitemap('https://pl.usembassy.gov/sitemap_index.xml')
trans_regex = re.compile(
r'<h3>Tłumaczenie</h3><div class="translations_sidebar"><ul><li><a href ?="([^"]*)"'
)
pubdate_regex = re.compile(
r'<meta property="article:published_time" content="([^"]*)"'
)
links = set()
for key in sorted(sitemap.keys()):
if _pl_usembassy_gov_path(key):
links.add(key)
for link in sorted(links):
result = crawler.fetch(link)
if result.status != 200:
continue
html = result.content.decode('utf-8')
title = extract('<title>', '</title>', html)
title = title if title else ''
title = title.split(' | ')[0] if ' | ' in title else title
pubdate_match = pubdate_regex.search(html)
pubdate = pubdate_match.group(1) if pubdate_match else None
trans_match = trans_regex.search(html)
trans = trans_match.group(1) if trans_match else None
if pubdate is None: pubdate = result.headers.get('Last-Modified')
if pubdate is None: pubdate = sitemap[link]
exstart = '<div class="entry-content">'
exstart2 = '<div class="mo-page-content">'
exend = '<!-- AddThis Advanced Settings above via filter on the_content -->'
exstart = exstart2 if exstart2 in html else exstart
content = extract(exstart, exend, html)
cleanparas = clean_paragraphs(content) if content else None
# Don't repeat the title if it's the only text content
cleantitle = cleantext(title)
if cleanparas:
if len(cleanparas) == 1 and cleanparas[0] == cleantitle:
paras = [cleantitle]
else:
paras = [cleantitle] + cleanparas
else:
paras = [cleantitle]
# There are quite a few media pages whose only text is the filename
# this, conveniently, is typically also the post's name
if len(paras) == 1 and paras[0].lower() in urlpath(link).lower():
continue
if paras:
out.write('# Location: %s\n' % link)
out.write('# Genre: Diplomatic\n')
if trans:
out.write('# Translation: %s\n' % trans)
if pubdate:
out.write('# Publication-Date: %s\n' % pubdate)
out.write('\n'.join(paras) + '\n')
| 40.284211
| 92
| 0.629736
| 488
| 3,827
| 4.844262
| 0.420082
| 0.025381
| 0.029611
| 0.027919
| 0.07445
| 0.055838
| 0.031303
| 0
| 0
| 0
| 0
| 0.0088
| 0.257643
| 3,827
| 94
| 93
| 40.712766
| 0.823302
| 0.197805
| 0
| 0.166667
| 0
| 0
| 0.154703
| 0.046542
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.152778
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6c507d5077fa7072a210afdf6ced8586dc0a30d
| 2,775
|
py
|
Python
|
typewise_alert.py
|
clean-code-craft-tcq-1/add-variety-python-AkshayUHegde
|
924beb7195d960d3fe06460da9df1a42c5d5693f
|
[
"MIT"
] | null | null | null |
typewise_alert.py
|
clean-code-craft-tcq-1/add-variety-python-AkshayUHegde
|
924beb7195d960d3fe06460da9df1a42c5d5693f
|
[
"MIT"
] | null | null | null |
typewise_alert.py
|
clean-code-craft-tcq-1/add-variety-python-AkshayUHegde
|
924beb7195d960d3fe06460da9df1a42c5d5693f
|
[
"MIT"
] | null | null | null |
class TypewiseAlert:
def __init__(self, limits_for_types=None, alert_target_funcs=None):
self.default_limits_for_cooling_types = {
"PASSIVE_COOLING": (0, 35),
"MED_ACTIVE_COOLING": (0, 40),
"HI_ACTIVE_COOLING": (0, 45),
}
self.default_alert_funcs = {
'TO_CONTROLLER': self.send_controller_message,
'TO_EMAIL': self.send_email
}
        self.alert_mail_details = {
            "TOO_LOW": {
                "recipient": "low_temperature_breach_expert@bosch.com",
                "email_message": "The temperature has dropped below the lower breach limit. "
                                 "Please take corrective action immediately."
            },
            "TOO_HIGH": {
                "recipient": "high_temperature_breach_expert@bosch.com",
                "email_message": "The temperature has risen above the upper breach limit. "
                                 "Please take corrective action immediately."
            },
            "NORMAL": {
                "recipient": "monitoring_team@bosch.com",
                "email_message": "The temperature is OK."
            },
        }
self.default_controller_header = 0xfeed
        self.limits_for_types = (limits_for_types if limits_for_types is not None
                                 else self.default_limits_for_cooling_types)
        self.alert_target_funcs = (alert_target_funcs if alert_target_funcs is not None
                                   else self.default_alert_funcs)
def send_controller_message(self, breach_type):
print(f'{self.default_controller_header}, {breach_type}')
return f"CONTROLLER_MESSAGE,{breach_type}"
def send_email(self, breach_type):
recipients = self.alert_mail_details[breach_type]['recipient']
email_message = self.alert_mail_details[breach_type]['email_message']
email_content = f"To,\n{recipients}\n \t{email_message}"
print(email_content)
return f"EMAIL,{breach_type}"
def infer_breach(self, value, lower_limit, upper_limit):
if value < lower_limit:
return 'TOO_LOW'
if value > upper_limit:
return 'TOO_HIGH'
return 'NORMAL'
def classify_temperature_breach(self, cooling_type, temperature_in_c):
lower_limit, upper_limit = self.limits_for_types[cooling_type]
return self.infer_breach(temperature_in_c, lower_limit, upper_limit)
def check_and_alert(self, alert_target, battery_characteristic, temperature_in_c):
breach_type = \
self.classify_temperature_breach(battery_characteristic['coolingType'], temperature_in_c)
return self.alert_target_funcs[alert_target](breach_type)
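# Illustrative use of TypewiseAlert with its default limits and targets;
# the temperatures are arbitrary, and 'coolingType' is the key the class
# itself reads from the battery characteristic.
alert = TypewiseAlert()
battery = {'coolingType': 'PASSIVE_COOLING'}  # default limits are (0, 35)
print(alert.check_and_alert('TO_CONTROLLER', battery, 50))
# -> 'CONTROLLER_MESSAGE,TOO_HIGH'
print(alert.check_and_alert('TO_EMAIL', battery, 20))
# -> 'EMAIL,NORMAL'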
| 45.491803
| 101
| 0.627027
| 311
| 2,775
| 5.215434
| 0.250804
| 0.055487
| 0.043157
| 0.033292
| 0.350801
| 0.350801
| 0.192355
| 0.090012
| 0.090012
| 0.090012
| 0
| 0.006058
| 0.286126
| 2,775
| 60
| 102
| 46.25
| 0.812721
| 0
| 0
| 0.037037
| 0
| 0
| 0.241168
| 0.060923
| 0
| 0
| 0.002163
| 0
| 0
| 1
| 0.111111
| false
| 0.018519
| 0
| 0
| 0.259259
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6c9aa4e57c89e6f69fa55d265d499cc88ae995f
| 1,519
|
py
|
Python
|
4_factory/factory_method/dependent_pizza_store.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
4_factory/factory_method/dependent_pizza_store.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
4_factory/factory_method/dependent_pizza_store.py
|
hypersport/Head-First-Design-Patterns-Python
|
0c8b831ae89ebbbef8b203b96508deb7e3063590
|
[
"MIT"
] | null | null | null |
from chicago_style_clam_pizza import ChicagoStyleClamPizza
from chicago_style_cheese_pizza import ChicagoStyleCheesePizza
from chicago_style_pepperoni_pizza import ChicagoStylePepperoniPizza
from chicago_style_veggie_pizza import ChicagoStyleVeggiePizza
from ny_style_clam_pizza import NYStyleClamPizza
from ny_style_cheese_pizza import NYStyleCheesePizza
from ny_style_pepperoni_pizza import NYStylePepperoniPizza
from ny_style_veggie_pizza import NYStyleVeggiePizza
class DependentPizzaStore:
pizza = None
def create_pizza(self, style: str, t: str):
if style == 'NY':
if t == 'cheese':
self.pizza = NYStyleCheesePizza()
elif t == 'pepperoni':
self.pizza = NYStylePepperoniPizza()
elif t == 'clam':
self.pizza = NYStyleClamPizza()
elif t == 'veggie':
self.pizza = NYStyleVeggiePizza()
elif style == 'Chicago':
if t == 'cheese':
self.pizza = ChicagoStyleCheesePizza()
elif t == 'pepperoni':
self.pizza = ChicagoStylePepperoniPizza()
elif t == 'clam':
self.pizza = ChicagoStyleClamPizza()
elif t == 'veggie':
self.pizza = ChicagoStyleVeggiePizza()
        else:
            print('Error: invalid style of pizza')
            return None
        if self.pizza is None:
            # style was recognized but the type matched nothing
            print('Error: invalid type of pizza')
            return None
self.pizza.prepare()
self.pizza.bake()
self.pizza.cut()
self.pizza.box()
return self.pizza
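# Usage is a single call; this sketch assumes the imported pizza classes
# implement prepare/bake/cut/box as in the book's example and are on the path.
store = DependentPizzaStore()
pizza = store.create_pizza('NY', 'cheese')  # returns a NYStyleCheesePizza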
| 35.325581
| 68
| 0.631336
| 146
| 1,519
| 6.39726
| 0.267123
| 0.125268
| 0.068522
| 0.042827
| 0.169165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.293614
| 1,519
| 42
| 69
| 36.166667
| 0.870457
| 0
| 0
| 0.216216
| 0
| 0
| 0.057275
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.216216
| 0
| 0.351351
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6cac8b3c9901ec09333ce8b40056a0c6f21d27c
| 459
|
py
|
Python
|
tests/performance/cte-arm/tests/rf_mnist.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 36
|
2018-10-22T19:21:14.000Z
|
2022-03-22T12:10:01.000Z
|
tests/performance/cte-arm/tests/rf_mnist.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 329
|
2018-11-22T18:04:57.000Z
|
2022-03-18T01:26:55.000Z
|
tests/performance/cte-arm/tests/rf_mnist.py
|
alexbarcelo/dislib
|
989f81f235ae30b17410a8d805df258c7d931b38
|
[
"Apache-2.0"
] | 21
|
2019-01-10T11:46:39.000Z
|
2022-03-17T12:59:45.000Z
|
import performance
import dislib as ds
from dislib.classification import RandomForestClassifier
def main():
x_mn, y_mn = ds.load_svmlight_file(
"/fefs/scratch/bsc19/bsc19029/PERFORMANCE/datasets/train.scaled",
block_size=(5000, 780), n_features=780, store_sparse=False)
rf = RandomForestClassifier(n_estimators=100, distr_depth=2)
performance.measure("RF", "mnist", rf.fit, x_mn, y_mn)
if __name__ == "__main__":
main()
| 24.157895
| 73
| 0.723312
| 61
| 459
| 5.131148
| 0.704918
| 0.019169
| 0.025559
| 0.038339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0.16122
| 459
| 18
| 74
| 25.5
| 0.758442
| 0
| 0
| 0
| 0
| 0
| 0.167756
| 0.135076
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6d0b39109db93442e531726d432358337458672
| 2,275
|
py
|
Python
|
pwn/shellcode/misc/exit.py
|
Haabb/pwnfork
|
c2530ea2fd2f9d4e65df234afeb8f7def93afe49
|
[
"MIT"
] | 1
|
2016-08-29T03:38:42.000Z
|
2016-08-29T03:38:42.000Z
|
pwn/shellcode/misc/exit.py
|
Haabb/pwnfork
|
c2530ea2fd2f9d4e65df234afeb8f7def93afe49
|
[
"MIT"
] | null | null | null |
pwn/shellcode/misc/exit.py
|
Haabb/pwnfork
|
c2530ea2fd2f9d4e65df234afeb8f7def93afe49
|
[
"MIT"
] | null | null | null |
from pwn.internal.shellcode_helper import *
from ..misc.pushstr import pushstr
@shellcode_reqs(arch=['i386', 'amd64'], os=['linux', 'freebsd'])
def exit(returncode = None, arch = None, os = None):
"""Exits. Default return code, None, means "I don't care"."""
returncode = arg_fixup(returncode)
if arch == 'i386':
if os in ['linux', 'freebsd']:
return _exit_i386(returncode, os)
elif arch == 'amd64':
if os in ['linux', 'freebsd']:
return _exit_amd64(returncode, os)
bug("OS/arch combination (%s, %s) is not supported for exit" % (os, arch))
def _exit_amd64(returncode, os):
out = ["push SYS_exit",
"pop rax"]
    if returncode is not None:
if os == 'linux':
if returncode == 0:
out += ['xor ebx, ebx']
elif isinstance(returncode, int):
out += [pushstr(p32(returncode), null = False, raw = True),
'pop rbx']
else:
out += ['mov ebx, %s' % str(returncode)]
elif os == 'freebsd':
if returncode == 0:
out += ['cdq', 'push rdx']
elif isinstance(returncode, int):
out += [pushstr(p32(returncode), null = False, raw = True)]
else:
out += ['push %s' % str(returncode)]
out += ['push rax']
out += ['syscall']
return '\n'.join(' ' + s for s in out)
def _exit_i386(returncode, os):
    if returncode is None:
return """
push SYS_exit
pop eax
int 0x80
"""
if os == 'linux':
return """
""" + pwn.shellcode.mov('ebx', returncode, raw = True) + """
push SYS_exit
pop eax
int 0x80"""
elif os == 'freebsd':
if str(returncode) == "0":
return """
push SYS_exit
pop eax
cdq
push edx
push edx
int 0x80"""
else:
return """
push %s
push SYS_exit
pop eax
push eax
int 0x80""" % str(returncode)
else:
bug('OS was neither linux nor freebsd')
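# A small sketch of the generated assembly; this assumes the legacy pwn
# shellcode API that this module is part of, and a fixed return code of 0.
print(exit(0, arch='i386', os='freebsd'))
# emits the SYS_exit sequence with the return code pushed per the
# FreeBSD calling convention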
| 29.545455
| 78
| 0.465934
| 243
| 2,275
| 4.296296
| 0.288066
| 0.033525
| 0.052682
| 0.06705
| 0.270115
| 0.253831
| 0.226054
| 0.126437
| 0.126437
| 0.126437
| 0
| 0.02874
| 0.403516
| 2,275
| 76
| 79
| 29.934211
| 0.740604
| 0.024176
| 0
| 0.461538
| 0
| 0
| 0.31346
| 0
| 0
| 0
| 0.007227
| 0
| 0
| 1
| 0.046154
| false
| 0
| 0.030769
| 0
| 0.184615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6d4b9bc3a7c3d3b66374d69e6147ebd024b69ea
| 14,117
|
py
|
Python
|
effect_tools.py
|
rsjones94/hurricane_analysis
|
b619526dcf40ea83e9ae3ba92f3a1d28fce25776
|
[
"MIT"
] | null | null | null |
effect_tools.py
|
rsjones94/hurricane_analysis
|
b619526dcf40ea83e9ae3ba92f3a1d28fce25776
|
[
"MIT"
] | null | null | null |
effect_tools.py
|
rsjones94/hurricane_analysis
|
b619526dcf40ea83e9ae3ba92f3a1d28fce25776
|
[
"MIT"
] | null | null | null |
import os
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from read import clean_read
from detrend import *
def get_effect(data, param, mean, stddev, start_index, lag=3, effect_type=1,
returning_gap=0, dropthrough=(0, 0), forcing=(None, None),
max_effect=365, max_dropout=5):
"""
For a given parameter, finds the time it takes for the time series to return to normalcy
after a peturbation
Args:
data: A DataFrame of gauge data
param: the column in data to use
mean: the mean value of the pre-effect window
stddev: the standard deviation of the pre-effect window
start_index: the index of the storm peturbation
lag: the number of days allowed for an effect to begin. Minimum is 1
effect_type: the INITIAL expected effect of the peturbation. 1 indicates a positive effect, -1
indicates a negative effect
returning_gap: number of days where an increasing effect is allowed to reverse trend
before it is considered to be on its reverse trend
dropthrough: A list or tuple indicating the number of dropthroughs allowed and the number of days
the time series is allotted to drop through before being considered terminated.
A dropthrough is when a parameter is outside the normal range for that parameter and quickly
becomes outside the normal range but with opposite valence, e.g., it is above the normal range and
quickly goes to being below the normal range.
forcing: a tuple of 1) the number of days a returning trend can be reversed before it is forced to
return by calculating the best fit line for the last n returning days and
calculating the date of intersection. This allows an effect window to be
estimated even when additional storms/forcing effects follow the initial
peturbation. Default is None, which will never force a completion.
2) the number of points to include in the forcing slope fit line
max_effect: the maximum number of days an effect can continue before being terminated
max_dropout: number of continuous days of no signal before mandatory termination
Returns:
A list with two parts. The first is a list of the start and end indices of the effect
(or None, if there was no effect). The second is list, (days_above, days_below, days_between,
termination_type, forcing_start, forcing_slope). termination_type can be "natural", "forced",
None or 'dropout'
If not forced, forcing_start and forcing_slope will be None.
"""
returner = [[None, None], [0, 0, 0, 'natural', None, None]]
force_completion = forcing[0] # number of days to regress before completion is forced
force_history = forcing[1]
dropthrough = [dropthrough[0], dropthrough[1]]
comp_dict = {1: greater, -1: lesser}
exes = np.array(data.index)
orig = np.array(data[param])
whys = np.array(pd.Series(orig).interpolate(limit_direction='both'))
low = mean - stddev
high = mean + stddev
normalcy = (low, high)
if effect_type == 1:
comp_ind = 1
comp_val = normalcy[comp_ind] # high
elif effect_type == -1:
comp_ind = 0
comp_val = normalcy[comp_ind] # low
else:
raise Exception('effect_type must be 1 or -1')
effect_begun = False
i = start_index - 1
while lag > 0:
lag -= 1
i += 1
val = whys[i]
if comp_dict[effect_type](val, comp_val):
effect_begun = True
returner[0][0] = i
break
if not effect_begun:
returner[1][3] = None
return returner
# print(f'Effect begins at {i} {whys[i]}')
i -= 1
is_returning = False
has_real_val = False
nan_count = 0
ret_gap_count = 0
while True:
i += 1
# print(f'Checking {i} {whys[i]}')
        if i - returner[0][0] > max_effect:
            # the effect has run longer than max_effect days; terminate
            returner[1][3] = 'max_effect'
            break
if np.isnan(orig[i]):
nan_count += 1
# print(f'NANNER: {nan_count}')
if nan_count > max_dropout:
returner[1][3] = 'dropout'
# print('dropping out')
i -= nan_count - 1
break
else:
has_real_val = True
nan_count = 0
last_val = whys[i - 1]
val = whys[i]
towards_pre = comp_dict[effect_type](last_val, val)
# print(f'Towards pre: {towards_pre}')
if towards_pre and not is_returning: # checking to see if the data has started going back to pre-peturbation
ret_gap_count += 1
# print(f'Retgap: {ret_gap_count} at {i}')
if ret_gap_count > returning_gap or comp_dict[effect_type](comp_val, val):
# print(f'returning at {i}')
is_returning = True
ret_gap_count = 0
elif not is_returning:
ret_gap_count = 0
# print(f'past pre-pet')
if is_returning:
if comp_dict[effect_type](comp_val, val): # check to see if we've returned to normalcy
# print(f'we normal at {i}')
if dropthrough[0] == 0: # if no dropthroughs left then we're done
# print('no dropthroughs left')
break
else:
if within(val, normalcy): # if we're within normalcy, check to see if we'll drop through in time
# print('need to check dropthrough')
does_drop_through, ind = drops_through(whys, i, normalcy, dropthrough[1])
# print(f'Drops thru? {does_drop_through}')
if does_drop_through: # if it does drop through, go on
days_to_drop = ind - i
returner[1][2] += days_to_drop - 1
i = ind - 1
else: # if it doesn't, then we're done
# print('did not drop thru')
break
dropthrough[0] -= 1
effect_type = -effect_type
comp_ind ^= 1 # bit flip from 0 to 1 and vice versa
comp_val = normalcy[comp_ind]
is_returning = False
elif force_completion and comp_dict[effect_type](val, last_val):
# print('moving away?')
# check to see if the data is moving away from pre-pet again
# assuming force_completion is numeric
# print('Force completion active')
# print(f'Func {comp_dict[effect_type]}, vals {val,last_val}. Ind {i}')
# print('ddtr:')
dn = days_to_return(whys, i - 1, func=comp_dict[-effect_type], max_nan=max_dropout)
# print(f'{dn}')
# print(dn)
if dn <= force_completion: # if we return in time
if last_val > high:
returner[1][0] += (dn - 2)
if last_val < low:
returner[1][1] += (dn - 2)
i += (dn - 2)
else: # force completion
# print(f'Forcing completion')
try:
ind, days_to_force, slope = forced_return(exes, whys, i - 1, normalcy, history=force_history)
# print(f'Completion forced at {ind} from {i-1}. Takes {days_to_force} days. Slope: {slope}')
returner[1][3] = 'forced'
returner[1][4] = i - 1
returner[1][5] = slope
to_add = days_to_force - 1
if last_val > high:
returner[1][0] += to_add
if last_val < low:
returner[1][1] += to_add
i = ind
except ValueError:
returner[1][3] = 'forcing error'
i -= 1
break
# print('eob')
if val > high:
returner[1][0] += 1
elif val < low:
returner[1][1] += 1
else:
returner[1][2] += 1
returner[0][1] = i
if not has_real_val:
returner = [[None, None], [0, 0, 0, 'dropout', None, None]]
if returner[0][0] == returner[0][1]: # happens sometimes when there is a dropout but an effect is registered due to
# interpolation at the storm start
returner = [[None, None], [0, 0, 0, 'natural', None, None]]
return returner
def greater(a, b):
return a > b
def lesser(a, b):
return a < b
def within(a, b):
return b[1] > a > b[0]
def forced_return(exes, whys, i, window, history=3):
"""
Gives the index of a forced return and the slope of the return
Args:
exes: x vals
whys: y vals
i: index of the return begin
window: the min and max of the return window
history: number of points to include in the best fit
Returns:
tuple (index_of_return, days_to_return, slope)
"""
# print('\nFORCING:')
while True:
x = exes[(i - history + 1):(i + 1)]
y = whys[(i - history + 1):(i + 1)]
m, b = np.polyfit(x, y, 1)
# print(f'{m}')
if whys[i] > window[1] and m >= 0:
history -= 1
elif whys[i] < window[0] and m <= 0:
history -= 1
elif np.isclose(m, 0):
history -= 1
else:
break
if history == 1:
raise ValueError('Forced return impossible')
def lin_func(index, y=whys[i], anchor=i, slope=m):
r = y + (index - anchor) * slope
return r
# print('lin_func defined')
if whys[i] > window[1]:
func = lesser
comp = window[1]
# print('func def')
elif whys[i] < window[0]:
func = greater
comp = window[0]
# print('func def')
else:
        raise Exception('Whoah. something weird with forced_return()')
val = whys[i]
n = 0
while not func(val, comp):
i += 1
n += 1
val = lin_func(index=i)
# print(val)
# print('finished')
return i, n, m
def days_to_return(exes, i, func, max_nan=0):
"""
Returns the number of days for a series to return to above/below the indexed value
Args:
exes: series of x vals
i: index to start at
func: a function, either lesser or greater as defined in this module
max_nan: maximum allowable consecutive nans
Returns:
num of days to return
"""
if func is lesser:
# print('looking for when vals drop below comp')
pass
elif func is greater:
# print('looking for when vals rise above comp')
pass
initial = exes[i]
nas = 0
n = 0
try:
while nas <= max_nan:
i += 1
n += 1
val = exes[i]
# print(f'Compare {val} to initial ({initial})')
if np.isnan(val):
nas += 1
elif func(val, initial):
break
except IndexError:
pass
return n
def drops_through(exes, i, window, allowed):
"""
Checks if exes drops through the window fast enough from index i
Args:
exes: the x data
i: the index being checked
window: the min and max of the window
allowed: number of days allowed to pass through the window
Returns:
bool
"""
val = exes[i]
while within(val, window):
i -= 1
val = exes[i]
if val > window[1]:
func = lesser
comp = window[0]
# print('First val out of window is above. Checking to see when val goes below window')
elif val < window[0]:
func = greater
comp = window[1]
# print('First val out of window is below. Checking to see when val goes above window')
else:
raise Exception('Whoah. something weird with drop_through()')
count = 0
while count < allowed:
i += 1
count += 1
val = exes[i]
# print(val,comp)
if func(val, comp):
return True, i
return False, -1
###############
'''
choice_param = 'Discharge Detrend'
choice_gauge = '02218565'
# 04249000
# 015765185
# 0209303205
results_folder = r'E:\hurricane\results'
data_folder = r'E:\hurricane\station_data\modified'
data = clean_read(os.path.join(data_folder,choice_gauge+'.csv'))
result_df = pd.read_csv(os.path.join(results_folder,choice_param+'.csv'), dtype={'Gauge':str})
for index,line in result_df.iterrows():
if np.isnan(line['Pre-effect Window']):
continue
gauge = line['Gauge']
start = line['Storm Index']
mean = line['Pre-effect Mean']
stddev = line['Pre-effect Stddev']
if gauge == choice_gauge:
break
low = mean - stddev
high = mean + stddev
(es, ee), stats = get_effect(data, choice_param, mean, stddev, start, lag=3, effect_type=1,
returning_gap=1, dropthrough=[1,2], forcing=(3,4), max_effect=365, max_dropout=5)
plt.figure()
plt.plot(data.index,data[choice_param])
plt.axvline(start, color='red')
plt.axhline(high, color='orange')
plt.axhline(low, color='orange')
if stats[3] is not None:
plt.axvline(es, color='green', linestyle='dashed')
plt.axvline(ee, color='blue')
if stats[3] == 'forced':
x1 = stats[4]
x2 = ee
y1 = data[choice_param][stats[4]]
y2 = y1 + (x2-x1)*stats[5]
fx = [x1,x2]
fy = [y1,y2]
plt.plot(fx,fy,color='black', linestyle='dashed')
plt.xlim(start-28,start+28)
plt.title(f'Above: {stats[0]}, Below: {stats[1]}, Between: {stats[2]} \n'
f'Termination Type: {stats[3]}')
plt.show()
'''
| 32.602771
| 120
| 0.551463
| 1,887
| 14,117
| 4.034446
| 0.18071
| 0.010508
| 0.01261
| 0.016551
| 0.18232
| 0.099698
| 0.055432
| 0.008932
| 0.008932
| 0
| 0
| 0.023942
| 0.352058
| 14,117
| 432
| 121
| 32.678241
| 0.808352
| 0.353262
| 0
| 0.348039
| 0
| 0
| 0.027075
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0.014706
| 0.034314
| 0.014706
| 0.122549
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6d6b79b9b74cb519b433548531f1d028f0803ab
| 871
|
py
|
Python
|
warningshot.py
|
DeadpoolPancakes/nerf-sentry
|
0f9cccd78e66f4020f1960871fd35c328a697086
|
[
"MIT"
] | null | null | null |
warningshot.py
|
DeadpoolPancakes/nerf-sentry
|
0f9cccd78e66f4020f1960871fd35c328a697086
|
[
"MIT"
] | null | null | null |
warningshot.py
|
DeadpoolPancakes/nerf-sentry
|
0f9cccd78e66f4020f1960871fd35c328a697086
|
[
"MIT"
] | null | null | null |
import RPi.GPIO as GPIO
from time import sleep
GPIO.setmode(GPIO.BCM)
Motor1Enable = 5
Motor1B = 24
Motor1A = 27
Motor2Enable = 17
Motor2B = 6
Motor2A = 22
#single shot script used as a warning shot
# Set up defined GPIO pins
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1Enable,GPIO.OUT)
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2Enable,GPIO.OUT)
# Turn the firing motor on
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor2Enable,GPIO.HIGH)
# warm it up for half a second
sleep(0.5)
#turn on firing mechanism
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1Enable,GPIO.HIGH)
# Stop the motor
sleep(0.5)
GPIO.output(Motor2Enable,GPIO.LOW)
GPIO.output(Motor1Enable,GPIO.LOW)
# Always end this script by cleaning the GPIO
GPIO.cleanup()
| 21.243902
| 45
| 0.771527
| 143
| 871
| 4.699301
| 0.384615
| 0.119048
| 0.081845
| 0.119048
| 0.098214
| 0.098214
| 0
| 0
| 0
| 0
| 0
| 0.044213
| 0.117107
| 871
| 41
| 46
| 21.243902
| 0.829649
| 0.234214
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6d814a8e68b9da379529a21009897f7697124d2
| 1,979
|
py
|
Python
|
ampadb_index/parse_md.py
|
ampafdv/ampadb
|
25c804a5cb21afcbe4e222a3b48cca27ff2d9e19
|
[
"MIT"
] | null | null | null |
ampadb_index/parse_md.py
|
ampafdv/ampadb
|
25c804a5cb21afcbe4e222a3b48cca27ff2d9e19
|
[
"MIT"
] | 28
|
2016-10-21T16:04:56.000Z
|
2018-11-10T20:55:40.000Z
|
ampadb_index/parse_md.py
|
ampafdv/ampadb
|
25c804a5cb21afcbe4e222a3b48cca27ff2d9e19
|
[
"MIT"
] | 2
|
2016-10-22T19:24:45.000Z
|
2017-02-11T10:49:02.000Z
|
import html
import markdown
import bleach
import lxml.html
from lxml.html import builder as E
TAGS = [
'p', 'img', 'em', 'strong', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ol', 'ul',
'li', 'br', 'hr', 'a', 'img', 'blockquote', 'b', 'i', 'u', 's', 'pre',
'code', 'table', 'thead', 'tr', 'th', 'tbody', 'td'
]
ATTRS = {
'ol': ['start'],
'a': ['href', 'title', 'rel'],
'img': ['src', 'title', 'alt'],
'th': ['align'],
'td': ['align']
}
STYLES = []
def clean(raw_html):
return bleach.clean(raw_html, tags=TAGS, attributes=ATTRS, styles=STYLES)
def parse_md(md_text, wrap='div', html_class='markdown'):
raw_html = markdown.markdown(
md_text,
output_format='html5',
enable_attributes=False,
lazy_ol=False,
encoding='utf-8',
extensions=['markdown.extensions.extra'])
clean_html = clean(raw_html)
    # Wrap the output in whichever container tag was requested
if wrap == 'div':
if html_class:
tree = E.DIV(E.CLASS(html_class))
else:
tree = E.DIV()
elif wrap == 'blockquote':
if html_class:
tree = E.BLOCKQUOTE(E.CLASS(html_class))
else:
tree = E.BLOCKQUOTE()
elif wrap == 'raw':
return clean_html
else:
        raise ValueError('`wrap` must be "div" or "blockquote", not '
                         '{}'.format(wrap))
bin_html = clean_html.encode('utf-8', 'xmlcharrefreplace')
try:
for elem in lxml.html.fragments_fromstring(
bin_html, parser=lxml.html.HTMLParser(encoding='utf-8')):
tree.append(elem)
except TypeError:
        # Unescape first, because E.P escapes the HTML again
tree.append(E.P(html.unescape(clean_html)))
for table in tree.iter('table'):
        table.classes |= {'table'}  # add the "table" class
return lxml.html.tostring(
tree, encoding='utf-8', method='html',
pretty_print=True).decode('utf-8')
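# A small round-trip sketch: markdown in, bleach-sanitized HTML out,
# wrapped in a div with the default "markdown" class. The input text is
# illustrative.
md = "# Hello\n\nSome *emphasis* and a <script>alert(1)</script> tag."
print(parse_md(md))
# -> roughly: <div class="markdown"><h1>Hello</h1><p>Some <em>emphasis</em>
#    and a &lt;script&gt;alert(1)&lt;/script&gt; tag.</p></div>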
| 29.984848
| 79
| 0.560889
| 254
| 1,979
| 4.279528
| 0.468504
| 0.036799
| 0.033119
| 0.027599
| 0.073597
| 0.044158
| 0.044158
| 0
| 0
| 0
| 0
| 0.008242
| 0.264275
| 1,979
| 65
| 80
| 30.446154
| 0.738324
| 0.060131
| 0
| 0.089286
| 0
| 0
| 0.153556
| 0.01347
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.089286
| 0.017857
| 0.178571
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6d82fc284eef62f6b254b22655051352ba00a72
| 532
|
py
|
Python
|
src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | 1
|
2021-05-18T16:10:49.000Z
|
2021-05-18T16:10:49.000Z
|
src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
src/server_3D/API/Rice/factoryTypes/hybridShapeFactoryMeth/addNewLinePtPt.py
|
robertpardillo/Funnel
|
f45e419f55e085bbb95e17c47b4c94a7c625ba9b
|
[
"MIT"
] | null | null | null |
from ...abstractObjects.hybridShapes.line import LinePtPt
def AddNewLinePtPt(self, geometrical_set, start, end):
part = geometrical_set.parentsDict['Part']
reference1 = part._createReferenceFromObject(start)
reference2 = part._createReferenceFromObject(end)
cat_constructor = self.cat_constructor.AddNewLinePtPt(reference1, reference2)
geometrical_set.cat_constructor.AppendHybridShape(cat_constructor)
line = LinePtPt(geometrical_set.parentsDict, cat_constructor, start, end)
return line
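A hedged sketch of the call shape; every name below (factory, gset, p1, p2) is a hypothetical stand-in for objects supplied by the surrounding Funnel framework:
# Hypothetical call site (illustrative names only):
#   line = factory.AddNewLinePtPt(gset, p1, p2)
# gset.parentsDict['Part'] must resolve the owning Part, p1/p2 must be point
# wrappers accepted by _createReferenceFromObject, and the returned LinePtPt
# wraps the hybrid shape just appended to the geometrical set.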
| 40.923077
| 82
| 0.781955
| 52
| 532
| 7.788462
| 0.423077
| 0.17284
| 0.123457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008734
| 0.139098
| 532
| 13
| 83
| 40.923077
| 0.875546
| 0
| 0
| 0
| 0
| 0
| 0.007692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6d9810ee3519ae415fa0512f84807c328a50106
| 1,223
|
py
|
Python
|
Lab Activity 6.py
|
Jeralph-Red/OOP-58001
|
4e38f9a0a58098a121a61e640a53e9568bf529b0
|
[
"Apache-2.0"
] | null | null | null |
Lab Activity 6.py
|
Jeralph-Red/OOP-58001
|
4e38f9a0a58098a121a61e640a53e9568bf529b0
|
[
"Apache-2.0"
] | null | null | null |
Lab Activity 6.py
|
Jeralph-Red/OOP-58001
|
4e38f9a0a58098a121a61e640a53e9568bf529b0
|
[
"Apache-2.0"
] | null | null | null |
from tkinter import *
class SemGrade:
def __init__(self, win):
self.lbl1=Label(win, text='Prelim:')
self.lbl2=Label(win, text='Midterm:')
self.lbl3=Label(win, text='Final:')
self.lbl4=Label(win, text='Semestral Grade:')
self.t1=Entry(bd=3)
self.t2=Entry(bd=3)
self.t3=Entry(bd=3)
self.t4=Entry(bd=3)
self.btn1 = Button(win, text='Add')  # note: created but never placed or wired up
self.b1 = Button(win, text='Compute for Semestral Grade', command=self.compute)
self.b1.place(x=100, y=150)
self.lbl1.place(x=70, y=50)
self.t1.place(x=180, y=50)
self.lbl2.place(x=70, y=80)
self.t2.place(x=180, y=80)
self.lbl3.place(x=70, y=110)
self.t3.place(x=180, y=110)
self.lbl4.place(x=70,y=190)
self.t4.place(x=180,y=190)
def compute(self):
self.t4.delete(0, 'end')
num1=int(self.t1.get())
num2=int(self.t2.get())
num3=int(self.t3.get())
result=(num1+num2+num3)/3
self.t4.insert(END, str(result))
window=Tk()
mywin=SemGrade(window)
window.title('Semestral Grade Calculator')
window.geometry("400x300+10+10")
window.mainloop()
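The semestral grade is the plain average of the three term grades; a GUI-free sketch of the same arithmetic (a hypothetical helper, not in the original file):
def semestral_grade(prelim, midterm, final):
    # Same computation as SemGrade.compute, minus the tkinter widgets.
    return (prelim + midterm + final) / 3

assert semestral_grade(90, 80, 70) == 80.0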
| 31.358974
| 88
| 0.567457
| 186
| 1,223
| 3.709677
| 0.349462
| 0.078261
| 0.069565
| 0.069565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101883
| 0.261652
| 1,223
| 39
| 89
| 31.358974
| 0.662237
| 0
| 0
| 0
| 0
| 0
| 0.091906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.029412
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6da86aae41063146c3bc7bd5c1f243c9c0368e2
| 1,853
|
py
|
Python
|
parse_wfd.py
|
ajsimon1/Cazar
|
6831dbdb63764ad2159eaad45fe2b6cfc7edd553
|
[
"MIT"
] | null | null | null |
parse_wfd.py
|
ajsimon1/Cazar
|
6831dbdb63764ad2159eaad45fe2b6cfc7edd553
|
[
"MIT"
] | null | null | null |
parse_wfd.py
|
ajsimon1/Cazar
|
6831dbdb63764ad2159eaad45fe2b6cfc7edd553
|
[
"MIT"
] | null | null | null |
import os
import sys
import pandas as pd
from xml.etree import ElementTree as et
cwd = os.getcwd()
filepath = 'C:\\Users\\asimon\\Desktop\\Practice-' \
'Training\\p21_template_out3.xml'
def parse_wfd_xml(filepath):
tree = et.parse(filepath)
root = tree.getroot()
data, page = root.findall('.//LineDataInput/LDILayout/Nodes/Node/Node')
data_dict = {}
page_dict = {}
for i in data.findall('./Node/Node/Content'):
data_dict[i.find('Name').text] = i.find('Guid').text
df_data = pd.DataFrame.from_dict(data_dict,
orient='index',
columns=['guid'])
for i in page.findall('./Node/Node/Node/Content'):
try:
page_dict[i.find('DataVariable').text] = [i.find('Name').text,
i.find('Size').get('X'),
i.find('Size').get('Y'),
i.find('Offset').get('X'),
i.find('Offset').get('Y')]
except AttributeError:
pass
df_page = pd.DataFrame.from_dict(page_dict,
orient='index',
columns=['name',
'size_x',
'size_y',
'offset_x',
'offset_y'])
# df_combined = df_data.join(df_page, on='guid')
# possible drop NaNs?
return df_data.join(df_page, on='guid')
if __name__ == '__main__':
df = parse_wfd_xml(filepath)
writer = pd.ExcelWriter('wfd_output.xlsx')
df.to_excel(writer, 'Sheet1')
writer.save()
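The input shape parse_wfd_xml expects, inferred from its XPath queries (element names come from the code above; the exact nesting is an assumption):
# .//LineDataInput/LDILayout/Nodes/Node/Node -> exactly two nodes: data, page
# data: ./Node/Node/Content elements, each with <Name> and <Guid> children
# page: ./Node/Node/Node/Content elements, each with <DataVariable>, <Name>,
#       <Size X=".." Y=".."/> and <Offset X=".." Y=".."/> children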
| 39.425532
| 75
| 0.447922
| 188
| 1,853
| 4.228723
| 0.414894
| 0.050314
| 0.033962
| 0.047799
| 0.138365
| 0.100629
| 0.055346
| 0
| 0
| 0
| 0
| 0.003745
| 0.423637
| 1,853
| 46
| 76
| 40.282609
| 0.740637
| 0.035618
| 0
| 0.05
| 0
| 0
| 0.156951
| 0.075112
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0.025
| 0.1
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6dbe3048a8498d4b259596610f445fd78aa7173
| 17,022
|
py
|
Python
|
p20191120_wada.py
|
tmseegoslo/wada
|
1f0163ccc0e0815ae7586291712f8920b00cf7ba
|
[
"Apache-2.0"
] | null | null | null |
p20191120_wada.py
|
tmseegoslo/wada
|
1f0163ccc0e0815ae7586291712f8920b00cf7ba
|
[
"Apache-2.0"
] | null | null | null |
p20191120_wada.py
|
tmseegoslo/wada
|
1f0163ccc0e0815ae7586291712f8920b00cf7ba
|
[
"Apache-2.0"
] | null | null | null |
#MNE tutorial
#Import modules
import os
import numpy as np
import mne
import re
import complexity_entropy as ce
#Import specific modules for filtering
from numpy.fft import fft, fftfreq
from scipy import signal
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import matplotlib.pyplot as plt
### PUT ALL PARAMETERS HERE ###
### ### ### ### ### ### ### ###
### PUT FUNCTIONS HERE OR BETTER, IN SEPARATE FILE ###
### ### ### ### ### ### ### ### ### ### ### ### ### ###
#Path(s) to data #UPDATE TO READ ALL SUBFOLDERS IN A FOLDER
data_folder = r'Y:\Data\Wada Data Swiss\Visit_JFS_BJE\Originals'
data_raw_file = os.path.join(data_folder,
'wadatest_14_06_19.edf')
### LOOP OVER ALL SUBJECTS FOR PREPROCESSING ###
### consider putting pre-processing ###
#Read data
raw = mne.io.read_raw_edf(data_raw_file, misc=['ECG EKG-REF'],
stim_channel='Event EVENT-REF', preload=True)
#Convenience function to trim channel names
def ch_rename(oldname):
return re.findall(r"\s.+-", oldname)[0][1:-1]
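# Example with a label from this recording: ch_rename('ECG EKG-REF') -> 'EKG'
# (keeps the text between the first whitespace and the last '-').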
#Trim channel names
raw.rename_channels(ch_rename)
#Print overall and detailed info about raw dataset
print(raw)
print(raw.info)
#Read montage
montage = mne.channels.make_standard_montage('standard_postfixed')
#Set montage
raw.set_montage(montage)
#Plot sensor locations
#raw.plot_sensors(show_names=True)
#Temporarily add dummy annotation to spare user from adding new label
raw.annotations.append(onset=raw.times[0]-1.0, duration=0.0, description='Slow EEG')
#Plot raw EEG traces. Mark onset of slow EEG
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Mark onset of slow EEG')
#Crop data around the newly inserted marker
seg_length = 300 #seconds
times_slow = [a['onset'] for a in raw.annotations if 'Slow' in a['description']]
tmin = times_slow[1]-seg_length
tmax = times_slow[1]+seg_length
raw = raw.crop(tmin=tmin,tmax=tmax)
#Temporarily add dummy annotation to spare user from adding new label
raw.annotations.append(onset=raw.times[0]-1.0, duration=0.0, description='BAD_segments')
#Plot raw EEG traces. Reject obviously bad channels and mark bad segments
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Reject obviously bad channels and bad segments')
# Making and inserting events for epoching data
epoch_length = 10.0 # sec
overlap = 9.0 # sec
event_id = 1
t_min = 0.0
events = mne.make_fixed_length_events(raw, id=event_id, start=t_min,
stop=None, duration=epoch_length,
first_samp=True, overlap=overlap)
raw.add_events(events, stim_channel='EVENT', replace=False)
# Check that events are in the right place
raw.plot(start=0, duration=15, n_channels=26,
scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
remove_dc=True, title='Check position of events', events=events)
# Read epochs
rawepochs = mne.Epochs(raw, events=events, event_id=event_id, tmin=t_min,
tmax=epoch_length, baseline=(None, None), picks='eeg',
preload=True, reject=None, proj=False)
#Plot epoched data
rawepochs.plot(n_epochs=10, n_channels=22, scalings=dict(eeg=1e-4, misc=1e-3, stim=100))
#Plot power spectrum
rawepochs.plot_psd(fmax=180)
#Filter the data from 1-100 Hz using the default options
#NOTE: Usually you should apply high-pass and low-pass filter separately, but
#this is done 'behind the scenes' in this case
epochs = rawepochs.copy().filter(1, 80, picks='eeg', filter_length='auto',
l_trans_bandwidth='auto', h_trans_bandwidth='auto',
method='fir', phase='zero', fir_window='hamming',
fir_design='firwin')
#Plot power spectra
epochs.plot_psd(fmax=180)
#Plot epoched EEG traces. Reject obviously bad channels and mark bad segments
epochs.plot(n_epochs=10, n_channels=22, scalings=dict(eeg=3e-4, misc=1e-3, stim=100),
title='Reject obviously bad channels and bad segments')
#Set up and fit the ICA
ica = mne.preprocessing.ICA(method = 'infomax', fit_params=dict(extended=True),
random_state=0, max_iter=1000)
ica.fit(epochs, picks='eeg')
#Quick look at components
ica.plot_components(inst=epochs, plot_std=True,
picks='eeg',
psd_args=dict(fmax=85))
#Plot time course of ICs
ica.plot_sources(epochs)
# =============================================================================
# #Check components one by one and mark bad ones
# n_comps = ica.get_components().shape[1]
# is_brain = [True for i in range(0,n_comps)]
# print('Press a keyboard key for brain, and a mouse button for non-brain')
# for i in range(0,n_comps) :
# ica.plot_properties(prep, picks=i, psd_args=dict(fmin=0, fmax=110))
# is_brain[i] = plt.waitforbuttonpress()
# plt.close()
# idx_bad = [i for i, x in enumerate(is_brain) if not(x)]
# ica.exclude = idx_bad
# =============================================================================
ica.apply(epochs)
#Plot cleaned data
epochs.plot(scalings=dict(eeg=3e-4, misc=1e-3, stim=1),n_epochs=5)
#Compare power spectra
epochs.plot_psd(fmax=90)
#Set bipolar (double banana) reference
anodes = ['Fp2', 'F8', 'T4', 'T6', 'Fp1', 'F7', 'T3', 'T5',
'Fp2', 'F4', 'C4', 'P4', 'Fp1', 'F3', 'C3', 'P3',
'Fz', 'Cz',
'T6', 'T5',
'T4', 'T3']
cathodes = ['F8', 'T4', 'T6', 'O2', 'F7', 'T3', 'T5', 'O1',
'F4', 'C4', 'P4', 'O2', 'F3', 'C3', 'P3', 'O1',
'Cz', 'Pz',
'A2', 'A1',
'T2', 'T1']
#Read montage
montage = mne.channels.make_standard_montage('standard_postfixed')
#Set montage
epochs.set_montage(montage)
prep_bi = mne.set_bipolar_reference(epochs, anodes, cathodes,
drop_refs=False)
#Print info for bipolar (double banana) reference raw data
print(prep_bi)
print(prep_bi.info['ch_names'])
#WARNING: Plotting of sensor locations does not work, set locations first
#Plot sensor locations for bipolar (double banana) reference raw data
#raw_bi.plot_sensors(show_names=True)
# =============================================================================
# order=np.array([0, 2, 4, 6, 21, 8, 22, 23, 10, 12,
# 14, 15,
# 1, 3, 5, 7, 18, 9, 19, 20, 11, 13,
# 16, 17])
# =============================================================================
ch_names = ['T3-T1', 'T5-A1', 'Fp1-F7', 'F7-T3', 'T3-T5', 'T5-O1', 'Fp1-F3',
'F3-C3', 'C3-P3', 'P3-O1', 'Fz-Cz', 'Cz-Pz', 'Fp2-F4', 'F4-C4',
'C4-P4', 'P4-O2', 'Fp2-F8', 'F8-T4', 'T4-T6', 'T6-O2', 'T4-T2',
'T6-A2', 'EKG', 'EVENT']
# =============================================================================
# ch_names = ['T1-A1','F7-A1','T3-A1','T5-A1','Fp1-A1','F3-A1','C3-A1','P3-A1','O1-A1',
# 'Fz-Cz','Pz-Cz',
# 'O2-A2','P4-A2','C4-A2','F4-A2','Fp2-A2','T6-A2','T4-A2','F8-A2','T2-A2',
# 'EKG','EVENT']
# =============================================================================
prep_bi.reorder_channels(ch_names)
#Plot re-referenced data (bipolar double banana reference)
prep_bi.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False)
#Compare power spectra
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
ax.set_xlim(0, 110)
ax.set_ylim(-70, 50)
#raw.plot_psd(fmax=110, ax=ax)
prep_bi.plot_psd(fmax=110, ax=ax)
prep_short = prep_bi.copy()
# =============================================================================
# # Filter again
# prep_short = prep_short.filter(1, 80, picks='eeg', filter_length='auto',
# l_trans_bandwidth='auto', h_trans_bandwidth='auto',
# method='fir', phase='zero', fir_window='hamming',
# fir_design='firwin')
# #Compare power spectra
# fig = plt.figure()
# ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
# ax.set_xlim(0, 100)
# ax.set_ylim(-70, 50)
# prep_short.plot_psd(fmax=100, ax=ax)
# =============================================================================
#prep_short = prep_short.crop(tmin=3840,tmax=4740)
#Plot cropped data
prep_short.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False)
#Get start of infusion.
#WARNING: Hard coded index + not equal to start of slowing of EEG
#time_ipsi_slow = prep_short.annotations[0]['onset']-prep_short._first_time
time_ipsi_slow = prep_short.annotations[1]['onset']-prep_short._first_time #!!! Horrible hack! Manually inserted annotation
epoch_length = 16
time_first_event = time_ipsi_slow - epoch_length*(time_ipsi_slow//epoch_length)
events = mne.make_fixed_length_events(prep_short, id=1, start=time_first_event,
stop=None, duration=epoch_length,
first_samp=True, overlap=0.0)
prep_short.add_events(events, stim_channel='EVENT', replace=False)
#Plot data with added events
prep_short.plot(start=0, duration=15, n_channels=24,
scalings=dict(eeg=1e-4, misc=1e-3, stim=100),
remove_dc=False, events=events)
# Read epochs
epochs = mne.Epochs(prep_short, events=events, event_id=1, tmin=0.0,
tmax=epoch_length, baseline=(None, None), picks='eeg',
preload=True, reject=None, proj=False)
#Plot epoched data
epochs.plot(n_epochs=3, n_channels=22, scalings=dict(eeg=1e-4, misc=1e-3, stim=100))
#Get the 3D matrix of epoched EEG-data
data = epochs.get_data(picks='eeg')
idx_left = [2,3,4,5,6,7,8,9] #[3,4,7,8] #[2,3,4,5,7,8]
idx_right = [12,13,14,15,16,17,18,19] #[13,14,17,18] #[13,14,16,17,18,19]
idx_all = idx_left+idx_right #[3,4,7,8,13,14,17,18]
#Calculate Lempel-Ziv complexity
LZC = np.zeros(data.shape[0])
LZCcontra = np.zeros(data.shape[0])
LZCipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
LZC[i] = ce.LZc(np.transpose(data[i,idx_all,:]))
LZCcontra[i] = ce.LZc(np.transpose(data[i,idx_left,:]))
LZCipsi[i] = ce.LZc(np.transpose(data[i,idx_right,:]))
#Plot LZC vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
#plt.plot(range(1,data.shape[0]+1), LZC/LZC[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), LZCcontra/LZCcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), LZCipsi/LZCipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), LZC/LZC[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), LZCcontra/LZCcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), LZCipsi/LZCipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidate',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('LZC/LZC_baseline')
plt.legend(('tLZCcontra', 'tLZCipsi'))
plt.title('Lempel-Ziv complexity - 16s epochs - 8 bipolar channels - 1-30 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#Calculate amplitude coalition entropy
ACE = np.zeros(data.shape[0])
ACEcontra = np.zeros(data.shape[0])
ACEipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
ACE[i] = ce.ACE(data[i,idx_all,:])
ACEcontra[i] = ce.ACE(data[i,idx_left,:])
ACEipsi[i] = ce.ACE(data[i,idx_right,:])
#Plot ACE vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
#plt.plot(range(1,data.shape[0]+1), ACE/ACE[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), ACEcontra/ACEcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), ACEipsi/ACEipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), ACE/ACE[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), ACEcontra/ACEcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), ACEipsi/ACEipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidate',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('ACE/ACE_baseline')
plt.legend(('ACEcontra', 'ACEipsi'))
plt.title('Amplitude coalition entropy - 16s epochs - 8 bipolar channels - 1-35 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#Calculate synchrony coalition entropy
SCE = np.zeros(data.shape[0])
SCEcontra = np.zeros(data.shape[0])
SCEipsi = np.zeros(data.shape[0])
for i in range(0,data.shape[0]) :
SCE[i] = ce.SCE(data[i,idx_all,:])
SCEcontra[i] = ce.SCE(data[i,idx_left,:])
SCEipsi[i] = ce.SCE(data[i,idx_right,:])
#Plot SCE vs epoch number
fig = plt.figure()
ax = fig.add_axes([0.1, 0.1, 0.85, 0.85])
#plt.plot(range(1,data.shape[0]+1), SCE/SCE[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), SCEcontra/SCEcontra[50:60].mean())
#plt.plot(range(1,data.shape[0]+1), SCEipsi/SCEipsi[50:60].mean())
#plt.step(range(1,data.shape[0]+1), SCE/SCE[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), SCEcontra/SCEcontra[50:60].mean(),where='mid')
plt.step(range(1,data.shape[0]+1), SCEipsi/SCEipsi[50:60].mean(),where='mid')
ylim = ax.get_ylim()
plt.plot([59.5, 59.5],ylim,'k:')
plt.text(59.5, ylim[1]+0.02*(ylim[1]-ylim[0]),'Start Etomidate',horizontalalignment='center')
plt.plot([50, 113],[1, 1],'k:')
ax.set_xlim(50, 113)
ax.set_ylim(ylim)
plt.xlabel('Epoch number')
plt.ylabel('SCE/SCE_baseline')
plt.legend(('SCEcontra', 'SCEipsi'))
plt.title('Synchrony coalition entropy - 16s epochs - 8 bipolar channels - 1-35 Hz')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
## POSSIBLY USEFUL ##
# =============================================================================
# #Resample if needed (Warning: looking at PSD there seems to be some passband-ripples?)
# prep = raw.copy().resample(64)
#
# #Compare power spectra
# raw.plot_psd(fmax=32)
# prep.plot_psd(fmax=32)
#
# #Compare EEG traces
# raw.plot(start=0, duration=15, n_channels=26,
# scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
# remove_dc=True)
# prep.plot(start=0, duration=15, n_channels=26,
# scalings=dict(eeg=1e-4, misc=1e-3, stim=1),
# remove_dc=True)
# =============================================================================
# =============================================================================
# #Construct and visualize FIR filter (recommended over IIR for most applications)
# sfreq = 1000.
# f_p = 40.
# flim = (1.0, sfreq / 2.0) # limits for plotting
# nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
# freq = [0, f_p, f_p, nyq]
# gain = [1, 1, 0, 0]
#
# third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1.0 / 3.]
# ax = plt.subplots(1, figsize=third_height)[1]
# plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
# =============================================================================
## GRAVEYARD ##
# =============================================================================
# stim_data = np.zeros((1, len(prep_short.times)))
# info = mne.create_info(['STI'], raw.info['sfreq'], ['stim'])
# stim_raw = mne.io.RawArray(stim_data, info)
# raw.add_channels([stim_raw], force_update_info=True)
#
# =============================================================================
# =============================================================================
# #Set bipolar (double banana) reference
# anodes = ['Fp2', 'F8', 'T4', 'T6', 'Fp1', 'F7', 'T3', 'T5',
# 'Fp2', 'F4', 'C4', 'P4', 'Fp1', 'F3', 'C3', 'P3',
# 'Fz', 'Cz',
# 'T6', 'T5',
# 'T4', 'T3']
# cathodes = ['F8', 'T4', 'T6', 'O2', 'F7', 'T3', 'T5', 'O1',
# 'F4', 'C4', 'P4', 'O2', 'F3', 'C3', 'P3', 'O1',
# 'Cz', 'Pz',
# 'A2', 'A1',
# 'T2', 'T1']
# raw_bi = mne.set_bipolar_reference(raw, anodes, cathodes)
# #Print info for bipolar (double banana) reference raw data
# print(raw_bi)
# print(raw_bi.info)
# #WARNING: Plotting of sensor locations does not work, set locations first
# #Plot sensor locations for bipolar (double banana) reference raw data
# #raw_bi.plot_sensors(show_names=True)
# =============================================================================
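Each complexity trace above is normalized to its pre-injection baseline (epochs 50:60); the same step in isolation, with illustrative values:
# Minimal sketch of the baseline normalization used in the LZC/ACE/SCE plots.
# `demo_series` stands in for one complexity measure per epoch (values illustrative).
demo_series = np.array([0.80, 0.82, 0.79, 0.50, 0.45])
demo_baseline = demo_series[0:3].mean()   # the script averages epochs 50:60
print(demo_series / demo_baseline)        # ~1.0 at baseline, <1.0 after injection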
| 39.311778
| 124
| 0.584714
| 2,508
| 17,022
| 3.870415
| 0.183014
| 0.006387
| 0.030906
| 0.027815
| 0.56722
| 0.529
| 0.498506
| 0.490265
| 0.464201
| 0.450088
| 0
| 0.057893
| 0.179062
| 17,022
| 432
| 125
| 39.402778
| 0.636754
| 0.440783
| 0
| 0.308511
| 0
| 0
| 0.120981
| 0.00568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005319
| false
| 0
| 0.053191
| 0.005319
| 0.06383
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6dc529d66bad976f5633ed5b6e53c5c1922f83f
| 1,790
|
py
|
Python
|
classifiers.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | 1
|
2021-01-12T19:12:06.000Z
|
2021-01-12T19:12:06.000Z
|
classifiers.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | null | null | null |
classifiers.py
|
mavroudisv/Mahalanobis-Classifier
|
9029b2d84215afd02d8ccdbe3be7ea875b83deb6
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sp
import scipy.linalg  # explicitly load the linalg submodule used in mahalanobis()
class MahalanobisClassifier():
def __init__(self, samples, labels):
self.clusters={}
for lbl in np.unique(labels):
self.clusters[lbl] = samples.loc[labels == lbl, :]
def mahalanobis(self, x, data, cov=None):
"""Compute the Mahalanobis Distance between each row of x and the data
x : vector or matrix of data with, say, p columns.
data : ndarray of the distribution from which Mahalanobis distance of each observation of x is to be computed.
cov : covariance matrix (p x p) of the distribution. If None, will be computed from data.
"""
x_minus_mu = x - np.mean(data)
if cov is None:
cov = np.cov(data.values.T)
inv_covmat = sp.linalg.inv(cov)
left_term = np.dot(x_minus_mu, inv_covmat)
mahal = np.dot(left_term, x_minus_mu.T)
return mahal.diagonal()
def predict_probability(self, unlabeled_samples):
dists = np.array([])
def dist2prob(D):
row_sums = D.sum(axis=1)
D_norm = (D / row_sums[:, np.newaxis])
S = 1 - D_norm
row_sums = S.sum(axis=1)
S_norm = (S / row_sums[:, np.newaxis])
return S_norm
#Distance of each sample from all clusters
for lbl in self.clusters:
tmp_dists=self.mahalanobis(unlabeled_samples, self.clusters[lbl])
if len(dists)!=0:
dists = np.column_stack((dists, tmp_dists))
else:
dists = tmp_dists
return dist2prob(dists)
def predict_class(self, unlabeled_sample, ind2label):
return np.array([ind2label[np.argmax(row)] for row in self.predict_probability(unlabeled_sample)])
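A hedged usage sketch with a toy dataset (column names, labels and the ind2label mapping are illustrative):
if __name__ == '__main__':
    import pandas as pd
    # Two well-separated 2-D clusters labelled 0 and 1.
    rng = np.random.default_rng(0)
    X = pd.DataFrame(np.vstack([rng.normal(0, 1, (50, 2)),
                                rng.normal(5, 1, (50, 2))]),
                     columns=['x', 'y'])
    y = np.array([0] * 50 + [1] * 50)
    clf = MahalanobisClassifier(X, y)
    print(clf.predict_probability(X.iloc[:3]))   # per-cluster probabilities
    print(clf.predict_class(X.iloc[:3], ind2label={0: 'a', 1: 'b'}))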
| 37.291667
| 118
| 0.6
| 242
| 1,790
| 4.301653
| 0.371901
| 0.04611
| 0.023055
| 0.03074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006426
| 0.304469
| 1,790
| 47
| 119
| 38.085106
| 0.829719
| 0.205028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.060606
| 0.030303
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6dcf725bd23764de094f21a2a52e9e26e955427
| 1,982
|
py
|
Python
|
augmentation/postprocessor.py
|
abamaxa/docvision_generator
|
8017f29c7d908cb80ddcd59e345a222271fa74de
|
[
"MIT"
] | 2
|
2020-02-06T17:30:41.000Z
|
2020-08-04T10:35:46.000Z
|
augmentation/postprocessor.py
|
abamaxa/docvision_generator
|
8017f29c7d908cb80ddcd59e345a222271fa74de
|
[
"MIT"
] | null | null | null |
augmentation/postprocessor.py
|
abamaxa/docvision_generator
|
8017f29c7d908cb80ddcd59e345a222271fa74de
|
[
"MIT"
] | null | null | null |
import os
import shutil
import json
import time
import cv2
import numpy as np
import PIL
def convert_image_to_numpy(image) :
(im_width, im_height) = image.size
image_np = np.frombuffer(image.tobytes(), dtype='uint8')
array_shape = (im_height, im_width, int(image_np.shape[0] / (im_height * im_width)))
return image_np.reshape(array_shape).astype(np.uint8)
def convert_numpy_to_image(image_np) :
image = PIL.Image.fromarray(image_np)
return image
def postprocess(image, erode_by) :
kernel = np.ones((erode_by, erode_by), np.uint8)
if isinstance(image, PIL.Image.Image) :
image = convert_image_to_numpy(image)
image = cv2.erode(image, kernel)
return convert_numpy_to_image(image)
else :
return cv2.erode(image, kernel)
def save_file(image, original_file, prefix, json_data) :
new_file = prefix + "E-" + original_file
cv2.imwrite(new_file, image)
json_filename = new_file[:-3] + "json"
json_data["filename"] = new_file
with open(json_filename, "w") as json_file :
json.dump(json_data, json_file, indent=4)
def erode_all(save_as_hsv) :
kernel7 = np.ones((7,7),np.uint8)
kernel5 = np.ones((5,5),np.uint8)
kernel3 = np.ones((3,3),np.uint8)
for file in os.listdir('.') :
if file.lower()[-3:] not in ("png", "jpg") :
continue
print(file)
json_filename = file[:-3] + "json"
with open(json_filename, "r") as json_file :
json_data = json.load(json_file)
image = cv2.imread(file)
if save_as_hsv :
image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
image3 = cv2.erode(image, kernel3)
save_file(image3, file, "3", json_data)
image5 = cv2.erode(image, kernel5)
save_file(image5, file, "5", json_data)
#image7 = cv2.erode(image, kernel7)
#save_file("7E-" + file, image7)
if __name__ == '__main__' :
erode_all(True)
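A minimal in-memory sketch of postprocess (the test image and output path are hypothetical, kept as comments so the __main__ behavior above is unchanged):
# demo = np.zeros((200, 200, 3), dtype=np.uint8)
# demo[50:150, 50:150] = 255                    # white square on black
# postprocess(PIL.Image.fromarray(demo), erode_by=5).save('eroded_demo.png')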
| 29.58209
| 88
| 0.639758
| 285
| 1,982
| 4.214035
| 0.301754
| 0.039967
| 0.054122
| 0.03164
| 0.079933
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028834
| 0.230071
| 1,982
| 67
| 89
| 29.58209
| 0.758191
| 0.032795
| 0
| 0
| 0
| 0
| 0.022965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.14
| 0
| 0.32
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6e4a42a16095039958ecdd10b4a917bcf6aef59
| 581
|
py
|
Python
|
resources/samd21flash.py
|
dotchetter/W.O.O.B.S
|
6055020f21c462940e9477192c831d8ad0b2669e
|
[
"MIT"
] | null | null | null |
resources/samd21flash.py
|
dotchetter/W.O.O.B.S
|
6055020f21c462940e9477192c831d8ad0b2669e
|
[
"MIT"
] | 13
|
2020-11-10T12:29:46.000Z
|
2020-11-20T00:04:02.000Z
|
resources/samd21flash.py
|
dotchetter/W.O.O.B.S
|
6055020f21c462940e9477192c831d8ad0b2669e
|
[
"MIT"
] | null | null | null |
import os
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-port")
parser.add_argument("-programmer")
parser.add_argument("-binary")
args = parser.parse_args()
port_norm = args.port
port_bootloader = f"{port_norm[0:3]}{int(port_norm[-1])+1}"
print("Issuing command to bootloader with 1200 baud")
os.system(f'cmd /k "mode {port_bootloader}:1200,n,8,1,p"')
print("Complete.\nFlashing device.")
os.system(f'cmd /k "{args.programmer}" --port={port_norm} -i -e -w -v -b {args.binary} -R')
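A hedged invocation example (the COM port, programmer path and binary name are hypothetical; note the bootloader-port arithmetic assumes a single-digit COM number):
# python samd21flash.py -port COM3 -programmer bossac.exe -binary firmware.bin
# -> opens COM4 at 1200 baud to trigger the bootloader, then flashes firmware.bin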
| 32.277778
| 95
| 0.666093
| 85
| 581
| 4.341176
| 0.529412
| 0.086721
| 0.138211
| 0.065041
| 0.070461
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028747
| 0.16179
| 581
| 18
| 95
| 32.277778
| 0.728953
| 0
| 0
| 0
| 0
| 0.071429
| 0.448454
| 0.118557
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6e60e06fca1a3189ef7b894a20c3b5c14557fda
| 41,045
|
py
|
Python
|
test/ontic_type_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 2
|
2017-11-06T12:01:20.000Z
|
2021-03-01T23:52:41.000Z
|
test/ontic_type_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 1
|
2016-12-02T04:04:03.000Z
|
2016-12-02T04:04:03.000Z
|
test/ontic_type_test.py
|
neoinsanity/ontic
|
2b313fb9fc45faf550791a797624c9997386c343
|
[
"Apache-2.0"
] | 2
|
2015-06-26T22:24:57.000Z
|
2016-12-01T02:15:36.000Z
|
"""Test the basic functionality of the base and core data types."""
from datetime import date, time, datetime
from typing import NoReturn
from ontic import OnticType
from ontic import property
from ontic import type as o_type
from ontic.meta import Meta
from ontic.property import OnticProperty
from ontic.schema import Schema
from ontic.validation_exception import ValidationException
from test.utils import BaseTestCase
class OnticTypeTest(BaseTestCase):
"""OnticType test cases."""
def test_object_type_instantiation(self) -> NoReturn:
"""OnticType instantiation to confirm dict behavior"""
schema = {'prop': {'type': 'int'}}
my_type = o_type.create_ontic_type('MyType', schema)
expected_dict = {'prop': 3}
my_object = my_type()
my_object.prop = 3
self.assertDictEqual(expected_dict, my_object)
def test_dynamic_access(self) -> NoReturn:
"""OnticType property access as a Dict and an Attribute."""
some_type = o_type.OnticType()
self.assert_dynamic_accessing(some_type)
def test_ontic_type_perfect(self) -> NoReturn:
"""Test the OnticType.perfect method."""
schema_def = Schema({
'prop_1': {'type': 'int'},
'prop_2': {'type': 'int', 'default': 20},
'prop_3': {'type': 'int', 'default': 30},
'prop_4': {'type': 'int', 'default': 40},
})
my_type = o_type.create_ontic_type('PerfectOntic', schema_def)
ontic_object = my_type()
ontic_object.prop_1 = 1
ontic_object.prop_3 = None
ontic_object.prop_4 = 400
ontic_object.extra_prop = 'Extra'
expected_dict = {
'prop_1': 1,
'prop_2': 20,
'prop_3': 30,
'prop_4': 400
}
ontic_object.perfect()
self.assertDictEqual(expected_dict, ontic_object)
def test_ontic_type_validate(self) -> NoReturn:
"""Test the OnticType.validate method."""
schema = {
'some_property': {'required': True},
'other_property': {'required': False}
}
# Create the o_type
my_type = o_type.create_ontic_type('RequireCheck', schema)
self.assertIsNotNone(my_type)
# Create object of o_type
ontic_object = my_type()
# Validate an empty object, which should cause ValueError
self.assertRaisesRegex(
ValidationException,
'The value for "some_property" is required.',
ontic_object.validate)
# Validate with data
ontic_object.some_property = 'Something'
ontic_object.other_property = 'Other'
o_type.validate_object(ontic_object)
def test_object_type_validate_value(self) -> NoReturn:
"""Test ObjectType.validate_value method."""
# Test that scalar property is valid.
single_property_schema = {
'prop1': {'type': 'str'}
}
my_type = o_type.create_ontic_type(
'GoodValidateValue', single_property_schema)
ontic_object = my_type({'prop1': 'Hot Dog'})
self.assertEqual([], ontic_object.validate_value('prop1'))
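The round trip these tests exercise, condensed into one hedged snippet (schema and values are illustrative, kept as comments so the test module's import behavior is unchanged):
# Person = o_type.create_ontic_type('Person', {'age': {'type': 'int', 'default': 0}})
# p = Person()
# p.perfect()                                    # applies defaults -> {'age': 0}
# p.age = 'not an int'
# errors = p.validate(raise_validation_exception=False)
# assert len(errors) == 1                        # one type-mismatch error reported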
class CreateOnticTypeTestCase(BaseTestCase):
"""Test the dynamic creation of Ontic types."""
def test_create_ontic_type_arg_errors(self):
"""Assert the create ontic o_type arg errors."""
self.assertRaisesRegex(
ValueError, 'The string "name" argument is required.',
o_type.create_ontic_type, name=None, schema=dict())
self.assertRaisesRegex(
ValueError, 'The schema dictionary is required.',
o_type.create_ontic_type, name='SomeName', schema=None)
self.assertRaisesRegex(
ValueError, 'The schema must be a dict.',
o_type.create_ontic_type, name='SomeName', schema=list())
def test_create_ontic_type(self) -> NoReturn:
"""The most simple and basic dynamic Ontic."""
# Test creation from raw dictionary.
my_type = o_type.create_ontic_type('Simple', dict())
self.assertIsNotNone(my_type)
ontic_object = my_type()
self.assert_dynamic_accessing(ontic_object)
self.assertIsInstance(ontic_object, my_type)
# Test creation using a Schema object.
my_type = o_type.create_ontic_type('AnotherSimple', Schema())
self.assertIsNotNone(my_type)
ontic_object = my_type()
self.assert_dynamic_accessing(ontic_object)
self.assertIsInstance(ontic_object, my_type)
class PerfectObjectTestCase(BaseTestCase):
"""Test ontic_type.perfect_object method."""
def test_bad_perfect_usage(self) -> NoReturn:
"""Ensure handling of bad arguments to perfect)_object method."""
self.assertRaisesRegex(
ValueError,
r'"the_object" must be provided.',
o_type.perfect_object, None)
self.assertRaisesRegex(
ValueError,
r'"the_object" must be OnticType type.',
o_type.perfect_object, {})
def test_valid_perfect_usage(self) -> NoReturn:
"""Ensure that the perfect behavior is correct."""
schema_def = Schema({
'prop_1': {'type': 'int'},
'prop_2': {'type': 'int', 'default': 20},
'prop_3': {'type': 'int', 'default': 30},
'prop_4': {'type': 'int', 'default': 40},
})
my_type = o_type.create_ontic_type('PerfectOntic', schema_def)
ontic_object = my_type()
ontic_object.prop_1 = 1
ontic_object.prop_3 = None
ontic_object.prop_4 = 400
ontic_object.extra_prop = 'Extra'
expected_dict = {
'prop_1': 1,
'prop_2': 20,
'prop_3': 30,
'prop_4': 400
}
o_type.perfect_object(ontic_object)
self.assertDictEqual(expected_dict, ontic_object)
def test_perfect_collection_types(self) -> NoReturn:
"""Ensure that collection defaults are handled correctly."""
schema_def = Schema({
'dict_prop': {
'type': 'dict',
'default': {'a': 1, 'b': 2, 'c': 3}
},
'list_prop': {
'type': 'list',
'default': [1, 2, 3]
},
'set_prop': {
'type': 'set',
'default': {1, 2, 3}
}
})
my_type = o_type.create_ontic_type('PerfectCollection', schema_def)
ontic_object = my_type()
o_type.perfect_object(ontic_object)
# Test that the collection values are equal
self.assertDictEqual(schema_def.dict_prop.default,
ontic_object.dict_prop)
self.assertListEqual(schema_def.list_prop.default,
ontic_object.list_prop)
self.assertSetEqual(schema_def.set_prop.default,
ontic_object.set_prop)
# Ensure that the collections are not the same objects
self.assertIsNot(schema_def.dict_prop.default,
ontic_object.dict_prop)
self.assertIsNot(schema_def.list_prop.default,
ontic_object.list_prop)
self.assertIsNot(schema_def.set_prop.default,
ontic_object.set_prop)
def test_perfect_bad_collection_type(self) -> NoReturn:
"""Test for the handling of bad collection member o_type."""
def test_perfect_collection_default_copy(self) -> NoReturn:
"""Ensure that collection default settings are handled correctly."""
# Configure default collection.
default_dict = {'key': 'value'}
default_list = ['item']
inner_tuple = (1, 2)
outer_tuple = (inner_tuple, 3, 4)
default_set = {'entity', outer_tuple}
# Configure default collections to test deep copy behavior.
ontic_object = o_type.OnticType()
ontic_object.dict = default_dict
default_deep_dict = {'name': default_dict}
default_deep_list = [default_dict]
default_deep_set = {(inner_tuple, outer_tuple)}
schema_def = Schema({
'dict_no_default': {
'type': 'dict',
},
'list_no_default': {
'type': 'list',
},
'set_no_default': {
'type': 'set',
},
'dict_with_default': {
'type': 'dict',
'default': default_dict,
},
'list_with_default': {
'type': 'list',
'default': default_list,
},
'set_with_default': {
'type': 'set',
'default': default_set,
},
'dict_deep_default': {
'type': 'dict',
'default': default_deep_dict,
},
'list_deep_default': {
'type': 'list',
'default': default_deep_list,
},
'set_deep_default': {
'type': 'set',
'default': default_deep_set,
},
})
# Execute test subject.
my_type = o_type.create_ontic_type('CollectionDefaults', schema_def)
my_object = my_type()
o_type.perfect_object(my_object)
o_type.validate_object(my_object)
# Assert the no default state.
self.assertIsNone(my_object.dict_no_default)
self.assertIsNone(my_object.list_no_default)
self.assertIsNone(my_object.set_no_default)
# Assert equality and copy of defaults.
self.assertDictEqual(default_dict, my_object.dict_with_default)
self.assertIsNot(default_dict, my_object.dict_with_default)
self.assertListEqual(default_list, my_object.list_with_default)
self.assertIsNot(default_list, my_object.list_with_default)
self.assertSetEqual(default_set, my_object.set_with_default)
self.assertIsNot(default_set, my_object.set_with_default)
# Assert equality and copy of deep defaults.
self.assertDictEqual(default_dict, my_object.dict_deep_default['name'])
self.assertIsNot(default_deep_dict['name'],
my_object.dict_deep_default['name'])
self.assertDictEqual(default_dict, my_object.list_deep_default[0])
self.assertIsNot(default_deep_list[0], my_object.list_deep_default[0])
self.assertSetEqual(default_deep_set, my_object.set_deep_default)
self.assertIsNot(default_deep_set, my_object.set_deep_default)
def test_perfect_schema_bad_member_type(self) -> NoReturn:
"""Test perfect for bad member o_type."""
invalid_property_schema = OnticProperty(name='invalid_property')
invalid_property_schema.o_type = list
invalid_property_schema.member_type = 'UNKNOWN'
self.maxDiff = None
self.assertRaisesRegex(
ValidationException,
r"""The value "UNKNOWN" for "member_type" not in enumeration \[<class 'bool'>, <class 'complex'>, """
r"""<class 'datetime.date'>, <class 'datetime.datetime'>, <class 'datetime.time'>, <class 'dict'>, """
r"""<class 'float'>, <class 'int'>, <class 'list'>, <class 'set'>, <class 'str'>, <class 'tuple'>, None\].""",
property.validate_property, invalid_property_schema)
value_errors = property.validate_property(
invalid_property_schema,
raise_validation_exception=False)
self.assertEqual(1, len(value_errors))
self.assertEqual(
"""The value "UNKNOWN" for "member_type" not in enumeration [<class 'bool'>, <class 'complex'>, """
"""<class 'datetime.date'>, <class 'datetime.datetime'>, <class 'datetime.time'>, <class 'dict'>, """
"""<class 'float'>, <class 'int'>, <class 'list'>, <class 'set'>, <class 'str'>, <class 'tuple'>, None].""",
value_errors[0])
class ValidateObjectTestCase(BaseTestCase):
"""Test ontic_types.validate_object method basics."""
def test_bad_validate_object(self) -> NoReturn:
"""ValueError testing of validate_object."""
self.assertRaisesRegex(
ValueError,
'Validation can only support validation of objects derived from '
'ontic.ontic_type.OnticType.',
o_type.validate_object, None)
self.assertRaisesRegex(
ValueError,
'Validation can only support validation of objects derived from '
'ontic.ontic_type.OnticType.',
o_type.validate_object, 'Not a OnticType')
def test_validation_exception_handling(self) -> NoReturn:
"""Ensure that validate_object handles error reporting."""
schema_instance = Schema(some_attr={'type': 'int'})
my_type = o_type.create_ontic_type('ValidateCheck',
schema_instance)
ontic_object = my_type()
ontic_object.some_attr = 'WRONG'
self.assertRaisesRegex(
ValidationException,
r"""The value for "some_attr" is """
r"""not of type "<class 'int'>": WRONG""",
o_type.validate_object, ontic_object)
expected_errors = [
r"""The value for "some_attr" is not """
r"""of type "<class 'int'>": WRONG"""]
try:
o_type.validate_object(ontic_object)
self.fail('ValidationException should have been thrown.')
except ValidationException as ve:
self.assertListEqual(expected_errors, ve.validation_errors)
errors = o_type.validate_object(ontic_object,
raise_validation_exception=False)
self.assertListEqual(expected_errors, errors)
def test_type_setting(self) -> NoReturn:
"""Validate 'type' schema setting."""
schema = {
'bool_property': {'type': 'bool'},
'dict_property': {'type': 'dict'},
'float_property': {'type': 'float'},
'int_property': {'type': 'int'},
'list_property': {'type': 'list'},
'ontic_property': {'type': Meta},
'set_property': {'type': 'set'},
'str_property': {'type': 'str'},
'date_property': {'type': 'date'},
'time_property': {'type': 'time'},
'datetime_property': {'type': 'datetime'},
}
# Create the o_type
my_type = o_type.create_ontic_type('TypeCheck', schema)
self.assertIsNotNone(my_type)
# Create object of o_type
ontic_object = my_type()
# Validate an empty object.
o_type.validate_object(ontic_object)
# Validate with known good data.
ontic_object.bool_property = True
ontic_object.dict_property = {'some_key': 'some_value'}
ontic_object.ontic_property = Meta({'key': 'val'})
ontic_object.float_property = 3.4
ontic_object.int_property = 5
ontic_object.list_property = [5, 6, 7]
ontic_object.set_property = {'dog', 'cat', 'mouse'}
ontic_object.str_property = 'some_string'
ontic_object.date_property = date(2000, 1, 1)
ontic_object.time_property = time(12, 30, 30)
ontic_object.datetime_property = datetime(2001, 1, 1, 12, 30, 30)
o_type.validate_object(ontic_object)
# Validate with known bad data.
ontic_object.bool_property = 'Dog'
self.assertRaisesRegex(
ValidationException,
r"""The value for "bool_property" is not """
r"""of type "<class 'bool'>": Dog""",
o_type.validate_object, ontic_object)
ontic_object.bool_property = True
# Validate a string vs a list type
ontic_object.list_property = 'some_string'
self.assertRaisesRegex(
ValidationException,
r"""The value for "list_property" is not """
r"""of type "<class 'list'>": some_string""",
o_type.validate_object, ontic_object)
def test_type_bad_setting(self) -> NoReturn:
"""ValueError for bad 'type' setting."""
schema = {
'some_property': {'type': 'Unknown'}
}
self.assertRaisesRegex(
ValueError,
r"""Illegal type declaration: Unknown""",
o_type.create_ontic_type, 'Dummy', schema)
def test_required_setting(self) -> NoReturn:
"""Validate 'required' schema setting."""
schema = {
'some_property': {'required': True},
'other_property': {'required': False}
}
# Create the o_type
my_type = o_type.create_ontic_type('RequireCheck', schema)
self.assertIsNotNone(my_type)
# Create object of o_type
ontic_object = my_type()
# Validate an empty object, which should cause ValueError
self.assertRaisesRegex(
ValidationException,
'The value for "some_property" is required.',
o_type.validate_object, ontic_object)
# Validate with data
ontic_object.some_property = 'Something'
ontic_object.other_property = 'Other'
o_type.validate_object(ontic_object)
def test_enum_setting(self) -> NoReturn:
"""Validate 'enum' schema setting."""
# Scalar testing
# ###############
schema = {
'enum_property': {'enum': {'some_value', 99}}
}
# Create the o_type
my_type = o_type.create_ontic_type('EnumCheck', schema)
self.assertIsNotNone(my_type)
# Create object of o_type
ontic_object = my_type()
# Validate an empty object
o_type.validate_object(ontic_object)
# Validate a good setting
ontic_object.enum_property = 99
o_type.validate_object(ontic_object)
# Validate a bad setting
ontic_object.enum_property = 'bad, bad, bad'
self.assertRaisesRegex(
ValidationException,
r"""The value "bad, bad, bad" for "enum_property" not in """
r"""enumeration (\['some_value', 99\]|\[99, 'some_value'\])\.""",
o_type.validate_object, ontic_object)
def test_collection_enum_setting(self) -> NoReturn:
"""Validate 'enum' schema setting on collections."""
schema = {
'enum_property': {'type': 'list', 'enum': {'dog', 'cat'}}
}
# Create the o_type
my_type = o_type.create_ontic_type('EnumListCheck', schema)
self.assertIsNotNone(my_type)
# Create object of o_type
ontic_object = my_type()
# Validate an empty object, as required not set.
o_type.validate_object(ontic_object)
# Validate a good setting
ontic_object.enum_property = ['dog']
o_type.validate_object(ontic_object)
# Validate a bad setting
ontic_object.enum_property = ['fish']
self.assertRaisesRegex(
ValidationException,
r'''The value "fish" for "enum_property" not in'''
r''' enumeration \['cat', 'dog'\].''',
o_type.validate_object, ontic_object)
def test_min_setting(self) -> NoReturn:
"""Validate 'min' schema setting."""
schema = {
'str_min_property': {'type': 'str', 'min': 5},
'int_min_property': {'type': 'int', 'min': 10},
'float_min_property': {'type': 'float', 'min': 20},
'list_min_property': {'type': 'list', 'min': 1},
'set_min_property': {'type': 'set', 'min': 1},
'dict_min_property': {'type': 'dict', 'min': 1},
'date_min_property': {'type': 'date', 'min': date(2000, 1, 1)},
'time_min_property': {'type': 'time', 'min': time(12, 30, 30)},
'datetime_min_property': {
'type': 'datetime', 'min': datetime(2000, 1, 1, 12, 30, 30)}
}
my_type = o_type.create_ontic_type('MinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields
o_type.validate_object(ontic_object)
# Good test
ontic_object.str_min_property = '8 letters'
ontic_object.int_min_property = 20
ontic_object.float_min_property = 30.0
ontic_object.list_min_property = ['one item']
ontic_object.set_min_property = {'one item'}
ontic_object.dict_min_property = {'some_kee': 'one item'}
ontic_object.date_min_property = date(2001, 1, 1)
ontic_object.time_min_property = time(13, 30, 30)
ontic_object.datetime_min_property = datetime(2001, 1, 1)
o_type.validate_object(ontic_object)
# Str failure
ontic_object.str_min_property = '1'
self.assertRaisesRegex(
ValidationException,
'The value of "1" for "str_min_property" '
'fails min of 5.',
o_type.validate_object, ontic_object)
ontic_object.str_min_property = '8 letters'
# Int failure
ontic_object.int_min_property = 5
self.assertRaisesRegex(
ValidationException,
'The value of "5" for "int_min_property" '
'fails min of 10.',
o_type.validate_object, ontic_object)
ontic_object.int_min_property = 20
# Float failure
ontic_object.float_min_property = 15.0
self.assertRaisesRegex(
ValidationException,
'The value of "15.0" for "float_min_property" '
'fails min of 20.',
o_type.validate_object, ontic_object)
ontic_object.float_min_property = 30.0
# List failure
ontic_object.list_min_property = list()
self.assertRaisesRegex(
ValidationException,
r"""The value of "\[\]" for "list_min_property" """
r"""fails min of 1.""",
o_type.validate_object, ontic_object)
ontic_object.list_min_property = ['one item']
# Set failure
ontic_object.set_min_property = set()
self.assertRaisesRegex(
ValidationException,
r"""set\(\)" for "set_min_property" fails min of 1.""",
o_type.validate_object, ontic_object)
ontic_object.set_min_property = {'one item'}
# Dict failure
ontic_object.dict_min_property = dict()
self.assertRaisesRegex(
ValidationException,
'The value of "{}" for "dict_min_property" '
'fails min of 1.',
o_type.validate_object, ontic_object)
ontic_object.dict_min_property = {'some_key': 'one_item'}
# Date failure
ontic_object.date_min_property = date(1999, 1, 1)
self.assertRaisesRegex(
ValidationException,
'date_min_property" fails min of 2000-01-01.',
o_type.validate_object, ontic_object)
ontic_object.date_min_property = date(2001, 1, 1)
# Time failure
ontic_object.time_min_property = time(11, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "11:30:30" for "time_min_property" '
'fails min of 12:30:30.',
o_type.validate_object, ontic_object)
ontic_object.time_min_property = time(13, 30, 30)
# Datetime failure
ontic_object.datetime_min_property = datetime(1999, 1, 1, 11, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "1999-01-01 11:30:30" for "datetime_min_property" '
'fails min of 2000-01-01 12:30:30.',
o_type.validate_object, ontic_object)
def test_max_setting(self):
"""Validate 'max' schema setting."""
schema = {
'str_max_property': {'type': 'str', 'max': 5},
'int_max_property': {'type': 'int', 'max': 10},
'float_max_property': {'type': 'float', 'max': 20},
'list_max_property': {'type': 'list', 'max': 1},
'set_max_property': {'type': 'set', 'max': 1},
'dict_max_property': {'type': 'dict', 'max': 1},
'date_max_property': {'type': 'date', 'max': date(2000, 1, 1)},
'time_max_property': {'type': 'time', 'max': time(12, 30, 30)},
'datetime_max_property': {
'type': 'datetime', 'max': datetime(2000, 1, 1, 12, 30, 30)}
}
my_type = o_type.create_ontic_type('MaxCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields
o_type.validate_object(ontic_object)
# Good test
ontic_object.str_max_property = 'small'
ontic_object.int_max_property = 5
ontic_object.float_max_property = 10.0
ontic_object.list_max_property = ['one item']
ontic_object.set_max_property = {'one item'}
ontic_object.dict_max_property = {'some_kee': 'one item'}
ontic_object.date_max_property = date(1999, 1, 1)
ontic_object.time_max_property = time(11, 30, 30)
ontic_object.datetime_max_property = datetime(1999, 1, 1)
o_type.validate_object(ontic_object)
# Str failure
ontic_object.str_max_property = '8 letters'
self.assertRaisesRegex(
ValidationException,
'The value of "8 letters" for '
'"str_max_property" fails max of 5.',
o_type.validate_object, ontic_object)
ontic_object.str_max_property = 'small'
# Int failure
ontic_object.int_max_property = 20
self.assertRaisesRegex(
ValidationException,
'The value of "20" for "int_max_property" '
'fails max of 10.',
o_type.validate_object, ontic_object)
ontic_object.int_max_property = 5
# Float failure
ontic_object.float_max_property = 30.0
self.assertRaisesRegex(
ValidationException,
'The value of "30.0" for "float_max_property" fails max of 20.',
o_type.validate_object, ontic_object)
ontic_object.float_max_property = 15.0
# List failure
ontic_object.list_max_property = ['one item', 'two item']
self.assertRaisesRegex(
ValidationException,
r"""The value of "\['(one|two) item', '(one|two) item'\]" """
r"""for "list_max_property" fails max of 1.""",
o_type.validate_object, ontic_object)
ontic_object.list_max_property = ['one item']
# Set failure
ontic_object.set_max_property = {'one item', 'two item'}
expected_error = r"""The value of "{'(one|two) item', '(two|one) item'}" for "set_max_property" fails max of 1."""
self.assertRaisesRegex(
ValidationException,
expected_error,
o_type.validate_object, ontic_object)
# Dict failure
ontic_object.dict_max_property = {'some_key': 'one_item',
'another_key': 'two_item'}
self.assertRaisesRegex(
ValidationException,
r"""The value of """
r"""("{'some_key': 'one_item', 'another_key': 'two_item'}"|"""
r""""{'another_key': 'two_item', 'some_key': 'one_item'}")"""
r""" for "dict_max_property" fails max of 1.""",
o_type.validate_object, ontic_object)
ontic_object.dict_max_property = {'some_key': 'one_item'}
# Date failure
ontic_object.date_max_property = date(2001, 1, 1)
self.assertRaisesRegex(
ValidationException,
'The value of "2001-01-01" for '
'"date_max_property" fails max of 2000-01-01.',
o_type.validate_object, ontic_object)
ontic_object.date_max_property = date(2001, 1, 1)
# Time failure
ontic_object.time_max_property = time(13, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "13:30:30" for "time_max_property" '
'fails max of 12:30:30.',
o_type.validate_object, ontic_object)
ontic_object.time_max_property = time(13, 30, 30)
# Datetime failure
ontic_object.datetime_max_property = datetime(2001, 1, 1, 11, 30, 30)
self.assertRaisesRegex(
ValidationException,
'The value of "2001-01-01 11:30:30" for "datetime_max_property" '
'fails max of 2000-01-01 12:30:30.',
o_type.validate_object, ontic_object)
def test_regex_setting(self):
"""Validate 'regex' schema setting."""
schema = {
'b_only_property': {'type': 'str', 'regex': '^b+'}
}
my_type = o_type.create_ontic_type('RegexCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields
o_type.validate_object(ontic_object)
# Good test
ontic_object.b_only_property = ''
o_type.validate_object(ontic_object)
ontic_object.b_only_property = 'b'
o_type.validate_object(ontic_object)
# Bad test
ontic_object.b_only_property = 'a'
self.assertRaisesRegex(
ValidationException,
r'Value \"a\" for b_only_property does not '
r'meet regex: \^b\+',
o_type.validate_object, ontic_object)
def test_member_type_setting(self) -> NoReturn:
"""Validate 'member_type' setting."""
schema = {
'list_property': {'type': 'list', 'member_type': 'str'}
}
my_type = o_type.create_ontic_type('ItemTypeCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append('some_item')
o_type.validate_object(ontic_object)
# Bad test
ontic_object.list_property.append(99)
self.assertRaisesRegex(
ValidationException,
r'''The value "99" for "list_property" is not of type '''
r'''"<class 'str'>".''',
o_type.validate_object, ontic_object)
def test_collection_regex_setting(self) -> NoReturn:
"""Validate string collection with 'regex' setting."""
schema = {
'set_property': {'type': set, 'member_type': str, 'regex': 'b+'}
}
my_type = o_type.create_ontic_type(
'CollectionRegexCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good test
ontic_object.set_property = set()
o_type.validate_object(ontic_object)
ontic_object.set_property.add('bbbbb')
o_type.validate_object(ontic_object)
# Bad test
ontic_object.set_property.add('xxxxxx')
self.assertRaisesRegex(
ValidationException,
r'''Value "xxxxxx" for "set_property" does not meet regex: b+''',
o_type.validate_object, ontic_object)
def test_member_min_setting(self) -> NoReturn:
"""Validate 'member_min' setting."""
# Test the item min setting for string items.
schema = {
'list_property': {'type': 'list', 'member_type': 'str',
'member_min': 4}
}
my_type = o_type.create_ontic_type('StrItemMinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append('four')
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append('one')
self.assertRaisesRegex(
ValidationException,
r'''The value of "one" for "list_property" '''
r'''fails min length of 4.''',
o_type.validate_object, ontic_object)
# Test the item min setting for numeric items.
schema = {
'list_property': {'type': 'list', 'member_type': 'int',
'member_min': 4}
}
my_type = o_type.create_ontic_type('StrItemMinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append(4)
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append(1)
self.assertRaisesRegex(
ValidationException,
r'''The value of "1" for "list_property" fails min size of 4.''',
o_type.validate_object, ontic_object)
def test_member_max_setting(self) -> NoReturn:
"""Validate 'member_max' setting."""
# Test the item max setting for string items.
schema = {
'list_property': {
'type': 'list', 'member_type': 'str', 'member_max': 4}
}
my_type = o_type.create_ontic_type('StrItemMinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append('four')
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append('seven')
self.assertRaisesRegex(
ValidationException,
r'''The value of "seven" for "list_property" '''
r'''fails max length of 4.''',
o_type.validate_object, ontic_object)
# Test the item min setting for numeric items.
schema = {
'list_property': {
'type': 'list', 'member_type': 'int', 'member_max': 4}
}
my_type = o_type.create_ontic_type('StrItemMinCheck', schema)
self.assertIsNotNone(my_type)
ontic_object = my_type()
# None test, with no required fields.
o_type.validate_object(ontic_object)
# Good Test
ontic_object.list_property = []
o_type.validate_object(ontic_object)
ontic_object.list_property.append(4)
o_type.validate_object(ontic_object)
# Bad Test
ontic_object.list_property.append(7)
self.assertRaisesRegex(
ValidationException,
r'''The value of "7" for "list_property" fails max size of 4.''',
o_type.validate_object, ontic_object)
class ValidateValueTestCase(BaseTestCase):
"""Test ontic_types.validate_value method."""
def test_bad_validate_value(self) -> NoReturn:
"""ValueError testing of validate_value."""
self.assertRaisesRegex(
ValueError,
'"ontic_object" is required, cannot be None.',
o_type.validate_value, 'some_value', None)
self.assertRaisesRegex(
ValueError,
'"ontic_object" must be OnticType or child type of OnticType',
o_type.validate_value, 'some_value', "can't be string")
my_type = o_type.create_ontic_type(
'BadValidateValue',
{
'prop1': {'type': 'int'}
})
ontic_object = my_type()
ontic_object.prop1 = 1
self.assertRaisesRegex(
ValueError,
'"property_name" is required, cannot be None.',
o_type.validate_value, None, ontic_object)
self.assertRaisesRegex(
ValueError,
r'"property_name" is not a valid string.',
o_type.validate_value, '', ontic_object)
self.assertRaisesRegex(
ValueError,
'"property_name" is not a valid string.',
o_type.validate_value, 5, ontic_object)
self.assertRaisesRegex(
ValueError,
'"illegal property name" is not a recognized property.',
o_type.validate_value, 'illegal property name', ontic_object)
def test_validate_value_exception_handling(self) -> NoReturn:
"""Ensure validation exception handling by validation_object method."""
schema_instance = Schema(some_attr={'type': 'int'})
my_type = o_type.create_ontic_type('ValidateCheck',
schema_instance)
ontic_object = my_type()
ontic_object.some_attr = 'WRONG'
self.assertRaisesRegex(
ValidationException,
r"""The value for "some_attr" is not of type "<class 'int'>":"""
r""" WRONG""",
ontic_object.validate_value, 'some_attr')
with self.assertRaises(ValidationException) as ve:
ontic_object.validate_value('some_attr')
expected_errors = [
r"""The value for "some_attr" is not """
r"""of type "<class 'int'>": WRONG"""
]
self.assertListEqual(expected_errors, ve.exception.validation_errors)
errors = o_type.validate_value('some_attr', ontic_object,
raise_validation_exception=False)
self.assertListEqual(expected_errors, errors)
def test_validate_value_value_arg(self) -> NoReturn:
"""Valid value argument testing of validate_value."""
# Test that scalar property is valid.
single_property_schema = {
'prop1': {'type': 'str'}
}
my_type = o_type.create_ontic_type(
'GoodValidateValue', single_property_schema)
ontic_object = my_type({'prop1': 'Hot Dog'})
o_type.validate_value('prop1', ontic_object)
class ChildOnticType(OnticType):
ONTIC_SCHEMA = Schema([
OnticProperty(name='int_prop',
type=int),
OnticProperty(name='str_prop',
type=str,
required=True,
default='A Value')
])
class ParentOnticType(OnticType):
ONTIC_SCHEMA = Schema([
OnticProperty(name='child_prop', type=ChildOnticType)
])
DEFAULT_CHILD_PROP = ChildOnticType(int_prop=99, str_prop='The Value')
class RequiredOnticChildType(OnticType):
ONTIC_SCHEMA = Schema([
OnticProperty(
name='child_prop',
type=ChildOnticType,
required=True,
default=DEFAULT_CHILD_PROP),
])
class SettingOnticTypeTestCase(BaseTestCase):
"""Test case the setting of an OnticType as a OnticProperty.type setting."""
def test_ontic_type_perfect(self) -> NoReturn:
"""Test that Ontic child properties are perfected with parent."""
parent = ParentOnticType()
parent.child_prop = ChildOnticType()
self.assertNotIn('int_prop', parent.child_prop)
self.assertNotIn('str_prop', parent.child_prop)
parent.perfect()
self.assertIsNone(parent.child_prop.int_prop)
self.assertEqual('A Value', parent.child_prop.str_prop)
res = parent.validate()
self.assertListEqual([], res)
def test_ontic_type_success(self) -> NoReturn:
"""Test validation of an OnticType property."""
parent = ParentOnticType()
parent.child_prop = ChildOnticType(str_prop='Some Value')
parent.child_prop.int_prop = 1
res = parent.validate(raise_validation_exception=True)
self.assertListEqual(res, [])
def test_non_ontic_type_failure(self) -> NoReturn:
"""Test validation of an incorrect OnticType property."""
parent = ParentOnticType()
parent.child_prop = ChildOnticType()
parent.child_prop.int_prop = '1'
self.assertRaisesRegex(
ValidationException,
r"""The child property child_prop, has errors:: """
r"""The value for "int_prop" is not of o_type "<class 'int'>": 1"""
r""" || The value for "str_prop" is required.""",
parent.validate,
raise_validation_exception=True)
def test_ontic_type_default_setting(self) -> NoReturn:
"""Ensure that an OnticType property default is copied upon perfect."""
parent = RequiredOnticChildType()
self.assertNotIn('child_prop', parent)
parent.perfect()
self.assertIn('child_prop', parent)
self.assertIsNot(DEFAULT_CHILD_PROP, parent.child_prop)
self.assertEqual(99, parent.child_prop.int_prop)
self.assertEqual('The Value', parent.child_prop.str_prop)
self.assertEqual([], parent.validate())
c6e972384085a17d4254d8b48954d37e8355bbe9 | 5,503 | py | Python | api/telegram.py | ongzhixian/python-apps | 11a0d0ce656a7e9d7bdff18dd29feaa2bb436ae6 | ["MIT"]
import json
import logging
import os
import pdb
import re
from helpers.app_helpers import *
from helpers.page_helpers import *
from helpers.jinja2_helpers import *
from helpers.telegram_helpers import *
#from main import *
#from flask import request
################################################################################
# Setup helper functions
################################################################################
def get_machine_status(log_string):
rdp_re = re.compile("Machine \[(?P<box_name>.+)\] RDP session has \[(?P<box_ip>.*)\]")
result = rdp_re.match(log_string)
if result is None:
return result
box_name = result.group("box_name")
box_ip = result.group("box_ip").split(":")[0]
return (box_name, box_ip)
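# Example (hypothetical log line, for illustration only):
#   get_machine_status("Machine [box-01] RDP session has [10.0.0.5:3389]")
#   -> ("box-01", "10.0.0.5")   # the port is stripped by split(":")[0]
# Lines that do not match the pattern return None.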
def get_box_statuses():
cwd = os.getcwd()
#os.path.relpath("data/box_statuses.json")
outfile_path = os.path.join(os.getcwd(), os.path.relpath("static/data/box_statuses.json"))
box_statuses = None
if os.path.exists(outfile_path):
# Read file
with open(outfile_path, "rb") as outfile:
json_data = outfile.read()
box_statuses = json.loads(json_data)
else:
box_statuses = {}
return box_statuses
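# The backing JSON file is a flat object keyed by box name, e.g. (illustrative):
#   {"box-01": {"status": "In use", "comment": "10.0.0.5"}}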
def save_box_statuses(box_statuses):
logging.debug("IN save_box_statuses()")
cwd = os.getcwd()
#os.path.relpath("data/box_statuses.json")
outfile_path = os.path.join(os.getcwd(), os.path.relpath("static/data/box_statuses.json"))
# Write to file
try:
with open(outfile_path, "w+") as outfile:
outfile.write(json.dumps(box_statuses))
logging.debug("Saved!")
except Exception as ex:
logging.error(ex)
def update_box_statuses(log_string):
logging.debug("IN update_box_statuses()")
result = get_machine_status(log_string)
if result is not None:
logging.debug("IN result is not None")
# We got a machine status log entry; update json
# Get box statues
box_statuses = get_box_statuses()
box_name = result[0]
box_ip = result[1]
logging.debug("box_name: %s, box_ip: %s" % (box_name, box_ip))
# Update box_statuses.json
        if box_name not in box_statuses:  # dict.has_key was removed in Python 3
box_statuses[box_name] = {}
box_statuses[box_name]["status"] = "In use" if len(box_ip) > 0 else "Available"
box_statuses[box_name]["comment"] = box_ip
save_box_statuses(box_statuses)
################################################################################
# Setup routes
################################################################################
@route('/api/telegram/updates', method='POST')
def api_telegram_plato_dev_post():
logging.debug("IN api_telegram_plato_dev_post()")
# ZX: Support to get an Update object from the content of the response?
# logging.info("should dump content here")
json_data = request.json
if json_data is None:
return None
try:
logging.info(str(json_data))
message_text = ""
if json_data.has_key("message"):
message_text = json_data["message"]["text"]
if json_data.has_key("channel_post"):
message_text = json_data["channel_post"]["text"]
logging.debug("message_text is:" + message_text)
update_box_statuses(message_text)
except Exception as ex:
logging.error(ex)
#send_message(appconfig["telegram"]["token"], "53274105", "i received message")
#return json.dumps("api_telegram_plato_dev_post")
return str(json_data)
#
# cwd = os.getcwd()
# logging.info(cwd)
# rdp_re = re.compile("Machine \[(?P<box_name>.+)\] RDP session has \[(?P<ip>.*)\]")
# result = rdp_re.match(str(json_data["message"]["text"]))
# if result is None:
# pass
# else:
# pass
#send_message(appconfig["telegram"]["token"], "53274105", "i received message")
#return json.dumps("api_telegram_plato_dev_post")
# return str(json_data)
@route('/api/telegram/brahman-devops/sendMessage', method='POST')
def api_telegram_plato_dev_send_message_post():
logging.debug("IN api_telegram_plato_dev_send_message_post()")
chat_id = None
message = None
if 'chat_id' in request.json.keys():
chat_id = request.json['chat_id']
if 'message' in request.json.keys():
message = request.json['message']
if chat_id is None or message is None:
response.set_header('Content-Type', 'application/json')
return json.dumps("{}")
json_response_string = send_message(appconfig["telegram"]["token"], chat_id, message)
json_response_object = json.loads(json_response_string)
response.set_header('Content-Type', 'application/json')
return json_response_object
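# Minimal sketch of exercising the route above (host and port are assumptions;
# only the 'chat_id'/'message' JSON keys come from the handler itself):
#   import requests
#   requests.post("http://localhost:8080/api/telegram/brahman-devops/sendMessage",
#                 json={"chat_id": "53274105", "message": "hello"})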
@route('/api/telegram/setWebhook', method='POST')
def api_telegram_set_webhook_post():
logging.debug("IN api_telegram_set_webhook_post()")
json_data = set_webhook(appconfig["telegram"]["token"])
response.set_header('Content-Type', 'application/json')
return json_data
@route('/api/telegram/getme', method='POST')
def api_telegram_getme_get():
#
# {"ok": true, "result": {"username": "plato_dev_bot", "first_name": "plato-dev-bot", "is_bot": true, "id": 407476479}}
logging.debug("IN api_telegram_getme_get()")
json_data = get_me(appconfig["telegram"]["token"])
response.set_header('Content-Type', 'application/json')
return json_data
c6eb3b19d050576ce9764d0276a806ecdcc82b5f | 2,456 | py | Python | experiments/bayesopt/run_direct_surrogate.py | lebrice/RoBO | 0cb58a1622d3a540f7714b239f0cedf048b6fd9f | ["BSD-3-Clause"] | stars: 455 (2015-04-02 to 2022-02-28) | issues: 66 (2015-04-07 to 2021-06-04) | forks: 188 (2015-04-14 to 2022-03-31)
import os
import sys
import DIRECT
import json
import numpy as np
from hpolib.benchmarks.ml.surrogate_svm import SurrogateSVM
from hpolib.benchmarks.ml.surrogate_cnn import SurrogateCNN
from hpolib.benchmarks.ml.surrogate_fcnet import SurrogateFCNet
run_id = int(sys.argv[1])
benchmark = sys.argv[2]
n_iters = 50
n_init = 2
output_path = "./experiments/RoBO/surrogates"
if benchmark == "svm_mnist":
b = SurrogateSVM(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "cnn_cifar10":
b = SurrogateCNN(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
elif benchmark == "fcnet_mnist":
b = SurrogateFCNet(path="/ihome/kleinaa/devel/git/HPOlib/surrogates/")
info = b.get_meta_information()
X = []
y = []
def wrapper(x, user_data):
X.append(x.tolist())
y_ = b.objective_function(x)['function_value']
y.append(y_)
return y_, 0
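# DIRECT.solve drives the search by repeatedly calling wrapper(x, user_data)
# and expects a (value, flag) pair back; the X and y lists above double as a
# log of every configuration it evaluates.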
# Dimension and bounds of the function
bounds = b.get_meta_information()['bounds']
dimensions = len(bounds)
lower = np.array([i[0] for i in bounds])
upper = np.array([i[1] for i in bounds])
start_point = (upper-lower)/2
x, _, _ = DIRECT.solve(wrapper,
l=[lower],
u=[upper],
maxT=n_iters*2,
maxf=n_iters)
X = X[:n_iters]
y = y[:n_iters]
fvals = np.array(y)
incs = []
incumbent_val = []
curr_inc_val = sys.float_info.max
inc = None
for i, f in enumerate(fvals):
if curr_inc_val > f:
curr_inc_val = f
inc = X[i]
incumbent_val.append(curr_inc_val)
incs.append(inc)
# Offline Evaluation
test_error = []
runtime = []
cum_cost = 0
results = dict()
for i, inc in enumerate(incs):
    # Use a separate name so the optimisation trace collected in y above is
    # not clobbered before it is stored in results["y"].
    y_test = b.objective_function_test(np.array(inc))["function_value"]
    test_error.append(y_test)
# Compute the time it would have taken to evaluate this configuration
c = b.objective_function(np.array(X[i]))["cost"]
cum_cost += c
runtime.append(cum_cost)
# Estimate the runtime as the optimization overhead + estimated cost
results["runtime"] = runtime
results["test_error"] = test_error
results["method"] = "direct"
results["benchmark"] = benchmark
results["run_id"] = run_id
results["incumbents"] = incs
results["incumbent_values"] = incumbent_val
results["X"] = X
results["y"] = y
p = os.path.join(output_path, benchmark, "direct")
os.makedirs(p, exist_ok=True)
with open(os.path.join(p, '%s_run_%d.json' % (benchmark, run_id)), 'w') as fh:
    json.dump(results, fh)
c6eb612c8a8c4eac0f2f977fa8c04f601c65f1a7 | 1,197 | py | Python | calls/delete_call_feedback_summary.py | mickstevens/python3-twilio-sdkv6-examples | aac0403533b35fec4e8483de18d8fde2d783cfb2 | ["MIT"] | stars: 1 (2018-11-23)
# *** Delete Call Feedback Summary ***
# Code based on https://www.twilio.com/docs/voice/api/call-quality-feedback
# Download Python 3 from https://www.python.org/downloads/
# Download the Twilio helper library from https://www.twilio.com/docs/python/install
import os
from twilio.rest import Client
# from datetime import datetime | not required for this example
import logging
#write requests & responses from Twilio to log file, useful for debugging:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='/usr/local/twilio/python3/sdkv6x/calls/logs/call_feedback.log',
filemode='a')
# Your Account Sid and Auth Token from twilio.com/console & stored in Mac OS ~/.bash_profile in this example
account_sid = os.environ.get('TWILIO_ACCOUNT_SID')  # '$' is shell syntax, not part of the variable name
auth_token = os.environ.get('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)
# A list of call feedback summary parameters & their permissible values; comment out (#) lines not required:
# FSe6b77c80b547957f8ab7329b5c0b556c
client.calls \
.feedback_summaries("FSxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
.delete()
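# For context, a feedback summary is created and inspected before deletion
# roughly like this (a sketch against the same twilio-python v6 API; the
# dates are placeholders):
#   from datetime import date
#   summary = client.calls.feedback_summaries.create(
#       start_date=date(2018, 1, 1), end_date=date(2018, 1, 31))
#   print(summary.sid)  # the "FS..." sid to pass to .feedback_summaries(...).delete()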
c6f1e3f027d95fbea317bf8aa4166e874befc948 | 5,693 | py | Python | controllers/transactions_controller.py | JeremyCodeClan/spentrack_project | 455074446b5b335ea77933c80c43745fcad1171c | ["MIT"]
from flask import Blueprint, Flask, render_template, request, redirect
from models.transaction import Transaction
import repositories.transaction_repository as transaction_repo
import repositories.merchant_repository as merchant_repo
import repositories.tag_repository as tag_repo
transactions_blueprint = Blueprint("transactions", __name__)
@transactions_blueprint.route("/jeremy_e51")
def transactions():
order = 'order_date_desc'
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
@transactions_blueprint.route("/jeremy_e51/new")
def new():
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/new.html",
transactions = transactions, total = total, login = 1, new_cancel = 1
)
@transactions_blueprint.route("/jeremy_e51/<id>")
def transaction_show(id):
order = 'order_date_desc'
show_one = transaction_repo.select(id)
merchant = None
tag = None
if show_one.merchant: merchant = merchant_repo.select(show_one.merchant)
if show_one.tag: tag = tag_repo.select(show_one.tag)
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/show.html",
transactions = transactions, show_one = show_one, merchant = merchant, tag = tag, total = total, login = 1, order = order
)
@transactions_blueprint.route("/jeremy_e51", methods=['POST'])
def add_transaction():
name = request.form['name']
description = request.form['description']
amount = request.form['amount']
date = request.form['date']
transaction = Transaction(name, description, amount, date)
transaction_repo.save(transaction)
return redirect('/jeremy_e51')
@transactions_blueprint.route("/jeremy_e51/<id>/edit")
def edit_transaction(id):
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
merchants = merchant_repo.select_all()
tags = tag_repo.select_all()
return render_template(
'transactions/edit.html',
transactions = transactions, merchants = merchants, tags = tags, id = int(id), total = total, login = 1
)
@transactions_blueprint.route("/jeremy_e51/<id>", methods=['POST'])
def update_transaction(id):
transaction = transaction_repo.select(id)
if "tag_id" in request.form:
if request.form["tag_id"] != "None":
tag_id = request.form["tag_id"]
tag = tag_repo.select(tag_id)
transaction.tag = tag
if "merchant_id" in request.form:
if request.form["merchant_id"] != "None":
merchant_id = request.form["merchant_id"]
merchant = merchant_repo.select(merchant_id)
transaction.merchant = merchant
transaction_repo.update(transaction)
return redirect('/jeremy_e51')
@transactions_blueprint.route("/jeremy_e51/order")
def transactions_by_order():
    # .get avoids a KeyError when a query parameter is absent.
    order_date = request.args.get('order_date')
    order_amount = request.args.get('order_amount')
    order_name = request.args.get('order_name')
if order_date:
if order_date == 'desc':
order = 'order_date_desc'
transactions = transaction_repo.select_all()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_date == 'asc':
order = 'order_date_asc'
transactions = transaction_repo.select_all_asc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_amount:
if order_amount == 'desc':
order = 'order_amount_desc'
transactions = transaction_repo.order_by_price_desc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_amount == 'asc':
order = 'order_amount_asc'
transactions = transaction_repo.order_by_price_asc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_name:
if order_name == 'desc':
order = 'order_name_desc'
transactions = transaction_repo.order_by_name_desc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
if order_name == 'asc':
order = 'order_name_asc'
transactions = transaction_repo.order_by_name_asc()
total = transaction_repo.total_amount(transactions)
return render_template(
"transactions/index.html",
transactions = transactions, total = total, login = 1, order = order
)
return redirect('/jeremy_e51')
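# A minimal sketch (not wired in) of how the repeated branches above could
# collapse into a lookup table; the repo method names are the ones already
# used in this controller:
#
#   ORDERINGS = {
#       ('order_date', 'desc'): transaction_repo.select_all,
#       ('order_date', 'asc'): transaction_repo.select_all_asc,
#       ('order_amount', 'desc'): transaction_repo.order_by_price_desc,
#       ('order_amount', 'asc'): transaction_repo.order_by_price_asc,
#       ('order_name', 'desc'): transaction_repo.order_by_name_desc,
#       ('order_name', 'asc'): transaction_repo.order_by_name_asc,
#   }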
c6f1fc0edc1a1464fe8ec814304b412c4369a1d8 | 86,261 | py | Python | Welcomer 6.20/modules/core.py | TheRockettek/Welcomer | 60706b4d6eec7d4f2500b3acc37530e42d846532 | ["MIT"] | stars: 12 (2019-09-10 to 2022-01-21) | forks: 1 (2021-09-17)
import asyncio
import copy
import csv
import io
import math
from math import inf
import os
import sys
import time
import traceback
import logging
from importlib import reload
from datetime import datetime
import aiohttp
import discord
import requests
import json
import ujson
from discord.ext import commands
from rockutils import rockutils
import uuid
import handling
def canint(val):
try:
int(val)
return True
except BaseException:
return False
class NoPermission(Exception):
pass
class NoDonator(Exception):
pass
class WelcomerCore(commands.Cog):
def __init__(self, bot):
self.bot = bot
def maketimestamp(
self,
timestamp=0,
lang=[
"second",
"minute",
"hour",
"day",
"and",
"ago",
"year"],
allow_secs=False,
include_ago=True):
if not timestamp:
timestamp = 0
_y, _d, _h, _m, _s = rockutils.parse_unix(
datetime.utcnow().timestamp() - timestamp)
# message = ""
# if _y > 0:
# message += f"{str(_y)} {lang[6]}{'s' if _y > 1 else ''} "
# if _d > 0:
# if _h < 0:
# message += f"{lang[4]} "
# elif len(message) > 1:
# message += ", "
# message += f"{str(_d)} {lang[3]}{'s' if _d > 1 else ''} "
# if _h > 0:
# if _m < 0:
# message += f"{lang[4]} "
# elif len(message) > 1:
# message += ", "
# message += f"{str(_h)} {lang[2]}{'s' if _h > 1 else ''} "
# # if we dont allow seconds, round the minutes up
# if not allow_secs and _s > 0:
# _m += 1
# if _m > 0:
# if _h > 0 or _d > 0:
# message += f"{lang[4]} "
# message += f"{str(_m)} {lang[1]}{'s' if _m > 1 else ''} "
# if allow_secs:
# if _h > 0 or _d > 0 or _m > 0:
# message += f"{lang[4]} "
# message += f"{str(_s)} {lang[0]}{'s' if _s > 1 else ''} "
# if include_ago:
# message += lang[5]
# return message
message = ""
if _y > 0:
message += f"{_y} year{'s' if _y != 1 else ''}"
if _d > 0:
if _h < 0:
message += " and "
elif len(message) > 1:
message += ", "
message += f"{_d} day{'s' if _d != 1 else ''}"
if _h > 0:
if _m < 0:
message += " and "
elif len(message) > 1:
message += ", "
message += f"{_h} hour{'s' if _h != 1 else ''}"
if _m > 0:
if _s < 0 if allow_secs else (_h > 0 or _d > 0):
message += " and "
elif len(message) > 1:
message += ", "
message += f"{_m} minute{'s' if _m != 1 else ''}"
if allow_secs:
if _h > 0 or _d > 0 or _m > 0:
message += " and "
message += f"{_s} second{'s' if _s != 1 else ''}"
if include_ago:
message += " ago"
return message
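        # Illustrative example: with the defaults above, a timestamp 3,700
        # seconds in the past renders as "1 hour and 1 minute ago"; seconds
        # are only appended when allow_secs=True.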
async def get_value(self, table, key, default=None):
# print("FETCH", table, key)
async with self.bot.connection.acquire() as connection:
value = await connection.fetchrow(
f"SELECT * FROM {table} WHERE id = $1",
key
)
if value:
print("FETCH", table, key, "OK")
try:
return ujson.loads(value["value"])
except ValueError:
return json.loads(value["value"])
else:
print("FETCH", table, key, "FAIL")
return default
async def set_value(self, table, key, value):
if key is None:
key = str(uuid.uuid4())
print("SET", table, key)
try:
async with self.bot.connection.acquire() as connection:
await connection.execute(
f"INSERT INTO {table}(id, value) VALUES($1, $2) ON CONFLICT (id) DO UPDATE SET value = $2",
key, ujson.dumps(value)
)
except Exception as e:
print("Failed to set value", table, ":", key, e)
# return False
else:
# return True
return {
"generated_keys": [key],
"inserted": 1
}
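    # Usage sketch: set_value upserts via ON CONFLICT and mimics the
    # RethinkDB-style result shape its callers expect, e.g.
    #   res = await self.set_value("guilds", "1234", data)
    #   res == {"generated_keys": ["1234"], "inserted": 1}   (on success)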
async def get_guild_info(self, id, refer="", reload_data=True, create_cache=True, direct=False, request_invites=True):
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Getting information for G:{id}",
# prefix="Guild Info:Get",
# prefix_colour="light green")
guild_info = await self.get_value("guilds", str(id))
# guild_info = await r.table("guilds").get(str(id)).run(self.bot.connection)
if not direct:
new_data = True if not isinstance(
guild_info, dict) else not bool(guild_info)
            has_updated = new_data
guild = self.bot.get_guild(int(id))
_guild_info = self.bot.serialiser.guild(guild)
_time = time.time()
default_data = copy.deepcopy(self.bot.default_guild)
latest_version = default_data['d']['dv']
if new_data and guild:
# try:
# old_info = await r.db("welcomer5").table("guilds").get(str(id)).run(self.bot.connection)
# if old_info:
# default_data['a']['e'] = old_info['analytics']['enabled']
# default_data['ar']['e'] = old_info['autorole']['enabled']
# default_data['ar']['r'] = list(
# map(str, old_info['autorole']['role_ids']))
# for donation in old_info['donations']:
# default_data['d']['de'].append(donation['id'])
# default_data['d']['b']['hb'] = True
# default_data['l']['e'] = old_info['leaver']['enabled']
# if isinstance(old_info['leaver']['channel'], str):
# default_data['l']['c'] = old_info['leaver']['channel']
# default_data['l']['t'] = old_info['leaver']['text']
# if "prefix" in old_info:
# default_data['d']['b']['p'] = old_info['prefix']
# default_data['r']['e'] = old_info['rules']['enabled']
# default_data['r']['r'] = old_info['rules']['rules']
# default_data['d']['b']['ai'] = old_info['settings']['allow_invite']
# default_data['d']['b']['d'] = old_info['settings']['description']
# default_data['d']['b']['ss'] = old_info['settings']['show_staff']
# default_data['st']['ap'] = old_info['staff']['allow_ping']
# for staff_id, allow_ping in old_info['staff']['staff_ids'].items():
# default_data['st']['u'].append(
# [staff_id, allow_ping])
# # for channel_id, stat in old_info['stats']['channels']:
# # stats = {}
# # stats['c'] = channel_id
# # stats['t'] = stat['type']
# # stats['t'] = stat['text']
# # default_data['s']['c'].append(stat)
# default_data['s']['c'] = old_info['stats']['channels']
# if isinstance(old_info['stats']['enabled'], str):
# default_data['s']['e'] = old_info['stats']['enabled']
# default_data['s']['ca'] = old_info['stats']['category']
# default_data['tc']['e'] = old_info['tempchannels']['enabled']
# if isinstance(old_info['tempchannels']['category'], str):
# default_data['tc']['c'] = old_info['tempchannels']['category']
# default_data['tc']['ap'] = old_info['tempchannels']['autopurge']
# if isinstance(old_info['welcomer']['channel'], str):
# default_data['w']['c'] = old_info['welcomer']['channel']
# default_data['w']['e'] = old_info['welcomer']['enable_embed']
# default_data['w']['b'] = old_info['welcomer']['text']['badges']
# default_data['w']['iv'] = old_info['welcomer']['text']['invited']
# default_data['w']['i']['e'] = old_info['welcomer']['images']['enabled']
# default_data['w']['i']['bg'] = old_info['welcomer']['images']['background']
# # default_data['w']['i']['c']['bo'] = old_info['welcomer']['images']['colour']['border']
# # default_data['w']['i']['c']['b'] = old_info['welcomer']['images']['colour']['text']
# # default_data['w']['i']['c']['pb'] = old_info['welcomer']['images']['colour']['profile']
# default_data['w']['i']['m'] = old_info['welcomer']['images']['message']
# default_data['w']['t']['e'] = old_info['welcomer']['text']['enabled']
# default_data['w']['t']['m'] = old_info['welcomer']['text']['message']
# default_data['w']['dm']['e'] = old_info['welcomer']['dm']['enabled']
# default_data['w']['dm']['m'] = old_info['welcomer']['text']['message']
# if "namepurge" in old_info['welcomer']:
# default_data['np']['e'] = old_info['welcomer']['namepurge']['enabled']
# default_data['np']['f'] = list(map(lambda o: o.replace(
# "\n", ""), old_info['welcomer']['namepurge']['filter']))
# except BaseException:
# exc_info = sys.exc_info()
# traceback.print_exception(*exc_info)
guild_info = default_data
origional_guild_info = copy.deepcopy(guild_info)
guild_info['d']['b']['c'] = self.bot.cluster_id
guild_info['id'] = str(id)
if self.bot.donator:
guild_info['d']['b']['hd'] = True
elif guild:
if not guild.get_member(498519480985583636):
guild_info['d']['b']['hd'] = False
if guild:
if new_data:
guild_info['d']['g']['ga'] = math.ceil(_time)
guild_info['d']['g']['gc'] = math.ceil(
guild.created_at.timestamp())
if request_invites:
try:
guild_info['d']['i'] = await self.bot.serialiser.invites(guild)
except BaseException:
pass
guild_info['d']['g']['i'] = _guild_info['icons']
guild_info['d']['g']['ic'] = _guild_info['icon']
guild_info['d']['g']['n'] = _guild_info['name']
guild_info['d']['b']['r'] = _guild_info['region']
guild_info['d']['b']['sh'] = guild.shard_id
if guild.owner or guild.owner_id:
try:
owner_id = guild.owner.id
except:
owner_id = guild.owner_id
user = self.bot.get_user(owner_id)
if user:
guild_info['d']['g']['o'] = self.bot.serialiser.user(
user)
if _time - guild_info['d']['m']['u'] > 600:
guild_info['d']['m'] = {
"b": _guild_info['bots'],
"m": _guild_info['users'] - _guild_info['bots'],
"a": _guild_info['users'],
"u": _time
}
# if _time - guild_info['d']['d']['u'] > 600:
# _guild_detailed = self.bot.serialiser.guild_detailed(
# guild)
# guild_info['d']['d'] = {
# "s": _guild_detailed['streaming'],
# "o": _guild_detailed['online'],
# "i": _guild_detailed['idle'],
# "d": _guild_detailed['dnd'],
# "of": _guild_detailed['offline'],
# "u": _time
# }
if _time - guild_info['d']['c']['u'] > 600:
_channels = self.bot.serialiser.channels(guild)
guild_info['d']['c'] = {
"c": _channels['categories'],
"v": _channels['voice'],
"t": _channels['text'],
"u": _time
}
if "r" not in guild_info['d'] or (
_time - guild_info['d']['r']['u'] > 600):
_roles = self.bot.serialiser.roles(guild)
guild_info['d']['r'] = {
"r": _roles,
"u": _time
}
has_updated = True if guild_info != origional_guild_info else has_updated
if latest_version != guild_info['d']['dv']:
default_data.update(guild_info)
guild_info = default_data
_version = guild_info['d']['dv']
if _version == 0:
# example hardcoded data overwrite
pass
if "sw" not in guild_info['d']['b']:
guild_info['d']['b']['sw'] = True
guild_info['d']['dv'] = default_data['d']['dv']
has_updated = True
if not isinstance(guild_info['s']['c'], list):
print("Emptying channel list")
guild_info['s']['c'] = []
def normalize_colour(string):
if string.startswith("RGBA|"):
return string
elif string.startswith("RGB|"):
return string
else:
try:
                    _hex = str(hex(int(string)))[2:]
                    if len(_hex) >= 8:
                        # Reuse the parsed _hex; hex(string) on a str raises TypeError.
                        return f"RGBA|{_hex[:8]}"
                    elif len(_hex) >= 6:
                        return f"RGB|{_hex[:6]}"
except BaseException:
pass
return f"RGB|FFFFFF"
keys = ['w.i.c.b', 'w.i.c.b', 'w.i.c.pb', 'w.i.c.ib']
for key in keys:
value = rockutils.getvalue(key, guild_info)
value = str(value)
if not value.startswith("R"):
newvalue = normalize_colour(value)
rockutils.setvalue(key, guild_info, newvalue)
# print("create cache", create_cache)
if create_cache:
guild = self.bot.get_guild(int(id))
if guild:
await self.create_guild_cache(guild_info, guild, force=True)
else:
rockutils.prefix_print(
f"Wanted to make cache for {id} but no guild object", prefix="createcache", prefix_colour="red", text_colour="light red")
create_cache = False
if has_updated or new_data:
if new_data:
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Creating information for G:{id}",
# prefix="Guild Info:Get",
# prefix_colour="light green")
# await r.table("guilds").insert(guild_info).run(self.bot.connection)
await self.set_value("guilds", guild_info["id"], guild_info)
else:
await self.update_guild_info(id, guild_info, refer="getguildinfo:" + (refer or "?"))
# print("create cache", create_cache)
if create_cache:
guild = self.bot.get_guild(int(id))
if guild:
await self.create_guild_cache(guild_info, guild, force=True)
else:
rockutils.prefix_print(
f"Wanted to make cache for {id} but no guild object", prefix="createcache", prefix_colour="red", text_colour="light red")
return guild_info
async def update_info(self, ctx, data):
guilddata = copy.copy(ctx.guildinfo)
if data:
if isinstance(data[0], list):
for key, value in data:
if rockutils.hasvalue(key, guilddata):
rockutils.setvalue(key, guilddata, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
else:
# Table not nested (only one key value pair)
key, value = data[0], data[1]
if rockutils.hasvalue(key, guilddata):
rockutils.setvalue(key, guilddata, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
await self.bot.create_guild_cache(guilddata, guild=ctx.guild, force=True)
return await self.update_guild_info(ctx.guild.id, guilddata, refer="updateinfo")
async def update_info_key(self, guildinfo, data, refer=""):
if isinstance(guildinfo, int):
guildinfo = await self.bot.get_guild_info(guildinfo, refer=f"Update Info Key:{refer}")
if len(data) > 0:
if isinstance(data[0], list):
# print(list(map(lambda o: o[0], data)))
for key, value in data:
if rockutils.hasvalue(key, guildinfo):
rockutils.setvalue(key, guildinfo, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
else:
# print(data[0])
# Table not nested (only one key value pair)
key, value = data[0], data[1]
if rockutils.hasvalue(key, guildinfo):
rockutils.setvalue(key, guildinfo, value)
else:
rockutils.prefix_print(
f"Could not find key {key} in guildinfo",
prefix="Update Info",
prefix_colour="red",
text_colour="light red")
guild = self.bot.get_guild(int(guildinfo['id']))
await self.bot.create_guild_cache(guildinfo, guild=guild, force=True)
return await self.update_guild_info(guildinfo['id'], guildinfo, refer=f"Update Info Key:{refer}")
async def update_guild_info(self, id, data, forceupdate=False, refer=""):
try:
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating information for G:{id}",
# prefix="Guild Info:Update",
# prefix_colour="light green")
t = time.time()
res = await self.set_value("guilds", str(id), data)
# if forceupdate:
# res = await r.table("guilds").get(str(id)).update(data).run(self.bot.connection)
# else:
# res = await r.table("guilds").get(str(id)).replace(data).run(self.bot.connection)
te = time.time()
if te - t > 1:
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating guild info took {math.floor((te-t)*1000)}ms",
prefix="Guild Info:Update",
prefix_colour="red",
text_colour="light red")
return res
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Error occured whilst updating info for G:{id}. {e}",
prefix="Guild Info:Update",
prefix_colour="red",
text_colour="light red")
return False
async def get_user_info(self, id, refer="", reload_data=True, direct=False):
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Getting information for U:{id}",
# prefix="User Info:Get",
# prefix_colour="light green")
# user_info = await r.table("users").get(str(id)).run(self.bot.connection)
user_info = await self.get_value("users", str(id))
if not direct:
new_data = True if not isinstance(
user_info, dict) else not bool(user_info)
            has_updated = new_data
user = self.bot.get_user(int(id))
_user_info = self.bot.serialiser.user(user)
_time = time.time()
default_data = copy.deepcopy(self.bot.default_user)
latest_version = default_data['g']['dv']
if new_data and user:
# try:
# old_info = await r.db("welcomer5").table("guilds").get(str(id)).run(self.bot.connection)
# if old_info:
# if (old_info['membership']['exte'] or
# old_info['membership']['plus'] or
# old_info['membership']['pro']):
# default_data['m']['5']['h'] = True
# default_data['m']['5']['p'] = (old_info['membership']['exte_patr'] or
# old_info['membership']['plus_patr'] or
# old_info['membership']['pro_patr'])
# default_data['m']['5']['u'] = max(
# old_info['membership']['exte_since'],
# old_info['membership']['plus_since'],
# old_info['membership']['pro_since']) + 2592000
# default_data['m']['p'] = old_info['membership']['partner']
# default_data['m']['s'] = list(
# map(lambda o: o['id'], old_info['membership']['servers']))
# default_data['r']['r'] = old_info['reputation']
# default_data['r']['l'] = old_info['last_reputation']
# default_data['g']['b']['pd'] = old_info['prefer_dms']
# except BaseException:
# exc_info = sys.exc_info()
# traceback.print_exception(*exc_info)
user_info = default_data
origional_user_info = copy.deepcopy(user_info)
user_info['id'] = str(id)
if user:
if new_data:
user_info['g']['g']['ua'] = math.ceil(_time)
user_info['g']['g']['uc'] = math.ceil(
user.created_at.timestamp())
if "avatar" in _user_info:
user_info['g']['g']['a'] = _user_info['avatar']
user_info['g']['g']['n'] = _user_info['name']
user_info['g']['g']['d'] = _user_info['discriminator']
user_info['g']['g']['u'] = _time
# if _time - user_info['g']['g']['m']['u'].get(
# self.bot.cluster_id, 0) > 900 and not user.bot:
# user_info['g']['g']['m']['c'][
# self.bot.cluster_id] = self.bot.serialiser.mutualguilds(user)
# user_info['g']['g']['m']['u'][self.bot.cluster_id] = _time
expired = []
renewed = []
changes = []
for membership_type, v in user_info['m'].items():
if isinstance(v, dict):
# print(_time, user_info['m'][membership_type]['u'])
# print(user_info['m'][membership_type]['u'])
if user_info['m'][membership_type]['h'] and user_info['m'][membership_type]['u'] and ((_time > user_info['m'][membership_type]['u'])):
user_info['m'][membership_type]['h'] = False
if user_info['m'][membership_type]['p']:
user_info['m'][membership_type]['h'] = True
user_info['m'][membership_type]['u'] = _time + 2592000
renewed.append("Welcomer x" + membership_type)
else:
expired.append("Welcomer x" + membership_type)
if len(expired) > 0 or len(renewed) > 0:
url = "https://[removed]"
await rockutils.send_webhook(url, f"User: `{id}` <@{id}> membership expired. Expired: `{expired}` Renewed: `{renewed}`")
message = rockutils._(
"Some of your memberships have expired and may have renewed if you have paid using patreon.\n\n__Expired memberships:__**\n{expired}**\n__Renewed memberships:__\n**{renewed}**\n\nYou are able to renew memberships automatically by donating with patreon. Find out more at **{url}**",
user_info).format(
expired=", ".join(expired),
renewed=", ".join(renewed),
url="https://welcomer.gg/donate")
try:
await user.send(message)
except BaseException:
pass
if not user.bot:
user_info['b'] = sorted(
self.bot.serialiser.badges(
user, user_info), key=lambda o: o[0])
has_updated = True if user_info != origional_user_info else has_updated
if latest_version != user_info['g']['dv']:
                default_data.update(user_info)  # dict.update returns None; mirror the guild-info code path
                user_info = default_data
_version = user_info['g']['dv']
if _version == 0:
# example hardcoded data overwrite
pass
user_info['g']['dv'] = default_data['g']['dv']
has_updated = True
if has_updated or new_data:
if new_data:
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Creating information for G:{id}",
prefix="User Info:Get",
prefix_colour="light green")
# await r.table("users").insert(user_info).run(self.bot.connection)
await self.set_value("users", user_info["id"], user_info)
else:
await self.update_user_info(id, user_info)
return user_info
async def update_user_info(self, id, data, forceupdate=False, refer=""):
try:
# rockutils.prefix_print(
# f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating information for U:{id}",
# prefix="User Info:Update",
# prefix_colour="light green")
t = time.time()
await self.set_value("users", str(id), data)
# if forceupdate:
# await r.table("users").get(str(id)).update(data).run(self.bot.connection)
# else:
# await r.table("users").get(str(id)).replace(data).run(self.bot.connection)
te = time.time()
if te - t > 1:
rockutils.prefix_print(
f"{f'[Refer: {refer}] ' if refer != '' else ''}Updating guild info took {math.floor((te-t)*1000)}ms",
prefix="User Info:Update",
prefix_colour="red",
text_colour="light red")
return True
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Error occured whilst updating info for U:{id}. {e}",
prefix="User Info:Update",
prefix_colour="red",
text_colour="light red")
return False
@ commands.Cog.listener()
async def on_shard_connect(self, shard_id):
await self.push_ipc({"o": "SHARD_UPDATE", "d": [0, shard_id]})
@ commands.Cog.listener()
async def on_shard_ready(self, shard_id):
await self.push_ipc({"o": "SHARD_UPDATE", "d": [1, shard_id]})
@ commands.Cog.listener()
async def on_shard_resumed(self, shard_id):
await self.push_ipc({"o": "SHARD_UPDATE", "d": [4, shard_id]})
@ commands.Cog.listener()
async def on_connect(self):
if self.bot.ranonconnect:
return
self.bot.ranonconnect = True
rockutils.prefix_print("Bot is now connecting", prefix_colour="green")
await self.push_ipc({"o": "STATUS_UPDATE", "d": 0})
game = discord.Game("Getting Ready...")
await self.bot.change_presence(status=discord.Status.idle, activity=game)
@ commands.Cog.listener()
async def on_ready(self):
rockutils.prefix_print("Bot is fully ready", prefix_colour="green")
await self.push_ipc({"o": "STATUS_UPDATE", "d": 1})
game = discord.Game("welcomer.gg | +help")
await self.bot.change_presence(status=discord.Status.online, activity=game)
@ commands.Cog.listener()
async def on_resume(self):
rockutils.prefix_print("Bot is now resuming", prefix_colour="green")
await self.push_ipc({"o": "STATUS_UPDATE", "d": 4})
async def sync_task(self):
# ws = self.bot.ipc_ws
rockutils.prefix_print("Starting sync task", prefix="Sync Task")
while True:
try:
await self.sync_handle()
except asyncio.CancelledError:
raise asyncio.CancelledError
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"{type(e)} {str(e)}",
prefix="Sync Task",
prefix_colour="light red",
text_colour="red")
await asyncio.sleep(1)
async def sync_receiver(self):
ws = self.bot.ipc_ws
rockutils.prefix_print("Yielding sync receiver", prefix="Sync Handler")
while not self.bot.is_ready():
await asyncio.sleep(1)
rockutils.prefix_print("Starting sync receiver", prefix="Sync Handler")
while True:
try:
print("Waiting for json")
jobs = await ws.receive_json(loads=ujson.loads)
except ValueError:
pass
except asyncio.CancelledError:
raise asyncio.CancelledError
else:
if len(jobs) > 0:
try:
f = open("handling.py", "r")
file_content = f.read()
f.close()
compile(file_content + "\n", "handling.py", "exec")
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Could not update handling: {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
for job in jobs:
print(f"Running job {job} in task")
self.bot.loop.create_task(self.process_job(job))
async def process_job(self, job):
try:
opcode = job['o'].lower()
try:
args = ujson.loads(job['a'])
except BaseException:
args = job['a']
key = job['k']
if canint(args):
args = int(args)
if hasattr(handling, opcode):
try:
result = await asyncio.wait_for(getattr(handling, opcode)(self, opcode, args), timeout=60)
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
                    result = {
                        "success": False, "error": "Exception",
                        "exception": str(type(e))}
rockutils.prefix_print(
f"Could not process job. {opcode}:{args}. {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
else:
result = {
"success": False, "error": "InvalidOPCode"}
_payload = {
"o": "SUBMIT",
"k": key,
"r": self.bot.cluster_id,
"d": result
}
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/ipc_submit/{self.bot.cluster_id}/{self.bot.config['ipc']['auth_key']}"
async with aiohttp.ClientSession() as _session:
await _session.post(domain, json=_payload)
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Could not process jobs: {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
async def sync_send(self, _payload):
try:
_payload['o'] = _payload['o'].upper()
await self.bot.ipc_ws.send_json(_payload, dumps=ujson.dumps)
except asyncio.CancelledError:
raise asyncio.CancelledError
except OverflowError:
# If we have overflowed in a ping, and more than half the
# shards are broken, kill the bot.
if _payload["o"] == "SUBMIT" and "ping" in _payload["k"]:
total = round(len(_payload["d"]["latencies"])/2)
tinf = 0
for i in _payload["d"]["latencies"]:
if i[1] == inf:
tinf += 1
if tinf >= total:
                    await self.bot.logout()  # logout() is a coroutine; without await it never runs
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Could not send payload. {_payload}. {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
async def sync_handle(self):
rockutils.prefix_print("Starting sync handler", prefix="Sync Handler")
try:
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/ipc/{self.bot.cluster_id}/{self.bot.config['ipc']['auth_key']}"
rockutils.prefix_print(f"Connecting to WS via {domain}")
session = aiohttp.ClientSession()
self.bot.ipc_ws = await session.ws_connect(domain)
rockutils.prefix_print(
"Connected to websocket",
prefix="Sync Handler")
self.bot.sync_receiver_task = self.bot.loop.create_task(
self.sync_receiver())
while True:
await asyncio.sleep(1)
if self.bot.sync_receiver_task.done():
rockutils.prefix_print(
"Closing sync", prefix="Sync Handler", text_colour="red")
try:
self.bot.sync_receiver_task.cancel()
except asyncio.CancelledError:
raise asyncio.CancelledError
except BaseException:
pass
await session.close()
return
except aiohttp.client_exceptions.ClientConnectionError:
await session.close()
rockutils.prefix_print(
"Encountered connection error with IPC",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
await asyncio.sleep(2)
except asyncio.CancelledError:
raise asyncio.CancelledError
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"{type(e)} {str(e)}",
prefix="Sync Handler",
prefix_colour="light red",
text_colour="red")
async def push_ipc(self, _payload):
if _payload.get("o", "") != "":
await self.bot.sync_send(_payload)
return True
else:
return False
async def has_guild_donated(self, guild, guild_info, donation=False,
partner=True):
if guild and isinstance(guild, discord.Guild):
_time = time.time()
if partner:
try:
owner_id = guild.owner.id
except:
owner_id = guild.owner_id
_userinfo = await self.bot.get_user_info(owner_id)
if _userinfo and _userinfo['m']['p']:
return True
for id in guild_info['d']['de']:
id = int(id)
try:
_user = self.bot.get_user(id)
if _user:
if await self.bot.has_special_permission(_user, support=True, developer=True, admin=True, trusted=True):
return True
_userinfo = await self.bot.get_user_info(id)
if _userinfo:
if donation:
if _userinfo['m']['1']['h'] and (
_time < (_userinfo['m']['1'].get('u', 0) or 0) or
_userinfo['m']['1']['p']):
return True
if _userinfo['m']['3']['h'] and (
_time < (_userinfo['m']['3'].get('u', 0) or 0) or
_userinfo['m']['3']['p']):
return True
if _userinfo['m']['5']['h'] and (
_time < (_userinfo['m']['5'].get('u', 0) or 0) or
_userinfo['m']['5']['p']):
return True
except BaseException:
pass
return False
async def has_special_permission(self, user, support=False,
developer=False, admin=False,
trusted=False):
_config = rockutils.load_json("cfg/config.json")
if _config != self.bot.config:
self.bot.config = copy.deepcopy(_config)
if user and type(user) in [discord.User, discord.Member]:
if support and user.id in _config['roles']['support']:
return True
if developer and user.id in _config['roles']['developer']:
return True
if admin and user.id in _config['roles']['admins']:
return True
if trusted and user.id in _config['roles']['trusted']:
return True
return False
async def walk_help(self, ctx, group):
message = ""
command_list = []
briefs = {}
for command in group.commands:
key = command.description.split('|')[0]
if key not in briefs:
briefs[key] = []
briefs[key].append(command)
for key, value in briefs.items():
_sorted = sorted(value, key=lambda o: o.name)
briefs[key] = _sorted
for key in sorted(briefs.keys()):
for command in briefs[key]:
command_list.append(command)
for command in command_list:
sub_message = f"**{command.full_parent_name} {command.name} {command.description.split('|')[0]}** | {command.description.split('|')[1]}\n"
if len(message) + len(sub_message) > 2048:
await self.bot.send_data(ctx, message, ctx.userinfo, title=f"{ctx.command.name[0].upper()}{ctx.command.name[1:].lower()} usage")
message = ""
message += sub_message
await self.bot.send_data(ctx, message, ctx.userinfo, title=f"{ctx.command.name[0].upper()}{ctx.command.name[1:].lower()} usage")
async def send_user_data(self, user, message,
title="", footer="", raw=False):
message_kwargs = {}
extra = ""
if raw:
message_kwargs['content'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
else:
embed_kwargs = {}
embed_kwargs['description'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
embed_kwargs['timestamp'] = datetime.utcfromtimestamp(
math.ceil(time.time()))
if title:
embed_kwargs['title'] = title
embed = discord.Embed(colour=3553599, **embed_kwargs)
embed.set_footer(text=footer)
message_kwargs['embed'] = embed
try:
await user.send(**message_kwargs)
except BaseException:
try:
await user.send(message[:2048])
except BaseException:
return
if len(extra) > 0:
return await self.send_user_data(user, message, title, footer, raw)
async def send_data(self, ctx, message, userinfo={}, prefer_dms=False,
force_guild=False, force_dm=False, alert=True,
title="", footer="", raw=False):
if force_dm and force_guild:
force_dm, force_guild = False, False
if userinfo.get("g"):
use_guild = not userinfo['g']['b']['pd']
if force_dm:
use_guild = False
if force_guild:
use_guild = True
if not getattr(ctx, "guild", False):
use_guild = False
message_kwargs = {}
extra = ""
if raw:
message_kwargs['content'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
else:
embed_kwargs = {}
embed_kwargs['description'] = message[:2048]
if len(message) > 2048:
extra = message[2048:]
embed_kwargs['timestamp'] = datetime.utcfromtimestamp(
math.ceil(time.time()))
if title:
embed_kwargs['title'] = title
embed = discord.Embed(colour=3553599, **embed_kwargs)
embed.set_footer(text=footer)
message_kwargs['embed'] = embed
if use_guild:
try:
await ctx.send(**message_kwargs)
except BaseException:
try:
await ctx.send(message[:2048])
except BaseException:
return
else:
try:
await ctx.author.send(**message_kwargs)
if alert and getattr(ctx, "guild", False):
try:
_message = rockutils._(
"Help has been sent to your direct messages", ctx)
await ctx.send(":mailbox_with_mail: | " + _message)
except BaseException:
pass
except BaseException:
try:
await ctx.send(**message_kwargs)
except BaseException:
try:
await ctx.send(message[:2048])
except BaseException:
return
if len(extra) > 0:
return await self.send_data(ctx, extra, userinfo, prefer_dms,
force_guild, force_dm, alert,
title, footer, raw)
def reload_data(self, filename, key=None):
if not key:
_, key = os.path.split(filename)
key = key[:key.find(".")]
if os.path.exists(filename):
data = rockutils.load_json(filename)
setattr(self.bot, key, data)
return True, key
else:
return False, key
def should_cache(self, guildinfo):
return guildinfo['a']['e'] or len(
guildinfo['rr']) > 0 or guildinfo['tr']['e'] or guildinfo['am'][
'e'] or guildinfo['s']['e']
async def create_guild_cache(self, guildinfo, guild=None, cache_filter=[],
force=False):
cached = False
force = True
if not guild:
            guild = self.bot.get_guild(int(guildinfo['id']))  # get_guild is a synchronous cache lookup, not a coroutine
_id = None
if guild:
_id = guild.id
else:
_id = int(guildinfo['id'])
if guildinfo and _id:
c = self.bot.cache
# print(f"Creating cache for {_id}")
def _wanted(key):
# An empty cache_filter means "refresh every section".
return len(cache_filter) == 0 or key in cache_filter
def _refresh(key, get_value):
# get_value is a callable so guildinfo keys are only read when the
# section is actually being refreshed.
if (_id not in c[key] or force) and _wanted(key):
c[key][_id] = get_value()
_refresh('prefix', lambda: guildinfo['d']['b']['p'])
_refresh('guilddetails', lambda: guildinfo['d']['b'])
_refresh('rules', lambda: guildinfo['r'])
# _refresh('channels', lambda: guildinfo['ch'])
# _refresh('serverlock', lambda: guildinfo['sl'])
_refresh('staff', lambda: guildinfo['st'])
_refresh('tempchannel', lambda: guildinfo['tc'])
_refresh('autorole', lambda: guildinfo['ar'])
# _refresh('rolereact', lambda: guildinfo['rr'])
_refresh('leaver', lambda: guildinfo['l'])
_refresh('freerole', lambda: guildinfo['fr'])
_refresh('timeroles', lambda: guildinfo['tr'])
_refresh('namepurge', lambda: guildinfo['np'])
_refresh('welcomer', lambda: guildinfo['w'])
_refresh('stats', lambda: guildinfo['s'])
_refresh('automod', lambda: guildinfo['am'])
_refresh('borderwall', lambda: guildinfo['bw'])
# _refresh('customcommands', lambda: guildinfo['cc'])
# _refresh('music', lambda: guildinfo['m'])
# _refresh('polls', lambda: guildinfo['p'])
# _refresh('logging', lambda: guildinfo['lo'])
_refresh('moderation', lambda: guildinfo['m'])
if (_id not in c['activepunishments'] or force) and _wanted('activepunishments'):
punishments = []
if os.path.exists(f"punishments/{_id}.csv"):
with open(f"punishments/{_id}.csv") as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
if row[8].lower() == "false":
punishments.append({
"userid": int(row[0]),
"type": row[4],
"endtime": int(row[6]) + int(row[7])
})
self.bot.cache['activepunishments'][_id] = punishments
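# Column layout assumed from the indices above (not documented in the source):
# 0 = user id, 4 = punishment type, 6 = start timestamp, 7 = duration, and
# 8 = "true"/"false" flag for whether the punishment was already resolved.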
# "analytics",
cached = True
else:
print(f"Skipped cache as missing arg")
return cached
async def has_elevation(self, guild, guildinfo, user):
if await self.bot.has_special_permission(user, developer=True):
return True
if hasattr(guild, "owner") or hasattr(guild, "owner_id"):
try:
owner_id = guild.owner.id
except AttributeError:
owner_id = guild.owner_id
if owner_id == user.id:
return True
if guildinfo:
if guildinfo.get("st"):
for staff in guildinfo['st']['u']:
if str(user.id) == staff[0]:
return True
if guild:
member = guild.get_member(user.id)
if member and await self.bot.has_permission_node(member, ["manage_guild", "ban_members"]):
return True
return False
async def get_prefix(self, message, return_prefixes=False):
if message.guild:
if message.guild.id not in self.bot.cache['prefix']:
guild_info = await self.bot.get_guild_info(message.guild.id, refer="get_prefix")
self.bot.cache['prefix'][
message.guild.id] = guild_info['d']['b']['p'] or "+"
prefix = self.bot.cache['prefix'][message.guild.id]
else:
prefix = "+"
if not isinstance(prefix, str):
print(message.guild.id, "does not have a string prefix!",
type(prefix), prefix)
if return_prefixes:
return prefix
else:
return commands.when_mentioned_or(prefix)(self.bot, message)
async def has_permission_node(self, target, check_for=None, return_has=False):
check_for = check_for or []
permissions = discord.Permissions.all()
# Start with every boolean permission node marked as missing.
my_permissions = {
node.upper(): False
for node in dir(permissions)
if isinstance(getattr(permissions, node), bool)
}
# A node counts as held if any of the member's roles grants it.
for role in target.roles:
for node in my_permissions:
if getattr(role.permissions, node.lower()):
my_permissions[node] = True
if len(check_for) > 0:
held = list(node for node, val in my_permissions.items() if val)
if "ADMINISTRATOR" in held:
return True
for node in check_for:
if node.upper() in held:
return True
return False
elif return_has:
return list(node for node, val in my_permissions.items() if val)
else:
return False
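# Usage sketch: this is how process_message consumes the check above, e.g.
#   await self.bot.has_permission_node(member, ["manage_guild", "ban_members"])
# which returns True as soon as any role grants one of the nodes (or
# ADMINISTRATOR), and a list of held node names when return_has=True.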
def get_emote(self, name, fallback=":grey_question:"):
if getattr(self.bot, "emotes", None) is None:
data = None
try:
data = rockutils.load_json("cfg/emotes.json")
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Failed to retrieve emotes.json: {e}",
prefix_colour="light red")
if not data:
guild = self.bot.get_guild(
self.bot.config['bot']['emote_server'])
if guild:
emotes = self.bot.serialiser.emotes(guild)
if emotes[0]:
emotelist = {}
for emote in emotes:
emotelist[emote['name']] = emote['str']
rockutils.save_json("cfg/emotes.json", emotelist)
else:
self.bot.blocking_broadcast(
"emotesdump", "*", args="", timeout=10)
while not os.path.exists("cfg/emotes.json"):
time.sleep(0.5)  # wait for another cluster to write the dump
try:
emotelist = rockutils.load_json("cfg/emotes.json")
except BaseException:
emotelist = {}
setattr(self.bot, "emotes", emotelist)
else:
setattr(self.bot, "emotes", data)
# # sometimes will save it as a list with a table inside, precaution
# if type(self.bot.emotes) == list:
# setattr(self.bot, "emotes", self.bot.emotes[0])
return self.bot.emotes.get(name, fallback)
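# Usage sketch: lookups fall back to a plain unicode placeholder, e.g.
#   self.bot.get_emote("alert")         # "<:alert:...>" once cached
#   self.bot.get_emote("doesnotexist")  # ":grey_question:"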
async def broadcast(self, opcode, recepients, args="", timeout=10):
payload = {
"op": opcode,
"args": ujson.dumps(args),
"recep": recepients,
"timeout": str(timeout),
}
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/job/{self.bot.config['ipc']['auth_key']}"
timeout = aiohttp.ClientTimeout(total=timeout + 2)
async with aiohttp.ClientSession(timeout=timeout) as session:
async with session.post(domain, headers=payload) as resp:
return await resp.json()
def blocking_broadcast(self, opcode, recepients, args="", timeout=10):
payload = {
"op": opcode,
"args": ujson.dumps(args),
"recep": recepients,
"timeout": str(timeout),
}
domain = f"http://{self.bot.config['ipc']['host']}:{self.bot.config['ipc']['port']}/api/job/{self.bot.config['ipc']['auth_key']}"
timeout = timeout + 2
with requests.post(domain, headers=payload, timeout=timeout) as resp:
return resp.json()
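# Usage sketch (opcode taken from get_emote above): ask every cluster to dump
# its emotes and wait up to ten seconds for the collected replies.
#   results = await self.bot.broadcast("emotesdump", "*", args="", timeout=10)
# Note that both variants ship the job parameters as HTTP headers to the IPC
# server rather than as a request body.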
@ commands.Cog.listener()
async def on_command_error(self, ctx, error):
# if isinstance(error, self.NoPermission):
# message = rockutils._("You do not have permission to use this command")
# return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
# if isinstance(error, self.NoDonator):
# message = rockutils._("This command is for donators only. Do +membership to find out more")
# return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, discord.ext.commands.NoPrivateMessage):
message = rockutils._(
"This command cannot be run in a private message", ctx)
return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, (discord.ext.commands.UnexpectedQuoteError,
discord.ext.commands.InvalidEndOfQuotedStringError)):
message = rockutils._(
"Your message contains unexpected quotation marks and could not be executed", ctx)
return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, discord.ext.commands.BotMissingPermissions):
message = rockutils._(
"The bot is unable to run this command as it is missing permissions: {permissions}",
ctx).format(
permissions=",".join(map(lambda o: o.upper(), error.missing_perms)))
return await ctx.send(f"{self.bot.get_emote('alert')} | " + message)
if isinstance(error, discord.errors.Forbidden):
return
if isinstance(error, discord.ext.commands.CheckFailure):
return
_traceback = traceback.format_exception(
type(error), error, error.__traceback__)
_error = {
"name": str(error),
"type": str(type(error)),
"tb": _traceback,
"status": "not handled",
"occurance": str(datetime.now()),
"timestamp": str(time.time()),
"version": ctx.bot.version,
"gname": getattr(ctx.guild, "name", "Direct Message"),
"gid": str(getattr(ctx.guild, "id", "Direct Message")),
"aname": str(ctx.author),
"aid": str(ctx.author.id),
"mc": getattr(ctx.message, "content", ""),
"command": str(ctx.command),
"cog": str(getattr(ctx.command, "cog", ""))
}
try:
# response = await r.table("errors").insert(_error).run(self.bot.connection)
response = await self.set_value("errors", None, _error)
except BaseException:
response = {"inserted": 0}
if response['inserted'] > 0:
_id = response['generated_keys'][0]
embed = discord.Embed(
title="Uh oh, something bad just happened",
description=f"We tried executing your command but something very unexpected happened. Either a bug or a tiger escaped the zoo, but I'm pretty sure it was a bug. I have alerted my higher-ups that this has occurred and it should be fixed soon. [Track Issue](https://welcomer.fun/errors/{_id})\n\n`{_error['name']}`")
await ctx.send(embed=embed)
else:
embed = discord.Embed(
title="Uh oh, something bad just happened",
description="We tried executing your command but something extremely unexpected happened. I was unable to contact my higher-ups at this moment in time, and this could be very bad. Please head to the support server and give them my memo.")
await ctx.send(embed=embed, file=discord.File(io.StringIO(ujson.dumps(_error)), "memo.json"))
@ commands.command(
name="help",
description="|Returns list of all commands with their usage and description")
async def custom_help(self, ctx, module=""):
message = ""
modules = dict()
modules['misc'] = []
is_developer = await ctx.bot.has_special_permission(ctx.author,
developer=True)
is_admin = await ctx.bot.has_special_permission(ctx.author,
developer=True,
admin=True)
is_support = await ctx.bot.has_special_permission(ctx.author,
developer=True,
admin=True,
support=True)
for command in self.bot.commands:
if isinstance(command, discord.ext.commands.core.Group):
brief = command.brief or ""
# Hide staff-only groups from users who lack the matching rank.
if ("developer" not in brief or is_developer) and \
("support" not in brief or is_support) and \
("admin" not in brief or is_admin):
modules[command.name.lower()] = command
else:
modules['misc'].append(command)
if module == "":
message = rockutils._(
"Please specify a module that you would like to look up",
ctx) + "\n\n"
for k in sorted(modules.keys()):
if k == "misc":
message += f"{self.bot.get_emote('dotshorizontal')} **MISC** - `Helpful commands for general use`\n"
c = self.bot.get_command(k)
if c:
message += f"{self.bot.get_emote(c.description.split('|')[0])} **{c.name.upper()}** - "
message += f"`{c.description.split('|')[1]}`\n"
return await self.send_data(ctx, message, ctx.userinfo,
prefer_dms=True, raw=False,
force_guild=False, force_dm=False,
alert=True)
if module != "":
if module.lower() in modules.keys():
modules = {
module.lower(): modules[module.lower()]
}
else:
message = rockutils._(
"Could not find a module with the name: **{modulename}**",
ctx).format(
modulename=module)
message += "\n\n" + rockutils._("Modules", ctx) + ":\n\n"
message += ", ".join(f"**{k}**" for k in modules.keys())
return await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
for cog, cog_obj in modules.items():
if cog.lower() in ['misc']:
message = ""
message += f"\n**{self.bot.get_emote('dotshorizontal')} MISC**\n\n"
for command in sorted(
cog_obj, key=lambda o: f"{o.full_parent_name} {o.name}"):
if len(command.description.split("|")) >= 2:
sub_message = f"**{command.full_parent_name} {command.name} {command.description.split('|')[0]}** | {command.description.split('|')[1]}\n"
else:
sub_message = f"**{command.full_parent_name} {command.name}** | {command.description}\n"
if len(message) + len(sub_message) > 2048:
await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
message = ""
message += sub_message
else:
message = ""
message += f"\n**{self.bot.get_emote(cog_obj.description.split('|')[0])} {cog.upper()}**\n\n"
for command in sorted(
cog_obj.commands,
key=lambda o: f"{o.full_parent_name} {o.name}"):
if len(command.description.split("|")) >= 2:
sub_message = f"**{command.full_parent_name} {command.name} {command.description.split('|')[0]}** | {command.description.split('|')[1]}\n"
else:
sub_message = f"**{command.full_parent_name} {command.name}** | {command.description}\n"
if len(message) + len(sub_message) > 2048:
await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
message = ""
message += sub_message
await self.send_data(ctx, message, ctx.userinfo, prefer_dms=True, raw=False, force_guild=False, force_dm=False, alert=True)
async def chunk_guild(self, guild):
if guild.chunked:
return
a = time.time()
await guild.chunk(cache=True)
if math.ceil((time.time()-a)*1000) >= 10000:
await rockutils.send_webhook(
"https://discord.com/api/webhooks/8[removed]",
f"{'<@143090142360371200>' if math.ceil((time.time()-a)*1000) > 60000 else ''}Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms Shard: {self.bot.shard_id} Cluster: {self.bot.cluster_id}")
rockutils.prefix_print(
f"Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# try:
# a = time.time()
# since = self.bot.chunkcache.get(guild.id, 0)
# cond = self.bot.lockcache.get(guild.id)
# if not cond:
# self.bot.lockcache[guild.id] = asyncio.Condition()
# cond = self.bot.lockcache[guild.id]
# if type(since) != float:
# self.bot.chunkcache[guild.id] = 0
# since = 0
# if a-since > 60:
# rockutils.prefix_print(
# f"Chunking {guild.id}", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# self.bot.chunkcache[guild.id] = a
# await cond.acquire()
# await guild.chunk(cache=True)
# cond.notify_all()
# if math.ceil((time.time()-a)*1000) >= 1000:
# await rockutils.send_webhook(
# "https://discord.com/api/webhooks/[removed]",
# f"{'<@143090142360371200>' if math.ceil((time.time()-a)*1000) > 60000 else ''}Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms Shard: {self.bot.shard_id} Cluster: {self.bot.cluster_id}")
# rockutils.prefix_print(
# f"Chunked {guild.id} in {math.ceil((time.time()-a)*1000)}ms", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# elif cond:
# rockutils.prefix_print(
# f"Waiting for chunk lock on {guild.id}", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# await cond.wait()
# rockutils.prefix_print(
# f"Finished waiting for chunk lock for {guild.id}", prefix_colour="light yellow", prefix="Core:ProcessMessage")
# # wait on lock
# except Exception as e:
# rockutils.prefix_print(
# f"Failed to chunk guild: {e.id}", prefix_colour="red", prefix="Core:ProcessMessage")
async def process_message(self, message):
prefixes = (await self.get_prefix(message, return_prefixes=True), f"<@{self.bot.user.id}>", f"<@!{self.bot.user.id}>")
if not message.content.startswith(prefixes):
return
ctx = await self.bot.get_context(message)
if ctx.command is None:
if ctx.guild and ctx.guild.me in ctx.message.mentions:
message.content = f"{prefixes[0]}prefix"
ctx = await self.bot.get_context(message)
else:
return
if ctx.guild:
try:
await asyncio.wait_for(self.bot.chunk_guild(ctx.guild), timeout=10)
except asyncio.TimeoutError:
await rockutils.send_webhook(
"https://discord.com/api/webhooks/[removed]",
f"Failed to chunk guild `{ctx.guild}` ID: {ctx.guild.id} Shard: {self.bot.shard_id} Cluster: {self.bot.cluster_id}")
return await ctx.send(f"{self.bot.get_emote('alert')} | " + "I am having problems chunking this guild. Try again later. Keep getting this issue? Try the other bot: http://welcomer.gg/invitebot/fallback")
ctx.userinfo = await self.bot.get_user_info(ctx.author.id, refer="process_commands")
if isinstance(message.guild, discord.guild.Guild):
ctx.guildinfo = await self.bot.get_guild_info(ctx.guild.id, refer="process_commands")
else:
ctx.guildinfo = copy.deepcopy(
rockutils.load_json("cfg/default_guild.json"))
ctx.prefix = ctx.guildinfo['d']['b']['p']
rockutils.prefix_print(
ctx.message.content,
prefix=ctx.author.__str__())
# black and whitelist
if self.bot.donator:
if ctx.guild:
has_donated = await self.bot.has_guild_donated(ctx.guild, ctx.guildinfo, donation=True, partner=True)
if not has_donated:
if ctx.command.name not in [
'help', 'donate', 'prefix', 'membership']:
message = rockutils._(
"A membership is required to use the donator bot. You can find out more at **{website}** or by doing `{donatecommand}`. If you have donated, do `{membershipcommand}` to be able to manage servers you have a membership on".format(
website="https://welcomer.fun/donate",
donatecommand="+donate",
membershipcommand="+membership"))
try:
await ctx.send(
f"{self.bot.get_emote('cross')} | " + message)
except BaseException:
pass
elif ctx.guild:
if ctx.command.name not in [
'help', 'donate', 'prefix', 'membership']:
message = rockutils._(
"A membership is required to use the donator bot. You can find out more at **{website}** or by doing `{donatecommand}`. If you have donated, do `{membershipcommand}` to be able to manage servers you have a membership on".format(
website="https://welcomer.fun/donate",
donatecommand="+donate",
membershipcommand="+membership"))
try:
await ctx.send(
f"{self.bot.get_emote('cross')} | " + message)
except BaseException:
pass
else:
if ctx.guild and ctx.guild.get_member(
498519480985583636) and not self.bot.debug:
# If this is the normal bot and it can see donator Welcomer, do not
# respond to messages
return
if self.bot.user.id == 330416853971107840 and ctx.guild and ctx.guild.get_member(824435160593727518):
# Do not process commands if I am the main bot and can see bcomer
return
await self.bot.invoke(ctx)
class DataSerialiser:
def __init__(self, bot):
self.bot = bot
# def guild_detailed(self, guild):
# detailed = {
# "streaming": 0,
# "online": 0,
# "idle": 0,
# "dnd": 0,
# "offline": 0,
# "bots": 0,
# "members": 0,
# }
# if guild and isinstance(guild, discord.Guild):
# for member in guild.members:
# detailed["bots" if member.bot else "members"] += 1
# if hasattr(member, "status"):
# detailed[str(member.status)] += 1
# if hasattr(member, "activities"):
# for activity in member.activities:
# if isinstance(
# activity, discord.Streaming):
# detailed['streaming'] += 1
# elif hasattr(member, "activity") and isinstance(member.activity, discord.Streaming):
# detailed['streaming'] += 1
# return detailed
def guild(self, guild):
guild_info = {}
if guild and isinstance(guild, discord.Guild):
guild_info = {
"name": guild.name,
"id": str(guild.id),
"owner": {
"id": "0",
"name": "?"
},
"region": str(guild.region),
"users": guild.member_count,
"bots": sum(1 for m in guild.members if m.bot),
"creation": guild.created_at.timestamp(),
"icon": str(guild.icon),
"icons": [
str(guild.icon_url_as(format="jpeg", size=64)),
str(guild.icon_url_as(format="png", size=256))
]
}
if guild.owner or guild.owner_id:
guild_info["owner"]["id"] = str(guild.owner_id)
guild_info["owner"]["name"] = str(guild.owner)
return guild_info
async def guildelevation(self, guild, guildinfo=None, member=None):
guildinfo = guildinfo or {}
guild_info = {}
if guild and isinstance(guild, discord.Guild):
guild_info = {
"name": guild.name,
"id": str(guild.id),
"owner": {
"id": str(getattr(guild.owner, "id", guild.owner_id)),
"name": str(guild.owner),
},
"users": guild.member_count,
"bots": sum(1 for m in guild.members if m.bot),
"icon": str(guild.icon),
"icons": [
str(guild.icon_url_as(format="jpeg", size=64)),
str(guild.icon_url_as(format="png", size=256))
]
}
if member and guildinfo:
member = guild.get_member(member.id)
if member:
guild_info['elevated'] = await self.bot.has_elevation(guild, guildinfo, member)
return guild_info
def roles(self, guild):
roles = []
for role in guild.roles:
roles.append({
"name": role.name,
"id": str(role.id),
"position": str(role.position),
"higher": role > guild.me.top_role,
})
return roles
def channels(self, guild):
channels = {
"categories": [],
"voice": [],
"text": []
}
if guild and isinstance(guild, discord.Guild):
for channel in guild.channels:
if isinstance(channel, discord.TextChannel):
channels['text'].append({
"name": channel.name,
"id": str(channel.id),
"position": channel.position,
"category": str(channel.category_id),
"topic": channel.topic,
"nsfw": channel.is_nsfw()
})
if isinstance(channel, discord.VoiceChannel):
channels['voice'].append({
"name": channel.name,
"id": str(channel.id),
"position": channel.position,
"category": str(channel.category_id),
"bitrate": channel.bitrate,
"user_limit": channel.user_limit
})
if isinstance(channel, discord.CategoryChannel):
channels['categories'].append({
"name": channel.name,
"id": str(channel.id),
"position": channel.position,
"nsfw": channel.is_nsfw()
})
return channels
def emotes(self, guild):
emotes = []
if guild and isinstance(guild, discord.Guild):
for emote in guild.emojis:
emotes.append({
"str": str(emote),
"id": str(emote.id),
"name": emote.name,
"gif": emote.animated,
"url": str(emote.url)
})
return emotes
async def invites(self, guild):
ginvites = []
if guild and isinstance(guild, discord.Guild):
try:
for invite in await guild.invites():
try:
ginvites.append(
{"code": invite.code, "created_at": math.ceil(
invite.created_at.timestamp()),
"temp": invite.temporary, "uses": invite.uses,
"max": invite.max_uses,
"inviter": str(invite.inviter.id)
if invite.inviter else "Unknown",
"inviter_str": str(invite.inviter)
if invite.inviter else "Unknown",
"channel": str(invite.channel.id),
"channel_str": str(invite.channel),
"duration": str(invite.max_age), })
except AttributeError as e:
print("Issue when handling invite", invite.code, "on guild", guild.id, e)
except Exception as e:
exc_info = sys.exc_info()
traceback.print_exception(*exc_info)
rockutils.prefix_print(
f"Failed to retrieve invites: {e}",
prefix_colour="light red")
return []
return ginvites
def user(self, user):
userinfo = {}
if user and isinstance(user, (discord.User, discord.ClientUser)):
userinfo = {
"name": user.name,
"bot": user.bot,
"id": str(user.id),
"discriminator": user.discriminator,
"display": str(user.name),
"icon": str(user.avatar),
"creation": user.created_at.timestamp(),
"avatar": [
str(user.default_avatar_url),
str(user.avatar_url_as(format="jpeg", size=64)),
str(user.avatar_url_as(format="png", size=256))
]
}
return userinfo
def mutualguildsid(self, _id):
guilds = []
for guild in self.bot.guilds:
member = guild.get_member(_id)
if not member:
continue
if member.bot:
return []
guilds.append(self.guild(guild))
return guilds
def mutualguilds(self, user):
guilds = []
if user.bot:
return guilds
for guild in self.bot.guilds:
if guild.get_member(user.id):
guilds.append(self.guild(guild))
return guilds
def badges(self, user, userinfo):
_time = time.time()
badges = []
def _active(tier):
# A membership tier counts if it is held and either permanent or unexpired.
m = userinfo['m'][tier]
return m['h'] and (_time < (m.get('u', 0) or 0) or m['p'])
if _active('1') or _active('3') or _active('5'):
badges.append([
self.bot.get_emote("gift"),
"Donator",
"This user supports welcomer",
"202225"
])
if userinfo['m']['p']:
badges.append([
self.bot.get_emote("starbox"),
"Welcomer Partner",
"Currently a Welcomer partner",
"2D103F"
])
all_guilds = rockutils.merge_embeded_lists(
userinfo['g']['g']['m']['c'])
tops = {}
for guild in all_guilds:
if guild['owner']['id'] == str(user.id):
if guild['users'] > 250:
if not guild['id'] in tops:
tops[guild['id']] = guild
if guild['users'] > tops[guild['id']]['users']:
tops[guild['id']] = guild
for guild in tops.values():
badges.append([
self.bot.get_emote("packagevariantclosed"),
"Server Owner",
f"Owner of server with {guild['users']} members",
"202225"
])
if user.id in self.bot.config['roles']['support']:
badges.append([
self.bot.get_emote("gavel"),
"Welcomer Support",
"Official Welcomer support member",
"202225"
])
if user.id in self.bot.config['roles']['trusted']:
badges.append([
self.bot.get_emote("accountstar"),
"Trusted user",
"User that Welcomer recognises as trustworthy",
"202225"
])
if user.id in self.bot.config['roles']['admins']:
badges.append([
self.bot.get_emote("wrench"),
"Welcomer Administrator",
"Official Welcomer administrator",
"202225"
])
if user.id in self.bot.config['roles']['developer']:
badges.append([
self.bot.get_emote("cogs"),
"Welcomer Developer",
"These people made the bot :)",
"202225"
])
return badges
def setup(bot):
def existingdict(subject, key, data):
if not subject.get(key):
subject[key] = data
caches = [
"prefix",
"guilddetails",
"rules",
"analytics",
"channels",
"serverlock",
"staff",
"tempchannel",
"autorole",
"rolereact",
"leaver",
"freerole",
"timeroles",
"namepurge",
"welcomer",
"stats",
"automod",
"borderwall",
"customcommands",
"music",
"polls",
"logging",
"moderation",
"activepunishments"
]
for name in caches:
existingdict(bot.cache, name, {})
core = WelcomerCore(bot)
for key in dir(core):
if not (key.startswith("on_") and key != "on_message_handle"):
value = getattr(core, key)
if callable(value) and not key.startswith("_"):
setattr(bot, key, value)
if not hasattr(bot, key):
print(f"I called set for {key} but it's not set now")
bot.remove_command("help")
bot.add_cog(core)
if not hasattr(bot, "chunkcache"):
setattr(bot, "chunkcache", {})
if not hasattr(bot, "lockcache"):
setattr(bot, "lockcache", {})
setattr(bot, "ranonconnect", False)
setattr(bot, "cachemutex", False)
setattr(bot, "serialiser", DataSerialiser(bot))
setattr(bot, "emotes", rockutils.load_json("cfg/emotes.json"))
default_data = rockutils.load_json("cfg/default_user.json")
setattr(bot, "default_user", default_data)
default_data = rockutils.load_json("cfg/default_guild.json")
setattr(bot, "default_guild", default_data)
bot.reload_data("cfg/config.json", "config")
reload(handling)
| 42.222712
| 327
| 0.48417
| 9,047
| 86,261
| 4.482923
| 0.085332
| 0.029341
| 0.020712
| 0.016051
| 0.493971
| 0.440984
| 0.40003
| 0.365288
| 0.342259
| 0.30631
| 0
| 0.010601
| 0.388692
| 86,261
| 2,042
| 328
| 42.243389
| 0.758515
| 0.150404
| 0
| 0.400673
| 0
| 0.018182
| 0.138677
| 0.020328
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012795
| false
| 0.009428
| 0.015488
| 0.000673
| 0.088215
| 0.03569
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6f503162b0ef4701efc6276ebdf2a288cdafb1f
| 3,480
|
py
|
Python
|
figures/bothspectra.py
|
DanielAndreasen/Paper-updated-nir-linelist
|
a4094a1d73a58c1ee1597c6df8a11b0b9ce17777
|
[
"MIT"
] | null | null | null |
figures/bothspectra.py
|
DanielAndreasen/Paper-updated-nir-linelist
|
a4094a1d73a58c1ee1597c6df8a11b0b9ce17777
|
[
"MIT"
] | null | null | null |
figures/bothspectra.py
|
DanielAndreasen/Paper-updated-nir-linelist
|
a4094a1d73a58c1ee1597c6df8a11b0b9ce17777
|
[
"MIT"
] | null | null | null |
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('ticks')
sns.set_context('paper', font_scale=1.7)
from plot_fits import get_wavelength, dopplerShift
from scipy.interpolate import interp1d
plt.rcParams['xtick.direction'] = 'in'
"""
Compare the spectrum of Arcturus with that of 10 Leo, with some Fe lines
identified.
"""
def get_ymin(center, d1, d2):
w1, f1 = d1
i1 = np.argmin(abs(w1-center))
v1 = f1[i1]
w2, f2 = d2
i2 = np.argmin(abs(w2-center))
v2 = f2[i2]
return min([v1, v2])
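# Sketch: given two (wavelength, flux) pairs, this picks the lower of the two
# fluxes nearest `center`, so the dashed markers drawn later reach down to the
# deeper absorption feature, e.g. ymin = get_ymin(10150.0, (w1, f1), (w2, f2)).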
if __name__ == '__main__':
regions = [[10000, 10100], [10130, 10230], [12200, 12300]]
lines = np.loadtxt('Felines.moog', usecols=(0,))
wArcturus = get_wavelength(fits.getheader('ArcturusSummer.fits'))
fArcturus = fits.getdata('ArcturusSummer.fits')
w10Leo1 = get_wavelength(fits.getheader('10LeoYJ.fits'))
f10Leo1 = fits.getdata('10LeoYJ.fits')
w10Leo2 = get_wavelength(fits.getheader('10LeoH.fits'))
f10Leo2 = fits.getdata('10LeoH.fits')
w10Leo3 = get_wavelength(fits.getheader('10LeoK.fits'))
f10Leo3 = fits.getdata('10LeoK.fits')
f10Leo1, w10Leo1 = dopplerShift(w10Leo1, f10Leo1, -82.53)
f10Leo2, w10Leo2 = dopplerShift(w10Leo2, f10Leo2, -81.82)
f10Leo3, w10Leo3 = dopplerShift(w10Leo3, f10Leo3, -81.37)
for i, region in enumerate(regions):
if i != 1:
continue
if (w10Leo1[0] <= region[0]) and (w10Leo1[-1] >= region[1]):
w10Leo = w10Leo1
f10Leo = f10Leo1
elif (w10Leo2[0] <= region[0]) and (w10Leo2[-1] >= region[1]):
w10Leo = w10Leo2
f10Leo = f10Leo2
elif (w10Leo3[0] <= region[0]) and (w10Leo3[-1] >= region[1]):
w10Leo = w10Leo3
f10Leo = f10Leo3
else:
continue
i1 = (region[0] <= wArcturus) & (wArcturus <= region[1])
i2 = (region[0] <= w10Leo) & (w10Leo <= region[1])
i3 = (region[0] <= lines) & (lines <= region[1])
w1, f1 = wArcturus[i1], fArcturus[i1]
w2, f2 = w10Leo[i2], f10Leo[i2]
plines = lines[i3]
# restrict to the overlapping wavelength range before interpolating
w0 = max(w1[0], w2[0])
wn = min(w1[-1], w2[-1])
interp1 = interp1d(w1, f1, kind='linear')
interp2 = interp1d(w2, f2, kind='linear')
w = np.linspace(w0, wn, len(w1))
f1 = interp1(w)
f2 = interp2(w)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(111)
ax.tick_params('y', labelcolor='w', left=False)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.plot(w, f1, label='Arcturus')
ax.plot(w, f2-0.15, label='10 Leo')
ax.plot(w, f1-f2+0.15, label='Difference')
for j, line in enumerate(plines):
if j%2 == 0:
dy = -0.02
else:
dy = 0.02
if j == 6:
dy = 0.02
elif j == 7:
dy = -0.02
ymin = get_ymin(line, (w1, f1), (w2, f2))
plt.vlines(line, ymin, 1.04+dy, linestyles='dashed')
plt.text(line, 1.04+dy, 'Fe')
ax.set_xlabel(r'Wavelength [$\AA$]')
ax.set_ylabel('Normalized flux')
y1, _ = plt.ylim()
plt.ylim(y1, 1.15)
plt.legend(loc='best', frameon=False)
plt.tight_layout()
# plt.savefig('bothspectra.pdf')
plt.show()
| 31.926606
| 70
| 0.561494
| 465
| 3,480
| 4.144086
| 0.369892
| 0.021796
| 0.035288
| 0.05397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120923
| 0.277586
| 3,480
| 108
| 71
| 32.222222
| 0.645585
| 0.008621
| 0
| 0.094118
| 0
| 0
| 0.075915
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011765
| false
| 0
| 0.070588
| 0
| 0.094118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6f6ce9055d1d8634c3084a055d492122c9b4918
| 1,818
|
py
|
Python
|
EnumLasso/paper/paper_thaliana.py
|
t-basa/LassoVariants
|
ead33ac83de19865a9553dbdda9a28aa5c781e44
|
[
"MIT"
] | 12
|
2016-11-30T04:39:18.000Z
|
2021-09-11T13:57:37.000Z
|
EnumLasso/paper/paper_thaliana.py
|
t-basa/LassoVariants
|
ead33ac83de19865a9553dbdda9a28aa5c781e44
|
[
"MIT"
] | 2
|
2018-03-05T19:01:09.000Z
|
2019-10-10T00:30:55.000Z
|
EnumLasso/paper/paper_thaliana.py
|
t-basa/LassoVariants
|
ead33ac83de19865a9553dbdda9a28aa5c781e44
|
[
"MIT"
] | 6
|
2017-08-19T17:49:51.000Z
|
2022-01-09T07:41:22.000Z
|
# -*- coding: utf-8 -*-
"""
@author: satohara
"""
import sys
sys.path.append('../')
import codecs
import numpy as np
import pandas as pd
from EnumerateLinearModel import EnumLasso
# data - x
fn = './data/call_method_32.b'
df = pd.read_csv(fn, sep=',', header=None)
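# Note: DataFrame.ix used below was deprecated in pandas 0.20 and removed in
# 1.0; on a modern install the equivalent positional lookups would be, for
# example,
#   data_id_x = np.array([int(v) for v in df.iloc[1, 2:]])
#   data = df.iloc[2:, 2:].values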
data_id_x = np.array([int(v) for v in df.ix[1, 2:]])
gene_id = df.ix[2:, :1].values
gene_id = np.array([[int(v[0]), int(v[1])] for v in gene_id])
data = df.ix[2:, 2:].values
data[data=='-'] = 0
data[data=='A'] = 1
data[data=='T'] = 2
data[data=='G'] = 3
data[data=='C'] = 4
count = np.c_[np.sum(data == 1, axis=1), np.sum(data == 2, axis=1), np.sum(data == 3, axis=1), np.sum(data == 4, axis=1)]
c = np.argmax(count, axis=1) + 1
x = data.copy()
for i in range(data.shape[1]):
x[:, i] = 1 - (data[:, i] - c == 0)
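# At this point c holds the most common base (1..4) at each site, so x flags
# every site/sample entry that deviates from the per-site majority allele.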
# data - y
fn = './data/phenotype_published_raw.tsv'
with codecs.open(fn, 'r', 'Shift-JIS', 'ignore') as file:
df = pd.read_table(file, delimiter='\t')
y = df.ix[:, 41].values
# data - reordering, remove nan
idx = np.argsort(data_id_x)
x = x[:, idx]
idx = ~np.isnan(y)
x = x[:, idx].T
y = y[idx]
# data - training & test split
seed = 0
r = 0.8
np.random.seed(seed)
idx = np.random.permutation(x.shape[0])
m = int(np.round(x.shape[0] * r))
xte = x[idx[m:], :]
yte = y[idx[m:]]
x = x[idx[:m], :]
y = y[idx[:m]]
# EnumLasso
rho = 0.1
delta = 0.05
mdl = EnumLasso(rho=rho, warm_start=True, enumtype='k', k=50, delta=delta, save='paper_thaliana.npy', modeltype='regression', verbose=True)
mdl.fit(x, y)
print()
print('--- Enumerated Solutions ---')
print(mdl)
# evaluate
print('--- Mean Square Error / # of Non-zeros ---')
for i in range(len(mdl.obj_)):
a = mdl.a_[i]
b = mdl.b_[i]
z = xte.dot(a) + b
mse = np.mean((z - yte)**2)
print('Solution %3d: MSE = %f / NNZ = %d' % (i+1, mse, a.nonzero()[0].size))
| 24.90411
| 139
| 0.593509
| 336
| 1,818
| 3.154762
| 0.380952
| 0.037736
| 0.033962
| 0.028302
| 0.039623
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031167
| 0.170517
| 1,818
| 73
| 140
| 24.90411
| 0.671751
| 0.074807
| 0
| 0
| 0
| 0
| 0.129419
| 0.034152
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.092593
| 0
| 0.092593
| 0.092593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6f74625e459f6cfa2aca2f74b48bf8881d4641b
| 8,309
|
py
|
Python
|
lib/backup_service_client/models/bucket.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 14
|
2015-02-06T02:47:57.000Z
|
2020-03-14T15:06:05.000Z
|
lib/backup_service_client/models/bucket.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 3
|
2019-02-27T19:29:11.000Z
|
2021-06-02T02:14:27.000Z
|
lib/backup_service_client/models/bucket.py
|
sumedhpb/testrunner
|
9ff887231c75571624abc31a3fb5248110e01203
|
[
"Apache-2.0"
] | 155
|
2018-11-13T14:57:07.000Z
|
2022-03-28T11:53:22.000Z
|
# coding: utf-8
"""
Couchbase Backup Service API
This is REST API allows users to remotely schedule and run backups, restores and merges as well as to explore various archives for all there Couchbase Clusters. # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Bucket(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'size': 'int',
'items': 'int',
'mutations': 'int',
'tombstones': 'int',
'views_count': 'int',
'fts_count': 'int',
'index_count': 'int',
'analytics_count': 'int'
}
attribute_map = {
'name': 'name',
'size': 'size',
'items': 'items',
'mutations': 'mutations',
'tombstones': 'tombstones',
'views_count': 'views_count',
'fts_count': 'fts_count',
'index_count': 'index_count',
'analytics_count': 'analytics_count'
}
def __init__(self, name=None, size=None, items=None, mutations=None, tombstones=None, views_count=None, fts_count=None, index_count=None, analytics_count=None): # noqa: E501
"""Bucket - a model defined in Swagger""" # noqa: E501
self._name = None
self._size = None
self._items = None
self._mutations = None
self._tombstones = None
self._views_count = None
self._fts_count = None
self._index_count = None
self._analytics_count = None
self.discriminator = None
if name is not None:
self.name = name
if size is not None:
self.size = size
if items is not None:
self.items = items
if mutations is not None:
self.mutations = mutations
if tombstones is not None:
self.tombstones = tombstones
if views_count is not None:
self.views_count = views_count
if fts_count is not None:
self.fts_count = fts_count
if index_count is not None:
self.index_count = index_count
if analytics_count is not None:
self.analytics_count = analytics_count
@property
def name(self):
"""Gets the name of this Bucket. # noqa: E501
:return: The name of this Bucket. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Bucket.
:param name: The name of this Bucket. # noqa: E501
:type: str
"""
self._name = name
@property
def size(self):
"""Gets the size of this Bucket. # noqa: E501
:return: The size of this Bucket. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this Bucket.
:param size: The size of this Bucket. # noqa: E501
:type: int
"""
self._size = size
@property
def items(self):
"""Gets the items of this Bucket. # noqa: E501
:return: The items of this Bucket. # noqa: E501
:rtype: int
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this Bucket.
:param items: The items of this Bucket. # noqa: E501
:type: int
"""
self._items = items
@property
def mutations(self):
"""Gets the mutations of this Bucket. # noqa: E501
:return: The mutations of this Bucket. # noqa: E501
:rtype: int
"""
return self._mutations
@mutations.setter
def mutations(self, mutations):
"""Sets the mutations of this Bucket.
:param mutations: The mutations of this Bucket. # noqa: E501
:type: int
"""
self._mutations = mutations
@property
def tombstones(self):
"""Gets the tombstones of this Bucket. # noqa: E501
:return: The tombstones of this Bucket. # noqa: E501
:rtype: int
"""
return self._tombstones
@tombstones.setter
def tombstones(self, tombstones):
"""Sets the tombstones of this Bucket.
:param tombstones: The tombstones of this Bucket. # noqa: E501
:type: int
"""
self._tombstones = tombstones
@property
def views_count(self):
"""Gets the views_count of this Bucket. # noqa: E501
:return: The views_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._views_count
@views_count.setter
def views_count(self, views_count):
"""Sets the views_count of this Bucket.
:param views_count: The views_count of this Bucket. # noqa: E501
:type: int
"""
self._views_count = views_count
@property
def fts_count(self):
"""Gets the fts_count of this Bucket. # noqa: E501
:return: The fts_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._fts_count
@fts_count.setter
def fts_count(self, fts_count):
"""Sets the fts_count of this Bucket.
:param fts_count: The fts_count of this Bucket. # noqa: E501
:type: int
"""
self._fts_count = fts_count
@property
def index_count(self):
"""Gets the index_count of this Bucket. # noqa: E501
:return: The index_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._index_count
@index_count.setter
def index_count(self, index_count):
"""Sets the index_count of this Bucket.
:param index_count: The index_count of this Bucket. # noqa: E501
:type: int
"""
self._index_count = index_count
@property
def analytics_count(self):
"""Gets the analytics_count of this Bucket. # noqa: E501
:return: The analytics_count of this Bucket. # noqa: E501
:rtype: int
"""
return self._analytics_count
@analytics_count.setter
def analytics_count(self, analytics_count):
"""Sets the analytics_count of this Bucket.
:param analytics_count: The analytics_count of this Bucket. # noqa: E501
:type: int
"""
self._analytics_count = analytics_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Bucket, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Bucket):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
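# Usage sketch (field values hypothetical):
#   b = Bucket(name="beer-sample", items=7303)
#   b.to_dict()   # {'name': 'beer-sample', 'size': None, 'items': 7303, ...}
#   print(b)      # pretty-printed via __repr__ -> to_str -> to_dict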
| 25.965625
| 178
| 0.563846
| 991
| 8,309
| 4.579213
| 0.13219
| 0.047598
| 0.095196
| 0.095196
| 0.412737
| 0.305862
| 0.281402
| 0.212869
| 0.119877
| 0.05465
| 0
| 0.018238
| 0.340113
| 8,309
| 319
| 179
| 26.047022
| 0.809411
| 0.32519
| 0
| 0.077465
| 0
| 0
| 0.059657
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169014
| false
| 0
| 0.021127
| 0
| 0.316901
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6f93b1caf13cee134c81078e57fec4a501c2e10
| 1,618
|
py
|
Python
|
funciones/app.py
|
christophermontero/estima-tu-proyecto
|
19f533be203c9ac2c4383ded5a1664dd1d05d679
|
[
"MIT"
] | 2
|
2021-05-29T16:57:17.000Z
|
2021-06-13T18:39:24.000Z
|
funciones/app.py
|
christophermontero/estima-tu-proyecto
|
19f533be203c9ac2c4383ded5a1664dd1d05d679
|
[
"MIT"
] | 22
|
2021-05-22T18:23:40.000Z
|
2021-12-18T21:09:59.000Z
|
funciones/app.py
|
christophermontero/estima-tu-proyecto
|
19f533be203c9ac2c4383ded5a1664dd1d05d679
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request
from db import db_session, init_db
from model import Funcion
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
init_db()
@app.route("/funciones", methods=["POST"])
def create_funcion():
data = request.json
if data is None or data.get("nombreFuncion") is None:
return jsonify({"mensaje": "error"}), 400
funcion = Funcion.create(
data["idFuncion"], data["nombreFuncion"], data["numCampos"], data["numObjetos"],
data["complejidad"], data["modulo_id"],
)
return jsonify({"funcion": funcion.toJson()})
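# Hypothetical request sketch for the handler above (field values invented,
# names taken from the data[...] lookups; Flask's default port assumed):
#   curl -X POST http://localhost:5000/funciones -H 'Content-Type: application/json' \
#     -d '{"idFuncion": 1, "nombreFuncion": "login", "numCampos": 3,
#          "numObjetos": 2, "complejidad": "baja", "modulo_id": 1}'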
@app.route("/funciones", methods=["GET"])
def get_funciones():
funciones = [funcion.toJson() for funcion in Funcion.query.all()]
return jsonify({"funciones": funciones})
@app.route("/funciones/<idFuncion>", methods=["GET"])
def get_funcion(idFuncion):
funcion = Funcion.query.filter_by(idFuncion=idFuncion).first()
if funcion is None:
return jsonify({"message": "La función no existe"}), 404
return jsonify({"funcion": funcion.toJson()})
@app.route("/funciones/porModulo/<idModule>", methods=["GET"])
def get_funcion_byModule(idModule):
m = [function.toJson() for function in Funcion.query.filter_by(modulo_id=idModule).all()]
return jsonify({"funcion": m})
@app.route("/funciones/<idFuncion>", methods=["DELETE"])
def delete_funcion(idFuncion):
function = Funcion.query.filter_by(idFuncion=idFuncion).first()
if function is None:
return jsonify({"message": "La función no existe"}), 404
confirmation = Funcion.delete(function)
return jsonify({"modulos": confirmation})
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| 25.68254
| 93
| 0.685414
| 194
| 1,618
| 5.572165
| 0.345361
| 0.084181
| 0.078631
| 0.044403
| 0.269195
| 0.172063
| 0.172063
| 0.092507
| 0
| 0
| 0
| 0.007257
| 0.148331
| 1,618
| 62
| 94
| 26.096774
| 0.777213
| 0
| 0
| 0.054054
| 0
| 0
| 0.189122
| 0.063041
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.081081
| 0
| 0.405405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6f9a9602db33208c1f896b22af13200b9be42d9
| 309
|
py
|
Python
|
onnx_script/check_onnx_model.py
|
abyssss52/pytorch-image-models
|
6ed4124c610a73fc849e7e9567bc36cf5bf38ceb
|
[
"Apache-2.0"
] | null | null | null |
onnx_script/check_onnx_model.py
|
abyssss52/pytorch-image-models
|
6ed4124c610a73fc849e7e9567bc36cf5bf38ceb
|
[
"Apache-2.0"
] | null | null | null |
onnx_script/check_onnx_model.py
|
abyssss52/pytorch-image-models
|
6ed4124c610a73fc849e7e9567bc36cf5bf38ceb
|
[
"Apache-2.0"
] | null | null | null |
import onnx
# Load the ONNX model
model = onnx.load("./mobilenetv2_new.onnx")
# model = onnx.load("../FaceAnti-Spoofing.onnx")
# Check that the IR is well formed
onnx.checker.check_model(model)
# Print a human readable representation of the graph
print(onnx.helper.printable_graph(model.graph))
# print(model.graph)  # raw protobuf dump, much noisier
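# check_model raises onnx.checker.ValidationError on a malformed IR, so
# reaching the graph dump above means the model passed validation.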
| 25.75
| 52
| 0.76699
| 47
| 309
| 4.978723
| 0.531915
| 0.102564
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003663
| 0.116505
| 309
| 11
| 53
| 28.090909
| 0.85348
| 0.485437
| 0
| 0
| 0
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.4
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6fa99e51df1893798f6cb4d6c3cbd2091fbf05a
| 7,167
|
py
|
Python
|
src/visualization/plot_grid.py
|
davimnz/boa
|
0546ad4df0ecabec1fd3beb1264cd0930dce13a9
|
[
"MIT"
] | null | null | null |
src/visualization/plot_grid.py
|
davimnz/boa
|
0546ad4df0ecabec1fd3beb1264cd0930dce13a9
|
[
"MIT"
] | null | null | null |
src/visualization/plot_grid.py
|
davimnz/boa
|
0546ad4df0ecabec1fd3beb1264cd0930dce13a9
|
[
"MIT"
] | null | null | null |
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from math import cos, radians
def shift_position(pos, x_shift, y_shift) -> dict:
"""
Moves nodes' position by (x_shift, y_shift)
"""
return {n: (x + x_shift, y + y_shift) for n, (x, y) in pos.items()}
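# Sketch:
#   shift_position({'A': (0, 0), 'B': (1, 1)}, 1, 2) == {'A': (1, 2), 'B': (2, 3)}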
def convert_to_2d(latitude, longitude, center_latitude=50.0):
"""
Converts (lat, long) to (x, y) using approximation for small areas.
"""
earth_radius = 6373.0 # unit : km
aspect_ratio = radians(center_latitude)
x = earth_radius * longitude * cos(aspect_ratio)
y = earth_radius * latitude
return x, y
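# This is the small-area equirectangular approximation x ~ R*lon*cos(lat0),
# y ~ R*lat. Inputs are left in degrees, so the scale is only correct up to a
# constant factor, which is fine for the relative node positions plotted here.
# Hypothetical point: convert_to_2d(50.0, 8.0) ~ (32770, 318650)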
def plot_stock_grid(data, position, supply_site_code,
sku_code, balance=False) -> None:
"""
Plots a map containing the amount of stock in each location of a given
grid: Hub, Depot or Distributor.
"""
grid_table = data[(data['Supply Site Code'] == supply_site_code)]
grid_table = grid_table[(grid_table['SKU'] == sku_code)]
stock_mean = []
positions = {}
labels = {}
colors = []
color_dict = {"DEP": "#3f60e1",
"DIST": "#60e13f",
"HUB": "#e13f60",
"DEPOT": '#3f60e1'}
location_index = grid_table.columns.to_list().index('Location Code')
if balance:
stock_index = grid_table.columns.to_list().index('x_opt')
else:
stock_index = grid_table.columns.to_list().index('Closing Stock')
type_index = grid_table.columns.to_list().index('Location Type')
reorder_index = grid_table.columns.to_list().index('Reorder Point (Hl)')
for row in grid_table.itertuples():
location_code = row[location_index + 1]
stock = round(100 * row[stock_index + 1]
/ row[reorder_index + 1]) / 100
stock_mean.append(stock)
loc_type = row[type_index + 1]
if location_code == supply_site_code:
color = color_dict["HUB"]
colors.append(color)
else:
color = color_dict[loc_type]
colors.append(color)
position_row = position[position['code'] == location_code]
latitude = position_row['latitude']
longitude = position_row['longitude']
position_2d = convert_to_2d(latitude, longitude)
positions[location_code] = position_2d
labels[location_code] = stock
positions_nodes = shift_position(positions, 0, 500)
print(np.mean(stock_mean))
grid = nx.Graph()
for key, value in labels.items():
grid.add_node(key, stock=value)
nx.draw_networkx(grid, pos=positions, with_labels=False,
node_size=350, node_color=colors)
nx.draw_networkx_labels(grid, pos=positions_nodes,
labels=labels, font_size=16)
ylim = plt.ylim()
plt.ylim(0.99 * ylim[0], 1.01 * ylim[1])
dep_legend = mpatches.Patch(color=color_dict["DEP"], label='Depósito')
dist_legend = mpatches.Patch(color=color_dict["DIST"], label='CDD')
hub_legend = mpatches.Patch(color=color_dict["HUB"], label="Hub")
plt.legend(handles=[dep_legend, dist_legend, hub_legend], fontsize=20)
plt.axis('off')
plt.show()
def plot_exchange_map(data, exchange, position,
supply_site_code, sku_code) -> None:
"""
Plots the optimal exchange map for a given grid.
"""
exchange_table = exchange[(
exchange['Supply Site Code'] == supply_site_code)]
exchange_table = exchange_table[(exchange_table['SKU'] == sku_code)]
grid_table = data[(data['Supply Site Code'] == supply_site_code)]
grid_table = grid_table[(grid_table['SKU'] == sku_code)]
labels = {'Hub': 'Hub'}
colors = {}
color_dict = {"DEP": "#3f60e1", "DIST": "#60e13f", "HUB": "#e13f60"}
location_index = grid_table.columns.to_list().index('Location Code')
type_index = grid_table.columns.to_list().index('Location Type')
for row in grid_table.itertuples():
location_code = row[location_index + 1]
loc_type = row[type_index + 1]
if location_code == supply_site_code:
color = color_dict["HUB"]
colors[location_code] = color
else:
color = color_dict[loc_type]
colors[location_code] = color
labels[location_code] = location_code
grid = nx.DiGraph()
for key, value in labels.items():
grid.add_node(key, stock=value)
nodes_with_edges = []
origin_index = exchange_table.columns.to_list().index('Origin')
destiny_index = exchange_table.columns.to_list().index('Destiny')
amount_index = exchange_table.columns.to_list().index('Amount')
for row in exchange_table.itertuples():
origin = row[origin_index + 1]
destiny = row[destiny_index + 1]
amount = round(row[amount_index + 1])
if origin == "Available":
origin = supply_site_code
if destiny == supply_site_code:
destiny = 'Hub'
colors['Hub'] = colors[supply_site_code]
grid.add_edge(origin, destiny, weight=amount)
nodes_with_edges.append(origin)
nodes_with_edges.append(destiny)
layout = nx.planar_layout(grid)
layout_label = shift_position(layout, -0.03, 0.03)
nodes_with_edges = list(set(nodes_with_edges))
nodes_colors = []
nodes_labels = {}
for node in nodes_with_edges:
nodes_colors.append(colors[node])
nodes_labels[node] = labels[node]
nx.draw_networkx(grid, layout, node_color=nodes_colors,
nodelist=nodes_with_edges, with_labels=False,
arrowsize=20, node_size=400)
grid_edge_labels = nx.get_edge_attributes(grid, 'weight')
nx.draw_networkx_edge_labels(grid, layout,
edge_labels=grid_edge_labels)
nx.draw_networkx_labels(grid, pos=layout_label, labels=nodes_labels)
dep_legend = mpatches.Patch(color=color_dict["DEP"], label='Depósito')
dist_legend = mpatches.Patch(color=color_dict["DIST"], label='CDD')
hub_legend = mpatches.Patch(color=color_dict["HUB"], label="Hub")
plt.legend(handles=[dep_legend, dist_legend, hub_legend], fontsize=20)
plt.axis('off')
plt.show()
if __name__ == "__main__":
unbalanced = pd.read_csv('data/data.csv', delimiter=';', decimal=',')
balanced = pd.read_csv('output/distribution_output_cvxopt.csv',
delimiter=';', decimal=',')
position = pd.read_csv('data/geopositioning.csv',
delimiter=';', decimal=',')
exchange = pd.read_csv('output/exchanges_output.csv',
delimiter=';', decimal=',')
# choose which grid to plot. The grid cannot be scenario 0
supply_site_code = 'PL-1721'
sku_code = 85023
# plots unbalanced grid, balanced grid, and exchange map
plot_stock_grid(unbalanced, position, supply_site_code, sku_code)
plot_stock_grid(balanced, position, supply_site_code,
sku_code, balance=True)
plot_exchange_map(unbalanced, exchange, position,
supply_site_code, sku_code)
| 34.960976
| 76
| 0.635412
| 909
| 7,167
| 4.755776
| 0.192519
| 0.039325
| 0.055054
| 0.041638
| 0.433727
| 0.409207
| 0.38353
| 0.302105
| 0.284987
| 0.265094
| 0
| 0.01788
| 0.243058
| 7,167
| 204
| 77
| 35.132353
| 0.778986
| 0.053997
| 0
| 0.317241
| 0
| 0
| 0.069864
| 0.01296
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027586
| false
| 0
| 0.041379
| 0
| 0.082759
| 0.006897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6fb42ccff41d5e02e75ca92305085547bd5ee39
| 3,870
|
py
|
Python
|
datascripts/make_placescsv.py
|
NCI-NAACCR-Zone-Design/Louisiana
|
42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07
|
[
"MIT"
] | null | null | null |
datascripts/make_placescsv.py
|
NCI-NAACCR-Zone-Design/Louisiana
|
42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07
|
[
"MIT"
] | 1
|
2020-03-05T23:20:38.000Z
|
2020-03-10T18:03:31.000Z
|
datascripts/make_placescsv.py
|
NCI-NAACCR-Zone-Design/Louisiana
|
42fb1d05c47ae01401ee3ac3cc68ff5e4f5d5c07
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
from osgeo import ogr
import os
import csv
import settings
class PlacesIntersector:
def run(self):
print("PlacesIntersector")
self.reproject(settings.INPUT_ZONESFILE, settings.REPROJECTED_ZONESFILE, settings.CTAZONES_SHAPEFILE_IDFIELD, settings.CTAZONES_SHAPEFILE_NAMEFIELD)
self.reproject(settings.INPUT_CITYBOUNDS_SHP, settings.REPROJECTED_CITY_SHP, settings.CITYBOUNDS_IDFIELD, settings.CITYBOUNDS_NAMEFIELD)
self.reproject(settings.INPUT_COUNTYBOUNDS_SHP, settings.REPROJECTED_COUNTY_SHP, settings.COUNTYBOUNDS_IDFIELD, settings.COUNTYBOUNDS_NAMEFIELD)
self.findplaces(settings.REPROJECTED_CITY_SHP, settings.OUTPUT_CITYCSV, 'City')
self.findplaces(settings.REPROJECTED_COUNTY_SHP, settings.OUTPUT_COUNTYCSV, 'County')
def reproject(self, inputshp, outputshp, idfield, namefield):
# reproject the shapefile to an Albers so we can do area calculations in findplaces()
# and to standardize on there being only one attribute: name
print(" Reproject {} => {}".format(inputshp, outputshp))
command = "{} {} -proj {} -filter-fields {} -rename-fields name={},id={} -o {} -quiet".format(
settings.MAPSHAPER_CLI,
inputshp,
settings.PLANAR_SRS,
','.join([idfield, namefield]),
namefield, idfield,
outputshp
)
# print(command)
os.system(command)
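# With hypothetical settings the format string above expands to something like:
#   mapshaper zones.shp -proj "+proj=aea +lat_1=29.5 +lat_2=45.5" \
#     -filter-fields GEOID,NAME -rename-fields name=NAME,id=GEOID \
#     -o zones_albers.shp -quiet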
def findplaces(self, placesdataset, csvfilename, placecolumnname):
print(" Calculating {} => {}".format(placesdataset, csvfilename))
outfh = open(csvfilename, 'w')
csvfh = csv.writer(outfh)
csvfh.writerow(['Zone', placecolumnname])
ctads = ogr.Open(settings.REPROJECTED_ZONESFILE, False)
ctalayer = ctads.GetLayer(0)
for cta in ctalayer:
ctaid = cta.GetField('id')
ctageom = cta.GetGeometryRef()
places = []
ds = ogr.Open(placesdataset, False)
layer = ds.GetLayer(0)
layer.SetSpatialFilter(ctageom)
for thisplace in layer:
# work around twitchy hands making false intersections
# "% of CTA area" strategy doesn't work: small towns in large rural CTAs = small percentage
# but a town sliver over X acres, well, that should count as intersecting the town.
#
# also work around boundary datasets that are so precisely snapped,
# that we get zero-area intersection as the overlapping boundary linestring of two areas
# this leads to harmless but scary "non-surface geometry" warnings
#
# also, note that we collect names here and unique-ify them in a second step
# multipolygon datasets means that a CTA may intersect the same place more than once!
geom = thisplace.GetGeometryRef()
intersection = geom.Intersection(ctageom)
iacres = 0
if intersection.GetGeometryName() in ('POLYGON', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION'):
iacres = intersection.Area() * settings.SQMETERS_TO_ACRES
if iacres < 2000:
continue
name = thisplace.GetField('name')
# print(" {}".format(name))
places.append(name)
ds = None # close places dataset, will reopen at next CTA
# done collecting: unique-ify the list, write the CSV rows
places = list(set(places))
for name in places:
csvfh.writerow([ctaid, name])
# done CTA loop, close geo fh and CSV fh
ctads = None
outfh.close()
if __name__ == '__main__':
PlacesIntersector().run()
print("DONE")
| 39.896907
| 156
| 0.62093
| 404
| 3,870
| 5.856436
| 0.475248
| 0.048183
| 0.026627
| 0.032967
| 0.088757
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002927
| 0.293798
| 3,870
| 96
| 157
| 40.3125
| 0.862788
| 0.247028
| 0
| 0
| 0
| 0.017857
| 0.072859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.071429
| 0
| 0.142857
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6fd01691eb418ac4d1818fca0bd68461092ddaa
| 580
|
py
|
Python
|
Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
Google/google_organic_results/google_organic_ads/google_regular_ads/serpapi_scrape_google_ads.py
|
dimitryzub/blog-posts-archive
|
0978aaa0c9f0142d6f996b81ce391930c5e3be35
|
[
"CC0-1.0"
] | null | null | null |
# scrapes both regular and shopping ads (top, right blocks)
from serpapi import GoogleSearch
import json, os
params = {
"api_key": os.getenv("API_KEY"),
"engine": "google",
"q": "buy coffee",
"gl": "us",
"hl": "en"
}
search = GoogleSearch(params)
results = search.get_dict()
if results.get("ads", []):
for ad in results["ads"]:
print(json.dumps(ad, indent=2))
if results.get("shopping_results", []):
for shopping_ad in results["shopping_results"]:
print(json.dumps(shopping_ad, indent=2))
else:
print("no shopping ads found.")
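# Sketch: the script reads the SerpApi key from the environment, e.g.
#   API_KEY=... python serpapi_scrape_google_ads.py
# "ads" covers the regular text ads while "shopping_results" covers the
# shopping blocks, matching the two result keys handled above.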
| 22.307692
| 59
| 0.639655
| 79
| 580
| 4.607595
| 0.544304
| 0.06044
| 0.065934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004292
| 0.196552
| 580
| 25
| 60
| 23.2
| 0.776824
| 0.098276
| 0
| 0
| 0
| 0
| 0.201536
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.105263
| 0.157895
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6fd244b6ad93e904d3cfe0db3dd28977bc63c93
| 3,316
|
py
|
Python
|
tomomibot/commands/start.py
|
adzialocha/tomomibot
|
ed3964223bd63340f28d36daa014865f61aaf571
|
[
"MIT"
] | 28
|
2018-07-26T09:47:32.000Z
|
2022-01-24T10:38:13.000Z
|
tomomibot/commands/start.py
|
adzialocha/tomomibot
|
ed3964223bd63340f28d36daa014865f61aaf571
|
[
"MIT"
] | null | null | null |
tomomibot/commands/start.py
|
adzialocha/tomomibot
|
ed3964223bd63340f28d36daa014865f61aaf571
|
[
"MIT"
] | 5
|
2018-08-11T08:07:23.000Z
|
2021-12-23T14:47:40.000Z
|
import click
from tomomibot.cli import pass_context
from tomomibot.runtime import Runtime
from tomomibot.utils import check_valid_voice, check_valid_model
from tomomibot.const import (INTERVAL_SEC, INPUT_DEVICE, OUTPUT_CHANNEL,
INPUT_CHANNEL, OUTPUT_DEVICE, SAMPLE_RATE,
THRESHOLD_DB, NUM_CLASSES_SOUNDS,
SEQ_LEN, TEMPERATURE,
PENALTY, VOLUME, OSC_ADDRESS, OSC_PORT)
@click.command('start', short_help='Start a live session')
@click.option('--interval',
default=INTERVAL_SEC,
help='Interval (in seconds) of analyzing incoming live signal')
@click.option('--input_device',
default=INPUT_DEVICE,
help='Index of audio device for incoming signal')
@click.option('--output_device',
default=OUTPUT_DEVICE,
help='Index of audio device for outgoing signal')
@click.option('--input_channel',
default=INPUT_CHANNEL,
help='Index of channel for incoming signal')
@click.option('--output_channel',
default=OUTPUT_CHANNEL,
help='Index of channel for outgoing signal')
@click.option('--samplerate',
default=SAMPLE_RATE,
help='Sample rate of audio signals')
@click.option('--threshold',
default=THRESHOLD_DB,
help='Ignore audio events under this db value')
@click.option('--num_classes',
default=NUM_CLASSES_SOUNDS,
help='Number of k-means classes')
@click.option('--dynamics/--no_dynamics',
default=False,
help='Use dynamics (volume) classes')
@click.option('--durations/--no_durations',
default=False,
help='Use duration classes (length of sound events)')
@click.option('--seq_len',
default=SEQ_LEN,
help='How long is the sequence the model needs to predict')
@click.option('--temperature',
default=TEMPERATURE,
help='Softmax reweighting temperature')
@click.option('--penalty',
default=PENALTY,
help='Multiple of seq_len to be reached for cutting sequence')
@click.option('--reference',
default=None,
help='Use this voice as a reference for PCA and k-means')
@click.option('--volume',
default=VOLUME,
type=float,
help='Volume of the audio output')
@click.option('--osc_address',
default=OSC_ADDRESS,
type=str,
help='Address of OSC server')
@click.option('--osc_port',
default=OSC_PORT,
type=int,
help='Port of OSC server')
@click.argument('voice')
@click.argument('model')
@pass_context
def cli(ctx, voice, model, **kwargs):
"""Start a live session with tomomibot."""
try:
check_valid_model(model)
except FileNotFoundError as err:
ctx.elog('Model "{}" is invalid: {}'.format(model, err))
else:
try:
check_valid_voice(voice)
except FileNotFoundError as err:
ctx.elog('Voice "{}" is invalid: {}'.format(voice, err))
else:
runtime = Runtime(ctx, voice, model, **kwargs)
runtime.initialize()
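A minimal, self-contained click sketch showing the same option/argument/command pattern used above; the command name and options here are invented for illustration.

import click

@click.command('echo', short_help='Repeat TEXT a few times')
@click.option('--times', default=1, type=int, help='Number of repetitions')
@click.argument('text')
def echo(times, text):
    """Illustrative command: prints TEXT the requested number of times."""
    for _ in range(times):
        click.echo(text)

if __name__ == '__main__':
    echo()  # e.g. `python echo.py --times 3 hello`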
| 39.011765
| 77
| 0.596803
| 367
| 3,316
| 5.26703
| 0.299728
| 0.096741
| 0.043973
| 0.017589
| 0.155199
| 0.129333
| 0.032075
| 0
| 0
| 0
| 0
| 0
| 0.291918
| 3,316
| 84
| 78
| 39.47619
| 0.823254
| 0.010856
| 0
| 0.1
| 0
| 0
| 0.286805
| 0.015272
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0125
| false
| 0.025
| 0.0625
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
059b0412d51d78feb8e9b2b1008cb427fb6c0e11
| 5,516
|
py
|
Python
|
Bot/commands_handling/group_commands.py
|
DogsonPl/bot_for_messenger
|
2d6664b52b59696dc82efb3d361b7700ebb3960b
|
[
"MIT"
] | 19
|
2021-03-11T12:59:00.000Z
|
2022-02-12T18:50:58.000Z
|
Bot/commands_handling/group_commands.py
|
DogsonPl/bot_for_messenger
|
2d6664b52b59696dc82efb3d361b7700ebb3960b
|
[
"MIT"
] | null | null | null |
Bot/commands_handling/group_commands.py
|
DogsonPl/bot_for_messenger
|
2d6664b52b59696dc82efb3d361b7700ebb3960b
|
[
"MIT"
] | 4
|
2021-03-10T23:07:13.000Z
|
2021-09-28T18:55:30.000Z
|
import fbchat
import random as rd
from .logger import logger
from ..bot_actions import BotActions
from ..sql import handling_group_sql
BOT_WELCOME_MESSAGE = """👋 Witajcie, jestem botem 🤖
❓ Jeśli chcesz zobaczyć moje komendy napisz !help"""
def check_admin_permission(function):
async def wrapper(self, event, group_info):
if event.author.id not in group_info.admins:
return await self.send_text_message(event, "🚫 Tylko administartor grupy może używać tej funkcji")
return await function(self, event, group_info)
return wrapper
def check_group_instance(function):
async def wrapper(self, event):
if not isinstance(event.thread, fbchat.Group):
return await self.send_text_message(event, "🚫 To komenda tylko dla grup")
group_info = await self.get_thread_info(event.thread.id)
return await function(self, event, group_info)
return wrapper
class GroupCommands(BotActions):
def __init__(self, loop, bot_id, client):
super().__init__(loop, bot_id, client)
@logger
@check_group_instance
@check_admin_permission
async def delete_random_person(self, event, group_info):
member_to_kick = rd.choice(group_info.participants).id
if member_to_kick in group_info.admins:
await self.send_text_message(event, "🚫 Wylosowalo admina. Nie moge go usunąć")
elif member_to_kick == self.bot_id:
await self.send_text_message(event, "🚫 Wylosowało mnie")
else:
try:
await event.thread.remove_participant(member_to_kick)
except fbchat.InvalidParameters:
await self.send_text_message(event, "🚫 Żeby działała ta funkcja na grupie, muszę mieć admina")
@logger
@check_group_instance
@check_admin_permission
async def set_welcome_message(self, event, group_info):
if event.message.text.lower() == "!powitanie":
message = "🚫 Po !powitanie ustaw treść powitania"
else:
await handling_group_sql.set_welcome_message(event)
message = "✅ Powitanie zostało zmienione :)"
await self.send_text_message(event, message)
@logger
@check_group_instance
@check_admin_permission
async def set_new_group_regulations(self, event, group_info):
if event.message.text.lower() == "!nowyregulamin":
message = "🚫 Po !nowyregulamin ustaw treść regulaminu"
else:
await handling_group_sql.set_group_regulations(event)
message = "✅ Regulamin został zmieniony :) Użyj komendy !regulamin by go zobaczyć"
await self.send_text_message(event, message)
@logger
@check_group_instance
async def get_group_regulations(self, event, group_info):
group_regulations = await handling_group_sql.fetch_group_regulations(event)
if group_regulations is None:
group_regulations = "📜 Grupa nie ma regulaminu. Aby go ustawić użyj komendy\n!nowyregulamin 'treść'"
await self.send_text_message(event, group_regulations)
@logger
@check_group_instance
@check_admin_permission
async def mention_everyone(self, event, group_info):
mentions = [fbchat.Mention(thread_id=participant.id, offset=0, length=12) for participant in group_info.participants]
await self.send_text_message_with_mentions(event, "💬 ELUWA ALL", mentions)
@logger
@check_group_instance
async def send_message_with_random_mention(self, event, group_info):
lucky_member = rd.choice(group_info.participants).id
mention = [fbchat.Mention(thread_id=lucky_member, offset=0, length=12)]
await self.send_text_message_with_mentions(event, "🎆 Zwycięzca", mention)
@logger
@check_group_instance
async def send_love_message(self, event, group_info):
try:
first_person, second_person = event.message.mentions
except ValueError:
await self.send_text_message(event, "💡 Po !kocha oznacz dwie osoby, np !kocha @nick1 @nick2")
else:
love_percent = rd.randint(0, 100)
if love_percent <= 25:
emoji = "💔"
elif love_percent <= 50:
emoji = "💛"
elif love_percent <= 75:
emoji = "❤"
else:
emoji = "💝💘"
first_person_name = event.message.text[8:first_person.length+7]
second_person_name = event.message.text[9+first_person.length:8+first_person.length+second_person.length]
await self.send_text_message(event, f"{emoji} {first_person_name} kocha {second_person_name} w {love_percent} procentach")
@logger
async def reply_on_person_removed(self, event):
if self.bot_id != event.removed.id:
            # if the bot itself was removed from the group, it can't send this message
            await self.send_text_message(event, "🥂 Jakaś kurwa opuściła grupę")
@logger
async def send_message_on_person_added(self, event):
for user in event.added:
if user.id == self.bot_id:
await self.send_text_message(event, BOT_WELCOME_MESSAGE)
break
else:
message = await handling_group_sql.fetch_welcome_message(event)
if message is None:
message = """🥂 Witaj w grupie! Jeśli chcesz zobaczyć moje funkcje napisz !help
Jeśli chcesz ustawić wiadomość powitalną użyj komendy !powitanie"""
await self.send_text_message(event, message)
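The two decorators above gate each handler on group membership and admin rights. A stripped-down sketch of that async-decorator pattern, using plain dicts instead of fbchat objects so it runs anywhere:

import asyncio
import functools

def require_admin(func):
    # Mirrors check_admin_permission above, but against plain dicts for illustration.
    @functools.wraps(func)
    async def wrapper(event, group):
        if event["author"] not in group["admins"]:
            return "permission denied"
        return await func(event, group)
    return wrapper

@require_admin
async def kick(event, group):
    return "kicked " + event["target"]

group = {"admins": ["alice"]}
print(asyncio.run(kick({"author": "alice", "target": "bob"}, group)))    # kicked bob
print(asyncio.run(kick({"author": "bob", "target": "carol"}, group)))    # permission denied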
| 41.787879
| 134
| 0.676215
| 710
| 5,516
| 5.042254
| 0.267606
| 0.040223
| 0.054469
| 0.071229
| 0.390782
| 0.352793
| 0.248883
| 0.201676
| 0.158659
| 0.057542
| 0
| 0.005263
| 0.242205
| 5,516
| 131
| 135
| 42.10687
| 0.845694
| 0.010877
| 0
| 0.3125
| 0
| 0
| 0.159333
| 0.004217
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026786
| false
| 0
| 0.044643
| 0
| 0.133929
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
059f84fb457661f2a82136d2fab085f6c614dd8f
| 1,100
|
py
|
Python
|
util/file_parsing.py
|
LindaSt/BT-graph-creation
|
a6aa4d0ca42db4744150f11f17aea7e98d391319
|
[
"MIT"
] | 1
|
2022-03-09T07:28:14.000Z
|
2022-03-09T07:28:14.000Z
|
util/file_parsing.py
|
LindaSt/BT-graph-creation
|
a6aa4d0ca42db4744150f11f17aea7e98d391319
|
[
"MIT"
] | null | null | null |
util/file_parsing.py
|
LindaSt/BT-graph-creation
|
a6aa4d0ca42db4744150f11f17aea7e98d391319
|
[
"MIT"
] | null | null | null |
import os
import xml.etree.ElementTree as ET
def parse_xml(file_path) -> dict:
tree = ET.parse(file_path)
root = tree.getroot()
groups_colours = {i.attrib['Name']: i.attrib['Color'] for i in root.iter('Group')}
groups = ['hotspot', 'lymphocytes', 'tumorbuds', 'lymphocytesR', 'tumorbudsR']
annotations_elements = {g: [] for g in groups}
for i in root.iter('Annotation'):
annotations_elements[i.attrib['PartOfGroup']].append(i)
annotations = {g: [] for g in groups}
for group, element_list in annotations_elements.items():
for element in element_list:
if element.attrib['Type'] == 'Dot':
annotations[group].append(
[[float(i.attrib['X']), float(i.attrib['Y'])] for i in element.iter('Coordinate')][0])
else:
if group in ['lymphocytes', 'tumorbuds']:
group = 'rectangles_' + group
annotations[group].append(
[[float(i.attrib['X']), float(i.attrib['Y'])] for i in element.iter('Coordinate')])
return annotations
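A quick usage sketch for the parser above; the inline XML is a hypothetical minimal document with the Annotation/Coordinate structure parse_xml expects.

import xml.etree.ElementTree as ET

xml_text = """<Root>
  <Annotation PartOfGroup="hotspot" Type="Dot">
    <Coordinate X="1.5" Y="2.5"/>
  </Annotation>
</Root>"""

root = ET.fromstring(xml_text)
for ann in root.iter('Annotation'):
    coords = [[float(c.attrib['X']), float(c.attrib['Y'])]
              for c in ann.iter('Coordinate')]
    print(ann.attrib['PartOfGroup'], ann.attrib['Type'], coords)
# -> hotspot Dot [[1.5, 2.5]]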
| 36.666667
| 106
| 0.59
| 132
| 1,100
| 4.840909
| 0.378788
| 0.076682
| 0.037559
| 0.031299
| 0.323944
| 0.28482
| 0.234742
| 0.234742
| 0.234742
| 0.234742
| 0
| 0.001224
| 0.257273
| 1,100
| 29
| 107
| 37.931034
| 0.780906
| 0
| 0
| 0.090909
| 0
| 0
| 0.132848
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05a722d6a74837776cdd4f147e146b4674a0d013
| 2,205
|
py
|
Python
|
app.py
|
limjierui/money-goose-telebot
|
bf048e27598b9ff6da580ee62309c4ca33eae0c5
|
[
"MIT"
] | null | null | null |
app.py
|
limjierui/money-goose-telebot
|
bf048e27598b9ff6da580ee62309c4ca33eae0c5
|
[
"MIT"
] | null | null | null |
app.py
|
limjierui/money-goose-telebot
|
bf048e27598b9ff6da580ee62309c4ca33eae0c5
|
[
"MIT"
] | 3
|
2020-12-21T16:21:45.000Z
|
2020-12-24T16:21:28.000Z
|
from flask import Flask, request
import telegram
from moneyGooseBot.master_mind import mainCommandHandler
from moneyGooseBot.credentials import URL, reset_key, bot_token, bot_user_name
from web_server import create_app
# https://api.telegram.org/bot1359229669:AAEm8MG26qbA9XjJyojVKvPI7jAdMVqAkc8/getMe
bot = telegram.Bot(token=bot_token)
app = create_app()
@app.route('/{}'.format(bot_token), methods=['POST'])
def respond():
# retrieve the message in JSON and then transform it to the Telegram object
print("Received message")
    # for overwhelming updates, clear the update attempt (the lines below)
    # and have the method return 1 to clear all pending updates
    try:
        update = telegram.Update.de_json(request.get_json(force=True), bot)
    except Exception:
        print("some error has occurred internally")
        return 'ok'
    if update.message:
        mainCommandHandler(incoming_message=update.message, telebot_instance=bot)
    return 'ok'
@app.route('/{}'.format(reset_key), methods=['POST'])
def reset():
return 'ok'
@app.route('/setwebhook', methods=['GET', 'POST'])
def set_webhook():
    # we use the bot object to link the bot to our app, which lives
    # at the address provided by URL
s = bot.setWebhook('{URL}{HOOK}'.format(URL=URL, HOOK=bot_token))
# something to let us know things work
if s:
return "webhook setup ok"
else:
return "webhook setup failed"
@app.route('/resetupdate', methods=['GET','POST'])
def reset_update():
"""
    Really a temporary method to keep the updates from flooding
"""
s = bot.setWebhook('{URL}{RESET}'.format(URL=URL, RESET=reset_key))
if s:
return "reset hook setup ok"
else:
return "reset hook setup failed"
@app.route('/dropwebhook', methods=['GET'])
def drop_webhook():
"""
Stops the webhook from polling the server and drops all pending requests
"""
s = bot.deleteWebhook(drop_pending_updates=True)
if s:
return "web hook delete success"
else:
return "web hook delete failure"
if __name__ == '__main__':
    # note the threaded arg, which allows
    # the app to handle more than one request at a time
app.run(threaded=True, debug=True)
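A minimal sketch of the webhook-endpoint pattern above, without the Telegram dependency; the route name and payload handling are illustrative only.

from flask import Flask, request

app = Flask(__name__)

@app.route('/webhook', methods=['POST'])
def webhook():
    # get_json(silent=True) returns None on malformed payloads instead of
    # raising, avoiding the bare except used above.
    payload = request.get_json(silent=True)
    if payload is None:
        return 'bad request', 400
    return 'ok'

if __name__ == '__main__':
    app.run(port=5000)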
| 30.625
| 86
| 0.686168
| 298
| 2,205
| 4.983221
| 0.432886
| 0.026936
| 0.018182
| 0.021549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009698
| 0.204989
| 2,205
| 72
| 87
| 30.625
| 0.837422
| 0.277098
| 0
| 0.186047
| 0
| 0
| 0.176093
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.116279
| 0.023256
| 0.418605
| 0.046512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05aa26976885770e54982447eb4735e665e02cf2
| 3,061
|
py
|
Python
|
final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py
|
mmwvh/ce
|
162064eeb6668896410c9d176fe75531cd3493fb
|
[
"MIT"
] | 28
|
2021-04-08T15:59:56.000Z
|
2022-03-12T20:42:16.000Z
|
final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py
|
mmwvh/ce
|
162064eeb6668896410c9d176fe75531cd3493fb
|
[
"MIT"
] | 7
|
2020-08-25T07:58:01.000Z
|
2020-09-12T20:44:12.000Z
|
final/software_tutorial/tutorial/libopencm3/scripts/data/lpc43xx/yaml_odict.py
|
mmwvh/ce
|
162064eeb6668896410c9d176fe75531cd3493fb
|
[
"MIT"
] | 13
|
2020-02-13T18:25:57.000Z
|
2022-03-01T11:27:12.000Z
|
import yaml
from collections import OrderedDict
def construct_odict(load, node):
"""This is the same as SafeConstructor.construct_yaml_omap(),
except the data type is changed to OrderedDict() and setitem is
used instead of append in the loop.
>>> yaml.load('''
... !!omap
... - foo: bar
... - mumble: quux
... - baz: gorp
... ''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.load('''!!omap [ foo: bar, mumble: quux, baz : gorp ]''')
OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
"""
omap = OrderedDict()
yield omap
if not isinstance(node, yaml.SequenceNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map",
node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark
)
for subnode in node.value:
if not isinstance(subnode, yaml.MappingNode):
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a mapping of length 1, but found %s" % subnode.id,
subnode.start_mark
)
if len(subnode.value) != 1:
raise yaml.constructor.ConstructorError(
"while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark
)
key_node, value_node = subnode.value[0]
key = load.construct_object(key_node)
value = load.construct_object(value_node)
omap[key] = value
yaml.add_constructor(u'tag:yaml.org,2002:omap', construct_odict)
def repr_pairs(dump, tag, sequence, flow_style=None):
"""This is the same code as BaseRepresenter.represent_sequence(),
but the value passed to dump.represent_data() in the loop is a
dictionary instead of a tuple."""
value = []
node = yaml.SequenceNode(tag, value, flow_style=flow_style)
if dump.alias_key is not None:
dump.represented_objects[dump.alias_key] = node
best_style = True
for (key, val) in sequence:
item = dump.represent_data({key: val})
if not (isinstance(item, yaml.ScalarNode) and not item.style):
best_style = False
value.append(item)
if flow_style is None:
if dump.default_flow_style is not None:
node.flow_style = dump.default_flow_style
else:
node.flow_style = best_style
return node
def repr_odict(dumper, data):
"""
>>> data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
>>> yaml.dump(data, default_flow_style=False)
'!!omap\\n- foo: bar\\n- mumble: quux\\n- baz: gorp\\n'
>>> yaml.dump(data, default_flow_style=True)
'!!omap [foo: bar, mumble: quux, baz: gorp]\\n'
"""
    return repr_pairs(dumper, u'tag:yaml.org,2002:omap', data.items())
yaml.add_representer(OrderedDict, repr_odict)
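A short round-trip check of the registrations above (run after the constructor and representer are installed); Loader is passed explicitly since newer PyYAML requires it.

from collections import OrderedDict
import yaml

data = OrderedDict([('foo', 'bar'), ('mumble', 'quux'), ('baz', 'gorp')])
text = yaml.dump(data, default_flow_style=False)
print(text)                                 # !!omap\n- foo: bar\n- mumble: quux\n- baz: gorp
print(yaml.load(text, Loader=yaml.Loader))  # OrderedDict with key order preserved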
| 37.329268
| 90
| 0.613525
| 387
| 3,061
| 4.736434
| 0.271318
| 0.0491
| 0.03928
| 0.052373
| 0.302237
| 0.302237
| 0.253137
| 0.238407
| 0.217676
| 0.217676
| 0
| 0.004822
| 0.254819
| 3,061
| 81
| 91
| 37.790123
| 0.798772
| 0.285528
| 0
| 0.142857
| 0
| 0
| 0.129745
| 0.021144
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.040816
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05ac654490e3084f2724bef66dfbbee9d64e72f4
| 10,609
|
py
|
Python
|
app.py
|
isabella232/arrested-development
|
ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea
|
[
"FSFAP"
] | 1
|
2015-03-16T21:22:58.000Z
|
2015-03-16T21:22:58.000Z
|
app.py
|
nprapps/arrested-development
|
ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea
|
[
"FSFAP"
] | 1
|
2021-02-24T06:08:41.000Z
|
2021-02-24T06:08:41.000Z
|
app.py
|
isabella232/arrested-development
|
ac53eb71a4cacc3793d51ff2c2c3c51a7c384dea
|
[
"FSFAP"
] | 2
|
2015-02-22T23:39:11.000Z
|
2021-02-23T10:45:05.000Z
|
#!/usr/bin/env python
import json
from mimetypes import guess_type
import urllib
import envoy
from flask import Flask, Markup, abort, render_template, redirect, Response
import app_config
from models import Joke, Episode, EpisodeJoke, JokeConnection
from render_utils import flatten_app_config, make_context
app = Flask(app_config.PROJECT_NAME)
def _all_seasons():
output = []
SEASONS = [1, 2, 3]
if app_config.IMPORT_NEW_SEASON is True:
SEASONS.append(4)
for season in SEASONS:
season_dict = {}
season_dict['season'] = season
season_dict['episodes'] = []
for episode in Episode.select().where(Episode.season == season):
season_dict['episodes'].append({
'url': 'episode-%s.html' % episode.code,
'text': '%s: %s' % (episode.episode, episode.title),
'episode': episode.episode,
'code': episode.code
})
season_dict['episodes'] = sorted(season_dict['episodes'], key=lambda episode: episode['episode'])
output.append(season_dict)
return output
@app.route('/episode-<episode_code>.html')
def _episode_detail(episode_code):
context = make_context()
context['episode'] = Episode.get(Episode.code == episode_code)
context['jokes'] = {}
context['joke_count'] = 0
for joke in EpisodeJoke.select().where(EpisodeJoke.episode == context['episode']):
group = joke.joke.primary_character
if group not in app_config.PRIMARY_CHARACTER_LIST:
group = 'Miscellaneous'
if group not in context['jokes']:
context['jokes'][group] = []
context['jokes'][group].append(joke)
context['joke_count'] += 1
context['seasons'] = _all_seasons()
context['group_order'] = [g for g in app_config.PRIMARY_CHARACTER_LIST if g in context['jokes']]
try:
context['next'] = Episode.get(number=context['episode'].number + 1)
except Episode.DoesNotExist:
context['next'] = None
try:
context['prev'] = Episode.get(number=context['episode'].number - 1)
except Episode.DoesNotExist:
context['prev'] = None
return render_template('episode_detail.html', **context)
@app.route('/joke-<joke_code>.html')
def _joke_detail(joke_code):
context = make_context()
context['joke'] = Joke.get(Joke.code == int(joke_code))
context['episodejokes'] = EpisodeJoke.select().where(EpisodeJoke.joke == context['joke'])
context['episodejokes'] = sorted(context['episodejokes'], key=lambda ej: ej.episode.code)
context['seasons'] = _all_seasons()
with open('www/live-data/jokes.json') as f:
data = json.load(f)
group_order = data['group_order']
joke_data = data['jokes']
connections = data['connections']
connected_joke_codes = [int(joke_code)]
def filter_connections(c):
if c['joke1_code'] == int(joke_code) or c['joke2_code'] == int(joke_code):
connected_joke_codes.append(c['joke1_code'])
connected_joke_codes.append(c['joke2_code'])
return True
return False
    connections = list(filter(filter_connections, connections))
def filter_jokes(c):
return c['code'] in connected_joke_codes
    for group, jokes in list(joke_data.items()):
        joke_data[group] = list(filter(filter_jokes, jokes))
        if len(joke_data[group]) == 0:
            del joke_data[group]
            group_order.remove(group)
context['group_order'] = Markup(json.dumps(group_order))
context['joke_data'] = Markup(json.dumps(joke_data))
context['connection_data'] = Markup(json.dumps(connections))
context['episodes'] = Markup(json.dumps(data['episodes']))
group = context['joke'].primary_character
if group not in app_config.PRIMARY_CHARACTER_LIST:
group = 'Miscellaneous'
context['group'] = group
return render_template('joke_detail.html', **context)
@app.route('/')
def index():
context = make_context()
context['jokes'] = []
for joke in Joke.select():
context['jokes'].append(joke)
context['jokes'] = sorted(context['jokes'], key=lambda joke: joke.code)
context['seasons'] = _all_seasons()
with open('www/live-data/jokes.json') as f:
data = json.load(f)
context['group_order'] = Markup(json.dumps(data['group_order']))
context['joke_data'] = Markup(json.dumps(data['jokes']))
context['connection_data'] = Markup(json.dumps(data['connections']))
context['episodes'] = Markup(json.dumps(data['episodes']))
return render_template('viz.html', **context)
@app.route('/admin/episodes/<episode_code>/jokeconnection/<joke_connection_id>/delete/', methods=['DELETE'])
def _admin_jokeconnection_delete(episode_code, joke_connection_id):
from flask import request
if request.method == 'DELETE':
JokeConnection.delete().where(JokeConnection.id == int(joke_connection_id)).execute()
return joke_connection_id
@app.route('/admin/episodes/<episode_code>/episodejoke/<episode_joke_id>/delete/', methods=['DELETE'])
def _admin_episodejokes_delete(episode_code, episode_joke_id):
from flask import request
if request.method == 'DELETE':
EpisodeJoke.delete().where(EpisodeJoke.id == int(episode_joke_id)).execute()
return episode_joke_id
@app.route('/admin/episodes/<episode_code>/episodejoke/', methods=['PUT', 'POST'])
def _admin_episodejokes(episode_code):
from flask import request
details = request.form.get('details', None)
if request.method == 'POST':
episode_joke_id = request.form.get('episode_joke_id', None)
ej = EpisodeJoke.get(id=int(episode_joke_id))
ej.details = details
ej.save()
return '%s' % ej.id
if request.method == 'PUT':
joke_code = request.form.get('joke_code', None)
joke_type = request.form.get('type', None)
joke = Joke.get(code=int(joke_code))
episode = Episode.get(code=episode_code)
code = 's%se%sj%s' % (
str(episode.season).zfill(2),
str(episode.episode).zfill(2),
joke.code
)
context = {}
context['ej'] = EpisodeJoke(joke=joke, episode=episode, joke_type=joke_type, details=details, code=code)
context['ej'].save()
return render_template('_episodejoke_form_row.html', **context)
@app.route('/admin/episodes/<episode_code>/jokeconnection/', methods=['PUT'])
def _admin_jokeconnections(episode_code):
from flask import request
if request.method == 'POST':
pass
if request.method == 'PUT':
payload = {}
ej = EpisodeJoke.get(id=int(request.form.get('episode_joke_id')))
payload['joke1'] = ej.joke
payload['joke2'] = Joke.get(code=int(request.form.get('joke_code')))
payload['episode'] = ej.episode
j = JokeConnection(**payload)
j.save()
return("""
<br/>
<a class="related kill-related" href="#" data-jc-id="%s">×</a>
<a class="related" href="#joke-%s">%s →</a>""" % (j.id, j.joke2.code, j.joke2.text))
@app.route('/admin/episodes/')
def _admin_episodes_nocode():
return redirect('/admin/episodes/s04e01/')
@app.route('/admin/episodes/<episode_code>/', methods=['GET', 'PUT'])
def _admin_episodes(episode_code):
from flask import request
if request.method == 'GET':
context = {}
context['episode'] = Episode.get(code=episode_code)
context['episodejokes'] = EpisodeJoke.select().join(Episode).where(Episode.code == episode_code)
context['jokes'] = Joke.select().order_by(Joke.primary_character)
context['seasons'] = _all_seasons()
try:
context['next'] = Episode.get(number=context['episode'].number + 1)
except Episode.DoesNotExist:
context['next'] = None
try:
context['prev'] = Episode.get(number=context['episode'].number - 1)
except Episode.DoesNotExist:
context['prev'] = None
return render_template('admin_episode_detail.html', **context)
if request.method == 'PUT':
e = Episode.get(code=episode_code)
e.blurb = request.form.get('blurb', None)
e.save()
return '%s' % e.id
@app.route('/admin/output/')
def _admin_output():
output = {}
output['joke_main'] = ''
output['joke_details'] = ''
output['joke_connections'] = ''
for joke in Joke.select():
for episode in Episode.select().where(Episode.season == 4).order_by(Episode.number):
try:
ej = EpisodeJoke.get(episode=episode, joke=joke)
output['joke_main'] += '%s\t' % ej.joke_type
output['joke_details'] += '\'%s\t' % ej.details
if ej.connections():
output['joke_connections'] += '\'%s\t' % ej.connections()[0]['text']
else:
output['joke_connections'] += '\t'
except EpisodeJoke.DoesNotExist:
output['joke_main'] += '\t'
output['joke_details'] += '\t'
output['joke_connections'] += '\t'
output['joke_main'] += '\n'
output['joke_details'] += '\n'
output['joke_connections'] += '\n'
return render_template('_output.html', **output)
# Render LESS files on-demand
@app.route('/less/<string:filename>')
def _less(filename):
try:
with open('less/%s' % filename) as f:
less = f.read()
except IOError:
abort(404)
r = envoy.run('%s/lessc -' % app_config.APPS_NODE_PATH, data=less)
return r.std_out, 200, {'Content-Type': 'text/css'}
# Render JST templates on-demand
@app.route('/js/templates.js')
def _templates_js():
r = envoy.run('%s/jst --template underscore jst' % app_config.APPS_NODE_PATH)
return r.std_out, 200, {'Content-Type': 'application/javascript'}
# Render application configuration
@app.route('/js/app_config.js')
def _app_config_js():
config = flatten_app_config()
js = 'window.APP_CONFIG = ' + json.dumps(config)
return js, 200, {'Content-Type': 'application/javascript'}
# Serve arbitrary static files on-demand
@app.route('/<path:path>')
def _static(path):
try:
with open('www/%s' % path) as f:
return f.read(), 200, {'Content-Type': guess_type(path)[0]}
except IOError:
abort(404)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000, debug=app_config.DEBUG)
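A standalone sketch of the grouping-and-sorting pattern that _all_seasons() applies to Episode rows, with hypothetical in-memory data in place of the peewee queries.

import json

episodes = [{'season': 1, 'episode': 2, 'title': 'Top Banana'},
            {'season': 1, 'episode': 1, 'title': 'Pilot'}]

seasons = []
for season in sorted({e['season'] for e in episodes}):
    eps = sorted((e for e in episodes if e['season'] == season),
                 key=lambda e: e['episode'])
    seasons.append({'season': season, 'episodes': eps})
print(json.dumps(seasons, indent=2))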
| 33.153125
| 112
| 0.624658
| 1,279
| 10,609
| 5.014073
| 0.145426
| 0.039451
| 0.018712
| 0.019648
| 0.398098
| 0.305005
| 0.221737
| 0.213317
| 0.156869
| 0.110089
| 0
| 0.006552
| 0.223112
| 10,609
| 319
| 113
| 33.257053
| 0.771536
| 0.014327
| 0
| 0.247863
| 0
| 0.004274
| 0.178722
| 0.050517
| 0
| 0
| 0
| 0
| 0
| 1
| 0.07265
| false
| 0.004274
| 0.059829
| 0.008547
| 0.213675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05ae582a0fb6d75889c4d858419450e634ed3a1d
| 12,129
|
py
|
Python
|
json_modify.py
|
Enacero/yaml-patch
|
7270d431447c82d665622cc316f0941214e7eee2
|
[
"MIT"
] | 2
|
2020-04-21T08:49:39.000Z
|
2020-12-21T07:28:43.000Z
|
json_modify.py
|
Enacero/json_modify
|
7270d431447c82d665622cc316f0941214e7eee2
|
[
"MIT"
] | null | null | null |
json_modify.py
|
Enacero/json_modify
|
7270d431447c82d665622cc316f0941214e7eee2
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Oleksii Petrenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from copy import deepcopy
import json
import typing
import os
import yaml
__version__ = "1.0.1"
__license__ = "MIT"
__all__ = (
"apply_actions",
"apply_to_list",
"apply_to_dict",
"validate_action",
"validate_marker",
"apply_action",
"get_path",
"get_section",
"get_reader",
"find_section_in_list",
)
def get_reader(
file_name: str,
) -> typing.Callable[[typing.Any], typing.Iterable[typing.Any]]:
"""
Determine reader for file.
:param file_name: name of the file with source data
:return: function to read data from file
"""
ext = os.path.splitext(file_name)[-1]
if ext in [".yaml", "yml"]:
return yaml.safe_load
elif ext == ".json":
return json.load
raise ValueError("Cant determine reader for {} extension".format(ext))
def find_section_in_list(
section: typing.List[typing.Any], action: typing.Dict[str, typing.Any], key: str
) -> int:
"""
Find index of section in list
:param section: list, where we want to search
:param action: action dictionary
:param key: the key marker
:return: index of searched section
"""
key = key[1:]
if key.isdigit():
return int(key)
if key not in action:
raise KeyError("Action {}: marker {} not found in action".format(action, key))
compares = action[key]
    for index, item in enumerate(section):
        if all(item[compare["key"]] == compare["value"] for compare in compares):
            return index
raise IndexError(
"Action {}: Value with {} filters not found".format(action, compares)
)
def get_path(action: typing.Dict[str, typing.Any], path_delim: str) -> typing.List[str]:
"""
Get path from action
:param action: action object
:param path_delim: delimiter to be used to split path into keys.
(Not used when path is list)
:return: list of keys
"""
path = action["path"]
if isinstance(path, str):
keys = [str(key) for key in action["path"].split(path_delim)]
return keys
elif isinstance(path, typing.List) and all(isinstance(key, str) for key in path):
return path
else:
raise TypeError(
"Action {}: path should be str or list of strings".format(action)
)
def get_section(
source_data: typing.Iterable[typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> typing.Iterable[typing.Any]:
"""
    Get section described by action's path.
:param source_data: source data where to search
:param action: action object
:param path_delim: delimiter to be used to split path into keys.
(Not used when path is list)
:return: section from source_data described by path
"""
section = source_data
path = get_path(action, path_delim)
if not action["action"] == "add":
path = path[:-1]
for key in path:
key = key.strip()
if key.startswith("$"):
if not isinstance(section, typing.List):
raise TypeError(
"Action {}: section {} is not list".format(action, section)
)
section_index = find_section_in_list(section, action, key)
section = section[section_index]
else:
if not isinstance(section, typing.Dict):
raise TypeError(
"Action {}: section {} is not dict".format(action, section)
)
section = section[key]
return section
def apply_to_dict(
section: typing.Dict[str, typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> None:
"""
Apply action to dictionary.
:param section: section on which action should be applied
:param action: action object that should be applied
:param path_delim: delimiter
"""
action_name = action["action"]
value = action.get("value")
if action_name == "add":
if isinstance(value, typing.Dict):
section.update(value)
else:
raise TypeError(
"Action {}: value for add operation on dict should "
"be of type dict".format(action)
)
else:
path = get_path(action, path_delim)
key = path[-1].strip()
if action_name == "replace":
section[key] = value
elif action_name == "delete":
if key not in section:
raise KeyError("Action {}: no such key {}".format(action, key))
del section[key]
elif action_name == "rename":
if key not in section:
raise KeyError("Action {}: no such key {}".format(action, key))
elif isinstance(value, str):
section[value] = section[key]
del section[key]
else:
raise TypeError(
"Action {}: for rename action on dict value "
"should be string".format(action)
)
def apply_to_list(
section: typing.List[typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> None:
"""
Apply action to list.
:param section: section on which action should be applied
:param action: action object that should be applied
:param path_delim: delimiter
"""
action_name = action["action"]
value = action.get("value")
if action_name == "add":
if isinstance(value, list):
section.extend(value)
else:
raise TypeError(
"Action {}: value for add operation on list should "
"be of type list".format(action)
)
else:
path = get_path(action, path_delim)
key = path[-1].strip()
section_index = find_section_in_list(section, action, key)
if action_name == "replace":
section[section_index] = value
elif action_name == "delete":
section.pop(section_index)
def apply_action(
section: typing.Iterable[typing.Any],
action: typing.Dict[str, typing.Any],
path_delim: str,
) -> None:
"""
Apply action to selected section.
:param section: section to be modified
:param action: action object
:param path_delim: path delimiter. default is '/'
"""
if isinstance(section, typing.Dict):
apply_to_dict(section, action, path_delim)
elif isinstance(section, typing.List):
apply_to_list(section, action, path_delim)
else:
raise TypeError(
"Action {}: Section {} is not of type dict or list".format(action, section)
)
def validate_marker(action: typing.Dict[str, typing.Any], key: str) -> None:
"""
Validate marker from action's path.
:param action: action object
:param key: key that is used as marker
"""
key = key[1:]
marker = action.get(key)
if not marker:
raise KeyError(
"Action {}: marker {} should be defined in action".format(action, key)
)
if not isinstance(marker, typing.List):
raise TypeError(
"Action {}: marker {} should be of type list".format(action, key)
)
for search_filter in marker:
if not isinstance(search_filter, typing.Dict):
raise TypeError(
"Action {}: marker {} filters should be of type dict".format(
action, key
)
)
filter_key = search_filter.get("key")
filter_value = search_filter.get("value")
if not filter_key or not filter_value:
raise KeyError(
"Action {}: for marker {} key and value should be specified".format(
action, key
)
)
def validate_action(action: typing.Dict[str, typing.Any], path_delim: str) -> None:
"""
Validate action.
:param action: action object
:param path_delim: path delimiter
"""
action_name = action.get("action")
if not action_name:
raise KeyError("Action {}: key action is required".format(action))
path = action.get("path")
if not path:
raise KeyError("Action {}: key path is required".format(action))
path = get_path(action, path_delim)
for key in path:
if key.startswith("$") and not key[1:].isdigit():
validate_marker(action, key)
value = action.get("value")
if action_name in ["add", "replace", "rename"] and not value:
raise KeyError(
"Action {}: for {} action key value is required".format(action, action_name)
)
if action_name == "add":
key = path[-1]
if key.startswith("$") and not isinstance(value, typing.List):
raise TypeError(
"Action {}: for add action on list value should be list".format(action)
)
elif not isinstance(value, typing.Dict):
raise TypeError(
"Action {}: for add action on dict value should be dict".format(action)
)
elif action_name == "rename":
if not isinstance(value, str):
raise TypeError(
"Action {}: for rename action on dict value should be string".format(
action
)
)
def apply_actions(
source: typing.Union[typing.Dict[str, typing.Any], str],
actions: typing.Union[typing.List[typing.Dict[str, typing.Any]], str],
copy: bool = False,
path_delim: str = "/",
) -> typing.Iterable[typing.Any]:
"""
Apply actions on source_data.
:param source: dictionary or json/yaml file with data that should be modified
:param actions: list or json/yaml file with actions, that should be applied to
source
:param copy: should source be copied before modification or changed in place
(works only when source is dictionary not file). default is False
:param path_delim: path delimiter. default is '/'
:return: source modified after applying actions
"""
if isinstance(source, str):
reader = get_reader(source)
with open(source, "r") as f:
source_data = reader(f)
elif isinstance(source, typing.Dict):
if copy:
source_data = deepcopy(source)
else:
source_data = source
else:
raise TypeError("source should be data dictionary or file_name with data")
if isinstance(actions, str):
reader = get_reader(actions)
with open(actions, "r") as f:
actions_data = reader(f)
elif isinstance(actions, typing.List):
actions_data = actions
else:
raise TypeError(
"actions should be data dictionary or file_name with actions list"
)
for action in actions_data:
validate_action(action, path_delim)
for action in actions_data:
section = get_section(source_data, action, path_delim)
apply_action(section, action, path_delim)
return source_data
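A usage sketch for apply_actions(), assuming the definitions above are in scope; the source document and action are invented.

source = {"server": {"host": "localhost", "port": 80}}
actions = [{"action": "replace", "path": "server/port", "value": 8080}]

result = apply_actions(source, actions, copy=True)
print(result)  # {'server': {'host': 'localhost', 'port': 8080}}
print(source)  # original left untouched because copy=True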
| 32.692722
| 88
| 0.612252
| 1,513
| 12,129
| 4.820886
| 0.144085
| 0.029613
| 0.032904
| 0.028654
| 0.415136
| 0.319852
| 0.290376
| 0.250343
| 0.233342
| 0.207294
| 0
| 0.001739
| 0.288812
| 12,129
| 370
| 89
| 32.781081
| 0.843844
| 0.242147
| 0
| 0.31405
| 0
| 0
| 0.158541
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041322
| false
| 0
| 0.020661
| 0
| 0.095041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05aed2b7bdb2d62afb387bf3fa03ff50f51651b0
| 43,958
|
py
|
Python
|
serial_scripts/vm_regression/test_vm_serial.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | 1
|
2017-06-13T04:42:34.000Z
|
2017-06-13T04:42:34.000Z
|
serial_scripts/vm_regression/test_vm_serial.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
serial_scripts/vm_regression/test_vm_serial.py
|
vkolli/contrail-test-perf
|
db04b8924a2c330baabe3059788b149d957a7d67
|
[
"Apache-2.0"
] | null | null | null |
import traffic_tests
from vn_test import *
from vm_test import *
from floating_ip import *
from policy_test import *
from compute_node_test import ComputeNodeFixture
from user_test import UserFixture
from multiple_vn_vm_test import *
from tcutils.wrappers import preposttest_wrapper
sys.path.append(os.path.realpath('tcutils/pkgs/Traffic'))
from traffic.core.stream import Stream
from traffic.core.profile import create, ContinuousProfile
from traffic.core.helpers import Host
from traffic.core.helpers import Sender, Receiver
from base import BaseVnVmTest
from common import isolated_creds
import inspect
from tcutils.util import skip_because
from tcutils.tcpdump_utils import start_tcpdump_for_intf,\
stop_tcpdump_for_intf, verify_tcpdump_count
import test
from tcutils.contrail_status_check import ContrailStatusChecker
class TestBasicVMVN0(BaseVnVmTest):
@classmethod
def setUpClass(cls):
super(TestBasicVMVN0, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestBasicVMVN0, cls).tearDownClass()
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_bring_up_vm_with_control_node_down(self):
'''
        Description: Create a VM when there is no active control node. Verify the VM comes up fine when all control nodes are back
Test steps:
1. Create a VN.
2. Shutdown the control node and spawn some VMs.
            3. The VMs' info should get deleted from the agents after 2 minutes.
4. The Tap intf corresponding to the VM should go to ERROR state.
5. Bring up the control nodes.
Pass criteria: The VMs should be back to ACTIVE state, so should the Tap interfaces.
Maintainer : ganeshahv@juniper.net
'''
if len(set(self.inputs.bgp_ips)) < 2:
raise self.skipTest(
"Skipping Test. At least 2 control node required to run the test")
result = True
vn1_name = get_random_name('vn30')
vn1_subnets = ['30.1.1.0/24']
# Collecting all the control node details
controller_list = []
for entry in self.inputs.compute_ips:
inspect_h = self.agent_inspect[entry]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
controller_list.append(entry['controller_ip'])
controller_list = set(controller_list)
# Stop all the control node
for entry in controller_list:
            self.logger.info('Stopping the Control service in %s' % (entry))
self.inputs.stop_service('contrail-control', [entry])
self.addCleanup(self.inputs.start_service,
'contrail-control', [entry])
sleep(30)
vn1_vm1_name = get_random_name('vm1')
vn1_vm2_name = get_random_name('vm2')
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
vm1_fixture.verify_vm_launched()
vm2_fixture.verify_vm_launched()
vm1_node_ip = self.inputs.host_data[
self.nova_h.get_nova_host_of_vm(vm1_fixture.vm_obj)]['host_ip']
vm2_node_ip = self.inputs.host_data[
self.nova_h.get_nova_host_of_vm(vm2_fixture.vm_obj)]['host_ip']
inspect_h1 = self.agent_inspect[vm1_node_ip]
inspect_h2 = self.agent_inspect[vm2_node_ip]
        self.logger.info(
            'Checking that a TAP interface is created for each VM and is in Error state')
vm1_tap_intf = None
vm2_tap_intf = None
vm1_tap_intf = inspect_h1.get_vna_tap_interface_by_ip(
vm1_fixture.vm_ip)
        if vm1_tap_intf == []:
self.logger.error('TAP interface is not created for VM %s' %
(vn1_vm1_name))
result = result and False
else:
if vm1_tap_intf[0]['vrf_name'] != '--ERROR--':
self.logger.error(
                    'TAP interface VRF info should be Error, but is currently %s' %
(vm1_tap_intf[0]['vrf_name']))
result = result and False
vm2_tap_intf = inspect_h2.get_vna_tap_interface_by_ip(
vm2_fixture.vm_ip)
        if vm2_tap_intf == []:
self.logger.error('TAP interface is not created for VM %s' %
(vn1_vm2_name))
result = result and False
else:
if vm2_tap_intf[0]['vrf_name'] != '--ERROR--':
self.logger.error(
                    'TAP interface VRF info should be Error, but is currently %s' %
(vm2_tap_intf[0]['vrf_name']))
result = result and False
self.logger.info('Waiting for 120 sec for cleanup to begin')
sleep(120)
# Check agent should not have any VN info
for entry in self.inputs.compute_ips:
inspect_h = self.agent_inspect[entry]
self.logger.info('Checking VN info in agent %s.' % (entry))
if inspect_h.get_vna_vn(domain=self.project.domain_name,
project=self.project.project_name,
vn_name=vn1_fixture.vn_name):
self.logger.error(
'Agent should not have any VN info present when control node is down')
result = result and False
# Start all the control node
for entry in controller_list:
self.logger.info('Starting the Control service in %s' % (entry))
self.inputs.start_service('contrail-control', [entry])
sleep(10)
self.logger.info('Checking the VM came up properly or not')
assert vn1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
assert vm1_fixture.verify_on_setup()
# Check ping between VM
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
if not result:
self.logger.error(
                'Test to verify cleanup of agent after control-node stop failed')
assert result
return True
# end test_bring_up_vm_with_control_node_down
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_ipam_persistence_across_restart_reboots(self):
'''
Description: Test to validate IPAM persistence across restarts and reboots of nodes.
Test steps:
1. Create a IPAM.
2. Create a VN and launch VMs in it.
3. Restart the contrail-vrouter-agent and contrail-control services.
Pass criteria: The VMs should be back to ACTIVE state and the ping between them should PASS.
Maintainer : ganeshahv@juniper.net
'''
ipam_obj=self.useFixture( IPAMFixture(project_obj= self.project, name = get_random_name('my-ipam')))
assert ipam_obj.verify_on_setup()
ts = time.time()
vn_name = get_random_name('vn')
vn_fixture=self.useFixture( VNFixture(project_name= self.project.project_name, connections= self.connections,
vn_name= vn_name, inputs= self.inputs, subnets=['22.1.1.0/24'], ipam_fq_name = ipam_obj.fq_name))
assert vn_fixture.verify_on_setup()
vm1_fixture = self.useFixture(VMFixture(connections=self.connections,project_name = self.inputs.project_name,
vn_obj=vn_fixture.obj, vm_name = get_random_name('vm1')))
vm2_fixture = self.useFixture(VMFixture(connections=self.connections,project_name = self.inputs.project_name,
vn_obj=vn_fixture.obj, vm_name = get_random_name('vm2')))
assert vm1_fixture.verify_on_setup()
assert vm2_fixture.verify_on_setup()
self.nova_h.wait_till_vm_is_up( vm1_fixture.vm_obj )
self.nova_h.wait_till_vm_is_up( vm2_fixture.vm_obj )
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
self.logger.info('Will restart the services now')
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip])
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable()
assert cluster_status, 'Cluster is not stable after restart'
self.logger.info('Will check if the ipam persists and ping b/w VMs is still successful')
assert ipam_obj.verify_on_setup()
msg = 'VM verification failed after process restarts'
assert vm1_fixture.verify_on_setup(), msg
assert vm2_fixture.verify_on_setup(), msg
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
return True
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_multistep_vm_add_delete_with_stop_start_service(self):
'''
Description: Test to validate VMs addition deletion after service restarts.
Test steps:
1. Create a VN and launch a VM in it.
2. Stop the contrail-vrouter-agent service and check the VM's status.
3. Launch one more VM.
4. Start the contrail-vrouter-agent service.
Pass criteria: The VMs should be in ACTIVE state after the contrail-vrouter-agent service is UP.
Maintainer : ganeshahv@juniper.net
'''
vn_name = get_random_name('vn1')
vn_subnets = ['10.10.10.0/24']
vn_fixture = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets))
assert vn_fixture.verify_on_setup()
vn_obj = vn_fixture.obj
self.logger.info('Launching VM')
vm1_fixture = VMFixture(connections=self.connections,
vn_obj=vn_obj, vm_name=get_random_name('vm1') , project_name=self.inputs.project_name)
vm1_fixture.setUp()
assert vm1_fixture.verify_vm_launched()
        self.logger.info('vm1 launched successfully. Stopping vrouter service')
for compute_ip in self.inputs.compute_ips:
self.inputs.stop_service('contrail-vrouter-agent', [compute_ip])
self.addCleanup(self.inputs.start_service,
'contrail-vrouter-agent', [compute_ip])
self.logger.info('Trying to delete vm1')
assert not vm1_fixture.cleanUp()
self.logger.info(
            'vm1 is not deleted, as expected. Trying to launch a new VM vm2')
vm2_fixture = self.useFixture(VMFixture(connections=self.connections,
vn_obj=vn_obj, vm_name = get_random_name ('vm2'), project_name=self.inputs.project_name))
assert vm2_fixture.verify_vm_launched()
self.logger.info('Checking if vm2 has booted up')
assert not self.nova_h.wait_till_vm_is_up(vm2_fixture.vm_obj)
self.logger.info(
            'vm2 has not booted up, as expected. Starting vrouter service')
for compute_ip in self.inputs.compute_ips:
self.inputs.start_service('contrail-vrouter-agent', [compute_ip])
vm2_fixture.wait_till_vm_is_up()
self.logger.info('vm2 is up now as expected')
assert vm2_fixture.verify_on_setup()
return True
# end test_multistep_vm_add_delete_with_stop_start_service
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_multistep_vm_delete_with_stop_start_service(self):
'''
Description: Test to validate VM's deletion attempt fails when the contrail-vrouter-agent service is down.
Test steps:
1. Create a VN and launch a VM in it.
2. Stop the contrail-vrouter-agent service and check the VM's status.
3. Try deleting the VM.
4. Start the contrail-vrouter-agent service.
Pass criteria: The VM's deletion should fail and it should come back to ACTIVE state after the contrail-vrouter-agent service is UP.
Maintainer : ganeshahv@juniper.net
'''
vn_name = get_random_name('vn1')
vn_subnets = ['10.10.10.0/24']
vn_fixture = self.useFixture(
VNFixture(
project_name=self.inputs.project_name, connections=self.connections,
vn_name=vn_name, inputs=self.inputs, subnets=vn_subnets))
assert vn_fixture.verify_on_setup()
vn_obj = vn_fixture.obj
self.logger.info('Launching VM')
vm1_fixture = VMFixture(connections=self.connections,
vn_obj=vn_obj, vm_name = get_random_name('vm1'), project_name=self.inputs.project_name)
vm1_fixture.setUp()
vm1_fixture.verify_vm_launched()
        self.logger.info('VM launched successfully. Stopping vrouter service')
for compute_ip in self.inputs.compute_ips:
self.inputs.stop_service('contrail-vrouter-agent', [compute_ip])
# self.addCleanup( sleep(10))
self.addCleanup(self.inputs.start_service,
'contrail-vrouter-agent', [compute_ip])
self.logger.info('Trying to delete the VM')
assert not vm1_fixture.cleanUp()
self.logger.info('VM is not deleted as expected')
for compute_ip in self.inputs.compute_ips:
self.logger.info('Starting Vrouter Service')
self.inputs.start_service('contrail-vrouter-agent', [compute_ip])
sleep(10)
return True
# end test_multistep_vm_delete_with_stop_start_service
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter')
def test_nova_com_sch_restart_with_multiple_vn_vm(self):
'''
Description: Test to validate that multiple VM creation and deletion after service restarts.
Test steps:
1. Create multiple VNs and VMs in them.
2. Restart the openstack-nova-compute and openstack-nova-scheduler services.
Pass criteria: The VMs should all be UP and running after the restarts.
Maintainer : ganeshahv@juniper.net
'''
vm1_name = get_random_name('vm_mine')
vn_name = get_random_name('vn222')
vn_subnets = ['11.1.1.0/24']
vn_count_for_test = 32
if (len(self.inputs.compute_ips) == 1):
vn_count_for_test = 5
vm_fixture = self.useFixture(
create_multiple_vn_and_multiple_vm_fixture(
connections=self.connections,
vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name,
subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros-0.3.0-x86_64-uec',
flavor='m1.tiny'))
time.sleep(100)
try:
assert vm_fixture.verify_vms_on_setup()
assert vm_fixture.verify_vns_on_setup()
except Exception as e:
self.logger.exception("Got exception as %s" % (e))
compute_ip = []
for vmobj in vm_fixture.vm_obj_dict.values():
vm_host_ip = vmobj.vm_node_ip
if vm_host_ip not in compute_ip:
compute_ip.append(vm_host_ip)
self.inputs.restart_service('openstack-nova-compute', compute_ip)
self.inputs.restart_service('openstack-nova-scheduler', compute_ip)
sleep(30)
for vmobj in vm_fixture.vm_obj_dict.values():
assert vmobj.verify_on_setup()
return True
# end test_nova_com_sch_restart_with_multiple_vn_vm
@retry(delay=10, tries=30)
def verification_after_process_restart_in_policy_between_vns(self):
result=True
try:
self.analytics_obj.verify_process_and_connection_infos_agent()
self.analytics_obj.verify_process_and_connection_infos_control_node()
self.analytics_obj.verify_process_and_connection_infos_config()
except:
result=False
return result
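    # Illustrative aside (not part of the test class): a standalone decorator
    # matching how @retry(delay=10, tries=30) is used on the helper above; the
    # real implementation lives in tcutils.util, so treat this as an assumption.
    #
    #   import functools, time
    #
    #   def retry(delay=1, tries=3):
    #       def decorator(func):
    #           @functools.wraps(func)
    #           def wrapper(*args, **kwargs):
    #               for _ in range(tries):
    #                   if func(*args, **kwargs):
    #                       return True
    #                   time.sleep(delay)
    #               return False
    #           return wrapper
    #       return decorator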
@test.attr(type=['sanity'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_process_restart_in_policy_between_vns(self):
        ''' Test to validate that, with a policy having a rule allowing icmp forwarding between VMs on different VNs, ping between the VMs should pass
with process restarts
            1. Pick 2 VNs from the resource pool, each with one VM
            2. Create a policy with an icmp allow rule between those VNs and bind it to both networks
3. Ping from one VM to another VM
4. Restart process 'vrouter' and 'control' on setup
5. Ping again between VM's after process restart
Pass criteria: Step 2,3,4 and 5 should pass
'''
vn1_name = get_random_name('vn1')
vn1_subnets = ["192.168.1.0/24"]
vn2_name = get_random_name('vn2')
vn2_subnets = ["192.168.2.0/24"]
policy1_name = 'policy1'
policy2_name = 'policy2'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
rev_rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'icmp',
'source_network': vn2_name,
'dest_network': vn1_name,
},
]
policy1_fixture = self.useFixture(
PolicyFixture(
policy_name=policy1_name, rules_list=rules, inputs=self.inputs,
connections=self.connections))
policy2_fixture = self.useFixture(
PolicyFixture(
policy_name=policy2_name,
rules_list=rev_rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture = self.create_vn(vn1_name, vn1_subnets,option = 'api')
assert vn1_fixture.verify_on_setup()
vn1_fixture.bind_policies(
[policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
vn2_fixture = self.create_vn(vn2_name, vn2_subnets, option = 'api')
assert vn2_fixture.verify_on_setup()
vn2_fixture.bind_policies(
[policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
vn1_vm1_name = get_random_name('vn1_vm1')
vn2_vm1_name = get_random_name('vn2_vm1')
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
vm2_fixture = self.create_vm(vn2_fixture, vn2_vm1_name)
assert vm1_fixture.wait_till_vm_is_up()
assert vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip)
        for compute_ip in self.inputs.compute_ips:
            self.inputs.restart_service('contrail-vrouter-agent', [compute_ip])
        for bgp_ip in self.inputs.bgp_ips:
            self.inputs.restart_service('contrail-control', [bgp_ip])
        for cfgm_ip in self.inputs.cfgm_ips:
            self.inputs.restart_service('contrail-api', [cfgm_ip])
self.verification_after_process_restart_in_policy_between_vns()
self.logger.info('Sleeping for a min.')
sleep(60)
for cfgm_name in self.inputs.cfgm_names:
assert self.analytics_obj.verify_cfgm_uve_module_state\
(self.inputs.collector_names[0],
cfgm_name,'contrail-api')
vn1_vm2_name = get_random_name('vn1_vm2')
vn2_vm2_name = get_random_name('vn2_vm2')
vn3_name = get_random_name('vn3')
vn3_subnets = ["192.168.4.0/24"]
vn3_fixture = self.create_vn(vn3_name, vn3_subnets,option = 'api')
assert vn1_fixture.verify_on_setup()
vm3_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
assert vm3_fixture.verify_on_setup()
vm4_fixture = self.create_vm(vn2_fixture, vn2_vm2_name)
assert vm4_fixture.verify_on_setup()
vm3_fixture.wait_till_vm_is_up()
vm4_fixture.wait_till_vm_is_up()
assert vm3_fixture.ping_with_certainty(vm4_fixture.vm_ip)
# end test_process_restart_in_policy_between_vns
@test.attr(type=['sanity', 'ci_sanity_WIP'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_process_restart_with_multiple_vn_vm(self):
'''
Description: Test to validate that multiple VM creation and deletion after service restarts.
Test steps:
1. Create multiple VNs and VMs in them.
2. Restart the contrail-vrouter-agent service.
Pass criteria: The VMs should all be UP and running after the restarts.
Maintainer : ganeshahv@juniper.net
'''
vm1_name = 'vm_mine'
vn_name = 'vn222'
vn_subnets = ['11.1.1.0/24']
vn_count_for_test = 32
if (len(self.inputs.compute_ips) == 1):
vn_count_for_test = 10
        if 'ci_image' in os.environ:
vn_count_for_test = 3
vm_fixture = self.useFixture(
create_multiple_vn_and_multiple_vm_fixture(
connections=self.connections,
vn_name=vn_name, vm_name=vm1_name, inputs=self.inputs, project_name=self.inputs.project_name,
subnets=vn_subnets, vn_count=vn_count_for_test, vm_count=1, subnet_count=1, image_name='cirros-0.3.0-x86_64-uec',
flavor='m1.tiny'))
time.sleep(100)
try:
assert vm_fixture.wait_till_vms_are_up()
assert vm_fixture.verify_vns_on_setup()
except Exception as e:
self.logger.exception("Got exception as %s" % (e))
compute_ip = []
for vmobj in vm_fixture.vm_obj_dict.values():
vm_host_ip = vmobj.vm_node_ip
if vm_host_ip not in compute_ip:
compute_ip.append(vm_host_ip)
self.inputs.restart_service('contrail-vrouter-agent', compute_ip)
sleep(50)
for vmobj in vm_fixture.vm_obj_dict.values():
assert vmobj.verify_on_setup()
return True
#end test_process_restart_with_multiple_vn_vm
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_kill_service_verify_core_generation(self):
'''
Description: Test to validate that a core is generated for services on SIGQUIT.
Test steps:
1. Issue commands to generate cores for multiple processes.
Pass criteria: Verify core generation is successful.
Maintainer : sandipd@juniper.net
'''
compute_ip = self.inputs.compute_ips[0]
compute_user = self.inputs.host_data[compute_ip]['username']
compute_pwd = self.inputs.host_data[compute_ip]['password']
cfgm_ip = self.inputs.cfgm_ips[0]
cfgm_user = self.inputs.host_data[cfgm_ip]['username']
cfgm_pwd = self.inputs.host_data[cfgm_ip]['password']
collector_ip = self.inputs.collector_ips[0]
collector_user = self.inputs.host_data[collector_ip]['username']
collector_pwd = self.inputs.host_data[collector_ip]['password']
control_ip = self.inputs.bgp_ips[0]
control_user = self.inputs.host_data[control_ip]['username']
control_pwd = self.inputs.host_data[control_ip]['password']
result = True
err_msg = []
# Format: <service_name>: <role_on_which_the_process_runs>
service_list = {
'contrail-control': 'control',
'contrail-vrouter-agent': 'compute',
'contrail-query-engine': 'collector',
'contrail-collector': 'collector',
'contrail-analytics-api': 'collector',
'contrail-discovery': 'cfgm',
'contrail-api': 'cfgm',
'contrail-svc-monitor': 'cfgm'
}
for service, role in service_list.items():
cmd = "service %s status | awk '{print $4}' | cut -f 1 -d','" % service
self.logger.info("service:%s, role:%s" % (service, role))
if role == 'cfgm':
login_ip = cfgm_ip
login_user = cfgm_user
login_pwd = cfgm_pwd
elif role == 'compute':
login_ip = compute_ip
login_user = compute_user
login_pwd = compute_pwd
elif role == 'control':
login_ip = control_ip
login_user = control_user
login_pwd = control_pwd
elif role == 'collector':
login_ip = collector_ip
login_user = collector_user
login_pwd = collector_pwd
else:
self.logger.error("invalid role:%s" % role)
result = result and False
assert result, "Invalid role:%s specified for service:%s" % (
role, service)
with settings(host_string='%s@%s' % (login_user, login_ip),
password=login_pwd, warn_only=True, abort_on_prompts=False):
pid = run(cmd)
self.logger.info("service:%s, pid:%s" % (service, pid))
run('kill -3 %s' % pid)
sleep(10)
if "No such file or directory" in run("ls -lrt /var/crashes/core.*%s*" % (pid)):
self.logger.error(
"core is not generated for service:%s" % service)
err_msg.append("core is not generated for service:%s" %
service)
result = result and False
else:
# remove core after generation
run("rm -f /var/crashes/core.*%s*" % (pid))
assert result, "core generation validation test failed: %s" % err_msg
return True
# end test_kill_service_verify_core_generation
@test.attr(type=['sanity'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_control_node_switchover(self):
''' Stop the control node and check that peering from the agent falls back to the other control node.
1. Pick one VN from the resource pool which has 2 VMs in it
2. Verify ping between the VMs
3. Find the active control node in the cluster via agent introspect
4. Stop the control service on the active control node
5. Verify agents are connected to the new active control node using XMPP connections
6. Bring back the control service on the previously active node
7. Verify ping between the VMs again after bringing up the control service
Pass criteria: Steps 2, 5 and 7 should pass
'''
if len(set(self.inputs.bgp_ips)) < 2:
self.logger.info(
"Skipping test. At least 2 control nodes are required to run the test")
raise self.skipTest(
"Skipping test. At least 2 control nodes are required to run the test")
result = True
vn1_name = get_random_name('vn1')
vn1_subnets = ['192.168.1.0/24']
vn1_vm1_name = get_random_name('vn1_vm1')
vn1_vm2_name = get_random_name('vn1_vm2')
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
assert vn1_fixture.verify_on_setup()
vm1_fixture = self.create_vm(vn1_fixture, vn1_vm1_name)
assert vm1_fixture.wait_till_vm_is_up()
vm2_fixture = self.create_vm(vn1_fixture, vn1_vm2_name)
assert vm2_fixture.wait_till_vm_is_up()
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
# Figuring the active control node
active_controller = None
self.agent_inspect = self.connections.agent_inspect
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
if entry['cfg_controller'] == 'Yes':
active_controller = entry['controller_ip']
active_controller_host_ip = self.inputs.host_data[
active_controller]['host_ip']
self.logger.info('Active control node from the Agent %s is %s' %
(vm1_fixture.vm_node_ip, active_controller_host_ip))
# Stop on Active node
self.logger.info('Stopping the Control service in %s' %
(active_controller_host_ip))
self.inputs.stop_service(
'contrail-control', [active_controller_host_ip])
sleep(5)
# Check the control node shifted to other control node
new_active_controller = None
new_active_controller_state = None
inspect_h = self.agent_inspect[vm1_fixture.vm_node_ip]
agent_xmpp_status = inspect_h.get_vna_xmpp_connection_status()
for entry in agent_xmpp_status:
if entry['cfg_controller'] == 'Yes':
new_active_controller = entry['controller_ip']
new_active_controller_state = entry['state']
new_active_controller_host_ip = self.inputs.host_data[
new_active_controller]['host_ip']
self.logger.info('Active control node from the Agent %s is %s' %
(vm1_fixture.vm_node_ip, new_active_controller_host_ip))
if new_active_controller_host_ip == active_controller_host_ip:
self.logger.error(
'Control node switchover failed. Old active control node was %s and new active control node is %s' %
(active_controller_host_ip, new_active_controller_host_ip))
result = False
if new_active_controller_state != 'Established':
self.logger.error(
'Agent does not have Established XMPP connection with Active control node')
result = result and False
# Start the control node service again
self.logger.info('Starting the Control service in %s' %
(active_controller_host_ip))
self.inputs.start_service(
'contrail-control', [active_controller_host_ip])
# Check the BGP peering status from the currently active control node
sleep(5)
cn_bgp_entry = self.cn_inspect[
new_active_controller_host_ip].get_cn_bgp_neigh_entry()
for entry in cn_bgp_entry:
if entry['state'] != 'Established':
result = result and False
self.logger.error(
'With Peer %s peering is not Established. Current State %s ' %
(entry['peer'], entry['state']))
assert vm1_fixture.verify_on_setup(), 'VM Verification failed'
assert vm2_fixture.verify_on_setup(), 'VM Verification failed'
# Check the ping
self.logger.info('Checking the ping between the VM again')
assert vm1_fixture.ping_to_ip(vm2_fixture.vm_ip)
assert vm2_fixture.ping_to_ip(vm1_fixture.vm_ip)
if not result:
self.logger.error('Switchover of control node failed')
assert result
return True
# end test_control_node_switchover
@test.attr(type=['sanity'])
@preposttest_wrapper
@skip_because(orchestrator = 'vcenter',address_family = 'v6')
def test_max_vm_flows(self):
''' Test to validate setting up of the max_vm_flows parameter in agent
config file has expected effect on the flows in the system.
1. Set the VM flow cache time and max_vm_flows to 0.01% of max system
flows (512K).
2. Create 2 VNs and connect them using a policy.
3. Launch 2 VMs in the respective VNs.
4. Start traffic with around 20000 flows.
5. Restart the vrouter agent service and check that the flows are limited
to 0.01% of max system flows.
Pass criteria: Step 5 should pass
'''
result = True
# Set VM flow cache time to 10 and max_vm_flows to 0.01% of max system
# flows (512K).
self.comp_node_fixt = {}
self.flow_cache_timeout = 10
self.max_system_flows = 0
self.max_vm_flows = 0.01
for cmp_node in self.inputs.compute_ips:
self.comp_node_fixt[cmp_node] = self.useFixture(ComputeNodeFixture(
self.connections, cmp_node))
self.comp_node_fixt[cmp_node].set_flow_aging_time(
self.flow_cache_timeout)
self.comp_node_fixt[cmp_node].get_config_per_vm_flow_limit()
self.comp_node_fixt[cmp_node].set_per_vm_flow_limit(
self.max_vm_flows)
self.comp_node_fixt[cmp_node].sup_vrouter_process_restart()
if self.max_system_flows < self.comp_node_fixt[
cmp_node].max_system_flows:
self.max_system_flows = self.comp_node_fixt[
cmp_node].max_system_flows
self.addCleanup(self.cleanup_test_max_vm_flows_vrouter_config,
self.inputs.compute_ips,
self.comp_node_fixt)
# Define resources for this test.
vn1_name = get_random_name('VN1')
vn1_subnets = ['10.1.1.0/24']
vn2_name = get_random_name('VN2')
vn2_subnets = ['10.2.1.0/24']
vn1_vm1_name = get_random_name('VM1')
vn2_vm2_name = get_random_name('VM2')
policy1_name = 'policy1'
policy2_name = 'policy2'
rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'any',
'source_network': vn1_name,
'dest_network': vn2_name,
},
]
rev_rules = [
{
'direction': '<>', 'simple_action': 'pass',
'protocol': 'any',
'source_network': vn2_name,
'dest_network': vn1_name,
},
]
# Create 2 VN's and connect them using a policy.
vn1_fixture = self.create_vn(vn1_name, vn1_subnets)
assert vn1_fixture.verify_on_setup()
vn2_fixture = self.create_vn(vn2_name, vn2_subnets)
assert vn2_fixture.verify_on_setup()
policy1_fixture = self.useFixture(
PolicyFixture(
policy_name=policy1_name,
rules_list=rules, inputs=self.inputs,
connections=self.connections))
policy2_fixture = self.useFixture(
PolicyFixture(
policy_name=policy2_name,
rules_list=rev_rules, inputs=self.inputs,
connections=self.connections))
vn1_fixture.bind_policies(
[policy1_fixture.policy_fq_name], vn1_fixture.vn_id)
self.addCleanup(vn1_fixture.unbind_policies,
vn1_fixture.vn_id, [policy1_fixture.policy_fq_name])
vn2_fixture.bind_policies(
[policy2_fixture.policy_fq_name], vn2_fixture.vn_id)
self.addCleanup(vn2_fixture.unbind_policies,
vn2_fixture.vn_id, [policy2_fixture.policy_fq_name])
# Launch 2 VM's in the respective VN's.
vm1_fixture = self.create_vm(vn1_fixture,vm_name=vn1_vm1_name,
flavor='contrail_flavor_small', image_name='ubuntu-traffic')
vm2_fixture = self.create_vm(vn2_fixture,vm_name=vn2_vm2_name,
flavor='contrail_flavor_small', image_name='ubuntu-traffic')
assert vm1_fixture.verify_on_setup(), 'VM1 verifications FAILED'
assert vm2_fixture.verify_on_setup(), 'VM2 verifications FAILED'
assert vm1_fixture.wait_till_vm_is_up(), 'VM1 does not seem to be up'
assert vm2_fixture.wait_till_vm_is_up(), 'VM2 does not seem to be up'
assert vm1_fixture.ping_with_certainty(vm2_fixture.vm_ip), \
'Ping from VM1 to VM2 FAILED'
# Set num_flows to a fixed, smaller value slightly above the
# configured per-VM flow limit
max_system_flows = self.max_system_flows
vm_flow_limit = int((self.max_vm_flows/100.0)*max_system_flows)
num_flows = vm_flow_limit + 30
generated_flows = 2*num_flows
flow_gen_rate = 5
proto = 'udp'
# Start Traffic.
self.traffic_obj = self.useFixture(
traffic_tests.trafficTestFixture(self.connections))
startStatus = self.traffic_obj.startTraffic(
total_single_instance_streams=int(num_flows),
pps=flow_gen_rate,
start_sport=5000,
cfg_profile='ContinuousSportRange',
tx_vm_fixture=vm1_fixture,
rx_vm_fixture=vm2_fixture,
stream_proto=proto)
msg1 = "Status of start traffic : %s, %s, %s" % (
proto, vm1_fixture.vm_ip, startStatus['status'])
self.logger.info(msg1)
assert startStatus['status'], msg1
self.logger.info("Wait for 3 sec for flows to be setup.")
sleep(3)
# 4. Poll live traffic & verify VM flow count
flow_cmd = 'flow -l | grep %s -A2 |' % vm1_fixture.vm_ip
flow_cmd = flow_cmd + ' grep "Action" | grep -v "Action:D(FlowLim)" | wc -l'
sample_time = 2
vm_flow_list=[]
for i in range(5):
sleep(sample_time)
vm_flow_record = self.inputs.run_cmd_on_server(
vm1_fixture.vm_node_ip,
flow_cmd,
self.inputs.host_data[vm1_fixture.vm_node_ip]['username'],
self.inputs.host_data[vm1_fixture.vm_node_ip]['password'])
vm_flow_record = vm_flow_record.strip()
vm_flow_list.append(int(vm_flow_record))
self.logger.info("%s iteration DONE." % i)
self.logger.info("VM flow count = %s." % vm_flow_list[i])
self.logger.info("Sleeping for %s sec before next iteration."
% sample_time)
vm_flow_list.sort(reverse=True)
if vm_flow_list[0] > int(1.1*vm_flow_limit):
self.logger.error("TEST FAILED.")
self.logger.error("VM flow count seen is greater than configured.")
result = False
elif vm_flow_list[0] < int(0.9*vm_flow_limit):
self.logger.error("TEST FAILED.")
self.logger.error("VM flow count seen is much lower than config.")
self.logger.error("Something is stopping flow creation. Please debug")
result = False
else:
self.logger.info("TEST PASSED")
self.logger.info("Expected range of vm flows seen.")
self.logger.info("Max VM flows = %s" % vm_flow_list[0])
# Stop Traffic.
self.logger.info("Proceed to stop traffic..")
try:
self.traffic_obj.stopTraffic(wait_for_stop=False)
except Exception:
self.logger.warn("Failed to get a VM handle and stop traffic.")
self.logger.info("Wait for the flows to get purged.")
sleep(self.flow_cache_timeout)
return result
# end test_max_vm_flows
@test.attr(type=['sanity'])
@preposttest_wrapper
def test_underlay_broadcast_traffic_handling(self):
''' Test the underlay broadcast traffic handling by vrouter (Bug-1545229).
1. Send broadcast traffic from one compute node.
2. Other compute in same subnet should receive that traffic.
3. Receiving compute should treat this traffic as underlay.
4. Compute should not replicate the packet and send the copy back.
Pass criteria: Step 3-4 should pass
Maintainer : chhandak@juniper.net
'''
if (len(self.inputs.compute_ips) < 2):
raise self.skipTest(
"Skipping test. At least 2 compute nodes are required to run the test")
result = True
# Record the existing icmp_echo_ignore_broadcasts value
ignore_broadcasts={}
cmd='cat /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts'
for item in self.inputs.compute_ips:
ignore_broadcasts[item]=self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
# Set ignore broadcasts to false
cmd='echo "0" > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts'
for item in self.inputs.compute_ips:
self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
# Find the broadcast address from the first compute
first_compute = self.inputs.compute_ips[0]
cmd='ifconfig | grep %s' %(self.inputs.host_data[first_compute]['host_control_ip'])
output=self.inputs.run_cmd_on_server(
first_compute, cmd,
self.inputs.host_data[first_compute]['username'],
self.inputs.host_data[first_compute]['password'])
broadcast_address=output.split(" ")[3].split(":")[1]
# Start tcpdump on receiving compute
inspect_h = self.agent_inspect[self.inputs.compute_ips[1]]
comp_intf = inspect_h.get_vna_interface_by_type('eth')
if len(comp_intf) == 1:
comp_intf = comp_intf[0]
self.logger.info('Agent interface name: %s' % comp_intf)
compute_ip = self.inputs.compute_ips[1]
compute_user = self.inputs.host_data[self.inputs.compute_ips[1]]['username']
compute_password = self.inputs.host_data[self.inputs.compute_ips[1]]['password']
filters = "host %s" %(broadcast_address)
(session, pcap) = start_tcpdump_for_intf(compute_ip, compute_user,
compute_password, comp_intf, filters, self.logger)
sleep(5)
# Ping broadcast address
self.logger.info(
'Pinging broadcast address %s from compute %s' %(broadcast_address,\
self.inputs.host_data[self.inputs.compute_ips[0]]['host_control_ip']))
packet_count = 10
cmd='ping -c %s -b %s' %(packet_count, broadcast_address)
output=self.inputs.run_cmd_on_server(
self.inputs.compute_ips[0], cmd,
self.inputs.host_data[self.inputs.compute_ips[0]]['username'],
self.inputs.host_data[self.inputs.compute_ips[0]]['password'])
sleep(5)
# Stop tcpdump
stop_tcpdump_for_intf(session, pcap, self.logger)
# Restore icmp_echo_ignore_broadcasts to its original value
for item in self.inputs.compute_ips:
cmd='echo "%s" > /proc/sys/net/ipv4/icmp_echo_ignore_broadcasts' %(ignore_broadcasts[item])
self.inputs.run_cmd_on_server(
item, cmd,
self.inputs.host_data[item]['username'],
self.inputs.host_data[item]['password'])
# Analyze pcap
assert verify_tcpdump_count(self, session, pcap, exp_count=packet_count), "There should only be %s\
packets from source %s on compute %s" %(packet_count, broadcast_address, compute_ip)
self.logger.info(
'Packet count matched: Compute %s has received only %s packets from source IP %s.\
No duplicate packets seen' %(compute_ip, packet_count, broadcast_address))
return result
# end test_underlay_broadcast_traffic_handling
# end TestBasicVMVN0
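# The tests above repeat one pattern: restart a service on every node, wait for
# the processes to settle, then verify. A minimal, self-contained sketch of that
# pattern follows; restart_fn, verify_fn and settle_seconds are hypothetical
# stand-ins, not part of the Contrail test framework.
def restart_and_verify_on_all_nodes(node_ips, restart_fn, verify_fn, settle_seconds=60):
    from time import sleep
    for ip in node_ips:
        restart_fn(ip)  # e.g. inputs.restart_service('contrail-vrouter-agent', [ip])
    sleep(settle_seconds)  # give the restarted processes time to come back up
    return all(verify_fn(ip) for ip in node_ips)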
| 46.125918
| 140
| 0.626189
| 5,636
| 43,958
| 4.61533
| 0.099184
| 0.039982
| 0.022605
| 0.018299
| 0.631824
| 0.582231
| 0.531755
| 0.488851
| 0.439413
| 0.396702
| 0
| 0.020339
| 0.287524
| 43,958
| 952
| 141
| 46.17437
| 0.810211
| 0.142318
| 0
| 0.471879
| 0
| 0
| 0.140257
| 0.016847
| 0
| 0
| 0
| 0
| 0.082305
| 1
| 0.019204
| false
| 0.03155
| 0.027435
| 0
| 0.0631
| 0.001372
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05b079948e8c02888049d1f77a57cfcbe4bb8e4b
| 1,432
|
py
|
Python
|
readouts/basic_readout.py
|
qbxlvnf11/graph-neural-networks-for-graph-classification
|
5d69ead58c786aa8e472ab0433156fe09fe6ca4b
|
[
"MIT"
] | 20
|
2020-09-02T07:07:35.000Z
|
2022-03-16T15:22:14.000Z
|
readouts/basic_readout.py
|
yuexiarenjing/graph-neural-networks-for-graph-classification
|
5d69ead58c786aa8e472ab0433156fe09fe6ca4b
|
[
"MIT"
] | 2
|
2021-11-01T08:32:10.000Z
|
2022-03-25T04:29:35.000Z
|
readouts/basic_readout.py
|
yuexiarenjing/graph-neural-networks-for-graph-classification
|
5d69ead58c786aa8e472ab0433156fe09fe6ca4b
|
[
"MIT"
] | 11
|
2020-09-02T07:13:46.000Z
|
2022-03-23T10:38:07.000Z
|
import torch
def readout_function(x, readout, batch=None, device=None):
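"""Pool node embeddings into graph-level embeddings via max/avg/sum readout.
Accepts either a dense (batch, nodes, features) tensor, or a flat
(nodes, features) tensor with `batch`, a per-node graph index.
`device` is unused and kept for call-site compatibility.
"""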
if len(x.size()) == 3:
if readout == 'max':
return torch.max(x, dim=1)[0].squeeze() # max readout
elif readout == 'avg':
return torch.mean(x, dim=1).squeeze() # avg readout
elif readout == 'sum':
return torch.sum(x, dim=1).squeeze() # sum readout
elif len(x.size()) == 2:
batch = batch.cpu().tolist()
readouts = []
max_batch = max(batch)
temp_b = 0
last = 0
for i, b in enumerate(batch):
if b != temp_b:
sub_x = x[last:i]
if readout == 'max':
readouts.append(torch.max(sub_x, dim=0)[0].squeeze()) # max readout
elif readout == 'avg':
readouts.append(torch.mean(sub_x, dim=0).squeeze()) # avg readout
elif readout == 'sum':
readouts.append(torch.sum(sub_x, dim=0).squeeze()) # sum readout
last = i
temp_b = b
elif b == max_batch:
sub_x = x[last:len(batch)]
if readout == 'max':
readouts.append(torch.max(sub_x, dim=0)[0].squeeze()) # max readout
elif readout == 'avg':
readouts.append(torch.mean(sub_x, dim=0).squeeze()) # avg readout
elif readout == 'sum':
readouts.append(torch.sum(sub_x, dim=0).squeeze()) # sum readout
break
readouts = torch.cat(readouts, dim=0)
return readouts
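# Hedged usage sketch for readout_function: four node embeddings split across
# two graphs, pooled with each supported readout. Shapes are illustrative only.
if __name__ == '__main__':
    x = torch.randn(4, 8)               # 4 nodes, 8 features each
    batch = torch.tensor([0, 0, 1, 1])  # nodes 0-1 -> graph 0, nodes 2-3 -> graph 1
    for mode in ('max', 'avg', 'sum'):
        out = readout_function(x, mode, batch=batch)
        print(mode, out.shape)          # the two pooled graphs, concatenated along dim 0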
| 34.095238
| 77
| 0.552374
| 197
| 1,432
| 3.944162
| 0.192893
| 0.046332
| 0.138996
| 0.061776
| 0.552124
| 0.552124
| 0.512227
| 0.471042
| 0.471042
| 0.471042
| 0
| 0.016865
| 0.296089
| 1,432
| 42
| 78
| 34.095238
| 0.753968
| 0.074721
| 0
| 0.405405
| 0
| 0
| 0.020517
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.027027
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05b2b6ec5edc971fee6f55c38fd27eec4af6014d
| 11,493
|
py
|
Python
|
plugins/helpers/EFO.py
|
opentargets/platform-input-support
|
555c3ed091a7a3a767dc0c37054dbcd369f02252
|
[
"Apache-2.0"
] | 4
|
2019-03-26T15:54:35.000Z
|
2021-05-27T13:18:43.000Z
|
plugins/helpers/EFO.py
|
opentargets/platform-input-support
|
555c3ed091a7a3a767dc0c37054dbcd369f02252
|
[
"Apache-2.0"
] | 12
|
2019-04-23T14:45:04.000Z
|
2022-03-17T09:40:04.000Z
|
plugins/helpers/EFO.py
|
opentargets/platform-input-support
|
555c3ed091a7a3a767dc0c37054dbcd369f02252
|
[
"Apache-2.0"
] | 2
|
2019-06-15T17:21:14.000Z
|
2021-05-14T18:35:18.000Z
|
import logging
import re
import json
import jsonlines
from urllib import parse
logger = logging.getLogger(__name__)
# EFO
# The current implementation is based on the conversion from OWL format to JSON Lines format using Apache RIOT.
# The structure diseases_obsolete stores the obsolete terms and is used to retrieve the relationship between a valid
# term and its obsolete terms.
# The locationIds are generated by retrieving the parent/child structure and recursively collecting the proper info.
class EFO(object):
def __init__(self, efo_input):
self.efo_input = efo_input
self.diseases = {}
self.diseases_obsolete = {}
self.has_location_ids = {}
self.all_path = {}
self.parent_child_tuples = []
def init_disease(self, id, code):
self.diseases[id] = {}
self.diseases[id]['id'] = id
self.diseases[id]['code'] = code
# return the cross reference for the phenotype.
# ETL uses it with hpo-phenotypes-_yyyy-mm-dd_.jsonl
def set_phenotypes(self, id, disease):
if 'hasDbXref' in disease:
self.diseases[id]['dbXRefs'] = disease['hasDbXref']
# Retrieve the definition info
def set_definition(self, id, disease):
if 'IAO_0000115' in disease:
if isinstance(disease['IAO_0000115'], str):
self.diseases[id]['definition'] = disease['IAO_0000115'].strip('\n')
else:
definitions = self.get_array_value(disease['IAO_0000115'])
self.diseases[id]['definition'] = definitions[0]
if len(definitions) > 1: self.diseases[id]['definition_alternatives'] = definitions[1:]
# Return an array of strings without new line.
def get_array_value(self, value):
if isinstance(value, str):
return [value.strip('\n')]
else:
return [x.strip() for x in value if isinstance(x, str)]
# Return the synonyms. Complex structure. Clean and flatten.
def set_efo_synonyms(self, id, disease):
synonyms_details = {}
if 'hasExactSynonym' in disease:
if len(disease['hasExactSynonym']) > 0:
synonyms = self.get_array_value(disease['hasExactSynonym'])
synonyms_details['hasExactSynonym'] = synonyms
if 'hasRelatedSynonym' in disease:
if len(disease['hasRelatedSynonym']) > 0:
synonyms = self.get_array_value(disease['hasRelatedSynonym'])
synonyms_details['hasRelatedSynonym'] = synonyms
if 'hasBroadSynonym' in disease:
if len(disease['hasBroadSynonym']) > 0:
synonyms = self.get_array_value(disease['hasBroadSynonym'])
synonyms_details['hasBroadSynonym'] = synonyms
if 'hasNarrowSynonym' in disease:
if len(disease['hasNarrowSynonym']) > 0:
synonyms = self.get_array_value(disease['hasNarrowSynonym'])
synonyms_details['hasNarrowSynonym'] = synonyms
if len(synonyms_details.keys()) > 0:
self.diseases[id]['synonyms'] = synonyms_details
# Extract skos:related; used to check phenotype info.
def get_phenotypes(self, phenotypes):
if isinstance(phenotypes, str):
return [self.get_id(phenotypes)]
else:
return [self.get_id(phenotype) for phenotype in phenotypes]
# The field sko is used to check if the phenotype cross references are correct.
# ETL - GraphQL test.
def set_phenotypes_old(self, id, disease):
if "related" in disease:
self.diseases[id]['sko'] = self.get_phenotypes(disease["related"])
# Return if the term is a TherapeuticArea
def set_therapeutic_area(self, id, disease):
if 'oboInOwl:inSubset' in disease:
self.diseases[id]['isTherapeuticArea'] = True
else:
self.diseases[id]['isTherapeuticArea'] = False
# Return the label of the term
def set_label(self, id, disease):
if 'label' in disease:
if isinstance(disease['label'], str):
self.diseases[id]['label'] = disease['label'].strip('\n')
elif isinstance(disease['label'], dict):
self.diseases[id]['label'] = disease['label']['@value'].strip('\n')
else:
self.diseases[id]['label'] = self.get_array_value(disease['label'])[0]
# Return the parents for the term
def set_parents(self, id, disease):
if 'subClassOf' in disease:
subset = disease['subClassOf']
parents = []
if len(subset) > 0:
for father in subset:
if father.startswith('_:'):
self.has_location_ids[father] = id
else:
father_id = self.get_id(father)
parents.append(father_id)
self.diseases[id]['parents'] = parents
def extract_id(self, elem):
return elem.replace(":", "_")
# return the proper prefix.
def get_prefix(self, id):
simple_id = re.match(r'^(.+?)_', id)
if simple_id.group() in ["EFO_", "OTAR_"]:
return "http://www.ebi.ac.uk/efo/"
elif simple_id.group() == 'Orphanet_':
return "http://www.orpha.net/ORDO/"
else:
return "http://purl.obolibrary.org/obo/"
def extract_id_from_uri(self, uri):
new_terms = []
if isinstance(uri, str):
uris_to_extract = [uri]
elif isinstance(uri, list):
uris_to_extract = self.get_array_value(uri)
else:
# TODO: investigate this case.
uris_to_extract = []
for uri_i in uris_to_extract:
full_path = parse.urlsplit(uri_i).path
new_terms.append(full_path.rpartition('/')[2])
return new_terms
# Get the id and create a standard output. Eg. EFO:123 -> EFO_123, HP:9392 -> HP_9392
def get_id(self, id):
ordo = re.sub(r'^.*?ORDO/', '', id)
new_id = re.sub(r'^.*?:', '', ordo)
return new_id
# Check if the EFO term is valid; obsolete terms go to a dedicated structure
def is_obsolete(self, disease, disease_id):
if 'owl:deprecated' in disease:
if 'IAO_0100001' in disease:
new_terms = self.extract_id_from_uri(disease['IAO_0100001'])
for term in new_terms:
if term in self.diseases_obsolete:
self.diseases_obsolete[term].append(disease_id)
else:
self.diseases_obsolete[term] = [disease_id]
return True
else:
return False
# LocationIds: This procedure fills in the parent/child structure
def set_locationIds_structure(self, disease_id, disease):
collection = None
if "unionOf" in disease:
collection = disease["unionOf"]["@list"]
elif "intersectionOf" in disease:
collection = disease["intersectionOf"]["@list"]
if collection is not None:
for elem in collection:
if elem.startswith('_:'):
self.parent_child_tuples.append((disease["@id"], elem))
def load_type_class(self, disease, disease_id):
if not disease["@id"].startswith('_:'):
code = self.get_prefix(disease_id) + disease_id
self.init_disease(disease_id, code)
self.set_label(disease_id, disease)
self.set_definition(disease_id, disease)
self.set_therapeutic_area(disease_id, disease)
self.set_efo_synonyms(disease_id, disease)
self.set_phenotypes(disease_id, disease)
self.set_phenotypes_old(disease_id, disease)
self.set_parents(disease_id, disease)
else:
self.set_locationIds_structure(disease_id, disease)
def get_obsolete_info(self):
for k, v in self.diseases_obsolete.items():
if k in self.diseases:
self.diseases[k]['obsoleteTerms'] = list(self.diseases_obsolete[k])
# LocationIds: This is part of the structure to retrieve the info about locationIds
def get_children(self, node):
return [x[1] for x in self.parent_child_tuples if x[0] == node]
# LocationIds: This is part of the structure to retrieve the info about locationIds.
# Recursively retrieve the location.
def get_nodes(self, node, path):
data = set()
data.add(node)
path.add(node)
children = self.get_children(node)
if children:
lista = set()
for child in children:
if not child.startswith("obo:"):
lista.update(self.get_nodes(child, path))
else:
child_clean_code = re.sub(r'^.*?:', '', child)
lista.add(child_clean_code)
data.update(lista)
return data
# LocationIds are stored in the restriction tag.
# The info is stored inside a JSON parent-child structure
def get_locationIds(self):
parents, children = zip(*self.parent_child_tuples)
self.root_nodes = {x for x in parents if x not in children}
for node in self.root_nodes:
result = self.get_nodes(node, set())
self.all_path[node] = [x for x in list(result) if not x.startswith('_:')]
for k, v in self.has_location_ids.items():
if k in self.all_path:
if not "locationIds" in self.diseases[v]:
self.diseases[v]["locationIds"] = set()
self.diseases[v]["locationIds"].update(self.all_path[k])
# For every term, generate the per-id info dict.
def generate(self):
with open(self.efo_input) as input_file:
for line in input_file:
disease = json.loads(line)
disease_id = self.get_id(disease['@id'])
if not self.is_obsolete(disease, disease_id):
if disease["@type"] == "Class":
self.load_type_class(disease, disease_id)
else:
# @Type: Restriction
if 'someValuesFrom' in disease:
self.parent_child_tuples.append((disease["@id"], disease["someValuesFrom"]))
self.get_obsolete_info()
self.get_locationIds()
# Static file for alpha and production
def save_static_disease_file(self, output_filename):
valid_keys = ["parents", "id", "label"]
with jsonlines.open(output_filename, mode='w') as writer:
for id in self.diseases:
entry = {k: v for k, v in self.diseases[id].items() if k in valid_keys}
entry["parentIds"] = entry["parents"]
del (entry["parents"])
entry["name"] = entry["label"]
del (entry["label"])
writer.write(entry)
def save_diseases(self, output_filename):
with jsonlines.open(output_filename, mode='w') as writer:
for disease in self.diseases:
# A set cannot be serialized to JSON; convert it to a list.
if 'locationIds' in self.diseases[disease]:
listValues = list(self.diseases[disease]['locationIds'])
self.diseases[disease]['locationIds'] = listValues
writer.write(self.diseases[disease])
return output_filename
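# Hypothetical driver for the EFO class above; 'efo.jsonl' stands in for the
# Apache-RIOT-converted ontology file and is not a path from this project.
if __name__ == '__main__':
    efo = EFO('efo.jsonl')
    efo.generate()                                         # parse terms, obsolete map, locationIds
    efo.save_diseases('diseases.jsonl')                    # full per-term records
    efo.save_static_disease_file('diseases_static.jsonl')  # id/name/parentIds subset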
| 40.326316
| 116
| 0.59297
| 1,352
| 11,493
| 4.892012
| 0.180473
| 0.061687
| 0.033868
| 0.017992
| 0.156033
| 0.090717
| 0.065618
| 0.034775
| 0.034775
| 0.034775
| 0
| 0.00859
| 0.301053
| 11,493
| 284
| 117
| 40.46831
| 0.814764
| 0.137736
| 0
| 0.07109
| 0
| 0
| 0.095465
| 0.002328
| 0
| 0
| 0
| 0.003521
| 0
| 1
| 0.118483
| false
| 0
| 0.023697
| 0.009479
| 0.218009
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05b664d9f22c51662666d538e6f424b0f69a4ea2
| 948
|
py
|
Python
|
interaction3/mfield/tests/test_transmit_receive_beamplot.py
|
bdshieh/interaction3
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
[
"MIT"
] | 2
|
2020-07-08T14:42:52.000Z
|
2022-03-13T05:25:55.000Z
|
interaction3/mfield/tests/test_transmit_receive_beamplot.py
|
bdshieh/interaction3
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
[
"MIT"
] | null | null | null |
interaction3/mfield/tests/test_transmit_receive_beamplot.py
|
bdshieh/interaction3
|
b44c390045cf3b594125e90d2f2f4f617bc2433b
|
[
"MIT"
] | null | null | null |
import numpy as np
from interaction3 import abstract
from interaction3.arrays import matrix
from interaction3.mfield.solvers.transmit_receive_beamplot_2 import TransmitReceiveBeamplot2
array = matrix.create(nelem=[2, 2])
simulation = abstract.MfieldSimulation(sampling_frequency=100e6,
sound_speed=1540,
excitation_center_frequecy=5e6,
excitation_bandwidth=4e6,
field_positions=np.array([[0, 0, 0.05],
[0, 0, 0.06],
[0, 0, 0.07]])
)
kwargs, meta = TransmitReceiveBeamplot2.connector(simulation, array)
sim = TransmitReceiveBeamplot2(**kwargs)
sim.solve()
rf_data = sim.result['rf_data']
times = sim.result['times']
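# Minimal follow-up check; it assumes only that the result entries support
# len(), and reports sizes rather than assuming any particular array layout.
print('rf traces:', len(rf_data))
print('time samples:', len(times))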
| 35.111111
| 92
| 0.517932
| 82
| 948
| 5.853659
| 0.585366
| 0.025
| 0.01875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.06383
| 0.405063
| 948
| 26
| 93
| 36.461538
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0.012685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05b7efff7d41c4651007c0d46a051ea437cab70c
| 16,172
|
py
|
Python
|
scripts/make_plots.py
|
facebookresearch/mpcfp
|
cb29797aa4f2ce524dd584ecf47c863fd9f414a6
|
[
"MIT"
] | 5
|
2020-11-18T23:55:17.000Z
|
2022-01-14T07:15:35.000Z
|
scripts/make_plots.py
|
facebookresearch/mpcfp
|
cb29797aa4f2ce524dd584ecf47c863fd9f414a6
|
[
"MIT"
] | null | null | null |
scripts/make_plots.py
|
facebookresearch/mpcfp
|
cb29797aa4f2ce524dd584ecf47c863fd9f414a6
|
[
"MIT"
] | 2
|
2021-11-06T14:06:13.000Z
|
2022-01-14T07:16:29.000Z
|
#!/usr/bin/env python2
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import math
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import seaborn
# constants:
NAN = float('nan')
# From https://blog.graphiq.com/
# finding-the-right-color-palettes-for-data-visualizations-fcd4e707a283
BAR_COLORS_PURPLES = [
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
BAR_COLORS_GRAY_PURPLES = [
(.7, .7, .7),
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
BAR_COLORS_DETECTION = [
(.8, .8, .8),
(.4, .4, .4),
(0.9020, 0.6196, 0.6157),
(0.7765, 0.3412, 0.5294),
(0.4471, 0.1922, 0.5647),
(0.2549, 0.1098, 0.3804),
]
LINE_COLORS = seaborn.cubehelix_palette(
4, start=2, rot=0, dark=0.15, light=0.75, reverse=False, as_cmap=False)
BAR_COLORS = BAR_COLORS_GRAY_PURPLES
FS = 18
color_counter = [0]
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
def set_style():
params = {
"legend.fontsize": FS - 4,
"axes.labelsize": FS,
"axes.titlesize": FS,
"xtick.labelsize": FS - 4,
"ytick.labelsize": FS - 4,
}
matplotlib.rcParams.update(params)
fig = plt.gcf()
for ax in fig.axes:
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
# make generic line plot:
def make_line_plot(Y, x=None, title='',
xlabel='', ylabel='', xlog=False, ylog=False,
xmin=None, xmax=None, ymin=None, ymax=None,
legend=[], legend_title=None, show_legend=True,
text_labels=None, colors=[], linestyle=[], markerstyle=[],
append=False, filename=None, linewidth=2., legloc=None,
errors=None, xticks=None, yticks=None):
# assertions and defaults:
x = np.linspace(0, Y.shape[1]) if x is None else x
ymin = Y.min() if ymin is None else ymin
ymax = Y.max() if ymax is None else ymax
xmin = x.min() if xmin is None else xmin
xmax = x.max() if xmax is None else xmax
if len(legend) > 0:
assert len(legend) == Y.shape[0]
if len(colors) == 0:
colors = LINE_COLORS
if isinstance(linestyle, str):
linestyle = [linestyle] * Y.shape[0]
if len(linestyle) == 0:
linestyle = ['-'] * Y.shape[0]
if isinstance(markerstyle, str):
markerstyle = [markerstyle] * Y.shape[0]
if len(markerstyle) == 0:
markerstyle = [''] * Y.shape[0]
# make plot:
if not append:
plt.clf()
for n in range(Y.shape[0]):
linecolor = colors[color_counter[0] % len(colors)]
color_counter[0] += 1
plt.plot(x, Y[n, :],
label=legend[n] if len(legend) > 0 else None,
linewidth=linewidth, linestyle=linestyle[n],
marker=markerstyle[n], markersize=linewidth * 1.5,
color=linecolor)
if errors is not None:
plt.fill_between(
x, Y[n, :] - errors[n, :], Y[n, :] + errors[n, :],
alpha=0.2, color=linecolor)
plt.xlabel(xlabel, fontweight='bold', fontsize=FS)
plt.ylabel(ylabel, fontweight='bold', fontsize=FS)
if show_legend:
plt.legend(fontsize=FS - 4, loc=0 if legloc is None else legloc,
title=legend_title)
# add text labels:
if text_labels is not None:
assert isinstance(text_labels, list)
for text_label in text_labels:
assert isinstance(text_label, list) \
or isinstance(text_label, tuple)
assert len(text_label) == 3
plt.text(*text_label)
# makes axes look pretty:
axes = plt.gca()
axes.set_xlim([xmin, xmax])
axes.set_ylim([ymin, ymax])
if xlog:
axes.semilogx(10.)
if ylog:
axes.semilogy(10.)
if xticks is not None:
axes.set_xticks(xticks)
if yticks is not None:
axes.set_yticks(yticks)
for tick in axes.xaxis.get_major_ticks():
tick.label.set_fontsize(FS - 4)
for tick in axes.yaxis.get_major_ticks():
tick.label.set_fontsize(FS - 4)
if title != '':
axes.set_title(title, fontweight='bold', fontsize=FS)
if show_legend and legend_title is not None:
legend_title = axes.get_legend().get_title().properties()[
'fontproperties']
legend_title.set_weight('bold')
# remove legend border:
legend = axes.get_legend()
if legend is not None:
legend.get_frame().set_linewidth(0.0)
# export plot:
set_style()
if filename is not None:
plt.savefig(filename, format='pdf', transparent=True,
bbox_inches='tight')
def read_log(logfile, timings=False, test=False):
x = []
y = []
yy = []
z = []
with open(os.path.join("results/", logfile), 'r') as fid:
for line in fid:
if test and "Test Set" in line:
fields = line.strip().split()
if len(fields) > 4:
test_loss = float(fields[3][:-1])
test_accuracy = float(fields[5])
else:
test_loss = float(fields[3])
test_accuracy = 0
if "Iter" not in line:
continue
fields = line.strip().split()
it = int(fields[1][:-1])
loss = float(fields[3][:-1])
if len(fields) > 6:
accuracy = float(fields[5][:-1])
runtime = float(fields[7])
yy.append(accuracy)
else:
runtime = float(fields[5])
x.append(it)
y.append(loss)
z.append(runtime)
if test:
return test_loss, test_accuracy
return np.array(x), np.array(y), np.array(yy), np.array(z)
def read_log_synth(logfile):
x = []
with open(os.path.join("results/", logfile), 'r') as fid:
for line in fid:
if "normalizing both weights and iweights" not in line:
continue
fields = line.strip().split()
diff = float(fields[7])
x.append(diff)
return np.array(x)
def mnist_width_train(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Train Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['mnist_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
it, loss, _, _ = read_log(logfile, test=False)
Y.append(loss[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.2,
xmin=9e2, xmax=6e6,
filename=filename, linewidth=2.,
legloc='upper left')
def mnist_width_test(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Test Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['mnist_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
loss, _ = read_log(logfile, test=True)
Y.append(loss)
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.2,
xmin=9e2, xmax=6e6,
filename=filename, linewidth=2.,
legloc='upper left')
def covtype_width_train(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Train Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6', '1e7']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['covtype_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
it, loss, _, _ = read_log(logfile, test=False)
Y.append(loss[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=1.3,
xmin=9e2, xmax=1.2e7,
filename=filename, linewidth=2.,
legloc='upper left')
def covtype_width_test(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Test Loss}'
widths = ['1e3', '1e4', '1e5', '1e6', '2e6', '5e6', '1e7']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in links:
files = ['covtype_width%d_link_%s.txt' % (int(float(w)), link.lower())
for w in widths]
Y = []
for logfile in files:
loss, _ = read_log(logfile, test=True)
Y.append(loss)
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=1.3,
xmin=9e2, xmax=1.2e7,
filename=filename, linewidth=2.,
legloc='upper left')
def synth_width(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'$\mathbf{\|\frac{x}{\|x\|} - \frac{w}{\|w\|}\|}$'
widths = ['1e1', '1e2', '1e3', '1e4', '1e5', '1e6', '5e6']
Ys = []
links = ["Identity", "Logit", "Probit"]
for link in ['identity', 'logit', 'probit']:
files = ['synth_width%d_link_%s.txt' % (int(float(w)), link)
for w in widths]
Y = []
for logfile in files:
normdiff = read_log_synth(logfile)
Y.append(normdiff[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k', 'k'],
linestyle=['-', '--', ':'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.02,
xmin=8, xmax=6e6,
filename=filename, linewidth=2.,
legloc='upper left')
def synth_terms(filename):
global color_counter
color_counter = [0]
xlabel = r'\textbf{Terms}'
ylabel = r'$\mathbf{\|\frac{x}{\|x\|} - \frac{w}{\|w\|}\|}$'
terms = list(range(6, 42, 2))
Ys = []
links = ["Logit", "Probit"]
for link in links:
files = ['synth_terms%d_link_%s.txt' % (t, link.lower())
for t in terms]
Y = []
for logfile in files:
normdiff = read_log_synth(logfile)
Y.append(normdiff[-1])
Ys.append(Y)
Y = np.stack(Ys)
x = np.array(terms)
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel, legend=links,
colors=['k', 'k'],
linestyle=['-', '--'],
xlog=False, ylog=False, markerstyle='s',
ymin=0.0, ymax=0.025, xticks=list(range(6, 42, 4)),
xmin=5, xmax=42,
filename=filename, linewidth=2.,
legloc='upper right')
def mnist_multi(filename):
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Accuracy}'
widths = ['1e3', '1e4', '1e5', '1e6']
files = ['mnist_width%d_multi.txt' % (int(float(w))) for w in widths]
Y = []
Y_train = []
for logfile in files:
_, acc = read_log(logfile, test=True)
_, _, train_acc, _ = read_log(logfile)
Y.append(acc)
Y_train.append(train_acc[-1])
Y = np.stack([Y_train, Y])
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel,
legend=['Train', 'Test'],
colors=['k', 'k'],
linestyle=['-', '--'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.7, ymax=1,
xmin=9e2, xmax=1.2e6,
filename=filename, linewidth=2.,
legloc='upper left')
def covtype_multi(filename):
xlabel = r'\textbf{Width (}$\mathbf{\gamma}$\textbf{)}'
ylabel = r'\textbf{Accuracy}'
widths = ['1e3', '1e4', '1e5', '1e6']
files = ['covtype_width%d_multi.txt' % (int(float(w))) for w in widths]
Y = []
Y_train = []
for logfile in files:
_, acc = read_log(logfile, test=True)
_, _, train_acc, _ = read_log(logfile)
Y.append(acc)
Y_train.append(train_acc[-1])
Y = np.stack([Y_train, Y])
x = np.array([float(w) for w in widths])
# produce plots:
make_line_plot(Y, x=x, xlabel=xlabel, ylabel=ylabel,
legend=['Train', 'Test'],
colors=['k', 'k'],
linestyle=['-', '--'],
xlog=True, ylog=False, markerstyle='s',
ymin=0.5, ymax=0.8,
xmin=9e2, xmax=1.2e6,
filename=filename, linewidth=2.,
legloc='upper left')
# make all the plots:
def main():
# get destination folder:
parser = argparse.ArgumentParser(
description='Make plots for floating point MPC')
parser.add_argument('--destination', default='./results/', type=str,
help='folder in which to dump figures')
args = parser.parse_args()
# create plots:
mnist_width_train(os.path.join(args.destination,
'mnist_widths_train_loss.pdf'))
mnist_width_test(os.path.join(args.destination,
'mnist_widths_test_loss.pdf'))
covtype_width_train(os.path.join(args.destination,
'covtype_widths_train_loss.pdf'))
covtype_width_test(os.path.join(args.destination,
'covtype_widths_test_loss.pdf'))
synth_width(os.path.join(args.destination, 'synth_widths_weightdiffs.pdf'))
synth_terms(os.path.join(args.destination, 'synth_terms_weightdiffs.pdf'))
mnist_multi(os.path.join(args.destination,
'mnist_multiclass_accuracy.pdf'))
covtype_multi(os.path.join(args.destination,
'covtype_multiclass_accuracy.pdf'))
# run all the things:
if __name__ == '__main__':
main()
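# For reference, a hedged sketch of the log-line shape read_log() expects,
# inferred purely from the field indexing above (not from project docs):
#   "Iter <it>, loss <loss>, accuracy <acc>, time <runtime>"
# so fields[1][:-1] -> it, fields[3][:-1] -> loss, fields[5][:-1] -> acc,
# fields[7] -> runtime; shorter lines omit the accuracy pair.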
| 32.539235
| 79
| 0.537225
| 2,046
| 16,172
| 4.134897
| 0.156892
| 0.021277
| 0.009929
| 0.019858
| 0.56182
| 0.540544
| 0.520567
| 0.486525
| 0.477778
| 0.448936
| 0
| 0.039293
| 0.310722
| 16,172
| 496
| 80
| 32.604839
| 0.719656
| 0.039327
| 0
| 0.51005
| 0
| 0
| 0.10826
| 0.043974
| 0
| 0
| 0
| 0
| 0.01005
| 1
| 0.032663
| false
| 0
| 0.030151
| 0
| 0.070352
| 0.002513
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05b87ef1f9d957ce2aacbc7ba9bf31d3f24627e5
| 2,782
|
py
|
Python
|
example_backtesting.py
|
brokenlab/finance4py
|
839fb4c262c369973c1afaebb23291355f8b4668
|
[
"MIT"
] | 6
|
2016-12-28T03:40:46.000Z
|
2017-03-31T12:04:43.000Z
|
example_backtesting.py
|
brokenlab/finance4py
|
839fb4c262c369973c1afaebb23291355f8b4668
|
[
"MIT"
] | null | null | null |
example_backtesting.py
|
brokenlab/finance4py
|
839fb4c262c369973c1afaebb23291355f8b4668
|
[
"MIT"
] | 3
|
2018-04-26T03:14:29.000Z
|
2021-06-13T16:18:04.000Z
|
# -*- coding: utf-8 -*-
'''
* finance4py
* Based on Python Data Analysis Library.
* 2016/03/22 by Sheg-Huai Wang <m10215059@csie.ntust.edu.tw>
* Copyright (c) 2016, finance4py team
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to
endorse or promote products derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from finance4py import Stock
from finance4py.backtesting import BandTest
from pylab import *
if __name__ == '__main__':
# Create the stock data connection and feed the data into the backtesting program
s = Stock('2330', '2015-10-31', '2016-03-05')
bt = BandTest(s)
# Example strategy 1
# Add K and D value columns to the historical price data
s['K'], s['D'] = s.KD()
# Write your own strategy => def your_own_name(today, today_data, stock)
def golden_cross(today, today_data, stock):
# Return value: True = holding a position, False = not holding
return today_data['K'] > today_data['D']
# Register the strategy with the backtester and give it a name
bt.addStrategy('KD黃金交叉', golden_cross)
# Example strategy 2
s['MA5'] = s.MA()
s['MA20'] = s.MA(20)
def average_cross(today, today_data, stock):
return today_data['MA5'] > today_data['MA20']
bt.addStrategy('均線黃金交叉', average_cross)
# Example strategy 3
s['DIF'], s['DEM'], s['OSC']= s.MACD()
def macd_cross(today, today_data, stock):
# today can be adjusted, and other days' data can be fetched via stock
yesterday = today - 1
yesterday_data = stock.getData(yesterday)
return (today_data['DIF'] > today_data['DEM']) & \
(yesterday_data['DIF'] > yesterday_data['DEM'])
bt.addStrategy('MACD連續兩日黃金交叉', macd_cross)
# Plot the backtest results (the y-axis is the asset multiple)
bt.plot()
show()
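# A further strategy in the same pattern, left commented out because the RSI
# indicator method is hypothetical -- finance4py's actual API may differ:
# s['RSI'] = s.RSI()
# def rsi_oversold(today, today_data, stock):
#     return today_data['RSI'] < 30
# bt.addStrategy('RSI oversold', rsi_oversold)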
| 35.21519
| 104
| 0.727175
| 392
| 2,782
| 5.094388
| 0.512755
| 0.040561
| 0.022534
| 0.028543
| 0.128192
| 0.068102
| 0.068102
| 0.068102
| 0.068102
| 0.068102
| 0
| 0.025033
| 0.181524
| 2,782
| 79
| 105
| 35.21519
| 0.851998
| 0.672538
| 0
| 0
| 0
| 0
| 0.108945
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0.083333
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05b95038357172273cd6bf5b94205ef5e3a1bff8
| 2,818
|
py
|
Python
|
main.py
|
af12066/cancel-sit
|
29977bb86927e69ae7f94a160ef4d1fb810f0117
|
[
"MIT"
] | null | null | null |
main.py
|
af12066/cancel-sit
|
29977bb86927e69ae7f94a160ef4d1fb810f0117
|
[
"MIT"
] | null | null | null |
main.py
|
af12066/cancel-sit
|
29977bb86927e69ae7f94a160ef4d1fb810f0117
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) T. H.
import urllib.request
import re
import urllib.parse
import codecs
import filecmp
import os.path
import os
from bs4 import BeautifulSoup
from slacker import Slacker
from datetime import datetime
class Slack(object):
__slacker = None
def __init__(self, token):
self.__slacker = Slacker(token)
def get_channel_list(self):
"""
Get the list of channel IDs and channel names in the Slack team.
"""
# Fetching the body returns the channels in the form [{channel1}, {channel2}, ...].
raw_data = self.__slacker.channels.list().body
result = []
for data in raw_data["channels"]:
result.append(dict(channel_id=data["id"], channel_name=data["name"]))
return result
def post_message_to_channel(self, channel, message):
"""
Post a message to any channel in the Slack team.
"""
channel_name = "#" + channel
self.__slacker.chat.post_message(channel_name, message)
def writeFile(fileName, content):
print(fileName)
f = codecs.open(fileName, 'w', 'utf-8')
f.write(content)
f.close()
if __name__ == '__main__':
slack = Slack('...')
print(slack.get_channel_list())
# Fetch the data for this month and next month
uri = 'http://attend.sic.shibaura-it.ac.jp/cancelCalendar/t04/calendar{0:d}{1:02d}-{2:02d}.html'.format(datetime.today().year, datetime.today().month, (lambda x: x if x != 12 else x - 11)(datetime.today().month + 1))
html = urllib.request.urlopen(uri)
soup = BeautifulSoup(html, 'lxml')
link = soup.find_all('a', href=re.compile("/cancel/")) # Get <a> elements whose href contains '/cancel/', then convert relative paths to absolute paths
for a in link:
path = urllib.parse.urljoin(uri, a['href']) # Get only the href attribute
print(path)
fileName = path.split('/')[-1]
fileName = fileName.replace("html", "txt")
html2 = urllib.request.urlopen(path) # Open each URL in the list
soup2 = BeautifulSoup(html2, 'lxml')
dat = soup2.find_all(text=True) # Get all the text
settext = "\n".join([x for x in dat if x != '\n']) #改行文字のみのリスト項目を削除.リストを結合し,文字列を整形
# Write out the scraped text.
# If no file exists for that date, create a new one;
# if the file already exists, write a temporary file with a '.tmp' extension.
# Compare the original txt file with the tmp file; if they differ, update the txt file and post to Slack.
if os.path.isfile(fileName):
tmpfileName = fileName + '.tmp'
writeFile(tmpfileName, settext)
if filecmp.cmp(fileName, tmpfileName):
print("no diff")
else:
writeFile(fileName, settext)
slack.post_message_to_channel("class", settext) #Slackにポスト (チャンネル, テキスト)
os.remove(tmpfileName)
else:
#print('write a new file')
slack.post_message_to_channel("class", settext) #Slackにポスト (チャンネル, テキスト)
writeFile(fileName, settext)
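# The compare-then-replace flow above, condensed as a descriptive sketch
# (names are illustrative, not part of this script):
#   write the new text to '<file>.tmp'; if filecmp.cmp(file, tmp) reports a
#   match, drop the tmp file; otherwise promote the tmp file over the original
#   and post the new text to Slack.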
| 29.663158
| 220
| 0.625621
| 313
| 2,818
| 5.498403
| 0.460064
| 0.025567
| 0.022661
| 0.034863
| 0.063916
| 0.063916
| 0.063916
| 0.063916
| 0.063916
| 0.063916
| 0
| 0.011236
| 0.242016
| 2,818
| 94
| 221
| 29.978723
| 0.794476
| 0.173172
| 0
| 0.107143
| 0
| 0.017857
| 0.07672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.303571
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05be03857ac9bab749c288e65ba7f0f36541df9b
| 4,561
|
py
|
Python
|
Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/gsi_handlers/object_lost_and_found_service_handlers.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\gsi_handlers\object_lost_and_found_service_handlers.py
# Compiled at: 2018-10-26 00:20:22
# Size of source mod 2**32: 4629 bytes
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema
import services
olaf_service_objects_schema = GsiGridSchema(label='Object Lost & Found')
olaf_service_objects_schema.add_field('object', label='Object')
olaf_service_objects_schema.add_field('zone', label='Zone')
olaf_service_objects_schema.add_field('street', label='Street')
olaf_service_objects_schema.add_field('sim', label='Sim')
olaf_service_objects_schema.add_field('household', label='Household')
olaf_service_deleted_clone_schema = GsiGridSchema(label='Object Lost & Found/To Be Deleted')
olaf_service_deleted_clone_schema.add_field('object', label='Object')
olaf_service_deleted_clone_schema.add_field('zone', label='Zone')
olaf_service_deleted_clone_schema.add_field('street', label='Street')
def _olaf_zone_str(zone_id, zone):
if zone:
return '{}:{}'.format(str(zone), zone.lot.get_lot_name())
return str(zone_id)
def _olaf_obj_str(zone, object_id):
obj_str = str(object_id)
if zone is not None:
if zone.is_instantiated:
obj = zone.object_manager.get(object_id)
if obj:
obj_str = str(obj)
return obj_str
@GsiHandler('object_lost_and_found_service_objects', olaf_service_objects_schema)
def generate_object_lost_and_found_service_data(*args, zone_id: int=None, filter=None, **kwargs):
lost_and_found = services.get_object_lost_and_found_service()
zone_manager = services.get_zone_manager()
sim_info_manager = services.sim_info_manager()
household_manager = services.household_manager()
if not (lost_and_found and zone_manager and sim_info_manager and household_manager):
return []
registered_objects = []
for locator in lost_and_found.registered_object_locators:
if zone_id is not None:
if zone_id != locator.zone_id:
continue
zone = zone_manager.get(locator.zone_id)
sim_str = str(locator.sim_id)
sim_info = sim_info_manager.get(locator.sim_id)
if sim_info:
sim_str = '{}:{}'.format(str(sim_info), locator.sim_id)
household_str = str(locator.household_id)
household = household_manager.get(locator.household_id)
if household:
household_str = '{}:{}'.format(household.name, locator.household_id)
registered_objects.append({'object':_olaf_obj_str(zone, locator.object_id),
'zone':_olaf_zone_str(locator.zone_id, zone),
'street':locator.open_street_id,
'sim':sim_str,
'household':household_str})
return registered_objects
@GsiHandler('object_lost_and_found_service_clones', olaf_service_deleted_clone_schema)
def generate_olaf_service_deleted_clone_schema_data(*args, zone_id: int=None, filter=None, **kwargs):
lost_and_found = services.get_object_lost_and_found_service()
zone_manager = services.get_zone_manager()
if not (lost_and_found and zone_manager):
return []
clones_to_delete_by_zone = lost_and_found.clones_to_delete_by_zone
clones_to_delete_by_street = lost_and_found.clones_to_delete_by_street
clones_to_delete = []
object_ids = set()
for clone_zone_id, zone_objects in clones_to_delete_by_zone.items():
if zone_id is not None:
if clone_zone_id != zone_id:
continue
zone = zone_manager.get(clone_zone_id)
for object_id in zone_objects:
street_str = 'n/a'
for street_id, street_objects in clones_to_delete_by_street.items():
if object_id in street_objects:
street_str = str(street_id)
break
clones_to_delete.append({'object':_olaf_obj_str(zone, object_id),
'zone':_olaf_zone_str(clone_zone_id, zone),
'street':street_str})
object_ids.add(object_id)
if zone_id is None:
for street_id, objects in clones_to_delete_by_street.items():
for object_id in objects:
if object_id in object_ids:
continue
clones_to_delete.append({'object':_olaf_obj_str(services.current_zone(), object_id),
'zone':'n/a',
'street':street_id})
return clones_to_delete
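# Hedged usage note: both handlers register with the GSI dispatcher through the
# @GsiHandler decorator but remain plain callables, so e.g.
#   rows = generate_object_lost_and_found_service_data()
# returns a list of dicts whose keys match the schema fields declared above
# ('object', 'zone', 'street', 'sim', 'household').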
| 44.281553
| 110
| 0.70182
| 642
| 4,561
| 4.601246
| 0.182243
| 0.032498
| 0.05281
| 0.056872
| 0.531483
| 0.455315
| 0.27759
| 0.207177
| 0.128639
| 0.111713
| 0
| 0.018407
| 0.201929
| 4,561
| 103
| 111
| 44.281553
| 0.793132
| 0.073668
| 0
| 0.151163
| 0
| 0
| 0.069685
| 0.017303
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.034884
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05bf284e1bf49c109f8df75324eddb8540d17a61
| 685
|
py
|
Python
|
testing/test_pendulum.py
|
delock/pytorch-a3c-mujoco
|
82e0c854417ac05e0f414eab1710794d41515591
|
[
"MIT"
] | null | null | null |
testing/test_pendulum.py
|
delock/pytorch-a3c-mujoco
|
82e0c854417ac05e0f414eab1710794d41515591
|
[
"MIT"
] | null | null | null |
testing/test_pendulum.py
|
delock/pytorch-a3c-mujoco
|
82e0c854417ac05e0f414eab1710794d41515591
|
[
"MIT"
] | null | null | null |
#Importing OpenAI gym package and MuJoCo engine
import gym
import numpy as np
import mujoco_py
import matplotlib.pyplot as plt
import env
# Setting InvertedPendulum-down as the environment (a custom id, presumably
# registered by the local `env` module imported above); note this rebinds the
# name `env` from that module to the environment instance
env = gym.make('InvertedPendulum-down')
#Sets an initial state
env.reset()
print (env.action_space)
# Render and step the environment in an endless loop
i = 0
while True:
#renders the environment
env.render()
    # Sample a random action from the action space (printed once below for
    # reference); the environment itself is stepped with a fixed action of [-5]
    action = env.action_space.sample()
    ob, reward, done, _ = env.step([-5])
if i == 0:
print (action)
print ("ob = {}, reward = {}, done = {}".format(ob, reward, done))
i += 1
env.close()
| 25.37037
| 70
| 0.706569
| 104
| 685
| 4.615385
| 0.634615
| 0.06875
| 0.075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014388
| 0.188321
| 685
| 26
| 71
| 26.346154
| 0.848921
| 0.382482
| 0
| 0
| 0
| 0
| 0.125
| 0.050481
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.277778
| 0
| 0.277778
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05bf7c9f0303c517554bb2670af4a9a4baf2a54a
| 5,317
|
py
|
Python
|
plots/plot_drift_types.py
|
ChristophRaab/RRSLVQ
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
[
"MIT"
] | 1
|
2021-06-22T20:54:03.000Z
|
2021-06-22T20:54:03.000Z
|
plots/plot_drift_types.py
|
ChristophRaab/RRSLVQ
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
[
"MIT"
] | 5
|
2020-04-20T09:31:02.000Z
|
2021-07-10T01:23:36.000Z
|
plots/plot_drift_types.py
|
ChristophRaab/RRSLVQ
|
e265f62e023bd3ca23273b51e06035fd3c0b7c94
|
[
"MIT"
] | 1
|
2020-07-03T04:00:47.000Z
|
2020-07-03T04:00:47.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from scipy.special import logit
import pandas as pd
from matplotlib.axes import Axes, Subplot
from matplotlib.collections import LineCollection
from matplotlib.colors import ListedColormap, BoundaryNorm
SMALL = 14
SIZE = 16
plt.rc('font', size=SIZE) # controls default text sizes
plt.rc('axes', titlesize=SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL) # legend fontsize
plt.rc('figure', titlesize=SIZE) # fontsize of the figure title
plt.rc('lines', lw=4)
def reoccuring_drift(length=50000,width=10,rate=0.1,plot=True,filename="reoccuring_drift.eps"):
length = length / 2
probability_drift = np.array([])
time = np.array([])
fig, ax = plt.subplots()
fig.set_size_inches(6.4, 4.8)
part_length = rate*length
for part in range(int(length/part_length)):
t = np.arange(time.size, time.size+part_length, 1)
x = np.array([1.0 / (1.0 + np.exp(-4.0 * float(i - int(time.size+part_length-part_length/2)) / float(width))) for i in t])
y = np.array([1 - p for p in x])
probability_drift = np.append(probability_drift,x)
probability_drift = np.append(probability_drift,y)
time = np.append(time,t)
probability_drift = (probability_drift-.5)*2
t = np.arange(1, probability_drift.size+1, 1)
signal = probability_drift
pos_signal = signal.copy()
neg_signal = signal.copy()
pos_signal[pos_signal <= 0] = np.nan
neg_signal[neg_signal > 0] = np.nan
ax.plot(pos_signal,label="Concept 2", linestyle='dotted')
ax.plot(neg_signal,label="Concept 1")
plt.xticks(np.arange(0, 45000, step=10000))
plot_attributes(plt,ax)
fig.savefig(filename,dpi=1000, format='eps',bbox_inches='tight')
    if plot:
        plt.show()
def incremental_drift(length=50000,width=10000,plot=True,filename="incremental_drift.eps"):
probability_drift = np.array([])
time = np.array([])
fig, ax = plt.subplots()
fig.set_size_inches(6.4, 4.8)
t = np.arange(time.size, length, 1)
x = np.array([1.0 / (1.0 + np.exp(-4.0 * float(i - int(length/2)) / float(width))) for i in t])
probability_drift = np.append(probability_drift,x)
# probability_drift = np.append(probability_drift,y)
time = np.append(time,t)
probability_drift = (probability_drift-.5)*2
t = np.arange(1, probability_drift.size+1, 1)
signal = probability_drift
pos_signal = signal.copy()
neg_signal = signal.copy()
pos_signal[pos_signal <= 0] = np.nan
neg_signal[neg_signal > 0] = np.nan
ax.plot(pos_signal,label="Concept 2", linestyle='dotted')
ax.plot(neg_signal,label="Concept 1")
plot_attributes(plt,ax)
fig.savefig(filename,dpi=1000, format='eps',bbox_inches='tight')
    if plot:
        plt.show()
def gradual_drift(length=50000,width=10,rate=0.4,plot=True,filename="gradual_drift.eps"):
length = length / 2
probability_drift = np.array([])
time = np.array([])
fig, ax = plt.subplots()
fig.set_size_inches(6.4, 4.8)
part_length = rate*length
for part in range(int(length/part_length)):
t = np.arange(time.size, time.size+part_length, 1)
x = np.array([1.0 / (1.0 + np.exp(-4.0 * float(i - int(time.size+part_length-part_length/2)) / float(width))) for i in t])
y = np.array([1 - p for p in x])
if 0 == part:
probability_drift = np.append(probability_drift,np.zeros(10000))
if int(length/part_length)-1 == part:
probability_drift = np.append(probability_drift,x)
probability_drift = np.append(probability_drift,np.ones(10000))
else:
probability_drift = np.append(probability_drift,x)
probability_drift = np.append(probability_drift,y)
time = np.append(time,t)
probability_drift = (probability_drift-.5)*2
t = np.arange(1, probability_drift.size+1, 1)
signal = probability_drift
pos_signal = signal.copy()
neg_signal = signal.copy()
pos_signal[pos_signal <= 0] = np.nan
neg_signal[neg_signal > 0] = np.nan
ax.plot(pos_signal,label="Concept 2", linestyle='dotted')
ax.plot(neg_signal,label="Concept 1")
plot_attributes(plt,ax)
    fig.savefig(filename,dpi=1000, format='eps',bbox_inches='tight')
    if plot:
        plt.show()
def plot_attributes(plt,ax):
#plotting
ax.set_xlabel('Timestep')
ax.set_ylabel('Data Mean')
plt.style.use('seaborn-paper')
ax.legend()
plt.yticks([-1,1.0],["Concept 1","Concept 2"],rotation='vertical')
ticks = ax.yaxis.get_majorticklabels()
ticks[0].set_verticalalignment("center")
ticks[1].set_verticalalignment("center")
# ax1 = ax.twinx()
# plt.yticks([-1,0,1],["","",""],rotation='vertical')
#reoccuring_drift(width=600,filename="frequent_reoccuing_drift.eps") # Frequent Reoccurring
#reoccuring_drift(width=1000,rate=0.4) # Reoccurring
#incremental_drift(width=15000) # Incremental
#incremental_drift(width=2500,filename="abrupt_drift.eps") # Abrupt
gradual_drift(length=45000,width=1000,rate=0.3) #Gradual
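# Hedged note on the curves above: each concept transition is the logistic
# p(t) = 1 / (1 + exp(-4 * (t - t0) / width)) rescaled from [0, 1] to [-1, 1]
# via (p - 0.5) * 2, so `width` controls how abrupt a drift looks and `rate`
# controls how often the concepts alternate.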
| 33.024845
| 130
| 0.671995
| 792
| 5,317
| 4.385101
| 0.179293
| 0.15203
| 0.07256
| 0.062194
| 0.663979
| 0.639217
| 0.639217
| 0.60812
| 0.601209
| 0.577023
| 0
| 0.040267
| 0.182622
| 5,317
| 160
| 131
| 33.23125
| 0.758859
| 0.106827
| 0
| 0.64486
| 0
| 0
| 0.055203
| 0.004442
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037383
| false
| 0
| 0.065421
| 0
| 0.102804
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05c1f456776569370085a917d41ee8b850f0a3b7
| 15,773
|
py
|
Python
|
simulation/src/utils.py
|
VIDA-NYU/pedestrian-sensing-model
|
e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf
|
[
"MIT"
] | 2
|
2020-01-14T12:44:11.000Z
|
2021-09-29T16:09:37.000Z
|
simulation/src/utils.py
|
VIDA-NYU/pedestrian-sensing-model
|
e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf
|
[
"MIT"
] | 1
|
2021-09-11T14:13:57.000Z
|
2021-09-11T14:13:57.000Z
|
simulation/src/utils.py
|
VIDA-NYU/pedestrian-sensing-model
|
e8f0a6d3e47fc2a2577ac502f607568b3b7f2abf
|
[
"MIT"
] | 2
|
2020-07-13T17:08:25.000Z
|
2021-03-31T15:10:58.000Z
|
#!/usr/bin/env python3
import numpy as np
import math
import random
import time
import scipy.misc
import scipy.signal
import multiprocessing
import json
import itertools
import os
import pprint
from collections import namedtuple
from math import gcd  # fractions.gcd was removed in Python 3.9
from optimized import get_distance
OBSTACLE = -1
MAX = 2147483647 #MAXIMUM INT 32
Graph = namedtuple('Graph', 'adj nodes2d nodesflat indices cachedravel ' \
'mapshape nnodes maplen')
##########################################################
def compute_gcd_intervals(speed1, speed2):
_gcd = gcd(speed1, speed2)
interval2 = int(min(speed1, speed2) / _gcd)
interval1 = int(max(speed1, speed2) / _gcd)
return interval1, interval2
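# Hedged example with hypothetical speeds 4 and 6: gcd(4, 6) == 2, so the
# call returns (interval1, interval2) == (3, 2), i.e. max/gcd and min/gcd.
#   compute_gcd_intervals(4, 6)  # -> (3, 2)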
def get_distance_from_npy_idx(npypos1, npypos2, mapshape):
"""Compute manhattan difference tween @pos1 and @pos2.
Args:
pos1(tuple): position 1 in flattened numpy array
pos2(tuple): position 2 in flattened numpy array
Returns:
float: manhattan difference
"""
pos1 = np.array(np.unravel_index(npypos1, mapshape))
pos2 = np.array(np.unravel_index(npypos2, mapshape))
return get_distance(pos1, pos2)
def flatten_indices(indices, mapshape):
return np.ravel_multi_index(np.transpose(indices), mapshape)
def unflatten_indices(indices, mapshape):
out = np.unravel_index(indices, mapshape)
return list(zip(out[0], out[1]))
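# Hedged round-trip example on a 3x4 map: (row 1, col 2) flattens to numpy
# index 1*4 + 2 == 6 and unflattens back to the same pair.
#   flatten_indices([(1, 2)], (3, 4))   # -> array([6])
#   unflatten_indices([6], (3, 4))      # -> [(1, 2)]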
def parse_image(imagefile, thresh=128):
"""Parse the streets from image and return a numpy ndarray,
with 0 as streets and OBSTACLE as non-streets. Assumes a
BW image as input, with pixels in white representing streets.
Args:
imagefile(str): image path
Returns:
numpy.ndarray: structure of the image
"""
    img = scipy.misc.imread(imagefile)  # requires SciPy < 1.2 (with Pillow); newer code would use imageio.imread
if img.ndim > 2: img = img[:, :, 0]
return (img > thresh).astype(int) - 1
def find_crossings_crossshape(npmap):
"""Convolve with kernel considering input with
0 as streets and OBSTACLE as non-streets. Assumes a
BW image as input, with pixels in black representing streets.
Args:
npmap(numpy.ndarray): ndarray with two dimensions composed of -1 (obstacles)
        and 0 (traversable paths)
Returns:
list: set of indices that contains the nodes
"""
ker = np.array([[0,1,0], [1, 1, 1], [0, 1, 0]])
convolved = scipy.signal.convolve2d(npmap, ker, mode='same',
boundary='fill', fillvalue=OBSTACLE)
inds = np.where(convolved >= OBSTACLE)
return set([ (a,b) for a,b in zip(inds[0], inds[1]) ])
def find_crossings_squareshape(npmap, supressredundant=True):
"""Convolve with kernel considering input with
0 as streets and -1 as non-streets. Assumes a
BW image as input, with pixels in black representing streets.
Args:
npmap(numpy.ndarray): ndarray with two dimensions composed of -1 (obstacles)
        and 0 (traversable paths)
Returns:
list: set of indices that contains the nodes
"""
ker = np.array([[1,1], [1, 1]])
convolved = scipy.signal.convolve2d(npmap, ker, mode='same',
boundary='fill', fillvalue=OBSTACLE)
inds = np.where(convolved >= 0)
crossings = np.array([ np.array([a,b]) for a,b in zip(inds[0], inds[1]) ])
if supressredundant:
return filter_by_distance(crossings)
else: return crossings
def filter_by_distance(points, mindist=4):
"""Evaluate the distance between each pair os points in @points
and return just the ones with distance gt @mindist
Args:
points(set of tuples): set of positions
mindist(int): minimum distance
Returns:
set: set of points with a minimum distance between each other
"""
cr = list(points)
npoints = len(points)
valid = np.full(npoints, np.True_)
for i in range(npoints):
if not valid[i]: continue
for j in range(i + 1, npoints):
dist = get_distance(cr[i], cr[j])
if dist < mindist: valid[j] = np.False_
return points[valid]
def get_adjacency_dummy(nodes, npmap):
    ind = np.where(npmap != OBSTACLE)
    return set([ (a,b) for a,b in zip(ind[0], ind[1]) ])
##########################################################
def compute_heuristics(nodes, goal):
"""Compute heuristics based on the adjcency matrix provided and on the goal. If the guy is in the adjmatrix, then it is not an obstacle.
IMPORTANT: We assume that there is just one connected component.
Args:
        adjmatrix(dict of list of neighbours): positions as keys and neighbours as values
goal(tuple): goal position
Returns:
dict of heuristics: heuristic for each position
"""
subt = np.subtract
abso = np.absolute
return {v: np.sum(abso(subt(v, goal))) for v in nodes}
##########################################################
def compute_heuristics_from_map(searchmap, goal):
s = searchmap
gy, gx = goal
height, width = s.shape
h = {}
for j in range(height):
disty = math.fabs(j-gy)
for i in range(width):
v = s[j][i]
if v == OBSTACLE:
h[(j, i)] = MAX
else:
distx = math.fabs(j-gx)
h[(j, i)] = distx + disty + v
return h
##########################################################
def get_adjmatrix_from_npy(_map):
"""Easiest approach, considering 1 for each neighbour.
"""
connectivity = 8
h, w = _map.shape
nodes = np.empty((1, 0), dtype=int)
adj = np.empty((0, 10), dtype=int)
for j in range(0, h):
for i in range(0, w):
if _map[j, i] == OBSTACLE: continue
nodes = np.append(nodes, np.ravel_multi_index((j, i), _map.shape))
ns1, ns2 = get_neighbours_coords((j, i), _map.shape)
            neigh = np.full(10, -1)
            neigh[0] = -1
            acc = 1
for jj, ii in ns1:
if _map[jj, ii] != OBSTACLE:
neigh[acc] = np.ravel_multi_index((jj, ii), _map.shape)
acc += 1
            neigh[acc] = -2 #sqrt(2) marker, as in get_full_adjmatrix_from_npy (an int array would truncate -1.4142... to the -1 sentinel)
acc += 1
adj = np.append(adj, np.reshape(neigh, (1, 10)), axis=0)
return nodes, adj
##########################################################
def get_full_adjmatrix_from_npy(_mapmatrix):
"""Create a graph structure of a 2d matrix with two possible values: OBSTACLE
or 0. It returns a big structure in different formats to suit every need
Returns:
Structure with attributes
adj(maplen, 10) - stores the neighbours of each npy coordinate
nodes2d(nnodes, 2) - sparse list of nodes in 2d
nodesflat(nnodes) - sparse list of nodes in npy
indices(maplen) - dense list of points in sparse indexing
cachedravel(mapshape) - cached ravel of points to be used
mapshape(2) - height and width
nnodes(1) - number of traversable nodes
"""
h, w = _mapmatrix.shape
maplen = np.product(_mapmatrix.shape)
adj = np.full((np.product(_mapmatrix.shape), 10), -1, dtype=int)
nodes2d = np.full((maplen, 2), -1, dtype=int)
nodesflat = np.empty((0, 1), dtype=int)
indices = np.full(maplen, -1, dtype=int)
cachedravel = np.full(_mapmatrix.shape, -1)
nodesidx = 0
#TODO: convert everything to numpy indexing
for j in range(h):
for i in range(w):
if _mapmatrix[j, i] == OBSTACLE: continue
npyidx = np.ravel_multi_index((j, i), _mapmatrix.shape)
indices[npyidx] = nodesidx
nodes2d[nodesidx] = np.array([j, i])
ns1, ns2 = get_neighbours_coords((j, i), _mapmatrix.shape)
neigh = np.full(10, -MAX)
neigh[0] = -1
acc = 1
cachedravel[j, i] = npyidx
for jj, ii in ns1:
if _mapmatrix[jj, ii] != OBSTACLE:
neigh[acc] = np.ravel_multi_index((jj, ii), _mapmatrix.shape)
acc += 1
neigh[acc] = -2 #sqrt(2)
acc += 1
for jj, ii in ns2:
if _mapmatrix[jj, ii] != OBSTACLE:
neigh[acc] = np.ravel_multi_index((jj, ii), _mapmatrix.shape)
acc += 1
adj[npyidx] = np.reshape(neigh, (1, 10))
nodesidx += 1
nodes2d = nodes2d[:nodesidx]
nodesflat = np.array([ np.ravel_multi_index((xx, yy),_mapmatrix.shape) for xx, yy in nodes2d])
return Graph(adj=adj, nodes2d=nodes2d, nodesflat=nodesflat,
indices=indices, cachedravel=cachedravel,
mapshape=_mapmatrix.shape, nnodes=len(nodesflat),
maplen=np.product(_mapmatrix.shape))
##########################################################
def get_neighbours_coords(pos, mapshape):
""" Get neighbours. Do _not_ verify whether it is a valid coordinate
Args:
        pos(tuple): (j, i) == (y, x) coordinate
        mapshape(tuple): (height, width) of the map
The order returned is:
5 1 6
4 9 2
8 3 7
"""
j, i = pos
neighbours1 = [ (j-1, i), (j, i+1), (j+1, i), (j, i-1) ]
neighbours2 = [(j-1, i-1), (j-1, i+1), (j+1, i+1), (j+1, i-1) ]
n1 = eliminate_nonvalid_coords(neighbours1, mapshape)
n2 = eliminate_nonvalid_coords(neighbours2, mapshape)
return (n1, n2)
#########################################################
def get_neighbours_coords_npy_indices(idx, mapshape, connectedness=8,
yourself=False):
""" Get neighbours. Do _not_ verify whether it is a valid coordinate
Args:
idx(int): npy indexing of a matrix
        connectedness(int): how the neighbourhood is considered, 8 or 4
yourself(bool): the point itself is included in the return
The order returned is:
c5 c1 c6
c4 c9 c2
c8 c3 c7
"""
nrows, ncols = mapshape
maplen = np.product(mapshape)
c1 = idx - ncols
c2 = idx + 1
c3 = idx + ncols
c4 = idx - 1
neighbours = []
if c1 >= 0 : neighbours.append(c1)
if c2 < maplen: neighbours.append(c2)
if c3 < maplen: neighbours.append(c3)
if c4 >= 0 : neighbours.append(c4)
if connectedness == 8:
c5 = c1 - 1
c6 = c1 + 1
c7 = c3 + 1
c8 = c3 - 1
if c5 >= 0:
neighbours.append(c5)
neighbours.append(c6)
if c7 < maplen:
neighbours.append(c7)
neighbours.append(c8)
if yourself: neighbours.append(idx)
return neighbours
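# Hedged example on a 3x3 map (maplen 9): the centre index 4 yields the
# 4-connected neighbours [1, 5, 7, 3], matching the c1..c4 order above.
# As the docstring's "do _not_ verify" caveat warns, the plain +/-1
# arithmetic does not guard against wrap-around at the left/right map edges.
#   get_neighbours_coords_npy_indices(4, (3, 3), connectedness=4)  # -> [1, 5, 7, 3]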
##########################################################
def eliminate_nonvalid_coords(coords, mapshape):
""" Eliminate nonvalid indices
Args:
coords(set of tuples): input set of positions
h(int): height
w(int): width
Returns:
set of valid coordinates
"""
h, w = mapshape
valid = []
for j, i in coords:
if j < 0 or j >= h: continue
if i < 0 or i >= w: continue
valid.append((j, i))
return valid
##########################################################
def get_adjmatrix_from_image(image):
"""Get the adjacenty matrix from image
Args:
searchmap(np.ndarray): our structure of searchmap
Returns:
set of tuples: set of the crossing positions
"""
searchmap = parse_image(image)
return get_full_adjmatrix_from_npy(searchmap)
##########################################################
def get_crossings_from_image(imagefile):
"""Get crossings from image file
Args:
searchmap(np.ndarray): our structure of searchmap
Returns:
set of tuples: set of the crossing positions
"""
searchmap = parse_image(imagefile)
return find_crossings_squareshape(searchmap)
##########################################################
def get_obstacles_from_image(imagefile):
"""Get obstacles from image file
Args:
searchmap(np.ndarray): our structure of searchmap
Returns:
set of tuples: set of the crossing positions
"""
searchmap = parse_image(imagefile)
indices = np.where(searchmap==OBSTACLE)
return set(map(tuple, np.transpose(indices)))
##########################################################
def get_mapshape_from_searchmap(hashtable):
"""Suppose keys have the form (x, y). We want max(x), max(y)
such that not necessarily the key (max(x), max(y)) exists
Args:
hashtable(dict): key-value pairs
Returns:
int, int: max values for the keys
"""
ks = hashtable.keys()
h = max([y[0] for y in ks])
w = max([x[1] for x in ks])
return h+1, w+1
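# Hedged example: keys (0, 0), (2, 1) and (1, 3) give mapshape (3, 4) even
# though the extreme corner (2, 3) is itself absent from the table.
#   get_mapshape_from_searchmap({(0, 0): 0, (2, 1): 0, (1, 3): 0})  # -> (3, 4)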
##########################################################
def get_random_els_with_reposition(inputlist, rng, n=1, avoided=[]):
if not avoided: return [rng.choice(inputlist) for _ in range(n)]
_list = list(inputlist)
nfree = len(_list)
els = [] # we accept repetitions
while len(els) < n:
rndidx = rng.randrange(0, nfree)
chosen = _list[rndidx]
        if chosen not in avoided: els.append(chosen)
return els
##########################################################
def get_multiprocessing_logger(loglevel):
log = multiprocessing.log_to_stderr()
log.setLevel(loglevel)
return log
##########################################################
def split_all_combinations_from_config(configfile, tmpdir, prefix=''):
with open(configfile) as fh:
config = json.load(fh)
configcopy = []
_keys = []
_values = []
for k, v in config.items():
if type(v) == list:
_keys.append(k)
_values.append(v)
comb = itertools.product(*_values)
f = os.path.basename(configfile)
for c in comb:
filename = os.path.join(tmpdir, prefix + '_' + (str(c))[1:-1].replace(', ', '-') + '_' + f)
newconfig = config.copy()
for i in range(len(c)):
newconfig[_keys[i]] = [c[i]]
with open(filename, 'w') as fh:
json.dump(newconfig, fh)
##########################################################
def copy_list_to_boolsparseindexing(_list, sparseindex):
boolsparseidx = np.full(sparseindex.shape, np.False_, dtype=np.bool_)
for el in _list:
boolsparseidx[el] = True
return boolsparseidx
##########################################################
def copy_list_to_boolindexing(_list, maplen):
boolidx = np.full(maplen, 0, dtype=np.int64)
boolidx[_list] = 1
return boolidx
##########################################################
def rename_old_folder(filesdir):
    # Unfortunately, this cannot be run standalone because of the cython file
    # dependency (the `optimized` import above); just create a .py file that
    # calls utils.rename_old_folder()
if not os.path.exists(filesdir):
print('Dir {} does not exist'.format(filesdir))
return
os.chdir(filesdir)
newnames = {
'fleetsz':'sensorsnum',
'rad': 'sensorrange',
'splng': 'sensorinterval',
'detprob': 'sensortpr',
'speed': 'sensorspeed'
}
def get_new_set_of_names(params):
newparams = []
for param in params:
p = param
for k, v in newnames.items():
if k in p:
p = p.replace(k, v)
newparams.append(p)
return newparams
for f in os.listdir('./'):
if not f.endswith('.npy'): continue
print(f)
suff = f.split('.npy')[0]
params = suff.split('_')
newparams = get_new_set_of_names(params)
beg = '_'.join(newparams[:5])
beg = beg.replace('sensortpr1', 'sensortpr1.0')
en = '_'.join(newparams[5:])
newname = '{}_sensorexfp0.0_{}.npy'.format(beg, en)
print(newname)
os.rename(f, newname)
| 30.216475
| 140
| 0.573131
| 1,984
| 15,773
| 4.470262
| 0.21119
| 0.00902
| 0.009471
| 0.013418
| 0.252227
| 0.216597
| 0.19337
| 0.187282
| 0.187282
| 0.183899
| 0
| 0.022758
| 0.258987
| 15,773
| 521
| 141
| 30.274472
| 0.736054
| 0.246878
| 0
| 0.077206
| 0
| 0
| 0.024402
| 0.002227
| 0
| 0
| 0
| 0.001919
| 0
| 1
| 0.099265
| false
| 0
| 0.051471
| 0.007353
| 0.246324
| 0.014706
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05c354eab5a376b1dcdf00dc912ca4e24bdc43ea
| 2,438
|
py
|
Python
|
luxor/controllers/types.py
|
sam007961/luxor
|
31838c937b61bfbc400103d58ec5b5070471767e
|
[
"MIT"
] | null | null | null |
luxor/controllers/types.py
|
sam007961/luxor
|
31838c937b61bfbc400103d58ec5b5070471767e
|
[
"MIT"
] | 5
|
2020-09-06T15:44:13.000Z
|
2020-11-02T11:30:22.000Z
|
luxor/controllers/types.py
|
sam007961/luxor
|
31838c937b61bfbc400103d58ec5b5070471767e
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import Union
from luxor.core.events import Event
from luxor.controllers.expressions import Var
class Int(Var):
def __init__(self, value: Number = 0, **kwargs) -> None:
super(Int, self).__init__(**kwargs)
self.event_prefix = self.name + '.int.'
self.__obj = self.ctx.request_object()
self.__obj['class'] = frozenset({'int', self.callstack + '.int'})
self.__obj['label'] = self.name
self.sset(value)
self.trigger('new', value)
def sget(self) -> int:
return self.__obj.sget('value')
    def sset(self, value: Number) -> tuple[int, int]:
if type(value) == Int:
new = value.get()
else:
new = int(value)
old = self.sget()
self.__obj['value'] = new
return old, new
def get(self) -> int:
value = self.__obj['value']
self.trigger('get', value)
return value
def set(self, value: Number) -> None:
old, new = self.sset(value)
if type(value) == float:
self.trigger('cast_literal', value, new)
self.trigger('set', old, new)
@property
def value(self) -> int:
pass
@value.getter
def value(self) -> int:
return self.get()
@value.setter
def value(self, value: Number) -> None:
self.set(value)
    def trigger_new(self, value) -> Event:
return Event(self.event_prefix + 'new',
source=self.__obj, meta={
'new.value': value
})
def trigger_get(self, value) -> Event:
return Event(self.event_prefix + 'get',
source=self.__obj, meta={
'get.value': value
})
    def trigger_set(self, old: int, new: int) -> Event:
return Event(self.event_prefix + 'set',
source=self.__obj, meta={
'set.value.old': old,
'set.value.new': new
})
    def trigger_cast_literal(self, old: float, new: int) -> Event:
return Event(self.event_prefix + 'literal.cast',
source=self.__obj, meta={
'cast.value.type': type(old),
'cast.value.old': old,
'cast.value.new': new
})
Number = Union[int, float, Int]
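# Hedged usage sketch, assuming a luxor context providing request_object() is
# supplied via **kwargs (the exact wiring lives elsewhere in the package):
#   counter = Int(0, ctx=ctx)   # `ctx` is a hypothetical, pre-built context
#   counter.value = 2.5         # stored as int 2; emits a 'literal.cast'
#                               # event followed by a 'set' event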
| 30.098765
| 73
| 0.511895
| 275
| 2,438
| 4.378182
| 0.192727
| 0.05814
| 0.062292
| 0.066445
| 0.106312
| 0.084718
| 0.059801
| 0.059801
| 0
| 0
| 0
| 0.000637
| 0.35644
| 2,438
| 80
| 74
| 30.475
| 0.76673
| 0
| 0
| 0.153846
| 0
| 0
| 0.068089
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184615
| false
| 0.015385
| 0.061538
| 0.092308
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05c66e3dcdf2a391e7cb2ae90afaebe8a08c59e9
| 3,483
|
py
|
Python
|
skeletons/browser/browser.py
|
gbkim000/wxPython
|
b1604d71cf04801f9efa8b26b935561a88ef1daa
|
[
"BSD-2-Clause"
] | 80
|
2018-05-25T00:37:25.000Z
|
2022-03-13T12:31:02.000Z
|
skeletons/browser/browser.py
|
gbkim000/wxPython
|
b1604d71cf04801f9efa8b26b935561a88ef1daa
|
[
"BSD-2-Clause"
] | 1
|
2021-01-08T20:22:52.000Z
|
2021-01-08T20:22:52.000Z
|
skeletons/browser/browser.py
|
gbkim000/wxPython
|
b1604d71cf04801f9efa8b26b935561a88ef1daa
|
[
"BSD-2-Clause"
] | 32
|
2018-05-24T05:40:55.000Z
|
2022-03-24T00:32:11.000Z
|
#!/usr/bin/python
"""
ZetCode wxPython tutorial
This program creates a browser UI.
author: Jan Bodnar
website: zetcode.com
last edited: May 2018
"""
import wx
from wx.lib.buttons import GenBitmapTextButton
class Example(wx.Frame):
def __init__(self, *args, **kw):
super(Example, self).__init__(*args, **kw)
self.InitUI()
def InitUI(self):
self.CreateMenuBar()
panel = wx.Panel(self)
# panel.SetBackgroundColour('white')
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
line1 = wx.StaticLine(panel)
vbox.Add(line1, 0, wx.EXPAND)
toolbar1 = wx.Panel(panel, size=(-1, 30))
back = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/back.png'),
style=wx.NO_BORDER)
forward = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/forw.png'),
style=wx.NO_BORDER)
refresh = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/refresh.png'),
style=wx.NO_BORDER)
stop = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/stop.png'),
style=wx.NO_BORDER)
home = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/home.png'),
style=wx.NO_BORDER)
address = wx.ComboBox(toolbar1, size=(50, -1))
go = wx.BitmapButton(toolbar1, bitmap=wx.Bitmap('images/play.png'),
style=wx.NO_BORDER)
text = wx.TextCtrl(toolbar1, size=(150, -1))
hbox1.Add(back)
hbox1.Add(forward)
hbox1.Add(refresh)
hbox1.Add(stop)
hbox1.Add(home)
hbox1.Add(address, 1, wx.TOP, 3)
hbox1.Add(go, 0, wx.TOP | wx.LEFT, 3)
hbox1.Add(text, 0, wx.TOP | wx.RIGHT, 3)
toolbar1.SetSizer(hbox1)
vbox.Add(toolbar1, 0, wx.EXPAND)
line = wx.StaticLine(panel)
vbox.Add(line, 0, wx.EXPAND)
toolbar2 = wx.Panel(panel, size=(-1, 30))
bookmark1 = wx.BitmapButton(toolbar2, bitmap=wx.Bitmap('images/love.png'),
style=wx.NO_BORDER)
bookmark2 = wx.BitmapButton(toolbar2, bitmap=wx.Bitmap('images/book.png'),
style=wx.NO_BORDER)
bookmark3 = wx.BitmapButton(toolbar2, bitmap=wx.Bitmap('images/sound.png'),
style=wx.NO_BORDER)
hbox2.Add(bookmark1, flag=wx.RIGHT, border=5)
hbox2.Add(bookmark2, flag=wx.RIGHT, border=5)
hbox2.Add(bookmark3)
toolbar2.SetSizer(hbox2)
vbox.Add(toolbar2, 0, wx.EXPAND)
line2 = wx.StaticLine(panel)
vbox.Add(line2, 0, wx.EXPAND)
panel.SetSizer(vbox)
self.CreateStatusBar()
self.SetTitle("Browser")
self.Centre()
def CreateMenuBar(self):
menubar = wx.MenuBar()
file = wx.Menu()
file.Append(wx.ID_ANY, '&Quit', '')
edit = wx.Menu()
view = wx.Menu()
go = wx.Menu()
bookmarks = wx.Menu()
tools = wx.Menu()
help = wx.Menu()
menubar.Append(file, '&File')
menubar.Append(edit, '&Edit')
menubar.Append(view, '&View')
menubar.Append(go, '&Go')
menubar.Append(bookmarks, '&Bookmarks')
menubar.Append(tools, '&Tools')
menubar.Append(help, '&Help')
self.SetMenuBar(menubar)
def main():
app = wx.App()
ex = Example(None)
ex.Show()
app.MainLoop()
if __name__ == '__main__':
main()
| 27.642857
| 83
| 0.584266
| 425
| 3,483
| 4.727059
| 0.275294
| 0.062718
| 0.062718
| 0.089597
| 0.349428
| 0.232952
| 0.214037
| 0
| 0
| 0
| 0
| 0.028402
| 0.272179
| 3,483
| 125
| 84
| 27.864
| 0.764103
| 0.050531
| 0
| 0.107143
| 0
| 0
| 0.060036
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.02381
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05c7ce421e8fdf3698aad581723528f431eaafbe
| 1,673
|
py
|
Python
|
model/tds_block.py
|
SABER-labs/SABERv2
|
028d403beadec3adebd51582fd8ef896a2fe3696
|
[
"MIT"
] | 1
|
2022-03-02T02:52:24.000Z
|
2022-03-02T02:52:24.000Z
|
model/tds_block.py
|
SABER-labs/SABERv2
|
028d403beadec3adebd51582fd8ef896a2fe3696
|
[
"MIT"
] | null | null | null |
model/tds_block.py
|
SABER-labs/SABERv2
|
028d403beadec3adebd51582fd8ef896a2fe3696
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class TDSBlock(nn.Module):
def __init__(self, channels, kernel_size, width, dropout, right_padding):
super().__init__()
self.channels = channels
self.width = width
        assert right_padding >= 0
self.conv_block = nn.Sequential(
nn.ConstantPad2d(
(kernel_size - 1 - right_padding, right_padding, 0, 0), 0),
nn.Conv2d(
channels, channels, (1, kernel_size), 1, (0, 0)),
nn.ReLU(inplace=True),
nn.Dropout(dropout)
)
linear_dim = channels * width
self.linear_block = nn.Sequential(
nn.Linear(linear_dim, linear_dim),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(linear_dim, linear_dim),
nn.Dropout(dropout)
)
self.conv_layerN = nn.LayerNorm([channels, width])
self.linear_layerN = nn.LayerNorm([channels, width])
def forward(self, x):
# X is B, C, W, T
out = self.conv_block(x) + x
out = out.permute(0, 3, 1, 2) # B, T, C, W
out = self.conv_layerN(out)
B, T, C, W = out.shape
out = out.view((B, T, 1, C * W))
out = self.linear_block(out) + out
out = out.view(B, T, C, W)
out = self.linear_layerN(out)
out = out.permute(0, 2, 3, 1) # B, C, W, T
return out
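# Hedged shape walk-through for the __main__ example below: x of shape
# (8, 15, 80, 400) keeps (B, C, W, T) through the residual conv block, is
# layer-normalised over (C, W) after permuting to (B, T, C, W), passes the
# residual linear block on the flattened C*W axis, and is permuted back to
# (B, C, W, T) before returning.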
if __name__ == "__main__":
model = TDSBlock(15, 10, 80, 0.1, 1)
x = torch.rand(8, 15, 80, 400)
import time
start = time.perf_counter()
model(x)
end = time.perf_counter()
print(f"Time taken: {(end-start)*1000:.3f}ms")
| 28.355932
| 77
| 0.545129
| 228
| 1,673
| 3.833333
| 0.298246
| 0.048055
| 0.022883
| 0.01373
| 0.28833
| 0.162471
| 0.1373
| 0
| 0
| 0
| 0
| 0.037004
| 0.321578
| 1,673
| 58
| 78
| 28.844828
| 0.73304
| 0.022116
| 0
| 0.133333
| 0
| 0
| 0.026961
| 0.014706
| 0
| 0
| 0
| 0
| 0.022222
| 1
| 0.044444
| false
| 0
| 0.066667
| 0
| 0.155556
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05c8724a622688c0f5c093058bd7213a2efddffc
| 1,968
|
py
|
Python
|
blackcompany/serve/vcs.py
|
clckwrkbdgr/blackcompany
|
9164a0db3e9f11878ce12da6ebdf82a300e1c6f4
|
[
"WTFPL"
] | null | null | null |
blackcompany/serve/vcs.py
|
clckwrkbdgr/blackcompany
|
9164a0db3e9f11878ce12da6ebdf82a300e1c6f4
|
[
"WTFPL"
] | null | null | null |
blackcompany/serve/vcs.py
|
clckwrkbdgr/blackcompany
|
9164a0db3e9f11878ce12da6ebdf82a300e1c6f4
|
[
"WTFPL"
] | null | null | null |
from ._base import Endpoint
from ..util._six import Path
import bottle
from ..util import gitHttpBackend
class GitHTTPBackend:
""" WSGI git-http-backend interface to actual endpoints.
"""
def __init__(self, route, repo_root):
self.route = route
self.repo_root = Path(repo_root)
def get(self, path):
return self._serve(path)
def post(self, path):
return self._serve(path)
def _serve(self, path):
git_project_root = self.repo_root
git_dir = git_project_root/'.git'
if not git_dir.exists() and (git_project_root/'HEAD').exists():
git_dir = git_project_root
git_info = git_dir/'info'
if path == 'sparse-checkout' or (git_info/path).exists():
return bottle.static_file(path, root=str(git_info))
webroot = self.route
environ = dict(bottle.request.environ)
environ['PATH_INFO'] = environ['PATH_INFO'][len(webroot):]
status_line, headers, response_body_generator = gitHttpBackend.wsgi_to_git_http_backend(environ, str(git_project_root))
response = bottle.Response(response_body_generator, status_line, headers)
bottle.response.content_type = response.get_header('Content-Type')
return response
class MethodHandler:
def __init__(self, handler_func, path_param):
self.handler_func = handler_func
self.path_param = path_param
def __call__(self, route, _data, path, **kwargs):
return self.handler_func(path, **kwargs)
def git_repo(route, repo_root, **serve_params):
""" Defines Git repo endpoint on given route with given root.
Endpoint() objects will be created for GET and POST.
Rest of parameters will be passed through to underlying Endpoint() objects.
"""
backend = GitHTTPBackend(route, repo_root)
get_endpoint = Endpoint(route, None, method='GET', custom_handler=MethodHandler(backend.get, 'path:path'), **serve_params)
get_endpoint.serve()
post_endpoint = Endpoint(route, None, method='POST', custom_handler=MethodHandler(backend.post, 'path:path'), read_data=False, **serve_params)
post_endpoint.serve()
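# Hedged usage sketch (paths are hypothetical): serving an existing repository
# at /myrepo so that `git clone http://host:port/myrepo` works, assuming the
# surrounding application runs the bottle server elsewhere.
#   git_repo('/myrepo', '/srv/repos/myproject')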
| 37.846154
| 143
| 0.758638
| 280
| 1,968
| 5.067857
| 0.3
| 0.033827
| 0.049331
| 0.02537
| 0.118393
| 0.0747
| 0.042283
| 0
| 0
| 0
| 0
| 0
| 0.124492
| 1,968
| 51
| 144
| 38.588235
| 0.823564
| 0.126524
| 0
| 0.051282
| 0
| 0
| 0.047981
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179487
| false
| 0
| 0.102564
| 0.076923
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05cc0547376efd7b3d0398149b11f68433ccaf60
| 2,999
|
py
|
Python
|
imaginaire/discriminators/cagan.py
|
zebincai/imaginaire
|
f5a707f449d93c33fbfe19bcd975a476f2c1dd7a
|
[
"RSA-MD"
] | null | null | null |
imaginaire/discriminators/cagan.py
|
zebincai/imaginaire
|
f5a707f449d93c33fbfe19bcd975a476f2c1dd7a
|
[
"RSA-MD"
] | null | null | null |
imaginaire/discriminators/cagan.py
|
zebincai/imaginaire
|
f5a707f449d93c33fbfe19bcd975a476f2c1dd7a
|
[
"RSA-MD"
] | null | null | null |
# Copyright (C) 2020 NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
import torch
import torch.nn as nn
from imaginaire.layers import Conv2dBlock
from imaginaire.layers.misc import ApplyNoise
class Discriminator(nn.Module):
"""Dummy Discriminator constructor.
Args:
dis_cfg (obj): Discriminator definition part of the yaml config file.
data_cfg (obj): Data definition part of the yaml config file
"""
def __init__(self, gen_cfg, data_cfg):
super(Discriminator, self).__init__()
nonlinearity = gen_cfg.nonlinearity
# input downsample
self.downsample1 = nn.Upsample(scale_factor=0.5, mode='bilinear')
self.downsample2 = nn.Upsample(scale_factor=0.25, mode='bilinear')
self.downsample3 = nn.Upsample(scale_factor=0.125, mode='bilinear')
self.downsample4 = nn.Upsample(scale_factor=0.0625, mode='bilinear')
conv_params = dict(kernel_size=3,
padding=1,
activation_norm_type="instance",
nonlinearity=nonlinearity,
inplace_nonlinearity=True)
# encoder
self.apply_noise = ApplyNoise()
self.layer1 = Conv2dBlock(in_channels=6, out_channels=64, kernel_size=3, padding=1, stride=2,
nonlinearity=nonlinearity, inplace_nonlinearity=True)
self.layer2 = Conv2dBlock(in_channels=64 + 6, out_channels=128, stride=2, **conv_params)
self.layer3 = Conv2dBlock(in_channels=128 + 6, out_channels=256, stride=2, **conv_params)
self.layer4 = Conv2dBlock(in_channels=256 + 6, out_channels=512, stride=2, **conv_params)
self.outlayer = Conv2dBlock(in_channels=512 + 6, out_channels=1, kernel_size=3,
nonlinearity="sigmoid")
# self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.apply_noise(x)
x_d02 = self.downsample1(x)
x_d04 = self.downsample2(x)
x_d08 = self.downsample3(x)
x_d16 = self.downsample4(x)
# encoder
x_en2 = self.layer1(x)
x_en2 = torch.cat([x_en2, x_d02], dim=1)
x_en4 = self.layer2(x_en2)
x_en4 = torch.cat([x_en4, x_d04], dim=1)
x_en8 = self.layer3(x_en4)
x_en8 = torch.cat([x_en8, x_d08], dim=1)
x_en16 = self.layer4(x_en8)
x_en16 = torch.cat([x_en16, x_d16], dim=1)
out = self.outlayer(x_en16)
# out = self.sigmoid(out)
return out
if __name__ == "__main__":
from imaginaire.config import Config
cfg = Config("D:/workspace/develop/imaginaire/configs/projects/cagan/LipMPV/base.yaml")
dis = Discriminator(cfg.dis, cfg.data)
batch = torch.randn((8, 6, 256, 192))
y = dis(batch)
print(y.shape)
| 40.527027
| 102
| 0.617206
| 383
| 2,999
| 4.644909
| 0.357702
| 0.006745
| 0.059022
| 0.047218
| 0.196178
| 0.037099
| 0.037099
| 0
| 0
| 0
| 0
| 0.057683
| 0.277426
| 2,999
| 73
| 103
| 41.082192
| 0.763267
| 0.14905
| 0
| 0
| 0
| 0
| 0.051534
| 0.029039
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.104167
| 0
| 0.1875
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05cc10143e791bcc38db23bf914cc748df6a3237
| 2,959
|
py
|
Python
|
Chapter10/Ch10/server/database.py
|
henrryyanez/Tkinter-GUI-Programming-by-Example
|
c8a326d6034b5e54f77605a8ec840cb8fac89412
|
[
"MIT"
] | 127
|
2018-08-27T16:34:43.000Z
|
2022-03-22T19:20:53.000Z
|
Chapter10/Ch10/server/database.py
|
PiotrAdaszewski/Tkinter-GUI-Programming-by-Example
|
c8a326d6034b5e54f77605a8ec840cb8fac89412
|
[
"MIT"
] | 8
|
2019-04-11T06:47:36.000Z
|
2022-03-11T23:23:42.000Z
|
Chapter10/Ch10/server/database.py
|
PiotrAdaszewski/Tkinter-GUI-Programming-by-Example
|
c8a326d6034b5e54f77605a8ec840cb8fac89412
|
[
"MIT"
] | 85
|
2018-04-30T19:42:21.000Z
|
2022-03-30T01:22:54.000Z
|
import sqlite3
class Database:
def __init__(self):
self.database = "chat.db"
def perform_insert(self, sql, params):
conn = sqlite3.connect(self.database)
cursor = conn.cursor()
cursor.execute(sql, params)
conn.commit()
conn.close()
def perform_select(self, sql, params):
conn = sqlite3.connect(self.database)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
cursor.execute(sql, params)
results = [dict(row) for row in cursor.fetchall()]
conn.close()
return results
def add_user(self, username, real_name):
sql = "INSERT INTO users (username, real_name) VALUES (?,?)"
query_params = (username, real_name)
self.perform_insert(sql, query_params)
def get_all_users(self):
sql = "SELECT username, real_name, avatar FROM users"
params = []
return self.perform_select(sql, params)
def user_exists(self, username):
sql = "SELECT username FROM users WHERE username = ?"
params = (username,)
results = self.perform_select(sql, params)
if len(results):
return True
return False
def update_avatar(self, username, img_b64):
sql = "UPDATE users SET avatar=? WHERE username=?"
params = (img_b64, username)
return self.perform_insert(sql, params)
def get_user_avatar(self, username):
sql = "SELECT avatar FROM users WHERE username=?"
params = (username,)
return self.perform_select(sql, params)
def add_friend(self, user_one, user_two):
sql = "INSERT INTO friends (user_one, user_two, blocked) VALUES (?,?,0)"
query_params = (user_one, user_two)
self.perform_insert(sql, query_params)
def get_friends(self, username):
all_friends = []
sql = "SELECT user_two FROM friends WHERE user_one=? AND blocked=0"
params = (username,)
friends = self.perform_select(sql, params)
sql = "SELECT user_one FROM friends WHERE user_two=? AND blocked=0"
friends2 = self.perform_select(sql, params)
for friend in friends:
all_friends.append(friend["user_two"])
for friend in friends2:
all_friends.append(friend["user_one"])
return all_friends
def get_users_by_usernames(self, usernames):
question_marks = ','.join(['?' for user in usernames])
sql = f"SELECT * FROM users WHERE username IN ({question_marks})"
params = [user for user in usernames]
friends = self.perform_select(sql, params)
return friends
def block_friend(self, username, contact_to_block):
sql = "UPDATE friends SET blocked=1 WHERE (user_one = ? AND user_two = ?) OR (user_two = ? AND user_one = ?)"
query_params = (username, contact_to_block, username, contact_to_block)
self.perform_insert(sql, query_params)
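# Hedged usage sketch against the users/friends tables assumed by the SQL
# above (the chat.db schema itself is created elsewhere):
#   db = Database()
#   db.add_user('alice', 'Alice A.')
#   db.add_friend('alice', 'bob')
#   db.get_friends('alice')   # -> ['bob']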
| 30.822917
| 117
| 0.630618
| 365
| 2,959
| 4.920548
| 0.194521
| 0.055122
| 0.056793
| 0.066815
| 0.322383
| 0.264477
| 0.170379
| 0.089087
| 0
| 0
| 0
| 0.006464
| 0.267996
| 2,959
| 95
| 118
| 31.147368
| 0.822715
| 0
| 0
| 0.268657
| 0
| 0.014925
| 0.199054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179104
| false
| 0
| 0.014925
| 0
| 0.328358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05cea8e33b54e9775229454c04e0071781d3127e
| 938
|
py
|
Python
|
ad_hoc_scripts/update_by_condition.py
|
IgorZyktin/MediaStorageSystem
|
df8d260581cb806eb54f320d63aa674c6175c17e
|
[
"MIT"
] | 2
|
2021-03-06T16:07:30.000Z
|
2021-03-17T10:27:25.000Z
|
ad_hoc_scripts/update_by_condition.py
|
IgorZyktin/MediaStorageSystem
|
df8d260581cb806eb54f320d63aa674c6175c17e
|
[
"MIT"
] | null | null | null |
ad_hoc_scripts/update_by_condition.py
|
IgorZyktin/MediaStorageSystem
|
df8d260581cb806eb54f320d63aa674c6175c17e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Non user friendly script.
"""
from mss.core.class_filesystem import Filesystem
def update_by_condition(root_path: str, theme: str):
"""Change records by condition."""
fs = Filesystem()
path = fs.join(root_path, theme, 'metainfo')
for folder, filename, name, ext in fs.iter_ext(path):
modified = False
if ext != '.json':
continue
full_path = fs.join(folder, filename)
content = fs.read_json(full_path)
for uuid, record in content.items():
if record['group_name'] == 'grand mal 1 rus':
record['sub_series'] = 'grand mal 1 rus'
modified = True
if modified:
fs.write_json(full_path, content)
print(f'Modified: {full_path}')
if __name__ == '__main__':
update_by_condition(
root_path='D:\\BGC_ARCHIVE_TARGET\\',
theme='bubblegum_crisis',
)
| 26.055556
| 57
| 0.590618
| 115
| 938
| 4.573913
| 0.53913
| 0.060837
| 0.064639
| 0.079848
| 0.095057
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004458
| 0.282516
| 938
| 35
| 58
| 26.8
| 0.777117
| 0.08209
| 0
| 0
| 0
| 0
| 0.155477
| 0.028269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.045455
| 0
| 0.090909
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05cf590b42b6da085a51776ee9e5aa949a057c25
| 2,555
|
py
|
Python
|
2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | null | null | null |
2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | 11
|
2020-01-28T22:33:49.000Z
|
2022-03-11T23:41:08.000Z
|
2.ReinforcementLearning/RL_Book/1-gridworld/environment_value_iteration.py
|
link-kut/deeplink_public
|
688c379bfeb63156e865d78d0428f97d7d203cc1
|
[
"MIT"
] | 2
|
2019-06-01T04:14:52.000Z
|
2020-05-31T08:13:23.000Z
|
from environment import *
import random
class ValueIterationGraphicDisplay(GraphicDisplay):
def __init__(self, agent, title):
self.btn_1_text = "Calculate"
self.btn_2_text = "Print Policy"
self.btn_1_func = self.calculate_value
self.btn_2_func = self.print_optimal_policy
self.btn_3_func = self.move_by_value_iteration
GraphicDisplay.__init__(self, agent, title)
def move_by_value_iteration(self):
if self.improvement_count != 0 and self.is_moving != 1:
self.is_moving = 1
x, y = self.canvas.coords(self.rectangle)
self.canvas.move(self.rectangle, UNIT / 2 - x, UNIT / 2 - y)
x, y = self.find_rectangle()
while len(self.agent.get_action([x, y])) != 0:
action = random.sample(self.agent.get_action([x, y]), 1)[0]
self.after(100, self.rectangle_move(action))
x, y = self.find_rectangle()
self.is_moving = 0
def draw_one_arrow(self, col, row, action):
if col == 2 and row == 2:
return
if action == 0: # up
origin_x, origin_y = 50 + (UNIT * row), 10 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.up))
elif action == 1: # down
origin_x, origin_y = 50 + (UNIT * row), 90 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.down))
elif action == 3: # right
origin_x, origin_y = 90 + (UNIT * row), 50 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.right))
elif action == 2: # left
origin_x, origin_y = 10 + (UNIT * row), 50 + (UNIT * col)
self.arrows.append(self.canvas.create_image(origin_x, origin_y, image=self.left))
def draw_from_values(self, state, action_list):
i = state[0]
j = state[1]
for action in action_list:
self.draw_one_arrow(i, j, action)
def calculate_value(self):
self.iter_count += 1
for i in self.texts:
self.canvas.delete(i)
self.agent.value_iteration()
self.print_value_table(self.agent.value_table)
def print_optimal_policy(self):
self.improvement_count += 1
for i in self.arrows:
self.canvas.delete(i)
for state in self.env.all_states:
action = self.agent.get_action(state)
self.draw_from_values(state, action)
| 39.307692
| 94
| 0.600391
| 351
| 2,555
| 4.150997
| 0.22792
| 0.054907
| 0.07138
| 0.07687
| 0.303363
| 0.277282
| 0.227865
| 0.196294
| 0.196294
| 0.196294
| 0
| 0.023613
| 0.28728
| 2,555
| 65
| 95
| 39.307692
| 0.776496
| 0.007045
| 0
| 0.074074
| 0
| 0
| 0.008291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.037037
| 0
| 0.185185
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05cff405e8dd7ef93166ffc63471b8011294be84
| 8,289
|
py
|
Python
|
csimpy/test.py
|
dewancse/csimpy
|
58c32e40e5d991b4ca98df05e6f61020def475a9
|
[
"Apache-2.0"
] | null | null | null |
csimpy/test.py
|
dewancse/csimpy
|
58c32e40e5d991b4ca98df05e6f61020def475a9
|
[
"Apache-2.0"
] | null | null | null |
csimpy/test.py
|
dewancse/csimpy
|
58c32e40e5d991b4ca98df05e6f61020def475a9
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
from math import *
from scipy import integrate
import matplotlib.pyplot as plt
from libcellml import *
import lxml.etree as ET
__version__ = "0.1.0"
LIBCELLML_VERSION = "0.2.0"
STATE_COUNT = 1
VARIABLE_COUNT = 29
class VariableType(Enum):
CONSTANT = 1
COMPUTED_CONSTANT = 2
ALGEBRAIC = 3
VOI_INFO = {"name": "time", "units": "second", "component": "environment"}
STATE_INFO = [
{"name": "pH_ext", "units": "dimensionless", "component": "Concentrations"}
]
VARIABLE_INFO = [
{"name": "C_ext_NH4", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_ext_Na", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_H", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_NH4", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "C_int_Na", "units": "mM", "component": "Concentrations", "type": VariableType.CONSTANT},
{"name": "K_NHE3_H", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "K_NHE3_NH4", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "K_NHE3_Na", "units": "mM", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_H", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "XTxP0_NHE3_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3_Parameters", "type": VariableType.CONSTANT},
{"name": "C_ext_H", "units": "mM", "component": "Concentrations", "type": VariableType.ALGEBRAIC},
{"name": "alpha_ext_Na", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "beta_ext_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "gamma_ext_NH4", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "alpha_int_Na", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "beta_int_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "gamma_int_NH4", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_H", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "XTxP_NHE_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "sum_NHE3", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_Na", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_H", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_NH4", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "J_NHE3_Na_Max", "units": "nmol_per_s_per_cm2", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT},
{"name": "plot_a", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "plot_b", "units": "dimensionless", "component": "NHE3", "type": VariableType.ALGEBRAIC},
{"name": "K_H", "units": "dimensionless", "component": "NHE3", "type": VariableType.COMPUTED_CONSTANT}
]
def create_states_array():
return [nan]*STATE_COUNT
def create_variables_array():
return [nan]*VARIABLE_COUNT
def initialize_states_and_constants(states, variables):
variables[0] = 0.0
variables[1] = 0.1
variables[2] = 1.0e-3
variables[3] = 0.0
variables[4] = 0.0
variables[5] = 72.0e-6
variables[6] = 0.027e3
variables[7] = 30.0
variables[8] = 0.48e-3
variables[9] = 1.6e-3
variables[10] = 1.6e-3
states[0] = 6.0
def compute_computed_constants(variables):
variables[12] = variables[1]/variables[7]
variables[14] = variables[0]/variables[6]
variables[15] = variables[4]/variables[7]
variables[16] = variables[2]/variables[5]
variables[17] = variables[3]/variables[6]
variables[18] = variables[10]*2.0*variables[2]/(variables[2]+1.0e-6)
variables[19] = variables[8]*2.0*variables[2]/(variables[2]+1.0e-6)
variables[20] = variables[9]*2.0*variables[2]/(variables[2]+1.0e-6)
variables[25] = variables[18]*variables[19]/(variables[18]+variables[19])
variables[28] = ((1.0+variables[12])*variables[16]+(1.0+variables[16])*variables[12]*variables[18]/variables[19])/(1.0+2.0*variables[16])
def compute_rates(voi, states, rates, variables):
rates[0] = 2.0
def compute_variables(voi, states, rates, variables):
variables[11] = 1.0e3*pow(10.0, -states[0])
variables[13] = variables[11]/variables[5]
    variables[21] = (1.0+variables[12]+variables[13]+variables[14])*(variables[18]*variables[15]+variables[19]*variables[16]+variables[20]*variables[17])+(1.0+variables[15]+variables[16]+variables[17])*(variables[18]*variables[12]+variables[19]*variables[13]+variables[20]*variables[14])
variables[22] = variables[18]*variables[19]/variables[21]*(variables[12]*variables[16]-variables[15]*variables[13])+variables[18]*variables[20]/variables[21]*(variables[12]*variables[17]-variables[15]*variables[14])
variables[23] = variables[18]*variables[19]/variables[21]*(variables[15]*variables[13]-variables[12]*variables[16])+variables[19]*variables[20]/variables[21]*(variables[13]*variables[17]-variables[16]*variables[14])
variables[24] = variables[18]*variables[20]/variables[21]*(variables[15]*variables[14]-variables[12]*variables[17])+variables[19]*variables[20]/variables[21]*(variables[14]*variables[16]-variables[13]*variables[17])
variables[26] = variables[22]/variables[25]
variables[27] = 1.0/variables[26]
# LSODA
start = 0.0
end = 1
numpoints = 1000
stepsize = (end - start) / numpoints
print(start, end, numpoints, stepsize)
states = create_states_array()
variables = create_variables_array()
initialize_states_and_constants(states, variables)
compute_computed_constants(variables) # added this line
temp = []
def func(t, y):
rates = create_states_array()
compute_rates(t, y, rates, variables)
compute_variables(t, y, rates, variables) # added this line
print("variables[22]: ", variables[22])
temp.append(variables[22])
return rates
print("start: ", start)
print("end: ", end)
print("states: ", states)
solution = integrate.solve_ivp(func,[start, end], states, method='LSODA', max_step=stepsize, atol=1e-4, rtol=1e-6)
print(solution.t)
print(solution.y)
# graph
fig, ax = plt.subplots()
ax.plot(solution.y[0], temp, label='Line 1')
ax.set_xlabel('t')
ax.set_ylabel('y')
ax.set_title('Some Title')
ax.legend()
fig.savefig('test.png')
# # test
# def exponential_decay(t, y):
# return -0.5 * y
#
# sol = integrate.solve_ivp(exponential_decay, [0, 10], [2, 4, 8])
#
# print(sol.t)
# print(sol.y)
#
# fig2, ax2 = plt.subplots()
# ax2.plot(sol.t, sol.y[0], label='Line 1')
# ax2.plot(sol.t, sol.y[1], label='Line 2')
# ax2.plot(sol.t, sol.y[2], label='Line 3')
# ax2.set_xlabel('x label')
# ax2.set_ylabel('y label')
# ax2.set_title('Simple Plot')
# ax2.legend()
# fig2.savefig('test.png')
# convert cellml1.0 or 1.1 to 2.0
# with open('../tests/fixtures/chang_fujita_1999.xml') as f:
# read_data = f.read()
# f.close()
#
# p = Parser()
# importedModel = p.parseModel(read_data)
#
# # parsing cellml 1.0 or 1.1 to 2.0
# dom = ET.fromstring(read_data.encode("utf-8"))
# xslt = ET.parse("../tests/fixtures/cellml1to2.xsl")
# transform = ET.XSLT(xslt)
# newdom = transform(dom)
#
# mstr = ET.tostring(newdom, pretty_print=True)
# mstr = mstr.decode("utf-8")
#
# # parse the string representation of the model to access by libcellml
# importedModel = p.parseModel(mstr)
#
# f = open('../tests/fixtures/chang_fujita_1999.xml', 'w')
# f.write(mstr)
| 42.507692
| 268
| 0.68054
| 1,130
| 8,289
| 4.839823
| 0.177876
| 0.084842
| 0.052843
| 0.090144
| 0.520205
| 0.465533
| 0.439386
| 0.368257
| 0.335345
| 0.287073
| 0
| 0.058824
| 0.120159
| 8,289
| 195
| 269
| 42.507692
| 0.691074
| 0.129328
| 0
| 0
| 0
| 0
| 0.231381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.05042
| 0.016807
| 0.168067
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05d4760733051270e73120a1ac9a61ea86e6cde5
| 1,800
|
py
|
Python
|
DOOM.py
|
ariel139/DOOM-port-scanner
|
328678b9f79855de472967f1a3e4b3e9181a3706
|
[
"MIT"
] | 6
|
2020-11-24T06:51:02.000Z
|
2022-02-26T23:19:46.000Z
|
DOOM.py
|
ariel139/DOOM-port-scanner
|
328678b9f79855de472967f1a3e4b3e9181a3706
|
[
"MIT"
] | null | null | null |
DOOM.py
|
ariel139/DOOM-port-scanner
|
328678b9f79855de472967f1a3e4b3e9181a3706
|
[
"MIT"
] | null | null | null |
import socket
from IPy import IP
print("""
You are using the DOOM Port scanner.
This tool is for educational purpose ONLY!!!!
1. You can change the range of the ports you want to scan.
2. You can change the speed of the scan
3. You can scan a list of targets by separating them with ', '
4. You can scan both URL links and IPs
""")
# ip addresses
targets = input("enter targets or URL's ")
# min range of ports
min_port = int(input("enter min number of ports "))
# max range of ports
max_port = int(input("enter max number of ports "))
try:
    speed = float(input("Enter the timeout in seconds for each connection attempt (a fractional value like 0.1 works well; default is 0.1) "))
except ValueError:
    speed = 0.1
def multi_targets(ip):
converted_ip = check_ip(ip)
    # Loop over the requested port range and probe each one
    print(f'scanning ports for {ip}')
for port in range(min_port,max_port +1):
scan_port(converted_ip,port)
# Check whether the target is an IP address or a hostname/URL
def check_ip(ip):
try:
IP(ip)
return ip
except ValueError:
        return socket.gethostbyname(ip)
def get_data_from_port(soc):
return soc.recv(1024)
# scan port function
def scan_port(ip, port):
try:
sc = socket.socket()
sc.settimeout(speed)
sc.connect((ip, port))
try:
data = get_data_from_port(sc)
            print(f'[+] port {port} is open and received data: {data}')
except:
print(f'[+] {port} port is open')
except:
        print('scanning ports...')
# Split comma-separated targets and scan each one
if ', ' in targets:
for ip_add in targets.split(','):
multi_targets(ip_add.strip(' '))
else:
multi_targets(targets)
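# Hedged example session (illustrative values only): scanning 127.0.0.1 over
# ports 75..85 with the default 0.1 s timeout prints a line such as
#   [+] 80 port is open
# for each port that accepts a TCP connection, plus banner data when the
# service sends any within the timeout.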
| 24.657534
| 111
| 0.597778
| 270
| 1,800
| 3.907407
| 0.32963
| 0.022749
| 0.036967
| 0.028436
| 0.030332
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010408
| 0.306111
| 1,800
| 72
| 112
| 25
| 0.834267
| 0.095
| 0
| 0.1875
| 0
| 0.020833
| 0.367979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.041667
| 0.020833
| 0.1875
| 0.104167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05d4a6a91e58732f8757086328fccaf5f8b61a70
| 9,380
|
py
|
Python
|
finding_models/testing_classifiers.py
|
NtMalDetect/NtMalDetect
|
5bf8f35491bf8081d0b721fa1bf90582b410ed74
|
[
"MIT"
] | 10
|
2018-01-04T07:59:59.000Z
|
2022-01-17T08:56:33.000Z
|
finding_models/testing_classifiers.py
|
NtMalDetect/NtMalDetect
|
5bf8f35491bf8081d0b721fa1bf90582b410ed74
|
[
"MIT"
] | 2
|
2020-01-12T19:32:05.000Z
|
2020-04-11T09:38:07.000Z
|
finding_models/testing_classifiers.py
|
NtMalDetect/NtMalDetect
|
5bf8f35491bf8081d0b721fa1bf90582b410ed74
|
[
"MIT"
] | 1
|
2018-08-31T04:13:43.000Z
|
2018-08-31T04:13:43.000Z
|
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
from sklearn.utils import shuffle
useTFIDF = True
showSampleVector = False
showMostInformativeFeatures = True
howManyInformativeFeatures = 10
nGRAM1 = 10
nGRAM2 = 10
weight = 10
ask = input("Do you want to specify parameters or use default values? Input 'T' or 'F'. ")
if ask == "T":
useTFIDFStr = input("Do you want to use tfidfVectorizer or CountVectorizer? Type T for tfidfVectorizer and F for CountVectorizer ")
if useTFIDFStr == "T":
useTFIDF = True
else:
useTFIDF = False
showSampleVectorStr = input("Do you want to print an example vectorized corpus? (T/F) ")
if showSampleVectorStr == "T":
showSampleVector = True
else:
showSampleVector = False
showMostInformativeFeaturesStr = input("Do you want to print the most informative feature in some of the classifiers? (T/F) ")
if showMostInformativeFeaturesStr == "T":
showMostInformativeFeatures = True
howManyInformativeFeatures = int(input("How many of these informative features do you want to print for each binary case? Input a number "))
else:
showMostInformativeFeatures = False
nGRAM1 = int(input("N-Gram lower bound (Read README.md for more information)? Input a number "))
nGRAM2 = int(input("N-Gram Upper bound? Input a number "))
weight = int(input("What weight do you want to use to separate train & testing? Input a number "))
main_corpus = []
main_corpus_target = []
my_categories = ['benign', 'malware']
# feed the corpus with the labelled system-call traces
print("Loading system call database for categories:")
print(my_categories if my_categories else "all")
import glob
import os
malCOUNT = 0
benCOUNT = 0
for filename in glob.glob(os.path.join('./sysMAL', '*.txt')):
    with open(filename, "r") as fMAL:
        aggregate = ""
        for line in fMAL:
            linea = line[:(len(line)-1)]  # strip the trailing newline
            aggregate += " " + linea
    main_corpus.append(aggregate)
    main_corpus_target.append(1)
    malCOUNT += 1
for filename in glob.glob(os.path.join('./sysBEN', '*.txt')):
    with open(filename, "r") as fBEN:
        aggregate = ""
        for line in fBEN:
            linea = line[:(len(line) - 1)]  # strip the trailing newline
            aggregate += " " + linea
    main_corpus.append(aggregate)
    main_corpus_target.append(0)
    benCOUNT += 1
# shuffling the dataset
main_corpus_target, main_corpus = shuffle(main_corpus_target, main_corpus, random_state=0)
# split into train/test according to the weight set at the top of the script
train_corpus = main_corpus[:(weight*len(main_corpus)//(weight+1))]
train_corpus_target = main_corpus_target[:(weight*len(main_corpus)//(weight+1))]
test_corpus = main_corpus[(len(main_corpus)-(len(main_corpus)//(weight+1))):]
test_corpus_target = main_corpus_target[(len(main_corpus)-len(main_corpus)//(weight+1)):]
print("%d documents - %0.3fMB (training set)" % (
len(train_corpus_target), train_corpus_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(test_corpus_target), test_corpus_size_mb))
print("%d categories" % len(my_categories))
print()
print("Benign Traces: "+str(benCOUNT)+" traces")
print("Malicious Traces: "+str(malCOUNT)+" traces")
print()
print("Extracting features from the training data using a sparse vectorizer...")
t0 = time()
if useTFIDF:
    vectorizer = TfidfVectorizer(ngram_range=(nGRAM1, nGRAM2), min_df=1, use_idf=True, smooth_idf=True)
else:
vectorizer = CountVectorizer(ngram_range=(nGRAM1, nGRAM2))
analyze = vectorizer.build_analyzer()
if showSampleVector:
print(analyze(test_corpus[1]))
X_train = vectorizer.fit_transform(train_corpus)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, train_corpus_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer...")
t0 = time()
X_test = vectorizer.transform(test_corpus)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, test_corpus_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# show which are the definitive features
def show_most_informative_features(vectorizer, clf, n=20):
    feature_names = vectorizer.get_feature_names()
    # sort once: most negative coefficients -> benign, most positive -> malicious
    coefs_with_fns = sorted(zip(clf.coef_[0], feature_names))
    coefs_with_fns_mal = coefs_with_fns[:-(n + 1):-1]
    coefs_with_fns = coefs_with_fns[:n]
print()
print("Most Informative Benign Features:")
for (coef_1, fn_1) in coefs_with_fns:
print(coef_1, fn_1)
print()
print("Most Informative Malicious Features:")
for (coef_2, fn_2) in coefs_with_fns_mal:
print(coef_2, fn_2)
print()
def benchmark(clf, showTopFeatures=False):
print('_'*60)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, train_corpus_target)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(test_corpus_target, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
print()
    print(metrics.classification_report(test_corpus_target, pred, target_names=my_categories))
print()
clf_descr = str(clf).split('(')[0]
print("Predicted values: ")
    print(pred.tolist())
print()
print("Real values:")
print(test_corpus_target)
print()
mCount = 0
for i in test_corpus_target:
if i == 1:
mCount+=1
print("Proportion of malicious trace:")
print(mCount/len(test_corpus_target))
if showTopFeatures:
show_most_informative_features(vectorizer, clf, 10)
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(penalty=penalty, dual=False,
tol=1e-3), showMostInformativeFeatures))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty), showMostInformativeFeatures))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False,
tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
# plotting results
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='navy')
plt.barh(indices + .3, training_time, .2, label="training time",
color='c')
plt.barh(indices + .6, test_time, .2, label="test time", color='darkorange')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
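# Compatibility note (hedged): this script targets an older scikit-learn.
# `n_iter` was removed from Perceptron/SGDClassifier/PassiveAggressiveClassifier
# in scikit-learn 0.21 in favour of `max_iter`/`tol`, and `get_feature_names`
# was later replaced by `get_feature_names_out`. A small illustrative shim
# (the helper name is invented):
import sklearn
from sklearn.linear_model import Perceptron

def make_perceptron(iters=50):
    major, minor = (int(x) for x in sklearn.__version__.split('.')[:2])
    if (major, minor) >= (0, 21):
        return Perceptron(max_iter=iters)  # modern keyword
    return Perceptron(n_iter=iters)        # legacy keyword (< 0.21)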
| 31.059603
| 150
| 0.698294
| 1,219
| 9,380
| 5.231337
| 0.248564
| 0.031363
| 0.027599
| 0.01035
| 0.243218
| 0.168104
| 0.12106
| 0.105065
| 0.084993
| 0.084993
| 0
| 0.018305
| 0.178785
| 9,380
| 301
| 151
| 31.162791
| 0.809555
| 0.042111
| 0
| 0.198157
| 0
| 0
| 0.179058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009217
| false
| 0.009217
| 0.119816
| 0
| 0.133641
| 0.276498
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05d5479edfdc67ed72d1fed7ba706e163051f970
| 5,953
|
py
|
Python
|
neutron/tests/fullstack/test_firewall.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 1
|
2018-10-19T01:48:37.000Z
|
2018-10-19T01:48:37.000Z
|
neutron/tests/fullstack/test_firewall.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
neutron/tests/fullstack/test_firewall.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from oslo_log import log as logging
from oslo_utils import uuidutils
from neutron.agent.common import ovs_lib
from neutron.agent.linux import iptables_firewall
from neutron.agent.linux import iptables_manager
from neutron.agent.linux.openvswitch_firewall import iptables as ovs_iptables
from neutron.common import utils
from neutron.tests.common import machine_fixtures
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
LOG = logging.getLogger(__name__)
class IptablesNotConfiguredException(Exception):
pass
class VmsUnreachableException(Exception):
pass
class FirewallMigrationTestCase(base.BaseFullStackTestCase):
def setUp(self):
host_descriptions = [
environment.HostDescription(
l3_agent=False,
of_interface='native',
l2_agent_type=constants.AGENT_TYPE_OVS,
firewall_driver='iptables_hybrid',
dhcp_agent=False,
)]
env = environment.Environment(
environment.EnvironmentDescription(),
host_descriptions)
super(FirewallMigrationTestCase, self).setUp(env)
# fullstack doesn't separate nodes running ovs agent so iptables rules
# are implemented in root namespace
self.iptables_manager = iptables_manager.IptablesManager()
def _prepare_resources(self):
self.tenant_uuid = uuidutils.generate_uuid()
network = self.safe_client.create_network(self.tenant_uuid)
self.safe_client.create_subnet(
self.tenant_uuid, network['id'], '20.0.0.0/24', enable_dhcp=False)
vms = machine.FakeFullstackMachinesList(
self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
network['id'],
self.tenant_uuid,
self.safe_client,
use_dhcp=False))
for i in range(2))
vms.block_until_all_boot()
for vm in vms:
self._add_icmp_security_group_rule(vm)
return vms
def _add_icmp_security_group_rule(self, vm):
sg_id = self.safe_client.create_security_group(self.tenant_uuid)['id']
self.safe_client.create_security_group_rule(
self.tenant_uuid, sg_id,
direction=constants.INGRESS_DIRECTION,
ethertype=constants.IPv4,
protocol=constants.PROTO_NAME_ICMP)
self.safe_client.client.update_port(
vm.neutron_port['id'],
body={'port': {'security_groups': [sg_id]}})
self.addCleanup(
self.safe_client.client.update_port,
vm.neutron_port['id'],
body={'port': {'security_groups': []}})
def _validate_iptables_rules(self, vms):
"""Check if rules from iptables firewall are configured.
Raises IptablesNotConfiguredException exception if no rules are found.
"""
for vm in vms:
vm_tap_device = iptables_firewall.get_hybrid_port_name(
vm.neutron_port['id'])
filter_rules = self.iptables_manager.get_rules_for_table('filter')
if not any(vm_tap_device in line for line in filter_rules):
raise IptablesNotConfiguredException(
"There are no iptables rules configured for interface %s" %
vm_tap_device)
def _switch_firewall(self, firewall_driver):
"""Switch firewall_driver to given driver and restart the agent."""
l2_agent = self.environment.hosts[0].l2_agent
l2_agent_config = l2_agent.agent_cfg_fixture.config
l2_agent_config['securitygroup']['firewall_driver'] = firewall_driver
l2_agent.agent_cfg_fixture.write_config_to_configfile()
l2_agent.restart()
int_bridge = ovs_lib.OVSBridge(
l2_agent_config['ovs']['integration_bridge'])
predicate = functools.partial(
ovs_iptables.is_bridge_cleaned, int_bridge)
utils.wait_until_true(
predicate,
exception=RuntimeError(
"Bridge %s hasn't been marked as clean." % int_bridge.br_name))
def test_migration(self):
vms = self._prepare_resources()
# Make sure ICMP packets can get through with iptables firewall
vms.ping_all()
self._validate_iptables_rules(vms)
self._switch_firewall('openvswitch')
# Make sure security groups still work after migration
vms.ping_all()
self.assertRaises(
IptablesNotConfiguredException, self._validate_iptables_rules, vms)
# Remove security groups so traffic cannot get through
for vm in vms:
self.safe_client.client.update_port(
vm.neutron_port['id'],
body={'port': {'security_groups': []}})
# TODO(jlibosva): Test all permutations and don't fail on the first one
self.assertRaises(machine_fixtures.FakeMachineException, vms.ping_all)
# Add back some security groups allowing ICMP and test traffic can now
# get through
for vm in vms:
self._add_icmp_security_group_rule(vm)
vms.ping_all()
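# The driver switch above blocks on neutron.common.utils.wait_until_true.
# A minimal stdlib-only sketch of that polling pattern (illustrative, not
# neutron's actual implementation; timings are arbitrary):
import time

def wait_until_true(predicate, timeout=60, sleep=1, exception=None):
    """Poll predicate() until it returns True or `timeout` seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return
        time.sleep(sleep)
    raise exception or RuntimeError("condition not met within %ss" % timeout)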
| 38.908497
| 79
| 0.666891
| 702
| 5,953
| 5.433048
| 0.340456
| 0.028841
| 0.029366
| 0.020975
| 0.186418
| 0.153907
| 0.099895
| 0.073676
| 0.073676
| 0.073676
| 0
| 0.006567
| 0.258189
| 5,953
| 152
| 80
| 39.164474
| 0.857111
| 0.199731
| 0
| 0.173077
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0.006579
| 0.019231
| 1
| 0.057692
| false
| 0.019231
| 0.125
| 0
| 0.221154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05d679b96fcc27f56541b2f87e6ba4b22f90adbe
| 709
|
py
|
Python
|
Analysis/pdf_to_txt.py
|
ashishnitinpatil/resanalysersite
|
0604d2fed4760be741c4d90b6d230d0f2cd8bf9e
|
[
"CC-BY-4.0"
] | null | null | null |
Analysis/pdf_to_txt.py
|
ashishnitinpatil/resanalysersite
|
0604d2fed4760be741c4d90b6d230d0f2cd8bf9e
|
[
"CC-BY-4.0"
] | null | null | null |
Analysis/pdf_to_txt.py
|
ashishnitinpatil/resanalysersite
|
0604d2fed4760be741c4d90b6d230d0f2cd8bf9e
|
[
"CC-BY-4.0"
] | null | null | null |
from pdfminer.pdfinterp import PDFResourceManager, process_pdf
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from cStringIO import StringIO  # Python 2: the legacy pdfminer API targets Py2
def convert_pdf(path):
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(path, 'rb')
    process_pdf(rsrcmgr, device, fp)
    fp.close()
    device.close()
    text = retstr.getvalue()  # renamed so it doesn't shadow the builtin `str`
    retstr.close()
    return text
with open('C:\\Users\\ashis\\Desktop\\CIVIL ENGINEERING.txt', 'w') as to_write:
to_write.write(convert_pdf('C:\\Users\\ashis\\Desktop\\CIVIL ENGINEERING.pdf'))
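# Hedged Python 3 equivalent using pdfminer.six (pip install pdfminer.six),
# whose high-level API replaces the resource-manager plumbing above.
# The function name is invented to avoid shadowing convert_pdf.
from pdfminer.high_level import extract_text

def convert_pdf_py3(path):
    # extract_text handles the PDFResourceManager/TextConverter setup internally
    return extract_text(path)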
| 27.269231
| 83
| 0.712271
| 87
| 709
| 5.735632
| 0.482759
| 0.072144
| 0.044088
| 0.072144
| 0.136273
| 0.136273
| 0
| 0
| 0
| 0
| 0
| 0.001692
| 0.166432
| 709
| 25
| 84
| 28.36
| 0.84264
| 0
| 0
| 0
| 0
| 0
| 0.146685
| 0.090268
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05d6c824429b4f5fccdfe1433815eb6c96e18c8f
| 480
|
py
|
Python
|
local/handler/TravisHandler.py
|
fasterit/supybot-github
|
37b80046c0f0d5a66b2107a63e380002adbb66f5
|
[
"MIT"
] | 7
|
2016-07-16T22:16:37.000Z
|
2021-06-14T20:45:37.000Z
|
local/handler/TravisHandler.py
|
fasterit/supybot-github
|
37b80046c0f0d5a66b2107a63e380002adbb66f5
|
[
"MIT"
] | 30
|
2015-06-03T22:40:28.000Z
|
2022-02-11T08:49:44.000Z
|
local/handler/TravisHandler.py
|
fasterit/supybot-github
|
37b80046c0f0d5a66b2107a63e380002adbb66f5
|
[
"MIT"
] | 5
|
2018-01-12T21:28:50.000Z
|
2020-10-01T13:44:09.000Z
|
from ..utility import *
def handle(data, theme):
if isStatusVisible(data['repository']['url'], data['status_message'].lower()):
theme.travis(
branch = data['branch'],
repo = data['repository']['name'],
status = data['status_message'],
commitId = data['commit'],
commitMessage = data['message'],
commitAuthor = data['author_name'],
buildUrl = getShortURL(data['build_url'])
)
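# Hypothetical Travis webhook payload showing the keys handle() reads above;
# all values are invented for illustration.
sample_payload = {
    'repository': {'url': 'https://github.com/fasterit/supybot-github',
                   'name': 'supybot-github'},
    'status_message': 'Passed',
    'branch': 'master',
    'commit': 'abc1234',
    'message': 'Fix build',
    'author_name': 'Jane Doe',
    'build_url': 'https://travis-ci.org/fasterit/supybot-github/builds/1',
}
# handle(sample_payload, theme)  # `theme` must provide a travis(...) callback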
| 34.285714
| 82
| 0.554167
| 44
| 480
| 5.954545
| 0.590909
| 0.10687
| 0.129771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2875
| 480
| 13
| 83
| 36.923077
| 0.766082
| 0
| 0
| 0
| 0
| 0
| 0.195833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05e10cbd60c9a8c4e9d6e849c57e56e13a3dc1f5
| 3,596
|
py
|
Python
|
Code/network_model_HiCoDe.py
|
AbinavRavi/Network_Analysis_Eur_Parl
|
dea84d3375eea07676e0193d575e3deef76312bc
|
[
"MIT"
] | 1
|
2020-12-15T16:35:20.000Z
|
2020-12-15T16:35:20.000Z
|
Code/network_model_HiCoDe.py
|
AbinavRavi/Network_Analysis_Eur_Parl
|
dea84d3375eea07676e0193d575e3deef76312bc
|
[
"MIT"
] | null | null | null |
Code/network_model_HiCoDe.py
|
AbinavRavi/Network_Analysis_Eur_Parl
|
dea84d3375eea07676e0193d575e3deef76312bc
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sp
import pandas as pd
import ast
import itertools
from itertools import product
from collections import Counter
import networkx as nx
import network_utils as nu
import hicode as hc
import matplotlib.pyplot as plt
import matplotlib.cm as cm
plt.style.use('classic')
# -----------------------------------------------------------------------------------------------------------------------
## Loading data
topicDF = pd.read_csv('../Topics/topicsData350.csv')
topicDF['date'] = pd.to_datetime(topicDF['date'])
# topicDF_part = topicDF[(topicDF.date < '2001-07-01') & (topicDF.date >= '2000-07-01')]
# topicDF_part = topicDF[topicDF.date == '2000-07-01']
sit = 0
count = Counter([])
for i in range(58):
year = 1999 + (i + 6) // 12
month = (i + 6) % 12 + 1
date = '{:4d}-{:02d}-01'.format(year, month)
year = 1999 + (i + 9) // 12
month = (i + 9) % 12 + 1
date2 = '{:4d}-{:02d}-01'.format(year, month)
topicDF_part = topicDF[(topicDF.date < date2) & (topicDF.date >= date)]
if topicDF_part.shape[0] == 0:
continue
else:
sit += 1
    with open('../data/outliers.txt', 'a') as f:  # close the handle each pass
        f.write('{:s}\n'.format(date))
    print(date)
# -----------------------------------------------------------------------------------------------------------------------
## Building network
network = nu.build_network(topicDF_part, 350, exclude=[])
#print(len(network.nodes()))
bottom_nodes = [n for n in network.nodes() if n not in range(350)]
network = nu.fold_network(network, bottom_nodes, mode='single')
network = nu.normalize_edgeweight(network)
# -----------------------------------------------------------------------------------------------------------------------
## Analyzing network
networks, partitions = hc.hicode(network, True)
candidates = [(u, v) for u, v in product(network.nodes(), network.nodes()) if
u != v and partitions[0][u] != partitions[0][v]]
for i in range(1,len(partitions)):
candidates = [(u,v) for u, v in candidates if partitions[i][u] == partitions[i][v]]
candidates = [(u,v) for u,v in candidates]
# candidates.sort()
# candidates = list(k for k,_ in itertools.groupby(candidates))
# print(candidates)
# candidates = [tuple(c) for c in candidates ]
count+=Counter(candidates)
count = dict(count)
count = sorted(count.items(), key=lambda kv: kv[1], reverse=True)
with open('../Results_Hicode/first_session_redweight.txt', 'w') as f:
f.write('Total sittings: {:d}\n\n'.format(int(sit)))
for k, v in count:
f.write('{:s}: {:d}, {:f}\n'.format(str(k), int(v), v / sit))
# -----------------------------------------------------------------------------------------------------------------------
## Drawing network
# for i in range(len(networks)):
# plt.figure()
# values = [partitions[0].get(n) for n in networks[i].nodes()]
# removeE = [e for e in networks[i].edges() if partitions[i][e[0]] != partitions[i][e[1]]]
# networks[i].remove_edges_from(removeE)
# pos = nx.spring_layout(networks[i], iterations=15, weight='weight')
# sizes = [50 * nu.node_weight(networks[i], node) for node in networks[i].nodes()]
# weights = [networks[i][u][v]['weight'] for u, v, in networks[i].edges()]
# nc = nx.draw_networkx_nodes(networks[i], pos, with_labels=False, node_color=values, node_size=sizes, alpha=0.4,
# cmap=cm.gist_rainbow)
# nx.draw_networkx_edges(networks[i], pos, width=weights)
# plt.axis('off')
# plt.colorbar(nc)
# plt.show()
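# Illustrative check of the month-window arithmetic used in the loop above
# (index i maps to a three-month window [start, start + 3 months)):
for i in range(3):
    y1, m1 = 1999 + (i + 6) // 12, (i + 6) % 12 + 1
    y2, m2 = 1999 + (i + 9) // 12, (i + 9) % 12 + 1
    print('{:4d}-{:02d}-01 -> {:4d}-{:02d}-01'.format(y1, m1, y2, m2))
# 1999-07-01 -> 1999-10-01
# 1999-08-01 -> 1999-11-01
# 1999-09-01 -> 1999-12-01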
| 38.666667
| 121
| 0.547553
| 464
| 3,596
| 4.181034
| 0.331897
| 0.046392
| 0.010309
| 0.014433
| 0.121134
| 0.062371
| 0.039691
| 0.029897
| 0
| 0
| 0
| 0.02883
| 0.170467
| 3,596
| 92
| 122
| 39.086957
| 0.621522
| 0.449944
| 0
| 0
| 0
| 0
| 0.09928
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.244898
| 0
| 0.244898
| 0.020408
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05e5ab63cfbf61b1260c3430dac86bcf4cae1b06
| 17,452
|
py
|
Python
|
prompt_tuning/data/super_glue.py
|
techthiyanes/prompt-tuning
|
9f4d7082aa6dbd955e38488d6d3fa5a7c039f6c7
|
[
"Apache-2.0"
] | 108
|
2021-11-05T21:44:27.000Z
|
2022-03-31T14:19:30.000Z
|
prompt_tuning/data/super_glue.py
|
techthiyanes/prompt-tuning
|
9f4d7082aa6dbd955e38488d6d3fa5a7c039f6c7
|
[
"Apache-2.0"
] | 172
|
2022-02-01T00:08:39.000Z
|
2022-03-31T12:44:07.000Z
|
prompt_tuning/data/super_glue.py
|
dumpmemory/prompt-tuning
|
bac77e4f5107b4a89f89c49b14d8fe652b1c5734
|
[
"Apache-2.0"
] | 9
|
2022-01-16T11:55:18.000Z
|
2022-03-06T23:26:36.000Z
|
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Special version of the SuperGlue Tasks.
The main task formats here are:
* super_glue_{name}_v102_examples
* mt5_super_glue_{name}_v102_examples
* taskless_super_glue_{name}_v102
* taskless_super_glue_{name}_v102_examples
* mt5_taskless_super_glue_{name}_v102
* mt5_taskless_super_glue_{name}_v102_examples
Any task that starts with `mT5` uses the `mT5` vocab. Any task that ends with
`examples` is set up to log intermediate examples to tensorboard. Any task with
`taskless` does not have the task name as the initial text token (like t5 tasks
do). Any task with `task_index` in the name has a special task index as the
initial post-integerization token.
"""
import functools
from prompt_tuning.data import features
from prompt_tuning.data import metrics as pt_metrics
from prompt_tuning.data import postprocessors as pt_postprocessors
from prompt_tuning.data import preprocessors as pt_preprocessors
from prompt_tuning.data import utils
import seqio
from t5.data import postprocessors
from t5.data import preprocessors
from t5.data.glue_utils import get_glue_postprocess_fn
from t5.data.glue_utils import get_glue_text_preprocessor
from t5.data.glue_utils import get_super_glue_metric
from t5.evaluation import metrics
import tensorflow_datasets as tfds
super_glue_task_indexer = utils.task_mapping(
tuple(b.name
for b in tfds.text.super_glue.SuperGlue.builder_configs.values()), {
"wsc.fixed": "wsc",
"axb": "rte",
"axg": "rte"
})
for model_prefix, feats in features.MODEL_TO_FEATURES.items():
for log_examples in (True, False):
# ========== SuperGlue ==========
    # This section adds the core SuperGlue tasks. We do not include WSC in this
    # loop; WSC has different settings for training and validation because t5
    # casts it as a short text generation task instead of as classification (via
    # generation of class labels). We will add that as a mixture later.
for b in tfds.text.super_glue.SuperGlue.builder_configs.values():
if "wsc" in b.name:
continue
if log_examples:
postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples,
get_glue_postprocess_fn(b))
metric_fns = [
functools.partial(pt_metrics.metric_with_examples, func)
for func in get_super_glue_metric(b.name)
] + [functools.partial(pt_metrics.text_examples, task_name=b.name)]
examples_suffix = "_examples"
else:
postprocess_fn = get_glue_postprocess_fn(b)
metric_fns = get_super_glue_metric(b.name)
examples_suffix = ""
      # The axb task needs to be rekeyed before we apply the glue text
      # preprocessor. Instead of detecting this and registering axb differently
      # (which would need to be repeated for each variant of the dataset we
      # have), we keep a list of preprocessors; for most tasks it is empty and
      # for axb it holds the rekey function. When we register a task we append
      # the text preprocessor to this list and it all works out. We can't
      # predefine the full list upfront (like t5 does) because the actual
      # text preprocessor can differ for tasks like the taskless version.
pre_preprocessors = []
if b.name == "axb":
pre_preprocessors = [
functools.partial(
preprocessors.rekey,
key_map={
"premise": "sentence1",
"hypothesis": "sentence2",
"label": "label",
"idx": "idx"
})
]
      # The default tasks have already been registered elsewhere, so only add
      # the example-logging version.
if log_examples:
seqio.TaskRegistry.add(
f"{model_prefix}super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b), seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# This version of the task removes the initial text token of the dataset
# name
seqio.TaskRegistry.add(
f"{model_prefix}taskless_super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# This version of the task adds a task index to the first token.
seqio.TaskRegistry.add(
f"{model_prefix}task_index_super_glue_{b.name}_v102{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name=f"super_glue/{b.name}:1.0.2",
splits=["test"] if b.name in ["axb", "axg"] else None),
preprocessors=pre_preprocessors + [
get_glue_text_preprocessor(b),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer[b.name]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats,
)
# ========= Definite Pronoun Resolution =========
# Similar to the Winograd Schema Challenge but doesn't require semantic
# knowledge to disambiguate between two different options. Training on this
# has been shown to be effective for increasing performance on WSC.
# [Kocijan, et. al., 2019](https://arxiv.org/abs/1905.06290)
if log_examples:
      dpr_postprocess_fn = functools.partial(
          pt_postprocessors.postprocess_with_examples, utils.identity)
dpr_metric_fns = [
functools.partial(pt_metrics.metric_with_examples, metrics.accuracy)
] + [functools.partial(pt_metrics.text_examples, task_name="dpr")]
else:
dpr_postprocess_fn = utils.identity
dpr_metric_fns = [metrics.accuracy]
# DPR without the initial dataset text token.
seqio.TaskRegistry.add(
f"{model_prefix}taskless_dpr_v001_simple{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="definite_pronoun_resolution:1.1.0"),
preprocessors=[
preprocessors.definite_pronoun_resolution_simple,
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=dpr_postprocess_fn,
metric_fns=dpr_metric_fns,
output_features=feats,
)
seqio.TaskRegistry.add(
f"{model_prefix}task_index_dpr_v001_simple{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="definite_pronoun_resolution:1.1.0"),
preprocessors=[
preprocessors.definite_pronoun_resolution_simple,
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=dpr_postprocess_fn,
        metric_fns=dpr_metric_fns,
output_features=feats,
)
# ========== WSC ==========
# This adds a "simplified" version of WSC like they do in t5. Instead of
# predicting if the supplied referent matches the highlighted pronoun in the
# text, the model generate a referent. If the referent matches the supplied
# one then the model predictions True, otherwise it will predict false. This
# means that we can only train on examples where the referent is correct.
# T5 does WSC in two different tasks. The first is a training task that only
# uses examples where the referent is true. We never do any evaluation on
# this dataset so the training data doesn't need anything like post
# processors or metric_fns. The second task is the evaluation task. This
# considers all examples and does use the output functions. These tasks are
# then combined into a mixture.
    # Looking at positive and negative examples of WSC can be hard. If the label
    # is 1 then the target referent should match the model's predicted referent.
    # If they match, this example was correct; if they don't, the model was
    # wrong. If the label is 0, then the target referent is not correct and we
    # hope the model outputs something different.
if log_examples:
postprocess_fn = functools.partial(
pt_postprocessors.postprocess_with_examples,
postprocessors.wsc_simple)
metric_fns = [
functools.partial(pt_metrics.metric_with_examples, metrics.accuracy),
functools.partial(pt_metrics.text_examples, task_name="wsc")
]
else:
postprocess_fn = postprocessors.wsc_simple
metric_fns = [metrics.accuracy]
if log_examples:
# This version outputs examples to tensorboard.
seqio.TaskRegistry.add(
f"{model_prefix}super_glue_wsc_v102_simple_eval{examples_suffix}",
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=("validation", "test")),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
# This mixture is WSC where predictions are output to tensorboard.
seqio.MixtureRegistry.add(
f"{model_prefix}super_glue_wsc_and_dev_v102_simple{examples_suffix}",
[
# We don't need a special version of the training data because it
# is never processed for output anyway.
f"{model_prefix}super_glue_wsc_v102_simple_train",
f"{model_prefix}super_glue_wsc_v102_simple_eval{examples_suffix}"
],
default_rate=1.0)
    # This version removes the initial dataset text token.
seqio.TaskRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2", splits=("train",)),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=True),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[],
output_features=feats)
seqio.TaskRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=["validation", "test"]),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
seqio.MixtureRegistry.add(
(f"{model_prefix}taskless_super_glue_wsc_and_dev_v102_simple"
f"{examples_suffix}"),
[
# We don't need a special version of the training data because it is
# never processed for output anyway.
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
(f"{model_prefix}taskless_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}")
],
default_rate=1.0)
# This version adds a task index as the first token.
seqio.TaskRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2", splits=("train",)),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=True),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
metric_fns=[],
output_features=feats)
seqio.TaskRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}"),
source=seqio.TfdsDataSource(
tfds_name="super_glue/wsc.fixed:1.0.2",
splits=["validation", "test"]),
preprocessors=[
functools.partial(
preprocessors.wsc_simple, correct_referent_only=False),
pt_preprocessors.remove_first_text_token,
seqio.preprocessors.tokenize,
functools.partial(
pt_preprocessors.add_sentinel_to_beginning,
field="inputs",
offset=super_glue_task_indexer["wsc"]),
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
postprocess_fn=postprocess_fn,
metric_fns=metric_fns,
output_features=feats)
seqio.MixtureRegistry.add(
(f"{model_prefix}task_index_super_glue_wsc_and_dev_v102_simple"
f"{examples_suffix}"),
[(f"{model_prefix}task_index_super_glue_wsc_v102_simple_train"
f"{examples_suffix}"),
(f"{model_prefix}task_index_super_glue_wsc_v102_simple_eval"
f"{examples_suffix}")],
default_rate=1.0)
# =========== Mixtures ==========
# These are Mixtures of the task index tasks to train on all super glue tasks
# at once.
# This is a copy of the super glue weights from t5 but adapted to use the task
# index version of the datasets.
WEIGHT_MAPPING = {
"task_index_super_glue_wsc_v102_simple_train": 259.,
"task_index_super_glue_wsc_v102_simple_eval_examples": 0.,
"task_index_super_glue_boolq_v102_examples": 9_427.,
"task_index_super_glue_cb_v102_examples": 250.,
"task_index_super_glue_copa_v102_examples": 400.,
"task_index_super_glue_multirc_v102_examples": 27_243.,
"task_index_super_glue_record_v102_examples": 138_854.,
"task_index_super_glue_rte_v102_examples": 2_490.,
"task_index_super_glue_wic_v102_examples": 5_428.,
}
WEIGHT_MAPPING_WITH_DPR = {
"task_index_dpr_v001_simple_examples": 1_322.,
"task_index_super_glue_wsc_v102_simple_train": 259.,
"task_index_super_glue_wsc_v102_simple_eval_examples": 0.,
"task_index_super_glue_boolq_v102_examples": 9_427.,
"task_index_super_glue_cb_v102_examples": 250.,
"task_index_super_glue_copa_v102_examples": 400.,
"task_index_super_glue_multirc_v102_examples": 27_243.,
"task_index_super_glue_record_v102_examples": 138_854.,
"task_index_super_glue_rte_v102_examples": 2_490.,
"task_index_super_glue_wic_v102_examples": 5_428.,
}
seqio.MixtureRegistry.add("task_index_super_glue_v102_examples_proportional",
list(WEIGHT_MAPPING.items()))
seqio.MixtureRegistry.add(
"task_index_super_glue_with_dpr_v102_examples_proportional",
list(WEIGHT_MAPPING_WITH_DPR.items()))
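# Hedged usage sketch: consuming one of the mixtures registered above through
# seqio's public API (sequence lengths are illustrative; get_dataset needs the
# TFDS super_glue data to be available).
mixture = seqio.get_mixture_or_task(
    "task_index_super_glue_v102_examples_proportional")
ds = mixture.get_dataset(
    sequence_length={"inputs": 512, "targets": 62},
    split="train")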
| 42.77451
| 80
| 0.67276
| 2,154
| 17,452
| 5.16156
| 0.160631
| 0.050998
| 0.03274
| 0.042094
| 0.664238
| 0.632218
| 0.604965
| 0.586167
| 0.565839
| 0.54812
| 0
| 0.021744
| 0.246333
| 17,452
| 407
| 81
| 42.879607
| 0.823538
| 0.253725
| 0
| 0.693603
| 0
| 0
| 0.203536
| 0.174118
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.047138
| 0
| 0.047138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05e5bab9ff77cdee550c0152d15077d78e190eff
| 952
|
py
|
Python
|
src/runtime/tasks.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
src/runtime/tasks.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
src/runtime/tasks.py
|
HitLuca/predict-python
|
14f2f55cb29f817a5871d4c0b11a3758285301ca
|
[
"MIT"
] | null | null | null |
from django_rq.decorators import job
from src.core.core import runtime_calculate
from src.jobs.models import JobStatuses
from src.jobs.ws_publisher import publish
from src.logs.models import Log
from src.utils.file_service import get_log
@job("default", timeout='1h')
def runtime_task(job, model):
print("Start runtime task ID {}".format(job.pk))
try:
job.status = JobStatuses.RUNNING.value
job.save()
log = Log.objects.get(pk=job.config['log_id'])
run_log = get_log(log.path)
result_data = runtime_calculate(run_log, model.to_dict())
result = result_data['prediction']
job.result = result
job.status = JobStatuses.COMPLETED.value
job.error = ''
    except Exception as e:
        print("error " + repr(e))
        job.status = JobStatuses.ERROR.value
        job.error = repr(e)
        raise e
finally:
job.save()
publish(job)
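# Hedged usage sketch: the @job decorator from django_rq attaches a .delay()
# helper, so callers enqueue the task on the "default" queue rather than
# running it inline (job/model instances come from the surrounding app):
# runtime_task.delay(job, model)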
| 30.709677
| 65
| 0.657563
| 129
| 952
| 4.682171
| 0.44186
| 0.057947
| 0.099338
| 0.043046
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001364
| 0.230042
| 952
| 30
| 66
| 31.733333
| 0.822647
| 0
| 0
| 0.074074
| 0
| 0
| 0.057773
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.222222
| 0
| 0.259259
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05ec45e9e0486f8c0920e8e4a6acabaf4897caee
| 417
|
py
|
Python
|
ch3/ricolisp/token.py
|
unoti/rico-lisp
|
367f625dcd086e207515bdeb5561763754a3531c
|
[
"MIT"
] | null | null | null |
ch3/ricolisp/token.py
|
unoti/rico-lisp
|
367f625dcd086e207515bdeb5561763754a3531c
|
[
"MIT"
] | null | null | null |
ch3/ricolisp/token.py
|
unoti/rico-lisp
|
367f625dcd086e207515bdeb5561763754a3531c
|
[
"MIT"
] | null | null | null |
from collections import UserString
from typing import List
class Token(UserString):
"""A string that has additional information about the source code for the string."""
def __init__(self, s: str, line_number:int, character_number: int, filename: str = None):
super().__init__(s)
self.line_number = line_number
self.character_number = character_number
self.filename = filename
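# Usage sketch (values illustrative): a Token compares and measures like a
# plain str while carrying its source position.
tok = Token('define', line_number=3, character_number=7, filename='prog.lisp')
assert tok == 'define'
assert len(tok) == 6
print(tok.filename, tok.line_number, tok.character_number)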
| 37.909091
| 93
| 0.717026
| 54
| 417
| 5.277778
| 0.574074
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.203837
| 417
| 11
| 94
| 37.909091
| 0.858434
| 0.18705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05ed3bd6a82da190685915c3b42fde3a3b5e118a
| 2,655
|
py
|
Python
|
utils.py
|
ali-ramadhan/wxConch
|
1106ce17d25f96a038ca784029261faafd7cfaf9
|
[
"MIT"
] | 1
|
2019-03-09T01:10:59.000Z
|
2019-03-09T01:10:59.000Z
|
utils.py
|
ali-ramadhan/weather-prediction-model-consensus
|
1106ce17d25f96a038ca784029261faafd7cfaf9
|
[
"MIT"
] | 1
|
2019-08-19T12:26:06.000Z
|
2019-08-19T12:26:06.000Z
|
utils.py
|
ali-ramadhan/weather-prediction-model-consensus
|
1106ce17d25f96a038ca784029261faafd7cfaf9
|
[
"MIT"
] | null | null | null |
import os
import time
import math
import logging.config
from datetime import datetime
from subprocess import run
from urllib.request import urlopen, urlretrieve
from urllib.parse import urlparse, urljoin
import smtplib, ssl
from os.path import basename
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from bs4 import BeautifulSoup
logging.config.fileConfig("logging.ini", disable_existing_loggers=False)
logger = logging.getLogger(__name__)
HEADERS = {
"User-Agent": "wxConch (Python3.7) https://github.com/ali-ramadhan/wxConch",
"From": "alir@mit.edu"
}
def K2F(K):
return (K - 273.15) * (9/5) + 32
def download_file(url, local_filepath):
run(["wget", "-nc", url, "-O", local_filepath])
def make_soup(url):
html = urlopen(url).read()
return BeautifulSoup(html, features="lxml")
def download_images(url, filename=None):
soup = make_soup(url)
# Make a list of bs4 element tags.
images = [img for img in soup.findAll("img")]
logger.debug("{:s}: {:d} images found.".format(url, len(images)))
# Compile our unicode list of image links.
image_links = [img.get("src") for img in images]
    for img_url in image_links:
        # derive a per-image filename unless the caller supplied one
        img_filename = filename if filename is not None else img_url.split('/')[-1]
        url_parts = urlparse(url)
        real_img_url = url_parts.scheme + "://" + url_parts.netloc + img_url
        logger.debug("Downloading image: {:s} -> {:s}".format(real_img_url, img_filename))
        # urlretrieve(real_img_url, img_filename)
        download_file(real_img_url, img_filename)
return image_links
def send_email(send_from, send_to, subject, text, files=None, gmail="wxconch.forecast@gmail.com"):
assert isinstance(send_to, list)
msg = MIMEMultipart()
msg["From"] = send_from
msg["To"] = COMMASPACE.join(send_to)
msg["Date"] = formatdate(localtime=True)
msg["Subject"] = subject
msg.attach(MIMEText(text))
for f in files or []:
with open(f, "rb") as fil:
part = MIMEApplication(fil.read(), Name=basename(f))
# After the file is closed
part['Content-Disposition'] = 'attachment; filename="%s"' % basename(f)
msg.attach(part)
port = 465 # For SSL
password = input("Gmail password for {:s}: ".format(gmail))
# Create a secure SSL context
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", port, context=context) as server:
server.login(gmail, password)
server.sendmail(send_from, send_to, msg.as_string())
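# Quick sanity checks for the Kelvin -> Fahrenheit helper above:
assert K2F(273.15) == 32.0               # freezing point of water
assert round(K2F(373.15), 6) == 212.0    # boiling point at 1 atm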
| 28.858696
| 98
| 0.680979
| 362
| 2,655
| 4.878453
| 0.428177
| 0.023783
| 0.02265
| 0.030578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008407
| 0.193597
| 2,655
| 91
| 99
| 29.175824
| 0.816441
| 0.06403
| 0
| 0
| 0
| 0
| 0.121872
| 0.010492
| 0
| 0
| 0
| 0
| 0.016667
| 1
| 0.083333
| false
| 0.033333
| 0.25
| 0.016667
| 0.383333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05f2bf19df0a5655faf30da01ad995b33a5ff920
| 4,674
|
py
|
Python
|
create_multi_langs/command_line.py
|
mychiux413/ConstConv
|
6c2190d1bb37ae5cfef8464f88371db97719b032
|
[
"MIT"
] | null | null | null |
create_multi_langs/command_line.py
|
mychiux413/ConstConv
|
6c2190d1bb37ae5cfef8464f88371db97719b032
|
[
"MIT"
] | null | null | null |
create_multi_langs/command_line.py
|
mychiux413/ConstConv
|
6c2190d1bb37ae5cfef8464f88371db97719b032
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
from create_multi_langs.creater.go import CreaterGo
from create_multi_langs.creater.python import CreaterPython
from create_multi_langs.creater.python_typing import CreaterPythonTyping
from create_multi_langs.creater.typescript_backend import CreaterTypeScriptBackEnd # noqa: E501
from create_multi_langs.creater.typescript_frontend import CreaterTypeScriptFrontEnd # noqa: E501
from create_multi_langs.creater.javascript_backend import CreaterJavaScriptBackEnd # noqa: E501
from create_multi_langs.creater.javascript_frontend import CreaterJavaScriptFrontEnd # noqa: E501
import argparse
import time
import os
import sys
from functools import partial
VALID_EXTS = ['.py', '.go', '.ts', '.js', '.mjs']
def main():
    parser = argparse.ArgumentParser(
        description='Generate constant definitions for multiple languages from a CSV.')
parser.add_argument(dest='from_csv',
type=str, help='Generate script from csv')
parser.add_argument(dest='to_file',
type=str,
help='generate file path, support ext: .go .py .js .ts .mjs') # noqa: E501
parser.add_argument('--backend', '-b', action='store_true',
help='Default is generate frontend script for js/ts')
parser.add_argument('--py_typing', '-t', action='store_true',
help='Default is generate python script without typing') # noqa: E501
parser.add_argument('--watch', '-w', action='store_true',
help='Watch csv file changed')
    parser.add_argument('--sep', '-s', default=',', type=str,
                        help='CSV separator character')
naming_help = """specify your property style,
[valid options]
`ucc`(UpperCamelCase),
`lcc`(lowerCamelCase),
`upper`(ALL_UPERCASE_UNDERSCORE),
`lower`(all_lowercase_underscore)
[default setting]
Go: `ucc`,
Python: `lower`,
Typescript: `lcc`,
javascript: `lcc`
"""
parser.add_argument('--naming_rule', '-n', type=str,
help=naming_help)
args = parser.parse_args()
args.from_csv = os.path.abspath(args.from_csv)
args.to_file = os.path.abspath(args.to_file)
assert os.path.exists(args.from_csv), \
"The csv file `{}` doesn't exists".format(args.from_csv)
assert os.path.splitext(args.to_file)[1] in VALID_EXTS, \
"The extension filename must be in " + str(VALID_EXTS)
if os.path.exists(args.to_file):
print('[WARNING] the to_file `{}` already exists'.format(
args.to_file) +
', and will be overwritten.')
if args.watch:
try:
print('[Enable Watching Mode]')
print('[From CSV File] {}'.format(args.from_csv))
print('[To File] {}'.format(args.to_file))
last_mtime = os.stat(args.from_csv).st_mtime
while True:
time.sleep(0.5)
current_mtime = os.stat(args.from_csv).st_mtime
if current_mtime != last_mtime:
print('Detect csv file changed...')
_generate(args)
last_mtime = current_mtime
except KeyboardInterrupt:
print('Stop watching')
sys.exit(0)
if os.path.exists(args.to_file):
yes_no = input('Overwrite (y/n)?').lower()
if yes_no != "y":
print('Abort program')
sys.exit(0)
_generate(args)
def _generate(args: argparse.Namespace):
to_file = args.to_file
if to_file.endswith('.go'):
from_csv_file = CreaterGo.from_csv_file
elif to_file.endswith('.py'):
if args.py_typing:
from_csv_file = CreaterPythonTyping.from_csv_file
else:
from_csv_file = CreaterPython.from_csv_file
elif to_file.endswith('.ts'):
if args.backend:
from_csv_file = CreaterTypeScriptBackEnd.from_csv_file
else:
from_csv_file = CreaterTypeScriptFrontEnd.from_csv_file
elif to_file.endswith(('.js', '.mjs')):
if args.backend:
from_csv_file = CreaterJavaScriptBackEnd.from_csv_file
else:
from_csv_file = CreaterJavaScriptFrontEnd.from_csv_file
else:
        raise ValueError(
            "must set to_file as .go .py .ts .js or .mjs, but got {}".format(
                to_file
            ))
if args.naming_rule:
from_csv_file = partial(from_csv_file, naming_rule=args.naming_rule)
creater = from_csv_file(
args.from_csv,
to_file,
sep=args.sep)
creater()
if __name__ == "__main__":
main()
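# Hedged usage sketch: drive the CLI programmatically (file names and the
# argv[0] label are illustrative, not from the original).
# import sys
# from create_multi_langs.command_line import main
# sys.argv = ['create-multi-langs', 'consts.csv', 'consts.ts', '--naming_rule', 'lcc']
# main()  # writes a lowerCamelCase TypeScript constants file from consts.csv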
| 37.095238
| 99
| 0.627942
| 566
| 4,674
| 4.952297
| 0.273852
| 0.069925
| 0.070639
| 0.049946
| 0.251873
| 0.224402
| 0.166964
| 0.052801
| 0
| 0
| 0
| 0.006657
| 0.260804
| 4,674
| 125
| 100
| 37.392
| 0.804631
| 0.0184
| 0
| 0.109091
| 0
| 0
| 0.214582
| 0.024012
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.018182
| false
| 0
| 0.118182
| 0
| 0.136364
| 0.063636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05f359b7dd7f8c17e74d1e4576ab789a5ca9047c
| 297
|
py
|
Python
|
test_resources/run_tests.py
|
tud-python-courses/lesson-builder
|
11b1cc958723e9f75de27cde68daa0fdc18b929f
|
[
"MIT"
] | null | null | null |
test_resources/run_tests.py
|
tud-python-courses/lesson-builder
|
11b1cc958723e9f75de27cde68daa0fdc18b929f
|
[
"MIT"
] | null | null | null |
test_resources/run_tests.py
|
tud-python-courses/lesson-builder
|
11b1cc958723e9f75de27cde68daa0fdc18b929f
|
[
"MIT"
] | null | null | null |
__author__ = 'Justus Adam'
__version__ = '0.1'
def main():
import unittest
import sys
import os
m = os.path.dirname(__file__)
sys.path = [m, os.path.split(m)[0]] + sys.path
import test
unittest.main(test)
if __name__ == '__main__':
main()
else:
del main
| 13.5
| 50
| 0.606061
| 41
| 297
| 3.902439
| 0.536585
| 0.0375
| 0.0875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013699
| 0.262626
| 297
| 22
| 51
| 13.5
| 0.716895
| 0
| 0
| 0
| 0
| 0
| 0.073826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.285714
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05fd8b2f68e0ad751b568376c91ded4488f3dd84
| 55,975
|
py
|
Python
|
cc_bm_parallel_pyr_dev.py
|
xdenisx/ice_drift_pc_ncc
|
f2992329e8509dafcd37596271e80cbf652d14cb
|
[
"MIT"
] | 3
|
2021-11-10T04:03:10.000Z
|
2022-02-27T10:36:02.000Z
|
cc_bm_parallel_pyr_dev.py
|
xdenisx/ice_drift_pc_ncc
|
f2992329e8509dafcd37596271e80cbf652d14cb
|
[
"MIT"
] | 1
|
2021-10-12T17:29:53.000Z
|
2021-10-12T17:29:53.000Z
|
cc_bm_parallel_pyr_dev.py
|
xdenisx/ice_drift_pc_ncc
|
f2992329e8509dafcd37596271e80cbf652d14cb
|
[
"MIT"
] | null | null | null |
import matplotlib
matplotlib.use('Agg')
# coding: utf-8
#
# Ice drift retrieval algorithm based on [1] from a pair of SAR images
# [1] J. P. Lewis, "Fast Normalized Cross-Correlation", Industrial Light and Magic.
#
##################################################
# Last modification: 22 July, 2019
# TODO:
# 1) Pyramidal strategy (do we need this?)
# 2) add ocean cm maps ('Balance' for divergence)
##################################################
import cv2
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
import time
import multiprocessing
from skimage.feature import match_template
from skimage.transform import rescale, resize, downscale_local_mean
from skimage import io, img_as_ubyte
from skimage.morphology import disk
from skimage.filters.rank import median
from skimage.filters import laplace
from skimage import exposure
from skimage.filters.rank import gradient
from skimage import filters
from sklearn.neighbors import KDTree
import sys
import sklearn.neighbors
import re
import geojson
import shapefile as sf
import pyproj
from osgeo import gdal, osr
from datetime import datetime
from netCDF4 import Dataset
from osgeo import gdal, osr, gdal_array, ogr
import warnings
warnings.filterwarnings('ignore')
import matplotlib as mpl
import time
def remove_files(ddir):
    ffiles = glob.glob('%s/*.*' % ddir)
    for ifile in ffiles:
        try:
            os.remove(ifile)
        except OSError:
            pass
def length_between(v1, v2):
v1_length = np.hypot(v1[0], v1[1])
v2_length = np.hypot(v2[0], v2[1])
return abs(v1_length - v2_length)
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
angle_between((1, 0, 0), (1, 0, 0))
0.0
angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
angle = np.arccos(np.dot(v1_u, v2_u))
if np.isnan(angle):
if (v1_u == v2_u).all():
return np.degrees(0.0)
else:
return np.degrees(np.pi)
return np.degrees(angle)
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 6}
matplotlib.rc('font', **font)
def plot_peaks(immm1, immm2, uuu, vvv, iidx_line, iidx_row, resss, pref,
lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1):
plt.clf()
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1)
ax2 = plt.subplot(1, 3, 2)
ax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2)
ax1.imshow(immm1, cmap=plt.cm.gray)
ax1.set_axis_off()
ax1.set_title('template')
ax2.imshow(immm2, cmap=plt.cm.gray)
ax2.set_axis_off()
ax2.set_title('image')
# highlight matched region
rect = plt.Rectangle((uuu - Conf.grid_step, vvv - Conf.grid_step), Conf.block_size, Conf.block_size,
edgecolor='r', facecolor='none')
ax2.add_patch(rect)
ax3.imshow(resss)
ax3.set_axis_off()
ax3.set_title('match_template`\nresult')
# highlight matched region
ax3.autoscale(False)
ax3.plot(uuu, vvv, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)
    # !Plot control information
plt.title('ll1: %s rr1:%s ll2:%s rr2:%s\nu: %s v: %s Li0: %s Li1: %s' %
(lline_1, rrow_1, lline_2, rrow_2,
u_direct, v_direct, Li0, Li1))
# plt.show()
plt.savefig('peaks_plot/%s_%s_%s.png' % (pref, iidx_line, iidx_row), bbox_inches='tight', dpi=300)
# TODO: check
def check_borders(im):
    ''' A run of n zero pixels along a line means the image has a black border '''
flag = 0
ch = 0
j = 0
for i in range(im.shape[0] - 1):
while j < im.shape[1] - 1 and im[i,j] > 0:
j += 1
else:
if j < im.shape[1] - 1 and (im[i,j] == 0 or im[i,j] == 255):
while im[i,j] == 0 and j < im.shape[1] - 1:
j += 1
ch += 1
if ch >= 15:
flag = 1
#print('Black stripe detected!')
return flag
j = 0
ch = 0
return flag
# Matching
def matching(templ, im):
    ''' Match a template against an image via normalized cross-correlation '''
    # Direct matching
    #pool = Pool(processes=3)
    #result = pool.apply(match_template, args=(im, templ, True, 'edge',))
    #pool.close()
    result = match_template(im, templ, True, 'edge',)
    # Dirichlet-style border condition: zero the response near the edges
    # (not strictly needed with 'edge' padding in match_template)
    #n = Conf.block_size #/ 2 # 100
n = int(im.shape[0]/10.)
# First and last n lines
result[0:n, :] = 0.
result[-n:, :] = 0.
# First and last n rows
result[:, 0:n] = 0.
result[:, -n:] = 0.
ij = np.unravel_index(np.argmax(result), result.shape)
u_peak, v_peak = ij[::-1]
#print('u_peak, v_peak: (%s, %s)' % (u_peak, v_peak))
return u_peak, v_peak, result
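# Hedged usage sketch for matching() (kept commented so the script's flow is
# unchanged; the expected peak follows from where the patch is cut):
# im = np.random.rand(200, 200)
# templ = im[50:82, 70:102]        # 32x32 patch centred at (row 66, col 86)
# u, v, res = matching(templ, im)
# print(u, v)                      # ~ (86, 66): u is the column, v the row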
def filter_local_homogenity(arr_cc_max, y, x, u, v, filter_all=False):
    '''
    Local homogeneity filtering (refine the CC peak)
    y - axis (top -> bottom)
    x - axis (left -> right)
    u - along Y (top -> bottom)
    v - along X (left -> right)
    mask - indicates that a vector has been reprocessed
    '''
# Mask array with refined tie points
mask = np.zeros_like(arr_cc_max)
# TODO: processing of border vectors
for i in range(1, x.shape[0] - 1):
for j in range(1, x.shape[1] - 1):
# Calculate median of u and v for 8 neighbors
            # Matrix of neighbours
nn = np.zeros(shape=(2, 3, 3))
nn[:] = np.nan
# U and V
#if not np.isnan(u[i - 1, j - 1]):
nn[0, 0, 0] = u[i - 1, j - 1]
nn[0, 0, 1] = u[i - 1, j]
nn[0, 0, 2] = u[i - 1, j + 1]
nn[1, 0, 0] = v[i - 1, j - 1]
nn[1, 0, 1] = v[i - 1, j]
nn[1, 0, 2] = v[i - 1, j + 1]
nn[0, 1, 0] = u[i, j-1]
nn[0, 1, 2] = u[i, j+1]
nn[1, 1, 0] = v[i, j - 1]
nn[1, 1, 2] = v[i, j + 1]
nn[0, 2, 0] = u[i + 1, j - 1]
nn[0, 2, 1] = u[i + 1, j]
nn[0, 2, 2] = u[i + 1, j + 1]
nn[1, 2, 0] = v[i + 1, j - 1]
nn[1, 2, 1] = v[i + 1, j]
nn[1, 2, 2] = v[i + 1, j + 1]
# Check number of nans and find median for U and V
uu = nn[0, :, :]
            # If fewer than 3 valid neighbors (more than 5 of the 8 are NaN)
            if len(uu[np.isnan(uu)]) > 5:
u[i, j] = np.nan
v[i, j] = np.nan
arr_cc_max[i, j] = 0
#print 'NANs > 3!'
else:
u_median = np.nanmedian(nn[0, :, :])
v_median = np.nanmedian(nn[1, :, :])
if not filter_all:
if np.isnan(u[i, j]) or abs(u[i, j] - u_median) > abs(u_median) or \
abs(v[i, j] - v_median) > abs(v_median):
u[i, j] = u_median
v[i, j] = v_median
mask[i, j] = 1
arr_cc_max[i, j] = 1
#print '%s %s %s %s' % (u[i, j], v[i, j], u_median, v_median)
else:
u[i, j] = u_median
v[i, j] = v_median
mask[i, j] = 1
arr_cc_max[i, j] = 1
return mask, y, x, u, v, arr_cc_max
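# Hypothetical demo: a single spiky vector among uniform neighbours is replaced
# by the median of its 8 neighbours (mask marks the reprocessed cell).
def _demo_filter_local_homogenity():
    y, x = np.mgrid[0:5, 0:5]
    u = np.ones((5, 5))
    v = np.ones((5, 5))
    u[2, 2] = 50.                        # outlier U component
    cc = np.ones((5, 5))
    mask, _, _, u_f, _, _ = filter_local_homogenity(cc, y, x, u, v)
    print(u_f[2, 2], mask[2, 2])         # -> 1.0 1.0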
def filter_Rmin(arr_cc_max):
''' Minimum correlation threshold filtering '''
# Remove and plot vectors with R < Rmin, where Rmin = Rmean - Rstd
R_mean = np.nanmean(arr_cc_max)
R_std = np.nanstd(arr_cc_max)
R_min = R_mean - R_std
mask = np.zeros_like(arr_cc_max)
mask[(arr_cc_max < R_min)] = 1
return mask
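# Quick numeric check of the R_min rule: cells more than one standard deviation
# below the mean correlation are flagged with 1.
def _demo_filter_Rmin():
    arr = np.array([[0.9, 0.8], [0.85, 0.1]])
    print(filter_Rmin(arr))              # only the 0.1 cell is flagged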
def plot_scatter(fname, img, x, y, msize=0.1):
''' Plot scatter of initial points '''
plt.clf()
    plt.imshow(img, cmap='gray')
plt.scatter(x, y, s=msize, color='red')
plt.savefig(fname, bbox_inches='tight', dpi=600)
def plot_arrows(fname, img, x, y, u, v, cc, arrwidth=0.005, headwidth=3.5, flag_color=True):
''' Plot arrows on top of image '''
plt.clf()
fig, ax = plt.subplots(figsize=(16, 9))
plt.imshow(img, cmap='gray')
if flag_color:
        plt.quiver(x, y, u, v, cc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
                   scale=1, cmap='jet')
cbar = plt.colorbar()
cbar.set_label('Correlation coeff.')
else:
plt.quiver(x, y, u, v, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, color='yellow')
plt.savefig(fname, bbox_inches='tight', dpi=600)
# Plot start points
plt.clf()
fig, ax = plt.subplots(figsize=(16, 9))
plt.imshow(img, cmap='gray')
plt.scatter(x[~np.isnan(u)], y[~np.isnan(u)], s=Conf.grid_step/2., facecolors='yellow', edgecolors='black')
plt.savefig('%s/pts_%s' % (os.path.dirname(fname), os.path.basename(fname)), bbox_inches='tight', dpi=600)
# TODO!: remove
def plot_arrows_one_color(fname, img, x, y, u, v, cc, arrwidth=0.005, headwidth=3.5, flag_color=False):
''' Plot arrows on top of image '''
plt.clf()
plt.imshow(img, cmap='gray')
if flag_color:
plt.quiver(x, y, u, v, cc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, cmap='jet')
cbar = plt.colorbar()
cbar.set_label('Correlation coeff.')
else:
plt.quiver(x, y, u, v, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, color='yellow')
plt.savefig(fname, bbox_inches='tight', dpi=1200)
def crop_images(img1, img2, y0, x0):
    '''
    :param img1: image1
    :param img2: image2
    :param y0: line (Y) coordinate of the patch center
    :param x0: row (X) coordinate of the patch center
    :return: image patches
    '''
# TODO: x2, y2 for Conf.img2
height, width = img1.shape
# Crop Conf.img1
iidx_line = int(x0)
iidx_row = int(y0)
LLt0 = np.max([0, iidx_line - Conf.grid_step])
LLt1 = np.max([0, iidx_row - Conf.grid_step])
RRt0 = np.min([iidx_line + Conf.grid_step, height])
RRt1 = np.min([iidx_row + Conf.grid_step, width])
# Crop patch from Conf.img1
im1 = Conf.img1[LLt0:RRt0, LLt1:RRt1]
LLi0 = np.max([0, iidx_line - Conf.block_size * Conf.search_area])
LLi1 = np.max([0, iidx_row - Conf.block_size * Conf.search_area])
RRi0 = np.min([iidx_line + Conf.block_size * Conf.search_area, height])
RRi1 = np.min([iidx_row + Conf.block_size * Conf.search_area, width])
# Crop search area from Conf.img2
im2 = Conf.img2[LLi0:RRi0, LLi1:RRi1]
# Offset for image1
    y_offset_img1 = iidx_line  # - Conf.block_size/2
    x_offset_img1 = iidx_row  # - Conf.block_size/2
#####################
# Filtering
#####################
# Median filtering
if Conf.img_median_filtering:
# print 'Median filtering'
# im2 = median(im2, disk(3))
# im1 = median(im1, disk(3))
im2 = median(im2, disk(Conf.median_kernel))
im1 = median(im1, disk(Conf.median_kernel))
if Conf.img_laplace_filtering:
im2 = laplace(im2)
im1 = laplace(im1)
if Conf.img_gradient_filtering:
im2 = gradient(im2, disk(3))
im1 = gradient(im1, disk(3))
if Conf.img_scharr_filtering:
# filters.scharr(camera)
im2 = filters.scharr(im2)
im1 = filters.scharr(im1)
########################
# End filtering
########################
# Check for black stripes
flag1 = check_borders(im1)
flag2 = check_borders(im2)
return im1, im2
# TODO: EXPERIMENTAL
def cc_bm(arguments):
# BM test flag
f=0
# Parse arguments
iidx_line, iidx_row, LLi0, LLi1, im1_name, im2_name, pref, lll_line_start, lll_row_start = arguments
if iidx_line is not None:
# Open two images
im1 = io.imread(im1_name, 0)
im2 = io.imread(im2_name, 0)
#####################
# Filtering
#####################
# Median filtering
if Conf.img_median_filtering:
# print 'Median filtering'
# im2 = median(im2, disk(3))
# im1 = median(im1, disk(3))
im1 = median(im1, disk(Conf.median_kernel))
im2 = median(im2, disk(Conf.median_kernel))
if Conf.img_laplace_filtering:
im1 = laplace(im1)
im2 = laplace(im2)
if Conf.img_gradient_filtering:
im1 = gradient(im1, disk(3))
im2 = gradient(im2, disk(3))
if Conf.img_scharr_filtering:
# filters.scharr(camera)
im1 = filters.scharr(im1)
im2 = filters.scharr(im2)
########################
# End filtering
########################
# Check for black stripes
flag1 = check_borders(im1)
flag2 = check_borders(im2)
# No black borders in the first image
if flag1 == 0 and flag2 == 0:
u_direct, v_direct, result = matching(im1, im2)
# Peak maximum CC
cc_max = np.max(result)
# Get coordinates with offsets
lline_2, rrow_2 = u_direct + LLi0, v_direct + LLi1
lline_2_test, rrow_2_test = v_direct + LLi0, u_direct + LLi1
lline_1, rrow_1 = iidx_line, iidx_row
# If obtained end of bm vectors compared to start points of direct
if abs(lline_2_test - lll_line_start) < Conf.bm_th and abs(rrow_2_test - lll_row_start) < Conf.bm_th:
#print('\nlline_2_test, lll_line_start: (%s, %s)' % (lline_2_test, lll_line_start))
#print('rrow_2_test, lll_row_start: (%s, %s)\n' % (rrow_2_test, lll_row_start))
#print('\nCOORDS: %s %s' % (arr_lines_1[i, j], arr_rows_1[i, j]))
#print('COORDS: %s %s\n' % (arr_lines_2[i, j], arr_rows_2[i, j]))
# Peaks plot
if Conf.plot_correlation_peaks:
                plot_peaks(im1, im2, u_direct, v_direct, iidx_line, iidx_row, result, pref,
                           lline_1, rrow_1, lline_2, rrow_2, u_direct, LLi0, v_direct, LLi1)
#plot_peaks(im1_bm, im2_bm, uu_bm, vv_bm, iidx_line, iidx_row,
# result_bm, 'bm')
return lline_1, rrow_1, lline_2-lline_1, rrow_2-rrow_1, cc_max
#return lline_2, rrow_2, lline_1 - lline_2, rrow_1 - rrow_2, cc_max
else:
pass
else:
# if crop images have black stripes
if flag1 == 1:
print('IMG_1: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_1/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im1)
if flag2 == 1:
            print('IMG_2: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_2/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im2)
def filter_BM(th = 10):
''' Back matching test '''
Conf.bm_th = th # pixels
u_back = arr_rows_2_bm - arr_rows_1_bm
u_direct = arr_rows_2 - arr_rows_1
v_back = arr_lines_2_bm - arr_lines_1_bm
v_direct = arr_lines_2 - arr_lines_1
    # Direct and back vectors should cancel out: u_direct + u_back ~ 0
    u_dif = u_direct + u_back
    v_dif = v_direct + v_back
#arr_rows_1, arr_lines_1, arr_rows_2, arr_lines_2, arr_cc_max
#arr_rows_1_bm, arr_lines_1_bm, arr_rows_2_bm, arr_lines_2_bm, arr_cc_max_bm
mask = np.zeros_like(arr_cc_max)
mask[:,:] = 1
mask[((abs(u_dif) < Conf.bm_th) & (abs(v_dif) < Conf.bm_th))] = 0
#mask[((abs(arr_lines_1 - arr_lines_2_bm) > Conf.bm_th) | (abs(arr_rows_1 - arr_rows_2_bm) > Conf.bm_th))] = 1
return mask
def plot_arrows_from_list(pref, fname, img, ll_data, arrwidth=0.005, headwidth=3.5, flag_color=True):
    ''' Plot arrows on top of image from a list of data '''
plt.clf()
plt.imshow(img, cmap='gray')
    # Drop None entries, then unpack the components
ll_data = [x for x in ll_data if x is not None]
yyy = [i[0] for i in ll_data]
xxx = [i[1] for i in ll_data]
uuu = [i[2] for i in ll_data]
vvv = [i[3] for i in ll_data]
ccc = [i[4] for i in ll_data]
if flag_color:
plt.quiver(xxx, yyy, uuu, vvv, ccc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, cmap='jet')
cbar = plt.colorbar()
cbar.set_label('Correlation coeff.')
# Plot text with coordinates
for i in range(len(xxx)):
plt.text(xxx[i], yyy[i], r'(%s,%s)' % (yyy[i], xxx[i]), fontsize=0.07, color='yellow')
plt.text(xxx[i] + uuu[i], yyy[i] + vvv[i], r'(%s,%s)' % (yyy[i] + vvv[i], xxx[i] + uuu[i]),
fontsize=0.07, color='yellow') # bbox={'facecolor': 'yellow', 'alpha': 0.5}
else:
plt.quiver(xxx, yyy, uuu, vvv, ccc, angles='xy', scale_units='xy', width=arrwidth, headwidth=headwidth,
scale=1, color='yellow')
plt.savefig(fname, bbox_inches='tight', dpi=600)
# Filter outliers here and plot
plt.clf()
plt.imshow(img, cmap='gray')
def outliers_filtering(x1, y1, uu, vv, cc, radius=256, angle_difference=5, length_difference=30,
total_neighbours=7, angle_neighbours=7, length_neighbours=7):
# Get values of vector components
#uu = x2 - x1
#vv = y2 - y1
idx_mask = []
# Make 2D data of components
#data = np.vstack((uu, vv)).T
    x1, y1, uu, vv, cc = np.array(x1), np.array(y1), \
        np.array(uu, float), np.array(vv, float), np.array(cc, float)
# Radius based filtering
vector_start_data = np.vstack((x1, y1)).T
vector_start_tree = sklearn.neighbors.KDTree(vector_start_data)
for i in range(0, len(x1), 1):
# For list
# req_data = np.array([x1[i], y1[i]]).reshape(1, -1)
req_data = np.array((x1[i], y1[i])).reshape(1, -1)
# Getting number of neighbours
num_nn = vector_start_tree.query_radius(req_data, r=radius, count_only=True)
if num_nn[0] < total_neighbours:
idx_mask.append(i)
# Keep small vectors
if np.hypot(uu[i], vv[i]) < 10.:
pass
else:
nn = vector_start_tree.query_radius(req_data, r=radius)
data = np.vstack((uu[nn[0]], vv[nn[0]])).T
num_of_homo_NN = 0
num_of_length_homo_NN = 0
            ####################################################################
            # Loop over the neighbouring vectors to test angle/length homogeneity
            ####################################################################
for ii in range(num_nn[0]):
# Angle between "this" vector and others
angle_v1_v2 = angle_between([uu[i], vv[i]], [data[:, 0][ii], data[:, 1][ii]])
# Length between "this" vector and others
diff_v1_v2 = length_between([uu[i], vv[i]], [data[:, 0][ii], data[:, 1][ii]])
if angle_v1_v2 <= angle_difference:
num_of_homo_NN = num_of_homo_NN + 1
if diff_v1_v2 < length_difference:
num_of_length_homo_NN = num_of_length_homo_NN + 1
if not (num_of_homo_NN >= angle_neighbours and num_of_length_homo_NN >= length_neighbours):
idx_mask.append(i)
tt = list(set(idx_mask))
iidx_mask = np.array(tt)
# Delete bad data
'''
x1_f = np.delete(x1, iidx_mask)
y1_f = np.delete(y1, iidx_mask)
uu_f = np.delete(uu, iidx_mask)
vv_f = np.delete(vv, iidx_mask)
cc_f = np.delete(cc, iidx_mask)
'''
# Mask (=NaN) bad values
    uu = np.array(uu, float)
    vv = np.array(vv, float)
uu[iidx_mask] = np.nan
vv[iidx_mask] = np.nan
cc[iidx_mask] = 0.
return x1, y1, uu, vv, cc
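# Hypothetical demo: one rogue vector among 20 coherent ones gets masked to NaN
# (uses angle_between() above and the length_between() sketch; assumes
# sklearn.neighbors is importable as in the function itself).
def _demo_outliers_filtering():
    xs = np.arange(20.)
    ys = np.zeros(20)
    us = np.full(20, 15.)
    vs = np.zeros(20)
    us[7], vs[7] = -15., 12.             # rogue direction and length
    ccs = np.ones(20)
    _, _, uf, vf, _ = outliers_filtering(xs, ys, us, vs, ccs, radius=256)
    print(np.isnan(uf[7]), np.isnan(uf[0]))   # -> True False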
def export_to_vector(gtiff, x1, y1, u, v, output_path, gridded=False, data_format='geojson'):
print('\nStart exporting to vector file...')
if data_format not in ['geojson', 'shp']:
print('Invalid format')
return
x2 = x1 + u
y2 = y1 + v
ds = gdal.Open(gtiff)
geotransform = ds.GetGeoTransform()
old_cs = osr.SpatialReference()
old_cs.ImportFromWkt(ds.GetProjection())
new_cs = osr.SpatialReference()
new_cs.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
transform = osr.CoordinateTransformation(old_cs, new_cs)
if data_format == 'shp':
w = sf.Writer(sf.POLYLINE)
# w.field('id', 'C', '40')
w.field('lat1', 'C', '40')
w.field('lon1', 'C', '40')
w.field('lat2', 'C', '40')
w.field('lon2', 'C', '40')
w.field('drift_m', 'C', '40')
w.field('direction', 'C', '40')
if data_format == 'geojson':
features = []
pixelWidth = geotransform[1]
pixelHeight = geotransform[-1]
print('Pixel size (%s, %s) m' % (pixelWidth, pixelHeight))
for i in range(len(x1)):
# print '%s %s %s %s' % (y[ch], x[ch], u[ch], v[ch])
if np.isnan(x2[i]) == False and np.isnan(y2[i]) == False:
xx1 = geotransform[0] + float(x1[i]) * pixelWidth
yy1 = geotransform[3] + float(y1[i]) * pixelHeight
xx2 = geotransform[0] + float(x2[i]) * pixelWidth
yy2 = geotransform[3] + float(y2[i]) * pixelHeight
# print(xx1, yy1)
latlon = transform.TransformPoint(float(xx1), float(yy1))
lon1 = latlon[0]
lat1 = latlon[1]
latlon = transform.TransformPoint(float(xx2), float(yy2))
lon2 = latlon[0]
lat2 = latlon[1]
# Big circle length
try:
mag, az = calc_distance(float(lon1), float(lat1), float(lon2), float(lat2))
az = float(az)
if az <= 180.0:
az = az + 180.0
else:
az = az - 180.0
            except Exception:
mag, az = 999., 999.
if data_format == 'shp':
w.line(parts=[[[lon1, lat1], [lon2, lat2]]])
w.record(str(i), str(lat1), str(lon1), str(lat2), str(lon2), str(mag), str(az))
# coords_list.append((lon1, lat1))
if data_format == 'geojson':
new_line = geojson.Feature(geometry=geojson.LineString([(lon1, lat1), (lon2, lat2)]),
properties={'id': str(i),
'lat1': lat1,
'lon1': lon1,
'lat2': lat2,
'lon2': lon2,
'drift_m': mag,
'azimuth': az})
features.append(new_line)
if data_format == 'shp':
try:
w.save(output_path)
# create the PRJ file
prj = open('%s.prj' % output_path.split('.')[0], "w")
prj.write(
'''GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]''')
prj.close()
        except Exception as e:
            print('Impossible to create shapefile, sorry: %s' % e)
if data_format == 'geojson':
try:
collection = geojson.FeatureCollection(features=features)
output_geojson = open(output_path, 'w')
output_geojson.write(geojson.dumps(collection))
output_geojson.close()
        except Exception as e:
            print('Impossible to create geojson, sorry: %s' % e)
        else:
            print('Geojson creation success!\n')
def calc_distance(lon1, lat1, lon2, lat2):
import pyproj
geod = pyproj.Geod(ellps="WGS84")
angle1, angle2, distance = geod.inv(lon1, lat1, lon2, lat2)
return '%0.2f' % distance, '%0.1f' % angle2
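# Rough sanity check (hypothetical values): one degree of latitude on the WGS84
# ellipsoid is about 110.6 km near the equator; the returned azimuth follows
# pyproj's back-azimuth convention.
def _demo_calc_distance():
    print(calc_distance(0.0, 0.0, 0.0, 1.0))   # ~('110574.39', '180.0')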
def median_filtering(x1, y1, uu, vv, cc, radius=512, total_neighbours=7):
'''
Median filtering of resultant ice vectors as a step before deformation calculation
'''
fast_ice_th = 5.
# Get values of vector components
#uu = x2 - x1
#vv = y2 - y1
idx_mask = []
# Make 2D data of components
#data = np.vstack((uu, vv)).T
x1, y1, uu, vv, cc = np.array(x1), np.array(y1), np.array(uu), np.array(vv), np.array(cc)
# Radius based filtering
vector_start_data = np.vstack((x1, y1)).T
vector_start_tree = sklearn.neighbors.KDTree(vector_start_data)
for i in range(0, len(x1), 1):
# If index of element in mask list form 'outliers_filtering' then replace with median
#if i in mask_proc:
# print('Replace with median!')
req_data = np.array([x1[i], y1[i]]).reshape(1, -1)
# Getting number of neighbours
num_nn = vector_start_tree.query_radius(req_data, r=radius, count_only=True)
        # Check number of neighbours
'''
if num_nn[0] < total_neighbours:
idx_mask.append(i)
cc[i] = 0.
else:
'''
# Apply median filtering
nn = vector_start_tree.query_radius(req_data, r=radius)
data = np.vstack((uu[nn[0]], vv[nn[0]])).T
        ####################################################################
        # Replace raw components with the median over all found neighbours
        ####################################################################
        # If not fast ice (> fast_ice_th pixels) or the component is missing
        if (np.hypot(uu[i], vv[i]) > fast_ice_th or np.isnan(uu[i]) or np.isnan(vv[i])):
            u_median = np.nanmedian(data[:, 0])
            v_median = np.nanmedian(data[:, 1])
            uu[i], vv[i] = u_median, v_median
            cc[i] = 0
#tt = list(set(idx_mask))
#iidx_mask = np.array(tt)
x1_f = np.array(x1)
y1_f = np.array(y1)
uu_f = np.array(uu)
vv_f = np.array(vv)
cc_f = np.array(cc)
return x1_f, y1_f, uu_f, vv_f, cc_f
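# Hypothetical demo of the neighbourhood-median replacement: a spike among long
# drift vectors is pulled back to the local median (all points fall inside the
# default 512-px search radius here).
def _demo_median_filtering():
    xs = np.arange(10.)
    ys = np.zeros(10)
    us = np.full(10, 20.)
    us[4] = 80.                          # spike
    vs = np.zeros(10)
    ccs = np.ones(10)
    _, _, uf, _, _ = median_filtering(xs, ys, us, vs, ccs)
    print(uf[4])                         # -> 20.0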
def calc_deformations(dx, dy, normalization=False, normalization_time=None, cell_size=1.,
invert_meridional=True, out_png_name='test.png'):
'''
Calculate deformation invariants from X and Y ice drift components
dx, dy - x and y component of motion (pixels)
normalization - normalize to time (boolean)
normalization_time - normalization time (in seconds)
cell_size - ground meters in a pixel
invert_meridional - invert y component (boolean)
'''
# Cell size factor (in cm)
cell_size_cm = cell_size * 100.
cell_size_factor = 1 / cell_size_cm
m_div = np.empty((dx.shape[0], dx.shape[1],))
m_div[:] = np.NAN
m_curl = np.empty((dx.shape[0], dx.shape[1],))
m_curl[:] = np.NAN
m_shear = np.empty((dx.shape[0], dx.shape[1],))
m_shear[:] = np.NAN
m_tdef = np.empty((dx.shape[0], dx.shape[1],))
m_tdef[:] = np.NAN
# Invert meridional component
if invert_meridional:
dy = dy * (-1)
    # Normalize u and v by the time interval
    if normalization:
        # Convert to ground distance (pixels * cell size (m) * 100.)
        dx = dx * cell_size_cm  # cm
        dy = dy * cell_size_cm  # cm
        # Get U/V components of speed (cm/s)
        dx = dx / normalization_time
        dy = dy / normalization_time
    # Calculate magnitude (speed module): cm/s if normalized, otherwise pixels
    mag_speed = np.hypot(dx, dy)
    # Print mean speed
    print('Mean speed: %s' % (np.nanmean(mag_speed)))
#cell_size_factor = 1 / cell_size
# Test
#plt.clf()
#plt.imshow(m_div)
for i in range(1, dx.shape[0] - 1):
for j in range(1, dx.shape[1] - 1):
# div
if (np.isnan(dx[i, j + 1]) == False and np.isnan(dx[i, j - 1]) == False
and np.isnan(dy[i - 1, j]) == False and np.isnan(dy[i + 1, j]) == False
and (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
# m_div[i,j] = 0.5*((u_int[i,j + 1] - u_int[i,j - 1]) + (v_int[i + 1,j] - v_int[i - 1,j]))/m_cell_size
# !Exclude cell size factor!
m_div[i, j] = cell_size_factor * 0.5 * ((dx[i, j + 1] - dx[i, j - 1])
+ (dy[i - 1, j] - dy[i + 1, j]))
# print m_div[i,j]
# Curl
if (np.isnan(dy[i, j + 1]) == False and np.isnan(dy[i, j - 1]) == False and
np.isnan(dx[i - 1, j]) == False and np.isnan(dx[i + 1, j]) == False
and (np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
# !Exclude cell size factor!
m_curl[i, j] = cell_size_factor * 0.5 * (dy[i, j + 1] - dy[i, j - 1]
- dx[i - 1, j] + dx[i + 1, j]) / cell_size
# Shear
if (np.isnan(dy[i + 1, j]) == False and np.isnan(dy[i - 1, j]) == False and
np.isnan(dx[i, j - 1]) == False and np.isnan(dx[i, j + 1]) == False and
np.isnan(dy[i, j - 1]) == False and np.isnan(dy[i, j + 1]) == False and
np.isnan(dx[i + 1, j]) == False and np.isnan(dx[i - 1, j]) == False and
(np.isnan(dx[i, j]) == False or np.isnan(dy[i, j]) == False)):
dc_dc = cell_size_factor * 0.5 * (dy[i + 1, j] - dy[i - 1, j])
dr_dr = cell_size_factor * 0.5 * (dx[i, j - 1] - dx[i, j + 1])
dc_dr = cell_size_factor * 0.5 * (dy[i, j - 1] - dy[i, j + 1])
dr_dc = cell_size_factor * 0.5 * (dx[i + 1, j] - dx[i - 1, j])
# !Exclude cell size factor!
m_shear[i, j] = np.sqrt(
(dc_dc - dr_dr) * (dc_dc - dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc)) / cell_size
'''
# Den
dc_dc = 0.5*(v_int[i + 1,j] - v_int[i - 1,j])
dr_dr = 0.5*(u_int[i,j + 1] - u_int[i,j - 1])
dc_dr = 0.5*(v_int[i,j + 1] - v_int[i,j - 1])
dr_dc = 0.5*(u_int[i + 1,j] - u_int[i - 1,j])
m_shear[i,j] = np.sqrt((dc_dc -dr_dr) * (dc_dc -dr_dr) + (dc_dr - dr_dc) * (dc_dr - dr_dc))/m_cell_size
'''
# Total deformation
if (np.isnan(m_shear[i, j]) == False and np.isnan(m_div[i, j]) == False):
m_tdef[i, j] = np.hypot(m_shear[i, j], m_div[i, j])
# Invert dy back
if invert_meridional:
dy = dy * (-1)
# data = np.vstack((np.ravel(xx_int), np.ravel(yy_int), np.ravel(m_div), np.ravel(u_int), np.ravel(v_int))).T
divergence = m_div
# TODO: Plot Test Div
plt.clf()
plt.gca().invert_yaxis()
plt.imshow(divergence, cmap='RdBu', vmin=-0.00008, vmax=0.00008,
interpolation='nearest', zorder=2) # vmin=-0.06, vmax=0.06,
    # Plot u and v values inside cells (for testing purposes)
'''
font_size = .0000003
for ii in range(dx.shape[1]):
for jj in range(dx.shape[0]):
try:
if not np.isnan(divergence[ii,jj]):
if divergence[ii,jj] > 0:
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
(dx[ii,jj], dy[ii,jj], '+', ii, jj, divergence[ii,jj]),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
if divergence[ii,jj] < 0:
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
(dx[ii,jj], dy[ii,jj], '-', ii, jj, divergence[ii,jj]),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
if divergence[ii,jj] == 0:
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)\n%.6f' %
(dx[ii,jj], dy[ii,jj], '0', ii, jj, divergence[ii,jj]),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
if np.isnan(divergence[ii,jj]):
plt.text(jj, ii,
'u:%.2f\nv:%.2f\n%s ij:(%s,%s)' %
(dx[ii,jj], dy[ii,jj], '-', ii, jj),
horizontalalignment='center',
verticalalignment='center', fontsize=font_size, color='k')
# Plot arrows on top of the deformation
xxx = range(dx.shape[1])
yyy = range(dx.shape[0])
except:
pass
'''
# Plot drift arrows on the top
#import matplotlib.cm as cm
#from matplotlib.colors import Normalize
# Invert meridional component for plotting
ddy = dy * (-1)
#norm = Normalize()
colors = np.hypot(dx, ddy)
#print(colors)
#norm.autoscale(colors)
# we need to normalize our colors array to match it colormap domain
# which is [0, 1]
#colormap = cm.inferno
# Plot arrows on top of the deformation
xxx = range(dx.shape[1])
yyy = range(dx.shape[0])
plt.quiver(xxx, yyy, dx, ddy, colors, cmap='Greys', zorder=3) #'YlOrBr')
# Invert Y axis
plt.savefig(out_png_name, bbox_inches='tight', dpi=800)
curl = m_curl
shear = m_shear
total_deform = m_tdef
# return mag in cm/s
return mag_speed, divergence, curl, shear, total_deform
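# Hypothetical check of the divergence stencil: a purely divergent zonal flow
# dx = x, dy = 0 yields a constant positive divergence in the interior cells
# (0.01 with the default 1 m cell, i.e. cell_size_factor = 1/100 per cm).
# Note: calc_deformations also writes the test PNG as a side effect.
def _demo_calc_deformations():
    dx = np.tile(np.arange(5.), (5, 1))
    dy = np.zeros((5, 5))
    out = calc_deformations(dx, dy, out_png_name='defo_demo.png')
    print(out[1][2, 2])                  # divergence -> 0.01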
# !TODO:
def make_nc(nc_fname, lons, lats, data):
"""
Make netcdf4 file for deformation (divergence, shear, total deformation), scaled 10^(-4)
"""
print('\nStart making nc for defo...')
ds = Dataset(nc_fname, 'w', format='NETCDF4_CLASSIC')
print(ds.file_format)
# Dimensions
y_dim = ds.createDimension('y', lons.shape[0])
x_dim = ds.createDimension('x', lons.shape[1])
time_dim = ds.createDimension('time', None)
#data_dim = ds.createDimension('data', len([k for k in data.keys()]))
# Variables
times = ds.createVariable('time', np.float64, ('time',))
latitudes = ds.createVariable('lat', np.float32, ('y', 'x',))
longitudes = ds.createVariable('lon', np.float32, ('y', 'x',))
    for var_name in data.keys():
        # Use a local handle instead of writing into globals()
        var = ds.createVariable(var_name, np.float32, ('y', 'x',))
        var[:, :] = data[var_name]['data']
        var.units = data[var_name]['units']
        var.scale_factor = data[var_name]['scale_factor']
# Global Attributes
ds.description = 'Sea ice deformation product'
ds.history = 'Created ' + time.ctime(time.time())
ds.source = 'NIERSC/NERSC'
# Variable Attributes
latitudes.units = 'degree_north'
longitudes.units = 'degree_east'
times.units = 'hours since 0001-01-01 00:00:00'
times.calendar = 'gregorian'
# Put variables
latitudes[:, :] = lats
longitudes[:, :] = lons
ds.close()
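# Minimal usage sketch for make_nc() on a synthetic 2x2 grid (hypothetical
# values; only one variable is written here).
def _demo_make_nc():
    lons = np.array([[30., 31.], [30., 31.]])
    lats = np.array([[70., 70.], [71., 71.]])
    data = {'ice_divergence': {'data': np.zeros((2, 2)),
                               'units': '1/h', 'scale_factor': 1.}}
    make_nc('defo_demo.nc', lons, lats, data)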
def _create_geotiff(suffix, Array, NDV, xsize, ysize, GeoT, Projection, deformation):
from osgeo import gdal_array
DataType = gdal_array.NumericTypeCodeToGDALTypeCode(Array.dtype)
    if not isinstance(DataType, int):
        if not DataType.startswith('gdal.GDT_'):
            DataType = eval('gdal.GDT_' + DataType)
NewFileName = suffix + '.tif'
zsize = 1 #Array.shape[0]
driver = gdal.GetDriverByName('GTiff')
Array[np.isnan(Array)] = NDV
DataSet = driver.Create(NewFileName, xsize, ysize, zsize, DataType)
DataSet.SetGeoTransform(GeoT)
DataSet.SetProjection(Projection)#.ExportToWkt())
# for testing
# DataSet.SetProjection('PROJCS["NSIDC Sea Ice Polar Stereographic North",GEOGCS["Unspecified datum based upon the Hughes 1980 ellipsoid",DATUM["Not_specified_based_on_Hughes_1980_ellipsoid",SPHEROID["Hughes 1980",6378273,298.279411123061,AUTHORITY["EPSG","7058"]],AUTHORITY["EPSG","6054"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4054"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",70],PARAMETER["central_meridian",-45],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3411"]]')
#for i in xrange(0, zsize):
DataSet.GetRasterBand(1).WriteArray(deformation) # Array[i])
DataSet.GetRasterBand(1).SetNoDataValue(NDV)
DataSet.FlushCache()
return NewFileName
def create_geotiff(suffix, data, NDV, GeoT, Projection):
''' Create geotiff file (1 band)'''
# Get GDAL data type
dataType = gdal_array.NumericTypeCodeToGDALTypeCode(data.dtype)
# NaNs to the no data value
data[np.isnan(data)] = NDV
    if not isinstance(dataType, int):
        if not dataType.startswith('gdal.GDT_'):
            dataType = eval('gdal.GDT_' + dataType)
newFileName = suffix + '_test.tif'
cols = data.shape[1]
rows = data.shape[0]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newFileName, cols, rows, 1, dataType)
#outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
outRaster.SetGeoTransform(GeoT)
outband = outRaster.GetRasterBand(1)
outband.WriteArray(data)
outRaster.SetProjection(Projection)
outband.SetNoDataValue(NDV)
outband.FlushCache()
return newFileName
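# Hypothetical usage: write a 3x3 float32 array as an EPSG:4326 GeoTIFF. The
# GeoT tuple is (x_origin, x_pixel_size, 0, y_origin, 0, y_pixel_size).
def _demo_create_geotiff():
    from osgeo import osr
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)
    data = np.random.rand(3, 3).astype(np.float32)
    geot = (30.0, 0.01, 0.0, 71.0, 0.0, -0.01)
    create_geotiff('demo', data, -9999.0, geot, srs.ExportToWkt())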
def cc(arguments):
# BM test flag
f=0
# Parse arguments
#iidx_line, iidx_row, LLi0, LLi1, im1_name, im2_name, pref = arguments
iidx_line, iidx_row, Lt0, Rt0, Lt1, Rt1, Li0, Ri0, Li1, Ri1, pref, Conf.img1, Conf.img2, itr, itrCnt = arguments
#print("Processing block: {} from {} ({:.2f}%) at pid={}".format(itr, itrCnt, itr/itrCnt*100, multiprocessing.current_process()))
if iidx_line is not None:
# Open two images
im1 = Conf.img1[Lt0:Rt0, Lt1:Rt1]
im2 = Conf.img2[Li0:Ri0, Li1:Ri1]
#####################
# Filtering
#####################
# Median filtering
if Conf.img_median_filtering:
# print 'Median filtering'
# im2 = median(im2, disk(3))
# im1 = median(im1, disk(3))
im1 = median(im1, disk(Conf.median_kernel))
im2 = median(im2, disk(Conf.median_kernel))
if Conf.img_laplace_filtering:
im1 = laplace(im1)
im2 = laplace(im2)
if Conf.img_gradient_filtering:
im1 = gradient(im1, disk(3))
im2 = gradient(im2, disk(3))
if Conf.img_scharr_filtering:
# filters.scharr(camera)
im1 = filters.scharr(im1)
im2 = filters.scharr(im2)
########################
# End filtering
########################
# Check for black stripes
flag1 = check_borders(im1)
flag2 = check_borders(im2)
# No black borders in the first image
if flag1 == 0: # and flag2 == 0:
u_direct, v_direct, result = matching(im1, im2)
# Peak maximum CC
cc_max = np.max(result)
# Get coordinates with offsets
lline_2, rrow_2 = v_direct + Li0, u_direct + Li1
lline_1, rrow_1 = iidx_line, iidx_row
#ff_out_txt.write('%s, %s, %s, %s, %s, %s, %s, %s' %
# (lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1))
print(lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1)
#print('\nCOORDS: %s %s' % (arr_lines_1[i, j], arr_rows_1[i, j]))
#print('COORDS: %s %s\n' % (arr_lines_2[i, j], arr_rows_2[i, j]))
# Peaks plot
if Conf.plot_correlation_peaks:
plot_peaks(im1, im2, u_direct, v_direct, iidx_line, iidx_row, result, pref,
lline_1, rrow_1, lline_2, rrow_2, u_direct, Li0, v_direct, Li1)
#plot_peaks(im1_bm, im2_bm, uu_bm, vv_bm, iidx_line, iidx_row,
# result_bm, 'bm')
# If all elements are equal
if np.unique(result).size == 1:
return np.nan, np.nan, np.nan, np.nan, np.nan
# If second peak close to first
flat = result.flatten()
flat.sort()
#print('#Flat: %s' % flat)
#if abs(flat[-1]-flat[-2]) < 0.05:
# return np.nan, np.nan, np.nan, np.nan, np.nan
ret = (lline_1, rrow_1, rrow_2-rrow_1, lline_2-lline_1, cc_max)
#return lline_1, rrow_1, u_direct, v_direct, cc_max
else:
#pass
# ! Testing (return result in any case)
ret = (np.nan, np.nan, np.nan, np.nan, np.nan)
'''
# if crop images have black stripes
if flag1 == 1:
print('IMG_1: %s_%s' % (iidx_line, iidx_row))
io.imsave('ci_%s_1/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im1)
if flag2 == 1:
print('IMG_2: %s_%s' % (idx_line, idx_row))
io.imsave('ci_%s_2/black_%s_%s.png' % (Conf.out_fname, iidx_line, iidx_row), im2)
'''
#print("Processed block: {} from {}".format(itr, itrCnt))
return ret
def apply_anisd(img, gamma=0.25, step=(1., 1.), ploton=False):
"""
Anisotropic diffusion.
Usage:
imgout = anisodiff(im, niter, kappa, gamma, option)
Arguments:
img - input image
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the image will be plotted on every iteration
Returns:
imgout - diffused image.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x and y axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
P. Perona and J. Malik.
    Scale-space and edge detection using anisotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by Peter Kovesi
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by Alistair Muldal
Sep 2017 modified by Denis Demchev
"""
# init args
kappa = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['kappa']
niter = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['N']
option = Conf.speckle_filter_parameters[Conf.speckle_filter_name]['equation']
# ...you could always diffuse each color channel independently if you
# really want
if img.ndim == 3:
warnings.warn("Only grayscale images allowed, converting to 2D matrix")
img = img.mean(2)
# initialize output array
img = img.astype('float32')
imgout = img.copy()
# niter
# initialize some internal variables
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
fig = pl.figure(figsize=(20, 5.5), num="Anisotropic diffusion")
ax1, ax2 = fig.add_subplot(1, 2, 1), fig.add_subplot(1, 2, 2)
ax1.imshow(img, interpolation='nearest')
ih = ax2.imshow(imgout, interpolation='nearest', animated=True)
ax1.set_title("Original image")
ax2.set_title("Iteration 0")
fig.canvas.draw()
for ii in range(niter):
# calculate the diffs
deltaS[:-1, :] = np.diff(imgout, axis=0)
deltaE[:, :-1] = np.diff(imgout, axis=1)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gS = np.exp(-(deltaS / kappa) ** 2.) / step[0]
gE = np.exp(-(deltaE / kappa) ** 2.) / step[1]
elif option == 2:
gS = 1. / (1. + (deltaS / kappa) ** 2.) / step[0]
gE = 1. / (1. + (deltaE / kappa) ** 2.) / step[1]
# update matrices
E = gE * deltaE
S = gS * deltaS
# subtract a copy that has been shifted 'North/West' by one
        # pixel. don't ask questions. just do it. trust me.
NS[:] = S
EW[:] = E
NS[1:, :] -= S[:-1, :]
EW[:, 1:] -= E[:, :-1]
# update the image
imgout += gamma * (NS + EW)
if ploton:
iterstring = "Iteration %i" % (ii + 1)
ih.set_data(imgout)
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return cv2.convertScaleAbs(imgout)
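# The two Perona-Malik conduction functions used in apply_anisd(), shown
# standalone for a range of gradient magnitudes (kappa = 50 is illustrative):
def _demo_conduction():
    grad = np.linspace(0., 100., 5)
    kappa = 50.
    g1 = np.exp(-(grad / kappa) ** 2.)       # equation 1: keeps strong edges
    g2 = 1. / (1. + (grad / kappa) ** 2.)    # equation 2: favours wide regions
    print(g1)
    print(g2)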
#################################################################################
#################################################################################
#################################################################################
# MAIN PROGRAM
#################################################################################
#################################################################################
#################################################################################
# run cc_bm_parallel_dev.py ./data/test_kara_01.tif ./data/test_kara_02.tif 64 4 100
import cc_config
import cc_calc_drift
import cc_calc_drift_filter
import cc_calc_defo
#VAS
if __name__ == '__main__':
multiprocessing.freeze_support()
# check command line args
assert (len(sys.argv) == 6), "Expecting 5 arguments: filename1 filename2 block_size search_area grid_step"
# init config class
Conf = cc_config.Config()
Conf.init(f1_name=sys.argv[1], f2_name=sys.argv[2],
block_size=int(sys.argv[3]), search_area=int(sys.argv[4]), grid_step=int(sys.argv[5]))
Conf.self_prepare()
global_start_time = time.time()
# Downscale
if Conf.rescale_apply:
print('Rescaling...')
Conf.img1 = rescale(Conf.img1, 1.0 / Conf.rescale_factor)
Conf.img2 = rescale(Conf.img2, 1.0 / Conf.rescale_factor)
print('Done!')
# Image intensity normalization
if Conf.image_intensity_byte_normalization:
print('\nImage intensity rescaling (0, 255)...')
#Conf.img1 = exposure.adjust_log(Conf.img1)
#Conf.img2 = exposure.adjust_log(Conf.img2)
# Rescale intensity only
Conf.img1 = exposure.rescale_intensity(Conf.img1, out_range=(0, 255))
Conf.img2 = exposure.rescale_intensity(Conf.img2, out_range=(0, 255))
p2, p98 = np.percentile(Conf.img1, (2, 98))
Conf.img1 = img_as_ubyte(exposure.rescale_intensity(Conf.img1, in_range=(p2, p98)))
p2, p98 = np.percentile(Conf.img2, (2, 98))
Conf.img2 = img_as_ubyte(exposure.rescale_intensity(Conf.img2, in_range=(p2, p98)))
print('Done!')
# Normalization
#print('\n### Laplacian! ###\n')
#Conf.img1 = cv2.Laplacian(Conf.img1, cv2.CV_64F, ksize=19)
#Conf.img2 = cv2.Laplacian(Conf.img2, cv2.CV_64F, ksize=19)
# Speckle filtering
if Conf.speckle_filtering:
        assert Conf.speckle_filter_name in Conf.speckle_filter_parameters, \
            '%s error: appropriate processor is not found' % Conf.speckle_filter_name
print('\nSpeckle filtering with %s\n' % Conf.speckle_filter_name)
if Conf.speckle_filter_name == 'Anisd':
Conf.img1 = apply_anisd(Conf.img1, gamma=0.25, step=(1., 1.), ploton=False)
Conf.img2 = apply_anisd(Conf.img2, gamma=0.25, step=(1., 1.), ploton=False)
#####################
### Calculate Drift ###
#####################
print('\nStart multiprocessing...')
nb_cpus = 10
height, width = Conf.img1.shape
print('Image size Height: %s px Width: %s px' % (height, width))
# init drift calculator class
Calc = cc_calc_drift.CalcDrift(Conf, Conf.img1, Conf.img2)
Calc.create_arguments(height, width)
# arg generator
argGen = ((i) for i in range(Calc.Count))
pool = multiprocessing.Pool(processes=nb_cpus)
# calculate
results = pool.map(Calc.calculate_drift, argGen)
pool.close()
pool.join()
print('Done!')
exec_t = (time.time() - global_start_time) / 60.
print('Calculated in--- %.1f minutes ---' % exec_t)
pref = 'dm'
'''
print('\nPlotting...')
try:
plot_arrows_from_list(pref, '%s/%s_%s_01.png' % (Conf.res_dir, pref, Conf.out_fname),
Conf.img1, results, arrwidth=0.0021, headwidth=2.5, flag_color=True)
plot_arrows_from_list(pref, '%s/%s_%s_02.png' % (Conf.res_dir, pref, Conf.out_fname),
Conf.img2, results, arrwidth=0.0021, headwidth=2.5, flag_color=True)
print('Plot end!')
except:
print('Plot FAULT!')
'''
#####################
#### Filter vectors ####
#####################
print('\nStart outliers filtering...')
# init result filtering class
Filter = cc_calc_drift_filter.CalcDriftFilter(Conf)
# filter
Cnt = Filter.filter_outliers(results)
# Filter land vectors
print('\nLand mask filtering...')
land_filtered_vectors = Filter.filter_land()
    print('Done!\n')
print('\nNumber of vectors: \n Unfiltered: %d Filtered: %d\n' %
(Cnt[0], Cnt[1]))
print('\nPlotting...')
plot_arrows('%s/01_spikes_%s_%s.png' % (Conf.res_dir, pref, Conf.out_fname), Conf.img1, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f, Filter.ccc_f,
arrwidth=0.002, headwidth=5.5, flag_color=True)
plot_arrows('%s/02_spikes_%s_%s.png' % (Conf.res_dir, pref, Conf.out_fname), Conf.img2, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f, Filter.ccc_f,
arrwidth=0.002, headwidth=5.5, flag_color=True)
#####################
#### Defo calculate ####
#####################
print('\n### Start deformation calculation...')
# init defo calculator class
Defo = cc_calc_defo.CalcDefo(Conf, Calc, Filter)
# calculate deformation from the 2D arrays
mag_speed, divergence, curl, shear, total_deform = Defo.calculate_defo()
print('\n### Success!\n')
#########################
# EXPORT TO GEO-FORMATS
#########################
files_pref = '%spx' % Conf.grid_step
try:
os.makedirs('%s/vec' % Conf.res_dir)
except:
pass
try:
os.makedirs('%s/defo/nc' % Conf.res_dir)
except:
pass
# Vector
export_to_vector(Conf.f1_name, Filter.xxx_f, Filter.yyy_f, Filter.uuu_f, Filter.vvv_f,
'%s/vec/%s_ICEDRIFT_%s.json' % (Conf.res_dir, files_pref, Conf.out_fname),
gridded=False, data_format='geojson')
################
# Geotiff
################
print('\nStart making geotiff..')
try:
os.makedirs('%s/defo/gtiff' % Conf.res_dir)
except:
pass
scale_factor = 1
divergence_gtiff = divergence * scale_factor
GeoT = (Calc.geotransform[0] - Conf.grid_step/2.*Calc.pixelHeight, Conf.grid_step*Calc.pixelWidth, 0.,
Calc.geotransform[3] + Conf.grid_step/2.*Calc.pixelHeight, 0., Conf.grid_step*Calc.pixelHeight)
NDV = np.nan
# Get projection WKT
gd_raster = gdal.Open(Conf.f1_name)
Projection = gd_raster.GetProjection()
#create_geotiff('%s/defo/gtiff/%s_ICEDIV_%s' % (Conf.res_dir, files_pref, Conf.out_fname),
# divergence_gtiff, NDV, u_2d.shape[0], u_2d.shape[1], GeoT, Projection, divergence_gtiff)
create_geotiff('%s/defo/gtiff/%s_ICEDIV_%s' % (Conf.res_dir, files_pref, Conf.out_fname), divergence_gtiff, NDV, GeoT, Projection)
#####################
# Shear
#####################
shear_gtiff = shear * scale_factor
GeoT = (Calc.geotransform[0] - Conf.grid_step / 2. * Calc.pixelHeight, Conf.grid_step * Calc.pixelWidth, 0.,
Calc.geotransform[3] + Conf.grid_step / 2. * Calc.pixelHeight, 0., Conf.grid_step * Calc.pixelHeight)
NDV = np.nan
# Get projection WKT
gd_raster = gdal.Open(Conf.f1_name)
Projection = gd_raster.GetProjection()
create_geotiff('%s/defo/gtiff/%s_ICESHEAR_%s' % (Conf.res_dir, files_pref, Conf.out_fname), shear_gtiff, NDV,
GeoT, Projection)
################
# END Geotiff
################
############
# Netcdf
############
dict_deformation = {'ice_speed': {'data': mag_speed, 'scale_factor': 1., 'units': 'cm/s'},
'ice_divergence': {'data': divergence, 'scale_factor': scale_factor, 'units': '1/h'},
'ice_curl': {'data': curl, 'scale_factor': scale_factor, 'units': '1/h'},
'ice_shear': {'data': shear, 'scale_factor': scale_factor, 'units': '1/h'},
'total_deformation': {'data': total_deform, 'scale_factor': scale_factor, 'units': '1/h'}}
print('\nStart making netCDF for ice deformation...\n')
make_nc('%s/defo/nc/%s_ICEDEF_%s.nc' % (Conf.res_dir, files_pref, Conf.out_fname),
Calc.lon_2d, Calc.lat_2d, dict_deformation)
print('Success!\n')
############
# END Netcdf
############
############################
# END EXPORT TO GEO-FORMATS
############################
# Calc_img_entropy
calc_img_entropy = False
#ent_spikes_dm_S1A_EW_GRDM_1SDH_20150114T133134_20150114T133234_004168_0050E3_8C66_HV_S1A_EW_GRDM_1SDH_20150115T025040_20150115T025140_004176_005114_5C27_HV
d1 = re.findall(r'\d\d\d\d\d\d\d\d\w\d\d\d\d\d\d', Conf.f1_name)[0]
d2 = re.findall(r'\d\d\d\d\d\d\d\d\w\d\d\d\d\d\d', Conf.f2_name)[0]
# Calculate entropy
if calc_img_entropy:
print('Calculate entropy')
plt.clf()
from skimage.util import img_as_ubyte
from skimage.filters.rank import entropy
        entr_img1 = entropy(Conf.img1, disk(16))
        # Mean entropy around each filtered vector start point (Filter.xxx_f / Filter.yyy_f)
        ff = open('%s/entropy/ent_NCC_%s_%s.txt' % (Conf.res_dir, d1, d2), 'w')
        for i in range(len(Filter.xxx_f)):
            ff.write('%7d %7.2f\n' % (i + 1, np.mean(entr_img1[Filter.yyy_f[i] - Conf.grid_step:Filter.yyy_f[i] + Conf.grid_step,
                                                               Filter.xxx_f[i] - Conf.grid_step:Filter.xxx_f[i] + Conf.grid_step])))
        ff.close()
        # TODO:
        plt.imshow(entr_img1, cmap=plt.cm.get_cmap('hot', 10))
        plt.colorbar()
        plt.clim(0, 10)
plt.savefig('%s/entropy/img/ent_NCC_%s_%s.png' % (Conf.res_dir, d1, d2), bbox_inches='tight', dpi=300)
# END
# ==== file: tests/test_version.py | repo: hsh-nids/python-betterproto @ f5d3b48b1aa49fd64513907ed70124b32758ad3e | license: MIT | size: 426 B | stars: 708 ====
from betterproto import __version__
from pathlib import Path
import tomlkit
PROJECT_TOML = Path(__file__).joinpath("..", "..", "pyproject.toml").resolve()
def test_version():
with PROJECT_TOML.open() as toml_file:
project_config = tomlkit.loads(toml_file.read())
assert (
__version__ == project_config["tool"]["poetry"]["version"]
), "Project version should match in package and package config"
# ==== file: Models.py | repo: BradHend/machine_learning_from_scratch @ 6c83f17d1c48da9ad3df902b3090a8cb2c544f15 | license: MIT | size: 9,722 B ====
"""classes and methods for different model architectures
"""
#python packages
import numpy as np
# Machine Learning from Scratch packages
from Layers import FullyConnected
from utils.optimizers import *
class NeuralNet():
"""
Linear stack of layers.
"""
    def __init__(self, layers=None):
        self.layers = []
        self.output = None
        # Add any layers passed into the constructor to the model
        if layers:
            for layer in layers:
                self.layers.append(layer)
def add_layer(self, layer_type=None,
input_shape=None,
output_shape=None,
activation=None,
dropout=1.,
lambd=0,):
"""Adds a Layer class to model
"""
#only FullyConnected layer type supported right now
if layer_type=="FullyConnected":
layer = FullyConnected(input_shape=input_shape,
output_shape=output_shape,
activation=activation,
dropout=dropout,
lambd=lambd
)
#append layer to model Class
self.layers.append(layer)
def model_forward(self,X,training=False):
""" Perform forward evaluation of model on given data
Inputs:
X -- input data to be evaluated by model vector shape=(len(Wl_1), number of examples)
training -- training flag, no layer dropout if True
Outputs:
predictions -- model prediction(s) for given data
"""
layer_inputs = X
for layer in self.layers:
if training==False: #only use dropout when training
layer.dropout=1.
#loop over all layers, using the output of previous layer as input
layer.layer_forward(layer_inputs=layer_inputs)
#update "layer_inputs" for next iteration
layer_inputs = layer.outputs
#predictions will be layer.output of the last layer
predictions = layer_inputs
return predictions
def model_backprop(self,Y):
""" Perform back-prop. of prediction error through model
Inputs:
Y -- truth "label" vector shape=(n_y, number of examples)
Outputs:
None -- updates Layer properties
"""
# output_layer = self.layers[-1]
dZ = self.compute_loss_grad(Y)
#backprop output layer results through the network
for layer in reversed(self.layers):
            #loop over all layers in reverse, using the following layer's dZ
layer.layer_backprop(dZ)
#update "dZ" for next iteration, set to current layer's Activation gradient
dZ = layer.dA
def compute_cost(self,predictions,Y):
""" compute "cost" for given predictions/truth
Inputs:
predictions -- model predictions vector shape=(n_y, number of examples)
Y -- truth "label" vector shape=(n_y, number of examples)
Outputs:
cost - gradient of output layer's activation
"""
m = Y.shape[1]
# Compute loss from predictions and y.
predictions = np.clip(predictions, 1e-13, 1 - 1e-13)
if self.loss == 'binary-crossentropy':
cost = np.multiply(-np.log(predictions),Y) + np.multiply(-np.log(1 - predictions), 1 - Y)
elif self.loss == 'categorical-crossentropy':
#Categorical Crossentropy
cost = np.sum(np.multiply(Y, -np.log(predictions)),axis=0,keepdims=False)
else:
return None
return cost
def compute_loss_grad(self,Y):
"""
Inputs:
Y -- truth "label" vector shape=(n_y, number of examples)
Outputs:
dZ - gradient of output layer's loss
"""
output_layer = self.layers[-1]
# outputs = output_layer.outputs
predictions = np.clip(output_layer.outputs, 1e-13, 1 - 1e-13)
if self.loss == 'binary-crossentropy':
#gradient of sigmoid (for now)
# print("outputs: ", output_layer.outputs)
# print(1 - output_layer.outputs)
dZ = - (np.divide(Y, predictions) - np.divide(1 - Y, 1 - predictions))
elif self.loss == 'categorical-crossentropy':
#gradient of softmax
dZ = predictions - Y
return dZ
def predict(self, X):
predictions = self.model_forward(X,training=False)
return predictions
def train(self, X, Y,
optimizer="gd",
loss=None,
learning_rate = 0.007,
mini_batch_size = [],
num_epochs = 100,
print_cost=True):
"""
Inputs:
X -- input data, of shape=(n_x, number of examples)
Y -- truth "label" vector shape=(n_y, number of examples)
loss -- loss function to use
optimizer -- optimizer to use to update trainable params.
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of each dataset mini batch
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
"""
self.loss = loss
if print_cost:
#print at every 1% of training completion, or at every epoch if num_epoch <= 100
print_interval = np.max([1,int(0.01*num_epochs)])
m = X.shape[1] # number of training examples
if not mini_batch_size:
mini_batch_size = m #make the mini-batch the entire dataset
costs = [] # to keep track of the cost
accuracy_lst = [] # keep track of acc. for multi-class problems
seed = 10
# Initialize layers (weights & bias vectors)
for layer in self.layers:
layer.initialize_layer()
if layer.dropout > 1.: #check that inputs make sense
layer.dropout = 1.
#if true, dropout was requested, override/ignore user's L2 reg. request (as of this commit)
if layer.dropout < 1.:
layer.lambd = 0
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization needed
elif optimizer == "momentum":
initialize_velocity(self.layers)
beta = 0.90
elif optimizer == "adam":
t = 0 #counter required for Adam update
#use values from the ADAM paper
beta1 = 0.9
beta2 = 0.999
epsilon = 1e-7
learning_rate = 0.01
initialize_adam(self.layers)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches, change seed each time
seed = seed + 1
minibatches = make_sub_batches(X, Y, mini_batch_size, seed)
#init cost summation variable
cost_total = 0.
#init accuracy summation variable
training_correct = 0.
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward prop
predictions = self.model_forward(minibatch_X, training=True)
# Compute cost (for printing) and add to the running total
cost_total += np.nansum(self.compute_cost(predictions, minibatch_Y))
#compute train set acc. for multi-class class. problems
                if predictions.shape[0] > 1 or self.loss == 'categorical-crossentropy':
#compute number of examples correctly classified, assuming only one class can present right now
training_correct += np.sum(np.argmax(predictions,axis=0)==np.argmax(minibatch_Y,axis=0),keepdims=False)
# Backprop
self.model_backprop(Y=minibatch_Y)
# Update weights/bias
if optimizer == "gd":
update_layers_with_gradient_descent(self.layers, learning_rate)
elif optimizer == "momentum":
update_parameters_with_momentum(self.layers, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
update_parameters_with_adam(self.layers, t, learning_rate, beta1, beta2, epsilon)
#compute training stats. for this epoch
cost_avg = cost_total / m
if predictions.shape[0] > 1: #for multi-class class. problems show accuracy
accuracy_percent = 100.*(training_correct/m)
# Print the cost every epoch
# if print_cost and i % print_interval == 0:
if print_cost and i % 1 == 0:
if predictions.shape[0] > 1: #for multi-class class. problems show accuracy
print("Cost after epoch %i: %f, Acc.: %f" %(i, cost_avg, accuracy_percent))
accuracy_lst.append(accuracy_percent)
else:
print(("Cost after epoch %i: %f" %(i, cost_avg)))
costs.append(cost_avg)
#will need to implement better convergence detection..
if self.loss == ('categorical-crossentropy'):
pass
elif cost_avg < 0.17:
break
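# Hypothetical usage sketch (not from the original repo): train a tiny XOR
# classifier. Assumes FullyConnected accepts the keyword names used in
# add_layer() above and that "relu"/"sigmoid" activations exist in Layers.py.
def _demo_xor():
    net = NeuralNet()
    net.add_layer(layer_type="FullyConnected", input_shape=2, output_shape=4,
                  activation="relu", dropout=1., lambd=0)
    net.add_layer(layer_type="FullyConnected", input_shape=4, output_shape=1,
                  activation="sigmoid", dropout=1., lambd=0)
    X = np.array([[0., 0., 1., 1.], [0., 1., 0., 1.]])  # shape (n_x, m)
    Y = np.array([[0., 1., 1., 0.]])                    # shape (n_y, m)
    net.train(X, Y, optimizer="adam", loss="binary-crossentropy",
              num_epochs=500, print_cost=False)
    print(net.predict(X))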
# ==== file: ipmi_power_manager.py | repo: spirkaa/ansible-homelab @ 94138c85ddb132a08dab55b4e9a9b43160d02c76 | license: MIT | size: 1,724 B ====
import argparse
import logging
import os
import requests
import urllib3
from dotenv import load_dotenv
logger = logging.getLogger("__name__")
logging.basicConfig(
format="%(asctime)s [%(levelname)8s] [%(name)s:%(lineno)s:%(funcName)20s()] --- %(message)s",
level=logging.INFO,
)
logging.getLogger("urllib3").setLevel(logging.WARNING)
urllib3.disable_warnings()
load_dotenv()
IPMI_USERNAME = os.getenv("IPMI_USERNAME")
IPMI_PASSWORD = os.getenv("IPMI_PASSWORD")
API_ROOT = "https://spmaxi-ipmi.home.devmem.ru/redfish/v1/"
API_AUTH = "SessionService/Sessions"
API_ACTIONS_RESET = "Systems/1/Actions/ComputerSystem.Reset"
POWER_STATE_ON = "On"
POWER_STATE_OFF = "GracefulShutdown"
parser = argparse.ArgumentParser(description="Supermicro IPMI Power Manager")
parser.add_argument("--on", dest="power_state", action="store_true")
parser.add_argument("--off", dest="power_state", action="store_false")
args = parser.parse_args()
if args.power_state:
power_state = POWER_STATE_ON
else:
power_state = POWER_STATE_OFF
def get_auth_headers():
logger.debug("Get session headers")
endpoint_url = API_ROOT + API_AUTH
payload = f'{{"UserName": "{IPMI_USERNAME}","Password": "{IPMI_PASSWORD}"}}'
headers = {"Content-Type": "application/json"}
r = requests.post(endpoint_url, headers=headers, data=payload, verify=False)
return r.headers
def set_power_state(value):
logger.debug("Set power state to '%s'", value)
endpoint_url = API_ROOT + API_ACTIONS_RESET
payload = f'{{"ResetType": "{value}"}}'
headers = get_auth_headers()
r = requests.post(endpoint_url, headers=headers, data=payload, verify=False)
print(r.json())
set_power_state(power_state)
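# Example invocation (hypothetical credentials supplied via a .env file):
#   IPMI_USERNAME=admin IPMI_PASSWORD=secret python ipmi_power_manager.py --on
#   python ipmi_power_manager.py --off    # sends a Redfish GracefulShutdown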
# ==== file: pyhsi/gui/graphics.py | repo: rddunphy/pyHSI @ b55c2a49568e04e0a2fb39da01cfe1f129bc86a4 | license: MIT | size: 1,572 B ====
"""Stuff to do with processing images and loading icons"""
import importlib.resources as res
import cv2
import PySimpleGUI as sg
def get_application_icon():
"""Get the PyHSI icon for this OS (.ico for Windows, .png otherwise)"""
return res.read_binary("pyhsi.gui.icons", "pyhsi.png")
def get_icon(icon_name, hidpi=False):
"""Return full path for icon with given name"""
size = 40 if hidpi else 25
return res.read_binary("pyhsi.gui.icons", f"{icon_name}{size}.png")
def get_icon_button(icon_name, hidpi=False, **kwargs):
"""Create a button with an icon as an image"""
mc = ("white", "#405e92")
icon = get_icon(icon_name, hidpi=hidpi)
return sg.Button("", image_data=icon, mouseover_colors=mc, **kwargs)
def set_button_icon(button, icon_name, hidpi=False, **kwargs):
"""Change image on button"""
icon = get_icon(icon_name, hidpi=hidpi)
button.update(image_data=icon, **kwargs)
def resize_img_to_area(img, size, preserve_aspect_ratio=True, interpolation=False):
"""Resize frame to fill available area in preview panel"""
max_w = max(size[0] - 20, 20)
max_h = max(size[1] - 20, 20)
if preserve_aspect_ratio:
old_h = img.shape[0]
old_w = img.shape[1]
new_w = round(min(max_w, old_w * max_h / old_h))
new_h = round(min(max_h, old_h * max_w / old_w))
else:
new_w = max_w
new_h = max_h
if interpolation:
interp = cv2.INTER_LINEAR
else:
interp = cv2.INTER_NEAREST
return cv2.resize(img, (new_w, new_h), interpolation=interp)
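# Quick sanity check of resize_img_to_area's aspect-preserving arithmetic
# (hypothetical sizes; 20 px margins are subtracted on each dimension):
def _demo_resize():
    import numpy as np
    img = np.zeros((100, 200), dtype=np.uint8)   # h=100, w=200
    out = resize_img_to_area(img, (420, 220))    # usable area 400x200
    print(out.shape)                             # -> (200, 400)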
# ==== file: configs/densenet169_lr_0.001.py | repo: FeiYuejiao/NLP_Pretrain @ 7aa4693c31a7bba9b90f401d2586ef154dd7fb81 | license: MIT | size: 315 B ====
lr = 0.001
model_path = 'model/IC_models/densenet169_lr_0.001/'
crop_size = 32
log_step = 10
save_step = 500
num_epochs = 400
batch_size = 256
num_workers = 8
loading = False
# Model parameters
model = dict(
net='densenet169',
embed_size=256,
hidden_size=512,
num_layers=1,
resnet=101
)
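# A .py config like this is typically consumed by importing it as a module.
# Hypothetical consumer code (not part of this config):
#   import importlib.util
#   spec = importlib.util.spec_from_file_location(
#       "cfg", "configs/densenet169_lr_0.001.py")
#   cfg = importlib.util.module_from_spec(spec)
#   spec.loader.exec_module(cfg)
#   print(cfg.lr, cfg.model['net'])   # -> 0.001 densenet169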
# ==== file: src/models/metapop.py | repo: TLouf/multiling-twitter @ 9a39b5b70da53ca717cb74480697f3756a95b8e4 | license: RSA-MD | size: 3,750 B | stars: 1 ====
'''
Implements the computation of the time derivatives and associated Jacobian
corresponding to the approximated equations in a metapopulation. Every function
accepts **kwargs so that the parameter dictionary used in the models can be
reused even when some of the parameters it contains are not needed here.
'''
import numpy as np
def bi_model_system(N_L, N, nu, nu_T_N, a=1, s=0.5, rate=1, **kwargs):
'''
Computes the values of the time derivatives in every cell for the two
monolingual kinds, for Castello's model.
'''
N_A = N_L[:N.shape[0]]
N_B = N_L[N.shape[0]:]
    # Every element of row i of nu must be divided by the same value
    # sigma[i], hence the double transpose trick.
nu_T_N_A = np.dot(nu.T, N_A)
nu_T_N_B = np.dot(nu.T, N_B)
N_A_eq = rate * (
s * (N - N_A - N_B) * np.dot(nu, (1 - nu_T_N_B / nu_T_N)**a)
- (1-s) * N_A * np.dot(nu, (nu_T_N_B / nu_T_N)**a))
N_B_eq = rate * (
(1-s) * (N - N_A - N_B) * np.dot(nu, (1 - nu_T_N_A / nu_T_N)**a)
- s * N_B * np.dot(nu, (nu_T_N_A / nu_T_N)**a))
return np.concatenate((N_A_eq, N_B_eq))
def bi_pref_system(N_L, N, nu, nu_T_N, mu=0.02, c=0.1, s=0.5, q=0.5, rate=1,
**kwargs):
'''
Computes the values of the time derivatives in every cell for the two
monolingual kinds, for our model.
'''
N_A = N_L[:N.shape[0]]
N_B = N_L[N.shape[0]:]
    # Every element of row i of nu must be divided by the same value
    # sigma[i], hence the double transpose trick.
nu_T_N_A = np.dot(nu.T, N_A)
nu_T_N_B = np.dot(nu.T, N_B)
sum_nu_rows = np.sum(nu, axis=1)
nu_nu_T_N_L_term = np.dot(nu, ((1-q)*nu_T_N_A - q*nu_T_N_B) / nu_T_N)
N_A_eq = rate * (
mu*s * (N - N_A - N_B) * (q*sum_nu_rows + nu_nu_T_N_L_term)
- c*(1-mu)*(1-s) * N_A * ((1-q)*sum_nu_rows - nu_nu_T_N_L_term))
N_B_eq = rate * (
mu*(1-s) * (N - N_A - N_B) * ((1-q)*sum_nu_rows - nu_nu_T_N_L_term)
- c*(1-mu)*s * N_B * (q*sum_nu_rows + nu_nu_T_N_L_term))
return np.concatenate((N_A_eq, N_B_eq))
def bi_pref_jacobian(N_L, N, nu, nu_T_N, mu=0.02, c=0.1, s=0.5, q=0.5,
**kwargs):
'''
Computes the Jacobian of the system at a given point for our model.
'''
n_cells = N.shape[0]
N_A = N_L[:n_cells]
N_B = N_L[n_cells:]
nu_T_N_A = np.dot(nu.T, N_A)
nu_T_N_B = np.dot(nu.T, N_B)
nu_cols_prod = np.dot(nu / nu_T_N, nu.T)
nu_T_N_L_term = ((1-q)*nu_T_N_A - q*nu_T_N_B) / nu_T_N
sum_nu_rows = np.sum(nu, axis=1)
AA_block = ((mu*s*(1-q)*(N-N_A-N_B) + c*(1-mu)*(1-s)*(1-q)*N_A)
* nu_cols_prod.T).T
AA_block += np.eye(n_cells) * (
(-mu*s*q - c*(1-mu)*(1-s)*(1-q)) * sum_nu_rows
+ np.dot(
nu,
(c*(1-mu)*(1-s) - mu*s) * nu_T_N_L_term))
AB_block = ((-mu*s*q*(N-N_A-N_B) - c*(1-mu)*(1-s)*q*N_A)
* nu_cols_prod.T).T
AB_block += np.eye(n_cells) * (
-mu*s*q * sum_nu_rows
+ np.dot(
nu,
-mu*s * nu_T_N_L_term))
BA_block = (-(mu*(1-s)*(1-q)*(N-N_A-N_B) - c*(1-mu)*s*(1-q)*N_B)
* nu_cols_prod.T).T
BA_block += np.eye(n_cells) * (
-mu*(1-s)*(1-q) * sum_nu_rows
+ np.dot(
nu,
mu*(1-s) * nu_T_N_L_term))
BB_block = ((mu*(1-s)*q*(N-N_A-N_B) + c*(1-mu)*s*q*N_B)
* nu_cols_prod.T).T
BB_block += np.eye(n_cells) * (
(-mu*(1-s)*(1-q) - c*(1-mu)*s*q) * sum_nu_rows
+ np.dot(
nu,
(-c*(1-mu)*s + mu*(1-s)) * nu_T_N_L_term))
jacobian = np.block([[AA_block, AB_block],
[BA_block, BB_block]])
return jacobian
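# --- Hedged verification sketch (editor addition, not part of the original
# module): checks bi_pref_jacobian against a central finite-difference
# estimate of bi_pref_system; the cell count and state are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n_cells = 4
    nu = rng.random((n_cells, n_cells))
    nu /= nu.sum(axis=1, keepdims=True)  # row-normalised mobility matrix
    N = rng.uniform(100, 200, size=n_cells)  # total population per cell
    nu_T_N = np.dot(nu.T, N)
    N_L = np.concatenate((0.3 * N, 0.3 * N))  # monolingual A and B counts
    J = bi_pref_jacobian(N_L, N, nu, nu_T_N)
    eps = 1e-6
    J_fd = np.empty_like(J)
    for j in range(2 * n_cells):
        step = np.zeros(2 * n_cells)
        step[j] = eps
        J_fd[:, j] = (bi_pref_system(N_L + step, N, nu, nu_T_N)
                      - bi_pref_system(N_L - step, N, nu, nu_T_N)) / (2 * eps)
    print('max |J - J_fd| =', np.abs(J - J_fd).max())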
| 37.128713
| 80
| 0.553333
| 788
| 3,750
| 2.350254
| 0.135787
| 0.066415
| 0.086393
| 0.040497
| 0.711123
| 0.686825
| 0.660907
| 0.643089
| 0.524838
| 0.512419
| 0
| 0.02466
| 0.275467
| 3,750
| 100
| 81
| 37.5
| 0.656975
| 0.2224
| 0
| 0.463768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.014493
| 0
| 0.101449
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af0ab77a97059c19f88a0b36ce01422819f17356
| 2,174
|
py
|
Python
|
tests/app/dao/test_marketings_dao.py
|
kentsanggds/api
|
651cdf7d496690722d6a4f5b51f04f4be97899d4
|
[
"MIT"
] | 1
|
2018-10-12T15:04:31.000Z
|
2018-10-12T15:04:31.000Z
|
tests/app/dao/test_marketings_dao.py
|
kentsanggds/api
|
651cdf7d496690722d6a4f5b51f04f4be97899d4
|
[
"MIT"
] | 169
|
2017-11-07T00:45:25.000Z
|
2022-03-12T00:08:59.000Z
|
tests/app/dao/test_marketings_dao.py
|
kentsanggds/api
|
651cdf7d496690722d6a4f5b51f04f4be97899d4
|
[
"MIT"
] | 1
|
2019-08-15T14:51:31.000Z
|
2019-08-15T14:51:31.000Z
|
from sqlalchemy.exc import IntegrityError
import pytest
from app.dao.marketings_dao import (
dao_update_marketing, dao_get_marketing_by_id, dao_get_marketings
)
from app.models import Marketing
from tests.db import create_marketing
class WhenUsingMarketingsDAO(object):
    def it_creates_a_marketing(self, db_session):
marketing = create_marketing()
assert Marketing.query.count() == 1
marketing_from_db = Marketing.query.filter(Marketing.id == marketing.id).first()
assert marketing == marketing_from_db
def it_updates_a_marketing_dao(self, db, db_session, sample_marketing):
dao_update_marketing(sample_marketing.id, description='New posters')
marketing_from_db = Marketing.query.filter(Marketing.id == sample_marketing.id).first()
assert marketing_from_db.description == 'New posters'
def it_gets_all_active_marketings(self, db, db_session, sample_marketing):
create_marketing(description='Email')
create_marketing(description='Old magazine', active=False)
fetched_marketings = dao_get_marketings()
assert len(fetched_marketings) == 2
    def it_gets_a_marketing_by_id(self, db, db_session, sample_marketing):
marketing = create_marketing(description='Email')
fetched_marketing = dao_get_marketing_by_id(marketing.id)
assert fetched_marketing == marketing
def it_doesnt_create_marketings_with_same_description(self, db_session, sample_marketing):
with pytest.raises(expected_exception=IntegrityError):
create_marketing(description=sample_marketing.description)
marketings = Marketing.query.all()
assert len(marketings) == 1
    def it_doesnt_update_marketings_with_same_description(self, db_session, sample_marketing):
marketing = create_marketing(description='New posters')
with pytest.raises(expected_exception=IntegrityError):
dao_update_marketing(str(marketing.id), description=sample_marketing.description)
found_marketing = Marketing.query.filter(Marketing.id == marketing.id).one()
assert found_marketing.description == 'New posters'
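# --- Hedged sketch (editor addition) of the fixture these tests assume; the
# real conftest.py is not shown, so the default description is hypothetical.
#     @pytest.fixture
#     def sample_marketing(db_session):
#         return create_marketing(description='Leaflets')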
| 38.821429
| 95
| 0.75069
| 258
| 2,174
| 5.996124
| 0.228682
| 0.063995
| 0.048481
| 0.077569
| 0.411118
| 0.35488
| 0.215255
| 0.180995
| 0
| 0
| 0
| 0.001659
| 0.168353
| 2,174
| 55
| 96
| 39.527273
| 0.853982
| 0
| 0
| 0.054054
| 0
| 0
| 0.030359
| 0
| 0
| 0
| 0
| 0
| 0.189189
| 1
| 0.162162
| false
| 0
| 0.135135
| 0
| 0.324324
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af0ac97f6ae7709623b9997f5f301e7547049b9a
| 14,898
|
py
|
Python
|
tracetools_analysis/tracetools_analysis/data_model/ros2.py
|
christophebedard/tracetools_analysis
|
1dfb747b62311ee370ed392a0ad4a5cd2d11d3be
|
[
"Apache-2.0"
] | 6
|
2020-04-02T21:10:09.000Z
|
2021-06-07T06:56:16.000Z
|
tracetools_analysis/tracetools_analysis/data_model/ros2.py
|
christophebedard/tracetools_analysis
|
1dfb747b62311ee370ed392a0ad4a5cd2d11d3be
|
[
"Apache-2.0"
] | null | null | null |
tracetools_analysis/tracetools_analysis/data_model/ros2.py
|
christophebedard/tracetools_analysis
|
1dfb747b62311ee370ed392a0ad4a5cd2d11d3be
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Robert Bosch GmbH
# Copyright 2020-2021 Christophe Bedard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for ROS 2 data model."""
import numpy as np
import pandas as pd
from . import DataModel
from . import DataModelIntermediateStorage
class Ros2DataModel(DataModel):
"""
Container to model pre-processed ROS 2 data for analysis.
This aims to represent the data in a ROS 2-aware way.
"""
def __init__(self) -> None:
"""Create a Ros2DataModel."""
super().__init__()
# Objects (one-time events, usually when something is created)
self._contexts: DataModelIntermediateStorage = []
self._nodes: DataModelIntermediateStorage = []
self._rmw_publishers: DataModelIntermediateStorage = []
self._rcl_publishers: DataModelIntermediateStorage = []
self._rmw_subscriptions: DataModelIntermediateStorage = []
self._rcl_subscriptions: DataModelIntermediateStorage = []
self._subscription_objects: DataModelIntermediateStorage = []
self._services: DataModelIntermediateStorage = []
self._clients: DataModelIntermediateStorage = []
self._timers: DataModelIntermediateStorage = []
self._timer_node_links: DataModelIntermediateStorage = []
self._callback_objects: DataModelIntermediateStorage = []
self._callback_symbols: DataModelIntermediateStorage = []
self._lifecycle_state_machines: DataModelIntermediateStorage = []
# Events (multiple instances, may not have a meaningful index)
self._rclcpp_publish_instances: DataModelIntermediateStorage = []
self._rcl_publish_instances: DataModelIntermediateStorage = []
self._rmw_publish_instances: DataModelIntermediateStorage = []
self._rmw_take_instances: DataModelIntermediateStorage = []
self._rcl_take_instances: DataModelIntermediateStorage = []
self._rclcpp_take_instances: DataModelIntermediateStorage = []
self._callback_instances: DataModelIntermediateStorage = []
self._lifecycle_transitions: DataModelIntermediateStorage = []
def add_context(
self, context_handle, timestamp, pid, version
) -> None:
self._contexts.append({
'context_handle': context_handle,
'timestamp': timestamp,
'pid': pid,
'version': version,
})
def add_node(
self, node_handle, timestamp, tid, rmw_handle, name, namespace
) -> None:
self._nodes.append({
'node_handle': node_handle,
'timestamp': timestamp,
'tid': tid,
'rmw_handle': rmw_handle,
'name': name,
'namespace': namespace,
})
def add_rmw_publisher(
self, handle, timestamp, gid,
) -> None:
self._rmw_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_publisher(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_publishers.append({
'publisher_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_publish_instance(
self, timestamp, message,
) -> None:
self._rclcpp_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rcl_publish_instance(
self, publisher_handle, timestamp, message,
) -> None:
self._rcl_publish_instances.append({
'publisher_handle': publisher_handle,
'timestamp': timestamp,
'message': message,
})
def add_rmw_publish_instance(
self, timestamp, message,
) -> None:
self._rmw_publish_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rmw_subscription(
self, handle, timestamp, gid
) -> None:
self._rmw_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'gid': gid,
})
def add_rcl_subscription(
self, handle, timestamp, node_handle, rmw_handle, topic_name, depth
) -> None:
self._rcl_subscriptions.append({
'subscription_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'topic_name': topic_name,
'depth': depth,
})
def add_rclcpp_subscription(
self, subscription_pointer, timestamp, subscription_handle
) -> None:
self._subscription_objects.append({
'subscription': subscription_pointer,
'timestamp': timestamp,
'subscription_handle': subscription_handle,
})
def add_service(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._services.append({
            'service_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_client(
self, handle, timestamp, node_handle, rmw_handle, service_name
) -> None:
self._clients.append({
'client_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
'rmw_handle': rmw_handle,
'service_name': service_name,
})
def add_timer(
self, handle, timestamp, period, tid
) -> None:
self._timers.append({
'timer_handle': handle,
'timestamp': timestamp,
'period': period,
'tid': tid,
})
def add_timer_node_link(
self, handle, timestamp, node_handle
) -> None:
self._timer_node_links.append({
'timer_handle': handle,
'timestamp': timestamp,
'node_handle': node_handle,
})
def add_callback_object(
self, reference, timestamp, callback_object
) -> None:
self._callback_objects.append({
'reference': reference,
'timestamp': timestamp,
'callback_object': callback_object,
})
def add_callback_symbol(
self, callback_object, timestamp, symbol
) -> None:
self._callback_symbols.append({
'callback_object': callback_object,
'timestamp': timestamp,
'symbol': symbol,
})
def add_callback_instance(
self, callback_object, timestamp, duration, intra_process
) -> None:
self._callback_instances.append({
'callback_object': callback_object,
'timestamp': np.datetime64(timestamp, 'ns'),
'duration': np.timedelta64(duration, 'ns'),
'intra_process': intra_process,
})
def add_rmw_take_instance(
self, subscription_handle, timestamp, message, source_timestamp, taken
) -> None:
self._rmw_take_instances.append({
'subscription_handle': subscription_handle,
'timestamp': timestamp,
'message': message,
'source_timestamp': source_timestamp,
'taken': taken,
})
def add_rcl_take_instance(
self, timestamp, message
) -> None:
self._rcl_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_rclcpp_take_instance(
self, timestamp, message
) -> None:
self._rclcpp_take_instances.append({
'timestamp': timestamp,
'message': message,
})
def add_lifecycle_state_machine(
self, handle, node_handle
) -> None:
self._lifecycle_state_machines.append({
'state_machine_handle': handle,
'node_handle': node_handle,
})
def add_lifecycle_state_transition(
self, state_machine_handle, start_label, goal_label, timestamp
) -> None:
self._lifecycle_transitions.append({
'state_machine_handle': state_machine_handle,
'start_label': start_label,
'goal_label': goal_label,
'timestamp': timestamp,
})
def _finalize(self) -> None:
# Some of the lists of dicts might be empty, and setting
# the index for an empty dataframe leads to an error
self.contexts = pd.DataFrame.from_dict(self._contexts)
if self._contexts:
self.contexts.set_index('context_handle', inplace=True, drop=True)
self.nodes = pd.DataFrame.from_dict(self._nodes)
if self._nodes:
self.nodes.set_index('node_handle', inplace=True, drop=True)
self.rmw_publishers = pd.DataFrame.from_dict(self._rmw_publishers)
if self._rmw_publishers:
self.rmw_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rcl_publishers = pd.DataFrame.from_dict(self._rcl_publishers)
if self._rcl_publishers:
self.rcl_publishers.set_index('publisher_handle', inplace=True, drop=True)
self.rmw_subscriptions = pd.DataFrame.from_dict(self._rmw_subscriptions)
if self._rmw_subscriptions:
self.rmw_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.rcl_subscriptions = pd.DataFrame.from_dict(self._rcl_subscriptions)
if self._rcl_subscriptions:
self.rcl_subscriptions.set_index('subscription_handle', inplace=True, drop=True)
self.subscription_objects = pd.DataFrame.from_dict(self._subscription_objects)
if self._subscription_objects:
self.subscription_objects.set_index('subscription', inplace=True, drop=True)
self.services = pd.DataFrame.from_dict(self._services)
if self._services:
self.services.set_index('service_handle', inplace=True, drop=True)
self.clients = pd.DataFrame.from_dict(self._clients)
if self._clients:
self.clients.set_index('client_handle', inplace=True, drop=True)
self.timers = pd.DataFrame.from_dict(self._timers)
if self._timers:
self.timers.set_index('timer_handle', inplace=True, drop=True)
self.timer_node_links = pd.DataFrame.from_dict(self._timer_node_links)
if self._timer_node_links:
self.timer_node_links.set_index('timer_handle', inplace=True, drop=True)
self.callback_objects = pd.DataFrame.from_dict(self._callback_objects)
if self._callback_objects:
self.callback_objects.set_index('reference', inplace=True, drop=True)
self.callback_symbols = pd.DataFrame.from_dict(self._callback_symbols)
if self._callback_symbols:
self.callback_symbols.set_index('callback_object', inplace=True, drop=True)
self.lifecycle_state_machines = pd.DataFrame.from_dict(self._lifecycle_state_machines)
if self._lifecycle_state_machines:
self.lifecycle_state_machines.set_index(
'state_machine_handle', inplace=True, drop=True)
self.rclcpp_publish_instances = pd.DataFrame.from_dict(self._rclcpp_publish_instances)
self.rcl_publish_instances = pd.DataFrame.from_dict(self._rcl_publish_instances)
self.rmw_publish_instances = pd.DataFrame.from_dict(self._rmw_publish_instances)
self.rmw_take_instances = pd.DataFrame.from_dict(self._rmw_take_instances)
self.rcl_take_instances = pd.DataFrame.from_dict(self._rcl_take_instances)
self.rclcpp_take_instances = pd.DataFrame.from_dict(self._rclcpp_take_instances)
self.callback_instances = pd.DataFrame.from_dict(self._callback_instances)
self.lifecycle_transitions = pd.DataFrame.from_dict(self._lifecycle_transitions)
def print_data(self) -> None:
print('====================ROS 2 DATA MODEL===================')
print('Contexts:')
print(self.contexts.to_string())
print()
print('Nodes:')
print(self.nodes.to_string())
print()
print('Publishers (rmw):')
print(self.rmw_publishers.to_string())
print()
print('Publishers (rcl):')
print(self.rcl_publishers.to_string())
print()
print('Subscriptions (rmw):')
print(self.rmw_subscriptions.to_string())
print()
print('Subscriptions (rcl):')
print(self.rcl_subscriptions.to_string())
print()
print('Subscription objects:')
print(self.subscription_objects.to_string())
print()
print('Services:')
print(self.services.to_string())
print()
print('Clients:')
print(self.clients.to_string())
print()
print('Timers:')
print(self.timers.to_string())
print()
print('Timer-node links:')
print(self.timer_node_links.to_string())
print()
print('Callback objects:')
print(self.callback_objects.to_string())
print()
print('Callback symbols:')
print(self.callback_symbols.to_string())
print()
print('Callback instances:')
print(self.callback_instances.to_string())
print()
print('Publish instances (rclcpp):')
print(self.rclcpp_publish_instances.to_string())
print()
print('Publish instances (rcl):')
print(self.rcl_publish_instances.to_string())
print()
print('Publish instances (rmw):')
print(self.rmw_publish_instances.to_string())
print()
print('Take instances (rmw):')
print(self.rmw_take_instances.to_string())
print()
print('Take instances (rcl):')
print(self.rcl_take_instances.to_string())
print()
print('Take instances (rclcpp):')
print(self.rclcpp_take_instances.to_string())
print()
print('Lifecycle state machines:')
print(self.lifecycle_state_machines.to_string())
print()
print('Lifecycle transitions:')
print(self.lifecycle_transitions.to_string())
print('==================================================')
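# --- Hedged usage sketch (editor addition, not part of the original module):
# populates a tiny model and builds the DataFrames. Assumes the package
# context so the relative imports above resolve; the handles and timestamps
# are illustrative values.
if __name__ == '__main__':
    model = Ros2DataModel()
    model.add_context(context_handle=1, timestamp=0, pid=4242, version='4.1.0')
    model.add_node(node_handle=2, timestamp=10, tid=4242, rmw_handle=3,
                   name='talker', namespace='/')
    model._finalize()  # turns the buffered dicts into indexed DataFrames
    print(model.nodes.to_string())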
| 37.716456
| 94
| 0.631293
| 1,521
| 14,898
| 5.884287
| 0.122288
| 0.018771
| 0.036872
| 0.046704
| 0.433743
| 0.335978
| 0.255978
| 0.169609
| 0.143017
| 0.09743
| 0
| 0.002364
| 0.261847
| 14,898
| 394
| 95
| 37.812183
| 0.811494
| 0.066251
| 0
| 0.389381
| 0
| 0
| 0.109636
| 0.006997
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073746
| false
| 0
| 0.011799
| 0
| 0.088496
| 0.20059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af0d81f9655852ff10a8be8a0499f540fd5bf5d2
| 1,543
|
py
|
Python
|
setup.py
|
KunihikoKido/elasticsearch-fabric
|
5dea163b455f954d31dc685cf2b4fec077aee50a
|
[
"MIT"
] | 10
|
2016-12-17T03:37:43.000Z
|
2019-09-09T23:00:40.000Z
|
setup.py
|
KunihikoKido/elasticsearch-fabric
|
5dea163b455f954d31dc685cf2b4fec077aee50a
|
[
"MIT"
] | null | null | null |
setup.py
|
KunihikoKido/elasticsearch-fabric
|
5dea163b455f954d31dc685cf2b4fec077aee50a
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import os
import sys
from setuptools import setup, find_packages
sys.path.append('./test')
from esfabric import __version__
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
if os.path.exists(os.path.join(os.path.dirname(__file__), 'README.txt')):
with open(os.path.join(os.path.dirname(__file__), 'README.txt')) as readme:
README = readme.read()
with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as requirements:
REQUIREMENTS = requirements.read().splitlines()
setup(
name='elasticsearch-fabric',
version=__version__,
packages=find_packages(),
install_requires=REQUIREMENTS,
license='MIT',
author='Kunihiko Kido',
author_email='kunihiko.kido@me.com',
url='https://github.com/KunihikoKido/elasticsearch-fabric',
description='This package provides a unified command line interface to Elasticsearch in Fabric.',
long_description=README,
platforms=['OS Independent'],
keywords=['elasticsearch', 'fabric'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2.7',
],
include_package_data=True,
    test_suite="tasks_test.suite",
scripts=['bin/es_bash_completion'],
)
| 32.829787
| 101
| 0.695399
| 182
| 1,543
| 5.697802
| 0.510989
| 0.052073
| 0.038573
| 0.046287
| 0.196721
| 0.150434
| 0.150434
| 0.150434
| 0.150434
| 0.079074
| 0
| 0.002338
| 0.168503
| 1,543
| 46
| 102
| 33.543478
| 0.805924
| 0.007777
| 0
| 0.052632
| 0
| 0
| 0.35448
| 0.014388
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.131579
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af16c33bdba13b28d77f33ac28f80dcfc81a9c64
| 11,704
|
py
|
Python
|
bin/server.py
|
tolstoyevsky/blackmagic
|
0be5f041cbd42d9fb140957f0946d0ac7cb68848
|
[
"Apache-2.0"
] | null | null | null |
bin/server.py
|
tolstoyevsky/blackmagic
|
0be5f041cbd42d9fb140957f0946d0ac7cb68848
|
[
"Apache-2.0"
] | 3
|
2018-12-08T16:51:11.000Z
|
2020-10-16T09:39:00.000Z
|
bin/server.py
|
tolstoyevsky/blackmagic
|
0be5f041cbd42d9fb140957f0946d0ac7cb68848
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import logging
import os
import os.path
import tornado.web
import tornado.options
from appleseed import AlpineIndexFile, DebianIndexFile
from cdtz import set_time_zone
from motor import MotorClient
from shirow.ioloop import IOLoop
from shirow.server import RPCServer, TOKEN_PATTERN, remote
from tornado.options import define, options
from blackmagic import defaults, docker
from blackmagic.db import Image
from blackmagic.codes import (
IMAGE_BUILDING_UNAVAILABLE,
IMAGE_IS_NOT_AVAILABLE_FOR_RECOVERY,
LOCKED,
READY,
RECOVERY_IMAGE_MISSING,
)
from blackmagic.decorators import only_if_initialized
from blackmagic.exceptions import RecoveryImageIsMissing
from images.models import Image as ImageModel
from images.serializers import ImageSerializer
define('base_systems_path',
default='/var/chroot',
help='The path to the directory which contains chroot environments '
'which, in turn, contain the Debian base system')
define('db_name',
default='cusdeb',
help='')
define('dominion_workspace',
default='/var/dominion/workspace/',
help='')
define('max_builds_number',
default=8,
type=int,
help='Maximum allowed number of builds at the same time.')
define('mongodb_host',
default='',
help='')
define('mongodb_port',
default='33018',
help='')
LOGGER = logging.getLogger('tornado.application')
class DistroDoesNotExist(Exception):
"""Exception raised by the get_os_name function if the specified suite is not valid. """
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/bm/token/' + TOKEN_PATTERN, RPCHandler),
]
super().__init__(handlers)
class RPCHandler(RPCServer):
base_packages_list = {}
users_list = {}
def __init__(self, application, request, **kwargs):
super().__init__(application, request, **kwargs)
self._global_lock = True
self._init_lock = False
self._collection = None
self._collection_name = ''
self._db = None
self._distro = None
self._target_device = None
self._base_packages_number = 0
self._base_packages_query = {}
self._selected_packages = []
self._configuration = dict(defaults.CONFIGURATION)
self._image = None
self._need_update = True
def destroy(self):
if self._need_update and self._image:
self._image.dump_sync()
def _init_mongodb(self):
client = MotorClient(options.mongodb_host, int(options.mongodb_port))
self._db = client[options.db_name]
async def _init(self, request, image_id=None, device_name=None, distro_name=None, flavour=None):
if self._init_lock:
request.ret(LOCKED)
self._init_lock = True
try:
self._image = Image(image_id=image_id, user_id=self.user_id, device_name=device_name,
distro_name=distro_name, flavour=flavour)
except RecoveryImageIsMissing:
request.ret(RECOVERY_IMAGE_MISSING)
if image_id:
self._selected_packages = self._image.selected_packages
self._configuration = self._image.configuration
self._init_mongodb()
self._collection_name = self._image.distro_name
self._collection = self._db[self._collection_name]
self._base_packages_query = {
'package': {
'$in': self.base_packages_list[self._collection_name],
},
}
self._base_packages_number = await self._collection.count_documents(self._base_packages_query)
LOGGER.debug('Finishing initialization')
self._init_lock = False
self._global_lock = False
@remote
async def init_new_image(self, request, device_name, distro_name, flavour):
await self._init(request, device_name=device_name, distro_name=distro_name, flavour=flavour)
request.ret_and_continue(self._image.image_id)
request.ret(READY)
@remote
async def init_existing_image(self, request, image_id):
await self._init(request, image_id=image_id)
request.ret(READY)
@remote
async def is_image_available_for_recovery(self, request, image_id):
try:
image = ImageModel.objects.get(image_id=image_id, status=ImageModel.UNDEFINED)
serializer = ImageSerializer(image)
request.ret(serializer.data)
except ImageModel.DoesNotExist:
request.ret_error(IMAGE_IS_NOT_AVAILABLE_FOR_RECOVERY)
@only_if_initialized
@remote
async def build(self, request):
from users.models import Person
if not Person.objects.filter(user__pk=self.user_id).exists():
request.ret_error(IMAGE_BUILDING_UNAVAILABLE)
self._image.enqueue()
await self._image.dump()
self._need_update = False
request.ret(READY)
@only_if_initialized
@remote
async def add_user(self, request, username, password):
self._image.pieman_user = {
'username': username,
'password': password,
}
request.ret(READY)
@only_if_initialized
@remote
async def change_root_password(self, request, password):
self._image.root_password = password
request.ret(READY)
@only_if_initialized
@remote
async def get_configuration(self, request):
request.ret(self._configuration)
@only_if_initialized
@remote
async def set_configuration(self, request, configuration):
for key in configuration:
if key in self._configuration:
self._configuration[key] = configuration[key]
self._image.configuration = self._configuration
request.ret(READY)
@only_if_initialized
@remote
async def get_packages_list(self, request, page_number, per_page, search_token=None):
if page_number > 0:
start_position = (page_number - 1) * per_page
else:
start_position = 0
find_query = {}
if search_token:
find_query.update({
'package': {'$regex': search_token, '$options': '-i'},
})
packages_list = []
async for document in self._collection.find(find_query).skip(start_position).limit(per_page):
# Originally _id is an ObjectId instance and it's not JSON serializable
document['_id'] = str(document['_id'])
if document['package'] in self.base_packages_list[self._collection_name]:
document['type'] = 'base'
if document['package'] in self._selected_packages:
document['type'] = 'selected'
packages_list.append(document)
request.ret(packages_list)
@only_if_initialized
@remote
async def get_base_packages_list(self, request, page_number, per_page):
start_position = (page_number - 1) * per_page if page_number > 0 else 0
collection = self._collection
base_packages_list = []
async for document in collection.find(
self._base_packages_query
).skip(start_position).limit(per_page):
# Originally _id is an ObjectId instance and it's not JSON serializable
document['_id'] = str(document['_id'])
base_packages_list.append(document)
request.ret(base_packages_list)
@only_if_initialized
@remote
async def get_selected_packages_list(self, request, page_number, per_page):
start_position = (page_number - 1) * per_page if page_number > 0 else 0
collection = self._collection
selected_packages_list = []
async for document in collection.find({
'package': {
'$in': self._selected_packages,
}
}).skip(start_position).limit(per_page):
# Originally _id is an ObjectId instance and it's not JSON serializable
document['_id'] = str(document['_id'])
selected_packages_list.append(document)
request.ret(selected_packages_list)
@only_if_initialized
@remote
async def get_initial_selected_packages_list(self, request):
request.ret(self._selected_packages)
@only_if_initialized
@remote
async def get_root_password(self, request):
request.ret(self._image.root_password)
@only_if_initialized
@remote
async def get_shells_list(self, request):
request.ret(['/bin/sh', '/bin/dash', '/bin/bash', '/bin/rbash'])
@only_if_initialized
@remote
async def get_packages_number(self, request, search_token=None):
find_query = {}
if search_token:
find_query.update({
'package': {'$regex': search_token, '$options': '-i'}
})
packages_number = await self._collection.count_documents(find_query)
request.ret(packages_number)
@only_if_initialized
@remote
async def get_base_packages_number(self, request):
request.ret(self._base_packages_number)
@only_if_initialized
@remote
async def get_selected_packages_number(self, request):
selected_packages_count = await self._collection.count_documents({
'package': {
'$in': self._selected_packages,
}
})
request.ret(selected_packages_count)
@only_if_initialized
@remote
async def get_user(self, request):
request.ret(self._image.pieman_user)
@only_if_initialized
@remote
async def get_users_list(self, request):
request.ret(self.users_list[self._collection_name])
@only_if_initialized
@remote
async def resolve(self, request, packages_list):
LOGGER.debug(f'Resolve dependencies for {packages_list}')
self._selected_packages = self._image.selected_packages = packages_list
request.ret([])
def main():
set_time_zone(docker.TIME_ZONE)
tornado.options.parse_command_line()
if not os.path.isdir(options.base_systems_path):
LOGGER.error('The directory specified via the base_systems_path parameter does not exist')
exit(1)
for item_name in os.listdir(options.base_systems_path):
item_path = os.path.join(options.base_systems_path, item_name)
if os.path.isdir(item_path):
debian_status_file = os.path.join(item_path, 'var/lib/dpkg/status')
alpine_installed_file = os.path.join(item_path, 'lib/apk/db/installed')
if os.path.exists(debian_status_file):
file_path = debian_status_file
index_file_cls = DebianIndexFile
elif os.path.exists(alpine_installed_file):
file_path = alpine_installed_file
index_file_cls = AlpineIndexFile
else:
continue
distro, suite, arch = item_name.split('-')
with index_file_cls(distro, suite, arch, file_path) as index_file:
RPCHandler.base_packages_list[item_name] = []
for package in index_file.iter_paragraphs():
RPCHandler.base_packages_list[item_name].append(package['package'])
passwd_file = os.path.join(item_path, 'etc/passwd')
with open(passwd_file, encoding='utf-8') as infile:
RPCHandler.users_list[item_name] = []
for line in infile:
RPCHandler.users_list[item_name].append(line.split(':'))
LOGGER.info('RPC server is ready!')
IOLoop().start(Application(), options.port)
if __name__ == "__main__":
main()
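# --- Hedged launch sketch (editor addition): the flags below are inferred
# from the define(...) calls above; --port is used in main() but defined
# elsewhere (presumably by shirow), so it is an assumption here.
#   python3 bin/server.py --base_systems_path=/var/chroot \
#       --mongodb_host=127.0.0.1 --mongodb_port=33018 --max_builds_number=8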
| 32.242424
| 102
| 0.655161
| 1,369
| 11,704
| 5.293645
| 0.178232
| 0.034497
| 0.038637
| 0.053953
| 0.411481
| 0.366634
| 0.287843
| 0.22837
| 0.194977
| 0.13785
| 0
| 0.002174
| 0.253161
| 11,704
| 362
| 103
| 32.331492
| 0.826908
| 0.026828
| 0
| 0.263345
| 0
| 0
| 0.063527
| 0.002109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017794
| false
| 0.02847
| 0.067616
| 0
| 0.103203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af1703462ef77f78c9cf88e812154fcfc28474a9
| 2,318
|
py
|
Python
|
postgres_audit_triggers/operations.py
|
carta/postgres_audit_triggers
|
fece63c5ad2924ff5e2aeb38d7bbd5bee6e6547c
|
[
"MIT"
] | 23
|
2018-03-26T11:18:03.000Z
|
2020-12-28T05:11:04.000Z
|
postgres_audit_triggers/operations.py
|
carta/postgres_audit_triggers
|
fece63c5ad2924ff5e2aeb38d7bbd5bee6e6547c
|
[
"MIT"
] | 1
|
2019-02-13T23:58:53.000Z
|
2020-07-01T18:16:13.000Z
|
postgres_audit_triggers/operations.py
|
carta/postgres_audit_triggers
|
fece63c5ad2924ff5e2aeb38d7bbd5bee6e6547c
|
[
"MIT"
] | 3
|
2019-03-26T15:50:38.000Z
|
2021-03-05T00:27:53.000Z
|
from django.db.migrations.operations.base import Operation
from django.utils.functional import cached_property
__all__ = (
'AddAuditTrigger',
'RemoveAuditTrigger',
)
class AddAuditTrigger(Operation):
reduces_to_sql = True
reversible = True
option_name = 'audit_trigger'
enabled = True
def __init__(self, model_name):
self.name = model_name
@cached_property
def model_name_lower(self):
return self.name.lower()
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.model_name_lower]
model_state.options[self.option_name] = self.enabled
state.reload_model(app_label, self.model_name_lower, delay=True)
def database_forwards(
self, app_label, schema_editor, from_state, to_state,
):
model = to_state.apps.get_model(app_label, self.name)
table = model._meta.db_table
with schema_editor.connection.cursor() as cursor:
cursor.execute('SELECT to_regclass(\'audit.logged_actions\')')
has_audit = cursor.fetchone()[0]
if has_audit:
schema_editor.execute(
'SELECT audit.audit_table(\'{}\')'.format(table),
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state,
):
model = to_state.apps.get_model(app_label, self.name)
table = model._meta.db_table
schema_editor.execute(
'DROP TRIGGER IF EXISTS audit_trigger_row ON {}'.format(table),
)
schema_editor.execute(
'DROP TRIGGER IF EXISTS audit_trigger_stm ON {}'.format(table),
)
def describe(self):
return 'Add audit triggers on model {}'.format(self.name)
class RemoveAuditTrigger(AddAuditTrigger):
enabled = False
def database_forwards(
self, app_label, schema_editor, from_state, to_state,
):
super().database_backwards(
app_label, schema_editor, from_state, to_state,
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state,
):
super().database_forwards(
app_label, schema_editor, from_state, to_state,
)
def describe(self):
return 'Remove audit triggers on model {}'.format(self.name)
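# --- Hedged usage sketch (editor addition): wiring the operations into a
# Django migration; the app label, dependency, and model name are
# hypothetical.
#     from django.db import migrations
#     from postgres_audit_triggers.operations import AddAuditTrigger
#     class Migration(migrations.Migration):
#         dependencies = [('myapp', '0001_initial')]
#         operations = [AddAuditTrigger('mymodel')]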
| 30.103896
| 75
| 0.654875
| 276
| 2,318
| 5.199275
| 0.264493
| 0.061324
| 0.058537
| 0.083624
| 0.478049
| 0.478049
| 0.441812
| 0.394425
| 0.394425
| 0.34216
| 0
| 0.000575
| 0.249353
| 2,318
| 76
| 76
| 30.5
| 0.824138
| 0
| 0
| 0.377049
| 0
| 0
| 0.10742
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147541
| false
| 0
| 0.032787
| 0.04918
| 0.344262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af190a09ca44bce44b5b0163ba1e2eceb805790a
| 18,922
|
py
|
Python
|
tests/unit/test_infra_communication.py
|
gauthier-emse/pyDcop
|
a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed
|
[
"BSD-3-Clause"
] | 28
|
2018-05-18T10:25:58.000Z
|
2022-03-05T16:24:15.000Z
|
tests/unit/test_infra_communication.py
|
gauthier-emse/pyDcop
|
a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed
|
[
"BSD-3-Clause"
] | 19
|
2018-09-21T21:50:15.000Z
|
2022-02-22T20:23:32.000Z
|
tests/unit/test_infra_communication.py
|
gauthier-emse/pyDcop
|
a51cc3f7d8ef9ee1f863beeca4ad60490862d2ed
|
[
"BSD-3-Clause"
] | 17
|
2018-05-29T19:54:07.000Z
|
2022-02-22T20:14:46.000Z
|
# BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from http.server import HTTPServer
from threading import Thread
from time import sleep
from unittest.mock import MagicMock, create_autospec, call, ANY
import pytest
import requests
from pydcop.infrastructure.communication import Messaging, \
InProcessCommunicationLayer, \
MPCHttpHandler, HttpCommunicationLayer, ComputationMessage, \
UnreachableAgent, MSG_MGT, UnknownAgent, UnknownComputation, MSG_ALGO
from pydcop.infrastructure.computations import Message
from pydcop.infrastructure.discovery import Discovery
def skip_http_tests():
    import os
    return os.environ.get('HTTP_TESTS') == 'NO'
@pytest.fixture
def local_messaging():
comm = InProcessCommunicationLayer()
comm.discovery = Discovery('a1', 'addr1')
messaging = Messaging('a1', comm)
return messaging
class TestMessaging(object):
def test_messaging_local_msg(self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.discovery.register_computation('c2', 'a1')
msg = MagicMock()
local_messaging.post_msg('c1', 'c2', msg)
(src, dest, o_msg, type), t = local_messaging.next_msg()
assert o_msg == msg
        assert dest == 'c2'
        assert src == 'c1'
def test_retry_when_posting_msg_to_unknown_computation(
self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.post_msg('c1', 'c2', 'a msg')
# c2 is unknown, the message should not be in the queue
full_msg, _ = local_messaging.next_msg()
assert full_msg is None
# Register c2 : the message will now be delivered to the queue
local_messaging.discovery.register_computation('c2', 'a1')
(src, dest, full_msg, type), _ = local_messaging.next_msg()
        assert full_msg == 'a msg'
def test_raise_when_posting_msg_from_unknown_computation(
self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.discovery.register_computation('c2', 'a2', 'addr2')
# Attempt to send a message to c2, from c3 which is not hosted locally
with pytest.raises(UnknownComputation):
local_messaging.post_msg('c3', 'c2', 'a msg')
def test_next_message_returns_None_when_no_msg(self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
full_msg, _ = local_messaging.next_msg()
assert full_msg is None
def test_msg_to_computation_hosted_on_another_agent(self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.discovery.register_computation('c2', 'a2', 'addr2')
local_messaging._comm.send_msg = MagicMock()
msg = MagicMock()
local_messaging.post_msg('c1', 'c2', msg)
# Check that the msg was passed to the communication layer
local_messaging._comm.send_msg.assert_called_with(
'a1', 'a2',
ComputationMessage('c1', 'c2', msg, ANY),
on_error=ANY)
# Check it's not in the local queue
full_msg, _ = local_messaging.next_msg()
assert full_msg is None
def test__metrics_local_msg(self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.discovery.register_computation('c2', 'a1')
local_messaging.discovery.register_computation('c3', 'a1')
msg = MagicMock()
msg.size = 42
local_messaging.post_msg('c1', 'c2', msg)
assert local_messaging.count_all_ext_msg == 0
assert local_messaging.size_all_ext_msg == 0
msg2 = MagicMock()
msg2.size = 12
local_messaging.post_msg('c1', 'c3', msg2)
assert local_messaging.count_all_ext_msg == 0
assert local_messaging.size_all_ext_msg == 0
def test__metrics_ext_msg(self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.discovery.register_computation('c2', 'a2', 'addr2')
local_messaging.discovery.register_computation('c3', 'a1')
local_messaging._comm.send_msg = MagicMock()
msg = MagicMock()
msg.size = 42
local_messaging.post_msg('c1', 'c2', msg)
assert local_messaging.size_ext_msg['c1'] == 42
assert local_messaging.count_ext_msg['c1'] == 1
assert local_messaging.count_all_ext_msg == 1
assert local_messaging.size_all_ext_msg == 42
msg2, msg3 = MagicMock(), MagicMock()
msg2.size, msg3.size = 12, 5
local_messaging.post_msg('c1', 'c2', msg2)
local_messaging.post_msg('c1', 'c3', msg3)
assert local_messaging.size_ext_msg['c1'] == 12 + 42
assert local_messaging.count_ext_msg['c1'] == 2
assert local_messaging.count_all_ext_msg == 2
assert local_messaging.size_all_ext_msg == 42 + 12
def test_do_not_count_mgt_messages(self, local_messaging):
local_messaging.discovery.register_computation('c1', 'a1')
local_messaging.discovery.register_computation('c2', 'a1')
local_messaging._comm.send_msg = MagicMock()
msg = MagicMock()
msg.size = 42
local_messaging.post_msg('c1', 'c2', msg, msg_type=MSG_MGT)
assert local_messaging.count_all_ext_msg == 0
assert local_messaging.size_all_ext_msg == 0
class TestInProcessCommunictionLayer(object):
def test_address(self):
        # for in-process, the address is the object itself
comm1 = InProcessCommunicationLayer()
assert comm1.address == comm1
def test_addresses_are_not_shared_accross_instances(self):
comm1 = InProcessCommunicationLayer()
comm1.discovery = Discovery('a1', 'addr1')
comm2 = InProcessCommunicationLayer()
comm2.discovery = Discovery('a2', 'addr2')
comm1.discovery.register_agent('a1', comm1)
with pytest.raises(UnknownAgent):
comm2.discovery.agent_address('a1')
def test_msg_to_another_agent(self):
comm1 = InProcessCommunicationLayer()
Messaging('a1', comm1)
comm1.discovery = Discovery('a1', comm1)
comm2 = InProcessCommunicationLayer()
Messaging('a2', comm2)
comm2.discovery = Discovery('a2', comm2)
comm2.receive_msg = MagicMock()
comm1.discovery.register_agent('a2', comm2)
full_msg = ('c1', 'c2', 'msg')
comm1.send_msg('a1', 'a2', full_msg)
comm2.receive_msg.assert_called_with('a1', 'a2', full_msg)
def test_received_msg_is_delivered_to_messaging_queue(self):
comm1 = InProcessCommunicationLayer()
Messaging('a1', comm1)
comm1.messaging.post_msg = MagicMock()
comm1.receive_msg('a2', 'a1', ('c2', 'c1', 'msg', MSG_MGT))
comm1.messaging.post_msg.assert_called_with('c2', 'c1', 'msg', 10)
def test_raise_when_sending_to_unknown_agent_fail_default(self):
comm1 = InProcessCommunicationLayer(on_error='fail')
comm1.discovery = Discovery('a1', comm1)
full_msg = ('c1', 'c2', 'msg', MSG_MGT)
with pytest.raises(UnknownAgent):
comm1.send_msg('a1', 'a2', full_msg)
def test_raise_when_sending_to_unknown_agent_fail_on_send(self):
comm1 = InProcessCommunicationLayer()
comm1.discovery = Discovery('a1', comm1)
full_msg = ('c1', 'c2', 'msg')
with pytest.raises(UnknownAgent):
comm1.send_msg('a1', 'a2', full_msg, on_error='fail')
def test_ignore_when_sending_to_unknown_agent_ignore_default(self):
comm1 = InProcessCommunicationLayer(on_error='ignore')
comm1.discovery = Discovery('a1', comm1)
full_msg = ('c1', 'c2', 'msg', MSG_MGT)
assert comm1.send_msg('a1', 'a2', full_msg)
def test_ignore_when_sending_to_unknown_agent_ignore_on_send(self):
comm1 = InProcessCommunicationLayer()
comm1.discovery = Discovery('a1', comm1)
full_msg = ('c1', 'c2', 'msg')
        assert comm1.send_msg('a1', 'a2', full_msg, on_error='ignore')
@pytest.mark.skip
def test_retry_when_sending_to_unknown_agent_retry_default(self):
comm1 = InProcessCommunicationLayer(on_error='retry')
comm1.discovery = Discovery('a1', comm1)
full_msg = ('c1', 'c2', 'msg')
assert not comm1.send_msg('a1', 'a2', full_msg)
comm2 = create_autospec(InProcessCommunicationLayer)
comm1.discovery.register_agent('a2', comm2)
        comm2.receive_msg.assert_called_with('a1', 'a2', full_msg)
@pytest.mark.skip
def test_retry_when_sending_to_unknown_agent_retry_on_send(self):
comm1 = InProcessCommunicationLayer(None)
comm1.discovery = Discovery('a1', comm1)
full_msg = ('c1', 'c2', 'msg')
        assert not comm1.send_msg('a1', 'a2', full_msg, on_error='retry')
comm2 = create_autospec(InProcessCommunicationLayer)
comm1.discovery.register_agent('a2', comm2)
comm2.receive_msg.assert_called_with('a1', 'a2', full_msg)
@pytest.fixture
def httpd():
server_address = ('127.0.0.1', 8001)
httpd = HTTPServer(server_address, MPCHttpHandler)
httpd.comm = MagicMock()
yield httpd
httpd.shutdown()
httpd.server_close()
class TestHttpHandler(object):
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_http_handler_one_message(self, httpd):
t = Thread(name='http_thread',
target=httpd.serve_forever)
t.start()
requests.post('http://127.0.0.1:8001/test',
json={'key': 'value'},
timeout=0.5)
sleep(0.5)
httpd.comm.on_post_message.assert_called_once_with(
'/test', None, None,
ComputationMessage(
                src_comp=None, dest_comp=None, msg={'key': 'value'},
msg_type=MSG_ALGO))
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_http_handler_several_messages(self, httpd):
t = Thread(name='http_thread',
target=httpd.serve_forever)
t.start()
requests.post('http://127.0.0.1:8001/test',
                      json={'key': 'value'},
timeout=0.5)
requests.post('http://127.0.0.1:8001/test2',
headers={'sender-agent': 'zero'},
                      json={'key': 'value2'},
timeout=0.5)
requests.post('http://127.0.0.1:8001/test3',
headers={'sender-agent': 'sender',
'dest-agent': 'dest',
'type': '15'},
                      json={'key': 'value3'},
timeout=0.5)
sleep(0.5)
httpd.comm.on_post_message.assert_has_calls([
call('/test', None, None,
ComputationMessage(src_comp=None,
dest_comp=None,
msg={'key': 'value'},
msg_type=MSG_ALGO)),
call('/test2', 'zero', None,
ComputationMessage(src_comp=None,
dest_comp=None,
msg={'key': 'value2'},
msg_type=MSG_ALGO)),
call('/test3', 'sender', 'dest',
ComputationMessage(src_comp=None,
dest_comp=None,
msg={'key': 'value3'},
msg_type=15)),
])
@pytest.fixture
def http_comms():
comm1 = HttpCommunicationLayer(('127.0.0.1', 10001))
comm1.discovery = Discovery('a1', ('127.0.0.1', 10001))
Messaging('a1', comm1)
comm2 = HttpCommunicationLayer(('127.0.0.1', 10002))
comm2.discovery = Discovery('a2', ('127.0.0.1', 10002))
Messaging('a2', comm2)
comm2.messaging.post_msg = MagicMock()
yield comm1, comm2
comm1.shutdown()
comm2.shutdown()
class TestHttpCommLayer(object):
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_one_message_between_two(self, http_comms):
comm1, comm2 = http_comms
comm1.discovery.register_computation('c2', 'a2', ('127.0.0.1', 10002))
comm2.discovery.register_computation('c1', 'a1', ('127.0.0.1', 10001))
comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('test', 'test'), MSG_ALGO))
comm2.messaging.post_msg.assert_called_with(
            'c1', 'c2', Message('test', 'test'), MSG_ALGO)
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_several_messages_between_two(self, http_comms):
comm1, comm2 = http_comms
comm1.discovery.register_computation('c1', 'a2', ('127.0.0.1', 10002))
comm2.discovery.register_computation('c2', 'a1', ('127.0.0.1', 10001))
comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('test', 'test1'), MSG_ALGO))
        comm1.send_msg(
            'a1', 'a2',
            ComputationMessage('c1', 'c2', Message('test', 'test2'), MSG_ALGO))
comm1.send_msg(
'a1', 'a2',
            ComputationMessage('c1', 'c2', Message('test', 'test3'), MSG_MGT))
comm1.send_msg(
'a1', 'a2',
            ComputationMessage('c1', 'c2', Message('test', 'test4'), MSG_ALGO))
comm2.messaging.post_msg.assert_has_calls([
call('c1', 'c2', Message('test', 'test1'), MSG_ALGO),
call('c1', 'c2', Message('test', 'test2'), MSG_ALGO),
call('c1', 'c2', Message('test', 'test3'), MSG_MGT),
call('c1', 'c2', Message('test', 'test4'), MSG_ALGO),
])
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_msg_to_unknown_computation_fail_mode(self, http_comms):
comm1, comm2 = http_comms
comm1.discovery.register_computation('c2', 'a2', ('127.0.0.1', 10002))
comm2.discovery.register_computation('c1', 'a1', ('127.0.0.1', 10001))
def raise_unknown(*args):
raise UnknownComputation('test')
comm2.messaging.post_msg = MagicMock(side_effect=raise_unknown)
with pytest.raises(UnknownComputation):
comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('a1', 't1'), MSG_ALGO),
on_error='fail')
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_msg_to_unknown_computation_ignore_mode(self, http_comms):
comm1, comm2 = http_comms
comm1.discovery.register_computation('c2', 'a2', ('127.0.0.1', 10002))
comm2.discovery.register_computation('c1', 'a1', ('127.0.0.1', 10001))
def raise_unknown(*args):
raise UnknownComputation('test')
comm2.messaging.post_msg = MagicMock(side_effect=raise_unknown)
        # Default mode is 'ignore': always returns True
assert comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('a1', 'test1'), MSG_ALGO))
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_msg_to_unknown_agent_fail_mode(self, http_comms):
comm1, comm2 = http_comms
# on a1, do NOT register a2, and still try to send a message to it
with pytest.raises(UnknownAgent):
comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('a1', 't1'), MSG_ALGO),
on_error='fail')
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_msg_to_unknown_agent_ignore_mode(self, http_comms):
comm1, comm2 = http_comms
# on a1, do NOT register a2, and still try to send a message to it
        # Default mode is 'ignore': always returns True
assert comm1.send_msg(
'a1', 'a2',
            ComputationMessage('c1', 'c2', Message('a1', 't1'), MSG_ALGO))
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_msg_to_unreachable_agent_fail_mode(self, http_comms):
comm1, comm2 = http_comms
# on a1, register a2 with the wrong port number
comm1.discovery.register_computation('c2', 'a2', ('127.0.0.1', 10006))
comm2.discovery.register_computation('c1', 'a1', ('127.0.0.1', 10001))
with pytest.raises(UnreachableAgent):
comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('a1', '1'), MSG_ALGO),
on_error='fail')
@pytest.mark.skipif(skip_http_tests(), reason='HTTP_TESTS == NO')
def test_msg_to_unreachable_agent_ignore_mode(self, http_comms):
comm1, comm2 = http_comms
# on a1, register a2 with the wrong port number
comm1.discovery.register_computation('c2', 'a2', ('127.0.0.1', 10006))
comm2.discovery.register_computation('c1', 'a1', ('127.0.0.1', 10001))
assert comm1.send_msg(
'a1', 'a2',
ComputationMessage('c1', 'c2', Message('a1', 't'), MSG_ALGO))
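# --- Hedged note (editor addition): the HTTP-based tests above are skipped
# when the environment variable HTTP_TESTS is set to NO (see skip_http_tests),
# e.g.:  HTTP_TESTS=NO pytest tests/unit/test_infra_communication.py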
| 37.395257
| 80
| 0.63228
| 2,299
| 18,922
| 4.977381
| 0.132231
| 0.072184
| 0.07096
| 0.011011
| 0.690378
| 0.65752
| 0.623962
| 0.567596
| 0.548895
| 0.513764
| 0
| 0.046368
| 0.245481
| 18,922
| 505
| 81
| 37.469307
| 0.755131
| 0.111933
| 0
| 0.530792
| 0
| 0
| 0.075289
| 0
| 0
| 0
| 0
| 0
| 0.11437
| 1
| 0.099707
| false
| 0
| 0.032258
| 0
| 0.152493
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|