hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6b97a216ca7bed17169dea75598a8db7b38aa938 | 3,225 | py | Python | tests/conftest.py | jeromedockes/neuroquery_image_search | 2222caf464de84694273a494ec2d00071b3d14a2 | [
"BSD-3-Clause"
] | 3 | 2021-01-26T20:27:24.000Z | 2021-09-28T19:51:36.000Z | tests/conftest.py | jeromedockes/neuroquery_image_search | 2222caf464de84694273a494ec2d00071b3d14a2 | [
"BSD-3-Clause"
] | null | null | null | tests/conftest.py | jeromedockes/neuroquery_image_search | 2222caf464de84694273a494ec2d00071b3d14a2 | [
"BSD-3-Clause"
] | 1 | 2021-01-21T22:27:16.000Z | 2021-01-21T22:27:16.000Z | from pathlib import Path
import tempfile
from unittest.mock import MagicMock
import pytest
import numpy as np
import pandas as pd
from scipy import sparse
import nibabel
import nilearn
from nilearn.datasets import _testing
from nilearn.datasets._testing import request_mocker # noqa: F401
def make_fake_img():
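    # small random 4x3x5 volume with an identity affine, used as a stand-in NIfTI image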
rng = np.random.default_rng(0)
img = rng.random(size=(4, 3, 5))
return nibabel.Nifti1Image(img, np.eye(4))
@pytest.fixture()
def fake_img():
return make_fake_img()
def make_fake_data():
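    # build a small fake NeuroQuery data directory and pack it into an archive
    # so that mocked downloads can serve it in place of the real OSF file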
n_voxels, n_components, n_studies, n_terms = 23, 8, 12, 9
rng = np.random.default_rng(0)
difumo_maps = rng.random((n_components, n_voxels))
    difumo_maps[rng.binomial(1, 0.3, size=difumo_maps.shape).astype(bool)] = 0  # boolean mask zeroes ~30% of entries
difumo_inverse_covariance = np.linalg.pinv(difumo_maps.dot(difumo_maps.T))
difumo_maps = sparse.csr_matrix(difumo_maps)
projections = rng.random((n_studies, n_components))
term_projections = rng.random((n_terms, n_components))
articles_info = pd.DataFrame({"pmid": np.arange(n_studies) + 100})
articles_info["title"] = [
f"title {pmid}" for pmid in articles_info["pmid"]
]
articles_info["pubmed_url"] = [
f"url {pmid}" for pmid in articles_info["pmid"]
]
mask = np.zeros(4 * 3 * 5, dtype=int)
mask[:n_voxels] = 1
mask = mask.reshape((4, 3, 5))
mask_img = nibabel.Nifti1Image(mask, np.eye(4))
doc_freq = pd.DataFrame(
{
"term": ["term_{i}" for i in range(n_terms)],
"document_frequency": np.arange(n_terms),
}
)
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir = Path(temp_dir)
sparse.save_npz(temp_dir / "difumo_maps.npz", difumo_maps)
np.save(
temp_dir / "difumo_inverse_covariance.npy",
difumo_inverse_covariance,
)
np.save(temp_dir / "projections.npy", projections)
np.save(temp_dir / "term_projections.npy", term_projections)
articles_info.to_csv(temp_dir / "articles-info.csv", index=False)
mask_img.to_filename(str(temp_dir / "mask.nii.gz"))
doc_freq.to_csv(
str(temp_dir / "document_frequencies.csv"), index=False
)
archive = _testing.dict_to_archive(
{"neuroquery_image_search_data": temp_dir}
)
return archive
@pytest.fixture(autouse=True)
def temp_data_dir(tmp_path_factory, monkeypatch):
home_dir = tmp_path_factory.mktemp("temp_home")
monkeypatch.setenv("HOME", str(home_dir))
monkeypatch.setenv("USERPROFILE", str(home_dir))
data_dir = home_dir / "neuroquery_data"
data_dir.mkdir()
monkeypatch.setenv("NEUROQUERY_DATA", str(data_dir))
@pytest.fixture(autouse=True, scope="function")
def map_mock_requests(request_mocker):
request_mocker.url_mapping[
"https://osf.io/mx3t4/download"
] = make_fake_data()
return request_mocker
@pytest.fixture(autouse=True)
def patch_nilearn(monkeypatch):
def fake_motor_task(*args, **kwargs):
return {"images": [make_fake_img()]}
monkeypatch.setattr(
nilearn.datasets, "fetch_neurovault_motor_task", fake_motor_task
)
monkeypatch.setattr("webbrowser.open", MagicMock())
| 32.25 | 78 | 0.68093 | 438 | 3,225 | 4.753425 | 0.321918 | 0.036984 | 0.01585 | 0.018732 | 0.074928 | 0.048991 | 0.027858 | 0 | 0 | 0 | 0 | 0.013143 | 0.197829 | 3,225 | 99 | 79 | 32.575758 | 0.791651 | 0.003101 | 0 | 0.047619 | 0 | 0 | 0.117336 | 0.033613 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.130952 | 0.02381 | 0.27381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b9e0d5209b7b62f3db7419587e03282debb98ce | 524 | py | Python | Praticas/pratica03/latitude_longitude.py | andrepinto42/Processamento-de-Linguagens | 98facba0d1c9ca751743b1c83dca7f441aa182e9 | [
"MIT"
] | 1 | 2022-03-18T21:39:47.000Z | 2022-03-18T21:39:47.000Z | Praticas/pratica03/latitude_longitude.py | andrepinto42/Processamento-de-Linguagens | 98facba0d1c9ca751743b1c83dca7f441aa182e9 | [
"MIT"
] | null | null | null | Praticas/pratica03/latitude_longitude.py | andrepinto42/Processamento-de-Linguagens | 98facba0d1c9ca751743b1c83dca7f441aa182e9 | [
"MIT"
] | null | null | null | import re
import sys
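# a signed real number: optional sign, integer digits, optional fractional part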
real_num = r'[+-]?\d+(?:\.\d+)?'
# Parentheses are needed so the regex captures the correct groups
coord = rf'\(({real_num}),\s*({real_num})\)'
for line in sys.stdin:
line = re.sub(coord,r"<point lat='\1', lon='\2' />",line)
if (line):
print(line)
quit()
# It can also be done like this (not reached because of the quit() above)
coord = rf'\((?P<lat>{real_num}),\s*(?P<lon>{real_num})\)'
for line in sys.stdin:
line = re.sub(coord,r"<point lat='\g<lat>', lon='\g<lon>' />",line)
if (line):
print(line) | 22.782609 | 71 | 0.574427 | 85 | 524 | 3.482353 | 0.458824 | 0.118243 | 0.054054 | 0.094595 | 0.445946 | 0.317568 | 0.317568 | 0.317568 | 0.317568 | 0.317568 | 0 | 0.004717 | 0.19084 | 524 | 23 | 72 | 22.782609 | 0.693396 | 0.179389 | 0 | 0.428571 | 0 | 0 | 0.378505 | 0.182243 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6b9f0ffcb6c75b079a5eb98125aa38eb4f61fd76 | 375 | py | Python | includes/prav_modules/test2.py | praveen868686/DAGAirflow-with-Python | 483fffc2e7f987e523ae3653a90869a67cdad886 | [
"MIT"
] | null | null | null | includes/prav_modules/test2.py | praveen868686/DAGAirflow-with-Python | 483fffc2e7f987e523ae3653a90869a67cdad886 | [
"MIT"
] | null | null | null | includes/prav_modules/test2.py | praveen868686/DAGAirflow-with-Python | 483fffc2e7f987e523ae3653a90869a67cdad886 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
def see():
m = pd.read_csv('C:/dag/Expenditure.csv')
#m = pd.read_csv('C:\dag\Expendture.csv')
# print(m.head())
    countt = m['Category'].value_counts(sort=True, ascending=True).to_frame()
print(countt)
    pivottable = m.pivot_table(index=['Category'], values=['Myself'], aggfunc='sum')
print(pivottable)
see()
| 25 | 83 | 0.656 | 55 | 375 | 4.381818 | 0.618182 | 0.024896 | 0.058091 | 0.082988 | 0.116183 | 0.116183 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 375 | 14 | 84 | 26.785714 | 0.765079 | 0.149333 | 0 | 0 | 0 | 0 | 0.148265 | 0.069401 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.333333 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ba002cacb0aea0efbea0d09c9f0563aeccf4db3 | 4,134 | py | Python | MLP/generate_args.py | AMNoureldin/COMP551-HW4 | 0c855372862300cc0454f144bb40b2e72ba93861 | [
"Apache-2.0"
] | 15 | 2021-03-18T03:00:15.000Z | 2022-02-28T04:42:54.000Z | MLP/generate_args.py | AMNoureldin/COMP551-HW4 | 0c855372862300cc0454f144bb40b2e72ba93861 | [
"Apache-2.0"
] | null | null | null | MLP/generate_args.py | AMNoureldin/COMP551-HW4 | 0c855372862300cc0454f144bb40b2e72ba93861 | [
"Apache-2.0"
] | 2 | 2021-11-05T15:50:20.000Z | 2022-01-16T11:48:27.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from datetime import datetime
from utils import *
import torch  # torch.load is called below; if utils already re-exports torch this import is redundant but harmless
#===== time stamp for experiment file names
timestamp = datetime.now()
timestamp = timestamp.strftime("%d-%m-%Y_%H%M")
script_name = 'main' # main script to be executed
#================================
# args for main script #
#================================
seed= 1 # setting random seed for reproducibility
#===== MODEL =====
#
model_type= 'MLP1'
no_bias= True # don't use biases in layers
make_linear= False # linear activation function (if False, then ReLU)
no_BN= True # disable BatchNorm
NTK_style= True # NTK-style parametrization of the network
base_width= 8
all_widths= [8, 32, 128, 216, 328, 512, 635]
fract_freeze_cl= 0 # allowed fraction of all cl-layer weights that may be frozen
dense_only= False # consider dense models only, no weight freezing
#===== TRAINING =====
#
no_ES= True # disable Early Stopping
train_subset_size= 2048 # train on a subset of the train set
mbs= 256 # mini-batch size
max_epochs= 300 # max number of training epochs
#===== DATASET =====
#
dataset= 'MNIST'
normalize_pixelwise= True
#=== for NTK-style nets, the LR value is width-dependent
# loading optimized LR values for each width from file
if NTK_style: bta_avg_and_lr= torch.load('optimized_LR_for_NTK_style_MLP1.pt')
# NWTF (for "Num. Weights To Freeze") is a dictionary with
# key = width
# val = [(nwtf_cl, nwtf_fc)_1, (nwtf_cl, nwtf_fc)_2, ...]
# i.e., a list of valid combinations of weights to freeze for the respective layer (cl and fc)
if dense_only:
NWTF = {base_width: [(0,0)]}
else:
NWTF = get_NWTF(base_width, all_widths, fract_freeze_cl)
#=== tags for file names
bias_tag='_no_bias' if no_bias else ''
NTK_tag='_NTK_style' if NTK_style else ''
act_fctn='Linear' if make_linear else 'ReLU'
job_configs=[]
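# build one job config per (width, nwtf_cl, nwtf_fc) combination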
for width, val in NWTF.items():
for nwtf_cl,nwtf_fc in val:
cur_base_width=width if nwtf_cl==nwtf_fc else base_width
# compose name for output dir
output_dir = f'{dataset}_{model_type}_{NTK_tag}'
output_dir+= f'_base_{cur_base_width}_width_{width}_{act_fctn}{bias_tag}'
if train_subset_size>0: output_dir+=f'_train_on_{train_subset_size}_samples'
if normalize_pixelwise: output_dir+=f'_pixelwise_normalization'
if NTK_style: # get LR from file
lrkey=f'{cur_base_width}_{width}'
lr=bta_avg_and_lr[lrkey]
else:
lr= 0.1
config ={
'base_width': int(cur_base_width),
'width': int(width),
'lr': lr,
'seed': seed,
'nwtf_cl': int(nwtf_cl),
'nwtf_fc': int(nwtf_fc),
'dataset': dataset,
'normalize_pixelwise': normalize_pixelwise,
'train_subset_size': train_subset_size,
'no_ES': no_ES,
'max_epochs': max_epochs,
'mbs': mbs,
'no_bias': no_bias,
'NTK_style': NTK_style,
'make_linear': make_linear,
'no_BN': no_BN,
'output_dir': output_dir
}
job_configs.append(config)
for config in job_configs:
my_str=f'\npython -m {script_name} '
for k, v in config.items():
if isinstance(v, bool):
if v: my_str+=f'--{k} '
else:
my_str+=f'--{k} {v} '
print(my_str)
| 32.046512 | 94 | 0.618287 | 577 | 4,134 | 4.211438 | 0.37435 | 0.032922 | 0.030864 | 0.024691 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015815 | 0.265844 | 4,134 | 128 | 95 | 32.296875 | 0.784843 | 0.376149 | 0 | 0.042254 | 0 | 0 | 0.174566 | 0.082149 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.042254 | 0 | 0.042254 | 0.014085 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ba35922a8da9226341017db55248451774263a5 | 38,712 | py | Python | EasyRegression.py | pankajchejara23/EasyRegression | 7f76d92c4a9d056a83bde6abc2fd6eb980602e44 | [
"MIT"
] | 1 | 2021-04-19T16:47:27.000Z | 2021-04-19T16:47:27.000Z | EasyRegression.py | pankajchejara23/EasyRegression | 7f76d92c4a9d056a83bde6abc2fd6eb980602e44 | [
"MIT"
] | null | null | null | EasyRegression.py | pankajchejara23/EasyRegression | 7f76d92c4a9d056a83bde6abc2fd6eb980602e44 | [
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
import librosa
import seaborn as sns
from sklearn.model_selection import train_test_split
import math
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.metrics import mean_squared_error, mean_absolute_error
import traceback
import statistics
# Regression Model
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lars
from sklearn.linear_model import BayesianRidge
from sklearn.linear_model import SGDRegressor
from sklearn.linear_model import RANSACRegressor
from pyfiglet import Figlet
from sklearn.model_selection import cross_val_score
from joblib import dump, load
from sklearn.kernel_ridge import KernelRidge
from sklearn.tree import DecisionTreeRegressor
from xgboost import XGBRegressor
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.ensemble import StackingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import cross_val_score
import statistics
from sklearn.model_selection import cross_validate
# Dimensionality reduction
from sklearn.decomposition import PCA
from sklearn import manifold
import numpy as np
from sklearn.model_selection import GridSearchCV
from scipy.special import entr
import random
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import metrics
from art import *
class EasyRegression:
def __init__(self):
print(text2art('Easy'))
print(text2art('Regression'))
self.seed = 40
self.strategy = None
self.parameterFound = dict()
self.configured = False
self.models = None
# Scalers
self.std = StandardScaler()
self.mmax = MinMaxScaler()
self.random_state = 42
self.feature_set = dict()
self.label_set = dict()
self.groups = None
self.datasets = None
self.label = None
self.flagDataset = False
self.flagGroup = False
self.flagParameterFind = False
self.train_test = None
self.cross_val = None
self.leave_group = None
self.leave_dataset = None
self.stratified = None
def loadFeature(self,feature_file,feature_type,feature_name):
if len(self.feature_set) == 0:
print('-------------------------------')
print(' STEP : Features loading')
print('-------------------------------')
if feature_type not in ['ind','grp']:
print('===> Error: Undefined feature type')
return
else:
try:
if feature_name in self.feature_set.keys():
                    print('===> Feature with name ',feature_name,' already exists. Choose a different name')
return
else:
tmp = pd.read_csv(feature_file)
if len(self.feature_set) > 0:
first_feat = self.feature_set[list(self.feature_set.keys())[0]]
if tmp.shape[0] != first_feat[2].shape[0]:
print('===> Error: Mismatch in feature size with previously added features ',first_feat[1] )
return
self.feature_set[feature_name] = [feature_type,feature_name,tmp]
print('===> Feature file:',feature_file,' is loaded successfully !')
print('===> Summary:')
print(' #instances:',tmp.shape[0])
print(' #attributes:',tmp.shape[1])
num_cols = tmp.select_dtypes(['int64','float64'])
print(' #numeric-attributes:',num_cols.shape[1])
print('')
return num_cols
except:
print('===> Error occurred while loading the file')
traceback.print_exc()
def loadLabels(self,label_file):
try:
print('-------------------------------')
print(' STEP : Labels loading')
print('-------------------------------')
tmp = pd.read_csv(label_file)
if len(self.feature_set) > 0:
first_feat = self.feature_set[list(self.feature_set.keys())[0]]
if tmp.shape[0] != first_feat[2].shape[0]:
print(' Error: Mismatch in feature size with loaded feature ',first_feat[1] )
return None
for label in tmp.columns:
self.label_set[label] = tmp[label]
print('===> Label file:',label_file,' is loaded successfully !')
print('===> Summary:')
print(' #labels:',len(tmp.columns.tolist()))
print(' labels:', tmp.columns.tolist())
print('')
return tmp
except:
print('===> Error occurred while loading the file:',label_file)
traceback.print_exc()
return None
def feature_name_check(self,feature_name):
if feature_name not in self.feature_set.keys():
print(' Feature name:', feature_name,' is not available.')
return None
def label_name_check(self,label_name):
if label_name not in self.label_set.keys():
print(' Label name:',label_name,' is not available.')
return None
def extractFeatures(self,data,cor=.80):
print('-------------------------------')
print(' STEP : Feature Extraction ')
print('-------------------------------')
correlated_features = set()
features = data
correlation_matrix = features.corr()
for i in range(len(correlation_matrix .columns)):
for j in range(i):
if abs(correlation_matrix.iloc[i, j]) > cor:
colname = correlation_matrix.columns[i]
correlated_features.add(colname)
#print('Correlated Features:')
#print(correlated_features)
features.drop(labels=correlated_features,axis=1,inplace=True)
print('===> ',len(correlated_features),' correlated features are removed.')
print('===> Final features shape:',features.shape)
return features
def findCorrelation(self,label_name=None,sort=True):
if self.dataReady == False:
print('Data is not ready yet for analysis.')
return
if label_name is not None:
if label_name in self.labels.columns:
tmp_features = self.features.copy()
tmp_features[label_name] = self.labels[label_name]
cor_table = tmp_features.corr()
print(' Correlation ')
print(' -------------------------------')
print(cor_table[label_name])
print(' -------------------------------')
else:
if self.labels.shape[1] > 1:
                print(' There is more than one label available.')
                print(self.labels.columns)
                print(' Default: the first column is used to compute the correlation')
label_name = self.labels.columns[0]
tmp_features = self.features.copy()
tmp_features[label_name] = self.labels[label_name]
cor_table = tmp_features.corr()
print(' Correlation ')
print(' -------------------------------')
print(cor_table[label_name])
print(' -------------------------------')
def setGroupFeatureLabels(self,feat_labels):
self.group_feature_labels = feat_labels
"""
This function performs group-level feature computation
supported fusions: Dimensionality reduction, Entropy, Gini, Average
"""
def getGroupFeatures(self,data):
group_feature_labels = ['add','del','speak','turns']
features_group = dict()
# iterate for each group-level feature
for grp_feature in group_feature_labels:
tmp = list()
# get all column names similar to grp_feature
for indiv_feature in data.columns:
if grp_feature in indiv_feature:
tmp.append(indiv_feature)
features_group[grp_feature] = tmp.copy()
return features_group
# preparing gini coefficient
def getGINI(self,data):
"""Calculate the Gini coefficient of a numpy array."""
print('-------------------------------')
print(' STEP : Feature Fusion using Gini')
print('-------------------------------')
group_features = self.getGroupFeatures(data)
gini = dict()
for key in group_features.keys():
tmp = data[group_features[key]].values
tmp = tmp + 0.0000001
tmp = np.sort(tmp)
index = np.arange(1,tmp.shape[1]+1)
n = tmp.shape[1]
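            # sorted-values identity: G = sum_i((2*i - n - 1) * x_i) / (n * sum(x))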
key = 'grp_gini_'+key
gini[key] = ((np.sum((2 * index - n - 1) * tmp,axis=1)) / (n * np.sum(tmp,axis=1))) #Gini coefficient
gini_features = pd.DataFrame(gini)
return gini_features
# Compute entropy features for individual features
def getEntropy(self,data):
print('-------------------------------')
print(' STEP : Feature Fusion using Entropy')
print('-------------------------------')
group_features = self.getGroupFeatures(data)
entropy = dict()
for key in group_features.keys():
tmp = data[group_features[key]].values
tmp = tmp
tmp_sum = tmp.sum(axis=1,keepdims=True) + .0000000000001
p = tmp/tmp_sum
key = 'grp_entropy_'+key
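            # Shannon entropy of each row's distribution, converted from nats to bits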
entropy[key] = entr(p).sum(axis=1)/np.log(2)
entropy_features = pd.DataFrame(entropy)
return entropy_features
"""
Apply dimentionality reduction on features
PCA
"""
def Scaling(self,data,algo):
print('-------------------------------')
print(' STEP : Feature Scaling')
print('-------------------------------')
if algo in ['std','mmax']:
if algo == 'std':
res = pd.DataFrame(self.std.fit_transform(data), columns=data.columns)
print('===> Successfully applied Standard Scaling')
return res
elif algo == 'mmax':
res = pd.DataFrame(self.mmax.fit_transform(data), columns=data.columns)
print('===> Successfully applied MinMax Scaling')
return res
else:
print('===> Error: Unsupported scaling method')
return None
def DimRed(self,algo,data,params=None):
print('-------------------------------')
print(' STEP : Feature fusion using DimRed')
print('-------------------------------')
if algo not in ['pca','mds','isomap','tsne']:
            print('===> Error: Unsupported dimension reduction algorithm specified')
return None
else:
if algo!='pca' and len(params) ==0:
print('===> Error: Specify n_components/n_neighbors parameters')
return None
else:
# Dimensionality reduction
X_train, X_test, y_train, y_test = train_test_split(data,self.label_set[self.label],train_size=.7,random_state=self.seed)
self.pca = PCA(random_state = self.seed)
self.mds = manifold.MDS(n_components=params['n_components'],max_iter=100,n_init=1,random_state = self.seed)
self.isomap = manifold.Isomap(n_neighbors=params['n_neighbors'],n_components=params['n_components'])
self.tsne = manifold.TSNE(n_components=params['n_components'],init='pca',random_state = self.seed)
if algo == 'pca':
self.pca.fit(X_train)
pca_features = self.pca.transform(data)
print('===> Successfully applied PCA')
pca_columns = [None] * pca_features.shape[1]
for k in range(pca_features.shape[1]):
pca_columns[k] = 'pca_' + str(k)
return pd.DataFrame(pca_features,columns=pca_columns)
if algo == 'mds':
self.mds.fit(X_train)
mds_features = self.mds.transform(data)
mds_columns = [None] * mds_features.shape[1]
for k in range(mds_features.shape[1]):
mds_columns[k] = 'mds_' + str(k)
print('===> Successfully applied MDS')
return pd.DataFrame(mds_features,columns=mds_columns)
if algo== 'isomap':
self.isomap.fit(X_train)
isomap_features = self.isomap.transform(data)
print('===> Successfully applied ISOMAP')
isomap_columns = [None] * isomap_features.shape[1]
for k in range(isomap_features.shape[1]):
isomap_columns[k] = 'iso_' + str(k)
return pd.DataFrame(isomap_features,columns=isomap_columns)
if algo=='tsne':
tsne_features = self.tsne.fit_transform(data)
print('===> Successfully applied t-SNE')
tsne_columns = [None] * tsne_features.shape[1]
for k in range(tsne_features.shape[1]):
tsne_columns[k] = 'tsne_' + str(k)
                    return pd.DataFrame(tsne_features, columns=tsne_columns)
def loadConfiguredModules(self,modules):
print('-------------------------------')
        print(' STEP : Configured Regression Models Loaded')
print('-------------------------------')
self.models = modules
self.configured = True
def regressionModelInitialize(self):
print('-------------------------------')
        print(' STEP : Regression Models Initialised')
print('-------------------------------')
self.models = dict()
self.params=dict()
self.models['knn'] = KNeighborsRegressor()
self.models['rf'] = RandomForestRegressor(random_state = self.seed)
self.models['ada'] = AdaBoostRegressor(random_state = self.seed)
self.models['gb'] = GradientBoostingRegressor(random_state = self.seed)
self.models['xg'] = XGBRegressor(random_state = self.seed)
self.models['mlp'] = MLPRegressor()
self.models['svm'] = SVR()
self.models['vot'] = VotingRegressor([('knn',self.models['knn']),('ada',self.models['ada']),('rand',self.models['rf']),('svm',self.models['svm'])])
        # parameter grids used by GridSearchCV to find optimal hyperparameters
self.params['knn'] ={'n_neighbors':[2,3,4,5],'algorithm':['auto', 'ball_tree', 'kd_tree', 'brute']}
self.params['rf'] = {'max_depth':[2,3,4,5,6],'n_estimators':[50,100,150,200],'min_samples_split':[3,4,5]}
self.params['ada'] = {'learning_rate':[.01,.001,.0001],'n_estimators':[50,100,150,200],'loss':['linear', 'square', 'exponential']}
self.params['gb'] = {'learning_rate':[.01,.001,.0001],'n_estimators':[50,100,150,200],'loss':['ls', 'lad', 'huber', 'quantile'],'min_samples_split':[3,4,5]}
self.params['xg']={'booster':['gbtree', 'gblinear','dart']}
self.params['mlp']={'solver':['lbfgs','sgd','adam'],'activation':['identity', 'logistic', 'tanh', 'relu'],'hidden_layer_sizes':[(5,5,5),(5,4,3),(10,10,5)]}
k=['rbf', 'linear','poly','sigmoid']
c= [1,10,100,.1]
g=[.0001,.001,.001,.01,.1]
self.params['svm']=dict(kernel=k, C=c, gamma=g)
print('-------------------------------------------')
print('===> K-Nearest Neighbors initialized')
print('===> Random Forest initialized')
print('===> AdaBoost initialized')
print('===> Gradient Boost initialized')
print('===> XGBoost initialized')
print('===> Neural Network initialized')
print('===> SVM initialized')
        print('===> Voting regressor with KNN, AdaBoost, SVM and Random Forest')
def findParametersAndEvaluate(self,data,strategy,label_name,group=None,dataset=None,cv=5):
self.strategy = strategy
self.results = {}
print('-------------------------------')
print(' STEP : Finding Parameters & Evaluate Models')
print('-------------------------------')
self.label_name_check(label_name)
#print(self.labelset.columns)
# store performance data for each strategy
if (strategy == 'train_test_split' or strategy == 'all'):
self.train_test = dict()
for model in self.models.keys():
self.train_test[model] = None
print('===> Evaluation strategy: Train and Test Split ')
X_train, X_test, y_train, y_test = train_test_split(data,self.label_set[label_name],train_size=.7,random_state=self.seed)
print('===> Parameters find-> Start')
for model in self.models.keys():
if model == 'vot':
continue
if not self.configured:
gd = GridSearchCV(self.models[model],self.params[model],cv=cv,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
print(' Parameters for ',model,': ',gd.best_params_)
self.models[model] = gd.best_estimator_
print('===> Parameters find-> End')
test_performances = dict()
print('===> Test data performance[RMSE] ')
for model in self.models.keys():
self.models[model].fit(X_train,y_train)
test_performances[model] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',test_performances[model])
self.train_test[model] = test_performances[model]
print(self.train_test)
self.results['train_test'] = self.train_test
if (strategy == 'cross_val' or strategy == 'all'):
self.cross_val = dict()
cross_val = dict()
for model in self.models.keys():
self.cross_val[model] = None
print('==============================================')
print('Evaluation strategy: Cross Validation')
print('==============================================')
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(data,self.label_set[label_name])
print(' Parameters: ',gd.best_params_)
self.models[model] = gd.best_estimator_
cross_val[model] = cross_val_score(self.models[model],data,self.label_set[label_name],scoring='neg_root_mean_squared_error',cv=cv)
#print(' Score[',model,']:',cross_val_scores[model])
cross_val_mean = -1 * statistics.mean(cross_val[model])
cross_val_var = statistics.variance(cross_val[model])
self.cross_val[model] = [cross_val_mean,cross_val_var]
self.results['cross_val'] = self.cross_val
if (strategy == 'leave_one_group_out' or strategy == 'all'):
self.leave_group = dict()
for model in self.models.keys():
self.leave_group[model] = None
print('==============================================')
print('Evaluation strategy: Leave one group out')
print('==============================================')
logo = LeaveOneGroupOut()
n_splits = logo.get_n_splits(groups=group)
error= dict()
for model in self.models.keys():
error[model] = [None]*n_splits
k =0
for train_index, test_index in logo.split(data,self.label_set[label_name],group):
#print(test_index)
X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index]
X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index]
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
print(' Parameters: ',gd.best_params_)
estimator = gd.best_estimator_
self.models[model] = estimator
self.models[model].fit(X_train,y_train)
error[model][k] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',error[model])
k = k+1
for model in self.models.keys():
err_mean = statistics.mean(error[model])
err_var = statistics.variance(error[model])
self.leave_group[model] = [err_mean,err_var]
self.results['leave_group'] = self.leave_group
if (strategy == 'leave_one_dataset_out' or strategy == 'all'):
self.leave_dataset = dict()
for model in self.models.keys():
self.leave_dataset[model] = None
print('==============================================')
print('Evaluation strategy: Leave one dataset out')
print('==============================================')
logo = LeaveOneGroupOut()
n_splits = logo.get_n_splits(groups=dataset)
error= dict()
for model in self.models.keys():
error[model] = [None]*n_splits
k =0
for train_index, test_index in logo.split(data,self.label_set[label_name],dataset):
X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index]
X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index]
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
#print(' Parameters: ',gd.best_params_)
estimator = gd.best_estimator_
self.models[model] = estimator
self.models[model].fit(X_train,y_train)
error[model][k] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',error[model])
k = k+1
for model in self.models.keys():
err_mean = statistics.mean(error[model])
err_var = statistics.variance(error[model])
self.leave_dataset[model] = [err_mean,err_var]
self.results['leave_dataset'] = self.leave_dataset
if (strategy=='sorted_stratified' or strategy == 'all') :
self.stratified = dict()
for model in self.models.keys():
self.stratified[model] = None
# idea from https://scottclowe.com/2016-03-19-stratified-regression-partitions/
print('==============================================')
print('Evaluation strategy: Sorted Stratification')
print('==============================================')
label_df = pd.DataFrame(self.label_set)
indices = label_df.sort_values(by=[label_name]).index.tolist()
splits = dict()
error = dict()
for model in self.models.keys():
error[model] = [None]*cv
for i in range(cv):
splits[i] = list()
for i in range(len(indices)):
if i%cv == 0:
pick = random.sample(range(cv),cv)
cur_pick = pick.pop()
splits[cur_pick].append(indices[i])
for i in range(cv):
test_index = splits[i]
train_index = []
for j in range(cv):
if j != i:
train_index = train_index + splits[j]
##########################################
# Code to training model on sorted stratified set
X_train, y_train = data.iloc[train_index],self.label_set[label_name][train_index]
X_test, y_test = data.iloc[test_index],self.label_set[label_name][test_index]
for model in self.models.keys():
if model != 'vot' and not self.configured:
print(' ==> Finding params for ',model)
gd = GridSearchCV(self.models[model],self.params[model],cv=10,scoring='neg_root_mean_squared_error')
gd.fit(X_train,y_train)
print(' Parameters: ',gd.best_params_)
estimator = gd.best_estimator_
self.models[model] = estimator
self.models[model].fit(X_train,y_train)
error[model][i] = mean_squared_error(y_test,self.models[model].predict(X_test),squared=False)
#print(' Model[',model,']:',error[model])
for model in self.models.keys():
err_mean = statistics.mean(error[model])
err_var = statistics.variance(error[model])
self.stratified[model] = [err_mean,err_var]
##########################################
self.results['stratified'] = self.stratified
        # the branches above are independent ifs, so validate the strategy name here;
        # an else on the last branch would wrongly reject every strategy except
        # 'sorted_stratified' and 'all'
        if strategy not in ('train_test_split', 'cross_val', 'leave_one_group_out',
                            'leave_one_dataset_out', 'sorted_stratified', 'all'):
            print('Unsupported evaluation strategy')
            return None
return self.results
# Preparing dataframe with results for report generation
"""
if strategy == 'train_test_split':
df = pd.DataFrame(columns = ['model','train_test])
for model in self.models.keys():
df = df.append({'model':model,'train_test':self.train_test[model]},ignore_index=True)
if strategy == 'cross_val':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'leave_one_group_out':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'leave_one_dataset_out':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'sorted_stratified':
df = pd.DataFrame(columns = ['model','train_test_mean','train_test_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test_mean':self.train_test[model][0],'train_test_var':self.train_test[model][1]},ignore_index=True)
if strategy == 'all':
df = pd.DataFrame(columns = ['model','train_test','cross_val','leave_group','leave_dataset','stratified'])
for model in self.models.keys():
df = df.append({'model':model,'train_test':self.train_test[model],'cross_val':self.cross_val[model],'leave_group':self.leave_group[model],'leave_dataset':self.leave_dataset[model],'stratified':self.stratified[model]},ignore_index=True)
return df
"""
def report(self,currentOutput,report_name=''):
df = pd.DataFrame(columns = ['model','train_test','cross_val_mean','cross_val_var','leave_group_mean','leave_group_var','leave_dataset_mean','leave_dataset_var','stratified_mean','stratified_var'])
for model in self.models.keys():
df = df.append({'model':model,'train_test':self.train_test[model],'cross_val_mean':self.cross_val[model][0],'cross_val_var':self.cross_val[model][1],'leave_group_mean':self.leave_group[model][0],'leave_group_var':self.leave_group[model][1],'leave_dataset_mean':self.leave_dataset[model][0],'leave_dataset_var':self.leave_dataset[model][1],'stratified_mean':self.stratified[model][0],'stratified_var':self.stratified[model][1]},ignore_index=True)
filename = report_name
df.to_csv(filename,index=False)
print('==============================================')
print(' Report Generation')
print('==============================================')
print(' ===> Successfully generated ')
        print(' ===> Results saved in ' + filename)
def activateGroups(self,groups):
self.groups = groups
self.flagGroup = True
def activateDatasets(self,datasets):
self.datasets = datasets
self.flagDataset = True
def activateLabel(self,label):
self.label = label
def buildPipeline(self,sequence,report_name=''):
"""
<feature_name> : Name of feature
feature_extraction: Apply feature extraction based on correlation
feature_scaling: Apply feature scaling. Options: Standard, MinMax
feature_fusion: Apply feature fusion. Options: gini, entropy, pca, isomap, mds, tsne
load_models: Load regression models.
find_evaluate: Model evaluation. Options: train_test_split, cross_validation, leave_one_group_out, leave_one_dataset_out, sorted_stratified
report_results: Report results. Options: table, chart
"""
currentOutput = None
for index, step in enumerate(sequence):
label = self.label
groups = self.groups
datasets = self.datasets
if index == 0:
self.feature_name_check(step)
currentOutput = self.feature_set[step][2]
elif step == 'feature_extraction':
results = self.extractFeatures(currentOutput)
currentOutput = results
elif step == 'feature_scaling_std':
print(currentOutput.shape)
results = self.Scaling(currentOutput,'std')
currentOutput = results
elif step == 'feature_scaling_mmax':
results = self.Scaling(currentOutput,'mmax')
currentOutput = results
elif step == 'feature_fusion_pca':
results = self.DimRed('pca',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_mds':
results = self.DimRed('mds',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_isomap':
results = self.DimRed('isomap',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_tsne':
results = self.DimRed('tsne',currentOutput,{'n_components':2,'n_neighbors':3})
currentOutput = results
elif step == 'feature_fusion_entropy':
results = self.getEntropy(currentOutput)
currentOutput = results
print(results)
elif step == 'feature_fusion_gini':
results = self.getGINI(currentOutput)
currentOutput = results
print(results)
elif step == 'load_modules':
self.regressionModelInitialize()
elif step == 'evaluate_train_test':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'train_test_split',label)
currentOutput = results
elif step == 'evaluate_cross_val':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'cross_val',label)
currentOutput = results
elif step == 'evaluate_leave_group_out':
if label == None:
print(' ====> Error: labels are not loaded')
if self.flagDataset == False:
print(' ====> Error: groups ids are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'leave_one_group_out',label,group=groups)
currentOutput = results
elif step == 'evaluate_leave_dataset_out':
if label == None:
print(' ====> Error: labels are not loaded')
if self.flagDataset == False:
print(' ====> Error: datasets ids are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'leave_one_dataset_out',label,dataset = datasets)
currentOutput = results
elif step == 'evaluate_stratified':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'sorted_stratified',label)
currentOutput = results
elif step == 'all':
if label == None:
print(' ====> Error: labels are not loaded')
results =self.findParametersAndEvaluate(currentOutput,'all',label,group = groups, dataset = datasets)
currentOutput = results
elif step == 'report_csv':
self.report(currentOutput,report_name)
else:
print(' Unsupported module ',step,' is specified')
| 41.139214 | 457 | 0.517462 | 3,813 | 38,712 | 5.084448 | 0.120378 | 0.029917 | 0.012379 | 0.017331 | 0.469851 | 0.41043 | 0.359983 | 0.325012 | 0.293857 | 0.271264 | 0 | 0.008895 | 0.346585 | 38,712 | 941 | 458 | 41.139214 | 0.757541 | 0.03712 | 0 | 0.3457 | 0 | 0 | 0.161192 | 0.045734 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035413 | false | 0 | 0.097808 | 0 | 0.177066 | 0.215852 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ba459622ba98919bfa10ab43ed05d7011713aea | 469 | py | Python | Moderate/Prime Numbers/main.py | AstrorEnales/CodeEval | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | [
"MIT"
] | null | null | null | Moderate/Prime Numbers/main.py | AstrorEnales/CodeEval | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | [
"MIT"
] | null | null | null | Moderate/Prime Numbers/main.py | AstrorEnales/CodeEval | eae0fb471d27d3a83d544ff4a4651ed1a2076930 | [
"MIT"
] | null | null | null | import sys
lines = open(sys.argv[1], 'r')
for line in lines:
line = line.replace('\n', '').replace('\r', '')
if len(line) > 0:
n = int(line)
primes = set([2])
num = 3
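        # trial division: num is prime iff no known smaller prime divides it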
while num < n:
if all(num % i != 0 for i in primes):
                primes.add(num)
num = num + 1
        primes = sorted(primes)
print(','.join([str(x) for x in primes]))
lines.close()
| 26.055556 | 52 | 0.45629 | 64 | 469 | 3.34375 | 0.484375 | 0.084112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020408 | 0.373134 | 469 | 17 | 53 | 27.588235 | 0.707483 | 0 | 0 | 0 | 0 | 0 | 0.013274 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ba4e572e52707590a52608ce4cc12b513909627 | 2,117 | py | Python | gemtown/users/serializers.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
] | null | null | null | gemtown/users/serializers.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
] | 5 | 2020-09-04T20:13:39.000Z | 2022-02-17T22:03:33.000Z | gemtown/users/serializers.py | doramong0926/gemtown | 2c39284e3c68f0cc11994bed0ee2abaad0ea06b6 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from gemtown.modelphotos import models as modelphoto_models
from gemtown.modelers import models as modeler_models
from gemtown.musicians import models as musician_models
from . import models
import time
class TimestampField(serializers.Field):
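    # render datetime values as Unix epoch seconds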
def to_representation(self, value):
return int(time.mktime(value.timetuple()))
class UsernameSerializer(serializers.ModelSerializer):
class Meta:
model = models.User
fields = (
'username',
)
class MusicianSerializer(serializers.ModelSerializer):
class Meta:
model = musician_models.Musician
fields = (
'id',
'nickname',
'country',
)
class ModelPhotoSerializer(serializers.ModelSerializer):
class Meta:
model = modelphoto_models.ModelPhoto
fields = (
'file',
'photo_type',
)
class ModelerSerializer(serializers.ModelSerializer):
cover_image = ModelPhotoSerializer()
class Meta:
model = modeler_models.Modeler
fields = (
'id',
'cover_image',
'nickname',
'country',
)
class UserSerializer(serializers.ModelSerializer):
created_at = TimestampField()
updated_at = TimestampField()
followers = UsernameSerializer(many=True)
followings = UsernameSerializer(many=True)
musician = MusicianSerializer()
modeler = ModelerSerializer()
class Meta:
model = models.User
fields = (
'id',
'username',
'email',
'first_name',
'last_name',
'user_class',
'gem_amount',
'musician',
'modeler',
'gender',
'profile_photo',
'country',
'mobile_number',
'mobile_country',
'followers',
'followings',
'is_superuser',
'is_staff',
'created_at',
'updated_at'
)
| 25.817073 | 60 | 0.561171 | 166 | 2,117 | 7.012048 | 0.39759 | 0.111684 | 0.060137 | 0.090206 | 0.142612 | 0.051546 | 0 | 0 | 0 | 0 | 0 | 0 | 0.352858 | 2,117 | 81 | 61 | 26.135802 | 0.849635 | 0 | 0 | 0.309859 | 0 | 0 | 0.117313 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014085 | false | 0 | 0.084507 | 0.014085 | 0.366197 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ba78991985070dc29bb6e09cbc030857e571e30 | 6,702 | py | Python | alexa_skills/cis_diagnosis.py | paramraghavan/sls-py-alexa-color-picker | da4752442dd4ead19832930103adb9d81cfc163a | [
"MIT"
] | null | null | null | alexa_skills/cis_diagnosis.py | paramraghavan/sls-py-alexa-color-picker | da4752442dd4ead19832930103adb9d81cfc163a | [
"MIT"
] | null | null | null | alexa_skills/cis_diagnosis.py | paramraghavan/sls-py-alexa-color-picker | da4752442dd4ead19832930103adb9d81cfc163a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
from ask_sdk_model.ui import SimpleCard
import os
from alexa_skills import aws_utils
CIS_SERVICE_URL = os.environ['CIS_SERVICE_URL']
CIS_AWS_ACCESS_KEY_ID = os.environ['CIS_AWS_ACCESS_KEY_ID']
CIS_AWS_SECRET_ACCESS_KEY = os.environ.get('CIS_AWS_SECRET_ACCESS_KEY')
skill_name = "CISDiagnosis"
help_text = ("Please tell me your medical condition. You can say "
"I have cold headache.")
report_slot = "report"
sb = SkillBuilder()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@sb.request_handler(can_handle_func=is_request_type("LaunchRequest"))
def launch_request_handler(handler_input):
"""Handler for Skill Launch."""
# type: (HandlerInput) -> Response
speech = "Welcome, Tell me your medical condition."
handler_input.response_builder.speak(
speech + " " + help_text).ask(help_text)
return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.HelpIntent"))
def help_intent_handler(handler_input):
"""Handler for Help Intent."""
# type: (HandlerInput) -> Response
handler_input.response_builder.speak(help_text).ask(help_text)
return handler_input.response_builder.response
@sb.request_handler(
can_handle_func=lambda handler_input:
is_intent_name("AMAZON.CancelIntent")(handler_input) or
is_intent_name("AMAZON.StopIntent")(handler_input))
def cancel_and_stop_intent_handler(handler_input):
"""Single handler for Cancel and Stop Intent."""
# type: (HandlerInput) -> Response
speech_text = "Goodbye!"
return handler_input.response_builder.speak(speech_text).response
@sb.request_handler(can_handle_func=is_request_type("SessionEndedRequest"))
def session_ended_request_handler(handler_input):
"""Handler for Session End."""
# type: (HandlerInput) -> Response
return handler_input.response_builder.response
from io import StringIO
def getMedicalAnalysis(medical_report):
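    # run Comprehend Medical entity detection and keep MEDICAL_CONDITION entities that carry traits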
client = aws_utils.get_boto3_client(CIS_AWS_ACCESS_KEY_ID, CIS_AWS_SECRET_ACCESS_KEY, 'comprehendmedical')
response = client.detect_entities_v2(
Text=medical_report
)
mc_dict = {}
for entity in response['Entities']:
if entity["Category"] == "MEDICAL_CONDITION" and len(entity["Traits"]) > 0:
#print(f'| {entity["Text"]} |{entity["Category"]} |')
mc_dict[entity["Text"]] = entity["Category"]
#print(mc_dict)
string_buffer = StringIO()
for item in mc_dict:
string_buffer.write( item + ' is ' + mc_dict[item] + ' ')
return string_buffer.getvalue()
@sb.request_handler(can_handle_func=is_intent_name("MedicalIntent"))
def my_medical_diagnosis_handler(handler_input):
"""Check if color is provided in slot values. If provided, then
set your favorite color from slot value into session attributes.
If not, then it asks user to provide the color.
"""
# type: (HandlerInput) -> Response
slots = handler_input.request_envelope.request.intent.slots
if report_slot in slots:
medical_report = slots[report_slot].value
speakOutput = getMedicalAnalysis(medical_report)
# build json object as per the CISApi
# handler_input.attributes_manager.session_attributes[color_slot_key] = fav_color
speech = "Identified diseases are " + speakOutput
reprompt = ("That's " + speakOutput)
else:
speech = "I'm not sure, please try again"
reprompt = ("I'm not sure, please try again")
handler_input.response_builder.speak(speech).ask(reprompt)
return handler_input.response_builder.response
@sb.request_handler(can_handle_func=is_intent_name("AMAZON.FallbackIntent"))
def fallback_handler(handler_input):
"""AMAZON.FallbackIntent is only available in en-US locale.
This handler will not be triggered except in that locale,
so it is safe to deploy on any locale.
"""
# type: (HandlerInput) -> Response
speech = (
"The {} skill can't help you with that. " + help_text ).format(skill_name)
reprompt = (help_text)
handler_input.response_builder.speak(speech).ask(reprompt)
return handler_input.response_builder.response
def convert_speech_to_text(ssml_speech):
"""convert ssml speech to text, by removing html tags."""
# type: (str) -> str
s = SSMLStripper()
s.feed(ssml_speech)
return s.get_data()
@sb.global_response_interceptor()
def add_card(handler_input, response):
"""Add a card by translating ssml text to card content."""
# type: (HandlerInput, Response) -> None
response.card = SimpleCard(
title=skill_name,
content=convert_speech_to_text(response.output_speech.ssml))
@sb.global_response_interceptor()
def log_response(handler_input, response):
"""Log response from alexa service."""
# type: (HandlerInput, Response) -> None
print("Alexa Response: {}\n".format(response))
@sb.global_request_interceptor()
def log_request(handler_input):
"""Log request to alexa service."""
# type: (HandlerInput) -> None
print("Alexa Request: {}\n".format(handler_input.request_envelope.request))
@sb.exception_handler(can_handle_func=lambda i, e: True)
def all_exception_handler(handler_input, exception):
"""Catch all exception handler, log exception and
respond with custom message.
"""
# type: (HandlerInput, Exception) -> None
print("Encountered following exception: {}".format(exception))
speech = "Sorry, there was some problem. Please try again!!"
handler_input.response_builder.speak(speech).ask(speech)
return handler_input.response_builder.response
######## Convert SSML to Card text ############
# This is for automatic conversion of ssml to text content on simple card
# You can create your own simple cards for each response, if this is not
# what you want to use.
from six import PY2
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
class SSMLStripper(HTMLParser):
def __init__(self):
self.reset()
self.full_str_list = []
if not PY2:
self.strict = False
self.convert_charrefs = True
def handle_data(self, d):
self.full_str_list.append(d)
def get_data(self):
return ''.join(self.full_str_list)
################################################
# Handler to be provided in lambda console.
lambda_handler = sb.lambda_handler()
| 32.852941 | 110 | 0.716801 | 873 | 6,702 | 5.255441 | 0.273769 | 0.07585 | 0.061029 | 0.070619 | 0.278989 | 0.209895 | 0.159765 | 0.15279 | 0.151046 | 0.123801 | 0 | 0.001084 | 0.173829 | 6,702 | 203 | 111 | 33.014778 | 0.827524 | 0.222769 | 0 | 0.090909 | 0 | 0 | 0.128404 | 0.013317 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.118182 | 0.009091 | 0.354545 | 0.027273 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6ba8948db01a555810296ad83a1297622916c86e | 988 | py | Python | armautils_cli/smdimerge.py | KoffeinFlummi/ArmaUtils | 2f1fdc8fb561fb54077f3c328d7a788e75c78dad | [
"MIT"
] | 1 | 2015-02-19T17:31:17.000Z | 2015-02-19T17:31:17.000Z | armautils_cli/smdimerge.py | KoffeinFlummi/ArmaUtils | 2f1fdc8fb561fb54077f3c328d7a788e75c78dad | [
"MIT"
] | null | null | null | armautils_cli/smdimerge.py | KoffeinFlummi/ArmaUtils | 2f1fdc8fb561fb54077f3c328d7a788e75c78dad | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
from PIL import Image
def smdimerge(pargs, oargs):
if len(pargs) != 3:
return -1
path_spec, path_gloss, path_target = pargs
try:
spec = Image.open(path_spec).convert("RGBA")
gloss = Image.open(path_gloss).convert("RGBA")
    except OSError:
print("Failed to read images. Please check your paths.")
return 1
if spec.size != gloss.size:
print("Image sizes do not match, aborting.")
return 1
smdi = Image.new("RGBA", spec.size, "white")
data = np.array(smdi)
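    # Channel packing (SMDI convention as used here): R and A stay white,
    # G takes the specular map's red channel, B takes the gloss map's red.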
    r, g, b, a = data.transpose()
g = np.array(spec).transpose()[0]
b = np.array(gloss).transpose()[0]
data = np.array([r,g,b,a]).transpose()
smdi = Image.fromarray(data)
try:
smdi.save(path_target)
    except OSError:
print("Failed to write final image to disk. Check permissions.")
return 1
else:
print("SMDI map saved at: {}".format(path_target))
return 0
| 23.52381 | 72 | 0.601215 | 140 | 988 | 4.192857 | 0.471429 | 0.0477 | 0.044293 | 0.064736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01238 | 0.26417 | 988 | 41 | 73 | 24.097561 | 0.795048 | 0.021255 | 0 | 0.233333 | 0 | 0 | 0.181159 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.066667 | 0 | 0.266667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6baa9c56ec82f3de3e848ccc5b1bc7bfad503442 | 7,001 | py | Python | detect_motor_test3.py | binghaohuang1/object-detective-visual-tracking | e61680a771dc13a006113d96965e59ff1bc3ce6d | [
"MIT"
] | null | null | null | detect_motor_test3.py | binghaohuang1/object-detective-visual-tracking | e61680a771dc13a006113d96965e59ff1bc3ce6d | [
"MIT"
] | null | null | null | detect_motor_test3.py | binghaohuang1/object-detective-visual-tracking | e61680a771dc13a006113d96965e59ff1bc3ce6d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading
global RV2_motor1_joint
yolo = YOLO()
bridge = CvBridge()
def send():
rospy.Subscriber('/mid_camera/color/image_raw/compressed', CompressedImage, ReceiveVideo_right)
rospy.spin()
def ReceiveVideo_right(data):
global cv_image
# print(1)
cv_image = bridge.compressed_imgmsg_to_cv2(data, 'bgr8')
def main():
global delta_x,cv_image
time.sleep(4)
fps = 0
while not rospy.is_shutdown():
t1 = time.time()
        # Read the current frame
frame = cv2.cvtColor(cv_image,cv2.COLOR_BGR2RGB)
        # Convert to a PIL Image
frame = pilimage.fromarray(np.uint8(frame))
        # Run detection
frame, bbox_list, label_list = yolo.detect_image(frame)
frame = np.array(frame)
        # Convert RGB back to BGR for OpenCV display
frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)
fps = ( fps + (1./(time.time()-t1)) ) / 2
print("fps= %.2f"%(fps))
frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
print(frame.shape)
cv2.imshow("video",frame)
cv2.waitKey(3)
# c= cv2.waitKey(1) & 0xff
# if c==27:
# break
        if type(label_list) != int:  # bbox_list and label_list are 1 when nothing is detected
num_of_obj = len(label_list)
            # print('num_of_object:', num_of_obj)
            # Compute the tracked object's offset from the image center
for i in range(num_of_obj):
if 'banana' in label_list[i]:
object_center = (bbox_list[i][1]+bbox_list[i][3])*0.5
delta_x = 320-object_center
#print(delta_x)
#return delta_x
# location_pub.publish(delta_x)
#motor1_move()
elif 'bed' in label_list[i]:
print("yyy")
pass
else:
            print('YOLO did not detect any object')
pass
def motor1_move():
time.sleep(1)
global command_vel_pub_m, delta_x, RV2_motor1_joint
delta_x = 0
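    # Tracking control (thresholds below): |delta_x| < 80 px is a dead zone,
    # 80-200 px commands a velocity proportional to the offset, and beyond
    # 200 px the motor runs at the +/-0.48 limit.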
now = rospy.Time.now()
motor_vel = JointState()
motor_vel.header = Header()
motor_vel.header.stamp = now
motor_vel.header.frame_id = "bulldog"
motor_vel.name = ["motor1"]
# rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
while not rospy.is_shutdown():
print(delta_x)
        # Middle-position band
if -1.5 < RV2_motor1_joint < 1.5:
            # Turn-left conditions
if delta_x > 200:
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_x < 200:
motor_vel.velocity = [(delta_x - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right conditions
elif delta_x < -200:
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_x < -80:
motor_vel.velocity = [(delta_x + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_x < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
        # Left limit reached
if 1.5 < RV2_motor1_joint:
            # Turn-left conditions
if delta_x > 80:
motor_vel.velocity = [0]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right conditions
elif delta_x < -200:
motor_vel.velocity = [-0.48]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif -200 < delta_x < -80:
motor_vel.velocity = [(delta_x + 40) * 0.003]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_x < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
        # Right limit reached
if RV2_motor1_joint < -1.5:
            # Turn-left conditions
if delta_x > 200:
motor_vel.velocity = [0.48]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
elif 80 < delta_x < 200:
motor_vel.velocity = [(delta_x - 40) * 0.003]
print (motor_vel)
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Turn-right conditions
elif delta_x < -80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(2)
            # Stop condition
elif -80 < delta_x < 80:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
else:
motor_vel.velocity = [0]
command_vel_pub_m.publish(motor_vel)
time.sleep(0.5)
#for object in vision_database_dict:
    # Convert the OpenCV-format data back into a ROS Image message and publish it
# try:
# #self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
# location_pub.publish(location_pub)
# except CvBridgeError as e:
# print('e')
def RV2_motorjointstate_callback(data):
    # Store the incoming joint position in the global RV2 motor variable
global RV2_motor1_joint
RV2_motor1_joint = data.position[0]
print(RV2_motor1_joint)
if __name__ == '__main__':
    # Initialize the ROS node
rospy.init_node("cv_bridge_test")
rospy.loginfo("Starting cv_bridge_test node")
global command_vel_pub_m, delta_x
    # Create the velocity publisher
command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
    # Subscribe to the torso joint position messages
rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
    # Launch the YOLO recognition threads
t_send = threading.Thread(target = send)
t_send.start()
t_main = threading.Thread(target=main)
t_main.start()
#time.sleep(2)
    # Launch the torso motion thread
t_motor1 = threading.Thread(target = motor1_move)
t_motor1.start()
rospy.spin()
# except KeyboardInterrupt:
# print("Shutting down cv_bridge_test node.")
# cv2.destroyAllWindows()
| 32.868545 | 114 | 0.571347 | 862 | 7,001 | 4.37935 | 0.230858 | 0.08053 | 0.058543 | 0.063046 | 0.423841 | 0.358146 | 0.352318 | 0.331921 | 0.331921 | 0.296424 | 0 | 0.042836 | 0.333095 | 7,001 | 212 | 115 | 33.023585 | 0.765689 | 0.12084 | 0 | 0.446667 | 0 | 0 | 0.032749 | 0.010971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.013333 | 0.133333 | 0 | 0.166667 | 0.073333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6baba2b5a50d24a7e67ce4b5e3c206e3e6b416ad | 7,184 | py | Python | git_pull_all.py | searKing/git-pull-all | b77a1b0461cd00a5f3ccd48253a12674557302b6 | [
"MIT"
] | null | null | null | git_pull_all.py | searKing/git-pull-all | b77a1b0461cd00a5f3ccd48253a12674557302b6 | [
"MIT"
] | null | null | null | git_pull_all.py | searKing/git-pull-all | b77a1b0461cd00a5f3ccd48253a12674557302b6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import git
from git import *
import threading
import os
import sys
import getopt
from enum import Enum
class GitCommandType(Enum):
pull = 1
push = 2
nop = 3
def yes_or_no(msg: str):
yes_no = input(msg + " ? [Y]es or [n]o?")
yes_no = yes_no.lower()
if yes_no == "yes" or yes_no == "y":
return True
elif yes_no == "no" or yes_no == "n":
return False
else:
return True
# is_git_dir returns if current directory has .git/
def is_git_dir(dir_path: str):
repo_git_dir = os.path.join(dir_path, '.git')
if not os.path.exists(repo_git_dir):
return False
return True
def update_git_repo(git_cmd_type: GitCommandType, git_repo_dir: str, git_stash_if_have_uncommitted_changes: bool,
unhandled_git_repo_dirs: list):
try:
git_repo = git.Repo(git_repo_dir)
if git_cmd_type == GitCommandType.pull and git_repo.is_dirty():
if not git_stash_if_have_uncommitted_changes:
if not yes_or_no("Repo " + git_repo_dir + " have uncommitted changes, \n\tgit reset --hard"):
unhandled_git_repo_dirs.append(git_repo_dir)
return
try:
git_repo.git.stash('save', True)
except Exception as exception:
print(
"git stash repo:" + git_repo_dir + " Failed:\r\n git reset --hard recommended" + str(exception))
unhandled_git_repo_dirs.append(git_repo_dir)
return
remote_repo = git_repo.remote()
print("start git %s from remote for: %s" % (git_cmd_type.name, git_repo_dir), end='')
try:
if git_cmd_type == GitCommandType.pull:
remote_repo.pull()
elif git_cmd_type == GitCommandType.push:
remote_repo.push()
elif git_cmd_type == GitCommandType.nop:
pass
else:
print("")
raise Exception('unrecognised git command: ' + git_cmd_type.name)
except Exception as exception:
print("")
print(
"git " + git_cmd_type.name + " repo:" + git_repo_dir + " Failed:\r\n git reset --hard recommended" + str(
exception))
unhandled_git_repo_dirs.append(git_repo_dir)
return
print("... Done.")
except NoSuchPathError as e:
pass
except InvalidGitRepositoryError as e:
pass
finally:
pass
def update_git_repo_thread(git_cmd_type: GitCommandType, root_path: str, git_stash_if_have_uncommitted_changes: bool,
dirty_git_repo_dirs: list,
git_update_thread_pools: list):
if git_stash_if_have_uncommitted_changes:
git_update_thread_ = threading.Thread(target=update_git_repo,
args=(git_cmd_type, root_path, True, dirty_git_repo_dirs))
git_update_thread_.start()
git_update_thread_pools.append(git_update_thread_)
else:
update_git_repo(git_cmd_type, root_path, False, dirty_git_repo_dirs)
def walk_and_update(git_cmd_type: GitCommandType, root_path: str, continue_when_meet_git: bool, depth: int,
max_depth: int,
git_stash_if_have_uncommitted_changes: bool, dirty_git_repo_dirs: list,
git_update_thread_pools: list):
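    # Depth-first walk: update any git repo found at root_path, then (unless
    # stopping at the first repo) recurse into subdirectories up to max_depth.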
if depth >= max_depth:
print("jump for %s too deep: depth[%d] max_depth[%d]" % (root_path, depth, max_depth))
return
if is_git_dir(root_path):
update_git_repo_thread(git_cmd_type, root_path, git_stash_if_have_uncommitted_changes, dirty_git_repo_dirs,
git_update_thread_pools)
if not continue_when_meet_git:
# print("jump subdirs for %s meet git" % (root_path))
return
depth = depth + 1
for root_dir, sub_dirs, sub_files in os.walk(root_path):
for sub_dir in sub_dirs:
walk_and_update(git_cmd_type, os.path.join(root_dir, sub_dir), continue_when_meet_git, depth,
max_depth, git_stash_if_have_uncommitted_changes, dirty_git_repo_dirs,
git_update_thread_pools)
sub_dirs.clear()
sub_files.clear()
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
if argv is None:
argv = sys.argv
try:
try:
g_git_cmd_type: GitCommandType = GitCommandType.nop
g_walk_paths: list = ["."]
g_git_stash_if_have_uncommitted_changes: bool = False
g_continue_when_meet_git: bool = False
g_stop_when_meet_max_depth: int = 10
            opts, args = getopt.getopt(argv[1:], "hycd:",
                                       ["help", "path", "git_stash_if_have_uncommitted_changes",
                                        "continue_when_meet_git", "stop_when_meet_max_depth="])
if len(args) > 0:
g_git_cmd_type = GitCommandType[args[0]]
if len(args) > 1:
g_walk_paths = args[1:]
for op, value in opts:
if op == "-y":
g_git_stash_if_have_uncommitted_changes = True
if op == "-c":
g_continue_when_meet_git = True
elif op == "-d":
                    g_stop_when_meet_max_depth = int(value)
elif op == "-h":
print("=======""Usage:")
print("python git_pull_all.py pull|push .")
print("python git_pull_all.py -y -c -d 10 pull|push YourPath")
print("python git_pull_all.py"
" --git_stash_if_have_uncommitted_changes "
"--continue_when_meet_git "
"--stop_when_meet_max_depth=10 pull|push YourPath")
print("=======")
Usage("-h")
sys.exit()
g_dirty_git_repo_dirs = []
g_git_update_thread_pools = []
for walk_path in g_walk_paths:
walk_and_update(g_git_cmd_type, walk_path, g_continue_when_meet_git, 0,
g_stop_when_meet_max_depth, g_git_stash_if_have_uncommitted_changes,
g_dirty_git_repo_dirs, g_git_update_thread_pools)
for git_update_thread in g_git_update_thread_pools:
git_update_thread.join(30)
if len(g_dirty_git_repo_dirs) != 0:
print('these repos have uncommitted changes or conflicts:\r\n')
for dirty_repo_dir in g_dirty_git_repo_dirs:
print('dir %s has uncommited changes or conflicts, please check\r\n' % (dirty_repo_dir))
print("Done git " + g_git_cmd_type.name + " all")
except getopt.error as msg:
raise Usage(msg)
except Usage as err:
        print(err.msg, file=sys.stderr)
        print("for help use --help", file=sys.stderr)
return 2
if __name__ == "__main__":
sys.exit(main())
| 39.256831 | 121 | 0.579065 | 921 | 7,184 | 4.127036 | 0.156352 | 0.060773 | 0.047356 | 0.044199 | 0.46935 | 0.36964 | 0.280716 | 0.213102 | 0.202052 | 0.191002 | 0 | 0.004607 | 0.335329 | 7,184 | 182 | 122 | 39.472527 | 0.791414 | 0.016425 | 0 | 0.224359 | 0 | 0 | 0.114682 | 0.025202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044872 | false | 0.025641 | 0.044872 | 0 | 0.192308 | 0.108974 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bac63dfef6fa2d50e75637d8cb0e279922534d7 | 1,943 | py | Python | 03-dearsanta/dateconvert.py | hugovk/NaNoGenMo-2016 | e71b333173b221066f56adcdea4fe8cfdfd4e7c7 | [
"FTL"
] | null | null | null | 03-dearsanta/dateconvert.py | hugovk/NaNoGenMo-2016 | e71b333173b221066f56adcdea4fe8cfdfd4e7c7 | [
"FTL"
] | null | null | null | 03-dearsanta/dateconvert.py | hugovk/NaNoGenMo-2016 | e71b333173b221066f56adcdea4fe8cfdfd4e7c7 | [
"FTL"
] | null | null | null | #!/usr/bin/env python3
"""
Take a timestamp like:
25/11/2016 23:05:03
Convert it to:
25 November 2016, 13:05 PST
25 November 2016, 16:05 EST
25 November 2016, 21:05 GMT
25 November 2016, 21:05 UTC
25 November 2016, 23:05 EET
26 November 2016, 02:35 IST
26 November 2016, 05:05 CST
26 November 2016, 06:05 JST
26 November 2016, 08:05 AEDT
"""
import argparse
import pytz # pip install pytz
from dateutil.parser import parse # pip install python-dateutil
def utc_to_local(utc_dt, local_tz):
local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)
return local_tz.normalize(local_dt) # .normalize might be unnecessary
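# Illustrative example: utc_to_local(datetime(2016, 11, 25, 21, 5),
# pytz.timezone("Europe/Helsinki")) yields 2016-11-25 23:05 EET; the main
# flow below localizes the parsed input directly instead.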
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert a timestamp into eight others.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("timestamp", help="Input timestamp")
args = parser.parse_args()
# print(args.timestamp)
indate = parse(args.timestamp, dayfirst=True, yearfirst=False)
local_tz = pytz.timezone("Europe/Helsinki")
# print(indate, local_tz)
localdt = local_tz.localize(indate)
us_pacific = pytz.timezone("US/Pacific")
us_eastern = pytz.timezone("US/Eastern")
london = pytz.timezone("Europe/London")
india = pytz.timezone("Asia/Calcutta")
china = pytz.timezone("Asia/Shanghai")
japan = pytz.timezone("Asia/Tokyo")
sydney = pytz.timezone("Australia/Sydney")
for tz in [
us_pacific,
us_eastern,
london,
pytz.UTC,
local_tz,
india,
china,
japan,
sydney,
]:
timezone_name = tz.localize(indate).tzname()
local_date = localdt.astimezone(tz).strftime("%d %B %Y, %H:%M")
print(f"{local_date} {timezone_name}")
# x = tz.localize(indate)
# print("{} ({})".format(localdt.astimezone(tz), x.tzname()))
# print()
# End of file
| 25.906667 | 74 | 0.662378 | 261 | 1,943 | 4.808429 | 0.43295 | 0.086056 | 0.055777 | 0.025498 | 0.028685 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068898 | 0.215646 | 1,943 | 74 | 75 | 26.256757 | 0.754593 | 0.288214 | 0 | 0 | 0 | 0 | 0.15593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.081081 | 0 | 0.135135 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bad66e09e43cf21ce9d5331e922f80d18955982 | 799 | py | Python | priv_tube/database/repositories/system_flags.py | ActionCactus/prive_tube | d6d53b87e1a91248e32532a86b23f2d2f3196c58 | [
"MIT"
] | null | null | null | priv_tube/database/repositories/system_flags.py | ActionCactus/prive_tube | d6d53b87e1a91248e32532a86b23f2d2f3196c58 | [
"MIT"
] | null | null | null | priv_tube/database/repositories/system_flags.py | ActionCactus/prive_tube | d6d53b87e1a91248e32532a86b23f2d2f3196c58 | [
"MIT"
] | null | null | null | from priv_tube.database.models.system_flags import SystemFlags as Model
from priv_tube.database import db
class SystemFlags:
"""
Repository for interacting with the `system_flags` database table responsible for system-wide toggles.
"""
@staticmethod
def is_enabled(setting_name: str) -> bool:
flag: Model = Model.query.filter_by(flag_name=setting_name).first()
return bool(flag.value)
@staticmethod
def enable(setting_name: str):
model: Model = Model.query.filter_by(flag_name=setting_name).first()
model.value = True
db.session.commit()
@staticmethod
def disable(setting_name: str):
model: Model = Model.query.filter_by(flag_name=setting_name).first()
model.value = False
db.session.commit()
| 29.592593 | 106 | 0.693367 | 102 | 799 | 5.264706 | 0.421569 | 0.122905 | 0.078212 | 0.117318 | 0.370577 | 0.370577 | 0.370577 | 0.370577 | 0.370577 | 0.370577 | 0 | 0 | 0.210263 | 799 | 26 | 107 | 30.730769 | 0.85103 | 0.12766 | 0 | 0.411765 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6baec59b9ec0b23b54791668f80cdb75f7f78fe2 | 2,899 | py | Python | borisat/models/rd/unnested.py | CircleOnCircles/borisat | 4f170ee1a0b11f06b1c3e99f42823061d7e0028e | [
"MIT"
] | null | null | null | borisat/models/rd/unnested.py | CircleOnCircles/borisat | 4f170ee1a0b11f06b1c3e99f42823061d7e0028e | [
"MIT"
] | 1 | 2020-10-10T08:18:29.000Z | 2020-10-10T08:18:29.000Z | borisat/models/rd/unnested.py | CircleOnCircles/borisat | 4f170ee1a0b11f06b1c3e99f42823061d7e0028e | [
"MIT"
] | null | null | null | """
{
'vNID': {
'anyType': [
'0105558096348'
]
},
'vtin': None,
'vtitleName': {
'anyType': [
'บริษัท'
]
},
'vName': {
'anyType': [
'โฟลว์แอคเคาท์ จำกัด'
]
},
'vSurname': {
'anyType': [
'-'
]
},
'vBranchTitleName': {
'anyType': [
'บริษัท'
]
},
'vBranchName': {
'anyType': [
'โฟลว์แอคเคาท์ จำกัด'
]
},
'vBranchNumber': {
'anyType': [
0
]
},
'vBuildingName': {
'anyType': [
'ชุดสกุลไทย สุรวงศ์ ทาวเวอร์'
]
},
'vFloorNumber': {
'anyType': [
'11'
]
},
'vVillageName': {
'anyType': [
'-'
]
},
'vRoomNumber': {
'anyType': [
'12B'
]
},
'vHouseNumber': {
'anyType': [
'141/12'
]
},
'vMooNumber': {
'anyType': [
'-'
]
},
'vSoiName': {
'anyType': [
'-'
]
},
'vStreetName': {
'anyType': [
'สุรวงศ์'
]
},
'vThambol': {
'anyType': [
'สุริยวงศ์'
]
},
'vAmphur': {
'anyType': [
'บางรัก'
]
},
'vProvince': {
'anyType': [
'กรุงเทพมหานคร'
]
},
'vPostCode': {
'anyType': [
'10500'
]
},
'vBusinessFirstDate': {
'anyType': [
'2016/04/07'
]
},
'vmsgerr': None
}
"""
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Union
import stringcase as stringcase
from loguru import logger
def unnest(soap_data: Dict[str, Optional[Dict[str, List[Union[str, int]]]]], nonull: bool = True):
    # drop None entries
    if nonull:
        notnull = {k: v for k, v in soap_data.items() if v}
    else:
        notnull = soap_data
# anytype flatten
flatten = {}
for k, v in notnull.items():
if k.startswith('v'):
k = k[1:]
k = stringcase.snakecase(k)
try:
flatten[k] = v['anyType'][0]
if len(v['anyType']) > 1:
logger.info(
"please let dev. know this case exists. by creating an issue on https://github.com/CircleOnCircles/borisat/issues.")
except Exception as e:
logger.exception("unseen format")
return flatten
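# Illustrative example: unnest({'vName': {'anyType': ['Acme']}, 'vmsgerr': None})
# drops the None entry, strips the 'v' prefix, snake_cases the key and unwraps
# the single-element 'anyType' list, returning {'name': 'Acme'}.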
def get_error(unnested: Dict[str, Any]) -> Optional[str]:
""" get error if any"""
if error_message := unnested.get('msgerr'):
return error_message
else:
return False
| 19.993103 | 137 | 0.397378 | 255 | 2,899 | 4.592157 | 0.494118 | 0.042699 | 0.068318 | 0.018787 | 0.06661 | 0.06661 | 0.044406 | 0.044406 | 0.044406 | 0.044406 | 0 | 0.024809 | 0.457744 | 2,899 | 144 | 138 | 20.131944 | 0.704835 | 0.588479 | 0 | 0 | 0 | 0.035714 | 0.142166 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.25 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bb3065e877b04d04efa39f8cb2dec584e2b65df | 3,696 | py | Python | app/utils/logger.py | janaSunrise/ZeroCOM | 7197684ce708f080fe215b0a6e57c12836e4c0ab | [
"Apache-2.0"
] | 6 | 2021-03-27T08:58:04.000Z | 2021-05-23T17:07:09.000Z | app/utils/logger.py | janaSunrise/ZeroCOM | 7197684ce708f080fe215b0a6e57c12836e4c0ab | [
"Apache-2.0"
] | 2 | 2021-05-30T08:06:53.000Z | 2021-06-02T17:02:06.000Z | app/utils/logger.py | janaSunrise/ZeroCOM | 7197684ce708f080fe215b0a6e57c12836e4c0ab | [
"Apache-2.0"
] | null | null | null | # -- Imports --
from datetime import datetime
from colorama import Back
from rich.console import Console
from .colors import get_bright_color, get_color
# -- Mappings --
log_color_mapping = {
"error": get_bright_color("RED"),
"warning": get_bright_color("YELLOW"),
"message": get_color("CYAN"),
"success": get_bright_color("GREEN"),
"info": get_bright_color("MAGENTA"),
"critical": get_bright_color("RED") + Back.YELLOW,
"flash": get_bright_color("BLUE"),
}
log_mapping = {
"error": f"[{log_color_mapping['error']}%{get_color('RESET')}]",
"warning": f"[{log_color_mapping['warning']}!{get_color('RESET')}]",
"message": f"[{log_color_mapping['message']}>{get_color('RESET')}]",
"success": f"[{log_color_mapping['success']}+{get_color('RESET')}]",
"info": f"[{log_color_mapping['info']}#{get_color('RESET')}]",
"critical": f"[{log_color_mapping['critical']}X{get_color('RESET')}{Back.RESET}]",
"flash": f"[{log_color_mapping['flash']}-{get_color('RESET')}]",
}
class Logger:
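    """Console logger that prefixes each message with a colored type glyph
    and an optional HH:MM:SS timestamp.
    """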
def __init__(self):
self._console = Console()
@staticmethod
def _append_date(message: str) -> str:
timestamp = datetime.now()
timestamp = (
f"{get_bright_color('CYAN')}"
f"{timestamp.hour}:{timestamp.minute}:{timestamp.second}"
f"{get_bright_color('RESET')}"
)
return f"[{timestamp}]{message}"
def error(self, message: str, date: bool = True) -> None:
log_type = "error"
message_prefix = log_mapping[log_type]
message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
if date:
message = self._append_date(message)
print(message)
def warning(self, message: str, date: bool = True) -> None:
log_type = "warning"
message_prefix = log_mapping[log_type]
message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
if date:
message = self._append_date(message)
print(message)
def message(self, username: str, message: str, date: bool = True, **kwargs) -> None:
log_type = "message"
message_prefix = log_mapping[log_type]
message_pre = f"{get_bright_color('YELLOW')} {username}{get_color('RESET')} {message_prefix} "
if date:
message_pre = self._append_date(message_pre)
print(message_pre, end="")
self._console.print(message, **kwargs)
def success(self, message: str, date: bool = True) -> None:
log_type = "success"
message_prefix = log_mapping[log_type]
message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
if date:
message = self._append_date(message)
print(message)
def info(self, message: str, date: bool = True) -> None:
log_type = "info"
message_prefix = log_mapping[log_type]
message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
if date:
message = self._append_date(message)
print(message)
def critical(self, message: str, date: bool = True) -> None:
log_type = "critical"
message_prefix = log_mapping[log_type]
message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
if date:
message = self._append_date(message)
print(message)
def flash(self, message: str, date: bool = True) -> None:
log_type = "flash"
message_prefix = log_mapping[log_type]
message = f"{message_prefix} {log_color_mapping[log_type]}{message}"
if date:
message = self._append_date(message)
print(message)
| 29.806452 | 102 | 0.619589 | 444 | 3,696 | 4.873874 | 0.128378 | 0.064695 | 0.097043 | 0.126155 | 0.507394 | 0.47597 | 0.47597 | 0.458872 | 0.458872 | 0.356285 | 0 | 0 | 0.232413 | 3,696 | 123 | 103 | 30.04878 | 0.762778 | 0.007576 | 0 | 0.380952 | 0 | 0 | 0.293042 | 0.216098 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.047619 | 0 | 0.178571 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bb5a3ae1541a6323233d61a349487d06b8c5714 | 1,907 | py | Python | Simplex_Files/Dual_Problem.py | c-randall/Primal-Simplex-Method | 620d7598691ed9717d2d18706c44e462f75e85c5 | [
"BSD-3-Clause"
] | 1 | 2021-12-04T12:18:17.000Z | 2021-12-04T12:18:17.000Z | Simplex_Files/Dual_Problem.py | c-randall/Primal-Simplex-Method | 620d7598691ed9717d2d18706c44e462f75e85c5 | [
"BSD-3-Clause"
] | null | null | null | Simplex_Files/Dual_Problem.py | c-randall/Primal-Simplex-Method | 620d7598691ed9717d2d18706c44e462f75e85c5 | [
"BSD-3-Clause"
] | 2 | 2020-05-30T16:38:37.000Z | 2022-01-22T19:50:42.000Z | """
Created on Wed Apr 3 13:07:18 2019
Author:
Corey R. Randall
Summary:
If the user wishes to solve the Dual Problem over the Primal one, this
function provides support to appropriately convert the problem into its
alternate form.
"""
""" Import needed modules """
"-----------------------------------------------------------------------------"
import numpy as np
""" Function definition """
"-----------------------------------------------------------------------------"
def dual_problem(user_inputs, conversion):
    # Extract dictionary entries for readability:
A = conversion['A']
b = conversion['b']
c_coeff = conversion['c_coeff']
n = conversion['n']
m = conversion['m']
n_slack = conversion['n_slack']
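    # Each unrestricted dual variable y is split as y = y' - y'' (two
    # nonnegative variables, so column counts double below), and an identity
    # block is appended for the dual slack variables.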
# Convert A, c_coeff to allow for unrestricted y (i.e. y = y' - y''):
A_temp = np.repeat(A.T, 2)
A_temp[1::2] = -A_temp[1::2]
A = np.reshape(A_temp, [A.shape[1], 2*A.shape[0]])
A = np.hstack([A, np.identity(A.shape[0])])
b_temp = np.repeat(b.T, 2) # the obj. coeff. in (D) are b from (P)
b_temp[1::2] = -b_temp[1::2]
b = np.reshape(b_temp, [b.shape[1], 2*b.shape[0]])
b = np.hstack([b, np.zeros([1, A.shape[0]])])
# Ensure no negative values on RHS:
for i in range(c_coeff.shape[1]):
if c_coeff[0,i] < 0: # the RHS, b values, in (D) are c from (P)
A[i,:] = -A[i,:]
c_coeff[0,i] = -c_coeff[0,i]
# Generate dictionary for outputs:
dual_conversion = {}
dual_conversion['A'] = A
dual_conversion['b'] = c_coeff.T
dual_conversion['c_coeff'] = -b
dual_conversion['n'] = 2*m
    dual_conversion['m'] = n + n_slack
    dual_conversion['n_slack'] = n + n_slack
dual_conversion['n_prim'] = n
dual_conversion['n_slack_prim'] = n_slack
return dual_conversion
| 31.262295 | 80 | 0.529628 | 273 | 1,907 | 3.56044 | 0.322344 | 0.144033 | 0.024691 | 0.024691 | 0.09465 | 0.061728 | 0 | 0 | 0 | 0 | 0 | 0.025228 | 0.251704 | 1,907 | 60 | 81 | 31.783333 | 0.655922 | 0.262192 | 0 | 0.0625 | 0 | 0 | 0.162882 | 0.120595 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.03125 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bbbb5f359c8e40427bcd1ee484f14b132a99a62 | 2,593 | py | Python | gallery/m_cardloader.py | kengoon/KvGallery | 4d946fa06479636411e027bfdebbb15c58c176cf | [
"MIT"
] | 2 | 2021-05-28T13:37:07.000Z | 2021-06-20T06:47:20.000Z | gallery/m_cardloader.py | kengoon/KvGallery | 4d946fa06479636411e027bfdebbb15c58c176cf | [
"MIT"
] | null | null | null | gallery/m_cardloader.py | kengoon/KvGallery | 4d946fa06479636411e027bfdebbb15c58c176cf | [
"MIT"
] | null | null | null | from kivy.event import EventDispatcher
from kivy.metrics import dp
from kivy.properties import ListProperty, StringProperty
from kivymd.uix.card import MDCard
from kivy.lang import Builder
__all__ = "M_CardLoader"
Builder.load_string(
"""
# kv_start
<M_CardLoader>:
md_bg_color: 0, 0, 0, 0
radius: [dp(10), ]
ripple_behavior: True
RelativeLayout:
AsyncImage:
id: image
color: 0,0,0,0
source: root.source
anim_delay: .1
allow_stretch: True
keep_ratio: False
nocache: True
on_load:
root.dispatch("on_load")
canvas.before:
StencilPush
RoundedRectangle:
pos: self.pos
size: self.size
radius: root.radius
StencilUse
canvas.after:
StencilUnUse
RoundedRectangle:
size: self.size
pos: self.pos
radius: root.radius
StencilPop
M_AKImageLoader:
id: loader
radius: root.radius
circle: False
MDBoxLayout:
id:box
opacity: 0
padding: dp(10)
adaptive_height: True
md_bg_color: 0, 0, 0, .6
radius: [0, 0, root.radius[0], root.radius[0]]
M_AKLabelLoader:
text: root.text
radius: root.text_radius
size_hint_y: None
theme_text_color: "Custom"
text_color: root.text_color
height: dp(20) if not self.text else self.texture_size[1]
font_style: "Money"
font_size: dp(16)
halign:"center"
# kv_end
"""
)
class M_CardLoader(MDCard):
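    """Tappable card that async-loads an image behind a loader overlay;
    on_load hides the loader and reveals the image.
    """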
text = StringProperty("")
text_radius = ListProperty([dp(5), ])
text_color = ListProperty([1, 1, 1, 1])
source = StringProperty("")
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.register_event_type("on_load")
def on_load(self):
self.ids.loader.opacity = 0
self.ids.image.color = [1, 1, 1, 1]
def on_touch_down(self, touch):
self.root.pause_clock()
def on_touch_up(self, touch):
timer = touch.time_end - touch.time_start
if timer < 0.2:
self.root.ids.raw.switch_tab("feeds")
self.root.resume_clock()
def on_release(self):
self.root.ids.feeds.dispatch("on_tab_release")
| 27.585106 | 73 | 0.529117 | 288 | 2,593 | 4.559028 | 0.388889 | 0.013709 | 0.011424 | 0.018279 | 0.025895 | 0.018279 | 0 | 0 | 0 | 0 | 0 | 0.024284 | 0.38064 | 2,593 | 93 | 74 | 27.88172 | 0.793275 | 0 | 0 | 0 | 0 | 0 | 0.038657 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.178571 | 0 | 0.535714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bbc545ddb8b337163afbee7b7359f2bf1545ca8 | 763 | py | Python | setup.py | OpenTAI/pre-commit-hooks | e123691fa26ff26d1a5f3513ee419bec6eef02ab | [
"MIT"
] | null | null | null | setup.py | OpenTAI/pre-commit-hooks | e123691fa26ff26d1a5f3513ee419bec6eef02ab | [
"MIT"
] | 1 | 2022-02-16T10:19:25.000Z | 2022-02-16T10:19:26.000Z | setup.py | OpenTAI/pre-commit-hooks | e123691fa26ff26d1a5f3513ee419bec6eef02ab | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup # type: ignore
def readme():
with open('./README.md', encoding='utf-8') as f:
content = f.read()
return content
setup(
name='pre_commit_hooks',
version='0.1.0',
description='A pre-commit hook for OpenTAI projects',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/OpenTAI/pre-commit-hooks',
author='OpenTAI Team',
author_email='',
packages=find_packages(),
python_requires='>=3.6',
install_requires=['PyYAML'],
entry_points={
'console_scripts': [
'say-hello=pre_commit_hooks.say_hello:main',
'check-copyright=pre_commit_hooks.check_copyright:main',
],
},
)
| 26.310345 | 68 | 0.647444 | 93 | 763 | 5.107527 | 0.634409 | 0.094737 | 0.117895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009934 | 0.208388 | 763 | 28 | 69 | 27.25 | 0.77649 | 0.015727 | 0 | 0 | 0 | 0 | 0.351135 | 0.125501 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.041667 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bc548fb933264d47e42737add228df3e1a66805 | 3,509 | py | Python | main.py | lhs9842/KNUTNoticeBot | cfc83f2abc079a660177d00da1eab288cad021b4 | [
"MIT"
] | 1 | 2022-02-23T01:54:07.000Z | 2022-02-23T01:54:07.000Z | main.py | lhs9842/KNUTNoticeBot | cfc83f2abc079a660177d00da1eab288cad021b4 | [
"MIT"
] | null | null | null | main.py | lhs9842/KNUTNoticeBot | cfc83f2abc079a660177d00da1eab288cad021b4 | [
"MIT"
] | 1 | 2022-02-23T07:17:31.000Z | 2022-02-23T07:17:31.000Z | import setting
import requests
import threading
import time
import sqlite3
from bs4 import BeautifulSoup
from urllib import parse
public_board = [["BBSMSTR_000000000059", "일반소식"], ["BBSMSTR_000000000060", "장학안내"], ["BBSMSTR_000000000055", "학사공지사항"]]  # [boardId, board name]
db_conn = sqlite3.connect("NoticeBot.db", check_same_thread=False)
db_cur = db_conn.cursor()
db_cur.execute('SELECT * FROM sqlite_master WHERE type="table" AND name="final_ntt"')  # check whether the table already exists
r = db_cur.fetchall()
if r:
print("기존 데이터를 불러옵니다.")
else:
print("새로 데이터베이스를 구축합니다.")
db_conn.execute('CREATE TABLE final_ntt(boardId TEXT, final_nttId TEXT)')
for n in public_board:
        db_conn.execute('INSERT INTO final_ntt VALUES ("' + n[0] + '", "1049241")')  # Seed with the nttId of one existing post from a monitored board; a bad seed floods the Telegram API on the first run and triggers "Too Many Requests" errors.
db_conn.commit()
def send_message(channel, message):
encode_message = parse.quote(message)
url = 'https://api.telegram.org/bot' + setting.bot_token + '/sendmessage?chat_id=' + channel + '&text=' + encode_message
response = requests.get(url)
if response.status_code != 200:
print("ERROR!!" + str(response.status_code))
def find_new_ntt(board_info):
try:
url = 'https://www.ut.ac.kr/cop/bbs/' + board_info[0] + '/selectBoardList.do'
response = requests.get(url)
if response.status_code == 200:
db_cur.execute("SELECT final_nttId FROM final_ntt WHERE boardId='" + board_info[0] + "'")
rows = db_cur.fetchall()
final = int(rows[0][0])
html = response.text
soup = BeautifulSoup(html, 'html.parser')
result_id = soup.findAll('input', {'name':'nttId', 'type':'hidden'})
r_n = soup.findAll('input', {'type':'submit'})
result_name = []
for n in r_n:
na = n.get('value')
if (na != "검색") & (na != "등록하기"): # 최상부 검색 버튼 및 최하부 페이지 만족도 조사 부분의 submit 버튼 예외 처리
result_name.append(na)
count = 0
result_name.reverse()
result_id.reverse()
for n in result_id:
i = int(n.get('value'))
                if i == 0:  # skip nttId 0, which belongs to the top search button
break
if i <= final:
count += 1
continue
send_message(setting.all_notice_channel, "[" + board_info[1] + "] " + result_name[count] + " : http://www.ut.ac.kr/cop/bbs/" + board_info[0] + "/selectBoardArticle.do?nttId=" + str(i))
db_conn.execute("UPDATE final_ntt SET final_nttId='" + str(i) + "' WHERE boardId='" + board_info[0] + "'")
count += 1
db_conn.commit()
except:
now = time.localtime()
message = "EXCEPT!! " + board_info[1]
message += "%04d/%02d/%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
encode_message = parse.quote(message)
url = 'https://api.telegram.org/bot' + setting.bot_token + '/sendmessage?chat_id=' + setting.admin_channel + '&text=' + encode_message
response = requests.get(url)
if response.status_code != 200:
print("NETWORK ERROR!!" + str(response.status_code) + "\n" + message)
find_new_ntt(board_info)
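# Poll every 30 seconds: each Bot_Start run schedules the next via a timer thread.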
def Bot_Start():
for c in public_board:
find_new_ntt(c)
threading.Timer(30, Bot_Start).start()
Bot_Start() | 44.987179 | 205 | 0.596751 | 477 | 3,509 | 4.224319 | 0.404612 | 0.035732 | 0.044665 | 0.032754 | 0.273449 | 0.206948 | 0.206948 | 0.206948 | 0.206948 | 0.159801 | 0 | 0.032133 | 0.263893 | 3,509 | 78 | 206 | 44.987179 | 0.747967 | 0.066686 | 0 | 0.152778 | 0 | 0 | 0.224159 | 0.021713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.097222 | 0 | 0.138889 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bc83f48988080bc745090b0af2be2b40f9b6a5e | 2,041 | py | Python | inference_methods_local.py | Yaakoubi/Struct-CKN | fa007fa71310866584bdf2e5b038e6663b94e965 | [
"MIT"
] | 1 | 2021-05-30T13:42:56.000Z | 2021-05-30T13:42:56.000Z | inference_methods_local.py | Yaakoubi/Struct-CKN | fa007fa71310866584bdf2e5b038e6663b94e965 | [
"MIT"
] | null | null | null | inference_methods_local.py | Yaakoubi/Struct-CKN | fa007fa71310866584bdf2e5b038e6663b94e965 | [
"MIT"
] | 2 | 2022-03-16T22:00:30.000Z | 2022-03-29T20:08:57.000Z | import ad3
import numpy as np
from pystruct.inference.common import _validate_params
class InferenceException(Exception):
pass
def inference_ad3_local(unary_potentials, pairwise_potentials, edges, relaxed=False,
verbose=0, return_energy=False, branch_and_bound=False,
inference_exception=None, return_marginals=False):
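    # Decode the labeling with AD3; exact=branch_and_bound makes the solver
    # exact, otherwise the LP relaxation may come back fractional or unsolved.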
b_multi_type = isinstance(unary_potentials, list)
if b_multi_type:
res = ad3.general_graph(unary_potentials, edges, pairwise_potentials,
verbose=verbose, n_iterations=4000, exact=branch_and_bound)
else:
n_states, pairwise_potentials = \
_validate_params(unary_potentials, pairwise_potentials, edges)
unaries = unary_potentials.reshape(-1, n_states)
res = ad3.general_graph(unaries, edges, pairwise_potentials,
verbose=verbose, n_iterations=4000, exact=branch_and_bound)
unary_marginals, pairwise_marginals, energy, solver_status = res
if verbose:
print(solver_status)
if solver_status in ["fractional", "unsolved"] and relaxed:
if b_multi_type:
y = (unary_marginals, pairwise_marginals)
else:
unary_marginals = unary_marginals.reshape(unary_potentials.shape)
y = (unary_marginals, pairwise_marginals)
else:
if b_multi_type:
if inference_exception and solver_status in ["fractional", "unsolved"]:
raise InferenceException(solver_status)
ly = list()
_cum_n_states = 0
for unary_marg in unary_marginals:
ly.append(_cum_n_states + np.argmax(unary_marg, axis=-1))
_cum_n_states += unary_marg.shape[1]
y = np.hstack(ly)
else:
y = np.argmax(unary_marginals, axis=-1)
if return_energy:
return y, -energy
if return_marginals:
return y, unary_marginals
return y
| 39.25 | 92 | 0.632533 | 227 | 2,041 | 5.374449 | 0.295154 | 0.091803 | 0.032787 | 0.029508 | 0.290164 | 0.17541 | 0.116393 | 0.116393 | 0.116393 | 0.116393 | 0 | 0.012544 | 0.296913 | 2,041 | 51 | 93 | 40.019608 | 0.837631 | 0 | 0 | 0.25 | 0 | 0 | 0.01809 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022727 | false | 0.022727 | 0.068182 | 0 | 0.181818 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bcb30fd29a6ef624f4b1ad7d00a496c9b08cdb4 | 6,611 | py | Python | leo/modes/lotos.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 1,550 | 2015-01-14T16:30:37.000Z | 2022-03-31T08:55:58.000Z | leo/modes/lotos.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 2,009 | 2015-01-13T16:28:52.000Z | 2022-03-31T18:21:48.000Z | leo/modes/lotos.py | ATikhonov2/leo-editor | 225aac990a9b2804aaa9dea29574d6e072e30474 | [
"MIT"
] | 200 | 2015-01-05T15:07:41.000Z | 2022-03-07T17:05:01.000Z | # Leo colorizer control file for lotos mode.
# This file is in the public domain.
# Properties for lotos mode.
properties = {
"commentEnd": "*)",
"commentStart": "(*",
"indentNextLines": "\\s*(let|library|process|specification|type|>>).*|\\s*(\\(|\\[\\]|\\[>|\\|\\||\\|\\|\\||\\|\\[.*\\]\\||\\[.*\\]\\s*->)\\s*",
}
# Attributes dict for lotos_main ruleset.
lotos_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "false",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for lotos mode.
attributesDictDict = {
"lotos_main": lotos_main_attributes_dict,
}
# Keywords dict for lotos_main ruleset.
lotos_main_keywords_dict = {
"accept": "keyword1",
"actualizedby": "keyword1",
"any": "keyword1",
"basicnaturalnumber": "keyword2",
"basicnonemptystring": "keyword2",
"behavior": "keyword1",
"behaviour": "keyword1",
"bit": "keyword2",
"bitnatrepr": "keyword2",
"bitstring": "keyword2",
"bool": "keyword2",
"boolean": "keyword2",
"choice": "keyword1",
"decdigit": "keyword2",
"decnatrepr": "keyword2",
"decstring": "keyword2",
"element": "keyword2",
"endlib": "keyword1",
"endproc": "keyword1",
"endspec": "keyword1",
"endtype": "keyword1",
"eqns": "keyword1",
"exit": "keyword1",
"false": "literal1",
"fbool": "keyword2",
"fboolean": "keyword2",
"for": "keyword1",
"forall": "keyword1",
"formaleqns": "keyword1",
"formalopns": "keyword1",
"formalsorts": "keyword1",
"hexdigit": "keyword2",
"hexnatrepr": "keyword2",
"hexstring": "keyword2",
"hide": "keyword1",
"i": "keyword1",
"in": "keyword1",
"is": "keyword1",
"let": "keyword1",
"library": "keyword1",
"nat": "keyword2",
"natrepresentations": "keyword2",
"naturalnumber": "keyword2",
"noexit": "keyword1",
"nonemptystring": "keyword2",
"octdigit": "keyword2",
"octet": "keyword2",
"octetstring": "keyword2",
"octnatrepr": "keyword2",
"octstring": "keyword2",
"of": "keyword1",
"ofsort": "keyword1",
"opnnames": "keyword1",
"opns": "keyword1",
"par": "keyword1",
"process": "keyword1",
"renamedby": "keyword1",
"richernonemptystring": "keyword2",
"set": "keyword2",
"sortnames": "keyword1",
"sorts": "keyword1",
"specification": "keyword1",
"stop": "keyword1",
"string": "keyword2",
"string0": "keyword2",
"string1": "keyword2",
"true": "literal1",
"type": "keyword1",
"using": "keyword1",
"where": "keyword1",
}
# Dictionary of keywords dictionaries for lotos mode.
keywordsDictDict = {
"lotos_main": lotos_main_keywords_dict,
}
# Rules for lotos_main ruleset.
def lotos_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="(*", end="*)",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="",exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def lotos_rule1(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq=">>",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule2(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="[>",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule3(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="|||",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule4(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="||",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule5(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="|[",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule6(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="]|",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule7(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="[]",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
def lotos_rule8(colorer, s, i):
return colorer.match_keywords(s, i)
# Rules dict for lotos_main ruleset.
rulesDict1 = {
"(": [lotos_rule0,],
"0": [lotos_rule8,],
"1": [lotos_rule8,],
"2": [lotos_rule8,],
"3": [lotos_rule8,],
"4": [lotos_rule8,],
"5": [lotos_rule8,],
"6": [lotos_rule8,],
"7": [lotos_rule8,],
"8": [lotos_rule8,],
"9": [lotos_rule8,],
">": [lotos_rule1,],
"@": [lotos_rule8,],
"A": [lotos_rule8,],
"B": [lotos_rule8,],
"C": [lotos_rule8,],
"D": [lotos_rule8,],
"E": [lotos_rule8,],
"F": [lotos_rule8,],
"G": [lotos_rule8,],
"H": [lotos_rule8,],
"I": [lotos_rule8,],
"J": [lotos_rule8,],
"K": [lotos_rule8,],
"L": [lotos_rule8,],
"M": [lotos_rule8,],
"N": [lotos_rule8,],
"O": [lotos_rule8,],
"P": [lotos_rule8,],
"Q": [lotos_rule8,],
"R": [lotos_rule8,],
"S": [lotos_rule8,],
"T": [lotos_rule8,],
"U": [lotos_rule8,],
"V": [lotos_rule8,],
"W": [lotos_rule8,],
"X": [lotos_rule8,],
"Y": [lotos_rule8,],
"Z": [lotos_rule8,],
"[": [lotos_rule2,lotos_rule7,],
"]": [lotos_rule6,],
"a": [lotos_rule8,],
"b": [lotos_rule8,],
"c": [lotos_rule8,],
"d": [lotos_rule8,],
"e": [lotos_rule8,],
"f": [lotos_rule8,],
"g": [lotos_rule8,],
"h": [lotos_rule8,],
"i": [lotos_rule8,],
"j": [lotos_rule8,],
"k": [lotos_rule8,],
"l": [lotos_rule8,],
"m": [lotos_rule8,],
"n": [lotos_rule8,],
"o": [lotos_rule8,],
"p": [lotos_rule8,],
"q": [lotos_rule8,],
"r": [lotos_rule8,],
"s": [lotos_rule8,],
"t": [lotos_rule8,],
"u": [lotos_rule8,],
"v": [lotos_rule8,],
"w": [lotos_rule8,],
"x": [lotos_rule8,],
"y": [lotos_rule8,],
"z": [lotos_rule8,],
"|": [lotos_rule3,lotos_rule4,lotos_rule5,],
}
# x.rulesDictDict for lotos mode.
rulesDictDict = {
"lotos_main": rulesDict1,
}
# Import dict for lotos mode.
importDict = {}
| 29.513393 | 149 | 0.570867 | 715 | 6,611 | 5.044755 | 0.244755 | 0.177433 | 0.022456 | 0.037427 | 0.440255 | 0.433879 | 0.418908 | 0.401164 | 0.401164 | 0.401164 | 0 | 0.032126 | 0.223113 | 6,611 | 223 | 150 | 29.64574 | 0.670171 | 0.062472 | 0 | 0.036269 | 0 | 0 | 0.250084 | 0.020463 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046632 | false | 0 | 0.005181 | 0.046632 | 0.098446 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bce53c9abe42e1145ff3c9feca2ddc25c7666a1 | 275 | py | Python | src/ProblemSolving/DiagonalDifference.py | Feng-Zhao/hackerrankPy | fc04f0a11cf543ad3697860eca774103593abcd5 | [
"Apache-2.0"
] | null | null | null | src/ProblemSolving/DiagonalDifference.py | Feng-Zhao/hackerrankPy | fc04f0a11cf543ad3697860eca774103593abcd5 | [
"Apache-2.0"
] | null | null | null | src/ProblemSolving/DiagonalDifference.py | Feng-Zhao/hackerrankPy | fc04f0a11cf543ad3697860eca774103593abcd5 | [
"Apache-2.0"
] | null | null | null | def diagonalDifference(arr):
a = 0
b = 0
for i in range(0, len(arr)):
a += arr[i][i]
b += arr[i][len(arr) - i - 1]
return abs(a - b)
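# Example: for [[11, 2, 4], [4, 5, 6], [10, 8, -12]] the primary diagonal sums
# to 11 + 5 - 12 = 4 and the secondary to 4 + 5 + 10 = 19, so the result is 15.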
if __name__ == '__main__':
arr = [[11, 2, 4],[4, 5, 6],[10, 8, -12]]
print(diagonalDifference(arr))
| 21.153846 | 45 | 0.490909 | 45 | 275 | 2.822222 | 0.577778 | 0.094488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 0.301818 | 275 | 12 | 46 | 22.916667 | 0.578125 | 0 | 0 | 0 | 0 | 0 | 0.029091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bd0d6670874846404621cb14ddcf0728b11d685 | 1,395 | py | Python | balltze_simulation/balltze_pybullet/balltze/balltze.py | Kotochleb/Balltze | 55b15cb57d20f7f212293bf838e1d6cf874bb4c2 | [
"MIT"
] | 1 | 2021-09-04T03:59:01.000Z | 2021-09-04T03:59:01.000Z | balltze_simulation/balltze_pybullet/balltze/balltze.py | Kotochleb/Balltze | 55b15cb57d20f7f212293bf838e1d6cf874bb4c2 | [
"MIT"
] | null | null | null | balltze_simulation/balltze_pybullet/balltze/balltze.py | Kotochleb/Balltze | 55b15cb57d20f7f212293bf838e1d6cf874bb4c2 | [
"MIT"
] | null | null | null | import pybullet as p
import time
import numpy as np
import pybullet_data
from balltze_description import Balltze, BalltzeKinematics
import math
if __name__ == '__main__':
time_step = 1./240.
physicsClient = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0,0,-9.81)
p.setTimeStep(time_step)
planeId = p.loadURDF('plane.urdf')
robot = Balltze('../../../balltze_description/balltze_description/urdf/balltze.urdf', p, position=[0,0,0.11])
kinematics = BalltzeKinematics(None)
i = 0.0
dir = 1
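    # Sweep i back and forth over [-pi/10, pi/10]; each step solves body IK
    # for a slightly rotated and shifted trunk pose while the four feet are
    # held at fixed world targets.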
while True:
try:
ends = kinematics.body_inverse([0.0,0.0,i], [0.0,i/10,0.02], [[0.1, -0.1, -0.06],[0.1, 0.06, -0.02],[-0.1, -0.06, -0.06],[-0.1, 0.06, -0.06]])
joints = kinematics.inverse(ends)
robot.set_joint_arr(np.array(joints.T).reshape(1,12)[0])
# print((kinematics.forward_leg(joints)*1000).astype(np.int64)/1000)
# print(joints)
# print(ends)
except Exception as e:
print(e)
i += dir*0.0007
if i >= np.pi/10:
dir = -1
if i <= -np.pi/10:
dir = 1
# robot.set_joint_arr([0, -np.pi/2, np.pi/2]*4)
p.stepSimulation()
time.sleep(time_step)
cubePos, cubeOrn = p.getBasePositionAndOrientation(robot)
print(cubePos,cubeOrn)
p.disconnect()
| 32.44186 | 154 | 0.597133 | 200 | 1,395 | 4.055 | 0.38 | 0.019729 | 0.018496 | 0.024661 | 0.073983 | 0.062885 | 0.054254 | 0 | 0 | 0 | 0 | 0.083491 | 0.244444 | 1,395 | 42 | 155 | 33.214286 | 0.685958 | 0.098925 | 0 | 0.058824 | 0 | 0 | 0.067093 | 0.052716 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.176471 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bd20797291e733d3485db7e9a7d16d42673718a | 8,850 | py | Python | urbit_sniffer.py | laanwj/urbit-tools | b3823d50d5ab84c0852593e3255c0d7c51de6d1c | [
"MIT"
] | 18 | 2015-02-03T19:27:18.000Z | 2021-04-04T03:03:57.000Z | urbit_sniffer.py | laanwj/urbit-tools | b3823d50d5ab84c0852593e3255c0d7c51de6d1c | [
"MIT"
] | null | null | null | urbit_sniffer.py | laanwj/urbit-tools | b3823d50d5ab84c0852593e3255c0d7c51de6d1c | [
"MIT"
] | 2 | 2015-10-02T01:37:13.000Z | 2017-06-04T03:41:49.000Z | #!/usr/bin/python3
# Copyright (c) 2014 Wladimir J. van der Laan, Visucore
# Distributed under the MIT software license, see
# http://www.opensource.org/licenses/mit-license.php.
'''
urbit UDP sniffer
Usage: urbit_sniffer.py [-p <port1>-<port2>,<port3>,...] [-i <interface>]
'''
import struct, sys, io, argparse, datetime
from struct import pack,unpack
from binascii import b2a_hex
from urbit.util import format_hexnum,from_le,to_le,dump_noun
from urbit.cue import cue
from urbit.pname import pname
from urbit.crua import de_crua
from misc.sniffer import Sniffer, PCapLoader
if sys.version_info[0:2] < (3,0):
print("Requires python3", file=sys.stderr)
exit(1)
class Args: # default args
# interface we're interested in
interface = b'eth0'
# ports we're interested in
ports = set(list(range(4000,4008)) + [13337, 41954])
# known keys for decrypting packets
keys = {}
# dump entire nouns
show_nouns = True
# show hex for decrypted packets
show_raw = False
# show timestamps
show_timestamps = False
# show keyhashes for decrypted packets
always_show_keyhashes = False
# constants...
CRYPTOS = {0:'%none', 1:'%open', 2:'%fast', 3:'%full'}
# utilities...
def ipv4str(addr):
'''Bytes to IPv4 address'''
return '.'.join(['%i' % i for i in addr])
def crypto_name(x):
'''Name for crypto algo'''
if x in CRYPTOS:
return CRYPTOS[x]
else:
return 'unk%02i' % x
def hexstr(x):
'''Bytes to hex string'''
return b2a_hex(x).decode()
def colorize(str, col):
return ('\x1b[38;5;%im' % col) + str + ('\x1b[0m')
# cli colors and glyphs
COLOR_TIMESTAMP = 38
COLOR_RECIPIENT = 51
COLOR_IP = 21
COLOR_HEADER = 27
COLOR_VALUE = 33
COLOR_DATA = 250
COLOR_DATA_ENC = 245
v_arrow = colorize('→', 240)
v_attention = colorize('>', 34) + colorize('>', 82) + colorize('>', 118)
v_colon = colorize(':', 240)
v_equal = colorize('=', 245)
def parse_args():
args = Args()
parser = argparse.ArgumentParser(description='Urbit sniffer. Dump incoming and outgoing urbit packets.')
pdefault = '4000-4007,13337,41954' # update this when Args changes...
idefault = args.interface.decode()
parser.add_argument('-p, --ports', dest='ports', help='Ports to listen on (default: '+pdefault+')')
parser.add_argument('-i, --interface', dest='interface', help='Interface to listen on (default:'+idefault+')', default=idefault)
parser.add_argument('-k, --keys', dest='keys', help='Import keys from file (with <keyhash> <key> per line)', default=None)
parser.add_argument('-n, --no-show-nouns', dest='show_nouns', action='store_false', help='Don\'t show full noun representation of decoded packets', default=True)
parser.add_argument('-r, --show-raw', dest='show_raw', action='store_true', help='Show raw hex representation of decoded packets', default=False)
parser.add_argument('-t, --show-timestamp', dest='show_timestamps', action='store_true', help='Show timestamps', default=False)
parser.add_argument('-l, --read', dest='read_dump', help='Read a pcap dump file (eg from tcpdump)', default=None)
parser.add_argument('--always-show-keyhashes', dest='always_show_keyhashes', help='Show keyhashes even for decrypted packets (more spammy)', default=False)
r = parser.parse_args()
if r.read_dump is not None:
args.packet_source = PCapLoader(r.read_dump)
else:
args.packet_source = Sniffer(r.interface.encode())
if r.ports is not None:
args.ports = set()
for t in r.ports.split(','):
(a,_,b) = t.partition('-')
ai = int(a)
bi = int(b) if b else ai
args.ports.update(list(range(int(ai), int(bi)+1)))
if r.keys is not None:
args.keys = {}
print(v_attention + ' Loading decryption keys from ' + r.keys)
with open(r.keys, 'r') as f:
for line in f:
line = line.strip()
if not line or line.startswith('#'):
continue
l = line.split()
# filter out '.' so that keys can be copied directly
args.keys[int(l[0].replace('.',''))] = int(l[1].replace('.',''))
args.show_nouns = r.show_nouns
args.show_raw = r.show_raw
args.show_timestamps = r.show_timestamps
args.always_show_keyhashes = r.always_show_keyhashes
return args
def dump_urbit_packet(args, timestamp, srcaddr, sport, dstaddr, dport, data):
try:
# Urbit header and payload
urhdr = unpack('<L', data[0:4])[0]
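        # 32-bit little-endian header, LSB first: proto(3) | mug(20) | yax(2) | qax(2) | crypto(5)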
proto = urhdr & 7
mug = (urhdr >> 3) & 0xfffff
yax = (urhdr >> 23) & 3
yax_bytes = 1<<(yax+1)
qax = (urhdr >> 25) & 3
qax_bytes = 1<<(qax+1)
crypto = (urhdr >> 27)
sender = from_le(data[4:4+yax_bytes])
receiver = from_le(data[4+yax_bytes:4+yax_bytes+qax_bytes])
payload = data[4+yax_bytes+qax_bytes:]
if crypto == 2: # %fast
keyhash = from_le(payload[0:16])
payload = payload[16:]
else:
keyhash = None
except (IndexError, struct.error):
print('Warn: invpkt')
return
# Decode packet if crypto known
decrypted = False
if crypto in [0,1]: # %none %open
decrypted = True
if crypto == 2 and keyhash in args.keys: # %fast
payload = from_le(payload)
payload = de_crua(args.keys[keyhash], payload)
payload = to_le(payload)
decrypted = True
# Print packet
hdata = [('proto', str(proto)),
('mug', '%05x' % mug),
('crypto', crypto_name(crypto))]
if keyhash is not None and (args.always_show_keyhashes or not decrypted):
hdata += [('keyhash', format_hexnum(keyhash))]
if srcaddr is not None:
metadata = ''
if args.show_timestamps:
metadata += colorize(datetime.datetime.utcfromtimestamp(timestamp).strftime('%H%M%S.%f'), COLOR_TIMESTAMP) + ' '
metadata += (colorize(ipv4str(srcaddr), COLOR_IP) + v_colon + colorize(str(sport), COLOR_IP) + ' ' +
colorize(pname(sender), COLOR_RECIPIENT) + ' ' +
v_arrow + ' ' +
colorize(ipv4str(dstaddr), COLOR_IP) + v_colon + colorize(str(dport), COLOR_IP) + ' ' +
colorize(pname(receiver), COLOR_RECIPIENT))
else:
metadata = (' %fore ' + # nested packet
colorize(pname(sender), COLOR_RECIPIENT) + ' ' +
v_arrow + ' ' +
colorize(pname(receiver), COLOR_RECIPIENT))
print( metadata + v_colon + ' ' +
' '.join(colorize(key, COLOR_HEADER) + v_equal + colorize(value, COLOR_VALUE) for (key,value) in hdata))
if decrypted: # decrypted or unencrypted data
if args.show_raw:
print(' ' + colorize(hexstr(payload), COLOR_DATA))
cake = cue(from_le(payload))
if cake[0] == 1701998438: # %fore
subpacket = to_le(cake[1][1][1])
dump_urbit_packet(args, None, None, None, None, None, subpacket)
else:
if args.show_nouns:
sys.stdout.write(' ')
dump_noun(cake, sys.stdout)
sys.stdout.write('\n')
else: # [sealed]
print(' [' + colorize(hexstr(payload), COLOR_DATA_ENC)+']')
def main(args):
print(v_attention + ' Listening on ' + args.packet_source.name + ' ports ' + (',').join(str(x) for x in args.ports))
for timestamp,packet in args.packet_source:
try:
# IP header
iph = unpack('!BBHHHBBH4s4s', packet[0:20])
ihl = (iph[0] & 15)*4
if ihl < 20: # cannot handle IP headers <20 bytes
# print("Warn: invhdr")
continue
protocol = iph[6]
srcaddr = iph[8]
dstaddr = iph[9]
if protocol != 17: # not UDP
#print("Warn: invproto")
continue
# UDP header
(sport, dport, ulength, uchecksum) = unpack('!HHHH', packet[ihl:ihl+8])
data = packet[ihl+8:ihl+ulength]
if len(data) != (ulength-8):
print("Warn: invlength")
continue # invalid length packet
if dport not in args.ports and sport not in args.ports: # only urbit ports
continue
except (IndexError, struct.error):
print('Warn: invpkt')
continue
dump_urbit_packet(args, timestamp, srcaddr, sport, dstaddr, dport, data)
if __name__ == '__main__':
# Force UTF8 out
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf8', line_buffering=True)
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf8', line_buffering=True)
try:
main(parse_args())
except KeyboardInterrupt:
pass
| 38.146552 | 165 | 0.599887 | 1,135 | 8,850 | 4.567401 | 0.274009 | 0.020062 | 0.026235 | 0.007523 | 0.154514 | 0.092207 | 0.055941 | 0.039738 | 0.021605 | 0.021605 | 0 | 0.026981 | 0.262938 | 8,850 | 231 | 166 | 38.311688 | 0.767592 | 0.110508 | 0 | 0.152542 | 0 | 0.011299 | 0.110585 | 0.008319 | 0 | 0 | 0.000896 | 0 | 0 | 1 | 0.039548 | false | 0.00565 | 0.050847 | 0.00565 | 0.175141 | 0.050847 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bd54aeda1cf3aa43806abaf7f0e2dafeca01c0d | 1,638 | py | Python | app/tests/v1/test_product.py | owezzy/StoreManager | 821856c0d502b55bd499cfe9188cd4951c5b0b75 | [
"MIT"
] | null | null | null | app/tests/v1/test_product.py | owezzy/StoreManager | 821856c0d502b55bd499cfe9188cd4951c5b0b75 | [
"MIT"
] | 2 | 2018-10-10T22:32:35.000Z | 2021-06-01T22:50:56.000Z | app/tests/v1/test_product.py | owezzy/StoreManager | 821856c0d502b55bd499cfe9188cd4951c5b0b75 | [
"MIT"
] | 1 | 2018-10-25T12:42:41.000Z | 2018-10-25T12:42:41.000Z | import unittest
import json
from app.app import create_app
POST_PRODUCT_URL = '/api/v1/products'
GET_A_SINGLE_PRODUCT = '/api/v1/product/1'
GET_ALL_PRODUCTS = '/api/v1/products'
class TestProduct(unittest.TestCase):
def setUp(self):
"""Initialize the api with test variable"""
self.app = create_app('testing')
self.client = self.app.test_client()
self.create_product = json.dumps(dict(
product_name="shoes",
stock=2,
price=3000
))
def test_add_product(self):
"""Test for post product"""
resource = self.client.post(
POST_PRODUCT_URL,
data=self.create_product,
content_type='application/json')
data = json.loads(resource.data.decode())
print(data)
self.assertEqual(resource.status_code, 201, msg='CREATED')
self.assertEqual(resource.content_type, 'application/json')
def test_get_products(self):
"""test we can get products"""
resource = self.client.get(POST_PRODUCT_URL,
data=json.dumps(self.create_product),
content_type='application/json')
get_data = json.dumps(resource.data.decode())
print(get_data)
self.assertEqual(resource.content_type, 'application/json')
self.assertEqual(resource.status_code, 200)
def test_get(self):
"""test we can get a single products"""
resource = self.client.get(GET_A_SINGLE_PRODUCT)
self.assertEqual(resource.status_code, 404)
if __name__ == '__main__':
unittest.main()
| 30.333333 | 72 | 0.623321 | 194 | 1,638 | 5.041237 | 0.309278 | 0.076687 | 0.117587 | 0.106339 | 0.381391 | 0.188139 | 0.188139 | 0 | 0 | 0 | 0 | 0.01495 | 0.264957 | 1,638 | 53 | 73 | 30.90566 | 0.797342 | 0.072039 | 0 | 0.108108 | 0 | 0 | 0.093396 | 0 | 0 | 0 | 0 | 0 | 0.135135 | 1 | 0.108108 | false | 0 | 0.081081 | 0 | 0.216216 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bd88438849c3a76c7d468249205380ab8ab8c38 | 1,346 | py | Python | cryptoshredding/s3/client.py | hupe1980/cryptoshredding | 1ab5ee452c4435f486006aa2cc1a7bee440d91fe | [
"MIT"
] | null | null | null | cryptoshredding/s3/client.py | hupe1980/cryptoshredding | 1ab5ee452c4435f486006aa2cc1a7bee440d91fe | [
"MIT"
] | null | null | null | cryptoshredding/s3/client.py | hupe1980/cryptoshredding | 1ab5ee452c4435f486006aa2cc1a7bee440d91fe | [
"MIT"
] | null | null | null | import boto3
from botocore.client import BaseClient
from ..key_store import KeyStore
from .object import CryptoObject
from .stream_body_wrapper import StreamBodyWrapper
class CryptoS3(object):
def __init__(
self,
client: BaseClient,
key_store: KeyStore,
) -> None:
self._client = client
self._key_store = key_store
def put_object(self, CSEKeyId: str, Bucket: str, Key: str, **kwargs):
obj = CryptoObject(
key_store=self._key_store,
object=boto3.resource("s3").Object(Bucket, Key),
)
return obj.put(CSEKeyId=CSEKeyId, **kwargs)
def get_object(self, **kwargs):
obj = self._client.get_object(**kwargs)
obj["Body"] = StreamBodyWrapper(
key_store=self._key_store,
stream_body=obj["Body"],
metadata=obj["Metadata"],
)
return obj
def __getattr__(self, name: str):
"""Catch any method/attribute lookups that are not defined in this class and try
to find them on the provided bridge object.
:param str name: Attribute name
:returns: Result of asking the provided client object for that attribute name
:raises AttributeError: if attribute is not found on provided bridge object
"""
return getattr(self._client, name)
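
# Illustrative usage sketch (not part of the library). How KeyStore is
# constructed is an assumption here; see the key_store module for details.
#
#     import boto3
#     from cryptoshredding.key_store import KeyStore
#     from cryptoshredding.s3.client import CryptoS3
#
#     s3 = CryptoS3(client=boto3.client("s3"), key_store=KeyStore(...))
#     s3.put_object(CSEKeyId="key-1", Bucket="my-bucket", Key="data.bin", Body=b"...")
#     body = s3.get_object(Bucket="my-bucket", Key="data.bin")["Body"].read()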
| 31.302326 | 88 | 0.641902 | 161 | 1,346 | 5.192547 | 0.403727 | 0.076555 | 0.043062 | 0.035885 | 0.047847 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004082 | 0.271917 | 1,346 | 42 | 89 | 32.047619 | 0.84898 | 0.228083 | 0 | 0.068966 | 0 | 0 | 0.018145 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.172414 | 0 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bd9272def1931c3aead22a640fadc1a05f50b8f | 5,627 | py | Python | tests/infra/test_subnet.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | 2 | 2017-10-31T18:48:20.000Z | 2018-03-04T20:35:20.000Z | tests/infra/test_subnet.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | null | null | null | tests/infra/test_subnet.py | bretttegart/treadmill | 812109e31c503a6eddaee2d3f2e1faf2833b6aaf | [
"Apache-2.0"
] | null | null | null | """
Unit test for EC2 subnet.
"""
import unittest
import mock
from treadmill.infra.subnet import Subnet
class SubnetTest(unittest.TestCase):
@mock.patch('treadmill.infra.connection.Connection')
def test_init(self, ConnectionMock):
conn_mock = ConnectionMock()
Subnet.ec2_conn = Subnet.route53_conn = conn_mock
subnet = Subnet(
id=1,
vpc_id='vpc-id',
metadata={
'Tags': [{
'Key': 'Name',
'Value': 'goo'
}]
}
)
        self.assertEqual(subnet.vpc_id, 'vpc-id')
        self.assertEqual(subnet.name, 'goo')
        self.assertEqual(subnet.ec2_conn, conn_mock)
@mock.patch('treadmill.infra.connection.Connection')
def test_create_tags(self, ConnectionMock):
conn_mock = ConnectionMock()
conn_mock.create_tags = mock.Mock()
Subnet.ec2_conn = Subnet.route53_conn = conn_mock
subnet = Subnet(
name='foo',
id='1',
vpc_id='vpc-id'
)
subnet.create_tags()
conn_mock.create_tags.assert_called_once_with(
Resources=['1'],
Tags=[{
'Key': 'Name',
'Value': 'foo'
}]
)
@mock.patch('treadmill.infra.connection.Connection')
def test_create(self, ConnectionMock):
ConnectionMock.context.region_name = 'us-east-1'
conn_mock = ConnectionMock()
subnet_json_mock = {
'SubnetId': '1'
}
conn_mock.create_subnet = mock.Mock(return_value={
'Subnet': subnet_json_mock
})
conn_mock.create_route_table = mock.Mock(return_value={
'RouteTable': {'RouteTableId': 'route-table-id'}
})
Subnet.ec2_conn = Subnet.route53_conn = conn_mock
_subnet = Subnet.create(
cidr_block='172.23.0.0/24',
vpc_id='vpc-id',
name='foo',
gateway_id='gateway-id'
)
self.assertEqual(_subnet.id, '1')
self.assertEqual(_subnet.name, 'foo')
self.assertEqual(_subnet.metadata, subnet_json_mock)
conn_mock.create_subnet.assert_called_once_with(
VpcId='vpc-id',
CidrBlock='172.23.0.0/24',
AvailabilityZone='us-east-1a'
)
conn_mock.create_tags.assert_called_once_with(
Resources=['1'],
Tags=[{
'Key': 'Name',
'Value': 'foo'
}]
)
conn_mock.create_route_table.assert_called_once_with(
VpcId='vpc-id'
)
conn_mock.create_route.assert_called_once_with(
RouteTableId='route-table-id',
DestinationCidrBlock='0.0.0.0/0',
GatewayId='gateway-id'
)
conn_mock.associate_route_table.assert_called_once_with(
RouteTableId='route-table-id',
SubnetId='1',
)
@mock.patch('treadmill.infra.connection.Connection')
def test_refresh(self, ConnectionMock):
conn_mock = ConnectionMock()
subnet_json_mock = {
'VpcId': 'vpc-id',
'Foo': 'bar'
}
conn_mock.describe_subnets = mock.Mock(return_value={
'Subnets': [subnet_json_mock]
})
Subnet.ec2_conn = Subnet.route53_conn = conn_mock
_subnet = Subnet(id='subnet-id', vpc_id=None, metadata=None)
_subnet.refresh()
self.assertEqual(_subnet.vpc_id, 'vpc-id')
self.assertEqual(_subnet.metadata, subnet_json_mock)
@mock.patch.object(Subnet, 'refresh')
@mock.patch.object(Subnet, 'get_instances')
@mock.patch('treadmill.infra.connection.Connection')
def test_show(self, ConnectionMock, get_instances_mock, refresh_mock):
conn_mock = ConnectionMock()
Subnet.ec2_conn = Subnet.route53_conn = conn_mock
_subnet = Subnet(id='subnet-id',
vpc_id='vpc-id',
metadata=None)
_subnet.instances = None
result = _subnet.show()
self.assertEqual(
result,
{
'VpcId': 'vpc-id',
'SubnetId': 'subnet-id',
'Instances': None
}
)
get_instances_mock.assert_called_once_with(refresh=True, role=None)
refresh_mock.assert_called_once()
@mock.patch('treadmill.infra.connection.Connection')
def test_persisted(self, ConnectionMock):
_subnet = Subnet(id='subnet-id', metadata={'foo': 'goo'})
self.assertFalse(_subnet.persisted)
_subnet.metadata['SubnetId'] = 'subnet-id'
self.assertTrue(_subnet.persisted)
@mock.patch('treadmill.infra.connection.Connection')
def test_persist(self, ConnectionMock):
ConnectionMock.context.region_name = 'us-east-1'
conn_mock = ConnectionMock()
Subnet.ec2_conn = Subnet.route53_conn = conn_mock
conn_mock.create_subnet = mock.Mock(
return_value={
'Subnet': {
'foo': 'bar'
}
}
)
_subnet = Subnet(
id='subnet-id', metadata=None, vpc_id='vpc-id', name='subnet-name'
)
_subnet.persist(
cidr_block='cidr-block',
gateway_id='gateway-id',
)
self.assertEqual(_subnet.metadata, {'foo': 'bar'})
conn_mock.create_subnet.assert_called_once_with(
VpcId='vpc-id',
CidrBlock='cidr-block',
AvailabilityZone='us-east-1a'
)
| 30.090909 | 78 | 0.563355 | 582 | 5,627 | 5.201031 | 0.139175 | 0.066072 | 0.04625 | 0.052858 | 0.629997 | 0.584077 | 0.502147 | 0.438057 | 0.326396 | 0.259663 | 0 | 0.014048 | 0.316865 | 5,627 | 186 | 79 | 30.252688 | 0.773413 | 0.004443 | 0 | 0.385621 | 0 | 0 | 0.135145 | 0.0463 | 0 | 0 | 0 | 0 | 0.137255 | 1 | 0.045752 | false | 0 | 0.019608 | 0 | 0.071895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bdac7bc37d6267a61f05383477b3f0ca7a95eab | 5,138 | py | Python | tests/building/test_tokenization.py | fossabot/langumo | 2d8b30979878bb27fb07cc31879c13c5c186582c | [
"Apache-2.0"
] | 7 | 2020-09-05T08:30:25.000Z | 2021-11-01T14:07:58.000Z | tests/building/test_tokenization.py | fossabot/langumo | 2d8b30979878bb27fb07cc31879c13c5c186582c | [
"Apache-2.0"
] | 2 | 2020-09-11T14:19:47.000Z | 2021-03-05T17:22:21.000Z | tests/building/test_tokenization.py | fossabot/langumo | 2d8b30979878bb27fb07cc31879c13c5c186582c | [
"Apache-2.0"
] | 3 | 2020-09-11T14:16:06.000Z | 2021-10-31T14:18:10.000Z | import tempfile
from langumo.building import TrainTokenizer, TokenizeSentences
from langumo.utils import AuxiliaryFileManager
_dummy_corpus_content = (
'Wikipedia is a multilingual online encyclopedia created and maintained '
'as an open collaboration project by a community of volunteer editors '
'using a wiki-based editing system. It is the largest and most popular '
'general reference work on the World Wide Web. It is also one of the 15 '
'most popular websites ranked by Alexa, as of August 2020. It features '
'exclusively free content and no commercial ads. It is hosted by the '
'Wikimedia Foundation, a non-profit organization funded primarily through '
'donations.\n'
'Wikipedia was launched on January 15, 2001, and was created by Jimmy '
'Wales and Larry Sanger. Sanger coined its name as a portmanteau of the '
'terms "wiki" and "encyclopedia". Initially an English-language '
'encyclopedia, versions of Wikipedia in other languages were quickly '
'developed. With 6.1 million articles, the English Wikipedia is the '
'largest of the more than 300 Wikipedia encyclopedias. Overall, Wikipedia '
'comprises more than 54 million articles attracting 1.5 billion unique '
'visitors per month.\n'
'In 2005, Nature published a peer review comparing 42 hard science '
'articles from Encyclopædia Britannica and Wikipedia and found that '
'Wikipedia\'s level of accuracy approached that of Britannica, although '
'critics suggested that it might not have fared so well in a similar '
'study of a random sampling of all articles or one focused on social '
'science or contentious social issues. The following year, Time stated '
'that the open-door policy of allowing anyone to edit had made Wikipedia '
'the biggest and possibly the best encyclopedia in the world, and was a '
'testament to the vision of Jimmy Wales.\n'
'Wikipedia has been criticized for exhibiting systemic bias and for being '
'subject to manipulation and spin in controversial topics; Edwin Black '
'has criticized Wikipedia for presenting a mixture of "truth, half truth, '
'and some falsehoods". Wikipedia has also been criticized for gender '
'bias, particularly on its English-language version, where the dominant '
'majority of editors are male. However, edit-a-thons have been held to '
'encourage female editors and increase the coverage of women\'s topics. '
'Facebook announced that by 2017 it would help readers detect fake news '
'by suggesting links to related Wikipedia articles. YouTube announced a '
'similar plan in 2018.'
)
def test_subset_file_creation():
with tempfile.TemporaryDirectory() as tdir, \
AuxiliaryFileManager(f'{tdir}/workspace') as afm:
corpus = afm.create()
with corpus.open('w') as fp:
fp.write('hello world!\n' * 100)
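        # Each corpus line is 'hello world!\n' (13 bytes), so a subset of N
        # bytes should yield roughly N / 13 lines, capped at the 100 available.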
with (TrainTokenizer(subset_size=1024)
._create_subset_file(afm, corpus)
.open('r')) as fp:
assert len(fp.readlines()) == 79
with (TrainTokenizer(subset_size=128)
._create_subset_file(afm, corpus)
.open('r')) as fp:
assert len(fp.readlines()) == 10
with (TrainTokenizer(subset_size=2000)
._create_subset_file(afm, corpus)
.open('r')) as fp:
assert len(fp.readlines()) == 100
def test_training_wordpiece_tokenizer():
with tempfile.TemporaryDirectory() as tdir, \
AuxiliaryFileManager(f'{tdir}/workspace') as afm:
corpus = afm.create()
with corpus.open('w') as fp:
fp.write(_dummy_corpus_content)
# Train WordPiece tokenizer and get vocabulary file.
vocab = (TrainTokenizer(vocab_size=128,
limit_alphabet=64,
unk_token='[UNK]')
.build(afm, corpus))
# Read subwords from the vocabulary file.
with vocab.open('r') as fp:
words = fp.readlines()
# Check if the number of total words equals to vocabulary size and the
# vocabulary contains unknown token.
assert len(words) == 128
assert words[0].strip() == '[UNK]'
def test_subword_tokenization():
with tempfile.TemporaryDirectory() as tdir, \
AuxiliaryFileManager(f'{tdir}/workspace') as afm:
corpus = afm.create()
with corpus.open('w') as fp:
fp.write(_dummy_corpus_content)
# Train WordPiece vocabulary and tokenize sentences.
vocab = (TrainTokenizer(vocab_size=128, limit_alphabet=64)
.build(afm, corpus))
tokenized = (TokenizeSentences(unk_token='[UNK]')
.build(afm, corpus, vocab))
# Test if the tokenization is correctly applied to the corpus. Note
# that the tokenizer model will normalize the sentences.
with tokenized.open('r') as fp:
assert (fp.read().strip().replace('##', '').replace(' ', '')
== _dummy_corpus_content.lower().replace(' ', ''))
| 47.137615 | 79 | 0.665434 | 650 | 5,138 | 5.201538 | 0.436923 | 0.023957 | 0.010352 | 0.01331 | 0.216208 | 0.211772 | 0.196983 | 0.196983 | 0.169772 | 0.169772 | 0 | 0.018243 | 0.253211 | 5,138 | 108 | 80 | 47.574074 | 0.862914 | 0.071234 | 0 | 0.258824 | 0 | 0 | 0.47775 | 0 | 0 | 0 | 0 | 0 | 0.070588 | 1 | 0.035294 | false | 0 | 0.035294 | 0 | 0.070588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bdb7b37ea55baaa1973a1fff39476ce6ea71851 | 12,819 | py | Python | examples/BertNewsClassification/news_classifier.py | mlflow/mlflow-torchserve | 91663b630ef12313da3ad821767faf3fc409345b | [
"Apache-2.0"
] | 40 | 2020-11-13T02:08:10.000Z | 2022-03-27T07:41:57.000Z | examples/BertNewsClassification/news_classifier.py | Ideas2IT/mlflow-torchserve | d6300fb73f16d74ee2c7718c249faf485c4f3b62 | [
"Apache-2.0"
] | 23 | 2020-11-16T11:28:01.000Z | 2021-09-23T11:28:24.000Z | examples/BertNewsClassification/news_classifier.py | Ideas2IT/mlflow-torchserve | d6300fb73f16d74ee2c7718c249faf485c4f3b62 | [
"Apache-2.0"
] | 15 | 2020-11-13T10:25:25.000Z | 2022-02-01T10:13:20.000Z | # pylint: disable=W0221
# pylint: disable=W0613
# pylint: disable=E1102
# pylint: disable=W0223
import shutil
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from transformers import (
BertModel,
BertTokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import argparse
import os
from tqdm import tqdm
import requests
import torchtext.datasets as td
import mlflow.pytorch
class_names = ["World", "Sports", "Business", "Sci/Tech"]
class AGNewsDataset(Dataset):
"""
    Dataset that encodes AG News reviews and labels with a BERT tokenizer
"""
def __init__(self, reviews, targets, tokenizer, max_len):
self.reviews = reviews
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.reviews)
def __getitem__(self, item):
review = str(self.reviews[item])
target = self.targets[item]
encoding = self.tokenizer.encode_plus(
review,
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
padding="max_length",
return_attention_mask=True,
return_tensors="pt",
truncation=True,
)
return {
"review_text": review,
"input_ids": encoding["input_ids"].flatten(),
"attention_mask": encoding["attention_mask"].flatten(),
"targets": torch.tensor(target, dtype=torch.long),
}
class NewsClassifier(nn.Module):
def __init__(self, args):
super(NewsClassifier, self).__init__()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.PRE_TRAINED_MODEL_NAME = "bert-base-uncased"
self.EPOCHS = args.max_epochs
self.df = None
self.tokenizer = None
self.df_train = None
self.df_val = None
self.df_test = None
self.train_data_loader = None
self.val_data_loader = None
self.test_data_loader = None
self.optimizer = None
self.total_steps = None
self.scheduler = None
self.loss_fn = None
self.BATCH_SIZE = 16
self.MAX_LEN = 160
self.NUM_SAMPLES_COUNT = args.num_samples
n_classes = len(class_names)
self.VOCAB_FILE_URL = args.vocab_file
self.VOCAB_FILE = "bert_base_uncased_vocab.txt"
self.drop = nn.Dropout(p=0.2)
self.bert = BertModel.from_pretrained(self.PRE_TRAINED_MODEL_NAME)
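        # Freeze the pretrained BERT weights so only the classifier head is trained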
for param in self.bert.parameters():
param.requires_grad = False
self.fc1 = nn.Linear(self.bert.config.hidden_size, 512)
self.out = nn.Linear(512, n_classes)
def forward(self, input_ids, attention_mask):
"""
:param input_ids: Input sentences from the batch
:param attention_mask: Attention mask returned by the encoder
:return: output - label for the input text
"""
pooled_output = self.bert(input_ids=input_ids, attention_mask=attention_mask).pooler_output
output = F.relu(self.fc1(pooled_output))
output = self.drop(output)
output = self.out(output)
return output
@staticmethod
def process_label(rating):
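        # AG News labels are 1-4 in the CSV; shift to 0-3 for zero-indexed classes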
rating = int(rating)
return rating - 1
def create_data_loader(self, df, tokenizer, max_len, batch_size):
"""
:param df: DataFrame input
:param tokenizer: Bert tokenizer
:param max_len: maximum length of the input sentence
:param batch_size: Input batch size
:return: output - Corresponding data loader for the given input
"""
ds = AGNewsDataset(
reviews=df.description.to_numpy(),
targets=df.label.to_numpy(),
tokenizer=tokenizer,
max_len=max_len,
)
return DataLoader(ds, batch_size=batch_size, num_workers=4)
def prepare_data(self):
"""
Creates train, valid and test dataloaders from the csv data
"""
td.AG_NEWS(root="data", split=("train", "test"))
extracted_files = os.listdir("data/AG_NEWS")
        train_csv_path = None
        for fname in extracted_files:
            if fname.endswith("train.csv"):
                train_csv_path = os.path.join(os.getcwd(), "data/AG_NEWS", fname)
        if train_csv_path is None:
            raise RuntimeError("train.csv not found in data/AG_NEWS")
self.df = pd.read_csv(train_csv_path)
self.df.columns = ["label", "title", "description"]
        self.df = self.df.sample(frac=1).reset_index(drop=True)  # shuffle before truncating below
self.df = self.df.iloc[: self.NUM_SAMPLES_COUNT]
self.df["label"] = self.df.label.apply(self.process_label)
if not os.path.isfile(self.VOCAB_FILE):
filePointer = requests.get(self.VOCAB_FILE_URL, allow_redirects=True)
if filePointer.ok:
with open(self.VOCAB_FILE, "wb") as f:
f.write(filePointer.content)
else:
raise RuntimeError("Error in fetching the vocab file")
self.tokenizer = BertTokenizer(self.VOCAB_FILE)
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
self.df_train, self.df_test = train_test_split(
self.df, test_size=0.1, random_state=RANDOM_SEED, stratify=self.df["label"]
)
self.df_val, self.df_test = train_test_split(
self.df_test, test_size=0.5, random_state=RANDOM_SEED, stratify=self.df_test["label"]
)
self.train_data_loader = self.create_data_loader(
self.df_train, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
)
self.val_data_loader = self.create_data_loader(
self.df_val, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
)
self.test_data_loader = self.create_data_loader(
self.df_test, self.tokenizer, self.MAX_LEN, self.BATCH_SIZE
)
def setOptimizer(self):
"""
Sets the optimizer and scheduler functions
"""
        self.optimizer = AdamW(self.parameters(), lr=1e-3, correct_bias=False)
self.total_steps = len(self.train_data_loader) * self.EPOCHS
self.scheduler = get_linear_schedule_with_warmup(
self.optimizer, num_warmup_steps=0, num_training_steps=self.total_steps
)
self.loss_fn = nn.CrossEntropyLoss().to(self.device)
def startTraining(self, model):
"""
        Initializes the training loop with the initialized model
:param model: Instance of the NewsClassifier class
"""
history = defaultdict(list)
best_accuracy = 0
for epoch in range(self.EPOCHS):
print(f"Epoch {epoch + 1}/{self.EPOCHS}")
train_acc, train_loss = self.train_epoch(model)
print(f"Train loss {train_loss} accuracy {train_acc}")
val_acc, val_loss = self.eval_model(model, self.val_data_loader)
print(f"Val loss {val_loss} accuracy {val_acc}")
history["train_acc"].append(train_acc)
history["train_loss"].append(train_loss)
history["val_acc"].append(val_acc)
history["val_loss"].append(val_loss)
if val_acc > best_accuracy:
torch.save(model.state_dict(), "best_model_state.bin")
best_accuracy = val_acc
def train_epoch(self, model):
"""
        Runs one training epoch and returns the epoch accuracy and mean loss
        :param model: Instance of the NewsClassifier class
        :result: output - Accuracy and mean loss of the model after training
"""
model = model.train()
losses = []
correct_predictions = 0
for data in tqdm(self.train_data_loader):
input_ids = data["input_ids"].to(self.device)
attention_mask = data["attention_mask"].to(self.device)
targets = data["targets"].to(self.device)
outputs = model(input_ids=input_ids, attention_mask=attention_mask)
_, preds = torch.max(outputs, dim=1)
loss = self.loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
return (
correct_predictions.double() / len(self.train_data_loader) / self.BATCH_SIZE,
np.mean(losses),
)
def eval_model(self, model, data_loader):
"""
        Evaluates the model and returns validation / test accuracy and mean loss
        :param model: Instance of the NewsClassifier class
        :param data_loader: Data loader for either test / validation dataset
        :result: output - Accuracy and mean loss of the model after evaluation
"""
model = model.eval()
losses = []
correct_predictions = 0
with torch.no_grad():
for d in data_loader:
input_ids = d["input_ids"].to(self.device)
attention_mask = d["attention_mask"].to(self.device)
targets = d["targets"].to(self.device)
outputs = model(input_ids=input_ids, attention_mask=attention_mask)
_, preds = torch.max(outputs, dim=1)
loss = self.loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
return correct_predictions.double() / len(data_loader) / self.BATCH_SIZE, np.mean(losses)
def get_predictions(self, model, data_loader):
"""
Prediction after the training step is over
:param model: Instance of the NewsClassifier class
:param data_loader: Data loader for either test / validation dataset
        :result: output - Returns review texts, predictions,
        prediction probabilities and corresponding real values
"""
model = model.eval()
review_texts = []
predictions = []
prediction_probs = []
real_values = []
with torch.no_grad():
for d in data_loader:
texts = d["review_text"]
input_ids = d["input_ids"].to(self.device)
attention_mask = d["attention_mask"].to(self.device)
targets = d["targets"].to(self.device)
outputs = model(input_ids=input_ids, attention_mask=attention_mask)
_, preds = torch.max(outputs, dim=1)
probs = F.softmax(outputs, dim=1)
review_texts.extend(texts)
predictions.extend(preds)
prediction_probs.extend(probs)
real_values.extend(targets)
predictions = torch.stack(predictions).cpu()
prediction_probs = torch.stack(prediction_probs).cpu()
real_values = torch.stack(real_values).cpu()
return review_texts, predictions, prediction_probs, real_values
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch BERT Example")
parser.add_argument(
"--max_epochs",
type=int,
default=5,
metavar="N",
help="number of epochs to train (default: 14)",
)
parser.add_argument(
"--num_samples",
type=int,
default=15000,
metavar="N",
help="Number of samples to be used for training "
"and evaluation steps (default: 15000) Maximum:100000",
)
parser.add_argument(
"--vocab_file",
default="https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
help="Custom vocab file",
)
parser.add_argument(
"--model_save_path", type=str, default="models", help="Path to save mlflow model"
)
args = parser.parse_args()
mlflow.start_run()
model = NewsClassifier(args)
model = model.to(model.device)
model.prepare_data()
model.setOptimizer()
model.startTraining(model)
print("TRAINING COMPLETED!!!")
test_acc, _ = model.eval_model(model, model.test_data_loader)
print(test_acc.item())
y_review_texts, y_pred, y_pred_probs, y_test = model.get_predictions(
model, model.test_data_loader
)
print("\n\n\n SAVING MODEL")
if os.path.exists(args.model_save_path):
shutil.rmtree(args.model_save_path)
mlflow.pytorch.save_model(
model,
path=args.model_save_path,
requirements_file="requirements.txt",
extra_files=["class_mapping.json", "bert_base_uncased_vocab.txt"],
)
mlflow.end_run()
| 32.289673 | 99 | 0.620095 | 1,548 | 12,819 | 4.917959 | 0.209302 | 0.034152 | 0.01839 | 0.012479 | 0.283068 | 0.23959 | 0.222777 | 0.183765 | 0.159333 | 0.121765 | 0 | 0.007908 | 0.279897 | 12,819 | 396 | 100 | 32.371212 | 0.816813 | 0.109993 | 0 | 0.135338 | 0 | 0.003759 | 0.088915 | 0.004875 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048872 | false | 0 | 0.06015 | 0.003759 | 0.146617 | 0.022556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bde8a8095cceac04979671e29124bd410698f7c | 3,716 | py | Python | src/toil/lib/aws/utils.py | rupertnash/toil | fd805d5fa14cca98f2bc64b322a4b546e163d6c9 | [
"Apache-2.0"
] | 6 | 2018-05-27T05:09:11.000Z | 2020-07-01T17:02:40.000Z | src/toil/lib/aws/utils.py | rupertnash/toil | fd805d5fa14cca98f2bc64b322a4b546e163d6c9 | [
"Apache-2.0"
] | 1 | 2020-07-01T18:31:30.000Z | 2020-07-08T14:03:39.000Z | src/toil/lib/aws/utils.py | rupertnash/toil | fd805d5fa14cca98f2bc64b322a4b546e163d6c9 | [
"Apache-2.0"
] | 1 | 2020-04-06T15:04:44.000Z | 2020-04-06T15:04:44.000Z | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
from toil.lib.misc import printq
from toil.lib.retry import retry
from toil.lib import aws
try:
from boto.exception import BotoServerError
except ImportError:
BotoServerError = None # AWS/boto extra is not installed
logger = logging.getLogger(__name__)
@retry(errors=[BotoServerError])
def delete_iam_role(role_name: str, region: Optional[str] = None, quiet: bool = True):
from boto.iam.connection import IAMConnection
iam_client = aws.client('iam', region_name=region)
iam_resource = aws.resource('iam', region_name=region)
boto_iam_connection = IAMConnection()
role = iam_resource.Role(role_name)
# normal policies
for attached_policy in role.attached_policies.all():
printq(f'Now dissociating policy: {attached_policy.name} from role {role.name}', quiet)
role.detach_policy(PolicyName=attached_policy.name)
# inline policies
for attached_policy in role.policies.all():
printq(f'Deleting inline policy: {attached_policy.name} from role {role.name}', quiet)
# couldn't find an easy way to remove inline policies with boto3; use boto
boto_iam_connection.delete_role_policy(role.name, attached_policy.name)
iam_client.delete_role(RoleName=role_name)
printq(f'Role {role_name} successfully deleted.', quiet)
@retry(errors=[BotoServerError])
def delete_iam_instance_profile(instance_profile_name: str, region: Optional[str] = None, quiet: bool = True):
iam_resource = aws.resource('iam', region_name=region)
instance_profile = iam_resource.InstanceProfile(instance_profile_name)
for role in instance_profile.roles:
printq(f'Now dissociating role: {role.name} from instance profile {instance_profile_name}', quiet)
instance_profile.remove_role(RoleName=role.name)
instance_profile.delete()
printq(f'Instance profile "{instance_profile_name}" successfully deleted.', quiet)
@retry(errors=[BotoServerError])
def delete_sdb_domain(sdb_domain_name: str, region: Optional[str] = None, quiet: bool = True):
sdb_client = aws.client('sdb', region_name=region)
sdb_client.delete_domain(DomainName=sdb_domain_name)
printq(f'SBD Domain: "{sdb_domain_name}" successfully deleted.', quiet)
@retry(errors=[BotoServerError])
def delete_s3_bucket(bucket: str, region: Optional[str], quiet: bool = True):
printq(f'Deleting s3 bucket in region "{region}": {bucket}', quiet)
s3_client = aws.client('s3', region_name=region)
s3_resource = aws.resource('s3', region_name=region)
paginator = s3_client.get_paginator('list_object_versions')
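    # A versioned bucket cannot be deleted until every object version and
    # delete marker has been removed, so page through both first.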
for response in paginator.paginate(Bucket=bucket):
versions = response.get('Versions', []) + response.get('DeleteMarkers', [])
for version in versions:
printq(f" Deleting {version['Key']} version {version['VersionId']}", quiet)
s3_client.delete_object(Bucket=bucket, Key=version['Key'], VersionId=version['VersionId'])
s3_resource.Bucket(bucket).delete()
printq(f'\n * Deleted s3 bucket successfully: {bucket}\n\n', quiet)
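
# Illustrative usage (resource names are placeholders):
#     delete_s3_bucket('my-toil-jobstore-bucket', region='us-east-1')
#     delete_sdb_domain('my-toil-domain', region='us-east-1')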
| 45.876543 | 110 | 0.741389 | 503 | 3,716 | 5.328032 | 0.28827 | 0.061567 | 0.026866 | 0.043284 | 0.254104 | 0.21903 | 0.180597 | 0.180597 | 0.15 | 0 | 0 | 0.007318 | 0.154198 | 3,716 | 80 | 111 | 46.45 | 0.845371 | 0.194295 | 0 | 0.115385 | 0 | 0 | 0.201277 | 0.038306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bdeffb0d14dc1d4dca20f695831236be20df06b | 3,389 | py | Python | src/data/processed_data.py | Victoradukwu/titanic | 18a4e8fe7dbe755a946512ca71b1d2a2f5932c64 | [
"MIT"
] | null | null | null | src/data/processed_data.py | Victoradukwu/titanic | 18a4e8fe7dbe755a946512ca71b1d2a2f5932c64 | [
"MIT"
] | null | null | null | src/data/processed_data.py | Victoradukwu/titanic | 18a4e8fe7dbe755a946512ca71b1d2a2f5932c64 | [
"MIT"
] | null | null | null | import os
import numpy as np
import pandas as pd
def read_data():
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')
    train_df = pd.read_csv(train_file_path, index_col='PassengerId')
    test_df = pd.read_csv(test_file_path, index_col='PassengerId')
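    # Mark test rows with a sentinel value so the combined frame can be
    # split back into train and test sets in write_data()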
test_df['Survived'] = -100
    df = pd.concat([train_df, test_df], sort=False, axis=0)
return df
def process_data(df):
return(df
.assign(Title = lambda x: x.Name.map(get_title))
.pipe(fill_missing_values)
.assign(Fare_Bin = lambda x: pd.qcut(x.Fare, 4, labels=['very_low', 'low', 'high', 'very_high']))
.assign(AgeState = lambda x: np.where(x.Age >= 18, 'Adult', 'Child'))
.assign(FamilySize = lambda x: x.Parch + x.SibSp + 1)
.assign(IsMother = lambda x: np.where(((x.Age > 18) & (x.Parch > 0) & (x.Title != 'Miss') & (x.Sex == 'female')), 1,0))
.assign(Cabin = lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin))
.assign(Deck = lambda x: x.Cabin.map(get_deck))
.assign(IsMale = lambda x: np.where(x.Sex == 'male', 1, 0))
.pipe(pd.get_dummies, columns=['Deck', 'Pclass', 'Title', 'Fare_Bin', 'Embarked', 'AgeState'])
.drop(['Cabin', 'Name', 'Ticket', 'Parch', 'SibSp', 'Sex'], axis=1)
.pipe(reorder_columns)
)
# map the raw name titles onto a reduced set of more meaningful titles
def get_title(name):
title_map = {
'mr': 'Mr',
'mrs': 'Mrs',
'mme': 'Mrs',
'ms': 'Mrs',
'miss': 'Miss',
'mlle': 'Miss',
'master': 'Master',
'don': 'Sir',
'rev': 'Sir',
'sir': 'Sir',
'jonkheer': 'Sir',
'dr': 'Officer',
'major': 'Officer',
        'capt': 'Officer',
'col': 'Officer',
'lady': 'Lady',
'the countess': 'Lady',
'dona': 'Lady'
}
first_name_with_title = name.split(',')[1]
raw_title = first_name_with_title.split('.')[0]
title = raw_title.strip().lower()
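    # e.g. 'Braund, Mr. Owen Harris' -> raw_title ' Mr' -> title 'mr' -> 'Mr'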
return title_map[title]
def get_deck(cabin):
return np.where(pd.notnull(cabin), str(cabin)[0].upper(), 'Z')
def fill_missing_values(df):
    # Embarked
df.Embarked.fillna('C', inplace=True)
# Fare
median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median()
df.Fare.fillna(median_fare, inplace=True)
    # Age
title_age_median = df.groupby('Title').Age.transform('median')
df.Age.fillna(title_age_median, inplace=True)
return df
def reorder_columns(df):
columns = [column for column in df.columns if column != 'Survived']
columns = ['Survived'] + columns
df = df[columns]
return df
def write_data(df):
processed_data_path = os.path.join(os.path.pardir, 'data', 'processed')
write_train_path = os.path.join(processed_data_path, 'train.csv')
write_test_path = os.path.join(processed_data_path, 'test.csv')
df.loc[df.Survived != -100].to_csv(write_train_path)
columns = [column for column in df.columns if column != 'Survived']
df.loc[df.Survived == -100][columns].to_csv(write_test_path)
if __name__ == '__main__':
df = read_data()
df = process_data(df)
write_data(df)
| 33.554455 | 130 | 0.59044 | 467 | 3,389 | 4.104925 | 0.284797 | 0.025039 | 0.031299 | 0.043818 | 0.238915 | 0.204486 | 0.204486 | 0.116849 | 0.086594 | 0.051122 | 0 | 0.010054 | 0.236943 | 3,389 | 100 | 131 | 33.89 | 0.731245 | 0.029212 | 0 | 0.064935 | 0 | 0 | 0.122679 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.025974 | 0.038961 | 0.025974 | 0.194805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6be02c0cdca45be2d0933fe3cbd070df05fee26e | 59,336 | py | Python | clustergrammer/upload_pages/clustergrammer_old.py | delosrogers/clustergrammer-web | 14102cfca328214d3bc8285e8331663fe0e5fad4 | [
"MIT"
] | 5 | 2018-04-04T16:25:06.000Z | 2021-04-10T23:47:20.000Z | clustergrammer/upload_pages/clustergrammer_old.py | delosrogers/clustergrammer-web | 14102cfca328214d3bc8285e8331663fe0e5fad4 | [
"MIT"
] | 8 | 2016-07-16T02:55:12.000Z | 2022-02-02T16:42:17.000Z | clustergrammer/upload_pages/clustergrammer_old.py | delosrogers/clustergrammer-web | 14102cfca328214d3bc8285e8331663fe0e5fad4 | [
"MIT"
] | 4 | 2019-05-28T08:52:41.000Z | 2021-01-11T22:14:48.000Z | # define a class for networks
class Network(object):
'''
  Networks have two states: a data state, where they are stored as a matrix
  and nodes, and a viz state, where they are stored as viz.links,
  viz.row_nodes, and viz.col_nodes.
The goal is to start in a data-state and produce a viz-state of the network
that will be used as input to clustergram.js.
'''
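
  # Minimal usage sketch (illustrative; uses only methods defined in this class):
  #     net = Network()
  #     net.load_tsv_to_net('matrix.tsv')
  #     net.filter_network_thresh(cutoff=0.1, min_num_meet=1)
  #     net.cluster_row_and_col(dist_type='cosine', linkage_type='average')
  #     print(net.export_net_json('dat'))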
def __init__(self):
# network: data-state
self.dat = {}
self.dat['nodes'] = {}
self.dat['nodes']['row'] = []
self.dat['nodes']['col'] = []
# node_info holds the orderings (ini, clust, rank), classification ('cl'),
# and other general information
self.dat['node_info'] = {}
for inst_rc in self.dat['nodes']:
self.dat['node_info'][inst_rc] = {}
self.dat['node_info'][inst_rc]['ini'] = []
self.dat['node_info'][inst_rc]['clust'] = []
self.dat['node_info'][inst_rc]['rank'] = []
self.dat['node_info'][inst_rc]['info'] = []
# classification is specifically used to color the class triangles
self.dat['node_info'][inst_rc]['cl'] = []
self.dat['node_info'][inst_rc]['value'] = []
# initialize matrix
self.dat['mat'] = []
# mat_info is an optional dictionary
# so I'm not including it by default
# network: viz-state
self.viz = {}
self.viz['row_nodes'] = []
self.viz['col_nodes'] = []
self.viz['links'] = []
def load_tsv_to_net(self, filename):
f = open(filename,'r')
lines = f.readlines()
f.close()
self.load_lines_from_tsv_to_net(lines)
def pandas_load_tsv_to_net(self, file_buffer):
'''
A user can add category information to the columns
'''
import pandas as pd
# get lines and check for category and value info
lines = file_buffer.getvalue().split('\n')
# check for category info in headers
cat_line = lines[1].split('\t')
add_cat = False
if cat_line[0] == '':
add_cat = True
tmp_df = {}
if add_cat:
# read in names and categories
tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=[0,1])
else:
# read in names only
tmp_df['mat'] = pd.read_table(file_buffer, index_col=0, header=0)
# save to self
self.df_to_dat(tmp_df)
# add categories if necessary
if add_cat:
cat_line = [i.strip() for i in cat_line]
self.dat['node_info']['col']['cl'] = cat_line[1:]
# make a dict of columns in categories
##########################################
col_in_cat = {}
for i in range(len(self.dat['node_info']['col']['cl'])):
inst_cat = self.dat['node_info']['col']['cl'][i]
inst_col = self.dat['nodes']['col'][i]
if inst_cat not in col_in_cat:
col_in_cat[inst_cat] = []
# collect col names for categories
col_in_cat[inst_cat].append(inst_col)
# save to node_info
self.dat['node_info']['col_in_cat'] = col_in_cat
def load_lines_from_tsv_to_net(self, lines):
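    '''
    Parse a TSV where the first row holds column labels (the first cell is
    ignored) and the first column holds row labels, e.g. (illustrative):
        \tcol_a\tcol_b
        row_1\t1.0\t2.0
        row_2\t3.0\t4.0
    '''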
import numpy as np
# get row/col labels and data from lines
for i in range(len(lines)):
# get inst_line
inst_line = lines[i].rstrip().split('\t')
# strip each element
inst_line = [z.strip() for z in inst_line]
# get column labels from first row
if i == 0:
tmp_col_labels = inst_line
# add the labels
for inst_elem in range(len(tmp_col_labels)):
# skip the first element
if inst_elem > 0:
# get the column label
inst_col_label = tmp_col_labels[inst_elem]
# add to network data
self.dat['nodes']['col'].append(inst_col_label)
# get row info
if i > 0:
# save row labels
self.dat['nodes']['row'].append(inst_line[0])
# get data - still strings
inst_data_row = inst_line[1:]
# convert to float
inst_data_row = [float(tmp_dat) for tmp_dat in inst_data_row]
# save the row data as an array
inst_data_row = np.asarray(inst_data_row)
        # initialize matrix
if i == 1:
self.dat['mat'] = inst_data_row
# add rows to matrix
if i > 1:
self.dat['mat'] = np.vstack( ( self.dat['mat'], inst_data_row ) )
def load_l1000cds2(self, l1000cds2):
import scipy
import numpy as np
# process gene set result
if 'upGenes' in l1000cds2['input']['data']:
# add the names from all the results
all_results = l1000cds2['result']
# grab col nodes - input sig and drugs
self.dat['nodes']['col'] = []
for i in range(len(all_results)):
inst_result = all_results[i]
self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i))
self.dat['node_info']['col']['value'].append(inst_result['score'])
for type_overlap in inst_result['overlap']:
self.dat['nodes']['row'].extend( inst_result['overlap'][type_overlap] )
self.dat['nodes']['row'] = sorted(list(set(self.dat['nodes']['row'])))
# initialize the matrix
self.dat['mat'] = scipy.zeros([ len(self.dat['nodes']['row']), len(self.dat['nodes']['col']) ])
      # fill in the matrix with l1000 data
      ########################################
      # record the up/dn direction of each signature gene in node_info
for i in range(len(self.dat['nodes']['row'])):
inst_gene = self.dat['nodes']['row'][i]
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_gene)
# if gene is in up add 1 otherwise add -1
if inst_gene in l1000cds2['input']['data']['upGenes']:
self.dat['node_info']['row']['value'].append(1)
else:
self.dat['node_info']['row']['value'].append(-1)
# save the name as a class
for i in range(len(self.dat['nodes']['col'])):
self.dat['node_info']['col']['cl'].append(self.dat['nodes']['col'][i])
# swap keys for aggravate and reverse
if l1000cds2['input']['aggravate'] == False:
# reverse gene set
up_type = 'up/dn'
dn_type = 'dn/up'
else:
# mimic gene set
up_type = 'up/up'
dn_type = 'dn/dn'
# loop through drug results
for inst_result_index in range(len(all_results)):
inst_result = all_results[inst_result_index]
# for non-mimic if up/dn then it should be negative since the drug is dn
# for mimic if up/up then it should be positive since the drug is up
for inst_dn in inst_result['overlap'][up_type]:
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_dn)
# save -1 to gene row and drug column
if up_type == 'up/dn':
self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
else:
self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
# for non-mimic if dn/up then it should be positive since the drug is up
# for mimic if dn/dn then it should be negative since the drug is dn
for inst_up in inst_result['overlap'][dn_type]:
# get gene index
inst_gene_index = self.dat['nodes']['row'].index(inst_up)
# save 1 to gene row and drug column
if dn_type == 'dn/up':
self.dat['mat'][ inst_gene_index, inst_result_index ] = 1
else:
self.dat['mat'][ inst_gene_index, inst_result_index ] = -1
# process a characteristic direction vector result
else:
all_results = l1000cds2['result']
# get gene names
self.dat['nodes']['row'] = l1000cds2['input']['data']['up']['genes'] + l1000cds2['input']['data']['dn']['genes']
# save gene expression values
tmp_exp_vect = l1000cds2['input']['data']['up']['vals'] + l1000cds2['input']['data']['dn']['vals']
for i in range(len(self.dat['nodes']['row'])):
self.dat['node_info']['row']['value'].append(tmp_exp_vect[i])
# gather result names
for i in range(len(all_results)):
inst_result = all_results[i]
# add result to list
self.dat['nodes']['col'].append(inst_result['name']+'#'+str(i))
self.dat['node_info']['col']['cl'].append(inst_result['name'])
# reverse signature, score [1,2]
if l1000cds2['input']['aggravate'] == False:
self.dat['node_info']['col']['value'].append( inst_result['score']-1 )
else:
self.dat['node_info']['col']['value'].append( 1 - inst_result['score'] )
# concat up and down lists
inst_vect = inst_result['overlap']['up'] + inst_result['overlap']['dn']
inst_vect = np.transpose(np.asarray(inst_vect))
inst_vect = inst_vect.reshape(-1,1)
# initialize or add to matrix
if type(self.dat['mat']) is list:
self.dat['mat'] = inst_vect
else:
self.dat['mat'] = np.hstack(( self.dat['mat'], inst_vect))
def load_vect_post_to_net(self, vect_post):
import numpy as np
# get all signatures (a.k.a. columns)
sigs = vect_post['columns']
# get all rows from signatures
all_rows = []
all_sigs = []
for inst_sig in sigs:
# gather sig names
all_sigs.append(inst_sig['col_name'])
# get column
col_data = inst_sig['data']
# gather row names
for inst_row_data in col_data:
# get gene name
all_rows.append( inst_row_data['row_name'] )
# get unique sorted list of genes
all_rows = sorted(list(set(all_rows)))
all_sigs = sorted(list(set(all_sigs)))
print( 'found ' + str(len(all_rows)) + ' rows' )
print( 'found ' + str(len(all_sigs)) + ' columns\n' )
# save genes and sigs to nodes
self.dat['nodes']['row'] = all_rows
self.dat['nodes']['col'] = all_sigs
# initialize numpy matrix of nans
self.dat['mat'] = np.empty((len(all_rows),len(all_sigs)))
self.dat['mat'][:] = np.nan
is_up_down = False
if 'is_up_down' in vect_post:
if vect_post['is_up_down'] == True:
is_up_down = True
if is_up_down == True:
self.dat['mat_up'] = np.empty((len(all_rows),len(all_sigs)))
self.dat['mat_up'][:] = np.nan
self.dat['mat_dn'] = np.empty((len(all_rows),len(all_sigs)))
self.dat['mat_dn'][:] = np.nan
# loop through all signatures and rows
# and place information into self.dat
for inst_sig in sigs:
# get sig name
inst_sig_name = inst_sig['col_name']
# get row data
col_data = inst_sig['data']
# loop through column
for inst_row_data in col_data:
# add row data to signature matrix
inst_row = inst_row_data['row_name']
inst_value = inst_row_data['val']
# find index of row and sig in matrix
row_index = all_rows.index(inst_row)
col_index = all_sigs.index(inst_sig_name)
# save inst_value to matrix
self.dat['mat'][row_index, col_index] = inst_value
if is_up_down == True:
self.dat['mat_up'][row_index, col_index] = inst_row_data['val_up']
self.dat['mat_dn'][row_index, col_index] = inst_row_data['val_dn']
def load_data_file_to_net(self, filename):
# load json from file to new dictionary
inst_dat = self.load_json_to_dict(filename)
# convert dat['mat'] to numpy array and add to network
self.load_data_to_net(inst_dat)
def load_data_to_net(self, inst_net):
''' load data into nodes and mat, also convert mat to numpy array'''
self.dat['nodes'] = inst_net['nodes']
self.dat['mat'] = inst_net['mat']
# convert to numpy array
self.mat_to_numpy_arr()
def export_net_json(self, net_type, indent='no-indent'):
''' export json string of dat '''
import json
from copy import deepcopy
if net_type == 'dat':
exp_dict = deepcopy(self.dat)
# convert numpy array to list
if type(exp_dict['mat']) is not list:
exp_dict['mat'] = exp_dict['mat'].tolist()
elif net_type == 'viz':
exp_dict = self.viz
# make json
if indent == 'indent':
exp_json = json.dumps(exp_dict, indent=2)
else:
exp_json = json.dumps(exp_dict)
return exp_json
def write_json_to_file(self, net_type, filename, indent='no-indent'):
import json
# get dat or viz representation as json string
if net_type == 'dat':
exp_json = self.export_net_json('dat', indent)
elif net_type == 'viz':
exp_json = self.export_net_json('viz', indent)
# save to file
fw = open(filename, 'w')
fw.write( exp_json )
fw.close()
def set_node_names(self, row_name, col_name):
'''give names to the rows and columns'''
self.dat['node_names'] = {}
self.dat['node_names']['row'] = row_name
self.dat['node_names']['col'] = col_name
def mat_to_numpy_arr(self):
''' convert list to numpy array - numpy arrays can not be saved as json '''
import numpy as np
self.dat['mat'] = np.asarray( self.dat['mat'] )
def swap_nan_for_zero(self):
import numpy as np
self.dat['mat'][ np.isnan( self.dat['mat'] ) ] = 0
def filter_row_thresh( self, row_filt_int, filter_type='value' ):
'''
Remove rows from matrix that do not meet some threshold
value: The default filtering is value, in that at least one value in the row
has to be higher than some threshold.
num: Rows can be filtered by the number of non-zero values it has.
sum: Rows can be filtered by the sum of the values
'''
import scipy
import numpy as np
    # largest absolute value in the matrix (whichever extreme has greater magnitude)
    mat = self.dat['mat']
    max_mat = abs(max(mat.min(), mat.max(), key=abs))
# maximum number of measurements
max_num = len(self.dat['nodes']['col'])
mat_abs = abs(mat)
sum_row = np.sum(mat_abs, axis=1)
max_sum = max(sum_row)
# transfer the nodes
nodes = {}
nodes['row'] = []
nodes['col'] = self.dat['nodes']['col']
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = []
node_info['col'] = self.dat['node_info']['col']['info']
# filter rows
#################################
for i in range(len(self.dat['nodes']['row'])):
# get row name
inst_nodes_row = self.dat['nodes']['row'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['row']['info']) > 0:
inst_node_info = self.dat['node_info']['row']['info'][i]
# get absolute value of row data
row_vect = np.absolute(self.dat['mat'][i,:])
# value: is there at least one value over cutoff
##################################################
if filter_type == 'value':
# calc cutoff
cutoff = row_filt_int * max_mat
# count the number of values above some thresh
found_tuple = np.where(row_vect >= cutoff)
if len(found_tuple[0])>=1:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
elif filter_type == 'num':
num_nonzero = np.count_nonzero(row_vect)
# use integer number of non-zero measurements
cutoff = row_filt_int * 10
if num_nonzero>= cutoff:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
elif filter_type == 'sum':
inst_row_sum = sum(abs(row_vect))
if inst_row_sum > row_filt_int*max_sum:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes array - list of node names
self.dat['nodes'] = nodes
# save node_info array - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
    print( 'final mat shape: ' + str(self.dat['mat'].shape ) + '\n')
def filter_col_thresh( self, cutoff, min_num_meet ):
'''
remove rows and columns from matrix that do not have at least
min_num_meet instances of a value with an absolute value above cutoff
'''
import scipy
import numpy as np
# transfer the nodes
nodes = {}
nodes['row'] = self.dat['nodes']['row']
nodes['col'] = []
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = self.dat['node_info']['row']['info']
node_info['col'] = []
# add cols with non-zero values
#################################
for i in range(len(self.dat['nodes']['col'])):
# get col name
inst_nodes_col = self.dat['nodes']['col'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['col']['info']) > 0:
inst_node_info = self.dat['node_info']['col']['info'][i]
# get col vect
col_vect = np.absolute(self.dat['mat'][:,i])
# check if there are nonzero values
found_tuple = np.where(col_vect >= cutoff)
if len(found_tuple[0])>=min_num_meet:
# add name
nodes['col'].append(inst_nodes_col)
# add info if necessary
if len(self.dat['node_info']['col']['info']) > 0:
node_info['col'].append(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes array - list of node names
self.dat['nodes'] = nodes
# save node_info array - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
    print( 'final mat shape: ' + str(self.dat['mat'].shape ) + '\n')
def filter_network_thresh( self, cutoff, min_num_meet ):
'''
remove rows and columns from matrix that do not have at least
min_num_meet instances of a value with an absolute value above cutoff
'''
import scipy
import numpy as np
# transfer the nodes
nodes = {}
nodes['row'] = []
nodes['col'] = []
# transfer the 'info' part of node_info if necessary
node_info = {}
node_info['row'] = []
node_info['col'] = []
# add rows with non-zero values
#################################
for i in range(len(self.dat['nodes']['row'])):
# get row name
inst_nodes_row = self.dat['nodes']['row'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['row']['info']) > 0:
inst_node_info = self.dat['node_info']['row']['info'][i]
# get row vect
row_vect = np.absolute(self.dat['mat'][i,:])
# check if there are nonzero values
found_tuple = np.where(row_vect >= cutoff)
if len(found_tuple[0])>=min_num_meet:
# add name
nodes['row'].append(inst_nodes_row)
# add info if necessary
if len(self.dat['node_info']['row']['info']) > 0:
node_info['row'].append(inst_node_info)
# add cols with non-zero values
#################################
for i in range(len(self.dat['nodes']['col'])):
# get col name
inst_nodes_col = self.dat['nodes']['col'][i]
# get node info - disregard ini, clust, and rank orders
if len(self.dat['node_info']['col']['info']) > 0:
inst_node_info = self.dat['node_info']['col']['info'][i]
# get col vect
col_vect = np.absolute(self.dat['mat'][:,i])
# check if there are nonzero values
found_tuple = np.where(col_vect >= cutoff)
if len(found_tuple[0])>=min_num_meet:
# add name
nodes['col'].append(inst_nodes_col)
# add info if necessary
if len(self.dat['node_info']['col']['info']) > 0:
node_info['col'].append(inst_node_info)
# cherrypick data from self.dat['mat']
##################################
# filtered matrix
filt_mat = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_up' in self.dat:
filt_mat_up = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
filt_mat_dn = scipy.zeros([ len(nodes['row']), len(nodes['col']) ])
if 'mat_info' in self.dat:
# initialize filtered mat_info dictionary with tuple keys
filt_mat_info = {}
# loop through the rows
for i in range(len(nodes['row'])):
inst_row = nodes['row'][i]
# loop through the cols
for j in range(len(nodes['col'])):
inst_col = nodes['col'][j]
# get row and col index
pick_row = self.dat['nodes']['row'].index(inst_row)
pick_col = self.dat['nodes']['col'].index(inst_col)
# cherrypick
###############
filt_mat[i,j] = self.dat['mat'][pick_row, pick_col]
if 'mat_up' in self.dat:
filt_mat_up[i,j] = self.dat['mat_up'][pick_row, pick_col]
filt_mat_dn[i,j] = self.dat['mat_dn'][pick_row, pick_col]
if 'mat_info' in self.dat:
filt_mat_info[str((i,j))] = self.dat['mat_info'][str((pick_row,pick_col))]
# save nodes array - list of node names
self.dat['nodes'] = nodes
# save node_info array - list of node infos
self.dat['node_info']['row']['info'] = node_info['row']
self.dat['node_info']['col']['info'] = node_info['col']
# overwrite with new filtered data
self.dat['mat'] = filt_mat
# overwrite with up/dn data if necessary
if 'mat_up' in self.dat:
self.dat['mat_up'] = filt_mat_up
self.dat['mat_dn'] = filt_mat_dn
# overwrite mat_info if necessary
if 'mat_info' in self.dat:
self.dat['mat_info'] = filt_mat_info
    print( 'final mat shape: ' + str(self.dat['mat'].shape ) + '\n')
def keep_max_num_links(self, keep_num_links):
print('\trun keep_max_num_links')
max_mat_value = abs(self.dat['mat']).max()
# check the total number of links
inst_thresh = 0
inst_pct_max = 0
inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum()
print('initially there are ' + str(inst_num_links) + ' links\n')
thresh_fraction = 100
while (inst_num_links > keep_num_links):
# increase the threshold as a pct of max value in mat
inst_pct_max = inst_pct_max + 1
# increase threshold
inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction)
# check the number of links above the curr threshold
inst_num_links = (abs(self.dat['mat'])>inst_thresh).sum()
print('there are '+str(inst_num_links)+ ' links at threshold '+str(inst_pct_max)+'pct and value of ' +str(inst_thresh)+'\n')
# if there are no links, step the threshold back down one notch
if inst_num_links == 0:
inst_pct_max = inst_pct_max - 1
inst_thresh = max_mat_value*(float(inst_pct_max)/thresh_fraction)
print('final number of links '+str(inst_num_links))
# replace values that are less than thresh with zero
self.dat['mat'][ abs(self.dat['mat']) < inst_thresh] = 0
# return number of links
return (abs(self.dat['mat'])>inst_thresh).sum()
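# Illustrative usage sketch (not part of the original source; `net` is a
# hypothetical Network-like object whose net.dat['mat'] is already loaded):
#
#   remaining = net.keep_max_num_links(1000)
#   print('links kept after thresholding: ' + str(remaining))
#
# The method raises the threshold in 1% steps of the matrix max until the
# number of links drops to keep_num_links or fewer, then zeroes out all
# cells whose absolute value falls below that threshold.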
def cluster_row_and_col(self, dist_type='cosine', linkage_type='average', dendro=True, \
run_clustering=True, run_rank=True):
'''
cluster net.dat and make visualization json, net.viz.
optionally leave out dendrogram colorbar groups with dendro argument
'''
import scipy
import numpy as np
from scipy.spatial.distance import pdist
from copy import deepcopy
# do not make a dendrogram if clustering is not being run
if run_clustering == False:
dendro = False
# make distance matrices
##########################
# get number of rows and columns from self.dat
num_row = len(self.dat['nodes']['row'])
num_col = len(self.dat['nodes']['col'])
# initialize distance matrices (np.zeros; scipy.zeros is gone from modern
# SciPy, and both arrays are overwritten by pdist just below)
row_dm = np.zeros([num_row, num_row])
col_dm = np.zeros([num_col, num_col])
# make copy of matrix
tmp_mat = deepcopy(self.dat['mat'])
# calculate distance matrix
row_dm = pdist( tmp_mat, metric=dist_type )
col_dm = pdist( tmp_mat.transpose(), metric=dist_type )
# prevent negative values
row_dm[row_dm < 0] = float(0)
col_dm[col_dm < 0] = float(0)
# initialize clust order
clust_order = self.ini_clust_order()
# initial ordering
###################
clust_order['row']['ini'] = list(range(num_row, -1, -1))
clust_order['col']['ini'] = list(range(num_col, -1, -1))
# cluster
if run_clustering == True:
clust_order['row']['clust'], clust_order['row']['group'] = \
self.clust_and_group(row_dm, linkage_type=linkage_type)
clust_order['col']['clust'], clust_order['col']['group'] = \
self.clust_and_group(col_dm, linkage_type=linkage_type)
# rank
if run_rank == True:
clust_order['row']['rank'] = self.sort_rank_nodes('row')
clust_order['col']['rank'] = self.sort_rank_nodes('col')
# save clustering orders to node_info
if run_clustering == True:
self.dat['node_info']['row']['clust'] = clust_order['row']['clust']
self.dat['node_info']['col']['clust'] = clust_order['col']['clust']
else:
self.dat['node_info']['row']['clust'] = clust_order['row']['ini']
self.dat['node_info']['col']['clust'] = clust_order['col']['ini']
if run_rank == True:
self.dat['node_info']['row']['rank'] = clust_order['row']['rank']
self.dat['node_info']['col']['rank'] = clust_order['col']['rank']
else:
self.dat['node_info']['row']['rank'] = clust_order['row']['ini']
self.dat['node_info']['col']['rank'] = clust_order['col']['ini']
# transfer orderings
# row
self.dat['node_info']['row']['ini'] = clust_order['row']['ini']
self.dat['node_info']['row']['group'] = clust_order['row']['group']
# col
self.dat['node_info']['col']['ini'] = clust_order['col']['ini']
self.dat['node_info']['col']['group'] = clust_order['col']['group']
#!! disabled temporarily
# if len(self.dat['node_info']['col']['cl']) > 0:
# self.calc_cat_clust_order()
# make the viz json - can optionally leave out dendrogram
self.viz_json(dendro)
def calc_cat_clust_order(self):
from clustergrammer import Network
from copy import deepcopy
col_in_cat = self.dat['node_info']['col_in_cat']
# alpha order categories
all_cats = sorted(col_in_cat.keys())
# cluster each category
##############################
# calc clustering of each category
all_cat_orders = []
# this is the ordering of the columns based on their category, not
# including their clustering order on top of their category
tmp_col_names_list = []
for inst_cat in all_cats:
inst_cols = col_in_cat[inst_cat]
# keep a list of the columns
tmp_col_names_list.extend(inst_cols)
cat_net = deepcopy(Network())
cat_net.dat['mat'] = deepcopy(self.dat['mat'])
cat_net.dat['nodes'] = deepcopy(self.dat['nodes'])
# get dataframe, to simplify column filtering
cat_df = cat_net.dat_to_df()
# get subset of dataframe
sub_df = {}
sub_df['mat'] = cat_df['mat'][inst_cols]
# load back to dat
cat_net.df_to_dat(sub_df)
try:
cat_net.cluster_row_and_col('cos')
inst_cat_order = cat_net.dat['node_info']['col']['clust']
except:
inst_cat_order = range(len(cat_net.dat['nodes']['col']))
prev_order_len = len(all_cat_orders)
# add previous order length to the current order number
inst_cat_order = [i+prev_order_len for i in inst_cat_order]
all_cat_orders.extend(inst_cat_order)
# sort tmp_col_names_list by the integers in all_cat_orders
names_col_cat_clust = [x for (y,x) in sorted(zip(all_cat_orders,tmp_col_names_list))]
# calc category-cluster order
##############################
final_order = []
for i in range(len(self.dat['nodes']['col'])):
# get the rank of the col in the order of col_nodes
inst_col_name = self.dat['nodes']['col'][i]
inst_col_num = names_col_cat_clust.index(inst_col_name)
final_order.append(inst_col_num)
self.dat['node_info']['col']['cl_index'] = final_order
def clust_and_group( self, dm, linkage_type='average' ):
import scipy.cluster.hierarchy as hier
# calculate linkage
Y = hier.linkage( dm, method=linkage_type )
Z = hier.dendrogram( Y, no_plot=True )
# get ordering
inst_clust_order = Z['leaves']
all_dist = self.group_cutoffs()
# generate distance cutoffs
inst_groups = {}
for inst_dist in all_dist:
inst_key = str(inst_dist).replace('.','')
inst_groups[inst_key] = hier.fcluster(Y, inst_dist*dm.max(), 'distance')
inst_groups[inst_key] = inst_groups[inst_key].tolist()
return inst_clust_order, inst_groups
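# Minimal standalone sketch of the linkage/fcluster pattern used above, with
# made-up random data (none of these values come from the original file):
#
#   import numpy as np
#   import scipy.cluster.hierarchy as hier
#   from scipy.spatial.distance import pdist
#   dm = pdist(np.random.rand(5, 3), metric='cosine')      # condensed distances
#   Y = hier.linkage(dm, method='average')
#   leaves = hier.dendrogram(Y, no_plot=True)['leaves']    # leaf ordering
#   groups = hier.fcluster(Y, 0.5 * dm.max(), 'distance')  # flat clusters at one cutoff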
def sort_rank_node_values( self, rowcol ):
import numpy as np
from operator import itemgetter
from copy import deepcopy
# make a copy of nodes and node_info
inst_nodes = deepcopy(self.dat['nodes'][rowcol])
inst_vals = deepcopy(self.dat['node_info'][rowcol]['value'])
tmp_arr = []
for i in range(len(inst_nodes)):
inst_dict = {}
# get name of the node
inst_dict['name'] = inst_nodes[i]
# get value
inst_dict['value'] = inst_vals[i]
tmp_arr.append(inst_dict)
# sort dictionary by value
tmp_arr = sorted( tmp_arr, key=itemgetter('value') )
# get list of sorted nodes
tmp_sort_nodes = []
for inst_dict in tmp_arr:
tmp_sort_nodes.append( inst_dict['name'] )
# get the sorted index
sort_index = []
for inst_node in inst_nodes:
sort_index.append( tmp_sort_nodes.index(inst_node) )
return sort_index
def sort_rank_nodes( self, rowcol ):
import numpy as np
from operator import itemgetter
from copy import deepcopy
# make a copy of node information
inst_nodes = deepcopy(self.dat['nodes'][rowcol])
inst_mat = deepcopy(self.dat['mat'])
sum_term = []
for i in range(len(inst_nodes)):
inst_dict = {}
# get name of the node
inst_dict['name'] = inst_nodes[i]
# sum values of the node
if rowcol == 'row':
inst_dict['total'] = np.sum(inst_mat[i,:])
else:
inst_dict['total'] = np.sum(inst_mat[:,i])
# add this to the list of dicts
sum_term.append(inst_dict)
# sort dictionary by number of terms
sum_term = sorted( sum_term, key=itemgetter('total'), reverse=False )
# get list of sorted nodes
tmp_sort_nodes = []
for inst_dict in sum_term:
tmp_sort_nodes.append(inst_dict['name'])
# get the sorted index
sort_index = []
for inst_node in inst_nodes:
sort_index.append( tmp_sort_nodes.index(inst_node) )
return sort_index
def viz_json(self, dendro=True):
''' make the dictionary for the clustergram.js visualization '''
# get dendrogram cutoff distances
all_dist = self.group_cutoffs()
# make nodes for viz
#####################
# make rows and cols
for inst_rc in self.dat['nodes']:
for i in range(len( self.dat['nodes'][inst_rc] )):
inst_dict = {}
inst_dict['name'] = self.dat['nodes'][inst_rc][i]
inst_dict['ini'] = self.dat['node_info'][inst_rc]['ini'][i]
#!! clean this up so I do not have to get the index here
inst_dict['clust'] = self.dat['node_info'][inst_rc]['clust'].index(i)
inst_dict['rank'] = self.dat['node_info'][inst_rc]['rank'][i]
# add node class cl
if len(self.dat['node_info'][inst_rc]['cl']) > 0:
inst_dict['cl'] = self.dat['node_info'][inst_rc]['cl'][i]
# add node class cl_index
if 'cl_index' in self.dat['node_info'][inst_rc]:
inst_dict['cl_index'] = self.dat['node_info'][inst_rc]['cl_index'][i]
# add node class val
if len(self.dat['node_info'][inst_rc]['value']) > 0:
inst_dict['value'] = self.dat['node_info'][inst_rc]['value'][i]
# add node information
# if 'info' in self.dat['node_info'][inst_rc]:
if len(self.dat['node_info'][inst_rc]['info']) > 0:
inst_dict['info'] = self.dat['node_info'][inst_rc]['info'][i]
# group info
if dendro==True:
inst_dict['group'] = []
for tmp_dist in all_dist:
# read group info in correct order
tmp_dist = str(tmp_dist).replace('.','')
inst_dict['group'].append( float( self.dat['node_info'][inst_rc]['group'][tmp_dist][i] ) )
# append dictionary to list of nodes
self.viz[inst_rc+'_nodes'].append(inst_dict)
# links
########
for i in range(len( self.dat['nodes']['row'] )):
for j in range(len( self.dat['nodes']['col'] )):
if abs( self.dat['mat'][i,j] ) > 0:
inst_dict = {}
inst_dict['source'] = i
inst_dict['target'] = j
inst_dict['value'] = self.dat['mat'][i,j]
# add up/dn values if necessary (mat_up and mat_dn always travel together)
if 'mat_up' in self.dat:
inst_dict['value_up'] = self.dat['mat_up'][i,j]
inst_dict['value_dn'] = self.dat['mat_dn'][i,j]
# add information if necessary - use dictionary with tuple key
# each element of the matrix needs to have information
if 'mat_info' in self.dat:
# use tuple string
inst_dict['info'] = self.dat['mat_info'][str((i,j))]
# add highlight if necessary - use dictionary with tuple key
if 'mat_hl' in self.dat:
inst_dict['highlight'] = self.dat['mat_hl'][i,j]
# append link
self.viz['links'].append( inst_dict )
def df_to_dat(self, df):
import numpy as np
import pandas as pd
self.dat['mat'] = df['mat'].values
self.dat['nodes']['row'] = df['mat'].index.tolist()
self.dat['nodes']['col'] = df['mat'].columns.tolist()
# check if there is category information in the column names
if type(self.dat['nodes']['col'][0]) is tuple:
self.dat['nodes']['col'] = [i[0] for i in self.dat['nodes']['col']]
if 'mat_up' in df:
self.dat['mat_up'] = df['mat_up'].values
self.dat['mat_dn'] = df['mat_dn'].values
def dat_to_df(self):
import numpy as np
import pandas as pd
df = {}
# always return 'mat' dataframe
df['mat'] = pd.DataFrame(data = self.dat['mat'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
if 'mat_up' in self.dat:
df['mat_up'] = pd.DataFrame(data = self.dat['mat_up'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
df['mat_dn'] = pd.DataFrame(data = self.dat['mat_dn'], columns=self.dat['nodes']['col'], index=self.dat['nodes']['row'])
return df
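# Round-trip sketch (hypothetical `net` object, made-up 2x2 data): df_to_dat
# and dat_to_df are inverses over the 'mat' frame:
#
#   import numpy as np
#   import pandas as pd
#   df = {'mat': pd.DataFrame(np.ones((2, 2)), index=['r1', 'r2'], columns=['c1', 'c2'])}
#   net.df_to_dat(df)          # fills net.dat['mat'] and net.dat['nodes']
#   df_back = net.dat_to_df()  # df_back['mat'] matches the input frame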
def make_filtered_views(self, dist_type='cosine', run_clustering=True, \
dendro=True, views=['filter_row_sum','N_row_sum'], calc_col_cats=True, \
linkage_type='average'):
'''
This will calculate multiple views of a clustergram by filtering the data
and clustering after each filtering. This filtering will keep the top N
rows based on some quantity (sum, num-non-zero, etc.).
'''
from copy import deepcopy
print('running make_filtered_views')
print('dist_type '+str(dist_type))
# get dataframe dictionary of network and remove rows/cols with all zero values
df = self.dat_to_df()
# each row or column must have at least one non-zero value
threshold = 0.0001
df = self.df_filter_row(df, threshold)
df = self.df_filter_col(df, threshold)
# calculate initial view with no row filtering
##################################################
# swap back in the filtered df to dat
self.df_to_dat(df)
# cluster initial view
self.cluster_row_and_col(dist_type=dist_type, linkage_type=linkage_type, \
run_clustering=run_clustering, dendro=dendro)
# set up views
all_views = []
# generate views for each column category (default to only one)
all_col_cat = ['all_category']
# check for column categories and check whether category specific clustering
# should be calculated
if len(self.dat['node_info']['col']['cl']) > 0 and calc_col_cats:
tmp_cats = sorted(list(set(self.dat['node_info']['col']['cl'])))
# gather all col_cats
all_col_cat.extend(tmp_cats)
for inst_col_cat in all_col_cat:
# make a copy of df to send to filters
send_df = deepcopy(df)
# add N_row_sum views
if 'N_row_sum' in views:
print('add N top views')
all_views = self.add_N_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat )
if 'filter_row_sum' in views:
all_views = self.add_pct_top_views( send_df, all_views, dist_type=dist_type, current_col_cat=inst_col_cat )
# add views to viz
self.viz['views'] = all_views
print('finished make_filtered_views')
def add_pct_top_views(self, df, all_views, dist_type='cosine', \
current_col_cat='all_category'):
from clustergrammer import Network
from copy import deepcopy
import numpy as np
# make a copy of the network so that filtering is not propagated
copy_net = deepcopy(self)
# filter columns by category if necessary - do this on df, which is a copy
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols)
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category':
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# filter between 0% and 90% of some threshold
all_filt = range(10)
all_filt = [i/float(10) for i in all_filt]
# row filtering values
mat = deepcopy(df['mat'])
sum_row = np.sum(mat, axis=1)
max_sum = max(sum_row)
for inst_filt in all_filt:
cutoff = inst_filt * max_sum
# make a copy of the network so that filtering is not propagated
copy_net = deepcopy(self)
# make copy of df
inst_df = deepcopy(df)
# filter row in df
inst_df = copy_net.df_filter_row(inst_df, cutoff, take_abs=False)
# filter columns by category if necessary
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
inst_df['mat'] = copy_net.grab_df_subset(inst_df['mat'], keep_rows='all', keep_cols=keep_cols)
if 'mat_up' in inst_df:
# grab up and down data
inst_df['mat_up'] = copy_net.grab_df_subset(inst_df['mat_up'], keep_rows='all', keep_cols=keep_cols)
inst_df['mat_dn'] = copy_net.grab_df_subset(inst_df['mat_dn'], keep_rows='all', keep_cols=keep_cols)
# ini net
net = deepcopy(Network())
# transfer to dat
net.df_to_dat(inst_df)
# add col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in copy_net.dat['nodes']['col']:
inst_col_cats.append( cat_key_col[inst_col_name] )
# transfer category information
net.dat['node_info']['col']['cl'] = inst_col_cats
# add col_in_cat
net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
# try to cluster
try:
try:
# cluster
net.cluster_row_and_col(dist_type=dist_type,run_clustering=True)
except:
# cluster
net.cluster_row_and_col(dist_type=dist_type,run_clustering=False)
# add view
inst_view = {}
inst_view['filter_row_sum'] = inst_filt
inst_view['dist'] = 'cos'
inst_view['col_cat'] = current_col_cat
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t*** did not cluster pct filtered view')
return all_views
def add_N_top_views(self, df, all_views, dist_type='cosine',\
current_col_cat='all_category'):
from clustergrammer import Network
from copy import deepcopy
# make a copy of the network
copy_net = deepcopy(self)
# filter columns by category if necessary
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
df['mat'] = copy_net.grab_df_subset(df['mat'], keep_rows='all', keep_cols=keep_cols)
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0 and current_col_cat=='all_category':
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# keep the following number of top rows
keep_top = ['all',500,400,300,200,100,90,80,70,60,50,40,30,20,10]
# get copy of df and take abs value, cell line cols and gene rows
df_abs = deepcopy(df['mat'])
# transpose to get gene columns
df_abs = df_abs.transpose()
# sum the values of the genes in the cell lines
tmp_sum = df_abs.sum(axis=0)
# take absolute value to keep most positive and most negative rows
tmp_sum = tmp_sum.abs()
# sort rows by value (sort_values; the in-place Series.sort was removed from pandas)
tmp_sum = tmp_sum.sort_values(ascending=False)
rows_sorted = tmp_sum.index.values.tolist()
for inst_keep in keep_top:
# initialize df
tmp_df = deepcopy(df)
# filter columns by category if necessary
if current_col_cat != 'all_category':
keep_cols = copy_net.dat['node_info']['col_in_cat'][current_col_cat]
tmp_df['mat'] = copy_net.grab_df_subset(tmp_df['mat'], keep_rows='all', keep_cols=keep_cols)
if 'mat_up' in df:
# grab up and down data
tmp_df['mat_up'] = copy_net.grab_df_subset(tmp_df['mat_up'], keep_rows='all', keep_cols=keep_cols)
tmp_df['mat_dn'] = copy_net.grab_df_subset(tmp_df['mat_dn'], keep_rows='all', keep_cols=keep_cols)
# check 'all' first so the str/int comparison is never evaluated for it
if inst_keep == 'all' or inst_keep < len(rows_sorted):
# initialize network
net = deepcopy(Network())
# filter the rows
if inst_keep != 'all':
# get the labels of the rows that will be kept
keep_rows = rows_sorted[0:inst_keep]
# filter the matrix (.loc; the old .ix indexer was removed from pandas)
tmp_df['mat'] = tmp_df['mat'].loc[keep_rows]
if 'mat_up' in tmp_df:
tmp_df['mat_up'] = tmp_df['mat_up'].loc[keep_rows]
tmp_df['mat_dn'] = tmp_df['mat_dn'].loc[keep_rows]
# filter columns - some columns may have all zero values
tmp_df = self.df_filter_col(tmp_df,0.001)
# transfer to dat
net.df_to_dat(tmp_df)
else:
net.df_to_dat(tmp_df)
# add col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in self.dat['nodes']['col']:
inst_col_cats.append( cat_key_col[inst_col_name] )
# transfer category information
net.dat['node_info']['col']['cl'] = inst_col_cats
# add col_in_cat
net.dat['node_info']['col_in_cat'] = copy_net.dat['node_info']['col_in_cat']
# try to cluster
try:
try:
# cluster
net.cluster_row_and_col(dist_type,run_clustering=True)
except:
# cluster
net.cluster_row_and_col(dist_type,run_clustering=False)
# add view
inst_view = {}
inst_view['N_row_sum'] = inst_keep
inst_view['dist'] = 'cos'
inst_view['col_cat'] = current_col_cat
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t*** did not cluster N filtered view')
return all_views
def fast_mult_views(self, dist_type='cos', run_clustering=True, dendro=True):
'''
This will use Pandas to calculate multiple views of a clustergram.
Currently, it only filters based on row-sum and disregards
link information (used to add click functionality).
'''
import numpy as np
import pandas as pd
from clustergrammer import Network
from copy import deepcopy
# gather category key
is_col_cat = False
if len(self.dat['node_info']['col']['cl']) > 0:
is_col_cat = True
cat_key_col = {}
for i in range(len(self.dat['nodes']['col'])):
cat_key_col[ self.dat['nodes']['col'][i] ] = self.dat['node_info']['col']['cl'][i]
# get dataframe dictionary of network and remove rows/cols with all zero values
df = self.dat_to_df()
# each row or column must have at least one non-zero value
threshold = 0.001
df = self.df_filter_row(df, threshold)
df = self.df_filter_col(df, threshold)
# calculate initial view with no row filtering
#################################################
# swap back in filtered df to dat
self.df_to_dat(df)
# cluster initial view
self.cluster_row_and_col('cos',run_clustering=run_clustering, dendro=dendro)
# set up views
all_views = []
# set up initial view
inst_view = {}
inst_view['filter_row_sum'] = 0
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = self.viz['row_nodes']
inst_view['nodes']['col_nodes'] = self.viz['col_nodes']
# add view with no filtering
all_views.append(inst_view)
# filter between 0% and 90% of some threshold
all_filt = range(10)
all_filt = [i/float(10) for i in all_filt]
# row filtering values
mat = self.dat['mat']
mat_abs = abs(mat)
sum_row = np.sum(mat_abs, axis=1)
max_sum = max(sum_row)
for inst_filt in all_filt:
# skip zero filtering
if inst_filt > 0:
cutoff = inst_filt * max_sum
# filter row
df = self.df_filter_row(df, cutoff, take_abs=True)
print('\tfiltering at cutoff ' + str(inst_filt) + ' mat shape: ' + str(df['mat'].shape))
# ini net
net = deepcopy(Network())
# transfer to dat
net.df_to_dat(df)
# add col categories if necessary
if is_col_cat:
inst_col_cats = []
for inst_col_name in self.dat['nodes']['col']:
inst_col_cats.append( cat_key_col[inst_col_name] )
net.dat['node_info']['col']['cl'] = inst_col_cats
# try to cluster
try:
# cluster
net.cluster_row_and_col('cos')
# add view
inst_view = {}
inst_view['filter_row_sum'] = inst_filt
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t*** did not cluster filtered view')
# add views to viz
self.viz['views'] = all_views
print('\tfinished fast_mult_views')
def make_mult_views(self, dist_type='cos',filter_row=['value'], filter_col=False, run_clustering=True, dendro=True):
'''
This will calculate multiple views of a clustergram by filtering the
data and clustering after each filtering. By default row filtering will
be turned on and column filtering will not. The filtering steps are defined
as a percentage of the maximum value found in the network.
'''
from clustergrammer import Network
from copy import deepcopy
# filter between 0% and 90% of some to be determined value
all_filt = range(10)
all_filt = [i/float(10) for i in all_filt]
# cluster default view
self.cluster_row_and_col('cos', run_clustering=run_clustering, dendro=dendro)
self.viz['views'] = []
all_views = []
# Perform row filterings
###########################
if len(filter_row) > 0:
# perform multiple types of row filtering
###########################################
for inst_type in filter_row:
for row_filt_int in all_filt:
# initialize new net
net = deepcopy(Network())
net.dat = deepcopy(self.dat)
# filter rows
net.filter_row_thresh(row_filt_int, filter_type=inst_type)
# filter columns since some columns might be all zero
net.filter_col_thresh(0.001,1)
# try to cluster - will not work if there is one row
try:
# cluster
net.cluster_row_and_col('cos')
inst_name = 'filter_row'+'_'+inst_type
# add view
inst_view = {}
inst_view[inst_name] = row_filt_int
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('\t***did not cluster filtered view')
# Default col Filtering
###########################
inst_meet = 1
if filter_col == True:
# col filtering
#####################
for col_filt in all_filt:
# print(col_filt)
# initialize new net
net = deepcopy(Network())
net.dat = deepcopy(self.dat)
# maximum absolute value in the matrix, used to scale the column filter
max_mat = abs(net.dat['mat']).max()
filt_value = col_filt * max_mat
# filter cols
net.filter_col_thresh(filt_value, inst_meet)
# try to cluster - will not work if there is one col
try:
# cluster
net.cluster_row_and_col('cos')
# add view
inst_view = {}
inst_view['filter_col'] = col_filt
inst_view['dist'] = 'cos'
inst_view['nodes'] = {}
inst_view['nodes']['row_nodes'] = net.viz['row_nodes']
inst_view['nodes']['col_nodes'] = net.viz['col_nodes']
all_views.append(inst_view)
except:
print('did not cluster filtered view')
# add views to viz
self.viz['views'] = all_views
@staticmethod
def df_filter_row(df, threshold, take_abs=True):
''' filter rows in matrix at some threshold
and remove columns that have a sum below this threshold '''
import pandas as pd
from copy import deepcopy
from clustergrammer import Network
net = Network()
# take absolute value if necessary
if take_abs == True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
ini_rows = df_copy.index.values.tolist()
# transpose df
df_copy = df_copy.transpose()
# sum the values of the rows
tmp_sum = df_copy.sum(axis=0)
# take absolute value to keep most positive and most negative rows
tmp_sum = tmp_sum.abs()
# sort rows by value (sort_values; the in-place Series.sort was removed from pandas)
tmp_sum = tmp_sum.sort_values(ascending=False)
# filter series using threshold
tmp_sum = tmp_sum[tmp_sum>threshold]
# get keep_row names
keep_rows = sorted(tmp_sum.index.values.tolist())
if len(keep_rows) < len(ini_rows):
# grab the subset of the data
df['mat'] = net.grab_df_subset(df['mat'], keep_rows=keep_rows)
if 'mat_up' in df:
# grab up and down data
df['mat_up'] = net.grab_df_subset(df['mat_up'], keep_rows=keep_rows)
df['mat_dn'] = net.grab_df_subset(df['mat_dn'], keep_rows=keep_rows)
return df
@staticmethod
def df_filter_col(df, threshold, take_abs=True):
''' filter columns in matrix at some threshold
and remove rows that have all zero values '''
import pandas
from copy import deepcopy
from clustergrammer import Network
net = Network()
# take absolute value if necessary
if take_abs == True:
df_copy = deepcopy(df['mat'].abs())
else:
df_copy = deepcopy(df['mat'])
# filter columns to remove columns with all zero values
# transpose
df_copy = df_copy.transpose()
df_copy = df_copy[df_copy.sum(axis=1) > threshold]
# transpose back
df_copy = df_copy.transpose()
# filter rows
df_copy = df_copy[df_copy.sum(axis=1) > 0]
# get df ready for export
if take_abs == True:
inst_rows = df_copy.index.tolist()
inst_cols = df_copy.columns.tolist()
df['mat'] = net.grab_df_subset(df['mat'], inst_rows, inst_cols)
else:
# just transfer the copied data
df['mat'] = df_copy
return df
@staticmethod
def grab_df_subset(df, keep_rows='all', keep_cols='all'):
if keep_cols != 'all':
# filter columns
df = df[keep_cols]
if keep_rows != 'all':
# filter rows (.loc; the old .ix indexer was removed from pandas)
df = df.loc[keep_rows]
return df
@staticmethod
def load_gmt(filename):
f = open(filename, 'r')
lines = f.readlines()
f.close()
gmt = {}
# loop through the lines of the gmt
for i in range(len(lines)):
# get the inst line, strip off the new line character
inst_line = lines[i].rstrip()
inst_term = inst_line.split('\t')[0]
# get the elements
inst_elems = inst_line.split('\t')[2:]
# save the drug-kinase sets
gmt[inst_term] = inst_elems
return gmt
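# GMT format reminder (the standard gene-set format; not specific to this file):
# each tab-separated line is  <term> <description> <elem1> <elem2> ...
# so load_gmt on a file containing the made-up line
#   'SET_A\tsome description\tGENE1\tGENE2'
# yields {'SET_A': ['GENE1', 'GENE2']} (the description column is skipped).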
@staticmethod
def load_json_to_dict(filename):
''' load json to python dict and return dict '''
import json
f = open(filename, 'r')
inst_dict = json.load(f)
f.close()
return inst_dict
@staticmethod
def save_dict_to_json(inst_dict, filename, indent='no-indent'):
import json
# save as a json
fw = open(filename, 'w')
if indent == 'indent':
fw.write( json.dumps(inst_dict, indent=2) )
else:
fw.write( json.dumps(inst_dict) )
fw.close()
@staticmethod
def ini_clust_order():
rowcol = ['row','col']
orderings = ['clust','rank','group','ini']
clust_order = {}
for inst_node in rowcol:
clust_order[inst_node] = {}
for inst_order in orderings:
clust_order[inst_node][inst_order] = []
return clust_order
@staticmethod
def threshold_vect_comparison(x, y, cutoff):
import numpy as np
# x vector
############
# take absolute value of x
x_abs = np.absolute(x)
# this returns a tuple
found_tuple = np.where(x_abs >= cutoff)
# get index array
found_index_x = found_tuple[0]
# y vector
############
# take absolute value of y
y_abs = np.absolute(y)
# this returns a tuple
found_tuple = np.where(y_abs >= cutoff)
# get index array
found_index_y = found_tuple[0]
# get common intersection
found_common = np.intersect1d(found_index_x, found_index_y)
# apply cutoff
thresh_x = x[found_common]
thresh_y = y[found_common]
# return the threshold data
return thresh_x, thresh_y
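# Worked example with made-up vectors (the cutoff is applied to |x| and |y|
# jointly; only indices passing in both survive):
#
#   import numpy as np
#   x = np.array([0.1, -2.0, 3.0, 0.2])
#   y = np.array([5.0,  0.1, -4.0, 6.0])
#   threshold_vect_comparison(x, y, cutoff=1.0)
#   # -> only index 2 passes in both vectors: (array([3.]), array([-4.]))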
@staticmethod
def group_cutoffs():
# generate distance cutoffs
all_dist = []
for i in range(11):
all_dist.append(float(i)/10)
return all_dist
@staticmethod
def find_dict_in_list(list_dict, search_value, search_string):
''' find a dict in a list of dicts by searching for a value '''
# get all the possible values of search_value
all_values = [d[search_value] for d in list_dict]
# check if the search string is among those values
if search_string in all_values:
# find the first matching dict (built-in next() works on both Python 2 and 3)
found_dict = next(item for item in list_dict if item[search_value] == search_string)
else:
found_dict = {}
# return the found dictionary
return found_dict
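# Usage sketch with made-up records:
#
#   rows = [{'name': 'a', 'val': 1}, {'name': 'b', 'val': 2}]
#   find_dict_in_list(rows, 'name', 'b')  # -> {'name': 'b', 'val': 2}
#   find_dict_in_list(rows, 'name', 'z')  # -> {} (no match)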
| 31.528162 | 130 | 0.603546 | 8,648 | 59,336 | 3.928885 | 0.056545 | 0.056656 | 0.029461 | 0.034877 | 0.601054 | 0.542279 | 0.497042 | 0.459605 | 0.423875 | 0.402625 | 0 | 0.005392 | 0.253017 | 59,336 | 1,882 | 131 | 31.528162 | 0.761185 | 0.217254 | 0 | 0.508351 | 0 | 0 | 0.11193 | 0 | 0.001044 | 0 | 0 | 0 | 0 | 1 | 0.041754 | false | 0 | 0.056367 | 0 | 0.11691 | 0.021921 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6be3f12ed534c88956efb0cde9bfba8da5449ad9 | 1,113 | py | Python | server/face_recogniser.py | fvalle1/ai_server | 0bbb2b842a329ca8bbeb6529a2008b61dcc77cc0 | [
"MIT"
] | 1 | 2021-03-10T15:37:21.000Z | 2021-03-10T15:37:21.000Z | server/face_recogniser.py | fvalle1/ai_server | 0bbb2b842a329ca8bbeb6529a2008b61dcc77cc0 | [
"MIT"
] | null | null | null | server/face_recogniser.py | fvalle1/ai_server | 0bbb2b842a329ca8bbeb6529a2008b61dcc77cc0 | [
"MIT"
] | null | null | null | import cv2 as cv
from model import model
class face_recogniser(model):
def __init__(self):
super().__init__()
self.net = cv.dnn.readNet('/home/pi/inception/face-detection-adas-0001.xml','/home/pi/inception/face-detection-adas-0001.bin')
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_MYRIAD)
def add_face_rectangle(self, frame):
# Prepare input blob and perform an inference.
blob = cv.dnn.blobFromImage(frame, size=(672, 384), ddepth=cv.CV_8U)
self.net.setInput(blob)
out = self.net.forward()
# Draw detected faces on the frame.
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
xmin = int(detection[3] * frame.shape[1])
ymin = int(detection[4] * frame.shape[0])
xmax = int(detection[5] * frame.shape[1])
ymax = int(detection[6] * frame.shape[0])
if confidence > 0.5:
cv.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 0))
return frame
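# Note on the output layout (OpenVINO face-detection-adas-0001 convention):
# each detection row of the reshaped output is
#   [image_id, label, confidence, xmin, ymin, xmax, ymax]
# with coordinates normalized to [0, 1], hence the scaling by frame.shape above.
# Hypothetical usage sketch (camera index 0 assumed, not from this file):
#
#   cap = cv.VideoCapture(0)
#   ok, frame = cap.read()
#   if ok:
#       out = face_recogniser().process(frame)
#       cv.imwrite('detections.png', out)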
def process(self, frame):
return self.add_face_rectangle(frame)
| 39.75 | 134 | 0.615454 | 150 | 1,113 | 4.486667 | 0.5 | 0.041605 | 0.044577 | 0.056464 | 0.106984 | 0.106984 | 0.106984 | 0 | 0 | 0 | 0 | 0.040964 | 0.254268 | 1,113 | 27 | 135 | 41.222222 | 0.76988 | 0.070081 | 0 | 0 | 0 | 0 | 0.091085 | 0.091085 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0.045455 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6be4827cb6db3797b4e0960a8f9afb82862b44ab | 2,783 | py | Python | tests/test_cpymadtools/test_generators.py | fsoubelet/PyhDToolk | aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66 | [
"MIT"
] | 5 | 2020-05-28T09:16:01.000Z | 2021-12-27T18:59:15.000Z | tests/test_cpymadtools/test_generators.py | fsoubelet/PyhDToolk | aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66 | [
"MIT"
] | 71 | 2020-02-20T20:32:43.000Z | 2022-03-24T17:04:28.000Z | tests/test_cpymadtools/test_generators.py | fsoubelet/PyhDToolk | aef9d5f1fac2d4c4307d50ba5c53f6a5eb635a66 | [
"MIT"
] | 2 | 2021-09-28T16:01:06.000Z | 2022-03-16T19:04:23.000Z | import random
import pytest
from pyhdtoolkit.cpymadtools.generators import LatticeGenerator
class TestLatticeGenerator:
def test_base_cas_lattice_generation(self):
base_cas_lattice = LatticeGenerator.generate_base_cas_lattice()
assert isinstance(base_cas_lattice, str)
assert len(base_cas_lattice) == 1493
def test_onesext_cas_lattice(self):
onesext_cas_lattice = LatticeGenerator.generate_onesext_cas_lattice()
assert isinstance(onesext_cas_lattice, str)
assert len(onesext_cas_lattice) == 2051
def test_oneoct_cas_lattice(self):
oneoct_cas_lattice = LatticeGenerator.generate_oneoct_cas_lattice()
assert isinstance(oneoct_cas_lattice, str)
assert len(oneoct_cas_lattice) == 2050
def test_tripleterrors_study_reference(self):
tripleterrors_study_reference = LatticeGenerator.generate_tripleterrors_study_reference()
assert isinstance(tripleterrors_study_reference, str)
assert len(tripleterrors_study_reference) == 1617
@pytest.mark.parametrize(
"randseed, tferror",
[
("", ""),
("95", "195"),
("105038", "0.001"),
(str(random.randint(0, 1e7)), str(random.randint(0, 1e7))),
(random.randint(0, 1e7), random.randint(0, 1e7)),
],
)
def test_tripleterrors_study_tferror_job(self, randseed, tferror):
tripleterrors_study_tferror_job = LatticeGenerator.generate_tripleterrors_study_tferror_job(
rand_seed=randseed, tf_error=tferror,
)
assert isinstance(tripleterrors_study_tferror_job, str)
assert len(tripleterrors_study_tferror_job) == 2521 + len(str(randseed)) + len(str(tferror))
assert f"eoption, add, seed = {randseed};" in tripleterrors_study_tferror_job
assert f"B2r = {tferror};" in tripleterrors_study_tferror_job
@pytest.mark.parametrize(
"randseed, mserror",
[
("", ""),
("95", "195"),
("105038", "0.001"),
(str(random.randint(0, 1e7)), str(random.randint(0, 1e7))),
(random.randint(0, 1e7), random.randint(0, 1e7)),
],
)
def test_tripleterrors_study_mserror_job(self, randseed, mserror):
tripleterrors_study_mserror_job = LatticeGenerator.generate_tripleterrors_study_mserror_job(
rand_seed=randseed, ms_error=mserror,
)
assert isinstance(tripleterrors_study_mserror_job, str)
assert len(tripleterrors_study_mserror_job) == 2384 + len(str(randseed)) + len(str(mserror))
assert f"eoption, add, seed = {randseed};" in tripleterrors_study_mserror_job
assert f"ealign, ds := {mserror} * 1E-3 * TGAUSS(GCUTR);" in tripleterrors_study_mserror_job
| 42.166667 | 100 | 0.680201 | 308 | 2,783 | 5.824675 | 0.201299 | 0.190635 | 0.06243 | 0.075808 | 0.389075 | 0.218506 | 0.181717 | 0.181717 | 0.181717 | 0.12709 | 0 | 0.037207 | 0.217751 | 2,783 | 65 | 101 | 42.815385 | 0.786863 | 0 | 0 | 0.25 | 0 | 0 | 0.06935 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.107143 | false | 0 | 0.053571 | 0 | 0.178571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6be4e81e9d40d83f06f1cfa243fa8007e8370a2f | 1,789 | py | Python | preprocessing/act_bed_construction.py | shtoneyan/sea-lion | 7e1ce9a18a147eea42e6172a2329d696f6e6aef9 | [
"MIT"
] | 1 | 2022-02-10T21:21:32.000Z | 2022-02-10T21:21:32.000Z | preprocessing/act_bed_construction.py | shtoneyan/sea-lion | 7e1ce9a18a147eea42e6172a2329d696f6e6aef9 | [
"MIT"
] | null | null | null | preprocessing/act_bed_construction.py | shtoneyan/sea-lion | 7e1ce9a18a147eea42e6172a2329d696f6e6aef9 | [
"MIT"
] | null | null | null | import pandas as pd
import re
import sys
import numpy as np  # np is used by align_seqs_scores_1hot below
# read bed file
# construct activity table
# output tfr file
def main():
bed_file = sys.argv[1]
act_table = sys.argv[2]
data = pd.read_csv(act_table,sep = '\t')
data.rename(columns={'Unnamed: 0':'loci'}, inplace=True)
chrom = [i.split(':')[0] for i in list(data.loci)]
coord = [re.split(':()',i)[-1] for i in list(data.loci)]
start = [i.split('(')[0].split('-')[0] for i in coord]
end = [i.split('(')[0].split('-')[1] for i in coord]
strand = [i[-2] for i in coord]
data = data.drop(columns=['loci'])
# chrom = [i.split(':')[0] for i in list(data.loci)]
# start = [re.split(':|-',i)[1] for i in list(data.loci)]
# end = [re.split(":|-",i)[2] for i in list(data.loci)]
# clean_end = [i[:-3] for i in end]
# strand = [i[-2] for i in end]
data['chrom'] = chrom
data['start'] = start
data['end'] = end
data['strand'] = strand
cols = data.columns.tolist()
cols = cols[-4:]+cols[:-4]
data = data[cols]
output_act = act_table.split('.txt')[0]+'.bed'
data.to_csv(output_act,sep='\t',index = False)
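# Example of the loci parsing above with a made-up index value: a string like
#   'chr1:1000-2000(+)'
# splits into chrom='chr1', start='1000', end='2000', strand='+', matching
# the chrom/coord/start/end/strand list comprehensions in main().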
##############################################################
def align_seqs_scores_1hot(seq_vecs, seq_scores, sort=True):
if sort:
seq_headers = sorted(seq_vecs.keys())
else:
seq_headers = seq_vecs.keys()
# construct lists of vectors
train_scores = []
train_seqs = []
for header in seq_headers:
train_seqs.append(seq_vecs[header])
train_scores.append(seq_scores[header])
# stack into matrices
train_seqs = np.vstack(train_seqs)
train_scores = np.vstack(train_scores)
return train_seqs, train_scores
if __name__ == '__main__':
main()
| 28.854839 | 66 | 0.567915 | 260 | 1,789 | 3.757692 | 0.296154 | 0.040942 | 0.061412 | 0.051177 | 0.188332 | 0.174002 | 0.126919 | 0.116684 | 0.116684 | 0.116684 | 0 | 0.013768 | 0.228619 | 1,789 | 61 | 67 | 29.327869 | 0.694203 | 0.196758 | 0 | 0 | 0 | 0 | 0.047619 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.078947 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6be8cef9f2811735b313ed8611fa3362dad56bc1 | 2,072 | py | Python | aiserver/backup/simpleserver2.py | hirasaki1985/Oreilly_deepLearning | 378b60ccec67dc616669fcd65ad14c7eddae6767 | [
"MIT"
] | null | null | null | aiserver/backup/simpleserver2.py | hirasaki1985/Oreilly_deepLearning | 378b60ccec67dc616669fcd65ad14c7eddae6767 | [
"MIT"
] | null | null | null | aiserver/backup/simpleserver2.py | hirasaki1985/Oreilly_deepLearning | 378b60ccec67dc616669fcd65ad14c7eddae6767 | [
"MIT"
] | null | null | null | import sys, socket
import json
import cgi
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import numpy as np
from http.server import BaseHTTPRequestHandler, HTTPServer
from modules.controller import Controller
# setting
host = ''
port = 8000
class MyHandler(BaseHTTPRequestHandler):
def do_POST(self):
print("simpleserver do_POST exec()")
if self.path.endswith('favicon.ico'):
return
self.controller = Controller()
# request
form = self.getRequestData()
print(type(form))
# logic
#logicResult = ""
logicResult = self.controller.webLogic(form)
# make result
result = self.makeResponseData(logicResult)
# send
self.sendResponse(result)
return
def getRequestData(self):
# parse the POSTed form data
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD':'POST',
'CONTENT_TYPE':'png',
})
print(form)
#image = {"test":"requestData"}
return form
def makeResponseData(self, result):
print("### simpleserver makeResponseData exec")
#result = {"test":"responseData"}
print(result)
print(type(result))
return result
def sendResponse(self, result):
print("### simpleserver sendResponse exec")
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.send_header('Access-Control-Allow-Origin', 'http://deeplearning.local.com')
self.end_headers()
#self.wfile.flush()
self.wfile.write(str(result).encode('UTF-8'))
self.wfile.close()
return
try:
server = HTTPServer((host, port), MyHandler)
server.serve_forever()
except KeyboardInterrupt:
print ('^C received, shutting down the web server')
server.socket.close()
| 25.9 | 89 | 0.586873 | 200 | 2,072 | 6.035 | 0.465 | 0.042254 | 0.024855 | 0.044739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005587 | 0.30888 | 2,072 | 79 | 90 | 26.227848 | 0.837291 | 0.074807 | 0 | 0.075472 | 0 | 0 | 0.145594 | 0.014778 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075472 | false | 0 | 0.169811 | 0 | 0.339623 | 0.150943 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bedb7df070733efc849ec3f70b009e7c5d82ea3 | 1,214 | py | Python | get_machine_id.py | Server-Factory/Parallels-Utils | 9b5c724b59832abf0506c5f632b0122573e71cd7 | [
"Apache-2.0"
] | 1 | 2021-01-01T23:24:31.000Z | 2021-01-01T23:24:31.000Z | get_machine_id.py | Server-Factory/Parallels-Utils | 9b5c724b59832abf0506c5f632b0122573e71cd7 | [
"Apache-2.0"
] | null | null | null | get_machine_id.py | Server-Factory/Parallels-Utils | 9b5c724b59832abf0506c5f632b0122573e71cd7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import subprocess
import sys
def main():
if len(sys.argv) > 1:
machines = dict()
image = sys.argv[1]
row_home = "Home:"
row_id_open = "{"
row_id_close = "}"
output = subprocess.check_output(['prlctl', 'list', '-a', '-i'])
# str() on the raw bytes keeps newlines as the literal two characters '\n',
# which is why rows are split on '\\n' below
items = str(output).split("ID:")
for item in items:
if row_home in item:
home = ""
machine_id = ""
rows = item.strip().split('\\n')
for row in rows:
if row_id_open in row.strip() and row_id_close in row.strip():
machine_id = row.replace(row_id_open, "").replace(row_id_close, "").strip()
if row_home in row:
home = row.replace(row_home, "").strip()
machines[home] = machine_id
for machine_id in machines:
if machine_id.startswith(image):
machine = machines[machine_id]
print(machine)
sys.exit(0)
print("Unknown_ID")
sys.exit(1)
else:
print("No image path provided")
sys.exit(1)
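# Illustrative fragment of the `prlctl list -a -i` output the parser above
# expects (values are made up):
#
#   ID: {11111111-2222-3333-4444-555555555555}
#   Name: my-vm
#   ...
#   Home: /Users/me/Parallels/my-vm.pvm/
#
# main() maps each Home path to the braced UUID and prints the UUID whose
# Home path starts with the image path given as argv[1].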
if __name__ == "__main__":
main()
| 28.904762 | 99 | 0.490939 | 142 | 1,214 | 3.971831 | 0.338028 | 0.053191 | 0.047872 | 0.039007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006684 | 0.383855 | 1,214 | 41 | 100 | 29.609756 | 0.747326 | 0.01318 | 0 | 0.058824 | 0 | 0 | 0.055973 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.058824 | 0 | 0.088235 | 0.088235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bf3a11f4eaf5ef256dd41b159fcdf1ed04aaca8 | 12,158 | py | Python | transform/preprocess/student_preprocess.py | WillianFuks/papis19 | 479c5460218c8f02716dbd5c2b0b9121a4328ab0 | [
"Apache-2.0"
] | 4 | 2019-06-24T13:20:22.000Z | 2020-11-12T01:19:02.000Z | transform/preprocess/student_preprocess.py | WillianFuks/papis19 | 479c5460218c8f02716dbd5c2b0b9121a4328ab0 | [
"Apache-2.0"
] | 7 | 2019-12-16T21:55:20.000Z | 2022-02-10T00:16:54.000Z | transform/preprocess/student_preprocess.py | WillianFuks/papis19 | 479c5460218c8f02716dbd5c2b0b9121a4328ab0 | [
"Apache-2.0"
] | 8 | 2019-06-24T12:27:51.000Z | 2021-04-20T18:33:24.000Z | # Copyright 2019 Willian Fuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import sys
import argparse
import six
import tensorflow as tf
import tensorflow_transform as tft
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import tensorflow_transform.beam.impl as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata, dataset_schema
from tensorflow_transform.beam import impl as beam_impl
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
import ast
import preprocess.metadata as metadata
import tempfile
if not six.PY2:
sys.exit("ERROR: Must use Python2.7")
def build_bq_query(filename, project_id, init_date, end_date):
query = open(filename).read().format(project_id=project_id, init_date=init_date,
end_date=end_date)
return query
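# Sketch of what build_bq_query does, with a made-up template file: given a
# 'query.sql' containing
#   SELECT * FROM `{project_id}.ds.t` WHERE d BETWEEN '{init_date}' AND '{end_date}'
# the call build_bq_query('query.sql', 'my-proj', '2019-01-01', '2019-01-31')
# returns the SQL with the placeholders substituted via str.format.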
def build_pipeline_options(args):
"""
Apache Beam Pipelines must receive a set of options for setting how the engine should
run.
Args
----
args: argparse.Namespace
Returns
-------
pipeline_options: defines how to run beam job.
"""
options = {}
options['runner'] = args.runner
if args.temp_location:
options['temp_location'] = args.temp_location
if args.project:
options['project'] = args.project
if args.staging_location:
options['staging_location'] = args.staging_location
if args.job_name:
options['job_name'] = args.job_name
if args.max_num_workers:
options['max_num_workers'] = args.max_num_workers
if args.machine_type:
options['machine_type'] = args.machine_type
options.update({'save_main_session': True})
options.update({'setup_file': './setup.py'})
pipeline_options = PipelineOptions(**options)
return pipeline_options
class FlattenInteractionsFn(beam.DoFn):
def process(self, element):
"""
flattens table
"""
for hit in element[1]:
yield {'customer_id': element[0], 'sku': hit['sku'], 'action': hit['action']}
def preprocess_fn(dictrow):
return {
'customer_id': tft.string_to_int(dictrow['customer_id'],
vocab_filename='customers_mapping'),
'sku': tft.string_to_int(dictrow['sku'], vocab_filename='skus_mapping'),
'action': dictrow['action']
}
def aggregate_customers_sessions(sessions):
"""
Receives as input what products customers interacted with and returns their final
aggregation.
Args
----
sessions: list of list of dicts.
List where each element is a list of dict of type: [{'action': '', 'sku': ''}]
Returns
-------
results: list of dicts
Each resulting dict is aggregated on the sku and action level (repeating
clauses are filtered out).
"""
result = []
for session in sessions:
for hit in session:
result.append(hit)
return [dict(t) for t in {tuple(d.items()) for d in result}]
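# Worked example with made-up sessions (duplicate sku/action pairs collapse):
#
#   sessions = [
#       [{'action': 'Browsed', 'sku': 'A'}],
#       [{'action': 'Browsed', 'sku': 'A'}, {'action': 'AddedToBasket', 'sku': 'B'}],
#   ]
#   aggregate_customers_sessions(sessions)
#   # -> [{'action': 'Browsed', 'sku': 'A'}, {'action': 'AddedToBasket', 'sku': 'B'}]
#   # (order is not guaranteed, since the dedup goes through a set)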
def build_final_results(row):
"""
row = (customer_id, [{sku:, action}, {sku:, action}])
"""
skus_list = [e['sku'] for e in row[1]]
actions_list = [e['action'] for e in row[1]]
return {
'customer_id': row[0],
'skus_list': skus_list,
'actions_list': actions_list
}
def build_test_results(row):
"""
('customer2', {'test': [{'skus_list': [1, 1], 'actions_list': ['AddedToBasket',
'Browsed'], 'customer_id': 'customer2'}], 'train': [{'skus_list': [1, 1],
'actions_list': ['AddedToBasket', 'Browsed'], 'customer_id': 'customer2'}]})
"""
result = {}
result['customer_id'] = row[0]
inner_dicts = row[1]
# customers that had empty interactions after filtering out test dataset.
if not inner_dicts['test']:
return
# customers that were not present in training data.
if not inner_dicts['train']:
return
test_dict = inner_dicts['test'][0]
result['skus_list'] = test_dict['skus_list']
result['actions_list'] = test_dict['actions_list']
train_dict = inner_dicts['train'][0]
result['trained_skus_list'] = train_dict['skus_list']
result['trained_actions_list'] = train_dict['actions_list']
return result
def read_input_data(args, pipeline, flag):
"""
Reads train and test pipelines.
args: input args.
pipeline: input pipeline where all transformations will take place.
flag: either train or test.
"""
if args.input_sql:
train_query = build_bq_query(args.input_sql, args.project,
args.train_init_date, args.train_end_date)
test_query = build_bq_query(args.input_sql, args.project,
args.test_init_date, args.test_end_date)
data = (
pipeline
| '{} read'.format(flag) >> beam.io.Read(beam.io.BigQuerySource(
query=train_query if flag == 'train' else test_query,
use_standard_sql=True)
)
)
else:
data = (
pipeline
| '{} read'.format(flag) >> beam.io.ReadFromText(
args.input_train_data if flag == 'train' else args.input_test_data
)
| '{} to json'.format(flag) >> beam.Map(lambda x: ast.literal_eval(x))
)
data = (
data
| '{} filter empty hits'.format(flag) >> beam.Filter(lambda x: x['hits'])
| '{} prepare customer grouping'.format(flag) >> beam.Map(lambda x: (
x['customer_id'],
[{'action': e['action'], 'sku': e['productSku']} for e in
x['hits'] if e['action'] in ['Browsed', 'AddedToBasket']])
)
| '{} group customers'.format(flag) >> beam.GroupByKey()
| '{} aggregate customers sessions'.format(flag) >> beam.Map(lambda x: (
x[0],
aggregate_customers_sessions(x[1]))
)
| '{} flatten'.format(flag) >> beam.ParDo(FlattenInteractionsFn())
)
return data
def write_total_distinct_keys_to_file(data, filename, key):
"""
Counts how many distinct items of "key" is present in data. Key here is either
sku or customer_id.
Args
----
data: pcollection.
filename: where to write results to.
key: on which value to count for.
"""
_ = (
data
| 'get {}'.format(key) >> beam.Map(lambda x: x[key])
| 'group {}'.format(key) >> beam.RemoveDuplicates()
| 'count {}'.format(key) >> beam.combiners.Count.Globally()
| 'write {}'.format(key) >> beam.io.WriteToText(filename)
)
def write_tfrecords(data, schema, filename, name):
"""
Converts input pcollection into a file of tfrecords following schema.
Args
----
data: pcollection.
schema: dataset_schema from tensorflow transform.
name: str to identify operations.
"""
_ = (
data
| '{} tfrecords write'.format(name) >> beam.io.tfrecordio.WriteToTFRecord(
filename,
coder=example_proto_coder.ExampleProtoCoder(dataset_schema.Schema(schema)))
)
def aggregate_transformed_data(transformed_data, flag):
"""
One of the final steps into our pipelining transformations where data that has
been transformed (in our case, skus went from string names to integer indices) is
aggregated on the user level.
transformed_data: pcollection.
flag: identifies train or test
Returns
-------
transformed_data aggregated on user level.
"""
if flag == 'test':
transformed_data = (
transformed_data
| 'test filter out invalid skus' >> beam.Filter(lambda x: x['sku'] != -1)
)
transformed_agg_data = (
transformed_data
| '{} prepare grouping'.format(flag) >> beam.Map(lambda x: (
x['customer_id'],
{'sku': x['sku'], 'action': x['action']})
)
| '{} transformed agg group'.format(flag) >> beam.GroupByKey()
| '{} final results'.format(flag) >> beam.Map(lambda x: build_final_results(x))
)
return transformed_agg_data
def aggregate_final_test_data(train_data, test_data):
"""
Joins the train dataset with the test one so that only customers we can make
recommendations for are present in the final dataset. Remember that, in order to
make them, we need to know a priori what customers interacted with. That's why we
join in the train data: it gives us each customer's known preferences when we need
to serve recommendations to them with our system.
"""
data = (
{
'train': train_data | 'train prepare customer key' >> beam.Map(lambda x: (
x['customer_id'], x)),
'test': test_data | 'test prepare customer key' >> beam.Map(lambda x: (
x['customer_id'], x))
}
| 'cogroup' >> beam.CoGroupByKey()
| 'build final rows' >> beam.Map(build_test_results)
| 'filter customers out of test' >> beam.Filter(lambda x: x)
)
return data
def run_tft_pipeline(args):
"""
This is where all the data we have available in our database is processed and
transformed into Tensorflow tfrecords for later training and testing.
The code runs in distributed manner automatically in the engine choosen by
the `runner` argument in input.
"""
pipeline_options = build_pipeline_options(args)
temp_tft_folder = (
tempfile.mkdtemp(dir='/tmp/') if not args.tft_temp else args.tft_temp
)
tft_transform_folder = (
tempfile.mkdtemp(dir='/tmp/') if not args.tft_transform else args.tft_transform
)
with beam.Pipeline(options=pipeline_options) as pipeline:
with beam_impl.Context(temp_dir=temp_tft_folder):
train_data = read_input_data(args, pipeline, 'train')
write_total_distinct_keys_to_file(train_data, args.nitems_filename,
'sku')
train_dataset = (train_data, metadata.RAW_DATA_METADATA)
(train_data, transformed_train_metadata), transform_fn = (
train_dataset | beam_impl.AnalyzeAndTransformDataset(preprocess_fn)
)
_ = (
transform_fn
| 'WriteTransformFn' >>
transform_fn_io.WriteTransformFn(tft_transform_folder)
)
train_data = aggregate_transformed_data(
train_data,
'train'
)
write_tfrecords(train_data, metadata.OUTPUT_TRAIN_SCHEMA,
args.output_train_filename,
'output train')
test_data = read_input_data(args, pipeline, 'test')
test_dataset = (test_data, metadata.RAW_DATA_METADATA)
(test_data, _) = (
(test_dataset, transform_fn) | beam_impl.TransformDataset())
test_data = aggregate_transformed_data(
test_data,
'test'
)
test_data = aggregate_final_test_data(
train_data,
test_data
)
write_tfrecords(test_data, metadata.OUTPUT_TEST_SCHEMA,
args.output_test_filename, 'output test')
def main():
args = parse_args()
run_tft_pipeline(args)
if __name__ == '__main__':
main()
| 31.661458 | 89 | 0.618358 | 1,450 | 12,158 | 4.991034 | 0.230345 | 0.017963 | 0.02128 | 0.015476 | 0.148404 | 0.11151 | 0.086638 | 0.07434 | 0.063562 | 0.052784 | 0 | 0.003408 | 0.276032 | 12,158 | 383 | 90 | 31.744125 | 0.818791 | 0.246833 | 0 | 0.122642 | 0 | 0 | 0.113934 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066038 | false | 0 | 0.084906 | 0.004717 | 0.207547 | 0.004717 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bf66439de1fb3ae4352db93cc562648f32d838f | 751 | py | Python | leetcode/p690.py | mythnc/lab | 9f69482a063e3cfce2ce8832c2ef1425658c31b9 | [
"MIT"
] | null | null | null | leetcode/p690.py | mythnc/lab | 9f69482a063e3cfce2ce8832c2ef1425658c31b9 | [
"MIT"
] | null | null | null | leetcode/p690.py | mythnc/lab | 9f69482a063e3cfce2ce8832c2ef1425658c31b9 | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/employee-importance/
"""
# Definition for Employee.
class Employee:
def __init__(self, id: int, importance: int, subordinates: List[int]):
self.id = id
self.importance = importance
self.subordinates = subordinates
"""
from typing import List  # the type hints below reference List

class Solution:
def getImportance(self, employees: List['Employee'], id: int) -> int:
table = {}
for e in employees:
table[e.id] = e
result = table[id].importance
q = []
q.append(table[id].subordinates)
while len(q) > 0:
ids = q.pop()
for id_ in ids:
result += table[id_].importance
q.append(table[id_].subordinates)
return result
| 28.884615 | 74 | 0.559254 | 82 | 751 | 5.036585 | 0.378049 | 0.067797 | 0.062954 | 0.11138 | 0.239709 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001961 | 0.320905 | 751 | 25 | 75 | 30.04 | 0.807843 | 0.356858 | 0 | 0 | 0 | 0 | 0.016842 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6bf727625ea059c4f0a7f91f758f96269dcdd254 | 3,916 | py | Python | python/ranking/examples/fess/train_model.py | codelibs/logana | 48b475e9fd5224821bfba7d41e755d8d64806651 | [
"Apache-2.0"
] | 2 | 2020-09-30T12:42:28.000Z | 2020-11-04T01:34:20.000Z | python/ranking/examples/fess/train_model.py | codelibs/logana | 48b475e9fd5224821bfba7d41e755d8d64806651 | [
"Apache-2.0"
] | null | null | null | python/ranking/examples/fess/train_model.py | codelibs/logana | 48b475e9fd5224821bfba7d41e755d8d64806651 | [
"Apache-2.0"
] | null | null | null | import dataclasses
import datetime
import gzip
import json
import logging
import os
from typing import Any, Dict
import numpy as np
import tensorflow as tf
from absl import flags
from loganary.ranking.common import NumpyJsonEncoder, setup_logging, setup_seed
from loganary.ranking.model import (
RankingModel,
RankingModelConfig,
RankingModelEmbeddingField,
RankingModelField,
)
flags.DEFINE_string("train_path", None, "Path of .tfrecords file for training.")
flags.DEFINE_string("eval_path", None, "Path of .tfrecords file for evaluation.")
flags.DEFINE_string("keyword_path", None, "Path of vocabulary file for keyword field.")
flags.DEFINE_string("title_path", None, "Path of vocabulary file for title field.")
flags.DEFINE_string("model_path", None, "Path of trained model files.")
flags.DEFINE_integer("num_train_steps", 15000, "The number of train steps.")
flags.DEFINE_list("hidden_layer_dims", ["64", "32", "16"], "Sizes for hidden layers.")
flags.DEFINE_integer(
"keyword_embedding_dim", 20, "Dimention of an embedding for keyword field."
)
flags.DEFINE_integer(
"title_embedding_dim", 20, "Dimention of an embedding for title field."
)
flags.DEFINE_integer("batch_size", 32, "Batch size.")
flags.DEFINE_integer("list_size", 100, "List size.")
flags.DEFINE_float("learning_rate", 0.05, "Learning rate.")
flags.DEFINE_integer("group_size", 10, "Group size.")
flags.DEFINE_float("dropout_rate", 0.8, "Dropout rate.")
flags.DEFINE_bool("verbose", False, "Set a logging level as debug.")
FLAGS = flags.FLAGS
logger = logging.getLogger(__name__)
def main(_) -> None:
    setup_seed()
    setup_logging(FLAGS.verbose)

    now_str = datetime.datetime.now().strftime("%Y%m%d%H%M")
    model_path: str = f"{FLAGS.model_path}/{now_str}"
    config: RankingModelConfig = RankingModelConfig(
        model_path=model_path,
        train_path=FLAGS.train_path,
        eval_path=FLAGS.eval_path,
        context_fields=[
            RankingModelEmbeddingField(
                name="keyword",
                vocabulary_file=FLAGS.keyword_path,
                dimension=FLAGS.keyword_embedding_dim,
            ),
        ],
        example_fields=[
            RankingModelEmbeddingField(
                name="title",
                vocabulary_file=FLAGS.title_path,
                dimension=FLAGS.title_embedding_dim,
            ),
        ],
        label_field=RankingModelField(
            name="relevance",
            column_type="numeric",
            default_value=-1,
        ),
        num_train_steps=FLAGS.num_train_steps,
        hidden_layer_dims=FLAGS.hidden_layer_dims,
        batch_size=FLAGS.batch_size,
        list_size=FLAGS.list_size,
        learning_rate=FLAGS.learning_rate,
        group_size=FLAGS.group_size,
        dropout_rate=FLAGS.dropout_rate,
    )
    logger.info(f"Config: {config}")

    model: RankingModel = RankingModel(config)
    result = model.train()
    logger.info(f"Result: {result}")

    export_model_path: str = model.save_model()
    saved_model_path: str = f"{model_path}/saved_model"
    os.rename(export_model_path, saved_model_path)
    logger.info(f"Output Model Path: {saved_model_path}")

    with gzip.open(f"{model_path}/result.json.gz", mode="wt", encoding="utf-8") as f:
        config_dict: Dict[str, Any] = dataclasses.asdict(config)
        del config_dict["eval_metric"]
        f.write(
            json.dumps(
                {
                    "config": config_dict,
                    "result": result,
                },
                ensure_ascii=False,
                cls=NumpyJsonEncoder,
            )
        )
if __name__ == "__main__":
flags.mark_flag_as_required("train_path")
flags.mark_flag_as_required("eval_path")
flags.mark_flag_as_required("keyword_path")
flags.mark_flag_as_required("title_path")
flags.mark_flag_as_required("model_path")
tf.compat.v1.app.run()
| 34.350877 | 87 | 0.671348 | 484 | 3,916 | 5.163223 | 0.293388 | 0.066026 | 0.043217 | 0.028011 | 0.186074 | 0.123249 | 0.080032 | 0.031212 | 0 | 0 | 0 | 0.009785 | 0.217058 | 3,916 | 113 | 88 | 34.654867 | 0.805284 | 0 | 0 | 0.088235 | 0 | 0 | 0.223442 | 0.025536 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009804 | false | 0 | 0.117647 | 0 | 0.127451 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d40116f3963560e5bc86c79d514d9d85b3020138 | 22,630 | py | Python | build-fortress.py | DFIRmadness/infosec-fortress | cc20c5c5ecf5194fdd270e7accdf927b71ed2952 | [
"MIT"
] | 33 | 2021-06-22T01:42:06.000Z | 2022-03-27T14:41:44.000Z | build-fortress.py | ED-209-MK7/infosec-fortress | cc20c5c5ecf5194fdd270e7accdf927b71ed2952 | [
"MIT"
] | 1 | 2021-06-24T09:10:03.000Z | 2021-06-28T13:25:59.000Z | build-fortress.py | ED-209-MK7/infosec-fortress | cc20c5c5ecf5194fdd270e7accdf927b71ed2952 | [
"MIT"
] | 5 | 2021-06-23T08:04:22.000Z | 2022-03-27T14:41:45.000Z | #!/bin/python3
'''
Title: build-fortress.py
Purpose: Build the infosec-fortress
Author: James Smith (DFIRmadness)
Contributors: Check the github page.
Notes: Beta
Version: 0.5
Usage: ./build-fortress.py
Functions:
+ apt update
+ dist upgrade
+ install base packages
+ create /opt/infosec-fortress
+ start log
+ install starter packages (min. pkgs to let script run)
+ install the REMnux Distribution
+ install SIFT
+ install base security packages
+ install Metasploit Framework
+ install wordlists
+ install and update exploitdb (searchsploit)
+ log2Timeline
+ elasticsearch containers
+ powershell Core (turns out its part of REMnux)
+ install impacket
+ install enum4linux
+ enum4linux https://github.com/cddmp/enum4linux-ng
+ display message about updating ZAP and Burp after reboot
'''
# Globals
PKG_MGR = 'apt'
FORTRESS_DIR = '/opt/infosec-fortress/'
BUILD_LOG = 'build-fortress.log'
LOG = FORTRESS_DIR + BUILD_LOG
# Minimal Package list to get started
starterPackagesList = [
    'net-tools',
    'curl',
    'git'
]
# List of packages to have APT install. Change if you want. You break it you buy it.
aptPackageList = [
    'tmux',
    'torbrowser-launcher',
    'nmap',
    'smbclient',
    'locate',
    'radare2-cutter',
    'snort',
    'dirb',
    'gobuster',
    'medusa',
    'masscan',
    'whois',
    'libjenkins-htmlunit-core-js-java',
    'autopsy',
    'hashcat',
    'kismet',
    'kismet-plugins',
    'airgraph-ng',
    'wifite',
    'dnsenum',
    'dnsmap',
    'ettercap-common',
    'ettercap-graphical',
    'netdiscover',
    'sqsh',
    'nfs-common'
]
# List of packages to have SNAP install. Change if you want. You break it you buy it.
snapPackageList = [
    'chromium',
    'sqlmap',
    'john-the-ripper'
]

# Snaps that need --classic
# Avoid these. It's better to scrape a git for the latest and install. Zaproxy is a great example.
snapClassicPackageList = [
    #'zaproxy'
]
########################################################
# Colors
GREEN = '\033[32m'
RED = '\033[31m'
YELLOW = '\033[33m'
NOCOLOR = '\033[m'
from datetime import datetime
from getpass import getpass
from hashlib import sha1
from os import geteuid,path,makedirs
from os.path import expanduser
from subprocess import run
from urllib.request import urlopen
from requests import get
from re import search
# Check that the user is root
def checkIfRoot():
    if geteuid() != 0:
        print(RED + '[!] You need sudo/root permissions to run this... exiting.' + NOCOLOR)
        exit(0)

# Check for internet connection
def checkForInternet():
    try:
        check = urlopen('https://www.google.com', timeout=3.0)
        print(GREEN + '[+] Internet connection looks good!' + NOCOLOR)
    except:
        print(RED + '[-] Internet connection looks down. You will need internet for this to run (most likely). Fix and try again.' + NOCOLOR)
        exit(1)

def initNotice():
    print('[!] This script requires user input once or twice.\n\
[!] It is not completely "Set and Forget".')
    nullInput = input('Hit Enter.')
# Get starting Disk Room
def freeSpaceStart():
    # Needs regex improvement with re.search. Non-gig-sized systems will break this.
    global FREE_SPACE_START_INT
    freeSpaceStart = run(['df -h /'], shell=True, capture_output=True).stdout.decode().split('G')[2].strip()
    writeToLog('[i] Gigs of Free Space on / at the Start of the build: ' + freeSpaceStart + 'G')
    FREE_SPACE_START_INT = float(freeSpaceStart)
    return(FREE_SPACE_START_INT)

def freeSpaceEnd():
    # Needs regex improvement with re.search. Non-gig-sized systems will break this.
    freeSpaceEnd = run(['df -h /'], shell=True, capture_output=True).stdout.decode().split('G')[2].strip()
    writeToLog('[i] Gigs of Free Space on / at the End of the build: ' + freeSpaceEnd + 'G')
    freeSpaceEndInt = float(freeSpaceEnd)
    spaceUsed = FREE_SPACE_START_INT - freeSpaceEndInt
    writeToLog('[i] Gigs of Space used for InfoSec-Fortress Buildout: ' + str(spaceUsed) + 'G')
# Check/Inform about for unattended upgrade
def informAboutUnattendedUpgade():
    print('[!][!][!][!][!][!][!][!]\nUnattended Upgrades firing while this script is running will break it.\
\nKill or complete the upgrades if you recently booted or rebooted. Then continue.\
\nIT MAY REQUIRE A REBOOT! If so, kill this script. Reboot. Run the updates. Run this script again.')
    nullInput = input('Hit any key to continue.')
def createFortressDir(FORTRESS_DIR):
    print('[*] Creating InfoSec Fortress Dir at:', FORTRESS_DIR)
    try:
        makedirs(FORTRESS_DIR, exist_ok=True)
    except FileExistsError:
        print('[i] ' + FORTRESS_DIR + ' already exists. Continuing.')
    except Exception as e:
        print('[-] Error creating the ' + FORTRESS_DIR + '. Error ' + str(e))

def startLogFile():
    try:
        now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
        if not path.isfile(LOG):
            with open(LOG, 'a') as log:
                log.write(now + " - Log Started.\n")
            return('Succeeded')
        else:
            with open(LOG, 'a') as log:
                log.write(now + " - Log Started. Strange, the log file appears to exist already? Continuing anyways.\n")
            return('Succeeded')
    except:
        return('Failed')
        # For now just simply exit here
        exit(1)
def writeToLog(stringToLog):
    now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
    with open(LOG, 'a') as log:
        log.write(now + " - " + stringToLog + '\n')
    if '[+]' in stringToLog:
        print('\n' + GREEN + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n')
    elif '[-]' in stringToLog:
        print('\n' + RED + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n')
    elif '[i]' in stringToLog:
        print('\n' + YELLOW + stringToLog + NOCOLOR + '\n----------------------------------------------------------\n')
    else:
        print('\n' + stringToLog + '\n----------------------------------------------------------\n')
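# Example (illustrative): the tag prefix selects the console color --
# '[+]' prints green, '[-]' red, '[i]' yellow, anything else uncolored:
#   writeToLog('[+] Example step finished.')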
def buildStarterPackageList():
    listOfPackagesCommand = ''
    for package in starterPackagesList:
        listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip()
    return(listOfPackagesCommand)

def buildAptPackageList():
    listOfPackagesCommand = ''
    for package in aptPackageList:
        listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip()
    return(listOfPackagesCommand)

def buildSnapPackageList():
    listOfPackagesCommand = ''
    for package in snapPackageList:
        listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip()
    return(listOfPackagesCommand)

def buildSnapClassicPackagesList():
    listOfPackagesCommand = ''
    for package in snapClassicPackageList:
        listOfPackagesCommand = (listOfPackagesCommand + ' ' + package).strip()
    return(listOfPackagesCommand)
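# Illustrative output of the builders above (deterministic for the default lists):
#   buildStarterPackageList()  ->  'net-tools curl git'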
# apt update
def updateOS():
    #writeToLog('[+] Beginning OS updates...')
    try:
        run(['/usr/bin/apt', 'update'])
    except Exception as e:
        writeToLog('[-] APT update failed. Fix and try again. Error: ' + str(e))
        exit(1)
    try:
        run(['/usr/bin/apt', 'upgrade', '-y'])
    except Exception as e:
        writeToLog('[-] APT upgrade failed. Fix and try again. Error: ' + str(e))
        exit(1)
    try:
        run(['/usr/bin/apt', 'dist-upgrade', '-y'])
    except Exception as e:
        writeToLog('[-] APT dist-upgrade failed. Fix and try again. Error: ' + str(e))
        exit(1)
# Minimal packages
def installStarterPackages():
    starterPackages = buildStarterPackageList()
    writeToLog('[*] Attempting installation of the following starter packages: ' + starterPackages)
    try:
        run(['/usr/bin/apt install -y ' + starterPackages], shell=True)
        writeToLog('[+] Starter Packages installed.')
    except Exception as e:
        writeToLog('[-] Starter Packages installation failed: ' + str(e))
# the REMnux Distribution
def installREMnux():
    writeToLog('[*] Installing REMnux. This will take quite awhile. Verify the hash from the site later.')
    try:
        run(['/usr/bin/wget https://REMnux.org/remnux-cli'], shell=True)
        run(['/usr/bin/mv remnux-cli remnux'], shell=True)
        run(['/usr/bin/chmod +x remnux'], shell=True)
        run(['/usr/bin/mv remnux /usr/local/bin'], shell=True)
        run(['/usr/local/bin/remnux install --mode=addon'], shell=True)
        writeToLog('[+] REMnux Added On (downloaded and ran).')
    except Exception as e:
        writeToLog('[-] Something went wrong during the REMnux install. Error: ' + str(e))
# Install SIFT
def installSIFTPackages():
    writeToLog('[*] Finding latest SIFT Release.')
    try:
        latestLinkPage = get('https://github.com/sans-dfir/sift-cli/releases/latest').text.splitlines()
        latestSIFTBinLine = [match for match in latestLinkPage if "sift-cli-linux" in match][0].split('"')[1]
        latestSIFTBin = 'https://github.com/' + latestSIFTBinLine
        #latestSIFTBin = search('https:.*sift-cli-linux',latestSIFTBinLine)[0]
        writeToLog('[+] latest SIFT BIN: ' + latestSIFTBin)
    except Exception as e:
        writeToLog('[-] latest SIFT Bin not found. Error: ' + str(e))
        return
    writeToLog('[*] Installing SIFT Packages.')
    try:
        run(['/usr/bin/curl -Lo /usr/local/bin/sift ' + latestSIFTBin], shell=True)
        run(['/usr/bin/chmod +x /usr/local/bin/sift'], shell=True)
        run(['/usr/local/bin/sift install --mode=packages-only'], shell=True)
        writeToLog('[+] SIFT Packages installed (downloaded and ran).')
    except Exception as e:
        writeToLog('[-] Installation of SIFT Packages had an error. Error: ' + str(e))
# install base packages
def installAPTandSNAPPackages():
    print('[i] If Wireshark asks - say YES non-super users can capture packets.\n\n\
[i] When snort asks about a monitoring interface enter lo.\n\
[i] Setting the interface to "lo" (no quotes) sets it for local use.\n\
[i] Set any private network for the "home" network.\n\n\
[i] KISMET - Say YES to the sticky bit. Add your username to the Kismet Group at the prompt.')
    nullInput = input('Hit Enter.')
    aptPackages = buildAptPackageList()
    snapPackages = buildSnapPackageList()
    snapClassicPackages = buildSnapClassicPackagesList()
    writeToLog('[*] Attempting installation of the following APT packages: ' + aptPackages)
    try:
        run(['/usr/bin/apt install -y ' + aptPackages], shell=True)
        writeToLog('[+] APT Packages installed.')
    except Exception as e:
        writeToLog('[-] APT Packages installation failed: ' + str(e))
    writeToLog('[*] Attempting installation of the following Snap Packages: ' + snapPackages)
    try:
        run(['/usr/bin/snap install ' + snapPackages], shell=True)
        writeToLog('[+] Snap Packages installed.')
    except Exception as e:
        writeToLog('[-] Snap packages installation failed: ' + str(e))
    if len(snapClassicPackages) == 0:
        writeToLog('[*] No snap classics to install.')
        return
    writeToLog('[*] Attempting installation of the following Snap Classic Packages: ' + snapClassicPackages)
    for package in snapClassicPackageList:
        try:
            run(['/usr/bin/snap install --classic ' + package], shell=True)
            writeToLog('[+] Snap Classic ' + package + ' installed.')
        except Exception as e:
            writeToLog('[-] Snap package ' + package + ' failed: ' + str(e))
# Swap Netcats
# Change out netcat-bsd for netcat-traditional
def swapNetcat():
    writeToLog('[*] Attempting to trade out netcat-bsd for netcat-traditional')
    try:
        run(['/usr/bin/apt purge -y netcat-openbsd'], shell=True)
        run(['/usr/bin/apt install -y netcat-traditional'], shell=True)
        writeToLog('[+] netcat-traditional installed.')
    except Exception as e:
        writeToLog('[-] Installation of netcat-traditional failed. Error: ' + str(e))
# Metasploit Framework
def installMSF():
    writeToLog('[*] Installing Metasploit Framework.')
    try:
        run(['/usr/bin/curl https://raw.githubusercontent.com/rapid7/metasploit-omnibus/master/config/templates/metasploit-framework-wrappers/msfupdate.erb > msfinstall'], shell=True)
        run(['/usr/bin/chmod 755 msfinstall'], shell=True)
        run(['./msfinstall'], shell=True)
        writeToLog('[+] MSF Installed Successfully.')
    except Exception as e:
        writeToLog('[-] Something went wrong during the MSF install. Error: ' + str(e))
# Install wordlists
# Git clone the default wordlists
# Add Rockyou2021
# Add fuzzing list for burp/SQLI (xplatform.txt)
def installWordlists():
    # Error handling using git in this way (with run) sucks.
    writeToLog('[*] Installing Wordlists to /usr/share/wordlists')
    makedirs('/usr/share/wordlists/', exist_ok=True)
    try:
        run(['/usr/bin/git clone https://github.com/3ndG4me/KaliLists.git /usr/share/wordlists/'], shell=True)
        run(['/usr/bin/rm /usr/share/wordlists/README.md'], shell=True)
        run(['/usr/bin/gunzip /usr/share/wordlists/rockyou.txt.gz'], shell=True)
        writeToLog('[+] Kali default wordlists added and unpacked.')
    except Exception as e:
        writeToLog('[-] There was an error installing Kali default wordlists. Error: ' + str(e))
    try:
        run(['/usr/bin/wget https://raw.githubusercontent.com/fuzzdb-project/fuzzdb/master/attack/sql-injection/detect/xplatform.txt \
            -O /usr/share/wordlists/xplatform.txt'], shell=True)
        writeToLog('[+] Xplatform.txt SQLI Validation list added.')
    except Exception as e:
        writeToLog('[-] There was an error adding xplatform.txt. Error: ' + str(e))
#Install exploit-db
def installExploitDb():
    writeToLog('[*] Installing ExploitDB.')
    try:
        run(['/usr/bin/git clone https://github.com/offensive-security/exploitdb.git /opt/exploitdb'], shell=True)
        run(['/usr/bin/ln -sf /opt/exploitdb/searchsploit /usr/local/bin/searchsploit'], shell=True)
        writeToLog('[+] Exploit DB Added.')
    except Exception as e:
        writeToLog('[-] There was an error installing ExploitDB. Error: ' + str(e))
    try:
        writeToLog('[*] Updating ExploitDB...')
        run(['/usr/local/bin/searchsploit -u'], shell=True)
        writeToLog('[+] Exploit DB Updated.')
    except Exception as e:
        writeToLog('[-] There was an error updating ExploitDB. Error: ' + str(e))
# elasticsearch containers?
# powershell Core
# REMnux already installs it.
#def installPosh():
# writeToLog('[*] Installing Powershell.')
# try:
# run(['/usr/bin/apt-get update\
# && /usr/bin/apt-get install -y wget apt-transport-https software-properties-common\
# && /usr/bin/wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb\
# && /usr/bin/dpkg -i packages-microsoft-prod.deb\
# && /usr/bin/apt-get update\
# && /usr/bin/add-apt-repository universe\
# && /usr/bin/apt-get install -y powershell'],shell=True)
# writeToLog('[+] Powershell installed.')
# except Exception as e:
# writeToLog('[-] There was an error installing Powershell. Error: ' + str(e))
# Install Impacket
def installImpacket():
    writeToLog('[*] Installing Impacket.')
    try:
        run(['/usr/bin/git clone https://github.com/SecureAuthCorp/impacket.git /opt/impacket'], shell=True)
        run(['/usr/bin/python3 -m pip install /opt/impacket/.'], shell=True)
        # It seems that it takes running this twice to get it to complete
        run(['/usr/bin/python3 -m pip install /opt/impacket/.'], shell=True)
        writeToLog('[+] Impacket Installed.')
    except Exception as e:
        writeToLog('[-] There was an error installing Impacket. Error: ' + str(e))
# enum4Linux
def installEnum():
    writeToLog('[*] Installing Enum4Linux.')
    try:
        run(['/usr/bin/git clone https://github.com/CiscoCXSecurity/enum4linux.git /opt/enum4linux'], shell=True)
        run(['/usr/bin/ln -sf /opt/enum4linux/enum4linux.pl /usr/local/bin/enum4linux.pl'], shell=True)
        writeToLog('[+] Enum4Linux Installed.')
    except Exception as e:
        writeToLog('[-] There was an error installing Enum4Linux. Error: ' + str(e))

# enum4linux-ng
def installEnumNG():
    writeToLog('[*] Installing Enum4Linux-ng.')
    try:
        run(['/usr/bin/git clone https://github.com/cddmp/enum4linux-ng /opt/enum4linux-ng'], shell=True)
        run(['/usr/bin/ln -sf /opt/enum4linux-ng/enum4linux-ng.py /usr/local/bin/enum4linux-ng.py'], shell=True)
        writeToLog('[+] Enum4Linux-ng Installed.')
    except Exception as e:
        writeToLog('[-] There was an error installing Enum4Linux-ng. Error: ' + str(e))
# Install WebShells
def installWebShells():
    writeToLog('[*] Installing Kali\'s Webshells')
    try:
        run(['/usr/bin/git clone https://gitlab.com/kalilinux/packages/webshells.git /usr/share/webshells'], shell=True)
        writeToLog('[+] Kali\'s WebShells Cloned to /usr/share/webshells')
    except Exception as e:
        writeToLog('[-] There was an error installing Kali\'s webshells. Error: ' + str(e))

# Install Windows Resources
def installWindowsResources():
    writeToLog('[*] Installing Kali\'s Windows Resources')
    try:
        run(['/usr/bin/git clone https://gitlab.com/kalilinux/packages/windows-binaries.git /usr/share/windows-resources'], shell=True)
        writeToLog('[+] Kali\'s Windows Resources Cloned to /usr/share/windows-resources')
    except Exception as e:
        writeToLog('[-] There was an error installing Kali\'s Windows resources. Error: ' + str(e))
# Install Bloodhound
def installBloodhound():
    writeToLog('[*] Finding latest Blood Hound Release.')
    try:
        latestLinkPage = get('https://github.com/BloodHoundAD/BloodHound/releases/latest').text.splitlines()
        latestBloodHoundZip = [match for match in latestLinkPage if "BloodHound-linux-x64.zip" in match][0].split('"')[1]
        writeToLog('[+] latest Blood Hound Zip at: ' + latestBloodHoundZip)
    except Exception as e:
        writeToLog('[-] latest Blood Hound Zip not found. Error: ' + str(e))
        return
    writeToLog('[*] Installing Bloodhound...')
    try:
        run(['/usr/bin/curl -Lo /tmp/bloodhound.zip https://github.com' + latestBloodHoundZip], shell=True)
        run(['/usr/bin/unzip -o /tmp/bloodhound.zip -d /opt/'], shell=True)
    except Exception as e:
        writeToLog('[-] Bloodhound not installed. Error: ' + str(e))

# Find and install latest Zaproxy
def installZaproxy():
    writeToLog('[*] Finding latest Zaproxy Release.')
    try:
        latestLinkPage = get('https://github.com/zaproxy/zaproxy/releases/latest').text.splitlines()
        latestZapDeb = [match for match in latestLinkPage if "_all.deb" in match][0].split('"')[1]
        writeToLog('[+] latest Zaproxy Zip at: ' + latestZapDeb)
    except Exception as e:
        writeToLog('[-] latest Zaproxy Zip not found. Error: ' + str(e))
        return
    writeToLog('[*] Installing Zaproxy...')
    try:
        run(['/usr/bin/curl -Lo /tmp/zaproxy.deb ' + latestZapDeb], shell=True)
        run(['/usr/bin/dpkg -i /tmp/zaproxy.deb'], shell=True)
    except Exception as e:
        writeToLog('[-] Zaproxy not installed. Error: ' + str(e))
def installZeek():
    # Install Zeek
    writeToLog('[*] Installing Zeek...')
    try:
        run(['/usr/bin/echo \'deb http://download.opensuse.org/repositories/security:/zeek/xUbuntu_20.04/ /\' | sudo tee /etc/apt/sources.list.d/security:zeek.list'], shell=True)
        run(['/usr/bin/curl -fsSL https://download.opensuse.org/repositories/security:zeek/xUbuntu_20.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/security_zeek.gpg > /dev/null'], shell=True)
        run(['/usr/bin/apt update'], shell=True)
        run(['/usr/bin/apt -y install zeek'], shell=True)
    except Exception as e:
        writeToLog('[-] Zeek not installed. Error: ' + str(e))
    # Add /opt/zeek/bin to the path permanently
    try:
        writeToLog('[i] Writing Zeek\'s path to the current user\'s bashrc. You may need to manually add: \'export PATH=$PATH:/opt/zeek/bin\' to yours.')
        run(['/usr/bin/echo "export PATH=$PATH:/opt/zeek/bin" >> ~/.bashrc'], shell=True)
        # Note: this export only affects the short-lived child shell, not this
        # process or the user's session; the .bashrc line above is what persists.
        run(['export PATH=$PATH:/opt/zeek/bin'], shell=True)
    except Exception as e:
        writeToLog('[-] Zeek path not added. Error: ' + str(e))
# display log
def displayLog():
    print('[*] The following activities were logged:\n')
    with open(LOG, 'r') as log:
        allLines = log.readlines()
        for line in allLines:
            print(line.strip())

# display fortress artwork
def displayImage():
    try:
        run(['/usr/bin/curl -Lo ' + FORTRESS_DIR + 'fortress.jpg https://dfirmadness.com/wp-content/uploads/2021/06/infosec-fortress-2500.jpg'], shell=True)
        run(['/usr/bin/eog ' + FORTRESS_DIR + 'fortress.jpg'], shell=True)
        run(['/usr/bin/rm ' + FORTRESS_DIR + 'fortress.jpg'], shell=True)
    except:
        return
# display message about updating ZAP and Burp after reboot
def giveUserNextSteps():
    print(GREEN + '[+]' + '-----------------------------------------------------------------------------------' + NOCOLOR)
    print(GREEN + '[+]' + '------------------------ ! Script Complete ! --------------------------------------' + NOCOLOR)
    print('\n\n[!] REBOOT the system. After Reboot you will want to run Burp, Zap and Ghidra. Each will ask you to update.\
\n    You should update these. If they have you download a .deb file you simply run ' + GREEN + 'dpkg -i foo.deb' + NOCOLOR + '.\
\n    Don\'t forget to run: \'echo "export PATH=$PATH:/opt/zeek/bin" >> ~/.bashrc\' to add the Zeek bins to your user (non-root) path')
    nullInput = input('Hit Enter.')
# Re-enable unattended upgrade
#Only needed if auto kill of unattended upgrades is added
def main():
    checkIfRoot()
    checkForInternet()
    initNotice()
    informAboutUnattendedUpgade()
    createFortressDir(FORTRESS_DIR)
    startLogFile()
    freeSpaceStart()
    updateOS()
    installStarterPackages()
    installREMnux()
    installSIFTPackages()
    installAPTandSNAPPackages()
    swapNetcat()
    installMSF()
    installWordlists()
    installExploitDb()
    installImpacket()
    installEnum()
    installEnumNG()
    installWebShells()
    installWindowsResources()
    installBloodhound()
    installZaproxy()
    installZeek()
    freeSpaceEnd()
    displayLog()
    displayImage()
    giveUserNextSteps()
    exit(0)

if __name__ == "__main__":
    main()
| 40.996377 | 210 | 0.644057 | 2,677 | 22,630 | 5.428838 | 0.220022 | 0.021056 | 0.027868 | 0.035918 | 0.369573 | 0.328287 | 0.25824 | 0.1895 | 0.163628 | 0.121861 | 0 | 0.005682 | 0.198939 | 22,630 | 551 | 211 | 41.07078 | 0.796006 | 0.136677 | 0 | 0.26699 | 0 | 0.046117 | 0.376436 | 0.050796 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082524 | false | 0.002427 | 0.021845 | 0 | 0.116505 | 0.043689 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4051d0a2b00e9998b74b852734dd381524320f3 | 955 | py | Python | substance/constants.py | philraj/substance | c68c8343e22fd2ac1e83b7567140c2a20f417984 | [
"Apache-2.0"
] | null | null | null | substance/constants.py | philraj/substance | c68c8343e22fd2ac1e83b7567140c2a20f417984 | [
"Apache-2.0"
] | null | null | null | substance/constants.py | philraj/substance | c68c8343e22fd2ac1e83b7567140c2a20f417984 | [
"Apache-2.0"
] | null | null | null |
class Constants(object):
    class ConstError(TypeError):
        pass

    def __init__(self, **kwargs):
        for name, value in list(kwargs.items()):
            super(Constants, self).__setattr__(name, value)

    def __setattr__(self, name, value):
        if name in self.__dict__:
            raise self.ConstError("Can't rebind const(%s)" % name)
        self.__dict__[name] = value

    def __delattr__(self, name):
        if name in self.__dict__:
            raise self.ConstError("Can't unbind const(%s)" % name)
        raise NameError(name)
Tables = Constants(
    BOXES="boxes"
)

DefaultEngineBox = 'turbulent/substance-box:1.0'

EngineStates = Constants(
    RUNNING="running",
    STOPPED="stopped",
    SUSPENDED="suspended",
    UNKNOWN="unknown",
    INEXISTENT="inexistent"
)

Syncher = Constants(
    UP=">>",
    DOWN="<<",
    BOTH="<>"
)

Orchestrators = Constants(
    DOCKWRKR="dockwrkr",
    COMPOSE="docker-compose"
)
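# Usage sketch (illustrative) showing the rebind guard above:
#   EngineStates.RUNNING        ->  "running"
#   EngineStates.RUNNING = "x"  ->  raises ConstError("Can't rebind const(RUNNING)")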
| 20.76087 | 66 | 0.618848 | 102 | 955 | 5.519608 | 0.509804 | 0.063943 | 0.042629 | 0.042629 | 0.081705 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00277 | 0.243979 | 955 | 45 | 67 | 21.222222 | 0.777008 | 0 | 0 | 0 | 0 | 0 | 0.150943 | 0.028302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0.029412 | 0 | 0 | 0.147059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4051f0da6a3085bed81c035499b45d737816f30 | 12,482 | py | Python | scripts/identify_taxonomic_trees.py | AtilioA/wikidata-evaluation-based-on-ontologies | b726cc40a80312e92e7aa42fc24f1eee21bc40be | [
"Unlicense"
] | 2 | 2020-12-06T21:57:36.000Z | 2020-12-11T16:07:00.000Z | scripts/identify_taxonomic_trees.py | AtilioA/wikidata-evaluation-based-on-ontologies | b726cc40a80312e92e7aa42fc24f1eee21bc40be | [
"Unlicense"
] | null | null | null | scripts/identify_taxonomic_trees.py | AtilioA/wikidata-evaluation-based-on-ontologies | b726cc40a80312e92e7aa42fc24f1eee21bc40be | [
"Unlicense"
] | null | null | null | import time
import json
import sys
from pathlib import Path
from pprint import pprint
import wikidata_utils
from graphviz import Digraph
NL = "\n"
def find_subclasses_between(subclass, superclass):
    # Query Stardog for subclasses
    subclassesJSON = wikidata_utils.query_subclasses_stardog(superclass, subclass)[
        "results"
    ]["bindings"]
    subclassesList = []
    try:
        # Parse JSON for results
        subclassesList = [result["entity"]["value"] for result in subclassesJSON]
        # Look for QID in all the strings
        subclassesList = wikidata_utils.regex_match_QID(subclassesList)
    except:
        pass
    print(f"Subclasses between '{subclass}' and '{superclass}':\n{subclassesList}")
    # print(subclassLabels)
    try:
        # Remove superclass from the list (it is included by SPARQL)
        subclassesList.remove(superclass)
    except:
        pass
    # Return reversed list so we can use it immediately in the right order with graphviz
    return list(reversed(subclassesList))
def graph_from_superclasses_dict(treesDictFilename, **kwargs):
    # PROBLEM: Given a dictionary with entities, their superclasses and subclasses, create a "maximal" graph that displays the relation between entities
    dotsTime = int(time.time())
    # Optional argument; if it exists, will include only entities from the ranking
    rankingEntities = kwargs.get("rankingEntities", None)
    useRandomColors = kwargs.get("useRandomColors", None)
    remainingEntities = set(rankingEntities) if rankingEntities else set()
    totalEntities = len(remainingEntities)
    with open(Path(treesDictFilename), "r+", encoding="utf8") as dictFile:
        entitiesDict = json.load(dictFile)
        # Filter out entities without any subclasses in the ranking
        # Entities of interest here are entities without superclasses or whose superclasses are themselves
        entitiesDict = dict(
            filter(
                lambda x: x[1]["subclasses"] != []
                and (x[1]["superclasses"] == [] or [x[0]] == x[1]["superclasses"]),
                entitiesDict.items(),
            )
        )
        keepEntity = "1"
        keptDict = {}
        pprint(entitiesDict.keys())
        while len(keepEntity) > 0:
            if not keptDict:
                keepEntity = input("What entity to generate graphs for? [Enter] for All: ")
            else:
                keepEntity = input("What entity to generate graphs for? [Enter] to leave: ")
            if keepEntity:
                kept = entitiesDict.pop(keepEntity)
                keptDict[keepEntity] = kept
            else:
                break
            print(f"Kept {keepEntity}")
        if keptDict:
            entitiesDict = keptDict
        # Number of entities to be processed
        print(f"{len(entitiesDict)} superclasses")
        nodesDict = {}
        for entity in entitiesDict.items():
            # Get label for each main entity
            entityLabel = wikidata_utils.get_entity_label(entity[0])
            nSubclasses = len(entity[1]["subclasses"])
            print(f"\nBuilding graph for {entity[0]} ({entityLabel}).")
            print(f"{entityLabel.capitalize()} has at least {nSubclasses} subclasses from the ranking.\n")
            # Create graph for each main entity
            nodesep = "0.1"
            ranksep = "0.5"
            if nSubclasses > 50:
                nodesep = "0.15"
                ranksep = "1"
            dot = Digraph(
                comment=entityLabel,
                strict=True,
                encoding="utf8",
                graph_attr={"nodesep": nodesep, "ranksep": ranksep, "rankdir": "BT"},
            )
            # Create a bigger node for each main entity
            dot.node(f"{entityLabel}\n{entity[0]}", fontsize="24")
            # Add entity QID to nodes' dict
            nodesDict[entity[0]] = True
            print(
                f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far."
            )
            for subclass in entity[1]["subclasses"]:
                # Get label for each subclass
                subclassLabel = wikidata_utils.get_entity_label(subclass)
                # If label is unavailable, use ID
                if subclassLabel != "Label unavailable":
                    subclassNodeLabel = f"{subclassLabel}\n{subclass}"
                else:
                    subclassNodeLabel = subclass
                print(
                    f'Finding subclasses between "{subclassLabel}" and "{entityLabel}"...'
                )
                # Get random color for nodes and edges
                argsColor = "#111111"
                if useRandomColors:
                    argsColor = wikidata_utils.random_color_hex()
                edgeLabel = None
                if not nodesDict.get(subclass, False):
                    # Create subclass node
                    dot.node(f"{subclassLabel}\n{subclass}", color=argsColor)
                    # Add subclass QID to nodes' dict
                    nodesDict[subclass] = True
                # Query intermediary entities between "subclass" and "entity" (returns ordered list)
                subclassesBetween = find_subclasses_between(subclass, entity[0])
                # Default styling for intermediary subclasses
                subclassNodeArgs = {
                    "shape": "square",
                    "color": "#777777",
                    "fontsize": "10",
                    "fontcolor": "#555555",
                }
                # remainingEntitiesLastIteration = {totalEntities - len(remainingEntities)}
                if rankingEntities:
                    # Filter out subclasses that aren't from the ranking
                    subclassesBetween = {
                        subclass: True
                        for subclass in subclassesBetween
                        if subclass in rankingEntities
                    }
                    print(f"Subclasses between: {subclassesBetween}")
                    # Use no particular styling instead
                    subclassNodeArgs = {}
                    # edgeLabel = "P279+"
                if subclassesBetween:
                    # Get labels for each subclass in between
                    subclassLabels = [
                        wikidata_utils.get_entity_label(subclass)
                        for subclass in list(subclassesBetween)
                    ]
                    # Connect "main" subclass to its immediate superclass
                    print(
                        f"(First) Marking {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) as subclass of {subclassLabels[-1]} ({list(subclassesBetween)[-1]})"
                    )
                    dot.edge(
                        subclassNodeLabel,
                        f"{subclassLabels[-1]}\n{list(subclassesBetween)[-1]}",
                        label=edgeLabel,
                        color=argsColor,
                        arrowhead="o",
                    )
                    try:
                        remainingEntities.remove(list(subclassesBetween)[-1])
                    except KeyError:
                        pass
                    for i, subclassBetween in enumerate(subclassesBetween):
                        if not nodesDict.get(subclassBetween, False):
                            # Create node for each subclass; merge the dicts so the
                            # default style's "color" key does not collide with an
                            # explicit color keyword argument
                            dot.node(
                                f"{subclassLabels[i]}\n{subclassBetween}",
                                **{"color": argsColor, **subclassNodeArgs},
                            )
                            # Add intermediary entity QID to nodes' dict
                            nodesDict[subclassBetween] = True
                    for i, subclassBetween in enumerate(list(subclassesBetween)[:-1]):
                        # Connect each subclass to its immediate superclass
                        # First, check if they should be connected
                        for j, entityAbove in enumerate(list(subclassesBetween)[i:]):
                            checkSubclass = list(subclassesBetween)[i]
                            checkSubclassLabel = subclassLabels[i]
                            if i == 0:
                                checkSubclass = subclass
                                checkSubclassLabel = subclassLabel
                            isSubclass = wikidata_utils.query_subclass_stardog(
                                entityAbove, checkSubclass, transitive=True
                            )["results"]["bindings"][0]["isSubclass0"]["value"]
                            isSubclass = isSubclass.lower() == "true"
                            print(
                                f"    (For) Is {checkSubclass} subclass of {entityAbove}? {isSubclass}"
                            )
                            if isSubclass:
                                print(
                                    f"    Marking {checkSubclassLabel} ({checkSubclass}) as subclass of {subclassLabels[i + j]} ({entityAbove})"
                                )
                                dot.edge(
                                    f"{checkSubclassLabel}\n{checkSubclass}",
                                    f"{subclassLabels[i + j]}\n{entityAbove}",
                                    label=edgeLabel,
                                    color=argsColor,
                                    arrowhead="o",
                                )
                                try:
                                    remainingEntities.remove(checkSubclass)
                                except KeyError:
                                    pass
                                try:
                                    remainingEntities.remove(entityAbove)
                                except KeyError:
                                    pass
                    # if totalEntities - len(remainingEntities) > remainingEntitiesLastIteration:
                    print(
                        f"{totalEntities - len(remainingEntities)} entities (of {totalEntities}) from the ranking processed so far."
                    )
                    # Connect the topmost superclass to the main superclass, i.e., the entity
                    print(
                        f"(Last) Marking {subclassLabels[0]} as subclass of {entityLabel}"
                    )
                    dot.edge(
                        f"{subclassLabels[0]}\n{list(subclassesBetween)[0]}",
                        f"{entityLabel}\n{entity[0]}",
                        label=edgeLabel,
                        color=argsColor,
                        arrowhead="o",
                    )
                else:
                    # If there are no subclasses in between, connect subclass and entity directly
                    print(
                        f"Joining {subclassNodeLabel.split(NL)[0]} ({subclassNodeLabel.split(NL)[1]}) and {entityLabel} ({entity[0]})"
                    )
                    dot.edge(
                        subclassNodeLabel,
                        f"{entityLabel}\n{entity[0]}",
                        label=edgeLabel,
                        color=argsColor,
                        arrowhead="o",
                    )
                    try:
                        remainingEntities.remove(subclass)
                    except KeyError:
                        pass
            # Not having graphviz properly installed might raise an exception
            try:
                if rankingEntities:
                    u = dot.unflatten(stagger=5)  # Break graphs into more lines
                    u.render(f"output/dots/dots_{dotsTime}/AP1_{dot.comment}.gv")
                else:
                    u = dot.unflatten(stagger=5)  # Break graphs into more lines
                    u.render(
                        f"output/dots/dots_{dotsTime}/AP1_{dot.comment}_intermediary.gv"
                    )
            except:
                print("\nVerify your Graphviz installation or Digraph args!\n")
                pass
            try:
                remainingEntities.remove(entity[0])
            except KeyError:
                pass
        print(remainingEntities)
def get_ranking_entity_set(rankingFile):
    entityList = parse_ranking_file(rankingFile)
    return set(entityList)

def parse_ranking_file(rankingFile):
    lines = rankingFile.readlines()
    lines = list(map(lambda line: line.strip(), lines))
    # Look for the QID in all strings
    rankEntities = wikidata_utils.regex_match_QID(lines)
    return rankEntities
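# parse_ranking_file only relies on each stripped line containing a Wikidata QID;
# a hypothetical input line could look like:
#   Q5    human    0.9312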
if __name__ == "__main__":
try:
fileIn = Path(sys.argv[2])
except:
fileIn = Path("output/ranking/AP1_minus_Q23958852_ranking.txt")
with open(fileIn, "r") as rankingFile:
entities = parse_ranking_file(rankingFile)
# entitiesSet = get_ranking_entity_set(rankingFile)
# graph_from_superclasses_dict(
# "output/AP1_occurrence.json", rankingEntities=entities
# )
graph_from_superclasses_dict(
"output/AP1_trees.json", rankingEntities=entities
)
| 37.371257 | 174 | 0.539337 | 1,098 | 12,482 | 6.068306 | 0.250455 | 0.012607 | 0.010506 | 0.016809 | 0.199009 | 0.153084 | 0.123668 | 0.123668 | 0.10866 | 0.070839 | 0 | 0.011031 | 0.375421 | 12,482 | 333 | 175 | 37.483483 | 0.843638 | 0.162875 | 0 | 0.284483 | 0 | 0.008621 | 0.194041 | 0.072177 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017241 | false | 0.034483 | 0.030172 | 0 | 0.060345 | 0.077586 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4064fd610eae924f03892b2d599dd0687c7269d | 913 | py | Python | dist/weewx-4.0.0b5/bin/weewx/junk.py | v0rts/docker-weewx | 70b2f252051dfead4fcb74e74662b297831e6342 | [
"Apache-2.0"
] | 10 | 2017-01-05T17:30:48.000Z | 2021-09-18T15:04:20.000Z | dist/weewx-4.0.0b5/bin/weewx/junk.py | v0rts/docker-weewx | 70b2f252051dfead4fcb74e74662b297831e6342 | [
"Apache-2.0"
] | 2 | 2019-07-21T10:48:42.000Z | 2022-02-16T20:36:45.000Z | dist/weewx-4.0.0b5/bin/weewx/junk.py | v0rts/docker-weewx | 70b2f252051dfead4fcb74e74662b297831e6342 | [
"Apache-2.0"
] | 12 | 2017-01-05T18:50:30.000Z | 2021-10-05T07:35:45.000Z | import weewx

class MyTypes(object):

    def get_value(self, obs_type, record, db_manager):
        if obs_type == 'dewpoint':
            if record['usUnits'] == weewx.US:
                return weewx.wxformulas.dewpointF(record.get('outTemp'), record.get('outHumidity'))
            elif record['usUnits'] == weewx.METRIC or record['usUnits'] == weewx.METRICWX:
                return weewx.wxformulas.dewpointC(record.get('outTemp'), record.get('outHumidity'))
            else:
                raise ValueError("Unknown unit system %s" % record['usUnits'])
        else:
            raise weewx.UnknownType(obs_type)

class MyVector(object):

    def get_aggregate(self, obs_type, timespan,
                      aggregate_type=None,
                      aggregate_interval=None):
        if obs_type.startswith('ch'):
            "something"  # placeholder body
        else:
            raise weewx.UnknownType(obs_type)
d407929c4b0bf64d15071879c336b090bd7b4eb9 | 1,580 | py | Python | vision_api_batch.py | swowko51/pfch_humanvsmachine_2019 | ba5a4c0db804e62892a28c72ba2d2180c6e44282 | [
"MIT"
] | null | null | null | vision_api_batch.py | swowko51/pfch_humanvsmachine_2019 | ba5a4c0db804e62892a28c72ba2d2180c6e44282 | [
"MIT"
] | null | null | null | vision_api_batch.py | swowko51/pfch_humanvsmachine_2019 | ba5a4c0db804e62892a28c72ba2d2180c6e44282 | [
"MIT"
] | null | null | null | import io
import os
import pandas as pd
import re
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
# Set Google API authentication
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "INSERT_FULL_KEY_FILE_PATH"
# ---------------------------------------------------------------------
# Retrieve labels for a batch of images and create a dataframe
# Folder where images are stored
ImageFolder = "INSERT_FULL_FOLDER_PATH"
# Placeholders to store data
ImageID = []
Description = []
# Instantiates a client
ImageLabels = pd.DataFrame()
client = vision.ImageAnnotatorClient()
# Get labels and scores for every image in folder
for file in os.listdir(ImageFolder):
    filename = os.path.basename(file).split('.jpg')[0]  # Get image ID
    with io.open(ImageFolder + file, 'rb') as image_file:  # Open image
        content = image_file.read()  # Read image into memory
    image = types.Image(content=content)
    response = client.label_detection(image=image)  # Get response from API for image
    labels = response.label_annotations  # Get labels from response
    Nlabels = len(labels)  # Get the number of labels that were returned
    for i in range(0, Nlabels):  # For each label, store the image ID and label
        ImageID.append(filename)  # Keep track of image ID
        Description.append(labels[i].description)  # Store label
# Put Image ID and label into data frame
ImageLabels["imageid"] = ImageID
ImageLabels["desc"] = Description
ImageLabels.groupby(ImageID)  # note: this grouped view is discarded and has no effect
# print(ImageLabels)
Export = ImageLabels.to_json(r'test2.json', orient='records')
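# Illustrative shape of test2.json with orient='records' (values are hypothetical):
#   [{"imageid": "photo1", "desc": "Dog"}, {"imageid": "photo1", "desc": "Mammal"}]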
| 28.727273 | 85 | 0.73038 | 215 | 1,580 | 5.302326 | 0.469767 | 0.028947 | 0.026316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002214 | 0.142405 | 1,580 | 54 | 86 | 29.259259 | 0.839114 | 0.393671 | 0 | 0 | 0 | 0 | 0.119403 | 0.083156 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d407d7b9d817bbd5e0d8ab001619f9db240a997f | 1,556 | py | Python | examples/ElasticsearchDomain.py | hmain/troposphere-fork | 815ee739bcf3d024e1aef5caeca4e4d63e85e98e | [
"BSD-2-Clause"
] | null | null | null | examples/ElasticsearchDomain.py | hmain/troposphere-fork | 815ee739bcf3d024e1aef5caeca4e4d63e85e98e | [
"BSD-2-Clause"
] | null | null | null | examples/ElasticsearchDomain.py | hmain/troposphere-fork | 815ee739bcf3d024e1aef5caeca4e4d63e85e98e | [
"BSD-2-Clause"
] | null | null | null | # Converted from Elasticsearch Domain example located at:
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#d0e51519
from troposphere import Template, constants
from troposphere.elasticsearch import Domain, EBSOptions
from troposphere.elasticsearch import ElasticsearchClusterConfig
from troposphere.elasticsearch import SnapshotOptions
templ = Template()
templ.add_description('Elasticsearch Domain example')
es_domain = templ.add_resource(Domain(
    'ElasticsearchDomain',
    DomainName="ExampleElasticsearchDomain",
    ElasticsearchClusterConfig=ElasticsearchClusterConfig(
        DedicatedMasterEnabled=True,
        InstanceCount=2,
        ZoneAwarenessEnabled=True,
        InstanceType=constants.ELASTICSEARCH_M3_MEDIUM,
        DedicatedMasterType=constants.ELASTICSEARCH_M3_MEDIUM,
        DedicatedMasterCount=3
    ),
    EBSOptions=EBSOptions(EBSEnabled=True,
                          Iops=0,
                          VolumeSize=20,
                          VolumeType="gp2"),
    SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0),
    AccessPolicies={'Version': '2012-10-17',
                    'Statement': [{
                        'Effect': 'Allow',
                        'Principal': {
                            'AWS': '*'
                        },
                        'Action': 'es:*',
                        'Resource': '*'
                    }]},
    AdvancedOptions={"rest.action.multi.allow_explicit_index": "true"}
))
print(templ.to_json())
| 36.186047 | 111 | 0.633676 | 118 | 1,556 | 8.271186 | 0.584746 | 0.061475 | 0.086066 | 0.104508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020318 | 0.272494 | 1,556 | 42 | 112 | 37.047619 | 0.841873 | 0.105398 | 0 | 0 | 0 | 0 | 0.134726 | 0.04611 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d409f34383374dd7cd26218ce4cee29f4ca7b7c0 | 3,917 | py | Python | footprint/models/audio.py | arthurtofani/footprint | 572401d4cba3299ae9915fca2a7d08ea1a3a9bc4 | [
"MIT"
] | null | null | null | footprint/models/audio.py | arthurtofani/footprint | 572401d4cba3299ae9915fca2a7d08ea1a3a9bc4 | [
"MIT"
] | null | null | null | footprint/models/audio.py | arthurtofani/footprint | 572401d4cba3299ae9915fca2a7d08ea1a3a9bc4 | [
"MIT"
] | null | null | null | from collections import defaultdict
import librosa
import os
import numpy as np
import h5py
class Audio:
    filename = None
    project = None
    bucket = None
    tempo = None
    beats = None
    features = None
    tokens = None
    loaded_from_cache = False
    has_changed = False

    def __init__(self, filename, project):
        self.filename = filename
        self.project = project
        self.features = defaultdict()
        self.tokens = defaultdict()
        self.signal_has_changed = False
        self.feature_has_changed = False
        self.token_has_changed = False
        self.y = None
        self.sr = None

    def load(self):
        if self.project.cache_features:
            self.__load_features_from_cache()
            self.__load_tokens_from_cache()

    def add_feature(self, feature_name, feature):
        self.features[feature_name] = feature
        self.feature_has_changed = True

    def add_tokens(self, tokens_key, tokens):
        self.tokens[tokens_key] = tokens
        self.token_has_changed = True

    def persist(self):
        if self.project.cache_features and self.feature_has_changed:
            self.persist_features()
        if self.project.cache_tokens and self.token_has_changed:
            self.persist_tokens()
        if self.project.cache_signal and self.signal_has_changed:
            self.persist_signal()

    def signal(self):
        if self.y is None:
            self.y, self.sr = self.__load_signal()
        return (self.y, self.sr)

    def cleanup(self):
        self.y = None
        self.sr = None

    def persist_features(self):
        self.__create_cache_folder()
        print('dumping features', self.filename)
        with h5py.File(self.cache_filename('features'), "w") as f:
            for key in self.features.keys():
                f.create_dataset(key, data=self.features[key])
        self.feature_has_changed = False

    def persist_tokens(self):
        print('dumping tokens', self.filename)
        with h5py.File(self.cache_filename('tokens'), "w") as f:
            for key in self.tokens.keys():
                f.attrs[key] = self.tokens[key]
        self.token_has_changed = False

    def persist_signal(self):
        self.__create_cache_folder()
        print('dumping audio', self.filename)
        with h5py.File(self.cache_filename('audio'), "w") as f:
            f.create_dataset('y', data=self.y)
            f.attrs["sr"] = self.sr
        self.signal_has_changed = False
    def clean_cache(self, file_type_str):
        if self.cache_filename_exists(file_type_str):
            os.remove(self.cache_filename(file_type_str))
    def __load_signal(self):
        return self.__load_signal_from_cache() or self.__load_signal_from_file()

    def __load_signal_from_file(self):
        print('loading signal from file - %s' % self.filename)
        self.y, self.sr = librosa.load(self.filename)
        self.signal_has_changed = True
        return (self.y, self.sr)

    def __load_signal_from_cache(self):
        if not self.cache_filename_exists('audio'):
            return None
        print('loading signal from cache - %s' % self.filename)
        with h5py.File(self.cache_filename('audio'), 'r') as f:
            self.y = np.array(f['y'])
            self.sr = f.attrs["sr"]
            return (self.y, self.sr)

    def __load_features_from_cache(self):
        if not self.cache_filename_exists('features'):
            return
        with h5py.File(self.cache_filename('features'), 'r') as f:
            for k in f.keys():
                self.features[k] = np.array(f[k])

    def __load_tokens_from_cache(self):
        if not self.cache_filename_exists('tokens'):
            return
        with h5py.File(self.cache_filename('tokens'), 'r') as f:
            for k in f.attrs.keys():
                self.tokens[k] = f.attrs[k]
        self.token_has_changed = False

    def cache_filename(self, file_type_str):
        return self.__cache_folder() + ('/%s.hdf5' % file_type_str)

    def cache_filename_exists(self, file_type_str):
        return os.path.isfile(self.cache_filename(file_type_str))

    def __create_cache_folder(self):
        os.makedirs(self.__cache_folder(), exist_ok=True)

    def __cache_folder(self):
        fld = self.project.cache_folder
        return fld + self.filename
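# Usage sketch (path and project object are illustrative; project is assumed to
# expose the cache_* flags and cache_folder attributes used above):
#   audio = Audio('tracks/song.wav', project)
#   y, sr = audio.signal()
#   audio.add_feature('chroma', librosa.feature.chroma_stft(y=y, sr=sr))
#   audio.persist()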
| 29.900763 | 76 | 0.693643 | 570 | 3,917 | 4.5 | 0.135088 | 0.054581 | 0.079532 | 0.037427 | 0.40117 | 0.316179 | 0.263938 | 0.115789 | 0.083821 | 0 | 0 | 0.002528 | 0.191984 | 3,917 | 130 | 77 | 30.130769 | 0.807899 | 0 | 0 | 0.165138 | 0 | 0 | 0.045698 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.183486 | false | 0 | 0.045872 | 0.027523 | 0.412844 | 0.045872 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d40a02528d58fe92a0117b78210b7cc191ea3776 | 2,634 | py | Python | src/solr.py | ielm/CSCI6964 | 5ab46ea68d9a1bf6192a2f3cbae0acea0e3696c8 | [
"MIT"
] | null | null | null | src/solr.py | ielm/CSCI6964 | 5ab46ea68d9a1bf6192a2f3cbae0acea0e3696c8 | [
"MIT"
] | null | null | null | src/solr.py | ielm/CSCI6964 | 5ab46ea68d9a1bf6192a2f3cbae0acea0e3696c8 | [
"MIT"
] | null | null | null | import json
import os
import urllib.request, urllib.parse
from collections import OrderedDict
from typing import List, Union, Dict
class SOLR:
    """
    Wrapper for the SOLR retrieval engine.
    """

    def __init__(
        self,
        host: str = "localhost",
        port: int = 8983,
        cookie: dict = None,
        collection: str = "trec",
        file_number: int = 100,
        ir_model: str = "DFR",
    ):
        self.host = host
        if self.host is None:
            self.host = (
                os.environ["SOLR_HOST"] if "SOLR_HOST" in os.environ else "localhost"
            )
        self.port = port
        if self.port is None:
            self.port = (
                int(os.environ["SOLR_PORT"]) if "SOLR_PORT" in os.environ else 8983
            )
        self.cookie = cookie
        self.collection = collection
        if self.collection is None:
            self.collection = (
                os.environ["SOLR_COLLECTION"]
                if "SOLR_COLLECTION" in os.environ
                else "trec"
            )
        self.file_number = file_number
        if self.file_number is None:
            self.file_number = (
                int(os.environ["SOLR_FILENUMBER"])
                if "SOLR_FILENUMBER" in os.environ
                else 100
            )
        self.ir_model = ir_model
        if self.ir_model is None:
            self.ir_model = (
                os.environ["SOLR_IRMODEL"] if "SOLR_IRMODEL" in os.environ else "DFR"
            )
def __rget(self, path: str = "select?", params: Union[Dict, None] = None):
url = f"http://{self.host}:{str(self.port)}/solr/{self.collection}/{path}"
def __format_param(key):
values = params[key]
if type(values) is not list:
values = [values]
return "&".join(map(lambda value: key + "=" + str(value), values))
if len(params) > 0:
url = f"{url}?{'&'.join(map(__format_param, params.keys()))}"
request = urllib.request.urlopen(url)
return json.load(request)["response"]["docs"]
    def query(
        self,
        query: str,
        fields: List[str] = None,
        rows: int = 15,
        sort: str = "score asc",
    ):
        params = {
            "fl": (
                urllib.parse.quote(" ".join(fields))
                if fields is not None
                else "docno%2Cscore%2Cdoctext&"
            ),
            "q": f"doctext%3A({query})",
            "rows": f"{rows}",
            "sort": urllib.parse.quote(sort),
        }
        return self.__rget(path="select", params=params)
| 27.154639 | 85 | 0.509112 | 296 | 2,634 | 4.415541 | 0.290541 | 0.06886 | 0.038256 | 0.057383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012063 | 0.370539 | 2,634 | 96 | 86 | 27.4375 | 0.776236 | 0.014427 | 0 | 0.054054 | 0 | 0 | 0.14186 | 0.022868 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0 | 0.067568 | 0 | 0.175676 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d40bd0e8a76f18b193a91aefda5569f8c67d163b | 3,773 | py | Python | modules/slack.py | KTH/Jan-Ove | 661acf5843b7b6dd459b0dddd64cffe27840e3c8 | [
"MIT"
] | null | null | null | modules/slack.py | KTH/Jan-Ove | 661acf5843b7b6dd459b0dddd64cffe27840e3c8 | [
"MIT"
] | null | null | null | modules/slack.py | KTH/Jan-Ove | 661acf5843b7b6dd459b0dddd64cffe27840e3c8 | [
"MIT"
] | null | null | null | __author__ = 'tinglev@kth.se'
import os
import re
import logging
from slackclient import SlackClient
def init():
    #global CLIENT, BOT_ID
    log = logging.getLogger(__name__)
    client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
    auth_test = client.api_call("auth.test")
    log.debug('Auth test response: %s', auth_test)
    bot_id = auth_test["user_id"]
    log.debug('Bot ID is "%s"', bot_id)
    client.rtm_connect(with_team_state=False, auto_reconnect=True)
    return client

def mention_to_user_id(mention):
    mention_regex = r'^<@(.+)>$'
    matches = re.search(mention_regex, mention)
    if matches:
        return matches.group(1)
    return None

def user_id_to_mention(user_id):
    return f'<@{user_id}>'
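# Round-trip example for the two helpers above (IDs are illustrative):
#   mention_to_user_id('<@U123ABC>')  ->  'U123ABC'
#   user_id_to_mention('U123ABC')     ->  '<@U123ABC>'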
def get_rtm_messages(events):
    messages = []
    for event in events:
        if event["type"] == "message":
            messages.append(event)
    return messages
def message_is_command(message):
    log = logging.getLogger(__name__)
    try:
        trigger_text = os.environ.get('BOT_TRIGGER') or '!pingis'
        trigger_regex = r'^{0} (.+)'.format(trigger_text)
        matches = re.search(trigger_regex, message['text'])
        if matches and matches.group(1):
            return matches.group(1).strip(), message['user'], message['channel']
    except Exception as err:
        log.debug('Edited message ignored "%s". Error: "%s".', message, err)
    return (None, None, None)
def send_ephemeral(slack_client, channel, user, message, default_message=None):
    log = logging.getLogger(__name__)
    log.debug('Sending eph to ch "%s" user "%s" msg "%s"', channel, user, message)
    slack_client.api_call(
        "chat.postEphemeral",
        channel=channel,
        user=user,
        text=message or default_message
    )

def get_user_info(slack_client, slack_user_id):
    log = logging.getLogger(__name__)
    log.debug('Calling "users.info" on slack api')
    user = slack_client.api_call(
        'users.info',
        user=slack_user_id
    )
    log.debug('Got user %s', user)
    return user

def get_user_list(slack_client):
    log = logging.getLogger(__name__)
    log.debug('Calling "users.list" on slack api')
    result = slack_client.api_call(
        'users.list'
    )
    #log.debug('Response from api was: %s', result)
    return result

def get_user_from_user_list(user_list, user_id):
    log = logging.getLogger(__name__)
    if 'members' not in user_list:
        return None
    for user in user_list['members']:
        if 'id' in user and user['id'] == user_id:
            log.debug('Found user %s in user_list', user_id)
            return user
    return None

def get_user_image_url(user):
    imv_version = 'image_192'
    log = logging.getLogger(__name__)
    if 'user' in user and 'profile' in user['user']:
        if imv_version in user['user']['profile']:
            log.debug('Found user image for user %s', user['user']['id'])
            return user['user']['profile'][imv_version]
    return None

def send_message(slack_client, channel, message, default_message=None):
    log = logging.getLogger(__name__)
    log.debug('Sending msg to ch "%s" msg "%s"', channel, message)
    response = slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        text=message or default_message
    )
    log.debug('Response from api was: %s', response)

def send_block_message(slack_client, channel, blocks):
    log = logging.getLogger(__name__)
    log.debug('Sending block message to ch "%s" blocks "%s"', channel, blocks)
    response = slack_client.api_call(
        "chat.postMessage",
        channel=channel,
        blocks=blocks
    )
    log.debug('Response from api was: %s', response)

def rtm_read(slack_client):
    return slack_client.rtm_read()
| 31.974576 | 82 | 0.655712 | 511 | 3,773 | 4.594912 | 0.213307 | 0.0477 | 0.072828 | 0.08816 | 0.286201 | 0.212095 | 0.197189 | 0.169506 | 0.132879 | 0.053663 | 0 | 0.002377 | 0.219454 | 3,773 | 117 | 83 | 32.247863 | 0.794907 | 0.017758 | 0 | 0.257426 | 0 | 0 | 0.171166 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128713 | false | 0 | 0.039604 | 0.019802 | 0.316832 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d40cd5794c364cbfbd47cd7ffe66063081da15e8 | 952 | py | Python | gitconfig_parser/parser.py | seanfisk/gitconfig-parser | f747a4263bdd7fc4ffd6b6a0fe34ef7cf7e935a8 | [
"MIT"
] | null | null | null | gitconfig_parser/parser.py | seanfisk/gitconfig-parser | f747a4263bdd7fc4ffd6b6a0fe34ef7cf7e935a8 | [
"MIT"
] | null | null | null | gitconfig_parser/parser.py | seanfisk/gitconfig-parser | f747a4263bdd7fc4ffd6b6a0fe34ef7cf7e935a8 | [
"MIT"
] | 2 | 2019-03-07T04:55:29.000Z | 2019-03-28T00:59:55.000Z | """:mod:`gitconfig_parser.parser` -- Parser implementation
"""
from pyparsing import (
    OneOrMore, restOfLine, Group, ZeroOrMore,
    CharsNotIn, Suppress, Word, alphanums, Literal, pythonStyleComment)

def build_parser():
    key = Word(alphanums).setResultsName('key')
    value = restOfLine.setParseAction(
        lambda string, location, tokens: tokens[0].strip()
    ).setResultsName('value')
    property_ = Group(key + Suppress(Literal('=')) + value)
    properties = Group(OneOrMore(property_)).setResultsName('properties')
    section_name = (Suppress('[') + OneOrMore(CharsNotIn(']')) +
                    Suppress(']')).setResultsName('section')
    section = Group(section_name + properties)
    ini_file = ZeroOrMore(section).setResultsName('sections')
    ini_file.ignore(pythonStyleComment)
    return ini_file

def parse_file(file_):
    parser = build_parser()
    return parser.parseWithTabs().parseFile(file_, parseAll=True)
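# Usage sketch (hypothetical input; pyparsing's parseFile accepts file-like objects):
#   import io
#   result = parse_file(io.StringIO('[user]\nname = alice\nemail = a@example.com\n'))
#   print(result.dump())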
| 35.259259 | 73 | 0.697479 | 91 | 952 | 7.153846 | 0.472527 | 0.032258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001259 | 0.165966 | 952 | 26 | 74 | 36.615385 | 0.81864 | 0.057773 | 0 | 0 | 0 | 0 | 0.041573 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d40d751818208fbb5ac12dea45e5d857cfe9c460 | 1,641 | py | Python | chroma-manager/tests/unit/chroma_api/test_command.py | GarimaVishvakarma/intel-chroma | fdf68ed00b13643c62eb7480754d3216d9295e0b | [
"MIT"
] | null | null | null | chroma-manager/tests/unit/chroma_api/test_command.py | GarimaVishvakarma/intel-chroma | fdf68ed00b13643c62eb7480754d3216d9295e0b | [
"MIT"
] | null | null | null | chroma-manager/tests/unit/chroma_api/test_command.py | GarimaVishvakarma/intel-chroma | fdf68ed00b13643c62eb7480754d3216d9295e0b | [
"MIT"
] | null | null | null |
from chroma_core.models import Command
from chroma_core.models.host import ManagedHost
from chroma_core.services.job_scheduler.job_scheduler_client import JobSchedulerClient
import mock
from tests.unit.chroma_api.chroma_api_test_case import ChromaApiTestCase
class TestCommandResource(ChromaApiTestCase):
    def test_host_lists(self):
        """Test that commands which take a list of hosts as an argument
        get the host URIs converted to host IDs (for use with HostListMixin)"""
        from chroma_api.urls import api

        hosts = []
        for i in range(0, 2):
            address = 'myserver_%d' % i
            host = ManagedHost.objects.create(
                address=address,
                fqdn=address,
                nodename=address)
            hosts.append(host)

        with mock.patch("chroma_core.services.job_scheduler.job_scheduler_client.JobSchedulerClient.command_run_jobs",
                        mock.Mock(return_value=Command.objects.create().id)):
            response = self.api_client.post("/api/command/", data={
                'message': "Test command",
                'jobs': [
                    {
                        'class_name': 'UpdateNidsJob',
                        'args': {'hosts': [api.get_resource_uri(h) for h in hosts]}
                    }
                ]
            })
            self.assertEqual(response.status_code, 201)

            host_ids = "[%s]" % ", ".join([str(h.id) for h in hosts])
            JobSchedulerClient.command_run_jobs.assert_called_once_with([{'class_name': 'UpdateNidsJob', 'args': {'host_ids': host_ids}}], 'Test command')
| 42.076923 | 154 | 0.604509 | 184 | 1,641 | 5.201087 | 0.472826 | 0.041797 | 0.043887 | 0.041797 | 0.100313 | 0.100313 | 0.100313 | 0.100313 | 0 | 0 | 0 | 0.004325 | 0.295551 | 1,641 | 38 | 155 | 43.184211 | 0.823529 | 0.081048 | 0 | 0 | 0 | 0 | 0.149364 | 0.060951 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.033333 | false | 0 | 0.2 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d40e05ca54d2f05415844295db6aa87aa4fcad8b | 743 | py | Python | tps/problems/forms/export.py | akmohtashami/tps-web | 9dab3ffe97c21f658be30ce2f2711dd93e4ba60f | [
"MIT"
] | 5 | 2019-02-26T06:10:43.000Z | 2021-07-24T17:11:45.000Z | tps/problems/forms/export.py | akmohtashami/tps-web | 9dab3ffe97c21f658be30ce2f2711dd93e4ba60f | [
"MIT"
] | 3 | 2019-08-15T13:56:03.000Z | 2021-06-10T18:43:16.000Z | tps/problems/forms/export.py | jonathanirvings/tps-web | 46519347d4fc8bdced9b5bceb6cdee5ea4e508f2 | [
"MIT"
] | 2 | 2018-12-28T13:12:59.000Z | 2020-12-25T18:42:13.000Z | from django import forms
from problems.models import ExportPackage
class ExportForm(forms.ModelForm):
class Meta:
model = ExportPackage
fields = ('exporter', 'export_format',)
def __init__(self, *args, **kwargs):
self.problem = kwargs.pop('problem')
self.revision = kwargs.pop('revision')
self.creator = kwargs.pop('user')
super(ExportForm, self).__init__(*args, **kwargs)
def save(self, **kwargs):
export_package = super(ExportForm, self).save(commit=False)
export_package.problem = self.problem
export_package.commit_id = self.revision.commit_id
export_package.creator = self.creator
export_package.save()
return export_package
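# --- Hedged usage sketch (editor's addition): how a Django view might drive
# this form. The view name and request handling are hypothetical; only the
# form API defined above is taken from the source.
def create_export_package(request, problem, revision):
    form = ExportForm(request.POST, problem=problem, revision=revision,
                      user=request.user)
    if form.is_valid():
        return form.save()  # persists and returns the ExportPackage
    return None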
| 30.958333 | 67 | 0.664872 | 83 | 743 | 5.746988 | 0.39759 | 0.163522 | 0.079665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.224764 | 743 | 23 | 68 | 32.304348 | 0.828125 | 0 | 0 | 0 | 0 | 0 | 0.053836 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4112f52a0cbfaf422eb8c4063d5420c3f79065c | 851 | py | Python | presenterServer.py | eliahvo/presenter-app | 9d5b90e15590316b1c769b94d1d88f62ea594f7e | [
"MIT"
] | null | null | null | presenterServer.py | eliahvo/presenter-app | 9d5b90e15590316b1c769b94d1d88f62ea594f7e | [
"MIT"
] | null | null | null | presenterServer.py | eliahvo/presenter-app | 9d5b90e15590316b1c769b94d1d88f62ea594f7e | [
"MIT"
] | null | null | null | import socket
import sys
from pynput.keyboard import Key, Controller
import time
TCP_IP = sys.argv[1]
TCP_PORT = 5005
BUFFER_SIZE = 20 # Normally 1024, but we want fast response
keyboard = Controller()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP, TCP_PORT))
s.listen(1)
print("Server: ", TCP_IP)
conn = None  # ensure `conn` exists for the except/finally blocks even if accept() is interrupted
while 1:
try:
print("Waiting...")
conn, addr = s.accept()
print('Connection address: ', addr)
data = conn.recv(BUFFER_SIZE)
        if not data: break  # an empty recv (client closed without sending) stops the server loop
        print("received data:", data)
if data == b'forward':
keyboard.press(Key.right)
keyboard.release(Key.right)
elif data == b'backward':
keyboard.press(Key.left)
keyboard.release(Key.left)
except KeyboardInterrupt:
if conn:
conn.close()
print("W: interrupt received, stopping…")
break
finally:
# clean up
        if conn:
            conn.close()
#conn.close() | 19.790698 | 60 | 0.690952 | 124 | 851 | 4.693548 | 0.532258 | 0.025773 | 0.054983 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018519 | 0.175088 | 851 | 43 | 61 | 19.790698 | 0.806268 | 0.07168 | 0 | 0.060606 | 0 | 0 | 0.125794 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.121212 | 0 | 0.121212 | 0.151515 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
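# --- Hedged companion sketch (editor's addition): a minimal client for the
# server above. It opens one connection per command, matching the server's
# accept/recv/close cycle; the host must be the server's IP (sys.argv[1]).
def send_command(host, command=b'forward', port=TCP_PORT):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client.connect((host, port))
        client.sendall(command)  # b'forward' or b'backward'
    finally:
        client.close()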
d4137fb795db6c1885430d3738fc61716e37a474 | 3,226 | py | Python | newserver/server.py | packedbread/hack.moscow | d00aeabd5b46d1c95990e181cb08895e9a4e2ff7 | [
"MIT"
] | null | null | null | newserver/server.py | packedbread/hack.moscow | d00aeabd5b46d1c95990e181cb08895e9a4e2ff7 | [
"MIT"
] | 2 | 2021-03-09T22:13:16.000Z | 2021-05-10T18:42:36.000Z | newserver/server.py | packedbread/hack.moscow | d00aeabd5b46d1c95990e181cb08895e9a4e2ff7 | [
"MIT"
] | null | null | null | from concurrent.futures import ProcessPoolExecutor
from storage import ClientStorage
from aiohttp import web, hdrs
import aiofiles
import tempfile
import asyncio
import logging
import shutil
import os
MAX_FILE_SIZE = 20 * 1024 * 1024
PROCESS_POOL_SIZE = 8
STATIC_PATH = '../client/dist'
TEMP_PATH = 'temp'
logger = logging.getLogger('Server')
logging.basicConfig(
level=logging.CRITICAL,
format='[%(levelname)s] %(name)s: %(message)s',
)
clients = ClientStorage  # note: the class itself; loop/pool below become attributes shared by all instances
clients.loop = loop = asyncio.get_event_loop()
clients.pool = pool = ProcessPoolExecutor(PROCESS_POOL_SIZE)
app = web.Application()
routes = web.RouteTableDef()
# Index page
@routes.get('/')
async def index(_):
return web.FileResponse(STATIC_PATH + '/index.html')
# Upload multiple files
@routes.post('/upload')
@routes.route('OPTIONS', '/upload')
async def upload(request: web.Request):
if request.method == 'OPTIONS':
return web.Response(status=200)
reader = await request.multipart()
tempdir = tempfile.mkdtemp(dir=TEMP_PATH)
files = []
while True:
field = await reader.next()
if field is None: break
if field.name != 'files[]': continue
size = 0
path = os.path.join(tempdir, str(len(files)))
file = await aiofiles.open(path, mode='wb')
while True:
if size > MAX_FILE_SIZE:
shutil.rmtree(tempdir, ignore_errors=True)
                return web.Response(status=403, text='Too large file')  # was `raise`: aiohttp Responses are returned, not raised
chunk = await field.read_chunk()
if not chunk: break
size += len(chunk)
await file.write(chunk)
await file.flush()
await file.close()
files.append(path)
if not files: return web.Response(status=400, text='No files')
client = ClientStorage()
future = client.handle_upload(files)
asyncio.ensure_future(future, loop=loop)
logging.critical('New client storage: ' + client.uid)
return web.Response(status=200, text=client.uid)
@routes.post('/next')
async def get_next(request):
try: data = await request.json()
    except Exception: data = {'time': 0}  # missing or invalid JSON body falls back to time 0
if not clients.clients: return web.HTTPNotFound()
# Could get client by uid
client = next(iter(clients.clients.values()))
result = {
'status': client.status,
'ready': client.status == 'ready',
}
if client.status == 'ready':
from_, to = client.next_jump(data['time'])
result['from'], result['to'] = from_, to
return web.json_response(data=result)
# Permissive CORS: allow cross-origin requests from any origin on every response
@app.on_response_prepare.append
async def on_prepare(_, response):
response.headers[hdrs.ACCESS_CONTROL_ALLOW_ORIGIN] = '*'
response.headers[hdrs.ACCESS_CONTROL_ALLOW_METHODS] = 'OPTIONS, GET, POST'
response.headers[hdrs.ACCESS_CONTROL_ALLOW_HEADERS] = (
'Content-Type, Access-Control-Allow-Headers, Authorization, X-Requested-With'
)
if __name__ == '__main__':
# Cleanup temp dir
shutil.rmtree(TEMP_PATH, ignore_errors=True)
os.mkdir(TEMP_PATH)
# Serve static & register routes
routes.static('/', STATIC_PATH)
app.add_routes(routes)
# Start
port = os.getenv('PORT', 5000)
web.run_app(app, port=port)
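# --- Hedged usage note (editor's addition): exercised from a shell, assuming
# the default port 5000:
#   curl -F 'files[]=@track.mp3' http://localhost:5000/upload   -> client uid
#   curl -X POST -d '{"time": 0}' http://localhost:5000/next    -> next-jump JSON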
| 27.810345 | 85 | 0.6646 | 404 | 3,226 | 5.180693 | 0.376238 | 0.0258 | 0.032489 | 0.032967 | 0.077879 | 0.053034 | 0 | 0 | 0 | 0 | 0 | 0.011399 | 0.211407 | 3,226 | 115 | 86 | 28.052174 | 0.811321 | 0.040918 | 0 | 0.023256 | 0 | 0 | 0.096889 | 0.009397 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.104651 | 0 | 0.151163 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d414c13a693261e067977fffbcfd18adb9931eb1 | 881 | py | Python | src/adafruit-circuitpython-bundle-4.x-mpy-20190713/examples/miniesptool_esp32nina.py | mbaaba/solar_panel | 42059d8c61320494ad1298065dbc50cd9b3bd51e | [
"MIT"
] | 1 | 2020-04-13T16:10:53.000Z | 2020-04-13T16:10:53.000Z | infra/libs-400rc2-20190512/examples/miniesptool_esp32nina.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | infra/libs-400rc2-20190512/examples/miniesptool_esp32nina.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | import time
import board
import busio
from digitalio import DigitalInOut, Direction # pylint: disable=unused-import
import adafruit_miniesptool
print("ESP32 Nina-FW")
uart = busio.UART(board.TX, board.RX, baudrate=115200, timeout=1)
resetpin = DigitalInOut(board.D5)
gpio0pin = DigitalInOut(board.D6)
esptool = adafruit_miniesptool.miniesptool(uart, gpio0pin, resetpin,
flashsize=4*1024*1024)
esptool.sync()
print("Synced")
print("Found:", esptool.chip_name)
if esptool.chip_name != "ESP32":
raise RuntimeError("This example is for ESP32 only")
esptool.baudrate = 912600  # likely intended 921600; the ESP32 ROM accepts arbitrary rates, so this still works
print("MAC ADDR: ", [hex(i) for i in esptool.mac_addr])
# Note: Make sure to use the LATEST nina-fw binary release!
esptool.flash_file("NINA_W102-1.3.1.bin",0x0,'3f9d2765dd3b7b1eab61e1eccae73e44')
esptool.reset()
time.sleep(0.5)
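# --- Hedged note (editor's addition): the third argument to flash_file() is
# the firmware's MD5 hex digest. On a desktop Python it can be recomputed with:
#   import hashlib
#   with open("NINA_W102-1.3.1.bin", "rb") as f:
#       print(hashlib.md5(f.read()).hexdigest())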
| 30.37931 | 81 | 0.708286 | 116 | 881 | 5.318966 | 0.612069 | 0.061588 | 0.048622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080111 | 0.178207 | 881 | 28 | 82 | 31.464286 | 0.772099 | 0.098751 | 0 | 0 | 0 | 0 | 0.158585 | 0.04194 | 0 | 0 | 0.003932 | 0 | 0 | 1 | 0 | false | 0 | 0.238095 | 0 | 0.238095 | 0.190476 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d418716dae9acd4334a9f34e0f2074dd64298ebb | 451 | py | Python | sqlalchemy/test_delete.py | WebSofter/lessnor | f5843a29f443126d30955a2fe4e7f3cadb216ad8 | [
"MIT"
] | null | null | null | sqlalchemy/test_delete.py | WebSofter/lessnor | f5843a29f443126d30955a2fe4e7f3cadb216ad8 | [
"MIT"
] | null | null | null | sqlalchemy/test_delete.py | WebSofter/lessnor | f5843a29f443126d30955a2fe4e7f3cadb216ad8 | [
"MIT"
] | null | null | null | #Import our configuration functions
from models import Film
from config import get_session
#Get instances for working with DB
session = get_session()
#***********************Working with db********************
# Delete a specific row (v1): load the mapped object, then delete it
film = session.query(Film).filter(Film.id == 3).one()
session.delete(film)
session.commit()
# Delete specific rows (v2): bulk delete via the Core table construct
films = Film.__table__.delete().where(Film.id.in_([2, 4, 5]))
session.execute(films)
session.commit() | 25.055556 | 61 | 0.687361 | 63 | 451 | 4.809524 | 0.555556 | 0.066007 | 0.085809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014925 | 0.108647 | 451 | 18 | 62 | 25.055556 | 0.738806 | 0.365854 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
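# --- Hedged alternative (editor's addition): the same bulk delete expressed at
# the ORM level, without dropping to the Core table construct (ids illustrative).
session.query(Film).filter(Film.id.in_([6, 7])).delete(synchronize_session=False)
session.commit()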
d41946ca90fe6073895898eb60b2f62a025bcdcd | 1,045 | py | Python | kakakucom-scrape/recover.py | GINK03/itmedia-scraping | 5afbe06dd0aa12db1694a2b387aa2eeafb20e981 | [
"MIT"
] | 16 | 2018-02-06T14:43:41.000Z | 2021-01-23T05:07:33.000Z | kakakucom-scrape/recover.py | GINK03/itmedia-scraping | 5afbe06dd0aa12db1694a2b387aa2eeafb20e981 | [
"MIT"
] | null | null | null | kakakucom-scrape/recover.py | GINK03/itmedia-scraping | 5afbe06dd0aa12db1694a2b387aa2eeafb20e981 | [
"MIT"
] | 4 | 2018-01-16T13:50:43.000Z | 2019-12-16T19:45:54.000Z | import glob
import json
import pickle
import gzip
import os
import hashlib
import re
import bs4
import concurrent.futures
names = set([name.split('/').pop() for name in glob.glob('hrefs/*')])
size = len(names)
def _map(arg):
urls = set()
index, size, name = arg
print(index, '/', size, name)
try:
html = gzip.decompress(open(f'htmls/{name}', 'rb').read()).decode()
except Exception as ex:
return []
    soup = bs4.BeautifulSoup(html, 'html.parser')  # explicit parser avoids bs4's parser-guessing warning
for a in soup.find_all('a', href=True):
href = a.get('href')
        href = re.sub(r'\?.*', '', href)  # strip the query string; the non-greedy `\?.*?` removed only the `?` itself
href = '/'.join(filter(lambda x:'='not in x, href.split('/')))
#print(href)
if 'https://book.dmm.com' in href:
urls.add(href)
return urls
urls = set()
args = [(index, size, name) for index, name in enumerate(names)]
_map(args[0])  # smoke-test a single job before spinning up the process pool
with concurrent.futures.ProcessPoolExecutor(max_workers=16) as exe:
for _urls in exe.map(_map, args):
for url in _urls:
urls.add(url)
print(urls)
open('urls.pkl.gz', 'wb').write(gzip.compress(pickle.dumps(urls)))
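# --- Hedged round-trip check (editor's addition): the gzip'd pickle written
# above can be restored with the inverse pipeline.
with open('urls.pkl.gz', 'rb') as fp:
    restored = pickle.loads(gzip.decompress(fp.read()))
assert restored == urls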
| 20.9 | 71 | 0.639234 | 159 | 1,045 | 4.157233 | 0.490566 | 0.040847 | 0.059002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005862 | 0.183732 | 1,045 | 49 | 72 | 21.326531 | 0.76905 | 0.010526 | 0 | 0.055556 | 0 | 0 | 0.066796 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0 | 0.25 | 0 | 0.333333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d41ba959656db47ebcf8cbb11038ffe513c678ca | 7,339 | py | Python | scrounger/core/session.py | NORD-Function/IOS-tools | dd393666aa1ba1117d1c472cfdef4d0b18216904 | [
"BSD-3-Clause"
] | 217 | 2018-08-23T12:00:45.000Z | 2022-01-20T12:09:09.000Z | scrounger/core/session.py | Warlockk/scrounger | dd393666aa1ba1117d1c472cfdef4d0b18216904 | [
"BSD-3-Clause"
] | 18 | 2018-08-24T09:28:54.000Z | 2020-05-19T04:49:54.000Z | scrounger/core/session.py | Warlockk/scrounger | dd393666aa1ba1117d1c472cfdef4d0b18216904 | [
"BSD-3-Clause"
] | 48 | 2018-08-23T12:51:45.000Z | 2022-01-20T12:09:02.000Z | """
Module that holds all information of a session of scrounger
"""
# custom module imports
from sys import path as _path
# config imports
from scrounger.utils.config import _SCROUNGER_HOME
class Session(object):
_name = ""
_rows, _columns = 128, 80
options = {}
global_options = {}
devices = {}
results = {}
exceptions = [] # unused
_available_modules = None
_module_instance = None
_current_module = None
_module_class = None
prompt = None
def __init__(self, name):
        from os import popen
# helper functions
from scrounger.utils.general import execute
# used to find the available modules
import scrounger.modules
self._name = name
self._rows, self._columns = popen('stty size', 'r').read().split()
self._rows, self._columns = int(self._rows), int(self._columns)
if self._columns < 128: self._columns = 128
# need to add / to then replace it
modules_path = "{}/".format(scrounger.modules.__path__[0])
modules = execute("find {} -name '*.py'".format(modules_path))
self._available_modules = [
module.replace(modules_path, "").replace(".py", "")
for module in modules.split("\n")
if module and "__" not in module
]
# add custom modules
modules_path = "{}/modules/".format(_SCROUNGER_HOME)
modules = execute("find {} -name \"*.py\"".format(modules_path))
# add path to sys.path
_path.append(modules_path)
self._available_modules += [
module.replace(modules_path, "").replace(".py", "")
for module in modules.split("\n")
if module and "__" not in module
]
# fix for macos
self._available_modules = [
module[1:] if module.startswith("/") else module
for module in sorted(self._available_modules)
]
# public vars to be used by calling modules
self.options = {}
self.global_options = {
"debug": "False",
"device": "",
"output": "",
"verbose": "False"
}
self.devices = {}
self.results = {}
self.exceptions = [] # unused
self.prompt = None
# initialize private vars
self._module_instance = None
self._current_module = None
self._module_class = None
def modules(self):
"""
Returns the available modules
:return: returns a list with the available modules
"""
return self._available_modules
def back(self):
"""Returns to the main state"""
self._module_instance = None
self._current_module = None
self._module_class = None
    def use(self, module):
        """Activates the given module by importing and instantiating its Module class."""
self._current_module = module
if module.startswith("custom/"):
self._module_class = __import__("{}".format(
module.replace("/", ".")), fromlist=["Module"])
else:
self._module_class = __import__("scrounger.modules.{}".format(
module.replace("/", ".")), fromlist=["Module"])
if not hasattr(self._module_class, "Module"):
self._current_module = None
self._module_class = None
raise Exception("Missing `Module` class")
self._module_instance = self._module_class.Module()
if not hasattr(self._module_class.Module, "meta") or not hasattr(
self._module_instance, "options"):
self._module_instance = None
self._current_module = None
self._module_class = None
raise Exception("Missing required variables")
def module_options(self):
"""
Returns the options dict for the current module or None if no module
is active
:return: a dict with the required options
"""
if self._module_instance:
return self._module_instance.options
return None
def module(self):
"""
Returns the current active module or None if no module is active
:return: a str with the current module
"""
return self._current_module
def instance(self):
"""
Returns an instance with the current active module or None if no module
is active
:return: an object representing an inatance of the current active module
"""
return self._module_instance
def name(self):
"""
Returns the name of a session
:return: a str with the session name
"""
return self._name
def to_dict(self):
"""
Returns a dict representing the current sesssion
:return: a dict representing the session
"""
return {
"name": self._name,
"devices": [
{
"id": self.devices[device]["device"].device_id(),
"type": self.devices[device]["type"],
"no": device
} for device in self.devices
],
"results": self.results, # TODO: if object, need to reproduce it
"global": self.global_options,
"options": self.options,
"current": self._current_module,
"prompt": self.prompt
}
def __str__(self):
return "Session {}".format(self.name())
def load_sessions(filename):
"""
Loads a list of sessions from a file
:param str filename: the file path to load the sessions from
:return: a list of Session objects
"""
from scrounger.core.device import IOSDevice, AndroidDevice
from scrounger.utils.general import file_exists
from json import loads
if not file_exists(filename):
return []
with open(filename, "r") as fp:
content = fp.read()
sessions = []
try:
json_sessions = loads(content)
    except Exception:
        # malformed session file: return no sessions
return []
for json_session in json_sessions["sessions"]:
session = Session(json_session["name"])
for json_device in json_session["devices"]:
if json_device["type"] == "ios":
device = IOSDevice(json_device["id"])
else:
device = AndroidDevice(json_device["id"])
session.devices[json_device["no"]] = {
"device": device,
"type": json_device["type"]
}
session.results = json_session["results"]
session.global_options = json_session["global"]
session.options = json_session["options"]
if json_session["current"]:
session.use(json_session["current"])
session.prompt = json_session["prompt"]
sessions += [session]
return sessions
def save_sessions(sessions, filename):
"""
Saves a list of session into a file
:param list sessions: a list of Session objects
:param str filename: the filepath to save the sessions to
:return: nothing
"""
from json import dumps
with open(filename, "w") as fp:
fp.write(dumps(
{"sessions": [session.to_dict() for session in sessions]}
))
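# --- Hedged usage sketch (editor's addition): round-tripping a session through
# the helpers above. The file path is hypothetical.
def _demo_roundtrip(path="/tmp/scrounger_sessions.json"):
    session = Session("demo")
    save_sessions([session], path)
    return load_sessions(path)  # -> a list containing one restored Session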
| 28.55642 | 80 | 0.571331 | 797 | 7,339 | 5.077792 | 0.188206 | 0.044477 | 0.033358 | 0.020756 | 0.241166 | 0.193971 | 0.193971 | 0.193971 | 0.157153 | 0.157153 | 0 | 0.002632 | 0.326884 | 7,339 | 256 | 81 | 28.667969 | 0.816599 | 0.180951 | 0 | 0.170068 | 0 | 0 | 0.067341 | 0 | 0 | 0 | 0 | 0.003906 | 0 | 1 | 0.081633 | false | 0 | 0.07483 | 0.006803 | 0.312925 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d41d1b57c833f3f45020982a1362ffc5bb62328a | 8,703 | py | Python | deepmath/treegen/cnf_model_test.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 830 | 2016-11-07T21:46:27.000Z | 2022-03-23T08:01:03.000Z | deepmath/treegen/cnf_model_test.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 26 | 2016-11-07T22:06:31.000Z | 2022-02-16T00:18:29.000Z | deepmath/treegen/cnf_model_test.py | LaudateCorpus1/deepmath | b5b721f54de1d5d6a02d78f5da5995237f9995f9 | [
"Apache-2.0"
] | 168 | 2016-11-07T21:48:55.000Z | 2022-03-19T02:47:14.000Z | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for treegen.cnf_model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tensorflow as tf
from deepmath.treegen import cnf_model
from deepmath.treegen import cnf_model_test_lib
flags = tf.flags
FLAGS = flags.FLAGS
class CnfModelTest(tf.test.TestCase):
# From l102_finseq_1, in test0.jsonl
# v7_ordinal1(X1) | ~m1_subset_1(X1, k4_ordinal1())
tiny_expr = json.loads(
'''{"clauses": [{"positive": true, "params": [{"var": "X1"}], "pred":
"v7_ordinal1"}, {"positive": false, "params": [{"var": "X1"}, {"params":
[], "func": "k4_ordinal1"}], "pred": "m1_subset_1"}]}''')
# From l102_modelc_2, in test0.jsonl
huge_expr = json.loads(
'''{"clauses": [{"positive": true, "params": [{"params": [], "func":
"esk4_0"}, {"var": "X1"}, {"var": "X1"}, {"var": "X1"}], "pred":
"r5_modelc_2"}, {"positive": false, "equal": [{"params": [{"var": "X1"},
{"params": [{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var":
"X2"}, {"var": "X3"}], "func": "esk2_4"}], "func": "k1_funct_1"},
{"params": [{"var": "X1"}, {"params": [{"params": [], "func": "esk4_0"},
{"var": "X1"}, {"var": "X2"}, {"var": "X3"}], "func": "esk2_4"}], "func":
"k1_funct_1"}]}, {"positive": false, "params": [{"var": "X1"}, {"params":
[{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var": "X2"}, {"var":
"X3"}], "func": "esk2_4"}, {"params": [], "func": "esk4_0"}], "pred":
"epred2_3"}, {"positive": false, "params": [{"var": "X1"}], "pred":
"v7_ordinal1"}, {"positive": false, "params": [{"params": [{"params": [],
"func": "esk4_0"}, {"var": "X1"}, {"var": "X2"}, {"var": "X3"}], "func":
"esk2_4"}], "pred": "v3_modelc_2"}, {"positive": false, "params":
[{"params": [{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var":
"X2"}, {"var": "X3"}], "func": "esk2_4"}, {"params": [], "func":
"k5_numbers"}], "pred": "m2_finseq_1"}, {"positive": false, "params":
[{"params": [{"params": [], "func": "esk4_0"}, {"var": "X1"}, {"var":
"X2"}, {"var": "X3"}], "func": "esk2_4"}], "pred": "v1_modelc_2"},
{"positive": false, "params": [{"params": [], "func": "esk4_0"},
{"params": [], "func": "esk5_0"}, {"var": "X1"}], "pred": "r4_modelc_2"},
{"positive": false, "params": [{"var": "X1"}, {"params": [{"params":
[{"params": [], "func": "k9_modelc_2"}, {"params": [{"params": [], "func":
"esk4_0"}], "func": "u1_struct_0"}], "func": "k2_zfmisc_1"}], "func":
"k1_zfmisc_1"}], "pred": "m1_subset_1"}, {"positive": false, "params":
[{"var": "X1"}, {"params": [{"params": [{"params": [], "func":
"k15_modelc_2"}, {"params": [{"params": [], "func": "esk4_0"}], "func":
"u1_modelc_2"}], "func": "k2_zfmisc_1"}], "func": "k1_zfmisc_1"}], "pred":
"m1_subset_1"}, {"positive": false, "params": [{"var": "X1"}, {"params":
[], "func": "k9_modelc_2"}, {"params": [{"params": [], "func": "esk4_0"}],
"func": "u1_struct_0"}], "pred": "v1_funct_2"}, {"positive": false,
"params": [{"var": "X1"}, {"params": [], "func": "k15_modelc_2"},
{"params": [{"params": [], "func": "esk4_0"}], "func": "u1_modelc_2"}],
"pred": "v1_funct_2"}, {"positive": false, "params": [{"var": "X1"}],
"pred": "v1_funct_1"}, {"positive": false, "params": [{"var": "X1"}],
"pred": "v1_funct_1"}]}''')
def testSeqModelMemorizesTinyExpr(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=200,
extra_hparams='depth=1',
model_class=cnf_model.CNFSequenceModel)
def testSeqModelMemorizesTinyExprMaskedXent(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=200,
extra_hparams='depth=1,masked_xent=true',
model_class=cnf_model.CNFSequenceModel)
def testSeqModelWorksWithTinyHugeExpr(self):
cnf_model_test_lib.test_memorization(
self, [self.tiny_expr, self.huge_expr],
num_iterations=1,
model_class=cnf_model.CNFSequenceModel)
def testSeqModelWorksWithTinyHugeExprMaskedXent(self):
cnf_model_test_lib.test_memorization(
self, [self.tiny_expr, self.huge_expr],
num_iterations=1,
extra_hparams='masked_xent=true',
model_class=cnf_model.CNFSequenceModel)
def testTreeModelMemorizesTinyExprStdFixedZ(self):
cnf_model_test_lib.test_memorization(self, self.tiny_expr)
def testTreeModelMemorizesTinyExprStdVae(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='objective=vae,min_kl_weight=1')
def testTreeModelMemorizesTinyExprStdIwaeMcSamples2(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='objective=iwae,min_kl_weight=1,mc_samples=2')
def testTreeModelMemorizesTinyExprStdVaeMix(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='objective=vae_mix,batch_size=3',
num_iterations=150)
def testTreeModelMemorizesTinyExprAuxLstmFixedZ(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='model_variants=[aux_lstm]')
def testTreeModelMemorizesTinyExprUncondSibFixedZ(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='model_variants=[uncond_sib]')
def testTreeModelMemorizesTinyExprGatedSigmoidFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated],gate_type=sigmoid')
def testTreeModelMemorizesTinyExprGatedSoftmaxFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated],gate_type=softmax')
def testTreeModelMemorizesTinyExprGatedTiedFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated],gate_tied=true')
def testTreeModelMemorizesTinyExprAuxLstmUncondSibFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[aux_lstm,uncond_sib]')
def testTreeModelMemorizesTinyExprAuxLstmVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=80,
extra_hparams='model_variants=[aux_lstm],objective=vae,min_kl_weight=1')
def testTreeModelMemorizesTinyExprAuxLstmGatedUncondSibFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[aux_lstm,gated,uncond_sib]')
def testTreeModelMemorizesTinyExprGatedUncondSibVae(self):
cnf_model_test_lib.test_memorization(
self, self.tiny_expr, extra_hparams='model_variants=[gated,uncond_sib]')
def testTreeModelMemorizesTinyExprAuxLstmGatedLayerNormUncondSibVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[aux_lstm,gated,layer_norm,uncond_sib]')
def testTreeModelMemorizesTinyExprGatedLayerNormUncondSibVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated,layer_norm,uncond_sib]')
def testTreeModelWorksWithTinyExprTanhMostVariationssVae(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
num_iterations=1,
extra_hparams='model_variants=[gated,layer_norm,rev_read,uncond_sib],'
'act_fn=tanh,objective=vae,min_kl_weight=1')
def testTreeModelMemorizesTinyExprMostVariationsDeepFixedZ(self):
cnf_model_test_lib.test_memorization(
self,
self.tiny_expr,
extra_hparams='model_variants=[gated,layer_norm,rev_read,uncond_sib],'
'highway_layers=5,op_hidden=1')
if __name__ == '__main__':
tf.test.main()
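# --- Hedged usage note (editor's addition): a single case can be run by name,
# e.g. `python cnf_model_test.py CnfModelTest.testSeqModelMemorizesTinyExpr`,
# since tf.test.main() delegates to the standard unittest runner.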
| 42.871921 | 80 | 0.649087 | 1,005 | 8,703 | 5.316418 | 0.20597 | 0.041924 | 0.04941 | 0.061763 | 0.618566 | 0.615759 | 0.579075 | 0.537339 | 0.529478 | 0.467153 | 0 | 0.025963 | 0.167988 | 8,703 | 202 | 81 | 43.084158 | 0.711918 | 0.091693 | 0 | 0.5 | 0 | 0 | 0.153846 | 0.14762 | 0 | 0 | 0 | 0 | 0 | 1 | 0.177966 | false | 0 | 0.059322 | 0 | 0.262712 | 0.008475 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d41dac3657c44fb744404a9ec259d063e17765ee | 3,905 | py | Python | codes/utils.py | k1101jh/Alpha-Zero | f2a83f430f186ad0633e38baa31c2a042e1305b6 | [
"Apache-2.0"
] | 1 | 2021-11-12T13:36:47.000Z | 2021-11-12T13:36:47.000Z | codes/utils.py | k1101jh/Alpha-Zero | f2a83f430f186ad0633e38baa31c2a042e1305b6 | [
"Apache-2.0"
] | null | null | null | codes/utils.py | k1101jh/Alpha-Zero | f2a83f430f186ad0633e38baa31c2a042e1305b6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import io
import importlib
import math
from typing import Dict, Iterable, List, Optional, Tuple
from games.game_types import Move, Player
from games.game_types import Point
from games.game_types import game_name_dict
from games.game_types import game_state_dict
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding='utf-8')
COLS = 'ABCDEFGHJKLMNOPQRSTUVWXYZ'
EMPTY = 0
STONE_TO_CHAR = {
EMPTY: '━╋━',
Player.black.value: ' ○ ',
Player.white.value: ' ● ',
}
def get_rule_constructor(game_name: str, rule_name: str):
module = importlib.import_module(f'games.{game_name_dict[game_name]}.rule')
constructor = getattr(module, rule_name)
return constructor
def get_game_state_constructor(name: str):
module = importlib.import_module(f'games.{game_name_dict[name]}.{game_name_dict[name]}_game_state')
constructor = getattr(module, game_state_dict[name])
return constructor
def print_turn(game_state) -> None:
print(f'{game_state.player.name} turn!')
sys.stdout.flush()
def print_move(player_move: Move) -> None:
if player_move is not None:
player = player_move[0]
move = player_move[1]
if move.is_pass:
move_str = 'passes'
else:
move_str = '%s%d' % (COLS[move.point.col], move.point.row + 1)
print('%s %s' % (player, move_str))
sys.stdout.flush()
def print_board(board) -> None:
board_size = board.get_board_size()
for row in range(board_size - 1, -1, -1):
bump = " " if row <= board_size else ""
line = []
for col in range(0, board_size):
stone = board.get(Point(row=row, col=col))
line.append(STONE_TO_CHAR[stone])
print('%s%2d %s' % (bump, row + 1, ''.join(line)))
print(' ' + ' '.join(COLS[:board_size]))
sys.stdout.flush()
def print_visit_count(visit_counts: Optional[Iterable[int]]) -> None:
if visit_counts is not None:
board_size = int(math.sqrt(len(visit_counts)))
for row in range(board_size - 1, -1, -1):
bump = " " if row <= board_size else ""
print('\n%s%2d' % (bump, row + 1), end='')
for col in range(0, board_size):
visit_count = visit_counts[row * board_size + col]
print('%4d ' % (visit_count), end='')
print('')
print(' ' + ' '.join(COLS[:board_size]))
sys.stdout.flush()
def print_winner(winner: Player) -> None:
if winner is Player.both:
print("DRAW!!!")
else:
print(winner.name, "WINS!!!")
sys.stdout.flush()
def point_from_coords(coords: str) -> Point:
col = COLS.index(coords[0])
row = int(coords[1:]) - 1
return Point(row=row, col=col)
def is_on_grid(point: Point, board_size: int) -> bool:
"""[summary]
check point is on grid
Args:
point (Point): [description]
board_size (int): Size of board.
Returns:
bool: Is point on board.
"""
return 0 <= point.row < board_size and 0 <= point.col < board_size
def get_agent_filename(game_name: str, version: int, postfix: str = "", prefix: str = "") -> str:
cur_file_path = os.path.abspath(__file__)
project_path = os.path.dirname(os.path.dirname(cur_file_path))
dir_path = os.path.join(project_path, f'trained_models/{game_name}')
file_name = f'{postfix}-v{version}{prefix}.pth'
os.makedirs(dir_path, exist_ok=True)
return os.path.join(dir_path, file_name)
def copy_list(input_list: List) -> List:
    # note: the element-wise loop is a no-op; this amounts to a shallow copy
    ret = input_list.copy()
    for idx, item in enumerate(ret):
        ret[idx] = item
    return ret


def copy_dict(input_dict: Dict) -> Dict:
    # note: as with copy_list, the loop is a no-op; this amounts to a shallow copy
    ret = input_dict.copy()
    for key, value in ret.items():
        ret[key] = value
    return ret
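# --- Hedged usage sketch (editor's addition): converting a human coordinate
# such as "C4" into a Point and validating it against a 9x9 board.
def _demo_coords():
    point = point_from_coords("C4")          # -> Point(row=3, col=2)
    return is_on_grid(point, board_size=9)   # -> True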
| 29.583333 | 103 | 0.628169 | 556 | 3,905 | 4.235612 | 0.232014 | 0.061147 | 0.029724 | 0.036093 | 0.22845 | 0.164756 | 0.140977 | 0.121444 | 0.121444 | 0.121444 | 0 | 0.008317 | 0.230218 | 3,905 | 131 | 104 | 29.80916 | 0.773453 | 0.044558 | 0 | 0.206522 | 0 | 0 | 0.080898 | 0.056006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0.021739 | 0.130435 | 0 | 0.336957 | 0.163043 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d41fccb7523d2734e1207ea00040f62a02a41e0c | 1,545 | py | Python | obdlib/uart.py | s-s-boika/obdlib | 5b0b35276575a522d20858b6993a9bebf0acc968 | [
"MIT"
] | 9 | 2015-07-14T07:15:58.000Z | 2021-06-03T01:42:19.000Z | obdlib/uart.py | s-s-boika/obdlib | 5b0b35276575a522d20858b6993a9bebf0acc968 | [
"MIT"
] | null | null | null | obdlib/uart.py | s-s-boika/obdlib | 5b0b35276575a522d20858b6993a9bebf0acc968 | [
"MIT"
] | 4 | 2015-07-15T09:05:46.000Z | 2022-02-06T04:28:53.000Z | DEFAULT_BAUDRATE = 38400
import sys
if (hasattr(sys, 'implementation') and
sys.implementation.name == 'micropython'):
# if using pyBoard
from pyb import UART as uart_base
else:
from serial import Serial as uart_base
from obdlib.logging import logger
class UART(object):
def __init__(self):
self.bus_name = uart_base.__name__
self.bus = None
self.map = {}
def connection(self, port, baudrate=DEFAULT_BAUDRATE):
try:
self.bus = uart_base(port, baudrate)
self._mapping()
except Exception as err:
# logging exception
logger.error(err)
return None
return self
def __getattr__(self, item):
def args_wrapper(*args, **kwargs):
try:
response = getattr(self.bus, item)(*args, **kwargs)
except AttributeError:
response = self._invoke_mapping(item, *args, **kwargs)
return response
return args_wrapper
def _invoke_mapping(self, method, *args, **kwargs):
try:
item = self.map[self.bus_name][method]
return getattr(self.bus, item)(*args, **kwargs) if item else None
except KeyError:
raise Exception(
"Unregistered method or attribute {}".format(method))
def _mapping(self):
self.map = {
"UART": {
"close": "deinit",
"flushInput": "",
"flushOutput": ""
},
}
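# --- Hedged usage sketch (editor's addition): the adapter above exposes one
# API over both pyserial and pyb. The port name is platform-specific.
# bus = UART().connection('/dev/ttyUSB0', baudrate=DEFAULT_BAUDRATE)
# if bus is not None:
#     bus.write(b'ATZ\r')
#     bus.close()  # mapped to deinit() on the pyboard backend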
| 27.105263 | 77 | 0.55534 | 160 | 1,545 | 5.1875 | 0.36875 | 0.050602 | 0.050602 | 0.043373 | 0.06747 | 0.06747 | 0 | 0 | 0 | 0 | 0 | 0.004946 | 0.345631 | 1,545 | 56 | 78 | 27.589286 | 0.816024 | 0.022006 | 0 | 0.068182 | 0 | 0 | 0.06366 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d423ccff923c193b1018a53c447911773c46989d | 1,352 | py | Python | 4-gen_diffs.py | lcmd-epfl/Local_Kernel_Regression | 5be1b01fb347a8b8473ffe62a2f2d1fe2c1c4157 | [
"MIT"
] | null | null | null | 4-gen_diffs.py | lcmd-epfl/Local_Kernel_Regression | 5be1b01fb347a8b8473ffe62a2f2d1fe2c1c4157 | [
"MIT"
] | null | null | null | 4-gen_diffs.py | lcmd-epfl/Local_Kernel_Regression | 5be1b01fb347a8b8473ffe62a2f2d1fe2c1c4157 | [
"MIT"
] | null | null | null | import numpy as np
import time
import sklearn as sk
from sklearn import metrics
import gc
import sys
import pickle
init_t = time.time()
at = sys.argv[1]
ref_envs = np.load('./data/train_envs.npy',
allow_pickle=True).item()[at]
reps_dict = np.load('./data/red_reps_dict.npy', allow_pickle=True)
attypes = np.load('./data/attypes.npy', allow_pickle=True)
gc.collect()
t1 = time.time()
with open('progress_gen__diffs_{}.txt'.format(at), 'a') as file:
file.write('Reps loaded, time: {} \n'.format(time.time() - t1))
atom_projections = []
with open('progress_gen__diffs_{}.txt'.format(at), 'a') as file:
file.write('Starting train products at {} \n'.format(time.time() - init_t))
atom_diffs = []
t1 = time.time()
for i in range(len(reps_dict[:])):
repd = reps_dict[i]
rep_at_envs = repd[at]
if len(rep_at_envs) > 0:
atom_diff = sk.metrics.pairwise_distances(
rep_at_envs, ref_envs, n_jobs=-1).T
else:
atom_diff = []
atom_diffs.append(atom_diff)
if i % 100 == 0:
with open('progress_gen__diffs_{}.txt'.format(at), 'a') as file:
file.write(' Train mol {}, cost: {} \n'.format(
i, time.time() - t1))
t1 = time.time()
with open('./euclideans/{}_diffs.npy'.format(at), "wb") as fp:
pickle.dump(atom_diffs, fp)
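# --- Hedged round-trip check (editor's addition): despite the .npy suffix the
# dump above is a plain pickle stream, so it reads back with pickle.load.
with open('./euclideans/{}_diffs.npy'.format(at), 'rb') as fp:
    assert len(pickle.load(fp)) == len(atom_diffs)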
| 22.163934 | 79 | 0.623521 | 207 | 1,352 | 3.874396 | 0.338164 | 0.069825 | 0.037406 | 0.067332 | 0.225686 | 0.190773 | 0.190773 | 0.190773 | 0.190773 | 0.190773 | 0 | 0.011246 | 0.210799 | 1,352 | 60 | 80 | 22.533333 | 0.740394 | 0 | 0 | 0.157895 | 0 | 0 | 0.189349 | 0.109467 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.184211 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d426fe625d6ef6fff91e3cb4328137b87944afc4 | 6,540 | py | Python | crypto.py | aivalja/Crypto-Scrooge | 98fb08f3eb1008de66e02b30a70f6e772c095a48 | [
"MIT"
] | null | null | null | crypto.py | aivalja/Crypto-Scrooge | 98fb08f3eb1008de66e02b30a70f6e772c095a48 | [
"MIT"
] | null | null | null | crypto.py | aivalja/Crypto-Scrooge | 98fb08f3eb1008de66e02b30a70f6e772c095a48 | [
"MIT"
] | null | null | null | from json import loads
import argparse
from urllib.request import urlopen
from pprint import pprint
from datetime import datetime
def main():
parser = argparse.ArgumentParser()
parser.add_argument("start_date", help="Start date in format DD.MM.YYYY",
type=lambda s: datetime.strptime(s, '%d.%m.%Y'))
parser.add_argument("end_date", help="End date in format DD.MM.YYYY",
type=lambda s: datetime.strptime(s, '%d.%m.%Y'))
args = parser.parse_args()
start_date = args.start_date
end_date = args.end_date
if(start_date > end_date):
parser.error("start_date must be before end_date")
# Can be changed to use different coins
coin = "bitcoin"
coin_history = get_price_history(start_date, end_date, coin)
price_history = parse_data_history(coin_history, start_date, end_date, "prices")
longest_bear = get_longest_bear(price_history)
volume_history = parse_data_history(coin_history, start_date, end_date, "total_volumes")
optimal_investment_dates = get_optimal_investment(price_history)
highest_volume = get_highest_volume(volume_history)
print(f"Longest bearish trend: {longest_bear[0]}")
print(
f"Highest volume: {highest_volume[0]} on "
f"{highest_volume[1].strftime('%d.%m.%Y')}"
)
if(optimal_investment_dates[0] == 0):
print(f"Optimal investment dates: do not invest in this period")
else:
print(
f"Optimal investment dates: buy on "
f"{optimal_investment_dates[0].strftime('%d.%m.%Y')} and sell on "
f"{optimal_investment_dates[1].strftime('%d.%m.%Y')}"
)
def get_highest_volume(volume_history):
"""Returns the date with highest trading volume.
Return is format: [highest volume, date with highest volume].
"""
highest_volume = 0
highest_volume_timestamp = 0
for timestamp in volume_history:
if(volume_history[timestamp] > highest_volume):
highest_volume = volume_history[timestamp]
highest_volume_timestamp = timestamp
return([highest_volume,
datetime.fromtimestamp(highest_volume_timestamp / 1000)])
def get_optimal_investment(price_history):
"""Returns optimal dates to buy and sell given coin"""
sorted_history = sorted(price_history, key=price_history.get)
largest_profit = 0
largest_profit_dates = [0,0]
complete = False
current_end_price = 0
current_start_price = 0
# Loop from largest price down
for current_end_time in reversed(sorted_history):
current_end_price = price_history[current_end_time]
if(current_end_price - price_history[sorted_history[0]]
< largest_profit):
# With current end price not possible to find better deal
break
# Loop from smallest price up
for current_start_time in sorted_history:
current_start_price = price_history[current_start_time]
if(current_end_price - current_start_price > largest_profit):
if(current_end_time > current_start_time):
# Found start price that is before end price, best deal
# with current end_price
largest_profit = current_end_price - current_start_price
largest_profit_dates = [
datetime.fromtimestamp(current_start_time / 1000),
datetime.fromtimestamp(current_end_time / 1000)
]
break
else:
# Largest possible profit is smaller than previously found
break
return(largest_profit_dates)
def get_longest_bear(price_history):
"""Get longest bear trend length and dates
Return is format: [longest bear length, start date, end date].
"""
previous_price = 0
longest_bear_start = 0
longest_bear_end = 0
longest_bear_length = 0
current_bear_start = 0
current_bear_length = 0
for timestamp in price_history:
if(price_history[timestamp] < previous_price):
current_bear_length += 1
else:
current_bear_start = timestamp
current_bear_length = 0
if(current_bear_length > longest_bear_length):
longest_bear_length = current_bear_length
longest_bear_start = current_bear_start
longest_bear_end = timestamp
previous_price = price_history[timestamp]
return([
longest_bear_length, datetime.fromtimestamp(longest_bear_start / 1000),
datetime.fromtimestamp(longest_bear_end / 1000)
])
def parse_data_history(data, start_date, end_date, data_type):
"""Parse price history to include only datapoints closest to midnight UTC."""
datapoints = {}
previous_value = data[data_type][0]
expected_timestamp = int(datetime.timestamp(start_date) * 1000)
end_timestamp = int(datetime.timestamp(end_date) * 1000)
for k in data[data_type]:
# Timestamp is correct
if(k[0] == expected_timestamp):
datapoints[k[0]] = k[1]
# Set expected timestamp to next day
expected_timestamp += 86400000
elif(k[0] > expected_timestamp):
# Current value was closer to midnight than previous
if(abs(k[0] - expected_timestamp)
< abs(previous_value[0] - expected_timestamp)):
datapoints[k[0]] = k[1]
#Previous value was closer to midnight
else:
datapoints[previous_value[0]] = previous_value[1]
expected_timestamp += 86400000
if(end_timestamp < expected_timestamp):
break
previous_value = k
return datapoints
def get_price_history(start_date, end_date, coin):
"""Get price history for given coin and date period."""
    # A one-hour buffer is subtracted from the start and added to the end
    # so that the datapoints nearest both midnights are included
with urlopen(f"https://api.coingecko.com/api/v3/coins/{coin}"
f"/market_chart/range?vs_currency=eur&from="
f"{str(datetime.timestamp(start_date) - 3600)}&to="
f"{str(datetime.timestamp(end_date) + 3600)}") as response:
response_content = response.read()
response_content.decode('utf-8')
json_response = loads(response_content)
return json_response
if __name__ == '__main__':
main()
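# --- Hedged usage note (editor's addition): invoked from the command line with
# the two dates defined by the argparse setup in main(), e.g.
#   python crypto.py 01.01.2021 31.01.2021
# which prints the longest bearish trend, the highest-volume day, and the
# optimal buy/sell dates for bitcoin over that period.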
| 38.928571 | 92 | 0.647706 | 805 | 6,540 | 4.991304 | 0.201242 | 0.050772 | 0.023892 | 0.031857 | 0.260577 | 0.106023 | 0.106023 | 0.106023 | 0.050274 | 0.050274 | 0 | 0.019665 | 0.269113 | 6,540 | 167 | 93 | 39.161677 | 0.820921 | 0.136697 | 0 | 0.144 | 0 | 0 | 0.118595 | 0.044608 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048 | false | 0 | 0.04 | 0 | 0.104 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4276c833da8da729c76c47b1baa7513e58dc63c | 4,504 | py | Python | main.py | pechhenka/itmo_parser_table | 8cefcad00290539872759c6a4dc8050e03333a1f | [
"Unlicense"
] | null | null | null | main.py | pechhenka/itmo_parser_table | 8cefcad00290539872759c6a4dc8050e03333a1f | [
"Unlicense"
] | null | null | null | main.py | pechhenka/itmo_parser_table | 8cefcad00290539872759c6a4dc8050e03333a1f | [
"Unlicense"
] | null | null | null | import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
url = "https://abit.itmo.ru/bachelor/rating_rank/all/261/"
required_name = 'Шараев Павел Ильдарович'
def write_to_file(result):
global required_name
import xlsxwriter
with xlsxwriter.Workbook('result.xlsx') as workbook:
        worksheet = workbook.add_worksheet('Table')
        # column headers (translated from the Russian originals)
        worksheet.write_row(0, 0, ['No.', 'No. in admission group', 'Admission condition', 'No. in list',
                                   'Application number', 'Full name', 'Type', 'M', 'R', 'I', 'Exam+IA', 'Exam', 'IA',
                                   'Consent to enroll', 'Preferential right', 'Olympiad', 'Status'])
data_format1 = workbook.add_format({'bg_color': '#16de69'})
gray = workbook.add_format({'bg_color': '#dbdbdb'})
white = workbook.add_format({'bg_color': '#ffffff'})
current_color = gray
last_color = white
last_cond = result[0][0]
j = 1
for i in range(len(result)):
if i > 0 and result[i][0] == last_cond:
j += 1
else:
j = 1
current_color, last_color = last_color, current_color
last_cond = result[i][0]
if required_name == result[i][3]:
worksheet.write_row(i + 1, 0, [i + 1, j] + result[i], data_format1)
else:
worksheet.write_row(i + 1, 0, [i + 1, j] + result[i], current_color)
def cmp_items(a, b):
def convert(l):
        # admission-condition strings, kept in Russian because they must match the scraped site text verbatim
        condition_key = {'без вступительных испытаний': 4,
                         'на бюджетное место в пределах особой квоты': 3,
                         'на бюджетное место в пределах целевой квоты': 2,
                         'по общему конкурсу': 1,
                         'на контрактной основе': 0}
        l[0] = condition_key[l[0]]
        l[8] = int(l[8] or 0)
        l[11] = 1 if l[11] == 'Да' else 0  # 'Да' means 'Yes' in the scraped table
        l[12] = 1 if l[12] == 'Да' else 0
return l
a = convert(a.copy());
b = convert(b.copy())
r = 0
if a[11] > b[11]:
        r = -1  # consent to enroll
elif a[11] < b[11]:
r = 1
else:
if a[0] > b[0]:
            r = -1  # admission condition (olympiad / quota / contract ...)
elif a[0] < b[0]:
r = 1
else:
if a[12] > b[12]:
                r = -1  # preferential right
elif a[12] > b[12]:
r = 1
else:
if a[8] > b[8]:
                    r = -1  # total exam + individual-achievement score
elif a[8] < b[8]:
r = 1
else:
r = 0
return r
last_condition = ''
def parse_row(row):
def to_int_possible(a):
try:
r = int(a)
except:
r = ''
return r
global last_condition
cells = row.find_all('td')
if len(cells) == 15:
last_condition = cells[0].getText()
cells = row.find_all('td', {'rowspan': None})
condition = last_condition
number_1 = int(cells[0].getText())
number_2 = int(cells[1].getText())
full_name = cells[2].getText()
mode = cells[3].getText()
m = to_int_possible(cells[4].getText())
r = to_int_possible(cells[5].getText())
i = to_int_possible(cells[6].getText())
exam_and_ia = to_int_possible(cells[7].getText())
exam = to_int_possible(cells[8].getText())
ia = to_int_possible(cells[9].getText())
agreement = cells[10].getText()
advantage = cells[11].getText()
olympiad = cells[12].getText()
status = cells[13].getText()
res = [condition, number_1, number_2, full_name,
mode, m, r, i,
exam_and_ia, exam, ia,
agreement, advantage, olympiad, status]
return res
def main():
    print('Downloading page:', url)
    import requests
    r = requests.get(url, verify=False)  # fetch the page
    print('Locating the table')
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(r.text, features='html.parser')  # parse the HTML
    rows = soup.find_all('tr', {'class': None})  # collect the table rows
    print('Parsing the table')
    result = []
    for row in rows:
        res = parse_row(row)
        result.append(res)
    print('Ranking the table')
    from functools import cmp_to_key
    result = sorted(result, key=cmp_to_key(cmp_items))
    print('Writing the table to result.xlsx')
    write_to_file(result)
    print('Done!')
if __name__ == '__main__':
main()
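# --- Hedged usage note (editor's addition): running the script downloads the
# ranking page, sorts applicants by the admission rules encoded in cmp_items,
# and writes result.xlsx with `required_name` highlighted in green.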
| 30.026667 | 118 | 0.538854 | 577 | 4,504 | 4.071057 | 0.317158 | 0.006811 | 0.03874 | 0.045977 | 0.140485 | 0.049383 | 0.025543 | 0.025543 | 0.025543 | 0.025543 | 0 | 0.036634 | 0.327265 | 4,504 | 149 | 119 | 30.228188 | 0.737954 | 0.033082 | 0 | 0.165289 | 0 | 0 | 0.133425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.049587 | false | 0 | 0.041322 | 0 | 0.123967 | 0.049587 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4284956c70586476c90480dd8b13432ac59b14e | 3,503 | py | Python | makeplotz.py | Munfred/scutils | 9c6b6ce078c0b2aeb7a9f801933cf767197f4efd | [
"Unlicense"
] | null | null | null | makeplotz.py | Munfred/scutils | 9c6b6ce078c0b2aeb7a9f801933cf767197f4efd | [
"Unlicense"
] | null | null | null | makeplotz.py | Munfred/scutils | 9c6b6ce078c0b2aeb7a9f801933cf767197f4efd | [
"Unlicense"
] | null | null | null | ### EXAMPLE JOB SUBMISSION ###
# sbatch -A SternbergGroup --gres gpu --mem=64000 -t 15:00:00 --ntasks 10 --nodes 1 --job-name "bcbg" --wrap "python bcbg.py"
### CHANGE THE FILENAMES BELOW BEFORE RUNNING ###
model_name = 'blah'
adata_file = '../blah.h5ad'
######################################
import sys
import warnings; warnings.simplefilter('ignore')
import os
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
from scvi.dataset import GeneExpressionDataset
from scvi.models import VAE
from scvi.inference import UnsupervisedTrainer
import torch
import anndata
import scvi
import datetime
import plotly.express as px
import plotly.graph_objects as go
from anndata import AnnData
from umap import UMAP
from fastTSNE import TSNE
from fastTSNE.callbacks import ErrorLogger
import plotnine as p
print('Starting makeplotz with model:', model_name)
##### PLOTTING FUNCTIONS ######
def isnotebook():
# return false if not running on a notebook to avoid drawing and wasting time
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def derplot(adata=None, filename='derplot',embedding='tsne',feature='sample_type_tech',
size=(12, 12), save=False, draw=False, psize=3):
start = datetime.datetime.now()
p.options.figure_size = size
savename=filename +'.' + embedding + '.' + feature + '.png'
print(start.strftime("%H:%M:%S"), 'Starting ... \t',savename, )
p.theme_set(p.theme_classic())
pt = \
p.ggplot(p.aes(embedding +'0', embedding + '1', color=feature), adata.obs) \
+ p.geom_point(size=psize, alpha = 1, stroke = 0 ) \
+ p.guides(color = p.guide_legend(override_aes={'size': 15}))
if isnotebook() and draw: pt.draw()
if save: pt.save(savename, format='png', dpi=200)
end = datetime.datetime.now()
delta = end-start
print(start.strftime("%H:%M:%S"), str(int(delta.total_seconds())), 's to make: \t', savename)
return(pt)
def wraplot(adata=None, filename='wraplot',embedding='tsne',feature='sample_type_tech',
size=(12, 12), color=None, save=False, draw=False, psize=3):
start = datetime.datetime.now()
p.options.figure_size = size
savename = filename +'.' + embedding + '.' + feature + '.' + str(color) + '.png'
    if color is None:
        color = feature
savename = filename +'.' + embedding + '.' + feature + '.png'
print(start.strftime("%H:%M:%S"), 'Starting ... \t',savename, )
pt = (
p.ggplot(p.aes(x= embedding+'0', y=embedding+'1', color=color), adata.obs)
+ p.geom_point(color='lightgrey', shape = '.', data=adata.obs.drop(feature, axis = 1))
+ p.geom_point(shape='.', size=psize, alpha = 1, stroke = 0 )
+ p.theme_minimal()
+ p.facet_wrap('~' + feature )
+ p.guides(color = p.guide_legend(override_aes={'size': 10}))
)
if isnotebook() and draw: pt.draw()
if save: pt.save(savename, format='png', dpi=200)
end = datetime.datetime.now()
delta = end-start
print(start.strftime("%H:%M:%S"), str(int(delta.total_seconds())), 's to make: \t', savename)
return(pt)
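# --- Hedged usage sketch (editor's addition): both plotters expect precomputed
# embedding columns ('tsne0'/'tsne1' or 'umap0'/'umap1') in adata.obs; the
# feature name below is hypothetical.
# derplot(adata, filename=model_name, embedding='tsne', feature='cell_type', save=True)
# wraplot(adata, filename=model_name, embedding='umap', feature='cell_type', save=True)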
| 35.383838 | 126 | 0.633172 | 451 | 3,503 | 4.847007 | 0.37694 | 0.020128 | 0.034767 | 0.034767 | 0.417658 | 0.389296 | 0.389296 | 0.36871 | 0.36871 | 0.294602 | 0 | 0.015913 | 0.210677 | 3,503 | 98 | 127 | 35.744898 | 0.774684 | 0.10962 | 0 | 0.306667 | 0 | 0 | 0.09374 | 0.007866 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.28 | 0 | 0.373333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d42854dec66dab2bfdf36e91642dcd8421c577f7 | 1,244 | py | Python | examples/injecting_existing_instance/injecting_existing_instance_example.py | allrod5/injectable | 74e640f0911480fb06fa97c1a468c3863541c0fd | [
"MIT"
] | 71 | 2018-02-05T04:12:27.000Z | 2022-02-15T23:08:16.000Z | examples/injecting_existing_instance/injecting_existing_instance_example.py | Euraxluo/injectable | 74e640f0911480fb06fa97c1a468c3863541c0fd | [
"MIT"
] | 104 | 2018-02-06T23:37:36.000Z | 2021-08-25T04:50:15.000Z | examples/injecting_existing_instance/injecting_existing_instance_example.py | Euraxluo/injectable | 74e640f0911480fb06fa97c1a468c3863541c0fd | [
"MIT"
] | 13 | 2019-02-10T18:52:50.000Z | 2022-01-26T17:12:35.000Z | """
In this example you'll see how to supply an already-initialized instance as injectable.
For whatever reason we have already initialized an instance of ``Application`` and
assigned it to the ``app`` variable so we use the
:meth:`injectable_factory <injectable.injectable_factory>` decorator in a lambda which
in turn just returns the existing ``app``.
Now our ``InjectingExistingInstance`` example class can be injected with our existing
``Application`` instance.
.. seealso::
The :meth:`injectable_factory <injectable.injectable_factory>` decorator can also be
used in regular functions and not just in lambdas. The :ref:`factory_example` shows
how to use it.
"""
# sphinx-start
from examples import Example
from examples.injecting_existing_instance.app import Application
from injectable import autowired, Autowired, load_injection_container
class InjectingExistingInstance(Example):
@autowired
def __init__(
self,
app: Autowired(Application),
):
self.app = app
def run(self):
print(self.app.number)
# 42
def run_example():
load_injection_container()
example = InjectingExistingInstance()
example.run()
if __name__ == "__main__":
run_example()
| 27.644444 | 88 | 0.734727 | 156 | 1,244 | 5.698718 | 0.49359 | 0.07649 | 0.038245 | 0.053993 | 0.134983 | 0.134983 | 0.134983 | 0.134983 | 0 | 0 | 0 | 0.00197 | 0.184084 | 1,244 | 44 | 89 | 28.272727 | 0.873892 | 0.55627 | 0 | 0 | 0 | 0 | 0.014733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.388889 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d42903bc52bf571f31d31df1a834976bfcbfa6a0 | 2,736 | py | Python | fHDHR/device/tuners/__init__.py | crackers8199/fHDHR_Youtube-IHOPKC | e1878d972ffba96ff813690630e30e2de8b5f504 | [
"WTFPL"
] | null | null | null | fHDHR/device/tuners/__init__.py | crackers8199/fHDHR_Youtube-IHOPKC | e1878d972ffba96ff813690630e30e2de8b5f504 | [
"WTFPL"
] | null | null | null | fHDHR/device/tuners/__init__.py | crackers8199/fHDHR_Youtube-IHOPKC | e1878d972ffba96ff813690630e30e2de8b5f504 | [
"WTFPL"
] | null | null | null |
from fHDHR.exceptions import TunerError
from .tuner import Tuner
class Tuners():
def __init__(self, fhdhr, epg, channels):
self.fhdhr = fhdhr
self.channels = channels
self.epg = epg
self.max_tuners = int(self.fhdhr.config.dict["fhdhr"]["tuner_count"])
self.tuners = {}
for i in range(1, self.max_tuners + 1):
self.tuners[i] = Tuner(fhdhr, i, epg)
def tuner_grab(self, tuner_number):
if int(tuner_number) not in list(self.tuners.keys()):
self.fhdhr.logger.error("Tuner %s does not exist." % str(tuner_number))
raise TunerError("806 - Tune Failed")
        # TunerError is raised if the tuner is unavailable
self.tuners[int(tuner_number)].grab()
return tuner_number
def first_available(self):
if not self.available_tuner_count():
raise TunerError("805 - All Tuners In Use")
for tunernum in list(self.tuners.keys()):
try:
self.tuners[int(tunernum)].grab()
except TunerError:
continue
else:
return tunernum
raise TunerError("805 - All Tuners In Use")
def tuner_close(self, tunernum):
self.tuners[int(tunernum)].close()
def status(self):
all_status = {}
for tunernum in list(self.tuners.keys()):
all_status[tunernum] = self.tuners[int(tunernum)].get_status()
return all_status
def available_tuner_count(self):
available_tuners = 0
for tunernum in list(self.tuners.keys()):
tuner_status = self.tuners[int(tunernum)].get_status()
if tuner_status["status"] == "Inactive":
available_tuners += 1
return available_tuners
def inuse_tuner_count(self):
inuse_tuners = 0
for tunernum in list(self.tuners.keys()):
tuner_status = self.tuners[int(tunernum)].get_status()
if tuner_status["status"] == "Active":
inuse_tuners += 1
return inuse_tuners
def get_stream_info(self, stream_args):
stream_args["channelUri"] = self.channels.get_channel_stream(str(stream_args["channel"]))
if not stream_args["channelUri"]:
raise TunerError("806 - Tune Failed")
channelUri_headers = self.fhdhr.web.session.head(stream_args["channelUri"]).headers
stream_args["true_content_type"] = channelUri_headers['Content-Type']
if stream_args["true_content_type"].startswith(tuple(["application/", "text/"])):
stream_args["content_type"] = "video/mpeg"
else:
stream_args["content_type"] = stream_args["true_content_type"]
return stream_args
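# --- Hedged usage sketch (editor's addition): the grab/close lifecycle implied
# by the API above. `fhdhr`, `epg` and `channels` are whatever the host app provides.
def _demo_tuner_cycle(fhdhr, epg, channels):
    tuners = Tuners(fhdhr, epg, channels)
    tunernum = tuners.first_available()  # raises TunerError if all are busy
    try:
        return tuners.status()[tunernum]
    finally:
        tuners.tuner_close(tunernum)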
| 31.448276 | 97 | 0.612208 | 325 | 2,736 | 4.963077 | 0.236923 | 0.080595 | 0.048357 | 0.049597 | 0.33478 | 0.218227 | 0.199628 | 0.121513 | 0.121513 | 0.121513 | 0 | 0.009068 | 0.274488 | 2,736 | 86 | 98 | 31.813953 | 0.803526 | 0.013158 | 0 | 0.2 | 0 | 0 | 0.110122 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.033333 | 0 | 0.283333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d42f67e9e0f814fed2def88800445e02b82253cc | 401 | py | Python | ARC/arc001-arc050/arc040/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ARC/arc001-arc050/arc040/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ARC/arc001-arc050/arc040/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
n = int(input())
r_count = 0
b_count = 0
for i in range(n):
si = input()
r_count += si.count('R')
b_count += si.count('B')
if r_count > b_count:
print('TAKAHASHI')
elif r_count < b_count:
print('AOKI')
else:
print('DRAW')
if __name__ == '__main__':
main()
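# Worked example (hypothetical input, not from the original task statement):
# n = 2 with rows "RRB" and "RBB" gives r_count == b_count == 3, so the
# program prints DRAW.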
| 16.708333 | 33 | 0.456359 | 52 | 401 | 3.211538 | 0.480769 | 0.143713 | 0.131737 | 0.143713 | 0.203593 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012195 | 0.386534 | 401 | 23 | 34 | 17.434783 | 0.666667 | 0.052369 | 0 | 0 | 0 | 0 | 0.076056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.0625 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4305b27996382bcf34ae4f2d283d939605b8ef7 | 6,622 | py | Python | labs/lab3/lab3b.py | MITLLRacecar/racecar-parth-kocheta | 6e244575b83e312880c5540342a380364032d326 | [
"MIT"
] | 1 | 2021-08-01T17:06:39.000Z | 2021-08-01T17:06:39.000Z | labs/lab3/lab3b.py | MITLLRacecar/racecar-parth-kocheta | 6e244575b83e312880c5540342a380364032d326 | [
"MIT"
] | null | null | null | labs/lab3/lab3b.py | MITLLRacecar/racecar-parth-kocheta | 6e244575b83e312880c5540342a380364032d326 | [
"MIT"
] | null | null | null | """
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
Lab 3B - Depth Camera Cone Parking
"""
########################################################################################
# Imports
########################################################################################
import sys
import cv2 as cv
import numpy as np
from typing import Any, Tuple, List, Optional
from nptyping import NDArray
from enum import IntEnum
sys.path.insert(0, "../../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
# Sets up the racecar object
rc = racecar_core.create_racecar()
# >> Constants
# The smallest contour we will recognize as a valid contour
MIN_CONTOUR_AREA = 30
# The HSV range for the color orange, stored as (hsv_min, hsv_max)
ORANGE = ((10, 100, 100), (20, 255, 255))
# >> Variables
speed = 0.0 # The current speed of the car
angle = 0.0 # The current angle of the car's wheels
contour_center = None # The (pixel row, pixel column) of contour
contour_area = 0 # The area of contour
# Add any global variables here
isParked = False # Set to true once the car has stopped around 30cm in front of the cone
########################################################################################
# Functions
########################################################################################
class State(IntEnum):
search = 0
approach = 1
curState = State.search
def update_contour():
"""
Finds contours in the current color image and uses them to update contour_center
and contour_area
"""
global contour_center
global contour_area
image = rc.camera.get_color_image()
if image is None:
contour_center = None
contour_area = 0
else:
# Find all of the orange contours
contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])
# Select the largest contour
contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)
if contour is not None:
# Calculate contour information
contour_center = rc_utils.get_contour_center(contour)
contour_area = rc_utils.get_contour_area(contour)
# Draw contour onto the image
rc_utils.draw_contour(image, contour)
rc_utils.draw_circle(image, contour_center)
else:
contour_center = None
contour_area = 0
def get_mask(
image: NDArray[(Any, Any, 3), np.uint8],
hsv_lower: Tuple[int, int, int],
hsv_upper: Tuple[int, int, int]
) -> NDArray[Any, Any]:
"""
Returns a mask containing all of the areas of image which were between hsv_lower and hsv_upper.
Args:
image: The image (stored in BGR) from which to create a mask.
hsv_lower: The lower bound of HSV values to include in the mask.
hsv_upper: The upper bound of HSV values to include in the mask.
"""
# Convert hsv_lower and hsv_upper to numpy arrays so they can be used by OpenCV
hsv_lower = np.array(hsv_lower)
hsv_upper = np.array(hsv_upper)
# TODO: Use the cv.cvtColor function to switch our BGR colors to HSV colors
image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
# TODO: Use the cv.inRange function to highlight areas in the correct range
mask = cv.inRange(image, hsv_lower, hsv_upper)
return mask
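def _demo_get_mask():
    """
    Hedged sketch (not part of the lab): runs get_mask on a tiny synthetic BGR
    image so no camera is needed. The orange-ish pixel (B=0, G=128, R=255)
    converts to roughly HSV (15, 255, 255), inside the ORANGE bounds above.
    """
    img = np.zeros((2, 2, 3), dtype=np.uint8)
    img[0, 0] = (0, 128, 255)
    mask = get_mask(img, ORANGE[0], ORANGE[1])
    print(mask)  # expected: 255 at [0, 0], zeros elsewhere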
def start():
"""
This function is run once every time the start button is pressed
"""
# Have the car begin at a stop
rc.drive.stop()
# Print start message
print(">> Lab 3B - Depth Camera Cone Parking")
def update():
"""
After start() is run, this function is run every frame until the back button
is pressed
"""
# TODO: Park the car 30 cm away from the closest orange cone.
global speed
global angle
    global curState
    global isParked
# Search for contours in the current color image
update_contour()
print(curState)
imgX = rc.camera.get_width()
if contour_center is not None:
        angle = rc_utils.remap_range(contour_center[1], 0, imgX, -1, 1)
if contour_center is None:
        curState = State.search
angle = 1
if curState == State.search:
angle = 1
speed = 0.2
if contour_center is not None:
curState = State.approach
depth_image = rc.camera.get_depth_image()
depth_image_adjust = (depth_image - 0.01) % 9999
depth_image_adjust_blur = cv.GaussianBlur(depth_image_adjust, (11,11), 0)
image = rc.camera.get_color_image()
mask = get_mask(image, ORANGE[0], ORANGE[1])
masked_depth_image = cv.bitwise_and(depth_image, depth_image, mask=mask)
top_left_inclusive = (0, 0)
bottom_right_exclusive = ((rc.camera.get_height() * 4 // 5) , rc.camera.get_width())
cropped_image = rc_utils.crop(masked_depth_image, top_left_inclusive, bottom_right_exclusive)
closest_pixel = rc_utils.get_closest_pixel(cropped_image)
distance = cropped_image[closest_pixel[0], closest_pixel[1]]
rc.display.show_depth_image(cropped_image, points=[closest_pixel])
if curState == State.approach:
if distance < 29:
speed = rc_utils.remap_range(distance, 0, 30, -1, 0)
print("backing")
elif distance < 30:
speed = 0
angle = 0
elif distance > 30 and distance < 100:
speed = rc_utils.remap_range(distance, 30, 1000, 0, 1)
elif distance > 100:
speed = 0.5
rc.drive.set_speed_angle(speed, angle)
    # Clear the parked flag when the Y button is held down
    if rc.controller.is_down(rc.controller.Button.Y):
        isParked = False
        print("not parked")
    # Print the current speed and angle when the A button is held down
    if rc.controller.is_down(rc.controller.Button.A):
print("Speed:", speed, "Angle:", angle)
# Print the center and area of the largest contour when B is held down
if rc.controller.is_down(rc.controller.Button.B):
if contour_center is None:
print("No contour found")
else:
print("Center:", contour_center, "Area:", contour_area)
pass
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
rc.set_start_update(start, update, None)
rc.go() | 31.235849 | 99 | 0.593174 | 855 | 6,622 | 4.438596 | 0.272515 | 0.047958 | 0.017391 | 0.017918 | 0.18498 | 0.140711 | 0.053228 | 0.053228 | 0.043215 | 0.025296 | 0 | 0.019727 | 0.22682 | 6,622 | 212 | 100 | 31.235849 | 0.721484 | 0.269858 | 0 | 0.17757 | 0 | 0 | 0.028408 | 0 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0.037383 | false | 0.009346 | 0.074766 | 0 | 0.149533 | 0.065421 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4318826121e4a0a63a9ff18a3888788c700b50d | 19,272 | py | Python | ska-tmc/ska-tmc-sdpsubarrayleafnode-mid/src/ska_tmc_sdpsubarrayleafnode_mid/sdp_subarray_leaf_node.py | ska-telescope/tmc-prototype | 4138274e933d4b05f7fe9fc34a11c417b6d0d336 | [
"BSD-3-Clause"
] | 3 | 2019-01-10T11:49:36.000Z | 2019-07-19T03:32:52.000Z | ska-tmc/ska-tmc-sdpsubarrayleafnode-mid/src/ska_tmc_sdpsubarrayleafnode_mid/sdp_subarray_leaf_node.py | ska-telescope/tmc-prototype | 4138274e933d4b05f7fe9fc34a11c417b6d0d336 | [
"BSD-3-Clause"
] | 19 | 2019-01-07T14:50:26.000Z | 2019-10-02T13:25:23.000Z | ska-tmc/ska-tmc-sdpsubarrayleafnode-mid/src/ska_tmc_sdpsubarrayleafnode_mid/sdp_subarray_leaf_node.py | ska-telescope/tmc-prototype | 4138274e933d4b05f7fe9fc34a11c417b6d0d336 | [
"BSD-3-Clause"
] | 1 | 2018-12-21T13:39:23.000Z | 2018-12-21T13:39:23.000Z | # -*- coding: utf-8 -*-
#
# This file is part of the SdpSubarrayLeafNode project
#
#
#
# Distributed under the terms of the BSD-3-Clause license.
# See LICENSE.txt for more info.
"""
The SDP Subarray Leaf Node monitors the SDP Subarray and issues control actions during an observation.
It also acts as an SDP contact point for the Subarray Node during observation execution.
"""
# PROTECTED REGION ID(sdpsubarrayleafnode.additionnal_import) ENABLED START #
# Third party imports
import os
# PyTango imports
import tango
import threading
from tango import DebugIt, AttrWriteType, ApiUtil
from tango.server import run, command, device_property, attribute
# Additional imports
from ska.base import SKABaseDevice
from ska.base.control_model import HealthState, ObsState
from ska.base.commands import ResultCode
from tmc.common.tango_client import TangoClient
from tmc.common.tango_server_helper import TangoServerHelper
from . import const, release
from .assign_resources_command import AssignResources
from .release_resources_command import ReleaseAllResources
from .configure_command import Configure
from .scan_command import Scan
from .endscan_command import EndScan
from .end_command import End
from .abort_command import Abort
from .restart_command import Restart
from .obsreset_command import ObsReset
from .telescope_on_command import TelescopeOn
from .telescope_off_command import TelescopeOff
from .reset_command import ResetCommand
from .device_data import DeviceData
from .exceptions import InvalidObsStateError
# PROTECTED REGION END # // SdpSubarrayLeafNode.additionnal_import
__all__ = [
"SdpSubarrayLeafNode",
"main",
"AssignResources",
"const",
"release",
"ReleaseAllResources",
"TelescopeOn",
"TelescopeOff",
"Configure",
"Abort",
"Restart",
"ObsReset",
"Scan",
"End",
"EndScan",
"ResetCommand"
]
# pylint: disable=unused-argument,unused-variable, implicit-str-concat
class SdpSubarrayLeafNode(SKABaseDevice):
"""
    The SDP Subarray Leaf Node monitors the SDP Subarray and issues control actions during an observation.
:Device Properties:
SdpSubarrayFQDN:
FQDN of the SDP Subarray Tango Device Server.
:Device Attributes:
receiveAddresses:
This attribute is used for testing purposes. In the unit test cases
it is used to provide FQDN of receiveAddresses attribute from SDP.
activityMessage:
String providing information about the current activity in SDP Subarray Leaf Node.
activeProcessingBlocks:
            This is an attribute from the SDP Subarray which depicts the active Processing
Blocks in the SDP Subarray.
"""
# -----------------
# Device Properties
# -----------------
SdpSubarrayFQDN = device_property(
dtype="str", doc="FQDN of the SDP Subarray Tango Device Server."
)
# ----------
# Attributes
# ----------
receiveAddresses = attribute(
dtype="str",
access=AttrWriteType.READ_WRITE,
doc="This attribute is used for testing purposes. In the unit test cases, "
"it is used to provide FQDN of receiveAddresses attribute from SDP.",
)
activityMessage = attribute(
dtype="str",
access=AttrWriteType.READ_WRITE,
doc="String providing information about the current activity in SDP Subarray Leaf Node",
)
activeProcessingBlocks = attribute(
dtype="str",
doc="This is a attribute from SDP Subarray which depicts the active Processing Blocks in "
"the SDP Subarray.",
)
class InitCommand(SKABaseDevice.InitCommand):
"""
A class for the TMC SdpSubarrayLeafNode's init_device() method.
"""
def do(self):
"""
Initializes the attributes and properties of the SdpSubarrayLeafNode.
return:
A tuple containing a return code and a string message indicating status.
The message is for information purpose only.
rtype:
(ResultCode, str)
"""
super().do()
device = self.target
self.this_server = TangoServerHelper.get_instance()
self.this_server.set_tango_class(device)
device.attr_map = {}
device.attr_map["receiveAddresses"] = ""
device.attr_map["activeProcessingBlocks"] = ""
device.attr_map["activityMessage"] = ""
# Initialise attributes
device._sdp_subarray_health_state = HealthState.OK
device._build_state = "{},{},{}".format(
release.name, release.version, release.description
)
device._version_id = release.version
# Create DeviceData class instance
device_data = DeviceData.get_instance()
device.device_data = device_data
standalone_mode = os.environ.get("STANDALONE_MODE")
self.logger.info("Device running in standalone_mode:%s", standalone_mode)
ApiUtil.instance().set_asynch_cb_sub_model(tango.cb_sub_model.PUSH_CALLBACK)
log_msg = f"{const.STR_SETTING_CB_MODEL}{ApiUtil.instance().get_asynch_cb_sub_model()}"
self.logger.debug(log_msg)
self.this_server.write_attr(
"activityMessage", const.STR_SDPSALN_INIT_SUCCESS, False
)
# Initialise Device status
device.set_status(const.STR_SDPSALN_INIT_SUCCESS)
self.logger.info(const.STR_SDPSALN_INIT_SUCCESS)
return (ResultCode.OK, const.STR_SDPSALN_INIT_SUCCESS)
# ---------------
# General methods
# ---------------
def always_executed_hook(self):
# PROTECTED REGION ID(SdpSubarrayLeafNode.always_executed_hook) ENABLED START #
"""Internal construct of TANGO."""
# PROTECTED REGION END # // SdpSubarrayLeafNode.always_executed_hook
def delete_device(self):
# PROTECTED REGION ID(SdpSubarrayLeafNode.delete_device) ENABLED START #
"""Internal construct of TANGO."""
# PROTECTED REGION END # // SdpSubarrayLeafNode.delete_device
# ------------------
# Attributes methods
# ------------------
def read_receiveAddresses(self):
# PROTECTED REGION ID(SdpSubarrayLeafNode.receiveAddresses_read) ENABLED START #
"""Internal construct of TANGO. Returns the Receive Addresses.
receiveAddresses is a forwarded attribute from SDP Master which depicts State of the SDP."""
return self.attr_map["receiveAddresses"]
# PROTECTED REGION END # // SdpSubarrayLeafNode.receiveAddresses_read
def write_receiveAddresses(self, value):
# PROTECTED REGION ID(SdpSubarrayLeafNode.receiveAddresses_read) ENABLED START #
"""Internal construct of TANGO. Sets the Receive Addresses.
receiveAddresses is a forwarded attribute from SDP Master which depicts State of the SDP."""
self.attr_map["receiveAddresses"] = value
# PROTECTED REGION END # // SdpSubarrayLeafNode.receiveAddresses_read
def read_activityMessage(self):
# PROTECTED REGION ID(SdpSubarrayLeafNode.activityMessage_read) ENABLED START #
"""Internal construct of TANGO. Returns Activity Messages.
activityMessage is a String providing information about the current activity in SDP Subarray Leaf Node"""
return self.attr_map["activityMessage"]
# PROTECTED REGION END # // SdpSubarrayLeafNode.activityMessage_read
def write_activityMessage(self, value):
# PROTECTED REGION ID(SdpSubarrayLeafNode.activityMessage_write) ENABLED START #
"""Internal construct of TANGO. Sets the activity message.
activityMessage is a String providing information about the current activity in SDP Subarray Leaf Node"""
self.update_attr_map("activityMessage", value)
# PROTECTED REGION END # // SdpSubarrayLeafNode.activityMessage_write
    # Lock shared by all calls so concurrent attribute-map updates are
    # serialized; a per-call Lock would never block, so a single shared
    # class-level lock is used instead.
    _attr_map_lock = threading.Lock()
    def update_attr_map(self, attr, val):
        # PROTECTED REGION ID(SdpSubarrayLeafNode.update_attr_map) ENABLED START #
        """This method updates an attribute value in the attribute map. Once a thread has
        acquired the lock, subsequent attempts to acquire it are blocked until it is released."""
        with self._attr_map_lock:
            self.attr_map[attr] = val
        # PROTECTED REGION END #    //  SdpSubarrayLeafNode.update_attr_map
def read_activeProcessingBlocks(self):
# PROTECTED REGION ID(SdpSubarrayLeafNode.activeProcessingBlocks_read) ENABLED START #
"""Internal construct of TANGO. Returns Active Processing Blocks.activeProcessingBlocks is a forwarded attribute
from SDP Subarray which depicts the active Processing Blocks in the SDP Subarray"""
return self.attr_map["activeProcessingBlocks"]
# PROTECTED REGION END # // SdpSubarrayLeafNode.activeProcessingBlocks_read
# --------
# Commands
# --------
def is_telescope_on_allowed(self):
"""
Checks Whether this command is allowed to be run in current device state.
return:
True if this command is allowed to be run in current device state.
rtype:
boolean
raises: DevF
ailed if this command is not allowed to be run in current device state.
"""
handler = self.get_command_object("TelescopeOn")
return handler.check_allowed()
@command()
@DebugIt()
def TelescopeOn(self):
"""
Sets the opState to ON.
:param argin: None
:return: None
"""
handler = self.get_command_object("TelescopeOn")
handler()
def is_telescope_off_allowed(self):
"""
Checks Whether this command is allowed to be run in current device state.
return:
True if this command is allowed to be run in current device state.
rtype:
boolean
raises: DevF
ailed if this command is not allowed to be run in current device state.
"""
handler = self.get_command_object("TelescopeOff")
return handler.check_allowed()
@command()
@DebugIt()
def TelescopeOff(self):
"""
Sets the opState to Off.
:param argin: None
:return: None
"""
handler = self.get_command_object("TelescopeOff")
handler()
@command()
@DebugIt()
def Abort(self):
"""
Invoke Abort on SdpSubarrayLeafNode.
"""
handler = self.get_command_object("Abort")
handler()
def is_Abort_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
return:
True if this command is allowed to be run in current device state
rtype:
boolean
raises:
DevFailed if this command is not allowed to be run in current device state
"""
handler = self.get_command_object("Abort")
return handler.check_allowed()
@command(
dtype_in=("str"),
doc_in="The input JSON string consists of information related to id, max_length, scan_types"
" and processing_blocks.",
)
@DebugIt()
def AssignResources(self, argin):
"""
Assigns resources to given SDP subarray.
"""
handler = self.get_command_object("AssignResources")
try:
self.validate_obs_state()
except InvalidObsStateError as error:
self.logger.exception(error)
tango.Except.throw_exception(
const.ERR_DEVICE_NOT_IN_EMPTY_IDLE,
const.ERR_ASSGN_RESOURCES,
"SdpSubarrayLeafNode.AssignResources()",
tango.ErrSeverity.ERR,
)
handler(argin)
def is_AssignResources_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
return:
True if this command is allowed to be run in current device state
rtype:
boolean
"""
handler = self.get_command_object("AssignResources")
return handler.check_allowed()
def is_Configure_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
return:
True if this command is allowed to be run in current device state
rtype:
boolean
"""
handler = self.get_command_object("Configure")
return handler.check_allowed()
@command(
dtype_in=("str"),
doc_in="The JSON input string consists of scan type.",
)
@DebugIt()
def Configure(self, argin):
"""
Invokes Configure on SdpSubarrayLeafNode.
"""
handler = self.get_command_object("Configure")
handler(argin)
def is_End_allowed(self):
"""
Checks whether this command is allowed to be run in current device state.
return:
True if this command is allowed to be run in current device state.
rtype:
boolean
"""
handler = self.get_command_object("End")
return handler.check_allowed()
@command()
@DebugIt()
def End(self):
"""This command invokes End command on SDP subarray to end the current Scheduling block."""
handler = self.get_command_object("End")
handler()
def is_EndScan_allowed(self):
"""
Checks whether this command is allowed to be run in current device state.
return:
True if this command is allowed to be run in current device state.
rtype:
boolean
"""
handler = self.get_command_object("EndScan")
return handler.check_allowed()
@command()
@DebugIt()
def EndScan(self):
"""
Invokes EndScan on SdpSubarrayLeafNode.
"""
handler = self.get_command_object("EndScan")
handler()
@command()
@DebugIt()
def ObsReset(self):
"""
Invoke ObsReset command on SdpSubarrayLeafNode.
"""
handler = self.get_command_object("ObsReset")
handler()
def is_ObsReset_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
return:
True if this command is allowed to be run in current device state
rtype:
boolean
"""
handler = self.get_command_object("ObsReset")
return handler.check_allowed()
def is_ReleaseAllResources_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
return:
True if this command is allowed to be run in current device state
rtype:
boolean
raises:
DevFailed if this command is not allowed to be run in current device state
"""
handler = self.get_command_object("ReleaseAllResources")
return handler.check_allowed()
@command()
@DebugIt()
def ReleaseAllResources(self):
"""
Invokes ReleaseAllResources command on SdpSubarrayLeafNode.
"""
handler = self.get_command_object("ReleaseAllResources")
handler()
@command()
@DebugIt()
def Restart(self):
"""
Invoke Restart command on SdpSubarrayLeafNode.
"""
handler = self.get_command_object("Restart")
handler()
def is_Restart_allowed(self):
"""
Checks whether this command is allowed to be run in current device state
return:
True if this command is allowed to be run in current device state
rtype:
boolean
raises:
DevFailed if this command is not allowed to be run in current device state
"""
handler = self.get_command_object("Restart")
return handler.check_allowed()
def is_Scan_allowed(self):
"""
Checks whether this command is allowed to be run in current device state.
return:
True if this command is allowed to be run in current device state.
rtype:
boolean
"""
handler = self.get_command_object("Scan")
return handler.check_allowed()
@command(
dtype_in=("str"),
doc_in="The JSON input string consists of SB ID.",
)
@DebugIt()
def Scan(self, argin):
"""Invoke Scan command to SDP subarray."""
handler = self.get_command_object("Scan")
handler(argin)
def validate_obs_state(self):
self.this_server = TangoServerHelper.get_instance()
sdp_subarray_fqdn = self.this_server.read_property("SdpSubarrayFQDN")[0]
sdp_sa_client = TangoClient(sdp_subarray_fqdn)
if sdp_sa_client.get_attribute("obsState").value in [
ObsState.EMPTY,
ObsState.IDLE,
]:
self.logger.info(
"SDP subarray is in required obstate,Hence resources to SDP can be assign."
)
else:
self.logger.error("Subarray is not in EMPTY obstate")
log_msg = "Error in device obstate."
self.this_server.write_attr("activityMessage", log_msg, False)
raise InvalidObsStateError("SDP subarray is not in EMPTY obstate.")
def init_command_objects(self):
"""
Initialises the command handlers for commands supported by this
device.
"""
super().init_command_objects()
# Create device_data class object
device_data = DeviceData.get_instance()
args = (device_data, self.state_model, self.logger)
self.register_command_object("AssignResources", AssignResources(*args))
self.register_command_object("ReleaseAllResources", ReleaseAllResources(*args))
self.register_command_object("Scan", Scan(*args))
self.register_command_object("End", End(*args))
self.register_command_object("Restart", Restart(*args))
self.register_command_object("Configure", Configure(*args))
self.register_command_object("EndScan", EndScan(*args))
self.register_command_object("Abort", Abort(*args))
self.register_command_object("ObsReset", ObsReset(*args))
self.register_command_object("TelescopeOff", TelescopeOff(*args))
self.register_command_object("TelescopeOn", TelescopeOn(*args))
self.register_command_object("Reset", ResetCommand(*args))
# ----------
# Run server
# ----------
def main(args=None, **kwargs):
# PROTECTED REGION ID(SdpSubarrayLeafNode.main) ENABLED START #
"""
Runs the SdpSubarrayLeafNode
:param args: Arguments internal to TANGO
:param kwargs: Arguments internal to TANGO
:return: SdpSubarrayLeafNode TANGO object
"""
# PROTECTED REGION ID(SdpSubarrayLeafNode.main) ENABLED START #
ret_val = run((SdpSubarrayLeafNode,), args=args, **kwargs)
return ret_val
# PROTECTED REGION END # // SdpSubarrayLeafNode.main
if __name__ == "__main__":
main()
| 31.749588 | 120 | 0.643161 | 2,098 | 19,272 | 5.772164 | 0.14204 | 0.036499 | 0.028984 | 0.031214 | 0.5455 | 0.476713 | 0.427415 | 0.37341 | 0.333526 | 0.333526 | 0 | 0.000214 | 0.273557 | 19,272 | 606 | 121 | 31.80198 | 0.864786 | 0.385118 | 0 | 0.3083 | 0 | 0 | 0.148589 | 0.014773 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134387 | false | 0 | 0.098814 | 0 | 0.320158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d431a229e5bc2ae5e5df2bc300491d02cd7bb9da | 4,255 | py | Python | src/app.py | qq20004604/qq-robot | f109a774666efcd0cf36ed4da0d79f53b32b1577 | [
"Apache-2.0"
] | 8 | 2019-05-20T03:30:27.000Z | 2020-09-24T13:16:53.000Z | src/app.py | qq20004604/qq-robot | f109a774666efcd0cf36ed4da0d79f53b32b1577 | [
"Apache-2.0"
] | null | null | null | src/app.py | qq20004604/qq-robot | f109a774666efcd0cf36ed4da0d79f53b32b1577 | [
"Apache-2.0"
] | 2 | 2020-09-24T13:16:56.000Z | 2020-12-25T09:44:48.000Z | from aiocqhttp import CQHttp
from datetime import datetime
from sendmsg import SendMsg
from loadData import LoadData
import threading
import time
# Configuration for running this script on a local Windows machine with CoolQ
# HOST = '127.0.0.1'
# PORT = 7788
# This URL points at the CoolQ instance inside the docker container.
# For example, suppose the docker command contains -p 3542:9000 -p 15700:5700:
# 9000 is the port CoolQ exposes for web access (mapped to 3542 outside, so the outside world uses port 3542),
# while 5700 is the port on which CoolQ receives data (i.e. what this Python service sends to CoolQ); it is mapped to 15700,
# so the outside world sends messages to CoolQ through port 15700.
BASEURL = 'http://127.0.0.1:15700/'
bot = CQHttp(api_root=BASEURL)
d = {
# '博客': 'https://blog.csdn.net/qq20004604',
# 'github': 'https://github.com/qq20004604',
# 'nginx': 'https://github.com/qq20004604/nginx-demo',
# 'django': 'https://github.com/qq20004604/Python3_Django_Demo',
# 'docker': 'https://github.com/qq20004604/docker-learning',
# 'webpack': 'https://github.com/qq20004604/webpack-study',
# 'react': 'https://github.com/qq20004604/react-demo',
# 'vue': 'github: https://github.com/qq20004604/vue-scaffold\n博客专栏(1.x):https://blog.csdn.net/qq20004604/article/category/6381182',
# '笔记': 'https://github.com/qq20004604/notes',
# 'demo': 'https://github.com/qq20004604/some_demo',
# '海外服务器': 'https://manage.hostdare.com/aff.php?aff=939\n这个可以做私人服务器(不需要备案),也可以找群主询问如何架设SS server的方法。',
# 'QQ 机器人': 'https://github.com/qq20004604/qq-robot',
# '架构': 'https://juejin.im/post/5cea1f705188250640005472',
# 'es6': 'https://blog.csdn.net/qq20004604/article/details/78014684',
# 'vue脚手架': 'https://github.com/qq20004604/Vue-with-webpack',
# 'react脚手架': 'https://github.com/qq20004604/react-with-webpack',
# 'Macbook常用软件': 'https://github.com/qq20004604/when-you-get-new-Macbook',
# 'python的django与mysql交互': 'https://blog.csdn.net/qq20004604/article/details/89934212'
}
ld = LoadData()
def log(context, filename='./log.log'):
with open(filename, 'a', encoding='utf-8') as f:
f.write('time:%s, sender:%s, message_type:%s, user_id:%s, content:%s\n' % (
datetime.now(),
context['sender']['nickname'],
context['message_type'],
context['sender']['user_id'],
context['raw_message']))
@bot.on_message()
async def handle_msg(context):
msg = context['message']
# print(msg)
'''
    # print(str(context)) produces output like the following:
{'font': 1473688, 'message': '#help', 'message_id': 528, 'message_type': 'private', 'post_type': 'message',
'raw_message': '#help', 'self_id': 2691365658,
'sender': {'age': 30, 'nickname': '零零水', 'sex': 'male', 'user_id': 20004604}, 'sub_type': 'friend',
'time': 1558283078, 'user_id': 20004604}
'''
result = ''
isindict = False
isinhelp = False
for k in d:
if ('#' + k) in msg:
result += d[k] + '\n'
isindict = True
if '#help' in msg:
result += '你可以使用以下命令~记得前面带上#喔\n'
isinhelp = True
for k in d:
result += '#' + k + '\n'
    # the default dictionary replies ask the user to leave a star
    if isindict is True:
        result += "记得给star!"
    # log whenever a dictionary entry or #help was matched
if isindict is True or isinhelp is True:
log(context)
return {'reply': result}
@bot.on_notice('group_increase')
async def handle_group_increase(context):
await bot.send(context, message='欢迎新人~可以输入#help来向我查询所有命令喔',
at_sender=True, auto_escape=True)
@bot.on_request('group', 'friend')
async def handle_request(context):
return {'approve': True}
SendMsg(BASEURL)
def mixin_dict():
global d
minutes = 0
while True:
        # refresh the dictionary once per minute
minutes = minutes + 1
if minutes % 60 == 0:
print('%s hours pass' % (minutes / 60))
ld_dict = ld.load_search_info()
d = {**ld_dict}
time.sleep(60)
t1 = threading.Thread(target=mixin_dict, name='loop')
t1.start()
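# Hedged note (not in the original): the updater thread loops forever and is
# non-daemon, so it keeps the process alive even if bot.run() returns; passing
# daemon=True to threading.Thread would let the interpreter exit cleanly.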
# docker configuration
HOST = '172.18.0.1'
PORT = 12399
# This is the port CoolQ sends received QQ messages to, i.e. the port on which
# this Python service receives those messages.
# Inside the CoolQ docker container it is configured in */coolq/app/io.github.richardchien.coolqhttpapi/config/(QQ number).ini
# Since the container cannot reach the host's ports via 127.0.0.1, run `ip addr show docker0` to find the host's address.
# For example, on my server that command reports the host IP as 172.18.0.1 (i.e. the container reaches the host via 172.18.0.1).
# So edit that ini config file: post_url = http://172.18.0.1:34519
# The HOST here should match that IP address, and the PORT likewise.
bot.run(host=HOST, port=PORT)
| 31.518519 | 135 | 0.647239 | 526 | 4,255 | 5.171103 | 0.448669 | 0.052574 | 0.066912 | 0.114706 | 0.118015 | 0.041544 | 0.029412 | 0 | 0 | 0 | 0 | 0.102394 | 0.185194 | 4,255 | 134 | 136 | 31.753731 | 0.682146 | 0.436428 | 0 | 0.032258 | 0 | 0.016129 | 0.141642 | 0.012012 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0.016129 | 0.096774 | 0 | 0.16129 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d43272fbb73b6c6b6b7fe45a8e85aee3304ff420 | 1,359 | py | Python | src/9/enforcing_type_checking_on_a_function_using_a_decorator/example.py | tuanavu/python-gitbook | 948a05e065b0f40afbfd22f697dff16238163cde | [
"MIT"
] | 14 | 2017-05-20T04:06:46.000Z | 2022-01-23T06:48:45.000Z | src/9/enforcing_type_checking_on_a_function_using_a_decorator/example.py | tuanavu/python-gitbook | 948a05e065b0f40afbfd22f697dff16238163cde | [
"MIT"
] | 1 | 2021-06-10T20:17:55.000Z | 2021-06-10T20:17:55.000Z | src/9/enforcing_type_checking_on_a_function_using_a_decorator/example.py | tuanavu/python-gitbook | 948a05e065b0f40afbfd22f697dff16238163cde | [
"MIT"
] | 15 | 2017-03-29T17:57:33.000Z | 2021-08-24T02:20:08.000Z | from inspect import signature
from functools import wraps
def typeassert(*ty_args, **ty_kwargs):
def decorate(func):
# If in optimized mode, disable type checking
if not __debug__:
return func
# Map function argument names to supplied types
sig = signature(func)
bound_types = sig.bind_partial(*ty_args, **ty_kwargs).arguments
@wraps(func)
def wrapper(*args, **kwargs):
bound_values = sig.bind(*args, **kwargs)
# Enforce type assertions across supplied arguments
for name, value in bound_values.arguments.items():
if name in bound_types:
if not isinstance(value, bound_types[name]):
raise TypeError(
'Argument {} must be {}'.format(name, bound_types[name])
)
return func(*args, **kwargs)
return wrapper
return decorate
# Examples
@typeassert(int, int)
def add(x, y):
return x + y
@typeassert(int, z=int)
def spam(x, y, z=42):
print(x, y, z)
if __name__ == '__main__':
print(add(2,3))
try:
add(2, 'hello')
except TypeError as e:
print(e)
spam(1, 2, 3)
spam(1, 'hello', 3)
try:
spam(1, 'hello', 'world')
except TypeError as e:
print(e)
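    # Hedged note (an assumption about invocation, not in the original file):
    # because decorate() returns func unchanged when __debug__ is False,
    # running this script as `python -O example.py` disables all decorator
    # checks, and add(2, 'hello') then fails inside `x + y` with Python's
    # plain TypeError rather than the decorator's formatted message.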
| 26.134615 | 84 | 0.557027 | 166 | 1,359 | 4.421687 | 0.415663 | 0.054496 | 0.021798 | 0.038147 | 0.065395 | 0.065395 | 0 | 0 | 0 | 0 | 0 | 0.012209 | 0.337013 | 1,359 | 51 | 85 | 26.647059 | 0.802442 | 0.108904 | 0 | 0.157895 | 0 | 0 | 0.041494 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 1 | 0.131579 | false | 0 | 0.052632 | 0.026316 | 0.315789 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d433b3494ae66595b5f31b0ee2375f4a611bbe3e | 2,230 | py | Python | muk_web_theme/muk_dms_actions/tests/test_file.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | muk_web_theme/muk_dms_actions/tests/test_file.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | null | null | null | muk_web_theme/muk_dms_actions/tests/test_file.py | marionumza/vocal_v12 | 480990e919c9410903e06e7813ee92800bd6a569 | [
"Unlicense"
] | 1 | 2021-05-05T07:59:08.000Z | 2021-05-05T07:59:08.000Z | ###################################################################################
#
# Copyright (C) 2017 MuK IT GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###################################################################################
import os
import base64
import logging
from odoo.exceptions import AccessError, ValidationError
from odoo.addons.muk_utils.tests.common import multi_users
from odoo.addons.muk_dms.tests.common import setup_data_function
from odoo.addons.muk_dms.tests.test_file import FileTestCase
_path = os.path.dirname(os.path.dirname(__file__))
_logger = logging.getLogger(__name__)
class FileActionTestCase(FileTestCase):
def setUp(self):
super(FileActionTestCase, self).setUp()
self.action = self.env['muk_dms_actions.action'].sudo()
@multi_users(lambda self: self.multi_users())
@setup_data_function(setup_func='_setup_test_data')
def test_available_actions(self):
self.action.create({'name': "Test 01"})
self.action.create({'name': "Test 02", 'is_limited_to_single_file': True})
self.action.create({'name': "Test 03", 'criteria_directory': self.new_root_directory.id})
self.action.create({'name': "Test 04", 'criteria_directory': self.new_sub_directory.id})
self.assertTrue(len(self.new_file_root_directory.actions) == 3)
self.assertTrue(len(self.new_file_root_directory.actions_multi) == 2)
self.assertTrue(len(self.new_file_sub_directory.actions) == 4)
self.assertTrue(len(self.new_file_sub_directory.actions_multi) == 3)
| 44.6 | 97 | 0.675336 | 293 | 2,230 | 4.959044 | 0.440273 | 0.028906 | 0.044047 | 0.055059 | 0.300069 | 0.233999 | 0.177564 | 0.130764 | 0.130764 | 0 | 0 | 0.010133 | 0.159193 | 2,230 | 50 | 98 | 44.6 | 0.7648 | 0.309865 | 0 | 0 | 0 | 0 | 0.105691 | 0.034738 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.083333 | false | 0 | 0.291667 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4343b5d871290ec2f4ca3f7e5cf287d0875613d | 2,500 | py | Python | populate.py | yueguoguo/shopify-flask-example | dc556aeac21bfcd9bbd5fdeece5cd4827e090055 | [
"Apache-2.0"
] | null | null | null | populate.py | yueguoguo/shopify-flask-example | dc556aeac21bfcd9bbd5fdeece5cd4827e090055 | [
"Apache-2.0"
] | null | null | null | populate.py | yueguoguo/shopify-flask-example | dc556aeac21bfcd9bbd5fdeece5cd4827e090055 | [
"Apache-2.0"
] | null | null | null | # Mock method to populate data to a Shopify store
# This is used only for development purpose
import io
import requests
import random
from typing import List
from time import sleep
import shopify
from faker import Faker
ACCESS_TOKEN = "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" # Temporary use only
API_VERSION = "2020-10"
SHOP_URL = "fromairstore.myshopify.com"
class Populate:
def __init__(self, access_token: str, shop_url: str, api_version: str):
"""
Initialize a populate object
Args:
access_token (str): shopify API access token
shop_url (str): shopify shop URL
api_version (str): shopify API version
"""
self.token = access_token
self.shop_url = shop_url
self.api_version = api_version
random.seed(42)
session = shopify.Session(shop_url, api_version, access_token)
shopify.ShopifyResource.activate_session(session)
self.existing_customers = None
self.existing_products = None
def get_customers(self) -> List:
if not self.existing_customers:
self.existing_customers = shopify.Customer.find()
return self.existing_customers
def get_products(self) -> List:
if not self.existing_products:
self.existing_products = shopify.Product.find()
return self.existing_products
def generate_customer(self):
"""Add customers with random fake information to the shop
"""
fake = Faker()
names = fake.name().split(' ')
customer = shopify.Customer()
customer.first_name = names[0]
customer.last_name = names[1]
customer.email = "{0}{1}@gmail.com".format(names[0], names[1])
customer.save()
def generate_products(self):
"""Generate fake products
"""
        raise NotImplementedError
def generate_order(self):
"""Generate an order for a customer to purchase a product
"""
customer = random.choice(self.get_customers())
product = random.choice(self.get_products())
order = shopify.Order()
order.customer = {
"first_name": customer.first_name,
"last_name": customer.last_name,
"email": customer.email
}
order.fulfillment_status = "fulfilled"
order.line_items = [
{
"title": product.title,
"quantity": 1,
"price": product.price_range()
}
]
order.save()
if __name__ == "__main__":
populator = Populate(access_token=ACCESS_TOKEN, shop_url=SHOP_URL, api_version=API_VERSION)
# generate 5 fake customers
for _ in range(5):
populator.generate_customer()
sleep(0.5)
    # generate 5 fake orders with random customers and products
for _ in range(5):
populator.generate_order()
sleep(1)
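    # Hedged note (an assumption about the shopify library, not from this
    # file): ShopifyResource.activate_session() binds the session globally for
    # the process, so the single activation in Populate.__init__ is what lets
    # the later shopify.Customer.find() / shopify.Order().save() calls reach
    # the shop.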
| 23.809524 | 92 | 0.7288 | 333 | 2,500 | 5.276276 | 0.303303 | 0.051224 | 0.047809 | 0.029027 | 0.06033 | 0.06033 | 0 | 0 | 0 | 0 | 0 | 0.019212 | 0.1672 | 2,500 | 104 | 93 | 24.038462 | 0.824688 | 0.2212 | 0 | 0.03125 | 0 | 0 | 0.074619 | 0.032487 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.109375 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d43594d2c1b672c278943517c110acaf9fee2089 | 1,067 | py | Python | examples/VNH5019_example.py | being24/VNH5019 | 1030050d363991e43f63befbb052c423c3470156 | [
"MIT"
] | null | null | null | examples/VNH5019_example.py | being24/VNH5019 | 1030050d363991e43f63befbb052c423c3470156 | [
"MIT"
] | null | null | null | examples/VNH5019_example.py | being24/VNH5019 | 1030050d363991e43f63befbb052c423c3470156 | [
"MIT"
] | null | null | null | # !/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
from logging import DEBUG, ERROR, FATAL, INFO, WARN
import pigpio
from VNH5019_driver import VNH5019 as MOTOR
if __name__ == "__main__":
count = 0.0
one_count = 360 * 4 / (64 * 50)
pi = pigpio.pi()
motor0 = MOTOR(
pi,
driver_out1=20,
driver_out2=21,
encoder_in1=5,
encoder_in2=6,
pwm_channel=0,
gear_ratio=150,
logging_level=WARN)
motor1 = MOTOR(
pi,
driver_out1=23,
driver_out2=24,
encoder_in1=27,
encoder_in2=22,
pwm_channel=1,
gear_ratio=50,
logging_level=WARN)
time.sleep(3)
motor0.rotate_motor(pwm_duty_cycle=500, rotation_angle=180)
motor1.rotate_motor(pwm_duty_cycle=500, rotation_angle=180)
#motor0.drive(pwm_duty_cycle=4095)
#motor1.drive(pwm_duty_cycle=4095)
time.sleep(3)
print("-" * 10)
print(motor0.get_current_angle())
print(motor1.get_current_angle())
    # Note: want to introduce integral (I) control for smoothing, and stop
    # multiplying the gain by the inverse of the speed
| 20.921569 | 63 | 0.626992 | 142 | 1,067 | 4.429577 | 0.507042 | 0.044515 | 0.076312 | 0.054054 | 0.200318 | 0.133545 | 0.133545 | 0.133545 | 0.133545 | 0 | 0 | 0.103316 | 0.26523 | 1,067 | 50 | 64 | 21.34 | 0.69898 | 0.137769 | 0 | 0.181818 | 0 | 0 | 0.009847 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.121212 | 0 | 0.121212 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d43f4c8e8daff28f9449dbacb0f541d11f998d5f | 326 | py | Python | stockUpdater/update.py | SamrathPalSingh/stockmarketwebsite | ea91647e25066c1a5c7f48015dccd19117428e9b | [
"MIT"
] | null | null | null | stockUpdater/update.py | SamrathPalSingh/stockmarketwebsite | ea91647e25066c1a5c7f48015dccd19117428e9b | [
"MIT"
] | 9 | 2020-05-05T18:43:29.000Z | 2021-09-22T18:58:59.000Z | stockUpdater/update.py | SamrathPalSingh/stockmarketwebsite | ea91647e25066c1a5c7f48015dccd19117428e9b | [
"MIT"
] | null | null | null | from .stock_prediction_logic.analyse import start_analysis
from home.models import stock
def updateStocks():
# start_analysis()
obj = stock.objects.get(stockSymbol = "AAPL")
obj.macd_trend = 'be'
obj.rank = int(4)
import datetime
now = datetime.datetime.now()
obj.volume = str(now)
obj.save()
| 25.076923 | 58 | 0.687117 | 43 | 326 | 5.093023 | 0.651163 | 0.118721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003831 | 0.199387 | 326 | 12 | 59 | 27.166667 | 0.835249 | 0.04908 | 0 | 0 | 0 | 0 | 0.019481 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d43fb21fc63432d8db4d6094061fd258a4b6f29b | 790 | py | Python | train.py | jindal2309/conv-ai-model | 2238780bdc01965a97726edaeb834cda7ed73867 | [
"MIT"
] | null | null | null | train.py | jindal2309/conv-ai-model | 2238780bdc01965a97726edaeb834cda7ed73867 | [
"MIT"
] | null | null | null | train.py | jindal2309/conv-ai-model | 2238780bdc01965a97726edaeb834cda7ed73867 | [
"MIT"
] | null | null | null | from torch.utils.data.dataloader import DataLoader
from dataloader import ConvAIDataset
from utils import combine_contexts
from vocab.text import BPEVocab
max_seq_len = 512
train_data = 'data/train_self_revised_no_cands.txt'
bpe_vocab_path = 'vocab/bpe.vocab'
bpe_codes_path = 'vocab/bpe.code'
params = {'batch_size': 64, 'shuffle': True, 'num_workers': 2, 'collate_fn': combine_contexts}
if __name__ == '__main__':
vocab = BPEVocab.from_files(bpe_vocab_path, bpe_codes_path)
dataset = ConvAIDataset(filename=train_data,
max_seq_len=max_seq_len,
bpe_vocab=vocab)
dataloader = DataLoader(dataset, **params)
for i, (contexts, targets) in enumerate(dataloader):
print(i, contexts, targets)
exit(0)
| 30.384615 | 94 | 0.702532 | 103 | 790 | 5.048544 | 0.495146 | 0.061538 | 0.051923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011111 | 0.202532 | 790 | 25 | 95 | 31.6 | 0.814286 | 0 | 0 | 0 | 0 | 0 | 0.140506 | 0.04557 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d440b9f21f95bdb0021bcda32bda78128d97dda1 | 1,415 | py | Python | leetcode/binary_tree_level _order_traversal_leetcode_102/binary_tree_level _order_traversal.py | Williano/Interview-Prep | 0ad688637215080c7e4d26c640d74c89227e7cfb | [
"MIT"
] | null | null | null | leetcode/binary_tree_level _order_traversal_leetcode_102/binary_tree_level _order_traversal.py | Williano/Interview-Prep | 0ad688637215080c7e4d26c640d74c89227e7cfb | [
"MIT"
] | null | null | null | leetcode/binary_tree_level _order_traversal_leetcode_102/binary_tree_level _order_traversal.py | Williano/Interview-Prep | 0ad688637215080c7e4d26c640d74c89227e7cfb | [
"MIT"
] | null | null | null | """
Leetcode No: 102
Title: Binary Tree Level Order Traversal
Description:
Given the root of a binary tree, return the level order traversal of its
nodes' values. (i.e., from left to right, level by level).
Example 1:
Input: root = [3,9,20,null,null,15,7]
Output: [[3],[9,20],[15,7]]
Example 2:
Input: root = [1]
Output: [[1]]
Example 3:
Input: root = []
Output: []
"""
from typing import Optional, List
from collections import deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def level_order(self, root: Optional[TreeNode]) -> List[List[int]]:
if root is None:
return root
tree_queue = deque()
tree_queue.append(root)
level_order_traversal = []
while tree_queue:
tree_levels = []
for _ in range(len(tree_queue)):
current_node = tree_queue.popleft()
if current_node:
tree_levels.append(current_node.val)
if current_node.left:
tree_queue.append(current_node.left)
if current_node.right:
tree_queue.append(current_node.right)
level_order_traversal.append(tree_levels)
return level_order_traversal
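# Hedged usage sketch (tree built by hand, not part of the original solution);
# it reproduces Example 1 from the docstring above.
def _demo_level_order():
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().level_order(root))  # expected: [[3], [9, 20], [15, 7]]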
| 21.769231 | 76 | 0.579505 | 176 | 1,415 | 4.494318 | 0.375 | 0.079646 | 0.120101 | 0.055626 | 0.06574 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024084 | 0.325088 | 1,415 | 64 | 77 | 22.109375 | 0.804188 | 0.259364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4430fca441867f9bd80ffce1daeda8281c206ea | 5,785 | py | Python | Validation/Performance/test/ThreadTest.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 3 | 2018-08-24T19:10:26.000Z | 2019-02-19T11:45:32.000Z | Validation/Performance/test/ThreadTest.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 7 | 2016-07-17T02:34:54.000Z | 2019-08-13T07:58:37.000Z | Validation/Performance/test/ThreadTest.py | NTrevisani/cmssw | a212a27526f34eb9507cf8b875c93896e6544781 | [
"Apache-2.0"
] | 5 | 2018-08-21T16:37:52.000Z | 2020-01-09T13:33:17.000Z | #!/usr/bin/env python
#A script to test threading concepts/implementations
from __future__ import print_function
import threading,time,sys
#The following used from past bad experience with multithreading in Python
def _cleanup():
pass
#Define the "thread" class, of which each thread is a instance
class TestThread(threading.Thread):
#Constructor with 2 arguments
def __init__(self,Name,Cpu,**kwargs): #Name and Cpu are equivalent to any number of options necessary to instantiate the Test()
self.Name=Name
self.Cpu=Cpu
self.kwargs=kwargs
threading.Thread.__init__(self)
#Actual function executed at the invocation of thread's start() method
def run(self):
#Instantiate the test of class Test()
self.Test=Test(self.Name,self.Cpu,**(self.kwargs))
#This is the function used to really activate the test
#Launch it!
self.Test.runTest()
return
#Define the class Test, of which each test (executed in a thread or not) is an instance
class Test:
#Constructor with 2 optional arguments
def __init__(self,Name="N/A",Cpu="N/A",**kwargs):
self.Name=Name
self.Cpu=Cpu
self.kwargs=kwargs
#Initializing some list to keep timestamps for a silly test
self.Times=[]
print("Initializing Test() instance, value of Name is %s and valud of Cpu is %s"%(self.Name,self.Cpu))
#Silly functions to get back the Name and Cpu arguments originally passed to the Test object
def getName(self):
return self.Name
def getCpu(self):
return self.Cpu
#The actual test function
def runTest(self):
print("I am thread Test and I was invoked with arguments Name %s, Cpu %s and optional keyword arguments %s"%(self.Name,self.Cpu,self.kwargs))
self.time=0
while self.time<10:
self.Times.append(time.ctime())
time.sleep(1)
self.time+=1
print(self.Times)
if self.kwargs:
print("Testing keyword arguments handling with function invocation")
test(**(self.kwargs))
return
#Test function for arguments fun
ahi="AHI!"
def test(cpu='N/A',perfsuitedir=ahi,IgProfEvents='N/A',IgProfCandles='N/A',cmsdriverOptions='N/A',stepOptions='N/A',string="IgProf",profilers='N/A',bypasshlt='N/A',userInputFile='N/A'):
print(cpu)
print(perfsuitedir)
print(userInputFile)
#print "Value of Available is: %s"%Available
#Playing with classes for variable scope tests:
class Pippo:
def __init__(self):
self.a=0
self.b=1
def test1(self,d):
print(d)
def test2(self):
self.e=self.Pluto(self)
self.e.testscope()
class Pluto:
def __init__(self,mother):
self.Me="Pluto"
self.mother=mother
def testscope(self):
#print a
#print self.a
self.mother.test1(self.Me)
def main():
#Testing threading concepts ;)
#First set that all 4 cores are available:
Available=['0','1','2','3']
#Then populate the list of tests to do:
#This list should be a list of arguments with which to run simpleGenReport (except the cpu).
TestToDo=['Pippo','Pluto','Paperino','Minnie','Qui','Quo','Qua','Zio Paperone','Banda Bassotti','Archimede','Topolino']
#Now let's set up an infinite loop that will go through the TestToDo list, submit a thread per cpu available from the Available list
#using pop.
activeThreads={}
while True:
#If there are cores available and tests to run:
print("Main while loop:")
print(Available)
print(TestToDo)
#Logic based on checking for TestToDo first:
if TestToDo:
print("Still folllowing %s tests to do:"%len(TestToDo))
print(TestToDo)
#Test available cores:
if Available:
print("Hey there is at least one core available!")
print(Available)
cpu=Available.pop()
print("Let's use core %s"%cpu)
threadArgument=TestToDo.pop()
print("Let's submit job %s on core %s"%(threadArgument,cpu))
print("Instantiating thread")
print("Testing the keyword arguments with:")
kwargs={'cpu':3,'perfsuitedir':"work",'userInputFile':'TTBAR_GEN,FASTSIM.root'}
print(kwargs)
threadToDo=TestThread(threadArgument,cpu,**kwargs)
print("Starting thread %s"%threadToDo)
threadToDo.start()
print("Appending thread %s to the list of active threads"%threadToDo)
activeThreads[cpu]=threadToDo
#If there is no available core, pass, there will be some checking of activeThreads, a little sleep and then another check.
else:
pass
#Test activeThreads:
for cpu in activeThreads.keys():
if activeThreads[cpu].isAlive():
pass
elif cpu not in Available:
print("About to append cpu %s to Available list"%cpu)
Available.append(cpu)
if set(Available)==set(['0','1','2','3']) and not TestToDo:
break
else:
print("Sleeping and checking again...")
time.sleep(1)
#Check we broke out of the infinite loop!
print("WHEW! We're done... all TestToDo are done...")
print(Available)
print(TestToDo)
#Next: check scenarios
#1-many more TestToDo than Available cores
#Test 1 done successfully.
#2-complicated Test() class that calls other functions with args
#3-What happens on the machine with top
#4-What if they get killed or hang?
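# Assumed entry point (a sketch; the file defines main() without calling it,
# so nothing runs when the script is executed directly without this guard):
if __name__ == '__main__':
    main()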
| 39.353741 | 185 | 0.621262 | 758 | 5,785 | 4.705805 | 0.313984 | 0.005607 | 0.015419 | 0.012616 | 0.053546 | 0.035324 | 0.021306 | 0.021306 | 0.021306 | 0 | 0 | 0.006708 | 0.278479 | 5,785 | 146 | 186 | 39.623288 | 0.847868 | 0.307692 | 0 | 0.21 | 0 | 0.01 | 0.197883 | 0.005546 | 0 | 0 | 0 | 0.006849 | 0 | 1 | 0.14 | false | 0.04 | 0.02 | 0.02 | 0.24 | 0.28 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d44510e870ea216676fc2c3138e5db6f9bb2ae40 | 3,301 | py | Python | gui/plot.py | kklmn/ParSeq | 46178c21c3ee39b84bbf3d80bcd9f93128ace9e2 | [
"MIT"
] | 3 | 2018-11-19T07:14:25.000Z | 2020-07-28T17:20:14.000Z | gui/plot.py | kklmn/ParSeq | 46178c21c3ee39b84bbf3d80bcd9f93128ace9e2 | [
"MIT"
] | null | null | null | gui/plot.py | kklmn/ParSeq | 46178c21c3ee39b84bbf3d80bcd9f93128ace9e2 | [
"MIT"
] | 2 | 2019-03-25T09:36:11.000Z | 2021-12-19T07:52:38.000Z | # -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev"
__date__ = "16 Feb 2019"
# !!! SEE CODERULES.TXT !!!
import os
from silx.gui import qt
from silx.gui import plot as splot
from ..core import singletons as csi
class Plot1D(splot.PlotWindow):
def __init__(self, parent=None, backend=None, position=True):
super(Plot1D, self).__init__(parent=parent, backend=backend,
resetzoom=True, autoScale=True,
logScale=True, grid=True,
curveStyle=True, colormap=False,
aspectRatio=False, yInverted=False,
copy=True, save=True, print_=True,
control=True, position=position,
roi=False, mask=False, fit=False)
if parent is None:
self.setWindowTitle('Plot1D')
action = self.getFitAction()
action.setXRangeUpdatedOnZoom(True)
action.setFittedItemUpdatedFromActiveCurve(True)
def graphCallback(self, ddict=None):
"""This callback is going to receive all the events from the plot."""
if ddict is None:
ddict = {}
if ddict['event'] in ["legendClicked", "curveClicked"]:
if ddict['button'] == "left":
self.activateCurve(ddict['label'])
qt.QToolTip.showText(self.cursor().pos(), ddict['label'])
def activateCurve(self, label):
alias = os.path.splitext(label)[0]
for item in csi.allLoadedItems:
if item.alias == alias:
break
else:
return
index = csi.model.indexFromItem(item)
csi.selectionModel.setCurrentIndex(
index, qt.QItemSelectionModel.ClearAndSelect |
qt.QItemSelectionModel.Rows)
class Plot2D(splot.Plot2D):
pass
class Plot3D(splot.StackView):
posInfo = [
('Position', None), # None is callback fn set after instantiation
('Value', None)] # None is callback fn set after instantiation
def setCustomPosInfo(self):
p = self._plot._positionWidget._fields[0]
self._plot._positionWidget._fields[0] = (p[0], p[1], self._imagePos)
p = self._plot._positionWidget._fields[1]
self._plot._positionWidget._fields[1] = (p[0], p[1], self._imageVal)
def _imageVal(self, x, y):
"used for displaying pixel value under cursor"
activeImage = self.getActiveImage()
if activeImage is not None:
data = activeImage.getData()
height, width = data.shape
# print(width, height, x, y)
x = int(x)
y = int(y)
return data[y][x] if 0 <= x < width and 0 <= y < height else ''
return '-'
def _imagePos(self, x, y):
"used for displaying pixel coordinates under cursor"
img_idx = self._browser.value()
if self._perspective == 0:
dim0, dim1, dim2 = img_idx, int(y), int(x)
elif self._perspective == 1:
dim0, dim1, dim2 = int(y), img_idx, int(x)
elif self._perspective == 2:
dim0, dim1, dim2 = int(y), int(x), img_idx
return '{0}, {1}, {2}'.format(dim0, dim1, dim2)
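# Hedged note (silx wiring is an assumption beyond this file): posInfo holds
# (label, callback) pairs with None placeholders, and setCustomPosInfo()
# patches the underlying position widget in place, so callers are expected to
# call setCustomPosInfo() once right after constructing the Plot3D instance.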
| 37.511364 | 77 | 0.565586 | 364 | 3,301 | 5.016484 | 0.395604 | 0.017525 | 0.048193 | 0.061336 | 0.191676 | 0.075575 | 0.075575 | 0.044907 | 0 | 0 | 0 | 0.018775 | 0.322327 | 3,301 | 87 | 78 | 37.942529 | 0.797497 | 0.097849 | 0 | 0 | 0 | 0 | 0.068211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0.014286 | 0.057143 | 0 | 0.257143 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d44834c9eaa5437119fdcf7514ce64d66090c5cb | 576 | py | Python | src/test_app.py | perylemke/immutable_infra | 9e7ce2fec2ace5d2efdb598883483142b02ceac0 | [
"Apache-2.0"
] | 10 | 2019-05-06T20:48:48.000Z | 2020-10-30T21:30:23.000Z | src/test_app.py | perylemke/immutable_infra | 9e7ce2fec2ace5d2efdb598883483142b02ceac0 | [
"Apache-2.0"
] | null | null | null | src/test_app.py | perylemke/immutable_infra | 9e7ce2fec2ace5d2efdb598883483142b02ceac0 | [
"Apache-2.0"
] | null | null | null | from app import app
import json
import socket
import os.path
# The first test validates the status code, the second validates the JSON body
def test_get_status_code():
app.config["TESTING"] = True
with app.test_client() as client:
response = client.get("/request")
assert response.status_code == 200
def test_get_status_msg():
app.config["TESTING"] = True
host = socket.gethostname()
with app.test_client() as client:
response = client.get("/request")
assert response.json == {
'response': "Respeitem o isolamento social!"
} | 27.428571 | 56 | 0.661458 | 74 | 576 | 5.027027 | 0.472973 | 0.048387 | 0.053763 | 0.086022 | 0.33871 | 0.33871 | 0.33871 | 0.33871 | 0.33871 | 0.33871 | 0 | 0.006803 | 0.234375 | 576 | 21 | 57 | 27.428571 | 0.836735 | 0.092014 | 0 | 0.352941 | 0 | 0 | 0.130268 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.117647 | false | 0 | 0.235294 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
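# Hedged note (not in the original file): TESTING = True makes Flask propagate
# exceptions instead of rendering 500 pages, which keeps pytest failures
# readable; both tests assume app.py registers the /request route.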
d44a00f74c482c7832f425339e2417ab5c29c7e3 | 35,663 | py | Python | lingvodoc/scripts/docx_import.py | SegFaulti4/lingvodoc | 8b296b43453a46b814d3cd381f94382ebcb9c6a6 | [
"Apache-2.0"
] | null | null | null | lingvodoc/scripts/docx_import.py | SegFaulti4/lingvodoc | 8b296b43453a46b814d3cd381f94382ebcb9c6a6 | [
"Apache-2.0"
] | 1 | 2021-07-26T09:52:46.000Z | 2021-07-26T09:52:46.000Z | lingvodoc/scripts/docx_import.py | Winking-maniac/lingvodoc | f037bf0e91ccdf020469037220a43e63849aa24a | [
"Apache-2.0"
] | null | null | null |
# Standard library imports.
import ast
import getopt
import logging
import pprint
import re
import sys
# External imports.
import docx
import pympi
import pyramid.paster as paster
# Project imports.
from lingvodoc.models import (
DBSession,
Dictionary,
)
# Setting up logging, if we are not being run as a script.
if __name__ != '__main__':
log = logging.getLogger(__name__)
log.debug('module init')
def levenshtein(
snippet_str,
snippet_index,
word_str,
__debug_levenshtein_flag__ = False):
"""
Matches word string to the snippet string via adjusted Levenshtein matching, with no penalties for
snippet string skipping before and after match.
"""
d = {(0, j): (j, 1e256)
for j in range(len(word_str) + 1)}
for i in range(len(snippet_str) - snippet_index):
d[(i + 1, 0)] = (0, 1e256)
minimum_distance = len(word_str)
minimum_begin_index = 0
minimum_end_index = 0
for i in range(1, len(snippet_str) - snippet_index + 1):
if __debug_levenshtein_flag__:
log.debug(
'd[{0}, 0]: {1}'.format(i, d[(i, 0)]))
for j in range(1, len(word_str) + 1):
# Matching current characters of the word and snippet strings.
s_distance, s_begin_index = d[i - 1, j - 1]
substitution_value = s_distance + (
0 if snippet_str[snippet_index + i - 1] == word_str[j - 1] else 1)
substitution_index = min(s_begin_index, i - 1)
# Skipping current character from the snippet string.
d_distance, d_begin_index = d[i - 1, j]
deletion_value = d_distance + (
1 if j < len(word_str) else 0)
deletion_index = d_begin_index
# Skipping current character from the word string.
i_distance, i_begin_index = d[i, j - 1]
insertion_value = i_distance + 1
insertion_index = i_begin_index
# Getting minimum.
minimum_value = min(
substitution_value,
deletion_value,
insertion_value)
if minimum_value == deletion_value:
operation_index = 1
minimum_index = deletion_index
elif minimum_value == insertion_value:
operation_index = 2
minimum_index = insertion_index
else:
operation_index = 0
minimum_index = substitution_index
d[(i, j)] = (minimum_value, minimum_index)
# Showing edit distance computation details.
if __debug_levenshtein_flag__:
log.debug(
'\nd[{0}, {1}] (\'{18}\' & \'{14}\'): {4}'
'\n d[{5}, {6}] (\'{2}\' & \'{3}\'): {9} + {10}{11} (\'{12}\', \'{13}\')'
'\n d[{5}, {1}] (\'{2}\' & \'{14}\'): {15} + {16}{17}'
'\n d[{0}, {6}] (\'{18}\' & \'{3}\'): {19} + 1{20}'.format(
i, j,
snippet_str[snippet_index : snippet_index + i - 1] + '|' +
snippet_str[snippet_index + i - 1],
word_str[: j - 1] + '|' + word_str[j - 1],
d[(i, j)][0],
i - 1, j - 1,
snippet_str[snippet_index : snippet_index + i - 1] + '|',
word_str[: j - 1] + '|',
d[(i - 1, j - 1)][0],
0 if snippet_str[snippet_index + i - 1] == word_str[j - 1] else 1,
'*' if operation_index == 0 else '',
snippet_str[snippet_index + i - 1],
word_str[j - 1],
word_str[:j] + '|',
d[(i - 1, j)][0],
1 if j < len(word_str) else 0,
'*' if operation_index == 1 else '',
snippet_str[snippet_index : snippet_index + i] + '|',
d[(i, j - 1)][0],
'*' if operation_index == 2 else ''))
# Checking if we have a new best matching.
if d[i, len(word_str)][0] < minimum_distance:
minimum_distance, minimum_begin_index = d[i, len(word_str)]
minimum_end_index = i
if minimum_distance == 0:
break
return (
minimum_distance,
minimum_begin_index,
minimum_end_index)
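# Illustrative usage (added; the return values are assumed from the logic
# above, not taken from the original test suite):
#
# distance, begin, end = levenshtein('xxbrownxx', 0, 'brown')
# # distance == 0 for an exact substring match; begin/end delimit the
# # matched span of the snippet string relative to snippet_index.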
def prepare_match_string(cell_str):
"""
Processes string for matching, finding and marking portions in parentheses to be considered as
optional during matching.
"""
chr_list = []
chr_index = 0
for match in re.finditer(r'\([^()]*?\)', cell_str):
for chr in re.sub(
r'\W+', '', cell_str[chr_index : match.start()]):
chr_list.append((chr, False))
for chr in re.sub(
r'\W+', '', match.group(0)):
chr_list.append((chr, True))
chr_index = match.end()
for chr in re.sub(
r'\W+', '', cell_str[chr_index:]):
chr_list.append((chr, False))
return chr_list
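# Example (added, illustrative): non-word characters are stripped and any
# parenthesised span is flagged as optional.
#
# prepare_match_string('abc (de) f')
# # -> [('a', False), ('b', False), ('c', False), ('d', True), ('e', True), ('f', False)]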
def format_match_string(marked_chr_list):
"""
Formats list of marked characters as a string.
"""
chr_list = []
mark_prev = False
for chr, mark in marked_chr_list:
if mark != mark_prev:
chr_list.append('(' if mark else ')')
chr_list.append(chr)
mark_prev = mark
if mark_prev:
chr_list.append(')')
return ''.join(chr_list)
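# Example (added, illustrative): the inverse of the marking above.
#
# format_match_string([('a', False), ('d', True), ('e', True), ('f', False)])
# # -> 'a(de)f'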
class State(object):
"""
State of snippet table parsing.
"""
def __init__(self, snippet_str, cell_list, row_index):
"""
Initialization with the contents of the first snippet string.
"""
self.snippet_count = 0
self.snippet_chain = None
self.snippet_str = snippet_str
self.row_index = row_index
self.row_list = [cell_list]
self.d0 = []
self.d1 = [0.999 * i for i in range(len(self.snippet_str) + 1)]
self.word_list = []
self.word_str = []
self.total_value = 0
self.snippet_value = 0
def process_row(
self,
row_str,
cell_list,
row_index,
__debug_flag__ = False):
"""
Processes another data string, yielding one state where it is treated as a new snippet string
and another where it is treated as a word string.
"""
# First, assuming that this data string is the next snippet string.
if row_str:
copy = State(row_str, cell_list, row_index)
copy.snippet_chain = (
(tuple(self.row_list), self.row_index),
self.snippet_chain)
copy.snippet_count = self.snippet_count + 1
copy.total_value = self.total_value + self.d1[-1]
yield copy
# Second, assuming that this data string is a word string.
len_prev = len(self.word_str)
self.word_list.append(row_str)
self.word_str += row_str
self.row_list.append(cell_list)
# Updating Levenshtein alignment of snippet words to the snippet string.
for i in range(len(row_str)):
self.d0 = self.d1
self.d1 = [len_prev + i + 1]
for j in range(len(self.snippet_str)):
# Matching current characters of the snippet string and the word string.
s_cost = 0 if self.snippet_str[j][0] == row_str[i][0] else 1
if s_cost and (self.snippet_str[j][1] or row_str[i][1]):
s_cost = 0.001
s_value = self.d0[j] + s_cost
# Skipping current character either from the snippet string or from the word string.
d_value = self.d1[j] + (0.000999 if self.snippet_str[j][1] else 0.999)
i_value = self.d0[j + 1] + (0.001 if row_str[i][1] else 1)
self.d1.append(min(s_value, d_value, i_value))
# Showing debug info, if required.
if __debug_flag__:
log.debug((
format_match_string(self.snippet_str[:j]),
format_match_string(self.word_str[:len_prev + i]),
self.d0[j],
self.snippet_str[j][0],
row_str[i][0],
round(s_value, 6)))
log.debug((
format_match_string(self.snippet_str[:j]),
format_match_string(self.word_str[:len_prev + i + 1]),
self.d1[j],
self.snippet_str[j][0],
round(d_value, 6)))
log.debug((
format_match_string(self.snippet_str[:j + 1]),
format_match_string(self.word_str[:len_prev + i]),
self.d0[j + 1],
row_str[i][0],
round(i_value, 6)))
log.debug((
format_match_string(self.snippet_str[:j + 1]),
format_match_string(self.word_str[:len_prev + i + 1]),
round(min(s_value, d_value, i_value), 6)))
log.debug(self.d1)
# Updating alignment value.
if len(self.word_str) <= 0:
self.snippet_value = 0
elif len(self.word_str) > len(self.snippet_str):
self.snippet_value = self.d1[-1]
else:
self.snippet_value = min(
self.d1[len(self.word_str) : 2 * len(self.word_str)])
yield self
def beam_search_step(
state_list,
cell_str,
cell_list,
row_index,
beam_width,
__debug_beam_flag__ = False):
"""
Another step of alignment beam search.
"""
if not state_list:
return [State(
cell_str, cell_list, row_index)]
# Sorting parsing states by the snippet they are parsing.
state_dict = {}
for state in state_list:
for state_after in state.process_row(
cell_str, cell_list, row_index):
index = state_after.row_index
# Leaving only states with the best snippet histories.
if (index not in state_dict or
state_after.total_value < state_dict[index][0]):
state_dict[index] = (state_after.total_value, [state_after])
elif state_after.total_value == state_dict[index][0]:
state_dict[index][1].append(state_after)
state_list = []
for value, state_after_list in state_dict.values():
state_list.extend(state_after_list)
# Showing snippet alignment beam search state, if required.
if __debug_beam_flag__:
log.debug('\n' +
pprint.pformat([(
round(state.total_value + state.snippet_value, 6),
state.snippet_count,
format_match_string(state.snippet_str),
'|'.join(
format_match_string(word_str)
for word_str in state.word_list))
for state in state_list],
width = 384))
# Leaving only a number of best states.
state_list.sort(key = lambda state:
(state.total_value + state.snippet_value, state.snippet_count))
return state_list[:beam_width]
def parse_table(
row_list,
limit = None,
__debug_beam_flag__ = False):
"""
Tries to parse snippet data represented as a table.
"""
# Removing any snippet alignment marks, if we have any.
for cell_list in row_list:
for i in range(len(cell_list)):
match = re.match(r'\(__\d+__\)\s*', cell_list[i])
if match:
cell_list[i] = cell_list[i][match.end():]
state_list = []
beam_width = 32
# Going through snippet data.
for row_index, cell_list in enumerate(row_list[1:], 1):
if limit and row_index > limit:
break
if not any(cell_list[:3]):
continue
cell_str = (
prepare_match_string(
cell_list[0].lower()))
# Updating alignment search on another row.
state_list = (
beam_search_step(
state_list,
cell_str,
cell_list,
row_index,
beam_width,
__debug_beam_flag__))
# Returning final parsing search state.
return state_list
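# Illustrative call (added; assumes row_list is a header row followed by
# [text, other_text, translation] cell lists, as built in main_import() below):
#
# states = parse_table(row_list)
# best_state = states[0] # states come back sorted by alignment cost, best first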
def parse_by_paragraphs(
row_list,
limit = None,
__debug_flag__ = False,
__debug_beam_flag__ = False):
"""
Tries to parse snippet data with paragraph separation inside table cells.
"""
# Splitting row texts by paragraphs.
line_row_list = []
line_row_count = 0
for cell_list in row_list[1:]:
if limit and line_row_count >= limit:
break
paragraph_list_list = [
re.split(r'[^\S\n]*\n\s*', text)
for text in cell_list]
how_many = max(
len(paragraph_list)
for paragraph_list in paragraph_list_list[:3])
# Iterating over aligned paragraphs in adjacent cells.
line_rank_list = []
for i in range(how_many):
line_cell_list = []
for paragraph_list in paragraph_list_list:
if i < len(paragraph_list):
# Removing snippet alignment mark, if there is one present.
cell_str = paragraph_list[i]
match = re.match(r'\(__\d+__\)\s*', cell_str)
line_cell_list.append(
cell_str[match.end():] if match else
cell_str)
else:
line_cell_list.append('')
# Another line row, if it is non-empty.
if any(line_cell_list):
line_rank_list.append(line_cell_list)
line_row_count += 1
if limit and line_row_count >= limit:
break
line_row_list.append(line_rank_list)
# Showing what we have, if required.
if __debug_flag__:
log.debug(
'\nrow_list:\n{0}'.format(
pprint.pformat(
row_list, width = 196)))
state_list = []
beam_width = 32
line_row_count = 0
# Going through snippet data.
for row_index, line_rank_list in enumerate(line_row_list):
if limit and line_row_count >= limit:
break
for line_index, line_cell_list in enumerate(line_rank_list):
line_cell_str = (
prepare_match_string(
line_cell_list[0].lower()))
# Updating alignment search on another row.
state_list = (
beam_search_step(
state_list,
line_cell_str,
line_cell_list,
(row_index, line_index),
beam_width,
__debug_beam_flag__))
# Returning final parsing search state.
return state_list
def main_import(args):
"""
Test import of 5-tier data from a Docx file.
"""
opt_list, arg_list = (
getopt.gnu_getopt(args, '', [
'all-tables',
'check-docx-file=',
'check-file=',
'debug',
'debug-beam',
'debug-eaf',
'eaf-file=',
'limit=',
'modify-docx-file',
'no-db',
'separate-by-paragraphs']))
opt_dict = dict(opt_list)
# Parsing command-line options.
docx_path = arg_list[0]
check_file_path = opt_dict.get('--check-file')
check_docx_file_path = opt_dict.get('--check-docx-file')
eaf_file_path = opt_dict.get('--eaf-file')
limit = (
ast.literal_eval(opt_dict['--limit'])
if '--limit' in opt_dict else None)
modify_docx_flag = '--modify-docx-file' in opt_dict
separate_by_paragraphs_flag = '--separate-by-paragraphs' in opt_dict
__debug_flag__ = '--debug' in opt_dict
__debug_beam_flag__ = '--debug-beam' in opt_dict
__debug_eaf_flag__ = '--debug-eaf' in opt_dict
# Processing specified Docx file.
log.debug(
'\ndocx_path: {0}'.format(docx_path))
document = docx.Document(docx_path)
if len(document.tables) <= 0:
raise NotImplementedError
# Accessing info of the first table, or all tables, depending on the options.
#
# Counting only unique cells because apparently some .docx documents can have repeating cells in their
# structure.
row_list = []
table_list = (
document.tables if '--all-tables' in opt_dict else
document.tables[:1])
for table_index, table in enumerate(table_list):
column_count = len(set(table.rows[0].cells))
row_count = len(set(table.columns[0].cells))
table_cell_list = list(table._cells)
source_cell_list = []
source_cell_set = set()
for cell in table_cell_list:
if cell not in source_cell_set:
source_cell_list.append(cell)
source_cell_set.add(cell)
# Checking for non-uniform rows / columns.
if len(source_cell_list) != column_count * row_count:
log.error(
'\nTable ({0}): rows and / or columns are uneven, '
'{1} rows, {2} columns, {3} != {1} * {2} cells.'.format(
table_index,
row_count,
column_count,
len(source_cell_list)))
raise NotImplementedError
row_list.extend(
[cell.text
for cell in source_cell_list[
i * column_count : (i + 1) * column_count]]
for i in range(row_count))
log.debug(
'\ntable ({}): {} columns, {} rows, {} cells'.format(
table_index,
column_count,
row_count,
len(source_cell_list)))
# Processing this info.
header_list = row_list[0]
log.debug(
'\nheader: {0}'.format(header_list))
if separate_by_paragraphs_flag:
state_list = parse_by_paragraphs(
row_list,
limit,
__debug_flag__,
__debug_beam_flag__)
else:
state_list = parse_table(
row_list,
limit,
__debug_beam_flag__)
# Showing final alignment search state, if required.
if __debug_beam_flag__:
log.debug('\n' +
pprint.pformat([(
round(state.total_value + state.snippet_value, 6),
state.snippet_count,
format_match_string(state.snippet_str),
'|'.join(
format_match_string(word_str)
for word_str in state.word_list))
for state in state_list],
width = 384))
# Getting all parsed snippets, if we need them.
if (eaf_file_path is not None or
check_file_path is not None or
check_docx_file_path is not None or
modify_docx_flag):
if not state_list:
log.debug('\nno data')
return
best_state = state_list[0]
snippet_chain = (
(tuple(best_state.row_list), best_state.row_index),
best_state.snippet_chain)
snippet_list = []
# Compiling snippet list, showing it, if required.
while snippet_chain is not None:
(row_tuple, row_index), snippet_chain = snippet_chain
snippet_list.append((list(row_tuple), row_index))
snippet_list.reverse()
if __debug_flag__:
log.debug(
'\nsnippet_list:\n{0}'.format(
pprint.pformat(
snippet_list, width = 196)))
# Saving parsed alignment, if required.
if check_file_path is not None:
with open(
check_file_path, 'w', encoding = 'utf-8') as check_file:
check_file.write('\n')
# Showing each parsed snippet.
for i, (snippet_value_list, snippet_value_index) in enumerate(snippet_list):
check_file.write(
'{0}\n'.format(i + 1))
value = snippet_value_list[0]
check_file.write(
(value if isinstance(value, str) else value[0]) + '\n')
for value in snippet_value_list[1:]:
check_file.write(' ' +
(value if isinstance(value, str) else value[0]) + '\n')
check_file.write('\n')
# Saving parsing alignment as Docx file, if required.
if check_docx_file_path is not None:
if separate_by_paragraphs_flag:
raise NotImplementedError
check_docx = docx.Document()
check_table = check_docx.add_table(
rows = row_count - 1 + len(snippet_list),
cols = 3)
table_cell_list = check_table._cells
table_cell_index = 0
# Exporting all parsed snippets with their numbers.
for i, (snippet_row_list, snippet_row_index) in enumerate(snippet_list):
table_cell_list[table_cell_index].text = '{0}'.format(i + 1)
table_cell_index += 3
for cell_list in snippet_row_list:
for table_cell, snippet_cell in zip(
table_cell_list[table_cell_index : table_cell_index + 3],
cell_list):
table_cell.text = snippet_cell
table_cell_index += 3
check_docx.save(check_docx_file_path)
# Saving parsed snippets as the standard 5-tier EAF structure.
if eaf_file_path is not None:
log.debug('\n' + pprint.pformat(snippet_list, width = 196))
eaf = pympi.Elan.Eaf()
eaf.add_linguistic_type('text_top_level')
eaf.add_linguistic_type('symbolic_association', 'Symbolic_Association', False)
eaf.add_linguistic_type('word_translation_included_in', 'Included_In')
eaf.remove_linguistic_type('default-lt')
# Showing linguistic types info, if required.
if __debug_eaf_flag__:
log.debug(
'\nget_linguistic_type_names(): {0}'.format(eaf.get_linguistic_type_names()))
log.debug(''.join(
'\nget_parameters_for_linguistic_type({0}): {1}'.format(
repr(name),
eaf.get_parameters_for_linguistic_type(name))
for name in eaf.get_linguistic_type_names()))
eaf.add_tier('text', 'text_top_level')
eaf.add_tier('other text', 'symbolic_association', 'text')
eaf.add_tier('literary translation', 'symbolic_association', 'text')
eaf.add_tier('translation', 'word_translation_included_in', 'text')
eaf.add_tier('transcription', 'symbolic_association', 'translation')
eaf.add_tier('word', 'symbolic_association', 'translation')
eaf.remove_tier('default')
# Showing tier info, if required.
if __debug_eaf_flag__:
log.debug(
'\nget_tier_names(): {0}'.format(eaf.get_tier_names()))
log.debug(''.join(
'\nget_parameters_for_tier({0}): {1}'.format(
repr(name),
eaf.get_parameters_for_tier(name))
for name in eaf.get_tier_names()))
# Compiling annotation data.
step = 75
position = step
for snippet_value_list, snippet_value_index in snippet_list:
# Snippet base texts.
text, text_other, text_translation = snippet_value_list[0]
duration = len(text) * step
eaf.add_annotation(
'text', position, position + duration, text)
eaf.add_ref_annotation(
'other text', 'text', position, text_other)
eaf.add_ref_annotation(
'literary translation', 'text', position, text_translation)
# Preparing to create annotations for snippet words.
translation_position = position
translation_length = (
sum(len(text_list[0] or text_list[2] or text_list[1])
for text_list in snippet_value_list[1:]) +
len(snippet_value_list) - 2)
translation_step = duration // translation_length
# Snippet words.
for text_list in snippet_value_list[1:]:
word, word_other, translation = text_list
translation_duration = (
round(
max(len(word or translation or word_other), 1) *
translation_step))
eaf.add_annotation(
'translation',
translation_position,
translation_position + translation_duration,
translation)
eaf.add_ref_annotation(
'transcription', 'translation', translation_position, word_other)
eaf.add_ref_annotation(
'word', 'translation', translation_position, word)
translation_position += (
translation_duration + translation_step)
# Ready to go to the next snippet.
position += duration + step
# Showing annotation info, if required.
if __debug_eaf_flag__:
log.debug(''.join(
'\nget_annotation_data_for_tier({0}):\n{1}'.format(
repr(name),
eaf.get_annotation_data_for_tier(name)[:4])
for name in eaf.get_tier_names()))
eaf.header['TIME_UNITS'] = 'milliseconds'
eaf.to_file(eaf_file_path)
# Modifying source Docx file with alignment marks, if required.
if modify_docx_flag:
if not separate_by_paragraphs_flag:
for i, (snippet_row_list, snippet_row_index) in enumerate(snippet_list):
mark_str = '(__{0}__)\n'.format(i + 1)
cell_index = snippet_row_index * column_count
for j, cell in enumerate(
source_cell_list[cell_index : cell_index + 3]):
# Right now can't do something like
#
# cell.paragraphs[0].insert_paragraph_before(mark_str),
#
# because, if there is a mark there already, we should delete it, and tracking this
# deletion across all possible paragraphs and runs in the cell is too high complexity.
cell.text = mark_str + snippet_row_list[0][j]
document.save(docx_path)
# When tables are separated by paragraphs.
else:
snippet_index = 0
snippet_row_index, snippet_rank_index = snippet_list[snippet_index][1]
for row_index, cell_list in enumerate(row_list[1:]):
# Data extraction here mirrors the 'parse_by_paragraphs()' function above.
paragraph_list_list = [
re.split(r'([^\S\n]*\n\s*)', text)
for text in cell_list]
for i, paragraph_list in enumerate(paragraph_list_list):
paragraph_list.append('')
paragraph_list_list[i] = list(
zip(paragraph_list[::2], paragraph_list[1::2]))
line_list_list = [[]
for text in cell_list]
how_many = max(
len(paragraph_list)
for paragraph_list in paragraph_list_list[:3])
# Iterating over aligned paragraphs in adjacent cells.
line_rank_count = 0
for i in range(how_many):
line_cell_list = []
if (snippet_index is not None and
row_index == snippet_row_index and
line_rank_count == snippet_rank_index):
mark_str = '(__{0}__)\n'.format(snippet_index + 1)
for line_list in line_list_list:
line_list.append(mark_str)
# Next snippet.
snippet_index += 1
if snippet_index >= len(snippet_list):
snippet_index = None
else:
snippet_row_index, snippet_rank_index = (
snippet_list[snippet_index][1])
for paragraph_list, line_list in zip(
paragraph_list_list, line_list_list):
if i < len(paragraph_list):
# Removing previous snippet alignment mark, if there is one present.
cell_str, separator_str = paragraph_list[i]
match = re.match(r'\(__\d+__\)\s*', cell_str)
if match:
cell_str = cell_str[match.end():]
line_cell_list.append(cell_str)
line_list.append(cell_str + separator_str)
# Another line row, if it is non-empty.
if any(line_cell_list):
line_rank_count += 1
else:
for line_list in line_list_list:
line_list.pop()
# Replacing contents of another table cell.
cell_index = (row_index + 1) * column_count
for cell, line_list in zip(
source_cell_list[cell_index : cell_index + 3],
line_list_list):
match = re.fullmatch(
r'(.*?)[^\S\n]*\n[^\S\n]*', line_list[0], re.DOTALL)
cell.text = match.group(1) if match else line_list[0]
# Splitting text into distinct paragraphs because otherwise at least LibreOffice writer
# starts to take too much time to process resulting documents.
for line in line_list[1:]:
match = re.fullmatch(
r'(.*?)[^\S\n]*\n[^\S\n]*', line, re.DOTALL)
cell.add_paragraph(
match.group(1) if match else line, 'Normal')
# Saving Docx file updates.
document.save(docx_path)
def main_eaf(args):
"""
Showing structure of a specified Eaf file.
"""
for eaf_path in args:
log.debug(
'\neaf_path: {0}'.format(eaf_path))
eaf = pympi.Elan.Eaf(eaf_path)
log.debug(
'\nget_controlled_vocabulary_names(): {0}'.format(eaf.get_controlled_vocabulary_names()))
log.debug(
'\nget_external_ref_names(): {0}'.format(eaf.get_external_ref_names()))
log.debug(
'\nget_languages(): {0}'.format(eaf.get_languages()))
log.debug(
'\nget_lexicon_ref_names(): {0}'.format(eaf.get_lexicon_ref_names()))
log.debug(
'\nget_licenses(): {0}'.format(eaf.get_licenses()))
log.debug(
'\nget_linguistic_type_names(): {0}'.format(eaf.get_linguistic_type_names()))
log.debug(
'\nget_linked_files(): {0}'.format(eaf.get_linked_files()))
log.debug(
'\nget_locales(): {0}'.format(eaf.get_locales()))
log.debug(
'\nget_properties(): {0}'.format(eaf.get_properties()))
log.debug(
'\nget_secondary_linked_files(): {0}'.format(eaf.get_secondary_linked_files()))
log.debug(
'\nget_tier_names(): {0}'.format(eaf.get_tier_names()))
log.debug('\n' +
pprint.pformat(eaf.linguistic_types, width = 196))
# L-type and tier parameters.
log.debug(''.join(
'\nget_parameters_for_linguistic_type({0}): {1}'.format(
repr(name),
eaf.get_parameters_for_linguistic_type(name))
for name in eaf.get_linguistic_type_names()))
log.debug(''.join(
'\nget_tier_ids_for_linguistic_type({0}): {1}'.format(
repr(name),
eaf.get_tier_ids_for_linguistic_type(name))
for name in eaf.get_linguistic_type_names()))
log.debug(''.join(
'\nget_parameters_for_tier({0}): {1}'.format(
repr(name),
eaf.get_parameters_for_tier(name))
for name in eaf.get_tier_names()))
# Select annotations.
log.debug(''.join(
'\nget_annotation_data_for_tier({0}):\n{1}'.format(
repr(name),
eaf.get_annotation_data_for_tier(name)[:4])
for name in eaf.get_tier_names()))
# Average time interval per character.
total_duration = 0
total_length = 0
for name in eaf.get_tier_names():
tier_duration = 0
tier_length = 0
for annotation in eaf.get_annotation_data_for_tier(name):
begin, end, text = annotation[:3]
tier_duration += end - begin
tier_length += len(text)
log.debug(
'\ntier {0}: {1:.3f} / {2} -> {3:.3f}'.format(
repr(name),
tier_duration / 1000.0,
tier_length,
tier_duration / (tier_length * 1000)))
total_duration += tier_duration
total_length += tier_length
log.debug(
'\ntotal: {0:.3f} / {1} -> {2:.3f}'.format(
total_duration / 1000.0,
total_length,
total_duration / (total_length * 1000)))
# If we are being run as a script.
if __name__ == '__main__':
if (len(sys.argv) > 1 and
sys.argv[1] == '-config'):
# We have a configuration file; initializing DB, if required, and logging.
config_path = sys.argv[2]
if sys.argv[3] != '-no-db':
pyramid_env = paster.bootstrap(config_path)
arg_list = sys.argv[3:]
else:
arg_list = sys.argv[4:]
paster.setup_logging(config_path)
log = logging.getLogger(__name__)
else:
# No config file, so just logging to stdout.
arg_list = sys.argv[1:]
log_root = logging.getLogger()
log_root.setLevel(logging.DEBUG)
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setLevel(logging.DEBUG)
log_formatter = (
logging.Formatter(
'%(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] '
'%(pathname)s:%(lineno)d: %(message)s'))
log_handler.setFormatter(log_formatter)
log_root.addHandler(log_handler)
log = logging.getLogger(__name__)
# Doing what we need.
if len(arg_list) <= 0:
log.info(
'\nPlease specify a command to execute.')
elif arg_list[0] == 'import':
main_import(arg_list[1:])
elif arg_list[0] == 'eaf':
main_eaf(arg_list[1:])
else:
log.warning(
'\nUnknown command \'{0}\'.'.format(arg_list[0]))
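# Example invocations (added; the file names here are hypothetical):
#
# python docx_import.py import snippets.docx --eaf-file snippets.eaf
# python docx_import.py -config production.ini -no-db eaf snippets.eaf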
| 28.462091 | 108 | 0.535653 | 4,157 | 35,663 | 4.306952 | 0.109213 | 0.021448 | 0.010947 | 0.009439 | 0.405384 | 0.325011 | 0.276977 | 0.247822 | 0.223916 | 0.204368 | 0 | 0.016202 | 0.364832 | 35,663 | 1,252 | 109 | 28.484824 | 0.774192 | 0.120966 | 0 | 0.350282 | 0 | 0 | 0.071132 | 0.021468 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014124 | false | 0 | 0.018362 | 0 | 0.045198 | 0.009887 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d44da7d127e1ce8cb7767fe40398dec1469b4993 | 4,509 | py | Python | mindhome_alpha/erpnext/demo/user/stock.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/demo/user/stock.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/demo/user/stock.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe, random, erpnext
from frappe.desk import query_report
from erpnext.stock.stock_ledger import NegativeStockError
from erpnext.stock.doctype.serial_no.serial_no import SerialNoRequiredError, SerialNoQtyError
from erpnext.stock.doctype.batch.batch import UnableToSelectBatchError
from erpnext.stock.doctype.delivery_note.delivery_note import make_sales_return
from erpnext.stock.doctype.purchase_receipt.purchase_receipt import make_purchase_return
def work():
frappe.set_user(frappe.db.get_global('demo_manufacturing_user'))
make_purchase_receipt()
make_delivery_note()
make_stock_reconciliation()
submit_draft_stock_entries()
make_sales_return_records()
make_purchase_return_records()
def make_purchase_receipt():
if random.random() < 0.6:
from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_receipt
report = "Purchase Order Items To Be Received"
po_list =list(set([r[0] for r in query_report.run(report)["result"] if r[0]!="Total"]))[:random.randint(1, 10)]
for po in po_list:
pr = frappe.get_doc(make_purchase_receipt(po))
if pr.is_subcontracted=="Yes":
pr.supplier_warehouse = "Supplier - WPL"
pr.posting_date = frappe.flags.current_date
pr.insert()
try:
pr.submit()
except NegativeStockError:
print('Negative stock for {0}'.format(po))
frappe.db.commit()
def make_delivery_note():
# make purchase requests
# make delivery notes (if possible)
if random.random() < 0.6:
from erpnext.selling.doctype.sales_order.sales_order import make_delivery_note
report = "Ordered Items To Be Delivered"
for so in list(set([r[0] for r in query_report.run(report)["result"]
if r[0]!="Total"]))[:random.randint(1, 3)]:
dn = frappe.get_doc(make_delivery_note(so))
dn.posting_date = frappe.flags.current_date
for d in dn.get("items"):
if not d.expense_account:
d.expense_account = ("Cost of Goods Sold - {0}".format(
frappe.get_cached_value('Company', dn.company, 'abbr')))
try:
dn.insert()
dn.submit()
frappe.db.commit()
except (NegativeStockError, SerialNoRequiredError, SerialNoQtyError, UnableToSelectBatchError):
frappe.db.rollback()
def make_stock_reconciliation():
# random set some items as damaged
from erpnext.stock.doctype.stock_reconciliation.stock_reconciliation \
import OpeningEntryAccountError, EmptyStockReconciliationItemsError
if random.random() < 0.4:
stock_reco = frappe.new_doc("Stock Reconciliation")
stock_reco.posting_date = frappe.flags.current_date
stock_reco.company = erpnext.get_default_company()
stock_reco.get_items_for("Stores - WPL")
if stock_reco.items:
for item in stock_reco.items:
if item.qty:
item.qty = item.qty - random.randint(1, max(int(item.qty), 1)) # randint needs integer bounds
try:
stock_reco.insert(ignore_permissions=True, ignore_mandatory=True)
stock_reco.submit()
frappe.db.commit()
except OpeningEntryAccountError:
frappe.db.rollback()
except EmptyStockReconciliationItemsError:
frappe.db.rollback()
def submit_draft_stock_entries():
from erpnext.stock.doctype.stock_entry.stock_entry import IncorrectValuationRateError, \
DuplicateEntryForWorkOrderError, OperationsNotCompleteError
# try posting older drafts (if exists)
frappe.db.commit()
for st in frappe.db.get_values("Stock Entry", {"docstatus":0}, "name"):
try:
ste = frappe.get_doc("Stock Entry", st[0])
ste.posting_date = frappe.flags.current_date
ste.save()
ste.submit()
frappe.db.commit()
except (NegativeStockError, IncorrectValuationRateError, DuplicateEntryForWorkOrderError,
OperationsNotCompleteError):
frappe.db.rollback()
def make_sales_return_records():
if random.random() < 0.1:
for data in frappe.get_all('Delivery Note', fields=["name"], filters={"docstatus": 1}):
if random.random() < 0.1:
try:
dn = make_sales_return(data.name)
dn.insert()
dn.submit()
frappe.db.commit()
except Exception:
frappe.db.rollback()
def make_purchase_return_records():
if random.random() < 0.1:
for data in frappe.get_all('Purchase Receipt', fields=["name"], filters={"docstatus": 1}):
if random.random() < 0.1:
try:
pr = make_purchase_return(data.name)
pr.insert()
pr.submit()
frappe.db.commit()
except Exception:
frappe.db.rollback()
| 34.684615 | 113 | 0.745842 | 600 | 4,509 | 5.42 | 0.258333 | 0.0369 | 0.03444 | 0.032288 | 0.261685 | 0.220172 | 0.160517 | 0.143911 | 0.129766 | 0.098401 | 0 | 0.009042 | 0.141495 | 4,509 | 129 | 114 | 34.953488 | 0.831051 | 0.055223 | 0 | 0.327103 | 0 | 0 | 0.07289 | 0.005408 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065421 | false | 0.009346 | 0.11215 | 0 | 0.17757 | 0.018692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d45013e19ee94ead3bc3ab1cecb032b98f2fafda | 720 | py | Python | posts/templatetags/paginator.py | ujlbu4/vas3k.club | 1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e | [
"MIT"
] | 496 | 2020-04-24T04:20:32.000Z | 2022-03-31T21:55:57.000Z | posts/templatetags/paginator.py | ujlbu4/vas3k.club | 1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e | [
"MIT"
] | 642 | 2020-04-24T11:54:13.000Z | 2022-03-26T15:41:06.000Z | posts/templatetags/paginator.py | ujlbu4/vas3k.club | 1ec907cf7e5ae3a74059cde8729ca0b3e2d55a3e | [
"MIT"
] | 243 | 2020-04-24T11:49:11.000Z | 2022-03-24T18:38:48.000Z | from django import template
register = template.Library()
@register.inclusion_tag("common/paginator.html")
def paginator(items):
adjacent_pages = 4
num_pages = items.paginator.num_pages
page = items.number
start_page = max(page - adjacent_pages, 1)
if start_page <= 3:
start_page = 1
end_page = page + adjacent_pages + 1
if end_page >= num_pages - 1:
end_page = num_pages + 1
page_numbers = [n for n in range(start_page, end_page) if 0 < n <= num_pages]
return {
"items": items,
"page_numbers": page_numbers,
"show_first": 1 not in page_numbers,
"show_last": num_pages not in page_numbers,
"num_pages": num_pages,
}
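# Illustrative template usage (added; assumes `posts` is a Django Page object
# in the template context):
#
# {% load paginator %}
# {% paginator posts %}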
| 24 | 81 | 0.643056 | 101 | 720 | 4.316832 | 0.356436 | 0.146789 | 0.077982 | 0.082569 | 0.165138 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016886 | 0.259722 | 720 | 29 | 82 | 24.827586 | 0.801126 | 0 | 0 | 0 | 0 | 0 | 0.091667 | 0.029167 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4504bcd464e9711733f6e23b9e032a1b7ce8a80 | 18,784 | py | Python | svb/prior.py | physimals/svb | 9c04309ef1fc1d5f81103a50a01e3bf4c8f4ea44 | [
"Apache-2.0"
] | 3 | 2022-01-08T12:50:25.000Z | 2022-03-22T00:55:17.000Z | svb/prior.py | physimals/svb | 9c04309ef1fc1d5f81103a50a01e3bf4c8f4ea44 | [
"Apache-2.0"
] | 1 | 2020-10-16T15:27:46.000Z | 2020-10-16T15:59:48.000Z | svb/prior.py | physimals/svb | 9c04309ef1fc1d5f81103a50a01e3bf4c8f4ea44 | [
"Apache-2.0"
] | null | null | null | """
Definition of prior distribution
"""
import numpy as np
try:
import tensorflow.compat.v1 as tf
except ImportError:
import tensorflow as tf
from .utils import LogBase
from .dist import Normal
PRIOR_TYPE_NONSPATIAL = "N"
PRIOR_TYPE_SPATIAL_MRF = "M"
def get_prior(param, data_model, **kwargs):
"""
Factory method to return a vertexwise prior
"""
prior = None
if isinstance(param.prior_dist, Normal):
if param.prior_type == "N":
prior = NormalPrior(data_model.n_vertices, param.prior_dist.mean, param.prior_dist.var, **kwargs)
elif param.prior_type == "M":
prior = MRFSpatialPrior(data_model.n_vertices, param.prior_dist.mean, param.prior_dist.var, **kwargs)
elif param.prior_type == "M2":
prior = MRF2SpatialPrior(data_model.n_vertices, param.prior_dist.mean, param.prior_dist.var, **kwargs)
elif param.prior_type == "Mfab":
prior = FabberMRFSpatialPrior(data_model.n_vertices, param.prior_dist.mean, param.prior_dist.var, **kwargs)
elif param.prior_type == "A":
prior = ARDPrior(data_model.n_vertices, param.prior_dist.mean, param.prior_dist.var, **kwargs)
if prior is not None:
return prior
else:
raise ValueError("Can't create prior type %s for distribution %s - unrecognized combination" % (param.prior_type, param.prior_dist))
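# Illustrative usage (added; assumes `param` exposes `prior_dist`/`prior_type`
# and `data_model` exposes `n_vertices`, as referenced above):
#
# prior = get_prior(param, data_model)
# log_pdf = prior.mean_log_pdf(samples) # samples shaped [W, P, S]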
class Prior(LogBase):
"""
Base class for a prior, defining methods that must be implemented
"""
def mean_log_pdf(self, samples):
"""
:param samples: A tensor of shape [W, P, S] where W is the number
of parameter vertices, P is the number of parameters in the prior
(possibly 1) and S is the number of samples
:return: A tensor of shape [W] where W is the number of parameter vertices
containing the mean log PDF of the parameter samples
provided
"""
raise NotImplementedError()
def log_det_cov(self):
raise NotImplementedError()
class NormalPrior(Prior):
"""
Prior based on a vertexwise univariate normal distribution
"""
def __init__(self, nvertices, mean, var, **kwargs):
"""
:param mean: Prior mean value
:param var: Prior variance
"""
Prior.__init__(self)
self.name = kwargs.get("name", "NormalPrior")
self.nvertices = nvertices
self.scalar_mean = mean
self.scalar_var = var
self.mean = tf.fill([nvertices], mean, name="%s_mean" % self.name)
self.var = tf.fill([nvertices], var, name="%s_var" % self.name)
self.std = tf.sqrt(self.var, name="%s_std" % self.name)
def mean_log_pdf(self, samples):
"""
Mean log PDF for normal distribution
Note that ``term1`` is a constant offset when the prior variance is fixed and hence
in earlier versions of the code this was neglected, along with other constant offsets
such as factors of pi. However when this code is inherited by spatial priors and ARD
the variance is no longer fixed and this term must be included.
"""
dx = tf.subtract(samples, tf.reshape(self.mean, [self.nvertices, 1, 1])) # [W, 1, N]
z = tf.div(tf.square(dx), tf.reshape(self.var, [self.nvertices, 1, 1])) # [W, 1, N]
term1 = self.log_tf(-0.5*tf.log(tf.reshape(self.var, [self.nvertices, 1, 1])), name="term1")
term2 = self.log_tf(-0.5*z, name="term2")
log_pdf = term1 + term2 # [W, 1, N]
mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]
return mean_log_pdf
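# For reference (added): the averaged univariate normal log-density is
# log N(x; m, v) = -0.5*log(2*pi) - 0.5*log(v) - (x - m)^2 / (2*v);
# the constant -0.5*log(2*pi) is dropped above, consistent with the docstring note.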
def __str__(self):
return "Non-spatial prior (%f, %f)" % (self.scalar_mean, self.scalar_var)
class FabberMRFSpatialPrior(NormalPrior):
"""
Prior designed to mimic the 'M' type spatial prior in Fabber.
Note that this uses update equations for ak which is not in the spirit of the stochastic
method. 'Native' SVB MRF spatial priors are also defined which simply treat the spatial
precision parameter as an inference variable.
This code has been verified to generate the same ak estimate given the same input as
Fabber, however in practice it does not optimize to the same value. We don't yet know
why.
"""
def __init__(self, nvertices, mean, var, idx=None, post=None, nn=None, n2=None, **kwargs):
"""
:param mean: Tensor of shape [W] containing the prior mean at each parameter vertex
:param var: Tensor of shape [W] containing the prior variance at each parameter vertex
:param post: Posterior instance
:param nn: Sparse tensor of shape [W, W] containing nearest neighbour lists
:param n2: Sparse tensor of shape [W, W] containing second nearest neighbour lists
"""
NormalPrior.__init__(self, nvertices, mean, var, name="FabberMRFSpatialPrior")
self.idx = idx
# Save the original vertexwise mean and variance - the actual prior mean/var
# will be calculated from these and also the spatial variation in neighbour vertices
self.fixed_mean = self.mean
self.fixed_var = self.var
# nn and n2 are sparse tensors of shape [W, W]. If nn[A, B] = 1 then A is
# a nearest neighbour of B, and similarly for n2 and second nearest neighbours
self.nn = nn
self.n2 = n2
# Set up spatial smoothing parameter calculation from posterior and neighbour lists
self._setup_ak(post, nn, n2)
# Set up prior mean/variance
self._setup_mean_var(post, nn, n2)
def __str__(self):
return "Spatial MRF prior (%f, %f)" % (self.scalar_mean, self.scalar_var)
def _setup_ak(self, post, nn, n2):
# This is the equivalent of CalculateAk in Fabber
#
# Some of this could probably be better done using linalg
# operations but bear in mind this is one parameter only
self.sigmaK = self.log_tf(tf.matrix_diag_part(post.cov)[:, self.idx], name="sigmak") # [W]
self.wK = self.log_tf(post.mean[:, self.idx], name="wk") # [W]
self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]
# Sum over vertices of parameter variance multiplied by number of
# nearest neighbours for each vertex
trace_term = self.log_tf(tf.reduce_sum(self.sigmaK * self.num_nn), name="trace") # [1]
# Sum of nearest and next-nearest neighbour mean values
self.sum_means_nn = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.nn, tf.reshape(self.wK, (-1, 1))), (-1,)), name="wksum") # [W]
self.sum_means_n2 = self.log_tf(tf.reshape(tf.sparse_tensor_dense_matmul(self.n2, tf.reshape(self.wK, (-1, 1))), (-1,)), name="contrib8") # [W]
# vertex parameter mean multipled by number of nearest neighbours
wknn = self.log_tf(self.wK * self.num_nn, name="wknn") # [W]
swk = self.log_tf(wknn - self.sum_means_nn, name="swk") # [W]
term2 = self.log_tf(tf.reduce_sum(swk * self.wK), name="term2") # [1]
gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)
hk = tf.multiply(tf.to_float(self.nvertices), 0.5) + 1.0
self.ak = self.log_tf(tf.identity(gk * hk, name="ak"))
def _setup_mean_var(self, post, nn, n2):
# This is the equivalent of ApplyToMVN in Fabber
contrib_nn = self.log_tf(8*self.sum_means_nn, name="contrib_nn") # [W]
contrib_n2 = self.log_tf(-self.sum_means_n2, name="contrib_n2") # [W]
spatial_mean = self.log_tf(contrib_nn / (8*self.num_nn), name="spatial_mean")
spatial_prec = self.log_tf(self.num_nn * self.ak, name="spatial_prec")
self.var = self.log_tf(1 / (1/self.fixed_var + spatial_prec), name="%s_var" % self.name)
#self.var = self.fixed_var
self.mean = self.log_tf(self.var * spatial_prec * spatial_mean, name="%s_mean" % self.name)
#self.mean = self.fixed_mean + self.ak
class MRFSpatialPrior(Prior):
"""
Prior which performs adaptive spatial regularization based on the
contents of neighbouring vertices using the Markov Random Field method
This uses the same formalism as the Fabber 'M' type spatial prior but treats the ak
as a parameter of the optimization.
"""
def __init__(self, nvertices, mean, var, idx=None, post=None, nn=None, n2=None, **kwargs):
Prior.__init__(self)
self.name = kwargs.get("name", "MRFSpatialPrior")
self.nvertices = nvertices
self.mean = tf.fill([nvertices], mean, name="%s_mean" % self.name)
self.var = tf.fill([nvertices], var, name="%s_var" % self.name)
self.std = tf.sqrt(self.var, name="%s_std" % self.name)
# nn is a sparse tensor of shape [W, W]. If nn[A, B] = 1 then A is
# a nearest neighbour of B
self.nn = nn
# Set up spatial smoothing parameter calculation from posterior and neighbour lists
# We infer the log of ak.
self.logak = tf.Variable(-5.0, name="log_ak", dtype=tf.float32)
self.ak = self.log_tf(tf.exp(self.logak, name="ak"))
def mean_log_pdf(self, samples):
r"""
mean log PDF for the MRF spatial prior.
This is calculating:
:math:`\log P = \frac{1}{2} \log \phi - \frac{\phi}{2}\underline{x^T} D \underline{x}`
"""
samples = tf.reshape(samples, (self.nvertices, -1)) # [W, N]
self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]
dx_diag = self.log_tf(tf.reshape(self.num_nn, (self.nvertices, 1)) * samples, name="dx_diag") # [W, N]
dx_offdiag = self.log_tf(tf.sparse_tensor_dense_matmul(self.nn, samples), name="dx_offdiag") # [W, N]
self.dx = self.log_tf(dx_diag - dx_offdiag, name="dx") # [W, N]
self.xdx = self.log_tf(samples * self.dx, name="xdx") # [W, N]
term1 = tf.identity(0.5*self.logak, name="term1")
term2 = tf.identity(-0.5*self.ak*self.xdx, name="term2")
log_pdf = term1 + term2 # [W, N]
mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]
# Gamma prior if we care
#q1, q2 = 1, 100
#mean_log_pdf += (q1-1) * self.logak - self.ak / q2
return mean_log_pdf
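# Note (added): dx above equals D @ x with D = diag(num_nn) - nn, i.e. the
# graph Laplacian of the neighbour structure, so x^T D x is the sum of
# (x_i - x_j)^2 over neighbouring vertex pairs (each unordered pair once).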
def __str__(self):
return "MRF spatial prior"
class ARDPrior(NormalPrior):
"""
Automatic Relevance Determination prior
"""
def __init__(self, nvertices, mean, var, **kwargs):
NormalPrior.__init__(self, nvertices, mean, var, **kwargs)
self.name = kwargs.get("name", "ARDPrior")
self.fixed_var = self.var
# Set up inferred precision parameter phi
self.logphi = tf.Variable(tf.log(1/self.fixed_var), name="log_phi", dtype=tf.float32)
self.phi = self.log_tf(tf.exp(self.logphi, name="phi"))
self.var = 1/self.phi
self.std = tf.sqrt(self.var, name="%s_std" % self.name)
def __str__(self):
return "ARD prior"
class MRF2SpatialPrior(Prior):
"""
Prior which performs adaptive spatial regularization based on the
contents of neighbouring vertices using the Markov Random Field method
This uses the same formalism as the Fabber 'M' type spatial prior but treats the ak
as a parameter of the optimization. It differs from MRFSpatialPrior by using the
pairwise formulation of the PDF rather than the matrix formulation (the two are equivalent
but currently we keep both around for checking that they really are!)
FIXME currently this does not work unless sample size=1
"""
def __init__(self, nvertices, mean, var, idx=None, post=None, nn=None, n2=None, **kwargs):
Prior.__init__(self)
self.name = kwargs.get("name", "MRF2SpatialPrior")
self.nvertices = nvertices
self.mean = tf.fill([nvertices], mean, name="%s_mean" % self.name)
self.var = tf.fill([nvertices], var, name="%s_var" % self.name)
self.std = tf.sqrt(self.var, name="%s_std" % self.name)
# nn is a sparse tensor of shape [W, W]. If nn[A, B] = 1 then A is
# a nearest neighbour of B
self.nn = nn
# We need the number of samples to implement the log PDF function
self.sample_size = kwargs.get("sample_size", 5)
# Set up spatial smoothing parameter calculation from posterior and neighbour lists
self.logak = tf.Variable(-5.0, name="log_ak", dtype=tf.float32)
self.ak = self.log_tf(tf.exp(self.logak, name="ak"))
def mean_log_pdf(self, samples):
samples = tf.reshape(samples, (self.nvertices, -1)) # [W, N]
self.num_nn = self.log_tf(tf.sparse_reduce_sum(self.nn, axis=1), name="num_nn") # [W]
expanded_nn = tf.sparse_concat(2, [tf.sparse.reshape(self.nn, (self.nvertices, self.nvertices, 1))] * self.sample_size)
xj = expanded_nn * tf.reshape(samples, (self.nvertices, 1, -1))
#xi = tf.reshape(tf.sparse.to_dense(tf.sparse.reorder(self.nn)), (self.nvertices, self.nvertices, 1)) * tf.reshape(samples, (1, self.nvertices, -1))
xi = expanded_nn * tf.reshape(samples, (1, self.nvertices, -1))
#xi = tf.sparse.transpose(xj, perm=(1, 0, 2))
neg_xi = tf.SparseTensor(xi.indices, -xi.values, dense_shape=xi.dense_shape )
dx2 = tf.square(tf.sparse.add(xj, neg_xi), name="dx2")
sdx = tf.sparse.reduce_sum(dx2, axis=0) # [W, N]
term1 = tf.identity(0.5*self.logak, name="term1")
term2 = tf.identity(-self.ak * sdx / 4, name="term2")
log_pdf = term1 + term2 # [W, N]
mean_log_pdf = tf.reshape(tf.reduce_mean(log_pdf, axis=-1), [self.nvertices]) # [W]
return mean_log_pdf
def __str__(self):
return "MRF2 spatial prior"
class ConstantMRFSpatialPrior(NormalPrior):
"""
Prior which performs adaptive spatial regularization based on the
contents of neighbouring vertices using the Markov Random Field method
This is equivalent to the Fabber 'M' type spatial prior
"""
def __init__(self, nvertices, mean, var, idx=None, nn=None, n2=None, **kwargs):
"""
:param mean: Tensor of shape [W] containing the prior mean at each parameter vertex
:param var: Tensor of shape [W] containing the prior variance at each parameter vertex
:param post: Posterior instance
:param nn: Sparse tensor of shape [W, W] containing nearest neighbour lists
:param n2: Sparse tensor of shape [W, W] containing second nearest neighbour lists
"""
NormalPrior.__init__(self, nvertices, mean, var, name="MRFSpatialPrior")
self.idx = idx
# Save the original vertexwise mean and variance - the actual prior mean/var
# will be calculated from these and also the spatial variation in neighbour vertices
self.fixed_mean = self.mean
self.fixed_var = self.var
# nn and n2 are sparse tensors of shape [W, W]. If nn[A, B] = 1 then A is
# a nearest neighbour of B, and similarly for n2 and second nearest neighbours
self.nn = nn
self.n2 = n2
def __str__(self):
return "Spatial MRF prior (%f, %f) - const" % (self.scalar_mean, self.scalar_var)
def update_ak(self, post_mean, post_cov):
# This is the equivalent of CalculateAk in Fabber
#
# Some of this could probably be better done using linalg
# operations but bear in mind this is one parameter only
self.sigmaK = post_cov[:, self.idx, self.idx] # [W]
self.wK = post_mean[:, self.idx] # [W]
self.num_nn = np.sum(self.nn, axis=1) # [W]
# Sum over vertices of parameter variance multiplied by number of
# nearest neighbours for each vertex
trace_term = np.sum(self.sigmaK * self.num_nn) # [1]
# Sum of nearest and next-nearest neighbour mean values
self.sum_means_nn = np.matmul(self.nn, np.reshape(self.wK, (-1, 1))).reshape(-1) # [W]
self.sum_means_n2 = np.matmul(self.n2, np.reshape(self.wK, (-1, 1))).reshape(-1) # [W]
# vertex parameter mean multipled by number of nearest neighbours
wknn = self.wK * self.num_nn # [W]
swk = wknn - self.sum_means_nn # [W]
term2 = np.sum(swk * self.wK) # [1]
gk = 1 / (0.5 * trace_term + 0.5 * term2 + 0.1)
hk = float(self.nvertices) * 0.5 + 1.0
self.ak = gk * hk
self.log.info("%s: ak=%f", self.name, self.ak)
def _setup_mean_var(self, post_mean, post_cov):
# This is the equivalent of ApplyToMVN in Fabber
contrib_nn = self.log_tf(8*self.sum_means_nn, name="contrib_nn") # [W]
contrib_n2 = self.log_tf(-self.sum_means_n2, name="contrib_n2") # [W]
spatial_mean = self.log_tf(contrib_nn / (8*self.num_nn), name="spatial_mean")
spatial_prec = self.log_tf(self.num_nn * self.ak, name="spatial_prec")
self.var = self.log_tf(1 / (1/self.fixed_var + spatial_prec), name="%s_var" % self.name)
#self.var = self.fixed_var
self.mean = self.log_tf(self.var * spatial_prec * spatial_mean, name="%s_mean" % self.name)
#self.mean = self.fixed_mean + self.ak
class FactorisedPrior(Prior):
"""
Prior for a collection of parameters where there is no prior covariance
In this case the mean log PDF can be summed from the contributions of each
parameter
"""
def __init__(self, priors, **kwargs):
Prior.__init__(self)
self.priors = priors
self.name = kwargs.get("name", "FactPrior")
self.nparams = len(priors)
means = [prior.mean for prior in self.priors]
variances = [prior.var for prior in self.priors]
self.mean = self.log_tf(tf.stack(means, axis=-1, name="%s_mean" % self.name))
self.var = self.log_tf(tf.stack(variances, axis=-1, name="%s_var" % self.name))
self.std = tf.sqrt(self.var, name="%s_std" % self.name)
self.nvertices = priors[0].nvertices
# Define a diagonal covariance matrix for convenience
self.cov = tf.matrix_diag(self.var, name='%s_cov' % self.name)
def mean_log_pdf(self, samples):
nvertices = tf.shape(samples)[0]
mean_log_pdf = tf.zeros([nvertices], dtype=tf.float32)
for idx, prior in enumerate(self.priors):
param_samples = tf.slice(samples, [0, idx, 0], [-1, 1, -1])
param_logpdf = prior.mean_log_pdf(param_samples)
mean_log_pdf = tf.add(mean_log_pdf, param_logpdf)
return mean_log_pdf
def log_det_cov(self):
"""
Determinant of diagonal matrix is product of diagonal entries
"""
return tf.reduce_sum(tf.log(self.var), axis=1, name='%s_log_det_cov' % self.name)
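# Worked check (added): cov = diag(var), so det(cov) = prod_i var_i and
# log det(cov) = sum_i log(var_i) - exactly the reduce_sum above, per vertex.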
| 44.511848 | 156 | 0.638948 | 2,772 | 18,784 | 4.196248 | 0.129509 | 0.021664 | 0.02708 | 0.015131 | 0.64314 | 0.606517 | 0.585712 | 0.558803 | 0.526479 | 0.51023 | 0 | 0.013514 | 0.243665 | 18,784 | 421 | 157 | 44.617577 | 0.805237 | 0.332624 | 0 | 0.425743 | 0 | 0 | 0.059125 | 0.001764 | 0 | 0 | 0 | 0.002375 | 0 | 1 | 0.123762 | false | 0 | 0.029703 | 0.029703 | 0.252475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d451e78026744df79562580a63a5aa68b269af10 | 1,123 | py | Python | setup.py | open-contracting/extensions-data-collector | 041f7b44de60b3d241e4891da1bb1c1cb8ce9ec4 | [
"BSD-3-Clause"
] | null | null | null | setup.py | open-contracting/extensions-data-collector | 041f7b44de60b3d241e4891da1bb1c1cb8ce9ec4 | [
"BSD-3-Clause"
] | 23 | 2018-06-29T15:34:41.000Z | 2018-11-03T13:29:49.000Z | setup.py | open-contracting/extensions-data-collector | 041f7b44de60b3d241e4891da1bb1c1cb8ce9ec4 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup, find_packages
with open('README.rst') as f:
long_description = f.read()
setup(
name='ocdsextensionsdatacollector',
version='0.0.1',
author='Open Contracting Partnership, Open Data Services',
author_email='data@open-contracting.org',
url='https://github.com/open-contracting/extensions-data-collector',
description='Collects data about OCDS extensions into a machine-readable format',
license='BSD',
packages=find_packages(),
long_description=long_description,
install_requires=[
'Babel',
'ocds-babel>=0.0.3',
'ocdsextensionregistry>=0.0.5',
'polib',
'requests',
'Sphinx==1.5.1',
],
extras_require={
'test': [
'coveralls',
'pytest',
'pytest-cov',
],
},
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.6',
],
entry_points={
'console_scripts': [
'ocdsextensionsdatacollector = ocdsextensionsdatacollector.cli.__main__:main',
],
},
)
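# Illustrative (added): after e.g. `pip install -e .[test]`, the console script
# `ocdsextensionsdatacollector` declared in entry_points becomes available on PATH.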
| 27.390244 | 90 | 0.601959 | 110 | 1,123 | 6.018182 | 0.645455 | 0.067976 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016867 | 0.260908 | 1,123 | 40 | 91 | 28.075 | 0.780723 | 0 | 0 | 0.105263 | 0 | 0 | 0.458593 | 0.135352 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.026316 | 0 | 0.026316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4531e43111597e9d512203074171f5484832a7b | 423 | py | Python | server_side_python/DetectionAndTracking.py | Tpierga/2I_ProjetDetectionIA | 39ba28a1d98bf7f0e908ee6cab933b219cc98977 | [
"MIT"
] | null | null | null | server_side_python/DetectionAndTracking.py | Tpierga/2I_ProjetDetectionIA | 39ba28a1d98bf7f0e908ee6cab933b219cc98977 | [
"MIT"
] | null | null | null | server_side_python/DetectionAndTracking.py | Tpierga/2I_ProjetDetectionIA | 39ba28a1d98bf7f0e908ee6cab933b219cc98977 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import cv2
def detect_body(frame):
body_img = frame.copy()
body_classifier = cv2.CascadeClassifier("haarcascade_fullbody.xml")
gray = cv2.cvtColor(body_img, cv2.COLOR_BGR2GRAY)
bodies = body_classifier.detectMultiScale(gray)
for (x, y, w, h) in bodies:
cv2.rectangle(body_img, (x, y), (x+w, y+h), (255, 0, 0), 8)
return body_img
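# Illustrative usage (added; 'people.jpg' is a hypothetical test image and
# 'haarcascade_fullbody.xml' is assumed to sit next to this script):
#
# frame = cv2.imread('people.jpg')
# annotated = detect_body(frame)
# plt.imshow(cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB))
# plt.show()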
| 18.391304 | 71 | 0.678487 | 63 | 423 | 4.412698 | 0.571429 | 0.100719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035608 | 0.20331 | 423 | 22 | 72 | 19.227273 | 0.789318 | 0 | 0 | 0 | 0 | 0 | 0.057279 | 0.057279 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d453fbe6cb754cbcc0bc6074f266abb4c1ebf8ac | 1,333 | py | Python | packages/infra_libs/infra_libs/_command_line_linux.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | null | null | null | packages/infra_libs/infra_libs/_command_line_linux.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | 7 | 2022-02-15T01:11:37.000Z | 2022-03-02T12:46:13.000Z | packages/infra_libs/infra_libs/_command_line_linux.py | NDevTK/chromium-infra | d38e088e158d81f7f2065a38aa1ea1894f735ec4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ctypes
import ctypes.util
import sys
_CACHED_CMDLINE_LENGTH = None
def set_command_line(cmdline):
"""Replaces the commandline of this process as seen by ps."""
# Get the current commandline.
argc = ctypes.c_int()
argv = ctypes.POINTER(ctypes.c_char_p)()
ctypes.pythonapi.Py_GetArgcArgv(ctypes.byref(argc), ctypes.byref(argv))
global _CACHED_CMDLINE_LENGTH
if _CACHED_CMDLINE_LENGTH is None:
# Each argument is terminated by a null-byte, so the length of the whole
# thing in memory is the sum of all the argument byte-lengths, plus 1 null
# byte for each.
_CACHED_CMDLINE_LENGTH = sum(
len(argv[i]) for i in range(0, argc.value)) + argc.value
# Pad the cmdline string to the required length. If it's longer than the
# current commandline, truncate it.
if len(cmdline) >= _CACHED_CMDLINE_LENGTH:
new_cmdline = ctypes.c_char_p(cmdline[:_CACHED_CMDLINE_LENGTH-1] + '\0')
else:
new_cmdline = ctypes.c_char_p(cmdline.ljust(_CACHED_CMDLINE_LENGTH, '\0'))
# Replace the old commandline.
libc = ctypes.CDLL(ctypes.util.find_library('c'))
libc.memcpy(argv.contents, new_cmdline, _CACHED_CMDLINE_LENGTH)
| 34.179487 | 78 | 0.740435 | 207 | 1,333 | 4.584541 | 0.47343 | 0.109589 | 0.160169 | 0.037935 | 0.061117 | 0.061117 | 0.061117 | 0 | 0 | 0 | 0 | 0.008145 | 0.171043 | 1,333 | 38 | 79 | 35.078947 | 0.850679 | 0.40135 | 0 | 0 | 0 | 0 | 0.006386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.166667 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4563d756cea51df08af818dd4f5a3f61d84a870 | 231 | py | Python | 4.py | CarineGhisiCadorin/infosatc-lp-avaliativo-02 | 0018df4f52c0659611c484c909ff4bbf925c450a | [
"MIT"
] | null | null | null | 4.py | CarineGhisiCadorin/infosatc-lp-avaliativo-02 | 0018df4f52c0659611c484c909ff4bbf925c450a | [
"MIT"
] | null | null | null | 4.py | CarineGhisiCadorin/infosatc-lp-avaliativo-02 | 0018df4f52c0659611c484c909ff4bbf925c450a | [
"MIT"
] | null | null | null | lista = [1,2,3,4,5,6,7,8,9,10]
print(lista)
lista2 = [11, 12, 13]
print(lista2)
lista_completa = lista + lista2
print(lista_completa)
# Or, alternatively, grow the first list in place:
listaA = [1,2,3,4,5,6,7,8,9,10]
listaB = [11, 12, 13]
listaA.extend(listaB)  # note: listaA = listaB.copy() would replace listaA instead of joining the lists
print(listaA)
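# Illustrative note: lista + lista2 builds a brand-new list, while listaA.extend(listaB)
# grows listaA in place; both end with [1, 2, ..., 13] here.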
| 17.769231 | 31 | 0.65368 | 47 | 231 | 3.170213 | 0.446809 | 0.026846 | 0.040268 | 0.053691 | 0.147651 | 0.147651 | 0.147651 | 0.147651 | 0.147651 | 0.147651 | 0 | 0.185 | 0.134199 | 231 | 12 | 32 | 19.25 | 0.56 | 0.008658 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d457da1f72c081a9943cc4f083598a70a7403071 | 14,240 | py | Python | soundsig/discriminate.py | theunissenlab/sounsig | fca413aa71ce6fec079c59e615e328e6781d1d35 | [
"MIT"
] | 22 | 2017-08-05T12:41:49.000Z | 2022-01-24T23:14:59.000Z | soundsig/discriminate.py | theunissenlab/sounsig | fca413aa71ce6fec079c59e615e328e6781d1d35 | [
"MIT"
] | 3 | 2017-07-06T19:23:54.000Z | 2020-10-13T10:41:27.000Z | soundsig/discriminate.py | theunissenlab/sounsig | fca413aa71ce6fec079c59e615e328e6781d1d35 | [
"MIT"
] | 6 | 2017-05-13T18:41:23.000Z | 2022-01-24T23:15:01.000Z |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.model_selection import StratifiedKFold
from scipy.stats import binom
def discriminatePlot(X, y, cVal, titleStr='', figdir='.', Xcolname = None, plotFig = False, removeTickLabels = False, testInd = None):
    # Frederic's robust wrapper for discriminant analysis. Performs LDA, QDA and RF after error checking,
    # generates nice plots and returns cross-validated performance.
    # X         np array, n rows x p parameters
    # y         group labels, n rows
    # cVal      rgb color code for each data point - should be the same for all data belonging to the same group
    # titleStr  title for plots
    # figdir    a directory name (folder name) for figures
    # Xcolname  a np.array or list of strings with column names for printout display
    # returns: ldaYes, qdaYes, rfYes, cvCount, ldaP, qdaP, rfP, nClasses, weights
# Global Parameters
CVFOLDS = 10
MINCOUNT = 10
MINCOUNTTRAINING = 5
# figdir = '/Users/frederictheunissen/Documents/Data/Julie/Acoustical Analysis/Figures Voice'
# Initialize Variables and clean up data
classes, classesCount = np.unique(y, return_counts = True) # Classes to be discriminated should be same as ldaMod.classes_
goodIndClasses = np.array([n >= MINCOUNT for n in classesCount])
goodInd = np.array([b in classes[goodIndClasses] for b in y])
if testInd is not None:
# Check for goodInd - should be an np.array of dtype=bool
# Transform testInd into an index inside xGood and yGood
testIndx = testInd.nonzero()[0]
goodIndx = goodInd.nonzero()[0]
testInd = np.hstack([ np.where(goodIndx == testval)[0] for testval in testIndx])
trainInd = np.asarray([i for i in range(len(goodIndx)) if i not in testInd])
yGood = y[goodInd]
XGood = X[goodInd]
cValGood = cVal[goodInd]
classes, classesCount = np.unique(yGood, return_counts = True)
nClasses = classes.size # Number of classes or groups
# Do we have enough data?
if (nClasses < 2):
        print ('Error in discriminatePlot: Insufficient classes with minimum data (%d) for discrimination analysis' % (MINCOUNT))
return -1, -1, -1, -1 , -1, -1, -1, -1, -1
if testInd is None:
cvFolds = min(min(classesCount), CVFOLDS)
if (cvFolds < CVFOLDS):
            print ('Warning in discriminatePlot: Cross-validation performed with %d folds (instead of %d)' % (cvFolds, CVFOLDS))
else:
cvFolds = 1
# Data size and color values
nD = XGood.shape[1] # number of features in X
nX = XGood.shape[0] # number of data points in X
cClasses = [] # Color code for each class
for cl in classes:
icl = (yGood == cl).nonzero()[0][0]
cClasses.append(np.append(cValGood[icl],1.0))
cClasses = np.asarray(cClasses)
# Use a uniform prior
myPrior = np.ones(nClasses)*(1.0/nClasses)
# Perform a PCA for dimensionality reduction so that the covariance matrix can be fitted.
nDmax = int(np.fix(np.sqrt(nX//5)))
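    # Worked example (illustrative): with nX = 500 samples, nDmax = int(sqrt(500 // 5)) = 10,
    # so the nDmax x nDmax covariance has about nX/5 entries to estimate.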
if nDmax < nD:
print ('Warning: Insufficient data for', nD, 'parameters. PCA projection to', nDmax, 'dimensions.' )
nDmax = min(nD, nDmax)
pca = PCA(n_components=nDmax)
Xr = pca.fit_transform(XGood)
print ('Variance explained is %.2f%%' % (sum(pca.explained_variance_ratio_)*100.0))
# Initialise Classifiers
ldaMod = LDA(n_components = min(nDmax,nClasses-1), priors = myPrior, shrinkage = None, solver = 'svd')
qdaMod = QDA(priors = myPrior)
rfMod = RF() # by default assumes equal weights
# Perform CVFOLDS fold cross-validation to get performance of classifiers.
ldaYes = 0
qdaYes = 0
rfYes = 0
cvCount = 0
if testInd is None:
skf = StratifiedKFold(n_splits = cvFolds)
skfList = skf.split(Xr, yGood)
else:
skfList = [(trainInd,testInd)]
for train, test in skfList:
# Enforce the MINCOUNT in each class for Training
trainClasses, trainCount = np.unique(yGood[train], return_counts=True)
goodIndClasses = np.array([n >= MINCOUNTTRAINING for n in trainCount])
goodIndTrain = np.array([b in trainClasses[goodIndClasses] for b in yGood[train]])
        # Specify the training data set, the number of groups and priors
yTrain = yGood[train[goodIndTrain]]
XrTrain = Xr[train[goodIndTrain]]
trainClasses, trainCount = np.unique(yTrain, return_counts=True)
ntrainClasses = trainClasses.size
# Skip this cross-validation fold because of insufficient data
if ntrainClasses < 2:
continue
goodInd = np.array([b in trainClasses for b in yGood[test]])
if (goodInd.size == 0):
continue
# Fit the data
trainPriors = np.ones(ntrainClasses)*(1.0/ntrainClasses)
ldaMod.priors = trainPriors
qdaMod.priors = trainPriors
ldaMod.fit(XrTrain, yTrain)
qdaMod.fit(XrTrain, yTrain)
rfMod.fit(XrTrain, yTrain)
ldaYes += np.around((ldaMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
qdaYes += np.around((qdaMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
rfYes += np.around((rfMod.score(Xr[test[goodInd]], yGood[test[goodInd]]))*goodInd.size)
cvCount += goodInd.size
# Refit with all the data for the plots
ldaMod.priors = myPrior
qdaMod.priors = myPrior
Xrr = ldaMod.fit_transform(Xr, yGood)
# Check labels
for a, b in zip(classes, ldaMod.classes_):
if a != b:
            print ('Error in discriminatePlot: labels do not match')
# Check the within-group covariance in the rotated space
# covs = []
# for group in classes:
# Xg = Xrr[yGood == group, :]
# covs.append(np.atleast_2d(np.cov(Xg,rowvar=False)))
# withinCov = np.average(covs, axis=0, weights=myPrior)
# Print the five largest coefficients of first 3 DFA
    MAXCOMP = 3 # Maximum number of DFA components
    MAXWEIGHT = 5 # Maximum number of weights printed for each component
ncomp = min(MAXCOMP, nClasses-1)
nweight = min(MAXWEIGHT, nD)
    # scalings_ has the eigenvectors of the LDA in columns and pca.components_ has the eigenvectors of the PCA in rows
weights = np.dot(ldaMod.scalings_[:,0:ncomp].T, pca.components_)
print('LDA Weights:')
for ic in range(ncomp):
idmax = np.argsort(np.abs(weights[ic,:]))[::-1]
print('DFA %d: '%ic, end = '')
for iw in range(nweight):
if Xcolname is None:
colstr = 'C%d' % idmax[iw]
else:
colstr = Xcolname[idmax[iw]]
print('%s %.3f; ' % (colstr, float(weights[ic, idmax[iw]]) ), end='')
print()
if plotFig:
        dimVal = 0.8 # Overall dimming of background so that points can be seen
# Obtain fits in this rotated space for display purposes
ldaMod.fit(Xrr, yGood)
qdaMod.fit(Xrr, yGood)
rfMod.fit(Xrr, yGood)
XrrMean = Xrr.mean(0)
# Make a mesh for plotting
x1, x2 = np.meshgrid(np.arange(-6.0, 6.0, 0.1), np.arange(-6.0, 6.0, 0.1))
xm1 = np.reshape(x1, -1)
xm2 = np.reshape(x2, -1)
nxm = np.size(xm1)
Xm = np.zeros((nxm, Xrr.shape[1]))
Xm[:,0] = xm1
if Xrr.shape[1] > 1 :
Xm[:,1] = xm2
for ix in range(2,Xrr.shape[1]):
Xm[:,ix] = np.squeeze(np.ones((nxm,1)))*XrrMean[ix]
XmcLDA = np.zeros((nxm, 4)) # RGBA values for color for LDA
XmcQDA = np.zeros((nxm, 4)) # RGBA values for color for QDA
XmcRF = np.zeros((nxm, 4)) # RGBA values for color for RF
# Predict values on mesh for plotting based on the first two DFs
yPredLDA = ldaMod.predict_proba(Xm)
yPredQDA = qdaMod.predict_proba(Xm)
yPredRF = rfMod.predict_proba(Xm)
# Transform the predictions in color codes
maxLDA = yPredLDA.max()
for ix in range(nxm) :
cWeight = yPredLDA[ix,:] # Prob for all classes
cWinner = ((cWeight == cWeight.max()).astype('float')) # Winner takes all
# XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses
XmcLDA[ix,:] = np.dot(cWinner*cWeight, cClasses)
XmcLDA[ix,3] = (cWeight.max()/maxLDA)*dimVal
# Plot the surface of probability
plt.figure(facecolor='white', figsize=(10,4))
plt.subplot(131)
Zplot = XmcLDA.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
if nClasses > 2:
plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
else:
plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
plt.title('%s: LDA %d/%d' % (titleStr, ldaYes, cvCount))
plt.axis('square')
plt.xlim((-6, 6))
plt.ylim((-6, 6))
plt.xlabel('DFA 1')
plt.ylabel('DFA 2')
if removeTickLabels:
ax = plt.gca()
labels = [item.get_text() for item in ax.get_xticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_xticklabels(empty_string_labels)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
# Transform the predictions in color codes
maxQDA = yPredQDA.max()
for ix in range(nxm) :
cWeight = yPredQDA[ix,:] # Prob for all classes
cWinner = ((cWeight == cWeight.max()).astype('float')) # Winner takes all
# XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses
XmcQDA[ix,:] = np.dot(cWinner*cWeight, cClasses)
XmcQDA[ix,3] = (cWeight.max()/maxQDA)*dimVal
# Plot the surface of probability
plt.subplot(132)
Zplot = XmcQDA.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
if nClasses > 2:
plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
else:
plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
plt.title('%s: QDA %d/%d' % (titleStr, qdaYes, cvCount))
plt.xlabel('DFA 1')
plt.ylabel('DFA 2')
plt.axis('square')
plt.xlim((-6, 6))
plt.ylim((-6, 6))
if removeTickLabels:
ax = plt.gca()
labels = [item.get_text() for item in ax.get_xticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_xticklabels(empty_string_labels)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
# Transform the predictions in color codes
maxRF = yPredRF.max()
for ix in range(nxm) :
cWeight = yPredRF[ix,:] # Prob for all classes
cWinner = ((cWeight == cWeight.max()).astype('float')) # Winner takes all
# XmcLDA[ix,:] = np.dot(cWeight, cClasses)/nClasses # Weighted colors does not work
XmcRF[ix,:] = np.dot(cWinner*cWeight, cClasses)
XmcRF[ix,3] = (cWeight.max()/maxRF)*dimVal
# Plot the surface of probability
plt.subplot(133)
Zplot = XmcRF.reshape(np.shape(x1)[0], np.shape(x1)[1],4)
plt.imshow(Zplot, zorder=0, extent=[-6, 6, -6, 6], origin='lower', interpolation='none', aspect='auto')
if nClasses > 2:
plt.scatter(Xrr[:,0], Xrr[:,1], c=cValGood, s=40, zorder=1)
else:
plt.scatter(Xrr,(np.random.rand(Xrr.size)-0.5)*12.0 , c=cValGood, s=40, zorder=1)
plt.title('%s: RF %d/%d' % (titleStr, rfYes, cvCount))
plt.xlabel('DFA 1')
plt.ylabel('DFA 2')
plt.axis('square')
plt.xlim((-6, 6))
plt.ylim((-6, 6))
if removeTickLabels:
ax = plt.gca()
labels = [item.get_text() for item in ax.get_xticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_xticklabels(empty_string_labels)
labels = [item.get_text() for item in ax.get_yticklabels()]
empty_string_labels = ['']*len(labels)
ax.set_yticklabels(empty_string_labels)
        # Save before show(): show() can clear the current figure, which would write a blank image
        plt.savefig('%s/%s.png' % (figdir, titleStr), format='png', dpi=1000)
        plt.show()
# Results
ldaYes = int(ldaYes)
qdaYes = int(qdaYes)
rfYes = int(rfYes)
p = 1.0/nClasses
ldaP = 0
qdaP = 0
rfP = 0
for k in range(ldaYes, cvCount+1):
ldaP += binom.pmf(k, cvCount, p)
for k in range(qdaYes, cvCount+1):
qdaP += binom.pmf(k, cvCount, p)
for k in range(rfYes, cvCount+1):
rfP += binom.pmf(k, cvCount, p)
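    # Each loop above accumulates an upper-tail binomial p-value; scipy's survival
    # function yields the same value in one call, e.g. ldaP = binom.sf(ldaYes - 1, cvCount, p)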
print ("Number of classes %d. Chance level %.2f %%" % (nClasses, 100.0/nClasses))
print ("%s LDA: %.2f %% (%d/%d p=%.4f)" % (titleStr, 100.0*ldaYes/cvCount, ldaYes, cvCount, ldaP))
print ("%s QDA: %.2f %% (%d/%d p=%.4f)" % (titleStr, 100.0*qdaYes/cvCount, qdaYes, cvCount, qdaP))
print ("%s RF: %.2f %% (%d/%d p=%.4f)" % (titleStr, 100.0*rfYes/cvCount, rfYes, cvCount, rfP))
return ldaYes, qdaYes, rfYes, cvCount, ldaP, qdaP, rfP, nClasses, weights
| 41.037464 | 134 | 0.589466 | 1,840 | 14,240 | 4.526087 | 0.226087 | 0.003602 | 0.024496 | 0.002882 | 0.330451 | 0.311359 | 0.29671 | 0.283381 | 0.258646 | 0.224183 | 0 | 0.022441 | 0.286517 | 14,240 | 346 | 135 | 41.156069 | 0.797244 | 0.202528 | 0 | 0.291304 | 0 | 0 | 0.05529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004348 | false | 0 | 0.034783 | 0 | 0.047826 | 0.056522 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d457ff551e1af72008fb3c2b85fdd39fc33404b0 | 4,477 | py | Python | backend_receitas/core/views.py | gugact/backend_web | 32b72ec460c1b6bae63bfd391c87b0c4bf644821 | [
"Apache-2.0"
] | null | null | null | backend_receitas/core/views.py | gugact/backend_web | 32b72ec460c1b6bae63bfd391c87b0c4bf644821 | [
"Apache-2.0"
] | null | null | null | backend_receitas/core/views.py | gugact/backend_web | 32b72ec460c1b6bae63bfd391c87b0c4bf644821 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.http import *
from django.contrib.auth import *
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.exceptions import APIException
#from rest_framework.permissions import IsAuthenticatedOrReadOnly
from .models import *
from .serializers import *
from itertools import chain
# Create your views here.
class RecipeDetails(APIView):
#permission_classes = (IsAuthenticatedOrReadOnly,)
def get_object(self, pk):
try:
return Recipe.objects.get(pk=pk)
except Recipe.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
recipe = self.get_object(pk)
serializer = RecipeSerializer(recipe)
return Response(serializer.data)
# TODO: image handling is still missing
class RecipeRegister(APIView):
#permission_classes = (IsAuthenticatedOrReadOnly,)
def post(self, request, format=None):
serializer = RecipeSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserDuplicationError(APIException):
status_code = status.HTTP_409_CONFLICT
default_detail = u'Duplicate user'
class ProfileSignUp(APIView):
#permission_classes = (IsAuthenticatedOrReadOnly,)
#see if User already exists
    def get_object(self, data):
        # filter() never raises DoesNotExist, so test the queryset explicitly
        if User.objects.filter(username=data).exists():
            raise UserDuplicationError()
        return True
def post(self, request, format=None):
self.get_object(request.data['email'])
createdUser = User.objects.create_user(request.data['email'], None, request.data['password'])
request.data.pop('email', None)
request.data.pop('password', None)
request.data['user'] = createdUser.pk
serializer = CreateProfileSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class ProfileLogin(APIView):
#permission_classes = (IsAuthenticatedOrReadOnly,)
def get_object(self, data):
try:
retrievedUser = authenticate(username=data['email'], password=data['password'])
if retrievedUser is not None:
user = Profile.objects.get(user = retrievedUser)
print("achou usuario" + retrievedUser.username)
return user
else:
print("NAO achou usuario")
raise Http404
except User.DoesNotExist:
raise Http404
def post(self, request, format=None):
print("request body: " +request.data['email'] + " " + request.data['password'])
profile = self.get_object(request.data)
serializer = ProfileSerializer(profile)
return Response(serializer.data)
class MostThreeRecentRecipeFromEveryCategory(APIView):
#permission_classes = (IsAuthenticatedOrReadOnly,)
def get(self, request, format=None):
categories = Category.objects.all()
listOfRecipes = []
for cat in categories:
recipes = Recipe.objects.filter(category = cat)[:3]
listOfRecipes.append(recipes)
qs = list(chain.from_iterable(listOfRecipes))
serializer = ThreeRecentSerializer(qs, many=True)
return Response(serializer.data)
class RecipesFromCategory(APIView):
#permission_classes = (IsAuthenticatedOrReadOnly,)
def get_object(self, pk):
try:
category = Category.objects.get(pk=pk)
return Recipe.objects.filter(category = category)
except Category.DoesNotExist:
raise Http404
def get(self, request, pk, format=None):
print("PK: " +pk)
recipes = self.get_object(pk)
serializer = CategorySerializer(recipes, many=True)
print(serializer.data)
return Response(serializer.data)
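# Hypothetical routing sketch (names and paths are assumptions, not from this
# repo), showing how these APIViews might be wired into a urls.py:
#
#   from django.conf.urls import url
#   from core import views
#
#   urlpatterns = [
#       url(r'^recipe/(?P<pk>[0-9]+)/$', views.RecipeDetails.as_view()),
#       url(r'^recipe/$', views.RecipeRegister.as_view()),
#       url(r'^signup/$', views.ProfileSignUp.as_view()),
#       url(r'^login/$', views.ProfileLogin.as_view()),
#   ]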
| 33.162963 | 101 | 0.682153 | 465 | 4,477 | 6.473118 | 0.270968 | 0.040199 | 0.063787 | 0.097674 | 0.36711 | 0.278405 | 0.232226 | 0.213621 | 0.192027 | 0.192027 | 0 | 0.008391 | 0.228055 | 4,477 | 134 | 102 | 33.410448 | 0.862558 | 0.101854 | 0 | 0.333333 | 0 | 0 | 0.03093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107527 | false | 0.043011 | 0.150538 | 0 | 0.483871 | 0.053763 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d459cbe1a695f12684fad0e549571c60487537ba | 736 | py | Python | src/predict.py | tarasowski/starbucks-offer-engine | 51b80e5b390b58427d842964867a5db2aed6dda6 | [
"MIT"
] | 1 | 2020-02-19T06:59:30.000Z | 2020-02-19T06:59:30.000Z | src/predict.py | tarasowski/starbucks-offer-engine | 51b80e5b390b58427d842964867a5db2aed6dda6 | [
"MIT"
] | null | null | null | src/predict.py | tarasowski/starbucks-offer-engine | 51b80e5b390b58427d842964867a5db2aed6dda6 | [
"MIT"
] | null | null | null | import joblib
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
def predict(data):
clf, X_test, y_test = data
y_pred = clf.predict(X_test)
print(f'Accuracy: {round(accuracy_score(y_test, y_pred) * 100, 2)}%')
print(f'F1 Score: {round(f1_score(y_test, y_pred) * 100, 2)}%')
    print(f'Precision Score: {round(precision_score(y_test, y_pred) * 100, 2)}%')
    print(f'Recall Score: {round(recall_score(y_test, y_pred) * 100, 2)}%')
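    # precision = TP / (TP + FP) penalizes false alarms, while recall = TP / (TP + FN) penalizes misses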
def main(params):
return predict(params)
if __name__ == '__main__':
model = joblib.load('../models/model.pkl')
S_test = joblib.load('../models/S_test.pkl')
y_test = joblib.load('../models/y_test.pkl')
main((model, S_test, y_test))
| 35.047619 | 83 | 0.677989 | 117 | 736 | 3.974359 | 0.282051 | 0.086022 | 0.086022 | 0.094624 | 0.202151 | 0.202151 | 0.202151 | 0.16129 | 0.16129 | 0 | 0 | 0.030547 | 0.154891 | 736 | 20 | 84 | 36.8 | 0.717042 | 0 | 0 | 0 | 0 | 0 | 0.41712 | 0.148098 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0.0625 | 0.3125 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d45bb49795fcb2f093014163608b945bdcaafd58 | 3,238 | py | Python | examples/optimize_polyfaces.py | hh-wu/ezdxf | 62509ba39b826ee9b36f19c0a5abad7f3518186a | [
"MIT"
] | 1 | 2021-06-05T09:15:15.000Z | 2021-06-05T09:15:15.000Z | examples/optimize_polyfaces.py | luoyu-123/ezdxf | 40963a2010028f87846241e08434f43ab421f3fb | [
"MIT"
] | null | null | null | examples/optimize_polyfaces.py | luoyu-123/ezdxf | 40963a2010028f87846241e08434f43ab421f3fb | [
"MIT"
] | null | null | null | # Purpose: open example files with big polyface models
# Created: 23.04.2014
# Copyright (c) 2014-2020, Manfred Moitzi
# License: MIT License
import time
from pathlib import Path
import ezdxf
from ezdxf.render import MeshVertexMerger
SRCDIR = Path(r'D:\Source\dxftest\CADKitSamples')
OUTDIR = Path('~/Desktop/Outbox').expanduser()
def optimize_polyfaces(polyfaces):
count = 0
runtime = 0
vertex_diff = 0
print("start optimizing...")
for polyface in polyfaces:
count += 1
start_vertex_count = len(polyface)
start_time = time.time()
polyface.optimize()
end_time = time.time()
end_vertex_count = len(polyface)
runtime += end_time - start_time
vertex_diff += start_vertex_count - end_vertex_count
print(f"removed {vertex_diff} vertices in {runtime:.2f} seconds.")
def optimize(name: str):
filename = SRCDIR / name
new_filename = OUTDIR / ('optimized_' + name)
print(f'opening DXF file: {filename}')
start_time = time.time()
doc = ezdxf.readfile(filename)
msp = doc.modelspace()
end_time = time.time()
print(f'time for reading: {end_time - start_time:.1f} seconds')
print(f"DXF version: {doc.dxfversion}")
print(f"Database contains {len(doc.entitydb)} entities.")
polyfaces = (polyline for polyline in msp.query('POLYLINE') if polyline.is_poly_face_mesh)
optimize_polyfaces(polyfaces)
print(f'saving DXF file: {new_filename}')
start_time = time.time()
doc.saveas(new_filename)
end_time = time.time()
print(f'time for saving: {end_time - start_time:.1f} seconds')
def save_as(name):
filename = SRCDIR / name
print(f'opening DXF file: {filename}')
start_time = time.time()
doc = ezdxf.readfile(filename)
msp = doc.modelspace()
end_time = time.time()
print(f'time for reading: {end_time - start_time:.1f} seconds')
print(f"DXF version: {doc.dxfversion}")
print(f"Database contains {len(doc.entitydb)} entities.")
polyfaces = (polyline for polyline in msp.query('POLYLINE') if polyline.is_poly_face_mesh)
    # create two new documents
doc1 = ezdxf.new()
msp1 = doc1.modelspace()
doc2 = ezdxf.new()
msp2 = doc2.modelspace()
for polyface in polyfaces:
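        # MeshVertexMerger de-duplicates shared vertices while collecting faces,
        # which is what shrinks the vertex count versus the raw polyface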
b = MeshVertexMerger.from_polyface(polyface)
b.render(msp1, dxfattribs={
'layer': polyface.dxf.layer,
'color': polyface.dxf.color,
})
b.render_polyface(msp2, dxfattribs={
'layer': polyface.dxf.layer,
'color': polyface.dxf.color,
})
new_filename = OUTDIR / ('mesh_' + name)
print(f'saving as mesh DXF file: {new_filename}')
start_time = time.time()
doc1.saveas(new_filename)
end_time = time.time()
print(f'time for saving: {end_time - start_time:.1f} seconds')
new_filename = OUTDIR / ('recreated_polyface_' + name)
print(f'saving as polyface DXF file: {new_filename}')
start_time = time.time()
doc2.saveas(new_filename)
end_time = time.time()
print(f'time for saving: {end_time - start_time:.1f} seconds')
if __name__ == '__main__':
optimize('fanuc-430-arm.dxf')
optimize('cnc machine.dxf')
save_as('fanuc-430-arm.dxf') | 32.38 | 94 | 0.661519 | 424 | 3,238 | 4.891509 | 0.252358 | 0.092575 | 0.069431 | 0.04918 | 0.51109 | 0.493732 | 0.492285 | 0.492285 | 0.441659 | 0.391514 | 0 | 0.016484 | 0.213095 | 3,238 | 100 | 95 | 32.38 | 0.797488 | 0.048178 | 0 | 0.4875 | 0 | 0 | 0.270393 | 0.010075 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0375 | false | 0 | 0.05 | 0 | 0.0875 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d45c7405d2149d4710720762085175c95dab0d57 | 1,576 | py | Python | glashammer/bundles/contrib/dev/firephp.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | [
"MIT"
] | 1 | 2016-07-04T15:23:59.000Z | 2016-07-04T15:23:59.000Z | glashammer/bundles/contrib/dev/firephp.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | [
"MIT"
] | null | null | null | glashammer/bundles/contrib/dev/firephp.py | passy/glashammer-rdrei | 9e56952d70b961d8945707469aad9cfe97c4e7b7 | [
"MIT"
] | null | null | null |
# adapted from http://code.cmlenz.net/diva/browser/trunk/diva/ext/firephp.py
# (c) 2008 C. M. Lenz, Glashammer Developers
from time import time
from logging import Handler
from simplejson import dumps
from glashammer.utils import local
from glashammer.utils.log import add_log_handler
LEVEL_MAP = {'DEBUG': 'LOG', 'WARNING': 'WARN', 'CRITICAL': 'ERROR'}
PREFIX = 'X-FirePHP-Data-'
def init_firephp():
# one-time initialisation per request
local.firephp_log = []
def inject_firephp_headers(response):
prefix = PREFIX
if not hasattr(response, 'headers'):
# an httpexception or some other weird response
return
for i, record in enumerate(local.firephp_log):
if i == 0:
response.headers[prefix + '100000000001'] = '{'
response.headers[prefix + '300000000001'] = '"FirePHP.Firebug.Console":['
response.headers[prefix + '399999999999'] = ',["__SKIP__"]],'
response.headers[prefix + '999999999999'] = '"__SKIP__":"__SKIP__"}'
secs = str(int(time()))[-3:]
msgid = '3' + secs + ('%08d' % (i + 2))
msg = dumps(record)
if i != 0:
msg = ',' + msg
response.headers[PREFIX + msgid] = msg
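    # On the client, the numbered headers reassemble into a single JSON document,
    # roughly (illustrative):
    #   X-FirePHP-Data-100000000001: {
    #   X-FirePHP-Data-300000000001: "FirePHP.Firebug.Console":[
    #   X-FirePHP-Data-3<sec><seq>:  ["LOG", "..."],
    #   X-FirePHP-Data-399999999999: ,["__SKIP__"]],
    #   X-FirePHP-Data-999999999999: "__SKIP__":"__SKIP__"}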
def emit(level, record):
try:
local.firephp_log.append((LEVEL_MAP.get(level.upper()), record))
except AttributeError:
pass
def setup_firephp(app):
app.connect_event('wsgi-call', init_firephp)
app.connect_event('response-start', inject_firephp_headers)
app.connect_event('log', emit)
setup_app = setup_firephp
| 29.735849 | 85 | 0.64467 | 191 | 1,576 | 5.157068 | 0.486911 | 0.091371 | 0.106599 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04789 | 0.218274 | 1,576 | 52 | 86 | 30.307692 | 0.751623 | 0.126269 | 0 | 0 | 0 | 0 | 0.14515 | 0.03574 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0.028571 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d45e087825bbd78e5f7c80d653a07a247d6d0607 | 5,667 | py | Python | neural machine translation/data_load.py | quanganh1997polytechnique/Project-DL-Seq2Seq | de7b118ec865af60ee44c5a463f59ac548e76794 | [
"MIT"
] | null | null | null | neural machine translation/data_load.py | quanganh1997polytechnique/Project-DL-Seq2Seq | de7b118ec865af60ee44c5a463f59ac548e76794 | [
"MIT"
] | null | null | null | neural machine translation/data_load.py | quanganh1997polytechnique/Project-DL-Seq2Seq | de7b118ec865af60ee44c5a463f59ac548e76794 | [
"MIT"
] | null | null | null | """
** deeplean-ai.com **
created by :: GauravBh1010tt
contact :: gauravbhatt.deeplearn@gmail.com
"""
from __future__ import unicode_literals, print_function, division
import math
import re
import os
import numpy as np
import torch
import random
import warnings
from io import open
import unicodedata
import matplotlib.pyplot as plt
from torch.autograd import Variable
import time
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %02ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
warnings.simplefilter('ignore')
plt.rcParams['figure.figsize'] = (8, 8)
np.random.seed(42)
torch.manual_seed(0)
torch.cuda.manual_seed(0)
use_cuda = torch.cuda.is_available()
import zipfile
zip_ref = zipfile.ZipFile('data.zip', 'r')
zip_ref.extractall()
zip_ref.close()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# torch.cuda.set_device(1)
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
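# e.g. normalizeString(u"Élève, ça va?") -> "eleve ca va ?" (illustrative):
# diacritics stripped, end punctuation padded, other non-letters collapsed to spaces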
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p,reverse):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[reverse].startswith(eng_prefixes)
def filterPairs(pairs, reverse):
if reverse:
reverse = 1
else:
reverse = 0
return [pair for pair in pairs if filterPair(pair,reverse)]
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs,reverse)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair, input_lang, output_lang):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
def as_minutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def time_since(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (as_minutes(s), as_minutes(rs))
def indexes_from_sentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def variable_from_sentence(lang, sentence):
indexes = indexes_from_sentence(lang, sentence)
indexes.append(EOS_token)
var = Variable(torch.LongTensor(indexes).view(-1, 1))
# print('var =', var)
if use_cuda: var = var.cuda()
return var
def variables_from_pair(pair, input_lang, output_lang):
input_variable = variable_from_sentence(input_lang, pair[0])
target_variable = variable_from_sentence(output_lang, pair[1])
return (input_variable, target_variable)
def save_checkpoint(epoch, model, optimizer, directory, \
filename='best.pt'):
checkpoint=({'epoch': epoch+1,
'model': model.state_dict(),
'optimizer' : optimizer.state_dict()
})
try:
torch.save(checkpoint, os.path.join(directory, filename))
except:
os.mkdir(directory)
torch.save(checkpoint, os.path.join(directory, filename)) | 26.85782 | 77 | 0.640551 | 767 | 5,667 | 4.6206 | 0.273794 | 0.030474 | 0.021163 | 0.026806 | 0.228273 | 0.172122 | 0.106095 | 0.106095 | 0.080135 | 0.080135 | 0 | 0.018889 | 0.224634 | 5,667 | 211 | 78 | 26.85782 | 0.787665 | 0.072878 | 0 | 0.164474 | 0 | 0 | 0.062607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131579 | false | 0 | 0.092105 | 0.026316 | 0.335526 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d45fdaebbda1324d20d94461daceb3de9d2ddda0 | 9,041 | py | Python | tests/channels/mock.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 415 | 2016-06-10T00:46:55.000Z | 2021-10-16T00:56:06.000Z | tests/channels/mock.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 25 | 2016-06-11T13:48:59.000Z | 2021-01-05T11:19:30.000Z | tests/channels/mock.py | febuiles/two1-python | 88704487dba7715f97a0980781d4c0efb2ea7fc4 | [
"BSD-2-Clause-FreeBSD"
] | 109 | 2016-06-11T05:17:05.000Z | 2021-12-22T11:02:22.000Z | import codecs
import two1.bitcoin as bitcoin
import two1.bitcoin.utils as utils
import two1.channels.server as server
import two1.channels.blockchain as blockchain
import two1.channels.statemachine as statemachine
class MockTwo1Wallet:
"""Mock Two1 Wallet interface for unit testing. See two1.wallet.Two1Wallet for API."""
PRIVATE_KEY = bitcoin.PrivateKey.from_bytes(
codecs.decode("83407377a24a5cef75dedb0445d2da3a5389ed34c0f0c57266b1ed0a5ebb30c1", 'hex_codec'))
"Customer private key."
MOCK_UTXO_SCRIPT_PUBKEY = bitcoin.Script.build_p2pkh(PRIVATE_KEY.public_key.hash160())
MOCK_UTXO = bitcoin.Hash("3d3834fb69654cea89f9b086642b867c4cb9c86cc0a4cc1972924370dd54de19")
MOCK_UTXO_INDEX = 1
"Mock utxo to make deposit transaction."
def get_change_public_key(self):
return self.PRIVATE_KEY.public_key
def build_signed_transaction(
self, addresses_and_amounts, use_unconfirmed=False, insert_into_cache=False, fees=None, expiration=0):
address = list(addresses_and_amounts.keys())[0]
amount = addresses_and_amounts[address]
inputs = [bitcoin.TransactionInput(self.MOCK_UTXO, self.MOCK_UTXO_INDEX, bitcoin.Script(), 0xffffffff)]
outputs = [bitcoin.TransactionOutput(amount, bitcoin.Script.build_p2sh(utils.address_to_key_hash(address)[1]))]
tx = bitcoin.Transaction(bitcoin.Transaction.DEFAULT_TRANSACTION_VERSION, inputs, outputs, 0x0)
tx.sign_input(0, bitcoin.Transaction.SIG_HASH_ALL, self.PRIVATE_KEY, self.MOCK_UTXO_SCRIPT_PUBKEY)
return [tx]
def get_private_for_public(self, public_key):
assert bytes(public_key) == bytes(self.PRIVATE_KEY.public_key)
return self.PRIVATE_KEY
def broadcast_transaction(self, transaction):
return MockBlockchain.broadcast_tx(MockBlockchain, transaction)
@property
def testnet(self):
return False
class MockPaymentChannelServer(server.PaymentChannelServerBase):
"""Mock Payment Channel Server interface for unit testing."""
PRIVATE_KEY = bitcoin.PrivateKey.from_bytes(
codecs.decode("9d1ad8f765996474ff478ef65692a95dba0af2e24cd9e2cb6dfeee52ce2d38e8", 'hex_codec'))
"Merchant private key."
blockchain = None
"Merchant blockchain interface."
channels = {}
"Retained server-side channels state across instantiations of this payment channel server \"client\"."
def __init__(self, url=None):
"""Instantiate a Mock Payment Channel Server interface for the
specified URL.
Args:
url (str): URL of Mock server.
Returns:
MockPaymentChannelServer: instance of MockPaymentChannelServer.
"""
super().__init__()
self._url = url
def get_info(self):
return {'public_key': codecs.encode(self.PRIVATE_KEY.public_key.compressed_bytes, 'hex_codec').decode('utf-8')}
def open(self, deposit_tx, redeem_script):
# Deserialize deposit tx and redeem script
deposit_tx = bitcoin.Transaction.from_hex(deposit_tx)
deposit_txid = str(deposit_tx.hash)
redeem_script = statemachine.PaymentChannelRedeemScript.from_bytes(codecs.decode(redeem_script, 'hex_codec'))
# Validate redeem_script
assert redeem_script.merchant_public_key.compressed_bytes == self.PRIVATE_KEY.public_key.compressed_bytes
# Validate deposit tx
assert len(deposit_tx.outputs) == 1, "Invalid deposit tx outputs."
output_index = deposit_tx.output_index_for_address(redeem_script.hash160())
assert output_index is not None, "Missing deposit tx P2SH output."
assert deposit_tx.outputs[output_index].script.is_p2sh(), "Invalid deposit tx output P2SH script."
assert deposit_tx.outputs[output_index].script.get_hash160() == redeem_script.hash160(), "Invalid deposit tx output script P2SH address." # nopep8
self.channels[deposit_txid] = {'deposit_tx': deposit_tx, 'redeem_script': redeem_script, 'payment_tx': None}
def pay(self, deposit_txid, payment_tx):
# Deserialize payment tx
payment_tx = bitcoin.Transaction.from_hex(payment_tx)
# Validate payment tx
redeem_script = self.channels[deposit_txid]['redeem_script']
assert len(payment_tx.inputs) == 1, "Invalid payment tx inputs."
assert len(payment_tx.outputs) == 2, "Invalid payment tx outputs."
assert bytes(payment_tx.inputs[0].script[-1]) == bytes(self.channels[deposit_txid]['redeem_script']), "Invalid payment tx redeem script." # nopep8
# Validate payment is greater than the last one
if self.channels[deposit_txid]['payment_tx']:
output_index = payment_tx.output_index_for_address(self.PRIVATE_KEY.public_key.hash160())
assert output_index is not None, "Invalid payment tx output."
assert payment_tx.outputs[output_index].value > self.channels[deposit_txid]['payment_tx'].outputs[output_index].value, "Invalid payment tx output value." # nopep8
# Sign payment tx
assert redeem_script.merchant_public_key.compressed_bytes == self.PRIVATE_KEY.public_key.compressed_bytes, "Public key mismatch." # nopep8
sig = payment_tx.get_signature_for_input(0, bitcoin.Transaction.SIG_HASH_ALL, self.PRIVATE_KEY, redeem_script)[0] # nopep8
# Update input script sig
payment_tx.inputs[0].script.insert(1, sig.to_der() + bitcoin.utils.pack_compact_int(bitcoin.Transaction.SIG_HASH_ALL)) # nopep8
# Verify signature
output_index = self.channels[deposit_txid]['deposit_tx'].output_index_for_address(redeem_script.hash160())
assert payment_tx.verify_input_signature(0, self.channels[deposit_txid]['deposit_tx'].outputs[output_index].script), "Payment tx input script verification failed." # nopep8
# Save payment tx
self.channels[deposit_txid]['payment_tx'] = payment_tx
# Return payment txid
return str(payment_tx.hash)
def status(self, deposit_txid):
return {}
def close(self, deposit_txid, deposit_txid_signature):
        # Assert a payment has been made to this channel
assert self.channels[deposit_txid]['payment_tx'], "No payment tx exists."
        # Verify deposit txid signature
public_key = self.channels[deposit_txid]['redeem_script'].customer_public_key
assert public_key.verify(deposit_txid.encode(), bitcoin.Signature.from_der(deposit_txid_signature)), "Invalid deposit txid signature." # nopep8
# Broadcast to blockchain
self.blockchain.broadcast_tx(self.channels[deposit_txid]['payment_tx'].to_hex())
# Return payment txid
return str(self.channels[deposit_txid]['payment_tx'].hash)
class MockBlockchain(blockchain.BlockchainBase):
"""Mock Blockchain interface for unit testing."""
_blockchain = {}
"""Global blockchain state accessible by other mock objects."""
def __init__(self):
"""Instantiate a Mock blockchain interface.
Returns:
MockBlockchain: instance of MockBlockchain.
"""
# Reset blockchain state
for key in list(MockBlockchain._blockchain.keys()):
del MockBlockchain._blockchain[key]
# Stores transactions as
# {
# "<txid>": {
# "tx": <serialized tx>,
# "confirmations": <number of confirmations>,
# "outputs_spent": [
# "<txid>" or None,
# ...
# ]
# },
# ...
# }
def mock_confirm(self, txid, num_confirmations=1):
self._blockchain[txid]['confirmations'] = num_confirmations
def check_confirmed(self, txid, num_confirmations=1):
if txid not in self._blockchain:
return False
return self._blockchain[txid]['confirmations'] >= num_confirmations
def lookup_spend_txid(self, txid, output_index):
if txid not in self._blockchain:
return None
if output_index >= len(self._blockchain[txid]['outputs_spent']):
raise IndexError('Output index out of bounds.')
return self._blockchain[txid]['outputs_spent'][output_index]
def lookup_tx(self, txid):
if txid not in self._blockchain:
return None
return self._blockchain[txid]['tx']
def broadcast_tx(self, tx):
txobj = bitcoin.Transaction.from_hex(tx)
txid = str(txobj.hash)
if txid in self._blockchain:
return txid
self._blockchain[txid] = {"tx": tx, "confirmations": 0, "outputs_spent": [None] * len(txobj.outputs)}
# Mark spent outputs in other blockchain transactions
for other_txid in self._blockchain:
for txinput in txobj.inputs:
if str(txinput.outpoint) == other_txid:
self._blockchain[other_txid]['outputs_spent'][txinput.outpoint_index] = txid
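    # Illustrative use of this mock chain (an assumption for demonstration):
    #   chain = MockBlockchain()
    #   txid = chain.broadcast_tx(tx_hex)  # tx_hex: any serialized transaction
    #   chain.mock_confirm(txid, 6)
    #   assert chain.check_confirmed(txid, num_confirmations=6)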
| 41.856481 | 181 | 0.685433 | 1,049 | 9,041 | 5.676835 | 0.180172 | 0.046851 | 0.038287 | 0.046348 | 0.306465 | 0.237615 | 0.15382 | 0.092024 | 0.064148 | 0.064148 | 0 | 0.02523 | 0.219666 | 9,041 | 215 | 182 | 42.051163 | 0.818852 | 0.143347 | 0 | 0.079646 | 0 | 0 | 0.148462 | 0.025451 | 0 | 0 | 0.001723 | 0 | 0.132743 | 1 | 0.150442 | false | 0 | 0.053097 | 0.044248 | 0.442478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d46043e7e3ee4e0fd2cb4b7196675dd558bf6307 | 2,256 | py | Python | dev/Gems/CloudGemMetric/v1/AWS/common-code/LoadTest/LoadTest__CloudGemMetric.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Gems/CloudGemMetric/v1/AWS/common-code/LoadTest/LoadTest__CloudGemMetric.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | null | null | null | dev/Gems/CloudGemMetric/v1/AWS/common-code/LoadTest/LoadTest__CloudGemMetric.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | [
"AML"
] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z | # All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision$
from __future__ import print_function
from cloud_gem_load_test.service_api_call import ServiceApiCall
from data_generator import DataGenerator
import metric_constant as c
#
# Load Test Transaction Handler registration
#
def add_transaction_handlers(handler_context, transaction_handlers):
service_api_name = c.RES_GEM_NAME + '.ServiceApi'
base_url = handler_context.mappings.get(service_api_name, {}).get('PhysicalResourceId')
if not base_url:
raise RuntimeError('Missing PhysicalResourceId for ' + service_api_name)
transaction_handlers.append(ServiceStatus(base_url))
transaction_handlers.append(ProduceMessage(base_url))
#
# Check for the service status of Cloud Gem Under Test
#
class ServiceStatus(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=c.RES_GEM_NAME + '.ServiceStatus', method='get', base_url=base_url,
path='/service/status')
#
# Produce Metric Messages
#
class ProduceMessage(ServiceApiCall):
def __init__(self, base_url):
ServiceApiCall.__init__(self, name=c.RES_GEM_NAME + '.ProduceMessage', method='post', base_url=base_url,
path='/producer/produce/message?compression_mode=NoCompression&sensitivity_type=Insensitive&payload_type=JSON')
def build_request(self):
request = ServiceApiCall.build_request(self)
request['body'] = {
'data': build_metric_data()
}
return request
#
# Build the metric data object needed for the metric producer request body
#
def build_metric_data():
print('Building metric event data')
data_generator = DataGenerator()
return data_generator.json(1)
| 35.809524 | 143 | 0.734486 | 289 | 2,256 | 5.49827 | 0.422145 | 0.044053 | 0.026432 | 0.020768 | 0.118943 | 0.086847 | 0.086847 | 0.086847 | 0.086847 | 0.086847 | 0 | 0.000544 | 0.18484 | 2,256 | 62 | 144 | 36.387097 | 0.863513 | 0.313387 | 0 | 0.068966 | 0 | 0 | 0.162516 | 0.067497 | 0 | 0 | 0 | 0 | 0 | 1 | 0.172414 | false | 0 | 0.137931 | 0 | 0.448276 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d463a1c535606eac893445765a893c90912ac9f7 | 756 | py | Python | python/django-app/config/urls.py | mp5maker/library | b4d2eea70ae0da9d917285569031edfb4d8ab9fc | [
"MIT"
] | null | null | null | python/django-app/config/urls.py | mp5maker/library | b4d2eea70ae0da9d917285569031edfb4d8ab9fc | [
"MIT"
] | 23 | 2020-08-15T15:18:32.000Z | 2022-02-26T13:49:05.000Z | python/django-app/config/urls.py | mp5maker/library | b4d2eea70ae0da9d917285569031edfb4d8ab9fc | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import (static, )
urlpatterns = [
path('admin/', admin.site.urls),
path('api-auth/', include('rest_framework.urls', namespace="rest_framework")),
path('api/v1/', include('superhero.urls', namespace="superhero")),
path('api/v1/', include('movie.urls', namespace="movie")),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
path('__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
urlpatterns += static(settings.MEDIA_URL, document=settings.MEDIA_ROOT)
if 'silk' in settings.INSTALLED_APPS:
urlpatterns += [path('silk/', include('silk.urls', namespace='silk'))] | 34.363636 | 82 | 0.702381 | 92 | 756 | 5.652174 | 0.347826 | 0.076923 | 0.053846 | 0.061538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003072 | 0.138889 | 756 | 22 | 83 | 34.363636 | 0.795699 | 0 | 0 | 0.111111 | 0 | 0 | 0.174373 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.277778 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d463c99b03f786a3e4037e0d14d949c40fda1d7c | 1,492 | py | Python | config.py | proteus1991/RawVSR | 56686859498a07c83fde191fa1fc109d7aafb3da | [
"MIT"
] | 24 | 2021-01-05T02:34:09.000Z | 2022-03-15T12:26:21.000Z | config.py | baowenbo/RawVSR | 56686859498a07c83fde191fa1fc109d7aafb3da | [
"MIT"
] | 3 | 2021-01-11T17:43:58.000Z | 2021-02-04T19:59:36.000Z | config.py | baowenbo/RawVSR | 56686859498a07c83fde191fa1fc109d7aafb3da | [
"MIT"
] | 4 | 2021-01-25T08:45:04.000Z | 2021-12-22T09:14:35.000Z | """
paper: Exploit Camera Raw Data for Video Super-Resolution via Hidden Markov Model Inference
file: config.py
author: Xiaohong Liu
date: 17/09/19
"""
def get_config(args):
scale = args.scale_ratio
save_tag = args.save_image
if scale not in [2, 4]:
raise Exception('scale {} is not supported!'.format(scale))
opt = {'train': {'dataroot_GT': './dataset/train/1080p_gt_rgb',
'dataroot_LQ': './dataset/train/1080p_lr_d_raw_{}'.format(scale),
'lr': 2e-4,
'num_epochs': 100,
'N_frames': 7,
'n_workers': 12,
'batch_size': 24 if scale == 4 else 8,
'GT_size': 256,
'LQ_size': 256 // scale,
'scale': scale,
'phase': 'train',
},
'test': {'dataroot_GT': './dataset/test/1080p_gt_rgb',
'dataroot_LQ': './dataset/test/1080p_lr_d_raw_{}'.format(scale),
'N_frames': 7,
'n_workers': 12,
'batch_size': 2,
'phase': 'test',
'save_image': save_tag,
},
'network': {'nf': 64,
'nframes': 7,
'groups': 8,
'back_RBs': 4},
'dataset': {'dataset_name': 'RawVD'
}
}
return opt
| 31.083333 | 91 | 0.432306 | 150 | 1,492 | 4.08 | 0.526667 | 0.053922 | 0.055556 | 0.058824 | 0.248366 | 0.248366 | 0.088235 | 0.088235 | 0 | 0 | 0 | 0.060642 | 0.436327 | 1,492 | 47 | 92 | 31.744681 | 0.667063 | 0.095845 | 0 | 0.121212 | 0 | 0 | 0.273676 | 0.089485 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0 | 0 | 0.060606 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d4672e7716b611a83d36c41530dceaaadca832b5 | 4,018 | py | Python | src/datasets/emnist.py | bjfranks/Classification-AD | 4eecd6648bb6b54662944921924c8960c2ca236c | [
"MIT"
] | 27 | 2020-05-30T16:27:31.000Z | 2022-03-28T16:45:25.000Z | src/datasets/emnist.py | bjfranks/Classification-AD | 4eecd6648bb6b54662944921924c8960c2ca236c | [
"MIT"
] | 3 | 2021-04-22T10:01:55.000Z | 2022-01-13T02:50:31.000Z | src/datasets/emnist.py | bjfranks/Classification-AD | 4eecd6648bb6b54662944921924c8960c2ca236c | [
"MIT"
] | 7 | 2020-06-15T16:31:23.000Z | 2022-03-23T09:33:32.000Z | from torch.utils.data import Subset
from PIL import Image
from torchvision.datasets import EMNIST
from base.torchvision_dataset import TorchvisionDataset
from PIL.ImageFilter import GaussianBlur
import numpy as np
import torch
import torchvision.transforms as transforms
import random
class EMNIST_Dataset(TorchvisionDataset):
def __init__(self, root: str, split: str = 'letters', normal_class: int = 1, outlier_exposure: bool = False,
oe_n_classes: int = 26, blur_oe: bool = False, blur_std: float = 1.0, seed: int = 0):
super().__init__(root)
self.image_size = (1, 28, 28)
self.n_classes = 2 # 0: normal, 1: outlier
self.shuffle = True
self.split = split
random.seed(seed) # set seed
if outlier_exposure:
self.normal_classes = None
self.outlier_classes = list(range(1, 27))
self.known_outlier_classes = tuple(random.sample(self.outlier_classes, oe_n_classes))
else:
# Define normal and outlier classes
self.normal_classes = tuple([normal_class])
self.outlier_classes = list(range(1, 27))
self.outlier_classes.remove(normal_class)
self.outlier_classes = tuple(self.outlier_classes)
# EMNIST preprocessing: feature scaling to [0, 1]
transform = []
if blur_oe:
transform += [transforms.Lambda(lambda x: x.filter(GaussianBlur(radius=blur_std)))]
transform += [transforms.ToTensor()]
transform = transforms.Compose(transform)
target_transform = transforms.Lambda(lambda x: int(x in self.outlier_classes))
# Get train set
train_set = MyEMNIST(root=self.root, split=self.split, train=True, transform=transform,
target_transform=target_transform, download=True)
if outlier_exposure:
idx = np.argwhere(np.isin(train_set.targets.cpu().data.numpy(), self.known_outlier_classes))
idx = idx.flatten().tolist()
train_set.semi_targets[idx] = -1 * torch.ones(len(idx)).long() # set outlier exposure labels
# Subset train_set to selected classes
self.train_set = Subset(train_set, idx)
self.train_set.shuffle_idxs = False
self.test_set = None
else:
# Subset train_set to normal_classes
idx = np.argwhere(np.isin(train_set.targets.cpu().data.numpy(), self.normal_classes))
idx = idx.flatten().tolist()
train_set.semi_targets[idx] = torch.zeros(len(idx)).long()
self.train_set = Subset(train_set, idx)
# Get test set
self.test_set = MyEMNIST(root=self.root, split=self.split, train=False, transform=transform,
target_transform=target_transform, download=True)
class MyEMNIST(EMNIST):
"""
    Torchvision EMNIST class with additional targets for the outlier exposure setting and a patched
    __getitem__ method that also returns the outlier exposure target as well as the index of a data sample.
"""
def __init__(self, *args, **kwargs):
super(MyEMNIST, self).__init__(*args, **kwargs)
self.semi_targets = torch.zeros_like(self.targets)
self.shuffle_idxs = False
def __getitem__(self, index):
"""Override the original method of the EMNIST class.
Args:
index (int): Index
Returns:
tuple: (image, target, semi_target, index)
"""
img, target, semi_target = self.data[index], int(self.targets[index]), int(self.semi_targets[index])
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, semi_target, index
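# Illustrative instantiation (an assumption for demonstration):
#   dataset = EMNIST_Dataset(root='./data', normal_class=1, seed=0)
#   img, target, semi_target, idx = dataset.train_set[0]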
| 39.009709 | 117 | 0.639373 | 499 | 4,018 | 4.971944 | 0.262525 | 0.041919 | 0.050786 | 0.017735 | 0.25393 | 0.212011 | 0.212011 | 0.188634 | 0.112858 | 0.079 | 0 | 0.007797 | 0.265804 | 4,018 | 102 | 118 | 39.392157 | 0.83322 | 0.162768 | 0 | 0.193548 | 0 | 0 | 0.002433 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048387 | false | 0 | 0.145161 | 0 | 0.241935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |