hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c5bd4d3b64eb6bd036b9f5e78b463c58d2f38085 | 1,021 | py | Python | PictureColoring/MyUtils.py | chiihero/DeepLearning | 029d547ada401a00a1cc0b314cdb2fb8937062c9 | [
"Apache-2.0"
] | null | null | null | PictureColoring/MyUtils.py | chiihero/DeepLearning | 029d547ada401a00a1cc0b314cdb2fb8937062c9 | [
"Apache-2.0"
] | null | null | null | PictureColoring/MyUtils.py | chiihero/DeepLearning | 029d547ada401a00a1cc0b314cdb2fb8937062c9 | [
"Apache-2.0"
] | null | null | null | from keras.callbacks import TensorBoard,EarlyStopping,TerminateOnNaN,ReduceLROnPlateau,ModelCheckpoint
import os
import sys
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
file_abspath = os.path.abspath(sys.argv[0]) # exe所在文件地址
location = os.path.dirname(file_abspath) # exe所在文件夹目录地址
tbCallBack = TensorBoard(log_dir='./logs', histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None, embeddings_data=None, update_freq='epoch')
esCallBack=EarlyStopping(monitor='val_acc', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None, restore_best_weights=False)
tnonCallBack = TerminateOnNaN()
rpCallBack = ReduceLROnPlateau(monitor='val_acc', factor=0.2,patience=3, min_lr=0.0001)
mcCallBack = ModelCheckpoint(filepath=file_abspath[:-3]+'.model', monitor='val_acc', mode='auto', period=1,save_best_only=True)
callbacklist=[tbCallBack,esCallBack,tnonCallBack,rpCallBack,mcCallBack]
| 51.05 | 248 | 0.813908 | 136 | 1,021 | 5.926471 | 0.573529 | 0.040943 | 0.048387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019979 | 0.06856 | 1,021 | 19 | 249 | 53.736842 | 0.82755 | 0.021548 | 0 | 0 | 0 | 0 | 0.046231 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.384615 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c5be6868849918d06af4471aca57e790b36777ca | 8,545 | py | Python | tests/analysis/test_executor.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 1 | 2019-07-09T16:42:15.000Z | 2019-07-09T16:42:15.000Z | tests/analysis/test_executor.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 103 | 2016-11-14T15:58:53.000Z | 2022-03-07T21:01:03.000Z | tests/analysis/test_executor.py | shapiromatron/bmds-server | 0b2b79b521728582fa66100621e9ea03e251f9f1 | [
"MIT"
] | 2 | 2017-03-17T20:43:22.000Z | 2018-01-04T19:15:18.000Z | from copy import deepcopy
from bmds.bmds3.constants import ContinuousModelIds, DichotomousModelIds
from bmds.bmds3.types.priors import PriorClass
from bmds_server.analysis.executor import AnalysisSession
class TestAnalysisSession:
def test_default_dichotomous(self, bmds3_complete_dichotomous):
# assure a default dataset can be created
data = deepcopy(bmds3_complete_dichotomous)
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 1
assert len(session.bayesian.models) == 1
def test_default_continuous(self, bmds3_complete_continuous):
# assure a default dataset can be created
data = deepcopy(bmds3_complete_continuous)
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 1
assert len(session.bayesian.models) == 1
def test_default_continuous_individual(self, bmds3_complete_continuous_individual):
# assure a default dataset can be created
data = deepcopy(bmds3_complete_continuous_individual)
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 1
assert len(session.bayesian.models) == 1
def test_prior_classes(self, bmds3_complete_dichotomous):
# assure a default dataset can be created
data = deepcopy(bmds3_complete_dichotomous)
data["models"] = {
"frequentist_restricted": ["Gamma"],
"frequentist_unrestricted": ["Gamma"],
"bayesian": [{"model": "Gamma", "prior_weight": 1}],
}
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 2
assert len(session.bayesian.models) == 1
assert (
session.frequentist.models[0].settings.priors.prior_class
is PriorClass.frequentist_restricted
)
assert (
session.frequentist.models[1].settings.priors.prior_class
is PriorClass.frequentist_unrestricted
)
assert session.bayesian.models[0].settings.priors.prior_class is PriorClass.bayesian
def test_exponential_unpacking(self, bmds3_complete_continuous):
data = deepcopy(bmds3_complete_continuous)
data["models"] = {
"frequentist_restricted": ["Exponential"],
"bayesian": [{"model": "Exponential", "prior_weight": 1}],
}
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 2
assert session.frequentist.models[0].bmd_model_class.id == ContinuousModelIds.c_exp_m3
assert session.frequentist.models[1].bmd_model_class.id == ContinuousModelIds.c_exp_m5
assert len(session.bayesian.models) == 2
assert session.bayesian.models[0].bmd_model_class.id == ContinuousModelIds.c_exp_m3
assert session.bayesian.models[1].bmd_model_class.id == ContinuousModelIds.c_exp_m5
def test_multistage_permutations(self, bmds3_complete_dichotomous):
def _expected_degree(session, n: int):
assert session.bayesian is None
assert len(session.frequentist.models) == n
model_classes = set([model.bmd_model_class.id for model in session.frequentist.models])
assert model_classes == {DichotomousModelIds.d_multistage}
degrees = set([model.settings.degree for model in session.frequentist.models])
assert degrees == set(list(range(1, n + 1)))
# degree = 1
data = deepcopy(bmds3_complete_dichotomous)
data["models"] = {"frequentist_restricted": ["Multistage"]}
data["dataset_options"][0]["degree"] = 1
session = AnalysisSession.create(data, 0, 0)
_expected_degree(session, 1)
# degree = 2
data = deepcopy(bmds3_complete_dichotomous)
data["models"] = {"frequentist_restricted": ["Multistage"]}
data["dataset_options"][0]["degree"] = 2
session = AnalysisSession.create(data, 0, 0)
_expected_degree(session, 2)
# 3 dose-groups; degree = N-1; expected 2
for num_doses in range(3, 8):
expected_degree = min(max(num_doses - 1, 2), 4)
data = deepcopy(bmds3_complete_dichotomous)
data["datasets"] = [
{
"dtype": "D",
"metadata": {"id": 123},
"doses": list(range(num_doses)),
"ns": list(range(num_doses)),
"incidences": list(range(num_doses)),
}
]
assert len(data["datasets"][0]["doses"]) == num_doses
data["models"] = {"frequentist_restricted": ["Multistage"]}
data["dataset_options"][0]["degree"] = 0 # n-1
session = AnalysisSession.create(data, 0, 0)
print(f"{num_doses=} {expected_degree=}")
_expected_degree(session, expected_degree)
# degree = N -1, bayesian, fixed at degree == 2
data = deepcopy(bmds3_complete_dichotomous)
data["models"] = {"bayesian": [{"model": "Multistage", "prior_weight": 1}]}
data["dataset_options"][0]["degree"] = 0
session = AnalysisSession.create(data, 0, 0)
assert session.frequentist is None
assert len(session.bayesian.models) == 1
model = session.bayesian.models[0]
assert model.bmd_model_class.id == DichotomousModelIds.d_multistage
assert model.settings.degree == 2
def test_polynomial_unpacking(self, bmds3_complete_continuous):
# test linear; degree 0
data = deepcopy(bmds3_complete_continuous)
data["models"] = {"frequentist_unrestricted": ["Linear"]}
data["dataset_options"][0]["degree"] = 0
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 1
assert session.frequentist.models[0].settings.degree == 1
assert session.bayesian is None
# test polynomial; degree 2
data = deepcopy(bmds3_complete_continuous)
data["models"] = {"frequentist_unrestricted": ["Polynomial"]}
data["dataset_options"][0]["degree"] = 2
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 1
assert session.frequentist.models[0].settings.degree == 2
assert session.bayesian is None
# test polynomial; degree 3
data = deepcopy(bmds3_complete_continuous)
data["models"] = {"frequentist_unrestricted": ["Polynomial"]}
data["dataset_options"][0]["degree"] = 3
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 2
assert session.frequentist.models[0].settings.degree == 2
assert session.frequentist.models[1].settings.degree == 3
assert session.bayesian is None
# test linear + polynomial; degree 3
data = deepcopy(bmds3_complete_continuous)
data["models"] = {"frequentist_unrestricted": ["Linear", "Polynomial"]}
data["dataset_options"][0]["degree"] = 3
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 3
assert session.frequentist.models[0].settings.degree == 1
assert session.frequentist.models[1].settings.degree == 2
assert session.frequentist.models[2].settings.degree == 3
assert session.bayesian is None
# disttype 3 Linear and power are not added
def test_disttype(self, bmds3_complete_continuous):
data = deepcopy(bmds3_complete_continuous)
data["models"] = {
"frequentist_restricted": ["Hill", "Linear", "Power"],
}
# normal
data["options"][0]["dist_type"] = 1
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 3
names = [model.name() for model in session.frequentist.models]
assert names == ["Hill", "Linear", "Power"]
data["options"][0]["dist_type"] = 2
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 3
names = [model.name() for model in session.frequentist.models]
assert names == ["Hill", "Linear", "Power"]
# lognormal
data["options"][0]["dist_type"] = 3
session = AnalysisSession.create(data, 0, 0)
assert len(session.frequentist.models) == 1
names = [model.name() for model in session.frequentist.models]
assert names == ["Hill"]
| 45.452128 | 99 | 0.640609 | 937 | 8,545 | 5.697972 | 0.116329 | 0.101143 | 0.130361 | 0.095898 | 0.770182 | 0.715115 | 0.691515 | 0.643754 | 0.596366 | 0.565087 | 0 | 0.023511 | 0.243417 | 8,545 | 187 | 100 | 45.695187 | 0.80232 | 0.051375 | 0 | 0.516779 | 0 | 0 | 0.106564 | 0.031153 | 0 | 0 | 0 | 0 | 0.315436 | 1 | 0.060403 | false | 0 | 0.026846 | 0 | 0.09396 | 0.006711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5bedaf2756a09941c7750735b35a4cb10278fa5 | 483 | py | Python | 1094.py | gabzin/beecrowd | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | 3 | 2021-12-15T20:27:14.000Z | 2022-03-01T12:30:08.000Z | 1094.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | null | null | null | 1094.py | gabzin/uri | 177bdf3f87bacfd924bd031a973b8db877379fe5 | [
"MIT"
] | null | null | null | tot=coe=rat=sap=0
for i in range(int(input())):
n,s=input().split()
n=int(n)
tot+=n
if s=='C':coe+=n
elif s=='R':rat+=n
elif s=='S':sap+=n
print(f"Total: {tot} cobaias\nTotal de coelhos: {coe}\nTotal de ratos: {rat}\nTotal de sapos: {sap}")
p=(coe/tot)*100
print("Percentual de coelhos: %.2f"%p,end="")
print(" %")
p=(rat/tot)*100
print("Percentual de ratos: %.2f"%p,end="")
print(" %")
p=(sap/tot)*100
print("Percentual de sapos: %.2f"%p,end="")
print(" %")
| 25.421053 | 101 | 0.585921 | 88 | 483 | 3.215909 | 0.363636 | 0.084806 | 0.116608 | 0.222615 | 0.328622 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031553 | 0.146998 | 483 | 18 | 102 | 26.833333 | 0.65534 | 0 | 0 | 0.166667 | 0 | 0.055556 | 0.36646 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.388889 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5c1ceac12c3d4acf6d93b005cde760e9701d344 | 1,722 | py | Python | extras/createTestBlocksForReadBlkUpdate.py | Manny27nyc/BitcoinArmory | 1d02a6640d6257ab0c37013e5cd4b99681a5cfc3 | [
"MIT"
] | 505 | 2016-02-04T15:54:46.000Z | 2022-03-27T18:43:01.000Z | extras/createTestBlocksForReadBlkUpdate.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 528 | 2016-02-06T19:50:12.000Z | 2022-01-15T10:21:16.000Z | extras/createTestBlocksForReadBlkUpdate.py | jimmysong/BitcoinArmory | 1c7190176897a2e0f3e4e198ab2f199059bb2402 | [
"MIT"
] | 208 | 2015-01-02T10:31:40.000Z | 2021-12-14T07:37:36.000Z | from sys import path
path.append('..')
from armoryengine import *
TheBDM.setBlocking(True)
TheBDM.setOnlineMode(True)
if not os.path.exists('testmultiblock'):
os.mkdir('testmultiblock')
fout = []
fout.append([0, 101, 'testmultiblock/blk00000.dat'])
fout.append([0, 102, 'testmultiblock/blk00000_test1.dat']) # Add 1 block
fout.append([0, 105, 'testmultiblock/blk00000_test2.dat']) # Add 3 blocks
fout.append([106, 106, 'testmultiblock/blk00001_test3.dat']) # Just block split
fout.append([107, 109, 'testmultiblock/blk00002_test4.dat']) # Another block split 3 blks
fout.append([107, 110, 'testmultiblock/blk00002_test5.dat']) # Add block
fout.append([110, 113, 'testmultiblock/blk00003_test5.dat']) # and split
for start,end,theFile in fout:
if os.path.exists(theFile):
os.remove(theFile)
lastLocation = [0]*len(fout)
openfiles = [[trip[0], trip[1], open(trip[2],'wb')] for trip in fout]
# Assume we are only reading into blk000000.dat, no split
for h in range(120):
head = TheBDM.getHeaderByHeight(h)
blk = head.serializeWholeBlock(MAGIC_BYTES, True)
for i,trip in enumerate(openfiles):
start,end,theFile = trip
if (start <= h <= end):
theFile.write(blk)
lastLocation[i] += len(blk)
for start,end,opnfil in openfiles:
opnfil.close()
for i,trip in enumerate(fout):
start,end,theFile = trip
sz = os.path.getsize(theFile)
f = open(theFile,'ab')
if i<3:
f.write('\x00'*(22000-sz))
else:
f.write('\x00'*(1000-sz))
f.close()
print 'Blocks written out:'
for start,end,fn in fout:
if end-start==0:
print '\t%d in file: %s' % (end,fn)
else:
print '\t%d-%d in file: %s' % (start,end,fn)
| 28.229508 | 89 | 0.662602 | 251 | 1,722 | 4.517928 | 0.386454 | 0.061728 | 0.029101 | 0.017637 | 0.03351 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076596 | 0.181185 | 1,722 | 60 | 90 | 28.7 | 0.72766 | 0.087108 | 0 | 0.088889 | 0 | 0 | 0.207294 | 0.143954 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.044444 | null | null | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5c1e4e3885d653cd8bccb778237b0517bd6b7f7 | 369 | py | Python | dashboard/migrations/0034_auto_20201226_2150.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | dashboard/migrations/0034_auto_20201226_2150.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | dashboard/migrations/0034_auto_20201226_2150.py | BDALab/GENEActiv-sleep-analyses-system | f0458de041153f2dee240a53571149827de00a2e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-12-26 20:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0033_auto_20201226_2148'),
]
operations = [
migrations.RenameField(
model_name='sleepnight',
old_name='data',
new_name='diary_day',
),
]
| 20.5 | 49 | 0.593496 | 40 | 369 | 5.3 | 0.825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119231 | 0.295393 | 369 | 17 | 50 | 21.705882 | 0.696154 | 0.121951 | 0 | 0 | 1 | 0 | 0.170807 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5c5651c307431ee7d13f20d6102f8ee5f585f1c | 6,331 | py | Python | party/migrations/0002_auto__chg_field_party_primaries_date__chg_field_party_qualifying_date_.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | party/migrations/0002_auto__chg_field_party_primaries_date__chg_field_party_qualifying_date_.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | party/migrations/0002_auto__chg_field_party_primaries_date__chg_field_party_qualifying_date_.py | daonb/okqa | 3babf225911294dec1249472a9a3f6141fa7d6a7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Party.primaries_date'
db.alter_column('party_party', 'primaries_date', self.gf('django.db.models.fields.DateField')(null=True))
# Changing field 'Party.qualifying_date'
db.alter_column('party_party', 'qualifying_date', self.gf('django.db.models.fields.DateField')(null=True))
# Changing field 'Party.open_knesset_id'
db.alter_column('party_party', 'open_knesset_id', self.gf('django.db.models.fields.IntegerField')(null=True))
# Changing field 'Party.number_of_members'
db.alter_column('party_party', 'number_of_members', self.gf('django.db.models.fields.IntegerField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Party.primaries_date'
raise RuntimeError("Cannot reverse this migration. 'Party.primaries_date' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Party.qualifying_date'
raise RuntimeError("Cannot reverse this migration. 'Party.qualifying_date' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Party.open_knesset_id'
raise RuntimeError("Cannot reverse this migration. 'Party.open_knesset_id' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Party.number_of_members'
raise RuntimeError("Cannot reverse this migration. 'Party.number_of_members' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'party.party': {
'Meta': {'object_name': 'Party'},
'first_in_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'open_knesset_id': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'primaries_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True'}),
'qualifying_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['party']
| 65.947917 | 182 | 0.599905 | 695 | 6,331 | 5.329496 | 0.181295 | 0.088553 | 0.151188 | 0.215983 | 0.682775 | 0.664957 | 0.632289 | 0.572354 | 0.475972 | 0.361501 | 0 | 0.008166 | 0.187648 | 6,331 | 95 | 183 | 66.642105 | 0.712036 | 0.077555 | 0 | 0.111111 | 0 | 0 | 0.576844 | 0.296569 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027778 | false | 0.013889 | 0.055556 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5c89aef54358898a0e1bcf5928f0cc81d126d81 | 2,441 | py | Python | vmware.py | Sbaljepa/get_esxi_host_info | cb12bcc3712135e22fc456178349c51cbd480c03 | [
"MIT"
] | null | null | null | vmware.py | Sbaljepa/get_esxi_host_info | cb12bcc3712135e22fc456178349c51cbd480c03 | [
"MIT"
] | null | null | null | vmware.py | Sbaljepa/get_esxi_host_info | cb12bcc3712135e22fc456178349c51cbd480c03 | [
"MIT"
] | null | null | null | from con_esxi_host import *
from math import pow, ceil
class vmware:
def get_vm_info(self):
si = connect_to_host()
#global virtual
inv = si.RetrieveContent()
dc1 = inv.rootFolder.childEntity[0]
vmList = dc1.vmFolder.childEntity
virtual = []
for vm in vmList:
name = vm.summary.config.name
guestFullName = vm.summary.config.guestFullName
# powerState = vm.summary.runtime.powerState
bootTime = str(vm.summary.runtime.bootTime)
# overallCPUUsage = vm.summary.quickStats.overallCpuUsage
# maxCpuUsage = vm.summary.runtime.maxCpuUsage / 1024
cpuUtilization = vm.summary.quickStats.overallCpuUsage * 100 / vm.summary.runtime.maxCpuUsage;
# paused = vm.summary.runtime.paused
# snapshotInBackground = vm.summary.runtime.snapshotInBackground
# toolsStatus = vm.summary.guest.toolsStatus
hostName = vm.summary.guest.hostName
ipAddress = vm.summary.guest.ipAddress
# vmPathName = vm.summary.config.vmPathName
memorySizeMB = ceil(vm.summary.config.memorySizeMB / 1024);
numEthernetCards = vm.summary.config.numEthernetCards
numVirtualDisks = vm.summary.config.numVirtualDisks
# guestId = vm.summary.config.guestId
annotation = vm.summary.config.annotation
unshared = ceil(vm.summary.storage.unshared / pow(1024, 3))
uncommitted = ceil(vm.summary.storage.uncommitted / pow(1024, 3))
committed = uncommitted + unshared
numCpu = vm.summary.config.numCpu
vm_info = {'Type': "Virtual Machine", 'Name': name, 'HostName': hostname}
vm_kernal = {'Type': "kernal", 'OS': guestFullName, 'Boot Time': bootTime}
vm_nw = {'Type': "NetWork", 'IP': ipAddress 'EthernetCards': numEthernetCards}
vm_strg = {'Type': "Storage", 'Storage': committed, 'Disks': numVirtualDisks}
vm_mem = {'Type': "RAM", 'Name': "Memory", 'Size': memorySizeMB}
vm_cpu = {'Type': "CPU", 'Name': "Cores", 'capacity':numCpu}
config_vm = vm_kernal, vm_cpu,vm_mem,vm_strg, vm_nw
vm_info['configuration'] = config_vm
virtual.append(vm_info)
#print(virtual)
return virtual
| 51.93617 | 107 | 0.603032 | 240 | 2,441 | 6.045833 | 0.354167 | 0.136458 | 0.093039 | 0.046864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013857 | 0.290455 | 2,441 | 46 | 108 | 53.065217 | 0.823903 | 0.163048 | 0 | 0 | 0 | 0 | 0.08002 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.058824 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5cbbb0cba0072bbd3a2426dbea64e76c05e64a7 | 2,214 | py | Python | home/views.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | home/views.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | home/views.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.cache import cache_page
from django.views.decorators.gzip import gzip_page
from django.views.generic import DetailView
from about.models import AboutItem
from blog.models import Article
from home.models import Home
from case.models import Case, Category
from product.models import Product
from service.models import Service
from website.utils import cache
import logging
logger = logging.getLogger(__name__)
class HomeView(DetailView):
# 首页
model = Home
def get_context_data(self, **kwargs):
# 判断手机,跳转手机模板首页
if self.request.is_mobile:
self.template_name = 'mobile/index.html'
kwargs['carousel_list'] = self.object.carousel_set.all().order_by('sequence')
kwargs['adware_list'] = self.object.adware_set.all().order_by('sequence')
kwargs['superiority_list'] = self.object.superiority_set.all().order_by('sequence')
kwargs['category_list'] = Category.objects.all().order_by('sequence')
kwargs['case_list'] = Case.objects.all()[:6]
kwargs['product_list'] = Product.objects.all()
kwargs['service_list'] = Service.objects.all()
kwargs['aboutitem_list'] = AboutItem.objects.all().order_by('sequence')
else:
self.template_name = 'index.html'
kwargs['carousel_list'] = self.object.carousel_set.all().order_by('sequence')
kwargs['adware_list'] = self.object.adware_set.all().order_by('sequence')
kwargs['superiority_list'] = self.object.superiority_set.all().order_by('sequence')
kwargs['category_list'] = Category.objects.all().order_by('sequence')
kwargs['case_list'] = Case.objects.all()[:6]
product_list = Product.objects.all()
kwargs['first_product'] = product_list[0]
kwargs['other_products'] = product_list[1:5]
kwargs['service_list'] = Service.objects.all()
kwargs['aboutitem_list'] = AboutItem.objects.all().order_by('sequence')
return super().get_context_data(**kwargs)
def index(request):
    """Minimal liveness endpoint: always responds with the plain text 'ok'."""
    response = HttpResponse('ok')
    return response
| 41 | 95 | 0.677958 | 269 | 2,214 | 5.39777 | 0.263941 | 0.055096 | 0.068871 | 0.123967 | 0.523416 | 0.523416 | 0.480716 | 0.480716 | 0.480716 | 0.480716 | 0 | 0.002806 | 0.195122 | 2,214 | 53 | 96 | 41.773585 | 0.812009 | 0.007227 | 0 | 0.333333 | 0 | 0 | 0.147608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.309524 | 0.02381 | 0.452381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c5cc748812d8a678d614f011ee8f5b8566bed123 | 16,292 | py | Python | devel/.private/px_comm/lib/python2.7/dist-packages/px_comm/msg/_CameraInfo.py | akshastry/Neo_WS | 6c646227b1fedf4fb8cf700533ca8fc47f381b46 | [
"MIT"
] | 1 | 2021-08-31T03:07:52.000Z | 2021-08-31T03:07:52.000Z | devel/.private/px_comm/lib/python2.7/dist-packages/px_comm/msg/_CameraInfo.py | akshastry/Neo_WS | 6c646227b1fedf4fb8cf700533ca8fc47f381b46 | [
"MIT"
] | null | null | null | devel/.private/px_comm/lib/python2.7/dist-packages/px_comm/msg/_CameraInfo.py | akshastry/Neo_WS | 6c646227b1fedf4fb8cf700533ca8fc47f381b46 | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from px_comm/CameraInfo.msg. Do not edit."""
import codecs
import sys
# True when running under Python 3 or newer (hexversion 0x03000000 marks 3.0.0).
# The comparison already yields a bool; the old "True if ... else False" was redundant.
python3 = sys.hexversion > 0x03000000
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class CameraInfo(genpy.Message):
    """Autogenerated genpy message class for the px_comm/CameraInfo ROS message.

    Generated from px_comm/CameraInfo.msg -- do not edit by hand; the byte
    layout produced by serialize()/deserialize() must match _md5sum exactly.
    """
    # MD5 checksum of the message definition; ROS uses it so publisher and
    # subscriber can verify they agree on the message layout.
    _md5sum = "014513fdee9cefabe3cec97bca5e5c57"
    _type = "px_comm/CameraInfo"
    _has_header = True # flag to mark the presence of a Header object
    # Complete .msg definition text, including all embedded dependency messages.
    _full_text = """#######################################################################
# Image acquisition info #
#######################################################################
# Time of image acquisition, camera coordinate frame ID
Header header # Header timestamp should be acquisition time of image
# Header frame_id should be optical frame of camera
# origin of frame should be optical center of camera
# +x should point to the right in the image
# +y should point down in the image
# +z should point into the plane of the image
#######################################################################
# Calibration Parameters #
#######################################################################
# These are fixed during camera calibration. Their values will be the #
# same in all messages until the camera is recalibrated. Note that #
# self-calibrating systems may "recalibrate" frequently. #
#######################################################################
# The camera model used.
string camera_model
# The name of the camera.
string camera_name
# The type of the camera.
string camera_type
# The image dimensions with which the camera was calibrated. Normally
# this will be the full camera resolution in pixels.
uint32 image_width
uint32 image_height
# The distortion parameters, size depending on the distortion model.
float64[] D
# The projection parameters, size depending on the projection model.
float64[] P
# Other parameters which are not defined by either the distortion or
# projection model.
float64[] M
# Pose of camera with respect to a specific reference frame.
geometry_msgs/Pose pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of position and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
    # Field names, in wire-serialization order; _slot_types pairs 1:1 with them.
    __slots__ = ['header','camera_model','camera_name','camera_type','image_width','image_height','D','P','M','pose']
    _slot_types = ['std_msgs/Header','string','string','string','uint32','uint32','float64[]','float64[]','float64[]','geometry_msgs/Pose']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
           header,camera_model,camera_name,camera_type,image_width,image_height,D,P,M,pose

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(CameraInfo, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.camera_model is None:
                self.camera_model = ''
            if self.camera_name is None:
                self.camera_name = ''
            if self.camera_type is None:
                self.camera_type = ''
            if self.image_width is None:
                self.image_width = 0
            if self.image_height is None:
                self.image_height = 0
            if self.D is None:
                self.D = []
            if self.P is None:
                self.P = []
            if self.M is None:
                self.M = []
            if self.pose is None:
                self.pose = geometry_msgs.msg.Pose()
        else:
            # No arguments: initialize every field to its default value.
            self.header = std_msgs.msg.Header()
            self.camera_model = ''
            self.camera_name = ''
            self.camera_type = ''
            self.image_width = 0
            self.image_height = 0
            self.D = []
            self.P = []
            self.M = []
            self.pose = geometry_msgs.msg.Pose()

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            # Header: seq, stamp.secs, stamp.nsecs packed as three little-endian uint32s.
            buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            # Each string is written as a uint32 length prefix followed by UTF-8 bytes.
            _x = self.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.camera_model
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.camera_name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.camera_type
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self
            buff.write(_get_struct_2I().pack(_x.image_width, _x.image_height))
            # Variable-length float64 arrays: uint32 element count, then packed doubles.
            length = len(self.D)
            buff.write(_struct_I.pack(length))
            pattern = '<%sd'%length
            buff.write(struct.Struct(pattern).pack(*self.D))
            length = len(self.P)
            buff.write(_struct_I.pack(length))
            pattern = '<%sd'%length
            buff.write(struct.Struct(pattern).pack(*self.P))
            length = len(self.M)
            buff.write(_struct_I.pack(length))
            pattern = '<%sd'%length
            buff.write(struct.Struct(pattern).pack(*self.M))
            _x = self
            # Pose: position (x, y, z) then orientation quaternion (x, y, z, w) as 7 doubles.
            buff.write(_get_struct_7d().pack(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
        except struct.error as se:
            self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te:
            self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        # NOTE: parameter name shadows the builtin str; kept for genpy compatibility.
        # Register this message type with the 'rosmsg' codec error handler so
        # decode errors can be reported with the message type name.
        codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.pose is None:
                self.pose = geometry_msgs.msg.Pose()
            end = 0
            _x = self
            # Header: three little-endian uint32s (seq, stamp.secs, stamp.nsecs).
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
            # Strings: uint32 length prefix, then that many bytes (UTF-8 on Python 3).
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.header.frame_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.camera_model = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.camera_model = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.camera_name = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.camera_name = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.camera_type = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.camera_type = str[start:end]
            _x = self
            # Two uint32s: image_width, image_height.
            start = end
            end += 8
            (_x.image_width, _x.image_height,) = _get_struct_2I().unpack(str[start:end])
            # float64[] arrays: uint32 count, then count little-endian doubles.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            s = struct.Struct(pattern)
            end += s.size
            self.D = s.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            s = struct.Struct(pattern)
            end += s.size
            self.P = s.unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            s = struct.Struct(pattern)
            end += s.size
            self.M = s.unpack(str[start:end])
            _x = self
            # Pose: 7 doubles (position x/y/z, orientation x/y/z/w) = 56 bytes.
            start = end
            end += 56
            (_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        try:
            _x = self
            # Header: seq, stamp.secs, stamp.nsecs packed as three little-endian uint32s.
            buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.camera_model
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.camera_name
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self.camera_type
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
            _x = self
            buff.write(_get_struct_2I().pack(_x.image_width, _x.image_height))
            # Arrays here are numpy float64 arrays; tostring() emits their raw bytes
            # (the computed pattern is unused in the numpy variant, as generated).
            length = len(self.D)
            buff.write(_struct_I.pack(length))
            pattern = '<%sd'%length
            buff.write(self.D.tostring())
            length = len(self.P)
            buff.write(_struct_I.pack(length))
            pattern = '<%sd'%length
            buff.write(self.P.tostring())
            length = len(self.M)
            buff.write(_struct_I.pack(length))
            pattern = '<%sd'%length
            buff.write(self.M.tostring())
            _x = self
            buff.write(_get_struct_7d().pack(_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
        except struct.error as se:
            self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te:
            self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        # NOTE: parameter name shadows the builtin str; kept for genpy compatibility.
        codecs.lookup_error("rosmsg").msg_type = self._type
        try:
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.pose is None:
                self.pose = geometry_msgs.msg.Pose()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.header.frame_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.camera_model = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.camera_model = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.camera_name = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.camera_name = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.camera_type = str[start:end].decode('utf-8', 'rosmsg')
            else:
                self.camera_type = str[start:end]
            _x = self
            start = end
            end += 8
            (_x.image_width, _x.image_height,) = _get_struct_2I().unpack(str[start:end])
            # float64[] arrays: read count, then view the bytes as a numpy array
            # (frombuffer avoids a copy, unlike the plain struct unpack variant).
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            s = struct.Struct(pattern)
            end += s.size
            self.D = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            s = struct.Struct(pattern)
            end += s.size
            self.P = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            pattern = '<%sd'%length
            start = end
            s = struct.Struct(pattern)
            end += s.size
            self.M = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
            _x = self
            start = end
            end += 56
            (_x.pose.position.x, _x.pose.position.y, _x.pose.position.z, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared genpy Struct for a single little-endian uint32 (the length-prefix format).
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the shared Struct used to (un)pack 4-byte length prefixes."""
    global _struct_I
    return _struct_I
# Lazily-built cache for the two-uint32 wire format (image_width, image_height).
_struct_2I = None
def _get_struct_2I():
    """Return (building on first use) a Struct for two little-endian uint32s."""
    global _struct_2I
    _struct_2I = _struct_2I or struct.Struct("<2I")
    return _struct_2I
# Lazily-built cache for the three-uint32 wire format (header seq/secs/nsecs).
_struct_3I = None
def _get_struct_3I():
    """Return (building on first use) a Struct for three little-endian uint32s."""
    global _struct_3I
    _struct_3I = _struct_3I or struct.Struct("<3I")
    return _struct_3I
# Lazily-built cache for the seven-double wire format (pose position + quaternion).
_struct_7d = None
def _get_struct_7d():
    """Return (building on first use) a Struct for seven little-endian float64s."""
    global _struct_7d
    _struct_7d = _struct_7d or struct.Struct("<7d")
    return _struct_7d
| 35.112069 | 201 | 0.60097 | 2,202 | 16,292 | 4.278383 | 0.127611 | 0.064537 | 0.049039 | 0.041503 | 0.641652 | 0.62435 | 0.600892 | 0.600892 | 0.591445 | 0.580618 | 0 | 0.013269 | 0.236742 | 16,292 | 463 | 202 | 35.187905 | 0.744351 | 0.080223 | 0 | 0.695761 | 1 | 0.002494 | 0.249511 | 0.049295 | 0 | 0 | 0.000674 | 0 | 0 | 1 | 0.024938 | false | 0 | 0.014963 | 0 | 0.074813 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5cca9d03c0d9fcd4b29b96bb68a691816ff964e | 3,567 | py | Python | search/models.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 4 | 2019-03-28T06:42:17.000Z | 2021-06-06T13:10:51.000Z | search/models.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 177 | 2018-09-28T14:21:56.000Z | 2022-03-30T21:45:26.000Z | search/models.py | IATI/new-website | b90783e32d19ac4c821c5ea018a52997a11b5286 | [
"MIT"
] | 8 | 2018-10-25T20:43:10.000Z | 2022-03-17T14:19:27.000Z | from itertools import chain
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from wagtail.core.models import Page
from wagtail.search.models import Query
from about.models import AboutPage, AboutSubPage, CaseStudyPage, HistoryPage, PeoplePage
from contact.models import ContactPage
from events.models import EventPage
from guidance_and_support.models import GuidanceAndSupportPage, GuidanceGroupPage, GuidancePage
# from guidance_and_support.models import KnowledgebasePage
from news.models import NewsPage
from home.models import AbstractBasePage, StandardPage
from iati_standard.models import IATIStandardPage
class SearchPage(AbstractBasePage):
    """A model for a search page, to respond to query requests.

    Searches every model listed in ``serve``'s ``searchable_models``, puts
    Wagtail editors' picks (promoted results) first, and paginates the
    combined list.
    """

    class Meta:
        verbose_name = 'Search'

    parent_page_types = ['home.HomePage']
    subpage_types = []
    max_count = 1

    def get_paginated(self, collection, page: int, per_page: int = 10):
        """Paginate ``collection``, returning ``(page_object, paginator)``.

        Invalid ``page`` values fall back to the first/last page. If the
        paginator cannot even be constructed, an empty paginator is used so
        callers never receive ``None`` (the previous version swallowed the
        construction error and then crashed with AttributeError on
        ``paginator.page``).
        """
        try:
            paginator = Paginator(collection, per_page)
        except Exception:
            # Degrade to an empty result set rather than crashing the view.
            paginator = Paginator([], per_page)
        try:
            results = paginator.page(page)
        except PageNotAnInteger:
            # If page is not an integer, deliver first page.
            results = paginator.page(1)
        except EmptyPage:
            # If page is out of range (e.g. 9999), deliver last page of results.
            results = paginator.page(paginator.num_pages)
        return results, paginator

    def serve(self, request, page=None):
        """Serve the search page with query info and paginated results.

        Reads ``query`` and ``page`` from the GET parameters. With no query,
        renders an empty result set.
        """
        template = self.get_template(request)
        context = self.get_context(request)
        per_page = 10
        # Models whose live, public pages take part in search.
        searchable_models = [
            AboutPage, AboutSubPage, CaseStudyPage, HistoryPage,
            PeoplePage, ContactPage, EventPage, GuidanceAndSupportPage,
            GuidanceGroupPage, GuidancePage, NewsPage,
            StandardPage, IATIStandardPage,
        ]
        # TODO: add KnowledgebasePage back if activated
        search_query = request.GET.get('query', '')
        page = request.GET.get('page', 1)
        if search_query:
            # Search each model separately and merge, ordered by relevance score.
            search_results = [
                r for m in searchable_models
                for r in m.objects.live().public().search(search_query).annotate_score('_score')
            ]
            search_results.sort(key=lambda r: r._score, reverse=True)
            # Fetch the Query record once (previously fetched twice).
            query = Query.get(search_query)
            # Live editors' picks (promoted results) are listed first.
            promoted = [pick.page.specific for pick in query.editors_picks.all() if pick.page.live]
            query.add_hit()  # record the hit for Wagtail's promoted-search reporting
            results = list(chain(promoted, search_results))
        else:
            results = Page.objects.none()
        search_results, paginator = self.get_paginated(results, page, per_page)
        total_pages = search_results.paginator.num_pages if search_results else 0
        # Show a window of up to ten page links around the current page.
        range_start = search_results.number - 5 if search_results.number > 5 else 1
        if search_results.number < (total_pages - 4):
            range_end = search_results.number + 4
        else:
            range_end = total_pages
        context['search_query'] = search_query
        context['search_results'] = search_results
        context['paginator_range'] = list(range(range_start, range_end + 1))
        context['paginator'] = paginator
        return render(request, template, context)
| 37.547368 | 111 | 0.670311 | 411 | 3,567 | 5.681265 | 0.340633 | 0.072377 | 0.032548 | 0.038544 | 0.076231 | 0.029122 | 0 | 0 | 0 | 0 | 0 | 0.006744 | 0.251752 | 3,567 | 94 | 112 | 37.946809 | 0.868115 | 0.11326 | 0 | 0.059701 | 0 | 0 | 0.026718 | 0 | 0 | 0 | 0 | 0.010638 | 0 | 1 | 0.029851 | false | 0.014925 | 0.179104 | 0 | 0.313433 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5cf38192c894cd50c8e95291a53bb4a08d2bc9f | 334 | py | Python | desafio_005_antecessor_e_sucessor.py | VagnerGit/PythonCursoEmVideo | 3e80e12fbf21f5be08c554d77fa9073dc0a3145f | [
"MIT"
] | null | null | null | desafio_005_antecessor_e_sucessor.py | VagnerGit/PythonCursoEmVideo | 3e80e12fbf21f5be08c554d77fa9073dc0a3145f | [
"MIT"
] | null | null | null | desafio_005_antecessor_e_sucessor.py | VagnerGit/PythonCursoEmVideo | 3e80e12fbf21f5be08c554d77fa9073dc0a3145f | [
"MIT"
] | null | null | null | """
Exercício Python 5:
Faça um programa que leia um número Inteiro e
mostre na tela o seu sucessor e seu antecessor.
"""
n = int(input('digite um numero inteiro '))
#ant = n-1
#post = n+1
#print('O antecessor de {} é {} e posterior é {}' .format(n, ant, post))
print('{} o antercessor é {} o sucessor é {}'.format(n, (n-1), (n+1)))
| 27.833333 | 72 | 0.643713 | 59 | 334 | 3.644068 | 0.542373 | 0.037209 | 0.074419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018315 | 0.182635 | 334 | 11 | 73 | 30.363636 | 0.769231 | 0.610778 | 0 | 0 | 0 | 0 | 0.516667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
c5d90016a90a331eb727c3cf478e5adf99c5cfde | 8,986 | py | Python | Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | VincentWei/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | 6 | 2017-05-31T01:46:45.000Z | 2018-06-12T10:53:30.000Z | Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | FMSoftCN/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | null | null | null | Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | FMSoftCN/mdolphin-core | 48ffdcf587a48a7bb4345ae469a45c5b64ffad0e | [
"Apache-2.0"
] | 2 | 2017-07-17T06:02:42.000Z | 2018-09-19T10:08:38.000Z | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.net.layouttestresults import LayoutTestResults
class CommitQueueTaskDelegate(object):
def run_command(self, command):
raise NotImplementedError("subclasses must implement")
def command_passed(self, message, patch):
raise NotImplementedError("subclasses must implement")
def command_failed(self, message, script_error, patch):
raise NotImplementedError("subclasses must implement")
def refetch_patch(self, patch):
raise NotImplementedError("subclasses must implement")
def layout_test_results(self):
raise NotImplementedError("subclasses must implement")
def archive_last_layout_test_results(self, patch):
raise NotImplementedError("subclasses must implement")
# We could make results_archive optional, but for now it's required.
def report_flaky_tests(self, patch, flaky_tests, results_archive):
raise NotImplementedError("subclasses must implement")
class CommitQueueTask(object):
def __init__(self, delegate, patch):
self._delegate = delegate
self._patch = patch
self._script_error = None
def _validate(self):
# Bugs might get closed, or patches might be obsoleted or r-'d while the
# commit-queue is processing.
self._patch = self._delegate.refetch_patch(self._patch)
if self._patch.is_obsolete():
return False
if self._patch.bug().is_closed():
return False
if not self._patch.committer():
return False
if not self._patch.review() != "-":
return False
# Reviewer is not required. Missing reviewers will be caught during
# the ChangeLog check during landing.
return True
def _run_command(self, command, success_message, failure_message):
try:
self._delegate.run_command(command)
self._delegate.command_passed(success_message, patch=self._patch)
return True
except ScriptError, e:
self._script_error = e
self.failure_status_id = self._delegate.command_failed(failure_message, script_error=self._script_error, patch=self._patch)
return False
def _clean(self):
return self._run_command([
"clean",
],
"Cleaned working directory",
"Unable to clean working directory")
def _update(self):
# FIXME: Ideally the status server log message should include which revision we updated to.
return self._run_command([
"update",
],
"Updated working directory",
"Unable to update working directory")
def _apply(self):
return self._run_command([
"apply-attachment",
"--no-update",
"--non-interactive",
self._patch.id(),
],
"Applied patch",
"Patch does not apply")
def _build(self):
return self._run_command([
"build",
"--no-clean",
"--no-update",
"--build-style=both",
],
"Built patch",
"Patch does not build")
def _build_without_patch(self):
return self._run_command([
"build",
"--force-clean",
"--no-update",
"--build-style=both",
],
"Able to build without patch",
"Unable to build without patch")
def _test(self):
return self._run_command([
"build-and-test",
"--no-clean",
"--no-update",
# Notice that we don't pass --build, which means we won't build!
"--test",
"--non-interactive",
],
"Passed tests",
"Patch does not pass tests")
def _build_and_test_without_patch(self):
return self._run_command([
"build-and-test",
"--force-clean",
"--no-update",
"--build",
"--test",
"--non-interactive",
],
"Able to pass tests without patch",
"Unable to pass tests without patch (tree is red?)")
def _failing_results_from_last_run(self):
results = self._delegate.layout_test_results()
if not results:
return [] # Makes callers slighty cleaner to not have to deal with None
return results.failing_test_results()
def _land(self):
# Unclear if this should pass --quiet or not. If --parent-command always does the reporting, then it should.
return self._run_command([
"land-attachment",
"--force-clean",
"--ignore-builders",
"--non-interactive",
"--parent-command=commit-queue",
self._patch.id(),
],
"Landed patch",
"Unable to land patch")
def _report_flaky_tests(self, flaky_test_results, results_archive):
self._delegate.report_flaky_tests(self._patch, flaky_test_results, results_archive)
def _test_patch(self):
if self._test():
return True
first_results = self._failing_results_from_last_run()
first_failing_tests = [result.filename for result in first_results]
first_results_archive = self._delegate.archive_last_layout_test_results(self._patch)
if self._test():
# Only report flaky tests if we were successful at archiving results.
if first_results_archive:
self._report_flaky_tests(first_results, first_results_archive)
return True
second_results = self._failing_results_from_last_run()
second_failing_tests = [result.filename for result in second_results]
if first_failing_tests != second_failing_tests:
# We could report flaky tests here, but since run-webkit-tests
# is run with --exit-after-N-failures=1, we would need to
# be careful not to report constant failures as flaky due to earlier
# flaky test making them not fail (no results) in one of the runs.
# See https://bugs.webkit.org/show_bug.cgi?id=51272
return False
if self._build_and_test_without_patch():
return self.report_failure() # The error from the previous ._test() run is real, report it.
return False # Tree must be red, just retry later.
def report_failure(self):
if not self._validate():
return False
raise self._script_error
def run(self):
if not self._validate():
return False
if not self._clean():
return False
if not self._update():
return False
if not self._apply():
return self.report_failure()
if not self._patch.is_rollout():
if not self._build():
if not self._build_without_patch():
return False
return self.report_failure()
if not self._test_patch():
return False
# Make sure the patch is still valid before landing (e.g., make sure
# no one has set commit-queue- since we started working on the patch.)
if not self._validate():
return False
# FIXME: We should understand why the land failure occured and retry if possible.
if not self._land():
return self.report_failure()
return True
| 38.076271 | 135 | 0.639217 | 1,080 | 8,986 | 5.15 | 0.285185 | 0.027508 | 0.021036 | 0.028767 | 0.30169 | 0.208558 | 0.159835 | 0.046926 | 0.024452 | 0.024452 | 0 | 0.001545 | 0.279769 | 8,986 | 235 | 136 | 38.238298 | 0.857849 | 0.305253 | 0 | 0.462963 | 0 | 0 | 0.149475 | 0.004681 | 0 | 0 | 0 | 0.004255 | 0 | 0 | null | null | 0.037037 | 0.012346 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5e54fc9fa5630a02a90b1f99c8ec0d552707d95 | 6,021 | py | Python | sqlalchemy/sqlalchemy-0.3.6+codebay/test/base/dependency.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | 5 | 2015-04-16T08:36:17.000Z | 2017-05-12T17:20:12.000Z | sqlalchemy/sqlalchemy-0.3.6+codebay/test/base/dependency.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | null | null | null | sqlalchemy/sqlalchemy-0.3.6+codebay/test/base/dependency.py | nakedible/vpnease-l2tp | 0fcda6a757f2bc5c37f4753b3cd8b1c6d282db5c | [
"WTFPL"
] | 4 | 2015-03-19T14:39:51.000Z | 2019-01-23T08:22:55.000Z | from testbase import PersistTest
import sqlalchemy.topological as topological
import unittest, sys, os
from sqlalchemy import util
# TODO: need assertion conditions in this suite
class DependencySorter(topological.QueueDependencySorter):
    """Local alias for the queue-based topological dependency sorter under test."""
    pass
class DependencySortTest(PersistTest):
    def assert_sort(self, tuples, node, collection=None):
        """Verify the sorted tree rooted at ``node``.

        Checks that every (parent, child) pair in ``tuples`` is ordered
        parent-before-child in the tree, and that each item (including cycle
        members) appears exactly once; every item of ``collection`` must be
        present somewhere in the tree.

        NOTE(review): nesting of the helpers below was reconstructed from a
        whitespace-stripped dump -- confirm against the original file.
        """
        print str(node)
        def assert_tuple(tuple, node):
            # Items fused into a cycle group count as "located at this node".
            if node.cycles:
                cycles = [i.item for i in node.cycles]
            else:
                cycles = []
            if tuple[0] is node.item or tuple[0] in cycles:
                tuple.pop()
                if tuple[0] is node.item or tuple[0] in cycles:
                    return
            elif len(tuple) > 1 and tuple[1] is node.item:
                # Child found before its parent was matched: ordering violated.
                assert False, "Tuple not in dependency tree: " + str(tuple)
            for c in node.children:
                assert_tuple(tuple, c)
        for tuple in tuples:
            # Pass a mutable copy so assert_tuple can pop matched elements.
            assert_tuple(list(tuple), node)
        if collection is None:
            collection = []
        items = util.Set()
        def assert_unique(node):
            # Every item (or cycle member) must appear exactly once in the tree;
            # remove seen items from `collection` to verify full coverage below.
            for item in [n.item for n in node.cycles or [node,]]:
                assert item not in items
                items.add(item)
                if item in collection:
                    collection.remove(item)
            for c in node.children:
                assert_unique(c)
        assert_unique(node)
        # All expected items must have been encountered somewhere in the tree.
        assert len(collection) == 0
def testsort(self):
rootnode = 'root'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
subnode1 = 'subnode1'
subnode2 = 'subnode2'
subnode3 = 'subnode3'
subnode4 = 'subnode4'
subsubnode1 = 'subsubnode1'
tuples = [
(subnode3, subsubnode1),
(node2, subnode1),
(node2, subnode2),
(rootnode, node2),
(rootnode, node3),
(rootnode, node4),
(node4, subnode3),
(node4, subnode4)
]
head = DependencySorter(tuples, []).sort()
self.assert_sort(tuples, head)
def testsort2(self):
node1 = 'node1'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
node5 = 'node5'
node6 = 'node6'
node7 = 'node7'
tuples = [
(node1, node2),
(node3, node4),
(node4, node5),
(node5, node6),
(node6, node2)
]
head = DependencySorter(tuples, [node7]).sort()
self.assert_sort(tuples, head, [node7])
def testsort3(self):
['Mapper|Keyword|keywords,Mapper|IKAssociation|itemkeywords', 'Mapper|Item|items,Mapper|IKAssociation|itemkeywords']
node1 = 'keywords'
node2 = 'itemkeyowrds'
node3 = 'items'
tuples = [
(node1, node2),
(node3, node2),
(node1,node3)
]
head1 = DependencySorter(tuples, [node1, node2, node3]).sort()
head2 = DependencySorter(tuples, [node3, node1, node2]).sort()
head3 = DependencySorter(tuples, [node3, node2, node1]).sort()
# TODO: figure out a "node == node2" function
#self.assert_(str(head1) == str(head2) == str(head3))
print "\n" + str(head1)
print "\n" + str(head2)
print "\n" + str(head3)
def testsort4(self):
node1 = 'keywords'
node2 = 'itemkeyowrds'
node3 = 'items'
node4 = 'hoho'
tuples = [
(node1, node2),
(node4, node1),
(node1, node3),
(node3, node2)
]
head = DependencySorter(tuples, []).sort()
self.assert_sort(tuples, head)
def testsort5(self):
# this one, depenending on the weather,
node1 = 'node1' #'00B94190'
node2 = 'node2' #'00B94990'
node3 = 'node3' #'00B9A9B0'
node4 = 'node4' #'00B4F210'
tuples = [
(node4, node1),
(node1, node2),
(node4, node3),
(node2, node3),
(node4, node2),
(node3, node3)
]
allitems = [
node1,
node2,
node3,
node4
]
head = DependencySorter(tuples, allitems).sort()
self.assert_sort(tuples, head)
def testcircular(self):
node1 = 'node1'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
node5 = 'node5'
tuples = [
(node4, node5),
(node5, node4),
(node1, node2),
(node2, node3),
(node3, node1),
(node4, node1)
]
head = DependencySorter(tuples, []).sort(allow_all_cycles=True)
self.assert_sort(tuples, head)
def testcircular2(self):
# this condition was arising from ticket:362
# and was not treated properly by topological sort
node1 = 'node1'
node2 = 'node2'
node3 = 'node3'
node4 = 'node4'
tuples = [
(node1, node2),
(node3, node1),
(node2, node4),
(node3, node2),
(node2, node3)
]
head = DependencySorter(tuples, []).sort(allow_all_cycles=True)
self.assert_sort(tuples, head)
def testcircular3(self):
nodes = {}
tuples = [('Question', 'Issue'), ('ProviderService', 'Issue'), ('Provider', 'Question'), ('Question', 'Provider'), ('ProviderService', 'Question'), ('Provider', 'ProviderService'), ('Question', 'Answer'), ('Issue', 'Question')]
head = DependencySorter(tuples, []).sort(allow_all_cycles=True)
self.assert_sort(tuples, head)
def testbigsort(self):
tuples = []
for i in range(0,1500, 2):
tuples.append((i, i+1))
head = DependencySorter(tuples, []).sort()
# Allow running this module directly via the standard unittest CLI runner.
if __name__ == "__main__":
unittest.main()
| 31.036082 | 235 | 0.51038 | 565 | 6,021 | 5.387611 | 0.235398 | 0.042707 | 0.068331 | 0.045992 | 0.293035 | 0.268397 | 0.207293 | 0.182326 | 0.169185 | 0.169185 | 0 | 0.054617 | 0.370536 | 6,021 | 193 | 236 | 31.196891 | 0.748549 | 0.051985 | 0 | 0.39521 | 0 | 0 | 0.088819 | 0.018957 | 0 | 0 | 0 | 0.005181 | 0.101796 | 0 | null | null | 0.005988 | 0.023952 | null | null | 0.023952 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5e956eebd2928391f5d2ba00f6fd36130e8bbd1 | 894 | py | Python | hackerearth/Algorithms/Restoring trees/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | 4 | 2020-07-24T01:59:50.000Z | 2021-07-24T15:14:08.000Z | hackerearth/Algorithms/Restoring trees/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | hackerearth/Algorithms/Restoring trees/solution.py | ATrain951/01.python-com_Qproject | c164dd093954d006538020bdf2e59e716b24d67c | [
"MIT"
] | null | null | null | """
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
def restore_parents(start, finish):
    """Rebuild the parent array of a rooted tree from DFS orderings.

    start  -- start[v] is the 0-based discovery index of vertex v
              (so the root is the vertex with start[v] == 0)
    finish -- finish[v] appears to be the index just past the last vertex
              discovered inside v's subtree (assumption inferred from the
              ``finish[cur] - i > 1`` test below -- TODO confirm against
              the original problem statement)

    Returns a list p where p[v] is the 1-based parent of vertex v and the
    root maps to 0.
    """
    n = len(start)
    # vertexes[t] = the vertex discovered at time t (inverse of `start`).
    vertexes = [0] * n
    for i, v in enumerate(start):
        vertexes[v] = i
    parents = [-1] * n
    parent = vertexes[0]  # the root is the first vertex discovered
    for i in range(1, n):
        cur = vertexes[i]
        if finish[cur] - i > 1:
            # `cur` has descendants: it becomes the parent of what follows.
            parents[cur] = parent
            parent = cur
        else:
            # `cur` closes its subtree; climb back up while enclosing
            # subtrees finish at the same index.
            parents[cur] = parent
            while finish[cur] == finish[parents[cur]]:
                cur = parents[cur]
                parent = parents[cur]
                if parent == vertexes[0]:
                    break
    # Convert to the required 1-based numbering (root's parent becomes 0).
    return [p + 1 for p in parents]


def main():
    """Read n, the start array and the finish array from STDIN and print
    the reconstructed parent list."""
    int(input())  # vertex count; array lengths already carry it
    start = list(map(int, input().strip().split()))
    finish = list(map(int, input().strip().split()))
    print(*restore_parents(start, finish))


if __name__ == "__main__":
    main()
| 26.294118 | 94 | 0.569351 | 125 | 894 | 4.072 | 0.448 | 0.098232 | 0.094303 | 0.058939 | 0.098232 | 0.098232 | 0 | 0 | 0 | 0 | 0 | 0.010989 | 0.287472 | 894 | 33 | 95 | 27.090909 | 0.788069 | 0.297539 | 0 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030303 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5eb3429d56063aad6eca63d3f25a6fb4eaf84b5 | 1,674 | py | Python | Sorting/Sorts.py | niranjan09/DataStructures_Algorithms | df2801f7ea48a39a55a6d79fd66ad200a2de0145 | [
"MIT"
] | null | null | null | Sorting/Sorts.py | niranjan09/DataStructures_Algorithms | df2801f7ea48a39a55a6d79fd66ad200a2de0145 | [
"MIT"
] | null | null | null | Sorting/Sorts.py | niranjan09/DataStructures_Algorithms | df2801f7ea48a39a55a6d79fd66ad200a2de0145 | [
"MIT"
] | null | null | null | import time
def swap(arr, i, j):
    """Exchange the elements at positions i and j of arr, in place."""
    arr[i], arr[j] = arr[j], arr[i]
def selection_sort(arr):
    """Sort arr in place (ascending) and return it.

    Exchange-style sort: any later element smaller than arr[left] is
    swapped forward, so after pass `left` the prefix arr[:left+1] is sorted.
    """
    size = len(arr)
    for left in range(size):
        for right in range(left + 1, size):
            if arr[left] > arr[right]:
                arr[left], arr[right] = arr[right], arr[left]
    return arr
def bubble_sort(arr):
    """Sort arr in place (ascending) via bubble sort and return it.

    Repeats full passes until a pass performs no exchange.
    """
    dirty = True
    while dirty:
        dirty = False
        for pos in range(len(arr) - 1):
            if arr[pos] > arr[pos + 1]:
                arr[pos], arr[pos + 1] = arr[pos + 1], arr[pos]
                dirty = True
    return arr
def insertion_sort(arr):
    """Sort arr in place (ascending) via insertion sort and return it.

    Fixes over the original:
    - the original's ``while(arr[i] < arr[temp] and temp>=0)`` indexed
      ``arr[temp]`` before checking the bound, so ``temp == -1`` silently
      wrapped around to the last element and corrupted the scan;
    - insert-then-delete bookkeeping replaced with the standard shift;
    - Python 2 debug ``print`` statements removed.
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # Shift every larger element one slot right, then drop `key` in.
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = key
    return arr
def merge_sort(arr):
    """Return a new ascending-sorted list built by recursive merge sort.

    Fixes over the original:
    - base case is now ``<= 1``: the original's ``== 1`` recursed forever
      on an empty list;
    - ``//`` replaces ``/``, which is float division under Python 3 and
      breaks the slice indices.
    """
    if len(arr) <= 1:
        return arr
    mid = len(arr) // 2
    left = merge_sort(arr[:mid])
    right = merge_sort(arr[mid:])
    # Merge the two sorted halves.
    merged = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def quick_sort(arr):
    """Return a new ascending-sorted list via three-way quicksort.

    The pivot is the middle element; elements equal to it are kept in a
    separate partition so duplicates never recurse. The base case is now
    ``<= 1`` (the original's ``< 1`` made singleton lists go through a
    full, pointless partition-and-recurse cycle).
    """
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quick_sort(left) + middle + quick_sort(right)
def direct_addressing_sort(arr):
    """Counting (direct-addressing) sort for a list of integers.

    Fixes over the original, which incremented ``a[i]`` (the loop index)
    instead of the slot for ``arr[i] - minimum``, printed the broken count
    array with a Python 2 print statement, and returned nothing:
    counts are now keyed by value offset, the sorted list is printed and
    also returned (the original returned None, so adding a return value
    is backward-compatible).
    """
    if not arr:
        print([])
        return []
    minimum = min(arr)
    maximum = max(arr)
    # One counter per possible value in [minimum, maximum].
    counts = [0] * (maximum - minimum + 1)
    for value in arr:
        counts[value - minimum] += 1
    result = []
    for offset, count in enumerate(counts):
        result.extend([offset + minimum] * count)
    print(result)
    return result
# Smoke test: print timestamps around a quick_sort run on duplicate floats.
print(time.time())
print(quick_sort([-2.2, -2.2, -2.2]*5+[1.1]*5))
print(time.time())
| 19.465116 | 53 | 0.599164 | 337 | 1,674 | 2.869436 | 0.148368 | 0.062048 | 0.057911 | 0.045502 | 0.26577 | 0.220269 | 0.088935 | 0.055843 | 0 | 0 | 0 | 0.024335 | 0.214456 | 1,674 | 85 | 54 | 19.694118 | 0.711027 | 0 | 0 | 0.178082 | 0 | 0 | 0.006571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.013699 | null | null | 0.082192 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5f3bfd369153aa479d911c36ea48a47b9a4d56f | 485 | py | Python | waateax/users/migrations/0004_auto_20200910_1516.py | hendu25/waatea | 668707cd14d3336cd74d7043473f7094f5d0db6e | [
"MIT"
] | 4 | 2020-09-10T11:51:48.000Z | 2021-08-18T21:25:22.000Z | waateax/users/migrations/0004_auto_20200910_1516.py | hendu25/waatea | 668707cd14d3336cd74d7043473f7094f5d0db6e | [
"MIT"
] | 22 | 2020-09-10T11:14:22.000Z | 2021-08-24T14:58:58.000Z | waateax/users/migrations/0004_auto_20200910_1516.py | hendu25/waatea | 668707cd14d3336cd74d7043473f7094f5d0db6e | [
"MIT"
] | 3 | 2020-09-13T06:37:14.000Z | 2021-08-17T13:51:23.000Z | # Generated by Django 3.0.10 on 2020-09-10 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax User.mobile_phone: allow blank values, default to '' and cap
    the length at 255 characters."""

    dependencies = [
        ('users', '0003_user_mobile_phone'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='mobile_phone',
            field=models.CharField(blank=True, default='', max_length=255, verbose_name='Mobile phone number'),
            preserve_default=False,
        ),
    ]
| 24.25 | 111 | 0.618557 | 54 | 485 | 5.407407 | 0.740741 | 0.113014 | 0.10274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.064607 | 0.265979 | 485 | 19 | 112 | 25.526316 | 0.755618 | 0.094845 | 0 | 0 | 1 | 0 | 0.141876 | 0.050343 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5fc1fd40bd0f609dfec322f4db82f5ebd97d46e | 704 | py | Python | ALGOs/Perceptron/logic_gates.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | 1 | 2016-11-29T04:28:09.000Z | 2016-11-29T04:28:09.000Z | ALGOs/Perceptron/logic_gates.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | null | null | null | ALGOs/Perceptron/logic_gates.py | iamharshit/ML_works | b0bb53c5a60312719d15e25b727c54cbab65b4af | [
"MIT"
] | null | null | null | #The input to the gate can only be 0 or 1
'''
Single Layer Perceptrons
'''
def AND_perceptron(x1, x2):
    """Two-input AND gate as a single perceptron: fires iff x1 + x2 >= 2."""
    weight1, weight2, threshold = 1, 1, 2
    return weight1 * x1 + weight2 * x2 >= threshold
def OR_perceptron(x1, x2):
    """Two-input OR gate as a single perceptron: fires iff x1 + x2 >= 1."""
    weight1, weight2, threshold = 1, 1, 1
    return weight1 * x1 + weight2 * x2 >= threshold
# NOTE(review): despite its name this is a NOT gate (fires exactly when
# x1 <= 0) and, being a redefinition, it shadows the two-input
# AND_perceptron above -- consider renaming it to NOT_perceptron.
def AND_perceptron(x1):
    weight1, threshold = -1, 0
    return weight1 * x1 >= threshold
'''
Multi Layer Perceptrons
'''
def XOR_perceptron(x1, x2):
    """Two-input XOR built as a two-layer perceptron network.

    Hidden layer: an OR-like unit and a NAND-like unit; output layer ANDs
    them, which yields XOR.
    """
    or_unit = 1 * x1 + 1 * x2 >= 0.5        # layer 1, node 1
    nand_unit = -1 * x1 + -1 * x2 >= -1.5   # layer 1, node 2
    # Layer 2 (output): AND of the two hidden activations.
    return 1 * or_unit + 1 * nand_unit >= 1.5
# Truth-table demo for the XOR network. The original used Python 2
# ``print`` statements; print() calls behave identically for a single
# argument under Python 2 and also run under Python 3.
print(XOR_perceptron(0, 0))
print(XOR_perceptron(0, 1))
print(XOR_perceptron(1, 0))
print(XOR_perceptron(1, 1))
c5ffc77661b72f4eb6b28eeafca66a6e20e1570d | 6,897 | py | Python | src/sms_verifier/settings.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | null | null | null | src/sms_verifier/settings.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 10 | 2020-02-12T02:51:31.000Z | 2022-02-10T13:33:43.000Z | src/sms_verifier/settings.py | ArieLevs/sms-verifier-backend | b687f0426e7fc871a7ba50d95499187e72089dce | [
"MIT"
] | 1 | 2022-02-22T18:56:22.000Z | 2022-02-22T18:56:22.000Z | """
Django settings for sms_verifier project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import socket
# Deployment environment ('dev', 'ci', ...) and public base URL, both
# injected through the process environment.
ENVIRONMENT = os.environ.get('environment', 'dev')
DOMAIN_NAME = os.environ.get('domain_name', 'http://127.0.0.1:8000')

try:
    HOSTNAME = socket.gethostname()
except ImportError as e:
    HOSTNAME = 'localhost'

PROJECT_NAME = 'SMS Verifier'
VERSION = os.environ.get('version', 'null')

# Extra hosts may be supplied as a comma-separated env var.
EXTRA_ALLOWED_HOSTS = os.environ.get('allowed_hosts', '').split(',')

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('django_secret_key', 'djangoSecretKey')

# SECURITY WARNING: don't run with debug turned on in production!
if ENVIRONMENT in ('dev', 'ci'):
    DEBUG = True
    EXTRA_ALLOWED_HOSTS.append('*')
else:
    DEBUG = False

INTERNAL_IPS = [
    '127.0.0.1',
]

ALLOWED_HOSTS = [
    'alpha.sms-verifier.nalkins.cloud',
    'sms-verifier.nalkins.cloud',
    '127.0.0.1',
    '10.0.2.2',  # Android AVD alias for the host machine's localhost
] + EXTRA_ALLOWED_HOSTS
# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'oauth2_provider',
    'django_user_email_extension',
    'sms_verifier_app',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sms_verifier.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'sms_verifier.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases

######################
# DATABASE SETTINGS
######################
# Local development uses an on-disk SQLite file; every other environment
# talks to MySQL configured through environment variables.
if ENVIRONMENT == 'dev':
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.sqlite3',
            'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        }
    }
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': os.environ.get('db_name', 'sms_verifier'),
            'USER': os.environ.get('db_user', 'sms_verifier'),
            'PASSWORD': os.environ.get('db_pass', 'django'),
            'HOST': os.environ.get('db_host', 'localhost'),
            'PORT': '3306',
        }
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/

STATIC_URL = os.environ.get('static_url', "/static/")
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
######################
# Custom User Model
######################
AUTH_USER_MODEL = 'django_user_email_extension.User'

##################
# REST Framework
##################
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
        # 'rest_framework.authentication.BasicAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
}

######################
# Social Auth
######################
SOCIAL_AUTH_USER_MODEL = 'django_user_email_extension.User'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'index'
SOCIAL_AUTH_LOGOUT_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_ERROR_URL = '/'
SOCIAL_AUTH_LOGIN_URL = 'index'
SOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True

SOCIAL_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.auth_allowed',
    'social_core.pipeline.social_auth.social_user',
    'social_core.pipeline.user.get_username',
    'social_core.pipeline.social_auth.associate_by_email',
    'social_core.pipeline.user.create_user',
    'social_core.pipeline.social_auth.associate_user',
    'social_core.pipeline.social_auth.load_extra_data',
    'social_core.pipeline.user.user_details',
)

# OAuth client credentials are injected through the environment.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('google_oauth_client_id', 'None')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('google_oauth_client_secret', 'None')

SOCIAL_AUTH_GITHUB_KEY = os.environ.get('github_oauth_client_id', 'None')
SOCIAL_AUTH_GITHUB_SECRET = os.environ.get('github_oauth_client_secret', 'None')
SOCIAL_AUTH_GITHUB_SCOPE = [
    'read:user',
    'user:email',
    'read:org',
]

AUTHENTICATION_BACKENDS = (
    'social_core.backends.github.GithubOAuth2',
    'social_core.backends.google.GoogleOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
######################
# LOGGING SETTINGS
######################
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')
HANDLERS = ['console']

LOGGING = {
    'version': 1,
    'handlers': {
        'console': {
            'level': LOG_LEVEL,
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        # Application logger keyed by the project's display name.
        PROJECT_NAME: {
            'handlers': HANDLERS,
            'level': LOG_LEVEL,
        },
        'django.request': {
            'handlers': HANDLERS,
            'level': 'WARNING',
            'propagate': False,
        },
    },
}
| 27.810484 | 91 | 0.660577 | 759 | 6,897 | 5.776021 | 0.30303 | 0.04562 | 0.041058 | 0.038321 | 0.242245 | 0.216925 | 0.086907 | 0.063412 | 0.027372 | 0 | 0 | 0.01091 | 0.176019 | 6,897 | 247 | 92 | 27.923077 | 0.760514 | 0.148325 | 0 | 0.08284 | 1 | 0 | 0.478878 | 0.3612 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.035503 | 0.017751 | 0 | 0.017751 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
680818c94a5d8918261d5f2d00ab826a279d384e | 1,096 | py | Python | notebooks/tpot_exported_pipeline.py | rsouza/FGV_Intro_DS | 5c4da0a9476654516199708cb729231a21d99686 | [
"MIT"
] | 38 | 2018-02-28T17:33:00.000Z | 2022-01-21T22:30:11.000Z | Notebooks/tpot_exported_pipeline.py | rsouza/DataScience_Course | dd8018d616cf24a7b23c7b04ea1e2120261da3c0 | [
"MIT"
] | 17 | 2018-08-16T12:11:57.000Z | 2019-08-04T17:55:27.000Z | Notebooks/tpot_exported_pipeline.py | rsouza/DataScience_Course | dd8018d616cf24a7b23c7b04ea1e2120261da3c0 | [
"MIT"
] | 62 | 2018-02-20T12:58:13.000Z | 2022-03-03T11:10:16.000Z | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline, make_union
from tpot.builtins import StackingEstimator
from xgboost import XGBClassifier
from sklearn.preprocessing import FunctionTransformer
from copy import copy
# NOTE: Make sure that the outcome column is labeled 'target' in the data file
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1)
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'], random_state=None)

# Average CV score on the training set was: 0.8477898176814586
# Two identity transformers unioned side by side (duplicates the feature
# columns), feeding a gradient-boosted tree classifier.
feature_union = make_union(
    FunctionTransformer(copy),
    FunctionTransformer(copy),
)
classifier = XGBClassifier(learning_rate=0.5, max_depth=9, min_child_weight=9,
                           n_estimators=100, n_jobs=1, subsample=0.8, verbosity=0)
exported_pipeline = make_pipeline(feature_union, classifier)

exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| 40.592593 | 125 | 0.794708 | 152 | 1,096 | 5.526316 | 0.532895 | 0.039286 | 0.033333 | 0.05 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032393 | 0.126825 | 1,096 | 26 | 126 | 42.153846 | 0.84535 | 0.125 | 0 | 0 | 0 | 0 | 0.047071 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.380952 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
6813cdd606ade45f43873ba2c99142d90f8a961a | 16,267 | py | Python | wrt/wrt-packertool-android-tests/test.py | tiwanek/crosswalk-test-suite | 82d12e6187c81caeb56e12c5ec3483f35d939982 | [
"BSD-3-Clause"
] | null | null | null | wrt/wrt-packertool-android-tests/test.py | tiwanek/crosswalk-test-suite | 82d12e6187c81caeb56e12c5ec3483f35d939982 | [
"BSD-3-Clause"
] | null | null | null | wrt/wrt-packertool-android-tests/test.py | tiwanek/crosswalk-test-suite | 82d12e6187c81caeb56e12c5ec3483f35d939982 | [
"BSD-3-Clause"
] | null | null | null | import sys, os, os.path, time, shutil
import commands
from xml.etree.ElementTree import ElementTree
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import SubElement as SE
import metacomm.combinatorics.all_pairs2
# Shorthand for the pairwise-combination generator from metacomm.
all_pairs = metacomm.combinatorics.all_pairs2.all_pairs2
# Run-wide counters and state shared (via `global`) by the helpers below.
totalNum = 0
failNum = 0
passNum = 0
# Current case set being executed: "positive" or "negative".
Flag = "positive"
# Suite root directory; all file paths below are built from it.
ConstPath = os.getcwd()
Start = time.strftime("%Y-%m-%d %H:%M:%S")
# One dict per executed case; consumed by genResultXml().
ResultList = []
# Output dir of the most recently generated APK (relative to tools/crosswalk).
Direc = "./"
def lineCount(fp):
    """Return the number of lines in the text file at path *fp*.

    The original opened the file without ever guaranteeing it was closed
    on error and materialized every line in memory; a ``with`` block plus
    a generator keeps the handle safe and the memory flat.
    """
    with open(fp) as handle:
        return sum(1 for _ in handle)
# Append the contents of combIn onto combOut (accumulates the per-section
# allpairs output files into one selfcomb.txt). Exits the whole process
# with status 1 on any error.
def genSelfcom(combIn, combOut):
try:
fp = open(combIn)
comb = open(combOut, 'a+')
comb.write(fp.read())
fp.close()
comb.close()
print "Update selfcomb.txt ---------------->O.k"
return
except Exception,e:
print Exception,":",e
print "Update selfcomb.txt ---------------->Error"
sys.exit(1)
# Drive one seed file end to end: split its "section--opt:v1,v2" lines into
# per-section *_input.txt files, expand each section via all-pairs into
# *_output.txt, merge those into selfcomb.txt, generate the case file and
# execute it. Exits the process on any error.
def processMain(seedIn):
try:
print "Input Seed :" + os.path.basename(seedIn)
print "Excute " + Flag + " cases ------------------------->Start"
row = 0
sectionList = []
fp = open(seedIn)
for line in fp:
# Line format: "<section>--<option>:<comma-separated values>".
items = line.strip('\n\r').split(":")
sectionName = items[0].split("--")[0]
if sectionName not in sectionList:
sectionList.append(sectionName)
inputTxt = open(ConstPath + "/self/" + sectionName + "_input.txt", "a+")
inputTxt.write(line)
inputTxt.close()
fp.close()
for section in sectionList:
caseline = ""
counters = lineCount(ConstPath + "/self/" + section + "_input.txt")
if counters >= 2:
# NOTE(review): `row` is not reset per section, so sections after the
# first index `lists` starting from the previous section's count --
# relies on each section being processed with a fresh counter; verify.
lists = [[] for m in range(counters)]
inputTxt = open(ConstPath + "/self/" + section + "_input.txt")
for line in inputTxt:
items = line.strip('\n\r').split(":")
values = items[1].split(",")
lists[row].extend(values)
row = row + 1
inputTxt.close()
pairs = all_pairs(lists)
outTxt = open(ConstPath + "/self/" + section + "_output.txt", 'w+')
for e, v in enumerate(pairs):
for c in range(len(v)):
caseline = caseline + v[c] + ","
outTxt.write(section + ":" + caseline[:-1] + "\n")
outTxt.close()
else:
# Single-option section: the input file already is the output.
shutil.copy(ConstPath + "/self/" + section + "_input.txt", ConstPath + "/self/" + section + "_output.txt")
#1*********XX_output.txt -> selfcomb.txt
genSelfcom(ConstPath + "/self/" + section + "_output.txt", ConstPath + "/allpairs/selfcomb.txt")
#2*********selfcomb.txt -> caseXX.txt
genCases(ConstPath + "/allpairs/selfcomb.txt")
#3*********output -> manifest.json
caseExecute(ConstPath + "/allpairs/case_" + Flag + ".txt")
print "Excute " + Flag + " cases ------------------------->O.K"
except Exception,e:
print Exception,":",e
print "Excute " + Flag + " cases ------------------------->Error"
sys.exit(1)
# Turn selfcomb.txt into the tab-separated case file for the current Flag:
# first line = the option names (one per selfcomb line), then one all-pairs
# combination per line. Exits the process on any error.
def genCases(selfcomb):
try:
print "Genarate " + Flag + " case.txt file ---------------->Start"
caseFile = open(ConstPath + "/allpairs/case_" + Flag + ".txt", 'w+')
names = ""
row = 0
counters = lineCount(selfcomb)
lists = [[] for m in range(counters)]
fobj = open(selfcomb)
# First pass: collect the option names into the header row.
for line in fobj:
items = line.strip('\n\r').split(":")
names = names + items[0] + "\t"
caseFile.write(names.rstrip("\t") + "\n")
fobj.seek(0)
# Second pass: collect each option's value list for the pairwise expansion.
for line in fobj:
items = line.strip('\n\r').split(":")
values = items[1:]
lists[row].extend(":".join(values).split(","))
row = row + 1
fobj.close()
pairs = all_pairs(lists)
for e, v in enumerate(pairs):
case = ""
for c in range(0,len(v)):
case = case + v[c] +"\t"
caseFile.write(case.rstrip("\t") + "\n")
caseFile.close()
print "Genarate " + Flag + " case.txt file ---------------->O.k"
except Exception,e:
print "Generate " + Flag + " case.txt file ---------------->Error"
print Exception,":",e
sys.exit(1)
# Execute every case line in `caseInput`: build a `python make_apk.py`
# command from the tab-separated option values, run it, and record a result
# dict into ResultList. For the "negative" set a *successful* pack is a
# FAIL; for "positive" a successful pack is handed to tryRunApp().
def caseExecute(caseInput):
try:
global totalNum
global failNum
global passNum
global ResultList
global Flag
global Direc
print "Excute cases ------------------------->Start"
caseIn = open(caseInput)
# Header row holds the make_apk.py option names.
line = caseIn.readline().strip('\n\r')
sectionList = line.split("\t")
os.chdir(ConstPath + "/tools/crosswalk")
# Sanity check that the packer tool itself runs before looping cases.
toolstatus = commands.getstatusoutput("python make_apk.py")
if toolstatus[0] != 0:
print "Crosswalk Binary is not ready, Please attention"
sys.exit(1)
for line in caseIn:
totalNum = totalNum + 1
items = line.strip("\t\n").split("\t")
command = "python make_apk.py "
data = {"id":"","result":"","entry":"","start":"","end":"","set":""}
data["start"] = time.strftime("%Y-%m-%d %H:%M:%S")
for i in range(len(sectionList)):
# "000" is the seed files' placeholder for a literal space.
items[i] = items[i].replace("000", " ")
command = command + "--" + sectionList[i] + "=" + '"' + items[i] + '" '
command = command.strip()
if "target-dir" in sectionList:
dirIndex = sectionList.index("target-dir")
Direc = items[dirIndex]
else:
Direc = "./"
nameIndex = sectionList.index("name")
packIndex = sectionList.index("package")
name = items[nameIndex]
package = items[packIndex]
print "##########"
print "Case" + str(totalNum) + " :"
print "Packer Tool Command:"
print command
print "Genarate APK ---------------->Start"
packstatus = commands.getstatusoutput(command)
if Flag == "negative":
# Negative cases are expected to make the packer fail.
if packstatus[0] == 0:
print "Genarate APK ---------------->O.K"
result = "FAIL"
failNum = failNum + 1
else:
print "Genarate APK ---------------->Error"
result = "PASS"
passNum = passNum + 1
else:
if packstatus[0] != 0:
print "Genarate APK ---------------->Error"
result = "FAIL"
failNum = failNum + 1
else:
print "Genarate APK ---------------->O.K"
result = tryRunApp(name, package)
data["end"] = time.strftime("%Y-%m-%d %H:%M:%S")
data["id"] = "Case" + str(totalNum)
data["result"] = result
data["entry"] = command
data["set"] = Flag
ResultList.append(data)
# Clean up the generated APK before the next case.
os.system("rm -rf " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk")
print "Case Result :",result
print "##########"
caseIn.close()
print "Excute cases ------------------------->O.K"
except Exception,e:
print Exception,":",e
print "Execute case ---------------->Error"
sys.exit(1)
# Install / find / launch / force-stop / uninstall the packed APK on the
# connected device via adb; returns "PASS" only if the whole sequence
# succeeds, updating the global pass/fail counters along the way.
def tryRunApp(name, package):
try:
global failNum
global passNum
result = "PASS"
message = ""
print "Install APK ---------------->Start"
instatus = commands.getstatusoutput("adb install " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk")
if instatus[0] == 0:
print "Install APK ---------------->O.K"
print "Find Package in device ---------------->Start"
pmstatus = commands.getstatusoutput("adb shell pm list packages |grep " + package)
if pmstatus[0] == 0:
print "Find Package in device ---------------->O.K"
print "Launch APK ---------------->Start"
# NOTE(review): "Acivity" looks like a typo for "Activity" in the
# component name -- confirm against the APKs make_apk.py generates.
launchstatus = commands.getstatusoutput("adb shell am start -n " + package + "/." + name + "Acivity")
if launchstatus[0] != 0:
print "Launch APK ---------------->Error"
os.system("adb uninstall " + package)
failNum = failNum + 1
result = "FAIL"
else:
print "Launch APK ---------------->O.K"
print "Stop APK ---------------->Start"
stopstatus = commands.getstatusoutput("adb shell am force-stop " + package)
if stopstatus[0] == 0:
print "Stop APK ---------------->O.K"
print "Uninstall APK ---------------->Start"
unistatus = commands.getstatusoutput("adb uninstall " + package)
if unistatus[0] == 0:
print "Uninstall APK ---------------->O.K"
passNum = passNum + 1
else:
print "Uninstall APK ---------------->Error"
failNum = failNum + 1
result = "FAIL"
else:
print "Stop APK ---------------->Error"
failNum = failNum + 1
result = "FAIL"
os.system("adb uninstall " + package)
else:
print "Find Package in device ---------------->Error"
os.system("adb uninstall " + package)
failNum = failNum + 1
result = "FAIL"
else:
print "Install APK ---------------->Error"
result = "FAIL"
failNum = failNum + 1
# Remove the local APK regardless of the outcome.
os.system("rm -rf " + ConstPath + "/tools/crosswalk/" + Direc + "/*apk" + "&>/dev/null")
return result
except Exception,e:
print Exception,":",e
print "Try run webapp ---------------->Error"
sys.exit(1)
def updateXmlTitle(fp, title):
    """Prepend *title* (the XML declaration / stylesheet header) to the
    file at path *fp*, keeping the rest of its contents intact.

    Uses a ``with`` block so the handle is closed even if the rewrite
    fails partway (the original left the file object open on error).
    """
    with open(fp, "r+") as fobj:
        lines = fobj.readlines()
        fobj.seek(0)
        fobj.truncate()
        fobj.writelines([title] + lines)
# Serialize ResultList into report/wrt-packertool-android-tests.xml using
# the Tizen/Crosswalk test_definition schema, then prepend the XML/XSL
# header. Errors are printed but do not abort the run.
def genResultXml():
try:
tree = ElementTree()
root = Element("test_definition")
tree._setroot(root)
env = Element("environment", {"build_id":"","device_id":"","device_name":"","host":"",\
"lite_version":"","manufacturer":"","resolution":"","screen_size":""})
root.append(env)
#summary element
summary = Element("summary", {"test_plan_name":""})
root.append(summary)
tStart = SE(summary, "start_at")
tEnd = SE(summary, "end_at")
tStart.text = Start
tEnd.text = End
#suite element
suite = SE(root, "suite", {"category":"Crosswalk_Packer_Tool","launcher":"xwalk",\
"name":"wrt-packertool-android-tests"})
setPositive = SE(suite, "set", {"name":"positive","set_debug_msg":""})
setNegitive = SE(suite, "set", {"name":"negitive","set_debug_msg":""})
#testcase element: one per entry recorded by caseExecute().
for case in ResultList:
setElement = setPositive
if case["set"] == "negative":
setElement = setNegitive
pur = "Check if packer tool work properly"
testcase = SE(setElement, "testcase", {"component":"Crosswalk Packer Tool",\
"execution_type":"auto","id":case["id"],"purpose":pur,"result":case["result"]},)
desc = SE(testcase, "description")
entry = Element("test_script_entry")
entry.text = "pack command: " + case["entry"].decode("utf-8")
desc.append(entry)
resultInfo = SE(testcase, "result_info")
actualResult = SE(resultInfo, "actual_result")
actualResult.text = case["result"]
caseStart = SE(resultInfo, "start")
caseStart.text = case["start"]
caseEnd = SE(resultInfo, "end")
caseEnd.text = case["end"]
SE(resultInfo, "stdout")
SE(resultInfo, "stderr")
tree.write(ConstPath + "/report/wrt-packertool-android-tests.xml")
updateXmlTitle(ConstPath + "/report/wrt-packertool-android-tests.xml",'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-stylesheet type="text/xsl" href="./style/testresult.xsl"?>\n<?xml-stylesheet type="text/xsl" href="testresult.xsl"?>\n')
print "Generate test.result.xml file ------------------------->O.K"
except Exception,e:
print Exception,"Generate test.result.xml error:",e
# Serialize the run totals into report/summary.xml (pass/fail counts and
# percentages), then prepend the XML/XSL header. Errors are printed but
# do not abort the run.
# NOTE(review): the rate computations divide by totalNum, so a run with
# zero executed cases raises ZeroDivisionError here.
def genSummaryXml():
try:
tree = ElementTree()
root = Element("result_summary", {"plan_name":""})
tree._setroot(root)
env = SE(root,"environment",{"build_id":"","cts_version":"","device_id":"","device_model":"","device_name":"","host":"","resolution":"","screen_size":"","manufacturer":""})
summary = SE(root, "summary")
startTime = SE(summary, "start_at")
endTime = SE(summary, "end_at")
startTime.text = Start
endTime.text = End
suite = SE(root, "suite", {"name":"wrt-packertool-android-tests"})
total_case = SE(suite, "total_case")
total_case.text = str(totalNum)
pass_case = SE(suite, "pass_case")
pass_case.text = str(passNum)
pass_rate = SE(suite, "pass_rate")
pass_rate.text = str(float(passNum) / totalNum * 100)
fail_case = SE(suite, "fail_case")
fail_case.text = str(failNum)
fail_rate = SE(suite, "fail_rate")
fail_rate.text = str(float(failNum) / totalNum * 100)
SE(suite, "block_case")
SE(suite, "block_rate")
SE(suite, "na_case")
SE(suite, "na_rate")
tree.write(ConstPath + "/report/summary.xml")
updateXmlTitle(ConstPath + "/report/summary.xml",'<?xml version="1.0" encoding="UTF-8"?>\n<?xml-stylesheet type="text/xsl" href="./style/summary.xsl"?>\n')
print "Generate summary.xml file ------------------------->O.K"
except Exception,e:
print Exception,"Generate summary.xml error:",e
# Abort the run (exit 1) unless at least one device is attached: the
# output of `adb devices` is exactly two lines (header + blank) when no
# device is connected.
def devicesConform():
try:
deviceList = os.popen("adb devices").readlines()
if len(deviceList) == 2:
print "No test devices connected, Please attention"
sys.exit(1)
except Exception,e:
print Exception,"Device Connect error:",e
sys.exit(1)
# Entry point: clean workspace leftovers, verify a device is connected,
# run every positive seed then every negative seed through processMain(),
# and finally emit the result and summary XML reports. The scratch ./self
# directory is removed even on failure.
def main():
try:
global End
global Flag
os.system("rm -rf " + ConstPath + "/allpairs/negative/*~ &>/dev/null")
os.system("rm -rf " + ConstPath + "/allpairs/positive/*~ &>/dev/null")
os.system("rm -rf " + ConstPath + "/allpairs/positive/case*txt &>/dev/null")
os.system("rm -rf " + ConstPath + "/tools/crosswalk/*apk &>/dev/null")
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
os.system("mkdir -p " + ConstPath + "/self")
devicesConform()
#positive test
for seed in os.listdir(ConstPath + "/allpairs/positive/"):
os.system("rm -rf " + ConstPath + "/allpairs/selfcomb.txt &>/dev/null")
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
os.system("mkdir -p " + ConstPath + "/self")
processMain(ConstPath + "/allpairs/positive/" + seed)
#negative case
Flag = "negative"
for seed in os.listdir(ConstPath + "/allpairs/negative/"):
os.system("rm -rf " + ConstPath + "/allpairs/selfcomb.txt &>/dev/null")
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
os.system("mkdir -p " + ConstPath + "/self")
processMain(ConstPath + "/allpairs/negative/" + seed)
End = time.strftime("%Y-%m-%d %H:%M:%S")
genResultXml()
genSummaryXml()
except Exception,e:
print Exception,":",e
sys.exit(1)
finally:
os.system("rm -rf " + ConstPath + "/self &>/dev/null")
if __name__=="__main__":
main()
| 40.165432 | 245 | 0.500645 | 1,645 | 16,267 | 4.903343 | 0.179331 | 0.017853 | 0.024176 | 0.017853 | 0.363501 | 0.251426 | 0.218696 | 0.160674 | 0.152864 | 0.103645 | 0 | 0.006874 | 0.320342 | 16,267 | 404 | 246 | 40.264851 | 0.722685 | 0.010942 | 0 | 0.316804 | 0 | 0.00551 | 0.247808 | 0.069399 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.030303 | 0.016529 | null | null | 0.143251 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6814898e651034adbd68ccc0b6b8b8dad8f99277 | 584 | py | Python | bigcommerce/resources/options.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | bigcommerce/resources/options.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | bigcommerce/resources/options.py | sebaacuna/bigcommerce-api-python | 59ef206d7296c196a0ae0400b6bf9bdb5c2f72af | [
"MIT"
] | null | null | null | from .base import *
class Options(ListableApiResource, CreateableApiResource, UpdateableApiResource, DeleteableApiResource):
resource_name = 'options'
def values(self, id=None):
if id:
return OptionValues.get(self.id, id, connection=self._connection)
else:
return OptionValues.all(self.id, connection=self._connection)
class OptionValues(ListableApiSubResource, CreateableApiSubResource, UpdateableApiSubResource, DeleteableApiSubResource):
resource_name = 'values'
parent_resource = 'options'
parent_key = 'option_id'
| 32.444444 | 121 | 0.739726 | 53 | 584 | 8.018868 | 0.566038 | 0.042353 | 0.075294 | 0.122353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.178082 | 584 | 17 | 122 | 34.352941 | 0.885417 | 0 | 0 | 0 | 0 | 0 | 0.049658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a84368b717e48b36eb701f61e858302d7ec5883b | 277 | py | Python | djblog/comments/admin.py | rewalkerof/microblog | 2ef1652dcb9a7d35eed228f1d74587f4b0fa8912 | [
"MIT"
] | null | null | null | djblog/comments/admin.py | rewalkerof/microblog | 2ef1652dcb9a7d35eed228f1d74587f4b0fa8912 | [
"MIT"
] | null | null | null | djblog/comments/admin.py | rewalkerof/microblog | 2ef1652dcb9a7d35eed228f1d74587f4b0fa8912 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Comment
# Register your models here.
class CommentsAdmin(admin.ModelAdmin):
list_display = ['id', "user", "content", "timestamp"]
class Meta:
model = Comment
admin.site.register(Comment, CommentsAdmin)
| 18.466667 | 57 | 0.714801 | 32 | 277 | 6.15625 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.180505 | 277 | 14 | 58 | 19.785714 | 0.867841 | 0.093863 | 0 | 0 | 0 | 0 | 0.088353 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a8475c65df4075b3b08496dd18fcba18f180aafe | 4,306 | py | Python | pgoapi/protos/POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | PogoHop/pgoapi-hsvr | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | [
"MIT"
] | null | null | null | pgoapi/protos/POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | PogoHop/pgoapi-hsvr | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | [
"MIT"
] | null | null | null | pgoapi/protos/POGOProtos/Networking/Responses/SetAvatarResponse_pb2.py | PogoHop/pgoapi-hsvr | b5761159e0240bbb81ef6c257fe2eb1bc1ce2d47 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/SetAvatarResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data import PlayerData_pb2 as POGOProtos_dot_Data_dot_PlayerData__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/SetAvatarResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n7POGOProtos/Networking/Responses/SetAvatarResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a POGOProtos/Data/PlayerData.proto\"\xd7\x01\n\x11SetAvatarResponse\x12I\n\x06status\x18\x01 \x01(\x0e\x32\x39.POGOProtos.Networking.Responses.SetAvatarResponse.Status\x12\x30\n\x0bplayer_data\x18\x02 \x01(\x0b\x32\x1b.POGOProtos.Data.PlayerData\"E\n\x06Status\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x16\n\x12\x41VATAR_ALREADY_SET\x10\x02\x12\x0b\n\x07\x46\x41ILURE\x10\x03\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_PlayerData__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETAVATARRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='POGOProtos.Networking.Responses.SetAvatarResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSET', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='AVATAR_ALREADY_SET', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILURE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=273,
serialized_end=342,
)
_sym_db.RegisterEnumDescriptor(_SETAVATARRESPONSE_STATUS)
_SETAVATARRESPONSE = _descriptor.Descriptor(
name='SetAvatarResponse',
full_name='POGOProtos.Networking.Responses.SetAvatarResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='player_data', full_name='POGOProtos.Networking.Responses.SetAvatarResponse.player_data', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SETAVATARRESPONSE_STATUS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=127,
serialized_end=342,
)
_SETAVATARRESPONSE.fields_by_name['status'].enum_type = _SETAVATARRESPONSE_STATUS
_SETAVATARRESPONSE.fields_by_name['player_data'].message_type = POGOProtos_dot_Data_dot_PlayerData__pb2._PLAYERDATA
_SETAVATARRESPONSE_STATUS.containing_type = _SETAVATARRESPONSE
DESCRIPTOR.message_types_by_name['SetAvatarResponse'] = _SETAVATARRESPONSE
SetAvatarResponse = _reflection.GeneratedProtocolMessageType('SetAvatarResponse', (_message.Message,), dict(
DESCRIPTOR = _SETAVATARRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.SetAvatarResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.SetAvatarResponse)
))
_sym_db.RegisterMessage(SetAvatarResponse)
# @@protoc_insertion_point(module_scope)
| 38.106195 | 529 | 0.760102 | 490 | 4,306 | 6.412245 | 0.293878 | 0.072565 | 0.092298 | 0.131763 | 0.328135 | 0.247931 | 0.203055 | 0.126034 | 0.101846 | 0.057288 | 0 | 0.035954 | 0.134464 | 4,306 | 112 | 530 | 38.446429 | 0.807083 | 0.065258 | 0 | 0.402174 | 1 | 0.01087 | 0.259278 | 0.221142 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076087 | 0 | 0.076087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8498638fe63f75b8fb7831a168989a3237bda41 | 887 | py | Python | PP4E-Examples-1.4/Examples/PP4E/System/Threads/four-threads.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/System/Threads/four-threads.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | PP4E-Examples-1.4/Examples/PP4E/System/Threads/four-threads.py | AngelLiang/PP4E | 3a7f63b366e1e4700b4d2524884696999a87ba9d | [
"MIT"
] | null | null | null | """
four different ways to run an action in a thread; all print 4294967296,
but prints should be synchronized with a mutex here to avoid overlap
"""
import threading, _thread
def action(i):
print(i ** 32)
# subclass with state
class Mythread(threading.Thread):
def __init__(self, i):
self.i = i
threading.Thread.__init__(self)
def run(self): # redefine run for action
print(self.i ** 32)
Mythread(2).start() # start invokes run()
# pass action in
thread = threading.Thread(target=(lambda: action(2))) # run invokes target
thread.start()
# same but no lambda wrapper for state
threading.Thread(target=action, args=(2,)).start() # callable plus its args
# basic thread module
_thread.start_new_thread(action, (2,)) # all-function interface
| 31.678571 | 83 | 0.621195 | 114 | 887 | 4.72807 | 0.473684 | 0.139147 | 0.06679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028213 | 0.280722 | 887 | 27 | 84 | 32.851852 | 0.816614 | 0.385569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.071429 | 0 | 0.357143 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a84c51e00e3c06ed946f2d73ff195ec6335ee4c9 | 8,007 | py | Python | argo/workflows/client/models/v1_pod_log_options.py | fvdnabee/argo-client-python | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | [
"Apache-2.0"
] | 35 | 2019-10-25T09:19:36.000Z | 2022-03-04T11:22:27.000Z | argo/workflows/client/models/v1_pod_log_options.py | fvdnabee/argo-client-python | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | [
"Apache-2.0"
] | 17 | 2019-10-30T03:49:20.000Z | 2020-07-02T15:54:50.000Z | argo/workflows/client/models/v1_pod_log_options.py | fvdnabee/argo-client-python | 0caa743442d37f2f2e3b30867398ed2708c1bf4d | [
"Apache-2.0"
] | 9 | 2019-11-06T13:30:08.000Z | 2021-06-12T03:00:05.000Z | # coding: utf-8
"""
Argo
Python client for Argo Workflows # noqa: E501
OpenAPI spec version: master
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1PodLogOptions(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'container': 'str',
'follow': 'bool',
'limit_bytes': 'str',
'previous': 'bool',
'since_seconds': 'str',
'since_time': 'V1Time',
'tail_lines': 'str',
'timestamps': 'bool'
}
attribute_map = {
'container': 'container',
'follow': 'follow',
'limit_bytes': 'limitBytes',
'previous': 'previous',
'since_seconds': 'sinceSeconds',
'since_time': 'sinceTime',
'tail_lines': 'tailLines',
'timestamps': 'timestamps'
}
def __init__(self, container=None, follow=None, limit_bytes=None, previous=None, since_seconds=None, since_time=None, tail_lines=None, timestamps=None): # noqa: E501
"""V1PodLogOptions - a model defined in Swagger""" # noqa: E501
self._container = None
self._follow = None
self._limit_bytes = None
self._previous = None
self._since_seconds = None
self._since_time = None
self._tail_lines = None
self._timestamps = None
self.discriminator = None
if container is not None:
self.container = container
if follow is not None:
self.follow = follow
if limit_bytes is not None:
self.limit_bytes = limit_bytes
if previous is not None:
self.previous = previous
if since_seconds is not None:
self.since_seconds = since_seconds
if since_time is not None:
self.since_time = since_time
if tail_lines is not None:
self.tail_lines = tail_lines
if timestamps is not None:
self.timestamps = timestamps
@property
def container(self):
"""Gets the container of this V1PodLogOptions. # noqa: E501
:return: The container of this V1PodLogOptions. # noqa: E501
:rtype: str
"""
return self._container
@container.setter
def container(self, container):
"""Sets the container of this V1PodLogOptions.
:param container: The container of this V1PodLogOptions. # noqa: E501
:type: str
"""
self._container = container
@property
def follow(self):
"""Gets the follow of this V1PodLogOptions. # noqa: E501
:return: The follow of this V1PodLogOptions. # noqa: E501
:rtype: bool
"""
return self._follow
@follow.setter
def follow(self, follow):
"""Sets the follow of this V1PodLogOptions.
:param follow: The follow of this V1PodLogOptions. # noqa: E501
:type: bool
"""
self._follow = follow
@property
def limit_bytes(self):
"""Gets the limit_bytes of this V1PodLogOptions. # noqa: E501
:return: The limit_bytes of this V1PodLogOptions. # noqa: E501
:rtype: str
"""
return self._limit_bytes
@limit_bytes.setter
def limit_bytes(self, limit_bytes):
"""Sets the limit_bytes of this V1PodLogOptions.
:param limit_bytes: The limit_bytes of this V1PodLogOptions. # noqa: E501
:type: str
"""
self._limit_bytes = limit_bytes
@property
def previous(self):
"""Gets the previous of this V1PodLogOptions. # noqa: E501
:return: The previous of this V1PodLogOptions. # noqa: E501
:rtype: bool
"""
return self._previous
@previous.setter
def previous(self, previous):
"""Sets the previous of this V1PodLogOptions.
:param previous: The previous of this V1PodLogOptions. # noqa: E501
:type: bool
"""
self._previous = previous
@property
def since_seconds(self):
"""Gets the since_seconds of this V1PodLogOptions. # noqa: E501
:return: The since_seconds of this V1PodLogOptions. # noqa: E501
:rtype: str
"""
return self._since_seconds
@since_seconds.setter
def since_seconds(self, since_seconds):
"""Sets the since_seconds of this V1PodLogOptions.
:param since_seconds: The since_seconds of this V1PodLogOptions. # noqa: E501
:type: str
"""
self._since_seconds = since_seconds
@property
def since_time(self):
"""Gets the since_time of this V1PodLogOptions. # noqa: E501
:return: The since_time of this V1PodLogOptions. # noqa: E501
:rtype: V1Time
"""
return self._since_time
@since_time.setter
def since_time(self, since_time):
"""Sets the since_time of this V1PodLogOptions.
:param since_time: The since_time of this V1PodLogOptions. # noqa: E501
:type: V1Time
"""
self._since_time = since_time
@property
def tail_lines(self):
"""Gets the tail_lines of this V1PodLogOptions. # noqa: E501
:return: The tail_lines of this V1PodLogOptions. # noqa: E501
:rtype: str
"""
return self._tail_lines
@tail_lines.setter
def tail_lines(self, tail_lines):
"""Sets the tail_lines of this V1PodLogOptions.
:param tail_lines: The tail_lines of this V1PodLogOptions. # noqa: E501
:type: str
"""
self._tail_lines = tail_lines
@property
def timestamps(self):
"""Gets the timestamps of this V1PodLogOptions. # noqa: E501
:return: The timestamps of this V1PodLogOptions. # noqa: E501
:rtype: bool
"""
return self._timestamps
@timestamps.setter
def timestamps(self, timestamps):
"""Sets the timestamps of this V1PodLogOptions.
:param timestamps: The timestamps of this V1PodLogOptions. # noqa: E501
:type: bool
"""
self._timestamps = timestamps
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1PodLogOptions, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodLogOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.869128 | 170 | 0.585488 | 892 | 8,007 | 5.098655 | 0.140135 | 0.042216 | 0.147757 | 0.131926 | 0.432718 | 0.337951 | 0.306069 | 0.242964 | 0.08971 | 0.016711 | 0 | 0.023432 | 0.323092 | 8,007 | 297 | 171 | 26.959596 | 0.815683 | 0.333958 | 0 | 0.076336 | 1 | 0 | 0.0616 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.167939 | false | 0 | 0.022901 | 0 | 0.320611 | 0.015267 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a854b2655b1079804920d693717c0a025c1256f9 | 129,043 | py | Python | benchmarks/Generators/Parser/SygusParser.py | yangdinglou/DryadSynth | 0e80c08c3f8a95c16c75b413b53b10fa30c945ce | [
"MIT"
] | 19 | 2020-07-06T01:01:28.000Z | 2022-03-09T19:22:25.000Z | benchmarks/Generators/Parser/SygusParser.py | yangdinglou/DryadSynth | 0e80c08c3f8a95c16c75b413b53b10fa30c945ce | [
"MIT"
] | 2 | 2020-09-20T12:26:54.000Z | 2021-11-22T21:48:08.000Z | benchmarks/Generators/Parser/SygusParser.py | yangdinglou/DryadSynth | 0e80c08c3f8a95c16c75b413b53b10fa30c945ce | [
"MIT"
] | null | null | null | # Generated from Sygus.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
    """Return the serialized ATN (augmented transition network) for the Sygus parser.

    This function and the string payload below were machine-generated by
    ANTLR 4.7.1 from ``Sygus.g4`` -- do not edit by hand; regenerate from the
    grammar instead.  The concatenated string is an opaque, versioned encoding
    of the parser's state machine (states, transitions, decision points); it is
    consumed once by ``ATNDeserializer().deserialize(...)`` when the parser
    class is loaded.  Every escape sequence is significant: changing a single
    character corrupts the encoding.
    """
    with StringIO() as buf:
        buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3%")
        buf.write("\u01ca\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23")
        buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31")
        buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36")
        buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t")
        buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4")
        buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64")
        buf.write("\4\65\t\65\4\66\t\66\3\2\3\2\5\2o\n\2\3\3\3\3\3\3\3\3")
        buf.write("\5\3u\n\3\3\4\3\4\3\5\3\5\3\5\3\5\3\5\3\6\3\6\3\6\3\7")
        buf.write("\3\7\3\7\3\7\5\7\u0085\n\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b")
        buf.write("\3\b\3\b\3\b\3\b\5\b\u0092\n\b\3\t\3\t\3\t\3\t\3\t\3\t")
        buf.write("\3\n\3\n\3\n\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13")
        buf.write("\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13")
        buf.write("\3\13\3\13\3\13\5\13\u00b4\n\13\3\f\3\f\3\r\3\r\3\16\3")
        buf.write("\16\3\17\3\17\3\17\3\17\3\20\3\20\3\21\3\21\3\21\3\21")
        buf.write("\3\22\3\22\3\22\3\23\3\23\3\23\3\23\5\23\u00cd\n\23\3")
        buf.write("\24\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\26\3\26")
        buf.write("\3\26\3\27\3\27\3\27\3\27\5\27\u00df\n\27\3\30\3\30\3")
        buf.write("\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\31")
        buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33")
        buf.write("\3\33\3\33\5\33\u00fb\n\33\3\34\3\34\3\34\3\34\3\35\3")
        buf.write("\35\3\35\3\35\5\35\u0105\n\35\3\36\3\36\3\36\3\36\3\36")
        buf.write("\3\37\3\37\3\37\3\37\3\37\3\37\3\37\3\37\5\37\u0114\n")
        buf.write("\37\3 \3 \3 \3 \3 \3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3\"\5")
        buf.write("\"\u0125\n\"\3#\3#\3#\3#\3#\3#\3$\3$\3$\3$\5$\u0131\n")
        buf.write("$\3%\3%\3%\3%\3%\5%\u0138\n%\3&\3&\3&\3\'\3\'\3\'\3\'")
        buf.write("\5\'\u0141\n\'\3(\3(\3(\3(\3(\3(\3(\3(\3)\3)\3)\3*\3*")
        buf.write("\3*\3*\5*\u0152\n*\3+\3+\3+\3+\3,\3,\3,\3,\3,\3-\3-\3")
        buf.write("-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\3-\5-\u016e\n")
        buf.write("-\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\3")
        buf.write(".\3.\3.\3.\3.\3.\3.\3.\3.\3.\3.\5.\u018c\n.\3/\3/\3/\3")
        buf.write("/\3/\3/\3/\3/\3\60\3\60\3\60\3\61\3\61\3\61\3\61\5\61")
        buf.write("\u019d\n\61\3\62\3\62\3\62\3\62\3\62\3\62\3\63\3\63\3")
        buf.write("\63\3\63\5\63\u01a9\n\63\3\64\3\64\3\64\3\64\3\64\3\64")
        buf.write("\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\3\64\5\64\u01ba")
        buf.write("\n\64\3\65\3\65\3\65\3\65\3\65\3\65\3\66\3\66\3\66\3\66")
        buf.write("\3\66\3\66\3\66\3\66\3\66\2\2\67\2\4\6\b\n\f\16\20\22")
        buf.write("\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJLNPR")
        buf.write("TVXZ\\^`bdfhj\2\3\3\2\16\17\2\u01c1\2n\3\2\2\2\4t\3\2")
        buf.write("\2\2\6v\3\2\2\2\bx\3\2\2\2\n}\3\2\2\2\f\u0084\3\2\2\2")
        buf.write("\16\u0091\3\2\2\2\20\u0093\3\2\2\2\22\u0099\3\2\2\2\24")
        buf.write("\u00b3\3\2\2\2\26\u00b5\3\2\2\2\30\u00b7\3\2\2\2\32\u00b9")
        buf.write("\3\2\2\2\34\u00bb\3\2\2\2\36\u00bf\3\2\2\2 \u00c1\3\2")
        buf.write("\2\2\"\u00c5\3\2\2\2$\u00cc\3\2\2\2&\u00ce\3\2\2\2(\u00d3")
        buf.write("\3\2\2\2*\u00d7\3\2\2\2,\u00de\3\2\2\2.\u00e0\3\2\2\2")
        buf.write("\60\u00e5\3\2\2\2\62\u00ed\3\2\2\2\64\u00fa\3\2\2\2\66")
        buf.write("\u00fc\3\2\2\28\u0104\3\2\2\2:\u0106\3\2\2\2<\u0113\3")
        buf.write("\2\2\2>\u0115\3\2\2\2@\u011d\3\2\2\2B\u0124\3\2\2\2D\u0126")
        buf.write("\3\2\2\2F\u0130\3\2\2\2H\u0137\3\2\2\2J\u0139\3\2\2\2")
        buf.write("L\u0140\3\2\2\2N\u0142\3\2\2\2P\u014a\3\2\2\2R\u0151\3")
        buf.write("\2\2\2T\u0153\3\2\2\2V\u0157\3\2\2\2X\u016d\3\2\2\2Z\u018b")
        buf.write("\3\2\2\2\\\u018d\3\2\2\2^\u0195\3\2\2\2`\u019c\3\2\2\2")
        buf.write("b\u019e\3\2\2\2d\u01a8\3\2\2\2f\u01b9\3\2\2\2h\u01bb\3")
        buf.write("\2\2\2j\u01c1\3\2\2\2lo\5\4\3\2mo\3\2\2\2nl\3\2\2\2nm")
        buf.write("\3\2\2\2o\3\3\2\2\2pq\5\b\5\2qr\5\n\6\2ru\3\2\2\2su\5")
        buf.write("\n\6\2tp\3\2\2\2ts\3\2\2\2u\5\3\2\2\2vw\7%\2\2w\7\3\2")
        buf.write("\2\2xy\7\3\2\2yz\7\4\2\2z{\5\6\4\2{|\7\5\2\2|\t\3\2\2")
        buf.write("\2}~\5\16\b\2~\177\5\f\7\2\177\13\3\2\2\2\u0080\u0081")
        buf.write("\5\16\b\2\u0081\u0082\5\f\7\2\u0082\u0085\3\2\2\2\u0083")
        buf.write("\u0085\3\2\2\2\u0084\u0080\3\2\2\2\u0084\u0083\3\2\2\2")
        buf.write("\u0085\r\3\2\2\2\u0086\u0092\5\60\31\2\u0087\u0092\5\62")
        buf.write("\32\2\u0088\u0092\5X-\2\u0089\u0092\5T+\2\u008a\u0092")
        buf.write("\5V,\2\u008b\u0092\5\22\n\2\u008c\u0092\5&\24\2\u008d")
        buf.write("\u0092\5\20\t\2\u008e\u0092\5f\64\2\u008f\u0092\5h\65")
        buf.write("\2\u0090\u0092\5j\66\2\u0091\u0086\3\2\2\2\u0091\u0087")
        buf.write("\3\2\2\2\u0091\u0088\3\2\2\2\u0091\u0089\3\2\2\2\u0091")
        buf.write("\u008a\3\2\2\2\u0091\u008b\3\2\2\2\u0091\u008c\3\2\2\2")
        buf.write("\u0091\u008d\3\2\2\2\u0091\u008e\3\2\2\2\u0091\u008f\3")
        buf.write("\2\2\2\u0091\u0090\3\2\2\2\u0092\17\3\2\2\2\u0093\u0094")
        buf.write("\7\3\2\2\u0094\u0095\7\6\2\2\u0095\u0096\5\6\4\2\u0096")
        buf.write("\u0097\5\24\13\2\u0097\u0098\7\5\2\2\u0098\21\3\2\2\2")
        buf.write("\u0099\u009a\7\3\2\2\u009a\u009b\7\7\2\2\u009b\u009c\5")
        buf.write("\6\4\2\u009c\u009d\5\24\13\2\u009d\u009e\7\5\2\2\u009e")
        buf.write("\23\3\2\2\2\u009f\u00a0\7\3\2\2\u00a0\u00a1\7\b\2\2\u00a1")
        buf.write("\u00a2\5\26\f\2\u00a2\u00a3\7\5\2\2\u00a3\u00b4\3\2\2")
        buf.write("\2\u00a4\u00b4\7\t\2\2\u00a5\u00b4\7\n\2\2\u00a6\u00b4")
        buf.write("\7\13\2\2\u00a7\u00a8\7\3\2\2\u00a8\u00a9\7\f\2\2\u00a9")
        buf.write("\u00aa\5 \21\2\u00aa\u00ab\7\5\2\2\u00ab\u00b4\3\2\2\2")
        buf.write("\u00ac\u00ad\7\3\2\2\u00ad\u00ae\7\r\2\2\u00ae\u00af\5")
        buf.write("\24\13\2\u00af\u00b0\5\24\13\2\u00b0\u00b1\7\5\2\2\u00b1")
        buf.write("\u00b4\3\2\2\2\u00b2\u00b4\5\6\4\2\u00b3\u009f\3\2\2\2")
        buf.write("\u00b3\u00a4\3\2\2\2\u00b3\u00a5\3\2\2\2\u00b3\u00a6\3")
        buf.write("\2\2\2\u00b3\u00a7\3\2\2\2\u00b3\u00ac\3\2\2\2\u00b3\u00b2")
        buf.write("\3\2\2\2\u00b4\25\3\2\2\2\u00b5\u00b6\7!\2\2\u00b6\27")
        buf.write("\3\2\2\2\u00b7\u00b8\t\2\2\2\u00b8\31\3\2\2\2\u00b9\u00ba")
        buf.write("\7\"\2\2\u00ba\33\3\2\2\2\u00bb\u00bc\5\6\4\2\u00bc\u00bd")
        buf.write("\7\20\2\2\u00bd\u00be\5\6\4\2\u00be\35\3\2\2\2\u00bf\u00c0")
        buf.write("\7#\2\2\u00c0\37\3\2\2\2\u00c1\u00c2\7\3\2\2\u00c2\u00c3")
        buf.write("\5\"\22\2\u00c3\u00c4\7\5\2\2\u00c4!\3\2\2\2\u00c5\u00c6")
        buf.write("\5\6\4\2\u00c6\u00c7\5$\23\2\u00c7#\3\2\2\2\u00c8\u00c9")
        buf.write("\5\6\4\2\u00c9\u00ca\5$\23\2\u00ca\u00cd\3\2\2\2\u00cb")
        buf.write("\u00cd\3\2\2\2\u00cc\u00c8\3\2\2\2\u00cc\u00cb\3\2\2\2")
        buf.write("\u00cd%\3\2\2\2\u00ce\u00cf\7\3\2\2\u00cf\u00d0\7\21\2")
        buf.write("\2\u00d0\u00d1\5(\25\2\u00d1\u00d2\7\5\2\2\u00d2\'\3\2")
        buf.write("\2\2\u00d3\u00d4\7\3\2\2\u00d4\u00d5\5*\26\2\u00d5\u00d6")
        buf.write("\7\5\2\2\u00d6)\3\2\2\2\u00d7\u00d8\5.\30\2\u00d8\u00d9")
        buf.write("\5,\27\2\u00d9+\3\2\2\2\u00da\u00db\5.\30\2\u00db\u00dc")
        buf.write("\5,\27\2\u00dc\u00df\3\2\2\2\u00dd\u00df\3\2\2\2\u00de")
        buf.write("\u00da\3\2\2\2\u00de\u00dd\3\2\2\2\u00df-\3\2\2\2\u00e0")
        buf.write("\u00e1\7\3\2\2\u00e1\u00e2\5\6\4\2\u00e2\u00e3\7$\2\2")
        buf.write("\u00e3\u00e4\7\5\2\2\u00e4/\3\2\2\2\u00e5\u00e6\7\3\2")
        buf.write("\2\u00e6\u00e7\7\22\2\2\u00e7\u00e8\5\6\4\2\u00e8\u00e9")
        buf.write("\5\66\34\2\u00e9\u00ea\5\24\13\2\u00ea\u00eb\5<\37\2\u00eb")
        buf.write("\u00ec\7\5\2\2\u00ec\61\3\2\2\2\u00ed\u00ee\7\3\2\2\u00ee")
        buf.write("\u00ef\7\23\2\2\u00ef\u00f0\5\6\4\2\u00f0\u00f1\7\3\2")
        buf.write("\2\u00f1\u00f2\5\64\33\2\u00f2\u00f3\7\5\2\2\u00f3\u00f4")
        buf.write("\5\24\13\2\u00f4\u00f5\7\5\2\2\u00f5\63\3\2\2\2\u00f6")
        buf.write("\u00f7\5\24\13\2\u00f7\u00f8\5\64\33\2\u00f8\u00fb\3\2")
        buf.write("\2\2\u00f9\u00fb\3\2\2\2\u00fa\u00f6\3\2\2\2\u00fa\u00f9")
        buf.write("\3\2\2\2\u00fb\65\3\2\2\2\u00fc\u00fd\7\3\2\2\u00fd\u00fe")
        buf.write("\58\35\2\u00fe\u00ff\7\5\2\2\u00ff\67\3\2\2\2\u0100\u0101")
        buf.write("\5:\36\2\u0101\u0102\58\35\2\u0102\u0105\3\2\2\2\u0103")
        buf.write("\u0105\3\2\2\2\u0104\u0100\3\2\2\2\u0104\u0103\3\2\2\2")
        buf.write("\u01059\3\2\2\2\u0106\u0107\7\3\2\2\u0107\u0108\5\6\4")
        buf.write("\2\u0108\u0109\5\24\13\2\u0109\u010a\7\5\2\2\u010a;\3")
        buf.write("\2\2\2\u010b\u010c\7\3\2\2\u010c\u010d\5\6\4\2\u010d\u010e")
        buf.write("\5F$\2\u010e\u010f\7\5\2\2\u010f\u0114\3\2\2\2\u0110\u0114")
        buf.write("\5H%\2\u0111\u0114\5\6\4\2\u0112\u0114\5> \2\u0113\u010b")
        buf.write("\3\2\2\2\u0113\u0110\3\2\2\2\u0113\u0111\3\2\2\2\u0113")
        buf.write("\u0112\3\2\2\2\u0114=\3\2\2\2\u0115\u0116\7\3\2\2\u0116")
        buf.write("\u0117\7\24\2\2\u0117\u0118\7\3\2\2\u0118\u0119\5@!\2")
        buf.write("\u0119\u011a\7\5\2\2\u011a\u011b\5<\37\2\u011b\u011c\7")
        buf.write("\5\2\2\u011c?\3\2\2\2\u011d\u011e\5D#\2\u011e\u011f\5")
        buf.write("B\"\2\u011fA\3\2\2\2\u0120\u0121\5D#\2\u0121\u0122\5B")
        buf.write("\"\2\u0122\u0125\3\2\2\2\u0123\u0125\3\2\2\2\u0124\u0120")
        buf.write("\3\2\2\2\u0124\u0123\3\2\2\2\u0125C\3\2\2\2\u0126\u0127")
        buf.write("\7\3\2\2\u0127\u0128\5\6\4\2\u0128\u0129\5\24\13\2\u0129")
        buf.write("\u012a\5<\37\2\u012a\u012b\7\5\2\2\u012bE\3\2\2\2\u012c")
        buf.write("\u012d\5<\37\2\u012d\u012e\5F$\2\u012e\u0131\3\2\2\2\u012f")
        buf.write("\u0131\3\2\2\2\u0130\u012c\3\2\2\2\u0130\u012f\3\2\2\2")
        buf.write("\u0131G\3\2\2\2\u0132\u0138\5\26\f\2\u0133\u0138\5\30")
        buf.write("\r\2\u0134\u0138\5\32\16\2\u0135\u0138\5\34\17\2\u0136")
        buf.write("\u0138\5\36\20\2\u0137\u0132\3\2\2\2\u0137\u0133\3\2\2")
        buf.write("\2\u0137\u0134\3\2\2\2\u0137\u0135\3\2\2\2\u0137\u0136")
        buf.write("\3\2\2\2\u0138I\3\2\2\2\u0139\u013a\5N(\2\u013a\u013b")
        buf.write("\5L\'\2\u013bK\3\2\2\2\u013c\u013d\5N(\2\u013d\u013e\5")
        buf.write("L\'\2\u013e\u0141\3\2\2\2\u013f\u0141\3\2\2\2\u0140\u013c")
        buf.write("\3\2\2\2\u0140\u013f\3\2\2\2\u0141M\3\2\2\2\u0142\u0143")
        buf.write("\7\3\2\2\u0143\u0144\5\6\4\2\u0144\u0145\5\24\13\2\u0145")
        buf.write("\u0146\7\3\2\2\u0146\u0147\5P)\2\u0147\u0148\7\5\2\2\u0148")
        buf.write("\u0149\7\5\2\2\u0149O\3\2\2\2\u014a\u014b\5Z.\2\u014b")
        buf.write("\u014c\5R*\2\u014cQ\3\2\2\2\u014d\u014e\5Z.\2\u014e\u014f")
        buf.write("\5R*\2\u014f\u0152\3\2\2\2\u0150\u0152\3\2\2\2\u0151\u014d")
        buf.write("\3\2\2\2\u0151\u0150\3\2\2\2\u0152S\3\2\2\2\u0153\u0154")
        buf.write("\7\3\2\2\u0154\u0155\7\25\2\2\u0155\u0156\7\5\2\2\u0156")
        buf.write("U\3\2\2\2\u0157\u0158\7\3\2\2\u0158\u0159\7\26\2\2\u0159")
        buf.write("\u015a\5<\37\2\u015a\u015b\7\5\2\2\u015bW\3\2\2\2\u015c")
        buf.write("\u015d\7\3\2\2\u015d\u015e\7\27\2\2\u015e\u015f\5\6\4")
        buf.write("\2\u015f\u0160\5\66\34\2\u0160\u0161\5\24\13\2\u0161\u0162")
        buf.write("\7\3\2\2\u0162\u0163\5J&\2\u0163\u0164\7\5\2\2\u0164\u0165")
        buf.write("\7\5\2\2\u0165\u016e\3\2\2\2\u0166\u0167\7\3\2\2\u0167")
        buf.write("\u0168\7\27\2\2\u0168\u0169\5\6\4\2\u0169\u016a\5\66\34")
        buf.write("\2\u016a\u016b\5\24\13\2\u016b\u016c\7\5\2\2\u016c\u016e")
        buf.write("\3\2\2\2\u016d\u015c\3\2\2\2\u016d\u0166\3\2\2\2\u016e")
        buf.write("Y\3\2\2\2\u016f\u018c\5\6\4\2\u0170\u018c\5H%\2\u0171")
        buf.write("\u0172\7\3\2\2\u0172\u0173\5\6\4\2\u0173\u0174\5d\63\2")
        buf.write("\u0174\u0175\7\5\2\2\u0175\u018c\3\2\2\2\u0176\u0177\7")
        buf.write("\3\2\2\u0177\u0178\7\30\2\2\u0178\u0179\5\24\13\2\u0179")
        buf.write("\u017a\7\5\2\2\u017a\u018c\3\2\2\2\u017b\u017c\7\3\2\2")
        buf.write("\u017c\u017d\7\31\2\2\u017d\u017e\5\24\13\2\u017e\u017f")
        buf.write("\7\5\2\2\u017f\u018c\3\2\2\2\u0180\u0181\7\3\2\2\u0181")
        buf.write("\u0182\7\32\2\2\u0182\u0183\5\24\13\2\u0183\u0184\7\5")
        buf.write("\2\2\u0184\u018c\3\2\2\2\u0185\u0186\7\3\2\2\u0186\u0187")
        buf.write("\7\33\2\2\u0187\u0188\5\24\13\2\u0188\u0189\7\5\2\2\u0189")
        buf.write("\u018c\3\2\2\2\u018a\u018c\5\\/\2\u018b\u016f\3\2\2\2")
        buf.write("\u018b\u0170\3\2\2\2\u018b\u0171\3\2\2\2\u018b\u0176\3")
        buf.write("\2\2\2\u018b\u017b\3\2\2\2\u018b\u0180\3\2\2\2\u018b\u0185")
        buf.write("\3\2\2\2\u018b\u018a\3\2\2\2\u018c[\3\2\2\2\u018d\u018e")
        buf.write("\7\3\2\2\u018e\u018f\7\24\2\2\u018f\u0190\7\3\2\2\u0190")
        buf.write("\u0191\5^\60\2\u0191\u0192\7\5\2\2\u0192\u0193\5Z.\2\u0193")
        buf.write("\u0194\7\5\2\2\u0194]\3\2\2\2\u0195\u0196\5b\62\2\u0196")
        buf.write("\u0197\5`\61\2\u0197_\3\2\2\2\u0198\u0199\5b\62\2\u0199")
        buf.write("\u019a\5`\61\2\u019a\u019d\3\2\2\2\u019b\u019d\3\2\2\2")
        buf.write("\u019c\u0198\3\2\2\2\u019c\u019b\3\2\2\2\u019da\3\2\2")
        buf.write("\2\u019e\u019f\7\3\2\2\u019f\u01a0\5\6\4\2\u01a0\u01a1")
        buf.write("\5\24\13\2\u01a1\u01a2\5Z.\2\u01a2\u01a3\7\5\2\2\u01a3")
        buf.write("c\3\2\2\2\u01a4\u01a5\5Z.\2\u01a5\u01a6\5d\63\2\u01a6")
        buf.write("\u01a9\3\2\2\2\u01a7\u01a9\3\2\2\2\u01a8\u01a4\3\2\2\2")
        buf.write("\u01a8\u01a7\3\2\2\2\u01a9e\3\2\2\2\u01aa\u01ab\7\3\2")
        buf.write("\2\u01ab\u01ac\7\34\2\2\u01ac\u01ad\5\6\4\2\u01ad\u01ae")
        buf.write("\5\66\34\2\u01ae\u01af\7\3\2\2\u01af\u01b0\5J&\2\u01b0")
        buf.write("\u01b1\7\5\2\2\u01b1\u01b2\7\5\2\2\u01b2\u01ba\3\2\2\2")
        buf.write("\u01b3\u01b4\7\3\2\2\u01b4\u01b5\7\34\2\2\u01b5\u01b6")
        buf.write("\5\6\4\2\u01b6\u01b7\5\66\34\2\u01b7\u01b8\7\5\2\2\u01b8")
        buf.write("\u01ba\3\2\2\2\u01b9\u01aa\3\2\2\2\u01b9\u01b3\3\2\2\2")
        buf.write("\u01bag\3\2\2\2\u01bb\u01bc\7\3\2\2\u01bc\u01bd\7\35\2")
        buf.write("\2\u01bd\u01be\5\6\4\2\u01be\u01bf\5\24\13\2\u01bf\u01c0")
        buf.write("\7\5\2\2\u01c0i\3\2\2\2\u01c1\u01c2\7\3\2\2\u01c2\u01c3")
        buf.write("\7\36\2\2\u01c3\u01c4\5\6\4\2\u01c4\u01c5\5\6\4\2\u01c5")
        buf.write("\u01c6\5\6\4\2\u01c6\u01c7\5\6\4\2\u01c7\u01c8\7\5\2\2")
        buf.write("\u01c8k\3\2\2\2\26nt\u0084\u0091\u00b3\u00cc\u00de\u00fa")
        buf.write("\u0104\u0113\u0124\u0130\u0137\u0140\u0151\u016d\u018b")
        buf.write("\u019c\u01a8\u01b9")
        # Return the full encoded payload as one string; the buffer is
        # closed by the `with` block after the value is taken.
        return buf.getvalue()
class SygusParser ( Parser ):
# --- Static parser tables (auto-generated by ANTLR 4.7.1; do not hand-edit,
# --- regenerate from Sygus.g4 instead). ---
grammarFileName = "Sygus.g4"

# Deserialize the ATN once at class-definition time; shared by all instances.
atn = ATNDeserializer().deserialize(serializedATN())

decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

sharedContextCache = PredictionContextCache()

# Display strings for tokens that have a fixed literal form.
literalNames = [ "<INVALID>", "'('", "'set-logic'", "')'", "'declare-var'",
                 "'define-sort'", "'BitVec'", "'Int'", "'Bool'", "'Real'",
                 "'Enum'", "'Array'", "'true'", "'false'", "'::'", "'set-options'",
                 "'define-fun'", "'declare-fun'", "'let'", "'check-synth'",
                 "'constraint'", "'synth-fun'", "'Constant'", "'Variable'",
                 "'InputVariable'", "'LocalVariable'", "'synth-inv'",
                 "'declare-primed-var'", "'inv-constraint'" ]

# Symbolic names for tokens; anonymous literal tokens show as "<INVALID>".
symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                  "<INVALID>", "WS", "COMMENT", "INTEGER", "BVCONST",
                  "REALCONST", "QUOTEDLIT", "SYMBOL" ]

# Grammar rule indices (one per rule in Sygus.g4, in declaration order).
RULE_start = 0
RULE_prog = 1
RULE_symbol = 2
RULE_setLogicCmd = 3
RULE_cmdPlus = 4
RULE_cmdPlusTail = 5
RULE_cmd = 6
RULE_varDeclCmd = 7
RULE_sortDefCmd = 8
RULE_sortExpr = 9
RULE_intConst = 10
RULE_boolConst = 11
RULE_bVConst = 12
RULE_enumConst = 13
RULE_realConst = 14
RULE_eCList = 15
RULE_symbolPlus = 16
RULE_symbolPlusTail = 17
RULE_setOptsCmd = 18
RULE_optList = 19
RULE_symbolPairPlus = 20
RULE_symbolPairPlusTail = 21
RULE_symbolPair = 22
RULE_funDefCmd = 23
RULE_funDeclCmd = 24
RULE_sortStar = 25
RULE_argList = 26
RULE_symbolSortPairStar = 27
RULE_symbolSortPair = 28
RULE_term = 29
RULE_letTerm = 30
RULE_letBindingTermPlus = 31
RULE_letBindingTermPlusTail = 32
RULE_letBindingTerm = 33
RULE_termStar = 34
RULE_literal = 35
RULE_nTDefPlus = 36
RULE_nTDefPlusTail = 37
RULE_nTDef = 38
RULE_gTermPlus = 39
RULE_gTermPlusTail = 40
RULE_checkSynthCmd = 41
RULE_constraintCmd = 42
RULE_synthFunCmd = 43
RULE_gTerm = 44
RULE_letGTerm = 45
RULE_letBindingGTermPlus = 46
RULE_letBindingGTermPlusTail = 47
RULE_letBindingGTerm = 48
RULE_gTermStar = 49
RULE_synthInvCmd = 50
RULE_declarePrimedVar = 51
RULE_invConstraintCmd = 52

# Rule names indexed by the RULE_* constants above.
ruleNames = [ "start", "prog", "symbol", "setLogicCmd", "cmdPlus",
              "cmdPlusTail", "cmd", "varDeclCmd", "sortDefCmd", "sortExpr",
              "intConst", "boolConst", "bVConst", "enumConst", "realConst",
              "eCList", "symbolPlus", "symbolPlusTail", "setOptsCmd",
              "optList", "symbolPairPlus", "symbolPairPlusTail", "symbolPair",
              "funDefCmd", "funDeclCmd", "sortStar", "argList", "symbolSortPairStar",
              "symbolSortPair", "term", "letTerm", "letBindingTermPlus",
              "letBindingTermPlusTail", "letBindingTerm", "termStar",
              "literal", "nTDefPlus", "nTDefPlusTail", "nTDef", "gTermPlus",
              "gTermPlusTail", "checkSynthCmd", "constraintCmd", "synthFunCmd",
              "gTerm", "letGTerm", "letBindingGTermPlus", "letBindingGTermPlusTail",
              "letBindingGTerm", "gTermStar", "synthInvCmd", "declarePrimedVar",
              "invConstraintCmd" ]

# Token type constants: T__N are the anonymous literal tokens (see
# literalNames for their spellings); the rest are named lexer rules.
EOF = Token.EOF
T__0=1
T__1=2
T__2=3
T__3=4
T__4=5
T__5=6
T__6=7
T__7=8
T__8=9
T__9=10
T__10=11
T__11=12
T__12=13
T__13=14
T__14=15
T__15=16
T__16=17
T__17=18
T__18=19
T__19=20
T__20=21
T__21=22
T__22=23
T__23=24
T__24=25
T__25=26
T__26=27
T__27=28
WS=29
COMMENT=30
INTEGER=31
BVCONST=32
REALCONST=33
QUOTEDLIT=34
SYMBOL=35

def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
    """Create a SygusParser over *input*, reporting errors to *output*."""
    super().__init__(input, output)
    # Refuse to run against an incompatible ANTLR runtime version.
    self.checkVersion("4.7.1")
    self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
    self._predicates = None
class StartContext(ParserRuleContext):
    """Parse-tree node for the `start` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def prog(self):
        # Child `prog` sub-rule context (None when the input was empty).
        return self.getTypedRuleContext(SygusParser.ProgContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_start

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterStart" ):
            listener.enterStart(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitStart" ):
            listener.exitStart(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitStart" ):
            return visitor.visitStart(self)
        else:
            return visitor.visitChildren(self)


def start(self):
    # start : prog | /* empty (EOF) */ ;  chosen by one token of lookahead.
    localctx = SygusParser.StartContext(self, self._ctx, self.state)
    self.enterRule(localctx, 0, self.RULE_start)
    try:
        self.state = 108
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # '(' begins a program.
            self.enterOuterAlt(localctx, 1)
            self.state = 106
            self.prog()
            pass
        elif token in [SygusParser.EOF]:
            # Empty input is accepted.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ProgContext(ParserRuleContext):
    """Parse-tree node for the `prog` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def setLogicCmd(self):
        return self.getTypedRuleContext(SygusParser.SetLogicCmdContext,0)

    def cmdPlus(self):
        return self.getTypedRuleContext(SygusParser.CmdPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_prog

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterProg" ):
            listener.enterProg(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitProg" ):
            listener.exitProg(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitProg" ):
            return visitor.visitProg(self)
        else:
            return visitor.visitChildren(self)


def prog(self):
    # prog : setLogicCmd cmdPlus | cmdPlus ;  needs adaptive prediction
    # because both alternatives start with '('.
    localctx = SygusParser.ProgContext(self, self._ctx, self.state)
    self.enterRule(localctx, 2, self.RULE_prog)
    try:
        self.state = 114
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,1,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 110
            self.setLogicCmd()
            self.state = 111
            self.cmdPlus()
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 113
            self.cmdPlus()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolContext(ParserRuleContext):
    """Parse-tree node for the `symbol` rule (a single SYMBOL token)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def SYMBOL(self):
        return self.getToken(SygusParser.SYMBOL, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbol

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbol" ):
            listener.enterSymbol(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbol" ):
            listener.exitSymbol(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbol" ):
            return visitor.visitSymbol(self)
        else:
            return visitor.visitChildren(self)


def symbol(self):
    # symbol : SYMBOL ;
    localctx = SygusParser.SymbolContext(self, self._ctx, self.state)
    self.enterRule(localctx, 4, self.RULE_symbol)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 116
        self.match(SygusParser.SYMBOL)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SetLogicCmdContext(ParserRuleContext):
    """Parse-tree node for the `setLogicCmd` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # The logic name.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_setLogicCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSetLogicCmd" ):
            listener.enterSetLogicCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSetLogicCmd" ):
            listener.exitSetLogicCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSetLogicCmd" ):
            return visitor.visitSetLogicCmd(self)
        else:
            return visitor.visitChildren(self)


def setLogicCmd(self):
    # setLogicCmd : '(' 'set-logic' symbol ')' ;
    localctx = SygusParser.SetLogicCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 6, self.RULE_setLogicCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 118
        self.match(SygusParser.T__0)
        self.state = 119
        self.match(SygusParser.T__1)
        self.state = 120
        self.symbol()
        self.state = 121
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CmdPlusContext(ParserRuleContext):
    """Parse-tree node for the `cmdPlus` rule (one-or-more commands)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def cmd(self):
        return self.getTypedRuleContext(SygusParser.CmdContext,0)

    def cmdPlusTail(self):
        return self.getTypedRuleContext(SygusParser.CmdPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_cmdPlus

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterCmdPlus" ):
            listener.enterCmdPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitCmdPlus" ):
            listener.exitCmdPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitCmdPlus" ):
            return visitor.visitCmdPlus(self)
        else:
            return visitor.visitChildren(self)


def cmdPlus(self):
    # cmdPlus : cmd cmdPlusTail ;  ("one or more" via right recursion)
    localctx = SygusParser.CmdPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 8, self.RULE_cmdPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 123
        self.cmd()
        self.state = 124
        self.cmdPlusTail()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CmdPlusTailContext(ParserRuleContext):
    """Parse-tree node for the `cmdPlusTail` rule (remaining commands)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def cmd(self):
        return self.getTypedRuleContext(SygusParser.CmdContext,0)

    def cmdPlusTail(self):
        return self.getTypedRuleContext(SygusParser.CmdPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_cmdPlusTail

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterCmdPlusTail" ):
            listener.enterCmdPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitCmdPlusTail" ):
            listener.exitCmdPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitCmdPlusTail" ):
            return visitor.visitCmdPlusTail(self)
        else:
            return visitor.visitChildren(self)


def cmdPlusTail(self):
    # cmdPlusTail : cmd cmdPlusTail | /* empty at EOF */ ;
    localctx = SygusParser.CmdPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_cmdPlusTail)
    try:
        self.state = 130
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # '(' starts another command.
            self.enterOuterAlt(localctx, 1)
            self.state = 126
            self.cmd()
            self.state = 127
            self.cmdPlusTail()
            pass
        elif token in [SygusParser.EOF]:
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CmdContext(ParserRuleContext):
    """Parse-tree node for the `cmd` rule (any single SyGuS command)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Exactly one of the following child accessors returns non-None,
    # depending on which command alternative was matched.
    def funDefCmd(self):
        return self.getTypedRuleContext(SygusParser.FunDefCmdContext,0)

    def funDeclCmd(self):
        return self.getTypedRuleContext(SygusParser.FunDeclCmdContext,0)

    def synthFunCmd(self):
        return self.getTypedRuleContext(SygusParser.SynthFunCmdContext,0)

    def checkSynthCmd(self):
        return self.getTypedRuleContext(SygusParser.CheckSynthCmdContext,0)

    def constraintCmd(self):
        return self.getTypedRuleContext(SygusParser.ConstraintCmdContext,0)

    def sortDefCmd(self):
        return self.getTypedRuleContext(SygusParser.SortDefCmdContext,0)

    def setOptsCmd(self):
        return self.getTypedRuleContext(SygusParser.SetOptsCmdContext,0)

    def varDeclCmd(self):
        return self.getTypedRuleContext(SygusParser.VarDeclCmdContext,0)

    def synthInvCmd(self):
        return self.getTypedRuleContext(SygusParser.SynthInvCmdContext,0)

    def declarePrimedVar(self):
        return self.getTypedRuleContext(SygusParser.DeclarePrimedVarContext,0)

    def invConstraintCmd(self):
        return self.getTypedRuleContext(SygusParser.InvConstraintCmdContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_cmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterCmd" ):
            listener.enterCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitCmd" ):
            listener.exitCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitCmd" ):
            return visitor.visitCmd(self)
        else:
            return visitor.visitChildren(self)


def cmd(self):
    # cmd : funDefCmd | funDeclCmd | synthFunCmd | checkSynthCmd
    #     | constraintCmd | sortDefCmd | setOptsCmd | varDeclCmd
    #     | synthInvCmd | declarePrimedVar | invConstraintCmd ;
    # All alternatives start with '(', so the choice needs adaptive prediction.
    localctx = SygusParser.CmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_cmd)
    try:
        self.state = 143
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 132
            self.funDefCmd()
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 133
            self.funDeclCmd()
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 134
            self.synthFunCmd()
            pass
        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 135
            self.checkSynthCmd()
            pass
        elif la_ == 5:
            self.enterOuterAlt(localctx, 5)
            self.state = 136
            self.constraintCmd()
            pass
        elif la_ == 6:
            self.enterOuterAlt(localctx, 6)
            self.state = 137
            self.sortDefCmd()
            pass
        elif la_ == 7:
            self.enterOuterAlt(localctx, 7)
            self.state = 138
            self.setOptsCmd()
            pass
        elif la_ == 8:
            self.enterOuterAlt(localctx, 8)
            self.state = 139
            self.varDeclCmd()
            pass
        elif la_ == 9:
            self.enterOuterAlt(localctx, 9)
            self.state = 140
            self.synthInvCmd()
            pass
        elif la_ == 10:
            self.enterOuterAlt(localctx, 10)
            self.state = 141
            self.declarePrimedVar()
            pass
        elif la_ == 11:
            self.enterOuterAlt(localctx, 11)
            self.state = 142
            self.invConstraintCmd()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class VarDeclCmdContext(ParserRuleContext):
    """Parse-tree node for the `varDeclCmd` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # The declared variable's name.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        # The declared variable's sort.
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_varDeclCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterVarDeclCmd" ):
            listener.enterVarDeclCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitVarDeclCmd" ):
            listener.exitVarDeclCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitVarDeclCmd" ):
            return visitor.visitVarDeclCmd(self)
        else:
            return visitor.visitChildren(self)


def varDeclCmd(self):
    # varDeclCmd : '(' 'declare-var' symbol sortExpr ')' ;
    localctx = SygusParser.VarDeclCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_varDeclCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 145
        self.match(SygusParser.T__0)
        self.state = 146
        self.match(SygusParser.T__3)
        self.state = 147
        self.symbol()
        self.state = 148
        self.sortExpr()
        self.state = 149
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SortDefCmdContext(ParserRuleContext):
    """Parse-tree node for the `sortDefCmd` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # The new sort's name.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        # The sort being aliased.
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_sortDefCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSortDefCmd" ):
            listener.enterSortDefCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSortDefCmd" ):
            listener.exitSortDefCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSortDefCmd" ):
            return visitor.visitSortDefCmd(self)
        else:
            return visitor.visitChildren(self)


def sortDefCmd(self):
    # sortDefCmd : '(' 'define-sort' symbol sortExpr ')' ;
    localctx = SygusParser.SortDefCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_sortDefCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 151
        self.match(SygusParser.T__0)
        self.state = 152
        self.match(SygusParser.T__4)
        self.state = 153
        self.symbol()
        self.state = 154
        self.sortExpr()
        self.state = 155
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SortExprContext(ParserRuleContext):
    """Parse-tree node for the `sortExpr` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def intConst(self):
        # BitVec width (only for the BitVec alternative).
        return self.getTypedRuleContext(SygusParser.IntConstContext,0)

    def eCList(self):
        # Enum constructor list (only for the Enum alternative).
        return self.getTypedRuleContext(SygusParser.ECListContext,0)

    def sortExpr(self, i:int=None):
        # Nested sorts: index/element sorts of an Array.
        if i is None:
            return self.getTypedRuleContexts(SygusParser.SortExprContext)
        else:
            return self.getTypedRuleContext(SygusParser.SortExprContext,i)

    def symbol(self):
        # User-defined sort name (last alternative).
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_sortExpr

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSortExpr" ):
            listener.enterSortExpr(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSortExpr" ):
            listener.exitSortExpr(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSortExpr" ):
            return visitor.visitSortExpr(self)
        else:
            return visitor.visitChildren(self)


def sortExpr(self):
    # sortExpr : '(' 'BitVec' intConst ')'
    #          | 'Int' | 'Bool' | 'Real'
    #          | '(' 'Enum' eCList ')'
    #          | '(' 'Array' sortExpr sortExpr ')'
    #          | symbol ;
    localctx = SygusParser.SortExprContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_sortExpr)
    try:
        self.state = 177
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,4,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 157
            self.match(SygusParser.T__0)
            self.state = 158
            self.match(SygusParser.T__5)
            self.state = 159
            self.intConst()
            self.state = 160
            self.match(SygusParser.T__2)
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 162
            self.match(SygusParser.T__6)
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 163
            self.match(SygusParser.T__7)
            pass
        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 164
            self.match(SygusParser.T__8)
            pass
        elif la_ == 5:
            self.enterOuterAlt(localctx, 5)
            self.state = 165
            self.match(SygusParser.T__0)
            self.state = 166
            self.match(SygusParser.T__9)
            self.state = 167
            self.eCList()
            self.state = 168
            self.match(SygusParser.T__2)
            pass
        elif la_ == 6:
            self.enterOuterAlt(localctx, 6)
            self.state = 170
            self.match(SygusParser.T__0)
            self.state = 171
            self.match(SygusParser.T__10)
            self.state = 172
            self.sortExpr()
            self.state = 173
            self.sortExpr()
            self.state = 174
            self.match(SygusParser.T__2)
            pass
        elif la_ == 7:
            self.enterOuterAlt(localctx, 7)
            self.state = 176
            self.symbol()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IntConstContext(ParserRuleContext):
    """Parse-tree node for the `intConst` rule (a single INTEGER token)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def INTEGER(self):
        return self.getToken(SygusParser.INTEGER, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_intConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterIntConst" ):
            listener.enterIntConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitIntConst" ):
            listener.exitIntConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitIntConst" ):
            return visitor.visitIntConst(self)
        else:
            return visitor.visitChildren(self)


def intConst(self):
    # intConst : INTEGER ;
    localctx = SygusParser.IntConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_intConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 179
        self.match(SygusParser.INTEGER)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BoolConstContext(ParserRuleContext):
    """Parse-tree node for the `boolConst` rule ('true' or 'false')."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SygusParser.RULE_boolConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterBoolConst" ):
            listener.enterBoolConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitBoolConst" ):
            listener.exitBoolConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitBoolConst" ):
            return visitor.visitBoolConst(self)
        else:
            return visitor.visitChildren(self)


def boolConst(self):
    # boolConst : 'true' | 'false' ;  matched as a token set, not alternatives.
    localctx = SygusParser.BoolConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_boolConst)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 181
        _la = self._input.LA(1)
        if not(_la==SygusParser.T__11 or _la==SygusParser.T__12):
            # Neither 'true' nor 'false': attempt single-token recovery.
            self._errHandler.recoverInline(self)
        else:
            self._errHandler.reportMatch(self)
            self.consume()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class BVConstContext(ParserRuleContext):
    """Parse-tree node for the `bVConst` rule (a single BVCONST token)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def BVCONST(self):
        return self.getToken(SygusParser.BVCONST, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_bVConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterBVConst" ):
            listener.enterBVConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitBVConst" ):
            listener.exitBVConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitBVConst" ):
            return visitor.visitBVConst(self)
        else:
            return visitor.visitChildren(self)


def bVConst(self):
    # bVConst : BVCONST ;
    localctx = SygusParser.BVConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_bVConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 183
        self.match(SygusParser.BVCONST)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class EnumConstContext(ParserRuleContext):
    """Parse-tree node for the `enumConst` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self, i:int=None):
        # Two symbols: enum type name (0) and member name (1).
        if i is None:
            return self.getTypedRuleContexts(SygusParser.SymbolContext)
        else:
            return self.getTypedRuleContext(SygusParser.SymbolContext,i)

    def getRuleIndex(self):
        return SygusParser.RULE_enumConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterEnumConst" ):
            listener.enterEnumConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitEnumConst" ):
            listener.exitEnumConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitEnumConst" ):
            return visitor.visitEnumConst(self)
        else:
            return visitor.visitChildren(self)


def enumConst(self):
    # enumConst : symbol '::' symbol ;
    localctx = SygusParser.EnumConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_enumConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 185
        self.symbol()
        self.state = 186
        self.match(SygusParser.T__13)
        self.state = 187
        self.symbol()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class RealConstContext(ParserRuleContext):
    """Parse-tree node for the `realConst` rule (a single REALCONST token)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def REALCONST(self):
        return self.getToken(SygusParser.REALCONST, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_realConst

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterRealConst" ):
            listener.enterRealConst(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitRealConst" ):
            listener.exitRealConst(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitRealConst" ):
            return visitor.visitRealConst(self)
        else:
            return visitor.visitChildren(self)


def realConst(self):
    # realConst : REALCONST ;
    localctx = SygusParser.RealConstContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_realConst)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 189
        self.match(SygusParser.REALCONST)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ECListContext(ParserRuleContext):
    """Parse-tree node for the `eCList` rule (enum constructor list)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPlus(self):
        return self.getTypedRuleContext(SygusParser.SymbolPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_eCList

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterECList" ):
            listener.enterECList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitECList" ):
            listener.exitECList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitECList" ):
            return visitor.visitECList(self)
        else:
            return visitor.visitChildren(self)


def eCList(self):
    # eCList : '(' symbolPlus ')' ;
    localctx = SygusParser.ECListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_eCList)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 191
        self.match(SygusParser.T__0)
        self.state = 192
        self.symbolPlus()
        self.state = 193
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPlusContext(ParserRuleContext):
    """Parse-tree node for the `symbolPlus` rule (one-or-more symbols)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def symbolPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPlus

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPlus" ):
            listener.enterSymbolPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPlus" ):
            listener.exitSymbolPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPlus" ):
            return visitor.visitSymbolPlus(self)
        else:
            return visitor.visitChildren(self)


def symbolPlus(self):
    # symbolPlus : symbol symbolPlusTail ;  ("one or more" via right recursion)
    localctx = SygusParser.SymbolPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_symbolPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 195
        self.symbol()
        self.state = 196
        self.symbolPlusTail()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPlusTailContext(ParserRuleContext):
    """Parse-tree node for the `symbolPlusTail` rule (remaining symbols)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def symbolPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPlusTail

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSymbolPlusTail" ):
            listener.enterSymbolPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPlusTail" ):
            listener.exitSymbolPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPlusTail" ):
            return visitor.visitSymbolPlusTail(self)
        else:
            return visitor.visitChildren(self)


def symbolPlusTail(self):
    # symbolPlusTail : symbol symbolPlusTail | /* empty before ')' */ ;
    localctx = SygusParser.SymbolPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 34, self.RULE_symbolPlusTail)
    try:
        self.state = 202
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.SYMBOL]:
            self.enterOuterAlt(localctx, 1)
            self.state = 198
            self.symbol()
            self.state = 199
            self.symbolPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # ')' terminates the list.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SetOptsCmdContext(ParserRuleContext):
    """Parse-tree node for the `setOptsCmd` rule."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def optList(self):
        return self.getTypedRuleContext(SygusParser.OptListContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_setOptsCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSetOptsCmd" ):
            listener.enterSetOptsCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSetOptsCmd" ):
            listener.exitSetOptsCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSetOptsCmd" ):
            return visitor.visitSetOptsCmd(self)
        else:
            return visitor.visitChildren(self)


def setOptsCmd(self):
    # setOptsCmd : '(' 'set-options' optList ')' ;
    localctx = SygusParser.SetOptsCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 36, self.RULE_setOptsCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 204
        self.match(SygusParser.T__0)
        self.state = 205
        self.match(SygusParser.T__14)
        self.state = 206
        self.optList()
        self.state = 207
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class OptListContext(ParserRuleContext):
    """Parse-tree node for the `optList` rule (option key/value pairs)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPairPlus(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_optList

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterOptList" ):
            listener.enterOptList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitOptList" ):
            listener.exitOptList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitOptList" ):
            return visitor.visitOptList(self)
        else:
            return visitor.visitChildren(self)


def optList(self):
    # optList : '(' symbolPairPlus ')' ;
    localctx = SygusParser.OptListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 38, self.RULE_optList)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 209
        self.match(SygusParser.T__0)
        self.state = 210
        self.symbolPairPlus()
        self.state = 211
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPairPlusContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPairPlus' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPair(self):
        # Typed accessor for the symbolPair child.
        return self.getTypedRuleContext(SygusParser.SymbolPairContext,0)

    def symbolPairPlusTail(self):
        # Typed accessor for the tail (remaining pairs) child.
        return self.getTypedRuleContext(SygusParser.SymbolPairPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPairPlus

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterSymbolPairPlus" ):
            listener.enterSymbolPairPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPairPlus" ):
            listener.exitSymbolPairPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPairPlus" ):
            return visitor.visitSymbolPairPlus(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def symbolPairPlus(self):
    """Parse the 'symbolPairPlus' rule: symbolPair followed by its tail.

    One-or-more repetition is encoded as head + recursive tail rule
    (states 213-214). ANTLR-generated.
    """
    localctx = SygusParser.SymbolPairPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 40, self.RULE_symbolPairPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 213
        self.symbolPair()
        self.state = 214
        self.symbolPairPlusTail()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPairPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPairPlusTail' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolPair(self):
        # Typed accessor for the symbolPair child (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.SymbolPairContext,0)

    def symbolPairPlusTail(self):
        return self.getTypedRuleContext(SygusParser.SymbolPairPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPairPlusTail

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterSymbolPairPlusTail" ):
            listener.enterSymbolPairPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPairPlusTail" ):
            listener.exitSymbolPairPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPairPlusTail" ):
            return visitor.visitSymbolPairPlusTail(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def symbolPairPlusTail(self):
    """Parse the 'symbolPairPlusTail' rule (ANTLR-generated).

    One-token lookahead decides between recursing on another symbolPair
    (next token is T__0) and the empty alternative (next token is T__2).
    """
    localctx = SygusParser.SymbolPairPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 42, self.RULE_symbolPairPlusTail)
    try:
        self.state = 220
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # Alt 1: consume one more symbolPair, then recurse on the tail.
            self.enterOuterAlt(localctx, 1)
            self.state = 216
            self.symbolPair()
            self.state = 217
            self.symbolPairPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty — the enclosing list is closed by the caller.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolPairContext(ParserRuleContext):
    """Parse-tree node for the 'symbolPair' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Typed accessor for the symbol child.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def QUOTEDLIT(self):
        # Terminal accessor for the quoted-literal token.
        return self.getToken(SygusParser.QUOTEDLIT, 0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolPair

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterSymbolPair" ):
            listener.enterSymbolPair(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolPair" ):
            listener.exitSymbolPair(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolPair" ):
            return visitor.visitSymbolPair(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def symbolPair(self):
    """Parse the 'symbolPair' rule: T__0, symbol, QUOTEDLIT, T__2
    (states 222-225). ANTLR-generated.
    """
    localctx = SygusParser.SymbolPairContext(self, self._ctx, self.state)
    self.enterRule(localctx, 44, self.RULE_symbolPair)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 222
        self.match(SygusParser.T__0)
        self.state = 223
        self.symbol()
        self.state = 224
        self.match(SygusParser.QUOTEDLIT)
        self.state = 225
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunDefCmdContext(ParserRuleContext):
    """Parse-tree node for the 'funDefCmd' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Typed accessors for the rule's children.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def argList(self):
        return self.getTypedRuleContext(SygusParser.ArgListContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def term(self):
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_funDefCmd

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterFunDefCmd" ):
            listener.enterFunDefCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitFunDefCmd" ):
            listener.exitFunDefCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitFunDefCmd" ):
            return visitor.visitFunDefCmd(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def funDefCmd(self):
    """Parse the 'funDefCmd' rule (ANTLR-generated).

    Fixed sequence: T__0, T__15, symbol, argList, sortExpr, term, T__2
    (states 227-233).
    """
    localctx = SygusParser.FunDefCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 46, self.RULE_funDefCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 227
        self.match(SygusParser.T__0)
        self.state = 228
        self.match(SygusParser.T__15)
        self.state = 229
        self.symbol()
        self.state = 230
        self.argList()
        self.state = 231
        self.sortExpr()
        self.state = 232
        self.term()
        self.state = 233
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunDeclCmdContext(ParserRuleContext):
    """Parse-tree node for the 'funDeclCmd' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Typed accessors for the rule's children.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortStar(self):
        return self.getTypedRuleContext(SygusParser.SortStarContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_funDeclCmd

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterFunDeclCmd" ):
            listener.enterFunDeclCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitFunDeclCmd" ):
            listener.exitFunDeclCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitFunDeclCmd" ):
            return visitor.visitFunDeclCmd(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def funDeclCmd(self):
    """Parse the 'funDeclCmd' rule (ANTLR-generated).

    Fixed sequence: T__0, T__16, symbol, T__0, sortStar, T__2, sortExpr,
    T__2 (states 235-242) — i.e. a nested sort list followed by the
    result sort.
    """
    localctx = SygusParser.FunDeclCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 48, self.RULE_funDeclCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 235
        self.match(SygusParser.T__0)
        self.state = 236
        self.match(SygusParser.T__16)
        self.state = 237
        self.symbol()
        self.state = 238
        self.match(SygusParser.T__0)
        self.state = 239
        self.sortStar()
        self.state = 240
        self.match(SygusParser.T__2)
        self.state = 241
        self.sortExpr()
        self.state = 242
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SortStarContext(ParserRuleContext):
    """Parse-tree node for the 'sortStar' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def sortExpr(self):
        # Typed accessor for the sortExpr child (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def sortStar(self):
        return self.getTypedRuleContext(SygusParser.SortStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_sortStar

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterSortStar" ):
            listener.enterSortStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSortStar" ):
            listener.exitSortStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSortStar" ):
            return visitor.visitSortStar(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def sortStar(self):
    """Parse the 'sortStar' rule: zero or more sortExpr (ANTLR-generated).

    One-token lookahead: a sortExpr start token (T__0, T__6, T__7, T__8,
    SYMBOL) recurses; T__2 takes the empty alternative.
    """
    localctx = SygusParser.SortStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 50, self.RULE_sortStar)
    try:
        self.state = 248
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0, SygusParser.T__6, SygusParser.T__7, SygusParser.T__8, SygusParser.SYMBOL]:
            # Alt 1: consume one sortExpr, then recurse for the rest.
            self.enterOuterAlt(localctx, 1)
            self.state = 244
            self.sortExpr()
            self.state = 245
            self.sortStar()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty — list is closed by the caller.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ArgListContext(ParserRuleContext):
    """Parse-tree node for the 'argList' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolSortPairStar(self):
        # Typed accessor for the symbolSortPairStar child.
        return self.getTypedRuleContext(SygusParser.SymbolSortPairStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_argList

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterArgList" ):
            listener.enterArgList(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitArgList" ):
            listener.exitArgList(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitArgList" ):
            return visitor.visitArgList(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def argList(self):
    """Parse the 'argList' rule: T__0, symbolSortPairStar, T__2
    (states 250-252). ANTLR-generated.
    """
    localctx = SygusParser.ArgListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 52, self.RULE_argList)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 250
        self.match(SygusParser.T__0)
        self.state = 251
        self.symbolSortPairStar()
        self.state = 252
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolSortPairStarContext(ParserRuleContext):
    """Parse-tree node for the 'symbolSortPairStar' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbolSortPair(self):
        # Typed accessor for the symbolSortPair child (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.SymbolSortPairContext,0)

    def symbolSortPairStar(self):
        return self.getTypedRuleContext(SygusParser.SymbolSortPairStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolSortPairStar

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterSymbolSortPairStar" ):
            listener.enterSymbolSortPairStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolSortPairStar" ):
            listener.exitSymbolSortPairStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolSortPairStar" ):
            return visitor.visitSymbolSortPairStar(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def symbolSortPairStar(self):
    """Parse the 'symbolSortPairStar' rule: zero or more symbolSortPair
    (ANTLR-generated). T__0 recurses; T__2 takes the empty alternative.
    """
    localctx = SygusParser.SymbolSortPairStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 54, self.RULE_symbolSortPairStar)
    try:
        self.state = 258
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # Alt 1: consume one symbolSortPair, then recurse for the rest.
            self.enterOuterAlt(localctx, 1)
            self.state = 254
            self.symbolSortPair()
            self.state = 255
            self.symbolSortPairStar()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty — list is closed by the caller.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SymbolSortPairContext(ParserRuleContext):
    """Parse-tree node for the 'symbolSortPair' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Typed accessors for the rule's children.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_symbolSortPair

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterSymbolSortPair" ):
            listener.enterSymbolSortPair(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSymbolSortPair" ):
            listener.exitSymbolSortPair(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSymbolSortPair" ):
            return visitor.visitSymbolSortPair(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def symbolSortPair(self):
    """Parse the 'symbolSortPair' rule: T__0, symbol, sortExpr, T__2
    (states 260-263). ANTLR-generated.
    """
    localctx = SygusParser.SymbolSortPairContext(self, self._ctx, self.state)
    self.enterRule(localctx, 56, self.RULE_symbolSortPair)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 260
        self.match(SygusParser.T__0)
        self.state = 261
        self.symbol()
        self.state = 262
        self.sortExpr()
        self.state = 263
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TermContext(ParserRuleContext):
    """Parse-tree node for the 'term' grammar rule (ANTLR-generated).

    Exactly one alternative's accessors are populated per node:
    application (symbol + termStar), literal, bare symbol, or letTerm.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def termStar(self):
        return self.getTypedRuleContext(SygusParser.TermStarContext,0)

    def literal(self):
        return self.getTypedRuleContext(SygusParser.LiteralContext,0)

    def letTerm(self):
        return self.getTypedRuleContext(SygusParser.LetTermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_term

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterTerm" ):
            listener.enterTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTerm" ):
            listener.exitTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTerm" ):
            return visitor.visitTerm(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def term(self):
    """Parse the 'term' rule (ANTLR-generated).

    Alternatives are disambiguated by full ATN adaptive prediction
    (decision 9), not simple lookahead, because an application and a
    letTerm both start with T__0:
      1: T__0 symbol termStar T__2   (function application)
      2: literal
      3: symbol
      4: letTerm
    """
    localctx = SygusParser.TermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 58, self.RULE_term)
    try:
        self.state = 273
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,9,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 265
            self.match(SygusParser.T__0)
            self.state = 266
            self.symbol()
            self.state = 267
            self.termStar()
            self.state = 268
            self.match(SygusParser.T__2)
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 270
            self.literal()
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 271
            self.symbol()
            pass
        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 272
            self.letTerm()
            pass
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetTermContext(ParserRuleContext):
    """Parse-tree node for the 'letTerm' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingTermPlus(self):
        # Typed accessors for the rule's children.
        return self.getTypedRuleContext(SygusParser.LetBindingTermPlusContext,0)

    def term(self):
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letTerm

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterLetTerm" ):
            listener.enterLetTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetTerm" ):
            listener.exitLetTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetTerm" ):
            return visitor.visitLetTerm(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def letTerm(self):
    """Parse the 'letTerm' rule (ANTLR-generated).

    Fixed sequence: T__0, T__17, T__0, letBindingTermPlus, T__2, term,
    T__2 (states 275-281) — a binding list followed by the body term.
    """
    localctx = SygusParser.LetTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 60, self.RULE_letTerm)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 275
        self.match(SygusParser.T__0)
        self.state = 276
        self.match(SygusParser.T__17)
        self.state = 277
        self.match(SygusParser.T__0)
        self.state = 278
        self.letBindingTermPlus()
        self.state = 279
        self.match(SygusParser.T__2)
        self.state = 280
        self.term()
        self.state = 281
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingTermPlusContext(ParserRuleContext):
    """Parse-tree node for the 'letBindingTermPlus' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingTerm(self):
        # Typed accessors for head binding and recursive tail.
        return self.getTypedRuleContext(SygusParser.LetBindingTermContext,0)

    def letBindingTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingTermPlus

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterLetBindingTermPlus" ):
            listener.enterLetBindingTermPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingTermPlus" ):
            listener.exitLetBindingTermPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingTermPlus" ):
            return visitor.visitLetBindingTermPlus(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def letBindingTermPlus(self):
    """Parse the 'letBindingTermPlus' rule: one letBindingTerm followed by
    its tail (states 283-284). ANTLR-generated.
    """
    localctx = SygusParser.LetBindingTermPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 62, self.RULE_letBindingTermPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 283
        self.letBindingTerm()
        self.state = 284
        self.letBindingTermPlusTail()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingTermPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'letBindingTermPlusTail' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingTerm(self):
        # Typed accessor (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.LetBindingTermContext,0)

    def letBindingTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.LetBindingTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingTermPlusTail

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterLetBindingTermPlusTail" ):
            listener.enterLetBindingTermPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingTermPlusTail" ):
            listener.exitLetBindingTermPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingTermPlusTail" ):
            return visitor.visitLetBindingTermPlusTail(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def letBindingTermPlusTail(self):
    """Parse the 'letBindingTermPlusTail' rule (ANTLR-generated).

    T__0 recurses on another letBindingTerm; T__2 takes the empty
    alternative (binding list is closed by the caller).
    """
    localctx = SygusParser.LetBindingTermPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 64, self.RULE_letBindingTermPlusTail)
    try:
        self.state = 290
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # Alt 1: consume one more binding, then recurse on the tail.
            self.enterOuterAlt(localctx, 1)
            self.state = 286
            self.letBindingTerm()
            self.state = 287
            self.letBindingTermPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingTermContext(ParserRuleContext):
    """Parse-tree node for the 'letBindingTerm' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Typed accessors for the rule's children.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def term(self):
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingTerm

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterLetBindingTerm" ):
            listener.enterLetBindingTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingTerm" ):
            listener.exitLetBindingTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingTerm" ):
            return visitor.visitLetBindingTerm(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def letBindingTerm(self):
    """Parse the 'letBindingTerm' rule: T__0, symbol, sortExpr, term, T__2
    (states 292-296). ANTLR-generated.
    """
    localctx = SygusParser.LetBindingTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 66, self.RULE_letBindingTerm)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 292
        self.match(SygusParser.T__0)
        self.state = 293
        self.symbol()
        self.state = 294
        self.sortExpr()
        self.state = 295
        self.term()
        self.state = 296
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class TermStarContext(ParserRuleContext):
    """Parse-tree node for the 'termStar' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def term(self):
        # Typed accessor (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def termStar(self):
        return self.getTypedRuleContext(SygusParser.TermStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_termStar

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterTermStar" ):
            listener.enterTermStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitTermStar" ):
            listener.exitTermStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitTermStar" ):
            return visitor.visitTermStar(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def termStar(self):
    """Parse the 'termStar' rule: zero or more term (ANTLR-generated).

    Any token that can start a term (T__0, T__11, T__12, INTEGER,
    BVCONST, REALCONST, SYMBOL) recurses; T__2 takes the empty
    alternative.
    """
    localctx = SygusParser.TermStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 68, self.RULE_termStar)
    try:
        self.state = 302
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
            # Alt 1: consume one term, then recurse for the rest.
            self.enterOuterAlt(localctx, 1)
            self.state = 298
            self.term()
            self.state = 299
            self.termStar()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty — list is closed by the caller.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LiteralContext(ParserRuleContext):
    """Parse-tree node for the 'literal' grammar rule (ANTLR-generated).

    Exactly one of the typed accessors is populated per node, matching
    the alternative that was parsed.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def intConst(self):
        return self.getTypedRuleContext(SygusParser.IntConstContext,0)

    def boolConst(self):
        return self.getTypedRuleContext(SygusParser.BoolConstContext,0)

    def bVConst(self):
        return self.getTypedRuleContext(SygusParser.BVConstContext,0)

    def enumConst(self):
        return self.getTypedRuleContext(SygusParser.EnumConstContext,0)

    def realConst(self):
        return self.getTypedRuleContext(SygusParser.RealConstContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_literal

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterLiteral" ):
            listener.enterLiteral(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLiteral" ):
            listener.exitLiteral(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLiteral" ):
            return visitor.visitLiteral(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def literal(self):
    """Parse the 'literal' rule (ANTLR-generated).

    Dispatches on one token of lookahead:
      INTEGER -> intConst, T__11/T__12 -> boolConst, BVCONST -> bVConst,
      SYMBOL -> enumConst, REALCONST -> realConst.
    """
    localctx = SygusParser.LiteralContext(self, self._ctx, self.state)
    self.enterRule(localctx, 70, self.RULE_literal)
    try:
        self.state = 309
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.INTEGER]:
            self.enterOuterAlt(localctx, 1)
            self.state = 304
            self.intConst()
            pass
        elif token in [SygusParser.T__11, SygusParser.T__12]:
            self.enterOuterAlt(localctx, 2)
            self.state = 305
            self.boolConst()
            pass
        elif token in [SygusParser.BVCONST]:
            self.enterOuterAlt(localctx, 3)
            self.state = 306
            self.bVConst()
            pass
        elif token in [SygusParser.SYMBOL]:
            self.enterOuterAlt(localctx, 4)
            self.state = 307
            self.enumConst()
            pass
        elif token in [SygusParser.REALCONST]:
            self.enterOuterAlt(localctx, 5)
            self.state = 308
            self.realConst()
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NTDefPlusContext(ParserRuleContext):
    """Parse-tree node for the 'nTDefPlus' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def nTDef(self):
        # Typed accessors for head definition and recursive tail.
        return self.getTypedRuleContext(SygusParser.NTDefContext,0)

    def nTDefPlusTail(self):
        return self.getTypedRuleContext(SygusParser.NTDefPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_nTDefPlus

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterNTDefPlus" ):
            listener.enterNTDefPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitNTDefPlus" ):
            listener.exitNTDefPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitNTDefPlus" ):
            return visitor.visitNTDefPlus(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def nTDefPlus(self):
    """Parse the 'nTDefPlus' rule: one nTDef followed by its tail
    (states 311-312). ANTLR-generated.
    """
    localctx = SygusParser.NTDefPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 72, self.RULE_nTDefPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 311
        self.nTDef()
        self.state = 312
        self.nTDefPlusTail()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NTDefPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'nTDefPlusTail' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def nTDef(self):
        # Typed accessor (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.NTDefContext,0)

    def nTDefPlusTail(self):
        return self.getTypedRuleContext(SygusParser.NTDefPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_nTDefPlusTail

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterNTDefPlusTail" ):
            listener.enterNTDefPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitNTDefPlusTail" ):
            listener.exitNTDefPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitNTDefPlusTail" ):
            return visitor.visitNTDefPlusTail(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def nTDefPlusTail(self):
    """Parse the 'nTDefPlusTail' rule (ANTLR-generated).

    T__0 recurses on another nTDef; T__2 takes the empty alternative.
    """
    localctx = SygusParser.NTDefPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 74, self.RULE_nTDefPlusTail)
    try:
        self.state = 318
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            # Alt 1: consume one more nTDef, then recurse on the tail.
            self.enterOuterAlt(localctx, 1)
            self.state = 314
            self.nTDef()
            self.state = 315
            self.nTDefPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class NTDefContext(ParserRuleContext):
    """Parse-tree node for the 'nTDef' (non-terminal definition) rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        # Typed accessors for the rule's children.
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def gTermPlus(self):
        return self.getTypedRuleContext(SygusParser.GTermPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_nTDef

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterNTDef" ):
            listener.enterNTDef(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitNTDef" ):
            listener.exitNTDef(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitNTDef" ):
            return visitor.visitNTDef(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def nTDef(self):
    """Parse the 'nTDef' rule (ANTLR-generated).

    Fixed sequence: T__0, symbol, sortExpr, T__0, gTermPlus, T__2, T__2
    (states 320-326) — a non-terminal name and sort with its nested list
    of grammar-term productions.
    """
    localctx = SygusParser.NTDefContext(self, self._ctx, self.state)
    self.enterRule(localctx, 76, self.RULE_nTDef)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 320
        self.match(SygusParser.T__0)
        self.state = 321
        self.symbol()
        self.state = 322
        self.sortExpr()
        self.state = 323
        self.match(SygusParser.T__0)
        self.state = 324
        self.gTermPlus()
        self.state = 325
        self.match(SygusParser.T__2)
        self.state = 326
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GTermPlusContext(ParserRuleContext):
    """Parse-tree node for the 'gTermPlus' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def gTerm(self):
        # Typed accessors for head grammar-term and recursive tail.
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def gTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.GTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_gTermPlus

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterGTermPlus" ):
            listener.enterGTermPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGTermPlus" ):
            listener.exitGTermPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGTermPlus" ):
            return visitor.visitGTermPlus(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def gTermPlus(self):
    """Parse the 'gTermPlus' rule: one gTerm followed by its tail
    (states 328-329). ANTLR-generated.
    """
    localctx = SygusParser.GTermPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 78, self.RULE_gTermPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 328
        self.gTerm()
        self.state = 329
        self.gTermPlusTail()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GTermPlusTailContext(ParserRuleContext):
    """Parse-tree node for the 'gTermPlusTail' grammar rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def gTerm(self):
        # Typed accessor (absent in the empty alternative).
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def gTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.GTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_gTermPlusTail

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterGTermPlusTail" ):
            listener.enterGTermPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGTermPlusTail" ):
            listener.exitGTermPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGTermPlusTail" ):
            return visitor.visitGTermPlusTail(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def gTermPlusTail(self):
    """Parse the 'gTermPlusTail' rule (ANTLR-generated).

    Any token that can start a gTerm (T__0, T__11, T__12, INTEGER,
    BVCONST, REALCONST, SYMBOL) recurses; T__2 takes the empty
    alternative.
    """
    localctx = SygusParser.GTermPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 80, self.RULE_gTermPlusTail)
    try:
        self.state = 335
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
            # Alt 1: consume one more gTerm, then recurse on the tail.
            self.enterOuterAlt(localctx, 1)
            self.state = 331
            self.gTerm()
            self.state = 332
            self.gTermPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            # Alt 2: empty.
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CheckSynthCmdContext(ParserRuleContext):
    """Parse-tree node for the 'checkSynthCmd' grammar rule (ANTLR-generated).

    The rule has no child rules or named terminals, so there are no
    typed accessors.
    """

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return SygusParser.RULE_checkSynthCmd

    def enterRule(self, listener:ParseTreeListener):
        # hasattr guard: listener hooks are optional (duck-typed dispatch).
        if hasattr( listener, "enterCheckSynthCmd" ):
            listener.enterCheckSynthCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitCheckSynthCmd" ):
            listener.exitCheckSynthCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitCheckSynthCmd" ):
            return visitor.visitCheckSynthCmd(self)
        else:
            # Fall back to generic child traversal.
            return visitor.visitChildren(self)
def checkSynthCmd(self):
    """Parse rule ``checkSynthCmd``: T__0 T__18 T__2
    (presumably ``(check-synth)`` -- literal token text not visible here).
    """
    localctx = SygusParser.CheckSynthCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 82, self.RULE_checkSynthCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 337
        self.match(SygusParser.T__0)
        self.state = 338
        self.match(SygusParser.T__18)
        self.state = 339
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        # Standard ANTLR error handling: record, report, resynchronize.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ConstraintCmdContext(ParserRuleContext):
    """Parse-tree node for the ``constraintCmd`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def term(self):
        # Typed accessor for the single ``term`` child.
        return self.getTypedRuleContext(SygusParser.TermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_constraintCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterConstraintCmd" ):
            listener.enterConstraintCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitConstraintCmd" ):
            listener.exitConstraintCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitConstraintCmd" ):
            return visitor.visitConstraintCmd(self)
        else:
            return visitor.visitChildren(self)
def constraintCmd(self):
    """Parse rule ``constraintCmd``: T__0 T__19 term T__2
    (presumably ``(constraint <term>)``).
    """
    localctx = SygusParser.ConstraintCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 84, self.RULE_constraintCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 341
        self.match(SygusParser.T__0)
        self.state = 342
        self.match(SygusParser.T__19)
        self.state = 343
        self.term()
        self.state = 344
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SynthFunCmdContext(ParserRuleContext):
    """Parse-tree node for the ``synthFunCmd`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Typed accessors for the rule's child contexts.
    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def argList(self):
        return self.getTypedRuleContext(SygusParser.ArgListContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def nTDefPlus(self):
        # Present only when the command carries a grammar block.
        return self.getTypedRuleContext(SygusParser.NTDefPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_synthFunCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSynthFunCmd" ):
            listener.enterSynthFunCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSynthFunCmd" ):
            listener.exitSynthFunCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSynthFunCmd" ):
            return visitor.visitSynthFunCmd(self)
        else:
            return visitor.visitChildren(self)
def synthFunCmd(self):
    """Parse rule ``synthFunCmd`` (two alternatives, chosen by adaptive
    prediction):

    1. T__0 T__20 symbol argList sortExpr T__0 nTDefPlus T__2 T__2
       (with a grammar block)
    2. T__0 T__20 symbol argList sortExpr T__2 (without one)

    T__20 is presumably the ``synth-fun`` keyword -- literal text not
    visible here.
    """
    localctx = SygusParser.SynthFunCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 86, self.RULE_synthFunCmd)
    try:
        self.state = 363
        self._errHandler.sync(self)
        # Adaptive prediction resolves the ambiguous common prefix.
        la_ = self._interp.adaptivePredict(self._input,15,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 346
            self.match(SygusParser.T__0)
            self.state = 347
            self.match(SygusParser.T__20)
            self.state = 348
            self.symbol()
            self.state = 349
            self.argList()
            self.state = 350
            self.sortExpr()
            self.state = 351
            self.match(SygusParser.T__0)
            self.state = 352
            self.nTDefPlus()
            self.state = 353
            self.match(SygusParser.T__2)
            self.state = 354
            self.match(SygusParser.T__2)
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 356
            self.match(SygusParser.T__0)
            self.state = 357
            self.match(SygusParser.T__20)
            self.state = 358
            self.symbol()
            self.state = 359
            self.argList()
            self.state = 360
            self.sortExpr()
            self.state = 361
            self.match(SygusParser.T__2)
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GTermContext(ParserRuleContext):
    """Parse-tree node for the ``gTerm`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    # Typed accessors; which ones return a child depends on the
    # alternative that was matched.
    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def literal(self):
        return self.getTypedRuleContext(SygusParser.LiteralContext,0)

    def gTermStar(self):
        return self.getTypedRuleContext(SygusParser.GTermStarContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def letGTerm(self):
        return self.getTypedRuleContext(SygusParser.LetGTermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_gTerm

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterGTerm" ):
            listener.enterGTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGTerm" ):
            listener.exitGTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGTerm" ):
            return visitor.visitGTerm(self)
        else:
            return visitor.visitChildren(self)
def gTerm(self):
    """Parse rule ``gTerm`` (8 alternatives, chosen by adaptive
    prediction):

    1. symbol
    2. literal
    3. T__0 symbol gTermStar T__2         (function application)
    4-7. T__0 <keyword> sortExpr T__2     (keywords T__21..T__24;
         literal text not visible here -- TODO confirm against the
         token definitions)
    8. letGTerm
    """
    localctx = SygusParser.GTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 88, self.RULE_gTerm)
    try:
        self.state = 393
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,16,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 365
            self.symbol()
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 366
            self.literal()
            pass
        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 367
            self.match(SygusParser.T__0)
            self.state = 368
            self.symbol()
            self.state = 369
            self.gTermStar()
            self.state = 370
            self.match(SygusParser.T__2)
            pass
        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 372
            self.match(SygusParser.T__0)
            self.state = 373
            self.match(SygusParser.T__21)
            self.state = 374
            self.sortExpr()
            self.state = 375
            self.match(SygusParser.T__2)
            pass
        elif la_ == 5:
            self.enterOuterAlt(localctx, 5)
            self.state = 377
            self.match(SygusParser.T__0)
            self.state = 378
            self.match(SygusParser.T__22)
            self.state = 379
            self.sortExpr()
            self.state = 380
            self.match(SygusParser.T__2)
            pass
        elif la_ == 6:
            self.enterOuterAlt(localctx, 6)
            self.state = 382
            self.match(SygusParser.T__0)
            self.state = 383
            self.match(SygusParser.T__23)
            self.state = 384
            self.sortExpr()
            self.state = 385
            self.match(SygusParser.T__2)
            pass
        elif la_ == 7:
            self.enterOuterAlt(localctx, 7)
            self.state = 387
            self.match(SygusParser.T__0)
            self.state = 388
            self.match(SygusParser.T__24)
            self.state = 389
            self.sortExpr()
            self.state = 390
            self.match(SygusParser.T__2)
            pass
        elif la_ == 8:
            self.enterOuterAlt(localctx, 8)
            self.state = 392
            self.letGTerm()
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetGTermContext(ParserRuleContext):
    """Parse-tree node for the ``letGTerm`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingGTermPlus(self):
        return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusContext,0)

    def gTerm(self):
        # The body term of the let expression.
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letGTerm

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetGTerm" ):
            listener.enterLetGTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetGTerm" ):
            listener.exitLetGTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetGTerm" ):
            return visitor.visitLetGTerm(self)
        else:
            return visitor.visitChildren(self)
def letGTerm(self):
    """Parse rule ``letGTerm``:
    T__0 T__17 T__0 letBindingGTermPlus T__2 gTerm T__2
    (presumably ``(let (<bindings>) <gTerm>)`` -- T__17 shared with the
    term-level let rule).
    """
    localctx = SygusParser.LetGTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 90, self.RULE_letGTerm)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 395
        self.match(SygusParser.T__0)
        self.state = 396
        self.match(SygusParser.T__17)
        self.state = 397
        self.match(SygusParser.T__0)
        self.state = 398
        self.letBindingGTermPlus()
        self.state = 399
        self.match(SygusParser.T__2)
        self.state = 400
        self.gTerm()
        self.state = 401
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingGTermPlusContext(ParserRuleContext):
    """Parse-tree node for the ``letBindingGTermPlus`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingGTerm(self):
        return self.getTypedRuleContext(SygusParser.LetBindingGTermContext,0)

    def letBindingGTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingGTermPlus

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetBindingGTermPlus" ):
            listener.enterLetBindingGTermPlus(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingGTermPlus" ):
            listener.exitLetBindingGTermPlus(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingGTermPlus" ):
            return visitor.visitLetBindingGTermPlus(self)
        else:
            return visitor.visitChildren(self)
def letBindingGTermPlus(self):
    """Parse rule ``letBindingGTermPlus``:
    ``letBindingGTerm letBindingGTermPlusTail`` (one-or-more bindings,
    encoded head + recursive tail).
    """
    localctx = SygusParser.LetBindingGTermPlusContext(self, self._ctx, self.state)
    self.enterRule(localctx, 92, self.RULE_letBindingGTermPlus)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 403
        self.letBindingGTerm()
        self.state = 404
        self.letBindingGTermPlusTail()
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingGTermPlusTailContext(ParserRuleContext):
    """Parse-tree node for the ``letBindingGTermPlusTail`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def letBindingGTerm(self):
        return self.getTypedRuleContext(SygusParser.LetBindingGTermContext,0)

    def letBindingGTermPlusTail(self):
        return self.getTypedRuleContext(SygusParser.LetBindingGTermPlusTailContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingGTermPlusTail

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetBindingGTermPlusTail" ):
            listener.enterLetBindingGTermPlusTail(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingGTermPlusTail" ):
            listener.exitLetBindingGTermPlusTail(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingGTermPlusTail" ):
            return visitor.visitLetBindingGTermPlusTail(self)
        else:
            return visitor.visitChildren(self)
def letBindingGTermPlusTail(self):
    """Parse rule ``letBindingGTermPlusTail``:
    ``letBindingGTerm letBindingGTermPlusTail | <empty>``.

    Another binding starts with T__0 (presumably '('); the empty
    alternative is taken on T__2 (presumably ')').
    """
    localctx = SygusParser.LetBindingGTermPlusTailContext(self, self._ctx, self.state)
    self.enterRule(localctx, 94, self.RULE_letBindingGTermPlusTail)
    try:
        self.state = 410
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0]:
            self.enterOuterAlt(localctx, 1)
            self.state = 406
            self.letBindingGTerm()
            self.state = 407
            self.letBindingGTermPlusTail()
            pass
        elif token in [SygusParser.T__2]:
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class LetBindingGTermContext(ParserRuleContext):
    """Parse-tree node for the ``letBindingGTerm`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def gTerm(self):
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_letBindingGTerm

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterLetBindingGTerm" ):
            listener.enterLetBindingGTerm(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitLetBindingGTerm" ):
            listener.exitLetBindingGTerm(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitLetBindingGTerm" ):
            return visitor.visitLetBindingGTerm(self)
        else:
            return visitor.visitChildren(self)
def letBindingGTerm(self):
    """Parse rule ``letBindingGTerm``: T__0 symbol sortExpr gTerm T__2
    (a single let binding, presumably ``(<name> <sort> <gTerm>)``).
    """
    localctx = SygusParser.LetBindingGTermContext(self, self._ctx, self.state)
    self.enterRule(localctx, 96, self.RULE_letBindingGTerm)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 412
        self.match(SygusParser.T__0)
        self.state = 413
        self.symbol()
        self.state = 414
        self.sortExpr()
        self.state = 415
        self.gTerm()
        self.state = 416
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class GTermStarContext(ParserRuleContext):
    """Parse-tree node for the ``gTermStar`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def gTerm(self):
        return self.getTypedRuleContext(SygusParser.GTermContext,0)

    def gTermStar(self):
        # Recursive tail of the zero-or-more list.
        return self.getTypedRuleContext(SygusParser.GTermStarContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_gTermStar

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterGTermStar" ):
            listener.enterGTermStar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitGTermStar" ):
            listener.exitGTermStar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitGTermStar" ):
            return visitor.visitGTermStar(self)
        else:
            return visitor.visitChildren(self)
def gTermStar(self):
    """Parse rule ``gTermStar``: ``gTerm gTermStar | <empty>``
    (zero-or-more gTerms, encoded with right recursion; stops on T__2,
    presumably ')').
    """
    localctx = SygusParser.GTermStarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 98, self.RULE_gTermStar)
    try:
        self.state = 422
        self._errHandler.sync(self)
        token = self._input.LA(1)
        if token in [SygusParser.T__0, SygusParser.T__11, SygusParser.T__12, SygusParser.INTEGER, SygusParser.BVCONST, SygusParser.REALCONST, SygusParser.SYMBOL]:
            self.enterOuterAlt(localctx, 1)
            self.state = 418
            self.gTerm()
            self.state = 419
            self.gTermStar()
            pass
        elif token in [SygusParser.T__2]:
            self.enterOuterAlt(localctx, 2)
            pass
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class SynthInvCmdContext(ParserRuleContext):
    """Parse-tree node for the ``synthInvCmd`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def argList(self):
        return self.getTypedRuleContext(SygusParser.ArgListContext,0)

    def nTDefPlus(self):
        # Present only when the command carries a grammar block.
        return self.getTypedRuleContext(SygusParser.NTDefPlusContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_synthInvCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterSynthInvCmd" ):
            listener.enterSynthInvCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitSynthInvCmd" ):
            listener.exitSynthInvCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSynthInvCmd" ):
            return visitor.visitSynthInvCmd(self)
        else:
            return visitor.visitChildren(self)
def synthInvCmd(self):
    """Parse rule ``synthInvCmd`` (two alternatives, chosen by adaptive
    prediction):

    1. T__0 T__25 symbol argList T__0 nTDefPlus T__2 T__2
       (with a grammar block)
    2. T__0 T__25 symbol argList T__2 (without one)

    T__25 is presumably the ``synth-inv`` keyword. Note: unlike
    synthFunCmd there is no sortExpr -- the invariant's sort is
    implicit.
    """
    localctx = SygusParser.SynthInvCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 100, self.RULE_synthInvCmd)
    try:
        self.state = 439
        self._errHandler.sync(self)
        la_ = self._interp.adaptivePredict(self._input,19,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 424
            self.match(SygusParser.T__0)
            self.state = 425
            self.match(SygusParser.T__25)
            self.state = 426
            self.symbol()
            self.state = 427
            self.argList()
            self.state = 428
            self.match(SygusParser.T__0)
            self.state = 429
            self.nTDefPlus()
            self.state = 430
            self.match(SygusParser.T__2)
            self.state = 431
            self.match(SygusParser.T__2)
            pass
        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 433
            self.match(SygusParser.T__0)
            self.state = 434
            self.match(SygusParser.T__25)
            self.state = 435
            self.symbol()
            self.state = 436
            self.argList()
            self.state = 437
            self.match(SygusParser.T__2)
            pass
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class DeclarePrimedVarContext(ParserRuleContext):
    """Parse-tree node for the ``declarePrimedVar`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self):
        return self.getTypedRuleContext(SygusParser.SymbolContext,0)

    def sortExpr(self):
        return self.getTypedRuleContext(SygusParser.SortExprContext,0)

    def getRuleIndex(self):
        return SygusParser.RULE_declarePrimedVar

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterDeclarePrimedVar" ):
            listener.enterDeclarePrimedVar(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitDeclarePrimedVar" ):
            listener.exitDeclarePrimedVar(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitDeclarePrimedVar" ):
            return visitor.visitDeclarePrimedVar(self)
        else:
            return visitor.visitChildren(self)
def declarePrimedVar(self):
    """Parse rule ``declarePrimedVar``: T__0 T__26 symbol sortExpr T__2
    (presumably ``(declare-primed-var <name> <sort>)``).
    """
    localctx = SygusParser.DeclarePrimedVarContext(self, self._ctx, self.state)
    self.enterRule(localctx, 102, self.RULE_declarePrimedVar)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 441
        self.match(SygusParser.T__0)
        self.state = 442
        self.match(SygusParser.T__26)
        self.state = 443
        self.symbol()
        self.state = 444
        self.sortExpr()
        self.state = 445
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class InvConstraintCmdContext(ParserRuleContext):
    """Parse-tree node for the ``invConstraintCmd`` rule (ANTLR-generated)."""

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def symbol(self, i:int=None):
        # The rule matches four symbols; with no index, return all of
        # them, otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(SygusParser.SymbolContext)
        else:
            return self.getTypedRuleContext(SygusParser.SymbolContext,i)

    def getRuleIndex(self):
        return SygusParser.RULE_invConstraintCmd

    def enterRule(self, listener:ParseTreeListener):
        if hasattr( listener, "enterInvConstraintCmd" ):
            listener.enterInvConstraintCmd(self)

    def exitRule(self, listener:ParseTreeListener):
        if hasattr( listener, "exitInvConstraintCmd" ):
            listener.exitInvConstraintCmd(self)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitInvConstraintCmd" ):
            return visitor.visitInvConstraintCmd(self)
        else:
            return visitor.visitChildren(self)
def invConstraintCmd(self):
    """Parse rule ``invConstraintCmd``:
    T__0 T__27 symbol symbol symbol symbol T__2
    (presumably ``(inv-constraint <inv> <pre> <trans> <post>)`` --
    exact symbol roles not visible here).
    """
    localctx = SygusParser.InvConstraintCmdContext(self, self._ctx, self.state)
    self.enterRule(localctx, 104, self.RULE_invConstraintCmd)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 447
        self.match(SygusParser.T__0)
        self.state = 448
        self.match(SygusParser.T__27)
        self.state = 449
        self.symbol()
        self.state = 450
        self.symbol()
        self.state = 451
        self.symbol()
        self.state = 452
        self.symbol()
        self.state = 453
        self.match(SygusParser.T__2)
    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
| 34.319947 | 166 | 0.586897 | 13,771 | 129,043 | 5.413115 | 0.083945 | 0.013012 | 0.009297 | 0.009015 | 0.656377 | 0.622719 | 0.608687 | 0.530063 | 0.456469 | 0.373135 | 0 | 0.077047 | 0.303488 | 129,043 | 3,759 | 167 | 34.329077 | 0.752325 | 0.000504 | 0 | 0.567776 | 1 | 0.063748 | 0.107429 | 0.082803 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153065 | false | 0.022767 | 0.001401 | 0.058494 | 0.323643 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8556da33787e277786e6a3792b993ea18601c17 | 1,870 | py | Python | keitaro/utils.py | Infvmous/keitaro | aeb7555bd8443da995705f26fd42e6e882f64dd4 | [
"MIT"
] | 1 | 2021-07-03T16:40:37.000Z | 2021-07-03T16:40:37.000Z | keitaro/utils.py | ysomad/keitaro | aeb7555bd8443da995705f26fd42e6e882f64dd4 | [
"MIT"
] | 1 | 2021-01-28T13:06:33.000Z | 2021-01-28T13:06:36.000Z | keitaro/utils.py | ysomad/keitaro | aeb7555bd8443da995705f26fd42e6e882f64dd4 | [
"MIT"
] | 1 | 2021-02-23T08:10:29.000Z | 2021-02-23T08:10:29.000Z | import random
from string import ascii_letters, digits
from urllib.parse import urlparse
def list_to_string(list_items, separator='\n'):
    """
    Converts list items to string with separator
    """
    joined = separator.join(list_items)
    return joined
def string_to_list(string, separator='\n'):
    """
    Converts string with separator to a list

    Args:
        string: Text to split.
        separator: Delimiter to split on (defaults to newline).

    Returns:
        list[str]: The separated substrings.
    """
    # str.split already returns a list; the original wrapped it in a
    # redundant identity list comprehension.
    return string.split(separator)
def generate_random_string(length: int = 8) -> str:
    """
    Generates random string of letters and digits with length
    """
    alphabet = ascii_letters + digits
    picked = [random.choice(alphabet) for _ in range(length)]
    return ''.join(picked)
def build_host_url(url: str, scheme: str = 'https') -> str:
    """
    Parses url and adding http scheme if it doesn't exist
    """
    parsed = urlparse(url, scheme)
    if parsed.netloc:
        # url already had a scheme, so the host landed in .netloc.
        netloc, path = parsed.netloc, parsed.path
    else:
        # Scheme-less input: urlparse puts the host into .path, so
        # promote it to the netloc slot.
        netloc, path = parsed.path, ''
    return parsed._replace(netloc=netloc, path=path).geturl()
def remove_key_values(dictionary, keys=('self', '__class__')):
    """
    Removes key values from dictionary

    Returns a NEW dict; the input mapping is left untouched. (The
    previous implementation used a mutable default argument and mutated
    the caller's dict through an alias, despite assigning it to a
    variable named ``new_dict``.)

    Args:
        dictionary: Source mapping.
        keys: Keys to drop (missing keys are ignored instead of
            raising KeyError).

    Returns:
        dict: Copy of *dictionary* without *keys*.
    """
    drop = set(keys)
    return {key: value for key, value in dictionary.items() if key not in drop}
def filter_resource_entities_by_key_value(resource_entities, key, value):
    """
    Filters all resource entities by key and values,
    returns list of resource entities
    """
    matches = [entity for entity in resource_entities if entity[key] == value]
    if not matches:
        raise KeyError(f'resource entities with {key} "{value}" not found')
    return matches
def set_resource_default_fields(args_to_set, query_params, resource_instances):
    """For every argument whose value is None, copy the corresponding
    default from *resource_instances* into *query_params* (mutates
    *query_params* in place; returns None)."""
    unset = (name for name, value in args_to_set.items() if value is None)
    for name in unset:
        query_params[name] = resource_instances[name]
| 26.338028 | 79 | 0.675401 | 251 | 1,870 | 4.836653 | 0.36255 | 0.054366 | 0.029654 | 0.034596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000695 | 0.231016 | 1,870 | 70 | 80 | 26.714286 | 0.843533 | 0.168449 | 0 | 0 | 0 | 0 | 0.047945 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.205882 | false | 0 | 0.088235 | 0 | 0.470588 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8584dc2e1d7252f7eee5bae7caafe98b9817a57 | 5,304 | py | Python | tensorflow_1_x/7_kaggle/learntools/pandas/indexing_selecting_and_assigning.py | amitbcp/machine_learning_with_Scikit_Learn_and_TensorFlow | 37dda063e316503d53ac45f3b104a5cf1aaa4d78 | [
"MIT"
] | 11 | 2019-12-19T08:55:52.000Z | 2021-10-01T13:07:13.000Z | tensorflow_1_x/7_kaggle/learntools/pandas/indexing_selecting_and_assigning.py | amitbcp/Scikit_Learn_TensorFlow_Examples | 37dda063e316503d53ac45f3b104a5cf1aaa4d78 | [
"MIT"
] | 5 | 2019-10-09T01:41:19.000Z | 2022-02-10T00:19:01.000Z | tensorflow_1_x/7_kaggle/learntools/pandas/indexing_selecting_and_assigning.py | amitbcp/Scikit_Learn_TensorFlow_Examples | 37dda063e316503d53ac45f3b104a5cf1aaa4d78 | [
"MIT"
] | 7 | 2019-10-08T06:10:14.000Z | 2020-12-01T07:49:21.000Z | import pandas as pd
from learntools.core import *
# Shared fixture for every exercise below: the winemag reviews dataset,
# using the CSV's first column as the index.
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
# 1
class SelectDescCol(EqualityCheckProblem):
    """Exercise 1: select the ``description`` column from ``reviews``."""
    _var = 'desc'
    _expected = (
        reviews.description
    )
    #_solution = CS("desc = reviews.description")
    # This behaviour really should have been opt-in, rather than opt-out :/
    show_solution_on_correct = False
    _hint = "As an example, say we would like to select the column `column` from a DataFrame `table`. Then we have two options: we can call either `table.column` or `table[\"column\"]`."
    _solution = """
```python
desc = reviews.description
```
or
```python
desc = reviews["description"]
```
`desc` is a pandas `Series` object, with an index matching the `reviews` DataFrame.
In general, when we select a single column from a DataFrame, we'll get a Series.
"""
# 2
class FirstDesc(EqualityCheckProblem):
    """Exercise 2: select the first value of the ``description`` column."""
    _var = 'first_description'
    _expected = (
        reviews.description.iloc[0]
    )
    _hint = "To obtain a specific entry (corresponding to column `column` and row `i`) in a DataFrame `table`, we can call `table.column.iloc[i]`. Remember that Python indexing starts at 0!"
    _solution = """
```python
first_description = reviews.description.iloc[0]
```
Note that while this is the preferred way to obtain the entry in the DataFrame, many other options will return a valid result, such as `reviews.description.loc[0]`, `reviews.description[0]`, and more!
"""
# 3
class FirstRow(EqualityCheckProblem):
    """Exercise 3: select the first row of ``reviews``."""
    _var = 'first_row'
    _expected = (
        reviews.iloc[0]
    )
    _hint = "To obtain a specific row of a DataFrame, we can use the `iloc` operator. For more information, see the section on **Index-based selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
    _solution = CS("first_row = reviews.iloc[0]")
# 4
class FirstDescs(EqualityCheckProblem):
    """Exercise 4: select the first ten descriptions."""
    _var = 'first_descriptions'
    _expected = (
        reviews.description.iloc[:10]
    )
    _hint = "We can use either the `loc` or `iloc` operator to solve this problem. For more information, see the sections on **Index-based selection** and **Label-based selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
    _solution = """
```python
first_descriptions = reviews.description.iloc[:10]
```
Note that many other options will return a valid result, such as `desc.head(10)` and `reviews.loc[:9, "description"]`.
"""
# 5
class SampleReviews(EqualityCheckProblem):
    """Exercise 5: select a specific set of rows by label."""
    _var = 'sample_reviews'
    indices = [1, 2, 3, 5, 8]
    # NOTE: _expected is a one-element tuple here (trailing comma),
    # unlike the bare values used by the earlier exercises.
    _expected = (
        reviews.loc[indices],
    )
    _hint = "Use either the `loc` or `iloc` operator to select rows of a DataFrame."
    _solution = CS("""\
indices = [1, 2, 3, 5, 8]
sample_reviews = reviews.loc[indices]""")
# 6
class RowColSelect(EqualityCheckProblem):
    """Exercise 6: select specific rows and columns with ``loc``."""
    _var = 'df'
    cols = ['country', 'province', 'region_1', 'region_2']
    indices = [0, 1, 10, 100]
    _expected = (
        reviews.loc[indices, cols],
    )
    _hint = "Use the `loc` operator. (Note that it is also *possible* to solve this problem using the `iloc` operator, but this would require extra effort to convert each column name to a corresponding integer-valued index.)"
    _solution = CS("""\
cols = ['country', 'province', 'region_1', 'region_2']
indices = [0, 1, 10, 100]
df = reviews.loc[indices, cols]""")
# 7
class RowColSelect2(EqualityCheckProblem):
    """Exercise 7: select the first 100 rows of two columns."""
    _var = 'df'
    cols = ['country', 'variety']
    _expected = (
        reviews.head(100).loc[:,cols],
    )
    _hint = "It is most straightforward to solve this problem with the `loc` operator. (However, if you decide to use `iloc`, remember to first convert each column into a corresponding integer-valued index.)"
    _solution = """
```python
cols = ['country', 'variety']
df = reviews.loc[:99, cols]
```
or
```python
cols_idx = [0, 11]
df = reviews.iloc[:100, cols_idx]
```
"""
# 8
class ItalianWines(EqualityCheckProblem):
    """Exercise 8: conditional selection on a single column."""
    _var = 'italian_wines'
    _expected = (
        reviews[reviews.country == 'Italy'],
    )
    _hint = "For more information, see the section on **Conditional selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
    _solution = CS("italian_wines = reviews[reviews.country == 'Italy']")
# 9
class TopOceanicWines(EqualityCheckProblem):
    """Exercise 9: conditional selection combining two masks."""
    _var = 'top_oceania_wines'
    cols = ['country', 'variety']
    _expected = reviews[
        (reviews.country.isin(['Australia', 'New Zealand']))
        & (reviews.points >= 95)
    ]
    _hint = "For more information, see the section on **Conditional selection** in the [reference component](https://www.kaggle.com/residentmario/indexing-selecting-assigning-reference)."
    _solution = CS("""\
top_oceania_wines = reviews.loc[
    (reviews.country.isin(['Australia', 'New Zealand']))
    & (reviews.points >= 95)
]""")
# Bind each problem class to a numbered checker variable in this
# module's namespace (exposed to the exercise notebook), and export
# exactly those names.
qvars = bind_exercises(globals(), [
    SelectDescCol,
    FirstDesc,
    FirstRow,
    FirstDescs,
    SampleReviews,
    RowColSelect,
    RowColSelect2,
    ItalianWines,
    TopOceanicWines,
    ],
    tutorial_id=46,
)
__all__ = list(qvars)
| 35.36 | 289 | 0.6727 | 660 | 5,304 | 5.30303 | 0.3 | 0.051429 | 0.025143 | 0.024 | 0.345429 | 0.305429 | 0.275714 | 0.251429 | 0.233714 | 0.204 | 0 | 0.017585 | 0.19589 | 5,304 | 149 | 290 | 35.597315 | 0.803048 | 0.024698 | 0 | 0.396825 | 0 | 0.087302 | 0.609572 | 0.050184 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.015873 | 0 | 0.420635 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a862a94126909233a9d159eed285bf366dafeca1 | 1,739 | py | Python | distfromplane_demo.py | johnmgregoire/PythonCompositionPlots | e105c575463b7d4512d9aac18c7330d1a0dc2c14 | [
"BSD-3-Clause"
] | 4 | 2018-03-05T09:34:49.000Z | 2022-02-01T15:33:54.000Z | distfromplane_demo.py | johnmgregoire/PythonCompositionPlots | e105c575463b7d4512d9aac18c7330d1a0dc2c14 | [
"BSD-3-Clause"
] | null | null | null | distfromplane_demo.py | johnmgregoire/PythonCompositionPlots | e105c575463b7d4512d9aac18c7330d1a0dc2c14 | [
"BSD-3-Clause"
] | 2 | 2016-01-24T19:09:21.000Z | 2019-10-11T12:43:07.000Z | import pylab, numpy
from myquaternaryutility import QuaternaryPlot
# Demo (Python 2): generate points in a quaternary composition space,
# filter them by distance from a plane defined by three vertex
# compositions, and plot both the full set and the selection.
q=QuaternaryPlot(211)
q2=QuaternaryPlot(212)

# Alternative dense grid over the whole simplex (kept for reference):
#t=numpy.linspace(0,1.,5)
#comps=[[a,b,c,d] for a in t for b in t for c in t for d in t if a+b+c+d==1.]
#comps=numpy.float32(comps)

# Curved 2-parameter family of compositions; the four components sum to 1
# by construction, restricted to the valid simplex region.
t=numpy.linspace(0,1.,30)
comps=[[a,b,1.-a-b-(2.*a**2+b),2.*a**2+b] for a in t for b in t[:10] if a+b+(2.*a**2+b)<=1.]
comps=numpy.float32(comps)

# Two example configurations: same plane vertices and tolerance, but
# with/without restricting hits to the triangle spanned by the vertices.
examplenum=0
if examplenum==0:
    compvert2=numpy.array([0.125, .125, .6, .15])
    compvert0=numpy.array([.2, .2, 0., .6])
    compvert1=numpy.array([1., 0., 0., 0])
    critdist=.04
    withintriangle=False
elif examplenum==1:
    compvert2=numpy.array([0.125, .125, .6, .15])
    compvert0=numpy.array([.2, .2, 0., .6])
    compvert1=numpy.array([1., 0., 0., 0])
    critdist=.04
    withintriangle=True

# Top panel: all compositions, colored by the 4th component.
q.scatter(comps,c=comps[:,3])
q.label(ha='center', va='center', fontsize=16)
q.set_projection(azim=-17, elev=-6)

# Split points into those within critdist of the plane (inds) and the
# rest (indsnot); invlogic flips the selection.
inds, distfromplane, xyparr, xyp_verts,intriangle=q2.filterbydistancefromplane(comps, compvert0, compvert1, compvert2, critdist, withintriangle=withintriangle, invlogic=False, returnall=True)
indsnot=q2.filterbydistancefromplane(comps, compvert0, compvert1, compvert2, critdist, withintriangle=withintriangle, invlogic=True)
print len(inds), ' points'

# Bottom panel: selected points colored, rejected points grey, with the
# plane's triangle edges drawn.
q2.scatter(comps[inds],c=comps[inds,3])
q2.scatter(comps[indsnot],c='grey', marker='.', s=5)
q2.line(compvert0, compvert1)
q2.line(compvert1, compvert2)
q2.line(compvert2, compvert0)
q2.label(ha='center', va='center', fontsize=16)
q2.set_projection(azim=-17, elev=-6)

# Separate 2-D figure: figure-of-merit (4th component) of the selected
# points projected into the plane's own coordinates.
pylab.figure()
ax=pylab.subplot(111)
q2.plotfominselectedplane(ax, xyparr[inds], comps[inds, -1], xyp_verts=xyp_verts, vertcomps_labels=[compvert0, compvert1, compvert2], s=20)
pylab.show()
| 31.618182 | 191 | 0.703278 | 278 | 1,739 | 4.377698 | 0.28777 | 0.01479 | 0.019721 | 0.00986 | 0.522597 | 0.455218 | 0.405916 | 0.354971 | 0.331964 | 0.331964 | 0 | 0.07871 | 0.108683 | 1,739 | 54 | 192 | 32.203704 | 0.706452 | 0.072455 | 0 | 0.216216 | 0 | 0 | 0.02236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.054054 | null | null | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a867335bf7412afcc85944e531249bfc53f2c724 | 813 | py | Python | mopidy_spotify_tunigo/__init__.py | trygveaa/mopidy-spotify-tunigo | b33d6df81b8b827859c9288ebedfa71304b98d07 | [
"Apache-2.0"
] | 34 | 2015-02-28T14:19:22.000Z | 2019-09-19T14:55:32.000Z | mopidy_spotify_tunigo/__init__.py | trygveaa/mopidy-spotify-tunigo | b33d6df81b8b827859c9288ebedfa71304b98d07 | [
"Apache-2.0"
] | 6 | 2015-01-12T12:07:13.000Z | 2019-12-12T23:01:37.000Z | mopidy_spotify_tunigo/__init__.py | trygveaa/mopidy-spotify-tunigo | b33d6df81b8b827859c9288ebedfa71304b98d07 | [
"Apache-2.0"
] | 3 | 2015-05-22T17:24:43.000Z | 2017-04-11T17:57:42.000Z | from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '1.0.0'
class Extension(ext.Extension):
    """Mopidy extension entry point for the Spotify/Tunigo browse backend."""

    dist_name = 'Mopidy-Spotify-Tunigo'
    ext_name = 'spotify_tunigo'
    version = __version__

    def get_default_config(self):
        """Read the bundled ext.conf that ships next to this module."""
        here = os.path.dirname(__file__)
        return config.read(os.path.join(here, 'ext.conf'))

    def get_config_schema(self):
        """Extend the base schema with this extension's settings."""
        schema = super(Extension, self).get_config_schema()
        extra = [
            ('region', config.String(optional=True)),
            ('sub_genres', config.Boolean()),
            ('cache_time', config.Integer(minimum=0, optional=True)),
        ]
        for key, spec in extra:
            schema[key] = spec
        return schema

    def setup(self, registry):
        """Register the backend class with Mopidy's extension registry."""
        from .backend import SpotifyTunigoBackend
        registry.add('backend', SpotifyTunigoBackend)
| 26.225806 | 71 | 0.688807 | 98 | 813 | 5.408163 | 0.479592 | 0.049057 | 0.056604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006182 | 0.204182 | 813 | 30 | 72 | 27.1 | 0.812983 | 0 | 0 | 0 | 0 | 0 | 0.099631 | 0.02583 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.2 | 0 | 0.65 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a86bd19cc05762dfd9e1c34a405531b5248d3abb | 7,553 | py | Python | mwlib/utoken.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 53 | 2015-02-17T16:20:06.000Z | 2022-03-18T09:22:00.000Z | mwlib/utoken.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 23 | 2015-01-30T16:26:20.000Z | 2022-03-11T23:26:03.000Z | mwlib/utoken.py | pediapress/mwlib | 1074fbc429e65180de09671dbc369ed1eeb2345d | [
"Unlicense"
] | 19 | 2015-01-21T13:55:46.000Z | 2019-02-23T23:14:31.000Z | #! /usr/bin/env python
# Copyright (c) 2007-2009 PediaPress GmbH
# See README.rst for additional licensing information.
# unified/universal token
import sys
import re
import _uscan as _mwscan
from mwlib.refine.util import resolve_entity, parseParams
def walknode(node, filt=lambda x: True):
    """Depth-first generator over *node* and its descendants.

    *node* may be a single token or an iterable of tokens; only nodes for
    which *filt* returns true are yielded.  Inner recursions deliberately
    use the default (always-true) filter so *filt* runs once per node.
    """
    if isinstance(node, token):
        if filt(node):
            yield node
        for child in node.children or []:
            for descendant in walknode(child):
                if filt(descendant):
                    yield descendant
    else:
        # A plain iterable of nodes: walk each element in turn.
        for element in node:
            for descendant in walknode(element):
                if filt(descendant):
                    yield descendant
def walknodel(node, filt=lambda x: True):
    """Like walknode, but collect the matching nodes into a list."""
    return [match for match in walknode(node, filt=filt)]
def show(node, out=None, indent=0, verbose=False):
    """Pretty-print *node* (a token or an iterable of tokens) as an
    indented tree, one repr per line, to *out* (default: stdout)."""
    if node is None:
        return
    stream = sys.stdout if out is None else out
    if isinstance(node, token):
        stream.write("%s%r\n" % ("    " * indent, node))
        for child in node.children or []:
            show(child, out=stream, indent=indent + 1, verbose=verbose)
    else:
        for element in node:
            show(element, out=stream, indent=indent, verbose=verbose)
class _show(object):
    """Descriptor backing ``token.show``.

    Accessed on the class it returns a function taking the node to print;
    accessed on an instance it returns a bound printer for that instance.
    """

    def __get__(self, obj, type=None):
        if obj is None:
            # Class-level access: behave like the module-level show().
            return lambda node, out=None: show(node, out=out)
        # Instance access: print this token tree.
        return lambda out=None: show(obj, out=out)
class token(object):
    """Unified/universal parse token.

    Per-token attributes default to the class attributes below, so unset
    fields cost no instance storage.  ``text`` is materialised lazily from
    ``source[start:start + len]`` via the property further down.
    """

    caption = ''
    vlist = None
    target = None
    level = None
    children = None
    rawtagname = None
    tagname = None
    ns = None
    lineprefix = None
    interwiki = None
    langlink = None
    namespace = None
    blocknode = False
    # image attributes
    align = None
    thumb = False
    frame = None

    # Token type codes; the name->code map is inverted into token2name
    # by the module-level loop after this class definition.
    t_end = 0
    t_text = 1
    t_entity = 2
    t_special = 3
    t_magicword = 4
    t_comment = 5
    t_2box_open = 6
    t_2box_close = 7
    t_http_url = 8
    t_break = 9
    t_begintable = t_begin_table = 10
    t_endtable = t_end_table = 11
    t_html_tag = 12
    t_singlequote = 13
    t_pre = 14
    t_section = 15
    t_endsection = t_section_end = 16
    t_item = 17
    t_colon = 18
    t_semicolon = 19
    t_hrule = 20
    t_newline = 21
    t_column = 22
    t_row = 23
    t_tablecaption = 24
    t_urllink = 25
    t_uniq = 26

    t_html_tag_end = 100

    # Shared code->name map, filled in at module import time.
    token2name = {}

    # Backing slot for the lazy ``text`` property.
    _text = None

    @staticmethod
    def join_as_text(tokens):
        """Concatenate the text of *tokens*, treating None text as empty."""
        return u"".join([x.text or u"" for x in tokens])

    def _get_text(self):
        # Lazily slice the token text out of the source string once.
        if self._text is None and self.source is not None:
            self._text = self.source[self.start:self.start + self.len]
        return self._text

    def _set_text(self, t):
        self._text = t

    text = property(_get_text, _set_text)

    def __init__(self, type=None, start=None, len=None, source=None, text=None, **kw):
        self.type = type
        self.start = start
        self.len = len
        self.source = source
        if text is not None:
            self.text = text
        # Arbitrary extra attributes may be attached via keywords.
        self.__dict__.update(kw)

    def __repr__(self):
        # Compact single-line debug form; used by show().
        if isinstance(self, token):
            r = [self.token2name.get(self.type, self.type)]
        else:
            r = [self.__class__.__name__]

        if self.text is not None:
            # [1:] drops the leading 'u' of the Python 2 unicode repr.
            r.append(repr(self.text)[1:])
        if self.tagname:
            r.append(" tagname=")
            r.append(repr(self.tagname))
        if self.rawtagname:
            r.append(" rawtagname=")
            r.append(repr(self.rawtagname))
        if self.vlist:
            r.append(" vlist=")
            r.append(repr(self.vlist))
        if self.target:
            r.append(" target=")
            r.append(repr(self.target))
        if self.level:
            r.append(" level=")
            r.append(repr(self.level))
        if self.ns is not None:
            r.append(" ns=")
            r.append(repr(self.ns))
        if self.lineprefix is not None:
            r.append(" lineprefix=")
            r.append(self.lineprefix)
        if self.interwiki:
            r.append(" interwiki=")
            r.append(repr(self.interwiki))
        if self.langlink:
            r.append(" langlink=")
            r.append(repr(self.langlink))

        # NOTE(review): t_complex_style is not defined on this class --
        # presumably added elsewhere (subclass or later patching); confirm
        # before relying on this branch, it would raise AttributeError here.
        if self.type == self.t_complex_style:
            r.append(repr(self.caption))
        elif self.caption:
            r.append("->")
            r.append(repr(self.caption))

        return u"".join(r)
# Attach the show() descriptor so tokens can pretty-print themselves.
show = _show()

# Invert the t_* class attributes into the shared {code: "t_name"} map
# consulted by token.__repr__; clean up the loop names afterwards.
token2name = token.token2name
for d in dir(token):
    if d.startswith("t_"):
        token2name[getattr(token, d)] = d
del d, token2name
def _split_tag(txt):
m = re.match(" *(\w+)(.*)", txt, re.DOTALL)
assert m is not None, "could not match tag name"
name = m.group(1)
values = m.group(2)
return name, values
def _analyze_html_tag(t):
    """Classify an html-tag token in place.

    Sets ``rawtagname``, ``vlist`` (parsed attributes), ``tag_selfClosing``
    and ``tag_isEndToken`` on *t*, and rewrites ``t.type`` to
    ``t_html_tag_end`` for closing tags.
    """
    text = t.text
    selfClosing = False

    # Distinguish </name>, <name ... /ightarrow/> and <name ...>.
    if text.startswith(u"</"):
        name = text[2:-1]
        isEndToken = True
    elif text.endswith("/>"):
        name = text[1:-2]
        selfClosing = True
        isEndToken = False  # ???
    else:
        name = text[1:-1]
        isEndToken = False

    name, values = _split_tag(name)
    t.vlist = parseParams(values)
    name = name.lower()

    # <br> and </br> are treated identically: never an end token.
    if name == 'br':
        isEndToken = False

    t.rawtagname = name
    t.tag_selfClosing = selfClosing
    t.tag_isEndToken = isEndToken

    if isEndToken:
        t.type = t.t_html_tag_end
def dump_tokens(text, tokens):
    """Debug helper: print each (type, start, len) token with its text.

    Loop variables renamed (they previously shadowed the builtins ``type``
    and ``len``) and the output pre-formatted into one string so the
    statement also parses under Python 3 while printing byte-identical
    output on Python 2.
    """
    for toktype, start, length in tokens:
        print("%s %r" % (toktype, text[start:start + length]))
def scan(text):
    """Tokenize *text* with the C scanner, padding with NULs so the
    scanner can safely read ahead past the end of the input."""
    padded = text + u"\0" * 32
    return _mwscan.scan(padded)
class _compat_scanner(object):
    """Callable wrapper around scan() producing token objects.

    Resolves entities to plain text, splits leading colons off table
    starts, and downgrades html tags outside the allowed set to text.
    """

    # Lazily-built whitelist of html tags kept as tag tokens.
    allowed_tags = None

    def _init_allowed_tags(self):
        self.allowed_tags = set("""
abbr b big blockquote br center cite code del div em endfeed font h1 h2 h3
h4 h5 h6 hr i index inputbox ins kbd li ol p pages references rss s small span
startfeed strike strong sub sup caption table td th tr tt u ul var dl dt dd
""".split())

    def __call__(self, text, uniquifier=None):
        if self.allowed_tags is None:
            self._init_allowed_tags()

        # Python 2: promote byte strings to unicode before scanning.
        if isinstance(text, str):
            text = unicode(text)

        tokens = scan(text)
        res = []

        def g():
            # Text of the token currently being processed (closure over
            # the loop variables below).
            return text[start:start + tlen]

        for type, start, tlen in tokens:
            if type == token.t_begintable:
                # Leading colons (indentation) are split off into their
                # own colon token before the table-start token.
                txt = g()
                count = txt.count(":")
                if count:
                    res.append(token(type=token.t_colon, start=start, len=count, source=text))
                    tlen -= count
                    start += count

            t = token(type=type, start=start, len=tlen, source=text)

            if type == token.t_entity:
                # Entities become plain text with the resolved character.
                t.text = resolve_entity(g())
                t.type = token.t_text
                res.append(t)
            elif type == token.t_html_tag:
                s = g()
                if uniquifier:
                    s = uniquifier.replace_uniq(s)
                t.text = s
                _analyze_html_tag(t)
                tagname = t.rawtagname
                if tagname in self.allowed_tags:
                    res.append(t)
                else:
                    # Unknown tag: keep its literal text instead.
                    res.append(token(type=token.t_text, start=start, len=tlen, source=text))
            else:
                res.append(t)
        return res
# Shared scanner instance used by the public tokenize() entry point.
compat_scan = _compat_scanner()

def tokenize(input, name="unknown", uniquifier=None):
    """Tokenize *input* into a list of token objects (``name`` is unused)."""
    assert input is not None, "must specify input argument in tokenize"
    return compat_scan(input, uniquifier=uniquifier)
| 24.683007 | 94 | 0.558983 | 996 | 7,553 | 4.105422 | 0.258032 | 0.037662 | 0.029592 | 0.040352 | 0.106628 | 0.06652 | 0.030325 | 0.030325 | 0.030325 | 0.013695 | 0 | 0.01659 | 0.337614 | 7,553 | 305 | 95 | 24.763934 | 0.80072 | 0.021051 | 0 | 0.111111 | 0 | 0 | 0.056172 | 0 | 0 | 0 | 0 | 0 | 0.008547 | 0 | null | null | 0 | 0.017094 | null | null | 0.004274 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a86d72d1fa9d273bd8fa78ace8fa385b2f849513 | 4,826 | py | Python | pynmmso/listeners/trace_listener.py | wood-chris/pynmmso | e13f8139160421a9d3f7e650ad6f988c9244ca69 | [
"MIT"
] | 5 | 2019-06-01T06:21:25.000Z | 2021-11-17T18:43:43.000Z | pynmmso/listeners/trace_listener.py | wood-chris/pynmmso | e13f8139160421a9d3f7e650ad6f988c9244ca69 | [
"MIT"
] | null | null | null | pynmmso/listeners/trace_listener.py | wood-chris/pynmmso | e13f8139160421a9d3f7e650ad6f988c9244ca69 | [
"MIT"
] | 3 | 2019-10-01T11:24:06.000Z | 2021-09-23T17:20:03.000Z | from pynmmso.listeners.base_listener import BaseListener
class TraceListener(BaseListener):
    """
    Listener that prints a running trace of the Nmmso algorithm.

    Arguments
    ---------
    level : int
        How much detail to print: level 1 is the least information and
        level 5 is the most. Default is level 2.
    """

    def __init__(self, level=2):
        self.nmmso = None
        self.iteration_number = 1
        self.evaluations = 0
        self.level = level
        super().__init__()

    def set_nmmso(self, nmmso):
        self.nmmso = nmmso

    def iteration_started(self):
        if self.level < 3:
            return
        print(80 * "=")
        print("Starting iteration {}".format(self.iteration_number))

    def location_evaluated(self, location, value):
        # The evaluation counter advances regardless of verbosity.
        self.evaluations += 1
        if self.level < 5:
            return
        print("Evaluation {}: location {}, value is {}".format(
            self.evaluations, location, value))

    def swarm_peak_changed(self, swarm, old_location, old_value):
        if self.level < 3:
            return
        print("Swarm {} has found a new peak at location {} with value {}, "
              "old location was {} old value was {}".format(
                  swarm.id, swarm.mode_location, swarm.mode_value,
                  old_location, old_value))

    def swarm_created_at_random(self, new_swarm):
        if self.level < 3:
            return
        print("Created swarm {} at random location {}, value is {}".format(
            new_swarm.id, new_swarm.mode_location, new_swarm.mode_value))

    def swarm_created_from_crossover(self, new_swarm, parent_swarm1, parent_swarm2):
        if self.level < 3:
            return
        print("Created swarm {} by crossover of swarms {} and {} at location {}, "
              "value is {}".format(
                  new_swarm.id, parent_swarm1.id, parent_swarm2.id,
                  new_swarm.mode_location, new_swarm.mode_value))

    def merging_started(self):
        if self.level >= 4:
            print("Merging swarms...")

    def merged_close_swarms(self, swarm1, swarm2):
        if self.level >= 3:
            print("Merged swarm {} into swarm {} as they were close".format(
                swarm2.id, swarm1.id))

    def merged_saddle_swarms(self, swarm1, swarm2):
        if self.level >= 3:
            print("Merged swarm {} into swarm {} as midpoint was fitter".format(
                swarm2.id, swarm1.id))

    def merging_ended(self):
        if self.level >= 4:
            print("Finished merging swarms")

    def incrementing_swarms_started(self):
        if self.level >= 4:
            print("Incrementing swarms...")

    def swarm_added_particle(self, swarm):
        if self.level >= 4:
            print("Added particle to swarm {}, it now has {} particles".format(
                swarm.id, swarm.number_of_particles))

    def swarm_moved_particle(self, swarm):
        if self.level >= 4:
            print("Moved particle of swarm {}".format(swarm.id))

    def incrementing_swarms_ended(self):
        if self.level >= 4:
            print("Finished incrementing swarms")

    def hiving_swams_started(self):
        # Name kept as-is ("swams") -- it is the interface callers invoke.
        if self.level >= 4:
            print("Hiving swarms...")

    def hiving_new_swarm(self, new_swarm, parent_swarm):
        if self.level >= 3:
            print("Hiving new swarm {} from swarm {}".format(
                new_swarm.id, parent_swarm.id))

    def hiving_swarms_ended(self):
        if self.level >= 4:
            print("Finishing hiving swarms")

    def iteration_ended(
            self, n_new_locations, n_mid_evals, n_evol_modes, n_rand_modes, n_hive_samples):
        evals_this_iter = (n_new_locations + n_mid_evals + n_evol_modes
                           + n_rand_modes + n_hive_samples)
        if self.level >= 1:
            print("Finished iteration {}, evaluations this iteration: {}, total evaluations: {}, "
                  "number of swarms: {}".format(
                      self.iteration_number,
                      evals_this_iter,
                      self.nmmso.evaluations,
                      len(self.nmmso.swarms)))
        if self.level >= 3:
            print("  This iteration: new location evals = {} mid evals = {} evol modes = {} "
                  "rand modes = {} hive samples = {}".format(
                      n_new_locations, n_mid_evals, n_evol_modes, n_rand_modes, n_hive_samples))
        if self.level >= 2:
            for swarm in self.nmmso.swarms:
                print("Swarm {} : location: {} value {}".format(
                    swarm.id, swarm.mode_location, swarm.mode_value))
        self.iteration_number += 1

    def max_evaluations_reached(self):
        if self.level >= 1:
            print("Maximum number of evaluations reached. Total evaluations: {}".format(
                self.nmmso.evaluations))
| 37.123077 | 98 | 0.588065 | 579 | 4,826 | 4.709845 | 0.196891 | 0.072607 | 0.080675 | 0.044004 | 0.385039 | 0.343601 | 0.313165 | 0.239091 | 0.175284 | 0.143014 | 0 | 0.012489 | 0.30315 | 4,826 | 129 | 99 | 37.410853 | 0.798394 | 0.046622 | 0 | 0.193548 | 0 | 0 | 0.202016 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.215054 | false | 0 | 0.010753 | 0 | 0.236559 | 0.225806 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a87166a60d65966ddedb5ce1324d6d79ae3b0c7c | 17,050 | py | Python | arghphot/arghphot.py | balbinot/arghphot | ebfa8da444d2b43d68248bb450dfdc49890e0794 | [
"MIT"
] | 1 | 2016-05-21T21:11:21.000Z | 2016-05-21T21:11:21.000Z | arghphot/arghphot.py | balbinot/arghphot | ebfa8da444d2b43d68248bb450dfdc49890e0794 | [
"MIT"
] | null | null | null | arghphot/arghphot.py | balbinot/arghphot | ebfa8da444d2b43d68248bb450dfdc49890e0794 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
## temp imports
from matplotlib import pyplot as p
from matplotlib import cm
import tempfile
import numpy as np
from astropy.io import fits, ascii
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy import wcs
import aplpy
from pyraf import iraf
# Logger
from logutil import *
# Loading necessary IRAF packages and configurations
iraf.digiphot(_doprint=0)
iraf.daophot(_doprint=0)
iraf.apphot(_doprint=0)
iraf.reset(min_lenuserarea='200000')
__all__ = ['Frame']
def index_by_last_column_entry(M, keys):
    """Return the rows of *M* whose last-column value equals each entry of
    *keys*, in the order the keys are given (assumes every key occurs)."""
    last_col = M[:, -1]
    order = np.argsort(last_col)
    positions = np.searchsorted(last_col, keys, sorter=order)
    return M[order[positions]]
class Frame(mylogger):
    """One DECam image extension plus the DAOPHOT/IRAF bookkeeping for it."""

    def __init__(self, fname, ext, mask, logfn='last.log'):
        # fname: FITS image path; ext: extension number; mask: mask FITS
        # path (same extension layout); logfn: log file for mylogger.
        ## Initialize DAOPHOT
        self.base = './'
        import daophot
        daophot.set_default(self.base)
        ## Initiate logger utility
        self.sdict={}
        self.log = mylogger(self.sdict, logfn)
        self.fname = fname
        self.pfname = fname.split('/')[-1]
        self.iname = "%s[%i]" % (fname, ext)
        self.ext = ext
        # Read image and mask
        self.hdu = self.read_fits(fname, ext)
        self.mask = self.read_fits(mask, ext)
        self.read_prim(fname) # Some infos are only found in the primary HDU
        self.read_wcs()
        # Change with fwhm estimator routine. Below for processed DECAM data
        # (FWHM in pixels, and in arcsec via the CDELT2 plate scale).
        self.fwhm = self.hdup.header['FWHMAV']
        self.fwhmph = self.hdup.header['FWHMAV']*(3600*self.hdup.header['CDELT2'])
        # Saturation/high-count threshold used by cutbad().
        self.high = 35000
        ## Utility names for the DAOPHOT output files of this extension.
        self.daofindfn = '%s%d.coo.1' % (self.pfname, ext)
        self.photfn = '%s%d.mag.2' % (self.pfname, ext)
        self.pstfile = '%s%d.pst.1' % (self.pfname, ext)
        self.fitpsffn = '%s%d.fitpsf.1' % (self.pfname, ext)
        self.guess = '%s%d.guess.1' % (self.pfname, ext)
        self.psfgridname = '%s%d.psfgrid.png' % (self.pfname, ext)
        self.psf = '%s%d.psf.1.fits' % (self.pfname, ext)
        self.psfimg = '%s%d.psf.1.img.fits' % (self.pfname, ext)
        self.psfselectplot = '%s%d.psfselect.png' % (self.pfname, ext)
def pix2sky(self, x, y):
return self.wcs.wcs_pix2world(np.array([x,y]).T, 1)
def read_fits(self, fname, ext):
self.log(1, 'READ', 1, 'Reading %s[%i]' % (fname, ext))
return fits.open(fname, memmap=True)[ext]
def read_prim(self, fname):
self.log(1, 'READP', 1, 'Reading %s header' % (fname))
self.hdup = fits.open(fname, memmap=True)[0]
def read_wcs(self):
self.log(1, 'WCS READ', 1, 'Reading WCS for %s[%i]' % (self.fname, self.ext))
self.wcs = wcs.WCS(self.hdu.header)
def findsky(self, scl, nwin, rerun=False):
"""
Find sky statistics at random windows.
Window size set by scl (square) and number of windows by nwin
"""
a = self.hdu.data
m = self.mask.data
avoid = 100*scl
s = scl*0.5
xsize = a.shape[1]
ysize = a.shape[1]
b = np.empty((nwin,4))
for i in xrange(nwin):
x = y = -10
while x < avoid or x > xsize-avoid or y < avoid or y > ysize-avoid:
x = 1 + int(np.random.rand()*xsize)
y = 1 + int(np.random.rand()*ysize)
tmp = a[y-s:y+s, x-s:x+s]
tmpm = m[y-s:y+s, x-s:x+s]
if np.any(tmpm < 1):
b[i,0] = np.nan
b[i,1] = np.nan
b[i,2] = np.nan
b[i,3] = np.nan
else:
b[i,0] = np.sum(tmp)/(scl*scl)
b[i,1] = np.mean(tmp)
b[i,2] = np.median(tmp)
b[i,3] = np.std(tmp)
sigma = np.nanmedian(b[:,3])
sky = np.nanmedian(b[:,0])
self.sigma = sigma
self.sky = sky
self.log(1, 'SKY', sky, 'Sky value median: %lf' % sky)
self.log(1, 'SKYSIGMA', sigma, 'Sky variance median: %lf' % sigma)
iraf.datapars.setParam('sigma', sigma)
iraf.fitskypars.setParam('skyvalu', sky)
    def run_daofind(self, coofn):
        """Run IRAF daofind on this frame, writing detections to *coofn*.

        Returns *coofn* for convenient chaining.
        """
        iraf.daofind.setParam('image',self.iname)
        iraf.datapars.setParam('fwhmpsf',self.fwhm)
        iraf.daofind.setParam('output', coofn)
        iraf.daofind(mode='h',Stdout=1)
        return coofn
    def run_phot(self, coofn, photfn):
        """Run IRAF aperture photometry on the daofind coordinates.

        The sky annulus is scaled from the frame FWHM; the zero point is
        taken from the DECam MAGZPT header keyword.
        """
        iraf.phot.setParam('coords', coofn)
        iraf.phot.setParam('output', photfn)
        iraf.phot.setParam('image',self.iname)
        iraf.fitskypars.setParam('skyvalue',self.sky)
        iraf.fitskypars.setParam('annulus',4.*self.fwhm)
        iraf.fitskypars.setParam('dannulus',2.*self.fwhm)
        iraf.photpars.setParam('zmag', self.hdup.header['MAGZPT']) # Use DECAM estimate of zeropoint
        iraf.phot(mode='h',Stdout=1)
def trim_phot(self, photfn, outfn):
a = ascii.read(photfn)
std = a['STDEV']
sum = a['SUM']/a['AREA']
sky = a['MSKY']
sn = np.abs(sum-sky)/std
i = sn > 3
tmp = a[i]
tmp.write(outfn, format='ascii')
return outfn, np.where(i)[0]
    def run_fitpsf(self, coofn, outfn, guessfn):
        """Run IRAF fitpsf on stars close to the median instrumental mag.

        Writes the guess-star coordinates to *guessfn* and the preliminary
        PSF fit to *outfn*.
        """
        # select some guess stars for PSF building
        # Based on median magnitude from aperture phot (+/- 0.12 mag)
        daofind = np.loadtxt(coofn, usecols=(0,1,2))
        i = (daofind[:,2] < np.median(daofind[:,2]) + 0.12)&(daofind[:,2] > np.median(daofind[:,2]) - 0.12)
        np.savetxt(guessfn, daofind[i,0:2], fmt=['%-10.3f','%-10.3f'])
        iraf.fitpsf.setParam('image', self.iname)
        iraf.fitpsf.setParam('output', outfn) # preliminary psf fit
        iraf.fitpsf.setParam('coords', guessfn)
        iraf.fitpsf(mode='h',Stdout=1)
    def merge(self, trimphotfn, daofindfn, fitpsffn):
        """Join the phot, daofind and fitpsf catalogues on their common IDs.

        Returns one array per star with columns
        [x, y, msky, stdev, sum, area, mag, merr, id,
         sharpness, sround, ground, id, rsigma, id].
        """
        self.log(1, 'MERGED', '1', '%d Will merge %s, %s, %s by ID' % (self.ext, trimphotfn, daofindfn, fitpsffn))
        ## Use trimmed photometry to avoid variable sky background spurious
        ## detections
        # x,y, msky, stdev, sum, area, mag, merr, id
        # mags = np.loadtxt(self.photfn+'trim', usecols=(6,7,14,15,26,27,29,30,3), skiprows=1)
        mags = np.genfromtxt(trimphotfn, usecols=(6,7,14,15,26,27,29,30,3), skip_header=1, dtype='|S5')
        # Drop any row with a masked ('--') field, then cast to float.
        j = True
        for i in np.arange(mags.shape[1]):
            j *= (mags[:,i] != '--')
        mags = mags[j]
        mags = mags.astype(np.float64)
        # txdump the daofind shape statistics to a scratch file.
        tf = tempfile.NamedTemporaryFile(dir=self.base)
        iraf.txdump(textfile=daofindfn,
                    fields='sharpness,sround,ground,id',
                    expr='sharpness!=INDEF && sround!=INDEF && ground!=INDEF',
                    Stdout=tf.name+'coo.meh')
        daofind = np.loadtxt(tf.name+'coo.meh')
        # Same for the fitpsf radial sigma (< 7 px only).
        tf = tempfile.NamedTemporaryFile(dir=self.base)
        iraf.txdump(textfile=fitpsffn,
                    fields='rsigma,id',
                    expr='rsigma!=INDEF && rsigma < 7.0',
                    Stdout=tf.name+'psf.meh')
        fitpsf = np.loadtxt(tf.name+'psf.meh')
        # IDs present in all three catalogues (last column of each).
        # NOTE(review): bare ``reduce`` is a Python 2 builtin.
        I = reduce(lambda l,r: np.intersect1d(l,r,False), (i[:,-1] for i in
                                                           (mags, daofind,
                                                            fitpsf)))
        mags = index_by_last_column_entry(mags, I)
        fitpsf = index_by_last_column_entry(fitpsf, I)
        daofind = index_by_last_column_entry(daofind, I)
        # Stack the per-catalogue rows side by side in matching ID order.
        oo = np.c_[mags[np.searchsorted(mags[:,-1], I)],
                   daofind[np.searchsorted(daofind[:,-1], I)],
                   fitpsf[np.searchsorted(fitpsf[:,-1], I)]]
        tf = 'joinedforpsf%s.%d.dat' % ('DEBUG', self.ext)
        np.savetxt(tf, oo, fmt='%lf')
        return oo
    def select_psf(self, trimphotfn, coofn, fitpsffn, outfn, mlimt=1.2, sepmult=12, checkcom=True):
        """Select isolated, bright, well-shaped stars for PSF building.

        Cuts on magnitude, magnitude error, sharpness, fitted FWHM,
        nearest-neighbour separation and distance from the chip edges;
        writes diagnostic histograms, a DAOPHOT .pst file (*outfn*), and
        returns ``((id, x, y, mag, sky), merged_catalogue)``.
        """
        f = self.merge(trimphotfn, coofn, fitpsffn)
        # Angular separation to each star's nearest neighbour on the sky.
        w = self.pix2sky(f[:,0], f[:,1])
        coo = SkyCoord(w[:,0]*u.deg, w[:,1]*u.deg)
        nid, nsep2, _ = coo.match_to_catalog_sky(coo, nthneighbor=2)
        x = f[:,0]
        y = f[:,1]
        id = f[:,-1]
        sky = f[:,2]
        skystd = f[:,3]
        mag = f[:,6]
        merr = f[:,7]
        sharp = f[:,9]
        fwhm = f[:,13]
        # Selection thresholds (stored on self for later inspection).
        self.maglim = np.mean(mag) - mlimt
        self.merrlim = 0.08
        self.sharplim = 0.04
        #self.fwhmlimup = 1.15*self.fwhm/2.355
        #self.fwhmlimlow = 0.55*self.fwhm/2.355
        self.fwhmlimup = np.mean(fwhm) + 0.2*np.std(fwhm)
        self.fwhmlimlow = np.mean(fwhm) - np.std(fwhm)
        # Diagnostic histograms of every quantity cut on.
        p.figure(figsize=(12,12))
        p.subplot(331)
        p.xlabel('mag')
        p.hist(mag, range=[12,32], bins=30, color='k', alpha=0.6)
        p.axvline(x=self.maglim, c='k')
        p.subplot(332)
        p.xlabel('merr')
        p.hist(merr, range=[0,0.5], bins=30, color='k', alpha=0.6)
        p.axvline(x=self.merrlim, c='k')
        p.subplot(333)
        p.xlabel('sharpness')
        p.hist(sharp, range=[-0.5,0.5], bins=30, color='k', alpha=0.6)
        p.axvline(x=np.median(sharp), c='k')
        p.axvline(x=np.median(sharp)+self.sharplim, ls='--', c='k')
        p.axvline(x=np.median(sharp)-self.sharplim, ls='--', c='k')
        p.subplot(334)
        p.xlabel('fwhm [px]')
        p.hist(fwhm, range=[1,10], bins=30, color='k', alpha=0.6)
        p.axvline(x=self.fwhmlimup, ls='--', c='k')
        p.axvline(x=self.fwhmlimlow, ls='--', c='k')
        p.subplot(335)
        p.xlabel('separation [arcsec]')
        p.hist(nsep2.arcsec, bins=30, color='k', alpha=0.6)
        p.axvline(x=12*self.fwhmph, ls='--', c='k')
        p.subplot(336)
        p.xlabel('sky std [counts]')
        p.hist(skystd, bins=30, color='k', alpha=0.6)
        p.axvline(x=np.median(skystd) - np.std(skystd), ls='--', c='k')
        p.axvline(x=np.median(skystd) + np.std(skystd), ls='--', c='k')
        p.axvline(x=np.median(skystd), ls='-', c='k')
        p.subplot(337)
        p.xlabel('sky [counts]')
        p.hist(sky, bins=30, color='k', alpha=0.6)
        p.axvline(x=np.mean(sky) - np.std(sky), ls='--', c='k')
        p.axvline(x=np.mean(sky) + np.std(sky), ls='--', c='k')
        p.axvline(x=np.mean(sky), ls='-', c='k')
        p.savefig(self.psfselectplot)
        ## Set of constraints for PSF stars (boolean mask built up by
        ## multiplication, i.e. logical AND).
        i = (mag < self.maglim)
        i *= (merr < self.merrlim)
        i *= (np.abs(sharp-np.median(sharp)) < self.sharplim)
        i *= (fwhm < self.fwhmlimup)
        i *= (fwhm > self.fwhmlimlow)
        i *= (nsep2.arcsec > sepmult*self.fwhmph)
        # Keep well away (60 FWHM) from every chip edge.
        i *= (x > 60*self.fwhm)*(y > 60*self.fwhm)
        i *= (x < self.hdu.data.shape[1] - 60*self.fwhm)
        i *= (y < self.hdu.data.shape[0] - 60*self.fwhm)
        #i *= (np.abs(skystd - np.median(skystd)) < np.std(skystd))
        #i *= (np.abs(sky - np.mean(sky)) < np.std(sky))
        if len(id[i]) <= 2:
            self.log(3, 'NPSF', len(id[i]), 'Number of PSF stars less than 2')
        else:
            self.log(1, 'NPSF', len(id[i]), '%d Number of PSF stars is %i' % (self.ext, len(id[i])))
        # Final visual/contamination cut, then write the .pst star list.
        fid, fx, fy, fmag, fsky = self.cutbad(id[i], x[i], y[i], mag[i], sky[i], checkcom)
        self._parse_pst(fid, fx, fy, fmag, fsky, outfn)
        return (fid, fx, fy, fmag, fsky), f
        # self._parse_pst(id[i], x[i], y[i], mag[i], sky[i], outfn)
        # return (id[i], x[i], y[i], mag[i], sky[i]), f
def tvmark(self, ra, dec):
gc = aplpy.FITSFigure(self.hdu)
gc.show_grayscale(stretch='arcsinh')
gc.set_tick_labels_font(size='small')
gc.show_markers(ra,dec,layer='scatter',edgecolor='red',
facecolor='none',marker='o',s=10,alpha=0.5)
def _parse_pst(self, id, x, y, mag, msky, pstfile):
pstfile = open(pstfile, 'w')
pstfile.write("#N ID XCENTER YCENTER MAG MSKY \\\n")
pstfile.write("#U ## pixels pixels magnitudes counts \\\n")
pstfile.write("#F %-9d %-10.3f %-10.3f %-12.3f %-15.7g \n")
pstfile.write("#\n")
np.savetxt(pstfile, np.array([id, x,y, mag, msky]).T,
fmt=['%-9d','%-10.3f','%-10.3f','%-12.3f','%-15.7g'])
pstfile.close()
    def cutbad(self, id, x, y, mag, sky, checkcom=True):
        """Reject PSF candidates whose surroundings look bad.

        For each star, a box of half-width 6*FWHM is inspected; stars near
        very high counts, very low counts, or (if *checkcom*) with a bright
        contaminating object outside 3*FWHM are dropped.  Returns the
        surviving (ID, X, Y, MAG, SKY) as arrays.
        """
        rad = int(6*self.fwhm)
        ID = []
        X = []
        Y = []
        MAG = []
        SKY = []
        for i in np.arange(len(x)):
            xbox = int(x[i] - rad)
            Xbox = int(x[i] + rad)
            ybox = int(y[i] - rad)
            Ybox = int(y[i] + rad)
            block = self.hdu.data[ybox:Ybox,xbox:Xbox]
            xx = np.arange(block.shape[1])
            yy = np.arange(block.shape[0])
            xc = block.shape[1]/2.
            yc = block.shape[0]/2.
            rr = np.sqrt((xx[:, None]-xc)**2 + (yy[None, :]-yc)**2) # None is a trick to increase dimensions of boolean array
            # Pixels farther than 3*FWHM from the box centre.
            j = (rr > 3*self.fwhm)
            # NOTE(review): ``& checkcom==True`` parses as
            # ``np.any(...) & (checkcom == True)`` because == binds tighter
            # than & -- behaviour is as intended but fragile; confirm.
            if np.any(block > self.high):
                print 'star %d at %d %d eliminated: global high value nearby' % (id[i], x[i], y[i])
            elif np.any(block < self.sky - 6*self.sigma):
                print 'star %d at %d %d eliminated: global low value nearby' % (id[i], x[i], y[i])
            elif np.any(block[j] > self.sky + 5*self.sigma) & checkcom==True:
                print 'star %d at %d %d eliminated: contaminating object' % (id[i], x[i], y[i])
            else:
                ID.append(id[i])
                X.append(x[i])
                Y.append(y[i])
                MAG.append(mag[i])
                SKY.append(sky[i])
        ID = np.array(ID)
        X = np.array(X)
        Y = np.array(Y)
        MAG = np.array(MAG)
        SKY = np.array(SKY)
        return (ID, X, Y, MAG, SKY)
    def grid_psf(self, pstfile, gridname):
        """Save a square mosaic of cutouts around the PSF stars in *pstfile*.

        Each panel is a 12*FWHM-wide greyscale stamp; the figure is written
        to *gridname*.
        """
        from mpl_toolkits.axes_grid1 import ImageGrid
        id, x, y = np.loadtxt(pstfile, usecols=(0,1,2), unpack=True)
        # Smallest square grid that fits all stars.
        side = int(np.ceil(np.sqrt(len(x))))
        rad = int(6*self.fwhm)
        fig = p.figure(figsize=(12,12))
        grid = ImageGrid(fig, 111,
                         nrows_ncols = (side, side),
                         axes_pad = 0.0,
                         share_all=True,
                         label_mode = "L",
                         cbar_location = "right",
                         cbar_mode=None,
                         # cbar_size="5%",
                         # cbar_pad="5%",
                         aspect = True
                         )
        for i in np.arange(len(x)):
            xbox = int(x[i] - rad)
            Xbox = int(x[i] + rad)
            ybox = int(y[i] - rad)
            Ybox = int(y[i] + rad)
            block = self.hdu.data[ybox:Ybox,xbox:Xbox]
            grid[i].imshow(block.T, origin='lower', cmap=cm.gray,
                           vmin=self.sky-5*self.sigma, vmax=300,
                           interpolation='nearest')
        p.savefig(gridname)
    def run_psf(self, base, ext, photfn):
        """Build the DAOPHOT PSF model from the selected .pst stars.

        Radii are scaled from the frame FWHM; also renders the model into
        ``<base>.<ext>.psf.1.img.fits`` via seepsf for inspection.
        """
        fwhm = self.fwhm
        iraf.daopars.setParam('matchra',fwhm)
        iraf.daopars.setParam('psfrad',4*fwhm+1)
        iraf.daopars.setParam('fitrad',fwhm)
        iraf.daopars.setParam('sannulu',2*fwhm)
        iraf.daopars.setParam('wsannul',4*fwhm)
        iraf.psf.setParam('image',self.iname)
        iraf.psf.setParam('photfile', photfn)
        iraf.psf.setParam('pstfile', '%s.%d.pst.1' % (base, ext))
        iraf.psf.setParam('psfimage', '%s.%d.psf.1' % (base, ext))
        iraf.psf.setParam('opstfile', '%s.%d.psj.1' % (base, ext))
        iraf.psf.setParam('groupfil', '%s.%d.psg.1' % (base, ext))
        iraf.psf(mode='h')
        iraf.seepsf(psfimage='%s.%d.psf.1.fits'%(base, ext),
                    image='%s.%d.psf.1.img.fits'%(base, ext), magnitu='18.0')
    def run_allstar(self, base, ext):
        """Run DAOPHOT allstar PSF-fitting photometry on the whole frame.

        Uses the PSF built by run_psf(); writes the fitted (als), rejected
        (arj) and star-subtracted image (sub) products for this extension.
        """
        fwhm = self.fwhm
        iraf.daopars.setParam('matchra',fwhm)
        iraf.daopars.setParam('psfrad',4*fwhm+1)
        iraf.daopars.setParam('fitrad',fwhm)
        iraf.daopars.setParam('sannulu',2*fwhm)
        iraf.daopars.setParam('wsannul',4*fwhm)
        iraf.allstar.setParam('image',self.iname)
        iraf.allstar.setParam('photfile', '%s.%d.mag.1' % (base, ext))
        iraf.allstar.setParam('psfimage', '%s.%d.psf.1' % (base, ext))
        iraf.allstar.setParam('allstarf', '%s.%d.als.1' % (base, ext))
        iraf.allstar.setParam('rejfile', '%s.%d.arj.1' % (base, ext))
        iraf.allstar.setParam('subimage', '%s.%d.sub.1' % (base, ext))
        iraf.allstar(mode='h',verbose='no')
if __name__=='__main__':
    # Ad-hoc smoke test against a local DECam frame.
    fname = "/scratch/gc_survey/raw_data/c4d_150715_013102_osi_g_v1.fits"
    # NOTE(review): Frame.__init__ signature is (fname, ext, mask, logfn),
    # so 'bunda.log' binds to the *mask* argument here -- confirm intent.
    tmp = Frame(fname, 2, 'bunda.log')
    tmp.findsky(10, 1000)
    #tmp.run_daofind()
    #tmp.run_phot()
    #tmp.trim_phot()
    #tmp.run_fitpsf()
    #f = tmp.select_psf()
    #tmp.grid_psf()
    #tmp.run_psf()
    # NOTE(review): run_allstar() requires (base, ext) arguments; this call
    # would raise TypeError as written.
    tmp.run_allstar()
    #t = tmp.pix2sky(f[1], f[2])
    #tmp.tvmark(t[:,0], t[:,1])
    #p.show()
| 37.888889 | 125 | 0.528504 | 2,409 | 17,050 | 3.701121 | 0.202574 | 0.004711 | 0.014132 | 0.006169 | 0.290825 | 0.197959 | 0.169695 | 0.167003 | 0.152423 | 0.128197 | 0 | 0.031276 | 0.296774 | 17,050 | 449 | 126 | 37.973274 | 0.712344 | 0.077361 | 0 | 0.090909 | 0 | 0.002933 | 0.108046 | 0.006817 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.038123 | null | null | 0.017595 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8719a8d286f6a06b3cf89fb0e6e20d2209d3663 | 74,138 | py | Python | core/domain/exp_domain_test.py | imrk51/oppia | 615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c | [
"Apache-2.0"
] | null | null | null | core/domain/exp_domain_test.py | imrk51/oppia | 615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c | [
"Apache-2.0"
] | null | null | null | core/domain/exp_domain_test.py | imrk51/oppia | 615ce1dbdaffbb0c090c52d13ef6a3f7c1043d5c | [
"Apache-2.0"
] | 1 | 2021-08-04T13:03:16.000Z | 2021-08-04T13:03:16.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import os
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom: []
states:
%s:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents: {}
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_YAML_CONTENT_WITH_GADGETS = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom:
- customization_args:
adviceObjects:
value:
- adviceTitle: b
adviceHtml: <p>c</p>
gadget_type: TestGadget
gadget_name: ATestGadget
visible_in_states:
- New state
- Second state
states:
%s:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
Second state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: Second state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
TEST_GADGETS = {
'TestGadget': {
'dir': os.path.join(feconf.GADGETS_DIR, 'TestGadget')
}
}
TEST_GADGET_CUSTOMIZATION_ARGS = {
'adviceObjects': {
'value': [{
'adviceTitle': 'b',
'adviceHtml': '<p>c</p>'
}]
}
}
TEST_GADGET_DICT = {
'gadget_type': 'TestGadget',
'gadget_name': 'ATestGadget',
'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
'visible_in_states': ['First state']
}
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
    """Test the exploration domain object."""

    # TODO(bhenning): The validation tests below should be split into separate
    # unit tests. Also, all validation errors should be covered in the tests.
    def test_validation(self):
        """Test validation of explorations.

        Walks a single exploration through a long sequence of deliberate
        corruptions, asserting the expected validation error after each one,
        and restoring validity before moving to the next aspect. The order of
        the steps matters: each section assumes the repairs made by the
        previous one.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.init_state_name = ''
        exploration.states = {}

        # Titles may not contain invalid characters such as '#'.
        exploration.title = 'Hello #'
        self._assert_validation_error(exploration, 'Invalid character #')
        exploration.title = 'Title'

        exploration.category = 'Category'

        # Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
        # sentinel for an invalid state name.
        bad_state = exp_domain.State.create_default_state('/')
        exploration.states = {'/': bad_state}
        self._assert_validation_error(
            exploration, 'Invalid character / in a state name')

        new_state = exp_domain.State.create_default_state('ABC')
        new_state.update_interaction_id('TextInput')

        # The 'states' property must be a non-empty dict of states.
        exploration.states = {}
        self._assert_validation_error(
            exploration, 'exploration has no states')
        exploration.states = {'A string #': new_state}
        self._assert_validation_error(
            exploration, 'Invalid character # in a state name')
        exploration.states = {'A string _': new_state}
        self._assert_validation_error(
            exploration, 'Invalid character _ in a state name')

        # An exploration must declare an initial state name...
        exploration.states = {'ABC': new_state}
        self._assert_validation_error(
            exploration, 'has no initial state name')

        # ...and that name must refer to an existing state.
        exploration.init_state_name = 'initname'
        self._assert_validation_error(
            exploration,
            r'There is no state in \[\'ABC\'\] corresponding to '
            'the exploration\'s initial state name initname.')

        # Test whether a default outcome to a non-existing state is invalid.
        exploration.states = {exploration.init_state_name: new_state}
        self._assert_validation_error(
            exploration, 'destination ABC is not a valid')

        # Restore a valid exploration.
        init_state = exploration.states[exploration.init_state_name]
        default_outcome = init_state.interaction.default_outcome
        default_outcome.dest = exploration.init_state_name
        exploration.validate()

        # Ensure an answer group with two classifier rules is invalid.
        init_state.interaction.answer_groups.append(
            exp_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': exploration.init_state_name,
                    'feedback': ['Feedback'],
                    'param_changes': [],
                },
                'rule_specs': [{
                    'inputs': {
                        'training_data': ['Test']
                    },
                    'rule_type': 'FuzzyMatches'
                }, {
                    'inputs': {
                        'training_data': ['Test']
                    },
                    'rule_type': 'FuzzyMatches'
                }],
                'correct': False,
            })
        )
        self._assert_validation_error(
            exploration, 'AnswerGroups can only have one classifier rule.')

        # Restore a valid exploration.
        init_state.interaction.answer_groups.pop()
        exploration.validate()

        # Ensure an invalid destination can also be detected for answer groups.
        # Note: The state must keep its default_outcome, otherwise it will
        # trigger a validation error for non-terminal states needing to have a
        # default outcome. To validate the outcome of the answer group, this
        # default outcome must point to a valid state.
        init_state = exploration.states[exploration.init_state_name]
        default_outcome = init_state.interaction.default_outcome
        default_outcome.dest = exploration.init_state_name
        init_state.interaction.answer_groups.append(
            exp_domain.AnswerGroup.from_dict({
                'outcome': {
                    'dest': exploration.init_state_name,
                    'feedback': ['Feedback'],
                    'param_changes': [],
                },
                'rule_specs': [{
                    'inputs': {
                        'x': 'Test'
                    },
                    'rule_type': 'Contains'
                }],
                'correct': False,
            })
        )
        exploration.validate()

        interaction = init_state.interaction
        answer_groups = interaction.answer_groups
        answer_group = answer_groups[0]
        answer_group.outcome.dest = 'DEF'
        self._assert_validation_error(
            exploration, 'destination DEF is not a valid')

        # Restore a valid exploration.
        exploration.states[exploration.init_state_name].update_interaction_id(
            'TextInput')
        answer_group.outcome.dest = exploration.init_state_name
        exploration.validate()

        # Validate RuleSpec.
        rule_spec = answer_group.rule_specs[0]
        rule_spec.inputs = {}
        self._assert_validation_error(
            exploration, 'RuleSpec \'Contains\' is missing inputs')

        rule_spec.inputs = 'Inputs string'
        self._assert_validation_error(
            exploration, 'Expected inputs to be a dict')

        rule_spec.inputs = {'x': 'Test'}
        rule_spec.rule_type = 'FakeRuleType'
        self._assert_validation_error(exploration, 'Unrecognized rule type')

        # A non-string rule input raises directly rather than producing a
        # ValidationError, hence the bare assertRaisesRegexp here.
        rule_spec.inputs = {'x': 15}
        rule_spec.rule_type = 'Contains'
        with self.assertRaisesRegexp(
            Exception, 'Expected unicode string, received 15'
        ):
            exploration.validate()

        # Rule inputs may reference exploration parameters; unknown parameter
        # names are rejected.
        rule_spec.inputs = {'x': '{{ExampleParam}}'}
        self._assert_validation_error(
            exploration,
            'RuleSpec \'Contains\' has an input with name \'x\' which refers '
            'to an unknown parameter within the exploration: ExampleParam')

        # Restore a valid exploration.
        exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
            'UnicodeString')
        exploration.validate()

        # Validate Outcome.
        outcome = answer_group.outcome
        destination = exploration.init_state_name
        outcome.dest = None
        self._assert_validation_error(
            exploration, 'Every outcome should have a destination.')

        # Try setting the outcome destination to something other than a string.
        outcome.dest = 15
        self._assert_validation_error(
            exploration, 'Expected outcome dest to be a string')

        outcome.dest = destination

        outcome.feedback = 'Feedback'
        self._assert_validation_error(
            exploration, 'Expected outcome feedback to be a list')

        outcome.feedback = [15]
        self._assert_validation_error(
            exploration, 'Expected outcome feedback item to be a string')

        outcome.feedback = ['Feedback']
        exploration.validate()

        outcome.param_changes = 'Changes'
        self._assert_validation_error(
            exploration, 'Expected outcome param_changes to be a list')

        outcome.param_changes = []
        exploration.validate()

        # Validate InteractionInstance.
        interaction.id = 15
        self._assert_validation_error(
            exploration, 'Expected interaction id to be a string')

        interaction.id = 'SomeInteractionTypeThatDoesNotExist'
        self._assert_validation_error(exploration, 'Invalid interaction id')

        interaction.id = 'TextInput'
        exploration.validate()

        interaction.customization_args = []
        self._assert_validation_error(
            exploration, 'Expected customization args to be a dict')

        interaction.customization_args = {15: ''}
        self._assert_validation_error(
            exploration, 'Invalid customization arg name')

        interaction.customization_args = {'placeholder': ''}
        exploration.validate()

        interaction.answer_groups = {}
        self._assert_validation_error(
            exploration, 'Expected answer groups to be a list')

        # Terminal interactions may have neither a default outcome nor answer
        # groups; non-terminal interactions must have a default outcome.
        interaction.answer_groups = answer_groups
        interaction.id = 'EndExploration'
        self._assert_validation_error(
            exploration,
            'Terminal interactions must not have a default outcome.')

        interaction.id = 'TextInput'
        interaction.default_outcome = None
        self._assert_validation_error(
            exploration,
            'Non-terminal interactions must have a default outcome.')

        interaction.id = 'EndExploration'
        self._assert_validation_error(
            exploration,
            'Terminal interactions must not have any answer groups.')

        # A terminal interaction without a default outcome or answer group is
        # valid. This resets the exploration back to a valid state.
        interaction.answer_groups = []
        exploration.validate()

        interaction.fallbacks = {}
        self._assert_validation_error(
            exploration, 'Expected fallbacks to be a list')

        # Restore a valid exploration.
        interaction.id = 'TextInput'
        interaction.answer_groups = answer_groups
        interaction.default_outcome = default_outcome
        interaction.fallbacks = []
        exploration.validate()

        # Validate AnswerGroup.
        answer_group.rule_specs = {}
        self._assert_validation_error(
            exploration, 'Expected answer group rules to be a list')

        answer_group.rule_specs = []
        self._assert_validation_error(
            exploration,
            'There must be at least one rule for each answer group.')

        # Rebuild a fresh valid state before validating language and params.
        exploration.states = {
            exploration.init_state_name: exp_domain.State.create_default_state(
                exploration.init_state_name)
        }
        exploration.states[exploration.init_state_name].update_interaction_id(
            'TextInput')
        exploration.validate()

        exploration.language_code = 'fake_code'
        self._assert_validation_error(exploration, 'Invalid language_code')
        exploration.language_code = 'English'
        self._assert_validation_error(exploration, 'Invalid language_code')
        exploration.language_code = 'en'
        exploration.validate()

        exploration.param_specs = 'A string'
        self._assert_validation_error(exploration, 'param_specs to be a dict')

        exploration.param_specs = {
            '@': param_domain.ParamSpec.from_dict({
                'obj_type': 'UnicodeString'
            })
        }
        self._assert_validation_error(
            exploration, 'Only parameter names with characters')

        exploration.param_specs = {
            'notAParamSpec': param_domain.ParamSpec.from_dict(
                {'obj_type': 'UnicodeString'})
        }
        exploration.validate()
    def test_fallbacks_validation(self):
        """Test validation of state fallbacks.

        Covers: unknown trigger types, a missing outcome dict (raises
        KeyError), silent filling of default customization args, and silent
        stripping of unrecognized customization arg keys.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('TextInput')
        exploration.validate()

        # Outcome shared by all the fallback dicts below; loops back to the
        # initial state.
        base_outcome = {
            'dest': exploration.init_state_name,
            'feedback': [],
            'param_changes': [],
        }

        # An unregistered trigger type is a validation error.
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'FakeTriggerName',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': base_outcome,
        }])
        self._assert_validation_error(exploration, 'Unknown trigger type')

        # An empty outcome dict fails immediately with KeyError (missing
        # 'dest'), before validation is even reached.
        with self.assertRaises(KeyError):
            init_state.update_interaction_fallbacks([{
                'trigger': {
                    'trigger_type': 'NthResubmission',
                    'customization_args': {
                        'num_submits': {
                            'value': 42,
                        },
                    },
                },
                'outcome': {},
            }])

        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {},
            },
            'outcome': base_outcome,
        }])
        # Default values for the customization args will be added silently.
        exploration.validate()
        self.assertEqual(len(init_state.interaction.fallbacks), 1)
        self.assertEqual(
            init_state.interaction.fallbacks[0].trigger.customization_args,
            {
                'num_submits': {
                    'value': 3,
                }
            })

        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                    'bad_key_that_will_get_stripped_silently': {
                        'value': 'unused_value',
                    }
                },
            },
            'outcome': base_outcome,
        }])
        # Unused customization arg keys will be stripped silently.
        exploration.validate()
        self.assertEqual(len(init_state.interaction.fallbacks), 1)
        self.assertEqual(
            init_state.interaction.fallbacks[0].trigger.customization_args,
            {
                'num_submits': {
                    'value': 42,
                }
            })

        # A fully-specified, recognized fallback validates cleanly.
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 2,
                    },
                },
            },
            'outcome': base_outcome,
        }])
        exploration.validate()
    def test_tag_validation(self):
        """Test validation of exploration tags.

        Tags must be a list of non-empty, unique strings of lowercase
        letters and single spaces, with no leading/trailing whitespace.
        """
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('EndExploration')
        # Terminal interactions must not have a default outcome.
        init_state.interaction.default_outcome = None
        exploration.validate()

        exploration.tags = 'this should be a list'
        self._assert_validation_error(
            exploration, 'Expected \'tags\' to be a list')

        exploration.tags = [123]
        self._assert_validation_error(exploration, 'to be a string')
        exploration.tags = ['abc', 123]
        self._assert_validation_error(exploration, 'to be a string')

        exploration.tags = ['']
        self._assert_validation_error(exploration, 'Tags should be non-empty')

        exploration.tags = ['123']
        self._assert_validation_error(
            exploration, 'should only contain lowercase letters and spaces')
        exploration.tags = ['ABC']
        self._assert_validation_error(
            exploration, 'should only contain lowercase letters and spaces')

        exploration.tags = [' a b']
        self._assert_validation_error(
            exploration, 'Tags should not start or end with whitespace')
        exploration.tags = ['a b ']
        self._assert_validation_error(
            exploration, 'Tags should not start or end with whitespace')

        exploration.tags = ['a    b']
        self._assert_validation_error(
            exploration, 'Adjacent whitespace in tags should be collapsed')

        exploration.tags = ['abc', 'abc']
        self._assert_validation_error(
            exploration, 'Some tags duplicate each other')

        # A well-formed tag list validates cleanly.
        exploration.tags = ['computer science', 'analysis', 'a b c']
        exploration.validate()
    def test_exploration_skin_and_gadget_validation(self):
        """Test that Explorations including gadgets validate properly.

        Covers: unknown gadget types, gadgets referring to missing states,
        duplicate gadget names, repeated state visibility, and invalid panel
        names. Steps are order-dependent: each error is fixed before the
        next is introduced.
        """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)

        invalid_gadget_instance = exp_domain.GadgetInstance(
            'bad_type', 'aUniqueGadgetName', [], {})
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Unknown gadget with type bad_type is not in the registry.'
        ):
            invalid_gadget_instance.validate()

        # The remaining checks need TestGadget registered as an allowed
        # gadget type.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = exploration.skin_instance.panel_contents_dict[
                'bottom'][0]

            # Force a GadgetInstance to require certain state names.
            gadget_instance.visible_in_states.extend(['DEF', 'GHI'])
            self._assert_validation_error(
                exploration, 'Exploration missing required states: DEF, GHI')

            def_state = exp_domain.State.create_default_state('DEF')
            def_state.update_interaction_id('TextInput')
            exploration.states['DEF'] = def_state
            self._assert_validation_error(
                exploration, 'Exploration missing required state: GHI')

            ghi_state = exp_domain.State.create_default_state('GHI')
            ghi_state.update_interaction_id('TextInput')
            exploration.states['GHI'] = ghi_state
            exploration.validate()

            # Force a gadget name collision.
            gadget_instance.visible_in_states = ['DEF']
            exploration.add_gadget(TEST_GADGET_DICT, 'bottom')
            exploration.skin_instance.panel_contents_dict[
                'bottom'][1].visible_in_states = ['GHI']
            self._assert_validation_error(
                exploration,
                'ATestGadget gadget instance name must be unique.')
            exploration.skin_instance.panel_contents_dict['bottom'].pop()

            # A gadget may not declare visibility for the same state twice.
            gadget_instance.visible_in_states.extend(['DEF'])
            self._assert_validation_error(
                exploration,
                'TestGadget specifies visibility repeatedly for state: DEF')

            # Remove duplicate state.
            gadget_instance.visible_in_states.pop()

            # Adding a panel that doesn't exist in the skin.
            exploration.skin_instance.panel_contents_dict[
                'non_existent_panel'] = []
            self._assert_validation_error(
                exploration,
                'The panel name \'non_existent_panel\' is invalid.')
    def test_gadget_name_validation(self):
        """Test that gadget naming conditions validate properly.

        Gadget names must be non-empty alphanumeric strings (spaces allowed)
        no longer than the configured maximum length.
        """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = exploration.skin_instance.panel_contents_dict[
                'bottom'][0]
            gadget_instance.validate()

            gadget_instance.name = ''
            self._assert_validation_error(
                gadget_instance, 'Gadget name must not be an empty string.')

            gadget_instance.name = 0
            self._assert_validation_error(
                gadget_instance,
                'Gadget name must be a string. Received type: int')

            gadget_instance.name = 'ASuperLongGadgetNameThatExceedsTheLimit'
            max_length = exp_domain.GadgetInstance._MAX_GADGET_NAME_LENGTH  # pylint: disable=protected-access
            self._assert_validation_error(
                gadget_instance,
                'ASuperLongGadgetNameThatExceedsTheLimit gadget name'
                ' exceeds maximum length of %d' % max_length)

            gadget_instance.name = 'VERYGADGET!'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: VERYGADGET!')

            gadget_instance.name = 'Name with \t tab'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with \t tab')

            gadget_instance.name = 'Name with \n newline'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with \n newline')

            gadget_instance.name = 'Name with 3 space'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with 3 space')

            gadget_instance.name = ' untrim whitespace '
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: untrim whitespace ')

            # Names with spaces and number should pass.
            gadget_instance.name = 'Space and 1'
            gadget_instance.validate()
    def test_exploration_get_gadget_types(self):
        """Test that Exploration.get_gadget_types returns apt results.

        The returned list appears to be sorted (AnotherGadget precedes
        TestGadget below) and de-duplicated across panels.
        """
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            'An Exploration ID', SAMPLE_YAML_CONTENT)
        self.assertEqual(exploration_without_gadgets.get_gadget_types(), [])

        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(
            exploration_with_gadgets.get_gadget_types(), ['TestGadget'])

        another_gadget = exp_domain.GadgetInstance(
            'AnotherGadget', 'GadgetUniqueName1', [], {}
        )
        exploration_with_gadgets.skin_instance.panel_contents_dict[
            'bottom'].append(another_gadget)
        self.assertEqual(
            exploration_with_gadgets.get_gadget_types(),
            ['AnotherGadget', 'TestGadget']
        )
    def test_title_category_and_objective_validation(self):
        """Test that titles, categories and objectives are validated only in
        'strict' mode.
        """
        self.save_new_valid_exploration(
            'exp_id', 'user@example.com', title='', category='',
            objective='', end_state_name='End')
        exploration = exp_services.get_exploration_by_id('exp_id')

        # Non-strict validation accepts empty title/category/objective.
        exploration.validate()

        with self.assertRaisesRegexp(
            utils.ValidationError, 'title must be specified'
        ):
            exploration.validate(strict=True)
        exploration.title = 'A title'

        with self.assertRaisesRegexp(
            utils.ValidationError, 'category must be specified'
        ):
            exploration.validate(strict=True)
        exploration.category = 'A category'

        with self.assertRaisesRegexp(
            utils.ValidationError, 'objective must be specified'
        ):
            exploration.validate(strict=True)
        exploration.objective = 'An objective'

        exploration.validate(strict=True)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
class StateExportUnitTests(test_utils.GenericTestBase):
    """Test export of states."""

    def test_export_state_to_dict(self):
        """Test exporting a state to a dict."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['New state'])

        # A freshly-added state: empty text content, no interaction id, and
        # a default outcome that loops back to the state itself.
        expected_dict = {
            'classifier_model_id': None,
            'content': [{
                'type': 'text',
                'value': u''
            }],
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'New state',
                    'feedback': [],
                    'param_changes': [],
                },
                'fallbacks': [],
                'id': None,
            },
            'param_changes': [],
        }
        self.assertEqual(
            expected_dict, exploration.states['New state'].to_dict())
class YamlCreationUnitTests(test_utils.GenericTestBase):
    """Test creation of explorations from YAML files."""

    # Exploration id reused by all tests in this class.
    EXP_ID = 'An exploration_id'

    def test_yaml_import_and_export(self):
        """Test the from_yaml() and to_yaml() methods.

        Round-trips an exploration through YAML and checks the exact serialized
        form against SAMPLE_YAML_CONTENT, then exercises the error paths of
        from_yaml()/from_untitled_yaml() on malformed and wrong-version input.
        """
        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='Title', category='Category')
        exploration.add_states(['New state'])
        self.assertEqual(len(exploration.states), 2)

        # Give the new state a fallback so the serialized form matches the
        # sample constant.
        exploration.states['New state'].update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': {
                'dest': 'New state',
                'feedback': [],
                'param_changes': [],
            },
        }])

        exploration.validate()

        yaml_content = exploration.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

        # The YAML emitted by to_yaml() must be re-importable and stable.
        exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
        self.assertEqual(len(exploration2.states), 2)
        yaml_content_2 = exploration2.to_yaml()
        self.assertEqual(yaml_content_2, yaml_content)

        # Malformed inputs are rejected.
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Invalid\ninit_state_name:\nMore stuff')

        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'State1:\n(\nInvalid yaml')

        # from_yaml() only accepts schema version >= 10; older (untitled)
        # schemas must go through from_untitled_yaml(), and vice versa.
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version >= 10, received: 9'
        ):
            exp_domain.Exploration.from_yaml(
                'exp4', SAMPLE_UNTITLED_YAML_CONTENT)

        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version <= 9'
        ):
            exp_domain.Exploration.from_untitled_yaml(
                'exp4', 'Title', 'Category', SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_without_gadgets(self):
        """Test from_yaml() and to_yaml() methods without gadgets."""
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT)
        yaml_content = exploration_without_gadgets.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_with_gadgets(self):
        """Test from_yaml() and to_yaml() methods including gadgets."""
        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT_WITH_GADGETS)

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            generated_yaml = exploration_with_gadgets.to_yaml()

        # Compare parsed dicts rather than raw strings, so key ordering in
        # the serialized YAML does not matter.
        generated_yaml_as_dict = utils.dict_from_yaml(generated_yaml)
        sample_yaml_as_dict = utils.dict_from_yaml(
            SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(generated_yaml_as_dict, sample_yaml_as_dict)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
    """Tests the presence of appropriate schema migration methods in the
    Exploration domain object class.
    """

    def test_correct_states_schema_conversion_methods_exist(self):
        """Test that the right states schema conversion methods exist."""
        latest_version = feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION

        # A v(k) -> v(k+1) converter must exist for every version below the
        # current one.
        for old_version in range(latest_version):
            method_name = '_convert_states_v%s_dict_to_v%s_dict' % (
                old_version, old_version + 1)
            self.assertTrue(hasattr(exp_domain.Exploration, method_name))

        # There must be no converter beyond the current version.
        self.assertFalse(hasattr(
            exp_domain.Exploration,
            '_convert_states_v%s_dict_to_v%s_dict' % (
                latest_version, latest_version + 1)))

    def test_correct_exploration_schema_conversion_methods_exist(self):
        """Test that the right exploration schema conversion methods exist."""
        latest_version = exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION

        # Exploration schema versions start at 1 (unlike states schemas).
        for old_version in range(1, latest_version):
            method_name = '_convert_v%s_dict_to_v%s_dict' % (
                old_version, old_version + 1)
            self.assertTrue(hasattr(exp_domain.Exploration, method_name))

        # There must be no converter beyond the current version.
        self.assertFalse(hasattr(
            exp_domain.Exploration,
            '_convert_v%s_dict_to_v%s_dict' % (
                latest_version, latest_version + 1)))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
# Exploration export at YAML schema version 9 (states schema version 6).
# Differs from v8 by the per-interaction 'confirmed_unclassified_answers'
# field. String content is part of test behavior; do not edit.
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 6
tags: []
""")
# Exploration export at YAML schema version 10 (states schema version 7).
# Differs from v9 by top-level 'category'/'title' fields and the removal of
# 'default_skin'. String content is part of test behavior; do not edit.
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
# Exploration export at YAML schema version 11 (states schema version 8).
# Differs from v10 by the per-state 'classifier_model_id' field.
# String content is part of test behavior; do not edit.
YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
# Exploration export at YAML schema version 12 (states schema version 9).
# Differs from v11 by the per-answer-group 'correct' flag. This is the latest
# schema in this suite (see _LATEST_YAML_CONTENT below). String content is
# part of test behavior; do not edit.
YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V12
def test_load_from_v1(self):
    """Loading a v1 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
    """Loading a v2 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
    """Loading a v3 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
    """Loading a v4 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
    """Loading a v5 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
    """Loading a v6 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v7(self):
    """Loading a v7 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
    """Loading a v8 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
    """Loading a v9 YAML export should migrate to the latest schema."""
    migrated = exp_domain.Exploration.from_untitled_yaml(
        'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v10(self):
    """Loading a v10 YAML export should migrate to the latest schema."""
    # v10+ exports carry their own title/category, so from_yaml is used.
    migrated = exp_domain.Exploration.from_yaml('eid', self.YAML_CONTENT_V10)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v11(self):
    """Loading a v11 YAML export should migrate to the latest schema."""
    # v10+ exports carry their own title/category, so from_yaml is used.
    migrated = exp_domain.Exploration.from_yaml('eid', self.YAML_CONTENT_V11)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v12(self):
    """Loading the latest-schema (v12) YAML should round-trip unchanged."""
    migrated = exp_domain.Exploration.from_yaml('eid', self.YAML_CONTENT_V12)
    self.assertEqual(migrated.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
    """Test conversion methods."""

    def test_convert_exploration_to_player_dict(self):
        """to_player_dict() should expose exactly the player-facing fields."""
        title = 'Title'
        extra_state_name = 'first state'

        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', title=title, category='Category')
        exploration.add_states([extra_state_name])

        def _make_expected_state_dict(content_str, dest_name):
            # Mirrors the dict layout of a freshly-created default state.
            return {
                'classifier_model_id': None,
                'content': [{
                    'type': 'text',
                    'value': content_str,
                }],
                'interaction': {
                    'answer_groups': [],
                    'confirmed_unclassified_answers': [],
                    'customization_args': {},
                    'default_outcome': {
                        'dest': dest_name,
                        'feedback': [],
                        'param_changes': [],
                    },
                    'fallbacks': [],
                    'id': None,
                },
                'param_changes': [],
            }

        expected_player_dict = {
            'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'title': title,
            'states': {
                feconf.DEFAULT_INIT_STATE_NAME: _make_expected_state_dict(
                    feconf.DEFAULT_INIT_STATE_CONTENT_STR,
                    feconf.DEFAULT_INIT_STATE_NAME),
                extra_state_name: _make_expected_state_dict(
                    '', extra_state_name),
            },
            'param_changes': [],
            'param_specs': {},
            'skin_customizations': (
                exp_domain.SkinInstance._get_default_skin_customizations()  # pylint: disable=protected-access
            ),
            'language_code': 'en',
        }
        self.assertEqual(exploration.to_player_dict(), expected_player_dict)
class StateOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on states."""

    def test_delete_state(self):
        """The initial state and missing states cannot be deleted."""
        exp = exp_domain.Exploration.create_default_exploration('eid')
        exp.add_states(['first state'])

        with self.assertRaisesRegexp(
            ValueError, 'Cannot delete initial state'
        ):
            exp.delete_state(exp.init_state_name)

        exp.add_states(['second state'])
        exp.delete_state('second state')

        with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
            exp.delete_state('fake state')

    def test_state_operations(self):
        """Test adding, updating and checking existence of states."""
        exp = exp_domain.Exploration.create_default_exploration('eid')
        self.assertNotIn('invalid_state_name', exp.states)
        self.assertEqual(len(exp.states), 1)

        initial_name = exp.init_state_name
        exp.rename_state(initial_name, 'Renamed state')
        self.assertEqual(len(exp.states), 1)
        self.assertEqual(exp.init_state_name, 'Renamed state')

        # Add a second state.
        exp.add_states(['State 2'])
        self.assertEqual(len(exp.states), 2)

        # Renaming a state to its current name is a no-op and is allowed.
        exp.rename_state('State 2', 'State 2')

        # Adding or renaming to an already-used name must fail.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exp.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exp.rename_state('State 2', 'Renamed state')

        # 'END' used to be a special terminal pseudostate; these renames check
        # that it now behaves like any other state name.
        exp.rename_state('State 2', 'END')
        exp.rename_state('END', 'State 2')

        # Exactly two states exist at this point.
        self.assertNotIn(initial_name, exp.states)
        self.assertIn('Renamed state', exp.states)
        self.assertIn('State 2', exp.states)

        # A state literally called 'END' can be added...
        exp.add_states(['END'])

        # ...and renaming onto it fails like any other duplicate name.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exp.rename_state('State 2', 'END')

        # Wire the other states so everything ultimately reaches 'END'.
        exp.states[
            'Renamed state'].interaction.default_outcome.dest = 'State 2'
        exp.states['State 2'].interaction.default_outcome.dest = 'END'

        # Give the non-terminal states interactions.
        exp.states['Renamed state'].update_interaction_id('TextInput')
        exp.states['State 2'].update_interaction_id('TextInput')

        # Remaining metadata required for strict validation.
        exp.title = 'Title'
        exp.category = 'Category'
        exp.objective = 'Objective'

        # A state merely *named* 'END' must not make the exploration
        # terminable.
        with self.assertRaises(Exception):
            exp.validate(strict=True)

        # An EndExploration interaction (with no default outcome or answer
        # groups) is what actually terminates an exploration, regardless of
        # the state's name.
        exp.rename_state('END', 'AnotherEnd')
        terminal_state = exp.states['AnotherEnd']
        terminal_state.update_interaction_id('EndExploration')
        terminal_state.interaction.default_outcome = None
        exp.validate(strict=True)

        # Rename back and confirm deletion works for 'END' too.
        exp.rename_state('AnotherEnd', 'END')
        exp.delete_state('END')
        self.assertNotIn('END', exp.states)
class GadgetOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on gadgets."""

    def test_gadget_operations(self):
        """Test deletion of gadgets."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')

        # TestGadget is not in the production registry, so the gadget
        # whitelist is swapped while adding it.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            exploration.add_gadget(TEST_GADGET_DICT, 'bottom')

        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'][0].type, TEST_GADGET_DICT['gadget_type'])
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'][0].name, TEST_GADGET_DICT['gadget_name'])

        # Renaming a gadget that was never added must fail.
        with self.assertRaisesRegexp(
            ValueError, 'Gadget NotARealGadget does not exist.'
        ):
            exploration.rename_gadget('NotARealGadget', 'ANewName')

        exploration.rename_gadget(
            TEST_GADGET_DICT['gadget_name'], 'ANewName')
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'][0].name, 'ANewName')

        # Add another gadget.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            exploration.add_gadget(TEST_GADGET_DICT, 'bottom')

        self.assertEqual(
            exploration.get_all_gadget_names(),
            ['ANewName', 'ATestGadget']
        )

        # Renaming onto an existing gadget name must fail.
        with self.assertRaisesRegexp(
            ValueError, 'Duplicate gadget name: ANewName'
        ):
            exploration.rename_gadget('ATestGadget', 'ANewName')

        # Lookup by name returns the exact instance stored in the panel.
        gadget_instance = exploration.get_gadget_instance_by_name(
            'ANewName')
        self.assertIs(
            exploration.skin_instance.panel_contents_dict['bottom'][0],
            gadget_instance
        )

        panel = exploration._get_panel_for_gadget('ANewName')  # pylint: disable=protected-access
        self.assertEqual(panel, 'bottom')

        # Deleting both gadgets empties the panel; a second delete of the
        # same name must fail.
        exploration.delete_gadget('ANewName')
        exploration.delete_gadget('ATestGadget')
        self.assertEqual(exploration.skin_instance.panel_contents_dict[
            'bottom'], [])
        with self.assertRaisesRegexp(
            ValueError, 'Gadget ANewName does not exist.'
        ):
            exploration.delete_gadget('ANewName')
class SkinInstanceUnitTests(test_utils.GenericTestBase):
    """Test methods for SkinInstance."""

    # Dict form of a skin instance carrying a single test gadget in the
    # 'bottom' panel; matches SAMPLE_YAML_CONTENT_WITH_GADGETS.
    _SAMPLE_SKIN_INSTANCE_DICT = {
        'skin_id': 'conversation_v1',
        'skin_customizations': {
            'panels_contents': {
                'bottom': [
                    {
                        'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
                        'gadget_type': 'TestGadget',
                        'gadget_name': 'ATestGadget',
                        'visible_in_states': ['New state', 'Second state']
                    }
                ]
            }
        }
    }

    def test_get_state_names_required_by_gadgets(self):
        """The gadget's visible_in_states determines the required states."""
        instance = exp_domain.SkinInstance(
            'conversation_v1',
            self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations'])
        self.assertEqual(
            instance.get_state_names_required_by_gadgets(),
            ['New state', 'Second state'])

    def test_generation_of_get_default_skin_customizations(self):
        """Passing no customizations should yield the default panel layout."""
        instance = exp_domain.SkinInstance(feconf.DEFAULT_SKIN_ID, None)
        self.assertEqual(instance.panel_contents_dict, {'bottom': []})

    def test_conversion_of_skin_to_and_from_dict(self):
        """A SkinInstance should round-trip through its dict form."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        as_dict = exploration.skin_instance.to_dict()
        self.assertEqual(as_dict, self._SAMPLE_SKIN_INSTANCE_DICT)

        rebuilt = exp_domain.SkinInstance.from_dict(as_dict)
        self.assertEqual(rebuilt.skin_id, 'conversation_v1')
        self.assertEqual(
            sorted(rebuilt.panel_contents_dict.keys()), ['bottom'])
class GadgetInstanceUnitTests(test_utils.GenericTestBase):
    """Tests methods instantiating and validating GadgetInstances."""

    def test_gadget_instantiation(self):
        """Test instantiation of GadgetInstances."""
        # The sample YAML defines exactly one gadget, in the 'bottom' panel.
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(len(exploration.skin_instance.panel_contents_dict[
            'bottom']), 1)

    def test_gadget_instance_properties(self):
        """Test accurate representation of gadget properties."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict

        # TestGadget is not in the production registry; swap it in while the
        # instance's gadget-backed properties are read.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            test_gadget_instance = panel_contents_dict['bottom'][0]
            self.assertEqual(test_gadget_instance.height, 50)
            self.assertEqual(test_gadget_instance.width, 60)
            self.assertIn('New state', test_gadget_instance.visible_in_states)

    def test_gadget_instance_validation(self):
        """Test validation of GadgetInstance."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            test_gadget_instance = panel_contents_dict['bottom'][0]

            # Validation against sample YAML should pass without error.
            exploration.validate()

            # Assert size exceeded error triggers when a gadget's size exceeds
            # a panel's capacity.
            with self.swap(
                test_gadget_instance.gadget,
                'width_px',
                4600):
                self._assert_validation_error(
                    exploration,
                    'Width 4600 of panel \'bottom\' exceeds limit of 350')

            # Assert internal validation against CustomizationArgSpecs: extend
            # the advice list past the gadget's 3-tip limit.
            test_gadget_instance.customization_args[
                'adviceObjects']['value'].extend(
                    [
                        {'adviceTitle': 'test_title', 'adviceHtml': 'test html'},
                        {'adviceTitle': 'another_title', 'adviceHtml': 'more html'},
                        {'adviceTitle': 'third_title', 'adviceHtml': 'third html'}
                    ]
                )
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'TestGadget is limited to 3 tips, found 4.'
            ):
                test_gadget_instance.validate()
            # Drop back to 3 tips so later validations are not affected.
            test_gadget_instance.customization_args[
                'adviceObjects']['value'].pop()

            # Assert that too many gadgets in a panel raise a ValidationError.
            panel_contents_dict['bottom'].append(test_gadget_instance)
            with self.assertRaisesRegexp(
                utils.ValidationError,
                '\'bottom\' panel expected at most 1 gadget, but 2 gadgets are '
                'visible in state \'New state\'.'
            ):
                exploration.validate()

            # Assert that an error is raised when a gadget is not visible in
            # any states.
            test_gadget_instance.visible_in_states = []
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'TestGadget gadget not visible in any states.'):
                test_gadget_instance.validate()

    def test_conversion_of_gadget_instance_to_and_from_dict(self):
        """Test conversion of GadgetInstance to and from dict. """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panel_contents_dict = exploration.skin_instance.panel_contents_dict
        test_gadget_instance = panel_contents_dict['bottom'][0]

        test_gadget_as_dict = test_gadget_instance.to_dict()
        self.assertEqual(
            test_gadget_as_dict,
            {
                'gadget_type': 'TestGadget',
                'gadget_name': 'ATestGadget',
                'visible_in_states': ['New state', 'Second state'],
                'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS
            }
        )

        # Rebuilding from the dict restores the gadget-backed dimensions.
        test_gadget_as_instance = exp_domain.GadgetInstance.from_dict(
            test_gadget_as_dict)
        self.assertEqual(test_gadget_as_instance.width, 60)
        self.assertEqual(test_gadget_as_instance.height, 50)
class GadgetVisibilityInStatesUnitTests(test_utils.GenericTestBase):
    """Tests methods affecting gadget visibility in states."""

    def test_retrieving_affected_gadgets(self):
        """Test that appropriate gadgets are retrieved."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        # Only the single sample gadget lists 'Second state' in its
        # visible_in_states, so exactly that gadget should be returned.
        affected_gadget_instances = (
            exploration._get_gadget_instances_visible_in_state('Second state'))  # pylint: disable=protected-access
        self.assertEqual(len(affected_gadget_instances), 1)
        self.assertEqual(affected_gadget_instances[0].name, 'ATestGadget')
| 31.216 | 115 | 0.605614 | 7,278 | 74,138 | 5.894889 | 0.081204 | 0.031047 | 0.027504 | 0.03438 | 0.699811 | 0.640724 | 0.568468 | 0.523576 | 0.479151 | 0.456052 | 0 | 0.005174 | 0.30392 | 74,138 | 2,374 | 116 | 31.229149 | 0.826183 | 0.086798 | 0 | 0.720544 | 0 | 0 | 0.421204 | 0.019658 | 0 | 0 | 0 | 0.000421 | 0.074522 | 1 | 0.020645 | false | 0 | 0.005539 | 0.000504 | 0.039778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8767131fc92daf2b1e76889f0a46f8b914b9aa3 | 1,889 | py | Python | PyrogramBot/commands.py | Habdio/GROUP-AutoManageBot | 211ac78b0ce8a267ef9b77f881412ba87140d39b | [
"MIT"
] | null | null | null | PyrogramBot/commands.py | Habdio/GROUP-AutoManageBot | 211ac78b0ce8a267ef9b77f881412ba87140d39b | [
"MIT"
] | null | null | null | PyrogramBot/commands.py | Habdio/GROUP-AutoManageBot | 211ac78b0ce8a267ef9b77f881412ba87140d39b | [
"MIT"
] | null | null | null | from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram.types import CallbackQuery
import random
ALL_PIC = [
"https://telegra.ph/file/52b71d5a61c904c6a59d1.jpg",
"https://telegra.ph/file/28a00384a3be4f6c916ba.jpg",
"https://telegra.ph/file/eb654e5c7ff4d29eab29f.jpg",
"https://telegra.ph/file/a4796bdcca7ff90a3a3b8.jpg",
"https://telegra.ph/file/b7b43793368770ca4c7fb.jpg"
]
@Client.on_message(filters.command("start"))
async def start_message(bot, message):
await message.reply_photo(
photo=random.choice(ALL_PIC),
caption="hey {message.from_user.mention} എന്റെ പേര് <a href=https://t.me/FluffyPyroGramBot>𝙵𝙻𝚄𝙵𝙵𝚈 𝙿𝚈𝚁𝙾𝙶𝚁𝙰𝙼</a>, 🔰മച്ചാനെ എന്റെ പണി കഴിഞ്ഞിട്ടില്ല അതുകൊണ്ട് RePo✅️ പ്രൈവറ്റ് ആണ് Work കഴിഞ്ഞിട്ട് public ആക്കും ",
reply_markup=InlineKeyboardMarkup( [[
InlineKeyboardButton ("🗨️𝔾ℝ𝕆𝕌ℙ🗨️", url="https://t.me/DEVELOPERSCHANNEL2022"),
InlineKeyboardButton ("📂ℂℍ𝔸ℕℕ𝔼𝕃📂", url="https://t.me/DELCHANNEL001"),
],[
InlineKeyboardButton ("🔰𝔼𝔻𝕀𝕋𝔼ℝ🔰", url="t.me/TEAM_KERALA"),
InlineKeyboardButton ("©️ℙ𝔸𝕀𝔻 ℙℝ𝕆𝕄𝕆𝕋𝕀𝕆ℕ", url="t.me/pushpa_Reju"),
],[
InlineKeyboardButton ("👨💻𝔻𝔼𝕍𝔼𝕃𝕆ℙ𝔼ℝ👨💻", url="t.me/TEAM_KERALA"),
InlineKeyboardButton ("help", callback_data="song"),
],[
InlineKeyboardButton ("⚜️𝔸𝔻𝔻 𝕄𝔼 𝕋𝕆 𝔸 ℂℍ𝔸𝕋 𝔾ℝ𝕆𝕌ℙ⚜️", url="http://t.me/FluffyPyroGramBot?startgroup=true"),
]]
)
)
@Client.on_callback_query()
async def callback(bot,query: CallbackQuery):
if query.data == "song":
await query.message.edit_text(
text="/tgraph"
)
reply_markup=InlineKeyboardMarkup( [[
InlineKeyboardButton("song", callback_data="song")
]]
)
| 39.354167 | 218 | 0.629434 | 254 | 1,889 | 4.838583 | 0.440945 | 0.017087 | 0.056957 | 0.07323 | 0.143206 | 0.074858 | 0.016273 | 0.016273 | 0.016273 | 0 | 0 | 0.045884 | 0.215458 | 1,889 | 47 | 219 | 40.191489 | 0.745614 | 0 | 0 | 0.175 | 0 | 0.025 | 0.376919 | 0.014293 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.1 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8782932b4cb5b62f9cdd1eb887ce299cb47652d | 1,044 | py | Python | tests/sample_runbooks/set_variable.py | tuxtof/calm-dsl | 5af67435d8304b97e170a690068f2d5975e9bfe6 | [
"Apache-2.0"
] | 37 | 2019-12-23T15:23:20.000Z | 2022-03-15T11:12:11.000Z | tests/sample_runbooks/set_variable.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 144 | 2020-03-09T11:22:09.000Z | 2022-03-28T21:34:09.000Z | tests/sample_runbooks/set_variable.py | gabybeitler/calm-dsl | bac453413cfcf800eef95d89d5a7323c83654a93 | [
"Apache-2.0"
] | 46 | 2020-01-23T14:28:04.000Z | 2022-03-09T04:17:10.000Z | """
Calm Runbook Sample for set variable task
"""
from calm.dsl.runbooks import read_local_file
from calm.dsl.runbooks import runbook, runbook_json
from calm.dsl.runbooks import RunbookTask as Task
from calm.dsl.runbooks import CalmEndpoint as Endpoint, basic_cred
CRED_USERNAME = read_local_file(".tests/runbook_tests/username")
CRED_PASSWORD = read_local_file(".tests/runbook_tests/password")
VM_IP = read_local_file(".tests/runbook_tests/vm_ip")
Cred = basic_cred(CRED_USERNAME, CRED_PASSWORD, name="endpoint_cred")
endpoint = Endpoint.Linux.ip([VM_IP], cred=Cred)
@runbook
def DslSetVariableTask(endpoints=[endpoint], default=False):
"Runbook example with Set Variable Tasks"
Task.SetVariable.escript(script="print 'var1=test'", variables=["var1"])
Task.SetVariable.ssh(
filename="scripts/sample_script.sh", variables=["var2"], target=endpoints[0]
)
Task.Exec.escript(script="print '@@{var1}@@ @@{var2}@@'")
def main():
print(runbook_json(DslSetVariableTask))
if __name__ == "__main__":
main()
| 29.828571 | 84 | 0.749042 | 139 | 1,044 | 5.395683 | 0.381295 | 0.042667 | 0.058667 | 0.101333 | 0.264 | 0.197333 | 0 | 0 | 0 | 0 | 0 | 0.006522 | 0.118774 | 1,044 | 34 | 85 | 30.705882 | 0.808696 | 0.077586 | 0 | 0 | 0 | 0 | 0.223116 | 0.108543 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.095238 | 0.190476 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a87b6a507b0104731193175cfd2830a1d450aa86 | 873 | py | Python | tools/annotation/gPro_profile.py | hidelab/galaxy-central-hpc | 75539db90abe90377db95718f83cafa7cfa43301 | [
"CC-BY-3.0"
] | null | null | null | tools/annotation/gPro_profile.py | hidelab/galaxy-central-hpc | 75539db90abe90377db95718f83cafa7cfa43301 | [
"CC-BY-3.0"
] | null | null | null | tools/annotation/gPro_profile.py | hidelab/galaxy-central-hpc | 75539db90abe90377db95718f83cafa7cfa43301 | [
"CC-BY-3.0"
] | null | null | null | #!/usr/bin/env python
from anno_lib import gPro
import sys
##
# Runs the GOst profiler from g:Profiler
####
# jje 10152011
# Oliver Hofmann
# Bioinformatics Core
# Harvard School of Public Health
####
#term = "GO:0007050"
#gost_loc = "http://biit.cs.ut.ee/gprofiler/"
try:
idfile = sys.argv[1]
outfile = sys.argv[2]
loc = sys.argv[3]
spec = sys.argv[4]
pcut = sys.argv[5]
except IOError as (errno, strerror):
print "usage: gPro_profile.py infile outfile gPro_URL p-value_cutoff"
''' open and read in ids '''
with open(idfile) as ids:
id_raw = ids.read()
# convert ids from a list to space delim string
id_form = id_raw.replace("\n"," ")
gpro_obj = gPro.Profiler(loc,spec)
content = gpro_obj.ask_pcut(id_form,pcut)
# parse for term enrichment
key_content = gpro_obj.break_GOSt(content)
output = open(outfile,"w")
output.write(key_content)
output.close()
| 19.4 | 70 | 0.710195 | 142 | 873 | 4.253521 | 0.640845 | 0.057947 | 0.046358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027064 | 0.153494 | 873 | 44 | 71 | 19.840909 | 0.790257 | 0.31386 | 0 | 0 | 0 | 0 | 0.118182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.105263 | null | null | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a87ca034edbcf0ccd37f65c8627b5f05c80d33a3 | 5,135 | py | Python | deploy/proc_vector.py | loadwiki/insightface | a641d7c83efebbfd44e6d5ce591646a508744425 | [
"MIT"
] | null | null | null | deploy/proc_vector.py | loadwiki/insightface | a641d7c83efebbfd44e6d5ce591646a508744425 | [
"MIT"
] | null | null | null | deploy/proc_vector.py | loadwiki/insightface | a641d7c83efebbfd44e6d5ce591646a508744425 | [
"MIT"
] | null | null | null | import face_model
import argparse
import os
import cv2
import sys
import numpy as np
parser = argparse.ArgumentParser(description='face model test')
# general
parser.add_argument('--image-size', default='112,112', help='')
parser.add_argument('--enable-gpu', default=False, type=bool, help='enable to detect and inference in GPU')
#parser.add_argument('--model', default='../models2/model-r100-sfz/model,8', help='path to load model.')
parser.add_argument('--model', default='../../model/m1-insightv3/model,0', help='path to load model.')
parser.add_argument('--ga-model', default='', help='path to load model.')
parser.add_argument('--gpu', default=0, type=int, help='gpu id')
parser.add_argument('--det', default=0, type=int, help='mtcnn option, 1 means using R+O, 0 means detect from begining')
parser.add_argument('--mode', default=1, type=int, help='mode, 0: do nothing, 1:append or merge depends on merge threshold')
parser.add_argument('--threshold', default=0.4, type=float, help='cosine threshold')
parser.add_argument('--merge-threshold', default=0.8, type=float, help='cosine threshold for merge. 1.0: never do merging; -1.0: to merge everytime')
parser.add_argument('--append-threshold', default=0.55, type=float, help='cosine threshold for append. 1.0: never do append; -1.0: to append everytime')
parser.add_argument('--only-replace', default=False, type=bool, help='replace most similar vecotor when list is full,no merge')
parser.add_argument('--max_vector_size', default=8, type=int, help='')
parser.add_argument('--input', default='camera-video1', type=str, help='input npy file name')
args = parser.parse_args()
merge_count=0
append_count=0
full_count=0
none_count=0
print 'count init!'
X = np.load(args.input+'/X.npy')
#for i in xrange(X.shape[0]):
# if i==0:
# continue
# a = X[i]
# sims = []
# for j in xrange(0, i):
# b = X[j]
# sim = np.dot(a, b)
# sims.append(sim)
# print(i,max(sims))
def update_sim_score(vec_list):
for i,vec1 in enumerate(vec_list):
max_ids = 0
max_sims = 0.0
for j,vec2 in enumerate(vec_list):
if i == j:
next
else:
sim = np.dot(vec1[0], vec2[0])
if sim > max_sims:
max_sims = sim
max_ids = j
vec1[1] = max_ids
vec1[2] = max_sims
def insert_vec(vec_list, input_vec, input_id, input_score):
global merge_count
global append_count
global full_count
global none_count
if input_score > args.merge_threshold:
print 'do merge'
new_vec = vec_list[input_id][0] + input_vec
new_vec = new_vec / np.linalg.norm(new_vec)
vec_list[input_id][0] = new_vec
update_sim_score(vec_list)
merge_count+=1
elif input_score < args.append_threshold:
if len(vec_list) < args.max_vector_size:
vec_list.append([input_vec,input_id,input_score])
update_sim_score(vec_list)
print 'append to list'
append_count+=1
else:
max_score = 0.0
max_id = -1
print 'merge when list if full'
full_count+=1
for i,vec in enumerate(vec_list):
if max_score < vec[2]:
max_score = vec[2]
max_id = i
if max_score > input_score:
if args.only_replace==False:
vec2_id = vec_list[max_id][1]
new_vec = vec_list[max_id][0] + vec_list[vec2_id][0]
new_vec = new_vec / np.linalg.norm(new_vec)
vec_list[max_id][0] = new_vec
vec_list[vec2_id][0] = input_vec
update_sim_score(vec_list)
else:
vec_list[max_id][0] = input_vec
update_sim_score(vec_list)
else:
if args.only_replace==False:
new_vec = input_vec + vec_list[input_id][0]
new_vec = new_vec / np.linalg.norm(new_vec)
vec_list[input_id][0] = new_vec
update_sim_score(vec_list)
else:
pass
else:
none_count+=1
identities = {0: [[X[0],0,0]]}
face_img = {0:[0]}
for i in xrange(1, X.shape[0]):
a = X[i]
ids = []
sims = []
vector_indexes = []
for _id, vectors in identities.iteritems():
for vector_idx, vector in enumerate(vectors):
sim = np.dot(a, vector[0])
sims.append(sim)
ids.append(_id)
vector_indexes.append(vector_idx)
max_idx = np.argmax(sims)
max_score = sims[max_idx]
max_id = ids[max_idx]
max_vector_idx = vector_indexes[max_idx]
print(i, max_score, max_id)
if max_score<args.threshold:
new_id = len(identities)
identities[new_id] = [[a,0,0]]
face_img[new_id] = [i]
print 'append new id ', new_id
else:
if args.mode==1:
face_img[max_id].append(i)
insert_vec(identities[max_id],a,max_vector_idx,max_score)
print('final', len(identities))
print 'merge_count ', merge_count
print 'append_count ', append_count
print 'full_count', full_count
print 'none_count', none_count
os.system('mkdir ' + args.input + '/id')
for face_id,img_list in face_img.items():
new_dir = args.input + '/id/' + str(face_id)
os.system('mkdir ' + new_dir)
for img in img_list:
src = args.input + '/' + str(img) + '.jpg '
os.system('cp ' + src + new_dir)
print('face_id:%d, img idx:%d' % (face_id,img)) | 34.006623 | 152 | 0.659007 | 828 | 5,135 | 3.879227 | 0.172705 | 0.047945 | 0.074097 | 0.031756 | 0.275218 | 0.169365 | 0.134496 | 0.118929 | 0.080635 | 0.080635 | 0 | 0.0226 | 0.198637 | 5,135 | 151 | 153 | 34.006623 | 0.757959 | 0.054333 | 0 | 0.147287 | 0 | 0.015504 | 0.171759 | 0.006606 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.007752 | 0.046512 | null | null | 0.093023 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8811200a87031d7a49f84665c7e8ea51e4ccb14 | 648 | py | Python | django_test/articles/migrations/0008_auto_20200306_1955.py | MachineLearningIsEasy/python_lesson_22 | 7fe77da5fa611a54578d092207059f65b719ed8a | [
"MIT"
] | 1 | 2020-03-12T13:07:35.000Z | 2020-03-12T13:07:35.000Z | django_test/articles/migrations/0008_auto_20200306_1955.py | MachineLearningIsEasy/python_lesson_22 | 7fe77da5fa611a54578d092207059f65b719ed8a | [
"MIT"
] | null | null | null | django_test/articles/migrations/0008_auto_20200306_1955.py | MachineLearningIsEasy/python_lesson_22 | 7fe77da5fa611a54578d092207059f65b719ed8a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-03-06 19:55
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`; alters two fields on the Article model.

    dependencies = [
        ('articles', '0007_auto_20200306_1946'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='article_date',
            # NOTE(review): the default is a fixed datetime captured when the
            # migration was generated (naive, not "now") — typical artifact of
            # using datetime.now() as a field default.
            field=models.DateTimeField(default=datetime.datetime(2020, 3, 6, 19, 55, 10, 304561)),
        ),
        migrations.AlterField(
            model_name='article',
            name='article_img',
            # Image upload is optional and stored under MEDIA_ROOT/articles.
            field=models.ImageField(blank=True, null=True, upload_to='articles'),
        ),
    ]
| 25.92 | 98 | 0.606481 | 70 | 648 | 5.5 | 0.628571 | 0.114286 | 0.12987 | 0.150649 | 0.244156 | 0.244156 | 0.244156 | 0 | 0 | 0 | 0 | 0.104255 | 0.274691 | 648 | 24 | 99 | 27 | 0.714894 | 0.069444 | 0 | 0.333333 | 1 | 0 | 0.126456 | 0.03827 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a881135e08ba4f471543f5c8dd4cdb32fe51f578 | 2,420 | py | Python | models/002_auth.py | ecohealthalliance/sicki | 390ab06cedc205a415d823339f5816730dcc4b72 | [
"Apache-2.0"
] | null | null | null | models/002_auth.py | ecohealthalliance/sicki | 390ab06cedc205a415d823339f5816730dcc4b72 | [
"Apache-2.0"
] | null | null | null | models/002_auth.py | ecohealthalliance/sicki | 390ab06cedc205a415d823339f5816730dcc4b72 | [
"Apache-2.0"
] | null | null | null | from gluon.tools import Auth
# Configure web2py Auth: no e-mail verification or manual approval on signup,
# but password resets must be verified.
auth = Auth(db, hmac_key=Auth.get_or_create_key())
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
auth.define_tables ()
# Ensure the Administrator group exists (created once on first run).
admin_role = auth.id_group ("Administrator")
if not admin_role:
    admin_role = auth.add_group ("Administrator", "System Administrator - can access & make changes to any data")
# -1 acts as a sentinel meaning "no such role configured" — see check_role().
editor_role = -1
writer_role = -1
def require_logged_in (func):
    """Decorator: reject the request with HTTP 401 unless a user is logged in.

    Fix: apply functools.wraps so the wrapped view keeps its __name__ and
    __doc__ (web2py routing and debugging rely on function names).
    """
    import functools

    @functools.wraps(func)
    def wrapper (*args, **kwargs):
        if not auth.user:
            raise HTTP(401, "Unauthorized")
        return func (*args, **kwargs)
    return wrapper
def require_logged_in_deprecated ():
    """Deprecated request guard: redirect anonymous users to the login page."""
    if auth.user:
        return
    redirect (URL (r = request, c = 'default', f = 'user', args = ['login']))
def require_role (role):
    """Decorator factory: require a logged-in user who belongs to *role*.

    A role of -1 means "no role configured"; the wrapper then returns None
    without calling the view (preserved original behaviour). 401 is raised
    for anonymous users, 403 for authenticated users lacking the role.

    Fix: apply functools.wraps so the wrapped view keeps its metadata.
    """
    import functools

    def decorator (func):
        @functools.wraps(func)
        def wrapper (*args, **kwargs):
            if not auth.user:
                raise HTTP(401, "Unauthorized")
            if role == -1:
                return
            elif not auth.has_membership (role, auth.user.id):
                raise HTTP (403, "Forbidden")
            return func (*args, **kwargs)
        return wrapper
    return decorator
def require_role_deprecated (role):
    """Deprecated request guard: redirect to login, then enforce *role*.

    A role of -1 means "no role configured" and always passes.
    """
    if not auth.user:
        redirect (URL (r = request, c = 'default', f = 'user', args = ['login']))
    if role == -1:
        return
    if not auth.has_membership (role, auth.user.id):
        raise HTTP (401, "Unauthorized")
def logged_in ():
    """Return True when a user session is active.

    Fix: use `is not None` instead of `!= None` — identity comparison is the
    idiomatic None check and avoids invoking a custom __ne__ on the user row.
    """
    return auth.user is not None
def has_role (role):
    """Return True when the current user is logged in and belongs to *role*."""
    return logged_in () and auth.has_membership (role, auth.user.id)
def check_logged_in ():
    """Return True when a user session is active (duplicate of logged_in()).

    Fix: use `is not None` instead of `!= None` (idiomatic None check).
    """
    return auth.user is not None
def check_role (role):
    """Permission check honouring the admin/editor/writer hierarchy.

    Returns False for anonymous users; True when no role is required (-1),
    when the user is an Administrator, when an editor is asked for writer
    rights, or when the user holds *role* directly.
    """
    user = auth.user
    if not user:
        return False
    if role == -1:
        # -1 means "no specific role required".
        return True
    if auth.has_membership (admin_role, user.id):
        # Administrators may do anything.
        return True
    if role == writer_role and auth.has_membership (editor_role, user.id):
        # Editors inherit writer permissions.
        return True
    return bool(auth.has_membership (role, user.id))
def check_user (user_id):
    """Return True when the logged-in user's id equals *user_id*."""
    if auth.user:
        return auth.user.id == user_id
    return False
def user_name (id):
    """Look up a user's display name; 'Unknown' when the id does not exist."""
    user_table = db[auth.settings.table_user]
    row = db (user_table.id == id).select ().first ()
    if not row:
        return 'Unknown'
    return row.first_name + ' ' + row.last_name
| 28.470588 | 113 | 0.634298 | 322 | 2,420 | 4.614907 | 0.251553 | 0.080754 | 0.042396 | 0.05249 | 0.453567 | 0.430013 | 0.353297 | 0.258412 | 0.235532 | 0.235532 | 0 | 0.009439 | 0.255785 | 2,420 | 84 | 114 | 28.809524 | 0.815658 | 0 | 0 | 0.5 | 0 | 0 | 0.070661 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.185714 | false | 0.014286 | 0.014286 | 0.028571 | 0.514286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a884c82cc571aa5061e58f71c9e99493a0106092 | 615 | py | Python | numpan/pan03.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | numpan/pan03.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | 1 | 2021-11-07T04:54:55.000Z | 2021-11-07T04:54:55.000Z | numpan/pan03.py | jaywoong/learn_pandas | 972ad015c142d5e88ea821694b7a95e6aeb4e172 | [
"Apache-2.0"
] | null | null | null | import pandas as pd;
import numpy as np

# Frame built from a list of row lists.
df1 = pd.DataFrame([['A', 2], ['B', 4]])
print(df1)

# Frame built from a column dict (column order follows insertion order).
data = {
    'subject': ['math', 'comp', 'phys', 'chem'],
    'score': [100, 90, 85, 95],
    'students': [94, 32, 83, 17],
}
df2 = pd.DataFrame(data)
print(df2)
print(len(df2))
print(df2.shape)
print(df2.shape[0])  # row count
print(df2.shape[1])  # column count

# Re-project df2 with an explicit column order.
df3 = pd.DataFrame(df2, columns=['students', 'score', 'subject'])
print(df3)
print(df3['students'][2])
print(df3[df3['score'] > 90])

# Nested dict -> frame; missing inner keys become NaN.
dic1 = {'math': {1: 80, 2: 90, 3: 100}, 'comp': {1: 90, 2: 100}}
df4 = pd.DataFrame(dic1)
print(df4)
| 21.964286 | 64 | 0.585366 | 99 | 615 | 3.636364 | 0.454545 | 0.122222 | 0.108333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119772 | 0.144715 | 615 | 27 | 65 | 22.777778 | 0.564639 | 0.014634 | 0 | 0 | 0 | 0 | 0.131012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.454545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
a88abbaa6de226c43d73cf509cf13cab18de2ecb | 848 | py | Python | 05. DiagonalDifference/solution.py | avishkar2001/AlgorithmHackerRank | d9489ba329121cbbbbc28b15fb8570d7696cde88 | [
"MIT"
] | 11 | 2020-09-18T16:23:06.000Z | 2022-01-22T11:59:57.000Z | 05. DiagonalDifference/solution.py | avishkar2001/AlgorithmHackerRank | d9489ba329121cbbbbc28b15fb8570d7696cde88 | [
"MIT"
] | 1 | 2020-10-02T14:33:08.000Z | 2021-10-05T02:44:19.000Z | 05. DiagonalDifference/solution.py | avishkar2001/AlgorithmHackerRank | d9489ba329121cbbbbc28b15fb8570d7696cde88 | [
"MIT"
] | 11 | 2020-09-18T16:23:13.000Z | 2022-01-22T11:59:58.000Z | '''
Topic : Algorithms
Subtopic : Diagonal Difference
Language : Python
Problem Statement : Given a square matrix, calculate the absolute difference between the sums of its diagonals.
Url : https://www.hackerrank.com/challenges/diagonal-difference/problem
'''
#!/bin/python3
# Complete the 'diagonalDifference' function below.
#
# The function is expected to return an INTEGER.
# The function accepts 2D_INTEGER_ARRAY arr as parameter.
#
def diagonalDifference(arr):
    """Return |sum(primary diagonal) - sum(secondary diagonal)| of square matrix arr."""
    primary = secondary = 0
    last = len(arr) - 1
    for idx, row in enumerate(arr):
        primary += row[idx]
        secondary += row[last - idx]
    return abs(primary - secondary)

assert diagonalDifference([[11,2,4], [4,5,6], [10,8,-12]]) == 15
assert diagonalDifference([[1,2,3], [4,5,6], [9,8,9]]) == 2
assert diagonalDifference([[1,1,1,1], [1,1,1,1], [1,1,1,1], [1,1,1,1]]) == 0
| 32.615385 | 115 | 0.665094 | 135 | 848 | 4.162963 | 0.548148 | 0.053381 | 0.074733 | 0.092527 | 0.071174 | 0.02847 | 0.02847 | 0.02847 | 0.02847 | 0.02847 | 0 | 0.068571 | 0.174528 | 848 | 25 | 116 | 33.92 | 0.734286 | 0.517689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04 | 0.375 | 1 | 0.125 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8908a5ec5616695737ca542359a44699f12cde8 | 6,722 | py | Python | gg_gui/gui/gg_start.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | 1 | 2022-03-10T21:46:07.000Z | 2022-03-10T21:46:07.000Z | gg_gui/gui/gg_start.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | null | null | null | gg_gui/gui/gg_start.py | ealcobaca/optimizer_pool | e93ac72c1547bc3813a0edf822d5fd453f22ce49 | [
"MIT"
] | 1 | 2022-03-10T21:46:09.000Z | 2022-03-10T21:46:09.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/GG_start.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from utilGui import Names
class Ui_main_window(object):
    """Widget layout for the Alchemist main window (pyuic5-derived, hand-edited).

    NOTE(review): source indentation was lost in extraction; loop extents in
    the header-styling sections were reconstructed — confirm against the .ui
    file before relying on them.
    """

    def setupUi(self, main_window):
        """Build and position all widgets on *main_window*."""
        main_window.setObjectName("main_window")
        main_window.resize(684, 828)
        main_window.setWindowTitle('Alchemist');
        self.centralwidget = QtWidgets.QWidget(main_window)
        self.centralwidget.setObjectName("centralwidget")
        # run_btn
        self.run_btn = QtWidgets.QPushButton(self.centralwidget)
        self.run_btn.setText("Run")
        self.run_btn.setGeometry(QtCore.QRect(510, 20, 121, 30))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.run_btn.setFont(font)
        self.run_btn.setStyleSheet("border-color: rgb(114, 159, 207);\n"
                                   "background-color: rgb(78, 154, 6);")
        self.run_btn.setObjectName("run_btn")
        # min_max table
        self.min_max_table = QtWidgets.QTableWidget(self.centralwidget)
        self.min_max_table.setGeometry(QtCore.QRect(20, 100, 421, 192))
        self.min_max_table.setObjectName("min_max_table")
        self.min_max_table.setColumnCount(2)
        self.min_max_table.setRowCount(len(Names.Chemical_Compounds))
        # One row per chemical compound, labelled with the compound name.
        for i in range(len(Names.Chemical_Compounds)):
            item = QtWidgets.QTableWidgetItem(Names.Chemical_Compounds[i])
            self.min_max_table.setVerticalHeaderItem(i, item)
        # Column headers: "min" and "max", italic on a blue background.
        item = QtWidgets.QTableWidgetItem("min")
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        self.min_max_table.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem("max")
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        self.min_max_table.setHorizontalHeaderItem(1, item)
        # Default search bounds: every compound starts at [0.0, 1.0].
        for i in range(len(Names.Chemical_Compounds)):
            self.min_max_table.setItem(
                i, 0, QtWidgets.QTableWidgetItem("0.0"))
            self.min_max_table.setItem(
                i, 1, QtWidgets.QTableWidgetItem("1.0"))
        self.min_max_label = QtWidgets.QLabel(self.centralwidget)
        self.min_max_label.setGeometry(QtCore.QRect(20, 70, 421, 22))
        self.min_max_label.setObjectName("min_max_label")
        self.min_max_label.setText("Search space limitation:")
        # opt_label
        self.opt_label = QtWidgets.QLabel(self.centralwidget)
        self.opt_label.setGeometry(QtCore.QRect(460, 100, 201, 22))
        self.opt_label.setObjectName("opt_label")
        self.opt_label.setText("Methods:")
        # amount
        self.amount_sp = QtWidgets.QSpinBox(self.centralwidget)
        self.amount_sp.setGeometry(QtCore.QRect(460, 270, 201, 31))
        self.amount_sp.setMinimum(1)
        self.amount_sp.setMaximum(10000)
        self.amount_sp.setValue(1)
        self.amount_sp.setObjectName("amount_sp")
        self.amount_label = QtWidgets.QLabel(self.centralwidget)
        self.amount_label.setGeometry(QtCore.QRect(460, 240, 201, 22))
        self.amount_label.setObjectName("amount_label")
        self.amount_label.setText("Amount:")
        # tg
        self.tg_dsb = QtWidgets.QDoubleSpinBox(self.centralwidget)
        self.tg_dsb.setGeometry(QtCore.QRect(460, 200, 201, 31))
        self.tg_dsb.setMaximum(50000)
        self.tg_dsb.setMinimum(0)
        self.tg_dsb.setDecimals(0)
        self.tg_dsb.setSingleStep(5)
        self.tg_dsb.setObjectName("tg_dsb")
        self.tg_dsb.setValue(700)
        self.tg_label = QtWidgets.QLabel(self.centralwidget)
        self.tg_label.setGeometry(QtCore.QRect(460, 170, 201, 22))
        self.tg_label.setObjectName("tg_label")
        self.tg_label.setText("TG:")
        # opt_cb
        self.opt_cb = QtWidgets.QComboBox(self.centralwidget)
        self.opt_cb.setGeometry(QtCore.QRect(460, 130, 201, 30))
        self.opt_cb.setObjectName("opt_cb")
        self.opt_cb.addItem("SA")
        self.opt_cb.addItem("PSO")
        self.opt_cb.addItem("RS")
        # result_tb
        self.result_label = QtWidgets.QLabel(self.centralwidget)
        self.result_label.setGeometry(QtCore.QRect(20, 350, 641, 22))
        self.result_label.setObjectName("result_label")
        self.result_label.setText("Results:")
        self.result_tb = QtWidgets.QTableWidget(self.centralwidget)
        self.result_tb.setGeometry(QtCore.QRect(20, 380, 641, 341))
        self.result_tb.setObjectName("result_table")
        self.result_tb.setColumnCount(46)
        # self.result_tb.setRowCount(1)
        # One styled column header per chemical element.
        for i in range(len(Names.Chemical_Elemnts)):
            item = QtWidgets.QTableWidgetItem(Names.Chemical_Elemnts[i])
            self.result_tb.setHorizontalHeaderItem(i, item)
            font = QtGui.QFont()
            font.setItalic(True)
            item.setFont(font)
            item.setBackground(QtGui.QColor(114, 159, 207))
        # Append a final "TG" column after the last element column
        # (reuses the final value of loop index i).
        item = QtWidgets.QTableWidgetItem("TG")
        self.result_tb.setHorizontalHeaderItem(i+1, item)
        font = QtGui.QFont()
        font.setItalic(True)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        # discard_btn
        self.discard_btn = QtWidgets.QPushButton(self.centralwidget)
        self.discard_btn.setText("Discard")
        self.discard_btn.setGeometry(QtCore.QRect(540, 730, 122, 30))
        self.discard_btn.setObjectName("discard_btn")
        # save_btn
        self.save_btn = QtWidgets.QPushButton(self.centralwidget)
        self.save_btn.setText("Save")
        self.save_btn.setGeometry(QtCore.QRect(400, 730, 122, 30))
        self.save_btn.setToolTip("")
        self.save_btn.setObjectName("save_btn")
        # clean_all_btn
        self.clean_all_btn = QtWidgets.QPushButton(self.centralwidget)
        self.clean_all_btn.setGeometry(QtCore.QRect(20, 730, 122, 30))
        self.clean_all_btn.setObjectName("clean_all_btn")
        self.clean_all_btn.setText("Clan All")
        main_window.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(main_window)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 684, 27))
        self.menubar.setObjectName("menubar")
        main_window.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(main_window)
        self.statusbar.setObjectName("statusbar")
        main_window.setStatusBar(self.statusbar)
| 41.239264 | 74 | 0.666617 | 822 | 6,722 | 5.281022 | 0.205596 | 0.066575 | 0.072564 | 0.034554 | 0.366966 | 0.25455 | 0.156876 | 0.126699 | 0.110113 | 0.110113 | 0 | 0.048887 | 0.217941 | 6,722 | 162 | 75 | 41.493827 | 0.776869 | 0.045968 | 0 | 0.198413 | 1 | 0 | 0.05473 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007937 | false | 0 | 0.015873 | 0 | 0.031746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8945cb8801554f627e2ef91c6ad0ae2533363a4 | 603 | py | Python | deepchem/data/test_data_loader.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | 2 | 2021-04-01T01:17:53.000Z | 2021-10-04T16:46:13.000Z | deepchem/data/test_data_loader.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | 1 | 2020-07-13T18:59:49.000Z | 2020-07-13T18:59:49.000Z | deepchem/data/test_data_loader.py | n3011/deepchem | c316d998c462ce01032f0dae883856b400ea4765 | [
"MIT"
] | 1 | 2021-04-06T20:32:02.000Z | 2021-04-06T20:32:02.000Z | import os
from unittest import TestCase
from io import StringIO
import tempfile
import shutil
import deepchem as dc
class TestCSVLoader(TestCase):
    """Unit test for dc.data.CSVLoader on a minimal single-row CSV.

    Fixes: the temp file is now removed even when featurization raises
    (try/finally), and the leftover debug print of the temp path is gone.
    """

    def test_load_singleton_csv(self):
        # A one-row CSV should featurize into a dataset of length 1.
        fin = tempfile.NamedTemporaryFile(mode='w', delete=False)
        fin.write("smiles,endpoint\nc1ccccc1,1")
        fin.close()
        try:
            featurizer = dc.feat.CircularFingerprint(size=1024)
            tasks = ["endpoint"]
            loader = dc.data.CSVLoader(
                tasks=tasks, smiles_field="smiles", featurizer=featurizer)
            X = loader.featurize(fin.name)
            self.assertEqual(1, len(X))
        finally:
            os.remove(fin.name)
| 24.12 | 66 | 0.719735 | 78 | 603 | 5.512821 | 0.628205 | 0.048837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015968 | 0.169154 | 603 | 24 | 67 | 25.125 | 0.842315 | 0 | 0 | 0 | 0 | 0 | 0.069652 | 0.044776 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.052632 | false | 0 | 0.315789 | 0 | 0.421053 | 0.105263 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
a897a4fa8a48d750657c9f7f093c97c8595dcb6d | 3,094 | py | Python | src/test/aiml_tests/pattern_set_tests/test_pattern_set_aiml.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | src/test/aiml_tests/pattern_set_tests/test_pattern_set_aiml.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | src/test/aiml_tests/pattern_set_tests/test_pattern_set_aiml.py | narnikgamarnikus/program-y | 777b9a8a75ec787c037de9f11a8527875ff450b1 | [
"MIT"
] | null | null | null | import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.sections.brain.file import BrainFileConfiguration
class BasicTestClient(TestClient):
    """Test client that points the brain's AIML and set files at this test's directory."""

    def __init__(self):
        TestClient.__init__(self)

    def load_configuration(self, arguments):
        super(BasicTestClient, self).load_configuration(arguments)
        here = os.path.dirname(__file__)
        files = self.configuration.brain_configuration.files
        files.aiml_files._files = here
        files.set_files._files = here + "/sets"
        files.set_files._extension = ".txt"
class PatternsetAIMLTests(unittest.TestCase):
    """Pattern <set> matching tests: single-word, multi-word and mixed-case sets."""

    def setUp(cls):
        # Stored on the class so every test shares one client instance.
        PatternsetAIMLTests.test_client = BasicTestClient()

    def _ask(self, question):
        # Send *question* to the bot and return its reply.
        return PatternsetAIMLTests.test_client.bot.ask_question("test", question)

    def test_patten_set_match(self):
        self.assertEqual(self._ask("MY FAVORITE COLOR IS AMBER"),
                         "Amber IS A NICE COLOR.")

    def test_patten_match_multi_word_set(self):
        self.assertEqual(self._ask("MY FAVORITE COLOR IS AIR FORCE BLUE"),
                         "Air Force blue IS A NICE COLOR.")

    def test_patten_match_mixed_word_set(self):
        for colour, reply in (("RED", "Red"),
                              ("RED ORANGE", "Red Orange"),
                              ("SACRAMENTO STATE GREEN", "Sacramento State green")):
            self.assertEqual(self._ask("MY FAVORITE COLOR IS " + colour),
                             reply + " IS A NICE COLOR.")

    def test_patten_match_mixed_word_set_longer_sentence(self):
        cases = (
            ("I DO NOT LIKE RED VERY MUCH", "Red"),
            ("I DO NOT LIKE RED ORANGE AT ALL", "Red Orange"),
            ("I DO NOT LIKE SACRAMENTO STATE GREEN AT ALL", "Sacramento State green"),
        )
        for question, colour in cases:
            self.assertEqual(self._ask(question),
                             "IT IS OK, " + colour + " IS NOT MY BEST COLOUR EITHER")

    def test_patten_match_mixed_word_set_at_front(self):
        for colour, reply in (("RED", "Red"),
                              ("RED ORANGE", "Red Orange"),
                              ("SACRAMENTO STATE GREEN", "Sacramento State green")):
            self.assertEqual(self._ask(colour + " IS A NICE COLOUR"),
                             "YES " + reply + " IS A LOVELY COLOUR.")
| 50.721311 | 123 | 0.73788 | 403 | 3,094 | 5.471464 | 0.193548 | 0.12517 | 0.157823 | 0.184581 | 0.689796 | 0.654422 | 0.603628 | 0.530612 | 0.468481 | 0.399093 | 0 | 0 | 0.170976 | 3,094 | 60 | 124 | 51.566667 | 0.859649 | 0 | 0 | 0 | 0 | 0 | 0.257679 | 0 | 0 | 0 | 0 | 0 | 0.261905 | 1 | 0.190476 | false | 0 | 0.095238 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a899d0bde8415bf24f5d7bf27bdd0c2ee8c0fdf1 | 391 | py | Python | clase 1/quiz 1/quiz 1 exe.py | amedina14/uip-iq17-pc3 | 89f04c0670079384cee40736d5c92175a8c586a5 | [
"MIT"
] | null | null | null | clase 1/quiz 1/quiz 1 exe.py | amedina14/uip-iq17-pc3 | 89f04c0670079384cee40736d5c92175a8c586a5 | [
"MIT"
] | null | null | null | clase 1/quiz 1/quiz 1 exe.py | amedina14/uip-iq17-pc3 | 89f04c0670079384cee40736d5c92175a8c586a5 | [
"MIT"
] | null | null | null | '''
Quiz 1:
Hacer un programa que lea una temperatura en farenheit y la convierta en celsius y si es mayor
a 100°C imprima "caliente". Si es menor a 0°C imprima "frio"
'''
# Read a Fahrenheit temperature, convert to Celsius, and classify it.
tempF = int(input("TempF: "))
tempC = (tempF - 32) * 5 / 9  # F -> C: subtract 32, scale by 5/9
print("\nLa temperatura en Celsius es " + str(tempC))
if tempC >= 100:
    print("caliente")
elif tempC < 0:
    print("frio")
print("frio") | 23 | 95 | 0.629156 | 66 | 391 | 3.757576 | 0.590909 | 0.104839 | 0.072581 | 0.072581 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055375 | 0.214834 | 391 | 17 | 96 | 23 | 0.745928 | 0.465473 | 0 | 0 | 0 | 0 | 0.268817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.428571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
a89e48cf3a4bd044b3bfeec924a6e17c47c0d752 | 543 | py | Python | setup.py | anderct105/Dataset_python | 9ab17eda584b6d65e6b37eb2c377f7634d375e8a | [
"MIT"
] | null | null | null | setup.py | anderct105/Dataset_python | 9ab17eda584b6d65e6b37eb2c377f7634d375e8a | [
"MIT"
] | null | null | null | setup.py | anderct105/Dataset_python | 9ab17eda584b6d65e6b37eb2c377f7634d375e8a | [
"MIT"
] | null | null | null | from setuptools import setup
# Package metadata for the `dataset` distribution.
setup(
    name='dataset',
    version='0.0.1',
    author='Ander Cejudo',
    author_email='acejudo001@ikasle.ehu.eus',
    packages=['dataset'],
    # NOTE(review): placeholder text, not a real URL — fill in before release.
    url='Indicar una URL para el paquete...',
    license='LICENSE.txt',
    description='This package includes some basic functions to work with a dataset object',
    # Long description is read from README.txt at build time (file must exist).
    long_description=open('README.txt').read(),
    tests_require=['pytest'],
    install_requires=[
        "seaborn >= 0.9.0",
        "pandas >= 0.25.1",
        "matplotlib >= 3.1.1",
        "numpy >=1.17.2"
    ],
)
) | 27.15 | 90 | 0.635359 | 72 | 543 | 4.736111 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045767 | 0.195212 | 543 | 20 | 91 | 27.15 | 0.734554 | 0 | 0 | 0 | 0 | 0 | 0.466912 | 0.045956 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8a0133356f4f20d5bb0f1a7ef3b7ad354fcc592 | 1,891 | py | Python | task/bq2bq/executor/bumblebee/loader.py | pikochip/transformers | aa14f19efd2a83aebd7b0d3a296053283b98e624 | [
"Apache-2.0"
] | 34 | 2021-06-16T17:25:13.000Z | 2021-08-13T08:21:22.000Z | task/bq2bq/executor/bumblebee/loader.py | pikochip/transformers | aa14f19efd2a83aebd7b0d3a296053283b98e624 | [
"Apache-2.0"
] | 12 | 2021-08-10T10:08:48.000Z | 2022-03-09T10:14:29.000Z | task/bq2bq/executor/bumblebee/loader.py | pikochip/transformers | aa14f19efd2a83aebd7b0d3a296053283b98e624 | [
"Apache-2.0"
] | 4 | 2021-08-10T13:10:22.000Z | 2022-02-19T14:04:21.000Z | from bumblebee.bigquery_service import BigqueryService
from datetime import datetime
from abc import ABC
from abc import abstractmethod
from bumblebee.config import LoadMethod
class BaseLoader(ABC):
    """Abstract interface for loaders that execute a query and load its result."""

    @abstractmethod
    def load(self, query):
        """Run *query* and load the result into the configured destination."""
        pass
class PartitionLoader(BaseLoader):
    """Loads query results into one date partition of a BigQuery table."""

    def __init__(self, bigquery_service, destination: str, load_method: LoadMethod, partition: datetime):
        self.bigquery_service = bigquery_service
        self.destination_name = destination
        self.load_method = load_method
        self.partition_date = partition

    def load(self, query):
        """Execute *query* and write into ``<table>$<YYYYMMDD>``."""
        partition_suffix = self.partition_date.strftime("%Y%m%d")
        target_table = "{}${}".format(self.destination_name, partition_suffix)
        return self.bigquery_service.transform_load(
            query=query,
            write_disposition=self.load_method.write_disposition,
            destination_table=target_table)
class TableLoader(BaseLoader):
    """Loads query results into a whole destination table."""

    def __init__(self, bigquery_service, destination: str, load_method: LoadMethod):
        self.bigquery_service = bigquery_service
        self.full_table_name = destination
        self.load_method = load_method

    def load(self, query):
        """Execute *query* and write the result into the full table."""
        disposition = self.load_method.write_disposition
        return self.bigquery_service.transform_load(
            query=query,
            write_disposition=disposition,
            destination_table=self.full_table_name)
class DMLLoader(BaseLoader):
    """Runs a DML statement — the query itself performs the mutation."""

    def __init__(self, bigquery_service: BigqueryService, destination: str):
        self.bigquery_service = bigquery_service
        self.full_table_name = destination

    def load(self, query):
        """Execute *query* directly against BigQuery."""
        return self.bigquery_service.execute_query(query)
| 35.018519 | 105 | 0.673189 | 196 | 1,891 | 6.188776 | 0.204082 | 0.160758 | 0.140973 | 0.052762 | 0.545754 | 0.545754 | 0.484749 | 0.370157 | 0.323166 | 0.323166 | 0 | 0 | 0.260709 | 1,891 | 53 | 106 | 35.679245 | 0.867668 | 0 | 0 | 0.351351 | 0 | 0 | 0.005817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.189189 | false | 0.027027 | 0.135135 | 0.054054 | 0.513514 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a8a04bb0a9831548bd868b09fed78c535d82ee0a | 250 | py | Python | Servus/home/urls.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | Servus/home/urls.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | Servus/home/urls.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | # coding=utf-8
from django.conf.urls import url
from .views import home, positioned, tiled
urlpatterns = [
    # Site root and explicit /home/ both render the home view.
    url(r'^$', home),
    url(r'home/$', home),
    # Plan view selected by a numeric plan id (1-4 digits).
    url(r'positioned-(?P<plan_id>[0-9]{1,4})/$', positioned),
    url(r'tiled/$', tiled),
]
| 20.833333 | 61 | 0.608 | 39 | 250 | 3.897436 | 0.589744 | 0.105263 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024038 | 0.168 | 250 | 11 | 62 | 22.727273 | 0.701923 | 0.048 | 0 | 0 | 0 | 0 | 0.217021 | 0.153191 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.25 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8a3681ea625f23d752d8458f7791096844d0480 | 4,482 | py | Python | 2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | 2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | 2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 97eaa66b324ef1850237dd6dcd6d8f71a1a2b64b | [
"MIT"
] | null | null | null | 1. Number of 1 Bits (HammingWeight):
https://leetcode.com/explore/interview/card/top-interview-questions-easy/99/others/565/
# Easy way
def hammingWeight(self, n: int) -> int:
    """Count set bits via the binary string representation."""
    return format(n, 'b').count('1')
# Harder way - https://stackoverflow.com/questions/21237767/python-a-b-meaning
def hammingWeight(self, n):
    """Count set bits by repeatedly clearing the lowest one (Kernighan's trick)."""
    total = 0
    while n:
        total += 1
        n &= n - 1
    return total
2. Hamming Distance:
# Easy Way
bin(x ^ y).count('1')
#Right way (Bitwise Operators): https://code.tutsplus.com/articles/understanding-bitwise-operators--active-11301
# Approach 1: Just check every bit in both numbers and increment when they are different
def hammingDistance(self, x: int, y: int) -> int:
    """Count differing bit positions by comparing the low bit of x and y each step."""
    diff_bits = 0
    while x or y:
        if (x & 1) != (y & 1):
            diff_bits += 1
        x >>= 1
        y >>= 1
    return diff_bits
# Approach 2: Just make XOR of x and y and after that count the number of '1' bits.
# because XOR of two different bits is always 1
def hammingDistance(self, x: int, y: int) -> int:
    """Count the '1' bits of x ^ y (XOR marks every differing position)."""
    xor = x ^ y
    count = 0
    while xor > 0:
        count += xor & 1
        xor >>= 1
    return count
# Approach 3: Again make XOR of x and y but when we count the number of '1' bits
# we make the trick n&(n-1) which removes last '1' bit
def hammingDistance(self, x: int, y: int) -> int:
    """Count '1' bits of x ^ y using the n & (n-1) lowest-bit-clearing trick."""
    remaining = x ^ y
    distance = 0
    while remaining > 0:
        remaining &= remaining - 1
        distance += 1
    return distance
# Good explanation of XOR solution: https://www.youtube.com/watch?v=UP4GhCxeC4I
3. Reverse Bits (Reverse Bits of a 32 bits unsigned integer):
# https://leetcode.com/explore/featured/card/top-interview-questions-easy/99/others/648/discuss/54932/Three-different-solutions-in-python
def reverseBits(self, n):
    """Reverse the bit order of a 32-bit unsigned integer."""
    bits = format(n, '032b')   # zero-padded 32-character bit string
    return int(bits[::-1], 2)  # parse the reversed string back to an int
4. Pascals Triangle:
def generate(self, numRows):
    """Return the first numRows rows of Pascal's triangle as nested lists."""
    triangle = []
    for row_idx in range(numRows):
        row = [1] * (row_idx + 1)
        # Interior cells are the sum of the two cells above them.
        for col in range(1, row_idx):
            prev = triangle[row_idx - 1]
            row[col] = prev[col - 1] + prev[col]
        triangle.append(row)
    return triangle
5. Valid Parenthesis:
def isValid(self, s):
    """Return True iff every bracket in *s* is properly matched and nested.

    :type s: str
    :rtype: bool
    """
    # Maps each closing bracket to the opener it must match.
    closing_to_opening = {")": "(", "}": "{", "]": "["}
    stack = []
    for char in s:
        opener = closing_to_opening.get(char)
        if opener is None:
            # Opening bracket: remember it for later matching.
            stack.append(char)
            continue
        # Closing bracket: the most recent opener must match it.
        if not stack or stack.pop() != opener:
            return False
    # Valid only when no unmatched openers remain (e.g. "((()" fails here).
    return not stack
def isValid(self, s):
    """Compact bracket-matching check using an explicit stack."""
    pairs = {")": "(", "}": "{", "]": "["}
    open_stack = []
    for ch in s:
        if ch not in pairs:
            open_stack.append(ch)
        else:
            last = open_stack.pop() if open_stack else '#'
            if pairs[ch] != last:
                return False
    return not open_stack
6. Missing Number: (Missing number in an array)
# One line
def missingNumber(self, nums):
    """Missing value in 0..n: expected arithmetic-series sum minus actual sum."""
    n = len(nums)
    return n * (n + 1) // 2 - sum(nums)
# Two lines
def missingNumber(self, nums):
    """Return the missing value in a permutation of 0..n with one element absent.

    Fix: use floor division (//) — on Python 3 the original `/` produced a
    float (e.g. 2.0) where an int index is expected. n*(n+1) is always even,
    so the result is exact.
    """
    n = len(nums)
    return n * (n + 1) // 2 - sum(nums)
| 33.699248 | 141 | 0.555779 | 605 | 4,482 | 4.08595 | 0.328926 | 0.06068 | 0.010922 | 0.015777 | 0.217233 | 0.183252 | 0.154935 | 0.101537 | 0.070793 | 0.070793 | 0 | 0.030118 | 0.340696 | 4,482 | 133 | 142 | 33.699248 | 0.80643 | 0.34315 | 0 | 0.512821 | 0 | 0 | 0.008397 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8a60a167cdfc1752d84114af1252a56c3863ffe | 4,701 | py | Python | fca/algorithms/incremental.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | 3 | 2015-09-07T00:16:16.000Z | 2019-01-11T20:27:56.000Z | fca/algorithms/incremental.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | fca/algorithms/incremental.py | ksiomelo/cubix | cd9e6dda6696b302a7c0d383259a9d60b15b0d55 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Holds implementation of Norris' algorithm"""
from copy import copy
from fca import Concept, ConceptSystem,ConceptLink
#from fca.algorithms.incremental import derivation
def add_object(object, intent, lattice):
sup_g = lattice.get_bottom_concept()
sup_g.intent | intent #{Adjust (sup(G)) for new elements in E'}
if len(sup_g.intent) == 0 and len(sup_g.extent) == 0: # IF sup(G) = (Ø, Ø) THEN
sup_g = Concept(intent=intent, extent=set(object))#Replace sup(G) by: ({x*},f({x*}))
else:
if not intent <= sup_g.intent: #IF NOT (f*({x*}) ⊆ X'(sup(G))) THEN
if len(sup_g.extent) == 0: # IF X(sup(G)) = Ø THEN X'(sup(G)) := X'(sup(G)) ∪ f({x*})
sup_g.intent |= intent
else:
h = Concept(extent=set(), intent=(sup_g.intent | intent)) #Add new pair H {becomes sup(G*)}: (Ø,X'(sup(G)) ∪ f({x*}));
lattice._concepts.append(h)
cl = ConceptLink(_parent=sup_g, _child=h) #Add new edge sup(G)->H
lattice._links.append(cl)
#C[i] <- {H: ||X'(H)||=i}; {Class pairs in buckets with same cardinality of the X' sets}
card = []
sorted_card = sorted(lattice._concepts, key=lambda c: len(c.intent))
cur_card = 0;
row = []
for c in sorted_card:
if cur_card == len(c.intent):
row.append(c)
else:
cur_card = len(c.intent)
card.append(row)
row = [c] #TODO does it empty ??
card.append(row)
max_card = len(sorted_card[-1].intent)
#card_prime = []# C'[i] <- Ø; {Initialize the C' sets}
card_prime = [ [] for i in range(0,max_card+1)]
for i in range(0,max_card+1): # TODO does it take the max? # FOR i : 0 TO maximum cardinality DO
for h in card[i]:#FOR each pair H in C[i]
if h.intent <= intent: #IF X'(H) ⊆ f({x*}) THEN
print "modified pair:"+str(h.intent) # {modified pair}
h.extent.add(object)#Add x* to X(H);
card_prime.insert(i,[h]) # Add H to C'[i] ;
if h.intent == intent: #IF X'(H) = f({x*}) THEN exit algorithm
return
else:#{old pair}
int = h.intent & intent# int <- X'(H) ∩ f({x*});
#IF ¬∃ H1 ∈ C'[||int||] such that X'(H1)=Int THEN {H is a generator}
exists_h1 = False
for c_p in card_prime[len(int)]:
if c_p.intent == int:
exists_h1 = True
break
if not exists_h1:
h_n = Concept(extent=(h.extent | set([object])), intent=int)#Create new pair Hn= (X(H) ∪{x*},int) and add to C'[||int||];
if not card_prime[len(int)]:
card_prime[len(int)] = []
card_prime[len(int)].append(h_n)
cl2 = ConceptLink(_parent=h_n, _child=h) #Add edge Hn -> H;
lattice._links.append(cl2)
#{Modify edges}
for j in range(0,len(int)):#FOR j : 0 TO ||int||-1
for h_a in card_prime[j]:#FOR each Ha ∈ C'[j]
if h_a.intent < int:#IF X'(Ha ) ⊂ int {Ha is a potential parent of Hn}
parent = True
for h_d in lattice.children(h_a): #FOR each Hd child of Ha
if h_d.intent < int: #IF X'(Hd ) ⊂ Int parent<-false; exit FOR END IF
parent = False
break
if parent: #IF parent
if lattice.parents(h).index(h_a) >= 0: #IF Ha is a parent of H
lattice.unlink(h_a, h)#eliminate edge Ha->H END IF;
cl3 = ConceptLink(_parent=h_a, _child=h_n) #Add edge Ha->Hn
lattice._links.append(cl3)
if int == intent: # IF Int=f*({x*}) THEN exit algorithm END IF
return
def test_incremental():
    """Smoke test: build a small animal/attribute lattice, then insert a
    new object incrementally via add_object()."""
    from fca import ConceptLattice, Context
    ct = [[True, True, False, False], [False, False, True, True], \
          [True, False, True, True], [False, True, False, False], \
          [False, False, False, True]]
    objs = ['lion', 'finch', 'eagle', 'hare', 'ostrich']
    attrs = ['preying', 'mammal', 'flying', 'bird']
    c = Context()
    c._table = ct
    c._attributes = attrs
    c._objects = objs
    cl = ConceptLattice(c, None)
    cl._context = c
    cl.compute_lattice()
    # FIX: add_object(object, intent, lattice) takes three arguments; the
    # original call passed the context `c` as a spurious fourth argument,
    # which raised TypeError.
    add_object('snake', set(['preying', 'reptile']), cl)
| 43.12844 | 137 | 0.496703 | 659 | 4,701 | 3.462822 | 0.230653 | 0.035057 | 0.013146 | 0.026293 | 0.134969 | 0.076687 | 0.055653 | 0.055653 | 0.018405 | 0 | 0 | 0.007987 | 0.360774 | 4,701 | 108 | 138 | 43.527778 | 0.747421 | 0.24782 | 0 | 0.121951 | 0 | 0 | 0.023478 | 0 | 0 | 0 | 0 | 0.009259 | 0 | 0 | null | null | 0 | 0.036585 | null | null | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8a8041933930261f3a31462ef960ecbd49c2f5c | 461 | py | Python | demo/worlddata/migrations/0005_foods_hunger.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | demo/worlddata/migrations/0005_foods_hunger.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | demo/worlddata/migrations/0005_foods_hunger.py | MarsZone/DreamLand | 87455f421c1ba09cb6efd5fc0882fbc2a29ea1a5 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2017-07-06 08:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an integer `hunger` column
    # (optional in forms, defaults to 0) to the `foods` model.

    dependencies = [
        ('worlddata', '0004_auto_20170628_1556'),
    ]

    operations = [
        migrations.AddField(
            model_name='foods',
            name='hunger',
            field=models.IntegerField(blank=True, default=0),
        ),
    ]
| 21.952381 | 61 | 0.618221 | 51 | 461 | 5.411765 | 0.843137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.1 | 0.262473 | 461 | 20 | 62 | 23.05 | 0.711765 | 0.147505 | 0 | 0 | 1 | 0 | 0.110256 | 0.058974 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8ada55f883fca697cc8b38ca817ca0714f36859 | 4,079 | py | Python | deployutils/apps/django/backends/jwt_session_store.py | knivets/djaodjin-deployutils | e3ce90152f1187dd1cd3d63aa14a1aaff924248f | [
"BSD-2-Clause"
] | null | null | null | deployutils/apps/django/backends/jwt_session_store.py | knivets/djaodjin-deployutils | e3ce90152f1187dd1cd3d63aa14a1aaff924248f | [
"BSD-2-Clause"
] | null | null | null | deployutils/apps/django/backends/jwt_session_store.py | knivets/djaodjin-deployutils | e3ce90152f1187dd1cd3d63aa14a1aaff924248f | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2018, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Session Store for JWT tokens.
"""
from __future__ import absolute_import
import logging, json
from django.contrib.sessions.backends.signed_cookies import SessionStore \
as SessionBase
from django.contrib.auth import (BACKEND_SESSION_KEY, HASH_SESSION_KEY,
SESSION_KEY)
from jwt import encode, decode
from .... import crypt
from .. import settings
from .auth import ProxyUserBackend
LOGGER = logging.getLogger(__name__)
class SessionStore(SessionBase):
    """Cookie-backed session store whose cookie payload is a JWT token.

    The session data is serialized to JSON, wrapped as the ``payload``
    claim of a JWT signed with ``settings.DJAODJIN_SECRET_KEY``, and the
    resulting token *is* the session key (nothing is stored server-side).
    """

    def __init__(self, session_key=None):
        super(SessionStore, self).__init__(session_key=session_key)

    @property
    def data(self):
        # Direct access to the decoded session dict.
        return self._session

    @staticmethod
    def prepare(session_data=None, passphrase=None):
        """
        Returns *session_data* serialized to JSON and signed as a JWT.

        FIX: the default was a mutable ``{}`` (flagged by pylint in the
        original); a ``None`` sentinel is equivalent for callers and safer.
        """
        if session_data is None:
            session_data = {}
        if passphrase is None:
            passphrase = settings.DJAODJIN_SECRET_KEY
        # NOTE(review): `JSONEncoder` is not imported anywhere in this
        # module as shown; presumably it lives in `crypt` or should be
        # `json.JSONEncoder` — confirm before release.
        serialized = json.dumps(session_data, indent=2, cls=JSONEncoder)
        return encode({'payload': serialized}, passphrase)

    def load(self):
        """
        We load the data from the key itself instead of fetching from
        some external data store. Opposite of _get_session_key(),
        raises BadSignature if signature fails.

        Any decoding/authentication failure yields an empty session.
        """
        session_data = {}
        try:
            session_text = decode(self.session_key,
                settings.DJAODJIN_SECRET_KEY)
            LOGGER.debug("session text: %s<%s>",
                session_text, session_text.__class__)
            session_data = json.loads(session_text.get('payload'))
            # We have been able to decode the session data, let's
            # create Users and session keys expected by Django
            # contrib.auth backend.
            if 'username' in session_data:
                backend = ProxyUserBackend()
                backend.create_user(session_data)
                user = backend.authenticate(session_data['username'])
                session_data[SESSION_KEY] = user.id
                session_data[BACKEND_SESSION_KEY] = "%s.%s" % (
                    backend.__class__.__module__, backend.__class__.__name__)
                session_data[HASH_SESSION_KEY] = user.get_session_auth_hash()
        except Exception:
            # FIX: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt. Invalid/forged tokens still fall back to
            # an empty session, preserving the original contract.
            return {}
        return session_data

    def _get_session_key(self):
        """
        Most session backends don't need to override this method, but we do,
        because instead of generating a random string, we want to actually
        generate a secure url-safe Base64-encoded string of data as our
        session key.
        """
        session_cache = getattr(self, '_session_cache', {})
        return self.prepare(session_cache)
| 38.847619 | 78 | 0.68963 | 507 | 4,079 | 5.376726 | 0.431953 | 0.047689 | 0.018709 | 0.016875 | 0.067498 | 0.04989 | 0.04989 | 0.04989 | 0.04989 | 0.04989 | 0 | 0.003552 | 0.240745 | 4,079 | 104 | 79 | 39.221154 | 0.876655 | 0.470704 | 0 | 0 | 0 | 0 | 0.03426 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0.086957 | 0.173913 | 0.021739 | 0.413043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a8ae061feeba3985c2e2f3b0364a2da97ebf2e9e | 211 | py | Python | muscles/neo4j/read_py2neo.py | Spanarchian/measure_group | 45eb1f2420cda02611e454f9cd9e568ccba9a4e9 | [
"MIT"
] | null | null | null | muscles/neo4j/read_py2neo.py | Spanarchian/measure_group | 45eb1f2420cda02611e454f9cd9e568ccba9a4e9 | [
"MIT"
] | null | null | null | muscles/neo4j/read_py2neo.py | Spanarchian/measure_group | 45eb1f2420cda02611e454f9cd9e568ccba9a4e9 | [
"MIT"
] | null | null | null |
from py2neo import Graph

# Connect to the local Neo4j instance over the Bolt protocol.
neo4j_graph = Graph("bolt://localhost:7687", auth=("neo4j", "changeme"))
# Fetch every Person's name, city and age into a pandas DataFrame.
people_frame = neo4j_graph.run("MATCH (a :Person) RETURN a.name, a.city, a.age").to_data_frame()
print(f"To_data_frame() :\n{people_frame}")
| 30.142857 | 81 | 0.682464 | 35 | 211 | 4 | 0.742857 | 0.085714 | 0.157143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031746 | 0.104265 | 211 | 6 | 82 | 35.166667 | 0.708995 | 0 | 0 | 0 | 0 | 0 | 0.485714 | 0.1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8b12924de45222461a00e3c63814bfcde7cffa6 | 25,138 | py | Python | example/nsls2id.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | null | null | null | example/nsls2id.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | 1 | 2020-02-17T18:56:18.000Z | 2020-02-20T17:06:20.000Z | example/nsls2id.py | NSLS-II/aphla | ceb5410dc836a8fb16321b6dc5e10d442be765c5 | [
"BSD-3-Clause"
] | 1 | 2021-03-08T16:07:11.000Z | 2021-03-08T16:07:11.000Z | """
NSLS-II insertion device commissioning/operation
copyright (C) 2014, Yongjun Li, Yoshi Hidaka, Lingyun Yang
"""
import aphla as ap
import itertools
import numpy as np
import re
import h5py
# Per-device configuration for the damping wigglers (DW100), keyed by ID name.
# For each device:
#   unitsys    - unit system used when reading/writing fields ("phy")
#   gap        - (min, max, nsteps, tolerance) for gap scans
#   cch        - names of the corrector-coil fields on the device
#   background - field values that define the "passive" (background) state
#   Imin/Tmin  - minimum beam current [mA] / lifetime [hour] for measurements
#   timeout    - max seconds to wait for a motor move to complete
_params = {
    "dw100g1c08u":
        {"unitsys": "phy",
         "gap": (119.0, 147.0, 30, 0.1),
         "cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
         "background": {"gap": 147.0},
         "Imin": 2.0, # mA
         "Tmin": 2.0, # hour
         "timeout": 150, },
    "dw100g1c08d":
        {"unitsys": "phy",
         "gap": (119.0, 147.0, 30, 0.1),
         "cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
         "background": {"gap": 147.0},
         "Imin": 2.0, # mA
         "Tmin": 2.0, # hour
         "timeout": 150, },
    "dw100g1c18u":
        {"unitsys": "phy",
         "gap": (119.0, 147.0, 30, 0.1),
         "cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
         "background": {"gap": 147.0},
         "Imin": 2.0, # mA
         "Tmin": 2.0, # hour
         "timeout": 150, },
    "dw100g1c18d":
        {"unitsys": "phy",
         "gap": (119.0, 147.0, 30, 0.1),
         "cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
         "background": {"gap": 147.0},
         "Imin": 2.0, # mA
         "Tmin": 2.0, # hour
         "timeout": 150, },
    "dw100g1c28u":
        {"unitsys": "phy",
         "gap": (119.0, 147.0, 30, 0.1),
         "cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
         "background": {"gap": 147.0},
         "Imin": 2.0, # mA
         "Tmin": 2.0, # hour
         "timeout": 150, },
    "dw100g1c28d":
        {"unitsys": "phy",
         "gap": (119.0, 147.0, 30, 0.1),
         "cch": ("cch0", "cch1", "cch2", "cch3", "cch4", "cch5"),
         "background": {"gap": 147.0},
         "Imin": 2.0, # mA
         "Tmin": 2.0, # hour
         "timeout": 150, },
    }
def getBrho(E_GeV):
    """Return the magnetic rigidity B*rho [T*m] for an electron beam.

    E_GeV: total beam energy in GeV. The rigidity is computed as
    m_e * c * gamma / e, with gamma = E / (m_e c^2).
    """
    import scipy.constants as const
    # Electron rest energy expressed in GeV.
    rest_energy_GeV = const.m_e * const.c ** 2 / const.e / 1e9
    lorentz_gamma = E_GeV / rest_energy_GeV
    # Magnetic rigidity [T*m].
    return const.m_e * const.c * lorentz_gamma / const.e
def putPar(ID, parList, **kwargs):
    """
    Put (write) a set of parameters (list) on an ID while the hardware
    itself (motor control) checks whether the target state is reached or not.

    inputs:
      ID: aphla ID instance
      parList: 2d parameter list in the format of [name, value, tolerance]
        [['gap',15,1e-4],['phase',12,1e-4]]
      timeout: Maximum time the motor control should wait for each "put"
        in the unit of seconds.
      verbose: integer larger means more details.
      throw: raise exception if True, otherwise return False

    returns: True if success, otherwise False
    """
    # Per-device defaults come from the module-level _params table;
    # explicit kwargs take precedence.
    timeout = kwargs.get("timeout", _params[ID.name].get("timeout", 150))
    unitsys = kwargs.get("unitsys", _params[ID.name].get("unitsys", 'phy'))
    throw = kwargs.get("throw", True)
    verbose = kwargs.get("verbose", 0)

    agree = True
    for par in parList:
        # trig=1 requests the device act on the new setpoint immediately.
        ID.put(par[0], par[1], timeout=timeout, unitsys=unitsys, trig=1)
        p0 = ID.get(par[0], unitsys=unitsys)
        # Within tolerance: move on to the next parameter.
        if abs(p0-par[1]) <= par[2]:
            continue
        # error handling: readback did not reach the setpoint.
        agree = False
        if verbose:
            print 'For "{0}" of {1}:'.format(par[0], ID.name)
            print 'Target SP = {0:.9g}, Current RB = {1:.9g}, Tol = {2:.9g}'.\
                format(par[1], p0, par[2])
        if throw:
            raise RuntimeError('Failed to set device within tolerance.')
        else:
            break
    return agree
def createCorrectorField(ID):
    """Return (ID, field-name) pairs for every corrector-coil field
    configured for this device in _params (empty list if none)."""
    cch_names = _params[ID.name].get("cch", [])
    return [(ID, field_name) for field_name in cch_names]
def createParList(ID, parScale):
    """
    Create the scan parameter list from the parameter ranges in _params.

    parScale: iterable of (field, spacing) pairs, e.g. [('gap', 'log')],
      where spacing is 'linear' or 'log'. The per-field
      (min, max, nsteps, tolerance) comes from _params[ID.name][field].

    Returns (parList, nlist, table):
      parList: list of scan points, each a list of [name, value, tol]
      nlist:   the field names actually scanned
      table:   numpy array of all scan-point values (for archiving)
    """
    nlist, vlist, tlist = [], [], []  # name, value and tolerance lists
    for fld, scale in parScale:
        # Skip fields the device does not define.
        if not _params[ID.name].get(fld, None):
            continue
        nlist.append(fld)
        vmin, vmax, vstep, vtol = _params[ID.name][fld]
        if scale == 'linear':
            vlist.append(list(np.linspace(vmin, vmax, int(vstep))))
        elif scale == 'log':
            if vmin <= 0 or vmax <= 0:
                raise RuntimeError('negative boundary can not be spaced Logarithmically')
            else:
                vlist.append(list(np.logspace(np.log10(vmin), np.log10(vmax), int(vstep))))
        else:
            # FIX: the original interpolated an undefined name `p`
            # ('%s'%p[1]), raising NameError instead of the intended error.
            raise RuntimeError('unknown spaced pattern: %s' % scale)
        tlist.append(vtol)
    # Cartesian product of all per-field value lists: one entry per scan point.
    valueList = itertools.product(*vlist)
    parList = []
    for v in valueList:
        tmp = []
        for i, n in enumerate(nlist):
            tmp.append([n, v[i], tlist[i]])
        parList.append(tmp)
    # Re-create the (exhausted) product iterator for the archive table.
    valueList = itertools.product(*vlist)
    table = np.array([vi for vi in valueList])
    return parList, nlist, table
def putParHardCheck(ID, parList, timeout=30, throw=True, unitsys='phy'):
    '''
    Put (write) a set of parameters (list) on an ID while the hardware
    itself (motor control) checks whether the target state is reached or not.

    ID: aphla ID instance

    parList: 2d parameter list in the format of [name, value, tolerance]
      [['gap',15,1e-4],['phase',12,1e-4]]

    timeout: Maximum time the motor control should wait for each "put"
      in the unit of seconds.

    return: True if success, otherwise throws an exception
      (or returns False when throw=False).
    '''
    agree = True
    for par in parList:
        # trig=1 requests the motor act on the setpoint immediately.
        ID.put(par[0], par[1], timeout=timeout, unitsys=unitsys, trig=1)
        # raw unit for "gap" = [um]
        # raw unit for "phase" = [um?]
        p0 = ID.get(par[0], unitsys=unitsys)
        if abs(p0-par[1]) <= par[2]: # TODO: readback & setpoint unit may be different! Check it!
            continue # print "Agree: ", p0, par[1], "eps=", par[2]
        # error handling: readback outside tolerance.
        agree = False
        print 'For "{0}" of {1}:'.format(par[0], ID.name)
        print 'Target Setpoint = {0:.9g}, Current Readback = {1:.9g}, Tolerance = {2:.9g}'.format(
            par[1], p0, par[2])
        if throw:
            raise RuntimeError('Failed to set device within tolerance.')
        else:
            break
    return agree
# <codecell>
def putParSoftCheck(ID, parList, timeout=30, online=False):
'''
Put (write) a set of parameters (list) on an ID while this function
checks whether the target state is reached or not through readbacks
for given tolerances.
ID: aphla ID instance
parList: 2d parameter list in the format of [name, value, tolerance]
[['gap',15,1e-4],['phase',12,1e-4]]
timeout: Maximum time the motor control should wait for each "put"
in the unit of seconds.
return: True if success, otherwise throws an exception.
'''
if not online: return True # TODO: To be reomved once we are allowed to move ID motors
for par in parList:
t0 = datetime.now()
converged = False
try:
ID.put(par[0], par[1], unitsys=None) # raw unit
except:
print 'Failed to set the setpoint for {0} to {1}'.format(par[0], par[1])
raise
# TODO: remove hardcoding
ap.caput("SR:C28-ID:G1{DW100:2}ManG:Go_.PROC", 1, wait=False)
while not converged:
p0 = ID.get(par[0], unitsys=None)
if abs(p0-par[1]) <= par[2]: # TODO: readback & setpoint unit may be different! Check it!
# print "Agree: ", p0, par[1], "eps=", par[2]
converged = True
break
t1 = datetime.now()
if (t1-t0).total_seconds() > timeout:
break
time.sleep(0.5)
if not converged:
raise RuntimeError("timeout at setting {0}={1} (epsilon={2})".format(par[0], par[1], par[2]))
return True
def putBackground(ID, **kwargs):
    """
    put ID to passive status:
    gap to max, phase to 0 if applicable, all correction cch to zeros.

    Optional kwargs: gap/phase (range tuples), timeout, throw, unitsys,
    verbose — defaults come from the module-level _params table.

    Returns True on success, False otherwise.
    """
    gapMin, gapMax, gapStep, gapTol = kwargs.get("gap",
                                                 _params[ID.name]["gap"])
    phaseMin, phaseMax, phaseStep, phaseTol = \
        kwargs.get("phase", _params[ID.name].get("phase", (None, None, None, None)))
    zeroPhase = 0.0
    timeout = kwargs.get("timeout", 150)
    throw = kwargs.get("throw", True)
    unitsys = kwargs.get("unitsys", 'phy')
    verbose = kwargs.get("verbose", 0)
    flds = ID.fields()
    # Build the target list only from fields this device actually has.
    parList = []
    if 'gap' in flds:
        parList.append(['gap',gapMax,gapTol])
    if 'phase' in flds:
        parList.append(['phase',zeroPhase,phaseTol])
    if putPar(ID, parList, timeout=timeout,
              throw=throw, unitsys=unitsys, verbose=verbose):
        # put correcting coils to zeros (raw units)
        for i in range(len(ID.cch)):
            ID.put('cch'+str(i), 0.0, unitsys=None)
        return True
    else:
        return False
def checkBeam(Imin=2, Tmin=2, online=False):
    """
    check beam lifetime and current:
    if beam lifetime is less than Tmin [hr], 2hrs by default,
    or current is less than Imin [mA], 2mA by default,
    return False, otherwise True.

    NOTE(review): the `online` parameter is accepted but never used in this
    implementation — confirm whether it was meant to gate the checks.
    """
    tau, Ib = ap.getLifetimeCurrent()
    if Ib < Imin:
        print 'Beam current too low ({0} < {1})'.format(Ib, Imin)
        return False
    if tau < Tmin:
        print 'Beam lifetime too short ({0} < {1})'.format(tau, Tmin)
        return False
    return True
def checkGapPhase(ID, **kwargs):
    """
    check ID gap, phase by sweeping each through its configured range
    (defaults from _params) with putPar.

    return True if every step converges, otherwise False.
    """
    gapMin, gapMax, gapStep, gapTol = kwargs.get("gap",
                                                 _params[ID.name]["gap"])
    phaseMin, phaseMax, phaseStep, phaseTol = \
        kwargs.get("phase", _params[ID.name].get("phase", (None, None, None, None)))
    timeout = kwargs.get("timeout", 150)
    throw = kwargs.get("throw", True)
    unitsys = kwargs.get("unitsys", _params[ID.name]["unitsys"])
    verbose = kwargs.get("verbose", 0)
    # Explicit gapStep/phaseStep kwargs override the counts from "gap"/"phase".
    gapStep = kwargs.get("gapStep", gapStep)
    phaseStep = kwargs.get("phaseStep", phaseStep)
    flds = ID.fields()
    if 'gap' in flds:
        # Sweep the gap; bail out on the first non-converging step.
        for gap in np.linspace(gapMin, gapMax, gapStep):
            gapList = [['gap',gap, gapTol]]
            gapStatus = putPar(ID,gapList,timeout=timeout,
                               throw=throw,unitsys=unitsys,verbose=verbose)
            if not gapStatus:
                return False
    if 'phase' in flds:
        # Same sweep for the phase, when the device has one.
        for phase in np.linspace(phaseMin,phaseMax,phaseStep):
            phaseList = [['phase',phase,phaseTol]]
            phaseStatus = putPar(ID,phaseList,timeout=timeout,
                                 throw=throw,unitsys=unitsys,verbose=verbose)
            if not phaseStatus:
                return False
    return True
def switchFeedback(fftable = "off"):
"""
switchFeedback("on") or "off"
"""
if fftable not in ["on", "off"]:
raise RuntimeError("invalid feed forward table state: ('on'|'off')")
for dw in ap.getGroupMembers(["DW",], op="union"):
if "gap" not in dw.fields():
print "WARNING: no 'gap' field in {0}".format(dw.name)
continue
pv = dw.pv(field="gap", handle="setpoint")[0]
m = re.match(r"([^\{\}]+)\{(.+)\}", pv)
if not m:
print "WARNING: inconsistent naming '{0}'".format(pv)
pvffwd = "{0}{{{1}}}MPS:Lookup_.INPA".format(m.group(1), m.group(2))
pvffwd_pref = "{0}{{{1}-Mtr:Gap}}.RBV ".format(m.group(1), m.group(2))
pvffwd_val = {"on": pvffwd_pref + "CP NM",
"off": pvffwd_pref + "NPP N"}
print "set {0}='{1}'".format(pvffwd, pvffwd_val[fftable])
ap.caput(pvffwd, pvffwd_val[fftable])
# fast/slow co
# all ID feed forward
# weixing Bunch by Bunch
def initFile(ID, fieldList, parTable):
    """Initialize the output HDF5 file for an ID scan: create the device
    group and store the scan table, background state and beam limits.

    Returns the full output file name.
    """
    fileName = ap.outputFileName("ID", ID.name+"_")
    # FIX: open with an explicit append mode; relying on h5py's historical
    # implicit default ('a') breaks on modern h5py, which requires a mode.
    fid = h5py.File(fileName, "a")
    grp = fid.require_group(ID.name)
    grp.attrs["__FORMAT__"] = 1
    # setup parameters
    subg = grp.require_group("parameters")
    subg["scanTable"] = parTable
    subg["scanTable"].attrs["columns"] = fieldList
    bkg = _params[ID.name]["background"]
    # like one row of scanTable, same columns
    subg["background"] = [bkg[fld] for fld in fieldList]
    subg["background"].attrs["columns"] = fieldList
    # Beam-quality thresholds used while scanning.
    subg["minCurrent"] = _params[ID.name]["Imin"]
    subg["minCurrent"].attrs["unit"] = "mA"
    subg["minLifetime"] = _params[ID.name]["Tmin"]
    subg["minLifetime"].attrs["unit"] = "hr"
    fid.close()
    return fileName
def chooseBpmCor(ID, userBpm=False):
    """
    Choose BPMs and correctors for an ID measurement.

    Returns (bpmFields, corFields):
      bpmFields: [bpm, 'x'] / [bpm, 'y'] pairs for every (optionally user) BPM
      corFields: [ID, 'cchN'] pairs for every corrector coil on the device

    FIX: the original mixed three different spellings (bpmfields/bpmflds/
    bpmFields, corfields/corflds/corFields) and raised NameError.
    """
    bpms = ap.getElements('BPM')
    if userBpm:
        bpms += ap.getElements('UBPM')
    bpmFields = []
    for bpm in bpms:
        bpmFields.append([bpm, 'x'])
        bpmFields.append([bpm, 'y'])
    corFields = []
    for i in range(len(ID.cch)):
        corFields.append([ID, 'cch' + '%i' % i])
    return bpmFields, corFields
def saveToDB(fileName):
    # Placeholder: persisting the result file to the database is not
    # implemented yet; currently only announces the intent.
    print "save to file (Guobao's DB)"
    pass
def measBackground(ID, output, iiter):
"""measure the background and return saved group name"""
if not nsls2id.putBackground(ID):
print "Failed at setting {0} to background mode".format(ID)
return None
# create background subgroup with index
fid = h5py.File(output)
prefix = "iter_"
iterindex = max([int(g[len(prefix):]) for g in fid[ID.name].keys()
if g.startswith(prefix)] + [-1]) + 1
bkgGroup = "iter_{0:04d}".format(iterindex)
grp = fid[ID.name].create_group(bkgGroup)
orb0 = ap.getOrbit(spos=True)
grp["orbit"] = orb0
tau, I = ap.getLifetimeCurrent()
grp["lifetime"] = tau
grp["current"] = I
grp.attrs["iter"] = iiter
fid.close()
return bkgGroup
def virtKicks2FldInt(virtK1, virtK2, idLen, idKickOffset1, idKickOffset2, E_GeV):
    """
    Convert upstream/downstream virtual kicks of an insertion device (ID)
    into its 1st and 2nd field integrals.

    Parameters
    ----------
    virtK1, virtK2 : float
        Virtual kicks [rad] at the upstream / downstream ends of the ID.
    idLen : float
        ID length [m].
    idKickOffset1, idKickOffset2 : float
        Offsets [m] of the virtual kicks from the ID extremities (positive
        = inside the ID, negative = outside), upstream and downstream.
    E_GeV : float
        Electron beam energy [GeV].

    Returns
    -------
    I1 : float
        First field integral [G*m].
    I2 : float
        Second field integral [G*(m^2)].
    """
    # Tesla -> Gauss conversion folded into the rigidity.
    rigidity_G = getBrho(E_GeV) * 1e4
    first_integral = rigidity_G * (virtK1 + virtK2)
    upstream_lever = idLen - idKickOffset1
    second_integral = rigidity_G * (upstream_lever * virtK1
                                    + idKickOffset2 * virtK2)
    return first_integral, second_integral
# <codecell>
def fldInt2VirtKicks(I1, I2, idLen, idKickOffset1, idKickOffset2, E_GeV):
    """
    Convert the 1st and 2nd field integrals of an insertion device (ID)
    into the equivalent upstream/downstream virtual kicks.

    Parameters
    ----------
    I1 : float
        First field integral [G*m].
    I2 : float
        Second field integral [G*(m^2)].
    idLen : float
        ID length [m].
    idKickOffset1, idKickOffset2 : float
        Offsets [m] of the virtual kicks from the ID extremities (positive
        = inside the ID, negative = outside), upstream and downstream.
    E_GeV : float
        Electron beam energy [GeV].

    Returns
    -------
    virtK1, virtK2 : float
        Virtual kicks [rad] at the upstream / downstream ends of the ID.
    """
    # Gauss -> Tesla plus division by rigidity and the kick separation.
    separation = idLen - idKickOffset1 - idKickOffset2
    scale = 1e-4 / getBrho(E_GeV) / separation
    upstream_kick = scale * (I2 - I1 * idKickOffset2)        # [rad]
    downstream_kick = scale * (I1 * (idLen - idKickOffset1) - I2)  # [rad]
    return upstream_kick, downstream_kick
def save1DFeedFowardTable(filepath, table, fmt='%.16e'):
"""
Save a valid 1-D Stepped Feedforward table (NSLS-II format) to a text file.
"""
np.savetxt(filepath, table, fmt=fmt, delimiter=', ', newline='\n')
def get1DFeedForwardTable(centers, half_widths, dI_array,
                          I0_array=None, fmt='%.16e'):
    """
    Build a valid 1-D Stepped Feedforward table (NSLS-II format):
    columns are [center, half-width, corrector currents...]. When
    `I0_array` is given, the currents are I0_array + dI_array.

    Note: `fmt` is accepted for interface compatibility but unused here.
    """
    currents = dI_array if I0_array is None else I0_array + dI_array
    center_col = np.array(centers).reshape((-1, 1))
    width_col = np.array(half_widths).reshape((-1, 1))
    return np.hstack((center_col, width_col, currents))
def getZeroed1DFeedForwardTable(parDict, nIDCor):
    """
    Get a valid 1-D Stepped Feedforward table (NSLS-II format) with
    all `nIDCor` ID correctors set to zero over the entire range of the
    ID property described by `parDict` ('vectors' and 'bkgTable').

    FIX: the original except-handler referenced `scanVectors`/`bkgList`,
    which are unbound when the lookup itself fails, masking the real
    error with a NameError.
    """
    try:
        scanVectors = parDict['vectors']
        bkgList = parDict['bkgTable'].flatten().tolist()
    except KeyError:
        raise RuntimeError('parDict must contain "vectors" and "bkgTable".')
    if not (len(scanVectors) == len(bkgList) == 1):
        raise RuntimeError(
            'This function is only for 1D feedforward tables: '
            'len(scanVectors)={0:d}, len(bkgList)={1:d}'.format(
                len(scanVectors), len(bkgList)))
    # One bin covering the full scanned range plus the background point.
    array = scanVectors[0] + [bkgList[0]]
    minVal, maxVal = np.min(array), np.max(array)
    centers = [(minVal + maxVal) / 2.0]
    half_widths = [(maxVal - minVal) / 2.0 * 1.01]  # Extra margin of 1% added
    dI_array = np.array([0.0]*nIDCor).reshape((1,-1))
    return get1DFeedForwardTable(centers, half_widths, dI_array,
                                 I0_array=None, fmt='%.16e')
def create1DFeedForwardTable(centers, half_widths, dI_array, I0_array=None):
    """
    Create a valid 1-D Stepped Feedforward table (NSLS-II format):
    columns [center, half-width, corrector currents...].

    Note: duplicates get1DFeedForwardTable (minus `fmt`); kept for
    interface compatibility.
    """
    if I0_array is None:
        total_currents = dI_array
    else:
        total_currents = I0_array + dI_array
    columns = (np.array(centers).reshape((-1, 1)),
               np.array(half_widths).reshape((-1, 1)),
               total_currents)
    return np.hstack(columns)
def calc1DFeedForwardColumns(
    ID_filepath, n_interp_pts=None, interp_step_size=None, step_size_unit=None,
    cor_inds_ignored=None, bpm_inds_ignored=None, nsv=None):
    """
    Build the columns of a 1-D feedforward table from a completed ID scan
    stored in the HDF5 file `ID_filepath`: for each scan state, invert the
    measured orbit response matrix (SVD) to get corrector-current deltas
    that cancel the ID-induced orbit distortion, then interpolate onto the
    requested grid.

    n_interp_pts / interp_step_size select the interpolation grid (mutually
    exclusive); cor_inds_ignored / bpm_inds_ignored exclude correctors/BPMs
    from the fit; nsv truncates the number of singular values kept.

    Returns a dict with 'centers', 'half_widths', 'raw_dIs', 'interp_dIs'.
    """
    # TODO: Make sure all the units are correct in the generated table
    #         Gap & interval are in microns => [um]
    #         Currents in ppm of 10 Amps => [10uA]
    compIterInds = getCompletedIterIndexes(ID_filepath)
    nCompletedIter = len(compIterInds)
    f = h5py.File(ID_filepath, 'r')
    ID_name = f.keys()[0]
    grp = f[ID_name]
    meas_state_1d_array = grp['parameters']['scanTable'].value
    state_unitsymb = grp['parameters']['scanTable'].attrs['unit'] # TODO: need unit conversion
    nIter, ndim = meas_state_1d_array.shape
    if ndim != 1:
        f.close()
        raise NotImplementedError('Only 1-D scan has been implemented.')
    if nCompletedIter != nIter:
        print '# of completed scan states:', nCompletedIter
        print '# of requested scan states:', nIter
        f.close()
        raise RuntimeError('You have not scanned all specified states.')
    meas_state_1d_array = meas_state_1d_array.flatten()
    state_min = np.min(meas_state_1d_array)
    state_max = np.max(meas_state_1d_array)
    # Choose the interpolation grid for the output table.
    if (n_interp_pts is not None) and (interp_step_size is not None):
        f.close()
        raise ValueError(('You can only specify either one of "n_interp_pts" '
                          'or "interp_step_size", not both.'))
    elif n_interp_pts is not None:
        interp_state_1d_array = np.linspace(state_min, state_max, n_interp_pts)
    elif interp_step_size is not None:
        interp_state_1d_array = np.arange(state_min, state_max, interp_step_size)
        # np.arange may stop short of state_max; append it explicitly.
        if interp_state_1d_array[-1] != state_max:
            interp_state_1d_array = np.array(interp_state_1d_array.tolist()+
                                             [state_max])
    else:
        interp_state_1d_array = meas_state_1d_array
    # Collect, per iteration: the response matrix and the orbit distortion
    # relative to that iteration's background orbit.
    M_list = [None]*nIter
    diff_orb_list = [None]*nIter
    for k in grp.keys():
        if k.startswith('iter_'):
            iIter = grp[k].attrs['iteration']
            M_list[iIter] = grp[k]['orm']['m'].value
            orb = grp[k]['orbit'].value
            bkgGroup = grp[k].attrs['background']
            orb0 = grp[bkgGroup]['orbit'].value[:,:-1] # Ignore s-pos column
            diff_orb_list[iIter] = orb - orb0
    f.close()
    # Bin centers are the interpolation points; half-widths follow from the
    # spacing between consecutive centers.
    interp_state_1d_array = np.sort(interp_state_1d_array)
    center_list = interp_state_1d_array.tolist()
    half_width_list = (np.diff(interp_state_1d_array)/2.0).tolist()
    half_width_list.append(center_list[-1]-center_list[-2]-half_width_list[-1])
    dI_list = []
    for M, diff_orb in zip(M_list, diff_orb_list):
        # Mask out ignored BPM rows of the orbit distortion.
        TF = np.ones(diff_orb.shape)
        if bpm_inds_ignored is not None:
            for i in bpm_inds_ignored:
                TF[i,:] = 0
        TF = TF.astype(bool)
        diff_orb_trunc = diff_orb[TF].reshape((-1,2))
        # Reverse sign to get desired orbit change
        dObs = (-1.0)*diff_orb_trunc.T.flatten().reshape((-1,1))
        # Mask out ignored corrector columns / BPM rows of the matrix.
        TF = np.ones(M.shape)
        if cor_inds_ignored is not None:
            for i in cor_inds_ignored:
                TF[:,i] = 0
        if bpm_inds_ignored is not None:
            nBPM = M.shape[0]/2
            try:
                assert nBPM*2 == M.shape[0]
            except:
                raise ValueError('Number of rows for response matrix must be 2*nBPM.')
            for i in bpm_inds_ignored:
                # Drop both the x row (i) and the y row (i+nBPM).
                TF[i,:] = 0
                TF[i+nBPM,:] = 0
        TF = TF.astype(bool)
        M_trunc = M[TF].reshape((dObs.size,-1))
        # Pseudo-inverse via SVD, truncated to the first nsv singular values.
        U, sv, V = np.linalg.svd(M_trunc, full_matrices=0, compute_uv=1)
        S_inv = np.linalg.inv(np.diag(sv))
        if nsv is not None:
            S_inv[nsv:, nsv:] = 0.0
        dI = V.T.dot(S_inv.dot(U.T.dot(dObs))).flatten().tolist()
        if cor_inds_ignored is not None:
            for i in cor_inds_ignored:
                # Set 0 Amp for unused correctors
                dI.insert(i, 0.0)
        dI_list.append(dI)
    dI_array = np.array(dI_list)
    nCor = dI_array.shape[1]
    # Interpolate each corrector's current onto the requested grid.
    interp_dI_array = np.zeros((interp_state_1d_array.size, nCor))
    for i in range(nCor):
        interp_dI_array[:,i] = np.interp(
            interp_state_1d_array, meas_state_1d_array, dI_array[:,i])
    return {'centers': np.array(center_list),
            'half_widths': np.array(half_width_list),
            'raw_dIs': dI_array, 'interp_dIs': interp_dI_array}
#----------------------------------------------------------------------
def getCompletedIterIndexes(ID_filepath):
    """
    Return the sorted-by-appearance list of iteration indexes whose
    'iter_*' groups in the scan file carry a 'completed' attribute,
    validating that they form a contiguous run starting at 0.

    NOTE(review): uses Python-2-only dict.has_key() and the deprecated
    h5py `.value` accessor — this function requires Python 2 and an old
    h5py; confirm before porting.
    """
    f = h5py.File(ID_filepath, 'r')
    ID_name = f.keys()[0]
    grp = f[ID_name]
    completed_iter_indexes = []
    for k in grp.keys():
        if k.startswith('iter_') and grp[k].attrs.has_key('completed'):
            completed_iter_indexes.append(grp[k].attrs['iteration'].value)
    f.close()
    if completed_iter_indexes != []:
        # Completed iterations must be 0, 1, 2, ... without gaps.
        if not np.all(np.diff(completed_iter_indexes) == 1):
            raise RuntimeError(
                'List of completed iteration indexes has some missing indexes.')
        if np.min(completed_iter_indexes) != 0:
            raise RuntimeError('List of completed iteration indexes does not start from 0.')
    return completed_iter_indexes
if __name__ == '__main__':
    # Example driver: derive a 1-D feedforward table from a measured DW100
    # scan file (ignoring correctors 2 and 3) and save it as text.
    ID_filepath = '/epics/data/aphla/SR/2014_09/ID/dw100g1c08u_2014_09_24_142644.hdf5'
    d = calc1DFeedForwardColumns(ID_filepath, interp_step_size=1.0,
                                 cor_inds_ignored=[2,3],
                                 bpm_inds_ignored=None, nsv=None)
    table = create1DFeedForwardTable(d['centers'], d['half_widths'],
                                     d['interp_dIs'])
    save1DFeedFowardTable('test_ff.txt', table, fmt='%.16e')
| 33.383798 | 105 | 0.584533 | 3,306 | 25,138 | 4.358742 | 0.187235 | 0.009577 | 0.016655 | 0.01499 | 0.412353 | 0.38508 | 0.368772 | 0.352186 | 0.329216 | 0.316794 | 0 | 0.034895 | 0.278383 | 25,138 | 752 | 106 | 33.428191 | 0.759482 | 0.048055 | 0 | 0.378685 | 0 | 0.004535 | 0.133626 | 0.007936 | 0 | 0 | 0 | 0.003989 | 0.004535 | 0 | null | null | 0.002268 | 0.013605 | null | null | 0.038549 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8b4ca0aa758fa6b9fa0e302dfe53d2794b1c6f8 | 2,107 | py | Python | lab/migrations/0003_auto_20191202_1534.py | jtdub/prod2lab | 054c922f731ad377b83714194ef806325f79336a | [
"MIT"
] | 11 | 2019-11-20T02:05:30.000Z | 2021-08-22T13:15:14.000Z | lab/migrations/0003_auto_20191202_1534.py | jtdub/prod2lab | 054c922f731ad377b83714194ef806325f79336a | [
"MIT"
] | 12 | 2019-11-20T02:07:54.000Z | 2019-12-11T14:57:59.000Z | lab/migrations/0003_auto_20191202_1534.py | jtdub/prod2lab | 054c922f731ad377b83714194ef806325f79336a | [
"MIT"
] | 2 | 2019-11-20T02:05:33.000Z | 2019-11-28T01:29:20.000Z | # Generated by Django 2.2.8 on 2019-12-02 15:34
from django.db import migrations, models
import django.db.models.deletion
from lab.models import OperatingSystem
def initial_os_data(app, schema_editor):
    """Seed the OperatingSystem table with the initially supported platforms.

    BUGFIX: use the *historical* model via ``app.get_model`` instead of the
    directly-imported ``lab.models.OperatingSystem``.  Django data migrations
    must not import live models: if the model class changes in a later
    release, replaying this migration against an old schema breaks.
    """
    OperatingSystem = app.get_model('lab', 'OperatingSystem')
    # Per-platform defaults: netmiko device type, pager-disable command and
    # the command used to fetch the running configuration.
    data = [
        {"os": "ios", "os_type": "cisco_ios", "term_length": "terminal length 0", "fetch_config": "show running-config"},
        {"os": "iosxr", "os_type": "cisco_xr", "term_length": "terminal length 0", "fetch_config": "show running-config"},
        {"os": "iosxe", "os_type": "cisco_xe", "term_length": "terminal length 0", "fetch_config": "show running-config"},
        {"os": "nxos", "os_type": "cisco_nxos", "term_length": "terminal length 0", "fetch_config": "show running-config"},
        {"os": "eos", "os_type": "arista_eos", "term_length": "terminal length 0", "fetch_config": "show running-config"},
        {"os": "edgeos", "os_type": "vyos", "term_length": "terminal length 0", "fetch_config": "show configuration"},
    ]
    for item in data:
        OperatingSystem.objects.create(
            name=item['os'],
            netmiko_type=item['os_type'],
            terminal_length_cmd=item['term_length'],
            fetch_config_cmd=item['fetch_config'],
        )
class Migration(migrations.Migration):
    """Create the OperatingSystem model, repoint Device.os_type at it, and
    seed the new table with the initially supported platforms."""

    dependencies = [
        ('lab', '0002_auto_20191120_1951'),
    ]

    operations = [
        migrations.CreateModel(
            name='OperatingSystem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('netmiko_type', models.CharField(max_length=255)),
                ('terminal_length_cmd', models.CharField(max_length=255)),
                ('fetch_config_cmd', models.CharField(max_length=255)),
            ],
        ),
        # Device.os_type becomes a FK to the new model; deleting an
        # OperatingSystem cascades to its devices.
        migrations.AlterField(
            model_name='device',
            name='os_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='lab.OperatingSystem'),
        ),
        # Data seed; no reverse function, so this migration is irreversible.
        migrations.RunPython(initial_os_data),
    ]
| 41.313725 | 123 | 0.613669 | 242 | 2,107 | 5.119835 | 0.355372 | 0.079903 | 0.087167 | 0.116223 | 0.346247 | 0.302663 | 0.254237 | 0.254237 | 0.221953 | 0.221953 | 0 | 0.030247 | 0.231134 | 2,107 | 50 | 124 | 42.14 | 0.734568 | 0.021357 | 0 | 0.04878 | 1 | 0 | 0.31165 | 0.011165 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.073171 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8bbe7e825f46ffee0807079debe846ac5d0afca | 1,374 | py | Python | examples/mini_site/steps/stale_steps.py | xrg/behave_manners | 93cb570e04af870f3401048d26dd7f0e8350759e | [
"BSD-2-Clause"
] | 3 | 2020-02-02T11:11:44.000Z | 2021-11-17T22:21:18.000Z | examples/mini_site/steps/stale_steps.py | xrg/behave_manners | 93cb570e04af870f3401048d26dd7f0e8350759e | [
"BSD-2-Clause"
] | null | null | null | examples/mini_site/steps/stale_steps.py | xrg/behave_manners | 93cb570e04af870f3401048d26dd7f0e8350759e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: UTF-8 -*-
from __future__ import print_function
from behave import given, when, then, step
from behave_manners.pagelems import DOMScope
from behave_manners.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
@when(u'I click to have the dropdown visible')
def click_dropdown1(context):
    """Open the dropdown by clicking the component, if it is not already open."""
    # 'owns' is truthy while the input element owns an open overlay, so only
    # click when the dropdown is currently closed.
    if not context.cur_element['input'].owns:
        context.cur_element.click()
    # Let pending DOM updates scoped to this component settle before reading.
    context.cur_element._scope.wait_all('short', welem=context.cur_element._remote)
    print("Owns: %s" % context.cur_element['input'].owns)
    # Remember the overlay sub-component for later steps in the scenario.
    context.cur_overlays = context.cur_element['overlays']
@when(u'I click again to hide the dropdown')
def click_hide_dropdown(context):
    """Close an open dropdown by sending ESC to its input element."""
    input_elem = context.cur_element['input']
    if input_elem.owns:
        input_elem.send_keys(Keys.ESCAPE)
    # After ESC the input must no longer own an overlay.
    assert not input_elem.owns
@when(u'I click again to show the dropdown')
def click_dropdown2(context):
    """Re-open the dropdown by typing a character into the input."""
    context.cur_element['input'].send_keys('o')
    # Wait for the overlay to attach before asserting on ownership.
    context.cur_element._scope.wait_all('short', welem=context.cur_element._remote)
    assert context.cur_element['input'].owns, "Did not present overlay"
@then(u'the previous dropdown component resolves')
def check_resolve_dropdown1(context):
    """Inspect the previously captured (possibly stale) component references."""
    print("Cur dropdown %s" % context.cur_element['input'].owns)
    print("Cur overlays %s" % context.cur_overlays.is_displayed())
    # NOTE(review): unconditional failure — given the file name
    # (stale_steps), this appears to be a deliberate stale-element
    # demonstration rather than a forgotten stub; confirm before removing.
    raise AssertionError
| 33.512195 | 87 | 0.749636 | 195 | 1,374 | 5.071795 | 0.353846 | 0.141557 | 0.206269 | 0.133468 | 0.289181 | 0.262892 | 0.19818 | 0.125379 | 0.125379 | 0.125379 | 0 | 0.003373 | 0.136827 | 1,374 | 40 | 88 | 34.35 | 0.830523 | 0.015284 | 0 | 0.071429 | 0 | 0 | 0.188148 | 0 | 0 | 0 | 0 | 0 | 0.107143 | 1 | 0.142857 | false | 0 | 0.178571 | 0 | 0.321429 | 0.142857 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8c7974a6483483179cdfc40e57d4d46564982cf | 1,995 | py | Python | tests/test_utils.py | initOS/dob-lib | a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | initOS/dob-lib | a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b | [
"Apache-2.0"
] | null | null | null | tests/test_utils.py | initOS/dob-lib | a07ea11fb40d7bdc1714b96da52fbc89ed37fb7b | [
"Apache-2.0"
] | null | null | null | # © 2021 Florian Kantelberg (initOS GmbH)
# License Apache-2.0 (http://www.apache.org/licenses/).
import argparse
from unittest.mock import patch
import pytest
from doblib import utils
def test_merge():
    """Exercise utils.merge on lists, sets, mixed types and nested dicts."""
    cases = [
        (([1, 2], [3, 4]), {}, [1, 2, 3, 4]),
        (({1, 2}, {3, 4}), {}, {1, 2, 3, 4}),
        (({3, 4}, {1, 2}), {}, {1, 2, 3, 4}),
        # A right-hand set replaces a left-hand list outright.
        (([1, 2], {3, 4}), {}, {3, 4}),
        (({1: 2}, {1: 3}), {}, {1: 3}),
        (({1: 2}, {2: 3}), {}, {1: 2, 2: 3}),
        # Nested dicts merge recursively ...
        (({1: {2: 3}}, {1: {3: 4}}), {}, {1: {2: 3, 3: 4}}),
        # ... unless the key is listed in `replace`.
        (({1: {2: 3}}, {1: {3: 4}}), {"replace": {1}}, {1: {3: 4}}),
    ]
    for args, kwargs, expected in cases:
        assert utils.merge(*args, **kwargs) == expected
def test_keyboard_interrupt():
    """raise_keyboard_interrupt() must raise KeyboardInterrupt."""
    with pytest.raises(KeyboardInterrupt):
        utils.raise_keyboard_interrupt()
def test_version():
    """Construct Version from str/tuple/int/None and compare across types."""
    version = utils.Version("1.2.3")
    assert version == "1.2.3"
    assert utils.Version(version) == version

    # Degenerate constructor inputs normalize to tuples.
    assert utils.Version() == ()
    assert utils.Version(None) == ()
    assert utils.Version(1) == (1,)
    assert utils.Version((1, 2, 3)) == (1, 2, 3)

    # String rendering and rich ordering against mixed operand types.
    version = utils.Version("1.2.3")
    assert str(version) == "1.2.3"
    assert version == (1, 2, 3)
    assert version < 2
    assert version > "1.2"
    assert version <= (1, 2, 3)
    assert version >= (1, 2, 2)
def test_default_parser():
    """default_parser() returns a ready-to-use argparse.ArgumentParser."""
    assert isinstance(utils.default_parser("test"), argparse.ArgumentParser)
@patch("os.path.isfile")
def test_config_file(mock):
    """get_config_file() prefers odoo.local.yaml over odoo.project.yaml."""
    existing = []
    # The lambda looks `existing` up at call time, so rebinding the list
    # below changes which files "exist".
    mock.side_effect = lambda path: path in existing

    assert utils.get_config_file() is None

    existing = ["odoo.local.yaml"]
    assert utils.get_config_file() == "odoo.local.yaml"

    existing = ["odoo.project.yaml"]
    assert utils.get_config_file() == "odoo.project.yaml"

    # When both exist, the local override wins.
    existing = ["odoo.project.yaml", "odoo.local.yaml"]
    assert utils.get_config_file() == "odoo.local.yaml"
def test_call():
    """utils.call returns captured output by default, exit code with pipe=False."""
    captured = utils.call("ls")
    assert isinstance(captured, str) and captured

    assert utils.call("ls", pipe=False) == 0
| 27.708333 | 76 | 0.595489 | 307 | 1,995 | 3.80456 | 0.218241 | 0.039384 | 0.043664 | 0.101884 | 0.464897 | 0.384418 | 0.339041 | 0.253425 | 0.234589 | 0.160959 | 0 | 0.066074 | 0.211028 | 1,995 | 71 | 77 | 28.098592 | 0.675349 | 0.046617 | 0 | 0.08 | 0 | 0 | 0.082149 | 0 | 0 | 0 | 0 | 0 | 0.54 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8caad8563cecc8db84796425abde40bb649818d | 385 | py | Python | Timbuchalka/Section-4/conditions.py | Advik-B/Learn-Python | 66ac57259764e8f2c3c6513a8de6c106800d8abe | [
"MIT"
] | 6 | 2021-07-26T14:21:25.000Z | 2021-07-26T14:32:01.000Z | Timbuchalka/Section-4/conditions.py | Advik-B/Learn-Python | 66ac57259764e8f2c3c6513a8de6c106800d8abe | [
"MIT"
] | 2 | 2021-12-10T10:25:19.000Z | 2021-12-10T10:27:15.000Z | Timbuchalka/Section-4/conditions.py | Advik-B/Learn-Python | 66ac57259764e8f2c3c6513a8de6c106800d8abe | [
"MIT"
] | null | null | null | age = int(input("How old are you ?"))
#if age >= 16 and age <= 65:
#if 16 <= age <= 65:
if age in range(16,66):
print ("Have a good day at work.")
elif age > 100 or age <= 0:
print ("Nice Try. This program is not dumb.")
endkey = input ("Press enter to exit")
else:
print (f"Enjoy your free time, you need to work for us after {65 - age} years.")
print ("-"*80)
| 20.263158 | 84 | 0.592208 | 69 | 385 | 3.304348 | 0.710145 | 0.04386 | 0.061404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.069444 | 0.251948 | 385 | 18 | 85 | 21.388889 | 0.722222 | 0.119481 | 0 | 0 | 0 | 0 | 0.489614 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.444444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
a8cfa6b65da313be457d7b1e85c1b549c3d118ce | 806 | py | Python | tests/test_reducer.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | tests/test_reducer.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | null | null | null | tests/test_reducer.py | nclarey/pyg-base | a7b90ea2ad4d740d8e7f8c4a7c9d341d36373862 | [
"MIT"
] | 1 | 2022-01-03T21:56:14.000Z | 2022-01-03T21:56:14.000Z | from pyg_base import reducer, reducing, dictable
import pytest
from operator import add, mul
from functools import reduce
def test_reducer():
    """reducer() folds like functools.reduce but tolerates empty input."""
    for func, values, expected in [
        (add, [1, 2, 3, 4], 10),
        (mul, [1, 2, 3, 4], 24),
        (add, [1], 1),
    ]:
        assert reducer(func, values) == expected
    # Empty input yields None ...
    assert reducer(add, []) is None
    # ... where plain functools.reduce raises.
    with pytest.raises(TypeError):
        reduce(add, [])
def test_reducing():
    """reducing() wraps a callable or dunder name into a multi-arg reducer."""
    from operator import mul
    reduce_mul = reducing(mul)
    assert reduce_mul([1, 2, 3, 4]) == 24
    assert reduce_mul(6, 4) == 24

    # A dunder-method name works like the operator itself.
    reduce_add = reducing('__add__')
    assert reduce_add([1, 2, 3, 4]) == 10
    assert reduce_add(6, 4) == 10

    d = dictable(a=[1, 2, 3, 5, 4])
    assert reducing('inc')(d, dict(a=1))

    # Extra keyword arguments are forwarded to every folding step.
    total3 = lambda a, b, c: a + b + c
    assert reducing(total3)([1, 2, 3, 4, 5], c=0) == 15
    assert reducing(total3)([1, 2, 3, 4, 5], c=1) == 19
| 26 | 48 | 0.580645 | 133 | 806 | 3.43609 | 0.300752 | 0.030635 | 0.045952 | 0.052516 | 0.223195 | 0.223195 | 0.223195 | 0.091904 | 0.091904 | 0 | 0 | 0.091503 | 0.240695 | 806 | 30 | 49 | 26.866667 | 0.655229 | 0 | 0 | 0 | 0 | 0 | 0.021092 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0.090909 | false | 0 | 0.227273 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8d39e7bc43d7332ddc41fbd99b4d1baa9f1a728 | 3,398 | py | Python | src/lh/service_directives.py | Plazmaz/LiquidHoney | eb983a3223b01e19a360132f9661d4c68cd3ae8c | [
"MIT"
] | 20 | 2019-03-24T19:27:59.000Z | 2021-06-11T18:03:02.000Z | src/lh/service_directives.py | Plazmaz/LiquidHoney | eb983a3223b01e19a360132f9661d4c68cd3ae8c | [
"MIT"
] | null | null | null | src/lh/service_directives.py | Plazmaz/LiquidHoney | eb983a3223b01e19a360132f9661d4c68cd3ae8c | [
"MIT"
] | 1 | 2020-04-10T17:12:37.000Z | 2020-04-10T17:12:37.000Z | from abc import ABC
class ProbeConfig(object):
    """A parsed nmap service-probe section: directives grouped by keyword.

    Directives are stored per keyword in insertion order.
    """

    def __init__(self):
        # keyword (str) -> list of directive objects, in insertion order
        self.directives = {}

    def add_directive(self, directive):
        """Register *directive* under its keyword.

        Idiom fix: dict.setdefault replaces the manual membership
        check + empty-list initialization.
        """
        self.directives.setdefault(directive.keyword, []).append(directive)

    def get_directives(self, name):
        """Return the list of directives for *name*, or None if absent."""
        return self.directives.get(name)

    def has_directive(self, name):
        """Return True if at least one directive with keyword *name* exists."""
        return name in self.directives

    def get_directive(self, name):
        """Return the first directive registered for *name*.

        NOTE: raises TypeError when *name* was never added (``.get`` yields
        None); callers are expected to check has_directive() first.
        """
        return self.directives.get(name)[0]

    def __str__(self):
        # Space-separated keywords, in insertion order.
        return ' '.join(self.directives)
class Directive(ABC):
    """Base class for a single line of nmap's service-probe file.

    See https://nmap.org/book/vscan-fileformat.html
    """

    def __init__(self, keyword, param_count, raw):
        self.keyword = keyword
        self.raw = raw
        # Split off at most param_count space-separated fields; the leading
        # token is the directive keyword itself and is discarded.
        self.parameters = raw.split(" ", param_count)[1:]

    def validate(self):
        """Sanity-check parsed fields; the base class accepts everything."""
        pass
class Exclude(Directive):
    """The ``Exclude`` directive: ports nmap must skip during version scan.

    (The original docstring was copy-pasted from Ports and described the
    wrong directive; per the nmap file format, Exclude lists ports to be
    excluded from scanning, at most once per file.)
    """

    def __init__(self, raw):
        super().__init__('exclude', 1, raw)
        # This will need to be parsed into proper port format later
        self.ports = self.parameters[0]
class Probe(Directive):
    """A ``Probe`` directive: the payload nmap sends to fingerprint a service."""

    def __init__(self, raw):
        super().__init__('probe', 3, raw)
        # Fields: transport protocol, probe name, and the literal probe
        # string (kept in nmap's q|...| delimiter form).
        self.protocol = self.parameters[0]
        self.probename = self.parameters[1]
        self.probestring = self.parameters[2]

    def validate(self):
        """Check that the parsed protocol is one nmap supports."""
        assert self.protocol in ('TCP', 'UDP'), \
            'Invalid protocol {} found, expected "UDP" or "TCP"'.format(self.protocol)
class Match(Directive):
    """A ``match`` directive: the response pattern nmap matches a service by."""

    def __init__(self, raw):
        super().__init__('match', 2, raw)
        # Service name and the still-unparsed m|...| pattern expression.
        self.service = self.parameters[0]
        self.raw_pattern = self.parameters[1]
        # Filled in by a later parsing pass: compiled pattern, pattern flags,
        # and any v/p/i/... version-info fields.
        self.pattern = None
        self.flags = []
        self.version_info = []
class SoftMatch(Match):
    """A ``softmatch`` directive.

    Similar to match, but after a softmatch nmap will only send probes
    matching the given service, hoping for a later 'hard' match that
    provides fuller version info.
    """

    def __init__(self, raw):
        super().__init__(raw)
        # Match.__init__ already parsed service and raw_pattern from *raw*
        # with the same field layout, so the previous re-assignments here
        # were dead code and have been removed; only the keyword differs.
        self.keyword = 'softmatch'
class Ports(Directive):
    """This line tells nmap what ports identified by the probe are found on
    (only once per section)
    """

    def __init__(self, raw):
        super().__init__('ports', 1, raw)
        # This will need to be parsed into proper port format later
        # (e.g. "80,443,8000-8010" stays a raw spec string for now).
        self.ports = self.parameters[0]
class SslPorts(Ports):
    """Same as Ports, but the service is wrapped in SSL/TLS on these ports."""

    def __init__(self, raw):
        super().__init__(raw)
        # Parsing is identical to Ports; only the keyword differs.
        self.keyword = 'sslports'
class Rarity(Directive):
    """Determines how frequently a probe returns useful results.

    The higher the number, the rarer the probe is.
    https://nmap.org/book/vscan-technique.html#vscan-selection-and-rarity
    """

    def __init__(self, raw):
        super().__init__('rarity', 1, raw)
        # Single integer parameter; int() raises ValueError on a bad file.
        self.rarity = int(self.parameters[0])
| 26.546875 | 107 | 0.629488 | 430 | 3,398 | 4.795349 | 0.306977 | 0.074685 | 0.048012 | 0.047527 | 0.360815 | 0.340446 | 0.329292 | 0.266246 | 0.240543 | 0.240543 | 0 | 0.006759 | 0.259859 | 3,398 | 127 | 108 | 26.755906 | 0.813121 | 0.272219 | 0 | 0.269841 | 0 | 0 | 0.044187 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 1 | 0.253968 | false | 0.015873 | 0.015873 | 0.063492 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a8d51fd115f735ca8b9bacde53faaf9aede7e370 | 4,699 | py | Python | gaia_project/communication_layer.py | yogurt-company/gaia_ai | 1885059e99a96f45e6cc082b87c189829c4eaed9 | [
"MIT"
] | null | null | null | gaia_project/communication_layer.py | yogurt-company/gaia_ai | 1885059e99a96f45e6cc082b87c189829c4eaed9 | [
"MIT"
] | null | null | null | gaia_project/communication_layer.py | yogurt-company/gaia_ai | 1885059e99a96f45e6cc082b87c189829c4eaed9 | [
"MIT"
] | null | null | null | from traits.api import (HasPrivateTraits, Instance, List, Dict)
from .player import Player
from .board import GameBoard
from .tech_board import TechBoard
from gaia_project.faction_board.player_panel import PlayerPanel
from .layout import Layout
from .constants import BASIC_4P_SETUP
import pygame
import sys
class CommunicationLayer(HasPrivateTraits):
    """Abstract marker base for game <-> player communication back-ends."""
    pass
class LocalCommunicationLayer(CommunicationLayer):
    """Communication layer for a local (hot-seat) game on one machine.

    Owns the pygame event loop and routes moves and choices between the
    game engine and the on-screen player panels.
    """

    players = List(Instance(Player))
    board = Instance(GameBoard)
    tech_board = Instance(TechBoard)
    player_panels = Dict(Instance(Player), Instance(PlayerPanel))
    layout = Instance(Layout)

    def __init__(self, players=None, cfg=BASIC_4P_SETUP, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if players is not None:
            self.players = players
        else:
            # Default demo line-up when no players are supplied.
            self.players = [Player('Hadsch Hallas', 'Freddy'),
                            Player('Xenos', 'Jebediah'),
                            Player('Taklons', 'Vivian')]

        self.layout = Layout(self.players, cfg)
        self.board = self.layout.board
        self.tech_board = self.layout.tech_board

        # The layout already owns a panel for the first player; create fresh
        # panels of the same size for everyone else.
        pp_w, pp_h = self.layout.player_panel_coords()
        self.player_panels = {
            player: (
                PlayerPanel(pp_w, pp_h, player)
                if player is not self.players[0] else
                self.layout.player_panel) for player in self.players}

        pygame.init()
        # Only react to quit, mouse-up and window-resize events.
        pygame.event.set_allowed(None)
        pygame.event.set_allowed((pygame.QUIT, pygame.MOUSEBUTTONUP,
                                  pygame.VIDEORESIZE))

    def make_move(self, player, game_state):
        """Obtain *player*'s next move, from the UI or from an automa."""
        if player.intelligence == 'human':
            # Show the current player's panel and block on a UI event.
            self.layout.player_panel = self.player_panels[player]
            self.layout.player_panel.hide_choice()
            self.update_gfx()
            return self.process_events()
        elif player.intelligence == 'automa':
            # BUGFIX: the automa's move was computed but never returned
            # (the method fell through and implicitly returned None).
            return player.automa.make_move(player, game_state)
        elif player.intelligence == 'ai':
            raise NotImplementedError
        else:
            # BUGFIX: was `raise NotImplemented`, which raises the
            # NotImplemented singleton and is a TypeError in Python 3.
            raise NotImplementedError

    def make_choice(self, player, choice, move):
        """Present *choice* for *move* to *player* and return the selection."""
        self.layout.player_panel = self.player_panels[player]
        self.layout.player_panel.show_choice(choice, move.description)
        self.update_gfx()
        # (Removed leftover debug print.)
        return self.process_events()

    def inform_illegal_choice(self, player, explanation):
        """Show *explanation* on the player's panel and wait for a click."""
        self.layout.player_panel = self.player_panels[player]
        self.layout.player_panel.display_error(explanation)
        self.update_gfx()
        self.process_events()

    def process_events(self):
        """Block until a UI interaction yields an event; return that event.

        QUIT and VIDEORESIZE are handled inline; a mouse click is forwarded
        to whichever layout surface it landed on.
        """
        while True:
            # We are now accepting mouse events again.
            pygame.event.set_allowed(pygame.MOUSEBUTTONUP)
            for event in pygame.event.get():
                # This event does not need to be executed in order.
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                # This event does not need to be executed in order.
                elif event.type == pygame.VIDEORESIZE:
                    self.layout.resize(event.w, event.h)
                elif event.type == pygame.MOUSEBUTTONUP:
                    # Disallow further mouse events until this one is handled.
                    pygame.event.set_blocked(pygame.MOUSEBUTTONUP)
                    origin_surf = self.layout.determine_origin(event.pos)
                    if origin_surf is None:
                        continue
                    event = self.layout.pass_event(origin_surf, event.pos)
                    if event is not None:
                        return event

    def add_building(self, player, coords, building_type, lantid_share=False):
        """Place a building for *player* at board *coords*."""
        x, y = coords
        self.board.add_building(x, y, player.color, building_type,
                                lantid_share=lantid_share)

    def add_orbital(self, player, coords, orbital_type):
        """Place an orbital for *player* at board *coords*."""
        x, y = coords
        self.board.add_orbital(x, y, player.color, orbital_type)

    def techup(self, player, tech_track):
        """Advance *player* one step on *tech_track*."""
        self.tech_board.techup(player.color, tech_track)

    def update_available_buildings(self, player):
        pass

    def update_bonus_tiles(self, tiles):
        """Refresh the round-bonus tiles on every player panel."""
        for player in self.player_panels:
            panel = self.player_panels[player]
            panel.update_bonus_tiles(tiles)

    def update_turn_order(self, next_order):
        pass

    def update_advanced_tech_tiles(self, tiles):
        pass

    def update_terraforming_fed(self, fed):
        pass

    def update_available_feds(self, feds):
        pass

    def update_available_power_actions(self, power_actions):
        """Refresh the shared power/QIC action display."""
        self.tech_board.update_power_actions(power_actions)

    def update_available_special_actions(self, player, spec_actions):
        """Refresh *player*'s special-action display."""
        panel = self.player_panels[player]
        panel.update_special_actions(spec_actions[player])

    def update_misc_info(self, score):
        pass

    def update_gfx(self):
        """Repaint the whole layout."""
        self.layout.paint()
| 27.319767 | 76 | 0.685678 | 594 | 4,699 | 5.232323 | 0.255892 | 0.048263 | 0.041184 | 0.054054 | 0.14157 | 0.124196 | 0.111326 | 0.086873 | 0.086873 | 0.086873 | 0 | 0.000822 | 0.223239 | 4,699 | 171 | 77 | 27.479532 | 0.850685 | 0.048308 | 0 | 0.169643 | 0 | 0 | 0.015674 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160714 | false | 0.071429 | 0.080357 | 0 | 0.330357 | 0.008929 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a8d55f37a7ad9934a5f3c141465620693ff5ff0b | 2,710 | py | Python | dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 104 | 2020-03-30T09:40:00.000Z | 2022-03-06T22:34:25.000Z | dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 25 | 2020-06-12T01:36:35.000Z | 2022-02-19T07:30:44.000Z | dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 3833d56e79dcd7529c3e8b3a3a8a782d513d9b12 | [
"MIT"
] | 10 | 2020-04-02T23:43:56.000Z | 2021-05-14T13:47:01.000Z | """
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.security.linking.accountlinker import BasicAccountLinkerService
from programytest.client import TestClient
class AccountLinkerTestClient(TestClient):
    """Programy test client that also loads the AIML category files located
    in this test module's directory."""

    def __init__(self):
        TestClient.__init__(self)

    def load_storage(self):
        # Keep the default stores, then add the categories next to this file.
        super(AccountLinkerTestClient, self).load_storage()
        self.add_default_stores()
        self.add_categories_store([os.path.dirname(__file__)])
class AccountLinkerAIMLTests(unittest.TestCase):
    """End-to-end AIML tests for linking a secondary account to a primary."""

    def setUp(self):
        """Wire a client context to an SQL-backed account-linking service."""
        config = SQLStorageConfiguration()
        storage_engine = SQLStorageEngine(config)
        storage_engine.initialise()

        client = AccountLinkerTestClient()
        self.context = client.create_client_context("TESTUSER")
        self.context.brain._security._account_linker = BasicAccountLinkerService(storage_engine)

    def test_account_link_happy_path(self):
        """Link a primary account, then a secondary one using the issued key."""
        response = self.context.bot.ask_question(
            self.context, "LINK PRIMARY ACCOUNT USER1 CONSOLE PASSWORD123")
        self.assertIsNotNone(response)
        self.assertTrue(response.startswith('Your generated key is'))

        words = response.split(" ")
        # BUGFIX: was assertTrue(5, len(words)), which always passes because
        # any truthy first argument satisfies assertTrue (the second argument
        # is just the failure message). assertEqual performs the real check.
        self.assertEqual(5, len(words))
        generated_key = words[4][:-1]  # strip the trailing punctuation

        command = "LINK SECONDARY ACCOUNT USER1 USER2 FACEBOOK PASSWORD123 %s" % generated_key
        response = self.context.bot.ask_question(self.context, command)
        self.assertIsNotNone(response)
        self.assertEqual('Your accounts are now linked.', response)
| 44.42623 | 126 | 0.761255 | 341 | 2,710 | 5.950147 | 0.501466 | 0.043371 | 0.012814 | 0.024643 | 0.070971 | 0.043371 | 0.043371 | 0.043371 | 0 | 0 | 0 | 0.007121 | 0.170849 | 2,710 | 60 | 127 | 45.166667 | 0.895861 | 0.391882 | 0 | 0.0625 | 0 | 0 | 0.09939 | 0 | 0 | 0 | 0 | 0 | 0.15625 | 1 | 0.125 | false | 0.0625 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a8d7f2fc85888b5e64602a155d1dd889102580a5 | 3,479 | py | Python | density_estimation.py | agilevaluechain/100-page-ml | 9236ec87b0cf563e0998d723be19e58155003a9d | [
"MIT"
] | 3 | 2019-02-12T16:46:23.000Z | 2020-12-02T15:39:38.000Z | density_estimation.py | a272573094/theMLbook | 9236ec87b0cf563e0998d723be19e58155003a9d | [
"MIT"
] | null | null | null | density_estimation.py | a272573094/theMLbook | 9236ec87b0cf563e0998d723be19e58155003a9d | [
"MIT"
] | 2 | 2019-07-22T15:28:58.000Z | 2019-10-14T16:07:51.000Z | import numpy as np
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
import math
from sklearn.neighbors import KernelDensity
import scipy.integrate as integrate
from sklearn.kernel_ridge import KernelRidge
# Use STIX fonts so math text matches the book's serif typesetting.
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
matplotlib.rcParams.update({'font.size': 18})

# Means and standard deviations of the two Gaussian components the
# synthetic sample is drawn from (equal-weight mixture).
mu1, sigma1 = 3.0, 1.0
mu2, sigma2 = 8.0, 1.5
def sample_points():
    """Draw 50 points from each of the two Gaussians and pool them."""
    first_component = np.random.normal(mu1, sigma1, 50)
    second_component = np.random.normal(mu2, sigma2, 50)
    return list(first_component) + list(second_component)
# Grid of x values used to draw the fitted and true density curves.
x_plot = np.linspace(0, 12, 100)

# Training sample: 100 points from the two-Gaussian mixture.
x = sample_points()

colors = ['red', 'blue', 'orange', 'green']  # one curve color per bandwidth
lw = 2  # line width for all plotted curves
def kernel(x1, x2, bi = 2.0):
    """Gaussian smoothing kernel evaluated at the scaled distance (x1-x2)/bi.

    Parameters
    ----------
    x1, x2 : float
        The two points being compared.
    bi : float
        Kernel bandwidth.

    Returns
    -------
    float
        Standard-normal density of z = (x1 - x2) / bi.
    """
    z = (x1 - x2) / bi
    # BUGFIX: use math.pi instead of the hard-coded 3.14 approximation so
    # the kernel has the exact N(0, 1) normalization constant.
    return (1.0 / math.sqrt(2.0 * math.pi)) * math.exp((-1.0 / 2.0) * (z ** 2))
def fb(xx, data, bi):
    """Kernel-density estimate of the pdf at *xx* with bandwidth *bi*."""
    total = sum(kernel(xx, xi, bi) for xi in data)
    return total / (len(data) * bi)
def fbi(i, data, bi):
    """Leave-one-out KDE: density at data[i] estimated from all OTHER points.

    Used by the least-squares cross-validation criterion for choosing the
    bandwidth *bi*.
    """
    # Idiom fix: build the held-out sample with an enumerate comprehension
    # instead of a manual index loop with append.
    others = [xj for j, xj in enumerate(data) if j != i]
    return (1.0 / (len(others) * bi)) * sum(kernel(data[i], xj, bi) for xj in others)
def sum_pdf(x):
    """True pdf of the equal-weight two-Gaussian mixture, evaluated at *x*.

    Returns a list of (N(mu1, sigma1) + N(mu2, sigma2)) / 2 values, one per
    element of *x*.
    """
    # BUGFIX(robustness): `import scipy as sp` does not guarantee the
    # scipy.stats submodule is loaded (it only worked here because another
    # import pulled it in); import it explicitly.
    from scipy import stats
    p1 = stats.norm.pdf(x, mu1, sigma1)
    p2 = stats.norm.pdf(x, mu2, sigma2)
    return [(p1[i] + p2[i]) / 2.0 for i in range(len(x))]
# NOTE(review): this file uses Python 2 syntax (`print minb` below).
# Bandwidth selection by least-squares cross-validation: for each candidate
# b, estimate the risk J(b) = integral(fb^2) - 2 * mean_i( f_{b,-i}(x_i) ).
b = np.linspace(0.01, 3.0, 100)
score = []
for bi in b:
    def fb2(xx):
        # Squared KDE with the current bandwidth, for the integral term.
        return fb(xx, x, bi)**2
    s = integrate.quad(fb2, -np.inf, np.inf)[0] - 2.0*np.mean([fbi(i, x, bi) for i in range(len(x))])
    score.append(s)

# Plot the estimated risk as a function of the bandwidth.
plt.figure(1)
plt.plot(b,score)
plt.xlabel("$b$")
plt.ylabel("$l$")
plt.tight_layout()
plt.xticks(np.arange(0, 3.5, 0.5))
#plt.show()
fig1 = plt.gcf()
fig1.subplots_adjust(top = 0.98, bottom = 0.1, right = 0.98, left = 0.08, hspace = 0, wspace = 0)
fig1.savefig('../../Illustrations/density-estimation-loss.eps', format='eps', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
fig1.savefig('../../Illustrations/density-estimation-loss.pdf', format='pdf', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
fig1.savefig('../../Illustrations/density-estimation-loss.png', dpi=1000, bbox_inches = 'tight', pad_inches = 0)

# Pick the bandwidth minimizing the estimated risk.
minb = [bi for bi, s in zip(b, score) if s == min(score)][0]
print minb
# Plot the KDE for the cross-validated bandwidth plus two fixed ones
# (under- and over-smoothed) against the true mixture pdf.
for count, degree in enumerate([round(minb, 2)] + [0.2, 2.0]):
    plt.figure(count+2)
    axes = plt.gca()
    axes.set_xlim([0,12])
    axes.set_ylim([0,0.3])
    plt.xlabel("$x$")
    plt.ylabel("pdf")
    # Training points shown as small ticks just above the x axis.
    plt.scatter(x, [0.005] * len(x), color='navy', s=30, marker=2, label="training examples")
    plt.plot(x_plot, [fb(xp ,x, degree) for xp in x_plot], color=colors[count], linewidth=lw, label="$\\hat{f}_b$, $b = " + str(degree) + "$")
    plt.plot(x_plot,sum_pdf(x_plot), label="true pdf")
    plt.legend(loc='upper right')
    plt.tight_layout()
    fig1 = plt.gcf()
    fig1.subplots_adjust(top = 0.98, bottom = 0.1, right = 0.98, left = 0.08, hspace = 0, wspace = 0)
    fig1.savefig('../../Illustrations/density-estimation-' + str(count) + '.eps', format='eps', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
    fig1.savefig('../../Illustrations/density-estimation-' + str(count) + '.pdf', format='pdf', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
    fig1.savefig('../../Illustrations/density-estimation-' + str(count) + '.png', dpi=1000, bbox_inches = 'tight', pad_inches = 0)
plt.show()
| 33.451923 | 144 | 0.635527 | 579 | 3,479 | 3.75475 | 0.288428 | 0.00644 | 0.033119 | 0.068997 | 0.376725 | 0.369825 | 0.342226 | 0.340386 | 0.336707 | 0.304508 | 0 | 0.057666 | 0.167577 | 3,479 | 103 | 145 | 33.776699 | 0.693025 | 0.037655 | 0 | 0.08 | 0 | 0 | 0.135548 | 0.077199 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.106667 | null | null | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7633bcef8f558a190b0edf5806df25a3b35e668d | 1,188 | py | Python | config/wsgi.py | poblouin/budgetme-rest-api | 74d9237bc7b0a118255a659029637c5ed1a8b7a1 | [
"MIT"
] | 2 | 2018-03-07T09:43:07.000Z | 2018-03-11T04:50:41.000Z | config/wsgi.py | poblouin/budgetme-rest-api | 74d9237bc7b0a118255a659029637c5ed1a8b7a1 | [
"MIT"
] | 13 | 2017-12-28T02:44:09.000Z | 2020-06-05T21:13:13.000Z | config/wsgi.py | poblouin/budgetme-rest-api | 74d9237bc7b0a118255a659029637c5ed1a8b7a1 | [
"MIT"
] | null | null | null | """
WSGI config for budgetme project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
# os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()

# Wrap the WSGI app with Sentry error reporting, but only when running with
# the production settings module.
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
    application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| 38.322581 | 78 | 0.795455 | 168 | 1,188 | 5.529762 | 0.517857 | 0.043057 | 0.086114 | 0.083961 | 0.131324 | 0.094726 | 0 | 0 | 0 | 0 | 0 | 0.002879 | 0.122896 | 1,188 | 30 | 79 | 39.6 | 0.888676 | 0.734007 | 0 | 0 | 0 | 0 | 0.161074 | 0.161074 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
7635bfe6eba52bcc67cdc83102f247a92f82a569 | 1,276 | py | Python | sa/profiles/NAG/SNR/profile.py | xUndero/noc | 9fb34627721149fcf7064860bd63887e38849131 | [
"BSD-3-Clause"
] | 1 | 2019-09-20T09:36:48.000Z | 2019-09-20T09:36:48.000Z | sa/profiles/NAG/SNR/profile.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | sa/profiles/NAG/SNR/profile.py | ewwwcha/noc | aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Vendor: NAG
# OS: SNR
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.profile.base import BaseProfile
class Profile(BaseProfile):
    """SA profile for NAG SNR switches: CLI prompts, pager handling and
    interface-name classification."""

    name = "NAG.SNR"
    # Prompts that require an automatic reply while reading command output.
    pattern_more = [
        (r"^ --More-- ", "\n"),
        (r"^Confirm to overwrite current startup-config configuration \[Y/N\]:", "y\n"),
    ]
    username_submit = "\r"
    password_submit = "\r"
    command_submit = "\r"
    command_disable_pager = "terminal length 200"
    command_exit = "exit"
    config_tokenizer = "indent"
    config_tokenizer_settings = {"line_comment": "!"}

    # First four characters of an interface name -> NOC interface type.
    INTERFACE_TYPES = {
        "Ethe": "physical",  # Ethernet
        "Vlan": "SVI",  # Vlan
        "Port": "aggregated",  # Port-Channel
        "Vsf-": "aggregated",  # Vsf-Port
        "vpls": "unknown",  # vpls_dev
        "l2ov": "tunnel",  # l2overgre
    }

    @classmethod
    def get_interface_type(cls, name):
        # Ethernet0 is treated as the dedicated management port.
        if name == "Ethernet0":
            return "management"
        # Classify by 4-character prefix; returns None for unknown names.
        return cls.INTERFACE_TYPES.get(name[:4])
| 30.380952 | 88 | 0.487461 | 118 | 1,276 | 5.135593 | 0.686441 | 0.034653 | 0.046205 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016129 | 0.222571 | 1,276 | 41 | 89 | 31.121951 | 0.594758 | 0.302508 | 0 | 0 | 0 | 0 | 0.257437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0.037037 | 0.037037 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
7636eeaae62c9c102a883b907b773aecc04889f1 | 754 | py | Python | src/common/views.py | danpercic86/e-notary | ba15a9a80e2091593fb088feacfaf9574c816d6e | [
"Apache-2.0"
] | 6 | 2020-12-25T20:46:51.000Z | 2022-01-27T20:52:13.000Z | src/common/views.py | danpercic86/e-notary | ba15a9a80e2091593fb088feacfaf9574c816d6e | [
"Apache-2.0"
] | 3 | 2021-01-26T12:20:14.000Z | 2022-01-27T20:45:28.000Z | src/common/views.py | danpercic86/e-notary | ba15a9a80e2091593fb088feacfaf9574c816d6e | [
"Apache-2.0"
] | null | null | null | from django.db.models import QuerySet
from drf_spectacular.utils import extend_schema
from rest_framework import status
from rest_framework.viewsets import ModelViewSet
from common.models import Example
from common.serializers import ExampleSerializer
from common.utils import create_swagger_info
@create_swagger_info(
    extend_schema(
        operation_id="Operation id",
        description="Example description",
        summary="Example operation summary",
        auth=[],  # no auth declared for this example endpoint
        request=ExampleSerializer(),
        responses={status.HTTP_201_CREATED: ExampleSerializer()},
        tags=["Common"],
    )
)
class ExampleViewSet(ModelViewSet):
    """CRUD viewset for Example objects.

    Serves only the rows exposed by the custom ``published`` manager and
    attaches OpenAPI metadata via the decorator above.
    """
    serializer_class = ExampleSerializer
    queryset: QuerySet[Example] = Example.published.all()
| 30.16 | 65 | 0.753316 | 80 | 754 | 6.9375 | 0.4875 | 0.054054 | 0.061261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004792 | 0.169761 | 754 | 24 | 66 | 31.416667 | 0.881789 | 0 | 0 | 0 | 0 | 0 | 0.082228 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
763e6130c882fc4e05156ff0a6f93880ac497dc1 | 2,652 | py | Python | applications/terms_of_service/views.py | awwong1/apollo | 5571b5f222265bec3eed45b21e862636ccdc9a97 | [
"MIT"
] | null | null | null | applications/terms_of_service/views.py | awwong1/apollo | 5571b5f222265bec3eed45b21e862636ccdc9a97 | [
"MIT"
] | null | null | null | applications/terms_of_service/views.py | awwong1/apollo | 5571b5f222265bec3eed45b21e862636ccdc9a97 | [
"MIT"
] | null | null | null | from apollo.viewmixins import LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin
from applications.terms_of_service.models import TermsOfService
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
class TermsOfServiceViewList(LoginRequiredMixin, ListView):
    """List all terms-of-service records; requires login."""
    model = TermsOfService
    context_object_name = "termsofservices"
    template_name = "terms_of_service/termsofservice_list.html"
class TermsOfServiceViewDetail(LoginRequiredMixin, DetailView):
    """Show one terms-of-service record; requires login."""
    model = TermsOfService
    context_object_name = 'termsofservice'
    template_name = "terms_of_service/termsofservice_detail.html"
class TermsOfServiceViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
    """Staff-only creation form for terms-of-service records."""
    model = TermsOfService
    context_object_name = 'termsofservice'
    template_name = "terms_of_service/termsofservice_form.html"
    success_message = "%(title)s was created successfully!"
    activity_verb = 'created terms of service'

    def get_success_url(self):
        # Redirect to the detail page of the record just created.
        return reverse_lazy('termsofservice_detail', kwargs={'pk': self.object.pk})

    def get_context_data(self, **kwargs):
        # The shared form template labels itself with this action string.
        context = super(TermsOfServiceViewCreate, self).get_context_data(**kwargs)
        context.update({'action': "Create New"})
        return context
class TermsOfServiceViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin, UpdateView):
    """Staff-only edit form for terms-of-service records."""
    model = TermsOfService
    context_object_name = 'termsofservice'
    template_name = "terms_of_service/termsofservice_form.html"
    success_message = "%(title)s was updated successfully!"
    activity_verb = 'updated terms of service'

    def get_success_url(self):
        # Redirect to the detail page of the record just updated.
        return reverse_lazy('termsofservice_detail', kwargs={'pk': self.object.pk})

    def get_context_data(self, **kwargs):
        # The shared form template labels itself with this action string.
        context = super(TermsOfServiceViewUpdate, self).get_context_data(**kwargs)
        context.update({'action': "Update"})
        return context
class TermsOfServiceViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
    """Staff-only deletion view for terms-of-service records."""
    model = TermsOfService
    context_object_name = 'termsofservice'
    template_name = "terms_of_service/termsofservice_form.html"
    success_url = reverse_lazy('termsofservice_list')
    activity_verb = 'deleted terms of service'
    target_object_valid = False

    def get_context_data(self, **kwargs):
        # The shared form template labels itself with this action string.
        context = super(TermsOfServiceViewDelete, self).get_context_data(**kwargs)
        context.update({'action': "Delete"})
        return context
7644bc4bec765da9c73ffce65a56e216576e72d0 | 812 | py | Python | spacy-textdescriptives/subsetters.py | HLasse/spacy-textdescriptives | c079c6617ef266b54f28c51e619d2429a5dafb83 | [
"Apache-2.0"
] | null | null | null | spacy-textdescriptives/subsetters.py | HLasse/spacy-textdescriptives | c079c6617ef266b54f28c51e619d2429a5dafb83 | [
"Apache-2.0"
] | null | null | null | spacy-textdescriptives/subsetters.py | HLasse/spacy-textdescriptives | c079c6617ef266b54f28c51e619d2429a5dafb83 | [
"Apache-2.0"
] | null | null | null | """Helpers to subset an extracted dataframe"""
readability_cols = [
"flesch_reading_ease",
"flesch_kincaid_grade",
"smog",
"gunning_fog",
"automated_readability_index",
"coleman_liau_index",
"lix",
"rix",
]
dependency_cols = [
"dependency_distance_mean",
"dependency_distance_std",
"prop_adjacent_dependency_relation_mean",
"prop_adjacent_dependency_relation_std",
]
descriptive_stats_cols = [
"token_length_mean",
"token_length_median",
"token_length_std",
"sentence_length_mean",
"sentence_length_median",
"sentence_length_std",
"syllables_per_token_mean",
"syllables_per_token_median",
"syllables_per_token_std",
"n_tokens",
"n_unique_tokens",
"percent_unique_tokens",
"n_sentences",
"n_characters",
]
| 21.945946 | 46 | 0.699507 | 88 | 812 | 5.852273 | 0.488636 | 0.064078 | 0.099029 | 0.116505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.185961 | 812 | 36 | 47 | 22.555556 | 0.779123 | 0.049261 | 0 | 0 | 0 | 0 | 0.626632 | 0.345953 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
764a1991418da83a2826386345ee15787a0bf609 | 759 | py | Python | notification/admin.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 1 | 2020-04-28T09:10:53.000Z | 2020-04-28T09:10:53.000Z | notification/admin.py | horacexd/clist | 9759dfea97b86514bec9825d2430abc36decacf0 | [
"Apache-2.0"
] | 25 | 2021-03-28T15:20:02.000Z | 2021-03-28T16:02:48.000Z | notification/admin.py | VadVergasov/clist | 4afcdfe88250d224043b28efa511749347cec71c | [
"Apache-2.0"
] | null | null | null | from pyclist.admin import BaseModelAdmin, admin_register
from notification.models import Notification, Task
@admin_register(Notification)
class NotificationAdmin(BaseModelAdmin):
    """Admin configuration for Notification records."""
    list_display = ['coder', 'method', 'before', 'period', 'last_time', 'modified']
    list_filter = ['method']
    search_fields = ['coder__user__username', 'method', 'period']

    def get_readonly_fields(self, request, obj=None):
        # NOTE(review): this override is a pure pass-through to the base
        # implementation and has no effect; possibly a leftover hook point.
        return super().get_readonly_fields(request, obj)
@admin_register(Task)
class TaskAdmin(BaseModelAdmin):
    """Admin configuration for notification delivery tasks."""
    model_fields = None  # class attrs below drive the changelist UI
    list_display = ['notification', 'created', 'modified', 'is_sent']
    search_fields = ['notification__coder__user__username', 'notification__method', 'subject', 'message']
    list_filter = ['notification__method', 'is_sent']
| 37.95 | 105 | 0.741765 | 82 | 759 | 6.5 | 0.487805 | 0.073171 | 0.093809 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126482 | 759 | 19 | 106 | 39.947368 | 0.803922 | 0 | 0 | 0 | 0 | 0 | 0.275362 | 0.073781 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0.071429 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
76501c0e0a4d032fa02c49003cd7b75a3d3f4eaf | 879 | py | Python | main.py | Elvira521feng/InfoNews | 5db22738446b1026db6479b91dc42c3aeba17060 | [
"MIT"
] | null | null | null | main.py | Elvira521feng/InfoNews | 5db22738446b1026db6479b91dc42c3aeba17060 | [
"MIT"
] | null | null | null | main.py | Elvira521feng/InfoNews | 5db22738446b1026db6479b91dc42c3aeba17060 | [
"MIT"
] | null | null | null | from flask import current_app
from flask_script import Manager
from flask_migrate import MigrateCommand
from info import create_app
# Create the Flask application with the "dev" configuration.
app = create_app("dev")

# Management-command runner for the application.
mgr = Manager(app)

# Register database-migration commands under the "mc" name.
mgr.add_command("mc", MigrateCommand)


# Command to create a superuser account: python main.py create_superuser -u NAME -p PASS
@mgr.option("-u", dest="username")
@mgr.option("-p", dest="password")
def create_superuser(username, password):
    """Create an admin user from the command line.

    Both -u (username) and -p (password) are required; prints a status
    message in either case.
    """
    if not all([username, password]):
        print("账号/密码不完整")
        return

    # Imported lazily to avoid circular imports at module load time.
    from info.models import User
    from info import db

    user = User()
    user.mobile = username
    user.password = password
    user.nick_name = username
    user.is_admin = True

    try:
        db.session.add(user)
        db.session.commit()
    except Exception as e:  # narrowed from BaseException: don't trap Ctrl-C
        current_app.logger.error(e)
        db.session.rollback()
        print("生成失败")
        # Bug fix: previously execution fell through and printed the
        # success message even when the commit failed.
        return
    print("生成管理员成功")


if __name__ == '__main__':
    mgr.run()
7653939722f793ed976839ee4ae8d6af71343c7a | 810 | py | Python | fppv/cli.py | kdheepak/fppv | 5adfb5a6bc62ffcd56faa888ddfe173ae185656c | [
"BSD-3-Clause"
] | null | null | null | fppv/cli.py | kdheepak/fppv | 5adfb5a6bc62ffcd56faa888ddfe173ae185656c | [
"BSD-3-Clause"
] | null | null | null | fppv/cli.py | kdheepak/fppv | 5adfb5a6bc62ffcd56faa888ddfe173ae185656c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
"""cli module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import traceback
import importlib
from . import version
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])


@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('modulename')
@click.option('--debug', default=False, help='')
@click.version_option(version.__version__, '--version')
def cli(**kwargs):
    """Print ``modulename == version`` for an importable module.

    Modules without a ``__version__`` attribute are shown with an empty
    version string.
    """
    modulename = kwargs['modulename']
    module = importlib.import_module(modulename)
    try:
        # Renamed local (was ``version``) to avoid shadowing the imported
        # ``version`` module used by the decorator above.
        module_version = module.__version__
    except AttributeError:
        # Bug fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing attribute is expected.
        module_version = ''
    click.secho("{} == ".format(modulename), nl=False)
    click.secho("{}".format(module_version), fg='green', bold=True)


if __name__ == '__main__':
    cli()
| 23.142857 | 60 | 0.706173 | 93 | 810 | 5.752688 | 0.473118 | 0.056075 | 0.08972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145679 | 810 | 34 | 61 | 23.823529 | 0.773121 | 0.039506 | 0 | 0 | 0 | 0 | 0.084416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.347826 | 0 | 0.391304 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
765a451b98ec23f375bec8b799471dd2d577921c | 4,612 | py | Python | microdf/inequality.py | Peter-Metz/microdf | 6c5c6ff5cc87181d559553bdcc36dc95aa701cd4 | [
"MIT"
] | null | null | null | microdf/inequality.py | Peter-Metz/microdf | 6c5c6ff5cc87181d559553bdcc36dc95aa701cd4 | [
"MIT"
] | null | null | null | microdf/inequality.py | Peter-Metz/microdf | 6c5c6ff5cc87181d559553bdcc36dc95aa701cd4 | [
"MIT"
] | null | null | null | import numpy as np
import microdf as mdf
def gini(df, col, w=None, negatives=None):
    """Calculates Gini index.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :param negatives: An optional string indicating how to treat negative
        values of x:
        'zero' replaces negative values with zeroes.
        'shift' subtracts the minimum value from all values of x,
        when this minimum is negative. That is, it adds the absolute
        minimum value.
        Defaults to None, which leaves negative values as they are.
    :returns: A float, the Gini index.
    """
    # Work on a float numpy array; pandas Series/lists break the math below.
    values = np.array(df[col]).astype("float")
    if negatives == "zero":
        values[values < 0] = 0
    if negatives == "shift" and np.amin(values) < 0:
        values -= np.amin(values)
    if w is None:
        # Unweighted case: closed-form Gini on the sorted values.
        ordered = np.sort(values)
        count = len(values)
        running = np.cumsum(ordered)
        return (count + 1 - 2 * np.sum(running) / running[-1]) / count
    # Weighted case: sort values and weights together, then apply the
    # weighted Gini formula to the cumulative sums.
    weights = np.array(df[w]).astype("float")
    order = np.argsort(values)
    ordered = values[order]
    ordered_w = weights[order]
    cum_w = np.cumsum(ordered_w)
    cum_xw = np.cumsum(ordered * ordered_w)
    return np.sum(cum_xw[1:] * cum_w[:-1] - cum_xw[:-1] * cum_w[1:]) / (
        cum_xw[-1] * cum_w[-1]
    )
def top_x_pct_share(df, col, top_x_pct, w=None):
    """Calculates top x% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param top_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the top x%.
    """
    # The (1 - x) weighted quantile is the entry threshold into the top x%.
    cutoff = mdf.weighted_quantile(df, col, w, 1 - top_x_pct)
    top_slice = df[df[col] >= cutoff]
    return mdf.weighted_sum(top_slice, col, w) / mdf.weighted_sum(df, col, w)
def bottom_x_pct_share(df, col, bottom_x_pct, w=None):
    """Calculates bottom x% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param bottom_x_pct: Decimal between 0 and 1 of the bottom %, e.g. 0.1,
        0.001.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the bottom x%.
    """
    # The bottom x% share is the complement of the top (1 - x)% share.
    # Bug fix: the original passed a nonexistent ``top=False`` keyword to
    # top_x_pct_share, which raised TypeError on every call.
    return 1 - top_x_pct_share(df, col, 1 - bottom_x_pct, w)
def bottom_50_pct_share(df, col, w=None):
    """Calculates bottom 50% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the bottom 50%.
    """
    share = bottom_x_pct_share(df, col, 0.5, w)
    return share
def top_50_pct_share(df, col, w=None):
    """Calculates top 50% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the top 50%.
    """
    share = top_x_pct_share(df, col, 0.5, w)
    return share
def top_10_pct_share(df, col, w=None):
    """Calculates top 10% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the top 10%.
    """
    share = top_x_pct_share(df, col, 0.1, w)
    return share
def top_1_pct_share(df, col, w=None):
    """Calculates top 1% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the top 1%.
    """
    share = top_x_pct_share(df, col, 0.01, w)
    return share
def top_0_1_pct_share(df, col, w=None):
    """Calculates top 0.1% share.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the top 0.1%.
    """
    share = top_x_pct_share(df, col, 0.001, w)
    return share
def t10_b50(df, col, w=None):
    """Calculates ratio between the top 10% and bottom 50% shares.

    :param df: DataFrame.
    :param col: Name of column in df representing value.
    :param w: Column representing weight in df.
    :returns: The share of w-weighted val held by the top 10% divided by
        the share of w-weighted val held by the bottom 50%.
    """
    top10 = top_10_pct_share(df, col, w)
    bottom50 = bottom_50_pct_share(df, col, w)
    return top10 / bottom50
| 31.589041 | 79 | 0.643322 | 755 | 4,612 | 3.823841 | 0.156291 | 0.03637 | 0.051957 | 0.067544 | 0.660201 | 0.613093 | 0.59231 | 0.575338 | 0.519917 | 0.48043 | 0 | 0.025419 | 0.24935 | 4,612 | 145 | 80 | 31.806897 | 0.808492 | 0.552689 | 0 | 0 | 0 | 0 | 0.010686 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.047619 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
765ea91849f86020b6b972615c1e086ac4542de7 | 3,209 | py | Python | interface/seed.py | matthewruttley/Bucketerer | 828ee949b1f53b8432cfe7cf7cae0f6eec7d677f | [
"MIT"
] | null | null | null | interface/seed.py | matthewruttley/Bucketerer | 828ee949b1f53b8432cfe7cf7cae0f6eec7d677f | [
"MIT"
] | null | null | null | interface/seed.py | matthewruttley/Bucketerer | 828ee949b1f53b8432cfe7cf7cae0f6eec7d677f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#Creates an ad bucket with a seed term/domain
from pymongo import MongoClient
from similarity import find_by_html, find_by_similarsites, create_connection, tokenize_clean
from tabulate import tabulate
# Module-level switch: when True the bucket builders print a ranked table
# instead of returning the list of [domain, rank] pairs.
verbose = False


def get_rank(c, domain):
    """Gets the alexa rank for a domain. Returns False if not found"""
    # Domains are stored with '.' replaced by '#' in Mongo (presumably
    # because dots are special in Mongo field paths -- confirm), so the
    # lookup key is re-encoded here.
    entry = c['domains'].find_one({'domain':domain.replace(".", "#")}, {'alexa.rank.latest':1})
    try:
        return entry['alexa']['rank']['latest']
    except Exception:
        # Covers both a missing document (entry is None) and missing keys.
        return False
def create_bucket_from_domain(seed_domain, total_sites=False):
    """Creates an ad bucket seeded from a single domain.

    Merges HTML-similarity and similarsites matches, attaches Alexa ranks,
    and sorts ascending by rank (rank 1 = most popular). Returns the list
    of [domain, rank] pairs, or prints a table when ``verbose`` is set.
    """
    c = create_connection()

    # get some similar sites
    sites_by_similar = find_by_similarsites(c, starter_site=seed_domain)
    sites_by_html = find_by_html(c, starter_site=seed_domain)

    # merge, attach Alexa rank, and drop entries with no rank or the seed itself
    all_sites = []
    for repository in [sites_by_html, sites_by_similar]:
        for site in repository:
            # Result documents use either 'url' or 'domain' as the key.
            domain = site['url'] if 'url' in site else site['domain']
            rank = get_rank(c, domain)
            if rank:
                if domain != seed_domain:
                    all_sites.append([domain, rank])

    all_sites = sorted(all_sites, key=lambda x: x[1])

    if verbose:
        # Python 2 print statement; shows top 15 unless total_sites given.
        print tabulate(all_sites[:15 if not total_sites else total_sites])
    else:
        return all_sites
def create_bucket_with_raw_keywords(keywords, total_sites=False):
    """finds sites with the keywords in their description. ranks by alexa"""
    c = create_connection()
    keywords = tokenize_clean(keywords)

    results = []
    for keyword in keywords:
        # Mongo full-text search over indexed site metadata.
        matches = c['domains'].find({'$text':{'$search':keyword}}, {'domain':1, 'alexa.rank.latest':1})
        for match in matches:
            # Domain keys are stored with '.' encoded as '#'; decode here.
            results.append([match['domain'].replace('#', '.'), match['alexa']['rank']['latest']])

    # Ascending by Alexa rank (rank 1 = most popular).
    results = sorted(results, key=lambda x: x[1])

    if verbose:
        print tabulate(results[:15 if not total_sites else total_sites])
    else:
        return results
def create_bucket_from_keywords(search_string, total_sites=False):
    """Given some input text, this creates a bucket"""
    c = create_connection()

    # get some similar sites by matching the text against page HTML/meta
    sites_by_html = find_by_html(c, starter_text=search_string)
    if verbose: print "Found {0} sites by meta desc".format(len(sites_by_html))

    # find similar sites to the top 10% (random number?)
    all_sites = []
    ten_pc = int(len(sites_by_html) / 10)
    for site in sites_by_html[:ten_pc]:
        # Result documents use either 'url' or 'domain' as the key.
        domain = site['url'] if 'url' in site else site['domain']
        sites_by_similar = find_by_similarsites(c, starter_site=domain)
        # NOTE: ``ten_pc`` is rebound inside the loop, so the outer slice
        # above only applies to the first iteration's value.
        ten_pc = int(len(sites_by_similar) / 10) #take top 10%
        if verbose: print "Adding {0} sites by similarsites".format(ten_pc)
        all_sites += sites_by_similar[:ten_pc]

    # prepend original sites_by_html to all_sites
    all_sites = sites_by_html + all_sites
    if verbose: print "Total sites: {0}".format(len(all_sites))

    # attach Alexa rank and sort ascending (rank 1 = most popular)
    ranked_sites = []
    for site in all_sites:
        domain = site['url'] if 'url' in site else site['domain']
        rank = get_rank(c, domain)
        if rank:
            ranked_sites.append([domain, rank])
    ranked_sites = sorted(ranked_sites, key=lambda x: x[1])

    if verbose:
        print tabulate(ranked_sites[:15 if not total_sites else total_sites])
    else:
        return ranked_sites
| 28.149123 | 97 | 0.722655 | 494 | 3,209 | 4.489879 | 0.220648 | 0.04734 | 0.039675 | 0.016231 | 0.330478 | 0.298016 | 0.281785 | 0.281785 | 0.258792 | 0.16817 | 0 | 0.0085 | 0.156747 | 3,209 | 113 | 98 | 28.39823 | 0.81116 | 0.086008 | 0 | 0.276923 | 0 | 0 | 0.083927 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.046154 | null | null | 0.092308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7671557048679bb49752d923c9406604fe45cfb7 | 4,295 | py | Python | ee/cli/plugins/sync.py | quimica/easyengine | 07056759922a6e05949ca4b56ee94df6b2e580de | [
"MIT"
] | null | null | null | ee/cli/plugins/sync.py | quimica/easyengine | 07056759922a6e05949ca4b56ee94df6b2e580de | [
"MIT"
] | null | null | null | ee/cli/plugins/sync.py | quimica/easyengine | 07056759922a6e05949ca4b56ee94df6b2e580de | [
"MIT"
] | null | null | null | from cement.core.controller import CementBaseController, expose
from cement.core import handler, hook
from ee.core.fileutils import EEFileUtils
from ee.cli.plugins.sitedb import *
from ee.core.mysql import *
from ee.core.logging import Log
def ee_sync_hook(app):
    """Cement post-argument-parsing hook; currently a no-op placeholder."""
    # do something with the ``app`` object here.
    pass
class EESyncController(CementBaseController):
    """``ee sync`` command: reconcile the EasyEngine site database with the
    credentials found in each site's on-disk wp-/ee-config.php file."""

    class Meta:
        label = 'sync'
        stacked_on = 'base'
        stacked_type = 'nested'
        description = 'synchronize EasyEngine database'

    @expose(hide=True)
    def default(self):
        # The bare ``ee sync`` invocation delegates to sync().
        self.sync()

    @expose(hide=True)
    def sync(self):
        """
        1. reads database information from wp/ee-config.php
        2. updates records into ee database accordingly.
        """
        Log.info(self, "Synchronizing ee database, please wait...")
        sites = getAllsites(self)
        if not sites:
            # No registered sites: nothing to do (loop below is a no-op).
            pass
        for site in sites:
            # Only site types backed by a MySQL database carry credentials.
            if site.site_type in ['mysql', 'wp', 'wpsubdir', 'wpsubdomain']:
                ee_site_webroot = site.site_path
                # Read config files
                configfiles = glob.glob(ee_site_webroot + '/*-config.php')

                # search for wp-config.php inside htdocs/www/
                if not configfiles:
                    Log.debug(self, "Config files not found in {0}/ "
                              .format(ee_site_webroot))
                    if site.site_type != 'mysql':
                        Log.debug(self, "Searching wp-config.php in {0}/htdocs/www/ "
                                  .format(ee_site_webroot))
                        configfiles = glob.glob(ee_site_webroot + '/htdocs/www/wp-config.php')

                if configfiles:
                    if EEFileUtils.isexist(self, configfiles[0]):
                        # Extract credentials from the PHP define('KEY', 'value')
                        # lines: take the text between the comma and the closing
                        # parenthesis, then strip quotes and whitespace.
                        ee_db_name = (EEFileUtils.grep(self, configfiles[0],
                                      'DB_NAME').split(',')[1]
                                      .split(')')[0].strip().replace('\'', ''))
                        ee_db_user = (EEFileUtils.grep(self, configfiles[0],
                                      'DB_USER').split(',')[1]
                                      .split(')')[0].strip().replace('\'', ''))
                        ee_db_pass = (EEFileUtils.grep(self, configfiles[0],
                                      'DB_PASSWORD').split(',')[1]
                                      .split(')')[0].strip().replace('\'', ''))
                        ee_db_host = (EEFileUtils.grep(self, configfiles[0],
                                      'DB_HOST').split(',')[1]
                                      .split(')')[0].strip().replace('\'', ''))

                        # Check if database really exist
                        try:
                            if not EEMysql.check_db_exists(self, ee_db_name):
                                # Mark it as deleted if not exist
                                ee_db_name = 'deleted'
                                ee_db_user = 'deleted'
                                ee_db_pass = 'deleted'
                        except StatementExcecutionError as e:
                            Log.debug(self, str(e))
                        except Exception as e:
                            Log.debug(self, str(e))

                        if site.db_name != ee_db_name:
                            # update records if any mismatch found
                            Log.debug(self, "Updating ee db record for {0}"
                                      .format(site.sitename))
                            updateSiteInfo(self, site.sitename,
                                           db_name=ee_db_name,
                                           db_user=ee_db_user,
                                           db_password=ee_db_pass,
                                           db_host=ee_db_host)
                else:
                    Log.debug(self, "Config files not found for {0} "
                              .format(site.sitename))
def load(app):
    """Plugin entry point invoked by the EasyEngine core framework."""
    # register the plugin class.. this only happens if the plugin is enabled
    handler.register(EESyncController)

    # register a hook (function) to run after arguments are parsed.
    hook.register('post_argument_parsing', ee_sync_hook)
| 44.739583 | 94 | 0.469849 | 419 | 4,295 | 4.677804 | 0.324582 | 0.028571 | 0.036735 | 0.061224 | 0.242857 | 0.206122 | 0.093878 | 0.042857 | 0 | 0 | 0 | 0.007761 | 0.430035 | 4,295 | 95 | 95 | 45.210526 | 0.792892 | 0.101979 | 0 | 0.197183 | 0 | 0 | 0.100314 | 0.012048 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0.084507 | 0.084507 | 0 | 0.169014 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
767403c68c6963f4ac7af7065b349de0b68aecd1 | 4,307 | py | Python | reviewboard/scmtools/sshutils.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | 1 | 2019-01-16T11:59:40.000Z | 2019-01-16T11:59:40.000Z | reviewboard/scmtools/sshutils.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | null | null | null | reviewboard/scmtools/sshutils.py | smorley/reviewboard | 39dd1166fdec19d4fbced965b42a3a23a3b6b956 | [
"MIT"
] | null | null | null | import os
import urlparse
from django.utils.translation import ugettext_lazy as _
import paramiko
from reviewboard.scmtools.errors import AuthenticationError, \
BadHostKeyError, SCMError, \
UnknownHostKeyError
# A list of known SSH URL schemes.
ssh_uri_schemes = ["ssh", "sftp"]

# Teach urlparse to split netloc (user@host:port) for these schemes, which
# it otherwise would not recognize.
urlparse.uses_netloc.extend(ssh_uri_schemes)
class RaiseUnknownHostKeyPolicy(paramiko.MissingHostKeyPolicy):
    """A Paramiko policy that raises UnknownHostKeyError for missing keys."""
    def missing_host_key(self, client, hostname, key):
        # Invoked by paramiko when the server key is absent from known_hosts.
        raise UnknownHostKeyError(hostname, key)
def humanize_key(key):
    """Returns a human-readable key as a series of hex characters."""
    fingerprint = key.get_fingerprint()
    return ':'.join("%02x" % ord(c) for c in fingerprint)
def get_host_keys_filename():
    """Returns the URL to the known host keys file."""
    path = os.path.expanduser('~/.ssh/known_hosts')
    return path
def is_ssh_uri(url):
    """Returns whether or not a URL represents an SSH connection."""
    scheme = urlparse.urlparse(url)[0]
    return scheme in ssh_uri_schemes
def get_ssh_client():
    """Returns a new paramiko.SSHClient with all known host keys added."""
    client = paramiko.SSHClient()
    known_hosts = get_host_keys_filename()
    if os.path.exists(known_hosts):
        client.load_host_keys(known_hosts)
    return client
def add_host_key(hostname, key):
"""Adds a host key to the known hosts file."""
dirname = os.path.dirname(get_host_keys_filename())
if not os.path.exists(dirname):
# Make sure the .ssh directory exists.
try:
os.mkdir(dirname, 0700)
except OSError, e:
raise IOError(_("Unable to create directory %(dirname)s, which is "
"needed for the SSH host keys. Create this "
"directory, set the web server's user as the "
"the owner, and make it writable only by that "
"user.") % {
'dirname': dirname,
})
try:
fp = open(get_host_keys_filename(), 'a')
fp.write('%s %s %s\n' % (hostname, key.get_name(), key.get_base64()))
fp.close()
except IOError, e:
raise IOError(
_('Unable to write host keys file %(filename)s: %(error)s') % {
'filename': filename,
'error': e,
})
def replace_host_key(hostname, old_key, new_key):
    """
    Replaces a host key in the known hosts file with another.

    This is used for replacing host keys that have changed.
    """
    filename = get_host_keys_filename()

    if not os.path.exists(filename):
        # Nothing to replace; just record the new key and return.
        add_host_key(hostname, new_key)
        return

    try:
        fp = open(filename, 'r')
        lines = fp.readlines()
        fp.close()

        old_key_base64 = old_key.get_base64()
    except IOError, e:
        raise IOError(
            _('Unable to read host keys file %(filename)s: %(error)s') % {
                'filename': filename,
                'error': e,
            })

    try:
        # Rewrite the whole file, swapping in the new key where the old
        # base64 payload matches (hostname/key-type fields are preserved).
        fp = open(filename, 'w')

        for line in lines:
            parts = line.strip().split(" ")

            if parts[-1] == old_key_base64:
                parts[-1] = new_key.get_base64()

            fp.write(' '.join(parts) + '\n')

        fp.close()
    except IOError, e:
        raise IOError(
            _('Unable to write host keys file %(filename)s: %(error)s') % {
                'filename': filename,
                'error': e,
            })
def check_host(hostname, username=None, password=None):
    """
    Checks if we can connect to a host with a known key.

    This will raise an exception if we cannot connect to the host. The
    exception will be one of BadHostKeyError, UnknownHostKeyError, or
    SCMError.
    """
    client = get_ssh_client()
    # Unknown host keys raise UnknownHostKeyError instead of being
    # silently auto-added.
    client.set_missing_host_key_policy(RaiseUnknownHostKeyPolicy())

    try:
        client.connect(hostname, username=username, password=password)
    except paramiko.BadHostKeyException, e:
        # The stored key does not match what the server presented.
        raise BadHostKeyError(e.hostname, e.key, e.expected_key)
    except paramiko.AuthenticationException, e:
        raise AuthenticationError()
    except paramiko.SSHException, e:
        # Any other SSH-level failure is wrapped in a generic SCM error.
        raise SCMError(unicode(e))
| 30.118881 | 79 | 0.602972 | 520 | 4,307 | 4.867308 | 0.301923 | 0.04109 | 0.03793 | 0.037535 | 0.161596 | 0.153299 | 0.13868 | 0.125247 | 0.125247 | 0.0968 | 0 | 0.00624 | 0.293011 | 4,307 | 142 | 80 | 30.330986 | 0.824959 | 0.01602 | 0 | 0.318182 | 0 | 0 | 0.125107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.022727 | 0.056818 | null | null | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
76803f5a83658d39e862a3ea11083cca906b0016 | 3,369 | py | Python | pymain.py | Farhad-Mrkm/NCE_ICC-2022 | 998db82536c2077dbdb157aa21af8c6b84957761 | [
"MIT"
] | 1 | 2022-01-18T02:08:30.000Z | 2022-01-18T02:08:30.000Z | pymain.py | Farhad-Mrkm/NCE_ICC-2022 | 998db82536c2077dbdb157aa21af8c6b84957761 | [
"MIT"
] | null | null | null | pymain.py | Farhad-Mrkm/NCE_ICC-2022 | 998db82536c2077dbdb157aa21af8c6b84957761 | [
"MIT"
] | null | null | null | # author__Farhad_Mirkarimi-*- coding: utf-8 -*-
import os
import h5py
import glob, os
import numpy as np
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
import torch
import torch.nn as nn
from tqdm.auto import tqdm, trange
from numpy.random import default_rng
import torch.nn.functional as F
import argparse
import gc
gc.collect()
print(np.version.version)
from all_params import all_params
from joint_training import joint_training
# ---- parse command-line arguments ------------------------------------
parser = argparse.ArgumentParser(description='provide arguments for neural capacity estimation')
#parser.add_argument('--SNR', type=int, default=[10], help='Signal to noise(unit)')
parser.add_argument('--SNR',nargs='+',type=int)  # one or more SNR values to sweep
parser.add_argument('--init_epoch', type=int, default=100, help='First round epoch')
parser.add_argument('--max_epoch', type=int, default=3000, help='joint training epoch')
parser.add_argument('--seed_size', type=int, default=2, help='seed size for discrete inputs')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--hidden_dim_critic', type=int, default=256, help='hidden dim for mi_est net')
parser.add_argument('--hidden_dim_nit', type=int, default=256, help='hidden_dim for nit net')
parser.add_argument('--dim', type=int, default=1, help='dimension for mi_est net')
parser.add_argument('--dim_nit', type=int, default=1, help='dimension for NIT net')
parser.add_argument('--layer_mi', type=int, default=4, help='layer number for mi_est net')
parser.add_argument('--layer_nit', type=int, default=4, help='layer number for nit_net')
parser.add_argument('--lr_rate_nit', type=float, default=.0001, help='training lr')
parser.add_argument('--lr_rate_mi_est', type=float, default=.00001, help='training lr')
parser.add_argument('--type_channel', type=str, default='conts_awgn', help='channel name')
parser.add_argument('--estimator', type=str, default='mine', help='estimator type')
parser.add_argument('--activation', type=str, default='relu', help='activation function')
parser.add_argument('--peak', type=float, default=None, help='peak_amplitude constraint')
parser.add_argument('--positive', type=float, default=None, help='positivity of input')
#parser.add_argument('--verbose', dest='verbose', action='store_true')
#parser.set_defaults(verbose=False)
args = parser.parse_args()
# ----------------------------------------------------------------------

# Build hyper-parameter dicts for the NIT (input) network and the critic
# (MI-estimator) network.
# NOTE(review): several CLI options (hidden dims, learning rates,
# activation) are NOT forwarded here -- hard-coded values are used
# instead; confirm whether this is intentional.
nit_params,critic_params=all_params(dim=args.dim,layers_critic=args.layer_mi,embed_dim=32,hidden_dim_critic=256,activation_F1='relu',lr_critic=.0001,dim_NIT=args.dim_nit,layers_NIT=args.layer_nit,hidden_dim_NIT=256,t_x_power=1,lr_NIT=.0001,channel_type=args.type_channel,peak_amp=args.peak,positive=args.positive)

# Run the joint (NIT + MI-estimator) training loop; returns the final
# input batch and the estimated channel capacity.
batch_x0,cap= joint_training(typeinp=args.type_channel,nit_params=nit_params,critic_params=critic_params,SNR=args.SNR,estimator=args.estimator,init_epoch=args.init_epoch,max_epoch=args.max_epoch,itr_every_nit=2,itr_every_mi=5,batch_size=args.batch_size,seed_size=args.seed_size)
| 63.566038 | 313 | 0.688038 | 471 | 3,369 | 4.717622 | 0.284501 | 0.081008 | 0.153015 | 0.054005 | 0.242124 | 0.180018 | 0.121062 | 0.059406 | 0 | 0 | 0 | 0.01976 | 0.158801 | 3,369 | 52 | 314 | 64.788462 | 0.764291 | 0.08222 | 0 | 0 | 0 | 0 | 0.201468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
76806ed1e58231dbb9592dc36161e9c8ee832dc4 | 3,245 | py | Python | libpySat/pySatTransformPolarMotion.py | grzeskokbol/pySatTools | 7518ce670866bfebd4eb3d2f390e885d83c2e9c9 | [
"MIT"
] | null | null | null | libpySat/pySatTransformPolarMotion.py | grzeskokbol/pySatTools | 7518ce670866bfebd4eb3d2f390e885d83c2e9c9 | [
"MIT"
] | null | null | null | libpySat/pySatTransformPolarMotion.py | grzeskokbol/pySatTools | 7518ce670866bfebd4eb3d2f390e885d83c2e9c9 | [
"MIT"
] | null | null | null |
import datetime
import numpy as np
import libpySat as pySat
from astropy import _erfa as erfa
from scipy.misc import derivative
from scipy import interpolate
class TransformPolarMotion:
    """Polar-motion rotation between ITRF and TIRS frames.

    fxp/fyp are interpolator callables (MJD -> polar motion component) built
    elsewhere, typically from IERS EOP tables. The computed rotation matrix is
    cached per epoch in rotSave/epochSave.
    """

    def __init__(self,fxp,fyp):
        # Interpolators for the polar motion components as functions of MJD.
        self.fxp=fxp
        self.fyp=fyp
        # Cache key (last epoch) and cached rotation matrix.
        self.epochSave = datetime.datetime.now()
        self.rotSave = np.matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
        # Last computed TIO locator s' (radians, per erfa.sp00).
        self.sprime=0.0

    def __getPolarMotion(self, epoch: datetime.datetime):
        """
        :param epoch: UTC epoch
        :return: polar motion x,y as produced by the fxp/fyp interpolators.
            NOTE(review): docstring said [mas], but callers convert with
            pi/180/3600 (arcsec -> rad) -- units are presumably arcsec; confirm
            against how fxp/fyp are built.
        """
        mjd=pySat.UTC2MJD(epoch)
        return self.fxp(mjd),self.fyp(mjd)

    def __getPolarMotionDot(self, epoch: datetime.datetime):
        """
        :param epoch: UTC epoch
        :return: numeric time derivative of polar motion x,y (same angular
            unit as __getPolarMotion, per day since the interpolators take MJD
            -- TODO confirm; comment previously said per second).
        """
        mjd=pySat.UTC2MJD(epoch)
        # Central-difference derivative of the interpolators, step 1e-3 days.
        xpdot=derivative(self.fxp,mjd,dx=1e-3,n=1)
        ypdot = derivative(self.fyp, mjd, dx=1e-3,n=1)
        return xpdot,ypdot

    def getMatrix_PolarMotion(self,epoch:datetime.datetime):
        """
        Get the polar motion matrix. Relates ITRF to TIRS.
        Result is cached; recomputed only when epoch differs from last call.
        :param epoch: UTC epoch
        :return: 3x3 rotation matrix (np.matrix)
        """
        if (epoch !=self.epochSave):
            xp,yp = self.__getPolarMotion(epoch)
            # TODO: Implementation of tidal and libration terms for polar motion...
            # Convert arcsec -> radians.
            xp*=np.pi/180.0/3600.0
            yp*=np.pi/180.0/3600.0
            sp= self.__getTIO(epoch)
            #print(xp,yp,sp)
            # W = R3(-s') * R2(xp) * R1(yp) (IERS polar motion convention).
            rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
            rs=pySat.RotationMatrix3DZ(-sp)
            self.rotSave=np.matmul(rs,rxy)
            self.epochSave = epoch
            return self.rotSave
        else:
            # Same epoch as last call: reuse cached matrix.
            return self.rotSave

    def __getTIO(self, epoch:datetime.datetime ):
        """
        Gets the Terrestrial Intermediate Origin (TIO) locator s'
        Terrestrial Intermediate Ref Sys (TIRS) defined by TIO and CIP.
        TIRS related to to CIRS by Earth Rotation Angle
        :param epoch: UTC epoch
        :return: s' in radians (erfa.sp00 with two-part Julian date)
        """
        # NOTE(review): uses pySat.pySatTime.UTC2MJD here but pySat.UTC2MJD
        # elsewhere in this class -- confirm both resolve to the same function.
        mjd = pySat.pySatTime.UTC2MJD(epoch)
        self.sprime=erfa.sp00(2400000.5,mjd)
        return self.sprime

    def getMatrix_PolarMotionDot(self,epoch:datetime.datetime):
        """
        Time derivative of the polar motion matrix (ITRF to TIRS), via the
        product rule over the three component rotations.
        :param epoch: UTC epoch
        :return: 3x3 matrix dW/dt
        """
        # TODO: Implementation of tidal and libration terms for polar motion...
        xp, yp = self.__getPolarMotion(epoch)
        xpDot,ypDot = self.__getPolarMotionDot(epoch)
        # Convert arcsec -> radians (and same for the rates).
        xp *= np.pi / 180.0 / 3600.0
        yp *= np.pi / 180.0 / 3600.0
        xpDot*=np.pi/180.0/3600.0
        ypDot*=np.pi/180.0/3600.0
        # Secular rate of s': -47 microarcsec/century, converted to rad/s.
        spDot = -47.0 / 1.0e6 / 3600.0 / 180.0 * np.pi / 86400.0 / 36525.0
        sp = self.__getTIO(epoch)
        # NOTE(review): debug print left in; consider logging instead.
        print('Pmotion dot:',xpDot,ypDot,spDot)
        rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
        # d(rxy)/dt by product rule; assumes d/dangle R(angle) ~ angleDot * R
        # as implemented by the pySat rotation helpers -- TODO confirm.
        rxyDot = np.matmul(xpDot* pySat.RotationMatrix3DY(xp), pySat.RotationMatrix3DX(yp)) \
                 +np.matmul( pySat.RotationMatrix3DY(xp),ypDot* pySat.RotationMatrix3DX(yp))
        rs=pySat.RotationMatrix3DZ(-sp)
        rsDot=-spDot*pySat.RotationMatrix3DZ(-sp)
        return np.matmul(rsDot,rxy) + np.matmul(rs,rxyDot)
| 32.777778 | 93 | 0.600616 | 407 | 3,245 | 4.732187 | 0.272727 | 0.009346 | 0.010903 | 0.012461 | 0.454829 | 0.411215 | 0.339045 | 0.299585 | 0.294912 | 0.233645 | 0 | 0.052293 | 0.281048 | 3,245 | 98 | 94 | 33.112245 | 0.773253 | 0.182435 | 0 | 0.296296 | 0 | 0 | 0.004898 | 0 | 0 | 0 | 0 | 0.020408 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.351852 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7687b35a200e00e3a680b96e15c14d349cc8f6ec | 586 | py | Python | tests/test_sink.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 5 | 2019-10-15T15:56:35.000Z | 2021-02-04T10:11:31.000Z | tests/test_sink.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 121 | 2020-01-06T14:32:30.000Z | 2021-09-23T11:26:11.000Z | tests/test_sink.py | LauWien/smooth | 3d2ee96e3c2b2f9d5d805da1a920748f2dbbd538 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 6 | 2019-10-21T08:36:05.000Z | 2021-03-26T10:37:17.000Z | from smooth.components.component_sink import Sink
import oemof.solph as solph
def test_init():
    """A Sink built from empty params gets default attributes; a given name sticks."""
    default_sink = Sink({})
    for attr in ("input_max", "bus_in"):
        assert hasattr(default_sink, attr)
    assert default_sink.commodity_costs == 0

    named_sink = Sink({"name": "foo"})
    assert named_sink.name == "foo"
def test_add_to_oemof_model():
    """add_to_oemof_model yields an oemof Sink with one input and no outputs."""
    sink = Sink({"bus_in": "foo"})
    energy_system = solph.EnergySystem()
    busses = {"foo": solph.Bus(label="foo")}

    component = sink.add_to_oemof_model(busses, energy_system)

    assert type(component) == solph.network.Sink
    assert len(component.inputs) == 1
    assert len(component.outputs) == 0
| 26.636364 | 82 | 0.662116 | 84 | 586 | 4.440476 | 0.416667 | 0.107239 | 0.075067 | 0.080429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006303 | 0.187713 | 586 | 21 | 83 | 27.904762 | 0.777311 | 0 | 0 | 0 | 0 | 0 | 0.068259 | 0 | 0 | 0 | 0 | 0 | 0.4375 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
768d0d745dc0784b2a3c714af9a252bf04f94f52 | 358 | py | Python | fundamentos/exer027.py | edelvandro/Python | 152685590af873bf63fcc5a29cf3528e4cc31a3e | [
"MIT"
] | 1 | 2020-04-14T14:43:59.000Z | 2020-04-14T14:43:59.000Z | fundamentos/exer027.py | edelvandro/Python | 152685590af873bf63fcc5a29cf3528e4cc31a3e | [
"MIT"
] | null | null | null | fundamentos/exer027.py | edelvandro/Python | 152685590af873bf63fcc5a29cf3528e4cc31a3e | [
"MIT"
] | null | null | null | '''
Faça um programa que leia o nome completo de uma pessoa,
mostrando em seguida o primeiro e o último nome separadamente.
'''
entrada = str(input('Digite um nome completo: ')).strip()
print('Olá {}'.format(entrada))
nome = entrada.split()
print('Seu primeiro nome é: {}'.format(nome[0]))
print('Seu último nome é: {}'.format(nome[len(nome) - 1]))
| 32.545455 | 66 | 0.678771 | 54 | 358 | 4.5 | 0.592593 | 0.098765 | 0.090535 | 0.123457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006623 | 0.156425 | 358 | 10 | 67 | 35.8 | 0.798013 | 0.332402 | 0 | 0 | 0 | 0 | 0.336323 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.6 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
768f6ea4edae64dc3c5e980c6abcd8369f91dd60 | 1,057 | py | Python | DummyIntermediateDevices/connectiontable_dummy_intermediate.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | null | null | null | DummyIntermediateDevices/connectiontable_dummy_intermediate.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | 4 | 2020-04-06T14:20:58.000Z | 2020-04-17T10:47:11.000Z | DummyIntermediateDevices/connectiontable_dummy_intermediate.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | 2 | 2020-04-10T08:56:28.000Z | 2020-09-06T20:08:29.000Z | """The sample file to be run in runmanager.
This is the minimal sample that you can load from runmanager to see if your
code is working properly.
"""
from labscript import *
# from user_devices.dummy_device.labscript_devices import DummyDevice
from labscript_devices.DummyPseudoclock.labscript_devices import DummyPseudoclock
from user_devices.DummyIntermediateDevices.labscript_devices import (
DummyIntermediateDevice,
)
# Master pseudoclock that drives every clockline in this dummy connection table.
DummyPseudoclock("dummy_pseudoclock")

# One clockline hanging off the pseudoclock; devices below attach to it.
ClockLine(
    name="dummy_clockline",
    pseudoclock=dummy_pseudoclock.pseudoclock,
    connection="flag 0",
)

# DummyDevice(name="dummy_device_0", parent_device=dummy_clockline)
# Intermediate device with two analog output channels for testing.
DummyIntermediateDevice(name="dummy_intermediate_device", parent_device=dummy_clockline)
AnalogOut(name="AO1", parent_device=dummy_intermediate_device, connection="ao0")
AnalogOut(name="AO2", parent_device=dummy_intermediate_device, connection="ao1")
# DigitalOut(name="dummy_DO1", parent_device=dummy_intermediate_device, connection="dummy_connection")

# Minimal (empty) shot so the connection table can be compiled directly.
if __name__ == "__main__":
    start()
    stop(1)
7691b87c25055ca65fe6a4859ce2615cd80362d1 | 815 | py | Python | cloudmesh/sign/api.py | cloudmesh/cloudmesh.sign | 84ddaf3b9f29e62f87db35d5a06357a51906db12 | [
"Apache-2.0"
] | null | null | null | cloudmesh/sign/api.py | cloudmesh/cloudmesh.sign | 84ddaf3b9f29e62f87db35d5a06357a51906db12 | [
"Apache-2.0"
] | 10 | 2017-04-30T00:43:05.000Z | 2017-04-30T13:19:27.000Z | cloudmesh/sign/api.py | cloudmesh/extstreet | 84ddaf3b9f29e62f87db35d5a06357a51906db12 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
#import cv2
class Sign(object):
    """Street-sign detection API stub (the OpenCV pipeline is disabled)."""

    def __init__(self):
        # NOTE: this absolute path will not be writeable/present on most hosts.
        self.classifier = '/street-signal/classifier/stopsign_classifier.xml'

    def hello(self, msg):
        """Print a greeting followed by *msg*."""
        print("Hello Sign", msg)

    def detect(self, image):
        """Detect stop signs in *image*.

        The cascade-classifier pipeline below is commented out; until it is
        re-enabled the input image is returned unchanged.
        """
        # stop_cascade = cv2.CascadeClassifier(self.classifier)
        # test = cv2.imread(image)
        # gray = cv2.cvtColor(test, cv2.COLOR_BGR2GRAY)
        # stops = stop_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=2)
        # for (x, y, w, h) in stops:
        #     cv2.rectangle(test, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # return test
        return image

    def cp(self, image, server):
        """Copy *image* to *server* -- not implemented; returns the input."""
        return image
76996d0c6fee1f7994689e5e99ec874d21d739eb | 1,259 | py | Python | src/main.py | mdsanima-dev/mdsanima-rt-go | 6032efb8f9fd3dfeb1640fba47b0b8b7759d4572 | [
"Apache-2.0"
] | 2 | 2021-12-09T10:22:27.000Z | 2022-02-16T19:23:46.000Z | src/main.py | mdsanima-dev/mdsanima-rt-go | 6032efb8f9fd3dfeb1640fba47b0b8b7759d4572 | [
"Apache-2.0"
] | 60 | 2021-08-13T17:24:35.000Z | 2021-08-25T16:25:59.000Z | src/main.py | mdsanima-dev/mdsanima-rt-go | 6032efb8f9fd3dfeb1640fba47b0b8b7759d4572 | [
"Apache-2.0"
] | 1 | 2021-08-20T00:48:28.000Z | 2021-08-20T00:48:28.000Z | """
Main application MDSANIMA RT GO
"""
import kivy
from __init__ import __version__
kivy.require('2.0.0')
from kivy.uix.screenmanager import ScreenManager
from kivymd.app import MDApp
from plyer import notification
from __init__ import resource_path
from config.image import get_images
from config.setting import check_platform, theme_kivy
from libs.screen.calculation import MDSRTGO_scr_2
from libs.screen.info import MDSRTGO_scr_3
from libs.screen.welcome import MDSRTGO_scr_1
class MDSRTGO_main(MDApp):
    """Main KivyMD application: sets theme, icon, fires a demo notification
    and wires up the three app screens."""

    # Window title includes the package version.
    title = 'MDSANIMA RT GO v' + __version__

    def build(self):
        """Build and return the root ScreenManager widget."""
        theme_kivy(self, 'Orange', 'Blue', 'Dark')
        # First bundled image is used as the app icon.
        img = get_images()
        self.icon = resource_path(img[0])
        # Platform-specific notification icon path.
        notification_icon = check_platform()
        # Demo desktop/mobile notification shown at startup.
        notification.notify(
            title='MDSANIMA RT GO',
            message='You have a 2 messages and 10 new issues',
            app_name='MDSANIMA RT GO',
            app_icon=resource_path(notification_icon),
            timeout=10
        )
        # Register the welcome, calculation and info screens.
        sm = ScreenManager()
        sm.add_widget(MDSRTGO_scr_1(name='scr_1'))
        sm.add_widget(MDSRTGO_scr_2(name='scr_2'))
        sm.add_widget(MDSRTGO_scr_3(name='scr_3'))
        return sm

if __name__ == '__main__':
    MDSRTGO_main().run()
| 24.686275 | 62 | 0.68467 | 172 | 1,259 | 4.680233 | 0.395349 | 0.074534 | 0.059627 | 0.067081 | 0.078261 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018424 | 0.223987 | 1,259 | 50 | 63 | 25.18 | 0.805527 | 0.024623 | 0 | 0 | 0 | 0 | 0.102459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.333333 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
769a048a0a36fac60d8155d390abc6c865fbbb32 | 1,188 | py | Python | puzzler/puzzles/polysticks123.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | puzzler/puzzles/polysticks123.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | null | null | null | puzzler/puzzles/polysticks123.py | tiwo/puzzler | 7ad3d9a792f0635f7ec59ffa85fb46b54fd77a7e | [
"Intel"
] | 1 | 2022-01-02T16:54:14.000Z | 2022-01-02T16:54:14.000Z | #!/usr/bin/env python
# $Id$
# Author: David Goodger <goodger@python.org>
# Copyright: (C) 1998-2015 by David J. Goodger
# License: GPL 2 (see __init__.py)
"""
Concrete polystick (orders 1 through 3) puzzles.
"""
from puzzler import coordsys
from puzzler.puzzles.polysticks import Polysticks123
class Polysticks123_4x4ClippedCorners1(Polysticks123):
    """4x4 board with two clipped corners -- 21 solutions."""

    width = 4
    height = 4
    holes = set(((0,0,0), (0,0,1), (2,3,0), (3,2,1)))

    # Hole sets tried previously that yield no solutions:
    #   set(((1,1,0), (1,1,1), (1,2,0), (2,1,1)))
    #   set(((0,1,1), (1,0,0), (1,3,0), (3,1,1)))

    def coordinates(self):
        """Yield every bordered coordinate that is not one of the holes."""
        for pos in self.coordinates_bordered(self.width, self.height):
            if pos in self.holes:
                continue
            yield pos

    def customize_piece_data(self):
        """Restrict the L3 piece to reduce symmetric duplicate solutions."""
        l3 = self.piece_data['L3'][-1]
        l3['flips'] = None
        l3['rotations'] = (0, 1)
class Polysticks123_4x4ClippedCorners2(Polysticks123_4x4ClippedCorners1):
    """Alternate clipped-corner hole layout -- 132 solutions."""

    holes = set(((0,3,0), (0,2,1), (2,3,0), (3,2,1)))

    def customize_piece_data(self):
        """Only pin L3's flips for this layout; all rotations stay allowed."""
        self.piece_data['L3'][-1]['flips'] = None
| 21.6 | 73 | 0.590067 | 170 | 1,188 | 4.035294 | 0.376471 | 0.023324 | 0.039359 | 0.065598 | 0.189504 | 0.166181 | 0.166181 | 0.145773 | 0.145773 | 0.145773 | 0 | 0.106522 | 0.225589 | 1,188 | 54 | 74 | 22 | 0.63913 | 0.186869 | 0 | 0.235294 | 0 | 0 | 0.03201 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.176471 | false | 0 | 0.117647 | 0 | 0.647059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
76a3aadc66cb9b6a3ffc18d0b5d0424cb3f1bf19 | 1,435 | py | Python | tests/package_path_test.py | radiasoft/sirepo | db3d1737bab7a84d39d456c0e8913c88deff3c31 | [
"Apache-2.0"
] | 49 | 2015-07-29T14:11:29.000Z | 2021-12-10T15:24:26.000Z | tests/package_path_test.py | radiasoft/sirepo | db3d1737bab7a84d39d456c0e8913c88deff3c31 | [
"Apache-2.0"
] | 3,732 | 2015-08-03T22:07:26.000Z | 2022-03-31T22:48:33.000Z | tests/package_path_test.py | radiasoft/sirepo | db3d1737bab7a84d39d456c0e8913c88deff3c31 | [
"Apache-2.0"
] | 28 | 2015-11-20T16:23:46.000Z | 2021-09-20T07:22:48.000Z | # -*- coding: utf-8 -*-
u"""Using a sim type that lives in a package outside of sirepo.
:copyright: Copyright (c) 2021 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import contextlib
import pytest
def test_run():
    """End-to-end check that the external 'code1' sim type loads and serves data."""
    from pykern import pkunit

    with _install():
        fc = _fc()
        # Login is needed for its session side effect; response is not inspected.
        fc.sr_login_as_guest(sim_type='code1')
        data = fc.sr_sim_data(sim_type='code1', sim_name='Secret sauce')
        pkunit.pkeq('green', data.models.sauce.color)
def _fc():
    """Create a sirepo flask test client that loads sim types from the test package.

    Returns the client with only the 'code1' sim type enabled.
    """
    from pykern.pkcollections import PKDict
    from sirepo import srunit

    return srunit.flask_client(
        cfg=PKDict(
            # Search the external test package before sirepo's own packages.
            SIREPO_FEATURE_CONFIG_PACKAGE_PATH='sirepo_test_package_path:sirepo',
        ),
        sim_types='code1',
        no_chdir_work=True,
    )
@contextlib.contextmanager
def _install():
    """Unpack the test package into a work dir and put it on sys.path.

    Yields inside the unpacked package directory. sys.path keeps the package
    dir afterwards (matching the previous behavior).

    Raises:
        subprocess.CalledProcessError: if the tar extraction fails.
    """
    from pykern import pkunit, pkio
    import subprocess
    import sys

    with pkunit.save_chdir_work() as work_dir:
        pkunit.data_dir().join('sirepo_test_package_path.tar.gz').copy(work_dir)
        # Argument-list form (shell=False) avoids shell parsing; check=True
        # makes a failed extraction raise instead of silently continuing.
        subprocess.run(
            ['tar', 'xzf', 'sirepo_test_package_path.tar.gz'],
            check=True,
        )
        with pkio.save_chdir('sirepo_test_package_path') as pkg_dir:
            sys.path.append(str(pkg_dir))
            yield
| 28.137255 | 81 | 0.681533 | 199 | 1,435 | 4.698492 | 0.487437 | 0.064171 | 0.072727 | 0.08984 | 0.15508 | 0.126203 | 0 | 0 | 0 | 0 | 0 | 0.008913 | 0.218118 | 1,435 | 50 | 82 | 28.7 | 0.824421 | 0.145645 | 0 | 0.055556 | 0 | 0 | 0.128794 | 0.09598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.333333 | 0 | 0.444444 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
76a644c9158839039d51c335089af8172474435d | 35,948 | py | Python | fanficdownloader/story.py | rodrigonz/rodrigodeoliveiracosta-ffdown | e28e20232e9cd6cef84aa9e830ed8de7dbb208ae | [
"Apache-2.0"
] | null | null | null | fanficdownloader/story.py | rodrigonz/rodrigodeoliveiracosta-ffdown | e28e20232e9cd6cef84aa9e830ed8de7dbb208ae | [
"Apache-2.0"
] | null | null | null | fanficdownloader/story.py | rodrigonz/rodrigodeoliveiracosta-ffdown | e28e20232e9cd6cef84aa9e830ed8de7dbb208ae | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, re
import urlparse
import string
from math import floor
from functools import partial
import logging
logger = logging.getLogger(__name__)
import urlparse as up
import exceptions
from htmlcleanup import conditionalRemoveEntities, removeAllEntities
from configurable import Configurable
# Sentinel tokens used inside .ini metadata values: SPACE_REPLACE stands in for
# a literal space (the ini parser eats trailing spaces) and SPLIT_META marks
# where a replacement value should be split into multiple list entries.
SPACE_REPLACE=u'\s'
SPLIT_META=u'\,'

# Create convert_image method depending on which graphics lib we can
# load.  Preferred: calibre, PIL, none
# Recognized image file extensions mapped to their mimetypes.
imagetypes = {
    'jpg':'image/jpeg',
    'jpeg':'image/jpeg',
    'png':'image/png',
    'gif':'image/gif',
    'svg':'image/svg+xml',
    }
# Define convert_image() using the best available graphics library:
# 1) calibre's magick wrapper, 2) PIL, 3) plain pass-through.
try:
    from calibre.utils.magick import Image

    convtype = {'jpg':'JPG', 'png':'PNG'}

    def convert_image(url,data,sizes,grayscale,
                      removetrans,imgtype="jpg",background='#ffffff'):
        """calibre/magick variant: scale, convert format, flatten transparency
        and/or grayscale image bytes. Returns (bytes, ext, mimetype)."""
        export = False
        img = Image()
        img.load(data)
        owidth, oheight = img.size
        nwidth, nheight = sizes
        scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
        if scaled:
            img.size = (nwidth, nheight)
            export = True
        if normalize_format_name(img.format) != imgtype:
            export = True
        if removetrans and img.has_transparent_pixels():
            # Flatten transparency onto a solid background color.
            canvas = Image()
            canvas.create_canvas(int(img.size[0]), int(img.size[1]), str(background))
            canvas.compose(img)
            img = canvas
            export = True
        if grayscale and img.type != "GrayscaleType":
            img.type = "GrayscaleType"
            export = True
        if export:
            return (img.export(convtype[imgtype]),imgtype,imagetypes[imgtype])
        else:
            # Nothing changed: return the original bytes untouched.
            logger.debug("image used unchanged")
            return (data,imgtype,imagetypes[imgtype])
except:
    # No calibre routines, try for PIL for CLI.
    try:
        import Image
        from StringIO import StringIO
        convtype = {'jpg':'JPEG', 'png':'PNG'}
        def convert_image(url,data,sizes,grayscale,
                          removetrans,imgtype="jpg",background='#ffffff'):
            """PIL variant: same contract as the calibre variant above."""
            export = False
            img = Image.open(StringIO(data))
            owidth, oheight = img.size
            nwidth, nheight = sizes
            scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
            if scaled:
                img = img.resize((nwidth, nheight),Image.ANTIALIAS)
                export = True
            if normalize_format_name(img.format) != imgtype:
                if img.mode == "P":
                    # convert pallete gifs to RGB so jpg save doesn't fail.
                    img = img.convert("RGB")
                export = True
            if removetrans and img.mode == "RGBA":
                background = Image.new('RGBA', img.size, background)
                # Paste the image on top of the background (img doubles as mask).
                background.paste(img, img)
                img = background.convert('RGB')
                export = True
            if grayscale and img.mode != "L":
                img = img.convert("L")
                export = True
            if export:
                outsio = StringIO()
                img.save(outsio,convtype[imgtype])
                return (outsio.getvalue(),imgtype,imagetypes[imgtype])
            else:
                # Nothing changed: return the original bytes untouched.
                logger.debug("image used unchanged")
                return (data,imgtype,imagetypes[imgtype])
    except:
        # No calibre or PIL, simple pass through with mimetype.
        def convert_image(url,data,sizes,grayscale,
                          removetrans,imgtype="jpg",background='#ffffff'):
            """Fallback variant: no processing, just mimetype detection."""
            return no_convert_image(url,data)
## also used for explicit no image processing.
def no_convert_image(url,data):
    """Pass image bytes through untouched; infer the mimetype from the URL.

    Returns (data, ext, mimetype); unknown or missing extensions fall back
    to jpg.
    """
    path = up.urlparse(url).path
    ext = path.rsplit('.', 1)[-1].lower()
    if ext not in imagetypes:
        logger.debug("no_convert_image url:%s - no known extension"%url)
        # doesn't have extension? use jpg.
        ext = 'jpg'
    return (data, ext, imagetypes[ext])
def normalize_format_name(fmt):
    """Lowercase an image format name, folding 'jpeg' into 'jpg'.

    Falsy inputs (None, '') are returned unchanged.
    """
    if not fmt:
        return fmt
    lowered = fmt.lower()
    return 'jpg' if lowered == 'jpeg' else lowered
def fit_image(width, height, pwidth, pheight):
    '''
    Scale (width, height) to fit inside a pwidth x pheight box, preserving
    aspect ratio.

    @param width: Width of image
    @param height: Height of image
    @param pwidth: Width of box
    @param pheight: Height of box
    @return: (scaled, new_width, new_height); scaled is True iff the returned
        dimensions differ from the originals.
    '''
    needs_resize = height > pheight or width > pwidth
    if height > pheight:
        shrink = pheight / float(height)
        width, height = floor(shrink * width), pheight
    if width > pwidth:
        shrink = pwidth / float(width)
        width, height = pwidth, floor(shrink * height)
    # Guard against rounding pushing the height back over the limit.
    if height > pheight:
        shrink = pheight / float(height)
        width, height = floor(shrink * width), pheight
    return needs_resize, int(width), int(height)
# Detect Google App Engine by probing for one of its modules; the imported
# name itself is never used.
try:
    # doesn't really matter what, just checking for appengine.
    from google.appengine.api import apiproxy_stub_map
    is_appengine = True
except:
    is_appengine = False
# The list comes from ffnet, the only multi-language site we support
# at the time of writing.  Values are taken largely from pycountry,
# but with some corrections and guesses.
# Maps site-reported language names to ISO 639 codes; used by
# Story.setMetadata to derive 'langcode' from 'language'.
langs = {
    "English":"en",
    "Spanish":"es",
    "French":"fr",
    "German":"de",
    "Chinese":"zh",
    "Japanese":"ja",
    "Dutch":"nl",
    "Portuguese":"pt",
    "Russian":"ru",
    "Italian":"it",
    "Bulgarian":"bg",
    "Polish":"pl",
    "Hungarian":"hu",
    "Hebrew":"he",
    "Arabic":"ar",
    "Swedish":"sv",
    "Norwegian":"no",
    "Danish":"da",
    "Finnish":"fi",
    "Filipino":"fil",
    "Esperanto":"eo",
    "Hindi":"hi",
    "Punjabi":"pa",
    "Farsi":"fa",
    "Greek":"el",
    "Romanian":"ro",
    "Albanian":"sq",
    "Serbian":"sr",
    "Turkish":"tr",
    "Czech":"cs",
    "Indonesian":"id",
    "Croatian":"hr",
    "Catalan":"ca",
    "Latin":"la",
    "Korean":"ko",
    "Vietnamese":"vi",
    "Thai":"th",
    # Devanagari is a script, not a language; mapped to Hindi as a best guess.
    "Devanagari":"hi",
    }
class InExMatch:
    """One parsed include/exclude rule: a key list plus a regex or literal test.

    Rule syntax (one operator per line):
      keys=~pattern   regex must match       keys!~pattern   regex must not match
      keys==string    exact string match     keys!=string    exact mismatch
    """
    # Class-level defaults; __init__ replaces them per instance.
    keys = []
    regex = None
    match = None
    negate = False

    def __init__(self,line):
        # Operator detection order matters: regex forms (=~ / !~) are tested
        # before the literal forms (== / !=).
        if "=~" in line:
            (self.keys,self.match) = line.split("=~")
            # SPACE_REPLACE lets ini values contain explicit spaces.
            self.match = self.match.replace(SPACE_REPLACE,' ')
            self.regex = re.compile(self.match)
        elif "!~" in line:
            (self.keys,self.match) = line.split("!~")
            self.match = self.match.replace(SPACE_REPLACE,' ')
            self.regex = re.compile(self.match)
            self.negate = True
        elif "==" in line:
            (self.keys,self.match) = line.split("==")
            self.match = self.match.replace(SPACE_REPLACE,' ')
        elif "!=" in line:
            (self.keys,self.match) = line.split("!=")
            self.match = self.match.replace(SPACE_REPLACE,' ')
            self.negate = True
        # Comma-separated key list, whitespace-trimmed.
        # NOTE(review): a line with none of the four operators leaves self.keys
        # as the class-level list, and list.split() here would raise.
        self.keys = map( lambda x: x.strip(), self.keys.split(",") )

    # For conditional, only one key
    def is_key(self,key):
        return key == self.keys[0]

    # For conditional, only one key
    def key(self):
        return self.keys[0]

    def in_keys(self,key):
        return key in self.keys

    def is_match(self,value):
        """Apply the regex or literal test; negate for the !~ / != forms."""
        retval = False
        if self.regex:
            if self.regex.search(value):
                retval = True
            #print(">>>>>>>>>>>>>%s=~%s r: %s,%s=%s"%(self.match,value,self.negate,retval,self.negate != retval))
        else:
            retval = self.match == value
            #print(">>>>>>>>>>>>>%s==%s r: %s,%s=%s"%(self.match,value,self.negate,retval, self.negate != retval))
        return self.negate != retval

    def __str__(self):
        # Reconstruct the operator from negate/regex flags for debugging.
        if self.negate:
            f='!'
        else:
            f='='
        if self.regex:
            s='~'
        else:
            s='='
        return u'InExMatch(%s %s%s %s)'%(self.keys,f,s,self.match)
class Story(Configurable):
    def __init__(self, configuration):
        """Initialize story state and load metadata-related config settings."""
        Configurable.__init__(self, configuration)

        try:
            ## calibre plugin will set externally to match PI version.
            self.metadata = {'version':os.environ['CURRENT_VERSION_ID']}
        except:
            # Not running under appengine/calibre: fall back to a fixed version.
            self.metadata = {'version':'4.4'}
        self.replacements = []
        self.in_ex_cludes = {}
        self.chapters = [] # chapters will be tuples of (title,html)
        self.imgurls = []
        self.imgtuples = []

        self.cover=None # *href* of new cover image--need to create html.
        self.oldcover=None # (oldcoverhtmlhref,oldcoverhtmltype,oldcoverhtmldata,oldcoverimghref,oldcoverimgtype,oldcoverimgdata)
        self.calibrebookmark=None # cheesy way to carry calibre bookmark file forward across update.
        self.logfile=None # cheesy way to carry log file forward across update.

        ## Look for config parameter, split and add each to metadata field.
        for (config,metadata) in [("extracategories","category"),
                                  ("extragenres","genre"),
                                  ("extracharacters","characters"),
                                  ("extraships","ships"),
                                  ("extrawarnings","warnings")]:
            for val in self.getConfigList(config):
                self.addToList(metadata,val)

        # Parse replace_metadata rules once, up front.
        self.setReplace(self.getConfig('replace_metadata'))

        in_ex_clude_list = ['include_metadata_pre','exclude_metadata_pre',
                           'include_metadata_post','exclude_metadata_post']
        for ie in in_ex_clude_list:
            ies = self.getConfig(ie)
            # print("%s %s"%(ie,ies))
            if ies:
                iel = []  # NOTE(review): unused local.
                self.in_ex_cludes[ie] = self.set_in_ex_clude(ies)
def join_list(self, key, vallist):
return self.getConfig("join_string_"+key,u", ").replace(SPACE_REPLACE,' ').join(map(unicode, vallist))
    def setMetadata(self, key, value, condremoveentities=True):
        """Set a metadata value; list-typed keys are cleared and set to value.

        Side effects: setting 'language' also derives 'langcode' (default
        'en'); setting 'dateUpdated' also appends 'lastupdate' tags.
        """
        # keep as list type, but set as only value.
        if self.isList(key):
            self.addToList(key,value,condremoveentities=condremoveentities,clear=True)
        else:
            ## conditionalRemoveEntities still keeps some entities.
            if condremoveentities:
                self.metadata[key]=conditionalRemoveEntities(value)
            else:
                self.metadata[key]=value

        if key == "language":
            try:
                # getMetadata not just self.metadata[] to do replace_metadata.
                self.setMetadata('langcode',langs[self.getMetadata(key)])
            except:
                # Unknown language name: default to English.
                self.setMetadata('langcode','en')

        if key == 'dateUpdated' and value:
            # Last Update tags for Bill.
            self.addToList('lastupdate',value.strftime("Last Update Year/Month: %Y/%m"))
            self.addToList('lastupdate',value.strftime("Last Update: %Y/%m/%d"))
## metakey[,metakey]=~pattern
## metakey[,metakey]==string
## *for* part lines. Effect only when trailing conditional key=~regexp matches
## metakey[,metakey]=~pattern[&&metakey=~regexp]
## metakey[,metakey]==string[&&metakey=~regexp]
## metakey[,metakey]=~pattern[&&metakey==string]
## metakey[,metakey]==string[&&metakey==string]
def set_in_ex_clude(self,setting):
dest = []
# print("set_in_ex_clude:"+setting)
for line in setting.splitlines():
if line:
(match,condmatch)=(None,None)
if "&&" in line:
(line,conditional) = line.split("&&")
condmatch = InExMatch(conditional)
match = InExMatch(line)
dest.append([match,condmatch])
return dest
    def do_in_ex_clude(self,which,value,key):
        """Apply the configured rule set `which` to `value` for metadata `key`.

        Returns `value` when it survives the rules, or None when an exclude
        rule matched, or when an include rule set applies to this key but no
        rule matched the value.
        """
        if value and which in self.in_ex_cludes:
            include = 'include' in which
            keyfound = False
            found = False
            for (match,condmatch) in self.in_ex_cludes[which]:
                keyfndnow = False
                if match.in_keys(key):
                    # key in keys and either no conditional, or conditional matched
                    if condmatch == None or condmatch.is_key(key):
                        keyfndnow = True
                    else:
                        # Conditional targets another key: evaluate it.
                        condval = self.getMetadata(condmatch.key())
                        keyfndnow = condmatch.is_match(condval)
                keyfound |= keyfndnow
                # print("match:%s %s\ncondmatch:%s %s\n\tkeyfound:%s\n\tfound:%s"%(
                #         match,value,condmatch,condval,keyfound,found))
                if keyfndnow:
                    found = isinstance(value,basestring) and match.is_match(value)
                if found:
                    # print("match:%s %s\n\tkeyfndnow:%s\n\tfound:%s"%(
                    #         match,value,keyfndnow,found))
                    if not include:
                        # Exclude rule matched: drop the value.
                        value = None
                    break
            if include and keyfound and not found:
                # Include rules applied to this key, but none matched.
                value = None
        return value
    ## Two or three part lines.  Two part effect everything.
    ## Three part effect only those key(s) lists.
    ## pattern=>replacement
    ## metakey,metakey=>pattern=>replacement
    ## *Five* part lines.  Effect only when trailing conditional key=>regexp matches
    ## metakey[,metakey]=>pattern=>replacement[&&metakey=>regexp]
    def setReplace(self,replace):
        """Parse replace_metadata config lines into self.replacements tuples
        of (metakeys, compiled regexp, replacement, condkey, cond regexp)."""
        for line in replace.splitlines():
            # print("replacement line:%s"%line)
            (metakeys,regexp,replacement,condkey,condregexp)=(None,None,None,None,None)
            if "&&" in line:
                # Trailing conditional: only apply when condkey matches condregexp.
                (line,conditional) = line.split("&&")
                (condkey,condregexp) = conditional.split("=>")
            if "=>" in line:
                parts = line.split("=>")
                if len(parts) > 2:
                    # Three-part form: restrict to the listed metadata keys.
                    metakeys = map( lambda x: x.strip(), parts[0].split(",") )
                    (regexp,replacement)=parts[1:]
                else:
                    (regexp,replacement)=parts
            if regexp:
                regexp = re.compile(regexp)
                if condregexp:
                    condregexp = re.compile(condregexp)
                # A way to explicitly include spaces in the
                # replacement string.  The .ini parser eats any
                # trailing spaces.
                replacement=replacement.replace(SPACE_REPLACE,' ')
                self.replacements.append([metakeys,regexp,replacement,condkey,condregexp])
    def doReplacements(self,value,key,return_list=False,seen_list=[]):
        """Run include/exclude filters and replace_metadata rules on value.

        seen_list tracks already-applied rules to stop recursion when a
        SPLIT_META replacement re-enters this method. (The mutable default is
        safe here: seen_list is never mutated, only copied via `+`.)
        Returns a joined string, or a list when return_list is True.
        """
        value = self.do_in_ex_clude('include_metadata_pre',value,key)
        value = self.do_in_ex_clude('exclude_metadata_pre',value,key)
        retlist = [value]
        for replaceline in self.replacements:
            if replaceline in seen_list: # recursion on pattern, bail
                # print("bailing on %s"%replaceline)
                continue
            #print("replacement tuple:%s"%replaceline)
            (metakeys,regexp,replacement,condkey,condregexp) = replaceline
            if (metakeys == None or key in metakeys) \
                    and isinstance(value,basestring) \
                    and regexp.search(value):
                doreplace=True
                if condkey and condkey != key: # prevent infinite recursion.
                    condval = self.getMetadata(condkey)
                    doreplace = condval != None and condregexp.search(condval)

                if doreplace:
                    # split into more than one list entry if
                    # SPLIT_META present in replacement string.  Split
                    # first, then regex sub, then recurse call replace
                    # on each.  Break out of loop, each split element
                    # handled individually by recursion call.
                    if SPLIT_META in replacement:
                        retlist = []
                        for splitrepl in replacement.split(SPLIT_META):
                            retlist.extend(self.doReplacements(regexp.sub(splitrepl,value),
                                                               key,
                                                               return_list=True,
                                                               seen_list=seen_list+[replaceline]))
                        break
                    else:
                        # print("replacement,value:%s,%s->%s"%(replacement,value,regexp.sub(replacement,value)))
                        value = regexp.sub(replacement,value)
                        retlist = [value]

        # NOTE(review): this loop re-applies the post filters once per element
        # in retlist; a single pass would suffice since map covers the whole
        # list each time.
        for val in retlist:
            retlist = map(partial(self.do_in_ex_clude,'include_metadata_post',key=key),retlist)
            retlist = map(partial(self.do_in_ex_clude,'exclude_metadata_post',key=key),retlist)

        if return_list:
            return retlist
        else:
            return self.join_list(key,retlist)
def getMetadataRaw(self,key):
    """
    Return the raw (unprocessed, possibly list) value stored for key,
    or None when key is not a valid metadata entry or not set.
    """
    # 'key in self.metadata' replaces the deprecated dict.has_key();
    # identical behavior, and also valid on Python 3.
    if self.isValidMetaEntry(key) and key in self.metadata:
        return self.metadata[key]
def getMetadata(self, key,
                removeallentities=False,
                doreplacements=True):
    """
    Return the fully processed value for metadata entry key.

    List-type entries are fetched with getList() and joined; single
    values get type-specific formatting (comma grouping for counts,
    strftime for dates), then optional replacement processing and
    entity removal.  Returns the configured default_value_<key> when
    the stored value is falsy, and None for invalid or unset keys.
    """
    value = None
    if not self.isValidMetaEntry(key):
        return value

    if self.isList(key):
        # join_string = self.getConfig("join_string_"+key,u", ").replace(SPACE_REPLACE,' ')
        # value = join_string.join(self.getList(key, removeallentities, doreplacements=True))
        value = self.join_list(key,self.getList(key, removeallentities, doreplacements=True))
        if doreplacements:
            # "_LIST" suffix lets replacement rules target the joined form
            value = self.doReplacements(value,key+"_LIST")
        return value
    elif key in self.metadata:  # was metadata.has_key(key); same behavior, py3-safe
        value = self.metadata[key]
        if value:
            if key == "numWords":
                value = commaGroups(value)
            if key == "numChapters":
                value = commaGroups("%d"%value)
            # BUGFIX: was 'key in ("dateCreated")' -- parentheses without a
            # comma make that a *substring* test against the string
            # "dateCreated", not tuple membership (compare the proper tuple
            # used for datePublished/dateUpdated just below).
            if key in ("dateCreated",):
                value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d %H:%M:%S"))
            if key in ("datePublished","dateUpdated"):
                value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d"))

            if doreplacements:
                value=self.doReplacements(value,key)
            if removeallentities and value != None:
                return removeAllEntities(value)
            else:
                return value
        else: #if self.getConfig("default_value_"+key):
            return self.getConfig("default_value_"+key)
def getAllMetadata(self,
                   removeallentities=False,
                   doreplacements=True,
                   keeplists=False):
    '''
    All single value *and* list value metadata as strings (unless
    keeplists=True, then keep lists).

    Side effect: also (re)computes the derived authorHTML, seriesHTML
    and <key>HTML link entries before collecting everything into a dict.
    '''
    allmetadata = {}

    # special handling for authors/authorUrls
    linkhtml="<a class='%slink' href='%s'>%s</a>"
    if self.isList('author'): # more than one author, assume multiple authorUrl too.
        htmllist=[]
        for i, v in enumerate(self.getList('author')):
            aurl = self.getList('authorUrl')[i]
            auth = v
            # make sure doreplacements & removeallentities are honored.
            if doreplacements:
                aurl=self.doReplacements(aurl,'authorUrl')
                auth=self.doReplacements(auth,'author')
            if removeallentities:
                aurl=removeAllEntities(aurl)
                auth=removeAllEntities(auth)
            htmllist.append(linkhtml%('author',aurl,auth))
        # join_string = self.getConfig("join_string_authorHTML",u", ").replace(SPACE_REPLACE,' ')
        # NOTE(review): join_list is passed the 'join_string_'-prefixed name
        # here but the bare key elsewhere (e.g. getMetadata) -- confirm
        # which form join_list expects.
        self.setMetadata('authorHTML',self.join_list("join_string_authorHTML",htmllist))
    else:
        self.setMetadata('authorHTML',linkhtml%('author',self.getMetadata('authorUrl', removeallentities, doreplacements),
                                                self.getMetadata('author', removeallentities, doreplacements)))

    # seriesHTML: link when a URL is known, plain text otherwise
    if self.getMetadataRaw('seriesUrl'):
        self.setMetadata('seriesHTML',linkhtml%('series',self.getMetadata('seriesUrl', removeallentities, doreplacements),
                                                self.getMetadata('series', removeallentities, doreplacements)))
    elif self.getMetadataRaw('series'):
        self.setMetadata('seriesHTML',self.getMetadataRaw('series'))

    # logger.debug("make_linkhtml_entries:%s"%self.getConfig('make_linkhtml_entries'))
    for k in self.getConfigList('make_linkhtml_entries'):
        # Assuming list, because it has to be site specific and
        # they are all lists. Bail if kUrl list not the same
        # length.
        # logger.debug("\nk:%s\nlist:%s\nlistURL:%s"%(k,self.getList(k),self.getList(k+'Url')))
        if len(self.getList(k+'Url')) != len(self.getList(k)):
            continue
        htmllist=[]
        for i, v in enumerate(self.getList(k)):
            url = self.getList(k+'Url')[i]
            # make sure doreplacements & removeallentities are honored.
            if doreplacements:
                url=self.doReplacements(url,k+'Url')
                v=self.doReplacements(v,k)
            if removeallentities:
                url=removeAllEntities(url)
                v=removeAllEntities(v)
            htmllist.append(linkhtml%(k,url,v))
        # join_string = self.getConfig("join_string_"+k+"HTML",u", ").replace(SPACE_REPLACE,' ')
        self.setMetadata(k+'HTML',self.join_list("join_string_"+k+"HTML",htmllist))

    # collect every valid entry, keeping raw lists only when asked to
    for k in self.getValidMetaList():
        if self.isList(k) and keeplists:
            allmetadata[k] = self.getList(k, removeallentities, doreplacements)
        else:
            allmetadata[k] = self.getMetadata(k, removeallentities, doreplacements)

    return allmetadata
# just for less clutter in adapters.
def extendList(self,listname,l):
    """Add every value in l (whitespace-stripped) to list entry listname."""
    for entry in (item.strip() for item in l):
        self.addToList(listname, entry)
def addToList(self,listname,value,condremoveentities=True,clear=False):
    """
    Append value to list-type metadata entry listname, creating (or,
    with clear=True, resetting) the list as needed; duplicates and None
    are skipped.  condremoveentities: conditionally strip HTML entities
    from value first.
    """
    if value==None:
        return
    if condremoveentities:
        value = conditionalRemoveEntities(value)
    if clear or not self.isList(listname) or not listname in self.metadata:
        # Calling addToList to a non-list meta will overwrite it.
        self.metadata[listname]=[]
    # prevent duplicates.
    if not value in self.metadata[listname]:
        self.metadata[listname].append(value)
    # optionally mirror a multi-category story into a configured genre
    # (recurses once into addToList for the 'genre' list)
    if listname == 'category' and self.getConfig('add_genre_when_multi_category') and len(self.metadata[listname]) > 1:
        self.addToList('genre',self.getConfig('add_genre_when_multi_category'))
def isList(self,listname):
    """
    Everything set with an include_in_* is considered a list.

    True when listname is a declared list type, or a valid entry whose
    stored value is actually a Python list.
    """
    # 'listname in self.metadata' replaces the deprecated has_key();
    # identical semantics, Python 3 compatible.
    return self.isListType(listname) or \
        ( self.isValidMetaEntry(listname) and listname in self.metadata \
          and isinstance(self.metadata[listname],list) )
def getList(self,listname,
            removeallentities=False,
            doreplacements=True,
            includelist=[]):
    """
    Return the processed list of values for listname.

    include_in_<listname> config entries pull values in from other
    lists; includelist tracks the lists already on the recursion path
    to stop include_in_* loops.
    NOTE(review): mutable default argument -- harmless only because it
    is never mutated (new lists are built with includelist+[listname]).
    """
    #print("getList(%s,%s)"%(listname,includelist))
    retlist = []

    if not self.isValidMetaEntry(listname):
        return retlist

    # includelist prevents infinite recursion of include_in_'s
    if self.hasConfig("include_in_"+listname) and listname not in includelist:
        for k in self.getConfigList("include_in_"+listname):
            retlist.extend(self.getList(k,removeallentities=False,
                                        doreplacements=doreplacements,includelist=includelist+[listname]))
    else:
        if not self.isList(listname):
            # scalar entry promoted to a one-element list
            retlist = [self.getMetadata(listname,removeallentities=False,
                                        doreplacements=doreplacements)]
        else:
            retlist = self.getMetadataRaw(listname)

    if retlist:
        if doreplacements:
            newretlist = []
            for val in retlist:
                newretlist.extend(self.doReplacements(val,listname,return_list=True))
            retlist = newretlist

        if removeallentities:
            # Python 2 map() returns a list here
            retlist = map(removeAllEntities,retlist)

        # drop empty/None entries (Python 2 filter() returns a list)
        retlist = filter( lambda x : x!=None and x!='' ,retlist)

    # reorder ships so b/a and c/b/a become a/b and a/b/c. Only on '/',
    # use replace_metadata to change separator first if needed.
    # ships=>[ ]*(/|&|&)[ ]*=>/
    if listname == 'ships' and self.getConfig('sort_ships') and retlist:
        retlist = [ '/'.join(sorted(x.split('/'))) for x in retlist ]

    if retlist:
        if listname in ('author','authorUrl','authorId') or self.getConfig('keep_in_order_'+listname):
            # need to retain order for author & authorUrl so the
            # two match up.
            return retlist
        else:
            # remove dups and sort.
            return sorted(list(set(retlist)))
    else:
        return []
def getSubjectTags(self, removeallentities=False):
    """
    Collect dc:subject tags: every metadata entry named in the
    include_subject_tags / extra_subject_tags config lists, flattened,
    de-duplicated, with None/'' dropped, plus any configured extratags.
    """
    # set to avoid duplicates subject tags.
    subjectset = set()

    tags_list = self.getConfigList("include_subject_tags") + self.getConfigList("extra_subject_tags")

    # metadata all go into dc:subject tags, but only if they are configured.
    # (Python 2 dict.iteritems())
    for (name,value) in self.getAllMetadata(removeallentities=removeallentities,keeplists=True).iteritems():
        if name in tags_list:
            if isinstance(value,list):
                for tag in value:
                    subjectset.add(tag)
            else:
                subjectset.add(value)

    # unset/empty metadata values should not become tags
    if None in subjectset:
        subjectset.remove(None)
    if '' in subjectset:
        subjectset.remove('')

    return list(subjectset | set(self.getConfigList("extratags")))
def addChapter(self, url, title, html):
    """Append one (url, title, html) chapter; optionally strip site chapter numbers from the title first."""
    strip_enabled = self.getConfig('strip_chapter_numbers')
    strip_pattern = self.getConfig('chapter_title_strip_pattern')
    if strip_enabled and strip_pattern:
        title = re.sub(strip_pattern, "", title)
    self.chapters.append((url, title, html))
def getChapters(self,fortoc=False):
    "Chapters are tuples of (url,title,html); optionally number the titles."
    retval = []

    ## only add numbers if more than one chapter.
    if len(self.chapters) > 1 and \
            (self.getConfig('add_chapter_numbers') == "true" \
             or (self.getConfig('add_chapter_numbers') == "toconly" and fortoc)) \
            and self.getConfig('chapter_title_add_pattern'):
        for index, (url,title,html) in enumerate(self.chapters):
            # 1-based chapter number and original title substituted into
            # the configured ${index}/${title} template
            retval.append( (url,
                            string.Template(self.getConfig('chapter_title_add_pattern')).substitute({'index':index+1,'title':title}),
                            html) )
    else:
        retval = self.chapters

    return retval
def formatFileName(self,template,allowunsafefilename=True):
    """
    Build the output file name by substituting all metadata into the
    ${...} template.  With allowunsafefilename=False, every value is
    sanitized by replacing runs of characters outside
    output_filename_safepattern with '_'.  Returns the UTF-8 encoded
    result (a byte string under Python 2).
    """
    # NOTE(review): 'values' and 'origvalues' start as the *same* dict;
    # the sanitizing branch rebinds 'values' to a fresh dict while
    # 'origvalues' keeps the raw metadata keys for iteration.
    values = origvalues = self.getAllMetadata()

    # fall back default:
    if not template:
        template="${title}-${siteabbrev}_${storyId}${formatext}"

    if not allowunsafefilename:
        values={}
        pattern = re.compile(self.getConfig("output_filename_safepattern",r"[^a-zA-Z0-9_\. \[\]\(\)&'-]+"))
        for k in origvalues.keys():
            values[k]=re.sub(pattern,'_', removeAllEntities(self.getMetadata(k)))

    return string.Template(template).substitute(values).encode('utf8')
# pass fetch in from adapter in case we need the cookies collected
# as well as it's a base_story class method.
def addImgUrl(self,parenturl,url,fetch,cover=False,coverexclusion=None):
    """
    Register an image URL found in the story, download and (optionally)
    convert it, and return its local epub path ("images/...").
    Returns "failedtoload" when download/conversion fails, or None when
    the image is skipped entirely.

    parenturl: page the img tag came from, used to resolve relative URLs.
    fetch: adapter-supplied download callable (carries session cookies).
    cover: treat this image as the explicit cover.
    coverexclusion: regex; matching URLs are never used as cover.
    """
    # otherwise it saves the image in the epub even though it
    # isn't used anywhere.
    if cover and self.getConfig('never_make_cover'):
        return

    url = url.strip() # ran across an image with a space in the
                      # src. Browser handled it, so we'd better, too.

    # appengine (web version) isn't allowed to do images--just
    # gets too big too fast and breaks things.
    if is_appengine:
        return

    # resolve scheme-relative (//), root-relative (/) and page-relative
    # URLs against parenturl; absolute http/file URLs pass through.
    if url.startswith("http") or url.startswith("file") or parenturl == None:
        imgurl = url
    else:
        parsedUrl = urlparse.urlparse(parenturl)
        if url.startswith("//") :
            imgurl = urlparse.urlunparse(
                (parsedUrl.scheme,
                 '',
                 url,
                 '','',''))
        elif url.startswith("/") :
            imgurl = urlparse.urlunparse(
                (parsedUrl.scheme,
                 parsedUrl.netloc,
                 url,
                 '','',''))
        else:
            toppath=""
            if parsedUrl.path.endswith("/"):
                toppath = parsedUrl.path
            else:
                # strip the page file name, keep its directory
                toppath = parsedUrl.path[:parsedUrl.path.rindex('/')]
            imgurl = urlparse.urlunparse(
                (parsedUrl.scheme,
                 parsedUrl.netloc,
                 toppath + '/' + url,
                 '','',''))
            #print("\n===========\nparsedUrl.path:%s\ntoppath:%s\nimgurl:%s\n\n"%(parsedUrl.path,toppath,imgurl))

    # apply coverexclusion to explicit covers, too. Primarily for ffnet imageu.
    if cover and coverexclusion and re.search(coverexclusion,imgurl):
        return

    prefix='ffdl'
    if imgurl not in self.imgurls:
        parsedUrl = urlparse.urlparse(imgurl)
        try:
            if self.getConfig('no_image_processing'):
                # keep the image bytes untouched
                (data,ext,mime) = no_convert_image(imgurl,
                                                   fetch(imgurl))
            else:
                try:
                    sizes = [ int(x) for x in self.getConfigList('image_max_size') ]
                except Exception, e:
                    raise exceptions.FailedToDownload("Failed to parse image_max_size from personal.ini:%s\nException: %s"%(self.getConfigList('image_max_size'),e))
                grayscale = self.getConfig('grayscale_images')
                imgtype = self.getConfig('convert_images_to')
                if not imgtype:
                    imgtype = "jpg"
                removetrans = self.getConfig('remove_transparency')
                # jpg can't keep transparency; grayscale implies removing it too
                removetrans = removetrans or grayscale or imgtype=="jpg"
                (data,ext,mime) = convert_image(imgurl,
                                                fetch(imgurl),
                                                sizes,
                                                grayscale,
                                                removetrans,
                                                imgtype,
                                                background="#"+self.getConfig('background_color'))
        except Exception, e:
            # best effort: a broken image shouldn't kill the whole download
            logger.info("Failed to load or convert image, skipping:\n%s\nException: %s"%(imgurl,e))
            return "failedtoload"

        # explicit cover, make the first image.
        if cover:
            if len(self.imgtuples) > 0 and 'cover' in self.imgtuples[0]['newsrc']:
                # remove existing cover, if there is one.
                del self.imgurls[0]
                del self.imgtuples[0]
            self.imgurls.insert(0,imgurl)
            newsrc = "images/cover.%s"%ext
            self.cover=newsrc
            self.imgtuples.insert(0,{'newsrc':newsrc,'mime':mime,'data':data})
        else:
            self.imgurls.append(imgurl)

            # First image, copy not link because calibre will replace with it's cover.
            # Only if:  No cover already AND
            #           make_firstimage_cover AND
            #           NOT never_make_cover AND
            #           either no coverexclusion OR coverexclusion doesn't match
            if self.cover == None and \
                    self.getConfig('make_firstimage_cover') and \
                    not self.getConfig('never_make_cover') and \
                    not (coverexclusion and re.search(coverexclusion,imgurl)):
                newsrc = "images/cover.%s"%ext
                self.cover=newsrc
                self.imgtuples.append({'newsrc':newsrc,'mime':mime,'data':data})
                # deliberately stored twice: once as the cover copy above,
                # once as a normal image below
                self.imgurls.append(imgurl)

            newsrc = "images/%s-%s.%s"%(
                prefix,
                self.imgurls.index(imgurl),
                ext)
            self.imgtuples.append({'newsrc':newsrc,'mime':mime,'data':data})
        #logger.debug("\nimgurl:%s\nnewsrc:%s\nimage size:%d\n"%(imgurl,newsrc,len(data)))
    else:
        # already collected: reuse the existing local path
        newsrc = self.imgtuples[self.imgurls.index(imgurl)]['newsrc']

    #print("===============\n%s\nimg url:%s\n============"%(newsrc,self.imgurls[-1]))
    return newsrc
def getImgUrls(self):
    """Return the stored image tuples, one per collected image URL, in order."""
    # imgtuples is kept parallel to imgurls: one entry per known URL
    return [self.imgtuples[idx] for idx in range(len(self.imgurls))]
def __str__(self):
    # human-readable dump of the raw metadata dict, mainly for debugging
    return "Metadata: " +str(self.metadata)
def commaGroups(s):
    """
    Insert thousands separators into the trailing digits of s.

    Peels three-character groups off the right end while the string
    still ends in a digit, then rejoins them comma-separated after
    whatever non-digit prefix remains.
    """
    chunks = []
    remainder = s
    while remainder and remainder[-1].isdigit():
        chunks.insert(0, remainder[-3:])
        remainder = remainder[:-3]
    return remainder + ','.join(chunks)
| 41.510393 | 169 | 0.537193 | 3,646 | 35,948 | 5.223533 | 0.191443 | 0.02116 | 0.005671 | 0.004043 | 0.245104 | 0.193594 | 0.160515 | 0.130008 | 0.113416 | 0.0911 | 0 | 0.001505 | 0.352926 | 35,948 | 865 | 170 | 41.558382 | 0.817248 | 0.173195 | 0 | 0.26248 | 0 | 0 | 0.088728 | 0.016103 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.022544 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
76a66cd1b58b111bec10cfd3ff837789c2aaac34 | 336 | py | Python | 2021/07/2021-07-2 The Treachery of Whales.py | dpustovarov/Advent-of-Code | 3a08944c26ca6428ecca98aed96777d70038b6ef | [
"MIT"
] | null | null | null | 2021/07/2021-07-2 The Treachery of Whales.py | dpustovarov/Advent-of-Code | 3a08944c26ca6428ecca98aed96777d70038b6ef | [
"MIT"
] | null | null | null | 2021/07/2021-07-2 The Treachery of Whales.py | dpustovarov/Advent-of-Code | 3a08944c26ca6428ecca98aed96777d70038b6ef | [
"MIT"
] | null | null | null | import sys, statistics
def solution(N):
    """
    AoC 2021 day 7 part 2: minimal total fuel to align crab positions N,
    where moving a crab k steps costs 1+2+...+k = k*(k+1)/2.

    The rounded mean minimizes the quadratic part and the median the
    linear part, so the optimum lies in the closed interval between them.
    """
    mean_pos = int(round(statistics.mean(N)))    # candidate for the (n - i)**2 term
    median_pos = int(statistics.median(N))       # candidate for the abs(n - i) term
    lo, hi = min(mean_pos, median_pos), max(mean_pos, median_pos)
    # sum((n-i)**2 + |n-i|) is exactly twice the triangular cost, hence // 2.
    # BUGFIX: the original range(m, d, (d > m) - (d < m)) raised
    # ValueError (step 0) whenever mean == median and excluded the
    # endpoint d; a closed range covers both endpoints safely.
    return min(sum((n - i) ** 2 + abs(n - i) for n in N)
               for i in range(lo, hi + 1)) // 2
# entry point: puzzle input is one comma-separated line of integers on stdin
print(solution([int(n) for n in sys.stdin.read().split(',')]))
| 37.333333 | 100 | 0.5625 | 62 | 336 | 3.048387 | 0.419355 | 0.042328 | 0.116402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011494 | 0.223214 | 336 | 8 | 101 | 42 | 0.712644 | 0.133929 | 0 | 0 | 0 | 0 | 0.003472 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
76a8df49a9b9a1d0460ba09a5a23cc25330a7581 | 213 | py | Python | app/__init__.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | 2 | 2018-11-28T13:49:18.000Z | 2018-11-29T11:13:40.000Z | app/__init__.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | null | null | null | app/__init__.py | PushyZqin/flask-restful-plugin | 7a142de96500910f5f7648d5edf8986afaa72b70 | [
"MIT"
] | null | null | null | #encoding:utf-8
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import config
# create the Flask application and load its settings from the config module
app = Flask(__name__)
app.config.from_object(config)
# shared SQLAlchemy handle bound to this app
db = SQLAlchemy(app)
# imported for their side effects (registering routes/models on 'app');
# placed last, presumably to avoid a circular import -- TODO confirm
from app import req_demo, res_demo, models
76af968ed5c491f4545b0402bd5a825b42b19aab | 1,273 | py | Python | tierpsy/debugging/check_roi_flow.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | [
"MIT"
] | 9 | 2021-01-11T10:49:21.000Z | 2022-02-28T15:48:00.000Z | tierpsy/debugging/check_roi_flow.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | [
"MIT"
] | 18 | 2020-05-08T15:43:08.000Z | 2022-03-23T10:19:24.000Z | tierpsy/debugging/check_roi_flow.py | mgh17/tierpsy-tracker | a18c06aa80a5fb22fd51563d82c639b520742777 | [
"MIT"
] | 10 | 2019-12-18T12:10:12.000Z | 2022-01-05T09:12:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 17:59:36 2018
@author: avelinojaver
"""
import numpy as np
import cv2
from functools import partial
import json
from pathlib import Path
import pandas as pd
from tierpsy.analysis.ske_create.helperIterROI import generateMoviesROI
# hard-coded example file (immediately shadowed by the loop variable below)
mask_file = Path('/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/CX11314_Ch1_04072017_103259.hdf5')
root_dir = '/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies/'

# for every masked video, read the background-subtraction parameters
# stored with its skeletons file
for mask_file in list(Path(root_dir).glob('*.hdf5')):
    skeletons_file = mask_file.parent / 'Results' / (mask_file.stem + '_skeletons.hdf5')

    with pd.HDFStore(str(skeletons_file), "r") as ske_file_id:
        #attribute useful to understand if we are dealing with dark or light worms
        bgnd_param = ske_file_id.get_node('/plate_worms')._v_attrs['bgnd_param']
        bgnd_param = json.loads(bgnd_param.decode("utf-8"))
        print(bgnd_param)
    #%%
    # NOTE(review): 'masked_image_file' and 'trajectories_data' are never
    # defined in this script -- this raises NameError as written;
    # presumably they should be mask_file and a table read from the
    # skeletons file.  TODO fix before use.
    ROIs_generator = generateMoviesROI(masked_image_file,
                                       trajectories_data,
                                       bgnd_param = bgnd_param,
                                       progress_prefix = '')
    for frame_props in ROIs_generator:
        break
| 28.931818 | 112 | 0.660644 | 161 | 1,273 | 4.993789 | 0.596273 | 0.078358 | 0.062189 | 0.08209 | 0.124378 | 0.124378 | 0.124378 | 0 | 0 | 0 | 0 | 0.046681 | 0.242734 | 1,273 | 44 | 113 | 28.931818 | 0.787344 | 0.139042 | 0 | 0 | 0 | 0 | 0.191529 | 0.134438 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
76b14bd5cac8768daf91adb9dc2873199856ee7c | 2,572 | py | Python | PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/WithDbase/inventory.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/WithDbase/inventory.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | PP4E/Examples/PP4E/Integrate/Embed/prioredition-2x/Inventory/WithDbase/inventory.py | BeacherHou/Python-_Markdown- | 015d79a02d32f49395b80ca10919b3a09b72c4df | [
"MIT"
] | null | null | null | ############################################################################
# implement inventory/buyer databases as persistent shelve/pickle files;
# since the validations are already coded to use a function call interface,
# we just map those calls back to the shelve or pickled object here--no
# need to change validations code; caveat: some dbm flavors may need a
# Inventory.close() call, and this scheme doesn't support concurrent dbase
# access--shelves must be locked if concurrent access is possible (see
# flock() in the PyErrata example in the Internet chapter), and we would
# want to load the Buyers list from its file on each buyers() call;
#
# note that shelves require string keys (not ints), but we load raw
# strings from the order data file, so no conversions are necessary here;
# Buyers could be a shelve with all values = None if the list grows long:
# that would replace the 'in' test with a shelve index (but may be slower,
# since it adds a file access); Inventory could almost be a simple dbm
# file instead of a shelve, but that requires mapping integer values to
# and from strings (dbm values must be strings--see persistence chapter);
############################################################################
import shelve, pickle, string
from dbasetools import inventoryFile, buyerFile
# open shelve once per process, on first
# import of this file; changes are auto
# written through to file on key assignment
# (module-level side effect: importing this module opens the database)
Inventory = shelve.open(inventoryFile)
def skus():
    # all known product codes (shelve keys are strings)
    return Inventory.keys()
def stock(sku):
    # current stock count for one product code
    return Inventory[sku]
def reduce(sku, qty):
    # decrement stock; the key assignment is what makes the shelve
    # write the new value through to the file.
    # NOTE(review): shadows the builtin 'reduce'.
    Inventory[sku] = Inventory[sku] - qty
def closedbase():
    # explicitly close the shelve database
    Inventory.close() # if your dbm flavor requires it
# load buyers list once per process;
# add_buyer() writes changes through to the file on each change
Buyers = pickle.load(open(buyerFile, 'r'))
def buyers():
    # all buyers seen so far (shared in-memory list)
    return Buyers
def add_buyer(buyer):
    # append and immediately re-pickle the whole list to file
    # NOTE(review): relies on refcounting to close the file handle;
    # text mode 'w' implies Python 2 pickle protocol 0 only.
    Buyers.append(buyer)
    pickle.dump(Buyers, open(buyerFile, 'w'))
def print_files():
    # debugging dump of both databases (Python 2 print statements)
    text = ''
    for key in Inventory.keys():
        text = text + (' %s=>%d ' % (key, Inventory[key]))
    print 'Stock => {%s}' % text
    print 'Buyer =>', Buyers
# load order list from flat text file;
# converts quantity only to an integer
def load_orders(filename):
    """
    Parse the flat-text order file: one order per line, formatted as
    'product quantity buyer', whitespace separated.  Returns a list of
    (product, int(quantity), buyer) tuples.
    """
    orders = []
    for line in open(filename, 'r').readlines():
        # str.split()/int() replace the Python-2-only string.split /
        # string.atoi helpers; identical behavior, also valid on Python 3
        product, quantity, buyer = line.split()
        orders.append( (product, int(quantity), buyer) )
    return orders
| 35.232877 | 77 | 0.64619 | 345 | 2,572 | 4.808696 | 0.46087 | 0.012658 | 0.016878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.216952 | 2,572 | 72 | 78 | 35.722222 | 0.823734 | 0.529549 | 0 | 0 | 0 | 0 | 0.033333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.068966 | null | null | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
76b63b1bac33101e8507f58ebdfd16790912e32c | 20,894 | py | Python | cdci_data_analysis/analysis/parameters.py | andreatramacere/cdci_data_analysis | 8ae34a7252d6baf011a3b99fbe4f6e624b63d7df | [
"MIT"
] | null | null | null | cdci_data_analysis/analysis/parameters.py | andreatramacere/cdci_data_analysis | 8ae34a7252d6baf011a3b99fbe4f6e624b63d7df | [
"MIT"
] | null | null | null | cdci_data_analysis/analysis/parameters.py | andreatramacere/cdci_data_analysis | 8ae34a7252d6baf011a3b99fbe4f6e624b63d7df | [
"MIT"
] | null | null | null | """
Overview
--------
general info about this module
Classes and Inheritance Structure
----------------------------------------------
.. inheritance-diagram::
Summary
---------
.. autosummary::
list of the module you want
Module API
----------
"""
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
zip, round, input, int, pow, object, map, zip)
__author__ = "Andrea Tramacere"
import ast
import decorator
from datetime import datetime, date, time
from astropy.time import Time as astropyTime
from astropy.time import TimeDelta as astropyTimeDelta
from astropy.coordinates import Angle as astropyAngle
from .catalog import BasicCatalog
import numpy as np
@decorator.decorator
def check_par_list(func,par_list,*args, **kwargs):
    """
    Decorator: validate that every element of the decorated function's
    first argument (par_list) is a Parameter instance before calling it.

    Raises RuntimeError on the first non-Parameter element.
    """
    for par in par_list:
        if isinstance(par,Parameter):
            pass
        else:
            raise RuntimeError('each parameter in the par_list has to be an instance of Parameters')

    return func(par_list, *args, **kwargs)
class ParameterGroup(object):
    """
    A named group of Parameter / ParameterRange objects with a boolean
    selection mask.  When exclusive is True at most one member may be
    selected at a time.
    """

    def __init__(self,par_list,name,exclusive=True,def_selected=None,selected=None):
        self.name=name
        self._par_list=par_list
        self._check_pars(par_list)
        # BUGFIX: was hard-coded 'self.exclusive=True', silently ignoring
        # the 'exclusive' argument.
        self.exclusive=exclusive
        # np.bool was only an alias of the builtin and has been removed
        # from modern numpy; plain bool is identical.
        self.msk = np.ones(len(par_list), dtype=bool)
        if exclusive==True:
            self.msk[::]=False
            if def_selected is None:
                # BUGFIX: was 'self.msk[0]==True', a comparison with no
                # effect; the first member is meant to be selected by
                # default.
                self.msk[0]=True

        if def_selected is not None:
            self.select(def_selected)

        if selected is not None:
            self.select(selected)

    @property
    def par_list(self):
        return self._par_list

    @property
    def names(self):
        # names of all grouped parameters
        return [p.name for p in self._par_list]

    def select(self,name):
        """Mark the member called name (or given as a Parameter) selected."""
        if isinstance(name,Parameter):
            # BUGFIX: was 'name=Parameter.value', which grabbed the class's
            # property object instead of this instance's value.
            name=name.value
        for ID,p in enumerate(self._par_list):
            if p.name==name:
                self.msk[ID]=True
                self._selected=self._par_list[ID].name
        if self.msk.sum()>1 and self.exclusive==True:
            raise RuntimeError('only one paramter can be selected in mutually exclusive groups')

    def _check_pars(self, par_list):
        # members must be Parameter or ParameterRange instances
        for p in par_list:
            if isinstance(p,Parameter):
                pass
            elif isinstance(p,ParameterRange):
                pass
            else:
                raise RuntimeError('you can group Paramters or ParamtersRanges found',type(p))

    def to_list(self):
        """Flatten the group into a plain list of Parameter objects."""
        _l=[]
        for p in self._par_list:
            if isinstance(p,Parameter):
                _l.append(p)
            elif isinstance(p,ParameterRange):
                _l.extend(p.to_list())
        return _l

    def add_par(self,par):
        # append a new member and extend the mask (unselected)
        self.par_list.append(par)
        self.msk=np.append(self.msk,False)

    def build_selector(self,name):
        # a Parameter whose allowed values are the member names
        return Parameter(name, allowed_values=self.names)
class ParameterRange(object):
    """A named (p1, p2) pair of Parameters of the same concrete type."""

    def __init__(self,p1,p2,name):
        self._check_pars(p1,p2)
        self.name=name
        self.p1=p1
        self.p2=p2

    def _check_pars(self,p1,p2):
        # both ends must be Parameter instances of the same concrete type
        if type(p1)!=type(p2):
            # BUGFIX: error message said 'time' instead of 'type'
            raise RuntimeError('pars must be of the same type')
        for p in (p1,p2):
            try:
                assert (isinstance(p,Parameter))
            except:
                raise RuntimeError('both p1 and p2 must be Parameters objects, found',type(p))

    def to_list(self):
        return [self.p1,self.p2]
class ParameterTuple(object):
    """A named tuple of Parameters, all of the same concrete type."""

    def __init__(self,p_list,name):
        self._check_pars(p_list)
        self.name=name
        self.p_list=tuple(p_list)

    def _check_pars(self,p_list):
        # all members must share the type of the first one
        if any( type(x)!=type(p_list[0]) for x in p_list):
            # BUGFIX: error message said 'time' instead of 'type'
            raise RuntimeError('pars must be of the same type')
        for p in (p_list):
            try:
                assert (isinstance(p,Parameter))
            except:
                # BUGFIX: message was copy-pasted from ParameterRange and
                # wrongly referred to 'p1 and p2'
                raise RuntimeError('all the elements of p_list must be Parameters objects, found',type(p))

    def to_list(self):
        return self.p_list
class Parameter(object):
    """
    Base class for request parameters: couples a value with optional
    units, a validation callback (check_value), and optional whitelists
    of allowed units / allowed values.  Subclasses typically fix the
    allowed units and provide a type-specific check_value.

    NOTE(review): allowed_units=[] is a mutable default argument; it is
    only read here, never mutated, so the sharing is harmless.
    """

    def __init__(self,value=None,units=None,name=None,allowed_units=[],check_value=None,allowed_values=None,units_name=None):
        # NOTE(review): this instance attribute shadows the check_value()
        # staticmethod defined below.
        self.check_value=check_value
        self._allowed_units = allowed_units
        self._allowed_values = allowed_values
        self.name = name
        self.units=units
        self.value = value
        self.units_name=units_name
        #self._wtform_dict=wtform_dict

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self,v):
        #print ('set',self.name,v,self._allowed_values)
        # run the validation callback and the allowed-values whitelist;
        # string values are stored whitespace-stripped
        if v is not None:
            if self.check_value is not None:
                self.check_value(v, units=self.units,name=self.name)
            if self._allowed_values is not None:
                if v not in self._allowed_values:
                    raise RuntimeError('value',v,'not allowed, allowed=',self._allowed_values)
            #print('set->',self.name,v,type(v))
            # NOTE(review): 'unicode' makes this module Python-2 only
            if type(v)==str or type(v)== unicode:
                self._value=v.strip()
            else:
                self._value = v
        else:
            self._value=None

    @property
    def units(self):
        return self._units

    @units.setter
    def units(self,units):
        # validate against the allowed-units whitelist when one is set
        if self._allowed_units !=[] and self._allowed_units is not None:
            self.chekc_units(units,self._allowed_units,self.name)

        self._units=units

    def set_from_form(self,form,verbose=False):
        """
        Read this parameter's value (and units, when units_name is set)
        from a request-form dictionary; a missing key leaves the
        parameter unchanged.
        """
        par_name = self.name
        units_name = self.units_name
        v = None
        u = None
        in_dictionary=False

        if units_name is not None:
            if units_name in form.keys():
                u = form[units_name]

        if par_name in form.keys():
            v=form[par_name]
            in_dictionary=True

        if in_dictionary is True:
            self.set_par(value=v,units=u)
            #print('setting par:', par_name, 'to val=', self.value, 'and units', units_name, 'to', self.units )
        else:
            if verbose is True:
                print('setting par:', par_name, 'not in dictionary')

    def set_par(self,value,units=None):
        # units first, so value validation sees the new units
        if units is not None:
            self.units=units
        self.value=value

    def get_form(self,wtform_cls,key,validators,defaults):
        # build a WTForms field for this parameter
        # NOTE(review): passes the literal string 'key', not the key
        # argument -- looks like a bug; confirm against callers.
        return wtform_cls('key', validators=validators, default=defaults)

    @staticmethod
    def chekc_units(units,allowed,name):
        # (sic: the method name typo is part of the public interface)
        if units not in allowed:
            raise RuntimeError('wrong units for par: %s'%name, ' found: ',units,' allowed:', allowed)

    @staticmethod
    def check_value(val,units,par_name):
        # default: accept anything; subclasses override
        pass

    # def get_form_field(self,key=None,default=None,validators=None,wtform_dict=None,wtform=None):
    #     if key is None:
    #         key=self.name
    #
    #     if wtform is None and wtform_dict is None:
    #
    #         wtform_dict=self._wtform_dict
    #
    #     if default is not None:
    #         self.check_value(default,self.units)
    #     else:
    #         default=self.value
    #
    #
    #     if wtform is not None and wtform_dict is not None:
    #         raise RuntimeError('either you provide wtform or wtform_dict or you pass a wtform_dict to the constructor')
    #
    #     elif wtform_dict is not None:
    #         wtform=wtform_dict[self.units]
    #
    #     else:
    #         raise RuntimeError('yuo must provide wtform or wtform_dict')
    #
    #     return wtform(label=key, validators=validators, default=default)

    def reprJSON(self):
        # JSON-serializable summary of this parameter
        return dict(name=self.name, units=self.units, value=self.value)
#class Instrument(Parameter):
# def __init__(self,T_format,name,value=None):
#wtform_dict = {'iso': SelectField}
class Name(Parameter):
    """A string-valued Parameter; units are fixed to the 'str' pseudo-format."""

    def __init__(self,value=None, name_format='str', name=None):
        # only the 'str' pseudo-unit is accepted for names
        super(Name,self).__init__(value=value,
                                  units=name_format,
                                  check_value=self.check_name_value,
                                  name=name,
                                  allowed_units=['str'])

    @staticmethod
    def check_name_value(value, units=None, name=None):
        # any value is accepted for a name
        pass
class Float(Parameter):
    """
    A floating-point Parameter: the value setter accepts numbers or
    numeric strings and stores them as float; empty string / None clear
    the value.
    """

    def __init__(self,value=None,units=None,name=None):
        #wtform_dict = {'keV': FloatField}

        super(Float, self).__init__(value=value,
                                    units=units,
                                    check_value=self.check_float_value,
                                    name=name,
                                    allowed_units=None)
        #wtform_dict=wtform_dict)
        self.value=value

    @property
    def value(self):
        return self._v

    @value.setter
    def value(self, v):
        if v is not None and v!='':
            self.check_float_value(v,name=self.name)
            # np.float was only an alias of the builtin float and has been
            # removed from modern numpy; plain float is identical.
            self._v = float(v)
        else:
            self._v=None

    @staticmethod
    def check_float_value(value, units=None,name=None):
        """Raise RuntimeError unless value converts to a float (empty/None pass)."""
        #print('check type of ',name,'value', value, 'type',type(value))
        if value is None or value=='':
            pass
        else:
            # literal_eval turns numeric strings into numbers; anything
            # else is left for float() to reject
            try:
                value=ast.literal_eval(value)
            except:
                pass
            value=float(value)
            # np.int/np.float were aliases of the builtins (removed from
            # modern numpy); the builtin checks are identical
            if type(value) == int:
                pass
            elif type(value) == float:
                pass
            else:
                raise RuntimeError('type of ', name, 'not valid', type(value))
class Integer(Parameter):
    """
    An integer Parameter: the value setter accepts numbers or numeric
    strings and stores them as int; empty string / None clear the value.
    """

    def __init__(self,value=None,units=None,name=None):
        #wtform_dict = {'keV': FloatField}

        super(Integer, self).__init__(value=value,
                                      units=units,
                                      check_value=self.check_int_value,
                                      name=name,
                                      allowed_units=None)
        #wtform_dict=wtform_dict)
        self.value=value

    @property
    def value(self):
        return self._v

    @value.setter
    def value(self, v):
        if v is not None and v!='':
            self.check_int_value(v,name=self.name)
            # np.int was only an alias of the builtin int and has been
            # removed from modern numpy; plain int is identical.
            self._v = int(v)
        else:
            self._v=None

    @staticmethod
    def check_int_value(value, units=None,name=None):
        """Raise RuntimeError unless value converts to an int (empty/None pass)."""
        #print('check type of ',name,'value', value, 'type',type(value))
        if value is None or value=='':
            pass
        else:
            # literal_eval turns numeric strings into numbers; anything
            # else is left for int() to reject
            try:
                value=ast.literal_eval(value)
            except:
                pass
            value=int(value)
            # np.int/np.float were aliases of the builtins (removed from
            # modern numpy); the builtin checks are identical
            if type(value) == int:
                pass
            elif type(value) == float:
                pass
            else:
                raise RuntimeError('type of ', name, 'not valid', type(value))
class Time(Parameter):
    """
    A time-valued Parameter backed by an astropy Time object; 'units'
    holds the astropy time format (e.g. 'iso', 'mjd').
    """

    def __init__(self,value=None,T_format=None,name=None,Time_format_name=None):
        #_allowed_units = astropyTime.FORMATS
        #wtform_dict = {'iso': StringField}
        #wtform_dict['mjd'] = FloatField
        #wtform_dict['prod_list'] = TextAreaField

        super(Time,self).__init__(value=value,
                                  units=T_format,
                                  units_name=Time_format_name,
                                  name=name,
                                  allowed_units=None)
        #wtform_dict=wtform_dict)

        self._set_time(value,format=T_format)

    @property
    def value(self):
        # the value as rendered by astropy for the current format
        return self._astropy_time.value

    @value.setter
    def value(self, v):
        units=self.units
        self._set_time(v, format=units)

    def _set_time(self,value,format):
        # literal_eval converts numeric strings (e.g. mjd values) to
        # numbers; non-literal strings such as ISO dates pass through
        try:
            value=ast.literal_eval(value)
        except:
            pass

        self._astropy_time = astropyTime(value, format=format)

        self._value =value
class TimeDelta(Parameter):
    """
    A duration Parameter backed by an astropy TimeDelta object; 'units'
    holds the astropy delta format (default 'sec').
    """

    def __init__(self, value=None, delta_T_format='sec', name=None, delta_T_format_name=None):
        # _allowed_units = astropyTime.FORMATS
        # wtform_dict = {'iso': StringField}
        # wtform_dict['mjd'] = FloatField
        # wtform_dict['prod_list'] = TextAreaField

        super(TimeDelta, self).__init__(value=value,
                                        units=delta_T_format,
                                        units_name=delta_T_format_name,
                                        name=name,
                                        allowed_units=None)
        # wtform_dict=wtform_dict)

        self._set_time(value, format=delta_T_format)

    @property
    def value(self):
        # the value as rendered by astropy for the current format
        return self._astropy_time_delta.value

    @value.setter
    def value(self, v):
        units = self.units
        self._set_time(v, format=units)

    def _set_time(self, value, format):
        # literal_eval converts numeric strings to numbers before they
        # are handed to astropy; non-literal strings pass through
        try:
            value = ast.literal_eval(value)
        except:
            pass
        #print ('value',value)
        self._astropy_time_delta = astropyTimeDelta(value, format=format)

        self._value = value
class InputProdList(Parameter):
    """
    A Parameter whose value is a list of product names; accepts either a
    real list or a comma/space separated string, which is split.
    """

    def __init__(self,value=None,_format='names_list',name=None):
        _allowed_units = ['names_list']
        if value is None:
            value=[]

        super(InputProdList, self).__init__(value=value,
                                            units=_format,
                                            check_value=self.check_list_value,
                                            name=name,
                                            allowed_units=_allowed_units)
        #wtform_dict=wtform_dict)
        self._split(value)

    def _split(self,str_list):
        """Normalize str_list to a list, splitting strings on ',' (else spaces)."""
        if type(str_list)==list:
            pass
        elif type(str_list)==str or type(str(str_list)):
            # NOTE(review): 'type(str(str_list))' is always truthy, so this
            # branch also swallows any non-list type (e.g. unicode on
            # Python 2) and the else below is unreachable -- confirm intent.
            if ',' in str_list:
                str_list= str_list.split(',')
            else:
                str_list = str_list.split(' ')
        else:
            raise RuntimeError('parameter format is not correct')

        # normalize "empty" to a truly empty list
        if str_list == ['']:
            str_list = []

        return str_list

    @property
    def value(self):
        # the empty sentinel [''] is presented to callers as []
        if self._value==[''] or self._value is None:
            return []
        else:
            return self._value

    @value.setter
    def value(self, v):
        #print('set', self.name, v, self._allowed_values)
        if v is not None:
            if self.check_value is not None:
                self.check_value(v, units=self.units, name=self.name)
            if self._allowed_values is not None:
                if v not in self._allowed_values:
                    raise RuntimeError('value', v, 'not allowed, allowed=', self._allowed_values)
            if v == [''] or v is None or str(v) == '':
                self._value=['']
            else:
                self._value = v
        else:
            self._value = ['']
        # always store the split/normalized form
        self._value=self._split(self._value)
        #print ('set to ',self._value)

    @staticmethod
    def check_list_value(value,units,name='par'):
        """Raise RuntimeError unless value looks like a product name list."""
        if units=='names_list':
            try:
                #print(type(value))
                assert (type(value) == list or type(value) == str or type(str(value))== str)
            except:
                raise RuntimeError('par:',name,', value is not product list format : list of strings','it is',type(value),value)
        else:
            raise RuntimeError(name,'units not valid',units)
class Angle(Parameter):
    """Parameter holding an angle, backed by an astropy Angle."""

    def __init__(self, value=None, units=None, name=None):
        super(Angle, self).__init__(value=value,
                                    units=units,
                                    name=name,
                                    allowed_units=None)
        self._set_angle(value, units=units)

    @property
    def value(self):
        return self._astropy_angle.value

    @value.setter
    def value(self, v):
        # BUG FIX: the original setter declared an extra `units=None`
        # parameter, but property setters are only ever invoked with a single
        # value, so that parameter could never be supplied and the current
        # units were always used. The dead parameter is removed.
        self._set_angle(v, units=self.units)

    def _set_angle(self, value, units):
        if value == '' or value is None:
            # Leave the angle unset; reading .value before a real value is
            # assigned raises AttributeError (pre-existing behaviour).
            pass
        else:
            self._astropy_angle = astropyAngle(value, unit=units)
            self._value = self._astropy_angle.value
# class AngularDistance(Parameter):
# def __init__(self, angular_units,name, value=None):
# _allowed_units = ['deg']
# super(AngularDistance, self).__init__(value=value,
# units=angular_units,
# check_value=self.check_angle_value,
# name=name,
# allowed_units=_allowed_units)
#
#
#
# @staticmethod
# def check_angle_value(value, units=None, name=None):
# print('check type of ', name, 'value', value, 'type', type(value))
# pass
#
class SpectralBoundary(Parameter):
    """Parameter holding a spectral boundary (energy or frequency) value."""

    def __init__(self, value=None, E_units='keV', name=None):
        _allowed_units = ['keV', 'eV', 'MeV', 'GeV', 'TeV', 'Hz', 'MHz', 'GHz']
        super(SpectralBoundary, self).__init__(value=value,
                                               units=E_units,
                                               check_value=self.check_energy_value,
                                               name=name,
                                               allowed_units=_allowed_units)

    @staticmethod
    def check_energy_value(value, units=None, name=None):
        """Raise RuntimeError unless *value* is (convertible to) a number.

        Strings holding Python literals (e.g. "10.5") are converted first.
        """
        try:
            value = ast.literal_eval(value)
        except (ValueError, SyntaxError, TypeError):
            pass
        # BUG FIX: np.int / np.float were deprecated and removed in
        # NumPy 1.24, so the original type comparisons raised AttributeError.
        # Use the NumPy abstract scalar types instead. bool is excluded to
        # preserve the original behaviour of rejecting booleans.
        if isinstance(value, bool) or not isinstance(value, (int, float, np.integer, np.floating)):
            raise RuntimeError('type of ', name, 'not valid', type(value))
class Energy(Parameter):
    """Parameter holding an energy value."""

    def __init__(self, value=None, E_units=None, name=None):
        _allowed_units = ['keV', 'eV', 'MeV', 'GeV', 'TeV']
        super(Energy, self).__init__(value=value,
                                     units=E_units,
                                     check_value=self.check_energy_value,
                                     name=name,
                                     allowed_units=_allowed_units)

    @staticmethod
    def check_energy_value(value, units=None, name=None):
        """Raise RuntimeError unless *value* is (convertible to) a number.

        Strings holding Python literals (e.g. "10.5") are converted first.
        """
        try:
            value = ast.literal_eval(value)
        except (ValueError, SyntaxError, TypeError):
            pass
        # BUG FIX: np.int / np.float were removed in NumPy 1.24; the original
        # type comparisons raised AttributeError on modern NumPy. bool is
        # excluded to preserve the original rejection of booleans.
        if isinstance(value, bool) or not isinstance(value, (int, float, np.integer, np.floating)):
            raise RuntimeError('type of ', name, 'not valid', type(value))
class DetectionThreshold(Parameter):
    """Parameter holding a detection significance threshold (in sigma)."""

    def __init__(self, value=None, units='sigma', name=None):
        _allowed_units = ['sigma']
        super(DetectionThreshold, self).__init__(value=value,
                                                 units=units,
                                                 check_value=self.check_value,
                                                 name=name,
                                                 allowed_units=_allowed_units)

    @staticmethod
    def check_value(value, units=None, name=None):
        """Raise RuntimeError unless *value* is (convertible to) a number.

        Strings holding Python literals (e.g. "5") are converted first.
        """
        try:
            value = ast.literal_eval(value)
        except (ValueError, SyntaxError, TypeError):
            pass
        # BUG FIX: np.int / np.float were removed in NumPy 1.24; the original
        # type comparisons raised AttributeError on modern NumPy. bool is
        # excluded to preserve the original rejection of booleans.
        if isinstance(value, bool) or not isinstance(value, (int, float, np.integer, np.floating)):
            raise RuntimeError('type of ', name, 'not valid', type(value))
class UserCatalog(Parameter):
    """Parameter holding a user-supplied catalog reference (free-form string)."""

    def __init__(self, value=None, name_format='str', name=None):
        allowed = ['str']
        super(UserCatalog, self).__init__(value=value,
                                          units=name_format,
                                          check_value=self.check_name_value,
                                          name=name,
                                          allowed_units=allowed)

    @staticmethod
    def check_name_value(value, units=None, name=None):
        # Any value is accepted: catalog references are free-form.
        pass
| 27.348168 | 128 | 0.542117 | 2,385 | 20,894 | 4.531656 | 0.092243 | 0.03886 | 0.029145 | 0.020448 | 0.560881 | 0.510455 | 0.473168 | 0.452813 | 0.432272 | 0.425981 | 0 | 0.001706 | 0.354791 | 20,894 | 763 | 129 | 27.38401 | 0.800015 | 0.14296 | 0 | 0.535308 | 0 | 0 | 0.042756 | 0 | 0 | 0 | 0 | 0 | 0.006834 | 1 | 0.138952 | false | 0.061503 | 0.022779 | 0.031891 | 0.23918 | 0.004556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
76c632d8dcd5648691e6cdef8c48b528a22c1ea2 | 447 | py | Python | setup.py | sdimitro/savedump-workflows | 2f5b34f006d84c09918b2ade98c20902d411ed3f | [
"Apache-2.0"
] | 1 | 2021-03-27T14:10:55.000Z | 2021-03-27T14:10:55.000Z | setup.py | sdimitro/savedump-workflows | 2f5b34f006d84c09918b2ade98c20902d411ed3f | [
"Apache-2.0"
] | 9 | 2020-07-17T16:21:15.000Z | 2020-09-01T15:50:10.000Z | setup.py | sdimitro/savedump-workflows | 2f5b34f006d84c09918b2ade98c20902d411ed3f | [
"Apache-2.0"
] | 2 | 2020-08-03T17:53:56.000Z | 2020-08-12T21:49:12.000Z | #!/usr/bin/env python3
from setuptools import setup
# Package metadata for savedump: a tool to archive Linux crash dumps/cores.
setup(
    name='savedump',
    version="0.1.0",
    packages=[
        "savedump",
    ],
    # Installs a `savedump` console command mapped to savedump.savedump:main.
    entry_points={
        'console_scripts': ['savedump=savedump.savedump:main'],
    },
    author='Delphix Platform Team',
    author_email='serapheim@delphix.com',
    description='Archive linux crash dumps and cores',
    license='Apache-2.0',
    url='https://github.com/sdimitro/savedump',
)
| 19.434783 | 63 | 0.635347 | 51 | 447 | 5.509804 | 0.784314 | 0.113879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016997 | 0.210291 | 447 | 22 | 64 | 20.318182 | 0.779037 | 0.04698 | 0 | 0 | 0 | 0 | 0.447059 | 0.122353 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
76c6d2932401f4203f7334417d8df963804344f7 | 899 | py | Python | ex081.py | LucasBalbinoSS/Exercicios-Python-Mundo3 | 11799e9529ce4d9f20285b53206083310a076203 | [
"MIT"
] | null | null | null | ex081.py | LucasBalbinoSS/Exercicios-Python-Mundo3 | 11799e9529ce4d9f20285b53206083310a076203 | [
"MIT"
] | null | null | null | ex081.py | LucasBalbinoSS/Exercicios-Python-Mundo3 | 11799e9529ce4d9f20285b53206083310a076203 | [
"MIT"
] | null | null | null | listaNum = list()
contadorde5 = 0
while True:
num = int(input('Digite um número: '))
if num == 5:
contadorde5 += 1
listaNum.append(num)
continuar = str(input('Quer continuar? [ S / N ] ')).strip().upper()
print()
if continuar[0] == 'N':
break
while continuar[0] != 'S' and continuar[0] != 'N':
continuar = str(input('Quer continuar? [ S / N ] ')).strip().upper()
print()
print('=-' * 35)
print(f'Sua lista ficou assim: {listaNum}')
print('=-' * 35)
if len(listaNum) == 1:
print('Você digitou apenas 1 número...')
else:
print(f'Foram digitados {len(listaNum)} números!')
print(f'A lista de forma descrescente se torna {sorted(listaNum, reverse=True)}')
print('=-' * 35)
if 5 in listaNum:
print(f'O valor 5 está sim na lista!\nEncontrei {contadorde5} deles!')
else:
print('Não encontrei nenhum número 5 na lista...')
| 24.972222 | 81 | 0.604004 | 122 | 899 | 4.45082 | 0.483607 | 0.044199 | 0.062615 | 0.077348 | 0.173112 | 0.173112 | 0.173112 | 0.173112 | 0.173112 | 0.173112 | 0 | 0.028571 | 0.221357 | 899 | 35 | 82 | 25.685714 | 0.747143 | 0 | 0 | 0.333333 | 0 | 0 | 0.394883 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.407407 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
4f11fa446f5c128d63dab17202cbfa4c6043f4c7 | 3,135 | py | Python | trainings/workshop1/step12/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 57 | 2018-02-20T08:16:47.000Z | 2022-03-28T10:36:57.000Z | trainings/workshop1/step12/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 377 | 2018-07-19T11:56:27.000Z | 2021-07-09T13:08:12.000Z | trainings/workshop1/step12/network_outage.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 24 | 2018-04-14T20:49:40.000Z | 2022-03-29T10:44:26.000Z | import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
    """Record the moment a network outage was detected on *device_name*."""
    message = "Network outage on {}".format(device_name)
    MolerTest.info(message)
    ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
    """On ping recovery after a loss, compute and log the outage time once."""
    MolerTest.info("Ping works")
    lost_at = ping_times["lost_connection_time"]
    if lost_at <= 0:
        # No network loss recorded yet - nothing to measure.
        return
    if ping_times["reconnection_time"] != 0:
        # Outage for this loss was already measured.
        return
    ping_times["reconnection_time"] = time.time()
    outage_time = ping_times["reconnection_time"] - lost_at
    MolerTest.info("Network outage time is {}".format(outage_time))
def test_network_outage():
    """End-to-end demo: measure a loopback-interface outage via ping events.

    Brings `lo` up on MyMachine2, starts ping no-response/response event
    observers on MyMachine1, forces `lo` down and back up, and lets the event
    callbacks (outage_callback / ping_is_on_callback) compute the outage time.
    NOTE(review): step ordering matters - events must be started before the
    ping command and before the interface is taken down.
    """
    load_config(config=os.path.abspath('config/my_devices.yml'))
    unix1 = DeviceFactory.get_device(name='MyMachine1')
    unix2 = DeviceFactory.get_device(name='MyMachine2')

    # test setup: shared mutable state updated by the event callbacks
    ping_times = {"lost_connection_time": 0,
                  "reconnection_time": 0}
    # ensure network is up before running test
    net_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
    sudo_ensure_net_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": net_up})
    sudo_ensure_net_up()
    # run event observing "network down/up"
    no_ping = unix1.get_event(event_name="ping_no_response")
    no_ping.add_event_occurred_callback(callback=outage_callback,
                                        callback_params={'device_name': 'MyMachine1',
                                                         'ping_times': ping_times})
    no_ping.start()
    ping_is_on = unix1.get_event(event_name="ping_response")
    ping_is_on.add_event_occurred_callback(callback=ping_is_on_callback,
                                           callback_params={'ping_times': ping_times})
    ping_is_on.start()

    # run test: continuous ping (-O reports each missed reply)
    ping = unix1.get_cmd(cmd_name="ping", cmd_params={"destination": "localhost", "options": "-O"})
    ping.start(timeout=120)
    time.sleep(3)

    # force an outage window of ~5 seconds by toggling the loopback interface
    ifconfig_down = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo down"})
    sudo_ifconfig_down = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_down})
    sudo_ifconfig_down()
    time.sleep(5)
    ifconfig_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
    sudo_ifconfig_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "moler", "cmd_object": ifconfig_up})
    sudo_ifconfig_up()
    time.sleep(3)

    # test teardown
    ping.cancel()
    no_ping.cancel()
# Script entry point: run the demo directly (not under a test runner).
if __name__ == '__main__':
    test_network_outage()
"""
copy this file into workshop1/network_outage.py
*** calculating network outage time ***
1. run it
2. see logs - look for "Network outage" and "Ping works"
- be carefull in logs analysis - what's wrong?
3. fix incorrect calculation by exchanging:
no_ping = unix1.get_event(event_name="ping_no_response")
into:
no_ping = unix1.get_event(event_name="ping_no_response", event_params={"till_occurs_times": 1})
"""
| 38.231707 | 118 | 0.686124 | 423 | 3,135 | 4.756501 | 0.250591 | 0.058151 | 0.031312 | 0.045229 | 0.358847 | 0.288767 | 0.248012 | 0.225149 | 0.225149 | 0.225149 | 0 | 0.012229 | 0.191388 | 3,135 | 81 | 119 | 38.703704 | 0.78146 | 0.046252 | 0 | 0.041667 | 0 | 0 | 0.194683 | 0.00821 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.0625 | 0.104167 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
4f155a9ab823ba33ff35f3612ec2e684b44b8924 | 2,782 | py | Python | base/abstract/contextual_data.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | base/abstract/contextual_data.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | null | null | null | base/abstract/contextual_data.py | kefir/snakee | a17734d4b2d7dfd3e6c7b195baa128fbc84d197b | [
"MIT"
] | 2 | 2021-04-10T19:22:15.000Z | 2022-03-08T19:37:56.000Z | from abc import ABC
from typing import Union, Optional, Iterable, Any
try: # Assume we're a sub-module in a package.
from utils import arguments as arg
from base.interfaces.context_interface import ContextInterface
from base.interfaces.contextual_interface import ContextualInterface
from base.interfaces.data_interface import ContextualDataInterface
from base.abstract.abstract_base import AbstractBaseObject
from base.abstract.simple_data import SimpleDataWrapper
from base.abstract.contextual import Contextual
except ImportError: # Apparently no higher-level package has been imported, fall back to a local import.
from ...utils import arguments as arg
from ..interfaces.context_interface import ContextInterface
from ..interfaces.contextual_interface import ContextualInterface
from ..interfaces.data_interface import ContextualDataInterface
from .abstract_base import AbstractBaseObject
from .simple_data import SimpleDataWrapper
from .contextual import Contextual
# Type aliases used throughout this module.
Data = Union[Iterable, Any]  # payload carried by a data wrapper
OptionalFields = Optional[Union[str, Iterable]]  # meta field name(s) to exclude
Source = Optional[ContextualInterface]
Context = Optional[ContextInterface]
DATA_MEMBER_NAMES = ('_data', )  # attributes treated as payload, not meta
DYNAMIC_META_FIELDS = tuple()  # meta fields excluded from "static" meta (none here)
class ContextualDataWrapper(Contextual, ContextualDataInterface, ABC):
    """Contextual object that additionally carries a data payload."""

    def __init__(
            self, data, name: str,
            source: Source = None,
            context: Context = None,
            check: bool = True,
    ):
        self._data = data
        super().__init__(name=name, source=source, context=context, check=check)

    @classmethod
    def _get_data_member_names(cls):
        # Attribute names that hold payload rather than meta information.
        return DATA_MEMBER_NAMES

    def get_data(self) -> Data:
        """Return the wrapped data payload."""
        return self._data

    def set_data(self, data: Data, inplace: bool):
        """Replace the payload in place, or return a new wrapper with it."""
        if not inplace:
            return ContextualDataWrapper(data, **self.get_static_meta())
        self._data = data
        self.set_meta(**self.get_static_meta())

    def apply_to_data(self, function, *args, dynamic=False, **kwargs):
        """Apply *function* to the payload and wrap the result in a new object.

        With dynamic=True only static meta is copied to the new object.
        """
        new_data = function(self.get_data(), *args, **kwargs)
        meta = self.get_static_meta() if dynamic else self.get_meta()
        return self.__class__(data=new_data, **meta)

    @staticmethod
    def _get_dynamic_meta_fields() -> tuple:
        # Meta fields considered data-dependent; dropped from static meta.
        return DYNAMIC_META_FIELDS

    def get_static_meta(self, ex: OptionalFields = None) -> dict:
        """Return the object's meta without the dynamic (data-dependent) fields."""
        meta = self.get_meta(ex=ex)
        for field_name in self._get_dynamic_meta_fields():
            meta.pop(field_name, None)
        return meta

    def get_compatible_static_meta(self, other=arg.DEFAULT, ex=None, **kwargs) -> dict:
        """Return meta compatible with *other*, without the dynamic fields."""
        meta = self.get_compatible_meta(other=other, ex=ex, **kwargs)
        for field_name in self._get_dynamic_meta_fields():
            meta.pop(field_name, None)
        return meta
| 37.093333 | 105 | 0.700935 | 332 | 2,782 | 5.659639 | 0.271084 | 0.033528 | 0.045237 | 0.027142 | 0.349122 | 0.267163 | 0.090474 | 0.055349 | 0.055349 | 0.055349 | 0 | 0 | 0.216751 | 2,782 | 74 | 106 | 37.594595 | 0.862322 | 0.043853 | 0 | 0.129032 | 0 | 0 | 0.001882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.274194 | 0.064516 | 0.532258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
4f1563734b81e35e89fdc77fb21035b9e52c5dfc | 1,880 | py | Python | snypy/snippets/rest/filters.py | sterapps/snypy-backend | e4733a1b7bf041c79c66ce74e64cc428d3c6ba5d | [
"MIT"
] | 2 | 2018-06-21T07:51:30.000Z | 2019-06-01T14:17:07.000Z | snypy/snippets/rest/filters.py | nezhar/snypy-backend | 0673b7dc7dc8b730639e0f634dcaa8b8178151e0 | [
"MIT"
] | 33 | 2018-05-10T10:37:46.000Z | 2021-10-30T11:07:22.000Z | snypy/snippets/rest/filters.py | sterapps/snypy-backend | e4733a1b7bf041c79c66ce74e64cc428d3c6ba5d | [
"MIT"
] | 3 | 2019-06-12T08:53:37.000Z | 2020-10-28T17:21:02.000Z | import django_filters
from snippets.models import File, Snippet, Label, SnippetLabel
class FileFilter(django_filters.FilterSet):
    """Filter set for File objects (by snippet and language)."""

    class Meta:
        model = File
        fields = ['snippet', 'language']
class SnippetFilter(django_filters.FilterSet):
    """Filter set for Snippet objects with extra computed boolean filters."""

    favorite = django_filters.BooleanFilter(method='filter_is_favorite', label="Is favorite?", )
    labeled = django_filters.BooleanFilter(method='filter_is_labeled', label="Is labeled?", )
    team_is_null = django_filters.BooleanFilter(method='filter_team_is_null', label="Team is None", )

    # ToDo: Add after shares app
    # shared_to = django_filters.NumberFilter(field_name="shared__user")
    # shared_from = django_filters.NumberFilter(field_name="shared__user")

    class Meta:
        model = Snippet
        fields = [
            'labels',
            'visibility',
            'files__language',
            'user',
            'team',
        ]

    def filter_is_favorite(self, queryset, name, value):
        # BUG FIX: the original returned None, which breaks django-filter's
        # queryset chaining whenever the `favorite` parameter is supplied.
        # Until favorites are implemented, act as a no-op filter.
        # TODO: implement favorite filtering.
        return queryset

    def filter_is_labeled(self, queryset, name, value):
        """Keep snippets that have labels (value=True) or none (value=False)."""
        if value:
            return queryset.exclude(labels=None)
        return queryset.filter(labels=None)

    def filter_team_is_null(self, queryset, name, value):
        """Filter snippets by whether they are unassigned to any team."""
        return queryset.filter(
            team__isnull=value,
        )
class LabelFilter(django_filters.FilterSet):
    """Filter set for Label objects; `user` matches personal (team-less) labels."""

    user = django_filters.NumberFilter(method='filter_user', label="User", )

    class Meta:
        model = Label
        fields = ['user', 'team']

    def filter_user(self, queryset, name, value):
        # Personal labels only: owned by the user and not attached to a team.
        return queryset.filter(user=value, team=None)
class SnippetLabelFilter(django_filters.FilterSet):
    """Filter set for the snippet-label association model."""

    class Meta:
        model = SnippetLabel
        fields = ['snippet', 'label']
| 24.102564 | 101 | 0.607447 | 190 | 1,880 | 5.794737 | 0.273684 | 0.129882 | 0.079927 | 0.076294 | 0.326975 | 0.292461 | 0.154405 | 0 | 0 | 0 | 0 | 0 | 0.293617 | 1,880 | 77 | 102 | 24.415584 | 0.829066 | 0.08617 | 0 | 0.307692 | 0 | 0 | 0.103851 | 0 | 0 | 0 | 0 | 0.012987 | 0 | 1 | 0.076923 | false | 0.019231 | 0.038462 | 0.038462 | 0.423077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4f15648cc786e557ca07ca52d78e1a5532cecf8b | 1,892 | py | Python | Packs/CommonWidgets/Scripts/MyToDoTasksWidget/MyToDoTasksWidget_test.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | null | null | null | Packs/CommonWidgets/Scripts/MyToDoTasksWidget/MyToDoTasksWidget_test.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | 51 | 2022-02-25T22:28:40.000Z | 2022-03-31T22:34:58.000Z | Packs/CommonWidgets/Scripts/MyToDoTasksWidget/MyToDoTasksWidget_test.py | satyakidroid/content | b5342c522d44aec8f31f4ee0fc8ad269ac970903 | [
"MIT"
] | null | null | null | import json
import demistomock as demisto
from MyToDoTasksWidget import get_open_to_do_tasks_of_current_user
def test_open_to_do_tasks_of_current_user(mocker):
    """
    Given:
        - Mock response of 'internalHttpRequest' to '/v2/statistics/widgets/query' that includes an open task and
          a close task
    When:
        - Running the MyToDoTasksWidget script
    Then:
        - Ensure the markdown table was generated correctly and includes only the open task
    """
    open_task = {
        'assignee': 'admin',
        'completed': '0001-01-01T00:00:00Z',
        'dbotCreatedBy': 'admin',
        'description': 'test_open_task',
        'dueDate': '2021-11-30T15:49:11+02:00',
        'id': '1@2',
        'incidentId': '2',
        'status': 'open',
        'title': 'test open',
    }
    closed_task = {
        'assignee': 'admin',
        'dbotCreatedBy': 'admin',
        'description': 'test_close_task',
        'dueDate': '2021-11-30T15:49:11+02:00',
        'id': '1@3',
        'incidentId': '3',
        'status': 'close',
        'title': 'test close',
    }
    mocked_response = {
        'statusCode': 200,
        'body': json.dumps({'data': [open_task, closed_task]}),
    }
    mocker.patch.object(demisto, 'internalHttpRequest', return_value=mocked_response)

    expected_table = [
        {
            'Task Name': 'test open',
            'Task Description': 'test_open_task',
            'Task ID': '1@2',
            'SLA': '2021-11-30 15:49:11+0200',
            'Opened By': 'admin',
            'Incident ID': '[2](#/Custom/caseinfoid/2)',
        }
    ]

    table = get_open_to_do_tasks_of_current_user()
    assert len(table) == 1
    assert table == expected_table
| 28.238806 | 113 | 0.493129 | 189 | 1,892 | 4.767196 | 0.481481 | 0.044395 | 0.026637 | 0.043285 | 0.166482 | 0.166482 | 0.166482 | 0.137625 | 0.073252 | 0.073252 | 0 | 0.07088 | 0.381078 | 1,892 | 66 | 114 | 28.666667 | 0.698548 | 0.146934 | 0 | 0.12 | 0 | 0 | 0.292884 | 0.048285 | 0 | 0 | 0 | 0.015152 | 0.04 | 1 | 0.02 | false | 0 | 0.06 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4f1af3d08cbf530d538f0749b68ac8ba48af17e8 | 2,840 | py | Python | src/globus_cli/services/transfer/client.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 47 | 2016-04-21T19:51:17.000Z | 2022-02-25T14:13:30.000Z | src/globus_cli/services/transfer/client.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 421 | 2016-04-20T18:45:24.000Z | 2022-03-14T14:50:41.000Z | src/globus_cli/services/transfer/client.py | sirosen/temp-cli-test | 416fd3fea17b4c7c2cf35d6ccde63cb5719a1af6 | [
"Apache-2.0"
] | 20 | 2016-09-10T20:25:27.000Z | 2021-10-06T16:02:47.000Z | import logging
import textwrap
import uuid
from typing import Any, Dict, Tuple, Union
import click
from globus_sdk import GlobusHTTPResponse, TransferClient
from .data import display_name_or_cname
from .recursive_ls import RecursiveLsResponse
log = logging.getLogger(__name__)
class CustomTransferClient(TransferClient):
    """TransferClient with CLI-specific helpers (recursive ls, server lookup)."""

    # TODO: Remove this function when endpoints natively support recursive ls
    def recursive_operation_ls(
        self,
        endpoint_id: Union[str, uuid.UUID],
        params: Dict[str, Any],
        depth: int = 3,
    ) -> RecursiveLsResponse:
        """
        Makes recursive calls to ``GET /operation/endpoint/<endpoint_id>/ls``
        Does not preserve access to top level operation_ls fields, but
        adds a "path" field for every item that represents the full
        path to that item.
        :rtype: iterable of :class:`GlobusResponse <globus_sdk.response.GlobusResponse>`

        :param endpoint_id: The endpoint being recursively ls'ed. If no "path" is given
            in params, the start path is determined by this endpoint.
        :param params: Parameters that will be passed through as query params.
        :param depth: The maximum file depth the recursive ls will go to.
        """
        # Normalize so UUID objects and strings are handled uniformly.
        endpoint_id = str(endpoint_id)
        log.info(
            "TransferClient.recursive_operation_ls(%s, %s, %s)",
            endpoint_id,
            depth,
            params,
        )
        # RecursiveLsResponse drives the paginated/recursive traversal itself.
        return RecursiveLsResponse(self, endpoint_id, params, max_depth=depth)

    def get_endpoint_w_server_list(
        self, endpoint_id
    ) -> Tuple[GlobusHTTPResponse, Union[str, GlobusHTTPResponse]]:
        """
        A helper for handling endpoint server list lookups correctly accounting
        for various endpoint types.

        - Raises click.UsageError when used on Shares
        - Returns (<get_endpoint_response>, "S3") for S3 endpoints
        - Returns (<get_endpoint_response>, <server_list_response>) for all other
          Endpoints
        """
        endpoint = self.get_endpoint(endpoint_id)

        if endpoint["host_endpoint_id"]:  # not GCS -- this is a share endpoint
            raise click.UsageError(
                textwrap.dedent(
                    """\
                    {id} ({0}) is a share and does not have servers.

                    To see details of the share, use
                        globus endpoint show {id}

                    To list the servers on the share's host endpoint, use
                        globus endpoint server list {host_endpoint_id}
                    """
                ).format(display_name_or_cname(endpoint), **endpoint.data)
            )

        if endpoint["s3_url"]:  # not GCS -- legacy S3 endpoint type
            return (endpoint, "S3")

        else:
            return (endpoint, self.endpoint_server_list(endpoint_id))
| 35.949367 | 88 | 0.634859 | 334 | 2,840 | 5.257485 | 0.407186 | 0.068337 | 0.023918 | 0.020501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003477 | 0.291197 | 2,840 | 78 | 89 | 36.410256 | 0.868852 | 0.355282 | 0 | 0 | 0 | 0 | 0.053716 | 0.030169 | 0 | 0 | 0 | 0.012821 | 0 | 1 | 0.054054 | false | 0 | 0.216216 | 0 | 0.378378 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
4f1e7496b3483e64bf6a8b20b24344e523b362a7 | 808 | py | Python | pyrene/main.py | krisztianfekete/pyrene | ad9f2fb979f06930399c9c8214c3fe3c2d6efa06 | [
"MIT"
] | null | null | null | pyrene/main.py | krisztianfekete/pyrene | ad9f2fb979f06930399c9c8214c3fe3c2d6efa06 | [
"MIT"
] | 2 | 2015-01-06T09:29:29.000Z | 2015-01-06T09:50:15.000Z | pyrene/main.py | krisztianfekete/pyrene | ad9f2fb979f06930399c9c8214c3fe3c2d6efa06 | [
"MIT"
] | null | null | null | # Py3 compatibility
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import os
import sys
import shutil
from .network import Network
from .util import Directory
from .shell import PyreneCmd
def main():
    """Entry point: run one command from argv, or start the interactive shell."""
    pyrene_config = os.path.expanduser('~/.pyrene')
    pypirc_path = os.path.expanduser('~/.pypirc')
    work_dir = tempfile.mkdtemp(suffix='.pyrene')
    network = Network(pyrene_config)
    try:
        # First run: seed the network with repos already configured in .pypirc.
        if not os.path.exists(pyrene_config):
            network.add_known_repos(pypirc_path)
        shell = PyreneCmd(network, Directory(work_dir), pypirc_path)
        command_line = ' '.join(sys.argv[1:])
        if command_line:
            shell.onecmd(command_line)
        else:
            shell.cmdloop()
    finally:
        # Always remove the scratch directory, even if the shell raised.
        shutil.rmtree(work_dir)


if __name__ == '__main__':
    main()
| 21.263158 | 64 | 0.659653 | 97 | 808 | 5.226804 | 0.474227 | 0.053254 | 0.063116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003247 | 0.237624 | 808 | 37 | 65 | 21.837838 | 0.819805 | 0.02104 | 0 | 0 | 0 | 0 | 0.043093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.333333 | 0 | 0.37037 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
4f21df34e1d98ce1e3adaa44158d7fed8c12ff92 | 1,121 | py | Python | snippets/Sage/batchdel.py | JLLeitschuh/TIPL | 89c5d82932f89a2b4064d5d86ac83045ce9bc7d5 | [
"Apache-2.0"
] | 1 | 2019-11-22T11:02:52.000Z | 2019-11-22T11:02:52.000Z | snippets/Sage/batchdel.py | JLLeitschuh/TIPL | 89c5d82932f89a2b4064d5d86ac83045ce9bc7d5 | [
"Apache-2.0"
] | 4 | 2019-11-21T14:13:32.000Z | 2020-02-11T15:15:23.000Z | snippets/Sage/batchdel.py | JLLeitschuh/TIPL | 89c5d82932f89a2b4064d5d86ac83045ce9bc7d5 | [
"Apache-2.0"
] | 1 | 2020-02-11T06:19:45.000Z | 2020-02-11T06:19:45.000Z | import sys,os
from numpy import *
from subprocess import *
from glob import glob
doResume=1
showisq=1
showlen=0
rdelete=1
fixmasks=1
vmsFix=lambda wholeFile: '\\;'.join(wholeFile.split(';'))
megsize=lambda fileName: os.path.getsize(fileName)/1e6
if len(sys.argv)<2:
for rt,drs,files in os.walk(os.getcwd(),topdown=False):
ffiles=filter(lambda x: x.find('.csv')>=0,files)
for cFile in ffiles:
#
if cFile.lower().find('lacun')>=0: cPre='lacun'
if cFile.lower().find('canal')>=0: cPre='canal'
if cFile.lower().find('edge')<0:
wholeFile=(rt+'/'+cFile)
try:
curDir='/'.join((rt+'/'+cFile).split('/')[:-2])
curSample='_'.join((rt+'/'+cFile).split('/')[-3].split('_')[1:])
os.chdir(curDir)
if showisq: os.system('ls -lh '+vmsFix(wholeFile))
if megsize(wholeFile)<0.1:
csvfiles=glob(rt+'/'+cPre+'_*.csv')
if showlen:
for acsvFile in csvfiles: os.system('wc -l '+acsvFile)
for ccsv in csvfiles:
execCmd='rm '+ccsv
print (ccsv,megsize(ccsv))
if rdelete: os.system(execCmd)
except:
print rt+'/'+cFile+' already gone'
| 29.5 | 69 | 0.619982 | 160 | 1,121 | 4.325 | 0.41875 | 0.040462 | 0.052023 | 0.069364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018458 | 0.178412 | 1,121 | 37 | 70 | 30.297297 | 0.732899 | 0 | 0 | 0 | 0 | 0 | 0.06875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.117647 | null | null | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.