hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
890720448b3e50f1c48c03d7c7569b9fe8390425
| 348
|
py
|
Python
|
python-flask-server/jatdb_server/models/__init__.py
|
NGenetzky/jatdb
|
518e0cedca1b61b8a744aef5e02255d8501bf8eb
|
[
"MIT"
] | null | null | null |
python-flask-server/jatdb_server/models/__init__.py
|
NGenetzky/jatdb
|
518e0cedca1b61b8a744aef5e02255d8501bf8eb
|
[
"MIT"
] | null | null | null |
python-flask-server/jatdb_server/models/__init__.py
|
NGenetzky/jatdb
|
518e0cedca1b61b8a744aef5e02255d8501bf8eb
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from jatdb_server.models.content_file import ContentFile
from jatdb_server.models.trello_action import TrelloAction
from jatdb_server.models.trello_query import TrelloQuery
from jatdb_server.models.universal_resource import UniversalResource
| 34.8
| 68
| 0.864943
| 47
| 348
| 6.12766
| 0.553191
| 0.125
| 0.208333
| 0.291667
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006349
| 0.094828
| 348
| 9
| 69
| 38.666667
| 0.907937
| 0.16954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f1a29e2658903dfe87d0135717ceaa3a02e10a7
| 5,646
|
py
|
Python
|
TestAttackManual.py
|
kartik-joshi/Adversarial-Attack-on-Recurrent-Neural-Network
|
2504e35695c4f9305eed35468e4defa881d16c44
|
[
"BSD-2-Clause"
] | 24
|
2018-10-12T12:07:42.000Z
|
2022-03-18T22:34:45.000Z
|
TestAttackManual.py
|
kartik-joshi/Adversarial-Attack-on-Recurrent-Neural-Network
|
2504e35695c4f9305eed35468e4defa881d16c44
|
[
"BSD-2-Clause"
] | 2
|
2019-06-14T00:20:22.000Z
|
2020-10-28T19:02:56.000Z
|
TestAttackManual.py
|
kartik-joshi/Adversarial-Attack-on-Recurrent-Neural-Network
|
2504e35695c4f9305eed35468e4defa881d16c44
|
[
"BSD-2-Clause"
] | 5
|
2018-10-12T12:07:43.000Z
|
2021-11-12T13:23:11.000Z
|
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding, Reshape
from keras.layers import LSTM
from keras.datasets import imdb
import numpy as np
max_features = 20000
maxlen = 256 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
# print('Loading data...')
# # (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# print(len(x_train), 'train sequences')
# print(len(x_test), 'test sequences')
#
# print("train labals", len(y_train))
#
# list1 = []
# for i in range(len(y_train)):
# if y_train[i] == 1:
# list1.append([1, 0])
# else:
# list1.append([0, 1])
#
# y_train = np.array(list1)
#
# print("after process", len(y_train))
#
# print("Test labals", len(y_test))
#
# list1 = []
# for i in range(len(y_test)):
# if y_test[i] == 1:
# list1.append([1, 0])
# else:
# list1.append([0, 1])
#
# y_test = np.array(list1)
#
# print("after process", len(y_test))
#
# print('Pad sequences (samples x time)')
# x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
# x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
# print('x_train shape:', x_train.shape)
# print('x_test shape:', x_test.shape)
#
# x_train = x_train.reshape((x_train.shape[0], 16, 16, 1))
# x_test = x_test.reshape((x_test.shape[0], 16, 16, 1))
Valid = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 89, 27, 1, 9289, 17, 199, 131, 4, 4191, 16, 1339, 23, 8, 759, 3, 1385, 7, 3, 21, 1368, 11415, 16, 5149, 17, 1634, 7, 1, 1368, 9, 3, 1356, 8, 13, 990, 12, 877, 38, 19, 27, 239, 12, 100, 234, 60, 483, 11960, 3, 7, 3, 20, 131, 1101, 71, 8, 13, 251, 27, 1146, 7, 308, 16, 735, 1516, 17, 29, 143, 28, 77, 2305, 18, 11]
Adversarial = [10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000, 10000]
model = Sequential()
model.add(Reshape((256,), input_shape=(16, 16, 1)))
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(2, activation='softmax'))
model.load_weights("models\imdb_model.h5")
# model.add(Dense(1, activation='sigmoid'))
# model.add(Dense(2))
# model.add(softmax(x, axis=-1))
x_train = np.array(Valid)
inputs = np.reshape(x_train, (1, 16, 16, 1))
valid = model.predict_classes(inputs, batch_size=None)
#result = model.predict(inputs, batch_size=None)
x_train = np.array(Adversarial)
inputs = np.reshape(x_train, (1, 16, 16, 1))
adv = model.predict_classes(inputs, batch_size=None)
#result = model.predict(inputs, batch_size=None)
print("Valid image classification ", valid)
print("Adversarial image classification ", adv)
print("same words", len(set(Valid)&set(Adversarial)))
# # try using different optimizers and different optimizer configs
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=['accuracy'])
#
# print('Train...')
# model.fit(x_train, y_train,
# batch_size=batch_size,
# epochs=1,
# # epochs=15,
# validation_data=(x_test, y_test))
# score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
# print('Test score:', score)
# print('Test accuracy:', acc)
# model.save_weights("imdb_model.h5")
# print("Saved model to disk")
| 57.030303
| 1,807
| 0.638859
| 945
| 5,646
| 3.740741
| 0.17672
| 0.721358
| 1.077793
| 1.4314
| 0.527016
| 0.520792
| 0.520792
| 0.509477
| 0.490806
| 0.47553
| 0
| 0.373856
| 0.187035
| 5,646
| 99
| 1,808
| 57.030303
| 0.396296
| 0.313673
| 0
| 0.074074
| 0
| 0
| 0.026124
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.259259
| 0
| 0.259259
| 0.148148
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8f1d884554b14dec41b3f7c78e68c1db5758f972
| 49
|
py
|
Python
|
implementation_files/cosim_pandapipes_pandapower/simulators/heat_consumer/__init__.py
|
ERIGrid2/benchmark-model-multi-energy-networks
|
4172480a5fcdf99d086b98ea24e00342f8e42a91
|
[
"BSD-3-Clause"
] | null | null | null |
implementation_files/cosim_pandapipes_pandapower/simulators/heat_consumer/__init__.py
|
ERIGrid2/benchmark-model-multi-energy-networks
|
4172480a5fcdf99d086b98ea24e00342f8e42a91
|
[
"BSD-3-Clause"
] | null | null | null |
implementation_files/cosim_pandapipes_pandapower/simulators/heat_consumer/__init__.py
|
ERIGrid2/benchmark-model-multi-energy-networks
|
4172480a5fcdf99d086b98ea24e00342f8e42a91
|
[
"BSD-3-Clause"
] | null | null | null |
from .mosaik_wrapper import HEXConsumerSimulator
| 24.5
| 48
| 0.897959
| 5
| 49
| 8.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081633
| 49
| 1
| 49
| 49
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8f67bedcc5168ce9b6d5e4d276e634eca1463ec3
| 17
|
py
|
Python
|
src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/res_test_robot_session_server/a/lib.py
|
alex729/RED
|
128bf203cf035892c02805aabd0c915f96006bb0
|
[
"Apache-2.0"
] | 375
|
2015-11-02T19:15:30.000Z
|
2022-03-19T03:32:10.000Z
|
src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/res_test_robot_session_server/a/lib.py
|
alex729/RED
|
128bf203cf035892c02805aabd0c915f96006bb0
|
[
"Apache-2.0"
] | 433
|
2015-11-03T13:24:40.000Z
|
2022-03-30T11:20:14.000Z
|
src/RobotFrameworkCore/org.robotframework.ide.core-functions/src/test/python/scripts/res_test_robot_session_server/a/lib.py
|
alex729/RED
|
128bf203cf035892c02805aabd0c915f96006bb0
|
[
"Apache-2.0"
] | 133
|
2016-05-02T02:20:06.000Z
|
2022-01-06T06:01:28.000Z
|
def kw1():
pass
| 8.5
| 11
| 0.588235
| 3
| 17
| 3.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0.235294
| 17
| 2
| 12
| 8.5
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
710f4b6b64ae739fa67f3fad8473d9aa8088ce4a
| 683
|
py
|
Python
|
lastweekend_photos/apps/photo_gallery/rest_api/filters.py
|
OmidFarvid/lastweekend-photos
|
f7f433843a14ca846bd7a9a8707ca1bf1c04d7da
|
[
"MIT"
] | null | null | null |
lastweekend_photos/apps/photo_gallery/rest_api/filters.py
|
OmidFarvid/lastweekend-photos
|
f7f433843a14ca846bd7a9a8707ca1bf1c04d7da
|
[
"MIT"
] | null | null | null |
lastweekend_photos/apps/photo_gallery/rest_api/filters.py
|
OmidFarvid/lastweekend-photos
|
f7f433843a14ca846bd7a9a8707ca1bf1c04d7da
|
[
"MIT"
] | null | null | null |
import django_filters
from django_filters import rest_framework as filters
# from apps.photo_gallery.models import Event
#
#
# class EventFilter(filters.FilterSet):
# start_date_min = django_filters.DateFilter(field_name='start_date', lookup_expr='gte', required=False)
# start_date_max = django_filters.DateFilter(field_name='start_date', lookup_expr='lte', required=False)
# end_date_min = django_filters.DateFilter(field_name='end_date', lookup_expr='gte', required=False)
# end_date_max = django_filters.DateFilter(field_name='end_date', lookup_expr='lte', required=False)
#
# class Meta:
# model = Event
# exclude = ('event_flyer', 'image')
| 42.6875
| 108
| 0.74817
| 90
| 683
| 5.355556
| 0.377778
| 0.161826
| 0.190871
| 0.232365
| 0.605809
| 0.605809
| 0.473029
| 0.414938
| 0.414938
| 0
| 0
| 0
| 0.1347
| 683
| 15
| 109
| 45.533333
| 0.815567
| 0.852123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
714456e8cb645be470325f7641ddf4dbcf8c18f1
| 20,205
|
py
|
Python
|
populator/tests/test_migration_processing.py
|
gbif-norway/resolver-docker
|
1119223d8c640627adb6a3c481f03d04cca39804
|
[
"Apache-2.0"
] | null | null | null |
populator/tests/test_migration_processing.py
|
gbif-norway/resolver-docker
|
1119223d8c640627adb6a3c481f03d04cca39804
|
[
"Apache-2.0"
] | 5
|
2021-02-03T07:24:00.000Z
|
2022-02-03T13:57:44.000Z
|
populator/tests/test_migration_processing.py
|
gbif-norway/resolver-docker
|
1119223d8c640627adb6a3c481f03d04cca39804
|
[
"Apache-2.0"
] | 2
|
2020-11-24T14:23:55.000Z
|
2022-02-26T00:32:39.000Z
|
from populator.management.commands import _migration_processing as migration_processing
from populator.management.commands.populate_resolver import create_duplicates_file
from populator.models import ResolvableObjectMigration
from django.db import connection, transaction
from django.test import TestCase, TransactionTestCase
from django.forms.models import model_to_dict
import os
class MigrationProcessingTest(TestCase):
def _get_temp_count(self):
with connection.cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM temp")
return cursor.fetchone()[0]
def test_import_dwca_imports_rows(self):
migration_processing.import_dwca('my_dataset_id', '/code/populator/tests/mock_data/dwca-seabird_estimates-v1.0.zip')
self.assertEqual(ResolvableObjectMigration.objects.count(), 20191)
def test_import_dwca_skips_bad_rows(self):
migration_processing.import_dwca('my_dataset_id', '/code/populator/tests/mock_data/dwc_archive_bad_rows.zip')
self.assertEqual(ResolvableObjectMigration.objects.count(), 11)
def test_blank_fields_not_imported(self):
migration_processing.import_dwca('my_dataset_id', '/code/populator/tests/mock_data/dwc_archive_bad_rows.zip')
# The DwCA has a column 'sex' with no data, check that we don't import {"sex": ''} into the jsonb field
first = ResolvableObjectMigration.objects.all().first()
self.assertTrue('sex' not in first.data.keys())
def test_get_core_id(self):
self.assertEqual('measurementid', migration_processing.get_core_id('measurementorfact'))
self.assertEqual('occurrenceid', migration_processing.get_core_id('occurrence'))
def test_get_core_id_fail(self):
self.assertEqual(False, migration_processing.get_core_id('measurement'))
def test_get_columns(self):
heading_string = b'HEADING1,\tHeading,heading\theading3\theading4\t'
headings_result = ['heading1,', 'heading,heading', 'heading3', 'heading4']
self.assertEqual(headings_result, migration_processing.get_columns(heading_string))
def test_create_temp_table(self):
headings = ['heading1', 'heading2', 'order', 'heading4']
with connection.cursor() as cursor:
migration_processing.create_temp_table(headings)
cursor.execute('SELECT * FROM temp')
results = cursor.fetchall()
self.assertEqual(len(results), 0)
self.assertEqual(headings, [col[0] for col in cursor.description])
def test_create_temp_table_with_previously_existing_table(self):
headings = ['heading1', 'heading2', 'order', 'heading4']
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (test text)")
migration_processing.create_temp_table(headings)
cursor.execute('SELECT * FROM temp')
cursor.fetchall()
self.assertEqual(headings, [col[0] for col in cursor.description])
def test_sync_id_column_add_new_id_col(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (eventid text, heading2 text, heading3 text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:ba128c35-5e8f-408f-8597-00b1972dace1', 'a', 'b')")
self.assertTrue(migration_processing.sync_id_column('eventid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'eventid': 'urn:uuid:ba128c35-5e8f-408f-8597-00b1972dace1', 'id': 'urn:uuid:ba128c35-5e8f-408f-8597-00b1972dace1', 'heading2': 'a', 'heading3': 'b'})
def test_sync_id_column_replace_id_col(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, eventid text, heading text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:2', 'urn:uuid:1', 'b')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': 'urn:uuid:2', 'occurrenceid':'urn:uuid:2', 'eventid': 'urn:uuid:1', 'heading': 'b'})
def test_sync_id_with_purl_othercatalognumbers_url(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, othercatalognumbers text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:1', 'http://purl.org/nhmuio/id/82b6903f-7613-4aba-b83b-948d0df6391a')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': '82b6903f-7613-4aba-b83b-948d0df6391a', 'occurrenceid': 'urn:uuid:1', 'othercatalognumbers': '82b6903f-7613-4aba-b83b-948d0df6391a'})
def test_sync_id_with_purl_othercatalognumbers_uuid(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, othercatalognumbers text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:1', 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811', 'occurrenceid': 'urn:uuid:1', 'othercatalognumbers': 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811'})
def test_sync_id_with_multiple_othercatalognumbers(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, othercatalognumbers text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:1', 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811|a55cbe46-5f2f-4c07-8223-9d4b0c8ed811')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
# Note that the second othercatalognumber gets deleted
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811', 'occurrenceid': 'urn:uuid:1', 'othercatalognumbers': 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811'})
def test_sync_id_with_duplicate_othercatalognumbers(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, othercatalognumbers text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:1', 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811')")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:2', 'urn:uuid:2', 'b55cbe46-5f2f-4c07-8223-9d4b0c8ed811')")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:3', 'urn:uuid:3', '3136D80A-E74C-11E4-A2DC-00155D012A60')")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:4', 'urn:uuid:4', '3136D80A-E74C-11E4-A2DC-00155D012A60,5FF9E4CE-E74D-11E4-891B-00155D012A60')")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:5', 'urn:uuid:5', 'http://purl.org/nhmuio/id/bdb4f713-5ef6-472b-9e9c-3d03dcb4b6b7')")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:6', 'urn:uuid:6', 'bdb4f713-5ef6-472b-9e9c-3d03dcb4b6b7')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
self.assertEqual([('urn:uuid:1', 'urn:uuid:1', ''), ('urn:uuid:2', 'urn:uuid:2', ''),
('urn:uuid:3', 'urn:uuid:3', ''), ('urn:uuid:4', 'urn:uuid:4', ''),
('urn:uuid:5', 'urn:uuid:5', ''), ('urn:uuid:6', 'urn:uuid:6', '')], cursor.fetchall())
def test_sync_id_with_invalid_othercatalognumbers(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, othercatalognumbers text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:1', 'abc')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
def test_purlfriendly_id_with_urn_prefix(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1')")
migration_processing.purlfriendly_id_column()
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': '1'})
def test_purlfriendly_id_with_url(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text)")
cursor.execute("INSERT INTO temp VALUES ('http://purl.org/nhmuio/id/1')")
migration_processing.purlfriendly_id_column()
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': '1'})
def test_sync_occurrence_id_column_with_event_core(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, occurrenceid text, heading2 text, heading3 text)")
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'urn:uuid:2', 'a', 'b')")
self.assertTrue(migration_processing.sync_id_column('occurrenceid'))
cursor.execute('SELECT * FROM temp')
columns = [col[0] for col in cursor.description]
self.assertEqual(dict(zip(columns, cursor.fetchone())), {'id': 'urn:uuid:2','occurrenceid':'urn:uuid:2', 'heading2': 'a', 'heading3': 'b'})
def test_sync_id_column_with_no_coreid_col_returns_false(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE temp (id text, heading text)") # This happens e.g. http://data.nina.no:8080/ipt/archive.do?r=arko_strandeng occurrence.txt
cursor.execute("INSERT INTO temp VALUES ('urn:uuid:1', 'b')")
self.assertFalse(migration_processing.sync_id_column('occurrenceid'))
def test_add_dataset_id(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE temp (id text, occurrenceid text, "order" text, heading3 text)')
cursor.execute("INSERT INTO temp VALUES ('a', 'b', 'c', 'd')")
cursor.execute("INSERT INTO temp VALUES ('e', 'f', 'g', 'h')")
migration_processing.add_dataset_id('2b52369a-7fe0-4d28-b88c-c882c0ce71d8')
cursor.execute("SELECT * FROM temp")
results = [('a', 'b', 'c', 'd', '2b52369a-7fe0-4d28-b88c-c882c0ce71d8'), ('e', 'f', 'g', 'h', '2b52369a-7fe0-4d28-b88c-c882c0ce71d8')]
self.assertEqual(cursor.fetchall(), results)
def test_add_dataset_id_with_preexisting_dataset_id(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE temp (id text, occurrenceid text, "order" text, datasetid text)')
cursor.execute("INSERT INTO temp VALUES ('a', 'b', 'c', '2b52369a-7fe0-4d28-b88c-c882c0ce71d8')")
cursor.execute("INSERT INTO temp VALUES ('e', 'f', 'g', '2b52369a-7fe0-4d28-b88c-c882c0ce71d8')")
migration_processing.add_dataset_id('2b52369a-7fe0-4d28-b88c-c882c0ce71d8')
cursor.execute("SELECT * FROM temp")
results = [('a', 'b', 'c', '2b52369a-7fe0-4d28-b88c-c882c0ce71d8'), ('e', 'f', 'g', '2b52369a-7fe0-4d28-b88c-c882c0ce71d8')]
self.assertEqual(cursor.fetchall(), results)
def test_insert_json_into_migration_table(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE temp (id text, occurrenceid text, "order" text, heading3 text)')
cursor.execute("INSERT INTO temp VALUES ('ba128c35-5e8f-408f-8597-00b1972dace1', 'ba128c35-5e8f-408f-8597-00b1972dace1', 'a', 'b')")
migration_processing.insert_json_into_migration_table('dataset_id', 'occurrence')
expected = {'id': 'ba128c35-5e8f-408f-8597-00b1972dace1', 'type': 'occurrence',
'data': {'id': 'ba128c35-5e8f-408f-8597-00b1972dace1',
'occurrenceid': 'ba128c35-5e8f-408f-8597-00b1972dace1',
'order': 'a', 'heading3': 'b'},
'dataset_id': 'dataset_id'}
self.assertEqual([model_to_dict(x) for x in ResolvableObjectMigration.objects.all()], [expected])
def test_insert_json_into_migration_table_multiple(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE temp (id text, scientificname text)')
cursor.execute("INSERT INTO temp VALUES ('a', 'eudyptes')")
cursor.execute("INSERT INTO temp VALUES ('b', 'another')")
migration_processing.insert_json_into_migration_table('dataset_id', 'occurrence')
expected = [{'id': 'a', 'data': {'id': 'a', 'scientificname': 'eudyptes'}, 'type': 'occurrence', 'dataset_id': 'dataset_id'},
{'id': 'b', 'data': {'id': 'b', 'scientificname': 'another'}, 'type': 'occurrence', 'dataset_id': 'dataset_id'}]
self.assertEqual([model_to_dict(x) for x in ResolvableObjectMigration.objects.all()], expected)
def test_insert_json_into_migration_table_nulls(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE temp (id text, scientificname text)')
cursor.execute("INSERT INTO temp VALUES ('a', NULL)")
cursor.execute("INSERT INTO temp VALUES ('b', 'another')")
migration_processing.insert_json_into_migration_table('dataset_id', 'occurrence')
expected = [{'id': 'a', 'data': {'id': 'a'}, 'type': 'occurrence', 'dataset_id': 'dataset_id'},
{'id': 'b', 'data': {'id': 'b', 'scientificname': 'another'}, 'type': 'occurrence', 'dataset_id': 'dataset_id'}]
self.assertEqual([model_to_dict(x) for x in ResolvableObjectMigration.objects.all()], expected)
def test_insert_with_previous_dataset(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE temp (id text, sname text)')
cursor.execute("INSERT INTO temp VALUES ('a', 'a-name')")
cursor.execute("INSERT INTO temp VALUES ('b', 'b-name')")
migration_processing.insert_json_into_migration_table('a_d_id', 'occurrence')
with connection.cursor() as cursor:
cursor.execute('DROP TABLE temp')
cursor.execute('CREATE TABLE temp (id text, sname text)')
cursor.execute("INSERT INTO temp VALUES ('c', 'c-name')")
cursor.execute("INSERT INTO temp VALUES ('d', 'd-name')")
migration_processing.insert_json_into_migration_table('b_d_id', 'occurrence')
expected = [
{'id': 'a', 'data': {'id': 'a', 'sname': 'a-name'}, 'type': 'occurrence', 'dataset_id': 'a_d_id'},
{'id': 'b', 'data': {'id': 'b', 'sname': 'b-name'}, 'type': 'occurrence', 'dataset_id': 'a_d_id'},
{'id': 'c', 'data': {'id': 'c', 'sname': 'c-name'}, 'type': 'occurrence', 'dataset_id': 'b_d_id'},
{'id': 'd', 'data': {'id': 'd', 'sname': 'd-name'}, 'type': 'occurrence', 'dataset_id': 'b_d_id'},
]
self.assertEqual([model_to_dict(x) for x in ResolvableObjectMigration.objects.all()], expected)
def test_remove_duplicates(self):
    """remove_duplicates strips staging rows whose id was already migrated."""
    with connection.cursor() as cursor:
        cursor.execute('CREATE TABLE temp (id text, sname text)')
        cursor.execute("INSERT INTO temp VALUES ('x', 'a-name')")
        cursor.execute("INSERT INTO temp VALUES ('b', 'b-name')")
    migration_processing.insert_json_into_migration_table('a_d_id', 'occurrence')
    # Second staging table re-uses id 'x', which is now in the migration table.
    with connection.cursor() as cursor:
        cursor.execute('DROP TABLE temp')
        cursor.execute('CREATE TABLE temp (id text, sname text)')
        cursor.execute("INSERT INTO temp VALUES ('x', 'c-name')")
        cursor.execute("INSERT INTO temp VALUES ('d', 'd-name')")
    migration_processing.remove_duplicates()
    with connection.cursor() as cursor:
        cursor.execute('SELECT COUNT(*) FROM temp')
        temp_count = cursor.fetchone()[0]
    # Only one staging row should survive (presumably 'd' — the surviving id
    # itself is not asserted here).
    self.assertEqual(temp_count, 1)
def test_insert_with_previous_dataset_with_duplicates_keeps_first_result(self):
    """On an id clash, the record from the first-imported dataset wins."""
    with connection.cursor() as cursor:
        cursor.execute('CREATE TABLE temp (id text, sname text)')
        cursor.execute("INSERT INTO temp VALUES ('x', 'a-name')")
        cursor.execute("INSERT INTO temp VALUES ('b', 'b-name')")
    migration_processing.insert_json_into_migration_table('a_d_id', 'occurrence')
    # Second dataset re-uses id 'x' with a different name.
    with connection.cursor() as cursor:
        cursor.execute('DROP TABLE temp')
        cursor.execute('CREATE TABLE temp (id text, sname text)')
        cursor.execute("INSERT INTO temp VALUES ('x', 'c-name')")
        cursor.execute("INSERT INTO temp VALUES ('d', 'd-name')")
    migration_processing.remove_duplicates()
    migration_processing.insert_json_into_migration_table('b_d_id', 'occurrence')
    # 'x' keeps 'a-name' and the a_d_id attribution; only 'd' comes from b_d_id.
    expected = [
        {'id': 'b', 'data': {'id': 'b', 'sname': 'b-name'}, 'type': 'occurrence', 'dataset_id': 'a_d_id'},
        {'id': 'x', 'data': {'id': 'x', 'sname': 'a-name'}, 'type': 'occurrence', 'dataset_id': 'a_d_id'},
        {'id': 'd', 'data': {'id': 'd', 'sname': 'd-name'}, 'type': 'occurrence', 'dataset_id': 'b_d_id'},
    ]
    results = [model_to_dict(x) for x in ResolvableObjectMigration.objects.all()]
    self.assertEqual(results, expected)
def test_record_duplicates_works_with_records_with_duplicate_ids(self):
    """record_duplicates appends one pipe-separated line per clashing id."""
    file = '/code/test_duplicates.txt'
    create_duplicates_file(file)
    with connection.cursor() as cursor:
        cursor.execute('CREATE TABLE temp (id text, sname text)')
        cursor.execute("INSERT INTO temp VALUES ('x', 'a-name')")
        cursor.execute("INSERT INTO temp VALUES ('b', 'b-name')")
    migration_processing.insert_json_into_migration_table('a_d_id', 'occurrence')
    # Staging table for the second dataset re-uses id 'x'.
    with connection.cursor() as cursor:
        cursor.execute('DROP TABLE temp')
        cursor.execute('CREATE TABLE temp (id text, sname text)')
        cursor.execute("INSERT INTO temp VALUES ('x', 'c-name')")
        cursor.execute("INSERT INTO temp VALUES ('d', 'd-name')")
    migration_processing.record_duplicates('b_d_id', 'occurrence', file)
    with open(file) as f:
        content = f.readlines()
    self.assertEqual(len(content), 2)  # Including header
    result = [line.rstrip('\n') for line in content]
    # Fields: id | new record JSON | new dataset | type | old record JSON | old dataset.
    expected = ['x', '{"id":"x","sname":"c-name"}', 'b_d_id', 'occurrence', '{"id": "x", "sname": "a-name"}', 'a_d_id']
    self.assertEqual(result[1].split('|'), expected)
def test_record_duplicates_works_with_weird_char_encoding(self):
    """Re-importing the same DwC-A inserts nothing and records every clash."""
    file = '/code/duplicates.txt'
    create_duplicates_file(file)
    count = migration_processing.import_dwca('my_dataset_id', '/code/populator/tests/mock_data/dwca-molltax-v1.195.zip')
    self.assertEqual(count, 23227)
    # Second import of the identical archive: every record is a duplicate.
    count = migration_processing.import_dwca('my_dataset_id', '/code/populator/tests/mock_data/dwca-molltax-v1.195.zip')
    self.assertEqual(count, 0)
    self.assertEqual(ResolvableObjectMigration.objects.count(), 23227)
    with open(file) as f:
        content = f.readlines()
    # 23227 duplicate lines plus the header row.
    self.assertEqual(len(content), 23228)
    # NOTE(review): file is only removed when the asserts pass — consider addCleanup.
    os.remove(file)
def test_occurrence_records_get_occurrence_id(self):  # Necessary for event-based datasets
    # TODO: not implemented yet — placeholder recording the requirement.
    pass
| 63.939873
| 218
| 0.652215
| 2,461
| 20,205
| 5.199106
| 0.099553
| 0.087378
| 0.060883
| 0.073701
| 0.841501
| 0.793357
| 0.751778
| 0.72583
| 0.706682
| 0.67964
| 0
| 0.04938
| 0.201188
| 20,205
| 315
| 219
| 64.142857
| 0.743371
| 0.01465
| 0
| 0.538462
| 0
| 0.058608
| 0.345023
| 0.078782
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.113553
| false
| 0.003663
| 0.054945
| 0
| 0.175824
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a436e482639b0abf6bcb6515fdce626a869a39e5
| 130
|
py
|
Python
|
textattack/models/__init__.py
|
fighting41love/TextAttack
|
24e48f0022dc3a7bdcd5cbb3430f1c72cfcb522d
|
[
"MIT"
] | 2
|
2020-07-08T08:55:37.000Z
|
2020-09-03T00:57:38.000Z
|
textattack/models/__init__.py
|
SatoshiRobatoFujimoto/TextAttack
|
a809a9bddddff9f41750949e26edde26c8af6cfa
|
[
"MIT"
] | null | null | null |
textattack/models/__init__.py
|
SatoshiRobatoFujimoto/TextAttack
|
a809a9bddddff9f41750949e26edde26c8af6cfa
|
[
"MIT"
] | null | null | null |
from . import classification
from . import entailment
from . import translation
from . import summarization
from . import helpers
| 21.666667
| 28
| 0.807692
| 15
| 130
| 7
| 0.466667
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 130
| 6
| 29
| 21.666667
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a44d59b00e3227d6fcc0b5b424ab2c91d5b4da9f
| 5,153
|
py
|
Python
|
tests/units/round_messages/reach_quorum_test.py
|
iconloop/LFT2
|
3862d63aaf148f99240c6d4a54408dc2520b94c6
|
[
"Apache-2.0"
] | 23
|
2020-02-10T10:08:20.000Z
|
2021-11-17T02:02:06.000Z
|
tests/units/round_messages/reach_quorum_test.py
|
iconloop/LFT2
|
3862d63aaf148f99240c6d4a54408dc2520b94c6
|
[
"Apache-2.0"
] | 3
|
2020-03-05T07:18:16.000Z
|
2020-06-01T09:03:11.000Z
|
tests/units/round_messages/reach_quorum_test.py
|
iconloop/LFT2
|
3862d63aaf148f99240c6d4a54408dc2520b94c6
|
[
"Apache-2.0"
] | 6
|
2020-02-21T02:02:40.000Z
|
2021-12-01T23:35:07.000Z
|
import os
import random
import pytest
from typing import Tuple, Sequence
from lft.app.vote import DefaultVoteFactory, DefaultVote
from lft.consensus.round import RoundMessages
@pytest.fixture
async def setup(voter_num: int):
    """Build `voter_num` random voter ids, shuffled vote factories, a random
    quorum in [2, voter_num] and an empty RoundMessages container."""
    voter_ids = [os.urandom(16) for _ in range(voter_num)]
    factories = [DefaultVoteFactory(voter_id) for voter_id in voter_ids]
    random.shuffle(factories)
    quorum_size = random.randint(2, len(factories))
    return voter_ids, factories, quorum_size, RoundMessages()
# Shape of the `setup` fixture's return value:
# (voter ids, vote factories, quorum size, round messages container).
Setup = Tuple[Sequence[bytes], Sequence[DefaultVoteFactory], int, RoundMessages]
@pytest.mark.asyncio
@pytest.mark.parametrize("voter_num", range(4, 100))
async def test_reach_quorum(setup: Setup, voter_num: int):
    """quorum - 1 votes must not reach quorum; one more vote must.

    Every vote carries fresh random data/commit ids, so at the end there is
    a quorum of votes but (presumably because they disagree on data) no
    quorum consensus.
    """
    voters, vote_factories, quorum, round_messages = setup
    # Add votes from one fewer voter than the quorum requires.
    for vote_factory in vote_factories[:quorum - 1]:
        vote = await vote_factory.create_vote(data_id=os.urandom(16),
                                              commit_id=os.urandom(16),
                                              epoch_num=random.randint(0, 10),
                                              round_num=random.randint(0, 10))
        round_messages.add_vote(vote)
    assert not round_messages.reach_quorum(quorum)
    # The last factory was not in the [:quorum - 1] slice; its vote tips it over.
    vote_factory = vote_factories[-1]
    vote = await vote_factory.create_vote(data_id=os.urandom(16),
                                          commit_id=os.urandom(16),
                                          epoch_num=random.randint(0, 10),
                                          round_num=random.randint(0, 10))
    round_messages.add_vote(vote)
    assert round_messages.reach_quorum(quorum)
    assert not round_messages.reach_quorum_consensus(quorum)
@pytest.mark.asyncio
@pytest.mark.parametrize("voter_num", range(4, 100))
async def test_reach_quorum_duplicate(setup: Setup, voter_num: int):
    """Multiple votes from the same voter count once toward the quorum."""
    voters, vote_factories, quorum, round_messages = setup
    for vote_factory in vote_factories[:quorum - 1]:
        # Duplicate votes for same data.
        # The votes have different vote ID, but share the factory's voter_id,
        # so they must not inflate the quorum count.
        for _ in range(random.randint(2, quorum)):
            vote = DefaultVote(id_=os.urandom(16),
                               data_id=os.urandom(16),
                               commit_id=os.urandom(16),
                               voter_id=vote_factory._node_id,
                               epoch_num=random.randint(0, 10),
                               round_num=random.randint(0, 10))
            round_messages.add_vote(vote)
    assert not round_messages.reach_quorum(quorum)
    # One genuine vote from a previously unused voter reaches the quorum.
    vote_factory = vote_factories[-1]
    vote = await vote_factory.create_vote(data_id=os.urandom(16),
                                          commit_id=os.urandom(16),
                                          epoch_num=random.randint(0, 10),
                                          round_num=random.randint(0, 10))
    round_messages.add_vote(vote)
    assert round_messages.reach_quorum(quorum)
    assert not round_messages.reach_quorum_consensus(quorum)
@pytest.mark.asyncio
@pytest.mark.parametrize("voter_num", range(4, 100))
async def test_reach_quorum_none_vote(setup: Setup, voter_num: int):
    """quorum - 1 none-votes must not reach quorum; one more must."""
    voter_ids, factories, quorum, messages = setup
    for factory in factories[:quorum - 1]:
        none_vote = factory.create_none_vote(epoch_num=random.randint(0, 10), round_num=random.randint(0, 10))
        messages.add_vote(none_vote)
    assert not messages.reach_quorum(quorum)
    # A none-vote from the last, unused factory pushes the count to quorum.
    final_vote = factories[-1].create_none_vote(epoch_num=random.randint(0, 10), round_num=random.randint(0, 10))
    messages.add_vote(final_vote)
    assert messages.reach_quorum(quorum)
@pytest.mark.asyncio
@pytest.mark.parametrize("voter_num", range(4, 100))
async def test_reach_quorum_integration(setup: Setup, voter_num: int):
    """Mixed vote flavours (via _random_vote), with duplicates per voter,
    still require quorum distinct voters."""
    voters, vote_factories, quorum, round_messages = setup
    for vote_factory in vote_factories[:quorum - 1]:
        # Several random votes per voter; duplicates must not inflate the count.
        for _ in range(random.randint(2, quorum)):
            vote = await _random_vote(vote_factory)
            round_messages.add_vote(vote)
    assert not round_messages.reach_quorum(quorum)
    # One vote from the last (unused) factory reaches the quorum.
    vote_factory = vote_factories[-1]
    vote = await _random_vote(vote_factory)
    round_messages.add_vote(vote)
    assert round_messages.reach_quorum(quorum)
async def _random_vote(vote_factory: DefaultVoteFactory):
    """Return one of three vote flavours — hand-built, lazy, or none —
    chosen at random for the given factory's voter."""
    choice = random.randint(0, 15)
    if choice < 5:
        # Hand-built vote with fresh random ids for every field.
        return DefaultVote(id_=os.urandom(16),
                           data_id=os.urandom(16),
                           commit_id=os.urandom(16),
                           voter_id=vote_factory._node_id,
                           epoch_num=random.randint(0, 10),
                           round_num=random.randint(0, 10))
    if choice < 10:
        return vote_factory.create_lazy_vote(voter_id=vote_factory._node_id,
                                             epoch_num=random.randint(0, 10),
                                             round_num=random.randint(0, 10))
    return vote_factory.create_none_vote(epoch_num=random.randint(0, 10),
                                         round_num=random.randint(0, 10))
| 39.638462
| 110
| 0.627984
| 636
| 5,153
| 4.836478
| 0.116352
| 0.092978
| 0.086476
| 0.09948
| 0.796814
| 0.789662
| 0.789662
| 0.789662
| 0.768856
| 0.768856
| 0
| 0.030466
| 0.280225
| 5,153
| 129
| 111
| 39.945736
| 0.798868
| 0.01242
| 0
| 0.71875
| 0
| 0
| 0.007078
| 0
| 0
| 0
| 0
| 0
| 0.104167
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.104167
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a46139069b278b7a3371e6fcb3a75093788c168f
| 82
|
py
|
Python
|
twispy/__init__.py
|
346pro/Twispy
|
0d7729fc725fc718da9305ded897bce9021a3337
|
[
"MIT"
] | 12
|
2017-04-08T17:21:27.000Z
|
2018-08-30T14:26:18.000Z
|
twispy/__init__.py
|
StarryBlueSky/Twispy
|
0d7729fc725fc718da9305ded897bce9021a3337
|
[
"MIT"
] | 2
|
2017-04-14T20:01:55.000Z
|
2017-11-06T13:14:47.000Z
|
twispy/__init__.py
|
StarryBlueSky/Twispy
|
0d7729fc725fc718da9305ded897bce9021a3337
|
[
"MIT"
] | 1
|
2019-11-03T02:24:40.000Z
|
2019-11-03T02:24:40.000Z
|
# coding=utf-8
from twispy.request import Request
from twispy.handler import API
| 16.4
| 34
| 0.804878
| 13
| 82
| 5.076923
| 0.692308
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.134146
| 82
| 4
| 35
| 20.5
| 0.915493
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f10140909d7c9a6eafd24e2fedc3e6277375a929
| 21
|
py
|
Python
|
mielelogic/__init__.py
|
PTST/MieleLogic
|
25ff2d2f2d59c6e944db9d9804c3fe62894a312a
|
[
"MIT"
] | null | null | null |
mielelogic/__init__.py
|
PTST/MieleLogic
|
25ff2d2f2d59c6e944db9d9804c3fe62894a312a
|
[
"MIT"
] | null | null | null |
mielelogic/__init__.py
|
PTST/MieleLogic
|
25ff2d2f2d59c6e944db9d9804c3fe62894a312a
|
[
"MIT"
] | null | null | null |
from .miele import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1526efede384c420a349749deb717be3531861e
| 30
|
py
|
Python
|
app/model/__init__.py
|
wax911/insomnia-graphql-generator
|
290490a96eaf6d6a892e2e1ee4e970b3b3b5c02d
|
[
"Apache-2.0"
] | 7
|
2018-09-18T08:44:18.000Z
|
2021-03-31T23:36:35.000Z
|
app/model/__init__.py
|
wax911/insomnia-graphql-generator
|
290490a96eaf6d6a892e2e1ee4e970b3b3b5c02d
|
[
"Apache-2.0"
] | null | null | null |
app/model/__init__.py
|
wax911/insomnia-graphql-generator
|
290490a96eaf6d6a892e2e1ee4e970b3b3b5c02d
|
[
"Apache-2.0"
] | 2
|
2018-05-06T13:43:15.000Z
|
2021-01-26T01:03:35.000Z
|
from app.model.basic import *
| 15
| 29
| 0.766667
| 5
| 30
| 4.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f1637b2f1a04bcbe1902f64e15364798ff383c47
| 41
|
py
|
Python
|
extra/graph/company_tree.py
|
lsbardel/mathfun
|
98e7c210409c2b5777e91059c3651cef4f3045dd
|
[
"BSD-3-Clause"
] | null | null | null |
extra/graph/company_tree.py
|
lsbardel/mathfun
|
98e7c210409c2b5777e91059c3651cef4f3045dd
|
[
"BSD-3-Clause"
] | null | null | null |
extra/graph/company_tree.py
|
lsbardel/mathfun
|
98e7c210409c2b5777e91059c3651cef4f3045dd
|
[
"BSD-3-Clause"
] | null | null | null |
from mathfun.graph.template import Graph
| 20.5
| 40
| 0.853659
| 6
| 41
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74c34c3ef54a2b9c39a6eda61946e83d4b50fd30
| 139
|
py
|
Python
|
learnergy/core/__init__.py
|
anukaal/learnergy
|
704fc2b3fcb80df41ed28d750dc4e6475df23315
|
[
"Apache-2.0"
] | 39
|
2020-02-27T00:47:45.000Z
|
2022-03-28T14:57:26.000Z
|
learnergy/core/__init__.py
|
anukaal/learnergy
|
704fc2b3fcb80df41ed28d750dc4e6475df23315
|
[
"Apache-2.0"
] | 5
|
2021-05-11T08:23:37.000Z
|
2022-01-20T12:50:59.000Z
|
learnergy/core/__init__.py
|
anukaal/learnergy
|
704fc2b3fcb80df41ed28d750dc4e6475df23315
|
[
"Apache-2.0"
] | 6
|
2020-04-15T00:23:13.000Z
|
2022-01-29T16:22:05.000Z
|
"""A core package for all common learnergy modules.
"""
from learnergy.core.dataset import Dataset
from learnergy.core.model import Model
| 23.166667
| 51
| 0.791367
| 20
| 139
| 5.5
| 0.6
| 0.236364
| 0.309091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129496
| 139
| 5
| 52
| 27.8
| 0.909091
| 0.345324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
74c75d357482f50a3601afd1d0f8272e5c4708f9
| 47
|
py
|
Python
|
source/Utils.py
|
jonbrew/Arbiter
|
7959404e2a088ff9dd3010c7f0798db12b88c915
|
[
"MIT"
] | null | null | null |
source/Utils.py
|
jonbrew/Arbiter
|
7959404e2a088ff9dd3010c7f0798db12b88c915
|
[
"MIT"
] | null | null | null |
source/Utils.py
|
jonbrew/Arbiter
|
7959404e2a088ff9dd3010c7f0798db12b88c915
|
[
"MIT"
] | null | null | null |
def pct_inc(p1, p2):
    """Return the percentage change from p1 to p2 (negative for a decrease)."""
    delta = p2 - p1
    return delta / p1 * 100
| 15.666667
| 24
| 0.617021
| 10
| 47
| 2.8
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 0.148936
| 47
| 2
| 25
| 23.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
741190ef4d317d0383f06f67e74962e5969abf5b
| 198
|
py
|
Python
|
samples/driver-hello-world/lib/__init__.py
|
bafu/lib-python-databox
|
fc5f0184e2fdcf5cc8d2fc45546105232b5ada5a
|
[
"MIT"
] | null | null | null |
samples/driver-hello-world/lib/__init__.py
|
bafu/lib-python-databox
|
fc5f0184e2fdcf5cc8d2fc45546105232b5ada5a
|
[
"MIT"
] | null | null | null |
samples/driver-hello-world/lib/__init__.py
|
bafu/lib-python-databox
|
fc5f0184e2fdcf5cc8d2fc45546105232b5ada5a
|
[
"MIT"
] | null | null | null |
from lib.utils import *
#from lib.catalog import *
from lib.export import *
#from lib.subscriptions import *
#from lib.key_value import *
##from lib.time_series import *
from lib.core_store import *
| 28.285714
| 32
| 0.767677
| 31
| 198
| 4.806452
| 0.419355
| 0.328859
| 0.52349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 198
| 7
| 33
| 28.285714
| 0.871345
| 0.565657
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
746527a8ed2f77b4c4d3fcc54299beffcb897f26
| 1,887
|
py
|
Python
|
STUDENTS1.py
|
WitteDuivel/library-management-oncemore
|
09f62c86b86028d8b76d9757051110732b290b46
|
[
"MIT"
] | 1
|
2021-03-14T18:28:49.000Z
|
2021-03-14T18:28:49.000Z
|
STUDENTS1.py
|
WitteDuivel/library-management-oncemore
|
09f62c86b86028d8b76d9757051110732b290b46
|
[
"MIT"
] | null | null | null |
STUDENTS1.py
|
WitteDuivel/library-management-oncemore
|
09f62c86b86028d8b76d9757051110732b290b46
|
[
"MIT"
] | 1
|
2021-03-14T16:23:21.000Z
|
2021-03-14T16:23:21.000Z
|
import mysql.connector
mydb=mysql.connector.connect(host="localhost",user="root",passwd="5678",database="library_management_project")
mycursor=mydb.cursor()
def useradd():
    """Prompt on stdin for a student's details and INSERT a row into `students`.

    Uses the module-level `mycursor`/`mydb` connection and commits immediately.
    """
    studIDp=int(input("ENTER YOUR STUDENT_ID:- "))
    stfnamep=input("ENTER YOUR FIRST NAME:- ")
    stlnamep=input("ENTER YOUR LAST NAME:- ")
    stcoursep=input("ENTER YOUR COURSE:- ")
    styearp=int(input("ENTER YOUR ADMISSION YEAR:- "))
    stcontactp=int(input("ENTER YOUR CONTACT NUMBER:- "))
    stagep=int(input("ENTER YOUR AGE:- "))
    # NOTE(review): storing MMDDYYYY as int drops a leading zero — confirm schema.
    stbirthdatep=int(input("ENTER YOUR DATE OF BIRTH(MMDDYYYY):- "))
    stgenderp=input("ENTER YOUR GENDER:- ")
    # Parameterised insert: values are bound by the driver, not formatted into SQL.
    sql = "INSERT INTO students (stud_ID, stfname, stlname, stcourse, styear, stcontact, stage, stbirthdate, stgender) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
    val = (studIDp, stfnamep, stlnamep, stcoursep, styearp, stcontactp, stagep, stbirthdatep, stgenderp)
    mycursor.execute(sql, val)
    mydb.commit()
    print(mycursor.rowcount, "was inserted.")
def userupdate():
    """Prompt on stdin for a student's details and UPDATE the row with that id.

    Bug fix: the original WHERE clause was `WHERE stud_ID = studID`, which
    compares the column against a literal identifier instead of binding the
    entered id — so the statement either matched nothing or raised an
    unknown-column error. The id is now bound as a parameter.
    """
    studIDp = int(input("ENTER YOUR STUDENT_ID:- "))
    stfnamep = input("ENTER YOUR FIRST NAME:- ")
    stlnamep = input("ENTER YOUR LAST NAME:- ")
    stcoursep = input("ENTER YOUR COURSE:- ")
    styearp = int(input("ENTER YOUR ADMISSION YEAR:- "))
    stcontactp = int(input("ENTER YOUR CONTACT NUMBER:- "))
    stagep = int(input("ENTER YOUR AGE:- "))
    stbirthdatep = int(input("ENTER YOUR DATE OF BIRTH(MMDDYYYY):- "))
    stgenderp = input("ENTER YOUR GENDER:- ")
    sql = ("UPDATE STUDENTS SET stud_ID = %s, stfname = %s, stlname = %s, "
           "stcourse = %s, styear = %s, stcontact = %s, stage = %s, "
           "stbirthdate = %s, stgender = %s WHERE stud_ID = %s")
    # The id is bound twice: once as the (unchanged) new value, once in WHERE.
    val = (studIDp, stfnamep, stlnamep, stcoursep, styearp, stcontactp,
           stagep, stbirthdatep, stgenderp, studIDp)
    mycursor.execute(sql, val)
    mydb.commit()
    print(mycursor.rowcount, "records updated")
| 55.5
| 185
| 0.670376
| 233
| 1,887
| 5.399142
| 0.330472
| 0.143084
| 0.200318
| 0.135135
| 0.701908
| 0.701908
| 0.701908
| 0.701908
| 0.694754
| 0.694754
| 0
| 0.002569
| 0.174881
| 1,887
| 33
| 186
| 57.181818
| 0.805395
| 0
| 0
| 0.727273
| 0
| 0.060606
| 0.450917
| 0.014024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0.030303
| 0.030303
| 0
| 0.090909
| 0.060606
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
749734d7ec46a051a2ef757e3146ede4e25e0749
| 27
|
py
|
Python
|
migemo/__init__.py
|
oguna/pymigemo
|
850a08c8cf9ba0bb892b07d384f0c3f363bd9ae9
|
[
"MIT"
] | null | null | null |
migemo/__init__.py
|
oguna/pymigemo
|
850a08c8cf9ba0bb892b07d384f0c3f363bd9ae9
|
[
"MIT"
] | null | null | null |
migemo/__init__.py
|
oguna/pymigemo
|
850a08c8cf9ba0bb892b07d384f0c3f363bd9ae9
|
[
"MIT"
] | null | null | null |
from .migemo import Migemo
| 13.5
| 26
| 0.814815
| 4
| 27
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74a70ec756ca1775a3b3204ded2ec2e99826ec62
| 32
|
py
|
Python
|
tests/example.py
|
orsinium-labs/benchmark-imports
|
d5b141218a57645d7e1ec0134ca2e6c8e7f9759f
|
[
"MIT"
] | null | null | null |
tests/example.py
|
orsinium-labs/benchmark-imports
|
d5b141218a57645d7e1ec0134ca2e6c8e7f9759f
|
[
"MIT"
] | null | null | null |
tests/example.py
|
orsinium-labs/benchmark-imports
|
d5b141218a57645d7e1ec0134ca2e6c8e7f9759f
|
[
"MIT"
] | null | null | null |
import math
# Tiny example script: print sin(1 radian) so importing this module has a
# visible, deterministic side effect.
print(math.sin(1))
| 8
| 18
| 0.71875
| 6
| 32
| 3.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.125
| 32
| 3
| 19
| 10.666667
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
776a7a4fc62c605b878275e565078eced1e458e0
| 191
|
py
|
Python
|
StormRequest/Exceptions.py
|
notariuss/StormRequest
|
15b491cfb2780d802dc973094547774ebd703ddc
|
[
"Unlicense"
] | 1
|
2018-09-14T18:18:56.000Z
|
2018-09-14T18:18:56.000Z
|
StormRequest/Exceptions.py
|
notariuss/StormRequest
|
15b491cfb2780d802dc973094547774ebd703ddc
|
[
"Unlicense"
] | null | null | null |
StormRequest/Exceptions.py
|
notariuss/StormRequest
|
15b491cfb2780d802dc973094547774ebd703ddc
|
[
"Unlicense"
] | null | null | null |
class UnsupportedMethod(Exception):
    """Raised when an unsupported method is requested.

    Bug fix: the original constructor accepted an ``errors`` argument but
    silently discarded it. It is now optional (backward compatible) and kept
    on the instance so callers can inspect the details.
    """

    def __init__(self, message, errors=None):
        super().__init__(message)
        # Previously thrown away; preserved for the caller.
        self.errors = errors
class NoPayload(Exception):
    """Raised when a request that requires a payload was given none.

    The explicit no-op ``__init__`` was removed: it only forwarded zero
    arguments to ``Exception.__init__``, which the inherited constructor
    already does — and the inherited form additionally permits an optional
    message, which the old signature rejected (backward compatible widening).
    """
| 23.875
| 40
| 0.680628
| 19
| 191
| 6
| 0.526316
| 0.210526
| 0.280702
| 0.350877
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193717
| 191
| 8
| 41
| 23.875
| 0.74026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77ab3cbb077ea7c6aef028f303ea5fc868c7f50b
| 818
|
py
|
Python
|
configs/data/kittimots_motion_supp.py
|
MSiam/video_class_agnostic_segmentation
|
c4614fe675e8a5352012f603c15bc24fb43d690c
|
[
"Apache-2.0"
] | 15
|
2021-03-30T07:01:52.000Z
|
2022-02-24T06:35:27.000Z
|
configs/data/kittimots_motion_supp.py
|
MSiam/video_class_agnostic_segmentation
|
c4614fe675e8a5352012f603c15bc24fb43d690c
|
[
"Apache-2.0"
] | 1
|
2021-06-29T11:32:14.000Z
|
2021-06-29T14:18:32.000Z
|
configs/data/kittimots_motion_supp.py
|
MSiam/video_class_agnostic_segmentation
|
c4614fe675e8a5352012f603c15bc24fb43d690c
|
[
"Apache-2.0"
] | 3
|
2021-05-11T13:48:45.000Z
|
2021-11-12T01:21:28.000Z
|
from configs.data.kittimots_motion import *
# Dataset settings: per-GPU batch/worker sizes plus train/val/test splits.
# All three splits share the image and suppressed-flow prefixes; val and test
# point at the same annotation file and use the test pipeline.
data = dict(
    imgs_per_gpu=2,
    workers_per_gpu=0,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/KITTIMOTS_MOSeg_train.json',
        img_prefix=data_root + 'images/',
        flow_prefix=data_root + 'flow_suppressed/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/KITTIMOTS_MOSeg_val.json',
        img_prefix=data_root + 'images/',
        flow_prefix=data_root + 'flow_suppressed/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/KITTIMOTS_MOSeg_val.json',
        img_prefix=data_root + 'images/',
        flow_prefix=data_root + 'flow_suppressed/',
        pipeline=test_pipeline))
| 34.083333
| 70
| 0.666259
| 100
| 818
| 5.07
| 0.3
| 0.142012
| 0.16568
| 0.112426
| 0.804734
| 0.804734
| 0.804734
| 0.804734
| 0.804734
| 0.804734
| 0
| 0.003165
| 0.227384
| 818
| 23
| 71
| 35.565217
| 0.799051
| 0
| 0
| 0.5
| 0
| 0
| 0.218826
| 0.134474
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7ad40cb378a37aba085680b342019de80c7c506e
| 3,108
|
py
|
Python
|
apps/product/migrations/0001_initial.py
|
wasim2263/super-shop-management
|
9e8a9fdec7c3f47842eb97b1b682f6ab37b5e22d
|
[
"MIT"
] | null | null | null |
apps/product/migrations/0001_initial.py
|
wasim2263/super-shop-management
|
9e8a9fdec7c3f47842eb97b1b682f6ab37b5e22d
|
[
"MIT"
] | null | null | null |
apps/product/migrations/0001_initial.py
|
wasim2263/super-shop-management
|
9e8a9fdec7c3f47842eb97b1b682f6ab37b5e22d
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.12 on 2021-06-17 19:29
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Auto-generated initial schema for the product app.

    Creates Brand, Category and Product. All three share the model_utils
    timestamp fields (created/modified) and a soft-delete flag (is_removed).
    Product references Brand (nullable, optional) and Category (nullable)
    with SET_NULL on delete.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Brand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('is_removed', models.BooleanField(default=False)),
                ('name', models.CharField(max_length=255, unique=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('is_removed', models.BooleanField(default=False)),
                ('name', models.CharField(max_length=255, unique=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('is_removed', models.BooleanField(default=False)),
                ('code', models.CharField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('unit_price', models.DecimalField(decimal_places=2, default=0.0, max_digits=8)),
                ('unit_type', models.CharField(choices=[('kg', 'KG'), ('piece', 'Piece'), ('litre', 'Litre')], max_length=20)),
                ('stock', models.DecimalField(decimal_places=2, default=0.0, max_digits=8)),
                ('brand', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.brand')),
                ('category', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='product.category')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| 49.333333
| 147
| 0.606178
| 316
| 3,108
| 5.835443
| 0.256329
| 0.058568
| 0.078091
| 0.084599
| 0.769523
| 0.769523
| 0.752712
| 0.732646
| 0.732646
| 0.732646
| 0
| 0.016309
| 0.250322
| 3,108
| 62
| 148
| 50.129032
| 0.775107
| 0.014801
| 0
| 0.581818
| 1
| 0
| 0.092157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.072727
| 0
| 0.145455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7afb86fb81a4eb99c74d8f43a8e7313582e3e565
| 71
|
py
|
Python
|
src/runner/__init__.py
|
cmlab-mira/MedicalPro
|
3918c95197fd24406ce2117cc7ff9ce21bb8c620
|
[
"MIT"
] | 6
|
2020-02-01T07:19:32.000Z
|
2021-05-10T13:55:49.000Z
|
src/runner/__init__.py
|
cmlab-mira/template
|
ae462f6eed9f3aa71d130c281c01c22fe0124a7b
|
[
"MIT"
] | 19
|
2019-05-21T12:48:37.000Z
|
2020-04-01T09:56:42.000Z
|
src/runner/__init__.py
|
cmlab-mira/template
|
ae462f6eed9f3aa71d130c281c01c22fe0124a7b
|
[
"MIT"
] | 1
|
2019-08-09T17:06:56.000Z
|
2019-08-09T17:06:56.000Z
|
from .trainers import *
from .predictors import *
from .utils import *
| 17.75
| 25
| 0.746479
| 9
| 71
| 5.888889
| 0.555556
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169014
| 71
| 3
| 26
| 23.666667
| 0.898305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb0558ca4d28ffda3e0ececd8bb80adb17e6b2fd
| 2,208
|
py
|
Python
|
testmapwithspy1.py
|
Toinas/IntelligentSuit
|
f3d33b327406d899d18ecf5fd6a100b0c786fdce
|
[
"MIT"
] | null | null | null |
testmapwithspy1.py
|
Toinas/IntelligentSuit
|
f3d33b327406d899d18ecf5fd6a100b0c786fdce
|
[
"MIT"
] | null | null | null |
testmapwithspy1.py
|
Toinas/IntelligentSuit
|
f3d33b327406d899d18ecf5fd6a100b0c786fdce
|
[
"MIT"
] | null | null | null |
import numpy as np
# 16x16 sprite stored as a flat, row-major list of ints
# (0 = background, 1 and 2 = sprite pixel values).
image=[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
       0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
       0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0,
       0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0,
       0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
       0, 0, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 0, 0, 0,
       0, 0, 0, 1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1, 0, 0,
       0, 0, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0,
       0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 2, 1, 1, 0, 0, 0,
       0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
       0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0,
       0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
def blit(image, img_width, location, frame_size):
    """Copy *image* (flat list, rows of *img_width*) into a fresh
    frame_size x frame_size frame at *location* = (row, col).

    Untouched frame cells are '.'; returns the frame as a flat list.
    """
    frame = ['.'] * frame_size ** 2
    rows = [image[start:start + img_width]
            for start in range(0, len(image), img_width)]
    base = location[0] * frame_size + location[1]
    for row_num, row in enumerate(rows):
        row_offset = base + row_num * frame_size
        for col, pixel in enumerate(row):
            frame[row_offset + col] = pixel
    return frame
def display(width, frame):
    """Print *frame* (a flat list) as rows of *width* cells, space-separated.

    Bug fixes: the original used a Python 2 print statement (a syntax error
    on Python 3) and joined row cells directly, which raises TypeError for
    the int pixel values blit() puts in the frame — cells are now converted
    to str before joining.
    """
    rows = [frame[x:x + width] for x in range(0, len(frame), width)]
    for row in rows:
        print(' '.join(str(cell) for cell in row))
def scrollimage(image):
    """Scroll a flat 256-element (16x16) image one pixel right, wrapping.

    Bug fix: np.roll returns a new array rather than rolling in place; the
    original discarded the result and returned the image unscrolled. The
    rolled array is now used.
    """
    grid = np.reshape(image, (16, 16))
    grid = np.roll(grid, 1, axis=1)
    return np.reshape(grid, 256)
if __name__ == "__main__":
frame = blit(image, 16, [0,0], 16)
display(16, frame)
print "\n"
new image = scrollimage(image)
display(16, frame)
# frame = blit(image, 3, [3,3], 16)
# display(12, frame)
# print "\n"
# frame = blit(image, 3, [6,6], 16)
# display(12, frame)
# print "\n"
# frame = blit(image, 3, [9,9], 16)
# display(12, frame)
| 33.454545
| 80
| 0.408062
| 419
| 2,208
| 2.102625
| 0.107399
| 0.211124
| 0.255392
| 0.263337
| 0.419977
| 0.419977
| 0.419977
| 0.363224
| 0.363224
| 0.363224
| 0
| 0.22741
| 0.398551
| 2,208
| 65
| 81
| 33.969231
| 0.435994
| 0.081522
| 0
| 0.2
| 0
| 0
| 0.006144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.025
| null | null | 0.05
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bb225948430a1e881fb208df696ef5e354cd15ce
| 222
|
py
|
Python
|
power_perceiver/np_batch_processor/__init__.py
|
openclimatefix/power_perceiver
|
bafcdfaf6abf42fbab09da641479f74709ddd395
|
[
"MIT"
] | null | null | null |
power_perceiver/np_batch_processor/__init__.py
|
openclimatefix/power_perceiver
|
bafcdfaf6abf42fbab09da641479f74709ddd395
|
[
"MIT"
] | 33
|
2022-02-16T07:51:41.000Z
|
2022-03-31T11:24:11.000Z
|
power_perceiver/np_batch_processor/__init__.py
|
openclimatefix/power_perceiver
|
bafcdfaf6abf42fbab09da641479f74709ddd395
|
[
"MIT"
] | null | null | null |
from power_perceiver.np_batch_processor.encode_space_time import EncodeSpaceTime
from power_perceiver.np_batch_processor.sun_position import SunPosition
from power_perceiver.np_batch_processor.topography import Topography
| 55.5
| 80
| 0.918919
| 30
| 222
| 6.4
| 0.5
| 0.140625
| 0.28125
| 0.3125
| 0.53125
| 0.53125
| 0
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 222
| 3
| 81
| 74
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bb5ad40703c73c8e0aaa4353388354ffb2cc3641
| 33
|
py
|
Python
|
common/common/middleware/__init__.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 7
|
2020-09-01T21:52:37.000Z
|
2022-02-25T16:00:08.000Z
|
common/common/middleware/__init__.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 4
|
2021-09-10T22:15:09.000Z
|
2022-03-25T22:17:43.000Z
|
common/common/middleware/__init__.py
|
maosplx/L2py
|
5d81b2ea150c0096cfce184706fa226950f7f583
|
[
"MIT"
] | 9
|
2020-09-01T21:53:39.000Z
|
2022-03-30T12:03:04.000Z
|
from . import length, middleware
| 16.5
| 32
| 0.787879
| 4
| 33
| 6.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 33
| 1
| 33
| 33
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
70271524b2f2d61abba8d50f436a3a5547c10745
| 118
|
py
|
Python
|
tkdet/structures/__init__.py
|
tkhe/tkdetection
|
54e6c112ef2930e755f457e38449736f5743a9ea
|
[
"MIT"
] | 1
|
2020-10-09T02:27:13.000Z
|
2020-10-09T02:27:13.000Z
|
tkdet/structures/__init__.py
|
tkhe/tkdetection
|
54e6c112ef2930e755f457e38449736f5743a9ea
|
[
"MIT"
] | null | null | null |
tkdet/structures/__init__.py
|
tkhe/tkdetection
|
54e6c112ef2930e755f457e38449736f5743a9ea
|
[
"MIT"
] | null | null | null |
from .boxes import *
from .image_list import *
from .instances import *
from .keypoints import *
from .masks import *
| 19.666667
| 25
| 0.745763
| 16
| 118
| 5.4375
| 0.5
| 0.45977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169492
| 118
| 5
| 26
| 23.6
| 0.887755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
709857f8df96326f1aa06af23a0770f8e4728134
| 40
|
py
|
Python
|
strategypy/bots/unittest_moveleft.py
|
davide-ceretti/strategypy
|
37df9569e3a9fc8a0f1487a29a7897db6363c42e
|
[
"MIT"
] | 8
|
2015-03-03T17:40:41.000Z
|
2020-11-08T19:02:23.000Z
|
strategypy/bots/unittest_moveleft.py
|
davide-ceretti/strategypy
|
37df9569e3a9fc8a0f1487a29a7897db6363c42e
|
[
"MIT"
] | 19
|
2015-01-14T12:07:05.000Z
|
2015-03-19T11:53:11.000Z
|
strategypy/bots/unittest_moveleft.py
|
davide-ceretti/strategypy
|
37df9569e3a9fc8a0f1487a29a7897db6363c42e
|
[
"MIT"
] | 6
|
2015-03-16T18:17:06.000Z
|
2021-11-04T23:44:47.000Z
|
def action(ctx):
return 'move left'
| 13.333333
| 22
| 0.65
| 6
| 40
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225
| 40
| 2
| 23
| 20
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0.225
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
70a686cc59a45782a2cc398e0fbf3901bf5aebd6
| 315
|
py
|
Python
|
db_upgrade.py
|
dnaroid/laser-server
|
ad250b54847c3807127ea33493f66bd5b15b1034
|
[
"BSD-3-Clause"
] | null | null | null |
db_upgrade.py
|
dnaroid/laser-server
|
ad250b54847c3807127ea33493f66bd5b15b1034
|
[
"BSD-3-Clause"
] | null | null | null |
db_upgrade.py
|
dnaroid/laser-server
|
ad250b54847c3807127ea33493f66bd5b15b1034
|
[
"BSD-3-Clause"
] | null | null | null |
#!env/bin/python
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('Current database version: ' + str(api.db_version(
SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)))
| 31.5
| 61
| 0.84127
| 42
| 315
| 6
| 0.452381
| 0.214286
| 0.25
| 0.206349
| 0.333333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092063
| 315
| 9
| 62
| 35
| 0.881119
| 0.047619
| 0
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5661a33625cc5603279a33d28fbe1adc8c4c009a
| 39
|
py
|
Python
|
tasks/models/__init__.py
|
csdevsc/colcat_crowdsourcing_application
|
ad6015ca9cfc2a91063408280978a4c2eb4d6bc0
|
[
"MIT"
] | null | null | null |
tasks/models/__init__.py
|
csdevsc/colcat_crowdsourcing_application
|
ad6015ca9cfc2a91063408280978a4c2eb4d6bc0
|
[
"MIT"
] | null | null | null |
tasks/models/__init__.py
|
csdevsc/colcat_crowdsourcing_application
|
ad6015ca9cfc2a91063408280978a4c2eb4d6bc0
|
[
"MIT"
] | null | null | null |
from data import *
from tasks import *
| 13
| 19
| 0.74359
| 6
| 39
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205128
| 39
| 2
| 20
| 19.5
| 0.935484
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5662d3938ff8e404030a6d6ea2a1d667c72a9b61
| 173
|
py
|
Python
|
flask_mysql/crud/biolerplate_code/flask_app/__init__.py
|
ZhouSusan/CodingDojoPython
|
8d89c9a94a3be18e79fbf24e25348eae8c96a338
|
[
"MIT"
] | null | null | null |
flask_mysql/crud/biolerplate_code/flask_app/__init__.py
|
ZhouSusan/CodingDojoPython
|
8d89c9a94a3be18e79fbf24e25348eae8c96a338
|
[
"MIT"
] | null | null | null |
flask_mysql/crud/biolerplate_code/flask_app/__init__.py
|
ZhouSusan/CodingDojoPython
|
8d89c9a94a3be18e79fbf24e25348eae8c96a338
|
[
"MIT"
] | null | null | null |
from flask_app import app
from flask_app.controllers import
if __name__ == "__main__":
app.run(debug=True)
from flask import Flask
from flask_bcrypt import Bcrypt
| 19.222222
| 34
| 0.768786
| 26
| 173
| 4.692308
| 0.461538
| 0.295082
| 0.196721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17341
| 173
| 8
| 35
| 21.625
| 0.853147
| 0
| 0
| 0
| 0
| 0
| 0.046512
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.666667
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
567bb981871e1748a08b51accf9647ba7bcc514d
| 37
|
py
|
Python
|
dataset_seam/__init__.py
|
lonestar686/pytorch-saltnet
|
aa75581567ccd5643b854cfb4d54c8881ed2a0d6
|
[
"MIT"
] | null | null | null |
dataset_seam/__init__.py
|
lonestar686/pytorch-saltnet
|
aa75581567ccd5643b854cfb4d54c8881ed2a0d6
|
[
"MIT"
] | null | null | null |
dataset_seam/__init__.py
|
lonestar686/pytorch-saltnet
|
aa75581567ccd5643b854cfb4d54c8881ed2a0d6
|
[
"MIT"
] | null | null | null |
from .seam_data import SEAM, TileBase
| 37
| 37
| 0.837838
| 6
| 37
| 5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b3ee1d6c082dc69b60bab96d107a86c451c72fd
| 12,938
|
py
|
Python
|
species/read/read_color.py
|
vandalt/species
|
527dd900a60c4d691bd490569cd3b2007f9beead
|
[
"MIT"
] | null | null | null |
species/read/read_color.py
|
vandalt/species
|
527dd900a60c4d691bd490569cd3b2007f9beead
|
[
"MIT"
] | null | null | null |
species/read/read_color.py
|
vandalt/species
|
527dd900a60c4d691bd490569cd3b2007f9beead
|
[
"MIT"
] | null | null | null |
"""
Module with reading functionalities of color and magnitude data from photometric and
spectral libraries.
"""
import os
import configparser
from typing import Optional, Tuple
import h5py
import numpy as np
from typeguard import typechecked
from species.core import box
from species.read import read_spectrum
from species.util import phot_util
class ReadColorMagnitude:
"""
Class for reading color-magnitude data from the database.
"""
@typechecked
def __init__(self,
library: str,
filters_color: Tuple[str, str],
filter_mag: str) -> None:
"""
Parameters
----------
library : str
Photometric ('vlm-plx' or 'leggett') or spectral ('irtf' or 'spex') library.
filters_color : tuple(str, str)
Filter names for the color. For a photometric library, these have to be present in
the database (typically in the MKO, 2MASS, or WISE system). For a spectral library,
any filter names can be provided as long as they overlap with the wavelength range
of the spectra.
filter_mag : str
Filter name for the absolute magnitudes (see also description of ``filters_color``).
Returns
-------
NoneType
None
"""
self.library = library
self.filters_color = filters_color
self.filter_mag = filter_mag
config_file = os.path.join(os.getcwd(), 'species_config.ini')
config = configparser.ConfigParser()
config.read_file(open(config_file))
self.database = config['species']['database']
with h5py.File(self.database, 'r') as hdf_file:
if 'photometry' in hdf_file and self.library in hdf_file['photometry']:
self.lib_type = 'phot_lib'
elif 'spectra' in hdf_file and self.library in hdf_file['spectra']:
self.lib_type = 'spec_lib'
else:
raise ValueError(f'The \'{self.library}\' library is not present in the database.')
@typechecked
def get_color_magnitude(self,
object_type: Optional[str] = None) -> box.ColorMagBox:
"""
Function for extracting color-magnitude data from the selected library.
Parameters
----------
object_type : str, None
Object type for which the colors and magnitudes are extracted. Either field dwarfs
('field') or young/low-gravity objects ('young'). All objects are selected if set
to ``None``.
Returns
-------
species.core.box.ColorMagBox
Box with the colors and magnitudes.
"""
if self.lib_type == 'phot_lib':
with h5py.File(self.database, 'r') as h5_file:
sptype = np.asarray(h5_file[f'photometry/{self.library}/sptype'])
dist = np.asarray(h5_file[f'photometry/{self.library}/distance'])
dist_error = np.asarray(h5_file[f'photometry/{self.library}/distance_error'])
flag = np.asarray(h5_file[f'photometry/{self.library}/flag'])
obj_names = np.asarray(h5_file[f'photometry/{self.library}/name'])
if object_type is None:
indices = np.arange(0, np.size(sptype), 1)
elif object_type == 'field':
indices = np.where(flag == 'null')[0]
elif object_type == 'young':
indices = []
for j, object_flag in enumerate(flag):
if 'young' in object_flag:
indices.append(j)
elif 'lowg' in object_flag:
indices.append(j)
indices = np.array(indices)
if indices.size > 0:
with h5py.File(self.database, 'r') as h5_file:
mag1 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_color[0]}'])
mag2 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_color[1]}'])
else:
raise ValueError(f'There is not data available from \'{self.library}\' for '
f'\'{object_type}\' type objects with the chosen filters.')
color = mag1 - mag2
if self.filter_mag == self.filters_color[0]:
mag, _ = phot_util.apparent_to_absolute((mag1, None), (dist, dist_error))
elif self.filter_mag == self.filters_color[1]:
mag, _ = phot_util.apparent_to_absolute((mag2, None), (dist, dist_error))
color = color[indices]
mag = mag[indices]
sptype = sptype[indices]
obj_names = obj_names[indices]
indices = []
for i in range(color.size):
if not np.isnan(color[i]) and not np.isnan(mag[i]):
indices.append(i)
colormag_box = box.create_box(boxtype='colormag',
library=self.library,
object_type=object_type,
filters_color=self.filters_color,
filter_mag=self.filter_mag,
color=color[indices],
magnitude=mag[indices],
sptype=sptype[indices],
names=obj_names[indices])
elif self.lib_type == 'spec_lib':
read_spec_0 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_color[0])
read_spec_1 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_color[1])
read_spec_2 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filter_mag)
phot_box_0 = read_spec_0.get_magnitude(sptypes=None)
phot_box_1 = read_spec_1.get_magnitude(sptypes=None)
phot_box_2 = read_spec_2.get_magnitude(sptypes=None)
colormag_box = box.create_box(boxtype='colormag',
library=self.library,
object_type=object_type,
filters_color=self.filters_color,
filter_mag=self.filter_mag,
color=phot_box_0.app_mag[:, 0]-phot_box_1.app_mag[:, 0],
magnitude=phot_box_2.abs_mag[:, 0],
sptype=phot_box_0.sptype,
names=None)
return colormag_box
class ReadColorColor:
"""
Class for reading color-color data from the database.
"""
@typechecked
def __init__(self,
library: str,
filters_colors: Tuple[Tuple[str, str], Tuple[str, str]]) -> None:
"""
Parameters
----------
library : str
Photometric ('vlm-plx' or 'leggett') or spectral ('irtf' or 'spex') library.
filters_colors : tuple(tuple(str, str), tuple(str, str))
Filter names for the colors. For a photometric library, these have to be present in
the database (typically in the MKO, 2MASS, or WISE system). For a spectral library,
any filter names can be provided as long as they overlap with the wavelength range
of the spectra.
Returns
-------
NoneType
None
"""
self.library = library
self.filters_colors = filters_colors
config_file = os.path.join(os.getcwd(), 'species_config.ini')
config = configparser.ConfigParser()
config.read_file(open(config_file))
self.database = config['species']['database']
with h5py.File(self.database, 'r') as hdf_file:
if 'photometry' in hdf_file and self.library in hdf_file['photometry']:
self.lib_type = 'phot_lib'
elif 'spectra' in hdf_file and self.library in hdf_file['spectra']:
self.lib_type = 'spec_lib'
else:
raise ValueError(f'The \'{self.library}\' library is not present in the database.')
@typechecked
def get_color_color(self,
object_type: Optional[str] = None) -> box.ColorColorBox:
"""
Function for extracting color-color data from the selected library.
Parameters
----------
object_type : str, None
Object type for which the colors and magnitudes are extracted. Either field dwarfs
('field') or young/low-gravity objects ('young'). All objects are selected if set
to ``None``.
Returns
-------
species.core.box.ColorColorBox
Box with the colors.
"""
if self.lib_type == 'phot_lib':
h5_file = h5py.File(self.database, 'r')
sptype = np.asarray(h5_file[f'photometry/{self.library}/sptype'])
flag = np.asarray(h5_file[f'photometry/{self.library}/flag'])
obj_names = np.asarray(h5_file[f'photometry/{self.library}/name'])
if object_type is None:
indices = np.arange(0, np.size(sptype), 1)
elif object_type == 'field':
indices = np.where(flag == 'null')[0]
elif object_type == 'young':
indices = []
for j, object_flag in enumerate(flag):
if 'young' in object_flag:
indices.append(j)
elif 'lowg' in object_flag:
indices.append(j)
indices = np.array(indices)
mag1 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[0][0]}'])
mag2 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[0][1]}'])
mag3 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[1][0]}'])
mag4 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[1][1]}'])
color1 = mag1 - mag2
color2 = mag3 - mag4
color1 = color1[indices]
color2 = color2[indices]
sptype = sptype[indices]
obj_names = obj_names[indices]
indices = []
for i in range(color1.size):
if not np.isnan(color1[i]) and not np.isnan(color2[i]):
indices.append(i)
colorbox = box.create_box(boxtype='colorcolor',
library=self.library,
object_type=object_type,
filters=self.filters_colors,
color1=color1[indices],
color2=color2[indices],
sptype=sptype[indices],
names=obj_names[indices])
h5_file.close()
elif self.lib_type == 'spec_lib':
read_spec_0 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[0][0])
read_spec_1 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[0][1])
read_spec_2 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[1][0])
read_spec_3 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[1][1])
phot_box_0 = read_spec_0.get_magnitude(sptypes=None)
phot_box_1 = read_spec_1.get_magnitude(sptypes=None)
phot_box_2 = read_spec_2.get_magnitude(sptypes=None)
phot_box_3 = read_spec_3.get_magnitude(sptypes=None)
colorbox = box.create_box(boxtype='colorcolor',
library=self.library,
object_type=object_type,
filters=self.filters_colors,
color1=phot_box_0.app_mag[:, 0]-phot_box_1.app_mag[:, 0],
color2=phot_box_2.app_mag[:, 0]-phot_box_3.app_mag[:, 0],
sptype=phot_box_0.sptype,
names=None)
return colorbox
| 39.565749
| 100
| 0.529139
| 1,406
| 12,938
| 4.689189
| 0.12091
| 0.060064
| 0.023358
| 0.031852
| 0.830275
| 0.806916
| 0.778098
| 0.761565
| 0.736994
| 0.691339
| 0
| 0.014957
| 0.37471
| 12,938
| 326
| 101
| 39.687117
| 0.8
| 0.15868
| 0
| 0.628415
| 0
| 0
| 0.098296
| 0.054684
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021858
| false
| 0
| 0.04918
| 0
| 0.092896
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3b625b2731273ccb49023bc715fec206170cb4de
| 110
|
py
|
Python
|
libsvc/persistence/__init__.py
|
derekmerck/endpoint
|
5b74f0b3303bbf419a6c9f71e9a4a156583bf51d
|
[
"MIT"
] | null | null | null |
libsvc/persistence/__init__.py
|
derekmerck/endpoint
|
5b74f0b3303bbf419a6c9f71e9a4a156583bf51d
|
[
"MIT"
] | null | null | null |
libsvc/persistence/__init__.py
|
derekmerck/endpoint
|
5b74f0b3303bbf419a6c9f71e9a4a156583bf51d
|
[
"MIT"
] | null | null | null |
from .persistence import PersistenceBackend, ShelfMixin
from .redis_persistence import RedisPersistenceBackend
| 55
| 55
| 0.9
| 10
| 110
| 9.8
| 0.7
| 0.346939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 110
| 2
| 56
| 55
| 0.960784
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b6d696a3cfb32c61aa79adf234cb42b6c93acda
| 38
|
py
|
Python
|
blocks/__init__.py
|
blandfort/mirror
|
70ae41fd151275d42506d07117aa2ea3ce59ad23
|
[
"MIT"
] | null | null | null |
blocks/__init__.py
|
blandfort/mirror
|
70ae41fd151275d42506d07117aa2ea3ce59ad23
|
[
"MIT"
] | 6
|
2020-11-06T22:40:05.000Z
|
2022-03-12T00:51:06.000Z
|
blocks/__init__.py
|
blandfort/mirror
|
70ae41fd151275d42506d07117aa2ea3ce59ad23
|
[
"MIT"
] | null | null | null |
from .countdown import CountdownBlock
| 19
| 37
| 0.868421
| 4
| 38
| 8.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 1
| 38
| 38
| 0.970588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e775a9ec46afc1926576d289cbfc029d509f902
| 91
|
py
|
Python
|
Django_ResumeParser/views.py
|
Aarif1430/Django_Crispy_Forms
|
67b8ae5220cd2f3c85189f8901af1cd3eba17af8
|
[
"MIT"
] | 4
|
2019-03-01T03:00:55.000Z
|
2021-01-13T21:20:32.000Z
|
Django_ResumeParser/views.py
|
Aarif1430/Django_Crispy_Forms
|
67b8ae5220cd2f3c85189f8901af1cd3eba17af8
|
[
"MIT"
] | null | null | null |
Django_ResumeParser/views.py
|
Aarif1430/Django_Crispy_Forms
|
67b8ae5220cd2f3c85189f8901af1cd3eba17af8
|
[
"MIT"
] | null | null | null |
from django.shortcuts import redirect
def home(request):
return redirect('/ResumeParser')
| 22.75
| 37
| 0.802198
| 11
| 91
| 6.636364
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098901
| 91
| 4
| 38
| 22.75
| 0.890244
| 0
| 0
| 0
| 0
| 0
| 0.141304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
d9195ec624a625dc9f48ad46dd07237c5aa0c88b
| 36
|
py
|
Python
|
pysrc/qulacs/observable/__init__.py
|
kodack64/qulacs-osaka
|
4ccc3ff084f10942e22d8663a01ed67efd24d9f7
|
[
"MIT"
] | 4
|
2022-01-26T06:56:00.000Z
|
2022-03-18T02:07:24.000Z
|
pysrc/qulacs/observable/__init__.py
|
kodack64/qulacs-osaka
|
4ccc3ff084f10942e22d8663a01ed67efd24d9f7
|
[
"MIT"
] | 104
|
2021-11-12T04:15:02.000Z
|
2022-03-30T05:12:20.000Z
|
pysrc/qulacs/observable/__init__.py
|
kodack64/qulacs-osaka
|
4ccc3ff084f10942e22d8663a01ed67efd24d9f7
|
[
"MIT"
] | 3
|
2021-12-19T11:52:38.000Z
|
2022-03-09T04:20:17.000Z
|
from qulacs_core.observable import *
| 36
| 36
| 0.861111
| 5
| 36
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d96d131d2253e3c49d7c95f8f6273353430cd55f
| 9,566
|
gyp
|
Python
|
third_party/protobuf2/protobuf.gyp
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | 11
|
2015-03-20T04:08:08.000Z
|
2021-11-15T15:51:36.000Z
|
third_party/protobuf2/protobuf.gyp
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/protobuf2/protobuf.gyp
|
rwatson/chromium-capsicum
|
b03da8e897f897c6ad2cda03ceda217b760fd528
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'conditions': [
['OS!="win"', {
'variables': {
'config_h_dir':
'.', # crafted for gcc/linux.
},
}, { # else, OS=="win"
'variables': {
'config_h_dir':
'src/vsprojects', # crafted for msvc.
},
'target_defaults': {
'msvs_disabled_warnings': [
4018, # signed/unsigned mismatch in comparison
4244, # implicit conversion, possible loss of data
4355, # 'this' used in base member initializer list
],
'defines!': [
'WIN32_LEAN_AND_MEAN', # Protobuf defines this itself.
],
},
}]
],
'targets': [
# The "lite" lib is about 1/7th the size of the heavy lib,
# but it doesn't support some of the more exotic features of
# protobufs, like reflection. To generate C++ code that can link
# against the lite version of the library, add the option line:
#
# option optimize_for = LITE_RUNTIME;
#
# to your .proto file.
{
'target_name': 'protobuf_lite',
'type': '<(library)',
'toolsets': ['host', 'target'],
'sources': [
'src/src/google/protobuf/stubs/common.h',
'src/src/google/protobuf/stubs/once.h',
'src/src/google/protobuf/extension_set.h',
'src/src/google/protobuf/generated_message_util.h',
'src/src/google/protobuf/message_lite.h',
'src/src/google/protobuf/repeated_field.h',
'src/src/google/protobuf/wire_format_lite.h',
'src/src/google/protobuf/wire_format_lite_inl.h',
'src/src/google/protobuf/io/coded_stream.h',
'src/src/google/protobuf/io/zero_copy_stream.h',
'src/src/google/protobuf/io/zero_copy_stream_impl_lite.h',
'src/src/google/protobuf/stubs/common.cc',
'src/src/google/protobuf/stubs/once.cc',
'src/src/google/protobuf/stubs/hash.cc',
'src/src/google/protobuf/stubs/hash.h',
'src/src/google/protobuf/stubs/map-util.h',
'src/src/google/protobuf/stubs/stl_util-inl.h',
'src/src/google/protobuf/extension_set.cc',
'src/src/google/protobuf/generated_message_util.cc',
'src/src/google/protobuf/message_lite.cc',
'src/src/google/protobuf/repeated_field.cc',
'src/src/google/protobuf/wire_format_lite.cc',
'src/src/google/protobuf/io/coded_stream.cc',
'src/src/google/protobuf/io/zero_copy_stream.cc',
'src/src/google/protobuf/io/zero_copy_stream_impl_lite.cc',
'<(config_h_dir)/config.h',
],
'include_dirs': [
'<(config_h_dir)',
'src/src',
],
# This macro must be defined to suppress the use of dynamic_cast<>,
# which requires RTTI.
'defines': [
'GOOGLE_PROTOBUF_NO_RTTI',
],
'direct_dependent_settings': {
'include_dirs': [
'<(config_h_dir)',
'src/src',
],
'defines': [
'GOOGLE_PROTOBUF_NO_RTTI',
],
},
},
# This is the full, heavy protobuf lib that's needed for c++ .proto's
# that don't specify the LITE_RUNTIME option. The protocol
# compiler itself (protoc) falls into that category.
{
'target_name': 'protobuf',
'type': '<(library)',
'toolsets': ['host'],
'sources': [
'src/src/google/protobuf/descriptor.h',
'src/src/google/protobuf/descriptor.pb.h',
'src/src/google/protobuf/descriptor_database.h',
'src/src/google/protobuf/dynamic_message.h',
'src/src/google/protobuf/generated_message_reflection.h',
'src/src/google/protobuf/message.h',
'src/src/google/protobuf/reflection_ops.h',
'src/src/google/protobuf/service.h',
'src/src/google/protobuf/text_format.h',
'src/src/google/protobuf/unknown_field_set.h',
'src/src/google/protobuf/wire_format.h',
'src/src/google/protobuf/wire_format_inl.h',
'src/src/google/protobuf/io/gzip_stream.h',
'src/src/google/protobuf/io/printer.h',
'src/src/google/protobuf/io/tokenizer.h',
'src/src/google/protobuf/io/zero_copy_stream_impl.h',
'src/src/google/protobuf/compiler/code_generator.h',
'src/src/google/protobuf/compiler/command_line_interface.h',
'src/src/google/protobuf/compiler/importer.h',
'src/src/google/protobuf/compiler/parser.h',
'src/src/google/protobuf/stubs/substitute.cc',
'src/src/google/protobuf/stubs/substitute.h',
'src/src/google/protobuf/stubs/strutil.cc',
'src/src/google/protobuf/stubs/strutil.h',
'src/src/google/protobuf/stubs/structurally_valid.cc',
'src/src/google/protobuf/descriptor.cc',
'src/src/google/protobuf/descriptor.pb.cc',
'src/src/google/protobuf/descriptor_database.cc',
'src/src/google/protobuf/dynamic_message.cc',
'src/src/google/protobuf/extension_set_heavy.cc',
'src/src/google/protobuf/generated_message_reflection.cc',
'src/src/google/protobuf/message.cc',
'src/src/google/protobuf/reflection_ops.cc',
'src/src/google/protobuf/service.cc',
'src/src/google/protobuf/text_format.cc',
'src/src/google/protobuf/unknown_field_set.cc',
'src/src/google/protobuf/wire_format.cc',
# This file pulls in zlib, but it's not actually used by protoc, so
# instead of compiling zlib for the host, let's just exclude this.
# 'src/src/google/protobuf/io/gzip_stream.cc',
'src/src/google/protobuf/io/printer.cc',
'src/src/google/protobuf/io/tokenizer.cc',
'src/src/google/protobuf/io/zero_copy_stream_impl.cc',
'src/src/google/protobuf/compiler/importer.cc',
'src/src/google/protobuf/compiler/parser.cc',
],
'dependencies': [
'protobuf_lite',
],
'export_dependent_settings': [
'protobuf_lite',
],
},
{
'target_name': 'protoc',
'type': 'executable',
'toolsets': ['host'],
'sources': [
'src/src/google/protobuf/compiler/code_generator.cc',
'src/src/google/protobuf/compiler/command_line_interface.cc',
'src/src/google/protobuf/compiler/cpp/cpp_enum.cc',
'src/src/google/protobuf/compiler/cpp/cpp_enum.h',
'src/src/google/protobuf/compiler/cpp/cpp_enum_field.cc',
'src/src/google/protobuf/compiler/cpp/cpp_enum_field.h',
'src/src/google/protobuf/compiler/cpp/cpp_extension.cc',
'src/src/google/protobuf/compiler/cpp/cpp_extension.h',
'src/src/google/protobuf/compiler/cpp/cpp_field.cc',
'src/src/google/protobuf/compiler/cpp/cpp_field.h',
'src/src/google/protobuf/compiler/cpp/cpp_file.cc',
'src/src/google/protobuf/compiler/cpp/cpp_file.h',
'src/src/google/protobuf/compiler/cpp/cpp_generator.cc',
'src/src/google/protobuf/compiler/cpp/cpp_helpers.cc',
'src/src/google/protobuf/compiler/cpp/cpp_helpers.h',
'src/src/google/protobuf/compiler/cpp/cpp_message.cc',
'src/src/google/protobuf/compiler/cpp/cpp_message.h',
'src/src/google/protobuf/compiler/cpp/cpp_message_field.cc',
'src/src/google/protobuf/compiler/cpp/cpp_message_field.h',
'src/src/google/protobuf/compiler/cpp/cpp_primitive_field.cc',
'src/src/google/protobuf/compiler/cpp/cpp_primitive_field.h',
'src/src/google/protobuf/compiler/cpp/cpp_service.cc',
'src/src/google/protobuf/compiler/cpp/cpp_service.h',
'src/src/google/protobuf/compiler/cpp/cpp_string_field.cc',
'src/src/google/protobuf/compiler/cpp/cpp_string_field.h',
'src/src/google/protobuf/compiler/java/java_enum.cc',
'src/src/google/protobuf/compiler/java/java_enum.h',
'src/src/google/protobuf/compiler/java/java_enum_field.cc',
'src/src/google/protobuf/compiler/java/java_enum_field.h',
'src/src/google/protobuf/compiler/java/java_extension.cc',
'src/src/google/protobuf/compiler/java/java_extension.h',
'src/src/google/protobuf/compiler/java/java_field.cc',
'src/src/google/protobuf/compiler/java/java_field.h',
'src/src/google/protobuf/compiler/java/java_file.cc',
'src/src/google/protobuf/compiler/java/java_file.h',
'src/src/google/protobuf/compiler/java/java_generator.cc',
'src/src/google/protobuf/compiler/java/java_helpers.cc',
'src/src/google/protobuf/compiler/java/java_helpers.h',
'src/src/google/protobuf/compiler/java/java_message.cc',
'src/src/google/protobuf/compiler/java/java_message.h',
'src/src/google/protobuf/compiler/java/java_message_field.cc',
'src/src/google/protobuf/compiler/java/java_message_field.h',
'src/src/google/protobuf/compiler/java/java_primitive_field.cc',
'src/src/google/protobuf/compiler/java/java_primitive_field.h',
'src/src/google/protobuf/compiler/java/java_service.cc',
'src/src/google/protobuf/compiler/java/java_service.h',
'src/src/google/protobuf/compiler/python/python_generator.cc',
'src/src/google/protobuf/compiler/main.cc',
],
'dependencies': [
'protobuf',
],
'include_dirs': [
'<(config_h_dir)',
'src/src',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| 42.705357
| 75
| 0.645515
| 1,261
| 9,566
| 4.762887
| 0.168121
| 0.118881
| 0.231768
| 0.38628
| 0.785548
| 0.765734
| 0.558608
| 0.394106
| 0.291875
| 0.043457
| 0
| 0.003022
| 0.204265
| 9,566
| 223
| 76
| 42.896861
| 0.785996
| 0.126594
| 0
| 0.222798
| 0
| 0
| 0.714766
| 0.659017
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.010363
| 0
| 0.010363
| 0.010363
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d995ed9a78a2caa29c4fbde84460690350266ffa
| 5,384
|
py
|
Python
|
data/KS_Eqn_data.py
|
BethanyL/PDEKoopman2
|
0006a312c3dae21e50aa55c7a0b2855b399eb66a
|
[
"MIT"
] | 10
|
2020-11-03T18:40:18.000Z
|
2022-03-14T20:16:08.000Z
|
data/KS_Eqn_data.py
|
BethanyL/PDEKoopman2
|
0006a312c3dae21e50aa55c7a0b2855b399eb66a
|
[
"MIT"
] | null | null | null |
data/KS_Eqn_data.py
|
BethanyL/PDEKoopman2
|
0006a312c3dae21e50aa55c7a0b2855b399eb66a
|
[
"MIT"
] | 4
|
2021-05-27T16:30:23.000Z
|
2022-03-14T20:16:09.000Z
|
"""
Create training/validation data for KS Equation.
All data comes from solutions to Kuramoto-Sivashinsky equation.
Training data:
Initial conditions:
120,000 ICs
White noise, Sines, Square waves
Solve from t = 0 to 6.25 in steps of 0.125
128 spatial points in [-4*pi,4*pi)
Validation data:
Same structure as training data but with 20,000 ICs
"""
import numpy as np
# Must install pyDOE package, see https://pythonhosted.org/pyDOE/index.html
import pyDOE
from scipy.stats import geom
from PDEsolvers import KS_Periodic
# Fix the global RNG seed so the generated data set is reproducible run-to-run.
np.random.seed(0)
# Inputs (data)
data_prefix = 'KS_Eqn'
n = 128  # Number of grid points
n_IC = 6000  # Number of initial conditions in each file
n_train = 20  # Number of training files
M = n_IC * n_train // 3  # Samples from latin hypercube (one third of the ICs are sines, one third square waves)
# Inputs (KS)
L = 8 * np.pi  # Length of domain
dt = 0.125  # Size of time step for data
n_time = 51  # Number of time steps
T = dt * (n_time - 1)  # End time (6.25)
# Discretize x: n equispaced points on the periodic domain [-L/2, L/2)
x = np.linspace(-L / 2, L / 2, n + 1)
x = x[:n]  # drop the right endpoint (identified with the left by periodicity)
# Create vectors of random values for sines
# Sampling of A and phi via a maximin latin hypercube:
# amplitude A in [0, 1), phase phi in [0, 2*pi)
X = pyDOE.lhs(2, samples=M, criterion='maximin')
A_vect = X[:, 0]
phi_vect = 2 * np.pi * X[:, 1]
# Sampling of omega: inverse-CDF sampling from a geometric(p=0.25)
# distribution truncated to {1, ..., max_omega}
max_omega = 10
cum_distrib = geom.cdf(np.arange(1, max_omega + 1), 0.25)
cum_distrib = cum_distrib / cum_distrib[-1]  # renormalize after truncation
numbs = np.random.uniform(size=M)
omega_vect = np.zeros(M)
for k in range(max_omega):
    # count, per sample, how many CDF thresholds the uniform draw falls below
    omega_vect = omega_vect + (numbs < cum_distrib[k])
omega_vect = 11 - omega_vect  # map counts back to frequencies 1..max_omega
# Create vectors of random values for square waves
# Sampling of A (amplitude), c (center), and w (width)
X = pyDOE.lhs(3, samples=M, criterion='maximin')
A2_vect = X[:, 0]
c_vect = L * X[:, 1] - L / 2  # center anywhere in [-L/2, L/2)
w_vect = (L - 4 * (x[1] - x[0])) * X[:, 2] + 2 * (x[1] - x[0])  # width in [2*dx, L - 2*dx)
# Loop over files: each training file holds n_IC trajectories, with the ICs
# cycling white noise / sine / square wave (row indices 0, 1, 2 mod 3)
sine_ind = 0
square_ind = 0
for train_num in range(n_train):
    data_set = 'train{}_x'.format(train_num + 1)
    # Set Initial Conditions
    u_0 = np.zeros((n_IC, n))
    # White noise: draw random Fourier coefficients with Hermitian symmetry so
    # the inverse FFT is real, then subtract the mean
    for k in range(0, n_IC - 2, 3):
        ut = np.zeros(n, dtype=np.complex128)
        ut[0] = np.random.normal()
        ut[1:n // 2] = (np.random.normal(size=(n // 2 - 1))
                        + 1j * np.random.normal(size=(n // 2 - 1)))
        ut[n // 2] = np.random.normal()  # Nyquist coefficient kept real
        ut[n // 2 + 1:] = np.flipud(np.conj(ut[1:n // 2]))
        u = np.real(np.fft.ifft(ut))
        u_0[k, :] = u - np.mean(u)
    # Sines: A * sin(2*pi*omega*x/L + phi) using the pre-sampled parameters
    for k in range(1, n_IC - 1, 3):
        u_0[k, :] = A_vect[sine_ind] * np.sin(2 * np.pi * omega_vect[sine_ind]
                                              / L * x + phi_vect[sine_ind])
        sine_ind += 1
    # Square waves: indicator of |x - c| < w/2, also tested at x + L and x - L
    # so the pulse wraps across the periodic boundary; then mean-subtracted
    for k in range(2, n_IC, 3):
        u = (A2_vect[square_ind] * np.logical_or(
            np.logical_or(
                np.abs(x - c_vect[square_ind]) < w_vect[square_ind] / 2,
                np.abs(x + L - c_vect[square_ind]) < w_vect[square_ind] / 2),
            np.abs(x - L - c_vect[square_ind]) < w_vect[square_ind] / 2))
        u_0[k, :] = u - np.mean(u)
        square_ind += 1
    # Solve KS Equation: one full trajectory (n_time snapshots) per IC
    Data = np.zeros((n_IC, n_time, n), dtype=np.float32)
    for k in range(n_IC):
        Data[k, :, :] = KS_Periodic(x, T, n_time, u_0[k, :])
    # Save data file (e.g. KS_Eqn_train1_x.npy)
    np.save('{}_{}'.format(data_prefix, data_set), Data, allow_pickle=False)
# Validation Data: same construction as the training files, but one file with
# 30,000 ICs and freshly sampled random parameters
n_IC = 30000  # Number of initial conditions
data_set = 'val_x'
M = n_IC // 3  # Samples from latin hypercube
# Create vectors of random values for sines
# Sampling of A and phi
X = pyDOE.lhs(2, samples=M, criterion='maximin')
A_vect = X[:, 0]
phi_vect = 2 * np.pi * X[:, 1]
# Sampling of omega (inverse-CDF sampling of a truncated geometric law)
max_omega = 10
cum_distrib = geom.cdf(np.arange(1, max_omega + 1), 0.25)
cum_distrib = cum_distrib / cum_distrib[-1]
numbs = np.random.uniform(size=M)
omega_vect = np.zeros(M)
for k in range(max_omega):
    omega_vect = omega_vect + (numbs < cum_distrib[k])
omega_vect = 11 - omega_vect
# Create vectors of random values for square waves
# Sampling of A, c, and w
X = pyDOE.lhs(3, samples=M, criterion='maximin')
A2_vect = X[:, 0]
c_vect = L * X[:, 1] - L / 2
w_vect = (L - 4 * (x[1] - x[0])) * X[:, 2] + 2 * (x[1] - x[0])
# Set Initial Conditions
u_0 = np.zeros((n_IC, n))
# White noise (Hermitian-symmetric spectrum so the inverse FFT is real)
for k in range(0, n_IC - 2, 3):
    ut = np.zeros(n, dtype=np.complex128)
    ut[0] = np.random.normal()
    ut[1:n // 2] = (np.random.normal(size=(n // 2 - 1))
                    + 1j * np.random.normal(size=(n // 2 - 1)))
    # BUGFIX: the training loop also draws a (real) Nyquist coefficient; it was
    # missing here, which left ut[n // 2] = 0 and made the validation noise
    # spectra inconsistent with the training data's "same structure"
    ut[n // 2] = np.random.normal()
    ut[n // 2 + 1:] = np.flipud(np.conj(ut[1:n // 2]))
    u = np.real(np.fft.ifft(ut))
    u_0[k, :] = u - np.mean(u)
# Sines
sine_ind = 0
for k in range(1, n_IC - 1, 3):
    u_0[k, :] = A_vect[sine_ind] * np.sin(2 * np.pi * omega_vect[sine_ind]
                                          / L * x + phi_vect[sine_ind])
    sine_ind += 1
# Square waves (wrapped across the periodic boundary, then mean-subtracted)
square_ind = 0
for k in range(2, n_IC, 3):
    u = (A2_vect[square_ind] * np.logical_or(
        np.logical_or(
            np.abs(x - c_vect[square_ind]) < w_vect[square_ind] / 2,
            np.abs(x + L - c_vect[square_ind]) < w_vect[square_ind] / 2),
        np.abs(x - L - c_vect[square_ind]) < w_vect[square_ind] / 2))
    u_0[k, :] = u - np.mean(u)
    square_ind += 1
# Solve KS Equation
Data = np.zeros((n_IC, n_time, n), dtype=np.float32)
for k in range(n_IC):
    Data[k, :, :] = KS_Periodic(x, T, n_time, u_0[k, :])
# Save data file (KS_Eqn_val_x.npy)
np.save('{}_{}'.format(data_prefix, data_set), Data, allow_pickle=False)
| 29.102703
| 78
| 0.595468
| 955
| 5,384
| 3.208377
| 0.159162
| 0.052872
| 0.059399
| 0.035901
| 0.729112
| 0.707572
| 0.704961
| 0.704961
| 0.704961
| 0.704961
| 0
| 0.045567
| 0.25
| 5,384
| 184
| 79
| 29.26087
| 0.713224
| 0.232541
| 0
| 0.792453
| 0
| 0
| 0.014191
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037736
| 0
| 0.037736
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d9996a88fd4400c3095d1654847ddd898fa8ffb3
| 29
|
py
|
Python
|
evaluation/__init__.py
|
ardihikaru/hfsoftmax
|
55966f3a902c16df9b1ca93a77c5cd43efd47fd9
|
[
"MIT"
] | 95
|
2018-02-10T05:12:57.000Z
|
2022-03-15T07:58:58.000Z
|
evaluation/__init__.py
|
dapengchen123/hfsoftmax
|
467bd90814abdf3e5ad8384e6e05749172b68ae6
|
[
"MIT"
] | 14
|
2018-05-15T08:48:50.000Z
|
2021-11-06T08:31:56.000Z
|
evaluation/__init__.py
|
dapengchen123/hfsoftmax
|
467bd90814abdf3e5ad8384e6e05749172b68ae6
|
[
"MIT"
] | 25
|
2018-07-04T09:16:28.000Z
|
2022-02-07T20:54:47.000Z
|
from .verify import evaluate
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79d64e8adf51aea098382e7f4fd33f66d54fa20b
| 366
|
py
|
Python
|
humfrey/update/templatetags/humfrey_update.py
|
ox-it/humfrey
|
c92e46a24a9bf28aa9638a612f166d209315e76b
|
[
"BSD-3-Clause"
] | 6
|
2015-01-09T15:53:07.000Z
|
2020-02-13T14:00:53.000Z
|
humfrey/update/templatetags/humfrey_update.py
|
ox-it/humfrey
|
c92e46a24a9bf28aa9638a612f166d209315e76b
|
[
"BSD-3-Clause"
] | null | null | null |
humfrey/update/templatetags/humfrey_update.py
|
ox-it/humfrey
|
c92e46a24a9bf28aa9638a612f166d209315e76b
|
[
"BSD-3-Clause"
] | 1
|
2017-05-12T20:46:15.000Z
|
2017-05-12T20:46:15.000Z
|
from django import template
register = template.Library()  # registry the @register.filter decorators below attach to
@register.filter
def can_view(obj, user):
    """Template filter: whether *user* may view *obj* (delegates to the model)."""
    allowed = obj.can_view(user)
    return allowed
@register.filter
def can_change(obj, user):
    """Template filter: whether *user* may change *obj* (delegates to the model)."""
    allowed = obj.can_change(user)
    return allowed
@register.filter
def can_execute(obj, user):
    """Template filter: whether *user* may execute *obj* (delegates to the model)."""
    allowed = obj.can_execute(user)
    return allowed
@register.filter
def can_delete(obj, user):
    """Template filter: whether *user* may delete *obj* (delegates to the model)."""
    allowed = obj.can_delete(user)
    return allowed
| 17.428571
| 32
| 0.745902
| 55
| 366
| 4.818182
| 0.290909
| 0.211321
| 0.256604
| 0.301887
| 0.558491
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144809
| 366
| 20
| 33
| 18.3
| 0.846645
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0.071429
| 0.285714
| 0.642857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
8deba64769384e38daec29daef61ad97a9f4f95e
| 75
|
py
|
Python
|
marker/tests/test_pythagorean_therom/pythagorean_theorem.py
|
tahamian/autograder
|
47c9705fef27c369eaa1b4ded423dbd82bcac3eb
|
[
"MIT"
] | null | null | null |
marker/tests/test_pythagorean_therom/pythagorean_theorem.py
|
tahamian/autograder
|
47c9705fef27c369eaa1b4ded423dbd82bcac3eb
|
[
"MIT"
] | 5
|
2020-02-11T03:51:34.000Z
|
2020-05-16T21:05:58.000Z
|
marker/tests/test_pythagorean_therom/pythagorean_theorem.py
|
tahamian/autograder
|
47c9705fef27c369eaa1b4ded423dbd82bcac3eb
|
[
"MIT"
] | null | null | null |
import math
def pythagorean(a, b):
    """Return the hypotenuse length of a right triangle with legs *a* and *b*.

    Uses math.hypot instead of sqrt(a**2 + b**2): hypot computes the same
    value but avoids the intermediate overflow/underflow that squaring the
    legs directly can cause for very large or very small inputs.
    """
    return math.hypot(a, b)
| 12.5
| 37
| 0.586667
| 13
| 75
| 3.384615
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.253333
| 75
| 5
| 38
| 15
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
5c23aa1462951ea80e2931b047829de1c1497d90
| 1,909
|
py
|
Python
|
edsnlp/pipelines/factories.py
|
aphp/edsnlp
|
a39ea23ff193764a8b1a42c5de463cd43202c134
|
[
"BSD-3-Clause"
] | 32
|
2022-03-08T16:45:09.000Z
|
2022-03-31T15:21:00.000Z
|
edsnlp/pipelines/factories.py
|
aphp/edsnlp
|
a39ea23ff193764a8b1a42c5de463cd43202c134
|
[
"BSD-3-Clause"
] | 19
|
2022-03-09T11:44:43.000Z
|
2022-03-31T14:32:06.000Z
|
edsnlp/pipelines/factories.py
|
aphp/edsnlp
|
a39ea23ff193764a8b1a42c5de463cd43202c134
|
[
"BSD-3-Clause"
] | 1
|
2022-03-11T16:14:21.000Z
|
2022-03-11T16:14:21.000Z
|
# flake8: noqa: F811
from .core.advanced.factory import create_component as advanced
from .core.context.factory import create_component as context
from .core.endlines.factory import create_component as endlines
from .core.matcher.factory import create_component as matcher
from .core.normalizer.accents.factory import create_component as accents
from .core.normalizer.factory import create_component as normalizer
from .core.normalizer.lowercase.factory import remove_lowercase
from .core.normalizer.pollution.factory import create_component as pollution
from .core.normalizer.quotes.factory import create_component as quotes
from .core.sentences.factory import create_component as sentences
from .misc.consultation_dates.factory import create_component as consultation_dates
from .misc.dates.factory import create_component as dates
from .misc.measures.factory import create_component as measures
from .misc.reason.factory import create_component as reason
from .misc.sections.factory import create_component as sections
from .ner.covid.factory import create_component as covid
from .ner.scores.charlson.factory import create_component as charlson
from .ner.scores.emergency.ccmu.factory import create_component as ccmu
from .ner.scores.emergency.gemsa.factory import create_component as gemsa
from .ner.scores.emergency.priority.factory import create_component as priority
from .ner.scores.factory import create_component as score
from .ner.scores.sofa.factory import create_component as sofa
from .ner.scores.tnm.factory import create_component as tnm
from .qualifiers.family.factory import create_component as family
from .qualifiers.history.factory import create_component as history
from .qualifiers.hypothesis.factory import create_component as hypothesis
from .qualifiers.negation.factory import create_component as negation
from .qualifiers.reported_speech.factory import create_component as rspeech
| 63.633333
| 83
| 0.858565
| 269
| 1,909
| 5.977695
| 0.171004
| 0.226368
| 0.31903
| 0.470149
| 0.50995
| 0.043532
| 0
| 0
| 0
| 0
| 0
| 0.0023
| 0.089052
| 1,909
| 29
| 84
| 65.827586
| 0.922369
| 0.009429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5c2e60d3a8761bd988f125086236d87a0f975776
| 6,917
|
py
|
Python
|
nvidia-dla-blocks/hw/verif/regression/testplans/nvdla_test_list_L10.py
|
minisparrow/freedom
|
b31723a2cf3d1f245f9de2dcfedde4df2180cc6f
|
[
"Apache-2.0"
] | null | null | null |
nvidia-dla-blocks/hw/verif/regression/testplans/nvdla_test_list_L10.py
|
minisparrow/freedom
|
b31723a2cf3d1f245f9de2dcfedde4df2180cc6f
|
[
"Apache-2.0"
] | null | null | null |
nvidia-dla-blocks/hw/verif/regression/testplans/nvdla_test_list_L10.py
|
minisparrow/freedom
|
b31723a2cf3d1f245f9de2dcfedde4df2180cc6f
|
[
"Apache-2.0"
] | 1
|
2020-07-16T11:20:40.000Z
|
2020-07-16T11:20:40.000Z
|
# L10 randomized regression testplan.  Each pass of the outer loop registers
# the complete test set once more; get_seed_args() is called per test and
# presumably supplies a fresh random seed each time (TODO confirm), so
# plan_arguments['RUN_NUM'] controls how many seeded copies of every test run.
# NOTE(review): add_test, plan_arguments, get_seed_args, project and
# DISABLE_COMPARE_ALL_UNITS_SB_ARG are all provided by the surrounding
# regression framework, not defined in this file.
for i in range(plan_arguments['RUN_NUM']):
    ############################################# CC #############################################
    # Convolution-core tests.  Layer count comes from plan_arguments['LAYER_NUM']
    # except for the *_full_reuse_* tests, which hard-code 2 layers.
    add_test(name='cc_feature_rtest',
             tags=['L10', 'cc'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' None reuse CC random case, input data format is fixed as feature ''')
    add_test(name='cc_pitch_rtest',
             tags=['L10', 'cc'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' None reuse CC random case, input data format is fixed as image ''')
    add_test(name='cc_feature_data_full_reuse_rtest',
             tags=['L10', 'cc'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,2 ', get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],  # for reuse case, at least 2 layers are required
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' CC reuse input data random case, input data format is fixed as feature ''')
    add_test(name='cc_feature_weight_full_reuse_rtest',
             tags=['L10', 'cc'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,2 ', get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],  # for reuse case, at least 2 layers are required
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' CC reuse weight random case, input data format is fixed as feature ''')
    add_test(name='cc_image_data_full_reuse_rtest',
             tags=['L10', 'cc'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,2 ', get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],  # for reuse case, at least 2 layers are required
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' CC reuse input data random case, input data format is fixed as image ''')
    add_test(name='cc_rtest',
             tags=['L10', 'cc'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' None reuse CC random case ''')
    ############################################## PDP #############################################
    # Planar-data-processor (pooling) tests.
    add_test(name='pdp_split_rtest',
             tags=['L10', 'pdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' PDP random case, fixed to split mode ''')
    add_test(name='pdp_non_split_rtest',
             tags=['L10', 'pdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' PDP random case, fixed to non-split mode ''')
    add_test(name='pdp_rtest',
             tags=['L10', 'pdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' PDP random case ''')
    ############################################# SDP #############################################
    # Single-data-processor tests.  The BS/BN/EW sub-unit tests are registered
    # only when the project configuration enables that hardware block.
    if 'NVDLA_SDP_BS_ENABLE' in project.PROJVAR and project.PROJVAR['NVDLA_SDP_BS_ENABLE'] is True:
        add_test(name='sdp_bs_rtest',
                 tags=['L10', 'sdp'],
                 args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
                 module='nvdla_uvm_test',
                 config=['nvdla_utb'],
                 desc=''' SDP offline random case, with BS enabled and not bypassed ''')
    if 'NVDLA_SDP_BN_ENABLE' in project.PROJVAR and project.PROJVAR['NVDLA_SDP_BN_ENABLE'] is True:
        add_test(name='sdp_bn_rtest',
                 tags=['L10', 'sdp'],
                 args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
                 module='nvdla_uvm_test',
                 config=['nvdla_utb'],
                 desc=''' SDP offline random case, with BN enabled and not bypassed ''')
    if 'NVDLA_SDP_EW_ENABLE' in project.PROJVAR and project.PROJVAR['NVDLA_SDP_EW_ENABLE'] is True:
        add_test(name='sdp_ew_rtest',
                 tags=['L10', 'sdp'],
                 args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
                 module='nvdla_uvm_test',
                 config=['nvdla_utb'],
                 desc=''' SDP offline random case, with EW enabled and not bypassed ''')
    add_test(name='sdp_rtest',
             tags=['L10', 'sdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' SDP offline random case ''')
    ############################################# CDP #############################################
    # Channel-data-processor (cross-channel normalization) tests.
    add_test(name='cdp_exp_rtest',
             tags=['L10', 'cdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' CDP random case, fixed to EXPONENT mode of LE LUT ''')
    add_test(name='cdp_lin_rtest',
             tags=['L10', 'cdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' CDP random case, fixed to LINEAR mode of LE LUT ''')
    add_test(name='cdp_rtest',
             tags=['L10', 'cdp'],
             args=[' -rtlarg +uvm_set_config_int=uvm_test_top,layers,%d ' % plan_arguments['LAYER_NUM'], get_seed_args(), DISABLE_COMPARE_ALL_UNITS_SB_ARG],
             module='nvdla_uvm_test',
             config=['nvdla_utb'],
             desc=''' CDP random case ''')
| 54.464567
| 174
| 0.578575
| 882
| 6,917
| 4.151927
| 0.096372
| 0.061169
| 0.048061
| 0.069907
| 0.949208
| 0.939924
| 0.927362
| 0.889132
| 0.875478
| 0.836974
| 0
| 0.007244
| 0.241579
| 6,917
| 126
| 175
| 54.896825
| 0.690812
| 0.022842
| 0
| 0.64
| 0
| 0
| 0.401283
| 0.119681
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.03
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c34f5db628299b67d9ea24f7f3974c2d3dd8015
| 5,916
|
py
|
Python
|
Platforms/Web/Processing/Api/Discord/errors.py
|
HeapUnderfl0w/Phaazebot
|
54e637bd4bc213b8efdaf23d5f331f2569e96843
|
[
"MIT"
] | null | null | null |
Platforms/Web/Processing/Api/Discord/errors.py
|
HeapUnderfl0w/Phaazebot
|
54e637bd4bc213b8efdaf23d5f331f2569e96843
|
[
"MIT"
] | null | null | null |
Platforms/Web/Processing/Api/Discord/errors.py
|
HeapUnderfl0w/Phaazebot
|
54e637bd4bc213b8efdaf23d5f331f2569e96843
|
[
"MIT"
] | null | null | null |
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.index import WebIndex
import json
from aiohttp.web import Response, Request
async def apiDiscordGuildUnknown(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
	"""
	Build the 400 "discord_guild_unknown" JSON error response.

	Takes from kwargs:
		msg:str
		guild_id:str
		guild_name:str
	"""
	guild_id:str = kwargs.get("guild_id", "")
	guild_name:str = kwargs.get("guild_name", "")

	res:dict = dict(status=400, error="discord_guild_unknown")
	if guild_id: res["guild_id"] = guild_id
	if guild_name: res["guild_name"] = guild_name

	# assemble a descriptive default message from whatever details we received;
	# an explicit kwargs["msg"] overrides it
	msg_parts:list = ["Could not find a phaaze known guild"]
	if guild_name: msg_parts.append(f" with name '{guild_name}'")
	if guild_id: msg_parts.append(f" (Guild ID:{guild_id})")
	msg:str = kwargs.get("msg", "".join(msg_parts))
	res["msg"] = msg

	cls.Web.BASE.Logger.debug(f"(API/Discord) 400: {WebRequest.path} | {msg}", require="api:400")
	return cls.response(text=json.dumps(res), content_type="application/json", status=400)
async def apiDiscordMissingPermission(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
	"""
	Build the 400 "discord_missing_permission" JSON error response.

	Takes from kwargs:
		msg:str
		user_id:str
		user_name:str
		guild_id:str
		guild_name:str
	"""
	user_id:str = kwargs.get("user_id", "")
	user_name:str = kwargs.get("user_name", "")
	guild_id:str = kwargs.get("guild_id", "")
	guild_name:str = kwargs.get("guild_name", "")

	res:dict = dict(status=400, error="discord_missing_permission")
	if user_id: res["user_id"] = user_id
	if user_name: res["user_name"] = user_name
	if guild_id: res["guild_id"] = guild_id
	if guild_name: res["guild_name"] = guild_name

	# assemble a descriptive default message from whatever details we received;
	# an explicit kwargs["msg"] overrides it
	msg_parts:list = ["Missing 'administrator' or 'manage_guild' permission"]
	if user_name: msg_parts.append(f" for user '{user_name}'")
	if guild_name: msg_parts.append(f" on guild '{guild_name}'")
	if user_id: msg_parts.append(f" (User ID:{user_id})")
	if guild_id: msg_parts.append(f" (Guild ID:{guild_id})")
	msg:str = kwargs.get("msg", "".join(msg_parts))
	res["msg"] = msg

	cls.Web.BASE.Logger.debug(f"(API/Discord) 400 Missing Permission: {WebRequest.path} | {msg}", require="api:400")
	return cls.response(text=json.dumps(res), content_type="application/json", status=400)
async def apiDiscordMemberNotFound(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
	"""
	Build the 404 "discord_member_not_found" JSON error response.

	Takes from kwargs:
		msg:str
		user_id:str
		user_name:str
		guild_id:str
		guild_name:str
	"""
	res:dict = dict(status=404, error="discord_member_not_found")
	# echo back whatever identifying details the caller supplied
	user_id:str = kwargs.get("user_id", "")
	if user_id:
		res["user_id"] = user_id
	user_name:str = kwargs.get("user_name", "")
	if user_name:
		res["user_name"] = user_name
	guild_id:str = kwargs.get("guild_id", "")
	if guild_id:
		res["guild_id"] = guild_id
	guild_name:str = kwargs.get("guild_name", "")
	if guild_name:
		res["guild_name"] = guild_name
	# build message (an explicit kwargs["msg"] overrides the default)
	default_msg:str = "Could not find a valid member"
	if guild_name:
		default_msg += f" on guild '{guild_name}'"
	if user_name:
		default_msg += f" with name '{user_name}'"
	if guild_id:
		default_msg += f" (Guild ID: {guild_id})"
	if user_id:
		default_msg += f" (User ID: {user_id})"
	msg:str = kwargs.get("msg", default_msg)
	res["msg"] = msg
	# BUGFIX: the log line previously said "400" although this endpoint sets
	# status=404 and require="api:404"
	cls.Web.BASE.Logger.debug(f"(API/Discord) 404 Member not Found: {WebRequest.path} | {msg}", require="api:404")
	return cls.response(
		text=json.dumps( res ),
		content_type="application/json",
		status=404
	)
async def apiDiscordRoleNotFound(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
	"""
	Build the 404 "discord_role_not_found" JSON error response.

	Takes from kwargs:
		msg:str
		role_id:str
		role_name:str
		guild_id:str
		guild_name:str
	"""
	res:dict = dict(status=404, error="discord_role_not_found")
	# echo back whatever identifying details the caller supplied
	role_id:str = kwargs.get("role_id", "")
	if role_id:
		res["role_id"] = role_id
	role_name:str = kwargs.get("role_name", "")
	if role_name:
		res["role_name"] = role_name
	guild_id:str = kwargs.get("guild_id", "")
	if guild_id:
		res["guild_id"] = guild_id
	guild_name:str = kwargs.get("guild_name", "")
	if guild_name:
		res["guild_name"] = guild_name
	# build message (an explicit kwargs["msg"] overrides the default)
	default_msg:str = "Could not find a valid role"
	if guild_name:
		default_msg += f" on guild '{guild_name}'"
	if role_name:
		default_msg += f" with name '{role_name}'"
	if guild_id:
		default_msg += f" (Guild ID:{guild_id})"
	if role_id:
		default_msg += f" (Role ID:{role_id})"
	msg:str = kwargs.get("msg", default_msg)
	res["msg"] = msg
	# BUGFIX: the log line previously said "400" although this endpoint sets
	# status=404 and require="api:404"
	cls.Web.BASE.Logger.debug(f"(API/Discord) 404 Role not Found: {WebRequest.path} | {msg}", require="api:404")
	return cls.response(
		text=json.dumps( res ),
		content_type="application/json",
		status=404
	)
async def apiDiscordChannelNotFound(cls:"WebIndex", WebRequest:Request, **kwargs:dict) -> Response:
	"""
	Build the 404 "discord_channel_not_found" JSON error response.

	Takes from kwargs:
		msg:str
		channel_id:str
		channel_name:str
		guild_id:str
		guild_name:str
	"""
	res:dict = dict(status=404, error="discord_channel_not_found")
	# echo back whatever identifying details the caller supplied
	channel_id:str = kwargs.get("channel_id", "")
	if channel_id:
		res["channel_id"] = channel_id
	channel_name:str = kwargs.get("channel_name", "")
	if channel_name:
		res["channel_name"] = channel_name
	guild_id:str = kwargs.get("guild_id", "")
	if guild_id:
		res["guild_id"] = guild_id
	guild_name:str = kwargs.get("guild_name", "")
	if guild_name:
		res["guild_name"] = guild_name
	# build message (an explicit kwargs["msg"] overrides the default)
	default_msg:str = "Could not find a valid channel"
	if guild_name:
		default_msg += f" on guild '{guild_name}'"
	if channel_name:
		default_msg += f" with name '{channel_name}'"
	if guild_id:
		default_msg += f" (Guild ID:{guild_id})"
	if channel_id:
		default_msg += f" (Channel ID:{channel_id})"
	msg:str = kwargs.get("msg", default_msg)
	res["msg"] = msg
	# BUGFIX: the log line previously said "400" although this endpoint sets
	# status=404 and require="api:404"
	cls.Web.BASE.Logger.debug(f"(API/Discord) 404 Channel not Found: {WebRequest.path} | {msg}", require="api:404")
	return cls.response(
		text=json.dumps( res ),
		content_type="application/json",
		status=404
	)
| 24.345679
| 113
| 0.68881
| 906
| 5,916
| 4.284768
| 0.081678
| 0.081144
| 0.071097
| 0.043792
| 0.793663
| 0.793663
| 0.762751
| 0.762751
| 0.762751
| 0.762751
| 0
| 0.012022
| 0.156356
| 5,916
| 242
| 114
| 24.446281
| 0.765778
| 0.011663
| 0
| 0.7
| 0
| 0
| 0.281432
| 0.02211
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.026667
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
308fd779c425a8aa77041e0808189459f901cd7a
| 19
|
py
|
Python
|
lppydsmc/data/__init__.py
|
Quettle/lppydsmc
|
37290792e845086f7ea182d81f284d68b6cdcbea
|
[
"MIT"
] | null | null | null |
lppydsmc/data/__init__.py
|
Quettle/lppydsmc
|
37290792e845086f7ea182d81f284d68b6cdcbea
|
[
"MIT"
] | null | null | null |
lppydsmc/data/__init__.py
|
Quettle/lppydsmc
|
37290792e845086f7ea182d81f284d68b6cdcbea
|
[
"MIT"
] | null | null | null |
from . import saver
| 19
| 19
| 0.789474
| 3
| 19
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 19
| 1
| 19
| 19
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
30cd857191e434074dedf1737ca8f1179a3e4f2d
| 183
|
py
|
Python
|
src/profile_build_tools/__init__.py
|
atu4403/profile_build_tools
|
37e7e03c285bd310a605b924d5e9c143036edd8b
|
[
"MIT"
] | null | null | null |
src/profile_build_tools/__init__.py
|
atu4403/profile_build_tools
|
37e7e03c285bd310a605b924d5e9c143036edd8b
|
[
"MIT"
] | null | null | null |
src/profile_build_tools/__init__.py
|
atu4403/profile_build_tools
|
37e7e03c285bd310a605b924d5e9c143036edd8b
|
[
"MIT"
] | null | null | null |
from .atcoder_images import update_atcoder_images
from .pypi_stats import get_pypi_stats
from .qiita_stats import get_qiita_stats
from .github_stats import get_github_stats, _to_list
| 36.6
| 52
| 0.879781
| 30
| 183
| 4.9
| 0.4
| 0.22449
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092896
| 183
| 4
| 53
| 45.75
| 0.885542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
30d1cc135258e97fecc0b8ea9dc90f0b189bdfae
| 2,193
|
py
|
Python
|
src/jobs_done10/_tests/test_repository.py
|
prusse-martin/jobs_done10
|
2433bfda862278c699a0532a3b32c9d7d0acf9e3
|
[
"MIT"
] | 21
|
2015-11-26T13:27:01.000Z
|
2022-01-11T03:17:54.000Z
|
src/jobs_done10/_tests/test_repository.py
|
prusse-martin/jobs_done10
|
2433bfda862278c699a0532a3b32c9d7d0acf9e3
|
[
"MIT"
] | 7
|
2018-08-31T16:00:52.000Z
|
2021-11-10T17:57:04.000Z
|
src/jobs_done10/_tests/test_repository.py
|
prusse-martin/jobs_done10
|
2433bfda862278c699a0532a3b32c9d7d0acf9e3
|
[
"MIT"
] | 7
|
2016-02-12T18:13:21.000Z
|
2020-06-09T15:15:24.000Z
|
from jobs_done10.repository import Repository
def testNameFromURL():
    """Repository.name must be the final path component of the URL, sans '.git'."""
    # (url, expected repository name) pairs covering every git URL scheme;
    # NOTE(review): two ssh entries appear twice in the original list and are
    # kept verbatim to preserve the exact same assertions
    cases = [
        ('/path/to/repo.git/', 'repo'),
        ('file:///path/to/repo.git/', 'repo'),
        ('file://~/path/to/repo.git/', 'repo'),
        ('git://host.xz/path/to/repo.git/', 'repo'),
        ('git://host.xz/~user/path/to/repo.git/', 'repo'),
        ('host.xz:/path/to/repo.git/', 'repo'),
        ('host.xz:path/to/repo.git', 'repo'),
        ('host.xz:~user/path/to/repo.git/', 'repo'),
        ('http://host.xz/path/to/repo.git/', 'repo'),
        ('https://host.xz/path/to/repo.git/', 'repo'),
        ('path/to/repo.git/', 'repo'),
        ('rsync://host.xz/path/to/repo.git/', 'repo'),
        ('ssh://host.xz/path/to/repo.git/', 'repo'),
        ('ssh://host.xz/path/to/repo.git/', 'repo'),
        ('ssh://host.xz/~/path/to/repo.git', 'repo'),
        ('ssh://host.xz/~user/path/to/repo.git/', 'repo'),
        ('ssh://host.xz:port/path/to/repo.git/', 'repo'),
        ('ssh://user@host.xz/path/to/repo.git/', 'repo'),
        ('ssh://user@host.xz/path/to/repo.git/', 'repo'),
        ('ssh://user@host.xz/~/path/to/repo.git', 'repo'),
        ('ssh://user@host.xz/~user/path/to/repo.git/', 'repo'),
        ('ssh://user@host.xz:port/path/to/repo.git/', 'repo'),
        ('user@host.xz:/path/to/repo.git/', 'repo'),
        ('user@host.xz:path/to/repo.git', 'repo'),
        ('user@host.xz:~user/path/to/repo.git/', 'repo'),
        ('~/path/to/repo.git', 'repo'),
    ]
    for url, expected_name in cases:
        obtained_name = Repository(url=url).name
        assert obtained_name == expected_name, 'Failed for url "%s"' % url
def testEquality():
    """Repositories compare equal iff both url and branch match."""
    assert Repository() == Repository()
    example = 'http://example.com'
    assert Repository(url=example) == Repository(url=example)
    assert Repository(url=example, branch='foo') != Repository(url=example)
    assert Repository(url=example) != Repository(url='http://other.com')
    assert (Repository(url=example, branch='bar')
            == Repository(url=example, branch='bar'))
    assert (Repository(url=example, branch='foo')
            != Repository(url=example, branch='bar'))
| 43
| 101
| 0.554036
| 301
| 2,193
| 4.026578
| 0.119601
| 0.161716
| 0.214521
| 0.278878
| 0.833333
| 0.833333
| 0.833333
| 0.769802
| 0.674092
| 0.581683
| 0
| 0.001111
| 0.179207
| 2,193
| 50
| 102
| 43.86
| 0.672222
| 0
| 0
| 0.146341
| 0
| 0
| 0.512095
| 0.314012
| 0
| 0
| 0
| 0
| 0.170732
| 1
| 0.04878
| false
| 0
| 0.02439
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
30f55feaf26bc2741029bc60ca52c4e976cd149e
| 143
|
py
|
Python
|
views/template.py
|
dev-easyshares/mighty
|
a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b
|
[
"MIT"
] | null | null | null |
views/template.py
|
dev-easyshares/mighty
|
a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b
|
[
"MIT"
] | 1
|
2022-03-12T00:57:37.000Z
|
2022-03-12T00:57:37.000Z
|
views/template.py
|
dev-easyshares/mighty
|
a6cf473fb8cfbf5b92db68c7b068fc8ae2911b8b
|
[
"MIT"
] | null | null | null |
from django.views.generic.base import TemplateView
from mighty.views.base import BaseView
class TemplateView(BaseView, TemplateView):
    """Project template view: Django's ``TemplateView`` with ``BaseView`` mixed in.

    NOTE: the class name deliberately shadows the ``TemplateView`` imported
    above; the base-class list is evaluated before the new name is bound, so
    the import is still what gets subclassed here.
    """
    pass
| 28.6
| 50
| 0.825175
| 18
| 143
| 6.555556
| 0.611111
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111888
| 143
| 5
| 51
| 28.6
| 0.929134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ebaa996dedecfab8722ce05c03253747907e1a81
| 94
|
py
|
Python
|
newsdemo/apps/crawler/views.py
|
zeyap/news_viz
|
597b078255aa5a95623643089bb51dcaebe63c32
|
[
"MIT"
] | 1
|
2019-01-08T03:13:11.000Z
|
2019-01-08T03:13:11.000Z
|
newsdemo/apps/crawler/views.py
|
zeyap/news_viz
|
597b078255aa5a95623643089bb51dcaebe63c32
|
[
"MIT"
] | null | null | null |
newsdemo/apps/crawler/views.py
|
zeyap/news_viz
|
597b078255aa5a95623643089bb51dcaebe63c32
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.contrib.auth import authenticate
import json
| 23.5
| 44
| 0.861702
| 13
| 94
| 6.230769
| 0.692308
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 94
| 3
| 45
| 31.333333
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ebc287d0ebc887bb7da84e451c9e3c1008c869fe
| 136
|
py
|
Python
|
models/__init__.py
|
dpressel/ComerNet
|
db7c93e936f33c814c6dc6bd7b765ab660f59f85
|
[
"Apache-2.0"
] | 30
|
2019-10-08T08:05:02.000Z
|
2021-12-20T08:59:00.000Z
|
models/__init__.py
|
dpressel/ComerNet
|
db7c93e936f33c814c6dc6bd7b765ab660f59f85
|
[
"Apache-2.0"
] | 4
|
2019-10-29T09:28:24.000Z
|
2020-04-09T05:57:08.000Z
|
models/__init__.py
|
dpressel/ComerNet
|
db7c93e936f33c814c6dc6bd7b765ab660f59f85
|
[
"Apache-2.0"
] | 7
|
2020-01-28T07:03:23.000Z
|
2021-05-21T18:50:29.000Z
|
from models.attention import *
from models.rnn import *
from models.seq2seq import *
from models.loss import *
from models.beam import *
| 27.2
| 30
| 0.786765
| 20
| 136
| 5.35
| 0.4
| 0.46729
| 0.598131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008547
| 0.139706
| 136
| 5
| 31
| 27.2
| 0.905983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
ebd29ea5e317f1eafd1c47389ca39d6b28f31ef3
| 27
|
py
|
Python
|
SplitChannels/nionswift_plugin/split_channels/__init__.py
|
Brow71189/swift_workshop_18
|
436aca4b8f541d20a502f1e5d3ebfa8fe9a24bee
|
[
"MIT"
] | null | null | null |
SplitChannels/nionswift_plugin/split_channels/__init__.py
|
Brow71189/swift_workshop_18
|
436aca4b8f541d20a502f1e5d3ebfa8fe9a24bee
|
[
"MIT"
] | null | null | null |
SplitChannels/nionswift_plugin/split_channels/__init__.py
|
Brow71189/swift_workshop_18
|
436aca4b8f541d20a502f1e5d3ebfa8fe9a24bee
|
[
"MIT"
] | null | null | null |
from . import SplitChannels
| 27
| 27
| 0.851852
| 3
| 27
| 7.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 27
| 1
| 27
| 27
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ebde302c944082a1bcc1e429c61e413f8c52a8c3
| 27
|
py
|
Python
|
src/euler_python_package/euler_python/medium/p378.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p378.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
src/euler_python_package/euler_python/medium/p378.py
|
wilsonify/euler
|
5214b776175e6d76a7c6d8915d0e062d189d9b79
|
[
"MIT"
] | null | null | null |
def problem378():
pass
| 9
| 17
| 0.62963
| 3
| 27
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.259259
| 27
| 2
| 18
| 13.5
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ccf4ba2feb011bf1e46919792b8a40ca09749b68
| 7,005
|
py
|
Python
|
python/ray/experimental/workflow/tests/test_storage.py
|
tdml13/ray
|
01db1ec9f922370da27b78b0d85ce4f3ffaf4377
|
[
"Apache-2.0"
] | 39
|
2021-02-02T23:09:31.000Z
|
2022-03-28T16:39:12.000Z
|
python/ray/experimental/workflow/tests/test_storage.py
|
tdml13/ray
|
01db1ec9f922370da27b78b0d85ce4f3ffaf4377
|
[
"Apache-2.0"
] | 65
|
2021-02-04T08:23:41.000Z
|
2022-03-16T19:16:20.000Z
|
python/ray/experimental/workflow/tests/test_storage.py
|
tdml13/ray
|
01db1ec9f922370da27b78b0d85ce4f3ffaf4377
|
[
"Apache-2.0"
] | 20
|
2021-02-05T05:51:39.000Z
|
2022-03-04T21:13:24.000Z
|
import ray
from ray.experimental.workflow import storage
from ray.experimental.workflow import workflow_storage
def some_func(x):
return x + 1
def some_func2(x):
return x - 1
def test_raw_storage():
ray.init()
workflow_id = test_workflow_storage.__name__
raw_storage = storage.get_global_storage()
step_id = "some_step"
input_metadata = {"2": "c"}
output_metadata = {"a": 1}
args = ([1, "2"], {"k": b"543"})
output = ["the_answer"]
object_resolved = 42
obj_ref = ray.put(object_resolved)
# test creating normal objects
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_object_ref(workflow_id, obj_ref)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
raw_storage.save_step_output(workflow_id, step_id, output)
step_status = raw_storage.get_step_status(workflow_id, step_id)
assert step_status.args_exists
assert step_status.output_object_exists
assert step_status.output_metadata_exists
assert step_status.input_metadata_exists
assert step_status.func_body_exists
assert raw_storage.load_step_input_metadata(workflow_id,
step_id) == input_metadata
assert raw_storage.load_step_func_body(workflow_id, step_id)(33) == 34
assert raw_storage.load_step_args(workflow_id, step_id) == args
assert ray.get(raw_storage.load_object_ref(
workflow_id, obj_ref.hex())) == object_resolved
assert raw_storage.load_step_output_metadata(workflow_id,
step_id) == output_metadata
assert raw_storage.load_step_output(workflow_id, step_id) == output
# test overwrite
input_metadata = [input_metadata, "overwrite"]
output_metadata = [output_metadata, "overwrite"]
args = (args, "overwrite")
output = (output, "overwrite")
object_resolved = (object_resolved, "overwrite")
obj_ref = ray.put(object_resolved)
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func2)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_object_ref(workflow_id, obj_ref)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
raw_storage.save_step_output(workflow_id, step_id, output)
assert raw_storage.load_step_input_metadata(workflow_id,
step_id) == input_metadata
assert raw_storage.load_step_func_body(workflow_id, step_id)(33) == 32
assert raw_storage.load_step_args(workflow_id, step_id) == args
assert ray.get(raw_storage.load_object_ref(
workflow_id, obj_ref.hex())) == object_resolved
assert raw_storage.load_step_output_metadata(workflow_id,
step_id) == output_metadata
assert raw_storage.load_step_output(workflow_id, step_id) == output
ray.shutdown()
def test_workflow_storage():
ray.init()
workflow_id = test_workflow_storage.__name__
raw_storage = storage.get_global_storage()
step_id = "some_step"
input_metadata = {
"name": "test_basic_workflows.append1",
"object_refs": ["abc"],
"workflows": ["def"]
}
output_metadata = {
"output_step_id": "a12423",
"dynamic_output_step_id": "b1234"
}
args = ([1, "2"], {"k": b"543"})
output = ["the_answer"]
object_resolved = 42
obj_ref = ray.put(object_resolved)
# test basics
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_object_ref(workflow_id, obj_ref)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
raw_storage.save_step_output(workflow_id, step_id, output)
wf_storage = workflow_storage.WorkflowStorage(workflow_id)
assert wf_storage.load_step_output(step_id) == output
assert wf_storage.load_step_args(step_id, [], []) == args
assert wf_storage.load_step_func_body(step_id)(33) == 34
assert ray.get(wf_storage.load_object_ref(
obj_ref.hex())) == object_resolved
# test "inspect_step"
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
output_object_valid=True)
assert inspect_result.is_recoverable()
step_id = "some_step2"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
raw_storage.save_step_output_metadata(workflow_id, step_id,
output_metadata)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
output_step_id=output_metadata["dynamic_output_step_id"])
assert inspect_result.is_recoverable()
step_id = "some_step3"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
raw_storage.save_step_args(workflow_id, step_id, args)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
args_valid=True,
func_body_valid=True,
object_refs=input_metadata["object_refs"],
workflows=input_metadata["workflows"])
assert inspect_result.is_recoverable()
step_id = "some_step4"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
raw_storage.save_step_func_body(workflow_id, step_id, some_func)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
func_body_valid=True,
object_refs=input_metadata["object_refs"],
workflows=input_metadata["workflows"])
assert not inspect_result.is_recoverable()
step_id = "some_step5"
raw_storage.save_step_input_metadata(workflow_id, step_id, input_metadata)
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult(
object_refs=input_metadata["object_refs"],
workflows=input_metadata["workflows"])
assert not inspect_result.is_recoverable()
step_id = "some_step6"
inspect_result = wf_storage.inspect_step(step_id)
assert inspect_result == workflow_storage.StepInspectResult()
assert not inspect_result.is_recoverable()
ray.shutdown()
| 41.696429
| 78
| 0.71763
| 925
| 7,005
| 4.96973
| 0.084324
| 0.073091
| 0.109637
| 0.125299
| 0.843159
| 0.771373
| 0.758103
| 0.758103
| 0.731999
| 0.731999
| 0
| 0.008356
| 0.197002
| 7,005
| 167
| 79
| 41.946108
| 0.808889
| 0.010707
| 0
| 0.65
| 0
| 0
| 0.048087
| 0.010397
| 0
| 0
| 0
| 0
| 0.235714
| 1
| 0.028571
| false
| 0
| 0.021429
| 0.014286
| 0.064286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
6933d959529c17ad0262f4c63f5ad37341042a8b
| 21
|
py
|
Python
|
rc4/__init__.py
|
DavidBuchanan314/rc4
|
eae984edb6443f954344c9590c3b1565c3b59d2d
|
[
"MIT"
] | 14
|
2019-08-11T08:50:03.000Z
|
2022-03-07T07:26:26.000Z
|
rc4/__init__.py
|
DavidBuchanan314/rc4
|
eae984edb6443f954344c9590c3b1565c3b59d2d
|
[
"MIT"
] | null | null | null |
rc4/__init__.py
|
DavidBuchanan314/rc4
|
eae984edb6443f954344c9590c3b1565c3b59d2d
|
[
"MIT"
] | 4
|
2019-06-06T19:57:14.000Z
|
2021-03-21T18:09:48.000Z
|
from .rc4 import RC4
| 10.5
| 20
| 0.761905
| 4
| 21
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 0.190476
| 21
| 1
| 21
| 21
| 0.823529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
693f0a0ea836a4fb86a1f4caa264373625ab4343
| 53
|
py
|
Python
|
tests/files/while.py
|
docmarionum1/py65c
|
cd59ef25d2759b63efa5655f529fd31564cc31b0
|
[
"WTFPL"
] | 12
|
2015-08-03T05:16:18.000Z
|
2020-09-12T12:38:16.000Z
|
tests/files/while.py
|
docmarionum1/py65c
|
cd59ef25d2759b63efa5655f529fd31564cc31b0
|
[
"WTFPL"
] | null | null | null |
tests/files/while.py
|
docmarionum1/py65c
|
cd59ef25d2759b63efa5655f529fd31564cc31b0
|
[
"WTFPL"
] | null | null | null |
i = 10
j = 0
while i > 2:
i = i - 1
j = j + 8
| 10.6
| 13
| 0.339623
| 13
| 53
| 1.384615
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0.509434
| 53
| 5
| 14
| 10.6
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
15e2dc5662d58fd67bbebf0e1860fb708ce9e108
| 36
|
py
|
Python
|
survivalbox/__init__.py
|
JohannesTheo/SurvivalBox
|
e4d4c754bdc28961dc5e8fa5ce74eb1875f043e3
|
[
"MIT"
] | null | null | null |
survivalbox/__init__.py
|
JohannesTheo/SurvivalBox
|
e4d4c754bdc28961dc5e8fa5ce74eb1875f043e3
|
[
"MIT"
] | null | null | null |
survivalbox/__init__.py
|
JohannesTheo/SurvivalBox
|
e4d4c754bdc28961dc5e8fa5ce74eb1875f043e3
|
[
"MIT"
] | null | null | null |
from .survivalbox import SurvivalBox
| 36
| 36
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
c61b37d13518bdeabac7ab835f2f6f662637c1c9
| 43
|
py
|
Python
|
WebApp/tests/models/pointrend.py
|
Diversion2k22/Objectify
|
9bf89ded16dbf7ca5c2fb67a96eea14d7de3039c
|
[
"MIT"
] | 5
|
2022-02-05T07:09:50.000Z
|
2022-03-31T17:13:10.000Z
|
WebApp/tests/models/pointrend.py
|
Diversion2k22/Objectify
|
9bf89ded16dbf7ca5c2fb67a96eea14d7de3039c
|
[
"MIT"
] | 1
|
2022-02-06T09:43:04.000Z
|
2022-02-06T09:43:04.000Z
|
WebApp/tests/models/pointrend.py
|
Diversion2k22/Objectify
|
9bf89ded16dbf7ca5c2fb67a96eea14d7de3039c
|
[
"MIT"
] | 9
|
2022-01-31T15:45:00.000Z
|
2022-03-31T17:13:15.000Z
|
def load_model():
print("Model Loaded")
| 21.5
| 25
| 0.674419
| 6
| 43
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 25
| 21.5
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
c638ae35f536f4b26dca2447c5d6a323ea1ce035
| 95
|
py
|
Python
|
src/Messages/ShutdownRequest.py
|
M4ddinPoe/PiNetRadio
|
a97653d85620c9e94f216acdbc28d7be95040293
|
[
"MIT"
] | null | null | null |
src/Messages/ShutdownRequest.py
|
M4ddinPoe/PiNetRadio
|
a97653d85620c9e94f216acdbc28d7be95040293
|
[
"MIT"
] | null | null | null |
src/Messages/ShutdownRequest.py
|
M4ddinPoe/PiNetRadio
|
a97653d85620c9e94f216acdbc28d7be95040293
|
[
"MIT"
] | null | null | null |
from src.Messages.MessageData import MessageData
class ShutdownRequest(MessageData):
pass
| 19
| 48
| 0.821053
| 10
| 95
| 7.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126316
| 95
| 5
| 49
| 19
| 0.939759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
c63a30e0e8833ceec9af670d42a6f572965e3c25
| 72
|
py
|
Python
|
src/modules/__init__.py
|
Nobregaigor/FEBio-Python
|
1ad5578af00e44bd6def06ee17538ac5e4375a38
|
[
"MIT"
] | null | null | null |
src/modules/__init__.py
|
Nobregaigor/FEBio-Python
|
1ad5578af00e44bd6def06ee17538ac5e4375a38
|
[
"MIT"
] | null | null | null |
src/modules/__init__.py
|
Nobregaigor/FEBio-Python
|
1ad5578af00e44bd6def06ee17538ac5e4375a38
|
[
"MIT"
] | null | null | null |
from .sys_functions import *
from .enums import *
from .classes import *
| 24
| 28
| 0.763889
| 10
| 72
| 5.4
| 0.6
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152778
| 72
| 3
| 29
| 24
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d6826000c0a3b0ee797f22772d829d88679013b0
| 101
|
py
|
Python
|
office365/directory/applications/spa_application.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 544
|
2016-08-04T17:10:16.000Z
|
2022-03-31T07:17:20.000Z
|
office365/directory/applications/spa_application.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 438
|
2016-10-11T12:24:22.000Z
|
2022-03-31T19:30:35.000Z
|
office365/directory/applications/spa_application.py
|
rikeshtailor/Office365-REST-Python-Client
|
ca7bfa1b22212137bb4e984c0457632163e89a43
|
[
"MIT"
] | 202
|
2016-08-22T19:29:40.000Z
|
2022-03-30T20:26:15.000Z
|
from office365.runtime.client_value import ClientValue
class SpaApplication(ClientValue):
pass
| 16.833333
| 54
| 0.821782
| 11
| 101
| 7.454545
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 0.128713
| 101
| 5
| 55
| 20.2
| 0.897727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
ba3358f953cf1f1f90ff1fbf40efa0b7423ce42e
| 253
|
py
|
Python
|
data/lib/utils/__init__.py
|
Synell/PERT-Maker
|
8eac93eaa788ee0a201437e5bd30d55133d7cd38
|
[
"MIT"
] | null | null | null |
data/lib/utils/__init__.py
|
Synell/PERT-Maker
|
8eac93eaa788ee0a201437e5bd30d55133d7cd38
|
[
"MIT"
] | null | null | null |
data/lib/utils/__init__.py
|
Synell/PERT-Maker
|
8eac93eaa788ee0a201437e5bd30d55133d7cd38
|
[
"MIT"
] | null | null | null |
#----------------------------------------------------------------------
# Libraries
from .byte import Byte
from .color import Color
from .stringUtils import StringUtils
#----------------------------------------------------------------------
| 31.625
| 72
| 0.312253
| 13
| 253
| 6.076923
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110672
| 253
| 7
| 73
| 36.142857
| 0.351111
| 0.592885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba44918acdb4baaa0f8315404d0c1902a914bc99
| 43
|
py
|
Python
|
part_map/__init__.py
|
jdpatt/bga_color_map
|
e807d33c74ba7d5dad859a2488858be23fd2bc6c
|
[
"MIT"
] | 2
|
2019-08-06T12:34:59.000Z
|
2020-03-05T04:42:40.000Z
|
part_map/__init__.py
|
jdpatt/bga_color_map
|
e807d33c74ba7d5dad859a2488858be23fd2bc6c
|
[
"MIT"
] | 11
|
2018-04-07T16:58:46.000Z
|
2020-03-08T00:43:42.000Z
|
part_map/__init__.py
|
jdpatt/part_map
|
e807d33c74ba7d5dad859a2488858be23fd2bc6c
|
[
"MIT"
] | null | null | null |
"""Part Visualizer"""
from .cli import map
| 14.333333
| 21
| 0.697674
| 6
| 43
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 2
| 22
| 21.5
| 0.810811
| 0.348837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ba993339f61f62810d30570d1823e5a84b8c6c74
| 48
|
py
|
Python
|
myapp/tasks/__init__.py
|
jollyshuai/cube-studio
|
02ee737801f37a78a1b2e49c844c8401b41d9c48
|
[
"Apache-2.0"
] | 1
|
2022-03-19T14:10:26.000Z
|
2022-03-19T14:10:26.000Z
|
myapp/tasks/__init__.py
|
jollyshuai/cube-studio
|
02ee737801f37a78a1b2e49c844c8401b41d9c48
|
[
"Apache-2.0"
] | null | null | null |
myapp/tasks/__init__.py
|
jollyshuai/cube-studio
|
02ee737801f37a78a1b2e49c844c8401b41d9c48
|
[
"Apache-2.0"
] | null | null | null |
from . import schedules
from . import async_task
| 24
| 24
| 0.8125
| 7
| 48
| 5.428571
| 0.714286
| 0.526316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145833
| 48
| 2
| 24
| 24
| 0.926829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
baa8de1b17d2e8e1eb26823055bf88f29a82f7fd
| 159
|
py
|
Python
|
qualitymeter/refactoring_opportunities/pullup_method_identification.py
|
hamidm21/QualityMeter
|
2b0645662634b267338b91141740db45bf8f17ad
|
[
"MIT"
] | null | null | null |
qualitymeter/refactoring_opportunities/pullup_method_identification.py
|
hamidm21/QualityMeter
|
2b0645662634b267338b91141740db45bf8f17ad
|
[
"MIT"
] | null | null | null |
qualitymeter/refactoring_opportunities/pullup_method_identification.py
|
hamidm21/QualityMeter
|
2b0645662634b267338b91141740db45bf8f17ad
|
[
"MIT"
] | null | null | null |
"""
The module identify pull-up method refactoring opportunities in Java projects
"""
# Todo: Implementing a decent pull-up method identification algorithm.
| 22.714286
| 77
| 0.786164
| 20
| 159
| 6.25
| 0.85
| 0.096
| 0.192
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144654
| 159
| 6
| 78
| 26.5
| 0.919118
| 0.924528
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.166667
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bab5c049512fb65f2c9e1cd838290529bbc19cb0
| 73
|
py
|
Python
|
spydrnet/tests/test_verilog_to_edif.py
|
ganeshgore/spydrnet
|
22672b8fc7d63461a71077bd20f29df6d38e96f4
|
[
"BSD-3-Clause"
] | 34
|
2020-03-12T15:40:49.000Z
|
2022-02-28T07:13:47.000Z
|
spydrnet/tests/test_verilog_to_edif.py
|
ganeshgore/spydrnet
|
22672b8fc7d63461a71077bd20f29df6d38e96f4
|
[
"BSD-3-Clause"
] | 104
|
2020-01-06T20:32:19.000Z
|
2022-01-02T00:20:14.000Z
|
spydrnet/tests/test_verilog_to_edif.py
|
ganeshgore/spydrnet
|
22672b8fc7d63461a71077bd20f29df6d38e96f4
|
[
"BSD-3-Clause"
] | 10
|
2020-09-02T20:24:00.000Z
|
2022-02-24T16:10:07.000Z
|
import unittest
class TestVerilogToEdif(unittest.TestCase):
pass
| 9.125
| 43
| 0.767123
| 7
| 73
| 8
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178082
| 73
| 8
| 44
| 9.125
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
bac7916afa7e6b2813f450c6068dae12eac86200
| 4,373
|
py
|
Python
|
tst/lyap/verifier/test_Z3Verifier.py
|
oxford-oxcav/fossil
|
f5b8e2bba80d8792b149ee75b51d3ee74df9b88e
|
[
"BSD-3-Clause"
] | 1
|
2021-05-21T17:24:31.000Z
|
2021-05-21T17:24:31.000Z
|
tst/lyap/verifier/test_Z3Verifier.py
|
oxford-oxcav/fossil
|
f5b8e2bba80d8792b149ee75b51d3ee74df9b88e
|
[
"BSD-3-Clause"
] | null | null | null |
tst/lyap/verifier/test_Z3Verifier.py
|
oxford-oxcav/fossil
|
f5b8e2bba80d8792b149ee75b51d3ee74df9b88e
|
[
"BSD-3-Clause"
] | 1
|
2021-11-09T15:35:26.000Z
|
2021-11-09T15:35:26.000Z
|
# Copyright (c) 2021, Alessandro Abate, Daniele Ahmed, Alec Edwards, Mirco Giacobbe, Andrea Peruffo
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from src.lyap.verifier.z3verifier import Z3Verifier
from functools import partial
from src.lyap.learner.net import NN
from src.shared.activations import ActivationType
from experiments.benchmarks.benchmarks_lyap import *
import torch
from src.shared.components.Translator import Translator
from unittest import mock
from z3 import *
from src.shared.cegis_values import CegisStateKeys
from src.shared.consts import TranslatorType
class TestZ3Verifier(unittest.TestCase):
def test_poly2_with_good_Lyapunov_function(self):
system = partial(poly_2, batch_size=100)
n_vars = 2
verifier = Z3Verifier
x = verifier.new_vars(n_vars)
f, domain, _ = system(functions=verifier.solver_fncts(), inner=0, outer=100)
domain_z3 = domain(verifier.solver_fncts(), x)
verifier = Z3Verifier(n_vars, f, domain_z3, x)
# model
model = NN(2, 2,
bias=False,
activate=[ActivationType.SQUARE],
equilibria=None)
model.layers[0].weight[0][0] = 1
model.layers[0].weight[0][1] = 0
model.layers[0].weight[1][0] = 0
model.layers[0].weight[1][1] = 1
model.layers[1].weight[0][0] = 1
model.layers[1].weight[0][1] = 1
xdot = f(Z3Verifier.solver_fncts(), x)
translator = Translator(model, np.matrix(x).T, xdot, None, 1)
res = translator.get(**{'factors': None})
V, Vdot = res[CegisStateKeys.V], res[CegisStateKeys.V_dot]
print(V)
res = verifier.verify(V, Vdot)
self.assertEqual(res[CegisStateKeys.found], res[CegisStateKeys.cex] == [])
self.assertTrue(res[CegisStateKeys.found])
def test_poly2_with_bad_Lyapunov_function(self):
system = partial(poly_2, batch_size=100)
n_vars = 2
verifier = Z3Verifier
x = verifier.new_vars(n_vars)
f, domain, _ = system(functions=verifier.solver_fncts(), inner=0, outer=100)
domain_z3 = domain(verifier.solver_fncts(), x)
verifier = Z3Verifier(n_vars, f, domain_z3, x)
# model
model = NN(2, 2,
bias=True,
activate=[ActivationType.SQUARE],
equilibria=None)
model.layers[0].weight[0][0] = 1
model.layers[0].weight[0][1] = 0
model.layers[0].weight[1][0] = 0
model.layers[0].weight[1][1] = 1
model.layers[0].bias[0] = 1
model.layers[0].bias[1] = 1
xdot = f(Z3Verifier.solver_fncts(), x)
translator = Translator(model, np.matrix(x).T, xdot, None, 1)
res = translator.get(**{'factors': None})
V, Vdot = res[CegisStateKeys.V], res[CegisStateKeys.V_dot]
res = verifier.verify(V, Vdot)
self.assertEqual(res[CegisStateKeys.found], res[CegisStateKeys.cex] == [])
self.assertFalse(res[CegisStateKeys.found])
def test_poly2_with_another_bad_Lyapunov_function(self):
system = partial(poly_2, batch_size=100)
n_vars = 2
verifier = Z3Verifier
x = verifier.new_vars(n_vars)
f, domain, _ = system(functions=verifier.solver_fncts(), inner=0, outer=100)
domain_z3 = domain(verifier.solver_fncts(), x)
verifier = Z3Verifier(n_vars, f, domain_z3, x)
# model
model = NN(2, 2,
bias=False,
activate=[ActivationType.SQUARE],
equilibria=None)
model.layers[0].weight[0][0] = 1
model.layers[0].weight[0][1] = 1
model.layers[0].weight[1][0] = 0
model.layers[0].weight[1][1] = 1
xdot = f(Z3Verifier.solver_fncts(), x)
translator = Translator(model, np.matrix(x).T, xdot, None, 1)
res = translator.get(**{'factors': None})
V, Vdot = res[CegisStateKeys.V], res[CegisStateKeys.V_dot]
res = verifier.verify(V, Vdot)
self.assertEqual(res[CegisStateKeys.found], res[CegisStateKeys.cex] == [])
self.assertFalse(res[CegisStateKeys.found])
if __name__ == '__main__':
unittest.main()
| 36.747899
| 99
| 0.621541
| 558
| 4,373
| 4.75448
| 0.216846
| 0.06634
| 0.063325
| 0.081417
| 0.743309
| 0.734263
| 0.723332
| 0.702978
| 0.702978
| 0.702978
| 0
| 0.038072
| 0.255202
| 4,373
| 118
| 100
| 37.059322
| 0.776481
| 0.06037
| 0
| 0.707865
| 0
| 0
| 0.007073
| 0
| 0
| 0
| 0
| 0
| 0.067416
| 1
| 0.033708
| false
| 0
| 0.134831
| 0
| 0.179775
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
baf0d4e23de23f1c34b1f0ddba9107cd86bc94e3
| 45
|
py
|
Python
|
grasshopper/__init__.py
|
kmarburger/livestock3d
|
767e6bd1b7658357f720e112d550416cb7c45226
|
[
"MIT"
] | 1
|
2021-03-05T16:46:30.000Z
|
2021-03-05T16:46:30.000Z
|
grasshopper/__init__.py
|
kmarburger/livestock3d
|
767e6bd1b7658357f720e112d550416cb7c45226
|
[
"MIT"
] | 1
|
2018-06-22T11:40:15.000Z
|
2018-06-27T16:35:45.000Z
|
grasshopper/__init__.py
|
kmarburger/livestock3d
|
767e6bd1b7658357f720e112d550416cb7c45226
|
[
"MIT"
] | 4
|
2018-03-29T19:41:01.000Z
|
2019-12-06T14:06:46.000Z
|
from . templates import *
from . ssh import *
| 22.5
| 25
| 0.711111
| 6
| 45
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 45
| 2
| 26
| 22.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
baf894b0dfcabe5ee8bb98007cd88daf0f51c8d4
| 122
|
py
|
Python
|
popmon/version.py
|
SCarozza/popmon
|
89add7f0c93781526571895c9075d935bdc6d89b
|
[
"MIT"
] | null | null | null |
popmon/version.py
|
SCarozza/popmon
|
89add7f0c93781526571895c9075d935bdc6d89b
|
[
"MIT"
] | null | null | null |
popmon/version.py
|
SCarozza/popmon
|
89add7f0c93781526571895c9075d935bdc6d89b
|
[
"MIT"
] | null | null | null |
"""THIS FILE IS AUTO-GENERATED BY SETUP.PY."""
name = "popmon"
version = "0.3.12"
full_version = "0.3.12"
release = True
| 17.428571
| 46
| 0.655738
| 21
| 122
| 3.761905
| 0.809524
| 0.202532
| 0.227848
| 0.278481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07767
| 0.155738
| 122
| 6
| 47
| 20.333333
| 0.68932
| 0.327869
| 0
| 0
| 1
| 0
| 0.236842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
034a4644f850ec4608521c4cd8964398f1f171f6
| 7,094
|
py
|
Python
|
laceworksdk/api/v2/vulnerability_policies.py
|
alannix-lw/python-sdk
|
2f4f189de6aa5e6a2cfd27314d69d8a27814d6c0
|
[
"MIT"
] | 2
|
2020-09-08T20:42:05.000Z
|
2020-09-09T14:27:55.000Z
|
laceworksdk/api/v2/vulnerability_policies.py
|
alannix-lw/lacework-python-sdk
|
2f4f189de6aa5e6a2cfd27314d69d8a27814d6c0
|
[
"MIT"
] | null | null | null |
laceworksdk/api/v2/vulnerability_policies.py
|
alannix-lw/lacework-python-sdk
|
2f4f189de6aa5e6a2cfd27314d69d8a27814d6c0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Lacework VulnerabilityPolicies API wrapper.
"""
from laceworksdk.api.crud_endpoint import CrudEndpoint
class VulnerabilityPoliciesAPI(CrudEndpoint):
def __init__(self, session):
"""
Initializes the VulnerabilityPoliciesAPI object.
:param session: An instance of the HttpSession class
:return VulnerabilityPoliciesAPI object.
"""
super().__init__(session, "VulnerabilityPolicies")
def create(self,
policy_type,
policy_name,
severity,
state,
filter,
props,
policy_eval_type=None,
fail_on_violation=False,
alert_on_violation=False,
**request_params):
"""
A method to create a new VulnerabilityPolicies object.
:param policy_type: A string representing the type of the policy.
:param policy_name: A string representing the name of the policy.
:param severity: A string representing the severity of the policy.
("Info", "Low", "Medium", "High", "Critical")
:param state: A boolean representing the state of the policy.
:param filter:
obj:
:param rule: An object representing a policy filter rule.
obj:
:param operator: A string representing the rule operator.
("include", "exclude", "equals", "notEquals")
:param values: An array of strings representing the rule values.
:param exception: An object representing a policy filter exception.
obj:
:param operator: A string representing the rule operator.
("include", "exclude", "equals", "notEquals")
:param values: An array of strings representing the exception values.
:param props: An object containing properties of the policy.
obj:
:param description: A string representing the property description.
:param createdBy: A string representing the creator of the property.
:param updatedBy: A string representing the updater of the property.
:param policy_eval_type: A string representing the policy evaluation type.
:param fail_on_violation: A boolean representing whether the policy should fail on violations.
:param alert_on_violation: A boolean representing whether the policy should alert on violations.
:param request_params: Additional request parameters.
(provides support for parameters that may be added in the future)
:return response json
"""
return super().create(
policy_type=policy_type,
policy_name=policy_name,
severity=severity,
state=int(bool(state)),
filter=filter,
props=props,
policy_eval_type=policy_eval_type,
fail_on_violation=int(bool(fail_on_violation)),
alert_on_violation=int(bool(alert_on_violation)),
**request_params
)
def get(self,
guid=None):
"""
A method to get VulnerabilityPolicies objects.
:param guid: A string representing the object GUID.
:return response json
"""
return super().get(id=guid)
def get_by_guid(self,
guid):
"""
A method to get a VulnerabilityPolicies object by GUID.
:param guid: A string representing the object GUID.
:return response json
"""
return self.get(guid=guid)
def update(self,
           guid,
           policy_type=None,
           policy_name=None,
           severity=None,
           state=None,
           filter=None,
           props=None,
           policy_eval_type=None,
           fail_on_violation=None,
           alert_on_violation=None,
           **request_params):
    """
    A method to update a VulnerabilityPolicies object.

    :param guid: A string representing the object GUID.
    :param policy_type: A string representing the type of the policy.
    :param policy_name: A string representing the name of the policy.
    :param severity: A string representing the severity of the policy.
        ("Info", "Low", "Medium", "High", "Critical")
    :param state: A boolean representing the state of the policy.
    :param filter: An object containing the policy filter, with a "rule"
        object and an "exception" object, each holding an "operator"
        ("include", "exclude", "equals", "notEquals") and a "values" array
        of strings.
    :param props: An object containing properties of the policy
        ("description", "createdBy", "updatedBy").
    :param policy_eval_type: A string representing the policy evaluation type.
    :param fail_on_violation: A boolean representing whether the policy
        should fail on violations.
    :param alert_on_violation: A boolean representing whether the policy
        should alert on violations.
    :param request_params: Additional request parameters.
        (provides support for parameters that may be added in the future)

    :return response json
    """
    def _flag(value):
        # The API wants 0/1 integers for booleans; None means "leave the
        # field unchanged", so it must pass through untouched.
        return None if value is None else int(bool(value))

    return super().update(
        guid,
        policy_type=policy_type,
        policy_name=policy_name,
        severity=severity,
        state=_flag(state),
        filter=filter,
        props=props,
        policy_eval_type=policy_eval_type,
        fail_on_violation=_flag(fail_on_violation),
        alert_on_violation=_flag(alert_on_violation),
        **request_params
    )
def delete(self,
           guid):
    """
    A method to delete a VulnerabilityPolicies object.

    :param guid: A string representing the object GUID.

    :return response json
    """
    # Straight pass-through to the base resource's delete endpoint.
    response = super().delete(id=guid)
    return response
| 38.765027
| 104
| 0.591063
| 751
| 7,094
| 5.459387
| 0.141145
| 0.102439
| 0.101951
| 0.118049
| 0.809024
| 0.777561
| 0.758293
| 0.742195
| 0.70878
| 0.70878
| 0
| 0.000215
| 0.344516
| 7,094
| 182
| 105
| 38.978022
| 0.881505
| 0.57753
| 0
| 0.328358
| 0
| 0
| 0.009009
| 0.009009
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089552
| false
| 0
| 0.014925
| 0
| 0.19403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cee46735041dda012e2428557b3fcef104dc47c5
| 12
|
py
|
Python
|
server/amberalertcn/api/v1/lib/__init__.py
|
fuzhouch/amberalertcn
|
5118666a7b3d7b1c40b255be8ac6c9b12e2fc4dc
|
[
"BSD-3-Clause"
] | 151
|
2015-01-25T10:25:29.000Z
|
2022-03-15T10:04:09.000Z
|
server/amberalertcn/api/v1/lib/__init__.py
|
fuzhouch/amberalertcn
|
5118666a7b3d7b1c40b255be8ac6c9b12e2fc4dc
|
[
"BSD-3-Clause"
] | null | null | null |
server/amberalertcn/api/v1/lib/__init__.py
|
fuzhouch/amberalertcn
|
5118666a7b3d7b1c40b255be8ac6c9b12e2fc4dc
|
[
"BSD-3-Clause"
] | 70
|
2015-02-02T02:35:48.000Z
|
2021-05-13T09:51:08.000Z
|
"""
lib
"""
| 3
| 3
| 0.25
| 1
| 12
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 12
| 3
| 4
| 4
| 0.333333
| 0.25
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3022b2a1ed3bdcd775cda90c1368265310c3c5a2
| 12,647
|
py
|
Python
|
bot.py
|
Aadhith-Ujesh/online_class_attending_bot
|
2e177d0874be54a8323e5db944eeded3b1ab6374
|
[
"MIT"
] | null | null | null |
bot.py
|
Aadhith-Ujesh/online_class_attending_bot
|
2e177d0874be54a8323e5db944eeded3b1ab6374
|
[
"MIT"
] | null | null | null |
bot.py
|
Aadhith-Ujesh/online_class_attending_bot
|
2e177d0874be54a8323e5db944eeded3b1ab6374
|
[
"MIT"
] | null | null | null |
import pyautogui
import datetime
def attender(team, driver, t3):
    """Log in to Microsoft Teams, join a class, record it, and leave at *t3*.

    :param team: Visible name of the Teams team card to open.
    :param driver: A selenium Chrome WebDriver instance (already created).
    :param t3: Leave time expressed as seconds since midnight (hours*3600 +
        minutes*60); the meeting is left once the wall clock reaches it.
    :return: None.
    """
    from selenium import webdriver
    import time
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC

    # SECURITY: credentials are hard-coded in source. Move them to an
    # environment variable or config file before sharing this script.
    username = "257258@student.annauniv.edu"
    password = "Ujesh9112k@"

    def _wait_for_id(element_id, timeout=50):
        # Block until the element is present in the DOM, then return it.
        return WebDriverWait(driver, timeout).until(
            EC.presence_of_element_located((By.ID, element_id))
        )

    def _click_image(path, primary, fallback, pause):
        # Locate a template image on screen and click its centre.  Try a
        # higher confidence first; on any failure retry once at the lower
        # confidence.  The retry is deliberately unguarded so a second miss
        # propagates, matching the original behaviour.
        try:
            x, y = pyautogui.center(
                pyautogui.locateOnScreen(path, confidence=primary)
            )
            pyautogui.click(x, y)
            time.sleep(pause)
        except Exception:
            x, y = pyautogui.center(
                pyautogui.locateOnScreen(path, confidence=fallback)
            )
            pyautogui.click(x, y)
            time.sleep(pause)

    # ---- Microsoft login -------------------------------------------------
    driver.get("https://login.microsoftonline.com/common/oauth2/v2.0/authorize?response_type=id_token&scope=openid%20profile&client_id=5e3ce6c0-2b1f-4285-8d4b-75ee78787346&redirect_uri=https%3A%2F%2Fteams.microsoft.com%2Fgo&state=eyJpZCI6ImFhNzZiYzE1LTg4ZDAtNDlmMi1hMDllLTUwZGNkZjZjNzU2MiIsInRzIjoxNjMzNDQ5NTYyLCJtZXRob2QiOiJyZWRpcmVjdEludGVyYWN0aW9uIn0%3D&nonce=a2db4be8-ec7b-44fb-a89e-953c6dad8ecf&client_info=1&x-client-SKU=MSAL.JS&x-client-Ver=1.3.4&client-request-id=13704c37-bf4c-4fc1-9f99-b55993562eb0&response_mode=fragment&sso_reload=true")
    driver.maximize_window()
    time.sleep(1)

    uname = _wait_for_id('i0116')
    time.sleep(2)
    uname.send_keys(username)
    _wait_for_id('idSIButton9').click()          # "Next"
    time.sleep(2)
    _wait_for_id('i0118').send_keys(password)
    _wait_for_id('idSIButton9').click()          # "Sign in"
    time.sleep(2)
    _wait_for_id('idBtn_Back').click()           # "Stay signed in?" -> No
    time.sleep(1)

    try:
        # Some sessions prompt for the e-mail a second time.
        final_login = _wait_for_id('i0116', timeout=5)
        final_login.send_keys(username)
        time.sleep(1)
        _wait_for_id('idSIButton9', timeout=5).click()
    except Exception:
        # Otherwise an account tile ('table' class) is shown instead.
        tile = WebDriverWait(driver, 50).until(
            EC.presence_of_element_located((By.CLASS_NAME, 'table'))
        )
        tile.click()
        time.sleep(15)
    time.sleep(2)

    # ---- Dismiss notification banners -----------------------------------
    # NOTE(review): the trailing space in "action-button " and capitalised
    # "Title" attribute look suspicious but are kept as-is — confirm against
    # the live Teams DOM.
    for banner in driver.find_elements_by_class_name("action-button "):
        if banner.get_attribute("Title") == "Dismiss":
            banner.click()
    pyautogui.moveTo(300, 300)

    # ---- Find and open the requested team card --------------------------
    # The card list is scrolled three times; each snapshot can expose
    # different cards, so the union of all four is used.
    snapshots = []
    for step in range(4):
        snapshots.append(driver.find_elements_by_class_name("team-card"))
        time.sleep(2)
        if step < 3:
            pyautogui.scroll(-800)
    cards = list(set().union(*map(set, snapshots)))

    for card in cards:
        # Debug dump of every discovered team name.
        print(card.find_element_by_class_name("team-name").text)
    for card in cards:
        if card.find_element_by_class_name("team-name").text == team:
            card.click()
            break

    # ---- Start the screen-recorder extension ----------------------------
    _click_image(r'H:\msteamsbot\extensionicon.PNG', 0.8, 0.7, 1)
    _click_image(r'H:\msteamsbot\Screenshot (46).png', 0.8, 0.7, 2)
    _click_image(r'H:\msteamsbot\onlyscreen.PNG', 0.8, 0.7, 1)
    _click_image(r'H:\msteamsbot\system.PNG', 0.8, 0.7, 1)
    _click_image(r'H:\msteamsbot\startrecording.PNG', 0.8, 0.7, 1)
    _click_image(r'H:\msteamsbot\chrometab.PNG', 0.8, 0.7, 1)
    _click_image(r'H:\msteamsbot\microsoft-teams.PNG', 0.7, 0.6, 1)
    _click_image(r'H:\msteamsbot\share.PNG', 0.8, 0.7, 1)

    # ---- Wait (up to ~29 minutes) for the meeting's Join button ---------
    for _ in range(29):
        try:
            driver.find_element_by_class_name("ts-calling-join-button").click()
            break
        except Exception:
            time.sleep(60)
            driver.refresh()

    _click_image('H:/msteamsbot/allowbutton.PNG', 0.6, 0.5, 1)
    time.sleep(5)
    _click_image('H:/msteamsbot/vdoff.PNG', 0.9, 0.8, 1)   # camera off
    time.sleep(2)
    driver.find_element_by_class_name("join-btn").click()
    time.sleep(2)

    # ---- Stay in the meeting until the scheduled leave time -------------
    while True:
        now = datetime.datetime.now()
        hours, minutes = map(int, now.strftime("%H:%M").split(":"))
        elapsed = hours * 3600 + minutes * 60
        print(t3 - elapsed)
        if elapsed >= t3:
            # Leave the class.
            pyautogui.moveTo(400, 400, 2)
            time.sleep(1)
            driver.find_element_by_id("hangup-button").click()
            time.sleep(2)
            break
        time.sleep(60)

    # ---- Stop the recorder and save the captured video ------------------
    _click_image(r'H:\msteamsbot\stopsharing.PNG', 0.6, 0.5, 2)
    _click_image(r'H:\msteamsbot\continue.PNG', 0.6, 0.5, 1)
    _click_image(r'H:\msteamsbot\save.PNG', 0.6, 0.5, 2)
    time.sleep(100)

    # ---- Sign out of Teams ----------------------------------------------
    driver.find_element_by_id("personDropdown").click()
    time.sleep(1)
    driver.find_element_by_id("logout-button").click()
    WebDriverWait(driver, 50).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'table'))
    ).click()
    time.sleep(1)
    return
| 37.41716
| 549
| 0.654701
| 1,522
| 12,647
| 5.263469
| 0.142576
| 0.082387
| 0.134815
| 0.202222
| 0.764574
| 0.75671
| 0.73811
| 0.736612
| 0.736113
| 0.733491
| 0
| 0.032053
| 0.230331
| 12,647
| 338
| 550
| 37.41716
| 0.790939
| 0.081996
| 0
| 0.609319
| 0
| 0.003584
| 0.132976
| 0.067179
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003584
| false
| 0.007168
| 0.02509
| 0
| 0.032258
| 0.007168
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
305a61a05aebe16630a356ef804d94909257122b
| 10,156
|
py
|
Python
|
tests/sns/test_sns_paginators.py
|
paulhutchings/beartype-boto3-example
|
d69298d9444d578799e2a17cb63de11474b2278a
|
[
"MIT"
] | 3
|
2021-11-16T06:21:11.000Z
|
2021-11-22T08:59:11.000Z
|
tests/sns/test_sns_paginators.py
|
paulhutchings/beartype-boto3-example
|
d69298d9444d578799e2a17cb63de11474b2278a
|
[
"MIT"
] | 9
|
2021-11-19T03:29:00.000Z
|
2021-12-30T23:54:47.000Z
|
tests/sns/test_sns_paginators.py
|
paulhutchings/beartype-boto3-example
|
d69298d9444d578799e2a17cb63de11474b2278a
|
[
"MIT"
] | null | null | null |
import pytest
from bearboto3.sns import (
ListEndpointsByPlatformApplicationPaginator,
ListPlatformApplicationsPaginator,
ListSubscriptionsPaginator,
ListSubscriptionsByTopicPaginator,
ListTopicsPaginator,
ListPhoneNumbersOptedOutPaginator,
ListOriginationNumbersPaginator,
ListSMSSandboxPhoneNumbersPaginator,
)
from beartype import beartype
from beartype.roar import (
BeartypeCallHintPepParamException,
BeartypeCallHintPepReturnException,
BeartypeDecorHintPep484585Exception,
)
# ============================
# ListEndpointsByPlatformApplicationPaginator
# ============================
def test_list_endpoints_by_platform_application_arg_pass(
    gen_list_endpoints_by_platform_application_paginator,
):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListEndpointsByPlatformApplicationPaginator):
        """Typed no-op target."""

    probe(gen_list_endpoints_by_platform_application_paginator)
def test_list_endpoints_by_platform_application_arg_fail(gen_list_topics_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListEndpointsByPlatformApplicationPaginator):
            """Typed no-op target."""

        probe(gen_list_topics_paginator)
def test_list_endpoints_by_platform_application_return_pass(
    gen_list_endpoints_by_platform_application_paginator,
):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListEndpointsByPlatformApplicationPaginator:
        return gen_list_endpoints_by_platform_application_paginator

    probe()
def test_list_endpoints_by_platform_application_return_fail(gen_list_topics_paginator):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListEndpointsByPlatformApplicationPaginator:
            return gen_list_topics_paginator

        probe()
# ============================
# ListPlatformApplicationsPaginator
# ============================
def test_list_platform_applications_arg_pass(gen_list_platform_applications_paginator):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListPlatformApplicationsPaginator):
        """Typed no-op target."""

    probe(gen_list_platform_applications_paginator)
def test_list_platform_applications_arg_fail(gen_list_subscriptions_by_topic_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListPlatformApplicationsPaginator):
            """Typed no-op target."""

        probe(gen_list_subscriptions_by_topic_paginator)
def test_list_platform_applications_return_pass(
    gen_list_platform_applications_paginator,
):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListPlatformApplicationsPaginator:
        return gen_list_platform_applications_paginator

    probe()
def test_list_platform_applications_return_fail(
    gen_list_subscriptions_by_topic_paginator,
):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListPlatformApplicationsPaginator:
            return gen_list_subscriptions_by_topic_paginator

        probe()
# ============================
# ListSubscriptionsPaginator
# ============================
def test_list_subscriptions_arg_pass(gen_list_subscriptions_paginator):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListSubscriptionsPaginator):
        """Typed no-op target."""

    probe(gen_list_subscriptions_paginator)
def test_list_subscriptions_arg_fail(gen_list_platform_applications_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListSubscriptionsPaginator):
            """Typed no-op target."""

        probe(gen_list_platform_applications_paginator)
def test_list_subscriptions_return_pass(gen_list_subscriptions_paginator):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListSubscriptionsPaginator:
        return gen_list_subscriptions_paginator

    probe()
def test_list_subscriptions_return_fail(gen_list_platform_applications_paginator):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListSubscriptionsPaginator:
            return gen_list_platform_applications_paginator

        probe()
# ============================
# ListSubscriptionsByTopicPaginator
# ============================
def test_list_subscriptions_by_topic_arg_pass(
    gen_list_subscriptions_by_topic_paginator,
):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListSubscriptionsByTopicPaginator):
        """Typed no-op target."""

    probe(gen_list_subscriptions_by_topic_paginator)
def test_list_subscriptions_by_topic_arg_fail(gen_list_platform_applications_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListSubscriptionsByTopicPaginator):
            """Typed no-op target."""

        probe(gen_list_platform_applications_paginator)
def test_list_subscriptions_by_topic_return_pass(
    gen_list_subscriptions_by_topic_paginator,
):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListSubscriptionsByTopicPaginator:
        return gen_list_subscriptions_by_topic_paginator

    probe()
def test_list_subscriptions_by_topic_return_fail(
    gen_list_platform_applications_paginator,
):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListSubscriptionsByTopicPaginator:
            return gen_list_platform_applications_paginator

        probe()
# ============================
# ListTopicsPaginator
# ============================
def test_list_topics_arg_pass(gen_list_topics_paginator):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListTopicsPaginator):
        """Typed no-op target."""

    probe(gen_list_topics_paginator)
def test_list_topics_arg_fail(gen_list_endpoints_by_platform_application_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListTopicsPaginator):
            """Typed no-op target."""

        probe(gen_list_endpoints_by_platform_application_paginator)
def test_list_topics_return_pass(gen_list_topics_paginator):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListTopicsPaginator:
        return gen_list_topics_paginator

    probe()
def test_list_topics_return_fail(gen_list_endpoints_by_platform_application_paginator):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListTopicsPaginator:
            return gen_list_endpoints_by_platform_application_paginator

        probe()
# ============================
# ListPhoneNumbersOptedOutPaginator
# ============================
def test_list_phone_numbers_opted_out_arg_pass(
    gen_list_phone_numbers_opted_out_paginator,
):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListPhoneNumbersOptedOutPaginator):
        """Typed no-op target."""

    probe(gen_list_phone_numbers_opted_out_paginator)
def test_list_phone_numbers_opted_out_arg_fail(
    gen_list_platform_applications_paginator,
):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListPhoneNumbersOptedOutPaginator):
            """Typed no-op target."""

        probe(gen_list_platform_applications_paginator)
def test_list_phone_numbers_opted_out_return_pass(
    gen_list_phone_numbers_opted_out_paginator,
):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListPhoneNumbersOptedOutPaginator:
        return gen_list_phone_numbers_opted_out_paginator

    probe()
def test_list_phone_numbers_opted_out_return_fail(
    gen_list_platform_applications_paginator,
):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListPhoneNumbersOptedOutPaginator:
            return gen_list_platform_applications_paginator

        probe()
# ============================
# ListOriginationNumbersPaginator
# ============================
def test_list_origination_numbers_arg_pass(gen_list_origination_numbers_paginator):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListOriginationNumbersPaginator):
        """Typed no-op target."""

    probe(gen_list_origination_numbers_paginator)
def test_list_origination_numbers_arg_fail(gen_list_phone_numbers_opted_out_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListOriginationNumbersPaginator):
            """Typed no-op target."""

        probe(gen_list_phone_numbers_opted_out_paginator)
def test_list_origination_numbers_return_pass(gen_list_origination_numbers_paginator):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListOriginationNumbersPaginator:
        return gen_list_origination_numbers_paginator

    probe()
def test_list_origination_numbers_return_fail(
    gen_list_phone_numbers_opted_out_paginator,
):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListOriginationNumbersPaginator:
            return gen_list_phone_numbers_opted_out_paginator

        probe()
# ============================
# ListSMSSandboxPhoneNumbersPaginator
# ============================
def test_list_sms_sandbox_phone_numbers_arg_pass(
    gen_list_sms_sandbox_phone_numbers_paginator,
):
    """The paginator fixture satisfies the argument type check."""

    @beartype
    def probe(param: ListSMSSandboxPhoneNumbersPaginator):
        """Typed no-op target."""

    probe(gen_list_sms_sandbox_phone_numbers_paginator)
def test_list_sms_sandbox_phone_numbers_arg_fail(gen_list_topics_paginator):
    """A mismatched paginator is rejected as an argument."""
    with pytest.raises(BeartypeCallHintPepParamException):

        @beartype
        def probe(param: ListSMSSandboxPhoneNumbersPaginator):
            """Typed no-op target."""

        probe(gen_list_topics_paginator)
def test_list_sms_sandbox_phone_numbers_return_pass(
    gen_list_sms_sandbox_phone_numbers_paginator,
):
    """The paginator fixture satisfies the return type check."""

    @beartype
    def probe() -> ListSMSSandboxPhoneNumbersPaginator:
        return gen_list_sms_sandbox_phone_numbers_paginator

    probe()
def test_list_sms_sandbox_phone_numbers_return_fail(gen_list_topics_paginator):
    """A mismatched paginator is rejected as a return value."""
    with pytest.raises(
        (BeartypeCallHintPepReturnException, BeartypeDecorHintPep484585Exception)
    ):

        @beartype
        def probe() -> ListSMSSandboxPhoneNumbersPaginator:
            return gen_list_topics_paginator

        probe()
| 26.041026
| 88
| 0.738283
| 909
| 10,156
| 7.733773
| 0.050605
| 0.063727
| 0.050071
| 0.054623
| 0.9
| 0.875533
| 0.822902
| 0.692176
| 0.600996
| 0.376956
| 0
| 0.006528
| 0.170441
| 10,156
| 389
| 89
| 26.107969
| 0.827893
| 0.071288
| 0
| 0.767932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.270042
| false
| 0.135021
| 0.016878
| 0.067511
| 0.35443
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
305ae15bf7af455323a06a1fee692b0e57a3736d
| 3,807
|
py
|
Python
|
imp/game/checks.py
|
philskillz-coder/uno-discord-bot
|
02a3fce3df73d75d31795590dbdb93aaa1d7f268
|
[
"MIT"
] | null | null | null |
imp/game/checks.py
|
philskillz-coder/uno-discord-bot
|
02a3fce3df73d75d31795590dbdb93aaa1d7f268
|
[
"MIT"
] | null | null | null |
imp/game/checks.py
|
philskillz-coder/uno-discord-bot
|
02a3fce3df73d75d31795590dbdb93aaa1d7f268
|
[
"MIT"
] | null | null | null |
from imp.better import BetterInteraction
from discord import app_commands
from imp.better import errors
def IsHost(message: str = None):
    """Build an app-command check requiring the caller to be a game host.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "You must be a game host for this."

    async def predicate(interaction: BetterInteraction):
        # Hosts pass the check outright.
        if interaction.client.game_manager.is_host(interaction.user):
            return True
        # Tell the invoking user why the command was refused, then abort.
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.HostCheckError(message)

    return app_commands.check(predicate)
def IsNotHost(message: str = None):
    """Build an app-command check requiring the caller NOT to be a game host.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "You mustn't be a game host for this."

    async def predicate(interaction: BetterInteraction):
        # Non-hosts pass the check outright.
        if not interaction.client.game_manager.is_host(interaction.user):
            return True
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.HostCheckError(message)

    return app_commands.check(predicate)
def IsParticipant(message: str = None):
    """Build an app-command check requiring the caller to be in a game.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "You must participate in a game for this."

    async def predicate(interaction: BetterInteraction):
        # Active players pass the check outright.
        if interaction.client.game_manager.is_player(interaction.user):
            return True
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.ParticipantCheckError(message)

    return app_commands.check(predicate)
def IsNotParticipant(message: str = None):
    """Build an app-command check requiring the caller NOT to be in a game.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "You mustn't participate in a game for this."

    async def predicate(interaction: BetterInteraction):
        # Non-players pass the check outright.
        if not interaction.client.game_manager.is_player(interaction.user):
            return True
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.ParticipantCheckError(message)

    return app_commands.check(predicate)
def GameStarted(message: str = None):
    """Build an app-command check requiring the caller's game to have started.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "The game must be started for this."

    async def predicate(interaction: BetterInteraction):
        # A started game passes the check outright.
        if interaction.client.game_manager.get_player_game(interaction.user).started:
            return True
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.GameCheckError(message)

    return app_commands.check(predicate)
def GameNotStarted(message: str = None):
    """Build an app-command check requiring the caller's game NOT to have started.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "The game mustn't be started for this."

    async def predicate(interaction: BetterInteraction):
        # A not-yet-started game passes the check outright.
        if not interaction.client.game_manager.get_player_game(interaction.user).started:
            return True
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.GameCheckError(message)

    return app_commands.check(predicate)
def PlayerTurn(message: str = None):
    """Build an app-command check requiring it to be the caller's turn.

    :param message: Optional rejection text; a default is used when falsy.
    """
    if not message:
        message = "It's not your turn!"

    async def predicate(interaction: BetterInteraction):
        game = interaction.client.game_manager.get_player_game(interaction.user)
        # The current player passes the check outright.
        if game.is_turn(game.get_participant(interaction.user)):
            return True
        await interaction.response.send_message(content=message, ephemeral=True)
        raise errors.PlayerTurnCheckError(message)

    return app_commands.check(predicate)
| 29.742188
| 97
| 0.656685
| 401
| 3,807
| 6.149626
| 0.162095
| 0.035685
| 0.03974
| 0.059611
| 0.896188
| 0.87794
| 0.865775
| 0.865775
| 0.786294
| 0.731955
| 0
| 0
| 0.276596
| 3,807
| 127
| 98
| 29.976378
| 0.895425
| 0
| 0
| 0.625
| 0
| 0
| 0.063567
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079545
| false
| 0
| 0.034091
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
065a7532e6d3b0f5b2a755be9b2d24f0869d4e12
| 36
|
py
|
Python
|
cupy_alias/indexing/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
cupy_alias/indexing/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
cupy_alias/indexing/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
from clpy.indexing import * # NOQA
| 18
| 35
| 0.722222
| 5
| 36
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 36
| 1
| 36
| 36
| 0.896552
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
069f53f3ed6fd77e97450b7af364c4628c3b80f4
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/tomli/__init__.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/tomli/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/pip/_vendor/tomli/__init__.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/cf/51/25/b749cb02a5396340ce9fda7fffc4272d66af9443a947242291d6202aba
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
06ad0e94115a4a04f97e82f05713091995f8055c
| 406
|
py
|
Python
|
iceworm/trees/_antlr/__init__.py
|
wrmsr/iceworm
|
fc9df6679d29fcac138787a9caf97566d055ef99
|
[
"BSD-3-Clause"
] | null | null | null |
iceworm/trees/_antlr/__init__.py
|
wrmsr/iceworm
|
fc9df6679d29fcac138787a9caf97566d055ef99
|
[
"BSD-3-Clause"
] | 1
|
2021-01-19T14:29:19.000Z
|
2021-01-19T14:34:27.000Z
|
iceworm/trees/_antlr/__init__.py
|
wrmsr/iceworm
|
fc9df6679d29fcac138787a9caf97566d055ef99
|
[
"BSD-3-Clause"
] | 1
|
2020-12-31T22:29:52.000Z
|
2020-12-31T22:29:52.000Z
|
from .IceSqlLexer import IceSqlLexer # noqa
from .IceSqlLexer import IceSqlParserConfig # noqa
from .IceSqlListener import IceSqlListener # noqa
from .IceSqlListener import IceSqlParserConfig # noqa
from .IceSqlParser import IceSqlParser # noqa
from .IceSqlParser import IceSqlParserConfig # noqa
from .IceSqlVisitor import IceSqlParserConfig # noqa
from .IceSqlVisitor import IceSqlVisitor # noqa
| 45.111111
| 54
| 0.82266
| 40
| 406
| 8.35
| 0.2
| 0.167665
| 0.335329
| 0.383234
| 0.287425
| 0.287425
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 406
| 8
| 55
| 50.75
| 0.954286
| 0.096059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
06b2eb470a816201de92d8a10085d22381be91d7
| 22
|
py
|
Python
|
snippets/13.py
|
krrg/python-tutorial
|
a6ef8865cb46ee48624d7c7451d1894741405cc4
|
[
"MIT"
] | null | null | null |
snippets/13.py
|
krrg/python-tutorial
|
a6ef8865cb46ee48624d7c7451d1894741405cc4
|
[
"MIT"
] | null | null | null |
snippets/13.py
|
krrg/python-tutorial
|
a6ef8865cb46ee48624d7c7451d1894741405cc4
|
[
"MIT"
] | null | null | null |
my_list = [3, 1, 4, 5]
| 22
| 22
| 0.5
| 6
| 22
| 1.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0.227273
| 22
| 1
| 22
| 22
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ebf1c8d0535282298d56bf14f685b6255678d1d3
| 35
|
py
|
Python
|
cupy_alias/padding/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 142
|
2018-06-07T07:43:10.000Z
|
2021-10-30T21:06:32.000Z
|
cupy_alias/padding/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 282
|
2018-06-07T08:35:03.000Z
|
2021-03-31T03:14:32.000Z
|
cupy_alias/padding/__init__.py
|
fixstars/clpy
|
693485f85397cc110fa45803c36c30c24c297df0
|
[
"BSD-3-Clause"
] | 19
|
2018-06-19T11:07:53.000Z
|
2021-05-13T20:57:04.000Z
|
from clpy.padding import * # NOQA
| 17.5
| 34
| 0.714286
| 5
| 35
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 35
| 1
| 35
| 35
| 0.892857
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ebf227574a132bbba02b58e0a9c02bee89c35991
| 40
|
py
|
Python
|
layoutlmv2/src/layoutlm/trainers/__init__.py
|
bjorz/unilm
|
47396f86d32f52c98557e5f8e5c4fb3b9db52658
|
[
"MIT"
] | null | null | null |
layoutlmv2/src/layoutlm/trainers/__init__.py
|
bjorz/unilm
|
47396f86d32f52c98557e5f8e5c4fb3b9db52658
|
[
"MIT"
] | null | null | null |
layoutlmv2/src/layoutlm/trainers/__init__.py
|
bjorz/unilm
|
47396f86d32f52c98557e5f8e5c4fb3b9db52658
|
[
"MIT"
] | null | null | null |
from .funsd_trainer import FunsdTrainer
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
233ef9c92519a99521c52fd9bc198d3800045e8e
| 504
|
py
|
Python
|
rev/rev-verybabyrev/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 2
|
2021-08-09T17:08:12.000Z
|
2021-08-09T17:08:17.000Z
|
rev/rev-verybabyrev/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
rev/rev-verybabyrev/solve.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 1
|
2021-10-09T16:51:56.000Z
|
2021-10-09T16:51:56.000Z
|
af = list(b"\x13\x13\x11\x17\x12\x1d\x48\x45\x45\x41\x0b\x26\x2c\x42\x5f\x09\x0b\x5f\x6c\x3d\x56\x56\x1b\x54\x5f\x41\x45\x29\x3c\x0b\x5c\x58\x00\x5f\x5d\x09\x54\x6c\x2a\x40\x06\x06\x6a\x27\x48\x42\x5f\x4b\x56\x42\x2d\x2c\x43\x5d\x5e\x6c\x2d\x41\x07\x47\x43\x5e\x31\x6b\x5a\x0a\x3b\x6e\x1c\x49\x54\x5e\x1a\x2b\x34\x05\x5e\x47\x28\x28\x1f\x11\x26\x3b\x07\x50\x04\x06\x04\x0d\x0b\x05\x03\x48\x77\x0a")
flag = "r"
char = "r"
for stuff in af:
flag += chr(ord(char) ^ stuff)
char = flag[-1]
print(flag)
| 56
| 398
| 0.702381
| 117
| 504
| 3.025641
| 0.581197
| 0.033898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.317895
| 0.05754
| 504
| 8
| 399
| 63
| 0.427368
| 0
| 0
| 0
| 0
| 0.142857
| 0.765873
| 0.761905
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
88c74708cf2e882f6ff3829ceeefa31686df8fde
| 52
|
py
|
Python
|
winejournal/blueprints/wines/__init__.py
|
rickandersonaia/wine-journal
|
9664c8ed8df9eb853562c500e888490a61a6e44d
|
[
"CC-BY-4.0"
] | null | null | null |
winejournal/blueprints/wines/__init__.py
|
rickandersonaia/wine-journal
|
9664c8ed8df9eb853562c500e888490a61a6e44d
|
[
"CC-BY-4.0"
] | 5
|
2021-02-08T20:22:06.000Z
|
2021-09-07T23:52:33.000Z
|
winejournal/blueprints/wines/__init__.py
|
rickandersonaia/wine-journal
|
9664c8ed8df9eb853562c500e888490a61a6e44d
|
[
"CC-BY-4.0"
] | 2
|
2018-06-27T15:03:38.000Z
|
2020-03-14T15:40:34.000Z
|
from winejournal.blueprints.wines.views import wines
| 52
| 52
| 0.884615
| 7
| 52
| 6.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057692
| 52
| 1
| 52
| 52
| 0.938776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
cc929d26dacbc3b8bb8090d61642a64f128c1683
| 164
|
py
|
Python
|
release/scripts/presets/camera/Samsung_Galaxy_S4.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365
|
2015-02-10T15:10:55.000Z
|
2022-03-03T15:50:51.000Z
|
release/scripts/presets/camera/Samsung_Galaxy_S4.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45
|
2015-01-09T15:34:20.000Z
|
2021-10-05T14:44:23.000Z
|
release/scripts/presets/camera/Samsung_Galaxy_S4.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172
|
2015-01-25T15:16:53.000Z
|
2022-01-31T08:25:36.000Z
|
import bpy
bpy.context.camera.sensor_width = 4.8
bpy.context.camera.sensor_height = 3.6
bpy.context.camera.lens = 4.20
bpy.context.camera.sensor_fit = 'HORIZONTAL'
| 27.333333
| 44
| 0.786585
| 28
| 164
| 4.5
| 0.535714
| 0.31746
| 0.507937
| 0.52381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046667
| 0.085366
| 164
| 5
| 45
| 32.8
| 0.793333
| 0
| 0
| 0
| 0
| 0
| 0.060976
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ccd073433061247f2b6c86839a7ad279b8954cc2
| 77
|
py
|
Python
|
tests/test_phase_envelope.py
|
volpatto/gibbs
|
776acff6166dd4fd3039d55074542d995ac91754
|
[
"MIT"
] | 28
|
2019-05-25T14:50:00.000Z
|
2022-01-18T00:54:22.000Z
|
tests/test_phase_envelope.py
|
volpatto/gibbs
|
776acff6166dd4fd3039d55074542d995ac91754
|
[
"MIT"
] | 10
|
2019-06-15T06:07:14.000Z
|
2021-09-01T04:32:50.000Z
|
tests/test_phase_envelope.py
|
volpatto/gibbs
|
776acff6166dd4fd3039d55074542d995ac91754
|
[
"MIT"
] | 5
|
2019-08-04T05:37:34.000Z
|
2022-01-18T10:10:40.000Z
|
import pytest
from gibbs.phase_envelope import calculate_phase_envelope_grid
| 25.666667
| 62
| 0.909091
| 11
| 77
| 6
| 0.727273
| 0.393939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 77
| 2
| 63
| 38.5
| 0.929577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aeb31688c2f22ab3bc556ce425fc70f00215395e
| 33
|
py
|
Python
|
supernovasearch/__init__.py
|
mmiguel6288code/super-nova-search
|
2f63beb45b579139f7699e734d0c65adaf08809b
|
[
"Apache-2.0"
] | null | null | null |
supernovasearch/__init__.py
|
mmiguel6288code/super-nova-search
|
2f63beb45b579139f7699e734d0c65adaf08809b
|
[
"Apache-2.0"
] | null | null | null |
supernovasearch/__init__.py
|
mmiguel6288code/super-nova-search
|
2f63beb45b579139f7699e734d0c65adaf08809b
|
[
"Apache-2.0"
] | null | null | null |
from . import TelCam_TSX, ImgProc
| 33
| 33
| 0.818182
| 5
| 33
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aecf8c718e0f77affd4df682a8e0f35402e8865f
| 123
|
py
|
Python
|
nlptoolkit/data/__init__.py
|
jianzhnie/d2nlp
|
94da74ec9be3aeee699b358f6bba9fde43bd80c0
|
[
"Apache-2.0"
] | 3
|
2021-12-01T12:25:00.000Z
|
2022-03-07T02:22:00.000Z
|
nlptoolkit/data/__init__.py
|
jianzhnie/nlp-toolkit
|
94da74ec9be3aeee699b358f6bba9fde43bd80c0
|
[
"Apache-2.0"
] | null | null | null |
nlptoolkit/data/__init__.py
|
jianzhnie/nlp-toolkit
|
94da74ec9be3aeee699b358f6bba9fde43bd80c0
|
[
"Apache-2.0"
] | null | null | null |
'''
Author: jianzhnie
Date: 2022-03-04 17:13:55
LastEditTime: 2022-03-04 17:13:55
LastEditors: jianzhnie
Description:
'''
| 13.666667
| 33
| 0.731707
| 19
| 123
| 4.736842
| 0.631579
| 0.133333
| 0.177778
| 0.222222
| 0.311111
| 0.311111
| 0
| 0
| 0
| 0
| 0
| 0.256881
| 0.113821
| 123
| 8
| 34
| 15.375
| 0.568807
| 0.918699
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aedd5f5f563570341566e9a11f5f88ecb22decf4
| 27
|
py
|
Python
|
cap1/c1e2.4.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
cap1/c1e2.4.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
cap1/c1e2.4.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
b=5
a=3
print((2*a)*(3*b))
| 6.75
| 18
| 0.481481
| 9
| 27
| 1.444444
| 0.666667
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0.111111
| 27
| 3
| 19
| 9
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9d69caaaf8337adc344d906925f79bb62544207f
| 280
|
py
|
Python
|
ch03tests/saskatchewan/test_overlap.py
|
pittachalk/rsd-engineeringcourse
|
58465485278ceb7b07729ca6e86ab116b668d955
|
[
"CC-BY-3.0"
] | null | null | null |
ch03tests/saskatchewan/test_overlap.py
|
pittachalk/rsd-engineeringcourse
|
58465485278ceb7b07729ca6e86ab116b668d955
|
[
"CC-BY-3.0"
] | null | null | null |
ch03tests/saskatchewan/test_overlap.py
|
pittachalk/rsd-engineeringcourse
|
58465485278ceb7b07729ca6e86ab116b668d955
|
[
"CC-BY-3.0"
] | 1
|
2022-02-20T07:59:49.000Z
|
2022-02-20T07:59:49.000Z
|
from .overlap import overlap
def test_full_overlap():
assert overlap((1.,1.,4.,4.),(2.,2.,3.,3.)) == 1.0
def test_partial_overlap():
assert overlap((1,1,4,4),(2,2,3,4.5)) == 2.0
def test_no_overlap():
assert overlap((1,1,4,4),(4.5,4.5,5,5)) == 0.0
| 25.454545
| 54
| 0.560714
| 55
| 280
| 2.745455
| 0.272727
| 0.05298
| 0.397351
| 0.417219
| 0.516556
| 0.516556
| 0.516556
| 0.357616
| 0.357616
| 0.357616
| 0
| 0.146018
| 0.192857
| 280
| 10
| 55
| 28
| 0.522124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.428571
| 1
| 0.428571
| true
| 0
| 0.142857
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
9dd3405557174c4208ad363c5703913a9c2ebaed
| 9,961
|
py
|
Python
|
hufscoops/haksik_db_to.py
|
JunKiBeom/HUFormation-kakao
|
a76c23fa0e8e0625b7ae98e79df117c9e4ac8fb5
|
[
"MIT"
] | 5
|
2017-12-03T15:09:09.000Z
|
2019-04-21T09:17:21.000Z
|
hufscoops/haksik_db_to.py
|
roharon/HUFormation
|
ff6e49e285e39b132d426a96f64d7f5aa6f375ed
|
[
"MIT"
] | 3
|
2017-12-12T03:50:15.000Z
|
2020-06-06T01:09:50.000Z
|
hufscoops/haksik_db_to.py
|
roharon/HUFormation
|
ff6e49e285e39b132d426a96f64d7f5aa6f375ed
|
[
"MIT"
] | 10
|
2017-12-03T13:16:22.000Z
|
2021-12-14T05:35:48.000Z
|
import sqlite3
import random
import datetime
#from django.shortcuts import render
def db_send(cafeteria, dates):
if dates == 'today':
today = datetime.date.today()
today_date = today.strftime('%m월 %d일')
con = sqlite3.connect("./DB/haksik_data.db")
cur = con.cursor()
elif dates == 'tomorrow':
today = datetime.date.today() + datetime.timedelta(days=1)
today_date = today.strftime('%m월 %d일')
con = sqlite3.connect("./DB/tomorrow_haksik_data.db")
cur = con.cursor()
else:
con = sqlite3.connect("./DB/haksik_data.db")
cur = con.cursor()
line_mark = '_____________________'
if cafeteria == '인문관':
cur.execute("SELECT breakfast from 인문관")
elif cafeteria == '교수회관':
cur.execute("SELECT breakfast from 교수회관")
elif cafeteria == '스카이 라운지':
cur.execute("SELECT breakfast from 스카이라운지")
elif cafeteria == '후생관':
cur.execute("SELECT breakfast from 후생관")
elif cafeteria == '어문관':
cur.execute("SELECT breakfast from 어문관")
elif cafeteria == '기숙사 식당':
cur.execute("SELECT breakfast from 기숙사")
elif cafeteria == '교직원 식당':
cur.execute("SELECT breakfast from 교직원")
elif cafeteria == '국제사회교육원':
cur.execute("SELECT breakfast from 국제사회교육원")
haksik_list = []
info = cur.fetchall()
num = len(info)
for i in range(0, num):
if len(info[i][0]) == 0:
pass
else:
haksik_list.append(info[i][0])
#print(haksik_list)
count = len(haksik_list)
# print(num)
# print(cur.fetchone()[0])
# 메뉴 개수
menu = ''
for i in range(0, count):
menu = menu + '\n' + line_mark + '\n' + str(haksik_list[i])
if cafeteria == '인문관':
cur.execute("SELECT lunch from 인문관")
elif cafeteria == '교수회관':
cur.execute("SELECT lunch from 교수회관")
elif cafeteria == '스카이 라운지':
cur.execute("SELECT lunch from 스카이라운지")
elif cafeteria == '후생관':
cur.execute("SELECT lunch from 후생관")
elif cafeteria == '어문관':
cur.execute("SELECT lunch from 어문관")
elif cafeteria == '기숙사 식당':
cur.execute("SELECT lunch from 기숙사")
elif cafeteria == '교직원 식당':
cur.execute("SELECT lunch from 교직원")
elif cafeteria == '국제사회교육원':
cur.execute("SELECT lunch from 국제사회교육원")
haksik_list = []
info = cur.fetchall()
num = len(info)
for i in range(0, num):
if len(info[i][0]) <= 3:
pass
else:
haksik_list.append(info[i][0])
#print(haksik_list)
count = len(haksik_list)
# print(num)
# print(cur.fetchone()[0])
# 메뉴 개수
for i in range(0, count):
if haksik_list[i] in menu:
pass
else:
menu = menu + '\n' + line_mark + '\n' + str(haksik_list[i])
if cafeteria == '인문관':
cur.execute("SELECT dinner from 인문관")
elif cafeteria == '교수회관':
cur.execute("SELECT dinner from 교수회관")
elif cafeteria == '스카이 라운지':
cur.execute("SELECT dinner from 스카이라운지")
elif cafeteria == '후생관':
cur.execute("SELECT dinner from 후생관")
elif cafeteria == '어문관':
cur.execute("SELECT dinner from 어문관")
elif cafeteria == '기숙사 식당':
cur.execute("SELECT dinner from 기숙사")
elif cafeteria == '교직원 식당':
cur.execute("SELECT dinner from 교직원")
elif cafeteria == '국제사회교육원':
cur.execute("SELECT dinner from 국제사회교육원")
haksik_list = []
info = cur.fetchall()
#print(info)
num = len(info)
for i in range(0, num):
if len(info[i][0]) <= 4:
pass
else:
haksik_list.append(info[i][0])
#print(haksik_list)
count = len(haksik_list)
# print(num)
# print(cur.fetchone()[0])
# 메뉴 개수
for i in range(0, count):
if haksik_list[i] in menu:
pass
else:
menu = menu + '\n' + str(line_mark) + '\n' + str(haksik_list[i])
con.close()
if len(menu) <= 27:
emoti = '(허걱)', '(멘붕)', '(깜짝)', '(허걱)', '(부르르)', '(훌쩍)', '(우와)', '(심각)', '(헉)'
menu = '\n오늘은 학식이 없어요 ' + random.choice(emoti)
return [menu, today_date]
def db_time_send(campus, cafe_time):
con = sqlite3.connect("./DB/haksik_data.db")
cur = con.cursor()
menu = ''
today = datetime.date.today()
today_date = today.strftime('%m월 %d일')
if cafe_time == '아침':
sentence = 'breakfast'
elif cafe_time == '점심':
sentence = 'lunch'
elif cafe_time == '저녁':
sentence = 'dinner'
if campus == 'seoul':
cafeteria = ["인문관", "교수회관", "스카이라운지"]
elif campus == 'global':
cafeteria = ['후생관', '어문관', '기숙사 식당', '교직원 식당', '국제사회교육원']
for j in range(0, len(cafeteria)):
querys = "SELECT " + sentence + " from " + cafeteria[j]
cur.execute(querys)
haksik_list = []
info = cur.fetchall()
num = len(info)
for i in range(0, num):
if len(info[i][0]) == 0:
pass
else:
haksik_list.append(info[i][0])
count = len(haksik_list)
for i in range(0, count):
if len(str(haksik_list[i])) < 4:
continue
menu = menu + '\n' + '_____________' + '\n' + cafeteria[j] + ' 메뉴\n' + str(haksik_list[i])
con.close()
if len(menu) <= 18:
emoti = '(허걱)', '(멘붕)', '(깜짝)', '(허걱)', '(부르르)', '(훌쩍)', '(우와)', '(심각)', '(헉)'
menu = '\n오늘은 학식이 없어요 ' + random.choice(emoti)
return menu
"""
def template(campus, cafe_time):
con = sqlite3.connect('./DB/haksik_data.db')
cur = con.cursor()
if cafe_time == '아침':
sentence = 'breakfast'
elif cafe_time == '점심':
sentence = 'lunch'
elif cafe_time == '저녁':
sentence = 'dinner'
if campus == 'seoul':
cafeteria = ["인문관", "교수회관", "스카이라운지"]
elif campus == 'global':
cafeteria = ['후생관', '어문관', '기숙사 식당', '교직원 식당', '국제사회교육원']
cafemenu = []
return render(request, 'blog/glo_haksik_table.html', {'posts': posts}
template('global', '점심')
"""
"""
elif campus == '글로벌':
if cafeteria == '아침':
cur.execute("SELECT breakfast from 인문관")
elif cafeteria == '교수회관':
cur.execute("SELECT breakfast from 교수회관")
elif cafeteria == '스카이 라운지':
cur.execute("SELECT breakfast from 스카이라운지")
elif cafeteria == '후생관':
cur.execute("SELECT breakfast from 후생관")
elif cafeteria == '어문관':
cur.execute("SELECT breakfast from 어문관")
elif cafeteria == '기숙사 식당':
cur.execute("SELECT breakfast from 기숙사")
elif cafeteria == '교직원 식당':
cur.execute("SELECT breakfast from 교직원")
elif cafeteria == '국제사회교육원':
cur.execute("SELECT breakfast from 국제사회교육원")
haksik_list = []
info = cur.fetchall()
num = len(info)
for i in range(0, num):
if len(info[i][0]) == 0:
pass
else:
haksik_list.append(info[i][0])
count = len(haksik_list)
# 메뉴 개수
menu = ''
for i in range(0, count):
menu = menu + '\n' + '_____________' + '\n' + str(haksik_list[i])
if cafeteria == '인문관':
cur.execute("SELECT lunch from 인문관")
elif cafeteria == '교수회관':
cur.execute("SELECT lunch from 교수회관")
elif cafeteria == '스카이 라운지':
cur.execute("SELECT lunch from 스카이라운지")
elif cafeteria == '후생관':
cur.execute("SELECT lunch from 후생관")
elif cafeteria == '어문관':
cur.execute("SELECT lunch from 어문관")
elif cafeteria == '기숙사 식당':
cur.execute("SELECT lunch from 기숙사")
elif cafeteria == '교직원 식당':
cur.execute("SELECT lunch from 교직원")
elif cafeteria == '국제사회교육원':
cur.execute("SELECT lunch from 국제사회교육원")
haksik_list = []
info = cur.fetchall()
num = len(info)
for i in range(0, num):
if len(info[i][0]) <= 2:
pass
else:
haksik_list.append(info[i][0])
#print(haksik_list)
count = len(haksik_list)
# print(num)
# print(cur.fetchone()[0])
# 메뉴 개수
for i in range(0, count):
if haksik_list[i] in menu:
pass
else:
menu = menu + '\n' + '_____________' + '\n' + str(haksik_list[i])
if cafeteria == '인문관':
cur.execute("SELECT dinner from 인문관")
elif cafeteria == '교수회관':
cur.execute("SELECT dinner from 교수회관")
elif cafeteria == '스카이 라운지':
cur.execute("SELECT dinner from 스카이라운지")
elif cafeteria == '후생관':
cur.execute("SELECT dinner from 후생관")
elif cafeteria == '어문관':
cur.execute("SELECT dinner from 어문관")
elif cafeteria == '기숙사 식당':
cur.execute("SELECT dinner from 기숙사")
elif cafeteria == '교직원 식당':
cur.execute("SELECT dinner from 교직원")
elif cafeteria == '국제사회교육원':
cur.execute("SELECT dinner from 국제사회교육원")
haksik_list = []
info = cur.fetchall()
#print(info)
num = len(info)
for i in range(0, num):
if len(info[i][0]) == 0:
pass
else:
haksik_list.append(info[i][0])
#print(haksik_list)
count = len(haksik_list)
# print(num)
# print(cur.fetchone()[0])
# 메뉴 개수
for i in range(0, count):
if haksik_list[i] in menu:
pass
else:
menu = menu + '\n' + '_____________' + '\n' + str(haksik_list[i])
con.close()
if len(menu) <= 16:
emoti = '(허걱)', '(멘붕)', '(깜짝)', '(허걱)', '(부르르)', '(훌쩍)', '(우와)', '(심각)', '(헉)'
menu = '\n오늘은 학식이 없어요 ' + random.choice(emoti)
return menu
"""
| 28.138418
| 103
| 0.527357
| 1,191
| 9,961
| 4.29471
| 0.093199
| 0.095797
| 0.150147
| 0.078201
| 0.910459
| 0.910459
| 0.90303
| 0.897752
| 0.897752
| 0.897752
| 0
| 0.008153
| 0.322759
| 9,961
| 353
| 104
| 28.21813
| 0.750074
| 0.022688
| 0
| 0.614865
| 0
| 0
| 0.19699
| 0.009219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013514
| false
| 0.040541
| 0.02027
| 0
| 0.047297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
9dd9f70ba64ad097aa34a4420cad2ed7c96c68ef
| 104
|
py
|
Python
|
pysweng/oop.py
|
lopezpdvn/pysweng
|
af28b5454385db5314876dde37f2c2bc18731734
|
[
"MIT"
] | null | null | null |
pysweng/oop.py
|
lopezpdvn/pysweng
|
af28b5454385db5314876dde37f2c2bc18731734
|
[
"MIT"
] | null | null | null |
pysweng/oop.py
|
lopezpdvn/pysweng
|
af28b5454385db5314876dde37f2c2bc18731734
|
[
"MIT"
] | null | null | null |
def dummy_function(a):
return a
DUMMY_GLOBAL_CONSTANT_0 = 'FOO';
DUMMY_GLOBAL_CONSTANT_1 = 'BAR';
| 14.857143
| 32
| 0.740385
| 16
| 104
| 4.375
| 0.6875
| 0.314286
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 0.153846
| 104
| 6
| 33
| 17.333333
| 0.772727
| 0
| 0
| 0
| 0
| 0
| 0.058252
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
9de08653abcf110539665e83bcbf0dc0cde901eb
| 134
|
py
|
Python
|
npe2/manifest/contributions/_icon.py
|
nclack/npe2
|
92555a848de10ad597f35721fd14e4b13c51c82d
|
[
"BSD-3-Clause"
] | 1
|
2022-03-29T00:29:45.000Z
|
2022-03-29T00:29:45.000Z
|
npe2/manifest/contributions/_icon.py
|
nclack/npe2
|
92555a848de10ad597f35721fd14e4b13c51c82d
|
[
"BSD-3-Clause"
] | 102
|
2021-11-23T20:18:15.000Z
|
2022-03-31T22:59:46.000Z
|
npe2/manifest/contributions/_icon.py
|
nclack/npe2
|
92555a848de10ad597f35721fd14e4b13c51c82d
|
[
"BSD-3-Clause"
] | 4
|
2021-11-22T23:04:47.000Z
|
2022-02-10T00:12:09.000Z
|
from typing import Optional
from pydantic import BaseModel
class Icon(BaseModel):
light: Optional[str]
dark: Optional[str]
| 14.888889
| 30
| 0.746269
| 17
| 134
| 5.882353
| 0.647059
| 0.22
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.186567
| 134
| 8
| 31
| 16.75
| 0.917431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d1ca9f4f8754dce75423f0f4499f51f0b3f3af62
| 28
|
py
|
Python
|
zzh/encoder/__init__.py
|
zhangzhenhu/zzh
|
ebacd9c0c46a0a537d014550bd2bff0a85452a6e
|
[
"MIT"
] | null | null | null |
zzh/encoder/__init__.py
|
zhangzhenhu/zzh
|
ebacd9c0c46a0a537d014550bd2bff0a85452a6e
|
[
"MIT"
] | null | null | null |
zzh/encoder/__init__.py
|
zhangzhenhu/zzh
|
ebacd9c0c46a0a537d014550bd2bff0a85452a6e
|
[
"MIT"
] | null | null | null |
from .label_encoder import *
| 28
| 28
| 0.821429
| 4
| 28
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 28
| 1
| 28
| 28
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ae05744a57b1d712dc103eadd64d33eac844ab76
| 29
|
py
|
Python
|
experimental/privacy_utils/__init__.py
|
lxuechen/fast-dpsgd
|
bcd920f81cb8501d16d4e953133bedba86029f56
|
[
"MIT"
] | 29
|
2021-10-24T00:43:29.000Z
|
2022-03-25T02:31:21.000Z
|
experimental/privacy_utils/__init__.py
|
lxuechen/fast-dpsgd
|
bcd920f81cb8501d16d4e953133bedba86029f56
|
[
"MIT"
] | 8
|
2021-10-30T05:57:31.000Z
|
2022-03-30T16:22:46.000Z
|
experimental/privacy_utils/__init__.py
|
lxuechen/fast-dpsgd
|
bcd920f81cb8501d16d4e953133bedba86029f56
|
[
"MIT"
] | 4
|
2021-11-03T04:40:37.000Z
|
2022-03-04T00:26:15.000Z
|
from . import privacy_engine
| 14.5
| 28
| 0.827586
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ae42766342bf4348be85b4e05f55b77fb91722e6
| 30,652
|
py
|
Python
|
markyp_bootstrap4/buttons.py
|
volfpeter/markyp-bootstrap4
|
1af5a1f9dc861a14323706ace28882ef6555739a
|
[
"MIT"
] | 21
|
2019-07-16T15:03:43.000Z
|
2021-11-16T10:51:58.000Z
|
markyp_bootstrap4/buttons.py
|
volfpeter/markyp-bootstrap4
|
1af5a1f9dc861a14323706ace28882ef6555739a
|
[
"MIT"
] | null | null | null |
markyp_bootstrap4/buttons.py
|
volfpeter/markyp-bootstrap4
|
1af5a1f9dc861a14323706ace28882ef6555739a
|
[
"MIT"
] | null | null | null |
"""
Bootstrap button elements.
See https://getbootstrap.com/docs/4.3/components/buttons/.
"""
from typing import Optional, Type
from markyp import ElementType, PropertyDict, PropertyValue
from markyp.elements import Element, StandaloneElement
from markyp_html import join
from markyp_html.forms import button, input_, label
from markyp_html.inline import a
__all__ = (
"ButtonContext", "ButtonStyle", "BaseButtonFactory", "BaseToggleButtonFactory",
"ElementButtonFactory", "ElementToggleButtonFactory",
"StandaloneElementButtonFactory", "StandaloneElementToggleButtonFactory",
"a_button", "a_toggle",
"b_button", "b_toggle",
"i_button", "i_toggle",
"l_button", "l_toggle"
)
class ButtonContext(object):
"""
The set of existing button contexts.
"""
PRIMARY: str = "primary"
SECONDARY: str = "secondary"
SUCCESS: str = "success"
DANGER: str = "danger"
WARNING: str = "warning"
INFO: str = "info"
LIGHT: str = "light"
DARK: str = "dark"
LINK: str = "link"
class ButtonStyle(object):
"""
A set of CSS class names that can be applied on buttons.
"""
__slots__ = ()
ACTIVE: str = "active"
BLOCK: str = "btn-block"
DISABLED: str = "disabled"
LARGE: str = "btn-lg"
SMALL: str = "btn-sm"
class BaseButtonFactory(object):
"""
Base class for button factories.
See https://getbootstrap.com/docs/4.3/components/buttons/.
"""
__slots__ = ()
def create_element(self,
*args: ElementType,
class_: Optional[str] = None,
**kwargs: PropertyValue) -> ElementType:
"""
Creates a new element.
Positional arguments will become the children of the created element if the element
can have children, otherwise these arguments might be ignored or converted into an
attribute (it is up to the factory).
Keyword arguments are converted into element attributes.
Arguments:
class_: CSS class names to set on the created element.
"""
raise NotImplementedError("create_element() is abstract.")
def get_css_class(self,
context: str,
*,
class_: Optional[str] = None,
outline: bool = False,
active: bool = False) -> str:
"""
Returns the CSS class string to set on the created element.
Arguments:
context: One of the constants from `ButtonContext`.
class_: Additional CSS class names to include.
outline: Whether an outline button is being created a basic one.
active: Whether the button is active (i.e. selected).
"""
base = f"btn btn-outline-{context}" if outline else f"btn btn-{context}"
if active:
base += " active"
return f"{base} {class_}" if class_ else base
def update_attributes(self,
attributes: PropertyDict,
*,
disabled: bool = False,
active: bool = False) -> PropertyDict:
"""
Updates the given dictionary of element attribute name-value pairs with the
attributes that are required by the button that is being created.
_Never_ set the `class_` or `class` attributes in this method. Those attributes
are dealt with in `get_css_class()`.
Arguments:
attributes: The element attribute dictionary to update.
disabled: Whether the element should be disabled.
active: Whether the button is active (i.e. selected).
"""
attributes["type"] = "button"
if disabled:
attributes["disabled"] = None
if active:
attributes["aria-pressed"] = True
return attributes
def primary(self,
*args: ElementType,
class_: Optional[str] = None,
disabled: bool = False,
active: bool = False,
**kwargs: PropertyValue) -> ElementType:
"""
Creates a new button with primary context.
Positional arguments will become the children of the created element if the element
can have children. Otherwise these arguments might be converted into an attribute
of the created element or be completely ignored - it is up to the factory.
Keyword arguments are converted into element attributes.
Arguments:
class_: CSS class names to set on the created element.
disabled: Whether the element should be disabled.
active: Whether the button is active (i.e. selected).
"""
return self.create_element(
*args,
class_=self.get_css_class(ButtonContext.PRIMARY, class_=class_, outline=False, active=active),
**self.update_attributes(kwargs, disabled=disabled, active=active)
)
def primary_outline(self,
*args: ElementType,
class_: Optional[str] = None,
disabled: bool = False,
active: bool = False,
**kwargs: PropertyValue) -> ElementType:
"""
Creates a new outline button with primary context.
Positional arguments will become the children of the created element if the element
can have children. Otherwise these arguments might be converted into an attribute
of the created element or be completely ignored - it is up to the factory.
Keyword arguments are converted into element attributes.
Arguments:
class_: CSS class names to set on the created element.
disabled: Whether the element should be disabled.
active: Whether the button is active (i.e. selected).
"""
return self.create_element(
*args,
class_=self.get_css_class(ButtonContext.PRIMARY, class_=class_, outline=True, active=active),
**self.update_attributes(kwargs, disabled=disabled, active=active)
)
def secondary(self,
*args: ElementType,
class_: Optional[str] = None,
disabled: bool = False,
active: bool = False,
**kwargs: PropertyValue) -> ElementType:
"""
Creates a new button with secondary context.
Positional arguments will become the children of the created element if the element
can have children. Otherwise these arguments might be converted into an attribute
of the created element or be completely ignored - it is up to the factory.
Keyword arguments are converted into element attributes.
Arguments:
class_: CSS class names to set on the created element.
disabled: Whether the element should be disabled.
active: Whether the button is active (i.e. selected).
"""
return self.create_element(
*args,
class_=self.get_css_class(ButtonContext.SECONDARY, class_=class_, outline=False, active=active),
**self.update_attributes(kwargs, disabled=disabled, active=active)
)
def secondary_outline(self,
                      *args: ElementType,
                      class_: Optional[str] = None,
                      disabled: bool = False,
                      active: bool = False,
                      **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with secondary context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.SECONDARY, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def success(self,
            *args: ElementType,
            class_: Optional[str] = None,
            disabled: bool = False,
            active: bool = False,
            **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with success context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.SUCCESS, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def success_outline(self,
                    *args: ElementType,
                    class_: Optional[str] = None,
                    disabled: bool = False,
                    active: bool = False,
                    **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with success context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.SUCCESS, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def danger(self,
           *args: ElementType,
           class_: Optional[str] = None,
           disabled: bool = False,
           active: bool = False,
           **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with danger context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.DANGER, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def danger_outline(self,
                   *args: ElementType,
                   class_: Optional[str] = None,
                   disabled: bool = False,
                   active: bool = False,
                   **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with danger context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.DANGER, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def warning(self,
            *args: ElementType,
            class_: Optional[str] = None,
            disabled: bool = False,
            active: bool = False,
            **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with warning context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.WARNING, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def warning_outline(self,
                    *args: ElementType,
                    class_: Optional[str] = None,
                    disabled: bool = False,
                    active: bool = False,
                    **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with warning context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.WARNING, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def info(self,
         *args: ElementType,
         class_: Optional[str] = None,
         disabled: bool = False,
         active: bool = False,
         **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with info context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.INFO, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def info_outline(self,
                 *args: ElementType,
                 class_: Optional[str] = None,
                 disabled: bool = False,
                 active: bool = False,
                 **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with info context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.INFO, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def light(self,
          *args: ElementType,
          class_: Optional[str] = None,
          disabled: bool = False,
          active: bool = False,
          **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with light context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.LIGHT, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def light_outline(self,
                  *args: ElementType,
                  class_: Optional[str] = None,
                  disabled: bool = False,
                  active: bool = False,
                  **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with light context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.LIGHT, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def dark(self,
         *args: ElementType,
         class_: Optional[str] = None,
         disabled: bool = False,
         active: bool = False,
         **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with dark context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.DARK, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def dark_outline(self,
                 *args: ElementType,
                 class_: Optional[str] = None,
                 disabled: bool = False,
                 active: bool = False,
                 **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with dark context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.DARK, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def link(self,
         *args: ElementType,
         class_: Optional[str] = None,
         disabled: bool = False,
         active: bool = False,
         **kwargs: PropertyValue) -> ElementType:
    """
    Create a button with link context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.LINK, class_=class_, outline=False, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
def link_outline(self,
                 *args: ElementType,
                 class_: Optional[str] = None,
                 disabled: bool = False,
                 active: bool = False,
                 **kwargs: PropertyValue) -> ElementType:
    """
    Create an outline button with link context.

    Positional arguments become the children of the created element if the
    element can have children; otherwise the factory decides whether they are
    converted into an attribute or ignored. Keyword arguments are converted
    into element attributes.

    Arguments:
        class_: CSS class names to set on the created element.
        disabled: Whether the element should be disabled.
        active: Whether the button is active (i.e. selected).
    """
    css = self.get_css_class(ButtonContext.LINK, class_=class_, outline=True, active=active)
    attrs = self.update_attributes(kwargs, disabled=disabled, active=active)
    return self.create_element(*args, class_=css, **attrs)
class BaseToggleButtonFactory(BaseButtonFactory):
    """
    Base class for toggle button factories.

    See https://getbootstrap.com/docs/4.3/components/buttons/#button-plugin.
    """

    __slots__ = ()

    def update_attributes(self,
                          attributes: PropertyDict,
                          *,
                          disabled: bool = False,
                          active: bool = False) -> PropertyDict:
        """
        Extend the base attribute set with the attributes Bootstrap's
        button plugin needs on a toggle button.
        """
        result = super().update_attributes(attributes, disabled=disabled, active=active)
        result.update({
            "aria-pressed": active,
            "autocomplete": "off",
            "data-toggle": "button",
        })
        return result
class ElementButtonFactory(BaseButtonFactory):
    """
    Button element factory that produces `Element` instances.
    """

    __slots__ = ("_generator",)

    def __init__(self, generator: Type[Element]) -> None:
        """
        Initialization.

        Arguments:
            generator: The type of the elements the factory will produce.
        """
        super().__init__()
        # The element type every factory method will instantiate.
        self._generator: Type[Element] = generator

    def create_element(self,
                       *args: ElementType,
                       class_: Optional[str] = None,
                       **kwargs: PropertyValue) -> ElementType:
        """
        Instantiate the configured element type with the given
        children and attributes.
        """
        make = self._generator
        return make(*args, class_=class_, **kwargs)
class ElementToggleButtonFactory(BaseToggleButtonFactory):
    """
    Toggle button element factory that produces `Element` instances.
    """

    __slots__ = ("_generator",)

    def __init__(self, generator: Type[Element]) -> None:
        """
        Initialization.

        Arguments:
            generator: The type of the elements the factory will produce.
        """
        super().__init__()
        # The element type every factory method will instantiate.
        self._generator: Type[Element] = generator

    def create_element(self,
                       *args: ElementType,
                       class_: Optional[str] = None,
                       **kwargs: PropertyValue) -> ElementType:
        """
        Instantiate the configured element type with the given
        children and attributes.
        """
        make = self._generator
        return make(*args, class_=class_, **kwargs)
class StandaloneElementButtonFactory(BaseButtonFactory):
    """
    Button element factory that produces `StandaloneElement` instances.
    """

    __slots__ = ("_generator", "_pos_arg_attr")

    def __init__(self,
                 generator: Type[StandaloneElement],
                 *,
                 pos_arg_attr: str = "value") -> None:
        """
        Initialization.

        Arguments:
            generator: The type of the elements the factory will produce.
            pos_arg_attr: The name of the element attribute factory methods
                should store the stringified version of positional arguments in.
        """
        super().__init__()
        # The element type every factory method will instantiate.
        self._generator: Type[StandaloneElement] = generator
        # Attribute that receives the stringified positional arguments,
        # since standalone elements cannot have children.
        self._pos_arg_attr: str = pos_arg_attr

    def create_element(self,
                       *args: ElementType,
                       class_: Optional[str] = None,
                       **kwargs: PropertyValue) -> ElementType:
        """
        Instantiate the configured standalone element type; positional
        arguments are stringified and space-joined into the configured
        attribute instead of becoming children.
        """
        kwargs[self._pos_arg_attr] = " ".join(map(str, args))
        return self._generator(class_=class_, **kwargs)
class StandaloneElementToggleButtonFactory(BaseToggleButtonFactory):
    """
    Toggle button element factory that produces `StandaloneElement` instances.
    """

    __slots__ = ("_generator", "_pos_arg_attr")

    def __init__(self,
                 generator: Type[StandaloneElement],
                 *,
                 pos_arg_attr: str = "value") -> None:
        """
        Initialization.

        Arguments:
            generator: The type of the elements the factory will produce.
            pos_arg_attr: The name of the element attribute factory methods
                should store the stringified version of positional arguments in.
        """
        super().__init__()
        # The element type every factory method will instantiate.
        self._generator: Type[StandaloneElement] = generator
        # Attribute that receives the stringified positional arguments,
        # since standalone elements cannot have children.
        self._pos_arg_attr: str = pos_arg_attr

    def create_element(self,
                       *args: ElementType,
                       class_: Optional[str] = None,
                       **kwargs: PropertyValue) -> ElementType:
        """
        Instantiate the configured standalone element type; positional
        arguments are stringified and space-joined into the configured
        attribute instead of becoming children.
        """
        kwargs[self._pos_arg_attr] = " ".join(map(str, args))
        return self._generator(class_=class_, **kwargs)
# Ready-to-use factory instances: one (plain, toggle) pair per element kind.

a_button: ElementButtonFactory = ElementButtonFactory(a)
"""
Button factory that produces anchor (`a`) elements.
Anchor elements don't support the `disabled` flag. To achieve the same effect,
you should apply the `ButtonStyle.disabled` CSS class on the element instead.
"""

a_toggle: ElementToggleButtonFactory = ElementToggleButtonFactory(a)
"""
Toggle button factory that produces anchor (`a`) elements.
Anchor elements don't support the `disabled` flag. To achieve the same effect,
you should apply the `ButtonStyle.disabled` CSS class on the element instead.
"""

b_button: ElementButtonFactory = ElementButtonFactory(button)
"""
Button factory that produces `button` elements.
"""

b_toggle: ElementToggleButtonFactory = ElementToggleButtonFactory(button)
"""
Toggle button factory that produces `button` elements.
"""

# `input` elements are standalone (childless), so positional arguments are
# stored in the element's "value" attribute instead.
i_button: StandaloneElementButtonFactory = StandaloneElementButtonFactory(input_, pos_arg_attr="value")
"""
Button factory that produces `input_` elements.
"""

i_toggle: StandaloneElementToggleButtonFactory = StandaloneElementToggleButtonFactory(input_, pos_arg_attr="value")
"""
Toggle button factory that produces `input_` elements.
"""

l_button: ElementButtonFactory = ElementButtonFactory(label)
"""
Button factory that produces `label` elements.
"""

l_toggle: ElementToggleButtonFactory = ElementToggleButtonFactory(label)
"""
Toggle button factory that produces `label` elements.
"""
| 37.748768
| 115
| 0.613337
| 3,271
| 30,652
| 5.64384
| 0.056252
| 0.030876
| 0.052489
| 0.03808
| 0.864796
| 0.858188
| 0.840691
| 0.840691
| 0.840691
| 0.836033
| 0
| 0.000286
| 0.3144
| 30,652
| 811
| 116
| 37.795314
| 0.878182
| 0.388816
| 0
| 0.64939
| 0
| 0
| 0.038212
| 0.008914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091463
| false
| 0
| 0.018293
| 0.006098
| 0.27439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ae75c982704b3daa58de673288d10214194bcb8c
| 165
|
py
|
Python
|
skimage/exposure/__init__.py
|
RKDSOne/scikit-image
|
baa67eafcace9cde1b94ad2d467e2f2e0468e759
|
[
"BSD-3-Clause"
] | 1
|
2020-12-27T18:42:22.000Z
|
2020-12-27T18:42:22.000Z
|
skimage/exposure/__init__.py
|
RKDSOne/scikit-image
|
baa67eafcace9cde1b94ad2d467e2f2e0468e759
|
[
"BSD-3-Clause"
] | null | null | null |
skimage/exposure/__init__.py
|
RKDSOne/scikit-image
|
baa67eafcace9cde1b94ad2d467e2f2e0468e759
|
[
"BSD-3-Clause"
] | 2
|
2015-12-29T17:04:26.000Z
|
2020-10-17T15:47:30.000Z
|
from .exposure import histogram, equalize, equalize_hist
from .exposure import rescale_intensity, cumulative_distribution
from ._adapthist import equalize_adapthist
| 41.25
| 64
| 0.872727
| 19
| 165
| 7.315789
| 0.578947
| 0.172662
| 0.258993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 165
| 3
| 65
| 55
| 0.926667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ae902ed8586c8e02f498e3197c5e8e498148b8ba
| 55
|
py
|
Python
|
server/__init__.py
|
ponyfat/httpsServer
|
f57991711cb55d0ee637994829ccf9bdf498b87a
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
ponyfat/httpsServer
|
f57991711cb55d0ee637994829ccf9bdf498b87a
|
[
"MIT"
] | null | null | null |
server/__init__.py
|
ponyfat/httpsServer
|
f57991711cb55d0ee637994829ccf9bdf498b87a
|
[
"MIT"
] | null | null | null |
import httpsServer
def server():
    # Thin convenience wrapper: delegates to the `httpsServer` module's own
    # `server()` entry point. Any return value is discarded.
    httpsServer.server()
| 13.75
| 21
| 0.781818
| 6
| 55
| 7.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 55
| 4
| 21
| 13.75
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
882c16bc46f1c7af1e7db7776ab175fe2a8ce743
| 41,473
|
py
|
Python
|
lenstools/scripts/raytracing.py
|
asabyr/LensTools
|
e155d6d39361e550906cec00dbbc57686a4bca5c
|
[
"MIT"
] | 1
|
2021-04-27T02:03:11.000Z
|
2021-04-27T02:03:11.000Z
|
lenstools/scripts/raytracing.py
|
asabyr/LensTools
|
e155d6d39361e550906cec00dbbc57686a4bca5c
|
[
"MIT"
] | null | null | null |
lenstools/scripts/raytracing.py
|
asabyr/LensTools
|
e155d6d39361e550906cec00dbbc57686a4bca5c
|
[
"MIT"
] | null | null | null |
########################################################
############Ray Tracing scripts#########################
########################################################
from __future__ import division,with_statement
import sys,os
import time
import gc
from operator import add
from functools import reduce
from lenstools.simulations.logs import logdriver,logstderr,peakMemory,peakMemoryAll
from lenstools.utils.mpi import MPIWhirlPool
from lenstools import ConvergenceMap,OmegaMap,ShearMap
from lenstools.catalog import Catalog,ShearCatalog
from lenstools.simulations.raytracing import RayTracer,DensityPlane
from lenstools.pipeline.simulation import SimulationBatch
from lenstools.pipeline.settings import MapSettings,TelescopicMapSettings,CatalogSettings
from lenstools.scripts import integration_types
import numpy as np
import astropy.units as u
#############################################################
#########Spilt realizations in subdirectories################
#############################################################
def _subdirectories(num_realizations,realizations_in_subdir):
assert not(num_realizations%realizations_in_subdir),"The number of realizations in each subdirectory must be the same!"
s = list()
if num_realizations==realizations_in_subdir:
return s
for c in range(num_realizations//realizations_in_subdir):
s.append("{0}-{1}".format(c*realizations_in_subdir+1,(c+1)*realizations_in_subdir))
return s
#####################################################################################
#######Callback to call during raytracing to save the convergence at every step######
#####################################################################################
def convergence_callback(jacobian, tracer, k, realization, angle, map_batch, settings):
    # Convergence from the lensing jacobian: kappa = 1 - (A11 + A22) / 2.
    kappa = 1.0 - 0.5 * (jacobian[0] + jacobian[3])
    conv_map = ConvergenceMap(data=kappa, angle=angle)
    # File name encodes the lens redshift at step k and the realization number.
    filename = "WLconv_z{0:.2f}_{1:04d}r.{2}".format(tracer.redshift[k], realization + 1, settings.format)
    target = os.path.join(map_batch.storage_subdir, filename)
    logdriver.debug("Saving convergence map to {0}".format(target))
    conv_map.save(target)
################################################
#######Single redshift ray tracing##############
################################################
def singleRedshift(pool,batch,settings,batch_id):
#Safety check
assert isinstance(pool,MPIWhirlPool) or (pool is None)
assert isinstance(batch,SimulationBatch)
parts = batch_id.split("|")
if len(parts)==2:
assert isinstance(settings,MapSettings)
#Separate the id into cosmo_id and geometry_id
cosmo_id,geometry_id = parts
#Get a handle on the model
model = batch.getModel(cosmo_id)
#Get the corresponding simulation collection and map batch handlers
collection = [model.getCollection(geometry_id)]
map_batch = collection[0].getMapSet(settings.directory_name)
cut_redshifts = np.array([0.0])
elif len(parts)==1:
assert isinstance(settings,TelescopicMapSettings)
#Get a handle on the model
model = batch.getModel(parts[0])
#Get the corresponding simulation collection and map batch handlers
map_batch = model.getTelescopicMapSet(settings.directory_name)
collection = map_batch.mapcollections
cut_redshifts = map_batch.redshifts
else:
if (pool is None) or (pool.is_master()):
logdriver.error("Format error in {0}: too many '|'".format(batch_id))
sys.exit(1)
#Override the settings with the previously pickled ones, if prompted by user
if settings.override_with_local:
local_settings_file = os.path.join(map_batch.home_subdir,"settings.p")
settings = MapSettings.read(local_settings_file)
assert isinstance(settings,MapSettings)
if (pool is None) or (pool.is_master()):
logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))
##################################################################
##################Settings read###################################
##################################################################
#Read map angle,redshift and resolution from the settings
map_angle = settings.map_angle
source_redshift = settings.source_redshift
resolution = settings.map_resolution
if len(parts)==2:
#########################
#Use a single collection#
#########################
#Read the plane set we should use
plane_set = (settings.plane_set,)
#Randomization
nbody_realizations = (settings.mix_nbody_realizations,)
cut_points = (settings.mix_cut_points,)
normals = (settings.mix_normals,)
map_realizations = settings.lens_map_realizations
elif len(parts)==1:
#######################
#####Telescopic########
#######################
#Check that we have enough info
for attr_name in ["plane_set","mix_nbody_realizations","mix_cut_points","mix_normals"]:
if len(getattr(settings,attr_name))!=len(collection):
if (pool is None) or (pool.is_master()):
logdriver.error("You need to specify a setting {0} for each collection!".format(attr_name))
sys.exit(1)
#Read the plane set we should use
plane_set = settings.plane_set
#Randomization
nbody_realizations = settings.mix_nbody_realizations
cut_points = settings.mix_cut_points
normals = settings.mix_normals
map_realizations = settings.lens_map_realizations
#Decide which map realizations this MPI task will take care of (if pool is None, all of them)
try:
realization_offset = settings.first_realization - 1
except AttributeError:
realization_offset = 0
if pool is None:
first_map_realization = 0 + realization_offset
last_map_realization = map_realizations + realization_offset
realizations_per_task = map_realizations
logdriver.debug("Generating lensing map realizations from {0} to {1}".format(first_map_realization+1,last_map_realization))
else:
assert map_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, map_realizations must be a multiple of the number of MPI tasks!"
realizations_per_task = map_realizations//(pool.size+1)
first_map_realization = realizations_per_task*pool.rank + realization_offset
last_map_realization = realizations_per_task*(pool.rank+1) + realization_offset
logdriver.debug("Task {0} will generate lensing map realizations from {1} to {2}".format(pool.rank,first_map_realization+1,last_map_realization))
#Planes will be read from this path
plane_path = os.path.join("{0}","ic{1}","{2}")
if (pool is None) or (pool.is_master()):
for c,coll in enumerate(collection):
logdriver.info("Reading planes from {0}".format(plane_path.format(coll.storage_subdir,"-".join([str(n) for n in nbody_realizations[c]]),plane_set[c])))
#Plane info file is the same for all collections
if (not hasattr(settings,"plane_info_file")) or (settings.plane_info_file is None):
info_filename = batch.syshandler.map(os.path.join(plane_path.format(collection[0].storage_subdir,nbody_realizations[0][0],plane_set[0]),"info.txt"))
else:
info_filename = settings.plane_info_file
if (pool is None) or (pool.is_master()):
logdriver.info("Reading lens plane summary information from {0}".format(info_filename))
#Read how many snapshots are available
with open(info_filename,"r") as infofile:
num_snapshots = len(infofile.readlines())
#Save path for the maps
save_path = map_batch.storage_subdir
if (pool is None) or (pool.is_master()):
logdriver.info("Lensing maps will be saved to {0}".format(save_path))
begin = time.time()
#Log initial memory load
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
if (pool is None) or (pool.is_master()):
logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#We need one of these for cycles for each map random realization
for rloc,r in enumerate(range(first_map_realization,last_map_realization)):
#Set random seed to generate the realizations
np.random.seed(settings.seed + r)
#Instantiate the RayTracer
tracer = RayTracer()
#Force garbage collection
gc.collect()
#Start timestep
start = time.time()
last_timestamp = start
#############################################################
###############Add the lenses to the system##################
#############################################################
#Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
infofile = open(info_filename,"r")
#Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
for s in range(num_snapshots):
#Read the line
line = infofile.readline().strip("\n")
#Stop if there is nothing more to read
if line=="":
break
#Split the line in snapshot,distance,redshift
line = line.split(",")
snapshot_number = int(line[0].split("=")[1])
distance,unit = line[1].split("=")[1].split(" ")
if unit=="Mpc/h":
distance = float(distance)*model.Mpc_over_h
else:
distance = float(distance)*getattr(u,"unit")
lens_redshift = float(line[2].split("=")[1])
#Select the right collection
for n,z in enumerate(cut_redshifts):
if lens_redshift>=z:
c = n
#Randomization of planes
nbody = np.random.randint(low=0,high=len(nbody_realizations[c]))
cut = np.random.randint(low=0,high=len(cut_points[c]))
normal = np.random.randint(low=0,high=len(normals[c]))
#Log to user
logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[c][nbody],cut_points[c][cut],normals[c][normal]))
#Add the lens to the system
logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
plane_name = batch.syshandler.map(os.path.join(plane_path.format(collection[c].storage_subdir,nbody_realizations[c][nbody],plane_set[c]),settings.plane_name_format.format(snapshot_number,cut_points[c][cut],normals[c][normal],settings.plane_format)))
tracer.addLens((plane_name,distance,lens_redshift))
#Close the infofile
infofile.close()
now = time.time()
logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
last_timestamp = now
#Rearrange the lenses according to redshift and roll them randomly along the axes
tracer.reorderLenses()
now = time.time()
logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
last_timestamp = now
#Start a bucket of light rays from a regular grid of initial positions
b = np.linspace(0.0,map_angle.value,resolution)
xx,yy = np.meshgrid(b,b)
pos = np.array([xx,yy]) * map_angle.unit
if settings.tomographic_convergence:
#Trace the ray deflections and save the convergence at every step
tracer.shoot(pos,z=source_redshift,kind="jacobians",callback=convergence_callback,realization=r,angle=map_angle,map_batch=map_batch,settings=settings)
else:
#Trace the ray deflections
jacobian = tracer.shoot(pos,z=source_redshift,kind="jacobians")
now = time.time()
logdriver.info("Jacobian ray tracing for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
last_timestamp = now
#Compute shear,convergence and omega from the jacobians
if settings.convergence or settings.reduced_shear or settings.reduced_shear_convergence:
convMap = ConvergenceMap(data=1.0-0.5*(jacobian[0]+jacobian[3]),angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift)
if settings.convergence:
savename = batch.syshandler.map(os.path.join(save_path,"WLconv_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving convergence map to {0}".format(savename))
convMap.save(savename)
logdriver.debug("Saved convergence map to {0}".format(savename))
##############################################################################################################################
if settings.shear or settings.convergence_ks or settings.reduced_shear or settings.reduced_shear_convergence:
shearMap = ShearMap(data=np.array([0.5*(jacobian[3]-jacobian[0]),-0.5*(jacobian[1]+jacobian[2])]),angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift)
if settings.shear:
savename = batch.syshandler.map(os.path.join(save_path,"WLshear_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving shear map to {0}".format(savename))
shearMap.save(savename)
if settings.convergence_ks:
convMap = shearMap.convergence()
savename = batch.syshandler.map(os.path.join(save_path,"WLconv-ks_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving convergence (KS) map to {0}".format(savename))
convMap.save(savename)
if settings.reduced_shear or settings.reduced_shear_convergence:
for ng in (0,1):
shearMap.data[ng] /= (1. - convMap.data)
if settings.reduced_shear:
savename = batch.syshandler.map(os.path.join(save_path,"WLredshear_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving reduced shear map to {0}".format(savename))
shearMap.save(savename)
if settings.reduced_shear_convergence:
convMap = shearMap.convergence()
savename = batch.syshandler.map(os.path.join(save_path,"WLredconv_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving reduced shear corrected convergence map to {0}".format(savename))
convMap.save(savename)
##############################################################################################################################
if settings.omega:
omegaMap = OmegaMap(data=-0.5*(jacobian[2]-jacobian[1]),angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift)
savename = batch.syshandler.map(os.path.join(save_path,"WLomega_z{0:.2f}_{1:04d}r.{2}".format(source_redshift,r+1,settings.format)))
logdriver.info("Saving omega map to {0}".format(savename))
omegaMap.save(savename)
now = time.time()
#Log peak memory usage to stdout
peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))
#Log progress and peak memory usage to stderr
if (pool is None) or (pool.is_master()):
logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))
#Safety sync barrier
if pool is not None:
pool.comm.Barrier()
if (pool is None) or (pool.is_master()):
now = time.time()
logdriver.info("Total runtime {0:.3f}s".format(now-begin))
############################################################################################################################################################################
#########################################################
#######Save intermediate results of LOS integration######
#########################################################
def save_intermediate(add_on,tracer,k,ctype,map_batch=None,map_angle=None,realization=None):
    """Persist one intermediate add-on of the LOS integration as a FITS convergence map.

    Intended as the ``callback`` of the post-Born integrators: it is invoked
    once per lens ``k`` with the partial contribution ``add_on``.
    The keyword arguments default to None only to fit the callback signature;
    ``map_batch``, ``map_angle`` and ``realization`` must be supplied by the
    caller or this will raise.
    """
    output_name = os.path.join(map_batch.storage,"{0}-lens{1}-{2:04d}r.fits".format(ctype,k,realization))
    logdriver.info("Saving z={0:.3f} add-on to convergence to {1}".format(tracer.redshift[k],output_name))
    # Physical transverse size of the map at the comoving distance of lens k
    map_side = map_angle.to(u.rad).value*tracer.distance[k]
    ConvergenceMap(add_on,angle=map_side).save(output_name)
########################################################################################################################
def losIntegrate(pool,batch,settings,batch_id):
    """Perform line-of-sight integration of lensing observables for one batch id.

    Parameters
    ----------
    pool : MPIWhirlPool or None
        MPI pool for parallel map realizations (None runs everything locally).
    batch : SimulationBatch
        Simulation batch handler that resolves models, collections and paths.
    settings : MapSettings or TelescopicMapSettings
        Map generation settings (angle, resolution, randomization, integration type).
    batch_id : str
        Either "cosmo_id|geometry_id" (single collection, MapSettings) or
        "cosmo_id" (telescopic, TelescopicMapSettings).

    Side effects: reads lens plane info files, writes convergence/omega maps
    to the map batch storage, logs progress; calls sys.exit(1) on bad input.
    """
    #Safety check
    assert isinstance(pool,MPIWhirlPool) or (pool is None)
    assert isinstance(batch,SimulationBatch)

    parts = batch_id.split("|")

    if len(parts)==2:
        assert isinstance(settings,MapSettings)
        #Separate the id into cosmo_id and geometry_id
        cosmo_id,geometry_id = parts
        #Get a handle on the model
        model = batch.getModel(cosmo_id)
        #Get the corresponding simulation collection and map batch handlers
        collection = [model.getCollection(geometry_id)]
        map_batch = collection[0].getMapSet(settings.directory_name)
        cut_redshifts = np.array([0.0])
    elif len(parts)==1:
        assert isinstance(settings,TelescopicMapSettings)
        #Get a handle on the model
        model = batch.getModel(parts[0])
        #Get the corresponding simulation collection and map batch handlers
        map_batch = model.getTelescopicMapSet(settings.directory_name)
        collection = map_batch.mapcollections
        cut_redshifts = map_batch.redshifts
    else:
        if (pool is None) or (pool.is_master()):
            logdriver.error("Format error in {0}: too many '|'".format(batch_id))
        sys.exit(1)

    #Override the settings with the previously pickled ones, if prompted by user
    if settings.override_with_local:
        local_settings_file = os.path.join(map_batch.home_subdir,"settings.p")
        settings = MapSettings.read(local_settings_file)
        assert isinstance(settings,MapSettings)
        if (pool is None) or (pool.is_master()):
            logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))

    ##################################################################
    ##################Settings read###################################
    ##################################################################

    #Read map angle,redshift and resolution from the settings
    map_angle = settings.map_angle
    source_redshift = settings.source_redshift
    resolution = settings.map_resolution

    if len(parts)==2:
        #########################
        #Use a single collection#
        #########################
        #Read the plane set we should use
        plane_set = (settings.plane_set,)
        #Randomization
        nbody_realizations = (settings.mix_nbody_realizations,)
        cut_points = (settings.mix_cut_points,)
        normals = (settings.mix_normals,)
        map_realizations = settings.lens_map_realizations
    elif len(parts)==1:
        #######################
        #####Telescopic########
        #######################
        #Check that we have enough info
        for attr_name in ["plane_set","mix_nbody_realizations","mix_cut_points","mix_normals"]:
            if len(getattr(settings,attr_name))!=len(collection):
                if (pool is None) or (pool.is_master()):
                    logdriver.error("You need to specify a setting {0} for each collection!".format(attr_name))
                sys.exit(1)
        #Read the plane set we should use
        plane_set = settings.plane_set
        #Randomization
        nbody_realizations = settings.mix_nbody_realizations
        cut_points = settings.mix_cut_points
        normals = settings.mix_normals
        map_realizations = settings.lens_map_realizations

    #Integration type
    if settings.integration_type not in integration_types:
        if (pool is None) or (pool.is_master()):
            logdriver.error("Integration type {0} not supported, please choose one in {1}".format(settings.integration_type,integration_types))
        sys.exit(1)

    if (pool is None) or (pool.is_master()):
        logdriver.info("Line of sight integration type: {0}".format(settings.integration_type))

    #Decide which map realizations this MPI task will take care of (if pool is None, all of them)
    try:
        realization_offset = settings.first_realization - 1
    except AttributeError:
        realization_offset = 0

    if pool is None:
        first_map_realization = 0 + realization_offset
        last_map_realization = map_realizations + realization_offset
        realizations_per_task = map_realizations
        logdriver.debug("Generating lensing map realizations from {0} to {1}".format(first_map_realization+1,last_map_realization))
    else:
        assert map_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, map_realizations must be a multiple of the number of MPI tasks!"
        realizations_per_task = map_realizations//(pool.size+1)
        first_map_realization = realizations_per_task*pool.rank + realization_offset
        last_map_realization = realizations_per_task*(pool.rank+1) + realization_offset
        logdriver.debug("Task {0} will generate lensing map realizations from {1} to {2}".format(pool.rank,first_map_realization+1,last_map_realization))

    #Planes will be read from this path
    plane_path = os.path.join("{0}","ic{1}","{2}")

    if (pool is None) or (pool.is_master()):
        for c,coll in enumerate(collection):
            logdriver.info("Reading planes from {0}".format(plane_path.format(coll.storage_subdir,"-".join([str(n) for n in nbody_realizations[c]]),plane_set[c])))

    #Plane info file is the same for all collections
    if (not hasattr(settings,"plane_info_file")) or (settings.plane_info_file is None):
        info_filename = batch.syshandler.map(os.path.join(plane_path.format(collection[0].storage_subdir,nbody_realizations[0][0],plane_set[0]),"info.txt"))
    else:
        info_filename = settings.plane_info_file

    if (pool is None) or (pool.is_master()):
        logdriver.info("Reading lens plane summary information from {0}".format(info_filename))

    #Read how many snapshots are available
    with open(info_filename,"r") as infofile:
        num_snapshots = len(infofile.readlines())

    #Save path for the maps
    save_path = map_batch.storage_subdir

    if (pool is None) or (pool.is_master()):
        logdriver.info("Lensing maps will be saved to {0}".format(save_path))

    begin = time.time()

    #Log initial memory load
    peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
    if (pool is None) or (pool.is_master()):
        logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))

    #We need one of these for cycles for each map random realization
    for rloc,r in enumerate(range(first_map_realization,last_map_realization)):

        #Set random seed to generate the realizations
        np.random.seed(settings.seed + r)

        #Instantiate the RayTracer
        if settings.lens_type=="PotentialPlane":
            tracer = RayTracer()
        elif settings.lens_type=="DensityPlane":
            tracer = RayTracer(lens_type=DensityPlane)
        else:
            raise ValueError("Lens type {0} not recognized!".format(settings.lens_type))

        #Force garbage collection
        gc.collect()

        #Start timestep
        start = time.time()
        last_timestamp = start

        #############################################################
        ###############Add the lenses to the system##################
        #############################################################

        #Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
        infofile = open(info_filename,"r")

        #Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
        for s in range(num_snapshots):

            #Read the line
            line = infofile.readline().strip("\n")

            #Stop if there is nothing more to read
            if line=="":
                break

            #Split the line in snapshot,distance,redshift
            line = line.split(",")

            snapshot_number = int(line[0].split("=")[1])
            distance,unit = line[1].split("=")[1].split(" ")
            if unit=="Mpc/h":
                distance = float(distance)*model.Mpc_over_h
            else:
                #BUGFIX: resolve the unit name parsed from the info file (e.g. "Mpc","kpc"),
                #not the literal attribute "unit" which does not exist on the units module
                distance = float(distance)*getattr(u,unit)

            lens_redshift = float(line[2].split("=")[1])

            #Select the right collection (the last one whose cut redshift is below the lens)
            for n,z in enumerate(cut_redshifts):
                if lens_redshift>=z:
                    c = n

            #Randomization of planes
            nbody = np.random.randint(low=0,high=len(nbody_realizations[c]))
            cut = np.random.randint(low=0,high=len(cut_points[c]))
            normal = np.random.randint(low=0,high=len(normals[c]))

            #Log to user
            logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[c][nbody],cut_points[c][cut],normals[c][normal]))

            #Add the lens to the system
            logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
            plane_name = batch.syshandler.map(os.path.join(plane_path.format(collection[c].storage_subdir,nbody_realizations[c][nbody],plane_set[c]),settings.plane_name_format.format(snapshot_number,cut_points[c][cut],normals[c][normal],settings.plane_format)))
            tracer.addLens((plane_name,distance,lens_redshift))

        #Close the infofile
        infofile.close()

        now = time.time()
        logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
        last_timestamp = now

        #Rearrange the lenses according to redshift and roll them randomly along the axes
        tracer.reorderLenses()

        now = time.time()
        logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
        last_timestamp = now

        #Start a bucket of light rays from a regular grid of initial positions
        b = np.linspace(0.0,map_angle.value,resolution)
        xx,yy = np.meshgrid(b,b)
        pos = np.array([xx,yy]) * map_angle.unit

        #Save intermediate results
        if settings.tomographic_convergence:
            callback = save_intermediate
        else:
            callback = None

        #Perform the line of sight integration (choose integration type)
        if settings.integration_type=="born":
            image = tracer.convergenceBorn(pos,z=source_redshift,save_intermediate=False)
            img_type = ConvergenceMap
        elif settings.integration_type=="born-rt":
            image = tracer.convergenceBorn(pos,z=source_redshift,real_trajectory=True,save_intermediate=False)
            img_type = ConvergenceMap
        elif settings.integration_type=="postBorn2":
            image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
            img_type = ConvergenceMap
        elif settings.integration_type=="postBorn2-ll":
            image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=False,include_gp=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
            img_type = ConvergenceMap
        elif settings.integration_type=="postBorn2-gp":
            image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=False,include_ll=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
            img_type = ConvergenceMap
        elif settings.integration_type=="postBorn1+2":
            image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=True,callback=callback,transpose_up_to=settings.transpose_up_to,map_batch=map_batch,map_angle=map_angle,realization=r+1)
            img_type = ConvergenceMap
        elif settings.integration_type=="postBorn1+2-gp":
            image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=True,include_ll=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
            img_type = ConvergenceMap
        elif settings.integration_type=="postBorn1+2-ll":
            image = tracer.convergencePostBorn2(pos,z=source_redshift,save_intermediate=False,include_first_order=True,include_gp=False,transpose_up_to=settings.transpose_up_to,callback=callback,map_batch=map_batch,map_angle=map_angle,realization=r+1)
            img_type = ConvergenceMap
        elif settings.integration_type=="omega2":
            image = tracer.omegaPostBorn2(pos,z=source_redshift,save_intermediate=False)
            img_type = OmegaMap
        else:
            raise NotImplementedError

        now = time.time()
        logdriver.info("Line of sight integration for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
        last_timestamp = now

        #Save the image
        savename = batch.syshandler.map(os.path.join(save_path,"{0}_z{1:.2f}_{2:04d}r".format(settings.integration_type,source_redshift,r+1)))
        if settings.transpose_up_to>=0:
            savename += "_t{0}".format(settings.transpose_up_to)
        savename += ".{0}".format(settings.format)
        logdriver.info("Saving {0} map to {1}".format(settings.integration_type,savename))
        img_type(data=image,angle=map_angle,cosmology=map_batch.cosmology,redshift=source_redshift).save(savename)
        logdriver.debug("Saving {0} map to {1}".format(settings.integration_type,savename))

        now = time.time()

        #Log peak memory usage to stdout
        peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
        logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
        logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))

        #Log progress and peak memory usage to stderr
        if (pool is None) or (pool.is_master()):
            logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))

    #Safety sync barrier
    if pool is not None:
        pool.comm.Barrier()

    if (pool is None) or (pool.is_master()):
        now = time.time()
        logdriver.info("Total runtime {0:.3f}s".format(now-begin))
############################################################################################################################################################################
###############################################
#######Galaxy catalog ray tracing##############
###############################################
def simulatedCatalog(pool,batch,settings,batch_id):
    """Ray-trace simulated shear catalogs at the positions of real galaxies.

    Parameters
    ----------
    pool : MPIWhirlPool or None
        MPI pool for parallel catalog realizations (None runs everything locally).
    batch : SimulationBatch
        Simulation batch handler that resolves models, collections and paths.
    settings : CatalogSettings
        Catalog generation settings (input position catalogs, randomization, output format).
    batch_id : str
        "cosmo_id|geometry_id" identifying the model and collection.

    Side effects: reads galaxy position catalogs and lens plane info files,
    writes shear (or reduced shear) catalogs to storage, logs progress.
    """
    #Safety check
    assert isinstance(pool,MPIWhirlPool) or (pool is None)
    assert isinstance(batch,SimulationBatch)
    assert isinstance(settings,CatalogSettings)

    #Separate the id into cosmo_id and geometry_id
    cosmo_id,geometry_id = batch_id.split("|")

    #Get a handle on the model
    model = batch.getModel(cosmo_id)

    #Scale the box size to the correct units
    nside,box_size = geometry_id.split("b")
    box_size = float(box_size)*model.Mpc_over_h

    #Get the corresponding simulation collection and catalog handler
    collection = model.getCollection(box_size,nside)
    catalog = collection.getCatalog(settings.directory_name)

    #Override the settings with the previously pickled ones, if prompted by user
    if settings.override_with_local:
        local_settings_file = os.path.join(catalog.home_subdir,"settings.p")
        settings = CatalogSettings.read(local_settings_file)
        assert isinstance(settings,CatalogSettings)
        if (pool is None) or (pool.is_master()):
            logdriver.warning("Overriding settings with the previously pickled ones at {0}".format(local_settings_file))

    ##################################################################
    ##################Settings read###################################
    ##################################################################

    #Read the catalog save path from the settings
    catalog_save_path = catalog.storage_subdir
    if (pool is None) or (pool.is_master()):
        logdriver.info("Lensing catalogs will be saved to {0}".format(catalog_save_path))

    #Read the total number of galaxies to raytrace from the settings
    total_num_galaxies = settings.total_num_galaxies

    #Pre-allocate numpy arrays
    initial_positions = np.zeros((2,total_num_galaxies)) * settings.catalog_angle_unit
    galaxy_redshift = np.zeros(total_num_galaxies)

    #Keep track of the number of galaxies for each catalog
    galaxies_in_catalog = list()

    #Fill in initial positions and redshifts
    for galaxy_position_file in settings.input_files:

        #reduce raises TypeError on an empty sequence: no catalogs read yet
        try:
            galaxies_before = reduce(add,galaxies_in_catalog)
        except TypeError:
            galaxies_before = 0

        #Read the galaxy positions and redshifts from the position catalog
        if (pool is None) or (pool.is_master()):
            logdriver.info("Reading galaxy positions and redshifts from {0}".format(galaxy_position_file))
        position_catalog = Catalog.read(galaxy_position_file)
        if (pool is None) or (pool.is_master()):
            logdriver.info("Galaxy catalog {0} contains {1} galaxies".format(galaxy_position_file,len(position_catalog)))

        #This is just to avoid confusion
        assert position_catalog.meta["AUNIT"]==settings.catalog_angle_unit.to_string(),"Catalog angle units, {0}, do not match with the ones provided in the settings, {1}".format(position_catalog.meta["AUNIT"],settings.catalog_angle_unit.to_string())

        #Keep track of the number of galaxies in the catalog
        galaxies_in_catalog.append(len(position_catalog))

        if (pool is None) or (pool.is_master()):
            #Save a copy of the position catalog to the simulated catalogs directory
            position_catalog.write(os.path.join(catalog_save_path,os.path.basename(galaxy_position_file)),overwrite=True)

        #Fill in initial positions and redshifts
        initial_positions[0,galaxies_before:galaxies_before+len(position_catalog)] = position_catalog["x"] * getattr(u,position_catalog.meta["AUNIT"])
        initial_positions[1,galaxies_before:galaxies_before+len(position_catalog)] = position_catalog["y"] * getattr(u,position_catalog.meta["AUNIT"])
        galaxy_redshift[galaxies_before:galaxies_before+len(position_catalog)] = position_catalog["z"]

    #Make sure that the total number of galaxies matches, and units are correct
    assert reduce(add,galaxies_in_catalog)==total_num_galaxies,"The total number of galaxies in the catalogs, {0}, does not match the number provided in the settings, {1}".format(reduce(add,galaxies_in_catalog),total_num_galaxies)

    ##########################################################################################################################################################
    ####################################Initial positions and redshifts of galaxies loaded####################################################################
    ##########################################################################################################################################################

    #Read the randomization information from the settings
    nbody_realizations = settings.mix_nbody_realizations
    cut_points = settings.mix_cut_points
    normals = settings.mix_normals
    catalog_realizations = settings.lens_catalog_realizations

    if hasattr(settings,"realizations_per_subdirectory"):
        realizations_in_subdir = settings.realizations_per_subdirectory
    else:
        realizations_in_subdir = catalog_realizations

    #Create subdirectories as necessary
    catalog_subdirectory = _subdirectories(catalog_realizations,realizations_in_subdir)
    if (pool is None) or (pool.is_master()):
        for d in catalog_subdirectory:
            dir_to_make = os.path.join(catalog_save_path,d)
            if not(os.path.exists(dir_to_make)):
                logdriver.info("Creating catalog subdirectory {0}".format(dir_to_make))
                os.mkdir(dir_to_make)

    #Safety barrier sync
    if pool is not None:
        pool.comm.Barrier()

    #Decide which map realizations this MPI task will take care of (if pool is None, all of them)
    try:
        realization_offset = settings.first_realization - 1
    except AttributeError:
        realization_offset = 0

    if pool is None:
        first_realization = 0 + realization_offset
        last_realization = catalog_realizations + realization_offset
        realizations_per_task = catalog_realizations
        logdriver.debug("Generating lensing catalog realizations from {0} to {1}".format(first_realization+1,last_realization))
    else:
        assert catalog_realizations%(pool.size+1)==0,"Perfect load-balancing enforced, catalog_realizations must be a multiple of the number of MPI tasks!"
        realizations_per_task = catalog_realizations//(pool.size+1)
        first_realization = realizations_per_task*pool.rank + realization_offset
        last_realization = realizations_per_task*(pool.rank+1) + realization_offset
        logdriver.debug("Task {0} will generate lensing catalog realizations from {1} to {2}".format(pool.rank,first_realization+1,last_realization))

    #Planes will be read from this path
    plane_path = os.path.join(collection.storage_subdir,"ic{0}",settings.plane_set)

    if (pool is None) or (pool.is_master()):
        logdriver.info("Reading planes from {0}".format(plane_path.format("-".join([str(n) for n in nbody_realizations]))))

    #Read how many snapshots are available
    with open(batch.syshandler.map(os.path.join(plane_path.format(nbody_realizations[0]),"info.txt")),"r") as infofile:
        num_snapshots = len(infofile.readlines())

    begin = time.time()

    #Log initial memory load
    peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
    if (pool is None) or (pool.is_master()):
        logstderr.info("Initial memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))

    #We need one of these for cycles for each map random realization
    for rloc,r in enumerate(range(first_realization,last_realization)):

        #Set random seed to generate the realizations
        np.random.seed(settings.seed + r)

        #Instantiate the RayTracer
        tracer = RayTracer()

        #Force garbage collection
        gc.collect()

        #Start timestep
        start = time.time()
        last_timestamp = start

        #############################################################
        ###############Add the lenses to the system##################
        #############################################################

        #Open the info file to read the lens specifications (assume the info file is the same for all nbody realizations)
        infofile = open(os.path.join(plane_path.format(nbody_realizations[0]),"info.txt"),"r")

        #Read the info file line by line, and decide if we should add the particular lens corresponding to that line or not
        for s in range(num_snapshots):

            #Read the line
            line = infofile.readline().strip("\n")

            #Stop if there is nothing more to read
            if line=="":
                break

            #Split the line in snapshot,distance,redshift
            line = line.split(",")

            snapshot_number = int(line[0].split("=")[1])
            distance,unit = line[1].split("=")[1].split(" ")
            if unit=="Mpc/h":
                distance = float(distance)*model.Mpc_over_h
            else:
                #BUGFIX: resolve the unit name parsed from the info file (e.g. "Mpc","kpc"),
                #not the literal attribute "unit" which does not exist on the units module
                distance = float(distance)*getattr(u,unit)

            lens_redshift = float(line[2].split("=")[1])

            #Randomization of planes
            nbody = np.random.randint(low=0,high=len(nbody_realizations))
            cut = np.random.randint(low=0,high=len(cut_points))
            normal = np.random.randint(low=0,high=len(normals))

            #Log to user
            logdriver.debug("Realization,snapshot=({0},{1}) --> NbodyIC,cut_point,normal=({2},{3},{4})".format(r,s,nbody_realizations[nbody],cut_points[cut],normals[normal]))

            #Add the lens to the system
            logdriver.info("Adding lens at redshift {0}".format(lens_redshift))
            plane_name = batch.syshandler.map(os.path.join(plane_path.format(nbody_realizations[nbody]),settings.plane_name_format.format(snapshot_number,cut_points[cut],normals[normal],settings.plane_format)))
            tracer.addLens((plane_name,distance,lens_redshift))

        #Close the infofile
        infofile.close()

        now = time.time()
        logdriver.info("Plane specification reading completed in {0:.3f}s".format(now-start))
        last_timestamp = now

        #Rearrange the lenses according to redshift and roll them randomly along the axes
        tracer.reorderLenses()

        now = time.time()
        logdriver.info("Reordering completed in {0:.3f}s".format(now-last_timestamp))
        last_timestamp = now

        #Trace the ray deflections through the lenses
        jacobian = tracer.shoot(initial_positions,z=galaxy_redshift,kind="jacobians")

        now = time.time()
        logdriver.info("Jacobian ray tracing for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
        last_timestamp = now

        #Build the shear catalog and save it to disk
        if settings.reduced_shear:
            shear_catalog = ShearCatalog([(jacobian[3]-jacobian[0])/(jacobian[3]+jacobian[0]),-(jacobian[1]+jacobian[2])/(jacobian[3]+jacobian[0])],names=("shear1","shear2"))
        else:
            shear_catalog = ShearCatalog([0.5*(jacobian[3]-jacobian[0]),-0.5*(jacobian[1]+jacobian[2])],names=("shear1","shear2"))

        for n,galaxy_position_file in enumerate(settings.input_files):

            #reduce raises TypeError on an empty slice: first catalog starts at 0
            try:
                galaxies_before = reduce(add,galaxies_in_catalog[:n])
            except TypeError:
                galaxies_before = 0

            #Build savename
            if settings.reduced_shear:
                shear_root = "WLredshear_"
            else:
                shear_root = "WLshear_"

            if len(catalog_subdirectory):
                shear_catalog_savename = batch.syshandler.map(os.path.join(catalog_save_path,catalog_subdirectory[r//realizations_in_subdir],shear_root+os.path.basename(galaxy_position_file.split(".")[0])+"_{0:04d}r.{1}".format(r+1,settings.format)))
            else:
                shear_catalog_savename = batch.syshandler.map(os.path.join(catalog_save_path,shear_root+os.path.basename(galaxy_position_file.split(".")[0])+"_{0:04d}r.{1}".format(r+1,settings.format)))

            if settings.reduced_shear:
                logdriver.info("Saving simulated reduced shear catalog to {0}".format(shear_catalog_savename))
            else:
                logdriver.info("Saving simulated shear catalog to {0}".format(shear_catalog_savename))
            shear_catalog[galaxies_before:galaxies_before+galaxies_in_catalog[n]].write(shear_catalog_savename,overwrite=True)

        now = time.time()

        #Log peak memory usage to stdout
        peak_memory_task,peak_memory_all = peakMemory(),peakMemoryAll(pool)
        logdriver.info("Weak lensing calculations for realization {0} completed in {1:.3f}s".format(r+1,now-last_timestamp))
        logdriver.info("Peak memory usage: {0:.3f} (task), {1[0]:.3f} (all {1[1]} tasks)".format(peak_memory_task,peak_memory_all))

        #Log progress and peak memory usage to stderr
        if (pool is None) or (pool.is_master()):
            logstderr.info("Progress: {0:.2f}%, peak memory usage: {1:.3f} (task), {2[0]:.3f} (all {2[1]} tasks)".format(100*(rloc+1.)/realizations_per_task,peak_memory_task,peak_memory_all))

    #Safety sync barrier
    if pool is not None:
        pool.comm.Barrier()

    if (pool is None) or (pool.is_master()):
        now = time.time()
        logdriver.info("Total runtime {0:.3f}s".format(now-begin))
| 40.981225
| 252
| 0.696718
| 5,595
| 41,473
| 5.004826
| 0.08025
| 0.015642
| 0.011428
| 0.015427
| 0.826262
| 0.796086
| 0.776016
| 0.765088
| 0.749875
| 0.72302
| 0
| 0.012329
| 0.117956
| 41,473
| 1,011
| 253
| 41.021761
| 0.753151
| 0.140766
| 0
| 0.694698
| 0
| 0.025594
| 0.143941
| 0.016444
| 0
| 0
| 0
| 0
| 0.036563
| 1
| 0.010969
| false
| 0
| 0.02925
| 0
| 0.043876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
882efab0caa49847432ad109b0b2c800df03cc16
| 58
|
py
|
Python
|
test_module.py
|
eshutan/astr-19
|
2c5f6f2d307518c651c980bffc56cc9d3a651762
|
[
"MIT"
] | null | null | null |
test_module.py
|
eshutan/astr-19
|
2c5f6f2d307518c651c980bffc56cc9d3a651762
|
[
"MIT"
] | 1
|
2022-03-31T18:04:57.000Z
|
2022-03-31T18:04:57.000Z
|
test_module.py
|
eshutan/astr-19
|
2c5f6f2d307518c651c980bffc56cc9d3a651762
|
[
"MIT"
] | null | null | null |
def hello_world():
    """Print a greeting confirming the module was imported and called."""
    message = "hello world, we made a module"
    print(message)
| 29
| 39
| 0.724138
| 10
| 58
| 4.1
| 0.8
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 58
| 2
| 39
| 29
| 0.82
| 0
| 0
| 0
| 0
| 0
| 0.491525
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
88366ff7df2b6e6cea549758f84ab0e73b84db32
| 102
|
py
|
Python
|
couchdb_cluster_admin/__init__.py
|
dimagi/couchdb-cluster-admin
|
1e201d6722ea4165a5f9230f5e1551df80fd14ca
|
[
"BSD-3-Clause"
] | 2
|
2018-06-20T13:39:39.000Z
|
2021-09-24T14:25:07.000Z
|
couchdb_cluster_admin/__init__.py
|
dimagi/couchdb-cluster-admin
|
1e201d6722ea4165a5f9230f5e1551df80fd14ca
|
[
"BSD-3-Clause"
] | 13
|
2017-10-31T20:44:49.000Z
|
2021-07-28T14:56:14.000Z
|
couchdb_cluster_admin/__init__.py
|
dimagi/couchdb-cluster-admin
|
1e201d6722ea4165a5f9230f5e1551df80fd14ca
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from __future__ import unicode_literals
__version__ = "0.7.0"
| 20.4
| 39
| 0.833333
| 14
| 102
| 5.071429
| 0.642857
| 0.28169
| 0.450704
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.117647
| 102
| 4
| 40
| 25.5
| 0.755556
| 0
| 0
| 0
| 0
| 0
| 0.04902
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
884900ff4ceb6fff53d0c9a9c72299eaccad8146
| 175
|
py
|
Python
|
glimslib/simulation/config.py
|
danielabler/glimslib
|
3d345bf3ed2d364e83a00ad9297dd5f81d7193db
|
[
"MIT"
] | null | null | null |
glimslib/simulation/config.py
|
danielabler/glimslib
|
3d345bf3ed2d364e83a00ad9297dd5f81d7193db
|
[
"MIT"
] | 5
|
2019-01-05T01:38:05.000Z
|
2019-06-03T16:49:27.000Z
|
glimslib/simulation/config.py
|
danielabler/glimslib
|
3d345bf3ed2d364e83a00ad9297dd5f81d7193db
|
[
"MIT"
] | 3
|
2019-12-28T16:01:48.000Z
|
2022-01-20T08:44:01.000Z
|
from glimslib.config import *
output_dir_simulation_tmp = os.path.join(output_dir_simulation, 'sim_tmp')
output_dir_plot_tmp = os.path.join(output_dir_simulation, 'plot_tmp')
| 43.75
| 74
| 0.828571
| 28
| 175
| 4.75
| 0.464286
| 0.270677
| 0.428571
| 0.195489
| 0.481203
| 0.481203
| 0.481203
| 0
| 0
| 0
| 0
| 0
| 0.068571
| 175
| 4
| 75
| 43.75
| 0.815951
| 0
| 0
| 0
| 0
| 0
| 0.085227
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
889f60be797ad3a7a481fde13a5dc66b601d901d
| 346
|
py
|
Python
|
building_footprint_segmentation/seg/base_criterion.py
|
santhi-2020/building-footprint-segmentation
|
88e6481ed8b957858da872c069c054007ff27f83
|
[
"MIT"
] | 28
|
2020-11-24T05:28:10.000Z
|
2022-03-25T08:29:36.000Z
|
building_footprint_segmentation/seg/base_criterion.py
|
santhi-2020/building-footprint-segmentation
|
88e6481ed8b957858da872c069c054007ff27f83
|
[
"MIT"
] | 6
|
2021-01-13T15:22:00.000Z
|
2022-03-09T07:30:15.000Z
|
building_footprint_segmentation/seg/base_criterion.py
|
santhi-2020/building-footprint-segmentation
|
88e6481ed8b957858da872c069c054007ff27f83
|
[
"MIT"
] | 14
|
2020-12-22T13:23:10.000Z
|
2022-02-19T18:57:49.000Z
|
from abc import abstractmethod
class BaseCriterion:
def __init__(self, **kwargs):
pass
def __call__(self, ground_truth, predictions):
return self.compute_criterion(ground_truth, predictions)
@abstractmethod
def compute_criterion(self, ground_truth, predictions):
raise NotImplementedError
| 24.714286
| 65
| 0.702312
| 34
| 346
| 6.764706
| 0.588235
| 0.143478
| 0.286957
| 0.226087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234104
| 346
| 13
| 66
| 26.615385
| 0.867925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0.111111
| 0.111111
| 0.111111
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.