hexsha
string
size
int64
ext
string
lang
string
max_stars_repo_path
string
max_stars_repo_name
string
max_stars_repo_head_hexsha
string
max_stars_repo_licenses
list
max_stars_count
int64
max_stars_repo_stars_event_min_datetime
string
max_stars_repo_stars_event_max_datetime
string
max_issues_repo_path
string
max_issues_repo_name
string
max_issues_repo_head_hexsha
string
max_issues_repo_licenses
list
max_issues_count
int64
max_issues_repo_issues_event_min_datetime
string
max_issues_repo_issues_event_max_datetime
string
max_forks_repo_path
string
max_forks_repo_name
string
max_forks_repo_head_hexsha
string
max_forks_repo_licenses
list
max_forks_count
int64
max_forks_repo_forks_event_min_datetime
string
max_forks_repo_forks_event_max_datetime
string
content
string
avg_line_length
float64
max_line_length
int64
alphanum_fraction
float64
qsc_code_num_words_quality_signal
int64
qsc_code_num_chars_quality_signal
float64
qsc_code_mean_word_length_quality_signal
float64
qsc_code_frac_words_unique_quality_signal
float64
qsc_code_frac_chars_top_2grams_quality_signal
float64
qsc_code_frac_chars_top_3grams_quality_signal
float64
qsc_code_frac_chars_top_4grams_quality_signal
float64
qsc_code_frac_chars_dupe_5grams_quality_signal
float64
qsc_code_frac_chars_dupe_6grams_quality_signal
float64
qsc_code_frac_chars_dupe_7grams_quality_signal
float64
qsc_code_frac_chars_dupe_8grams_quality_signal
float64
qsc_code_frac_chars_dupe_9grams_quality_signal
float64
qsc_code_frac_chars_dupe_10grams_quality_signal
float64
qsc_code_frac_chars_replacement_symbols_quality_signal
float64
qsc_code_frac_chars_digital_quality_signal
float64
qsc_code_frac_chars_whitespace_quality_signal
float64
qsc_code_size_file_byte_quality_signal
float64
qsc_code_num_lines_quality_signal
float64
qsc_code_num_chars_line_max_quality_signal
float64
qsc_code_num_chars_line_mean_quality_signal
float64
qsc_code_frac_chars_alphabet_quality_signal
float64
qsc_code_frac_chars_comments_quality_signal
float64
qsc_code_cate_xml_start_quality_signal
float64
qsc_code_frac_lines_dupe_lines_quality_signal
float64
qsc_code_cate_autogen_quality_signal
float64
qsc_code_frac_lines_long_string_quality_signal
float64
qsc_code_frac_chars_string_length_quality_signal
float64
qsc_code_frac_chars_long_word_length_quality_signal
float64
qsc_code_frac_lines_string_concat_quality_signal
float64
qsc_code_cate_encoded_data_quality_signal
float64
qsc_code_frac_chars_hex_words_quality_signal
float64
qsc_code_frac_lines_prompt_comments_quality_signal
float64
qsc_code_frac_lines_assert_quality_signal
float64
qsc_codepython_cate_ast_quality_signal
float64
qsc_codepython_frac_lines_func_ratio_quality_signal
float64
qsc_codepython_cate_var_zero_quality_signal
bool
qsc_codepython_frac_lines_pass_quality_signal
float64
qsc_codepython_frac_lines_import_quality_signal
float64
qsc_codepython_frac_lines_simplefunc_quality_signal
float64
qsc_codepython_score_lines_no_logic_quality_signal
float64
qsc_codepython_frac_lines_print_quality_signal
float64
qsc_code_num_words
int64
qsc_code_num_chars
int64
qsc_code_mean_word_length
int64
qsc_code_frac_words_unique
null
qsc_code_frac_chars_top_2grams
int64
qsc_code_frac_chars_top_3grams
int64
qsc_code_frac_chars_top_4grams
int64
qsc_code_frac_chars_dupe_5grams
int64
qsc_code_frac_chars_dupe_6grams
int64
qsc_code_frac_chars_dupe_7grams
int64
qsc_code_frac_chars_dupe_8grams
int64
qsc_code_frac_chars_dupe_9grams
int64
qsc_code_frac_chars_dupe_10grams
int64
qsc_code_frac_chars_replacement_symbols
int64
qsc_code_frac_chars_digital
int64
qsc_code_frac_chars_whitespace
int64
qsc_code_size_file_byte
int64
qsc_code_num_lines
int64
qsc_code_num_chars_line_max
int64
qsc_code_num_chars_line_mean
int64
qsc_code_frac_chars_alphabet
int64
qsc_code_frac_chars_comments
int64
qsc_code_cate_xml_start
int64
qsc_code_frac_lines_dupe_lines
int64
qsc_code_cate_autogen
int64
qsc_code_frac_lines_long_string
int64
qsc_code_frac_chars_string_length
int64
qsc_code_frac_chars_long_word_length
int64
qsc_code_frac_lines_string_concat
null
qsc_code_cate_encoded_data
int64
qsc_code_frac_chars_hex_words
int64
qsc_code_frac_lines_prompt_comments
int64
qsc_code_frac_lines_assert
int64
qsc_codepython_cate_ast
int64
qsc_codepython_frac_lines_func_ratio
int64
qsc_codepython_cate_var_zero
int64
qsc_codepython_frac_lines_pass
int64
qsc_codepython_frac_lines_import
int64
qsc_codepython_frac_lines_simplefunc
int64
qsc_codepython_score_lines_no_logic
int64
qsc_codepython_frac_lines_print
int64
effective
string
hits
int64
743d4a9a855ae115998b48388d8c8ff2fca08302
2,769
py
Python
tests/unit/pywbemcli/conftest.py
pywbem/pywbemtools
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
[ "Apache-2.0" ]
8
2017-04-01T13:55:00.000Z
2022-03-15T18:28:47.000Z
tests/unit/pywbemcli/conftest.py
pywbem/pywbemtools
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
[ "Apache-2.0" ]
918
2017-03-03T14:29:03.000Z
2022-03-29T15:32:16.000Z
tests/unit/pywbemcli/conftest.py
pywbem/pywbemtools
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
[ "Apache-2.0" ]
2
2020-01-17T15:56:46.000Z
2020-02-12T18:49:30.000Z
""" Pytest common fixtures """ from __future__ import absolute_import, print_function import os import pytest from pywbemtools._utils import CONNECTIONS_FILENAME, \ DEFAULT_CONNECTIONS_DIR, DEFAULT_CONNECTIONS_FILE SCRIPT_DIR = os.path.dirname(__file__) # Backup file of the default connections file BAK_SUFFIX = '.bak' CONNECTIONS_BAK_FILENAME = CONNECTIONS_FILENAME + BAK_SUFFIX CONNECTIONS_BAK_FILE = os.path.join(DEFAULT_CONNECTIONS_DIR, CONNECTIONS_BAK_FILENAME) # Save files for the default connections file and its backup file SAVE_SUFFIX = '.testsavepywbemclitests' CONNECTIONS_SAVE_FILENAME = CONNECTIONS_FILENAME + SAVE_SUFFIX CONNECTIONS_SAVE_FILE = os.path.join(DEFAULT_CONNECTIONS_DIR, CONNECTIONS_SAVE_FILENAME) CONNECTIONS_BAK_SAVE_FILENAME = CONNECTIONS_BAK_FILENAME + SAVE_SUFFIX CONNECTIONS_BAK_SAVE_FILE = os.path.join(DEFAULT_CONNECTIONS_DIR, CONNECTIONS_BAK_SAVE_FILENAME) @pytest.fixture def default_connections_file_path(): """ Fixture to return the path name of the default connections file. """ return DEFAULT_CONNECTIONS_FILE @pytest.fixture(scope='session', autouse=True) def save_default_connections_file(request): """ Fixture that saves away an existing default connections file and its backup file at the begin of a test session and restores them at the end of the test session. This function is called once per test session (i.e. execution of the pytest command) before the first test is executed. """ # Save the default connections file and its backup file if os.path.isfile(DEFAULT_CONNECTIONS_FILE): os.rename(DEFAULT_CONNECTIONS_FILE, CONNECTIONS_SAVE_FILE) if os.path.isfile(CONNECTIONS_BAK_FILE): os.rename(CONNECTIONS_BAK_FILE, CONNECTIONS_BAK_SAVE_FILE) def teardown(): """ Restore the saved default connections file and its saved backup file. This function is called once per test session (i.e. execution of the pytest command) after the last test has been executed. 
""" # Restore the saved default connections file if os.path.isfile(DEFAULT_CONNECTIONS_FILE): os.remove(DEFAULT_CONNECTIONS_FILE) if os.path.isfile(CONNECTIONS_SAVE_FILE): os.rename(CONNECTIONS_SAVE_FILE, DEFAULT_CONNECTIONS_FILE) # Restore the saved backup file of the default connections file if os.path.isfile(CONNECTIONS_BAK_FILE): os.remove(CONNECTIONS_BAK_FILE) if os.path.isfile(CONNECTIONS_BAK_SAVE_FILE): os.rename(CONNECTIONS_BAK_SAVE_FILE, CONNECTIONS_BAK_FILE) request.addfinalizer(teardown)
35.961039
79
0.727338
352
2,769
5.448864
0.221591
0.19708
0.194995
0.037539
0.471846
0.407195
0.380083
0.307091
0.232534
0.069864
0
0
0.21524
2,769
76
80
36.434211
0.882651
0.300831
0
0.111111
0
0
0.018569
0.012561
0
0
0
0
0
1
0.083333
false
0
0.111111
0
0.222222
0.027778
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
743efaed013ebb381bd98fe53bed0e263d0f7320
13,143
py
Python
old/eval_scripts/evaluation_functions.py
konatasick/face-of-art
e796747d0ef2df2df863adf53e217ff5c86c816b
[ "MIT" ]
220
2019-09-01T01:52:04.000Z
2022-03-28T12:52:07.000Z
old/eval_scripts/evaluation_functions.py
TrueMatthewKirkham/face-of-art
ffa62a579cc8bc389e2088923736c4947a1fad70
[ "MIT" ]
16
2019-10-24T07:55:11.000Z
2022-02-10T01:28:13.000Z
old/eval_scripts/evaluation_functions.py
TrueMatthewKirkham/face-of-art
ffa62a579cc8bc389e2088923736c4947a1fad70
[ "MIT" ]
33
2019-09-23T15:08:50.000Z
2022-02-08T07:54:52.000Z
import tensorflow as tf from menpofit.visualize import plot_cumulative_error_distribution from menpofit.error import compute_cumulative_error from scipy.integrate import simps from menpo_functions import load_menpo_image_list, load_bb_dictionary from logging_functions import * from data_loading_functions import * from time import time import sys from PyQt5 import QtWidgets qapp=QtWidgets.QApplication(['']) def load_menpo_test_list(img_dir, test_data='full', image_size=256, margin=0.25, bb_type='gt'): mode = 'TEST' bb_dir = os.path.join(img_dir, 'Bounding_Boxes') bb_dictionary = load_bb_dictionary(bb_dir, mode, test_data=test_data) img_menpo_list = load_menpo_image_list( img_dir=img_dir, train_crop_dir=None, img_dir_ns=None, mode=mode, bb_dictionary=bb_dictionary, image_size=image_size, margin=margin, bb_type=bb_type, test_data=test_data, augment_basic=False, augment_texture=False, p_texture=0, augment_geom=False, p_geom=0) return img_menpo_list def evaluate_heatmap_fusion_network(model_path, img_path, test_data, batch_size=10, image_size=256, margin=0.25, bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False, debug_data_size=20): t = time() from deep_heatmaps_model_fusion_net import DeepHeatmapsModel import logging logging.getLogger('tensorflow').disabled = True # load test image menpo list test_menpo_img_list = load_menpo_test_list( img_path, test_data=test_data, image_size=image_size, margin=margin, bb_type=bb_type) if debug: test_menpo_img_list = test_menpo_img_list[:debug_data_size] print ('\n*** FUSION NETWORK: calculating normalized mean error on: ' + test_data + ' set (%d images - debug mode) ***' % debug_data_size) else: print ('\n*** FUSION NETWORK: calculating normalized mean error on: ' + test_data + ' set (%d images) ***' % (len(test_menpo_img_list))) # create heatmap model tf.reset_default_graph() model = DeepHeatmapsModel(mode='TEST', batch_size=batch_size, image_size=image_size, c_dim=c_dim, num_landmarks=num_landmarks, img_path=img_path, 
test_model_path=model_path, test_data=test_data, menpo_verbose=False) # add placeholders model.add_placeholders() # build model model.build_model() # create loss ops model.create_loss_ops() num_batches = int(1. * len(test_menpo_img_list) / batch_size) if num_batches == 0: batch_size = len(test_menpo_img_list) num_batches = 1 reminder = len(test_menpo_img_list) - num_batches * batch_size num_batches_reminder = num_batches + 1 * (reminder > 0) img_inds = np.arange(len(test_menpo_img_list)) with tf.Session() as session: # load trained parameters saver = tf.train.Saver() saver.restore(session, model_path) print ('\nnum batches: ' + str(num_batches_reminder)) err = [] for j in range(num_batches): print ('batch %d / %d ...' % (j + 1, num_batches_reminder)) batch_inds = img_inds[j * batch_size:(j + 1) * batch_size] batch_images, _, batch_landmarks_gt = load_images_landmarks( test_menpo_img_list, batch_inds=batch_inds, image_size=image_size, c_dim=c_dim, num_landmarks=num_landmarks, scale=scale) batch_maps_pred = session.run(model.pred_hm_f, {model.images: batch_images}) batch_pred_landmarks = batch_heat_maps_to_landmarks( batch_maps_pred, batch_size=batch_size, image_size=image_size, num_landmarks=num_landmarks) batch_err = session.run( model.nme_per_image, {model.lms: batch_landmarks_gt, model.pred_lms: batch_pred_landmarks}) err = np.hstack((err, batch_err)) if reminder > 0: print ('batch %d / %d ...' 
% (j + 2, num_batches_reminder)) reminder_inds = img_inds[-reminder:] batch_images, _, batch_landmarks_gt = load_images_landmarks( test_menpo_img_list, batch_inds=reminder_inds, image_size=image_size, c_dim=c_dim, num_landmarks=num_landmarks, scale=scale) batch_maps_pred = session.run(model.pred_hm_f, {model.images: batch_images}) batch_pred_landmarks = batch_heat_maps_to_landmarks( batch_maps_pred, batch_size=reminder, image_size=image_size, num_landmarks=num_landmarks) batch_err = session.run( model.nme_per_image, {model.lms: batch_landmarks_gt, model.pred_lms: batch_pred_landmarks}) err = np.hstack((err, batch_err)) print ('\ndone!') print ('run time: ' + str(time() - t)) return err def evaluate_heatmap_primary_network(model_path, img_path, test_data, batch_size=10, image_size=256, margin=0.25, bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False, debug_data_size=20): t = time() from deep_heatmaps_model_primary_net import DeepHeatmapsModel import logging logging.getLogger('tensorflow').disabled = True # load test image menpo list test_menpo_img_list = load_menpo_test_list( img_path, test_data=test_data, image_size=image_size, margin=margin, bb_type=bb_type) if debug: test_menpo_img_list = test_menpo_img_list[:debug_data_size] print ('\n*** PRIMARY NETWORK: calculating normalized mean error on: ' + test_data + ' set (%d images - debug mode) ***' % debug_data_size) else: print ('\n*** PRIMARY NETWORK: calculating normalized mean error on: ' + test_data + ' set (%d images) ***' % (len(test_menpo_img_list))) # create heatmap model tf.reset_default_graph() model = DeepHeatmapsModel(mode='TEST', batch_size=batch_size, image_size=image_size, c_dim=c_dim, num_landmarks=num_landmarks, img_path=img_path, test_model_path=model_path, test_data=test_data, menpo_verbose=False) # add placeholders model.add_placeholders() # build model model.build_model() # create loss ops model.create_loss_ops() num_batches = int(1. 
* len(test_menpo_img_list) / batch_size) if num_batches == 0: batch_size = len(test_menpo_img_list) num_batches = 1 reminder = len(test_menpo_img_list) - num_batches * batch_size num_batches_reminder = num_batches + 1 * (reminder > 0) img_inds = np.arange(len(test_menpo_img_list)) with tf.Session() as session: # load trained parameters saver = tf.train.Saver() saver.restore(session, model_path) print ('\nnum batches: ' + str(num_batches_reminder)) err = [] for j in range(num_batches): print ('batch %d / %d ...' % (j + 1, num_batches_reminder)) batch_inds = img_inds[j * batch_size:(j + 1) * batch_size] batch_images, _, batch_landmarks_gt = load_images_landmarks( test_menpo_img_list, batch_inds=batch_inds, image_size=image_size, c_dim=c_dim, num_landmarks=num_landmarks, scale=scale) batch_maps_small_pred = session.run(model.pred_hm_p, {model.images: batch_images}) batch_maps_small_pred = zoom(batch_maps_small_pred, zoom=[1, 4, 4, 1], order=1) # NN interpolation batch_pred_landmarks = batch_heat_maps_to_landmarks( batch_maps_small_pred, batch_size=batch_size, image_size=image_size, num_landmarks=num_landmarks) batch_err = session.run( model.nme_per_image, {model.lms_small: batch_landmarks_gt, model.pred_lms_small: batch_pred_landmarks}) err = np.hstack((err, batch_err)) if reminder > 0: print ('batch %d / %d ...' 
% (j + 2, num_batches_reminder)) reminder_inds = img_inds[-reminder:] batch_images, _, batch_landmarks_gt = load_images_landmarks( test_menpo_img_list, batch_inds=reminder_inds, image_size=image_size, c_dim=c_dim, num_landmarks=num_landmarks, scale=scale) batch_maps_small_pred = session.run(model.pred_hm_p, {model.images: batch_images}) batch_maps_small_pred = zoom(batch_maps_small_pred, zoom=[1, 4, 4, 1], order=1) # NN interpolation batch_pred_landmarks = batch_heat_maps_to_landmarks( batch_maps_small_pred, batch_size=reminder, image_size=image_size, num_landmarks=num_landmarks) batch_err = session.run( model.nme_per_image, {model.lms_small: batch_landmarks_gt, model.pred_lms_small: batch_pred_landmarks}) err = np.hstack((err, batch_err)) print ('\ndone!') print ('run time: ' + str(time() - t)) return err def evaluate_heatmap_network(model_path, network_type, img_path, test_data, batch_size=10, image_size=256, margin=0.25, bb_type='gt', c_dim=3, scale=1, num_landmarks=68, debug=False, debug_data_size=20): if network_type.lower() == 'fusion': return evaluate_heatmap_fusion_network( model_path=model_path, img_path=img_path, test_data=test_data, batch_size=batch_size, image_size=image_size, margin=margin, bb_type=bb_type, c_dim=c_dim, scale=scale, num_landmarks=num_landmarks, debug=debug, debug_data_size=debug_data_size) elif network_type.lower() == 'primary': return evaluate_heatmap_primary_network( model_path=model_path, img_path=img_path, test_data=test_data, batch_size=batch_size, image_size=image_size, margin=margin, bb_type=bb_type, c_dim=c_dim, scale=scale, num_landmarks=num_landmarks, debug=debug, debug_data_size=debug_data_size) else: sys.exit('\n*** Error: please choose a valid network type: Fusion/Primary ***') def AUC(errors, max_error, step_error=0.0001): x_axis = list(np.arange(0., max_error + step_error, step_error)) ced = np.array(compute_cumulative_error(errors, x_axis)) return simps(ced, x=x_axis) / max_error, 1. 
- ced[-1] def print_nme_statistics( errors, model_path, network_type, test_data, max_error=0.08, log_path='', save_log=True, plot_ced=True, norm='interocular distance'): auc, failures = AUC(errors, max_error=max_error) print ("\n****** NME statistics for " + network_type + " Network ******\n") print ("* model path: " + model_path) print ("* dataset: " + test_data + ' set') print ("\n* Normalized mean error (percentage of "+norm+"): %.2f" % (100 * np.mean(errors))) print ("\n* AUC @ %.2f: %.2f" % (max_error, 100 * auc)) print ("\n* failure rate @ %.2f: %.2f" % (max_error, 100 * failures) + '%') if plot_ced: plt.figure() plt.yticks(np.linspace(0, 1, 11)) plot_cumulative_error_distribution( list(errors), legend_entries=[network_type], marker_style=['s'], marker_size=7, x_label='Normalised Point-to-Point Error\n('+norm+')\n*' + test_data + ' set*', ) if save_log: with open(os.path.join(log_path, network_type.lower() + "_nme_statistics_on_" + test_data + "_set.txt"), "wb") as f: f.write(b"************************************************") f.write(("\n****** NME statistics for " + str(network_type) + " Network ******\n").encode()) f.write(b"************************************************") f.write(("\n\n* model path: " + str(model_path)).encode()) f.write(("\n\n* dataset: " + str(test_data) + ' set').encode()) f.write(b"\n\n* Normalized mean error (percentage of "+norm+"): %.2f" % (100 * np.mean(errors))) f.write(b"\n\n* AUC @ %.2f: %.2f" % (max_error, 100 * auc)) f.write(("\n\n* failure rate @ %.2f: %.2f" % (max_error, 100 * failures) + '%').encode()) if plot_ced: plt.savefig(os.path.join(log_path, network_type.lower() + '_nme_ced_on_' + test_data + '_set.png'), bbox_inches='tight') plt.close() print ('\nlog path: ' + log_path) def print_ced_compare_methods( method_errors,method_names,test_data,log_path='', save_log=True, norm='interocular distance'): plt.yticks(np.linspace(0, 1, 11)) plot_cumulative_error_distribution( [list(err) for err in list(method_errors)], 
legend_entries=list(method_names), marker_style=['s'], marker_size=7, x_label='Normalised Point-to-Point Error\n('+norm+')\n*'+test_data+' set*' ) if save_log: plt.savefig(os.path.join(log_path,'nme_ced_on_'+test_data+'_set.png'), bbox_inches='tight') print ('ced plot path: ' + os.path.join(log_path,'nme_ced_on_'+test_data+'_set.png')) plt.close()
43.81
120
0.640645
1,771
13,143
4.405985
0.117448
0.034858
0.034987
0.04101
0.792772
0.783545
0.770857
0.768294
0.762655
0.741125
0
0.012814
0.239976
13,143
300
121
43.81
0.768345
0.020315
0
0.646789
0
0
0.102939
0.007464
0
0
0
0
0
1
0.03211
false
0
0.06422
0
0.123853
0.110092
0
0
0
null
0
0
0
0
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
5
743f8e8a60a48a089d0eae27267518b7437b7876
191
py
Python
src/python_code/gather_ood_experiments.py
ipmach/Thesis2021
91dbb0eebba64f1fa2c18562e2c9f35f532ef7c0
[ "MIT" ]
null
null
null
src/python_code/gather_ood_experiments.py
ipmach/Thesis2021
91dbb0eebba64f1fa2c18562e2c9f35f532ef7c0
[ "MIT" ]
null
null
null
src/python_code/gather_ood_experiments.py
ipmach/Thesis2021
91dbb0eebba64f1fa2c18562e2c9f35f532ef7c0
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 from DataBase_Manager.beta_manager import gather_data import sys experiment_path = sys.argv[1] csv_path = sys.argv[2] gather_data(experiment_path, save_file=csv_path)
23.875
53
0.816754
32
191
4.59375
0.625
0.136054
0.14966
0
0
0
0
0
0
0
0
0.017241
0.089005
191
8
54
23.875
0.827586
0.109948
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.4
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
7440b717546ee638774417faa2b4dd5418e3f9b2
792
py
Python
data/spacex.py
Apallapuqu/MaxenceR263
0278ea56ee15d08068ad9cd542ec5d81704010d5
[ "MIT" ]
3
2021-10-14T15:46:24.000Z
2021-10-15T17:29:32.000Z
data/spacex.py
Apallapuqu/MaxenceR263
0278ea56ee15d08068ad9cd542ec5d81704010d5
[ "MIT" ]
1
2021-11-04T14:47:00.000Z
2021-11-04T15:27:16.000Z
data/spacex.py
Apallapuqu/MaxenceR263
0278ea56ee15d08068ad9cd542ec5d81704010d5
[ "MIT" ]
1
2021-11-04T01:17:06.000Z
2021-11-04T01:17:06.000Z
import requests def info_spacex(): url = "https://api.spacexdata.com/v3/info" request = requests.get(url) data = request.json() print( "SpaceX Info ->\n" f"• Founder : {data['founder']}\n" f"• Founded : {data['founded']}\n" f"• Employees : {data['employees']}\n" f"• Vehicles : {data['vehicles']}\n" f"• Launch Sites : {data['launch_sites']}\n" f"• Ceo : {data['ceo']}\n" f"• Cto : {data['cto']}\n" f"• Coo : {data['coo']}\n" f"• Cto Propulsion : {data['cto_propulsion']}\n" f"• Address : {data['headquarters']['address']}\n" f"• City : {data['headquarters']['city']}\n" f"• State : {data['headquarters']['state']}\n\n" f"• Summary : {data['summary']}\n" )
31.68
58
0.506313
109
792
3.770642
0.311927
0.06326
0.094891
0.029197
0
0
0
0
0
0
0
0.001709
0.261364
792
24
59
33
0.678632
0
0
0
0
0
0.630051
0.218434
0
0
0
0
0
1
0.047619
false
0
0.047619
0
0.095238
0.047619
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7440d94d7871edd47482f8141c9c0e81af6169de
16,430
py
Python
codes/tests/qs_test.py
madokast/cctpy
b02c64220ea533a4fc9cad0b882d1be6edadf1c0
[ "MIT" ]
1
2021-12-27T13:20:43.000Z
2021-12-27T13:20:43.000Z
codes/tests/qs_test.py
madokast/cctpy
b02c64220ea533a4fc9cad0b882d1be6edadf1c0
[ "MIT" ]
null
null
null
codes/tests/qs_test.py
madokast/cctpy
b02c64220ea533a4fc9cad0b882d1be6edadf1c0
[ "MIT" ]
null
null
null
import unittest import numpy as np from cctpy.baseutils import Vectors, Equal, Stream from cctpy.constant import M, MM, YI, XI, Protons, ZI, MRAD from cctpy.particle import RunningParticle, ParticleFactory, PhaseSpaceParticle, ParticleRunner from cctpy.qs_hard_edge_magnet import QsHardEdgeMagnet from cctpy.abstract_classes import LocalCoordinateSystem from cctpy.plotuils import Plot2 class QsTest(unittest.TestCase): def test_quad_0(self): """ 测试 qs 四极场 Returns ------- """ length = 0.2 * M aper = 30 * MM g = 10. L = 0 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) qs = QsHardEdgeMagnet(length, g, L, aper, lc) m = qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0)) self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, -0.1))) m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 0)) self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, -0.15))) m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 5 * MM)) self.assertTrue(Equal.equal_vector(m, Vectors.create(-0.05, -3.061616997868383E-18, -0.15))) def test_quad_1(self): """ 测试 qs 四极场 Returns ------- """ length = 0.2 * M aper = 30 * MM g = -45.7 L = 0 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) qs = QsHardEdgeMagnet(length, g, L, aper, lc) m = qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0)) self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, 0.457))) m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 0)) self.assertTrue(Equal.equal_vector(m, Vectors.create(0.0, 0.0, 0.6855))) m = qs.magnetic_field_at(Vectors.create(15 * MM, 0.1, 5 * MM)) self.assertTrue(Equal.equal_vector(m, Vectors.create(0.2285, 1.399158968025851E-17, 0.6855))) def test_second_0(self): length = 0.2 * M aper = 30 * MM g = 0 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))).map(lambda m: 
m[0]).to_vector() my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))).map(lambda m: m[1]).to_vector() mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 0))).map(lambda m: m[2]).to_vector() self.assertTrue(Equal.equal_vector(mx, np.array([-0.0, -0.0, -0.0, -0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))) self.assertTrue(Equal.equal_vector(my, np.array([-0.0, -0.0, -0.0, -0.0, -0.0, 0.0, 0.0, 0.0, 0.0, 0.0]))) self.assertTrue(Equal.equal_vector(mz, np.array( [-0.005, -0.0038888888888888888, -0.002777777777777778, -0.0016666666666666672, -5.555555555555558E-4, 5.555555555555558E-4, 0.001666666666666666, 0.0027777777777777775, 0.0038888888888888888, 0.005]))) def test_second_1(self): length = 0.2 * M aper = 30 * MM g = 0 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 1 * MM))).map(lambda m: m[0]).to_vector() my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 1 * MM))).map(lambda m: m[1]).to_vector() mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, 1 * MM))).map(lambda m: m[2]).to_vector() self.assertTrue(Equal.equal_vector(mx, np.array( [-0.001, -7.777777777777777E-4, -5.555555555555557E-4, -3.3333333333333343E-4, -1.1111111111111116E-4, 1.1111111111111116E-4, 3.3333333333333316E-4, 5.555555555555554E-4, 7.777777777777777E-4, 0.001] ))) self.assertTrue(Equal.equal_vector(my, np.array( [-6.123233995736766E-20, -4.762515330017485E-20, -3.4017966642982043E-20, 
-2.0410779985789227E-20, -6.80359332859641E-21, 6.80359332859641E-21, 2.041077998578921E-20, 3.4017966642982025E-20, 4.762515330017485E-20, 6.123233995736766E-20] ))) self.assertTrue(Equal.equal_vector(mz, np.array( [-0.00495, -0.00385, -0.0027500000000000003, -0.0016500000000000006, -5.500000000000002E-4, 5.500000000000002E-4, 0.0016499999999999991, 0.0027499999999999994, 0.00385, 0.00495]))) def test_second_2(self): length = 0.2 * M aper = 30 * MM g = 0 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, -1 * MM))).map(lambda m: m[0]).to_vector() my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, -1 * MM))).map(lambda m: m[1]).to_vector() mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(10 * MM, 0.1, -1 * MM))).map(lambda m: m[2]).to_vector() self.assertTrue(Equal.equal_vector(mx, np.array( [0.001, 7.777777777777777E-4, 5.555555555555557E-4, 3.3333333333333343E-4, 1.1111111111111116E-4, -1.1111111111111116E-4, -3.3333333333333316E-4, -5.555555555555554E-4, -7.777777777777777E-4, -0.001] ))) self.assertTrue(Equal.equal_vector(my, np.array( [6.123233995736766E-20, 4.762515330017485E-20, 3.4017966642982043E-20, 2.0410779985789227E-20, 6.80359332859641E-21, -6.80359332859641E-21, -2.041077998578921E-20, -3.4017966642982025E-20, -4.762515330017485E-20, -6.123233995736766E-20] ))) self.assertTrue(Equal.equal_vector(mz, np.array( [-0.00495, -0.00385, -0.0027500000000000003, -0.0016500000000000006, -5.500000000000002E-4, 5.500000000000002E-4, 0.0016499999999999991, 0.0027499999999999994, 0.00385, 0.00495] ))) def test_second_3(self): length = 0.2 * M aper = 30 * MM g = 0 lc = 
LocalCoordinateSystem(main_direction=YI, second_direction=-XI) mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(-5 * MM, 0.1, -1 * MM))).map(lambda m: m[0]).to_vector() my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(-5 * MM, 0.1, -1 * MM))).map(lambda m: m[1]).to_vector() mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(-5 * MM, 0.1, -1 * MM))).map(lambda m: m[2]).to_vector() self.assertTrue(Equal.equal_vector(mx, np.array( [-5.0E-4, -3.8888888888888887E-4, -2.7777777777777783E-4, -1.6666666666666672E-4, -5.555555555555558E-5, 5.555555555555558E-5, 1.6666666666666658E-4, 2.777777777777777E-4, 3.8888888888888887E-4, 5.0E-4] ))) self.assertTrue(Equal.equal_vector(my, np.array( [-3.061616997868383E-20, -2.3812576650087424E-20, -1.7008983321491022E-20, -1.0205389992894614E-20, -3.401796664298205E-21, 3.401796664298205E-21, 1.0205389992894605E-20, 1.7008983321491013E-20, 2.3812576650087424E-20, 3.061616997868383E-20] ))) self.assertTrue(Equal.equal_vector(mz, np.array( [-0.0012000000000000001, -9.333333333333333E-4, -6.666666666666668E-4, -4.0000000000000013E-4, -1.3333333333333337E-4, 1.3333333333333337E-4, 3.999999999999998E-4, 6.666666666666665E-4, 9.333333333333333E-4, 0.0012000000000000001] ))) def test_second_4(self): length = 0.2 * M aper = 30 * MM g = 0 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) mx = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(-8 * MM, 0.1, 1 * MM))).map(lambda m: m[0]).to_vector() my = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(-8 * MM, 0.1, 1 * 
MM))).map(lambda m: m[1]).to_vector() mz = Stream.linspace(-100, 100, 10).map(lambda k: QsHardEdgeMagnet(length, g, k, aper, lc)).map( lambda qs: qs.magnetic_field_at(Vectors.create(-8 * MM, 0.1, 1 * MM))).map(lambda m: m[2]).to_vector() self.assertTrue(Equal.equal_vector(mx, np.array( [7.999999999999999E-4, 6.222222222222221E-4, 4.444444444444444E-4, 2.6666666666666673E-4, 8.88888888888889E-5, -8.88888888888889E-5, -2.666666666666665E-4, -4.444444444444443E-4, -6.222222222222221E-4, -7.999999999999999E-4] ))) self.assertTrue(Equal.equal_vector(my, np.array( [4.8985871965894125E-20, 3.8100122640139875E-20, 2.7214373314385626E-20, 1.632862398863138E-20, 5.442874662877126E-21, -5.442874662877126E-21, -1.6328623988631368E-20, -2.721437331438562E-20, -3.8100122640139875E-20, -4.8985871965894125E-20] ))) self.assertTrue(Equal.equal_vector(mz, np.array( [-0.00315, -0.00245, -0.00175, -0.0010500000000000004, -3.500000000000001E-4, 3.500000000000001E-4, 0.0010499999999999995, 0.0017499999999999996, 0.00245, 0.00315] ))) def test_quad_and_second_0(self): length = 0.2 * M aper = 30 * MM lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) p = Vectors.create(-8 * MM, 0.1, 1 * MM) mx = Stream.linspace(-100, 100, 10).map( lambda k: QsHardEdgeMagnet(length, np.sin(k / 180) * 20, (1.1 ** (k / 2)) * 2, aper, lc)).map( lambda qs: qs.magnetic_field_at(p)).map(lambda m: m[0]).to_vector() my = Stream.linspace(-100, 100, 10).map( lambda k: QsHardEdgeMagnet(length, np.sin(k / 180) * 20, (1.1 ** (k / 2)) * 2, aper, lc)).map( lambda qs: qs.magnetic_field_at(p)).map(lambda m: m[1]).to_vector() mz = Stream.linspace(-100, 100, 10).map( lambda k: QsHardEdgeMagnet(length, np.sin(k / 180) * 20, (1.1 ** (k / 2)) * 2, aper, lc)).map( lambda qs: qs.magnetic_field_at(p)).map(lambda m: m[2]).to_vector() self.assertTrue(Equal.equal_vector(mx, np.array( [0.01054817141861684, 0.008375158765307863, 0.006074168017454833, 0.0036793034142077055, 0.0012243616386317005, -0.0012609533747681091, 
-0.003760913727964488, -0.006301201552951284, -0.009026933421046367, -0.012426561361512444] ))) self.assertTrue(Equal.equal_vector(my, np.array( [6.458892182333354E-19, 5.128305687142586E-19, 3.7193552100296426E-19, 2.2529235746506973E-19, 7.497052808745602E-20, -7.721112571419089E-20, -2.302895479410525E-19, -3.8583731563020608E-19, -5.52740256010035E-19, -7.609074297892195E-19] ))) self.assertTrue(Equal.equal_vector(mz, np.array( [-0.08438592505476789, -0.06700286672870447, -0.04859794794067867, -0.029447702336309195, -0.009833171528290597, 0.009977251489327706, 0.029769042946726516, 0.04949189248669594, 0.06956922943567483, 0.09178208545491932] ))) def test_track_y(self): """ 六级 qs track 对比 y 方向 Returns ------- """ plane = PhaseSpaceParticle.YYP_PLANE delta = 0. number = 6 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) qs = QsHardEdgeMagnet(0.2, 0, 10000 * 2, 300000 * MM, lc) rp = ParticleFactory.create_proton( Vectors.create(0, -0.5, 0), YI ) # print(f"rp={rp}") pps = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane( plane, 3.5 * MM, 7.2 * MM, delta, number ) # print(*pps, sep='\n', end='\n\n') pp = ParticleFactory.create_from_phase_space_particles( rp, rp.get_natural_coordinate_system(y_direction=ZI), pps ) # print(*pp, sep='\n\n') ParticleRunner.run_ps_only_cpu0(pp, qs, 1.2) ParticleRunner.run_only(rp, qs, 1.2) # print(f"rp={rp}") # print(*pp, sep='\n\n') pps_end = PhaseSpaceParticle.create_from_running_particles(rp, rp.get_natural_coordinate_system(), pp) li = PhaseSpaceParticle.phase_space_particles_project_to_plane(pps_end, plane) li = np.array( [[x / MM, xp / MRAD] for x, xp in li] ) x = li[:, 0] y = li[:, 1] x0 = np.array( [4.571009592873671, 13.005311328487931, 4.473631539146663, -4.5763158484424205, -13.005311328486815, -4.473631539149022] ) y0 = np.array( [1.9535672206449075, 13.092945863265955, 5.607514554681223, -1.9596240807758292, -13.092945863264303, -5.6075145546827025] ) self.assertTrue( 
(np.abs(x.flatten() - x0.flatten()) < 0.05).all() ) self.assertTrue( (np.abs(y.flatten() - y0.flatten()) < 0.05).all() ) # Plot2.plot2d([(li, 'r.')]) # # Plot2.plot2d([(np.column_stack((x0, y0)), 'k.')]) # # Plot2.show() def test_track_x(self): """ 六级 QS track 对比 x 方向 Returns ------- """ plane = PhaseSpaceParticle.XXP_PLANE delta = 0. number = 6 lc = LocalCoordinateSystem(main_direction=YI, second_direction=-XI) qs = QsHardEdgeMagnet(0.2, 0, 10000 * 2, 300000 * MM, lc) rp = ParticleFactory.create_proton( Vectors.create(0, -0.5, 0), YI ) # print(f"rp={rp}") pps = PhaseSpaceParticle.phase_space_particles_along_positive_ellipse_in_plane( plane, 3.5 * MM, 7.2 * MM, delta, number ) # print(*pps, sep='\n', end='\n\n') pp = ParticleFactory.create_from_phase_space_particles( rp, rp.get_natural_coordinate_system(y_direction=ZI), pps ) # print(*pp, sep='\n\n') ParticleRunner.run_ps_only_cpu0(pp, qs, 1.2) ParticleRunner.run_only(rp, qs, 1.2) # print(f"rp={rp}") # print(*pp, sep='\n\n') pps_end = PhaseSpaceParticle.create_from_running_particles(rp, rp.get_natural_coordinate_system(), pp) li = PhaseSpaceParticle.phase_space_particles_project_to_plane(pps_end, plane) li = np.array( [[x / MM, xp / MRAD] for x, xp in li] ) x = li[:, 0] y = li[:, 1] x0 = np.array( [-1.6363082716640025, -2.964662344582841, 3.848704911140664, -10.799631136000919, -29.05411958099093, -5.103782688758285] ) y0 = np.array( [-8.439377477318738, -14.025193841237206, 4.508357473356099, -12.381715740031598, -40.36236303026269, -6.717141236931342] ) self.assertTrue( (np.abs(x.flatten() - x0.flatten()) < 0.05).all() ) self.assertTrue( (np.abs(y.flatten() - y0.flatten()) < 0.05).all() ) # Plot2.plot2d([(li, 'r.')]) # # Plot2.plot2d([(np.column_stack((x0, y0)), 'k.')]) # # Plot2.show() if __name__ == '__main__': unittest.main()
43.12336
116
0.609251
2,174
16,430
4.50598
0.126955
0.011433
0.0147
0.01715
0.715394
0.712332
0.712332
0.712332
0.704369
0.687015
0
0.288805
0.240475
16,430
380
117
43.236842
0.496194
0.032623
0
0.537255
0
0
0.000509
0
0
0
0
0
0.109804
1
0.039216
false
0
0.031373
0
0.07451
0
0
0
0
null
0
0
0
0
1
1
1
1
1
0
1
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
744107433714491e0a8e46ba4af32bda59ff3f79
8,238
py
Python
src/nr/util/process/root.py
NiklasRosenstein/python-nr.util
087f2410d38006c1005a5fb330c47a56bcdb2279
[ "MIT" ]
null
null
null
src/nr/util/process/root.py
NiklasRosenstein/python-nr.util
087f2410d38006c1005a5fb330c47a56bcdb2279
[ "MIT" ]
3
2022-02-16T13:17:28.000Z
2022-03-14T15:28:41.000Z
src/nr/util/process/root.py
NiklasRosenstein/python-nr.util
087f2410d38006c1005a5fb330c47a56bcdb2279
[ "MIT" ]
null
null
null
from __future__ import annotations import os import sys if __name__ == '__main__': # Ensure that the parent directory is not in sys.path. norm = lambda x: os.path.normpath(os.path.abspath(x)) dirname = os.path.dirname(norm(__file__)) sys.path[:] = [x for x in sys.path if norm(x) != dirname] del norm, dirname import ctypes import io import json import re import shlex import shutil import subprocess import tempfile import traceback import typing as t if os.name == 'nt': import ctypes.wintypes as wintypes windll = ctypes.windll # type: ignore WinError = ctypes.WinError # type: ignore get_last_error = ctypes.get_last_error # type: ignore class winapi: _WaitForSingleObject = windll.kernel32.WaitForSingleObject _WaitForSingleObject.restype = wintypes.DWORD _WaitForSingleObject.argtypes = [wintypes.HANDLE, wintypes.DWORD] @staticmethod def WaitForSingleObject(handle, ms=0): return winapi._WaitForSingleObject(handle, ms) _GetExitCodeProcess = windll.kernel32.GetExitCodeProcess _GetExitCodeProcess.restype = wintypes.BOOL _GetExitCodeProcess.argtypes = [wintypes.HANDLE, ctypes.POINTER(wintypes.DWORD)] @staticmethod def GetExitCodeProcess(handle): result = wintypes.DWORD() success = winapi._GetExitCodeProcess(handle, ctypes.byref(result)) if not success: raise WinError(get_last_error()) return result.value _MessageBox = windll.user32.MessageBoxW _MessageBox.restype = ctypes.c_int _MessageBox.argtypes = [wintypes.HWND, wintypes.LPWSTR, wintypes.LPWSTR, wintypes.UINT] @staticmethod def MessageBox(hwnd, text, caption, type): return winapi._MessageBox(hwnd, text, caption, type) class _SHELLEXECUTEINFO(ctypes.Structure): _fields_ = [ ('cbSize', wintypes.DWORD), ('fMask', wintypes.ULONG), ('hwnd', wintypes.HWND), ('lpVerb', wintypes.LPCSTR), ('lpFile', wintypes.LPCSTR), ('lpParameters', wintypes.LPCSTR), ('lpDirectory', wintypes.LPCSTR), ('nShow', ctypes.c_int), ('hInstApp', wintypes.HINSTANCE), ('lpIDList', wintypes.LPVOID), ('lpClass', wintypes.LPCSTR), ('hkeyClass', 
wintypes.HKEY), ('dwHotKey', wintypes.DWORD), ('DUMMYUNIONNAME', wintypes.HANDLE), ('hProcess', wintypes.HANDLE), ] _ShellExecuteEx = windll.shell32.ShellExecuteEx _ShellExecuteEx.restype = wintypes.BOOL _ShellExecuteEx.argtypes = [ctypes.POINTER(_SHELLEXECUTEINFO)] SW_HIDE = 0 SW_MAXIMIMIZE = 3 SW_MINIMIZE = 6 SW_RESTORE = 9 SW_SHOW = 5 SW_SHOWDEFAULT = 10 SW_SHOWMAXIMIZED = 3 SW_SHOWMINIMIZED = 2 SW_SHOWMINNOACTIVE = 7 SW_SHOWNA = 8 SW_SHOWNOACTIVE = 4 SW_SHOWNORMAL = 1 @staticmethod def ShellExecuteEx(hwnd=None, verb='', file='', parameters=None, directory=None, show=SW_SHOW, mask=0): # TODO: More parameters data = winapi._SHELLEXECUTEINFO() data.cbSize = ctypes.sizeof(data) data.fMask = mask data.hwnd = hwnd data.lpVerb = verb.encode() data.lpFile = file.encode() data.lpParameters = parameters.encode() data.lpDirectory = directory.encode() data.nShow = show data.hInstApp = None data.lpIDList = None data.lpClass = None data.hkeyClass = None data.dwHotKey = 0 data.DUMMYUNIONNAME = None data.hProcess = None result = winapi._ShellExecuteEx(ctypes.byref(data)) if not result: raise WinError(get_last_error()) return {'hInstApp': data.hInstApp, 'hProcess': data.hProcess} def alert(*msg: str) -> None: # TODO (@NiklasRosenstein): Support GUI alerts for other systems. 
message = ' '.join(map(str, msg)) print(message, file=sys.stderr) sys.stderr.flush() if os.name == 'nt': winapi.MessageBox(None, message, "Python", 0) def quote(s: str) -> str: if os.name == 'nt' and os.sep == '\\': s = s.replace('"', '\\"') if re.search(r'\s', s) or any(c in s for c in '<>'): s = '"' + s + '"' else: s = shlex.quote(s) return s def is_root() -> bool: if os.name == 'nt': try: return bool(windll.shell32.IsUserAnAdmin()) except: traceback.print_exc() print("ctypes.windll.shell32.IsUserAnAdmin() failed -- " "assuming not an admin.", file=sys.stderr) sys.stderr.flush() return False elif os.name == 'posix': return os.getuid() == 0 else: raise RuntimeError('Unsupported os: {!r}'.format(os.name)) def elevate(command: str | list[str], cwd: str | None = None, environ: t.Mapping[str, str] | None = None) -> None: """ Runs a command as an admin in the specified *cwd* and *environ*. On Windows, this creates a temporary directory where this information is stored temporarily so that the new process can launch the proper subprocess. """ if isinstance(command, str): command = shlex.split(command) if os.name == 'nt': return _elevate_windows(command, cwd, environ) elif os.name == 'posix': command = ['sudo', '-E'] + list(command) sys.exit(subprocess.call(command)) else: raise RuntimeError('Unsupported os: {!r}'.format(os.name)) def _elevate_windows(command, cwd, environ): assert os.name == 'nt' datadir = tempfile.mkdtemp() try: # TODO: Maybe we could also use named pipes and transfer them # via the processdata.json to the elevated process. # This file will receive all the process information. datafile = os.path.join(datadir, 'processdata.json') data = { 'command': command, 'cwd': cwd or os.getcwd(), 'environ': environ or os.environ.copy(), 'outfile': os.path.join(datadir, 'out.bin') } with open(datafile, 'w') as fp: json.dump(data, fp) # Ensure the output file exists. open(data['outfile'], 'w').close() # Create the windows elevated process that calls this file. 
This # file will then know what to do with the information from the # process data directory. hProc = winapi.ShellExecuteEx( file=sys.executable, verb='runas', parameters=' '.join(map(quote, [os.path.abspath(__file__), '--windows-process-data', datadir])), directory=datadir, mask=64, show=winapi.SW_HIDE )['hProcess'] # Read the output from the process and write it to our stdout. with open(data['outfile'], 'rb+', 0) as outfile: while True: hr = winapi.WaitForSingleObject(hProc, 40) while True: line = outfile.readline() if not line: break sys.stdout.buffer.write(line) if hr != 0x102: break return winapi.GetExitCodeProcess(hProc) finally: try: shutil.rmtree(datadir) except: print("ERROR: Unable to remove data directory of elevated process.") print("ERROR: Directory at \"{}\"".format(datadir)) traceback.print_exc() def _elevate_windows_elevated(datadir): assert os.name == 'nt' datafile = os.path.join(datadir, 'processdata.json') with open(datafile, 'r') as pdata_fp: data = json.load(pdata_fp) try: with open(data['outfile'], 'wb', 0) as fp: sys.stderr = sys.stdout = io.TextIOWrapper(fp) os.environ.update(data['environ']) return subprocess.call(data['command'], cwd=data['cwd'], stdout=fp, stderr=fp) except: alert(traceback.format_exc()) sys.exit(1) def main(argv=None, prog=None): import argparse parser = argparse.ArgumentParser(prog=prog) parser.add_argument('--windows-process-data', help='The path to a Windows process data directory. This is used to ' 'provide data for the elevated process since no environment variables ' 'can be via ShellExecuteEx().') args, unknown = parser.parse_known_args(argv) if args.windows_process_data: if not is_root(): alert("--windows-process-data can only be used in an elevated process.") sys.exit(1) sys.exit(_elevate_windows_elevated(args.windows_process_data)) elif unknown: elevate(unknown) sys.exit() else: parser.print_usage() _entry_point = lambda: sys.exit(main()) if __name__ == '__main__': _entry_point()
30.511111
119
0.657563
1,011
8,238
5.257171
0.295747
0.012418
0.010536
0.009407
0.079586
0.058325
0.0365
0.021449
0.021449
0.021449
0
0.006535
0.219835
8,238
269
120
30.624535
0.820445
0.097232
0
0.183962
0
0
0.104872
0.013902
0
0
0.000675
0.007435
0.009434
1
0.051887
false
0
0.070755
0.009434
0.259434
0.033019
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74414156113f70aed599e59e489aec7e8aaa778e
43,200
py
Python
sdh/metrics/org/api.py
SmartDeveloperHub/sdh-org-metrics
78a50501a04ce7c9d8bd55d4688d66c9f6e36766
[ "Apache-2.0" ]
null
null
null
sdh/metrics/org/api.py
SmartDeveloperHub/sdh-org-metrics
78a50501a04ce7c9d8bd55d4688d66c9f6e36766
[ "Apache-2.0" ]
null
null
null
sdh/metrics/org/api.py
SmartDeveloperHub/sdh-org-metrics
78a50501a04ce7c9d8bd55d4688d66c9f6e36766
[ "Apache-2.0" ]
null
null
null
""" #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=# This file is part of the Smart Developer Hub Project: http://www.smartdeveloperhub.org Center for Open Middleware http://www.centeropenmiddleware.com/ #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=# Copyright (C) 2015 Center for Open Middleware. #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=# """ from sdh.metrics.org import app, st as store from sdh.metrics.server import ORG, SCM, CI, APIError import calendar from datetime import datetime __author__ = 'Alejandro F. 
Carrera' def get_average_list(l): return reduce(lambda x, y: x + y, l) / len(l) def get_correct_kwargs(kwargs): args = { 'begin': 0 if kwargs.get('begin') is None else kwargs.get('begin'), 'end': calendar.timegm(datetime.now().timetuple()) if kwargs.get('end') is None else kwargs.get('end'), 'max': 0 if kwargs.get('max') is None else kwargs.get('max') } if args['max'] == 0: args['step'] = 86400 else: args['step'] = (args.get('end') - args.get('begin')) / args.get('max') return args def detect_overlap_date(a_begin, a_end, b_begin, b_end): return ( (int(a_begin) <= int(b_begin)) and (int(a_end) >= int(b_end)) # contains ) or ( (int(a_begin) >= int(b_begin)) and (int(a_end) <= int(b_end)) # contains ) or ( (int(a_begin) <= int(b_begin)) and (int(b_begin) <= int(a_end)) # shift right ) or ( (int(a_begin) <= int(b_end)) and (int(b_end) <= int(a_end)) # shift left ) def detect_project_repositories_overlap(uri, args): temp_frame = store.get_project_temporal_frame(uri) return detect_overlap_date( args.get('begin'), args.get('end'), temp_frame.get('first_commit'), temp_frame.get('last_commit') ) def get_external_position_metric(uid, endpoint, position, aggregate, args, flag): try: pr = get_position_products(uid, args, position, flag) pr_res = [] if args['begin'] == 0: args['begin'] = None tmp_arg = args if flag: if aggregate == 'sum': tmp_frame = store.get_specific_products_temporal_frame(pr) tmp_arg['begin'] = tmp_frame.get('first_commit') tmp_arg['end'] = tmp_frame.get('last_commit') pr_res = map( lambda x: app.request_metric(endpoint, prid=x.get('id'), **tmp_arg), pr ) else: for k in pr: pr_temp_frame = store.get_product_temporal_frame(k.get('uri')) tmp_arg['begin'] = pr_temp_frame.get('first_commit') tmp_arg['end'] = pr_temp_frame.get('last_commit') pr_res.append(app.request_metric(endpoint, prid=k.get('id'), **tmp_arg)) else: pr_res = map(lambda k: app.request_metric(endpoint, prid=k.get('id'), **tmp_arg), pr) if len(pr_res): context = pr_res[0][0] else: context = args 
v = zip(*map(lambda x: x[1], pr_res)) if aggregate == 'avg': res = [get_average_list(x) for x in v] else: res = [sum(x) for x in v] return context, res except (EnvironmentError, AttributeError) as e: raise APIError(e.message) return args, [] def get_position_repositories(uid, args, position, flag_total, only_uris): positions_id = store.get_all_members_id(position) if uid not in positions_id: return [] else: projects = store.get_all_member_projects(positions_id[uid]) res_prj = set() res = [] for x in projects: repos = store.get_all_project_repositories(x) if not flag_total: for k in repos: rep_info = store.db.hgetall(k) if detect_overlap_date( args.get('begin'), args.get('end'), rep_info.get('first_commit'), rep_info.get('last_commit') ): res_prj.add(k) if only_uris: return res_prj else: [res.append({ 'id': store.db.hgetall(x).get('id'), 'uri': x }) for x in res_prj] return res def get_position_projects(uid, args, position, flag_total, only_uris): positions_id = store.get_all_members_id(position) if uid not in positions_id: return [] else: projects = store.get_all_member_projects(positions_id[uid]) if not flag_total: res_prj = set() for x in projects: if detect_project_repositories_overlap(x, args): res_prj.add(x) projects = list(res_prj) res = [] if only_uris: return projects else: [res.append({ 'id': store.db.get(x), 'uri': x }) for x in projects] return res def get_position_products(uid, args, position, flag_total): pr = get_position_projects(uid, args, position, flag_total, False) pro = set() res = [] for x in pr: pro = pro.union(set(store.get_all_project_products(x.get('uri')))) [res.append({ 'id': store.db.get(x), 'uri': x }) for x in pro] return res def get_position_position(uid, args, fil, position, flag_total): pr = set(get_position_projects(uid, args, fil, flag_total, True)) members = store.get_all_members(position) members_dir = set() res = [] for x in members: if len(pr.intersection(set(store.get_all_member_projects(x)))) > 0: members_dir.add(x) 
[res.append({ 'id': store.db.hgetall(x).get("id"), 'uri': x }) for x in members_dir] return res def get_director_position(uid, args, position, flag_total): return get_position_position(uid, args, 'directors', position, flag_total) def get_pmanager_position(uid, args, position, flag_total): return get_position_position(uid, args, 'productmanagers', position, flag_total) def get_project_roles(pjid, args, role, flag_total): projects_id = store.get_all_projects_id() if pjid not in projects_id: return [] else: if not flag_total and not detect_project_repositories_overlap(projects_id[pjid], args): return [] if role == "softwaredeveloper": tmp_arg = args if not flag_total: pr_temp_frame = store.get_project_temporal_frame(projects_id[pjid]) tmp_arg['begin'] = pr_temp_frame.get('first_commit') tmp_arg['end'] = pr_temp_frame.get('last_commit') co, res = app.request_view('project-developers', pjid=pjid, **tmp_arg) return res else: res = set() users_id = store.get_all_members(role) for x in users_id: pr_res = store.get_all_member_projects(x) if projects_id[pjid] in pr_res: res.add(x) res_set = [] [res_set.append({ 'id': store.db.hgetall(x).get("id"), 'uri': x }) for x in res] return res_set def get_director_roles(uid, args, role, flag_total): return get_position_position(uid, args, 'directors', role, flag_total) def get_pmanager_roles(uid, args, role, flag_total): return get_position_position(uid, args, 'productmanagers', role, flag_total) def helper_get_director_pmanagers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_director_position(uid, args, 'productmanagers', flag_total) def helper_get_director_architects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_director_position(uid, args, 'architects', flag_total) def helper_get_pmanager_architects(uid, **kwargs): flag_total = kwargs.get('begin') is 
None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_pmanager_position(uid, args, 'architects', flag_total) def helper_get_position_developers(uid, position, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) try: res = set() pr = get_position_products(uid, args, position, flag_total) devs = map(lambda k: app.request_view('product-developers', prid=k.get('id'), **kwargs), pr) [[res.add(j.get('uri')) for j in x] for x in map(lambda x: x[1], devs)] res_devs = [] [res_devs.append({ "id": store.db.hgetall(x).get("id"), "uri": x }) for x in res] return args, res_devs except (EnvironmentError, AttributeError) as e: raise APIError(e.message) return args, [] @app.view('/product-projects', target=ORG.Project, parameters=[ORG.Product], id='product-projects', title='Projects of Product') def get_product_projects(prid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) products_id = store.get_all_products_id() if prid not in products_id: return args, [] else: projects = store.get_all_product_projects(products_id[prid]) if not flag_total: res_prj = set() for x in projects: if detect_project_repositories_overlap(x, args): res_prj.add(x) projects = list(res_prj) res = [] [res.append({ 'id': store.db.get(x), 'uri': x }) for x in projects] return args, res @app.view('/project-repositories', target=SCM.Repository, parameters=[ORG.Project], id='project-repositories', title='Repositories of Project') def get_project_repositories(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) projects_id = store.get_all_projects_id() if pjid not in projects_id: return args, [] else: repos = store.get_all_project_repositories(projects_id[pjid]) if not flag_total: res_prj = set() for k in repos: rep_info = store.db.hgetall(k) if detect_overlap_date( args.get('begin'), 
args.get('end'), rep_info.get('first_commit'), rep_info.get('last_commit') ): res_prj.add(k) repos = res_prj res = [] [res.append({ 'id': store.db.hgetall(x).get('id'), 'uri': x }) for x in repos] return args, res @app.metric('/total-project-stakeholders', parameters=[ORG.Project], id='project-stakeholders', title='Stakeholders of Project') def get_total_project_stakeholders(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_project_roles(pjid, args, 'stakeholder', flag_total))] @app.view('/project-stakeholders', target=ORG.Person, parameters=[ORG.Project], id='project-stakeholders', title='Stakeholders of Project') def get_project_stakeholders(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_project_roles(pjid, args, 'stakeholder', flag_total) @app.metric('/total-project-swarchitects', parameters=[ORG.Project], id='project-swarchitects', title='Software Architects of Project') def get_total_project_swarchitects(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_project_roles(pjid, args, 'softwarearchitect', flag_total))] @app.view('/project-swarchitects', target=ORG.Person, parameters=[ORG.Project], id='project-swarchitects', title='Software Architects of Project') def get_project_swarchitects(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_project_roles(pjid, args, 'softwarearchitect', flag_total) @app.metric('/total-project-pjmanagers', parameters=[ORG.Project], id='project-pjmanagers', title='Project Managers of Project') def get_total_project_pjmanagers(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, 
[len(get_project_roles(pjid, args, 'projectmanager', flag_total))] @app.view('/project-pjmanagers', target=ORG.Person, parameters=[ORG.Project], id='project-pjmanagers', title='Project Managers of Project') def get_project_pjmanagers(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_project_roles(pjid, args, 'projectmanager', flag_total) @app.metric('/total-project-swdevelopers', parameters=[ORG.Project], id='project-swdevelopers', title='Software Developers of Project') def get_total_project_swdevelopers(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_project_roles(pjid, args, 'softwaredeveloper', flag_total))] @app.view('/project-swdevelopers', target=ORG.Person, parameters=[ORG.Project], id='project-swdevelopers', title='Software Developers of Project') def get_project_swdevelopers(pjid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_project_roles(pjid, args, 'softwaredeveloper', flag_total) @app.metric('/total-director-repositories', parameters=[ORG.Person], id='director-repositories', title='Repositories of Director') def get_total_director_repositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_repositories(uid, args, 'directors', flag_total, False))] @app.view('/director-repositories', target=SCM.Repository, parameters=[ORG.Person], id='director-repositories', title='Repositories of Director') def get_director_repositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_repositories(uid, args, 'directors', flag_total, False) @app.metric('/total-director-projects', parameters=[ORG.Person], 
id='director-projects', title='Projects of Director') def get_total_director_projects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_projects(uid, args, 'directors', flag_total, False))] @app.view('/director-projects', target=ORG.Project, parameters=[ORG.Person], id='director-projects', title='Projects of Director') def get_director_projects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_projects(uid, args, 'directors', flag_total, False) @app.metric('/total-architect-projects', parameters=[ORG.Person], id='architect-projects', title='Projects of Architect') def get_total_architects_projects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_projects(uid, args, 'architects', flag_total, False))] @app.view('/architect-projects', target=ORG.Project, parameters=[ORG.Person], id='architect-projects', title='Projects of Architect') def get_architect_projects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_projects(uid, args, 'architects', flag_total, False) @app.metric('/total-pmanager-projects', parameters=[ORG.Person], id='pmanager-projects', title='Projects of Product Manager') def get_total_manager_projects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_projects(uid, args, 'productmanagers', flag_total, False))] @app.view('/pmanager-projects', target=ORG.Project, parameters=[ORG.Person], id='pmanager-projects', title='Projects of Product Manager') def get_manager_projects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = 
get_correct_kwargs(kwargs) return args, get_position_projects(uid, args, 'productmanagers', flag_total, False) @app.metric('/total-director-products', parameters=[ORG.Person], id='director-products', title='Products of Director') def get_total_director_products(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_products(uid, args, 'directors', flag_total))] @app.view('/director-products', target=ORG.Product, parameters=[ORG.Person], id='director-products', title='Products of Director') def get_director_products(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_products(uid, args, 'directors', flag_total) @app.metric('/total-architect-products', parameters=[ORG.Person], id='architects-products', title='Products of Architect') def get_total_architect_products(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_products(uid, args, 'architects', flag_total))] @app.view('/architect-products', target=ORG.Product, parameters=[ORG.Person], id='architects-products', title='Products of Architect') def get_architect_products(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_products(uid, args, 'architects', flag_total) @app.metric('/total-pmanager-repositories', parameters=[ORG.Person], id='pmanager-repositories', title='Repositories of Product Manager') def get_total_pmanager_repositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_repositories(uid, args, 'productmanagers', flag_total, False))] @app.view('/pmanager-repositories', target=SCM.Repository, parameters=[ORG.Person], 
id='pmanager-repositories', title='Repositories of Product Manager') def get_pmanager_repositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_repositories(uid, args, 'productmanagers', flag_total, False) @app.metric('/total-pmanager-products', parameters=[ORG.Person], id='pmanager-products', title='Products of Product Manager') def get_total_manager_products(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_position_products(uid, args, 'productmanagers', flag_total))] @app.view('/pmanager-products', target=ORG.Product, parameters=[ORG.Person], id='pmanager-products', title='Products of Product Manager') def get_manager_products(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_position_products(uid, args, 'productmanagers', flag_total) @app.metric('/total-director-productmanagers', parameters=[ORG.Person], id='director-productmanagers', title='Product Managers of Director') def get_total_director_pmanagers(uid, **kwargs): co, res = helper_get_director_pmanagers(uid, **kwargs) return co, [len(res)] @app.view('/director-productmanagers', target=ORG.Person, parameters=[ORG.Person], id='director-productmanagers', title='Product Managers of Director') def get_director_pmanagers(uid, **kwargs): return helper_get_director_pmanagers(uid, **kwargs) @app.metric('/total-director-architects', parameters=[ORG.Person], id='director-architects', title='Architects of Director') def get_total_director_architects(uid, **kwargs): co, res = helper_get_director_architects(uid, **kwargs) return co, [len(res)] @app.view('/director-architects', target=ORG.Person, parameters=[ORG.Person], id='director-architects', title='Architects of Director') def get_director_architects(uid, **kwargs): return 
helper_get_director_architects(uid, **kwargs) @app.metric('/total-director-developers', parameters=[ORG.Person], id='director-developers', title='Developers of Director') def get_total_director_developers(uid, **kwargs): co, res = helper_get_position_developers(uid, 'directors', **kwargs) return co, [len(res)] @app.view('/director-developers', target=ORG.Person, parameters=[ORG.Person], id='director-developers', title='Developers of Director') def get_director_developers(uid, **kwargs): return helper_get_position_developers(uid, 'directors', **kwargs) @app.metric('/total-director-stakeholders', parameters=[ORG.Person], id='director-stakeholders', title='Stakeholders of Director') def get_total_director_stakeholders(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_director_roles(uid, args, 'stakeholder', flag_total))] @app.view('/director-stakeholders', target=ORG.Person, parameters=[ORG.Person], id='director-stakeholders', title='Stakeholders of Director') def get_director_stakeholders(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_director_roles(uid, args, 'stakeholder', flag_total) @app.metric('/total-director-swarchitects', parameters=[ORG.Person], id='director-swarchitects', title='Software Architects of Director') def get_total_director_swarchitects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_director_roles(uid, args, 'softwarearchitect', flag_total))] @app.view('/director-swarchitects', target=ORG.Person, parameters=[ORG.Person], id='director-swarchitects', title='Software Architects of Director') def get_director_swarchitects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, 
get_director_roles(uid, args, 'softwarearchitect', flag_total) @app.metric('/total-director-swdevelopers', parameters=[ORG.Person], id='director-swdevelopers', title='Software Developers of Director') def get_total_director_swdevelopers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_director_roles(uid, args, 'softwaredeveloper', flag_total))] @app.view('/director-swdevelopers', target=ORG.Person, parameters=[ORG.Person], id='director-swdevelopers', title='Software Developers of Director') def get_director_swdevelopers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_director_roles(uid, args, 'softwaredeveloper', flag_total) @app.metric('/total-director-pjmanagers', parameters=[ORG.Person], id='director-pjmanagers', title='Project Managers of Director') def get_total_director_pjmanagers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_director_roles(uid, args, 'projectmanager', flag_total))] @app.view('/director-pjmanagers', target=ORG.Person, parameters=[ORG.Person], id='director-pjmanagers', title='Project Managers of Director') def get_director_pjmanagers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_director_roles(uid, args, 'projectmanager', flag_total) @app.metric('/total-director-members', parameters=[ORG.Person], id='director-members', title='Members below Director') def get_total_director_members(uid, **kwargs): res = {} co, pm = helper_get_director_pmanagers(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in pm] co, ar = helper_get_director_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'directors', 
**kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] return co, [len(res.keys())] @app.view('/director-members', target=ORG.Person, parameters=[ORG.Person], id='director-members', title='Members below Director') def get_director_members(uid, **kwargs): res = {} co, pm = helper_get_director_pmanagers(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in pm] co, ar = helper_get_director_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'directors', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] res_mem = [] [res_mem.append({ "id": x, "uri": res[x] }) for x in res.keys()] return co, res_mem @app.metric('/director-productmembers', aggr='avg', parameters=[ORG.Person], id='director-productmembers', title='Product Members AVG of Director') def get_avg_director_productmembers(uid, **kwargs): res = {} co, pm = helper_get_director_pmanagers(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in pm] co, ar = helper_get_director_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'directors', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] res_mem = len(res.keys()) flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_pr = len(get_position_products(uid, args, 'directors', flag_total)) if res_pr == 0: return co, [0] return co, [float(res_mem) / float(res_pr)] @app.metric('/director-productrepositories', aggr='avg', parameters=[ORG.Person], id='director-productrepositories', title='Product Repositories AVG of Director') def get_avg_director_productrepositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_rep = len(get_position_repositories(uid, args, 'directors', flag_total, True)) res_pr = len(get_position_products(uid, args, 
'directors', flag_total)) if res_pr == 0: return args, [0] return args, [float(res_rep) / float(res_pr)] @app.metric('/director-projectmembers', aggr='avg', parameters=[ORG.Person], id='director-projectmembers', title='Project Members AVG of Director') def get_avg_director_projectmembers(uid, **kwargs): res = {} co, pm = helper_get_director_pmanagers(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in pm] co, ar = helper_get_director_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'directors', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] res_mem = len(res.keys()) flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_pr = len(get_position_projects(uid, args, 'directors', flag_total, True)) if res_pr == 0: return co, [0] return co, [float(res_mem) / float(res_pr)] @app.metric('/director-projectrepositories', aggr='avg', parameters=[ORG.Person], id='director-projectrepositories', title='Project Repositories AVG of Director') def get_avg_director_projectrepositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_rep = len(get_position_repositories(uid, args, 'directors', flag_total, True)) res_pr = len(get_position_projects(uid, args, 'directors', flag_total, True)) if res_pr == 0: return args, [0] return args, [float(res_rep) / float(res_pr)] @app.metric('/director-activity', parameters=[ORG.Person], id='director-activity', title='Activity of Director') def get_director_activity(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) co, res = get_external_position_metric(uid, 'sum-product-activity', 'directors', 'sum', args, flag_total) res_makeup = [] if len(res): res_max = max(res) [res_makeup.append(float(x)/res_max) for x in res] return co, res_makeup 
@app.metric('/director-quality', aggr='avg', parameters=[ORG.Person], id='director-quality', title='Quality of Director') def get_director_quality(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-quality', 'directors', 'avg', args, flag_total) @app.metric('/director-health', aggr='avg', parameters=[ORG.Person], id='director-health', title='Health of Director') def get_director_health(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-health', 'directors', 'avg', args, flag_total) @app.metric('/director-costs', parameters=[ORG.Person], id='director-costs', title='Costs of Director') def get_director_costs(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-cost', 'directors', 'sum', args, flag_total) @app.metric('/director-externals', parameters=[ORG.Person], id='director-externals', title='External Committers from Products of Director') def get_director_externals(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-externals', 'directors', 'sum', args, flag_total) @app.metric('/director-timetomarket', aggr='avg', parameters=[ORG.Person], id='director-timetomarket', title='Time To Market from Products of Director') def get_director_timetomarket(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-timetomarket', 'directors', 'avg', args, flag_total) @app.metric('/total-pmanager-architects', parameters=[ORG.Person], id='pmanager-architects', title='Architects of 
Product Manager') def get_total_pmanager_architects(uid, **kwargs): co, res = helper_get_pmanager_architects(uid, **kwargs) return co, [len(res)] @app.view('/pmanager-architects', target=ORG.Person, parameters=[ORG.Person], id='pmanager-architects', title='Architects of Product Manager') def get_pmanager_architects(uid, **kwargs): return helper_get_pmanager_architects(uid, **kwargs) @app.metric('/total-pmanager-developers', parameters=[ORG.Person], id='pmanager-developers', title='Developers of Product Manager') def get_total_pmanager_developers(uid, **kwargs): co, res = helper_get_position_developers(uid, 'productmanagers', **kwargs) return co, [len(res)] @app.view('/pmanager-developers', target=ORG.Person, parameters=[ORG.Person], id='pmanager-developers', title='Developers of Product Manager') def get_pmanager_developers(uid, **kwargs): return helper_get_position_developers(uid, 'productmanagers', **kwargs) @app.metric('/total-pmanager-stakeholders', parameters=[ORG.Person], id='pmanager-stakeholders', title='Stakeholders of Product Manager') def get_total_pmanager_stakeholders(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_pmanager_roles(uid, args, 'stakeholder', flag_total))] @app.view('/pmanager-stakeholders', target=ORG.Person, parameters=[ORG.Person], id='pmanager-stakeholders', title='Stakeholders of Product Manager') def get_pmanager_stakeholders(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_pmanager_roles(uid, args, 'stakeholder', flag_total) @app.metric('/total-pmanager-swarchitects', parameters=[ORG.Person], id='pmanager-swarchitects', title='Software Architects of Product Manager') def get_total_pmanager_swarchitects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, 
[len(get_pmanager_roles(uid, args, 'softwarearchitect', flag_total))] @app.view('/pmanager-swarchitects', target=ORG.Person, parameters=[ORG.Person], id='pmanager-swarchitects', title='Software Architects of Product Manager') def get_pmanager_swarchitects(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_pmanager_roles(uid, args, 'softwarearchitect', flag_total) @app.metric('/total-pmanager-swdevelopers', parameters=[ORG.Person], id='pmanager-swdevelopers', title='Software Developers of Product Manager') def get_total_pmanager_swdevelopers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_pmanager_roles(uid, args, 'softwaredeveloper', flag_total))] @app.view('/pmanager-swdevelopers', target=ORG.Person, parameters=[ORG.Person], id='pmanager-swdevelopers', title='Software Developers of Product Manager') def get_pmanager_swdevelopers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_pmanager_roles(uid, args, 'softwaredeveloper', flag_total) @app.metric('/total-pmanager-pjmanagers', parameters=[ORG.Person], id='pmanager-pjmanagers', title='Project Managers of Product Manager') def get_total_pmanager_pjmanagers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, [len(get_pmanager_roles(uid, args, 'projectmanager', flag_total))] @app.view('/pmanager-pjmanagers', target=ORG.Person, parameters=[ORG.Person], id='pmanager-pjmanagers', title='Project Managers of Product Manager') def get_pmanager_pjmanagers(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return args, get_pmanager_roles(uid, args, 'projectmanager', flag_total) 
@app.metric('/total-pmanager-members', parameters=[ORG.Person], id='pmanager-members', title='Members below Product Manager') def get_total_pmanager_members(uid, **kwargs): res = {} co, ar = helper_get_pmanager_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] return co, [len(res.keys())] @app.metric('/pmanager-productrepositories', aggr='avg', parameters=[ORG.Person], id='pmanager-productrepositories', title='Product Repositories AVG of Product Manager') def get_avg_pmanager_productrepositories(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_rep = len(get_position_repositories(uid, args, 'productmanagers', flag_total, True)) res_pr = len(get_position_products(uid, args, 'productmanagers', flag_total)) if res_pr == 0: return args, [0] return args, [float(res_rep) / float(res_pr)] @app.metric('/pmanager-productmembers', aggr='avg', parameters=[ORG.Person], id='pmanager-productmembers', title='Product Members AVG of Product Manager') def get_avg_pmanager_productmembers(uid, **kwargs): res = {} co, ar = helper_get_pmanager_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] res_mem = len(res.keys()) flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_pr = len(get_position_products(uid, args, 'productmanagers', flag_total)) if res_pr == 0: return co, [0] return co, [float(res_mem) / float(res_pr)] @app.metric('/pmanager-projectrepositories', aggr='avg', parameters=[ORG.Person], id='pmanager-projectrepositories', title='Project Repositories AVG of Product Manager') def get_avg_pmanager_projectrepositories(uid, **kwargs): 
flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_rep = len(get_position_repositories(uid, args, 'productmanagers', flag_total, True)) res_pr = len(get_position_projects(uid, args, 'productmanagers', flag_total, True)) if res_pr == 0: return args, [0] return args, [float(res_rep) / float(res_pr)] @app.metric('/pmanager-projectmembers', aggr='avg', parameters=[ORG.Person], id='pmanager-projectmembers', title='Project Members AVG of Product Manager') def get_avg_pmanager_projectmembers(uid, **kwargs): res = {} co, ar = helper_get_pmanager_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] res_mem = len(res.keys()) flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) res_pr = len(get_position_projects(uid, args, 'productmanagers', flag_total, True)) if res_pr == 0: return co, [0] return co, [float(res_mem) / float(res_pr)] @app.view('/pmanager-members', target=ORG.Person, parameters=[ORG.Person], id='pmanager-members', title='Members below Product Manager') def get_pmanager_members(uid, **kwargs): res = {} co, ar = helper_get_pmanager_architects(uid, **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in ar] co, dev = helper_get_position_developers(uid, 'productmanagers', **kwargs) [res.update({x.get('id'): x.get('uri')}) for x in dev] res_mem = [] [res_mem.append({ "id": x, "uri": res[x] }) for x in res.keys()] return co, res_mem @app.metric('/pmanager-activity', parameters=[ORG.Person], id='pmanager-activity', title='Activity of Product Manager') def get_pmanager_activity(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) co, res = get_external_position_metric(uid, 'sum-product-activity', 'productmanagers', 'sum', args, 
flag_total) res_makeup = [] if len(res): res_max = max(res) [res_makeup.append(float(x)/res_max) for x in res] return co, res_makeup @app.metric('/pmanager-quality', aggr='avg', parameters=[ORG.Person], id='pmanager-quality', title='Quality of Product Manager') def get_pmanager_quality(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-quality', 'productmanagers', 'avg', args, flag_total) @app.metric('/pmanager-health', aggr='avg', parameters=[ORG.Person], id='pmanager-health', title='Health of Product Manager') def get_pmanager_health(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-health', 'productmanagers', 'avg', args, flag_total) @app.metric('/pmanager-costs', parameters=[ORG.Person], id='pmanager-costs', title='Costs of Product Manager') def get_pmanager_costs(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-cost', 'productmanagers', 'sum', args, flag_total) @app.metric('/pmanager-timetomarket', aggr='avg', parameters=[ORG.Person], id='pmanager-timetomarket', title='Time To Market from Products of Product Manager') def get_pmanager_timetomarket(uid, **kwargs): flag_total = kwargs.get('begin') is None and kwargs.get('end') is None args = get_correct_kwargs(kwargs) return get_external_position_metric(uid, 'sum-product-timetomarket', 'productmanagers', 'avg', args, flag_total)
42.60355
116
0.673472
5,765
43,200
4.869905
0.041977
0.049047
0.033411
0.051719
0.900089
0.84545
0.800534
0.757222
0.702369
0.647373
0
0.001075
0.181944
43,200
1,013
117
42.645607
0.79335
0.025787
0
0.546753
0
0
0.181676
0.046322
0
0
0
0
0
1
0.120779
false
0
0.005195
0.014286
0.271429
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
6
7441ed8754d181a3bbc97faed79497d959d46b12
874
py
Python
daiquiri/jobs/migrations/0004_phases_to_char.py
agy-why/daiquiri
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
[ "Apache-2.0" ]
14
2018-12-23T18:35:02.000Z
2021-12-15T04:55:12.000Z
daiquiri/jobs/migrations/0004_phases_to_char.py
agy-why/daiquiri
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
[ "Apache-2.0" ]
40
2018-12-20T12:44:05.000Z
2022-03-21T11:35:20.000Z
daiquiri/jobs/migrations/0004_phases_to_char.py
agy-why/daiquiri
4d3e2ce51e202d5a8f1df404a0094a4e018dcb4d
[ "Apache-2.0" ]
5
2019-05-16T08:03:35.000Z
2021-08-23T20:03:11.000Z
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2016-03-31 11:03 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('daiquiri_jobs', '0003_owner_fk'), ] operations = [ migrations.AlterField( model_name='job', name='job_type', field=models.CharField(choices=[('QUERY', 'Query')], max_length=10), ), migrations.AlterField( model_name='job', name='phase', field=models.CharField(choices=[('PENDING', 'Pending'), ('QUEUED', 'Queued'), ('EXECUTING', 'Executing'), ('COMPLETED', 'Completed'), ('ERROR', 'Error'), ('ABORTED', 'Aborted'), ('UNKNOWN', 'Unknown'), ('HELD', 'Held'), ('SUSPENDED', 'Suspended'), ('ARCHIVED', 'Archived')], max_length=10), ), ]
33.615385
302
0.584668
87
874
5.724138
0.609195
0.042169
0.100402
0.116466
0.144578
0.144578
0
0
0
0
0
0.034074
0.227689
874
25
303
34.96
0.703704
0.074371
0
0.333333
1
0
0.244417
0
0
0
0
0
0
1
0
false
0
0.111111
0
0.277778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7442858aa79374bc0e7c8a884d99101738677bd6
4,339
py
Python
tools/CH_constants.py
hussamnasir/geni-ch
b6626a14e95768ba33bbc1faca9e4e74b0040920
[ "MIT" ]
3
2015-05-01T12:48:35.000Z
2017-02-25T07:13:07.000Z
tools/CH_constants.py
GENI-NSF/geni-ch
184909ef656976071fd891091caeaf892c99deb1
[ "MIT" ]
466
2015-05-20T03:49:54.000Z
2020-03-18T23:09:52.000Z
tools/CH_constants.py
hussamnasir/geni-ch
b6626a14e95768ba33bbc1faca9e4e74b0040920
[ "MIT" ]
6
2015-05-06T13:44:13.000Z
2017-07-12T17:57:17.000Z
#---------------------------------------------------------------------- # Copyright (c) 2011-2016 Raytheon BBN Technologies # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and/or hardware specification (the "Work") to # deal in the Work without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Work, and to permit persons to whom the Work # is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Work. # # THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS # IN THE WORK. 
#---------------------------------------------------------------------- # Constants for the CH (Federation Registry service) # AKA Service Registry (SR) AKA Clearinghouse (CH) # List of services provided by the CH server services = ["SERVICE"] # dictionary of types of services provided by the CH (name : code) # That is the kinds of services that are advertised in the CH SERVICE_AGGREGATE_MANAGER = 0 SERVICE_SLICE_AUTHORITY = 1 SERVICE_PROJECT_AUTHORITY = 2 SERVICE_MEMBER_AUTHORITY = 3 SERVICE_AUTHORIZATION_SERVICE = 4 SERVICE_LOGGING_SERVICE = 5 SERVICE_CREDENTIAL_STORE = 6 SERVICE_CERTIFICATE_AUTHORITY = 7 SERVICE_KEY_MANAGER = 8 SERVICE_WIMAX_SITE = 10 SERVICE_IRODS = 11 service_types = { "AGGREGATE_MANAGER" : SERVICE_AGGREGATE_MANAGER, "SLICE_AUTHORITY" : SERVICE_SLICE_AUTHORITY, "PROJECT_AUTHORITY" : SERVICE_PROJECT_AUTHORITY, "MEMBER_AUTHORITY" : SERVICE_MEMBER_AUTHORITY, "AUTHORIZATION_SERVICE" : SERVICE_AUTHORIZATION_SERVICE, "LOGGING_SERVICE" : SERVICE_LOGGING_SERVICE, "CREDENTIAL_STORE" : SERVICE_CREDENTIAL_STORE, "CERTIFICATE_AUTHORITY" : SERVICE_CERTIFICATE_AUTHORITY, "KEY_MANAGER" : SERVICE_KEY_MANAGER, "WIMAX_SITE" : SERVICE_WIMAX_SITE, "IRODS" : SERVICE_IRODS } # Mapping from external to internal data schema field_mapping = { "_GENI_SERVICE_ID" : "id", "SERVICE_URN": 'service_urn', "SERVICE_URL": 'service_url', "_GENI_SERVICE_CERT_FILENAME": 'service_cert', "SERVICE_CERT": 'service_cert', "SERVICE_NAME": 'service_name', "SERVICE_DESCRIPTION": 'service_description', "SERVICE_TYPE": "service_type", "_GENI_SERVICE_SHORT_NAME": "short_name" } # The externally visible data schema for services mandatory_fields = { "SERVICE_URN": {"TYPE": "URN"}, "SERVICE_URL": {"TYPE": "URL"}, "SERVICE_CERT": {"TYPE": "CERTIFICATE"}, "SERVICE_NAME" : {"TYPE" : "STRING"}, "SERVICE_DESCRIPTION": {"TYPE" : "STRING"} } supplemental_fields = { "_GENI_SERVICE_CERT_FILENAME": {"TYPE": "STRING", "OBJECT": "SERVICE"}, "_GENI_SERVICE_ID" : {"TYPE" : "INTEGER", "OBJECT": "SERVICE"}, 
"_GENI_SERVICE_ATTRIBUTES" : {"TYPE" : "DICTIONARY", "OBJECT" : "SERVICE"}, "_GENI_SERVICE_SHORT_NAME" : {"TYPE": "STRING", "OBJECT": "SERVICE"} } # Defined attributes on services # A dictionary: For each attribute we have a name pointing to a dictionary # with 'description', 'service_types', 'acceptable_values' # 'service_types' means a list of service types to which this attribute # applies. This tag is optional and if not supplied it is not restricted # 'acceptable_values' means a list of acceptable values for this attribute # This tag is optional and if not supplied it is not restricted defined_attributes = { "SPEAKS_FOR" : { "description" : "Does this aggregate accept speaks-for credentials and options?", "service_types" : [SERVICE_AGGREGATE_MANAGER], "acceptable_values" : ['t', 'f'] }, "AM_API_VERSION" : { "description" : "The version of the AM API supported by this aggregate", "service_types" : [SERVICE_AGGREGATE_MANAGER], "acceptable_values" : ['1', '2', '3'] } }
40.551402
89
0.705001
543
4,339
5.41989
0.360958
0.019028
0.031261
0.024465
0.099218
0.085627
0.068637
0.033979
0.033979
0.033979
0
0.006663
0.169855
4,339
106
90
40.933962
0.810383
0.454713
0
0.032787
0
0
0.40515
0.072103
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74437028daada4e5ca6b65cd6259ece7a9695dc3
1,657
py
Python
Simulation-Chernobyl/simulation_chernobyl/graphics/logger.py
AllSafeCyberSecur1ty/Nuclear-Engineering
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
[ "MIT" ]
1
2022-03-26T20:01:13.000Z
2022-03-26T20:01:13.000Z
Simulation-Chernobyl/simulation_chernobyl/graphics/logger.py
AllSafeCyberSecur1ty/Nuclear-Engineering
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
[ "MIT" ]
null
null
null
Simulation-Chernobyl/simulation_chernobyl/graphics/logger.py
AllSafeCyberSecur1ty/Nuclear-Engineering
302d6dcc7c0a85a9191098366b076cf9cb5a9f6e
[ "MIT" ]
1
2022-03-26T19:59:13.000Z
2022-03-26T19:59:13.000Z
import pygame


class Logger():
    """Three-line on-screen message log for the simulation GUI."""

    def __init__(self):
        # Previously logged text, used to suppress consecutive duplicates.
        self.mem_text = ""
        # Three-slot text buffer; slot 1 holds the newest line.
        self.text1, self.text2, self.text3 = "", "", ""
        # RGB color for each buffered line, kept in lockstep with the text.
        self.text_color1, self.text_color2, self.text_color3 = (0, 0, 0), (0, 0, 0), (0, 0, 0)
        # Maximum number of characters that fit on one displayed line.
        self.max_width = 100

    # Log displayed on surface during simulation.
    def gui_logger(self, surface, font):
        """Blit the three buffered lines onto *surface* using *font*."""
        rows = (
            (self.text1, self.text_color1, 669),
            (self.text2, self.text_color2, 684),
            (self.text3, self.text_color3, 699),
        )
        for message, color, y in rows:
            surface.blit(font.render(str(message), True, color), (363, y))

    # Add new text to the log.
    def log_add(self, text, text_color=(0, 0, 0)):
        """Push *text* onto the log unless it repeats the previous entry."""
        if text == self.mem_text:
            return
        # New entry goes to slot 1; older entries shift down one slot.
        self.text3, self.text2, self.text1 = self.text2, self.text1, text
        self.text_color3, self.text_color2, self.text_color1 = (
            self.text_color2, self.text_color1, text_color)
        if len(text) > self.max_width:
            # Too wide for one line: wrap the overflow onto slot 2 and
            # shift once more (the oldest line falls off).
            self.text3, self.text2, self.text1 = (
                self.text2, text[self.max_width:len(text)], text[0:self.max_width])
            self.text_color3, self.text_color2, self.text_color1 = (
                self.text_color2, text_color, text_color)
        self.mem_text = text

    # Load max text width.
    def gui_max_width(self, max_width):
        """Set the maximum displayed line width (in characters)."""
        self.max_width = max_width
55.233333
133
0.63971
250
1,657
4.092
0.264
0.125122
0.02346
0.02346
0.419355
0.319648
0.231672
0.231672
0.109482
0.109482
0
0.053514
0.244418
1,657
29
134
57.137931
0.763578
0.211225
0
0
0
0
0
0
0
0
0
0
0
1
0.190476
false
0
0.047619
0
0.285714
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7443df89c1d0dd9c460536ddbdae972c55cb92f9
9,765
py
Python
ob/models.py
peterkuma/tjrapid
8813cb0a3633785754d20263f180428688bcb026
[ "MIT" ]
null
null
null
ob/models.py
peterkuma/tjrapid
8813cb0a3633785754d20263f180428688bcb026
[ "MIT" ]
1
2021-05-16T16:40:00.000Z
2021-05-16T16:40:00.000Z
ob/models.py
peterkuma/tjrapid
8813cb0a3633785754d20263f180428688bcb026
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2012 Peter Kuma

import os
from datetime import date, datetime
import urllib.request, urllib.error, urllib.parse
import json

from django.utils import timezone
from django.db import models
# NOTE(review): ugettext_lazy is deprecated since Django 3.0 and removed in
# 4.0 — migrate to gettext_lazy when upgrading Django.
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from markdown import markdown
from textile import textile
from django.utils.safestring import mark_safe
from django.contrib.contenttypes.fields import GenericRelation
from django_attach.models import Attachment
from linguo.models import MultilingualModel
from linguo.managers import MultilingualManager
from django.urls import reverse
from django.utils.translation import get_language
from django.core.validators import MaxValueValidator

from main.models import Category

# Supported markup languages for rendering Event head/body text.
MARKUP_CHOICES = (
    ('markdown', 'Markdown'),
    ('textile', 'Textile'),
    ('html', 'HTML'),
)

# Map-marker identifiers (OpenStreetMap-style "group/name"), used as the
# choices for MapPoint.marker.
MARKERS = (
    'amenity/arts_centre', 'amenity/atm', 'amenity/bank', 'amenity/bar',
    'amenity/bbq', 'amenity/bench', 'amenity/bicycle_parking',
    'amenity/bicycle_repair_station', 'amenity/biergarten',
    'amenity/boat_rental', 'amenity/bureau_de_change', 'amenity/bus_station',
    'amenity/cafe', 'amenity/car_wash', 'amenity/casino',
    'amenity/charging_station', 'amenity/cinema', 'amenity/community_centre',
    'amenity/courthouse', 'amenity/dentist', 'amenity/doctors',
    'amenity/drinking_water', 'amenity/emergency_phone',
    'amenity/excrement_bags', 'amenity/fast_food', 'amenity/ferry',
    'amenity/firestation', 'amenity/fountain', 'amenity/fuel',
    'amenity/hospital', 'amenity/hunting_stand', 'amenity/ice_cream',
    'amenity/internet_cafe', 'amenity/library', 'amenity/motorcycle_parking',
    'amenity/nightclub', 'amenity/parking',
    'amenity/parking_entrance_multistorey',
    'amenity/parking_entrance_underground', 'amenity/parking_subtle',
    'amenity/parking_tickets', 'amenity/pharmacy',
    'amenity/place_of_worship', 'amenity/police', 'amenity/post_box',
    'amenity/post_office', 'amenity/prison', 'amenity/pub',
    'amenity/public_bath', 'amenity/public_bookcase',
    'amenity/public_transport_tickets', 'amenity/recycling',
    'amenity/rental_bicycle', 'amenity/rental_car', 'amenity/restaurant',
    'amenity/shelter', 'amenity/shower', 'amenity/social_facility',
    'amenity/taxi', 'amenity/telephone', 'amenity/theatre',
    'amenity/toilets', 'amenity/town_hall', 'amenity/vehicle_inspection',
    'amenity/veterinary', 'amenity/waste_basket', 'amenity/waste_disposal',
    'barrier/cattle_grid', 'barrier/cycle_barrier',
    'barrier/full-height_turnstile', 'barrier/gate', 'barrier/kissing_gate',
    'barrier/lift_gate', 'barrier/motorcycle_barrier', 'barrier/stile',
    'barrier/toll_booth',
    'highway/bus_stop', 'highway/elevator', 'highway/ford',
    'highway/traffic_light',
    'historic/archaeological_site', 'historic/bust', 'historic/castle',
    'historic/city_gate', 'historic/fort', 'historic/fortress',
    'historic/manor', 'historic/memorial', 'historic/monument',
    'historic/obelisk', 'historic/palace', 'historic/plaque',
    'historic/shrine', 'historic/statue', 'historic/stone',
    'leisure/amusement_arcade', 'leisure/beach_resort', 'leisure/bird_hide',
    'leisure/bowling_alley', 'leisure/firepit', 'leisure/fishing',
    'leisure/fitness', 'leisure/golf', 'leisure/miniature_golf',
    'leisure/outdoor_seating', 'leisure/playground', 'leisure/sauna',
    'leisure/slipway', 'leisure/water_park',
    'man_made/bell_tower', 'man_made/chimney',
    'man_made/communications_tower', 'man_made/crane', 'man_made/cross',
    'man_made/lighthouse', 'man_made/mast', 'man_made/mast_communications',
    'man_made/mast_lighting', 'man_made/power_tower',
    'man_made/power_tower_small', 'man_made/storage_tank',
    'man_made/telescope_dish', 'man_made/telescope_dome',
    'man_made/tower_cantilever_communication', 'man_made/tower_cooling',
    'man_made/tower_defensive', 'man_made/tower_dish',
    'man_made/tower_dome', 'man_made/tower_generic',
    'man_made/tower_lattice', 'man_made/tower_lattice_communication',
    'man_made/tower_lattice_lighting', 'man_made/tower_lighting',
    'man_made/tower_observation', 'man_made/water_tower',
    'man_made/windmill',
    'natural/cave', 'natural/peak', 'natural/saddle',
    'office/consulate', 'office/embassy',
    'religion/buddhist', 'religion/christian', 'religion/hinduist',
    'religion/jewish', 'religion/muslim', 'religion/shintoist',
    'religion/sikhist', 'religion/taoist',
    'shop/alcohol', 'shop/art', 'shop/bag', 'shop/bakery', 'shop/beauty',
    'shop/bed', 'shop/beverages', 'shop/bicycle', 'shop/bookmaker',
    'shop/butcher', 'shop/car', 'shop/car_parts', 'shop/carpet',
    'shop/car_repair', 'shop/charity', 'shop/chemist', 'shop/clothes',
    'shop/coffee', 'shop/computer', 'shop/confectionery',
    'shop/convenience', 'shop/copyshop', 'shop/dairy', 'shop/deli',
    'shop/department_store', 'shop/diy', 'shop/electronics', 'shop/fabric',
    'shop/florist', 'shop/furniture', 'shop/garden_centre', 'shop/gift',
    'shop/greengrocer', 'shop/hairdresser', 'shop/hifi', 'shop/houseware',
    'shop/interior_decoration', 'shop/jewelry', 'shop/laundry',
    'shop/marketplace', 'shop/massage', 'shop/medical_supply',
    'shop/mobile_phone', 'shop/music', 'shop/musical_instrument',
    'shop/newsagent', 'shop/optician', 'shop/outdoor', 'shop/paint',
    'shop/perfumery', 'shop/pet', 'shop/photo', 'shop/seafood',
    'shop/second_hand', 'shop/shoes', 'shop/sports', 'shop/stationery',
    'shop/supermarket', 'shop/tea', 'shop/ticket', 'shop/tobacco',
    'shop/toys', 'shop/trade', 'shop/travel_agency', 'shop/tyres',
    'shop/variety_store', 'shop/video', 'shop/video_games',
    'tourism/alpinehut', 'tourism/apartment', 'tourism/artwork',
    'tourism/audioguide', 'tourism/board', 'tourism/camping',
    'tourism/caravan_park', 'tourism/chalet', 'tourism/guest_house',
    'tourism/guidepost', 'tourism/hostel', 'tourism/hotel',
    'tourism/information', 'tourism/map', 'tourism/motel',
    'tourism/museum', 'tourism/office', 'tourism/picnic',
    'tourism/terminal', 'tourism/viewpoint', 'tourism/wilderness_hut',
)
# Convert to Django choices format: (value, label) pairs.
MARKERS = [(x, x) for x in MARKERS]


class Member(models.Model):
    """A club member."""
    first_name = models.CharField(_('first name'), max_length=50)
    surname = models.CharField(_('surname'), max_length=50)
    category = models.CharField(_('category'), max_length=5)
    email = models.EmailField(_('e-mail'), blank=True)

    def __str__(self):
        # Bug fix: this was defined as __unicode__, which Python 3 and
        # modern Django never call, so members rendered as "Member object"
        # in the admin. (The file already uses urllib.request / django.urls,
        # i.e. it runs on Python 3.)
        return '%s %s' % (self.first_name, self.surname)

    # Keep the old name as an alias for any legacy callers.
    __unicode__ = __str__

    def email_special(self):
        """Return the e-mail address with '@' obfuscated for display."""
        return self.email.replace('@', '[zavinac]')

    class Meta:
        ordering = ('category', 'surname')
        verbose_name = _('member')
        verbose_name_plural = _('members')


class Event(MultilingualModel):
    """An orienteering event with translated content and an optional map."""
    title = models.CharField(_('title'), max_length=100)
    name = models.SlugField(
        _('name'),
        unique=True,
        help_text=_('Short name that will appear in the URL')
    )
    start_date = models.DateField(_('start date'))
    end_date = models.DateField(_('end date'), null=True, blank=True)
    location = models.CharField(_('location'), max_length=100)
    latitude = models.FloatField(_('latitude'), null=True, blank=True)
    longitude = models.FloatField(_('longitude'), null=True, blank=True)
    map_zoom = models.PositiveIntegerField(_('map zoom'), default=15,
        validators=[MaxValueValidator(19),]
    )
    category = models.ForeignKey(Category,
        verbose_name=_('category'),
        on_delete=models.CASCADE,
    )
    markup = models.CharField(
        _('markup'),
        max_length=50,
        choices=MARKUP_CHOICES,
        default='markdown',
        help_text=_('Documentation: <a href="https://en.wikipedia.org/wiki/Markdown">Markdown</a>, <a href="http://en.wikipedia.org/wiki/Textile_(markup_language)">Textile</a>')
    )
    head = models.TextField(
        _('head'),
        blank=True,
        help_text=_('Add files and images below')
    )
    body = models.TextField(
        _('body'),
        blank=True,
        help_text=_('Add files and images below')
    )
    attachments = GenericRelation(Attachment)
    created = models.DateTimeField(_('created'), auto_now_add=True)
    modified = models.DateTimeField(_('modified'), auto_now=True)

    def get_absolute_url(self):
        import ob.views  # local import avoids a circular dependency
        # NOTE(review): this hard-codes the 'orienteering' category while
        # larger_map_link() uses self.category.name — looks intentional but
        # worth confirming.
        return reverse(ob.views.event, kwargs={
            'lang': get_language(),
            'category_name': Category.objects.get(name_en='orienteering').name,
            'name': self.name,
        })

    def _render(self, text):
        """Render *text* with this event's configured markup language."""
        if self.markup == 'markdown':
            return mark_safe(markdown(text))
        elif self.markup == 'textile':
            return mark_safe(textile(text))
        else:
            return mark_safe(text)

    def head_html(self):
        """HTML rendering of the head text."""
        return self._render(self.head)

    def body_html(self):
        """HTML rendering of the body text."""
        return self._render(self.body)

    def is_upcoming(self):
        """True if the event has not finished yet (today inclusive)."""
        return self.end_date is None and self.start_date >= date.today() or \
            self.end_date is not None and self.end_date >= date.today()

    def larger_map_link(self):
        """URL of the full-size event map, or None when no map points exist."""
        import ob.views
        return None if self.mappoint_set.count() == 0 else \
            reverse(ob.views.event_map, kwargs={
                'lang': get_language(),
                'category_name': self.category.name,
                'name': self.name,
            })

    objects = MultilingualManager()

    class Meta:
        ordering = ('-start_date',)
        verbose_name = _('event')
        verbose_name_plural = _('events')
        translate = ('title', 'name', 'location', 'head', 'body')


class MapPoint(MultilingualModel):
    """A marker shown on an event's map."""
    title = models.CharField(_('title'), max_length=100)
    latitude = models.FloatField(_('latitude'))
    longitude = models.FloatField(_('longitude'))
    marker = models.CharField(_('marker'),
        null=True, blank=True,
        max_length=100,
        choices=MARKERS,
    )
    event = models.ForeignKey(Event,
        verbose_name=_('event'),
        on_delete=models.CASCADE,
    )

    objects = MultilingualManager()

    class Meta:
        verbose_name = _('map point')
        verbose_name_plural = _('map points')
        translate = ('title',)
24.659091
171
0.725038
1,190
9,765
5.756303
0.377311
0.027591
0.01927
0.009927
0.099562
0.07708
0.067445
0.05635
0.040584
0.015766
0
0.003797
0.110087
9,765
395
172
24.721519
0.784465
0.005735
0
0.059621
0
0.00271
0.478207
0.130963
0
0
0
0
0
1
0.01897
false
0
0.056911
0.00813
0.176152
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74455e5242de473902ce5c1ce7ac6048af1210c7
18,394
py
Python
tests/test_hal_nav2.py
rdhyee/restnavigator
507a854ab9ea5fe42dc10d8698f67e43b8657902
[ "MIT" ]
21
2015-04-26T08:41:33.000Z
2020-07-22T04:05:37.000Z
tests/test_hal_nav2.py
rdhyee/restnavigator
507a854ab9ea5fe42dc10d8698f67e43b8657902
[ "MIT" ]
17
2015-01-09T18:49:50.000Z
2015-03-09T07:57:17.000Z
tests/test_hal_nav2.py
rdhyee/restnavigator
507a854ab9ea5fe42dc10d8698f67e43b8657902
[ "MIT" ]
11
2015-04-21T04:12:56.000Z
2022-03-27T00:24:25.000Z
'''Refactored tests from test_hal_nav.py''' import json import httpretty import pytest import conftest import uritemplate import restnavigator as RN from restnavigator import exc import restnavigator.halnav as HN def uri_of(doc): '''Pull out the url from a hal document''' return doc['_links']['self']['href'] def link_to(doc): '''Pull out the self link of a hal document''' return doc['_links']['self'] def register_hal_page(doc, **kwargs): status = kwargs.pop('status', 200) method = kwargs.pop('method', 'GET') content_type = kwargs.pop('content_type', 'application/hal+json') def body_callback(request, url, headers): '''We do a callback so the response body can be updated''' headers2 = kwargs.pop('headers', headers) return ( status, headers2, json.dumps(doc), ) httpretty.HTTPretty.register_uri( method, body=body_callback, content_type=content_type, uri=uri_of(doc), **kwargs ) @pytest.fixture def page(index_page, curie_links, index_uri): '''Returns a function that creates pages''' def _page(name, number): selflink = { 'href': index_uri + name + '/' + str(number), 'name': name + str(number), } nextlink = { 'href': index_uri + name + '/' + str(number + 1), 'name': name + str(number + 1), } doc = { '_links': { 'self': selflink, 'curies': curie_links, 'next': nextlink }, 'name': name, 'number': number, 'data': conftest.random_sentence(), } register_hal_page(doc) _page.registry.setdefault(name, []).append(doc) return doc _page.registry = {} return _page @pytest.yield_fixture def http(request): '''Enables httpretty and disables it after the test''' httpretty.HTTPretty.enable() yield httpretty.HTTPretty httpretty.HTTPretty.disable() httpretty.HTTPretty.reset() @pytest.fixture def index_uri(): '''Fixture for the root uri''' return 'http://fakeuri.example/api/' @pytest.fixture def curie(): '''Returns the current curie string''' return conftest.random_word(2).lower() @pytest.fixture def curify(curie): def _curify(rel): return curie + ':' + rel return _curify @pytest.fixture def 
curie_links(curie, index_uri): '''Returns a templated curie link''' return [{ 'name': curie, 'href': index_uri + 'rels/{rel}', 'templated': True, }] @pytest.fixture def index_page(curie_links, index_uri, http): '''Registers a basic index page that can be extended''' doc = { '_links': { 'curies': curie_links, 'self': {'href': index_uri}, }, 'data': conftest.random_paragraphs(), } register_hal_page(doc) return doc @pytest.fixture def N(index_uri, index_page): '''A basic HALNavigator with the index_uri as root''' return RN.Navigator.hal(index_uri) class TestNavigator: '''tests for halnav.Navigator''' @pytest.fixture def fake_session(self): '''Creates a non functional fake session object''' class FakeNonFuncSession: headers = {'X-Custom': 'foo'} return FakeNonFuncSession() def test_custom_session(self, index_uri, fake_session): N = RN.Navigator.hal(index_uri, session=fake_session) N2 = RN.Navigator.hal(index_uri) assert N._core.session is fake_session assert N.headers is fake_session.headers class TestPartialNavigator: '''tests for halnav.PartialNavigator''' @pytest.fixture def rel(self, curify, name): '''The link relation for the templated link''' return curify(name) @pytest.fixture(params=[set(['x']), set(['x', 'y']), set(['x', 'y', 'z'])]) def vars(self, request): '''A set of random variables''' return request.param @pytest.fixture(params=[(0,0,0), (1,2,3)]) def values(self, request): return dict(zip('xyz', request.param)) @pytest.fixture def name(self): '''The name of the templated resource''' return conftest.random_word(5).lower() + 's' @pytest.fixture def post_template(self, name, index_uri, index_page, rel, vars): '''Creates and registers a post templated link''' href = "{index_uri}{name}/{{{varpath}}}".format( index_uri=index_uri, name=name, varpath='}/{'.join(v for v in sorted(vars)) ) link = { 'href': href, 'title': 'Templated link for ' + name, 'templated': True, } index_page['_links'][rel] = link return href @pytest.fixture def tpl_rel(self, name, curify): 
return curify(name + '_tpl') @pytest.fixture def posts(self, rel, name, index_uri, index_page, page, tpl_rel): '''Creates and registers some posts''' resource0 = page(name, 0) index_page['_links'][rel] = link_to(resource0) index_page['_links'][tpl_rel] = { 'href': index_uri + name + '/{id}', 'title': 'Template for ' + name, 'templated': True, } register_hal_page(resource0) last = resource0 for i in range(1, 5): resource = page(name, i) last['_links']['next'] = link_to(resource) last = resource register_hal_page(resource) return page.registry[name][:] @pytest.fixture def template_partial(self, rel, index_page, N, post_template): return N[rel] def test_template_uri(self, template_partial, post_template): assert template_partial.template_uri == post_template def test_expand_uri( self, vars, post_template, template_partial, values): uri = template_partial.expand_uri(**values) assert uri == uritemplate.expand(post_template, values) def test_expand_link( self, vars, post_template, template_partial, values): link = template_partial.expand_link(**values) assert not link.props.get('templated', False) assert link.uri == uritemplate.expand(post_template, values) def test_expand(self, vars, post_template, template_partial, values): post1 = template_partial(**values) assert not post1.fetched assert post1.uri == uritemplate.expand(post_template, values) def test_variables(self, template_partial, vars): assert template_partial.variables == vars @pytest.mark.parametrize('i', range(0, 5)) def test_valid_expansion(self, posts, name, N, tpl_rel, i): partial = N[tpl_rel] nav = partial(id=i) nav.fetch() assert nav.status == (200, 'OK') assert nav.uri == uri_of(posts[i]) class TestHALNavGetItem: '''Tests the __getitem__ method of HALNavigator ''' @pytest.fixture def names(self): namelist = [conftest.random_word().lower() for _ in range(3)] def _names(i): return namelist[i] return _names @pytest.fixture def rels(self, names, curify): def _rels(i): return curify(names(i)) return _rels 
@pytest.fixture def resources(self, names, rels, index_page, index_uri, page): last = index_page for i in range(3): new = page(names(i), i) last['_links'][rels(i)] = { 'href': uri_of(new), 'title': "Page for " + names(i) } last = new def test_fetch_behavior(self, N, resources, rels): Na = N[rels(0)] Nb = N[rels(0), rels(1)] assert Na.fetched assert not Nb.fetched def test_sequence_equivalence(self, N, resources, rels): Na = N[rels(0), rels(1), rels(2)] Nb = N[rels(0)][rels(1)][rels(2)] assert Na is Nb @pytest.fixture def link_resources(self, rels, names, index_page, page): first = page(names(0), 1) index_page['_links'][rels(0)] = link_to(first) register_hal_page(first) second1 = page(names(1), 1) second2 = page(names(1), 2) first['_links'][rels(1)] = [ { 'href': uri_of(second1), 'name': 'name_x', },{ 'href': uri_of(second2), 'name': 'name_y', } ] register_hal_page(second1) register_hal_page(second2) third_1 = page(names(2), 1) third_2 = page(names(2), 2) second1['_links'][rels(2)] = link_to(third_1) second2['_links'][rels(2)] = link_to(third_2) register_hal_page(third_1) register_hal_page(third_2) def test_linklist_in_sequence(self, N, link_resources, rels): Nchained = N[rels(0), rels(1), 'name':'name_x', rels(2)] Nfirst = N[rels(0)] Nsecondlist = Nfirst[rels(1)] Nsecond = Nsecondlist.get_by('name', 'name_x') Nthird = Nsecond[rels(2)] assert Nchained is Nthird def test_linklist_index(self, N, link_resources, rels): Nchained = N[rels(0), rels(1), 1, rels(2)] Nfirst = N[rels(0)] Nsecondlist = Nfirst[rels(1)] Nsecond = Nsecondlist[1] Nthird = Nsecond[rels(2)] assert Nchained is Nthird def test_bad_rel(self, N, link_resources, rels): with pytest.raises(exc.OffTheRailsException): N[rels(1)] with pytest.raises(exc.OffTheRailsException): N[rels(0), rels(0)] def test_bad_name(self, N, link_resources, rels): with pytest.raises(exc.OffTheRailsException): N[rels(0), rels(1), 'name':'badname'] def test_bad_index(self, N, link_resources, rels): with 
pytest.raises(exc.OffTheRailsException): N[rels(0), rels(1), 100] @pytest.fixture def template_uri(self, index_uri): return index_uri + 'tpl/{id}' @pytest.fixture def tpl_rel(self, curify): return curify('tpl') @pytest.fixture def tpl_resources(self, page, tpl_rel, template_uri, index_page): index_page['_links'][tpl_rel] = { 'href': template_uri, 'templated': True, 'title': 'Template link', } for i in range(3): resource = page('tpl', i) register_hal_page(resource) return template_uri def test_template_sequence(self, N, tpl_resources, tpl_rel): Na = N[tpl_rel](id=0) Nb = N[tpl_rel](id=1) Nc = N[tpl_rel](id=2) Na(), Nb(), Nc() assert Na.status == (200, 'OK') assert Nb.status == (200, 'OK') assert Nc.status == (200, 'OK') class TestEmbedded: '''tests for embedded document features''' @pytest.fixture def blog_posts(self, http, page): '''Posts are both linked and embedded''' _posts = [page('post', x) for x in range(3)] for post in _posts: register_hal_page(post) return _posts @pytest.fixture def comments(self, page): '''Comments are embedded only and have no self link''' comments = [page('comments', x) for x in range(3)] for comment in comments: del comment['_links']['self'] return comments @pytest.fixture def nested(self, page): '''Nested are several layers deep embedded docs. They are not linked to, but do have urls. 
''' nested = [page('nested', n) for n in range(3)] for (nest1, nest2) in zip(nested[:-1], nested[1:]): nest1['_embedded'] = { 'xx:nested': nest2 } register_hal_page(nest1) register_hal_page(nest2) # register remaining page return nested @pytest.fixture def index(self, index_uri, comments, blog_posts, http, nested): doc = { '_links': { 'curies': [{ 'name': 'xx', 'href': index_uri + 'rels/{rel}', 'templated': True, }], 'self': {'href': index_uri}, 'first': link_to(blog_posts[0]), 'xx:second': link_to(blog_posts[1]), 'xx:posts': [link_to(post) for post in blog_posts], 'xx:nested-links': [link_to(nest) for nest in nested], 'xx:non-embedded-nest': link_to(nested[0]), }, 'data': 'Some data here', '_embedded': { 'xx:posts': blog_posts, 'xx:comments': comments, } } register_hal_page(doc) return doc def test_comments_are_orphans(self, N, index): '''Checks that all embedded documents that don't have self links are OrphanHALNavigators ''' comments = N['xx:comments'] for comment in comments: assert comment.parent is N def test_posts_arent_orphans(self, N, index): posts = N['xx:posts'] for i, post in enumerate(posts): href = index['_embedded']['xx:posts'][i]['_links']['self']['href'] assert post.uri == href def test_length_accurate(self, N, index, comments): assert len(N['xx:comments']) == len(comments) def test_embedded_only_rel_in_navigator(self, N, index): N.fetch() assert 'xx:comments' in N def test_embedded_only_rel_not_in_links(self, N, index): assert 'xx:comments' not in N.links() def test_embedded_only_rel_in_embedded(self, N, index): assert 'xx:comments' in N.embedded() def test_both_rel_in_navigator(self, N, index): N.fetch() assert 'xx:posts' in N def test_both_rel_in_links(self, N, index): assert 'xx:posts' in N.links() def test_both_rel_in_embedded(self, N, index): assert 'xx:posts' in N.embedded() def test_embedded_default_curie(self, N, index): N._core.default_curie = 'xx' p1 = N['posts'] assert p1 is N['xx:posts'] def test_nested_works(self, N, index, nested): 
nest1 = N['xx:non-embedded-nest'] nest2 = nest1['xx:nested'] nest3 = nest2['xx:nested'] nest3_chained = N['xx:non-embedded-nest', 'xx:nested', 'xx:nested'] assert nest3 is nest3_chained def test_fetch_then_get_embedded(self, N, index): # for this test, nested[0] is linked from index, but not # embedded anywhere. nested[1] is embedded in nested[0], but # is also its own resource. We want to ensure the same # navigator object is used for both nested1 = N['xx:nested-links'][1] nested1.fetch() assert N['xx:non-embedded-nest', 'xx:nested'] is nested1 def test_get_embedded_then_fetch(self, N, index): # reverse order of previous test nested1 = N['xx:non-embedded-nest', 'xx:nested'] nested1_linked = N['xx:nested-links'][1] # Nothing we've done to nested1_linked should have resolved it # except that we already saw it as an embedded doc. assert nested1_linked.resolved assert nested1 is nested1_linked def test_cached_embedded_requests(self, N, index, http): N.fetch() main_nav_request = http.last_request embedded = N.embedded()['xx:posts'][0] # get the cached state of the embedded resource, no additional # http requests should be made. embedded() assert main_nav_request is http.last_request # if we explicitly refetch the embedded (not orphaned) Navigator, we # expect that a new http request is being made. 
embedded.fetch() assert main_nav_request is not http.last_request class TestCreate: @pytest.fixture def new_resource(self, page): grelp = page('grelp', 0) register_hal_page(grelp) return grelp @pytest.fixture(params=[302, 303, 202, 202, 303]) def post_status(self, request): return request.param @pytest.fixture def hosts(self, page, http, post_status, new_resource): host_page = page('hosts', 0) register_hal_page( host_page, method='POST', status=post_status, location=uri_of(new_resource), ) return host_page @pytest.fixture def index(self, hosts, index_uri): doc = { '_links': { 'self': {'href': index_uri}, 'xx:create-hosts': link_to(hosts), } } register_hal_page(doc) return doc def test_uses_post(self, N, index, http): N['xx:create-hosts'].create({'name': 'foo'}) last_request_method = http.last_request.method assert last_request_method == 'POST' def test_content_type_json(self, N, index, http): N['xx:create-hosts'].create({'name': 'foo'}) last_content_type = http.last_request.headers['Content-Type'] assert last_content_type == 'application/json' def test_body_is_correct(self, N, index, http): N['xx:create-hosts'].create({'name': 'foo'}) last_body = http.last_request.body assert last_body == b'{"name": "foo"}' def test_new_resource_uri_correct( self, N, index, new_resource, post_status): N2 = N['xx:create-hosts'] N3 = N2.create({'name': 'foo'}) if post_status == 202: assert N3.parent.uri == N2.uri assert N3.fetched else: assert N3.uri == uri_of(new_resource) assert not N3.fetched def test_headers_passed(self, N, index, http): headers = {'X-Custom': 'foo'} N['xx:create-hosts'].create({'name': 'foo'}, headers=headers) custom_header = http.last_request.headers['X-Custom'] assert custom_header == 'foo' def test_files_passed(self, N, index, http): headers = {'X-Custom': 'foo'} N['xx:create-hosts'].create(files={'file': ('filename', )}, headers=headers) custom_header = http.last_request assert custom_header == 'foo' def test_empty_post(self, N, index): # Just want to ensure 
no error is thrown N['xx:create-hosts'].create()
31.604811
84
0.584321
2,296
18,394
4.516551
0.135017
0.024301
0.043202
0.007715
0.271649
0.208004
0.167888
0.119286
0.094118
0.079556
0
0.013312
0.289388
18,394
581
85
31.659208
0.780047
0.09128
0
0.204036
0
0
0.080155
0.001874
0
0
0
0
0.098655
1
0.170404
false
0.004484
0.017937
0.020179
0.282511
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
744596be112cd09ec44dc8a6cfce90d30a73ab12
405
py
Python
initialize/application.py
adripo/vonage-python-code-snippets
443d83a8cb3c052a634c997f1a3e3eccd61541ab
[ "MIT" ]
8
2017-11-10T17:47:40.000Z
2019-03-09T19:35:01.000Z
initialize/application.py
adripo/vonage-python-code-snippets
443d83a8cb3c052a634c997f1a3e3eccd61541ab
[ "MIT" ]
26
2017-04-12T18:13:51.000Z
2019-03-04T15:29:35.000Z
initialize/application.py
adripo/vonage-python-code-snippets
443d83a8cb3c052a634c997f1a3e3eccd61541ab
[ "MIT" ]
18
2020-10-01T07:23:22.000Z
2022-01-17T00:20:44.000Z
import os

import vonage
from dotenv import load_dotenv

# Bug fix: the original called join(dirname(__file__)) without importing
# join/dirname (only `import os` was present), raising NameError at import.
dotenv_path = os.path.join(os.path.dirname(__file__), "../.env")
load_dotenv(dotenv_path)

# Credentials are read from the environment (populated by the .env file).
VONAGE_APPLICATION_ID = os.environ.get("VONAGE_APPLICATION_ID")
VONAGE_APPLICATION_PRIVATE_KEY_PATH = os.environ.get("VONAGE_APPLICATION_PRIVATE_KEY_PATH")

# JWT-authenticated Vonage client for application API calls.
client = vonage.Client(
    application_id=VONAGE_APPLICATION_ID,
    private_key=VONAGE_APPLICATION_PRIVATE_KEY_PATH,
)
27
89
0.832099
56
405
5.517857
0.321429
0.330097
0.184466
0.262136
0.433657
0
0
0
0
0
0
0
0.079012
405
14
90
28.928571
0.828418
0
0
0
0
0
0.155556
0.138272
0
0
0
0
0
1
0
false
0
0.272727
0
0.272727
0
0
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
3
74465e288ae568c87d8ee7da98f3d73b7dabf8da
22
py
Python
python.py
oguzcan710/mastering-git-august
b165333bdd96552d834350d95f7fd950e5844538
[ "Apache-2.0" ]
null
null
null
python.py
oguzcan710/mastering-git-august
b165333bdd96552d834350d95f7fd950e5844538
[ "Apache-2.0" ]
null
null
null
python.py
oguzcan710/mastering-git-august
b165333bdd96552d834350d95f7fd950e5844538
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# Bug fix: the original shebang pointed at /usr/bin/bin/python, a path that
# does not exist on any standard system; use env to locate the interpreter.
11
21
0.681818
4
22
3.75
0.75
0
0
0
0
0
0
0
0
0
0
0
0.045455
22
1
22
22
0.714286
0.909091
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
74473c34a9a7c4a517aae11602ad9b0523cc56e0
931
py
Python
pony_express/solution.py
ijkilchenko/google_code_jam
4584d0125f79e0420881356e8a722c5afc0c1ca9
[ "MIT" ]
null
null
null
pony_express/solution.py
ijkilchenko/google_code_jam
4584d0125f79e0420881356e8a722c5afc0c1ca9
[ "MIT" ]
null
null
null
pony_express/solution.py
ijkilchenko/google_code_jam
4584d0125f79e0420881356e8a722c5afc0c1ca9
[ "MIT" ]
null
null
null
import sys def read(f): with open(f) as file: lines = file.readlines() T = int(lines[0]) line = 1 for t in range(1, T+1): N, Q, H, A, UV, line = _get_case(line, lines) y = solve(N, Q, H, A, UV) print('Case #%i: %0.6f' % (t, y)) def _get_case(line, lines): N, Q = [int(s) for s in lines[line].split()] H = [] for r in range(N): row = [int(s) for s in lines[line+1+r].split()] H.append(row) line = line + 1 + r A = [] for r in range(N): row = [int(s) for s in lines[line+1+r].split()] A.append(row) line = line + 1 + r UV = [] for r in range(Q): row = [int(s) for s in lines[line+1+r].split()] UV.append(row) line = line + 1 + r return N, Q, H, A, UV, line def solve(N, Q, H, A, UV): city = 0 horse = H[0] visited = [] queue = [0] read('sample.in') #read(sys.argv[1])
20.688889
55
0.481203
166
931
2.674699
0.259036
0.078829
0.081081
0.036036
0.515766
0.515766
0.292793
0.25
0.25
0.25
0
0.02589
0.336198
931
44
56
21.159091
0.692557
0.01826
0
0.235294
0
0
0.026287
0
0
0
0
0
0
1
0.088235
false
0
0.029412
0
0.147059
0.029412
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7448420490dadf79cbb285def423344210d69da7
254
py
Python
apps/products/filters.py
ykyk1229/TurtleDove
074ed03396d603a920bc382ee916fc0f3adab6ea
[ "MIT" ]
1
2019-09-02T01:17:34.000Z
2019-09-02T01:17:34.000Z
apps/products/filters.py
ykyk1229/TurtleDove
074ed03396d603a920bc382ee916fc0f3adab6ea
[ "MIT" ]
null
null
null
apps/products/filters.py
ykyk1229/TurtleDove
074ed03396d603a920bc382ee916fc0f3adab6ea
[ "MIT" ]
null
null
null
import django_filters

from .models import Product


class ProductFilter(django_filters.rest_framework.FilterSet):
    """Filter set for the Product (business line) model."""

    # Allow filtering by exact product id via the "pid" query parameter.
    pid = django_filters.NumberFilter("pid")

    class Meta:
        model = Product
        fields = ['pid', ]
18.142857
61
0.653543
26
254
6.230769
0.653846
0.240741
0
0
0
0
0
0
0
0
0
0
0.244094
254
13
62
19.538462
0.84375
0.023622
0
0
0
0
0.025862
0
0
0
0
0
0
1
0
false
0
0.285714
0
0.714286
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
74488c7384e202632e7b938723fff09c66866753
3,724
py
Python
corpus_process.py
Protostars/CSeg
1acd301e804de8135310c5d39ed136b4cfd8865f
[ "MIT" ]
1
2019-12-15T06:03:42.000Z
2019-12-15T06:03:42.000Z
corpus_process.py
Protostars/CSeg
1acd301e804de8135310c5d39ed136b4cfd8865f
[ "MIT" ]
null
null
null
corpus_process.py
Protostars/CSeg
1acd301e804de8135310c5d39ed136b4cfd8865f
[ "MIT" ]
1
2019-12-15T06:03:19.000Z
2019-12-15T06:03:19.000Z
import re import os import pickle import sys from math import log DICT_NAME = "dict.txt" BI_GRAM_FILE = "bi_gram.txt" HMM_PROB = "hmm_prob" SMALL_PROB = 1e-200 # 识别汉字、数字和字母、全角字符,及+,#,&,.,% re_ch = re.compile("([\u4E00-\u9FD5a-zA-Z0-9\uFF10-\uFF5A+#&\._%%]+)", re.U) re_stop = re.compile("([。,]+)", re.U) # 处理分词语料,生成词典和2-gram列表 # 语料内容:每行一个句子,词用空格分开 def process(input_file, output_path): words = {} bi_grams = {} max_word_length = 0 m_w = '' with open(input_file, 'r', encoding='utf8') as f: for line in f: line = line.strip() if not line: continue word_line = line.split() last_word = '<BOS>' for w in word_line: if re_ch.match(w): words[w] = words.get(w, 0) + 1 # 没匹配到的是一些符号:、,等等 if last_word: bg = last_word + ' ' + w bi_grams[bg] = bi_grams.get(bg, 0) + 1 last_word = w if len(w) > max_word_length: max_word_length = len(w) m_w = w elif re_stop.match(w): if last_word: bg = last_word + ' <EOS>' bi_grams[bg] = bi_grams.get(bg, 0) + 1 last_word = '<BOS>' if last_word: bg = last_word + ' <EOS>' bi_grams[bg] = bi_grams.get(bg, 0) + 1 print("字典大小:%d" % len(words)) print("最长词长度:%d %s" % (max_word_length,m_w)) with open(os.path.join(output_path, DICT_NAME), 'w', encoding='utf8') as f: for k in sorted(words): f.write("%s %d\n" % (k, words[k])) print("2-gram 数量:%d" % len(bi_grams)) with open(os.path.join(output_path, BI_GRAM_FILE), 'w', encoding='utf8') as f: for k in sorted(bi_grams): f.write("%s %d\n" % (k, bi_grams[k])) # 估计HMM模型的概率 def process_hmm(input_file, output_path): line_count = 0 state_list = ['B', 'M', 'E', 'S'] A = {} B = {} Pi = {} State_Count = {} for s in state_list: A[s] = {t: 0. for t in state_list} # 转移概率 B[s] = {} # 观测概率 Pi[s] = 0. 
# 初始概率 State_Count[s] = 0 print('开始估计HMM概率...') with open(input_file, 'r', encoding='utf8') as f: for line in f: line_count += 1 line = line.strip() if not line: continue word_list = line.split() chars = ''.join(word_list) states = [] for w in word_list: if len(w) == 1: states.append('S') else: states += ['B']+['M']*(len(w)-2)+['E'] assert len(chars) == len(states) i = 0 for s in states: State_Count[s] += 1 if i == 0: Pi[s] += 1. else: A[states[i-1]][s] += 1. B[s][chars[i]] = B[s].get(chars[i], 0) + 1. i += 1 A = {k: {tk: log(max(tv/State_Count[k], SMALL_PROB)) for tk, tv in v.items()} for k, v in A.items()} B = {k: {tk: log(max(tv/State_Count[k], SMALL_PROB)) for tk, tv in v.items()} for k, v in B.items()} Pi = {k: log(max(v/line_count, SMALL_PROB)) for k, v in Pi.items()} with open(os.path.join(output_path, HMM_PROB), 'wb') as f: pickle.dump(A, f) pickle.dump(B, f) pickle.dump(Pi, f) if __name__ == '__main__': argv = sys.argv[1:] if len(argv) < 2: print('corpus_process.py <corpus_file> <out_dir>') sys.exit() else: process(argv[0], argv[1]) process_hmm(argv[0], argv[1]) print("处理完成")
32.955752
108
0.475295
535
3,724
3.145794
0.231776
0.041592
0.030897
0.035651
0.341652
0.341652
0.317885
0.267974
0.229947
0.196673
0
0.021583
0.365467
3,724
112
109
33.25
0.690647
0.028733
0
0.20202
0
0
0.067684
0.013315
0
0
0
0
0.010101
1
0.020202
false
0
0.050505
0
0.070707
0.060606
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
744953a33d13dce6a8e313b4de8e7bbc4605a9b1
1,954
py
Python
main.py
tvirus-01/scraping-seekingalpha
0dedd14195a9f4f354475a3cbe1728325cb45b33
[ "MIT" ]
1
2021-05-18T16:27:06.000Z
2021-05-18T16:27:06.000Z
main.py
tvirus-01/scraping-seekingalpha
0dedd14195a9f4f354475a3cbe1728325cb45b33
[ "MIT" ]
null
null
null
main.py
tvirus-01/scraping-seekingalpha
0dedd14195a9f4f354475a3cbe1728325cb45b33
[ "MIT" ]
null
null
null
from selenium import webdriver from selenium.webdriver.chrome.options import Options from bs4 import BeautifulSoup import requests import time import os import csv root_url = "https://seekingalpha.com" query = "stock repurchase program" url = "https://seekingalpha.com/search?q="+query.replcae(" ", "+") chrome_driver_path = "/usr/lib/chromium-browser/chromedriver" #add your own driver path opts = Options() opts.add_argument("--headless") opts.add_argument("--no-sandbox") opts.add_argument("user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36") driver = webdriver.Chrome(chrome_driver_path, options=opts) driver.get(url) time.sleep(5) soup = BeautifulSoup(driver.page_source, 'lxml') result_list = soup.find("div", {"id":"result_list"}) result_page = result_list.find("div", {"class":"result-pages"}) fields = ['Title', 'Link', 'MetaData', 'Summary'] csv_rows = [] for a in result_page.find_all("a"): link = a['href'] new_url = url+link driver.get(new_url) time.sleep(5) new_soup = BeautifulSoup(driver.page_source, 'lxml') new_result_list = new_soup.find("div", {"id":"result_list"}) items = new_result_list.find_all("li") for item in items: item_link = item.find("div", {"class":"item-link"}) item_link_a = item_link.find("a") item_meta = item.find("div", {"class":"item-metadata"}) item_summary = item.find("div", {"class":"item-summary"}) name = item_link_a.text.replace(" ", "").replace("\n", "") link = root_url+item_link_a['href'] metadata = item_meta.text.replace(" ", "") summary = item_summary.text csv_rows.append([str(name), str(link), str(metadata), str(summary)]) with open("SeekingAlpha.csv", 'w') as csvfile: csvwriter = csv.writer(csvfile) csvwriter.writerow(fields) csvwriter.writerows(csv_rows) print("Done")
30.53125
153
0.680655
274
1,954
4.693431
0.390511
0.046656
0.037325
0.037325
0.139969
0.093313
0
0
0
0
0
0.01816
0.154555
1,954
64
154
30.53125
0.760291
0.012283
0
0.043478
0
0.021739
0.236269
0.031088
0
0
0
0
0
1
0
false
0
0.152174
0
0.152174
0.021739
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
744e61002cde51c152c53abdd50110f3f6f68a3b
6,071
py
Python
falx/utils/generate_random_traces.py
Mestway/falx
fcae5e536f44c19780ae9f00b94e397723924757
[ "BSD-2-Clause" ]
10
2021-01-29T19:08:55.000Z
2022-03-03T05:08:53.000Z
falx/utils/generate_random_traces.py
Mestway/falx
fcae5e536f44c19780ae9f00b94e397723924757
[ "BSD-2-Clause" ]
null
null
null
falx/utils/generate_random_traces.py
Mestway/falx
fcae5e536f44c19780ae9f00b94e397723924757
[ "BSD-2-Clause" ]
2
2020-06-28T21:52:12.000Z
2020-09-04T07:18:08.000Z
import eval_utils import json from falx.eval_interface import FalxEvalInterface from falx.utils import table_utils from timeit import default_timer as timer import numpy as np from pprint import pprint from falx.visualization.chart import VisDesign, LayeredChart from falx.visualization.matplotlib_chart import MatplotlibChart import falx.visualization.visual_trace as visual_trace np.random.seed(2019) def get_mark_type(chart): chart_obj = chart.to_vl_obj() marks = [chart_obj['mark']] if "mark" in chart_obj else [layer["mark"] for layer in chart_obj["layer"]] marks = [m if isinstance(m, (str,)) else m["type"] for m in marks] return marks def process_data(bid, num_samples_dict): f_in = f"../../benchmarks/{bid}.json" for k in [2, 3, 4]: if bid in num_samples_dict[k]: break num_samples = k with open(f_in, "r") as f: data = json.load(f) #print(data) input_data = data["input_data"] extra_consts = data["constants"] if "constants" in data else [] vis = VisDesign.load_from_vegalite(data["vl_spec"], data["output_data"]) full_trace = vis.eval() partitioned = visual_trace.partition_trace(full_trace) sample_trace = [] raw_sample_trace = [] raw_full_trace = [] for key in partitioned: ty = "bar" if key in ["BarV","BarH"] else ("line" if key == "Line" else ("point" if key == "Point" else "area")) traces = partitioned[key] num_samples = int(np.ceil(num_samples / 2.0)) if ty == "line" or ty == "area" else num_samples indexes = np.random.choice(list(range(len(traces))), num_samples) samples = [traces[i] for i in indexes] tr_table = visual_trace.trace_to_table(samples) full_tr_table = visual_trace.trace_to_table(traces) for tr in full_tr_table[key]: raw_full_trace.append({"type": ty, "props": tr}) for tr in tr_table[key]: raw_sample_trace.append({"type": ty, "props": tr}) if ty == "line": kreplace = lambda x: "x" if x in ["x1", "x2"] else "y" if x in ["y1", "y2"] else x sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x1", "y1", "size", "color", "column"] if k in 
tr}}) sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x2", "y2", "size", "color", "column"] if k in tr}}) elif ty == "bar": kreplace = lambda x: "x" if x in ["x1"] else "y" if x in ["y1"] else x sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in tr}}) elif ty == "point": sample_trace.append({"type": ty, "props": tr}) elif ty == "area": kreplace = lambda x: "x" if x in ["x1", "x2"] else ("y2" if x in ["yb1", "yb2"] else "y" if x in ["yt1", "yt2"] else x) sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x1", "yt1", "yb1", "color", "column"] if k in tr}}) sample_trace.append({"type": ty, "props": {kreplace(k):tr[k] for k in ["x2", "yt2", "yb2", "color", "column"] if k in tr}}) data["sample_trace"] = sample_trace data["raw_sample_trace"] = raw_sample_trace data["raw_full_trace"] = raw_full_trace return data if __name__ == '__main__': benchmark_ids = [ "test_1", "test_2", "test_3", "test_4", "test_5", "test_6", "test_7", "test_8", "test_9", "test_10", "test_11", "test_12", "test_13", "test_14", "test_15", "test_16", "test_17", "test_18", "test_19", "test_20", "test_21", "test_22", "test_23", "001", "002", "003", "004", "005", "006", "007", "008", "009", "010", "011", "012", "013", "014", "015", "016", "017", "018", "019", "020", "021", "022", "023", "024", "025", "026", "027", "028", "029", "030", "031", "032", "033", "034", "035", "036", "037", "038", "039", "040", "041", "042", "043", "044", "045", "046", "047", "048", "049", "050", "051", "052", "053", "054", "055", "056", "057", "058", "059", "060", ] num_samples_dict = { 1: ['test_21', '050', '025', '058', '001', '011', 'test_7', '042', '032', '012', 'test_15', 'test_10', '023', 'test_1', '052', 'test_6', '035', '010', '006', '054', '051', 'test_14', '056', '024', '017', '053', '020', '033', '031', 'test_8', '047', '030', '029', 'test_2', 'test_11', 'test_13'], 2: ['test_21', '025', '050', '001', '011', '058', '012', '032', 'test_7', 'test_10', 
'010', '017', '023', '042', '052', 'test_15', '035', 'test_6', 'test_1', '006', '054', '051', 'test_14', '024', '053', '056', '009', '020', '033', 'test_8', '031', '047', '030', 'test_16', '029', 'test_2', '034', 'test_13', '014', '037', 'test_12', 'test_11', 'test_23'], 3: ['test_21', '015', '050', '001', '025', '011', '058', '012', '006', '032', 'test_7', '010', 'test_10', '017', '023', 'test_1', '042', 'test_6', 'test_15', '052', '035', '005', '045', '054', '051', 'test_14', '007', '038', '041', '022', '024', '053', '056', '020', '009', '033', 'test_22', '004', 'test_8', 'test_2', '031', 'test_13', '047', 'test_16', '029', '030', '034', '014', '037', 'test_12', 'test_11', 'test_23', '044'], 4: ['058', '015', 'test_21', '050', '006', '011', '032', 'test_7', '010', 'test_10', '012', '017', '025', 'test_14', '023', '042', 'test_15', 'test_1', '052', 'test_6', '005', '035', '045', '001', '051', '038', '007', '041', '022', '054', '016', '024', '056', '053', '009', '020', 'test_17', '033', '021', '008', '044', '031', '030', '047', 'test_16', 'test_22', '004', '029', 'test_13', '034', 'test_2', 'test_8', '014', 'test_11', 'test_12', '037', 'test_23'] } #benchmark_ids = ["test_4"] full_data = [] for i, bid in enumerate(benchmark_ids): data = process_data(bid, num_samples_dict) full_data.append(data) print(json.dumps(full_data))
54.205357
469
0.541591
889
6,071
3.502812
0.224972
0.045922
0.038536
0.043674
0.271676
0.229929
0.181118
0.134233
0.111432
0.111432
0
0.159871
0.232416
6,071
111
470
54.693694
0.508369
0.006095
0
0
0
0
0.24685
0.004476
0
0
0
0
0
1
0.023529
false
0
0.117647
0
0.164706
0.023529
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
745481823c68affcc1265816e955ae79a8aa4353
1,180
py
Python
Python Advanced/3. Multidimensional Lists/Exercise/08. Bombs.py
a-shiro/SoftUni-Courses
7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77
[ "MIT" ]
null
null
null
Python Advanced/3. Multidimensional Lists/Exercise/08. Bombs.py
a-shiro/SoftUni-Courses
7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77
[ "MIT" ]
null
null
null
Python Advanced/3. Multidimensional Lists/Exercise/08. Bombs.py
a-shiro/SoftUni-Courses
7d0ca6401017a28b5ff7e7fa3e5df8bba8ddbe77
[ "MIT" ]
null
null
null
from collections import deque def read_matrix(dimensions): mtrx = [] for _ in range(dimensions): col = [int(n) for n in input().split(' ')] mtrx.append(col) return mtrx def get_cells(matrix): cells_alive = 0 cells_sum = 0 for r in matrix: for c in r: if c > 0: cells_alive += 1 cells_sum += c return cells_alive, cells_sum dimensions = int(input()) matrix = read_matrix(dimensions) bombs = deque([[int(y) for y in x.split(",")] for x in input().split(' ')]) for b in range(len(bombs)): bomb_row, bomb_col = bombs.popleft() damage = matrix[bomb_row][bomb_col] if matrix[bomb_row][bomb_col] > 0: for row in range(3): for col in range(3): if 0 <= bomb_row - 1 + row < len(matrix) and 0 <= bomb_col - 1 + col < len(matrix): if matrix[bomb_row - 1 + row][bomb_col - 1 + col] > 0: matrix[bomb_row - 1 + row][bomb_col - 1 + col] -= damage alive, sum = get_cells(matrix) print(f'Alive cells: {alive}') print(f'Sum: {sum}') for el in matrix: print(' '.join(str(x) for x in el))
22.264151
99
0.555085
177
1,180
3.570621
0.248588
0.066456
0.079114
0.066456
0.151899
0.088608
0.088608
0.088608
0.088608
0
0
0.01968
0.311017
1,180
52
100
22.692308
0.757688
0
0
0
0
0
0.028814
0
0
0
0
0
0
1
0.060606
false
0
0.030303
0
0.151515
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74549295644b9ab856d42ba18aad581d90fa00df
2,548
py
Python
section2/src/inference/UNetInferenceAgent.py
taruntalreja08/Alzheimer-progression
e656ecdabe0d14fe7365669113dae13c92252f5d
[ "MIT" ]
null
null
null
section2/src/inference/UNetInferenceAgent.py
taruntalreja08/Alzheimer-progression
e656ecdabe0d14fe7365669113dae13c92252f5d
[ "MIT" ]
1
2020-06-07T15:04:01.000Z
2020-06-07T15:04:01.000Z
section2/src/inference/UNetInferenceAgent.py
taruntalreja08/Alzheimer-progression
e656ecdabe0d14fe7365669113dae13c92252f5d
[ "MIT" ]
null
null
null
""" Contains class that runs inferencing """ import torch import numpy as np from networks.RecursiveUNet import UNet from utils.utils import med_reshape class UNetInferenceAgent: """ Stores model and parameters and some methods to handle inferencing """ def __init__(self, parameter_file_path='', model=None, device="cpu", patch_size=64): self.model = model self.patch_size = patch_size self.device = device if model is None: self.model = UNet(num_classes=3) if parameter_file_path: self.model.load_state_dict(torch.load(parameter_file_path, map_location=self.device)) self.model.to(device) def single_volume_inference_unpadded(self, volume): """ Runs inference on a single volume of arbitrary patch size, padding it to the conformant size first Arguments: volume {Numpy array} -- 3D array representing the volume Returns: 3D NumPy array with prediction mask """ raise NotImplementedError def single_volume_inference(self, volume): """ Runs inference on a single volume of conformant patch size Arguments: volume {Numpy array} -- 3D array representing the volume Returns: 3D NumPy array with prediction mask """ self.model.eval() # Assuming volume is a numpy array of shape [X,Y,Z] and we need to slice X axis slices = [] # TASK: Write code that will create mask for each slice across the X (0th) dimension. After # that, put all slices into a 3D Numpy array. You can verify if your method is # correct by running it on one of the volumes in your training set and comparing # with the label in 3D Slicer. 
# <YOUR CODE HERE> slices.append(volume[0:1]) arr = np.zeros(volume.shape, dtype=np.float32) for idx, label in enumerate(slices): if label is not np.nan: label = label.split(" ") mask = np.zeros(volume.shape[1] * volume.shape[2], dtype=np.uint8) posit = map(int, label[0::2]) leng = map(int, label[1::2]) for p, l in zip(posit, leng): mask[p:(p+l)] = 1 arr[:, :, idx] = mask.reshape(volume.shape[1], volume.shape[1], order='F') slices = slices.asarray() slices.reshape((-1, slices.shape[0], slices.shape[1])) return slices
31.45679
100
0.600863
332
2,548
4.542169
0.412651
0.039788
0.03382
0.03183
0.202918
0.172414
0.172414
0.172414
0.172414
0.119363
0
0.015393
0.311617
2,548
80
101
31.85
0.844356
0.345761
0
0
0
0
0.003287
0
0
0
0
0.0125
0
1
0.090909
false
0
0.121212
0
0.272727
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
7454dbd88f5a6b8a189be8a7fbf19150aa71b9b0
2,666
py
Python
tests/trans_sec/controller/http_server_flask_tests.py
cablelabs/transparent-security
28d40c2f0c64e5c6ffde173ba17be235ce3f6f57
[ "Apache-2.0" ]
22
2020-01-09T00:19:53.000Z
2022-01-19T18:43:12.000Z
tests/trans_sec/controller/http_server_flask_tests.py
cablelabs/transparent-security
28d40c2f0c64e5c6ffde173ba17be235ce3f6f57
[ "Apache-2.0" ]
334
2020-01-06T19:49:45.000Z
2021-08-18T16:44:44.000Z
tests/trans_sec/controller/http_server_flask_tests.py
cablelabs/transparent-security
28d40c2f0c64e5c6ffde173ba17be235ce3f6f57
[ "Apache-2.0" ]
4
2020-06-04T20:44:47.000Z
2021-09-09T18:50:41.000Z
# Copyright (c) 2019 Cable Television Laboratories, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Unit tests for http_session.py import time import requests import unittest import logging from trans_sec.controller.http_server_flask import SDNControllerServer logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger('http_server_flask_tests') class HttpSessionTests(unittest.TestCase): """ Unit tests for utility functions in convert.py """ def setUp(self): """ Start HTTP server :return: """ self.http_server = SDNControllerServer(TestSDNController()) self.http_server.start() # TODO - sleeping to wait for the server to start. 
Look at the # http_server class to see if the start() call can bock time.sleep(1) def tearDown(self): self.http_server.stop() def test_agg_attack_url_params(self): # Test attack with params param_attack = { 'src_mac': '00:00:00:00:00', 'dst_ip': '10.1.0.1', 'dst_port': '1234', } ret_val = requests.post(url='http://127.0.0.1:9998/aggAttack', params=param_attack) self.assertEquals(201, ret_val.status_code) json_attack = {'event': param_attack} ret_val = requests.post(url='http://127.0.0.1:9998/aggAttack', json=json_attack) self.assertEquals(201, ret_val.status_code) ret_val = requests.post(url='http://127.0.0.1:9998/aggAttack', json=param_attack) self.assertEquals(201, ret_val.status_code) class TestSDNController: def __init__(self): pass @staticmethod def add_attacker(body): logging.info('Adding an attacker - [%s]', body) @staticmethod def remove_attacker(body): logging.info('Removing an attacker - [%s]', body) @staticmethod def add_agg_attacker(body): logging.info('Adding an attacker - [%s]', body) @staticmethod def remove_agg_attacker(body): logging.info('Removing an attacker - [%s]', body)
29.955056
74
0.650038
345
2,666
4.898551
0.428986
0.04142
0.04497
0.054438
0.305325
0.301775
0.292899
0.292899
0.268639
0.159763
0
0.0335
0.249812
2,666
88
75
30.295455
0.8115
0.307952
0
0.311111
0
0
0.152552
0.0129
0
0
0
0.011364
0.066667
1
0.177778
false
0.022222
0.111111
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
7455ff3dcda7a5add726f7de0d09df9484ff450a
764
py
Python
ccgpack/curvelet.py
vafaei-ar/ccgpack
73cf1d644f747fd2251ed9256cae740e6b052da7
[ "BSD-3-Clause" ]
3
2019-04-15T08:40:32.000Z
2019-04-18T22:06:29.000Z
ccgpack/curvelet.py
vafaei-ar/ccgpack
73cf1d644f747fd2251ed9256cae740e6b052da7
[ "BSD-3-Clause" ]
null
null
null
ccgpack/curvelet.py
vafaei-ar/ccgpack
73cf1d644f747fd2251ed9256cae740e6b052da7
[ "BSD-3-Clause" ]
2
2019-04-15T08:41:35.000Z
2021-10-02T08:24:22.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function import ctypes import numpy as np import os path = '/'.join(os.path.abspath(__file__).split('/')[:-1]) fftw_path = 'cpp_src/fftw-2.1.5/fftw/.libs/libfftw.so.2' crv_path = 'cpp_src/curvelet.so' ctypes.cdll.LoadLibrary(os.path.join(path,fftw_path)) curlib = ctypes.cdll.LoadLibrary(os.path.join(path,crv_path)) def curvelet(m,r_scale,n_scales=7,n_wedges=10,ac=1): assert r_scale<=n_scales,'Incompatible scale request.' m = np.array(m, dtype=np.double) nx = m.shape[0] ny = m.shape[1] aptr = m.ctypes.data_as(ctypes.POINTER(ctypes.POINTER(ctypes.c_double))) curlib.curvelet(aptr,nx,ny,n_scales,r_scale-1,n_wedges,ac) return m
30.56
76
0.739529
131
764
4.053435
0.427481
0.045198
0.090395
0.086629
0.131827
0.131827
0.131827
0
0
0
0
0.017751
0.115183
764
24
77
31.833333
0.767751
0
0
0
0
0
0.117955
0.055046
0
0
0
0
0.052632
1
0.052632
false
0
0.315789
0
0.421053
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
74567795a187febc4b7818b1e7dc40331bc56c8b
296
py
Python
django_openapi/utils.py
tokikanno/openapi-router
884463d43eb472dce3d5fd835390f676f8a672f5
[ "MIT" ]
19
2020-08-03T06:51:54.000Z
2022-03-15T14:45:10.000Z
django_openapi/utils.py
tokikanno/openapi-router
884463d43eb472dce3d5fd835390f676f8a672f5
[ "MIT" ]
3
2021-01-13T10:01:07.000Z
2021-04-29T02:17:41.000Z
django_openapi/utils.py
tokikanno/openapi-router
884463d43eb472dce3d5fd835390f676f8a672f5
[ "MIT" ]
4
2020-12-20T14:50:29.000Z
2021-12-14T21:17:46.000Z
# -*- coding:utf-8 -*- from __future__ import division from __future__ import print_function from __future__ import unicode_literals from django.http import JsonResponse def json_response(data, status_code=200): resp = JsonResponse(data) resp.status_code = status_code return resp
22.769231
41
0.777027
39
296
5.435897
0.589744
0.141509
0.226415
0
0
0
0
0
0
0
0
0.016
0.155405
296
12
42
24.666667
0.832
0.067568
0
0
0
0
0
0
0
0
0
0
0
1
0.125
false
0
0.5
0
0.75
0.125
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
3
74572a2aa855eb547d5395986ea7bf0a29902db0
3,058
py
Python
David S/Textswapping/texteditor.py
daaronr/dr_text_md_tools
7d74a95c9f3cfd0f66d46f9bee23878e8e783da8
[ "BSD-2-Clause" ]
null
null
null
David S/Textswapping/texteditor.py
daaronr/dr_text_md_tools
7d74a95c9f3cfd0f66d46f9bee23878e8e783da8
[ "BSD-2-Clause" ]
null
null
null
David S/Textswapping/texteditor.py
daaronr/dr_text_md_tools
7d74a95c9f3cfd0f66d46f9bee23878e8e783da8
[ "BSD-2-Clause" ]
null
null
null
#texteditor.py #Description: uses csv-format file 'dictionary' to make a series of replacements in 'oldfile' and create 'newfile' #Execute as "python texteditor.py oldfile dictionary" #Creates 'newfile' #see also README.md import os #tells python to use the module 'os' import sys #tells python to use the module 'sys' used to do one line command in the terminal #changes to a particular directory #Todo: edit this so the program can be called anywhere, look for files in the given directory or based on input arglist = sys.argv #creates a list which stores the variables which are input directly in the one-line command in the terminal oldfile = str(arglist[1]) newfile = str(arglist[2]) dic = str(arglist[3]) colonne = int(arglist[4]) chosenDirectory = str(arglist[5]) #takes as variable the elements of list 'arglist' #Todo: default 'newfile=oldfile' if no third argument os.chdir(chosenDirectory) #Run this with parameters in commandline ... python texteditor1.py oldfilename newfilename substitutionlistfilename columnNumberinCSV directoryofNewfile(+ other parameter perhaps) mydictionary = open(dic,'rb') data = csv.reader(mydictionnary) d={} # creates a dictionary called d colonne = colonne - 1 for line in mydictionary: x=line.split(',') a = x[0] b = x[colonne] for p in b: b = b.replace('+' ,',') #no presentation errors eg. new line or extra spaces #DR: Later on we may want to allow quoted strings containing characters like newline #DR: I'm not sure if we want this 'cleanup' -- is it necessary? 
#DR: Also, we need a way to allow *intended* commas in the dictionary file itself; perhaps "\," suffices d[a]=b # x is a string which contains the whole line # x[0] is the first word of the line x[1] is the second # d[a]=b appends in the dictionary (d) the old and new word in the form d{'oldword' = newword} mydictionary.close() thefile = open(oldfile,'r') #opens oldfile, read mode content = thefile.read() #copies everything in the file and assigns it to the string variable 'content' for k, v in d.items(): content = content.replace(k ,v) #makes requested substitutions using the dictionary d #DR: How does this command work? Also, why did you choose the letters 'k' and 'v' #to change the existing file this is the code : #DR: For future work, let's allow a parameter to the command that specifies whether it edits in place or creates a new file; #... or even better, have it edit in place if the new and old filenames and locations are identical ''' thefile.close() #closes the file to allow edits thefile = open(file1,'w') #opens the file in the writing mode thefile.truncate() #clears all text from thefile thefile.write(content) #puts in the text from the string variable 'content' thefile.close() #need to close the file for some reason ''' #to create a new file this is the code: mynewfile = open(newfile,'w+') #mynewfile = open("newfile.txt",'w+') #DR: can we have this file named based on the third input argument? mynewfile.write(content) mynewfile.close()
35.149425
179
0.733813
493
3,058
4.551724
0.434077
0.022282
0.011586
0.01426
0.061497
0.046346
0
0
0
0
0
0.004374
0.177567
3,058
87
180
35.149425
0.887873
0.658927
0
0
0
0
0.011189
0
0
0
0
0.011494
0
1
0
false
0
0.071429
0
0.071429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
745999d132ff9aeafd110c46482e9bd04fc9b1a8
9,832
py
Python
server/app.py
meowterspace/mission_control
c98ff78c9d1e6ad9c70e60f83b8fb2fa6af32887
[ "MIT" ]
null
null
null
server/app.py
meowterspace/mission_control
c98ff78c9d1e6ad9c70e60f83b8fb2fa6af32887
[ "MIT" ]
4
2017-10-31T17:26:11.000Z
2018-01-12T22:11:03.000Z
server/app.py
meowterspace/mission_control
c98ff78c9d1e6ad9c70e60f83b8fb2fa6af32887
[ "MIT" ]
null
null
null
# Import all external libraries import json import datetime import time import os import io import errno from flask import Flask, render_template, send_file, request, session, redirect, render_template_string from flask_socketio import SocketIO, emit, send from threading import Thread, Lock import resources import sys print(sys.argv) # This is a dictionary of all the settings Flask has. Since I have a complex setup it is best to have the # config ready at all times so I can quickly change server settings. This dictionary is a default dictionary # and will be overwritten when the server starts with the settings specified in CONFIX.txt CONFIG = { 'JSON_AS_ASCII': True, 'USE_X_SENDFILE': False, 'SESSION_COOKIE_PATH': None, 'SESSION_COOKIE_DOMAIN': None, 'SESSION_COOKIE_NAME': 'session', 'DEBUG': False, 'LOGGER_HANDLER_POLICY': 'always', 'LOGGER_NAME': None, 'SESSION_COOKIE_SECURE': False, 'SECRET_KEY': None, 'EXPLAIN_TEMPLATE_LOADING': False, 'MAX_CONTENT_LENGTH': None, 'PROPAGATE_EXCEPTIONS': None, 'APPLICATION_ROOT': None, 'SERVER_NAME': None, 'PREFERRED_URL_SCHEME': 'http', 'JSONIFY_PRETTYPRINT_REGULAR': True, 'TESTING': False, 'PERMANENT_SESSION_LIFETIME': datetime.timedelta(31), 'TEMPLATES_AUTO_RELOAD': None, 'TRAP_BAD_REQUEST_ERRORS': False, 'JSON_SORT_KEYS': True, 'JSONIFY_MIMETYPE': 'application/json', 'SESSION_COOKIE_HTTPONLY': True, 'SEND_FILE_MAX_AGE_DEFAULT': datetime.timedelta(0, 43200), 'PRESERVE_CONTEXT_ON_EXCEPTION': None, 'SESSION_REFRESH_EACH_REQUEST': True, 'TRAP_HTTP_EXCEPTIONS': False } # This library contains game data, such as the name, and whether or not the game is active. A lot of this isn't # currently implimented however it outlines a plan of future development. Some elements like USER_LIST and # ACTIVE are used throughout the program. 
GAME = { 'GAME_NAME': None, 'DESCRIPTION': None, 'DIFFICULTY': 0, 'ONLINE_MODE': True, 'WHITE_LIST': False, 'BLACK_LIST': False, 'CHEATS': False, 'MAX_GAME_LENGTH': None, 'BANNED_USERNAMES': False, 'SCINARIO': False, 'ACTIVE': False, 'USER_LIST': [] } users = [] # A list of all the current users in the game. # This defines the user class. class User: def __init__(self, name): self.name = name #The code below is responsible for loading in the server settings from CONFIG.txt if os.path.exists('CONFIG.txt') == True: # checks if CONFIG.txt exists yet with open('CONFIG.txt', 'r') as f: # if it does exist, open the file for line in f.readlines(): line_split = line.strip().split("=") # For every line, split the left and right half at the = sign CONFIG[line_split[0].strip()] = line_split[1].strip() # Update CONFIG dictionary f.close() # close CONFIG.txt else: # if it doesn't exit with open('CONFIG.txt', 'w') as f: # Open CONFIG.txt (generates file) for key in CONFIG: f.write(key+' = '+str(CONFIG[key])+'\n') # write the default config to the file f.close() # Close new file # Flask server setup app = Flask(__name__, template_folder='../client') # Defines the Flask Server. The template_folder is set to # ../client (the client folder) which means the browser has # no access to the server at all as it is in a different # directory. 
app.config['SECRET_KEY'] = 'secret' # Sets the server encryption key app.config['DEBUG'] = CONFIG['DEBUG'] # Sets the server settings to equal CONFIG for i in app.config: print(i, app.config[i]) # prints the new server CONFIG to the terminal socketio = SocketIO(app) # Defines SOCKETIO app # Below is a meta data dictionary that is to updated and sent with each websocket message # to the client so the message can be verified for debugging / tracking purposes, or # can be used to measure information loss in the websocket stream meta = { 'time' : '', #current Time 'zone' : 'GMT', #timezone 'serv' : '', #time the server's been active 'uuid' : '' #unique message id } start = datetime.datetime.now() # This represents the time the server started #================== RUN ================================= player = resources.setup() # Calls the setup function in resources that returns a player object #================== APP ROUTES - FLASK ================================= # This is the index route. If a user navigates to {ip}:{port}/ then this route will be run. # It is set to render the index.html page when the GET request is recieved. @app.route('/') def index(): return render_template('index.html') # This is the login route. When a user submits their name in /index.html, before joining # it is directed to /login. This route takes the data POSTed to it and assigns is to the # client specific session variable. The session is now indentifiable with that username. @app.route('/login', methods=['POST']) def login(): if request.method == "POST": session['username'] = request.form['username'] username = session['username'] exec(str(username)+"=User('"+str(username)+"')") # this creates a new user class in the name of # the user POSTed to the server. 
print(session['username']) exec("print("+str(username)+".name)") GAME['USER_LIST'].append(username) # Adds the name to the list of current users print(session) return redirect('/lobby') # redirects the client's page to /lobby return redirect(url_for('/')) # If nothing is posted to /login, send back to the index. @app.route('/lobby') def lobby(): if session['username'] != None: # If the user has logged in, this will render the lobby.html # webpage. return render_template('lobby.html') # This route is required if an image is requested from the server. Since the server can't send # images or files over HTTP normally it has to encode it into byte data first and send it as a # string where the browser will automatically decode it at the other end because it knows the # mimetype to be image. @app.route('/resources/img/<path:path>') def img_route(path): ext = os.path.splitext(path[-1].lower()) if ext == '.jpg' or '.png' or '.gif': with open('../client/resources/img/'+str(path), 'rb') as bites: return send_file( io.BytesIO(bites.read()), mimetype='image/'+str(ext) ) # This route is for anything else that hasn't been listed above. E.g Javascript/css files in # the client folder. It takes the path and if the path exists, it will return what ever is # at that path location within /client folder. 
If there is nothing there it returns 404 @app.route('/<path:path>') def route(path): if os.path.exists('../client/'+str(path)) == True: return render_template('/'+str(path)) else: return "ERROR 404: "+str(path)+" doesn't exist" #================== APP ROUTES - SOCKETIO ============================== lock = Lock() # defines multithreading lock # This socketIO decorator defines what happens when a websocket message is recieved on the # open websocket channel ('/') @socketio.on('message') def handle_message(message): meta['time'] = str(datetime.datetime.now()) # updates the meta time meta['serv'] = str(datetime.datetime.now()-start) # updates the meta server time meta['uuid'] = str(uuid.uuid4()) # gives the meta a Unique ID to_send = {} to_send.update(meta) # The below packages the meta data with the data from the server so it can all be # sent in one message rather than many different confusing messages that could # get lost or fall out of time. The multithreading Lock is needed because the # calculation thread is running seperate and both threads are unable to access # the same information at the same time, so the calculation thread is very # quickly locked while the latest data is pulled and then unlocked. with lock: to_send.update(resources.data) send(to_send) # send data via websockets to whoever sent the origional message # This socketIO decorator defines what happens when a websocket message is recieved on the # /update namespace channel. This channel is used for user input, so it runs the update # function in resources.py with the message (new updated data) as the parameter # so the data can be updated. Again this requires a Thread Lock. 
@socketio.on('message', namespace='/update') def handle_incoming_data(message): print('Incoming Data: '+str(message)) with lock: resources.update(player, message) print(resources.data) print('Data updated') # This socketIo decorator defines what happens when a websocket message is recieved # on the /lobbu channel (from lobby.html). All it needs to do is simply send # the game information back to the client that requested it @socketio.on('message', namespace='/lobbu') def handle_lobby_message(message): send(GAME) #================== THREADS ============================== # This defines the compute thread. This runs seperately from everything else # in this script so that it can run in real time and wont be interruped by any # other processes. def compute(time): while True: for i in resources.OBJECTS: # This is for gravity purposes. It runs the calculation against if (i[1] == 'planet'): resources.run(i[0], player, time) # every object in the game (bar the player) global resources.data time.sleep(time) # waits interval before running calculations again. if __name__ == '__main__': compute_thread = Thread(target=compute, args=(0.1)) # Here the thread is actually set up with the parameter 0.1 second delay compute_thread.start() # Starts the thread socketio.run(app) # Starts the server app
40.79668
126
0.68572
1,424
9,832
4.660815
0.291433
0.016272
0.007684
0.012656
0.033901
0.033901
0.033901
0.033901
0.033901
0.033901
0
0.003169
0.197518
9,832
240
127
40.966667
0.838023
0.508849
0
0.040268
0
0
0.230217
0.071534
0
0
0
0
0
0
null
null
0
0.073826
null
null
0.053691
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
745b09ab868c2f65731e7156a385d464a997846f
5,676
py
Python
ndd/counts.py
simomarsili/ndd
3a8f8f80116ddaf8666dd13b246a04c9806447a7
[ "BSD-3-Clause" ]
34
2017-01-25T21:42:07.000Z
2022-03-05T02:12:11.000Z
ndd/counts.py
simomarsili/ndd
3a8f8f80116ddaf8666dd13b246a04c9806447a7
[ "BSD-3-Clause" ]
4
2018-06-22T19:15:33.000Z
2020-05-06T12:37:24.000Z
ndd/counts.py
simomarsili/ndd
3a8f8f80116ddaf8666dd13b246a04c9806447a7
[ "BSD-3-Clause" ]
2
2019-07-31T07:53:02.000Z
2019-07-31T07:53:22.000Z
# -*- coding: utf-8 -*- """CountsDistribution class.""" import json import logging from collections.abc import Mapping, MappingView from types import GeneratorType import numpy import ndd.fnsb from ndd.exceptions import NddError logger = logging.getLogger(__name__) def unique(nk, sort=True): """Return nk, zk""" counter = ndd.fnsb.counter counter.fit(nk) nk = counter.nk zk = counter.zk unique.counter = counter # always return a copy if sort: ids = numpy.argsort(nk) nk = nk[ids] zk = zk[ids] else: nk = numpy.array(nk) zk = numpy.array(zk) return nk, zk def as_counts_array(counts): """Convert input to counts array.""" if isinstance(counts, (Mapping, MappingView)): return numpy.fromiter(counts.values(), dtype=int) if isinstance(counts, (GeneratorType, map, filter)): return numpy.fromiter(counts, dtype=int) return numpy.asarray(counts) def check_k(k): """ if k is an integer, just check if an array set k = prod(k) if None, return Raises ------ NddError If k is not valid (wrong type, negative, too large...) """ MAX_LOGK = 200 * numpy.log(2) if k is None: return k try: k = numpy.float64(k) except ValueError: raise NddError('%r is not a valid cardinality' % k) if k.ndim: # if k is a sequence, set k = prod(k) if k.ndim > 1: raise NddError('k must be a scalar or 1D array') logk = numpy.sum(numpy.log(x) for x in k) if logk > MAX_LOGK: # too large a number; backoff to n_bins? # TODO: log warning raise NddError('k is too large (%e).' 'Must be < 2^200 ' % numpy.exp(logk)) k = numpy.prod(k) else: # if a scalar check size if k <= 0: raise NddError('k must be > 0 (%r)' % k) if numpy.log(k) > MAX_LOGK: raise NddError('k is too large (%e).' 'Must be < 2^200 ' % k) if not k.is_integer(): raise NddError('k must be a whole number (got %r).' % k) return k class CountsDistribution: """ Contains counts data and statistics. Parameters ---------- nk : array-like Unique frequencies in a counts array. zk : array_like, optional Frequencies distribution or "multiplicities". 
Must be len(zk) == len(nk). k : int or array-like, optional Alphabet size (the number of bins with non-zero probability). Must be >= len(nk). A float is a valid input for whole numbers (e.g. k=1.e3). If an array, set k = numpy.prod(k). Default: k = sum(nk > 0) """ def __init__(self, *, nk=None, zk=None, k=None): self.nk = None self.k = None self.zk = None self._n = None self._k1 = None self.counts = None if (nk is None) != (zk is None): raise NddError('nk and zk should be passed together.') if nk is not None: self.nk = as_counts_array(nk) self.zk = as_counts_array(zk) self._n = numpy.sum(self.zk * self.nk) self._k1 = numpy.sum(self.zk[self.nk > 0]) if k is not None: self.k = check_k(k) def __repr__(self): return 'CountsDistribution(nk=%r, k=%r, zk=%r)' % (self.nk, self.k, self.zk) def __str__(self): return json.dumps( { 'nk': [int(x) for x in self.nk], 'k': self.k, 'zk': [int(x) for x in self.zk] }, indent=4) def fit(self, counts): """Fit nk, zk (multiplicities) from counts array.""" counts = as_counts_array(counts) self.nk, self.zk = unique(counts) self._n = numpy.sum(self.zk * self.nk) self._k1 = numpy.sum(self.zk[self.nk > 0]) return self @property def normalized(self): """CountsDistribution are normalized.""" if self.nk is None: return False return (len(self.nk) == 1 and self.nk[0] == 0 and numpy.isclose(sum(self.nk), 1)) def random(self, k=1000, n=100): """Generate random counts and fit multiplicities.""" a = numpy.random.randint(k, size=n) _, self.counts = numpy.unique(a, return_counts=1) self.nk, self.zk = numpy.unique(self.counts, return_counts=1) return self @staticmethod def sorted_are_equal(a, b): """True if sorted arrays are equal.""" def int_sort(x): return sorted(x.astype(numpy.int32)) return int_sort(a) == int_sort(b) def __eq__(self, other): return (self.sorted_are_equal(self.nk, other.nk) and self.sorted_are_equal(self.zk, other.zk)) @property def n(self): """Number of samples""" if self._n is None: self._n = numpy.sum(self.zk * self.nk) return 
self._n @property def k1(self): """Number of bins with counts > 0.""" if self._k1 is None: self._k1 = numpy.sum(self.zk[self.nk > 0]) return self._k1 @property def coincidences(self): """Number of coincidences.""" return self.n - self.k1 @property def sampling_ratio(self): """The strongly undersampled regime is defined as ratio < 0.1""" return self.coincidences / self.n @property def multiplicities(self): """Return counts and their frequencies as (counts, frequencies).""" return self.nk, self.zk
28.522613
75
0.555673
779
5,676
3.970475
0.215661
0.036857
0.023278
0.027158
0.137084
0.102166
0.079534
0.079534
0.071452
0.071452
0
0.013552
0.323996
5,676
198
76
28.666667
0.792546
0.21494
0
0.145161
0
0
0.061129
0.005833
0
0
0
0.005051
0
1
0.137097
false
0.008065
0.056452
0.032258
0.362903
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
745b6a06e7b05b2a82df687d97773686b7b6c978
1,999
py
Python
events-svc/app/actions/event_actions.py
zerodevgroup/fastapi-referece-architecture
185e4b3036799fec2044dd415022ba0760b5bb45
[ "MIT" ]
null
null
null
events-svc/app/actions/event_actions.py
zerodevgroup/fastapi-referece-architecture
185e4b3036799fec2044dd415022ba0760b5bb45
[ "MIT" ]
null
null
null
events-svc/app/actions/event_actions.py
zerodevgroup/fastapi-referece-architecture
185e4b3036799fec2044dd415022ba0760b5bb45
[ "MIT" ]
null
null
null
import aiohttp import asyncio import base64 import os from events import EventIn, EventOut, EventUpdate from fastapi.logger import logger from db import events, database STRIPE_CHARGES_URL = os.getenv("STRIPE_CHARGES_URL") STRIPE_API_KEY = os.getenv("STRIPE_API_KEY") async def add_event(payload: EventIn): logger.debug(f"Service: Adding event with {payload}") query = events.insert().values(**payload.dict()) return await database.execute(query=query) async def get_all_events(): logger.debug(f"Service: Getting all events") query = events.select() return await database.fetch_all(query=query) async def get_event(id): logger.debug(f"Service: Getting event {id}") query = events.select(events.c.id==id) return await database.fetch_one(query=query) async def delete_event(id: int): logger.debug(f"Service: Deleting event {id}") query = events.delete().where(events.c.id==id) return await database.execute(query=query) async def update_event(id: int, payload: EventIn): logger.debug(f"Service: Updating event {id} with {payload}") query = ( events .update() .where(events.c.id == id) .values(**payload.dict()) ) return await database.execute(query=query) async def add_stripe_payment(payload: PaymentIn): logger.debug(f"Service: Adding stripe payment with {payload}") # Convert amount to stripe (implied decimals) stripeAmount = int(payload.amount * 100) stripe_payload = { "amount": stripeAmount, "currency": payload.currency, "source": payload.source, "description": payload.description, } authorizationToken = base64.b64encode(f"{STRIPE_API_KEY}:".encode()) headers = {"Authorization": "Basic " + "".join(chr(x) for x in authorizationToken)} async with aiohttp.ClientSession() as session: async with session.post(STRIPE_CHARGES_URL, data=stripe_payload, headers=headers) as resp: return await resp.text()
32.241935
98
0.69935
257
1,999
5.346304
0.315175
0.034935
0.052402
0.082969
0.291849
0.19869
0.150655
0.120815
0.088792
0.088792
0
0.005505
0.182091
1,999
61
99
32.770492
0.834862
0.021511
0
0.0625
0
0
0.15609
0
0
0
0
0
0
1
0
false
0
0.145833
0
0.270833
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
745ce16b8c6283b71d3e3aefb6237ab8e9a44ba5
776
py
Python
datatools/test/number.py
hayj/DataTools
a779a533afe8e176c33b91d17974468ef7c6b17e
[ "MIT" ]
null
null
null
datatools/test/number.py
hayj/DataTools
a779a533afe8e176c33b91d17974468ef7c6b17e
[ "MIT" ]
null
null
null
datatools/test/number.py
hayj/DataTools
a779a533afe8e176c33b91d17974468ef7c6b17e
[ "MIT" ]
null
null
null
# coding: utf-8 from fr.hayj.util.number import *; from fr.hayj.util.duration import *; import unittest import re # The level allow the unit test execution to choose only the top level test unittestLevel = 1 if unittestLevel <= 1: class UtilTest(unittest.TestCase): def test1(self): self.assertTrue(truncateFloat(0.00002000002, 2) == 0.00) self.assertTrue(truncateFloat(0.00002000002, 8) == 0.00002) self.assertTrue(truncateFloat(0.00002000002, 20) == 0.00002000002) self.assertTrue(truncateFloat(0.02, 8) == 0.02) self.assertTrue(truncateFloat(0.02, 1) == 0.0) self.assertTrue(truncateFloat(5e-5, 1) == 0.0) self.assertTrue(truncateFloat(5e-5, 10) == 0.00005)
32.333333
78
0.640464
101
776
4.920792
0.435644
0.197183
0.380282
0.28169
0.488934
0.132797
0.132797
0.132797
0
0
0
0.157095
0.237113
776
23
79
33.73913
0.682432
0.112113
0
0
0
0
0
0
0
0
0
0
0.466667
1
0.066667
false
0
0.266667
0
0.4
0
0
0
0
null
0
1
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
3
745d592616ed0c1497992e114b4112aa3cdca55c
1,006
py
Python
backend/server_flask.py
sigmarising/Multimedia-Homework-website
38fdf0c2c539b2d02d4062c55982f1e9df5f5ef4
[ "MIT" ]
null
null
null
backend/server_flask.py
sigmarising/Multimedia-Homework-website
38fdf0c2c539b2d02d4062c55982f1e9df5f5ef4
[ "MIT" ]
null
null
null
backend/server_flask.py
sigmarising/Multimedia-Homework-website
38fdf0c2c539b2d02d4062c55982f1e9df5f5ef4
[ "MIT" ]
null
null
null
import os from flask import Flask, send_from_directory, request, jsonify # Path settings SERVER_PATH = os.path.abspath(os.path.dirname(__file__)) FTP_PATH = os.path.abspath(os.path.join(SERVER_PATH, 'ftp/')) # Flask app settings app = Flask(__name__) app.config['JSON_AS_ASCII'] = False @app.route('/api/getFilesList', methods=['POST', 'GET']) def api_get_files_list(): files_list = os.listdir(FTP_PATH) files_list.sort() return jsonify({"list": files_list}) @app.route('/api/uploadFile', methods=['POST', 'GET']) def api_upload_file(): file = request.files['file'] file_path = os.path.abspath(os.path.join(FTP_PATH, file.filename)) file.save(file_path) return jsonify({}) @app.route('/api/downloadFile/<path:filename>', methods=['POST', 'GET']) def api_download_file(filename): return send_from_directory(FTP_PATH, filename, as_attachment=True) if __name__ == "__main__": if not os.path.exists(FTP_PATH): os.makedirs(FTP_PATH) app.run(debug=True)
27.189189
72
0.709742
146
1,006
4.59589
0.342466
0.062593
0.044709
0.076006
0.204173
0.114754
0.080477
0
0
0
0
0
0.131213
1,006
36
73
27.944444
0.767735
0.031809
0
0
0
0
0.122554
0.033986
0
0
0
0
0
1
0.125
false
0
0.083333
0.041667
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
745efcdb285c929a84a0e99a3d292fa8280b80d9
20
py
Python
django_project/users/migrations/__init__.py
Manish-Sharma-1810/My-Django-Blog-App
9dc791bd11c9ff0e3eea21125dd127e8c2435e00
[ "MIT" ]
null
null
null
django_project/users/migrations/__init__.py
Manish-Sharma-1810/My-Django-Blog-App
9dc791bd11c9ff0e3eea21125dd127e8c2435e00
[ "MIT" ]
null
null
null
django_project/users/migrations/__init__.py
Manish-Sharma-1810/My-Django-Blog-App
9dc791bd11c9ff0e3eea21125dd127e8c2435e00
[ "MIT" ]
1
2021-12-20T14:15:47.000Z
2021-12-20T14:15:47.000Z
# This file is empty
20
20
0.75
4
20
3.75
1
0
0
0
0
0
0
0
0
0
0
0
0.2
20
1
20
20
0.9375
0.9
0
null
0
null
0
0
null
0
0
0
null
1
null
true
0
0
null
null
null
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
5
745fc74246c6c357dfc1fd1dc49a06020766a990
1,240
py
Python
Python/468.py
JWang169/LintCodeJava
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
[ "CNRI-Python" ]
1
2020-12-10T05:36:15.000Z
2020-12-10T05:36:15.000Z
Python/468.py
JWang169/LintCodeJava
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
[ "CNRI-Python" ]
null
null
null
Python/468.py
JWang169/LintCodeJava
b75b06fa1551f5e4d8a559ef64e1ac29db79c083
[ "CNRI-Python" ]
3
2020-04-06T05:55:08.000Z
2021-08-29T14:26:54.000Z
class Solution: def validIPAddress(self, IP: str) -> str: # if not IP: # return "Neither" if len(IP.split('.')) == 4: ips = IP.split('.') return self.validIPv4(ips) elif len(IP.split(':')) == 8: ips = IP.split(':') return self.validIPv6(ips) return "Neither" def validIPv4(self, ips): for number in ips: # no leading zero if len(number) > 1 and number[0] == '0': return "Neither" # 0 to 255 if number.isdigit() and 0 <= int(number) <= 255: continue else: return "Neither" return "IPv4" def validIPv6(self, ips): hexSet = set(['a', 'b', 'c', 'd', 'e', 'f']) for numbers in ips: # empty group or too long if len(numbers) == 0 or len(numbers) > 4: return "Neither" for idx, num in enumerate(numbers): if not num.isdigit() and num.lower() not in hexSet: print("char not valid") return "Neither" return "IPv6"
28.837209
67
0.429032
132
1,240
4.030303
0.431818
0.146617
0.037594
0.06015
0.075188
0
0
0
0
0
0
0.030882
0.451613
1,240
43
68
28.837209
0.751471
0.064516
0
0.178571
0
0
0.058874
0
0
0
0
0
0
1
0.107143
false
0
0
0
0.464286
0.035714
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7461e3bcb8e9c632b051701ae54e04f11972850a
1,158
py
Python
buzzmobile/process/bearing/calculate_directions.py
gtagency/buzzmobile
8f7215d35b3b7fe3c6ca6419f0123ba18e8aca0a
[ "MIT" ]
25
2015-02-18T04:15:09.000Z
2019-12-11T14:29:02.000Z
buzzmobile/process/bearing/calculate_directions.py
gtagency/buzzmobile
8f7215d35b3b7fe3c6ca6419f0123ba18e8aca0a
[ "MIT" ]
173
2016-09-16T05:34:27.000Z
2017-11-15T07:31:53.000Z
buzzmobile/process/bearing/calculate_directions.py
gtagency/buzzmobile
8f7215d35b3b7fe3c6ca6419f0123ba18e8aca0a
[ "MIT" ]
4
2015-01-18T15:46:35.000Z
2017-04-08T00:39:05.000Z
"""Utilities for calculating directions and distances given coords.""" import math EARTH_RADIUS = 6.3710088e6 def get_distance(fix1, fix2): """Calculates great-circle distance between two positions in meters.""" lat1 = math.radians(fix1.latitude) lon1 = math.radians(fix1.longitude) lat2 = math.radians(fix2.latitude) lon2 = math.radians(fix2.longitude) angle = (math.pow(math.sin((lat2 - lat1) / 2), 2) + math.cos(lat1) * math.cos(lat2) * math.pow(math.sin((lon2 - lon1) / 2), 2)) unit_distance = 2 * math.atan2(math.sqrt(angle), math.sqrt(1 - angle)) return EARTH_RADIUS * unit_distance def get_forward_angle(fix1, fix2): """Calculates forward azimuth between two positions in radians.""" lat1 = math.radians(fix1.latitude) lon1 = math.radians(fix1.longitude) lat2 = math.radians(fix2.latitude) lon2 = math.radians(fix2.longitude) y = math.sin(lon2 - lon1) * math.cos(lat2) x = (math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)) angle = math.atan2(y, x) return (angle + 2 * math.pi) % (2 * math.pi)
35.090909
75
0.648532
160
1,158
4.65
0.30625
0.11828
0.080645
0.056452
0.346774
0.295699
0.295699
0.295699
0.295699
0.295699
0
0.058824
0.207254
1,158
32
76
36.1875
0.751634
0.16494
0
0.363636
0
0
0
0
0
0
0
0
0
1
0.090909
false
0
0.045455
0
0.227273
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
746248a9a6ff0d428ab94f820ef076b18b48282a
123
py
Python
iot_api/user_api/websocket/alerts.py
dolfandringa/rolaguard_backend
d4df7b55fc001aa6e0499edcfa94bf1b1c63b084
[ "Apache-2.0" ]
null
null
null
iot_api/user_api/websocket/alerts.py
dolfandringa/rolaguard_backend
d4df7b55fc001aa6e0499edcfa94bf1b1c63b084
[ "Apache-2.0" ]
7
2020-05-05T20:10:59.000Z
2021-05-26T17:59:24.000Z
iot_api/user_api/websocket/alerts.py
dolfandringa/rolaguard_backend
d4df7b55fc001aa6e0499edcfa94bf1b1c63b084
[ "Apache-2.0" ]
1
2021-01-28T05:54:11.000Z
2021-01-28T05:54:11.000Z
from iot_api import socketio def emit_alert_event(event, recipient): socketio.emit('new_alert', event, room=recipient)
30.75
53
0.788618
18
123
5.166667
0.666667
0.215054
0
0
0
0
0
0
0
0
0
0
0.113821
123
4
53
30.75
0.853211
0
0
0
0
0
0.072581
0
0
0
0
0
0
1
0.333333
false
0
0.333333
0
0.666667
0
1
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
1
0
1
0
0
6
746481340991d95d9f71098fa29212550353d414
4,968
py
Python
chocolate/connection/pandas.py
Intelecy/chocolate
0ba4f6f0130eab851d32d5534241c8cac3f6666e
[ "BSD-3-Clause" ]
105
2017-10-27T02:14:22.000Z
2022-01-13T12:57:05.000Z
chocolate/connection/pandas.py
Intelecy/chocolate
0ba4f6f0130eab851d32d5534241c8cac3f6666e
[ "BSD-3-Clause" ]
31
2017-10-03T13:41:35.000Z
2021-08-20T21:01:29.000Z
chocolate/connection/pandas.py
areeh/chocolate
5f946cb9daf42c3ab44508648917d46bc105c2fc
[ "BSD-3-Clause" ]
38
2017-10-05T20:19:42.000Z
2022-03-28T11:34:04.000Z
from contextlib import contextmanager import pickle import numpy import pandas from ..base import Connection class DataFrameConnection(Connection): """Connection to a pandas DataFrame. This connection is meant when it is not possible to use the file system or other type of traditional database (e.g. a `Kaggle <http://kaggle.com>`_ scripts) and absolutely not in concurrent processes. In fact, using this connection in different processes will result in two independent searches **not** sharing any information. Args: from_file: The name of a file containing a pickled data frame connection. Using this connection requires small adjustments to the proposed main script. When the main process finishes, all data will vanish if not explicitly writen to disk. Thus, instead of doing a single evaluation, the main process will incorporate a loop calling the search/sample ``next`` method multiple times. Additionally, at the end of the experiment, either extract the best configuration using :meth:`results_as_dataframe` or write all the data using :mod:`pickle`. """ def __init__(self, from_file=None): if from_file is not None: with open(from_file, "rb") as f: conn = pickle.load(f.read()) if type(conn) != DataFrameConnection: raise TypeError("Unpickled connection is not of type DataFrameConnection") self.results = conn.results self.complementary = conn.complementary self.space = conn.space else: self.results = pandas.DataFrame() self.complementary = pandas.DataFrame() self.space = None @contextmanager def lock(self, *args, **kwargs): """This function does not lock anything. Do not use in concurrent processes. """ yield def all_results(self): """Get a list of all entries of the result table. The order is undefined. """ return list(self.results.T.to_dict().values()) def find_results(self, filter): """Get a list of all results associated with *filter*. The order is undefined. 
""" selection = self.results for k, v in filter.items(): selection = selection[selection[k] == v] return list(selection.T.to_dict().values()) def insert_result(self, document): """Insert a new *document* in the result data frame. The columns does not need to be defined nor all present. Any new column will be added to the database and any missing column will get value None. """ self.results = self.results.append(document, ignore_index=True) def update_result(self, document, value): """Update or add *value* of given rows in the result data frame. Args: document: An identifier of the rows to update. value: A mapping of values to update or add. """ size = len(self.results.index) selection = [True] * size for k, v in document.items(): selection = numpy.logical_and(self.results[k] == v, selection) for k, v in value.items(): if not k in self.results: self.results[k] = pandas.Series([None] * size) self.results.loc[selection, k] = v def count_results(self): """Get the total number of entries in the result table.""" return len(self.results.index) def all_complementary(self): """Get all entries of the complementary information table as a list. The order is undefined. """ return list(self.complementary.T.to_dict().values()) def insert_complementary(self, document): """Insert a new document (row) in the complementary information data frame.""" self.complementary = self.complementary.append(document, ignore_index=True) def find_complementary(self, filter): """Find a document (row) from the complementary information data frame.""" selection = self.complementary for k, v in filter.items(): selection = selection[selection[k] == v] return list(selection.T.to_dict().values())[0] def get_space(self): """Returns the space used for previous experiments.""" return self.space def insert_space(self, space): """Insert a space in the database. Raises: AssertionError: If a space is already present. 
""" assert self.space is None, "Space table cannot contain more than one space, clear table first." self.space = space def clear(self): """Clear all data.""" self.results = pandas.DataFrame() self.complementary = pandas.DataFrame() self.space = None def pop_id(self, document): """Pops the database unique id from the document.""" return document
36.262774
103
0.63748
633
4,968
4.957346
0.308057
0.04557
0.024219
0.016571
0.215105
0.166029
0.116635
0.095602
0.095602
0.095602
0
0.000278
0.275966
4,968
136
104
36.529412
0.872116
0.402375
0
0.16129
0
0
0.045861
0
0
0
0
0
0.016129
1
0.225806
false
0
0.080645
0
0.435484
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
7467690eb0ac6eea07f7e7a2ad3b657c481f13cd
241
py
Python
src/__init__.py
Losintech/python-alpha-orm
01e88c5cf21b881dc670d605b353df8ae52eb83c
[ "MIT" ]
1
2019-12-06T05:18:38.000Z
2019-12-06T05:18:38.000Z
src/__init__.py
Losintech/python-alpha-orm
01e88c5cf21b881dc670d605b353df8ae52eb83c
[ "MIT" ]
null
null
null
src/__init__.py
Losintech/python-alpha-orm
01e88c5cf21b881dc670d605b353df8ae52eb83c
[ "MIT" ]
null
null
null
from alphaorm.AlphaORM import AlphaORM,AlphaRecord AlphaORM.setup('mysql', { 'host' : 'localhost', 'user' : 'root', 'password' : '', 'database' : 'alphaorm' }) m = AlphaORM.create('python') m.name = 'Alpha' m.age = 10 AlphaORM.store(m)
18.538462
50
0.659751
29
241
5.482759
0.689655
0
0
0
0
0
0
0
0
0
0
0.009662
0.141079
241
13
51
18.538462
0.758454
0
0
0
0
0
0.252066
0
0
0
0
0
0
1
0
false
0.090909
0.090909
0
0.090909
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
2
746ab1f26e2827ead488e3722b6ba0e4f14c48ca
523
py
Python
src/proto/player_state_proto.py
edwadli/splendor-ai
0881924bf59539ba307592a2f546d56732359120
[ "Apache-2.0" ]
2
2020-02-02T06:01:23.000Z
2021-09-21T02:49:29.000Z
src/proto/player_state_proto.py
edwadli/splendor-ai
0881924bf59539ba307592a2f546d56732359120
[ "Apache-2.0" ]
3
2019-06-04T07:45:33.000Z
2019-06-07T02:14:27.000Z
src/proto/player_state_proto.py
edwadli/splendor-ai
0881924bf59539ba307592a2f546d56732359120
[ "Apache-2.0" ]
2
2019-05-31T06:52:58.000Z
2019-06-05T19:45:16.000Z
"""Data schema for a player's state.""" import collections PlayerState = collections.namedtuple( "PlayerState", [ # Dictionary of Gems held by player. "gems", # List of purchased DevelopmentCards. "purchased_cards", # List of reserved (non-hidden) DevelopmentCards. "unhidden_reserved_cards", # List of reserved (hidden) DevelopmentCards. Note that reserved cards are # typically hidden when topdecked. "hidden_reserved_cards", # NobleTiles obtained. "noble_tiles", ])
22.73913
78
0.695985
56
523
6.392857
0.607143
0.050279
0.061453
0.106145
0
0
0
0
0
0
0
0
0.210325
523
22
79
23.772727
0.866828
0.535373
0
0
0
0
0.366379
0.189655
0
0
0
0
0
1
0
false
0
0.111111
0
0.111111
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
746ad17c2512eef00d948851110356b09d69b731
4,871
py
Python
sheepdoge/config.py
mattjmcnaughton/sheepdoge
9c028d6f51cb59afcaf25a5680f961ec7e25676b
[ "Apache-2.0" ]
7
2018-03-18T07:25:10.000Z
2022-01-28T17:35:08.000Z
sheepdoge/config.py
mattjmcnaughton/sheepdoge
9c028d6f51cb59afcaf25a5680f961ec7e25676b
[ "Apache-2.0" ]
15
2017-08-19T14:03:10.000Z
2017-12-29T23:22:05.000Z
sheepdoge/config.py
mattjmcnaughton/sheepdoge
9c028d6f51cb59afcaf25a5680f961ec7e25676b
[ "Apache-2.0" ]
null
null
null
from configparser import ConfigParser, NoOptionError from typing import Dict # pylint: disable=unused-import import os from sheepdoge.exception import ( SheepdogeConfigurationAlreadyInitializedException, SheepdogeConfigurationNotInitializedException, ) DEFAULTS = { "kennel_playbook_path": "kennel.yml", "kennel_roles_path": ".kennel_roles", "pupfile_path": "pupfile.yml", "vault_password_file": None, } # type: Dict[str, str] class Config(object): """Config class for which there should only be one instance at anytime. Additionally, we can only set the config values during initialization. Multiple different classes can access this single instance at a time. """ _config = None # type: Config def __init__(self, config_dict): # type: (Dict[str, str]) -> None self._config_dict = config_dict @classmethod def clear_config_singleton(cls): # type: () -> None """Delete the current configuration singleton to allow the initialization of a new one. This method is predominantly used during test. """ cls._config = None @classmethod def get_config_singleton(cls): # type: () -> Config """Return the current config singleton instance. We must initialize the singleton before calling this method. :return: The singleton instance. """ if cls._config is None: raise SheepdogeConfigurationNotInitializedException return cls._config @classmethod def initialize_config_singleton( cls, config_file_contents=None, config_options=None ): # type: (str, Dict[str, str]) -> None """Initialize the config singleton with the proper values. If we specify no additional values during configuration, then the config will contain all defaults. We can, in priority order, pass in the contents of a *.cfg file and a dictionary of options. Typically we derive this dictionary of options from the command line. Finally, after setting all of the base configuration values, we compute additional configuration values which are useful throughout the program. 
:param config_file_contents: The str contents of the .cfg file containing kennel configuration. :param config_options: The dict specifying the highest priority configuration values. """ if cls._config is not None: raise SheepdogeConfigurationAlreadyInitializedException() config_dict = {} # type: Dict[str, str] cls._set_config_default_values(config_dict) if config_file_contents: cls._set_config_file_values(config_dict, config_file_contents) if config_options: cls._set_config_option_values(config_dict, config_options) cls._set_calculated_config_values(config_dict) cls._config = cls(config_dict) @classmethod def _set_config_default_values(cls, config_dict): # type: (Dict[str, str]) -> None """Set defaults for all views here - they will be overwritten in the following steps if necessary. """ config_dict.update(DEFAULTS) @classmethod def _set_config_file_values(cls, config_dict, config_file_contents): # type: (Dict[str, str], str) -> None config_parser = ConfigParser() config_parser.read_string(config_file_contents) kennel_cfg_section = "kennel" for currently_defined_key in config_dict.keys(): try: config_file_value = config_parser.get( kennel_cfg_section, currently_defined_key ) config_dict[currently_defined_key] = config_file_value except NoOptionError: pass # If the value isn't specified, skip @classmethod def _set_config_option_values(cls, config_dict, config_options): # type: (Dict[str, str], Dict[str, str]) -> None config_dict.update(config_options) @classmethod def _set_calculated_config_values(cls, config_dict): # type: (Dict[str, str]) -> None pupfile_path = config_dict["pupfile_path"] pupfile_dir = os.path.dirname(os.path.realpath(pupfile_path)) kennel_roles_path = config_dict["kennel_roles_path"] abs_kennel_roles_dir = os.path.realpath(kennel_roles_path) calculated_config = { "abs_pupfile_dir": pupfile_dir, "abs_kennel_roles_dir": abs_kennel_roles_dir, } config_dict.update(calculated_config) def get(self, key): # type: (str) -> str """Retrieve the value for 
the given configuration key. :param key: One of the available configuration options. """ return self._config_dict[key]
34.062937
76
0.667419
567
4,871
5.488536
0.268078
0.067481
0.02892
0.031491
0.092224
0.040488
0.032776
0.023779
0.023779
0
0
0
0.25929
4,871
142
77
34.302817
0.862528
0.353726
0
0.098592
0
0
0.059107
0
0
0
0
0
0
1
0.126761
false
0.028169
0.056338
0
0.239437
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
746b61a512fe54bf6d58d51dee7f9f6d9529baf8
1,202
py
Python
_argparse.py
zzyyyl/USTC-Helper
6f16361bdead655b799da40e3ecd7758745c2568
[ "MIT" ]
3
2022-03-18T18:24:53.000Z
2022-03-24T01:50:41.000Z
_argparse.py
zzyyyl/USTC-Helper
6f16361bdead655b799da40e3ecd7758745c2568
[ "MIT" ]
null
null
null
_argparse.py
zzyyyl/USTC-Helper
6f16361bdead655b799da40e3ecd7758745c2568
[ "MIT" ]
null
null
null
from argparse import ArgumentParser from USTCHelper import config import json, base64 import os class ArgumentError(Exception): def __init__(self, text): self.text = text def __str__(self): return f"ArgumentError: {self.text}" def ArgParser(): parser = ArgumentParser() parser.add_argument("--daily", help="run your daily schedule", action='store_true') parser.add_argument("-s", "--service", help="service to run", metavar="SERVICE", dest="service") parser.add_argument("--silence", help="run in silence", action='store_true') parser.add_argument("-u", "--username", help="your student ID", metavar="ID", dest="stuid") parser.add_argument("--store-password", help="store password in config", action='store_true') parser.add_argument("--config", help="config for services", metavar="CONF") return parser def ArgConflictCheck(args): if args.daily: if args.service: raise ArgumentError("Conflict arguments: --daily, --service") def ArgInit(args): if args.config: config["in-command"]["state"] = True config["in-command"]["config"] = json.loads(base64.b64decode(args.config.encode()).decode('gbk'))
36.424242
105
0.6797
148
1,202
5.405405
0.412162
0.0675
0.1275
0.07875
0.12
0.12
0
0
0
0
0
0.005964
0.163062
1,202
32
106
37.5625
0.789264
0
0
0
0
0
0.270383
0
0
0
0
0
0
1
0.192308
false
0.038462
0.153846
0.038462
0.461538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
746d2dd92129a5cba7d3203cc33cfadd617835cd
127
py
Python
wandering-warriors/modules/__init__.py
python-discord/code-jam-6
a7eb3b1256ae113c93f0337892c667768e8bc199
[ "MIT" ]
76
2020-01-17T12:09:48.000Z
2022-03-26T19:17:26.000Z
wandering-warriors/modules/__init__.py
1nf1del/code-jam-6
a7eb3b1256ae113c93f0337892c667768e8bc199
[ "MIT" ]
17
2020-01-21T23:13:34.000Z
2020-02-07T00:07:04.000Z
wandering-warriors/modules/__init__.py
1nf1del/code-jam-6
a7eb3b1256ae113c93f0337892c667768e8bc199
[ "MIT" ]
91
2020-01-17T12:01:06.000Z
2022-03-22T20:38:59.000Z
from .abacus import Abacus from .draw_pad import DrawPad from .ledger import Ledger __all__ = ('Abacus', 'DrawPad', 'Ledger')
21.166667
41
0.748031
17
127
5.294118
0.470588
0
0
0
0
0
0
0
0
0
0
0
0.141732
127
5
42
25.4
0.825688
0
0
0
0
0
0.149606
0
0
0
0
0
0
1
0
false
0
0.75
0
0.75
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
4
746f96c13dd49fef6cef2fd1b4ab1513df02e05d
3,920
py
Python
old/summarize-scripts/python/rank_summarizer.py
kedz/cuttsum
992c21192af03fd2ef863f5ab7d10752f75580fa
[ "Apache-2.0" ]
6
2015-09-10T02:22:21.000Z
2021-10-01T16:36:46.000Z
old/summarize-scripts/python/rank_summarizer.py
kedz/cuttsum
992c21192af03fd2ef863f5ab7d10752f75580fa
[ "Apache-2.0" ]
null
null
null
old/summarize-scripts/python/rank_summarizer.py
kedz/cuttsum
992c21192af03fd2ef863f5ab7d10752f75580fa
[ "Apache-2.0" ]
2
2018-04-04T10:44:32.000Z
2021-10-01T16:37:26.000Z
import os import sys import argparse from cuttsum.readers import gold_reader from cuttsum.summarizers import RankSummarizer def main(): args = parse_args() bow_file, lvec_file, sim_file = args[0:3] dbg_sim_mode, odir, n_return, use_temp, penalty_mode = args[3:] if sim_file is None: if dbg_sim_mode == u'max': sim_idx = 3 elif dbg_sim_mode == u'min': sim_idx = 4 elif dbg_sim_mode == u'avg': sim_idx = 5 data_reader = gold_reader(bow_file, lvec_file, sim_idx) else: print "IMPLEMENT SIM FILE LOADER" sys.exit() ts_system = RankSummarizer(use_temp, vec_dims=100) ts_system.run(data_reader, odir, n_return, penalty_mode) print "Run complete!" def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-b', '--bow-file', help=u'BOW file', type=unicode, required=True) parser.add_argument('-l', '--lvec-file', help=u'latent vector file', type=unicode, required=True) parser.add_argument('--debug-sim-mode', help=u'max, min, or avg', type=unicode, default=u'max', required=False) parser.add_argument('-s', '--sim-file', help=u'sim file', type=unicode, required=False) parser.add_argument('-o', '--output-dir', help=u'Location iterative summaries for rouge', type=unicode, required=True) parser.add_argument('-n', '--num-return', help=u'Return top-n updates at each time interval', type=int, required=True) parser.add_argument('--temp', dest='use_temp', action='store_true') parser.add_argument('--no-temp', dest='use_temp', action='store_false') parser.set_defaults(use_temp=True) parser.add_argument('-p', '--penalty-mode', help=u'agg or max', type=unicode, required=True) args = parser.parse_args() bow_file = args.bow_file lvec_file = args.lvec_file sim_file = args.sim_file dbg_sim_mode = args.debug_sim_mode odir = args.output_dir n_return = args.num_return use_temp = args.use_temp penalty_mode = args.penalty_mode if use_temp is True and penalty_mode not in [u'agg', u'max']: import sys sys.stderr.write(u'Bad --penalty-mode argument: \'agg\' or \'max\'\n') sys.stderr.flush() 
sys.exit() if odir != '' and not os.path.exists(odir): os.makedirs(odir) if dbg_sim_mode not in [u'max', u'min', u'avg']: sys.stderr.write(u'Bad argument for --debug-sim-mode: ') sys.stderr.write(u'max, min or avg are legal args\n') sys.stderr.flush() sys.exit() if not os.path.exists(bow_file) or os.path.isdir(bow_file): sys.stderr.write((u'--bow-file argument {} either does not exist' \ + u' or is a directory!\n').format(bow_file)) sys.stderr.flush() sys.exit() if not os.path.exists(lvec_file) or os.path.isdir(lvec_file): sys.stderr.write((u'--lvec-file argument {} either does not exist' \ + u' or is a directory!\n').format(lvec_file)) sys.stderr.flush() sys.exit() if sim_file is not None: if not os.path.exists(sim_file) or os.path.isdir(sim_file): sys.stderr.write((u'--sim-file argument {} either does not exist' \ + u' or is a directory!\n').format(sim_file)) sys.stderr.flush() sys.exit() return (bow_file, lvec_file, sim_file, dbg_sim_mode, odir, n_return, use_temp, penalty_mode) if __name__ == '__main__': main()
32.666667
96
0.570663
530
3,920
4.039623
0.216981
0.042504
0.071462
0.058851
0.479215
0.287249
0.230733
0.185894
0.144792
0.144792
0
0.003305
0.305357
3,920
119
97
32.941176
0.78296
0
0
0.186813
0
0
0.168878
0
0
0
0
0
0
0
null
null
0
0.065934
null
null
0.021978
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
746fe9ce70247f8f0f948de126caec9e75bc4ccb
1,378
py
Python
app/api/client.py
cloudblue/processor-acronis-customer-centric
fbb022de150818e29cf6cb6205650893acb3613e
[ "Apache-2.0" ]
3
2019-12-21T13:45:28.000Z
2019-12-24T20:04:34.000Z
app/api/client.py
cloudblue/processor-acronis-customer-centric
fbb022de150818e29cf6cb6205650893acb3613e
[ "Apache-2.0" ]
null
null
null
app/api/client.py
cloudblue/processor-acronis-customer-centric
fbb022de150818e29cf6cb6205650893acb3613e
[ "Apache-2.0" ]
null
null
null
""" This file is property of the Ingram Micro Cloud Blue. Copyright (c) 2019 Ingram Micro. All Rights Reserved. """ import json from requests import api from connect.logger import logger from urllib.parse import urlencode, quote_plus class Client: @staticmethod def send_request(verb, uri, config, body=None): logger.error("REQUEST------------------->") logger.error('Request: %s %s' % (verb, uri)) logger.debug(body) options = {'url': uri, 'headers': {'Content-Type': config['Content-Type']}} if 'bearer' in config: options['headers']['Authorization'] = 'Bearer ' + config['bearer'] elif 'basic' in config: options['headers']['Authorization'] = 'Basic ' + config['basic'] if body: options['data'] = urlencode(body, quote_via=quote_plus) if config[ 'Content-Type'] == 'application/x-www-form-urlencoded' else json.dumps( body) response = api.request(verb, **options) if 200 <= response.status_code <= 300: logger.debug(str(response)) if response.content: return response.json() else: logger.error('Response') logger.error(str(response)) raise Exception(response.json()['error'])
33.609756
146
0.563135
146
1,378
5.280822
0.486301
0.057069
0.046693
0.057069
0.090791
0
0
0
0
0
0
0.010341
0.298258
1,378
40
147
34.45
0.78697
0.077649
0
0
0
0
0.167854
0.047506
0
0
0
0
0
1
0.035714
false
0
0.142857
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7470950ef82ea911ccd94deb0cfaffcdcd9405a5
4,048
py
Python
venmo-sim/leiden/simulate_txs_on_leiden_clustered_venmo_graph.py
akatsarakis/tx_benchmarking
f8233e58bba3f4fb54d82273d7ca8631bae36ebc
[ "MIT" ]
3
2020-07-07T17:08:41.000Z
2022-01-10T19:25:46.000Z
venmo-sim/leiden/simulate_txs_on_leiden_clustered_venmo_graph.py
akatsarakis/tx_benchmarking
f8233e58bba3f4fb54d82273d7ca8631bae36ebc
[ "MIT" ]
null
null
null
venmo-sim/leiden/simulate_txs_on_leiden_clustered_venmo_graph.py
akatsarakis/tx_benchmarking
f8233e58bba3f4fb54d82273d7ca8631bae36ebc
[ "MIT" ]
null
null
null
import argparse import csv import re # read arg (the num of shards, also the num of distributed nodes) from command line def parse_args(): global ARGS parser = argparse.ArgumentParser() parser.add_argument('node_tot', type=int, help='total number of distributed-nodes/shards in the system') ARGS = parser.parse_args() parse_args() node_tot = ARGS.node_tot # read clustering result of the graph, and bind them to distributed nodes # (ensure the total vertex to each node is as equal as possible) ## first, read in total number of vertices and clusters ## from the first line of the input file fp = open("./results/clustered_venmo_dataset_7024852.txt", "r") # , encoding='utf-8') firstline_str = fp.readline() firstline_pattern = re.compile(r"Clustering with (\d+) elements and (\d+) clusters") firstline_match = firstline_pattern.match(firstline_str) if not firstline_match: raise Exception('Failed to identify total vertex number and cluster number in input file') vertex_tot = eval(firstline_match.group(1)) cluster_tot = eval(firstline_match.group(2)) print('%d vertices, %d clusters, %d shards' % (vertex_tot, cluster_tot, node_tot)) ## binding initialization curline_str = fp.readline() cur_cluster = -1 expected_node_size = vertex_tot / node_tot vertex_cluster_no = [-1] * vertex_tot vertex_node_no = [-1] * vertex_tot cluster_size = [0] * cluster_tot cluster_vertices = [[] for i in range(cluster_tot)] # '[[]] * cluster_tot' is wrong, because it is shallow copy cluster_node_no = [-1] * cluster_tot ## read in each line and count while curline_str: if curline_str[0] == '[': # beginning of a new cluster cur_cluster += 1 curline_str = curline_str[curline_str.find(']')+1:] # remove the preceding '[ cluster_no]' of the string strs = curline_str[:-1].split(',') # slice off newline character, and split by comma for cur_str in strs: if not cur_str: continue # ignore empty substrings (in the case the last char is a comma) # bind vertex to its cluster written in the input file cur_vertex = 
eval(cur_str) vertex_cluster_no[cur_vertex] = cur_cluster cluster_vertices[cur_cluster].append(cur_vertex) cluster_size[cur_cluster] += 1 curline_str = fp.readline() # next line ## bind clusters to distributed nodes according to counting result cur_node = 0 cur_node_size = 0 for cur_cluster in range(cluster_tot): if cur_node + 1 < node_tot \ and cur_node_size + cluster_size[cur_cluster] / 2 > expected_node_size \ and cur_node_size != 0: # should bind to the next node, unless this cluster is the first cluster on the current node cur_node += 1 cur_node_size = 0 cur_node_size += cluster_size[cur_cluster] cluster_node_no[cur_cluster] = cur_node for cur_vertex in cluster_vertices[cur_cluster]: vertex_node_no[cur_vertex] = cur_node fp.close() print('-- Binding result:') node_size = [0] * node_tot for cur_vertex in range(vertex_tot): node_size[vertex_node_no[cur_vertex]] += 1 for cur_node in range(node_tot): print('Shard %d size:' % cur_node, node_size[cur_node]) # read txes and sort by time def takeThird(elem): return elem[2] # This function is used to sort txes; here we # assume dates can be sorted in string sort manner fp = open("../venmo_dataset_normalized_shorted.csv", "r") # , encoding='utf-8') csv_file = csv.reader(fp) all_tx = [] for row in csv_file: all_tx.append([int(row[0]), int(row[1]), row[2]]) # all_tx = all_tx[:4000000] # we only used the first 4000000 lines of txes all_tx.sort(key=takeThird) fp.close() # simulate txs, calculate ratio of remote tx tx_remote_cnt = 0 tx_cnt = 0 for tx in all_tx: tx_cnt += 1 if vertex_node_no[tx[0]] != vertex_node_no[tx[1]]: tx_remote_cnt += 1 vertex_node_no[tx[0]] = vertex_node_no[tx[1]] print('-- Result:') print('tx total:', tx_cnt) print('remote:', tx_remote_cnt) print('remote ratio:', tx_remote_cnt / tx_cnt)
34.896552
112
0.704792
639
4,048
4.242567
0.266041
0.033567
0.030985
0.020657
0.095906
0.04574
0.04574
0.022132
0.022132
0.022132
0
0.017099
0.190959
4,048
115
113
35.2
0.810687
0.272233
0
0.075
0
0
0.129287
0.037037
0
0
0
0
0
1
0.025
false
0
0.0375
0.0125
0.075
0.0875
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7471a6cafdc6fccdf2cbf3b0bcfba1e4f01d5df3
4,363
py
Python
custom_components/ampio/__init__.py
pszypowicz/ampio-hacc
34a929cccc2b0e35f1f5d8aeafbf965bfc7dab6c
[ "MIT" ]
2
2021-08-18T07:18:05.000Z
2022-01-01T19:07:09.000Z
custom_components/ampio/__init__.py
pszypowicz/ampio-hacc
34a929cccc2b0e35f1f5d8aeafbf965bfc7dab6c
[ "MIT" ]
null
null
null
custom_components/ampio/__init__.py
pszypowicz/ampio-hacc
34a929cccc2b0e35f1f5d8aeafbf965bfc7dab6c
[ "MIT" ]
1
2021-11-29T18:16:45.000Z
2021-11-29T18:16:45.000Z
"""Ampio Systems Platform.""" import asyncio import json import logging from typing import Any, Dict, Optional import voluptuous as vol from homeassistant.config_entries import ConfigEntry from homeassistant.const import ( CONF_CLIENT_ID, CONF_DEVICE, CONF_DEVICE_CLASS, CONF_FRIENDLY_NAME, CONF_ICON, CONF_NAME, CONF_PASSWORD, CONF_PORT, CONF_PROTOCOL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP, ) from homeassistant.core import Event, callback from homeassistant.helpers import ( config_validation as cv, device_registry as dr, event, template, ) from homeassistant.helpers.dispatcher import ( async_dispatcher_connect, async_dispatcher_send, ) from homeassistant.helpers.entity import Entity from homeassistant.helpers.entity_registry import EntityRegistry, async_get_registry from homeassistant.helpers.typing import ConfigType, HomeAssistantType from .client import AmpioAPI, async_setup_discovery from .const import ( AMPIO_CONNECTED, AMPIO_DISCOVERY_UPDATED, AMPIO_MODULE_DISCOVERY_UPDATED, COMPONENTS, CONF_BROKER, CONF_STATE_TOPIC, CONF_UNIQUE_ID, DATA_AMPIO, DATA_AMPIO_API, DATA_AMPIO_DISPATCHERS, DATA_AMPIO_PLATFORM_LOADED, PROTOCOL_311, SIGNAL_ADD_ENTITIES, ) from .models import AmpioModuleInfo _LOGGER = logging.getLogger(__name__) DOMAIN = "ampio" VERSION_TOPIC_FROM = "ampio/from/info/version" VERSION_TOPIC_TO = "ampio/to/info/version" DISCOVERY_TOPIC_FROM = "ampio/from/can/dev/list" DISCOVERY_TOPIC_TO = "ampio/to/can/dev/list" ATTR_DEVICES = "devices" CONF_KEEPALIVE = "keepalive" PROTOCOL_31 = "3.1" DEFAULT_PORT = 1883 DEFAULT_KEEPALIVE = 60 DEFAULT_PROTOCOL = PROTOCOL_311 CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.All( vol.Schema( { vol.Optional(CONF_CLIENT_ID): cv.string, vol.Optional(CONF_KEEPALIVE, default=DEFAULT_KEEPALIVE): vol.All( vol.Coerce(int), vol.Range(min=15) ), vol.Optional(CONF_BROKER): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_USERNAME): cv.string, vol.Optional(CONF_PASSWORD): cv.string, 
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.All( cv.string, vol.In([PROTOCOL_31, PROTOCOL_311]) ), }, ), ) }, extra=vol.ALLOW_EXTRA, ) async def async_setup(hass: HomeAssistantType, config: ConfigType): """Stub to allow setting up this component. Configuration through YAML is not supported at this time. """ return True async def async_setup_entry(hass: HomeAssistantType, config_entry: ConfigEntry) -> bool: """Set up the Ampio component.""" ampio_data = hass.data.setdefault(DATA_AMPIO, {}) for component in COMPONENTS: ampio_data.setdefault(component, []) conf = CONFIG_SCHEMA({DOMAIN: dict(config_entry.data)})[DOMAIN] ampio_data[DATA_AMPIO_API]: AmpioAPI = AmpioAPI( hass, config_entry, conf, ) ampio_data[DATA_AMPIO_DISPATCHERS] = [] ampio_data[DATA_AMPIO_PLATFORM_LOADED] = [] for component in COMPONENTS: coro = hass.config_entries.async_forward_entry_setup(config_entry, component) ampio_data[DATA_AMPIO_PLATFORM_LOADED].append(hass.async_create_task(coro)) await ampio_data[DATA_AMPIO_API].async_connect() async def async_connected(): """Start discovery on connected.""" await async_setup_discovery(hass, conf, config_entry) async_dispatcher_connect(hass, AMPIO_CONNECTED, async_connected) async def async_stop_ampio(_event: Event): """Stop MQTT component.""" await ampio_data[DATA_AMPIO_API].async_disconnect() hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop_ampio) return True async def async_unload_entry(hass, config_entry): """Unload ZHA config entry.""" dispatchers = hass.data[DATA_AMPIO].get(DATA_AMPIO_DISPATCHERS, []) for unsub_dispatcher in dispatchers: unsub_dispatcher() for component in COMPONENTS: await hass.config_entries.async_forward_entry_unload(config_entry, component) return True
27.789809
88
0.698602
517
4,363
5.599613
0.270793
0.040415
0.036269
0.037306
0.121934
0.067012
0.021416
0
0
0
0
0.006741
0.217969
4,363
156
89
27.967949
0.841735
0.005272
0
0.078947
0
0
0.027337
0.021479
0
0
0
0
0
1
0
false
0.017544
0.140351
0
0.166667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
747300fe06e37214b9d4b666eba20905c7e7c63d
7,096
py
Python
bsd2/vagrant-ansible/ansible/plugins/inventory/nova.py
dlab-berkeley/collaboratool-archive
fa474e05737f78e628d6b9398c58cf7c966a7bba
[ "Apache-2.0" ]
1
2016-01-20T14:36:02.000Z
2016-01-20T14:36:02.000Z
bsd2/vagrant-ansible/ansible/plugins/inventory/nova.py
dlab-berkeley/collaboratool-archive
fa474e05737f78e628d6b9398c58cf7c966a7bba
[ "Apache-2.0" ]
null
null
null
bsd2/vagrant-ansible/ansible/plugins/inventory/nova.py
dlab-berkeley/collaboratool-archive
fa474e05737f78e628d6b9398c58cf7c966a7bba
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # (c) 2012, Marco Vito Moscaritolo <marco@agavee.com> # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- inventory: nova short_description: OpenStack external inventory script description: - Generates inventory that Ansible can understand by making API request to OpenStack endpoint using the novaclient library. - | When run against a specific host, this script returns the following variables: os_os-ext-sts_task_state os_addresses os_links os_image os_os-ext-sts_vm_state os_flavor os_id os_rax-bandwidth_bandwidth os_user_id os_os-dcf_diskconfig os_accessipv4 os_accessipv6 os_progress os_os-ext-sts_power_state os_metadata os_status os_updated os_hostid os_name os_created os_tenant_id os__info os__loaded where some item can have nested structure. - All information are set on B(nova.ini) file version_added: None options: version: description: - OpenStack version to use. required: true default: null choices: [ "1.1", "2" ] username: description: - Username used to authenticate in OpenStack. required: true default: null api_key: description: - Password used to authenticate in OpenStack, can be the ApiKey on some authentication system. required: true default: null auth_url: description: - Authentication URL required to generate token. 
- To manage RackSpace use I(https://identity.api.rackspacecloud.com/v2.0/) required: true default: null auth_system: description: - Authentication system used to login - To manage RackSpace install B(rackspace-novaclient) and insert I(rackspace) required: true default: null region_name: description: - Region name to use in request - In RackSpace some value can be I(ORD) or I(DWF). required: true default: null project_id: description: - Project ID to use in connection - In RackSpace use OS_TENANT_NAME required: false default: null endpoint_type: description: - The endpoint type for novaclient - In RackSpace use 'publicUrl' required: false default: null service_type: description: - The service type you are managing. - In RackSpace use 'compute' required: false default: null service_name: description: - The service name you are managing. - In RackSpace use 'cloudServersOpenStack' required: false default: null insicure: description: - To no check security required: false default: false choices: [ "true", "false" ] author: Marco Vito Moscaritolo notes: - This script assumes Ansible is being executed where the environment variables needed for novaclient have already been set on nova.ini file - For more details, see U(https://github.com/openstack/python-novaclient) examples: - description: List instances code: nova.py --list - description: Instance property code: nova.py --instance INSTANCE_IP ''' import sys import re import os import ConfigParser from novaclient import client as nova_client try: import json except: import simplejson as json ################################################### # executed with no parameters, return the list of # all groups and hosts def nova_load_config_file(): p = ConfigParser.SafeConfigParser() path1 = os.getcwd() + "/nova.ini" path2 = os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")) path3 = "/etc/ansible/nova.ini" if os.path.exists(path1): p.read(path1) elif os.path.exists(path2): p.read(path2) elif 
os.path.exists(path3): p.read(path3) else: return None return p config = nova_load_config_file() client = nova_client.Client( version = config.get('openstack', 'version'), username = config.get('openstack', 'username'), api_key = config.get('openstack', 'api_key'), auth_url = config.get('openstack', 'auth_url'), region_name = config.get('openstack', 'region_name'), project_id = config.get('openstack', 'project_id'), auth_system = config.get('openstack', 'auth_system') ) if len(sys.argv) == 2 and (sys.argv[1] == '--list'): groups = {} # Cycle on servers for f in client.servers.list(): private = [ x['addr'] for x in getattr(f, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed'] public = [ x['addr'] for x in getattr(f, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating'] # Define group (or set to empty string) group = f.metadata['group'] if f.metadata.has_key('group') else 'undefined' # Create group if not exist if group not in groups: groups[group] = [] # Append group to list if f.accessIPv4: groups[group].append(f.accessIPv4) continue if public: groups[group].append(''.join(public)) continue if private: groups[group].append(''.join(private)) continue # Return server list print json.dumps(groups) sys.exit(0) ##################################################### # executed with a hostname as a parameter, return the # variables for that host elif len(sys.argv) == 3 and (sys.argv[1] == '--host'): results = {} ips = [] for instance in client.servers.list(): private = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'fixed'] public = [ x['addr'] for x in getattr(instance, 'addresses').itervalues().next() if x['OS-EXT-IPS:type'] == 'floating'] ips.append( instance.accessIPv4) ips.append(''.join(private)) ips.append(''.join(public)) if sys.argv[2] in ips: for key in vars(instance): # Extract value value = getattr(instance, key) # Generate sanitized key key = 'os_' + 
re.sub("[^A-Za-z0-9\-]", "_", key).lower() # Att value to instance result (exclude manager class) #TODO: maybe use value.__class__ or similar inside of key_name if key != 'os_manager': results[key] = value print json.dumps(results) sys.exit(0) else: print "usage: --list ..OR.. --host <hostname>" sys.exit(1)
29.940928
142
0.643461
923
7,096
4.864572
0.321777
0.024499
0.028062
0.030735
0.14343
0.098886
0.073942
0.073942
0.073942
0.073942
0
0.006329
0.242954
7,096
236
143
30.067797
0.829486
0.15699
0
0.220994
0
0.01105
0.592681
0.028044
0
0
0
0.004237
0
0
null
null
0.005525
0.038674
null
null
0.016575
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
74737681cf0d8052b62c5b5de4ce70418a73d187
61
py
Python
ml/Graph/scratch.py
Shivams9/pythoncodecamp
e6cd27f4704a407ee360414a8c9236b254117a59
[ "MIT" ]
6
2021-08-04T08:15:22.000Z
2022-02-02T11:15:56.000Z
ML/Graph/scratch.py
Maurya232Abhishek/Python-repository-for-basics
3dcec5c529a0847df07c9dcc1424675754ce6376
[ "MIT" ]
14
2021-08-02T06:28:00.000Z
2022-03-25T10:44:15.000Z
ML/Graph/scratch.py
Maurya232Abhishek/Python-repository-for-basics
3dcec5c529a0847df07c9dcc1424675754ce6376
[ "MIT" ]
6
2021-07-16T04:56:41.000Z
2022-02-16T04:40:06.000Z
l=[[1,2,3],[2,2,3],[3,3,3]] print(l) a=[2,3] b=[4,5] print(b)
12.2
27
0.459016
20
61
1.4
0.45
0.214286
0.214286
0
0
0
0
0
0
0
0
0.22807
0.065574
61
5
28
12.2
0.263158
0
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.4
1
0
1
null
1
1
0
0
0
0
0
0
0
0
1
0
0
1
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
747472dbf6c760efb2300b9c6ffbf248dba2c191
6,477
py
Python
pyformlang/rsa/recursive_automaton.py
IlyaEp/pyformlang
eef239844beff5e9da3be4a4a240440ece81c10b
[ "MIT" ]
15
2020-06-25T14:38:27.000Z
2022-03-09T17:55:07.000Z
pyformlang/rsa/recursive_automaton.py
IlyaEp/pyformlang
eef239844beff5e9da3be4a4a240440ece81c10b
[ "MIT" ]
11
2020-09-23T09:48:35.000Z
2021-08-24T08:37:47.000Z
pyformlang/rsa/recursive_automaton.py
IlyaEp/pyformlang
eef239844beff5e9da3be4a4a240440ece81c10b
[ "MIT" ]
5
2020-03-08T19:00:17.000Z
2021-08-15T12:38:05.000Z
""" Representation of a recursive automaton """ from typing import AbstractSet from pyformlang.finite_automaton.finite_automaton import to_symbol from pyformlang.finite_automaton.symbol import Symbol from pyformlang.regular_expression import Regex from pyformlang.cfg import CFG, Epsilon from pyformlang.rsa.box import Box def remove_repetition_of_nonterminals_from_productions(grammar_in_text: str): """ Remove nonterminal repeats on the left side of the rule For example: grammar: S -> a S b S -> a b grammar after function execution: S -> a S b | a b """ productions = dict() for production in grammar_in_text.splitlines(): if "->" not in production: continue head, body = production.split(" -> ") if head in productions: productions[head] += " | " + body else: productions[head] = body grammar_new = str() for nonterminal in productions: grammar_new += f'{nonterminal} -> {productions[nonterminal]}\n' return grammar_new[:-1] class RecursiveAutomaton: """ Represents a recursive automaton This class represents a recursive automaton. 
Parameters ---------- labels : set of :class:`~pyformlang.finite_automaton.Symbol`, optional A finite set of labels for boxes initial_label : :class:`~pyformlang.finite_automaton.Symbol`, optional A start label for automaton boxes : set of :class:`~pyformlang.rsa.Box`, optional A finite set of boxes """ def __init__(self, labels: AbstractSet[Symbol] = None, initial_label: Symbol = None, boxes: AbstractSet[Box] = None): if labels is not None: labels = {to_symbol(x) for x in labels} self._labels = labels or set() if initial_label is not None: initial_label = to_symbol(initial_label) if initial_label not in self._labels: self._labels.add(initial_label) self._initial_label = initial_label or Symbol("") self._boxes = dict() if boxes is not None: for box in boxes: self._boxes.update({to_symbol(box.label): box}) self._labels.add(box.label) for label in self._labels: box = self.get_box(label) if box is None: raise ValueError("RSA must have the same number of labels and DFAs") def get_box(self, label: Symbol): """ Box by label """ label = to_symbol(label) if label in self._boxes: return self._boxes[label] return None def add_box(self, new_box: Box): """ Set a box Parameters ----------- new_box : :class:`~pyformlang.rsa.Box` The new box """ self._boxes.update({new_box.label: new_box}) self._labels.add(to_symbol(new_box.label)) def get_number_of_boxes(self): """ Size of set of boxes """ return len(self._boxes) def change_initial_label(self, new_initial_label: Symbol): """ Set an initial label Parameters ----------- new_initial_label : :class:`~pyformlang.finite_automaton.Symbol` The new initial label """ new_initial_label = to_symbol(new_initial_label) if new_initial_label not in self._labels: raise ValueError("New initial label not in set of labels for boxes") @property def labels(self) -> set: """ The set of labels """ return self._labels @property def boxes(self) -> dict: """ The set of boxes """ return self._boxes @property def initial_label(self) -> Symbol: """ The 
initial label """ return self._initial_label @classmethod def from_regex(cls, regex: Regex, initial_label: Symbol): """ Create a recursive automaton from regular expression Parameters ----------- regex : :class:`~pyformlang.regular_expression.Regex` The regular expression initial_label : :class:`~pyformlang.finite_automaton.Symbol` The initial label for the recursive automaton Returns ----------- rsa : :class:`~pyformlang.rsa.RecursiveAutomaton` The new recursive automaton built from regular expression """ initial_label = to_symbol(initial_label) box = Box(regex.to_epsilon_nfa().minimize(), initial_label) return RecursiveAutomaton({initial_label}, initial_label, {box}) @classmethod def from_cfg(cls, cfg: CFG): """ Create a recursive automaton from context-free grammar Parameters ----------- cfg : :class:`~pyformlang.cfg.CFG` The context-free grammar Returns ----------- rsa : :class:`~pyformlang.rsa.RecursiveAutomaton` The new recursive automaton built from context-free grammar """ initial_label = to_symbol(cfg.start_symbol) grammar_in_true_format = remove_repetition_of_nonterminals_from_productions(cfg.to_text()) boxes = set() labels = set() notation_for_epsilon = Epsilon().to_text() for production in grammar_in_true_format.splitlines(): head, body = production.split(" -> ") labels.add(to_symbol(head)) if body == "": body = notation_for_epsilon boxes.add(Box(Regex(body).to_epsilon_nfa().minimize(), to_symbol(head))) return RecursiveAutomaton(labels, initial_label, boxes) def is_equivalent_to(self, other): """ Check whether two recursive automata are equivalent Parameters ---------- other : :class:`~pyformlang.rsa.RecursiveAutomaton` The input recursive automaton Returns ---------- are_equivalent : bool Whether the two recursive automata are equivalent or not """ if not isinstance(other, RecursiveAutomaton): return False if self._labels != other._labels: return False for label in self._labels: box_1 = self.get_box(label) box_2 = other.get_box(label) if not box_1 == 
box_2: return False return True def __eq__(self, other): return self.is_equivalent_to(other)
28.915179
98
0.603366
742
6,477
5.080863
0.15903
0.098674
0.027851
0.041114
0.24244
0.163395
0.096021
0.068966
0.04191
0.04191
0
0.001102
0.299213
6,477
223
99
29.044843
0.829478
0.28578
0
0.14433
0
0
0.036984
0.006724
0
0
0
0
0
1
0.134021
false
0
0.061856
0.010309
0.350515
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7475e81fcba007e37fae56c5bb3b0e881ffb33a4
17,080
py
Python
py/ColladaMesh.py
dineshkummarc/Odin
f23719ff05a9d03cebd0d5c4c0179a98eff2d935
[ "BSD-3-Clause" ]
1
2019-04-22T16:33:55.000Z
2019-04-22T16:33:55.000Z
py/ColladaMesh.py
dineshkummarc/Odin
f23719ff05a9d03cebd0d5c4c0179a98eff2d935
[ "BSD-3-Clause" ]
null
null
null
py/ColladaMesh.py
dineshkummarc/Odin
f23719ff05a9d03cebd0d5c4c0179a98eff2d935
[ "BSD-3-Clause" ]
null
null
null
from bisect import bisect import ColladaMaterial # Named array of points. class PointArray(): def __init__(self, n, ofs): self.name = n self.points = [] self.stride = 1 self.offset = int(ofs) def GetFirstChildElement(node): for elem in node.childNodes: if elem.nodeType == elem.ELEMENT_NODE: return elem return None def GetChildElements(node): elems = [] for elem in node.childNodes: if elem.nodeType == elem.ELEMENT_NODE: elems.append(elem) return elems def GetSourceArray(parent, srcId): for src in parent.getElementsByTagName('source'): if src.getAttribute('id') == srcId[1:]: technique = src.getElementsByTagName('technique_common')[0] accessor = GetFirstChildElement(technique) sourceURL = accessor.getAttribute('source') count = int(accessor.getAttribute('count')) param = GetFirstChildElement(accessor) paramType = param.getAttribute('type') for node in GetChildElements(src): if node.getAttribute('id') == sourceURL[1:]: data = node.firstChild.data data = data.strip() data = data.replace('\n', ' ') if paramType == 'name': data = [str(s) for s in data.split(' ')] elif paramType == 'float': data = [float(s) for s in data.split(' ')] elif paramType == 'float4x4': data = [float(s) for s in data.split(' ')] return data return [] def GetChildArray(parent, tag, typecast): for node in GetChildElements(parent): if node.tagName == tag: return [typecast(x) for x in node.firstChild.data.strip().split(' ')] return [] class Skin: def __init__(self, skin, numWeights, origPosMap): self.bindShapeMatrix = GetChildArray(skin, 'bind_shape_matrix', float) jointURL = "" for joints in skin.getElementsByTagName('joints'): for inp in joints.getElementsByTagName('input'): semantic = inp.getAttribute('semantic') sourceURL = inp.getAttribute('source') if semantic == 'JOINT': self.jointNames = GetSourceArray(skin, sourceURL) jointURL = sourceURL elif semantic == 'INV_BIND_MATRIX': self.invBindMatrices = GetSourceArray(skin, sourceURL) else: print('Skipping input with unknown semantic ' + semantic) 
for vertexWeights in skin.getElementsByTagName('vertex_weights'): jointOffset = 0 weightOffset = 0 weightURL = '' for inp in vertexWeights.getElementsByTagName('input'): semantic = inp.getAttribute('semantic') offset = int(inp.getAttribute('offset')) sourceURL = inp.getAttribute('source') if semantic == 'JOINT': if sourceURL != jointURL: print('TODO: multiple jointURLs specified, need to match up indices.') jointOffset = offset elif semantic == 'WEIGHT': weightURL = sourceURL weightOffset = offset else: print('Skipping input with unknown semantic ' + semantic) weights = GetSourceArray(skin, weightURL) vertexCount = GetChildArray(vertexWeights, 'vcount', int) v = GetChildArray(vertexWeights, 'v', int) vstride = max(jointOffset, weightOffset) + 1 self.vertexWeightCount = numWeights self.vertexWeights = [] self.jointIndices = [] index = 0 for vc in vertexCount: tempWeights = [] tempIndices = [] for c in range(vc): tempWeights.append(weights[v[(index + c) * vstride + weightOffset]]) tempIndices.append(v[(index + c) * vstride + jointOffset]) temp = zip(tempWeights, tempIndices) temp.sort() temp.reverse() tempWeights = [s[0] for s in temp][:numWeights] tempIndices = [s[1] for s in temp][:numWeights] for n in range(len(tempWeights), numWeights): tempWeights.append(0) tempIndices.append(0) weightSum = 0 for n in range(numWeights): weightSum = weightSum + tempWeights[n] for n in range(numWeights): tempWeights[n] = tempWeights[n] / weightSum self.vertexWeights.extend(tempWeights) self.jointIndices.extend(tempIndices) index = index + vc #Expand vertex weights and joint indices according to origPosMap newVertexWeights = [] newJointIndices = [] for i in range(len(origPosMap)): origIndex = int(origPosMap[i]) for j in range(numWeights): newVertexWeights.append(self.vertexWeights[origIndex * numWeights + j]) newJointIndices.append(self.jointIndices[origIndex * numWeights + j]) self.vertexWeights = newVertexWeights self.jointIndices = newJointIndices def Write(self, fileHandle): 
fileHandle.write(' "bindShapeMatrix" : ') fileHandle.write(str(self.bindShapeMatrix) + ",\n") fileHandle.write(' "jointNames" : ') fileHandle.write(str(self.jointNames).replace("'", '"') + ",\n") fileHandle.write(' "invBindMatrices" : ') fileHandle.write(str(self.invBindMatrices) + ",\n") fileHandle.write(' "vertexWeights" : ') fileHandle.write(str(self.vertexWeights) + ",\n") fileHandle.write(' "jointIndices" : ') fileHandle.write(str(self.jointIndices) + ",\n") #TODO: passing in the doc is like begging for trouble, pass in the needed elements instead. class Mesh: def __init__(self, doc, node): self.materialLUT = dict() instanceMaterials = node.getElementsByTagName('instance_material') for mat in instanceMaterials: self.materialLUT[mat.getAttribute('symbol')] = mat.getAttribute('target') geometry = None instanceGeometryURL = '' if node.tagName == 'instance_controller': instanceControllerURL = node.getAttribute('url') controllers = doc.getElementsByTagName('controller') controller = None for c in controllers: if c.getAttribute('id') == instanceControllerURL[1:]: controller = c break if c == None: print("Couldn't find the controller with id '" + instanceControllerURL + "', skipping") return skins = c.getElementsByTagName('skin') if len(skins) != 1: print("Controller doesn't contain exactly one skin, skipping.") return instanceGeometryURL = skins[0].getAttribute('source'); elif node.tagName == 'instance_geometry': instanceGeometryURL = node.getAttribute('url') if len(instanceGeometryURL) != 0: if instanceGeometryURL[0] != '#': print('Geometry URL pointing outside of this document, skipping.') return geometries = doc.getElementsByTagName('geometry') geometry = None for g in geometries: if g.getAttribute('id') == instanceGeometryURL[1:]: geometry = g break if geometry == None: print("Couldn't find the geometry with id '" + instanceGeometryURL + "', skipping") return self.faces = [] self.materials = [] self.verts = [] geometryId = geometry.getAttribute("id") 
self.uniqueVerts = dict() self.sourceArrays = dict() self.outFileName = geometry.getAttribute('name') + '.json' self.skin = None self.origPosMap = dict() self.skinNode = None # Check if there's a skin node for this mesh. for controller in doc.getElementsByTagName("controller"): if self.skinNode != None: break controllerId = controller.getAttribute("id") for skin in controller.getElementsByTagName("skin"): if skin.getAttribute("source")[1:] == geometryId: self.skinNode = skin break for mesh in geometry.getElementsByTagName("mesh"): # TODO: This assumes there's only one <mesh> per <geometry>, check spec. # Only export normals and uv's if they're required by the material. self.needsNormals = False self.needsUV = False # Get all the triangles and polygons in the mesh. polygons = mesh.getElementsByTagName("polygons") triangles = mesh.getElementsByTagName("triangles") for tri in triangles: polygons.append(tri) # Get all the materials in the mesh. self.BuildMaterials(doc, polygons) # Create a list of all the sources sourceList = self.BuildSourceList(mesh, polygons) # Look up the source and pull the data. for srcItem in sourceList: sourceURL = srcItem[0] offset = srcItem[1] targetAttr = srcItem[2] foundSource = False for source in mesh.getElementsByTagName('source'): if source.getAttribute('id') == sourceURL[1:]: foundSource = True if not self.sourceArrays.has_key(targetAttr): self.sourceArrays[targetAttr] = [] self.GetSrcArray(source, targetAttr, offset) break if not foundSource: print("Couldn't find matching source.") break # Get unique indices. 
for polygon in polygons: for p in polygon.getElementsByTagName("p"): face = p.firstChild.data.strip().split(' '); stride = len(face) / (int(polygon.getAttribute("count")) * 3) for i in range(0, len(face), stride): posArr = self.sourceArrays["vertexPositions"][0] fIndex = int(face[i+posArr.offset]) px = posArr.points[fIndex*3] py = posArr.points[fIndex*3+1] pz = posArr.points[fIndex*3+2] vert = (px,py,pz) if self.needsNormals: for nc in range(0, len(self.sourceArrays["vertexNormals"])): normArr = self.sourceArrays["vertexNormals"][nc] fIndex = int(face[i+normArr.offset]) nx = normArr.points[fIndex*3] ny = normArr.points[fIndex*3+1] nz = normArr.points[fIndex*3+2] vert = vert + (nx,ny,nz) if self.needsUV: for tn in range(0, len(self.sourceArrays["vertexTextureCoords"])): texArr = self.sourceArrays["vertexTextureCoords"][tn] fIndex = int(face[i+texArr.offset]) u = texArr.points[fIndex*2] v = texArr.points[fIndex*2+1] vert = vert + (u,v) index = self.GetUniqueVertexIndex(vert) self.origPosMap[index] = face[i] self.faces.append(index) self.vertArrays = [None]*len(self.uniqueVerts) for v in self.uniqueVerts.iteritems(): self.vertArrays[v[1]] = v[0] offs = 0 arr = PointArray("vertexPositions", 0) for v in self.vertArrays: arr.points.append(v[offs+0]) arr.points.append(v[offs+1]) arr.points.append(v[offs+2]) self.verts.append([arr]) offs += 3 if (self.needsNormals): self.verts.append([]) for nc in range(0, len(self.sourceArrays["vertexNormals"])): arr = PointArray("vertexNormals", 3) for v in self.vertArrays: arr.points.append(v[offs+0]) arr.points.append(v[offs+1]) arr.points.append(v[offs+2]) self.verts[-1].append(arr) offs += 3 if (self.needsUV): self.verts.append([]) for tn in range(0, len(self.sourceArrays["vertexTextureCoords"])): arr = PointArray("vertexTextureCoords", offs) for v in self.vertArrays: arr.points.append(v[offs+0]) arr.points.append(v[offs+1]) self.verts[-1].append(arr) offs += 2 # If there's a skin node set, create the skin. 
if self.skinNode: self.skin = Skin(self.skinNode, 4, self.origPosMap) def BuildSourceList(self, mesh, polygons): # Build a list of (sourceURL, offset, targetAttr) tuples to extract. srcArray = [] for polygon in polygons: for input in polygon.getElementsByTagName("input"): semantic = input.getAttribute("semantic") offset = input.getAttribute('offset') sourceURL = input.getAttribute('source') targetAttr = 'vertexPositions' if semantic == 'NORMAL': targetAttr = 'vertexNormals' if not self.needsNormals: continue elif semantic == 'TEXCOORD': targetAttr = 'vertexTextureCoords' if not self.needsUV: continue # There's an extra level of indirection for vertex semantics. if semantic == 'VERTEX': for vertex in mesh.getElementsByTagName('vertices'): for input in vertex.getElementsByTagName('input'): sourceURL = input.getAttribute('source') semantic = input.getAttribute('semantic') if semantic == 'NORMAL': if not self.needsNormals: continue targetAttr = 'vertexNormals' elif semantic == 'POSITION': targetAttr = 'vertexPositions' if [sourceURL, offset, targetAttr] not in srcArray: srcArray.append([sourceURL, offset, targetAttr]) else: if [sourceURL, offset, targetAttr] not in srcArray: srcArray.append([sourceURL, offset, targetAttr]) return srcArray def BuildMaterials(self, doc, polygons): # Get all the materials in the mesh. 
gcount = 0 for polygon in polygons: materialSymbol = polygon.getAttribute('material') if materialSymbol == '': continue materialURL = self.materialLUT[materialSymbol] material = None for mat in doc.getElementsByTagName('material'): if mat.getAttribute('id') == materialURL[1:]: material = mat; break if material == None: print("Couldn't find material '" + materialURL + "'.") instanceEffects = material.getElementsByTagName('instance_effect') if len(instanceEffects) == 0: print('No instance effects') effectURL = instanceEffects[0].getAttribute('url') if effectURL[0] != '#': print('Effect URL points outside document.') for fx in doc.getElementsByTagName("effect"): fxId = fx.getAttribute('id') if fxId == effectURL[1:]: mat = ColladaMaterial.Material(fx, doc, self.skinNode != None) mat.count = gcount self.materials.append(mat) if mat.materialType != "matte": self.needsNormals = True if mat.HasTextureChannel(): self.needsUV = True break gcount += int(polygon.getAttribute("count")) * 3 def WriteToScene(self, fileHandle, indent, outFolder): for i in range(indent): fileHandle.write(' ') fileHandle.write('{ "type" : "mesh", "file" : "' + outFolder + '/' + self.outFileName + '" }') # Write the mesh as a JSON file. 
    # --- Serialization (continuation of class Mesh) ---

    def Write(self, outFolder):
        """Write this mesh to <outFolder>/<outFileName> as a JSON file."""
        print('Writing mesh ' + outFolder + '/' + self.outFileName)
        fileHandle = open(outFolder + '/' + self.outFileName, 'w')
        fileHandle.write('{\n')
        # Each material is written to its own .json file; this mesh records
        # that file name plus the material's "start" offset (set by
        # BuildMaterials from the running triangle-corner count).
        fileHandle.write(' "materials" : \n [\n')
        for m in range(len(self.materials)):
            self.materials[m].Write(outFolder)
            fileHandle.write(' { "file" : "' + outFolder + '/' + self.materials[m].name + '.json", "start" : ' + str(self.materials[m].count) + ' }')
            if m != len(self.materials) - 1:
                fileHandle.write(',')
            fileHandle.write('\n')
        fileHandle.write(' ],\n')
        # Index buffer referencing the deduplicated vertex list.
        fileHandle.write(' "indices" : ')
        fileHandle.write(str(self.faces))
        fileHandle.write(',\n')
        # Optional skinning data (bind matrices, weights, joint indices).
        if self.skin != None:
            self.skin.Write(fileHandle)
        # Vertex attribute streams (positions, then normals / UVs when
        # present), each written as a flat array.
        for pa in range(len(self.verts)):
            fileHandle.write(' "' + self.verts[pa][0].name + '" : [')
            for vsi in range(len(self.verts[pa])):
                # Points are held as strings; strip the quotes so the
                # output reads as a numeric JSON array.
                fileHandle.write(str(self.verts[pa][vsi].points).replace("'", ""))
                if vsi != len(self.verts[pa]) - 1:
                    fileHandle.write(', ')
            fileHandle.write(']')
            if pa != len(self.verts) - 1:
                fileHandle.write(',')
            fileHandle.write('\n')
        fileHandle.write('\n}')
        fileHandle.close()

    def GetUniqueVertexIndex(self, a):
        # Deduplicate vertices: `a` is the full attribute tuple built in
        # __init__ (position, plus normal/uv when enabled); identical
        # tuples share one index.
        if a not in self.uniqueVerts:
            self.uniqueVerts[a] = len(self.uniqueVerts)
        return self.uniqueVerts[a];

    # Pull a float_array from the Collada format and store it as a PointArray in the mesh.
    def GetSrcArray(self, source, dstName, offset):
        the_array = source.getElementsByTagName("float_array")[0];
        arr = the_array.firstChild.data
        arr = arr.strip();
        arr = arr.replace('\n', ' ')
        newArray = PointArray(dstName, offset)
        # NOTE(review): values are stored as strings here, not parsed to
        # float — callers appear to convert on use; confirm.
        newArray.points = [str(s) for s in arr.split(' ')]
        # Stride defaults to 1 when the <accessor> omits the attribute.
        s = source.getElementsByTagName("accessor")[0].getAttribute("stride")
        if s == "":
            newArray.stride = 1
        else:
            newArray.stride = int(s)
        #print("Got source " + newArray.name + ", count " + str(len(newArray.points)) + ", stride " + str(newArray.stride) + ", offset " + str(newArray.offset))
        self.sourceArrays[dstName].append(newArray)
37.955556
156
0.606382
1,809
17,080
5.709232
0.165837
0.040666
0.012393
0.012393
0.176414
0.136135
0.111348
0.097405
0.079009
0.060418
0
0.006349
0.271546
17,080
449
157
38.040089
0.823742
0.058782
0
0.215584
0
0
0.092551
0
0
0
0
0.002227
0
1
0.036364
false
0
0.005195
0
0.083117
0.031169
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
747904efdd9f1ce7e4568f67aa61e74e2291d27b
1,757
py
Python
src/zope/site/tests/test_folder.py
zopefoundation/zope.site
a523fdaab2801d6c2b15d10bd93115c9d53f4fd7
[ "ZPL-2.1" ]
3
2018-10-09T14:20:30.000Z
2021-03-09T22:23:50.000Z
src/zope/site/tests/test_folder.py
zopefoundation/zope.site
a523fdaab2801d6c2b15d10bd93115c9d53f4fd7
[ "ZPL-2.1" ]
15
2015-03-20T09:14:08.000Z
2021-03-05T16:39:27.000Z
src/zope/site/tests/test_folder.py
zopefoundation/zope.site
a523fdaab2801d6c2b15d10bd93115c9d53f4fd7
[ "ZPL-2.1" ]
1
2015-04-03T09:43:40.000Z
2015-04-03T09:43:40.000Z
import doctest
import unittest

from zope.site.folder import Folder
from zope.site.testing import siteSetUp, siteTearDown, checker
from zope.site.tests.test_site import TestSiteManagerContainer


def setUp(test=None):
    # Doctest hook: bring up a fresh site before each doctest run.
    siteSetUp()


def tearDown(test=None):
    # Doctest hook: tear the site back down afterwards.
    siteTearDown()


class FolderTest(TestSiteManagerContainer):
    """Run the shared site-manager-container tests against Folder."""

    def makeTestObject(self):
        return Folder()


class TestRootFolder(unittest.TestCase):
    """IRoot must precede IContainer in the interface resolution order."""

    def _assert_root_precedes_container(self, interface_seq):
        # Imports are kept local, mirroring the original tests.
        from zope.location.interfaces import IRoot
        from zope.container.interfaces import IContainer
        ordered = list(interface_seq)
        self.assertLess(ordered.index(IRoot), ordered.index(IContainer))

    def test_IRoot_before_IContainer_rootFolder(self):
        from zope.site.folder import rootFolder
        from zope.interface import providedBy
        self._assert_root_precedes_container(
            providedBy(rootFolder()).flattened())

    def test_IRoot_before_IContainer_IRootFolder(self):
        from zope.site.interfaces import IRootFolder
        self._assert_root_precedes_container(IRootFolder.__iro__)


def test_suite():
    option_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    suites = (
        unittest.defaultTestLoader.loadTestsFromName(__name__),
        doctest.DocTestSuite('zope.site.folder',
                             setUp=setUp, tearDown=tearDown),
        doctest.DocFileSuite("folder.txt",
                             setUp=setUp, tearDown=tearDown,
                             checker=checker,
                             optionflags=option_flags),
    )
    return unittest.TestSuite(suites)
27.453125
65
0.690381
175
1,757
6.822857
0.308571
0.067002
0.050251
0.030151
0.365997
0.284757
0.284757
0.284757
0.284757
0.284757
0
0
0.232783
1,757
63
66
27.888889
0.885757
0
0
0.243902
0
0
0.014815
0
0
0
0
0
0.04878
1
0.146341
false
0
0.292683
0.02439
0.536585
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
747a06d2abc8c71978e54d63ef1aeef2b7e2379e
6,712
py
Python
stl/ast.py
karen-leung-tri/py-stl
87f764a2b8b7c756a239f200b77c3875f959cae6
[ "BSD-3-Clause" ]
null
null
null
stl/ast.py
karen-leung-tri/py-stl
87f764a2b8b7c756a239f200b77c3875f959cae6
[ "BSD-3-Clause" ]
null
null
null
stl/ast.py
karen-leung-tri/py-stl
87f764a2b8b7c756a239f200b77c3875f959cae6
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- from collections import deque, namedtuple from functools import lru_cache import funcy as fn from lenses import lens, bind import stl def flatten_binary(phi, op, dropT, shortT): def f(x): return x.args if isinstance(x, op) else [x] args = [arg for arg in phi.args if arg is not dropT] if any(arg is shortT for arg in args): return shortT elif not args: return dropT elif len(args) == 1: return args[0] else: return op(tuple(fn.mapcat(f, phi.args))) class AST(object): __slots__ = () def __or__(self, other): return flatten_binary(Or((self, other)), Or, BOT, TOP) def __and__(self, other): return flatten_binary(And((self, other)), And, TOP, BOT) def __invert__(self): if isinstance(self, Neg): return self.arg return Neg(self) def __rshift__(self, t): if self in (BOT, TOP): return self phi = self for _ in range(t): phi = Next(phi) return phi def __call__(self, trace, time=0): return stl.pointwise_sat(self)(trace, time) @property def children(self): return tuple() def walk(self): """Walk of the AST.""" pop = deque.pop children = deque([self]) while len(children) > 0: node = pop(children) yield node children.extend(node.children) @property def params(self): def get_params(leaf): if isinstance(leaf, ModalOp): if isinstance(leaf.interval[0], Param): yield leaf.interval[0] if isinstance(leaf.interval[1], Param): yield leaf.interval[1] elif isinstance(leaf, LinEq): if isinstance(leaf.const, Param): yield leaf.const return set(fn.mapcat(get_params, self.walk())) def set_params(self, val): phi = param_lens(self) return phi.modify(lambda x: float(val.get(x, val.get(str(x), x)))) @property def lineqs(self): return set(lineq_lens.collect()(self)) @property def atomic_predicates(self): return set(AP_lens.collect()(self)) @property def var_names(self): symbols = set(bind(self.lineqs).Each().terms.Each().collect()) symbols |= self.atomic_predicates return set(bind(symbols).Each().id.collect()) def inline_context(self, context): phi, phi2 = self, None def 
update(ap): return context.get(ap, ap) while phi2 != phi: phi2, phi = phi, AP_lens.modify(update)(phi) return phi def __hash__(self): # TODO: compute hash based on contents return hash(repr(self)) class _Top(AST): __slots__ = () def __repr__(self): return "⊤" def __invert__(self): return BOT class _Bot(AST): __slots__ = () def __repr__(self): return "⊥" def __invert__(self): return TOP TOP = _Top() BOT = _Bot() class AtomicPred(namedtuple("AP", ["id"]), AST): __slots__ = () def __repr__(self): return f"{self.id}" def __hash__(self): # TODO: compute hash based on contents return hash(repr(self)) @property def children(self): return tuple() class LinEq(namedtuple("LinEquality", ["terms", "op", "const"]), AST): __slots__ = () def __repr__(self): return " + ".join(map(str, self.terms)) + f" {self.op} {self.const}" @property def children(self): return tuple() def __hash__(self): # TODO: compute hash based on contents return hash(repr(self)) class Var(namedtuple("Var", ["coeff", "id"])): __slots__ = () def __repr__(self): if self.coeff == -1: coeff_str = "-" elif self.coeff == +1: coeff_str = "" else: coeff_str = f"{self.coeff}" return f"{coeff_str}{self.id}" class Interval(namedtuple('I', ['lower', 'upper'])): __slots__ = () def __repr__(self): return f"[{self.lower},{self.upper}]" class NaryOpSTL(namedtuple('NaryOp', ['args']), AST): __slots__ = () OP = "?" def __repr__(self): return f" {self.OP} ".join(f"({x})" for x in self.args) @property def children(self): return tuple(self.args) class Or(NaryOpSTL): __slots__ = () OP = "∨" def __hash__(self): # TODO: compute hash based on contents return hash(repr(self)) class And(NaryOpSTL): __slots__ = () OP = "∧" def __hash__(self): # TODO: compute hash based on contents return hash(repr(self)) class ModalOp(namedtuple('ModalOp', ['interval', 'arg']), AST): __slots__ = () OP = '?' 
    # NOTE: continuation of class ModalOp (its header, __slots__ and the
    # OP default precede this chunk); these two methods complete its body.
    def __repr__(self):
        # Operator glyph, then the interval, then the wrapped sub-formula.
        return f"{self.OP}{self.interval}({self.arg})"

    @property
    def children(self):
        # A modal operator wraps exactly one sub-formula.
        return (self.arg,)


class F(ModalOp):
    """Temporal "eventually/finally" operator, rendered with the ◇ glyph."""
    __slots__ = ()
    OP = "◇"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))


class G(ModalOp):
    """Temporal "globally/always" operator, rendered with the □ glyph."""
    __slots__ = ()
    OP = "□"

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))


class Until(namedtuple('ModalOp', ['arg1', 'arg2']), AST):
    """Binary temporal "until" (U) node over two sub-formulas."""
    __slots__ = ()

    def __repr__(self):
        return f"({self.arg1}) U ({self.arg2})"

    @property
    def children(self):
        return (self.arg1, self.arg2)

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))


class Neg(namedtuple('Neg', ['arg']), AST):
    """Logical negation (¬) of a single sub-formula."""
    __slots__ = ()

    def __repr__(self):
        return f"¬({self.arg})"

    @property
    def children(self):
        return (self.arg,)

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))


class Next(namedtuple('Next', ['arg']), AST):
    """Next-step temporal operator, rendered with the ◯ glyph."""
    __slots__ = ()

    def __repr__(self):
        return f"◯({self.arg})"

    @property
    def children(self):
        return (self.arg,)

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))


class Param(namedtuple('Param', ['name']), AST):
    """A named symbolic parameter (used in intervals and LinEq constants)."""
    __slots__ = ()

    def __repr__(self):
        return self.name

    def __hash__(self):
        # TODO: compute hash based on contents
        return hash(repr(self))


@lru_cache()
def param_lens(phi, *, getter=False):
    """Return a lens focused on every Param node reachable in *phi*.

    Cached: the lens for a given (hashable) formula never changes.
    NOTE(review): `getter` is unused in this body — confirm whether it
    can be dropped.
    """
    return bind(phi).Recur(Param)


def type_pred(*args):
    """Build a predicate testing whether a node's exact type is in *args*.

    Uses `type(x) in ...`, so subclasses deliberately do NOT match.
    """
    ast_types = set(args)
    return lambda x: type(x) in ast_types


# Module-level lenses collecting all LinEq / AtomicPred leaves of a formula.
lineq_lens = lens.Recur(LinEq)
AP_lens = lens.Recur(AtomicPred)
20.975
76
0.567789
827
6,712
4.351874
0.171705
0.066685
0.036677
0.045846
0.403445
0.363434
0.307585
0.280356
0.247569
0.235621
0
0.004239
0.29708
6,712
319
77
21.040752
0.756889
0.066448
0
0.411765
0
0
0.051688
0.010082
0
0
0
0.003135
0
1
0.25
false
0
0.02451
0.196078
0.735294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
747a9c5326878d11b9906f17374aa59d5eda3f27
5,958
py
Python
nel-wikipedia/kb_creator.py
nadjet/explosion_projects
74f09fc97befb429a6db9e3235619c3d7b27ea7a
[ "MIT" ]
1
2020-07-18T19:19:54.000Z
2020-07-18T19:19:54.000Z
nel-wikipedia/kb_creator.py
JaredDelora/projects
ad0fc26dbc05182a439ffe76a362d40c24e25066
[ "MIT" ]
null
null
null
nel-wikipedia/kb_creator.py
JaredDelora/projects
ad0fc26dbc05182a439ffe76a362d40c24e25066
[ "MIT" ]
null
null
null
# coding: utf-8 from __future__ import unicode_literals import logging from spacy.kb import KnowledgeBase from train_descriptions import EntityEncoder import wiki_io as io logger = logging.getLogger(__name__) def create_kb( nlp, max_entities_per_alias, min_entity_freq, min_occ, entity_def_path, entity_descr_path, entity_alias_path, entity_freq_path, prior_prob_path, entity_vector_length, ): # Create the knowledge base from Wikidata entries kb = KnowledgeBase(vocab=nlp.vocab, entity_vector_length=entity_vector_length) entity_list, filtered_title_to_id = _define_entities(nlp, kb, entity_def_path, entity_descr_path, min_entity_freq, entity_freq_path, entity_vector_length) _define_aliases(kb, entity_alias_path, entity_list, filtered_title_to_id, max_entities_per_alias, min_occ, prior_prob_path) return kb def _define_entities(nlp, kb, entity_def_path, entity_descr_path, min_entity_freq, entity_freq_path, entity_vector_length): # read the mappings from file title_to_id = io.read_title_to_id(entity_def_path) id_to_descr = io.read_id_to_descr(entity_descr_path) # check the length of the nlp vectors if "vectors" in nlp.meta and nlp.vocab.vectors.size: input_dim = nlp.vocab.vectors_length logger.info("Loaded pretrained vectors of size %s" % input_dim) else: raise ValueError( "The `nlp` object should have access to pretrained word vectors, " " cf. https://spacy.io/usage/models#languages." 
) logger.info("Filtering entities with fewer than {} mentions".format(min_entity_freq)) entity_frequencies = io.read_entity_to_count(entity_freq_path) # filter the entities for in the KB by frequency, because there's just too much data (8M entities) otherwise filtered_title_to_id, entity_list, description_list, frequency_list = get_filtered_entities( title_to_id, id_to_descr, entity_frequencies, min_entity_freq ) logger.info("Kept {} entities from the set of {}".format(len(description_list), len(title_to_id.keys()))) logger.info("Training entity encoder") encoder = EntityEncoder(nlp, input_dim, entity_vector_length) encoder.train(description_list=description_list, to_print=True) logger.info("Getting entity embeddings") embeddings = encoder.apply_encoder(description_list) logger.info("Adding {} entities".format(len(entity_list))) kb.set_entities( entity_list=entity_list, freq_list=frequency_list, vector_list=embeddings ) return entity_list, filtered_title_to_id def _define_aliases(kb, entity_alias_path, entity_list, filtered_title_to_id, max_entities_per_alias, min_occ, prior_prob_path): logger.info("Adding aliases from Wikipedia and Wikidata") _add_aliases( kb, entity_list=entity_list, title_to_id=filtered_title_to_id, max_entities_per_alias=max_entities_per_alias, min_occ=min_occ, prior_prob_path=prior_prob_path, ) def get_filtered_entities(title_to_id, id_to_descr, entity_frequencies, min_entity_freq: int = 10): filtered_title_to_id = dict() entity_list = [] description_list = [] frequency_list = [] for title, entity in title_to_id.items(): freq = entity_frequencies.get(title, 0) desc = id_to_descr.get(entity, None) if desc and freq > min_entity_freq: entity_list.append(entity) description_list.append(desc) frequency_list.append(freq) filtered_title_to_id[title] = entity return filtered_title_to_id, entity_list, description_list, frequency_list def _add_aliases(kb, entity_list, title_to_id, max_entities_per_alias, min_occ, prior_prob_path): wp_titles = 
title_to_id.keys() # adding aliases with prior probabilities # we can read this file sequentially, it's sorted by alias, and then by count logger.info("Adding WP aliases") with prior_prob_path.open("r", encoding="utf8") as prior_file: # skip header prior_file.readline() line = prior_file.readline() previous_alias = None total_count = 0 counts = [] entities = [] while line: splits = line.replace("\n", "").split(sep="|") new_alias = splits[0] count = int(splits[1]) entity = splits[2] if new_alias != previous_alias and previous_alias: # done reading the previous alias --> output if len(entities) > 0: selected_entities = [] prior_probs = [] for ent_count, ent_string in zip(counts, entities): if ent_string in wp_titles: wd_id = title_to_id[ent_string] p_entity_givenalias = ent_count / total_count selected_entities.append(wd_id) prior_probs.append(p_entity_givenalias) if selected_entities: try: kb.add_alias( alias=previous_alias, entities=selected_entities, probabilities=prior_probs, ) except ValueError as e: logger.error(e) total_count = 0 counts = [] entities = [] total_count += count if len(entities) < max_entities_per_alias and count >= min_occ: counts.append(count) entities.append(entity) previous_alias = new_alias line = prior_file.readline() def read_kb(nlp, kb_file): kb = KnowledgeBase(vocab=nlp.vocab) kb.load_bulk(kb_file) return kb
36.777778
158
0.648372
749
5,958
4.798398
0.230975
0.037006
0.047579
0.042571
0.293823
0.235392
0.195047
0.195047
0.185031
0.185031
0
0.002789
0.277946
5,958
161
159
37.006211
0.832636
0.06764
0
0.080645
0
0
0.066005
0
0
0
0
0
0
1
0.048387
false
0
0.040323
0
0.120968
0.008065
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
747becfe33166824b286845631f9101c61bac5a1
1,045
py
Python
modulesExecution.py
rando3/leetcode-python
b13bb35fb3cdc9813c62944547d260be2f9cab02
[ "MIT" ]
null
null
null
modulesExecution.py
rando3/leetcode-python
b13bb35fb3cdc9813c62944547d260be2f9cab02
[ "MIT" ]
null
null
null
modulesExecution.py
rando3/leetcode-python
b13bb35fb3cdc9813c62944547d260be2f9cab02
[ "MIT" ]
null
null
null
from collections import deque, defaultdict

# DFS node colors: GRAY = discovered and still on the recursion stack,
# BLACK = fully processed. An unvisited node has no entry at all.
GRAY, BLACK = 0, 1


class Graph:
    """Directed graph with a DFS-based topological ordering."""

    def __init__(self, vertices):
        self.graph = defaultdict(list)  # adjacency list: node -> successors
        self.V = vertices  # No. of vertices

    def addEdge(self, u, v):
        ''' Function to add an edge to graph '''
        self.graph[u].append(v)

    def topological(self):
        """Return a deque of nodes in topological order.

        On detecting a cycle a message is printed and that branch of the
        traversal is abandoned (preserving the original best-effort
        behavior), so the returned ordering may be partial.
        """
        order, enter, state = deque(), set(self.graph), {}

        def dfs(node):
            state[node] = GRAY
            for k in self.graph.get(node, ()):
                sk = state.get(k)
                # BUG FIX: the previous `if not sk:` conflated "unvisited"
                # (None) with "on the recursion stack" (GRAY == 0), so the
                # recursion below was unreachable and every edge was
                # misreported as a cycle.
                if sk == GRAY:
                    # Back edge -> cycle: no valid ordering exists.
                    print("No valid ordering exists.")
                    return
                if sk == BLACK:
                    continue  # already fully processed
                enter.discard(k)
                dfs(k)
            order.appendleft(node)
            state[node] = BLACK

        while enter:
            dfs(enter.pop())
        return order


if __name__ == "__main__":
    g = Graph(3)
    g.addEdge(1, 2)
    g.addEdge(1, 3)
    print(g.topological())
24.302326
78
0.504306
120
1,045
4.291667
0.525
0.069903
0.050485
0
0
0
0
0
0
0
0
0.01092
0.386603
1,045
42
79
24.880952
0.792512
0.083254
0
0
0
0
0.034737
0
0
0
0
0
0
1
0.129032
false
0
0.032258
0
0.258065
0.064516
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
747d2dffa5879ddf1f60b8ac85c5568915e42c9d
178
py
Python
boxPrint/printers/printrun_printer.py
bobofei/Mohou_Box-master
3d1c320a6258422406e2ba2f96ec7986beba1330
[ "Apache-2.0" ]
null
null
null
boxPrint/printers/printrun_printer.py
bobofei/Mohou_Box-master
3d1c320a6258422406e2ba2f96ec7986beba1330
[ "Apache-2.0" ]
null
null
null
boxPrint/printers/printrun_printer.py
bobofei/Mohou_Box-master
3d1c320a6258422406e2ba2f96ec7986beba1330
[ "Apache-2.0" ]
null
null
null
import threaded_printer class Printer(threaded_printer.Printer): def __init__(self, profile, usb_info): threaded_printer.Printer.__init__(self, profile, usb_info)
22.25
66
0.769663
22
178
5.636364
0.454545
0.362903
0.354839
0.290323
0.354839
0
0
0
0
0
0
0
0.146067
178
7
67
25.428571
0.815789
0
0
0
0
0
0
0
0
0
0
0
0
1
0.25
false
0
0.25
0
0.75
0.75
1
0
0
null
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
1
1
0
8
747ef7880794c3ef33f57ad1412aece6844cc7a6
4,661
py
Python
facebook/messenger/hello-world/webhook.py
imsardine/learning
925841ddd93d60c740a62e12d9f57ef15b6e0a20
[ "MIT" ]
null
null
null
facebook/messenger/hello-world/webhook.py
imsardine/learning
925841ddd93d60c740a62e12d9f57ef15b6e0a20
[ "MIT" ]
null
null
null
facebook/messenger/hello-world/webhook.py
imsardine/learning
925841ddd93d60c740a62e12d9f57ef15b6e0a20
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- import os from flask import Flask, request, abort, jsonify import requests SEND_API = 'https://graph.facebook.com/v2.6/me/messages' WEBHOOK_VERIFY_TOKEN = os.environ['WEBHOOK_VERIFY_TOKEN'] PAGE_ACCESS_TOKEN = os.environ['PAGE_ACCESS_TOKEN'] APP_SECRET = os.environ['APP_SECRET'] app = Flask(__name__) @app.route('/webhook', methods=['GET', 'POST']) def webhook(): if request.method == 'GET': # GET for webhook verification return verify_webhook() # POST for message events assert request.is_json data = request.get_json() if data['object'] == 'page': for entry in data['entry']: for event in entry['messaging']: timestamp = event['timestamp'] sender_id = event['sender']['id'] recipient_id = event['recipient']['id'] if 'message' in event: on_message_event(timestamp, sender_id, event['message']) elif 'postback' in event: on_postback_event(timestamp, sender_id, event['postback']) else: abort(400) # Unknown event else: abort(400) # Bad Request return '' def verify_webhook(mode, verify_token): query_params = request.args hub_mode = query_params.get('hub.mode') hub_verify_token = query_params.get('hub.verify_token') if hub_verify_token != WEBHOOK_VERIFY_TOKEN: abort(403) # Forbidden elif hub_mode == 'subscribe': return query_params['hub.challenge'] def on_message_event(timestamp, sender_id, message): if 'text' in message: handle_text_message(sender_id, message['text']) elif 'attachments' in message: pass else: abort(400) def on_postback_event(timestamp, sender_id, postback): payload = postback['payload'] send_text(sender_id, 'Thanks for selecting %s' % payload) def send_text(recipient_id, text): send_message(recipient_id, {'text': text}) def send_message(recipient_id, message): params = {'access_token': PAGE_ACCESS_TOKEN} data = { 'recipient': { 'id': recipient_id }, 'message': message, } resp = requests.post(SEND_API, params=params, json=data) app.logger.info('Message posted: message = %s, response = %s', data, resp.json()) def 
handle_text_message(sender_id, text): if u'吃什麼' in text: send_text(sender_id, u'Judy 爸爸說:不知道') elif text == 'generic': send_message(sender_id, demo_generic_template(sender_id, text)) else: send_text(sender_id, text) def demo_generic_template(sender_id, message): return { 'attachment': { 'type': 'template', # structured message 'payload': { 'template_type': 'generic', 'elements': [ { 'title': 'rift', 'subtitle': 'Next-generation virtual reality', 'item_url': 'https://www.oculus.com/en-us/rift/', 'image_url': 'http://messengerdemo.parseapp.com/img/rift.png', 'buttons': [ { 'type': 'web_url', 'url': 'https://www.oculus.com/en-us/rift/', 'title': 'Open Web URL', }, { 'type': 'postback', 'title': 'Call Postback', 'payload': 'Payload for first bubble' } ], }, { 'title': 'touch', 'subtitle': 'Your Hands, Now in VR', 'item_url': 'https://www.oculus.com/en-us/touch/', 'image_url': 'http://messengerdemo.parseapp.com/img/touch.png', 'buttons': [ { 'type': 'web_url', 'url': 'https://www.oculus.com/en-us/touch/', 'title': 'Open Web URL', }, { 'type': 'postback', 'title': 'Call Postback', 'payload': 'Payload for second bubble' } ], }, ], } } } if __name__ == '__main__': app.run()
34.272059
87
0.488307
451
4,661
4.840355
0.286031
0.051306
0.045809
0.050389
0.288136
0.223546
0.165827
0.130096
0.096198
0.096198
0
0.005287
0.391332
4,661
135
88
34.525926
0.764188
0.027676
0
0.173913
0
0
0.221092
0
0
0
0
0
0.008696
1
0.069565
false
0.008696
0.026087
0.008696
0.130435
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
747f3e4104e555a4cdac60485eb1931d3dd4f55a
18,847
py
Python
custom_components/midea_dehumidifier_lan/config_flow.py
drthanwho/homeassistant-midea-dehumidifier-lan
b22c609bf07fd540ffa3a3ff28aaa2a184578b69
[ "MIT" ]
null
null
null
custom_components/midea_dehumidifier_lan/config_flow.py
drthanwho/homeassistant-midea-dehumidifier-lan
b22c609bf07fd540ffa3a3ff28aaa2a184578b69
[ "MIT" ]
null
null
null
custom_components/midea_dehumidifier_lan/config_flow.py
drthanwho/homeassistant-midea-dehumidifier-lan
b22c609bf07fd540ffa3a3ff28aaa2a184578b69
[ "MIT" ]
null
null
null
"""Config flow for Midea Dehumidifier (Local) integration.""" from __future__ import annotations import ipaddress import logging from typing import Any, Final from homeassistant import data_entry_flow from homeassistant.config_entries import ConfigFlow from homeassistant.const import ( CONF_API_VERSION, CONF_DEVICES, CONF_ID, CONF_IP_ADDRESS, CONF_NAME, CONF_PASSWORD, CONF_TOKEN, CONF_TYPE, CONF_UNIQUE_ID, CONF_USERNAME, ) import voluptuous as vol from midea_beautiful.appliance import AirConditionerAppliance, DehumidifierAppliance from midea_beautiful.cloud import MideaCloud from midea_beautiful.exceptions import ( AuthenticationError, CloudAuthenticationError, CloudError, MideaError, MideaNetworkError, ProtocolError, RetryLaterError, ) from midea_beautiful.lan import LanDevice from midea_beautiful.midea import DEFAULT_APP_ID, DEFAULT_APPKEY, SUPPORTED_APPS from custom_components.midea_dehumidifier_lan import MideaClient from .const import ( # pylint: disable=unused-import CONF_ADVANCED_SETTINGS, CONF_APPID, CONF_APPKEY, CONF_DETECT_AC_APPLIANCES, CONF_MOBILE_APP, CONF_BROADCAST_ADDRESS, CONF_TOKEN_KEY, CONF_USE_CLOUD, CONF_WHAT_TO_DO, CURRENT_CONFIG_VERSION, DEFAULT_APP, DEFAULT_PASSWORD, DEFAULT_USERNAME, DOMAIN, IGNORED_IP_ADDRESS, TAG_CAUSE, TAG_ID, TAG_NAME, ) _LOGGER = logging.getLogger(__name__) IGNORE = "IGNORE" USE_CLOUD = "CLOUD" LAN = "LAN" def _unreachable_appliance_schema( name: str, ): return vol.Schema( { vol.Optional(CONF_WHAT_TO_DO, default=LAN): vol.In( { IGNORE: "Ignore appliance", LAN: "Provide appliance's IPv4 address", USE_CLOUD: "Use cloud API to poll devices", } ), vol.Optional( CONF_IP_ADDRESS, description={"suggested_value": IGNORED_IP_ADDRESS}, ): str, vol.Optional(CONF_NAME, default=name): str, vol.Optional(CONF_TOKEN): str, vol.Optional(CONF_TOKEN_KEY): str, } ) # pylint: disable=too-many-arguments def _advanced_settings_schema( username: str, password: str, appkey: str, appid: int, broadcast_address: str, use_cloud: bool, ): return 
vol.Schema( { vol.Required(CONF_USERNAME, default=username): str, vol.Required(CONF_PASSWORD, default=password): str, vol.Required(CONF_APPKEY, default=appkey): str, vol.Required(CONF_APPID, default=appid): int, vol.Optional(CONF_BROADCAST_ADDRESS, default=broadcast_address): str, vol.Required(CONF_USE_CLOUD, default=use_cloud): bool, vol.Required(CONF_DETECT_AC_APPLIANCES, default=False): bool, } ) def _reauth_schema( password: str, appkey: str, appid: int, ): return vol.Schema( { vol.Required(CONF_PASSWORD, default=password): str, vol.Required(CONF_APPKEY, default=appkey): str, vol.Required(CONF_APPID, default=appid): int, } ) def _user_schema(username: str, password: str, app: str): return vol.Schema( { vol.Required(CONF_USERNAME, default=username): str, vol.Required(CONF_PASSWORD, default=password): str, vol.Optional(CONF_MOBILE_APP, default=app): vol.In(SUPPORTED_APPS.keys()), vol.Required(CONF_ADVANCED_SETTINGS, default=False): bool, } ) class _FlowException(Exception): def __init__(self, message, cause: str = None) -> None: super().__init__() self.message = message self.cause = cause # pylint: disable=too-many-instance-attributes class MideaLocalConfigFlow(ConfigFlow, domain=DOMAIN): """ Configuration flow for Midea dehumidifiers on local network uses discovery based on Midea cloud, so it first requires credentials for it. If some appliances are registered in the cloud, but not discovered, configuration flow will prompt for additional information. 
""" VERSION = CURRENT_CONFIG_VERSION cloud: MideaCloud | None = None # type: ignore appliance_idx = -1 appliances: list[LanDevice] = [] devices_conf: list[dict] = [] conf = {} advanced_settings = False client: Final = MideaClient() error_cause: str = "" errors: dict = {} def _supported_appliance(self, appliance: LanDevice) -> bool: """Checks if appliance is supported by integration""" aircon = False if self.conf.get(CONF_DETECT_AC_APPLIANCES, False): aircon = AirConditionerAppliance.supported(appliance.type) return aircon or DehumidifierAppliance.supported(appliance.type) def _validate_appliance(self, appliance: LanDevice, conf: dict): """ Validates that appliance configuration is correct and matches physical device """ assert self.cloud use_cloud = conf.get(CONF_USE_CLOUD, False) if appliance.address == IGNORED_IP_ADDRESS or ( appliance.address is None and not use_cloud ): _LOGGER.debug("Ignored appliance with id=%s", appliance.appliance_id) return try: if use_cloud: discovered = self.client.appliance_state( cloud=self.cloud, use_cloud=use_cloud, appliance_id=appliance.appliance_id, ) else: try: ipaddress.IPv4Address(appliance.address) except Exception as ex: raise _FlowException( "invalid_ip_address", appliance.address ) from ex discovered = self.client.appliance_state( address=appliance.address, cloud=self.cloud, ) except ProtocolError as ex: raise _FlowException("connection_error", str(ex)) from ex except AuthenticationError as ex: raise _FlowException("invalid_auth", str(ex)) from ex except MideaNetworkError as ex: raise _FlowException("cannot_connect", str(ex)) from ex except MideaError as ex: raise _FlowException("not_discovered", str(ex)) from ex if discovered is None: raise _FlowException("not_discovered", appliance.address) appliance.update(discovered) def _connect_and_discover(self: MideaLocalConfigFlow): """Validates that cloud credentials are valid and discovers local appliances""" cloud = self.client.connect_to_cloud( account=self.conf[CONF_USERNAME], 
password=self.conf[CONF_PASSWORD], appkey=self.conf[CONF_APPKEY], appid=self.conf[CONF_APPID], ) addresses = self.conf.get(CONF_BROADCAST_ADDRESS, []) if isinstance(addresses, str): addresses = [addresses] if appliances := self.client.find_appliances(cloud, addresses=addresses): self.devices_conf = [{} for _ in appliances] else: self.devices_conf = [] self.appliances = appliances self.cloud = cloud async def _validate_discovery_phase(self, user_input: dict[str, Any] | None): assert user_input is not None if self.advanced_settings: assert self.conf is not None self.conf[CONF_APPKEY] = user_input[CONF_APPKEY] self.conf[CONF_APPID] = user_input[CONF_APPID] if address := user_input.get(CONF_BROADCAST_ADDRESS): try: ipaddress.IPv4Address(address) except Exception as ex: raise _FlowException("invalid_ip_address", address) from ex self.conf[CONF_BROADCAST_ADDRESS] = address self.conf[CONF_USE_CLOUD] = user_input[CONF_USE_CLOUD] self.conf[CONF_DETECT_AC_APPLIANCES] = user_input[CONF_DETECT_AC_APPLIANCES] else: self.conf = user_input self.conf[CONF_USE_CLOUD] = False self.conf[CONF_DETECT_AC_APPLIANCES] = False app = user_input.get(CONF_MOBILE_APP, DEFAULT_APP) self.conf.update(SUPPORTED_APPS.get(app, SUPPORTED_APPS[DEFAULT_APP])) if user_input.get(CONF_ADVANCED_SETTINGS): return await self.async_step_advanced_settings() self.appliance_idx = -1 await self.hass.async_add_executor_job(self._connect_and_discover) if self.conf[CONF_USE_CLOUD]: for i, appliance in enumerate(self.appliances): self.devices_conf[i][CONF_USE_CLOUD] = True else: for i, appliance in enumerate(self.appliances): if self._supported_appliance(appliance): if not appliance.address: self.appliance_idx = i break if self.appliance_idx >= 0: return await self.async_step_unreachable_appliance() return await self._async_add_entry() def _process_exception(self, ex: Exception): if isinstance(ex, _FlowException): self.error_cause = str(ex.cause) self.errors["base"] = ex.message elif isinstance(ex, 
CloudAuthenticationError): self.error_cause = f"{ex.error_code} - {ex.message}" self.errors["base"] = "invalid_auth" elif isinstance(ex, CloudError): self.error_cause = f"{ex.error_code} - {ex.message}" self.errors["base"] = "midea_client" elif isinstance(ex, RetryLaterError): self.error_cause = f"{ex.error_code} - {ex.message}" self.errors["base"] = "retry_later" elif isinstance(ex, MideaError): self.error_cause = f"{ex.message}" self.errors["base"] = "midea_client" else: raise ex async def _do_validate(self, user_input: dict[str, Any]): try: return await self._validate_discovery_phase(user_input) except Exception as ex: # pylint: disable=broad-except self._process_exception(ex) return None async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> data_entry_flow.FlowResult: self.advanced_settings = False if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") self.errors = {} self.error_cause = "" username = DEFAULT_USERNAME password = DEFAULT_PASSWORD app = DEFAULT_APP if user_input is not None: username = user_input.get(CONF_USERNAME, username) password = user_input.get(CONF_PASSWORD, password) app = user_input.get(CONF_MOBILE_APP, app) res = await self._do_validate(user_input) if res: return res return self.async_show_form( step_id="user", data_schema=_user_schema(username=username, password=password, app=app), description_placeholders=self._placeholders(), errors=self.errors, ) async def async_step_advanced_settings( self, user_input: dict[str, Any] | None = None ): """Step for managing advanced settings""" self.errors = {} self.error_cause = "" self.advanced_settings = True if user_input is not None: res = await self._do_validate(user_input) if res: return res else: user_input = {} username = user_input.get( CONF_USERNAME, self.conf.get(CONF_USERNAME, DEFAULT_USERNAME) ) password = user_input.get( CONF_PASSWORD, self.conf.get(CONF_PASSWORD, DEFAULT_PASSWORD) ) appkey = user_input.get(CONF_APPKEY, 
DEFAULT_APPKEY) appid = user_input.get(CONF_APPID, DEFAULT_APP_ID) broadcast_address = user_input.get( CONF_BROADCAST_ADDRESS, self.conf.get(CONF_BROADCAST_ADDRESS, "") ) use_cloud = user_input.get(CONF_USE_CLOUD, self.conf.get(CONF_USE_CLOUD, False)) return self.async_show_form( step_id="advanced_settings", data_schema=_advanced_settings_schema( username=username, password=password, appkey=appkey, appid=appid, broadcast_address=broadcast_address, use_cloud=use_cloud, ), description_placeholders=self._placeholders(), errors=self.errors, ) async def async_step_unreachable_appliance( self, user_input: dict[str, Any] | None = None ): """Manage the appliances that were not discovered automatically on LAN.""" errors: dict = {} self.error_cause = "" appliance = self.appliances[self.appliance_idx] device_conf = self.devices_conf[self.appliance_idx] if user_input is not None: what_to_do = user_input.get(CONF_WHAT_TO_DO, LAN) appliance.address = ( user_input.get(CONF_IP_ADDRESS, IGNORED_IP_ADDRESS) if what_to_do == LAN else IGNORED_IP_ADDRESS ) appliance.name = user_input.get(CONF_NAME, appliance.name) appliance.token = user_input.get(CONF_TOKEN, "") appliance.key = user_input.get(CONF_TOKEN_KEY, "") device_conf[CONF_USE_CLOUD] = what_to_do == USE_CLOUD try: await self.hass.async_add_executor_job( self._validate_appliance, appliance, device_conf, ) # Find next unreachable appliance self.appliance_idx = self.appliance_idx + 1 while self.appliance_idx < len(self.appliances): if self._supported_appliance(appliance): if self.appliances[self.appliance_idx].address is None: return await self.async_step_unreachable_appliance() self.appliance_idx = self.appliance_idx + 1 # If no unreachable appliances, create entry if self.appliance_idx >= len(self.appliances): return await self._async_add_entry() appliance = self.appliances[self.appliance_idx] except _FlowException as ex: self.error_cause = str(ex.cause) errors["base"] = ex.message name = appliance.name return 
self.async_show_form( step_id="unreachable_appliance", data_schema=_unreachable_appliance_schema(name), description_placeholders=self._placeholders(appliance=appliance), errors=errors, ) def _placeholders(self, appliance: LanDevice = None): placeholders = { TAG_CAUSE: self.error_cause or "", } if appliance: placeholders[TAG_ID] = appliance.unique_id placeholders[TAG_NAME] = appliance.name return placeholders async def _async_add_entry(self): assert self.conf is not None for i, appliance in enumerate(self.appliances): if not self._supported_appliance(appliance): continue if self.devices_conf[i].get(CONF_USE_CLOUD, False) or ( appliance.address and appliance.address != IGNORED_IP_ADDRESS ): self.devices_conf[i].update( { CONF_IP_ADDRESS: appliance.address, CONF_UNIQUE_ID: appliance.unique_id, CONF_ID: appliance.appliance_id, CONF_NAME: appliance.name, CONF_TYPE: appliance.type, CONF_TOKEN: appliance.token, CONF_TOKEN_KEY: appliance.key, CONF_API_VERSION: appliance.version, } ) self.conf[CONF_DEVICES] = self.devices_conf existing_entry = await self.async_set_unique_id(self.conf[CONF_USERNAME]) if existing_entry: self.hass.config_entries.async_update_entry( entry=existing_entry, data=self.conf, ) # Reload the config entry otherwise devices will remain unavailable self.hass.async_create_task( self.hass.config_entries.async_reload(existing_entry.entry_id) ) return self.async_abort(reason="reauth_successful") if len(self.devices_conf) == 0: return self.async_abort(reason="no_configured_devices") return self.async_create_entry( title="Midea Dehumidifiers", data=self.conf, ) async def async_step_reauth(self, config): """Handle reauthorization request from Abode.""" self.conf = {**config} return await self.async_step_reauth_confirm() async def async_step_reauth_confirm(self, user_input: dict[str, Any] | None = None): """Handle reauthorization flow.""" self.errors = {} username = self.conf.get(CONF_USERNAME, DEFAULT_USERNAME) password = "" appkey = self.conf.get(CONF_APPKEY, 
DEFAULT_APPKEY) appid = self.conf.get(CONF_APPID, DEFAULT_APP_ID) if user_input is not None: password = user_input.get(CONF_PASSWORD, "") appkey = user_input.get(CONF_APPKEY, DEFAULT_APPKEY) appid = user_input.get(CONF_APPID, DEFAULT_APP_ID) try: self.client.connect_to_cloud( account=username, password=password, appkey=appkey, appid=appid, ) except Exception as ex: # pylint: disable=broad-except self._process_exception(ex) else: self.conf[CONF_USERNAME] = username self.conf[CONF_PASSWORD] = password self.conf[CONF_APPKEY] = appkey self.conf[CONF_APPID] = appid return await self._async_add_entry() return self.async_show_form( step_id="reauth_confirm", data_schema=_reauth_schema( password=password, appkey=appkey, appid=appid, ), description_placeholders=self._placeholders(), errors=self.errors, )
36.314066
88
0.607205
2,029
18,847
5.375062
0.120749
0.033009
0.022006
0.029342
0.404915
0.30121
0.219879
0.161287
0.128461
0.108839
0
0.000694
0.312092
18,847
518
89
36.38417
0.840494
0.044729
0
0.284738
0
0
0.030973
0.003667
0
0
0
0
0.009112
1
0.022779
false
0.045558
0.034169
0.009112
0.143508
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
747f4ee04719abc2662ba9a8f13f2719e601fb4b
2,297
py
Python
solver.py
cserradag96/nqueens
350dfddb29419560d94f5218e5f2e75d2be9ccf3
[ "MIT" ]
null
null
null
solver.py
cserradag96/nqueens
350dfddb29419560d94f5218e5f2e75d2be9ccf3
[ "MIT" ]
null
null
null
solver.py
cserradag96/nqueens
350dfddb29419560d94f5218e5f2e75d2be9ccf3
[ "MIT" ]
null
null
null
#!/usr/bin/python3 # -*- encoding: utf-8 -*- ####################################################################################################################### # DESCRIPTION: ####################################################################################################################### # Solver for N-Queens using Minisat ####################################################################################################################### # AUTHORS: ####################################################################################################################### # Carlos Serrada, 13-11347, <cserradag96@gmail.com> # Juan Ortiz, 13-11021 <ortiz.juan14@gmail.com> ####################################################################################################################### # PATH: ####################################################################################################################### from sys import path # System path from os import getcwd # Current path from os.path import join # Join paths # Add custom lib path to application path path.append(join(getcwd(), "lib")) ####################################################################################################################### # DEPENDENCIES: ####################################################################################################################### import sys from nqueens import * ####################################################################################################################### # MAIN: ####################################################################################################################### if __name__ == "__main__": size = sys.argv[1] printStatus("Generando CNF") puzzle = NQueens(readNQ(size)) printStatus("Guardando CNF") writeFile(puzzle.cnf, "input.txt") printStatus("Ejecutando minisat") minisat("input.txt", "output.txt") printStatus("Generando imagen") writeFile(puzzle.genBitmap("output.txt"), namePBM(size)) 
####################################################################################################################### # :) #######################################################################################################################
40.298246
119
0.252068
108
2,297
5.287037
0.583333
0.042032
0.035026
0
0
0
0
0
0
0
0
0.009882
0.07488
2,297
56
120
41.017857
0.258824
0.129734
0
0
0
0
0.196043
0
0
0
0
0
0
1
0
false
0
0.3125
0
0.3125
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
2
747f76629e7eb85c0e9005d5800b74e6ec8c3805
4,793
py
Python
codango/codango/settings/base.py
andela-ooshodi/codango-debug
fa68f4305586c2d7f28307f10204c3b50f731fef
[ "MIT" ]
null
null
null
codango/codango/settings/base.py
andela-ooshodi/codango-debug
fa68f4305586c2d7f28307f10204c3b50f731fef
[ "MIT" ]
null
null
null
codango/codango/settings/base.py
andela-ooshodi/codango-debug
fa68f4305586c2d7f28307f10204c3b50f731fef
[ "MIT" ]
null
null
null
""" Django settings for codango project. Generated by 'django-admin startproject' using Django 1.8.3. For more information on this file, see https://docs.djangoproject.com/en/1.8/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.8/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) from __future__ import absolute_import import os import cloudinary # This will make sure the app is always imported when # Django starts so that shared_task will use this app. from .celery import app as celery_app from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS from django.contrib.messages import constants as message_constants from celery.schedules import crontab BASE_DIR = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) BOWER_INSTALLED_APPS = ( 'mdi', 'moment', 'jquery', 'bootstrap', 'ace-builds', ) BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'static') # context processor for django-endless-pagination TEMPLATE_CONTEXT_PROCESSORS += ( 'django.core.context_processors.request', ) ENDLESS_PAGINATION_LOADING = """<img src="/static/img/ajax-loader.gif" alt="loading"/>""" STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'djangobower.finders.BowerFinder', ) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.getenv('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! 
DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'account', 'resources', 'userprofile', 'comments', 'votes', 'bootstrapform', 'cloudinary', 'djangobower', 'endless_pagination', 'djcelery' ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', ) ROOT_URLCONF = 'codango.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] AUTHENTICATION_BACKEND = ( 'django.contrib.auth.backends.ModelBackend', ) WSGI_APPLICATION = 'codango.wsgi.application' # Internationalization # https://docs.djangoproject.com/en/1.8/topics/i18n/ LANGUAGE_CODE = 'en' TIME_ZONE = 'Africa/Lagos' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.8/howto/static-files/ STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) STATIC_URL = '/static/' APPEND_SLASH = False STATIC_ROOT = 'staticfiles' STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') MEDIA_URL = '/media/' cloudinary.config( cloud_name=os.getenv('cloud_name'), api_key=os.getenv('api_key'), 
api_secret=os.getenv('api_secret') ) # custom message tag for django messaging middleware MESSAGE_TAGS = { message_constants.ERROR: 'danger' } # Custom Email ADMIN_EMAIL = 'olufunmilade.oshodi@andela.com' CODANGO_EMAIL = 'noreply@codango.com' # Celery configuration # The backend used to store task results using RabbitMQ as a broker # This sends results back as AMQP messages CELERY_RESULT_BACKEND = 'amqp' # Scheduling periodic task with Celery CELERYBEAT_SCHEDULE = { # Executes every sunday midnight 'popular-post-updates': { 'task': 'resources.tasks.send_recent_posts', 'schedule': crontab(), 'args': (ADMIN_EMAIL,), }, } # Celery Test Runner for unit tests TEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'
25.494681
89
0.722721
556
4,793
6.093525
0.42446
0.061393
0.032468
0.036895
0.098583
0.093566
0.06523
0.06523
0
0
0
0.004715
0.15919
4,793
187
90
25.631016
0.83598
0.264552
0
0
1
0
0.450672
0.331141
0
0
0
0
0
1
0
false
0
0.063636
0
0.063636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
747fcc5d8e8611eca8dd7a7d6b6a6a5d0dd6a8da
2,343
py
Python
pysnmp/DGS1100-24P-MGMT-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/DGS1100-24P-MGMT-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/DGS1100-24P-MGMT-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module DGS1100-24P-MGMT-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DGS1100-24P-MGMT-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 18:30:19 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "ValueRangeConstraint") dgs1100_24P, = mibBuilder.importSymbols("DGS1100PRIMGMT-MIB", "dgs1100-24P") SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, ObjectIdentity, MibIdentifier, NotificationType, IpAddress, TimeTicks, Unsigned32, Gauge32, Counter32, Bits, Counter64, Integer32, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "NotificationType", "IpAddress", "TimeTicks", "Unsigned32", "Gauge32", "Counter32", "Bits", "Counter64", "Integer32", "iso") TextualConvention, DisplayString, MacAddress, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "MacAddress", "RowStatus") swL2MgmtMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 134, 9, 9)) if mibBuilder.loadTexts: swL2MgmtMIB.setLastUpdated('201404260000Z') if mibBuilder.loadTexts: swL2MgmtMIB.setOrganization('D-Link Corp.') class PortList(OctetString): subtypeSpec = 
OctetString.subtypeSpec + ValueSizeConstraint(0, 127) class VlanIndex(Unsigned32): pass class VlanId(Integer32): subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(1, 4094) mibBuilder.exportSymbols("DGS1100-24P-MGMT-MIB", VlanId=VlanId, swL2MgmtMIB=swL2MgmtMIB, VlanIndex=VlanIndex, PYSNMP_MODULE_ID=swL2MgmtMIB, PortList=PortList)
80.793103
477
0.788732
236
2,343
7.817797
0.5
0.099729
0.022764
0.027642
0.289431
0.182114
0.182114
0.182114
0.182114
0.182114
0
0.070534
0.080239
2,343
28
478
83.678571
0.785615
0.142552
0
0
0
0
0.283
0.022
0
0
0
0
0
1
0
false
0.055556
0.444444
0
0.722222
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
1
0
0
0
0
2
7481800efb51f4d1a5766aebafa35170462e666c
38
py
Python
autox/autox_ts/metrics/__init__.py
OneToolsCollection/4paradigm-AutoX
f8e838021354de17f5bb9bc44e9d68d12dda6427
[ "Apache-2.0" ]
null
null
null
autox/autox_ts/metrics/__init__.py
OneToolsCollection/4paradigm-AutoX
f8e838021354de17f5bb9bc44e9d68d12dda6427
[ "Apache-2.0" ]
null
null
null
autox/autox_ts/metrics/__init__.py
OneToolsCollection/4paradigm-AutoX
f8e838021354de17f5bb9bc44e9d68d12dda6427
[ "Apache-2.0" ]
null
null
null
from .metrics import _get_score_metric
38
38
0.894737
6
38
5.166667
1
0
0
0
0
0
0
0
0
0
0
0
0.078947
38
1
38
38
0.885714
0
0
0
0
0
0
0
0
0
0
0
0
1
0
true
0
1
0
1
0
1
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
1
0
1
0
0
6
74858fe3b513ef3751cf3b9d1d2671162ca352a5
1,136
py
Python
setup.py
abuvanth/framework-identifier
0619b1831bf29eaabdb8b7a0ef1abfd2328b39f9
[ "MIT" ]
null
null
null
setup.py
abuvanth/framework-identifier
0619b1831bf29eaabdb8b7a0ef1abfd2328b39f9
[ "MIT" ]
null
null
null
setup.py
abuvanth/framework-identifier
0619b1831bf29eaabdb8b7a0ef1abfd2328b39f9
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- import setuptools,os with open("README.md", "r") as fh: long_description = fh.read() thelibFolder = os.path.dirname(os.path.realpath(__file__)) requirementPath = thelibFolder + '/requirements.txt' install_requires = [] # Examples: ["gunicorn", "docutils>=0.3", "lxml==0.5a7"] if os.path.isfile(requirementPath): with open(requirementPath) as f: install_requires = f.read().splitlines() setuptools.setup( name="wappalyze", version="1.6", author="Shaddy Garg", author_email="shaddygarg1@gmail.com", description="Framework Identifier tool", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/shaddygarg/framework-identifier", packages=setuptools.find_packages(), package_dir={'wappalyze': 'wappalyze'}, package_data={'wappalyze': ['apps.json']}, install_requires=install_requires, scripts=['wapp'], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], )
34.424242
78
0.680458
127
1,136
5.944882
0.661417
0.07947
0.050331
0.07947
0
0
0
0
0
0
0
0.010493
0.161092
1,136
32
79
35.5
0.781742
0.085387
0
0
0
0
0.295367
0.02027
0
0
0
0
0
1
0
false
0
0.034483
0
0.034483
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74859fe61f59a2e68fef51e02980d620f6fd3fff
4,089
py
Python
convert.py
FlorianPoot/Faroad-PnP
aa31ce80f8a3888a903eac92827e7aef6c58dfe3
[ "MIT" ]
null
null
null
convert.py
FlorianPoot/Faroad-PnP
aa31ce80f8a3888a903eac92827e7aef6c58dfe3
[ "MIT" ]
null
null
null
convert.py
FlorianPoot/Faroad-PnP
aa31ce80f8a3888a903eac92827e7aef6c58dfe3
[ "MIT" ]
null
null
null
import math import sqlite3 import re DATABASE_PATH = "line1.db" class Convert: model_altium = {"desc": 0, "designator": (1, 10), "position": (2, 3), "rotation": 9} model_kicad = {"desc": 0, "designator": (1, 2), "position": (3, 4), "rotation": 5} model_mnt = {"desc": 0, "designator": (4, 5), "position": (1, 2), "rotation": 3} model_ultiboard = {"desc": 0, "designator": (1, 7), "position": (2, 3), "rotation": 4} def __init__(self, path: str): self.path = path def parse(self) -> list: """Parse data from pick and place file""" # TODO Work only for 4 types of file # TODO Read already generated file with open(self.path, "r") as file: lines = file.readlines() lines = [line.replace("\n", "") for line in lines] # Remove new line if lines[0].split() == ['Designator', 'Footprint', 'Mid', 'X', 'Mid', 'Y', 'Ref', 'X', 'Ref', 'Y', 'Pad', 'X', 'Pad', 'Y', 'TB', 'Rotation', 'Comment']: file_model = self.model_altium lines = [[s.strip() for s in line.split(" ") if s] for line in lines] lines = lines[2:] # Remove header elif lines[0] == "Ref,Val,Package,PosX,PosY,Rot,Side\n": file_model = self.model_kicad lines = [line.replace('"', "") for line in lines] lines = [line.split(",") for line in lines] lines = lines[1:] # Remove header elif lines[0] == "Ultiboard Information Export File": file_model = self.model_ultiboard lines = lines[9:] # Remove header lines = [line.split() for line in lines] elif self.path[-3:] == "mnt": file_model = self.model_mnt lines = [line.replace("-", " ") for line in lines] lines = [line.split() for line in lines] else: raise ValueError("Unknown file model") data = list() for line in lines: if len(line) > 0: d = dict() d["desc"] = "".join(re.findall("[a-zA-Z]", line[file_model["desc"]])) digit = re.findall(r"\d+", line[file_model["desc"]]) if len(digit) > 0: d["desc"] += digit[0].zfill(3) d["designator"] = f"{line[int(file_model['designator'][0])]} {line[int(file_model['designator'][1])]}" d["position"] = [float(re.findall(r"\d+\.\d+|\d+", line[i])[0]) for i 
in file_model["position"]] d["rotation"] = float(line[file_model["rotation"]]) data.append(d) return data @staticmethod def search(designator: str) -> list: """Look in database""" matches = re.split("[_ :]", designator) conn = sqlite3.connect(DATABASE_PATH) cur = conn.cursor() dat = cur.execute("SELECT * FROM chip_lib;").fetchall() # chip_name = [d[2] for d in dat] # Select chip_name for matche in matches: temp = list() for d in dat: if matche.upper() in d[2].upper(): temp.append(d) if len(temp) > 0: dat = temp return dat @staticmethod def panel_dimensions(points: list) -> tuple: """Get panel dimensions""" x = [p[0] for p in points] y = [p[1] for p in points] x_min, y_min = min(x), min(y) x_max, y_max = max(x), max(y) return round(abs(x_max - x_min), 3), round(abs(y_max - y_min), 3) @staticmethod def rotate(origin: tuple, points: list, angle: int) -> list: """Rotate a list of points clockwise by a given angle around a given origin""" new_pos = list() angle = math.radians(-angle) for p in points: ox, oy = origin px, py = p qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy) qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy) new_pos.append([qx, qy]) return new_pos
32.452381
118
0.512106
531
4,089
3.868173
0.278719
0.048199
0.035054
0.054528
0.133398
0.08666
0.070107
0.056475
0.056475
0.056475
0
0.018162
0.32673
4,089
125
119
32.712
0.727933
0.079237
0
0.0625
0
0.0125
0.127375
0.031041
0
0
0
0.008
0
1
0.0625
false
0
0.0375
0
0.2125
0.0125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7488b095e308da429d77f9667247659a77a3caeb
10,490
py
Python
src/device/Device.py
Electronya/PirBlaster
804b86b2699183c40f2157cba0e151bec9d4725a
[ "MIT" ]
1
2020-11-02T09:37:43.000Z
2020-11-02T09:37:43.000Z
src/device/Device.py
Electronya/PirBlasterBackend
804b86b2699183c40f2157cba0e151bec9d4725a
[ "MIT" ]
1
2021-06-26T14:56:31.000Z
2021-06-26T14:56:31.000Z
src/device/Device.py
Electronya/PirBlasterBackend
804b86b2699183c40f2157cba0e151bec9d4725a
[ "MIT" ]
1
2020-11-02T09:34:33.000Z
2020-11-02T09:34:33.000Z
from logging import Logger import paho.mqtt.client as mqtt from ircodec.command import CommandSet import os from exceptions import CommandNotFound, \ CommandFileAccess class Device(): # Constants STATUS_TOPIC = 'status' CMD_TOPIC = 'command' RESULT_TOPIC = 'result' ONLINE_MSG = 'ONLINE' OFFLINE_MSG = 'OFFLINE' SUCCESS_MSG = 'done' ERROR_MSG = 'unsupported' def __init__(self, logger, appConfig, devConfig, isNew=False): """ Constructor. Params: logger: The logger. appConfig: The application configuration. devConfig: The device configuration. isNew: The flag indicating if the device is a new one, or an existing commande set exists. """ self.config = devConfig self.logger = logger.getLogger(f"{devConfig['location']}." f"{devConfig['name']}") if isNew: self.logger.info('Creating new device') name = self.config['commandSet']['model'] emitter = self.config['commandSet']['emitterGpio'] receiver = self.config['commandSet']['receiverGpio'] description = self.config['commandSet']['description'] self.commandSet = CommandSet(name, emitter_gpio=emitter, receiver_gpio=receiver, description=description) else: self.logger.info('Loading existing device') manufacturer = self.config['commandSet']['manufacturer'] model = self.config['commandSet']['model'] try: self.commandSet = CommandSet.load(os.path.join('./commandSets', manufacturer, f"{model}." f"json")) except Exception: raise CommandFileAccess('unable to access the command file.') self.baseTopic = f"{self.config['topicPrefix']}/{self.config['location']}/{self.config['name']}/" # noqa: E501 self._initMqttClient(appConfig.getUserName(), appConfig.getUserPassword(), appConfig.getBrokerHostname(), appConfig.getBrokerPort()) def _initMqttClient(self, userName, userPassword, brokerIp, brokerPort): """ Initialize the MQTT client. Params: userName: The user name for connecting to the broker. userPassword: The user password for connecting to the broker. brokerHostname: The broker hostname. brokerPort: The broker port. 
""" self.client = mqtt.Client(client_id=f"{self.config['location']}." f"{self.config['name']}") self.client.on_connect = self._on_connect self.client.on_disconnect = self._on_disconnect self.client.on_message = self._on_message self.client.on_publish = self._on_publish self.client.on_subscribe = self._on_subscribe self.client.on_log = self._on_log willTopic = self.baseTopic + self.STATUS_TOPIC self.client.will_set(willTopic, self.OFFLINE_MSG, self.config['lastWill']['qos'], self.config['lastWill']['retain']) self.client.username_pw_set(userName, userPassword) # TODO: Implement switch for secure or not. # self.client.tls_set() # self.client.tls_insecure_set(True) self.logger.info(f"Connecting to {brokerIp}:{brokerPort}") self.logger.debug(f"Connecting as {userName} with password " f"{userPassword}") self.client.connect(brokerIp, port=brokerPort) def _publishCmdResult(self, success): """ Publish a command result. Params: success: The flag indicating to send success or fail result. """ resultTopic = self.baseTopic + self.RESULT_TOPIC if success: self.logger.info('Command sent') self.client.publish(resultTopic, payload=self.SUCCESS_MSG) else: self.logger.warning('Command unsupported') self.client.publish(resultTopic, payload=self.ERROR_MSG) def _on_connect(self, client, usrData, flags, rc): """ The on connect callback. Params: client: The mqtt client. usrData: User data. flags: The connection flags. rc: The connection result. """ self.logger.info('Connected') self.logger.debug(f"rc {rc}") statusTopic = self.baseTopic + self.STATUS_TOPIC self.client.publish(statusTopic, payload=self.ONLINE_MSG, qos=1, retain=True) cmdTopic = self.baseTopic + self.CMD_TOPIC self.client.subscribe(cmdTopic) def _on_disconnect(self, client, usrData, rc): """ The on disconnect callback. Params: client: The mqtt client. usrData: User data. flags: The connection flags. rc: The connection result. 
""" self.logger.info('Disconnected') self.logger.debug(f"rc {rc}") def _on_message(self, client, usrData, msg): """ The on message callback. Params: client: The mqtt client. usrData: User data. msg: The message data. """ reuslt = True receivedMsg = msg.payload.decode('utf-8') self.logger.info(f"Message recieved {receivedMsg}") try: for i in range(0, 4): self.logger.debug(f"Sending packet #{i}") gap = self.config['commandSet']['packetGap'] self.commandSet.emit(receivedMsg, emit_gap=gap) except KeyError as e: self.logger.warning(str(e)) reuslt = False self._publishCmdResult(reuslt) def _on_publish(self, client, usrData, mid): """ The on publish callback. Params: client: The mqtt client. usrData: User data. mid: The message ID that have been published. """ self.logger.info('Message published') self.logger.debug(f"mid {mid}") def _on_subscribe(self, client, usrData, mid, grantedQoS): """ The on subscribe callback. Params: client: The mqtt client. usrData: User data. mid: The message ID that have been published. grantedQoS: The granted QoS for the subcription. """ self.logger.info(f"Subscibed with QoS {grantedQoS}") self.logger.debug(f"mid {mid}") def _on_log(self, client, usrData, logLevel, logMsg): """ The on log callback. Params: client: The mqtt client. usrData: User data. logLevel: The level of the log message. logMsg: The log message. """ switcher = { mqtt.MQTT_LOG_INFO: self.logger.info, mqtt.MQTT_LOG_NOTICE: self.logger.info, mqtt.MQTT_LOG_WARNING: self.logger.warning, mqtt.MQTT_LOG_ERR: self.logger.error, mqtt.MQTT_LOG_DEBUG: self.logger.debug, } switcher[logLevel](logMsg) def startLoop(self): """ Start the network loop. """ self.client.loop_start() def stopLoop(self): """ Stop the network loop. """ self.client.loop_stop() self.client.disconnect() def getName(self): """ Get the device name. Return: The device name. """ return self.config['name'] def getLocation(self): """ Get the device location, Return: The device location. 
""" return self.config['location'] def getConfig(self): """ Get the device configuration. Return: The device configuration. """ self.logger.debug('Getting device config') return self.config def setConfig(self, config): """ Set the device configuration. Params: config: The device configuration. """ self.logger.debug(f"Setting device config to {config}") self.config = config def getCommandList(self): """ Get the device command list. Return: The device command list. """ self.logger.debug('Getting command list') cmdSetJson = self.commandSet.to_json() return cmdSetJson['commands'].keys() def addCommand(self, command, description): """" Add a command to the device. Params: command: The command name. description: The command description. """ self.logger.debug(f"Adding command {command} to command set") self.commandSet.add(command, description=description) def deleteCommand(self, command): """ Delete a command from the device. Params: command: The command name. Raise: CommandNotFound if the requested command is not supported. """ self.logger.debug(f"Deleting command {command} from command set") try: self.commandSet.remove(command) except KeyError: raise CommandNotFound(command) def saveCommandSet(self): """ Save the device command set. Raise: CommandFileAccess if the save operation fail. """ try: self.commandSet.save_as(os.path.join('./commandSets', self.config['commandSet']['manufacturer'], f"{self.config['commandSet']['model']}" f".json")) except Exception: raise CommandFileAccess('unable to access the command file.')
33.301587
120
0.538418
1,012
10,490
5.504941
0.211462
0.052055
0.03231
0.025848
0.210734
0.202118
0.148627
0.122061
0.112368
0.095136
0
0.001053
0.366444
10,490
314
121
33.407643
0.837195
0.250906
0
0.097222
0
0
0.149661
0.029874
0
0
0
0.003185
0
1
0.131944
false
0.034722
0.034722
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7488fb87c2da80cc7ce4b43784746c835d6f4aeb
52
py
Python
test/inventory/mock_inventories/single_host1.py
oddlama/forge
d09b0f309ce7dcda79dc03765473b48732c71845
[ "MIT" ]
14
2021-12-17T10:38:27.000Z
2022-03-02T01:20:01.000Z
test/inventory/mock_inventories/single_host1.py
oddlama/forge
d09b0f309ce7dcda79dc03765473b48732c71845
[ "MIT" ]
2
2022-01-11T13:31:09.000Z
2022-02-03T15:41:43.000Z
test/inventory/mock_inventories/single_host1.py
oddlama/forge
d09b0f309ce7dcda79dc03765473b48732c71845
[ "MIT" ]
2
2022-02-03T15:20:51.000Z
2022-02-03T15:45:11.000Z
inventory_var = "from_inventory" hosts = ["host1"]
13
32
0.711538
6
52
5.833333
0.833333
0
0
0
0
0
0
0
0
0
0
0.022222
0.134615
52
3
33
17.333333
0.755556
0
0
0
0
0
0.365385
0
0
0
0
0
0
1
0
false
0
0
0
0
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
748b0cd71a8c5de3ca15b5e64c2515f58ee12902
5,459
py
Python
hrssCalcbulk from folder of .fit files.py
Emberflurry/Bulk-HRSS-TSS-calc-from-Garmin-.fitfiles
d82f064ee0a97b2d826f434b5a21e8869c090e17
[ "MIT" ]
2
2021-08-21T14:05:45.000Z
2022-03-26T02:22:02.000Z
hrssCalcbulk from folder of .fit files.py
Emberflurry/Bulk-HRSS-TSS-calc-from-Garmin-.fitfiles
d82f064ee0a97b2d826f434b5a21e8869c090e17
[ "MIT" ]
1
2021-07-19T17:45:13.000Z
2021-07-19T19:18:07.000Z
hrssCalcbulk from folder of .fit files.py
Emberflurry/Bulk-HRSS-TSS-calc-from-Garmin-.fitfiles
d82f064ee0a97b2d826f434b5a21e8869c090e17
[ "MIT" ]
null
null
null
import os import datetime from fitparse import * import pandas as pd import numpy as np from tqdm import tqdm from datetime import datetime import re # import matplotlib.pyplot as plt (not needed I guess) directory = 'fitfiles' # may want to make this more flexible--ie: not just in the directory of the code...works for now tho and not bad. #Note^: may need to rename, perhaps make easy tkinter interface for picking a local directory or have part of the program move the fit files to the project directory automatically # if files need renaming INTEGRATE IN THE FUTURE!!!! ESPECIALLY WITH FULL GARMIN->OUTPUT WORKFLOW AUTO!!! def fitfile_decapitalization(): rename_dict = {'FIT': 'fit'} for filename in os.listdir(directory): base_file, ext = os.path.splitext(filename) ext = ext.replace('.','') if ext in rename_dict: new_ext = rename_dict[ext] new_file = base_file + '.' + new_ext old_path = os.path.join(directory, filename) new_path = os.path.join(directory, new_file) os.rename(old_path, new_path) fitfile_decapitalization() # HRSS Calc--PERSONAL INFO--REQUIRED FOR CALCULATIONS TO BE ACCURATE lthr = 191.0 # heart rate(bpm) at lactate threshold my_maxhr = 212 # max heart rate(bpm) my_rhr = 50 # resting heart rate(bpm) my_sex = "MALE" eulersNum = 2.7182818 # duh if my_sex == "MALE": my_baseconstant = .64 my_yvalue = 1.92 else: my_yvalue = 1.67 my_baseconstant = .86 # component calcs of the multi-part exponential HRSS equation: my_hrrAtLT = ((lthr - my_rhr) / (my_maxhr - my_rhr)) sixtyatLTHR_SS = 60 * my_hrrAtLT * my_baseconstant * ( eulersNum ** (my_yvalue * my_hrrAtLT)) # aka "N" in relevant equations N_ova_hundy = sixtyatLTHR_SS / 100 hundy_ova_N = 100 / sixtyatLTHR_SS def load_workout(workout_file): """ Load fitfile and transforms it into a pandas Dataframe. Nan Values are replaced. 
""" fitfile = FitFile(workout_file) # This is an ugly hack to avoid timing issues while True: try: fitfile.messages break except KeyError: continue # Get all data messages that are of type "record" workout = [] for record in fitfile.get_messages('record'): r = {} # Go through all the data entries in this record for record_data in record: r[record_data.name] = record_data.value # add the record(s) to the workout file workout.append(r) # not used, don't remember why, but im not touching it. """workout_df = pd.DataFrame(workout) workout_df.fillna(method='ffill', inplace=True) workout_df.fillna(method='backfill', inplace=True)""" # save as a df (specifically a numpy array) workout = np.array(workout) return workout def get_date(workout_df): # pass the workout df, returns the date workout_date = workout_df['timestamp'][0].date() return workout_date def gett_date(string): # splits a date that is input for future timestamp parsing split = [] for i in re.split("-|T|:| ", string)[:-1]: if (i[0] == '0'): i = i[1:] split.append(eval(i)) date = datetime(split[0], split[1], split[2], split[3], split[4], split[5]) return date def difference_between_dates(date1, date2): # parses timestamps (which are still stored as date data (haha)) for changes in time between recordings secs = (date2 - date1).seconds mins = (secs / 60) return round(mins, 4) # NEW, ROUNDS TO 4 DP # Loop through fitfile directory, load hr data, calculate HRSS for filename in tqdm(os.listdir(directory)): if filename.endswith('.fit'): workout = load_workout((os.path.join(directory, filename))) if 'heart_rate' in workout[0]: # printing first 2 rows to manually check presence/forms print(workout[0]) print(workout[1]) print(filename) # for HRSS: form is SUM (Ti*HRRi*baseconst * e^(yval*HRRi) ) * 100/(60*HHRlt*basconst * e^(yval*HRRlt) ) # simplified: SUM (ATERM) * BTERM # workflow is: calc aterm*bterm indiv, then sum instantChT = [] # list of "instantaneous" changes in time for i in range(len(workout) - 1): # 
print(workout[i]) instantChT.append(difference_between_dates(workout[i]["timestamp"], workout[i + 1]["timestamp"])) print(instantChT) # this works instantHr = [] # list of (hopefully corresponding) instantaneous heart rate readings for i in range(len(workout) - 1): instantHr.append(workout[i]["heart_rate"]) print(instantHr) HRRi = [] # list of instantaneous heart rate reserve values for i in range(len(instantHr)): HRRi.append((instantHr[i] - my_rhr) / (my_maxhr - my_rhr)) print(HRRi) AtermBterm = [] # see simplified equation roughly 20 lines above for i in range(len(instantChT)): AtermBterm.append( (instantChT[i] * HRRi[i] * my_baseconstant * (eulersNum ** (my_yvalue * HRRi[i]))) * hundy_ova_N) print(AtermBterm) print(sum(AtermBterm)) else: print("issue w HR in: " + filename + " :_(...either lacking HR data or is mislabeled, i think.") continue
35.914474
179
0.63235
732
5,459
4.621585
0.382514
0.018623
0.008868
0.013006
0.073603
0.023056
0.013006
0
0
0
0
0.01697
0.265983
5,459
151
180
36.152318
0.827302
0.328082
0
0.0625
0
0
0.046465
0
0
0
0
0
0
1
0.052083
false
0
0.083333
0
0.177083
0.09375
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
748c0c6816a34dc727849d503d55e952ab640086
87,762
py
Python
test/features/pit/fixtures_pit.py
NikkaZ/dbtvault_spark
383723cd2a35a0bc7b82fd4e77fb1eda0f68cb07
[ "Apache-2.0" ]
null
null
null
test/features/pit/fixtures_pit.py
NikkaZ/dbtvault_spark
383723cd2a35a0bc7b82fd4e77fb1eda0f68cb07
[ "Apache-2.0" ]
null
null
null
test/features/pit/fixtures_pit.py
NikkaZ/dbtvault_spark
383723cd2a35a0bc7b82fd4e77fb1eda0f68cb07
[ "Apache-2.0" ]
null
null
null
from behave import fixture @fixture def pit(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_LOGIN": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } }, "STG_CUSTOMER_PROFILE": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DASHBOARD_COLOUR", "DISPLAY_NAME"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_LOGIN": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_PROFILE": { "EFFECTIVE_FROM": "LOAD_DATE" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN", "STG_CUSTOMER_PROFILE"], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN": { "source_model": "STG_CUSTOMER_LOGIN", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["LAST_LOGIN_DATE", "DEVICE_USED"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_PROFILE": { "source_model": "STG_CUSTOMER_PROFILE", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DASHBOARD_COLOUR", "DISPLAY_NAME"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": 
{"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_PROFILE": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE", "STG_CUSTOMER_PROFILE": "LOAD_DATE" }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"] , "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATE", "SOURCE"] , "RAW_STAGE_PROFILE": ["CUSTOMER_ID", "DASHBOARD_COLOUR", "DISPLAY_NAME", "LOAD_DATE", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "RAW_STAGE_LOGIN": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "VARCHAR", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "RAW_STAGE_PROFILE": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "DASHBOARD_COLOUR": "VARCHAR", "DISPLAY_NAME": "VARCHAR", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_LOGIN": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DEVICE_USED": "VARCHAR", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_PROFILE": { "+column_types": { "CUSTOMER_PK": 
"BINARY(16)", "HASHDIFF": "BINARY(16)", "DASHBOARD_COLOUR": "VARCHAR", "DISPLAY_NAME": "VARCHAR", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME", "SAT_CUSTOMER_PROFILE_PK": "BINARY(16)", "SAT_CUSTOMER_PROFILE_LDTS": "DATETIME" } } } @fixture def pit_one_sat(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_DETAILS_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_DETAILS_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "HUB_CUSTOMER_TS": { "source_model": ["STG_CUSTOMER_DETAILS_TS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS_TS": { "source_model": "STG_CUSTOMER_DETAILS_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", 
"src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" }, "PIT_CUSTOMER_TS": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_LG": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_HG": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"], "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "RAW_STAGE_DETAILS_TS": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "CUSTOMER_NAME": "VARCHAR", 
"CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "HUB_CUSTOMER_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_DETAILS_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_TS": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_LG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_HG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME" } } } @fixture def pit_two_sats(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": 
["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_DETAILS_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_LOGIN": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } }, "STG_CUSTOMER_LOGIN_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_DETAILS_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" }, "STG_CUSTOMER_LOGIN": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_LOGIN_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "HUB_CUSTOMER_TS": { "source_model": ["STG_CUSTOMER_DETAILS_TS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS_TS": { "source_model": "STG_CUSTOMER_DETAILS_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN": { "source_model": "STG_CUSTOMER_LOGIN", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN_TS": { "source_model": 
"STG_CUSTOMER_LOGIN_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE" }, "src_ldts": "LOAD_DATE" }, "PIT_CUSTOMER_TS": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} }, "SAT_CUSTOMER_LOGIN_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_LG": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} }, "SAT_CUSTOMER_LOGIN_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_HG": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" } } 
context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"], "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"], "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATE", "SOURCE"], "RAW_STAGE_LOGIN_TS": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATETIME", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "RAW_STAGE_DETAILS_TS": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "RAW_STAGE_LOGIN": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "VARCHAR", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "RAW_STAGE_LOGIN_TS": { "+column_types": { "CUSTOMER_ID": "VARCHAR", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "VARCHAR", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "HUB_CUSTOMER_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_DETAILS_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR", "CUSTOMER_ADDRESS": "VARCHAR", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", 
"LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_LOGIN": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DEVICE_USED": "VARCHAR", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR" } }, "SAT_CUSTOMER_LOGIN_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DEVICE_USED": "VARCHAR", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME" } }, "PIT_CUSTOMER_TS": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_LG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_HG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME" } } } @fixture def pit_bigquery(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_LOGIN": { 
"CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } }, "STG_CUSTOMER_PROFILE": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DASHBOARD_COLOUR", "DISPLAY_NAME"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_LOGIN": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_PROFILE": { "EFFECTIVE_FROM": "LOAD_DATE" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN", "STG_CUSTOMER_PROFILE"], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN": { "source_model": "STG_CUSTOMER_LOGIN", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["LAST_LOGIN_DATE", "DEVICE_USED"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_PROFILE": { "source_model": "STG_CUSTOMER_PROFILE", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DASHBOARD_COLOUR", "DISPLAY_NAME"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_PROFILE": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE", "STG_CUSTOMER_PROFILE": "LOAD_DATE" }, 
"src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"] , "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATE", "SOURCE"] , "RAW_STAGE_PROFILE": ["CUSTOMER_ID", "DASHBOARD_COLOUR", "DISPLAY_NAME", "LOAD_DATE", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "RAW_STAGE_LOGIN": { "+column_types": { "CUSTOMER_ID": "STRING", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "STRING", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "RAW_STAGE_PROFILE": { "+column_types": { "CUSTOMER_ID": "STRING", "DASHBOARD_COLOUR": "STRING", "DISPLAY_NAME": "STRING", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "STRING", "CUSTOMER_ID": "STRING", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "SAT_CUSTOMER_LOGIN": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "DEVICE_USED": "STRING", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "SAT_CUSTOMER_PROFILE": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "DASHBOARD_COLOUR": "STRING", "DISPLAY_NAME": "STRING", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "STRING" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_PK": "STRING", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", 
"SAT_CUSTOMER_LOGIN_PK": "STRING", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME", "SAT_CUSTOMER_PROFILE_PK": "STRING", "SAT_CUSTOMER_PROFILE_LDTS": "DATETIME" } } } @fixture def pit_one_sat_bigquery(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_DETAILS_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_DETAILS_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "HUB_CUSTOMER_TS": { "source_model": ["STG_CUSTOMER_DETAILS_TS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS_TS": { "source_model": "STG_CUSTOMER_DETAILS_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", }, 
"src_ldts": "LOAD_DATE" }, "PIT_CUSTOMER_TS": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_LG": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_HG": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"], "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "RAW_STAGE_DETAILS_TS": { "+column_types": { "CUSTOMER_ID": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "STRING", "CUSTOMER_ID": "STRING", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "HUB_CUSTOMER_TS": { "+column_types": { "CUSTOMER_PK": "STRING", "CUSTOMER_ID": "STRING", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": 
"STRING", "HASHDIFF": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "SAT_CUSTOMER_DETAILS_TS": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_PK": "STRING", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_TS": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_LG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_HG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_PK": "STRING", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME" } } } @fixture def pit_two_sats_bigquery(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_DETAILS_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_LOGIN": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } }, "STG_CUSTOMER_LOGIN_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } } } 
context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_DETAILS_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" }, "STG_CUSTOMER_LOGIN": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_LOGIN_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "HUB_CUSTOMER_TS": { "source_model": ["STG_CUSTOMER_DETAILS_TS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS_TS": { "source_model": "STG_CUSTOMER_DETAILS_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN": { "source_model": "STG_CUSTOMER_LOGIN", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN_TS": { "source_model": "STG_CUSTOMER_LOGIN_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, 
"stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE" }, "src_ldts": "LOAD_DATE" }, "PIT_CUSTOMER_TS": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} }, "SAT_CUSTOMER_LOGIN_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_LG": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} }, "SAT_CUSTOMER_LOGIN_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_HG": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"], "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"], "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATE", "SOURCE"], "RAW_STAGE_LOGIN_TS": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATETIME", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { 
"CUSTOMER_ID": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "RAW_STAGE_DETAILS_TS": { "+column_types": { "CUSTOMER_ID": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "RAW_STAGE_LOGIN": { "+column_types": { "CUSTOMER_ID": "STRING", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "STRING", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "RAW_STAGE_LOGIN_TS": { "+column_types": { "CUSTOMER_ID": "STRING", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "STRING", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "STRING", "CUSTOMER_ID": "STRING", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "HUB_CUSTOMER_TS": { "+column_types": { "CUSTOMER_PK": "STRING", "CUSTOMER_ID": "STRING", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "SAT_CUSTOMER_DETAILS_TS": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "CUSTOMER_NAME": "STRING", "CUSTOMER_ADDRESS": "STRING", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "SAT_CUSTOMER_LOGIN": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "DEVICE_USED": "STRING", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "STRING" } }, "SAT_CUSTOMER_LOGIN_TS": { "+column_types": { "CUSTOMER_PK": "STRING", "HASHDIFF": "STRING", "DEVICE_USED": "STRING", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "STRING" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, 
"PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_PK": "STRING", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "STRING", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME" } }, "PIT_CUSTOMER_TS": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_TS_PK": "STRING", "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_LG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_PK": "STRING", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_TS_PK": "STRING", "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_HG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "STRING", "SAT_CUSTOMER_DETAILS_PK": "STRING", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "STRING", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME" } } } @fixture def pit_sqlserver(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_LOGIN": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } }, "STG_CUSTOMER_PROFILE": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DASHBOARD_COLOUR", "DISPLAY_NAME"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_LOGIN": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_PROFILE": { "EFFECTIVE_FROM": "LOAD_DATE" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", "STG_CUSTOMER_LOGIN", "STG_CUSTOMER_PROFILE"], "src_pk": "CUSTOMER_PK", "src_nk": 
"CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN": { "source_model": "STG_CUSTOMER_LOGIN", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["LAST_LOGIN_DATE", "DEVICE_USED"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_PROFILE": { "source_model": "STG_CUSTOMER_PROFILE", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DASHBOARD_COLOUR", "DISPLAY_NAME"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_PROFILE": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE", "STG_CUSTOMER_PROFILE": "LOAD_DATE" }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"] , "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATE", "SOURCE"] , "RAW_STAGE_PROFILE": ["CUSTOMER_ID", "DASHBOARD_COLOUR", "DISPLAY_NAME", "LOAD_DATE", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "VARCHAR(5)", "CUSTOMER_NAME": "VARCHAR(10)", "CUSTOMER_ADDRESS": "VARCHAR(30)", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "RAW_STAGE_LOGIN": { "+column_types": { 
"CUSTOMER_ID": "VARCHAR(5)", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "VARCHAR(10)", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "RAW_STAGE_PROFILE": { "+column_types": { "CUSTOMER_ID": "VARCHAR(5)", "DASHBOARD_COLOUR": "VARCHAR(10)", "DISPLAY_NAME": "VARCHAR(10)", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR(5)", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR(10)", "CUSTOMER_ADDRESS": "VARCHAR(30)", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "SAT_CUSTOMER_LOGIN": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DEVICE_USED": "VARCHAR(10)", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "SAT_CUSTOMER_PROFILE": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DASHBOARD_COLOUR": "VARCHAR(10)", "DISPLAY_NAME": "VARCHAR(10)", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATE": "DATETIME", "SOURCE": "VARCHAR(10)" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME", "SAT_CUSTOMER_PROFILE_PK": "BINARY(16)", "SAT_CUSTOMER_PROFILE_LDTS": "DATETIME" } } } @fixture def pit_one_sat_sqlserver(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, 
"STG_CUSTOMER_DETAILS_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_DETAILS_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" } } context.vault_structure_columns = { "HUB_CUSTOMER": { "source_model": ["STG_CUSTOMER_DETAILS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "HUB_CUSTOMER_TS": { "source_model": ["STG_CUSTOMER_DETAILS_TS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS_TS": { "source_model": "STG_CUSTOMER_DETAILS_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" }, "PIT_CUSTOMER_TS": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_LG": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { 
"SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_HG": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"], "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "VARCHAR(50)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "RAW_STAGE_DETAILS_TS": { "+column_types": { "CUSTOMER_ID": "VARCHAR(50)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR(50)", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "HUB_CUSTOMER_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR(50)", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "SAT_CUSTOMER_DETAILS_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", 
"EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_TS": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_LG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_HG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME" } } } @fixture def pit_two_sats_sqlserver(context): """ Define the structures and metadata to perform PIT load """ context.vault_structure_type = "pit" context.hashed_columns = { "STG_CUSTOMER_DETAILS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_DETAILS_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["CUSTOMER_ADDRESS", "CUSTOMER_DOB", "CUSTOMER_NAME"] } }, "STG_CUSTOMER_LOGIN": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } }, "STG_CUSTOMER_LOGIN_TS": { "CUSTOMER_PK": "CUSTOMER_ID", "HASHDIFF": {"is_hashdiff": True, "columns": ["DEVICE_USED", "LAST_LOGIN_DATE"] } } } context.derived_columns = { "STG_CUSTOMER_DETAILS": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_DETAILS_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" }, "STG_CUSTOMER_LOGIN": { "EFFECTIVE_FROM": "LOAD_DATE" }, "STG_CUSTOMER_LOGIN_TS": { "EFFECTIVE_FROM": "LOAD_DATETIME" } } context.vault_structure_columns = { "HUB_CUSTOMER": { 
"source_model": ["STG_CUSTOMER_DETAILS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "HUB_CUSTOMER_TS": { "source_model": ["STG_CUSTOMER_DETAILS_TS", ], "src_pk": "CUSTOMER_PK", "src_nk": "CUSTOMER_ID", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS": { "source_model": "STG_CUSTOMER_DETAILS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_DETAILS_TS": { "source_model": "STG_CUSTOMER_DETAILS_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN": { "source_model": "STG_CUSTOMER_LOGIN", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATE", "src_source": "SOURCE" }, "SAT_CUSTOMER_LOGIN_TS": { "source_model": "STG_CUSTOMER_LOGIN_TS", "src_pk": "CUSTOMER_PK", "src_hashdiff": "HASHDIFF", "src_payload": ["DEVICE_USED", "LAST_LOGIN_DATE"], "src_eff": "EFFECTIVE_FROM", "src_ldts": "LOAD_DATETIME", "src_source": "SOURCE" }, "PIT_CUSTOMER": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE" }, "src_ldts": "LOAD_DATE" }, "PIT_CUSTOMER_TS": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": 
"LOAD_DATETIME"} }, "SAT_CUSTOMER_LOGIN_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_LG": { "source_model": "HUB_CUSTOMER_TS", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} }, "SAT_CUSTOMER_LOGIN_TS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATETIME"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS_TS": "LOAD_DATETIME", "STG_CUSTOMER_LOGIN_TS": "LOAD_DATETIME", }, "src_ldts": "LOAD_DATETIME" }, "PIT_CUSTOMER_HG": { "source_model": "HUB_CUSTOMER", "src_pk": "CUSTOMER_PK", "as_of_dates_table": "AS_OF_DATE", "satellites": { "SAT_CUSTOMER_DETAILS": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} }, "SAT_CUSTOMER_LOGIN": { "pk": {"PK": "CUSTOMER_PK"}, "ldts": {"LDTS": "LOAD_DATE"} } }, "stage_tables": { "STG_CUSTOMER_DETAILS": "LOAD_DATE", "STG_CUSTOMER_LOGIN": "LOAD_DATE", }, "src_ldts": "LOAD_DATE" } } context.stage_columns = { "RAW_STAGE_DETAILS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATE", "SOURCE"], "RAW_STAGE_DETAILS_TS": ["CUSTOMER_ID", "CUSTOMER_NAME", "CUSTOMER_ADDRESS", "CUSTOMER_DOB", "LOAD_DATETIME", "SOURCE"], "RAW_STAGE_LOGIN": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATE", "SOURCE"], "RAW_STAGE_LOGIN_TS": ["CUSTOMER_ID", "LAST_LOGIN_DATE", "DEVICE_USED", "LOAD_DATETIME", "SOURCE"] } context.seed_config = { "RAW_STAGE_DETAILS": { "+column_types": { "CUSTOMER_ID": "VARCHAR(50)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "RAW_STAGE_DETAILS_TS": { "+column_types": { "CUSTOMER_ID": "VARCHAR(50)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", 
"LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "RAW_STAGE_LOGIN": { "+column_types": { "CUSTOMER_ID": "VARCHAR(50)", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "VARCHAR(50)", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "RAW_STAGE_LOGIN_TS": { "+column_types": { "CUSTOMER_ID": "VARCHAR(50)", "LAST_LOGIN_DATE": "DATETIME", "DEVICE_USED": "VARCHAR(50)", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "HUB_CUSTOMER": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR(50)", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "HUB_CUSTOMER_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "CUSTOMER_ID": "VARCHAR(50)", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "SAT_CUSTOMER_DETAILS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "SAT_CUSTOMER_DETAILS_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "CUSTOMER_NAME": "VARCHAR(50)", "CUSTOMER_ADDRESS": "VARCHAR(50)", "CUSTOMER_DOB": "DATE", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "SAT_CUSTOMER_LOGIN": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DEVICE_USED": "VARCHAR(50)", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATE", "LOAD_DATE": "DATE", "SOURCE": "VARCHAR(50)" } }, "SAT_CUSTOMER_LOGIN_TS": { "+column_types": { "CUSTOMER_PK": "BINARY(16)", "HASHDIFF": "BINARY(16)", "DEVICE_USED": "VARCHAR(50)", "LAST_LOGIN_DATE": "DATETIME", "EFFECTIVE_FROM": "DATETIME", "LOAD_DATETIME": "DATETIME", "SOURCE": "VARCHAR(50)" } }, "AS_OF_DATE": { "+column_types": { "AS_OF_DATE": "DATETIME" } }, "PIT_CUSTOMER": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": 
"DATETIME", "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME" } }, "PIT_CUSTOMER_TS": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_LG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_TS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_TS_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_TS_LDTS": "DATETIME" } }, "PIT_CUSTOMER_HG": { "+column_types": { "AS_OF_DATE": "DATETIME", "CUSTOMER_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_PK": "BINARY(16)", "SAT_CUSTOMER_DETAILS_LDTS": "DATETIME", "SAT_CUSTOMER_LOGIN_PK": "BINARY(16)", "SAT_CUSTOMER_LOGIN_LDTS": "DATETIME" } } }
33.293627
89
0.418997
6,654
87,762
5.064172
0.012023
0.06232
0.040597
0.030715
0.998665
0.998665
0.99638
0.988337
0.974449
0.971422
0
0.006588
0.450001
87,762
2,635
90
33.306262
0.69152
0.005629
0
0.773265
0
0
0.403113
0.047686
0
0
0
0
0
1
0.003549
false
0
0.000394
0
0.003943
0
0
0
0
null
0
0
0
1
1
1
1
1
1
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
7
748c84a6a78daf32f1db550d0822cdb68bd66142
1,908
py
Python
textquote.py
patmanteau/panflutist
74e56fe6b6873015cfba766bb61277d6d81bbece
[ "BSD-3-Clause" ]
null
null
null
textquote.py
patmanteau/panflutist
74e56fe6b6873015cfba766bb61277d6d81bbece
[ "BSD-3-Clause" ]
null
null
null
textquote.py
patmanteau/panflutist
74e56fe6b6873015cfba766bb61277d6d81bbece
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python
r"""
Panflute filter supporting \textquote and \foreigntextquote in LaTeX

Issues:
 - Nested parens with pandoc-citeproc

Usage:
 - Use Pandoc markdown bracketed Spans:
   - [Ganz Gallien ist von den Römern besetzt]{.textquote cite="[vgl. @Goscinny_Asterix_1967, 1\psqq]"}
   - [Toute la Gaule est occupée par les Romains]{.textquote lang="francais" punct="..." cite="[vgl. @Goscinny_Asterix_1967, 1\psqq]"}
 - This filter will emit \{textquote/foreigntextquote}[<cite>][<punct>]{<text>} commands
"""

from jinja2tex import latex_env

import panflute as pf

# Jinja2 template (LaTeX-friendly delimiters provided by jinja2tex): emits
# \foreigntextquote{<lang>} when a language is given, otherwise \textquote,
# then the optional [cite] / [punct] arguments and the quoted text.
QUOTE = latex_env.from_string(r"""
<%- if lang %>\foreigntextquote{<< lang >>}<% else %>\textquote<% endif -%>
<% if cite %>[{<< cite >>}]<% endif -%>
<% if punct %>[<< punct >>]<% endif -%>
{<< text >>}""")


def prepare(doc):
    """No-op hook required by panflute's filter protocol."""
    pass


def action(e, doc):
    """Replace ``.textquote`` Spans with raw LaTeX quote commands.

    Returns None (element left untouched) unless the output format is LaTeX
    and *e* is a Span carrying the ``textquote`` class.
    """
    # Idiom fix: guard clauses with ``!=`` instead of nested
    # ``if not doc.format == 'latex'`` / if-else pyramids.
    if doc.format != 'latex':
        return None
    if not (isinstance(e, pf.Span) and 'textquote' in e.classes):
        return None

    cite = e.attributes.get('cite')
    if cite:
        # Render the markdown citation to biblatex-flavoured LaTeX.
        cite = pf.convert_text(cite,
                               extra_args=['--biblatex'],
                               input_format='markdown',
                               output_format='latex')
    text = pf.convert_text(pf.Plain(e),
                           extra_args=['--biblatex'],
                           input_format='panflute',
                           output_format='latex')
    values = {
        'lang': e.attributes.get('lang'),
        'cite': cite,
        'punct': e.attributes.get('punct'),
        'text': text,
    }
    tex = QUOTE.render(values)
    return pf.RawInline(tex, format='latex')


def finalize(doc):
    """No-op hook required by panflute's filter protocol."""
    pass


def main(doc=None):
    """Entry point: run this filter over the document piped in by pandoc."""
    return pf.run_filter(action, prepare=prepare, finalize=finalize, doc=doc)


if __name__ == '__main__':
    main()
28.058824
135
0.559224
213
1,908
4.896714
0.441315
0.042186
0.040268
0.042186
0.113135
0.059444
0.059444
0
0
0
0
0.008154
0.292977
1,908
67
136
28.477612
0.765011
0.271488
0
0.2
0
0
0.196816
0.015919
0
0
0
0
0
1
0.1
false
0.05
0.05
0.025
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
748cbeedaa4ac6fe4d023634af107928994f029c
335
py
Python
sandbox/gff/fixit.py
molecules/bio
2a86a931162be47beca4d7bf73b2b3978f3ba402
[ "MIT" ]
28
2020-11-28T01:18:16.000Z
2022-03-25T16:42:53.000Z
sandbox/gff/fixit.py
molecules/bio
2a86a931162be47beca4d7bf73b2b3978f3ba402
[ "MIT" ]
3
2021-09-28T11:52:07.000Z
2022-03-22T07:47:46.000Z
sandbox/gff/fixit.py
molecules/bio
2a86a931162be47beca4d7bf73b2b3978f3ba402
[ "MIT" ]
8
2020-12-01T17:02:26.000Z
2022-02-14T16:57:46.000Z
""" Adds version number sequence id of a GFF file. """ import sys ACC=sys.argv[1] VER=sys.argv[2] for line in sys.stdin: line = line.strip() elems = line.split() if elems and elems[0] == ACC: elems[0] = f'{elems[0]}.{VER}' if line.startswith("#"): print (line) else: print("\t".join(elems))
20.9375
46
0.570149
52
335
3.673077
0.615385
0.094241
0
0
0
0
0
0
0
0
0
0.01992
0.250746
335
16
47
20.9375
0.741036
0.137313
0
0
0
0
0.067376
0
0
0
0
0
0
1
0
false
0
0.083333
0
0.083333
0.166667
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
748db5b41786bab067b492eb0b8d340b621d6f8f
4,626
py
Python
abacusai/deployment.py
Ankit-RealityEngines/api-python
3627048d31df5972c5777de2a1c1211ea06a8804
[ "MIT" ]
null
null
null
abacusai/deployment.py
Ankit-RealityEngines/api-python
3627048d31df5972c5777de2a1c1211ea06a8804
[ "MIT" ]
null
null
null
abacusai/deployment.py
Ankit-RealityEngines/api-python
3627048d31df5972c5777de2a1c1211ea06a8804
[ "MIT" ]
null
null
null
from .return_class import AbstractApiClass
from .refresh_schedule import RefreshSchedule


class Deployment(AbstractApiClass):
    """
    A model deployment.

    Thin data holder around the API's deployment record; all verbs delegate
    to the shared ``client``.
    """

    def __init__(self, client, deploymentId=None, name=None, status=None,
                 description=None, deployedAt=None, createdAt=None,
                 projectId=None, modelId=None, modelVersion=None,
                 featureGroupId=None, featureGroupVersion=None,
                 callsPerSecond=None, autoDeploy=None, regions=None,
                 error=None, refreshSchedules=None):
        # Fix: ``refreshSchedules`` previously defaulted to a mutable ``{}``
        # shared across all calls; a None sentinel is backward-compatible.
        super().__init__(client, deploymentId)
        self.deployment_id = deploymentId
        self.name = name
        self.status = status
        self.description = description
        self.deployed_at = deployedAt
        self.created_at = createdAt
        self.project_id = projectId
        self.model_id = modelId
        self.model_version = modelVersion
        self.feature_group_id = featureGroupId
        self.feature_group_version = featureGroupVersion
        self.calls_per_second = callsPerSecond
        self.auto_deploy = autoDeploy
        self.regions = regions
        self.error = error
        self.refresh_schedules = client._build_class(
            RefreshSchedule,
            {} if refreshSchedules is None else refreshSchedules)

    def __repr__(self):
        return f"Deployment(deployment_id={repr(self.deployment_id)}, name={repr(self.name)}, status={repr(self.status)}, description={repr(self.description)}, deployed_at={repr(self.deployed_at)}, created_at={repr(self.created_at)}, project_id={repr(self.project_id)}, model_id={repr(self.model_id)}, model_version={repr(self.model_version)}, feature_group_id={repr(self.feature_group_id)}, feature_group_version={repr(self.feature_group_version)}, calls_per_second={repr(self.calls_per_second)}, auto_deploy={repr(self.auto_deploy)}, regions={repr(self.regions)}, error={repr(self.error)}, refresh_schedules={repr(self.refresh_schedules)})"

    def to_dict(self):
        """Return this record as a plain dict (refresh schedules included)."""
        return {'deployment_id': self.deployment_id, 'name': self.name,
                'status': self.status, 'description': self.description,
                'deployed_at': self.deployed_at, 'created_at': self.created_at,
                'project_id': self.project_id, 'model_id': self.model_id,
                'model_version': self.model_version,
                'feature_group_id': self.feature_group_id,
                'feature_group_version': self.feature_group_version,
                'calls_per_second': self.calls_per_second,
                'auto_deploy': self.auto_deploy, 'regions': self.regions,
                'error': self.error,
                'refresh_schedules': self._get_attribute_as_dict(self.refresh_schedules)}

    def refresh(self):
        """Re-fetch this deployment's state in place and return self."""
        self.__dict__.update(self.describe().__dict__)
        return self

    def describe(self):
        """Fetch a fresh Deployment object from the API."""
        return self.client.describe_deployment(self.deployment_id)

    def update(self, description=None):
        """Update the deployment's description."""
        return self.client.update_deployment(self.deployment_id, description)

    def rename(self, name):
        """Rename the deployment."""
        return self.client.rename_deployment(self.deployment_id, name)

    def set_auto(self, enable=None):
        """Enable/disable auto-deployment of new model versions."""
        return self.client.set_auto_deployment(self.deployment_id, enable)

    def set_model_version(self, model_version):
        """Pin the deployment to a specific model version."""
        return self.client.set_deployment_model_version(
            self.deployment_id, model_version)

    def set_feature_group_version(self, feature_group_version):
        """Pin the deployment to a specific feature group version."""
        return self.client.set_deployment_feature_group_version(
            self.deployment_id, feature_group_version)

    def start(self):
        """Start the deployment."""
        return self.client.start_deployment(self.deployment_id)

    def stop(self):
        """Stop the deployment."""
        return self.client.stop_deployment(self.deployment_id)

    def delete(self):
        """Delete the deployment."""
        return self.client.delete_deployment(self.deployment_id)

    def create_batch_prediction(self, name=None, global_prediction_args=None,
                                explanations=False, output_format=None,
                                output_location=None, database_connector_id=None,
                                database_output_config=None, refresh_schedule=None,
                                csv_input_prefix=None, csv_prediction_prefix=None,
                                csv_explanations_prefix=None):
        """Create a batch prediction job against this deployment."""
        return self.client.create_batch_prediction(
            self.deployment_id, name, global_prediction_args, explanations,
            output_format, output_location, database_connector_id,
            database_output_config, refresh_schedule, csv_input_prefix,
            csv_prediction_prefix, csv_explanations_prefix)

    def wait_for_deployment(self,
                            wait_states=frozenset({'PENDING', 'DEPLOYING'}),
                            timeout=480):
        """Block until the deployment leaves *wait_states* or *timeout* elapses.

        Fix: the default was a mutable ``set`` literal shared across calls;
        a frozenset supports the same membership tests and is safe to share.
        """
        return self.client._poll(self, wait_states, timeout=timeout)

    def get_status(self):
        """Return the deployment's current status string."""
        return self.describe().status

    def create_refresh_policy(self, cron: str):
        """Create a cron-driven refresh policy targeting this deployment."""
        # NOTE(review): relies on ``self.id`` being provided by
        # AbstractApiClass.__init__ (not visible here) — confirm; the rest of
        # this class reads ``self.deployment_id``.
        return self.client.create_refresh_policy(
            self.name, cron, 'DEPLOYMENT', deployment_ids=[self.id])

    def list_refresh_policies(self):
        """List the refresh policies targeting this deployment."""
        return self.client.list_refresh_policies(deployment_ids=[self.id])
57.111111
642
0.75508
586
4,626
5.643345
0.158703
0.041125
0.062897
0.055035
0.206834
0.112186
0.057454
0
0
0
0
0.000753
0.138997
4,626
80
643
57.825
0.829525
0.003891
0
0
0
0.017544
0.180689
0.137315
0
0
0
0
0
1
0.315789
false
0
0.035088
0.280702
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
1
1
0
0
3
748f173a3418e52d978dedc4b9c8474f56ab2812
1,426
py
Python
tools/buildenv.py
pauldotknopf/protobuf-qml
e10f46567344738563140e8f06d5a2569b961d06
[ "MIT" ]
56
2015-05-14T16:00:43.000Z
2022-02-11T20:25:36.000Z
tools/buildenv.py
pauldotknopf/protobuf-qml
e10f46567344738563140e8f06d5a2569b961d06
[ "MIT" ]
19
2015-05-11T14:50:33.000Z
2018-09-06T05:45:34.000Z
tools/buildenv.py
pauldotknopf/protobuf-qml
e10f46567344738563140e8f06d5a2569b961d06
[ "MIT" ]
16
2015-10-29T13:25:48.000Z
2021-06-30T18:34:32.000Z
import logging
import os
import platform

# Repository root: the parent of the directory containing this file.
ROOT_DIR = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
DEFAULT_DEPS = os.path.join(ROOT_DIR, 'build', 'deps')
DEFAULT_OUT = os.path.join(ROOT_DIR, 'out')

logger = logging.getLogger(__name__)


def prepend_envvar(env, k, v, sep=os.pathsep):
    """Prepend *v* to ``env[k]``, joining with *sep* when a value exists."""
    old = env.get(k)
    env[k] = sep.join([v, old]) if old else v


def prepend_libdir(env, libdir):
    """Expose *libdir* to the linker/loader via platform-specific env vars."""
    if platform.system() == 'Windows':
        prepend_envvar(env, 'PATH', libdir)  # DLLs are found via PATH
        prepend_envvar(env, 'LIB', libdir)
        prepend_envvar(env, 'LIBPATH', libdir)
    else:
        prepend_envvar(env, 'LD_LIBRARY_PATH', libdir)
        prepend_envvar(env, 'LIBRARY_PATH', libdir)
        # LDFLAGS entries are space-separated, not os.pathsep-separated.
        prepend_envvar(env, 'LDFLAGS', '-L%s' % libdir, ' ')


def prepend_include_dir(env, include):
    """Expose *include* to the compiler via platform-specific env vars."""
    if platform.system() == 'Windows':
        prepend_envvar(env, 'INCLUDE', include)
    else:
        prepend_envvar(env, 'C_INCLUDE_PATH', include)
        prepend_envvar(env, 'CPLUS_INCLUDE_PATH', include)


def setup_env(root, env=os.environ):
    """Prepend *root*'s bin/, include/ and lib/ directories to *env*.

    *env* defaults to ``os.environ`` on purpose — mutating the process
    environment is the point of this helper.
    """
    # Fix: use the module-level logger with lazy %-args instead of calling
    # logging.info/logging.debug on the root logger.
    logger.info('Setting up environment variable for root directory [%s]', root)
    prepend_envvar(env, 'PATH', os.path.join(root, 'bin'))
    prepend_include_dir(env, os.path.join(root, 'include'))
    prepend_libdir(env, os.path.join(root, 'lib'))
    for k, v in env.items():
        logger.debug('%s: %s', k, v)
29.102041
80
0.685133
210
1,426
4.461905
0.3
0.166489
0.204909
0.074707
0.254002
0.153682
0.083244
0
0
0
0
0
0.150771
1,426
48
81
29.708333
0.773741
0.07784
0
0.125
0
0
0.149504
0
0
0
0
0
0
1
0.125
false
0
0.09375
0
0.21875
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
7492f98aa56400f8ae0d4988b98264efada3bc27
2,010
py
Python
rstobj/directives/toc.py
MacHu-GWU/rstobj-project
40601f08e9e7ee2df5c514af04b040f61f76ed78
[ "MIT" ]
null
null
null
rstobj/directives/toc.py
MacHu-GWU/rstobj-project
40601f08e9e7ee2df5c514af04b040f61f76ed78
[ "MIT" ]
null
null
null
rstobj/directives/toc.py
MacHu-GWU/rstobj-project
40601f08e9e7ee2df5c514af04b040f61f76ed78
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- """ table of content directive. """ import attr from .base import Directive @attr.s class TableOfContent(Directive): """ ``.. contents::`` directive. parameter definition see here: http://docutils.sourceforge.net/docs/ref/rst/directives.html#table-of-contents :param title: str, required. :param depth: int, optional. :param local: bool, optional. :type backlinks: str :param backlinks: optional. one of :attr:`TableOfContent.BacklinksOptions`. Example:: toc = TableOfContent(title="Table of Contents", depth=2) toc.render() Output:: .. contents:: Table of Contents :depth: 2 """ title = attr.ib(default=None) # type: str depth = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), ) # type: int local = attr.ib( default=False, validator=attr.validators.optional(attr.validators.instance_of(bool)), ) # type: bool backlinks = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(str)), ) meta_directive_keyword = "contents" meta_not_none_fields = tuple() class BacklinksOptions(object): """ ``backlinks`` argument choices. - ``TableOfContent.BacklinksOptions.entry``: ``"entry"`` - ``TableOfContent.BacklinksOptions.top``: ``"top"`` - ``TableOfContent.BacklinksOptions.none``: ``"none"`` """ entry = "entry" top = "top" none = "none" @backlinks.validator def check_backlinks(self, attribute, value): # pragma: no cover if value not in [None, "entry", "top", "none"]: raise ValueError( "TableOfContent.backlinks has to be one of 'entry', 'top', 'none'!" ) @property def arg(self): if self.title is None: return "" else: return self.title
25.769231
113
0.595025
208
2,010
5.706731
0.399038
0.070767
0.043808
0.042965
0.203033
0.16765
0.16765
0.16765
0.121314
0.121314
0
0.002046
0.270647
2,010
77
114
26.103896
0.80764
0.389055
0
0.057143
0
0
0.088022
0.021779
0
0
0
0
0
1
0.057143
false
0
0.057143
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7493ca43323db77c43ed2fc83412e3ad722c32a7
3,256
py
Python
StaffPage.py
yurunyang1998/XUPT_-
415c20550b54914b1206cfa3b83ddbe8260e7a6f
[ "MIT" ]
null
null
null
StaffPage.py
yurunyang1998/XUPT_-
415c20550b54914b1206cfa3b83ddbe8260e7a6f
[ "MIT" ]
null
null
null
StaffPage.py
yurunyang1998/XUPT_-
415c20550b54914b1206cfa3b83ddbe8260e7a6f
[ "MIT" ]
null
null
null
from flask import *

import databaseModel
from functools import wraps
import time

bp = Blueprint("StaffPage", __name__, url_prefix='/StaffPage')


def login_required(func):
    """Decorator: run the view only when 'UserName' is in the session.

    The user name is stashed on ``g.UserName`` for the wrapped view;
    anonymous visitors are redirected to the index page.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        UserName = session.get('UserName')
        if UserName is not None:
            g.UserName = UserName
            return func(*args, **kwargs)
        else:
            return redirect(url_for("index"))
    return wrapper


@bp.route("/OrderToStore", methods=["POST"])
@login_required
def OrderToStore():
    """Handle an order state change: "inStore" hand-over or "sigh" sign-off.

    Returns ``{"code": "200"}`` on success, ``{"code": "0"}`` on any error.
    """
    try:
        OrderNum = request.form["OrderNum"]
        method = request.form["way"]
        order = databaseModel.Orders.query.filter_by(OrderNum=OrderNum).first()
        if order is not None:
            if method == "inStore":
                # Hand-over into storage: add a HistoryOrders row recording
                # that this courier handled the parcel, then mark them as
                # the parcel's current carrier in OrderStaffs.
                neworder_ = databaseModel.HistoryOrders(OrderNum=OrderNum,
                                                        StaffNum=g.UserName)
                databaseModel.db.session.add(neworder_)
                orderstaff_ = databaseModel.OrderStaffs.query.filter_by(
                    OrderNum=OrderNum).first()
                orderstaff_.StaffNum = g.UserName
                databaseModel.db.session.commit()
            if method == "sigh":
                # Sign-off: StagNum 2 marks the order as signed for; the
                # parcel no longer has an active carrier, so OrderStaffs
                # is reset to "0".  (The client sends the literal "sigh".)
                order_ = databaseModel.Orders.query.filter_by(
                    OrderNum=OrderNum).first()
                order_.StagNum = 2
                neworder_ = databaseModel.HistoryOrders(OrderNum=OrderNum,
                                                        StaffNum=g.UserName)
                databaseModel.db.session.add(neworder_)
                orderstaff_ = databaseModel.OrderStaffs.query.filter_by(
                    OrderNum=OrderNum).first()
                orderstaff_.StaffNum = "0"
                databaseModel.db.session.commit()
    except Exception as e:
        # Roll back the half-applied transaction so the session stays usable
        # for later requests (the original left it dirty on failure).
        databaseModel.db.session.rollback()
        return {"code": "0"}
    return {"code": "200"}


@bp.route("/HistoryOrders", methods=["GET"])
@login_required
def HistoryOrders():
    """Return every order this courier has handled, keyed by list index."""
    historyorders = databaseModel.HistoryOrders.query.filter_by(
        StaffNum=g.UserName).all()
    jsondata = {}
    for i, c in enumerate(historyorders):
        ordernum_ = c.OrderNum
        order = databaseModel.Orders.query.filter_by(
            OrderNum=ordernum_).first()
        recvaddr_ = order.RecvAddr
        # Default label: fixes a NameError in the original when StagNum
        # was neither 1 nor 2 (orderstag_ was never assigned).
        orderstag_ = ""
        if order.StagNum == 1:
            orderstag_ = '已寄出'
        if order.StagNum == 2:
            orderstag_ = "已签收"
        data = {"OrderNum": ordernum_,
                "RecvAddr": recvaddr_,
                "OrderStag": orderstag_}
        jsondata[i] = data
    return jsondata


@bp.route("/StaffInfo", methods=["POST"])
@login_required
def StaffInfo():
    """Return the logged-in courier's profile, or ``{"code": "0"}`` on error."""
    try:
        staffinfo = databaseModel.Staffs.query.filter_by(
            UserName=g.UserName).first()
        jsondata = {}
        StaffName = staffinfo.StaffName
        StaffNum = staffinfo.UserName
        StaffTele = staffinfo.StaffTele
        StaffIdCard = staffinfo.StaffIdCard
        # (removed a debug print that leaked the staff ID card number to stdout)
        jsondata["code"] = "1"
        jsondata["StaffName"] = StaffName
        jsondata["StaffNum"] = StaffNum
        jsondata["StaffTele"] = StaffTele
        jsondata["StaffIdCard"] = StaffIdCard
    except Exception as e:
        current_app.logger.debug(e)
        return {"code": "0"}
    return jsondata
32.56
138
0.635749
312
3,256
6.519231
0.314103
0.06293
0.044739
0.051622
0.377581
0.333333
0.333333
0.333333
0.333333
0.247788
0
0.00409
0.249079
3,256
99
139
32.888889
0.827812
0.04914
0
0.276316
0
0
0.06252
0
0
0
0
0
0
1
0.065789
false
0
0.052632
0
0.223684
0.026316
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7493cdea568afe3db8cc36c7efb7d3a24d08c8e1
1,849
py
Python
daily_problems/problem_101_to_200/problem_131.py
rrwt/daily-coding-challenge
b16fc365fd142ebab429e605cb146c8bb0bc97a2
[ "MIT" ]
1
2019-04-18T03:29:02.000Z
2019-04-18T03:29:02.000Z
daily_problems/problem_101_to_200/problem_131.py
rrwt/daily-coding-challenge
b16fc365fd142ebab429e605cb146c8bb0bc97a2
[ "MIT" ]
null
null
null
daily_problems/problem_101_to_200/problem_131.py
rrwt/daily-coding-challenge
b16fc365fd142ebab429e605cb146c8bb0bc97a2
[ "MIT" ]
null
null
null
"""
Given the head to a singly linked list, where each node also has a
"random" pointer that points to anywhere in the linked list, deep clone
the list.
"""
from typing import Tuple


class Node:
    """Singly linked list node with an extra ``random`` pointer."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.next = None
        self.random = None


def deep_clone_ll(ll_head: Node) -> Tuple[Node, Node]:
    """Deep-clone a linked list that carries random pointers.

    Time Complexity: O(n)
    Space Complexity: O(1)

    Returns ``(original_head, cloned_head)``.  The original list's ``next``
    chain is restored before returning.
    """
    # Pass 1: interleave a copy after every original node
    # (a -> a' -> b -> b' -> ...).
    runner = ll_head
    while runner:
        node = Node(runner.data)
        node.next = runner.next
        runner.next = node
        runner = runner.next.next

    # Pass 2: each copy's random is the node right after the original's
    # random.  Guard against a missing random pointer — the original code
    # raised AttributeError when ``runner.random`` was None.
    runner = ll_head
    while runner:
        runner.next.random = runner.random.next if runner.random else None
        runner = runner.next.next

    # Pass 3: detach the two interleaved lists and fix both next chains.
    runner = ll_head
    new_head = ll_head.next
    while runner.next:
        next_node = runner.next
        if next_node.next:
            runner.next = next_node.next
            next_node.next = next_node.next.next
            runner = runner.next
        else:
            runner.next = None

    return ll_head, new_head


if __name__ == "__main__":
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)
    head.next.next.next.next = Node(5)

    head.random = head.next.next
    head.next.random = head
    head.next.next.random = head.next.next.next.next
    head.next.next.next.random = head.next.next
    head.next.next.next.next.random = head.next

    original, copied = deep_clone_ll(head)

    while original:
        print("orig node:", original.data, ", random:", original.random.data)
        print("copied node:", copied.data, ", random:", copied.random.data)
        original = original.next
        copied = copied.next
26.042254
83
0.622499
259
1,849
4.332046
0.254826
0.171123
0.096257
0.071301
0.171123
0.153298
0.039216
0.039216
0
0
0
0.004464
0.273121
1,849
70
84
26.414286
0.830357
0.161168
0
0.155556
0
0
0.031537
0
0
0
0
0
0
1
0.044444
false
0
0.022222
0
0.111111
0.044444
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749446711702bf00c7f0f56b3ec4b6af7960f483
1,095
py
Python
view/basev64/__init__.py
desktophzj/wolverine-tkinter
8e499c877ba7f5da11e63e30ecb25d54f6f3b6e9
[ "Apache-2.0" ]
null
null
null
view/basev64/__init__.py
desktophzj/wolverine-tkinter
8e499c877ba7f5da11e63e30ecb25d54f6f3b6e9
[ "Apache-2.0" ]
null
null
null
view/basev64/__init__.py
desktophzj/wolverine-tkinter
8e499c877ba7f5da11e63e30ecb25d54f6f3b6e9
[ "Apache-2.0" ]
null
null
null
import tkinter as tk
from tkinter import ttk

# Vertical layout offsets: each row sits 20px below the previous one.
raw_data_label_y_location = 20
raw_data_entry_location = raw_data_label_y_location + 20
encode_button_location = raw_data_entry_location + 20
decode_button_location = encode_button_location + 20
base64_data_label_y_location = decode_button_location + 20
base64_data_entry_y_location = base64_data_label_y_location + 20


class Base64:
    """Builds the base64 encode/decode tab for a ttk notebook."""

    def create_base64_tab(self, tab_control):
        """Create and return a ``ttk.Frame`` holding the base64 widgets.

        NOTE(review): the widgets use hardcoded coordinates (x=500,
        y=0..500) rather than the module-level *_location constants
        above — confirm whether the constants were meant to be used.
        """
        tab = ttk.Frame(tab_control)

        # --- raw-data section ---
        lbl_raw = tk.Label(tab, text="raw data")
        lbl_raw.place(x=500, y=0)
        ent_raw = tk.Entry(tab)
        ent_raw.place(x=500, y=100)

        # --- encode / decode buttons ---
        btn_encode = tk.Button(tab, text="encode", width=5)
        btn_encode.place(x=500, y=200)
        btn_decode = tk.Button(tab, text="decode", width=5)
        btn_decode.place(x=500, y=300)

        # --- base64 section ---
        lbl_b64 = tk.Label(tab, text="base64 data")
        lbl_b64.place(x=500, y=400)
        ent_b64 = tk.Entry(tab)
        ent_b64.place(x=500, y=500)

        return tab
33.181818
64
0.699543
172
1,095
4.116279
0.215116
0.09887
0.076271
0.084746
0.536723
0.237288
0
0
0
0
0
0.080645
0.207306
1,095
32
65
34.21875
0.735023
0.013699
0
0
0
0
0.028784
0
0
0
0
0
0
1
0.041667
false
0
0.083333
0
0.208333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749477a57d991a4da5d6d7c1a4757df77ab77a72
3,307
py
Python
research/cv/csd/src/edsr_slim.py
mindspore-ai/models
9127b128e2961fd698977e918861dadfad00a44c
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/cv/csd/src/edsr_slim.py
mindspore-ai/models
9127b128e2961fd698977e918861dadfad00a44c
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/cv/csd/src/edsr_slim.py
mindspore-ai/models
9127b128e2961fd698977e918861dadfad00a44c
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""edsr_slim.py"""
from src import common

import mindspore.nn as nn
import mindspore.ops as ops
from mindspore import Tensor


class EDSR(nn.Cell):
    """Slimmable EDSR super-resolution network.

    ``construct`` accepts a ``width_mult`` factor and runs the head/body
    convolutions with only the first ``n_feats * width_mult`` output
    channels, slicing each conv's weight and bias tensors accordingly.

    Args:
        args: configuration object; this class reads ``n_colors``,
            ``n_resblocks``, ``n_feats``, ``scale``, ``rgb_range`` and
            ``res_scale`` from it.
    """
    def __init__(self, args):
        super(EDSR, self).__init__()

        self.n_colors = args.n_colors
        n_resblocks = args.n_resblocks
        self.n_feats = args.n_feats
        self.kernel_size = 3
        scale = args.scale[0]
        act = nn.ReLU()
        self.rgb_range = args.rgb_range

        # Normalize the input / denormalize the output around the RGB mean.
        self.sub_mean = common.MeanShift(self.rgb_range)
        self.add_mean = common.MeanShift(self.rgb_range, sign=1)

        # Head conv: colors -> n_feats (weights are sliced at run time).
        self.head = common.conv(args.n_colors, self.n_feats, self.kernel_size,
                                padding=self.kernel_size//2)

        # Residual trunk: n_resblocks identical blocks, each also taking
        # width_mult in its own construct (see the loop below).
        m_body = [
            common.ResidualBlock(
                self.n_feats, self.kernel_size, act=act, res_scale=args.res_scale
            ) for _ in range(n_resblocks)
        ]
        self.body = nn.CellList(m_body)
        self.body_conv = common.conv(self.n_feats, self.n_feats, self.kernel_size,
                                     padding=self.kernel_size//2)

        self.upsampler = common.Upsampler(scale, self.n_feats)
        self.tail_conv = common.conv(self.n_feats, args.n_colors, self.kernel_size,
                                     padding=self.kernel_size//2)

    def construct(self, x, width_mult=Tensor(1.0)):
        """Run the network at the given width multiplier.

        NOTE(review): ``width_mult.asnumpy()`` forces host-side evaluation,
        which presumably restricts this cell to PyNative mode — confirm.
        """
        width_mult = width_mult.asnumpy().item()
        # Number of active channels at this width.
        feature_width = int(self.n_feats * width_mult)

        # A single Conv2D primitive is reused for the head and body convs,
        # since both produce feature_width output channels.
        conv2d = ops.Conv2D(out_channel=feature_width,
                            kernel_size=self.kernel_size,
                            mode=1, pad_mode='pad',
                            pad=self.kernel_size // 2)
        biasadd = ops.BiasAdd()

        x = self.sub_mean(x)
        # Head: slice the stored weights down to the active channel count.
        weight = self.head.weight[:feature_width, :self.n_colors, :, :]
        bias = self.head.bias[:feature_width]
        x = conv2d(x, weight)
        x = biasadd(x, bias)

        # Body: residual blocks at the same width, then one conv and a
        # long skip connection back to the head output.
        residual = x
        for block in self.body:
            residual = block(residual, width_mult)
        weight = self.body_conv.weight[:feature_width, :feature_width, :, :]
        bias = self.body_conv.bias[:feature_width]
        residual = conv2d(residual, weight)
        residual = biasadd(residual, bias)
        residual += x

        # Tail: upsample, then project back to n_colors channels with a
        # freshly-parameterized Conv2D (output channel count differs).
        x = self.upsampler(residual, width_mult)
        weight = self.tail_conv.weight[:self.n_colors, :feature_width, :, :]
        bias = self.tail_conv.bias[:self.n_colors]
        conv2d = ops.Conv2D(out_channel=self.n_colors,
                            kernel_size=self.kernel_size,
                            mode=1, pad_mode='pad',
                            pad=self.kernel_size//2)
        x = conv2d(x, weight)
        x = biasadd(x, bias)
        x = self.add_mean(x)
        return x
38.011494
112
0.629574
451
3,307
4.439024
0.292683
0.06993
0.083916
0.034965
0.286214
0.214785
0.147852
0.147852
0.102897
0.102897
0
0.010723
0.238585
3,307
86
113
38.453488
0.784353
0.213789
0
0.074074
0
0
0.002346
0
0
0
0
0
0
1
0.037037
false
0
0.074074
0
0.148148
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
7494c2f962bcdfe2ec84fe9915ae2232069d6c59
6,317
py
Python
1_prepare_data/docker/code/make_tfRecords.py
gianpd/amazon-sagemaker-tensorflow-object-detection-api
80c654a9767bb26389db468c7e6df3300f2debb4
[ "MIT-0" ]
null
null
null
1_prepare_data/docker/code/make_tfRecords.py
gianpd/amazon-sagemaker-tensorflow-object-detection-api
80c654a9767bb26389db468c7e6df3300f2debb4
[ "MIT-0" ]
null
null
null
1_prepare_data/docker/code/make_tfRecords.py
gianpd/amazon-sagemaker-tensorflow-object-detection-api
80c654a9767bb26389db468c7e6df3300f2debb4
[ "MIT-0" ]
null
null
null
"""Convert a bounding-box .json annotation file into TFRecord shards."""
import os
import sys
import json
from pathlib import Path
import pandas as pd
import random
import tensorflow as tf
import io
import argparse
from PIL import Image
from collections import namedtuple
from object_detection.utils import dataset_util, label_map_util

import logging

logging.basicConfig(stream=sys.stdout, format='', level=logging.INFO, datefmt=None)
logger = logging.getLogger('NJDD-prepare-data')

# Initiate argument parser
parser = argparse.ArgumentParser(
    description="Sample TensorFlow json-to-TFRecord converter")
parser.add_argument("-json",
                    "--json_path",
                    help="Path to the input .json files.", type=str)
# parser.add_argument("-subset",
#                     "--subset",
#                     help="Type of the subset: train, validation, test", type=str)
parser.add_argument("-l",
                    "--labels_path",
                    help="Path to the labels (.pbtxt) file.", type=str)
parser.add_argument("-o",
                    "--output_dir",
                    help="Path of the output dir for storing TFRecord (.record) file.",
                    type=str)
parser.add_argument("-i",
                    "--image_dir",
                    help="Path to the folder where the input image files are stored. "
                         "Defaults to the same directory as JSON_DIR.",
                    type=str, default=None)
parser.add_argument("-c",
                    "--csv_path",
                    help="Path of output .csv file. If none provided, then no file will be "
                         "written.",
                    type=str, default=None)

args = parser.parse_args()

if args.image_dir is None:
    # BUG FIX: the parser defines ``json_path`` — there is no ``json_dir``
    # attribute, so the original ``args.json_dir`` always raised
    # AttributeError when -i was omitted.  Default to the directory that
    # contains the annotation file instead.
    args.image_dir = os.path.dirname(args.json_path)

label_map = label_map_util.load_labelmap(args.labels_path)
label_map_dict = label_map_util.get_label_map_dict(label_map)


def bbox_dict_to_df(bbox_dict):
    """Flatten {filename: annotation} into a DataFrame of one bbox per row.

    This function assumes that the objects list contains one element
    (v['objects'][0]).
    """
    log_index = 'bbox_dict_to_df>'
    df_ls = []
    for k, v in bbox_dict.items():
        filename = k
        height = v['size']['height']
        width = v['size']['width']
        # bbox is stored as [ymin, xmin, ymax, xmax].
        ym = v['objects'][0]['bbox'][0]
        xm = v['objects'][0]['bbox'][1]
        yM = v['objects'][0]['bbox'][2]
        xM = v['objects'][0]['bbox'][3]
        values = (filename, height, width, ym, xm, yM, xM, v['objects'][0]['name'])
        df_ls.append(values)
    logger.info(f'{log_index} Collected {len(df_ls)} objects')
    df = pd.DataFrame(df_ls,
                      columns=['fname', 'height', 'width', 'ym', 'xm', 'yM', 'xM', 'class'])
    return df


def split_dataset(df, perc=0.9):
    """Shuffle *df* and split it into (train, validation) by ratio *perc*."""
    log_index = 'split_dataset>'
    df = df.sample(frac=1).reset_index(drop=True)
    num_train = int(perc * len(df))
    df_train = df.iloc[0:num_train]
    df_val = df.iloc[num_train:]
    logger.info(f'{log_index} TRAINING EXAMPLES: {len(df_train)} - VALIDATION EXAMPLES: {len(df_val)}')
    return df_train, df_val


def class_text_to_int(row_label):
    """Map a class name to its integer id from the label map."""
    return label_map_dict[row_label]


def split(df, group):
    """Group *df* by *group* and return (filename, sub-frame) named tuples."""
    data = namedtuple('data', ['fname', 'object'])
    gb = df.groupby(group)
    # The original zipped ``gb.groups.keys()`` with ``gb.groups`` — both
    # iterate the same keys — so iterating the keys once is equivalent.
    return [data(filename, gb.get_group(filename)) for filename in gb.groups]


def create_tf_example(group, path):
    """Build one tf.train.Example for a (filename, annotations) group."""
    log_index = 'create_tf_example>'
    with tf.io.gfile.GFile(os.path.join(path, '{}'.format(group.fname)), 'rb') as fid:
        encoded_jpg = fid.read()
    encoded_jpg_io = io.BytesIO(encoded_jpg)
    image = Image.open(encoded_jpg_io)
    # The actual image size wins over the size recorded in the json.
    width, height = image.size
    logger.info(f'{log_index} Retrived image with size: {width, height} - (w,h)')

    filename = group.fname.encode('utf8')
    image_format = b'jpg'

    xmins = []
    xmaxs = []
    ymins = []
    ymaxs = []
    classes_text = []
    classes = []
    for _, row in group.object.iterrows():
        xmins.append(row['xm'])
        xmaxs.append(row['xM'])
        ymins.append(row['ym'])
        ymaxs.append(row['yM'])
        classes_text.append(row['class'].encode('utf8'))
        classes.append(class_text_to_int(row['class']))
    logger.info(f'{log_index} Collected {len(xmins)} rows')

    tf_example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': dataset_util.int64_feature(height),
        'image/width': dataset_util.int64_feature(width),
        'image/filename': dataset_util.bytes_feature(filename),
        'image/source_id': dataset_util.bytes_feature(filename),
        'image/encoded': dataset_util.bytes_feature(encoded_jpg),
        'image/format': dataset_util.bytes_feature(image_format),
        'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
        'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
        'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
        'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
        'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
        'image/object/class/label': dataset_util.int64_list_feature(classes),
    }))
    return tf_example


def main():
    """Read the annotations, split train/val, and write one TFRecord each."""
    log_index = 'main>'
    logger.info(f'{log_index} Reading bbox at {args.json_path} ...')
    with open(args.json_path, 'r') as f:
        bbox_dict = json.load(f)
    df = bbox_dict_to_df(bbox_dict)
    df_train, df_val = split_dataset(df)
    for df_s, subset in zip([df_train, df_val], ['train', 'val']):
        logger.info(f'{log_index} Writing TFRecords for subset: {subset}')
        writer = tf.io.TFRecordWriter(os.path.join(args.output_dir, f'dywidag_{subset}.records'))
        path = Path(args.image_dir)
        grouped = split(df_s, 'fname')
        for group in grouped:
            tf_example = create_tf_example(group, path)
            writer.write(tf_example.SerializeToString())
        writer.close()
        logger.info(f'{log_index} Successfully created the TFRecord file: {args.output_dir}')
    if args.csv_path is not None:
        df.to_csv(args.csv_path, index=None)
        logger.info(f'{log_index} Successfully created the CSV file: {args.csv_path}')


if __name__ == '__main__':
    main()
39.48125
104
0.614849
837
6,317
4.444444
0.253286
0.038441
0.023656
0.030108
0.214516
0.094086
0.03871
0.022043
0
0
0
0.004636
0.248852
6,317
160
105
39.48125
0.779347
0.040051
0
0.014925
0
0
0.228805
0.027068
0
0
0
0
0
1
0.044776
false
0
0.097015
0.007463
0.179104
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74965fd1310312518af6b17b1aeb671c359c33bf
3,962
py
Python
rugosa/emulation/actions.py
Defense-Cyber-Crime-Center/rugosa
70f5b1db7e3f02ecccb0495fe1c0c77930769276
[ "MIT" ]
1
2022-03-13T03:03:31.000Z
2022-03-13T03:03:31.000Z
rugosa/emulation/actions.py
Defense-Cyber-Crime-Center/rugosa
70f5b1db7e3f02ecccb0495fe1c0c77930769276
[ "MIT" ]
null
null
null
rugosa/emulation/actions.py
Defense-Cyber-Crime-Center/rugosa
70f5b1db7e3f02ecccb0495fe1c0c77930769276
[ "MIT" ]
null
null
null
"""
Interface for interesting actions.
"""
from dataclasses import dataclass
import logging
from typing import Union, List, Optional, Iterable

from .call_hooks.win_api import constants as wc

logger = logging.getLogger(__name__)


@dataclass
class Action:
    # Instruction pointer at which the action occurred; base class for
    # every concrete action type below.
    ip: int


class ActionList:
    """
    Represents a reverse linked list of actions that have occurred up to
    a specific ProcessorContext.

    Only ``tail`` (the most recent node) is stored; iteration walks the
    ``prev`` chain, so copies of this list can share node structure.
    """

    def __init__(self, *actions: Action):
        self.tail: Optional[ActionNode] = None
        for action in actions:
            self.add(action)

    def __repr__(self):
        return f"ActionList({repr(self.tail) if self.tail else ''})"

    def __deepcopy__(self, memo):
        # Deliberately shallow: the copy aliases the same tail node, so
        # existing nodes are shared and only new ``add`` calls diverge.
        copy = ActionList()
        copy.tail = self.tail
        return copy

    def __iter__(self):
        # Oldest-to-newest; delegates to ActionNode.__iter__.
        if self.tail:
            yield from self.tail

    def __reversed__(self):
        # Newest-to-oldest; delegates to ActionNode.__reversed__.
        if self.tail:
            yield from reversed(self.tail)

    def __getitem__(self, index: int):
        # O(n): materializes the whole chain to index into it.
        return list(self)[index]

    def __len__(self):
        # O(n): counts by walking the chain.
        return len(list(self))

    def __bool__(self):
        return bool(self.tail)

    def __contains__(self, item):
        return any(item == action for action in self)

    def add(self, action: Action):
        """Append *action* by pushing a new node whose ``prev`` is the old tail."""
        self.tail = ActionNode(action, prev=self.tail)


class ActionNode:
    """
    Represents a node of a reverse linked list of actions that have occurred up to
    a specific ProcessorContext.
    """

    def __init__(self, action: Action, prev: Optional["ActionNode"] = None):
        self.action = action
        self.prev = prev

    def __repr__(self):
        if self.prev:
            return f"{self.prev!r} -> {self.action}"
        else:
            return f"{self.action}"

    def __iter__(self):
        """
        Iterates actions from the least recent action that has occurred to the most recent
        action that has occurred.
        """
        if self.prev:
            yield from self.prev
        yield self.action

    def __reversed__(self):
        """
        Iterates actions from the most recent action that has occurred to the least recent
        action that has occurred.
        """
        yield self.action
        if self.prev:
            yield from reversed(self.prev)


# Concrete action records emitted during emulation.  Each adds its own
# payload fields on top of Action.ip.

@dataclass
class CommandExecuted(Action):
    command: str
    visibility: wc.Visibility = None


@dataclass
class DirectoryCreated(Action):
    path: str


@dataclass
class FileCreated(Action):
    handle: int
    path: str
    mode: str


@dataclass
class FileOpened(Action):
    handle: int
    path: str
    mode: str


@dataclass
class FileTruncated(Action):
    handle: int
    path: str
    mode: str


@dataclass
class FileDeleted(Action):
    handle: int
    path: str


@dataclass
class FileMoved(Action):
    handle: int
    old_path: str
    new_path: str


@dataclass
class FileClosed(Action):
    handle: int


@dataclass
class FileWritten(Action):
    handle: int
    data: bytes


@dataclass
class RegKeyOpened(Action):
    handle: int
    path: str
    root_key: str
    sub_key: str


@dataclass
class RegKeyDeleted(Action):
    handle: int
    path: str


@dataclass
class RegKeyValueDeleted(Action):
    handle: int
    path: str
    value_name: str


@dataclass
class RegKeyValueSet(Action):
    handle: int
    path: str
    data_type: str
    data: Union[bytes, str, List[str], int, None]


@dataclass
class ServiceCreated(Action):
    handle: int
    name: str
    access: wc.ServiceAccess
    service_type: wc.ServiceType
    start_type: wc.ServiceStart
    display_name: str
    binary_path: str


@dataclass
class ServiceOpened(Action):
    handle: int
    name: str


@dataclass
class ServiceDeleted(Action):
    handle: int


@dataclass
class ServiceDescriptionChanged(Action):
    handle: int
    description: str


@dataclass
class ShellOperation(Action):
    operation: str
    path: str
    parameters: str
    directory: str
    visibility: wc.Visibility = None
18.342593
79
0.646643
473
3,962
5.272727
0.255814
0.106656
0.090217
0.060946
0.346832
0.222534
0.20409
0.118284
0.118284
0.06656
0
0
0.268551
3,962
215
80
18.427907
0.860594
0.115598
0
0.463768
0
0
0.03033
0.007951
0
0
0
0
0
1
0.101449
false
0
0.028986
0.036232
0.710145
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
7497c1706d2cfdef04eb240c836fa9520fbded1f
10,037
py
Python
n2nmn-tensorlayer/models_shapes/nmn3_layers.py
jiaqi-xi/Neural-Module-Networks.Tensorlayer
3607e6717473aed51c653cf931dc7d80866b0227
[ "MIT" ]
null
null
null
n2nmn-tensorlayer/models_shapes/nmn3_layers.py
jiaqi-xi/Neural-Module-Networks.Tensorlayer
3607e6717473aed51c653cf931dc7d80866b0227
[ "MIT" ]
null
null
null
n2nmn-tensorlayer/models_shapes/nmn3_layers.py
jiaqi-xi/Neural-Module-Networks.Tensorlayer
3607e6717473aed51c653cf931dc7d80866b0227
[ "MIT" ]
3
2020-01-12T15:45:44.000Z
2020-01-31T13:07:46.000Z
from __future__ import absolute_import, division, print_function import sys import tensorflow as tf import tensorlayer as tl import numpy as np from tensorflow import convert_to_tensor as to_T sess = tf.Session() tl.layers.initialize_global_variables(sess) def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME', bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None): # input has shape [batch, in_height, in_width, in_channels] input_dim = bottom.get_shape().as_list()[-1] # weights and biases variables with tf.variable_scope(name, reuse=reuse): # initialize the variables if weights_initializer is None: weights_initializer = tf.contrib.layers.xavier_initializer_conv2d() if bias_term and biases_initializer is None: biases_initializer = tf.constant_initializer(0.) # filter has shape [filter_height, filter_width, in_channels, out_channels] weights = tf.get_variable("weights", [kernel_size, kernel_size, input_dim, output_dim], initializer=weights_initializer) if bias_term: biases = tf.get_variable("biases", output_dim, initializer=biases_initializer) if not reuse: tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(weights)) conv = tf.nn.conv2d(bottom, filter=weights, strides=[1, stride, stride, 1], padding=padding) if bias_term: conv = tf.nn.bias_add(conv, biases) return conv def conv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME', bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None): # input has shape [batch, in_height, in_width, in_channels] input_dim = bottom.get_shape().as_list()[-1] # weights and biases variables with tf.variable_scope(name, reuse=reuse): # initialize the variables if weights_initializer is None: weights_initializer = tf.contrib.layers.xavier_initializer_conv2d() if bias_term and biases_initializer is None: biases_initializer = tf.constant_initializer(0.) 
# filter has shape [filter_height, filter_width, in_channels, out_channels] weights = tf.get_variable("weights", [kernel_size, kernel_size, input_dim, output_dim], initializer=weights_initializer) if bias_term: biases = tf.get_variable("biases", output_dim, initializer=biases_initializer) if not reuse: tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(weights)) conv = tf.nn.conv2d(bottom, filter=weights, strides=[1, stride, stride, 1], padding=padding) if bias_term: conv = tf.nn.bias_add(conv, biases) relu = tf.nn.relu(conv) return relu def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME', bias_term=True, weights_initializer=None, biases_initializer=None): # input_shape is [batch, in_height, in_width, in_channels] input_shape = bottom.get_shape().as_list() batch_size, input_height, input_width, input_dim = input_shape output_shape = [batch_size, input_height*stride, input_width*stride, output_dim] # weights and biases variables with tf.variable_scope(name, reuse=reuse): # initialize the variables if weights_initializer is None: weights_initializer = tf.contrib.layers.xavier_initializer_conv2d() if bias_term and biases_initializer is None: biases_initializer = tf.constant_initializer(0.) 
# filter has shape [filter_height, filter_width, out_channels, in_channels] weights = tf.get_variable("weights", [kernel_size, kernel_size, output_dim, input_dim], initializer=weights_initializer) if bias_term: biases = tf.get_variable("biases", output_dim, initializer=biases_initializer) if not reuse: tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(weights)) net = tl.layers.InputLayer(inputs=bottom, name=name+'input') deconv = tl.layers.DeConv2dLayer(net, act=tf.identity, shape=[kernel_size, kernel_size, output_dim, input_dim], output_shape=output_shape, strides=[1, stride, stride, 1], padding=padding, W_init=weights_initializer, b_init=biases_initializer, name=name+'deconv2d') return deconv.outputs def deconv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME', bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None): deconv = deconv_layer(name, bottom, kernel_size, stride, output_dim, padding, bias_term, weights_initializer, biases_initializer, reuse=reuse) # relu = tl.layers.PReluLayer(deconv) relu = tf.nn.relu(deconv) return relu def pooling_layer(name, bottom, kernel_size, stride): #pool = tf.nn.max_pool(bottom, ksize=[1, kernel_size, kernel_size, 1], # strides=[1, stride, stride, 1], padding='SAME', name=name) net = tl.layers.InputLayer(inputs=bottom, name=name+'input') pool = tl.layers.PoolLayer(net, ksize=[1, kernel_size, kernel_size, 1], strides=[1, stride, stride, 1], padding='SAME', pool=tf.nn.max_pool, name=name+'pool') return pool.outputs def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None): # flatten bottom input # input has shape [batch, in_height, in_width, in_channels] shape = bottom.get_shape().as_list() input_dim = 1 for d in shape[1:]: input_dim *= d # flat_bottom = tf.reshape(bottom, [-1, input_dim]) net = tl.layers.InputLayer(inputs=bottom, name=name+'input') flat_bottom = tl.layers.ReshapeLayer(net, [-1, 
input_dim], name=name+'reshape').outputs # weights and biases variables with tf.variable_scope(name, reuse=reuse): # initialize the variables if weights_initializer is None: weights_initializer = tf.contrib.layers.xavier_initializer() if bias_term and biases_initializer is None: biases_initializer = tf.constant_initializer(0.) # weights has shape [input_dim, output_dim] weights = tf.get_variable("weights", [input_dim, output_dim], initializer=weights_initializer) if bias_term: biases = tf.get_variable("biases", output_dim, initializer=biases_initializer) if not reuse: tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(weights)) if bias_term: fc = tf.nn.xw_plus_b(flat_bottom, weights, biases) else: fc = tf.matmul(flat_bottom, weights) return fc def fc_relu_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None): fc = fc_layer(name, bottom, output_dim, bias_term, weights_initializer, biases_initializer, reuse=reuse) relu = tf.nn.relu(fc) return relu # convnet built for shapes dataset def shapes_convnet(input_batch, hidden_dim=64, output_dim=64, scope='shapes_convnet', reuse=None): with tf.variable_scope(scope, reuse=reuse): conv_1 = conv_relu_layer('conv_1', input_batch, kernel_size=10, stride=10, output_dim=hidden_dim, padding='VALID') conv_2 = conv_relu_layer('conv_2', conv_1, kernel_size=1, stride=1, output_dim=output_dim) return conv_2 # following convnet are safe even for empty data def empty_safe_1x1_conv(name, bottom, output_dim, reuse=None): # use this for 1x1 convolution in modules to avoid the crash. bottom_shape = tf.shape(bottom) input_dim = bottom.get_shape().as_list()[-1] # weights and biases variables with tf.variable_scope(name, reuse=reuse): # initialize the variables weights_initializer = tf.contrib.layers.xavier_initializer() biases_initializer = tf.constant_initializer(0.) 
weights = tf.get_variable('weights', [input_dim, output_dim], initializer=weights_initializer) biases = tf.get_variable('biases', output_dim, initializer=biases_initializer) conv_flat = tf.matmul(tf.reshape(bottom, [-1, input_dim]), weights) + biases conv = tf.reshape(conv_flat, to_T([bottom_shape[0], bottom_shape[1], bottom_shape[2], output_dim])) return conv # use this for arbitrary convolution in modules to avoid the crash. def empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME', bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None): g = tf.get_default_graph() with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}): return conv_layer(name, bottom, kernel_size, stride, output_dim, padding, bias_term, weights_initializer, biases_initializer, reuse=reuse) @tf.RegisterGradient('Conv2D_handle_empty_batch') def _Conv2DGrad(op, grad): with tf.device('/cpu:0'): filter_grad = tf.nn.conv2d_backprop_input( # compute gradient_filter tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'), op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'), op.get_attr('data_format')) input_grad = tf.nn.conv2d_backprop_filter( # compute gradient_input op.inputs[0], tf.shape(op.inputs[1]), grad, op.get_attr('strides'), op.get_attr('padding'),op.get_attr('use_cudnn_on_gpu'), op.get_attr('data_format')) return [filter_grad, input_grad]
45.622727
115
0.672512
1,295
10,037
4.957529
0.118919
0.040654
0.035981
0.024922
0.72243
0.710903
0.69081
0.659502
0.641745
0.610748
0
0.0093
0.228654
10,037
219
116
45.83105
0.819943
0.125137
0
0.54375
0
0
0.034053
0.005714
0
0
0
0
0
1
0.06875
false
0
0.0375
0
0.175
0.00625
0
0
0
null
0
0
0
0
1
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
2
74984e77e478676a74e3414a580cf3837bbf6e4c
16,641
py
Python
reinforcement_learning/q_learn.py
noderod/DARLMID
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
[ "MIT" ]
null
null
null
reinforcement_learning/q_learn.py
noderod/DARLMID
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
[ "MIT" ]
null
null
null
reinforcement_learning/q_learn.py
noderod/DARLMID
5737dbe222ce5a5a847c1d0a8d1af64dda87e5b2
[ "MIT" ]
null
null
null
""" SUMMARY Reinforcement learning via q-learning on the provided data, using previous data if requested """ import argparse import json import random import sys import matplotlib.pyplot as plt import numpy as np import auxiliary as aux from vehicle import Vehicle # Sets seed for reproducibility random.seed(0) # Processes arguments parser = argparse.ArgumentParser() required_flags = parser.add_argument_group(title="Required") required_flags.add_argument("--epochs",required=True, help="Number of epochs", type=int) required_flags.add_argument("--explore-probability",required=True, help="Explore probability [0, 1]", type=float) required_flags.add_argument("--learning-rate",required=True, help="Learning rate [0, 1]", type=float) required_flags.add_argument("--discount-factor",required=True, help="Discount factor [0, 1]", type=float) required_flags.add_argument("--data",required=True, help="JSON filepath to read Q, rewards matrices and other information", type=str) required_flags.add_argument("--positive-demonstration", help="JSON filepath to read Q matrix updates after a number of positive demonstrations (already processed)", type=str) required_flags.add_argument("--negative-demonstration", help="JSON filepath to read Q matrix updates after a number of negative demonstrations (already processed)", type=str) required_flags.add_argument("--good-advice-decay", help="Training epochs good advice is remembered (50 by defaulr)", type=int) required_flags.add_argument("--bad-advice-decay", help="Training epochs bad advice is remembered (5 by defaulr)", type=int) required_flags.add_argument("--output", required=True, help="JSON filepath to output the results", type=str) parser.add_argument("--show", help="Show output reward vs. 
epoch plot", action="store_true") args = parser.parse_args() p_exp = args.explore_probability α = args.learning_rate γ = args.discount_factor assert (0 <= p_exp) and (p_exp <= 1), "Explore probability must be between 0 and 1" assert (0 <= α) and (α <= 1), "Learning rate must be between 0 and 1" assert (0 <= γ) and (γ <= 1), "Discount factor must be between 0 and 1" good_advice_decay_epochs = 50 good_decay_ratio = 1/good_advice_decay_epochs bad_advice_decay_epochs = 5 bad_decay_ratio = 1/bad_advice_decay_epochs if args.good_advice_decay: assert args.good_advice_decay >= 0, "Good advice decay cannot be negative epochs" good_advice_retention_epochs = args.good_advice_decay if args.bad_advice_decay: assert args.bad_advice_decay >= 0, "Bad advice decay cannot be negative epochs" bad_advice_retention_epochs = args.bad_advice_decay #----------------------------------------------------- # DATA PREPROCESSING #----------------------------------------------------- # Loads original data with open(args.data, "r") as jf: original_data = json.load(jf) R = original_data["rewards matrix"] Q = original_data["Q matrix"] nx = original_data["nx"] ny = original_data["ny"] possible_speeds = original_data["possible speeds"] speed_max = possible_speeds - 1 valid_positions = original_data["valid positions"] orientations = [i for i in range(0, len(original_data["orientations"]))] actions = [j for j in range(0, len(original_data["actions"]))] num_actions = len(actions) #----------------------------------------------------- # NECESSARY VARIABLES #----------------------------------------------------- β_good = 0.2 β_bad = 0.2 ξ_0 = 1 δ_0 = 0 Φ_0 = 0 R_expert_good = 1 R_expert_bad = -1 # Sets the Φ(s, a), R^{expert} # Always 0 Φ = np.zeros((nx, ny, len(orientations), possible_speeds, len(actions))) R_expert = np.zeros((nx, ny, len(orientations), possible_speeds, len(actions))) #----------------------------------------------------- # ADVICE PROCESSING 
#----------------------------------------------------- # Stores advice actions # "good":{"x, y, o, v":True, ...} # "bad": {"x, y, o, v":True, ...} advice_locations = {"good":{}, "bad":{}} # From Useful Policy Invariant Shaping from Arbitrary Advice (Behboudian et al.) # Utilizes positive demonstration data # Positive intent -> Intentionally good demonstrations (although perhaps the user is incompetent) if args.positive_demonstration: # Retrieves demonstration data with open(args.positive_demonstration, "r") as jf: original_demonstration_data = json.load(jf) action_sets_taken = original_demonstration_data["actions taken"] # Simply take the data as is, modify the appropriate Q matrix value, adding +1 to the appropiate Q[s, a] location for an_action_path in action_sets_taken: # Goes step by step for a_step in an_action_path: step_x = a_step[0] step_y = a_step[1] step_o = a_step[2] step_v = a_step[3] step_a = a_step[4] advice_locations["good"][aux.state_to_str(step_x, step_y, step_o, step_v)] = [good_advice_decay_epochs, step_a] R_expert[step_x][step_y][step_o][step_v][step_a] = R_expert_good # Utilizes negative demonstration data # Negative intent -> Intentionally poor or misleading demonstrations if args.negative_demonstration: # Retrieves demonstration data with open(args.negative_demonstration, "r") as jf: original_demonstration_data = json.load(jf) action_sets_taken = original_demonstration_data["actions taken"] for an_action_path in action_sets_taken: # Goes step by step for a_step in an_action_path: step_x = a_step[0] step_y = a_step[1] step_o = a_step[2] step_v = a_step[3] step_a = a_step[4] advice_locations["bad"][aux.state_to_str(step_x, step_y, step_o, step_v)] = [bad_advice_decay_epochs, step_a] R_expert[step_x][step_y][step_o][step_v][step_a] = R_expert_bad #----------------------------------------------------- # NECESSARY FUNCTIONS #----------------------------------------------------- # Tests with the current Q matrix # Each epoch tests a starting 
location with a random orientation but always zero speed # Each reward in the array is: max(Reward - steps, 0) # Up to 100 steps can be used # Returns an array containing rewards max_testing_iterations = 100 def test_Q(): results = [] # Reshuffles the valid starting locations random.shuffle(valid_positions) # Goes through every valid position for a_valid_position in valid_positions: xloc, yloc = a_valid_position starting_orientation = random.randint(0, 3) tested_vehicle = Vehicle(nx, ny, xloc, yloc, starting_orientation, 0, speed_max, R) reward_so_far = 0 for an_iteration in range(0, max_testing_iterations): # Gets the current location v_x = tested_vehicle.xloc v_y = tested_vehicle.yloc v_orientation = tested_vehicle.orientation_index v_speed = tested_vehicle.speed # Adds the penalty/reward corresponding to this location reward_so_far += R[v_x][v_y] # If this is a reward, obstacle, or outside the circuit (unless it is outside the borders) add the reward and then exit this iteration if R[v_x][v_y] != -1: break # Chooses the action index with the maximum reward in Q # If two actions have the same optimal Q-value, the first one will be chosen Q_values_to_choose = Q[v_x][v_y][v_orientation][v_speed] best_Q_value = max(Q_values_to_choose) action_index = Q_values_to_choose.index(best_Q_value) # Makes the vehicle attempt it tested_vehicle.execute_action(action_index, modify_self=True, get_copy_there=False, get_end_location=False) results.append(max(0, reward_so_far)) return results # Trains starting with the current Q matrix, which is updated at each step # Each epoch tests a starting location with a random orientation but always zero speed # Each reward in the array is: max(Reward - steps, 0) # Up to 100 steps can be used # Does not return anything max_training_iterations = 100 def train_Q(): # Reshuffles the valid starting locations random.shuffle(valid_positions) # Stores the good and bad advice states reached this round good_advice_states_seen = {} 
bad_advice_states_seen = {} # Goes through every valid position for a_valid_position in valid_positions: xloc, yloc = a_valid_position starting_orientation = random.randint(0, 3) tested_vehicle = Vehicle(nx, ny, xloc, yloc, starting_orientation, 0, speed_max, R) for an_iteration in range(0, max_training_iterations): # Gets the current location v_x = tested_vehicle.xloc v_y = tested_vehicle.yloc v_orientation = tested_vehicle.orientation_index v_speed = tested_vehicle.speed # If this is a reward, obstacle, or outside the circuit (unless it is outside the borders) then exit this iteration if R[v_x][v_y] != -1: break # Gets a random probability what_to_do = random.random() # If below the explore probability, explore, choose an action at random if what_to_do <= p_exp: chosen_action_index = random.randint(0, 4) given_reward = R[v_x][v_y] expert_opinion_used = False α_used = α else: # Chooses the action index with the maximum reward in Q # If two actions have the same optimal Q-value, the first one will be chosen Q_values_to_choose = Q[v_x][v_y][v_orientation][v_speed] # Selects the best actions a priori a_priori_best_Q_value = max(Q_values_to_choose) a_priori_best_action = Q_values_to_choose.index(a_priori_best_Q_value) # Checks if this state was considered good or bad s_as_state = aux.state_to_str(v_x, v_y, v_orientation, v_speed) if (s_as_state in advice_locations["good"]) and (advice_locations["good"][s_as_state][1] == a_priori_best_action) and (advice_locations["good"][s_as_state][0] > 0): if s_as_state not in good_advice_states_seen: good_advice_states_seen[s_as_state] = True expert_opinion_used = True advice_followed_times = good_advice_decay_epochs - advice_locations["good"][s_as_state][0] decay_ratio = good_decay_ratio α_used = 0.05 β_used = β_good elif (s_as_state in advice_locations["bad"]) and (advice_locations["bad"][s_as_state][1] == a_priori_best_action) and (advice_locations["bad"][s_as_state][0] > 0): if s_as_state not in bad_advice_states_seen: 
bad_advice_states_seen[s_as_state] = True expert_opinion_used = True advice_followed_times = bad_advice_decay_epochs - advice_locations["bad"][s_as_state][0] decay_ratio = bad_decay_ratio α_used = 0.1 β_used = β_bad else: # Action not provided as advice best_Q_value = a_priori_best_Q_value chosen_action_index = a_priori_best_action given_reward = R[v_x][v_y] expert_opinion_used = False α_used = α if expert_opinion_used: # Q(s, a) - ξ_t*Φ_t(s, a) policies_to_choose = [0 for a in range(0, num_actions)] # Stores Φ_t(s, a), Φ_t(s', a') values before the update pu_Φ_t_sa = np.zeros((num_actions)) pu_Φ_t_snan = np.zeros((num_actions)) for an_action in range(0, num_actions): # Gets the next location but does not move there yet if no expert was provided using a priori data [_0, possible_next_sa] = tested_vehicle.execute_action(a_priori_best_action, modify_self=False, get_copy_there=False, get_end_location=True) sn_x = possible_next_sa[0][0] sn_y = possible_next_sa[0][1] sn_o = possible_next_sa[1] sn_v = possible_next_sa[2] Q_sn = Q[sn_x][sn_y][sn_o][sn_v] sn_a = Q_sn.index(max(Q_sn)) # Φ_t(s, a) Φ_t_sa = Φ[v_x][v_y][v_orientation][v_speed][an_action] pu_Φ_t_sa[an_action] = Φ_t_sa # Assumption to avoid BFS # Φ_{t+1}(s', a') = Φ_t(s', a') # Φ_t(s', a') Φ_t_snan = Φ[sn_x][sn_y][sn_o][sn_v][sn_a] pu_Φ_t_snan[an_action] = Φ_t_snan # δ_t^Φ δ_t_Φ = -R_expert[v_x][v_y][v_orientation][v_speed][an_action] + γ*Φ_t_snan - Φ_t_sa # ξ_t # Counts how many times this particular advice has been followed ξ_t = 1 - advice_followed_times*decay_ratio # generates the local policies to choose from policies_to_choose[an_action] = Q_values_to_choose[an_action] - ξ_t*Φ_t_sa # Generates Φ_{t+1}(s, a) Φ[v_x][v_y][v_orientation][v_speed][an_action] = Φ_t_sa + β_used*δ_t_Φ # Chooses the optimal policy action chosen_action_index = policies_to_choose.index(max(policies_to_choose)) given_reward = R[v_x][v_y] + γ*pu_Φ_t_snan[chosen_action_index] - pu_Φ_t_sa[chosen_action_index] # Makes the vehicle attempt 
the action [_1, location_end] = tested_vehicle.execute_action(chosen_action_index, modify_self=True, get_copy_there=False, get_end_location=True) # Updates the Q matrix # Q[s, a] = Q[s, a] + α*(R[s] + γ*max(Q[s', a'], a') - Q[s, a]) v_x_new = location_end[0][0] v_y_new = location_end[0][1] v_orientation_new = location_end[1] v_speed_new = location_end[2] Q_apostrophe_max = max(Q[v_x_new][v_y_new][v_orientation_new][v_speed_new]) Q_sa = Q[v_x][v_y][v_orientation][v_speed][chosen_action_index] s_as_state = aux.state_to_str(v_x, v_y, v_orientation, v_speed) Q[v_x][v_y][v_orientation][v_speed][chosen_action_index] = Q_sa + α_used*(given_reward + γ*Q_apostrophe_max - Q_sa) # Marks certain states as seen this round for a_good_seen_state in good_advice_states_seen: # Good advice reward decays advice_locations["good"][a_good_seen_state][0] -= 1 for a_bad_seen_state in bad_advice_states_seen: # Bad advice reward rises advice_locations["bad"][a_bad_seen_state][0] -= 1 #----------------------------------------------------- # Q-LEARNING #----------------------------------------------------- # [[epoch index, RMS reward], ...] 
epoch_rewards = [] for an_epoch in range(0, args.epochs): # Tests tested_rewards = test_Q() # Calculates and appends the RMS reward to results epoch_rewards.append([an_epoch, aux.RMS(tested_rewards)]) # Trains (unless it is the last epoch) if an_epoch != (args.epochs - 1): train_Q() #----------------------------------------------------- # OUTPUTS RESULTS #----------------------------------------------------- with open(args.output, "w") as jf: jf.write(json.dumps({"Q matrix":Q, "epoch rewards":epoch_rewards}, indent=4)) #----------------------------------------------------- # SHOWS PLOT WITH RESULTS IF REQUESTED #----------------------------------------------------- if not args.show: sys.exit() plt.figure() epochs_used = [] rewards_obtained = [] for mt in range(0, len(epoch_rewards)): epochs_used.append(epoch_rewards[mt][0]) rewards_obtained.append(epoch_rewards[mt][1]) plt.plot(epochs_used, rewards_obtained, "k-") plt.xlabel("Epoch") plt.ylabel("Reward") plt.title("Reward vs. Epoch") plt.show()
36.654185
180
0.607656
2,287
16,641
4.136423
0.143857
0.004651
0.004757
0.006342
0.50518
0.463953
0.427907
0.383932
0.346934
0.32093
0
0.010339
0.261823
16,641
453
181
36.735099
0.759769
0.241151
0
0.262443
0
0
0.089313
0.005507
0
0
0
0
0.022624
1
0.00905
false
0
0.036199
0
0.049774
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749864186a6dd9c711cc350ae0e33d7867281662
162
py
Python
problem_24.py
MasterScott/project-euler
643df8258bf9ac84b14b648a3a5a254bc682c473
[ "MIT" ]
null
null
null
problem_24.py
MasterScott/project-euler
643df8258bf9ac84b14b648a3a5a254bc682c473
[ "MIT" ]
null
null
null
problem_24.py
MasterScott/project-euler
643df8258bf9ac84b14b648a3a5a254bc682c473
[ "MIT" ]
1
2019-10-28T23:33:46.000Z
2019-10-28T23:33:46.000Z
from itertools import islice, permutations


def millionth_permutation(digits: str = "0123456789", index: int = 10**6 - 1) -> str:
    """Return the lexicographic permutation of *digits* at 0-based position *index*.

    Project Euler problem 24 asks for the millionth permutation of 0-9.
    permutations() yields tuples in lexicographic order when its input is
    sorted, so we can simply skip ahead to the requested position with islice
    instead of enumerating and breaking by hand.

    :param digits: string of symbols to permute (must be sorted for
        lexicographic order).
    :param index: 0-based position of the desired permutation.
    :return: the permutation joined back into a string.
    :raises StopIteration: if *index* is out of range for len(digits)!.
    """
    # Fix: the original used the Python-2 `print ''.join(p)` statement (a
    # SyntaxError on Python 3) and a redundant list comprehension over the
    # string (strings are already iterable).
    return "".join(next(islice(permutations(digits), index, None)))


if __name__ == "__main__":
    print(millionth_permutation())
23.142857
63
0.604938
24
162
4.083333
0.75
0
0
0
0
0
0
0
0
0
0
0.117647
0.265432
162
6
64
27
0.705882
0
0
0
0
0
0.061728
0
0
0
0
0
0
0
null
null
0
0.2
null
null
0.2
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
3
7499a0d47d4f28aa8fbfe508262da370434f71b2
12,260
py
Python
src/sark110.py
EA4FRB/sark110-python
ff5ace7c0d71f9de48772cdb1cedb98f5c22df8a
[ "MIT" ]
2
2019-09-03T18:43:53.000Z
2019-11-06T18:26:23.000Z
src/sark110.py
EA4FRB/sark110-python
ff5ace7c0d71f9de48772cdb1cedb98f5c22df8a
[ "MIT" ]
1
2020-04-03T22:59:18.000Z
2020-04-29T16:17:02.000Z
src/sark110.py
EA4FRB/sark110-python
ff5ace7c0d71f9de48772cdb1cedb98f5c22df8a
[ "MIT" ]
1
2019-09-05T16:40:55.000Z
2019-09-05T16:40:55.000Z
# --------------------------------------------------------- """ This file is a part of the "SARK110 Antenna Vector Impedance Analyzer" software MIT License @author Copyright (c) 2020 Melchor Varela - EA4FRB Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" # --------------------------------------------------------- import os import struct import time if os.name == 'nt': import pywinusb.hid as hid import threading elif os.name == 'posix': import hid else: raise ImportError("Error: no implementation for your platform ('{}') available".format(os.name)) SARK110_VENDOR_ID = 0x0483 SARK110_PRODUCT_ID = 0x5750 WAIT_HID_DATA_MS = 1000 class Sark110: _handler = 0 _is_connect = 0 _max_freq = 0 _min_freq = 0 _dev_name = "" _fw_version = "" _fw_protocol = -1 @property def fw_version(self) -> str: return self._fw_version @property def fw_protocol(self) -> int: return self._fw_protocol @property def dev_name(self) -> str: return self._dev_name @property def max_freq(self) -> int: return self._max_freq @property def min_freq(self) -> int: return self._min_freq @property def is_connected(self) -> bool: return self._is_connect def __init__(self): self._handler = 0 self._is_connect = 0 def open(self) -> int: """ Opens the device :return: <0 err; >0 ok """ # Windows: pywinusb if os.name == 'nt': target_vendor_id = SARK110_VENDOR_ID target_product_id = SARK110_PRODUCT_ID hid_filter = hid.HidDeviceFilter(vendor_id=target_vendor_id, product_id=target_product_id) try: self._handler = hid_filter.get_devices()[0] if not self._handler: return -1 else: self._handler.open() self._handler.set_raw_data_handler(self._rx_handler) return 1 except: return -2 # Linux: hidapi else: self._handler = hid.device() try: self._handler.open(SARK110_VENDOR_ID, SARK110_PRODUCT_ID) self._handler.set_nonblocking(0) return 1 except IOError as ex: return -1 def connect(self) -> int: """ Connect to the device and get its characteristics :return: <0 err; >0 ok """ if not self._handler: return -1 if self._cmd_version() < 0: return -2 self._is_connect = 1; return 1 def close(self): """ Closes the device :return: """ if self._handler: self._handler.close() self._handler = 0 self._is_connect = 0 def measure(self, freq: int, rs: float, xs: float, cal=True, samples=1) 
-> int: """ Takes one measurement sample at the specified frequency :param freq: frequency in hertz; 0 to turn-off the generator :param cal: True to get OSL calibrated data; False to get uncalibrated data :param samples: number of samples for averaging :param rs real part of the impedance :param xs imag part of the impedance :return: <0 err; >0 ok """ if not self._is_connect: return -1 snd = [0x0] * 19 snd[1] = 2 b = self._int2bytes(freq) snd[2] = b[0] snd[3] = b[1] snd[4] = b[2] snd[5] = b[3] if cal: snd[6] = 1 else: snd[6] = 0 snd[7] = samples rcv = self._send_rcv(snd) if rcv[0] != 79: return -2 b = bytearray([0, 0, 0, 0]) b[0] = rcv[1] b[1] = rcv[2] b[2] = rcv[3] b[3] = rcv[4] rs[0] = struct.unpack('f', b) b[0] = rcv[5] b[1] = rcv[6] b[2] = rcv[7] b[3] = rcv[8] xs[0] = struct.unpack('f', b) return 1 def buzzer(self, freq=0, duration=0) -> int: """ Sounds the sark110 buzzer. :param device: handler :param freq: frequency in hertz :param duration: duration in ms :return: <0 err; >0 ok """ if not self._is_connect: return -1 snd = [0x0] * 19 snd[1] = 20 b = self._short2bytes(freq) snd[2] = b[0] snd[3] = b[1] b = self._short2bytes(duration) snd[4] = b[0] snd[5] = b[1] rcv = self._send_rcv(snd) if duration == 0: time.sleep(.2) else: time.sleep(duration / 1000) if rcv[0] == 79: return 1 return -2 def reset(self) -> int: """ Resets the device :return: <0 err; >0 ok """ if not self._is_connect: return -1 snd = [0x0] * 19 snd[1] = 50 rcv = self._send_rcv(snd) if rcv == 79: return 1 return -2 def measure_ext(self, freq: int, step: int, rs: float, xs: float, cal=True, samples=1) -> int: """ Takes four measurement samples starting at the specified frequency and incremented at the specified step Uses half float, so a bit less precise :param device: handler :param freq: frequency in hertz; 0 to turn-off the generator :param step: step in hertz :param cal: True to get OSL calibrated data; False to get uncalibrated data :param samples: number of samples for averaging :param rs 
real part of the impedance (four vals) :param xs imag part of the impedance (four vals) :return: <0 err; >0 ok """ if not self._is_connect: return -1 snd = [0x0] * 19 snd[1] = 12 b = self._int2bytes(freq) snd[2] = b[0] snd[3] = b[1] snd[4] = b[2] snd[5] = b[3] b = self._int2bytes(step) snd[8] = b[0] snd[9] = b[1] snd[10] = b[2] snd[11] = b[3] if cal: snd[6] = 1 else: snd[6] = 0 snd[7] = samples rcv = self._send_rcv(snd) if rcv[0] != 79: return -2 rs[0] = self._half2float(rcv[1], rcv[2]) xs[0] = self._half2float(rcv[3], rcv[4]) rs[1] = self._half2float(rcv[5], rcv[6]) xs[1] = self._half2float(rcv[7], rcv[8]) rs[2] = self._half2float(rcv[9], rcv[10]) xs[2] = self._half2float(rcv[11], rcv[12]) rs[3] = self._half2float(rcv[13], rcv[14]) xs[3] = self._half2float(rcv[15], rcv[16]) return 1 # --------------------------------------------------------- # Get version command: used to check the connection and dev params def _cmd_version(self): if not self._handler: return -1 self._fw_protocol = 0 self._fw_version = "" snd = [0x0] * 19 snd[1] = 1 rcv = self._send_rcv(snd) if rcv[0] != 79: return -2 self._fw_protocol = (rcv[2] << 8) & 0xFF00 self._fw_protocol += rcv[1] & 0xFF ver = [0x0] * 15 ver[:] = rcv[3:] # Identifies the device if (self._fw_protocol & 0xff00) == 0x0100: self._max_freq = 200000000 self._min_freq = 100000 self._dev_name = "sark110 (100k to 200M)" elif (self._fw_protocol & 0xff00) == 0x0200: self._max_freq = 230000000 self._min_freq = 10000 self._dev_name = "sark110 (10k to 230M)" elif (self._fw_protocol & 0xff00) == 0x0300: self._max_freq = 230000000 self._min_freq = 10000 self._dev_name = "sark110 mk1" elif (self._fw_protocol & 0xff00) == 0x0a00: self._max_freq = 1000000000 self._min_freq = 100000 self._dev_name = "sark110 ulm" else: self._max_freq = 230000000 self._min_freq = 100000 self._dev_name = "sark110" # Converts version to str for i in range(15): if ver[i] == 0: break elif ver[i] == 46: self._fw_version += "." 
else: self._fw_version += "%c" % (ver[i]) return 1 # --------------------------------------------------------- # half float decompress def _half2float(self, byte1, byte2): hfs = (byte2 << 8) & 0xFF00 hfs += byte1 & 0xFF temp = self.__half2float(hfs) res_pack = struct.pack('I', temp) return struct.unpack('f', res_pack)[0] def __half2float(self, float16): s = int((float16 >> 15) & 0x00000001) # sign e = int((float16 >> 10) & 0x0000001f) # exponent f = int(float16 & 0x000003ff) # fraction if e == 0: if f == 0: return int(s << 31) else: while not (f & 0x00000400): f = f << 1 e -= 1 e += 1 f &= ~0x00000400 # print(s,e,f) elif e == 31: if f == 0: return int((s << 31) | 0x7f800000) else: return int((s << 31) | 0x7f800000 | (f << 13)) e = e + (127 - 15) f = f << 13 return int((s << 31) | (e << 23) | f) # --------------------------------------------------------- def _short2bytes(self, n): """ short to buffer array :param n: :return: """ b = bytearray([0, 0]) b[0] = n & 0xFF n >>= 8 b[1] = n & 0xFF return b def _int2bytes(self, n): """ int to buffer array :param n: :return: """ b = bytearray([0, 0, 0, 0]) b[0] = n & 0xFF n >>= 8 b[1] = n & 0xFF n >>= 8 b[2] = n & 0xFF n >>= 8 b[3] = n & 0xFF return b # --------------------------------------------------------- def _send_rcv(self, snd): # Windows: pywinusb if os.name == 'nt': try: report = self._handler.find_output_reports()[0] self.event.clear() report.set_raw_data(snd) report.send() self.event.wait() return _g_rcv[1:18] except: return [0] * 18 # Linux: hidapi else: try: self._handler.write(snd) return self._handler.read(18, WAIT_HID_DATA_MS) except: return [0] * 18 def _rx_handler(self, data): """ Handler called when a report is received :param data: :return: """ global _g_rcv _g_rcv = data.copy() self.event.set() # --------------------------------------------------------- _g_rcv = [0xff] * 19 if os.name == 'nt': event = threading.Event()
29.757282
112
0.49633
1,508
12,260
3.899204
0.215517
0.029932
0.019048
0.011224
0.331463
0.292347
0.264456
0.233163
0.188605
0.185884
0
0.072789
0.370228
12,260
411
113
29.829684
0.688771
0.258075
0
0.431159
0
0
0.017615
0
0
0
0.022865
0
0
1
0.076087
false
0
0.025362
0.021739
0.278986
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749bf7c085175f0c77d7d99fa53fad97cc10c478
2,295
py
Python
Utkarsh1.py
utkarsh7236/SCILLA
e11e4d753823ad522a1b3168283b6e6ffe3ea393
[ "Apache-2.0" ]
null
null
null
Utkarsh1.py
utkarsh7236/SCILLA
e11e4d753823ad522a1b3168283b6e6ffe3ea393
[ "Apache-2.0" ]
null
null
null
Utkarsh1.py
utkarsh7236/SCILLA
e11e4d753823ad522a1b3168283b6e6ffe3ea393
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
"""Open the SCILLA circuits SQLite database and print every row of its ``tasks`` table."""
import sqlite3

path = '/Users/Utkarsh/PycharmProjects/SCILLA/Experiments/circuits.db'


def select_all_tasks(conn):
    """
    Query all rows in the tasks table and print each one.
    :param conn: the Connection object
    :return: None
    """
    cur = conn.cursor()
    cur.execute("SELECT * FROM tasks")
    for row in cur.fetchall():
        print(row)


def create_connection(db_file):
    """
    Create a database connection to the SQLite database
    specified by the db_file.
    :param db_file: database file
    :return: Connection object or None
    """
    conn = None
    try:
        conn = sqlite3.connect(db_file)
    except sqlite3.Error as e:
        # Best-effort: report the failure and let the caller see None.
        print(e)
    return conn


if __name__ == '__main__':
    data = create_connection(path)
    print("Opened database successfully")
    # Fix: guard against a failed connection (the original called
    # select_all_tasks(None) and crashed with AttributeError) and close
    # the connection when done (it previously leaked).
    if data is not None:
        try:
            select_all_tasks(data)
        finally:
            data.close()
# Removed: a large block of commented-out duplicate code (an earlier copy of
# this same script) — dead code should not be kept in comments.
21.650943
80
0.608715
277
2,295
4.891697
0.227437
0.035424
0.041328
0.039852
0.729889
0.673801
0.673801
0.558672
0.558672
0.526199
0
0.00537
0.269717
2,295
106
81
21.650943
0.803103
0.680174
0
0
0
0
0.186495
0.098071
0
0
0
0
0
1
0.105263
false
0
0.052632
0
0.210526
0.157895
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749c2c114c339b32826757ee2f0157c48eb8cfa9
383
py
Python
301-400/377.combination-sum-iv.py
guangxu-li/leetcode-in-python
8a5a373b32351500342705c141591a1a8f5f1cb1
[ "MIT" ]
null
null
null
301-400/377.combination-sum-iv.py
guangxu-li/leetcode-in-python
8a5a373b32351500342705c141591a1a8f5f1cb1
[ "MIT" ]
null
null
null
301-400/377.combination-sum-iv.py
guangxu-li/leetcode-in-python
8a5a373b32351500342705c141591a1a8f5f1cb1
[ "MIT" ]
null
null
null
#
# @lc app=leetcode id=377 lang=python3
#
# [377] Combination Sum IV
#

# @lc code=start
from collections import defaultdict


class Solution:
    def combinationSum4(self, nums: list[int], target: int) -> int:
        """Count ordered sequences drawn from *nums* (repeats allowed) summing to *target*.

        Bottom-up DP over totals: ways[t] is the number of permutations of
        elements of nums whose sum is exactly t, seeded with the single empty
        sequence for t == 0.  Negative keys fall back to the defaultdict's 0.
        """
        ways = defaultdict(int, {0: 1})
        for total in range(1, target + 1):
            for num in nums:
                ways[total] += ways[total - num]
        return ways[target]
# @lc code=end
20.157895
89
0.624021
58
383
4.12069
0.655172
0.050209
0
0
0
0
0
0
0
0
0
0.040541
0.227154
383
18
90
21.277778
0.766892
0.232376
0
0
0
0
0
0
0
0
0
0
0
1
0.166667
false
0
0.166667
0
0.666667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
749c75778c0228cfbbf366e87be5465e687bc993
1,839
py
Python
inference.py
ShaneTian/Induction-Networks
5785e13425e3c9020c8699402d546d018ec735bf
[ "Apache-2.0" ]
5
2020-06-13T07:43:33.000Z
2021-06-14T03:27:11.000Z
inference.py
ShaneTian/Induction-Networks
5785e13425e3c9020c8699402d546d018ec735bf
[ "Apache-2.0" ]
2
2020-06-15T04:53:18.000Z
2020-10-24T12:37:27.000Z
inference.py
ShaneTian/Induction-Networks
5785e13425e3c9020c8699402d546d018ec735bf
[ "Apache-2.0" ]
1
2021-01-08T11:27:07.000Z
2021-01-08T11:27:07.000Z
import argparse
import numpy as np
from paddle import fluid


def main():
    """Load a saved PaddlePaddle inference model and run it on one random few-shot sample."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", type=str, help="Path of __model__ and __params__")
    parser.add_argument("--use_cuda", action="store_true")
    args = parser.parse_args()
    print("Args:", args)
    place = fluid.CUDAPlace(0) if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Load inference model
    inference_program, feed_target_names, fetch_targets = load_model(args.model_path, exe)
    print("Feed target names:", feed_target_names)
    print("Fetch targets:", fetch_targets)
    # A temp sample
    # NOTE(review): presumably B = task batch, N = classes (ways), K = support
    # shots, Q = query shots per class — confirm against the training code.
    B, N, K, Q = 4, 2, 5, 5
    max_length = 512
    totalQ = np.array([N * Q], dtype=np.int32)
    support = np.random.randint(0, high=1000, size=[B, N, K, max_length])
    support_len = np.random.randint(10, high=max_length, size=[B, N, K])
    query = np.random.randint(0, high=1000, size=[B, N * Q, max_length])
    query_len = np.random.randint(10, high=max_length, size=[B, N * Q])
    # Run inference model
    # The feed dict is wired by position in feed_target_names; the order below
    # must match the order the model was saved with.
    pred_label, = exe.run(inference_program,
                          feed={
                              feed_target_names[0]: totalQ,
                              feed_target_names[1]: support,
                              feed_target_names[2]: support_len,
                              feed_target_names[3]: query,
                              feed_target_names[4]: query_len
                          },
                          fetch_list=fetch_targets)
    print("The predict label is:", pred_label)  # [B, totalQ]


def load_model(model_path, exe):
    """Load the saved inference model (program, feed names, fetch targets) from *model_path*."""
    [inference_program, feed_target_names, fetch_targets] = (
        fluid.io.load_inference_model(dirname=model_path,
                                      executor=exe,
                                      params_filename="__params__"))
    return inference_program, feed_target_names, fetch_targets


if __name__ == "__main__":
    main()
35.365385
90
0.641109
248
1,839
4.455645
0.346774
0.090498
0.135747
0.070588
0.241629
0.241629
0.241629
0.124887
0.124887
0.070588
0
0.020759
0.240348
1,839
52
91
35.365385
0.770222
0.035889
0
0
0
0
0.079141
0
0
0
0
0
0
1
0.051282
false
0
0.076923
0
0.153846
0.102564
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749ca24e3526b586f7bcd5efb10f8d95b181ede2
659
py
Python
dashboard/migrations/0032_auto_20210823_0417.py
zidandff/primaseru
a14fa7326098af220e0671c3bf3870b4016ab4bf
[ "MIT" ]
null
null
null
dashboard/migrations/0032_auto_20210823_0417.py
zidandff/primaseru
a14fa7326098af220e0671c3bf3870b4016ab4bf
[ "MIT" ]
null
null
null
dashboard/migrations/0032_auto_20210823_0417.py
zidandff/primaseru
a14fa7326098af220e0671c3bf3870b4016ab4bf
[ "MIT" ]
2
2021-08-10T09:09:58.000Z
2021-08-10T10:54:51.000Z
# Generated by Django 3.2.5 on 2021-08-23 04:17 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('dashboard', '0031_participantgraduation_updated_at'), ] operations = [ migrations.AlterField( model_name='infosourceppdb', name='info_source', field=models.CharField(db_index=True, max_length=100, unique=True, verbose_name='Sumber Info Primaseru'), ), migrations.AlterField( model_name='participantcount', name='count', field=models.CharField(db_index=True, max_length=10), ), ]
27.458333
117
0.626707
69
659
5.826087
0.666667
0.099502
0.124378
0.144279
0.199005
0.199005
0.199005
0.199005
0
0
0
0.049485
0.264036
659
23
118
28.652174
0.779381
0.068285
0
0.235294
1
0
0.184641
0.060458
0
0
0
0
0
1
0
false
0
0.058824
0
0.235294
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
749d83fb75da72d4575c40c6649383b7eedc9468
3,125
py
Python
dynaban/pypot/callback.py
laukik-hase/imitation_of_human_arm_on_robotic_manipulator
995beb1ab41597ca6cbecd0baecdef1ef13450f9
[ "MIT" ]
3
2021-11-13T16:54:31.000Z
2021-11-13T20:50:18.000Z
dynaban/pypot/callback.py
laukik-hase/human_arm_imitation
995beb1ab41597ca6cbecd0baecdef1ef13450f9
[ "MIT" ]
null
null
null
dynaban/pypot/callback.py
laukik-hase/human_arm_imitation
995beb1ab41597ca6cbecd0baecdef1ef13450f9
[ "MIT" ]
null
null
null
import paho.mqtt.client as paho
import time
import queue  # Fix: was `import Queue as queue`, which only exists on Python 2
import json
import real_time_manipulator_math_utils
import pprint

pp = pprint.PrettyPrinter(indent=4)
# import rbdl
# import manip motion


def on_connect(client, userdata, flags, rc):
    """On broker connect, subscribe to the sensor topic."""
    client.subscribe(topic, qos)


def on_message(client, userdata, message):
    """Decode each incoming MQTT payload and hand it to the main loop via the queue."""
    msg = message.payload.decode("utf-8")
    q.put(msg)
    # print("Received: ", msg)


broker = "test.mosquitto.org"
topic = "fyp/sensors"
qos = 0

client = paho.Client("client_001")
client.on_connect = on_connect
client.on_message = on_message
client.connect(broker)
client.loop_start()

q = queue.Queue()
# Fix: removed a dead `JOINTS = 1` that was immediately overwritten by the
# assignment below.
JOINTS = 4       # number of manipulator joints streamed per message
SPLINE = 1       # seconds of samples collected before fitting one spline segment
WINDOWSIZE = 5   # moving-average window length

math_utils_obj = real_time_manipulator_math_utils.manipulator_math_utils(JOINTS)

timestamps = []
# Fix: build the per-joint buffers with plain list comprehensions instead of
# side-effect `[xs.append([]) for ...]` comprehensions.
angles = [[] for _ in range(JOINTS)]
torques = [[] for _ in range(JOINTS)]
padded_angles = []
first_val = True
skip_first_message = True  # renamed from a crude placeholder; drops the first (possibly stale) sample
# Fix: `loop_flag` was read in the while-condition below but never assigned,
# so the loop crashed with NameError on entry.
loop_flag = 1

while loop_flag == 1:
    message = q.get()
    msg = json.loads(message)
    if skip_first_message:
        skip_first_message = False
        continue
    if first_val:
        init_timestamp = msg['timestamp']
        first_val = False
    else:
        # collect till SPLINE
        if msg['timestamp'] - init_timestamp > SPLINE:
            # print(timestamps)
            init_timestamp = msg['timestamp']
            # padding for smooth transition in moving average
            for j in range(JOINTS):
                if padded_angles == []:
                    # First window: repeat the first sample to prime the average.
                    angles[j] = [angles[j][0]] * (WINDOWSIZE - 1) + angles[j]
                else:
                    # Later windows: reuse the tail of the previous window.
                    angles[j] = padded_angles[j] + angles[j]
            # moving average with length similar to timestamps
            print("raw angles")
            pp.pprint(angles)
            padded_angles = [angles[j][-(WINDOWSIZE - 1):] for j in range(JOINTS)]
            angles = math_utils_obj.real_time_moving_average(angles)
            print("angles after moving avg")
            pp.pprint(angles)
            # torques = get torque from rbdl (timestamp, angles)
            # convert angles to steps
            transformation = [[1, 0]] * JOINTS
            angles = math_utils_obj.angles_to_steps(angles, transformation)
            print("angles to steps")
            pp.pprint(angles)
            # call to get coeffs
            angle_coeffs = math_utils_obj.calculate_coefficients_angles(timestamps, angles)
            print("angles coefficients")
            pp.pprint(angle_coeffs)
            # set motion on manipulator
            # empty the buffers for the next spline window
            timestamps = []
            angles = [[] for _ in range(JOINTS)]
            torques = [[] for _ in range(JOINTS)]
    # Record this sample (timestamp relative to the window start, plus the
    # four joint angles carried in the JSON payload).
    timestamps.append(msg['timestamp'] - init_timestamp)
    angles[0].append(msg['shoulder']['pitch'])
    angles[1].append(msg['shoulder']['roll'])
    angles[2].append(msg['shoulder']['yaw'])
    angles[3].append(msg['elbow']['pitch'])

client.disconnect()
client.loop_stop()
26.939655
91
0.5984
365
3,125
4.983562
0.312329
0.034634
0.019791
0.036284
0.173722
0.090159
0.090159
0.090159
0.090159
0.090159
0
0.009005
0.28928
3,125
116
92
26.939655
0.809996
0.10144
0
0.220779
0
0
0.069027
0
0
0
0
0
0
1
0.025974
false
0
0.077922
0
0.103896
0.12987
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749f2f672fb718a35a7272b4f9459c0ee34795d3
4,679
py
Python
emwiki/article/views.py
etmula/emwiki
e162a9976688d543f2e3bd71634913c14cdb9d54
[ "MIT" ]
2
2019-08-02T01:08:13.000Z
2020-11-17T12:47:02.000Z
emwiki/article/views.py
g063ff/emwiki
a379b2b2863e9059eac014183cb94c39670e11a0
[ "MIT" ]
101
2019-07-29T07:44:56.000Z
2022-03-31T04:11:04.000Z
emwiki/article/views.py
g063ff/emwiki
a379b2b2863e9059eac014183cb94c39670e11a0
[ "MIT" ]
11
2019-07-24T02:37:35.000Z
2021-12-09T05:24:31.000Z
import os from django.conf import settings from django.contrib.auth.decorators import login_required from django.core import serializers from django.http import HttpResponse, JsonResponse, Http404 from django.shortcuts import render, get_object_or_404 from django.urls import reverse, reverse_lazy from django.utils.decorators import method_decorator from django.views import View from django.views.generic.base import TemplateView from .models import Article, Comment class ArticleView(TemplateView): template_name = 'article/index.html' extra_context = { 'context_for_js': { 'article_html_base_uri': reverse_lazy('article:htmls'), 'comments_uri': reverse_lazy('article:comments'), 'bibs_uri': reverse_lazy('article:bibs'), 'names_uri': reverse_lazy('article:names'), 'search_uri': reverse_lazy('search:index') } } def get(self, request, name_or_filename, *args, **kwargs): response = super().get(request, *args, **kwargs) # These context data cannot define in class field. response.context_data['context_for_js']['article_base_uri'] = \ reverse('article:index', kwargs=dict(name_or_filename="temp")).replace('temp', '') response.context_data['context_for_js']['is_authenticated'] = request.user.is_authenticated response.context_data['context_for_js']['target'] = request.GET.get('target', 'article') response.context_data['target'] = request.GET.get('target', 'article') return response class ArticleIndexView(View): def get(self, request): return JsonResponse({'index': [ dict(name=article.name) for article in Article.objects.all() ]}) class ArticleHtmlView(View): def get(self, request, *args, **kwargs): if 'article_name' in request.GET: article = get_object_or_404(Article, name=request.GET.get('article_name')) return render(request, article.template_url) else: raise Http404() class BibView(View): def get(self, request): if 'article_name' in request.GET: article_name = request.GET.get("article_name") bib_file_path = os.path.join(settings.MML_FMBIBS_DIR, f'{article_name}.bib') if 
os.path.exists(bib_file_path): with open(bib_file_path, "r") as f: bib_text = f.read() else: bib_text = f"{bib_file_path} not found" return JsonResponse({"bib_text": bib_text}) class ProofView(View): def get(self, request, article_name, proof_name): return HttpResponse( open(os.path.join(settings.MML_HTML_DIR, 'proofs', article_name, proof_name)).read(), content_type='application/xml' ) class RefView(View): def get(self, request, article_name, ref_name): return HttpResponse( open(os.path.join(settings.MML_HTML_DIR, 'refs', article_name, ref_name)).read(), content_type='application/xml' ) class CommentView(View): def get(self, request, *args, **kwargs): query = Comment.objects if 'article_name' in request.GET: query = query.filter( article=Article.objects.get( name=request.GET.get("article_name")) ) if 'block' in request.GET: query = query.filter( block=request.GET.get('block') ) if 'block_order' in request.GET: query = query.filter( block_order=int(request.GET.get("block_order")) ) return HttpResponse( serializers.serialize('json', query.all()), content_type='application/json' ) @method_decorator(login_required) def post(self, request): article_name = request.POST.get('article_name', None) block = request.POST.get('block', None) block_order = request.POST.get("block_order", None) text = request.POST.get('comment', None) article = Article.objects.get(name=article_name) if Comment.objects.filter(article=article, block=block, block_order=block_order).exists(): comment = Comment.objects.get( article=article, block=block, block_order=block_order) else: comment = Comment(article=article, block=block, block_order=block_order, text='') comment.text = text comment.save() article.save_db2mizfile() article.commit_mizfile(request.user.username) return HttpResponse(status=201)
37.432
99
0.63069
546
4,679
5.223443
0.234432
0.065568
0.024544
0.041725
0.342216
0.301893
0.222651
0.084151
0.037868
0.037868
0
0.004587
0.254542
4,679
124
100
37.733871
0.813073
0.010259
0
0.173077
0
0
0.118168
0.004537
0
0
0
0
0
1
0.076923
false
0
0.105769
0.028846
0.346154
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749f6d74baed280144d534ef8d72acec4e74fc71
3,131
py
Python
kivy3/objects/lines.py
sb-blueprint/kivy3
e5d6c5758b610503109eb2c788bcff4f3410bd06
[ "MIT" ]
null
null
null
kivy3/objects/lines.py
sb-blueprint/kivy3
e5d6c5758b610503109eb2c788bcff4f3410bd06
[ "MIT" ]
null
null
null
kivy3/objects/lines.py
sb-blueprint/kivy3
e5d6c5758b610503109eb2c788bcff4f3410bd06
[ "MIT" ]
null
null
null
""" The MIT License (MIT) Copyright (c) 2013 Niko Skrypnik Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from kivy.graphics import Mesh as KivyMesh from kivy3 import Vector3 from kivy3.core.object3d import Object3D DEFAULT_VERTEX_FORMAT = [ (b'v_pos', 3, 'float'), (b'v_normal', 3, 'float'), (b'v_tc0', 2, 'float') ] DEFAULT_MESH_MODE = 'lines' class Lines(Object3D): def __init__(self, geometry, material, **kw): super(Lines, self).__init__(**kw) self.geometry = geometry self.material = material self.mtl = self.material # shortcut for material property self.vertex_format = kw.pop('vertex_format', DEFAULT_VERTEX_FORMAT) self.mesh_mode = kw.pop('mesh_mode', DEFAULT_MESH_MODE) self.create_mesh() def create_mesh(self): """ Create real mesh object from the geometry and material """ vertices = [] indices = [] idx = 0 for line in self.geometry.lines: for i, k in enumerate(['a', 'b']): v_idx = getattr(line, k) vertex = self.geometry.vertices[v_idx] vertices.extend(vertex) try: normal = line.vertex_normals[i] except IndexError: normal = Vector3([0, 0, 0]) vertices.extend(normal) try: tex_coords = self.geometry.face_vertex_uvs[0][idx] vertices.extend(tex_coords) except IndexError: vertices.extend([0, 0]) indices.append(idx) idx += 1 if idx >= 65535 - 1: msg = 'Mesh must not contain more than 65535 indices, {} given' raise ValueError(msg.format(idx + 1)) kw = dict( vertices=vertices, indices=indices, fmt=self.vertex_format, mode=self.mesh_mode ) if self.material.map: kw['texture'] = self.material.map self._mesh = KivyMesh(**kw) def custom_instructions(self): yield self.material yield self._mesh
36.406977
77
0.63526
404
3,131
4.831683
0.423267
0.045082
0.01332
0.008197
0
0
0
0
0
0
0
0.015604
0.283615
3,131
85
78
36.835294
0.854659
0.372724
0
0.074074
0
0
0.06359
0
0
0
0
0
0
1
0.055556
false
0
0.055556
0
0.12963
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
749fd515c10332cd2c59228ee905ba59c612341a
6,634
py
Python
Source/CLI/makedata/makedata.py
dmsovetov/Dreemchest
39255c88943abc69c7fa0710b7ca8486c08260e0
[ "MIT" ]
11
2016-02-18T15:24:49.000Z
2021-01-30T18:26:04.000Z
Source/CLI/makedata/makedata.py
dmsovetov/dreemchest
39255c88943abc69c7fa0710b7ca8486c08260e0
[ "MIT" ]
2
2016-05-23T22:48:35.000Z
2017-02-13T16:43:32.000Z
Source/CLI/makedata/makedata.py
dmsovetov/dreemchest
39255c88943abc69c7fa0710b7ca8486c08260e0
[ "MIT" ]
3
2016-08-19T13:26:59.000Z
2018-08-03T04:28:14.000Z
#!/usr/bin/python ################################################################################# # # The MIT License (MIT) # # Copyright (c) 2015 Dmitry Sovetov # # https://github.com/dmsovetov # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
#################################################################################

import argparse
import os
import time

import actions
import files
import tasks
import unity


def substitute_variables(args, *variables):
    """Expand '[name]' placeholders in every string argument, in place.

    For each string attribute of *args*, every occurrence of '[name]' is
    replaced by the value of the argument called *name*, for each name in
    *variables*.  Mutates *args* via its __dict__ and returns None.
    """
    argv = vars(args)
    for key, value in argv.items():
        if isinstance(value, str):
            for var in variables:
                argv[key] = argv[key].replace('[' + var + ']', argv[var])


class TextureQuality:
    """Available texture quality presets."""
    HD = 'hd'
    SD = 'sd'
    Available = [HD, SD]


class TargetPlatform:
    """Supported target platforms."""
    Win = 'win'
    Mac = 'mac'
    iOS = 'ios'
    Android = 'android'
    Available = [Win, Mac, iOS, Android]


class TextureCompression:
    """Hardware texture compression schemes."""
    Disabled = 'disabled'
    Pvr = 'pvr'
    Dxt = 'dxt'
    Etc = 'etc'


class TextureFormat:
    """Exported image formats and their conversion actions."""
    Raw = 'raw'
    Png = 'png'
    Tga = 'tga'

    @staticmethod
    def convert_to(format):
        """Return the build action used to convert textures to *format*.

        Returns None for an unrecognized format (matches original behavior).
        """
        if format == TextureFormat.Raw:
            return actions.convert_to_raw
        if format == TextureFormat.Png:
            return actions.png_quant
        if format == TextureFormat.Tga:
            return actions.compress


class ExportError(Exception):
    """Raised when an asset export step fails."""
    pass


def import_project(args, source, output):
    """Import a Unity project from *source* into *output*.

    Parses the project's assets, then imports scenes (unless
    args.skip_scenes is set), prefabs, materials and remaining assets,
    and finally saves the asset database to *output*.
    """
    assets = unity.project.parse_assets(args)

    if args.skip_scenes == 0:
        unity.project.import_scenes(assets, source, output)

    unity.project.import_prefabs(assets, source, output)
    unity.project.import_materials(assets, source, output)
    unity.project.import_assets(assets, source, output)

    assets.save(output)


def build(args, source, output):
    """Build outdated assets from *source* into *output*.

    Converts textures according to args.texFormat and FBX models via
    actions.convert_fbx, using args.workers concurrent workers, then
    writes an 'assets.json' manifest describing the built files.
    """
    rules = {
        '*.tga': TextureFormat.convert_to(args.texFormat),
        '*.png': TextureFormat.convert_to(args.texFormat),
        '*.fbx': actions.convert_fbx,
    }

    queue = tasks.create(args.workers)
    outdated = files.find_outdated(source)
    files.build(queue, outdated, output, rules)
    queue.start()

    # Write the manifest file.  The 'with' block closes the handle; the
    # original's extra fh.close() was redundant and has been dropped.
    with open(os.path.join(output, 'assets.json'), 'wt') as fh:
        fh.write(files.generate_manifest(outdated))


if __name__ == "__main__":
    # Parse arguments
    parser = argparse.ArgumentParser(
        description='Dreemchest make data tool',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument("-a", "--action", type=str, default='build',
                        help="Build action.",
                        choices=["clean", "build", "install", "import"])
    parser.add_argument("-s", "--source", type=str, required=True,
                        help="Input resource path.")
    parser.add_argument("-o", "--output", type=str, required=True,
                        help="Output path.")
    parser.add_argument("-tc", "--compression", type=str,
                        default=TextureCompression.Disabled,
                        help="Hardware texture compression.")
    parser.add_argument("-tf", "--texFormat", type=str,
                        default=TextureFormat.Raw,
                        help="Exported image format.")
    parser.add_argument("-p", "--platform", type=str,
                        default=TargetPlatform.Win,
                        help="Target platform.",
                        choices=TargetPlatform.Available)
    parser.add_argument("-v", "--version", type=str, default='1.0',
                        help="Resource version")
    parser.add_argument("-w", "--workers", type=int, default=8,
                        help="The number of concurrent workers.")
    parser.add_argument("-q", "--quality", type=str,
                        default=TextureQuality.HD,
                        help="Texture quality.",
                        choices=TextureQuality.Available)
    parser.add_argument("-c", "--cache", type=str,
                        default='[source]/[platform]/cache',
                        help="Cache file name.")
    # NOTE(review): argparse 'type=bool' is a known pitfall — bool() of any
    # non-empty string (including 'False') is True.  Kept unchanged for CLI
    # compatibility; consider action='store_true' in a breaking release.
    parser.add_argument("--strip-unused", type=bool, default=False,
                        help="The unused assets won't be imported.")
    parser.add_argument("--use-uuids", type=int, default=1,
                        help="The UUIDs will be used instead of file names.")
    parser.add_argument("--skip-scenes", type=int, default=0,
                        help="Scenes wont be imported.")

    args = parser.parse_args()
    substitute_variables(args, 'version', 'compression', 'platform',
                         'quality', 'source')

    # Check the input arguments
    if not os.path.exists(args.source):
        raise AssertionError('the input folder does not exist')

    # Create the output folder
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    print('--- Building [{0}] data package to [{1}] with [{2}] texture compression ---'.format(args.platform, args.output, args.compression))

    start = time.time()

    try:
        if args.action == 'build':
            build(args, args.source, args.output)
        elif args.action == 'import':
            import_project(args, args.source, args.output)
    except ExportError as e:
        # Bug fix: Python 3 exceptions have no '.message' attribute;
        # printing the exception itself prints its message.
        print(e)

    print('--- {0} seconds ---'.format(int(time.time() - start)))
41.204969
184
0.61833
765
6,634
5.296732
0.352941
0.028875
0.054541
0.017769
0.050839
0.010365
0
0
0
0
0
0.002786
0.242538
6,634
161
185
41.204969
0.803383
0.227012
0
0
0
0
0.160471
0.005078
0
0
0
0
0.011628
0
null
null
0
0.127907
null
null
0.034884
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
1
749fd6335e4af9798ed3bf9db18e89282313e71e
1,262
py
Python
core/tools/pencil.py
INTJT/conway
7d3165382b3a691163cae30e33db97c544c043a6
[ "MIT" ]
null
null
null
core/tools/pencil.py
INTJT/conway
7d3165382b3a691163cae30e33db97c544c043a6
[ "MIT" ]
null
null
null
core/tools/pencil.py
INTJT/conway
7d3165382b3a691163cae30e33db97c544c043a6
[ "MIT" ]
null
null
null
from core.tools.tool import Tool
from core.editor import Editor


class Pencil(Tool):
    """Freehand drawing tool that toggles board cells under the cursor.

    While the stroke is in progress, changes are previewed on a copy of
    the board stored in Editor()["pre-pencil"].  Every toggled point is
    recorded so the stroke can be committed (execute) or undone (revoke).
    """

    def __init__(self, button, board):
        super(Pencil, self).__init__(button, board)
        self._changed = set()                   # points toggled during this stroke
        Editor()["pre-pencil"] = board.copy()   # preview board shown while drawing
        self._fill = button                     # value the pencil paints with

    def _need_change(self, value):
        # A cell only needs toggling when it differs from the fill value.
        return value != self._fill

    def move(self, x, y):
        """Apply the pencil at (x, y) within the editor's radius and selection.

        Cells inside the square of side (2*radius - 1) centred on (x, y)
        that lie inside the current selection and differ from the fill
        value are toggled on the preview board exactly once per stroke.
        """
        editor = Editor()
        radius = editor["radius"]
        selection = editor["selection"]
        pre_board = editor["pre-pencil"]
        for dy in range(-radius + 1, radius):
            if y + dy not in selection[1]:
                continue
            for dx in range(-radius + 1, radius):
                if x + dx not in selection[0]:
                    continue
                point = (x + dx, y + dy)
                # Idiom fix: 'not (…) in …' rewritten as '… not in …'.
                if self._need_change(pre_board[y + dy, x + dx]) and point not in self._changed:
                    self._changed.add(point)
                    pre_board[y + dy, x + dx] = not pre_board[y + dy, x + dx]

    def execute(self):
        """Commit the stroke to the real board and drop the preview."""
        Editor()["pre-pencil"] = None
        for point in self._changed:
            self._board[point] = not self._board[point]

    def revoke(self):
        # Toggling the same cells again restores the prior state, so undo
        # is simply a second execute().
        self.execute()

    def need_save(self):
        """Return True when the stroke changed at least one cell."""
        return bool(self._changed)
25.24
102
0.532488
165
1,262
3.915152
0.278788
0.018576
0.069659
0.04644
0.123839
0.123839
0
0
0
0
0
0.006046
0.344691
1,262
49
103
25.755102
0.775091
0
0
0.0625
0
0
0.035658
0
0
0
0
0
0
1
0.1875
false
0
0.0625
0.0625
0.34375
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74a000fd93cbaeff0dd5bb32c3ac3e334a8c69dc
19,855
py
Python
packages/gtmcore/gtmcore/workflows/tests/test_gitlab.py
jjwatts/gigantum-client
88ce0475fb6880322bdd06d987c494e29064f278
[ "MIT" ]
null
null
null
packages/gtmcore/gtmcore/workflows/tests/test_gitlab.py
jjwatts/gigantum-client
88ce0475fb6880322bdd06d987c494e29064f278
[ "MIT" ]
null
null
null
packages/gtmcore/gtmcore/workflows/tests/test_gitlab.py
jjwatts/gigantum-client
88ce0475fb6880322bdd06d987c494e29064f278
[ "MIT" ]
null
null
null
# Copyright (c) 2017 FlashX, LLC # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import pytest
import responses

from gtmcore.workflows.gitlab import GitLabManager, ProjectPermissions, GitLabException


@pytest.fixture()
def gitlab_mngr_fixture():
    """A pytest fixture that returns a GitLabRepositoryManager instance"""
    yield GitLabManager("repo.gigantum.io", "usersrv.gigantum.io", "fakeaccesstoken")


@pytest.fixture()
def property_mocks_fixture():
    """A pytest fixture that registers the key and project-lookup mocks"""
    responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                  json={'key': 'afaketoken'}, status=200)
    responses.add(responses.GET,
                  'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook',
                  json=[{
                      "id": 26,
                      "description": "",
                  }],
                  status=200)
    yield


class TestGitLabManager(object):
    @responses.activate
    def test_user_token(self, gitlab_mngr_fixture):
        """test the user_token property"""
        # Setup responses mock for this test
        responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                      json={'key': 'afaketoken'}, status=200)

        assert gitlab_mngr_fixture._gitlab_token is None

        # Get token
        token = gitlab_mngr_fixture.user_token
        assert token == 'afaketoken'
        assert gitlab_mngr_fixture._gitlab_token == 'afaketoken'

        # Assert token is returned and set on second call and does not make a request
        responses.add(responses.GET, 'https://usersrv.gigantum.io/key', status=400)
        assert token == gitlab_mngr_fixture.user_token

    @responses.activate
    def test_user_token_error(self, gitlab_mngr_fixture):
        """test the user_token property"""
        # Setup responses mock for this test
        responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                      json={'message': 'it failed'}, status=400)

        # Make sure error is raised when getting the key fails and returns !=200
        with pytest.raises(GitLabException):
            _ = gitlab_mngr_fixture.user_token

    def test_repository_id(self):
        """test the repository_id property"""
        assert GitLabManager.get_repository_id("tester", "test-lb-1") == "tester%2Ftest-lb-1"

    @responses.activate
    def test_exists_true(self, property_mocks_fixture, gitlab_mngr_fixture):
        """test the exists method for a repo that should exist"""
        assert gitlab_mngr_fixture.repository_exists("testuser", "test-labbook") is True

    @responses.activate
    def test_exists_false(self, gitlab_mngr_fixture):
        """test the exists method for a repo that should not exist"""
        responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                      json={'key': 'afaketoken'}, status=200)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fderp',
                      json=[{
                          "message": "404 Project Not Found"
                      }],
                      status=404)

        assert gitlab_mngr_fixture.repository_exists("testuser", "derp") is False

    @responses.activate
    def test_create(self, gitlab_mngr_fixture, property_mocks_fixture):
        """test the create method"""
        # Setup responses mock for this test.  NOTE: 'responses' serves
        # registered mocks for the same URL in registration order, so the
        # 404 is returned on the first existence check and the 200 after
        # the project is created.
        responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects',
                      json={
                          "id": 27,
                          "description": "",
                      },
                      status=201)
        responses.add(responses.POST,
                      'https://usersrv.gigantum.io/webhook/testuser/new-labbook',
                      json={
                          "success": True
                      },
                      status=201)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json=[{
                          "message": "404 Project Not Found"
                      }],
                      status=404)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json=[{
                          "id": 27,
                          "description": "",
                      }],
                      status=200)

        gitlab_mngr_fixture.create_labbook("testuser", "new-labbook", visibility="private")

        assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is True

    @responses.activate
    def test_create_errors(self, gitlab_mngr_fixture, property_mocks_fixture):
        """test the create method"""
        # Should fail because the repo "already exists"
        with pytest.raises(ValueError):
            gitlab_mngr_fixture.create_labbook("testuser", "test-labbook", visibility="private")

        # Should fail because the call to gitlab failed
        responses.add(responses.POST, 'https://repo.gigantum.io/api/v4/projects',
                      json={
                          "id": 27,
                          "description": "",
                      },
                      status=400)
        with pytest.raises(ValueError):
            gitlab_mngr_fixture.create_labbook("testuser", "test-labbook", visibility="private")

    @responses.activate
    def test_get_collaborators(self, gitlab_mngr_fixture, property_mocks_fixture):
        """Test the get_collaborators method"""
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      json=[
                          {
                              "id": 29,
                              "name": "Jane Doe",
                              "username": "janed",
                              "access_level": ProjectPermissions.OWNER.value,
                              "expires_at": None
                          },
                          {
                              "id": 30,
                              "name": "John Doeski",
                              "username": "jd",
                              "access_level": ProjectPermissions.READ_ONLY.value,
                              "expires_at": None
                          }
                      ],
                      status=200)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      status=400)

        collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")

        assert len(collaborators) == 2
        assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
        assert collaborators[1] == (30, 'jd', ProjectPermissions.READ_ONLY)

        # Verify it fails on error to gitlab (should get second mock on second call)
        with pytest.raises(ValueError):
            gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")

    @responses.activate
    def test_add_collaborator(self, gitlab_mngr_fixture, property_mocks_fixture):
        """Test the add_collaborator method"""
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/users?username=person100',
                      json=[
                          {
                              "id": 100,
                              "name": "New Person",
                              "username": "person100",
                              "state": "active",
                          }
                      ],
                      status=200)
        responses.add(responses.POST,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      json={
                          "id": 100,
                          "name": "New Person",
                          "username": "person100",
                          "state": "active",
                      },
                      status=201)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      json=[
                          {
                              "id": 29,
                              "name": "Jane Doe",
                              "username": "janed",
                              "access_level": 40,
                              "expires_at": None
                          },
                          {
                              "id": 100,
                              "name": "New Person",
                              "username": "person100",
                              "access_level": 30,
                              "expires_at": None
                          }
                      ],
                      status=200)

        gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100",
                                             ProjectPermissions.READ_WRITE)

        collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")

        assert len(collaborators) == 2
        assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)
        assert collaborators[1] == (100, 'person100', ProjectPermissions.READ_WRITE)

    @responses.activate
    def test_add_collaborator_errors(self, gitlab_mngr_fixture, property_mocks_fixture):
        """Test the add_collaborator method exception handling"""
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/users?username=person100',
                      json=[
                          {
                              "id": 100,
                              "name": "New Person",
                              "username": "person100",
                              "state": "active",
                          }
                      ],
                      status=400)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/users?username=person100',
                      json=[
                          {
                              "id": 100,
                              "name": "New Person",
                              "username": "person100",
                              "state": "active",
                          }
                      ],
                      status=201)
        responses.add(responses.POST,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      json={
                          "id": 100,
                          "name": "New Person",
                          "username": "person100",
                          "state": "active",
                      },
                      status=400)

        # First call: user lookup itself fails (400)
        with pytest.raises(ValueError):
            _ = gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100",
                                                     ProjectPermissions.OWNER)

        # Second call: lookup succeeds but the membership POST fails (400)
        with pytest.raises(ValueError):
            _ = gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "person100",
                                                     ProjectPermissions.READ_ONLY)

    @responses.activate
    def test_delete_collaborator(self, gitlab_mngr_fixture, property_mocks_fixture):
        """Test the delete_collaborator method"""
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/users?username=person100',
                      json=[
                          {
                              "id": 100,
                              "name": "New Person",
                              "username": "person100",
                              "state": "active",
                          }
                      ],
                      status=200)
        responses.add(responses.DELETE,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members/100',
                      status=204)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      json=[
                          {
                              "id": 29,
                              "name": "Jane Doe",
                              "username": "janed",
                              "access_level": 40,
                              "expires_at": None
                          }
                      ],
                      status=200)

        gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')

        collaborators = gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")

        assert len(collaborators) == 1
        assert collaborators[0] == (29, 'janed', ProjectPermissions.OWNER)

    @responses.activate
    def test_delete_collaborator_error(self, gitlab_mngr_fixture, property_mocks_fixture):
        """Test the delete_collaborator method exception handling"""
        # NOTE(review): this test registers mocks but makes no assertions —
        # the original body was commented out (it even referenced
        # TestGitLabManager as an exception type, which cannot work).
        # Kept as-is to preserve behavior; should be rewritten or removed.
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/users?username=person100',
                      json=[
                          {
                              "id": 100,
                              "name": "New Person",
                              "username": "person100",
                              "state": "active",
                          }
                      ],
                      status=200)
        responses.add(responses.DELETE,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members/100',
                      status=204)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook/members',
                      json=[
                          {
                              "id": 29,
                              "name": "Jane Doe",
                              "username": "janed",
                              "access_level": 40,
                              "expires_at": None
                          }
                      ],
                      status=400)

        # What is this test even for?
        # gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')
        # with pytest.raises(TestGitLabManager):
        #     gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 'person100')

    @responses.activate
    def test_error_on_missing_repo(self, gitlab_mngr_fixture):
        """Test the exception handling on a repo when it doesn't exist"""
        responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                      json={'key': 'afaketoken'}, status=200)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Ftest-labbook',
                      json=[{
                          "message": "404 Project Not Found"
                      }],
                      status=404)

        with pytest.raises(ValueError):
            gitlab_mngr_fixture.get_collaborators("testuser", "test-labbook")
        with pytest.raises(ValueError):
            gitlab_mngr_fixture.add_collaborator("testuser", "test-labbook", "test",
                                                 ProjectPermissions.READ_ONLY)
        with pytest.raises(ValueError):
            gitlab_mngr_fixture.delete_collaborator("testuser", "test-labbook", 100)

    @responses.activate
    def test_configure_git_credentials(self, gitlab_mngr_fixture):
        """test the configure_git_credentials method"""
        host = "test.gigantum.io"
        username = "testuser"

        # Setup responses mock for this test
        responses.add(responses.GET, 'https://usersrv.gigantum.io/key',
                      json={'key': 'afaketoken'}, status=200)

        # Check that creds are empty
        token = gitlab_mngr_fixture._check_if_git_credentials_configured(host, username)
        assert token is None

        # Set creds
        gitlab_mngr_fixture.configure_git_credentials(host, username)

        # Check that creds are configured
        token = gitlab_mngr_fixture._check_if_git_credentials_configured(host, username)
        assert token == "afaketoken"

        # Clear creds
        gitlab_mngr_fixture.clear_git_credentials(host)

        # Check that creds are cleared again
        token = gitlab_mngr_fixture._check_if_git_credentials_configured(host, username)
        assert token is None

    @responses.activate
    def test_delete(self, gitlab_mngr_fixture, property_mocks_fixture):
        """test the remove_repository method"""
        # Setup responses mock for this test: two 200s for the existence
        # checks before deletion, then 404s once the project is gone.
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json=[{
                          "id": 27,
                          "description": "",
                      }],
                      status=200)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json=[{
                          "id": 27,
                          "description": "",
                      }],
                      status=200)
        responses.add(responses.DELETE,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json={
                          "message": "202 Accepted"
                      },
                      status=202)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json=[{
                          "message": "404 Project Not Found"
                      }],
                      status=404)
        responses.add(responses.GET,
                      'https://repo.gigantum.io/api/v4/projects/testuser%2Fnew-labbook',
                      json=[{
                          "message": "404 Project Not Found"
                      }],
                      status=404)
        responses.add(responses.DELETE,
                      'https://usersrv.gigantum.io/webhook/testuser/new-labbook',
                      json={}, status=204)

        assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is True

        gitlab_mngr_fixture.remove_repository("testuser", "new-labbook")

        assert gitlab_mngr_fixture.repository_exists("testuser", "new-labbook") is False

        # A second removal must fail: the repo no longer exists
        with pytest.raises(ValueError):
            gitlab_mngr_fixture.remove_repository("testuser", "new-labbook")
47.27381
123
0.514279
1,797
19,855
5.553144
0.140234
0.048101
0.081772
0.062531
0.761199
0.740455
0.697164
0.665097
0.643151
0.587734
0
0.026221
0.385344
19,855
419
124
47.386635
0.791462
0.131151
0
0.696486
0
0.028754
0.206282
0
0
0
0
0
0.067093
1
0.054313
false
0
0.009585
0
0.067093
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
74a12178988d7dbec19f36ed51efbc1d85879c24
3,248
py
Python
pinsey/thread/LikesBotThread.py
RailKill/Pinsey
72a283e6c5683b27918b511d80e45c3af4e67539
[ "MIT" ]
3
2021-02-01T06:47:06.000Z
2022-01-09T05:54:35.000Z
pinsey/thread/LikesBotThread.py
RailKill/Pinsey
72a283e6c5683b27918b511d80e45c3af4e67539
[ "MIT" ]
4
2019-10-23T09:52:36.000Z
2022-03-11T23:17:23.000Z
pinsey/thread/LikesBotThread.py
RailKill/Pinsey
72a283e6c5683b27918b511d80e45c3af4e67539
[ "MIT" ]
null
null
null
import logging

from pynder import errors
from random import randint
from PyQt5 import QtCore


class LikesBotThread(QtCore.QThread):
    """
        This is a QThread which runs in the background as a PyQt Signal. It emits the matches object.
        To access the matches object, you need to retrieve from the signal, which is named 'data_downloaded'.
        For example:
            instance = MatchesThread()
            instance.data_downloaded.connect(yourMethod)
            instance.start()

        With the example above, yourMethod() will be called when the background thread has finished fetching
        the matches data. The matches object will be passed in as the first parameter. Therefore, if you define
        your method like this: yourMethod(matches), then the session object will be passed into 'matches'.
    """
    data_downloaded = QtCore.pyqtSignal(object)

    def __init__(self, session, likes_handler, decision_handler=None):
        QtCore.QThread.__init__(self)
        self.session = session
        self.friends = session.get_fb_friends()
        self.likes_handler = likes_handler
        self.decision_handler = decision_handler
        self.abort = False
        self.logger = logging.getLogger(__name__)

    def stop(self):
        """Request the run() loop to terminate after the current iteration."""
        self.abort = True

    def run(self):
        """Main bot loop: like/dislike nearby users until stop() is called.

        NOTE(review): indentation of the two sleep() calls was re-inferred
        from the surrounding comments after the source lost its newlines —
        confirm against the original file.
        """
        while not self.abort:
            if self.session.likes_remaining != 0:
                nearby_users = self.session.nearby_users()
                try:
                    user = next(nearby_users)  # Iterate through generator object.
                    if self.decision_handler:
                        if not self.decision_handler.analyze(user, self.friends):
                            self.likes_handler.dislike_user(user, 'Bot')
                            continue
                    self.likes_handler.like_user(user, 'Bot')
                    self.logger.info(u'Liking ' + user.name + '.')
                except StopIteration:
                    try:
                        # No more users to go through. Reset the distance filter to fetch the users again.
                        self.session.profile.distance_filter = self.session.profile.distance_filter
                    except errors.RequestError:
                        self.logger.error('Request timed out when trying to update distance filter in profile.')
                    except errors.RecsError:
                        self.logger.info('There are probably no more nearby users to fetch. '
                                         'Increasing distance filter by 1 mile...')
                        self.session.profile.distance_filter += 1
                self.sleep(randint(3, 5))  # Give it a break, 3 to 5 seconds between every swipe.
            else:
                try:
                    like_in_seconds = self.session.can_like_in
                    like_in_hours = like_in_seconds / 60 / 60
                    self.logger.info('Out of likes. Can like in: ' + str(like_in_seconds) +
                                     ' seconds (' + str(like_in_hours) + ' hours).')
                except errors.RequestError:
                    self.logger.info('Out of likes. Retrying in an hour...')
                self.sleep(3600 * 6)  # Out of likes, pausing for X hours.
47.764706
112
0.591441
375
3,248
4.994667
0.4
0.046983
0.029899
0.041644
0.107848
0.025627
0
0
0
0
0
0.007889
0.336515
3,248
67
113
48.477612
0.861253
0.248768
0
0.106383
0
0
0.106672
0
0
0
0
0
0
1
0.06383
false
0
0.085106
0
0.191489
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74a2534ab9e9ba29bdec5c71f0832442026ad789
13,081
py
Python
train.py
SkyLLine/Inception-v4-in-mindspore
9a23261252baad27cbd7bc8028087e4bba48b318
[ "MIT" ]
1
2020-12-23T12:31:08.000Z
2020-12-23T12:31:08.000Z
train.py
SkyLLine/Inception-v4-in-mindspore
9a23261252baad27cbd7bc8028087e4bba48b318
[ "MIT" ]
null
null
null
train.py
SkyLLine/Inception-v4-in-mindspore
9a23261252baad27cbd7bc8028087e4bba48b318
[ "MIT" ]
null
null
null
"""Train an Inception-v4 (or LeNet-5) classifier with MindSpore.

Cleanups in this revision: duplicated imports collapsed into one ordered
block, the leaked file handle in unzipfile() closed via context managers,
the crashing ``InceptionV4(4, 7, 3)`` call fixed (``__init__`` takes no
arguments), the unused ``dict = {}`` / ``i = 0`` module globals (the former
shadowed the builtin) removed, and the long commented-out local
create_dataset() removed — the imported ``dataloader.create_dataset`` is the
one actually used.
"""
import argparse
import gzip
import os
import urllib.request
from urllib.parse import urlparse

import numpy as np
import mindspore as ms
import mindspore.common.initializer as weight_init
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
import mindspore.dataset.transforms.vision.c_transforms as CV
import mindspore.nn as nn
import mindspore.ops.operations as operator
from mindspore import Tensor, context
from mindspore.common import dtype as mstype
from mindspore.common.initializer import TruncatedNormal
from mindspore.communication.management import init
from mindspore.dataset.transforms.vision import Inter
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.metrics import Accuracy
from mindspore.nn.optim.momentum import Momentum
from mindspore.parallel._auto_parallel_context import auto_parallel_context
from mindspore.train.callback import CheckpointConfig, LossMonitor, ModelCheckpoint, TimeMonitor
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.model import Model, ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net

from config import config
from CrossEntropy import CrossEntropy
from dataloader import create_dataset
from inception_A import inception_A
from inception_B import inception_B
from inception_C import inception_C
from lr_generator import get_lr
from network import Stem
from reduction_A import reduction_A
from reduction_B import reduction_B
from reduction_C import reduction_C


def unzipfile(gzip_path):
    """Unzip one dataset archive next to itself.

    Args:
        gzip_path: dataset file path (``*.gz``); output drops the suffix.
    """
    # Context managers close both handles even on error (the original
    # leaked the output file object).
    with gzip.GzipFile(gzip_path) as gz_file:
        with open(gzip_path.replace('.gz', ''), 'wb') as open_file:
            open_file.write(gz_file.read())


def download_dataset():
    """Download the dataset from http://yann.lecun.com/exdb/mnist/."""
    print("******Downloading the MNIST dataset******")
    train_path = "./MNIST_Data/train/"
    test_path = "./MNIST_Data/test/"
    if not os.path.exists(train_path) and not os.path.exists(test_path):
        os.makedirs(train_path)
        os.makedirs(test_path)
    train_url = {"http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz",
                 "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"}
    test_url = {"http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz",
                "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"}
    # Same fetch/unzip/cleanup cycle for both splits.
    for dest_path, urls in ((train_path, train_url), (test_path, test_url)):
        for url in urls:
            url_parse = urlparse(url)
            # Split the file name from the url.
            file_name = os.path.join(dest_path, url_parse.path.split('/')[-1])
            if not os.path.exists(file_name.replace('.gz', '')):
                urllib.request.urlretrieve(url, file_name)
                unzipfile(file_name)
                os.remove(file_name)


parser = argparse.ArgumentParser(description='Image classification')
# NOTE(review): argparse ``type=bool`` treats any non-empty string as True
# (bool('False') is True); kept as-is to preserve the CLI contract.
parser.add_argument('--run_distribute', type=bool, default=True, help='Run distribute')
parser.add_argument('--device_num', type=int, default=8, help='Device num.')
parser.add_argument('--do_train', type=bool, default=True, help='Do train or not.')
parser.add_argument('--do_eval', type=bool, default=False, help='Do eval or not.')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--data_url', default=None, help='Location of data.')
parser.add_argument('--train_url', default=None, help='Location of training outputs.')
opt = parser.parse_args()


class InceptionV4(nn.Cell):
    """Inception-v4: Stem -> 4xA -> redA -> 7xB -> redB -> 3xC -> head."""

    def __init__(self):
        super().__init__()
        self.Stem = Stem(3)
        self.inception_A = inception_A(384)
        self.reduction_A = reduction_A(384)
        self.inception_B = inception_B(1024)
        self.reduction_B = reduction_B(1024)
        self.inception_C = inception_C(1536)
        self.avgpool = nn.AvgPool2d(8)  # pool the final 8x8 map; flattened to 2-D below
        self.dropout = nn.Dropout(0.8)
        self.linear = nn.Dense(1536, 1000)

    def construct(self, x):
        """Forward pass; loops unroll the repeated inception stages."""
        x = self.Stem(x)
        for _ in range(4):
            x = self.inception_A(x)
        x = self.reduction_A(x)
        for _ in range(7):
            x = self.inception_B(x)
        x = self.reduction_B(x)
        for _ in range(3):
            x = self.inception_C(x)
        x = self.avgpool(x)
        x = self.dropout(x)
        x = nn.Flatten()(x)
        x = self.linear(x)
        return x

    def generate_inception_module(self, input_channels, output_channels, block_num, block):
        # NOTE(review): not called by construct(); kept verbatim for API
        # compatibility. It wraps each stage in nested SequentialCells.
        if block == 1:
            layers = nn.SequentialCell([inception_A(input_channels)])
            for _ in range(block_num):
                layers = nn.SequentialCell(inception_A(input_channels), layers)
                input_channels = output_channels
        if block == 2:
            layers = nn.SequentialCell([inception_B(input_channels)])
            for _ in range(block_num):
                layers = nn.SequentialCell(inception_B(input_channels), layers)
                input_channels = output_channels
        if block == 3:
            layers = nn.SequentialCell([inception_C(input_channels)])
            for _ in range(block_num):
                layers = nn.SequentialCell(inception_C(input_channels), layers)
                input_channels = output_channels
        return layers


def train(opt):
    """Download MNIST and train InceptionV4 on it (single-device path).

    :param opt: parsed CLI namespace (currently unused inside the body).
    """
    mnist_path = "./MNIST_Data"
    download_dataset()
    # NOTE(review): ans() calls create_dataset with a different argument
    # shape ('./dataset', True, epochs, batch); one of the two call sites
    # presumably predates a signature change in dataloader — confirm.
    dataset = create_dataset(os.path.join(mnist_path, "train"), 32, 1)
    # Fixed: InceptionV4.__init__ takes no arguments; the original
    # InceptionV4(4, 7, 3) raised TypeError at runtime.
    net = InceptionV4()
    # net = LeNet5()
    lr = 0.01
    optt = nn.Momentum(net.trainable_params(), lr, momentum=0.9)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # Save the network model and parameters for subsequent fine-tuning.
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    # Group layers into an object with training and evaluation features.
    net_loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
    model = Model(net, net_loss, optt, metrics={"Accuracy": Accuracy()})
    model.train(config.epoch_size, dataset,
                callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)


#########################################
def weight_variable():
    """Weight initial."""
    return TruncatedNormal(0.02)


def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
    """Conv layer weight initial."""
    weight = weight_variable()
    return nn.Conv2d(in_channels, out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     weight_init=weight, has_bias=False, pad_mode="valid")


def fc_with_initialize(input_channels, out_channels):
    """Fc layer weight initial."""
    weight = weight_variable()
    bias = weight_variable()
    return nn.Dense(input_channels, out_channels, weight, bias)


class LeNet5(nn.Cell):
    """Lenet network structure."""

    # define the operators required
    def __init__(self):
        super(LeNet5, self).__init__()
        self.conv1 = conv(1, 6, 5)
        self.conv2 = conv(6, 16, 5)
        self.fc1 = fc_with_initialize(16 * 5 * 5, 120)
        self.fc2 = fc_with_initialize(120, 84)
        self.fc3 = fc_with_initialize(84, 10)
        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
        self.flatten = nn.Flatten()

    # use the preceding operators to construct the network
    def construct(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.max_pool2d(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.relu(x)
        x = self.fc3(x)
        return x


def ans():
    """Entry point: build InceptionV4 with a cosine LR schedule and train."""
    context.set_context(mode=context.GRAPH_MODE)
    net = InceptionV4()
    print("start")
    # Renamed local (was ``ds``) so it no longer shadows the
    # ``mindspore.dataset as ds`` module import.
    dataset = create_dataset('./dataset', True, config.epoch_size, config.batch_size)
    lr = 0.01
    optt = nn.Momentum(net.trainable_params(), lr, momentum=0.9)
    config_ck = CheckpointConfig(save_checkpoint_steps=1875, keep_checkpoint_max=10)
    # Save the network model and parameters for subsequent fine-tuning.
    ckpoint_cb = ModelCheckpoint(prefix="checkpoint_lenet", config=config_ck)
    # Group layers into an object with training and evaluation features.
    net_loss = CrossEntropy(smooth_factor=config.label_smooth_factor,
                            num_classes=config.class_num)
    loss_scale = FixedLossScaleManager(config.loss_scale, drop_overflow_update=False)
    # Cosine-decayed learning-rate schedule replaces the scalar above.
    lr = Tensor(get_lr(global_step=0, lr_init=config.lr_init, lr_end=0.0,
                       lr_max=config.lr_max, warmup_epochs=config.warmup_epochs,
                       total_epochs=config.epoch_size, steps_per_epoch=config.batch_size,
                       lr_decay_mode='cosine'))
    optt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
                    config.momentum, config.weight_decay, config.loss_scale)
    model = Model(net, net_loss, optt, metrics={"Accuracy": Accuracy()})
    model.train(config.epoch_size, dataset,
                callbacks=[ckpoint_cb, LossMonitor()], dataset_sink_mode=False)


if __name__ == '__main__':
    ans()
39.759878
125
0.690467
1,776
13,081
4.866554
0.193131
0.007868
0.022215
0.024297
0.413514
0.385746
0.346292
0.292607
0.264144
0.25188
0
0.018462
0.200826
13,081
329
126
39.759878
0.808303
0.240272
0
0.306604
0
0.018868
0.066019
0
0
0
0
0
0
1
0.056604
false
0
0.216981
0
0.311321
0.009434
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74a435d54372c1ab9bc2bc926f10969db52f3fc9
4,238
py
Python
dash/models.py
lakesite/dash
c64cea5fe4c5aff529e3f121270dbf29f700aeb2
[ "MIT" ]
null
null
null
dash/models.py
lakesite/dash
c64cea5fe4c5aff529e3f121270dbf29f700aeb2
[ "MIT" ]
1
2019-09-17T12:22:26.000Z
2019-09-18T22:58:14.000Z
dash/models.py
lakesite/dash
c64cea5fe4c5aff529e3f121270dbf29f700aeb2
[ "MIT" ]
1
2019-09-27T03:58:43.000Z
2019-09-27T03:58:43.000Z
"""SQLAlchemy models for the dash application.

Fixes in this revision: the three ``"... %"``.format() strings now use real
``{}`` placeholders (``.format`` never substitutes ``%``), ``Project.save_changes``
adds ``self`` instead of the undefined name ``project``, and ``__table_args__``
is a proper 1-tuple of a UniqueConstraint over the ``company_id`` column
(previously ``tuple(...)`` was applied to the constraint itself, and the
constraint named the ``company`` relationship rather than a column).
"""
import datetime

from flask_bcrypt import *  # NOTE(review): wildcard import kept; nothing visible here uses it
from flask import current_app
from flask_sqlalchemy import SQLAlchemy
from flask_security import UserMixin, RoleMixin

db = SQLAlchemy()

# Association table for the many-to-many User <-> Role relationship.
roles_users = db.Table(
    'roles_users',
    db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
    db.Column('role_id', db.Integer(), db.ForeignKey('role.id')),
)


class Role(db.Model, RoleMixin):
    """A named permission role (e.g. 'admin') attachable to users."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))


class User(db.Model, UserMixin):
    """An account holder, optionally attached to a Company."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255))
    registered_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    active = db.Column(db.Boolean, nullable=False, default=False)
    confirmed_at = db.Column(db.DateTime, nullable=True)
    admin = db.Column(db.Boolean, nullable=False, default=False)
    notes = db.Column(db.Text, nullable=True)
    roles = db.relationship('Role', secondary=roles_users,
                            backref=db.backref('users', lazy='dynamic'))
    company_id = db.Column(db.Integer(), db.ForeignKey('company.id'))
    company = db.relationship('Company')

    def __unicode__(self):
        return("<User id={} email={}>".format(self.id, self.email))

    def __repr__(self):
        return("<User ID: {}, email: {}>".format(self.id, self.email))

    def __init__(self, email, password, roles, active=False, admin=False, confirmed_at=None):
        # NOTE(review): the ``roles`` argument is accepted but only the
        # admin role is ever assigned (when admin=True) — confirm intent.
        self.email = email
        self.password = password
        if admin:
            self.roles = [Role.query.filter_by(name='admin').first()]
        self.registered_on = datetime.datetime.now()
        self.admin = admin
        self.active = active
        self.confirmed_at = confirmed_at

    def get_id(self):
        # NOTE(review): flask_login documents get_id() as returning str;
        # returning the int id is kept to preserve existing behavior.
        return self.id


class ServiceAgreement(db.Model):
    """A named agreement between a Company and the service provider."""
    id = db.Column(db.Integer, primary_key=True)
    company_id = db.Column('company_id', db.Integer(), db.ForeignKey('company.id'),
                           nullable=False)
    company = db.relationship('Company')
    name = db.Column(db.String(50), nullable=False)
    started_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    text = db.Column(db.Text, nullable=True, default='')

    def __unicode__(self):
        # Fixed: '%' is not a .format() placeholder; use '{}'.
        return("<started = {}>".format(self.started_on))


class Company(db.Model):
    """An organization owning users and projects."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), unique=True)
    email = db.Column(db.String(255))
    phone = db.Column(db.String(20))
    bio = db.Column(db.String(255))
    users = db.relationship("User", backref="user", lazy="dynamic")

    def __repr__(self):
        return("{}".format(self.name))

    def __unicode__(self):
        # Fixed: '%' is not a .format() placeholder; use '{}'.
        return("<name = {}>".format(self.name))


class UserRequest(db.Model):
    """A support/feature request filed by a user."""
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column('user_id', db.Integer(), db.ForeignKey('user.id'), nullable=False)
    user = db.relationship('User')
    requested_on = db.Column(db.DateTime, nullable=False, default=datetime.datetime.now)
    title = db.Column(db.String(255), nullable=False)
    request = db.Column(db.Text, nullable=False)
    status = db.Column(db.Integer)

    def __unicode__(self):
        # Fixed: '%' is not a .format() placeholder; use '{}'.
        return("<User={} request={}>".format(self.user.email, self.request))


class Iteration(db.Model):
    """A work iteration belonging to a Project."""
    id = db.Column(db.Integer, primary_key=True)
    project = db.Column('project_id', db.Integer(), db.ForeignKey('project.id'))


class Project(db.Model):
    """A client project, unique per (company, name)."""
    id = db.Column(db.Integer, primary_key=True)
    company_id = db.Column('company_id', db.Integer(), db.ForeignKey('company.id'),
                           nullable=False)
    company = db.relationship('Company')
    name = db.Column(db.String(255))
    status = db.Column(db.Integer, nullable=False, default=0)
    # Fixed: must be a tuple CONTAINING the constraint, and the constraint
    # must name the column ('company_id'), not the relationship.
    __table_args__ = (db.UniqueConstraint('company_id', 'name',
                                          name='_company_projectname_uc'),)

    def save_changes(self, form, new=False):
        """Copy the form's name onto this project and persist it."""
        self.name = form.name.data
        if new:
            db.session.add(self)  # fixed: was the undefined name ``project``
        db.session.commit()

    def get_id(self):
        return self.id

    def __unicode__(self):
        return self.name
32.106061
99
0.666116
569
4,238
4.826011
0.165202
0.10488
0.10925
0.064093
0.523307
0.428623
0.397669
0.354334
0.311726
0.311726
0
0.008926
0.18051
4,238
131
100
32.351145
0.781745
0
0
0.241758
0
0
0.072204
0.005427
0
0
0
0
0
1
0.120879
false
0.032967
0.054945
0.098901
0.736264
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
1
74a4b609cbd07e7dfd15a960bb342cacfe14d3fb
2,034
py
Python
setup.py
Jumpscale/ays9
63bd414ff06372ba885c55eec528f427e63bcbe1
[ "Apache-2.0" ]
4
2017-06-07T08:10:06.000Z
2017-11-10T02:20:38.000Z
setup.py
Jumpscale/ays9
63bd414ff06372ba885c55eec528f427e63bcbe1
[ "Apache-2.0" ]
242
2017-05-18T10:51:48.000Z
2019-09-18T15:09:47.000Z
setup.py
Jumpscale/ays9
63bd414ff06372ba885c55eec528f427e63bcbe1
[ "Apache-2.0" ]
5
2017-06-16T15:43:25.000Z
2017-09-29T12:48:06.000Z
from setuptools import setup, find_packages from setuptools.command.install import install as _install from setuptools.command.develop import develop as _develop import os def _post_install(libname, libpath): from js9 import j # add this plugin to the config c = j.core.state.configGet('plugins', defval={}) c[libname] = libpath j.core.state.configSet('plugins', c) print("****:%s:%s" % (libname, libpath)) j.tools.jsloader.generate() # not needed to do # j.tools.jsloader.copyPyLibs() class install(_install): def run(self): _install.run(self) libname = self.config_vars['dist_name'] libpath = os.path.join(os.path.dirname( os.path.abspath(__file__)), libname) self.execute(_post_install, (libname, libpath), msg="Running post install task") class develop(_develop): def run(self): _develop.run(self) libname = self.config_vars['dist_name'] libpath = os.path.join(os.path.dirname( os.path.abspath(__file__)), libname) self.execute(_post_install, (libname, libpath), msg="Running post install task") long_description = "" try: from pypandoc import convert long_description = convert("README.md", 'rst') except ImportError: long_description = "" setup( name='JumpScale9AYS', version='9.2.0', description='Automation framework for cloud workloads ays lib', long_description=long_description, url='https://github.com/Jumpscale/ays9', author='GreenItGlobe', author_email='info@gig.tech', license='Apache', packages=find_packages(), include_package_data=True, install_requires=[ 'JumpScale9>=9.2.0', 'JumpScale9Lib>=9.2.0', 'jsonschema>=2.6.0', 'python-jose==1.3.2', 'sanic>=0.5.4', 'aiohttp>=2.2.5' ], cmdclass={ 'install': install, 'develop': develop, 'developement': develop }, scripts=['cmds/ays'], )
25.746835
67
0.629302
243
2,034
5.123457
0.45679
0.028916
0.043373
0.060241
0.24257
0.24257
0.24257
0.24257
0.24257
0.24257
0
0.016731
0.235988
2,034
78
68
26.076923
0.784427
0.037365
0
0.237288
0
0
0.187308
0
0
0
0
0
0
1
0.050847
false
0
0.118644
0
0.20339
0.016949
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74a4c95aba95c6ff272c009bc42f15868fc311a3
514
py
Python
misc/build_proto.py
engeg/recipes-py
9dac536b55887262b4ce846f3db7a7f596542e5e
[ "Apache-2.0" ]
23
2016-01-20T00:45:26.000Z
2022-02-26T04:25:30.000Z
misc/build_proto.py
engeg/recipes-py
9dac536b55887262b4ce846f3db7a7f596542e5e
[ "Apache-2.0" ]
8
2016-01-15T19:00:38.000Z
2018-03-06T00:15:24.000Z
misc/build_proto.py
engeg/recipes-py
9dac536b55887262b4ce846f3db7a7f596542e5e
[ "Apache-2.0" ]
13
2015-09-05T05:52:43.000Z
2019-07-08T17:34:27.000Z
#!/usr/bin/env vpython # Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys from google.protobuf import json_format as jsonpb ROOT = os.path.dirname(os.path.dirname(__file__)) sys.path.append(os.path.join(ROOT, '.recipe_deps', '_pb')) from PB.go.chromium.org.luci.buildbucket.proto.build import Build sys.stdout.write(jsonpb.Parse(sys.stdin.read(), Build()).SerializeToString())
30.235294
77
0.766537
83
514
4.662651
0.722892
0.046512
0.067183
0
0
0
0
0
0
0
0
0.008791
0.114786
514
16
78
32.125
0.841758
0.344358
0
0
0
0
0.045045
0
0
0
0
0
0
1
0
false
0
0.571429
0
0.571429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
74a63aca3e7e9199c9e3c65398a87cb9cc5d927d
12,246
py
Python
nettruyen_downloader_script.py
quantrancse/nettruyen-downloader-script
1ff5d67fc3a99009385fd3ab5976c582246a2783
[ "MIT" ]
4
2020-09-15T16:29:53.000Z
2022-02-18T17:36:46.000Z
nettruyen_downloader_script.py
quantrancse/nettruyen-downloader-script
1ff5d67fc3a99009385fd3ab5976c582246a2783
[ "MIT" ]
null
null
null
nettruyen_downloader_script.py
quantrancse/nettruyen-downloader-script
1ff5d67fc3a99009385fd3ab5976c582246a2783
[ "MIT" ]
4
2021-07-06T04:26:15.000Z
2022-01-07T05:55:46.000Z
"""CLI downloader for nettruyen/nhattruyen manga chapters.

Fixes in this revision: the chapter directory path is now sanitized ONCE and
the same sanitized path is both created with mkdir and stored for image
writes — previously mkdir created the sanitized name while images were
written under the unsanitized path, which fails whenever the chapter name
contains a stripped character. The triplicated ``.replace(...)`` chain is
factored into sanitize_path(), and a stale comment ("5 secs" vs the actual
10-second budget) is corrected.
"""
import argparse
import signal
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from os import mkdir
from os.path import isdir
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup

HEADERS = {
    'Connection': 'keep-alive',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
    'DNT': '1',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-US,en;q=0.9'
}


def sanitize_path(path):
    """Strip characters that break directory creation on common filesystems."""
    return path.replace('\"', '').replace('\'', '').replace('?', '').replace('!', '')


class MangaInfo():
    """Plain data holder describing one manga and the chapters to fetch."""

    def __init__(self):
        self.manga_url = ''
        self.manga_name = ''
        self.chapter_name_list = []
        self.chapter_url_list = []
        self.save_path = ''
        self.list_of_download_chapter = []


class DownloadEngine():
    """Downloads the selected chapters of one manga, image by image."""

    def __init__(self):
        self.stop_signal = 0     # set to 1 by Ctrl+C; checked between units of work
        self.error403_signal = 0  # set when the CDN refuses an image

    def set_manga(self, manga):
        """Attach the MangaInfo to operate on."""
        self.current_manga = manga
        self.image_formats = ['.jpg', '.jpeg', '.png', '.gif', '.tiff', '.bmp']

    def stop_download(self, sig, frame):
        """SIGINT handler: request a graceful stop."""
        self.stop_signal = 1

    def run(self):
        """Install the Ctrl+C handler and download everything selected."""
        signal.signal(signal.SIGINT, self.stop_download)
        self.crawl_chapter_data_list()

    def crawl_chapter_data_list(self):
        """Build the chapter work list, skip finished ones, download the rest."""
        chapter_list = []
        # Get each chapter's info (url + display name, name truncated at ':').
        for index in self.current_manga.list_of_download_chapter:
            chapter_detail = {}
            chapter_detail['chapter_url'] = self.current_manga.chapter_url_list[index]
            chapter_detail['chapter_name'] = self.current_manga.chapter_name_list[index]
            if ':' in chapter_detail['chapter_name']:
                chapter_detail['chapter_name'] = chapter_detail['chapter_name'].split(':')[0]
            chapter_list.append(chapter_detail)

        # Remove already-downloaded chapters (their directory exists).
        chapter_list = [i_chapter for i_chapter in chapter_list if not isdir(
            self.current_manga.save_path + '/' + i_chapter['chapter_name'])]
        chapter_list = list(reversed(chapter_list))

        if chapter_list:
            # Create each directory and start downloading.
            print('Start download ..... Press Ctrl+C to stop.')
            for chapter_data in chapter_list:
                if self.stop_signal:
                    break
                # Fixed: sanitize once and reuse the SAME path for both
                # mkdir and the stored image-write base; the original made
                # the sanitized directory but wrote under the raw path.
                chapter_dir_path = sanitize_path(
                    self.current_manga.save_path + '/' + chapter_data['chapter_name'])
                mkdir(chapter_dir_path)
                chapter_data['chapter_dir_path'] = chapter_dir_path
                self.get_chapter_contents(chapter_data)
            print('Download Done')
        sys.exit(0)

    def get_image_urls(self, soup):
        """Extract deduplicated page-image URLs from a chapter's HTML."""
        contents = []
        for content_url in soup.find('div', class_='reading-detail box_doc').find_all('img'):
            if content_url not in contents:
                # Prefer 'src' when it clearly carries an image extension,
                # then the lazy-load attributes, then 'src' as a fallback.
                if any(img_fm in content_url['src'] for img_fm in self.image_formats):
                    img_url = content_url['src']
                elif content_url.has_attr('data-original'):
                    img_url = content_url['data-original']
                elif content_url.has_attr('data-cdn') and any(
                        img_fm in content_url['data-cdn'] for img_fm in self.image_formats):
                    img_url = content_url['data-cdn']
                else:
                    img_url = content_url['src']
                contents.append(self.format_img_url(img_url))
        return contents

    def format_img_url(self, url):
        """Turn protocol-relative '//host/...' URLs into absolute http URLs."""
        return url.replace('//', 'http://')

    def get_image_paths(self, chapter_dir_path, contents):
        """Map each image URL to a local file path inside the chapter dir."""
        img_path_list = []
        image_index = 1
        for img_url in contents:
            img_name = img_url.split('/')[-1]
            if any(img_fm in img_name[-4:] for img_fm in self.image_formats):
                img_path_name = chapter_dir_path + '/image_' + img_name
            else:
                # No recognizable extension: synthesize a zero-padded name.
                img_path_name = chapter_dir_path + \
                    '/image_' + '{0:0=3d}'.format(image_index) + '.jpg'
            img_path_list.append(img_path_name)
            image_index += 1
        return img_path_list

    def get_chapter_contents(self, chapter_data):
        """Fetch one chapter page and download all its images in parallel."""
        try:
            # Request the chapter url.
            request = requests.get(
                chapter_data['chapter_url'], headers=HEADERS, timeout=10)
            soup = BeautifulSoup(request.text, 'html.parser')
            contents = self.get_image_urls(soup)
            img_path_list = self.get_image_paths(
                chapter_data['chapter_dir_path'], contents)
            # zip() replaces the original list(map(lambda x, y: (x, y), ...)).
            image_data_list = list(zip(img_path_list, contents))
            print('Downloading ' + chapter_data['chapter_name'] + ' .....')
            # Thread pool: one task per image.
            with ThreadPoolExecutor(max_workers=20) as executor:
                executor.map(self.download_image, image_data_list)
            if self.error403_signal:
                print(chapter_data['chapter_name'] +
                      ': Can not download some images. Please check again!')
                self.error403_signal = 0
        except Exception:
            print('Error get chapter info. Please try again later.')
        print('Finish ' + chapter_data['chapter_name'])

    def download_image(self, image_data_list):
        """Download one (path, url) pair, retrying for up to ``timeout`` seconds."""
        if self.stop_signal:
            return
        img_path_name, img_url = image_data_list
        # Each image gets a 10-second retry budget (comment fixed: the
        # original said "5 secs" but used 10).
        start = time.time()
        timeout = 10
        while True:
            try:
                img_data = requests.get(img_url, headers=HEADERS, timeout=10)
                if img_data.status_code == 403:
                    self.error403_signal = 1
                else:
                    with open(img_path_name, 'wb') as handler:
                        handler.write(img_data.content)
                break
            except Exception:
                if time.time() - start > timeout:
                    print('Error download image: ' + img_path_name)
                    break
                print('Retry download image: ' + img_url)
                time.sleep(1)


class Bridge():
    """Glue between the CLI arguments and the DownloadEngine."""

    current_manga = MangaInfo()

    def start_download(self, manga_url, from_chapter_input, to_chapter_input):
        """Validate inputs and kick off a download of the requested range."""
        self.manga_url = manga_url
        self.from_chapter_input = from_chapter_input
        self.to_chapter_input = to_chapter_input
        self.download_chapter()

    def download_chapter(self):
        """Create the manga directory and hand off to the engine."""
        if self.check_valid_url() and self.get_chapter_input():
            manga_save_path = sanitize_path(self.current_manga.manga_name)
            if not isdir(manga_save_path):
                mkdir(manga_save_path)
            self.current_manga.save_path = manga_save_path
            engine = DownloadEngine()
            engine.set_manga(self.current_manga)
            engine.run()

    def check_valid_url(self):
        """Return True when the URL is a reachable nettruyen/nhattruyen manga page."""
        current_manga_url = self.manga_url
        result = False
        domain = urlparse(current_manga_url)
        referer_header = '{uri.scheme}://{uri.netloc}/'.format(uri=domain)
        HEADERS['Referer'] = referer_header
        if not any(substr in current_manga_url for substr in ['nhattruyen', 'nettruyen']):
            print('Invalid manga url. Please try again.')
            return result
        try:
            request = requests.get(
                current_manga_url, headers=HEADERS, timeout=5)
            soup = BeautifulSoup(request.text, 'html.parser')
            if not soup.find('div', id='nt_listchapter'):
                print('Invalid manga url. Please try again.')
            else:
                self.current_manga.manga_url = str(current_manga_url)
                self.crawl_manga_home_page()
                result = True
            return result
        except Exception:
            print('Error getting manga page. Please try again.')
            return False

    def crawl_manga_home_page(self):
        """Scrape the manga title and per-chapter name/url lists."""
        try:
            request = requests.get(
                self.current_manga.manga_url, headers=HEADERS, timeout=10)
            soup = BeautifulSoup(request.text, 'html.parser')
            self.current_manga.manga_name = soup.find(
                'h1', class_='title-detail').text
            self.current_manga.chapter_name_list = [
                i.find('a').text for i in soup.find_all('div', class_='chapter')]
            chapter_url_list = []
            for chapter in soup.find('div', id='nt_listchapter').find('ul').find_all('a'):
                chapter_url_list.append(chapter['href'])
            self.current_manga.chapter_url_list = chapter_url_list
        except Exception:
            print('Error getting manga page. Please try again.')

    def get_chapter_index(self, chapter_input):
        """Resolve a user-supplied chapter token to a list index (or None)."""
        index = None
        if chapter_input == 'start_chapter':
            index = 0
        elif chapter_input == 'end_chapter':
            index = len(self.current_manga.chapter_name_list) - 1
        else:
            for chapter in self.current_manga.chapter_name_list:
                # Second word of the display name is the chapter number.
                chapter_name = chapter.split()[1]
                if ':' in chapter_name:
                    chapter_name = chapter_name[:-1]
                if chapter_input == chapter_name:
                    index = self.current_manga.chapter_name_list.index(chapter)
        return index

    def get_chapter_input(self):
        """Translate the from/to tokens into list_of_download_chapter indices."""
        from_chapter_index = self.get_chapter_index(self.from_chapter_input)
        to_chapter_index = self.get_chapter_index(self.to_chapter_input)
        if from_chapter_index is not None and to_chapter_index is not None:
            if from_chapter_index > to_chapter_index:
                from_chapter_index, to_chapter_index = to_chapter_index, from_chapter_index
            self.current_manga.list_of_download_chapter = list(
                range(from_chapter_index, to_chapter_index + 1))
            return True
        else:
            print('Invalid manga chapter input. Please try again.')
            return False


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('manga_url', type=str, help='url to the manga homepage')
    parser.add_argument('-a', '--all', action='store_true',
                        help='download/update all chapter')
    parser.add_argument('-f', '--fromto', nargs=2,
                        metavar=('from_chapter', 'to_chapter'),
                        help='download from one chapter to another chapter')
    parser.add_argument('-c', '--chapter', nargs=1, metavar=('chapter'),
                        help='download one chapter')
    args = parser.parse_args()
    bridge = Bridge()
    if not (args.all or args.fromto or args.chapter):
        parser.error('No action requested, add --all or --fromto or --chapter')
    elif args.all:
        bridge.start_download(args.manga_url, 'start_chapter', 'end_chapter')
    elif args.fromto:
        bridge.start_download(
            args.manga_url, args.fromto[0], args.fromto[1])
    elif args.chapter:
        bridge.start_download(
            args.manga_url, args.chapter[0], args.chapter[0])
38.87619
153
0.565981
1,400
12,246
4.683571
0.180714
0.043923
0.046363
0.024554
0.295715
0.230441
0.140003
0.077322
0.049108
0.049108
0
0.011773
0.33415
12,246
314
154
39
0.792372
0.020578
0
0.173387
0
0.008065
0.137532
0.013967
0
0
0
0
0
1
0.068548
false
0
0.040323
0.004032
0.165323
0.052419
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74a696dadca46d47b083f0630b23c6f4522f990b
1,237
py
Python
redash/handlers/__init__.py
fanhaipeng0403/redash
d0b074e21864b358e898d27fc2c01ebe96d7ba26
[ "BSD-2-Clause" ]
null
null
null
redash/handlers/__init__.py
fanhaipeng0403/redash
d0b074e21864b358e898d27fc2c01ebe96d7ba26
[ "BSD-2-Clause" ]
null
null
null
redash/handlers/__init__.py
fanhaipeng0403/redash
d0b074e21864b358e898d27fc2c01ebe96d7ba26
[ "BSD-2-Clause" ]
null
null
null
from flask import jsonify # redash __init__.py文件里 # setup_authentication(app) # login_manager.init_app(app) from flask_login import login_required from redash.handlers.api import api from redash.handlers.base import routes from redash.monitor import get_status from redash.permissions import require_super_admin # Handler的总入口,放一个用于诊断的ping接口,最佳实践 # routes用的也不是内置的,而是总蓝图 @routes.route('/ping', methods=['GET']) def ping(): return 'PONG.' # 系统状态。redis内存,运行了多久,日志数量,任务队列情况,文章的数目等等等(需要超级运用权限) @routes.route('/status.json') @login_required @require_super_admin def status_api(): status = get_status() return jsonify(status) # 项目http总注册接口,暴露给__init__.py的create_app使用 def init_app(app): # 蓝图以及一些分散的接口 from redash.handlers import embed, queries, static, authentication, admin, setup, organization # 仅仅初始化?? app.register_blueprint(routes) # 只注册了主的蓝图??? ####总API注册入口 api.init_app(app) # from flask_restful import Api # api是API的实例 # 路由注册, 资源注册url的两种方式 # 第一种 ####################### # >> @resource.route('/local_resource) # >> def resource(): # >> pass # 第二种 ####################### # >> resource.add_url_rule("/local_resource", view_func=LocalResource.as_view(name="get_local_resource"))
22.490909
108
0.717057
149
1,237
5.724832
0.496644
0.058617
0.03517
0.032825
0.044549
0
0
0
0
0
0
0
0.135004
1,237
54
109
22.907407
0.797196
0.404204
0
0
0
0
0.037481
0
0
0
0
0
0
1
0.157895
false
0
0.368421
0.052632
0.631579
0.052632
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
1
0
0
2
74a8fdbcc478237889a10e97a2c4357eca96a428
3,017
py
Python
tenff/__main__.py
rr-/10ff
9be5cf03eda09f1fe0920d67d9e09d4886c34ef3
[ "WTFPL" ]
53
2017-09-30T16:05:39.000Z
2022-02-04T08:19:26.000Z
tenff/__main__.py
rr-/10ff
9be5cf03eda09f1fe0920d67d9e09d4886c34ef3
[ "WTFPL" ]
2
2017-12-08T10:07:42.000Z
2020-05-20T21:46:27.000Z
tenff/__main__.py
rr-/10ff
9be5cf03eda09f1fe0920d67d9e09d4886c34ef3
[ "WTFPL" ]
2
2019-12-18T04:23:10.000Z
2020-09-25T10:31:04.000Z
"""Main executable.""" import argparse import asyncio from tenff.game import GameSettings, run_game from tenff.terminal import TerminalInputHandler from tenff.util import CORPORA_PATH, get_corpus_path, parse_corpus DEFAULT_TIME = 60 PROLOG = ( "A certain typing contest site spin-off in CLI, without all the " "advertisements, tracking and 10 megabytes of AJAX crap." ) class CustomHelpFormatter(argparse.HelpFormatter): """A HelpFormatter that uses concise syntax for short and long options help. """ def _format_action_invocation(self, action: argparse.Action) -> str: """Format action invocation. Example of the default argparse formatting: -c CORPUS, --corpus CORPUS Example of the concise formatting: -c, --corpus CORPUS """ if not action.option_strings or action.nargs == 0: return super()._format_action_invocation(action) default = self._get_default_metavar_for_optional(action) args_string = self._format_args(action, default) return ", ".join(action.option_strings) + " " + args_string def parse_args() -> argparse.Namespace: """Parse command line arguments.""" parser = argparse.ArgumentParser( prog="10ff", description=PROLOG, formatter_class=CustomHelpFormatter ) parser.add_argument( "-t", "--time", type=int, default=DEFAULT_TIME, help="how long to play the game for (in seconds)", ) parser.add_argument( "-c", "--corpus", type=str, default="english", help="path to the word list to play the game with", ) parser.add_argument( "-w", "--width", type=int, default=80, help="width of the terminal to play in", ) parser.add_argument( "-l", "--list", action="store_true", help="lists the built-in corpora" ) parser.add_argument( "-r", "--rigorous-spaces", action="store_true", help="treat double space as an error", ) return parser.parse_args() def main() -> None: """Main program logic. Start the event loop, parse the CLI arguments and run the game. 
""" loop = asyncio.new_event_loop() args = parse_args() if args.list: for path in sorted(CORPORA_PATH.iterdir()): if path.suffix == ".txt": print(path.stem) return input_handler = TerminalInputHandler(loop) with input_handler.enable_raw_terminal(): corpus_path = get_corpus_path(args.corpus) corpus = parse_corpus(corpus_path) settings = GameSettings( corpus=corpus, max_time=args.time, rigorous_spaces=args.rigorous_spaces, ) loop.run_until_complete( run_game( loop, input_handler, settings, ) ) loop.close() if __name__ == "__main__": main()
26.699115
78
0.608552
341
3,017
5.202346
0.410557
0.040586
0.047914
0.019166
0
0
0
0
0
0
0
0.004204
0.290355
3,017
112
79
26.9375
0.824381
0.121975
0
0.088608
0
0
0.152496
0
0
0
0
0
0
1
0.037975
false
0
0.063291
0
0.164557
0.012658
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74a90fbaab5a1695a359f7f2248d252b5d5d1dbb
2,248
py
Python
python-algorithm/leetcode/problem_76.py
isudox/nerd-algorithm
c1fbe153953cf3fc24395f75d102016fdf9ea0fa
[ "MIT" ]
5
2017-06-11T09:19:34.000Z
2019-01-16T16:58:31.000Z
python-algorithm/leetcode/problem_76.py
isudox/leetcode-solution
60085e64deaf396a171367affc94b18114565c43
[ "MIT" ]
5
2020-03-22T13:53:54.000Z
2020-03-23T08:49:35.000Z
python-algorithm/leetcode/problem_76.py
isudox/nerd-algorithm
c1fbe153953cf3fc24395f75d102016fdf9ea0fa
[ "MIT" ]
1
2019-03-02T15:50:43.000Z
2019-03-02T15:50:43.000Z
"""76. Minimum Window Substring https://leetcode.com/problems/minimum-window-substring/ Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n). Example: Input: S = "ADOBECODEBANC", T = "ABC" Output: "BANC" Note: If there is no such window in S that covers all characters in T, return the empty string "". If there is such window, you are guaranteed that there will always be only one unique minimum window in S. """ class Solution: def min_window(self, s: str, t: str) -> str: def is_valid(d: dict): for v in d.values(): if v > 0: return False return True store = {} for c in t: if c not in store: store[c] = 1 else: store[c] = store[c] + 1 min_head = min_tail = 0 head = tail = -1 min_len = len(s) + 1 not_found = True while head <= tail: if not_found: # if not found, move the cur_tail pointer. if tail == len(s) - 1: break tail += 1 cur_char = s[tail] if cur_char in store: store[cur_char] = store[cur_char] - 1 if is_valid(store): not_found = False cur_len = tail - head if cur_len < min_len: min_head, min_tail, min_len = head, tail, cur_len else: # already found, move the cur_head pointer. head += 1 cur_char = s[head] cur_len = tail - head if cur_char in store: store[cur_char] = store[cur_char] + 1 if not is_valid(store): not_found = True else: if cur_len < min_len: min_head, min_tail, min_len = head, tail, cur_len else: if cur_len < min_len: min_head, min_tail, min_len = head, tail, cur_len return s[min_head + 1:min_tail + 1]
33.552239
77
0.479982
289
2,248
3.584775
0.276817
0.054054
0.03861
0.054054
0.299228
0.260618
0.233591
0.233591
0.233591
0.233591
0
0.012058
0.446619
2,248
66
78
34.060606
0.82074
0.25089
0
0.355556
0
0
0
0
0
0
0
0
0
1
0.044444
false
0
0
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0
74ab68465c97b9a682ec0c65d4118ab45c6835b2
4,138
py
Python
inventory/urls.py
common1/newassetcms
65eee3c2ed9dac4cc56bfff863a6cbaff9830d26
[ "MIT" ]
null
null
null
inventory/urls.py
common1/newassetcms
65eee3c2ed9dac4cc56bfff863a6cbaff9830d26
[ "MIT" ]
7
2020-06-05T20:43:46.000Z
2022-01-13T01:14:21.000Z
inventory/urls.py
common1/newassetcms
65eee3c2ed9dac4cc56bfff863a6cbaff9830d26
[ "MIT" ]
null
null
null
from django.urls import path from . import views app_name = 'inventory' urlpatterns = [ path('assettypes/', views.AssetTypesIndexView.as_view(), name='index_assettypes'), path('assettypes/create/', views.AssetTypeCreateView.as_view(), name='create_assettype'), path('assettypes/update/<int:pk>', views.AssetTypeUpdateView.as_view(), name='update_assettype'), path('assettypes/read/<int:pk>', views.AssetTypeReadView.as_view(), name='read_assettype'), path('assettypes/delete/<int:pk>', views.AssetTypeDeleteView.as_view(), name='delete_assettype'), path('featured/', views.AssetsFeaturedIndexView.as_view(), name='index_featured_assets'), path('featured/read/<int:pk>', views.AssetFeaturedReadView.as_view(), name='read_featured_asset'), path('assets/', views.AssetsIndexView.as_view(), name='index_assets'), path('assets/create/', views.AssetCreateView.as_view(), name='create_asset'), path('assets/update/<int:pk>', views.AssetUpdateView.as_view(), name='update_asset'), path('assets/read/<int:pk>', views.AssetReadView.as_view(), name='read_asset'), path('assets/delete/<int:pk>', views.AssetDeleteView.as_view(), name='delete_asset'), path('reservations/', views.ReservationsIndexView.as_view(), name='index_reservations'), path('reservations/create/', views.ReservationCreateView.as_view(), name='create_reservation'), path('reservations/update/<int:pk>', views.ReservationUpdateView.as_view(), name='update_reservation'), path('reservations/edit/<int:pk>', views.ReservationEditView.as_view(), name='edit_reservation'), path('reservations/read/<int:pk>', views.ReservationReadView.as_view(), name='read_reservation'), path('reservations/delete/<int:pk>', views.ReservationDeleteView.as_view(), name='delete_reservation'), path('reservations/addbasket/<int:pk>', views.ReservationAddBasket.as_view(), name='add_basket_reservation'), path('reservations/addbasket/', views.add_basket, name='add_basket'), path('reservations/clearreservedassets/', views.clear_reserved_assets, 
name='clear_reserved_assets'), path('reservations/deletereservedasset/', views.delete_reserved_asset, name='delete_reserved_asset'), path('reservedassets/', views.ReservedAssetsIndexView.as_view(), name='index_reservedassets'), path('reservedassets/create/', views.ReservedAssetCreateView.as_view(), name='create_reservedasset'), path('reservedassets/update/<int:pk>', views.ReservedAssetUpdateView.as_view(), name='update_reservedasset'), path('reservedassets/read/<int:pk>', views.ReservedAssetReadView.as_view(), name='read_reservedasset'), path('reservedassets/delete/<int:pk>', views.ReservedAssetDeleteView.as_view(), name='delete_reservedasset'), path('loanedassets/', views.LoanedAssetsIndexView.as_view(), name='index_loanedassets'), path('loanedassets/create/', views.LoanedAssetCreateView.as_view(), name='create_loanedasset'), path('loanedassets/update/<int:pk>', views.LoanedAssetUpdateView.as_view(), name='update_loanedasset'), path('loanedassets/read/<int:pk>', views.LoanedAssetReadView.as_view(), name='read_loanedasset'), path('loanedassets/delete/<int:pk>', views.LoanedAssetDeleteView.as_view(), name='delete_loanedasset'), path('returnedassets/', views.ReturnedAssetsIndexView.as_view(), name='index_returnedassets'), path('returnedassets/create/', views.ReturnedAssetCreateView.as_view(), name='create_returnedasset'), path('returnedassets/update/<int:pk>', views.ReturnedAssetUpdateView.as_view(), name='update_returnedasset'), path('returnedassets/read/<int:pk>', views.ReturnedAssetReadView.as_view(), name='read_returnedasset'), path('returnedassets/delete/<int:pk>', views.ReturnedAssetDeleteView.as_view(), name='delete_returnedasset'), ]
45.977778
80
0.694538
421
4,138
6.631829
0.175772
0.073066
0.121777
0.037607
0
0
0
0
0
0
0
0
0.149106
4,138
89
81
46.494382
0.792957
0
0
0
0
0
0.361044
0.182939
0
0
0
0
0
1
0
false
0
0.025316
0
0.025316
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
1
0