Dataset schema (column name: dtype):

hexsha: string
size: int64
ext: string
lang: string
max_stars_repo_path: string
max_stars_repo_name: string
max_stars_repo_head_hexsha: string
max_stars_repo_licenses: list
max_stars_count: int64
max_stars_repo_stars_event_min_datetime: string
max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string
max_issues_repo_name: string
max_issues_repo_head_hexsha: string
max_issues_repo_licenses: list
max_issues_count: int64
max_issues_repo_issues_event_min_datetime: string
max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string
max_forks_repo_name: string
max_forks_repo_head_hexsha: string
max_forks_repo_licenses: list
max_forks_count: int64
max_forks_repo_forks_event_min_datetime: string
max_forks_repo_forks_event_max_datetime: string
content: string
avg_line_length: float64
max_line_length: int64
alphanum_fraction: float64
qsc_code_num_words_quality_signal: int64
qsc_code_num_chars_quality_signal: float64
qsc_code_mean_word_length_quality_signal: float64
qsc_code_frac_words_unique_quality_signal: float64
qsc_code_frac_chars_top_2grams_quality_signal: float64
qsc_code_frac_chars_top_3grams_quality_signal: float64
qsc_code_frac_chars_top_4grams_quality_signal: float64
qsc_code_frac_chars_dupe_5grams_quality_signal: float64
qsc_code_frac_chars_dupe_6grams_quality_signal: float64
qsc_code_frac_chars_dupe_7grams_quality_signal: float64
qsc_code_frac_chars_dupe_8grams_quality_signal: float64
qsc_code_frac_chars_dupe_9grams_quality_signal: float64
qsc_code_frac_chars_dupe_10grams_quality_signal: float64
qsc_code_frac_chars_replacement_symbols_quality_signal: float64
qsc_code_frac_chars_digital_quality_signal: float64
qsc_code_frac_chars_whitespace_quality_signal: float64
qsc_code_size_file_byte_quality_signal: float64
qsc_code_num_lines_quality_signal: float64
qsc_code_num_chars_line_max_quality_signal: float64
qsc_code_num_chars_line_mean_quality_signal: float64
qsc_code_frac_chars_alphabet_quality_signal: float64
qsc_code_frac_chars_comments_quality_signal: float64
qsc_code_cate_xml_start_quality_signal: float64
qsc_code_frac_lines_dupe_lines_quality_signal: float64
qsc_code_cate_autogen_quality_signal: float64
qsc_code_frac_lines_long_string_quality_signal: float64
qsc_code_frac_chars_string_length_quality_signal: float64
qsc_code_frac_chars_long_word_length_quality_signal: float64
qsc_code_frac_lines_string_concat_quality_signal: float64
qsc_code_cate_encoded_data_quality_signal: float64
qsc_code_frac_chars_hex_words_quality_signal: float64
qsc_code_frac_lines_prompt_comments_quality_signal: float64
qsc_code_frac_lines_assert_quality_signal: float64
qsc_codepython_cate_ast_quality_signal: float64
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64
qsc_codepython_frac_lines_import_quality_signal: float64
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64
qsc_code_num_words: int64
qsc_code_num_chars: int64
qsc_code_mean_word_length: int64
qsc_code_frac_words_unique: null
qsc_code_frac_chars_top_2grams: int64
qsc_code_frac_chars_top_3grams: int64
qsc_code_frac_chars_top_4grams: int64
qsc_code_frac_chars_dupe_5grams: int64
qsc_code_frac_chars_dupe_6grams: int64
qsc_code_frac_chars_dupe_7grams: int64
qsc_code_frac_chars_dupe_8grams: int64
qsc_code_frac_chars_dupe_9grams: int64
qsc_code_frac_chars_dupe_10grams: int64
qsc_code_frac_chars_replacement_symbols: int64
qsc_code_frac_chars_digital: int64
qsc_code_frac_chars_whitespace: int64
qsc_code_size_file_byte: int64
qsc_code_num_lines: int64
qsc_code_num_chars_line_max: int64
qsc_code_num_chars_line_mean: int64
qsc_code_frac_chars_alphabet: int64
qsc_code_frac_chars_comments: int64
qsc_code_cate_xml_start: int64
qsc_code_frac_lines_dupe_lines: int64
qsc_code_cate_autogen: int64
qsc_code_frac_lines_long_string: int64
qsc_code_frac_chars_string_length: int64
qsc_code_frac_chars_long_word_length: int64
qsc_code_frac_lines_string_concat: null
qsc_code_cate_encoded_data: int64
qsc_code_frac_chars_hex_words: int64
qsc_code_frac_lines_prompt_comments: int64
qsc_code_frac_lines_assert: int64
qsc_codepython_cate_ast: int64
qsc_codepython_frac_lines_func_ratio: int64
qsc_codepython_cate_var_zero: int64
qsc_codepython_frac_lines_pass: int64
qsc_codepython_frac_lines_import: int64
qsc_codepython_frac_lines_simplefunc: int64
qsc_codepython_score_lines_no_logic: int64
qsc_codepython_frac_lines_print: int64
effective: string
hits: int64
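The records below follow this schema, one field value per line. As a minimal sketch of how such records could be loaded and screened on the quality signals with pandas (the file name stack_sample.parquet and the Parquet storage format are assumptions, not something this dump specifies):

import pandas as pd

# Hypothetical path -- substitute wherever these records actually live.
df = pd.read_parquet("stack_sample.parquet")

# Keep Python files that look like substantial, mostly-alphabetic,
# non-repetitive code, using the quality-signal columns from the schema.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_num_lines_quality_signal"] >= 20)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())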
22915424775bb0c1cd95df8d2deeb30cca4451ba
1,845
py
Python
python_test.py
jackKiZhu/mypython
43eac97bec07338ed3b8b9473d4e4fae26f7140c
[ "MIT" ]
null
null
null
python_test.py
jackKiZhu/mypython
43eac97bec07338ed3b8b9473d4e4fae26f7140c
[ "MIT" ]
null
null
null
python_test.py
jackKiZhu/mypython
43eac97bec07338ed3b8b9473d4e4fae26f7140c
[ "MIT" ]
null
null
null
from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "mysql://root:mysql@127.0.0.1:3306/python_github"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)


class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    user_name = db.Column(db.String(64), unique=True)
    user_password = db.Column(db.String(32))

    def __repr__(self):
        return "User id: %s  Username: %s" % (self.id, self.user_name)


@app.route("/", methods=["post", "get"])
def index():
    index_meg = ""
    if request.method == "POST":
        user_name = request.form.get("user_name", "")
        user_pwd = request.form.get("user_pwd", "")
        if not all([user_name, user_pwd]):
            index_meg = "Please enter the information correctly"
        else:
            print(request.get_data())
            user_name_is_exits = User.query.filter(User.user_name == user_name).first()
            if user_name_is_exits:
                index_meg = "Username already exists"
            else:
                user_obj = User(user_name=user_name, user_password=user_pwd)
                db.session.add(user_obj)
                db.session.commit()
                index_meg = "Registration successful"
                print("Registration successful")
    # user_name = request.args.get("user_name", "")
    # user_pwd = request.args.get("user_pwd", "")
    # user_is_login = User.query.filter_by(user_name=user_name, user_password=user_pwd).first()
    # if user_is_login:
    #     index_meg = "Login successful"
    #     print("Login successful")
    #     return render_template("login_ok.html", index_meg=index_meg)
    # else:
    #     # index_meg = "Login failed"
    #     print("Login failed")
    return render_template("index.html", index_meg=index_meg)


if __name__ == "__main__":
    db.drop_all()
    db.create_all()
    app.run(debug=True)
32.368421
95
0.614634
245
1,845
4.318367
0.338776
0.113422
0.090737
0.042533
0.173913
0.113422
0.066163
0.066163
0
0
0
0.010036
0.243902
1,845
56
96
32.946429
0.748387
0.190244
0
0.055556
0
0
0.12289
0.067522
0
0
0
0
0
1
0.055556
false
0.055556
0.055556
0.027778
0.277778
0.055556
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
2293c25414f578bb3829ecd6692177ce5d098784
1,218
py
Python
python/tree/0103_binary_tree_zigzag_level_order_traversal.py
linshaoyong/leetcode
ea052fad68a2fe0cbfa5469398508ec2b776654f
[ "MIT" ]
6
2019-07-15T13:23:57.000Z
2020-01-22T03:12:01.000Z
python/tree/0103_binary_tree_zigzag_level_order_traversal.py
linshaoyong/leetcode
ea052fad68a2fe0cbfa5469398508ec2b776654f
[ "MIT" ]
null
null
null
python/tree/0103_binary_tree_zigzag_level_order_traversal.py
linshaoyong/leetcode
ea052fad68a2fe0cbfa5469398508ec2b776654f
[ "MIT" ]
1
2019-07-24T02:15:31.000Z
2019-07-24T02:15:31.000Z
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    def zigzagLevelOrder(self, root):
        """
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        a = [root]
        b = []
        c = []
        r = [[root.val]]
        i = 1
        while True:
            for n in a:
                if n.left:
                    b.append(n.left)
                    c.append(n.left.val)
                if n.right:
                    b.append(n.right)
                    c.append(n.right.val)
            if not b:
                break
            else:
                a = b
                if i & 1 == 1:
                    c.reverse()
                r.append(c)
                b = []
                c = []
                i += 1
        return r


def test_zigzag_level_order():
    a = TreeNode(3)
    b = TreeNode(9)
    c = TreeNode(20)
    d = TreeNode(15)
    e = TreeNode(7)
    a.left = b
    a.right = c
    c.left = d
    c.right = e
    assert Solution().zigzagLevelOrder(a) == [
        [3],
        [20, 9],
        [15, 7]
    ]
21
46
0.374384
136
1,218
3.301471
0.352941
0.062361
0.035635
0
0
0
0
0
0
0
0
0.03005
0.50821
1,218
57
47
21.368421
0.719533
0.036125
0
0.083333
0
0
0
0
0
0
0
0
0.020833
1
0.0625
false
0
0
0
0.145833
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
22a26cac9546e3d04238eea2e14e595751d5270c
11,429
py
Python
geo_regions.py
saeed-moghimi-noaa/Maxelev_plot
5bb701d8cb7d64db4c89ea9d7993a8269e57e504
[ "CC0-1.0" ]
null
null
null
geo_regions.py
saeed-moghimi-noaa/Maxelev_plot
5bb701d8cb7d64db4c89ea9d7993a8269e57e504
[ "CC0-1.0" ]
null
null
null
geo_regions.py
saeed-moghimi-noaa/Maxelev_plot
5bb701d8cb7d64db4c89ea9d7993a8269e57e504
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Geo regions for map plot
"""

__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2017, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "moghimis@gmail.com"

import matplotlib.pyplot as plt
from collections import defaultdict

defs = defaultdict(dict)
defs['elev']['var'] = 'elev'
defs['elev']['vmin'] = -1
defs['elev']['vmax'] = 1
defs['elev']['label'] = 'Elev. [m]'
defs['elev']['format'] = '%3.1g'
defs['elev']['cmap'] = plt.cm.jet_r


def get_region_extent(region='hsofs_region'):
    if region == 'hsofs_region':
        defs['lim']['xmin'] = -99.0
        defs['lim']['xmax'] = -52.8
        defs['lim']['ymin'] = 5.0
        defs['lim']['ymax'] = 46.3
    ##IKE
    elif region == 'caribbean':
        defs['lim']['xmin'] = -78.
        defs['lim']['xmax'] = -74.
        defs['lim']['ymin'] = 20.
        defs['lim']['ymax'] = 24.
        defs['lim']['xmin'] = -82.
        defs['lim']['xmax'] = -71.
        defs['lim']['ymin'] = 18.
        defs['lim']['ymax'] = 26.
    elif region == 'ike_region':
        defs['lim']['xmin'] = -98.5
        defs['lim']['xmax'] = -84.5
        defs['lim']['ymin'] = 24.
        defs['lim']['ymax'] = 31.5
    elif region == 'caribbean_bigger':
        defs['lim']['xmin'] = -78.0
        defs['lim']['xmax'] = -58
        defs['lim']['ymin'] = 10.0
        defs['lim']['ymax'] = 28.
    elif region == 'ike_local':
        defs['lim']['xmin'] = -96
        defs['lim']['xmax'] = -92
        defs['lim']['ymin'] = 28.5
        defs['lim']['ymax'] = 30.6
    elif region == 'ike_wave':
        defs['lim']['xmin'] = -95.63
        defs['lim']['xmax'] = -88.0
        defs['lim']['ymin'] = 28.37
        defs['lim']['ymax'] = 30.50
    elif region == 'ike_hwm':
        defs['lim']['xmin'] = -96.15
        defs['lim']['xmax'] = -88.5
        defs['lim']['ymin'] = 28.45
        defs['lim']['ymax'] = 30.7
    elif region == 'ike_galv_bay':
        defs['lim']['xmin'] = -95.92
        defs['lim']['xmax'] = -94.81
        defs['lim']['ymin'] = 29.37
        defs['lim']['ymax'] = 29.96
    elif region == 'ike_galv_nwm':
        defs['lim']['xmin'] = -95.4
        defs['lim']['xmax'] = -94.2
        defs['lim']['ymin'] = 28.66
        defs['lim']['ymax'] = 30.4
    elif region == 'ike_wav_break':
        defs['lim']['xmin'] = -95
        defs['lim']['xmax'] = -94.5
        defs['lim']['ymin'] = 28.7 + 0.6
        defs['lim']['ymax'] = 30.4 - 0.6
    elif region == 'ike_f63_timeseries':
        defs['lim']['xmin'] = -94.2579 - 0.1
        defs['lim']['xmax'] = -94.2579 + 0.1
        defs['lim']['ymin'] = 29.88642 - 0.1
        defs['lim']['ymax'] = 29.88642 + 0.1
    elif region == 'ike_f63_timeseries_det':
        defs['lim']['xmin'] = -94.2300
        defs['lim']['xmax'] = -94.1866
        defs['lim']['ymin'] = 29.82030
        defs['lim']['ymax'] = 29.84397 + 0.05
    elif region == 'ike_cpl_paper':
        defs['lim']['xmin'] = -95.127481
        defs['lim']['xmax'] = -93.233053
        defs['lim']['ymin'] = 29.198490
        defs['lim']['ymax'] = 30.132224
    ##IRMA
    elif region == 'carib_irma':
        defs['lim']['xmin'] = -84.0
        defs['lim']['xmax'] = -60.
        defs['lim']['ymin'] = 15.0
        defs['lim']['ymax'] = 29.
    elif region == 'burbuda':
        defs['lim']['xmin'] = -65.0
        defs['lim']['xmax'] = -60.
        defs['lim']['ymin'] = 15.0
        defs['lim']['ymax'] = 19.
    elif region == 'burbuda_zoom':
        defs['lim']['xmin'] = -63.8
        defs['lim']['xmax'] = -60.8
        defs['lim']['ymin'] = 16.8
        defs['lim']['ymax'] = 18.65
    elif region == 'puertorico':
        defs['lim']['xmin'] = -67.35
        defs['lim']['xmax'] = -66.531
        defs['lim']['ymin'] = 18.321
        defs['lim']['ymax'] = 18.674
    elif region == 'puertorico_shore':
        defs['lim']['xmin'] = -67.284
        defs['lim']['xmax'] = -66.350
        defs['lim']['ymin'] = 18.360
        defs['lim']['ymax'] = 18.890
    elif region == 'key_west':
        defs['lim']['xmin'] = -82.7
        defs['lim']['xmax'] = -74.5
        defs['lim']['ymin'] = 21.3
        defs['lim']['ymax'] = 27.2
    elif region == 'key_west_zoom':
        defs['lim']['xmin'] = -82.2
        defs['lim']['xmax'] = -79.4
        defs['lim']['ymin'] = 24.1
        defs['lim']['ymax'] = 26.1
    elif region == 'cuba_zoom':
        defs['lim']['xmin'] = -82.
        defs['lim']['xmax'] = -77.
        defs['lim']['ymin'] = 21.5
        defs['lim']['ymax'] = 23.5
    elif region == 'key_west_timeseries':
        defs['lim']['xmin'] = -84.62
        defs['lim']['xmax'] = -79.2
        defs['lim']['ymin'] = 23.6
        defs['lim']['ymax'] = 30.0
    elif region == 'pr_timeseries':
        defs['lim']['xmin'] = -68
        defs['lim']['xmax'] = -64
        defs['lim']['ymin'] = 17.3
        defs['lim']['ymax'] = 19.2
    elif region == 'key_west_anim':
        defs['lim']['xmin'] = -85.5
        defs['lim']['xmax'] = -74.5
        defs['lim']['ymin'] = 21.0
        defs['lim']['ymax'] = 31.5
    ## ISABEL
    elif region == 'isa_region':
        defs['lim']['xmin'] = -80.2
        defs['lim']['xmax'] = -71.6
        defs['lim']['ymin'] = 31.9
        defs['lim']['ymax'] = 41.9
    elif region == 'isa_local':
        defs['lim']['xmin'] = -77.5
        defs['lim']['xmax'] = -74
        defs['lim']['ymin'] = 34.5
        defs['lim']['ymax'] = 40.0
        defs['lim']['xmin'] = -78.5
        defs['lim']['xmax'] = -74
        defs['lim']['ymin'] = 33.5
        defs['lim']['ymax'] = 39.5
    elif region == 'isa_hwm':
        defs['lim']['xmin'] = -76.01
        defs['lim']['xmax'] = -75.93
        defs['lim']['ymin'] = 36.74
        defs['lim']['ymax'] = 36.93
    elif region == 'isa_landfall':
        defs['lim']['xmin'] = -77.8
        defs['lim']['xmax'] = -75.2
        defs['lim']['ymin'] = 34.2
        defs['lim']['ymax'] = 37.5
    elif region == 'isa_landfall_zoom':
        defs['lim']['xmin'] = -77.8
        defs['lim']['xmax'] = -75.2
        defs['lim']['ymin'] = 34.2
        defs['lim']['ymax'] = 36.0
    ## SANDY
    elif region == 'san_track':
        defs['lim']['xmin'] = -82.0
        defs['lim']['xmax'] = -67.0
        defs['lim']['ymin'] = 23.0
        defs['lim']['ymax'] = 43.6
    elif region == 'san_area':
        defs['lim']['xmin'] = -77.0
        defs['lim']['xmax'] = -70.0
        defs['lim']['ymin'] = 37.0
        defs['lim']['ymax'] = 42.0
    elif region == 'san_track':
        defs['lim']['xmin'] = -82.0
        defs['lim']['xmax'] = -67.0
        defs['lim']['ymin'] = 23.0
        defs['lim']['ymax'] = 43.6
    elif region == 'san_area':
        defs['lim']['xmin'] = -77.0
        defs['lim']['xmax'] = -70.0
        defs['lim']['ymin'] = 37.0
        defs['lim']['ymax'] = 42.0
    elif region == 'san_area2':
        defs['lim']['xmin'] = -75.9
        defs['lim']['xmax'] = -73.3
        defs['lim']['ymin'] = 38.5
        defs['lim']['ymax'] = 41.3
    elif region == 'san_newyork':
        defs['lim']['xmin'] = -74.5
        defs['lim']['xmax'] = -73.55
        defs['lim']['ymin'] = 40.35
        defs['lim']['ymax'] = 41.2
    elif region == 'san_delaware':
        defs['lim']['xmin'] = -75.87
        defs['lim']['xmax'] = -74.31
        defs['lim']['ymin'] = 38.26
        defs['lim']['ymax'] = 40.51
    elif region == 'san_jamaica_bay':
        defs['lim']['xmin'] = -73.963520
        defs['lim']['xmax'] = -73.731455
        defs['lim']['ymin'] = 40.518074
        defs['lim']['ymax'] = 40.699618
    elif region == 'irn_region':
        defs['lim']['xmin'] = -78.41
        defs['lim']['xmax'] = -73.48
        defs['lim']['ymin'] = 33.55
        defs['lim']['ymax'] = 41.31
    elif region == 'irn_hwm':
        defs['lim']['xmin'] = -78.64
        defs['lim']['xmax'] = -69.54
        defs['lim']['ymin'] = 33.80
        defs['lim']['ymax'] = 41.82
    ## ANDREW
    elif region == 'and_region':
        defs['lim']['xmin'] = -98.5
        defs['lim']['xmax'] = -77.5
        defs['lim']['ymin'] = 23.
        defs['lim']['ymax'] = 32.
    elif region == 'and_fl_lu':
        defs['lim']['xmin'] = -98.5
        defs['lim']['xmax'] = -76.5
        defs['lim']['ymin'] = 21.
        defs['lim']['ymax'] = 32.
    elif region == 'and_local_lu':
        defs['lim']['xmin'] = -95
        defs['lim']['xmax'] = -86
        defs['lim']['ymin'] = 28.
        defs['lim']['ymax'] = 32
    elif region == 'and_local_fl':
        defs['lim']['xmin'] = -86
        defs['lim']['xmax'] = -79.5
        defs['lim']['ymin'] = 24.
        defs['lim']['ymax'] = 34
    elif region == 'and_local_lu_landfall':
        defs['lim']['xmin'] = -92.4
        defs['lim']['xmax'] = -87.5
        defs['lim']['ymin'] = 28.
        defs['lim']['ymax'] = 31.
    elif region == 'and_local_fl_landfall':
        defs['lim']['xmin'] = -80.0
        defs['lim']['xmax'] = -80.5
        defs['lim']['ymin'] = 25.34
        defs['lim']['ymax'] = 25.8
    ## operational upgrade
    # NYC area: -74.027725,40.596099
    elif region == 'NYC_area':
        defs['lim']['xmin'] = -74.027725 - 0.25
        defs['lim']['xmax'] = -74.027725 + 0.25
        defs['lim']['ymin'] = 40.596099 - 0.2
        defs['lim']['ymax'] = 40.596099 + 0.2
    # Tampa area: -82.455511,27.921438
    elif region == 'Tampa_area':
        defs['lim']['xmin'] = -82.455511 - 0.25
        defs['lim']['xmax'] = -82.455511 + 0.25
        defs['lim']['ymin'] = 27.921438 - 0.2
        defs['lim']['ymax'] = 27.921438 + 0.2
    # Marshall Islands: 169.107299,7.906637
    elif region == 'Marshall':
        defs['lim']['xmin'] = 169.107299 - 0.25
        defs['lim']['xmax'] = 169.107299 + 0.25
        defs['lim']['ymin'] = 7.906637 - 0.2
        defs['lim']['ymax'] = 7.906637 + 0.2
    # Palau: 134.461436,7.436438
    elif region == 'Palau':
        defs['lim']['xmin'] = 134.461436 - 0.25
        defs['lim']['xmax'] = 134.461436 + 0.25
        defs['lim']['ymin'] = 7.436438 - 0.2
        defs['lim']['ymax'] = 7.436438 + 0.2
    elif region == 'NYC_Area_m':
        defs['lim']['xmin'] = -73.55
        defs['lim']['xmax'] = -74.26
        defs['lim']['ymin'] = 40.55
        defs['lim']['ymax'] = 40.91
    elif region == 'Tampa_Area_m':
        defs['lim']['xmin'] = -82.37
        defs['lim']['xmax'] = -82.75
        defs['lim']['ymin'] = 27.63
        defs['lim']['ymax'] = 28.05
    elif region == 'Marshall_Islands_m':
        defs['lim']['xmin'] = 164.92
        defs['lim']['xmax'] = 173.45
        defs['lim']['ymin'] = 5.10
        defs['lim']['ymax'] = 11.90
    elif region == 'Palau_m':
        defs['lim']['xmin'] = 134.01
        defs['lim']['xmax'] = 134.78
        defs['lim']['ymin'] = 6.78
        defs['lim']['ymax'] = 8.52
    elif region == 'Port_Arthur_m':
        defs['lim']['xmin'] = -93.60
        defs['lim']['xmax'] = -94.24
        defs['lim']['ymin'] = 29.62
        defs['lim']['ymax'] = 30.14
    return defs['lim']
34.116418
52
0.441683
1,473
11,429
3.361847
0.157502
0.318053
0.124394
0.024233
0.30937
0.240913
0.197294
0.160339
0.122577
0.099152
0
0.125334
0.313063
11,429
334
53
34.218563
0.505413
0.021262
0
0.166667
0
0
0.209031
0.005734
0
0
0
0
0
1
0.003401
false
0
0.006803
0
0.013605
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
22ae53d11248d624a0ee5f564b8dd2e374ddaa54
606
py
Python
Day 2/Day_2_Python.py
giTan7/30-Days-Of-Code
f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8
[ "MIT" ]
1
2020-10-15T14:44:08.000Z
2020-10-15T14:44:08.000Z
Day 2/Day_2_Python.py
giTan7/30-Days-Of-Code
f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8
[ "MIT" ]
null
null
null
Day 2/Day_2_Python.py
giTan7/30-Days-Of-Code
f023a2bf1b5e58e1eb5180162443b9cd4b6b2ff8
[ "MIT" ]
null
null
null
#!/bin/python3

import math
import os
import random
import re
import sys


# Complete the solve function below.
def solve(meal_cost, tip_percent, tax_percent):
    tip = (meal_cost * tip_percent) / 100
    tax = (meal_cost * tax_percent) / 100
    print(int(meal_cost + tip + tax + 0.5))  # We add 0.5 because the float should be rounded to the nearest integer


if __name__ == '__main__':
    meal_cost = float(input())
    tip_percent = int(input())
    tax_percent = int(input())
    solve(meal_cost, tip_percent, tax_percent)

# Time complexity: O(1)
# Space complexity: O(1)
22.444444
76
0.663366
89
606
4.269663
0.483146
0.126316
0.115789
0.142105
0.173684
0.173684
0.173684
0
0
0
0
0.027957
0.232673
606
26
77
23.307692
0.789247
0.268977
0
0
0
0
0.019417
0
0
0
0
0
0
1
0.071429
false
0
0.357143
0
0.428571
0.071429
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e
1,133
py
Python
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py
harshp8l/deep-learning-lang-detection
2a54293181c1c2b1a2b840ddee4d4d80177efb33
[ "MIT" ]
84
2017-10-25T15:49:21.000Z
2021-11-28T21:25:54.000Z
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
5
2018-03-29T11:50:46.000Z
2021-04-26T13:33:18.000Z
data/train/python/22aec8fbe47f7975a1e7f4a0caa5c88c56e4a03e__init__.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
24
2017-11-22T08:31:00.000Z
2022-03-27T01:22:31.000Z
def save_form(form, actor=None):
    """Allows storing a form with a passed actor. Normally, Form.save() does not
    accept an actor, but if you require this to be passed (is not handled by
    middleware), you can use this to replace form.save().

    Requires you to use the audit.Model model as the actor is passed to the
    object's save method.
    """
    obj = form.save(commit=False)
    obj.save(actor=actor)
    form.save_m2m()
    return obj


#def intermediate_save(instance, actor=None):
#    """Allows saving of an instance, without storing the changes, but keeping the history. This allows you to perform
#    intermediate saves:
#
#        obj.value1 = 1
#        intermediate_save(obj)
#        obj.value2 = 2
#        obj.save()
#        <value 1 and value 2 are both stored in the database>
#    """
#    if hasattr(instance, '_audit_changes'):
#        tmp = instance._audit_changes
#        if actor:
#            instance.save(actor=actor)
#        else:
#            instance.save()
#        instance._audit_changes = tmp
#    else:
#        if actor:
#            instance.save(actor=actor)
#        else:
#            instance.save()
32.371429
118
0.634598
156
1,133
4.544872
0.423077
0.045134
0.059238
0.06488
0.126939
0.126939
0.126939
0.126939
0.126939
0
0
0.008383
0.263019
1,133
34
119
33.323529
0.840719
0.826125
0
0
0
0
0
0
0
0
0
0
0
1
0.2
false
0
0
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
22b2735d6e9bb2b53a0a0541af9ec0a4bc2db7e4
738
py
Python
pair.py
hhgarnes/python-validity
82b42e4fd152f10f75584de56502fd9ada299bb5
[ "MIT" ]
null
null
null
pair.py
hhgarnes/python-validity
82b42e4fd152f10f75584de56502fd9ada299bb5
[ "MIT" ]
null
null
null
pair.py
hhgarnes/python-validity
82b42e4fd152f10f75584de56502fd9ada299bb5
[ "MIT" ]
null
null
null
from time import sleep

from proto9x.usb import usb
from proto9x.tls import tls
from proto9x.flash import read_flash
from proto9x.init_flash import init_flash
from proto9x.upload_fwext import upload_fwext
from proto9x.calibrate import calibrate
from proto9x.init_db import init_db

#usb.trace_enabled=True
#tls.trace_enabled=True


def restart():
    print('Sleeping...')
    sleep(3)
    tls.reset()
    usb.open()
    usb.send_init()
    tls.parseTlsFlash(read_flash(1, 0, 0x1000))
    tls.open()


usb.open()
print('Initializing flash...')
init_flash()
restart()
print('Uploading firmware...')
upload_fwext()
restart()
print('Calibrating...')
calibrate()
print('Init database...')
init_db()
print('That\'s it, pairing\'s finished')
18.45
47
0.734417
105
738
5.028571
0.371429
0.145833
0.060606
0
0
0
0
0
0
0
0
0.023511
0.135501
738
39
48
18.923077
0.804075
0.059621
0
0.142857
0
0
0.141823
0
0
0
0.008683
0
0
1
0.035714
true
0
0.285714
0
0.321429
0.214286
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
22b976d4af390f9c20bc3dedbfcb6376fdbf0308
5,277
py
Python
hw2/deeplearning/style_transfer.py
axelbr/berkeley-cs182-deep-neural-networks
2bde27d9d5361d48dce7539d00b136209c1cfaa1
[ "MIT" ]
null
null
null
hw2/deeplearning/style_transfer.py
axelbr/berkeley-cs182-deep-neural-networks
2bde27d9d5361d48dce7539d00b136209c1cfaa1
[ "MIT" ]
null
null
null
hw2/deeplearning/style_transfer.py
axelbr/berkeley-cs182-deep-neural-networks
2bde27d9d5361d48dce7539d00b136209c1cfaa1
[ "MIT" ]
null
null
null
import numpy as np
import torch
import torch.nn.functional as F


def content_loss(content_weight, content_current, content_target):
    """
    Compute the content loss for style transfer.

    Inputs:
    - content_weight: Scalar giving the weighting for the content loss.
    - content_current: features of the current image; this is a PyTorch Tensor
      of shape (1, C_l, H_l, W_l).
    - content_target: features of the content image, Tensor with shape
      (1, C_l, H_l, W_l).

    Returns:
    - scalar content loss
    """
    ##############################################################################
    #                               YOUR CODE HERE                               #
    ##############################################################################
    _, C, H, W = content_current.shape
    current_features = content_current.view(C, H*W)
    target_features = content_target.view(C, H*W)
    loss = content_weight * torch.sum(torch.square(current_features - target_features))
    return loss
    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################


def gram_matrix(features, normalize=True):
    """
    Compute the Gram matrix from features.

    Inputs:
    - features: PyTorch Variable of shape (N, C, H, W) giving features for
      a batch of N images.
    - normalize: optional, whether to normalize the Gram matrix
      If True, divide the Gram matrix by the number of neurons (H * W * C)

    Returns:
    - gram: PyTorch Variable of shape (N, C, C) giving the (optionally normalized)
      Gram matrices for the N input images.
    """
    ##############################################################################
    #                               YOUR CODE HERE                               #
    ##############################################################################
    C, H, W = features.shape[-3], features.shape[-2], features.shape[-1]
    reshaped = features.view(-1, C, H*W)
    G = reshaped @ reshaped.transpose(dim0=1, dim1=2)
    if normalize:
        G = G / (H*W*C)
    return G
    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################


def style_loss(feats, style_layers, style_targets, style_weights):
    """
    Computes the style loss at a set of layers.

    Inputs:
    - feats: list of the features at every layer of the current image, as produced by
      the extract_features function.
    - style_layers: List of layer indices into feats giving the layers to include in the
      style loss.
    - style_targets: List of the same length as style_layers, where style_targets[i] is
      a PyTorch Variable giving the Gram matrix the source style image computed at
      layer style_layers[i].
    - style_weights: List of the same length as style_layers, where style_weights[i]
      is a scalar giving the weight for the style loss at layer style_layers[i].

    Returns:
    - style_loss: A PyTorch Variable holding a scalar giving the style loss.
    """
    # Hint: you can do this with one for loop over the style layers, and should
    # not be very much code (~5 lines). You will need to use your gram_matrix function.
    ##############################################################################
    #                               YOUR CODE HERE                               #
    ##############################################################################
    loss = 0
    for i, l in enumerate(style_layers):
        A, G = style_targets[i], gram_matrix(feats[l])
        loss += style_weights[i] * torch.sum(torch.square(G - A))
    return loss
    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################


def tv_loss(img, tv_weight):
    """
    Compute total variation loss.

    Inputs:
    - img: PyTorch Variable of shape (1, 3, H, W) holding an input image.
    - tv_weight: Scalar giving the weight w_t to use for the TV loss.

    Returns:
    - loss: PyTorch Variable holding a scalar giving the total variation loss
      for img weighted by tv_weight.
    """
    # Your implementation should be vectorized and not require any loops!
    ##############################################################################
    #                               YOUR CODE HERE                               #
    ##############################################################################
    tv = torch.square(img[..., 1:, :-1] - img[..., :-1, :-1]) + torch.square(img[..., :-1, 1:] - img[..., :-1, :-1])
    return tv_weight * torch.sum(tv)
    ##############################################################################
    #                             END OF YOUR CODE                               #
    ##############################################################################
45.886957
116
0.437938
532
5,277
4.25188
0.246241
0.007958
0.007958
0.022989
0.181256
0.157383
0.1229
0.066313
0.037135
0.037135
0
0.005672
0.264923
5,277
114
117
46.289474
0.577468
0.481713
0
0.08
0
0
0
0
0
0
0
0.035088
0
1
0.16
false
0
0.12
0
0.44
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
22c745e9fe90945bd78c2b0b4951b89a65ce5057
3,482
py
Python
py_hanabi/card.py
krinj/hanabi-simulator
b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6
[ "MIT" ]
1
2018-09-28T00:47:52.000Z
2018-09-28T00:47:52.000Z
py_hanabi/card.py
krinj/hanabi-simulator
b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6
[ "MIT" ]
null
null
null
py_hanabi/card.py
krinj/hanabi-simulator
b77b04aa09bab8bd8d7b784e04bf8b9d5d76d1a6
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

"""
A card (duh).
"""

import random
import uuid
from enum import Enum
from typing import List

from py_hanabi.settings import CARD_DECK_DISTRIBUTION

__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"


class Color(Enum):
    RED = 1
    BLUE = 2
    GREEN = 3
    YELLOW = 4
    WHITE = 5


class Card:
    def __init__(self, number: int, color: Color):
        self._number: int = number
        self._color: Color = color
        self._id: str = uuid.uuid4().hex
        self._hint_number_counter: int = 0
        self._hint_color_counter: int = 0

        # self._index_hinted: List[int] = []
        # self._lone_hinted: List[bool] = []

        # According to hints, these are the ones we know it is NOT.
        self.not_color: List[Color] = []
        self.not_number: List[int] = []

    def __repr__(self):
        hint_str = ""
        if self.hint_received_color:
            hint_str += "C"
        if self.hint_received_number:
            hint_str += "N"
        return f"[{self.color} {self.number} {hint_str}]"

    def __eq__(self, other: 'Card'):
        return self.color == other.color and self.number == other.number

    def receive_hint_number(self, number: int):
        if number == self.number:
            self._hint_number_counter += 1
        else:
            self.not_number.append(number)

    def receive_hint_color(self, color: Color):
        if color == self.color:
            self._hint_color_counter += 1
        else:
            self.not_color.append(color)

    def remove_hint_number(self, number: int):
        if number == self.number:
            self._hint_number_counter -= 1
        else:
            self.not_number.pop()

    def remove_hint_color(self, color: Color):
        if color == self.color:
            self._hint_color_counter -= 1
        else:
            self.not_color.pop()

    @property
    def label(self):
        return f"{self.number} of {self.get_color_label(self.color)}"

    @property
    def id(self) -> str:
        return self._id

    @property
    def key(self) -> tuple:
        return self.get_key(self.color, self.number)

    @staticmethod
    def get_key(c: Color, n: int) -> tuple:
        return c, n

    @property
    def number(self) -> int:
        return self._number

    @property
    def color(self) -> Color:
        return self._color

    @property
    def observed_color(self) -> Color:
        return None if not self.hint_received_color else self._color

    @property
    def observed_number(self) -> int:
        return None if not self.hint_received_number else self._number

    @property
    def hint_received_number(self) -> bool:
        return self._hint_number_counter > 0

    @property
    def hint_received_color(self) -> bool:
        return self._hint_color_counter > 0

    @staticmethod
    def generate_deck() -> List['Card']:
        """ Generate the starting deck for the game. """
        deck: List[Card] = []
        for color in Color:
            for i in CARD_DECK_DISTRIBUTION:
                card = Card(i, color)
                deck.append(card)
        random.shuffle(deck)
        return deck

    @staticmethod
    def get_color_label(color: Color) -> str:
        color_labels = {
            Color.BLUE: "Blue",
            Color.RED: "Red",
            Color.YELLOW: "Yellow",
            Color.GREEN: "Green",
            Color.WHITE: "White",
        }
        return color_labels[color]
24.871429
72
0.587881
435
3,482
4.471264
0.222989
0.064781
0.043188
0.043188
0.243702
0.192288
0.192288
0.160411
0.160411
0.160411
0
0.006203
0.305572
3,482
139
73
25.05036
0.79818
0.059161
0
0.2
0
0
0.050952
0.017802
0
0
0
0
0
1
0.19
false
0
0.05
0.11
0.45
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
22c78686c8b8a763f3206d86fcbc87e20d6ea1aa
1,186
py
Python
setup.py
d2gex/distpickymodel
7acd4ffafbe592d6336d91d6e7411cd45357e41c
[ "MIT" ]
null
null
null
setup.py
d2gex/distpickymodel
7acd4ffafbe592d6336d91d6e7411cd45357e41c
[ "MIT" ]
null
null
null
setup.py
d2gex/distpickymodel
7acd4ffafbe592d6336d91d6e7411cd45357e41c
[ "MIT" ]
null
null
null
import setuptools
import distpickymodel


def get_long_desc():
    with open("README.rst", "r") as fh:
        return fh.read()


setuptools.setup(
    name="distpickymodel",
    version=distpickymodel.__version__,
    author="Dan G",
    author_email="daniel.garcia@d2garcia.com",
    description="A shared Mongoengine-based model library",
    long_description=get_long_desc(),
    url="https://github.com/d2gex/distpickymodel",
    # Exclude 'tests' and 'docs'
    packages=['distpickymodel'],
    python_requires='>=3.6',
    install_requires=['pymongo>=3.7.2', 'mongoengine>=0.17.0', 'six'],
    tests_require=['pytest>=4.4.0', 'PyYAML>=5.1'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
32.944444
71
0.636594
126
1,186
5.888889
0.690476
0.076819
0.101078
0.070081
0
0
0
0
0
0
0
0.02246
0.211636
1,186
35
72
33.885714
0.771123
0.021922
0
0
0
0
0.522453
0.022453
0
0
0
0
0
1
0.033333
true
0
0.066667
0
0.133333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
22cc300c5aa21f713c2ef3f3b60722cc7d238f97
1,163
py
Python
rdl/data_sources/DataSourceFactory.py
pageuppeople-opensource/relational-data-loader
0bac7036d65636d06eacca4e68e09d6e1c506ea4
[ "MIT" ]
2
2019-03-11T12:45:23.000Z
2019-04-05T05:22:43.000Z
rdl/data_sources/DataSourceFactory.py
pageuppeople-opensource/relational-data-loader
0bac7036d65636d06eacca4e68e09d6e1c506ea4
[ "MIT" ]
5
2019-02-08T03:23:25.000Z
2019-04-11T01:29:45.000Z
rdl/data_sources/DataSourceFactory.py
PageUpPeopleOrg/relational-data-loader
0bac7036d65636d06eacca4e68e09d6e1c506ea4
[ "MIT" ]
1
2019-03-04T04:08:49.000Z
2019-03-04T04:08:49.000Z
import logging

from rdl.data_sources.MsSqlDataSource import MsSqlDataSource
from rdl.data_sources.AWSLambdaDataSource import AWSLambdaDataSource


class DataSourceFactory(object):
    def __init__(self, logger=None):
        self.logger = logger or logging.getLogger(__name__)
        self.sources = [MsSqlDataSource, AWSLambdaDataSource]

    def create_source(self, connection_string):
        for source in self.sources:
            if source.can_handle_connection_string(connection_string):
                self.logger.info(
                    f"Found handler '{source}' for given connection string."
                )
                return source(connection_string)
        raise RuntimeError(
            "There are no data sources that can handle this connection string"
        )

    def is_prefix_supported(self, connection_string):
        for source in self.sources:
            if source.can_handle_connection_string(connection_string):
                return True
        return False

    def get_supported_source_prefixes(self):
        return list(
            map(lambda source: source.get_connection_string_prefix(), self.sources)
        )
35.242424
83
0.674979
124
1,163
6.08871
0.41129
0.211921
0.029139
0.047682
0.24106
0.24106
0.24106
0.24106
0.24106
0.24106
0
0
0.263113
1,163
32
84
36.34375
0.88098
0
0
0.153846
0
0
0.100602
0
0
0
0
0
0
1
0.153846
false
0
0.115385
0.038462
0.461538
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
22d31dc3511cd477901e03ecc8f042e8c0f688bf
1,119
py
Python
imageclassification/src/sample/splitters/_StratifiedSplitter.py
waikato-datamining/keras-imaging
f044f883242895c18cfdb31a827bc32bdb0405ed
[ "MIT" ]
null
null
null
imageclassification/src/sample/splitters/_StratifiedSplitter.py
waikato-datamining/keras-imaging
f044f883242895c18cfdb31a827bc32bdb0405ed
[ "MIT" ]
null
null
null
imageclassification/src/sample/splitters/_StratifiedSplitter.py
waikato-datamining/keras-imaging
f044f883242895c18cfdb31a827bc32bdb0405ed
[ "MIT" ]
1
2020-04-16T15:29:28.000Z
2020-04-16T15:29:28.000Z
from collections import OrderedDict
from random import Random
from typing import Set

from .._types import Dataset, Split, LabelIndices
from .._util import per_label
from ._RandomSplitter import RandomSplitter
from ._Splitter import Splitter


class StratifiedSplitter(Splitter):
    """
    TODO
    """
    def __init__(self, percentage: float, labels: LabelIndices, random: Random = Random()):
        self._percentage = percentage
        self._labels = labels
        self._random = random

    def __str__(self) -> str:
        return f"strat-{self._percentage}"

    def __call__(self, dataset: Dataset) -> Split:
        subsets_per_label = per_label(dataset)

        sub_splits = {
            label: RandomSplitter(int(len(subsets_per_label[label]) * self._percentage), self._random)(subsets_per_label[label])
            for label in self._labels.keys()
        }

        result = OrderedDict(), OrderedDict()

        for filename, label in dataset.items():
            result_index = 0 if filename in sub_splits[label][0] else 1
            result[result_index][filename] = label

        return result
28.692308
128
0.671135
127
1,119
5.637795
0.362205
0.055866
0.062849
0.055866
0
0
0
0
0
0
0
0.003521
0.238606
1,119
38
129
29.447368
0.836854
0.003575
0
0
0
0
0.021838
0.021838
0
0
0
0.026316
0
1
0.12
false
0
0.28
0.04
0.52
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
22df608412513a1bf5e311a4eae60aa3f6a7a737
1,609
py
Python
python/test/test_dynamic_bitset.py
hagabb/katana
a52a688b90315a79aa95cf8d279fd7f949a3b94b
[ "BSD-3-Clause" ]
null
null
null
python/test/test_dynamic_bitset.py
hagabb/katana
a52a688b90315a79aa95cf8d279fd7f949a3b94b
[ "BSD-3-Clause" ]
null
null
null
python/test/test_dynamic_bitset.py
hagabb/katana
a52a688b90315a79aa95cf8d279fd7f949a3b94b
[ "BSD-3-Clause" ]
null
null
null
import pytest

from katana.dynamic_bitset import DynamicBitset

__all__ = []

SIZE = 50


@pytest.fixture
def dbs():
    return DynamicBitset(SIZE)


def test_set(dbs):
    dbs[10] = 1
    assert dbs[10]


def test_set_invalid_type(dbs):
    try:
        dbs[2.3] = 0
        assert False
    except TypeError:
        pass


def test_set_invalid_index_low(dbs):
    try:
        dbs[-1] = 1
        assert False
    except IndexError:
        pass


def test_set_invalid_index_high(dbs):
    try:
        dbs[SIZE] = 1
        assert False
    except IndexError:
        pass


def test_reset(dbs):
    dbs[10] = 1
    dbs.reset()
    assert not dbs[10]
    assert len(dbs) == SIZE


def test_reset_index(dbs):
    dbs[10] = 1
    dbs[10] = 0
    assert not dbs[10]


def test_reset_begin_end(dbs):
    dbs[10] = 1
    dbs[15] = 1
    dbs[12:17] = 0
    assert dbs[10]
    assert not dbs[15]


def test_reset_begin_end_invalid_step(dbs):
    try:
        dbs[12:17:22] = 0
        assert False
    except ValueError:
        pass


def test_reset_none_end(dbs):
    dbs[10] = 1
    dbs[15] = 1
    dbs[:12] = 0
    assert not dbs[10]
    assert dbs[15]


def test_resize(dbs):
    dbs.resize(20)
    assert len(dbs) == 20
    dbs[8] = 1
    dbs.resize(20)
    assert len(dbs) == 20
    assert dbs[8]
    dbs.resize(70)
    assert len(dbs) == 70
    assert dbs[8]
    assert dbs.count() == 1


def test_clear(dbs):
    dbs[10] = 1
    dbs.clear()
    assert len(dbs) == 0
    dbs.resize(20)
    assert len(dbs) == 20
    assert not dbs[10]


def test_count(dbs):
    dbs[10] = 1
    assert dbs.count() == 1
14.898148
47
0.580485
245
1,609
3.673469
0.208163
0.077778
0.062222
0.07
0.48
0.36
0.234444
0.206667
0.051111
0.051111
0
0.082216
0.304537
1,609
107
48
15.037383
0.722073
0
0
0.513158
0
0
0
0
0
0
0
0
0.289474
1
0.171053
false
0.052632
0.026316
0.013158
0.210526
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
22e6c10685bc8e3a610b18ebd720a7487a124de6
9,576
py
Python
object_detection/exporter_test.py
travisyates81/object-detection
931bebfa54798c08d2c401e9c1bad39015d8c832
[ "MIT" ]
1
2019-09-19T18:24:55.000Z
2019-09-19T18:24:55.000Z
object_detection/exporter_test.py
travisyates81/object-detection
931bebfa54798c08d2c401e9c1bad39015d8c832
[ "MIT" ]
null
null
null
object_detection/exporter_test.py
travisyates81/object-detection
931bebfa54798c08d2c401e9c1bad39015d8c832
[ "MIT" ]
null
null
null
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Travis Yates
"""Tests for object_detection.export_inference_graph."""
import os
import mock
import numpy as np
import tensorflow as tf

from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.core import model
from object_detection.protos import pipeline_pb2


class FakeModel(model.DetectionModel):

  def preprocess(self, inputs):
    return (tf.identity(inputs) *
            tf.get_variable('dummy', shape=(),
                            initializer=tf.constant_initializer(2),
                            dtype=tf.float32))

  def predict(self, preprocessed_inputs):
    return {'image': tf.identity(preprocessed_inputs)}

  def postprocess(self, prediction_dict):
    with tf.control_dependencies(prediction_dict.values()):
      return {
          'detection_boxes': tf.constant([[0.0, 0.0, 0.5, 0.5],
                                          [0.5, 0.5, 0.8, 0.8]], tf.float32),
          'detection_scores': tf.constant([[0.7, 0.6]], tf.float32),
          'detection_classes': tf.constant([[0, 1]], tf.float32),
          'num_detections': tf.constant([2], tf.float32)
      }

  def restore_fn(self, checkpoint_path, from_detection_checkpoint):
    pass

  def loss(self, prediction_dict):
    pass


class ExportInferenceGraphTest(tf.test.TestCase):

  def _save_checkpoint_from_mock_model(self, checkpoint_path,
                                       use_moving_averages):
    g = tf.Graph()
    with g.as_default():
      mock_model = FakeModel(num_classes=1)
      mock_model.preprocess(tf.constant([1, 3, 4, 3], tf.float32))
      if use_moving_averages:
        tf.train.ExponentialMovingAverage(0.0).apply()
      saver = tf.train.Saver()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        saver.save(sess, checkpoint_path)

  def _load_inference_graph(self, inference_graph_path):
    od_graph = tf.Graph()
    with od_graph.as_default():
      od_graph_def = tf.GraphDef()
      with tf.gfile.GFile(inference_graph_path) as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    return od_graph

  def _create_tf_example(self, image_array):
    with self.test_session():
      encoded_image = tf.image.encode_jpeg(tf.constant(image_array)).eval()

    def _bytes_feature(value):
      return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': _bytes_feature(encoded_image),
        'image/format': _bytes_feature('jpg'),
        'image/source_id': _bytes_feature('image_id')
    })).SerializeToString()
    return example

  def test_export_graph_with_image_tensor_input(self):
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      inference_graph_path = os.path.join(self.get_temp_dir(),
                                          'exported_graph.pbtxt')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=None,
          inference_graph_path=inference_graph_path)

  def test_export_graph_with_tf_example_input(self):
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      inference_graph_path = os.path.join(self.get_temp_dir(),
                                          'exported_graph.pbtxt')
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          checkpoint_path=None,
          inference_graph_path=inference_graph_path)

  def test_export_frozen_graph(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=False)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

  def test_export_frozen_graph_with_moving_averages(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=True)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = True
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

  def test_export_and_run_inference_with_image_tensor(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=False)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='image_tensor',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph) as sess:
      image_tensor = inference_graph.get_tensor_by_name('image_tensor:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={image_tensor: np.ones((1, 4, 4, 3)).astype(np.uint8)})
      self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
                                  [0.5, 0.5, 0.8, 0.8]])
      self.assertAllClose(scores, [[0.7, 0.6]])
      self.assertAllClose(classes, [[1, 2]])
      self.assertAllClose(num_detections, [2])

  def test_export_and_run_inference_with_tf_example(self):
    checkpoint_path = os.path.join(self.get_temp_dir(), 'model-ckpt')
    self._save_checkpoint_from_mock_model(checkpoint_path,
                                          use_moving_averages=False)
    inference_graph_path = os.path.join(self.get_temp_dir(),
                                        'exported_graph.pb')
    with mock.patch.object(
        model_builder, 'build', autospec=True) as mock_builder:
      mock_builder.return_value = FakeModel(num_classes=1)
      pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
      pipeline_config.eval_config.use_moving_averages = False
      exporter.export_inference_graph(
          input_type='tf_example',
          pipeline_config=pipeline_config,
          checkpoint_path=checkpoint_path,
          inference_graph_path=inference_graph_path)

    inference_graph = self._load_inference_graph(inference_graph_path)
    with self.test_session(graph=inference_graph) as sess:
      tf_example = inference_graph.get_tensor_by_name('tf_example:0')
      boxes = inference_graph.get_tensor_by_name('detection_boxes:0')
      scores = inference_graph.get_tensor_by_name('detection_scores:0')
      classes = inference_graph.get_tensor_by_name('detection_classes:0')
      num_detections = inference_graph.get_tensor_by_name('num_detections:0')
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={tf_example: self._create_tf_example(
              np.ones((4, 4, 3)).astype(np.uint8))})
      self.assertAllClose(boxes, [[0.0, 0.0, 0.5, 0.5],
                                  [0.5, 0.5, 0.8, 0.8]])
      self.assertAllClose(scores, [[0.7, 0.6]])
      self.assertAllClose(classes, [[1, 2]])
      self.assertAllClose(num_detections, [2])


if __name__ == '__main__':
  tf.test.main()
44.539535
77
0.680138
1,181
9,576
5.159187
0.130398
0.105695
0.064993
0.022977
0.684064
0.669457
0.659938
0.651567
0.651567
0.651567
0
0.016752
0.22076
9,576
214
78
44.747664
0.799786
0.012949
0
0.591398
0
0
0.057497
0
0
0
0
0
0.043011
1
0.080645
false
0.010753
0.048387
0.016129
0.172043
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
22f3a0221d0e140933a57d7b71e0a66cb6793a2d
5,761
py
Python
examples/DeepWisdom/Auto_NLP/deepWisdom/transformers_/__init__.py
zichuan-scott-xu/automl-workflow
d108e55da943775953b9f1801311a86ac07e58a0
[ "Apache-2.0" ]
3
2020-12-15T02:40:43.000Z
2021-01-14T02:32:13.000Z
examples/DeepWisdom/Auto_NLP/deepWisdom/transformers_/__init__.py
zichuan-scott-xu/automl-workflow
d108e55da943775953b9f1801311a86ac07e58a0
[ "Apache-2.0" ]
null
null
null
examples/DeepWisdom/Auto_NLP/deepWisdom/transformers_/__init__.py
zichuan-scott-xu/automl-workflow
d108e55da943775953b9f1801311a86ac07e58a0
[ "Apache-2.0" ]
4
2021-01-07T05:41:38.000Z
2021-04-07T08:02:22.000Z
__version__ = "2.1.1" # Work around to update TensorFlow's absl.logging threshold which alters the # default Python logging output behavior when present. # see: https://github.com/abseil/abseil-py/issues/99 # and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493 try: import absl.logging absl.logging.set_verbosity('info') absl.logging.set_stderrthreshold('info') absl.logging._warn_preinit_stderr = False except: pass import logging logger = logging.getLogger(__name__) # pylint: disable=invalid-name # Files and general utilities from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE, cached_path, add_start_docstrings, add_end_docstrings, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME, is_tf_available, is_torch_available) # Tokenizers from .tokenization_utils import (PreTrainedTokenizer) from .tokenization_auto import AutoTokenizer from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer from .tokenization_openai import OpenAIGPTTokenizer from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus) from .tokenization_gpt2 import GPT2Tokenizer from .tokenization_ctrl import CTRLTokenizer from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE from .tokenization_xlm import XLMTokenizer from .tokenization_roberta import RobertaTokenizer from .tokenization_distilbert import DistilBertTokenizer # Configurations from .configuration_utils import PretrainedConfig from .configuration_auto import AutoConfig from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # Modeling if is_torch_available(): from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D) from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering, AutoModelWithLMHead) from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction, BertForSequenceClassification, BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering, load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model, GPT2LMHeadModel, GPT2DoubleHeadsModel, load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel, CTRLLMHeadModel, 
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel, XLNetForSequenceClassification, XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForQuestionAnswering, load_tf_weights_in_xlnet, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_xlm import (XLMPreTrainedModel , XLMModel, XLMWithLMHeadModel, XLMForSequenceClassification, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLM_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_roberta import (RobertaForMaskedLM, RobertaModel, RobertaForSequenceClassification, RobertaForMultipleChoice, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_distilbert import (DistilBertForMaskedLM, DistilBertModel, DistilBertForSequenceClassification, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP) from .modeling_albert import AlbertForSequenceClassification # Optimization from .optimization import (AdamW, ConstantLRSchedule, WarmupConstantSchedule, WarmupCosineSchedule, WarmupCosineWithHardRestartsSchedule, WarmupLinearSchedule) if not is_tf_available() and not is_torch_available(): logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found." "Models won't be available and only tokenizers, configuration" "and file/data utilities can be used.")
59.391753
109
0.740844
519
5,761
7.870906
0.387283
0.046512
0.061689
0.063647
0.188005
0.188005
0.053856
0.053856
0.053856
0.053856
0
0.007764
0.217497
5,761
96
110
60.010417
0.898403
0.063183
0
0.025641
0
0
0.030269
0
0
0
0
0
0
1
0
false
0.012821
0.5
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
22feb380588bd77256d844c8ff999d4f5568fa43
1,499
py
Python
setup.py
ovnicraft/runa
4834b7467314c51c3e8e010b47a10bdfae597a5b
[ "MIT" ]
5
2018-02-02T13:12:55.000Z
2019-12-21T04:21:10.000Z
setup.py
ovnicraft/runa
4834b7467314c51c3e8e010b47a10bdfae597a5b
[ "MIT" ]
1
2017-12-18T15:49:13.000Z
2017-12-18T15:49:13.000Z
setup.py
ovnicraft/runa
4834b7467314c51c3e8e010b47a10bdfae597a5b
[ "MIT" ]
1
2020-03-17T03:50:19.000Z
2020-03-17T03:50:19.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

with open("README.rst") as readme_file:
    readme = readme_file.read()

with open("HISTORY.rst") as history_file:
    history = history_file.read()

requirements = ["Click>=6.0", "suds2==0.7.1"]

setup_requirements = [
    # TODO(ovnicraft): put setup requirements (distutils extensions, etc.) here
]

test_requirements = [
    # TODO: put package test requirements here
]

setup(
    name="runa",
    version="0.2.10",
    description="Library for using the WS of the Bus Gubernamental de Ecuador",
    long_description=readme + "\n\n" + history,
    author="Cristian Salamea",
    author_email="cristian.salamea@gmail.com",
    url="https://github.com/ovnicraft/runa",
    packages=find_packages(include=["runa"]),
    entry_points={"console_scripts": ["runa=runa.cli:main"]},
    include_package_data=True,
    install_requires=requirements,
    license="MIT license",
    zip_safe=False,
    keywords="runa webservices ecuador bgs",
    classifiers=[
        "Development Status :: 3 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
    ],
    test_suite="tests",
    tests_require=test_requirements,
    setup_requires=setup_requirements,
)
28.826923
79
0.662442
177
1,499
5.491525
0.581921
0.052469
0.07716
0.080247
0
0
0
0
0
0
0
0.014913
0.194797
1,499
51
80
29.392157
0.790389
0.116745
0
0
0
0
0.384791
0.019772
0
0
0
0.019608
0
1
0
false
0
0.026316
0
0.026316
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
fe02b43015e3d0762066c7be3eb1af3c04bff4d4
2,757
py
Python
section_07_(files)/read_csv.py
govex/python-lessons
e692f48b6db008a45df0b941dee1e580f5a6c800
[ "MIT" ]
5
2019-10-25T20:47:22.000Z
2021-12-07T06:37:22.000Z
section_07_(files)/read_csv.py
govex/python-lessons
e692f48b6db008a45df0b941dee1e580f5a6c800
[ "MIT" ]
null
null
null
section_07_(files)/read_csv.py
govex/python-lessons
e692f48b6db008a45df0b941dee1e580f5a6c800
[ "MIT" ]
1
2021-07-20T18:56:15.000Z
2021-07-20T18:56:15.000Z
# If you're new to file handling, be sure to check out with_open.py first!
# You'll also want to check out read_text.py before this example. This one is a bit more advanced.

with open('read_csv.csv', 'r') as states_file:
    # Instead of leaving the file contents as a string, we're splitting the file into a list
    # at every new line, and we save that list into the variable states
    states = states_file.read().split("\n")

# Since this is a spreadsheet in comma separated values (CSV) format, we can think of states
# as a list of rows. But we'll need to split the columns into a list as well!
for index, state in enumerate(states):
    states[index] = state.split(",")

# Now we have a nested list with all of the information!
# Our file looks like this:
# State, Population Estimate, Percent of Total population
# California, 38332521, 11.91%
# Texas, 26448193, 8.04%
# ...

# Our header row is at state[0], so we can use that to display the information in a prettier way.
for state in states[1:]:  # We use [1:] so we skip the header row.
    # state[0] is the first column in the row, which contains the name of the state.
    print("\n---{0}---".format(state[0]))
    for index, info in enumerate(state[1:]):  # We use [1:] so we don't repeat the state name.
        print("{0}:\t{1}".format(states[0][index+1], info))

# states is the full list of all of the states. It's a nested list. The outer list contains
# the rows, each inner list contains the columns in that row.
# states[0] refers to the header row of the list
# So states[0][0] would refer to "State", states[0][1] would refer to "Population Estimate",
# and states[0][2] would refer to "Percent of total population"
# state is one state within states. state is also a list, containing the name, population,
# and percentage of that particular state.
# So the first time through the loop, state[0] would refer to "California", state[1] would
# refer to 38332521, and state[2] would refer to 11.91%
# Since state is being create by the for loop in line 24, it gets a new value each time through.
# We're using enumerate to get the index (slicing number) of the column we're on, along with
# the information.
# That way we can pair the column name with the information, as shown in line 30.
# NOTE: Since we're slicing from [1:] in line 29, we need to increase the index by + 1,
# otherwise our headers will be off by one.

# Sample output:
# ---"California"---
# "Population Estimate": 38332521
# "Percent of Total population": "11.91%"
# ---"Texas"---
# "Population Estimate": 26448193
# "Percent of Total population": "8.04%"
# ---"New York"---
# "Population Estimate": 19651127
# "Percent of Total population": "6.19%"
48.368421
158
0.692057
470
2,757
4.048936
0.329787
0.031529
0.037835
0.063058
0.011561
0.011561
0
0
0
0
0
0.045205
0.205658
2,757
56
159
49.232143
0.823744
0.821545
0
0
0
0
0.078775
0
0
0
0
0
0
1
0
false
0
0
0
0
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
fe0ae5c8386d6c3d6f937a81ff9888fef7e3e87d
215
py
Python
hwtest/automated/usb3_test.py
crvallance/wlanpi-hwtest
8858ef6e8fa78767238b968b121b4d5ab2155701
[ "MIT" ]
null
null
null
hwtest/automated/usb3_test.py
crvallance/wlanpi-hwtest
8858ef6e8fa78767238b968b121b4d5ab2155701
[ "MIT" ]
null
null
null
hwtest/automated/usb3_test.py
crvallance/wlanpi-hwtest
8858ef6e8fa78767238b968b121b4d5ab2155701
[ "MIT" ]
null
null
null
from hwtest.shell_utils import run_command


def test_linux_usb3hub():
    """
    Test for Linux Foundation 3.0 root hub
    in `lsusb` output
    """
    resp = run_command(["lsusb"])
    assert "1d6b:0003" in resp
17.916667
60
0.665116
31
215
4.451613
0.774194
0.144928
0
0
0
0
0
0
0
0
0
0.054217
0.227907
215
11
61
19.545455
0.777108
0.260465
0
0
0
0
0.097902
0
0
0
0
0
0.25
1
0.25
false
0
0.25
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
fe1823b5cc5e17b94ed66896e05441088fc1ee56
1,503
py
Python
Class Work oop.py
fatimatswanya/fatimaCSC102
cab70bd696d39a9e16bcb57e0180e872be4f49bc
[ "MIT" ]
null
null
null
Class Work oop.py
fatimatswanya/fatimaCSC102
cab70bd696d39a9e16bcb57e0180e872be4f49bc
[ "MIT" ]
null
null
null
Class Work oop.py
fatimatswanya/fatimaCSC102
cab70bd696d39a9e16bcb57e0180e872be4f49bc
[ "MIT" ]
null
null
null
class Student:
    studentLevel = 'first year computer science 2020/2021 session'
    studentCounter = 0
    registeredCourse = 'csc102'

    def __init__(self, thename, thematricno, thesex, thehostelname, theage, thecsc102examscore):
        self.name = thename
        self.matricno = thematricno
        self.sex = thesex
        self.hostelname = thehostelname
        self.age = theage
        self.csc102examscore = thecsc102examscore
        Student.studentCounter = Student.studentCounter + 1

    def getName(self):
        return self.name

    def setName(self, thenewName):
        self.name = thenewName

    def agedeterminer(self):
        if self.age > 16:
            print('Student is above 16')

    def finalscore(self):
        if self.csc102examscore < 45:
            print('You will carryover this course, sorry')
        else:
            print('You have passed')

    @classmethod
    def course(cls):  # a classmethod receives the class as its first argument
        print(f'Students registered course is {Student.registeredCourse}')

    @staticmethod
    def PAUNanthem():
        print('Pau, here we come, Pau, here we come ')

    @staticmethod
    def ODDorEVEN(num):
        if num % 2 == 0:
            print('Number is even')
        else:
            print('Number is odd')

    @classmethod
    def studentnum(cls):
        print(Student.studentCounter)


# age and exam score are passed as numbers so the > and < comparisons above work
student1 = Student('James Kaka', '021074', 'M', 'Amethyst', 16, 49)
print(student1.getName())
student1.setName('James Gaga')
print(student1.getName())
Student.PAUNanthem()
26.368421
93
0.632069
158
1,503
5.987342
0.487342
0.02537
0.021142
0.027484
0
0
0
0
0
0
0
0.042572
0.265469
1,503
57
94
26.368421
0.814312
0
0
0.181818
0
0
0.186959
0.017299
0
0
0
0
0
1
0.204545
false
0.022727
0
0.022727
0.318182
0.227273
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
fe18f53bb174876b9174543e0887f93aad3f8c21
6,686
py
Python
tests/test_disque.py
abdul-khalid/pydisque
a9b5caa6dac0621a0174d168f4a04c88d0e2f8b5
[ "MIT" ]
1
2019-02-28T09:48:22.000Z
2019-02-28T09:48:22.000Z
tests/test_disque.py
abdul-khalid/pydisque
a9b5caa6dac0621a0174d168f4a04c88d0e2f8b5
[ "MIT" ]
null
null
null
tests/test_disque.py
abdul-khalid/pydisque
a9b5caa6dac0621a0174d168f4a04c88d0e2f8b5
[ "MIT" ]
null
null
null
""" Unit Tests for the pydisque module. Currently, most of these tests require a fresh instance of Disque to be valid and pass. """ import unittest import json import time import random import six from pydisque.client import Client from redis.exceptions import ResponseError class TestDisque(unittest.TestCase): """TestCase class for pydisque.""" testID = None def setUp(self): """Setup the tests.""" self.client = Client(['localhost:7711']) self.client.connect() self.testID = "%d.%d" % (time.time(), random.randint(1000, 1000000)) def test_publish_and_receive(self): """Test the most important functions of pydisque.""" t1 = str(time.time()) self.client.add_job("test_q", t1, timeout=100) jobs = self.client.get_job(['test_q']) assert len(jobs) == 1 for queue_name, job_id, job in jobs: assert job == six.b(t1) self.client.ack_job(job_id) assert len(self.client.get_job(['test_q'], timeout=100)) == 0 def test_nack(self): """Fetch the queue, return a job, check that it's back.""" t1 = str(time.time()) queuename = "test_nack." + self.testID self.client.add_job(queuename, str(t1), timeout=100) jobs = self.client.get_job([queuename]) # NACK the first read assert len(jobs) == 1 for queue_name, job_id, job in jobs: assert len(jobs) == 1 assert job == six.b(t1) self.client.nack_job(job_id) # this time ACK it jobs = self.client.get_job([queuename]) assert len(jobs) == 1 for queue_name, job_id, job in jobs: assert job == six.b(t1) self.client.ack_job(job_id) assert len(self.client.get_job([queuename], timeout=100)) == 0 def test_qpeek(self): """ Test qpeek. Ran into some problems with an ENQUEUE/DEQUEUE test that was using qpeek, checking core functionality of qpeek(). """ queuename = "test_qpeek-%s" % self.testID job_id = self.client.add_job(queuename, "Peek A Boo") peeked = self.client.qpeek(queuename, 1) assert peeked[0][1] == job_id def test_qscan(self): """ Test the qscan function. This test relies on add_job() being functional, and the local disque not being a disque proxy to a mesh. TODO: unique the queues with self.testID. """ t1 = str(time.time()) self.client.add_job("q1", t1, timeout=100) self.client.add_job("q2", t1, timeout=100) qb = self.client.qscan() assert qb[0] assert qb[1] assert six.b("q1") in qb[1] assert six.b("q2") in qb[1] def test_jscan(self): """Simple test of the jscan function.""" t1 = time.time() queuename = "test_jscan-%s" % self.testID j1 = self.client.add_job(queuename, str(t1), timeout=100) jerbs = self.client.jscan(queue=queuename) assert j1 in jerbs[1] def test_del_job(self): """Simple test of del_job, needs qpeek. FIXME: This function has grown ugly. """ t1 = time.time() queuename = "test_del_job-%s" % self.testID j1 = self.client.add_job(queuename, str(t1)) jerbs = self.client.qpeek(queuename, 1) jlist = [] for item in jerbs: jlist.append(item[1]) assert j1 in jlist self.client.del_job(j1) jerbs = self.client.qpeek(queuename, 1) jlist = [] for item in jerbs: jlist.append(item[1]) assert j1 not in jerbs def test_qlen(self): """Simple test of qlen.""" queuename = "test_qlen-%s" % self.testID lengthOfTest = 100 test_job = "Useless Job." 
for x in range(lengthOfTest): self.client.add_job(queuename, test_job) assert self.client.qlen(queuename) == lengthOfTest def test_qstat(self): """Testing QSTAT (default behavior).""" queuename = "test_qstat-%s" % self.testID testqueue = ["a", "b", "c"] for x in testqueue: self.client.add_job(queuename, x) stat = self.client.qstat(queuename) # check the basics assert 'jobs-in' in stat assert 'jobs-out' in stat def test_qstat_dict(self): """Testing QSTAT's (new dict behavior).""" queuename = "test_qstat_dict-%s" % self.testID testqueue = ["a", "b", "c"] for x in testqueue: self.client.add_job(queuename, x) stat = self.client.qstat(queuename, True) assert stat.get('jobs-in', None) is not None assert stat.get('jobs-out', None) is not None def test_shownack(self): """Test that NACK and SHOW work appropriately.""" queuename = "test_show-%s" % self.testID test_job = "Show me." self.client.add_job(queuename, test_job) jobs = self.client.get_job([queuename]) for queue_name, job_id, job in jobs: self.client.nack_job(job_id) shown = self.client.show(job_id, True) assert shown.get('body') == test_job assert shown.get('nacks') == 1 def test_pause(self): """Test that a PAUSE message is acknowledged.""" queuename = "test_show-%s" % self.testID test_job = "Jerbs, they are a thing" self.client.pause(queuename, kw_in=True) try: job_id = self.client.add_job(queuename, test_job) except ResponseError: pass # can we add a job again? self.client.pause(queuename, kw_none=True) job_id = self.client.add_job(queuename, test_job) jobs = self.client.get_job([queuename]) # TODO(canardleteer): add a test of PAUSE SHOW def test_get_job(self): queue_name = "test_get_job." + self.testID job = str(time.time()) job_id = self.client.add_job(queue_name, job) expected = [(queue_name, job_id, job)] got = self.client.get_job([queue_name], withcounters=False) assert expected == got def test_get_job_withcounters(self): queue_name = "test_get_job." + self.testID job = str(time.time()) job_id = self.client.add_job(queue_name, job) nacks = 0 additional_deliveries = 0 expected = [(queue_name, job_id, job, nacks, additional_deliveries)] got = self.client.get_job([queue_name], withcounters=True) assert expected == got if __name__ == '__main__': unittest.main()
28.695279
76
0.588095
893
6,686
4.270997
0.195969
0.110121
0.051127
0.062926
0.455427
0.404562
0.369953
0.347142
0.272155
0.248034
0
0.017797
0.294047
6,686
232
77
28.818966
0.790254
0.147173
0
0.382353
0
0
0.053326
0
0
0
0
0.012931
0.191176
1
0.102941
false
0.007353
0.051471
0
0.169118
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
a3aceb33684c4eb53e7c078943f4c37d7dd1af91
4,321
py
Python
airspace_surgery.py
wipfli/airspaces
c2e01615fa6a065895ed04b8f342a38732e9196b
[ "Apache-2.0" ]
1
2021-12-28T23:40:51.000Z
2021-12-28T23:40:51.000Z
airspace_surgery.py
wipfli/airspaces
c2e01615fa6a065895ed04b8f342a38732e9196b
[ "Apache-2.0" ]
1
2021-01-30T13:15:14.000Z
2021-02-07T14:50:27.000Z
airspace_surgery.py
wipfli/aviation
c2e01615fa6a065895ed04b8f342a38732e9196b
[ "Apache-2.0" ]
null
null
null
import glob
import json

path_in = './airspaces/'
path_out = './airspaces_processed/'

filenames = [path.split('/')[-1] for path in glob.glob(path_in + '*')]

remove = {
    'france_fr.geojson': [
        314327,
        314187,
        314360,
        314359,
        314362,
        314361,
        314364,
        314363,
        314333,
        314329,
        314331,
    ],
    'germany_de.geojson': [
        307563,
        307638,
        307639,
        307640,
    ]
}

replacements = {
    'france_fr.geojson': [
        ['Bale10 119.35', 'Bale 10 TMA 130.9'],
        ['Bale1 119.35', 'Bale 1 TMA 130.9'],
        ['Bale2 119.35', 'Bale 2 TMA 130.9'],
        ['Bale3 119.35', 'Bale 3 TMA 130.9'],
        ['Bale4 119.35', 'Bale 4 TMA 130.9'],
        ['Bale5 119.35', 'Bale 5 TMA 130.9'],
        ['Bale5 119.35', 'Bale 5 TMA 130.9'],
        ['Bale6 119.35', 'Bale 6 TMA 130.9'],
        ['Bale7 119.35', 'Bale 7 TMA 130.9'],
        ['Bale8 119.35', 'Bale 8 TMA 130.9'],
        ['Bale9 119.35', 'Bale 9 TMA 130.9'],
        ['Bale AZ4T1 134.67', 'Bale T1 TMA HX 134.68'],
        ['Bale AZ4T2 134.67', 'Bale T2 TMA HX 134.68'],
        ['Bale AZ4T3 134.67', 'Bale T3 TMA HX 134.68'],
        ['CTR BALE', 'Bale CTR 118.3']
    ],
    'switzerland_ch.geojson': [
        ['ZURICH 10 TMA 118.1', 'ZURICH 10 TMA 124.7'],
        ['ZURICH 11 TMA 118.1', 'ZURICH 11 TMA 124.7'],
        ['ZURICH 12 TMA 118.1', 'ZURICH 12 TMA 124.7'],
        ['ZURICH 13 TMA 118.1', 'ZURICH 13 TMA 124.7'],
        ['ZURICH 14 TMA 118.1', 'ZURICH 14 TMA HX 127.755'],
        ['ZURICH 15 TMA 118.1', 'ZURICH 15 TMA HX 127.755'],
        ['ZURICH 1 TMA 118.1', 'ZURICH 1 TMA 124.7'],
        ['ZURICH 2 CTR 118.1', 'ZURICH 2 CTR HX 118.975'],
        ['ZURICH 2 TMA 118.1', 'ZURICH 2 TMA 124.7'],
        ['ZURICH 3 TMA 118.1', 'ZURICH 3 TMA 124.7'],
        ['ZURICH 4A TMA 118.1', 'ZURICH 4A TMA 124.7'],
        ['ZURICH 4B TMA 118.1', 'ZURICH 4B TMA 124.7'],
        ['ZURICH 4C TMA 118.1', 'ZURICH 4C TMA 124.7'],
        ['ZURICH 5 TMA 118.1', 'ZURICH 5 TMA 124.7'],
        ['ZURICH 6 TMA 118.1', 'ZURICH 6 TMA 124.7'],
        ['ZURICH 7 TMA 118.1', 'ZURICH 7 TMA 124.7'],
        ['ZURICH 8 TMA 118.1', 'ZURICH 8 TMA 124.7'],
        ['ZURICH 9 TMA 118.1', 'ZURICH 9 TMA 124.7'],
        ['BERN 1 TMA 121.025', 'BERN 1 TMA HX 127.325'],
        ['BERN 2 TMA 121.025', 'BERN 2 TMA HX 127.325'],
        ['BERN CTR 121.025', 'BERN CTR HX 121.025'],
        ['EMMEN 1 CTR 120.425', 'EMMEN 1 CTR HX 120.425'],
        ['EMMEN 1 TMA 120.425', 'EMMEN 1 TMA HX 134.130'],
        ['EMMEN 2 CTR 120.425', 'EMMEN 2 CTR HX 120.425'],
        ['EMMEN 2 TMA 120.425', 'EMMEN 2 TMA HX 134.130'],
        ['EMMEN 3 TMA 120.425', 'EMMEN 3 TMA HX 134.130'],
        ['EMMEN 4 TMA 120.425', 'EMMEN 4 TMA HX 134.130'],
        ['EMMEN 5 TMA 120.425', 'EMMEN 5 TMA HX 134.130'],
        ['EMMEN 6 TMA 120.425', 'EMMEN 6 TMA HX 134.130'],
    ]
}

for filename in filenames:
    print(filename)
    with open(path_in + filename) as f:
        data = json.load(f)

    if filename in replacements:
        targets = [r[0] for r in replacements[filename]]
        for feature in data['features']:
            if feature['properties']['N'] in targets:
                print('replace ' + feature['properties']['N'] + '...')
                feature['properties']['N'] = next(x for x in replacements[filename] if x[0] == feature['properties']['N'])[1]

    if filename in remove:
        features_out = [f for f in data['features'] if int(f['properties']['ID']) not in remove[filename]]
    else:
        features_out = data['features']

    print('removed ' + str(len(data['features']) - len(features_out)) + ' features')

    geojson = {
        'type': 'FeatureCollection',
        'features': features_out
    }

    print('write ' + filename + '...')
    with open(path_out + filename, 'w') as f:
        json.dump(geojson, f)

all_features = []
for filename in filenames:
    print('read ' + filename + '...')
    with open(path_out + filename) as f:
        all_features += json.load(f)['features']

print('write airspaces.geojson...')
with open('airspaces.geojson', 'w') as f:
    json.dump({
        'type': 'FeatureCollection',
        'features': all_features
    }, f)

print('done')
34.023622
125
0.532053
639
4,321
3.56964
0.197183
0.031565
0.078913
0.096887
0.187199
0.04954
0.022359
0.022359
0.022359
0.022359
0
0.200856
0.297153
4,321
126
126
34.293651
0.550214
0
0
0.090909
0
0
0.44573
0.010183
0
0
0
0
0
1
0
false
0
0.018182
0
0.018182
0.063636
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a3b4f00010ceb5e0331d09eb4a19ef587eba8526
348
py
Python
groundstation/broadcast_events/__init__.py
richo/groundstation
7ed48dd355051ee6b71164fc801e3893c09d11db
[ "MIT" ]
26
2015-06-18T20:17:07.000Z
2019-09-26T09:55:35.000Z
groundstation/broadcast_events/__init__.py
richo/groundstation
7ed48dd355051ee6b71164fc801e3893c09d11db
[ "MIT" ]
null
null
null
groundstation/broadcast_events/__init__.py
richo/groundstation
7ed48dd355051ee6b71164fc801e3893c09d11db
[ "MIT" ]
5
2015-07-20T01:52:47.000Z
2017-01-08T09:54:07.000Z
from broadcast_ping import BroadcastPing

EVENT_TYPES = {
    "PING": BroadcastPing,
}


class UnknownBroadcastEvent(Exception):
    pass


def new_broadcast_event(data):
    event_type, payload = data.split(" ", 1)
    if event_type not in EVENT_TYPES:
        raise UnknownBroadcastEvent(event_type)
    return EVENT_TYPES[event_type](payload)
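# A hedged usage sketch (not part of the original module): new_broadcast_event() splits a
# raw broadcast string into its type tag and payload, then dispatches via EVENT_TYPES.
# "PING" is the only type registered above, so any other tag raises UnknownBroadcastEvent.
if __name__ == "__main__":
    event = new_broadcast_event("PING some-payload")  # -> BroadcastPing("some-payload")
    print(type(event).__name__)
    try:
        new_broadcast_event("BOGUS data")
    except UnknownBroadcastEvent as e:
        print("unknown event type:", e)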
23.2
47
0.732759
41
348
5.97561
0.560976
0.146939
0.130612
0
0
0
0
0
0
0
0
0.003534
0.186782
348
14
48
24.857143
0.862191
0
0
0
0
0
0.014368
0
0
0
0
0
0
1
0.090909
false
0.090909
0.090909
0
0.363636
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
a3b8b5beaa0f8d8ecd98462fe75b978547dc1472
4,248
py
Python
Python X/Dictionaries in python.py
nirobio/puzzles
fda8c84d8eefd93b40594636fb9b7f0fde02b014
[ "MIT" ]
null
null
null
Python X/Dictionaries in python.py
nirobio/puzzles
fda8c84d8eefd93b40594636fb9b7f0fde02b014
[ "MIT" ]
null
null
null
Python X/Dictionaries in python.py
nirobio/puzzles
fda8c84d8eefd93b40594636fb9b7f0fde02b014
[ "MIT" ]
null
null
null
{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "# dictionaries, look-up tables & key-value pairs\n", "# d = {} OR d = dict()\n", "# e.g. d = {\"George\": 24, \"Tom\": 32}\n", "\n", "d = {}\n", "\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "d[\"George\"] = 24" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "d[\"Tom\"] = 32\n", "d[\"Jenny\"] = 16" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "{'George': 24, 'Tom': 32, 'Jenny': 16}\n" ] } ], "source": [ "print(d)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'Jenny' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m<ipython-input-5-0bdfff196d23>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mJenny\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;31mNameError\u001b[0m: name 'Jenny' is not defined" ] } ], "source": [ "print(d[Jenny])" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "32\n" ] } ], "source": [ "print(d[\"Tom\"])" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "d[\"Jenny\"] = 20" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "20\n" ] } ], "source": [ "print(d[\"Jenny\"])" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# keys are strings or numbers \n", "\n", "d[10] = 100" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "100\n" ] } ], "source": [ "print(d[10])" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "# how to iterate over key-value pairs" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "key:\n", "George\n", "value:\n", "24\n", "\n", "key:\n", "Tom\n", "value:\n", "32\n", "\n", "key:\n", "Jenny\n", "value:\n", "20\n", "\n", "key:\n", "10\n", "value:\n", "100\n", "\n" ] } ], "source": [ " for key, value in d.items():\n", " print(\"key:\")\n", " print(key)\n", " print(\"value:\")\n", " print(value)\n", " print(\"\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.7.6" } }, "nbformat": 4, "nbformat_minor": 4 }
18.88
354
0.439266
439
4,248
4.161731
0.246014
0.061303
0.085386
0.149425
0.490969
0.277504
0.277504
0.242474
0.08867
0.041598
0
0.078859
0.298493
4,248
224
355
18.964286
0.534228
0
0
0.392857
0
0.004464
0.535546
0.113701
0
0
0
0
0
1
0
true
0
0
0
0
0.049107
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
a3c17e6746a0528783d5b0c338fdad4e4910e00a
1,976
py
Python
misc/python/materialize/checks/insert_select.py
guswynn/materialize
f433173ed71f511d91311769ec58c2d427dd6c3b
[ "MIT" ]
null
null
null
misc/python/materialize/checks/insert_select.py
guswynn/materialize
f433173ed71f511d91311769ec58c2d427dd6c3b
[ "MIT" ]
157
2021-12-28T19:17:45.000Z
2022-03-31T17:44:27.000Z
misc/python/materialize/checks/insert_select.py
guswynn/materialize
f433173ed71f511d91311769ec58c2d427dd6c3b
[ "MIT" ]
null
null
null
# Copyright Materialize, Inc. and contributors. All rights reserved.
#
# Use of this software is governed by the Business Source License
# included in the LICENSE file at the root of this repository.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0.

from textwrap import dedent
from typing import List

from materialize.checks.actions import Testdrive
from materialize.checks.checks import Check


class InsertSelect(Check):
    def initialize(self) -> Testdrive:
        return Testdrive(
            dedent(
                """
                > CREATE TABLE insert_select_destination (f1 STRING);
                > CREATE TABLE insert_select_source_table (f1 STRING);
                > INSERT INTO insert_select_source_table SELECT 'T1' || generate_series FROM generate_series(1,10000);
                """
            )
        )

    def manipulate(self) -> List[Testdrive]:
        return [
            Testdrive(dedent(s))
            for s in [
                """
                > INSERT INTO insert_select_source_table SELECT 'T2' || generate_series FROM generate_series(1, 10000);
                > INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table;
                """,
                """
                > INSERT INTO insert_select_source_table SELECT 'T3' || generate_series FROM generate_series(1, 10000);
                > INSERT INTO insert_select_destination SELECT * FROM insert_select_source_table;
                """,
            ]
        ]

    def validate(self) -> Testdrive:
        return Testdrive(
            dedent(
                """
                > SELECT LEFT(f1, 2), COUNT(*), COUNT(DISTINCT f1) FROM insert_select_destination GROUP BY LEFT(f1, 2);
                T1 20000 10000
                T2 20000 10000
                T3 10000 10000
                """
            )
        )
34.666667
119
0.598684
217
1,976
5.304147
0.359447
0.104257
0.093831
0.119896
0.374457
0.315378
0.315378
0.180712
0.180712
0.180712
0
0.047619
0.330466
1,976
56
120
35.285714
0.822373
0.18168
0
0.173913
0
0
0
0
0
0
0
0
0
1
0.130435
false
0
0.173913
0.130435
0.478261
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
a3c978469e28670107c4646aa77b54f6269dda05
2,244
py
Python
tests/test_prior.py
frodre/LMR
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
[ "BSD-3-Clause" ]
17
2018-08-27T18:50:36.000Z
2021-03-17T22:48:55.000Z
tests/test_prior.py
mingsongli/LMR
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
[ "BSD-3-Clause" ]
5
2018-10-15T22:13:27.000Z
2019-04-26T11:45:58.000Z
tests/test_prior.py
mingsongli/LMR
4c00d3f9db96447e69bd3f426d59524f7b5f3ef5
[ "BSD-3-Clause" ]
11
2018-10-11T19:35:34.000Z
2021-08-17T12:08:11.000Z
import sys

sys.path.append('../')

import LMR_config as cfg
import LMR_prior
import numpy as np
import pytest


def test_prior_seed():
    cfg_obj = cfg.Config(**{'core': {'seed': 2}})
    prior_cfg = cfg_obj.prior
    prior_source = '20cr'
    datadir_prior = 'data'
    datafile_prior = '[vardef_template]_gridded_dat.nc'
    state_variables = {'air': 'anom'}
    state_kind = 'anom'

    X = LMR_prior.prior_assignment(prior_source)
    X.prior_datadir = datadir_prior
    X.prior_datafile = datafile_prior
    X.statevars = state_variables
    X.Nens = 1
    X.detrend = False
    X.kind = state_kind
    X.avgInterval = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    X.populate_ensemble(prior_source, prior_cfg)

    X2 = LMR_prior.prior_assignment(prior_source)
    X2.prior_datadir = datadir_prior
    X2.prior_datafile = datafile_prior
    X2.statevars = state_variables
    X2.Nens = 1
    X2.detrend = False
    X2.kind = state_kind
    X2.avgInterval = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    X2.populate_ensemble(prior_source, prior_cfg)

    np.testing.assert_equal(X2.ens, X.ens)


def test_prior_use_full_prior():
    cfg_obj = cfg.Config(**{'core': {'seed': None}})
    prior_cfg = cfg_obj.prior
    prior_source = '20cr'
    datadir_prior = 'data'
    datafile_prior = '[vardef_template]_gridded_dat.nc'
    state_variables = {'air': 'anom'}
    state_kind = 'anom'
    avgInterval = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]

    X = LMR_prior.prior_assignment(prior_source)
    X.prior_datadir = datadir_prior
    X.prior_datafile = datafile_prior
    X.statevars = state_variables
    X.Nens = None
    X.detrend = False
    X.kind = state_kind
    X.avgInterval = avgInterval

    X.populate_ensemble(prior_source, prior_cfg)

    X2 = LMR_prior.prior_assignment(prior_source)
    X2.prior_datadir = datadir_prior
    X2.prior_datafile = datafile_prior
    X2.statevars = state_variables
    X2.Nens = None
    X2.detrend = False
    X2.kind = state_kind
    X2.avgInterval = avgInterval

    X2.read_prior()

    # Transform full prior into ensemble-like shape
    prior_vals = X2.prior_dict['air']['value']
    prior_vals = prior_vals.reshape(prior_vals.shape[0], -1)
    prior_vals = prior_vals.T

    np.testing.assert_equal(X.ens, prior_vals)
24.933333
60
0.685829
333
2,244
4.363363
0.216216
0.068135
0.035788
0.063317
0.75086
0.75086
0.695114
0.695114
0.695114
0.598761
0
0.041134
0.198307
2,244
89
61
25.213483
0.766537
0.020053
0
0.59375
0
0
0.058824
0.029184
0
0
0
0
0.03125
1
0.03125
false
0
0.078125
0
0.109375
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a3d28839e9a9ab62ac7936ca858e4cb438e092b3
16,562
py
Python
tests/test_mag.py
jdddog/mag-archiver
079e735e610d6b81b3ac8dc479d4f93bb0aacb11
[ "Apache-2.0" ]
null
null
null
tests/test_mag.py
jdddog/mag-archiver
079e735e610d6b81b3ac8dc479d4f93bb0aacb11
[ "Apache-2.0" ]
null
null
null
tests/test_mag.py
jdddog/mag-archiver
079e735e610d6b81b3ac8dc479d4f93bb0aacb11
[ "Apache-2.0" ]
null
null
null
# Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Author: James Diprose

import os
import unittest
from unittest.mock import patch

import pendulum
from azure.common import AzureMissingResourceHttpError
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import ContainerProperties

from mag_archiver.azure import create_table
from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, \
    hide_if_not_none


class TestMag(unittest.TestCase):

    def test_hide_if_not_none(self):
        # Test that None is returned for None
        value = hide_if_not_none(None)
        self.assertEqual(value, None)

        # Test that 'hidden' is returned: string
        value = hide_if_not_none('hello world')
        self.assertEqual(value, 'hidden')

        # Test that 'hidden' is returned: integer
        value = hide_if_not_none(123)
        self.assertEqual(value, 'hidden')

    def test_make_mag_query(self):
        start_date = pendulum.datetime(year=2020, month=4, day=1)
        end_date = pendulum.datetime(year=2020, month=5, day=1)

        # No parameters
        query = make_mag_query()
        self.assertEqual(query, '')

        # State parameter
        query = make_mag_query(state=MagState.discovered)
        self.assertEqual(query, "State eq 'discovered'")

        query = make_mag_query(state=MagState.archived)
        self.assertEqual(query, "State eq 'archived'")

        query = make_mag_query(state=MagState.done)
        self.assertEqual(query, "State eq 'done'")

        # Start date parameter
        query = make_mag_query(start_date=start_date, date_type=MagDateType.release)
        self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z'")

        query = make_mag_query(start_date=start_date, date_type=MagDateType.discovered)
        self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z'")

        query = make_mag_query(start_date=start_date, date_type=MagDateType.archived)
        self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z'")

        query = make_mag_query(start_date=start_date, date_type=MagDateType.done)
        self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z'")

        # End date parameter
        query = make_mag_query(end_date=end_date, date_type=MagDateType.release)
        self.assertEqual(query, "ReleaseDate lt datetime'2020-05-01T00:00Z'")

        query = make_mag_query(end_date=end_date, date_type=MagDateType.discovered)
        self.assertEqual(query, "DiscoveredDate lt datetime'2020-05-01T00:00Z'")

        query = make_mag_query(end_date=end_date, date_type=MagDateType.archived)
        self.assertEqual(query, "ArchivedDate lt datetime'2020-05-01T00:00Z'")

        query = make_mag_query(end_date=end_date, date_type=MagDateType.done)
        self.assertEqual(query, "DoneDate lt datetime'2020-05-01T00:00Z'")

        # Start date, end date and date type
        query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.release)
        self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z' and ReleaseDate lt "
                                "datetime'2020-05-01T00:00Z'")

        query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.discovered)
        self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z' and DiscoveredDate lt "
                                "datetime'2020-05-01T00:00Z'")

        query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.archived)
        self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z' and ArchivedDate lt "
                                "datetime'2020-05-01T00:00Z'")

        query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.done)
        self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z' and DoneDate lt "
                                "datetime'2020-05-01T00:00Z'")

        # State, start date, end date and date type
        query = make_mag_query(state=MagState.discovered, start_date=start_date, end_date=end_date,
                               date_type=MagDateType.discovered)
        self.assertEqual(query, "State eq 'discovered' and DiscoveredDate ge datetime'2020-04-01T00:00Z' "
                                "and DiscoveredDate lt datetime'2020-05-01T00:00Z'")

        query = make_mag_query(state=MagState.archived, start_date=start_date, end_date=end_date,
                               date_type=MagDateType.archived)
        self.assertEqual(query, "State eq 'archived' and ArchivedDate ge datetime'2020-04-01T00:00Z' "
                                "and ArchivedDate lt datetime'2020-05-01T00:00Z'")

        query = make_mag_query(state=MagState.done, start_date=start_date, end_date=end_date,
                               date_type=MagDateType.done)
        self.assertEqual(query, "State eq 'done' and DoneDate ge datetime'2020-04-01T00:00Z' "
                                "and DoneDate lt datetime'2020-05-01T00:00Z'")


def make_mag_release(account_name: str, account_key: str, year: int, month: int, day: int):
    min_date = pendulum.datetime(1601, 1, 1)
    partition_key_ = 'mag'
    row_key_ = f'mag-{year:0>4d}-{month:0>2d}-{day:0>2d}'
    state_ = MagState.discovered
    task_ = MagTask.not_started
    release_date_ = pendulum.datetime(year=year, month=month, day=day)
    source_container_ = row_key_
    source_container_last_modified_ = pendulum.datetime(year=year, month=month, day=day, hour=1)
    release_container_ = ''
    release_path_ = ''
    discovered_date_ = pendulum.datetime(year=year, month=month, day=day, hour=2)
    archived_date_ = min_date
    done_date_ = min_date
    return MagRelease(partition_key_, row_key_, state_, task_, release_date_, source_container_,
                      source_container_last_modified_, release_container_, release_path_, discovered_date_,
                      archived_date_, done_date_, account_name=account_name, account_key=account_key)


class TestMagRelease(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super(TestMagRelease, self).__init__(*args, **kwargs)
        self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
        self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
        create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)

    def test_secrets_hidden(self):
        # Check that account key is hidden
        account_name = 'myaccountname'
        secret = 'secret'

        # Check that account_key and sas_token are hidden
        release = make_mag_release(account_name, secret, 2020, 1, 1)
        self.assertIn('account_key=hidden', release.__repr__())
        self.assertNotIn(secret, release.__str__())
        self.assertNotIn(secret, release.__repr__())

        # Check that account_key is None
        release = make_mag_release(account_name, None, 2020, 1, 1)
        self.assertIn('account_key=None', release.__repr__())

    def test_create(self):
        release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
        try:
            success = release.create()
            self.assertTrue(success)
        finally:
            release.delete()

    def test_delete(self):
        release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)

        # Check that we can create and then delete
        release.create()
        release.delete()

        # Check that second delete fails
        with self.assertRaises(AzureMissingResourceHttpError):
            release.delete()

    def test_update(self):
        release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
        try:
            release.create()

            # Update release
            release.state = MagState.archived
            release.archived_date = pendulum.utcnow().microsecond_(0)
            release.update()

            # Verify that release is updated
            service = TableService(account_name=self.account_name, account_key=self.account_key)
            entity = service.get_entity(MagRelease.TABLE_NAME, release.partition_key, release.row_key)
            updated_release = MagRelease.from_entity(entity)
            self.assertEqual(release.state, updated_release.state)
            self.assertEqual(release.archived_date, updated_release.archived_date)
        finally:
            release.delete()


def make_containers():
    containers = []
    cp1 = ContainerProperties()
    cp1.name = 'mag-2020-04-17'
    cp1.last_modified = pendulum.datetime(year=2020, month=4, day=18)
    containers.append(cp1)

    cp3 = ContainerProperties()
    cp3.name = 'mag-2020-05-01'
    cp3.last_modified = pendulum.datetime(year=2020, month=5, day=1)
    containers.append(cp3)

    cp2 = ContainerProperties()
    cp2.name = 'mag-2020-04-24'
    cp2.last_modified = pendulum.datetime(year=2020, month=4, day=25)
    containers.append(cp2)

    return containers


class TestMagArchiverClient(unittest.TestCase):

    def __init__(self, *args, **kwargs):
        super(TestMagArchiverClient, self).__init__(*args, **kwargs)
        self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
        self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
        create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)

    def test_secrets_hidden(self):
        # Check that account key is hidden
        account_name = 'myaccountname'
        secret = 'secret'

        # Check that account_key and sas_token are hidden
        client = MagArchiverClient(account_name=account_name, account_key=secret, sas_token=secret)
        expected = f'MagArchiverClient(account_name={account_name}, account_key=hidden, sas_token=hidden)'
        self.assertEqual(client.__str__(), expected)
        self.assertEqual(client.__repr__(), expected)
        self.assertNotIn(secret, client.__str__())
        self.assertNotIn(secret, client.__repr__())

        # Check that account_key and sas_token are None
        client = MagArchiverClient(account_name=account_name)
        expected = f'MagArchiverClient(account_name={account_name}, account_key=None, sas_token=None)'
        self.assertEqual(client.__str__(), expected)
        self.assertEqual(client.__repr__(), expected)

    @patch('mag_archiver.mag.list_containers')
    @patch('pendulum.datetime.now')
    def test_list_containers(self, mock_now, mock_list_containers):
        # Mock time
        mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)

        # Mock containers
        containers_in = make_containers()
        mock_list_containers.return_value = containers_in

        # Test that 2 containers are returned when last_modified_thresh=1
        client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
        containers_out = client.list_containers(last_modified_thresh=1)
        self.assertEqual(len(containers_out), 2)

        # Test that 3 containers are returned when last_modified_thresh=0
        containers_out = client.list_containers(last_modified_thresh=0)
        self.assertEqual(len(containers_out), 3)

        # Test sort order reverse=False
        self.assertEqual(containers_in[0].name, containers_out[0].name)
        self.assertEqual(containers_in[2].name, containers_out[1].name)
        self.assertEqual(containers_in[1].name, containers_out[2].name)

        # Test sort order reverse=True
        containers_out = client.list_containers(last_modified_thresh=0, reverse=True)
        self.assertEqual(len(containers_out), 3)
        self.assertEqual(containers_in[1].name, containers_out[0].name)
        self.assertEqual(containers_in[2].name, containers_out[1].name)
        self.assertEqual(containers_in[0].name, containers_out[2].name)

    @patch('mag_archiver.mag.list_containers')
    @patch('pendulum.datetime.now')
    def test_update_releases(self, mock_now, mock_list_containers):
        # Mock time
        mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)

        # Mock containers
        containers_in = make_containers()
        mock_list_containers.return_value = containers_in

        # Mock fetching of containers
        client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
        containers = client.list_containers(last_modified_thresh=1)

        try:
            # Update releases based on containers
            num_updated, num_errors = client.update_releases(containers)
            self.assertEqual(num_updated, 2)
            self.assertEqual(num_errors, 0)
        finally:
            # Clean up
            service = TableService(account_name=self.account_name, account_key=self.account_key)
            for container in containers:
                service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))

    @patch('mag_archiver.mag.list_containers')
    @patch('pendulum.datetime.now')
    def test_list_releases(self, mock_now, mock_list_containers):
        # Mock time
        mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, hour=1)

        # Mock containers
        containers_in = make_containers()
        mock_list_containers.return_value = containers_in

        # Mock fetching of containers
        client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
        containers = client.list_containers(last_modified_thresh=1)

        try:
            # Update releases based on containers
            num_updated, num_errors = client.update_releases(containers)
            self.assertEqual(num_updated, 3)
            self.assertEqual(num_errors, 0)

            # Two releases
            start_date = pendulum.datetime(year=2020, month=4, day=17)
            end_date = pendulum.datetime(year=2020, month=5, day=1)
            releases = client.list_releases(start_date=start_date, end_date=end_date,
                                            state=MagState.discovered, date_type=MagDateType.release)
            self.assertEqual(len(releases), 2)

            # 1 release
            start_date = pendulum.datetime(year=2020, month=4, day=17, minute=1)
            end_date = pendulum.datetime(year=2020, month=5, day=1)
            releases = client.list_releases(start_date=start_date, end_date=end_date,
                                            state=MagState.discovered, date_type=MagDateType.release)
            self.assertEqual(len(releases), 1)

            # Three releases
            start_date = pendulum.datetime(year=2020, month=4, day=17)
            end_date = pendulum.datetime(year=2020, month=5, day=1, minute=1)
            releases = client.list_releases(start_date=start_date, end_date=end_date,
                                            state=MagState.discovered, date_type=MagDateType.release,
                                            reverse=False)
            self.assertEqual(len(releases), 3)

            # Sorting reverse=False
            self.assertEqual(releases[0].row_key, '2020-04-17')
            self.assertEqual(releases[1].row_key, '2020-04-24')
            self.assertEqual(releases[2].row_key, '2020-05-01')

            # Sorting reverse=True
            releases = client.list_releases(start_date=start_date, end_date=end_date,
                                            state=MagState.discovered, date_type=MagDateType.release,
                                            reverse=True)
            self.assertEqual(releases[0].row_key, '2020-05-01')
            self.assertEqual(releases[1].row_key, '2020-04-24')
            self.assertEqual(releases[2].row_key, '2020-04-17')
        finally:
            # Clean up
            service = TableService(account_name=self.account_name, account_key=self.account_key)
            for container in containers:
                service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
45.128065
112
0.676368
2,046
16,562
5.239003
0.116325
0.069969
0.028734
0.030133
0.736729
0.710234
0.680847
0.625618
0.599123
0.553037
0
0.042021
0.226965
16,562
366
113
45.251366
0.795204
0.101739
0
0.367089
0
0
0.117663
0.059641
0
0
0
0
0.244726
1
0.059072
false
0
0.037975
0
0.118143
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a3d5083187f3606549524985d8222291ba30b943
4,199
py
Python
tests/unit/transport/plugins/asyncssh/test_asyncssh_transport.py
carlmontanari/nssh
fa2277ea0b8fdb81de3064e1d48bad9264f0cd64
[ "MIT" ]
1
2020-02-09T17:43:43.000Z
2020-02-09T17:43:43.000Z
tests/unit/transport/plugins/asyncssh/test_asyncssh_transport.py
carlmontanari/nssh
fa2277ea0b8fdb81de3064e1d48bad9264f0cd64
[ "MIT" ]
null
null
null
tests/unit/transport/plugins/asyncssh/test_asyncssh_transport.py
carlmontanari/nssh
fa2277ea0b8fdb81de3064e1d48bad9264f0cd64
[ "MIT" ]
null
null
null
import asyncio
from io import BytesIO

import pytest
from asyncssh.connection import SSHClientConnection
from asyncssh.stream import SSHReader

from scrapli.exceptions import ScrapliConnectionNotOpened, ScrapliTimeout


class DumbContainer:
    def __init__(self):
        self.preferred_auth = ()

    def __getattr__(self, item):
        # options has a billion attributes, just return None, doesnt matter for this test
        return None


def test_close(monkeypatch, asyncssh_transport):
    def _close(cls):
        pass

    monkeypatch.setattr(
        "asyncssh.connection.SSHClientConnection.close",
        _close,
    )

    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    asyncssh_transport.close()

    assert asyncssh_transport.session is None
    assert asyncssh_transport.stdin is None
    assert asyncssh_transport.stdout is None


def test_close_catch_brokenpipe(monkeypatch, asyncssh_transport):
    def _close(cls):
        raise BrokenPipeError

    monkeypatch.setattr(
        "asyncssh.connection.SSHClientConnection.close",
        _close,
    )

    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    asyncssh_transport.close()

    assert asyncssh_transport.session is None
    assert asyncssh_transport.stdin is None
    assert asyncssh_transport.stdout is None


def test_isalive_no_session(asyncssh_transport):
    assert asyncssh_transport.isalive() is False


def test_isalive(asyncssh_transport):
    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    # lie and tell asyncssh auth is done
    asyncssh_transport.session._auth_complete = True

    # also have to lie and create a transport and have it return False when is_closing is called
    asyncssh_transport.session._transport = DumbContainer()
    asyncssh_transport.session._transport.is_closing = lambda: False

    assert asyncssh_transport.isalive() is True


def test_isalive_attribute_error(asyncssh_transport):
    # lie and pretend the session is already assigned
    options = DumbContainer()
    asyncssh_transport.session = SSHClientConnection(
        loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
    )

    # lie and tell asyncssh auth is done
    asyncssh_transport.session._auth_complete = True

    assert asyncssh_transport.isalive() is False


async def test_read(monkeypatch, asyncssh_transport):
    async def _read(cls, _):
        return b"somebytes"

    monkeypatch.setattr(
        "asyncssh.stream.SSHReader.read",
        _read,
    )

    # lie and pretend the session is already assigned/stdout is already a thing
    asyncssh_transport.stdout = SSHReader("", "")

    assert await asyncssh_transport.read() == b"somebytes"


async def test_read_exception_not_open(asyncssh_transport):
    with pytest.raises(ScrapliConnectionNotOpened):
        await asyncssh_transport.read()


async def test_read_exception_timeout(monkeypatch, asyncssh_transport):
    async def _read(cls, _):
        await asyncio.sleep(0.5)

    monkeypatch.setattr(
        "asyncssh.stream.SSHReader.read",
        _read,
    )

    # lie and pretend the session is already assigned/stdout is already a thing
    asyncssh_transport.stdout = SSHReader("", "")

    asyncssh_transport._base_transport_args.timeout_transport = 0.1

    with pytest.raises(ScrapliTimeout):
        await asyncssh_transport.read()


def test_write(asyncssh_transport):
    asyncssh_transport.stdin = BytesIO()

    asyncssh_transport.write(b"blah")

    asyncssh_transport.stdin.seek(0)
    assert asyncssh_transport.stdin.read() == b"blah"


def test_write_exception(asyncssh_transport):
    with pytest.raises(ScrapliConnectionNotOpened):
        asyncssh_transport.write("blah")
28.958621
96
0.740414
485
4,199
6.17732
0.208247
0.226969
0.080107
0.032043
0.641522
0.614152
0.524032
0.495327
0.495327
0.495327
0
0.001468
0.188616
4,199
144
97
29.159722
0.877898
0.138128
0
0.511111
0
0
0.049889
0.041574
0
0
0
0
0.122222
1
0.122222
false
0.011111
0.066667
0.011111
0.222222
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a3d593a4708a16249302174780a07f2fdc88109b
664
py
Python
dataPresenter.py
thebouv/IUS-Hacktoberfest
084634ec2feff3e81862d85b3938e1ae2c5aadff
[ "MIT" ]
3
2019-09-30T18:25:12.000Z
2019-10-01T21:47:41.000Z
dataPresenter.py
thebouv/IUS-Hacktoberfest
084634ec2feff3e81862d85b3938e1ae2c5aadff
[ "MIT" ]
3
2019-09-27T22:44:34.000Z
2019-10-09T17:00:37.000Z
dataPresenter.py
thebouv/IUS-Hacktoberfest
084634ec2feff3e81862d85b3938e1ae2c5aadff
[ "MIT" ]
6
2019-09-28T04:17:16.000Z
2019-10-08T18:47:26.000Z
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dataProcessor import parseLabels, parseLangs
import os

years = parseLabels()
langs = parseLangs()

# make the plotly results
fig = make_subplots(
    rows=1, cols=2,
    specs=[[{"type": "xy"}, {"type": "domain"}]],
)

fig.add_trace(go.Bar(y=list(langs.values()), x=list(langs.keys()), showlegend=False),
              row=1, col=1)

fig.add_trace(go.Pie(values=list(years.values()), labels=list(years.keys())),
              row=1, col=2)

fig.update_layout(height=600)
pio.write_html(fig, 'index.html', auto_open=True)
22.133333
89
0.680723
99
664
4.484848
0.525253
0.081081
0.063063
0.072072
0.085586
0
0
0
0
0
0
0.016275
0.167169
664
29
90
22.896552
0.786618
0.034639
0
0.111111
0
0
0.040689
0
0
0
0
0
0
1
0
false
0
0.333333
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
a3d7408e9bd4e19d03c2fd8dc2282dcab222a6b8
2,782
py
Python
db/redis_db.py
Lifeistrange/WeiboSpider
8aa3465487ef64bb6e9bb4bd503f182a1b38c292
[ "MIT" ]
1
2018-07-23T03:58:53.000Z
2018-07-23T03:58:53.000Z
db/redis_db.py
545314690/WeiboSpider-1.6.4
d29cc9b926da5790768ddebdfdf9bf6c617a0e03
[ "MIT" ]
null
null
null
db/redis_db.py
545314690/WeiboSpider-1.6.4
d29cc9b926da5790768ddebdfdf9bf6c617a0e03
[ "MIT" ]
2
2018-06-25T09:21:24.000Z
2018-07-23T03:59:31.000Z
# coding:utf-8
import datetime
import json
import re

import redis

from config.conf import get_redis_args

redis_args = get_redis_args()


class Cookies(object):
    rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
                               password=redis_args.get('password'), db=redis_args.get('cookies'))
    rd_con_broker = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
                                      password=redis_args.get('password'), db=redis_args.get('broker'))

    @classmethod
    def store_cookies(cls, name, cookies):
        pickled_cookies = json.dumps(
            {'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
        cls.rd_con.hset('account', name, pickled_cookies)
        cls.rd_con.lpush('account_queue', name)

    @classmethod
    def fetch_cookies(cls):
        for i in range(cls.rd_con.llen('account_queue')):
            name = cls.rd_con.rpop('account_queue').decode('utf-8')
            if name:
                j_account = cls.rd_con.hget('account', name).decode('utf-8')
                if j_account:
                    # if the account no longer exists, this name is dropped too and the next name is taken
                    cls.rd_con.lpush('account_queue', name)
                    account = json.loads(j_account)
                    login_time = datetime.datetime.fromtimestamp(account['loginTime'])
                    if datetime.datetime.now() - login_time > datetime.timedelta(hours=20):
                        cls.rd_con.hdel('account', name)
                        # discard this expired account; account_queue is cleaned up on the next
                        # visit rather than here, because the crawler runs distributed
                        continue
                    return name, account['cookies']
            else:
                return None

    @classmethod
    def delete_cookies(cls, name):
        cls.rd_con.hdel('account', name)
        return True

    @classmethod
    def check_login_task(cls):
        if cls.rd_con_broker.llen('login_queue') > 0:
            cls.rd_con_broker.delete('login_queue')


class Urls(object):
    rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
                               password=redis_args.get('password'), db=redis_args.get('urls'))

    @classmethod
    def store_crawl_url(cls, url, result):
        cls.rd_con.set(url, result)


class IdNames(object):
    rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
                               password=redis_args.get('password'), db=redis_args.get('id_name'))

    @classmethod
    def store_id_name(cls, user_name, user_id):
        cls.rd_con.set(user_name, user_id)

    @classmethod
    def fetch_uid_by_name(cls, user_name):
        user_id = cls.rd_con.get(user_name)
        if user_id:
            return user_id.decode('utf-8')
        return ''
36.12987
103
0.611431
349
2,782
4.653295
0.234957
0.105296
0.125616
0.061576
0.376232
0.361453
0.333128
0.297414
0.297414
0.2617
0
0.003423
0.264917
2,782
76
104
36.605263
0.790709
0.03271
0
0.237288
0
0
0.088202
0
0
0
0
0
0
1
0.118644
false
0.067797
0.084746
0
0.40678
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
a3d86cad8d3203300d44bd218c5b17bca9639482
409
py
Python
data/contacts.py
rgurevych/python_for_testers
04023a5d6ea480f7828aa56e8a4094b744e05721
[ "Apache-2.0" ]
null
null
null
data/contacts.py
rgurevych/python_for_testers
04023a5d6ea480f7828aa56e8a4094b744e05721
[ "Apache-2.0" ]
null
null
null
data/contacts.py
rgurevych/python_for_testers
04023a5d6ea480f7828aa56e8a4094b744e05721
[ "Apache-2.0" ]
null
null
null
from models.contact import Contact

testdata = [Contact(first_name="Firstname", last_name="Lastname",
                    mobile_phone="+12345678", work_phone="12345",
                    home_phone="67890", fax="55443322",
                    email_1="email_1@email.com", email_2="email_2@email.com",
                    email_3="email_3@email.com",
                    address="Street, 15 \n 12345 New-York")]
51.125
116
0.577017
49
409
4.591837
0.612245
0.106667
0.097778
0
0
0
0
0
0
0
0
0.134021
0.288509
409
7
117
58.428571
0.639175
0
0
0
0
0
0.301471
0
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a3da86d4ce645eeb7110c2f1c12a4c42e43e9f77
2,681
py
Python
cats/types.py
AdamBrianBright/cats-python
163cbde06c0d56520c217c0d66ddca34c7e0f63b
[ "MIT" ]
2
2021-10-04T05:39:03.000Z
2021-10-07T06:44:19.000Z
cats/types.py
AdamBrianBright/cats-python
163cbde06c0d56520c217c0d66ddca34c7e0f63b
[ "MIT" ]
7
2021-08-17T17:50:23.000Z
2021-08-31T08:44:13.000Z
cats/types.py
AdamBrianBright/cats-python
163cbde06c0d56520c217c0d66ddca34c7e0f63b
[ "MIT" ]
2
2021-10-01T20:58:25.000Z
2021-10-04T05:40:35.000Z
from pathlib import Path
from types import GeneratorType
from typing import AsyncIterable, Iterable, TypeAlias

import ujson

from cats.errors import MalformedHeadersError

try:
    from django.db.models import QuerySet, Model
except ImportError:
    QuerySet = type('QuerySet', (list,), {})
    Model = type('Model', (list,), {})

__all__ = [
    'Bytes',
    'BytesGen',
    'BytesAsyncGen',
    'BytesAnyGen',
    'Byte',
    'Json',
    'File',
    'List',
    'Missing',
    'MISSING',
    'QuerySet',
    'Model',
    'T_Headers',
    'Headers',
]

Bytes: TypeAlias = bytes | bytearray | memoryview
BytesGen: TypeAlias = Iterable[Bytes]
BytesAsyncGen: TypeAlias = AsyncIterable[Bytes]
BytesAnyGen: TypeAlias = BytesGen | BytesAsyncGen

Byte: TypeAlias = Bytes
Json: TypeAlias = str | int | float | dict | list | bool | None
File: TypeAlias = Path | str
List = list | tuple | set | GeneratorType | QuerySet


class Missing(str):
    """
    Custom Missing type is required for Pydantic to work properly. IDK
    """
    __slots__ = ()

    def __init__(self):
        super().__init__()

    def __eq__(self, other):
        return isinstance(other, Missing)

    def __bool__(self):
        return False


MISSING = Missing()


class Headers(dict):
    __slots__ = ()

    def __init__(self, *args, **kwargs):
        v = self._convert(*args, **kwargs)
        if (offset := v.get('offset', None)) and (not isinstance(offset, int) or offset < 0):
            raise MalformedHeadersError('Invalid offset header', headers=v)
        super().__init__(v)

    @classmethod
    def _key(cls, key: str) -> str:
        return key.replace(' ', '-').title()

    def __getitem__(self, item):
        return super().__getitem__(self._key(item))

    def __setitem__(self, key, value):
        return super().__setitem__(self._key(key), value)

    def __delitem__(self, key):
        return super().__delitem__(self._key(key))

    def __contains__(self, item):
        return super().__contains__(self._key(item))

    @classmethod
    def _convert(cls, *args, **kwargs):
        return {cls._key(k): v for k, v in dict(*args, **kwargs).items() if isinstance(k, str)}

    def update(self, *args, **kwargs) -> None:
        super().update(self._convert(*args, **kwargs))

    def encode(self) -> bytes:
        return ujson.dumps(self, ensure_ascii=False, escape_forward_slashes=False).encode('utf-8')

    @classmethod
    def decode(cls, headers: Bytes) -> 'Headers':
        try:
            headers = ujson.loads(headers)
        except ValueError:  # + UnicodeDecodeError
            headers = None
        return cls(headers or {})


T_Headers: TypeAlias = Headers | dict[str]
25.056075
98
0.631481
300
2,681
5.37
0.356667
0.037244
0.014898
0.019863
0
0
0
0
0
0
0
0.000974
0.234241
2,681
106
99
25.292453
0.783731
0.032824
0
0.092105
0
0
0.058207
0
0
0
0
0
0
1
0.171053
false
0
0.092105
0.118421
0.447368
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
a3dad87fce4f18faf3a3d29b5cefbd7b89d614d5
384
py
Python
raven/utils/urlparse.py
MyCollege/raven
9447f3a55ae7703afe84c3493625e3c3fb700700
[ "BSD-3-Clause" ]
null
null
null
raven/utils/urlparse.py
MyCollege/raven
9447f3a55ae7703afe84c3493625e3c3fb700700
[ "BSD-3-Clause" ]
null
null
null
raven/utils/urlparse.py
MyCollege/raven
9447f3a55ae7703afe84c3493625e3c3fb700700
[ "BSD-3-Clause" ]
null
null
null
from __future__ import absolute_import

try:
    import urlparse as _urlparse
except ImportError:
    from urllib import parse as _urlparse


def register_scheme(scheme):
    for method in filter(lambda s: s.startswith('uses_'), dir(_urlparse)):
        uses = getattr(_urlparse, method)
        if scheme not in uses:
            uses.append(scheme)


urlparse = _urlparse.urlparse
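# A hedged usage sketch (not part of the original module): register_scheme() appends the
# scheme to every uses_* list in the parse module (uses_netloc, uses_relative, and so on),
# so helpers such as urljoin treat the custom scheme like http. 'redis' is just an example.
if __name__ == "__main__":
    register_scheme('redis')
    print(urlparse('redis://localhost:6379/0'))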
22.588235
74
0.708333
48
384
5.416667
0.5625
0.076923
0
0
0
0
0
0
0
0
0
0
0.221354
384
16
75
24
0.869565
0
0
0
0
0
0.013021
0
0
0
0
0
0
1
0.090909
false
0
0.363636
0
0.454545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
a3db35b8c7d191b6f652e750b697cd40a1dc6c0c
516
py
Python
setup.py
stjordanis/MONeT-1
98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af
[ "MIT" ]
161
2020-10-28T02:21:50.000Z
2022-03-11T05:06:16.000Z
setup.py
stjordanis/MONeT-1
98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af
[ "MIT" ]
4
2020-10-28T02:27:43.000Z
2021-03-31T00:04:43.000Z
setup.py
stjordanis/MONeT-1
98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af
[ "MIT" ]
15
2020-10-28T02:32:12.000Z
2021-12-23T13:20:23.000Z
import setuptools

setuptools.setup(
    name="monet_memory_optimized_training",
    version="0.0.1",
    description="Memory Optimized Network Training Framework",
    url="https://github.com/philkr/lowrank_conv",
    packages=setuptools.find_packages(include=['monet', 'monet.*', 'models', 'checkmate', 'gist']),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
32.25
101
0.655039
54
516
6.148148
0.777778
0.090361
0
0
0
0
0
0
0
0
0
0.01432
0.187985
516
15
102
34.4
0.778043
0
0
0
0
0
0.503876
0.060078
0
0
0
0
0
1
0
true
0
0.071429
0
0.071429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
a3e04f191bacc2a7d80fcd1ad9bb0b6bdef01714
788
py
Python
1.py
zweed4u/dailycodingproblem
6e40eaad347e283f86a11adeff01c6426211a0be
[ "MIT" ]
null
null
null
1.py
zweed4u/dailycodingproblem
6e40eaad347e283f86a11adeff01c6426211a0be
[ "MIT" ]
null
null
null
1.py
zweed4u/dailycodingproblem
6e40eaad347e283f86a11adeff01c6426211a0be
[ "MIT" ]
null
null
null
#!/usr/bin/python3
"""
Good morning! Here's your coding interview problem for today.

This problem was recently asked by Google.

Given a list of numbers and a number k, return whether any two numbers from the list add up to k.

For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.

Bonus: Can you do this in one pass?
"""


def func(l, k):
    sums = []
    for index, element in enumerate(l):
        print(f'Current element: {element}')
        if index == 0:
            # first element - need another
            print()
            continue
        for num in range(index):
            print(f'Appending {l[index]} + {l[num]}')
            sums.append(l[num] + l[index])
        print()
    print(sums)
    return k in sums


print(func([10, 15, 3, 7], 17))
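# A hedged addendum (not part of the original solution): the "one pass" bonus can be met
# by remembering the values seen so far and checking each new element's complement k - x.
def func_one_pass(l, k):
    seen = set()
    for element in l:
        if k - element in seen:
            # some earlier number pairs with this one to sum to k
            return True
        seen.add(element)
    return False


print(func_one_pass([10, 15, 3, 7], 17))  # True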
26.266667
97
0.593909
124
788
3.774194
0.580645
0.017094
0.021368
0.025641
0
0
0
0
0
0
0
0.041145
0.290609
788
29
98
27.172414
0.796064
0.467005
0
0.142857
0
0
0.138686
0
0
0
0
0
0
1
0.071429
false
0
0
0
0.142857
0.428571
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
1
0
1
a3f0b2c627b66a9afed0141c901b2c8dc3a62a08
1,265
py
Python
peon/tests/test_project/test_file/test_function_def/test_functions/test_reflection_at_line.py
roch1990/peon
0e9e40956c05138c0820fe380b354fdd1fe95e01
[ "MIT" ]
32
2020-05-18T14:02:59.000Z
2022-02-06T15:00:12.000Z
peon/tests/test_project/test_file/test_function_def/test_functions/test_reflection_at_line.py
roch1990/peon
0e9e40956c05138c0820fe380b354fdd1fe95e01
[ "MIT" ]
42
2020-05-22T20:29:08.000Z
2021-03-10T21:24:23.000Z
peon/tests/test_project/test_file/test_function_def/test_functions/test_reflection_at_line.py
roch1990/peon
0e9e40956c05138c0820fe380b354fdd1fe95e01
[ "MIT" ]
4
2020-07-02T06:32:42.000Z
2022-01-24T22:46:02.000Z
import _ast

from peon.src.project.file.function_def.function import FunctionLint


class ReflectionAtLineFixture:
    empty_node = _ast.Pass
    is_instance_at_first_lvl = _ast.FunctionDef(id='isinstance', lineno=1)
    type_at_first_lvl = _ast.FunctionDef(id='type', lineno=1)
    is_instance_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='isinstance', lineno=2)], lineno=1)
    type_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='type', lineno=2)], lineno=1)


def test_empty_node():
    assert FunctionLint(
        definition=ReflectionAtLineFixture.empty_node,
    ).reflection_at_line() == tuple()


def test_is_instance_at_first_lvl():
    assert FunctionLint(
        definition=ReflectionAtLineFixture.is_instance_at_first_lvl,
    ).reflection_at_line() == (1,)


def test_type_at_first_lvl():
    assert FunctionLint(
        definition=ReflectionAtLineFixture.type_at_first_lvl,
    ).reflection_at_line() == (1,)


def test_is_instance_at_second_lvl():
    assert FunctionLint(
        definition=ReflectionAtLineFixture.is_instance_at_second_lvl,
    ).reflection_at_line() == (2,)


def test_type_at_second_lvl():
    assert FunctionLint(
        definition=ReflectionAtLineFixture.type_at_second_lvl,
    ).reflection_at_line() == (2,)
30.119048
103
0.746245
160
1,265
5.46875
0.225
0.068571
0.082286
0.291429
0.654857
0.601143
0.541714
0.313143
0.164571
0
0
0.009234
0.143874
1,265
41
104
30.853659
0.798707
0
0
0.321429
0
0
0.022134
0
0
0
0
0
0.178571
1
0.178571
false
0.035714
0.071429
0
0.464286
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
a3f0f192476289dad6988d88e198337f911d4da1
4,419
py
Python
db2_funcs.py
Nama/A.T.S.P.-Website
658db78da1b12c01ef9ead2dc44d1ecd97b178d8
[ "MIT" ]
4
2015-10-18T18:46:13.000Z
2019-11-16T02:34:05.000Z
db2_funcs.py
Adventure-Terraria-Server-Project/A.T.S.P.-Website
658db78da1b12c01ef9ead2dc44d1ecd97b178d8
[ "MIT" ]
null
null
null
db2_funcs.py
Adventure-Terraria-Server-Project/A.T.S.P.-Website
658db78da1b12c01ef9ead2dc44d1ecd97b178d8
[ "MIT" ]
2
2015-09-30T21:53:19.000Z
2019-03-22T07:59:49.000Z
###############################################################################
#                                                                             #
'''Website Database-connection-related features'''
#                                                                             #
###############################################################################

import cymysql
from conf import website_db
from time import gmtime
from time import strftime

db_host = website_db.ip
db_port = website_db.port
db = website_db.db
db_user = website_db.user
db_pw = website_db.pw


###############################################################################
#                                                                             #
'''Database connect and close'''
#                                                                             #
###############################################################################


def db_con():
    conn = cymysql.connect(host=db_host, port=db_port, user=db_user, passwd=db_pw, db=db)
    cur = conn.cursor()
    return conn, cur


def db_close(conn, cur):
    cur.close()
    conn.close()


###############################################################################
#                                                                             #
'''Donation-Page data'''
#                                                                             #
###############################################################################


def donate_save(nick):
    conn, cur = db_con()
    time = strftime('%Y.%m.%d - %H:%M:%S', gmtime())
    cur.execute('INSERT INTO `donate` (`time`, `user`) VALUES (%s, %s)', (time, nick))
    conn.commit()
    db_close(conn, cur)


def donate_read():
    conn, cur = db_con()
    cur.execute('SELECT * FROM `donate` ORDER BY `time` DESC LIMIT 20')
    nicks = list()
    for r in cur.fetchall():
        nicks.append([r[0], r[1]])
    db_close(conn, cur)
    return nicks


###############################################################################
#                                                                             #
'''Short-URL data'''
#                                                                             #
###############################################################################


def shorturl_save(surl, url):
    conn, cur = db_con()
    cur.execute('INSERT INTO `shorturls` (`surl`, `url`) VALUES (%s, %s)', (surl, url))
    conn.commit()
    db_close(conn, cur)


def shorturl_read():
    conn, cur = db_con()
    cur.execute('SELECT * FROM `shorturls`')
    urls = list()
    for r in cur.fetchall():
        urls.append([r[0], r[0], r[1]])
    db_close(conn, cur)
    return urls


###############################################################################
#                                                                             #
'''Old Worlds'''
#                                                                             #
###############################################################################


def get_old_worlds(item):
    conn, cur = db_con()
    sql = 'SELECT * FROM `oldworlds` ORDER BY `date` DESC LIMIT {0}, {1}'.format(item, 20)
    cur.execute(sql)
    worlds = cur.fetchall()
    db_close(conn, cur)
    return worlds


###############################################################################
#                                                                             #
'''Server Backup-Size in Dash'''
#                                                                             #
###############################################################################


def backup_size():
    conn, cur = db_con()
    dbtshock = []
    tserver = []
    htdocs = []
    cur.execute('SELECT * FROM `backups`')
    for r in cur.fetchall():
        if r[1] == 'db':
            dbtshock.append([r[0] * 1000, r[2]])
        elif r[1] == 'tserver':
            tserver.append([r[0] * 1000, r[2]])
        elif r[1] == 'htdocs':
            htdocs.append([r[0] * 1000, r[2]])
    db_close(conn, cur)
    return (dbtshock, tserver, htdocs)
33.992308
90
0.296673
324
4,419
3.938272
0.268519
0.076803
0.060345
0.076803
0.273511
0.22884
0.167712
0.125392
0.125392
0
0
0.012021
0.397601
4,419
129
91
34.255814
0.467318
0.009957
0
0.261538
0
0
0.129266
0
0
0
0
0
0
1
0.123077
false
0.015385
0.061538
0
0.261538
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
430006e2469bed3f7e4c977ba2de4f246799468c
1,714
py
Python
testsite/wsgi.py
stungkit/djaodjin-saas
93c8631509ffd5b0fb91283cd4a4aeaf9826e97e
[ "BSD-2-Clause" ]
null
null
null
testsite/wsgi.py
stungkit/djaodjin-saas
93c8631509ffd5b0fb91283cd4a4aeaf9826e97e
[ "BSD-2-Clause" ]
null
null
null
testsite/wsgi.py
stungkit/djaodjin-saas
93c8631509ffd5b0fb91283cd4a4aeaf9826e97e
[ "BSD-2-Clause" ]
null
null
null
""" WSGI config for testsite project. This module contains the WSGI application used by Django's development server and any production WSGI deployments. It should expose a module-level variable named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover this application via the ``WSGI_APPLICATION`` setting. Usually you will have the standard Django WSGI application here, but it also might make sense to replace the whole Django WSGI application with a custom one that later delegates to the Django one. For example, you could introduce WSGI middleware here, or combine a Django application with an application of another framework. """ import os, signal #pylint: disable=invalid-name def save_coverage(*args, **kwargs): #pylint:disable=unused-argument sys.stderr.write("saving coverage\n") cov.stop() cov.save() if os.getenv('DJANGO_COVERAGE'): import atexit, sys import coverage cov = coverage.coverage(data_file=os.path.join(os.getenv('DJANGO_COVERAGE'), ".coverage.%d" % os.getpid())) cov.start() atexit.register(save_coverage) try: signal.signal(signal.SIGTERM, save_coverage) except ValueError as e: # trapping signals does not work with manage # trying to do so fails with # ValueError: signal only works in main thread pass os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testsite.settings") # This application object is used by any WSGI server configured to use this # file. This includes Django's development server, if the WSGI_APPLICATION # setting points here. from django.core.wsgi import get_wsgi_application #pylint: disable=invalid-name application = get_wsgi_application()
34.979592
80
0.748541
239
1,714
5.309623
0.539749
0.082742
0.042553
0.037825
0
0
0
0
0
0
0
0
0.171529
1,714
48
81
35.708333
0.893662
0.596266
0
0
0
0
0.145185
0.032593
0
0
0
0
0
1
0.052632
false
0.052632
0.210526
0
0.263158
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4302245408f7928b493623fbaac5ca5daff6a97c
229
py
Python
kattis/Soda Slurper.py
jaredliw/python-question-bank
9c8c246623d8d171f875700b57772df0afcbdcdf
[ "MIT" ]
1
2021-04-08T07:49:15.000Z
2021-04-08T07:49:15.000Z
kattis/Soda Slurper.py
jaredliw/leetcode-solutions
9c8c246623d8d171f875700b57772df0afcbdcdf
[ "MIT" ]
null
null
null
kattis/Soda Slurper.py
jaredliw/leetcode-solutions
9c8c246623d8d171f875700b57772df0afcbdcdf
[ "MIT" ]
1
2022-01-23T02:12:24.000Z
2022-01-23T02:12:24.000Z
# CPU: 0.06 s

possessed, found, condition = map(int, input().split())
possessed += found
count = 0
while possessed >= condition:
    div, mod = divmod(possessed, condition)
    count += div
    possessed = div + mod
print(count)
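A quick worked trace of the exchange loop (my own illustration): with possessed = 9 and condition = 3, divmod(9, 3) = (3, 0) drinks 3 sodas and leaves 3 empties; divmod(3, 3) = (1, 0) drinks 1 more and leaves 1 empty, which is below the exchange threshold, so the loop stops and the printed total is 4.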
22.9
55
0.663755
30
229
5.066667
0.566667
0.184211
0
0
0
0
0
0
0
0
0
0.021858
0.200873
229
9
56
25.444444
0.808743
0.048035
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0.125
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4313de468396c7f2ca9e8be49eccd525b21cb61b
309
py
Python
test123.py
umousesonic/zinc
9e170269d3b209a80ac79d5850894ddc1d95c62f
[ "BSD-3-Clause" ]
null
null
null
test123.py
umousesonic/zinc
9e170269d3b209a80ac79d5850894ddc1d95c62f
[ "BSD-3-Clause" ]
null
null
null
test123.py
umousesonic/zinc
9e170269d3b209a80ac79d5850894ddc1d95c62f
[ "BSD-3-Clause" ]
null
null
null
from runner import runner

if __name__ == '__main__':
    r = runner()
    p = 'public class main{public static void main (String[] args){' \
        'public String StudentAnswer(String myInput){' \
        'return "myOutput"; ' \
        '}System.out.println("hello world!");}}'
    print(r.sendCode(p, ''))
34.333333
70
0.601942
35
309
5.085714
0.714286
0
0
0
0
0
0
0
0
0
0
0
0.23301
309
9
71
34.333333
0.751055
0
0
0
0
0
0.53871
0.083871
0
0
0
0
0
1
0
false
0
0.125
0
0.125
0.25
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4318e19519ef3b4ec8fbfd551e4ad75ec635df69
9,102
py
Python
src/transbigdata/CoordinatesConverter.py
cirno1w/transport
f088b4111992dd5ec6371db71cf1d26689cf8c26
[ "BSD-3-Clause" ]
1
2022-03-06T00:15:19.000Z
2022-03-06T00:15:19.000Z
src/transbigdata/CoordinatesConverter.py
anitagraser/transbigdata
0eb972c78f9154c0a3f780f197ef9af406b2bb71
[ "BSD-3-Clause" ]
null
null
null
src/transbigdata/CoordinatesConverter.py
anitagraser/transbigdata
0eb972c78f9154c0a3f780f197ef9af406b2bb71
[ "BSD-3-Clause" ]
null
null
null
import numpy as np

x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626
a = 6378245.0
ee = 0.00669342162296594323


def gcj02tobd09(lng, lat):
    """
    Convert coordinates from GCJ02 to BD09

    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    try:
        lng = lng.astype(float)
        lat = lat.astype(float)
    except:
        lng = float(lng)
        lat = float(lat)
    z = np.sqrt(lng * lng + lat * lat) + 0.00002 * np.sin(lat * x_pi)
    theta = np.arctan2(lat, lng) + 0.000003 * np.cos(lng * x_pi)
    bd_lng = z * np.cos(theta) + 0.0065
    bd_lat = z * np.sin(theta) + 0.006
    return bd_lng, bd_lat


def bd09togcj02(bd_lon, bd_lat):
    """
    Convert coordinates from BD09 to GCJ02

    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    try:
        bd_lon = bd_lon.astype(float)
        bd_lat = bd_lat.astype(float)
    except:
        bd_lon = float(bd_lon)
        bd_lat = float(bd_lat)
    x = bd_lon - 0.0065
    y = bd_lat - 0.006
    z = np.sqrt(x * x + y * y) - 0.00002 * np.sin(y * x_pi)
    theta = np.arctan2(y, x) - 0.000003 * np.cos(x * x_pi)
    gg_lng = z * np.cos(theta)
    gg_lat = z * np.sin(theta)
    return gg_lng, gg_lat


def wgs84togcj02(lng, lat):
    """
    Convert coordinates from WGS84 to GCJ02

    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    try:
        lng = lng.astype(float)
        lat = lat.astype(float)
    except:
        lng = float(lng)
        lat = float(lat)
    dlat = transformlat(lng - 105.0, lat - 35.0)
    dlng = transformlng(lng - 105.0, lat - 35.0)
    radlat = lat / 180.0 * pi
    magic = np.sin(radlat)
    magic = 1 - ee * magic * magic
    sqrtmagic = np.sqrt(magic)
    dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
    dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
    mglat = lat + dlat
    mglng = lng + dlng
    return mglng, mglat


def gcj02towgs84(lng, lat):
    """
    Convert coordinates from GCJ02 to WGS84

    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    try:
        lng = lng.astype(float)
        lat = lat.astype(float)
    except:
        lng = float(lng)
        lat = float(lat)
    dlat = transformlat(lng - 105.0, lat - 35.0)
    dlng = transformlng(lng - 105.0, lat - 35.0)
    radlat = lat / 180.0 * pi
    magic = np.sin(radlat)
    magic = 1 - ee * magic * magic
    sqrtmagic = np.sqrt(magic)
    dlat = (dlat * 180.0) / ((a * (1 - ee)) / (magic * sqrtmagic) * pi)
    dlng = (dlng * 180.0) / (a / sqrtmagic * np.cos(radlat) * pi)
    mglat = lat + dlat
    mglng = lng + dlng
    return lng * 2 - mglng, lat * 2 - mglat


def wgs84tobd09(lon, lat):
    """
    Convert coordinates from WGS84 to BD09

    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    try:
        lon = lon.astype(float)
        lat = lat.astype(float)
    except:
        lon = float(lon)
        lat = float(lat)
    lon, lat = wgs84togcj02(lon, lat)
    lon, lat = gcj02tobd09(lon, lat)
    return lon, lat


def bd09towgs84(lon, lat):
    """
    Convert coordinates from BD09 to WGS84

    Parameters
    -------
    lng : Series or number
        Longitude
    lat : Series or number
        Latitude

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    try:
        lon = lon.astype(float)
        lat = lat.astype(float)
    except:
        lon = float(lon)
        lat = float(lat)
    lon, lat = bd09togcj02(lon, lat)
    lon, lat = gcj02towgs84(lon, lat)
    return lon, lat


def bd09mctobd09(x, y):
    """
    Convert coordinates from BD09MC to BD09

    Parameters
    -------
    x : Series or number
        x coordinates
    y : Series or number
        y coordinates

    return
    -------
    lng : Series or number
        Longitude (Converted)
    lat : Series or number
        Latitude (Converted)
    """
    MCBAND = [12890594.86, 8362377.87, 5591021, 3481989.83, 1678043.12, 0]
    MC2LL = [
        [1.410526172116255e-8, 0.00000898305509648872, -1.9939833816331,
         200.9824383106796, -187.2403703815547, 91.6087516669843,
         -23.38765649603339, 2.57121317296198, -0.03801003308653, 17337981.2],
        [-7.435856389565537e-9, 0.000008983055097726239, -0.78625201886289,
         96.32687599759846, -1.85204757529826, -59.36935905485877,
         47.40033549296737, -16.50741931063887, 2.28786674699375, 10260144.86],
        [-3.030883460898826e-8, 0.00000898305509983578, 0.30071316287616,
         59.74293618442277, 7.357984074871, -25.38371002664745,
         13.45380521110908, -3.29883767235584, 0.32710905363475, 6856817.37],
        [-1.981981304930552e-8, 0.000008983055099779535, 0.03278182852591,
         40.31678527705744, 0.65659298677277, -4.44255534477492,
         0.85341911805263, 0.12923347998204, -0.04625736007561, 4482777.06],
        [3.09191371068437e-9, 0.000008983055096812155, 0.00006995724062,
         23.10934304144901, -0.00023663490511, -0.6321817810242,
         -0.00663494467273, 0.03430082397953, -0.00466043876332, 2555164.4],
        [2.890871144776878e-9, 0.000008983055095805407, -3.068298e-8,
         7.47137025468032, -0.00000353937994, -0.02145144861037,
         -0.00001234426596, 0.00010322952773, -0.00000323890364, 826088.5]
    ]
    y1 = y.iloc[0]
    for cD in range(len(MCBAND)):
        if y1 >= MCBAND[cD]:
            cE = MC2LL[cD]
            break
    cD = cE
    T = cD[0] + cD[1] * np.abs(x)
    cB = np.abs(y) / cD[9]
    cE = cD[2] + cD[3] * cB + cD[4] * cB * cB + \
        cD[5] * cB * cB * cB + cD[6] * cB * cB * cB * cB + \
        cD[7] * cB * cB * cB * cB * cB + \
        cD[8] * cB * cB * cB * cB * cB * cB
    return T, cE


def transformlat(lng, lat):
    ret = -100.0 + 2.0 * lng + 3.0 * lat + 0.2 * lat * lat + \
        0.1 * lng * lat + 0.2 * np.sqrt(np.fabs(lng))
    ret += (20.0 * np.sin(6.0 * lng * pi) + 20.0 * np.sin(2.0 * lng * pi)) * 2.0 / 3.0
    ret += (20.0 * np.sin(lat * pi) + 40.0 * np.sin(lat / 3.0 * pi)) * 2.0 / 3.0
    ret += (160.0 * np.sin(lat / 12.0 * pi) + 320 * np.sin(lat * pi / 30.0)) * 2.0 / 3.0
    return ret


def transformlng(lng, lat):
    import numpy as np
    ret = 300.0 + lng + 2.0 * lat + 0.1 * lng * lng + \
        0.1 * lng * lat + 0.1 * np.sqrt(np.abs(lng))
    ret += (20.0 * np.sin(6.0 * lng * pi) + 20.0 * np.sin(2.0 * lng * pi)) * 2.0 / 3.0
    ret += (20.0 * np.sin(lng * pi) + 40.0 * np.sin(lng / 3.0 * pi)) * 2.0 / 3.0
    ret += (150.0 * np.sin(lng / 12.0 * pi) + 300.0 * np.sin(lng / 30.0 * pi)) * 2.0 / 3.0
    return ret


def getdistance(lon1, lat1, lon2, lat2):
    '''
    Input the origin/destination location in the sequence of [lon1, lat1,
    lon2, lat2] (in decimal) from DataFrame. The output is the distance (m).

    Parameters
    -------
    lon1 : Series or number
        Start longitude
    lat1 : Series or number
        Start latitude
    lon2 : Series or number
        End longitude
    lat2 : Series or number
        End latitude

    return
    -------
    distance : Series or number
        The distance
    '''
    try:
        lon1 = lon1.astype(float)
        lat1 = lat1.astype(float)
        lon2 = lon2.astype(float)
        lat2 = lat2.astype(float)
    except:
        lon1 = float(lon1)
        lat1 = float(lat1)
        lon2 = float(lon2)
        lat2 = float(lat2)
    lon1, lat1, lon2, lat2 = map(lambda r: r * pi / 180, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
    c = 2 * np.arcsin(a ** 0.5)
    r = 6371  # mean Earth radius, in kilometres
    return c * r * 1000


def transform_shape(gdf, method):
    '''
    Convert coordinates of all data. The input is the geographic elements'
    DataFrame.

    Parameters
    -------
    gdf : GeoDataFrame
        Geographic elements
    method : function
        The coordinate converting function

    return
    -------
    gdf : GeoDataFrame
        The result of converting
    '''
    from shapely.ops import transform
    gdf1 = gdf.copy()
    gdf1['geometry'] = gdf1['geometry'].apply(lambda r: transform(method, r))
    return gdf1
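A minimal usage sketch of the converters above (the sample coordinates are my own illustration, not from the original record):

# Convert one WGS84 point to BD09, then measure a short hop in metres.
blng, blat = wgs84tobd09(116.397, 39.909)
print(blng, blat)
print(getdistance(116.397, 39.909, 116.407, 39.919))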
28.267081
202
0.568556
1,216
9,102
4.234375
0.192434
0.051272
0.089726
0.042921
0.441833
0.408623
0.376966
0.357934
0.353661
0.353661
0
0.219756
0.297078
9,102
322
203
28.267081
0.585027
0.263349
0
0.415584
0
0
0.002607
0
0
0
0
0
0
1
0.071429
false
0
0.019481
0
0.162338
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
431a878ee70ba62b9e15ce81300906f432dc9b82
406
py
Python
src/nile/core/run.py
kootsZhin/nile
5b685158c06418a126229cfbcaeaaf78a38cd8a0
[ "MIT" ]
121
2021-10-30T08:42:44.000Z
2022-03-31T13:17:58.000Z
src/nile/core/run.py
kootsZhin/nile
5b685158c06418a126229cfbcaeaaf78a38cd8a0
[ "MIT" ]
56
2021-10-31T16:45:06.000Z
2022-03-31T04:41:08.000Z
src/nile/core/run.py
kootsZhin/nile
5b685158c06418a126229cfbcaeaaf78a38cd8a0
[ "MIT" ]
22
2021-11-18T11:24:56.000Z
2022-03-30T08:15:18.000Z
"""Command to run Nile scripts.""" import logging from importlib.machinery import SourceFileLoader from nile.nre import NileRuntimeEnvironment def run(path, network): """Run nile scripts passing on the NRE object.""" logger = logging.getLogger() logger.disabled = True script = SourceFileLoader("script", path).load_module() nre = NileRuntimeEnvironment(network) script.run(nre)
27.066667
59
0.73399
47
406
6.319149
0.574468
0.047138
0.094276
0
0
0
0
0
0
0
0
0
0.165025
406
14
60
29
0.876106
0.17734
0
0
0
0
0.018576
0
0
0
0
0
0
1
0.111111
false
0
0.333333
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
431ad1cf3cfa9d05b69ae287dc97e25b7fff4c83
548
py
Python
Python/Basic Data Types/Lists/Solution.py
PawarAditi/HackerRank
fcd9d1450ee293372ce5f1d4a3b7284ecf472657
[ "MIT" ]
219
2018-06-17T19:47:22.000Z
2022-03-27T15:28:56.000Z
Python/Basic Data Types/Lists/Solution.py
PawarAditi/HackerRank
fcd9d1450ee293372ce5f1d4a3b7284ecf472657
[ "MIT" ]
2
2020-08-12T16:47:41.000Z
2020-12-15T17:05:57.000Z
Python/Basic Data Types/Lists/Solution.py
PawarAditi/HackerRank
fcd9d1450ee293372ce5f1d4a3b7284ecf472657
[ "MIT" ]
182
2018-12-12T21:36:50.000Z
2022-03-26T17:49:51.000Z
array = []
for _ in range(int(input())):
    command = input().strip().split(" ")
    cmd_type = command[0]
    if (cmd_type == "print"):
        print(array)
    elif (cmd_type == "sort"):
        array.sort()
    elif (cmd_type == "reverse"):
        array.reverse()
    elif (cmd_type == "pop"):
        array.pop()
    elif (cmd_type == "remove"):
        array.remove(int(command[1]))
    elif (cmd_type == "append"):
        array.append(int(command[1]))
    elif (cmd_type == "insert"):
        array.insert(int(command[1]), int(command[2]))
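The if/elif chain can also be written as a method lookup; a sketch of that alternative (my restructuring, not the submitted solution):

array = []
for _ in range(int(input())):
    cmd, *args = input().split()
    if cmd == "print":
        print(array)
    else:
        # sort/reverse/pop take no arguments; remove/append/insert take ints
        getattr(array, cmd)(*map(int, args))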
30.444444
54
0.541971
68
548
4.235294
0.338235
0.194444
0.229167
0.104167
0.152778
0.152778
0
0
0
0
0
0.012285
0.257299
548
18
54
30.444444
0.695332
0
0
0
0
0
0.069217
0
0
0
0
0
0
1
0
false
0
0
0
0
0.111111
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
43338fccc231cf2b75bc14f3df4523f468ef4c58
347
py
Python
evetool/urls.py
Sult/evetool
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
[ "MIT" ]
null
null
null
evetool/urls.py
Sult/evetool
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
[ "MIT" ]
null
null
null
evetool/urls.py
Sult/evetool
155db9f3b0ecc273fe3c75daf8f9c6f37cb3e47f
[ "MIT" ]
null
null
null
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static

urlpatterns = [
    # Examples:
    # url(r'^$', 'evetool.views.home', name='home'),
    url(r'^', include('users.urls')),
    url(r'^', include('apis.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
31.545455
67
0.691643
47
347
5.042553
0.425532
0.126582
0.177215
0.151899
0
0
0
0
0
0
0
0
0.135447
347
10
68
34.7
0.79
0.161383
0
0
0
0
0.072917
0
0
0
0
0
0
1
0
false
0
0.428571
0
0.428571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
4336c7b257868aa7e53dc95e1f352acf6bc002a4
175
py
Python
simple_exercises/lanesexercises/py_functions2/rep_ex3.py
ilante/programming_immanuela_englander
45d51c99b09ae335a67e03ac5ea79fc775bdf0bd
[ "MIT" ]
null
null
null
simple_exercises/lanesexercises/py_functions2/rep_ex3.py
ilante/programming_immanuela_englander
45d51c99b09ae335a67e03ac5ea79fc775bdf0bd
[ "MIT" ]
null
null
null
simple_exercises/lanesexercises/py_functions2/rep_ex3.py
ilante/programming_immanuela_englander
45d51c99b09ae335a67e03ac5ea79fc775bdf0bd
[ "MIT" ]
null
null
null
# 3. Define a function to check whether a number is even
def even(num):
    if num % 2 == 0:
        return True
    else:
        return False


print(even(4))
print(even(-5))
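Since the comparison already yields a boolean, the branch collapses to one line; an equivalent rewrite (mine, same behavior):

def even(num):
    return num % 2 == 0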
15.909091
56
0.6
29
175
3.62069
0.758621
0.171429
0
0
0
0
0
0
0
0
0
0.040323
0.291429
175
10
57
17.5
0.806452
0.308571
0
0
0
0
0
0
0
0
0
0
0
1
0.142857
false
0
0
0
0.428571
0.285714
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
433c71e69aaf2d22844233c421ede8abdf861e77
241
py
Python
linter.py
dndrsn/SublimeLinter-contrib-cspell
ba2335a9282335e52282ee93f3bb2a55f9536984
[ "MIT" ]
null
null
null
linter.py
dndrsn/SublimeLinter-contrib-cspell
ba2335a9282335e52282ee93f3bb2a55f9536984
[ "MIT" ]
null
null
null
linter.py
dndrsn/SublimeLinter-contrib-cspell
ba2335a9282335e52282ee93f3bb2a55f9536984
[ "MIT" ]
null
null
null
from SublimeLinter.lint import Linter, STREAM_STDOUT


class CSpell(Linter):
    cmd = 'cspell stdin'
    defaults = {'selector': 'source'}
    regex = r'^[^:]*:(?P<line>\d+):(?P<col>\d+) - (?P<message>.*)$'
    error_stream = STREAM_STDOUT
26.777778
67
0.618257
30
241
4.866667
0.733333
0.164384
0
0
0
0
0
0
0
0
0
0
0.170124
241
8
68
30.125
0.73
0
0
0
0
0
0.323651
0.136929
0
0
0
0
0
1
0
false
0
0.166667
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
43468039289e0d25ecbf534436703bc05e6e79e6
5,156
py
Python
python/app/plugins/http/Struts2/S2_052.py
taomujian/linbing
fe772a58f41e3b046b51a866bdb7e4655abaf51a
[ "MIT" ]
351
2020-02-26T05:23:26.000Z
2022-03-26T12:39:19.000Z
python/app/plugins/http/Struts2/S2_052.py
taomujian/linbing
fe772a58f41e3b046b51a866bdb7e4655abaf51a
[ "MIT" ]
15
2020-03-26T07:31:49.000Z
2022-03-09T02:12:17.000Z
python/app/plugins/http/Struts2/S2_052.py
taomujian/linbing
fe772a58f41e3b046b51a866bdb7e4655abaf51a
[ "MIT" ]
99
2020-02-28T07:30:46.000Z
2022-03-16T16:41:09.000Z
#!/usr/bin/env python3

from app.lib.utils.request import request
from app.lib.utils.encode import base64encode
from app.lib.utils.common import get_capta, get_useragent


class S2_052_BaseVerify:
    def __init__(self, url):

        self.info = {
            'name': 'S2-052漏洞,又名CVE-2017-9805漏洞',
            'description': 'Struts2 Remote Code Execution Vulnerability, Struts 2.1.6 - Struts 2.3.33, Struts 2.5 - Struts 2.5.12',
            'date': '2017-09-05',
            'exptype': 'check',
            'type': 'RCE'
        }
        self.url = url
        if not self.url.startswith("http") and not self.url.startswith("https"):
            self.url = "http://" + self.url
        self.capta = get_capta()
        self.headers = {
            'User-Agent': get_useragent(),
            'Content-Type': "application/xml",
        }
        self.payload = '''
<map>
  <entry>
    <jdk.nashorn.internal.objects.NativeString>
      <flags>0</flags>
      <value class="com.sun.xml.internal.bind.v2.runtime.unmarshaller.Base64Data">
        <dataHandler>
          <dataSource class="com.sun.xml.internal.ws.encoding.xml.XMLMessage$XmlDataSource">
            <is class="javax.crypto.CipherInputStream">
              <cipher class="javax.crypto.NullCipher">
                <initialized>false</initialized>
                <opmode>0</opmode>
                <serviceIterator class="javax.imageio.spi.FilterIterator">
                  <iter class="javax.imageio.spi.FilterIterator">
                    <iter class="java.util.Collections$EmptyIterator"/>
                    <next class="java.lang.ProcessBuilder">
                      <command>
                        {cmd}
                      </command>
                      <redirectErrorStream>false</redirectErrorStream>
                    </next>
                  </iter>
                  <filter class="javax.imageio.ImageIO$ContainsFilter">
                    <method>
                      <class>java.lang.ProcessBuilder</class>
                      <name>start</name>
                      <parameter-types/>
                    </method>
                    <name>foo</name>
                  </filter>
                  <next class="string">foo</next>
                </serviceIterator>
                <lock/>
              </cipher>
              <input class="java.lang.ProcessBuilder$NullInputStream"/>
              <ibuffer></ibuffer>
              <done>false</done>
              <ostart>0</ostart>
              <ofinish>0</ofinish>
              <closed>false</closed>
            </is>
            <consumed>false</consumed>
          </dataSource>
          <transferFlavors/>
        </dataHandler>
        <dataLen>0</dataLen>
      </value>
    </jdk.nashorn.internal.objects.NativeString>
    <jdk.nashorn.internal.objects.NativeString reference="../jdk.nashorn.internal.objects.NativeString"/>
  </entry>
  <entry>
    <jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
    <jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
  </entry>
</map>
'''

    def check(self):
        """
        Check whether the target is vulnerable.

        :param:
        :return bool True or False: whether the vulnerability exists
        """
        try:
            self.check_payload = self.payload.format(cmd='<string>calc</string>')
            check_req = request.post(self.url, headers=self.headers, data=self.check_payload)
            if check_req.status_code == 500 and 'java.security.Provider$Service' in check_req.text:
                return True
            else:
                return False
        except Exception as e:
            print(e)
            return False
        finally:
            pass


if __name__ == "__main__":
    S2_052 = S2_052_BaseVerify('http://127.0.0.1:8088/struts2_rest_showcase_war_exploded/orders/3')
48.641509
138
0.413693
380
5,156
5.531579
0.439474
0.038059
0.068506
0.095147
0.227402
0.20647
0.161751
0.123216
0.123216
0.123216
0
0.027283
0.488169
5,156
106
139
48.641509
0.769231
0.014546
0
0.087912
0
0.065934
0.771519
0.225902
0
0
0
0
0
1
0.021978
false
0.010989
0.032967
0
0.098901
0.010989
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4346e00af4df20f2f609af7be11fe806991cbce3
905
py
Python
UPD/extension/utils.py
RIDCorix/UPD
8694d119181a4afffafbfbab510f697399c1ea13
[ "MIT" ]
null
null
null
UPD/extension/utils.py
RIDCorix/UPD
8694d119181a4afffafbfbab510f697399c1ea13
[ "MIT" ]
null
null
null
UPD/extension/utils.py
RIDCorix/UPD
8694d119181a4afffafbfbab510f697399c1ea13
[ "MIT" ]
null
null
null
import sys

# def get_tools():
#     manager = PluginManager()
#     manager.setPluginPlaces(["plugins/file_cabinet"])
#     manager.collectPlugins()
#     return [plugin.plugin_object for plugin in manager.getAllPlugins()]


def get_tools():
    import importlib
    tools = ['file_cabinet', 'us', 'automator', 'main']

    tool_installation_dir1 = 'C:/Users/User/UPD/plugins'
    tool_installation_dir2 = '/Users/mac/UPD/plugins'

    sys.path.append(tool_installation_dir1)
    sys.path.append(tool_installation_dir2)

    tool_instances = []
    auto_load_modules = ['tasks', 'ui', 'models', 'renderers']

    for tool in tools:
        tool_instances.append(importlib.import_module('.'.join([tool, 'tool'])).tool)
        for module in auto_load_modules:
            try:
                importlib.import_module('.'.join([tool, module]))
            except:
                pass

    return tool_instances
34.807692
85
0.654144
102
905
5.588235
0.45098
0.112281
0.038596
0.059649
0.203509
0
0
0
0
0
0
0.005626
0.214365
905
25
86
36.2
0.796062
0.222099
0
0
0
0
0.146132
0.067335
0
0
0
0
0
1
0.055556
false
0.055556
0.222222
0
0.333333
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
434721dba4ee0af8165b368cf20f7e199d6dcfdf
502
py
Python
lantz/drivers/tektronix/tds1002b.py
mtsolmn/lantz-drivers
f48caf9000ddd08f2abb837d832e341410af4788
[ "BSD-3-Clause" ]
4
2019-05-04T00:10:53.000Z
2020-10-22T18:08:40.000Z
lantz/drivers/tektronix/tds1002b.py
mtsolmn/lantz-drivers
f48caf9000ddd08f2abb837d832e341410af4788
[ "BSD-3-Clause" ]
3
2019-07-12T13:44:17.000Z
2020-10-22T19:32:08.000Z
lantz/drivers/tektronix/tds1002b.py
mtsolmn/lantz-drivers
f48caf9000ddd08f2abb837d832e341410af4788
[ "BSD-3-Clause" ]
9
2019-04-03T17:07:03.000Z
2021-02-15T21:53:55.000Z
# -*- coding: utf-8 -*-
"""
    lantz.drivers.tektronix.tds1012
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Implements the drivers to control an oscilloscope.

    :copyright: 2015 by Lantz Authors, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""

from lantz.core import Feat, MessageBasedDriver


class TDS1002b(MessageBasedDriver):

    MANUFACTURER_ID = '0x699'
    MODEL_CODE = '0x363'

    @Feat(read_once=True)
    def idn(self):
        return self.query('*IDN?')
22.818182
68
0.633466
57
502
5.526316
0.77193
0.044444
0.088889
0
0
0
0
0
0
0
0
0.052369
0.201195
502
21
69
23.904762
0.733167
0.496016
0
0
0
0
0.066667
0
0
0
0.044444
0
0
1
0.142857
false
0
0.142857
0.142857
0.857143
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
4a2f4eecfe75a9c91356c84f877db3d3e9fc53fc
2,139
py
Python
iHome/house/models.py
yeyuning1/iHome
aceb87d786ab66cf74ff47f549ec73388d21c9e3
[ "MIT" ]
2
2019-08-13T07:34:35.000Z
2019-08-13T08:11:46.000Z
iHome/house/models.py
yeyuning1/iHome
aceb87d786ab66cf74ff47f549ec73388d21c9e3
[ "MIT" ]
null
null
null
iHome/house/models.py
yeyuning1/iHome
aceb87d786ab66cf74ff47f549ec73388d21c9e3
[ "MIT" ]
null
null
null
from django.db import models

# Create your models here.
from utils.models import BaseModel


class House(BaseModel):
    '''House information'''
    user = models.ForeignKey('users.User', on_delete=models.CASCADE, verbose_name='房屋用户')
    area = models.ForeignKey('address.Area', on_delete=models.SET_NULL, null=True, verbose_name='房屋地区')
    title = models.CharField(max_length=64, null=False, verbose_name='房屋标题')
    price = models.IntegerField(default=0, verbose_name='房屋单价')  # unit price, in fen
    address = models.CharField(max_length=512, default='', verbose_name='房屋地址')
    room_count = models.SmallIntegerField(default=1, verbose_name='房间数目')
    acreage = models.IntegerField(default=0, verbose_name='房屋面积')
    unit = models.CharField(max_length=32, default='', verbose_name='房屋单元')  # e.g. number of rooms and halls
    capacity = models.SmallIntegerField(default=1, verbose_name='房屋容纳')  # number of people the house can hold
    beds = models.CharField(max_length=64, default='', verbose_name='房屋床铺配置')
    deposit = models.IntegerField(default=0, verbose_name='房屋押金')
    min_days = models.SmallIntegerField(default=1, verbose_name='最少入住天数')
    max_days = models.SmallIntegerField(default=0, verbose_name='最大入住天数')  # 0 means no limit
    order_count = models.IntegerField(default=0, verbose_name='预计该房屋的订单数')
    index_image_url = models.CharField(max_length=500, default='', verbose_name='房屋主图片的路径')
    facilities = models.ManyToManyField('Facility')  # supporting facilities

    class Meta:
        db_table = 'ih_house_info'
        verbose_name = '房屋信息'
        verbose_name_plural = verbose_name


class Facility(models.Model):
    '''House facility information'''
    name = models.CharField(max_length=32, verbose_name='设施名称')

    class Meta:
        db_table = 'ih_facility_info'
        verbose_name = '设施信息'
        verbose_name_plural = verbose_name


class HouseImage(BaseModel):
    '''House images'''
    house = models.ForeignKey(House, verbose_name='房屋信息', on_delete=models.CASCADE)
    url = models.CharField(max_length=256, null=False, verbose_name='房屋图片地址')

    class Meta:
        db_table = 'ih_house_image'
        verbose_name = '房屋图片'
        verbose_name_plural = verbose_name
41.941176
104
0.697522
260
2,139
5.523077
0.346154
0.206825
0.087744
0.116992
0.410864
0.268802
0
0
0
0
0
0.014815
0.179523
2,139
50
105
42.78
0.803419
0.033193
0
0.166667
0
0
0.087131
0
0
0
0
0
0
1
0
false
0
0.055556
0
0.75
0
0
0
0
null
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4a38f4cdb8c158390444f36146a5ad23b2ae9c67
4,998
py
Python
jenkinsapi/view.py
julienduchesne/jenkinsapi
369dc54a8d5bb1f4e985c647378b9e1e62c26961
[ "MIT" ]
null
null
null
jenkinsapi/view.py
julienduchesne/jenkinsapi
369dc54a8d5bb1f4e985c647378b9e1e62c26961
[ "MIT" ]
52
2019-06-25T12:47:14.000Z
2021-04-12T12:24:08.000Z
jenkinsapi/view.py
klauern/jenkinsapi
605ad22a0109d3f51452c7abd23b0376a44682da
[ "MIT" ]
null
null
null
""" Module for jenkinsapi views """ import six import logging from jenkinsapi.jenkinsbase import JenkinsBase from jenkinsapi.job import Job from jenkinsapi.custom_exceptions import NotFound log = logging.getLogger(__name__) class View(JenkinsBase): """ View class """ def __init__(self, url, name, jenkins_obj): self.name = name self.jenkins_obj = jenkins_obj JenkinsBase.__init__(self, url) self.deleted = False def __str__(self): return self.name def __getitem__(self, job_name): assert isinstance(job_name, str) api_url = self.python_api_url(self.get_job_url(job_name)) return Job(api_url, job_name, self.jenkins_obj) def __contains__(self, job_name): """ True if view_name is the name of a defined view """ return job_name in self.keys() def delete(self): """ Remove this view object """ url = "%s/doDelete" % self.baseurl self.jenkins_obj.requester.post_and_confirm_status(url, data='') self.jenkins_obj.poll() self.deleted = True def keys(self): return self.get_job_dict().keys() def iteritems(self): it = six.iteritems(self.get_job_dict()) for name, url in it: yield name, Job(url, name, self.jenkins_obj) def values(self): return [a[1] for a in self.iteritems()] def items(self): return [a for a in self.iteritems()] def _get_jobs(self): if 'jobs' in self._data: for viewdict in self._data["jobs"]: yield viewdict["name"], viewdict["url"] def get_job_dict(self): return dict(self._get_jobs()) def __len__(self): return len(self.get_job_dict().keys()) def get_job_url(self, str_job_name): if str_job_name in self: return self.get_job_dict()[str_job_name] else: # noinspection PyUnboundLocalVariable views_jobs = ", ".join(self.get_job_dict().keys()) raise NotFound("Job %s is not known, available jobs" " in view are: %s" % (str_job_name, views_jobs)) def get_jenkins_obj(self): return self.jenkins_obj def add_job(self, str_job_name, job=None): """ Add job to a view :param str_job_name: name of the job to be added :param job: Job object to be added :return: True if job has been added, False if job already exists or job not known to Jenkins """ if not job: if str_job_name in self.get_job_dict(): log.warning( 'Job %s is already in the view %s', str_job_name, self.name) return False else: # Since this call can be made from nested view, # which doesn't have any jobs, we can miss existing job # Thus let's create top level Jenkins and ask him # http://jenkins:8080/view/CRT/view/CRT-FB/view/CRT-SCRT-1301/ top_jenkins = self.get_jenkins_obj().get_jenkins_obj_from_url( self.baseurl.split('view/')[0]) if not top_jenkins.has_job(str_job_name): log.error( msg='Job "%s" is not known to Jenkins' % str_job_name) return False else: job = top_jenkins.get_job(str_job_name) log.info(msg='Creating job %s in view %s' % (str_job_name, self.name)) url = '%s/addJobToView' % self.baseurl params = {'name': str_job_name} self.get_jenkins_obj().requester.post_and_confirm_status( url, data={}, params=params) self.poll() log.debug(msg='Job "%s" has been added to a view "%s"' % (job.name, self.name)) return True def _get_nested_views(self): for viewdict in self._data.get("views", []): yield viewdict["name"], viewdict["url"] def get_nested_view_dict(self): return dict(self._get_nested_views()) def get_config_xml_url(self): return '%s/config.xml' % self.baseurl def get_config(self): """ Return the config.xml from the view """ url = self.get_config_xml_url() response = self.get_jenkins_obj().requester.get_and_confirm_status(url) return response.text def update_config(self, config): """ Update the config.xml to the view """ url = 
self.get_config_xml_url() config = str(config) # cast unicode in case of Python 2 response = self.get_jenkins_obj().requester.post_url( url, params={}, data=config) return response.text @property def views(self): return self.get_jenkins_obj().get_jenkins_obj_from_url( self.baseurl).views
30.290909
79
0.580232
656
4,998
4.181402
0.204268
0.051039
0.047393
0.030623
0.302588
0.233686
0.131243
0.089683
0.068538
0.034998
0
0.00324
0.320728
4,998
164
80
30.47561
0.804713
0.131253
0
0.108911
0
0
0.061435
0
0
0
0
0
0.009901
1
0.207921
false
0
0.049505
0.09901
0.445545
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
4a41ae80cb8630870b8a540d9da1afa369fa489a
2,875
py
Python
supertokens_python/recipe_module.py
girish946/supertokens-python
ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c
[ "Apache-2.0" ]
36
2021-10-05T17:06:07.000Z
2022-03-29T14:11:39.000Z
supertokens_python/recipe_module.py
girish946/supertokens-python
ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c
[ "Apache-2.0" ]
56
2021-09-02T08:24:29.000Z
2022-03-30T07:29:07.000Z
supertokens_python/recipe_module.py
girish946/supertokens-python
ce0e7f6035941b3a8d3d1f7ae867224fd9c41c3c
[ "Apache-2.0" ]
8
2022-01-28T14:49:55.000Z
2022-03-26T01:28:38.000Z
# Copyright (c) 2021, VRAI Labs and/or its affiliates. All rights reserved.
#
# This software is licensed under the Apache License, Version 2.0 (the
# "License") as published by the Apache Software Foundation.
#
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import annotations

import abc
from typing import Union, List, TYPE_CHECKING

try:
    from typing import Literal
except ImportError:
    from typing_extensions import Literal

from .framework.response import BaseResponse

if TYPE_CHECKING:
    from supertokens_python.framework.request import BaseRequest
    from .supertokens import AppInfo

from .normalised_url_path import NormalisedURLPath
from .exceptions import SuperTokensError


class RecipeModule(abc.ABC):
    def __init__(self, recipe_id: str, app_info: AppInfo):
        self.recipe_id = recipe_id
        self.app_info = app_info

    def get_recipe_id(self):
        return self.recipe_id

    def get_app_info(self):
        return self.app_info

    def return_api_id_if_can_handle_request(
            self, path: NormalisedURLPath, method: str) -> Union[str, None]:
        apis_handled = self.get_apis_handled()
        for current_api in apis_handled:
            if not current_api.disabled and current_api.method == method and self.app_info.api_base_path.append(
                    current_api.path_without_api_base_path).equals(path):
                return current_api.request_id
        return None

    @abc.abstractmethod
    def is_error_from_this_recipe_based_on_instance(self, err):
        pass

    @abc.abstractmethod
    def get_apis_handled(self) -> List[APIHandled]:
        pass

    @abc.abstractmethod
    async def handle_api_request(self, request_id: str, request: BaseRequest,
                                 path: NormalisedURLPath, method: str,
                                 response: BaseResponse):
        pass

    @abc.abstractmethod
    async def handle_error(self, request: BaseRequest,
                           err: SuperTokensError, response: BaseResponse):
        pass

    @abc.abstractmethod
    def get_all_cors_headers(self):
        pass


class APIHandled:
    def __init__(self, path_without_api_base_path: NormalisedURLPath,
                 method: Literal['post', 'get', 'delete', 'put', 'options', 'trace'],
                 request_id: str, disabled: bool):
        self.path_without_api_base_path = path_without_api_base_path
        self.method = method
        self.request_id = request_id
        self.disabled = disabled
34.638554
119
0.718261
378
2,875
5.240741
0.37037
0.030288
0.027764
0.036345
0.131247
0.061585
0
0
0
0
0
0.00354
0.213913
2,875
82
120
35.060976
0.873009
0.227826
0
0.192308
0
0
0.012693
0
0
0
0
0
0
1
0.153846
false
0.096154
0.211538
0.038462
0.480769
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4a54b5369073023cda9e88293fbf883952f8a99e
493
py
Python
notion/ctx.py
jfhbrook/notion-tools
dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d
[ "MIT" ]
1
2022-01-19T22:24:35.000Z
2022-01-19T22:24:35.000Z
notion/ctx.py
jfhbrook/notion-tools
dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d
[ "MIT" ]
4
2021-12-28T05:15:49.000Z
2021-12-28T05:18:25.000Z
notion/ctx.py
jfhbrook/notion-tools
dd7c7fb07f98deaf5bba236aa5b4ea3d09ff0f3d
[ "MIT" ]
null
null
null
from notion.client import NotionClient
from notion.settings import Settings


class Context:
    def __init__(self):
        self.settings = Settings.from_file()
        self._client = None

    def get_client(self):
        if not self._client:
            self.settings.validate()
            self._client = NotionClient(token_v2=self.settings.token, monitor=False)
        return self._client

    def update_settings(self, **kwargs):
        self.settings = self.settings.update(**kwargs)
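A short usage sketch of the lazy-client pattern above (illustrative only; assumes a settings file with a valid token already exists):

ctx = Context()
client = ctx.get_client()            # first call validates settings and builds the client
assert client is ctx.get_client()    # later calls reuse the cached instance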
27.388889
84
0.6714
58
493
5.5
0.413793
0.188088
0
0
0
0
0
0
0
0
0
0.002653
0.235294
493
17
85
29
0.843501
0
0
0
0
0
0
0
0
0
0
0
0
1
0.230769
false
0
0.153846
0
0.538462
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
4a5d879c71ea4b0d47b4f6335a7e75debaa68573
1,368
py
Python
modules/voxelman/config.py
Relintai/pandemonium_engine
3de05db75a396b497f145411f71eb363572b38ae
[ "MIT", "Apache-2.0", "CC-BY-4.0", "Unlicense" ]
null
null
null
modules/voxelman/config.py
Relintai/pandemonium_engine
3de05db75a396b497f145411f71eb363572b38ae
[ "MIT", "Apache-2.0", "CC-BY-4.0", "Unlicense" ]
null
null
null
modules/voxelman/config.py
Relintai/pandemonium_engine
3de05db75a396b497f145411f71eb363572b38ae
[ "MIT", "Apache-2.0", "CC-BY-4.0", "Unlicense" ]
null
null
null
def can_build(env, platform):
    return True


def configure(env):
    pass


def get_doc_classes():
    return [
        "WorldArea",
        "VoxelLight",
        "VoxelLightNode",
        "VoxelLevelGenerator",
        "VoxelLevelGeneratorFlat",
        "VoxelSurfaceMerger",
        "VoxelSurfaceSimple",
        "VoxelSurface",
        "VoxelLibraryMerger",
        "VoxelLibrarySimple",
        "VoxelLibrary",
        "VoxelLibraryMergerPCM",
        "VoxelMaterialCache",
        "VoxelMaterialCachePCM",
        "VoxelCubePoints",
        "VoxelMesherCubic",
        "VoxelMeshData",
        "MarchingCubesCellData",
        "VoxelMesherMarchingCubes",
        "VoxelMesher",
        "EnvironmentData",
        "VoxelChunk",
        "VoxelChunkDefault",
        "VoxelStructure",
        "BlockVoxelStructure",
        "VoxelWorld",
        "VoxelMesherBlocky",
        "VoxelWorldBlocky",
        "VoxelChunkBlocky",
        "VoxelMesherLiquidBlocky",
        "VoxelWorldMarchingCubes",
        "VoxelChunkMarchingCubes",
        "VoxelMesherCubic",
        "VoxelWorldCubic",
        "VoxelChunkCubic",
        "VoxelMesherDefault",
        "VoxelWorldDefault",
        "VoxelJob",
        "VoxelTerrainJob",
        "VoxelLightJob",
        "VoxelPropJob",
        "VoxelMesherJobStep",
    ]


def get_doc_path():
    return "doc_classes"
18.739726
35
0.576754
65
1,368
12.046154
0.830769
0.015326
0.022989
0
0
0
0
0
0
0
0
0
0.318713
1,368
72
36
19
0.840129
0
0
0.039216
0
0
0.506589
0.13104
0
0
0
0
0
1
0.078431
false
0.019608
0
0.058824
0.137255
0
0
0
1
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4a71705f7aaede9643300a7a698cb26841f08adc
1,936
py
Python
tests/test_pandas.py
ONSdigital/ons_utils
5ff0952c174984deb601af8ad4c21f26c7b24623
[ "MIT" ]
null
null
null
tests/test_pandas.py
ONSdigital/ons_utils
5ff0952c174984deb601af8ad4c21f26c7b24623
[ "MIT" ]
null
null
null
tests/test_pandas.py
ONSdigital/ons_utils
5ff0952c174984deb601af8ad4c21f26c7b24623
[ "MIT" ]
1
2022-03-17T08:03:17.000Z
2022-03-17T08:03:17.000Z
"""Tests for the pandas helpers in the pd_helpers.py module.""" import pytest from pandas.testing import assert_frame_equal from tests.conftest import create_dataframe from ons_utils.pandas import * def test_nested_dict_to_df(): """Test for nested_dict_to_df.""" input_d = { 'bones': { 'femur': {'tendons': 24}, 'humerus': {'tendons': 14}, }, 'muscles': { 'gluteus_maximus': {'tendons': 18}, }, 'cars': 7, } actual = nested_dict_to_df( input_d, columns=['number'], level_names=('a', 'b', 'c'), ) expected = create_dataframe([ ('a', 'b', 'c', 'number'), ('bones', 'femur', 'tendons', 24), ('bones', 'humerus', 'tendons', 14), ('cars', None, None, 7), ('muscles', 'gluteus_maximus', 'tendons', 18), ]) assert_frame_equal( # Sort values as dict order not preserved. actual.sort_values(['a', 'b']), # Set index because function returns a MultiIndex. expected.set_index(['a', 'b', 'c']) ) class TestStacker: """Group of tests for Stacker.""" @pytest.mark.skip(reason="test shell") def test_Stacker(self): """Test for Stacker.""" pass @pytest.mark.skip(reason="test shell") def test_convert_level_to_datetime(): """Test for this.""" pass class TestMultiIndexSlicer: """Group of tests for MultiIndexSlicer.""" @pytest.mark.skip(reason="test shell") def test_MultiIndexSlicer(self): """Test for MultiIndexSlicer.""" pass @pytest.mark.skip(reason="test shell") def test_get_index_level_values(): """Test for this.""" pass @pytest.mark.skip(reason="test shell") def test_shifted_within_year_apply(): """Test for this.""" pass @pytest.mark.skip(reason="test shell") def test_shifted_within_year_ffill(): """Test for this.""" pass
22.776471
63
0.591426
231
1,936
4.770563
0.367965
0.044465
0.076225
0.108893
0.352087
0.297641
0.261343
0.261343
0.196007
0.123412
0
0.009642
0.25
1,936
84
64
23.047619
0.749311
0.178719
0
0.235294
0
0
0.140442
0
0
0
0
0
0.039216
1
0.137255
false
0.117647
0.078431
0
0.254902
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4a73d46ee78874a78fab6b3b0aaa918a453b1649
8,296
py
Python
source/accounts/views.py
kishan2064/hashpy1
2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264
[ "BSD-3-Clause" ]
null
null
null
source/accounts/views.py
kishan2064/hashpy1
2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264
[ "BSD-3-Clause" ]
5
2020-02-11T22:31:59.000Z
2021-06-10T17:45:14.000Z
source/accounts/views.py
kishan2064/hashpy1
2f8c6fddb07e80e9a7b37a5632ed8ab8bf68d264
[ "BSD-3-Clause" ]
null
null
null
from django.contrib.auth import login, authenticate, REDIRECT_FIELD_NAME, get_user_model
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.views import PasswordResetView as BasePasswordResetView, SuccessURLAllowedHostsMixin
from django.shortcuts import get_object_or_404, resolve_url
from django.utils.crypto import get_random_string
from django.utils.decorators import method_decorator
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.utils.translation import gettext_lazy as _
from django.views.generic import RedirectView
from django.views.generic.edit import FormView
from django.conf import settings

from .utils import (
    get_login_form, send_activation_email, get_password_reset_form,
    send_reset_password_email, send_activation_change_email,
    is_username_disabled, get_resend_ac_form
)
from .forms import SignUpForm, ProfileEditForm, ChangeEmailForm
from .models import Activation

UserModel = get_user_model()


class SuccessRedirectView(SuccessURLAllowedHostsMixin, FormView):
    redirect_field_name = REDIRECT_FIELD_NAME

    def get_success_url(self):
        url = self.get_redirect_url()
        return url or resolve_url(settings.LOGIN_REDIRECT_URL)

    def get_redirect_url(self):
        redirect_to = self.request.POST.get(
            self.redirect_field_name,
            self.request.GET.get(self.redirect_field_name, '')
        )
        url_is_safe = is_safe_url(
            url=redirect_to,
            allowed_hosts=self.get_success_url_allowed_hosts(),
            require_https=self.request.is_secure(),
        )
        return redirect_to if url_is_safe else ''

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs['request'] = self.request
        return kwargs


class SignInView(SuccessRedirectView):
    template_name = 'accounts/login.html'
    form_class = get_login_form()
    success_url = '/'

    @method_decorator(sensitive_post_parameters('password'))
    @method_decorator(csrf_protect)
    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        # Sets a test cookie to make sure the user has cookies enabled
        request.session.set_test_cookie()

        return super(SignInView, self).dispatch(request, *args, **kwargs)

    def form_valid(self, form):
        # If the test cookie worked, go ahead and
        # delete it since it's no longer needed
        if self.request.session.test_cookie_worked():
            self.request.session.delete_test_cookie()

        login(self.request, form.get_user())

        return super(SignInView, self).form_valid(form)


class SignUpView(FormView):
    template_name = 'accounts/register.html'
    form_class = SignUpForm
    success_url = '/'

    def form_valid(self, form):
        user = form.save(commit=False)

        if is_username_disabled():
            # Set temporary username
            user.username = get_random_string()
        else:
            user.username = form.cleaned_data.get('username')

        if settings.ENABLE_USER_ACTIVATION:
            user.is_active = False

        user.save()

        # Change the username to "user_ID" form
        if is_username_disabled():
            user.username = 'user_{}'.format(user.id)
            user.save()

        if settings.ENABLE_USER_ACTIVATION:
            send_activation_email(self.request, user)

            messages.add_message(self.request, messages.SUCCESS,
                                 _('You are registered. To activate the account, follow the link sent to the mail.'))
        else:
            raw_password = form.cleaned_data.get('password1')

            user = authenticate(username=user.username, password=raw_password)
            login(self.request, user)

            messages.add_message(self.request, messages.SUCCESS, _('You are successfully registered!'))

        return super(SignUpView, self).form_valid(form)


class ActivateView(RedirectView):
    permanent = False
    query_string = True
    pattern_name = 'index'

    def get_redirect_url(self, *args, **kwargs):
        assert 'code' in kwargs

        act = get_object_or_404(Activation, code=kwargs['code'])

        # Activate user's profile
        user = act.user
        user.is_active = True
        user.save()

        # Remove activation record, it is unneeded
        act.delete()

        messages.add_message(self.request, messages.SUCCESS, _('You have successfully activated your account!'))
        login(self.request, user)

        return super(ActivateView, self).get_redirect_url()


class ReSendActivationCodeView(SuccessRedirectView):
    template_name = 'accounts/resend_activation_code.html'
    form_class = get_resend_ac_form()
    success_url = '/'

    def form_valid(self, form):
        user = form.get_user()

        activation = user.activation_set.get()
        activation.delete()

        send_activation_email(self.request, user)

        messages.add_message(self.request, messages.SUCCESS, _('A new activation code has been sent to your e-mail.'))

        return super(ReSendActivationCodeView, self).form_valid(form)


class PasswordResetView(BasePasswordResetView):
    form_class = get_password_reset_form()

    def form_valid(self, form):
        send_reset_password_email(self.request, form.get_user())
        return super(PasswordResetView, self).form_valid(form)


class ProfileEditView(LoginRequiredMixin, FormView):
    template_name = 'accounts/profile/edit.html'
    form_class = ProfileEditForm
    success_url = '/accounts/profile/edit/'

    def get_initial(self):
        initial = super(ProfileEditView, self).get_initial()

        user = self.request.user
        initial['first_name'] = user.first_name
        initial['last_name'] = user.last_name

        return initial

    def form_valid(self, form):
        user = self.request.user
        user.first_name = form.cleaned_data.get('first_name')
        user.last_name = form.cleaned_data.get('last_name')
        user.save()

        messages.add_message(self.request, messages.SUCCESS, _('Profile data has been successfully updated.'))

        return super(ProfileEditView, self).form_valid(form)


class ChangeEmailView(LoginRequiredMixin, FormView):
    template_name = 'accounts/profile/change_email.html'
    form_class = ChangeEmailForm
    success_url = '/accounts/change/email/'

    def get_form_kwargs(self):
        kwargs = super(ChangeEmailView, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        return kwargs

    def get_initial(self):
        initial = super(ChangeEmailView, self).get_initial()

        user = self.request.user
        initial['email'] = user.email

        return initial

    def form_valid(self, form):
        user = self.request.user
        email = form.cleaned_data.get('email')
        email = email.lower()

        if hasattr(settings, 'EMAIL_ACTIVATION_AFTER_CHANGING') and settings.EMAIL_ACTIVATION_AFTER_CHANGING:
            send_activation_change_email(self.request, user, email)

            messages.add_message(self.request, messages.SUCCESS,
                                 _('To complete the change of mail, click on the link sent to it.'))
        else:
            user.email = email
            user.save()

            messages.add_message(self.request, messages.SUCCESS, _('Email successfully changed.'))

        return super(ChangeEmailView, self).form_valid(form)


class ChangeEmailActivateView(RedirectView):
    permanent = False
    query_string = True
    pattern_name = 'change_email'

    def get_redirect_url(self, *args, **kwargs):
        assert 'code' in kwargs

        act = get_object_or_404(Activation, code=kwargs['code'])

        # Change user's email
        user = act.user
        user.email = act.email
        user.save()

        # Remove activation record, it is unneeded
        act.delete()

        messages.add_message(self.request, messages.SUCCESS, _('You have successfully changed your email!'))

        return super(ChangeEmailActivateView, self).get_redirect_url()
32.155039
118
0.690333
988
8,296
5.573887
0.184211
0.053931
0.027238
0.031959
0.345742
0.261304
0.231524
0.200291
0.166879
0.134193
0
0.001547
0.22095
8,296
257
119
32.280156
0.850534
0.039055
0
0.319527
0
0
0.090304
0.024491
0
0
0
0
0.011834
1
0.088757
false
0.053254
0.106509
0
0.473373
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4a7c28f2d0e401facd4b7a43c6ef059a3a83d500
1,193
py
Python
neutron/agent/ovsdb/native/helpers.py
congnt95/neutron
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
[ "Apache-2.0" ]
1,080
2015-01-04T08:35:00.000Z
2022-03-27T09:15:52.000Z
neutron/agent/ovsdb/native/helpers.py
congnt95/neutron
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
[ "Apache-2.0" ]
24
2015-02-21T01:48:28.000Z
2021-11-26T02:38:56.000Z
neutron/agent/ovsdb/native/helpers.py
congnt95/neutron
6a73a362c5ff5b7c28c15a49f47a9900c0d2b4e1
[ "Apache-2.0" ]
1,241
2015-01-02T10:47:10.000Z
2022-03-27T09:42:23.000Z
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools

from oslo_config import cfg

from neutron.conf.agent import ovs_conf as agent_ovs_conf
from neutron.conf.plugins.ml2.drivers import ovs_conf as ml2_ovs_conf
from neutron.privileged.agent.ovsdb.native import helpers as priv_helpers

agent_ovs_conf.register_ovs_agent_opts(cfg.CONF)
ml2_ovs_conf.register_ovs_opts(cfg=cfg.CONF)

enable_connection_uri = functools.partial(
    priv_helpers.enable_connection_uri,
    log_fail_as_error=False, check_exit_code=False,
    timeout=cfg.CONF.OVS.ovsdb_timeout,
    inactivity_probe=cfg.CONF.OVS.of_inactivity_probe * 1000)
37.28125
78
0.776194
186
1,193
4.811828
0.537634
0.067039
0.02905
0.035754
0
0
0
0
0
0
0
0.014911
0.156748
1,193
31
79
38.483871
0.874751
0.487846
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.416667
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
4a7c6a7695f0b0415525906b878d73cc448533e5
264
py
Python
console_weather.py
AlBan52/API_weather
86779a2da622ad7a4537070e5c28a04235415161
[ "MIT" ]
null
null
null
console_weather.py
AlBan52/API_weather
86779a2da622ad7a4537070e5c28a04235415161
[ "MIT" ]
null
null
null
console_weather.py
AlBan52/API_weather
86779a2da622ad7a4537070e5c28a04235415161
[ "MIT" ]
null
null
null
import requests

locations = ['Лондон', 'Шереметьево', 'Череповец']
payload = {'mnTq': '', 'lang': 'ru'}

for location in locations:
    response = requests.get(f'http://wttr.in/{location}', params=payload)
    response.raise_for_status()
    print(response.text)
26.4
73
0.681818
31
264
5.741935
0.741935
0
0
0
0
0
0
0
0
0
0
0
0.140152
264
9
74
29.333333
0.784141
0
0
0
0
0
0.231061
0
0
0
0
0
0
1
0
false
0
0.142857
0
0.142857
0.142857
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4a85a5edb74a35f6879d8683f009ca6b7f10f18c
194
py
Python
migrations/20220114_03_Heqaz-insert-default-serverinfo.py
lin483/Funny-Nations
2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6
[ "MIT" ]
126
2022-01-15T02:29:07.000Z
2022-03-30T09:57:40.000Z
migrations/20220114_03_Heqaz-insert-default-serverinfo.py
lin483/Funny-Nations
2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6
[ "MIT" ]
18
2022-01-11T22:24:35.000Z
2022-03-16T00:13:01.000Z
migrations/20220114_03_Heqaz-insert-default-serverinfo.py
lin483/Funny-Nations
2bb1cd23a3d5f1e4a4854c73ac27f62c98127ef6
[ "MIT" ]
25
2022-01-22T15:06:27.000Z
2022-03-01T04:34:19.000Z
""" insert default serverInfo """ from yoyo import step __depends__ = {'20220114_02_lHBKM-new-table-serverinfo'} steps = [ step("INSERT INTO `serverInfo` (`onlineMinute`) VALUES (0);") ]
16.166667
65
0.695876
22
194
5.863636
0.818182
0
0
0
0
0
0
0
0
0
0
0.066667
0.149485
194
11
66
17.636364
0.715152
0.128866
0
0
0
0
0.565217
0.236025
0
0
0
0
0
1
0
false
0
0.2
0
0.2
0
1
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4a866ef89141cc4c966674193758ad5f52e83702
551
py
Python
Arknights/flags.py
AlaricGilbert/ArknightsAutoHelper
9e2db0c4e0d1be30856df731ab192da396121d94
[ "MIT" ]
null
null
null
Arknights/flags.py
AlaricGilbert/ArknightsAutoHelper
9e2db0c4e0d1be30856df731ab192da396121d94
[ "MIT" ]
1
2019-09-10T13:58:24.000Z
2019-09-10T13:58:24.000Z
Arknights/flags.py
AlaricGilbert/ArknightsAutoHelper
9e2db0c4e0d1be30856df731ab192da396121d94
[ "MIT" ]
null
null
null
TINY_WAIT = 1
SMALL_WAIT = 3
MEDIUM_WAIT = 5
BIG_WAIT = 10
SECURITY_WAIT = 15

BATTLE_FINISH_DETECT = 12
BATTLE_NONE_DETECT_TIME = 90
BATTLE_END_SIGNAL_MAX_EXECUTE_TIME = 15

# Offsets for key actions
FLAGS_START_BATTLE_BIAS = (50, 25)
FLAGS_ENSURE_TEAM_INFO_BIAS = (25, 50)

# Square (click) offsets
FLAGS_CLICK_BIAS_TINY = (3, 3)
FLAGS_CLICK_BIAS_SMALL = (5, 5)
FLAGS_CLICK_BIAS_MEDIUM = (10, 10)
FLAGS_CLICK_BIAS_BIG = (15, 15)
FLAGS_CLICK_BIAS_HUGE = (30, 30)

# Swipe offsets
# Used for left/right swipes, i.e. offsets applied to the initial coordinates
FLAGS_SWIPE_BIAS_TO_LEFT = ((1, 1), (1, 1))
FLAGS_SWIPE_BIAS_TO_RIGHT = ((1, 1), (1, 1))
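The click-bias tuples read as per-axis half-widths for randomized tap jitter. Below is an illustrative helper, not part of the repository, showing how such a bias might be consumed.

# Sketch only: `jitter` is a hypothetical helper for randomizing a tap point
# within +/- bias on each axis, the usual purpose of constants like these.
import random

def jitter(point, bias):
    """Offset a tap point by a random amount within +/- bias per axis."""
    x, y = point
    bx, by = bias
    return (x + random.randint(-bx, bx), y + random.randint(-by, by))

# e.g. jitter((640, 360), FLAGS_CLICK_BIAS_SMALL) -> a point within 5 px of (640, 360)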
21.192308
44
0.751361
97
551
3.804124
0.43299
0.03252
0.189702
0.086721
0
0
0
0
0
0
0
0.094737
0.137931
551
25
45
22.04
0.682105
0.07078
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4a868fe7e98135f318566006794d9b95f620108a
3,229
py
Python
elasticsearch/client/shutdown.py
Conky5/elasticsearch-py
93543a7fee51c0da6e898c9155bdb5f965c5bb53
[ "Apache-2.0" ]
4
2021-05-31T19:34:27.000Z
2021-06-01T18:14:31.000Z
elasticsearch/client/shutdown.py
Conky5/elasticsearch-py
93543a7fee51c0da6e898c9155bdb5f965c5bb53
[ "Apache-2.0" ]
22
2021-05-15T00:01:49.000Z
2022-02-26T00:08:00.000Z
elasticsearch/client/shutdown.py
Conky5/elasticsearch-py
93543a7fee51c0da6e898c9155bdb5f965c5bb53
[ "Apache-2.0" ]
null
null
null
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from .utils import SKIP_IN_PATH, NamespacedClient, _make_path, query_params


class ShutdownClient(NamespacedClient):
    @query_params()
    def delete_node(self, node_id, params=None, headers=None):
        """
        Removes a node from the shutdown list
        `<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_

        .. warning::

            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg node_id: The node id of node to be removed from the
            shutdown state
        """
        if node_id in SKIP_IN_PATH:
            raise ValueError("Empty value passed for a required argument 'node_id'.")

        return self.transport.perform_request(
            "DELETE",
            _make_path("_nodes", node_id, "shutdown"),
            params=params,
            headers=headers,
        )

    @query_params()
    def get_node(self, node_id=None, params=None, headers=None):
        """
        Retrieve status of a node or nodes that are currently marked as
        shutting down
        `<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_

        .. warning::

            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg node_id: Which node for which to retrieve the shutdown status
        """
        return self.transport.perform_request(
            "GET",
            _make_path("_nodes", node_id, "shutdown"),
            params=params,
            headers=headers,
        )

    @query_params()
    def put_node(self, node_id, body, params=None, headers=None):
        """
        Adds a node to be shut down
        `<https://www.elastic.co/guide/en/elasticsearch/reference/current>`_

        .. warning::

            This API is **experimental** so may include breaking changes
            or be removed in a future version

        :arg node_id: The node id of node to be shut down
        :arg body: The shutdown type definition to register
        """
        for param in (node_id, body):
            if param in SKIP_IN_PATH:
                raise ValueError("Empty value passed for a required argument.")

        return self.transport.perform_request(
            "PUT",
            _make_path("_nodes", node_id, "shutdown"),
            params=params,
            headers=headers,
            body=body,
        )
33.989474
85
0.637039
413
3,229
4.878935
0.346247
0.041687
0.014888
0.020844
0.451613
0.392556
0.392556
0.392556
0.392556
0.392556
0
0.001726
0.28244
3,229
94
86
34.351064
0.867933
0.531124
0
0.46875
0
0
0.116732
0
0
0
0
0
0
1
0.09375
false
0.0625
0.03125
0
0.25
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4a93ca990a939c4bbe34b2ca2569173da90ecbc7
3,598
py
Python
ansible/utils/module_docs_fragments/docker.py
EnjoyLifeFund/macHighSierra-py36-pkgs
5668b5785296b314ea1321057420bcd077dba9ea
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
1
2021-04-02T08:08:39.000Z
2021-04-02T08:08:39.000Z
ansible/utils/module_docs_fragments/docker.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
null
null
null
ansible/utils/module_docs_fragments/docker.py
EnjoyLifeFund/Debian_py36_packages
1985d4c73fabd5f08f54b922e73a9306e09c77a5
[ "BSD-3-Clause", "BSD-2-Clause", "MIT" ]
1
2020-05-03T01:13:16.000Z
2020-05-03T01:13:16.000Z
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#


class ModuleDocFragment(object):
    # Docker doc fragment
    DOCUMENTATION = '''
options:
    docker_host:
        description:
            - "The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
              TCP connection string. For example, 'tcp://192.0.2.23:2376'. If TLS is used to encrypt the connection,
              the module will automatically replace 'tcp' in the connection URL with 'https'."
        required: false
        default: "unix://var/run/docker.sock"
        aliases:
            - docker_url
    tls_hostname:
        description:
            - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
        default: localhost
        required: false
    api_version:
        description:
            - The version of the Docker API running on the Docker Host. Defaults to the latest version of the API
              supported by docker-py.
        required: false
        default: default provided by docker-py
        aliases:
            - docker_api_version
    timeout:
        description:
            - The maximum amount of time in seconds to wait on a response from the API.
        required: false
        default: 60
    cacert_path:
        description:
            - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
        required: false
        default: null
        aliases:
            - tls_ca_cert
    cert_path:
        description:
            - Path to the client's TLS certificate file.
        required: false
        default: null
        aliases:
            - tls_client_cert
    key_path:
        description:
            - Path to the client's TLS key file.
        required: false
        default: null
        aliases:
            - tls_client_key
    ssl_version:
        description:
            - Provide a valid SSL version number. Default value determined by docker-py, currently 1.0.
        required: false
        default: "1.0"
    tls:
        description:
            - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
              server.
        default: false
    tls_verify:
        description:
            - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
        default: false

notes:
    - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
      You can define DOCKER_HOST, DOCKER_TLS_HOSTNAME, DOCKER_API_VERSION, DOCKER_CERT_PATH, DOCKER_SSL_VERSION,
      DOCKER_TLS, DOCKER_TLS_VERIFY and DOCKER_TIMEOUT. If you are using docker machine, run the script shipped
      with the product that sets up the environment. It will set these variables for you. See
      https://docker-py.readthedocs.org/en/stable/machine/ for more details.
'''
38.276596
118
0.660645
476
3,598
4.930672
0.369748
0.019173
0.059651
0.024286
0.23562
0.224542
0.200682
0.200682
0.089476
0.089476
0
0.007029
0.288216
3,598
93
119
38.688172
0.90941
0.17871
0
0.388889
0
0.055556
0.971749
0.017699
0
0
0
0
0
1
0
false
0
0
0
0.027778
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4a9b559c6d7c0db14da8219dc9c4e053b7a27ff8
442
bzl
Python
tools/mirrors.bzl
kkiningh/slime
85853115e284bda35b3da10957823d23428b65d3
[ "Apache-2.0" ]
null
null
null
tools/mirrors.bzl
kkiningh/slime
85853115e284bda35b3da10957823d23428b65d3
[ "Apache-2.0" ]
null
null
null
tools/mirrors.bzl
kkiningh/slime
85853115e284bda35b3da10957823d23428b65d3
[ "Apache-2.0" ]
null
null
null
DEFAULT_MIRRORS = {
    "bitbucket": [
        "https://bitbucket.org/{repository}/get/{commit}.tar.gz",
    ],
    "buildifier": [
        "https://github.com/bazelbuild/buildtools/releases/download/{version}/{filename}",
    ],
    "github": [
        "https://github.com/{repository}/archive/{commit}.tar.gz",
    ],
    "pypi": [
        "https://files.pythonhosted.org/packages/source/{p}/{package}/{package}-{version}.tar.gz",
    ],
}
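The values are str.format templates; a consumer fills the named placeholders to produce candidate download URLs. The helper below is an illustrative sketch (valid in both Starlark and Python), not part of the repository.

# Hypothetical expansion helper showing how the "github" templates might be used.
def _github_urls(repository, commit, mirrors = DEFAULT_MIRRORS):
    return [
        pattern.format(repository = repository, commit = commit)
        for pattern in mirrors["github"]
    ]

# _github_urls("kkiningh/slime", "abc123") ->
#     ["https://github.com/kkiningh/slime/archive/abc123.tar.gz"]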
29.466667
98
0.58371
43
442
5.976744
0.627907
0.058366
0.085603
0
0
0
0
0
0
0
0
0
0.190045
442
14
99
31.571429
0.717877
0
0
0.285714
0
0.071429
0.687783
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4aa2559e81941797f8eb297eceb0ea501eab99d6
7,104
py
Python
services/spotify-service.py
thk4711/mediamanager
8f6d21c220767aa9ee5d65635d2993dba07eceed
[ "MIT" ]
null
null
null
services/spotify-service.py
thk4711/mediamanager
8f6d21c220767aa9ee5d65635d2993dba07eceed
[ "MIT" ]
null
null
null
services/spotify-service.py
thk4711/mediamanager
8f6d21c220767aa9ee5d65635d2993dba07eceed
[ "MIT" ]
1
2022-02-07T08:09:15.000Z
2022-02-07T08:09:15.000Z
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import time
import json
import os
import sys
import urllib.request
import socket
import argparse
import requests
import lib.common as common

base_url = 'http://localhost:24879/player/'

#------------------------------------------------------------------------------#
#                           do something on startup                             #
#------------------------------------------------------------------------------#
def init():
    global port
    check_port()
    script_path = os.path.dirname(os.path.abspath(__file__))
    os.chdir(script_path)
    parser = argparse.ArgumentParser(description='media manager spotify connect service')
    parser.add_argument('-p', '--port', type=int, help='WEB server port', required=True)
    args = parser.parse_args()
    port = args.port

#------------------------------------------------------------------------------#
#                     check if librespot-java is running                        #
#------------------------------------------------------------------------------#
def check_port():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = sock.connect_ex(('localhost', 24879))
    if result == 0:
        sock.close()
        return
    print("Please check if SpoCon is configured correctly and running", file=sys.stderr)
    sock.close()
    exit(1)

#------------------------------------------------------------------------------#
#                           get metadata from spotify                           #
#------------------------------------------------------------------------------#
def get_metadata():
    meta_data = {}
    global current_cover
    try:
        current_track = get_player()
        album = current_track['item']['album']
        current_cover = album['images'][0]['url']
        tmp_cover = current_cover
        tmp_cover = tmp_cover.replace('https://i.scdn.co/image/', '')
        meta_data['track'] = current_track['item']['name']
        meta_data['album'] = album['name']
        meta_data['artist'] = album['artists'][0]['name']
        meta_data['cover'] = 'external_' + tmp_cover
        meta_data['playstatus'] = get_play_status()
        if meta_data['playstatus'] == False:
            meta_data['track'] = ''
            meta_data['album'] = ''
            meta_data['artist'] = ''
            meta_data['cover'] = 'images/pause.png'
        return(bytes(json.dumps(meta_data), 'utf-8'))
    except:
        meta_data['track'] = ''
        meta_data['album'] = ''
        meta_data['artist'] = ''
        meta_data['cover'] = 'images/pause.png'
        meta_data['playstatus'] = False
        return(bytes(json.dumps(meta_data), 'utf-8'))

#------------------------------------------------------------------------------#
#                               get play status                                 #
#------------------------------------------------------------------------------#
def get_play_status(mode=False):
    playing = False
    ret_val = False
    ret_str = 'NO'
    try:
        current_track = get_player()
        playing = current_track['is_playing']
    except:
        pass
    if playing == True:
        try:
            path = 'http://localhost:24879/player/current/'
            ret = requests.post(url=path)
            data = ret.json()
            if 'current' in data:
                ret_str = 'YES'
                ret_val = True
                get_player()
        except:
            pass
    if mode:
        return(bytes(ret_str, 'utf-8'))
    return(ret_val)

#------------------------------------------------------------------------------#
#                        get what's currently playing                           #
#------------------------------------------------------------------------------#
def get_current():
    path = 'http://localhost:24879/player/current/'
    ret = requests.post(url=path)
    return ret.json()

#------------------------------------------------------------------------------#
#                          get player data from API                             #
#------------------------------------------------------------------------------#
def get_player():
    path = 'http://localhost:24879/web-api/v1/me/player'
    ret = requests.get(url=path)
    return ret.json()

#------------------------------------------------------------------------------#
#                  read cover image from spotify connect web                    #
#------------------------------------------------------------------------------#
def read_cover_image():
    webURL = urllib.request.urlopen(current_cover)
    data = webURL.read()
    return(data)

#------------------------------------------------------------------------------#
#                                play next song                                 #
#------------------------------------------------------------------------------#
def next():
    requests.post(url=base_url + 'next')

#------------------------------------------------------------------------------#
#                              play previous song                               #
#------------------------------------------------------------------------------#
def prev():
    requests.post(url=base_url + 'prev')

#------------------------------------------------------------------------------#
#                                start playing                                  #
#------------------------------------------------------------------------------#
def play():
    requests.post(url=base_url + 'resume')

#------------------------------------------------------------------------------#
#                                 stop playing                                  #
#------------------------------------------------------------------------------#
def pause():
    requests.post(url=base_url + 'pause')

#------------------------------------------------------------------------------#
#                            handle http get request                            #
#------------------------------------------------------------------------------#
def respond_to_get_request(data):
    if 'action' not in data:
        return(bytes('failed', 'utf-8'))
    if data['action'] == 'play':
        play()
    elif data['action'] == 'pause':
        pause()
    elif data['action'] == 'prev':
        get_metadata()
        prev()
    elif data['action'] == 'next':
        get_metadata()
        next()
    elif data['action'] == 'metadata':
        return(get_metadata())
    elif data['action'] == 'coverimage':
        return(read_cover_image())
    elif data['action'] == 'getplaystatus':
        return(get_play_status(True))
    return(bytes('OK', 'utf-8'))

#------------------------------------------------------------------------------#
#                                 main program                                  #
#------------------------------------------------------------------------------#
init()
common.http_get_handler = respond_to_get_request
common.run_http(port)

while True:
    time.sleep(2000)
39.466667
91
0.366273
538
7,104
4.678439
0.29368
0.057211
0.035757
0.030195
0.191101
0.123957
0.123957
0.123957
0.09853
0.09853
0
0.007739
0.254223
7,104
179
92
39.687151
0.467346
0.433699
0
0.258065
0
0
0.163581
0
0
0
0
0
0
1
0.096774
false
0.016129
0.080645
0
0.201613
0.008065
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4aaa0313e4b848ea3e028c07ae2b856db9916524
715
py
Python
arturtamborskipl/urls.py
arturtamborski/arturtamborskipl
9b93be045f58d5802d9a61568d7ecfbb12042b59
[ "MIT" ]
1
2017-05-05T12:01:43.000Z
2017-05-05T12:01:43.000Z
arturtamborskipl/urls.py
arturtamborski/arturtamborskipl
9b93be045f58d5802d9a61568d7ecfbb12042b59
[ "MIT" ]
null
null
null
arturtamborskipl/urls.py
arturtamborski/arturtamborskipl
9b93be045f58d5802d9a61568d7ecfbb12042b59
[ "MIT" ]
null
null
null
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from django.contrib.sitemaps.views import sitemap
from django.conf import settings

from blog.sitemaps import ArticleSitemap

urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain')),
    url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': ArticleSitemap}}, name='sitemap'),
    url(r'^', include('blog.urls')),
]

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
29.791667
103
0.721678
92
715
5.51087
0.380435
0.118343
0.055227
0.086785
0.110454
0
0
0
0
0
0
0
0.135664
715
23
104
31.086957
0.820388
0
0
0
0
0
0.131469
0
0
0
0
0
0
1
0
false
0
0.444444
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
4aadfd2d97ab81dd6472cc9d6d7741a3c62a553c
2,316
py
Python
server/server-flask/app/docs/admin/survey/survey.py
DSM-DMS/Project-DMS-Web
73a5d8fc2310bca90169414abf50f541ca0724c7
[ "MIT" ]
11
2017-07-04T07:44:07.000Z
2017-09-19T12:56:55.000Z
server/server-flask/app/docs/admin/survey/survey.py
DSM-DMS/DMS
73a5d8fc2310bca90169414abf50f541ca0724c7
[ "MIT" ]
null
null
null
server/server-flask/app/docs/admin/survey/survey.py
DSM-DMS/DMS
73a5d8fc2310bca90169414abf50f541ca0724c7
[ "MIT" ]
2
2017-10-23T06:11:16.000Z
2017-10-26T03:27:57.000Z
SURVEY_POST = {
    'tags': ['설문조사 관리'],
    'description': '설문조사 등록',
    'parameters': [
        {
            'name': 'Authorization',
            'description': 'JWT Token',
            'in': 'header',
            'type': 'str',
            'required': True
        },
        {
            'name': 'title',
            'description': '설문조사 제목',
            'in': 'formData',
            'type': 'str',
            'required': True
        },
        {
            'name': 'start_date',
            'description': '시작 날짜(YYYY-MM-DD)',
            'in': 'formData',
            'type': 'str',
            'required': True
        },
        {
            'name': 'end_date',
            'description': '종료 날짜(YYYY-MM-DD)',
            'in': 'formData',
            'type': 'str',
            'required': True
        },
        {
            'name': 'target',
            'description': '대상 학년',
            'in': 'formData',
            'type': 'list',
            'required': True
        }
    ],
    'responses': {
        '201': {
            'description': '설문조사 등록 성공'
        },
        '403': {
            'description': '권한 없음'
        }
    }
}

QUESTION_POST = {
    'tags': ['설문조사 관리'],
    'description': '설문조사에 질문 등록',
    'parameters': [
        {
            'name': 'Authorization',
            'description': 'JWT Token',
            'in': 'header',
            'type': 'str',
            'required': True
        },
        {
            'name': 'id',
            'description': '질문을 추가할 설문조사 ID',
            'in': 'formData',
            'type': 'str',
            'required': True
        },
        {
            'name': 'title',
            'description': '질문 제목',
            'in': 'formData',
            'type': 'str',
            'required': True
        },
        {
            'name': 'is_objective',
            'description': '객관식 여부',
            'in': 'formData',
            'type': 'bool',
            'required': True
        },
        {
            'name': 'choice_paper',
            'description': '객관식 선택지',
            'in': 'formData',
            'type': 'list',
            'required': False
        }
    ],
    'responses': {
        '201': {
            'description': '질문 추가 성공'
        },
        '403': {
            'description': '권한 없음'
        }
    }
}
23.16
47
0.345423
161
2,316
4.931677
0.347826
0.13602
0.161209
0.167506
0.656171
0.47733
0.47733
0.395466
0.307305
0.307305
0
0.009967
0.480138
2,316
99
48
23.393939
0.649502
0
0
0.489796
0
0
0.330743
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4ab456260f6c742ad312aaa99e3e2590ddc0675c
731
py
Python
olamundo.py/exercicios_refeitos/ex029.py
gabrielviticov/exercicios-python
4068cb0029513f8ab8bd12fa3a9055f37b4040d4
[ "MIT" ]
null
null
null
olamundo.py/exercicios_refeitos/ex029.py
gabrielviticov/exercicios-python
4068cb0029513f8ab8bd12fa3a9055f37b4040d4
[ "MIT" ]
null
null
null
olamundo.py/exercicios_refeitos/ex029.py
gabrielviticov/exercicios-python
4068cb0029513f8ab8bd12fa3a9055f37b4040d4
[ "MIT" ]
null
null
null
'''
ex029: Write a program that reads a car's speed. If it exceeds 80 km/h,
show a message saying the driver has been fined. The fine is R$ 7.00 for
each km/h above the limit.
'''

from colorise import set_color, reset_color

cor = {
    'limpa': '\033[m',
    'white': '\033[1;97m'
}

set_color(fg='green')
velocidade_carro = int(input('Informe a velocidade do carro KM/H: '))

if velocidade_carro > 80:
    multa = (velocidade_carro - 80) * 7.00
    print('\nMULTADO! VOCÊ ULTRAPASSOU O LIMITE PERMITIDO. LOGO TERÁ QUE PAGAR ', end='')
    reset_color()
    print('{}R${:.2f}{}'.format(cor['white'], multa, cor['limpa']))
else:
    set_color(fg='green')
    print('\nCONTINUE ASSIM. DIRIGINDO COM SEGURANÇA!')
36.55
195
0.679891
112
731
4.366071
0.625
0.04908
0.0409
0.06135
0
0
0
0
0
0
0
0.041322
0.172367
731
19
196
38.473684
0.766942
0.266758
0
0.133333
0
0
0.386364
0
0
0
0
0
0
1
0
false
0.066667
0.066667
0
0.066667
0.2
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
4ab62b5efbeb5c0a7886f27f8824551ce65f3eab
256
py
Python
fruit/mixin/drawable.py
felko/fruit
4768fd333ac3b7c0bd6d339304b23e20e312d2d1
[ "MIT" ]
4
2017-06-14T14:50:05.000Z
2019-07-29T16:51:24.000Z
fruit/mixin/drawable.py
felko/fruit
4768fd333ac3b7c0bd6d339304b23e20e312d2d1
[ "MIT" ]
null
null
null
fruit/mixin/drawable.py
felko/fruit
4768fd333ac3b7c0bd6d339304b23e20e312d2d1
[ "MIT" ]
null
null
null
#!/usr/bin/env python3.4
# coding: utf-8


class Drawable:
    """
    Base class for drawable objects.
    """

    def draw(self):
        """
        Returns a Surface object.
        """
        raise NotImplementedError(
            "Method `draw` is not implemented for {}".format(type(self)))
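A hypothetical subclass (illustrative only, not part of the package) showing the intended contract: concrete drawables override draw() and return a Surface-like object, while the base class raises NotImplementedError.

# Sketch: `Circle` and its return value are assumptions for demonstration.
class Circle(Drawable):
    def __init__(self, radius):
        self.radius = radius

    def draw(self):
        # In the real project this would build and return a Surface object;
        # a stand-in string keeps the sketch self-contained.
        return 'surface(circle, r={})'.format(self.radius)

# Circle(4).draw() returns a value; Drawable().draw() raises NotImplementedError.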
15.058824
64
0.648438
33
256
5.030303
0.848485
0
0
0
0
0
0
0
0
0
0
0.014563
0.195313
256
16
65
16
0.791262
0.375
0
0
0
0
0.282609
0
0
0
0
0
0
1
0.25
false
0
0
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
4abb4683ee2e4ff41f7985424a94c70975cdae94
356
py
Python
src/modules/python.py
fest2bash/fest2bash
008282f67d4d4415c27b3b9b6162daf54f8d6028
[ "MIT" ]
null
null
null
src/modules/python.py
fest2bash/fest2bash
008282f67d4d4415c27b3b9b6162daf54f8d6028
[ "MIT" ]
null
null
null
src/modules/python.py
fest2bash/fest2bash
008282f67d4d4415c27b3b9b6162daf54f8d6028
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
import sys

sys.dont_write_bytecode = True

from pprint import pprint

from base import BaseFest2Bash


class Fest2Bash(BaseFest2Bash):
    def __init__(self, manifest):
        super(Fest2Bash, self).__init__(manifest)

    def generate(self, *args, **kwargs):
        return self.manifest
19.777778
49
0.710674
46
356
5.282609
0.652174
0.098765
0
0
0
0
0
0
0
0
0
0.020619
0.182584
356
17
50
20.941176
0.814433
0.120787
0
0
0
0
0
0
0
0
0
0
0
1
0.181818
false
0
0.454545
0.090909
0.818182
0.090909
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
4abf8e0a8ee60fe90e1a20e373c9a2a3d84d695d
3,504
py
Python
ssbio/databases/pdbflex.py
JoshuaMeyers/ssbio
624618602437e2c2e4adf90962adcef3af2d5b40
[ "MIT" ]
76
2017-03-06T02:50:38.000Z
2022-02-08T08:08:48.000Z
ssbio/databases/pdbflex.py
JoshuaMeyers/ssbio
624618602437e2c2e4adf90962adcef3af2d5b40
[ "MIT" ]
30
2017-03-09T14:54:05.000Z
2021-10-06T10:57:45.000Z
ssbio/databases/pdbflex.py
JoshuaMeyers/ssbio
624618602437e2c2e4adf90962adcef3af2d5b40
[ "MIT" ]
21
2017-09-01T23:00:31.000Z
2022-02-23T14:04:30.000Z
import requests
import ssbio.utils
import os.path as op

# #### PDB stats
# Request flexibility data about one particular PDB.
#
# http://pdbflex.org/php/api/PDBStats.php?pdbID=1a50&chainID=A
#
# pdbID     of structure you are interested in
# chainID   of chain you are interested in
#
# [{"pdbID":"1a50",
#   "chainID":"A",
#   "parentClusterID":"4hn4A",
#   "avgRMSD":"0.538",
#   "maxRMSD":"2.616",
#   "flexibilityLabel":"Low",
#   "otherClusterMembers":["4hn4A","4hpjA","4hpxA","4kkxA",...],
#   "PDBFlexLink":"http:\/\/pdbflex.org\/cluster.html#!\/4hn4A\/20987\/1a50A"}]
#
# Note: you can omit the chainID and PDBFlex will return information for all chains.
#
# #### RMSD profile
# Request RMSD array used for local flexibility plots
#
# http://pdbflex.org/php/api/rmsdProfile.php?pdbID=1a50&chainID=A
#
# pdbID     PDB ID of structure you are interested in
# chainID   Chain ID of chain you are interested in
#
# {"queryPDB":"1a50A",
#  "clusterName":"4hn4A",
#  "profile":"[0.616,0.624,0.624,0.624,0.624,0.624,0.029,0.013,0.016,0.023,0.025,0.028,0.030,0.034,0.035,0.035,0.035,0.035,0.036,0.033,0.027,0.023,0.017...]"}
#
# #### PDB representatives
# Request representatives for a PDB's own cluster. Returns a list of chains that
# represent the most distinct structures in the cluster.
#
# http://pdbflex.org/php/api/representatives.php?pdbID=1a50&chainID=A
#
# pdbID     PDB ID of structure you are interested in
# chainID   Chain ID of chain you are interested in
#
# ["2trsA","3pr2A","1kfjA"]


def get_pdbflex_info(pdb_id, chain_id, outdir, force_rerun=False):
    outfile = '{}{}_pdbflex_stats.json'.format(pdb_id, chain_id)
    pdbflex_link = 'http://pdbflex.org/php/api/PDBStats.php?pdbID={}&chainID={}'.format(pdb_id, chain_id)
    infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir,
                                        force_rerun_flag=force_rerun)

    # TODO: will running with chain ID always return a single item list?
    assert len(infolist) == 1

    newdict = {}
    for k, v in infolist[0].items():
        if k == 'avgRMSD' and v:
            newdict[k] = float(v)
        elif k == 'maxRMSD' and v:
            newdict[k] = float(v)
        else:
            newdict[k] = v

    return newdict


def get_pdbflex_rmsd_profile(pdb_id, chain_id, outdir, force_rerun=False):
    outfile = '{}{}_pdbflex_rmsdprofile.json'.format(pdb_id, chain_id)
    pdbflex_link = 'http://pdbflex.org/php/api/rmsdProfile.php?pdbID={}&chainID={}'.format(pdb_id, chain_id)
    infodict = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir,
                                        force_rerun_flag=force_rerun)

    infodict['profile'] = [float(x) for x in infodict['profile'].strip('[]').split(',')]

    return infodict


def get_pdbflex_representatives(pdb_id, chain_id, outdir, force_rerun=False):
    outfile = '{}{}_pdbflex_representatives.json'.format(pdb_id, chain_id)
    pdbflex_link = 'http://pdbflex.org/php/api/representatives.php?pdbID={}&chainID={}'.format(pdb_id, chain_id)
    infolist = ssbio.utils.request_json(link=pdbflex_link, outfile=outfile, outdir=outdir,
                                        force_rerun_flag=force_rerun)

    # infolist = [str(x) for x in infolist.strip('[]').split(',')]

    return infolist
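A short usage sketch of the module's own functions, assuming ssbio is installed and pdbflex.org is reachable; the '1a50'/'A' identifiers come from the examples in the comments above.

# Fetch cached-or-fresh flexibility stats and the per-residue RMSD profile.
stats = get_pdbflex_info('1a50', 'A', outdir='/tmp')
print(stats['avgRMSD'], stats['flexibilityLabel'])

profile = get_pdbflex_rmsd_profile('1a50', 'A', outdir='/tmp')
print(len(profile['profile']))  # RMSD values already converted to floats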
40.275862
161
0.627854
467
3,504
4.599572
0.289079
0.039106
0.041899
0.050279
0.538641
0.538641
0.502328
0.477654
0.398976
0.389199
0
0.051699
0.227169
3,504
87
162
40.275862
0.741507
0.434646
0
0.225806
0
0
0.157403
0.044156
0
0
0
0.011494
0.032258
1
0.096774
false
0
0.096774
0
0.290323
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
4ac08cf9f315cf058d8ec6ec1e3e396023b3a1de
1,834
py
Python
desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py
yetsun/hue
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
[ "Apache-2.0" ]
5,079
2015-01-01T03:39:46.000Z
2022-03-31T07:38:22.000Z
desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py
yetsun/hue
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
[ "Apache-2.0" ]
1,623
2015-01-01T08:06:24.000Z
2022-03-30T19:48:52.000Z
desktop/core/ext-py/pyasn1-0.4.6/tests/type/test_namedval.py
yetsun/hue
2e48f0cc70e233ee0e1b40733d4b2a18d8836c66
[ "Apache-2.0" ]
2,033
2015-01-04T07:18:02.000Z
2022-03-28T19:55:47.000Z
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import sys

try:
    import unittest2 as unittest

except ImportError:
    import unittest

from tests.base import BaseTestCase

from pyasn1.type import namedval


class NamedValuesCaseBase(BaseTestCase):
    def setUp(self):
        BaseTestCase.setUp(self)
        self.e = namedval.NamedValues(('off', 0), ('on', 1))

    def testDict(self):
        assert set(self.e.items()) == set([('off', 0), ('on', 1)])
        assert set(self.e.keys()) == set(['off', 'on'])
        assert set(self.e) == set(['off', 'on'])
        assert set(self.e.values()) == set([0, 1])
        assert 'on' in self.e and 'off' in self.e and 'xxx' not in self.e
        assert 0 in self.e and 1 in self.e and 2 not in self.e

    def testInit(self):
        assert namedval.NamedValues(off=0, on=1) == {'off': 0, 'on': 1}
        assert namedval.NamedValues('off', 'on') == {'off': 0, 'on': 1}
        assert namedval.NamedValues(('c', 0)) == {'c': 0}
        assert namedval.NamedValues('a', 'b', ('c', 0), d=1) == {'c': 0, 'd': 1, 'a': 2, 'b': 3}

    def testLen(self):
        assert len(self.e) == 2
        assert len(namedval.NamedValues()) == 0

    def testAdd(self):
        assert namedval.NamedValues(off=0) + namedval.NamedValues(on=1) == {'off': 0, 'on': 1}

    def testClone(self):
        assert namedval.NamedValues(off=0).clone(('on', 1)) == {'off': 0, 'on': 1}
        assert namedval.NamedValues(off=0).clone(on=1) == {'off': 0, 'on': 1}

    def testStrRepr(self):
        assert str(self.e)
        assert repr(self.e)


suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
31.084746
96
0.598691
259
1,834
4.19305
0.301158
0.064457
0.044199
0.051565
0.303867
0.289134
0.200737
0.116022
0.116022
0.116022
0
0.034843
0.217557
1,834
58
97
31.62069
0.721951
0.078517
0
0
0
0
0.045752
0
0
0
0
0
0.459459
1
0.189189
false
0
0.162162
0
0.378378
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
1
0
0
0
0
0
0
0
0
0
1
434e153d430f769d0af982184da673ab7f398f75
6,213
py
Python
terra/terra/emails.py
dymaxionlabs/platform
98fe893d4632d62fea3e2357f16d970014037cdf
[ "BSD-3-Clause" ]
null
null
null
terra/terra/emails.py
dymaxionlabs/platform
98fe893d4632d62fea3e2357f16d970014037cdf
[ "BSD-3-Clause" ]
null
null
null
terra/terra/emails.py
dymaxionlabs/platform
98fe893d4632d62fea3e2357f16d970014037cdf
[ "BSD-3-Clause" ]
null
null
null
import os
from datetime import date

from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from mailchimp3 import MailChimp


class Email:
    from_email = settings.DEFAULT_FROM_EMAIL
    subject = None
    template_name = 'basic'
    preview_text = ''
    templates_basedir = os.path.join(settings.BASE_DIR, 'templates')

    def __init__(self, recipients, language_code='en'):
        self.recipients = recipients
        self.language_code = language_code

    def send_mail(self):
        send_mail(self.subject,
                  self.body,
                  self.from_email,
                  self.recipients,
                  html_message=self.html_body)

    @property
    def body(self):
        return render_to_string(self.body_template, self.template_params)

    @property
    def html_body(self):
        return self._reformat_mailchimp_template(
            render_to_string(self.htmlbody_template, self.template_params))

    @property
    def body_template(self):
        return os.path.join(
            self.templates_basedir,
            '{name}.{lc}.txt'.format(name=self.template_name,
                                     lc=self.language_code))

    @property
    def htmlbody_template(self):
        return os.path.join(
            self.templates_basedir,
            '{name}.{lc}.html'.format(name=self.template_name,
                                      lc=self.language_code))

    @property
    def template_params(self):
        return {}

    def _reformat_mailchimp_template(self, html):
        """
        Replaces MailChimp variables for Django template variables, and do
        some post-processing.
        """
        for var, newvar in self.mc_variables.items():
            html = html.replace(str(var), str(newvar))
        return html

    @property
    def mc_variables(self):
        return {
            '*|MC:SUBJECT|*': self.subject,
            '*|MC_PREVIEW_TEXT|*': self.preview_text,
            '*|CURRENT_YEAR|*': date.today().year,
            '*|LIST:COMPANY|*': settings.COMPANY_NAME,
            '*|HTML:LIST_ADDRESS_HTML|*': settings.LIST_ADDRESS_HTML,
            '*|UNSUB|*': '%unsubscribe_url%',
            # Unused variables (for now):
            '*|IFNOT:ARCHIVE_PAGE|*': '',
            '*|LIST:DESCRIPTION|*': '',
            '*|END:IF|*': '',
        }


class EarlyAccessBetaEmail(Email):
    template_name = 'early_access_beta'

    @property
    def signup_url(self):
        return '{base_url}/signup?beta=1&email={email}'.format(
            base_url=settings.WEBCLIENT_URL,
            email=self.recipients[0])

    @property
    def subject(self):
        with translation.override(self.language_code):
            return _('validate your email')

    @property
    def template_params(self):
        return {**super().template_params, 'signup_url': self.signup_url}

    @property
    def mc_variables(self):
        return {**super().mc_variables, '*|SIGNUP_URL|*': self.signup_url}


class WelcomeEmail(Email):
    template_name = 'welcome'
    link = '{base_url}/login'.format(base_url=settings.WEBCLIENT_URL)

    def __init__(self, user, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.user = user

    @property
    def subject(self):
        with translation.override(self.language_code):
            return _('your account is ready') % {'name': self.first_name}

    @property
    def template_params(self):
        return {
            **super().template_params,
            'first_name': self.first_name,
            'link': self.link,
        }

    @property
    def mc_variables(self):
        return {
            **super().mc_variables,
            '*|FNAME|*': self.first_name,
            '*|TEXT:LINK|*': self.link,
        }

    @property
    def first_name(self):
        return self.user.first_name or self.user.username


class TrainingCompletedEmail(Email):
    template_name = 'training_completed'

    def __init__(self, estimator, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.estimator = estimator
        self.link = '{web_client_url}/models/new/od/select?id={uuid}'.format(
            web_client_url=settings.WEBCLIENT_URL,
            uuid=estimator.uuid)

    @property
    def subject(self):
        with translation.override(self.language_code):
            return _('training of your model completed')

    @property
    def template_params(self):
        return {
            **super().template_params,
            'name': self.estimator_name,
            'num_classes': self.num_classes,
            'link': self.link,
        }

    @property
    def mc_variables(self):
        return {
            **super().mc_variables,
            '*|NAME|*': self.estimator_name,
            '*|NUM_CLASSES|*': self.num_classes,
            '*|LINK|*': self.link,
        }

    @property
    def estimator_name(self):
        return self.estimator.name

    @property
    def num_classes(self):
        return len(self.estimator.classes)


class PredictionCompletedEmail(Email):
    template_name = 'prediction_completed'

    def __init__(self, estimator, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.estimator = estimator

    @property
    def subject(self):
        with translation.override(self.language_code):
            return _('prediction of your model completed')

    @property
    def template_params(self):
        return {
            **super().template_params,
            'name': self.estimator_name,
            'num_classes': self.num_classes,
        }

    @property
    def mc_variables(self):
        return {
            **super().mc_variables,
            '*|NAME|*': self.estimator_name,
            '*|NUM_CLASSES|*': self.num_classes,
        }

    @property
    def estimator_name(self):
        return self.estimator.name

    @property
    def num_classes(self):
        return len(self.estimator.classes)


def notify(subject, body='.'):
    send_mail(subject, body, 'damian@dymaxionlabs.com',
              ['monitor@dymaxionlabs.com'])
27.860987
77
0.600998
663
6,213
5.39819
0.208145
0.073764
0.033529
0.034926
0.507404
0.488684
0.43392
0.4247
0.4247
0.381112
0
0.000671
0.280863
6,213
222
78
27.986486
0.800358
0.018831
0
0.502924
0
0
0.11298
0.029688
0
0
0
0
0
1
0.181287
false
0
0.046784
0.116959
0.461988
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
4354188cd39459be1c39fa882aceb00bf1b969f5
1,683
py
Python
actions/lib/Template_Parser.py
pjimmybrcd/campus_ztp_nps
2ab266b32fbcddcbdf9031138aabc40942914c3a
[ "Apache-2.0" ]
null
null
null
actions/lib/Template_Parser.py
pjimmybrcd/campus_ztp_nps
2ab266b32fbcddcbdf9031138aabc40942914c3a
[ "Apache-2.0" ]
null
null
null
actions/lib/Template_Parser.py
pjimmybrcd/campus_ztp_nps
2ab266b32fbcddcbdf9031138aabc40942914c3a
[ "Apache-2.0" ]
null
null
null
""" Copyright 2016 Brocade Communications Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from jinja2 import Template, Environment, StrictUndefined, UndefinedError, meta class Template_Parser(object): def __init__(self, configuration_template_file, variables={}): ''' Loads the configuration file ''' self.profile = "" self.variables = variables try: with open(configuration_template_file, 'r') as f: self.profile = "".join(line for line in f) except: raise IOError("Template file '%s' not found!", configuration_template_file) def set_variables(self, variables): ''' Sets the variables ''' self.variables = variables def get_required_variables(self): ''' Returns a set of the required variables in the template ''' return meta.find_undeclared_variables(Environment().parse(self.profile)) def get_parsed_lines(self): ''' Returns a set of lines with all variables filed in ''' try: return Template(self.profile, undefined=StrictUndefined).render(self.variables) except UndefinedError as e: raise Exception(e)
39.139535
91
0.699346
215
1,683
5.390698
0.506977
0.051769
0.064711
0.02761
0.029336
0
0
0
0
0
0
0.00686
0.22044
1,683
42
92
40.071429
0.876524
0.434938
0
0.210526
0
0
0.032644
0
0
0
0
0
0
1
0.210526
false
0
0.052632
0
0.421053
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
435956da8c173c0f00fa6d13687b5307a4d9b2a5
499
py
Python
sync_ends/main.py
nirav1997/sync_ends
04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb
[ "MIT" ]
null
null
null
sync_ends/main.py
nirav1997/sync_ends
04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb
[ "MIT" ]
null
null
null
sync_ends/main.py
nirav1997/sync_ends
04e39ec26ac43ad4e6d4e1bdf685eb73c03b1dbb
[ "MIT" ]
null
null
null
import sys

sys.path.append("..")

from src.sync_ends_service import SyncEnd
from src.parser import Parser


def main():
    # get the arguments from command line
    parser = Parser()
    collection_name, api_key, trigger_interval, slack_channel, slack_token = parser.get_argumenets()

    sync_end = SyncEnd(api_key, collection_name, trigger_interval, slack_channel, slack_token)
    try:
        sync_end.start()
    except Exception as e:
        print(e)


if __name__ == "__main__":
    main()
22.681818
100
0.709419
67
499
4.940299
0.552239
0.042296
0.120846
0.163142
0.223565
0.223565
0
0
0
0
0
0
0.198397
499
21
101
23.761905
0.8275
0.07014
0
0
0
0
0.021645
0
0
0
0
0
0
1
0.071429
false
0
0.214286
0
0.285714
0.071429
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4364633db5685f14b086dbb59f77e9958e56ad15
2,913
py
Python
yampy/apis/groups.py
Kunal-Shah-Bose/yam-python
1d24b4b5c4bfb512804183efe741a2f7a75889e5
[ "Apache-2.0" ]
null
null
null
yampy/apis/groups.py
Kunal-Shah-Bose/yam-python
1d24b4b5c4bfb512804183efe741a2f7a75889e5
[ "Apache-2.0" ]
null
null
null
yampy/apis/groups.py
Kunal-Shah-Bose/yam-python
1d24b4b5c4bfb512804183efe741a2f7a75889e5
[ "Apache-2.0" ]
1
2019-01-10T18:50:35.000Z
2019-01-10T18:50:35.000Z
from yampy.apis.utils import ArgumentConverter, none_filter, stringify_booleans
from yampy.models import extract_id


class GroupsAPI(object):
    """
    Provides an interface for accessing the groups related endpoints of the
    Yammer API. You should not instantiate this class directly; use the
    :meth:`yampy.Yammer.groups` method instead.
    """

    def __init__(self, client):
        """
        Initializes a new GroupsAPI that will use the given client object
        to make HTTP requests.
        """
        self._client = client
        self._argument_converter = ArgumentConverter(
            none_filter, stringify_booleans,
        )

    def all(self, mine=None, reverse=None):
        """
        Returns all the groups in the current user's network.

        Customize the response using the keyword arguments:

        * mine -- Only return group of current user.
        * reverse -- return group in descending order by name.
        """
        return self._client.get("/groups", **self._argument_converter(
            mine=mine,
            reverse=reverse,
        ))

    def find(self, group_id):
        """
        Returns the group identified by the given group_id.
        """
        return self._client.get(self._group_path(group_id))

    def members(self, group_id, page=None, reverse=None):
        """
        Returns the group identified by the given group_id.

        Customize the response using the keyword arguments:

        * page -- Enable pagination, and return the nth page of 50 users.
        """
        path = "/group_memberships"
        return self._client.get(path, **self._argument_converter(
            page=page,
            reverse=reverse,
        ))

    def join(self, group_id):
        """
        Join the group identified by the given group_id.

        Return True
        """
        path = "/group_memberships"
        group_id = extract_id(group_id)
        return self._client.post(path, **self._argument_converter(
            group_id=group_id,
        ))

    def leave(self, group_id):
        """
        Leave the group identified by the given group_id.

        Return True
        """
        path = "/group_memberships"
        group_id = extract_id(group_id)
        return self._client.delete(path, **self._argument_converter(
            group_id=group_id,
        ))

    def create(self, name, private=False):
        """
        Create a group.

        Return Group info
        """
        path = "/groups"
        return self._client.post(path, **self._argument_converter(
            name=name,
            private=private,
        ))

    def delete(self, group_id):
        """
        Delete a group.

        Return True if success
        """
        return self._client.delete(self._group_path(group_id), delete="true")

    def _group_path(self, group_id):
        return "/groups/%d" % extract_id(group_id)
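A hedged usage sketch based on the class docstring above, which says to go through yampy.Yammer rather than instantiating GroupsAPI directly; the access token is a placeholder, and whether `groups` is exposed as a property or a method may vary by yampy version.

# Sketch only: obtain an authenticated Yammer client, then use the groups API.
import yampy

yammer = yampy.Yammer(access_token='YOUR_ACCESS_TOKEN')  # hypothetical token
my_groups = yammer.groups.all(mine=True)                 # assumes a `groups` accessor
yammer.groups.join(12345)                                # join group by id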
28.281553
79
0.599725
338
2,913
4.985207
0.295858
0.08724
0.066469
0.047478
0.389911
0.308012
0.308012
0.255786
0.226706
0.123442
0
0.000994
0.309303
2,913
102
80
28.558824
0.836481
0.310333
0
0.372093
0
0
0.047647
0
0
0
0
0
0
1
0.209302
false
0
0.046512
0.023256
0.465116
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
4368cab14b8dd0a73f5639ed6a1c9ef3a5f4c07f
354
py
Python
krispy/mod_user/models.py
jlaura/krispy
b1b2bf8a3e315608152c7dad15d384d0669f5e27
[ "0BSD" ]
2
2016-03-31T05:35:28.000Z
2017-04-12T00:11:59.000Z
krispy/mod_user/models.py
jlaura/krispy
b1b2bf8a3e315608152c7dad15d384d0669f5e27
[ "0BSD" ]
null
null
null
krispy/mod_user/models.py
jlaura/krispy
b1b2bf8a3e315608152c7dad15d384d0669f5e27
[ "0BSD" ]
null
null
null
from app import db
from flask.ext.login import UserMixin


class User(UserMixin, db.Model):
    __tablename__ = 'oauth2users'
    id = db.Column(db.Integer, primary_key=True)
    social_id = db.Column(db.String(64), nullable=False, unique=True)
    nickname = db.Column(db.String(64), nullable=False)
    email = db.Column(db.String(64), nullable=True)
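A usage sketch under Flask-SQLAlchemy conventions; it assumes the app context is active and the table has been created, and the field values are invented for illustration.

# Create and persist a user, then load it back by primary key.
user = User(social_id='google$12345', nickname='jlaura', email='jlaura@example.com')
db.session.add(user)
db.session.commit()

found = User.query.get(user.id)  # Flask-Login can then manage this user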
29.5
69
0.717514
52
354
4.769231
0.519231
0.129032
0.16129
0.193548
0.354839
0.354839
0.25
0
0
0
0
0.023333
0.152542
354
11
70
32.181818
0.803333
0
0
0
0
0
0.03125
0
0
0
0
0
0
1
0
false
0
0.25
0
1
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
4369ad9700348a9af2bc92b402bcac16112c9914
16,746
py
Python
blog_app/blog/views.py
flxj/Django_blog
01eb12553335115fee5faecafe8cacf2f0615135
[ "MIT" ]
1
2019-03-27T02:24:22.000Z
2019-03-27T02:24:22.000Z
blog_app/blog/views.py
flxj/Django_blog
01eb12553335115fee5faecafe8cacf2f0615135
[ "MIT" ]
null
null
null
blog_app/blog/views.py
flxj/Django_blog
01eb12553335115fee5faecafe8cacf2f0615135
[ "MIT" ]
null
null
null
import markdown
from comments.forms import CommentForm, BookCommentForm, MovieCommentForm
from django.shortcuts import render, get_object_or_404
from .models import Post, Category, Tag, Book, Movie
# from django.http import HttpResponse
from django.views.generic import ListView, DetailView
from django.utils.text import slugify
from markdown.extensions.toc import TocExtension
from django.db.models import Q

"""
def index(request):
    # post_list = Post.objects.all().order_by('-created_time')
    post_list = Post.objects.all()
    return render(request, 'blog/index.html', context={'post_list': post_list})
"""


class IndexView(ListView):
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        """
        In a view function, template variables are passed to the template via the
        context argument of render, e.g.
        render(request, 'blog/index.html', context={'post_list': post_list}),
        which passes the dict {'post_list': post_list} to the template.
        In a class-based view, that dict of template variables is produced by
        get_context_data, so we override it here to inject our own variables.
        """
        # First get the context dict generated by the parent class.
        context = super().get_context_data(**kwargs)

        # The parent's dict already contains paginator, page_obj and is_paginated:
        # paginator is a Paginator instance,
        # page_obj is a Page instance,
        # is_paginated is a boolean indicating whether the results are paginated.
        # For example, with 10 items per page but only 5 items in total, no
        # pagination is needed and is_paginated=False.
        # See "Django Pagination 简单分页" (http://zmrenwu.com/post/34/) for details
        # on Paginator and Page.
        # context is a dict, so use get() to pull out individual values.
        paginator = context.get('paginator')
        page = context.get('page_obj')
        is_paginated = context.get('is_paginated')

        # Call our own pagination_data method (below) to get the data needed to
        # display the pagination navigation bar.
        pagination_data = self.pagination_data(paginator, page, is_paginated)

        # Merge the navigation bar variables into context; note that
        # pagination_data also returns a dict.
        context.update(pagination_data)

        # Return the updated context so ListView renders the template with it;
        # it now includes everything the navigation bar needs.
        return context

    def pagination_data(self, paginator, page, is_paginated):
        if not is_paginated:
            # No pagination, so no navigation bar data is needed; return an empty dict.
            return {}

        # Consecutive page numbers to the left of the current page, initially empty.
        left = []

        # Consecutive page numbers to the right of the current page, initially empty.
        right = []

        # Whether an ellipsis is needed after the page 1 marker.
        left_has_more = False

        # Whether an ellipsis is needed before the last page marker.
        right_has_more = False

        # Whether page 1 must be shown explicitly. If the left block already
        # contains page 1, there is no need to show it again; otherwise it is
        # always shown. Initially False.
        first = False

        # Whether the last page must be shown explicitly; same reasoning as above.
        last = False

        # Page number currently requested by the user.
        page_number = page.number

        # Total number of pages after pagination.
        total_pages = paginator.num_pages

        # Full page range, e.g. [1, 2, 3, 4] for four pages.
        page_range = paginator.page_range

        if page_number == 1:
            # The user requested page 1, so nothing is needed on the left and
            # left=[] (the default) stands; only the right block is computed.
            # With a page range of [1, 2, 3, 4] this yields right = [2, 3].
            # Only the two pages after the current one are taken; change this
            # number to show more.
            right = page_range[page_number:page_number + 2]

            # If the rightmost page number is smaller than the last page number
            # minus 1, there are further pages between them, so show an
            # ellipsis, signalled by right_has_more.
            if right[-1] < total_pages - 1:
                right_has_more = True

            # If the rightmost page number is smaller than the last page number,
            # the right block does not include the last page, so it must be
            # shown explicitly, signalled by last.
            if right[-1] < total_pages:
                last = True

        elif page_number == total_pages:
            # The user requested the last page, so nothing is needed on the
            # right and right=[] (the default) stands; only the left block is
            # computed, e.g. left = [2, 3] for a page range of [1, 2, 3, 4].
            # Only the two pages before the current one are taken; change this
            # number to show more.
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]

            # If the leftmost page number is greater than 2, there are further
            # pages between it and page 1, so show an ellipsis, signalled by
            # left_has_more.
            if left[0] > 2:
                left_has_more = True

            # If the leftmost page number is greater than 1, the left block does
            # not include page 1, so it must be shown explicitly, signalled by first.
            if left[0] > 1:
                first = True

        else:
            # The user requested neither the first nor the last page, so both
            # blocks are computed, taking the two pages on each side of the
            # current one; change this number to show more.
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            right = page_range[page_number:page_number + 2]

            # Whether the last page and the ellipsis before it are needed.
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True

            # Whether page 1 and the ellipsis after it are needed.
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True

        data = {
            'left': left,
            'right': right,
            'left_has_more': left_has_more,
            'right_has_more': right_has_more,
            'first': first,
            'last': last,
        }

        return data


# Show the full post
"""
def detail(request, pk):
    post = get_object_or_404(Post, pk=pk)

    # Increase the view count by 1
    post.increase_views()

    post.body = markdown.markdown(post.body,
                                  extensions=[
                                      'markdown.extensions.extra',
                                      'markdown.extensions.codehilite',
                                      'markdown.extensions.toc',
                                      'markdown.extensions.tables',
                                  ])
    form = CommentForm()
    # Get all comments under this post
    comment_list = post.comment_set.all()

    # Pass the post, the form and the post's comment list to the
    # detail.html template as template variables so it can render them.
    context = {'post': post,
               'form': form,
               'comment_list': comment_list
               }
    return render(request, 'blog/detail.html', context=context)
"""


class PostDetailView(DetailView):
    model = Post
    template_name = 'blog/detail.html'
    context_object_name = 'post'

    def get(self, request, *args, **kwargs):
        # get is overridden because every visit to a post must increase its
        # view count by 1. get returns an HttpResponse instance. The parent's
        # get must be called first because self.object (the Post instance for
        # the visited post) only exists after that call.
        response = super(PostDetailView, self).get(request, *args, **kwargs)

        # Increase the view count by 1; note that self.object is the visited post.
        self.object.increase_views()

        # A view must return an HttpResponse object.
        return response

    def get_object(self, queryset=None):
        # get_object is overridden because the post body needs to be rendered.
        post = super(PostDetailView, self).get_object(queryset=None)
        # Markdown was disabled here at first: text rendered by markdown and
        # then by MathJax becomes unreadable, but without markdown rendering,
        # code blocks do not display properly. So for posts with formulas,
        # follow the MathJax sample format to keep them from breaking after
        # markdown rendering.
        md = markdown.Markdown(extensions=[
            'markdown.extensions.extra',
            'markdown.extensions.codehilite',
            'markdown.extensions.toc',
            TocExtension(slugify=slugify),
        ])
        post.body = md.convert(post.body)
        post.toc = md.toc
        return post

    def get_context_data(self, **kwargs):
        # get_context_data is overridden because, besides the post itself
        # (which DetailView already provides), the comment form and the post's
        # comment list also need to be passed to the template.
        context = super(PostDetailView, self).get_context_data(**kwargs)
        form = CommentForm()
        comment_list = self.object.comment_set.all()
        context.update({
            'form': form,
            'comment_list': comment_list
        })
        return context


# Browse archives
"""
def archives(request, year, month):
    post_list = Post.objects.filter(created_time__year=year,
                                    created_time__month=month
                                    ).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': post_list})
"""


class ArchivesView(ListView):
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'

    def get_queryset(self):
        year = self.kwargs.get('year')
        month = self.kwargs.get('month')
        return super(ArchivesView, self).get_queryset().filter(created_time__year=year,
                                                               created_time__month=month
                                                               )


# Browse posts by category
"""
def category(request, pk):
    cate = get_object_or_404(Category, pk=pk)
    post_list = Post.objects.filter(category=cate).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': post_list})
"""


class CategoryView(ListView):
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'

    def get_queryset(self):
        cate = get_object_or_404(Category, pk=self.kwargs.get('pk'))
        return super(CategoryView, self).get_queryset().filter(category=cate)


# Browse posts by tag
class TagView(ListView):
    model = Post
    template_name = 'blog/index.html'
    context_object_name = 'post_list'

    def get_queryset(self):
        tag = get_object_or_404(Tag, pk=self.kwargs.get('pk'))
        return super(TagView, self).get_queryset().filter(tags=tag)


# Post search
def search(request):
    q = request.GET.get('q')
    error_msg = ''

    if not q:
        error_msg = "请输入关键词"
        return render(request, 'blog/index.html', {'error_msg': error_msg})

    post_list = Post.objects.filter(Q(title__icontains=q) | Q(body__icontains=q))
    return render(request, 'blog/index.html', {'error_msg': error_msg,
                                               'post_list': post_list})


# Browse book reviews
class BookView(ListView):
    model = Book
    template_name = 'blog/book.html'
    context_object_name = 'book_list'
    paginate_by = 20

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        paginator = context.get('paginator')
        page = context.get('page_obj')
        is_paginated = context.get('is_paginated')
        pagination_data = self.pagination_data(paginator, page, is_paginated)
        context.update(pagination_data)
        return context

    def pagination_data(self, paginator, page, is_paginated):
        if not is_paginated:
            return {}

        left = []
        right = []
        left_has_more = False
        right_has_more = False
        first = False
        last = False
        page_number = page.number
        total_pages = paginator.num_pages
        page_range = paginator.page_range

        if page_number == 1:
            right = page_range[page_number:page_number + 2]
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True
        elif page_number == total_pages:
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True
        else:
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            right = page_range[page_number:page_number + 2]
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True

        data = {
            'left': left,
            'right': right,
            'left_has_more': left_has_more,
            'right_has_more': right_has_more,
            'first': first,
            'last': last,
        }

        return data


class BookDetailView(DetailView):
    model = Book
    template_name = 'blog/bookdetail.html'
    context_object_name = 'book'

    def get_object(self, queryset=None):
        # get_object is overridden because the book's review needs to be rendered.
        book = super(BookDetailView, self).get_object(queryset=None)
        md = markdown.Markdown(extensions=[
            'markdown.extensions.extra',
            'markdown.extensions.codehilite',
            # 'markdown.extensions.toc',
            # TocExtension(slugify=slugify),
        ])
        book.review = md.convert(book.review)
        # book.toc = md.toc
        return book

    def get_context_data(self, **kwargs):
        context = super(BookDetailView, self).get_context_data(**kwargs)
        form = BookCommentForm()
        comment_list = self.object.bookcomment_set.all()
        context.update({
            'form': form,
            'comment_list': comment_list
        })
        return context


# Book review archives
class BookArchivesView(ListView):
    model = Book
    template_name = 'blog/book.html'
    context_object_name = 'book_list'

    def get_queryset(self):
        year = self.kwargs.get('year')
        month = self.kwargs.get('month')
        return super(BookArchivesView, self).get_queryset().filter(created_time__year=year,
                                                                   created_time__month=month
                                                                   )


### Film reviews
class FilmView(ListView):
    model = Movie
    template_name = 'blog/film.html'
    context_object_name = 'film_list'
    paginate_by = 36

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        paginator = context.get('paginator')
        page = context.get('page_obj')
        is_paginated = context.get('is_paginated')
        pagination_data = self.pagination_data(paginator, page, is_paginated)
        context.update(pagination_data)
        return context

    def pagination_data(self, paginator, page, is_paginated):
        if not is_paginated:
            return {}

        left = []
        right = []
        left_has_more = False
        right_has_more = False
        first = False
        last = False
        page_number = page.number
        total_pages = paginator.num_pages
        page_range = paginator.page_range

        if page_number == 1:
            right = page_range[page_number:page_number + 2]
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True
        elif page_number == total_pages:
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True
        else:
            left = page_range[(page_number - 3) if (page_number - 3) > 0 else 0:page_number - 1]
            right = page_range[page_number:page_number + 2]
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True

        data = {
            'left': left,
            'right': right,
            'left_has_more': left_has_more,
            'right_has_more': right_has_more,
            'first': first,
            'last': last,
        }

        return data


class FilmDetailView(DetailView):
    model = Movie
    template_name = 'blog/filmdetail.html'
    context_object_name = 'film'

    def get_object(self, queryset=None):
        film = super(FilmDetailView, self).get_object(queryset=None)
        md = markdown.Markdown(extensions=[
            'markdown.extensions.extra',
            'markdown.extensions.codehilite',
            # 'markdown.extensions.toc',
            # TocExtension(slugify=slugify),
        ])
        film.review = md.convert(film.review)
        # film.toc = md.toc
        return film

    def get_context_data(self, **kwargs):
        context = super(FilmDetailView, self).get_context_data(**kwargs)
        form = MovieCommentForm()
        comment_list = self.object.moviecomment_set.all()
        context.update({
            'form': form,
            'comment_list': comment_list
        })
        return context


# Film review archives
class FilmArchivesView(ListView):
    model = Movie
    template_name = 'blog/film.html'
    context_object_name = 'film_list'

    def get_queryset(self):
        year = self.kwargs.get('year')
        month = self.kwargs.get('month')
        return super(FilmArchivesView, self).get_queryset().filter(created_time__year=year,
                                                                   created_time__month=month
                                                                   )


def about(request):
    return render(request, 'blog/about.html')
33.967546
96
0.582706
1,813
16,746
5.184225
0.163817
0.044686
0.018725
0.024258
0.604107
0.571763
0.545803
0.528035
0.513565
0.498457
0
0.012035
0.320256
16,746
493
97
33.967546
0.81367
0.15341
0
0.746711
0
0
0.066185
0.0151
0
0
0
0
0
1
0.065789
false
0
0.026316
0.003289
0.325658
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
436c11b07a0ae268fa5c1da96fe20213b0b714a7
3,501
py
Python
aiogram/types/inline_query.py
SvineruS/aiogram
7892edf45302fa195544430ac5db11dcbcbf7ae6
[ "MIT" ]
1
2021-01-10T18:04:25.000Z
2021-01-10T18:04:25.000Z
aiogram/types/inline_query.py
SvineruS/aiogram
7892edf45302fa195544430ac5db11dcbcbf7ae6
[ "MIT" ]
5
2021-02-13T14:30:27.000Z
2021-02-13T17:27:58.000Z
aiogram/types/inline_query.py
SvineruS/aiogram
7892edf45302fa195544430ac5db11dcbcbf7ae6
[ "MIT" ]
1
2022-02-10T14:57:27.000Z
2022-02-10T14:57:27.000Z
import typing

from . import base
from . import fields
from .inline_query_result import InlineQueryResult
from .location import Location
from .user import User


class InlineQuery(base.TelegramObject):
    """
    This object represents an incoming inline query.

    When the user sends an empty query, your bot could return some default or
    trending results.

    https://core.telegram.org/bots/api#inlinequery
    """
    id: base.String = fields.Field()
    from_user: User = fields.Field(alias='from', base=User)
    location: Location = fields.Field(base=Location)
    query: base.String = fields.Field()
    offset: base.String = fields.Field()

    async def answer(self,
                     results: typing.List[InlineQueryResult],
                     cache_time: typing.Optional[base.Integer] = None,
                     is_personal: typing.Optional[base.Boolean] = None,
                     next_offset: typing.Optional[base.String] = None,
                     switch_pm_text: typing.Optional[base.String] = None,
                     switch_pm_parameter: typing.Optional[base.String] = None):
        """
        Use this method to send answers to an inline query.
        No more than 50 results per query are allowed.

        Source: https://core.telegram.org/bots/api#answerinlinequery

        :param results: A JSON-serialized array of results for the inline query
        :type results: :obj:`typing.List[types.InlineQueryResult]`
        :param cache_time: The maximum amount of time in seconds that the result
            of the inline query may be cached on the server. Defaults to 300.
        :type cache_time: :obj:`typing.Optional[base.Integer]`
        :param is_personal: Pass True, if results may be cached on the server
            side only for the user that sent the query. By default, results may
            be returned to any user who sends the same query
        :type is_personal: :obj:`typing.Optional[base.Boolean]`
        :param next_offset: Pass the offset that a client should send in the
            next query with the same text to receive more results. Pass an
            empty string if there are no more results or if you don't support
            pagination. Offset length can't exceed 64 bytes.
        :type next_offset: :obj:`typing.Optional[base.String]`
        :param switch_pm_text: If passed, clients will display a button with
            specified text that switches the user to a private chat with the
            bot and sends the bot a start message with the parameter
            switch_pm_parameter
        :type switch_pm_text: :obj:`typing.Optional[base.String]`
        :param switch_pm_parameter: Deep-linking parameter for the /start
            message sent to the bot when user presses the switch button.
            1-64 characters, only A-Z, a-z, 0-9, _ and - are allowed.
        :type switch_pm_parameter: :obj:`typing.Optional[base.String]`
        :return: On success, True is returned
        :rtype: :obj:`base.Boolean`
        """
        return await self.bot.answer_inline_query(self.id,
                                                  results=results,
                                                  cache_time=cache_time,
                                                  is_personal=is_personal,
                                                  next_offset=next_offset,
                                                  switch_pm_text=switch_pm_text,
                                                  switch_pm_parameter=switch_pm_parameter)
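A hypothetical aiogram v2-style handler showing answer() in use; `dp` is assumed to be a Dispatcher created elsewhere, and the article content is invented for illustration.

# Sketch only: echoes the inline query text back as a single article result.
from aiogram import types

@dp.inline_handler()
async def echo_inline(inline_query: types.InlineQuery):
    article = types.InlineQueryResultArticle(
        id='1',
        title='Echo',
        input_message_content=types.InputTextMessageContent(inline_query.query or 'empty'),
    )
    # Calls bot.answer_inline_query under the hood, as defined above.
    await inline_query.answer([article], cache_time=300, is_personal=True)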
52.253731
118
0.625821
445
3,501
4.826966
0.32809
0.040968
0.083799
0.067039
0.15689
0.116387
0.070764
0.037244
0
0
0
0.004928
0.304484
3,501
66
119
53.045455
0.877207
0.053985
0
0
0
0
0.002805
0
0
0
0
0
0
1
0
true
0
0.230769
0
0.5
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
437021d671825e959375a0374106a655349dffb0
7,803
py
Python
PassWord.py
IQUBE-X/passGenerator
a56a5928c1e8ee503d2757ecf0ab4108a52ec677
[ "MIT" ]
1
2020-07-11T07:59:54.000Z
2020-07-11T07:59:54.000Z
PassWord.py
dhruvaS-hub/passGenerator
a56a5928c1e8ee503d2757ecf0ab4108a52ec677
[ "MIT" ]
null
null
null
PassWord.py
dhruvaS-hub/passGenerator
a56a5928c1e8ee503d2757ecf0ab4108a52ec677
[ "MIT" ]
1
2021-06-02T10:11:19.000Z
2021-06-02T10:11:19.000Z
# PassWord - The Safe Password Generator App!

# importing the tkinter module for GUI
from tkinter import *
# importing the message box widget from tkinter
from tkinter import messagebox
# importing sqlite3 for database
import sqlite3
# importing random for password generation
import random

# creating fonts
font = ('Fixedsys', 10)
font2 = ('Comic Sans MS', 9)
font3 = ('System', 9)
font4 = ('Two Cen MT', 9)

# creating a database and establishing a connection
conn = sqlite3.connect('password.db')
# creating a cursor to navigate through database
c = conn.cursor()
# creating the table
'''
c.execute("""CREATE TABLE passwords (
    password text
)""")
'''

# defining the root variable
root = Tk()
# Naming the app
root.title('PassWord')

# creating a label frame to organize content
label_frame = LabelFrame(root, padx=10, pady=10, text='Password Generator', font=font)
# printing the label frame onto the screen or window
label_frame.grid(row=0, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)

# creating a separate label frame to perform delete functions
delete_labelframe = LabelFrame(root, text='Delete Password', padx=10, pady=10, font=font4)
# printing delete labelframe onto the screen
delete_labelframe.grid(row=5, column=0, columnspan=1, padx=10, pady=10, sticky=E + W)

# making the text box where password is going to be displayed
e = Entry(label_frame, fg='black', bg='white')
# printing the text box to the screen
e.grid(row=0, column=0, padx=10, pady=10, columnspan=1)

# (for the delete function) to give information on input for delete function
info = Label(delete_labelframe, text='Password ID', fg='black', font=font2)
# printing the label onto the screen
info.grid(row=6, column=0, pady=10)

# making the entry for user to input which password
e2 = Entry(delete_labelframe, fg='black', bg='white')
# printing the entry onto the screen
e2.grid(row=6, column=1, pady=10)


# making the password generate function
def generate():
    # creating lists (commas added between all items; the original used adjacent
    # string literals like 'e' 'f' 'g', which silently concatenate into 'efg'
    # and drop characters from the pool)
    lowercase_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
                         'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
    uppercase_letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
                         'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    symbols_list = ['-', '@', '!', '$', '%', '&', '?', '#', '^']
    numbers_list = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']

    # generating a random value from each list
    lowercase_letter = random.choice(lowercase_letters)
    lowercase_letter2 = random.choice(lowercase_letters)
    uppercase_letter = random.choice(uppercase_letters)
    uppercase2_letter = random.choice(uppercase_letters)
    symbol = random.choice(symbols_list)
    symbol2 = random.choice(symbols_list)
    number = random.choice(numbers_list)
    number2 = random.choice(numbers_list)

    # creating a password list made of random values from previous lists
    password = [lowercase_letter, uppercase_letter, uppercase2_letter, lowercase_letter2,
                symbol, symbol2, number, number2]
    # shuffling password list
    password1 = random.sample(password, 8)
    # concatenating and making the final password
    final_password = password1[0] + password1[1] + password1[2] + password1[3] + \
        password1[4] + password1[5] + password1[6] + password1[7]
    # deleting previous item from entry
    e.delete(0, END)
    # inserting the final password
    e.insert(0, final_password)


# making a function to save the password into the database
def save_password():
    conn = sqlite3.connect('password.db')
    c = conn.cursor()
    c.execute("INSERT INTO passwords VALUES (?)", (e.get(),))
    e.delete(0, END)
    conn.commit()
    conn.close()


# making a function to show all the saved passwords
def show_password():
    global passcode_label
    conn = sqlite3.connect('password.db')
    c = conn.cursor()
    c.execute("SELECT rowid, * FROM passwords")
    passcodes = c.fetchall()
    print_code = ''
    for passcode in passcodes:
        print_code += str(passcode[0]) + '.' + ' ' + str(passcode[1]) + '\n'
    passcode_label = Text(label_frame, height=15, width=25)
    passcode_label.configure(state='normal')
    passcode_label.insert(1.0, print_code)
    passcode_label.grid(row=5, column=0, padx=10, pady=10)
    passcode_label.configure(state='disabled')
    conn.commit()
    conn.close()


# making a function to hide the saved passwords
def hide_password():
    passcode_label.destroy()


# making a function to delete passwords from database
def delete():
    conn = sqlite3.connect('password.db')
    c = conn.cursor()
    c.execute("DELETE from passwords WHERE oid = (?)", (e2.get(),))
    e2.delete(0, END)
    passcode_label.destroy()
    conn.commit()
    conn.close()


# making a function to delete all the passwords in the database
def delete_all():
    global number_of_passwords
    conn = sqlite3.connect('password.db')
    c = conn.cursor()
    c.execute("SELECT rowid FROM passwords")
    number_of_passwords = c.fetchall()
    num_of_passwords = len(number_of_passwords)
    confirmation = messagebox.askyesno(
        'Delete All Passwords?',
        'You have chosen to delete ' + str(num_of_passwords) +
        ' passwords. This action cannot be reversed. Do you wish to proceed?')
    if confirmation == 1:
        c.execute("DELETE FROM passwords")
    conn.commit()
    conn.close()


# button for generating password
generate_password = Button(label_frame, text='Generate Strong Password', command=generate, font=font2)
# printing the button onto the screen
generate_password.grid(row=1, padx=10, pady=10, column=0)

# button to save password
save = Button(label_frame, text='Save Password', command=save_password, font=font2)
# printing the button onto the screen
save.grid(row=2, padx=10, pady=10, column=0)

# making a button to show all the passwords
show = Button(label_frame, text='Show Passwords', command=show_password, font=font2)
# printing the button onto the screen
show.grid(row=4, padx=10, pady=10, column=0)

# making a button to hide the shown passwords
hide = Button(label_frame, text='Hide Passwords', command=hide_password, font=font2)
# printing the button onto the screen
hide.grid(row=6, column=0, padx=10, pady=10)

# making a button to delete a password (this name shadows the delete() function
# above, but command=delete was bound before the reassignment, so it still works)
delete = Button(delete_labelframe, text='Delete Password', command=delete, font=font2)
# printing the button onto the screen
delete.grid(row=8, padx=10, pady=10, column=1)

# making a button to delete all the passwords
delete_all = Button(delete_labelframe, text='Delete All', command=delete_all, fg='dark red', width=20,
                    anchor=CENTER, font=font3)
# printing the button onto the screen
delete_all.grid(row=9, column=1, padx=10, pady=10, ipadx=15)

# committing the changes to the database
conn.commit()
# closing the connection with database
conn.close()

# making the final loop
root.mainloop()
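The generator above relies on random, which is fine for a demo but not cryptographically secure. A minimal sketch of a stronger variant using Python's secrets module (new code, not from the app):

import secrets
import string

def generate_secure(length=8):
    # secrets.choice draws from the OS CSPRNG, unlike random.choice
    alphabet = string.ascii_letters + string.digits + '-@!$%&?#^'
    return ''.join(secrets.choice(alphabet) for _ in range(length))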
32.648536
134
0.656927
1,065
7,803
4.748357
0.213146
0.016611
0.023729
0.028475
0.371564
0.307099
0.282183
0.273878
0.2193
0.191418
0
0.026251
0.22376
7,803
238
135
32.785714
0.808651
0.301679
0
0.222222
0
0
0.12163
0
0
0
0
0
0
1
0.055556
false
0.407407
0.037037
0
0.092593
0.027778
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
437727aaebd2b60da03893cf1960a1dac044f4b8
14,215
py
Python
train.py
MEfeTiryaki/trpo
e1c7bc25165730afa60d9733555398e078a13e67
[ "MIT" ]
2
2020-03-26T23:36:41.000Z
2020-03-27T03:04:27.000Z
train.py
MEfeTiryaki/trpo
e1c7bc25165730afa60d9733555398e078a13e67
[ "MIT" ]
null
null
null
train.py
MEfeTiryaki/trpo
e1c7bc25165730afa60d9733555398e078a13e67
[ "MIT" ]
1
2020-03-27T03:04:28.000Z
2020-03-27T03:04:28.000Z
import argparse
from itertools import count
import signal
import sys
import os
import time

import numpy as np

import gym
import torch
import torch.autograd as autograd
from torch.autograd import Variable
import scipy.optimize
import matplotlib.pyplot as plt

from value import Value
from policy import Policy
from utils import *
from trpo import trpo_step

parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
# Algorithm Parameters
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
                    help='discount factor (default: 0.995)')
parser.add_argument('--lambda-', type=float, default=0.97, metavar='G',
                    help='gae (default: 0.97)')
# Value Function Learning Parameters
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G',
                    help='(NOT USED) l2 regularization regression (default: 1e-3)')
parser.add_argument('--val-opt-iter', type=int, default=200, metavar='G',
                    help='iteration number for value function learning (default: 200)')
parser.add_argument('--lr', type=float, default=1e-3, metavar='G',
                    help='learning rate for value function (default: 1e-3)')
parser.add_argument('--value-memory', type=int, default=1, metavar='G',
                    help='ratio of past value to be used to batch size (default: 1)')
parser.add_argument('--value-memory-shuffle', action='store_true',
                    help='if not shuffled latest memory stay')  # TODO: implement
# Policy Optimization parameters
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G',
                    help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
                    help='damping (default: 1e-1)')
parser.add_argument('--fisher-ratio', type=float, default=1, metavar='G',
                    help='ratio of data to calculate fisher vector product (default: 1)')
# Environment parameters
parser.add_argument('--env-name', default="Pendulum-v0", metavar='G',
                    help='name of the environment to run')
parser.add_argument('--seed', type=int, default=543, metavar='N',
                    help='random seed (default: 1)')
# Training length
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
                    help='number of steps per iteration')
parser.add_argument('--episode-length', type=int, default=1000, metavar='N',
                    help='max step size for one episode')
parser.add_argument('--max-iteration-number', type=int, default=200, metavar='N',
                    help='max policy iteration number')
# Rendering
parser.add_argument('--render', action='store_true',
                    help='render the environment')
# Logging
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
                    help='interval between training status logs (default: 10)')
parser.add_argument('--log', action='store_true',
                    help='log the results at the end')
parser.add_argument('--log-dir', type=str, default=".", metavar='N',
                    help='log directory')
parser.add_argument('--log-prefix', type=str, default="log", metavar='N',
                    help='log file prefix')
# Load
parser.add_argument('--load', action='store_true', help='load models')
parser.add_argument('--save', action='store_true', help='save models')
parser.add_argument('--load-dir', type=str, default=".", metavar='N', help='')

args = parser.parse_args()

env = gym.make(args.env_name)
env.seed(args.seed)

num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]

torch.set_printoptions(profile="full")

if args.load:
    policy_net = Policy(num_inputs, num_actions, 30)
    value_net = Value(num_inputs, 30)
    set_flat_params_to(value_net, loadParameterCsv(args.load_dir + "/ValueNet"))
    set_flat_params_to(policy_net, loadParameterCsv(args.load_dir + "/PolicyNet"))
    print("Networks are loaded from " + args.load_dir + "/")
else:
    policy_net = Policy(num_inputs, num_actions, 30)
    value_net = Value(num_inputs, 30)


def signal_handler(sig, frame):
    """ Signal Handler to save the networks when shutting down via ctrl+C

    Parameters:
    Returns:
    """
    if args.save:
        valueParam = get_flat_params_from(value_net)
        policyParam = get_flat_params_from(policy_net)
        saveParameterCsv(valueParam, args.load_dir + "/ValueNet")
        saveParameterCsv(policyParam, args.load_dir + "/PolicyNet")
        print("Networks are saved in " + args.load_dir + "/")

    print('Closing!!')
    env.close()
    sys.exit(0)


def prepare_data(batch, valueBatch, previousBatch):
    """ Get the batch data and calculate value, return and generalized advantage

    Detail: TODO
    Parameters:
        batch (dict of arrays of numpy) : TODO
        valueBatch (dict of arrays of numpy) : TODO
        previousBatch (dict of arrays of numpy) : TODO
    Returns:
    """
    # TODO : more description above
    stateList = [torch.from_numpy(np.concatenate(x, axis=0)) for x in batch["states"]]
    actionsList = [torch.from_numpy(np.concatenate(x, axis=0)) for x in batch["actions"]]

    for states in stateList:
        value = value_net.forward(states)
        batch["values"].append(value)

    advantagesList = []
    returnsList = []
    rewardsList = []
    for rewards, values, masks in zip(batch["rewards"], batch["values"], batch["mask"]):
        returns = torch.Tensor(len(rewards), 1)
        advantages = torch.Tensor(len(rewards), 1)
        deltas = torch.Tensor(len(rewards), 1)

        prev_return = 0
        prev_value = 0
        prev_advantage = 0
        for i in reversed(range(len(rewards))):
            returns[i] = rewards[i] + args.gamma * prev_value * masks[i]  # TD
            # returns[i] = rewards[i] + args.gamma * prev_return * masks[i]  # Monte Carlo
            deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
            advantages[i] = deltas[i] + args.gamma * args.lambda_ * prev_advantage * masks[i]

            prev_return = returns[i, 0]
            prev_value = values.data[i, 0]
            prev_advantage = advantages[i, 0]

        returnsList.append(returns)
        advantagesList.append(advantages)
        rewardsList.append(torch.Tensor(rewards))

    batch["states"] = torch.cat(stateList, 0)
    batch["actions"] = torch.cat(actionsList, 0)
    batch["rewards"] = torch.cat(rewardsList, 0)
    batch["returns"] = torch.cat(returnsList, 0)
    advantagesList = torch.cat(advantagesList, 0)
    batch["advantages"] = (advantagesList - advantagesList.mean()) / advantagesList.std()

    valueBatch["states"] = torch.cat((previousBatch["states"], batch["states"]), 0)
    valueBatch["targets"] = torch.cat((previousBatch["returns"], batch["returns"]), 0)


def update_policy(batch):
    """ Get advantage, states and action and calls trpo step

    Parameters:
        batch (dict of arrays of numpy) : TODO (batch is different than prepare_data by structure)
    Returns:
    """
    advantages = batch["advantages"]
    states = batch["states"]
    actions = batch["actions"]
    trpo_step(policy_net, states, actions, advantages, args.max_kl, args.damping)


def update_value(valueBatch):
    """ Get valueBatch and run adam optimizer to learn value function

    Parameters:
        valueBatch (dict of arrays of numpy) : TODO
    Returns:
    """
    # shuffle the data
    dataSize = valueBatch["targets"].size()[0]
    permutation = torch.randperm(dataSize)
    input = valueBatch["states"][permutation]
    target = valueBatch["targets"][permutation]

    iter = args.val_opt_iter
    batchSize = int(dataSize / iter)

    loss_fn = torch.nn.MSELoss(reduction='sum')
    optimizer = torch.optim.Adam(value_net.parameters(), lr=args.lr)
    for t in range(iter):
        prediction = value_net(input[t * batchSize:t * batchSize + batchSize])
        loss = loss_fn(prediction, target[t * batchSize:t * batchSize + batchSize])
        # XXX : Comment out for debug
        # if t % 100 == 0:
        #     print("\t%f" % loss.data)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()


def save_to_previousBatch(previousBatch, batch):
    """ Save previous batch to use in future value optimization

    Details: TODO
    Parameters:
    Returns:
    """
    if args.value_memory < 0:
        print("Value memory should be equal or greater than zero")
    elif args.value_memory > 0:
        if previousBatch["returns"].size() == 0:
            # note: this rebinds the local name only; the caller's dict is
            # unchanged on this branch
            previousBatch = {"states": batch["states"],
                             "returns": batch["returns"]}
        else:
            previous_size = previousBatch["returns"].size()[0]
            size = batch["returns"].size()[0]
            if previous_size / size == args.value_memory:
                previousBatch["states"] = torch.cat([previousBatch["states"][size:], batch["states"]], 0)
                previousBatch["returns"] = torch.cat([previousBatch["returns"][size:], batch["returns"]], 0)
            else:
                previousBatch["states"] = torch.cat([previousBatch["states"], batch["states"]], 0)
                previousBatch["returns"] = torch.cat([previousBatch["returns"], batch["returns"]], 0)

    if args.value_memory_shuffle:
        permutation = torch.randperm(previousBatch["returns"].size()[0])
        previousBatch["states"] = previousBatch["states"][permutation]
        previousBatch["returns"] = previousBatch["returns"][permutation]


def calculate_loss(reward_sum_mean, reward_sum_std, test_number=10):
    """ Calculate mean cumulative reward for test_number of trials

    Parameters:
        reward_sum_mean (list): holds the history of the means.
        reward_sum_std (list): holds the history of the std.
    Returns:
        list: new value appended means
        list: new value appended stds
    """
    rewardSum = []
    for i in range(test_number):
        state = env.reset()
        rewardSum.append(0)
        for t in range(args.episode_length):
            state, reward, done, _ = env.step(policy_net.get_action(state)[0])
            state = np.transpose(state)
            rewardSum[-1] += reward
            if done:
                break
    reward_sum_mean.append(np.array(rewardSum).mean())
    reward_sum_std.append(np.array(rewardSum).std())
    return reward_sum_mean, reward_sum_std


def log(rewards):
    """ Saves mean and std over episodes in log file

    Parameters:
    Returns:
    """
    # TODO : add duration to log
    filename = args.log_dir + "/" + args.log_prefix \
        + "_env_" + args.env_name \
        + "_maxIter_" + str(args.max_iteration_number) \
        + "_batchSize_" + str(args.batch_size) \
        + "_gamma_" + str(args.gamma) \
        + "_lambda_" + str(args.lambda_) \
        + "_lr_" + str(args.lr) \
        + "_valOptIter_" + str(args.val_opt_iter)

    if os.path.exists(filename + "_index_0.csv"):
        id = 0
        file = filename + "_index_" + str(id)
        while os.path.exists(file + ".csv"):
            id = id + 1
            file = filename + "_index_" + str(id)
        filename = file
    else:
        filename = filename + "_index_0"

    import csv
    filename = filename + ".csv"
    pythonVersion = sys.version_info[0]
    if pythonVersion == 3:
        with open(filename, 'w', newline='') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            spamwriter.writerow(rewards)
    elif pythonVersion == 2:
        with open(filename, 'w') as csvfile:
            spamwriter = csv.writer(csvfile, delimiter=' ',
                                    quotechar='|', quoting=csv.QUOTE_MINIMAL)
            spamwriter.writerow(rewards)


def main():
    """
    Parameters:
    Returns:
    """
    signal.signal(signal.SIGINT, signal_handler)
    time_start = time.time()

    reward_sum_mean, reward_sum_std = [], []
    previousBatch = {"states": torch.Tensor(0),
                     "returns": torch.Tensor(0)}
    reward_sum_mean, reward_sum_std = calculate_loss(reward_sum_mean, reward_sum_std)
    print("Initial loss \n\tloss | mean : %6.4f / std : %6.4f" % (reward_sum_mean[-1], reward_sum_std[-1]))

    for i_episode in range(args.max_iteration_number):
        time_episode_start = time.time()
        # reset batches
        batch = {"states": [], "actions": [], "next_states": [], "rewards": [],
                 "returns": [], "values": [], "advantages": [], "mask": []}
        valueBatch = {"states": [], "targets": []}

        num_steps = 0
        while num_steps < args.batch_size:
            state = env.reset()
            reward_sum = 0
            states, actions, rewards, next_states, masks = [], [], [], [], []
            steps = 0
            for t in range(args.episode_length):
                action = policy_net.get_action(state)[0]  # agent
                next_state, reward, done, info = env.step(action)
                next_state = np.transpose(next_state)
                mask = 0 if done else 1

                masks.append(mask)
                states.append(state)
                actions.append(action)
                next_states.append(next_state)
                rewards.append(reward)

                state = next_state
                reward_sum += reward
                steps += 1
                if args.render:
                    env.render()
                if done:
                    break

            batch["states"].append(np.expand_dims(states, axis=1))
            batch["actions"].append(actions)
            batch["next_states"].append(np.expand_dims(next_states, axis=1))
            batch["rewards"].append(rewards)
            batch["mask"].append(masks)
            num_steps += steps

        prepare_data(batch, valueBatch, previousBatch)
        update_policy(batch)  # First policy update to avoid overfitting
        update_value(valueBatch)

        save_to_previousBatch(previousBatch, batch)

        print("episode %d | total: %.4f " % (i_episode, time.time() - time_episode_start))
        reward_sum_mean, reward_sum_std = calculate_loss(reward_sum_mean, reward_sum_std)
        print("\tloss | mean : %6.4f / std : %6.4f" % (reward_sum_mean[-1], reward_sum_std[-1]))

    if args.log:
        print("Data is logged in " + args.log_dir + "/")
        log(reward_sum_mean)

    print("Total training duration: %.4f " % (time.time() - time_start))
    env.close()


if __name__ == '__main__':
    main()
38.838798
143
0.636722
1,758
14,215
5.010808
0.191695
0.025542
0.044386
0.014531
0.281644
0.221137
0.181746
0.138949
0.10353
0.071972
0
0.012913
0.226381
14,215
365
144
38.945205
0.788124
0.105241
0
0.112903
0
0
0.153175
0.003519
0
0
0
0.027397
0
1
0.032258
false
0
0.072581
0
0.108871
0.040323
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
437c6a6a6d5abf3db9e497007b852df839401638
2,075
py
Python
setup.py
phaustin/MyST-Parser
181e921cea2794f10ca612df6bf2a2057b66c372
[ "MIT" ]
null
null
null
setup.py
phaustin/MyST-Parser
181e921cea2794f10ca612df6bf2a2057b66c372
[ "MIT" ]
null
null
null
setup.py
phaustin/MyST-Parser
181e921cea2794f10ca612df6bf2a2057b66c372
[ "MIT" ]
null
null
null
"""myst-parser package setup.""" from importlib import import_module from setuptools import find_packages, setup setup( name="myst-parser", version=import_module("myst_parser").__version__, description=( "An extended commonmark compliant parser, " "with bridges to docutils & sphinx." ), long_description=open("README.md").read(), long_description_content_type="text/markdown", url="https://github.com/executablebooks/MyST-Parser", project_urls={"Documentation": "https://myst-parser.readthedocs.io"}, author="Chris Sewell", author_email="chrisj_sewell@hotmail.com", license="MIT", packages=find_packages(), entry_points={ "console_scripts": ["myst-benchmark = myst_parser.cli.benchmark:main"] }, classifiers=[ "Development Status :: 3 - Alpha", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: Text Processing :: Markup", "Framework :: Sphinx :: Extension", ], keywords="markdown lexer parser development docutils sphinx", python_requires=">=3.6", install_requires=["markdown-it-py~=0.4.5"], extras_require={ "sphinx": ["pyyaml", "docutils>=0.15", "sphinx>=2,<3"], "code_style": ["flake8<3.8.0,>=3.7.0", "black", "pre-commit==1.17.0"], "testing": [ "coverage", "pytest>=3.6,<4", "pytest-cov", "pytest-regressions", "beautifulsoup4", ], "rtd": ["sphinxcontrib-bibtex", "ipython", "sphinx-book-theme", "sphinx_tabs"], }, zip_safe=True, )
37.727273
88
0.61012
215
2,075
5.772093
0.567442
0.107172
0.141015
0.104754
0.043513
0
0
0
0
0
0
0.021848
0.227952
2,075
54
89
38.425926
0.752809
0.01253
0
0.039216
0
0
0.554577
0.0372
0
0
0
0
0
1
0
true
0
0.058824
0
0.058824
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
437e1e0973bde8b1e251b37ffc137a684d4dc2b8
436
py
Python
blog/models.py
tomitokko/django-blog-with-astradb
236aaf625ceb854345b6d6bbdd6d17b81e0e3c4f
[ "Apache-2.0" ]
3
2021-12-13T21:40:32.000Z
2022-03-28T08:08:36.000Z
blog/models.py
tomitokko/django-blog-with-astradb
236aaf625ceb854345b6d6bbdd6d17b81e0e3c4f
[ "Apache-2.0" ]
null
null
null
blog/models.py
tomitokko/django-blog-with-astradb
236aaf625ceb854345b6d6bbdd6d17b81e0e3c4f
[ "Apache-2.0" ]
1
2022-02-11T20:49:08.000Z
2022-02-11T20:49:08.000Z
from django.db import models
import uuid
from datetime import datetime

from cassandra.cqlengine import columns
from django_cassandra_engine.models import DjangoCassandraModel


# Create your models here.
class PostModel(DjangoCassandraModel):
    id = columns.UUID(primary_key=True, default=uuid.uuid4)
    title = columns.Text(required=True)
    body = columns.Text(required=True)
    created_at = columns.DateTime(default=datetime.now)
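A usage sketch, not part of the original file and assuming a configured django_cassandra_engine connection with the table already synced (e.g. via manage.py sync_cassandra):

# Hypothetical shell session; the manager API mirrors Django's ORM surface.
post = PostModel.objects.create(title="Hello", body="First post")
for p in PostModel.objects.all():
    print(p.id, p.title, p.created_at)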
36.333333
63
0.802752
56
436
6.178571
0.535714
0.057803
0.109827
0.132948
0
0
0
0
0
0
0
0.002618
0.123853
436
12
64
36.333333
0.903141
0.055046
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0.5
0
1
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
438e9f1f07ffd73b9b9fd9f25c52f215537b1381
1,358
py
Python
NumPy/Array Basics/Random Shuffle/tests/test_task.py
jetbrains-academy/Python-Libraries-NumPy
7ce0f2d08f87502d5d97bbc6921f0566184d4ebb
[ "MIT" ]
null
null
null
NumPy/Array Basics/Random Shuffle/tests/test_task.py
jetbrains-academy/Python-Libraries-NumPy
7ce0f2d08f87502d5d97bbc6921f0566184d4ebb
[ "MIT" ]
4
2022-01-14T10:40:47.000Z
2022-02-14T13:01:13.000Z
NumPy/Array Basics/Random Shuffle/tests/test_task.py
jetbrains-academy/Python-Libraries-NumPy
7ce0f2d08f87502d5d97bbc6921f0566184d4ebb
[ "MIT" ]
null
null
null
import unittest

import numpy as np

from task import arr, permuted_2d, fully_random


class TestCase(unittest.TestCase):
    def test_shape(self):
        self.assertEqual((5, 20), arr.shape, msg="Wrong shape of the array 'arr'.")
        self.assertEqual((5, 20), permuted_2d.shape, msg="Wrong shape of the array 'permuted_2d'.")
        self.assertEqual((5, 20), fully_random.shape, msg="Wrong shape of the array 'fully_random'.")

    def test_arr(self):
        for i in arr:
            # This test checks if in each row the minimum element goes first and maximum - last.
            self.assertTrue(i[0] == min(i) and i[-1] == max(i),
                            msg="'arr' should be shuffled along the 0th axis.")

    def test_two_d(self):
        for i in permuted_2d:
            # This test checks that differences between all neighboring elements in rows of the array
            # are not equal to 1 (in non-shuffled rows they would be).
            self.assertFalse(all([(x - i[i.tolist().index(x) - 1]) == 1 for x in i if i.tolist().index(x) > 0]),
                             msg="'permuted_2d' should be shuffled along the 1st axis.")

    def test_random(self):
        # This test checks if elements were also randomized between the rows.
        for i in fully_random:
            self.assertTrue(max(i) - min(i) > 19, "'fully_random' needs to be fully shuffled.")
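The task module under test is not included in this record. One set of definitions that would satisfy these assertions, inferred from the test names and assuming numpy's Generator shuffling API is the topic of the exercise:

import numpy as np

rng = np.random.default_rng()
base = np.arange(100).reshape(5, 20)  # each row ascending: min first, max last

arr = base.copy()
rng.shuffle(arr, axis=0)                    # reorders whole rows; rows stay sorted
permuted_2d = rng.permuted(base, axis=1)    # shuffles within each row independently
fully_random = rng.permutation(base.flatten()).reshape(5, 20)  # shuffles everything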
46.827586
115
0.635493
209
1,358
4.057416
0.373206
0.058962
0.04717
0.063679
0.15566
0.099057
0.099057
0
0
0
0
0.023692
0.25405
1,358
28
116
48.5
0.813425
0.217231
0
0
0
0
0.234405
0
0
0
0
0
0.333333
1
0.222222
false
0
0.166667
0
0.444444
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
4392cd17a2182a5ad123dad587354133d5fbcf62
3,471
py
Python
open/users/serializers.py
lawrendran/open
d136f694bafab647722c78be6f39ec79d589f774
[ "MIT" ]
105
2019-06-01T08:34:47.000Z
2022-03-15T11:48:36.000Z
open/users/serializers.py
lawrendran/open
d136f694bafab647722c78be6f39ec79d589f774
[ "MIT" ]
111
2019-06-04T15:34:14.000Z
2022-03-12T21:03:20.000Z
open/users/serializers.py
lawrendran/open
d136f694bafab647722c78be6f39ec79d589f774
[ "MIT" ]
26
2019-09-04T06:06:12.000Z
2022-01-03T03:40:11.000Z
import pytz
from rest_auth.serializers import TokenSerializer
from rest_framework.authtoken.models import Token
from rest_framework.exceptions import ValidationError
from rest_framework.fields import (
    CharField,
    CurrentUserDefault,
    HiddenField,
    UUIDField,
    ChoiceField,
)
from rest_framework.serializers import ModelSerializer, Serializer
from rest_framework.validators import UniqueValidator

from django.contrib.auth.hashers import check_password
from open.users.models import User


class SimpleUserReadSerializer(ModelSerializer):
    class Meta:
        model = User
        fields = (
            "name",
            "uuid",
        )


class UserReadSerializer(ModelSerializer):
    class Meta:
        model = User
        fields = (
            "name",
            "uuid",
            "signed_up_from",
            "date_joined",
            "username",
            "email",
            "created",
            "modified",
        )


class UserTokenSerializer(TokenSerializer):
    user = UserReadSerializer()

    class Meta:
        model = Token
        fields = ["key", "user"]


# TODO - this view and serializer is on hold as you figure out registration (later)
class UserCreateSerializer(ModelSerializer):
    username = CharField(validators=[UniqueValidator(queryset=User.objects.all())])
    # need to make email optional ... prob should think through signup form a little
    email = CharField(
        validators=[UniqueValidator(queryset=User.objects.all())], required=False
    )
    password = CharField(write_only=True, min_length=8)
    signed_up_from = CharField(
        write_only=True, min_length=8, required=False, default="", trim_whitespace=True
    )
    timezone_string = ChoiceField(
        choices=pytz.all_timezones, required=False, default="US/Eastern"
    )

    class Meta:
        model = User
        fields = ["username", "email", "password", "signed_up_from", "timezone_string"]

    # TODO test - does this work with just username / no email, etc.
    def create(self, validated_data):
        username = validated_data.pop("username")
        password = validated_data.pop("password")
        is_betterself_user = False
        if validated_data["signed_up_from"] == "betterself":
            is_betterself_user = True
        validated_data["is_betterself_user"] = is_betterself_user

        user = User.objects.create(username=username, **validated_data)
        user.set_password(password)
        user.save()
        return user


class UserDeleteSerializer(Serializer):
    # most of this is actually redundant, i don't need to have a validation step, but i do this
    # out of paranoia reasons that someone may delete their account by mistake
    password = CharField()
    user = HiddenField(default=CurrentUserDefault())
    uuid = UUIDField()

    def validate(self, data):
        user = data["user"]
        validated_password = check_password(data["password"], user.password)
        if not validated_password:
            raise ValidationError("Invalid Password Entered")

        validated_uuid = str(user.uuid) == str(data["uuid"])
        if not validated_uuid:
            raise ValidationError("Invalid UUID", str(user.uuid))

        validate_user = user.username != "demo-testing@senrigan.io"
        if not validate_user:
            raise ValidationError(
                f"This is a protected user and cannot be deleted. {user.username}"
            )

        return data
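A minimal sketch, not from the repo, of how UserCreateSerializer would typically be driven from a view or shell; the input values are hypothetical:

serializer = UserCreateSerializer(data={
    "username": "alice",
    "password": "s3cretpass",        # min_length=8 enforced above
    "signed_up_from": "betterself",  # toggles is_betterself_user in create()
})
if serializer.is_valid():
    user = serializer.save()  # dispatches to create() above
else:
    print(serializer.errors)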
30.182609
95
0.661481
374
3,471
6.016043
0.40107
0.021333
0.037778
0.024
0.130667
0.12
0.12
0.041778
0
0
0
0.000769
0.25036
3,471
114
96
30.447368
0.863951
0.111207
0
0.154762
0
0
0.108152
0.007795
0
0
0
0.008772
0
1
0.02381
false
0.107143
0.107143
0
0.369048
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
43956cd7582f0725f3e08ed11af962dc403ba2f7
402
py
Python
archetype/settings/local_stg.py
kingsdigitallab/archetype-django
6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc
[ "MIT" ]
1
2018-11-18T22:42:09.000Z
2018-11-18T22:42:09.000Z
archetype/settings/local_stg.py
kingsdigitallab/archetype-django
6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc
[ "MIT" ]
null
null
null
archetype/settings/local_stg.py
kingsdigitallab/archetype-django
6315c8f38e873e2d3b2d99fcfd47d01ce0ae35bc
[ "MIT" ]
null
null
null
from .base import *  # noqa

CACHE_REDIS_DATABASE = '1'
CACHES['default']['LOCATION'] = '127.0.0.1:6379:' + CACHE_REDIS_DATABASE

INTERNAL_IPS = INTERNAL_IPS + ('', )

ALLOWED_HOSTS = ['']

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'app_archetype_stg',
        'USER': 'app_archetype',
        'PASSWORD': '',
        'HOST': ''
    },
}
22.333333
72
0.58209
42
402
5.309524
0.761905
0.089686
0.161435
0
0
0
0
0
0
0
0
0.03871
0.228856
402
17
73
23.647059
0.680645
0.00995
0
0
0
0
0.333333
0.09596
0
0
0
0
0
1
0
false
0.071429
0.071429
0
0.071429
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
439c484fa1d9a64793cf4da644af68eabbc13295
13,932
py
Python
omtk/models/model_avar_surface_lips.py
CDufour909/omtk_unreal
64ae76a7b0a3f73a4b32d3b330f3174d02c54234
[ "MIT" ]
null
null
null
omtk/models/model_avar_surface_lips.py
CDufour909/omtk_unreal
64ae76a7b0a3f73a4b32d3b330f3174d02c54234
[ "MIT" ]
null
null
null
omtk/models/model_avar_surface_lips.py
CDufour909/omtk_unreal
64ae76a7b0a3f73a4b32d3b330f3174d02c54234
[ "MIT" ]
null
null
null
import math

import pymel.core as pymel

from omtk.core.classNode import Node
from omtk.libs import libAttr
from omtk.libs import libRigging

from . import model_avar_surface


class SplitterNode(Node):
    """
    A splitter is a node network that takes the parameterV that is normally sent through the
    follicles and splits it between two destinations: the follicles and the jaw ref constraint.
    The more the jaw is opened, the more we'll transfer to the jaw ref before sending to the
    follicle.

    This is mainly used to ensure that any lip movement created by the jaw is canceled when the
    animator tries to correct the lips while the jaw is open. Otherwise, since the jaw space and
    the surface space are not the same, the correction would fight the jaw motion.

    To compute the displacement caused by the jaw, we'll use the circumference around the jaw
    pivot. This creates an 'approximation' that might be wrong if some translation also occurs
    in the jaw.

    todo: test with corrective jaw translation
    """

    def __init__(self):
        super(SplitterNode, self).__init__()  # useless

        self.attr_inn_jaw_pt = None
        self.attr_inn_jaw_radius = None
        self.attr_inn_surface_v = None
        self.attr_inn_surface_range_v = None
        self.attr_inn_jaw_default_ratio = None
        self.attr_out_surface_v = None
        self.attr_out_jaw_ratio = None

    def build(self, nomenclature_rig, **kwargs):
        super(SplitterNode, self).build(**kwargs)

        #
        # Create inn and out attributes.
        #
        grp_splitter_inn = pymel.createNode(
            'network',
            name=nomenclature_rig.resolve('udSplitterInn')
        )

        # The jaw opening amount in degree.
        self.attr_inn_jaw_pt = libAttr.addAttr(grp_splitter_inn, 'innJawOpen')

        # The relative uv coordinates normally sent to the follicles.
        # Note that this value is expected to change at the output of the SplitterNode
        # (see outSurfaceU and outSurfaceV)
        self.attr_inn_surface_u = libAttr.addAttr(grp_splitter_inn, 'innSurfaceU')
        self.attr_inn_surface_v = libAttr.addAttr(grp_splitter_inn, 'innSurfaceV')

        # Use this switch to disable completely the splitter.
        self.attr_inn_bypass = libAttr.addAttr(grp_splitter_inn, 'innBypassAmount')

        # The arc length in world space of the surface controlling the follicles.
        # How many degrees does it take the jaw to create 1 unit of surface deformation? (ex: 20)
        self.attr_inn_surface_range_v = libAttr.addAttr(grp_splitter_inn, 'innSurfaceRangeV')

        # How much in percent the lips follow the jaw by default.
        # Note that this value is expected to change at the output of the SplitterNode
        # (see attr_out_jaw_ratio)
        self.attr_inn_jaw_default_ratio = libAttr.addAttr(grp_splitter_inn, 'jawDefaultRatio')

        # The radius of the influence circle, normally resolved by using the distance
        # between the jaw and the avar as radius.
        self.attr_inn_jaw_radius = libAttr.addAttr(grp_splitter_inn, 'jawRadius')

        grp_splitter_out = pymel.createNode(
            'network',
            name=nomenclature_rig.resolve('udSplitterOut')
        )

        self.attr_out_surface_u = libAttr.addAttr(grp_splitter_out, 'outSurfaceU')
        self.attr_out_surface_v = libAttr.addAttr(grp_splitter_out, 'outSurfaceV')
        # How much percent this influence follows the jaw after cancellation.
        self.attr_out_jaw_ratio = libAttr.addAttr(grp_splitter_out, 'outJawRatio')

        #
        # Connect inn and out network nodes so they can easily be found from the SplitterNode.
        #
        attr_inn = libAttr.addAttr(grp_splitter_inn, longName='inn', attributeType='message')
        attr_out = libAttr.addAttr(grp_splitter_out, longName='out', attributeType='message')
        pymel.connectAttr(self.node.message, attr_inn)
        pymel.connectAttr(self.node.message, attr_out)

        #
        # Create node networks
        # Step 1: Get the jaw displacement in uv space (parameterV only).
        #
        attr_jaw_circumference = libRigging.create_utility_node(
            'multiplyDivide',
            name=nomenclature_rig.resolve('getJawCircumference'),
            input1X=self.attr_inn_jaw_radius,
            input2X=(math.pi * 2.0)
        ).outputX

        attr_jaw_open_circle_ratio = libRigging.create_utility_node(
            'multiplyDivide',
            name=nomenclature_rig.resolve('getJawOpenCircleRatio'),
            operation=2,  # divide
            input1X=self.attr_inn_jaw_pt,
            input2X=360.0
        ).outputX

        attr_jaw_active_circumference = libRigging.create_utility_node(
            'multiplyDivide',
            name=nomenclature_rig.resolve('getJawActiveCircumference'),
            input1X=attr_jaw_circumference,
            input2X=attr_jaw_open_circle_ratio
        ).outputX

        attr_jaw_v_range = libRigging.create_utility_node(
            'multiplyDivide',
            name=nomenclature_rig.resolve('getActiveJawRangeInSurfaceSpace'),
            operation=2,  # divide
            input1X=attr_jaw_active_circumference,
            input2X=self.attr_inn_surface_range_v
        ).outputX

        #
        # Step 2: Resolve the output jaw_ratio
        #
        # Note that this can throw a zero division warning in Maya.
        # To prevent that we'll use some black-magic-ugly-ass-trick.
        attr_jaw_ratio_cancelation = libRigging.create_safe_division(
            self.attr_inn_surface_v,
            attr_jaw_v_range,
            nomenclature_rig,
            'getJawRatioCancellation'
        )

        attr_jaw_ratio_out_raw = libRigging.create_utility_node(
            'plusMinusAverage',
            name=nomenclature_rig.resolve('getJawRatioOutUnlimited'),
            operation=2,  # subtraction
            input1D=(
                self.attr_inn_jaw_default_ratio,
                attr_jaw_ratio_cancelation
            )
        ).output1D

        attr_jaw_ratio_out_limited = libRigging.create_utility_node(
            'clamp',
            name=nomenclature_rig.resolve('getJawRatioOutLimited'),
            inputR=attr_jaw_ratio_out_raw,
            minR=0.0,
            maxR=1.0
        ).outputR

        #
        # Step 3: Resolve attr_out_surface_u & attr_out_surface_v
        #
        attr_inn_jaw_default_ratio_inv = libRigging.create_utility_node(
            'reverse',
            name=nomenclature_rig.resolve('getJawDefaultRatioInv'),
            inputX=self.attr_inn_jaw_default_ratio
        ).outputX

        util_jaw_uv_default_ratio = libRigging.create_utility_node(
            'multiplyDivide',
            name=nomenclature_rig.resolve('getJawDefaultRatioUvSpace'),
            input1X=self.attr_inn_jaw_default_ratio,
            input1Y=attr_inn_jaw_default_ratio_inv,
            input2X=attr_jaw_v_range,
            input2Y=attr_jaw_v_range
        )
        attr_jaw_uv_default_ratio = util_jaw_uv_default_ratio.outputX
        attr_jaw_uv_default_ratio_inv = util_jaw_uv_default_ratio.outputY

        attr_jaw_uv_limit_max = libRigging.create_utility_node(
            'plusMinusAverage',
            name=nomenclature_rig.resolve('getJawSurfaceLimitMax'),
            operation=2,  # subtract
            input1D=(attr_jaw_v_range, attr_jaw_uv_default_ratio_inv)
        ).output1D

        attr_jaw_uv_limit_min = libRigging.create_utility_node(
            'plusMinusAverage',
            name=nomenclature_rig.resolve('getJawSurfaceLimitMin'),
            operation=2,  # subtract
            input1D=(attr_jaw_uv_default_ratio, attr_jaw_v_range)
        ).output1D

        attr_jaw_cancel_range = libRigging.create_utility_node(
            'clamp',
            name=nomenclature_rig.resolve('getJawCancelRange'),
            inputR=self.attr_inn_surface_v,
            minR=attr_jaw_uv_limit_min,
            maxR=attr_jaw_uv_limit_max
        ).outputR

        attr_out_surface_v_cancelled = libRigging.create_utility_node(
            'plusMinusAverage',
            name=nomenclature_rig.resolve('getCanceledUv'),
            operation=2,  # subtraction
            input1D=(self.attr_inn_surface_v, attr_jaw_cancel_range)
        ).output1D

        #
        # Connect output attributes
        #
        attr_inn_bypass_inv = libRigging.create_utility_node(
            'reverse',
            name=nomenclature_rig.resolve('getBypassInv'),
            inputX=self.attr_inn_bypass
        ).outputX

        # Connect output jaw_ratio
        attr_output_jaw_ratio = libRigging.create_utility_node(
            'blendWeighted',
            input=(attr_jaw_ratio_out_limited, self.attr_inn_jaw_default_ratio),
            weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
        ).output
        pymel.connectAttr(attr_output_jaw_ratio, self.attr_out_jaw_ratio)

        # Connect output surface u
        pymel.connectAttr(self.attr_inn_surface_u, self.attr_out_surface_u)

        # Connect output surface_v
        attr_output_surface_v = libRigging.create_utility_node(
            'blendWeighted',
            input=(attr_out_surface_v_cancelled, self.attr_inn_surface_v),
            weight=(attr_inn_bypass_inv, self.attr_inn_bypass)
        ).output
        pymel.connectAttr(attr_output_surface_v, self.attr_out_surface_v)


class AvarSurfaceLipModel(model_avar_surface.AvarSurfaceModel):
    """
    Custom avar model for the complex situation that is the lips.
    This ensures that we are moving according to the jaw before sliding on the surface.
    """

    def __init__(self, *args, **kwargs):
        super(AvarSurfaceLipModel, self).__init__(*args, **kwargs)

        self._attr_inn_jaw_bindpose = None
        self._attr_inn_jaw_pitch = None
        self._attr_inn_jaw_ratio_default = None
        self._attr_inn_bypass_splitter = None

        self._attr_out_jaw_ratio = None

    def _create_interface(self):
        super(AvarSurfaceLipModel, self)._create_interface()

        self._attr_inn_jaw_bindpose = libAttr.addAttr(self.grp_rig, 'innJawBindPose', dataType='matrix')
        self._attr_inn_jaw_pitch = libAttr.addAttr(self.grp_rig, 'innJawPitch', defaultValue=0)
        self._attr_inn_jaw_ratio_default = libAttr.addAttr(self.grp_rig, 'innJawRatioDefault', defaultValue=0)
        self._attr_inn_bypass_splitter = libAttr.addAttr(self.grp_rig, 'innBypassSplitter')
        self._attr_inn_ud_bypass = libAttr.addAttr(self.grp_rig, 'innBypassUD')
        # self._attr_inn_surface_length_u = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthU', defaultValue=0)
        # self._attr_inn_surface_length_v = libAttr.addAttr(self.grp_rig, 'innSurfaceLengthV', defaultValue=0)

        self._attr_out_jaw_ratio = libAttr.addAttr(self.grp_rig, 'outJawRatio')

    def connect_avar(self, avar):
        super(AvarSurfaceLipModel, self).connect_avar(avar)

        # Note: We expect a FaceLipAvar
        pymel.connectAttr(avar._attr_jaw_bind_tm, self._attr_inn_jaw_bindpose)
        pymel.connectAttr(avar._attr_jaw_pitch, self._attr_inn_jaw_pitch)
        pymel.connectAttr(avar._attr_inn_jaw_ratio_default, self._attr_inn_jaw_ratio_default)
        pymel.connectAttr(avar._attr_bypass_splitter, self._attr_inn_bypass_splitter)
        pymel.connectAttr(avar.attr_ud_bypass, self._attr_inn_ud_bypass)

    def _get_follicle_relative_uv_attr(self, **kwargs):
        nomenclature_rig = self.get_nomenclature_rig()

        attr_u, attr_v = super(AvarSurfaceLipModel, self)._get_follicle_relative_uv_attr(**kwargs)

        util_decompose_jaw_bind_tm = libRigging.create_utility_node(
            'decomposeMatrix',
            inputMatrix=self._attr_inn_jaw_bindpose,
        )

        #
        # Create and connect Splitter Node
        #
        splitter = SplitterNode()
        splitter.build(
            nomenclature_rig,
            name=nomenclature_rig.resolve('splitter')
        )
        splitter.setParent(self.grp_rig)

        # Resolve the radius of the jaw influence. Used by the splitter.
        attr_jaw_radius = libRigging.create_utility_node(
            'distanceBetween',
            name=nomenclature_rig.resolve('getJawRadius'),
            point1=self.grp_offset.translate,
            point2=util_decompose_jaw_bind_tm.outputTranslate
        ).distance

        # Resolve the jaw pitch. Used by the splitter.
        attr_jaw_pitch = self._attr_inn_jaw_pitch

        # Connect the splitter inputs
        pymel.connectAttr(attr_u, splitter.attr_inn_surface_u)
        pymel.connectAttr(attr_v, splitter.attr_inn_surface_v)
        pymel.connectAttr(self._attr_inn_jaw_ratio_default, splitter.attr_inn_jaw_default_ratio)
        pymel.connectAttr(self._attr_length_v, splitter.attr_inn_surface_range_v)
        pymel.connectAttr(attr_jaw_radius, splitter.attr_inn_jaw_radius)
        pymel.connectAttr(attr_jaw_pitch, splitter.attr_inn_jaw_pt)
        pymel.connectAttr(self._attr_inn_bypass_splitter, splitter.attr_inn_bypass)

        attr_u = splitter.attr_out_surface_u
        attr_v = splitter.attr_out_surface_v

        # Create constraint to controller the jaw reference
        pymel.connectAttr(splitter.attr_out_jaw_ratio, self._attr_out_jaw_ratio)

        #
        # Implement the 'bypass' avars.
        # Those avars bypass the splitter, used in corner cases only.
        #
        attr_attr_ud_bypass_adjusted = libRigging.create_utility_node(
            'multiplyDivide',
            name=nomenclature_rig.resolve('getAdjustedUdBypass'),
            input1X=self._attr_inn_ud_bypass,
            input2X=self.multiplier_ud
        ).outputX
        attr_v = libRigging.create_utility_node(
            'addDoubleLinear',
            name=nomenclature_rig.resolve('addBypassAvar'),
            input1=attr_v,
            input2=attr_attr_ud_bypass_adjusted
        ).output

        return attr_u, attr_v
42.090634
162
0.680879
1,672
13,932
5.313397
0.184809
0.04964
0.059433
0.037821
0.497636
0.293224
0.208802
0.159838
0.146105
0.079244
0
0.004972
0.249282
13,932
330
163
42.218182
0.84444
0.196454
0
0.205607
0
0
0.07727
0.020942
0
0
0
0.00303
0
1
0.028037
false
0.079439
0.028037
0
0.070093
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
439e1a09f9246f51a2f4aa291d6172d1d6ae55e7
808
py
Python
DQM/L1TMonitor/python/L1TGCT_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
DQM/L1TMonitor/python/L1TGCT_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
DQM/L1TMonitor/python/L1TGCT_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms

from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tGct = DQMEDAnalyzer('L1TGCT',
    gctCentralJetsSource = cms.InputTag("gctDigis", "cenJets"),
    gctForwardJetsSource = cms.InputTag("gctDigis", "forJets"),
    gctTauJetsSource = cms.InputTag("gctDigis", "tauJets"),
    gctIsoTauJetsSource = cms.InputTag("gctDigis", "fake"),
    gctEnergySumsSource = cms.InputTag("gctDigis"),
    gctIsoEmSource = cms.InputTag("gctDigis", "isoEm"),
    gctNonIsoEmSource = cms.InputTag("gctDigis", "nonIsoEm"),
    monitorDir = cms.untracked.string("L1T/L1TGCT"),
    verbose = cms.untracked.bool(False),
    stage1_layer2_ = cms.bool(False),
    DQMStore = cms.untracked.bool(True),
    disableROOToutput = cms.untracked.bool(True),
    filterTriggerType = cms.int32(1)
)
38.47619
62
0.72896
79
808
7.43038
0.518987
0.131175
0.226576
0.068143
0
0
0
0
0
0
0
0.012821
0.131188
808
20
63
40.4
0.823362
0
0
0
0
0
0.136476
0
0
0
0
0
0
1
0
false
0
0.117647
0
0.117647
0
0
0
0
null
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
439e723ba661ca0696137f422b31b51f63930e6a
387
py
Python
OLD/karma_module/text.py
alentoghostflame/StupidAlentoBot
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
[ "MIT" ]
1
2021-12-12T02:50:20.000Z
2021-12-12T02:50:20.000Z
OLD/karma_module/text.py
alentoghostflame/StupidAlentoBot
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
[ "MIT" ]
17
2020-02-07T23:40:36.000Z
2020-12-22T16:38:44.000Z
OLD/karma_module/text.py
alentoghostflame/StupidAlentoBot
c024bfb79a9ecb0d9fda5ddc4e361a0cb878baba
[ "MIT" ]
null
null
null
ADDED_KARMA_TO_MEMBER = "Gave {} karma to {}, their karma is now at {}."
REMOVED_KARMA_FROM_MEMBER = "Removed {} karma from {}, their karma is now at {}."

LIST_KARMA_OWN = "You currently have {} karma."
LIST_KARMA_OBJECT = "\"{}\" currently has {} karma."
LIST_KARMA_MEMBER = "{} currently has {} karma."

KARMA_TOP_START = "Top karma in server:\n"
KARMA_TOP_FORMAT = "{}. {} \\| {}\n"
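A quick sketch of how these templates are meant to be filled (not from the module; the import path assumes the package is importable as karma_module):

from karma_module.text import ADDED_KARMA_TO_MEMBER, KARMA_TOP_START, KARMA_TOP_FORMAT

print(ADDED_KARMA_TO_MEMBER.format(5, "Alice", 42))
# -> Gave 5 karma to Alice, their karma is now at 42.

leaderboard = KARMA_TOP_START + KARMA_TOP_FORMAT.format(1, "Alice", 42)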
38.7
81
0.669251
55
387
4.418182
0.436364
0.111111
0.098765
0.123457
0.139918
0
0
0
0
0
0
0
0.157623
387
9
82
43
0.745399
0
0
0
0
0
0.550388
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
43a255b174f2f6995694a3ff518d32d995c17049
981
py
Python
setup.py
sdu-cfei/modest-py
dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a
[ "BSD-2-Clause" ]
37
2017-06-21T19:09:11.000Z
2022-03-13T09:26:07.000Z
setup.py
sdu-cfei/modest-py
dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a
[ "BSD-2-Clause" ]
51
2017-06-21T17:40:42.000Z
2021-10-31T09:16:21.000Z
setup.py
sdu-cfei/modest-py
dc14091fb8c20a8b3fa5ab33bbf597c0b566ba0a
[ "BSD-2-Clause" ]
12
2017-10-02T12:32:50.000Z
2022-03-13T09:26:15.000Z
from setuptools import setup

setup(
    name='modestpy',
    version='0.1',
    description='FMI-compliant model identification package',
    url='https://github.com/sdu-cfei/modest-py',
    keywords='fmi fmu optimization model identification estimation',
    author='Krzysztof Arendt, Center for Energy Informatics SDU',
    author_email='krzysztof.arendt@gmail.com, veje@mmmi.sdu.dk',
    license='BSD',
    platforms=['Windows', 'Linux'],
    packages=[
        'modestpy',
        'modestpy.estim',
        'modestpy.estim.ga_parallel',
        'modestpy.estim.ga',
        'modestpy.estim.ps',
        'modestpy.estim.scipy',
        'modestpy.fmi',
        'modestpy.utilities',
        'modestpy.test'],
    include_package_data=True,
    install_requires=[
        'fmpy[complete]',
        'scipy',
        'pandas',
        'matplotlib',
        'numpy',
        'pyDOE',
        'modestga'
    ],
    classifiers=[
        'Programming Language :: Python :: 3'
    ]
)
26.513514
68
0.59633
96
981
6.041667
0.71875
0.112069
0.051724
0
0
0
0
0
0
0
0
0.004138
0.260958
981
36
69
27.25
0.795862
0
0
0
0
0
0.494393
0.054027
0
0
0
0
0
1
0
true
0
0.028571
0
0.028571
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
1
0
0
0
0
0
0
1
43a8bd9cb32de8f8138b7b033dc19e078566fbea
426
py
Python
src/enum/__init__.py
NazarioJL/faker_enum
c2703cae232b229b4d4ab2b73757102453d541ab
[ "MIT" ]
5
2019-08-02T17:59:10.000Z
2021-05-14T08:30:55.000Z
src/enum/__init__.py
NazarioJL/faker_enum
c2703cae232b229b4d4ab2b73757102453d541ab
[ "MIT" ]
4
2018-10-26T06:52:05.000Z
2022-01-31T20:31:17.000Z
src/enum/__init__.py
NazarioJL/faker_enum
c2703cae232b229b4d4ab2b73757102453d541ab
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from enum import Enum
from typing import TypeVar, Type, List, Iterable, cast

from faker.providers import BaseProvider

TEnum = TypeVar("TEnum", bound=Enum)


class EnumProvider(BaseProvider):
    """
    A Provider for enums.
    """

    def enum(self, enum_cls: Type[TEnum]) -> TEnum:
        members: List[TEnum] = list(cast(Iterable[TEnum], enum_cls))
        return self.random_element(members)
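A minimal usage sketch (assumed, not from the package docs): registering the provider on a Faker instance and drawing a random enum member.

from enum import Enum
from faker import Faker

class Color(Enum):
    RED = 1
    GREEN = 2
    BLUE = 3

fake = Faker()
fake.add_provider(EnumProvider)  # exposes fake.enum(...)
print(fake.enum(Color))          # e.g. Color.GREEN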
22.421053
68
0.676056
54
426
5.277778
0.555556
0.049123
0
0
0
0
0
0
0
0
0
0.002924
0.197183
426
18
69
23.666667
0.830409
0.103286
0
0
0
0
0.013661
0
0
0
0
0
0
1
0.125
false
0
0.375
0
0.75
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
43af38bca718e0df9a99877c70ee02be87e0d3d0
842
py
Python
shopping_cart_test/shoppingcart2.py
Simbadeveloper/studious-octo-waddle.io
7ace6bb93e3b87c97d59df858e3079ec7a2db30e
[ "MIT" ]
null
null
null
shopping_cart_test/shoppingcart2.py
Simbadeveloper/studious-octo-waddle.io
7ace6bb93e3b87c97d59df858e3079ec7a2db30e
[ "MIT" ]
null
null
null
shopping_cart_test/shoppingcart2.py
Simbadeveloper/studious-octo-waddle.io
7ace6bb93e3b87c97d59df858e3079ec7a2db30e
[ "MIT" ]
null
null
null
class ShoppingCart(object):
    def __init__(self):
        self.total = 0
        self.items = dict()

    def add_item(self, item_name, quantity, price):
        if item_name is not None and quantity >= 1:
            self.items.update({item_name: quantity})
        if quantity and price >= 1:
            self.total += (quantity * price)

    def remove_item(self, item_name, quantity, price):
        if item_name in self.items:
            if quantity < self.items[item_name] and quantity > 0:
                self.items[item_name] -= quantity
                self.total -= price * quantity

    def checkout(self, cash_paid):
        balance = 0
        if cash_paid < self.total:
            return "Cash paid not enough"
        balance = cash_paid - self.total
        return balance


class Shop(ShoppingCart):
    def __init__(self):
        # note: the parent __init__ is never called, so a Shop instance
        # has no total/items attributes
        self.quantity = 100

    def remove_item(self):
        self.quantity -= 1
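A short usage sketch, not part of the original file, walking the cart through its three operations:

cart = ShoppingCart()
cart.add_item("apple", 3, 2)      # 3 apples at 2 each -> total 6
cart.add_item("bread", 1, 4)      # total 10
cart.remove_item("apple", 1, 2)   # one apple refunded -> total 8
print(cart.checkout(10))          # 2 (change returned)
print(cart.checkout(5))           # "Cash paid not enough"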
26.3125
59
0.644893
114
842
4.578947
0.27193
0.10728
0.122605
0.057471
0.237548
0.149425
0.149425
0.149425
0.149425
0
0
0.014241
0.249406
842
31
60
27.16129
0.811709
0
0
0.08
0
0
0.023753
0
0
0
0
0
0
1
0.24
false
0
0
0
0.4
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
43b32db495f046dd61a5bbd3592b8806b465b229
785
py
Python
LEVEL2/다리를지나는트럭/solution.py
seunghwanly/CODING-TEST
a820da950c163d399594770199aa2e782d1fbbde
[ "MIT" ]
null
null
null
LEVEL2/다리를지나는트럭/solution.py
seunghwanly/CODING-TEST
a820da950c163d399594770199aa2e782d1fbbde
[ "MIT" ]
null
null
null
LEVEL2/다리를지나는트럭/solution.py
seunghwanly/CODING-TEST
a820da950c163d399594770199aa2e782d1fbbde
[ "MIT" ]
null
null
null
def solution(bridge_length, weight, truck_weights):
    answer = 0
    # { weight, time }
    wait = truck_weights[:]
    bridge = []
    passed = 0
    currWeight = 0

    while True:
        if passed == len(truck_weights) and len(wait) == 0:
            return answer
        answer += 1
        # sth needs to be passed
        if bridge:
            if bridge[0]['t'] + bridge_length == answer:
                front = bridge.pop(0)
                currWeight -= front['w']
                passed += 1
        # add new truck
        if wait:
            if currWeight + wait[0] <= weight:
                bridge.append({
                    'w': wait[0],
                    't': answer
                })
                currWeight += wait[0]
                wait.pop(0)


# print(solution(2, 10, [7, 4, 5, 6]))
print(solution(100, 100, [10]))
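An equivalent, slightly more idiomatic sketch (new code, not from the original file) that models the bridge as a fixed-width window of slots with collections.deque; it produces the same answers (8 for the commented-out call, 101 for the printed one):

from collections import deque

def solution_deque(bridge_length, weight, truck_weights):
    bridge = deque([0] * bridge_length)  # 0 marks an empty slot
    trucks = deque(truck_weights)
    time, load = 0, 0
    while bridge:
        time += 1
        load -= bridge.popleft()          # slot leaving the bridge this second
        if trucks:
            if load + trucks[0] <= weight:
                t = trucks.popleft()
                bridge.append(t)          # next truck drives on
                load += t
            else:
                bridge.append(0)          # bridge advances with no new truck
    return time

assert solution_deque(2, 10, [7, 4, 5, 6]) == 8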
28.035714
73
0.49172
93
785
4.096774
0.419355
0.052493
0.07874
0
0
0
0
0
0
0
0
0.055102
0.375796
785
27
74
29.074074
0.722449
0.11465
0
0
0
0
0.005797
0
0
0
0
0
0
1
0.05
false
0.15
0
0
0.05
0.05
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
1
0
0
0
0
0
1
43b5471678e7c510bd2a55fdced1140414dcd734
440
py
Python
device_geometry.py
AstroShen/fpga21-scaled-tech
8a7016913c18d71844f733bc80a3ceaa2d033ac2
[ "MIT" ]
2
2021-09-02T13:13:35.000Z
2021-12-19T11:35:03.000Z
device_geometry.py
AstroShen/fpga21-scaled-tech
8a7016913c18d71844f733bc80a3ceaa2d033ac2
[ "MIT" ]
null
null
null
device_geometry.py
AstroShen/fpga21-scaled-tech
8a7016913c18d71844f733bc80a3ceaa2d033ac2
[ "MIT" ]
2
2021-09-29T02:53:03.000Z
2022-03-27T09:55:35.000Z
"""Holds the device gemoetry parameters (Table 5), taken from Wu et al., >> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFets<<, 2020, with interpolation for 4nm. 16nm is taken from PTM HP. """ node_names = [16, 7, 5, 4, 3] GP = [64, 56, 48, 44, 41] FP = [40, 30, 28, 24, 22] GL = [20, 18, 16, 15, 14] FH = [26, 35, 45, 50, 55] FW = [12, 6.5, 6, 5.5, 5.5] vdd = [0.85, 0.75, 0.7, 0.65, 0.65]
36.666667
163
0.615909
92
440
2.934783
0.782609
0.022222
0.022222
0
0
0
0
0
0
0
0
0.226361
0.206818
440
11
164
40
0.547278
0.529545
0
0
0
0
0
0
0
0
0
0
0
1
0
false
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
43cee9ce3aeb6af7cef400c841ab802c88461d4b
8,148
py
Python
gslib/tests/test_stet_util.py
ttobisawa/gsutil
ef665b590aa8e6cecfe251295bce8bf99ea69467
[ "Apache-2.0" ]
null
null
null
gslib/tests/test_stet_util.py
ttobisawa/gsutil
ef665b590aa8e6cecfe251295bce8bf99ea69467
[ "Apache-2.0" ]
null
null
null
gslib/tests/test_stet_util.py
ttobisawa/gsutil
ef665b590aa8e6cecfe251295bce8bf99ea69467
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for stet_util.py."""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals

import os
import shutil

from gslib import storage_url
from gslib.tests import testcase
from gslib.tests import util
from gslib.tests.util import unittest
from gslib.utils import execution_util
from gslib.utils import stet_util

import mock


class TestStetUtil(testcase.GsUtilUnitTestCase):
  """Test STET utils."""

  @mock.patch.object(execution_util, 'ExecuteExternalCommand')
  def test_stet_upload_uses_binary_and_config_from_boto(
      self, mock_execute_external_command):
    fake_config_path = self.CreateTempFile()
    mock_execute_external_command.return_value = ('stdout', 'stderr')
    mock_logger = mock.Mock()
    source_url = storage_url.StorageUrlFromString('in')
    destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', 'fake_binary_path'),
        ('GSUtil', 'stet_config_path', fake_config_path),
    ]):
      out_file_url = stet_util.encrypt_upload(source_url, destination_url,
                                              mock_logger)

    self.assertEqual(out_file_url,
                     storage_url.StorageUrlFromString('in_.stet_tmp'))
    mock_execute_external_command.assert_called_once_with([
        'fake_binary_path',
        'encrypt',
        '--config-file={}'.format(fake_config_path),
        '--blob-id=gs://bucket/obj',
        'in',
        'in_.stet_tmp',
    ])
    mock_logger.debug.assert_called_once_with('stderr')

  @mock.patch.object(execution_util, 'ExecuteExternalCommand')
  def test_stet_upload_runs_with_binary_from_path_with_correct_settings(
      self, mock_execute_external_command):
    fake_config_path = self.CreateTempFile()
    temporary_path_directory = self.CreateTempDir()
    fake_stet_binary_path = self.CreateTempFile(
        tmpdir=temporary_path_directory, file_name='stet')
    previous_path = os.getenv('PATH')
    os.environ['PATH'] += os.path.pathsep + temporary_path_directory
    mock_execute_external_command.return_value = ('stdout', 'stderr')
    mock_logger = mock.Mock()
    source_url = storage_url.StorageUrlFromString('in')
    destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', None),
        ('GSUtil', 'stet_config_path', fake_config_path),
    ]):
      out_file_url = stet_util.encrypt_upload(source_url, destination_url,
                                              mock_logger)

    self.assertEqual(out_file_url,
                     storage_url.StorageUrlFromString('in_.stet_tmp'))
    mock_execute_external_command.assert_called_once_with([
        fake_stet_binary_path,
        'encrypt',
        '--config-file={}'.format(fake_config_path),
        '--blob-id=gs://bucket/obj',
        'in',
        'in_.stet_tmp',
    ])
    mock_logger.debug.assert_called_once_with('stderr')

    os.environ['PATH'] = previous_path

  @mock.patch.object(execution_util, 'ExecuteExternalCommand')
  def test_stet_upload_uses_config_from_default_path_with_correct_settings(
      self, mock_execute_external_command):
    mock_execute_external_command.return_value = ('stdout', 'stderr')
    mock_logger = mock.Mock()
    source_url = storage_url.StorageUrlFromString('in')
    destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', 'fake_binary_path'),
        ('GSUtil', 'stet_config_path', None),
    ]):
      with mock.patch.object(os.path, 'exists',
                             new=mock.Mock(return_value=True)):
        out_file_url = stet_util.encrypt_upload(source_url, destination_url,
                                                mock_logger)

    self.assertEqual(out_file_url,
                     storage_url.StorageUrlFromString('in_.stet_tmp'))
    mock_execute_external_command.assert_called_once_with([
        'fake_binary_path',
        'encrypt',
        '--config-file={}'.format(
            os.path.expanduser(stet_util.DEFAULT_STET_CONFIG_PATH)),
        '--blob-id=gs://bucket/obj',
        'in',
        'in_.stet_tmp',
    ])
    mock_logger.debug.assert_called_once_with('stderr')

  @mock.patch.object(shutil, 'move')
  @mock.patch.object(execution_util, 'ExecuteExternalCommand')
  def test_stet_download_runs_binary_and_replaces_temp_file(
      self, mock_execute_external_command, mock_move):
    fake_config_path = self.CreateTempFile()
    mock_execute_external_command.return_value = ('stdout', 'stderr')
    mock_logger = mock.Mock()
    source_url = storage_url.StorageUrlFromString('gs://bucket/obj')
    destination_url = storage_url.StorageUrlFromString('out')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', 'fake_binary_path'),
        ('GSUtil', 'stet_config_path', fake_config_path),
    ]):
      stet_util.decrypt_download(source_url, destination_url, mock_logger)

    mock_execute_external_command.assert_called_once_with([
        'fake_binary_path',
        'decrypt',
        '--config-file={}'.format(fake_config_path),
        '--blob-id=gs://bucket/obj',
        'out',
        'out_.stet_tmp',
    ])
    mock_logger.debug.assert_called_once_with('stderr')
    mock_move.assert_called_once_with('out_.stet_tmp', 'out')

  @mock.patch.object(stet_util, '_get_stet_binary_from_path',
                     new=mock.Mock(return_value=None))
  def test_stet_util_errors_if_no_binary(self):
    fake_config_path = self.CreateTempFile()
    source_url = storage_url.StorageUrlFromString('in')
    destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', None),
        ('GSUtil', 'stet_config_path', fake_config_path),
    ]):
      with self.assertRaises(KeyError):
        stet_util.encrypt_upload(source_url, destination_url, None)

  def test_stet_util_errors_if_no_config(self):
    source_url = storage_url.StorageUrlFromString('in')
    destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', 'fake_binary_path'),
        ('GSUtil', 'stet_config_path', None),
    ]):
      with mock.patch.object(os.path, 'exists',
                             new=mock.Mock(return_value=False)):
        with self.assertRaises(KeyError):
          stet_util.encrypt_upload(source_url, destination_url, None)

  @mock.patch.object(os.path, 'expanduser', autospec=True)
  @mock.patch.object(execution_util, 'ExecuteExternalCommand',
                     new=mock.Mock(return_value=('stdout', 'stderr')))
  def test_stet_util_expands_home_directory_symbol(self, mock_expanduser):
    fake_config_path = self.CreateTempFile()
    source_url = storage_url.StorageUrlFromString('in')
    destination_url = storage_url.StorageUrlFromString('gs://bucket/obj')

    with util.SetBotoConfigForTest([
        ('GSUtil', 'stet_binary_path', 'fake_binary_path'),
        ('GSUtil', 'stet_config_path', fake_config_path),
    ]):
      stet_util.encrypt_upload(source_url, destination_url, mock.Mock())

    mock_expanduser.assert_has_calls(
        [mock.call('fake_binary_path'),
         mock.call(fake_config_path)])
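The assertions above encode stet_util's temporary-file naming convention: the STET binary writes to '<path>_.stet_tmp' beside the original file before upload, and the download path is moved over the destination afterwards. A standalone sketch of that convention; the helper name below is hypothetical, not gsutil's actual implementation:

# Hypothetical helper mirroring the naming convention the tests assert.
def _stet_temp_name(path):
    # 'in' -> 'in_.stet_tmp', 'out' -> 'out_.stet_tmp'
    return path + '_.stet_tmp'

assert _stet_temp_name('in') == 'in_.stet_tmp'
assert _stet_temp_name('out') == 'out_.stet_tmp'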
40.74
80
0.689494
962
8,148
5.468815
0.179834
0.041817
0.042007
0.106634
0.710511
0.689793
0.667554
0.665463
0.655199
0.621555
0
0.00138
0.199435
8,148
199
81
40.944724
0.805151
0.077688
0
0.68125
0
0
0.151649
0.031504
0
0
0
0
0.09375
1
0.04375
false
0
0.08125
0
0.13125
0.00625
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
43e2d67fdf43b1951abb85a9aaab6711fb8852be
1,132
py
Python
tests/core/test_plugins.py
franalgaba/nile
f771467f27f03c8d20b8032bac64b3ab60436d3c
[ "MIT" ]
null
null
null
tests/core/test_plugins.py
franalgaba/nile
f771467f27f03c8d20b8032bac64b3ab60436d3c
[ "MIT" ]
null
null
null
tests/core/test_plugins.py
franalgaba/nile
f771467f27f03c8d20b8032bac64b3ab60436d3c
[ "MIT" ]
null
null
null
""" Tests for plugins in core module. Only unit tests for now. """ from unittest.mock import patch import click from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit def test_skip_click_exit(): def dummy_method(a, b): return a + b dummy_result = dummy_method(1, 2) decorated = skip_click_exit(dummy_method) decorated_result = decorated(1, 2) assert callable(decorated) assert dummy_result == decorated_result def testget_installed_plugins(): class Dummy: value = "nile.core.plugins.get_installed_plugins" name = "get_installed_plugins" with patch("nile.core.plugins.entry_points", return_value=[Dummy()]): installed_plugins = get_installed_plugins() assert "get_installed_plugins" in installed_plugins def test_load_plugins(): @click.group() def cli(): """Nile CLI group.""" pass def dummy(): print("dummy_result") with patch( "nile.core.plugins.get_installed_plugins", return_value={"dummy": dummy} ): app = load_plugins(cli) assert callable(app)
22.64
82
0.681095
144
1,132
5.090278
0.319444
0.196453
0.155525
0.106412
0.13779
0.092769
0
0
0
0
0
0.00453
0.219965
1,132
49
83
23.102041
0.825595
0.066254
0
0
0
0
0.159962
0.143678
0
0
0
0
0.137931
1
0.206897
false
0.034483
0.103448
0.034483
0.448276
0.034483
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
78db0363110019cfe555b18f1fdc95de024b7945
19,306
py
Python
mevis/_internal/conversion.py
robert-haas/mevis
1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327
[ "Apache-2.0" ]
2
2022-01-12T23:08:52.000Z
2022-01-12T23:21:23.000Z
mevis/_internal/conversion.py
robert-haas/mevis
1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327
[ "Apache-2.0" ]
null
null
null
mevis/_internal/conversion.py
robert-haas/mevis
1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327
[ "Apache-2.0" ]
null
null
null
from collections.abc import Callable as _Callable

import networkx as _nx
from opencog.type_constructors import AtomSpace as _AtomSpace

from .args import check_arg as _check_arg


def convert(data, graph_annotated=True, graph_directed=True,
            node_label=None, node_color=None, node_opacity=None,
            node_size=None, node_shape=None, node_border_color=None,
            node_border_size=None, node_label_color=None, node_label_size=None,
            node_hover=None, node_click=None, node_image=None,
            node_properties=None,
            edge_label=None, edge_color=None, edge_opacity=None, edge_size=None,
            edge_label_color=None, edge_label_size=None, edge_hover=None,
            edge_click=None):
    """Convert an AtomSpace or list of Atoms to a NetworkX graph with annotations.

    Several arguments accept a Callable.

    - In case of node annotations, the Callable gets an Atom as input, which the
      node represents in the graph. The Callable needs to return one of the other
      types accepted by the argument, e.g. ``str`` or ``int``/``float``.
    - In case of edge annotations, the Callable gets two Atoms as input, which the
      edge connects in the graph. The Callable needs to return one of the other
      types accepted by the argument, e.g. ``str`` or ``int``/``float``.

    Several arguments accept a color, which can be in following formats:

    - Name: ``"black"``, ``"red"``, ``"green"``, ...
    - Color code

      - 6 digit hex RGB code: ``"#05ac05"``
      - 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``)

    Parameters
    ----------
    data : AtomSpace, list of Atoms
        Input that gets converted to a graph.
    graph_annotated : bool
        If ``False``, no annotations are added to the graph. This could be used for
        converting large AtomSpaces quickly to graphs that use less RAM and can be
        exported to smaller files (e.g. also compressed as gml.gz) for inspection
        with other tools.
    graph_directed : bool
        If ``True``, a NetworkX DiGraph is created.
        If ``False``, a NetworkX Graph is created.
    node_label : str, Callable
        Set a label for each node, which is shown as text below it.
    node_color : str, Callable
        Set a color for each node, which becomes the fill color of its shape.
    node_opacity : float between 0.0 and 1.0
        Set an opacity for each node, which becomes the opacity of its shape.
        Caution: This is only supported by d3.
    node_size : int, float, Callable
        Set a size for each node, which becomes the height and width of its shape.
    node_shape : str, Callable
        Set a shape for each node, which is some geometrical form that has the
        node coordinates in its center.
        Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"``
    node_border_color : str, Callable
        Set a border color for each node, which influences the border drawn
        around its shape.
    node_border_size : int, float, Callable
        Set a border size for each node, which influences the border drawn
        around its shape.
    node_label_color : str, Callable
        Set a label color for each node, which determines the font color of the
        text below the node.
    node_label_size : int, float, Callable
        Set a label size for each node, which determines the font size of the
        text below the node.
    node_hover : str, Callable
        Set a hover text for each node, which shows up besides the mouse cursor
        when hovering over a node.
    node_click : str, Callable
        Set a click text for each node, which shows up in a div element below the
        plot when clicking on a node and can easily be copied and pasted.
    node_image : str, Callable
        Set an image for each node, which appears within its shape.
        Possible values:

        - URL pointing to an image
        - Data URL encoding the image
    node_properties : str, dict, Callable
        Set additional properties for each node, which may not immediately be
        translated into a visual element, but can be chosen in the data selection
        menu in the interactive HTML visualizations to map them on some plot
        element. These properties also appear when exporting a graph to a file in
        a format such as GML and may be recognized by external visualization
        tools. Note that a Callable needs to return a dict in this case, and each
        key becomes a property, which is equivalent to the other properties such
        as node_size and node_color.

        Special cases:

        - ``node_properties="tv"`` is a shortcut for using a function that returns
          ``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}``
        - Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node
          coordinates.

        Examples:

        - ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so
          that the JavaScript layout algorithm does not influence it, but the
          nodes remain free to move in the y and z directions.
        - ``lambda atom: dict(x=2.0) if atom.is_node() else None``: This fixes the
          x coordinate of each Atom of type Node to 2.0 but allows each Atom of
          type Link to move freely.
        - ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)``:
          This fixes the y coordinates of Atoms at different heights. Atoms of
          type Node are put at the bottom and Atoms of type Link are ordered by
          the number of their outgoing edges. The result is a hierarchical
          visualization that has some similarity with the "dot" layout.
        - ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``: This
          fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100.
          The result is a visualization with two lines of nodes that has some
          similarity with the "bipartite" layout.
    edge_label : str, Callable
        Set a label for each edge, which becomes the text plotted in the middle
        of the edge.
    edge_color : str, Callable
        Set a color for each edge, which becomes the color of the line
        representing the edge.
    edge_opacity : int, float, Callable
        Set an opacity for each edge, which allows making it transparent to some
        degree.
    edge_size : int, float, Callable
        Set a size for each edge, which becomes the width of the line
        representing the edge.
    edge_label_color : str, Callable
        Set a color for each edge label, which becomes the color of the text in
        the midpoint of the edge.
    edge_label_size : int, float, Callable
        Set a size for each edge label, which becomes the size of the text in the
        midpoint of the edge.
    edge_hover : str, Callable
    edge_click : str, Callable

    Returns
    -------
    graph : NetworkX Graph or DiGraph
        Whether an undirected or directed graph is created depends on the
        argument "directed".
    """
    # Argument processing
    _check_arg(data, 'data', (list, _AtomSpace))
    _check_arg(graph_annotated, 'graph_annotated', bool)
    _check_arg(graph_directed, 'graph_directed', bool)
    _check_arg(node_label, 'node_label', (str, _Callable), allow_none=True)
    _check_arg(node_color, 'node_color', (str, _Callable), allow_none=True)
    _check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True)
    _check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True)
    _check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True)
    _check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True)
    _check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True)
    _check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True)
    _check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True)
    _check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True)
    _check_arg(node_click, 'node_click', (str, _Callable), allow_none=True)
    _check_arg(node_image, 'node_image', (str, _Callable), allow_none=True)
    _check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True)
    _check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True)
    _check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True)
    _check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True)
    _check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True)
    _check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True)
    _check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True)
    _check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True)
    _check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True)

    # Prepare annotation functions
    if graph_annotated:
        node_ann = prepare_node_func(
            node_label, node_color, node_opacity, node_size, node_shape,
            node_border_color, node_border_size, node_label_color,
            node_label_size, node_hover, node_click, node_image,
            node_properties)
        edge_ann = prepare_edge_func(
            edge_label, edge_color, edge_opacity, edge_size,
            edge_label_color, edge_label_size, edge_hover, edge_click)
    else:
        empty = dict()

        def node_ann(atom):
            return empty

        def edge_ann(atom1, atom2):
            return empty

    # Create the NetworkX graph
    graph = _nx.DiGraph() if graph_directed else _nx.Graph()

    # 0) Set graph annotations
    graph.graph['node_click'] = '$hover'  # node_click will by default show content of node_hover

    # 1) Add vertices and their annotations
    for atom in data:
        graph.add_node(to_uid(atom), **node_ann(atom))

    # 2) Add edges and their annotations (separate step to exclude edges to filtered vertices)
    for atom in data:
        uid = to_uid(atom)
        if atom.is_link():
            # for all that is incoming to the Atom
            for atom2 in atom.incoming:
                uid2 = to_uid(atom2)
                if uid2 in graph.nodes:
                    graph.add_edge(uid2, uid, **edge_ann(atom2, atom))
            # for all that is outgoing of the Atom
            for atom2 in atom.out:
                uid2 = to_uid(atom2)
                if uid2 in graph.nodes:
                    graph.add_edge(uid, uid2, **edge_ann(atom, atom2))
    return graph


def prepare_node_func(node_label, node_color, node_opacity, node_size,
                      node_shape, node_border_color, node_border_size,
                      node_label_color, node_label_size, node_hover,
                      node_click, node_image, node_properties):
    """Prepare a function that calculates all annotations for a node representing an Atom."""
    # individual node annotation functions
    node_label = use_node_def_or_str(node_label, node_label_default)
    node_color = use_node_def_or_str(node_color, node_color_default)
    node_opacity = use_node_def_or_num(node_opacity, node_opacity_default)
    node_size = use_node_def_or_num(node_size, node_size_default)
    node_shape = use_node_def_or_str(node_shape, node_shape_default)
    node_border_color = use_node_def_or_str(node_border_color, node_border_color_default)
    node_border_size = use_node_def_or_num(node_border_size, node_border_size_default)
    node_label_color = use_node_def_or_str(node_label_color, node_label_color_default)
    node_label_size = use_node_def_or_num(node_label_size, node_label_size_default)
    node_hover = use_node_def_or_str(node_hover, node_hover_default)
    node_click = use_node_def_or_str(node_click, node_click_default)
    node_image = use_node_def_or_str(node_image, node_image_default)

    # special case: additional user-defined node properties by a function that returns a dict
    if node_properties is None:
        node_properties = node_properties_default
    elif isinstance(node_properties, dict):
        val = node_properties

        def node_properties(atom):
            return val
    elif node_properties == 'tv':
        node_properties = node_properties_tv

    # combined node annotation function: calls each of the individual ones
    name_func = (
        ('label', node_label),
        ('color', node_color),
        ('opacity', node_opacity),
        ('size', node_size),
        ('shape', node_shape),
        ('border_color', node_border_color),
        ('border_size', node_border_size),
        ('label_color', node_label_color),
        ('label_size', node_label_size),
        ('hover', node_hover),
        ('click', node_click),
        ('image', node_image),
    )

    def func(atom):
        data = {}
        for n, f in name_func:
            val = f(atom)
            if val is not None:
                data[n] = val
        try:
            data.update(node_properties(atom))
        except Exception:
            pass
        return data
    return func


def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size,
                      edge_label_color, edge_label_size, edge_hover, edge_click):
    """Prepare a function that calculates all annotations for an edge between Atoms."""
    # individual edge annotation functions
    edge_label = use_edge_def_or_str(edge_label, edge_label_default)
    edge_color = use_edge_def_or_str(edge_color, edge_color_default)
    edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default)
    edge_size = use_edge_def_or_num(edge_size, edge_size_default)
    edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default)
    edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default)
    edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default)
    edge_click = use_edge_def_or_str(edge_click, edge_click_default)

    # combined edge annotation function: calls each of the individual ones
    name_func = (
        ('label', edge_label),
        ('color', edge_color),
        ('opacity', edge_opacity),
        ('size', edge_size),
        ('label_color', edge_label_color),
        ('label_size', edge_label_size),
        ('hover', edge_hover),
        ('click', edge_click),
    )

    def func(atom1, atom2):
        data = {}
        for n, f in name_func:
            val = f(atom1, atom2)
            if val is not None:
                data[n] = val
        return data
    return func


def use_node_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to a node annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        given_value = str(given_value)

        def func(atom):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func


def use_node_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to a node annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        given_value = float(given_value)

        def func(atom):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func


def use_edge_def_or_str(given_value, default_func):
    """Transform a value of type (None, str, Callable) to an edge annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, str):
        given_value = str(given_value)

        def func(atom1, atom2):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func


def use_edge_def_or_num(given_value, default_func):
    """Transform a value of type (None, int, float, Callable) to an edge annotation function."""
    # Default: use pre-defined function from this module
    if given_value is None:
        func = default_func
    # Transform: value to function that returns the value
    elif isinstance(given_value, (int, float)):
        given_value = float(given_value)

        def func(atom1, atom2):
            return given_value
    # Passthrough: value itself is a function
    else:
        func = given_value
    return func


def to_uid(atom):
    """Return a unique identifier for an Atom."""
    return atom.id_string()


# Default functions for node annotations
# - "return None" means that the attribute and value won't be included
#   to the output data, so that defaults of the JS library are used and files get smaller
# - A return of a value in some cases and None in other cases means that the
#   default value of the JS library is used in None cases and again files get smaller

def node_label_default(atom):
    # None => no node labels
    return '{} "{}"'.format(atom.type_name, atom.name) if atom.is_node() else atom.type_name


def node_color_default(atom):
    # None => black
    return 'red' if atom.is_node() else None


def node_opacity_default(atom):
    # None => 1.0
    return None


def node_size_default(atom):
    # None => 10
    return None


def node_shape_default(atom):
    # None => circle
    return 'rectangle' if atom.is_node() else None


def node_border_color_default(atom):
    # None => black
    return None


def node_border_size_default(atom):
    # None => 0.0
    return None


def node_label_color_default(atom):
    # None => black
    return None


def node_label_size_default(atom):
    # None => 12.0
    return None


def node_hover_default(atom):
    # None => no hover text
    return atom.short_string()


def node_click_default(atom):
    # None => no click text (in addition to always shown "Node: <id>" in header)
    return None


def node_image_default(atom):
    # None => no image inside node
    return None


def node_properties_default(atom):
    # None => no extra node annotations
    return None


def node_properties_tv(atom):
    return dict(mean=atom.tv.mean, confidence=atom.tv.confidence)


# Default functions for edge annotations

def edge_label_default(atom1, atom2):
    # None => no edge label
    return None


def edge_color_default(atom1, atom2):
    # None => black
    return None if atom1.is_link() and atom2.is_link() else 'red'


def edge_opacity_default(atom1, atom2):
    # None => 1.0
    return None


def edge_size_default(atom1, atom2):
    # None => 1.0
    return None


def edge_label_color_default(atom1, atom2):
    # None => black
    return None


def edge_label_size_default(atom1, atom2):
    # None => 8.0
    return None


def edge_hover_default(atom1, atom2):
    # None => no hover text
    return None


def edge_click_default(atom1, atom2):
    # None => no click text (in addition to always shown "Edge: <id>" in header)
    return None
38.923387
97
0.682897
2,803
19,306
4.474492
0.123439
0.025833
0.028464
0.035162
0.521049
0.460533
0.400175
0.345718
0.286557
0.232977
0
0.006731
0.238113
19,306
495
98
39.00202
0.845945
0.43919
0
0.337719
0
0
0.044872
0
0
0
0
0
0
1
0.171053
false
0.004386
0.017544
0.127193
0.359649
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
78df4f62738c15a3903b9ac814a118e7bd487166
1,214
py
Python
test/tests.py
gzu300/Linear_Algebra
437a285b0230f4da8b0573b04da32ee965b09233
[ "MIT" ]
null
null
null
test/tests.py
gzu300/Linear_Algebra
437a285b0230f4da8b0573b04da32ee965b09233
[ "MIT" ]
null
null
null
test/tests.py
gzu300/Linear_Algebra
437a285b0230f4da8b0573b04da32ee965b09233
[ "MIT" ]
null
null
null
import unittest

from pkg import Linear_Algebra
import numpy as np


class TestLU(unittest.TestCase):
    def setUp(self):
        self.U_answer = np.around(np.array([[2,1,0],[0,3/2,1],[0,0,4/3]], dtype=float), decimals=2).tolist()
        self.L_answer = np.around(np.array([[1,0,0],[1/2,1,0],[0,2/3,1]], dtype=float), decimals=2).tolist()

    def test_perm(self):
        answer = np.array([[0,1,0], [1,0,0], [0,0,1]], dtype=float).tolist()
        result = Linear_Algebra.make_perm_mx(3, 0, 1).tolist()
        self.assertEqual(result, answer)

    def test_LU(self):
        L_result, U_result = np.around(
            Linear_Algebra.LU(np.array([[2,1,0],[1,2,1],[0,1,2]], dtype=float)),
            decimals=2).tolist()
        self.assertEqual(U_result, self.U_answer)
        self.assertEqual(L_result, self.L_answer)


class TestDet(unittest.TestCase):
    def setUp(self):
        self.input_mx = np.array([[2,-1,0,0],[-1,2,-1,0],[0,-1,2,-1],[0,0,-1,2]], dtype=float)

    def test_find_det(self):
        result = np.around(Linear_Algebra.find_det(A=self.input_mx), decimals=2).tolist()
        answer = np.around(5, decimals=2).tolist()
        self.assertEqual(result, answer)


if __name__ == '__main__':
    unittest.main()
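The factors asserted in TestLU can be checked independently of the pkg module: multiplying the expected L and U must reproduce the matrix passed to Linear_Algebra.LU. A quick numpy-only verification:

import numpy as np

L = np.array([[1, 0, 0], [1/2, 1, 0], [0, 2/3, 1]])
U = np.array([[2, 1, 0], [0, 3/2, 1], [0, 0, 4/3]])
A = np.array([[2, 1, 0], [1, 2, 1], [0, 1, 2]])

# L @ U reconstructs the tridiagonal input matrix used in test_LU.
assert np.allclose(L @ U, A)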
41.862069
128
0.629325
205
1,214
3.585366
0.219512
0.029932
0.032653
0.032653
0.530612
0.223129
0.032653
0.032653
0.021769
0.021769
0
0.064484
0.169687
1,214
29
129
41.862069
0.664683
0
0
0.166667
0
0
0.006584
0
0
0
0
0
0.166667
1
0.208333
false
0
0.125
0
0.416667
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
1
0
0
0
0
0
0
0
1
78ed1b7fc24c0d300d3ad14111db8c17f3c020fd
5,401
py
Python
app/routes/router.py
nityagautam/ReportDashboard-backend
d23fe008cb0df6a703fcd665181897a75b71d5b2
[ "MIT" ]
1
2021-05-06T09:48:46.000Z
2021-05-06T09:48:46.000Z
app/routes/router.py
nityagautam/ReportDashboard
d23fe008cb0df6a703fcd665181897a75b71d5b2
[ "MIT" ]
2
2021-09-09T05:34:33.000Z
2021-12-13T15:31:36.000Z
app/routes/router.py
nityagautam/ReportDashboard
d23fe008cb0df6a703fcd665181897a75b71d5b2
[ "MIT" ]
null
null
null
#===============================================================
# @author: nityanarayan44@live.com
# @written: 08 December 2021
# @desc: Routes for the Backend server
#===============================================================

# Import section with reference to the entry file or main file;
from __main__ import application
from flask import jsonify, render_template, url_for, request, redirect

# Local sample data import
from app.config.uiconfig import app_ui_config
from app import sample_data


# ==============================================================
# App Routes/Gateways
# ==============================================================

@application.route('/test', methods=['GET'])
def test():
    return '<h4>HELLO WORLD!</h4><hr/> it works!'


@application.route('/', methods=['GET'])
@application.route('/home', methods=['GET'])
@application.route('/dashboard', methods=['GET'])
def root():
    return render_template("dashboard.html", app_data=app_ui_config, data=sample_data.latest_data)


@application.route('/history', methods=['GET'])
def history():
    return render_template("history.html", app_data=app_ui_config, data=sample_data.history_data)


@application.route('/about', methods=['GET'])
def about():
    return render_template("about.html", app_data=app_ui_config, data=sample_data.latest_data)


@application.route('/get-notes', methods=['POST'])
def get_todo():
    print("KEY :: VALUE (from the received form data)")
    print([(key, val) for key, val in zip(request.form.keys(), request.form.values())])
    return redirect("/notes", code=302)


@application.route('/notes')
def info():
    return render_template("notes.html", app_data=app_ui_config)


@application.route('/sample-data')
def get_sample_data():
    return jsonify(app_ui_config)


# ==============================================================
# Error Handlers Starts
# ==============================================================

# 404 Handler; We can also pass the specific request error codes to the decorator;
@application.errorhandler(404)
def not_found(err):
    return render_template("error.html", app_data=app_ui_config, error_data=err), 404


# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(TypeError)
def type_error(err):
    application.logger.exception(err)
    return render_template("error.html", app_data=app_ui_config, error_data=err), 500


# Exception/Error handler; We can also pass the specific errors to the decorator;
@application.errorhandler(Exception)
def server_error(err):
    application.logger.exception(err)
    return render_template("error.html", app_data=app_ui_config, error_data=err), 500

# ==============================================================
# Error Handlers Ends
# ==============================================================


# Route For Sample data
@application.route('/data')
def get_data():
    data = {
        "reports": [
            {
                "build": "build_no",
                "created": "Imported 05052021T11:30:00:00IST",
                "platform": "Imported Win/Unix/Mac",
                "project_name": "project_name_1",
                "report_location_path": "path/to/report/location/index.html",
                "report_summary": {"pass": "50", "fail": "0", "ignored": "0", "skipped": "0"},
                "total_time": "35 min."
            },
            {
                "build": "build_no",
                "created": "Imported 05052021T11:30:00:00IST",
                "platform": "Imported Win/Unix/Mac",
                "project_name": "project_name_2",
                "report_location_path": "path/to/report/location/index.html",
                "report_summary": {"pass": "10", "fail": "2", "ignored": "0", "skipped": "0"},
                "total_time": "0.2345 secs."
            },
            {
                "build": "build_no",
                "created": "Imported 05052021T11:30:00:00IST",
                "platform": "Imported Win/Unix/Mac",
                "project_name": "project_name_3",
                "report_location_path": "path/to/report/location/index.html",
                "report_summary": {"pass": "100", "fail": "5", "ignored": "0", "skipped": "0"},
                "total_time": "5 days"
            }
        ]
    }
    return jsonify(data)


# ==============================================================
# Extra routes starts
# ==============================================================

@application.route('/sample1')
def sample1():
    return render_template("web-analytics-overview.html")


@application.route('/sample2')
def sample2():
    return render_template("web-analytics-real-time.html")


@application.route('/logo')
def get_logo():
    """
    Render a simple page that loads the logo image from the static directory.
    :return: HTML string
    """
    # set template directory of the Flask App to the path set by the user as command line arg.
    return f'<html><head><title>Root</title><head><body><hr/> Welcome to the main page <hr/> ' \
           f'Building image from static public location: <br/> ' \
           f'<img src=\'{url_for("static", filename="images/logo.svg")}\' /> </body></html>'
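The error-handler section layers three registrations: one for an HTTP status code, one for a specific exception type, and one Exception catch-all. A self-contained sketch of that layering with plain Flask (generic JSON handlers, not this project's templates):

from flask import Flask, jsonify

app = Flask(__name__)

@app.errorhandler(404)
def handle_not_found(err):
    # Status-code handler: runs for unknown routes.
    return jsonify(error=str(err)), 404

@app.errorhandler(Exception)
def handle_any_error(err):
    # Catch-all: more specific handlers take precedence for HTTP errors.
    app.logger.exception(err)
    return jsonify(error=str(err)), 500

if __name__ == '__main__':
    print(app.test_client().get('/missing').status_code)  # prints 404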
38.035211
99
0.549713
581
5,401
4.967298
0.311532
0.072072
0.034304
0.033957
0.45738
0.422384
0.388773
0.378032
0.378032
0.365558
0
0.025018
0.20811
5,401
141
100
38.304965
0.649755
0.255323
0
0.211765
0
0.011765
0.306288
0.071484
0
0
0
0
0
1
0.164706
false
0.035294
0.117647
0.105882
0.447059
0.023529
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
1
0
0
0
1
78eed98843af7c2acb54d95dbb60b3f984e9337b
15,624
py
Python
idaes/generic_models/properties/core/examples/ASU_PR.py
carldlaird/idaes-pse
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
[ "RSA-MD" ]
112
2019-02-11T23:16:36.000Z
2022-03-23T20:59:57.000Z
idaes/generic_models/properties/core/examples/ASU_PR.py
carldlaird/idaes-pse
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
[ "RSA-MD" ]
621
2019-03-01T14:44:12.000Z
2022-03-31T19:49:25.000Z
idaes/generic_models/properties/core/examples/ASU_PR.py
carldlaird/idaes-pse
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
[ "RSA-MD" ]
154
2019-02-01T23:46:33.000Z
2022-03-23T15:07:10.000Z
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Air separation phase equilibrium package using Peng-Robinson EoS.

Example property package using the Generic Property Package Framework.
This example shows how to set up a property package to do air separation
phase equilibrium in the generic framework using the Peng-Robinson equation
along with methods drawn from the pre-built IDAES property libraries.

The example includes two dictionaries.

1. The dictionary named configuration contains parameters obtained from
   The Properties of Gases and Liquids (1987) 4th edition and NIST.

2. The dictionary named configuration_Dowling_2015 contains parameters used in
   "A framework for efficient large scale equation-oriented flowsheet
   optimization" (2015) Dowling. The parameters are extracted from the
   Properties of Gases and Liquids (1977) 3rd edition for Antoine's vapor
   equation and acentric factors, with values from that edition converted to J.
"""

# Import Python libraries
import logging

# Import Pyomo units
from pyomo.environ import units as pyunits

# Import IDAES cores
from idaes.core import LiquidPhase, VaporPhase, Component
from idaes.generic_models.properties.core.state_definitions import FTPx
from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType
from idaes.generic_models.properties.core.phase_equil import SmoothVLE
from idaes.generic_models.properties.core.phase_equil.bubble_dew import \
    LogBubbleDew
from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity
from idaes.generic_models.properties.core.pure import RPP4
from idaes.generic_models.properties.core.pure import NIST
from idaes.generic_models.properties.core.pure import RPP3

# Set up logger
_log = logging.getLogger(__name__)

# ---------------------------------------------------------------------
# Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system

# Data Sources:
# [1] The Properties of Gases and Liquids (1987)
#     4th edition, Chemical Engineering Series - Robert C. Reid
# [2] NIST, https://webbook.nist.gov/
#     Retrieved 16th August, 2020
# [3] The Properties of Gases and Liquids (1977)
#     3rd edition, Chemical Engineering Series - Robert C. Reid
#     Cp parameters were converted to J in Dowling 2015
# [4] A framework for efficient large scale equation-oriented flowsheet
#     optimization (2015) Computers and Chemical Engineering - Alexander W. Dowling

configuration = {
    # Specifying components
    "components": {
        "nitrogen": {
            "type": Component,
            "enth_mol_ig_comp": RPP4,
            "entr_mol_ig_comp": RPP4,
            "pressure_sat_comp": NIST,
            "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
            "parameter_data": {
                "mw": (28.0135E-3, pyunits.kg/pyunits.mol),  # [1]
                "pressure_crit": (34e5, pyunits.Pa),  # [1]
                "temperature_crit": (126.2, pyunits.K),  # [1]
                "omega": 0.037,  # [1]
                "cp_mol_ig_comp_coeff": {
                    "A": (3.115E1, pyunits.J/pyunits.mol/pyunits.K),  # [1]
                    "B": (-1.357E-2, pyunits.J/pyunits.mol/pyunits.K**2),
                    "C": (2.680E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                    "D": (-1.168E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
                "enth_mol_form_vap_comp_ref": (
                    0.0, pyunits.J/pyunits.mol),  # [2]
                "entr_mol_form_vap_comp_ref": (
                    191.61, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                "pressure_sat_comp_coeff": {
                    "A": (3.7362, None),  # [2]
                    "B": (264.651, pyunits.K),
                    "C": (-6.788, pyunits.K)}}},
        "argon": {
            "type": Component,
            "enth_mol_ig_comp": RPP4,
            "entr_mol_ig_comp": RPP4,
            "pressure_sat_comp": NIST,
            "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
            "parameter_data": {
                "mw": (39.948E-3, pyunits.kg/pyunits.mol),  # [1]
                "pressure_crit": (48.98e5, pyunits.Pa),  # [1]
                "temperature_crit": (150.86, pyunits.K),  # [1]
                "omega": 0.001,  # [1]
                "cp_mol_ig_comp_coeff": {
                    "A": (2.050E1, pyunits.J/pyunits.mol/pyunits.K),  # [1]
                    "B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2),
                    "C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3),
                    "D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
                "enth_mol_form_vap_comp_ref": (
                    0.0, pyunits.J/pyunits.mol),  # [2]
                "entr_mol_form_vap_comp_ref": (
                    154.8, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                "pressure_sat_comp_coeff": {
                    "A": (3.29555, None),  # [2]
                    "B": (215.24, pyunits.K),
                    "C": (-22.233, pyunits.K)}}},
        "oxygen": {
            "type": Component,
            "enth_mol_ig_comp": RPP4,
            "entr_mol_ig_comp": RPP4,
            "pressure_sat_comp": NIST,
            "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
            "parameter_data": {
                "mw": (31.999E-3, pyunits.kg/pyunits.mol),  # [1]
                "pressure_crit": (50.43e5, pyunits.Pa),  # [1]
                "temperature_crit": (154.58, pyunits.K),  # [1]
                "omega": 0.025,  # [1]
                "cp_mol_ig_comp_coeff": {
                    "A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K),
                    "B": (-3.680E-6, pyunits.J/pyunits.mol/pyunits.K**2),
                    "C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                    "D": (-1.065E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
                "enth_mol_form_vap_comp_ref": (
                    0.0, pyunits.J/pyunits.mol),  # [2]
                "entr_mol_form_vap_comp_ref": (
                    205.152, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                "pressure_sat_comp_coeff": {
                    "A": (3.85845, None),  # [2]
                    "B": (325.675, pyunits.K),
                    "C": (-5.667, pyunits.K)}}}},

    # Specifying phases
    "phases": {"Liq": {"type": LiquidPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}},
               "Vap": {"type": VaporPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}}},

    # Set base units of measurement
    "base_units": {"time": pyunits.s,
                   "length": pyunits.m,
                   "mass": pyunits.kg,
                   "amount": pyunits.mol,
                   "temperature": pyunits.K},

    # Specifying state definition
    "state_definition": FTPx,
    "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
                     "temperature": (10, 300, 350, pyunits.K),
                     "pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
    "pressure_ref": (101325, pyunits.Pa),
    "temperature_ref": (298.15, pyunits.K),

    # Defining phase equilibria
    "phases_in_equilibrium": [("Vap", "Liq")],
    "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
    "bubble_dew_method": LogBubbleDew,
    "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
                                    ("nitrogen", "argon"): -0.26e-2,
                                    ("nitrogen", "oxygen"): -0.119e-1,
                                    ("argon", "nitrogen"): -0.26e-2,
                                    ("argon", "argon"): 0.000,
                                    ("argon", "oxygen"): 0.104e-1,
                                    ("oxygen", "nitrogen"): -0.119e-1,
                                    ("oxygen", "argon"): 0.104e-1,
                                    ("oxygen", "oxygen"): 0.000}}}


configuration_Dowling_2015 = {
    # Specifying components
    "components": {
        "nitrogen": {
            "type": Component,
            "enth_mol_ig_comp": RPP4,
            "entr_mol_ig_comp": RPP4,
            "pressure_sat_comp": RPP3,
            "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
            "parameter_data": {
                "mw": (28.0135E-3, pyunits.kg/pyunits.mol),  # [3]
                "pressure_crit": (33.943875e5, pyunits.Pa),  # [4]
                "temperature_crit": (126.2, pyunits.K),  # [4]
                "omega": 0.04,  # [3]
                "cp_mol_ig_comp_coeff": {
                    'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                    'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2),
                    'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                    'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
                "enth_mol_form_vap_comp_ref": (
                    0.0, pyunits.J/pyunits.mol),  # [2]
                "entr_mol_form_vap_comp_ref": (
                    191.61, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                "pressure_sat_comp_coeff": {
                    'A': (14.9342, None),  # [3]
                    'B': (588.72, pyunits.K),
                    'C': (-6.60, pyunits.K)}}},
        "argon": {
            "type": Component,
            "enth_mol_ig_comp": RPP4,
            "entr_mol_ig_comp": RPP4,
            "pressure_sat_comp": RPP3,
            "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
            "parameter_data": {
                "mw": (39.948E-3, pyunits.kg/pyunits.mol),  # [3]
                "pressure_crit": (48.737325e5, pyunits.Pa),  # [4]
                "temperature_crit": (150.86, pyunits.K),  # [4]
                "omega": -0.004,  # [1]
                "cp_mol_ig_comp_coeff": {
                    'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                    'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2),
                    'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3),
                    'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)},
                "enth_mol_form_vap_comp_ref": (
                    0.0, pyunits.J/pyunits.mol),  # [3]
                "entr_mol_form_vap_comp_ref": (
                    154.8, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                "pressure_sat_comp_coeff": {
                    'A': (15.2330, None),  # [3]
                    'B': (700.51, pyunits.K),
                    'C': (-5.84, pyunits.K)}}},
        "oxygen": {
            "type": Component,
            "enth_mol_ig_comp": RPP4,
            "entr_mol_ig_comp": RPP4,
            "pressure_sat_comp": RPP3,
            "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity},
            "parameter_data": {
                "mw": (31.999E-3, pyunits.kg/pyunits.mol),  # [3]
                "pressure_crit": (50.45985e5, pyunits.Pa),  # [4]
                "temperature_crit": (154.58, pyunits.K),  # [4]
                "omega": 0.021,  # [1]
                "cp_mol_ig_comp_coeff": {
                    'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K),  # [3]
                    'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2),
                    'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3),
                    'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)},
                "enth_mol_form_vap_comp_ref": (
                    0.0, pyunits.J/pyunits.mol),  # [2]
                "entr_mol_form_vap_comp_ref": (
                    205.152, pyunits.J/pyunits.mol/pyunits.K),  # [2]
                "pressure_sat_comp_coeff": {
                    'A': (15.4075, None),  # [3]
                    'B': (734.55, pyunits.K),
                    'C': (-6.45, pyunits.K)}}}},

    # Specifying phases
    "phases": {"Liq": {"type": LiquidPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}},
               "Vap": {"type": VaporPhase,
                       "equation_of_state": Cubic,
                       "equation_of_state_options": {
                           "type": CubicType.PR}}},

    # Set base units of measurement
    "base_units": {"time": pyunits.s,
                   "length": pyunits.m,
                   "mass": pyunits.kg,
                   "amount": pyunits.mol,
                   "temperature": pyunits.K},

    # Specifying state definition
    "state_definition": FTPx,
    "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s),
                     "temperature": (10, 300, 350, pyunits.K),
                     "pressure": (5e4, 1e5, 1e7, pyunits.Pa)},
    "pressure_ref": (101325, pyunits.Pa),
    "temperature_ref": (298.15, pyunits.K),

    # Defining phase equilibria
    "phases_in_equilibrium": [("Vap", "Liq")],
    "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE},
    "bubble_dew_method": LogBubbleDew,
    "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000,
                                    ("nitrogen", "argon"): -0.26e-2,
                                    ("nitrogen", "oxygen"): -0.119e-1,
                                    ("argon", "nitrogen"): -0.26e-2,
                                    ("argon", "argon"): 0.000,
                                    ("argon", "oxygen"): 0.104e-1,
                                    ("oxygen", "nitrogen"): -0.119e-1,
                                    ("oxygen", "argon"): 0.104e-1,
                                    ("oxygen", "oxygen"): 0.000}}}
51.394737
91
0.473374
1,624
15,624
4.395936
0.198892
0.060513
0.075641
0.090769
0.738339
0.724051
0.683569
0.660877
0.556661
0.53621
0
0.069528
0.379544
15,624
303
92
51.564356
0.666907
0.171595
0
0.686099
0
0
0.184698
0.060673
0
0
0
0
0
1
0
false
0
0.049327
0
0.049327
0
0
0
0
null
0
0
0
0
1
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1
78f33bf3b80a0a0d98e998f783441284fa1b3068
3,503
py
Python
invenio_madmp/views.py
FAIR-Data-Austria/invenio-madmp
74372ee794f81666f5e9cf08ef448c21b2e428be
[ "MIT" ]
1
2022-03-02T10:37:29.000Z
2022-03-02T10:37:29.000Z
invenio_madmp/views.py
FAIR-Data-Austria/invenio-madmp
74372ee794f81666f5e9cf08ef448c21b2e428be
[ "MIT" ]
9
2020-08-25T12:03:08.000Z
2020-10-20T11:45:32.000Z
invenio_madmp/views.py
FAIR-Data-Austria/invenio-madmp
74372ee794f81666f5e9cf08ef448c21b2e428be
[ "MIT" ]
null
null
null
"""Blueprint definitions for maDMP integration.""" from flask import Blueprint, jsonify, request from invenio_db import db from .convert import convert_dmp from .models import DataManagementPlan def _summarize_dmp(dmp: DataManagementPlan) -> dict: """Create a summary dictionary for the given DMP.""" res = {"dmp_id": dmp.dmp_id, "datasets": []} for ds in dmp.datasets: dataset = {"dataset_id": ds.dataset_id, "record": None} if ds.record: dataset["record"] = ds.record.model.json res["datasets"].append(dataset) return res def create_rest_blueprint(app) -> Blueprint: """Create the blueprint for the REST endpoints using the current app extensions.""" # note: using flask.current_app isn't directly possible, because Invenio-MaDMP is # registered as an extension in the API app, not the "normal" app # (which is the one usually returned by current_app) rest_blueprint = Blueprint("invenio_madmp", __name__) auth = app.extensions["invenio-madmp"].auth @rest_blueprint.route("/dmps", methods=["GET"]) @auth.login_required def list_dmps(): """Give a summary of all stored DMPs.""" dmps = DataManagementPlan.query.all() res = [_summarize_dmp(dmp) for dmp in dmps] return jsonify(res) @rest_blueprint.route("/dmps", methods=["POST"]) @auth.login_required def create_dmp(): """Create a new DMP from the maDMP JSON in the request body.""" if request.json is None: return jsonify({"error": "no json body supplied"}), 400 elif request.json.get("dmp") is None: return jsonify({"error": "dmp not found in the body"}), 400 dmp_json = request.json.get("dmp", {}) dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier") if DataManagementPlan.get_by_dmp_id(dmp_json_id) is not None: return jsonify({"error": "dmp with the same id already exists"}), 409 dmp = convert_dmp(dmp_json) db.session.add(dmp) db.session.commit() # TODO change the returned value return jsonify(_summarize_dmp(dmp)), 201 @rest_blueprint.route("/dmps/<dmp_id>", methods=["PATCH"]) @auth.login_required def update_dmp(dmp_id: str = None): """Update the specified DMP using the maDMP JSON in the request body.""" hard_sync = request.args.get("sync", "soft") == "hard" if request.json is None: return jsonify({"error": "no json body supplied"}), 400 elif request.json.get("dmp") is None: return jsonify({"error": "dmp not found in the body"}), 400 dmp_json = request.json.get("dmp", {}) dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier") if dmp_id and dmp_json_id and dmp_id != dmp_json_id: return jsonify({"error": "mismatch between dmp id from url and body"}), 400 dmp_id = dmp_id or dmp_json_id if DataManagementPlan.get_by_dmp_id(dmp_id) is None: return jsonify({"error": "dmp not found"}), 404 dmp = convert_dmp(dmp_json, hard_sync) db.session.commit() # TODO change the returned value return jsonify(_summarize_dmp(dmp)) @rest_blueprint.route("/dmps", methods=["PATCH"]) @auth.login_required def update_dmp_without_id(): """Update the specified DMP using the maDMP JSON in the request body.""" return update_dmp(None) return rest_blueprint
35.744898
87
0.643163
475
3,503
4.581053
0.24
0.032169
0.057904
0.060662
0.45864
0.382813
0.382813
0.340533
0.286765
0.286765
0
0.008989
0.237796
3,503
97
88
36.113402
0.805993
0.190123
0
0.305085
0
0
0.13872
0
0
0
0
0.010309
0
1
0.101695
false
0
0.067797
0
0.389831
0.135593
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
1
0
0
0
0
0
0
0
0
0
0
1
78f83610f02792ce2cf026a72886ebff9b5ef71f
579
py
Python
assistance_bot/app.py
reakfog/personal_computer_voice_assistant
3483f633c57cd2e930f94bcbda9739cde34525aa
[ "BSD-3-Clause" ]
null
null
null
assistance_bot/app.py
reakfog/personal_computer_voice_assistant
3483f633c57cd2e930f94bcbda9739cde34525aa
[ "BSD-3-Clause" ]
null
null
null
assistance_bot/app.py
reakfog/personal_computer_voice_assistant
3483f633c57cd2e930f94bcbda9739cde34525aa
[ "BSD-3-Clause" ]
2
2021-07-26T20:22:31.000Z
2021-07-29T12:58:03.000Z
import sys

sys.path = ['', '..'] + sys.path[1:]

import daemon

from assistance_bot import core
from functionality.voice_processing import speaking, listening
from functionality.commands import *


if __name__ == '__main__':
    speaking.setup_assistant_voice(core.ttsEngine, core.assistant)
    while True:
        # start speech recording and speech recognition
        recognized_speech = listening.get_listening_and_recognition_result(
            core.recognizer, core.microphone)
        # executing the given command
        execute_command(recognized_speech)
32.166667
75
0.723661
64
579
6.25
0.59375
0.035
0
0
0
0
0
0
0
0
0
0.002165
0.202073
579
17
76
34.058824
0.863636
0.126079
0
0
0
0
0.019881
0
0
0
0
0
0
1
0
false
0
0.384615
0
0.384615
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
null
0
0
0
0
0
0
0
0
1
0
0
0
0
1
78fc7bd4cfef4c55a9ccedee325481258419cb94
11,929
py
Python
ee/clickhouse/sql/person.py
wanderlog/posthog
a88b81d44ab31d262be07e84a85d045c4e28f2a3
[ "MIT" ]
null
null
null
ee/clickhouse/sql/person.py
wanderlog/posthog
a88b81d44ab31d262be07e84a85d045c4e28f2a3
[ "MIT" ]
null
null
null
ee/clickhouse/sql/person.py
wanderlog/posthog
a88b81d44ab31d262be07e84a85d045c4e28f2a3
[ "MIT" ]
null
null
null
from ee.clickhouse.sql.clickhouse import KAFKA_COLUMNS, STORAGE_POLICY, kafka_engine
from ee.clickhouse.sql.table_engines import CollapsingMergeTree, ReplacingMergeTree
from ee.kafka_client.topics import KAFKA_PERSON, KAFKA_PERSON_DISTINCT_ID, KAFKA_PERSON_UNIQUE_ID
from posthog.settings import CLICKHOUSE_CLUSTER, CLICKHOUSE_DATABASE

TRUNCATE_PERSON_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"

DROP_PERSON_TABLE_SQL = f"DROP TABLE IF EXISTS person ON CLUSTER '{CLICKHOUSE_CLUSTER}'"

TRUNCATE_PERSON_DISTINCT_ID_TABLE_SQL = f"TRUNCATE TABLE IF EXISTS person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}'"

TRUNCATE_PERSON_DISTINCT_ID2_TABLE_SQL = (
    f"TRUNCATE TABLE IF EXISTS person_distinct_id2 ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)

PERSONS_TABLE = "person"

PERSONS_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
    id UUID,
    created_at DateTime64,
    team_id Int64,
    properties VARCHAR,
    is_identified Int8,
    is_deleted Int8 DEFAULT 0
    {extra_fields}
) ENGINE = {engine}
"""

PERSONS_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSONS_TABLE, ver="_timestamp")

PERSONS_TABLE_SQL = lambda: (
    PERSONS_TABLE_BASE_SQL
    + """Order By (team_id, id)
{storage_policy}
"""
).format(
    table_name=PERSONS_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=PERSONS_TABLE_ENGINE(),
    extra_fields=KAFKA_COLUMNS,
    storage_policy=STORAGE_POLICY(),
)

KAFKA_PERSONS_TABLE_SQL = lambda: PERSONS_TABLE_BASE_SQL.format(
    table_name="kafka_" + PERSONS_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=kafka_engine(KAFKA_PERSON),
    extra_fields="",
)

# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
id,
created_at,
team_id,
properties,
is_identified,
is_deleted,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
    table_name=PERSONS_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    database=CLICKHOUSE_DATABASE,
)

GET_LATEST_PERSON_SQL = """
SELECT * FROM person JOIN (
    SELECT id, max(_timestamp) as _timestamp, max(is_deleted) as is_deleted
    FROM person
    WHERE team_id = %(team_id)s
    GROUP BY id
) as person_max ON person.id = person_max.id AND person._timestamp = person_max._timestamp
WHERE team_id = %(team_id)s
  AND person_max.is_deleted = 0
  {query}
"""

GET_LATEST_PERSON_ID_SQL = """
(select id from (
    {latest_person_sql}
))
""".format(
    latest_person_sql=GET_LATEST_PERSON_SQL
)

#
# person_distinct_id table - use this still in queries, but this will eventually get removed.
#

PERSONS_DISTINCT_ID_TABLE = "person_distinct_id"

PERSONS_DISTINCT_ID_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
    distinct_id VARCHAR,
    person_id UUID,
    team_id Int64,
    _sign Int8 DEFAULT 1,
    is_deleted Int8 ALIAS if(_sign==-1, 1, 0)
    {extra_fields}
) ENGINE = {engine}
"""

PERSONS_DISTINCT_ID_TABLE_SQL = lambda: (
    PERSONS_DISTINCT_ID_TABLE_BASE_SQL
    + """Order By (team_id, distinct_id, person_id)
{storage_policy}
"""
).format(
    table_name=PERSONS_DISTINCT_ID_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=CollapsingMergeTree(PERSONS_DISTINCT_ID_TABLE, ver="_sign"),
    extra_fields=KAFKA_COLUMNS,
    storage_policy=STORAGE_POLICY(),
)

# :KLUDGE: We default is_deleted to 0 for backwards compatibility for when we drop `is_deleted` from message schema.
#          Can't make DEFAULT if(_sign==-1, 1, 0) because Cyclic aliases error.
KAFKA_PERSONS_DISTINCT_ID_TABLE_SQL = lambda: """
CREATE TABLE {table_name} ON CLUSTER '{cluster}'
(
    distinct_id VARCHAR,
    person_id UUID,
    team_id Int64,
    _sign Nullable(Int8),
    is_deleted Nullable(Int8)
) ENGINE = {engine}
""".format(
    table_name="kafka_" + PERSONS_DISTINCT_ID_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=kafka_engine(KAFKA_PERSON_UNIQUE_ID),
)

# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSONS_DISTINCT_ID_TABLE_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
distinct_id,
person_id,
team_id,
coalesce(_sign, if(is_deleted==0, 1, -1)) AS _sign,
_timestamp,
_offset
FROM {database}.kafka_{table_name}
""".format(
    table_name=PERSONS_DISTINCT_ID_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    database=CLICKHOUSE_DATABASE,
)

#
# person_distinct_ids2 - new table!
#

PERSON_DISTINCT_ID2_TABLE = "person_distinct_id2"

PERSON_DISTINCT_ID2_TABLE_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
    team_id Int64,
    distinct_id VARCHAR,
    person_id UUID,
    is_deleted Int8,
    version Int64 DEFAULT 1
    {extra_fields}
) ENGINE = {engine}
"""

PERSON_DISTINCT_ID2_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_DISTINCT_ID2_TABLE, ver="version")

PERSON_DISTINCT_ID2_TABLE_SQL = lambda: (
    PERSON_DISTINCT_ID2_TABLE_BASE_SQL
    + """
    ORDER BY (team_id, distinct_id)
    SETTINGS index_granularity = 512
    """
).format(
    table_name=PERSON_DISTINCT_ID2_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=PERSON_DISTINCT_ID2_TABLE_ENGINE(),
    extra_fields=KAFKA_COLUMNS + "\n, _partition UInt64",
)

KAFKA_PERSON_DISTINCT_ID2_TABLE_SQL = lambda: PERSON_DISTINCT_ID2_TABLE_BASE_SQL.format(
    table_name="kafka_" + PERSON_DISTINCT_ID2_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=kafka_engine(KAFKA_PERSON_DISTINCT_ID),
    extra_fields="",
)

# You must include the database here because of a bug in clickhouse
# related to https://github.com/ClickHouse/ClickHouse/issues/10471
PERSON_DISTINCT_ID2_MV_SQL = """
CREATE MATERIALIZED VIEW {table_name}_mv ON CLUSTER '{cluster}'
TO {database}.{table_name}
AS SELECT
team_id,
distinct_id,
person_id,
is_deleted,
version,
_timestamp,
_offset,
_partition
FROM {database}.kafka_{table_name}
""".format(
    table_name=PERSON_DISTINCT_ID2_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    database=CLICKHOUSE_DATABASE,
)

#
# Static Cohort
#

PERSON_STATIC_COHORT_TABLE = "person_static_cohort"

PERSON_STATIC_COHORT_BASE_SQL = """
CREATE TABLE IF NOT EXISTS {table_name} ON CLUSTER '{cluster}'
(
    id UUID,
    person_id UUID,
    cohort_id Int64,
    team_id Int64
    {extra_fields}
) ENGINE = {engine}
"""

PERSON_STATIC_COHORT_TABLE_ENGINE = lambda: ReplacingMergeTree(PERSON_STATIC_COHORT_TABLE, ver="_timestamp")

PERSON_STATIC_COHORT_TABLE_SQL = lambda: (
    PERSON_STATIC_COHORT_BASE_SQL
    + """Order By (team_id, cohort_id, person_id, id)
{storage_policy}
"""
).format(
    table_name=PERSON_STATIC_COHORT_TABLE,
    cluster=CLICKHOUSE_CLUSTER,
    engine=PERSON_STATIC_COHORT_TABLE_ENGINE(),
    storage_policy=STORAGE_POLICY(),
    extra_fields=KAFKA_COLUMNS,
)

TRUNCATE_PERSON_STATIC_COHORT_TABLE_SQL = (
    f"TRUNCATE TABLE IF EXISTS {PERSON_STATIC_COHORT_TABLE} ON CLUSTER '{CLICKHOUSE_CLUSTER}'"
)

INSERT_PERSON_STATIC_COHORT = (
    f"INSERT INTO {PERSON_STATIC_COHORT_TABLE} (id, person_id, cohort_id, team_id, _timestamp) VALUES"
)

#
# Other queries
#

GET_TEAM_PERSON_DISTINCT_IDS = """
SELECT distinct_id, argMax(person_id, _timestamp) as person_id
FROM (
    SELECT distinct_id, person_id, max(_timestamp) as _timestamp
    FROM person_distinct_id
    WHERE team_id = %(team_id)s %(extra_where)s
    GROUP BY person_id, distinct_id, team_id
    HAVING max(is_deleted) = 0
)
GROUP BY distinct_id
"""

# Query to query distinct ids using the new table, will be used if 0003_fill_person_distinct_id2 migration is complete
GET_TEAM_PERSON_DISTINCT_IDS_NEW_TABLE = """
SELECT distinct_id, argMax(person_id, version) as person_id
FROM person_distinct_id2
WHERE team_id = %(team_id)s %(extra_where)s
GROUP BY distinct_id
HAVING argMax(is_deleted, version) = 0
"""

GET_PERSON_IDS_BY_FILTER = """
SELECT DISTINCT p.id
FROM ({latest_person_sql}) AS p
INNER JOIN ({GET_TEAM_PERSON_DISTINCT_IDS}) AS pdi ON p.id = pdi.person_id
WHERE team_id = %(team_id)s
  {distinct_query}
{limit}
{offset}
""".format(
    latest_person_sql=GET_LATEST_PERSON_SQL,
    distinct_query="{distinct_query}",
    limit="{limit}",
    offset="{offset}",
    GET_TEAM_PERSON_DISTINCT_IDS="{GET_TEAM_PERSON_DISTINCT_IDS}",
)

INSERT_PERSON_SQL = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted)
SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 0
"""

INSERT_PERSON_DISTINCT_ID = """
INSERT INTO person_distinct_id
SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, %(_sign)s, now(), 0
VALUES
"""

INSERT_PERSON_DISTINCT_ID2 = """
INSERT INTO person_distinct_id2
    (distinct_id, person_id, team_id, is_deleted, version, _timestamp, _offset, _partition)
SELECT %(distinct_id)s, %(person_id)s, %(team_id)s, 0, %(version)s, now(), 0, 0
VALUES
"""

DELETE_PERSON_BY_ID = """
INSERT INTO person (id, created_at, team_id, properties, is_identified, _timestamp, _offset, is_deleted)
SELECT %(id)s, %(created_at)s, %(team_id)s, %(properties)s, %(is_identified)s, %(_timestamp)s, 0, 1
"""

DELETE_PERSON_EVENTS_BY_ID = """
ALTER TABLE events DELETE
WHERE distinct_id IN (
    SELECT distinct_id FROM person_distinct_id WHERE person_id=%(id)s AND team_id = %(team_id)s
)
AND team_id = %(team_id)s
"""

INSERT_COHORT_ALL_PEOPLE_THROUGH_PERSON_ID = """
INSERT INTO {cohort_table}
SELECT generateUUIDv4(), actor_id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0
FROM (
    SELECT actor_id FROM ({query})
)
"""

INSERT_COHORT_ALL_PEOPLE_SQL = """
INSERT INTO {cohort_table}
SELECT generateUUIDv4(), id, %(cohort_id)s, %(team_id)s, %(_timestamp)s, 0
FROM (
    SELECT id
    FROM (
        {latest_person_sql}
    ) as person
    INNER JOIN (
        SELECT person_id, distinct_id
        FROM ({GET_TEAM_PERSON_DISTINCT_IDS})
        WHERE person_id IN ({content_sql})
    ) as pdi ON person.id = pdi.person_id
    WHERE team_id = %(team_id)s
    GROUP BY id
)
"""

GET_DISTINCT_IDS_BY_PROPERTY_SQL = """
SELECT distinct_id
FROM (
    {GET_TEAM_PERSON_DISTINCT_IDS}
)
WHERE person_id IN (
    SELECT id
    FROM (
        SELECT id, argMax(properties, person._timestamp) as properties, max(is_deleted) as is_deleted
        FROM person
        WHERE team_id = %(team_id)s
        GROUP BY id
        HAVING is_deleted = 0
    )
    WHERE {filters}
)
"""

GET_DISTINCT_IDS_BY_PERSON_ID_FILTER = """
SELECT distinct_id
FROM ({GET_TEAM_PERSON_DISTINCT_IDS})
WHERE {filters}
"""

GET_PERSON_PROPERTIES_COUNT = """
SELECT tupleElement(keysAndValues, 1) as key, count(*) as count
FROM person
ARRAY JOIN JSONExtractKeysAndValuesRaw(properties) as keysAndValues
WHERE team_id = %(team_id)s
GROUP BY tupleElement(keysAndValues, 1)
ORDER BY count DESC, key ASC
"""

GET_ACTORS_FROM_EVENT_QUERY = """
SELECT
{id_field} AS actor_id
{matching_events_select_statement}
FROM ({events_query})
GROUP BY actor_id
{limit}
{offset}
"""

COMMENT_DISTINCT_ID_COLUMN_SQL = (
    lambda: f"ALTER TABLE person_distinct_id ON CLUSTER '{CLICKHOUSE_CLUSTER}' COMMENT COLUMN distinct_id 'skip_0003_fill_person_distinct_id2'"
)

SELECT_PERSON_PROP_VALUES_SQL = """
SELECT
    value,
    count(value)
FROM (
    SELECT {property_field} as value
    FROM person
    WHERE team_id = %(team_id)s
      AND is_deleted = 0
      AND {property_field} IS NOT NULL
      AND {property_field} != ''
    ORDER BY id DESC
    LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""

SELECT_PERSON_PROP_VALUES_SQL_WITH_FILTER = """
SELECT
    value,
    count(value)
FROM (
    SELECT {property_field} as value
    FROM person
    WHERE team_id = %(team_id)s
      AND is_deleted = 0
      AND {property_field} ILIKE %(value)s
    ORDER BY id DESC
    LIMIT 100000
)
GROUP BY value
ORDER BY count(value) DESC
LIMIT 20
"""
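GET_PERSON_IDS_BY_FILTER above relies on a two-stage str.format: placeholders that must survive the first pass are substituted with their own literal text, so only latest_person_sql is filled immediately while {distinct_query}, {limit} and {offset} remain for query-build time. A standalone illustration of the trick:

TEMPLATE = "SELECT id FROM ({inner}) WHERE team_id = %(team_id)s {filters} {limit}"

# First pass: fill 'inner', keep '{filters}' and '{limit}' by replacing them
# with their own placeholder text.
partial = TEMPLATE.format(inner="SELECT id FROM person",
                          filters="{filters}", limit="{limit}")

# Second pass, later: fill the rest. %(team_id)s is untouched by str.format.
print(partial.format(filters="AND is_deleted = 0", limit="LIMIT 10"))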
28.200946
206
0.748093
1,696
11,929
4.893868
0.107311
0.033976
0.043012
0.034458
0.641566
0.569157
0.483133
0.433253
0.35494
0.315663
0
0.012078
0.15324
11,929
422
207
28.267773
0.809623
0.071255
0
0.460452
0
0.016949
0.610925
0.06566
0
0
0
0
0
1
0
false
0
0.011299
0
0.011299
0
0
0
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1
0
null
0
0
0
0
0
0
0
0
0
0
0
0
0
1