hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
633cfa8e0c87a2f94a51376a2674ae1d228de67a | 1,211 | py | Python | dev_tools/text_translate.py | Torivon/MiniAdventure | a0f9873e5a3ae772ef9dc47cfaa944a48fae6bb4 | [
"MIT"
] | 5 | 2015-12-20T15:07:50.000Z | 2021-08-12T18:20:53.000Z | dev_tools/text_translate.py | Torivon/MiniAdventure | a0f9873e5a3ae772ef9dc47cfaa944a48fae6bb4 | [
"MIT"
] | 86 | 2016-01-07T22:28:53.000Z | 2016-12-13T19:19:28.000Z | dev_tools/text_translate.py | Torivon/MiniAdventure | a0f9873e5a3ae772ef9dc47cfaa944a48fae6bb4 | [
"MIT"
] | 3 | 2016-01-09T04:15:52.000Z | 2016-12-31T02:47:18.000Z | import sys
import struct
if len(sys.argv) == 1:
sys.exit("Must include a file to process")
with open(sys.argv[1]) as input_file:
header_file = ""
data_file = ""
text_dict = {}
count = 0
for line in input_file.readlines():
if count == 0:
header_file = line.strip()
elif count == 1:
data_file = line.strip()
else:
            mapping = line.split(',', 1)  # split once so the text itself may contain commas
            text_dict[mapping[0]] = mapping[1].strip().encode()  # strip the trailing newline before encoding
count += 1
binary_size = 0
with open(header_file, 'w') as header:
with open(data_file, 'wb') as data:
header.write("#pragma once\n\n")
header.write("enum\n")
header.write("{\n")
data.write(struct.pack('h', len(text_dict)))
binary_size += 2 + 4 * (len(text_dict))
text_items = text_dict.items()
for k, v in text_items:
data.write(struct.pack('h', binary_size))
data.write(struct.pack('h', len(v)))
binary_size += len(v)
for k, v in text_items:
header.write("\t" + k + ",\n")
data.write(v)
header.write("};\n")
| 25.229167 | 57 | 0.501239 | 157 | 1,211 | 3.738854 | 0.324841 | 0.068143 | 0.076661 | 0.097104 | 0.166951 | 0.132879 | 0 | 0 | 0 | 0 | 0 | 0.014103 | 0.355904 | 1,211 | 47 | 58 | 25.765957 | 0.738462 | 0 | 0 | 0.057143 | 0 | 0 | 0.058678 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.057143 | 0 | 0.057143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
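For reference, a minimal sketch of the input that `dev_tools/text_translate.py` above expects: the header path on the first line, the data path on the second, then `KEY,text` pairs. The file and key names here are assumptions for illustration.

```python
# Hypothetical input for dev_tools/text_translate.py (file names are assumptions):
lines = [
    "generated/text.h",            # header file the script will write
    "generated/text.bin",          # packed binary blob the script will write
    "TEXT_HELLO,Hello adventurer",
    "TEXT_FAREWELL,Safe travels",
]
with open("strings.txt", "w") as f:
    f.write("\n".join(lines) + "\n")
# Then run: python dev_tools/text_translate.py strings.txt
```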
633fe84973c2e35383e77383db0029a500fbf5d1 | 578 | py | Python | daemon.py | hyoungseok/corona | 19c327ab338c64335fe7f5352b8a07de7f65a153 | [
"Apache-2.0"
] | null | null | null | daemon.py | hyoungseok/corona | 19c327ab338c64335fe7f5352b8a07de7f65a153 | [
"Apache-2.0"
] | null | null | null | daemon.py | hyoungseok/corona | 19c327ab338c64335fe7f5352b8a07de7f65a153 | [
"Apache-2.0"
] | null | null | null | import os
import re
import time
import util
start_pattern = re.compile("^start_[a-zA-Z0-9]+$")
print(f"startDaemon={int(time.time())}")
while os.path.exists("state/daemon"):
time.sleep(5)
print(f"aliveCheck={int(time.time())}")
file_list = os.listdir("state")
start_match = list(filter(start_pattern.match, file_list))
if start_match:
print(f"startEval={int(time.time())}")
token = start_match[0].split("_")[-1]
os.rename(f"state/start_{token}", f"state/finish_{token}")
util.export_pdf(token)
util.zip_all(token)
| 27.52381 | 66 | 0.650519 | 86 | 578 | 4.22093 | 0.476744 | 0.049587 | 0.090909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010373 | 0.16609 | 578 | 20 | 67 | 28.9 | 0.742739 | 0 | 0 | 0 | 0 | 0 | 0.283737 | 0.150519 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.235294 | 0 | 0.235294 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
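A small sketch of how a producer could drive `daemon.py` above, based on the file protocol the loop implements (`state/daemon` keeps it alive; a `start_<token>` marker triggers an export and is renamed to `finish_<token>`). The token value is an assumption.

```python
import pathlib

# Hypothetical trigger for daemon.py (token value is an assumption):
pathlib.Path("state").mkdir(exist_ok=True)
pathlib.Path("state/daemon").touch()          # the daemon loop runs while this file exists
pathlib.Path("state/start_report42").touch()  # picked up within ~5 s, renamed to finish_report42
```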
63411ef904c5bb1a60b8f39984a7821e83fe65a2 | 2,868 | py | Python | Project-:-Mars-Crater/code.py | sureshchaudhari13/ga-learner-dsmp-repo | 0a2d726b1f7be8c31592f5650e7dbd3f23cdb609 | [
"MIT"
] | null | null | null | Project-:-Mars-Crater/code.py | sureshchaudhari13/ga-learner-dsmp-repo | 0a2d726b1f7be8c31592f5650e7dbd3f23cdb609 | [
"MIT"
] | null | null | null | Project-:-Mars-Crater/code.py | sureshchaudhari13/ga-learner-dsmp-repo | 0a2d726b1f7be8c31592f5650e7dbd3f23cdb609 | [
"MIT"
] | null | null | null | # import libraries
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
# Code starts here
# Read the data
df = pd.read_csv(filepath_or_buffer=path, compression='zip')  # `path` is expected to be defined by the execution environment
print(df.head())
# Dependent variable
y = df['attr1089']
# Independent variable
X = df.drop(columns=['attr1089'])
# Split the data
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=4)
# Standardize the data
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print(X_test[45,5])
# --------------
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
# instantiate LogisticRegression model
lr = LogisticRegression()
# fit & predict the values using model
lr.fit(X_train,y_train)
y_pred = lr.predict(X_test)
# find the roc_score
roc_score = roc_auc_score(y_test,y_pred)
print('roc_score : ',roc_score)
# --------------
from sklearn.tree import DecisionTreeClassifier
# create model
dt = DecisionTreeClassifier(random_state=4)
# fit & predict using model
dt.fit(X_train,y_train)
y_pred = dt.predict(X_test)
# find the roc_auc_score for DTree
roc_score = roc_auc_score(y_test,y_pred)
print('roc_score using DTree : ',roc_score)
# --------------
from sklearn.ensemble import RandomForestClassifier
# Code starts here
# Create RF model
rfc = RandomForestClassifier(random_state=4)
# fit & predict using model
rfc.fit(X_train,y_train)
y_pred = rfc.predict(X_test)
# find the roc_auc_score score
roc_score = roc_auc_score(y_test,y_pred)
print('roc_score using RF : ', roc_score)
# Code ends here
# --------------
# Import Bagging Classifier
from sklearn.ensemble import BaggingClassifier
# Code starts here
bagging_clf = BaggingClassifier(base_estimator= DecisionTreeClassifier(), n_estimators=100 , max_samples=100 , random_state=0)
# fit & predict using model
bagging_clf.fit(X_train,y_train)
# find the score
score_bagging = bagging_clf.score(X_test,y_test)
print('score_bagging : ',score_bagging)
# Code ends here
# --------------
# Import libraries
from sklearn.ensemble import VotingClassifier
# Various models
clf_1 = LogisticRegression()
clf_2 = DecisionTreeClassifier(random_state=4)
clf_3 = RandomForestClassifier(random_state=4)
model_list = [('lr',clf_1),('DT',clf_2),('RF',clf_3)]
# Code starts here
# create Voting classifier with hard voting
voting_clf_hard = VotingClassifier(estimators=model_list,voting='hard')
# fit & predict using model
voting_clf_hard.fit(X_train,y_train)
# find the score on test data using hard voting
hard_voting_score = voting_clf_hard.score(X_test,y_test)
print('hard_voting_score : ',hard_voting_score)
# Code ends here
| 21.89313 | 126 | 0.766736 | 424 | 2,868 | 4.941038 | 0.238208 | 0.038186 | 0.025776 | 0.023866 | 0.225298 | 0.225298 | 0.195704 | 0.136516 | 0.0821 | 0.0821 | 0 | 0.012272 | 0.119247 | 2,868 | 130 | 127 | 22.061538 | 0.817102 | 0.258368 | 0 | 0.102041 | 0 | 0 | 0.062709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.22449 | 0 | 0.22449 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
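As a complement to the hard-voting ensemble above, a minimal soft-voting sketch (averaging `predict_proba` outputs instead of counting predicted labels); it reuses `model_list`, `X_train`, `y_train`, `X_test`, and `y_test` as defined in the script.

```python
# Soft voting averages class probabilities; all three base models expose predict_proba.
voting_clf_soft = VotingClassifier(estimators=model_list, voting='soft')
voting_clf_soft.fit(X_train, y_train)
print('soft_voting_score : ', voting_clf_soft.score(X_test, y_test))
```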
63495a91beceb69204b6a7d9ec3a3f68467b0207 | 4,611 | py | Python | peaksIdentification/plotter.py | andreagia/peaks-identification | ddcc3523f9a1154f6c2f69035dde1bfb9eac0f2a | [
"MIT"
] | null | null | null | peaksIdentification/plotter.py | andreagia/peaks-identification | ddcc3523f9a1154f6c2f69035dde1bfb9eac0f2a | [
"MIT"
] | null | null | null | peaksIdentification/plotter.py | andreagia/peaks-identification | ddcc3523f9a1154f6c2f69035dde1bfb9eac0f2a | [
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import plotly.express as px
import pandas as pd
# from scipy.spatial import distance_matrix
import plotly.graph_objects as go
from peaksIdentification.postprocessing import sliding_avg
import numpy as np
def plotplot(spettro_old, spettro_new, associations):
    """
    :param spettro_old: dataframe
    :param spettro_new: dataframe
    :param associations: [('k_old_1', 'k_new_1', d), ('k_old_2', 'k_new_2', d), ..., ('k_old_N', 'k_new_N', d),]
    """
    print(associations)
    print(len(associations))
X = spettro_old.dd.to_numpy()
keys_X = spettro_old.dd.index.tolist()
Y = spettro_new.dd.to_numpy()
keys_Y = spettro_new.dd.index.tolist()
fig = go.Figure()
fig.update_layout(width=1300, height=1000)
fig['layout']['xaxis']['autorange'] = "reversed"
fig['layout']['yaxis']['autorange'] = "reversed"
fig.add_trace(
go.Scatter(
mode='markers+text',
x=X[:, 0],
y=X[:, 1]*0.2,
marker=dict(
color='LightSkyBlue',
size=4,
line=dict(
color='MediumPurple',
width=1
)
),
name='Spectra1',
text=keys_X, textposition="bottom center"
))
fig.add_trace(
go.Scatter(
mode='markers+text',
x=Y[:, 0],
y=Y[:, 1]*0.2,
marker=dict(
color='Coral',
size=4,
line=dict(
color='MediumPurple',
width=1
)
),
name='Spectra2',
text=keys_Y, textposition="bottom center"
))
fig.show()
hist = []
histn = []
histi = []
for triple in associations:
# print(triple)
key_old = triple[0]
key_new = triple[1]
dist = triple[2]
hist.append(dist)
histn.append(key_old)
strnkey1 = ''.join(char for char in key_old if char.isnumeric())
histi.append(int(strnkey1))
old_p_xy = spettro_old.dd.loc[key_old].to_numpy(dtype=float)
new_p_xy = spettro_new.dd.loc[key_new].to_numpy(dtype=float)
ddist = np.sqrt(((old_p_xy - new_p_xy)**2).sum())
#print("??", old_p_xy, new_p_xy, ddist)
if key_old == key_new:
color = "MediumPurple"
else:
color = "red"
fig.add_trace(go.Scatter(x=[old_p_xy[0], new_p_xy[0]], y=[old_p_xy[1]*0.2, new_p_xy[1]*0.2],
mode='lines',
showlegend=False,
text='provaaa',
line=dict(color=color)))
fig.show()
'''
df1 = pd.DataFrame({"DistanceC": hist, "Index":histi, "Name":histn})
df1 = df1.sort_values(by=['Index'])
print(df1)
fig2 = px.bar(df1, x = "Name", y="DistanceC")
fig2.show()
'''
def plotHistogram(df1, real_dist_dict=None):
#print("REAL DIST DICT ", real_dist_dict) # real distance dictionary
#print("SIZE REAL DIST DICT ", len(real_dist_dict))
#print("= = = = =>\n",df1)
#distances = df1['Distance'].tolist()
#sl_avg = sliding_avg(distances, half_window_size=3) # sliding window avg on our estimated shift distances
#print(sl_avg)
#print(len(df1['Name'].tolist()), df1['Name'].tolist())
#print(len(df1['Index'].tolist()), df1['Index'].tolist())
#print("==============")
#df1['window_avg'] = sl_avg
sl_avg = df1['window_avg']
    # plot the histogram
#fig2 = px.bar(df1, x = "Name", y="Distance")
fig2 = go.Figure()
fig2.add_trace(go.Bar(
x=df1["Name"],
y=df1["Distance"],
name='DistanceGG',
marker_color='blue'
))
if real_dist_dict is not None:
#fig2 = px.bar(df1, x="Name", y="Real_dist")
fig2.add_trace(go.Bar(
x=df1["Name"],
y=df1["Real_dist"],
name='Distance real',
marker_color='lightgray'
))
    # plot the point marking the sliding-window average value
fig2.add_trace(
go.Scatter(
mode='markers+text+lines',
x=df1['Name'],
y=sl_avg,
marker=dict(
color='Coral',
size=4,
line=dict(color='MediumPurple',width=1
)
),
name='Window avg', text='', textposition="bottom center"
))
fig2.show()
#return sl_avg, df1['Name'].tolist(), df1['Index'].tolist()
| 27.446429 | 112 | 0.52288 | 560 | 4,611 | 4.141071 | 0.2625 | 0.012937 | 0.025873 | 0.029323 | 0.195343 | 0.181544 | 0.162139 | 0.125054 | 0.125054 | 0.076757 | 0 | 0.024162 | 0.326827 | 4,611 | 167 | 113 | 27.610778 | 0.722938 | 0.166558 | 0 | 0.336538 | 0 | 0 | 0.094527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.057692 | 0 | 0.076923 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
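A minimal usage sketch for `plotHistogram` above; the values are made up, but the column names match what the function reads (`Name`, `Index`, `Distance`, `window_avg`, plus `Real_dist` only when `real_dist_dict` is passed).

```python
import pandas as pd

# Toy input for plotHistogram (values are assumptions):
df1 = pd.DataFrame({
    "Name": ["A1", "G2", "L3"],
    "Index": [1, 2, 3],
    "Distance": [0.02, 0.15, 0.04],
    "window_avg": [0.05, 0.07, 0.08],  # precomputed sliding-window average
})
plotHistogram(df1)  # add real_dist_dict=... (and a Real_dist column) to overlay reference distances
```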
6350381060c9588e76e613d273657138ea46c5c5 | 6,311 | py | Python | ledgereth/__main__.py | unparalleled-js/ledger-eth-lib | ec9e4d0e801c98d09e5c8b04983e12540cff1650 | [
"MIT"
] | 10 | 2020-10-26T05:46:47.000Z | 2022-03-07T22:28:50.000Z | ledgereth/__main__.py | unparalleled-js/ledger-eth-lib | ec9e4d0e801c98d09e5c8b04983e12540cff1650 | [
"MIT"
] | 13 | 2020-02-04T09:08:45.000Z | 2022-03-21T00:05:28.000Z | ledgereth/__main__.py | unparalleled-js/ledger-eth-lib | ec9e4d0e801c98d09e5c8b04983e12540cff1650 | [
"MIT"
] | 6 | 2020-02-04T18:22:56.000Z | 2022-03-01T13:44:51.000Z | import argparse
import sys
from enum import IntEnum
from eth_utils import decode_hex, encode_hex
from ledgereth import (
create_transaction,
find_account,
get_account_by_path,
get_accounts,
sign_message,
sign_typed_data_draft,
)
from ledgereth.comms import init_dongle
class ExitCodes(IntEnum):
SUCCESS = 0
GENERAL_ERROR = 1
INVALID_ARGUMENT = 2
def get_args(argv):
parser = argparse.ArgumentParser(description="Do some ledger ops")
parser.add_argument(
"-d",
"--debug",
action="store_true",
help="Print extra debugging information",
)
subparsers = parser.add_subparsers(
title="Commands", dest="command", help="Available commands"
)
# `accounts` command
accounts_parser = subparsers.add_parser(
"accounts", help="Print accounts from the Ledger"
)
accounts_parser.add_argument(
"path",
metavar="PATH",
nargs="?",
help="Get the account for a specific path",
)
accounts_parser.add_argument(
"-c",
"--count",
type=int,
default=3,
help="How many accounts to fetch (default: 3)",
)
# `send` command
send_parser = subparsers.add_parser(
"send", help="Send a value transaction from a Ledger account"
)
send_parser.add_argument(
"from_address",
metavar="FROM",
help="Account to sign with",
)
send_parser.add_argument(
"to_address",
metavar="TO",
help="Account to send to",
)
send_parser.add_argument(
"wei",
metavar="WEI",
type=int,
help="Amount to send (in wei)",
)
send_parser.add_argument(
"-n",
"--nonce",
type=int,
required=True,
help="Nonce to use for the transaction",
)
send_parser.add_argument(
"-c",
"--chainid",
type=int,
default=1,
help="Chain ID (default: 1)",
)
send_parser.add_argument(
"-g",
"--gas",
type=int,
default=22000,
help="The gas limit to use for the tx (default: 22000)",
)
send_parser.add_argument(
"-p",
"--gasprice",
type=int,
help="The gas price to use for the tx",
)
send_parser.add_argument(
"-f",
"--max-fee",
type=int,
help="The max fee per gas to use for the tx",
)
send_parser.add_argument(
"-b",
"--priority-fee",
type=int,
default=0,
help="The priority fee per gas to use for the tx (default: 0)",
)
send_parser.add_argument(
"-d",
"--data",
type=str,
help="The hex data to send with the tx (default: empty)",
)
# `sign` command
sign_parser = subparsers.add_parser(
"sign", help="Sign a text message with a Ledger account using EIP-191 v0"
)
sign_parser.add_argument(
"account_address",
metavar="ADDRESS",
help="Address of the account to sign with",
)
sign_parser.add_argument(
"message",
metavar="MESSAGE",
help="Message to sign",
)
# `signtyped` command
signtyped_parser = subparsers.add_parser(
"signtyped", help="Sign a text message with a Ledger account using EIP-191 v0"
)
signtyped_parser.add_argument(
"account_address",
metavar="ADDRESS",
help="Address of the account to sign with",
)
signtyped_parser.add_argument(
"domain_hash",
metavar="DOMAIN",
help="Domain hash to sign",
)
signtyped_parser.add_argument(
"message_hash",
metavar="MESSAGE",
help="Message hash to sign",
)
return parser.parse_args(argv)
def print_accounts(dongle, args):
if args.path:
account = get_account_by_path(args.path, dongle)
print(f"Account {account.path} {account.address}")
else:
accounts = get_accounts(dongle, count=args.count)
for i, a in enumerate(accounts):
print(f"Account {i}: {a.path} {a.address}")
def send_value(dongle, args):
print(f"Sending {args.wei} ETH from {args.from_address} to {args.to_address}")
account = find_account(args.from_address, dongle)
if not account:
print("Account not found on device", file=sys.stderr)
dongle.close()
sys.exit(ExitCodes.INVALID_ARGUMENT)
if not args.gasprice and not args.max_fee:
print("Either --gasprice or --max-fee must be provided", file=sys.stderr)
dongle.close()
sys.exit(ExitCodes.INVALID_ARGUMENT)
to_address = args.to_address
signed = create_transaction(
destination=to_address,
amount=args.wei,
gas=args.gas,
gas_price=args.gasprice,
max_fee_per_gas=args.max_fee,
max_priority_fee_per_gas=args.priority_fee,
data=args.data or "",
nonce=args.nonce,
chain_id=args.chainid,
sender_path=account.path,
)
print(f"Signed Raw Transaction: {signed.raw_transaction()}")
def sign_text_message(dongle, args):
print(f'Signing "{args.message}" with {args.account_address}')
account = find_account(args.account_address, dongle)
signed = sign_message(args.message, account.path)
print(f"Signature: {signed.signature}")
def sign_typed_data(dongle, args):
print(f"Signing typed data with account {args.account_address}")
print(f"Domain hash: {args.domain_hash}")
print(f"Message hash: {args.message_hash}")
account = find_account(args.account_address, dongle)
signed = sign_typed_data_draft(
decode_hex(args.domain_hash), decode_hex(args.message_hash), account.path
)
print(f"Signature: {signed.signature}")
COMMANDS = {
"accounts": print_accounts,
"send": send_value,
"sign": sign_text_message,
"signtyped": sign_typed_data,
}
def main(argv=sys.argv[1:]):
args = get_args(argv)
command = args.command
if command not in COMMANDS:
print(f"Invalid command: {command}", file=sys.stderr)
sys.exit(ExitCodes.INVALID_ARGUMENT)
dongle = init_dongle(debug=args.debug)
COMMANDS[command](dongle, args)
dongle.close()
if __name__ == "__main__":
main()
| 25.447581 | 86 | 0.61369 | 775 | 6,311 | 4.821935 | 0.192258 | 0.045759 | 0.081884 | 0.056195 | 0.224512 | 0.181964 | 0.174739 | 0.152796 | 0.144501 | 0.098475 | 0 | 0.006086 | 0.270955 | 6,311 | 247 | 87 | 25.550607 | 0.806129 | 0.010775 | 0 | 0.229268 | 0 | 0 | 0.26034 | 0.011222 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029268 | false | 0 | 0.029268 | 0 | 0.082927 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
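Hypothetical command lines for the CLI above, assembled from the parsers it defines (addresses, hashes, and values are placeholders):

```python
# python -m ledgereth accounts --count 5
# python -m ledgereth accounts "44'/60'/0'/0/0"
# python -m ledgereth send FROM_ADDR TO_ADDR 1000000000000000 --nonce 0 --gasprice 20000000000
# python -m ledgereth sign ACCOUNT_ADDR "hello world"
# python -m ledgereth signtyped ACCOUNT_ADDR DOMAIN_HASH MESSAGE_HASH
```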
63504c8288f6b3e29c374e6692908482fa639432 | 1,324 | py | Python | src/es/utils/nodes.py | DefenseStorm/es-cli | ad48c778dc426d5504fff7c36e9a2d61aeafb85f | [
"MIT"
] | null | null | null | src/es/utils/nodes.py | DefenseStorm/es-cli | ad48c778dc426d5504fff7c36e9a2d61aeafb85f | [
"MIT"
] | 2 | 2018-09-17T18:18:44.000Z | 2019-01-30T22:01:21.000Z | src/es/utils/nodes.py | DefenseStorm/es-cli | ad48c778dc426d5504fff7c36e9a2d61aeafb85f | [
"MIT"
] | 1 | 2018-09-07T17:19:08.000Z | 2018-09-07T17:19:08.000Z |
import argparse
import os
import re
import requests
import subprocess
from . import config as es_config
def _match_to_node(match):
return {"node": match.group("node"),
"node-type": match.group("nodetype"),
"zone": match.group("zone")}
def _get_nodes_by_type(nodetype):
env = es_config.env()
command = ['knife',
'search',
'role:elasticsearch6_{} AND chef_environment:{}'.format(nodetype, env),
'-a',
'ec2.placement_availability_zone']
with open(os.devnull, 'w') as devnull:
output = subprocess.check_output(command, stderr=devnull)
pattern = re.compile(r'(?P<node>{}-es-(?P<nodetype>data-warm|data-hot|percolate)-\S+):\s+'
r'ec2.placement_availability_zone:\s+us-west-2(?P<zone>[abc])'.format(env))
return [_match_to_node(match) for match in re.finditer(pattern, output.decode('utf8'))]
def get_nodes(include_hot=True, include_warm=True, include_percolate=True):
hot_nodes = _get_nodes_by_type('data_hot') if include_hot else []
warm_nodes = _get_nodes_by_type('data_warm') if include_warm else []
percolate_nodes = _get_nodes_by_type('percolate') if include_percolate else []
return {node['node']: node for node in hot_nodes + warm_nodes + percolate_nodes}
| 34.842105 | 100 | 0.664653 | 179 | 1,324 | 4.659218 | 0.363128 | 0.047962 | 0.047962 | 0.067146 | 0.077938 | 0.055156 | 0 | 0 | 0 | 0 | 0 | 0.004713 | 0.19864 | 1,324 | 37 | 101 | 35.783784 | 0.781338 | 0 | 0 | 0 | 0 | 0.037037 | 0.213908 | 0.134543 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0.037037 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
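A short usage sketch for `get_nodes` above, showing the shape of the returned mapping (node name → dict with `node`, `node-type`, `zone`):

```python
# Only warm data nodes, keyed by node name:
nodes = get_nodes(include_hot=False, include_percolate=False)
for name, info in nodes.items():
    print(name, info["node-type"], info["zone"])
```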
6353578656d78a5bb7c9aaf1ae55612bd73bf0e5 | 6,840 | py | Python | run_klue.py | tucan9389/KLUE-baseline | add61158e61f86adfca65087237443828b650090 | [
"Apache-2.0"
] | 71 | 2021-07-29T11:34:50.000Z | 2022-03-21T08:17:21.000Z | run_klue.py | tucan9389/KLUE-baseline | add61158e61f86adfca65087237443828b650090 | [
"Apache-2.0"
] | 3 | 2021-08-20T14:19:58.000Z | 2021-12-03T06:42:27.000Z | run_klue.py | tucan9389/KLUE-baseline | add61158e61f86adfca65087237443828b650090 | [
"Apache-2.0"
] | 16 | 2021-08-01T02:29:11.000Z | 2022-02-25T07:51:03.000Z | import argparse
import logging
import os
import sys
from pathlib import Path
from typing import List, Optional, Dict, Any
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import CSVLogger
from klue_baseline import KLUE_TASKS
from klue_baseline.utils import Command, LoggingCallback
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def add_general_args(parser: argparse.ArgumentParser, root_dir: str) -> argparse.ArgumentParser:
parser.add_argument(
"--task",
type=str,
required=True,
help=f"Run one of the task in {list(KLUE_TASKS.keys())}",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--gpus",
default=None,
nargs="+",
type=int,
help="Select specific GPU allocated for this, it is by default [] meaning none",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--num_sanity_val_steps",
type=int,
default=2,
help="Sanity check validation steps (default 2 steps)",
)
parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
parser.add_argument(
"--gradient_accumulation_steps",
dest="accumulate_grad_batches",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument("--metric_key", type=str, default="loss", help="The name of monitoring metric")
parser.add_argument(
"--patience",
default=5,
type=int,
help="The number of validation epochs with no improvement after which training will be stopped.",
)
parser.add_argument(
"--early_stopping_mode",
choices=["min", "max"],
default="max",
type=str,
help="In min mode, training will stop when the quantity monitored has stopped decreasing; in max mode it will stop when the quantity monitored has stopped increasing;",
)
return parser
def make_klue_trainer(
args: argparse.Namespace,
extra_callbacks: List = [],
checkpoint_callback: Optional[pl.Callback] = None,
logging_callback: Optional[pl.Callback] = None,
**extra_train_kwargs,
) -> pl.Trainer:
pl.seed_everything(args.seed)
# Logging
csv_logger = CSVLogger(args.output_dir, name=args.task)
args.output_dir = csv_logger.log_dir
if logging_callback is None:
logging_callback = LoggingCallback()
# add custom checkpoints
metric_key = f"valid/{args.metric_key}"
if checkpoint_callback is None:
filename_for_metric = "{" + metric_key + ":.2f}"
checkpoint_callback = ModelCheckpoint(
dirpath=Path(args.output_dir).joinpath("checkpoint"),
monitor=metric_key,
filename="{epoch:02d}-{step}=" + filename_for_metric,
save_top_k=1,
mode="max",
)
early_stopping_callback = EarlyStopping(monitor=metric_key, patience=args.patience, mode=args.early_stopping_mode)
extra_callbacks.append(early_stopping_callback)
train_params: Dict[str, Any] = {}
if args.fp16:
train_params["precision"] = 16
# Set GPU & Data Parallel
args.num_gpus = 0 if args.gpus is None else len(args.gpus)
if args.num_gpus > 1:
train_params["accelerator"] = "dp"
train_params["val_check_interval"] = 0.25 # check validation set 4 times during a training epoch
train_params["num_sanity_val_steps"] = args.num_sanity_val_steps
train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
train_params["profiler"] = extra_train_kwargs.get("profiler", None)
return pl.Trainer.from_argparse_args(
args,
weights_summary=None,
callbacks=[logging_callback] + extra_callbacks,
logger=csv_logger,
checkpoint_callback=checkpoint_callback,
**train_params,
)
def log_args(args: argparse.Namespace) -> None:
args_dict = vars(args)
max_len = max([len(k) for k in args_dict.keys()])
fmt_string = "\t%" + str(max_len) + "s : %s"
logger.info("Arguments:")
for key, value in args_dict.items():
logger.info(fmt_string, key, value)
def main() -> None:
command = sys.argv[1].lower()
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument(
"command",
type=str,
help=f"Whether to run klue with command ({Command.tolist()})",
)
if command in ["--help", "-h"]:
parser.parse_known_args()
elif command not in Command.tolist():
raise ValueError(f"command is positional argument. command list: {Command.tolist()}")
# Parser (general -> data -> model)
parser = add_general_args(parser, os.getcwd())
parsed, _ = parser.parse_known_args()
task_name = parsed.task
task = KLUE_TASKS.get(task_name, None)
if not task:
raise ValueError(f"task_name is positional argument. task list: {list(KLUE_TASKS.keys())}")
parser = task.processor_type.add_specific_args(parser, os.getcwd())
parser = task.model_type.add_specific_args(parser, os.getcwd())
args = parser.parse_args()
log_args(args)
trainer = make_klue_trainer(args)
task.setup(args, command)
if command == Command.Train:
logger.info("Start to run the full optimization routine.")
trainer.fit(**task.to_dict())
# load the best checkpoint automatically
trainer.get_model().eval_dataset_type = "valid"
val_results = trainer.test(test_dataloaders=task.val_loader, verbose=False)[0]
print("-" * 80)
output_val_results_file = os.path.join(args.output_dir, "val_results.txt")
with open(output_val_results_file, "w") as writer:
for k, v in val_results.items():
writer.write(f"{k} = {v}\n")
print(f" - {k} : {v}")
print("-" * 80)
elif command == Command.Evaluate:
trainer.test(task.model, test_dataloaders=task.val_loader)
elif command == Command.Test:
trainer.test(task.model, test_dataloaders=task.test_loader)
if __name__ == "__main__":
main()
| 34.029851 | 176 | 0.660088 | 870 | 6,840 | 4.990805 | 0.296552 | 0.029019 | 0.050898 | 0.011746 | 0.096039 | 0.05251 | 0.05251 | 0.019346 | 0 | 0 | 0 | 0.006399 | 0.223246 | 6,840 | 200 | 177 | 34.2 | 0.810841 | 0.026316 | 0 | 0.139394 | 0 | 0.006061 | 0.223508 | 0.028709 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024242 | false | 0.006061 | 0.066667 | 0 | 0.10303 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
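A hypothetical invocation of `run_klue.py` above; the command string and task name are placeholders, and the model/data flags contributed by each task's `add_specific_args` are not shown here.

```python
# python run_klue.py train --task <task-name> --output_dir ./output --gpus 0 --seed 42
# python run_klue.py evaluate --task <task-name> --output_dir ./output
```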
63537ba60e3a4d8d1007a7de63eba806a0202456 | 1,917 | py | Python | common/net_simple.py | scotty1373/Torcs_PPO | 864e30e7d02bef852907cd2abb9bba7a0630b53a | [
"MIT"
] | 3 | 2022-01-07T09:04:42.000Z | 2022-02-06T16:32:56.000Z | common/net_simple.py | scotty1373/Torcs_PPO | 864e30e7d02bef852907cd2abb9bba7a0630b53a | [
"MIT"
] | null | null | null | common/net_simple.py | scotty1373/Torcs_PPO | 864e30e7d02bef852907cd2abb9bba7a0630b53a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import torch
from torch import nn
from torch.distributions import Normal
class Actor_Model(nn.Module):
def __init__(self, state_dim):
super(Actor_Model, self).__init__()
self.Dense1 = nn.Linear(state_dim, 512)
self.Dense1act = nn.ReLU(inplace=True)
self.Dense2 = nn.Linear(512, 128)
self.Dense2act = nn.ReLU(inplace=True)
self.mean_out = nn.Linear(128, 1)
self.meanact = nn.Tanh()
torch.nn.init.uniform_(self.mean_out.weight, a=-3e-3, b=3e-3)
self.sigma_out = nn.Linear(128, 1)
torch.nn.init.uniform_(self.sigma_out.weight, a=-3e-3, b=3e-3)
self.sigmaact = nn.Softplus()
def forward(self, state_vector):
common_vector = self.Dense1(state_vector)
common_vector = self.Dense1act(common_vector)
common_vector = self.Dense2(common_vector)
common_vector = self.Dense2act(common_vector)
mean_out = self.mean_out(common_vector)
mean_out = self.meanact(mean_out)
sigma_out = self.sigma_out(common_vector)
sigma_out = self.sigmaact(sigma_out)
return mean_out, sigma_out
class Critic_Model(nn.Module):
def __init__(self, state_dim):
super(Critic_Model, self).__init__()
self.Dense1 = nn.Linear(state_dim, 512)
self.Dense1act = nn.ReLU(inplace=True)
self.Dense2 = nn.Linear(512, 128)
self.Dense2act = nn.ReLU(inplace=True)
self.Dense3 = nn.Linear(128, 1)
self.Dense3act = nn.ReLU(inplace=True)
def forward(self, state_vector):
value_vector = self.Dense1(state_vector)
value_vector = self.Dense1act(value_vector)
value_vector = self.Dense2(value_vector)
value_vector = self.Dense2act(value_vector)
value_vector = self.Dense3(value_vector)
value_out = self.Dense3act(value_vector)
return value_out
| 35.5 | 70 | 0.661972 | 262 | 1,917 | 4.59542 | 0.198473 | 0.091362 | 0.053987 | 0.070598 | 0.66113 | 0.312292 | 0.312292 | 0.312292 | 0.312292 | 0.215947 | 0 | 0.039811 | 0.226917 | 1,917 | 53 | 71 | 36.169811 | 0.772605 | 0.010955 | 0 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
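A minimal sketch of using the networks above in a Gaussian policy: sample an action from the actor's (mean, sigma) head and score the state with the critic. The classes are the ones defined above; `state_dim` is an assumption.

```python
import torch
from torch.distributions import Normal

state_dim = 8                       # assumption: 8-dimensional observation
actor = Actor_Model(state_dim)
critic = Critic_Model(state_dim)

state = torch.randn(1, state_dim)   # fake batch of one state
mean, sigma = actor(state)
dist = Normal(mean, sigma)
action = dist.sample()              # note: tanh bounds the mean, not the sample
log_prob = dist.log_prob(action)
value = critic(state)
print(action.item(), log_prob.item(), value.item())
```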
63561da871584dc80cc681224fdd788cac77302f | 892 | py | Python | lms/pshell.py | mattdricker/lms | 40b8a04f95e69258c6c0d7ada543f4b527918ecf | [
"BSD-2-Clause"
] | 38 | 2017-12-30T23:49:53.000Z | 2022-02-15T21:07:49.000Z | lms/pshell.py | mattdricker/lms | 40b8a04f95e69258c6c0d7ada543f4b527918ecf | [
"BSD-2-Clause"
] | 1,733 | 2017-11-09T18:46:05.000Z | 2022-03-31T11:05:50.000Z | lms/pshell.py | mattdricker/lms | 40b8a04f95e69258c6c0d7ada543f4b527918ecf | [
"BSD-2-Clause"
] | 10 | 2018-07-11T17:12:46.000Z | 2022-01-07T20:00:23.000Z | import sys
from contextlib import suppress
from transaction.interfaces import NoTransaction
from lms import models
def setup(env):
sys.path = ["."] + sys.path
from tests import factories # pylint:disable=import-outside-toplevel
sys.path = sys.path[1:]
request = env["request"]
request.tm.begin()
env["tm"] = request.tm
env["tm"].__doc__ = "Active transaction manager (a transaction is already begun)."
env["db"] = env["session"] = request.db
env["db"].__doc__ = "Active DB session."
env["m"] = env["models"] = models
env["m"].__doc__ = "The lms.models package."
env["f"] = env["factories"] = factories
env["f"].__doc__ = "The test factories for quickly creating objects."
factories.set_sqlalchemy_session(request.db)
try:
yield
finally:
with suppress(NoTransaction):
request.tm.abort()
| 23.473684 | 86 | 0.64574 | 111 | 892 | 5.027027 | 0.441441 | 0.050179 | 0.035842 | 0.050179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001433 | 0.217489 | 892 | 37 | 87 | 24.108108 | 0.797994 | 0.042601 | 0 | 0 | 0 | 0 | 0.224178 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.208333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6358b098420866d24c9e82073283db2a21735a5a | 666 | py | Python | database/sql_server/create_table.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 14 | 2020-02-07T21:36:39.000Z | 2022-03-12T22:37:04.000Z | database/sql_server/create_table.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 19 | 2019-05-18T23:58:30.000Z | 2022-01-09T16:45:35.000Z | database/sql_server/create_table.py | miguelgfierro/pybase | de8e4f11ed5c655e748178e65195c7e70a9c98af | [
"BSD-3-Clause"
] | 5 | 2020-10-06T06:10:27.000Z | 2021-07-08T12:58:46.000Z | import pyodbc
def create_table(table_name, cursor):
"""Create a table and drop it if it exists.
Args:
table_name (str): Table name.
cursor (object): pyobdc cursor.
**Examples**
.. code-block:: python
conn = pyodbc.connect(connection_string)
cur = conn.cursor()
create_table(tab_name, cur)
"""
query = (
"IF OBJECT_ID('" + table_name + "') IS NOT NULL DROP TABLE " + table_name + " "
)
query += "CREATE TABLE " + table_name
query += " ( user_id VARCHAR(50) not null, num INT not null, array VARBINARY(MAX) not null )"
cursor.execute(query)
cursor.commit()
| 23.785714 | 97 | 0.588589 | 83 | 666 | 4.590361 | 0.506024 | 0.141732 | 0.110236 | 0.104987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004237 | 0.291291 | 666 | 27 | 98 | 24.666667 | 0.802966 | 0.388889 | 0 | 0 | 0 | 0.111111 | 0.385269 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
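One caveat worth noting about `create_table` above: pyodbc cannot bind identifiers as query parameters, so the table name is concatenated into the SQL string. A defensive sketch (an assumption, not part of the original) is to validate the identifier first:

```python
import re

def safe_create_table(table_name, cursor):
    # Reject anything that is not a plain SQL identifier before concatenation.
    if not re.fullmatch(r"[A-Za-z_][A-Za-z0-9_]*", table_name):
        raise ValueError(f"unsafe table name: {table_name!r}")
    create_table(table_name, cursor)
```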
635b340138a681c68148582d1c62f125d6b4f774 | 10,896 | py | Python | vbox/src/VBox/ValidationKit/testmanager/core/useraccount.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | vbox/src/VBox/ValidationKit/testmanager/core/useraccount.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | vbox/src/VBox/ValidationKit/testmanager/core/useraccount.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# $Id: useraccount.py 69111 2017-10-17 14:26:02Z vboxsync $
"""
Test Manager - User DB records management.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 69111 $"
# Standard python imports.
import unittest;
# Validation Kit imports.
from testmanager import config;
from testmanager.core.base import ModelDataBase, ModelLogicBase, ModelDataBaseTestCase, TMTooManyRows, TMRowNotFound;
class UserAccountData(ModelDataBase):
"""
User account data
"""
ksIdAttr = 'uid';
ksParam_uid = 'UserAccount_uid'
ksParam_tsExpire = 'UserAccount_tsExpire'
ksParam_tsEffective = 'UserAccount_tsEffective'
ksParam_uidAuthor = 'UserAccount_uidAuthor'
ksParam_sLoginName = 'UserAccount_sLoginName'
ksParam_sUsername = 'UserAccount_sUsername'
ksParam_sEmail = 'UserAccount_sEmail'
ksParam_sFullName = 'UserAccount_sFullName'
ksParam_fReadOnly = 'UserAccount_fReadOnly'
kasAllowNullAttributes = ['uid', 'tsEffective', 'tsExpire', 'uidAuthor'];
def __init__(self):
"""Init parameters"""
ModelDataBase.__init__(self);
self.uid = None;
self.tsEffective = None;
self.tsExpire = None;
self.uidAuthor = None;
self.sUsername = None;
self.sEmail = None;
self.sFullName = None;
self.sLoginName = None;
self.fReadOnly = None;
def initFromDbRow(self, aoRow):
"""
Init from database table row
Returns self. Raises exception of the row is None.
"""
if aoRow is None:
raise TMRowNotFound('User not found.');
self.uid = aoRow[0];
self.tsEffective = aoRow[1];
self.tsExpire = aoRow[2];
self.uidAuthor = aoRow[3];
self.sUsername = aoRow[4];
self.sEmail = aoRow[5];
self.sFullName = aoRow[6];
self.sLoginName = aoRow[7];
self.fReadOnly = aoRow[8];
return self;
def initFromDbWithId(self, oDb, uid, tsNow = None, sPeriodBack = None):
"""
Initialize the object from the database.
"""
oDb.execute(self.formatSimpleNowAndPeriodQuery(oDb,
'SELECT *\n'
'FROM Users\n'
'WHERE uid = %s\n'
, ( uid, ), tsNow, sPeriodBack));
aoRow = oDb.fetchOne()
if aoRow is None:
raise TMRowNotFound('uid=%s not found (tsNow=%s sPeriodBack=%s)' % (uid, tsNow, sPeriodBack,));
return self.initFromDbRow(aoRow);
def _validateAndConvertAttribute(self, sAttr, sParam, oValue, aoNilValues, fAllowNull, oDb):
# Custom handling of the email field.
if sAttr == 'sEmail':
return ModelDataBase.validateEmail(oValue, aoNilValues = aoNilValues, fAllowNull = fAllowNull);
# Automatically lowercase the login name if we're supposed to do case
# insensitive matching. (The feature assumes lower case in DB.)
if sAttr == 'sLoginName' and oValue is not None and config.g_kfLoginNameCaseInsensitive:
oValue = oValue.lower();
return ModelDataBase._validateAndConvertAttribute(self, sAttr, sParam, oValue, aoNilValues, fAllowNull, oDb);
class UserAccountLogic(ModelLogicBase):
"""
User account logic (for the Users table).
"""
def __init__(self, oDb):
ModelLogicBase.__init__(self, oDb)
self.dCache = None;
def fetchForListing(self, iStart, cMaxRows, tsNow, aiSortColumns = None):
"""
Fetches user accounts.
Returns an array (list) of UserAccountData items, empty list if none.
Raises exception on error.
"""
_ = aiSortColumns;
if tsNow is None:
self._oDb.execute('SELECT *\n'
'FROM Users\n'
'WHERE tsExpire = \'infinity\'::TIMESTAMP\n'
'ORDER BY sUsername DESC\n'
'LIMIT %s OFFSET %s\n'
, (cMaxRows, iStart,));
else:
self._oDb.execute('SELECT *\n'
'FROM Users\n'
'WHERE tsExpire > %s\n'
' AND tsEffective <= %s\n'
'ORDER BY sUsername DESC\n'
'LIMIT %s OFFSET %s\n'
, (tsNow, tsNow, cMaxRows, iStart,));
aoRows = [];
for _ in range(self._oDb.getRowCount()):
aoRows.append(UserAccountData().initFromDbRow(self._oDb.fetchOne()));
return aoRows;
def addEntry(self, oData, uidAuthor, fCommit = False):
"""
Add user account entry to the DB.
"""
self._oDb.callProc('UserAccountLogic_addEntry',
(uidAuthor, oData.sUsername, oData.sEmail, oData.sFullName, oData.sLoginName, oData.fReadOnly));
self._oDb.maybeCommit(fCommit);
return True;
def editEntry(self, oData, uidAuthor, fCommit = False):
"""
Modify user account.
"""
self._oDb.callProc('UserAccountLogic_editEntry',
( uidAuthor, oData.uid, oData.sUsername, oData.sEmail,
oData.sFullName, oData.sLoginName, oData.fReadOnly));
self._oDb.maybeCommit(fCommit);
return True;
def removeEntry(self, uidAuthor, uid, fCascade = False, fCommit = False):
"""
Delete user account
"""
self._oDb.callProc('UserAccountLogic_delEntry', (uidAuthor, uid));
self._oDb.maybeCommit(fCommit);
_ = fCascade;
return True;
def _getByField(self, sField, sValue):
"""
Get user account record by its field value
"""
self._oDb.execute('SELECT *\n'
'FROM Users\n'
'WHERE tsExpire = \'infinity\'::TIMESTAMP\n'
' AND ' + sField + ' = %s'
, (sValue,))
aRows = self._oDb.fetchAll()
if len(aRows) not in (0, 1):
raise TMTooManyRows('Found more than one user account with the same credentials. Database structure is corrupted.')
try:
return aRows[0]
except IndexError:
return []
def getById(self, idUserId):
"""
Get user account information by ID.
"""
return self._getByField('uid', idUserId)
def tryFetchAccountByLoginName(self, sLoginName):
"""
Try get user account information by login name.
Returns UserAccountData if found, None if not.
Raises exception on DB error.
"""
if config.g_kfLoginNameCaseInsensitive:
sLoginName = sLoginName.lower();
self._oDb.execute('SELECT *\n'
'FROM Users\n'
'WHERE sLoginName = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, (sLoginName, ));
if self._oDb.getRowCount() != 1:
if self._oDb.getRowCount() != 0:
raise self._oDb.integrityException('%u rows in Users with sLoginName="%s"'
% (self._oDb.getRowCount(), sLoginName));
return None;
return UserAccountData().initFromDbRow(self._oDb.fetchOne());
def cachedLookup(self, uid):
"""
Looks up the current UserAccountData object for uid via an object cache.
Returns a shared UserAccountData object. None if not found.
Raises exception on DB error.
"""
if self.dCache is None:
self.dCache = self._oDb.getCache('UserAccount');
oUser = self.dCache.get(uid, None);
if oUser is None:
self._oDb.execute('SELECT *\n'
'FROM Users\n'
'WHERE uid = %s\n'
' AND tsExpire = \'infinity\'::TIMESTAMP\n'
, (uid, ));
if self._oDb.getRowCount() == 0:
# Maybe it was deleted, try get the last entry.
self._oDb.execute('SELECT *\n'
'FROM Users\n'
'WHERE uid = %s\n'
'ORDER BY tsExpire DESC\n'
'LIMIT 1\n'
, (uid, ));
elif self._oDb.getRowCount() > 1:
raise self._oDb.integrityException('%s infinity rows for %s' % (self._oDb.getRowCount(), uid));
if self._oDb.getRowCount() == 1:
oUser = UserAccountData().initFromDbRow(self._oDb.fetchOne());
self.dCache[uid] = oUser;
return oUser;
def resolveChangeLogAuthors(self, aoEntries):
"""
Given an array of ChangeLogEntry instances, set sAuthor to whatever
uidAuthor resolves to.
Returns aoEntries.
Raises exception on DB error.
"""
for oEntry in aoEntries:
oUser = self.cachedLookup(oEntry.uidAuthor)
if oUser is not None:
oEntry.sAuthor = oUser.sUsername;
return aoEntries;
#
# Unit testing.
#
# pylint: disable=C0111
class UserAccountDataTestCase(ModelDataBaseTestCase):
def setUp(self):
self.aoSamples = [UserAccountData(),];
if __name__ == '__main__':
unittest.main();
# not reached.
| 37.187713 | 127 | 0.55112 | 1,072 | 10,896 | 5.518657 | 0.282649 | 0.035497 | 0.024341 | 0.018932 | 0.255747 | 0.189317 | 0.137255 | 0.127113 | 0.102772 | 0.092123 | 0 | 0.008262 | 0.355727 | 10,896 | 292 | 128 | 37.315068 | 0.834473 | 0.122797 | 0 | 0.21118 | 0 | 0 | 0.140929 | 0.024927 | 0 | 0 | 0 | 0 | 0 | 1 | 0.093168 | false | 0 | 0.018634 | 0 | 0.291925 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
635c4ec1c60bd1d815137a50f68717285f1fe57d | 11,129 | py | Python | Bip39Generator.py | crc32/AlphaSeed | 10089f7324cde8b52c9bc02c7c85421f82888d4f | [
"MIT"
] | 3 | 2022-01-04T02:21:37.000Z | 2022-01-22T23:49:06.000Z | Bip39Generator.py | crc32/AlphaSeed | 10089f7324cde8b52c9bc02c7c85421f82888d4f | [
"MIT"
] | null | null | null | Bip39Generator.py | crc32/AlphaSeed | 10089f7324cde8b52c9bc02c7c85421f82888d4f | [
"MIT"
] | 1 | 2022-01-09T16:50:42.000Z | 2022-01-09T16:50:42.000Z | #!/usr/bin/env python
# entropy generating geigercounter
# Modified from https://apollo.open-resource.org/mission:log:2014:06:13:generating-entropy-from-radioactive-decay-with-pigi
import geiger
import time
import datetime
import textwrap
import binascii # for conversion between Hexa and bytes
import qrcode
import io, os
from bip_utils import Bip39MnemonicGenerator, Bip39SeedGenerator, Bip44Coins, Bip44, Bip84, Bip84Coins, Bip32Secp256k1
from rich.console import Console, Group
from rich.table import Column, Table
from rich.progress import Progress
from rich.panel import Panel
from rich.prompt import Prompt
from rich import print
class Bip39Generator(geiger.GeigerCounter):
def __init__(self):
#setup vars for randomness production
self.toggle = False
self.t0 = self.t1 = self.t2 = datetime.datetime.now()
self.bitstring = ""
self.bip39_bits = ""
self.bip39_string = ""
#self.bip39_entropy = ""
self.bits = 256
self.bip39_hex = ""
self.mnemonic = ""
self.xpub = ""
self.zpub = ""
self.keyFingerPrint = ""
self.seed_timeout = 5 * 60
self.qr_timeout = 60
self.console = Console(record = True)
self.make_file = False
#call __init__ of superclass
geiger.GeigerCounter.__init__(self)
def old_tick (self,pin=0):
# This works like this:
# time: |------------|-------------|-----------|-----------|
# tick 0: t0
# tick 1: t0 t1
# tick 2: t2 t1 t0
# d0 d1
# tick 3: t2 t0 t1
# tick 4: t2 t1 t0
# dO d1
self.tick_counter += 1
if (self.tick_counter % 2) == 0:
self.t2 = self.t0
self.t0 = datetime.datetime.now()
d0 = self.t1 - self.t2
d1 = self.t0 - self.t1
if d0 > d1:
self.bitstring += "1" if self.toggle else "0"
elif d0 < d1:
self.bitstring += "0" if self.toggle else "1"
            else:  # d0 == d1: collision, discard this sample
                pass
self.toggle = not self.toggle
else:
self.t1 = datetime.datetime.now()
def tick (self,pin=0):
# New method works like this:
# time: |------------|-------------|-----------|-----------|
# tick 0: t0
# tick 1: t0 t1
# tick 2: t0 t1 t2
# tick 3: t0 t1 t2 t3
# d1 d2
# tick 4: t0
#
self.tick_counter += 1
if (self.tick_counter % 4) == 0:
d0 = self.t1 - self.t0
d1 = self.t3 - self.t2
if d0 > d1:
self.bitstring += "1" if self.toggle else "0"
elif d0 < d1:
self.bitstring += "0" if self.toggle else "1"
            else:  # d0 == d1: collision, discard this sample
                pass
self.toggle = not self.toggle
self.t0 = datetime.datetime.now()
elif (self.tick_counter % 4) == 1:
self.t1 = datetime.datetime.now()
elif (self.tick_counter % 4) == 2:
self.t2 = datetime.datetime.now()
elif (self.tick_counter % 4) == 3:
self.t3 = datetime.datetime.now()
def generate_bip39(self,max_entropy):
collected_entropy = 0
#bar = IncrementalBar('Entropy', max=max_entropy)
with Progress() as progress:
task = progress.add_task("[green]Gathering Entropy...", total=max_entropy)
while len(self.bip39_bits) <= max_entropy:
if len(self.bitstring)>=8:
self.bip39_bits += self.bitstring[:8]
self.bitstring = self.bitstring[8:]
#bar.next(n=8)
progress.update(task, advance=8)
else:
continue
time.sleep(0.01)
self.split_and_xor(max_entropy)
self.calculate_keys()
self.display_results()
def split_and_xor(self,max_entropy):
# If you collect enough extra entropy, we'll fold the extra over and XOR to generate
# the 256 bits we need
#
num_split = max_entropy / self.bits
working_bits = textwrap.wrap(self.bip39_bits, self.bits)
# Discard any elements less than self.bytes (256)
for x in working_bits:
if len(x) < self.bits: continue
if len(self.bip39_string) == 0:
self.bip39_string = x
continue
self.bip39_string = ''.join('0' if i == j else '1' for i, j in zip(self.bip39_string,x))
temp_bip39 = [self.bip39_string[i:i+8] for i in range(0, len(self.bip39_string), 8)]
for x in temp_bip39:
self.bip39_hex = self.bip39_hex + str(hex(int(x,2)))[2:].zfill(2)
def calculate_keys(self):
# Put the hex into a binary format for the mnemonic generation
temp_bin = binascii.unhexlify(self.bip39_hex)
# build mnemonic
self.mnemonic = Bip39MnemonicGenerator().FromEntropy(temp_bin)
# get the seed bytes to build the xpub and zpub
seed_bytes = Bip39SeedGenerator(self.mnemonic).Generate()
# Create bip32 and bip84 compliant base master keys
bip32_ctx = Bip32Secp256k1.FromSeed(seed_bytes)
bip84_mst_ctx = Bip84.FromSeed(seed_bytes, Bip84Coins.BITCOIN)
# Store the bip32 Key Fingerprint (needed by Sparrow)
self.keyFingerPrint = bip32_ctx.FingerPrint()
# select the correct bip84 branch key (m/84/0/0) for both bip84 and bip32
bip84_acc_ctx = bip84_mst_ctx.Purpose().Coin().Account(0)
bip32_ctx = bip32_ctx.DerivePath("84'/0'/0'")
# Store the xpub and zpub
self.xpub = bip32_ctx.PublicKey().ToExtended()
self.zpub = bip84_acc_ctx.PublicKey().ToExtended()
def display_results(self):
print(self.mnemonic)
phrase_array = self.mnemonic.ToStr().split(" ")
print("\n")
phrase_table = Table(show_header=False, title="Seed Phrase", show_lines=True)
phrase_table.add_column("1")
phrase_table.add_column("2")
phrase_table.add_column("3")
phrase_table.add_column("4")
phrase_table.add_column("5")
phrase_table.add_row(
"01. " + phrase_array[0],
"02. " + phrase_array[1],
"03. " + phrase_array[2],
"04. " + phrase_array[3],
"05. " + phrase_array[4],
)
phrase_table.add_row(
"06. " + phrase_array[5],
"07. " + phrase_array[6],
"08. " + phrase_array[7],
"09. " + phrase_array[8],
"10. " + phrase_array[9],
)
phrase_table.add_row(
"11. " + phrase_array[10],
"12. " + phrase_array[11],
"13. " + phrase_array[12],
"14. " + phrase_array[13],
"15. " + phrase_array[14],
)
phrase_table.add_row(
"16. " + phrase_array[15],
"17. " + phrase_array[16],
"18. " + phrase_array[17],
"19. " + phrase_array[18],
"20. " + phrase_array[19],
)
phrase_table.add_row(
"21. " + phrase_array[20],
"22. " + phrase_array[21],
"23. " + phrase_array[22],
"24. " + phrase_array[23],
)
details_table = Table(show_header=False, title="Public Keys", show_lines=True)
details_table.add_column("1",overflow="fold")
details_table.add_row(
"Key Fingerprint: " + str(self.keyFingerPrint),
)
details_table.add_row(
"xpub: " + self.xpub,
)
details_table.add_row(
"zpub: " + self.zpub,
)
os.system("clear")
self.console.print(phrase_table)
self.console.print(details_table)
self.ProgressBar("Mnemonic & Pub Keys (Ctrl-C to end early): ", self.seed_timeout)
# Build QR code of the Fingerprint & public keys.
# For security, we do NOT include the mneumonic in the QR Code.
qr = qrcode.QRCode(box_size=10, border=0, error_correction=qrcode.constants.ERROR_CORRECT_L)
qr.add_data(self.xpub, optimize=50)
xpub_qr = io.StringIO()
qr.print_ascii(out=xpub_qr)
xpub_qr.seek(0)
qr.clear()
qr.add_data(self.zpub, optimize=50)
zpub_qr = io.StringIO()
qr.print_ascii(out=zpub_qr)
zpub_qr.seek(0)
#layout["xpub"].update(
xpub_table = Table(show_header=False, title="xPub Key", show_lines=True)
xpub_table.add_column("1",overflow="fold")
xpub_table.add_row(
xpub_qr.read(),
)
zpub_table = Table(show_header=False, title="zPub Key", show_lines=True)
zpub_table.add_column("1",overflow="fold")
zpub_table.add_row(
zpub_qr.read(),
)
self.console.print(xpub_table)
self.ProgressBar("xpub (Ctrl-C to end early): ", self.qr_timeout)
self.console.print(zpub_table)
self.ProgressBar("zpub (Ctrl-C to end early): ", self.qr_timeout)
if self.make_file: self.console.save_text("Last_Seed.txt")
def ProgressBar(self, message, to):
with Progress(transient=True) as progress:
task = progress.add_task(message, total=to)
i = 0
try:
while(i < to):
progress.update(task, advance=1)
time.sleep(1)
i += 1
except KeyboardInterrupt:
pass
os.system("clear")
def GenerateFile(self, generate="N"):
if generate == "Y" or generate == "y":
self.make_file = True
self.qr_timeout = 30
self.seed_timeout = 30
else:
self.make_file = False
if __name__ == "__main__":
keygen = Bip39Generator()
print(Panel("Entropy needs to be collected in blocks of 256.\n" +
"After collection, the values will be displayed on screen,\n" +
"and a file will be generated (Last_Seed.txt) if you request it.\n" +
"Saving as a file will reduce the on-screen display times to [green]30 seconds[white].\n" +
"[red]IF YOU ALREADY HAVE A 'Last_Seed.txt' FILE, THIS WILL BE OVERWRITTEN IF YOU ANSWER YES![white]",
title="Geiger Entropy Collection"))
max_entropy = int(Prompt.ask("How much oversampling do you want (1x, 2x, &c.)")) * 256
print(keygen.isSimulation())
keygen.GenerateFile(Prompt.ask("Generate file (y/N)", default="N"))
keygen.generate_bip39(max_entropy)
| 35.218354 | 123 | 0.538952 | 1,332 | 11,129 | 4.354354 | 0.243994 | 0.047414 | 0.024138 | 0.017241 | 0.17 | 0.153966 | 0.106034 | 0.096724 | 0.055517 | 0.055517 | 0 | 0.051405 | 0.344505 | 11,129 | 315 | 124 | 35.330159 | 0.74366 | 0.152574 | 0 | 0.168182 | 0 | 0.009091 | 0.089495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040909 | false | 0.004545 | 0.063636 | 0 | 0.109091 | 0.054545 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
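A sanity-check sketch for the generator above: any mnemonic it emits should pass BIP-39 checksum validation. This assumes the bip_utils v2 validator API, consistent with the imports the file already uses.

```python
from bip_utils import Bip39MnemonicValidator

# Assumed bip_utils v2 API: IsValid() checks word-list membership and the checksum.
assert Bip39MnemonicValidator().IsValid(keygen.mnemonic.ToStr())
```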
635d54351361f86080532efa30b9a8d9dcf38427 | 3,908 | py | Python | crawto/classification_visualization.py | crawftv/CRAwTO | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | [
"BSD-3-Clause"
] | 1 | 2020-04-03T12:43:27.000Z | 2020-04-03T12:43:27.000Z | crawto/classification_visualization.py | crawftv/CRAwTO | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | [
"BSD-3-Clause"
] | 21 | 2020-02-14T04:29:03.000Z | 2020-07-14T02:19:37.000Z | crawto/classification_visualization.py | crawftv/CRAwTO | 8c6fdb93ed963cbddfe967b041e8beb578d1e94d | [
"BSD-3-Clause"
] | 1 | 2019-10-25T01:06:58.000Z | 2019-10-25T01:06:58.000Z | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
import sklearn
from sklearn import metrics
from sklearn.metrics import (
auc,
average_precision_score,
classification_report,
confusion_matrix,
precision_recall_curve,
roc_auc_score,
roc_curve,
)
from sklearn.utils.multiclass import unique_labels
"""
IMPORTANT: Seaborn must be upgraded to use this module in Google Colab.
classification_report is just the sklearn classification report.
The classification report will show up in the shell and in notebooks;
results from confusion_viz will appear in notebooks only.
"""
def classification_visualization(y_true, y_pred, y_pred_prob,identifier):
"""
Prints the results of the functions. That's it
"""
clr = classification_report(y_true, y_pred,output_dict=True)
y_true = np.array(y_true).ravel()
labels = unique_labels(y_true, y_pred)
matrix = confusion_matrix(y_true, y_pred)
#fig, ax = plt.subplots()
fig = plt.figure(figsize=(16,5))
fig.suptitle(f"{identifier}",x=0,y=1,fontsize=16)
ax1 = fig.add_subplot(1,2,1)
ax1.set_title(f"confusion matrix".title(),loc="left")
with sns.plotting_context(font_scale=2):
sns.heatmap(
matrix,
annot=True,
fmt=",",
linewidths=1,
linecolor="grey",
square=False,
xticklabels=["Predicted\n" + str(i) for i in labels],
yticklabels=["Actual\n" + str(i) for i in labels],
robust=True,
cmap=sns.color_palette("coolwarm"),
)
plt.yticks(rotation=0)
plt.xticks(rotation=0)
ax2 = fig.add_subplot(1,2,2)
#ax2.set_title(f"Model: {identifier} decision matrix".title(),loc="center")
ddf = pd.DataFrame(clr).T.drop(columns = ["support"],axis=1)
ax2.axis('tight')
ax2.axis('off')
_table = ax2.table(cellText=np.round(ddf.values,2),loc="right",colLabels=ddf.columns,rowLabels=ddf.index)
_table.auto_set_font_size(False)
_table.set_fontsize(16)
_table.scale(1,5)
fig.tight_layout()
plt.show();
def confusion_viz(y_true, y_pred):
"""
Uses labels as given
Pass y_true,y_pred, same as any sklearn classification problem
Inspired from code from a Ryan Herr Lambda School Lecture
"""
y_true = np.array(y_true).ravel()
labels = unique_labels(y_true, y_pred)
matrix = confusion_matrix(y_true, y_pred)
sns.set(font_scale=2)
return sns.heatmap(
matrix,
annot=True,
fmt=",",
linewidths=1,
linecolor="grey",
square=False,
xticklabels=["Predicted\n" + str(i) for i in labels],
yticklabels=["Actual\n" + str(i) for i in labels],
robust=True,
cmap=sns.color_palette("coolwarm"),
)
def plt_prc(y_true, y_pred):
aps = round(average_precision_score(y_true, y_pred) * 100, 2)
precision, recall, _ = precision_recall_curve(y_true, y_pred)
plt.figure()
lw = 2
plt.plot(recall, precision, color="darkorange", lw=lw, label="Precision Recall curve")
plt.plot([0, 1], [1, 0], color="navy", lw=lw, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title(f"Precision Recall Curve\nAverage Precision Score = {aps}%")
plt.legend(loc="lower right")
plt.show()
def plt_roc(y_true, y_pred):
plt.figure()
lw = 2
fpr, tpr, threshold = roc_curve(y_true, y_pred)
ras = round(roc_auc_score(y_true, y_pred) * 100, 2)
plt.plot(fpr, tpr, color="darkorange", lw=lw, label="ROC curve")
plt.plot([0, 1], [0, 1], color="navy", lw=lw, linestyle="--")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title(f"ROC AUC Curve\nROC AUC Score = {ras}%")
plt.legend(loc="lower right")
plt.show()
| 30.771654 | 109 | 0.64739 | 577 | 3,908 | 4.242634 | 0.317158 | 0.036765 | 0.034314 | 0.05719 | 0.400327 | 0.348856 | 0.348856 | 0.309641 | 0.291667 | 0.291667 | 0 | 0.022222 | 0.216991 | 3,908 | 126 | 110 | 31.015873 | 0.777778 | 0.073439 | 0 | 0.442105 | 0 | 0 | 0.110612 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042105 | false | 0 | 0.084211 | 0 | 0.136842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6361be6747f74fe445a4fb5c773189efbc6d790b | 6,673 | py | Python | tests/test_subscription_service.py | rukzuk/paymill-python | 637cdb2a56e75e2c062611907982d786fb258bb5 | [
"MIT"
] | null | null | null | tests/test_subscription_service.py | rukzuk/paymill-python | 637cdb2a56e75e2c062611907982d786fb258bb5 | [
"MIT"
] | null | null | null | tests/test_subscription_service.py | rukzuk/paymill-python | 637cdb2a56e75e2c062611907982d786fb258bb5 | [
"MIT"
] | null | null | null | __author__ = 'yalnazov'
try:
import unittest2 as unittest
except ImportError:
import unittest
from paymill.paymill_context import PaymillContext
from paymill.models.subscription import Subscription
import test_config
class TestSubscriptionService(unittest.TestCase):
currency = 'EUR'
interval = '2 DAY'
amount = 4200
sub_dict = {u'livemode': False,
u'updated_at': 1409647372,
u'currency': u'EUR',
u'id': u'sub_edacd9959b10c8f6eb5d',
u'is_canceled': False,
u'next_capture_at': 1409820171,
u'temp_amount': None,
u'status': u'active',
u'trial_start': None,
u'offer': {u'subscription_count': {u'active': u'1', u'inactive': 0},
u'name': u'', u'created_at': 1409647371,
u'interval': u'2 DAY',
u'updated_at': 1409647371,
u'app_id': None,
u'currency': u'EUR',
u'amount': u'4200',
u'trial_period_days': None,
u'id': u'offer_bb33ea77b942f570997b'},
u'app_id': None,
u'trial_end': None,
u'payment': {u'app_id': None,
u'is_recurring': True,
u'expire_month': u'12',
u'country': None,
u'created_at': 1409647371,
u'updated_at': 1409647371,
u'card_holder': u'',
u'card_type': u'visa',
u'last4': u'1111',
u'is_usable_for_preauthorization': True,
u'client': u'client_4b6fc054e35ba1548959',
u'expire_year': u'2015',
u'type': u'creditcard',
u'id': u'pay_3dca8cabea90e752ed7be662'},
u'is_deleted': False,
u'name': u'',
u'end_of_period': None,
u'canceled_at': None,
u'created_at': 1409647371,
u'interval': u'2 DAY',
u'amount': 4200,
u'client': {u'description': None,
u'payment': [u'pay_3dca8cabea90e752ed7be662'],
u'created_at': 1409647371,
u'updated_at': 1409647371,
u'app_id': None,
u'email': None,
u'id': u'client_4b6fc054e35ba1548959',
u'subscription': None},
u'period_of_validity': None}
def setUp(self):
self.p = PaymillContext(test_config.api_key)
self.payment = self.p.get_payment_service().create(test_config.magic_token)
self.subscription = self.p.get_subscription_service().create_with_amount(self.payment.id,
TestSubscriptionService.amount,
TestSubscriptionService.currency,
TestSubscriptionService.interval)
def tearDown(self):
del self.p, self.payment, self.subscription
def test_serialize_subscription_to_dict(self):
self.assertIsInstance(TestSubscriptionService.sub_dict, dict)
def test_serialize_subscription_to_Subscription(self):
s = Subscription(TestSubscriptionService.sub_dict)
self.assertIsInstance(s, Subscription)
self.assertEqual(1409647372, s.updated_at)
def test_create_subscription_sets_client_id(self):
self.assertIsNotNone(self.subscription.client.id)
def test_subscription_create_sets_payment_id(self):
self.assertEqual(self.payment.id, self.subscription.payment.id)
def test_subscription_create_sets_amount(self):
self.assertEqual(TestSubscriptionService.amount, self.subscription.amount)
def test_subscription_create_sets_currency(self):
self.assertEqual(TestSubscriptionService.currency, self.subscription.currency)
def test_subscription_create_sets_interval(self):
self.assertEqual(TestSubscriptionService.interval, self.subscription.interval)
def test_subscription_update_default_sets_interval(self):
p = PaymillContext(test_config.api_key)
payment = p.get_payment_service().create(test_config.magic_token)
subscription = p.get_subscription_service().create_with_amount(payment.id,
TestSubscriptionService.amount,
TestSubscriptionService.currency,
TestSubscriptionService.interval)
subscription.interval = '1 MONTH,FRIDAY'
s = p.get_subscription_service().update(subscription)
self.assertEqual('1 MONTH,FRIDAY', s.interval)
def test_subscription_update_with_amount_sets_amount(self):
p = PaymillContext(test_config.api_key)
payment = p.get_payment_service().create(test_config.magic_token)
subscription = p.get_subscription_service().create_with_amount(payment.id,
TestSubscriptionService.amount,
TestSubscriptionService.currency,
TestSubscriptionService.interval)
subscription.amount = 5600
s = p.get_subscription_service().update_with_amount(subscription, amount_change_type=1)
self.assertEqual(5600, s.amount)
def test_subscription_cancel(self):
p = PaymillContext(test_config.api_key)
payment = p.get_payment_service().create(test_config.magic_token)
subscription = p.get_subscription_service().create_with_amount(payment.id,
TestSubscriptionService.amount,
TestSubscriptionService.currency,
TestSubscriptionService.interval)
s = p.get_subscription_service().cancel(subscription)
self.assertIsInstance(s, Subscription)
| 50.172932 | 114 | 0.528848 | 592 | 6,673 | 5.717905 | 0.195946 | 0.020679 | 0.026883 | 0.047563 | 0.452585 | 0.376662 | 0.338257 | 0.316396 | 0.286854 | 0.237814 | 0 | 0.052037 | 0.38948 | 6,673 | 132 | 115 | 50.55303 | 0.778841 | 0 | 0 | 0.310345 | 0 | 0 | 0.112543 | 0.028473 | 0 | 0 | 0 | 0 | 0.094828 | 1 | 0.103448 | false | 0 | 0.051724 | 0 | 0.198276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6363c6bbeaa5d8ba27ac83c8c4a9925fa4151cdf | 1,824 | py | Python | examples/animate/recamanSeq.py | beidongjiedeguang/manim-express | e9c89b74da3692db3ea9b568727e78d5cbcef503 | [
"MIT"
] | 12 | 2021-06-14T07:28:29.000Z | 2022-02-25T02:49:49.000Z | examples/animate/recamanSeq.py | beidongjiedeguang/manim-kunyuan | e9c89b74da3692db3ea9b568727e78d5cbcef503 | [
"MIT"
] | 1 | 2022-02-01T12:30:14.000Z | 2022-02-01T12:30:14.000Z | examples/animate/recamanSeq.py | beidongjiedeguang/manim-express | e9c89b74da3692db3ea9b568727e78d5cbcef503 | [
"MIT"
] | 2 | 2021-05-13T13:24:15.000Z | 2021-05-18T02:56:22.000Z | from examples.example_imports import *
VIOLET = "#EE82EE"
INDIGO = "#4B0082"
VIBGYOR = [VIOLET, INDIGO, BLUE, GREEN, YELLOW, ORANGE, RED]
class RecamanSequence(EagerModeScene):
CONFIG = {
"n": 100, # number of iterations
}
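# Recaman's sequence: a(0) = 0 and a(n) = a(n-1) - n when that value is
# positive and unvisited, otherwise a(n-1) + n; each step is drawn as an arc.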
def clip1(self):
self.count = 0
visited = [0] + [None] * self.n
arcs = VGroup()
self.index = 0 # acts as a pointer
self.highest = max(self.index, 3)  # used to define the frame width
for i in range(self.n):
index = self.index - self.count
if index < 0 or index in visited:
index = self.index + self.count
# defining the start and end of the arc
start = np.array([visited[self.count], 0, 0])
end = np.array([index, 0, 0])
# defining the angle of arc (i.e., upwards arc or downwards arc)
angle = -PI if self.count % 2 == 0 else PI
if index < visited[self.count]:
angle *= -1
arc = ArcBetweenPoints(start, end, angle)
arcs.add(arc)
# updating variables
self.index = index
self.count += 1
visited[self.count] = self.index
VIBGYOR.reverse()
arcs.set_color_by_gradient(*VIBGYOR)
# rendering
for i, arc in enumerate(arcs):
self.highest = max(self.highest, max(visited[:i + 2]))
arc.set_stroke(opacity=1 - (np.sqrt(i) / self.n))
self.play(
ShowCreation(arc),
self.camera.frame.set_width, self.highest,
self.camera.frame.move_to, self.highest / 2 * RIGHT,
rate_func=linear,
run_time=0.25
)
if __name__ == "__main__":
import os
RecamanSequence().render()
| 27.223881 | 76 | 0.533443 | 220 | 1,824 | 4.345455 | 0.440909 | 0.075314 | 0.043933 | 0.037657 | 0.048117 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025597 | 0.357456 | 1,824 | 66 | 77 | 27.636364 | 0.790102 | 0.108553 | 0 | 0 | 0 | 0 | 0.014215 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.046512 | 0 | 0.116279 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6365ef6932b09a9ca176a69f9f72ff21e3cd604b | 5,182 | py | Python | validator/txnserver/web_pages/prevalidation_page.py | gabykyei/GC_BlockChain_T_Rec | b72cb483064852d0a60286943ff55233462fea08 | [
"Apache-2.0"
] | 1 | 2019-03-18T13:31:11.000Z | 2019-03-18T13:31:11.000Z | validator/txnserver/web_pages/prevalidation_page.py | gabykyei/GC_BlockChain_T_Rec | b72cb483064852d0a60286943ff55233462fea08 | [
"Apache-2.0"
] | null | null | null | validator/txnserver/web_pages/prevalidation_page.py | gabykyei/GC_BlockChain_T_Rec | b72cb483064852d0a60286943ff55233462fea08 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import copy
import logging
import traceback
from zope.interface import Interface, Attribute, implements
from twisted.python.components import registerAdapter
from twisted.web import http
from twisted.web.error import Error
from twisted.web.server import Session
from sawtooth.exceptions import InvalidTransactionError
from txnserver.web_pages.base_page import BasePage
LOGGER = logging.getLogger(__name__)
# pylint: disable=inherit-non-class
class ITempTransactionTypeStore(Interface):
count = Attribute("An int value")
my_store = Attribute("A store value")
class TempTransactionTypeStoreInstance(object):
implements(ITempTransactionTypeStore)
def __init__(self, session):
self.count = 0
self.my_store = None
registerAdapter(TempTransactionTypeStoreInstance, Session,
ITempTransactionTypeStore)
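# registerAdapter ties TempTransactionTypeStoreInstance to twisted's Session,
# so ITempTransactionTypeStore(session) lazily creates (or returns) exactly
# one per-session store instance.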
class PrevalidationPage(BasePage):
isLeaf = True
def __init__(self, validator, page_name=None):
BasePage.__init__(self, validator, page_name)
def render_get(self, request, components, msg):
session = request.getSession()
if request.method == 'HEAD':
if not session:
raise \
Error(http.BAD_REQUEST, 'Session has not been started')
session.expire()
LOGGER.info('Session: %s has ended.', session.uid)
return 'Session: {} has ended.'.format(session.uid)
temp_store_session = ITempTransactionTypeStore(session)
return temp_store_session.my_store.dump(True)
def render_post(self, request, components, msg):
"""
Do server-side session prevalidation.
"""
session = request.getSession()
if not session:
raise \
Error(http.BAD_REQUEST, 'Session has not been started')
data = request.content.getvalue()
msg = self._get_message(request)
# if it is an error response message, return it immediately
if isinstance(msg, dict) and 'status' in msg:
return msg
mymsg = copy.deepcopy(msg)
if hasattr(mymsg, 'Transaction') and mymsg.Transaction is not None:
mytxn = mymsg.Transaction
LOGGER.info('starting server-side prevalidation '
'for txn id: %s type: %s',
mytxn.Identifier,
mytxn.TransactionTypeName)
transaction_type = mytxn.TransactionTypeName
temp_store_session = ITempTransactionTypeStore(session)
temp_store_session.count += 1
LOGGER.debug('visit %d times in the session.uid: %s',
temp_store_session.count, session.uid)
if not temp_store_session.my_store:
temp_store_map = self._get_store_map()
if transaction_type not in temp_store_map.transaction_store:
LOGGER.info('transaction type %s not in global store map',
transaction_type)
raise Error(http.BAD_REQUEST,
'unable to prevalidate enclosed '
'transaction {0}'.format(data))
tstore = temp_store_map.get_transaction_store(transaction_type)
temp_store_session.my_store = tstore.clone_store()
try:
if not mytxn.is_valid(temp_store_session.my_store):
raise InvalidTransactionError('invalid transaction')
except InvalidTransactionError as e:
LOGGER.info('submitted transaction fails transaction '
'family validation check: %s; %s',
request.path, mymsg.dump())
raise Error(http.BAD_REQUEST,
'InvalidTransactionError: failed transaction '
'family validation check: {}'.format(str(e)))
except:
LOGGER.info('submitted transaction is '
'not valid %s; %s; %s',
request.path, mymsg.dump(),
traceback.format_exc(20))
raise Error(http.BAD_REQUEST,
'enclosed transaction is not '
'valid {}'.format(data))
LOGGER.info('transaction %s is valid',
msg.Transaction.Identifier)
mytxn.apply(temp_store_session.my_store)
return msg
| 37.014286 | 80 | 0.604979 | 540 | 5,182 | 5.67037 | 0.344444 | 0.035271 | 0.047028 | 0.02776 | 0.162312 | 0.05356 | 0.03919 | 0.03919 | 0.03919 | 0.03919 | 0 | 0.003615 | 0.306059 | 5,182 | 139 | 81 | 37.280576 | 0.847887 | 0.147626 | 0 | 0.168539 | 0 | 0 | 0.135907 | 0.005482 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044944 | false | 0 | 0.11236 | 0 | 0.269663 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6368cb3cf505c5009517da98c8fe412698d9997a | 2,052 | py | Python | additional_models/model2.py | srikanth311/CarND-Behavioral-Cloning-P3 | 2d6f7f73f646b5d83d283d33a84770ea6fae11d0 | [
"MIT"
] | null | null | null | additional_models/model2.py | srikanth311/CarND-Behavioral-Cloning-P3 | 2d6f7f73f646b5d83d283d33a84770ea6fae11d0 | [
"MIT"
] | null | null | null | additional_models/model2.py | srikanth311/CarND-Behavioral-Cloning-P3 | 2d6f7f73f646b5d83d283d33a84770ea6fae11d0 | [
"MIT"
] | null | null | null | # https://stackoverflow.com/questions/34518656/how-to-interpret-loss-and-accuracy-for-a-machine-learning-model
from additional_models.data2 import read_csv_data, get_samples
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import Adam
import sys
train_data = read_csv_data('datasets/srikanth/driving_log.csv')
valid_data = read_csv_data('datasets/data_from_udacity_site/driving_log.csv')
train_images_path = 'datasets/srikanth/IMG/'
test_images_path = 'datasets/data_from_udacity_site/IMG/'
INIT='glorot_uniform'
model = Sequential()
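# Normalize raw pixel values from [0, 255] to [-0.5, 0.5] inside the model graph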
model.add(Lambda (lambda X: X/255-0.5, input_shape=(32, 128, 3)))
model.add(Conv2D(16, (3, 3), activation='relu', input_shape=(32, 128, 3) ))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(.25))
model.add(Dense(20, activation='relu'))
model.add(Dense(1))
model.summary()
model.compile(optimizer=Adam(lr=1e-04), loss='mean_squared_error')
batch_size = 128
model_fit_gen = model.fit_generator(
get_samples(train_data, train_images_path, batch_size),
steps_per_epoch=train_data.shape[0] // batch_size,
epochs = 18,
validation_data=get_samples(valid_data, test_images_path, batch_size),
validation_steps=valid_data.shape[0] // batch_size
)
model.save('model.h5')
print("Model saved.")
sys.exit(0) | 34.779661 | 110 | 0.694444 | 286 | 2,052 | 4.800699 | 0.356643 | 0.081573 | 0.069192 | 0.080117 | 0.413693 | 0.276766 | 0.276766 | 0.247633 | 0.247633 | 0.247633 | 0 | 0.049296 | 0.169591 | 2,052 | 59 | 111 | 34.779661 | 0.756455 | 0.10575 | 0 | 0.075 | 0 | 0 | 0.116749 | 0.075286 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.175 | 0 | 0.175 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
636b12312c9285edfa8c85a571b4320d339a5502 | 4,254 | py | Python | networks/recognition/models.py | FaceID-Dongguk-Univ/FaceID | 61147b11e70b1c172cbbff618d10f7e518c2c801 | [
"MIT"
] | null | null | null | networks/recognition/models.py | FaceID-Dongguk-Univ/FaceID | 61147b11e70b1c172cbbff618d10f7e518c2c801 | [
"MIT"
] | null | null | null | networks/recognition/models.py | FaceID-Dongguk-Univ/FaceID | 61147b11e70b1c172cbbff618d10f7e518c2c801 | [
"MIT"
] | 2 | 2021-09-30T14:02:04.000Z | 2021-10-08T07:44:20.000Z | """
MIT License
Copyright (c) 2019 Kuan-Yu Huang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Dense,
Dropout,
Flatten,
Input,
)
from tensorflow.keras.applications import (
MobileNetV2,
ResNet50
)
from .layers import (
BatchNormalization,
ArcMarginPenaltyLogists
)
def _regularizer(weights_decay=5e-4):
return tf.keras.regularizers.l2(weights_decay)
def Backbone(backbone_type='ResNet50', use_pretrain=True):
"""Backbone Model"""
weights = None
if use_pretrain:
weights = 'imagenet'
def backbone(x_in):
if backbone_type == 'ResNet50':
return ResNet50(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
elif backbone_type == 'MobileNetV2':
return MobileNetV2(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
else:
raise TypeError('backbone_type error!')
return backbone
def OutputLayer(embd_shape, w_decay=5e-4, name='OutputLayer'):
"""Output Later"""
def output_layer(x_in):
x = inputs = Input(x_in.shape[1:])
x = BatchNormalization()(x)
x = Dropout(rate=0.5)(x)
x = Flatten()(x)
x = Dense(embd_shape, kernel_regularizer=_regularizer(w_decay))(x)
x = BatchNormalization()(x)
return Model(inputs, x, name=name)(x_in)
return output_layer
def ArcHead(num_classes, margin=0.5, logist_scale=64, name='ArcHead'):
"""Arc Head"""
def arc_head(x_in, y_in):
x = inputs1 = Input(x_in.shape[1:])
y = Input(y_in.shape[1:])
x = ArcMarginPenaltyLogists(num_classes=num_classes,
margin=margin,
logist_scale=logist_scale)(x, y)
return Model((inputs1, y), x, name=name)((x_in, y_in))
return arc_head
def NormHead(num_classes, w_decay=5e-4, name='NormHead'):
"""Norm Head"""
def norm_head(x_in):
x = inputs = Input(x_in.shape[1:])
x = Dense(num_classes, kernel_regularizer=_regularizer(w_decay))(x)
return Model(inputs, x, name=name)(x_in)
return norm_head
def ArcFaceModel(size=None, channels=3, num_classes=None, name='arcface_model',
margin=0.5, logist_scale=64, embd_shape=512,
head_type='ArcHead', backbone_type='ResNet50',
w_decay=5e-4, use_pretrain=True, training=False):
"""Arc Face Model"""
x = inputs = Input([size, size, channels], name='input_image')
x = Backbone(backbone_type=backbone_type, use_pretrain=use_pretrain)(x)
embds = OutputLayer(embd_shape, w_decay=w_decay)(x)
if training:
assert num_classes is not None
labels = Input([], name='label')
if head_type == 'ArcHead':
logits = ArcHead(num_classes=num_classes, margin=margin,
logist_scale=logist_scale)(embds, labels)
else:
logits = NormHead(num_classes=num_classes, w_decay=w_decay)(embds)
return Model((inputs, labels), logits, name=name)
else:
return Model(inputs, embds, name=name)
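# Example usage (sketch; the size and class count are illustrative assumptions):
#   train_model = ArcFaceModel(size=112, num_classes=1000, training=True)
#   embed_model = ArcFaceModel(size=112, training=False)  # inference: embeddings only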
| 35.45 | 79 | 0.666432 | 572 | 4,254 | 4.816434 | 0.312937 | 0.015245 | 0.017423 | 0.016334 | 0.19637 | 0.160799 | 0.120508 | 0.120508 | 0.120508 | 0.120508 | 0 | 0.014751 | 0.235073 | 4,254 | 119 | 80 | 35.747899 | 0.831899 | 0.265867 | 0 | 0.146667 | 0 | 0 | 0.042718 | 0 | 0 | 0 | 0 | 0 | 0.013333 | 1 | 0.133333 | false | 0 | 0.066667 | 0.013333 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
636c08dd9863e232a0e141c5bf49a4049685e262 | 6,693 | py | Python | magnum/tests/unit/db/test_x509keypair.py | mjbrewer/testIndex | 420dc071d4240a89b6f266e8d2575cedb39bfea0 | [
"Apache-2.0"
] | null | null | null | magnum/tests/unit/db/test_x509keypair.py | mjbrewer/testIndex | 420dc071d4240a89b6f266e8d2575cedb39bfea0 | [
"Apache-2.0"
] | null | null | null | magnum/tests/unit/db/test_x509keypair.py | mjbrewer/testIndex | 420dc071d4240a89b6f266e8d2575cedb39bfea0 | [
"Apache-2.0"
] | 1 | 2020-09-09T14:35:08.000Z | 2020-09-09T14:35:08.000Z | # Copyright 2015 NEC Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating X509KeyPairs via the DB API"""
import six
from magnum.common import context
from magnum.common import exception
from magnum.common import utils as magnum_utils
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils
class DbX509KeyPairTestCase(base.DbTestCase):
def test_create_x509keypair(self):
utils.create_test_x509keypair()
def test_create_x509keypair_nullable_bay_uuid(self):
utils.create_test_x509keypair(bay_uuid=None)
def test_create_x509keypair_already_exists(self):
utils.create_test_x509keypair()
self.assertRaises(exception.X509KeyPairAlreadyExists,
utils.create_test_x509keypair)
def test_get_x509keypair_by_id(self):
x509keypair = utils.create_test_x509keypair()
res = self.dbapi.get_x509keypair_by_id(self.context, x509keypair.id)
self.assertEqual(x509keypair.id, res.id)
self.assertEqual(x509keypair.uuid, res.uuid)
def test_get_x509keypair_by_name(self):
x509keypair = utils.create_test_x509keypair()
res = self.dbapi.get_x509keypair_by_name(self.context,
x509keypair.name)
self.assertEqual(x509keypair.name, res.name)
self.assertEqual(x509keypair.uuid, res.uuid)
def test_get_x509keypair_by_uuid(self):
x509keypair = utils.create_test_x509keypair()
res = self.dbapi.get_x509keypair_by_uuid(self.context,
x509keypair.uuid)
self.assertEqual(x509keypair.id, res.id)
self.assertEqual(x509keypair.uuid, res.uuid)
def test_get_x509keypair_that_does_not_exist(self):
self.assertRaises(exception.X509KeyPairNotFound,
self.dbapi.get_x509keypair_by_id,
self.context, 999)
self.assertRaises(exception.X509KeyPairNotFound,
self.dbapi.get_x509keypair_by_uuid,
self.context,
'12345678-9999-0000-aaaa-123456789012')
def test_get_x509keypair_list(self):
uuids = []
for i in range(1, 6):
x509keypair = utils.create_test_x509keypair(
uuid=magnum_utils.generate_uuid())
uuids.append(six.text_type(x509keypair['uuid']))
res = self.dbapi.get_x509keypair_list(self.context)
res_uuids = [r.uuid for r in res]
self.assertEqual(sorted(uuids), sorted(res_uuids))
def test_get_x509keypair_list_with_filters(self):
bay1 = utils.get_test_bay(id=1, uuid=magnum_utils.generate_uuid())
bay2 = utils.get_test_bay(id=2, uuid=magnum_utils.generate_uuid())
self.dbapi.create_bay(bay1)
self.dbapi.create_bay(bay2)
x509keypair1 = utils.create_test_x509keypair(
name='x509keypair-one',
uuid=magnum_utils.generate_uuid(),
bay_uuid=bay1['uuid'])
x509keypair2 = utils.create_test_x509keypair(
name='x509keypair-two',
uuid=magnum_utils.generate_uuid(),
bay_uuid=bay2['uuid'])
x509keypair3 = utils.create_test_x509keypair(
name='x509keypair-three',
bay_uuid=bay2['uuid'])
res = self.dbapi.get_x509keypair_list(
self.context, filters={'bay_uuid': bay1['uuid']})
self.assertEqual([x509keypair1.id], [r.id for r in res])
res = self.dbapi.get_x509keypair_list(
self.context, filters={'bay_uuid': bay2['uuid']})
self.assertEqual([x509keypair2.id, x509keypair3.id],
[r.id for r in res])
res = self.dbapi.get_x509keypair_list(
self.context, filters={'name': 'x509keypair-one'})
self.assertEqual([x509keypair1.id], [r.id for r in res])
res = self.dbapi.get_x509keypair_list(
self.context, filters={'name': 'bad-x509keypair'})
self.assertEqual([], [r.id for r in res])
def test_get_x509keypair_list_by_admin_all_tenants(self):
uuids = []
for i in range(1, 6):
x509keypair = utils.create_test_x509keypair(
uuid=magnum_utils.generate_uuid(),
project_id=magnum_utils.generate_uuid(),
user_id=magnum_utils.generate_uuid())
uuids.append(six.text_type(x509keypair['uuid']))
ctx = context.make_admin_context(all_tenants=True)
res = self.dbapi.get_x509keypair_list(ctx)
res_uuids = [r.uuid for r in res]
self.assertEqual(sorted(uuids), sorted(res_uuids))
def test_get_x509keypair_list_bay_not_exist(self):
utils.create_test_x509keypair()
self.assertEqual(1, len(self.dbapi.get_x509keypair_list(self.context)))
res = self.dbapi.get_x509keypair_list(self.context, filters={
'bay_uuid': magnum_utils.generate_uuid()})
self.assertEqual(0, len(res))
def test_destroy_x509keypair(self):
x509keypair = utils.create_test_x509keypair()
self.assertIsNotNone(self.dbapi.get_x509keypair_by_id(
self.context, x509keypair.id))
self.dbapi.destroy_x509keypair(x509keypair.id)
self.assertRaises(exception.X509KeyPairNotFound,
self.dbapi.get_x509keypair_by_id,
self.context, x509keypair.id)
def test_destroy_x509keypair_by_uuid(self):
x509keypair = utils.create_test_x509keypair()
self.assertIsNotNone(self.dbapi.get_x509keypair_by_uuid(
self.context, x509keypair.uuid))
self.dbapi.destroy_x509keypair(x509keypair.uuid)
self.assertRaises(exception.X509KeyPairNotFound,
self.dbapi.get_x509keypair_by_uuid, self.context,
x509keypair.uuid)
def test_destroy_x509keypair_that_does_not_exist(self):
self.assertRaises(exception.X509KeyPairNotFound,
self.dbapi.destroy_x509keypair,
'12345678-9999-0000-aaaa-123456789012')
| 42.903846 | 79 | 0.659346 | 785 | 6,693 | 5.391083 | 0.185987 | 0.082703 | 0.048204 | 0.092391 | 0.692344 | 0.613422 | 0.516304 | 0.500236 | 0.491257 | 0.475662 | 0 | 0.073647 | 0.249365 | 6,693 | 155 | 80 | 43.180645 | 0.76871 | 0.097415 | 0 | 0.405172 | 0 | 0 | 0.034706 | 0.011956 | 0 | 0 | 0 | 0 | 0.189655 | 1 | 0.12069 | false | 0 | 0.051724 | 0 | 0.181034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
636f486add13f8c9416be0f98507743cabd74049 | 2,769 | py | Python | src/rysia/core/tensorflow/helper.py | vdeuschle/rysia | c8c5adc2c770424b3a328a936f23a80a38c9f0f2 | [
"Apache-2.0"
] | 2 | 2020-08-17T15:46:30.000Z | 2020-08-20T19:20:57.000Z | src/rysia/core/tensorflow/helper.py | vdeuschle/rysia | c8c5adc2c770424b3a328a936f23a80a38c9f0f2 | [
"Apache-2.0"
] | null | null | null | src/rysia/core/tensorflow/helper.py | vdeuschle/rysia | c8c5adc2c770424b3a328a936f23a80a38c9f0f2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Vincent Deuschle. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import tensorflow as tf
#https://stackoverflow.com/questions/49409488/tensorflow-tensor-reshape-and-pad-with-zeros-at-the-end-of-some-rows?rq=1
def stack_sequences(input: tf.Tensor, seq_length: tf.Tensor) -> tf.Tensor:
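# Packs a flat [sum(seq_length), d] tensor into a padded
# [len(seq_length), max(seq_length), d] batch; rows past each sequence's end
# are gathered from an appended zero row. Note the trailing "- 1" offset: it
# suggests (an assumption here) that callers encode inputs shifted by +1 so
# that the zero padding decodes cleanly after the subtraction.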
data_len = tf.shape(input)[0]
out_dim0 = tf.size(seq_length)
out_dim1 = tf.reduce_max(seq_length)
out_dim2 = input.get_shape()[-1]
start_idxs = tf.concat([tf.constant([0]), tf.cumsum(seq_length)], axis=0)[:-1]
pads = tf.fill([out_dim0], out_dim1) - seq_length
reconstruction_metadata = tf.stack([start_idxs, seq_length, pads], axis=1)
reconstruction_data = tf.map_fn(lambda x: tf.concat([tf.range(x[0],x[0]+x[1]),
tf.fill([x[2]], data_len)],
axis=0),
reconstruction_metadata)
output = tf.gather(tf.concat([input, tf.zeros((1, out_dim2))], axis=0),
tf.reshape(reconstruction_data, [out_dim0*out_dim1]))
output = tf.reshape(output, [out_dim0, out_dim1, out_dim2]) - tf.constant(1, dtype=tf.float32)
return output
def unstack_sequences(input: tf.Tensor, seq_length: tf.Tensor) -> tf.Tensor:
input_shape = tf.shape(input)
num_seqs = input_shape[0]
time_steps = input_shape[1]
input_dim = input.get_shape()[-1]
input = tf.reshape(input, [num_seqs*time_steps, input_dim])
start_idx = tf.concat([[0], tf.cumsum(tf.fill([num_seqs], time_steps))], axis=0)[:-1]
idx = tf.stack([start_idx, start_idx + seq_length, seq_length], axis=1)
idx_ranges = tf.map_fn(lambda x: tf.concat([tf.ones([x[-2] - x[-3]]),
tf.zeros(time_steps-x[-1])], axis=0), idx)
idx_ranges = tf.reshape(idx_ranges, [-1])
output = tf.boolean_mask(input, idx_ranges)
return output
def get_optimizer(optimizer: str, learning_rate: float) -> tf.train.Optimizer:
if optimizer == 'sgd':
return tf.train.GradientDescentOptimizer(learning_rate)
elif optimizer == 'adam':
return tf.train.AdamOptimizer(learning_rate)
else:
raise RuntimeError(f'Unknown optimizer {optimizer} in get_train_step') | 48.578947 | 119 | 0.653666 | 400 | 2,769 | 4.3725 | 0.355 | 0.046312 | 0.017153 | 0.024014 | 0.081189 | 0.081189 | 0.081189 | 0.081189 | 0.053745 | 0.053745 | 0 | 0.025795 | 0.215962 | 2,769 | 57 | 120 | 48.578947 | 0.779825 | 0.234742 | 0 | 0.054054 | 0 | 0 | 0.025641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.027027 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
637294b6bbc4dcbace3b74bbe577dbb797c776c8 | 1,670 | py | Python | examples/stdicons.py | diegomvh/pyqt | 435756fab9751eab2e30f5f3351d7fc43589533f | [
"MIT"
] | 1 | 2022-03-07T20:20:51.000Z | 2022-03-07T20:20:51.000Z | examples/stdicons.py | diegomvh/pyqt | 435756fab9751eab2e30f5f3351d7fc43589533f | [
"MIT"
] | null | null | null | examples/stdicons.py | diegomvh/pyqt | 435756fab9751eab2e30f5f3351d7fc43589533f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt4 import QtCore, QtGui
def get_std_icon(name):
if not name.startswith('SP_'):
name = 'SP_' + name
standardIconName = getattr(QtGui.QStyle, name, None)
if standardIconName is not None:
return QtGui.QWidget().style().standardIcon( standardIconName )
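# Example: get_std_icon('MessageBoxWarning') returns the platform warning icon;
# names that are not QStyle SP_ attributes fall through and yield None.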
class ShowStdIcons(QtGui.QWidget):
"""
Dialog showing standard icons
"""
def __init__(self, parent):
QtGui.QWidget.__init__(self, parent)
layout = QtGui.QHBoxLayout()
row_nb = 14
cindex = 0
for child in dir(QtGui.QStyle):
if child.startswith('SP_'):
if cindex == 0:
col_layout = QtGui.QVBoxLayout()
icon_layout = QtGui.QHBoxLayout()
icon = get_std_icon(child)
label = QtGui.QLabel()
label.setPixmap(icon.pixmap(16, 16))
icon_layout.addWidget( label )
icon_layout.addWidget( QtGui.QLineEdit(child.replace('SP_', '')) )
col_layout.addLayout(icon_layout)
cindex = (cindex+1) % row_nb
if cindex == 0:
layout.addLayout(col_layout)
self.setLayout(layout)
self.setWindowTitle('Standard Platform Icons')
self.setWindowIcon(get_std_icon('TitleBarMenuButton'))
def show_std_icons():
"""
Show all standard Icons
"""
app = QtGui.QApplication([])
dialog = ShowStdIcons(None)
print(get_std_icon("cacho"))
dialog.show()
import sys
sys.exit(app.exec_())
if __name__ == "__main__":
show_std_icons() | 31.509434 | 82 | 0.578443 | 178 | 1,670 | 5.191011 | 0.432584 | 0.025974 | 0.04329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010381 | 0.307784 | 1,670 | 53 | 83 | 31.509434 | 0.788927 | 0.057485 | 0 | 0.051282 | 0 | 0 | 0.042774 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.051282 | 0 | 0.179487 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6372c851f000a2f462e3b75cd28ab4a885253034 | 3,616 | py | Python | examples/run_homographs.py | vinnamkim/GEM | cac74b711ef1547e5bc68b85fdf7f7437933b325 | [
"BSD-3-Clause"
] | null | null | null | examples/run_homographs.py | vinnamkim/GEM | cac74b711ef1547e5bc68b85fdf7f7437933b325 | [
"BSD-3-Clause"
] | null | null | null | examples/run_homographs.py | vinnamkim/GEM | cac74b711ef1547e5bc68b85fdf7f7437933b325 | [
"BSD-3-Clause"
] | null | null | null | '''
Run the graph embedding methods on the homo graphs and evaluate them on
graph reconstruction. Edge-list CSV files are expected under
data/homo_graph in the working directory.
'''
import matplotlib.pyplot as plt
from time import time
from gem.utils import graph_util, plot_util
from gem.evaluation import visualize_embedding as viz
from gem.evaluation import evaluate_graph_reconstruction as gr
from gem.embedding.gf import GraphFactorization
from gem.embedding.hope import HOPE
from gem.embedding.lap import LaplacianEigenmaps
from gem.embedding.lle import LocallyLinearEmbedding
from gem.embedding.node2vec import node2vec
from gem.embedding.sdne import SDNE
from argparse import ArgumentParser
import networkx as nx
from collections import namedtuple
Stats = namedtuple("stats", "MAP prec_curv err err_baseline")
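# Stats fields come from gr.evaluateStaticGraphReconstruction: MAP is the mean
# average precision of graph reconstruction, prec_curv the precision@k curve,
# and err/err_baseline the reconstruction errors.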
if __name__ == '__main__':
''' Sample usage
python run_homographs.py
'''
parser = ArgumentParser(
description='Graph Embedding Experiments on Homo Graphs')
# Specify whether the edges are directed
isDirected = True
import os
dirpath = 'data/homo_graph'
gfiles = [gfile for gfile in os.listdir(
dirpath) if os.path.splitext(gfile)[1] == '.csv']
results = {}
for gfile in gfiles:
if gfile != 'soc-pokec-relationships-4096.csv':
continue
models = []
# Load the models you want to run
models.append(GraphFactorization(d=64, max_iter=50000,
eta=1 * 10**-4, regu=1.0, data_set=gfile))
models.append(HOPE(d=64, beta=0.01))
models.append(LaplacianEigenmaps(d=64))
models.append(LocallyLinearEmbedding(d=64))
models.append(
node2vec(d=64, max_iter=100, walk_len=80, num_walks=10,
con_size=10, ret_p=1, inout_p=1, data_set=gfile)
)
models.append(SDNE(d=64, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3, n_units=[500, 300, ], rho=0.3, n_iter=100, xeta=0.001, n_batch=500,
modelfile=['enc_model.json', 'dec_model.json'],
weightfile=['enc_weights.hdf5', 'dec_weights.hdf5']))
G = graph_util.loadGraphFromEdgeListTxt(
os.path.join(dirpath, gfile), directed=isDirected, has_prefix=True)
G = G.to_directed()
G = nx.relabel.convert_node_labels_to_integers(G)
print('Num nodes: %d, num edges: %d' %
(G.number_of_nodes(), G.number_of_edges()))
results[gfile] = {}
for embedding in models:
t1 = time()
# Learn embedding - accepts a networkx graph or file with edge list
Y, t = embedding.learn_embedding(
graph=G, edge_f=None, is_weighted=True, no_python=True)
print(embedding._method_name+':\n\tTraining time: %f' %
(time() - t1))
# Evaluate on graph reconstruction
MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(
G, embedding, Y, None)
results[gfile][embedding._method_name] = Stats(
MAP, prec_curv, err, err_baseline)
# ---------------------------------------------------------------------------------
print(("\tMAP: {} \t preccision curve: {}\n\n\n\n" +
'-'*100).format(MAP, prec_curv[:5]))
# ---------------------------------------------------------------------------------
# Only process the first matching graph for now.
break
import pickle
with open('results.pickle', 'wb') as f:
pickle.dump(results, f)
| 39.304348 | 145 | 0.601217 | 443 | 3,616 | 4.778781 | 0.419865 | 0.029759 | 0.045347 | 0.019839 | 0.062825 | 0.040151 | 0.028342 | 0 | 0 | 0 | 0 | 0.030786 | 0.254425 | 3,616 | 91 | 146 | 39.736264 | 0.754451 | 0.144082 | 0 | 0 | 0 | 0 | 0.100662 | 0.010596 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.253968 | 0 | 0.253968 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63748db7a5ced0b64fd8d94829c93ed18e0dccd7 | 1,149 | py | Python | src/Classes/MSDS400/Module 2/gas_mix.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | src/Classes/MSDS400/Module 2/gas_mix.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | src/Classes/MSDS400/Module 2/gas_mix.py | bmoretz/Python-Playground | a367ec7659b85c24363c21b5c0ac25db08ffa1f6 | [
"MIT"
] | null | null | null | # A company is developing a new additive for gasoline.
# The additive is a mixture of three liquid ingredients, I, II, and III.
# For proper performance, the total amount of additive must be at least 7 oz per gal of gasoline.
# However, for safety reasons, the amount of additive should not exceed 19 oz per gal of gasoline.
# At least one sixth oz of ingredient I must be used for every ounce of ingredient II, and at least 1 oz of ingredient III must be used for every ounce of ingredient I.
# The costs of I, II, and III are $0.40, $0.24, and $0.64 per oz, respectively.
from pulp import *
gax_mix = LpProblem( "Gas Mixture Problem", LpMinimize )
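# y1, y2, y3: ounces of ingredients I, II and III per gallon of gasoline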
y1 = LpVariable( "y1", 0 )
y2 = LpVariable( "y2", 0 )
y3 = LpVariable( "y3", 0 )
gax_mix += .40*y1 + .24*y2 + .64*y3
# Constraints: total additive between 7 and 19 oz per gal, per the problem statement
gax_mix += y1 + y2 + y3 >= 7
gax_mix += y1 + y2 + y3 <= 19
gax_mix += 6*y1 >= y2
gax_mix += y1 <= y3
gax_mix.solve()
print('Status:', LpStatus[gax_mix.status])
for variable in gax_mix.variables():
print("{0} = {1}".format(variable.name, variable.varValue))
print( 'Optimal Sln: {0}'.format(pulp.value(gax_mix.objective))) | 33.794118 | 170 | 0.684073 | 197 | 1,149 | 3.93401 | 0.42132 | 0.085161 | 0.030968 | 0.023226 | 0.167742 | 0.090323 | 0.090323 | 0.090323 | 0 | 0 | 0 | 0.052632 | 0.18973 | 1,149 | 34 | 171 | 33.794118 | 0.779807 | 0.51436 | 0 | 0 | 0 | 0 | 0.092559 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6376ae7819f9e26b72824b9b1885ad2420eb93b4 | 3,135 | py | Python | SndClass.py | fit087/scientific_python | 17b128c064e49581043ef3c1cf84e61c25287065 | [
"MIT"
] | 1 | 2020-02-28T22:57:54.000Z | 2020-02-28T22:57:54.000Z | SndClass.py | fit087/scientific_python | 17b128c064e49581043ef3c1cf84e61c25287065 | [
"MIT"
] | null | null | null | SndClass.py | fit087/scientific_python | 17b128c064e49581043ef3c1cf84e61c25287065 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 13:07:06 2020
@author: Adolfo Correa
Professor: Dr. Iuri Segtovich
"""
import numpy as np
print('\nExercise 1\n')
# Exercise 1
# =============================================
x = [1, 346432, 68, 1223, 5, 47, 678]
max_val = x[0]
for elem in x:
if elem > max_val:
max_val = elem
print ('The maximum value of the list is\n', max_val)
# -------------------------------------------
max_val = x[0]
position = 0
for i in range(1,len(x)):
if x[i] > max_val:
max_val = x[i]
position = i
print ('The maximum value of the list is\n', max_val, 'position ', position+1)
# ----------------------------------------
maximo = max(x)
print('max function ', maximo)
print(sum(x))
print(np.size(x))
print(np.shape(x))
print(len(x))
# Exercise 2
# ============================================
print('\nExercise 2\n')
import math as m
y = [0.1, 0.9]
a = [1.2, 1.6]
# binary interaction parameters k[i][j] (symmetric, zero diagonal)
k = [[0, 0.12], [0.12, 0]]
n = len(y)
am = 0
for i in range(n):
for j in range(n):
am += y[i]*y[j]*m.sqrt(a[i]*a[j])*(1-k[i][j])
print('a_m = ', am)
import sympy as sy
sy.init_printing()
kij = sy.MatrixSymbol('k', 2, 2)
yi = sy.MatrixSymbol('y', 2, 1)
ai = sy.MatrixSymbol('a', 2, 1)
i, j = sy.symbols('i j', integer=True)
am_sym = sy.Sum(yi[i, 0]*yi[j, 0]*sy.sqrt(ai[i, 0]*ai[j, 0])*(1 - kij[i, j]), (j, 0, 1), (i, 0, 1))
print(am_sym)
# Exercise 3 - Taylor series
# ============================================
print('\nExercise 3 - Taylor series\n')
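# Approximate e^x at x = 0.5: keep adding terms x**n / n! until two successive
# partial sums differ by less than tol.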
x, y = 0.5, 0
y=(x**0)/m.factorial(0)
y_ant=y
y+=(x**1)/m.factorial(1)
res = y - y_ant
tol = 1e-6
i = 2
while(abs(res)>tol):
y_ant = y
y+=(x**i)/m.factorial(i)
res=y-y_ant
i+=1
print(y)
# List Comprehension
# ==================================
x = [i for i in range(1,3)]
print(x)
# Exercise 4 - Custom function
# ============================================
print('\nExercise 4 - Function\n')
def fahrenheit(*c):
    c = np.array(c)
    return 1.8*c + 32
print(fahrenheit(37, 100, 0, 21))
# Dictionaries
# Exercise 5 - Dryer
# ============================================
print('\nExercise 5 - Dryer\n')
def secador(xw_in, xw_out, F=100):
# Local Variables
xs_in = 1 - xw_in
xs_out = 1 - xw_out
# Computing
p = F*xs_in/xs_out
w = F*xw_in-p*xw_out
r=100*w/F/xw_in
return p,w,r
p,w,r = secador(.2,.4)
print(p,w,r)
# Global variable example (T would need to be assigned before calling mecher)
def mecher():
    global T
    T = T + 273
    return T
# A function can receive another function as an argument
def trapz(f,a,b):
return (b-a)*(f(a)+f(b))/2
def y(x):
return (x-1)**2+x*2-3
ans = trapz(f=y,a=0,b=1)
| 19.233129 | 109 | 0.482616 | 512 | 3,135 | 2.898438 | 0.277344 | 0.032345 | 0.016173 | 0.02965 | 0.178571 | 0.105121 | 0.063342 | 0.051213 | 0.051213 | 0.051213 | 0 | 0.057652 | 0.247528 | 3,135 | 162 | 110 | 19.351852 | 0.571429 | 0.326954 | 0 | 0.1 | 0 | 0 | 0.108193 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0375 | 0.025 | 0.1625 | 0.2375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6377a7cfc960f1b8f02d09cde803ef13fad43a69 | 4,495 | py | Python | backups/button.py | lsjsg/People_counting_raspberry_pi | 5b2b8c4807e72bc1a0f86b0afeb5abf58fe07ed4 | [
"MIT"
] | 2 | 2019-05-19T20:09:47.000Z | 2019-08-27T15:45:10.000Z | backups/button.py | roskeys/People_counting_raspberry_pi | 5b2b8c4807e72bc1a0f86b0afeb5abf58fe07ed4 | [
"MIT"
] | null | null | null | backups/button.py | roskeys/People_counting_raspberry_pi | 5b2b8c4807e72bc1a0f86b0afeb5abf58fe07ed4 | [
"MIT"
] | 1 | 2020-03-21T15:08:10.000Z | 2020-03-21T15:08:10.000Z | from time import sleep
import pyrebase
import RPi.GPIO as GPIO
url = 'https://dw-1d-kivy.firebaseio.com' # URL to Firebase database
apikey = '"AIzaSyADs22Rdhef_5w28Y4oOvx0Aat1NiKCl5U"' # unique token used for authentication
config = {"apiKey": apikey, "databaseURL": url, }
# Create a firebase object by specifying the URL of the database and its API key.
# The database object exposes set and get methods that let the user write data
# to the database and retrieve data from it.
# Green = 0, Red = 1
firebase = pyrebase.initialize_app(config)
db = firebase.database()
db.child("1").set('0')
db.child("2").set('0')
db.child("3").set('0')
db.child("4").set('0')
db.child("5").set('0')
db.child("6").set('0')
db.child("7").set('0')
db.child("8").set('0')
db.child("9").set('0')
db.child("10").set('0')
db.child("11").set('0')
db.child("12").set('0')
# Use the BCM GPIO numbers as the numbering scheme.
GPIO.setmode(GPIO.BCM)
# Each floor's button is wired to its own BCM GPIO input pin (see below).
# If one end of the switch is wired to ground and the other to a gpio you need a pull-up on the gpio.
# Then the gpio will read high normally and will read low when the switch is closed.
#
# If one end of the switch is wired to 3V3 and the other to a gpio you need a pull-down on the gpio.
# Then the gpio will read low normally and will read high when the switch is closed.
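# The buttons below are wired to 3V3, so pull-downs are used and a pressed
# button reads HIGH.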
switch1 = 4 # white
switch2 = 17 # red
switch3 = 22 # yellow
switch4 = 5 # green
switch5 = 13 # blue
switch6 = 26 # black
switch7 = 18 # white
switch8 = 23 # red
switch9 = 24 # yellow
switch10 = 25 # green
switch11 = 16 # blue
switch12 = 21 # black
switches = {1: switch1, 2: switch2, 3: switch3, 4: switch4,
            5: switch5, 6: switch6, 7: switch7, 8: switch8,
            9: switch9, 10: switch10, 11: switch11, 12: switch12}  # floor -> pin
for pin in switches.values():
    GPIO.setup(pin, GPIO.IN, GPIO.PUD_DOWN)
while True:
    for floor, pin in switches.items():
        if GPIO.input(pin):
            db.child(str(floor)).set("1")
            print("%d pressed" % floor)
        else:
            db.child(str(floor)).set("0")
| 30.371622 | 117 | 0.606007 | 697 | 4,495 | 3.888092 | 0.213773 | 0.092989 | 0.04428 | 0.057565 | 0.342804 | 0.17417 | 0.149816 | 0.063469 | 0.042804 | 0.02214 | 0 | 0.067126 | 0.224472 | 4,495 | 147 | 118 | 30.578231 | 0.71027 | 0.200667 | 0 | 0 | 0 | 0 | 0.079775 | 0.011517 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027778 | 0 | 0.027778 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63787af43ccc0aea55b2d299610f4773a1860ff2 | 2,185 | py | Python | release/scripts/addons/oscurart_tools/object/search_and_select.py | simileV/blenderStereo29 | 09b993449aaca671a9eb2a6a22327246936eb3db | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2020-07-20T15:41:58.000Z | 2020-07-20T15:41:58.000Z | release/scripts/addons/oscurart_tools/object/search_and_select.py | ringsce/Rings3D | 8059d1e2460fc8d6f101eff8e695f68a99f6671d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | release/scripts/addons/oscurart_tools/object/search_and_select.py | ringsce/Rings3D | 8059d1e2460fc8d6f101eff8e695f68a99f6671d | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Operator
from bpy.props import BoolProperty
from bpy.props import StringProperty
# ------------------------ SEARCH AND SELECT ------------------------
class SearchAndSelectOt(bpy.types.Operator):
"""Search and select objects, by name"""
bl_idname = "object.search_and_select_osc"
bl_label = "Search And Select"
bl_options = {"REGISTER", "UNDO"}
keyword : StringProperty(name="Keyword", default="Type Here")
start : BoolProperty(name="Start With", default=True)
count : BoolProperty(name="Contain", default=True)
end : BoolProperty(name="End", default=True)
def execute(self, context):
for objeto in bpy.context.scene.objects:
variableNombre = self.keyword
if self.start:
if objeto.name.startswith(variableNombre):
objeto.select_set(True)
if self.count:
if objeto.name.count(variableNombre):
objeto.select_set(True)
if self.end:
if objeto.name.endswith(variableNombre):
objeto.select_set(True)
return {'FINISHED'}
def invoke(self, context, event):
self.keyword = "Type Here"
self.start = True
self.count = True
self.end = True
return context.window_manager.invoke_props_dialog(self)
| 36.416667 | 74 | 0.652632 | 276 | 2,185 | 5.123188 | 0.456522 | 0.02546 | 0.042433 | 0.040311 | 0.160537 | 0.14215 | 0.102546 | 0.070721 | 0.070721 | 0 | 0 | 0.007813 | 0.238444 | 2,185 | 59 | 75 | 37.033898 | 0.841947 | 0.393593 | 0 | 0.16129 | 0 | 0 | 0.08587 | 0.021858 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.129032 | 0 | 0.516129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
637a49b80162d3c9d3ede67a0b42de3e48ef8458 | 6,605 | py | Python | find_facilities.py | tayuny/the-wandering-chicago | d861f0de4ce0ebc23d246768e45690fbda055be6 | [
"Unlicense"
] | null | null | null | find_facilities.py | tayuny/the-wandering-chicago | d861f0de4ce0ebc23d246768e45690fbda055be6 | [
"Unlicense"
] | null | null | null | find_facilities.py | tayuny/the-wandering-chicago | d861f0de4ce0ebc23d246768e45690fbda055be6 | [
"Unlicense"
] | null | null | null | '''
Main code for finding the nearest target spots and
export an webpage
'''
import os
import webbrowser
import pandas as pd
import util
import gmplot
COLOR_MAP = {'family support (children services)': '#FFFACD',
'family support (senior services)':'#FFFACD',
'family support (domestic violence)':'#FFFACD',
'family support (homeless services)':'#FFFACD',
'family support (human services delivery)':'#FFFACD',
'family support (workforce services)':'#FFFACD',
'family support (youth services)':'#FFFACD',
'health service':'#87CEFA',
'senior center':'#FFFACD',
'community service':'#FFFACD',
'park':'#8FBC8F',
'warming center':'#DC143C',
'cooling center':'#4169E1',
'condom distribution site':'#87CEFA',
'mental health clinic':'#87CEFA',
'sti specialty clinic':'#87CEFA',
'wic clinic':'#87CEFA',
'food pantry':'#FFD700',
'shelter':'#FFA500'}
COL_TYPES = {'facility_name':str, 'address':str, 'community_area':str,
'phone_number':str, 'zipcode':str, 'operation_time':str,
'longitude':float, 'latitude':float, 'x_coordinate':float,
'y_coordinate':float, 'service_type':str, 'notes':str}
def compute_distance(df, address, categories=None, walking_time=None):
'''
Compute the distance between user's location and all the facilities in
our dataset
Inputs:
df: the full dataframe
address:(str) address of user
categories: (list of strings) the list of attempted service types
walking_time: (int) the desired maximum walking time
Returns:
the new data frame
'''
user_lon, user_lat = util.get_user_location(address)
df = util.select_categories(df, categories)
df = df[df["longitude"].notnull() & df["latitude"].notnull()]
df['distance'] = df.apply(lambda x: util.haversine(x['longitude'],\
x['latitude'], user_lon, user_lat), axis=1)
if walking_time is not None:
df['walking_time'] = df.apply(lambda x: util.compute_walking_time(\
x['distance']), axis=1)
filter_cond = df['walking_time'] <= walking_time
df = df[filter_cond]
return df
def get_nearest_spots(df, address, categories=None, walking_time=None,
full_info=False):
'''
    Get the spots that satisfy the user's demands
Inputs:
df: the full dataframe
address:(str) address of user
categories: (list of strings) the list of attempted service types
walking_time: (int) the desired maximum walking time
full_info: (boolean) whether or not to return the full information
Returns: dataframe with nearest spots of each selected category
'''
df = compute_distance(df, address, categories, walking_time)
nearest_df = pd.DataFrame(columns=df.columns)
coordinate_dict = {}
for category in df["service_type"].unique():
current_df = df[df["service_type"] == category].sort_values(by=["distance"]).iloc[:30]
current_df = current_df.drop_duplicates("facility_name", keep="first")
nearest_df = pd.concat([nearest_df, current_df.iloc[:3]], join="inner")
nearest_df["coordinates"] = nearest_df.apply(\
lambda x: tuple([x['latitude'], x['longitude']]), axis=1)
for category in nearest_df["service_type"].unique():
coordinate_dict[category] = list(nearest_df[\
nearest_df["service_type"] == category]["coordinates"])
if full_info:
difference_col = ['longitude', 'latitude', 'x_coordinate',
'y_coordinate', 'coordinates']
else:
difference_col = ['longitude', 'latitude', 'x_coordinate',
'y_coordinate', 'coordinates', 'notes']
return nearest_df.drop(difference_col, axis=1), coordinate_dict
def map_plot(address, dict_of_geocode):
'''
    Plot the locations of facilities on a Google map
    Inputs:
        address: (str) address of user
        dict_of_geocode: (dict) the dictionary mapping categories to lists of geocodes
    Outputs:
        Pops up the html of the Google map
'''
user_location = util.get_user_location(address)
user_lon, user_lat = user_location
gmap = gmplot.GoogleMapPlotter(user_lat, user_lon, 14)
gmap.coloricon = "http://www.googlemapsmarkers.com/v1/%s/"
for cat, geocodes in dict_of_geocode.items():
facility_lats, facility_lons = zip(*geocodes)
color = COLOR_MAP[cat]
gmap.scatter(facility_lats, facility_lons, color, size=60, marker=True)
gmap.marker(user_lat, user_lon, '#000000', title='User Location')
gmap.draw("my_map.html")
util.insert_apikey("my_map.html", 'AIzaSyBu5c2MH9Rj8tPzYmr14VC87Jp3xY-estc')
return "my_map.html"
def export_output(df, map_url, address):
'''
    Given the filtered dataframe, output the html representing the
    dataframe and its corresponding points on the map.
Inputs:
df: the filtered dataframe
address:(str) address of user
map_url: html representing the points corresponding to the
facilities in map
'''
filename = 'output.html'
cur_path = os.getcwd()
html = util.generate_html(df, map_url, address)
body = '\r\n\n<br>'.join(html)
    with open(filename, 'w') as html_output:
        html_output.write(body)
file_path = 'file://' + cur_path + '/' + filename
webbrowser.open(file_path)
def go(filename, address, categories=None, walking_time=None,
full_info=False):
'''
Process the raw data and run the whole program.
Inputs:
filename: (str) filename of the data
address:(str) address of user
categories: (list of strings) the list of attempted service types
walking_time: (int) the desired maximum walking time
full_info: (boolean) whether or not to return the full information
'''
try:
pd.options.display.max_colwidth = 650
df = pd.read_csv(filename, dtype=COL_TYPES, index_col=0)
    except IOError:
        print("Could not read file: {}".format(filename))
        return
assert util.check_value(address, categories, walking_time, full_info) \
is True, 'Value Error!'
df, dict_of_geocode = get_nearest_spots(\
df, address, categories, walking_time, full_info)
map_url = map_plot(address, dict_of_geocode)
export_output(df, map_url, address)
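if __name__ == '__main__':
    # Hedged usage sketch (not part of the original script): the CSV path and
    # address below are placeholders; go() expects a CSV matching COL_TYPES and
    # an address that util.get_user_location() can geocode.
    go('facilities.csv', '1234 S State St',
       categories=['food pantry', 'shelter'], walking_time=20)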
| 39.550898 | 94 | 0.640575 | 811 | 6,605 | 5.060419 | 0.305795 | 0.045565 | 0.027778 | 0.026316 | 0.283626 | 0.247563 | 0.188109 | 0.178363 | 0.178363 | 0.123782 | 0 | 0.011421 | 0.24436 | 6,605 | 166 | 95 | 39.789157 | 0.81086 | 0.242544 | 0 | 0.041667 | 0 | 0 | 0.242696 | 0.008139 | 0 | 0 | 0 | 0 | 0.010417 | 1 | 0.052083 | false | 0 | 0.052083 | 0 | 0.135417 | 0.010417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
637a981be4eccfd221f02a9a564f08b97ca045a8 | 765 | py | Python | tests/run_tests.py | lbanawa/photo_filters | a9729d96929770599ddca2715026b69d8837168f | [
"MIT"
] | null | null | null | tests/run_tests.py | lbanawa/photo_filters | a9729d96929770599ddca2715026b69d8837168f | [
"MIT"
] | 7 | 2020-03-02T01:48:58.000Z | 2022-01-13T02:21:10.000Z | tests/run_tests.py | lbanawa/photo_filters | a9729d96929770599ddca2715026b69d8837168f | [
"MIT"
] | null | null | null | import inspect
import os
import sys
import unittest
__author__ = "David Juboor"
__version__ = "1.0"
# Can't use __file__ because when running with coverage via command line, __file__ is not the full path
my_location = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
def test_runner_suite():
tests_sources_root = my_location #os.path.join(my_location,'tests')
# region Needed to run when using coverage.py so the imports are properly resolved
python_sources_root = os.path.join(my_location,'..', 'src')
sys.path.append(python_sources_root)
# endregion
tests = unittest.TestLoader().discover(tests_sources_root)
return unittest.runner.TextTestRunner().run(tests)
if __name__ == '__main__':
print(test_runner_suite())
| 27.321429 | 104 | 0.745098 | 109 | 765 | 4.853211 | 0.587156 | 0.075614 | 0.045369 | 0.060491 | 0.075614 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006144 | 0.14902 | 765 | 27 | 105 | 28.333333 | 0.806452 | 0.295425 | 0 | 0 | 0 | 0 | 0.052533 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.266667 | 0 | 0.4 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
637a9ea66b82d2beaeba55846d62607bb1b78e7b | 8,500 | py | Python | core/core_service.py | greatkeke/V2ray.FunPi | e58f78331f2fa19eb2af0082bccdc8e904e73217 | [
"MIT"
] | 576 | 2020-08-10T07:02:49.000Z | 2022-03-21T05:21:12.000Z | core/core_service.py | raydoom/V2ray.FunPi | 53d8c205e5dd260abd59f95a633d3c5062ace0e0 | [
"MIT"
] | 13 | 2020-08-14T00:55:28.000Z | 2022-03-21T05:23:27.000Z | core/core_service.py | raydoom/V2ray.FunPi | 53d8c205e5dd260abd59f95a633d3c5062ace0e0 | [
"MIT"
] | 52 | 2020-08-10T13:20:58.000Z | 2022-03-17T08:56:34.000Z | # encoding: utf-8
"""
File: core_service
Author: twotrees.us@gmail.com
Date: July 30, 2020 (Thursday, week 31) 10:55
Desc:
"""
import psutil
import os
import os.path
from .package import jsonpickle
from typing import List
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.base import *
import requests
from urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import random
import time
from datetime import datetime
from .app_config import AppConfig
from .v2ray_controller import V2rayController, make_controller
from .node_manager import NodeManager
from .keys import Keyword as K
from .v2ray_user_config import V2RayUserConfig
class CoreService:
app_config : AppConfig = None
user_config: V2RayUserConfig = V2RayUserConfig()
v2ray:V2rayController = make_controller()
node_manager:NodeManager = NodeManager()
scheduler:BackgroundScheduler = BackgroundScheduler(
{
'apscheduler.executors.default': {
'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
'max_workers': '1'
}
})
@classmethod
def load(cls):
config_path = 'config/'
if not os.path.exists(config_path):
os.mkdir(config_path)
cls.app_config = AppConfig().load()
cls.node_manager = NodeManager().load()
cls.user_config = V2RayUserConfig().load()
cls.restart_auto_detect()
@classmethod
def status(cls) -> dict:
running = cls.v2ray.running()
version = cls.v2ray.version()
result = {
K.running: running,
K.version: version,
K.proxy_mode: cls.user_config.proxy_mode,
}
node = cls.user_config.node.dump()
result.update(node)
return result
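    # Hedged usage sketch (illustrative, not part of the original module):
    #   CoreService.load()
    #   info = CoreService.status()   # running state, version, current node
    #   CoreService.switch_mode(1)    # the proxy-mode value is illustrative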
@classmethod
def performance(cls) -> dict:
result = {}
cpu_usage = psutil.cpu_percent(interval=0.2, percpu=True)
result_cpu = {}
core = 0
for u in cpu_usage:
core += 1
result_cpu["core {0}".format(core)] = u
result['cpu'] = result_cpu
memory_usage = psutil.virtual_memory()
result['memory'] = {
"percent" : memory_usage.percent,
"total" : int(memory_usage.total / (1024 * 1024)),
"used" : int((memory_usage.total - memory_usage.available) / (1024 * 1024))
}
return result
@classmethod
def add_subscribe(cls, url):
cls.node_manager.add_subscribe(url)
cls.re_apply_node()
@classmethod
def remove_subscribe(cls, url):
cls.node_manager.remove_subscribe(url)
cls.re_apply_node()
@classmethod
def update_all_subscribe(cls):
cls.node_manager.update_all()
cls.re_apply_node()
@classmethod
def update_subscribe(cls, url):
cls.node_manager.update(url)
cls.re_apply_node()
@classmethod
def add_manual_node(cls, url):
cls.node_manager.add_manual_node(url)
cls.re_apply_node()
@classmethod
def delete_node(cls, url, index):
cls.node_manager.delete_node(url, index)
cls.re_apply_node()
@classmethod
def re_apply_node(cls, restart_auto_detect=True) -> bool:
result = cls.v2ray.apply_node(cls.user_config, cls.node_manager.all_nodes())
if restart_auto_detect:
cls.restart_auto_detect()
return result
@classmethod
def restart_auto_detect(cls):
cls.auto_detect_cancel()
if cls.user_config.advance_config.auto_detect.enabled :
cls.auto_detect_start()
@classmethod
def stop_v2ray(cls) -> bool:
result = cls.v2ray.stop()
cls.auto_detect_cancel()
return result
@classmethod
def apply_node(cls, url:str, index: int, restart_auto_detect=True) -> bool:
result = False
node = cls.node_manager.find_node(url, index)
cls.user_config.node = node
if cls.re_apply_node(restart_auto_detect):
cls.user_config.save()
if not cls.app_config.inited:
cls.v2ray.enable_iptables()
cls.app_config.inited = True
cls.app_config.save()
result = True
return result
@classmethod
def switch_mode(cls, proxy_mode: int) -> bool:
cls.user_config.proxy_mode = proxy_mode
result = cls.re_apply_node()
if result:
cls.user_config.save()
return result
@classmethod
def apply_advance_config(cls, config:dict):
new_advance = cls.user_config.advance_config.load_data(config)
cls.user_config.advance_config = new_advance
result = cls.re_apply_node()
if result:
cls.user_config.save()
return result
@classmethod
def reset_advance_config(cls):
cls.user_config.advance_config = V2RayUserConfig.AdvanceConfig()
result = cls.re_apply_node()
if result:
cls.user_config.save()
return result
@classmethod
def make_policy(cls, contents:List[str], type:str, outbound:str) -> dict:
type = V2RayUserConfig.AdvanceConfig.Policy.Type[type]
outbound = V2RayUserConfig.AdvanceConfig.Policy.Outbound[outbound]
policy = V2RayUserConfig.AdvanceConfig.Policy()
policy.contents = contents
policy.type = type.name
policy.outbound = outbound.name
return jsonpickle.encode(policy, indent=4)
@classmethod
def auto_detect_start(cls):
cls.scheduler.add_job(CoreService.auto_detect_job, trigger='interval', seconds=cls.user_config.advance_config.auto_detect.detect_span, id=K.auto_detect)
if cls.scheduler.state is not STATE_RUNNING :
cls.scheduler.start()
@classmethod
def auto_detect_cancel(cls):
job = cls.scheduler.get_job(K.auto_detect)
if job:
job.remove()
@classmethod
def auto_detect_job(cls):
detect:V2RayUserConfig.AdvanceConfig.AutoDetectAndSwitch = cls.user_config.advance_config.auto_detect
DEFAULT_TIMEOUT = 5 # seconds
class TimeoutHTTPAdapter(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.timeout = DEFAULT_TIMEOUT
if "timeout" in kwargs:
self.timeout = kwargs["timeout"]
del kwargs["timeout"]
super().__init__(*args, **kwargs)
def send(self, request, **kwargs):
timeout = kwargs.get("timeout")
if timeout is None:
kwargs["timeout"] = self.timeout
return super().send(request, **kwargs)
# begin detect
retries = Retry(total=detect.failed_count, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504])
http = requests.Session()
http.mount("https://", TimeoutHTTPAdapter(max_retries=retries, timeout=detect.timeout))
try:
http.get(detect.detect_url)
            print('detected connection success, nothing to do, just return')
return
except Exception as e:
            print('detected connection failed, detail:\n{0}'.format(e))
# failed prepare to switch node
ping_groups = cls.node_manager.ping_test_all()
class NodePingInfo:
def __init__(self, group_key:str, node_ps:str, ping:int):
self.group_key:str = group_key
self.node_ps:str = node_ps
self.ping:int = ping
def __lt__(self, other):
return self.ping < other.ping
ping_results = []
for group in ping_groups:
group_key = group[K.subscribe]
nodes = group[K.nodes]
for node_ps in nodes.keys():
ping = nodes[node_ps]
info = NodePingInfo(group_key, node_ps, ping)
ping_results.append(info)
ping_results.sort()
best_nodes = ping_results[:5]
random.shuffle(best_nodes)
best_node = best_nodes[0]
node_index = cls.node_manager.find_node_index(best_node.group_key, best_node.node_ps)
cls.apply_node(best_node.group_key, node_index, restart_auto_detect=False)
detect.last_switch_time = '{0} ---- {1}'.format(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), best_node.node_ps)
cls.user_config.save() | 32.442748 | 160 | 0.626588 | 994 | 8,500 | 5.135815 | 0.220322 | 0.054848 | 0.043291 | 0.027424 | 0.184525 | 0.138296 | 0.098531 | 0.057395 | 0.041724 | 0.041724 | 0 | 0.013014 | 0.276824 | 8,500 | 262 | 161 | 32.442748 | 0.817472 | 0.019882 | 0 | 0.248826 | 0 | 0 | 0.036534 | 0.008893 | 0 | 0 | 0 | 0 | 0 | 1 | 0.112676 | false | 0 | 0.079812 | 0.004695 | 0.286385 | 0.00939 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
637b2a10deb0627e199f297b833ffac4a9356c63 | 4,282 | py | Python | src/dropSQL/ast/join.py | LakshyaaSoni/dropSQL | da07ad3edf2d55f0521a385ad10678fc353b4b2b | [
"MIT"
] | 35 | 2017-11-27T22:24:46.000Z | 2022-01-16T23:50:01.000Z | src/dropSQL/ast/join.py | LakshyaaSoni/dropSQL | da07ad3edf2d55f0521a385ad10678fc353b4b2b | [
"MIT"
] | null | null | null | src/dropSQL/ast/join.py | LakshyaaSoni/dropSQL | da07ad3edf2d55f0521a385ad10678fc353b4b2b | [
"MIT"
] | 2 | 2018-02-20T06:06:12.000Z | 2021-10-16T18:30:15.000Z | import abc
from typing import *
from dropSQL.engine.row_set import *
from dropSQL.engine.types import *
from dropSQL.generic import *
from dropSQL.parser.streams import *
from dropSQL.parser.tokens import *
from .alias import AliasedTable
from .ast import *
from .expression import Expression
if TYPE_CHECKING:
from dropSQL import fs
__all__ = [
'JoinClausesParser',
'JoinAst',
'CrossJoin',
'InnerJoin',
]
class JoinClausesParser(FromSQL[List['JoinAst']]):
@classmethod
def from_sql(cls, tokens: Stream[Token]) -> IResult[List['JoinAst']]:
"""
/from_body
: /aliased_table /join_clauses
;
/join_clauses
: /* empty */
| /join_clauses /join_clause
;
"""
# take while next is not err. if err is Empty, return the list, otherwise return error.
joins: List[JoinAst] = []
join = JoinAst.from_sql(tokens)
while join.is_ok():
joins.append(join.ok())
join = JoinAst.from_sql(tokens)
if join.err().is_empty(): return IOk(joins)
return IErr(join.err())
class JoinAst(Ast, FromSQL['JoinAst'], metaclass=abc.ABCMeta):
def __init__(self, table: AliasedTable) -> None:
super().__init__()
self.table = table
@abc.abstractmethod
def join(self, lhs: RowSet, db: 'fs.DBFile', args: ARGS_TYPE = ()) -> Result[RowSet, str]:
"""
Join `lhs` on the left with self on the right.
"""
@classmethod
def from_sql(cls, tokens: Stream[Token]) -> IResult['JoinAst']:
"""
/join_clause
: /cross_join
| /inner_join
;
"""
t = tokens.peek()
if not t: return IErr(t.err())
tok = t.ok()
if isinstance(tok, Comma):
return CrossJoin.from_sql(tokens)
if isinstance(tok, Join):
return InnerJoin.from_sql(tokens)
return IErr(Empty())
class CrossJoin(JoinAst, FromSQL['CrossJoin']):
def __init__(self, table: AliasedTable) -> None:
super().__init__(table)
def join(self, lhs: RowSet, db: 'fs.DBFile', args: ARGS_TYPE = ()) -> Result[RowSet, str]:
rhs = self.table.row_set(db)
if not rhs: return Err(rhs.err())
rs = CrossJoinRowSet(lhs, rhs.ok())
return Ok(rs)
def to_sql(self) -> str:
join = ', '
join += self.table.to_sql()
return join
@classmethod
def from_sql(cls, tokens: Stream[Token]) -> IResult['JoinAst']:
"""
/cross_join
: "," /aliased_table
;
"""
t = tokens.next().and_then(Cast(Comma))
if not t: return IErr(t.err())
t = AliasedTable.from_sql(tokens)
if not t: return IErr(t.err().empty_to_incomplete())
table = t.ok()
return IOk(CrossJoin(table))
class InnerJoin(CrossJoin, FromSQL['InnerJoin']):
def __init__(self, table: AliasedTable, constraint: Expression) -> None:
super().__init__(table)
self.constraint = constraint
def join(self, lhs: RowSet, db: 'fs.DBFile', args: ARGS_TYPE = ()) -> Result[RowSet, str]:
r = self.table.row_set(db)
if not r: return Err(r.err())
rhs = r.ok()
return Ok(InnerJoinRowSet(lhs, rhs, self.constraint, args))
def to_sql(self) -> str:
join = ' /join '
join += self.table.to_sql()
if self.constraint is not None:
join += ' /on '
join += self.constraint.to_sql()
return join
@classmethod
def from_sql(cls, tokens: Stream[Token]) -> IResult['InnerJoin']:
"""
/inner_join
: "/join" /aliased_table "/on" expr
;
"""
t = tokens.next().and_then(Cast(Join))
if not t: return IErr(t.err())
t = AliasedTable.from_sql(tokens)
if not t: return IErr(t.err().empty_to_incomplete())
table = t.ok()
t = tokens.next().and_then(Cast(On))
if not t: return IErr(t.err().empty_to_incomplete())
t = Expression.from_sql(tokens)
if not t: return IErr(t.err().empty_to_incomplete())
constraint = t.ok()
return IOk(InnerJoin(table, constraint))
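# Illustrative output shapes (assuming `t` is an AliasedTable and `e` an
# Expression parsed elsewhere; the exact SQL depends on their own to_sql()):
#   CrossJoin(t).to_sql()     # -> ", <table sql>"
#   InnerJoin(t, e).to_sql()  # -> " /join <table sql> /on <expr sql>"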
| 26.432099 | 96 | 0.570061 | 511 | 4,282 | 4.626223 | 0.18591 | 0.032572 | 0.038494 | 0.035533 | 0.438663 | 0.406514 | 0.367174 | 0.320643 | 0.285956 | 0.265651 | 0 | 0 | 0.296123 | 4,282 | 161 | 97 | 26.596273 | 0.78434 | 0.096684 | 0 | 0.344086 | 0 | 0 | 0.039748 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0 | 0.11828 | 0 | 0.397849 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63806501475b50be287ea81ec9eea6bfef44daae | 6,363 | py | Python | astromodels/core/property.py | giacomov/astromodels | bfad6993465df66d9c071c00d6e0c747266bd3a7 | [
"BSD-3-Clause"
] | 11 | 2015-12-02T17:28:57.000Z | 2018-10-08T02:33:32.000Z | astromodels/core/property.py | giacomov/astromodels | bfad6993465df66d9c071c00d6e0c747266bd3a7 | [
"BSD-3-Clause"
] | 73 | 2016-05-29T10:17:30.000Z | 2018-12-21T19:35:00.000Z | astromodels/core/property.py | giacomov/astromodels | bfad6993465df66d9c071c00d6e0c747266bd3a7 | [
"BSD-3-Clause"
] | 15 | 2016-06-03T21:28:10.000Z | 2019-01-07T14:23:15.000Z | import collections
import copy
from typing import Any, Dict, List, Optional, Tuple, Union
from astromodels.utils.logging import setup_logger
from .tree import Node
log = setup_logger(__name__)
# Exception for when a parameter is out of its bounds
class SettingUnknownValue(RuntimeError):
pass
class PropertyBase(Node):
def __init__(self,
name: str,
desc: str,
value: Optional[str] = None,
allowed_values: Optional[List[str]] = None,
defer: bool = False,
eval_func: Optional[str] = None
):
# Make this a node
Node.__init__(self, name)
self._allowed_values: Optional[List[str]] = allowed_values
self._defer: bool = defer
self._eval_func: Optional[str] = eval_func
if (value is None) and (not self._defer):
log.error(f"property {name} was given no initial value but is NOT deferred")
# now we set the value
self.value = value
self.__doc__ = desc
self._desc = desc
def _get_value(self) -> Any:
"""
Return current parameter value
"""
log.debug(f"accessing the property {self.name} with value {self._internal_value}")
return self._internal_value
def _set_value(self, new_value) -> None:
"""
Sets the current value of the parameter, ensuring that it is within the allowed range.
"""
if (self._defer) and (new_value is None):
# this is ok
pass
elif self._allowed_values is not None:
if not new_value in self._allowed_values:
log.error(f"{self.name} can only take the values {','.join(self._allowed_values)} not {new_value}")
raise SettingUnknownValue()
self._internal_value = new_value
# if there is an eval func value
# then we need to execute the function
# on the parent
if (self._internal_value == "_tmp") and self._defer:
# do not execute in this mode
return
if self._eval_func is not None:
# if there is a parent
if self._parent is not None:
if self._parent.name == "composite":
# ok, we have a composite function
func_idx = int(self._name.split("_")[-1]) - 1
getattr(self._parent._functions[func_idx], str(self._eval_func))()
else:
getattr(self._parent, str(self._eval_func))()
# other wise this will run when the parent is set
    value = property(
        _get_value,
        _set_value,
        doc="Gets and sets the current value for the property",
    )
def _set_parent(self, parent):
        # we intercept here because we want
        # to make sure the eval works
super(PropertyBase, self)._set_parent(parent)
# now we want to update because we have a parent
self.value = self._internal_value
@property
def is_deferred(self) -> bool:
return self._defer
@property
def description(self) -> Optional[str]:
"""
Return a description of this parameter
        :return: a string containing a description of the meaning of this parameter
"""
return self._desc
    def duplicate(self) -> "PropertyBase":
        """
        Returns an exact copy of the current property.
        """
# Deep copy everything to make sure that there are no ties between the new instance and the old one
new_property = copy.deepcopy(self)
return new_property
def _repr__base(self, rich_output): # pragma: no cover
raise NotImplementedError(
"You need to implement this for the actual Property class")
@staticmethod
def _to_python_type(variable):
"""
Returns the value in the variable handling also np.array of one element
:param variable: input variable
:return: the value of the variable having a python type (int, float, ...)
"""
# Assume variable is a np.array, fall back to the case where variable is already a primitive type
try:
return variable.item()
except AttributeError:
return variable
def to_dict(self, minimal=False) -> Dict[str, Any]:
"""Returns the representation for serialization"""
data = collections.OrderedDict()
if minimal:
# In the minimal representation we just output the value
data["value"] = self._to_python_type(self.value)
else:
# In the complete representation we output everything is needed to re-build the object
data["value"] = str(self.value)
data["desc"] = str(self._desc)
data["allowed values"] = self._to_python_type(self._allowed_values)
data["defer"] = self._to_python_type(self._defer)
data["function"] = str(self._eval_func)
return data
class FunctionProperty(PropertyBase):
def __init__(self,
name: str,
desc: str,
value: Optional[str] = None,
allowed_values: Optional[List[Any]] = None,
defer: bool = False,
eval_func: Optional[str] = None
):
super(FunctionProperty, self).__init__(name=name,desc=desc,
value=value,
allowed_values=allowed_values,
defer=defer,
eval_func=eval_func)
def _repr__base(self, rich_output=False):
representation = (
f"Property {self.name} = {self.value}\n"
f"(allowed values = {'all' if self._allowed_values is None else ' ,'.join(self._allowed_values)})")
return representation
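# Hedged usage sketch (illustrative names and values, not part of the module):
#   mode = FunctionProperty(name="mode", desc="interpolation mode",
#                           value="linear", allowed_values=["linear", "log"])
#   mode.value = "log"      # allowed
#   mode.value = "cubic"    # raises SettingUnknownValue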
| 27.786026 | 115 | 0.54534 | 707 | 6,363 | 4.724187 | 0.267327 | 0.054491 | 0.035629 | 0.022455 | 0.108683 | 0.081437 | 0.066467 | 0.066467 | 0.066467 | 0.041916 | 0 | 0.000506 | 0.378595 | 6,363 | 228 | 116 | 27.907895 | 0.844208 | 0.200692 | 0 | 0.196078 | 0 | 0.019608 | 0.105627 | 0.017266 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.019608 | 0.04902 | 0.009804 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6381b46ba4699339e587d7107a305cb39d4ea409 | 4,354 | py | Python | modules/import_ships.py | sprintska/menagerie | 43a92f161f4b6aba9841ef7edb6fe10e84ab1f77 | [
"MIT"
] | null | null | null | modules/import_ships.py | sprintska/menagerie | 43a92f161f4b6aba9841ef7edb6fe10e84ab1f77 | [
"MIT"
] | null | null | null | modules/import_ships.py | sprintska/menagerie | 43a92f161f4b6aba9841ef7edb6fe10e84ab1f77 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import json
import logging
import logging.handlers
import os
import pickle
import requests
import sqlite3
import shutil
from contextlib import closing
_handler = logging.handlers.WatchedFileHandler("/var/log/menagerie.log")
logging.basicConfig(handlers=[_handler], level=logging.INFO)
API_TARGET_URL = "https://www.dropzonecommander.com:3001/ships/"
LOCAL_MIRROR_PATH = os.path.join(os.getcwd(), "data", "ships.sqlite3")
def create_db(db_path):
"""Create the db at the path if it doesn't exist"""
if not os.path.exists(db_path):
open(db_path, "w+").close()
logging.info(f"Created ships db at {db_path}.")
with closing(sqlite3.connect(db_path)) as conn:
success = conn.execute('''CREATE TABLE dropfleet_ships (
_id text,
_rev text,
Name text,
Faction text,
Designation text,
Scan text,
Signal text,
Thrust text,
Hull int,
Armour text,
PointDefence int,
GroupMin int,
GroupMax int,
Tonnage text,
TonnageClass int,
Points int,
HardPoints int,
Special text,
Weapons text,
LaunchAssets text,
SpecRules text,
MinHardPoints int,
MaxBroadSides int,
icons text
)''')
conn.commit()
return success
def request_update(api_url):
"""Requests the updated set of ships from DZC.com and returns them as a giant ass nested dict."""
with requests.get(api_url, stream=True, verify=False) as r:
ships = json.loads(r.text)
for ship in ships:
for keyword in ['Weapons','Special','LaunchAssets','SpecRules','icons']:
ship[keyword] = pickle.dumps(ship[keyword])
return ships
def update_db(ships_obj,db_path):
"""Updates the db with the new ships."""
with closing(sqlite3.connect(db_path)) as conn:
for ship in ships_obj:
for field_key in [
"_id",
"_rev",
"Name",
"Faction",
"Designation",
"Scan",
"Signal",
"Thrust",
"Hull",
"Armour",
"PointDefence",
"GroupMin",
"GroupMax",
"Tonnage",
"TonnageClass",
"Points",
"HardPoints",
"Special",
"Weapons",
"LaunchAssets",
"SpecRules",
"MinHardPoints",
"MaxBroadSides",
"icons"]:
if field_key not in ship.keys():
ship[field_key] = 0
out = conn.execute('''INSERT INTO dropfleet_ships VALUES (
:_id,
:_rev,
:Name,
:Faction,
:Designation,
:Scan,
:Signal,
:Thrust,
:Hull,
:Armour,
:PointDefence,
:GroupMin,
:GroupMax,
:Tonnage,
:TonnageClass,
:Points,
:HardPoints,
:Special,
:Weapons,
:LaunchAssets,
:SpecRules,
:MinHardPoints,
:MaxBroadSides,
:icons
)''', ship)
conn.commit()
def main(api_url,ship_db):
create_db(ship_db)
ships = request_update(api_url)
update_db(ships,ship_db)
if __name__ == "__main__":
main(API_TARGET_URL,LOCAL_MIRROR_PATH) | 29.821918 | 101 | 0.426964 | 353 | 4,354 | 5.121813 | 0.365439 | 0.02323 | 0.013274 | 0.027655 | 0.242257 | 0.242257 | 0.242257 | 0.242257 | 0.201327 | 0.201327 | 0 | 0.004507 | 0.490354 | 4,354 | 146 | 102 | 29.821918 | 0.810275 | 0.043638 | 0 | 0.034483 | 0 | 0 | 0.477946 | 0.005302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.077586 | 0 | 0.12931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63821dda649cfe4db5f1c008cef5ae903f0936db | 2,296 | py | Python | source/custom_scripts/backfill_app_stats_ee2_reserved_cpu.py | kbase/metrics | 33b134d6d9dea8cc4e9cf0ebe6b370aef1dc3486 | [
"MIT"
] | 1 | 2015-12-16T13:33:04.000Z | 2015-12-16T13:33:04.000Z | source/custom_scripts/backfill_app_stats_ee2_reserved_cpu.py | kbase/metrics | 33b134d6d9dea8cc4e9cf0ebe6b370aef1dc3486 | [
"MIT"
] | 54 | 2016-06-01T19:28:09.000Z | 2022-03-31T00:08:38.000Z | source/custom_scripts/backfill_app_stats_ee2_reserved_cpu.py | kbase/metrics | 33b134d6d9dea8cc4e9cf0ebe6b370aef1dc3486 | [
"MIT"
] | 8 | 2015-12-16T13:33:04.000Z | 2020-02-07T23:30:33.000Z | import os
import mysql.connector as mysql
sql_host = os.environ["SQL_HOST"]
query_on = os.environ["QUERY_ON"]
metrics_mysql_password = os.environ["METRICS_MYSQL_PWD"]
def make_lookup_dict():
cpu_file = open("custom_scripts/app_reserved_cpu_lookup_file.txt", "r")
lines = cpu_file.readlines()
count = 0
reserved_cpu_lookup_dict = dict()
for line in lines:
elements = line.strip().split("\t")
func_name = elements[0] + "/" + elements[1]
parts = elements[2].split()
for part in parts:
if part.startswith("request_cpus="):
sub_parts = part.split("=")
reserved_cpu_lookup_dict[func_name] = int(sub_parts[1])
# print(str(reserved_cpu_lookup_dict))
return reserved_cpu_lookup_dict
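# Illustrative (hypothetical) line from app_reserved_cpu_lookup_file.txt, matching
# how make_lookup_dict() parses it: three tab-separated fields, the third holding
# space-separated settings that include "request_cpus=<n>", e.g.
#   kb_megahit<TAB>run_megahit<TAB>request_memory=100GB request_cpus=4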
def backfill_reserved_cpu(reserved_cpu_lookup_dict):
# connect to mysql
db_connection = mysql.connect(
host=sql_host, user="metrics", passwd=metrics_mysql_password, database="metrics"
)
cursor = db_connection.cursor()
query = "use " + query_on
cursor.execute(query)
distinct_apps_list = list()
get_distinct_list_of_apps_q = (
"select distinct func_name "
"from user_app_usage_ee2_cpu "
"where reserved_cpu is null"
)
cursor.execute(get_distinct_list_of_apps_q)
for row in cursor:
distinct_apps_list.append(row[0])
# print(str(distinct_apps_list))
reserved_cpu_update_prep_cursor = db_connection.cursor(prepared=True)
reserved_cpu_update_stmt = (
"update metrics.user_app_usage_ee2_cpu set reserved_cpu = %s "
"where func_name = %s and reserved_cpu is null;"
)
unfound_counter = 0
found_counter = 0
reserved_cpu = 4
for app in distinct_apps_list:
if app in reserved_cpu_lookup_dict:
found_counter += 1
reserved_cpu = reserved_cpu_lookup_dict[app]
else:
unfound_counter += 1
input = [reserved_cpu, app]
reserved_cpu_update_prep_cursor.execute(reserved_cpu_update_stmt, input)
print("FOUND : " + str(found_counter))
print("UNFOUND : " + str(unfound_counter))
db_connection.commit()
return 1
reserved_cpu_lookup_dict = make_lookup_dict()
backfill_reserved_cpu(reserved_cpu_lookup_dict)
| 30.613333 | 88 | 0.678571 | 306 | 2,296 | 4.70915 | 0.303922 | 0.167939 | 0.117974 | 0.131159 | 0.170715 | 0.108258 | 0.055517 | 0 | 0 | 0 | 0 | 0.007905 | 0.228659 | 2,296 | 74 | 89 | 31.027027 | 0.805759 | 0.037892 | 0 | 0 | 0 | 0 | 0.145322 | 0.044959 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0.035088 | 0.035088 | 0 | 0.105263 | 0.035088 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6383bbf4486f5639d8906a1645240b576336ba7a | 7,157 | py | Python | rein_train.py | jhkim6467/input_distill | 0e49e7552fa17cf72143fcc5ff84a8b25cb02232 | [
"MIT"
] | null | null | null | rein_train.py | jhkim6467/input_distill | 0e49e7552fa17cf72143fcc5ff84a8b25cb02232 | [
"MIT"
] | null | null | null | rein_train.py | jhkim6467/input_distill | 0e49e7552fa17cf72143fcc5ff84a8b25cb02232 | [
"MIT"
] | null | null | null | from __future__ import division, unicode_literals
import argparse
import time
import math
import random
import torch.nn as nn, torch
import torch.nn.init as init
import torch.optim as optim
import os
import numpy as np
import pickle
from torch.autograd import Variable
from torch.utils.data import DataLoader
from scipy import spatial
from sklearn.feature_extraction.text import CountVectorizer
from onmt.utils.logging import init_logger
from onmt.translate.translator_new import build_translator
import onmt.inputters
import onmt.translate
import onmt
import onmt.model_builder
import onmt.modules
import onmt.opts
class BaseEncoder(nn.Module):
def __init__(self, vocab_size, emb_size, hid_size):
super(BaseEncoder, self).__init__()
self.hid_size = hid_size
self.num_lyr = 1
self.drop = nn.Dropout(0.4)
self.direction = 2 # 22
self.embed = nn.Embedding(vocab_size, emb_size, padding_idx=0, sparse=False)
self.rnn = nn.GRU(input_size=emb_size, hidden_size=hid_size, num_layers=self.num_lyr, bidirectional=True, batch_first=True, dropout=0.4)
def forward(self, inp):
x = inp.view(-1, inp.size(2))
x_emb = self.embed(x)
x_emb = self.drop(x_emb)
bt_siz, seq_len = x_emb.size(0), x_emb.size(1)
h_0 = Variable(torch.zeros(self.direction * self.num_lyr, bt_siz, self.hid_size))
if use_cuda:
h_0 = h_0.cuda()
x_o, x_hid = self.rnn(x_emb, h_0)
if self.direction == 2:
x_hids = []
for i in range(self.num_lyr):
x_hid_temp, _ = torch.max(x_hid[2 * i:2 * i + 2, :, :], 0, keepdim=True)
x_hids.append(x_hid_temp)
x_hid = torch.cat(x_hids, 0)
x_hid = x_hid[self.num_lyr - 1, :, :].unsqueeze(0)
x_hid = x_hid.transpose(0, 1)
return x_hid
class Policy_Network(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, output_size, batch_size, translator):
super(Policy_Network, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.embedding = BaseEncoder(vocab_size, 300, 400).cuda()
self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=1)
self.hidden2out = nn.Linear(hidden_dim, output_size)
self.softmax = nn.LogSoftmax(dim=1)
self.dropout_layer = nn.Dropout(p=0.2)
self.total_reward = 0
self.num_reward = 0
self.total_batch = 0
self.hidden = self.init_hidden(batch_size)
self.translator = translator
def init_hidden(self, batch_size):
return(torch.randn(1, batch_size, self.hidden_dim).cuda(), torch.randn(1, batch_size, self.hidden_dim).cuda())
def baseline_score(self, reward, num_reward):
return reward / num_reward
    def calculate_reward(self, response_list, pred):
        max_sim = 0
        for line in response_list:
            cos = cos_sim(self.translator._translate_batch(line), pred)
            if cos > max_sim:
                max_sim = cos
        return max_sim
def forward(self, input):
input = self.embedding(input)
input = input.transpose(0, 1)
outputs, self.hidden = self.lstm(input, self.hidden)
output = self.dropout_layer(self.hidden[0][-1])
output = self.hidden2out(output)
output = self.softmax(output)
pred_index = (output.max(1)[1])
# Calculate reward & base_score
reward_list = []
base_list = []
for batch_index in range(len(pred_index)):
response_file = open("distill_files/Response.txt{}".format(pred_index[batch_index]), 'r')
predict_output = open("distill_files/train_output_{}.txt".format(pred_index[batch_index]), 'r')
            reward = self.calculate_reward(response_file.readlines(), predict_output.readlines()[self.total_batch])
            self.total_batch += 1
predict_output.close()
response_file.close()
# addtional line
reward = 1 - reward
reward_list.append(reward)
self.total_reward += reward
self.num_reward += 1
base_list.append(self.baseline_score(self.total_reward, self.num_reward))
reward_list = torch.Tensor(reward_list).cuda()
base_list = torch.Tensor(base_list).cuda()
new_output = output.transpose(0, 1) * (reward_list - base_list)
new_output = new_output.transpose(0, 1)
return new_output, output
def cos_sim(list1, list2):
return nn.functional.cosine_similarity(list1, list2)
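# Note (illustrative): cos_sim expects tensors of matching shape, e.g.
#   cos_sim(torch.randn(1, 400), torch.randn(1, 400))  # -> tensor of shape (1,)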
use_cuda = torch.cuda.is_available()
torch.manual_seed(123)
np.random.seed(123)
if use_cuda:
torch.cuda.manual_seed(123)
##############################################################################################################################################
def RL_train_model(RL_model, optimizer, dataloader, num_epochs, inv_dict):
criterion = nn.NLLLoss()
if use_cuda:
criterion.cuda()
RL_model.train()
for epoch in range(num_epochs):
        total_loss = 0
        temp_loss = 10000
        for i_batch, sample_batch in enumerate(dataloader):
RL_model.zero_grad()
RL_model.hidden = RL_model.init_hidden(len(sample_batch))
pred_rele, pred_base = RL_model(sample_batch)
pred_base = pred_base.max(1)[1]
loss = criterion(pred_rele, pred_base)
if temp_loss > loss:
loss.backward()
temp_loss = loss.item()
optimizer.step()
total_loss += loss.item()
#print("Epoch : {} / Train loss: {}".format(epoch, total_loss))
if (epoch + 1) % 10 == 0:
model_fname = './save/new_RL_model_epoch{}.pt'.format(epoch)
torch.save(RL_model.state_dict(), model_fname)
##############################################################################################################################################
def main():
with open('./distill_files/w2i', 'rb') as f:
inv_dict = pickle.load(f)
# parameter
N = 4
folder_path = "distill_files/"
f = open(folder_path + "src-train.0", 'rb')
line_list = pickle.load(f)
f.close()
new_list = []
for line in line_list:
new_list.append(Variable(torch.LongTensor([line])).cuda())
dataloader = DataLoader(new_list, 64, shuffle=False)
parser = argparse.ArgumentParser(
description='rein_train.py',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
onmt.opts.add_md_help_argument(parser)
onmt.opts.translate_opts(parser)
opt = parser.parse_args()
translator = build_translator(opt, report_score=True)
RL_model = Policy_Network(len(inv_dict), 400, 128, N, 64, translator).cuda()
optimizer = optim.SGD(RL_model.parameters(), lr=0.1, weight_decay=1e-4)
num_epochs = 300
RL_train_model(RL_model, optimizer, dataloader, num_epochs, inv_dict)
main()
| 31.808889 | 144 | 0.619254 | 943 | 7,157 | 4.447508 | 0.230117 | 0.01836 | 0.011922 | 0.007153 | 0.086314 | 0.070577 | 0.070577 | 0.043395 | 0.043395 | 0.025751 | 0 | 0.019041 | 0.236831 | 7,157 | 224 | 145 | 31.950893 | 0.74881 | 0.016627 | 0 | 0.01875 | 0 | 0 | 0.022822 | 0.013485 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.14375 | 0.01875 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6385ed5301b4cb8b06313076f50b0982e7fb2002 | 2,605 | py | Python | pyblas/level2/icamax.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | pyblas/level2/icamax.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | 1 | 2020-10-10T23:23:06.000Z | 2020-10-10T23:23:06.000Z | pyblas/level2/icamax.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | # > \brief \b ICAMAX
#
# =========== DOCUMENTATION ===========
#
# Online html documentation available at
# http://www.netlib.org/lapack/explore-html/
#
# Definition:
# ===========
#
# INTEGER FUNCTION ICAmax(N,CX,INCX)
#
# .. Scalar Arguments ..
# INTEGER INCX,N
# ..
# .. Array Arguments ..
# COMPLEX CX(*)
# ..
#
#
# > \par Purpose:
# =============
# >
# > \verbatim
# >
# > ICAMAX finds the index of the first element having maximum |Re(.)| + |Im(.)|
# > \endverbatim
#
# Arguments:
# ==========
#
# > \param[in] N
# > \verbatim
# > N is INTEGER
# > number of elements in input vector(s)
# > \endverbatim
# >
# > \param[in] CX
# > \verbatim
# > CX is COMPLEX array, dimension ( 1 + ( N - 1 )*abs( INCX ) )
# > \endverbatim
# >
# > \param[in] INCX
# > \verbatim
# > INCX is INTEGER
# > storage spacing between elements of CX
# > \endverbatim
#
# Authors:
# ========
#
# > \author Univ. of Tennessee
# > \author Univ. of California Berkeley
# > \author Univ. of Colorado Denver
# > \author NAG Ltd.
#
# > \date November 2017
#
# > \ingroup aux_blas
#
# > \par Further Details:
# =====================
# >
# > \verbatim
# >
# > jack dongarra, linpack, 3/11/78.
# > modified 3/93 to return if incx <= 0.
# > modified 12/3/93, array(1) declarations changed to array(*)
# > \endverbatim
# >
# =====================================================================
from scabs1 import scabs1
def ICAmax(N, CX, INCX):
#
# -- Reference BLAS level1 routine (version 3.8.0) --
# -- Reference BLAS is a software package provided by Univ. of Tennessee, --
# -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
# November 2017
#
# .. Scalar Arguments ..
# INTEGER INCX,N
# ..
# .. Array Arguments ..
# COMPLEX CX(*)
# ..
#
# =====================================================================
if N < 1 or INCX <= 0:
return 0
if N == 1:
return 1
    ICAMAX = 1
    if INCX == 1:
        # code for increment equal to 1
        SMAX = scabs1(CX[0])
        for I in range(1, N):
            if scabs1(CX[I]) > SMAX:
                # BLAS convention: ICAMAX is a 1-based index
                ICAMAX = I + 1
                SMAX = scabs1(CX[I])
    else:
        # code for increment not equal to 1
        IX = 0
        SMAX = scabs1(CX[0])
        IX += INCX
        for I in range(1, N):
            if scabs1(CX[IX]) > SMAX:
                ICAMAX = I + 1
                SMAX = scabs1(CX[IX])
            IX += INCX
    return ICAMAX
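# Minimal usage sketch (assumes a numpy complex array; values illustrative):
#   >>> import numpy as np
#   >>> ICAmax(3, np.array([1 + 1j, 3 + 4j, 2 + 0j]), 1)
#   2     # |3| + |4| = 7 is the largest |Re(.)| + |Im(.)|, at 1-based index 2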
| 22.652174 | 84 | 0.471785 | 274 | 2,605 | 4.481752 | 0.408759 | 0.029316 | 0.029316 | 0.021173 | 0.179153 | 0.118893 | 0.118893 | 0.118893 | 0.081433 | 0 | 0 | 0.028668 | 0.317083 | 2,605 | 114 | 85 | 22.850877 | 0.661608 | 0.68906 | 0 | 0.347826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.043478 | 0 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63872c71ea983b463c94d5253e8b2aae917c2ae9 | 8,708 | py | Python | iepy/extraction/active_learning_core.py | silky/iepy | 5967d025ce47d28e1d8725c14d5b05dda36afa3c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-06-27T11:32:36.000Z | 2019-06-27T11:32:36.000Z | iepy/extraction/active_learning_core.py | silky/iepy | 5967d025ce47d28e1d8725c14d5b05dda36afa3c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | iepy/extraction/active_learning_core.py | silky/iepy | 5967d025ce47d28e1d8725c14d5b05dda36afa3c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | import logging
import random
import sys
import numpy
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import precision_recall_curve
from iepy import defaults
from iepy.extraction.relation_extraction_classifier import RelationExtractionClassifier
logger = logging.getLogger(__name__)
HIPREC = (10, 1) # Precision is 10x more important than recall
HIREC = (1, 2) # Recall is 2x more important than precision
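# These tuples are intended as values for ActiveLearningCore's
# `performance_tradeoff` argument, e.g. (illustrative):
#   core = ActiveLearningCore(relation, labeled_evidences, performance_tradeoff=HIPREC)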
class ActiveLearningCore:
"""
IEPY's main class. Implements an active learning information extraction
pipeline.
From the user's point of view this class is meant to be used like this::
extractor = ActiveLearningCore(relation, lbl_evidences)
extractor.start() # blocking
while UserIsNotTired and extractor.questions:
question = extractor.questions[0]
answer = ask_user(question)
extractor.add_answer(question, answer)
extractor.process()
predictions = extractor.predict() # profit
"""
#
# IEPY User API
#
def __init__(self, relation, labeled_evidences, extractor_config=None,
performance_tradeoff=None, extractor=None, classifier=None):
if extractor is None:
extractor = RelationExtractionClassifier
self.extractor = extractor
self.relation = relation
self.relation_classifier = classifier
self._setup_labeled_evidences(labeled_evidences)
self._questions = list(self.candidate_evidence)
if extractor_config is None:
extractor_config = defaults.extractor_config
self.extractor_config = extractor_config
self.tradeoff = performance_tradeoff
self.aimed_tradeoff = None
self.threshold = None
def start(self):
"""
Organizes the internal information, and prepares the first "questions" that
need to be answered.
"""
        # API compliance. Nothing is done in the current implementation.
pass
@property
def questions(self):
"""Returns a list of candidate evidences that would be good to have
labels for.
Order is important: labels for evidences listed firsts are more valuable.
"""
return self._questions
def add_answer(self, evidence, answer):
"""
Not blocking.
Informs to the Core the evidence label (True or False) decided
from the outside.
"""
assert answer in (True, False)
self.labeled_evidence[evidence] = answer
for list_ in (self._questions, self.candidate_evidence): # TODO: Check performance. Should use set?
list_.remove(evidence)
# TODO: Save labeled evidence into database?
def process(self):
"""
Blocking.
With all the labeled evidences, new questions are generated, optimizing the
future gain of having those evidences labeled.
After calling this method the values returned by `questions`
and `predict` will change.
"""
yesno = set(self.labeled_evidence.values())
if len(yesno) > 2:
msg = "Evidence is not binary! Can't proceed."
logger.error(msg)
raise ValueError(msg)
if len(yesno) < 2:
logger.debug("Not enough labels to train.")
return
if self.tradeoff:
self.estimate_threshold()
self.train_relation_classifier()
self.rank_candidate_evidence()
self.choose_questions()
def predict(self):
"""
Blocking (ie, not fast).
With all the labeled evidence a classifier is trained and used for automatically
labeling all other evidences.
Returns a dict {evidence: True/False}, where the boolean label indicates if
the relation is present on that evidence or not.
"""
if not self.relation_classifier:
logger.info("There is no classifier. Can't predict")
return {}
if self.threshold is None:
labels = self.relation_classifier.predict(self.candidate_evidence)
else:
scores = self.relation_classifier.decision_function(self.candidate_evidence)
labels = scores >= self.threshold
prediction = dict(zip(self.candidate_evidence, map(bool, labels)))
prediction.update(self.labeled_evidence)
return prediction
def estimate_threshold(self):
scores, y_true = self.get_kfold_data()
if scores is None:
return
prec, rec, thres = precision_recall_curve(y_true, scores)
prec[-1] = 0.0 # To avoid choosing the last phony value
c_prec, c_rec = self.tradeoff
# Below is a linear function on precision and recall, expressed using
# numpy notation, we're maximizing it.
i = (prec * c_prec + rec * c_rec).argmax() # Index of the maximum
assert i < len(thres) # Because prec[-1] is 0.0
self.aimed_tradeoff = (prec[i], rec[i])
self.threshold = thres[i]
s = "Using {} samples, threshold aiming at precision={:.4f} and recall={:.4f}"
logger.debug(s.format(len(scores), prec[i], rec[i]))
# Instance attributes:
# questions: A list of evidence
# ranked_candidate_evidence: A dict candidate_evidence -> float
# aimed_tradeoff: A (prec, rec) tuple with the precision/recall tradeoff
# that the threshold aims to achieve.
#
# Private methods
#
def _setup_labeled_evidences(self, labeled_evidences):
self.candidate_evidence = []
self.labeled_evidence = {}
for e, lbl in labeled_evidences.items():
if lbl is None:
self.candidate_evidence.append(e)
else:
self.labeled_evidence[e] = bool(lbl)
if not self.candidate_evidence:
raise ValueError("Cannot start core without candidate evidence")
logger.info("Loaded {} candidate evidence and {} labeled evidence".format(
len(self.candidate_evidence), len(self.labeled_evidence)))
def train_relation_classifier(self):
X = []
y = []
for evidence, score in self.labeled_evidence.items():
X.append(evidence)
y.append(int(score))
assert y[-1] in (True, False)
self.relation_classifier = self.extractor(**self.extractor_config)
self.relation_classifier.fit(X, y)
def rank_candidate_evidence(self):
if not self.candidate_evidence:
self.ranked_candidate_evidence = {}
logger.info("No evidence left to rank.")
return
N = min(10 * len(self.labeled_evidence), len(self.candidate_evidence))
logger.info("Ranking a sample of {} candidate evidence".format(N))
sample = random.sample(self.candidate_evidence, N)
ranks = self.relation_classifier.decision_function(sample)
self.ranked_candidate_evidence = dict(zip(self.candidate_evidence, ranks))
ranks = [abs(x) for x in ranks]
logger.info("Ranking completed, lowest absolute rank={}, "
"highest absolute rank={}".format(min(ranks), max(ranks)))
def choose_questions(self):
# Criteria: Answer first candidates with decision function near 0
# because they are the most uncertain for the classifier.
self._questions = sorted(self.ranked_candidate_evidence,
key=lambda x: abs(self.ranked_candidate_evidence[x]))
def get_kfold_data(self):
"""
Perform k-fold cross validation and return (scores, y_true) where
scores is a numpy array with decision function scores and y_true
is a numpy array with the true label for that evidence.
"""
allX = []
ally = []
for evidence, score in self.labeled_evidence.items():
allX.append(evidence)
ally.append(int(score))
assert ally[-1] in (True, False)
allX = numpy.array(allX)
ally = numpy.array(ally)
if numpy.bincount(ally).min() < 5:
return None, None # Too little data to do 5-fold cross validation
logger.debug("Performing 5-fold cross validation")
scores = []
y_true = []
for train_index, test_index in StratifiedKFold(ally, 5):
X = allX[train_index]
y = ally[train_index]
c = self.extractor(**self.extractor_config)
c.fit(X, y)
y_true.append(ally[test_index])
scores.append(c.decision_function(allX[test_index]))
return numpy.hstack(scores), numpy.hstack(y_true)
| 38.875 | 108 | 0.636082 | 1,025 | 8,708 | 5.282927 | 0.272195 | 0.075346 | 0.050416 | 0.019945 | 0.06759 | 0.015512 | 0.015512 | 0.015512 | 0 | 0 | 0 | 0.004473 | 0.281121 | 8,708 | 223 | 109 | 39.049327 | 0.860543 | 0.271704 | 0 | 0.066667 | 0 | 0 | 0.07283 | 0 | 0 | 0 | 0 | 0.004484 | 0.02963 | 1 | 0.088889 | false | 0.007407 | 0.059259 | 0 | 0.214815 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6389f2d088c36850ddd534900c1ca48ec6be6f71 | 3,211 | py | Python | core/src/autogluon/core/utils/mo_hbo_utils.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 4,462 | 2019-12-09T17:41:07.000Z | 2022-03-31T22:00:41.000Z | core/src/autogluon/core/utils/mo_hbo_utils.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 1,408 | 2019-12-09T17:48:59.000Z | 2022-03-31T20:24:12.000Z | core/src/autogluon/core/utils/mo_hbo_utils.py | zhiqiangdon/autogluon | 71ee7ef0f05d8f0aad112d8c1719174aa33194d9 | [
"Apache-2.0"
] | 623 | 2019-12-10T02:04:18.000Z | 2022-03-20T17:11:01.000Z | """ Methods helpful for multi-objective HPO.
"""
import numpy as np
def retrieve_pareto_front(training_history, objectives):
"""Retrieves the pareto efficient points discovered during a
scheduler's search process.
Parameters
----------
training_history:
A training history dictionary generated by the scheduler's
search process.
objectives : dict
Dictionary with the names of objectives of interest. The corresponding
values are allowed to be either "MAX" or "MIN" and indicate if an
objective is to be maximized or minimized.
Returns
----------
front: list
A list containing the Pareto efficient points among all points
that were found during the search process.
"""
vals = []
refs = []
for task_id, task_res in training_history.items():
for step, res_dict in enumerate(task_res):
vals.append([res_dict[k] for k in objectives])
refs.append((task_id, step + 1))
vals = np.array(vals)
# We determine the Pareto front assuming pure minimization we adapt
# the signs accordingly
sign_vector = prepare_sign_vector(objectives)
a_vals = vals * (-sign_vector)
eff_mask = np.ones(vals.shape[0], dtype=bool)
for i, c in enumerate(a_vals):
if eff_mask[i]:
eff_mask[eff_mask] = np.any(a_vals[eff_mask] <= c, axis=1)
indices = [i for i, b in enumerate(eff_mask) if b]
front = []
for e in indices:
r = {"task_id-ressource": refs[e]}
for i, o in enumerate(objectives):
r[o] = vals[e][i]
front.append(r)
return front
def prepare_sign_vector(objectives):
"""Generates a numpy vector which can be used to flip the signs of the
objectives values which are intended to be minimized.
Parameters
----------
objectives: dict
The dictionary keys name the objectives of interest. The associated
values can be either "MIN" or "MAX" and indicate if an objective is
to be minimized or maximized.
Returns
----------
sign_vector: np.array
A numpy array containing 1 for objectives to be maximized and -1 for
objectives to be minimized.
"""
converter = {
"MIN": -1.0,
"MAX": 1.0
}
try:
sign_vector = np.array([converter[objectives[k]] for k in objectives])
except KeyError:
raise ValueError("Error, in conversion of objective dict. Allowed \
values are 'MIN' and 'MAX'")
return sign_vector
def uniform_from_unit_simplex(dim):
"""Samples a point uniformly at random from the unit simplex using the
Kraemer Algorithm. The algorithm is described here:
https://www.cs.cmu.edu/~nasmith/papers/smith+tromble.tr04.pdf
Parameters
----------
dim: int
Dimension of the unit simplex to sample from.
    Returns
    ----------
    sample: np.array
        A point sampled uniformly from the unit simplex.
"""
uni = np.random.uniform(size=(dim))
uni = np.sort(uni)
sample = np.diff(uni, prepend=0) / uni[-1]
    assert abs(sample.sum() - 1) < 1e-6, "Error in weight sampling routine."
return np.array(sample)
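# Hedged usage sketch (illustrative values, not part of the original module):
#   objectives = {"accuracy": "MAX", "latency": "MIN"}
#   signs = prepare_sign_vector(objectives)   # -> array([ 1., -1.])
#   weights = uniform_from_unit_simplex(2)    # random nonnegative pair summing to 1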
| 31.174757 | 78 | 0.63625 | 438 | 3,211 | 4.589041 | 0.372146 | 0.034826 | 0.019403 | 0.023881 | 0.064677 | 0.029851 | 0.029851 | 0.029851 | 0 | 0 | 0 | 0.006809 | 0.268141 | 3,211 | 102 | 79 | 31.480392 | 0.848511 | 0.476487 | 0 | 0 | 0 | 0 | 0.041723 | 0 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.075 | false | 0 | 0.025 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
638a0baf11ca5164a0488e66a941c341fef23f5f | 1,191 | py | Python | twinpy/interfaces/phonopy.py | kei0822kei/twinpy | 14b47df1fa5b57a54f57d5c2120ed3fe9502a9bc | [
"MIT"
] | null | null | null | twinpy/interfaces/phonopy.py | kei0822kei/twinpy | 14b47df1fa5b57a54f57d5c2120ed3fe9502a9bc | [
"MIT"
] | 5 | 2021-01-19T13:08:28.000Z | 2021-02-20T12:03:59.000Z | twinpy/interfaces/phonopy.py | kei0822kei/twinpy | 14b47df1fa5b57a54f57d5c2120ed3fe9502a9bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Interfaces for Phonopy.
"""
from phonopy.structure.atoms import PhonopyAtoms
def get_phonopy_structure(cell:tuple) -> PhonopyAtoms:
"""
Return phonopy structure.
Args:
cell: (lattice, scaled_positions, symbols).
Returns:
PhonopyAtoms: Phonopy structure.
"""
ph_structure = PhonopyAtoms(cell=cell[0],
scaled_positions=cell[1],
symbols=cell[2])
return ph_structure
def get_cell_from_phonopy_structure(ph_structure:PhonopyAtoms,
use_atomic_number:bool=False) -> tuple:
"""
    Get cell from phonopy structure.
    Args:
        ph_structure: PhonopyAtoms object.
        use_atomic_number: if True, use atomic number instead of atomic symbol.
Returns:
tuple: (lattice, scaled_positions, symbols).
"""
lattice = ph_structure.get_cell()
scaled_positions = ph_structure.get_scaled_positions()
if use_atomic_number:
elements = list(ph_structure.get_atomic_numbers())
else:
elements = ph_structure.get_chemical_symbols()
return (lattice, scaled_positions, elements)
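# Hedged round-trip sketch (hypothetical 1-atom cubic cell):
#   cell = ([[3.0, 0.0, 0.0], [0.0, 3.0, 0.0], [0.0, 0.0, 3.0]],
#           [[0.0, 0.0, 0.0]],
#           ["Si"])
#   ph = get_phonopy_structure(cell)
#   lattice, positions, symbols = get_cell_from_phonopy_structure(ph)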
| 25.891304 | 77 | 0.645676 | 127 | 1,191 | 5.80315 | 0.338583 | 0.119403 | 0.081411 | 0.078697 | 0.157395 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003448 | 0.269521 | 1,191 | 45 | 78 | 26.466667 | 0.843678 | 0.320739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6394f2f9f555e0a8608e94bc903806f7f599f382 | 547 | py | Python | consultas/consultas/report/reporte_ars/reporte_ars.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | consultas/consultas/report/reporte_ars/reporte_ars.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | consultas/consultas/report/reporte_ars/reporte_ars.py | Lewinta/Consultas | e01ad870a2bad0eb5938d8800e3e2934402fce62 | [
"MIT"
] | null | null | null | # Copyright (c) 2013, Lewin Villar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
	fields = ["paciente", "ars_nombre", "diferencia", "reclamado", "autorizado", "medico"]
	# NOTE: this hardcoded filter overrides any incoming report filters
	filters = {"name": "CLS-0000001795"}
	result = frappe.get_list("Consulta Seguro", fields, filters)
	columns = [" ", "Paciente", "ARS Nombre", "Diferencia", "Monto Reclamado", "Monto Autorizado", "Medico"]
	data = list(result or [])
	frappe.msgprint(data)  # debug output of the raw rows
	return columns, data
return columns, data | 36.466667 | 109 | 0.740402 | 66 | 547 | 6.030303 | 0.69697 | 0.055276 | 0.085427 | 0.135678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02863 | 0.106033 | 547 | 15 | 110 | 36.466667 | 0.785276 | 0.177331 | 0 | 0 | 0 | 0 | 0.339286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63962ee9e6bd04454a63c550017a084b6549e70a | 2,014 | py | Python | features/list.py | magnusstubman/mal00 | e99cad410657bf0452cae02d81d89c211732a789 | [
"MIT"
] | 3 | 2021-03-29T13:28:31.000Z | 2021-10-12T09:33:14.000Z | features/list.py | magnusstubman/mal00 | e99cad410657bf0452cae02d81d89c211732a789 | [
"MIT"
] | null | null | null | features/list.py | magnusstubman/mal00 | e99cad410657bf0452cae02d81d89c211732a789 | [
"MIT"
] | 1 | 2021-04-05T10:19:34.000Z | 2021-04-05T10:19:34.000Z | from features import addFeature
from lib import implants
from lib import globals
import tableprint
import timeago
import sys
import time
global lastImplantCount
lastImplantCount = 0
def delLast(num):
    return  # early return: the cursor-erasing logic below is currently disabled
num += 4
ERASE_LINE = '\x1b[2K'
CURSOR_UP_ONE = '\x1b[1A'
CURSOR_DOWN_ONE = '\x1b[1B'
print((CURSOR_UP_ONE + ERASE_LINE) * num, end='', flush=True)
#print((CURSOR_UP_ONE + ERASE_LINE) * num, end='', flush=False)
#print((CURSOR_UP_ONE ) * num, end='', flush=True)
#print((CURSOR_DOWN_ONE ) * num, end='', flush=True)
#sys.stdout.flush()
time.sleep(2)
class ListCommand:
command = 'list'
def run(arguments, inf):
if not arguments or (arguments and len(arguments) == 0):
if len(implants.implants) > 0:
global lastImplantCount
if globals.lastCommand == ListCommand.command:
delLast(lastImplantCount)
#headers = ['last seen', 'name', 'external', 'internal', 'user' 'host', 'os']
#headers = ['name', 'last seen', 'external', 'computer', 'user', 'domain', 'admin', 'ip']
headers = ['name', 'last seen', 'external']
data = []
for i in implants.implants:
#entry = [timeago.format(i.lastSeen), i.name, i.host]
#entry = [i.name, timeago.format(i.lastSeen), i.host]
#entry = [i.name, timeago.format(i.lastSeen), i.host, i.computerName, i.userName, str(i.domain), str(i.highIntegrity), i.ip]
entry = [i.name, timeago.format(i.lastSeen), i.host]
data.append(entry)
tableprint.table(data, headers, width=16)
print('', end='', flush=True)
lastImplantCount = len(implants.implants)
else:
print(str(len(implants.implants)) + ' implants', flush=True)
else:
if implants.implants:
implants.implants.clear()
implants.currentImplant = None
print('table cleared.')
def help():
return 'lists implants. "list clear" clear the current table'
addFeature(ListCommand)
| 30.515152 | 134 | 0.626614 | 247 | 2,014 | 5.048583 | 0.352227 | 0.102646 | 0.035285 | 0.070569 | 0.253408 | 0.17482 | 0.150762 | 0.150762 | 0.150762 | 0.063352 | 0 | 0.008366 | 0.228401 | 2,014 | 65 | 135 | 30.984615 | 0.79408 | 0.283515 | 0 | 0.093023 | 0 | 0 | 0.084379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069767 | false | 0 | 0.162791 | 0.023256 | 0.325581 | 0.139535 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
639784db5f7b3e514273818d74c8edc0b31fcc00 | 2,883 | py | Python | cst_toolbox/misc.py | danielberenberg/system-triangle-citizen | 9b3aef8ce61c798119456f7c0febbabd15d4b054 | [
"AFL-1.1"
] | null | null | null | cst_toolbox/misc.py | danielberenberg/system-triangle-citizen | 9b3aef8ce61c798119456f7c0febbabd15d4b054 | [
"AFL-1.1"
] | null | null | null | cst_toolbox/misc.py | danielberenberg/system-triangle-citizen | 9b3aef8ce61c798119456f7c0febbabd15d4b054 | [
"AFL-1.1"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import secrets
from pathlib import Path
class text_color:
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
LT_RED = '\033[91m'
# special
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def exists(f):
"""Existing path 'type'"""
f = Path(f)
if not f.exists():
raise FileNotFoundError(f"{f} doesn't exist")
return f
def nat(x):
"""Natural number > 0 'type'"""
x = int(x)
if x <= 0:
raise TypeError(f"Expected natural number, not {x}")
return x
class MockClient(object):
def __init__(self, cluster, **client_params):
self.cluster = cluster
def submit(self, func, *args, **kwargs):
return func(*args)
def gather(self, futures):
return futures
class MockCluster(object):
def __init__(self, **cluster_params):
for k, v in cluster_params.items():
self.__setattr__(k, v)
def scale(self, val):
self.absolutely_critical_value = val
class WorkingDirectory(object):
"""Context manager for setting up intermediate disk usage
with the option to clean up afterwards."""
def __init__(self, name=None, parent=None, path=None, cleanup=True, **setup_params):
parent = Path("/tmp" or parent)
name = secrets.token_hex(16) or str(name)
if path is not None:
self.__dirname = Path(path)
else:
self.__dirname = Path(parent / name)
self.cleanup = cleanup
self.__params = setup_params
self.set_defaults(exist_ok=True, parents=True)
@property
def setup_params(self):
return self.__params
def set_defaults(self, **defaults):
for k, v in defaults.items():
self.__params.setdefault(k, v)
@property
def dirname(self):
return self.__dirname
def setup(self, exist_ok=True, parents=True):
self.dirname.mkdir(exist_ok=exist_ok, parents=parents)
return self
def run_cleanup(self):
# TODO: add exclude option?
clean = False
if self.cleanup:
shutil.rmtree(self.dirname)
clean = True
return clean
def __enter__(self):
self.setup(**self.setup_params)
return self
def __exit__(self, type, value, traceback):
self.run_cleanup()
return self
def __str__(self):
return self.dirname.__str__()
def __repr__(self):
return f"WorkingDirectory({self.dirname}, cleanup={self.cleanup})"
if __name__ == '__main__':
pass
| 23.826446 | 88 | 0.58897 | 359 | 2,883 | 4.518106 | 0.4039 | 0.047472 | 0.020345 | 0.020962 | 0.05672 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042295 | 0.286507 | 2,883 | 120 | 89 | 24.025 | 0.746232 | 0.075616 | 0 | 0.058824 | 0 | 0 | 0.094661 | 0.020825 | 0 | 0 | 0 | 0.008333 | 0 | 1 | 0.2 | false | 0.011765 | 0.035294 | 0.070588 | 0.623529 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6398388ca17e95891e06f879a77203f8c6d615b1 | 226 | py | Python | tests/xtests/view_base/apps.py | Linuxbody/Xadmin | 09f588dda215c9e872d7c007622c1a8d32956265 | [
"BSD-3-Clause"
] | null | null | null | tests/xtests/view_base/apps.py | Linuxbody/Xadmin | 09f588dda215c9e872d7c007622c1a8d32956265 | [
"BSD-3-Clause"
] | 1 | 2021-02-08T20:51:30.000Z | 2021-02-08T20:51:30.000Z | tests/xtests/view_base/apps.py | Linuxbody/Xadmin | 09f588dda215c9e872d7c007622c1a8d32956265 | [
"BSD-3-Clause"
] | 1 | 2017-08-23T06:49:46.000Z | 2017-08-23T06:49:46.000Z | #!/usr/bin/env python
#coding:utf-8
import sys
if sys.getdefaultencoding() == 'ascii':  # Python 2 only; Python 3 already defaults to utf-8
reload(sys)
sys.setdefaultencoding('utf-8')
from django.apps import AppConfig
class ViewBaseApp(AppConfig):
name = "view_base"
| 18.833333 | 37 | 0.721239 | 30 | 226 | 5.4 | 0.766667 | 0.049383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010309 | 0.141593 | 226 | 11 | 38 | 20.545455 | 0.824742 | 0.141593 | 0 | 0 | 0 | 0 | 0.098958 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63992d2753f85baec5addc65ebdd06abc6f26af5 | 2,569 | py | Python | equidistantpoints/cli.py | ksbg/edpoints | f0534ad7deb6ec50992dc382bf3395e7e494aace | [
"MIT"
] | 5 | 2019-04-01T12:15:35.000Z | 2021-11-23T21:56:14.000Z | equidistantpoints/cli.py | ksbg/edpoints | f0534ad7deb6ec50992dc382bf3395e7e494aace | [
"MIT"
] | null | null | null | equidistantpoints/cli.py | ksbg/edpoints | f0534ad7deb6ec50992dc382bf3395e7e494aace | [
"MIT"
] | 1 | 2019-03-23T08:43:03.000Z | 2019-03-23T08:43:03.000Z | """Command-line usage of the package"""
from argparse import ArgumentParser
from . import EquidistantPoints
def parse_args():
"""Command-line arguments parsing"""
parser = ArgumentParser()
parser.add_argument(
'n_points',
help='Number of points to be generated',
metavar='N',
type=int
)
parser.add_argument(
'-f', '--file-name',
help='Path to a file for the result to be stored.',
type=str
)
parser.add_argument(
'-r', '--equatorial-radius',
type=float,
help='Specify a custom equatorial radius (default: WGS-84 standard)',
default=6378137.0)
parser.add_argument(
'-p', '--polar-radius',
type=float,
help='Specify a custom polar radius (default: WGS-84 standard)',
default=6356752.3)
type_group = parser.add_mutually_exclusive_group()
type_group.add_argument(
'-g', '--geojson',
action='store_true',
help='Indicates that the output should be stored in GeoJSON format (default: CSV)')
type_group.add_argument(
'-c', '--cartesian',
action='store_true',
help='Indicates that the coordinates to be stored should be in cartesian format '
'(default: geodetic)',
)
type_group.add_argument(
'-e', '--ecef',
action='store_true',
help='Indicates that the coordinates to be stored should be in ECEF format '
'(default: geodetic)',
)
args = parser.parse_args().__dict__
if args['geojson'] and not args['file_name']:
parser.error('If `-g`/`--geojson` is given, `-f`/`--file-name` must also be specified.')
return args
def cli_generate_points():
"""Command-line entry function"""
args = parse_args()
ed_points = EquidistantPoints(n_points=args['n_points'],
equatorial_radius=args['equatorial_radius'],
polar_radius=args['polar_radius'])
if args['file_name']:
if args['geojson']:
ed_points.write_geodetic_to_geojson(file_path=args['file_name'])
elif args['cartesian']:
ed_points.write_cartesian_to_csv(file_path=args['file_name'])
elif args['ecef']:
ed_points.write_ecef_to_csv(file_path=args['file_name'])
else:
ed_points.write_geodetic_to_csv(file_path=args['file_name'])
else:
print(ed_points.cartesian if args['cartesian'] else ed_points.ecef
if args['ecef'] else ed_points.geodetic)
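# Hedged CLI sketch (assumes this module is wired up as a console entry point,
# e.g. `edpoints`; the command name and file names are illustrative):
#
#   edpoints 1000 -f points.geojson -g    # write 1000 points as GeoJSON
#   edpoints 500 -f points.csv -c         # write cartesian coordinates as CSV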
| 32.518987 | 96 | 0.609965 | 311 | 2,569 | 4.836013 | 0.305466 | 0.042553 | 0.047872 | 0.042553 | 0.307181 | 0.277926 | 0.234043 | 0.12633 | 0.087766 | 0.087766 | 0 | 0.010604 | 0.265862 | 2,569 | 78 | 97 | 32.935897 | 0.78685 | 0.035812 | 0 | 0.253968 | 0 | 0.015873 | 0.313694 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031746 | false | 0 | 0.031746 | 0 | 0.079365 | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
639a947ce1dccf8c0d1cdff0245bd69ac98bd8c8 | 346 | py | Python | py/kgc/csv_numbered_fields.py | bcgov/diutils | caf510c81f7f43372d4a8e18f77eaa86cdede6a5 | [
"Apache-2.0"
] | 3 | 2020-02-13T21:34:28.000Z | 2021-01-11T17:24:56.000Z | py/kgc/csv_numbered_fields.py | bcgov/diputils | 6a491e8f1dd9330b58083953a274544b6c0f5bea | [
"Apache-2.0"
] | 14 | 2020-04-17T19:33:50.000Z | 2021-12-12T22:03:43.000Z | py/kgc/csv_numbered_fields.py | bcgov/diutils | caf510c81f7f43372d4a8e18f77eaa86cdede6a5 | [
"Apache-2.0"
] | 1 | 2021-11-19T01:18:41.000Z | 2021-11-19T01:18:41.000Z | # write new version of csv with numbered (not named) fields
import os
import sys
args = sys.argv
lines = open(args[1]).readlines()
lines = [line.strip() for line in lines]
hdr = lines[0].split(',')
hdr = ','.join([str(i) for i in range(len(hdr))])
lines[0] = hdr
open(args[1] + "_numbered_fields.csv", "wb").write(('\n'.join(lines)).encode())
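# Worked example (illustrative file name): if input.csv starts with the header
# "name,age,city", running `python csv_numbered_fields.py input.csv` writes
# input.csv_numbered_fields.csv whose header becomes "0,1,2" while the data
# rows are unchanged.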
| 24.714286 | 79 | 0.656069 | 58 | 346 | 3.87931 | 0.586207 | 0.071111 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013423 | 0.138728 | 346 | 13 | 80 | 26.615385 | 0.741611 | 0.16474 | 0 | 0 | 0 | 0 | 0.090592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
639c1fc6dce0c0de8ecc27f3959b4ad5d2555fed | 2,815 | py | Python | molecool/visualize.py | janash/sample_python_package | 367a1f446671aca81b789077adfb771385786d48 | [
"BSD-3-Clause"
] | 1 | 2020-05-01T18:43:56.000Z | 2020-05-01T18:43:56.000Z | molecool/visualize.py | janash/sample_python_package | 367a1f446671aca81b789077adfb771385786d48 | [
"BSD-3-Clause"
] | null | null | null | molecool/visualize.py | janash/sample_python_package | 367a1f446671aca81b789077adfb771385786d48 | [
"BSD-3-Clause"
] | null | null | null | """
Functions for visualization of molecules
"""
import numpy as np
import matplotlib.pyplot as plt
from .atom_data import atom_colors
def draw_molecule(coordinates, symbols, draw_bonds=None, save_location=None, dpi=300):
"""Draw a picture of a molecule.
Parameters
----------
coordinates : np.ndarray
The coordinates of the molecule.
symbols : list
The element of each atom in the molecule.
draw_bonds : dict, (optional)
Bonds to draw. Bonds should be indicated in a dictionary where the indices
of bonded atoms are given as the keys of the dictionary. The default is None -
no bonds are drawn.
save_location : str, (optional)
The location to save the image
dpi : int, (optional)
The resolution of the saved image
Returns
-------
ax : matplotlib axis
The axis of the plot.
"""
# Create figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Get colors - based on atom name
colors = []
for atom in symbols:
colors.append(atom_colors[atom])
size = np.array(plt.rcParams['lines.markersize'] ** 2)*200/(len(coordinates))
ax.scatter(coordinates[:,0], coordinates[:,1], coordinates[:,2], marker="o",
edgecolors='k', facecolors=colors, alpha=1, s=size)
# Draw bonds
if draw_bonds:
for atoms, bond_length in draw_bonds.items():
atom1 = atoms[0]
atom2 = atoms[1]
ax.plot(coordinates[[atom1,atom2], 0], coordinates[[atom1,atom2], 1],
coordinates[[atom1,atom2], 2], color='k')
plt.axis('square')
# Save figure
if save_location:
        plt.savefig(save_location, dpi=dpi)
return ax
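# Hedged usage sketch (illustrative coordinates, not part of the original module):
#
#   coords = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 1.4]])
#   symbols = ['C', 'O']
#   bonds = {(0, 1): 1.4}                # pair of atom indices -> bond length
#   ax = draw_molecule(coords, symbols, draw_bonds=bonds)
#   plt.show()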
def bond_histogram(bond_list, save_location=None, dpi=300, graph_min=0, graph_max=2):
"""Draw a histogram of bonds lengths in a molecule.
Parameters
    ----------
    bond_list : dict
        Bonds to plot, given as a dictionary where the indices of bonded
        atoms are the keys and the bond lengths are the values.
save_location : str, (optional)
The location to save the image
dpi : int, (optional)
The resolution of the saved image
"""
lengths = []
for atoms, bond_length in bond_list.items():
lengths.append(bond_length)
bins = np.linspace(graph_min, graph_max)
fig = plt.figure()
ax = fig.add_subplot(111)
plt.xlabel('Bond Length (angstrom)')
plt.ylabel('Number of Bonds')
ax.hist(lengths, bins=bins)
# Save figure
if save_location:
plt.savefig(save_location, dpi=dpi)
return ax | 28.72449 | 86 | 0.617052 | 372 | 2,815 | 4.586022 | 0.311828 | 0.056272 | 0.036928 | 0.022274 | 0.432591 | 0.383353 | 0.362251 | 0.362251 | 0.327081 | 0.327081 | 0 | 0.018821 | 0.282771 | 2,815 | 98 | 87 | 28.72449 | 0.826152 | 0.393606 | 0 | 0.171429 | 0 | 0 | 0.040947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.085714 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
639d17d627a197d64b7fb4116f0b3b64aa07ae6b | 2,185 | py | Python | interesting_info.py | jicarretero/fiware-users | cedb87550fc3a1e95b0087e3174160301fe3b80d | [
"Apache-2.0"
] | null | null | null | interesting_info.py | jicarretero/fiware-users | cedb87550fc3a1e95b0087e3174160301fe3b80d | [
"Apache-2.0"
] | null | null | null | interesting_info.py | jicarretero/fiware-users | cedb87550fc3a1e95b0087e3174160301fe3b80d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##
# Copyright 2017 FIWARE Foundation, e.V.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
##
__author__ = "José Ignacio Carretero Guarde"
import json
import sys
from functions import *
from OpenstackQueries import OpenstackQueries
from collections import Counter
if __name__ == "__main__":
q = OpenstackQueries('fiware-users.ini')
token = q.token
sys.stderr.write(q.token + "\n")
d={}
endpoint_groups = q.get_all_endpoint_groups(token=token)
d['endpoint_groups'] = endpoint_groups
project_list = q.get_all_projects(token=token)
d['projects']=project_list
user_list = q.get_all_users(token=token)
d['users']=user_list
role_list = q.get_role_list(token=token)
d['roles']=role_list
role_assignment_list = q.get_role_assignment_list(token=token)
d['role_assignments']=role_assignment_list
q.get_all_endpoint_groups_projects(token, endpoint_groups)
servers = q.get_all_servers(token)
d['servers'] = servers
routers = q.get_all_routers(token)
d['routers'] = routers
networks = q.get_all_networks(token)
d['networks'] = networks
images = q.get_all_images(token)
d['images'] = images
ports = q.get_all_ports(token)
d['ports'] = ports
    ## Count how many role assignments there are for every role
    role_assignments_count = Counter([k['role_id'] for k in d['role_assignments'] if k.get('role_id')])
    d['sum_up'] = {'users': len(d['users']), 'projects': len(d['projects']),
                   'role_assignments_count': role_assignments_count}
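    # Worked example of the Counter above (illustrative role ids): if the
    # role assignments contain role_ids ['admin', 'admin', 'member'], the
    # result is Counter({'admin': 2, 'member': 1}), i.e. assignments per role.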
sys.stdout.write(json.dumps(d))
sys.stdout.flush()
| 28.012821 | 103 | 0.70389 | 313 | 2,185 | 4.71246 | 0.41853 | 0.029831 | 0.042712 | 0.022373 | 0.055593 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005034 | 0.181693 | 2,185 | 77 | 104 | 28.376623 | 0.819911 | 0.309382 | 0 | 0 | 0 | 0 | 0.148621 | 0.014795 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.135135 | 0 | 0.135135 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
639f02b61ea216bdda84b7ae424cd5d8a516bacb | 1,955 | py | Python | stats/management/commands/import_historical_daily_compute_ligo.py | ARCCA/cogs3 | bcfe4397e1c91607e231f97df7d05b924e6598da | [
"MIT"
] | 1 | 2020-03-28T23:55:02.000Z | 2020-03-28T23:55:02.000Z | stats/management/commands/import_historical_daily_compute_ligo.py | ARCCA/cogs3 | bcfe4397e1c91607e231f97df7d05b924e6598da | [
"MIT"
] | 60 | 2018-04-16T13:40:23.000Z | 2020-06-05T18:02:01.000Z | stats/management/commands/import_historical_daily_compute_ligo.py | ARCCA/cogs3 | bcfe4397e1c91607e231f97df7d05b924e6598da | [
"MIT"
] | 10 | 2018-03-14T22:25:50.000Z | 2020-01-09T21:32:22.000Z | import os
from datetime import datetime
from django.core.management.base import BaseCommand
from . import find_date_range_of_ligo_file
class Command(BaseCommand):
help = 'Import historical daily compute stats for LIGO from bz2 files.'
def add_arguments(self, parser):
parser.add_argument(
'--input_dir',
required=True,
help='Path to bz2 files to import',
type=str,
)
def parse_file(self, filepath):
try:
# Call daily compute ligo import script
os.system(f"python3 manage.py import_daily_compute_ligo --file {filepath}")
self.stdout.write(self.style.SUCCESS(f'Finished processing {filepath}'))
except Exception as e:
self.stderr.write(self.style.ERROR(e))
def handle(self, *args, **options):
try:
input_dir = options['input_dir']
# Validate path
if os.path.exists(input_dir) is False:
raise Exception(f'{input_dir} does not exist.')
# Process bz2 files
for filename in os.listdir(input_dir):
if filename.endswith('.bz2'):
try:
filepath = os.path.join(input_dir, filename)
self.stdout.write(self.style.SUCCESS(f'Processing {filepath}'))
# Extract file
os.system(f'bzip2 -d {filepath}')
# Process extracted file
filepath = filepath[:-4] # Remove .bz2 extension
self.parse_file(filepath)
except Exception as e:
self.stderr.write(self.style.ERROR(e))
except Exception as e:
self.stderr.write(self.style.ERROR(e))
self.stdout.write(self.style.SUCCESS('Finished processing bz2 files.'))
| 34.298246 | 88 | 0.547315 | 214 | 1,955 | 4.911215 | 0.401869 | 0.053283 | 0.079924 | 0.054234 | 0.242626 | 0.242626 | 0.21313 | 0.152236 | 0.152236 | 0.152236 | 0 | 0.007229 | 0.363171 | 1,955 | 56 | 89 | 34.910714 | 0.836948 | 0.064962 | 0 | 0.243243 | 0 | 0 | 0.170538 | 0.014164 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.189189 | 0 | 0.324324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a0c556655cbddce507c8203d32acd238d0abbf | 11,486 | py | Python | Meters/IEC/crm_getter.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | Meters/IEC/crm_getter.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | Meters/IEC/crm_getter.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | import json
try:
from .Helpers.create_logger import create_logger
except ImportError:
from Helpers.create_logger import create_logger
try:
from .emhmeter import *
except ModuleNotFoundError:
from emhmeter import *
from time import sleep
import datetime
class CRMtoRedis:
url = "http://10.11.30.97:5000/api/MeteringPointWithSchedule"
generic_log = "/var/log/eg/crm_to_redis.log"
file_cache = "/tmp/_crm_to_redis_cache_1wed.txt"
def __init__(self, llevel):
self.logger = create_logger(loglevel=llevel, instance_name="CRMtoRedis", log_filename=self.generic_log)
def transform_meter(self, meter):
"""
Transforms data to the format expected by emhmeter.py
More or less
API:
{
"installedCommunicationModule":"20001070",
"customer":"Plan_E GmbH",
"dgo":"E.DIS AG",
"ip":"10.124.2.34",
"meteringPointGuid":"9337106a-9949-e911-810f-00155d15ce15",
"meteringPointLabel":"DE00100016831S0000000000001057725",
"operator":"Plan_E GmbH",
"installedRouter":"NONE",
"shortName":"PAR-1900226-03 Rheinsberg",
"installedSim":"8934075100252491857",
"installedMeter":{
"name":"10001802",
"type":"MCS301-CW31B-2EMIS-024100",
"manufacturer":"Metcom"
},
"transformerFactors":{
"current":150,
"voltage":5
},
"schedule":{
"p01":"24 Hours",
"p200":"24 Hours",
"p211":"24 Hours",
"table1":"24 Hours",
"table2":"24 Hours",
"table3":"24 Hours",
"table4":"24 Hours",
"time":"24 Hours"
}
}
Expected:
{
"meterNumber": "06205102",
"Manufacturer": "EMH",
"ip": "10.124.2.111",
"voltageRatio": 200,
"currentRatio": 10,
"totalFactor": 210
"schedule":{
"p01":"3600",
"p200":"3600",
"p211":"3600",
"table1":"3600",
"table2":"3600",
"table3":"3600",
"table4":"3600",
"time":"3600"
}
}
"""
try:
location = meter["meteringPointLabel"]
ip = meter["ip"]
name = meter["installedMeter"]["name"]
manufacturer = meter["installedMeter"]["manufacturer"]
            transform_current = meter["transformerFactors"]["current"]
transform_voltage = meter["transformerFactors"]["voltage"]
schedule = meter["schedule"]
except KeyError:
self.logger.error(f"Some key is missing in {meter}")
return None
result = dict()
result["meterNumber"] = name
result["location"] = location
result["Manufacturer"] = manufacturer
result["ip"] = ip
result["voltageRatio"] = transform_voltage
result["currentRatio"] = transform_curent
result["totalFactor"] = str(int(transform_voltage) * int(transform_voltage))
result["schedule"] = schedule
return result
@staticmethod
def get_cache():
with open(CRMtoRedis.file_cache, 'r') as f:
return f.read()
@staticmethod
    def update_cache(value):
with open(CRMtoRedis.file_cache, 'w') as f:
f.write(value)
return
def get_crm_data(self):
"""
Parses data returned by API
Transforms data to the format expected by emhmeter.py
"""
try:
results = requests.get(self.url, timeout=10).json()
            self.update_cache(json.dumps(results))
except Exception as e:
self.logger.error(f"{e} error when getting data from {self.url}")
self.logger.warning(f"Reading cached data")
results = json.loads(self.get_cache())
meter_list = []
for meter in results:
self.logger.debug(f"Found meter {meter}")
new_meter = self.transform_meter(meter)
if new_meter:
self.logger.debug(f"Transform to {new_meter}")
meter_list.append(new_meter)
return json.dumps(meter_list)
def push_to_redis(self, data):
try:
r = Redis(charset="utf-8", decode_responses=True)
r.set("crm_response", data)
self.logger.info(f"Pushed data to redis")
except Exception as e:
self.logger.error(f"{e} error when connecting to Redis")
raise e
def run(self):
d = self.get_crm_data()
self.push_to_redis(d)
class RedistoJob:
log_dir = "/var/log/eg"
job_functions = {
"p01": rq_create_p01_jobs,
"p200": rq_create_logbook_jobs,
"p211": None, # Collected with p200, additional job not required
"table1": rq_create_table1_jobs,
"table2": None,
"table3": None,
"table4": rq_create_table4_jobs,
"time": rq_create_time_jobs
}
not_implemented = ["p211", "table2", "table3"]
def __init__(self, llevel, istest=False):
self.logger = create_logger(loglevel=llevel,
instance_name="RedisToJob",
log_filename=CRMtoRedis.generic_log
)
self.test = istest
try:
self.redis_conn = Redis(charset="utf-8", decode_responses=True)
except Exception as e:
self.logger.error(f"{e} error when connecting to Redis")
raise e
def get_from_redis(self):
"""
Tries to get data from redis by "crm_response" key
Converts JSON string to python object
:returns [{meter obj}, {meter obj}, {meter obj}]
"""
data = json.loads(self.redis_conn.get("crm_response"))
if not data:
self.logger.error("Empty response from Redis")
raise ValueError
self.logger.debug(data)
return data
def create_jobs(self, meter):
"""
Create redis jobs according to schedule in meter object
"""
self.logger.debug(f"Meter {meter['meterNumber']}, jobs {meter['schedule']}")
for job_type in meter["schedule"].keys():
self.check_for_job(meter, job_type)
def check_for_job(self, meter, job_type):
"""
Check if a jobs can be executed - the time in schedule has passed
:param meter - meter dict
:param job_type - string from schedule
"""
push = False
meter_number = meter["meterNumber"]
if job_type == "table1":
if not self.requeue_jobs(job_type, meter_number):
# If the job was not requeued and it's not running
if datetime.datetime.now().day == 1:
# Table1 should only be pushed at 1st day of month
push = True
else:
self.logger.debug(f"Meter {meter_number} skipping table1, not 1st day of month")
# Skip all, except for P01 for Metcom - they are not responding
elif meter['Manufacturer'].lower() == 'metcom' and job_type != 'p01':
push = False
else:
interval = meter["schedule"][job_type]
# Search for job in redis
job_time = self.redis_conn.get(f"CRM:{meter_number}:{job_type}")
if job_time:
# Such job was pushed to redis, should check time
if self.check_time(job_time, interval):
push = True
else:
# Job was never pushed to redis
push = True
if push:
self.push_job(meter, job_type)
else:
self.logger.debug(f"Meter {meter_number}, skipping job {job_type}")
def check_time(self, job_time, interval):
"""
Checks if the interval has passed since job_time
:param job_time: epoch received from redis
:param interval: interval from API (now 24 Hours, will be epoch)
:return: boolean
"""
now = datetime.datetime.utcnow()
# self.logger.debug(f"Job time {job_time}")
job_dt = datetime.datetime.fromtimestamp(int(job_time))
interval = self.normalize_interval(interval)
delta = datetime.timedelta(seconds=interval)
return (now - delta) > job_dt
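        # Worked example (illustrative): if the stored job_time is 25 hours old
        # and interval is "24 Hours" (86400 s), then now - delta falls after
        # job_dt, check_time returns True, and the job is pushed again.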
def normalize_interval(self, interval):
"""
Fix interval so in is the number of seconds
:param interval: API response,
:return: interval in seconds
"""
if interval == "24 Hours":
interval = "86400"
return int(interval)
def push_job(self, meter, job_type):
"""
Push job to redis queue
:param meter: meter dict
:param job_type: string job from API schedule
"""
meter_number = meter["meterNumber"]
epoch = datetime.datetime.strftime(datetime.datetime.utcnow(), "%s")
# Skip jobs that we don't want to do
if job_type not in self.not_implemented:
meter_list = [meter] # rq_create... expects list of meters
job_function = self.job_functions[job_type] # Determine job push function
try:
job_function(meter_list, test=self.test) # Push job
self.redis_conn.set(f"CRM:{meter_number}:{job_type}", epoch)
except Exception as e:
self.logger.error(f"Meter {meter_number} error {e} while pushing job")
raise e
self.logger.debug(f"Meter {meter_number} pushed {job_type} job")
def requeue_jobs(self, q_name, meter_number):
if self.test:
q = Queue(name=f"test-{q_name}", connection=Redis())
else:
q = Queue(name=q_name, connection=Redis())
running_jobs, failed_jobs = get_job_meta(q)
self.logger.debug(f"Meter {meter_number} running jobs {running_jobs.get(meter_number)}, failed jobs {failed_jobs.get(meter_number)}")
if meter_number in failed_jobs:
# Meter query is failed, shall not be queued, but shall be requeued
job_id = failed_jobs[meter_number]["job_id"]
timestamp = failed_jobs[meter_number]["timestamp"]
q.failed_job_registry.requeue(job_id)
self.logger.debug(f"Meter {meter_number} requeued failed table1 job {job_id} at {timestamp}")
return True
elif meter_number in running_jobs:
logger.debug(f"Meter {meter_number} query for {q_name} is running")
# Query is running, no need to requeue or queue
return True
return False
def run(self):
# Get data from redis, should be a list of dicts
meters = self.get_from_redis()
for meter in meters:
self.create_jobs(meter)
if __name__ == "__main__":
for _ in range(5):
start = datetime.datetime.now()
a = CRMtoRedis(llevel="DEBUG")
a.run()
test = False
b = RedistoJob(llevel="DEBUG", istest=test)
b.run()
time_spent = int((datetime.datetime.now() - start).seconds)
if time_spent >= 10:
sleep(0)
else:
sleep(10 - time_spent)
| 34.911854 | 141 | 0.562946 | 1,303 | 11,486 | 4.816577 | 0.22947 | 0.031867 | 0.023901 | 0.022945 | 0.159975 | 0.145315 | 0.1297 | 0.090185 | 0.05768 | 0.029318 | 0 | 0.037476 | 0.330925 | 11,486 | 328 | 142 | 35.018293 | 0.77918 | 0.233502 | 0 | 0.184211 | 0 | 0.005263 | 0.165091 | 0.025285 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084211 | false | 0 | 0.042105 | 0 | 0.226316 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a1738a8546e5c03125ba171676d6c0f49767de | 1,280 | py | Python | custom_components/vivint/lock.py | cameroncockrell/hacs-vivint | e0d3b053a2247d7b506e8e3edfbdc4b5f6de7e0b | [
"MIT"
] | 25 | 2021-02-08T16:13:38.000Z | 2022-02-22T01:32:31.000Z | custom_components/vivint/lock.py | cameroncockrell/hacs-vivint | e0d3b053a2247d7b506e8e3edfbdc4b5f6de7e0b | [
"MIT"
] | 37 | 2021-01-24T22:37:25.000Z | 2022-03-16T03:08:09.000Z | custom_components/vivint/lock.py | cameroncockrell/hacs-vivint | e0d3b053a2247d7b506e8e3edfbdc4b5f6de7e0b | [
"MIT"
] | 1 | 2021-09-02T18:49:06.000Z | 2021-09-02T18:49:06.000Z | """Support for Vivint door locks."""
from homeassistant.components.lock import LockEntity
from vivintpy.devices.door_lock import DoorLock
from .const import DOMAIN
from .hub import VivintEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Vivint door locks using config entry."""
entities = []
hub = hass.data[DOMAIN][config_entry.entry_id]
for system in hub.account.systems:
for alarm_panel in system.alarm_panels:
for device in alarm_panel.devices:
if type(device) is DoorLock:
entities.append(VivintLockEntity(device=device, hub=hub))
if not entities:
return
async_add_entities(entities, True)
class VivintLockEntity(VivintEntity, LockEntity):
"""Vivint Lock."""
@property
def is_locked(self):
"""Return true if the lock is locked."""
return self.device.is_locked
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self.device.alarm_panel.id}-{self.device.id}"
async def async_lock(self, **kwargs):
"""Lock the lock."""
await self.device.lock()
async def async_unlock(self, **kwargs):
"""Unlock the lock."""
await self.device.unlock()
| 27.826087 | 77 | 0.657813 | 162 | 1,280 | 5.080247 | 0.358025 | 0.060753 | 0.047388 | 0.038882 | 0.053463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.234375 | 1,280 | 45 | 78 | 28.444444 | 0.839796 | 0.076563 | 0 | 0.076923 | 0 | 0 | 0.042095 | 0.042095 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a25b18c73a26d39d698b2c55164a02e5eac2cc | 1,482 | py | Python | fastapi-master-api/app/service/adapters.py | SionAbes/fullstack-porfolio | 6ca74da425a0f6e2d9b65b2aeb8d5452ff1565a9 | [
"MIT"
] | 1 | 2021-12-25T09:19:25.000Z | 2021-12-25T09:19:25.000Z | fastapi-master-api/app/service/adapters.py | SionAbes/fullstack-porfolio | 6ca74da425a0f6e2d9b65b2aeb8d5452ff1565a9 | [
"MIT"
] | null | null | null | fastapi-master-api/app/service/adapters.py | SionAbes/fullstack-porfolio | 6ca74da425a0f6e2d9b65b2aeb8d5452ff1565a9 | [
"MIT"
] | null | null | null | import json
from typing import List, Union
from app.domain.adapter import (
Adapter,
CreateVolvoCaretrackAdapter,
CreateWackerNeusonKramerAdapter,
)
from app.repository.database.adapters import PolymorphicAdaptersBase, adapters_repo
from kafka import KafkaProducer
from sqlalchemy.orm import Session
class CreateAdapter:
def __init__(
self,
db: Session,
bootstrap_server: str,
create_adapter: Union[
CreateVolvoCaretrackAdapter, CreateWackerNeusonKramerAdapter
],
adapters_repo: PolymorphicAdaptersBase = adapters_repo,
):
self.db = db
self.bootstrap_server = bootstrap_server
self.create_adapter = create_adapter
self.adapters_repo = adapters_repo
def create(self) -> Adapter:
adapter = self._create()
self._send_create_message_to_kafka()
return adapter
def _create(self) -> Adapter:
return self.adapters_repo.create(
db=self.db,
obj_in=self.create_adapter,
)
def _send_create_message_to_kafka(self):
producer = KafkaProducer(
bootstrap_servers=self.bootstrap_server,
api_version=(0, 11, 5),
value_serializer=lambda x: json.dumps(x).encode("utf-8"),
)
producer.send("adapter", {"status": "created"})
def fetch_adapters(
*,
db: Session,
) -> List[Adapter]:
adapters = adapters_repo.list(db=db)
return adapters
| 26.945455 | 83 | 0.662618 | 152 | 1,482 | 6.223684 | 0.361842 | 0.088795 | 0.073996 | 0.042283 | 0.05074 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004533 | 0.255735 | 1,482 | 54 | 84 | 27.444444 | 0.853128 | 0 | 0 | 0.043478 | 0 | 0 | 0.016869 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.130435 | 0.021739 | 0.326087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a466972fbe138345c3a144affd2ff678d1060a | 592 | py | Python | gamabr.py | shalahuddin/sketch | e8c051e09d48348bab1594e9538f719940fba52e | [
"Unlicense"
] | null | null | null | gamabr.py | shalahuddin/sketch | e8c051e09d48348bab1594e9538f719940fba52e | [
"Unlicense"
] | 3 | 2020-07-13T09:59:41.000Z | 2020-07-13T10:24:34.000Z | gamabr.py | shalahuddin/sketch | e8c051e09d48348bab1594e9538f719940fba52e | [
"Unlicense"
] | null | null | null | import cv2
import sys
# requires the opencv-python module (pip install opencv-python)
image = cv2.imread("gambar.jpg")
grayImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
grayImageInv = 255 - grayImage
grayImageInv = cv2.GaussianBlur(grayImageInv, (21, 21), 0)
output = cv2.divide(grayImage, 255-grayImageInv, scale = 256.0)
h, w = image.shape[:2]  # image height and width
resizeImg = cv2.resize(output, (int(w/2), int(h/2)))  # cv2.resize takes (width, height)
cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
cv2.namedWindow("pencilsketch", cv2.WINDOW_AUTOSIZE)
cv2.imshow("image", image)
cv2.imshow("pencilsketch", resizeImg)
# save the sketch to disk; imwrite needs a file extension to choose the encoder
cv2.imwrite("save_pencilsketch.jpg", output)
cv2.waitKey()
cv2.destroyAllWindows()
| 31.157895 | 63 | 0.760135 | 79 | 592 | 5.64557 | 0.506329 | 0.071749 | 0.076233 | 0.089686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.063197 | 0.091216 | 592 | 18 | 64 | 32.888889 | 0.765799 | 0.050676 | 0 | 0 | 0 | 0 | 0.109123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a544bf153a8063e56192d1b7de1591ce27ac14 | 2,111 | py | Python | ioe_api/user/token.py | mohsinalimat/frappe_ioe_api | d21941ac6050644c8ccbcc21a9ccc24f1a432712 | [
"MIT"
] | 1 | 2021-02-24T13:48:01.000Z | 2021-02-24T13:48:01.000Z | ioe_api/user/token.py | mohsinalimat/frappe_ioe_api | d21941ac6050644c8ccbcc21a9ccc24f1a432712 | [
"MIT"
] | 1 | 2019-12-16T09:08:20.000Z | 2019-12-17T08:10:50.000Z | ioe_api/user/token.py | mohsinalimat/frappe_ioe_api | d21941ac6050644c8ccbcc21a9ccc24f1a432712 | [
"MIT"
] | 3 | 2020-05-31T21:08:56.000Z | 2021-12-22T10:42:21.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Dirk Chang and contributors
# For license information, please see license.txt
#
# Api for user.token
#
from __future__ import unicode_literals
import frappe
import uuid
from ioe_api.helper import throw
@frappe.whitelist(allow_guest=True)
def test():
frappe.response.update({
"ok": True,
"data": "test_ok_result",
"source": "user.token.test"
})
@frappe.whitelist()
def create():
try:
code = frappe.get_value("IOT User Api", frappe.session.user, "authorization_code")
if code:
throw("authorization_code_exists")
auth_code = str(uuid.uuid1()).lower()
doc = frappe.get_doc({
"doctype": "IOT User Api",
"user": frappe.session.user,
"authorization_code": auth_code
}).insert()
frappe.response.update({
"ok": True,
"data": auth_code,
"info": "authorization_code_created"
})
except Exception as ex:
frappe.response.update({
"ok": False,
"error": str(ex)
})
@frappe.whitelist()
def read():
try:
code = frappe.get_value("IOT User Api", frappe.session.user, "authorization_code")
if not code:
throw("authorization_code_not_exists")
frappe.response.update({
"ok": True,
"data": code,
})
except Exception as ex:
frappe.response.update({
"ok": False,
"error": str(ex)
})
@frappe.whitelist()
def update():
try:
auth_code = frappe.get_value("IOT User Api", frappe.session.user, "authorization_code")
if not auth_code:
throw("authorization_code_not_exists")
doc = frappe.get_doc("IOT User Api", frappe.session.user)
new_token = str(uuid.uuid1()).lower()
doc.set("authorization_code", new_token)
doc.save()
frappe.response.update({
"ok": True,
"data": new_token,
})
except Exception as ex:
frappe.response.update({
"ok": False,
"error": str(ex)
})
@frappe.whitelist()
def remove():
try:
frappe.delete_doc("IOT User Api", frappe.session.user)
frappe.response.update({
"ok": True,
"info": "auth_code_removed"
})
except Exception as ex:
frappe.response.update({
"ok": False,
"error": "exception",
"exception": str(ex)
})
| 20.104762 | 89 | 0.670298 | 278 | 2,111 | 4.946043 | 0.266187 | 0.091636 | 0.130909 | 0.144 | 0.601455 | 0.528727 | 0.390545 | 0.346909 | 0.346909 | 0.346909 | 0 | 0.004 | 0.171009 | 2,111 | 104 | 90 | 20.298077 | 0.781714 | 0.064424 | 0 | 0.567901 | 0 | 0 | 0.210473 | 0.055414 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0 | 0.049383 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a5feffe2c2078e241a498bb5eb36898788c2b3 | 1,608 | py | Python | Lab5/RobieLavy - AICV LAB 5/Task-2.py | kubabujak48/AICV_LABS | 75468410a56252de28c256f1d7a732c51a7239fb | [
"MIT"
] | null | null | null | Lab5/RobieLavy - AICV LAB 5/Task-2.py | kubabujak48/AICV_LABS | 75468410a56252de28c256f1d7a732c51a7239fb | [
"MIT"
] | null | null | null | Lab5/RobieLavy - AICV LAB 5/Task-2.py | kubabujak48/AICV_LABS | 75468410a56252de28c256f1d7a732c51a7239fb | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
from random import randrange
def noise(img):
s = salt(img)
p = pepper(img)
noise = cv.addWeighted(s, 0.5, p, 0.5, 0)
output = cv.addWeighted(img, 0.5, noise, 0.5, 0)
return output
def salt(img):
rows, cols = img.shape[:2]
dens = (rows*cols*0.5)//2
dens = int(dens)
print(dens)
for d in range(0, dens):
x = randrange(rows)
y = randrange(cols)
img[x, y] = 255
return img
def pepper(img):
rows, cols = img.shape[:2]
dens = (rows * cols * 0.2)//2
dens = int(dens)
print(dens)
for d in range(0, dens):
x = randrange(rows)
y = randrange(cols)
img[x, y] = 0
return img
def median_filter(img):  # avoids shadowing the builtin filter()
    blur = cv.medianBlur(img, 3)
    return blur
def kernel_sharp(img):
kernel_sharpening = np.array([[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]])
sharpened = cv.filter2D(img, -1, kernel_sharpening)
return sharpened
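# Note on the kernel above: it is an identity-plus-Laplacian sharpening kernel
# whose entries sum to 1 (9 - 8), so overall brightness is preserved while
# edges are amplified. Increasing the center weight (and the negatives with it,
# keeping the sum at 1) would sharpen more aggressively.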
#########################################################
# Load images
img1 = cv.imread('lena.png', 0)
img2 = cv.imread('pic.png', 0)
# Copy one of the images (select one)
img = img1.copy()
#img = img2.copy()
cv.imshow('Original image', img)
# Adding S&P noise to image
img_with_noise = noise(img)
cv.imshow('Image noise', img_with_noise)
#Removing noise
img_filter = median_filter(img)
cv.imshow('Image median filtering', img_filter)
#Sharpening the picture
img_sharp = kernel_sharp(img_filter)
cv.imshow('Sharp', img_sharp)
cv.waitKey()
cv.destroyAllWindows() | 18.697674 | 57 | 0.567164 | 234 | 1,608 | 3.84188 | 0.307692 | 0.013348 | 0.013348 | 0.031146 | 0.229143 | 0.229143 | 0.229143 | 0.229143 | 0.229143 | 0.229143 | 0 | 0.036195 | 0.261194 | 1,608 | 86 | 58 | 18.697674 | 0.720539 | 0.078358 | 0 | 0.28 | 0 | 0 | 0.047216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.06 | 0 | 0.26 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a76f1c1a9231b2370d39956bed3335b3730a64 | 2,302 | py | Python | natural-computing/genetic-programming/constants.py | ramongonze/ufmg-practical-assignments | d85821e330a0f089a7601168bbbf8b4aef174f27 | [
"MIT"
] | null | null | null | natural-computing/genetic-programming/constants.py | ramongonze/ufmg-practical-assignments | d85821e330a0f089a7601168bbbf8b4aef174f27 | [
"MIT"
] | null | null | null | natural-computing/genetic-programming/constants.py | ramongonze/ufmg-practical-assignments | d85821e330a0f089a7601168bbbf8b4aef174f27 | [
"MIT"
] | null | null | null | import math
import operator
import random
# Seed the RNG; pass a fixed value to random.seed() to make test runs reproducible
random.seed()
# A set of nodes composes a binary tree
class Node(object):
def __init__(self, item, right=None, left=None, level=1):
self.right = right # Right child
self.left = left # Left child
self.item = item
self.level = level # The level of the node in the tree
# The representation of an individual
class Individual(object):
def __init__(self, tree=None, fitness=None, fitness_niching=None, size=0):
self.tree = tree
self.fitness = fitness
self.fitness_niching = fitness_niching # The f' function
self.size = size # The number of nodes in the tree
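# Hedged illustration (not part of the original file): the expression
# x0 + log(x1) could be encoded with the classes above as
#
#   tree = Node('+', level=1,
#               left=Node('x0', level=2),
#               right=Node('log', left=Node('x1', level=3), level=2))
#   ind = Individual(tree=tree, size=4)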
################## CONSTANTS ####################
MAX_DEPTH = 7 # Max depth of an individual, a binary tree
SIGMA = 15000 # Limit to calculate de relative distance between two individuals, used in fitness sharing
POPULATION_SIZE = 100
NUMBER_OF_GENERATIONS = 500
CROSSOVER_RATE = 20 # In percentage
MUTATION_RATE = 75 # In percentage
REPRODUCTION_RATE = 5 # In percentage
K = 10 # Tournament selection
ELITISM = 1 # The number of individuals will be passed to the next generation
# MIN and MAX are the limits of the interval to get one random number when 'num' is called in terminals_set
MIN = -1
MAX = 1
################## DATABASE ####################
FILE_NAME = None # The database which will be used
file_lines = None # List with all the data
file_size = None # Number of examples
variables_amount = None # Number of variables in the database
#################### SETS #######################
functions_set = ['+', '-', '*', '/', 'log']
functions_for_two_terminals = ['+', '-', '*', '/']
function_for_one_terminal = ['log'] # For example, sin, cos, log
variables_set = []
terminals_set = ['num'] #'num' is a real number between MIN and MAX
t_and_f_set = None #Both sets, used in grow method
############################################## RANDOM ELEMENTS ##################################################
# Each function below returns one random element from a set
def random_function():
return functions_set[random.randint(0, (len(functions_set)-1))]
def random_terminal():
return terminals_set[random.randint(0, (len(terminals_set)-1))]
def random_terminal_and_function():
return t_and_f_set[random.randint(0, (len(t_and_f_set)-1))]
| 37.129032 | 113 | 0.670721 | 327 | 2,302 | 4.568807 | 0.379205 | 0.026774 | 0.01004 | 0.016064 | 0.068273 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015377 | 0.152476 | 2,302 | 61 | 114 | 37.737705 | 0.750384 | 0.384014 | 0 | 0 | 0 | 0 | 0.014431 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.069767 | 0.069767 | 0.302326 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63a7dc72e2d08886f4dcef0209986deb895e47ef | 1,290 | py | Python | LeetCode/python-R1/0005-最长回文子串D/V2.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-08-27T11:28:03.000Z | 2020-12-24T07:10:22.000Z | LeetCode/python-R1/0005-最长回文子串D/V2.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | null | null | null | LeetCode/python-R1/0005-最长回文子串D/V2.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-09-20T09:44:01.000Z | 2020-12-24T07:10:23.000Z | """
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
System: Ubuntu 18.04
IDE: VS Code 1.36
Tools: python == 3.7.3
"""
"""
Approach:
    Treat each element as a center and expand outward, looking for both
    odd-length and even-length palindromic substrings.
Result:
    Runtime: 868 ms, beating 82.03% of all Python3 submissions
    Memory: 13.7 MB, beating 19.28% of all Python3 submissions
"""
class Solution:
def longestPalindrome(self, s):
def help(s, left, right):
"""
            1. Expand from left (moving left) and right (moving right) within s
               to find the longest palindrome centered here.
            2. Return the length of that palindromic substring.
"""
N = len(s)
while left >= 0 and right < N and s[left] == s[right]:
left -= 1
right += 1
return right - left - 1
if not s:
return ""
start, end = 0, 0
for i in range(len(s)):
            # Expand around each character of s as a center.
            # Palindromic substrings come in odd and even lengths:
            # help(s, i, i) covers the odd case (centered on s[i]); help(s, i, i + 1) covers the even case (centered between s[i] and s[i+1])
len1, len2 = help(s, i, i), help(s, i, i + 1)
len0 = max(len1, len2)
if len0 > end - start:
                # record the start and end indices of the current best substring
start = i - (len0 - 1) // 2
end = i + len0 // 2
return s[start:end + 1]
if __name__ == "__main__":
s = "babad"
answer = Solution().longestPalindrome(s)
print(answer)
| 25.8 | 92 | 0.489922 | 164 | 1,290 | 3.804878 | 0.512195 | 0.016026 | 0.019231 | 0.012821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061174 | 0.37907 | 1,290 | 49 | 93 | 26.326531 | 0.717853 | 0.233333 | 0 | 0 | 0 | 0 | 0.016477 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63ae651d3be857f3edd0aaef054f1eeeb0a39829 | 6,141 | py | Python | findy/utils/request.py | doncat99/FinanceDataCenter | 1538c8347ed5bff9a99a3cca07507a7605108124 | [
"MIT"
] | null | null | null | findy/utils/request.py | doncat99/FinanceDataCenter | 1538c8347ed5bff9a99a3cca07507a7605108124 | [
"MIT"
] | null | null | null | findy/utils/request.py | doncat99/FinanceDataCenter | 1538c8347ed5bff9a99a3cca07507a7605108124 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
from http import client
from retrying import retry
import requests
from aiohttp import ClientSession, ClientTimeout, TCPConnector
# from aiohttp_client_cache import CachedSession, SQLiteBackend
# from findy.interface import RunMode
# from findy.utils.cache import hashable_lru
logger = logging.getLogger(__name__)
client.HTTPConnection._http_vsn = 11
client.HTTPConnection._http_vsn_str = 'HTTP/1.1'
http_timeout = (20, 60)
max_retries = 3
class TimeoutRequestsSession(requests.Session):
def request(self, *args, **kwargs):
kwargs.setdefault('timeout', http_timeout)
return super(TimeoutRequestsSession, self).request(*args, **kwargs)
def get_proxy():
return requests.get("http://127.0.0.1:5010/get/").json()
def retry_if_connection_error(exception):
""" Specify an exception you need. or just True"""
logger.debug(f'request exception: {exception}')
return True
# return isinstance(exception, ConnectionError)
def get_sync_http_session():
return requests.Session()
def get_async_http_session():
# if fetch_mode == RunMode.Sync:
# http_session = TimeoutRequestsSession()
# http_session.mount('http://', requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=0))
# http_session.mount('https://', requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100, max_retries=0))
# return http_session
timeout = ClientTimeout(total=None,
connect=None,
sock_connect=None,
sock_read=None)
# cache = SQLiteBackend(
# cache_name='~/.cache/aiohttp-requests.db', # For SQLite, this will be used as the filename
# expire_after=24, # By default, cached responses expire in a day
# # expire_after_urls={'*.site.com/static': 24 * 7}, # Requests with this pattern will expire in a week
# # ignored_params=['auth_token'], # Ignore this param when caching responses
# )
return ClientSession(connector=TCPConnector(limit=30, limit_per_host=10, ttl_dns_cache=50,
ssl=False, force_close=True, enable_cleanup_closed=True),
trust_env=True,
# headers={"Connection": "close"},
timeout=timeout
)
def sync_get(http_session: requests.Session, url, headers=None, encoding='utf-8', params={}, enable_proxy=False, return_type=None):
@retry(retry_on_exception=retry_if_connection_error, stop_max_attempt_number=max_retries, wait_fixed=2000)
def _sync_get(http_session: requests.Session, url, enable_proxy, headers=None, encoding='utf-8', params={}):
proxies = get_proxy() if enable_proxy else None
logger.debug(f'proxies: {proxies}')
return http_session.get(url, headers=headers, params=params, proxies=proxies, verify=False)
def _sync_get_no_proxy(http_session: requests.Session, url, headers=None, encoding='utf-8', params={}):
try:
return http_session.get(url, headers=headers, params=params, verify=False)
        except Exception:
pass
return None
logger.debug(f'HTTP GET: {url}')
try:
resp = _sync_get(http_session, url, enable_proxy=enable_proxy, headers=headers, encoding=encoding, params=params)
except Exception as e:
if enable_proxy:
resp = _sync_get_no_proxy(http_session, url, headers=headers, encoding=encoding, params=params)
if resp is None:
logger.error(f'url: {url}, error: {e}')
return None
else:
logger.error(f'url: {url}, error: {e}')
return None
resp.encoding = encoding
if return_type == 'text':
return resp.text
elif return_type == 'content':
return resp.content
else:
return resp
def sync_post(http_session: requests.Session, url, json=None, encoding=['utf-8', 'gbk'], enable_proxy=False):
@retry(retry_on_exception=retry_if_connection_error, stop_max_attempt_number=max_retries, wait_fixed=2000)
def _sync_post(http_session: requests.Session, url, enable_proxy, json=None, encoding=['utf-8', 'gbk']):
proxies = get_proxy() if enable_proxy else None
logger.debug(f'proxies: {proxies}')
return http_session.post(url=url, json=json, proxies=proxies, verify=False)
def _sync_post_no_proxy(http_session: requests.Session, url, json=None, encoding=['utf-8', 'gbk']):
try:
return http_session.post(url=url, json=json, verify=False)
        except Exception:
return None
logger.debug(f'HTTP POST: {url}, json: {json}')
if json is None:
return None
try:
resp = _sync_post(http_session, url=url, enable_proxy=enable_proxy, json=json)
except Exception as e:
if enable_proxy:
resp = _sync_post_no_proxy(http_session, url, json=json)
if resp is None:
logger.error(f'url: {url}, error: {e}')
return None
else:
logger.error(f'url: {url}, error: {e}')
return None
for encode in encoding:
try:
resp.encoding = encode
origin_result = resp.json().get('Result')
if origin_result is not None:
return origin_result
except Exception as e:
logger.error(f'json decode failed, code: {resp.status_code}, codec: {encode}, content: {resp.text}, error: {e}')
return None
def chrome_copy_header_to_dict(src):
lines = src.split('\n')
header = {}
if lines:
for line in lines:
if len(line) > 0:
try:
index = line.index(':')
key = line[:index]
value = line[index + 1:]
if key and value:
header.setdefault(key.strip(), value.strip())
except Exception:
pass
return header
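# Worked example (illustrative header text copied from browser devtools):
#
#   src = "Accept: text/html\nUser-Agent: Mozilla/5.0"
#   chrome_copy_header_to_dict(src)
#   # -> {'Accept': 'text/html', 'User-Agent': 'Mozilla/5.0'}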
| 37.674847 | 131 | 0.623351 | 744 | 6,141 | 4.961022 | 0.258065 | 0.059604 | 0.030886 | 0.042265 | 0.423733 | 0.408561 | 0.326741 | 0.316987 | 0.279328 | 0.23625 | 0 | 0.013589 | 0.269012 | 6,141 | 162 | 132 | 37.907407 | 0.808643 | 0.171796 | 0 | 0.342342 | 0 | 0.009009 | 0.077912 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0.018018 | 0.045045 | 0.018018 | 0.36036 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63aef9c2f41a87230a16543f6b852c81c6bc1313 | 1,290 | py | Python | tools/export_weights.py | hlld/classification | 327a67661ac2fc6302809dc8caab12b7a8195c64 | [
"MIT"
] | 7 | 2021-07-29T09:05:38.000Z | 2021-07-29T11:01:03.000Z | tools/export_weights.py | zhaojiankuan/classification | f5e90107aadb35be7056d75b6b01d48bd1370800 | [
"MIT"
] | null | null | null | tools/export_weights.py | zhaojiankuan/classification | f5e90107aadb35be7056d75b6b01d48bd1370800 | [
"MIT"
] | 4 | 2021-07-29T09:05:40.000Z | 2021-07-29T10:51:50.000Z | import argparse
import sys
import os
import torch
import struct
from pathlib import Path
sys.path.append('../')
from classification.tools import load_model
def export_weights(model, wts_file):
# Export model to TensorRT compatible format
with open(wts_file, 'w') as fd:
fd.write('{}\n'.format(len(model.state_dict().keys())))
for key, val in model.state_dict().items():
vec = val.reshape(-1).cpu().numpy()
fd.write('{} {} '.format(key, len(vec)))
for x in vec:
fd.write(' ')
fd.write(struct.pack('>f', float(x)).hex())
fd.write('\n')
print('Export done, weights saved to %s' % wts_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str, default='',
help='weights path')
parser.add_argument('--save_path', type=str, default='../results',
help='path to outputs')
opt = parser.parse_args()
device = torch.device("cpu")
print('Loading model from %s ...' % opt.weights)
model = load_model(opt.weights, device)
wts_file = os.path.join(opt.save_path,
Path(opt.weights).stem + '.wts')
export_weights(model, wts_file)
| 32.25 | 70 | 0.589147 | 165 | 1,290 | 4.460606 | 0.448485 | 0.047554 | 0.048913 | 0.057065 | 0.067935 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001045 | 0.25814 | 1,290 | 39 | 71 | 33.076923 | 0.768025 | 0.032558 | 0 | 0 | 0 | 0 | 0.11878 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.21875 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63b125702a1b2571faf102a9e6061df6610c3d1e | 1,213 | py | Python | Network.py | LonglongaaaGo/CNN-python | 02b0f9e0e5044796bc858b75421cbc4e2e3cc653 | [
"MIT"
] | null | null | null | Network.py | LonglongaaaGo/CNN-python | 02b0f9e0e5044796bc858b75421cbc4e2e3cc653 | [
"MIT"
] | null | null | null | Network.py | LonglongaaaGo/CNN-python | 02b0f9e0e5044796bc858b75421cbc4e2e3cc653 | [
"MIT"
] | null | null | null | from Layer import *
class Network(BasicLayer):
def __init__(self,para={}):
self.model = []
        # para = {
        #     "Conv_1": {...},   # maps layer name -> config dict for that layer
        # }
for key in para.keys():
if "Conv" in key:
self.model.append(Conv_layer(para[key]))
elif "Fully" in key:
self.model.append(Fully_connect_layer(para[key]))
elif "Pool" in key:
self.model.append(Pooling(para[key]))
elif "Relu" in key:
self.model.append(Relu())
elif "Sigmoid" in key:
self.model.append(Sigmoid())
self.loss_fun = Cross_Entropy_Loss()
def forward(self,X):
feature = X
for layer in self.model:
feature = layer.forward(feature)
return feature
def get_loss(self,X,Y):
return self.loss_fun.forward(X,Y)
def backward(self,X,Y):
loss = self.loss_fun.forward(X, Y)
gradient = self.loss_fun.backward()
for layer in self.model[::-1]:
gradient = layer.backward(gradient)
def update(self,learning_rate = 0.1):
for layer in self.model[::-1]:
layer.update(learning_rate)
| 28.209302 | 65 | 0.538335 | 150 | 1,213 | 4.24 | 0.286667 | 0.127358 | 0.070755 | 0.110063 | 0.312893 | 0.125786 | 0 | 0 | 0 | 0 | 0 | 0.004975 | 0.337181 | 1,213 | 42 | 66 | 28.880952 | 0.78607 | 0.024732 | 0 | 0.064516 | 0 | 0 | 0.020339 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.032258 | 0.032258 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63b1840d56f09a5d5a9c20bc118d0207eadb7b11 | 1,882 | py | Python | spyder/plugins/statusbar/container.py | ximion/spyder | 50911555cefd95947c887f8a412a58ad96ff8d9e | [
"MIT"
] | 1 | 2021-03-13T04:36:43.000Z | 2021-03-13T04:36:43.000Z | spyder/plugins/statusbar/container.py | ximion/spyder | 50911555cefd95947c887f8a412a58ad96ff8d9e | [
"MIT"
] | null | null | null | spyder/plugins/statusbar/container.py | ximion/spyder | 50911555cefd95947c887f8a412a58ad96ff8d9e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)

"""
Status bar container.
"""

# Third-party imports
from qtpy.QtCore import Signal

# Local imports
from spyder.api.widgets import PluginMainContainer
from spyder.plugins.statusbar.widgets.status import (
    ClockStatus, CPUStatus, MemoryStatus
)


class StatusBarContainer(PluginMainContainer):
    DEFAULT_OPTIONS = {
        'show_status_bar': True,
        'memory_usage/enable': True,
        'memory_usage/timeout': 2000,
        'cpu_usage/enable': False,
        'cpu_usage/timeout': 2000,
        'clock/enable': False,
        'clock/timeout': 1000,
    }

    sig_show_status_bar_requested = Signal(bool)
    """
    This signal is emitted when the user wants to show/hide the
    status bar.
    """

    def setup(self, options):
        # Basic status widgets
        self.mem_status = MemoryStatus(parent=self)
        self.cpu_status = CPUStatus(parent=self)
        self.clock_status = ClockStatus(parent=self)

    def on_option_update(self, option, value):
        # Map each option key onto the matching widget's visibility or
        # refresh interval.
        if option == 'memory_usage/enable':
            self.mem_status.setVisible(value)
        elif option == 'memory_usage/timeout':
            self.mem_status.set_interval(value)
        elif option == 'cpu_usage/enable':
            self.cpu_status.setVisible(value)
        elif option == 'cpu_usage/timeout':
            self.cpu_status.set_interval(value)
        elif option == 'clock/enable':
            self.clock_status.setVisible(value)
        elif option == 'clock/timeout':
            self.clock_status.set_interval(value)
        elif option == 'show_status_bar':
            self.sig_show_status_bar_requested.emit(value)

    def update_actions(self):
        pass
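# Usage sketch (editor's addition): a host window would typically wire the
# container's signal straight into the Qt status bar. The construction
# arguments below are hypothetical; only the signal connection is taken from
# this file:
#
#     container = StatusBarContainer('statusbar', plugin=plugin)
#     container.sig_show_status_bar_requested.connect(
#         main_window.statusBar().setVisible)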
| 30.354839 | 65 | 0.635494 | 214 | 1,882 | 5.406542 | 0.392523 | 0.046672 | 0.077787 | 0.064823 | 0.220398 | 0.082973 | 0 | 0 | 0 | 0 | 0 | 0.0094 | 0.265143 | 1,882 | 61 | 66 | 30.852459 | 0.826464 | 0.116897 | 0 | 0 | 0 | 0 | 0.149933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.027027 | 0.081081 | 0 | 0.243243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63b3401ea17e27ce0e9f4bb65e298c006a065ac2 | 589 | py | Python | yaga_ga/evolutionary_algorithm/selectors/ranking.py | alessandrolenzi/yaga | 872503ad04a2831135143750bc309188e5685284 | [
"MIT"
] | null | null | null | yaga_ga/evolutionary_algorithm/selectors/ranking.py | alessandrolenzi/yaga | 872503ad04a2831135143750bc309188e5685284 | [
"MIT"
] | null | null | null | yaga_ga/evolutionary_algorithm/selectors/ranking.py | alessandrolenzi/yaga | 872503ad04a2831135143750bc309188e5685284 | [
"MIT"
] | null | null | null | from typing import Iterable, Sequence, Tuple, TypeVar, Generic
from yaga_ga.evolutionary_algorithm.details import Comparable
from yaga_ga.evolutionary_algorithm.individuals import IndividualType

ScoreType = TypeVar("ScoreType", bound=Comparable)


class Ranking(Generic[IndividualType, ScoreType]):
    def __init__(self, selection_size: int):
        self.selection_size = selection_size

    def __call__(
        self, population: Sequence[Tuple[IndividualType, ScoreType]]
    ) -> Iterable[IndividualType]:
        # Yield the individual of each of the first `selection_size`
        # (individual, score) pairs; the caller is expected to pass a
        # population already ordered by rank.
        for i in population[: self.selection_size]:
            yield i[0]
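# Usage sketch (editor's addition): with a population already ordered
# best-first, Ranking simply truncates to the top `selection_size`
# individuals.

def _ranking_demo() -> None:
    population = [("a", 0.9), ("b", 0.7), ("c", 0.1)]  # best-first
    selector: Ranking[str, float] = Ranking(selection_size=2)
    assert list(selector(population)) == ["a", "b"]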
| 32.722222 | 69 | 0.748727 | 65 | 589 | 6.538462 | 0.507692 | 0.122353 | 0.12 | 0.103529 | 0.145882 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002041 | 0.168081 | 589 | 17 | 70 | 34.647059 | 0.865306 | 0 | 0 | 0 | 0 | 0 | 0.01528 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63b8f2381a48d0a34e089f59b031e0d0cf2ef305 | 5,548 | py | Python | serene_schema/serene_schema/helpers/markdown.py | NICTA/serene-etl | 1d446012c0d08a95b8fbbbe8237735320a2fe2a4 | [
"Apache-2.0"
] | null | null | null | serene_schema/serene_schema/helpers/markdown.py | NICTA/serene-etl | 1d446012c0d08a95b8fbbbe8237735320a2fe2a4 | [
"Apache-2.0"
] | null | null | null | serene_schema/serene_schema/helpers/markdown.py | NICTA/serene-etl | 1d446012c0d08a95b8fbbbe8237735320a2fe2a4 | [
"Apache-2.0"
] | null | null | null |
if __name__ == "__main__" and __package__ is None:
    # use the local version of schema
    import sys, os
    if os.getcwd().endswith('serene_schema'):
        sys.path.insert(0, os.getcwd())
    elif os.getcwd().endswith('helpers'):
        sys.path.insert(0, os.path.join(os.getcwd(), '..', '..'))

from serene_schema import attributes, VALID_TYPES, VALID_TRANSACTIONS


def make_link(name, title):
    # Build a GitHub-style markdown anchor link: lower-case the title and
    # strip the characters that do not survive in auto-generated heading
    # anchors.
    return '[{}](#{})'.format(name, title.lower()
                              .replace('.', '')
                              .replace(' > ', '-')
                              .replace(' ', '-')
                              .replace('`', ''))


def object_title(o):
    path = o.class_path()
    path[-1] = '`{}`'.format(path[-1])
    return '{}'.format(' > '.join(path))


def link_title(o):
    path = o.class_path()
    path[-1] = '`{}`'.format(path[-1])
    return 'Link > {}'.format(' > '.join(path))


def print_doc(cls):
    if cls.__doc__:
        for line in cls.__doc__.strip().split('\n'):
            print(' > {}'.format(line))
        print('\n\n')


def print_objects():
    for cls in sorted(VALID_TYPES, key=lambda _: '.'.join(_.class_path())):
        attrs = cls.attrs
        print('## {}'.format(object_title(cls)))
        print_doc(cls)
        if cls.label.__doc__:
            print(' > Label format: {}\n'.format(cls.label.__doc__))
        print(' > Search syntax: `{}:"search term"`\n\n'.format(cls.name()))
        if attrs:
            print('| property | type | description | \n| --- | --- | --- |')
            for k in attrs:
                txt = attrs[k].__doc__.strip() if attrs[k].__doc__ else '-'
                # Docstrings become single-row table cells, so they must fit
                # on one line.
                assert '\n' not in txt, attrs[k].__name__
                assert len(txt.strip().split('\n')) <= 1, attrs[k].__name__
                print('| {} | {} | {} |'.format(k, attrs[k].__name__, txt))
            print('\n\n')
        links = getattr(cls, 'links', [])
        rev_links = cls.reverse_links()
        if links or rev_links:
            print('| link | description | \n| --- | --- |')
            for l in sorted(links, key=lambda k: k.name()):
                assert l.__doc__, l
                txt = l.__doc__.strip()
                title = make_link(l.name(), link_title(l))
                assert len(txt.strip().split('\n')) <= 1, l.__doc__
                print('| {} ⇒ {} ⇒ ?? | {} |'.format(cls.name(), title, txt))
            for l in sorted(rev_links, key=lambda k: k.name()):
                txt = l.__doc__.strip()
                title = make_link(l.name(), link_title(l))
                assert len(txt.strip().split('\n')) <= 1, l.__doc__
                print('| ?? ⇒ {} ⇒ {} | {} |'.format(title, cls.name(), txt))
        print('\n\n* * *\n')


def print_transactions():
    for cls in sorted(VALID_TRANSACTIONS, key=lambda _: '.'.join(_.class_path())):
        attrs = cls.attrs
        print('## {}'.format(link_title(cls)))
        print_doc(cls)
        print('To limit results to documents containing this type of link `link_types:{}`\n\n'.format(cls.name()))
        if attrs:
            print('| property | type | description | \n| --- | --- | --- |')
            for k in attrs:
                txt = attrs[k].__doc__.strip() if attrs[k].__doc__ else '-'
                assert '\n' not in txt, attrs[k].__name__
                assert len(txt.strip().split('\n')) <= 1, attrs[k].__name__
                print('| {} | {} | {} |'.format(k, attrs[k].__name__, txt))
            print('\n\n')
        links = getattr(cls, 'links', [])
        rev_links = cls.reverse_links()
        if links or rev_links:
            print('| link | \n| --- | ')
            for link in sorted(rev_links, key=lambda k: k.name()):
                title = make_link(link.name(), object_title(link))
                print('| {} ⇒ {} ⇒ ?? |'.format(title, cls.name()))
            for link in sorted(links, key=lambda k: k.name()):
                if link in rev_links:
                    continue
                title = make_link(link.name(), object_title(link))
                print('| ?? ⇒ {} ⇒ {} |'.format(cls.name(), title))
        print('\n\n* * *\n')


def main():
    print("""
# Ontology Definition

This file documents the ontology:

* all known objects that can be represented (Class Nodes)
* the attributes those objects can have (Data Nodes)
* the links between objects (transactions, edges, etc)

The ontology is hierarchical and is based on 6 **base** object types:

* [Accounts](#account) - accounts give entities access to systems (typically online) and represent the entities activities within the particular system
* [Documents](#document) - documents are used to identify entities or embody information at a point in time
* [Entities](#entity) - entities are persons or organisations
* [Events](#event) - events are things that happen at a point in time
* [Locations](#location) - locations are physical locations in the real world
* [Objects](#object) - objects are physical objects

Every object is represented in the system by a label or identifier and properties or attributes.
Properties or attributes are typically things that make an object unique.

The ontology is hierarchical in that all Account objects based on the Account base type **inherit** the properties and links
from the Account base type. This applies throughout the ontology below.

# Object Types
""")
    print_objects()
    print("""
# Link types
""")
    print_transactions()
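# Example (editor's addition): make_link builds GitHub-style anchors, e.g.
#
#     >>> make_link('Phone', 'Link > `Phone`')
#     '[Phone](#link-phone)'
#
# Regenerating the ontology document is then a shell redirect (the output
# file name is the caller's choice):
#
#     python serene_schema/helpers/markdown.py > ontology.md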
if __name__ == '__main__':
    main()
| 33.421687 | 151 | 0.556957 | 682 | 5,548 | 4.325513 | 0.218475 | 0.020339 | 0.020339 | 0.023051 | 0.45322 | 0.391525 | 0.384407 | 0.368136 | 0.349153 | 0.328136 | 0 | 0.010691 | 0.275054 | 5,548 | 165 | 152 | 33.624242 | 0.722775 | 0.005588 | 0 | 0.401786 | 0 | 0.026786 | 0.333757 | 0.007981 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.0625 | false | 0 | 0.017857 | 0.008929 | 0.107143 | 0.267857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63bc2d0d047391a080297053dfaf4fe899a6e18a | 252 | py | Python | src/chapter10/soft.py | ManuelA1000/BCS-2021 | 0bdf8a165b6e9e79c33257919a44b4be3cd49a57 | [
"MIT"
] | null | null | null | src/chapter10/soft.py | ManuelA1000/BCS-2021 | 0bdf8a165b6e9e79c33257919a44b4be3cd49a57 | [
"MIT"
] | null | null | null | src/chapter10/soft.py | ManuelA1000/BCS-2021 | 0bdf8a165b6e9e79c33257919a44b4be3cd49a57 | [
"MIT"
] | null | null | null | txt = 'but soft what light in yonder window breaks'
words = txt.split()
t = list()
for word in words:
    # decorate: pair each word with its length so sorting compares lengths first
    t.append((len(word), word))
print(t)
t.sort(reverse=True)
print(t)
res = list()
for length, word in t:
    # undecorate: keep only the word, now ordered longest-first
    res.append(word)
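# Editor's note: the steps above are the decorate-sort-undecorate (DSU)
# pattern. A key function gives the same ordering in one pass, except that
# equal-length words keep their input order instead of being compared
# reverse-alphabetically:
#
#     res = sorted(words, key=len, reverse=True)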
print (res) | 18 | 51 | 0.638889 | 43 | 252 | 3.744186 | 0.534884 | 0.086957 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 252 | 14 | 52 | 18 | 0.813131 | 0 | 0 | 0.166667 | 0 | 0 | 0.16996 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63bce539de4f9c34cc18cec63222346a4f46825b | 3,923 | py | Python | workspaces/voyage/server/src/core/rediscover.py | raychorn/svn_hp-projects | d5547906354e2759a93b8030632128e8c4bf3880 | [
"CC0-1.0"
] | null | null | null | workspaces/voyage/server/src/core/rediscover.py | raychorn/svn_hp-projects | d5547906354e2759a93b8030632128e8c4bf3880 | [
"CC0-1.0"
] | null | null | null | workspaces/voyage/server/src/core/rediscover.py | raychorn/svn_hp-projects | d5547906354e2759a93b8030632128e8c4bf3880 | [
"CC0-1.0"
] | null | null | null | '''
Created on Oct 18, 2011
@author: IslamM
'''
from logging import getLogger
log = getLogger(__name__)
from util import catalog
from util.config import config
from portlets.collector import Collector, remove_collector
from core.uim import UIManager
from urlparse import urlparse
from threading import Thread
import time
class RediscoverNode:
def __init__(self, server=True, ilo=True, oa=True, vcm=True):
self.rediscover_server = server
self.rediscover_ilo = ilo
self.rediscover_oa = oa
self.rediscover_vcm = vcm
self.task_name = 'Rediscover Node'
self.taskid = None
self.dc = Collector.get_collector()
self.start_time = time.time()
self.user = self.dc.vcntr.get_current_session_username(self.dc.sessionId)
self.objpath = 'moref=' + self.dc.moref + '&' + 'serverGuid=' + self.dc.serverGuid
self.uim_url = urlparse(config().get_uim_root())
self.uimgr = UIManager(port=self.uim_url.port, protocol=self.uim_url.scheme )
self.desc = 'Rediscover node triggered by ' + self.user
def get_devices(self):
nodes = {}
if self.dc.server:
nodes['server'] = True
if self.dc.ilo:
nodes['ilo'] = True
if self.dc.oa:
nodes['oa'] = True
if self.dc.vcm:
nodes['vcm'] = True
return nodes
def start_rediscovery(self):
t = Thread(target=self.rediscover, name="Run Node Rediscovery")
t.daemon = True
t.start()
def rediscover(self):
self.taskid = self.uimgr.create_task(self.objpath, self.task_name, [], self.start_time, "RUNNING", self.user, self.desc, [])
#print self.dc.csce.entities
try:
if self.rediscover_server and self.dc.server:
ip = self.dc.server.get('address', {}).get('ipv4','')
if ip:
self.dc.csce.delete_entity('server', self.dc.server['uuid'])
self.dc.csce.entities[ip].discovered = False
except:
log.exception('Error rediscovering server')
try:
if self.rediscover_ilo and self.dc.ilo:
ip = self.dc.ilo.get('address', {}).get('ipv4','')
if ip:
self.dc.csce.delete_entity('ilo', self.dc.ilo['uuid'])
self.dc.csce.entities[ip].discovered = False
except:
log.exception('Error rediscovering ilo')
try:
if self.rediscover_oa and self.dc.oa:
ip = self.dc.oa.get('address', {}).get('ipv4','')
if ip:
self.dc.csce.delete_entity('oa', self.dc.oa['uuid'])
self.dc.csce.entities[ip].discovered = False
except:
log.exception('Error rediscovering oa')
try:
if self.rediscover_vcm and self.dc.vcm:
ip = self.dc.vcm.get('address', {}).get('ipv4','')
if ip:
self.dc.csce.delete_entity('vcm', self.dc.vcm['uuid'])
self.dc.csce.entities[ip].discovered = False
except:
log.exception('Error rediscovering vcm')
time.sleep(30)
self.dc.csce.start_cs_discovery()
self.uimgr.post_event(self.objpath, self.desc, 'User Action', 'INFORMATION', time.time())
self.uimgr.update_task(self.objpath, self.taskid['ids'][0], self.task_name, [], self.start_time, time.time(), "COMPLETED", self.user, self.desc, [])
remove_collector( '%s:%s' %(self.dc.sessionId, self.dc.moref) )
def fail_task(self):
if self.taskid:
self.uimgr.update_task(self.objpath, self.taskid['ids'][0], self.task_name, [], self.start_time, time.time(), "ERROR", self.user, self.desc, [])
| 39.626263 | 158 | 0.567678 | 476 | 3,923 | 4.577731 | 0.218487 | 0.090867 | 0.045893 | 0.041303 | 0.308398 | 0.298761 | 0.287288 | 0.287288 | 0.287288 | 0.287288 | 0 | 0.005109 | 0.301555 | 3,923 | 99 | 159 | 39.626263 | 0.790146 | 0.017334 | 0 | 0.197531 | 0 | 0 | 0.0848 | 0 | 0.012346 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0 | 0.098765 | 0 | 0.185185 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63bd3e2bbbdf441ba8d0ab08f9a5c6b52a9e7b44 | 13,798 | py | Python | sem/__init__.py | YoannDupont/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 22 | 2016-11-13T21:08:58.000Z | 2021-04-26T07:04:54.000Z | sem/__init__.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 15 | 2016-11-15T10:21:07.000Z | 2021-11-08T10:08:05.000Z | sem/__init__.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 8 | 2016-11-15T10:21:41.000Z | 2022-03-04T21:28:05.000Z | #-*- encoding: utf-8-*-
"""
file: __init__.py
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import os.path
import argparse
from os.path import dirname, abspath, join, expanduser
import platform
SYSTEM = platform.system().lower()
ON_WINDOWS = (SYSTEM == "windows")
PY2 = sys.version_info.major == 2
if PY2:
SEM_HOME = dirname(abspath(__file__)).decode(sys.getfilesystemencoding())
else:
SEM_HOME = dirname(abspath(__file__))
SEM_DATA_DIR = join(expanduser(u"~"), u"sem_data")
SEM_RESOURCE_DIR = join(SEM_DATA_DIR, u"resources")
SEM_EXT_DIR = join(SEM_DATA_DIR, u"ext")
SEM_HOMEPAGE = u"http://www.lattice.cnrs.fr/sites/itellier/SEM.html"
argument_parser = argparse.ArgumentParser()
argument_subparsers = argument_parser.add_subparsers()
_name = u"SEM"
u"""
The name of the software. Obviously, it is SEM.
"""
_version_major = 3
u"""
The major version number.
Is only incremented when deep changes (that usually lead to a change of how the whole software is used) are made to the program.
Such changes include various feature additions / deletions / modifications, source code reorganisation and so on.
On a more NLP side, such changes could also include a change in corpora used in learning (if going from proprietary to free for example).
If this number is incremented, _version_minor and _version_patch are to be reseted to 0.
"""
_version_minor = 3
u"""
The minor version number.
Is only incremented when medium changes are made to the program.
Such changes include feature addition / deletion, creation of a new language entry for manual.
If this number is incremented, _version_patch is to be reseted to 0.
"""
_version_patch = 0
u"""
The patch version number.
Is only incremented when shallow changes are made to the program.
Such changes include bug correction, typo correction and any modification to existing manual(s) are made.
On a more NLP side, such changes would also include model changes.
"""
_main_features = [
[u"A GUI for manual annotation (requires TkInter)",
[
u'from terminal: run ```python -m sem annotation_gui```',
u'fast annotation: keyboard shortcuts and document-wide annotation broadcast',
u'can load pre-annotated files',
u'support for hierarchical tags (dot-separated, eg: "noun.common")',
u'handles multiple input format',
u'export in different formats'
]
],
[u"A GUI for easier use (requires TkInter)",
[
u'on Linux: double-clic on sem_gui.sh',
u'on Windows: double-clic on sem_gui.bat',
u'from terminal: run ```python -m sem gui```',
]
],
[u"segmentation",
[
u"segmentation for: French, English",
u"easy creation and integration of new tokenisers"
]
],
[
u"feature generation",
[
u"XML file to write features without coding them",
u"single-token and multi-token dictionary features",
u"Regular expression features",
u"sequenced features",
u"train/label mode",
u"display option for features that are useful for generation, but not needed in output"
]
],
[u"exporting output",
[
u"supported export formats: CoNLL, text, HTML (from plain text), two XML-TEI (one specific to NP-chunks and another one for the rest)",
u"easy creation and integration of new exporters"
]
],
[u"extension of existing features",
[
u"automatic integration of new segmenters and exporters",
u"semi automatic integration of new feature functions",
u"easy creation of new CSS formats for HTML exports"
]
]
]
_first_steps = [
[u"install SEM",
[
u"see [install.md](install.md)",
u"It will compile Wapiti and create necessary directories. Currently, SEM datas are located in ```~/sem_data```"
],
],
[u"run tests",
["run ```python -m sem --test``` in a terminal"]
],
[u"run SEM",
[
'run GUI (see "main features" above) and annotate "non-regression/fr/in/segmentation.txt"',
'or run: ```python -m sem tagger resources/master/fr/NER.xml ./non-regression/fr/in/segmentation.txt -o sem_output```'
]
]
]
_external_resources = [
[u"[French Treebank](http://www.llf.cnrs.fr/fr/Gens/Abeille/French-Treebank-fr.php) by [Abeillé et al. (2003)](http://link.springer.com/chapter/10.1007%2F978-94-010-0201-1_10): corpus used for POS and chunking.", []],
[u"NER annotated French Treebank by [Sagot et al. (2012)](https://halshs.archives-ouvertes.fr/file/index/docid/703108/filename/taln12ftbne.pdf): corpus used for NER.", []],
[u"[Lexique des Formes Fléchies du Français (LeFFF)](http://alpage.inria.fr/~sagot/lefff.html) by [Clément et al. (2004)](http://www.labri.fr/perso/clement/lefff/public/lrec04ClementLangSagot-1.0.pdf): french lexicon of inflected forms with various informations, such as their POS tag and lemmatization.", []],
[u"[Wapiti](http://wapiti.limsi.fr) by [Lavergne et al. (2010)](http://www.aclweb.org/anthology/P10-1052): linear-chain CRF library.", []],
[u"[setuptools](https://pypi.python.org/pypi/setuptools): to install SEM.", []],
[u"[Tkinter](https://wiki.python.org/moin/TkInter): for GUI modules (they will not be installed if Tkinter is not present).", []],
[u"Windows only: [MinGW64](https://sourceforge.net/projects/mingw-w64/?source=navbar): used to compile Wapiti on Windows.", []],
[u"Windows only: [POSIX threads for Windows](https://sourceforge.net/p/pthreads4w/wiki/Home/): if you want to multithread Wapiti on Windows.", []],
[u"GUI-specific: [TkInter](https://wiki.python.org/moin/TkInter): if you want to launch SEM's GUI.", []]
]
_planned_changes = [
[u'Add a tutorial. Some of it done in section "retrain SEM" in manual.', []],
[u"add lemmatiser.", []],
[u'have more unit tests', []],
[
"improve segmentation",
[
'handle URLs starting with country indicator (ex: "en.wikipedia.org")',
'handle URLs starting with subdomain (ex: "blog.[...]")',
]
]
]
_references = [
[u'[DUPONT, Yoann et PLANCQ, Clément. Un étiqueteur en ligne du Français. session démonstration de TALN-RECITAL, 2017, p. 15.](http://taln2017.cnrs.fr/wp-content/uploads/2017/06/actes_TALN_2017-vol3.pdf#page=25)',
[
u"Online interface"
]
],
[u'(best RECITAL paper award) [DUPONT, Yoann. Exploration de traits pour la reconnaissance d’entités nommées du Français par apprentissage automatique. RECITAL, 2017, p. 42.](http://taln2017.cnrs.fr/wp-content/uploads/2017/06/actes_RECITAL_2017.pdf#page=52)',
[
u"Named Entity Recognition (new, please use this one)"
]
],
[u"[TELLIER, Isabelle, DUCHIER, Denys, ESHKOL, Iris, et al. Apprentissage automatique d'un chunker pour le français. In : TALN2012. 2012. p. 431–438.](https://hal.archives-ouvertes.fr/hal-01174591/document)",
[
"Chunking"
]
],
[u"[TELLIER, Isabelle, DUPONT, Yoann, et COURMET, Arnaud. Un segmenteur-étiqueteur et un chunker pour le français. JEP-TALN-RECITAL 2012](http://anthology.aclweb.org/F/F12/F12-5.pdf#page=27)",
[
u"Part-Of-Speech Tagging",
u"chunking"
]
],
[u"[DUPONT, Yoann et TELLIER, Isabelle. Un reconnaisseur d’entités nommées du Français. session démonstration de TALN, 2014, p. 40.](http://www.aclweb.org/anthology/F/F14/F14-3.pdf#page=42)",
[
u"Named Entity Recognition (old, please do not use)"
]
],
]
_bibtex = [
[u"""\n```latex
@inproceedings{dupont2017etiqueteur,
title={Un {\'e}tiqueteur en ligne du fran{\c{c}}ais},
author={Dupont, Yoann and Plancq, Cl{\'e}ment},
booktitle={24e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles (TALN)},
pages={15--16},
year={2017}
}
```""", []
],
[u"""\n```latex
@inproceedings{dupont2018exploration,
title={Exploration de traits pour la reconnaissance d’entit{\'e}s nomm{\'e}es du Fran{\c{c}}ais par apprentissage automatique},
author={Dupont, Yoann},
booktitle={24e Conf{\'e}rence sur le Traitement Automatique des Langues Naturelles (TALN)},
pages={42},
year={2018}
}
```""", []
],
[u"""\n```latex
@inproceedings{tellier2012apprentissage,
title={Apprentissage automatique d'un chunker pour le fran{\c{c}}ais},
author={Tellier, Isabelle and Duchier, Denys and Eshkol, Iris and Courmet, Arnaud and Martinet, Mathieu},
booktitle={TALN2012},
volume={2},
pages={431--438},
year={2012}
}
```""", []
],
[u"""\n```latex
@inproceedings{tellier2012segmenteur,
title={Un segmenteur-{\'e}tiqueteur et un chunker pour le fran{\c{c}}ais (A Segmenter-POS Labeller and a Chunker for French)[in French]},
author={Tellier, Isabelle and Dupont, Yoann and Courmet, Arnaud},
booktitle={Proceedings of the Joint Conference JEP-TALN-RECITAL 2012, volume 5: Software Demonstrations},
pages={7--8},
year={2012}
}
```""", []
],
[u"""\n```latex
@article{dupont2014reconnaisseur,
title={Un reconnaisseur d’entit{\'e}s nomm{\'e}es du Fran{\c{c}}ais (A Named Entity recognizer for French)[in French]},
author={Dupont, Yoann and Tellier, Isabelle},
journal={Proceedings of TALN 2014 (Volume 3: System Demonstrations)},
volume={3},
pages={40--41},
year={2014}
}
```""", []
],
]
def name():
return _name
def version():
return u".".join([str(x) for x in [_version_major, _version_minor, _version_patch]])
def full_name():
return u"{0} v{1}".format(name(), version())
def informations():
def make_md(element_list):
accumulator = []
for i_index, element in enumerate(element_list, 1):
accumulator.append(u"{0}. {1}".format(i_index, element[0]))
for ii_index, subelement in enumerate(element[1], 1):
accumulator.append(u" {0}. {1}".format(ii_index, subelement))
return u"\n".join(accumulator)
return u"""# {0}
[SEM (Segmenteur-Étiqueteur Markovien)]({1}) is a free NLP tool relying on Machine Learning technologies, especially CRFs. SEM provides powerful and configurable preprocessing and postprocessing. [SEM also has an online version](http://apps.lattice.cnrs.fr/sem/index).
## Main SEM features
{2}
## First steps with SEM
{3}
## External resources used by SEM
{4}
## Planned changes (for latest changes, see changelog.md)
{5}
## SEM references (with task[s] of interest)
{6}
## SEM references (bibtex format)
{7}""".format(full_name(), SEM_HOMEPAGE, make_md(_main_features), make_md(_first_steps), make_md(_external_resources), make_md(_planned_changes), make_md(_references), make_md(_bibtex))
| 46.302013 | 334 | 0.582983 | 1,656 | 13,798 | 4.798913 | 0.355676 | 0.012458 | 0.004404 | 0.005663 | 0.200831 | 0.161696 | 0.113879 | 0.059016 | 0.04832 | 0.037247 | 0 | 0.02731 | 0.307363 | 13,798 | 297 | 335 | 46.457912 | 0.804123 | 0.082041 | 0 | 0.142222 | 0 | 0.093333 | 0.578592 | 0.036573 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.022222 | 0.013333 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63c2b7aa2638b5f2da8a026a57c38590a6974584 | 6,539 | py | Python | d_parser/d_spider_29dui.py | Holovin/D_GrabDemo | 6adb03fb42ae03e7896eb2eacb342cf9660feb92 | [
"MIT"
] | null | null | null | d_parser/d_spider_29dui.py | Holovin/D_GrabDemo | 6adb03fb42ae03e7896eb2eacb342cf9660feb92 | [
"MIT"
] | 2 | 2018-03-28T19:47:46.000Z | 2021-12-13T20:56:31.000Z | d_parser/d_spider_29dui.py | Holovin/D_GrabDemo | 6adb03fb42ae03e7896eb2eacb342cf9660feb92 | [
"MIT"
] | null | null | null | import re
from d_parser.d_spider_common import DSpiderCommon
from d_parser.helpers.re_set import Ree
from helpers.url_generator import UrlGenerator
from d_parser.helpers.stat_counter import StatCounter as SC

VERSION = 29


# Warn: don't remove the `task` argument even where it is unused;
# removing it breaks grab and crashes the spider
# Warn: noinspection PyUnusedLocal
class DSpider(DSpiderCommon):
    # captures the measure unit that follows a leading quantity
    re_unit = re.compile(r'\d+(?P<unit>.+)')

    def __init__(self, thread_number, try_limit=0):
        super().__init__(thread_number, try_limit)

    # parse global list
    def task_initial(self, grab, task):
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)
                return

            # parse cats
            categories_list = grab.doc.select('//div[@class="main-links"]//p[@class="home_subcatalog_links_box"]/a')

            for link in categories_list:
                link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {'SET_PAGE_COUNT': '99999'})
                yield self.do_task('parse_page', link, DSpider.get_next_task_priority(task))

        except Exception as e:
            self.process_error(grab, task, e)

        finally:
            self.process_finally(task)

    # parse page
    def task_parse_page(self, grab, task):
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)
                return

            # parse items links
            items_list = grab.doc.select('//div[@class="tovar-table tovar_basic"]//div[@class="tovar-col tovar2"]/a')

            for link in items_list:
                link = UrlGenerator.get_page_params(self.domain, link.attr('href'), {})
                yield self.do_task('parse_item', link, DSpider.get_next_task_priority(task))

        except Exception as e:
            self.process_error(grab, task, e)

        finally:
            self.process_finally(task)

    # parse single item
    def task_parse_item(self, grab, task):
        try:
            if self.check_body_errors(grab, task):
                yield self.check_errors(task)
                return

            # common block with info
            product_info = grab.doc.select('//div[@itemscope]')

            # parse fields
            # A = name
            product_name = product_info.select('.//h1').text()

            # B = count (quantity)
            # C = status (delivery)
            product_count_string = product_info.select('.//div[@class="popup-in"]/div[3]/p[1]').text('')

            # in stock ('В наличии' means "in stock")
            if product_count_string == 'В наличии':
                product_count = '-1'
                product_status = '0'

            # on request without delivery days ('Под заказ' means "on request")
            elif product_count_string == 'Под заказ':
                product_count = '-1'
                product_status = '-1'

            # on request with days...
            elif product_count_string != '':
                product_count_raw = Ree.extract_int.match(product_count_string)

                if not product_count_raw:
                    self.log_warn(SC.MSG_UNKNOWN_COUNT, f'Wrong delivery date {product_count_string}, skip...', task)
                    return

                product_count_raw = product_count_raw.groupdict()['int']

                # only [1..120] days
                if 1 <= int(product_count_raw) <= 120:
                    product_count = '-1'
                    product_status = product_count_raw
                else:
                    self.log_warn(SC.MSG_UNKNOWN_COUNT, f'Skip delivery date {product_count_string}, skip...', task)
                    return

            else:
                self.log_warn(SC.MSG_UNKNOWN_COUNT, f'Unknown count status {product_count_string}, skip...', task)
                return

            # D = unit (measure) [const if no stock, else parse]
            product_unit_raw = DSpider.re_unit.search(product_info.select('.//div[@class="popup-in"]/div[3]/p[2]').text(''))

            if product_unit_raw:
                product_unit = product_unit_raw.groupdict()['unit'].strip()
            else:
                product_unit = 'ед.'  # 'ед.' is the generic "unit" fallback

            # E = price
            product_price_raw = product_info.select('.//div[@class="popup-in"]//span[@class="price1 bold"]').attr('content', '')

            if not product_price_raw:
                self.log_warn(SC.MSG_UNKNOWN_PRICE, f'Unknown price #1 status {product_price_raw}, skip...', task)
                return

            if not Ree.float.match(product_price_raw):
                self.log_warn(SC.MSG_UNKNOWN_PRICE, f'Unknown price #2 status {product_price_raw}, skip...', task)
                return

            product_price = product_price_raw

            # F = vendor code (sku)
            product_vendor_code = product_info.select('.//span[@itemprop="sku"]').text('')

            # G = vendor (manufacture)
            tab_block = product_info.select('.//div[@id="tab1_"]')
            product_vendor = tab_block.select('.//dd[@itemprop="brand"]').text('')

            # H = photo url
            product_photo_url_raw = product_info.select('.//div[@class="slider-for-cont"]/a[1]').attr('href', '')

            if product_photo_url_raw:
                product_photo_url = UrlGenerator.get_page_params(self.domain, product_photo_url_raw, {})
            else:
                product_photo_url = ''

            # I = description (properties)
            product_description = {}
            table_keys = tab_block.select('.//dt')
            table_values = tab_block.select('.//dd')

            if table_keys and table_values:
                for index, row_keys in enumerate(table_keys):
                    product_description[row_keys.text('')] = table_values[index].text('')

            # ID
            product_id = product_info.select('.//input[@class="submit2 add_to_cart"]').attr('element_id', '')

            # save
            self.result.add({
                'name': product_name,
                'quantity': product_count,
                'delivery': product_status,
                'measure': product_unit,
                'price': product_price,
                'sku': product_vendor_code,
                'manufacture': product_vendor,
                'photo': product_photo_url,
                'id': product_id,
                'properties': product_description
            })

        except Exception as e:
            self.process_error(grab, task, e)

        finally:
            self.process_finally(task)
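# Example (editor's addition): how `re_unit` pulls the measure unit out of a
# quantity string such as '10 шт' ("10 pcs"); the sample string is an
# assumption about the site's markup:
#
#     >>> DSpider.re_unit.search('10 шт').groupdict()['unit'].strip()
#     'шт'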
| 36.327778 | 128 | 0.564765 | 753 | 6,539 | 4.64409 | 0.250996 | 0.061767 | 0.03889 | 0.028596 | 0.419788 | 0.359737 | 0.317415 | 0.287389 | 0.253932 | 0.235059 | 0 | 0.007427 | 0.320538 | 6,539 | 179 | 129 | 36.530726 | 0.779653 | 0.080899 | 0 | 0.330275 | 0 | 0 | 0.147201 | 0.071178 | 0 | 0 | 0 | 0 | 0 | 1 | 0.036697 | false | 0 | 0.045872 | 0 | 0.174312 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63c6855c80193d1b3280b06dc0b0691722907cfc | 1,444 | py | Python | data/process.py | jbchouinard/untitled-word-game | dc4433318d9d811c3e9d47d51934fed608d96522 | [
"MIT"
] | null | null | null | data/process.py | jbchouinard/untitled-word-game | dc4433318d9d811c3e9d47d51934fed608d96522 | [
"MIT"
] | null | null | null | data/process.py | jbchouinard/untitled-word-game | dc4433318d9d811c3e9d47d51934fed608d96522 | [
"MIT"
] | null | null | null | def frequencies():
    # Load a word -> frequency mapping from a tab-separated file.
    freqs = {}
    with open("frequency.txt", "r") as f:
        for line in f:
            word, freq = line.strip().split("\t")
            freq = int(freq)
            freqs[word] = freq
    return freqs


def matches(word, letter_count):
    # A word qualifies if it has exactly `letter_count` characters,
    # all lowercase ASCII letters.
    if len(word) != letter_count:
        return False
    for letter in word:
        if letter < "a" or letter > "z":
            return False
    return True


def make_word_list(in_file, out_file, letter_count):
    freqs = frequencies()
    with open(in_file, "r") as f:
        words = [line.strip() for line in f]
    # Keep qualifying words, most frequent first (unknown words sort last).
    words = [w for w in words if matches(w, letter_count)]
    words.sort(key=lambda w: freqs.get(w, 0), reverse=True)
    with open(out_file, "w") as f:
        f.write("WORDS=[\n")
        for word in words:
            f.write(f"    '{word}',\n")
        f.write("]\n")


if __name__ == "__main__":
    make_word_list("scrabble.txt", "scrabble4.py", 4)
    make_word_list("scrabble.txt", "scrabble5.py", 5)
    make_word_list("scrabble.txt", "scrabble6.py", 6)
    make_word_list("scrabble.txt", "scrabble7.py", 7)
    make_word_list("scrabble.txt", "scrabble8.py", 8)
    make_word_list("dictionary.txt", "dictionary4.py", 4)
    make_word_list("dictionary.txt", "dictionary5.py", 5)
    make_word_list("dictionary.txt", "dictionary6.py", 6)
    make_word_list("dictionary.txt", "dictionary7.py", 7)
    make_word_list("dictionary.txt", "dictionary8.py", 8)
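# Output format (editor's note): each generated module holds one list
# literal, so consumers can simply `from scrabble5 import WORDS`. The words
# below are illustrative; real content depends on frequency.txt:
#
#     WORDS=[
#         'about',
#         'other',
#     ]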
| 32.818182 | 59 | 0.608033 | 208 | 1,444 | 4.038462 | 0.307692 | 0.104762 | 0.157143 | 0.119048 | 0.314286 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019039 | 0.23615 | 1,444 | 43 | 60 | 33.581395 | 0.74252 | 0 | 0 | 0.054054 | 0 | 0 | 0.218144 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63c68621abf624a4f16177142cfd89a3892f13a1 | 1,459 | py | Python | megatron/fp16_deprecated/loss_scaler.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | 2,869 | 2019-03-22T04:45:32.000Z | 2022-03-31T14:47:42.000Z | megatron/fp16_deprecated/loss_scaler.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | 161 | 2019-04-23T21:00:16.000Z | 2022-03-27T15:33:17.000Z | megatron/fp16_deprecated/loss_scaler.py | adammoody/Megatron-DeepSpeed | 972211163608818fe9e5ba821246f18d0a5dc264 | [
"MIT"
] | 567 | 2019-04-05T22:17:47.000Z | 2022-03-31T04:45:25.000Z | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""For backward compatibility, we need the class definitions to deserialize."""
class LossScaler:
    def __init__(self, scale=1):
        self.cur_scale = scale


class DynamicLossScaler:
    def __init__(self,
                 init_scale=2**32,
                 scale_factor=2.,
                 scale_window=1000,
                 min_scale=1,
                 delayed_shift=1,
                 consecutive_hysteresis=False):
        self.cur_scale = init_scale
        self.cur_iter = 0
        self.last_overflow_iter = -1
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self.min_scale = min_scale
        self.delayed_shift = delayed_shift
        self.cur_hysteresis = delayed_shift
        self.consecutive_hysteresis = consecutive_hysteresis
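# Editor's sketch (not part of the original file): these deprecated classes
# only carry state for deserialization. A dynamic loss scaler with this shape
# is typically driven by an update step like the one below; Megatron's live
# implementation may differ (e.g. in how the hysteresis fields are applied):

def _update_scale_sketch(scaler, has_overflow):
    """Shrink the scale on overflow, grow it after a stable window."""
    if has_overflow:
        scaler.cur_scale = max(scaler.cur_scale / scaler.scale_factor,
                               scaler.min_scale)
        scaler.last_overflow_iter = scaler.cur_iter
    elif (scaler.cur_iter - scaler.last_overflow_iter) % scaler.scale_window == 0:
        scaler.cur_scale *= scaler.scale_factor
    scaler.cur_iter += 1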
| 36.475 | 80 | 0.662097 | 186 | 1,459 | 5.021505 | 0.526882 | 0.06424 | 0.027837 | 0.034261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020657 | 0.270048 | 1,459 | 39 | 81 | 37.410256 | 0.856338 | 0.459904 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63c74a7746328169f8af4e57a44eb64b67d34e0f | 3,485 | py | Python | mosfit/modules/transforms/diffusion_csm.py | bxg682/MOSFiT | c65ab943bd0024b3d72c992c01a19f4f82956e98 | [
"MIT"
] | 25 | 2017-05-01T10:26:00.000Z | 2021-12-29T09:36:06.000Z | mosfit/modules/transforms/diffusion_csm.py | bxg682/MOSFiT | c65ab943bd0024b3d72c992c01a19f4f82956e98 | [
"MIT"
] | 156 | 2016-09-19T21:06:42.000Z | 2022-02-07T02:32:39.000Z | mosfit/modules/transforms/diffusion_csm.py | bxg682/MOSFiT | c65ab943bd0024b3d72c992c01a19f4f82956e98 | [
"MIT"
] | 48 | 2016-10-05T20:32:31.000Z | 2022-02-16T16:57:56.000Z | """Definitions for the `DiffusionCSM` class."""
from collections import OrderedDict
import numpy as np
from scipy.interpolate import interp1d
from mosfit.constants import C_CGS, DAY_CGS, M_SUN_CGS, AU_CGS
from mosfit.modules.transforms.transform import Transform
# Important: Only define one ``Module`` class per file.
class DiffusionCSM(Transform):
"""Photon diffusion transform for CSM model."""
N_INT_TIMES = 3000
MIN_LOG_SPACING = -3
def process(self, **kwargs):
"""Process module."""
Transform.process(self, **kwargs)
self._kappa = kwargs[self.key('kappa')]
self._mass = kwargs[self.key('mcsm')] * M_SUN_CGS
self._R0 = kwargs[self.key('r0')] * AU_CGS # AU to cm
self._s = kwargs[self.key('s')]
self._rho = kwargs[self.key('rho')]
self._mejecta = kwargs[self.key('mejecta')] * M_SUN_CGS # Msol to grms
# scaling constant for CSM density profile
self._q = self._rho * self._R0 ** self._s
# outer radius of CSM shell
self._Rcsm = (
(3.0 - self._s) / (4.0 * np.pi * self._q) * self._mass +
self._R0 ** (3.0 - self._s)) ** (1.0 / (3.0 - self._s))
# radius of photosphere (should be within CSM)
self._Rph = abs(
(-2.0 * (1.0 - self._s) / (3.0 * self._kappa * self._q) +
self._Rcsm ** (1.0 - self._s)) ** (1.0 / (1.0 - self._s)))
self._tau_diff = (
self._kappa * self._mass) / (13.8 * C_CGS * self._Rph) / DAY_CGS
# mass of the optically thick CSM (tau > 2/3).
self._Mcsm_th = np.abs(4.0 * np.pi * self._q / (3.0 - self._s) * (
self._Rph**(3.0 - self._s) - self._R0 **
(3.0 - self._s)))
beta = 4. * np.pi ** 3. / 9.
td2 = self._tau_diff**2
td = self._tau_diff
t0 = self._kappa * (self._Mcsm_th) \
/ (beta * C_CGS * self._Rph) / DAY_CGS
new_lums = np.zeros_like(self._times_to_process)
if len(self._dense_times_since_exp) < 2:
return {self.dense_key('luminosities'): new_lums}
min_te = min(self._dense_times_since_exp)
tb = max(0.0, min_te)
linterp = interp1d(
self._dense_times_since_exp, self._dense_luminosities, copy=False,
assume_sorted=True, bounds_error=False, fill_value=0.0)
uniq_times = np.unique(self._times_to_process[
(self._times_to_process >= tb) & (
self._times_to_process <= self._dense_times_since_exp[-1])])
lu = len(uniq_times)
num = int(round(self.N_INT_TIMES / 2.0))
lsp = np.logspace(
np.log10(t0 /
self._dense_times_since_exp[-1]) +
self.MIN_LOG_SPACING, 0, num)
xm = np.unique(np.concatenate((lsp, 1 - lsp)))
int_times = np.clip(
tb + (uniq_times.reshape(lu, 1) - tb) * xm, tb,
self._dense_times_since_exp[-1])
int_times = tb + (uniq_times.reshape(lu, 1) - tb) * xm
int_tes = int_times[:, -1]
int_lums = linterp(int_times) # noqa: F841
int_args = int_lums * np.exp((int_times) / t0)
int_args[np.isnan(int_args)] = 0.0
uniq_lums = np.trapz(int_args, int_times)
uniq_lums*= np.exp(-int_tes/t0)/t0
new_lums = uniq_lums[np.searchsorted(uniq_times,
self._times_to_process)]
return {self.dense_key('luminosities'): new_lums}
| 38.722222 | 79 | 0.573314 | 496 | 3,485 | 3.741935 | 0.27621 | 0.029634 | 0.029095 | 0.022629 | 0.228448 | 0.148168 | 0.06681 | 0.02694 | 0 | 0 | 0 | 0.031868 | 0.288666 | 3,485 | 89 | 80 | 39.157303 | 0.716821 | 0.098709 | 0 | 0.03125 | 0 | 0 | 0.014753 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.078125 | 0 | 0.171875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63c7c198233f90418ed5f82dcfeaec9570780535 | 5,905 | py | Python | preprocess.py | floydhub/textutil-preprocess-text | 9cf4e29004970a45ed2f35c63b8cb3d66ac063a9 | [
"MIT"
] | null | null | null | preprocess.py | floydhub/textutil-preprocess-text | 9cf4e29004970a45ed2f35c63b8cb3d66ac063a9 | [
"MIT"
] | null | null | null | preprocess.py | floydhub/textutil-preprocess-text | 9cf4e29004970a45ed2f35c63b8cb3d66ac063a9 | [
"MIT"
] | null | null | null | """
Preprocess text
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import codecs
import csv
import textacy
def str2bool(val):
"""
Helper method to convert string to bool
"""
if val is None:
return False
val = val.lower().strip()
if val in ['true', 't', 'yes', 'y', '1', 'on']:
return True
elif val in ['false', 'f', 'no', 'n', '0', 'off']:
return False
def main():
"""
Normalizes the contents of a text file using a simple naive normalization scheme.
Designed for English
"""
# Parse command line args
parser = argparse.ArgumentParser(description='Normalize text in the given columns')
parser.add_argument(
'-i', '--input', required=True,
help='Path to input file')
parser.add_argument(
'-c', '--cols', required=True, type=str, default=0,
help='Comma separated list of columns indices to normalize')
parser.add_argument(
'-d', '--delimiter', required=True, default='\t',
help='Column delimiter between row and label')
parser.add_argument(
'-header', '--hasheader', required=False, type=str2bool,
default='False', help='File has header row?')
parser.add_argument('-o', '--output', required=True, help='Path to output file')
# Text preprocess args
parser.add_argument(
'--fix_unicode', required=False, type=str2bool,
default='False', help='if True, fix “broken” unicode such as mojibake and garbled HTML entities')
parser.add_argument(
'--lowercase', required=False, type=str2bool,
default='False', help='if True, all text is lower-cased')
parser.add_argument(
'--transliterate', required=False, type=str2bool,
default='False', help='if True, convert non-ascii characters into their closest ascii equivalents')
parser.add_argument(
'--no_urls', required=False, type=str2bool,
default='False', help='if True, replace all URL strings with ‘URL‘')
parser.add_argument(
'--no_emails', required=False, type=str2bool,
default='False', help='if True, replace all email strings with ‘EMAIL‘')
parser.add_argument(
'--no_phone_numbers', required=False, type=str2bool,
default='False', help='if True, replace all phone number strings with ‘PHONE‘')
parser.add_argument(
'--no_numbers', required=False, type=str2bool,
default='False', help='if True, replace all number-like strings with ‘NUMBER‘')
parser.add_argument(
'--no_currency_symbols', required=False, type=str2bool,
default='False', help='if True, replace all currency symbols with their standard 3-letter abbreviations')
parser.add_argument(
'--no_punct', required=False, type=str2bool,
default='False', help='if True, remove all punctuation (replace with empty string)')
parser.add_argument(
'--no_contractions', required=False, type=str2bool,
default='False', help='if True, replace English contractions with their unshortened forms')
parser.add_argument(
'--no_accents', required=False, type=str2bool,
default='False', help='if True, replace all accented characters with unaccented versions; NB: \
if transliterate is True, this option is redundant')
parser.add_argument(
'--normalize_whitespace', required=False, type=str2bool,
default='False', help='if True, Fix unicode text that’s “broken” using ftfy; this includes \
mojibake, HTML entities and other code cruft, and non-standard forms for display purposes')
args = parser.parse_args()
# Unescape the delimiter
args.delimiter = codecs.decode(args.delimiter, "unicode_escape")
# Parse cols into list of ints
args.cols = [int(x) for x in args.cols.split(',')]
# Convert args to dict
vargs = vars(args)
print("\nArguments:")
for arg in vargs:
print("{}={}".format(arg, getattr(args, arg)))
# Read the input file
with open(args.input, 'r', encoding='iso-8859-1') as inputfile:
with open(args.output, 'w') as outputfile:
reader = csv.reader(inputfile, delimiter=args.delimiter)
writer = csv.writer(outputfile, delimiter=args.delimiter)
# If has header, write it unprocessed
if args.hasheader:
headers = next(reader, None)
if headers:
writer.writerow(headers)
print("\nProcessing input")
for row in reader:
row = [textacy.preprocess_text(col,
fix_unicode=args.fix_unicode,
lowercase=args.lowercase,
transliterate=args.transliterate,
no_urls=args.no_urls,
no_emails=args.no_emails,
no_phone_numbers=args.no_phone_numbers,
no_numbers=args.no_numbers,
no_currency_symbols=args.no_currency_symbols,
no_punct=args.no_punct,
no_contractions=args.no_contractions,
no_accents=args.no_accents)
if idx in args.cols else col for idx, col in enumerate(row)]
if args.normalize_whitespace:
row = [textacy.preprocess.normalize_whitespace(col)
if idx in args.cols else col for idx, col in enumerate(row)]
writer.writerow(row)
print("\nDone. Bye!")
if __name__ == '__main__':
main()
| 43.419118 | 113 | 0.594073 | 672 | 5,905 | 5.114583 | 0.293155 | 0.044516 | 0.084085 | 0.094559 | 0.240326 | 0.227524 | 0.227524 | 0.215595 | 0.215595 | 0.174571 | 0 | 0.005552 | 0.298391 | 5,905 | 135 | 114 | 43.740741 | 0.824041 | 0.056393 | 0 | 0.194175 | 0 | 0 | 0.211079 | 0.007784 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019417 | false | 0 | 0.048544 | 0 | 0.097087 | 0.048544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63c98447c20ad02d24ae44cfb39b2b5fe1101422 | 641 | py | Python | tests/test_performance.py | ramonhagenaars/nptying | 99927e1007b5d5d14fbd57559ec3ad2f7c352478 | [
"MIT"
] | null | null | null | tests/test_performance.py | ramonhagenaars/nptying | 99927e1007b5d5d14fbd57559ec3ad2f7c352478 | [
"MIT"
] | null | null | null | tests/test_performance.py | ramonhagenaars/nptying | 99927e1007b5d5d14fbd57559ec3ad2f7c352478 | [
"MIT"
] | null | null | null | from timeit import Timer
from unittest import TestCase

import numpy as np

from nptyping import (
    Float,
    NDArray,
    Shape,
)


class PerformanceTest(TestCase):
    def test_instance_check_performance(self):
        arr = np.random.randn(42, 42, 3, 5)

        def _check_inst():
            isinstance(arr, NDArray[Shape["A, *, [a, b, c], 5"], Float])

        first_time_sec = Timer(_check_inst).timeit(number=1)
        second_time_sec = Timer(_check_inst).timeit(number=1)

        # The first check may take up to 20 ms; the repeat check must beat
        # it and stay under 0.2 ms.
        self.assertLess(first_time_sec, 0.02)
        self.assertLess(second_time_sec, first_time_sec)
        self.assertLess(second_time_sec, 0.0002)
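# Editor's note: the asserted speedup presumably comes from nptyping caching
# the parsed Shape expression after the first isinstance() check; the exact
# mechanism is internal to nptyping.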
| 23.740741 | 72 | 0.669267 | 88 | 641 | 4.636364 | 0.477273 | 0.102941 | 0.088235 | 0.083333 | 0.29902 | 0.166667 | 0.166667 | 0.166667 | 0 | 0 | 0 | 0.034205 | 0.224649 | 641 | 26 | 73 | 24.653846 | 0.78672 | 0 | 0 | 0 | 0 | 0 | 0.028081 | 0 | 0 | 0 | 0 | 0 | 0.166667 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63ca287602c1af9c49104b0c48b37549ad2032ff | 3,596 | py | Python | __init__.py | rhammond17/Blender2MSFS2 | 51ed5e6624e6e072ee03bb7123013ccce4159fbd | [
"Apache-2.0"
] | 25 | 2021-07-01T04:48:08.000Z | 2022-03-15T00:20:55.000Z | __init__.py | rhammond17/Blender2MSFS2 | 51ed5e6624e6e072ee03bb7123013ccce4159fbd | [
"Apache-2.0"
] | 9 | 2021-10-14T12:58:11.000Z | 2022-03-22T05:10:05.000Z | __init__.py | rhammond17/Blender2MSFS2 | 51ed5e6624e6e072ee03bb7123013ccce4159fbd | [
"Apache-2.0"
] | 4 | 2021-12-18T22:26:00.000Z | 2022-03-16T14:00:27.000Z | ###################################################################################################
#
# Copyright 2020 Otmar Nitsche
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###################################################################################################
bl_info = {
"name" : "MSFSToolkit",
"author" : "Otmar Nitsche",
"description" : "This toolkit prepares your 3D assets to be used for Microsoft Flight Simulator. Copyright (c) 2020 Otmar Nitsche",
"blender" : (2, 82, 3),
"version" : (0, 41, 4),
"location" : "View3D",
"warning" : "This version of the addon is work-in-progress. Don't use it in your active development cycle, as it adds variables and objects to the scene that may cause issues further down the line.",
"category" : "3D View",
"wiki_url": "https://www.fsdeveloper.com/wiki/index.php?title=Blender2MSFS"
}
from . import auto_load
from . func_behavior import *
from . func_xml import *
from . func_properties import *
from . li_material import *
from . li_properties import *
from . ui_materials import *
#from . ui_properties import *
##################################################################################
# Load custom glTF exporter and activate Asobo extensions:
from . exporter import *
from . extensions import *
##################################################################################
auto_load.init()
## class to add the preference settings
class addSettingsPanel(bpy.types.AddonPreferences):
bl_idname = __package__
export_texture_dir: bpy.props.StringProperty (
name = "Default Texture Location",
description = "Default Texture Location",
default = ""
)
export_copyright: bpy.props.StringProperty (
name = "Default Copyright Name",
description = "Default Copyright Name",
default = ""
)
## draw the panel in the addon preferences
def draw(self, context):
layout = self.layout
row = layout.row()
row.label(text="Optional - You can set here the default values. This will be used in the export window", icon='INFO')
box = layout.box()
col = box.column(align = False)
## texture default location
col.prop(self, 'export_texture_dir', expand=False)
## default copyright
col.prop(self, 'export_copyright', expand=False)
def register():
auto_load.register()
from .extensions import register
register()
from .exporter import register
register()
bpy.utils.register_class(addSettingsPanel)
#removed by request of scenery designers.
#bpy.types.Scene.msfs_guid = bpy.props.StringProperty(name="GUID",default="")
def register_panel():
from .extensions import register_panel
register_panel()
def unregister():
#from .extensions import unregister
#unregister()
#from .exporter import unregister
#unregister()
auto_load.unregister()
bpy.utils.unregister_class(addSettingsPanel)
def unregister_panel():
from .extensions import unregister_panel
unregister_panel()
| 32.690909 | 203 | 0.626808 | 411 | 3,596 | 5.403893 | 0.454988 | 0.031517 | 0.045025 | 0.035119 | 0.029716 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008225 | 0.188543 | 3,596 | 109 | 204 | 32.990826 | 0.752913 | 0.267241 | 0 | 0.070175 | 0 | 0.035088 | 0.301651 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.087719 | false | 0 | 0.22807 | 0 | 0.385965 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63ca797f894cb6d4d5eb308094d39bb41a5d11cc | 6,250 | py | Python | bin/calendar_builder/process/read_calc_request_queue.py | rsterbin/liturgicalendar | 9ac1d779cf9bedaaeeb00eea83deff98445d0af2 | [
"MIT"
] | 2 | 2016-09-17T09:03:41.000Z | 2018-02-24T03:09:38.000Z | bin/calendar_builder/process/read_calc_request_queue.py | rsterbin/liturgicalendar | 9ac1d779cf9bedaaeeb00eea83deff98445d0af2 | [
"MIT"
] | null | null | null | bin/calendar_builder/process/read_calc_request_queue.py | rsterbin/liturgicalendar | 9ac1d779cf9bedaaeeb00eea83deff98445d0af2 | [
"MIT"
] | null | null | null | import argparse
import boto3
import datetime
import dateutil
import json
import logging
import signal
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine.url import URL
import sys
import time
from ..fetch.calculated import Calculated
from ..fetch.overrides import Overrides
from ..resolution import Resolution
from ..storage import Storage
# Make sure we have a config
try:
from ..config import config
except IOError:
raise RuntimeError("Cannot find configuration")
def handle_args():
""" Gathers commmand line options and sets up logging according to the verbose param. Returns the parsed args """
parser = argparse.ArgumentParser(description='Checks the queue for new messages and caclulates the calendar as needed')
parser.add_argument('--verbose', '-v', action='count')
args = parser.parse_args()
if args.verbose == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbose == 2:
logging.basicConfig(level=logging.DEBUG)
elif args.verbose >= 3:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
return args
def handle_signal(signal, frame):
"""Finish properly in case of signal"""
logger.info('Stopped')
sys.exit(0)
def db_connect():
""" Performs database connection using database settings from settings.py. Returns sqlalchemy engine instance """
return create_engine(URL(**config['database']))
def fetch_message(sqs, queue_url):
"""Fetches a message from the queue and parses it into a dictionary (keys: ok, start, end, handle)"""
response = sqs.receive_message(
QueueUrl = queue_url,
AttributeNames = [ 'SentTimestamp' ],
MaxNumberOfMessages = 1,
MessageAttributeNames = [ 'All' ],
WaitTimeSeconds = 5
)
parsed = { 'ok': True }
if response.get('Messages'):
m = response.get('Messages')[0]
body = m['Body']
receipt_handle = m['ReceiptHandle']
try:
query = json.loads(body)
except ValueError:
logger.warn('Message recieved was not valid json: ' + body)
if query and 'start' in query and 'end' in query:
dts = dateutil.parser.parse(query['start'])
if dts is None:
logger.warn('Could not parse start date string ' + query['start'])
parsed['ok'] = False
dte = dateutil.parser.parse(query['end'])
if dte is None:
logger.warn('Could not parse end date string ' + query['start'])
parsed['ok'] = False
if parsed['ok']:
logger.info('Received message: ' + dts.isoformat() + ' to ' + dte.isoformat())
parsed['start'] = dts
parsed['end'] = dte
else:
logger.warn('Message recieved was not a valid calc request: ' + body)
parsed['ok'] = False
parsed['handle'] = receipt_handle
return parsed
else:
logger.debug('No message')
return None
def handle_request(message):
"""Looks at the start and end dates and builds as needed"""
if not message['ok']:
logger.error('Cannot handle bad request')
return
engine = db_connect()
Session = sessionmaker(bind=engine)
check_session = Session()
calc = Calculated(check_session)
# Static calendar already calculated? Skip the calculation step
if calc.check_window(message['start'], message['end']):
logger.info('Date range has been calculated')
# No? Calculate each year that we need
else:
logger.info('Start year: ' + str(message['start'].year))
logger.info('End year: ' + str(message['end'].year))
plural = message['start'].year != message['end'].year
if plural:
logger.info('Checking years...')
else:
logger.info('Checking year...')
curr = message['start'].year
found = {}
while curr <= message['end'].year:
found[curr] = calc.check_year(curr)
curr += 1
logger.info('done')
for y in found:
if not found[y]:
# If we need to calculate this year, do so
logger.info('Year ' + str(y) + ': calculating base schedule...')
fetching_session = Session()
resolution = Resolution(fetching_session)
static = resolution.calculate_year(y)
logger.info('done')
# Save what we did
logger.info('Saving the calculated year...')
calc_save_session = Session()
storage = Storage(y, calc_save_session)
storage.save_calculated(static)
calc_save_session.commit()
logger.info('done')
# Load up the requested window
static = calc.load_static_range(message['start'], message['end'])
logger.debug('Loaded the static range')
# Add overrides
fetching_session = Session()
overrides = Overrides(fetching_session)
static.override(overrides.get_range(message['start'], message['end']))
logger.debug('Added overrides')
# Save to the cache
logger.info('Saving the completed year...')
cache_save_session = Session()
storage = Storage(message['start'].year, cache_save_session)
storage.save_cached(static)
cache_save_session.commit()
logger.info('done')
def main():
"""Checks the queue for new messages and caclulates as needed"""
global logger
logger = logging.getLogger(__name__)
logger.info('Reading from the queue...')
args = handle_args()
signal.signal(signal.SIGINT, handle_signal)
sqs = boto3.client('sqs')
queue_url = config['message_queue']['calc_request_url']
while True:
message = fetch_message(sqs, queue_url)
if message is not None:
if message['ok']:
handle_request(message)
sqs.delete_message(
QueueUrl = queue_url,
ReceiptHandle = message['handle']
)
time.sleep(1)
| 32.552083 | 123 | 0.61792 | 721 | 6,250 | 5.278779 | 0.295423 | 0.039411 | 0.024172 | 0.031529 | 0.161324 | 0.106674 | 0.074094 | 0.021545 | 0 | 0 | 0 | 0.002419 | 0.27232 | 6,250 | 191 | 124 | 32.722513 | 0.834433 | 0.11264 | 0 | 0.138889 | 0 | 0 | 0.145686 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.118056 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63cb2c56d23fea40ba77a5d5c270072879d21082 | 2,609 | py | Python | aiotruenas_client/websockets/interfaces.py | colemamd/aiotruenas-client | 26f754fcceadbacbfc87a19465d5a8d035e4bd00 | [
"MIT"
] | 11 | 2020-12-30T00:33:01.000Z | 2022-01-25T07:56:55.000Z | aiotruenas_client/websockets/interfaces.py | colemamd/aiotruenas-client | 26f754fcceadbacbfc87a19465d5a8d035e4bd00 | [
"MIT"
] | 35 | 2020-09-29T07:45:49.000Z | 2022-03-29T15:02:52.000Z | aiotruenas_client/websockets/interfaces.py | colemamd/aiotruenas-client | 26f754fcceadbacbfc87a19465d5a8d035e4bd00 | [
"MIT"
] | 3 | 2020-12-30T18:19:03.000Z | 2021-09-18T17:32:22.000Z | from __future__ import annotations
import asyncio
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from ..job import Job, TJobId
from ..machine import Machine
class StateFetcher(ABC):
@classmethod
@abstractmethod
async def create(
cls,
machine: WebsocketMachine,
) -> StateFetcher:
"""Factory method to create the state fetcher and setup any subscriptions."""
class WebsocketMachine(Machine):
@classmethod
@abstractmethod
async def create(
cls,
host: str,
api_key: Optional[str] = None,
password: Optional[str] = None,
username: Optional[str] = None,
secure: bool = True,
) -> WebsocketMachine:
"""Factory method to create a Websocket-based Machine class.
This method will automatically connect to the remote machine.
Only one of `api_key` or (`password` and `username`) can be provided.
"""
@abstractmethod
async def connect(
self,
host: str,
api_key: Optional[str],
password: Optional[str],
username: Optional[str],
secure: bool,
) -> None:
"""Initializes the connection to the server.
Only one of `api_key` or (`password` and `username`) can be provided.
"""
@abstractmethod
async def close(self) -> None:
"""Closes the conenction to the server."""
@property
@abstractmethod
def closed(self) -> bool:
"""If the connection to the server is closed or not."""
@abstractmethod
async def wait_for_job(self, id: TJobId) -> Job:
"""Wait for the specified Job from the remote machine to complete, and return it."""
@abstractmethod
async def invoke_method(self, method: str, params: List[Any] = []) -> Any:
"""Invokes a method and returns its result.
This should only be used by internal classes to this library.
"""
@abstractmethod
async def subscribe(self, subscriber: Subscriber, name: str) -> asyncio.Queue:
"""Subscribes to a topic and populates a `Queue` of data from it.
This should only be used by internal classes to this library.
"""
@abstractmethod
async def unsubscribe(self, subscriber: Subscriber, name: str) -> None:
"""Unsubscribes from a topic.
This should only be used by internal classes to this library.
"""
class Subscriber(ABC):
@abstractmethod
async def unsubscribe(self) -> None:
"""Called when the connection is closing and the class needs to unsubscribe."""
| 28.358696 | 92 | 0.637026 | 307 | 2,609 | 5.37785 | 0.32899 | 0.103574 | 0.119927 | 0.029073 | 0.379164 | 0.283465 | 0.203513 | 0.203513 | 0.203513 | 0.203513 | 0 | 0 | 0.272135 | 2,609 | 91 | 93 | 28.67033 | 0.869405 | 0.018781 | 0 | 0.367347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0.040816 | 0.122449 | 0 | 0.204082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63cde583e31d98eb0acd90cf10aba92b87ffc53d | 6,409 | py | Python | greparl/SearchEngine/backend/similarity/similarity_manager.py | GeorgeVasiliadis/GreParl | 10c085c9892ff156aeb13401063c9e0e20d0baff | [
"MIT"
] | null | null | null | greparl/SearchEngine/backend/similarity/similarity_manager.py | GeorgeVasiliadis/GreParl | 10c085c9892ff156aeb13401063c9e0e20d0baff | [
"MIT"
] | null | null | null | greparl/SearchEngine/backend/similarity/similarity_manager.py | GeorgeVasiliadis/GreParl | 10c085c9892ff156aeb13401063c9e0e20d0baff | [
"MIT"
] | null | null | null | import pickle
import typing
from dataclasses import dataclass
import numpy as np
@dataclass
class SimilarityMember:
"""
Represents a member of parliament. Contains their name and a list of the parties they have served with
"""
name: str
parties: list[str]
@dataclass
class SimilarityResult:
"""
Represents the results of a similarity query. Contains:
original_member: The member that the similarity query was performed for
similar_members: List containing the members that are most similar to the original_member. Ordered from highest
to lowest similarity.
scores: Contains the similarity score for each member in similar_members
"""
original_member: SimilarityMember
similar_members: list[SimilarityMember]
scores: list[float]
class SimilarityManager:
def __init__(self, matrix_file_name="similarity/matrix.pkl", names_file_name="similarity/names.csv"):
self.similarity_matrix = pickle.load(open(matrix_file_name, "rb"))
# Dictionary matching the name of every member to their line in the similarity matrix
self.names_to_rows = {}
# Dictionary matching the row of every member to their name
self.rows_to_names = {}
# Dictionary matching each row of the similarity matrix to the parties of the given member
self.rows_to_parties = {}
self._initialize_names_and_parties(open(names_file_name, "r", encoding="utf8"))
def _initialize_names_and_parties(self, names_file: typing.IO) -> None:
"""
Reads the names and parties from the given names_file. Populates the names and parties dicts
:param names_file: file
"""
row = 0
while line := names_file.readline():
split = line.split(",")
self.rows_to_names[row] = split[0]
self.names_to_rows[split[0]] = row
self.rows_to_parties[row] = [party_name.removesuffix("\n") for party_name in split[1:]]
row += 1
def get_similarity_between_members(self, member_name1: str, member_name2: str):
self._verify_member_exists(member_name1)
self._verify_member_exists(member_name2)
member_row1 = self.names_to_rows[member_name1]
member_row2 = self.names_to_rows[member_name2]
member1 = SimilarityMember(member_name1, self.rows_to_parties[member_row1])
member2 = SimilarityMember(member_name2, self.rows_to_parties[member_row2])
similarity_score = self.similarity_matrix[member_row1][member_row2]
match = SimilarityResult(original_member=member1, similar_members=[member2], scores=[float(similarity_score)])
return match
def _verify_member_exists(self, member_name):
"""
Checks if the given member exists. If not throws RuntimeError
"""
if member_name not in self.names_to_rows.keys():
raise RuntimeError("Member {} does not exist".format(member_name))
def get_most_similar_to(self, member_name: str, k=10) -> SimilarityResult:
"""
Get the k most similar members to the given member.
:param member_name: search for members similar to this member
:param k: get the k most similar members
:return: the results of the search
"""
self._verify_member_exists(member_name)
member_row = self.names_to_rows[member_name]
member_similarities = self.similarity_matrix[member_row]
# The document with itself has similarity 1, the maximum. So, we need to get the top k+1 similarities and
# then ignore the first.
top_similarities_indexes = np.argpartition(member_similarities, -k - 1)[(-k - 1):]
# https://stackoverflow.com/questions/6910641/how-do-i-get-indices-of-n-maximum-values-in-a-numpy-array
# Sort the indexes according to the scores
top_similarities_indexes = top_similarities_indexes[np.argsort(member_similarities[top_similarities_indexes])]
original_member = SimilarityMember(member_name, self.rows_to_parties[member_row])
similar_members = []
scores = []
# Ignore the most similar document
top_similarities_indexes = top_similarities_indexes[:-1]
for other_member_row in top_similarities_indexes[::-1]:
score = float(member_similarities[other_member_row])
other_member = self._get_member_from_row(other_member_row)
similar_members.append(other_member)
scores.append(score)
return SimilarityResult(original_member, similar_members, scores)
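# For reference, the argpartition top-k idiom used above behaves like this
# (illustrative values):
#   scores = np.array([0.1, 0.9, 0.4, 0.8])
#   top2 = np.argpartition(scores, -2)[-2:]      # indices of the two largest, unordered
#   top2 = top2[np.argsort(scores[top2])]        # ordered ascending by score -> [3, 1]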
def _get_member_from_row(self, row: int) -> SimilarityMember:
"""
Get a SimilarityMember object from a row number
"""
member_name = self.rows_to_names[row]
parties = self.rows_to_parties[row]
return SimilarityMember(member_name, parties)
def get_most_similar(self, k=50) -> list[tuple[SimilarityMember, SimilarityMember, float]]:
"""
Get the k most similar member pairs.
:return list with k elements from most to least similar
"""
# The similarity matrix is symmetric, so we can search through the upper triangular matrix to find the pairs.
# Work on a copy so the stored similarity matrix is not zeroed out in place.
upper_triangular = self.similarity_matrix.copy()
for i in range(0, upper_triangular.shape[1]):
for j in range(0, i + 1):
upper_triangular[i][j] = 0
# Get the k largest elements.
indexes = np.argpartition(-upper_triangular.ravel(), k)[:k]
indexes = np.column_stack(np.unravel_index(indexes, upper_triangular.shape))
# Create a list containing the similarity score of each pair and the indexes of the pairs.
scores_with_indexes = []
for index in indexes:
scores_with_indexes.append([upper_triangular[index[0], index[1]], *index])
# Sort the list according to the scores
scores_with_indexes = np.array(scores_with_indexes)
scores_with_indexes = scores_with_indexes[scores_with_indexes[:, 0].argsort()]
# Assemble the results. Scores with indexes is sorted ascending.
results = []
for score, member_row1, member_row2 in scores_with_indexes[::-1]:
member1 = self._get_member_from_row(member_row1)
member2 = self._get_member_from_row(member_row2)
results.append((member1, member2, score))
return results
| 46.442029 | 118 | 0.688407 | 825 | 6,409 | 5.115152 | 0.210909 | 0.023697 | 0.021327 | 0.021327 | 0.158057 | 0.061137 | 0.016114 | 0.016114 | 0.016114 | 0 | 0 | 0.011786 | 0.232174 | 6,409 | 137 | 119 | 46.781022 | 0.845763 | 0.284444 | 0 | 0.025316 | 0 | 0 | 0.017155 | 0.004803 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088608 | false | 0 | 0.050633 | 0 | 0.291139 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63cea53033390cd1b8049400439d92676e1209c5 | 607 | py | Python | substrabac/libs/versioning.py | GuillaumeCisco/substra-backend | 777ec0cfc10a1aad34cccba449e4923c20786d32 | [
"Apache-2.0"
] | null | null | null | substrabac/libs/versioning.py | GuillaumeCisco/substra-backend | 777ec0cfc10a1aad34cccba449e4923c20786d32 | [
"Apache-2.0"
] | null | null | null | substrabac/libs/versioning.py | GuillaumeCisco/substra-backend | 777ec0cfc10a1aad34cccba449e4923c20786d32 | [
"Apache-2.0"
] | null | null | null | from rest_framework import exceptions
from rest_framework.utils.mediatypes import _MediaType
from rest_framework.versioning import AcceptHeaderVersioning
class AcceptHeaderVersioningRequired(AcceptHeaderVersioning):
def determine_version(self, request, *args, **kwargs):
media_type = _MediaType(request.accepted_media_type)
version = media_type.params.get(self.version_param, None)
if version is None:
raise exceptions.NotAcceptable('A version is required.')
return super(AcceptHeaderVersioningRequired, self).determine_version(request, *args, **kwargs)
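# Example of a request this class accepts (illustrative; "version" is DRF's
# default version_param unless overridden in settings):
#   Accept: application/json; version=1.0
# A request without the version parameter gets a 406 Not Acceptable response.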
| 37.9375 | 102 | 0.7743 | 64 | 607 | 7.15625 | 0.53125 | 0.052402 | 0.111354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153213 | 607 | 15 | 103 | 40.466667 | 0.891051 | 0 | 0 | 0 | 0 | 0 | 0.036244 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63d2d0462174b9a239ed087ce1d36b74f3a8a55d | 1,445 | py | Python | processor/lib/logging.py | Torbay-Tech/sbkup | 5da307db5dc5b47c3ba11b1c4e53113cbc34bfca | [
"0BSD"
] | null | null | null | processor/lib/logging.py | Torbay-Tech/sbkup | 5da307db5dc5b47c3ba11b1c4e53113cbc34bfca | [
"0BSD"
] | null | null | null | processor/lib/logging.py | Torbay-Tech/sbkup | 5da307db5dc5b47c3ba11b1c4e53113cbc34bfca | [
"0BSD"
] | null | null | null | import logging
import os
import sys
from lib.readCfg import ConfigReader
'''
Simple Logger utility
add this line to a py file
log = Logger().getLogger(__file__)
and use the log object by calling
log.error("My error message")
log.info("My info message")
'''
class Logger:
def __init__(self):
readCfg = ConfigReader()
config = readCfg.read_config(['processor/config/application.properties'])
if config is None:
print("Config files not found")
sys.exit()
LOG_FILE_PATH = config.get('common', 'logpath')
# set up logging to file - see previous section for more details
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
filename=LOG_FILE_PATH,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def getLogger(self,fileName):
return logging.getLogger(os.path.basename(fileName)) | 36.125 | 89 | 0.636678 | 174 | 1,445 | 5.212644 | 0.522989 | 0.052922 | 0.024256 | 0.039691 | 0.057332 | 0.057332 | 0 | 0 | 0 | 0 | 0 | 0.00565 | 0.265052 | 1,445 | 40 | 90 | 36.125 | 0.848399 | 0.173702 | 0 | 0 | 0 | 0 | 0.164846 | 0.038729 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.173913 | 0.043478 | 0.347826 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63d2fa5f2f28f8b7f760b3c3cb14ecd0069dc120 | 15,789 | py | Python | Ball-in-Maze/Tut-Ball-in-Maze.py | francholi/PandaExamples | 729674387cc482f25296f08bbb5fb571e59fc4ea | [
"MIT"
] | null | null | null | Ball-in-Maze/Tut-Ball-in-Maze.py | francholi/PandaExamples | 729674387cc482f25296f08bbb5fb571e59fc4ea | [
"MIT"
] | null | null | null | Ball-in-Maze/Tut-Ball-in-Maze.py | francholi/PandaExamples | 729674387cc482f25296f08bbb5fb571e59fc4ea | [
"MIT"
] | null | null | null | # Author: Shao Zhang, Phil Saltzman
# Last Updated: 5/2/2005
#
# This tutorial shows how to detect and respond to collisions. It uses solids
# created in code and the egg files, how to set up collision masks, a traverser,
# and a handler, how to detect collisions, and how to dispatch functions based
# on the collisions. All of this is put together to simulate a labyrinth-style
# game
import direct.directbase.DirectStart
from panda3d.core import CollisionTraverser,CollisionNode
from panda3d.core import CollisionHandlerQueue,CollisionRay
from panda3d.core import Material,LRotationf,NodePath
from panda3d.core import AmbientLight,DirectionalLight
from panda3d.core import TextNode
from panda3d.core import Vec3,Vec4,BitMask32
from direct.gui.OnscreenText import OnscreenText
from direct.showbase.DirectObject import DirectObject
from direct.interval.MetaInterval import Sequence,Parallel
from direct.interval.LerpInterval import LerpFunc
from direct.interval.FunctionInterval import Func,Wait
from direct.task.Task import Task
import sys
# Some constants for the program
ACCEL = 70 # Acceleration in ft/sec/sec
MAX_SPEED = 5 # Max speed in ft/sec
MAX_SPEED_SQ = MAX_SPEED ** 2 # Squared to make it easier to use lengthSquared
# Instead of length
UP = Vec3(0,0,1) # We need this vector a lot, so its better to just have one
# instead of creating a new one every time we need it
class World(DirectObject):
def __init__(self):
# This code puts the standard title and instruction text on screen
self.title = OnscreenText(text="Panda3D: Tutorial - Collision Detection",
style=1, fg=(1,1,1,1),
pos=(0.7,-0.95), scale = .07)
self.instructions = OnscreenText(text="Mouse pointer tilts the board",
pos = (-1.3, .95), fg=(1,1,1,1),
align = TextNode.ALeft, scale = .05)
self.accept("escape", sys.exit) # Escape quits
base.disableMouse() # Disable mouse-based camera control
camera.setPosHpr(0, 0, 25, 0, -90, 0) # Place the camera
# Load the maze and place it in the scene
self.maze = loader.loadModel("models/maze")
self.maze.reparentTo(render)
# Most times, you want collisions to be tested against invisible geometry
# rather than every polygon. This is because testing against every polygon
# in the scene is usually too slow. You can have simplified or approximate
# geometry for the solids and still get good results.
#
# Sometimes you'll want to create and position your own collision solids in
# code, but it's often easier to have them built automatically. This can be
# done by adding special tags into an egg file. Check maze.egg and ball.egg
# and look for lines starting with <Collide>. The part in brackets tells
# Panda exactly what to do. Polyset means to use the polygons in that group
# as solids, while Sphere tells panda to make a collision sphere around them
# Keep means to keep the polygons in the group as visible geometry (good
# for the ball, not for the triggers), and descend means to make sure that
# the settings are applied to any subgroups.
#
# Once we have the collision tags in the models, we can get to them using
# NodePath's find command
# Find the collision node named wall_collide
self.walls = self.maze.find("**/wall_collide")
# Collision objects are sorted using BitMasks. BitMasks are ordinary numbers
# with extra methods for working with them as binary bits. Every collision
# solid has both a from mask and an into mask. Before Panda tests two
# objects, it checks to make sure that the from and into collision masks
# have at least one bit in common. That way things that shouldn't interact
# won't. Normal model nodes have collision masks as well. By default they
# are set to bit 20. If you want to collide against actual visible polygons,
# set a from collide mask to include bit 20
#
# For this example, we will make everything we want the ball to collide with
# include bit 0
self.walls.node().setIntoCollideMask(BitMask32.bit(0))
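# As an illustrative check of the rule above (not part of the tutorial flow),
# a from/into pair is only tested when their masks share at least one bit:
#   (BitMask32.bit(0) & BitMask32.bit(0)).isZero() -> False, so the pair is tested
#   (BitMask32.bit(0) & BitMask32.bit(1)).isZero() -> True, so the pair is skipped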
# CollisionNodes are usually invisible but can be shown. Uncomment the next
# line to see the collision walls
# self.walls.show()
# We will now find the triggers for the holes and set their masks to 0 as
# well. We also set their names to make them easier to identify during
# collisions
self.loseTriggers = []
for i in range(6):
trigger = self.maze.find("**/hole_collide" + str(i))
trigger.node().setIntoCollideMask(BitMask32.bit(0))
trigger.node().setName("loseTrigger")
self.loseTriggers.append(trigger)
# Uncomment this line to see the triggers
# trigger.show()
# Ground_collide is a single polygon on the same plane as the ground in the
# maze. We will use a ray to collide with it so that we will know exactly
# what height to put the ball at every frame. Since this is not something
# that we want the ball itself to collide with, it has a different
# bitmask.
self.mazeGround = self.maze.find("**/ground_collide")
self.mazeGround.node().setIntoCollideMask(BitMask32.bit(1))
# Load the ball and attach it to the scene
# It is on a root dummy node so that we can rotate the ball itself without
# rotating the ray that will be attached to it
self.ballRoot = render.attachNewNode("ballRoot")
self.ball = loader.loadModel("models/ball")
self.ball.reparentTo(self.ballRoot)
# Find the collision sphere for the ball which was created in the egg file
# Notice that it has a from collision mask of bit 0, and an into collision
# mask of no bits. This means that the ball can only cause collisions, not
# be collided into
self.ballSphere = self.ball.find("**/ball")
self.ballSphere.node().setFromCollideMask(BitMask32.bit(0))
self.ballSphere.node().setIntoCollideMask(BitMask32.allOff())
# Now we create a ray to start above the ball and cast down. This is to
# determine the height the ball should be at and the angle the floor is
# tilting. We could have used the sphere around the ball itself, but it
# would not be as reliable
self.ballGroundRay = CollisionRay() # Create the ray
self.ballGroundRay.setOrigin(0,0,10) # Set its origin
self.ballGroundRay.setDirection(0,0,-1) # And its direction
# Collision solids go in CollisionNode
self.ballGroundCol = CollisionNode('groundRay') # Create and name the node
self.ballGroundCol.addSolid(self.ballGroundRay) # Add the ray
self.ballGroundCol.setFromCollideMask(BitMask32.bit(1)) # Set its bitmasks
self.ballGroundCol.setIntoCollideMask(BitMask32.allOff())
# Attach the node to the ballRoot so that the ray is relative to the ball
# (it will always be 10 feet over the ball and point down)
self.ballGroundColNp = self.ballRoot.attachNewNode(self.ballGroundCol)
# Uncomment this line to see the ray
# self.ballGroundColNp.show()
# Finally, we create a CollisionTraverser. CollisionTraversers are what
# do the job of calculating collisions
self.cTrav = CollisionTraverser()
# Collision traversers tell collision handlers about collisions, and then
# the handler decides what to do with the information. We are using a
# CollisionHandlerQueue, which simply creates a list of all of the
# collisions in a given pass. There are more sophisticated handlers like
# one that sends events and another that tries to keep collided objects
# apart, but the results are often better with a simple queue
self.cHandler = CollisionHandlerQueue()
# Now we add the collision nodes that can create a collision to the
# traverser. The traverser will compare these to all others nodes in the
# scene. There is a limit of 32 CollisionNodes per traverser
# We add the collider, and the handler to use as a pair
self.cTrav.addCollider(self.ballSphere, self.cHandler)
self.cTrav.addCollider(self.ballGroundColNp, self.cHandler)
# Collision traversers have a built in tool to help visualize collisions.
# Uncomment the next line to see it.
# self.cTrav.showCollisions(render)
# This section deals with lighting for the ball. Only the ball was lit
# because the maze has static lighting pregenerated by the modeler
ambientLight = AmbientLight("ambientLight")
ambientLight.setColor(Vec4(.55, .55, .55, 1))
directionalLight = DirectionalLight("directionalLight")
directionalLight.setDirection(Vec3(0, 0, -1))
directionalLight.setColor(Vec4(0.375, 0.375, 0.375, 1))
directionalLight.setSpecularColor(Vec4(1, 1, 1, 1))
self.ballRoot.setLight(render.attachNewNode(ambientLight))
self.ballRoot.setLight(render.attachNewNode(directionalLight))
# This section deals with adding a specular highlight to the ball to make
# it look shiny
m = Material()
m.setSpecular(Vec4(1,1,1,1))
m.setShininess(96)
self.ball.setMaterial(m, 1)
# Finally, we call start for more initialization
self.start()
def start(self):
# The maze model also has a locator in it for where to start the ball
# To access it we use the find command
startPos = self.maze.find("**/start").getPos()
self.ballRoot.setPos(startPos) # Set the ball in the starting position
self.ballV = Vec3(0,0,0) # Initial velocity is 0
self.accelV = Vec3(0,0,0) # Initial acceleration is 0
# For a traverser to actually do collisions, you need to call
# traverser.traverse() on a part of the scene. Fortunately, base has a
# task that does this for the entire scene once a frame. This sets up our
# traverser as the one to be called automatically
base.cTrav = self.cTrav
# Create the movement task, but first make sure it is not already running
taskMgr.remove("rollTask")
self.mainLoop = taskMgr.add(self.rollTask, "rollTask")
self.mainLoop.last = 0
# This function handles the collision between the ray and the ground
# Information about the interaction is passed in colEntry
def groundCollideHandler(self, colEntry):
# Set the ball to the appropriate Z value for it to be exactly on the ground
newZ = colEntry.getSurfacePoint(render).getZ()
self.ballRoot.setZ(newZ+.4)
# Find the acceleration direction. First the surface normal is crossed with
# the up vector to get a vector perpendicular to the slope
norm = colEntry.getSurfaceNormal(render)
accelSide = norm.cross(UP)
# Then that vector is crossed with the surface normal to get a vector that
# points down the slope. By getting the acceleration in 3D like this rather
# than in 2D, we reduce the amount of error per-frame, reducing jitter
self.accelV = norm.cross(accelSide)
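# Worked example of the two cross products (illustrative): a floor tilted by t
# about the X axis has normal n = (0, -sin t, cos t). Then n x UP = (-sin t, 0, 0)
# runs along the level direction, and n x (n x UP) = sin t * (0, -cos t, -sin t),
# which points straight down the slope as intended.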
# This function handles the collision between the ball and a wall
def wallCollideHandler(self, colEntry):
# First we calculate some numbers we need to do a reflection
norm = colEntry.getSurfaceNormal(render) * -1 # The normal of the wall
curSpeed = self.ballV.length() # The current speed
inVec = self.ballV / curSpeed # The direction of travel
velAngle = norm.dot(inVec) # Angle of incidence
hitDir = colEntry.getSurfacePoint(render) - self.ballRoot.getPos()
hitDir.normalize()
hitAngle = norm.dot(hitDir) # The angle between the ball and the normal
# Ignore the collision if the ball is either moving away from the wall
# already (so that we don't accidentally send it back into the wall)
# and ignore it if the collision isn't dead-on (to avoid getting caught on
# corners)
if velAngle > 0 and hitAngle > .995:
# Standard reflection equation
reflectVec = (norm * norm.dot(inVec * -1) * 2) + inVec
# This makes the velocity half of what it was if the hit was dead-on
# and nearly exactly what it was if this is a glancing blow
self.ballV = reflectVec * (curSpeed * (((1-velAngle)*.5)+.5))
# Since we have a collision, the ball is already a little bit buried in
# the wall. This calculates a vector needed to move it so that it is
# exactly touching the wall
disp = (colEntry.getSurfacePoint(render) -
colEntry.getInteriorPoint(render))
newPos = self.ballRoot.getPos() + disp
self.ballRoot.setPos(newPos)
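# For reference, the reflection computed above is the standard vector identity
#   r = d - 2 * (d . n) * n
# with d the incoming direction (inVec) and n the wall normal (norm), just
# rearranged into Panda's vector operations.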
# This is the task that deals with making everything interactive
def rollTask(self, task):
# Standard technique for finding the amount of time since the last frame
dt = task.time - task.last
task.last = task.time
# If dt is large, then there has been a hiccup that could cause the ball
# to leave the field if this functions runs, so ignore the frame
if dt > .2: return Task.cont
# The collision handler collects the collisions. We dispatch which function
# to handle the collision based on the name of what was collided into
for i in range(self.cHandler.getNumEntries()):
entry = self.cHandler.getEntry(i)
name = entry.getIntoNode().getName()
if name == "wall_collide": self.wallCollideHandler(entry)
elif name == "ground_collide": self.groundCollideHandler(entry)
elif name == "loseTrigger": self.loseGame(entry)
# Read the mouse position and tilt the maze accordingly
if base.mouseWatcherNode.hasMouse():
mpos = base.mouseWatcherNode.getMouse() # get the mouse position
self.maze.setP(mpos.getY() * -10)
self.maze.setR(mpos.getX() * 10)
# Finally, we move the ball
# Update the velocity based on acceleration
self.ballV += self.accelV * dt * ACCEL
# Clamp the velocity to the maximum speed
if self.ballV.lengthSquared() > MAX_SPEED_SQ:
self.ballV.normalize()
self.ballV *= MAX_SPEED
# Update the position based on the velocity
self.ballRoot.setPos(self.ballRoot.getPos() + (self.ballV * dt))
# This block of code rotates the ball. It uses something called a quaternion
# to rotate the ball around an arbitrary axis. That axis is perpendicular to
# the ball's direction of travel, and the amount has to do with the size of the ball
# This is multiplied on the previous rotation to incrementally turn it.
prevRot = LRotationf(self.ball.getQuat())
axis = UP.cross(self.ballV)
newRot = LRotationf(axis, 45.5 * dt * self.ballV.length())
self.ball.setQuat(prevRot * newRot)
return Task.cont # Continue the task indefinitely
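# (Aside: for rolling without slipping, a ball of radius r turns 360 / (2 * pi * r)
# degrees per unit distance traveled; the 45.5 above is presumably that rate
# pre-computed for this ball model's size.)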
# If the ball hits a hole trigger, then it should fall in the hole.
# This is faked rather than dealing with the actual physics of it.
def loseGame(self, entry):
# The triggers are set up so that the center of the ball should move to the
# collision point to be in the hole
toPos = entry.getInteriorPoint(render)
taskMgr.remove('rollTask') # Stop the maze task
# Move the ball into the hole over a short sequence of time. Then wait a
# second and call start to reset the game
Sequence(
Parallel(
LerpFunc(self.ballRoot.setX, fromData = self.ballRoot.getX(),
toData = toPos.getX(), duration = .1),
LerpFunc(self.ballRoot.setY, fromData = self.ballRoot.getY(),
toData = toPos.getY(), duration = .1),
LerpFunc(self.ballRoot.setZ, fromData = self.ballRoot.getZ(),
toData = self.ballRoot.getZ() - .9, duration = .2)),
Wait(1),
Func(self.start)).start()
# Finally, create an instance of our class and start 3d rendering
w = World()
run()
| 49.495298 | 80 | 0.699981 | 2,275 | 15,789 | 4.85011 | 0.275604 | 0.019666 | 0.002175 | 0.011419 | 0.043321 | 0.016494 | 0.007432 | 0 | 0 | 0 | 0 | 0.013957 | 0.228577 | 15,789 | 318 | 81 | 49.650943 | 0.891954 | 0.535436 | 0 | 0 | 0 | 0 | 0.038365 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.099291 | 0 | 0.156028 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63d4f1afadb072b78a9829303ff58b7feeb731b4 | 1,766 | py | Python | simplectf/__main__.py | alberttxu/simplectf | 719ceccd9c1e5b0b868f2e9a7e342d3fe2a7c664 | [
"MIT"
] | 1 | 2020-10-29T04:43:24.000Z | 2020-10-29T04:43:24.000Z | simplectf/__main__.py | alberttxu/simplectf | 719ceccd9c1e5b0b868f2e9a7e342d3fe2a7c664 | [
"MIT"
] | null | null | null | simplectf/__main__.py | alberttxu/simplectf | 719ceccd9c1e5b0b868f2e9a7e342d3fe2a7c664 | [
"MIT"
] | null | null | null | import argparse
import time
from . import CTFModel, find_ctf
def main():
default_amplitude_contrast = 0.07
parser = argparse.ArgumentParser()
parser.add_argument("mrc", help="input aligned mrc file")
parser.add_argument(
"--pixelsize", type=float, help="pixelsize in angstroms", required=True
)
parser.add_argument(
"--voltage", type=int, help="accelerating voltage in kV", required=True
)
parser.add_argument(
"--cs", type=float, help="spherical abberation in millimeters", required=True
)
parser.add_argument(
"--amplitude_contrast",
type=float,
default=default_amplitude_contrast,
help=f"(default = {default_amplitude_contrast})",
)
parser.add_argument(
"--search-phase",
help="search for an additional phase shift",
action="store_true",
)
args = parser.parse_args()
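# Example invocation (illustrative values):
#   python -m simplectf image.mrc --pixelsize 1.0 --voltage 300 --cs 2.7 --search-phase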
print("creating ctf model")
start_time_model = time.time()
ctf_model = CTFModel(
args.mrc, args.pixelsize, args.voltage, args.cs, args.amplitude_contrast
)
print("searching for optimal values")
start_time_search = time.time()
z1, z2, angle_astig, phase_shift = find_ctf(ctf_model, args.search_phase)
end_time = time.time()
print(f"preprocessing time: {start_time_search - start_time_model:.2f}")
print(f"search time: {end_time - start_time_search:.2f}")
print(f"total time: {end_time - start_time_model:.2f}")
print()
print(f"defocus values: {z1:.4f}, {z2:.4f} microns")
print(f"astigmatism angle: {angle_astig:.2f} degrees")
print(f"phase shift: {phase_shift:.2f} degrees")
print(f"cross correlation: {ctf_model.cross_correlation_score:.6f}")
if __name__ == "__main__":
main() | 32.703704 | 85 | 0.669309 | 223 | 1,766 | 5.071749 | 0.336323 | 0.037135 | 0.090186 | 0.055703 | 0.141468 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010638 | 0.201586 | 1,766 | 54 | 86 | 32.703704 | 0.791489 | 0 | 0 | 0.106383 | 0 | 0 | 0.363328 | 0.074703 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.06383 | 0 | 0.085106 | 0.212766 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
63d5fb092f0cc24e2c27f0886b0cd5ba71e075d6 | 4,033 | py | Python | tests/test_hvac.py | joro75/mytoyota | 405f7d84b3737846124aac6e7692aa6da52838a1 | [
"MIT"
] | null | null | null | tests/test_hvac.py | joro75/mytoyota | 405f7d84b3737846124aac6e7692aa6da52838a1 | [
"MIT"
] | 1 | 2022-02-12T16:46:48.000Z | 2022-02-12T16:46:48.000Z | tests/test_hvac.py | joro75/mytoyota | 405f7d84b3737846124aac6e7692aa6da52838a1 | [
"MIT"
] | null | null | null | """pytest tests for mytoyota.models.hvac.Hvac"""
from mytoyota.models.hvac import Hvac
# pylint: disable=no-self-use
class TestHvac:
"""pytest functions to test Hvac"""
@staticmethod
def _create_example_data():
"""Create hvac with predefined data"""
return Hvac(
{
"currentTemperatureIndication": {
"timestamp": "2020-10-16T03:50:15Z",
"unit": "string",
"value": 22,
},
"targetTemperature": {
"timestamp": "2020-10-16T03:50:15Z",
"unit": "string",
"value": 21,
},
"startedAt": "",
"status": "",
"type": "",
"duration": 1,
"options": {
"frontDefogger": "",
"frontDriverSeatHeater": "",
"frontPassengerSeatHeater": "",
"mirrorHeater": "",
"rearDefogger": "",
"rearDriverSeatHeater": "",
"rearPassengerSeatHeater": "",
"steeringHeater": "",
},
"commandId": "",
}
)
@staticmethod
def _create_example_legacy_data():
"""Create legacy hvac with predefined data"""
return Hvac(
{
"BlowerStatus": 0,
"FrontDefoggerStatus": 0,
"InsideTemperature": 22,
"LatestAcStartTime": "2020-10-16T03:50:15Z",
"RearDefoggerStatus": 0,
"RemoteHvacMode": 0,
"RemoteHvacProhibitionSignal": 1,
"SettingTemperature": 21,
"TemperatureDisplayFlag": 0,
"Temperaturelevel": 29,
},
legacy=True,
)
def test_hvac(self):
"""Test Hvac"""
hvac = self._create_example_data()
assert hvac.legacy is False
assert hvac.current_temperature == 22
assert hvac.target_temperature == 21
assert hvac.started_at == ""
assert hvac.status == ""
assert hvac.type == ""
assert hvac.duration == 1
assert hvac.command_id == ""
assert isinstance(hvac.options, dict)
assert hvac.options == {
"frontDefogger": "",
"frontDriverSeatHeater": "",
"frontPassengerSeatHeater": "",
"mirrorHeater": "",
"rearDefogger": "",
"rearDriverSeatHeater": "",
"rearPassengerSeatHeater": "",
"steeringHeater": "",
}
assert hvac.last_updated == "2020-10-16T03:50:15Z"
assert hvac.front_defogger_is_on is None
assert hvac.rear_defogger_is_on is None
assert hvac.blower_on is None
def test_hvac_legacy(self):
"""Test legacy Hvac"""
hvac = self._create_example_legacy_data()
assert hvac.legacy is True
assert hvac.current_temperature == 22
assert hvac.target_temperature == 21
assert hvac.blower_on == 0
assert hvac.front_defogger_is_on is False
assert hvac.rear_defogger_is_on is False
assert hvac.last_updated is None
assert hvac.started_at is None
assert hvac.status is None
assert hvac.type is None
assert hvac.duration is None
assert hvac.options is None
assert hvac.command_id is None
def test_hvac_no_data(self):
"""Test Hvac with no initialization data"""
hvac = Hvac({})
assert hvac.legacy is False
assert hvac.current_temperature is None
assert hvac.target_temperature is None
assert hvac.started_at is None
assert hvac.status is None
assert hvac.type is None
assert hvac.duration is None
assert hvac.command_id is None
assert hvac.options is None
assert hvac.last_updated is None
| 31.023077 | 60 | 0.521944 | 362 | 4,033 | 5.679558 | 0.245856 | 0.175097 | 0.093385 | 0.124514 | 0.627918 | 0.548638 | 0.503891 | 0.429961 | 0.418288 | 0.171206 | 0 | 0.033107 | 0.378378 | 4,033 | 129 | 61 | 31.263566 | 0.786996 | 0.059261 | 0 | 0.431373 | 0 | 0 | 0.179351 | 0.056679 | 0 | 0 | 0 | 0 | 0.362745 | 1 | 0.04902 | false | 0.039216 | 0.009804 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
891ecd9b4c5792da34f463d0d4884a7fedddb4b9 | 883 | py | Python | Providers/nxOMSAutomationWorker/automationworker/3.x/worker/importHelper.py | chsamala2/PowerShell-DSC-for-Linux | 5cac971f1877dd545c71be50b0ffd29199ac1acc | [
"MIT"
] | 110 | 2019-05-06T21:17:02.000Z | 2022-03-27T12:57:57.000Z | Providers/nxOMSAutomationWorker/automationworker/3.x/worker/importHelper.py | chsamala2/PowerShell-DSC-for-Linux | 5cac971f1877dd545c71be50b0ffd29199ac1acc | [
"MIT"
] | 82 | 2019-05-09T00:41:23.000Z | 2022-03-22T07:35:26.000Z | Providers/nxOMSAutomationWorker/automationworker/3.x/worker/importHelper.py | chsamala2/PowerShell-DSC-for-Linux | 5cac971f1877dd545c71be50b0ffd29199ac1acc | [
"MIT"
] | 65 | 2019-05-21T21:37:26.000Z | 2022-03-19T01:25:38.000Z | #!/usr/bin/env python
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
PY_MAJOR_VERSION = 0
PY_MINOR_VERSION = 1
CONFIGPARSER_KEY = "configparser"
QUEUE_KEY = "queue"
import sys
def install_aliases():
"""
Imports the relevant modules based on the python runtime version.
After calling this method the caller can import the latest packages without having to consider about the
backward compatibility.
Currently supported packages:
1. ConfigParser
2. Queue
"""
# sys.version_info gives numeric components; indexing the version string would
# break on versions like "3.10".
version_info = sys.version_info
if version_info[PY_MAJOR_VERSION] == 2 and version_info[PY_MINOR_VERSION] <= 7:
import ConfigParser
import Queue
sys.modules[CONFIGPARSER_KEY] = ConfigParser
sys.modules[QUEUE_KEY] = Queue
else:
return
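# Typical usage on Python 2 (illustrative): call install_aliases() once at
# startup, after which the Python 3 module names resolve via the sys.modules aliases:
#   install_aliases()
#   import configparser  # resolves to ConfigParser on Python 2.7
#   import queue         # resolves to Queue on Python 2.7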
| 26.757576 | 109 | 0.640997 | 102 | 883 | 5.392157 | 0.558824 | 0.06 | 0.050909 | 0.058182 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008621 | 0.211778 | 883 | 32 | 110 | 27.59375 | 0.781609 | 0.458664 | 0 | 0 | 0 | 0 | 0.038902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8921919e62754476a6b499fb99cccaec5396399a | 416 | py | Python | 01_introductory_problems/10_trailing_zeroes.py | hariharanragothaman/CSES | fa3478a71fbf66f695673e2a644d84084f6a3b90 | [
"MIT"
] | 1 | 2021-06-17T17:14:13.000Z | 2021-06-17T17:14:13.000Z | 01_introductory_problems/10_trailing_zeroes.py | hariharanragothaman/CSES | fa3478a71fbf66f695673e2a644d84084f6a3b90 | [
"MIT"
] | null | null | null | 01_introductory_problems/10_trailing_zeroes.py | hariharanragothaman/CSES | fa3478a71fbf66f695673e2a644d84084f6a3b90 | [
"MIT"
] | null | null | null | """
Calculate the number of trailing zeroes in a factorial
"""
"""
Solution approaches:
> Consider prime factors of n!
- 2's and 5's contribute to zeroes. if we count the number of 2's and 5's we can count zeroes.
- If we can count the number of 5's we should be done - since number of 2's will always be more ni factorial
"""
n = int(input())
count = 0
while n >= 5:
n //= 5
count += n
print(count)
| 18.086957 | 108 | 0.663462 | 77 | 416 | 3.584416 | 0.493506 | 0.115942 | 0.119565 | 0.043478 | 0.050725 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028302 | 0.235577 | 416 | 22 | 109 | 18.909091 | 0.839623 | 0.129808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89248949c4bf2e73af24eb041ea87a139342c58e | 2,530 | py | Python | tests/unit/test_role_repository.py | joeseggie/resourceidea | aae6120e3ec84f3fc7e1ab1bc833ce37bd06685f | [
"MIT"
] | null | null | null | tests/unit/test_role_repository.py | joeseggie/resourceidea | aae6120e3ec84f3fc7e1ab1bc833ce37bd06685f | [
"MIT"
] | 21 | 2019-01-26T20:39:34.000Z | 2019-06-20T10:09:57.000Z | tests/unit/test_role_repository.py | joeseggie/resourceidea | aae6120e3ec84f3fc7e1ab1bc833ce37bd06685f | [
"MIT"
] | null | null | null | from app.role.models.role import Role
from app.role.repositories.role_repository import RoleRepository
def test_update(session):
"""
Test the role repository update function.
"""
# Arrange
test_model = RoleRepository.create(Role(name='Super User'))
test_model_id = test_model.id
update_fields = ('name',)
# Act
result = RoleRepository.update(
test_model_id, update_fields, name='Admin User')
# Assert
assert isinstance(result, Role)
assert result.normalized_name == 'admin-user'
assert result.name == 'Admin User'
def test_get_all(session):
"""
Test role repository get_all function
"""
# Arrange
sort_key = 'name'
sort_order = 'asc'
# Act
result = RoleRepository.get_all(sort_key=sort_key, sort_order=sort_order)
# Assert
assert isinstance(result, list)
def test_get_by_name(session):
"""
Test role repository get_by_name function.
"""
# Arrange
role_name = 'Super User'
normalized_role_name = 'super-user'
RoleRepository.create(
Role(name=role_name, normalized_name=normalized_role_name))
# Act
result = RoleRepository.get_by_name(role_name)
# Assert
assert isinstance(result, Role)
assert result.name == role_name
assert result.normalized_name == normalized_role_name
def test_create(session):
"""
Test role repository create function.
"""
# Arrange
role_name = 'Super User'
normalized_role_name = 'super-user'
# Act
result = RoleRepository.create(
Role(name=role_name, normalized_name=normalized_role_name))
# Assert
assert isinstance(result, Role)
assert result.name == role_name
assert result.normalized_name == normalized_role_name
def test_update_by_id(session):
"""
Test role repository update_by_id function.
"""
# Arrange
role_name = 'New Role'
normalized_role_name = 'new-role'
test_stub = RoleRepository.create(
Role(name=role_name, normalized_name=normalized_role_name))
update_fields = ('name', 'normalized_name')
role_update = 'Role update'
normalized_role_update = 'role-update'
# Act
result = RoleRepository.update_by_id(
model_id=test_stub.id,
fields_for_update=update_fields,
name=role_update,
normalized_name=normalized_role_update
)
# Assert
assert isinstance(result, Role)
assert result.normalized_name == normalized_role_update
assert result.name == role_update
| 25.049505 | 77 | 0.687747 | 306 | 2,530 | 5.408497 | 0.127451 | 0.101511 | 0.087009 | 0.118429 | 0.501511 | 0.467674 | 0.398792 | 0.398792 | 0.398792 | 0.328701 | 0 | 0 | 0.220949 | 2,530 | 100 | 78 | 25.3 | 0.839675 | 0.118577 | 0 | 0.3 | 0 | 0 | 0.069159 | 0 | 0 | 0 | 0 | 0 | 0.26 | 1 | 0.1 | false | 0 | 0.04 | 0 | 0.14 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8925352e9bd235c372978fa797a57bca0150adfc | 7,661 | py | Python | python/craftassist/ttad-annotate/process_results.py | satyamedh/craftassist | d97cbc14bc25149d3ef41737231ab9f3cb7e392a | [
"MIT"
] | 626 | 2019-07-18T18:40:44.000Z | 2022-03-29T17:34:43.000Z | python/craftassist/ttad-annotate/process_results.py | satyamedh/craftassist | d97cbc14bc25149d3ef41737231ab9f3cb7e392a | [
"MIT"
] | 42 | 2019-07-27T11:04:15.000Z | 2021-02-23T03:15:14.000Z | python/craftassist/ttad-annotate/process_results.py | satyamedh/craftassist | d97cbc14bc25149d3ef41737231ab9f3cb7e392a | [
"MIT"
] | 89 | 2019-07-19T15:07:39.000Z | 2022-02-15T18:44:24.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
import csv
import argparse
import json
from collections import defaultdict, Counter
import re
from ttad_annotate import MAX_WORDS
def process_result(full_d):
worker_id = full_d["WorkerId"]
d = with_prefix(full_d, "Answer.root.")
try:
action = d["action"]
except KeyError:
return None, None, None
action_dict = {action: process_dict(with_prefix(d, "action.{}.".format(action)))}
##############
# repeat dict
##############
if d.get("loop") not in [None, "Other"]:
repeat_dict = process_repeat_dict(d)
# Some turkers annotate a repeat dict for a repeat_count of 1.
# Don't include the repeat dict if that's the case
if repeat_dict.get("repeat_count"):
a, b = repeat_dict["repeat_count"]
repeat_count_str = " ".join(
[full_d["Input.word{}".format(x)] for x in range(a, b + 1)]
)
if repeat_count_str not in ("a", "an", "one", "1"):
action_val = list(action_dict.values())[0]
if action_val.get("schematic"):
action_val["schematic"]["repeat"] = repeat_dict
elif action_val.get("action_reference_object"):
action_val["action_reference_object"]["repeat"] = repeat_dict
else:
action_dict["repeat"] = repeat_dict
##################
# post-processing
##################
# Fix Build/Freebuild mismatch
if action_dict.get("Build", {}).get("Freebuild") == "Freebuild":
action_dict["FreeBuild"] = action_dict["Build"]
del action_dict["Build"]
action_dict.get("Build", {}).pop("Freebuild", None)
action_dict.get("FreeBuild", {}).pop("Freebuild", None)
# Fix empty words messing up spans
words = [full_d["Input.word{}".format(x)] for x in range(MAX_WORDS)]
action_dict, words = fix_spans_due_to_empty_words(action_dict, words)
return worker_id, action_dict, words
def process_dict(d):
r = {}
# remove key prefixes
d = remove_key_prefixes(d, ["copy.yes.", "copy.no."])
if "location" in d:
r["location"] = {"location_type": d["location"]}
if r["location"]["location_type"] == "location_reference_object":
r["location"]["location_type"] = "BlockObject"
r["location"]["relative_direction"] = d.get(
"location.location_reference_object.relative_direction"
)
if r["location"]["relative_direction"] in ("EXACT", "NEAR", "Other"):
del r["location"]["relative_direction"]
d["location.location_reference_object.relative_direction"] = None
r["location"].update(process_dict(with_prefix(d, "location.")))
for k, v in d.items():
if (
k == "location"
or k.startswith("location.")
or k == "copy"
or (k == "relative_direction" and v in ("EXACT", "NEAR", "Other"))
):
continue
# handle span
if re.match("[^.]+.span#[0-9]+", k):
prefix, rest = k.split(".", 1)
idx = int(rest.split("#")[-1])
if prefix in r:
a, b = r[prefix]
r[prefix] = [min(a, idx), max(b, idx)] # expand span to include idx
else:
r[prefix] = [idx, idx]
# handle nested dict
elif "." in k:
prefix, rest = k.split(".", 1)
prefix_snake = snake_case(prefix)
r[prefix_snake] = r.get(prefix_snake, {})
r[prefix_snake].update(process_dict(with_prefix(d, prefix + ".")))
# handle const value
else:
r[k] = v
return r
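# For reference (illustrative keys): process_dict turns the flat HIT answers
# into nested dicts, e.g.
#   {"schematic.has_size.span#2": "true", "schematic.has_size.span#4": "true"}
# becomes {"schematic": {"has_size": [2, 4]}}, with span indices merged into a
# [min, max] word range.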
def process_repeat_dict(d):
if d["loop"] == "ntimes":
return {
"repeat_key": "FOR",
"repeat_count": process_dict(with_prefix(d, "loop.ntimes."))["repeat_for"],
}
if d["loop"] == "repeat_all":
return {"repeat_key": "ALL"}
if d["loop"] == "forever":
return {"stop_condition": {"condition_type": "NEVER"}}
raise NotImplementedError("Bad repeat dict option: {}".format(d["loop"]))
def with_prefix(d, prefix):
return {
k.split(prefix)[1]: v
for k, v in d.items()
if k.startswith(prefix) and v not in ("", None, "None")
}
def snake_case(s):
return re.sub("([a-z])([A-Z])", "\\1_\\2", s).lower()
def remove_key_prefixes(d, ps):
d = d.copy()
rm_keys = []
add_items = []
for p in ps:
for k, v in d.items():
if k.startswith(p):
rm_keys.append(k)
add_items.append((k[len(p) :], v))
for k in rm_keys:
del d[k]
for k, v in add_items:
d[k] = v
return d
def fix_spans_due_to_empty_words(action_dict, words):
"""Return modified (action_dict, words)"""
def reduce_span_vals_gte(d, i):
for k, v in d.items():
if type(v) == dict:
reduce_span_vals_gte(v, i)
continue
try:
a, b = v
if a >= i:
a -= 1
if b >= i:
b -= 1
d[k] = [a, b]
except ValueError:
pass
except TypeError:
pass
# remove trailing empty strings
while words[-1] == "":
del words[-1]
# fix span
i = 0
while i < len(words):
if words[i] == "":
reduce_span_vals_gte(action_dict, i)
del words[i]
else:
i += 1
return action_dict, words
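# e.g. (illustrative): words = ["build", "", "a", "house"] with a span of [2, 3]
# becomes words = ["build", "a", "house"] with the span shifted to [1, 2].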
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("results_csv")
parser.add_argument(
"--min-votes", type=int, default=1, help="Required # of same answers, defaults to 2/3"
)
parser.add_argument(
"--only-show-disagreements",
action="store_true",
help="Only show commands that did not meet the --min-votes requirement",
)
parser.add_argument("--debug", action="store_true", help="Show debug information")
parser.add_argument(
"--tsv", action="store_true", help="Show each result with worker id in tsv format"
)
args = parser.parse_args()
result_counts = defaultdict(Counter) # map[command] -> Counter(dict)
with open(args.results_csv, "r") as f:
r = csv.DictReader(f)
for d in r:
command = d["Input.command"]
try:
worker_id, action_dict, words = process_result(d)
except Exception:
continue
if action_dict is None:
continue
command = " ".join(words)
result = json.dumps(action_dict)
result_counts[command][result] += 1
if args.debug:
for k, v in with_prefix(d, "Answer.").items():
print((k, v))
# show each result with worker info
if args.tsv:
print(command, worker_id, result, "", sep="\t")
# results by command
if not args.tsv:
for command, counts in sorted(result_counts.items()):
if not any(v >= args.min_votes for v in counts.values()):
if args.only_show_disagreements:
print(command)
continue
elif args.only_show_disagreements:
continue
print(command)
for result, count in counts.items():
if count >= args.min_votes:
print(result)
print()
| 30.521912 | 94 | 0.531784 | 933 | 7,661 | 4.198285 | 0.21865 | 0.048507 | 0.026806 | 0.010722 | 0.16773 | 0.107991 | 0.059995 | 0.056165 | 0.052081 | 0.038805 | 0 | 0.004268 | 0.327111 | 7,661 | 250 | 95 | 30.644 | 0.755577 | 0.065918 | 0 | 0.147541 | 0 | 0 | 0.163788 | 0.028596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043716 | false | 0.010929 | 0.032787 | 0.010929 | 0.131148 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
892751e84a7f6a1e839a96fb0ef3b05f1a088251 | 9,110 | py | Python | lanzou/gui/others.py | akalasysx/lanzou-gui | 4eea55828954ce372de1491288df44ad279994de | [
"MIT"
] | 1 | 2020-12-29T20:58:39.000Z | 2020-12-29T20:58:39.000Z | lanzou/gui/others.py | akalasysx/lanzou-gui | 4eea55828954ce372de1491288df44ad279994de | [
"MIT"
] | null | null | null | lanzou/gui/others.py | akalasysx/lanzou-gui | 4eea55828954ce372de1491288df44ad279994de | [
"MIT"
] | null | null | null | '''
Re-wrapped custom Qt widgets
'''
import os
from PyQt5.QtCore import Qt, pyqtSignal, QTimer, QSize
from PyQt5.QtGui import QTextDocument, QAbstractTextDocumentLayout, QPalette, QFontMetrics, QIcon
from PyQt5.QtWidgets import (QApplication, QAbstractItemView, QStyle, QListView, QLineEdit, QTableView,
QPushButton, QStyledItemDelegate, QStyleOptionViewItem, QTextEdit, QSizePolicy)
from lanzou.debug import SRC_DIR
def set_file_icon(name):
suffix = name.split(".")[-1]
ico_path = SRC_DIR + f"{suffix}.gif"
if os.path.isfile(ico_path):
return QIcon(ico_path)
else:
return QIcon(SRC_DIR + "file.ico")
class QDoublePushButton(QPushButton):
"""加入了双击事件的按钮"""
doubleClicked = pyqtSignal()
clicked = pyqtSignal()
def __init__(self, *args, **kwargs):
QPushButton.__init__(self, *args, **kwargs)
self.timer = QTimer()
self.timer.setSingleShot(True)
self.timer.timeout.connect(self.clicked.emit)
super().clicked.connect(self.checkDoubleClick)
def checkDoubleClick(self):
if self.timer.isActive():
self.doubleClicked.emit()
self.timer.stop()
else:
self.timer.start(250)
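# How the click/double-click split above works: the first click starts the 250 ms
# single-shot timer, whose timeout emits clicked; a second click inside that
# window finds the timer active, emits doubleClicked and stops the timer, so
# exactly one of the two signals fires per interaction.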
class MyLineEdit(QLineEdit):
"""添加单击事件的输入框,用于设置下载路径"""
clicked = pyqtSignal()
def __init__(self, parent):
super(MyLineEdit, self).__init__(parent)
def mouseReleaseEvent(self, QMouseEvent):
if QMouseEvent.button() == Qt.LeftButton:
self.clicked.emit()
class MyListView(QListView):
"""加入拖拽功能的列表显示器"""
drop_files = pyqtSignal(object)
def __init__(self):
QListView.__init__(self)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
def dragEnterEvent(self, event):
m = event.mimeData()
if m.hasUrls():
for url in m.urls():
if url.isLocalFile():
event.accept()
return
event.ignore()
def dropEvent(self, event):
if event.source():
QListView.dropEvent(self, event)
else:
m = event.mimeData()
if m.hasUrls():
urls = [url.toLocalFile() for url in m.urls() if url.isLocalFile()]
if urls:
self.drop_files.emit(urls)
event.acceptProposedAction()
class AutoResizingTextEdit(QTextEdit):
"""添加单击事件的自动改变大小的文本输入框,用于显示描述与下载直链
https://github.com/cameel/auto-resizing-text-edit
https://gist.github.com/hahastudio/4345418
"""
clicked = pyqtSignal()
editingFinished = pyqtSignal()
def __init__(self, parent=None):
super(AutoResizingTextEdit, self).__init__(parent)
# This seems to have no effect. I have expected that it will cause self.hasHeightForWidth()
# to start returning True, but it hasn't - that's why I hardcoded it to True there anyway.
# I still set it to True in size policy just in case - for consistency.
size_policy = self.sizePolicy()
size_policy.setHeightForWidth(True)
size_policy.setVerticalPolicy(QSizePolicy.Preferred)
self.setSizePolicy(size_policy)
self.textChanged.connect(self.updateGeometry)
self._changed = False
self.setTabChangesFocus(True)
self.textChanged.connect(self._handle_text_changed)
def setMinimumLines(self, num_lines):
""" Sets minimum widget height to a value corresponding to specified number of lines
in the default font. """
self.setMinimumSize(self.minimumSize().width(), self.lineCountToWidgetHeight(num_lines))
def heightForWidth(self, width):
margins = self.contentsMargins()
if width >= margins.left() + margins.right():
document_width = width - margins.left() - margins.right()
else:
# If specified width can't even fit the margin, there's no space left for the document
document_width = 0
# Cloning the whole document only to check its size at different width seems wasteful
# but apparently it's the only and preferred way to do this in Qt >= 4. QTextDocument does not
# provide any means to get height for specified width (as some QWidget subclasses do).
# Neither does QTextEdit. In Qt3 Q3TextEdit had working implementation of heightForWidth()
# but it was allegedly just a hack and was removed.
#
# The performance probably won't be a problem here because the application is meant to
# work with a lot of small notes rather than few big ones. And there's usually only one
# editor that needs to be dynamically resized - the one having focus.
document = self.document().clone()
document.setTextWidth(document_width)
return margins.top() + document.size().height() + margins.bottom()
def sizeHint(self):
original_hint = super(AutoResizingTextEdit, self).sizeHint()
return QSize(original_hint.width(), self.heightForWidth(original_hint.width()))
def mouseReleaseEvent(self, QMouseEvent):
if QMouseEvent.button() == Qt.LeftButton:
if not self.toPlainText():
self.clicked.emit()
def lineCountToWidgetHeight(self, num_lines):
""" Returns the number of pixels corresponding to the height of specified number of lines
in the default font. """
# ASSUMPTION: The document uses only the default font
assert num_lines >= 0
widget_margins = self.contentsMargins()
document_margin = self.document().documentMargin()
font_metrics = QFontMetrics(self.document().defaultFont())
# font_metrics.lineSpacing() is ignored because it seems to be already included in font_metrics.height()
return (
widget_margins.top() +
document_margin +
max(num_lines, 1) * font_metrics.height() +
self.document().documentMargin() +
widget_margins.bottom()
)
def focusOutEvent(self, event):
if self._changed:
self.editingFinished.emit()
super(AutoResizingTextEdit, self).focusOutEvent(event)
def _handle_text_changed(self):
self._changed = True
class TableDelegate(QStyledItemDelegate):
"""Table 富文本"""
def __init__(self, parent=None):
super(TableDelegate, self).__init__(parent)
self.doc = QTextDocument(self)
def paint(self, painter, option, index):
painter.save()
options = QStyleOptionViewItem(option)
self.initStyleOption(options, index)
self.doc.setHtml(options.text)
options.text = "" # 原字符
style = QApplication.style() if options.widget is None else options.widget.style()
style.drawControl(QStyle.CE_ItemViewItem, options, painter)
ctx = QAbstractTextDocumentLayout.PaintContext()
if option.state & QStyle.State_Selected:
ctx.palette.setColor(QPalette.Text, option.palette.color(
QPalette.Active, QPalette.HighlightedText))
else:
ctx.palette.setColor(QPalette.Text, option.palette.color(
QPalette.Active, QPalette.Text))
text_rect = style.subElementRect(QStyle.SE_ItemViewItemText, options)
        vertical_fudge = 3  # empirical offset needed to vertically center the rendered text
        margin = (option.rect.height() - options.fontMetrics.height()) // 2
        margin = margin - vertical_fudge
text_rect.setTop(text_rect.top() + margin)
painter.translate(text_rect.topLeft())
painter.setClipRect(text_rect.translated(-text_rect.topLeft()))
self.doc.documentLayout().draw(painter, ctx)
painter.restore()
def sizeHint(self, option, index):
options = QStyleOptionViewItem(option)
self.initStyleOption(options, index)
self.doc.setHtml(options.text)
self.doc.setTextWidth(options.rect.width())
return QSize(self.doc.idealWidth(), self.doc.size().height())
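# A hedged usage sketch: install the delegate on a view so that HTML in the model's
# display text is rendered instead of shown literally (the view name is illustrative):
#
#     table = QTableView()
#     table.setItemDelegate(TableDelegate(table))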
class MyTableView(QTableView):
"""加入拖拽功能的表格显示器"""
drop_files = pyqtSignal(object)
def __init__(self, parent):
super(MyTableView, self).__init__(parent)
self.setDragDropMode(QAbstractItemView.InternalMove)
self.setDragEnabled(True)
self.setAcceptDrops(True)
self.setDropIndicatorShown(True)
def dragEnterEvent(self, event):
m = event.mimeData()
if m.hasUrls():
for url in m.urls():
if url.isLocalFile():
event.accept()
return
event.ignore()
def dropEvent(self, event):
if event.source():
            QTableView.dropEvent(self, event)  # fix: QListView.dropEvent referenced the wrong base class
else:
m = event.mimeData()
if m.hasUrls():
urls = [url.toLocalFile() for url in m.urls() if url.isLocalFile()]
if urls:
self.drop_files.emit(urls)
event.acceptProposedAction()
| 35.585938 | 112 | 0.641493 | 982 | 9,110 | 5.839104 | 0.329939 | 0.011161 | 0.01151 | 0.011859 | 0.292292 | 0.267178 | 0.24939 | 0.236833 | 0.223579 | 0.200558 | 0 | 0.003264 | 0.260154 | 9,110 | 255 | 113 | 35.72549 | 0.847181 | 0.169923 | 0 | 0.417647 | 0 | 0 | 0.002816 | 0 | 0 | 0 | 0 | 0 | 0.005882 | 1 | 0.129412 | false | 0 | 0.029412 | 0 | 0.282353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89279f2c5ae2941cabd43bcd029a5a17b5f9cae4 | 30,355 | py | Python | fltk/synthpriv/attacks/nasr.py | tudelft-eemcs-dml/fltk-testbed-gr-1 | bbe01d5d82a447ad8b6d9e3f350f831922a8b8b7 | [
"BSD-2-Clause"
] | null | null | null | fltk/synthpriv/attacks/nasr.py | tudelft-eemcs-dml/fltk-testbed-gr-1 | bbe01d5d82a447ad8b6d9e3f350f831922a8b8b7 | [
"BSD-2-Clause"
] | null | null | null | fltk/synthpriv/attacks/nasr.py | tudelft-eemcs-dml/fltk-testbed-gr-1 | bbe01d5d82a447ad8b6d9e3f350f831922a8b8b7 | [
"BSD-2-Clause"
] | null | null | null | """
Membership inference attack based on https://github.com/privacytrustlab/ml_privacy_meter/blob/master/ml_privacy_meter/attack/meminf.py
"""
import datetime
from itertools import zip_longest
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torchextractor as tx
from sklearn.cluster import SpectralClustering
from sklearn.metrics import accuracy_score, auc, roc_curve
from torch import nn
from tqdm import tqdm
class ReshapeFCForGradConv(nn.Module):
def forward(self, x):
if x.dim() == 3:
return x[:, None, ...] # add channel dimension
if x.dim() == 4:
return x
else:
raise Exception("Only 3D and 4D inputs are supported to gradient convolution modules!")
class Print(nn.Module):
def forward(self, x):
print(x.shape)
return x
def init_weights(m):
if type(m) == nn.Linear or type(m) == nn.Conv2d:
nn.init.normal_(m.weight, mean=0, std=0.01)
m.bias.data.fill_(0)
def fcn_module(inputsize, layer_size=128):
"""
Creates a FCN submodule. Used in different attack components.
"""
fcn = nn.Sequential(
nn.Linear(inputsize, layer_size),
nn.ReLU(),
nn.Linear(layer_size, 64),
nn.ReLU(),
)
fcn.apply(init_weights)
return fcn
def cnn_for_fcn_gradients(input_shape):
"""
Creates a CNN submodule for Linear layer gradients.
"""
dim1, dim2 = input_shape
cnn = nn.Sequential(
ReshapeFCForGradConv(),
nn.Dropout(0.2),
nn.Conv2d(1, 100, kernel_size=(1, dim2)),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(100 * dim1, 2024),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(2024, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
)
cnn.apply(init_weights)
return cnn
def cnn_for_cnn_layeroutputs(input_shape):
"""
Creates a CNN submodule for Conv Layer outputs
"""
print("CNN 4 CNN")
_, c, h, w = input_shape
cnn = nn.Sequential(
nn.Conv2d(c, c, kernel_size=(h, w)),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(c, 1024),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
)
cnn.apply(init_weights)
return cnn
class PermuteCNNGradient(nn.Module):
def forward(self, x):
b, c_out, c_in, k1, k2 = x.shape
return x.reshape(b, c_out, c_in, k1 * k2).permute(0, 2, 1, 3)
def cnn_for_cnn_gradients(input_shape):
"""
Creates a CNN submodule for Conv layer gradients
"""
print("CNN 4 CNN grads")
c_out, c_in, k1, k2 = input_shape
cnn = nn.Sequential(
PermuteCNNGradient(),
nn.Conv2d(c_in, c_out, kernel_size=(c_out, k1 * k2)),
nn.ReLU(),
nn.Flatten(),
nn.Dropout(0.2),
nn.Linear(c_out, 64),
nn.ReLU(),
)
cnn.apply(init_weights)
return cnn
def transpose(l):
return list(map(list, zip_longest(*l, fillvalue=None)))
# Decide what attack component (FCN or CNN) to use on the basis of the layer name.
# CNN_COMPONENTS_LIST are the layers requiring each input in 3 dimensions.
# GRAD_COMPONENTS_LIST are the layers which have trainable components for computing gradients
CNN_COMPONENT_LIST = ["Conv", "MaxPool"]
GRAD_LAYERS_LIST = ["Conv", "Linear"]
class NasrAttack(nn.Module):
"""
This attack was originally proposed by Nasr et al. It exploits one-hot encoding of true labels, loss value,
intermediate layer activations and gradients of intermediate layers of the target model on data points, for training
the attack model to infer membership in training data.
Paper link: https://arxiv.org/abs/1812.00910
Args:
------
device: torch.device() to use for training and testing
target_model: The target classification model that'll be attacked
train_dataloader: Dataloader with samples for training
test_dataloader: Dataloader with samples for testing
layers_to_exploit: a list of integers specifying the indices of layers, whose activations will be exploited by the
attack model. If the list has only a single element and it is equal to the index of last layer,
the attack can be considered as a "blackbox" attack.
gradients_to_exploit: a list of integers specifying the indices of layers whose gradients will be exploited by the
attack model
exploit_loss: boolean; whether to exploit loss value of target model or not
exploit_label: boolean; whether to exploit one-hot encoded labels or not
optimizer: The optimizer for training the attack model
learning_rate: learning rate for training the attack model
epochs: Number of epochs to train the attack model
"""
def __init__(
self,
device,
target_model,
train_dataloader,
test_dataloader,
layers_to_exploit=[],
gradients_to_exploit=[],
exploit_loss=True,
exploit_label=True,
optimizer=torch.optim.Adam,
learning_rate=0.001,
epochs=30,
):
super().__init__()
self.target_model = target_model.requires_grad_(False)
self.device = device
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
self.train_shape = next(iter(self.train_dataloader[0]))[0].shape
self.layers_to_exploit = layers_to_exploit
self.gradients_to_exploit = gradients_to_exploit
self.exploit_loss = exploit_loss
self.exploit_label = exploit_label
self.n_labels = list(target_model.parameters())[-1].shape[0]
self.create_attack_model()
self.optimizer = optimizer(
[p for n, p in self.named_parameters() if not "target_model" in n and not "feature_extractor" in n],
lr=learning_rate,
)
self.epochs = epochs
self.out_name = "_".join(
[
self.__class__.__name__,
self.target_model.__class__.__name__,
f"label={exploit_label}",
f"loss={exploit_loss}",
f"layers={','.join([str(l) for l in layers_to_exploit])}" if layers_to_exploit else "nolayers",
f"gradients={','.join([str(g) for g in gradients_to_exploit])}" if gradients_to_exploit else "nograds",
str(datetime.datetime.now()).replace(" ", "-").split(".")[0],
]
)
def create_attack_model(self):
self.input_modules = nn.ModuleList()
classifier_input_size = 0
if len(self.layers_to_exploit):
layer_names_and_classes = [
(n, m.__class__.__name__)
for i, (n, m) in enumerate(self.target_model.named_modules())
if i in self.layers_to_exploit
]
self.layers_to_exploit, layer_classes = transpose(layer_names_and_classes)
self.intermediate_feature_extractor = tx.Extractor(self.target_model, self.layers_to_exploit)
example = next(iter(self.train_dataloader[0]))[0]
layer_shapes = [v.shape for v in self.intermediate_feature_extractor(example)[1].values()]
for shape, type in zip(layer_shapes, layer_classes):
requires_cnn = map(lambda i: i in type, CNN_COMPONENT_LIST)
if any(requires_cnn):
module = cnn_for_cnn_layeroutputs(shape)
else:
module = fcn_module(shape[1], 100)
self.input_modules.append(module)
classifier_input_size += 64
if len(self.gradients_to_exploit):
layers = list(self.target_model.modules())
self.grad_exploit_layers = []
for l in self.gradients_to_exploit:
layer = layers[l]
assert any(
map(lambda i: i in layer.__class__.__name__, GRAD_LAYERS_LIST)
), f"Only Linear & Conv layers are supported for gradient-based attacks"
requires_cnn = map(lambda i: i in layer.__class__.__name__, CNN_COMPONENT_LIST)
self.grad_exploit_layers.append(layer.weight)
if any(requires_cnn):
module = cnn_for_cnn_gradients(layer.weight.shape)
classifier_input_size += 64
else:
module = cnn_for_fcn_gradients(layer.weight.shape)
classifier_input_size += 256
self.input_modules.append(module)
if self.exploit_loss:
self.input_modules.append(fcn_module(1, 100))
classifier_input_size += 64
if self.exploit_label:
self.input_modules.append(fcn_module(self.n_labels))
classifier_input_size += 64
classifier = nn.Sequential(
nn.Linear(classifier_input_size, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 1),
nn.Sigmoid(),
)
classifier.apply(init_weights)
self.classifier = classifier
# print(self)
def compute_gradients(self, model, features, labels):
gradients = []
model.requires_grad_(True)
logits = model(features)
for l, label in enumerate(labels):
loss = F.cross_entropy(logits[None, l], label[None])
grads = torch.autograd.grad(loss, self.target_model.parameters(), retain_graph=True)
gradients.append(grads)
model.requires_grad_(False)
return gradients
def get_gradient_norms(self, model, features, labels):
return [torch.norm(grads[-1]) for grads in self.compute_gradients(model, features, labels)]
def forward(self, model, features, labels):
i = 0
attack_input = []
if len(self.gradients_to_exploit):
model.requires_grad_(True)
if len(self.layers_to_exploit):
self.logits, intermediate_feature = self.intermediate_feature_extractor(features)
else:
self.logits = model(features)
if len(self.layers_to_exploit):
for layer_output in intermediate_feature.values():
attack_input.append(self.input_modules[i](layer_output))
i += 1
individual_losses = []
for l, label in enumerate(labels):
individual_losses.append(F.cross_entropy(self.logits[None, l], label[None]))
if len(self.gradients_to_exploit):
gradients = [
torch.autograd.grad(loss, self.grad_exploit_layers, retain_graph=True) for loss in individual_losses
]
gradients = [torch.stack(grads) for grads in transpose(gradients)]
for grads in gradients:
attack_input.append(self.input_modules[i](grads))
i += 1
model.requires_grad_(False)
if self.exploit_loss:
self.loss = torch.tensor(individual_losses, device=self.device).mean()[None, None]
loss_feature = self.input_modules[i](self.loss)
loss_feature = torch.tile(loss_feature, (len(features), 1))
attack_input.append(loss_feature)
i += 1
if self.exploit_label:
self.preds = torch.argmax(self.logits, axis=1)
self.preds = F.one_hot(self.preds, num_classes=self.n_labels).float()
attack_input.append(self.input_modules[i](self.preds))
i += 1
return self.classifier(torch.cat(attack_input, axis=1))
def attack_accuracy(self, members, nonmembers):
"""
Computes attack accuracy of the attack model.
"""
preds, targets = [], []
for (membatch, nonmembatch) in zip(members, nonmembers):
mfeatures, mlabels = membatch
nmfeatures, nmlabels = nonmembatch
# Computing the membership probabilities
mprobs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device))
nmprobs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device))
probs = torch.cat((mprobs, nmprobs)).cpu()
target_ones = torch.ones(mprobs.shape, dtype=bool)
target_zeros = torch.zeros(nmprobs.shape, dtype=bool)
target = torch.cat((target_ones, target_zeros))
preds.append(probs > 0.5)
targets.append(target)
return accuracy_score(np.concatenate(preds), np.concatenate(targets))
def train_attack(self):
"""
Trains the attack model
"""
best_state_dict = self.state_dict()
self.to(self.device)
self.input_modules.train()
self.classifier.train()
self.target_model.eval()
try:
self.intermediate_feature_extractor.eval()
except AttributeError:
pass
mtestset, nmtestset = self.test_dataloader
member_loader, nonmember_loader = self.train_dataloader
nmfeat, nmlbl = transpose(nonmember_loader)
        preds = self.target_model(torch.cat(nmfeat).to(self.device)).argmax(dim=1)  # keep a tensor so .cpu() below stays valid
        acc = accuracy_score(np.concatenate(nmlbl), preds.cpu())
print("Target model test accuracy", acc)
best_accuracy = 0
pbar = tqdm(range(self.epochs), desc="Training attack model...")
for e in pbar:
for (mfeatures, mlabels), (nmfeatures, nmlabels) in zip(member_loader, nonmember_loader):
self.optimizer.zero_grad()
moutputs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device))
nmoutputs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device))
memtrue = torch.ones(moutputs.shape, device=self.device)
nonmemtrue = torch.zeros(nmoutputs.shape, device=self.device)
target = torch.cat((memtrue, nonmemtrue))
probs = torch.cat((moutputs, nmoutputs))
attackloss = F.mse_loss(target, probs)
attackloss.backward()
self.optimizer.step()
attack_accuracy = self.attack_accuracy(mtestset, nmtestset)
if attack_accuracy > best_accuracy:
best_accuracy = attack_accuracy
best_state_dict = self.state_dict()
pbar.write(f"Epoch {e} : Attack test accuracy: {attack_accuracy:.3f}, Best accuracy : {best_accuracy:.3f}")
self.out_name += f"_{best_accuracy:.3f}"
self.load_state_dict(best_state_dict)
torch.save(
self.cpu().eval().requires_grad_(False).state_dict(),
f"models/{self.out_name}.pt",
)
def test_attack(self):
"""
Test the attack model on dataset and save plots for visualization.
"""
self.to(self.device)
self.input_modules.eval()
self.classifier.eval()
self.target_model.eval()
try:
self.intermediate_feature_extractor.eval()
except AttributeError:
pass
mtrainset, nmtrainset = self.test_dataloader
mpreds, mlab, nmpreds, nmlab, mfeat, nmfeat, mtrue, nmtrue = [], [], [], [], [], [], [], []
mgradnorm, nmgradnorm = [], []
for (mfeatures, mlabels) in mtrainset:
moutputs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device)).detach()
mpreds.extend(moutputs.cpu().numpy())
mlab.extend(mlabels.cpu().numpy())
mfeat.extend(mfeatures.cpu().numpy())
mtrue.extend(np.ones(moutputs.shape))
if len(self.gradients_to_exploit):
mgradientnorm = self.get_gradient_norms(
self.target_model, mfeatures.to(self.device), mlabels.to(self.device)
)
mgradnorm.extend(mgradientnorm)
for (nmfeatures, nmlabels) in nmtrainset:
nmoutputs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device)).detach()
nmpreds.extend(nmoutputs.cpu().numpy())
nmlab.extend(nmlabels.cpu().numpy())
nmfeat.extend(nmfeatures.cpu().numpy())
nmtrue.extend(np.zeros(nmoutputs.shape))
if len(self.gradients_to_exploit):
nmgradientnorm = self.get_gradient_norms(
self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device)
)
nmgradnorm.extend(nmgradientnorm)
target = np.concatenate((np.concatenate(mtrue), np.concatenate(nmtrue)))
probs = np.concatenate((np.concatenate(mpreds), np.concatenate(nmpreds)))
self.plot(mpreds, nmpreds, target, probs, mlab, nmlab, mgradnorm, nmgradnorm)
def plot(self, mpreds, nmpreds, target, probs, mlab, nmlab, mgradientnorm, nmgradientnorm):
font = {"weight": "bold", "size": 10}
matplotlib.rc("font", **font)
unique_mem_lab = sorted(np.unique(mlab))
unique_nmem_lab = sorted(np.unique(nmlab))
# Creates a histogram for Membership Probability
fig = plt.figure(1)
plt.hist(
np.array(mpreds).flatten(),
color="xkcd:blue",
alpha=0.7,
bins=20,
histtype="bar",
range=(0, 1),
weights=(np.ones_like(mpreds) / len(mpreds)),
label="Training Data (Members)",
)
plt.hist(
np.array(nmpreds).flatten(),
color="xkcd:light blue",
alpha=0.7,
bins=20,
histtype="bar",
range=(0, 1),
weights=(np.ones_like(nmpreds) / len(nmpreds)),
label="Population Data (Non-members)",
)
plt.xlabel("Membership Probability")
plt.ylabel("Fraction")
plt.title("Privacy Risk")
plt.legend(loc="upper left")
plt.savefig(f"output/{self.out_name}_privacy_risk.png")
plt.close()
# Creates ROC curve for membership inference attack
fpr, tpr, _ = roc_curve(target, probs)
roc_auc = auc(fpr, tpr)
plt.title("ROC of Membership Inference Attack")
plt.plot(fpr, tpr, "b", label="AUC = %0.2f" % roc_auc)
plt.legend(loc="lower right")
plt.plot([0, 1], [0, 1], "r--")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.savefig(f"output/{self.out_name}_roc.png")
plt.close()
# Creates plot for gradient norm per label
if len(self.gradients_to_exploit):
xs = []
ys = []
for lab in unique_mem_lab:
gradnorm = []
for l, p in zip(mlab, mgradientnorm):
if l == lab:
gradnorm.append(p.cpu().numpy())
xs.append(lab)
ys.append(np.mean(gradnorm))
plt.plot(xs, ys, "g.", label="Training Data (Members)")
plt.hlines(np.mean(ys), np.min(xs), np.max(xs), color="g", label="Members mean")
xs = []
ys = []
for lab in unique_nmem_lab:
gradnorm = []
for l, p in zip(nmlab, nmgradientnorm):
if l == lab:
gradnorm.append(p.cpu().numpy())
xs.append(lab)
ys.append(np.mean(gradnorm))
plt.plot(xs, ys, "r.", label="Population Data (Non-Members)")
plt.hlines(np.mean(ys), np.min(xs), np.max(xs), color="r", label="Non-members mean")
plt.title("Average Gradient Norms per Label")
plt.xlabel("Label")
plt.ylabel("Average Gradient Norm")
plt.legend(loc="upper left")
plt.savefig(f"output/{self.out_name}_gradient_norm.png")
plt.close()
class UnsupervisedNasrAttack(NasrAttack):
"""
    Unsupervised variant of the attack proposed by Nasr et al. Instead of training on
    labelled member/non-member data, it clusters the attack model's membership scores
    with spectral clustering, and trains a small decoder to reconstruct the loss,
    prediction correctness, uncertainty and gradient norm from the membership probability.
Paper link: https://arxiv.org/abs/1812.00910
Args:
------
device: torch.device() to use for training and testing
target_model: The target classification model that'll be attacked
train_dataloader: Dataloader with samples for training
test_dataloader: Dataloader with samples for testing
layers_to_exploit: a list of integers specifying the indices of layers, whose activations will be exploited by the
attack model. If the list has only a single element and it is equal to the index of last layer,
the attack can be considered as a "blackbox" attack.
gradients_to_exploit: a list of integers specifying the indices of layers whose gradients will be exploited by the
attack model
exploit_loss: boolean; whether to exploit loss value of target model or not
exploit_label: boolean; whether to exploit one-hot encoded labels or not
optimizer: The optimizer for training the attack model
learning_rate: learning rate for training the attack model
epochs: Number of epochs to train the attack model
"""
def __init__(
self,
device,
target_model,
train_dataloader,
test_dataloader,
layers_to_exploit=[],
gradients_to_exploit=[],
exploit_loss=True,
exploit_label=True,
optimizer=torch.optim.Adam,
learning_rate=0.001,
epochs=30,
):
        super().__init__(
            device=device,
            target_model=target_model,
            train_dataloader=train_dataloader,
            test_dataloader=test_dataloader,
            layers_to_exploit=layers_to_exploit,
            gradients_to_exploit=gradients_to_exploit,
            exploit_loss=exploit_loss,
            exploit_label=exploit_label,
            optimizer=optimizer,
            learning_rate=learning_rate,
            epochs=epochs,
        )
        # The decoder is built after super().__init__ (nn.Module forbids assigning
        # sub-modules before Module.__init__ has run); its parameters are registered
        # with the already-constructed optimizer explicitly.
        self.decoder = nn.Sequential(
            nn.Linear(1, 64),
            nn.Dropout(0.2),
            nn.ReLU(),
            nn.Linear(64, 4),
        )
        self.optimizer.add_param_group({"params": self.decoder.parameters()})
def forward(self, model, features, labels):
self.member_prob = super().forward(model, features, labels)
return self.member_prob
def decode(self):
self.target_model.requires_grad_(True)
gradients = torch.autograd.grad(self.loss, self.target_model.parameters())
true_grad_norm = sum(torch.norm(g) for g in gradients)
self.target_model.requires_grad_(False)
        pred_loss, pred_correct, pred_uncertainty, pred_grad_norm = torch.split(
            self.decoder(self.member_prob), 1, dim=-1  # fix: four size-1 chunks, not one chunk of 4
        )
return pred_loss, pred_correct, pred_uncertainty, pred_grad_norm, true_grad_norm
def get_entropy(self, logits):
"""
Calculates the prediction uncertainty
"""
entropyarr = []
for logit in logits:
            predictions = F.softmax(logit[None], dim=-1)  # fix: was torch.F.softmax
            mterm = torch.sum(predictions * torch.log(predictions))
            entropy = (-1 / np.log(self.n_labels)) * mterm  # fix: n_labels is an int, so use np.log
entropyarr.append(entropy)
        return torch.stack(entropyarr)[:, None]  # stacked to match the decoder output shape
def attack_accuracy(self, members, nonmembers):
"""
Computes attack accuracy of the attack model.
"""
preds, targets = [], []
for (membatch, nonmembatch) in zip(members, nonmembers):
mfeatures, mlabels = membatch
nmfeatures, nmlabels = nonmembatch
# Computing the membership probabilities
mprobs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device))
_, _, _, _, mgradnorms = self.decode()
nmprobs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device))
_, _, _, _, nmgradnorms = self.decode()
probs = torch.cat((mprobs, nmprobs)).cpu()
            gradnorms = torch.stack((mgradnorms, nmgradnorms)).cpu()  # stack: both norms are 0-d tensors
pred = SpectralClustering(n_clusters=2).fit_predict(probs)
            # The original condition compared identical expressions and was a no-op.
            # As a hedged heuristic, flip the arbitrary cluster labels when the member
            # batch shows the larger gradient norm (members typically have smaller norms).
            if gradnorms[0] > gradnorms[1]:
                pred = 1 - pred
target_ones = torch.ones(mprobs.shape, dtype=bool)
target_zeros = torch.zeros(nmprobs.shape, dtype=bool)
target = torch.cat((target_ones, target_zeros))
preds.append(pred)
targets.append(target)
return accuracy_score(np.concatenate(preds), np.concatenate(targets))
def train_attack(self):
"""
Trains the attack model
"""
best_state_dict = self.state_dict()
self.to(self.device)
self.input_modules.train()
self.classifier.train()
self.decoder.train()
self.target_model.eval()
try:
self.intermediate_feature_extractor.eval()
except AttributeError:
pass
mtestset, nmtestset = self.test_dataloader
nmfeat, nmlbl = transpose(mtestset + nmtestset)
        preds = self.target_model(torch.cat(nmfeat).to(self.device)).argmax(dim=1)  # keep a tensor so .cpu() below stays valid
        acc = accuracy_score(np.concatenate(nmlbl), preds.cpu())
print("Target model test accuracy", acc)
best_accuracy = 0
pbar = tqdm(range(self.epochs), desc="Training attack model...")
for e in pbar:
for (features, labels) in self.train_dataloader:
self.optimizer.zero_grad()
self.forward(self.target_model, features.to(self.device), labels.to(self.device))
pred_loss, pred_correct, pred_uncertainty, pred_grad_norm, true_grad_norm = self.decode()
                true_correct = (self.preds * F.one_hot(labels.to(self.device), self.n_labels)).sum(-1)  # fix: self.labels was never set
true_uncertainty = self.get_entropy(self.logits)
sum(
[
F.l1_loss(pred_loss, self.loss),
F.l1_loss(pred_correct, true_correct),
F.l1_loss(pred_uncertainty, true_uncertainty),
F.l1_loss(pred_grad_norm, true_grad_norm),
]
).backward()
self.optimizer.step()
attack_accuracy = self.attack_accuracy(mtestset, nmtestset)
if attack_accuracy > best_accuracy:
best_accuracy = attack_accuracy
best_state_dict = self.state_dict()
pbar.write(f"Epoch {e} : Attack test accuracy: {attack_accuracy:.3f}, Best accuracy : {best_accuracy:.3f}")
self.out_name += f"_{best_accuracy:.3f}"
self.load_state_dict(best_state_dict)
torch.save(
self.cpu().eval().requires_grad_(False).state_dict(),
f"models/{self.out_name}.pt",
)
def test_attack(self):
"""
Test the attack model on dataset and save plots for visualization.
"""
self.to(self.device)
self.input_modules.eval()
self.classifier.eval()
self.decoder.eval()
self.target_model.eval()
try:
self.intermediate_feature_extractor.eval()
except AttributeError:
pass
mtrainset, nmtrainset = self.test_dataloader
mpreds, mlab, nmpreds, nmlab, mfeat, nmfeat, mtrue, nmtrue = [], [], [], [], [], [], [], []
mgradnorm, nmgradnorm = [], []
preds, targets = [], []
for (membatch, nonmembatch) in zip(mtrainset, nmtrainset):
mfeatures, mlabels = membatch
nmfeatures, nmlabels = nonmembatch
# Computing the membership probabilities
mprobs = self.forward(self.target_model, mfeatures.to(self.device), mlabels.to(self.device))
_, _, _, _, mgradnorms = self.decode()
nmprobs = self.forward(self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device))
_, _, _, _, nmgradnorms = self.decode()
probs = torch.cat((mprobs, nmprobs)).cpu()
            gradnorms = torch.stack((mgradnorms, nmgradnorms)).cpu()  # stack: both norms are 0-d tensors
pred = SpectralClustering(n_clusters=2).fit_predict(probs)
            # Same hedged heuristic as in attack_accuracy: the original comparison was a
            # no-op; flip the arbitrary cluster labels when the member batch shows the
            # larger gradient norm.
            if gradnorms[0] > gradnorms[1]:
                pred = 1 - pred
target_ones = torch.ones(mprobs.shape, dtype=bool)
target_zeros = torch.zeros(nmprobs.shape, dtype=bool)
target = torch.cat((target_ones, target_zeros))
mpreds.extend(mprobs.cpu().numpy())
mlab.extend(mlabels.cpu().numpy())
mfeat.extend(mfeatures.cpu().numpy())
mtrue.extend(np.ones(mprobs.shape))
nmpreds.extend(nmprobs.cpu().numpy())
nmlab.extend(nmlabels.cpu().numpy())
nmfeat.extend(nmfeatures.cpu().numpy())
nmtrue.extend(np.zeros(nmprobs.shape))
if len(self.gradients_to_exploit):
mgradientnorm = self.get_gradient_norms(
self.target_model, mfeatures.to(self.device), mlabels.to(self.device)
)
mgradnorm.extend(mgradientnorm)
nmgradientnorm = self.get_gradient_norms(
self.target_model, nmfeatures.to(self.device), nmlabels.to(self.device)
)
nmgradnorm.extend(nmgradientnorm)
target = np.concatenate((np.concatenate(mtrue), np.concatenate(nmtrue)))
probs = np.concatenate((np.concatenate(mpreds), np.concatenate(nmpreds)))
self.plot(mpreds, nmpreds, target, probs, mlab, nmlab, mgradnorm, nmgradnorm)
| 37.154223 | 134 | 0.602306 | 3,574 | 30,355 | 4.949916 | 0.127588 | 0.02798 | 0.024419 | 0.013057 | 0.64123 | 0.594087 | 0.546097 | 0.518625 | 0.506359 | 0.489967 | 0 | 0.011299 | 0.291484 | 30,355 | 816 | 135 | 37.199755 | 0.811271 | 0.127887 | 0 | 0.501712 | 0 | 0.003425 | 0.048749 | 0.011439 | 0 | 0 | 0 | 0 | 0.001712 | 1 | 0.042808 | false | 0.006849 | 0.020548 | 0.003425 | 0.101027 | 0.015411 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
892a2f69832448fa92e158619e9bc1ae462b36c7 | 1,183 | py | Python | examples/routes.py | StanislavMitrofanov/fastapi-admin | 844d982c6c80dc7a313e2540cc043d6265646ba6 | [
"Apache-2.0"
] | null | null | null | examples/routes.py | StanislavMitrofanov/fastapi-admin | 844d982c6c80dc7a313e2540cc043d6265646ba6 | [
"Apache-2.0"
] | null | null | null | examples/routes.py | StanislavMitrofanov/fastapi-admin | 844d982c6c80dc7a313e2540cc043d6265646ba6 | [
"Apache-2.0"
] | null | null | null | from fastapi import Depends, HTTPException
from starlette.requests import Request
from starlette.responses import RedirectResponse
from starlette.status import HTTP_303_SEE_OTHER, HTTP_404_NOT_FOUND
from examples.models import Config
from fastapi_admin.app import app
from fastapi_admin.depends import get_resources
from fastapi_admin.template import templates
@app.get("/")
async def home(
request: Request,
resources=Depends(get_resources),
):
return templates.TemplateResponse(
"dashboard.html",
context={
"request": request,
"resources": resources,
"resource_label": "Dashboard",
"page_pre_title": "overview",
"page_title": "Dashboard",
},
)
@app.put("/config/action/switch_status/{config_id}")
async def switch_config_status(request: Request, config_id: int):
config = await Config.get_or_none(pk=config_id)
if not config:
raise HTTPException(status_code=HTTP_404_NOT_FOUND)
config.status = not config.status
await config.save(update_fields=["status"])
return RedirectResponse(url=request.headers.get("referer"), status_code=HTTP_303_SEE_OTHER)
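# A hedged usage sketch (host, port and config id are illustrative):
#
#     curl -X PUT http://127.0.0.1:8000/config/action/switch_status/1
#
# This toggles the status flag of config 1 and redirects back to the referring page.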
| 31.972973 | 95 | 0.723584 | 144 | 1,183 | 5.722222 | 0.409722 | 0.053398 | 0.058252 | 0.036408 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01241 | 0.182587 | 1,183 | 36 | 96 | 32.861111 | 0.83971 | 0 | 0 | 0 | 0 | 0 | 0.125106 | 0.033812 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.258065 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
892ba219404b4a36c279e6259dff3417d0449bd6 | 435 | py | Python | LeetCode_Python/graph_algorithms/breadthfirst_printer.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | LeetCode_Python/graph_algorithms/breadthfirst_printer.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | LeetCode_Python/graph_algorithms/breadthfirst_printer.py | nlantau/Codewars_2020_2021 | 055fbf8785ddd52b9f8e8c2b59294ead01852467 | [
"MIT"
] | null | null | null | # nlantau, 2022-01-01
from collections import deque
def breadth_first_print(graph, source):
queue = deque([source])
while queue:
curr = queue.popleft()
print(curr)
for neighbor in graph[curr]:
queue.append(neighbor)
g = {
'a' : ['b', 'c'],
'b' : ['d'],
'c' : ['e'],
'd' : ['f'],
'e' : [],
'f' : []
}
if __name__ == "__main__":
    breadth_first_print(g, 'a')
| 15.535714 | 39 | 0.498851 | 52 | 435 | 3.942308 | 0.596154 | 0.117073 | 0.165854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02649 | 0.305747 | 435 | 27 | 40 | 16.111111 | 0.652318 | 0.043678 | 0 | 0 | 0 | 0 | 0.048309 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.111111 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
892dc54a723d08d5259a8460a94c096a75910404 | 3,660 | py | Python | ark_nlp/nn/biaffine_bert.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | [
"Apache-2.0"
] | 1 | 2022-03-23T05:10:55.000Z | 2022-03-23T05:10:55.000Z | ark_nlp/nn/biaffine_bert.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | [
"Apache-2.0"
] | null | null | null | ark_nlp/nn/biaffine_bert.py | yubuyuabc/ark-nlp | 165d35cfacd7476791c0aeba19bf43f4f8079553 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 DataArk Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Xiang Wang, xiangking1995@163.com
# Status: Active
import torch
from torch import nn
from transformers import BertModel
from ark_nlp.nn.base.bert import BertForTokenClassification
from ark_nlp.nn.layer.biaffine_block import Biaffine
class BiaffineBert(BertForTokenClassification):
"""
    Biaffine model for named entity recognition

    Args:
        config: model configuration object
        encoder_trained (:obj:`bool`, optional, defaults to True):
            whether the BERT parameters are trainable (default: trainable)
        biaffine_size (:obj:`int`, optional, defaults to 128):
            embedding size fed into the biaffine layer
        lstm_dropout (:obj:`float`, optional, defaults to 0.4):
            dropout rate of the LSTM
        select_bert_layer (:obj:`int`, optional, defaults to -1):
            which BERT layer's hidden states to use
Reference:
[1] Named Entity Recognition as Dependency Parsing
[2] https://github.com/suolyer/PyTorch_BERT_Biaffine_NER
""" # noqa: ignore flake8"
def __init__(
self,
config,
encoder_trained=True,
biaffine_size=128,
lstm_dropout=0.4,
select_bert_layer=-1
):
super(BiaffineBert, self).__init__(config)
self.num_labels = config.num_labels
self.select_bert_layer = select_bert_layer
self.bert = BertModel(config)
for param in self.bert.parameters():
param.requires_grad = encoder_trained
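        # note: PyTorch's LSTM ignores the dropout argument when num_layers == 1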
self.lstm = torch.nn.LSTM(
input_size=config.hidden_size,
hidden_size=config.hidden_size,
num_layers=1,
batch_first=True,
dropout=lstm_dropout,
bidirectional=True
)
self.start_encoder = torch.nn.Sequential(
torch.nn.Linear(
in_features=2*config.hidden_size,
out_features=biaffine_size),
torch.nn.ReLU()
)
self.end_encoder = torch.nn.Sequential(
torch.nn.Linear(
in_features=2*config.hidden_size,
out_features=biaffine_size),
torch.nn.ReLU()
)
        self.biaffine = Biaffine(biaffine_size, self.num_labels)
self.reset_params()
def reset_params(self):
nn.init.xavier_uniform_(self.start_encoder[0].weight)
nn.init.xavier_uniform_(self.end_encoder[0].weight)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
**kwargs
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
return_dict=True,
output_hidden_states=True
)
sequence_output = outputs.hidden_states[self.select_bert_layer]
        # LSTM encoding of the selected BERT hidden states
sequence_output, _ = self.lstm(sequence_output)
start_logits = self.start_encoder(sequence_output)
end_logits = self.end_encoder(sequence_output)
        span_logits = self.biaffine(start_logits, end_logits)
span_logits = span_logits.contiguous()
return span_logits
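# A hedged usage sketch (the model name and label count are illustrative):
#
#     from transformers import BertConfig
#     config = BertConfig.from_pretrained("bert-base-chinese", num_labels=10)
#     model = BiaffineBert(config)
#     span_logits = model(input_ids, attention_mask, token_type_ids)
#     # span_logits scores every (start, end) span: [batch, seq_len, seq_len, num_labels]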
| 29.756098 | 74 | 0.646175 | 434 | 3,660 | 5.237327 | 0.412442 | 0.021557 | 0.032996 | 0.014078 | 0.131104 | 0.089749 | 0.089749 | 0.089749 | 0.089749 | 0.089749 | 0 | 0.013163 | 0.273497 | 3,660 | 122 | 75 | 30 | 0.84167 | 0.327596 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.075758 | 0 | 0.151515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
892e48833b5baafde56ca7f950f5d8a10d6829a8 | 278 | py | Python | src/py/fi3201/intro/for-nested.py | butiran/butiran.github.io | bf99f55819a140190e5bda8f9675109ef607eb9d | [
"MIT"
] | null | null | null | src/py/fi3201/intro/for-nested.py | butiran/butiran.github.io | bf99f55819a140190e5bda8f9675109ef607eb9d | [
"MIT"
] | 2 | 2020-08-08T13:57:20.000Z | 2020-08-08T14:18:05.000Z | src/py/fi3201/intro/for-nested.py | butiran/butiran.github.io | bf99f55819a140190e5bda8f9675109ef607eb9d | [
"MIT"
] | 1 | 2020-08-08T13:54:23.000Z | 2020-08-08T13:54:23.000Z | #
# for-nested.py
# Use nested for to display numbers in Python 3.7
#
# Sparisoma Viridi | https://github.com/dudung
#
# Execute: python3.7 for.py
#
# 20210122
# 0933 Create this example, test it, and it works.
#
for i in range(1, 5):
for j in range(1, 4):
print(i, j)
| 17.375 | 50 | 0.654676 | 49 | 278 | 3.714286 | 0.714286 | 0.076923 | 0.087912 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.091324 | 0.21223 | 278 | 15 | 51 | 18.533333 | 0.739726 | 0.694245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8935615b837a49fef702e885fe7f7711b2999e69 | 5,910 | py | Python | trainer/task.py | estherGomezCantarero/DLProductionKSchool | 4e6c0fe366e533403163c9820bb1f49a5fe8546f | [
"Apache-2.0"
] | null | null | null | trainer/task.py | estherGomezCantarero/DLProductionKSchool | 4e6c0fe366e533403163c9820bb1f49a5fe8546f | [
"Apache-2.0"
] | null | null | null | trainer/task.py | estherGomezCantarero/DLProductionKSchool | 4e6c0fe366e533403163c9820bb1f49a5fe8546f | [
"Apache-2.0"
] | null | null | null | """A simple main file to showcase the template."""
# argparse is the standard module for command-line argument parsing
import logging.config
import argparse
import os
import time
import sys
import tensorflow as tf
from tensorflow.keras import datasets
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import activations
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
from tensorflow.keras import utils
from tensorflow.keras import callbacks
from . import __version__
# module-level logger and version constants
LOGGER = logging.getLogger()
VERSION = __version__
#Download data function
def __download_data():
LOGGER.info("Downloading data...")
train, test = datasets.mnist.load_data()
X_train, y_train = train
X_test, y_test = test
    return X_train, y_train, X_test, y_test
#Preprocess data function
def _preprocess_data(x,y,needs_reshape):
LOGGER.info("Preprocessing data...")
x = x / 255.0
y = utils.to_categorical(y)
if needs_reshape:
x= x.reshape(-1, 28, 28, 1)
return x,y
#Build the model of the neuronal network
def _build_dense_model():
m = models.Sequential()
m.add(layers.Input((28,28), name='input_layer'))
m.add(layers.Flatten())
m.add(layers.Dense(128,activation=activations.relu))
m.add(layers.Dense(64,activation=activations.relu))
m.add(layers.Dense(32,activation=activations.relu))
m.add(layers.Dense(10,activation=activations.softmax))
return m
#New Models
def _build_conv_model():
m = models.Sequential()
m.add(layers.Input((28, 28, 1), name='my_input_layer'))
m.add(layers.Conv2D(32, (3, 3), activation=activations.relu))
m.add(layers.MaxPooling2D((2, 2)))
m.add(layers.Conv2D(16, (3, 3), activation=activations.relu))
m.add(layers.MaxPooling2D((2, 2)))
m.add(layers.Conv2D(8, (3, 3), activation=activations.relu))
m.add(layers.MaxPooling2D((2, 2)))
m.add(layers.Flatten())
m.add(layers.Dense(10, activation=activations.softmax))
return m
# By convention this module must expose a train_and_evaluate function; AI Platform calls this entry point.
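# A hedged example of how such a job might be submitted to AI Platform (job name,
# region and bucket are illustrative):
#
#     gcloud ai-platform jobs submit training my_job \
#         --module-name trainer.task \
#         --package-path trainer/ \
#         --region us-central1 \
#         -- \
#         --batch-size 128 --epochs 5 --model-output-path gs://my-bucket/model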
def train_and_evaluate(batch_size,epochs,job_dir,output_path,is_hypertune,model_type):
#Download the data
X_train, y_train, X_test, y_test = __download_data()
    # Build the model according to the requested type
    if model_type == 'cnn':
        needs_reshape = True
        model = _build_conv_model()
    elif model_type == 'dense':
        needs_reshape = False
        model = _build_dense_model()
else:
LOGGER.error("Unknown model type")
sys.exit(1)
#Preprocess the data
X_train, y_train = _preprocess_data(X_train,y_train,needs_reshape)
X_test, y_test =_preprocess_data(X_test,y_test,needs_reshape)
model.compile(optimizer=optimizers.Adam(), metrics=[metrics.categorical_accuracy], loss=losses.categorical_crossentropy)
#Train the model
    # choose the directory where the TensorBoard logs will be written
logdir = os.path.join(job_dir,"logs/scalars/"+ time.strftime("%Y%m%d-%H%M%S"))
    # create the TensorBoard callback
td_callback = callbacks.TensorBoard(log_dir=logdir)
model.fit(X_train,
y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=[td_callback]
)
#Evaluate the model
loss_value, accuracy = model.evaluate(X_test,y_test)
LOGGER.info("LOSS VALUE: %f ACCURACY: %.4f" % (loss_value,accuracy))
#Communicate results from model evaluation
if is_hypertune:
        # the writer is a TensorFlow summary file writer
metric_tag = "accuracy_live_class"
eval_path = os.path.join(job_dir,metric_tag)
writer = tf.summary.create_file_writer(eval_path)
with writer.as_default():
            # We report accuracy because it is the metric compiled into the model
            # (any other metric would work); tf.summary requires a step value, so
            # the number of epochs is used.
            tf.summary.scalar(metric_tag, accuracy, step=epochs)
            writer.flush()  # fix: was writer.fluch()
#hypertune option
if not is_hypertune:
#Save model in TF SaveModel format
model_dir = os.path.join(output_path,VERSION)
models.save_model(model, model_dir, save_format='tf')
def main():
    # Command-line arguments (remember: in the YAML config the hyperparameters must use these same names)
parser = argparse.ArgumentParser()
parser.add_argument('--batch-size',type=int, help='Batch size for the training')
parser.add_argument('--epochs',type=int,help='Number of epochs for the training')
parser.add_argument('--job-dir',default=None,required=False, help='Option for AI platform')
    # The trained model is written in the TensorFlow SavedModel format, so a path must be given
    parser.add_argument('--model-output-path', help='Path to write the SavedModel format', default=None)
    # Option for hyperparameter tuning
parser.add_argument('--hypertune',action='store_true',help='This is a hypertuning job')
parser.add_argument('--model_type',help='Model type to choose',default='dense')
    # Retrieve the parsed options
args = parser.parse_args()
batch_size = args.batch_size
epochs = args.epochs
job_dir = args.job_dir
output_path = args.model_output_path
#hypertune variable
is_hypertune = args.hypertune
model_type = args.model_type
if not model_type in ['dense','cnn']:
print('Model error')
sys.exit(1)
if not is_hypertune and output_path is None:
print('Please set --model-output-path')
sys.exit(1)
train_and_evaluate(batch_size,epochs,job_dir,output_path,is_hypertune,model_type)
if __name__ == "__main__":
main()
| 35.178571 | 124 | 0.692217 | 830 | 5,910 | 4.759036 | 0.298795 | 0.01519 | 0.037975 | 0.056962 | 0.237468 | 0.209873 | 0.177975 | 0.151392 | 0.13038 | 0.13038 | 0 | 0.012071 | 0.201015 | 5,910 | 167 | 125 | 35.389222 | 0.824439 | 0.108968 | 0 | 0.127273 | 0 | 0 | 0.10301 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054545 | false | 0 | 0.145455 | 0 | 0.236364 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8936f5392f9c16187ea3e034a6f07baffa5228d7 | 6,188 | py | Python | orm/sql.py | ChanthMiao/inSpyder | 4916e944d675094fc812cfbd9a5d7c283a1fc64a | [
"MIT"
] | 3 | 2019-06-16T13:07:52.000Z | 2019-10-16T13:29:52.000Z | orm/sql.py | ChanthMiao/inSpyder | 4916e944d675094fc812cfbd9a5d7c283a1fc64a | [
"MIT"
] | null | null | null | orm/sql.py | ChanthMiao/inSpyder | 4916e944d675094fc812cfbd9a5d7c283a1fc64a | [
"MIT"
] | 1 | 2020-03-15T18:48:17.000Z | 2020-03-15T18:48:17.000Z | # encoding="utf-8"
from sqlalchemy import Integer, String
from sqlalchemy import Column, create_engine, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
Base = declarative_base()
class User(Base):
__tablename__ = "users"
id = Column(String(length=12), primary_key=True)
username = Column(String(length=30))
posts = Column(Integer, default=0)
following = Column(Integer, default=0)
followers = Column(Integer, default=0)
biography = Column(String(length=300))
r_post = relationship("Post", backref="users")
class Post(Base):
__tablename__ = "posts"
id = Column(String(length=24), primary_key=True)
timestamp = Column(Integer, default=0)
stars = Column(Integer, default=0)
comments = Column(Integer, default=0)
url = Column(String(length=300))
uid = Column(String(length=12), ForeignKey("users.id"))
class manager(object):
def __init__(self, con_string: str):
self._engine = create_engine(con_string)
Base.metadata.create_all(self._engine)
self._Session = sessionmaker(bind=self._engine)
self._session = self._Session()
def close(self):
self._session.close()
def insert_or_update_user(self,
uid: str,
name: str,
posts_n: int,
following_n: int,
followers_n: int,
bio: str,
commit=True):
line = self._session.query(User)\
.filter(User.id == uid)\
.one_or_none()
if line is None:
            user = User(id=uid,
                        username=name,
                        posts=posts_n,
                        following=following_n,
                        followers=followers_n,
                        biography=bio)
self._session.add(user)
else:
line.username = name
line.posts = posts_n
            line.following = following_n  # fix: was assigned followers_n
line.followers = followers_n
line.biography = bio
if commit:
self._session.commit()
def insert_or_update_post(self,
pic_id: str,
pic_timestamp: int,
pic_stars: int,
pic_comments: int,
pic_url: str,
pic_uid: str,
commit=True):
line = self._session.query(Post)\
.filter(Post.id == pic_id)\
.one_or_none()
if line is None:
post = Post(id=pic_id,
timestamp=pic_timestamp,
stars=pic_stars,
comments=pic_comments,
url=pic_url,
uid=pic_uid)
self._session.add(post)
else:
line.timestamp = pic_timestamp
line.stars = pic_stars
line.comments = pic_comments
line.url = pic_url
line.uid = pic_uid
if commit:
self._session.commit()
def get_uid_list(self) -> list:
sqlRT = self._session.query(User.id).all()
rt = []
for line in sqlRT:
rt.append(line[0])
return rt
def get_user_list(self) -> list:
sqlRT = self._session.query(User.id, User.username).all()
rt = []
for line in sqlRT:
rt.append({"uid": line[0], "username": line[1]})
return rt
def get_pic_id_list(self) -> list:
sqlRT = self._session.query(Post.id).all()
rt = []
for line in sqlRT:
rt.append(line[0])
return rt
def get_pic_list(self) -> list:
sqlRT = self._session.query(Post.id, Post.url).all()
return sqlRT
def check_uid_exist(self, Uid: str) -> bool:
sqlRT = self._session\
.query(User.id)\
.filter(User.id == Uid)\
.one_or_none()
return (sqlRT is not None)
def update_one_user_data(self, data: dict):
self.insert_or_update_user(data["uid"], data["username"],
data["posts"], data["following"],
data["followers"], data["biography"])
posts_list = []
posts_list = data["blogs"]
for blog in posts_list:
self.insert_or_update_post(blog["pic_id"], blog["pic_time_stamp"],
blog["pic_stars"], blog["pic_comments"],
blog["pic_url"], data["uid"])
# self._session.commit()
def get_one_user_data(self, uid) -> dict:
rt = {}
if self.check_uid_exist(uid):
sqlRT0 = self._session.query(User)\
.filter(User.id == uid).one()
rt["uid"] = sqlRT0.id
rt["username"] = sqlRT0.username
rt["biography"] = sqlRT0.biography
rt["posts"] = sqlRT0.posts
rt["following"] = sqlRT0.following
rt["followers"] = sqlRT0.followers
sqlRT1 = self._session.query(Post)\
.filter(Post.uid == uid)\
.all()
blogs = []
for line in sqlRT1:
blog = {}
blog["pic_id"] = line.id
blog["pic_time_stamp"] = line.timestamp
blog["pic_stars"] = line.stars
blog["pic_comments"] = line.comments
blog["pic_url"] = line.url
blogs.append(blog)
rt["blogs"] = blogs
return rt
else:
return None
def get_userless_data(self) -> list:
sqlRT = self._session\
.query(User.posts, User.following, User.followers, User.biography)\
.all()
return sqlRT
def get_idless_data(self) -> list:
sqlRT = self._session\
.query(Post.timestamp, Post.comments, Post.stars)\
.all()
return sqlRT
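# A hedged usage sketch (the connection string and payload values are illustrative):
#
#     m = manager("sqlite:///inspyder.db")
#     m.update_one_user_data({"uid": "u1", "username": "alice", "posts": 0,
#                             "following": 0, "followers": 0, "biography": "",
#                             "blogs": []})
#     print(m.get_uid_list())
#     m.close()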
| 34.377778 | 79 | 0.506949 | 656 | 6,188 | 4.58689 | 0.150915 | 0.073114 | 0.058491 | 0.048853 | 0.248255 | 0.221004 | 0.178797 | 0.11665 | 0.107677 | 0.031904 | 0 | 0.008957 | 0.386555 | 6,188 | 179 | 80 | 34.569832 | 0.78372 | 0.006303 | 0 | 0.221519 | 0 | 0 | 0.037911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.082278 | false | 0 | 0.025316 | 0 | 0.278481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8938bff55d9a25b0a08b3653bc1b868f9bcab4d6 | 716 | py | Python | 30_days_of_Code/15_linked_list.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | 30_days_of_Code/15_linked_list.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | 30_days_of_Code/15_linked_list.py | byung-u/HackerRank | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
class Node:
def __init__(self, data):
self.data = data
self.next = None
class Solution:
def display(self, head):
current = head
while current:
print(current.data, end=' ')
current = current.next
def insert(self, head, data):
if head:
cur = head
while cur.next:
cur = cur.next
N = Node(data=data)
cur.next = N
return head
else:
head = Node(data=data)
return head
mylist = Solution()
T = int(input())
head = None
for i in range(T):
    data = int(input())
    head = mylist.insert(head, data)
mylist.display(head)
| 20.457143 | 40 | 0.519553 | 87 | 716 | 4.229885 | 0.37931 | 0.065217 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002222 | 0.371508 | 716 | 34 | 41 | 21.058824 | 0.815556 | 0.02933 | 0 | 0.071429 | 0 | 0 | 0.001441 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0 | 0 | 0.25 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
893b3513d5d0f6590aeea57f9fde89eef1fb1744 | 19,636 | py | Python | Author_Template/TexScanner.py | ygrange/ADASSProceedings | 61502d4ba827845c65e98588d84c69d3596ae958 | [
"MIT"
] | 1 | 2019-09-18T08:10:25.000Z | 2019-09-18T08:10:25.000Z | Author_Template/TexScanner.py | ygrange/ADASSProceedings | 61502d4ba827845c65e98588d84c69d3596ae958 | [
"MIT"
] | 10 | 2019-09-29T13:55:38.000Z | 2021-12-27T16:23:49.000Z | Author_Template/TexScanner.py | ygrange/ADASSProceedings | 61502d4ba827845c65e98588d84c69d3596ae958 | [
"MIT"
] | 3 | 2020-10-05T19:10:22.000Z | 2021-12-25T09:51:07.000Z | #!/usr/bin/env python
# T e x S c a n n e r . p y
#
# Defines a TexScanner class that can be used to extract LaTeX directives
# from a .tex file. Call SetFile() to supply a reference to an already
# open .tex file, then use successive calls to GetNextTexCommand() to get
# all the tex directives and their parameters from the file. This probably
# isn't of very general utility when it comes to parsing .tex files, but
# it is useful for the ADASS editing purposes for which it was written,
# where all that was wanted was to find graphics or citation commands and
# see what files or references they were using.
#
# Parsing LaTeX files is tricky, and this code isn't perfect by any means.
# There are lots of constructs that will fool it, usually into missing
# commands that it ought to spot. If it manages to spot any problems, this
# can be checked using a call to ParsedOK() and details of any problem can
# be obtained by calling GetReport(). In testing, I have seen this parser
# complain about unclosed braces that turn out to be due to an unescaped
# comment character that LaTeX has not complained about. (Parsing problems
# often seem to be associated with the use of '{','[' and '%' in math
# expressions, and it may be that the parser needs to know about math mode.)
# This code would benefit from a proper review and possible reworking some day.
#
# History:
# 14th Jan 2016. Original version, KS.
# 28th Jan 2016. GetNextWord() now allows for nesting. GetNextTexCommand()
# now allows for any number of required and/or optional
# arguments. The list it returns can be of any length, not
# always one of three items, so calling code will need to
# be modified. KS.
# 1st Feb 2016. Interface to GetNextTexCommand() reworked to use a
# callback for each new command found. This should make it
# easier to introduce a recursive scan that catches commands
# included within the arguments to other commands, although
# at the moment this is not implemented. KS.
# 11th Feb 2016. GetTexCommand() now does do a recursive scan through the
# arguments of the commands it finds. KS.
# 16th Feb 2016. Now catches multiple LaTeX directives in one argument, eg
# \citetext{\citealp{l1980}, implemented in \citealp{w12}}
# 30th Mar 2016. Now checks to see if '%' characters are comment characters
# or just literal '%' that have been escaped. KS.
# 7th Apr 2016. Fixed obscure parsing bug triggered by the sequence
# "$\mu$m" which caused the scanning of the string containing
# it to be terminated prematurely. It's because the code
# had assumed that all \directives would be terminated by
# a line break, space, or a '{' or '[', which is not of
# course the case. Strange it took this long to show up. KS.
# 12th Apr 2016. GetNextChar() now intercepts "\n" characters and treats
# them as spaces - this is essentially what LaTeX does. KS.
# 2nd May 2016. Fixed a parsing problem where a slightly unusual sequence
# (involving a \newcommand definition on a single line} sent
# the parser into infinite recursion. KS.
# 24th Jul 2017. Fixed a problem seen in a .tex file that had an equation
# that involved an escaped brace '\{' character. This was
# being treated as a delimiter, with unfortunate results.
# WasEscaped now records if the last character, as returned
# by GetNextChar(), was escaped, and GetNextWord() uses this
# to ignore escaped braces and parentheses. KS.
# 25th Jul 2017. Added a check for a parser runaway, and the routines
# ParsedOK() and GetReport(). GetNextWordFromString{} now
# has the same tests for escaped braces as used by
# GetNextWord(). KS.
# 15th Aug 2017. Converted to run under Python3, using 2to3. Added
# the importing of items from __future__ to allow this to
# run under either Python2 or Python3. (In actuality, this
# code worked unchanged under Python3, and since it doesn't
# use print, doesn't really need that future import, but it
# seems to be good practice anyway.) KS.
#
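# A hedged usage sketch (the file name and callback are illustrative):
#
#     scanner = TexScanner()
#     with open("paper.tex") as TexFile:
#         scanner.SetFile(TexFile)
#         while not scanner.GetNextTexCommand(MyCallback, None, None):
#             pass
#     if not scanner.ParsedOK():
#         for Line in scanner.GetReport():
#             print(Line)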
from __future__ import (print_function,division,absolute_import)
import os
import sys
import string
class TexScanner(object):
def __init__(self):
self.FileIdSet = False
self.Escaped = False
self.WasEscaped = False
self.LastChar = ""
self.LastWord = ""
self.Line = 0
self.Problems = []
def SetFile(self,FileId) :
# Needs to be called before any of the Get... routines. This passes
# the Id of an open .tex file to the scanner.
self.FileId = FileId
self.FileIdSet = True
self.Line = 0
self.Problems = []
def ParsedOK(self) :
# Returns True if the .tex file parsed without problems. If it returns
# False, GetReport() can be called to get a description of what happened.
return (len(self.Problems) == 0)
def GetReport(self) :
# If the file parsed with problems, this returns a list of strings that
# describe what happened. If the file parsed OK, this returns an empty
# list.
return self.Problems
def GetNextChar(self) :
# Returns the next character from a .tex file. If a comment character
# ('%') is encountered, this skips to the end of the current line and
# returns the newline character at the end. If the end of the file is
# reached, or if the file is not open, this returns an empty string.
# Allow for the case where the comment character was escaped, in which
# case treat it as a literal '%'. LaTeX treats an end of line like a
# space, and we intercept "\n" characters and turn them into spaces to
# get the same effect.
Char = ""
if (self.FileIdSet) :
Char = self.FileId.read(1)
if (Char == "%") :
if (not self.Escaped) :
while (True) :
Char = self.FileId.read(1)
if (Char == "\n" or Char == "") : break
self.WasEscaped = self.Escaped
self.Escaped = (Char == "\\")
if (Char == "\n") :
Char = " "
self.Line = self.Line + 1
return Char
def GetNextLine(self) :
# Returns the next line from a .tex file, with comments stripped out.
# This means anything in a line from the first '%' character up to but
# not including the final newline character is removed from the line.
# It does mean than a line that starts with a '%' is returned as a
# blank line - just a newline; it is not ignored completely. If the
# end of the file is reached, or the file is not open, this returns
# an empty string. (Note that this routine isn't used any more by the
# other routines in this file, although it was originally.)
Result = ""
while (True) :
Char = self.GetNextChar()
Result = Result + Char
if (Char == "\n" or Char == "") : break
return Result
def GetNextWord(self):
# Returns the next 'word' from a .tex file. Comments are ignored, and
# a 'word' is defined slightly unusually here in order to help with
# processing LaTeX directives. Anything enclosed in {} or in []
# braces or brackets, including the enclosing {} or [] is considered
# a word. Blanks and { and [ characters delimit words, as do the
# ends of lines, which are assumed to be one or more of \n and \r
# characters. Ends of lines are removed when encountered within
# {} or [] characters.
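        # For example, scanning "\includegraphics[width=3in]{fig1.eps}" returns the
        # successive words "\includegraphics", "[width=3in]" and "{fig1.eps}".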
Word = ""
# Find the first non-blank character (treating newline characters
# and carriage returns as blanks).
while (True) :
if (self.LastChar != "") :
Char = self.LastChar
else :
Char = self.GetNextChar()
self.LastChar = ""
if (Char != " " and Char != "\n" and Char != "\r") : break
if (Char != "") :
Word = Word + Char
# If the word started with a { or [, then we ignore blanks and
# keep going until we hit the corresponding closing character
# (or the end of the file). Allow for nesting. Note that when
# GetNextChar() returns an empty string, that's the end of the file.
Escaped = self.WasEscaped
if ((Char == "{" or Char == "[") and not Escaped) :
Start = Char
if (Start == "{") : End = "}"
if (Start == "[") : End = "]"
Nesting = 1
Line = self.Line + 1
while (True) :
Char = self.GetNextChar()
if (Char == "") :
self.Problems.append(
"The file appears to have an unclosed '" \
+ Start + "' in line " + str(Line))
self.Problems.append("The file may be missing a '" + End + \
"' character or there may")
self.Problems.append(
"be a problem with nested braces or with '%' characters")
break
Escaped = self.WasEscaped
if (Char != '\n' and Char != '\r') : Word = Word + Char
if (Char == Start and not Escaped) : Nesting = Nesting + 1
if (Char == End and not Escaped) :
Nesting = Nesting - 1
if (Nesting <= 0) : break
else :
# Otherwise, just keep going until we hit a blank or one of
# the delimiting braces. Either of these will terminate the word,
# but if it was a brace, we need to remember it for the next
# time we're called.
while (True) :
Char = self.GetNextChar()
Escaped = self.WasEscaped
if (Char == "") : break
if (Char == " " or Char == '\r' or Char == '\n') : break
if ((Char == "{" or Char == "[") and not Escaped) :
self.LastChar = Char
break
Word = Word + Char
return Word
def GetNextTexCommand(self,Callback,ClientData,ClientExtra) :
        # Searches for the next Tex/LaTeX command read from the open .tex file
# and calls the specified callback routine with the details of the
# command. The callback routine is called with a first argument that
# gives the command details, followed by the arguments supplied
# as ClientData and ClientExtra. The command details are supplied
# as a list of strings. The first is the LaTeX directive, beginning
# with '\'. Subsequent strings are the arguments that followed
# the directive, either {required} (in curly braces) or [optional]
# (in square brackets). The arguments are returned with the beginning
# and ending braces included. If the end of the file is reached, this
# routine returns True; otherwise it returns False.
#
# Because the arguments for the LaTeX command may themselves contain
# further LaTeX commands, this routine also searches recursively
# through each argument, and will call the callback routine for each
# command found. To get every LaTeX command in the .tex file, this
# routine should continue to be called until it returns True.
#
# Callback can be passed as None, in which case no callback is made -
# this can be used for a quick check that the file can be parsed.
Finished = True
Command = []
Directive = ""
while (True) :
if (self.LastWord == "") :
Word = self.GetNextWord()
else :
Word = self.LastWord
self.LastWord = ""
if (Word == "") : break
# (This doesn't handle the case where there are multiple directives
# in the one word, eg "\it{text}\emph{text}". It will only find
# the first.)
BSlashIndex = Word.find('\\')
if (BSlashIndex >= 0) :
Directive = Word[BSlashIndex:]
break
if (Directive != "") :
Command.append(Directive)
Word = self.GetNextWord()
while (Word != "") :
if (Word[0] == '[' or Word[0] == '{') :
Command.append(Word)
# Search each argument recursively for any LaTeX commands
# it may contain.
Word = Word[1:len(Word) - 1]
self.GetNextTexCommandFromString(Word,\
Callback,ClientData,ClientExtra)
Word = self.GetNextWord()
else :
self.LastWord = Word
break
if (Callback != None) : Callback(Command,ClientData,ClientExtra)
Finished = False
return Finished
def GetNextWordFromString(self, String, Posn):
# Returns the next 'word' from a string, given the string and a start
# position in the string. It is assumed that the string contains no
# comments. A 'word' is defined slightly unusually here in order to
# help with processing LaTeX directives. Anything enclosed in {} or
# in [] braces or brackets, including the enclosing {} or [] is
# considered a word. Blanks and { and [ characters delimit words.
# This routine returns a pair comprising the word and the value
# for Posn to be used for the next call. When the end of the string
# is reached, the pair returned is ("",0). The Posn value starts from
# 0, in the usual Python way.
#
# This code is similar in structure to GetNextWord(), but it's easier
# to move around in a string than it is in a file. Note the assumption
# that this will be used mainly on words that have been obtained via
# GetNextWord() and so will already have things like comments stripped
# out. The intent is that this will be used recursively to split up
# words that themselves contain LaTeX commands, eg
# \center{\it{text}\cite{ref}}
# where GetNextWord will find "\center" and then "{\it{text}\cite{ref}}"
# and we need to use this routine to split up that nested second word.
Word = ""
Char = ""
Escaped = False
if (Posn > 0) :
Escaped = (String[Posn - 1] == '\\')
# Find the first non-blank character.
Index = Posn
while (Index < len(String)) :
Char = String[Index]
Index = Index + 1
if (Char != " ") :
Word = Word + Char
Escaped = (Char == '\\')
break
# See if we found anything. If not, quit now.
if (Word != "") :
# If the word started with a { or [, then we ignore blanks and
# keep going until we hit the corresponding closing character
# (or the end of the string). Allow for nesting.
if (Char == "{" or Char == "[") :
Start = Char
if (Start == "{") : End = "}"
if (Start == "[") : End = "]"
Nesting = 1
while (Index < len(String)) :
Char = String[Index]
Index = Index + 1
if (Char == "") : break
Word = Word + Char
if ((Char == Start) and not Escaped) : Nesting = Nesting + 1
if ((Char == End) and not Escaped) :
Nesting = Nesting - 1
if (Nesting <= 0) : break
Escaped = (Char == '\\')
else :
# Otherwise, just keep going until we hit a blank or one of
                # the delimiting braces. Either of these will terminate the word.
while (Index < len(String)) :
Char = String[Index]
if (Char == " ") : break
if ((Char == "{" or Char == "[") and not Escaped) : break
Index = Index + 1
Word = Word + Char
Escaped = (Char == '\\')
if (Word == "") : Index = 0
return (Word,Index)
def GetNextTexCommandFromString(self,\
String,Callback,ClientData,ClientExtra) :
# This is similar to GetNextTexCommand(), except that it works not on
# a file but on a string, and it works recursively, calling itself
# to search the arguments of any command to see if they themselves
# contain more commands. Each time a command is found, the specified
# callback routine is called with the command details and the client
# arguments, just as for GetNextTexCommand(). Callback can be passed
# as None, in which case no callback is made - this can be used for a
# quick check that the file can be parsed.
Posn = 0
More = True
while (More) :
Directive = ""
Command = []
# Apart from the recursion, this code follows the general lines of
# GetNextTexCommand().
while (True) :
WordPair = self.GetNextWordFromString(String,Posn)
Word = WordPair[0]
Posn = WordPair[1]
if (Word == "") : break
if (Word[0] == '[' or Word[0] == '{') :
self.GetNextTexCommandFromString(Word[1:len(Word) - 1],\
Callback,ClientData,ClientExtra)
BSlashIndex = Word.find('\\')
if (BSlashIndex >= 0) :
Directive = Word[BSlashIndex:]
break
More = False
if (Directive != "") :
Command.append(Directive)
WordPair = self.GetNextWordFromString(String,Posn)
Word = WordPair[0]
Posn = WordPair[1]
if (Word != "") :
if (Word[0] == '[' or Word[0] == '{') :
self.GetNextTexCommandFromString(Word[1:len(Word) - 1],\
Callback,ClientData,ClientExtra)
while (Word != "") :
if (Word[0] == '[' or Word[0] == '{') :
Command.append(Word)
WordPair = self.GetNextWordFromString(String,Posn)
Word = WordPair[0]
Posn = WordPair[1]
if (Word != "") :
if (Word[0] == '[' or Word[0] == '{') :
self.GetNextTexCommandFromString(Word[1:len(Word) - 1],\
Callback,ClientData,ClientExtra)
More = False
                        if (String[Posn:].strip(" \r\n") != "") : More = True
else :
More = True
break
if (Callback != None) : Callback(Command,ClientData,ClientExtra)
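# A minimal usage sketch; the file name "sample.tex" and the callback below
# are illustrative assumptions, not part of the scanner itself.
if __name__ == "__main__":
    def ShowCommand(Command, ClientData, ClientExtra):
        # Command[0] is the directive, e.g. "\cite"; the rest are arguments.
        print(Command)
    Scanner = TexScanner()
    with open("sample.tex") as TexFile:
        Scanner.SetFile(TexFile)
        while not Scanner.GetNextTexCommand(ShowCommand, None, None):
            pass
    if not Scanner.ParsedOK():
        for Problem in Scanner.GetReport():
            print(Problem)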
| 45.24424 | 80 | 0.557242 | 2,369 | 19,636 | 4.612917 | 0.204306 | 0.01153 | 0.005857 | 0.008053 | 0.297218 | 0.249085 | 0.23243 | 0.219802 | 0.201867 | 0.196742 | 0 | 0.010442 | 0.361122 | 19,636 | 433 | 81 | 45.34873 | 0.860662 | 0.536769 | 0 | 0.65534 | 0 | 0 | 0.025101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048544 | false | 0 | 0.019417 | 0.009709 | 0.106796 | 0.004854 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
893cd792efe2e1b8b3a79e8bc57f11210491e2f5 | 446 | py | Python | perceus.py | Naman-Biswajit/Discord-Bot-Perceus | 338826e085653e3cd4f972bfb156a7ca129c1f49 | [
"MIT"
] | null | null | null | perceus.py | Naman-Biswajit/Discord-Bot-Perceus | 338826e085653e3cd4f972bfb156a7ca129c1f49 | [
"MIT"
] | null | null | null | perceus.py | Naman-Biswajit/Discord-Bot-Perceus | 338826e085653e3cd4f972bfb156a7ca129c1f49 | [
"MIT"
] | null | null | null | import os
from discord.ext import commands
perceus = commands.Bot(command_prefix = '$', description = 'A demo Discord Chat Bot built using Discord.py')
perceus.remove_command('help')
token = ''
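# The bot token is intentionally left blank; supply your own (for example
# from an environment variable) before running.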
for cog in os.listdir('./cogs'):
if cog.endswith('.py') and not cog.startswith('_'):
perceus.load_extension(f'cogs.{cog[:-3]}')
print(f'Loaded {cog[:-3]}')
else:
        print(f'Unable to load {cog}')
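# For illustration, a loadable cog module placed in ./cogs would follow the
# shape sketched below (the names are hypothetical, not shipped with this bot):
#
#   from discord.ext import commands
#
#   class Greetings(commands.Cog):
#       @commands.command()
#       async def hello(self, ctx):
#           await ctx.send('Hello!')
#
#   def setup(bot):
#       bot.add_cog(Greetings(bot))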
perceus.run(token) | 27.875 | 108 | 0.650224 | 64 | 446 | 4.46875 | 0.625 | 0.041958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008152 | 0.174888 | 446 | 16 | 109 | 27.875 | 0.769022 | 0 | 0 | 0 | 0 | 0 | 0.263982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
893db583f78ea487b4e94e5b59e694fb51ec16c7 | 1,231 | py | Python | src/simulations/sim_relay.py | m-rubik/Grow-Space | f87673cf13498e4dc8266cf29a7b61c857e54894 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2020-01-06T19:26:03.000Z | 2021-06-03T18:31:25.000Z | src/simulations/sim_relay.py | m-rubik/Grow-Space | f87673cf13498e4dc8266cf29a7b61c857e54894 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | src/simulations/sim_relay.py | m-rubik/Grow-Space | f87673cf13498e4dc8266cf29a7b61c857e54894 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """!
This code is for the simulation of relays.
"""
import atexit
class Relay():
"""!
This is the class for all relay devices
@param pin: The RPi pin that acts as the signal pin to the relay
@param name: The name of the relay.
    @param is_off: The current state of the relay (True when the relay is off)
"""
pin: int
name: str = "default"
    is_off: bool = False
def __init__(self, pin, name="default", is_off=False):
"""!
Standard initialization.
@param pin: The RPi pin that acts as the signal pin to the relay
@param name: The name of the relay.
        @param is_off: The current state of the relay (True when the relay is off)
"""
self.pin = pin
self.name = name
self.is_off = is_off
atexit.register(self.shutdown)
def toggle(self):
if self.is_off:
self.turn_on()
self.is_off = False
else:
self.turn_off()
self.is_off = True
def turn_on(self):
print("Turning on", self.name)
self.is_off = False
def turn_off(self):
print("Turning off", self.name)
self.is_off = True
def shutdown(self):
print(self.name, "shutting down.")
self.is_off = True
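# A minimal usage sketch; the pin number and name below are arbitrary
# assumptions for illustration.
if __name__ == "__main__":
    pump = Relay(17, name="pump", is_off=True)
    pump.toggle()  # prints "Turning on pump"
    pump.toggle()  # prints "Turning off pump"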
| 23.673077 | 72 | 0.575142 | 175 | 1,231 | 3.931429 | 0.268571 | 0.087209 | 0.09157 | 0.056686 | 0.402616 | 0.319767 | 0.319767 | 0.319767 | 0.319767 | 0.319767 | 0 | 0 | 0.329813 | 1,231 | 51 | 73 | 24.137255 | 0.833939 | 0.330626 | 0 | 0.192308 | 0 | 0 | 0.065772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192308 | false | 0 | 0.038462 | 0 | 0.384615 | 0.115385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
893e33d2fc8008166eb18fef3e6dac851364c38c | 1,386 | py | Python | tests/test_primer/SPARQLEndpoints.py | cmungall/PyShEx | 43026c4b0393362e770b868794c5d9071e691d6f | [
"CC0-1.0"
] | 25 | 2018-01-11T10:59:16.000Z | 2021-07-02T03:44:02.000Z | tests/test_primer/SPARQLEndpoints.py | cmungall/PyShEx | 43026c4b0393362e770b868794c5d9071e691d6f | [
"CC0-1.0"
] | 66 | 2018-03-12T01:12:02.000Z | 2022-03-18T07:56:31.000Z | tests/test_primer/SPARQLEndpoints.py | cmungall/PyShEx | 43026c4b0393362e770b868794c5d9071e691d6f | [
"CC0-1.0"
] | 12 | 2018-04-06T11:29:40.000Z | 2021-12-17T22:48:07.000Z | from pyshex.shex_evaluator import ShExEvaluator
from pyshex.user_agent import SlurpyGraphWithAgent
from pyshex.utils.sparql_query import SPARQLQuery
# SPARQL Endpoint
endpoint = 'http://wifo5-04.informatik.uni-mannheim.de/drugbank/sparql'
# SPARQL Query
sparql = """
PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX vocabClass: <http://wifo5-04.informatik.uni-mannheim.de/drugbank/vocab/resource/class/>
SELECT DISTINCT ?item WHERE {
?item rdf:type vocabClass:Offer
}
LIMIT 10
"""
# ShEx Expression
shex = """
PREFIX drugbank: <http://wifo5-04.informatik.uni-mannheim.de/drugbank/resource/drugbank/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX : <http://example.org/t1/>
START=@:S1
:S1 {foaf:page IRI+ ; # one or more foaf pages
     drugbank:limsDrugId xsd:string # and exactly one drug id
}"""
# Do the evaluation
result = ShExEvaluator(SlurpyGraphWithAgent(endpoint), # RDF source
shex, # ShEx definition
                       SPARQLQuery(endpoint, sparql).focus_nodes()).evaluate()  # Source of focus nodes
# Print the results
for r in result:
print(f"{r.focus}: ", end="")
if not r.result:
print(f"FAIL: {r.reason}")
else:
print("PASS")
| 30.8 | 104 | 0.63925 | 172 | 1,386 | 5.127907 | 0.534884 | 0.034014 | 0.037415 | 0.071429 | 0.142857 | 0.142857 | 0.142857 | 0.142857 | 0 | 0 | 0 | 0.028169 | 0.231602 | 1,386 | 44 | 105 | 31.5 | 0.8 | 0.093795 | 0 | 0 | 0 | 0.1 | 0.547715 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.033333 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
8943e67f4389bee0dec96ce61196abd28cab6ae4 | 795 | py | Python | setup.py | possibilities/CQ-editor | dc950180b365ae39840f6787c8f5a061492734ed | [
"Apache-2.0"
] | 351 | 2018-06-08T14:36:35.000Z | 2022-03-29T22:03:04.000Z | setup.py | possibilities/CQ-editor | dc950180b365ae39840f6787c8f5a061492734ed | [
"Apache-2.0"
] | 315 | 2018-06-08T14:35:08.000Z | 2022-03-31T15:45:27.000Z | setup.py | possibilities/CQ-editor | dc950180b365ae39840f6787c8f5a061492734ed | [
"Apache-2.0"
] | 71 | 2018-06-19T02:00:24.000Z | 2022-03-25T08:55:02.000Z | import codecs
import os.path
from setuptools import setup, find_packages
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
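# For reference, get_version() expects the target file to contain a line of
# the form shown below; the version number is only an illustration:
#
#   __version__ = "0.1.0"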
setup(name='CQ-editor',
version=get_version('cq_editor/_version.py'),
packages=find_packages(),
entry_points={
'gui_scripts': [
'cq-editor = cq_editor.__main__:main',
'CQ-editor = cq_editor.__main__:main'
]}
)
| 28.392857 | 62 | 0.613836 | 102 | 795 | 4.490196 | 0.480392 | 0.104803 | 0.048035 | 0.069869 | 0.104803 | 0.104803 | 0 | 0 | 0 | 0 | 0 | 0.001686 | 0.254088 | 795 | 27 | 63 | 29.444444 | 0.770658 | 0 | 0 | 0 | 0 | 0 | 0.196226 | 0.084277 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
894c3833caea3640946da0558f06c859beb0aaf4 | 4,146 | py | Python | MNIST&Fashion/adv/blackModel.py | LX-doctorAI/LR1 | bd4377058c8806b3ff332ca2aaf5473c2b1e6efb | [
"MIT"
] | 1 | 2022-01-03T09:12:47.000Z | 2022-01-03T09:12:47.000Z | MNIST&Fashion/adv/blackModel.py | ZhangAIPI/LR_Experiment | 46b7d7be9a739139eee4f3038e3a6ee3e5e5687d | [
"MIT"
] | null | null | null | MNIST&Fashion/adv/blackModel.py | ZhangAIPI/LR_Experiment | 46b7d7be9a739139eee4f3038e3a6ee3e5e5687d | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from abc import ABCMeta, abstractmethod
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from tqdm import tqdm
import os
from torch.utils.data import Dataset
import time
import pickle as pkl
from torch import nn
import torch.optim as optim
import torch.nn.init
import math
import torchvision
from olds import dataLoader
device = torch.device('cuda:4')
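# Note: 'cuda:4' assumes a machine with at least five CUDA devices; adjust
# the index (or fall back to 'cpu') to match the available hardware.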
def LoadMNIST(root, transform, batch_size, download=True):
train_dataset = datasets.MNIST(root=root, train=True, transform=transform, download=download)
test_dataset = datasets.MNIST(root=root, train=False, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
return train_loader, test_loader
class Network(nn.Module):
def __init__(self):
super(Network, self).__init__()
self.fc1 = nn.Linear(28 * 28, 100)
self.fc2 = nn.Linear(100,50)
self.fc3 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
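# Shapes for reference: forward() expects a flattened 28*28 image (784
# features) and maps it 784 -> 100 -> 50 -> 10 class logits.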
if __name__ == '__main__':
model = Network().to(device)
    transform = transforms.Compose([transforms.ToTensor()])
    batch_size = 128
    epoches = 10
    loss = 0.
    data_dir = './'
    model_path = 'BP.pkl'
    # Note: the FashionMNIST datasets below are prepared but unused here; the
    # loaders actually in use come from dataLoader.LoadMNIST on plain MNIST.
    train_dataset = torchvision.datasets.FashionMNIST(data_dir, download=True, train=True, transform=transform)
    val_dataset = torchvision.datasets.FashionMNIST(root=data_dir, download=True, train=False, transform=transform)
    # train_dataloader = DataLoader(dataset=train_dataset, batch_size=128, shuffle=True, num_workers=0)
    # test_dataloader = DataLoader(dataset=val_dataset, batch_size=128, num_workers=0, shuffle=False)
    train_dataloader, test_dataloader = dataLoader.LoadMNIST('../data/MNIST', transform, batch_size, False)
    print('Data preparation complete!')
trainLoss = 0.
testLoss = 0.
learning_rate = 1e-2
start_epoch = 0
SoftmaxWithXent = nn.CrossEntropyLoss()
# define optimization algorithm
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=1e-04)
print('epoch to run:{} learning rate:{}'.format(epoches, learning_rate))
for epoch in range(start_epoch, start_epoch + epoches):
train_N = 0.
train_n = 0.
trainLoss = 0.
model.train()
for batch, [trainX, trainY] in enumerate(tqdm(train_dataloader, ncols=10)):
train_n = len(trainX)
train_N += train_n
trainX = trainX.to(device)
trainY = trainY.to(device).long()
trainX = trainX.reshape(len(trainX), -1)
optimizer.zero_grad()
predY = model(trainX)
loss = SoftmaxWithXent(predY, trainY)
loss.backward() # get gradients on params
optimizer.step() # SGD update
trainLoss += loss.detach().cpu().numpy()
trainLoss /= train_N
test_N = 0.
testLoss = 0.
correct = 0.
model.eval()
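        # (Wrapping the evaluation below in torch.no_grad() would avoid
        # tracking gradients during testing.)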
for batch, [testX, testY] in enumerate(tqdm(test_dataloader, ncols=10)):
test_n = len(testX)
test_N += test_n
testX = testX.to(device)
testY = testY.to(device).long()
testX = testX.reshape(len(testX), -1)
predY = model(testX)
loss = SoftmaxWithXent(predY, testY)
testLoss += loss.detach().cpu().numpy()
_, predicted = torch.max(predY.data, 1)
            correct += (predicted == testY).sum().item()
testLoss /= test_N
acc = correct / test_N
print('epoch:{} train loss:{} testloss:{} acc:{}'.format(epoch, trainLoss, testLoss, acc))
if not os.path.exists('./mnist_model'):
os.mkdir('mnist_model')
torch.save(model.state_dict(), './mnist_model/blackBP.pth')
    print('Model saved.')
| 37.351351 | 114 | 0.655813 | 523 | 4,146 | 5.057361 | 0.288719 | 0.030624 | 0.024197 | 0.017391 | 0.093762 | 0.049149 | 0 | 0 | 0 | 0 | 0 | 0.018762 | 0.228654 | 4,146 | 110 | 115 | 37.690909 | 0.808318 | 0.062229 | 0 | 0.040816 | 0 | 0 | 0.043792 | 0.00644 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030612 | false | 0 | 0.204082 | 0 | 0.265306 | 0.040816 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
894f3fc69273e878e3689fd473a5d926384286e8 | 325 | py | Python | Algorithms/Sorting/Insertion_Sort_Part2.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | 1 | 2017-12-02T14:23:44.000Z | 2017-12-02T14:23:44.000Z | Algorithms/Sorting/Insertion_Sort_Part2.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | null | null | null | Algorithms/Sorting/Insertion_Sort_Part2.py | gauthamkrishna-g/HackerRank | 472d7a56fc1c1c4f8f03fcabc09d08da4000efde | [
"MIT"
] | null | null | null | n = int(input())
ar = [int(ar_temp) for ar_temp in input().strip().split(' ')]
for i in range(n):
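    # Insert ar[i] into the already-sorted prefix ar[0..i-1].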
temp = ar[i]
j = i
while j > 0 and ar[j-1] > temp:
ar[j] = ar[j-1]
j -= 1
ar[j] = temp
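    # Print the array after every pass except the first (i == 0).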
if i > 0:
for k in ar:
print (k, end=' ')
print ()
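# Illustrative run (hypothetical input):
#   input : 5
#           2 4 6 8 3
#   output: 2 4 6 8 3
#           2 4 6 8 3
#           2 4 6 8 3
#           2 3 4 6 8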
| 23.214286 | 62 | 0.396923 | 53 | 325 | 2.396226 | 0.377358 | 0.094488 | 0.062992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027174 | 0.433846 | 325 | 13 | 63 | 25 | 0.663043 | 0 | 0 | 0 | 0 | 0 | 0.00641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
89544fe5be471dfc862fbd416e5ab8a4d1aa2ce5 | 1,152 | py | Python | patchsim_experiment/experiment_extra_patches_1time_patchsim/exp_patchsim.py | poracle100/poracle-experiments | 2582e7a0b0380bac810d49a75eb33f7a0626d6d8 | [
"Apache-2.0"
] | null | null | null | patchsim_experiment/experiment_extra_patches_1time_patchsim/exp_patchsim.py | poracle100/poracle-experiments | 2582e7a0b0380bac810d49a75eb33f7a0626d6d8 | [
"Apache-2.0"
] | null | null | null | patchsim_experiment/experiment_extra_patches_1time_patchsim/exp_patchsim.py | poracle100/poracle-experiments | 2582e7a0b0380bac810d49a75eb33f7a0626d6d8 | [
"Apache-2.0"
] | 2 | 2021-03-29T08:13:34.000Z | 2021-04-23T08:51:18.000Z | #!/usr/bin/python
from datetime import datetime
import os
from os.path import join
import json
i = 0
configs_dir = '/poracle-experiments/extra_configs/'
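# Each config is a JSON file whose relevant fields look like the sketch
# below; the values are placeholders, only the keys are used by this script:
#   {"project": "...", "bug_id": "...", "ID": "...", "correctness": "Incorrect"}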
for subroot, dirs, files in os.walk(configs_dir):
# run('{}/cleanup'.format(root_dir))
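    # First pass: run every patch not labelled 'Correct'.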
for config_file in files:
        with open(join(configs_dir, config_file), "r") as read_file:
            data = json.load(read_file)
        if data['correctness'] == 'Correct':
            continue
        run_args = data['project'] + ' ' + data['bug_id'] + ' ' + data['ID']
        cmd = 'timeout 3h python3 run.py ' + run_args
print(cmd)
#os.system(cmd)
i = i + 1
print(i)
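    # Second pass: run every patch not labelled 'Incorrect'.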
for config_file in files:
        with open(join(configs_dir, config_file), "r") as read_file:
            data = json.load(read_file)
        if data['correctness'] == 'Incorrect':
            continue
        run_args = data['project'] + ' ' + data['bug_id'] + ' ' + data['ID']
        cmd = 'timeout 3h python3 run.py ' + run_args
print(cmd)
#os.system(cmd)
i = i + 1
print(i)
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)
| 30.315789 | 71 | 0.577257 | 154 | 1,152 | 4.201299 | 0.38961 | 0.061824 | 0.040185 | 0.046368 | 0.574961 | 0.574961 | 0.574961 | 0.574961 | 0.574961 | 0.574961 | 0 | 0.008373 | 0.274306 | 1,152 | 37 | 72 | 31.135135 | 0.76555 | 0.068576 | 0 | 0.580645 | 0 | 0 | 0.171188 | 0.032741 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16129 | 0 | 0.16129 | 0.16129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |