hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
76959b57c9321fc06a543414d55880c4bcc1b129 | 249 | py | Python | zeromq_rpc/client_pushpull.py | cr0hn/TestingBench | 37975343cf9ccb019e8dc42404b5b321285b04b3 | [
"BSD-3-Clause"
] | 5 | 2018-05-10T19:50:29.000Z | 2018-05-10T20:07:08.000Z | zeromq_rpc/client_pushpull.py | cr0hn/TestingBench | 37975343cf9ccb019e8dc42404b5b321285b04b3 | [
"BSD-3-Clause"
] | null | null | null | zeromq_rpc/client_pushpull.py | cr0hn/TestingBench | 37975343cf9ccb019e8dc42404b5b321285b04b3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import zerorpc
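# A zerorpc Puller dispatches each message pushed by a matching Pusher
# to the method of the wrapped object named in the message.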
class TestPuller(object):
def test(self, msg):
print(msg.split('.', 1)[0])
def main():
p = zerorpc.Puller(TestPuller())
p.connect('tcp://127.0.0.1:8080')
p.run()
if __name__ == '__main__':
main()
| 13.105263 | 34 | 0.606426 | 37 | 249 | 3.864865 | 0.702703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.062201 | 0.160643 | 249 | 18 | 35 | 13.833333 | 0.62201 | 0.084337 | 0 | 0 | 0 | 0 | 0.128319 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.1 | 0 | 0.4 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7696820fd42b35f43ad696c61bc30700ea9558d4 | 744 | py | Python | textts.py | maanksa/text-to-speach-using-python | 3708d1253b8bbce85074804331e21394b04d1e08 | [
"Apache-2.0"
] | null | null | null | textts.py | maanksa/text-to-speach-using-python | 3708d1253b8bbce85074804331e21394b04d1e08 | [
"Apache-2.0"
] | null | null | null | textts.py | maanksa/text-to-speach-using-python | 3708d1253b8bbce85074804331e21394b04d1e08 | [
"Apache-2.0"
] | 1 | 2021-07-16T07:19:53.000Z | 2021-07-16T07:19:53.000Z |
from ibm_watson import TextToSpeechV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
url= 'https://api.us-south.text-to-speech.watson.cloud.ibm.com/instances/b5ca51fb-56d7-4e2b-827d-3ea50ad17dc3'
apikey= 'J-PsrlP1DwzglNm-kwiEsNuAjHXmGdpKe-roAbUjjjra'
# Setup Service
authenticator = IAMAuthenticator(apikey)
tts = TextToSpeechV1(authenticator=authenticator)
tts.set_service_url(url)
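# Read the source text, strip the newlines, and join the lines into one string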
with open('churchill.txt', 'r') as f:
text = f.readlines()
text = [line.replace('\n', '') for line in text]
text = ''.join(str(line) for line in text)
with open('./churchill.mp3', 'wb') as audio_file:
res = tts.synthesize(text, accept='audio/mp3', voice='en-US_AllisonV3Voice').get_result()
audio_file.write(res.content) | 41.333333 | 111 | 0.752688 | 102 | 744 | 5.392157 | 0.627451 | 0.025455 | 0.061818 | 0.047273 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034639 | 0.107527 | 744 | 18 | 112 | 41.333333 | 0.793675 | 0.017473 | 0 | 0 | 0 | 0.071429 | 0.293539 | 0.061798 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
769908792b127abeed255023abcee6228e8ccf59 | 598 | py | Python | teamwake.py | chibacchi01/discordpy-startup | c3e5d9f6b9c73a0b304e08a061c36984623f354f | [
"MIT"
] | null | null | null | teamwake.py | chibacchi01/discordpy-startup | c3e5d9f6b9c73a0b304e08a061c36984623f354f | [
"MIT"
] | null | null | null | teamwake.py | chibacchi01/discordpy-startup | c3e5d9f6b9c73a0b304e08a061c36984623f354f | [
"MIT"
] | null | null | null | import random
import sys
def doTeamwake(lst):
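    # Shuffle a copy of the player list, then split it into two halves.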
team = list(lst)
random.shuffle(team)
#print(len(team))
team1 = team[0:len(team)//2]
team2 = team[len(team)//2:len(team)]
#print(team)
return team1,team2
def makeResult(team,team1,team2):
mes1 = 'チーム' + team1[0] + ': \n'
mes2 = 'チーム' + team2[0] + ': \n'
for i in range (len(team1)):
mes1 += team1[i] + '\n'
for i in range (len(team2)):
if(i == len(team2) - 1):
mes2 += team2[i]
else:
mes2 += team2[i] + '\n'
result = (mes1 + '\n' + mes2)
return result | 26 | 40 | 0.521739 | 84 | 598 | 3.714286 | 0.345238 | 0.089744 | 0.051282 | 0.044872 | 0.096154 | 0.096154 | 0 | 0 | 0 | 0 | 0 | 0.063981 | 0.294314 | 598 | 23 | 41 | 26 | 0.675355 | 0.045151 | 0 | 0 | 0 | 0 | 0.035088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
769aad2c61d0547afe4d66a3ecb918c9dbccb772 | 6,637 | py | Python | notebooks_for_development/teff_retrieval_plots.py | mwanakijiji/rrlyrae_metallicity | 1aa867eb9c96dba433271207efdf758cc7849360 | [
"MIT"
] | null | null | null | notebooks_for_development/teff_retrieval_plots.py | mwanakijiji/rrlyrae_metallicity | 1aa867eb9c96dba433271207efdf758cc7849360 | [
"MIT"
] | 15 | 2019-11-05T17:43:00.000Z | 2022-01-12T16:29:59.000Z | notebooks_for_development/teff_retrieval_plots.py | mwanakijiji/rrlyrae_metallicity | 1aa867eb9c96dba433271207efdf758cc7849360 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This makes plots showing the effective temperature retrievals based on synthetic spectra
# produced by R.W.
# Created from parent restacking_scraped_data.ipynb 2021 March 17 by E.S.
# In[1]:
import pandas as pd
#from astropy.io import fits
from astropy.io.fits import getdata
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# In[2]:
# name of csv file with EWs as produced by pipeline
ew_good_data_poststack_file_name = "/Users/bandari/Documents/git.repos/rrlyrae_metallicity/rrlyrae_metallicity/ew_products/20210225_restacked_ew_info_good_only.csv"
# read in
df_poststack = pd.read_csv(ew_good_data_poststack_file_name)
# In[49]:
def line_fit(x_data_pass, y_data_pass):
    # keep only points with Teff between 5900 and 7350 K
#x_data_rrl = x_data_pass.where(np.logical_and(x_data_pass>=5900,x_data_pass<=7350))
#y_data_rrl = x_data_pass.where(np.logical_and(x_data_pass>=5900,x_data_pass<=7350))
x_data_rrl = x_data_pass[np.where(np.logical_and(y_data_pass>=5900,y_data_pass<=7350))]
y_data_rrl = y_data_pass[np.where(np.logical_and(y_data_pass>=5900,y_data_pass<=7350))]
coeff, cov = np.polyfit(x_data_rrl, y_data_rrl, 1, full=False, cov=True)
m = coeff[0]
b = coeff[1]
err_m = np.sqrt(np.diag(cov))[0]
err_b = np.sqrt(np.diag(cov))[1]
print("---------")
print("Note stuff outside of 6000-7350 K is not being considered")
print("m:")
print(m)
print("err_m:")
print(err_m)
print("b:")
print(b)
print("err_b:")
print(err_b)
return m, b
# In[44]:
# plot: how do Balmer lines scale with Teff?
plt.clf()
plt.title("Scaling of lines with Hdelta")
plt.scatter(df_poststack["Teff"],df_poststack["EW_Hbeta"], s=3, label="Hbeta")
plt.scatter(df_poststack["Teff"],np.add(df_poststack["EW_Hgamma"],6), s=3, label="Hgamma+6")
plt.scatter(df_poststack["Teff"],np.add(df_poststack["EW_Hdelta"],12), s=3, label="Hdel+12")
plt.scatter(df_poststack["Teff"],np.add(df_poststack["EW_Balmer"],18), s=3, label="Net Balmer+18")
plt.scatter(df_poststack["Teff"],np.add(df_poststack["EW_Heps"],24), s=3, label="Heps+24")
#plt.ylim([0,70])
plt.xlabel("Teff (K)")
plt.ylabel("EW (Angstr)")
plt.title("Balmer EW trend with Teff")
plt.legend(ncol=5)
plt.show()
#plt.savefig("junk_balmer_rescalings.pdf")
# In[55]:
y_data_metalrich = df_poststack["Teff"].where(df_poststack["FeH"] > -2.9).dropna().values.astype(float)
# In[61]:
x_data_Balmer_metalrich = df_poststack["EW_Balmer"].where(df_poststack["FeH"] > -2.9).dropna()
# In[62]:
x_data_Balmer_metalrich
# In[56]:
y_data_metalrich
# In[63]:
# find linear trends of {net Balmer, Hdelta, and Hgamma} EW with Teff, entire Teff range
y_data = df_poststack["Teff"].values.astype(float)
# fit a straight line: net Balmer
x_data_Balmer = df_poststack["EW_Balmer"].values.astype(float)
m_Balmer, b_Balmer = line_fit(x_data_Balmer,y_data)
# same, except that [Fe/H] = -3 is neglected
x_data_Balmer_metalrich = df_poststack["EW_Balmer"].where(df_poststack["FeH"] > -2.9).dropna().values.astype(float)
y_data_metalrich = df_poststack["Teff"].where(df_poststack["FeH"] > -2.9).dropna().values.astype(float)
m_Balmer_metalrich, b_Balmer_metalrich = line_fit(x_data_Balmer_metalrich,y_data_metalrich)
# fit a straight line: Hdelta
x_data_Hdelta = df_poststack["EW_Hdelta"].values.astype(float)
m_Hdelta, b_Hdelta = line_fit(x_data_Hdelta,y_data)
# fit a straight line: Hgamma
x_data_Hgamma = df_poststack["EW_Hgamma"].values.astype(float)
m_Hgamma, b_Hgamma = line_fit(x_data_Hgamma,y_data)
# In[67]:
# calculate retrieved Teff and add new columns to DataFrame to make the plotting easier
df_poststack["Teff_retrieved_Balmer"] = np.add(np.multiply(df_poststack["EW_Balmer"],m_Balmer),b_Balmer)
df_poststack["Teff_retrieved_Hdelta"] = np.add(np.multiply(df_poststack["EW_Hdelta"],m_Hdelta),b_Hdelta)
df_poststack["Teff_retrieved_Hgamma"] = np.add(np.multiply(df_poststack["EW_Hgamma"],m_Hgamma),b_Hgamma)
df_poststack["Teff_retrieved_Balmer_metalrich"] = np.add(np.multiply(df_poststack["EW_Balmer"],m_Balmer_metalrich),b_Balmer_metalrich)
colormap = "Reds"
# array of metallicities
feh_values = np.sort(df_poststack["FeH"].drop_duplicates().values)
norm = matplotlib.colors.Normalize(vmin=np.min(feh_values),vmax=np.max(feh_values))
# retrieved Balmer values
plt.clf()
colormap="Reds"
norm = matplotlib.colors.Normalize(vmin=np.min(feh_values),vmax=np.max(feh_values))
f, (a0, a1) = plt.subplots(nrows=2, ncols=1, gridspec_kw={'height_ratios': [2, 1]}, sharex=True)
a0.axvspan(6000, 7250, color='y', alpha=0.5, lw=0,zorder=0) # RRLs in instability strip (Catelan 2015)
a1.axvspan(6000, 7250, color='y', alpha=0.5, lw=0,zorder=0)
a0.plot(df_poststack["Teff"],df_poststack["Teff"],zorder=1,linestyle="--",color="k")
a1.plot([np.min(df_poststack["Teff"]),np.max(df_poststack["Teff"])],[0,0],zorder=1,linestyle="--",color="k")
a0.scatter(df_poststack["Teff"],
df_poststack["Teff_retrieved_Balmer"],
c=df_poststack["FeH"],
cmap=colormap, norm=norm, edgecolor="k",zorder=2)
a1.scatter(df_poststack["Teff"],
np.subtract(df_poststack["Teff_retrieved_Balmer_metalrich"],df_poststack["Teff"]),
c=df_poststack["FeH"],
cmap=colormap, norm=norm, edgecolor="k",zorder=2)
'''
# annotation to check the color mapping
for t in range(0,len(df_poststack["FeH"])):
plt.annotate(str(df_poststack["FeH"][t]), (df_poststack["Teff"][t],df_poststack["Teff_retrieved_Balmer"][t]))
'''
# kludge to add legend while mapping colors correctly
for i in range(0,len(feh_values)):
# indices reversed to get the order descending in the legend
a0.scatter([0], [0], cmap=colormap, norm=norm, c=feh_values[-i-1],
edgecolor="k", label="[Fe/H]="+str(feh_values[-i-1]))
print(feh_values[i])
a0.set_ylabel("Retrieved T$_{eff}$")
a1.set_xlabel("Injected T$_{eff}$")
a1.set_ylabel("Retrieved T$_{eff}$ - Injected T$_{eff}$\n(based on trend for [Fe/H] $\geq$ -2.5)")
f.canvas.draw() # need before legend to render
a0.set_xlim([5500,8000])
a0.set_ylim([5500,8500])
a0.legend(loc="lower right")
plt.show()
print("USE NOTEBOOK VERSION OF THIS! OTHERWISE THE LEGEND DOESN'T HAVE RIGHT HANDLES!")
#plt.savefig("junk.pdf")
#import ipdb; ipdb.set_trace()
f.savefig("junk.pdf")
# In[ ]:
# calculate BIC to find best model
# In[ ]:
'''
def pred_teff(EW_pass,m_pass,b_pass):
teff_pass = np.add(np.multiply(EW_pass,m_pass),b_pass)
return teff_pass
# In[ ]:
Teff_model = pred_teff(df_poststack["EW_Balmer"])
'''
| 28.363248 | 164 | 0.717644 | 1,099 | 6,637 | 4.1101 | 0.250227 | 0.116892 | 0.076378 | 0.034093 | 0.394731 | 0.32743 | 0.252159 | 0.239761 | 0.239761 | 0.239761 | 0 | 0.033408 | 0.120536 | 6,637 | 233 | 165 | 28.484979 | 0.740449 | 0.20235 | 0 | 0.157303 | 0 | 0.011236 | 0.190486 | 0.056219 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0.033708 | 0.05618 | 0 | 0.078652 | 0.134831 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
769c61429859f2722eb0d1cae80d73e5e4f5da1f | 5,704 | py | Python | models/lstm/model.py | ErikHumphrey/sustain-seq2seq | c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4 | [
"Apache-2.0"
] | 4 | 2019-05-09T19:47:48.000Z | 2020-04-11T13:58:31.000Z | models/lstm/model.py | ErikHumphrey/sustain-seq2seq | c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4 | [
"Apache-2.0"
] | null | null | null | models/lstm/model.py | ErikHumphrey/sustain-seq2seq | c4787f0ca1047d01385e4fa4ffde59c6a8ab4cc4 | [
"Apache-2.0"
] | 4 | 2018-12-05T01:52:22.000Z | 2019-11-01T01:01:52.000Z | import sys, os
sys.path.insert(0, '../..')
from collections import OrderedDict
import torch
from torch.autograd import Variable  # used below when dec_transfer_hidden is False
import torch.nn as nn
from models.components.encodersdecoders.EncoderDecoder import EncoderDecoder
class MyEncoderDecoder(EncoderDecoder):
def __init__(self, src_lookup, tgt_lookup, encoder, decoder, dec_transfer_hidden, device):
super().__init__(src_lookup, tgt_lookup, encoder, decoder, device)
self.dec_transfer_hidden = dec_transfer_hidden
if dec_transfer_hidden == True:
assert encoder.num_layers == decoder.num_layers, "For transferring the last hidden state from encoder to decoder, both must have the same number of layers."
# Transform h from encoder's [num_layers * 2, batch_size, enc_hidden_dim/2] to decoder's [num_layers * 1, batch_size, dec_hidden_dim], same for c; batch_size = 1 (last timestep only)
self.h_state_linear = nn.Linear(int(encoder.hidden_dim * encoder.num_layers/1), decoder.hidden_dim * decoder.num_layers * 1)
self.c_state_linear = nn.Linear(int(encoder.hidden_dim * encoder.num_layers/1), decoder.hidden_dim * decoder.num_layers * 1)
self.to(self.device)
def forward(self, x_tuple, y_tuple, teacher_forcing_ratio=0.):
"""
Args:
x (tensor): The input of the decoder. Shape: [batch_size, seq_len_enc].
y (tensor): The input of the decoder. Shape: [batch_size, seq_len_dec].
Returns:
The output of the Encoder-Decoder with attention. Shape: [batch_size, seq_len_dec, n_class].
"""
x, x_lenghts, x_mask = x_tuple[0], x_tuple[1], x_tuple[2]
y, y_lenghts, y_mask = y_tuple[0], y_tuple[1], y_tuple[2]
batch_size = x.shape[0]
# Calculates the output of the encoder
encoder_dict = self.encoder.forward(x_tuple)
enc_output = encoder_dict["output"]
enc_states = encoder_dict["states"]
# enc_states is a tuple of size ( h=[enc_num_layers*2, batch_size, enc_hidden_dim/2], c=[same-as-h] )
if self.dec_transfer_hidden == True:
dec_states = self.transfer_hidden_from_encoder_to_decoder(enc_states)
else:
hidden = Variable(next(self.parameters()).data.new(batch_size, self.decoder.num_layers, self.decoder.hidden_dim), requires_grad=False)
cell = Variable(next(self.parameters()).data.new(batch_size, self.decoder.num_layers, self.decoder.hidden_dim), requires_grad=False)
dec_states = ( hidden.zero_(), cell.zero_() )
# Calculates the output of the decoder.
encoder_dict = self.decoder.forward(x_tuple, y_tuple, enc_output, dec_states, teacher_forcing_ratio)
output = encoder_dict["output"]
attention_weights = encoder_dict["attention_weights"]
# Creates a BOS tensor that must be added to the beginning of the output. [batch_size, 1, dec_vocab_size]
bos_tensor = torch.zeros(batch_size, 1, self.decoder.vocab_size).to(self.device)
# Marks the corresponding BOS position with a probability of 1.
bos_tensor[:, :, self.tgt_bos_token_id] = 1
# Concatenates the BOS tensor with the output. [batch_size, dec_seq_len-1, dec_vocab_size] -> [batch_size, dec_seq_len, dec_vocab_size]
output = torch.cat((bos_tensor, output), dim=1)
return output, attention_weights
def run_batch(self, X_tuple, y_tuple, criterion=None, tf_ratio=.0, aux_loss_weight = 0.5):
(x_batch, x_batch_lenghts, x_batch_mask) = X_tuple
(y_batch, y_batch_lenghts, y_batch_mask) = y_tuple
if hasattr(self.decoder.attention, 'reset_coverage'):
self.decoder.attention.reset_coverage(x_batch.size()[0], x_batch.size()[1])
output, attention_weights = self.forward((x_batch, x_batch_lenghts, x_batch_mask), (y_batch, y_batch_lenghts, y_batch_mask), tf_ratio)
if criterion is not None:
loss = criterion(output.view(-1, self.decoder.vocab_size), y_batch.contiguous().flatten())
else:
loss = 0
return output, loss, attention_weights, {}
def transfer_hidden_from_encoder_to_decoder(self, enc_states):
batch_size = enc_states[0].shape[1]
# Reshapes the shape of the hidden and cell state of the encoder LSTM layers. Permutes the batch_size to
# the first dimension, and reshapes them to a 2-D tensor.
# [enc_num_layers * 2, batch_size, enc_hidden_dim] -> [batch_size, enc_num_layers * enc_hidden_dim * 2].
enc_states = (enc_states[0].permute(1, 0, 2).reshape(batch_size, -1),
enc_states[1].permute(1, 0, 2).reshape(batch_size, -1))
# Transforms the hidden and the cell state of the encoder lstm layer to correspond to the decoder lstm states dimensions.
# [batch_size, enc_num_layers * enc_hidden_dim * 2] -> [batch_size, dec_num_layers * dec_hidden_dim].
dec_states = (torch.tanh(self.h_state_linear(enc_states[0])), torch.tanh(self.c_state_linear(enc_states[1])))
# Reshapes the states to have the correct shape for the decoder lstm states dimension. Reshape the states from
# 2-D to 3-D sequence. Permutes the batch_size to the second dimension.
# [batch_size, dec_num_layers * dec_hidden_dim] -> [dec_num_layers, batch_size, dec_hidden_dim].
dec_states = (dec_states[0].reshape(batch_size, self.decoder.num_layers, self.decoder.hidden_dim).permute(1, 0, 2),
dec_states[1].reshape(batch_size, self.decoder.num_layers, self.decoder.hidden_dim).permute(1, 0, 2))
return dec_states
| 54.846154 | 190 | 0.677069 | 826 | 5,704 | 4.394673 | 0.181598 | 0.071901 | 0.030854 | 0.014325 | 0.436364 | 0.35124 | 0.280165 | 0.280165 | 0.233333 | 0.166942 | 0 | 0.013529 | 0.222475 | 5,704 | 104 | 191 | 54.846154 | 0.804961 | 0.296809 | 0 | 0.037037 | 0 | 0 | 0.040335 | 0 | 0 | 0 | 0 | 0 | 0.018519 | 1 | 0.074074 | false | 0 | 0.092593 | 0 | 0.240741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76a0ced258b379bc8d32233ac40e74815c8cb7f3 | 468 | py | Python | bot/cogs/dota.py | tsalmela/Rankaisijabot | 7feb3522a2770314fe98d40a0bf361b6b60a7386 | [
"MIT"
] | 1 | 2021-02-06T16:50:02.000Z | 2021-02-06T16:50:02.000Z | bot/cogs/dota.py | tsalmela/Rankaisijabot | 7feb3522a2770314fe98d40a0bf361b6b60a7386 | [
"MIT"
] | 1 | 2022-01-20T09:50:32.000Z | 2022-01-20T09:50:32.000Z | bot/cogs/dota.py | tsalmela/Rankaisijabot | 7feb3522a2770314fe98d40a0bf361b6b60a7386 | [
"MIT"
] | 1 | 2022-01-20T08:42:41.000Z | 2022-01-20T08:42:41.000Z | import discord
from discord.ext import commands
class Dota(commands.Cog, name="dota"):
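    # Cog bundling commands that reply with static images.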
def __init__(self, bot):
self.bot = bot
@commands.command(name="dotaukkoja", aliases=["ukkoja"])
async def ukkoja(self, ctx):
await ctx.send(file=discord.File("images/dota_ukkoja.png"))
@commands.command(name="ei")
async def ei(self, ctx):
await ctx.send(file=discord.File("images/ei.png"))
def setup(bot):
bot.add_cog(Dota(bot)) | 27.529412 | 67 | 0.66453 | 67 | 468 | 4.552239 | 0.402985 | 0.045902 | 0.12459 | 0.098361 | 0.262295 | 0.262295 | 0.262295 | 0.262295 | 0.262295 | 0 | 0 | 0 | 0.179487 | 468 | 17 | 68 | 27.529412 | 0.794271 | 0 | 0 | 0 | 0 | 0 | 0.121535 | 0.046908 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76a41b6ac26a0b5b24f85bacc5004a2ef1d5c340 | 6,555 | py | Python | src/theory/compute_parameters.py | RPGroup-PBoC/chann_cap | f2a826166fc2d47c424951c616c46d497ed74b39 | [
"MIT"
] | 2 | 2020-08-21T04:06:12.000Z | 2022-02-09T07:36:58.000Z | src/theory/compute_parameters.py | RPGroup-PBoC/chann_cap | f2a826166fc2d47c424951c616c46d497ed74b39 | [
"MIT"
] | null | null | null | src/theory/compute_parameters.py | RPGroup-PBoC/chann_cap | f2a826166fc2d47c424951c616c46d497ed74b39 | [
"MIT"
] | 2 | 2020-04-29T17:43:28.000Z | 2020-09-09T00:20:16.000Z | import pickle
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Seaborn, useful for graphics
import seaborn as sns
# Import the utils for this project
import ccutils
# Define mRNA rate
# gm = 0.00284 # s**-1
# http://bionumbers.hms.harvard.edu/bionumber.aspx?id=105717&ver=3&trm=lacZ%20mRNA%20lifetime&org=
gm = 1 / (3 * 60)
# Define cell volume
Vcell = 2.15 # fL
# Define diffusion limiting rate
k0 = 2.7E-3
# =============================================================================
# Single promoter
# =============================================================================
# Load the flat-chain
with open('../../data/mcmc/lacUV5_constitutive_mRNA_prior.pkl', 'rb') as file:
unpickler = pickle.Unpickler(file)
gauss_flatchain = unpickler.load()
gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# reassign the index with the new entries
index = df_mcmc.columns
# map value of the parameters
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
kpon, kpoff, rm = df_mcmc.iloc[max_idx, :]
# ea range
kpon_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 0], 0.95)
kpoff_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 1], 0.95)
rm_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 2], 0.95)
# Print results
print('Single gene copy parameters: ')
print("""
The most probable parameters for the model
------------------------------------------
kp_on = {0:.1f} -{1:0.1f} +{2:0.1f}
kp_off = {3:.1f} -{4:0.1f} +{5:0.1f}
rm = {6:.1f} -{7:0.1f} +{8:0.1f}
""".format(kpon, np.abs(kpon-kpon_hpd[0]), np.abs(kpon-kpon_hpd[1]),\
kpoff, np.abs(kpoff-kpoff_hpd[0]), np.abs(kpoff-kpoff_hpd[1]),\
rm, np.abs(rm-rm_hpd[0]), np.abs(rm-rm_hpd[1])))
# Print results
print("""
The most probable parameters for the model in seconds^-1
--------------------------------------------------------
kp_on = {0:.3f} -{1:0.3f} +{2:0.3f} s^-1
kp_off = {3:.3f} -{4:0.3f} +{5:0.3f} s^-1
rm = {6:.3f} -{7:0.3f} +{8:0.3f} s^-1
""".format(kpon * gm, np.abs(kpon-kpon_hpd[0]) * gm,
np.abs(kpon-kpon_hpd[1]) * gm,
kpoff * gm, np.abs(kpoff-kpoff_hpd[0]) * gm,
np.abs(kpoff-kpoff_hpd[1]) * gm,
rm * gm, np.abs(rm-rm_hpd[0]) * gm, np.abs(rm-rm_hpd[1]) * gm))
# =============================================================================
# Double promoter
# =============================================================================
# Load the flat-chain
with open('../../data/mcmc/lacUV5_constitutive_mRNA_double_expo.pkl',
'rb') as file:
unpickler = pickle.Unpickler(file)
gauss_flatchain = unpickler.load()
gauss_flatlnprobability = unpickler.load()
# Generate a Pandas Data Frame with the mcmc chain
index = ['kp_on', 'kp_off', 'rm']
# Generate a data frame out of the MCMC chains
df_mcmc = pd.DataFrame(gauss_flatchain, columns=index)
# reassign the index with the new entries
index = df_mcmc.columns
# map value of the parameters
max_idx = np.argmax(gauss_flatlnprobability, axis=0)
kpon_double, kpoff_double, rm_double = df_mcmc.iloc[max_idx, :]
# ea range
kpon_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 0], 0.95)
kpoff_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 1], 0.95)
rm_hpd = ccutils.stats.hpd(df_mcmc.iloc[:, 2], 0.95)
# Print results
print('Two-promoter model')
print("""
The most probable parameters for the model
------------------------------------------
kp_on = {0:.1f} -{1:0.1f} +{2:0.1f}
kp_off = {3:.1f} -{4:0.1f} +{5:0.1f}
rm = {6:.1f} -{7:0.1f} +{8:0.1f}
""".format(kpon_double, np.abs(kpon_double-kpon_hpd[0]),
np.abs(kpon_double-kpon_hpd[1]),
kpoff_double, np.abs(kpoff_double-kpoff_hpd[0]),
np.abs(kpoff_double-kpoff_hpd[1]),
rm_double, np.abs(rm_double-rm_hpd[0]),
np.abs(rm_double-rm_hpd[1])))
# Print results
print("""
The most probable parameters for the model in seconds^-1
--------------------------------------------------------
kp_on = {0:.3f} -{1:0.3f} +{2:0.3f} s^-1
kp_off = {3:.2f} -{4:0.2f} +{5:0.2f} s^-1
rm = {6:.1f} -{7:0.1f} +{8:0.1f} s^-1
""".format(kpon_double * gm, np.abs(kpon_double-kpon_hpd[0]) * gm,
np.abs(kpon_double-kpon_hpd[1]) * gm,
kpoff_double * gm, np.abs(kpoff_double-kpoff_hpd[0]) * gm,
np.abs(kpoff_double-kpoff_hpd[1]) * gm,
rm_double * gm, np.abs(rm_double-rm_hpd[0]) * gm,
np.abs(rm_double-rm_hpd[1]) * gm))
# =============================================================================
# Repressor rates
# =============================================================================
# Define binding energies of the different operators
energies = {'Oid': -17, 'O1': -15.3, 'O2': -13.9, 'O3': -9.7}
# Compute the rates for each repressor
kr_offs = {key: ccutils.model.kr_off_fun(value, k0,
kpon_double,
kpoff_double,
Vcell=Vcell) for key, value in
energies.items()}
# Print repressor rates
print("""
The most probable parameters for the repressor in seconds^-1
------------------------------------------------------------
""")
for key, value in kr_offs.items():
print('kr_off {0:s} = {1:.5f} s^-1'.format(key, value))
# =============================================================================
# Compute probability of each of the states
# =============================================================================
def prob_promoter(kr_on, kr_off, kp_on, kp_off, rm):
'''
Computes the probability of the three promoter states for a regulated
promoter
'''
P_B = (kr_off * kp_on) / (kp_off * kr_off + kp_off * kr_on + kr_off * kp_on)
P_E = (kp_off * kr_off) / (kp_off * kr_off + kp_off * kr_on + kr_off * kp_on)
P_R = (kp_off * kr_on) / (kp_off * kr_off + kp_off * kr_on + kr_off * kp_on)
return {'P_B': P_B, 'P_E': P_E, 'P_R': P_R}
# O1
R = 22
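# dividing the copy number R by (0.6022 * Vcell) converts molecules per
# cell to a concentration in nM before scaling by the diffusion-limited k0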
kr_on = 1 / Vcell / 0.6022 * k0 * R
probs_O1 = prob_promoter(kr_on, kr_offs['O1'], kpon_double,
kpoff_double, rm_double)
print('''
Probability of each promoter state for O1 - R{:d}
-------------------------------------------------
'''.format(R))
for key, value in probs_O1.items():
print('State {0:s} = {1:.5f}'.format(key, value))
| 34.140625 | 98 | 0.544165 | 962 | 6,555 | 3.554054 | 0.179834 | 0.035098 | 0.024569 | 0.031588 | 0.656332 | 0.640538 | 0.574437 | 0.464756 | 0.464756 | 0.460954 | 0 | 0.040911 | 0.175896 | 6,555 | 191 | 99 | 34.319372 | 0.592003 | 0.252632 | 0 | 0.37963 | 0 | 0.111111 | 0.275584 | 0.08497 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009259 | false | 0 | 0.064815 | 0 | 0.083333 | 0.092593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76a675361f75929eaef0ac54e1cad4d801933fa3 | 1,340 | py | Python | syne_tune/blackbox_repository/serialize.py | awslabs/syne-tune | 1dd8e157477b86db01047a9a7821780ea04389bc | [
"ECL-2.0",
"Apache-2.0"
] | 97 | 2021-11-18T17:14:30.000Z | 2022-03-29T00:33:12.000Z | syne_tune/blackbox_repository/serialize.py | awslabs/syne-tune | 1dd8e157477b86db01047a9a7821780ea04389bc | [
"ECL-2.0",
"Apache-2.0"
] | 54 | 2021-11-18T17:14:12.000Z | 2022-03-22T08:11:48.000Z | syne_tune/blackbox_repository/serialize.py | awslabs/syne-tune | 1dd8e157477b86db01047a9a7821780ea04389bc | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2021-11-29T11:47:32.000Z | 2022-02-24T15:28:11.000Z | from pathlib import Path
from typing import Optional, Dict
import json
import syne_tune.config_space as sp
def serialize_configspace(
path: str, configuration_space: Dict, fidelity_space: Optional[Dict] = None
):
path = Path(path)
with open(path / "configspace.json", "w") as f:
json.dump({k: sp.to_dict(v) for k, v in configuration_space.items()}, f)
if fidelity_space is not None:
with open(path / "fidelityspace.json", "w") as f:
json.dump({k: sp.to_dict(v) for k, v in fidelity_space.items()}, f)
def deserialize_configspace(path: str):
def open_if_exists(name):
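        # load the JSON file if it exists and rebuild each config-space domain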
config_path = Path(path) / name
if config_path.exists():
with open(config_path, "r") as file:
cs_space = json.load(file)
return {k: sp.from_dict(v) for k, v in cs_space.items()}
else:
return None
configuration_space = open_if_exists("configspace.json")
fidelity_space = open_if_exists("fidelityspace.json")
return configuration_space, fidelity_space
def serialize_metadata(path: str, metadata):
with open(path / "metadata.json", "w") as f:
json.dump(metadata, f)
def deserialize_metadata(path: str):
with open(Path(path) / "metadata.json", "r") as f:
metadata = json.load(f)
return metadata
| 31.162791 | 80 | 0.651493 | 190 | 1,340 | 4.442105 | 0.242105 | 0.077014 | 0.056872 | 0.028436 | 0.111374 | 0.111374 | 0.078199 | 0.078199 | 0.078199 | 0.078199 | 0 | 0 | 0.234328 | 1,340 | 42 | 81 | 31.904762 | 0.822612 | 0 | 0 | 0 | 0 | 0 | 0.073881 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.125 | 0 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76a770e1dff189dc92f8b5b60eccb33c71ae9285 | 3,356 | py | Python | src/trainer.py | alcaster/Wassa2018 | 216fa943c3f4489d320e73c0ff46ff0b6e1f4c5a | [
"MIT"
] | null | null | null | src/trainer.py | alcaster/Wassa2018 | 216fa943c3f4489d320e73c0ff46ff0b6e1f4c5a | [
"MIT"
] | null | null | null | src/trainer.py | alcaster/Wassa2018 | 216fa943c3f4489d320e73c0ff46ff0b6e1f4c5a | [
"MIT"
] | null | null | null | import logging
from typing import Optional
from tensorflow.python.estimator.export.export import build_raw_serving_input_receiver_fn
from paths import OUTPUTS
from src.data import one_hot_labels_fn, get_data_fn, labels_map
from src.net import Model
from src.utils.types import path
import tensorflow as tf
log = logging.getLogger(__name__)
class NetworkTrainer:
def __init__(self):
self.params = Params()
self.config = Config()
def train(self, train_path: path, test_path: path, model_name: Optional[str] = None):
if model_name:
self.config.checkpoint_path = self.config.checkpoint_path / model_name
self.config.checkpoint_path.mkdir(exist_ok=True, parents=True)
run_config = tf.estimator.RunConfig(
save_checkpoints_steps=self.config.save_checkpoints_steps,
save_summary_steps=self.config.save_summary_steps)
def model_fn(features, labels, mode):
model = Model(features, labels, self.params, mode)
return tf.estimator.EstimatorSpec(
mode,
{'label': model.prediction},
model.loss,
model.train_op,
{"acc": model.acc}
)
train_spec = tf.estimator.TrainSpec(self.create_input_fn(train_path), max_steps=self.params.max_steps)
eval_spec = tf.estimator.EvalSpec(self.create_input_fn(test_path), steps=100)
estimator = tf.estimator.Estimator(model_fn, self.config.checkpoint_path, run_config)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
def export(self):
assert self.config.checkpoint_path is not None
model_dir = str(self.config.checkpoint_path)
def model_fn(features, labels, mode):
sentence = features['sentence']
model = Model(sentence, labels, self.params, mode)
return tf.estimator.EstimatorSpec(
mode,
{'label': model.prediction}
)
estimator = tf.estimator.Estimator(model_fn, model_dir)
sentence = tf.placeholder(tf.string, [None], 'sentence')
serving_input_receiver_fn = build_raw_serving_input_receiver_fn(
{'sentence': sentence}
)
estimator.export_saved_model(
str(self.config.checkpoint_path / 'exported'),
serving_input_receiver_fn
)
log.info("Export complete")
def create_input_fn(self, src: path):
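        # tf.data pipeline: shuffle, one-hot the labels, batch, repeat forever, prefetch one batch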
def input_fn():
dataset = (tf.data.Dataset.from_generator(get_data_fn(src),
output_types=(tf.string, tf.uint8))
.shuffle(buffer_size=300)
.map(one_hot_labels_fn(self.params.num_classes), num_parallel_calls=8)
.batch(self.params.batch_size)
.repeat()
.prefetch(1)
)
return dataset
return input_fn
class Params:
def __init__(self):
self.num_classes = len(labels_map)
self.learning_rate = 1e-4
self.batch_size = 32
self.max_steps = 40_000
class Config:
def __init__(self):
self.checkpoint_path = OUTPUTS.path
self.save_checkpoints_steps = 500
self.save_summary_steps = 100
| 34.958333 | 110 | 0.625447 | 393 | 3,356 | 5.058524 | 0.287532 | 0.050302 | 0.070423 | 0.084507 | 0.229376 | 0.202213 | 0.074447 | 0.074447 | 0.074447 | 0.074447 | 0 | 0.010046 | 0.288141 | 3,356 | 95 | 111 | 35.326316 | 0.822101 | 0 | 0 | 0.118421 | 0 | 0 | 0.017878 | 0 | 0 | 0 | 0 | 0 | 0.013158 | 1 | 0.118421 | false | 0 | 0.105263 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76a9dbfc53f8c5660ceebd58e3b28042193f92c9 | 5,920 | py | Python | setup.py | ankane/isotree-1 | 67940e7afaf8332bf2ca95722cec90c56280b04f | [
"BSD-2-Clause"
] | 1 | 2020-09-18T17:21:49.000Z | 2020-09-18T17:21:49.000Z | setup.py | ankane/isotree-1 | 67940e7afaf8332bf2ca95722cec90c56280b04f | [
"BSD-2-Clause"
] | null | null | null | setup.py | ankane/isotree-1 | 67940e7afaf8332bf2ca95722cec90c56280b04f | [
"BSD-2-Clause"
] | null | null | null | try:
from setuptools import setup
from setuptools.extension import Extension
except:
from distutils.core import setup
from distutils.extension import Extension
import numpy as np
from Cython.Distutils import build_ext
from sys import platform
import sys, os, re
from os import environ
has_cereal = True
try:
import cycereal
cereal_dir = cycereal.get_cereal_include_dir()
except:
has_cereal = False
cereal_dir = "." ## <- placeholder
## https://stackoverflow.com/questions/724664/python-distutils-how-to-get-a-compiler-that-is-going-to-be-used
class build_ext_subclass( build_ext ):
def build_extensions(self):
c = self.compiler.compiler_type
# TODO: add entries for intel's ICC
if c == 'msvc': # visual studio
for e in self.extensions:
e.extra_compile_args = ['/openmp', '/O2', '/std:c++14']
### Note: MSVC never implemented C++11
elif (c == "clang") or (c == "clang++"):
for e in self.extensions:
e.extra_compile_args = ['-fopenmp', '-O3', '-march=native', '-std=c++17']
e.extra_link_args = ['-fopenmp']
### Note: when passing C++11 to CLANG, it complains about C++17 features in CYTHON_FALLTHROUGH
else: # gcc
for e in self.extensions:
e.extra_compile_args = ['-fopenmp', '-O3', '-march=native', '-std=c++11']
e.extra_link_args = ['-fopenmp']
### when testing with clang:
# e.extra_compile_args = ['-fopenmp=libiomp5', '-O3', '-march=native', '-std=c++11']
# e.extra_link_args = ['-fopenmp=libiomp5']
# e.extra_compile_args = ['-fopenmp=libiomp5', '-O2', '-march=native', '-std=c++11', '-stdlib=libc++', '-lc++abi']
# e.extra_link_args = ['-fopenmp=libiomp5', '-lc++abi']
# e.extra_compile_args = ['-O2', '-march=native', '-std=c++11']
### for testing (run with `LD_PRELOAD=libasan.so python script.py`)
# e.extra_compile_args = ["-std=c++11", "-fsanitize=address", "-static-libasan", "-ggdb"]
# e.extra_link_args = ["-fsanitize=address", "-static-libasan"]
### when testing for oneself
# e.extra_compile_args += ["-Wno-sign-compare", "-Wno-switch", "-Wno-maybe-uninitialized"]
## Note: apple will by default alias 'gcc' to 'clang', and will ship its own "special"
## 'clang' which has no OMP support and nowadays will purposefully fail to compile when passed
## '-fopenmp' flags. If you are using mac, and have an OMP-capable compiler,
## comment out the code below, or set 'use_omp' to 'True'.
if not use_omp:
for e in self.extensions:
e.extra_compile_args = [arg for arg in e.extra_compile_args if arg != '-fopenmp']
e.extra_link_args = [arg for arg in e.extra_link_args if arg != '-fopenmp']
build_ext.build_extensions(self)
use_omp = (("enable-omp" in sys.argv)
or ("-enable-omp" in sys.argv)
or ("--enable-omp" in sys.argv))
if use_omp:
sys.argv = [a for a in sys.argv if a not in ("enable-omp", "-enable-omp", "--enable-omp")]
if environ.get('ENABLE_OMP') is not None:
use_omp = True
if platform[:3] != "dar":
use_omp = True
### Shorthand for apple computer:
### uncomment line below
# use_omp = True
setup(
name = "isotree",
packages = ["isotree"],
version = '0.2.6',
description = 'Isolation-Based Outlier Detection, Distance, and NA imputation',
author = 'David Cortes',
author_email = 'david.cortes.rivera@gmail.com',
url = 'https://github.com/david-cortes/isotree',
keywords = ['isolation-forest', 'anomaly', 'outlier'],
    cmdclass = {'build_ext': build_ext_subclass},
    install_requires = ["numpy", "pandas>=0.24.0", "cython", "scipy"],
ext_modules = [Extension(
"isotree._cpp_interface",
sources=["isotree/cpp_interface.pyx",
"src/dealloc.cpp",
"src/merge_models.cpp", "src/serialize.cpp", "src/sql.cpp"],
include_dirs=[np.get_include(), ".", "./src", cereal_dir],
language="c++",
define_macros = [("_USE_MERSENNE_TWISTER", None),
("_ENABLE_CEREAL", None) if has_cereal else ("NO_CEREAL", None),
("_FOR_PYTHON", None),
("PY_GEQ_3_3", None)
if (sys.version_info[0] >= 3 and sys.version_info[1] >= 3) else
("PY_LT_3_3", None)]
)]
)
if not use_omp:
import warnings
apple_msg = "\n\n\nMacOS detected. Package will be built without multi-threading capabilities, "
apple_msg += "due to Apple's lack of OpenMP support in default clang installs. In order to enable it, "
apple_msg += "install the package directly from GitHub: https://www.github.com/david-cortes/isotree\n"
apple_msg += "Using 'python setup.py install enable-omp'. "
apple_msg += "You'll also need an OpenMP-capable compiler.\n\n\n"
warnings.warn(apple_msg)
if not has_cereal:
import warnings
msg = "\n\nWarning: cereal library not found. Package will be built "
msg += "without serialization (importing/exporting models) capabilities. "
msg += "In order to enable cereal, install package 'cycereal' and reinstall "
msg += "'isotree' by downloading the source files and running "
msg += "'python setup.py install'.\n"
warnings.warn(msg)
| 46.984127 | 130 | 0.568581 | 728 | 5,920 | 4.489011 | 0.340659 | 0.031212 | 0.03978 | 0.05202 | 0.174113 | 0.151163 | 0.108629 | 0.097001 | 0.097001 | 0.074357 | 0 | 0.012539 | 0.299493 | 5,920 | 125 | 131 | 47.36 | 0.7755 | 0.234291 | 0 | 0.181818 | 0 | 0.011364 | 0.282148 | 0.021618 | 0 | 0 | 0 | 0.008 | 0 | 1 | 0.011364 | false | 0 | 0.147727 | 0 | 0.170455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76ab9b88c9f7dc5c6592c990a9aea52a667da955 | 7,025 | py | Python | Mengascini-Spina/Sistemi-Digitali-M/Models/BaseModel.py | mattpoggi/SistemiDigitaliM20-21 | 202e520a571a2bb961851763f37e9293c3af400d | [
"MIT"
] | 9 | 2021-02-07T22:53:34.000Z | 2022-03-14T21:47:30.000Z | Mengascini-Spina/Sistemi-Digitali-M/Models/BaseModel.py | mattpoggi/SistemiDigitaliM20-21 | 202e520a571a2bb961851763f37e9293c3af400d | [
"MIT"
] | null | null | null | Mengascini-Spina/Sistemi-Digitali-M/Models/BaseModel.py | mattpoggi/SistemiDigitaliM20-21 | 202e520a571a2bb961851763f37e9293c3af400d | [
"MIT"
] | 18 | 2021-02-07T18:30:47.000Z | 2022-01-22T16:57:40.000Z | import time
from abc import ABC, abstractmethod
from contextlib import redirect_stdout
from pathlib import Path
from tensorflow.python.keras.backend import clear_session
from tensorflow.python.keras.models import Model
import tensorflow as tf
from Generators import DataGenerator
class BaseModel(ABC):
"""
Base class defining the endpoint to use to interact with a model
"""
def __init__(self, model_name: str, log_dir: Path = False, verbose: bool = True):
"""
:param model_name: name of the model, used for logging and saving it
:param log_dir: path of the dir in which to save the model and the tensorboard log
:param verbose: boolean indicating if it is necessary to print extensive information
in the console
"""
# check if the log folder is a valid folder
        if log_dir is not None and log_dir is not False:
assert (log_dir.is_dir())
self.logs = True
else:
self.logs = False
# the verbose parameter controls how many log info will be printed in the console
self.verbose = verbose
        # save the time of creation of this class; it will help us uniquely identify this specific train run
self.str_time = time.strftime("%b%d%Y%H%M%S", time.gmtime())
# save the model name and the directory in which to save the Logs
self.name = model_name
self.checkpoint_path = None
if self.logs:
self.parent_log_dir = log_dir
# create the path of the log folder for this train run
self.log_dir = self.parent_log_dir / "models" / self.name / self.str_time
# create the log folder
if not self.log_dir.is_dir():
self.log_dir.mkdir(parents=True, exist_ok=True)
# tensorboard has its own log directory
self.tensorboard_log_dir = self.parent_log_dir / "tensorboard" / self.name / self.str_time
#set the path to use to save a checkpoint
self.checkpoint_path = self.log_dir / 'best_model.h5'
# generating a unique name for the model depending on the time of its creation
self.name_with_time = self.name + " " + self.str_time
@abstractmethod
def build_model(self, input_shape, output_shape) -> Model:
"""
Function in charge of defining the model structure
        :param input_shape: tuple containing the shape of the data this model will receive as input
:param output_shape: tuple containing the shape of the output produced by this model
:return: Keras Sequential Model
"""
raise NotImplementedError
@property
@abstractmethod
def input_shape(self) -> tuple:
"""
This property returns the input shape of the model
:return: tuple
"""
raise NotImplementedError
@property
@abstractmethod
def output_shape(self) -> tuple:
"""
This property returns the output shape of the model
:return:
"""
raise NotImplementedError
def _get_callbacks(self) -> list:
"""
Function defining all the callbacks for the given model and returning them as a list.
        In particular, when logging is enabled each model uses the following 2 callbacks
        - checkpoint -> to save the model each time we find better weights
        - tensorboard -> to save the model logs and be able to compare the models
:return: list(keras.Callbacks)
"""
callbacks = []
if self.logs:
callbacks += [
tf.keras.callbacks.ModelCheckpoint(self.checkpoint_path, monitor='val_accuracy',
save_best_only=True,verbose=self.verbose),
tf.keras.callbacks.TensorBoard(log_dir=self.tensorboard_log_dir),
]
return callbacks
def _on_before_train(self):
"""
Set of actions to do right before the training phase
:return:
"""
self.training_start_time = time.time()
if self.verbose:
print("Model structure:")
print(self.model.summary())
print("The training phase of the model {} has started at:{}".format(self.name, self.training_start_time))
def _on_after_train(self):
"""
Set of actions to do right after the training phase
:return:
"""
self.training_time = time.time() - self.training_start_time
if self.verbose:
print("The model:{} has completed the training phase in: {}".format(self.name, self.training_time))
def train_model(self, training_data: DataGenerator, validation_data: DataGenerator, epochs: int, loss_function,
optimizer=None,
save_model: bool = False, save_summary: bool = True):
"""
Function in charge of training the model defined in the given class
:param training_data: DataGenerator class, generating the training data
:param validation_data: Datagenerator class, generating the validation data
:param optimizer: optimizer to use during training (tf.keras.optimizers.Adam(0.0001)),
:param loss_function: loss function to use
:param epochs: number of epochs to run
:param save_model: should the model be saved at the end of the training phase?
:param save_summary: save the summary of the model into the log folder
:return:
"""
        if optimizer is None:
optimizer = tf.keras.optimizers.Adam(0.0001)
# get the structure of the model as defined by the build function
self.model = self.build_model(self.input_shape, self.output_shape)
# compile the model
self.model.compile(optimizer=optimizer, loss=loss_function, metrics=['accuracy'])
# save the summary of the model if required
        if save_summary and self.logs:
with open(self.log_dir / 'summary.txt', 'w') as f:
with redirect_stdout(f):
self.model.summary()
# execute "on before train" operations
self._on_before_train()
# train the model
history = self.model.fit(training_data, steps_per_epoch=len(training_data), epochs=epochs,
validation_data=validation_data, validation_steps=len(validation_data),
callbacks=self._get_callbacks(), workers=4, shuffle=True)
# execute "on after train" operations
self._on_after_train()
model_path = None
# save the final model
        if save_model and self.logs:
model_path = self.log_dir / "final-model.h5"
self.model.save(model_path)
if self.verbose:
print("Model saved: {}".format(model_path))
clear_session()
return history.history,model_path,self.checkpoint_path | 38.598901 | 117 | 0.636726 | 905 | 7,025 | 4.818785 | 0.236464 | 0.034854 | 0.016051 | 0.009631 | 0.208897 | 0.094015 | 0.045402 | 0.013758 | 0 | 0 | 0 | 0.003214 | 0.291246 | 7,025 | 182 | 118 | 38.598901 | 0.872665 | 0.364555 | 0 | 0.160494 | 0 | 0 | 0.055145 | 0 | 0 | 0 | 0 | 0 | 0.012346 | 1 | 0.098765 | false | 0 | 0.098765 | 0 | 0.234568 | 0.061728 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76ac32571cb948c9489cc7293946eee1deb0de5c | 834 | py | Python | scripts/create_erc20pool.py | mikaelaakko/staking-pool-ERC20-ERC721 | b5e3fe6c5c3051ba25a696afe28d01868c1069b9 | [
"MIT"
] | 1 | 2022-02-18T14:37:51.000Z | 2022-02-18T14:37:51.000Z | scripts/create_erc20pool.py | mikaelaakko/staking-pool-ERC20-ERC721 | b5e3fe6c5c3051ba25a696afe28d01868c1069b9 | [
"MIT"
] | null | null | null | scripts/create_erc20pool.py | mikaelaakko/staking-pool-ERC20-ERC721 | b5e3fe6c5c3051ba25a696afe28d01868c1069b9 | [
"MIT"
] | null | null | null | from brownie import (
StakingPoolFactory,
StripERC20,
WETH,
accounts,
network,
config,
)
from scripts.helper_functions import get_account
strip_address = "0x0Ff63FbbDEe379B4FDA592Ea869188643Ab4c478"
weth_address = "0x55eD4d3A07e41D446A4213C797057b10A53B9e79"
week_seconds = 604800
def deploy_factory(duration):
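    # Brownie keeps a deployment history per contract; [-1] is the most recently deployed factory.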
account = get_account()
strip = StripERC20.at(strip_address)
weth = WETH.at(weth_address)
factory = StakingPoolFactory[-1]
staking_factory_contract = factory.createERC20StakingPool(
strip, weth, duration, {"from": account}
)
print(f"Pool {staking_factory_contract} created!")
def main():
name = "Strip staking pool"
symbol = "SSP"
name_hex = "0x5374726970207374616B696E6720706F6F6C"
symbol_hex = "0x535350"
deploy_factory(week_seconds)
| 23.828571 | 62 | 0.731415 | 80 | 834 | 7.4125 | 0.5125 | 0.033727 | 0.05059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.154412 | 0.184652 | 834 | 34 | 63 | 24.529412 | 0.717647 | 0 | 0 | 0 | 0 | 0 | 0.233813 | 0.177458 | 0 | 0 | 0.155875 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.148148 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76ad26393ad43403e265611493e5f79435924719 | 16,954 | py | Python | generator/generate.py | glynnc/liblcf | 301371de7d8e39f30c464ace355252b58beb71ee | [
"MIT"
] | null | null | null | generator/generate.py | glynnc/liblcf | 301371de7d8e39f30c464ace355252b58beb71ee | [
"MIT"
] | null | null | null | generator/generate.py | glynnc/liblcf | 301371de7d8e39f30c464ace355252b58beb71ee | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
import sys
import os
import re
gen_dir = os.path.dirname(os.path.abspath(__file__))
csv_dir = os.path.join(gen_dir, "csv")
tmpl_dir = os.path.join(gen_dir, "templates")
dest_dir = os.path.abspath(os.path.join(gen_dir, "..", "src", "generated"))
class Template(object):
def __init__(self, filename):
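        # Template format: a line starting with '@' names a section; all
        # following lines (until the next '@' line) become that attribute's value.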
with open(os.path.join(tmpl_dir, filename), 'r') as f:
name = None
value = None
for line in f:
if line[0] == '@':
if name is not None:
setattr(self, name, value)
name = line[1:].rstrip('\r\n')
value = ''
else:
value += line
if name is not None:
setattr(self, name, value)
copy = Template('copyright.tmpl')
reader = Template('reader.tmpl')
ctor = Template('constructor.tmpl')
decl = Template('declaration.tmpl')
decl2 = Template('declaration.tmpl')
chunk = Template('chunks.tmpl')
freader = Template('flag_reader.tmpl')
decl2.enum_header = decl.enum2_header
decl2.enum_tmpl = decl.enum2_tmpl
decl2.enum_footer = decl.enum2_footer
cpp_types = {
'Boolean': 'bool',
'Double': 'double',
'Integer': 'int',
'UInt8': 'uint8_t',
'UInt16': 'uint16_t',
'UInt32': 'uint32_t',
'Int16': 'int16_t',
'String': 'std::string',
}
def flags_def(struct_name):
f = ['\t\t\tbool %s;\n' % name for name in flags[struct_name]]
return 'struct Flags {\n' + ''.join(f) + '\t\t}'
def cpp_type(ty, prefix = True, expand_flags = None):
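    # Translate a meta-type from the CSV (Array<>, Vector<>, Ref<>, Enum<>,
    # *_Flags) into the corresponding C++ type name.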
if ty in cpp_types:
return cpp_types[ty]
m = re.match(r'Array<(.*):(.*)>', ty)
if m:
return 'std::vector<%s>' % cpp_type(m.group(1), prefix, expand_flags)
m = re.match(r'(Vector|Array)<(.*)>', ty)
if m:
return 'std::vector<%s>' % cpp_type(m.group(2), prefix, expand_flags)
m = re.match(r'Ref<(.*):(.*)>', ty)
if m:
return cpp_type(m.group(2), prefix, expand_flags)
m = re.match(r'Ref<(.*)>', ty)
if m:
return 'int'
m = re.match(r'Enum<(.*)>', ty)
if m:
return 'int'
m = re.match(r'(.*)_Flags$', ty)
if m:
if expand_flags:
return flags_def(expand_flags)
else:
ty = m.expand(r'\1::Flags')
if prefix:
ty = 'RPG::' + ty
return ty
if prefix:
ty = 'RPG::' + ty
return ty
int_types = {
'UInt8': 'uint8_t',
'UInt16': 'uint16_t',
'UInt32': 'uint32_t',
'Int16': 'int16_t',
}
def struct_headers(ty, header_map):
if ty == 'String':
return ['<string>']
if ty in int_types:
return ['"reader_types.h"']
if ty in cpp_types:
return []
m = re.match(r'Ref<(.*):(.*)>', ty)
if m:
return struct_headers(m.group(2), header_map)
if re.match(r'Ref<(.*)>', ty):
return []
if re.match(r'Enum<(.*)>', ty):
return []
if re.match(r'(.*)_Flags$', ty):
return []
m = re.match(r'Array<(.*):(.*)>', ty)
if m:
return ['<vector>'] + struct_headers(m.group(1), header_map)
m = re.match(r'(Vector|Array)<(.*)>', ty)
if m:
return ['<vector>'] + struct_headers(m.group(2), header_map)
header = header_map.get(ty)
if header is not None:
return ['"rpg_%s.h"' % header]
if ty in ['Parameters', 'Equipment', 'EventCommand', 'MoveCommand', 'Rect', 'TreeMap']:
return ['"rpg_%s.h"' % ty.lower()]
return []
def get_structs(filename = 'structs.csv'):
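    # Each row holds: filetype, struct name, has-ID flag; blank lines and '#' comments are skipped.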
result = []
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
filetype, structname, hasid = data
hasid = bool(int(hasid)) if hasid else None
filename = structname.lower()
result.append((filetype, filename, structname, hasid))
return result
def get_fields(filename = 'fields.csv'):
result = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',', 6)
struct, fname, issize, ftype, code, dfl, comment = data
issize = issize.lower() == 't'
code = int(code, 16) if code else None
if struct not in result:
result[struct] = []
result[struct].append((fname, issize, ftype, code, dfl, comment))
return result
def get_enums(filename = 'enums.csv'):
enums = {}
fields = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
sname, ename, name, num = data
num = int(num)
if (sname, ename) not in fields:
if sname not in enums:
enums[sname] = []
enums[sname].append(ename)
fields[sname, ename] = []
fields[sname, ename].append((name, num))
return enums, fields
def get_flags(filename = 'flags.csv'):
result = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
struct, fname = data
if struct not in result:
result[struct] = []
result[struct].append(fname)
return result
def get_setup(filename = 'setup.csv'):
result = {}
with open(os.path.join(csv_dir, filename), 'r') as f:
for line in f:
sline = line.strip()
if not sline:
continue
if sline.startswith("#"):
continue
data = sline.split(',')
struct, method, headers = data
headers = headers.split(' ') if headers else []
if struct not in result:
result[struct] = []
result[struct].append((method, headers))
return result
def get_headers(structs, sfields, setup):
header_map = dict([(struct_name, filename)
for filetype, filename, struct_name, hasid in structs])
result = {}
for filetype, filename, struct_name, hasid in structs:
if struct_name not in sfields:
continue
headers = set()
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
headers.update(struct_headers(ftype, header_map))
if struct_name in setup:
for method, hdrs in setup[struct_name]:
headers.update(hdrs)
result[struct_name] = sorted(x for x in headers if x[0] == '<') + sorted(x for x in headers if x[0] == '"')
return result
def write_enums(sname, f):
for ename in enums[sname]:
dcl = decl2 if (sname, ename) in [('MoveCommand','Code'),('EventCommand','Code')] else decl
evars = dict(ename = ename)
f.write(dcl.enum_header % evars)
ef = efields[sname, ename]
n = len(ef)
for i, (name, num) in enumerate(ef):
comma = '' if i == n - 1 else ','
vars = dict(ename = ename,
name = name,
num = num,
comma = comma)
f.write(dcl.enum_tmpl % vars)
f.write(dcl.enum_footer % evars)
f.write('\n')
def write_setup(sname, f):
for method, headers in setup[sname]:
f.write('\t\t%s;\n' % method)
def generate_reader(f, struct_name, vars):
f.write(copy.header)
f.write(reader.header % vars)
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
fvars = dict(
ftype = cpp_type(ftype),
fname = fname)
if issize:
f.write(reader.size_tmpl % fvars)
else:
f.write(reader.typed_tmpl % fvars)
f.write(reader.footer % vars)
def write_flags(f, sname, fname):
for name in flags[sname]:
fvars = dict(
fname = fname,
name = name)
f.write(ctor.flags % fvars)
def generate_ctor(f, struct_name, hasid, vars):
f.write(copy.header)
f.write(ctor.header % vars)
if hasid:
f.write(ctor.tmpl % dict(fname = 'ID', default = '0'))
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
if issize:
continue
if ftype.endswith('_Flags'):
write_flags(f, struct_name, fname)
continue
if dfl == '':
continue
if ftype.startswith('Vector'):
continue
if ftype.startswith('Array'):
continue
if ftype == 'Boolean':
dfl = dfl.lower()
elif ftype == 'String':
dfl = '"' + dfl[1:-1] + '"'
        if '|' in dfl:
            # version-dependent defaults like "a|b" are not handled yet;
            # fall back to -1 rather than silently picking one alternative
            # dfl = re.sub(r'(.*)\|(.*)', r'\1', dfl)
            dfl = -1
fvars = dict(
fname = fname,
default = dfl)
f.write(ctor.tmpl % fvars)
if struct_name in setup and any('Init()' in method
for method, hdrs in setup[struct_name]):
f.write('\n\tInit();\n')
f.write(ctor.footer % vars)
def needs_ctor(struct_name, hasid):
if hasid:
return True
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
if issize:
continue
if ftype.endswith('_Flags'):
return True
if dfl != '':
return True
return False
def generate_header(f, struct_name, hasid, vars):
f.write(copy.header)
f.write(decl.header1 % vars)
if headers[struct_name]:
f.write(decl.header2)
for header in headers[struct_name]:
f.write(decl.header_tmpl % dict(header = header))
f.write(decl.header3 % vars)
if struct_name in enums:
write_enums(struct_name, f)
needs_blank = False
if needs_ctor(struct_name, hasid):
f.write(decl.ctor % vars)
needs_blank = True
if struct_name in setup:
write_setup(struct_name, f)
needs_blank = True
if needs_blank:
f.write('\n')
if hasid:
f.write(decl.tmpl % dict(ftype = 'int', fname = 'ID'))
for field in sfields[struct_name]:
fname, issize, ftype, code, dfl, comment = field
if not ftype:
continue
if issize:
continue
fvars = dict(
ftype = cpp_type(ftype, False, struct_name),
fname = fname)
f.write(decl.tmpl % fvars)
f.write(decl.footer % vars)
def generate_chunks(f, struct_name, vars):
f.write(chunk.header % vars)
mwidth = max(len(field[0] + ('_size' if field[1] else '')) for field in sfields[struct_name]) + 1
mwidth = (mwidth + 3) // 4 * 4
    # print(struct_name, mwidth)  # debug output
sf = sfields[struct_name]
n = len(sf)
for i, field in enumerate(sf):
fname, issize, ftype, code, dfl, comment = field
if issize:
fname += '_size'
pad = mwidth - len(fname)
ntabs = (pad + 3) // 4
tabs = '\t' * ntabs
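        # Worked example of the padding math: with fields 'name' and
        # 'title_size', mwidth = len('title_size') + 1 = 11, rounded up to 12;
        # then for fname = 'name', pad = 8 and ntabs = (8 + 3) // 4 = 2 tabs.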
comma = ' ' if i == n - 1 else ','
fvars = dict(
fname = fname,
tabs = tabs,
code = code,
comma = comma,
comment = comment)
f.write(chunk.tmpl % fvars)
f.write(chunk.footer % vars)
def generate_struct(filetype, filename, struct_name, hasid):
if struct_name not in sfields:
return
vars = dict(
filetype = filetype,
filename = filename,
typeupper = filetype.upper(),
structname = struct_name,
structupper = struct_name.upper(),
idtype = ['NoID','WithID'][hasid])
filepath = os.path.join(dest_dir, '%s_%s.cpp' % (filetype, filename))
with open(filepath, 'w') as f:
generate_reader(f, struct_name, vars)
if needs_ctor(struct_name, hasid):
filepath = os.path.join(dest_dir, 'rpg_%s.cpp' % filename)
with open(filepath, 'w') as f:
generate_ctor(f, struct_name, hasid, vars)
filepath = os.path.join(dest_dir, 'rpg_%s.h' % filename)
with open(filepath, 'w') as f:
generate_header(f, struct_name, hasid, vars)
filepath = os.path.join(dest_dir, '%s_chunks.h' % filetype)
with open(filepath, 'a') as f:
generate_chunks(f, struct_name, vars)
def generate_rawstruct(filename, struct_name):
vars = dict(
filename = filename,
structname = struct_name,
structupper = struct_name.upper())
if needs_ctor(struct_name, False):
filepath = os.path.join(dest_dir, 'rpg_%s.cpp' % filename)
with open(filepath, 'w') as f:
generate_ctor(f, struct_name, False, vars)
filepath = os.path.join(dest_dir, 'rpg_%s.h' % filename)
with open(filepath, 'w') as f:
generate_header(f, struct_name, False, vars)
def generate_flags(filetype, filename, struct_name):
maxsize = (len(flags[struct_name]) + 7) // 8
maxwidth = max(len(fname) for fname in flags[struct_name])
maxwidth = (maxwidth + 2 + 3) // 4 * 4
vars = dict(
filetype = filetype,
filename = filename,
structname = struct_name,
structupper = struct_name.upper(),
maxsize = maxsize
)
filepath = os.path.join(dest_dir, '%s_%s_flags.cpp' % (filetype, filename))
with open(filepath, 'w') as f:
f.write(copy.header)
f.write(freader.header % vars)
        for fname in flags[struct_name]:
            # pad1 and pad2 were computed identically; do it once
            pad = maxwidth - len(fname) - 2
            ntabs = (pad + 3) // 4
            fvars = dict(
                fname = fname,
                pad1 = '\t' * ntabs,
                pad2 = '\t' * ntabs)
f.write(freader.tmpl % fvars)
f.write(freader.footer % vars)
def generate():
for filetype in ['ldb','lmt','lmu','lsd']:
vars = dict(
filetype = filetype,
typeupper = filetype.upper())
filepath = os.path.join(dest_dir, '%s_chunks.h' % filetype)
with open(filepath, 'w') as f:
f.write(copy.header)
f.write(chunk.file_header % vars)
for filetype, filename, struct_name, hasid in structs:
if hasid is not None:
generate_struct(filetype, filename, struct_name, hasid)
else:
generate_rawstruct(filename, struct_name)
if struct_name in flags:
generate_flags(filetype, filename, struct_name)
for filetype in ['ldb','lmt','lmu','lsd']:
filepath = os.path.join(dest_dir, '%s_chunks.h' % filetype)
with open(filepath, 'a') as f:
f.write(chunk.file_footer)
def list_files_struct(filetype, filename, struct_name, hasid):
if struct_name not in sfields:
return
print('%s_%s.cpp' % (filetype, filename))
if needs_ctor(struct_name, hasid):
print('rpg_%s.cpp' % filename)
print('rpg_%s.h' % filename)
def list_files_rawstruct(filename, struct_name):
if needs_ctor(struct_name, False):
print('rpg_%s.cpp' % filename)
print('rpg_%s.h' % filename)
def list_files_flags(filetype, filename, struct_name):
print('%s_%s_flags.cpp' % (filetype, filename))
def list_files():
for filetype in ['ldb','lmt','lmu','lsd']:
print('%s_chunks.h' % filetype)
for filetype, filename, struct_name, hasid in structs:
if hasid is not None:
list_files_struct(filetype, filename, struct_name, hasid)
else:
list_files_rawstruct(filename, struct_name)
if struct_name in flags:
list_files_flags(filetype, filename, struct_name)
def main(argv):
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
global structs, sfields, enums, efields, flags, setup, headers
structs = get_structs()
sfields = get_fields()
enums, efields = get_enums()
flags = get_flags()
setup = get_setup()
headers = get_headers(structs, sfields, setup)
if argv[1:] == ['-l']:
list_files()
else:
generate()
if __name__ == '__main__':
main(sys.argv)
| 31.051282 | 115 | 0.548543 | 2,112 | 16,954 | 4.283144 | 0.100852 | 0.077382 | 0.019898 | 0.03449 | 0.561132 | 0.522773 | 0.4392 | 0.37652 | 0.354742 | 0.298032 | 0 | 0.007884 | 0.319158 | 16,954 | 545 | 116 | 31.108257 | 0.775795 | 0.005073 | 0 | 0.474684 | 0 | 0 | 0.064157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056962 | false | 0 | 0.008439 | 0 | 0.139241 | 0.014768 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76af6ec2112ed2e11ec23b213a1caad36b51f515 | 4,597 | py | Python | application.py | VMS19/Inhalator | 77ff3f063efa48e825d1c5ef648203b2d70b753e | [
"MIT"
] | 9 | 2020-03-30T08:27:57.000Z | 2020-04-11T12:37:28.000Z | application.py | VMS19/Inhalator | 77ff3f063efa48e825d1c5ef648203b2d70b753e | [
"MIT"
] | 145 | 2020-03-25T20:41:24.000Z | 2020-04-15T17:39:10.000Z | application.py | VMS19/Inhalator | 77ff3f063efa48e825d1c5ef648203b2d70b753e | [
"MIT"
] | 4 | 2020-03-22T09:57:27.000Z | 2020-04-15T18:10:48.000Z | import os
import time
from uptime import uptime
from tkinter import Tk
from data.configurations import ConfigurationManager
from graphics.panes import MasterFrame
from graphics.themes import Theme
from graphics.calibrate.screen import calc_calibration_line
from graphics.constants import SCREEN_WIDTH, SCREEN_HEIGHT
from graphics.snackbar.default_config_snackbar import DefaultConfigSnackbar
class Application(object):
"""The Inhalator application"""
TEXT_SIZE = 10
HARDWARE_SAMPLE_RATE = 33 # HZ
__instance = None # shared instance
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
@classmethod
def instance(cls):
return cls.__instance
def __init__(self, measurements, events, arm_wd_event, drivers, sampler,
simulation=False, fps=10, sample_rate=70, record_sensors=False):
self.should_run = True
self.drivers = drivers
self.arm_wd_event = arm_wd_event
self.sampler = sampler
self.simulation = simulation
self.events = events
self.frame_interval = 1 / fps
self.sample_interval = 1 / sample_rate
self.last_sample_update_ts = 0
self.last_gui_update_ts = 0
self.root = Tk()
self.theme = Theme.choose_theme() # TODO: Make this configurable
self.root.protocol("WM_DELETE_WINDOW", self.exit) # Catches Alt-F4
self.root.title("Inhalator")
self.root.geometry(f'{SCREEN_WIDTH}x{SCREEN_HEIGHT}')
if os.uname()[1] == 'raspberrypi':
# on production we don't want to see the ugly cursor
self.root.config(cursor="none")
# We want fullscreen only for the raspberry-pi
self.root.attributes("-fullscreen", True)
self.master_frame = MasterFrame(self.root,
measurements=measurements,
events=events,
drivers=drivers,
record_sensors=record_sensors)
self.config = ConfigurationManager.config()
if ConfigurationManager.loaded_from_defaults:
DefaultConfigSnackbar(self.root).show()
# Load sensors calibrations
differential_pressure_driver = self.drivers.differential_pressure
differential_pressure_driver.set_calibration_offset(self.config.calibration.dp_offset)
oxygen_driver = self.drivers.a2d
oxygen_driver.set_oxygen_calibration(
*calc_calibration_line(
self.config.calibration.oxygen_point1,
self.config.calibration.oxygen_point2))
def exit(self):
self.root.quit()
self.should_run = False
def render(self):
self.master_frame.render()
self.events.alerts_queue.initial_uptime = uptime()
def gui_update(self):
self.root.update()
self.root.update_idletasks()
self.master_frame.update()
def sample(self):
self.sampler.sampling_iteration()
@property
def next_render(self):
return self.frame_interval - (time.time() - self.last_gui_update_ts)
@property
def next_sample(self):
return self.sample_interval - (time.time() - self.last_sample_update_ts)
def run(self):
self.render()
while self.should_run:
try:
time_now = time.time()
if (time_now - self.last_gui_update_ts) >= self.frame_interval:
self.gui_update()
self.last_gui_update_ts = time_now
if (time_now - self.last_sample_update_ts) >= self.sample_interval:
self.sample()
self.last_sample_update_ts = time_now
self.arm_wd_event.set()
except KeyboardInterrupt:
break
self.exit()
def run_iterations(self, max_iterations, fast_forward=True, render=True):
if render:
self.render()
for _ in range(max_iterations):
try:
if self.next_sample > 0 and not fast_forward:
time.sleep(max(self.next_sample, 0))
self.sample()
self.last_sample_update_ts = time.time()
if self.next_render <= 0:
self.gui_update()
self.last_gui_update_ts = time.time()
self.arm_wd_event.set()
except KeyboardInterrupt:
break
| 34.56391 | 94 | 0.614096 | 515 | 4,597 | 5.221359 | 0.300971 | 0.032726 | 0.018594 | 0.037189 | 0.151357 | 0.087021 | 0.087021 | 0.087021 | 0.026776 | 0 | 0 | 0.006268 | 0.305852 | 4,597 | 132 | 95 | 34.825758 | 0.836415 | 0.0459 | 0 | 0.173077 | 0 | 0 | 0.018519 | 0.006859 | 0 | 0 | 0 | 0.007576 | 0 | 1 | 0.105769 | false | 0 | 0.096154 | 0.028846 | 0.278846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76b0222ce19734afd389fa8ad840fc2aee9b9b1f | 440 | py | Python | functions/save.py | notsys/minecraft-checker | da91ac6c8b7c8f7e1a7843dba87ad056c88c37de | [
"MIT"
] | null | null | null | functions/save.py | notsys/minecraft-checker | da91ac6c8b7c8f7e1a7843dba87ad056c88c37de | [
"MIT"
] | null | null | null | functions/save.py | notsys/minecraft-checker | da91ac6c8b7c8f7e1a7843dba87ad056c88c37de | [
"MIT"
] | null | null | null | from tabulate import tabulate
import os
import functions.menu as menu
def save(y, available, blocked, upcoming, taken):
    menu.menu()
    print(f'{taken} taken | {available} available | {blocked} blocked | {upcoming} upcoming\n')
    headers = [f'{available} available', f'{blocked} blocked', f'{upcoming} upcoming']
    r = tabulate(y, headers=headers, tablefmt="psql")
    with open("result.txt", "w") as f:  # context manager closes the file even on error
        f.write(r)
76b032c0e71e5f826f28b65e0f1e154015953c98 | 547 | py | Python | samples/s9.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
] | 3 | 2019-05-04T12:19:09.000Z | 2019-08-30T07:12:31.000Z | samples/s9.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
] | null | null | null | samples/s9.py | AndreiHondrari/python_exploration | cb4ac0b92ddc48c322201ba31cd6e7c5ee6af06d | [
"MIT"
] | null | null | null | # prng sequence guessing
from collections import namedtuple
N = int(input())
sequences = {}
TsPair = namedtuple("TimestampsPair", ['t1', 't2'])
if N < 10:
    for i in range(N):
        # the original mixed Python 2 raw_input()/input(); parse explicitly
        t1, t2 = (int(x) for x in input().split(' '))
        timestamps = TsPair(t1=t1, t2=t2)
        if timestamps.t1 - timestamps.t2 <= 10**6:
            numbers = []
            for j in range(2):
                numbers.append(int(input()))
            sequences[timestamps] = numbers
print(sequences)
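# Illustrative stdin for the loop above (all values made up):
#   2
#   1500000000 1499999500
#   4242
#   1717
#   1500000100 1500000000
#   31337
#   2718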
| 19.535714 | 63 | 0.572212 | 60 | 547 | 5.2 | 0.55 | 0.089744 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036269 | 0.294333 | 547 | 27 | 64 | 20.259259 | 0.772021 | 0.040219 | 0 | 0 | 0 | 0 | 0.036468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76b107c2aa48ea70becd8b93119d8cbb71c32586 | 3,268 | py | Python | tests/tracking/default_experiment/test_databricks_notebook_experiment_provider.py | adamreeve/mlflow | d0d307f7f7b49f013727191a672ae2139bf37343 | [
"Apache-2.0"
] | 1 | 2022-01-11T02:51:17.000Z | 2022-01-11T02:51:17.000Z | tests/tracking/default_experiment/test_databricks_notebook_experiment_provider.py | adamreeve/mlflow | d0d307f7f7b49f013727191a672ae2139bf37343 | [
"Apache-2.0"
] | null | null | null | tests/tracking/default_experiment/test_databricks_notebook_experiment_provider.py | adamreeve/mlflow | d0d307f7f7b49f013727191a672ae2139bf37343 | [
"Apache-2.0"
] | null | null | null | from unittest import mock
from mlflow.exceptions import MlflowException
from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE
from mlflow.tracking import MlflowClient
from mlflow.tracking.default_experiment.databricks_notebook_experiment_provider import (
DatabricksNotebookExperimentProvider,
DatabricksRepoNotebookExperimentProvider,
)
from mlflow.utils.mlflow_tags import MLFLOW_EXPERIMENT_SOURCE_TYPE, MLFLOW_EXPERIMENT_SOURCE_ID
def test_databricks_notebook_default_experiment_in_context():
with mock.patch("mlflow.utils.databricks_utils.is_in_databricks_notebook") as in_notebook_mock:
assert DatabricksNotebookExperimentProvider().in_context() == in_notebook_mock.return_value
def test_databricks_notebook_default_experiment_id():
with mock.patch("mlflow.utils.databricks_utils.get_notebook_id") as patch_notebook_id:
assert (
DatabricksNotebookExperimentProvider().get_experiment_id()
== patch_notebook_id.return_value
)
def test_databricks_repo_notebook_default_experiment_in_context():
with mock.patch(
"mlflow.utils.databricks_utils.is_in_databricks_repo_notebook"
) as in_repo_notebook_mock:
in_repo_notebook_mock.return_value = True
assert DatabricksRepoNotebookExperimentProvider().in_context()
with mock.patch(
"mlflow.utils.databricks_utils.is_in_databricks_repo_notebook"
) as not_in_repo_notebook_mock:
not_in_repo_notebook_mock.return_value = False
assert not DatabricksRepoNotebookExperimentProvider().in_context()
def test_databricks_repo_notebook_default_experiment_gets_id_by_request():
with mock.patch(
"mlflow.utils.databricks_utils.get_notebook_id"
) as notebook_id_mock, mock.patch(
"mlflow.utils.databricks_utils.get_notebook_path"
) as notebook_path_mock, mock.patch.object(
MlflowClient, "create_experiment"
) as create_experiment_mock:
notebook_id_mock.return_value = 1234
notebook_path_mock.return_value = "/Repos/path"
create_experiment_mock.return_value = "experiment_id"
returned_id = DatabricksRepoNotebookExperimentProvider().get_experiment_id()
assert returned_id == "experiment_id"
tags = {MLFLOW_EXPERIMENT_SOURCE_TYPE: "REPO_NOTEBOOK", MLFLOW_EXPERIMENT_SOURCE_ID: 1234}
create_experiment_mock.assert_called_once_with("/Repos/path", None, tags)
def test_databricks_repo_notebook_default_experiment_uses_fallback_notebook_id():
with mock.patch(
"mlflow.utils.databricks_utils.get_notebook_id"
) as notebook_id_mock, mock.patch(
"mlflow.utils.databricks_utils.get_notebook_path"
) as notebook_path_mock, mock.patch.object(
MlflowClient, "create_experiment"
) as create_experiment_mock:
DatabricksRepoNotebookExperimentProvider._resolved_repo_notebook_experiment_id = None
notebook_id_mock.return_value = 1234
notebook_path_mock.return_value = "/Repos/path"
create_experiment_mock.side_effect = MlflowException(
message="not enabled", error_code=INVALID_PARAMETER_VALUE
)
returned_id = DatabricksRepoNotebookExperimentProvider().get_experiment_id()
assert returned_id == 1234
| 45.388889 | 99 | 0.779988 | 374 | 3,268 | 6.355615 | 0.165775 | 0.055532 | 0.050484 | 0.067312 | 0.56037 | 0.541018 | 0.491796 | 0.444257 | 0.444257 | 0.376104 | 0 | 0.006151 | 0.154223 | 3,268 | 71 | 100 | 46.028169 | 0.853835 | 0 | 0 | 0.4 | 0 | 0 | 0.159425 | 0.123623 | 0 | 0 | 0 | 0 | 0.116667 | 1 | 0.083333 | false | 0 | 0.1 | 0 | 0.183333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76b258efcb378ef005eb36ab0863fee39f30dbca | 873 | py | Python | pullfaces.py | dinuka-rp/Python-Face_recognition | 8e5d39b54d979868a6a6cf4c2b71e10b8dadd181 | [
"MIT"
] | 1 | 2019-10-23T06:33:11.000Z | 2019-10-23T06:33:11.000Z | pullfaces.py | dinuka-rp/Python-Face_recognition | 8e5d39b54d979868a6a6cf4c2b71e10b8dadd181 | [
"MIT"
] | 2 | 2021-06-08T20:27:35.000Z | 2021-09-08T01:22:09.000Z | pullfaces.py | DinDev3/Python-Face_recognition | 337f17f85173fda3a5d91896ddc5be70b33ed2fc | [
"MIT"
] | null | null | null | # This program identifies multiple faces in an image , displays and saves them
from PIL import Image #implementing the Pillow library (Imaging library)
import face_recognition
image = face_recognition.load_image_file('./img/groups/team.jpg')
face_locations = face_recognition.face_locations(image) #get locations of faces in image
for face_location in face_locations:
    # face_recognition returns each location as (top, right, bottom, left)
    top, right, bottom, left = face_location
    face_image = image[top:bottom, left:right]  # crop the face via NumPy slicing
    pil_image = Image.fromarray(face_image)
    # pil_image.show()  # uncomment to display each identified face
    pil_image.save(f'{top}.jpg')  # file named after the face's top coordinate
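# A sketch (commented out, reusing the variables above) that avoids filename
# collisions when two faces happen to share the same `top` coordinate:
# for i, (top, right, bottom, left) in enumerate(face_locations):
#     Image.fromarray(image[top:bottom, left:right]).save(f'face_{i}.jpg')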
76b677133fcbae61e2dea9e942a4312c344c21e4 | 8,692 | py | Python | models.py | efratkohen/Project | d95d20a1be8fe0e0918b3e699c640f36704639f8 | [
"MIT"
] | 1 | 2020-07-25T11:27:17.000Z | 2020-07-25T11:27:17.000Z | models.py | efratkohen/Project | d95d20a1be8fe0e0918b3e699c640f36704639f8 | [
"MIT"
] | null | null | null | models.py | efratkohen/Project | d95d20a1be8fe0e0918b3e699c640f36704639f8 | [
"MIT"
] | null | null | null | from enum import Enum
from keras import backend as K, Sequential, Input, Model
from keras.callbacks import EarlyStopping, ModelCheckpoint, TerminateOnNaN
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import (
Permute,
Dense,
multiply,
LSTM,
Bidirectional,
Conv1D,
MaxPooling1D,
Flatten,
TimeDistributed,
RepeatVector,
Dropout,
GRU,
AveragePooling1D,
)
from matplotlib import pyplot, pyplot as plt
import enum

import numpy as np  # used by var_importance below

# NOTE: var_importance also calls a project-local `metrics.calc_rmse` helper
# that is not imported here; it is assumed to exist in the surrounding project.
class ModelType(enum.Enum):
SIMPLE_LSTM = 1
STACKED_LSTM = 2
BIDRECTIONAL_LSTM = 3
CNN = 4
CNN_LSTM = 5
LSTM_AUTOENCODER = 6
DEEP_CNN = 7
GRU = 8
GRU_CNN = 9
def attention_block(inputs, time_steps):
x = Permute((2, 1))(inputs)
x = Dense(time_steps, activation="softmax")(x)
x = Permute((2, 1), name="attention_prob")(x)
x = multiply([inputs, x])
return x
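# Minimal usage sketch for attention_block (commented out; `n_steps` and
# `n_features` are illustrative placeholders, not values from this project):
# inp = Input(shape=(n_steps, n_features))
# attended = attention_block(inp, time_steps=n_steps)
# out = Dense(1)(Flatten()(attended))
# model = Model(inp, out)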
def get_activation(model, layer_name, inputs):
layer = [l for l in model.layers if l.name == layer_name][0]
func = K.function([model.input], [layer.output])
return func([inputs])[0]
def make_model(
model_type, Xtrain, Ytrain, opt="adam", loss_func="mse", summary=False, binary=False
):
if binary:
LAST_ACTIVATION = "sigmoid"
else:
LAST_ACTIVATION = "linear"
print(model_type)
if model_type is ModelType.SIMPLE_LSTM:
print(model_type)
# Single cell LSTM
model = Sequential()
model.add(
LSTM(
units=100,
activation="relu",
name="first_lstm",
recurrent_dropout=0.1,
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(Dense(1, activation=LAST_ACTIVATION))
elif model_type is ModelType.STACKED_LSTM:
# Stacked LSTM
model = Sequential()
model.add(
LSTM(
100,
activation="relu",
return_sequences=True,
recurrent_dropout=0.1,
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(
LSTM(50, activation="relu", return_sequences=True, recurrent_dropout=0.1)
)
model.add(LSTM(30, activation="relu", recurrent_dropout=0.2))
model.add(Dense(1, activation=LAST_ACTIVATION))
elif model_type is ModelType.BIDRECTIONAL_LSTM:
# Bidirectional LSTM
model = Sequential()
model.add(Bidirectional(LSTM(100, return_sequences=True, activation="relu")))
model.add(Bidirectional(LSTM(50, return_sequences=True, activation="relu")))
model.add(Bidirectional(LSTM(20, activation="relu")))
model.add(Dense(1, activation=LAST_ACTIVATION))
elif model_type is ModelType.CNN:
model = Sequential()
model.add(
Conv1D(
filters=128,
kernel_size=2,
activation="relu",
name="extractor",
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=64, kernel_size=2, activation="relu"))
model.add(Dropout(0.5))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(50, activation="relu"))
model.add(Dense(1, activation=LAST_ACTIVATION))
elif model_type is ModelType.CNN_LSTM:
model = Sequential()
model.add(
Conv1D(
filters=256,
kernel_size=5,
padding="same",
activation="relu",
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(Conv1D(filters=256, kernel_size=5, padding="same", activation="relu"))
model.add(MaxPooling1D(pool_size=4))
model.add(Conv1D(filters=256, kernel_size=5, padding="same", activation="relu"))
model.add(MaxPooling1D(pool_size=4))
model.add(LSTM(100))
model.add(Dropout(0.5))
model.add(Dense(100))
model.add(Dense(1, activation=LAST_ACTIVATION))
elif model_type is ModelType.LSTM_AUTOENCODER:
model = Sequential()
model.add(
Conv1D(
filters=128,
kernel_size=2,
activation="relu",
name="extractor",
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(Dropout(0.3))
model.add(MaxPooling1D(pool_size=2))
model.add(
Bidirectional(
LSTM(
50,
activation="relu",
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
)
model.add(RepeatVector(10))
model.add(Bidirectional(LSTM(50, activation="relu")))
model.add(Dense(1))
elif model_type is ModelType.DEEP_CNN:
model = Sequential()
model.add(
TimeDistributed(
Conv1D(filters=64, kernel_size=2, activation="relu"),
input_shape=(None, Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(TimeDistributed(Conv1D(filters=64, kernel_size=2, activation="relu")))
model.add(TimeDistributed(Dropout(0.5)))
model.add(TimeDistributed(MaxPooling1D(pool_size=2)))
model.add(TimeDistributed(Flatten()))
model.add(LSTM(100))
model.add(Dropout(0.5))
model.add(Dense(100, activation="relu"))
model.add(Dense(1, activation="softmax"))
elif model_type is ModelType.GRU:
model = Sequential()
model.add(
GRU(
75,
return_sequences=True,
input_shape=(Xtrain.shape[1], Xtrain.shape[2]),
)
)
model.add(GRU(units=30, return_sequences=True))
model.add(GRU(units=30))
model.add(Dense(units=1, activation=LAST_ACTIVATION))
elif model_type is ModelType.GRU_CNN:
inp_seq = Input(shape=(Xtrain.shape[1], Xtrain.shape[2]))
x = Bidirectional(GRU(100, return_sequences=True))(inp_seq)
x = AveragePooling1D(2)(x)
x = Conv1D(100, 3, activation="relu", padding="same", name="extractor")(x)
x = Flatten()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.5)(x)
out = Dense(1, activation=LAST_ACTIVATION)(x)
model = Model(inp_seq, out)
else:
print("ERROR ", model_type)
return None
model.compile(loss=loss_func, optimizer=opt)
# fit network
if summary:
model.summary()
return model
def fit(
Xtrain,
Ytrain,
Xtest,
Ytest,
model,
epochs=20,
batch_size=32,
graph=False,
binary=False,
pos_weight=1,
verbose=1,
):
    # NOTE: `binary` and `pos_weight` are accepted for API symmetry but are
    # not currently used inside this function.
    min_es = EarlyStopping(monitor="val_loss", mode="min", verbose=1, patience=10)
    # NOTE: this checkpoint points at a machine-specific absolute path and is
    # not passed to the callbacks list below, so it currently has no effect.
    checkpoint_es = ModelCheckpoint(
        filepath="C:\\Users\\nitza\\Local\\WWTP\\models\\model.{epoch:02d}-{val_loss:.5f}.h5"
    )
nan_es = TerminateOnNaN()
history = model.fit(
Xtrain,
Ytrain,
epochs=epochs,
batch_size=batch_size,
validation_data=(Xtest, Ytest),
verbose=verbose,
shuffle=True,
use_multiprocessing=False,
callbacks=[nan_es, min_es],
)
# plot history
if graph:
pyplot.plot(history.history["loss"], label="train")
pyplot.plot(history.history["val_loss"], label="test")
plt.legend()
plt.show()
return model
def evaluate(model, Xtest, Ytest, scalers, binary=False):
Yhat = model.predict(Xtest)
if not binary:
Yhat = scalers[-1].inverse_transform(Yhat)
Ytest = scalers[-1].inverse_transform(Ytest)
return Yhat, Ytest
def var_importance(names, model, X, Y, size, minimum):
res = dict()
orig_out = model.predict(X)
for i in range(min(size, len(names))):
new_x = X.copy()
perturbation = np.random.normal(0.0, 0.2, size=new_x.shape[:2])
new_x[:, :, i] = new_x[:, :, i] + perturbation
perturbed_out = model.predict(new_x)
        # calc_rmse returns RMSE, not F1; name the variables accordingly and
        # fold the original double negation into a single subtraction
        rmse_orig = metrics.calc_rmse(Y, orig_out, graph=False)
        rmse_perturbed = metrics.calc_rmse(Y, perturbed_out, graph=False)
        effect = rmse_perturbed - rmse_orig
        res[names[i]] = effect
print(f"Variable {names[i]}, perturbation effect: {effect:.4f}")
return res
# NOTE: this module-level call expects `mdfs`, `model`, `Xtrain` and `Ytrain`
# to be defined by the surrounding script or notebook; importing this module
# on its own will raise a NameError here.
var_table = var_importance(
    mdfs[-1].columns[:], model, Xtrain, Ytrain, Xtrain.shape[2] - 1, 0.002
)
| 29.665529 | 93 | 0.580994 | 1,016 | 8,692 | 4.855315 | 0.200787 | 0.077843 | 0.023718 | 0.044598 | 0.443341 | 0.412933 | 0.385364 | 0.362254 | 0.342996 | 0.29252 | 0 | 0.033442 | 0.294754 | 8,692 | 292 | 94 | 29.767123 | 0.771289 | 0.008399 | 0 | 0.301587 | 0 | 0.003968 | 0.040757 | 0.008593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.039683 | 0 | 0.130952 | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76b732fabc69f97a04fe1ec88757dc0068f33d8f | 1,076 | py | Python | examples/run.py | haloship/rec-sys-dynamics | 886095eca8c71cc2f30d64f0b1da9a0a8f2f37f5 | [
"MIT"
] | null | null | null | examples/run.py | haloship/rec-sys-dynamics | 886095eca8c71cc2f30d64f0b1da9a0a8f2f37f5 | [
"MIT"
] | null | null | null | examples/run.py | haloship/rec-sys-dynamics | 886095eca8c71cc2f30d64f0b1da9a0a8f2f37f5 | [
"MIT"
] | null | null | null | import path_resolver
import argparse
from src.analysis.cluster import movielens, cluster, analysis, post_process
from src.analysis.simulate import simulate
parser = argparse.ArgumentParser(
description="Example script to replicate results obtained on user dynamics in recommender systems"
)
parser.add_argument('algo', help= "Name of algorithm: 'ease', 'cosin', or 'mf'")
parser.add_argument('dataset', help= "Name of dataset: \
'All_Neutral',\
'1_Biased_Communities_Control', \
'2_Biased_Communities_Control', \
'Biased_Neutral_Control'")
args = parser.parse_args()
# run the simulation for the algorithm/dataset given on the command line
run = simulate(str(args.algo), str(args.dataset))
run_output = run.run_dynamics(n_i=10, n_u=0, n_r=30, steps=5, n_clusters = 2)
# save the plot_counts() and plot_percent pngs
analyse = analysis(run_output[1])
analyse.rename_cluster(1,1000)
analyse.plot_counts(show=False, loc=run.run_name+'/counts.png')
analyse.plot_percent(show=False, loc=run.run_name+'/percent.png') | 39.851852 | 102 | 0.695167 | 144 | 1,076 | 4.993056 | 0.527778 | 0.025035 | 0.041725 | 0.041725 | 0.061196 | 0.061196 | 0 | 0 | 0 | 0 | 0 | 0.017261 | 0.192379 | 1,076 | 27 | 103 | 39.851852 | 0.810127 | 0.055762 | 0 | 0 | 0 | 0 | 0.246548 | 0.076923 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76ba2a92260fc79d4f8b4eedd73dd3c7c4ae63ad | 2,404 | py | Python | src/scite/scite/scripts/commandsdoc.py | segafan/wme1_jankavan_tlc_edition-repo | 72163931f348d5a2132577930362d297cc375a26 | [
"MIT"
] | 3 | 2021-03-28T00:11:48.000Z | 2022-01-12T13:10:52.000Z | src/scite/scite/scripts/commandsdoc.py | segafan/wme1_jankavan_tlc_edition-repo | 72163931f348d5a2132577930362d297cc375a26 | [
"MIT"
] | null | null | null | src/scite/scite/scripts/commandsdoc.py | segafan/wme1_jankavan_tlc_edition-repo | 72163931f348d5a2132577930362d297cc375a26 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import with_statement
import os, sys
scintillaDirectory = os.path.join("..", "..", "scintilla", "include")
sys.path.append(scintillaDirectory)
import Face
def cell(s):
return "<td>%s</td>" % s
def faceFeatures(out):
out.write("<h2>Scintilla key commands</h2>\n")
out.write("<table>\n")
out.write("<thead>%s%s%s</thead>\n" % (cell("Command"), cell("Name"), cell("Explanation")))
face = Face.Face()
face.ReadFromFile(os.path.join(scintillaDirectory, "Scintilla.iface"))
texts = []
for name in face.features:
#~ print name
f = face.features[name]
if f["FeatureType"] == "fun" and \
f["ReturnType"] == "void" and \
not (f["Param1Type"] or f["Param2Type"]):
texts.append([name, f["Value"], " ".join(f["Comment"])])
texts.sort()
for t in texts:
out.write("<tr>%s%s%s</tr>\n" % (cell(t[1]), cell(t[0]), cell(t[2])))
out.write("</table>\n")
def menuFeatures(out):
out.write("<h2>SciTE menu commands</h2>\n")
out.write("<table>\n")
out.write("<thead>%s%s</thead>\n" % (cell("Command"), cell("Menu text")))
with open(os.path.join("..", "win32", "SciTERes.rc"), "rt") as f:
for l in f:
l = l.strip()
if l.startswith("MENUITEM") and "SEPARATOR" not in l:
l = l.replace("MENUITEM", "").strip()
text, symbol = l.split('",', 1)
symbol = symbol.strip()
text = text[1:].replace("&", "").replace("...", "")
if "\\t" in text:
text = text.split("\\t",1)[0]
if text:
out.write("<tr><td>%s</td><td>%s</td></tr>\n" % (symbol, text))
out.write("</table>\n")
startFile = """
<?xml version="1.0"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<!--Generated by scite/scripts/scommandsdoc.py -->
<style type="text/css">
table { border: 1px solid #1F1F1F; border-collapse: collapse; }
td { border: 1px solid; border-color: #E0E0E0 #000000; padding: 1px 5px 1px 5px; }
th { border: 1px solid #1F1F1F; padding: 1px 5px 1px 5px; }
thead { background-color: #000000; color: #FFFFFF; }
</style>
<body>
"""
if __name__ == "__main__":
with open(os.path.join("..", "doc", "CommandValues.html"), "w") as out:
out.write(startFile)
menuFeatures(out)
faceFeatures(out)
out.write("</body>\n</html>\n")
| 33.388889 | 93 | 0.596506 | 343 | 2,404 | 4.142857 | 0.355685 | 0.067558 | 0.028149 | 0.039409 | 0.138635 | 0.086559 | 0.086559 | 0.056298 | 0.056298 | 0.056298 | 0 | 0.030638 | 0.171797 | 2,404 | 71 | 94 | 33.859155 | 0.683074 | 0.013727 | 0 | 0.065574 | 0 | 0.032787 | 0.438398 | 0.046147 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.04918 | 0.016393 | 0.114754 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76bbe9ed61ac92be46c58eb48d6f9f31baf4a00f | 829 | py | Python | tests/unit/refund_tests.py | LyntServices/gopay-python-api | 0bee7da29f0ed9a414142bf5787255e190421da0 | [
"MIT"
] | null | null | null | tests/unit/refund_tests.py | LyntServices/gopay-python-api | 0bee7da29f0ed9a414142bf5787255e190421da0 | [
"MIT"
] | null | null | null | tests/unit/refund_tests.py | LyntServices/gopay-python-api | 0bee7da29f0ed9a414142bf5787255e190421da0 | [
"MIT"
] | null | null | null | import unittest
import gopay
from utils import Utils
class TestRefund(unittest.TestCase):
""" TestRefund class
To execute test for certain method properly it is necessary to add prefix 'test' to its name.
"""
def setUp(self):
self.payments = gopay.payments({
'goid': Utils.GO_ID,
'clientId': Utils.CLIENT_ID,
'clientSecret': Utils.CLIENT_SECRET,
'isProductionMode': False
})
def refund_payment(self):
payment_id = 3049525986
response = self.payments.refund_payment(payment_id, 1900)
if "error_code" not in str(response.json):
print('Response: ' + str(response.json))
print('Payment id: ' + str(response.json['id']))
else:
print('Error: ' + str(response.json))
| 25.90625 | 97 | 0.600724 | 93 | 829 | 5.268817 | 0.526882 | 0.089796 | 0.122449 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02385 | 0.291918 | 829 | 31 | 98 | 26.741935 | 0.810903 | 0.133896 | 0 | 0 | 0 | 0 | 0.116883 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.157895 | 0 | 0.315789 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76bd559ffa8e901eb2ded58ab32ff20f30927954 | 5,459 | py | Python | texar/data/data/large_file_test.py | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 | [
"Apache-2.0"
] | null | null | null | texar/data/data/large_file_test.py | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 | [
"Apache-2.0"
] | null | null | null | texar/data/data/large_file_test.py | lunayach/texar-pytorch | ac3e334e491f524dd01654b07af030fa20c88b34 | [
"Apache-2.0"
] | null | null | null | import contextlib
import resource
import time
import unittest
from typing import List, Optional, Tuple
import gc
import numpy as np
import torch
from texar.data.data.data_base import DataBase, DataSource
from texar.data.data.data_iterators import DataIterator
from texar.data.data.dataset_utils import Batch
from texar.data.data.text_data_base import TextLineDataSource
from texar.data.vocabulary import Vocab
from texar.utils.utils import AnyDict
RawExample = str
Example = Tuple[np.ndarray, np.ndarray]
@contextlib.contextmanager
def work_in_progress(msg):
print(msg + "... ", flush=True)
begin_time = time.time()
yield
time_consumed = time.time() - begin_time
print(f"done. ({time_consumed:.2f}s)", flush=True)
class ParallelData(DataBase[RawExample, Example]):
def __init__(self, source: DataSource[RawExample],
src_vocab_path: str,
tgt_vocab_path: str,
hparams: AnyDict,
device: Optional[torch.device] = None):
# hparams.update(parallelize_processing=False)
self.src_vocab = Vocab(src_vocab_path)
self.tgt_vocab = Vocab(tgt_vocab_path)
self.device = device
super().__init__(source, hparams)
def process(self, raw_example: RawExample) -> Example:
src, tgt = raw_example.strip().split('\t')
src = self.src_vocab.map_tokens_to_ids_py(src.split())
tgt = self.tgt_vocab.map_tokens_to_ids_py(tgt.split())
return src, tgt
def collate(self, examples: List[Example]) -> Batch:
src_pad_length = max(len(src) for src, _ in examples)
tgt_pad_length = max(len(tgt) for _, tgt in examples)
batch_size = len(examples)
src_indices = np.zeros((batch_size, src_pad_length), dtype=np.int64)
tgt_indices = np.zeros((batch_size, tgt_pad_length), dtype=np.int64)
for b_idx, (src, tgt) in enumerate(examples):
src_indices[b_idx, :len(src)] = src
tgt_indices[b_idx, :len(tgt)] = tgt
src_indices = torch.from_numpy(src_indices).to(device=self.device)
tgt_indices = torch.from_numpy(tgt_indices).to(device=self.device)
return Batch(batch_size, src=src_indices, tgt=tgt_indices)
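# Illustrative collate behaviour (a sketch, not part of the class): for two
# examples with source lengths 3 and 5, `src` becomes a (2, 5) int64 tensor,
# with the shorter sequence zero-padded on the right (0 doubles as the pad id).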
def wrap_progress(func):
from tqdm import tqdm
return lambda: tqdm(func(), leave=False)
def get_process_memory():
    # resource.getrusage reports ru_maxrss in kilobytes on Linux and in bytes
    # on macOS, so the unit of this value is platform-dependent; treat the
    # printed "MB" figures as approximate.
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024 / 1024
@unittest.skip("Manual test only")
class LargeFileTest(unittest.TestCase):
def setUp(self) -> None:
self.source = TextLineDataSource(
'../../Downloads/en-es.bicleaner07.txt.gz',
compression_type='gzip')
self.source.__iter__ = wrap_progress( # type: ignore
self.source.__iter__)
self.num_workers = 3
self.batch_size = 64
def _test_modes_with_workers(self, lazy_mode: str, cache_mode: str,
num_workers: int):
from tqdm import tqdm
gc.collect()
mem = get_process_memory()
with work_in_progress(f"Data loading with lazy mode '{lazy_mode}' "
f"and cache mode '{cache_mode}' "
f"with {num_workers} workers"):
print(f"Memory before: {mem:.2f} MB")
with work_in_progress("Construction"):
data = ParallelData(self.source,
'../../Downloads/src.vocab',
'../../Downloads/tgt.vocab',
{'batch_size': self.batch_size,
'lazy_strategy': lazy_mode,
'cache_strategy': cache_mode,
'num_parallel_calls': num_workers,
'shuffle': False,
'allow_smaller_final_batch': False,
'max_dataset_size': 100000})
print(f"Memory after construction: {mem:.2f} MB")
iterator = DataIterator(data)
with work_in_progress("Iteration"):
for batch in tqdm(iterator, leave=False):
self.assertEqual(batch.batch_size, self.batch_size)
                gc.collect()
                print(f"Memory after iteration: {get_process_memory():.2f} MB")
with work_in_progress("2nd iteration"):
for batch in tqdm(iterator, leave=False):
self.assertEqual(batch.batch_size, self.batch_size)
def _test_modes(self, lazy_mode: str, cache_mode: str):
self._test_modes_with_workers(lazy_mode, cache_mode, self.num_workers)
self._test_modes_with_workers(lazy_mode, cache_mode, 1)
def test_none_processed(self):
self._test_modes('none', 'processed')
def test_process_loaded(self):
self._test_modes('process', 'loaded')
def test_process_processed(self):
self._test_modes('process', 'processed')
def test_all_none(self):
self._test_modes('all', 'none')
def test_all_loaded(self):
self._test_modes('all', 'loaded')
def test_all_processed(self):
self._test_modes('all', 'processed')
def _test_all_combinations(self):
self.test_none_processed()
self.test_process_loaded()
self.test_process_processed()
self.test_all_none()
self.test_all_loaded()
self.test_all_processed()
| 38.174825 | 78 | 0.616596 | 660 | 5,459 | 4.824242 | 0.237879 | 0.035176 | 0.032663 | 0.032035 | 0.238693 | 0.123744 | 0.110553 | 0.077889 | 0.077889 | 0.052136 | 0 | 0.007372 | 0.279355 | 5,459 | 142 | 79 | 38.443662 | 0.801983 | 0.010441 | 0 | 0.068376 | 0 | 0 | 0.102056 | 0.025375 | 0 | 0 | 0 | 0 | 0.017094 | 1 | 0.136752 | false | 0 | 0.136752 | 0.008547 | 0.324786 | 0.042735 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76be1e3d498d76e899af09c79f635e387363d63b | 1,473 | py | Python | cap2/api.py | nanusefue/CAP2-1 | 670b343ac7629fe0e64e86263ae420b01952f427 | [
"MIT"
] | 9 | 2020-07-10T15:45:12.000Z | 2022-01-19T10:44:13.000Z | cap2/api.py | nanusefue/CAP2-1 | 670b343ac7629fe0e64e86263ae420b01952f427 | [
"MIT"
] | 14 | 2020-06-15T16:04:54.000Z | 2022-03-12T01:05:47.000Z | cap2/api.py | nanusefue/CAP2-1 | 670b343ac7629fe0e64e86263ae420b01952f427 | [
"MIT"
] | 5 | 2021-01-05T01:26:48.000Z | 2022-01-23T11:20:49.000Z |
import luigi
from .pipeline.databases import MODULES as DB_MODULES
from .constants import (
STAGES,
STAGES_GROUP,
)
def run_db_stage(config_path='', cores=1, **kwargs):
"""Run the database stage of the pipeline."""
instances = []
for module in DB_MODULES:
instances.append(
module(
config_filename=config_path,
cores=cores
)
)
luigi.build(instances, local_scheduler=True, **kwargs)
def run_stage(samples, stage_name, config_path='', cores=1, workers=1, **kwargs):
"""Run a subpipeline on a list of samples. stage_name can be one of `qc`, `pre`, `reads`."""
modules = STAGES[stage_name]
group_modules = STAGES_GROUP.get(stage_name, [])
run_modules(
samples, modules,
group_modules=group_modules,
config_path=config_path,
cores=cores,
workers=workers,
**kwargs
)
def run_modules(samples, modules, group_modules=None, config_path='', cores=1, workers=1, **kwargs):
    """Run a set of modules for a list of samples."""
    group_modules = group_modules or []  # avoid the mutable-default-argument pitfall
    instances = []
for sample in samples:
for module in modules:
instance = module.from_sample(sample, config_path, cores=cores)
instances.append(instance)
for grp_module in group_modules:
instances.append(grp_module.from_samples('all', samples, config_path))
luigi.build(instances, local_scheduler=True, workers=workers, **kwargs)
| 28.882353 | 98 | 0.644263 | 182 | 1,473 | 5.032967 | 0.28022 | 0.087336 | 0.098253 | 0.052402 | 0.257642 | 0.233624 | 0.074236 | 0.074236 | 0.074236 | 0 | 0 | 0.0045 | 0.245757 | 1,473 | 50 | 99 | 29.46 | 0.819982 | 0.115411 | 0 | 0.055556 | 0 | 0 | 0.002333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76be3939c12479026243425b0c57784692e21da7 | 1,375 | py | Python | src/models.py | antble/CompSci-Project-1 | 3629e85752f70cc987de96a665bb2c25ce80a00f | [
"Apache-2.0"
] | null | null | null | src/models.py | antble/CompSci-Project-1 | 3629e85752f70cc987de96a665bb2c25ce80a00f | [
"Apache-2.0"
] | null | null | null | src/models.py | antble/CompSci-Project-1 | 3629e85752f70cc987de96a665bb2c25ce80a00f | [
"Apache-2.0"
] | null | null | null | import numpy as np
from src.utils import statistics
from sklearn import linear_model
'''
OLS Regression
'''
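# Closed form used below: beta_hat = (X^T X)^+ X^T y, where ^+ is the
# Moore-Penrose pseudoinverse (pinv handles the rank-deficient case), and
# predictions are y_hat = X_test @ beta_hat.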
def ols_model(train_data, X_test, *args):
X_train, y_train = train_data
beta = np.linalg.pinv(X_train.T @ X_train) @ X_train.T @ y_train
y_predict = X_test @ beta
return y_predict, beta
def ols_model_skl(train_data, X_test, *args):
X_train, y_train = train_data
ols = linear_model.LinearRegression(fit_intercept=False)
ols.fit(X_train, y_train)
y_predict = ols.predict(X_test)
return y_predict, ols.coef_
'''
Ridge regression
'''
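# Closed form used below: beta_ridge = (X^T X + lambda * I)^{-1} X^T y,
# where lambda (`lmb`) shrinks the coefficients toward zero.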
def ridge_model(train_data, X_test, lmb=0):
X_train, y_train = train_data
p = (X_train.T @ X_train).shape
identity_matrix = np.eye(p[0], p[1])
ridge_beta = np.linalg.pinv(X_train.T @ X_train + lmb*identity_matrix) @ X_train.T @ y_train
y_predict = X_test @ ridge_beta
return y_predict, ridge_beta
def ridge_model_skl(train_data, X_test, lmb):
X_train, y_train = train_data
ridge = linear_model.Ridge(lmb, fit_intercept=False)
ridge.fit(X_train, y_train)
return ridge.predict(X_test), ridge.coef_
'''
LASSO Regression
'''
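# No closed form exists for the L1 penalty; scikit-learn's Lasso solves the
# problem iteratively (coordinate descent), which is why only the library
# version is provided here.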
def lasso_model_skl(train_data, X_test, lmb=0):
X_train, y_train = train_data
lasso = linear_model.Lasso(lmb, fit_intercept=False, tol=1e-2)
lasso.fit(X_train, y_train)
return lasso.predict(X_test), lasso.coef_
| 25.943396 | 96 | 0.714182 | 231 | 1,375 | 3.926407 | 0.194805 | 0.105843 | 0.061742 | 0.105843 | 0.449835 | 0.407938 | 0.329658 | 0.293275 | 0.293275 | 0.171996 | 0 | 0.005305 | 0.177455 | 1,375 | 52 | 97 | 26.442308 | 0.79664 | 0 | 0 | 0.16129 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.096774 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76c05d501d2db1028d5ed0985cbecc07cbbeee94 | 3,652 | py | Python | dataset/pipa.py | tugrabatin/backdoors101 | af12c08280fe59380f74c05e2737eb2e92a80fdf | [
"MIT"
] | 179 | 2020-11-08T18:57:35.000Z | 2022-03-29T00:51:36.000Z | dataset/pipa.py | tugrabatin/backdoors101 | af12c08280fe59380f74c05e2737eb2e92a80fdf | [
"MIT"
] | 15 | 2020-11-24T01:20:13.000Z | 2022-03-03T03:45:55.000Z | dataset/pipa.py | tugrabatin/backdoors101 | af12c08280fe59380f74c05e2737eb2e92a80fdf | [
"MIT"
] | 46 | 2020-11-30T02:36:02.000Z | 2022-03-20T02:39:08.000Z | from __future__ import print_function, division
import torch
import torch.utils.data as data
from torchvision.datasets.folder import default_loader
class Annotations:
photoset_id = None
photo_id = None
xmin = None
ymin = None
width = None
height = None
identity_id = None
subset_id = None
people_on_photo = 0
def __repr__(self):
return f'photoset: {self.photoset_id}, photo id: {self.photo_id}, ' \
f'identity: {self.identity_id}, subs: {self.subset_id}, ' \
f'{self.people_on_photo}'
class PipaDataset(data.Dataset):
"""Face Landmarks dataset."""
def __init__(self, data_path, train=True, transform=None):
"""
Args:
data_path (string): Directory with all the data.
train (bool): train or test dataset.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.directory = data_path
try:
if train:
self.data_list = torch.load(f'{self.directory}/train_split.pt')
else:
self.data_list = torch.load(f'{self.directory}/test_split.pt')
self.photo_list = torch.load(f'{self.directory}/photo_list.pt')
self.target_identities = torch.load(
f'{self.directory}/target_identities.pt')
except FileNotFoundError:
raise FileNotFoundError(
'Please download the archive: https://drive.google.com/'
'file/d/1IAsTDl6kw4u8kk7Ikyf8K2A4RSPv9izz')
self.transform = transform
self.loader = default_loader
        # compute each label tuple once and derive both views from it
        self.metadata = [self.get_label(i) for i in range(len(self))]
        self.labels = torch.tensor([m[0] for m in self.metadata])
def __len__(self):
return len(self.data_list)
def get_label(self, idx):
photo_id, identities = self.data_list[idx]
target = len(identities) - 1
if target > 4:
target = 4
target_identity = 0
for pos, z in enumerate(self.target_identities):
if z in identities:
target_identity = pos + 1
return target, target_identity, photo_id, idx
def __getitem__(self, idx):
photo_id, identities = self.data_list[idx]
x = self.photo_list[photo_id][0]
if x.subset_id == 1:
path = 'train'
else:
path = 'test'
        target = len(identities) - 1
        # group sizes of five or more people all share the final class
        if target > 4:
            target = 4
target_identity = 0
for pos, z in enumerate(self.target_identities):
if z in identities:
target_identity = pos + 1
# get image
sample = self.loader(
f'{self.directory}/{path}/{x.photoset_id}_{x.photo_id}.jpg')
crop = self.get_crop(photo_id)
sample = sample.crop(crop)
if self.transform is not None:
sample = self.transform(sample)
return sample, target, target_identity, (photo_id, idx)
def get_crop(self, photo_id):
ids = self.photo_list[photo_id]
        # sentinel extremes, assumed larger than any photo dimension
        left = upper = 100000
        right = lower = 0
for x in ids:
left = min(x.xmin, left)
upper = min(x.ymin, upper)
right = max(x.xmin + x.width, right)
lower = max(x.ymin + x.height, lower)
diff = (right - left) - (lower - upper)
if diff >= 0:
lower += diff
else:
right -= diff
return left, upper, right, lower
| 30.949153 | 79 | 0.570646 | 445 | 3,652 | 4.51236 | 0.267416 | 0.041833 | 0.02988 | 0.027888 | 0.270916 | 0.239542 | 0.208167 | 0.175299 | 0.140438 | 0.101594 | 0 | 0.015944 | 0.33023 | 3,652 | 117 | 80 | 31.213675 | 0.804988 | 0.070099 | 0 | 0.215909 | 0 | 0 | 0.125975 | 0.073785 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.045455 | 0.022727 | 0.295455 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76c3514a5ebad4003ba1794c25c1144967d20722 | 5,422 | py | Python | src/exabgp/bgp/message/update/nlri/bgpls/tlvs/node.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 1,560 | 2015-01-01T08:53:05.000Z | 2022-03-29T20:22:43.000Z | src/exabgp/bgp/message/update/nlri/bgpls/tlvs/node.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 818 | 2015-01-01T17:38:40.000Z | 2022-03-30T07:29:24.000Z | src/exabgp/bgp/message/update/nlri/bgpls/tlvs/node.py | pierky/exabgp | 34be537ae5906c0830b31da1152ae63108ccf911 | [
"BSD-3-Clause"
] | 439 | 2015-01-06T21:20:41.000Z | 2022-03-19T23:24:25.000Z | # encoding: utf-8
"""
node.py
Created by Evelio Vila on 2016-11-26. eveliovila@gmail.com
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
from struct import unpack
from exabgp.protocol.ip import IP
from exabgp.protocol.iso import ISO
from exabgp.bgp.message.notification import Notify
# +--------------------+-------------------+----------+
# | Sub-TLV Code Point | Description | Length |
# +--------------------+-------------------+----------+
# | 512 | Autonomous System | 4 |
# | 513 | BGP-LS Identifier | 4 |
# | 514 | OSPF Area-ID | 4 |
# | 515 | IGP Router-ID | Variable |
# +--------------------+-------------------+----------+
# https://tools.ietf.org/html/rfc7752#section-3.2.1.4
# ================================================================== NODE-DESC-SUB-TLVs
NODE_TLVS = {
512: 'autonomous-system',
513: 'bgp-ls-id',
514: 'ospf-area-id',
515: 'igp-rid',
}
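# Illustrative wire form of an Autonomous System sub-TLV (made-up value):
#   b'\x02\x00\x00\x04\x00\x00\xfd\xe8'  ->  dtype=512, dlength=4, ASN=65000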
# TODO
# 3.2.1.5. Multi-Topology ID
class NodeDescriptor(object):
def __init__(self, node_id, dtype, psn=None, dr_id=None, packed=None):
self.node_id = node_id
self.dtype = dtype
self.psn = psn
self.dr_id = dr_id
self._packed = packed
@classmethod
def unpack(cls, data, igp):
dtype, dlength = unpack('!HH', data[0:4])
        if dtype not in NODE_TLVS:
raise Exception("Unknown Node Descriptor Sub-TLV")
# OSPF Area-ID
if dtype == 514:
return (
cls(node_id=IP.unpack(data[4 : 4 + dlength]), dtype=dtype, packed=data[: 4 + dlength]),
data[4 + dlength :],
)
# IGP Router-ID: The TLV size in combination with the protocol
# identifier enables the decoder to determine the type
# of the node: sec 3.2.1.4.
elif dtype == 515:
# OSPFv{2,3} non-pseudonode
if (igp == 3 or igp == 6) and dlength == 4:
r_id = IP.unpack(data[4 : 4 + 4])
return cls(node_id=r_id, dtype=dtype, packed=data[: 4 + dlength]), data[4 + 4 :]
# OSPFv{2,3} LAN pseudonode
if (igp == 3 or igp == 6) and dlength == 8:
r_id = IP.unpack(data[4 : 4 + 4])
dr_id = IP.unpack(data[8 : 4 + 8])
return cls(node_id=r_id, dtype=dtype, psn=None, dr_id=dr_id, packed=data[: 4 + dlength]), data[4 + 8 :]
# IS-IS non-pseudonode
if (igp == 1 or igp == 2) and dlength == 6:
return (
cls(node_id=ISO.unpack_sysid(data[4 : 4 + 6]), dtype=dtype, packed=data[: 4 + dlength]),
data[4 + 6 :],
)
# IS-IS LAN pseudonode = ISO Node-ID + PSN
# Unpack ISO address
if (igp == 1 or igp == 2) and dlength == 7:
iso_node = ISO.unpack_sysid(data[4 : 4 + 6])
psn = unpack('!B', data[4 + 6 : 4 + 7])[0]
return cls(node_id=iso_node, dtype=dtype, psn=psn, packed=data[: 4 + dlength]), data[4 + 7 :]
elif dtype == 512 and dlength == 4:
# ASN
return (
cls(node_id=unpack('!L', data[4 : 4 + dlength])[0], dtype=dtype, packed=data[: 4 + dlength]),
data[4 + 4 :],
)
elif dtype == 513 and dlength == 4:
# BGP-LS
return (
cls(node_id=unpack('!L', data[4 : 4 + dlength])[0], dtype=dtype, packed=data[: 4 + dlength]),
data[4 + 4 :],
)
else:
raise Notify(3, 5, 'could not decode Local Node descriptor')
def json(self, compact=None):
ospf = None
designated = None
psn = None
router_id = None
asn = None
bgpls_id = None
if self.dtype == 514:
ospf = '"ospf-area-id": "%s"' % self.node_id
if self.dr_id is not None:
designated = '"designated-router-id": "%s"' % self.dr_id
if self.psn is not None:
psn = '"psn": "%s"' % self.psn
if self.dtype == 515:
router_id = '"router-id": "%s"' % self.node_id
if self.dtype == 512:
asn = '"autonomous-system": %d' % self.node_id
if self.dtype == 513:
bgpls_id = '"bgp-ls-identifier": "%d"' % self.node_id
content = ', '.join(d for d in [ospf, designated, psn, router_id, asn, bgpls_id] if d)
return content
def __eq__(self, other):
return isinstance(other, NodeDescriptor) and self.node_id == other.node_id
    def __ne__(self, other):
        # the data-model hook for "!=" is __ne__; __neq__ was never invoked
        return self.node_id != other.node_id
def __lt__(self, other):
raise RuntimeError('Not implemented')
def __le__(self, other):
raise RuntimeError('Not implemented')
def __gt__(self, other):
raise RuntimeError('Not implemented')
def __ge__(self, other):
raise RuntimeError('Not implemented')
def __str__(self):
return self.json()
def __repr__(self):
return self.__str__()
def __len__(self):
return len(self._packed)
def __hash__(self):
return hash(str(self))
def pack(self):
return self._packed
| 35.671053 | 119 | 0.500369 | 680 | 5,422 | 3.852941 | 0.236765 | 0.041985 | 0.022901 | 0.040076 | 0.335115 | 0.312214 | 0.278626 | 0.164122 | 0.09084 | 0.053435 | 0 | 0.043008 | 0.335301 | 5,422 | 151 | 120 | 35.907285 | 0.683962 | 0.217816 | 0 | 0.141414 | 0 | 0 | 0.073414 | 0.005464 | 0 | 0 | 0 | 0.006623 | 0 | 1 | 0.141414 | false | 0 | 0.040404 | 0.070707 | 0.343434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76c76b3318aabc52c57cd61148edf9a8a15fc12b | 17,314 | py | Python | TwitterWebsiteSearch/TwitterClient.py | dtuit/TwitterWebsiteSearch | 9e7cdee9fd82139b8a8b540be0103eca6b82b0e2 | [
"MIT"
] | 5 | 2017-08-29T04:07:19.000Z | 2021-04-25T15:16:34.000Z | TwitterWebsiteSearch/TwitterClient.py | dtuit/TwitterWebsiteSearch | 9e7cdee9fd82139b8a8b540be0103eca6b82b0e2 | [
"MIT"
] | null | null | null | TwitterWebsiteSearch/TwitterClient.py | dtuit/TwitterWebsiteSearch | 9e7cdee9fd82139b8a8b540be0103eca6b82b0e2 | [
"MIT"
] | 3 | 2016-08-20T23:25:14.000Z | 2017-08-29T04:07:20.000Z | import requests
from requests import Request, Session
from requests.packages.urllib3.util import Retry
from requests.adapters import HTTPAdapter
from datetime import datetime, timezone
from time import sleep
import lxml
import lxml.html as lh
from urllib.parse import quote, urlsplit
import re
from operator import itemgetter
from copy import deepcopy
#tmp import
# from lxml import etree
from lxml.etree import strip_elements
# import logging
# logging.basicConfig(level=logging.DEBUG)
# import time
# def timing(f):
# def wrap(*args):
# time1 = time.time()
# ret = f(*args)
# time2 = time.time()
# print("{} function took {:0.3f} ms".format(f.__name__, (time2-time1)*1000.0))
# return ret
# return wrap
class TwitterClient():
FIDDLER_DEBUG = False
@staticmethod
def init_default_session(retrys=5,backoff_factor=0.1):
session = Session()
session.headers.update(
{'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.116 Safari/537.36',
'Accept-Encoding' : 'gzip, deflate, sdch, br',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'en-GB,en-US;q=0.8,en;q=0.6',
'X-Requested-With': 'XMLHttpRequest'})
retries = Retry(total=retrys,
backoff_factor=backoff_factor,
status_forcelist=[ 500, 502, 503, 504 ])
session.mount('https://', HTTPAdapter(max_retries=retries))
if TwitterClient.FIDDLER_DEBUG:
proxies = {'http': 'http://127.0.0.1:8888', 'https': 'https://127.0.0.1:8888'}
session.proxies.update(proxies)
return session
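    # Note: urllib3's Retry backs off roughly exponentially between attempts
    # (backoff_factor * 2**n), so factor 0.1 gives ~0.1s/0.2s/0.4s... pauses
    # for the listed 5xx responses; the exact first-retry sleep varies by
    # urllib3 version.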
def __init__(self,
session=None,
timeout=12,
continue_on_empty_result=True):
if session is None:
session = self.init_default_session()
self.session = session
self.timeout = timeout
self.continue_on_empty_result = continue_on_empty_result
self.search_url = 'https://twitter.com/i/search/timeline'
self.user_url = 'https://twitter.com/i/profiles/show/{username}/timeline/tweets'
def search_query(self, queryBuilder, raw_query_str=None):
if raw_query_str is None:
raw_query_str = queryBuilder.build()
request = self._prepare_request(self.search_url, raw_query_str)
resp = self._execute_request(request)
resp_json = resp.json()
# Extract Results
tweets = []
if resp_json is not None and resp_json['items_html'] is not None:
tweets = self.parse_tweets(resp_json['items_html'])
next_query = deepcopy(queryBuilder)
next_query.max_position = resp_json.get('min_position') #switch the labels because twitter mislabels them
next_query.min_position = resp_json.get('max_position')
next_query.reset_error_state = False
min_id = max_id = None
if len(tweets) > 0:
min_id = tweets[0]['id_str']
            max_id = tweets[-1]['id_str'] # last tweet of the page (tweets[1] would raise IndexError on single-result pages)
retval = {
'_request': request,
'_response_raw': resp,
'_response_json': resp_json,
'refresh_query': queryBuilder,
'next_query': next_query,
'tweets': tweets,
'min_id': min_id,
'max_id': max_id
}
return retval
def user_query(self, user):
raise NotImplementedError
def get_search_iterator(self, search_query):
# determine if this is the first query or a continuation.
search_query.autoset_reset_error_state()
result = self.search_query(search_query)
next_query = result['next_query']
yield result
while True:
            if len(result['tweets']) == 0:
                if not self.continue_on_empty_result:
                    print('No tweets returned, terminating')
                    break
                else:
                    break
            # TODO reimplement: continue_on_empty_result currently has no effect, both branches stop iterating
result = self.search_query(next_query)
next_query = result['next_query']
yield result
# def binary_search_
# def get_search_iterator(self, queryBuilder):
# qb = qb_prev = deepcopy(queryBuilder)
# result = self.search_query(qb)
# prev_min_tweetId = None
# yield result
# while True:
# if len(result['tweets']) == 0:
# if not self.continue_on_empty_result:
# print('No tweets returned terminating program')
# break
# else:
# # Sometimes the API stops returning tweets even when there are more
# # we can try to find these tweets by modifying the max_position parameter.
# int_minId = int(qb.min_tweetId)
# for x in range(8, len(qb.min_tweetId)): #TODO impl something more sophisticated
# qb.min_tweetId = int_minId - 10**x
# result = self.search(qb)
# if len(result['tweets']) > 0:
# break
# else:
# print('No tweets returned terminating program')
# # if we didnt find any point to continue from, break.
# break
# if qb.max_tweetId is None:
# qb.max_tweetId = result['tweets'][0]['id_str']
# # In a high volume search query like 'a' must use the max_tweet_id provided by the result,
# # otherwise the same results will be returned many times. (only happens during the first ~10 pages of results)
# res_min_pos = result['response_json'].get('min_position')
# if res_min_pos is not None:
# split = res_min_pos.split('-')
# qb.max_tweetId = split[2]
# prev_min_tweetId = qb.min_tweetId
# qb.min_tweetId = result['tweets'][-1]['id_str']
# # If the current request returns the same tweets as the last
# # the query is configured wrong
# # TODO create more accurate metric
# if prev_min_tweetId is qb.min_tweetId:
# break
# result = self.search_query(qb)
# yield result
def _execute_request(self, prepared_request):
try:
if TwitterClient.FIDDLER_DEBUG:
result = self.session.send(prepared_request, timeout=self.timeout, verify=False)
else:
result = self.session.send(prepared_request, timeout=self.timeout)
return result
except requests.exceptions.Timeout as e:
raise
def _prepare_request(self, url, payload_str):
req = Request('GET', url, params=payload_str, cookies={})
return self.session.prepare_request(req)
@staticmethod
def _encode_max_postion_param(min, max):
return "TWEET-{0}-{1}".format(min, max)
def parse_tweets(self, items_html):
try:
html = lh.fromstring(items_html)
except lxml.etree.ParserError as e:
return []
tweets = []
for li in html.cssselect('li.js-stream-item'):
# Check if is a tweet type element
if 'data-item-id' not in li.attrib:
continue
tweet = self._parse_tweet(li)
if tweet is not None:
tweets.append(tweet)
return tweets
def _parse_tweet(self, tweetElement):
'''
Parses the attributes of a tweet from the tweetElement into a dict
returns None if there is an error in the tweet
'''
li = tweetElement
tweet = {
'created_at' : None,
'id_str' : li.get('data-item-id'),
'text' : None,
'lang' : None,
'entities': {
'hashtags': [],
'symbols':[],
'user_mentions':[],
'urls':[],
},
'user' : {
'id_str' : None,
'name' : None,
'screen_name': None,
'profile_image_url': None,
'verified': False
},
'retweet_count' : 0,
'favorite_count' : 0,
'is_quote_status' : False,
'in_reply_to_user_id': None,
'in_reply_to_screen_name' : None,
'contains_photo': False,
'contains_video': False,
'contains_card': False
}
content_div = li.cssselect('div.tweet')
if len(content_div) > 0:
content_div = content_div[0]
tweet['user']['id_str'] = content_div.get('data-user-id')
tweet['user']['name'] = content_div.get('data-name')
tweet['user']['screen_name'] = content_div.get('data-screen-name')
reply_a = content_div.cssselect('div.tweet-context a.js-user-profile-link') # tweet-context can be used by many functions, incl follow, reply, retweet only extract reply atm
if len(reply_a) > 0:
if len(content_div.cssselect('div.tweet-context span.Icon--reply')) > 0: # check if actually a reply
tweet['in_reply_to_user_id'] = reply_a[0].get('data-user-id')
tweet['in_reply_to_screen_name'] = reply_a[0].get('href').strip('/')
user_img = content_div.cssselect('img.avatar')
if len(user_img) > 0:
tweet['user']['profile_image_url'] = user_img[0].get('src')
text_p = content_div.cssselect('p.tweet-text, p.js-tweet-text')
if len(text_p) > 0:
text_p = text_p[0]
self._parse_tweet_text(text_p, tweet)
tweet['lang'] = text_p.get('lang')
self._parse_tweet_entites(text_p, tweet['entities'])
else:
# there is no tweet text, unknown if this occurs
return None
verified_span = content_div.cssselect('span.Icon--verified')
if len(verified_span) > 0:
tweet['user']['verified'] = True
date_span = content_div.cssselect('span._timestamp')
if len(date_span) > 0:
timestamp = int(date_span[0].get('data-time-ms'))/1000
tweet['created_at'] = datetime.fromtimestamp(timestamp, tz=timezone.utc).strftime('%a %b %d %H:%M:%S %z %Y')
#Retweet and Favoritte counts
counts = li.cssselect('span.ProfileTweet-action--retweet, span.ProfileTweet-action--favorite')
if len(counts) > 0:
for c in counts:
classes = c.get('class').split(' ')
if 'ProfileTweet-action--retweet' in classes:
tweet['retweet_count'] = int(c[0].get('data-tweet-stat-count'))
elif 'ProfileTweet-action--favorite' in classes:
tweet['favorite_count'] = int(c[0].get('data-tweet-stat-count'))
#Extract Quoted Status
quoted_tweet_context = content_div.cssselect('div.QuoteTweet-innerContainer')
if len(quoted_tweet_context) > 0:
quoted_tweet_context = quoted_tweet_context[0]
tweet['is_quote_status'] = True
tweet['quoted_status_id_str'] = quoted_tweet_context.get('data-item-id')
tweet['quoted_status'] = {
'id_str': None,
'text': None,
'user': {
'id_str' : None,
'name' : None,
'screen_name' : None,
},
'entities' : {
'hashtags' : [],
'symbols' :[],
'user_mentions':[],
'urls':[]
}
}
qtweet = tweet['quoted_status']
qtweet['id_str'] = quoted_tweet_context.get('data-item-id')
qtweet['user']['id_str'] = quoted_tweet_context.get('data-user-id')
qtweet['user']['screen_name'] = quoted_tweet_context.get('data-screen-name')
qt_user_name = quoted_tweet_context.cssselect('b.QuoteTweet-fullname')
if len(qt_user_name) > 0:
qtweet['user']['name'] = qt_user_name[0].text_content()
qt_text = quoted_tweet_context.cssselect('div.QuoteTweet-text.tweet-text')
if len(qt_text) > 0:
qt_text = qt_text[0]
self._parse_tweet_text(qt_text, qtweet)
self._parse_tweet_entites(qt_text, qtweet['entities'])
# Extract Media entities
tweet_media_context = content_div.cssselect('div.AdaptiveMedia-container')
if len(tweet_media_context) > 0:
tweet_media_context = tweet_media_context[0]
tweet['entities']['media'] = []
photo_found = False
tweet_media_photos = tweet_media_context.cssselect('div.AdaptiveMedia-photoContainer')
for elm in tweet_media_photos:
tweet['contains_photo'] = photo_found = True
photo = {
'media_url' : elm.get('data-image-url'),
'type' : 'photo'
}
tweet['entities']['media'].append(photo)
if not photo_found:
tweet_media_video = tweet_media_context.cssselect('div.AdaptiveMedia-videoContainer')
if len(tweet_media_video) > 0:
tweet['contains_video'] = True
video = {
'type' : 'video',
'video_type' : re.search(re.compile(r"PlayableMedia--([a-zA-Z]*)"), tweet_media_video[0].cssselect('div[class*="PlayableMedia--"]')[0].get('class')).group(1),
'media_url' : 'https://twitter.com/i/videos/tweet/' + tweet['id_str'],
'video_thumbnail' : re.search(re.compile(r"background-image:url\(\'(.*)\'"),tweet_media_video[0].cssselect('div.PlayableMedia-player')[0].get('style')).group(1)
}
tweet['entities']['media'].append(video)
return tweet
def _parse_tweet_text(self, text_element, tweet):
#hacky way to include Emojis
for emoj in text_element.cssselect('img.Emoji'):
emoj.tail = emoj.get('alt') + emoj.tail if emoj.tail else emoj.get('alt')
#Modify Urls so they are correct
for url in text_element.cssselect('a.twitter-timeline-link'):
is_truncated = u'\u2026' in url.text_content()
url_disp = url.cssselect('span.js-display-url')
if len(url_disp) > 0:
url_disp_text = url_disp[0].text_content()
if is_truncated:
url_disp_text = url_disp_text + u'\u2026'
url.attrib['xtract-display-url'] = url_disp_text # store for later extraction
            elif url.text and 'pic.twitter.com' in url.text: # url.text can be None, guard before the membership test
url.attrib['xtract-display-url'] = url.text
strip_elements(url, ['*'])
url.text = url.attrib['href']
tmp = str(text_element.text_content())
for m in re.finditer(r'(?<!\s)(?<!\\n)(http|https)://', tmp): #add a space before urls where required
tmp = tmp[:m.start()] + ' ' + tmp[m.start():]
tweet['text'] = tmp
def _parse_tweet_entites(self, element, entities):
tags = element.cssselect('a.twitter-hashtag, a.twitter-cashtag, a.twitter-atreply, a.twitter-timeline-link')
if len(tags) > 0:
for tag in tags:
classes = tag.get('class').split(' ')
if 'twitter-hashtag' in classes:
entities['hashtags'].append(tag.text_content().strip(' \n#'))
elif 'twitter-cashtag' in classes:
entities['symbols'].append(tag.text_content().strip(' \n$'))
elif 'twitter-atreply' in classes:
mentioned_user = {
'id_str' : tag.get('data-mentioned-user-id'),
'screen_name' : tag.get('href').strip('/') if tag.get('href') is not None else None
}
entities['user_mentions'].append(mentioned_user)
elif 'twitter-timeline-link' in classes:
url = {
'url': tag.get('href'),
'expanded_url' : tag.get('data-expanded-url'),
'display_url' : tag.get('xtract-display-url')
}
entities['urls'].append(url)
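# A tiny, self-contained illustration (synthetic markup, not real Twitter
# HTML) of the cssselect/attribute extraction style _parse_tweet uses above:
def _demo_extract():
    li = lh.fromstring('<li data-item-id="42"><div class="tweet" data-user-id="7" data-screen-name="alice"></div></li>')
    div = li.cssselect('div.tweet')[0]
    return {'id_str': li.get('data-item-id'),
            'user_id': div.get('data-user-id'),
            'screen_name': div.get('data-screen-name')}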
if __name__ == "__main__":
import TwitterQuery
# TwitterClient.FIDDLER_DEBUG = True
x = TwitterClient(timeout=None)
try:
gen = x.get_search_iterator(TwitterQuery.SearchQuery('apple filter:replies'))
for res in gen:
print(len(res['tweets']))
except requests.exceptions.Timeout as e:
print(e)
def get_ids(tweets):
return [tweet['id_str'] for tweet in tweets] | 40.171694 | 184 | 0.549498 | 1,976 | 17,314 | 4.623482 | 0.212551 | 0.010398 | 0.019702 | 0.011493 | 0.207531 | 0.128065 | 0.087456 | 0.075197 | 0.059545 | 0.025394 | 0 | 0.012948 | 0.335336 | 17,314 | 431 | 185 | 40.171694 | 0.780935 | 0.171364 | 0 | 0.134021 | 0 | 0.010309 | 0.191951 | 0.046355 | 0 | 0 | 0 | 0.00232 | 0 | 1 | 0.044674 | false | 0 | 0.04811 | 0.006873 | 0.134021 | 0.010309 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76c82b69f1006320dc0fddf50db8de80c24e26c9 | 532 | py | Python | sumupto.py | SasiGV/pands-problem-set | 3ddcb21b1103ab88d734e1281188a772573c839e | [
"Apache-2.0"
] | null | null | null | sumupto.py | SasiGV/pands-problem-set | 3ddcb21b1103ab88d734e1281188a772573c839e | [
"Apache-2.0"
] | null | null | null | sumupto.py | SasiGV/pands-problem-set | 3ddcb21b1103ab88d734e1281188a772573c839e | [
"Apache-2.0"
] | null | null | null | #Sasikala Varatharajan - G00376470
#Program to calculate sum of numbers from 1 to the number entered
#Get input from User
n=int(input("Please enter a positive integer "))
#Create an integer called sum and initialize it to zero
sum = 0
if n > 0:
#Create a For loop and add all numbers starting from 1 upto the number entered
    for num in range (1, n+1):
sum = sum + num
#Print the output value
print ("Sum of numbers 1 to", n, "is: ", sum)
else:
print ("Oops, It is a negative number") | 31.294118 | 82 | 0.680451 | 91 | 532 | 3.978022 | 0.571429 | 0.027624 | 0.066298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039702 | 0.242481 | 532 | 17 | 83 | 31.294118 | 0.858561 | 0.526316 | 0 | 0 | 0 | 0 | 0.345679 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76c8506b592c49671db60b7ecb7cdfe6fc993e2a | 1,661 | py | Python | https/views.py | bridgesign/Mini-tweet | 97e13afda73b816c953bd93baba31c4686621fdd | [
"Apache-2.0"
] | null | null | null | https/views.py | bridgesign/Mini-tweet | 97e13afda73b816c953bd93baba31c4686621fdd | [
"Apache-2.0"
] | null | null | null | https/views.py | bridgesign/Mini-tweet | 97e13afda73b816c953bd93baba31c4686621fdd | [
"Apache-2.0"
] | null | null | null | from . import handler
import os
from . import settings
import json
from api.parse import parser
from . import token
import psycopg2
def static_handler(request):
split = request.headers['url'].split('/')
filename, ftype = split[-1], split[-2]
path = os.path.join(settings.static, ftype, filename)
if os.path.isfile(path):
with open(path, 'rb') as fp:
data = fp.read()
else:
return handler.httpresponse(request, settings.NOT_FOUND_TEMPLATE, 404)
h = handler.httpresponse(request, data, content_type=settings.ext_to_type[filename.split('.')[-1]])
h.cache_control = ["public", "max-age=3600"]
return h
def api_handler(request):
if request.headers['method']=='POST':
conn = psycopg2.connect(**settings.DBSETTINGS)
cur = conn.cursor()
try:
if 'token' in request.headers['cookie']:
ctx = token.validate_token(request.headers['cookie']['token'])
else:
ctx = {}
# Connecting to DB in thread safe manner
p = parser({'conn':conn, 'cur':cur})
response = p.parse(ctx, json.loads(request.body))
h = handler.httpresponse(request, json.dumps(response), content_type='application/json')
except:
h = handler.httpresponse(request, settings.BAD_REQUEST_TEMPLATE, 400)
cur.close()
conn.close()
return h
else:
return handler.httpresponse(request, settings.BAD_REQUEST_TEMPLATE, 400)
def index(request):
path = os.path.join('templates','index.html')
with open(path, 'rb') as fp:
data = fp.read()
return handler.httpresponse(request, data)
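# Hypothetical illustration only (the real dispatch lives in the handler
# module): how the `patterns` table below could be matched, first regex wins.
def _match_route(url, routes):
    import re
    for pattern, view in routes:
        if re.match(pattern, url):
            return view
    return None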
patterns = (
('^(?![\s\S])', index),
('index(\.html|\.htm)?', index),
('static/(image|css|js)/.*', static_handler),
('api', api_handler)
)
| 29.140351 | 100 | 0.696568 | 227 | 1,661 | 5.026432 | 0.378855 | 0.099912 | 0.136722 | 0.084137 | 0.192813 | 0.192813 | 0.145486 | 0.145486 | 0.04908 | 0 | 0 | 0.012579 | 0.138471 | 1,661 | 56 | 101 | 29.660714 | 0.784766 | 0.022878 | 0 | 0.18 | 0 | 0 | 0.098088 | 0.014806 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.16 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76c89cbd402fd7fb8d66be683d4508aeece89775 | 2,209 | py | Python | oandapy-master/oandapybot-master/logic/candle.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | null | null | null | oandapy-master/oandapybot-master/logic/candle.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | null | null | null | oandapy-master/oandapybot-master/logic/candle.py | cdibble2011/OANDA | 68327d6d65dd92952d7a1dc49fe29efca766d900 | [
"MIT"
] | null | null | null | # Candle sticks
import datetime
import time
from logic import Indicator, ValidateDatapoint
class Candle(Indicator):
# Opening price
Open = 0.0
# Closing price
Close = 0.0
# Highest price
High = 0.0
# Lowest price
Low = 0.0
# Open timestamp
    OpenTime = datetime.datetime.fromtimestamp(time.time())
    # Close timestamp
    CloseTime = OpenTime
def __init__(self, openTime, closeTime):
if (isinstance(openTime, datetime.datetime)):
self.OpenTime = openTime
if (isinstance(closeTime, datetime.datetime)):
self.CloseTime = closeTime
self._is_closed = False
# Returns true if candle stick accumulated enough data to represent the
# time span between Opening and Closing timestamps
def SeenEnoughData(self):
return self._is_closed
def AmounOfDataStillMissing(self):
if (self.SeenEnoughData()):
return 0
return 1
def Update(self, data):
if ( self.CloseTime < self.OpenTime ):
self._resetPrice(0.0)
self._is_closed = False
return
if (not ValidateDatapoint(data)):
return
_current_timestamp = data["now"]
_price = data["value"]
if (_current_timestamp >= self.CloseTime):
self._is_closed = True
if (_current_timestamp <= self.CloseTime and _current_timestamp >= self.OpenTime):
self._updateData(_price)
def _resetPrice(self, price):
self.High = price
self.Low = price
self.Open = price
self.Close = price
# Update the running timestamps of the data
def _updateData(self, price):
# If this is the first datapoint, initialize the values
if ( self.High == 0.0 and self.Low == 0.0 and self.Open == 0.0 and self.Close == 0.0):
self._resetPrice(price)
self._is_closed = False
return
# Update the values in case the current datapoint is a current High/Low
self.Close = price
self.High = max(price,self.High)
self.Low = min(price,self.Low)
| 26.939024 | 94 | 0.595292 | 250 | 2,209 | 5.144 | 0.272 | 0.013997 | 0.046656 | 0.039658 | 0.083981 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013378 | 0.323223 | 2,209 | 81 | 95 | 27.271605 | 0.846823 | 0.176098 | 0 | 0.166667 | 0 | 0 | 0.004427 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0625 | 0.020833 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76caeb17e7fcca41625bbccedc33d81d11c076ce | 1,703 | py | Python | aoc2020/day15.py | rfrazier716/aoc_2020 | 90c35d16910d28ec2f6de15d4b758977d0ff6f7b | [
"MIT"
] | null | null | null | aoc2020/day15.py | rfrazier716/aoc_2020 | 90c35d16910d28ec2f6de15d4b758977d0ff6f7b | [
"MIT"
] | null | null | null | aoc2020/day15.py | rfrazier716/aoc_2020 | 90c35d16910d28ec2f6de15d4b758977d0ff6f7b | [
"MIT"
] | null | null | null | from collections import defaultdict
def memory_generator(starting_numbers):
# make a dictionary that keeps track of number that were called and when they were called
memory_dict = defaultdict(int, zip(starting_numbers,range(1,len(starting_numbers)+1)))
next_num = 0
turn = len(memory_dict) + 1
# now constantly loop over the last value, check if it's been called before, and if so
# this turns value is the difference from when it was last called
while True:
yield next_num # return the next number
# now update the dictionary so you can generate the number after
last_called_turn = memory_dict[next_num]
# if it's never been called added it to the dict and set the next num to zero
if not last_called_turn:
memory_dict[next_num]=turn
next_num = 0 # next number is always zero since it's never been called
else:
temp = turn - memory_dict[next_num] # temporarily store what the next number will be
memory_dict[next_num]= turn # update the memory dict with the turn
next_num = temp # update the next num to be the difference from when the number was last called
        turn += 1 # increment the turn
def find_nth_memory_result(input_array, search_number):
gen = memory_generator(input_array)
for _ in range(search_number-1-len(input_array)):
next(gen)
return next(gen)
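# Quick self-check (the sample from the original puzzle statement, not this
# file's input): with starting numbers 0,3,6 the 2020th number spoken is 436.
def _check_sample():
    assert find_nth_memory_result([0, 3, 6], 2020) == 436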
if __name__ == "__main__":
input_array = [1,2,16,19,18,0]
part1_answer = find_nth_memory_result(input_array, 2020)
print(f"Part1 Solution: {part1_answer}")
part2_answer = find_nth_memory_result(input_array, 30000000)
print(f"Part1 Solution: {part2_answer}") | 48.657143 | 107 | 0.704052 | 263 | 1,703 | 4.357414 | 0.380228 | 0.061082 | 0.048866 | 0.059337 | 0.212042 | 0.140489 | 0.115183 | 0 | 0 | 0 | 0 | 0.025974 | 0.231356 | 1,703 | 35 | 108 | 48.657143 | 0.849503 | 0.373459 | 0 | 0.148148 | 0 | 0 | 0.064394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0 | 0.148148 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
76cd82a03ec0619f89a6f670da436df7f60a8e00 | 864 | py | Python | src/make_simple_polygon.py | wreck-count/Fast-Simple-Polygon-Triangulation | 9b54f8cd87512d9ca2ee6e8208571c91595bfcae | [
"MIT"
] | 1 | 2021-06-29T08:28:32.000Z | 2021-06-29T08:28:32.000Z | src/make_simple_polygon.py | wreck-count/Fast-Simple-Polygon-Triangulation | 9b54f8cd87512d9ca2ee6e8208571c91595bfcae | [
"MIT"
] | null | null | null | src/make_simple_polygon.py | wreck-count/Fast-Simple-Polygon-Triangulation | 9b54f8cd87512d9ca2ee6e8208571c91595bfcae | [
"MIT"
] | null | null | null | import geopandas as gpd
from shapely.geometry import Polygon
lat_point_list = [50.854457, 52.518172, 50.072651, 48.853033, 50.854457]
lon_point_list = [4.377184, 13.407759, 14.435935, 2.349553, 4.377184]
polygon_geom = Polygon(zip(lon_point_list, lat_point_list))
crs = {'init': 'epsg:4326'}
polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])
print(polygon.geometry)
polygon.to_file(filename='polygon.geojson', driver='GeoJSON')
polygon.to_file(filename='polygon.shp', driver="ESRI Shapefile")
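# Optional sanity check (plain shapely, no extra I/O): report the ring's area
# and bounding box in degrees before sampling random points inside it below.
print("area (deg^2):", polygon_geom.area, "bounds:", polygon_geom.bounds)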
import random
from shapely.geometry import Point
def generate_random(number, polygon):
points = []
minx, miny, maxx, maxy = polygon.bounds
while len(points) < number:
pnt = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
if polygon.contains(pnt):
points.append(pnt)
return points | 34.56 | 78 | 0.71875 | 121 | 864 | 5.024793 | 0.520661 | 0.059211 | 0.0625 | 0.082237 | 0.092105 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111565 | 0.149306 | 864 | 25 | 79 | 34.56 | 0.715646 | 0 | 0 | 0 | 0 | 0 | 0.069364 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.2 | 0 | 0.3 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f113307e1dde4f1936705a49826b0a68aefcbf5 | 6,678 | py | Python | openmv_filesys/astro_sensor.py | morrowsend/OpenMV-Astrophotography-Gear | abcc1755ab85b32c8fbdba67d24350f296513544 | [
"MIT"
] | 1 | 2021-06-29T02:19:25.000Z | 2021-06-29T02:19:25.000Z | openmv_filesys/astro_sensor.py | morrowsend/OpenMV-Astrophotography-Gear | abcc1755ab85b32c8fbdba67d24350f296513544 | [
"MIT"
] | null | null | null | openmv_filesys/astro_sensor.py | morrowsend/OpenMV-Astrophotography-Gear | abcc1755ab85b32c8fbdba67d24350f296513544 | [
"MIT"
] | 1 | 2021-02-07T02:00:58.000Z | 2021-02-07T02:00:58.000Z | import micropython
micropython.opt_level(2)
import sensor, image, pyb, time, gc
import exclogger
class AstroCam(object):
def __init__(self, pixfmt = sensor.GRAYSCALE, simulate = None):
self.pixfmt = pixfmt
self.gain = -2
self.shutter = -2
self.framesize = sensor.QQCIF
self.flip = False
self.fileseq = 1
self.img = None
self.has_error = False
self.wait_init = 0
self.snap_started = False
self.simulate = False
if simulate is not None:
sensor.shutdown(True)
gc.collect()
#print("about to load simulation file, checking memory")
#micropython.mem_info(False)
print("loading simulation file ...", end="")
self.img = image.Image(simulate, copy_to_fb = True)
print(" done, alloc and converting ...", end="")
self.img = sensor.alloc_extra_fb(self.img.width(), self.img.height(), sensor.RGB565).replace(self.img).to_grayscale()
print(" done!")
self.simulate = True
self.snap_started = False
self.width = self.img.width()
self.height = self.img.height()
def init(self, gain_db = 0, shutter_us = 500000, framesize = sensor.WQXGA2, force_reset = True, flip = False):
if self.simulate:
self.shutter = shutter_us
self.gain = gain_db
self.snap_started = False
return
if force_reset or self.has_error or self.gain != gain_db or self.shutter != shutter_us or self.framesize != framesize or self.flip != flip:
sensor.reset()
sensor.set_pixformat(self.pixfmt)
sensor.set_framesize(framesize)
if flip: # upside down camera
sensor.set_vflip(True)
sensor.set_hmirror(True)
self.flip = flip
self.framesize = framesize
if shutter_us < 0:
sensor.set_auto_exposure(True)
else:
if shutter_us > 500000:
sensor.__write_reg(0x3037, 0x08) # slow down PLL
if shutter_us > 1000000:
pyb.delay(100)
sensor.__write_reg(0x3037, 0x18) # slow down PLL
if shutter_us > 1500000:
pyb.delay(100)
sensor.__write_reg(0x3036, 80) # slow down PLL
# warning: doesn't work well, might crash
pyb.delay(200)
sensor.set_auto_exposure(False, shutter_us)
self.shutter = shutter_us
if gain_db < 0:
sensor.set_auto_gain(True)
else:
sensor.set_auto_gain(False, gain_db)
self.gain = gain_db
self.wait_init = 2
self.width = sensor.width()
self.height = sensor.height()
def check_init(self):
if self.wait_init > 0:
if self.snap_started == False:
self.snapshot_start()
elif self.snapshot_check():
self.snapshot_finish()
self.wait_init -= 1
return False
return True
def snapshot(self, filename = None):
if self.simulate:
pyb.delay(self.shutter // 1000)
self.snap_started = False
return self.img
try:
if self.snap_started == True:
self.img = self.snapshot_finish()
else:
self.img = sensor.snapshot()
if filename == "auto":
filename = "%u_%u_%u.jpg" % (self.fileseq, round(self.gain), self.shutter)
self.fileseq += 1
if filename is not None:
self.img.save(filename, quality = 100)
return self.img
except RuntimeError as exc:
exclogger.log_exception(exc)
self.has_error = True
return None
def snapshot_start(self):
if self.snap_started == True:
return
if self.simulate:
self.sim_t = pyb.millis()
self.snap_started = True
return
try:
sensor.snapshot_start()
self.snap_started = True
except RuntimeError as exc:
exclogger.log_exception(exc)
self.has_error = True
def snapshot_check(self):
if self.snap_started == False:
return False
if self.simulate:
dt = pyb.elapsed_millis(self.sim_t)
if dt > (self.shutter // 1000):
return True
else:
return False
return sensor.snapshot_check()
def snapshot_finish(self):
        if self.snap_started == False:
            return None
if self.simulate:
while self.snapshot_check() == False:
gc.collect()
self.snap_started = False
return self.img
try:
self.img = sensor.snapshot_finish()
self.has_error = False
except RuntimeError as exc:
exclogger.log_exception(exc)
self.img = None
self.has_error = True
self.snap_started = False
return self.img
def test_gather(self, shots = 2, gain_start = 0, gain_step = 16, gain_limit = 128, shutter_start = 500000, shutter_step = 250000, shutter_limit = 1500000):
shot = 0
rnd = pyb.rng() % 1000
gain = gain_start
shutter = shutter_start
while True:
self.init(gain_db = gain, shutter_us = shutter, framesize = sensor.WQXGA2, force_reset = False, flip = True)
fn = "%u_%u_%u_%u_%u.jpg" % (rnd, self.fileseq, shot, round(self.gain), self.shutter)
print(fn + " ... ", end="")
self.snapshot(filename = fn)
print("done")
shot += 1
if shot >= shots:
shot = 0
gain += gain_step
if gain > gain_limit:
gain = gain_start
shutter += shutter_step
if shutter > shutter_limit:
return
def test_view(self):
self.init(gain_db = -1, shutter_us = -1, framesize = sensor.WQXGA2, force_reset = True, flip = True)
clock = time.clock()
while True:
clock.tick()
self.snapshot()
print("%u - %0.2f" % (self.fileseq, clock.fps()))
if __name__ == "__main__":
cam = AstroCam()
cam.test_view()
#cam.test_gather()
| 36.097297 | 159 | 0.529799 | 747 | 6,678 | 4.564926 | 0.198126 | 0.034897 | 0.061584 | 0.058651 | 0.286217 | 0.183578 | 0.143695 | 0.100587 | 0.079472 | 0.064516 | 0 | 0.029908 | 0.379155 | 6,678 | 184 | 160 | 36.293478 | 0.792571 | 0.029949 | 0 | 0.413174 | 0 | 0 | 0.01932 | 0 | 0 | 0 | 0.004019 | 0 | 0 | 1 | 0.053892 | false | 0 | 0.017964 | 0 | 0.179641 | 0.035928 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f12094bbfecc319d7f0685f3ac1415baa8332b9 | 1,843 | py | Python | blueprints/docker.py | LucasBiason/python521 | ed418d5335f84fcbfdb1251bc571710c48b429bc | [
"MIT"
] | null | null | null | blueprints/docker.py | LucasBiason/python521 | ed418d5335f84fcbfdb1251bc571710c48b429bc | [
"MIT"
] | 2 | 2021-05-25T13:26:42.000Z | 2021-05-25T13:26:43.000Z | blueprints/docker.py | LucasBiason/python521 | ed418d5335f84fcbfdb1251bc571710c48b429bc | [
"MIT"
] | null | null | null |
import flask
import docker
from services import decorators
blueprint = flask.Blueprint('docker', __name__)
connection = docker.DockerClient()
@blueprint.route('/docker', methods=[ 'GET' ])
@decorators.login_required
@decorators.loggingroutes
def get_docker():
try:
lista_dockers = connection.containers.list(all=True)
except Exception as msg:
        ## This won't run inside the container, since we didn't install Docker inside it
# ('Connection aborted.', FileNotFoundError(2, 'No such file or directory'))
print(msg)
lista_dockers = []
context = {
'page': 'docker',
'route': {
'is_public': False
},
'containers': lista_dockers
}
return flask.render_template('docker.html', context=context)
@blueprint.route('/docker/start/<string:short_id>/', methods=[ 'GET' ])
@decorators.login_required
@decorators.loggingroutes
def start_docker(short_id):
    # containers.get() raises docker.errors.NotFound for an unknown id,
    # so catch it instead of relying on a falsy return value
    try:
        container = connection.containers.get(short_id)
    except docker.errors.NotFound:
        container = None
    if container and container.status != 'running':
        container.start()
        flask.flash("Container started", "success")
    elif not container:
        flask.flash("Container not found", "danger")
    else:
        flask.flash("Container is already running", "danger")
return flask.redirect('/docker')
@blueprint.route('/docker/stop/<string:short_id>/', methods=[ 'GET' ])
@decorators.login_required
@decorators.loggingroutes
def stop_docker(short_id):
    try:
        container = connection.containers.get(short_id)
    except docker.errors.NotFound:
        container = None
    if container and container.status == 'running':
        container.stop()
        flask.flash("Container stopped", "success")
    elif not container:
        flask.flash("Container not found", "danger")
    else:
        flask.flash("Container is already stopped", "danger")
return flask.redirect('/docker')
| 30.213115 | 84 | 0.675529 | 202 | 1,843 | 6.059406 | 0.376238 | 0.034314 | 0.093137 | 0.061275 | 0.522059 | 0.471405 | 0.471405 | 0.471405 | 0.423203 | 0.423203 | 0 | 0.000678 | 0.199132 | 1,843 | 60 | 85 | 30.716667 | 0.828591 | 0.077591 | 0 | 0.333333 | 0 | 0 | 0.19705 | 0.037168 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0.104167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f1335bba0da53d7e660db8abd330c6d402f948e | 1,620 | py | Python | tools/csv_embed_distances.py | hypraptive/bearid | 03e200e6ee5c236344b0dc7da05cf39ef39a7f5b | [
"MIT"
] | 33 | 2017-04-25T22:59:10.000Z | 2022-02-24T21:19:55.000Z | tools/csv_embed_distances.py | hypraptive/bearid | 03e200e6ee5c236344b0dc7da05cf39ef39a7f5b | [
"MIT"
] | 5 | 2017-04-12T22:55:15.000Z | 2020-03-08T02:02:54.000Z | tools/csv_embed_distances.py | hypraptive/bearid | 03e200e6ee5c236344b0dc7da05cf39ef39a7f5b | [
"MIT"
] | 3 | 2020-11-25T14:31:04.000Z | 2021-06-21T23:13:40.000Z | #! /usr/bin/python3
import sys
import argparse
import xml_utils as u
import datetime
import os
from argparse import RawTextHelpFormatter
from collections import defaultdict
##------------------------------------------------------------
## generate csv of distances between all permutations of two
## embedding files.
##
## usage:
## csv_embed_distances.py -out e_dist.csv e_test.xml e_train.xml
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='\nGenerate CSV of distances for all permutations of two input embedding files.\n\n \tExample: ' + os.path.basename (argv[0]) + ' -out e_dist.csv embed1.xml embed2.xml',
formatter_class=RawTextHelpFormatter)
parser.add_argument ('embed1')
parser.add_argument ('embed2')
parser.add_argument ('-db', '--db',
help='db of images info.')
parser.add_argument ('-out', '--output', default="e_dist.csv",
help='specify csv output file.')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2, 3], help='')
# help="increase output verbosity"
u.set_argv (argv)
args = parser.parse_args()
u.set_verbosity (args.verbosity)
u.set_argv (argv)
u.set_filetype ('embeds')
verbose = 0
if verbose > 0:
print("files: ", args.files)
if os.path.exists (args.output) :
        csv_filename = 'e_dist_' + u.current_datetime () + '.csv'
print ('CSV file exists, writing to ' + csv_filename)
args.output = csv_filename
u.gen_embed_dist_csv ([args.embed1], [args.embed2], args.output, args.db)
if __name__ == "__main__":
main (sys.argv)
| 33.75 | 216 | 0.659877 | 217 | 1,620 | 4.75576 | 0.396313 | 0.043605 | 0.082364 | 0.03876 | 0.040698 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010691 | 0.133951 | 1,620 | 47 | 217 | 34.468085 | 0.724875 | 0.199383 | 0 | 0.058824 | 0 | 0 | 0.225781 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.205882 | 0 | 0.235294 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f14b3f40c82ae119234be850bf3cbe591a95186 | 294 | py | Python | calculate_time.py | Tips-Lee/Interview | 62c9eb8cefe49eb8b1beabe30ec5def66427ea60 | [
"MIT"
] | 1 | 2020-04-26T00:56:03.000Z | 2020-04-26T00:56:03.000Z | calculate_time.py | Tips-Lee/Interview | 62c9eb8cefe49eb8b1beabe30ec5def66427ea60 | [
"MIT"
] | null | null | null | calculate_time.py | Tips-Lee/Interview | 62c9eb8cefe49eb8b1beabe30ec5def66427ea60 | [
"MIT"
] | null | null | null | import time
class cal_time:
def __init__(self, func):
self.f = func
def __call__(self, *args, **kwargs):
start = time.time()
ans = self.f(*args, **kwargs)
end = time.time()
t = end - start
print('total time: %f' % t)
return ans | 21 | 40 | 0.517007 | 38 | 294 | 3.763158 | 0.5 | 0.06993 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.346939 | 294 | 14 | 41 | 21 | 0.744792 | 0 | 0 | 0 | 0 | 0 | 0.047458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.454545 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f184275b62d014ce34231a10c787a12374b4a75 | 1,133 | py | Python | tests/conftest.py | lycantropos/rene | c73c616f3e360b994e92c950a3616a8ccb1136b9 | [
"MIT"
] | null | null | null | tests/conftest.py | lycantropos/rene | c73c616f3e360b994e92c950a3616a8ccb1136b9 | [
"MIT"
] | null | null | null | tests/conftest.py | lycantropos/rene | c73c616f3e360b994e92c950a3616a8ccb1136b9 | [
"MIT"
] | null | null | null | import os
import platform
from datetime import timedelta
import pytest
from hypothesis import (HealthCheck,
settings)
is_pypy = platform.python_implementation() == 'PyPy'
on_ci = bool(os.getenv('CI', False))
max_examples = (-(-settings.default.max_examples // 5)
if is_pypy and on_ci
else settings.default.max_examples)
settings.register_profile('default',
max_examples=max_examples,
suppress_health_check=[HealthCheck.too_slow])
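# On CI, the hook below spreads a one-hour wall-clock budget evenly over the
# whole run: each test gets a Hypothesis deadline of
# 1 hour / (max_examples * number_of_collected_tests).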
if on_ci:
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_call(item: pytest.Item) -> None:
set_deadline = settings(deadline=((timedelta(hours=1)
/ (max_examples
* len(item.session.items)))))
item.obj = set_deadline(item.obj)
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session: pytest.Session,
exitstatus: pytest.ExitCode) -> None:
if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED:
session.exitstatus = pytest.ExitCode.OK
| 35.40625 | 75 | 0.61165 | 120 | 1,133 | 5.583333 | 0.475 | 0.098507 | 0.080597 | 0.077612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002503 | 0.294793 | 1,133 | 31 | 76 | 36.548387 | 0.836045 | 0 | 0 | 0 | 0 | 0 | 0.011474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.192308 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f1b8955bf3b066b5a24e9aae941aaa3fcfedc5f | 3,748 | py | Python | algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | 2 | 2021-03-17T11:25:46.000Z | 2021-11-18T04:20:54.000Z | algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | null | null | null | algorithms/refinement/parameterisation/scan_varying_goniometer_parameters.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from scitbx import matrix
from dials.algorithms.refinement.parameterisation.goniometer_parameters import (
GoniometerMixin,
)
from dials.algorithms.refinement.parameterisation.scan_varying_model_parameters import (
GaussianSmoother,
ScanVaryingModelParameterisation,
ScanVaryingParameterSet,
)
class ScanVaryingGoniometerParameterisation(
ScanVaryingModelParameterisation, GoniometerMixin
):
"""A scan-varying parameterisation for the setting rotation of a goniometer
with angles expressed in mrad."""
def __init__(
self, goniometer, t_range, num_intervals, beam=None, experiment_ids=None
):
if experiment_ids is None:
experiment_ids = [0]
# The state of a scan varying goniometer parameterisation is a matrix
# '[S](t)', expressed as a function of image number 't'
# in a sequential scan.
#
# The initial state is a snapshot of the setting matrix
# at the point of initialisation '[S0]', which is independent of
# image number.
#
# Future states are composed by rotations around two axes orthogonal to the
# initial spindle axis direction.
#
# [S](t) = [G2](t)[G1](t)[S0]
# Set up the smoother
smoother = GaussianSmoother(t_range, num_intervals)
nv = smoother.num_values()
# Set up the initial state
e_lab = matrix.col(goniometer.get_rotation_axis())
istate = matrix.sqr(goniometer.get_setting_rotation())
self._S_at_t = istate
# Factory function to provide to _build_p_list
def parameter_type(value, axis, ptype, name):
return ScanVaryingParameterSet(value, nv, axis, ptype, name)
# Build the parameter list
p_list = self._build_p_list(e_lab, beam, parameter_type=parameter_type)
# Set up the base class
ScanVaryingModelParameterisation.__init__(
self, goniometer, istate, p_list, smoother, experiment_ids=experiment_ids
)
return
def compose(self, t):
"""calculate state and derivatives for model at image number t"""
# Extract setting matrix from the initial state
iS0 = self._initial_state
# extract parameter sets from the internal list
gamma1_set, gamma2_set = self._param
# extract angles and other data at time t using the smoother
gamma1, gamma1_weights, gamma1_sumweights = self._smoother.value_weight(
t, gamma1_set
)
gamma2, gamma2_weights, gamma2_sumweights = self._smoother.value_weight(
t, gamma2_set
)
# calculate derivatives of angles wrt underlying parameters.
dgamma1_dp = gamma1_weights * (1.0 / gamma1_sumweights)
dgamma2_dp = gamma2_weights * (1.0 / gamma2_sumweights)
self._S_at_t, dS_dval = self._compose_core(
iS0,
gamma1,
gamma2,
gamma1_axis=gamma1_set.axis,
gamma2_axis=gamma2_set.axis,
)
# calculate derivatives of state wrt underlying smoother parameters
dS_dp1 = [None] * dgamma1_dp.size
for (i, v) in dgamma1_dp:
dS_dp1[i] = dS_dval[0] * v
dS_dp2 = [None] * dgamma2_dp.size
for (i, v) in dgamma2_dp:
dS_dp2[i] = dS_dval[1] * v
# store derivatives as list-of-lists
self._dstate_dp = [dS_dp1, dS_dp2]
return
def get_state(self):
"""Return setting matrix [S] at image number t"""
# only a single goniometer is parameterised here, so no multi_state_elt
# argument is allowed
return self._S_at_t
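# Note on compose() above: the smoother evaluates gamma(t) as a normalised
# weighted sum, gamma = sum_i(w_i * p_i) / sum_i(w_i), hence
# d(gamma)/d(p_i) = w_i / sum_i(w_i), which is exactly what
# dgamma_dp = gamma_weights * (1.0 / gamma_sumweights) computes.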
| 33.168142 | 88 | 0.652615 | 451 | 3,748 | 5.203991 | 0.323725 | 0.027695 | 0.015339 | 0.010226 | 0.078398 | 0.040051 | 0 | 0 | 0 | 0 | 0 | 0.016617 | 0.277481 | 3,748 | 112 | 89 | 33.464286 | 0.850074 | 0.308164 | 0 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0.017241 | 0.224138 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f1b9b470f4aa8fbe3dda5465263cc78a43c4fc1 | 26,022 | py | Python | src/model_apply.py | hadao211/routing-challenge | e2863a3b48c4aa387538a06d3c705c219942e134 | [
"MIT"
] | null | null | null | src/model_apply.py | hadao211/routing-challenge | e2863a3b48c4aa387538a06d3c705c219942e134 | [
"MIT"
] | null | null | null | src/model_apply.py | hadao211/routing-challenge | e2863a3b48c4aa387538a06d3c705c219942e134 | [
"MIT"
] | 1 | 2021-08-21T11:53:20.000Z | 2021-08-21T11:53:20.000Z | from os import path
import json, time, numpy as np, pandas as pd, datetime, copy, re, traceback, multiprocessing as mp
from scipy.spatial import distance
# function for solving instance
def meta_zone(zone):
m = re.match('[A-Z]+(?=-)', zone)
higher_meta = m.group(0) if m else zone
    m_ = re.match(r'[A-Z]-\d+(?=.)', zone)
meta = m_.group(0) if m_ else zone
return meta, higher_meta
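# Worked example (zone ids of the form letter-number.subzone are assumed):
# meta_zone("A-2.3C") returns ("A-2", "A"); ids that match neither pattern,
# such as "Start", fall back to the id itself.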
# function for solving instance
def solve(k,route, travel_time, package, timer_start, penalty, sz_weight, last_zone_weight, max_allowed_dist):
print("start "+str(k))
try:
n = len(travel_time)
points = np.zeros((n,2)) # coords of nodes
stops = route["stops"] # stop dictionary
nodes = [k for k,v in travel_time.items()] # node ids of nodes
zones_l = [stops[k]["zone_id"] for k,v in travel_time.items()] # zone ids of nodes
types = [stops[k]["type"] for k,v in travel_time.items()] # stop types of nodes
tt = np.zeros((n+1,n+1)) # convert travel matrix from dict to np array
# add artificial node n+1 as end node of last path
tt[n, :] = 999999
tt[:, n] = 0
tt[n,n] = 999999
# fill tt and node coords
for i in range(n):
tt[i,0:n] = list(travel_time[nodes[i]].values())
tt[i,i] = 999999
points[i,0] = stops[nodes[i]]["lat"]
points[i,1] = stops[nodes[i]]["lng"]
# impute missing zone data ###################################################################
nans = [i for i in range(len(zones_l)) if pd.isna(zones_l[i])]
for i in nans:
if types[i] == "Station":
zones_l[i] = "Start"
else:
closest = min([tt[i,j] for j in range(n) if tt[i,j] > 0 and pd.isna(zones_l[j]) == False])
closest = np.where(tt[i,:] == closest)[0][0]
zones_l[i] = zones_l[closest]
# 'assign' nodes to clusters ##################################################################
zones = list(set(zones_l)) # list of unique zone ids
clusters = {} # dict that holds all nodes for a zone
for i in zones:
clusters[i] = [j for j in range(n) if zones_l[j] == i]
# data per node ###############################################################################
a = [] # lower TW
b = [] # upper TW
s = [] # service times
dateFormat = '%Y-%m-%d %H:%M:%S'
start_time = str(route['date_YYYY_MM_DD']+" "+str(route['departure_time_utc']))
start_time_obj = datetime.datetime.strptime(start_time, dateFormat)
        start_time = (start_time_obj.hour * 60 + start_time_obj.minute) * 60 + start_time_obj.second
unlimited_a = 0 # lower TW for unrestricted nodes
unlimited_b = start_time+2*24*3600 # upper TW for unrestricted nodes: starting time of tour + 2 days
# loop over stops
for i in nodes:
s_tmp = 0.0
if len(package[i])==0: # set parameters for depots
a.append(start_time)
b.append(unlimited_b)
s.append(s_tmp)
else: # set parameters for regular stops
a_stop = [unlimited_a] # in case all TW entries of package are nans
b_stop = [unlimited_b]
# loop over packages of stop
for p in package[i].values():
s_tmp += p['planned_service_time_seconds'] # add service times
if isinstance(p['time_window']['start_time_utc'], str):
a_package = datetime.datetime.strptime(p['time_window']['start_time_utc'], dateFormat)
b_package = datetime.datetime.strptime(p['time_window']['end_time_utc'], dateFormat)
# check if time windows stretch til next day
# for TW=[a,_]
if int(a_package.day) != int(start_time_obj.day):
a_stop.append(((24+a_package.hour) * 60 + a_package.minute) * 60 + a_package.second)
else:
a_stop.append((a_package.hour * 60 + a_package.minute) * 60 + a_package.second)
# for TW=[_,b]
if int(b_package.day) != int(start_time_obj.day):
b_stop.append(((24+b_package.hour) * 60 + b_package.minute) * 60 + b_package.second)
else:
b_stop.append((b_package.hour * 60 + b_package.minute) * 60 + b_package.second)
# set most narrow TW
s.append(s_tmp)
a.append(max(a_stop))
b.append(min(b_stop))
# determine centroids of each zone for meta tour
centroids = [] # not actual centroids but node of cluster closest to centroid
ctrs = np.zeros((len(zones), 2)) # holds coords of the actual centroids
# find cluster node nearest to centroid
cou = 0 #
for i in zones:
if len(clusters[i]) > 1:
ctr = [0,0] # centroid of zone i
ctr[0] = np.mean(points[clusters[i], 0])
ctr[1] = np.mean(points[clusters[i], 1])
ctrs[cou, :] = ctr #
cou+=1 #
dist = distance.cdist([ctr], points[clusters[i],:])
min_ind = dist.argmin()
centroids.append(clusters[i][min_ind])
else:
centroids.append(clusters[i][0])
ctrs[cou, :] = points[clusters[i][0],:]#
cou +=1
s = np.array(s)
a = np.array(a)
b = np.array(b)
# solving routine #############################################################################
###############################################################################################
# determine meta tour
#print("starting")
shortest_distances_clusters={}
for i in clusters.keys():
shortest_distances_clusters[i]={}
for j in clusters.keys():
if i !=j:
best_dist=9999999
best_from = None
best_to = None
for x in clusters[i]:
for y in clusters[j]:
dist=tt[int(x)][int(y)]
if dist<best_dist:
best_dist=dist
best_from=x
best_to=y
shortest_distances_clusters[i][j]={"dist":best_dist,"from":best_from,"to":best_to}
cluster_order=[("Start",clusters["Start"])]
remaining_cluster=copy.deepcopy(list(clusters.keys()))
remaining_cluster.remove("Start")
while len(remaining_cluster)>0:
best_dist=999999
best_ind=None
for ind,r in enumerate(remaining_cluster):
last=cluster_order[-1][0]
dist=shortest_distances_clusters[last][r]["dist"]
if dist <=best_dist:
best_dist=dist
best_ind=ind
cluster_order.append((remaining_cluster[best_ind],clusters[remaining_cluster[best_ind]]))
remaining_cluster.pop(best_ind)
#########################################################################
def two_opt_global_clusters(cluster_order, sz_weight, last_zone_weight):
improved = True
distance = shortest_distances_clusters
start_time = time.time()
tour=cluster_order
while improved:
if time.time() - start_time >= 10:
return tour
min_i = 9999
min_j = 9999
change = 0
improved = False
min_change = 0
num_cities = len(tour)
# Find the best move
for i in range(num_cities - 2):
for j in range(i + 2, num_cities - 1):
# change = dist(i, j,tour) + dist(i+1, j+1,tour) - dist(i, i+1,tour) - dist(j, j+1,tour)
                        dist_change = distance[tour[i][0]][tour[j][0]]["dist"] \
                            + distance[tour[i + 1][0]][tour[j + 1][0]]["dist"] \
                            - distance[tour[i][0]][tour[i + 1][0]]["dist"] \
                            - distance[tour[j][0]][tour[j + 1][0]]["dist"]
##########################################################
curr_mz_violation = sum( [ meta_zone(tour[i][0]) != meta_zone(tour[j][0]),
meta_zone(tour[i+1][0]) != meta_zone(tour[j+1][0]) ] )
prev_mz_violation = sum( [ meta_zone(tour[i][0]) != meta_zone(tour[i+1][0]),
meta_zone(tour[j][0]) != meta_zone(tour[j+1][0]) ] )
meta_zone_change = curr_mz_violation - prev_mz_violation
if i != 0:
last_zone_change = sum( [tour[i][-1] != tour[j][-1], tour[i+1][-1] != tour[j+1][-1]] ) \
- sum( [tour[i][-1] != tour[i+1][-1], tour[j][-1] != tour[j+1][-1]] )
else:
last_zone_change = 0
if meta_zone_change != 0:
change = dist_change*(1-sz_weight) + meta_zone_change*100*sz_weight
else:
change = dist_change*(1-last_zone_weight) + last_zone_change*100*last_zone_weight
###########################################################
if change < min_change and change < -0.00000001:
improved = True
min_change = change
min_i, min_j = i, j
if min_change < 0:
tour[min_i + 1:min_j + 1] = tour[min_i + 1:min_j + 1][::-1]
return cluster_order
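        # 2-opt recap: reversing tour[i+1..j] changes only the two boundary
        # edges, so the pure distance delta is
        # d(i,j) + d(i+1,j+1) - d(i,i+1) - d(j,j+1); above it is blended with
        # the super-zone and last-character zone consistency penalties.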
##########################################################################
# apply 2-opts on the clusters
cluster_order=two_opt_global_clusters(cluster_order, sz_weight, last_zone_weight)
curr_dist = sum([shortest_distances_clusters[cluster_order[i][0]][cluster_order[i+1][0]]["dist"] for i in range(len(cluster_order)-1)])
# looping improvement between distance optimization and zone id rules
new_cluster_order = copy.deepcopy(cluster_order)
changed= True
start_time = time.time()
while (time.time() - start_time <= 20) and changed:
changed=False
for i in range(1, len(new_cluster_order)-1):
n1 = new_cluster_order[i][0]
n2 = new_cluster_order[i+1][0]
rem = [(idx, r[0]) for idx,r in list(enumerate(new_cluster_order))[i+2:]]
# improve following zone id
if meta_zone(n1)[0] == meta_zone(n2)[0]:
# last zone rule
if n1[-1] != n2[-1]:
tmp = [idx for idx, r in rem if meta_zone(n1)[0] == meta_zone(r)[0] and n1[-1] == r[-1]]
if len(tmp) != 0:
new_cluster_order = new_cluster_order[:i+1] \
+ [new_cluster_order[j] for j in tmp] \
+ [new_cluster_order[idx] for idx in range(i+1,len(new_cluster_order)) if idx not in tmp]
changed=True
else:
# super zone rule
tmp = [idx for idx, r in rem if meta_zone(n1)[0] == meta_zone(r)[0]]
if len(tmp) != 0:
new_cluster_order = new_cluster_order[:i+1] \
+ [new_cluster_order[j] for j in tmp] \
+ [new_cluster_order[idx] for idx in range(i+1,len(new_cluster_order)) if idx not in tmp]
changed=True
if changed:
new_dist = sum([shortest_distances_clusters[new_cluster_order[i][0]][new_cluster_order[i+1][0]]["dist"] for i in range(len(new_cluster_order)-1)])
# if distance increased < maximum allowed increased => exit
if new_dist - curr_dist <= max_allowed_dist:
cluster_order = copy.deepcopy(new_cluster_order)
changed=False
else:
# improve distance
new_cluster_order = two_opt_global_clusters(new_cluster_order, sz_weight, last_zone_weight)
meta_tour = []
for i in cluster_order:
meta_tour.append(zones.index(i[0]))
# solve cluster paths #########################################################################
# find closest cluster nodes between 2 neighboring clusters in meta tour
connections = [0] # numbers are not actual node ids but position of node in clusters dictionary
for i in range(len(meta_tour)-1):
tt_sel = tt[clusters[ zones[meta_tour[i] ] ], : ]
tt_sel = tt_sel[: , clusters[ zones[meta_tour[i+1] ] ] ]
j = np.where( tt_sel == np.min(tt_sel) )
# if j is already used, find second closest
if len(clusters[ zones[meta_tour[i] ] ])>1 and \
clusters[zones[meta_tour[i]]] [connections[-1]] == clusters[zones[meta_tour[i]]] [j[0][0]]:
tt_sel[j[0][0],:] = 999999
j = np.where( tt_sel == np.min(tt_sel) )
connections.append(j[0][0])
connections.append(j[1][0])
# add artificial node as destination of path of the last cluster
if n not in clusters[ zones[ meta_tour[-1]] ]:
clusters[ zones[ meta_tour[-1]] ].append(n)
if len(connections)<len(zones)*2:
connections.append( len(clusters[ zones[ meta_tour[-1]] ])-1)
# solve path within clusters ################################################################################################################
# function to determine a score for given tour
        def tour_score(tt, a, b, s, tour, start_t, penalty):
# global penalty
wait = 0
delay = 0
t = start_t + s[tour[0]]
t_seq = [t]
for i in range(1, len(tour)):
t += round(s[tour[i]] + tt[tour[i-1], tour[i]],2)
t = max(t, a[tour[i]])
wait += max(a[tour[i]]-t, 0)
delay += max(t-b[tour[i]],0)
t_seq.append(t)
return (1-penalty)*t+penalty*(wait+delay), t_seq, wait, delay
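        # tour_score blends the finishing time with time-window violations:
        # score = (1 - penalty) * completion_time + penalty * (total_wait + total_delay)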
t_final = [start_time] # contains point in time when service is finished at node; for all nodes
big_tour = [clusters[ zones[ meta_tour[0] ] ][0] ] # contains final tour; KEEP IN MIND: entries are the index of a node in nodes
if len(a) == n:
a = np.append(a,a[big_tour[0]])
b = np.append(b,b[big_tour[0]])
s = np.append(s,s[big_tour[0]])
for i in range(1, len(meta_tour)):
cn = clusters[ zones[meta_tour[i] ] ] # list of clusters nodes, makes following code more concise
if len(cn) == 1: # 1 node clusters
big_tour.append(cn[connections[i*2]])
t_final.append(max(a[big_tour[-1]], t_final[-1] + tt[big_tour[-2], big_tour[-1]] + s[big_tour[-1]]))
elif len(cn) == 2: # 2 node clusters
big_tour.append(cn[connections[i*2]] )
t_final.append(max(a[big_tour[-1]], t_final[-1] + tt[big_tour[-2], big_tour[-1]] + s[big_tour[-1]]))
big_tour.append(cn[connections[i*2+1]] )
t_final.append(max(a[big_tour[-1]], t_final[-1] + tt[big_tour[-2], big_tour[-1]] + s[big_tour[-1]] ))
else: # larger clusters => farthest insertion
# farthest insertion
sub_tour = [connections[i*2], connections[i*2+1]] # contains path: KEEP IN MIND: entrier are the index of a node in cn
# distance matrix of nodes within cluster
tt_s = tt[cn, :]
tt_s = tt_s[:, cn]
# parameter of nodes within cluster
a_s = a[cn]
b_s = b[cn]
s_s = s[cn]
for i in range(len(cn)):
tt_s[i,i] = -1000
tt_sel = copy.deepcopy(tt_s)
tt_sel[: , sub_tour ]= -1000
test = copy.deepcopy(tt_sel)
rem = list(set([i for i in range(len(cn))])-set(sub_tour))
for i in range(len(cn)-2):
# determine farthest centroid
far = np.max(tt_sel[sub_tour,:])
far = np.where(tt_sel[sub_tour, :] == far)[1][0]
tt_sel[:,far] = -1000
# insert at best position
best = 9999999999
b_ind = -1
for j in range(1, len(sub_tour)):
new_tour = copy.deepcopy(sub_tour)
new_tour.insert(j, far)
new, t_seq, _, _ = tour_score(tt_s, a_s, b_s, s_s, new_tour, max(a[cn[sub_tour[0]]] ,t_final[-1] + tt[big_tour[-1], cn[sub_tour[0]]]), penalty)
if new < best:
b_ind = j
best_tour = new_tour
best = new
tseq = t_seq
# update rem and sub tour/path
rem.remove(far)
sub_tour = copy.deepcopy(best_tour)
t_final += tseq
sub_tour2 = [cn[index] for index in sub_tour] # convert to real node ids (index of node in nodes)
big_tour += sub_tour2 # add to big tour
big_tour.pop() # pop artificial node
t_final.pop() # pop artificial node
final_score, t_final_val, ff_wait, ff_delay = tour_score(tt, a, b, s, big_tour, start_time, penalty)
# convert to result format
out = {}
for i in range(len(big_tour)):
out[nodes[big_tour[i]]] = i
return k,out
except Exception as e:
print(e)
print("EXCEPTION CAUGHT!!!!!!!!")
return k,{}
# Preprocessing ###############################################################################
###############################################################################################
if __name__ == "__main__":
# Get Directory
BASE_DIR = path.dirname(path.dirname(path.abspath(__file__)))
# Read input data
print('Reading Input Data')
# Model Build output
try:
model_path=path.join(BASE_DIR, 'data/model_build_outputs/model.json')
with open(model_path, newline='') as in_file:
model_build_out = json.load(in_file)
except Exception: # in case build process crashed completely, use fallback parameters
print("no model file, use fallback solution instead")
model_build_out={0: {
'sz_weight': {'best': 0.9},
'lz_weight': {'best': 0},
'penalty': {'best': 0},
'max_dist': {'best': 300}},
1: {
'sz_weight': {'best': 0.8},
'lz_weight': {'best': 0},
'penalty': {'best': 0},
'max_dist': {'best': 300}},
2: {
'sz_weight': {'best': 0.8},
'lz_weight': {'best': 0},
'penalty': {'best': 0},
'max_dist': {'best': 300} }
}
#check the model output
for i in range(3):
if i not in model_build_out: # check set
model_build_out.update({i: {
'sz_weight': {'best': 0.9 if i == 0 else 0.8},
'lz_weight': {'best': 0},
'penalty': {'best': 0},
'max_dist': {'best': 300}}
})
else:
params = ['sz_weight', 'lz_weight', 'penalty', 'max_dist']
default_vals = [0.9 if i == 1 else 0.8, 0, 0, 300]
for j in range(len(params)): # check params
if params[j] not in model_build_out[i]:
model_build_out[i].update({params[j]: {'best': default_vals[j]}})
else:
if 'best' not in model_build_out[i][params[j]]:
model_build_out[i][params[j]].update({'best': default_vals[j]})
print(model_build_out)
# Prediction Routes (Model Apply input)
prediction_routes_path = path.join(BASE_DIR, 'data/model_apply_inputs/new_route_data.json')
with open(prediction_routes_path, newline='') as in_file:
prediction_routes = json.load(in_file)
# Prediction Travel Times
prediction_travel_times_path = path.join(BASE_DIR, 'data/model_apply_inputs/new_travel_times.json')
with open(prediction_travel_times_path, newline='') as in_file:
prediction_travel_times = json.load(in_file)
# Prediction Travel Times
prediction_packages_path = path.join(BASE_DIR, 'data/model_apply_inputs/new_package_data.json')
with open(prediction_packages_path, newline='') as in_file:
prediction_packages = json.load(in_file)
###################################################################################################
output = {} # for actual tour output
count = 1
# load data ###################################################################################
# read parameter
weight_dict = {}
for key, route in prediction_routes.items():
weight_dict[key] = {}
n_sz = len(set([meta_zone(route['stops'][s]['zone_id'])[0]
for s in route['stops'] if str(route['stops'][s]['zone_id']) != 'nan']))
if n_sz == 1:
sz_weight = model_build_out[0]['sz_weight']['best']
lz_weight = model_build_out[0]['lz_weight']['best']
penalty = model_build_out[0]['penalty']['best']
max_dist = model_build_out[0]['max_dist']['best']
elif n_sz == 2:
sz_weight = model_build_out[1]['sz_weight']['best']
lz_weight = model_build_out[1]['lz_weight']['best']
penalty = model_build_out[1]['penalty']['best']
max_dist = model_build_out[1]['max_dist']['best']
else:
sz_weight = model_build_out[2]['sz_weight']['best']
lz_weight = model_build_out[2]['lz_weight']['best']
penalty = model_build_out[2]['penalty']['best']
max_dist = model_build_out[2]['max_dist']['best']
weight_dict[key].update({
'sz_weight': sz_weight,
'lz_weight': lz_weight,
'penalty': penalty,
'max_dist': max_dist
})
# start solving ###################################################################################
timer_start = time.time()
args = [(key, prediction_routes[key], copy.deepcopy(prediction_travel_times[key]), prediction_packages[key],
timer_start,
weight_dict[key]['penalty'], weight_dict[key]['sz_weight'],
weight_dict[key]['lz_weight'], weight_dict[key]['max_dist']) for key in list(prediction_routes.keys())]
pool = mp.Pool(mp.cpu_count())
result = pool.starmap(solve, args)
for key,out in result:
output[key] = {}
output[key]["proposed"] = {}
output[key]['proposed'] = out
print("Finish solving")
# Write output data
output_path=path.join(BASE_DIR, 'data/model_apply_outputs/proposed_sequences.json')
with open(output_path, 'w') as out_file:
json.dump(output, out_file)
print("Success: The '{}' file has been saved".format(output_path))
print(f"model apply done after {time.time()-timer_start}")
with open(path.join(BASE_DIR, 'data/model_apply_outputs/runningtime_apply.json'), 'w') as f:
f.write(str(time.time()-timer_start))
print('Done!')
| 44.255102 | 168 | 0.458266 | 3,008 | 26,022 | 3.771941 | 0.120346 | 0.037017 | 0.027763 | 0.013573 | 0.335096 | 0.265556 | 0.221488 | 0.166931 | 0.12912 | 0.123832 | 0 | 0.023735 | 0.383137 | 26,022 | 587 | 169 | 44.330494 | 0.683092 | 0.106372 | 0 | 0.163212 | 0 | 0 | 0.056429 | 0.014438 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010363 | false | 0 | 0.007772 | 0 | 0.033679 | 0.025907 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f1c65e273012bb93c3cad39c1fe3b14f2e4cc5f | 7,465 | py | Python | tests/osquery/osquery_load.py | synthetic-intelligence/zentral | 774104cea90b7f3d6f2aac655859c1b1f034f8dd | [
"Apache-2.0"
] | null | null | null | tests/osquery/osquery_load.py | synthetic-intelligence/zentral | 774104cea90b7f3d6f2aac655859c1b1f034f8dd | [
"Apache-2.0"
] | null | null | null | tests/osquery/osquery_load.py | synthetic-intelligence/zentral | 774104cea90b7f3d6f2aac655859c1b1f034f8dd | [
"Apache-2.0"
] | null | null | null | import random
import requests
import string
import uuid as uuid_mod
SYSTEM_INFOS = [
    {'cpu_brand': 'Apple M1',
     'cpu_logical_cores': '8',
     'cpu_physical_cores': '8',
     'cpu_subtype': 'ARM64E',
     'cpu_type': 'arm64e',
     'hardware_model': 'MacBook Air (M1, 2020)',
     'physical_memory': '17179869184'},
    {'cpu_brand': 'Intel(R) Core(TM) i7-1068NG7 CPU @ 2.30GHz',
     'cpu_logical_cores': '8',
     'cpu_physical_cores': '4',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookPro16,2',
     'physical_memory': '17179869184'},
    {'cpu_brand': 'Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz',
     'cpu_logical_cores': '16',
     'cpu_physical_cores': '8',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookPro16,1',
     'physical_memory': '34359738368'},
    {'cpu_brand': 'Intel(R) Core(TM) i5-8210Y CPU @ 1.60GHz',
     'cpu_logical_cores': '4',
     'cpu_physical_cores': '2',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookAir8,2',
     'physical_memory': '17179869184'},
    {'cpu_brand': 'Intel(R) Core(TM) i5-1030NG7 CPU @ 1.10GHz',
     'cpu_logical_cores': '8',
     'cpu_physical_cores': '4',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookAir9,1',
     'physical_memory': '17179869184'},
    {'cpu_brand': 'Intel(R) Core(TM) i5-8279U CPU @ 2.40GHz',
     'cpu_logical_cores': '8',
     'cpu_physical_cores': '4',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookPro15,2',
     'physical_memory': '17179869184'},
    {'cpu_brand': 'Intel(R) Core(TM) i5-8259U CPU @ 2.30GHz',
     'cpu_logical_cores': '8',
     'cpu_physical_cores': '4',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookPro15,2',
     'physical_memory': '17179869184'},
    {'cpu_brand': 'Apple M1 Max',
     'cpu_logical_cores': '10',
     'cpu_physical_cores': '10',
     'cpu_subtype': 'ARM64E',
     'cpu_type': 'arm64e',
     'hardware_model': 'MacBook Pro (16-inch, 2021)',
     'physical_memory': '68719476736'},
    {'cpu_brand': 'Apple M1 Pro',
     'cpu_logical_cores': '8',
     'cpu_physical_cores': '8',
     'cpu_subtype': 'ARM64E',
     'cpu_type': 'arm64e',
     'hardware_model': 'MacBook Pro (14-inch, 2021)',
     'physical_memory': '34359738368'},
    {'cpu_brand': 'Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz',
     'cpu_logical_cores': '12',
     'cpu_physical_cores': '6',
     'cpu_subtype': 'Intel x86-64h Haswell',
     'cpu_type': 'x86_64h',
     'hardware_model': 'MacBookPro15,1',
     'physical_memory': '34359738368'},
]

OS_VERSIONS = [
    (12, 3, 1, "macOS", "21E258"),
    (12, 2, 1, "macOS", "21D62"),
    (11, 6, 5, "macOS", "20G527"),
    (11, 6, 0, "macOS", "20G165"),
]

FIREFOX_CHOICES = [
    ("10022.4.19", "100.0"),
    ("9922.4.11", "99.0.1"),
    ("9922.3.30", "99.0"),
]
def make_random_word_function():
    with open("/usr/share/dict/words", "r", encoding="utf-8") as f:
        word_list = list(set(w.strip().lower() for w in f.readlines() if w.strip() and len(w) > 3))

    def random_word_function():
        return random.choice(word_list)

    return random_word_function


def random_serial_number(prefix=""):
    return (prefix
            + random.choice(string.ascii_uppercase)
            + "".join(random.sample(string.ascii_uppercase + string.digits, max(0, 9 - len(prefix))))
            + "".join(random.sample(string.ascii_uppercase, 2)))


def random_uuid():
    return str(uuid_mod.uuid4()).upper()


def random_os_version(cpu_type):
    choices = OS_VERSIONS
    if "arm" in cpu_type.lower():
        choices = [t for t in choices if t[0] >= 12]
    os_version_t = random.choice(choices)
    os_version = dict(zip(("major", "minor", "patch", "name", "build"), os_version_t))
    os_version["table_name"] = "os_version"
    return os_version


def random_system_info(computer_name, serial_number):
    system_info = random.choice(SYSTEM_INFOS)
    system_info["computer_name"] = computer_name
    system_info["hardware_serial"] = serial_number
    system_info["table_name"] = "system_info"
    return system_info


def random_firefox_version():
    version, version_str = random.choice(FIREFOX_CHOICES)
    return {
        "bundle_id": "org.mozilla.firefox",
        "bundle_name": "Firefox",
        "bundle_version": version,
        "bundle_version_str": version_str,
        "bundle_path": "/Applications/Firefox.app",
        "table_name": "apps"
    }
def random_inventory_result(node_key, computer_name, serial_number, uuid, osquery_version):
    system_info = random_system_info(computer_name, serial_number)
    os_version = random_os_version(system_info["cpu_type"])
    app = random_firefox_version()
    return {
        "node_key": node_key,
        "log_type": "result",
        "action": "snapshot",
        "data": [
            {"snapshot": [
                system_info,
                os_version,
                app,
             ],
             "hostIdentifier": serial_number,
             "calendarTime": "",
             "unixTime": 0,
             "epoch": 0,
             "counter": 0,
             "numerics": False,
             "name": "ztl-inv",
             "decorations": {
                 "serial_number": serial_number,
                 "version": osquery_version,
             }}
        ]
    }


def enroll(base_url, enrollment_secret, computer_name, serial_number, uuid, osquery_version):
    enroll_payload = {
        "host_identifier": computer_name,
        "enroll_secret": enrollment_secret,
        "platform_type": "21",
        "host_details": {"system_info": {"hardware_serial": serial_number,
                                         "uuid": uuid},
                         "osquery_info": {"version": osquery_version}},
    }
    response = requests.post(f"{base_url}/osquery/enroll",
                             json=enroll_payload,
                             headers={'user-agent': f"osquery/{osquery_version}"})
    response.raise_for_status()
    return response.json()["node_key"]


def post_inventory_result(base_url, node_key, computer_name, serial_number, uuid, osquery_version):
    inventory_result = random_inventory_result(node_key, computer_name, serial_number, uuid, osquery_version)
    response = requests.post(f"{base_url}/osquery/log",
                             json=inventory_result,
                             headers={'user-agent': f"osquery/{osquery_version}"})
    response.raise_for_status()


def iter_machines(num=10, prefix=""):
    random_word_function = make_random_word_function()
    for _ in range(num):
        yield ("-".join(random_word_function() for _ in range(3)),
               random_serial_number(prefix),
               random_uuid())
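# Example of one generated identity tuple (illustrative output; the words come
# from /usr/share/dict/words, so the actual values differ per run):
#
#     ('amber-quartz-willow', 'DEMOQ7K2XWBT', 'C0A4E1F2-...-UPPERCASED-UUID')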
if __name__ == "__main__":
import sys
base_url, enrollment_secret = sys.argv[1:]
osquery_version = "5.2.2"
for computer_name, serial_number, uuid in iter_machines(2000, prefix="DEMO"):
print(computer_name, serial_number, uuid)
node_key = enroll(base_url, enrollment_secret, computer_name, serial_number, uuid, osquery_version)
print("Enrollment OK")
post_inventory_result(base_url, node_key, computer_name, serial_number, uuid, osquery_version)
print("Inventory OK")
| 34.560185 | 109 | 0.612592 | 899 | 7,465 | 4.787542 | 0.215795 | 0.047398 | 0.034851 | 0.055762 | 0.498838 | 0.485827 | 0.440753 | 0.420771 | 0.418448 | 0.367565 | 0 | 0.068651 | 0.235097 | 7,465 | 215 | 110 | 34.72093 | 0.685114 | 0 | 0 | 0.26738 | 0 | 0 | 0.339719 | 0.019156 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.026738 | 0.016043 | 0.13369 | 0.016043 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f1d966244bdd8238e88c699c4861240b6a65462 | 3,155 | py | Python | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/learn-python3/micropython/rccar/main.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 5 | 2021-06-02T23:44:25.000Z | 2021-12-27T16:21:57.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/learn-python3/micropython/rccar/main.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 22 | 2021-05-31T01:33:25.000Z | 2021-10-18T18:32:39.000Z | WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/learn-python3/micropython/rccar/main.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | 3 | 2021-06-19T03:37:47.000Z | 2021-08-31T00:49:51.000Z | #!/usr/bin/env pybricks-micropython
import struct, threading
from pybricks import ev3brick as brick
from pybricks.ev3devices import (
    Motor,
    TouchSensor,
    ColorSensor,
    InfraredSensor,
    UltrasonicSensor,
    GyroSensor,
)
from pybricks.parameters import (
    Port,
    Stop,
    Direction,
    Button,
    Color,
    SoundFile,
    ImageFile,
    Align,
)
from pybricks.tools import print, wait, StopWatch
from pybricks.robotics import DriveBase
from devices import detectJoystick
class Robot:
    def __init__(self):
        self.motor = Motor(Port.B)
        self.ultrasonic = UltrasonicSensor(Port.S4)
        self.active = True
        self.speed = 0
        # brick light colour per |speed| level: off, slow, medium, fast
        self.colors = [None, Color.GREEN, Color.YELLOW, Color.RED]

    def setSpeed(self, acc):
        if acc < 0:
            self.speed = max(-3, self.speed - 1)
        elif acc > 0:
            self.speed = min(3, self.speed + 1)
        else:
            self.speed = 0
        if self.speed != 0:
            self.motor.run(self.speed * 90)  # 90 deg/s per speed step
        else:
            self.motor.stop()
        brick.light(self.colors[abs(self.speed)])

    def inactive(self):
        self.active = False
        self.setSpeed(0)
        brick.sound.beep()
def autoStopLoop(robot):
    # stop automatically when an obstacle comes closer than 200 mm
    while robot.active:
        if robot.speed > 0 and robot.ultrasonic.distance() < 200:
            robot.setSpeed(0)
        wait(100)


def joystickLoop(robot, eventFile):
    # Linux input_event layout: two longs (timestamp), then type, code, value
    FORMAT = "llHHI"
    EVENT_SIZE = struct.calcsize(FORMAT)
    with open(eventFile, "rb") as infile:
        while True:
            event = infile.read(EVENT_SIZE)
            _, _, t, c, v = struct.unpack(FORMAT, event)
            # button A, B:
            if t == 1 and v == 1:
                if c == 305:
                    # press A:
                    robot.setSpeed(1)
                elif c == 304:
                    # press B:
                    robot.setSpeed(-1)
                elif c == 307:
                    # press X:
                    return robot.inactive()
            elif t == 3:
                if c == 1:
                    # left stick, vertical axis:
                    speed = 0
                    if v < 32768:
                        # up:
                        speed = 1
                    elif v > 32768:
                        # down:
                        speed = -1
                    robot.setSpeed(speed)
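# For reference, a minimal sketch of how one such event decodes (illustrative
# values; the byte size depends on the platform's struct alignment of "llHHI"):
#
#     raw = struct.pack("llHHI", 0, 0, 3, 1, 12000)
#     _, _, t, c, v = struct.unpack("llHHI", raw)
#     # t=3 (EV_ABS), c=1 (ABS_Y), v=12000 < 32768 -> stick pushed up, speed up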
def buttonLoop(robot):
    while True:
        if not any(brick.buttons()):
            wait(10)
        else:
            if Button.LEFT in brick.buttons():
                robot.setSpeed(-1)
            elif Button.RIGHT in brick.buttons():
                robot.setSpeed(1)
            elif Button.CENTER in brick.buttons():
                robot.setSpeed(0)
            elif Button.UP in brick.buttons():
                return robot.inactive()
            wait(500)
def main():
    brick.sound.beep()
    joystickEvent = detectJoystick(["Controller"])
    robot = Robot()
    t = threading.Thread(target=autoStopLoop, args=(robot,))
    t.start()
    if joystickEvent:
        joystickLoop(robot, joystickEvent)
    else:
        buttonLoop(robot)


main()
| 25.650407 | 66 | 0.511252 | 332 | 3,155 | 4.834337 | 0.364458 | 0.050467 | 0.034891 | 0.04486 | 0.08785 | 0.047352 | 0.047352 | 0.047352 | 0 | 0 | 0 | 0.030618 | 0.389223 | 3,155 | 122 | 67 | 25.860656 | 0.802283 | 0.033914 | 0 | 0.183673 | 0 | 0 | 0.005592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.071429 | 0 | 0.173469 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f1f0cf6edffe04fb3dbf21bea18886f3b13bfaf | 3,160 | py | Python | 0.3.7/src/SoundPrintCollector.py | RockmanZheng/Digital-Speech-Recognizer | 6cf0b9edc4d040458f5172e811fd5e266ab284f1 | [
"Apache-2.0"
] | 15 | 2017-06-13T01:14:34.000Z | 2020-12-27T14:37:24.000Z | 0.3.7/src/SoundPrintCollector.py | RockmanZheng/Digital-Speech-Recognizer | 6cf0b9edc4d040458f5172e811fd5e266ab284f1 | [
"Apache-2.0"
] | 2 | 2017-11-13T13:04:24.000Z | 2018-07-15T16:44:17.000Z | 0.3.7/src/SoundPrintCollector.py | RockmanZheng/Digital-Speech-Recognizer | 6cf0b9edc4d040458f5172e811fd5e266ab284f1 | [
"Apache-2.0"
] | 15 | 2017-08-03T07:33:22.000Z | 2022-01-23T04:16:57.000Z | # Prompt the user to utter scripts
# Record sound print into wave file
import pyaudio
import wave
import sys
import Utility
def Collect(output_file, instruction=''):
    CHUNK = 1024
    FORMAT = pyaudio.paInt16
    CHANNELS = 1
    RATE = 44100
    RECORD_SECONDS = 5
    if instruction != '':
        input(instruction)  # Python 3; the original Python 2 code used raw_input
    p = pyaudio.PyAudio()
    stream = p.open(format=FORMAT,
                    channels=CHANNELS,
                    rate=RATE,
                    input=True,
                    frames_per_buffer=CHUNK)
    print("* recording. Press <ctrl>+<c> to complete the recording.")
    frames = []
    while True:
        try:
            data = stream.read(CHUNK)
            frames.append(data)
        except KeyboardInterrupt:
            break
    print("* done recording.")
    stream.stop_stream()
    stream.close()
    p.terminate()
    wf = wave.open(output_file, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(p.get_sample_size(FORMAT))
    wf.setframerate(RATE)
    wf.writeframes(b''.join(frames))
    wf.close()
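# Usage sketch (illustrative path): record one prompt into a wave file.
#
#     Collect('../wav/test.wav', 'Press <Enter> and say "zero".')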
MAIN_DIR = '../'
WAVE_FOLDER = MAIN_DIR + 'wav/'
SINGLE_FOLDER = WAVE_FOLDER + 'single/'
CONFIG_DIR = MAIN_DIR + 'config/'
DICT_DIR = MAIN_DIR + 'dict/'
# EMBEDDED_FOLDER = WAVE_FOLDER+'Embedded/'
# if len(sys.argv)<3:
if len(sys.argv) < 2:
    # sys.exit("Usage: SoundPrintCollector.py <dict> <transcipts> <config>")
    sys.exit("Usage: SoundPrintCollector.py <dict> <config>")
num_repeat_key = 'NUMREPEAT'
# Get configuration
# num_repeat = Utility.ParseConfig(sys.argv[3],num_repeat_key)
# num_repeat = Utility.ParseConfig(sys.argv[2],num_repeat_key)
num_repeat = Utility.ParseConfig(CONFIG_DIR + sys.argv[2] + '.conf',num_repeat_key)
if num_repeat != '':
    num_repeat = int(num_repeat)
else:
    num_repeat = 1
# Get word list
# dict_file = open(sys.argv[1])
# model_id = []
# words = []
# for line in dict_file:
# tokens = line.strip().split()
# words.append(tokens[1])
# model_id.append(tokens[0])
# dict_file.close()
words,model_id = Utility.GetDictionary(DICT_DIR + sys.argv[1] + '.txt')
# # Get scripts
# scripts_file = open(sys.argv[2])
# script_ids = []
# scripts = []
# for line in scripts_file:
# tokens = line.strip().split()
# script_ids.append(tokens[0])
# script = ''
# for i in range(1,len(tokens)):
# script += tokens[i]+' '
# scripts.append(script)
# num_scripts = len(scripts)
# total_num = len(words)*num_repeat+num_scripts
total_num = len(words) * num_repeat
# Collect sound print for single model
for i in range(len(words)):
    for k in range(num_repeat):
        total_num -= 1
        print(str(total_num) + ' transcript(s) remaining.')
        if words[i].find('!') > -1:
            instruction = 'Press <Enter> to record background noise.\n' + words[i]
        else:
            instruction = ('Get ready to speak the following script and press <Enter> to start recording.\n'
                           'Remember to leave 3 seconds of blank before and after the utterance.\n' + words[i])
        Collect(SINGLE_FOLDER + model_id[i] + '-' + str(k) + '.wav', instruction)
# # Collect sound print for embedded model
# for i in range(num_scripts):
# total_num -= 1
# print str(total_num)+'transcript(s) remaining.'
# instruction = 'Get ready to speek the following script and press <Enter> to start record.\n'+scripts[i]
# Collect(EMBEDDED_FOLDER+script_ids[i]+'.wav',instruction)
| 25.483871 | 175 | 0.692405 | 453 | 3,160 | 4.690949 | 0.298013 | 0.059294 | 0.015059 | 0.038118 | 0.267765 | 0.230118 | 0.176 | 0.109176 | 0.109176 | 0.109176 | 0 | 0.011338 | 0.162658 | 3,160 | 124 | 176 | 25.483871 | 0.791761 | 0.379114 | 0 | 0.033333 | 0 | 0.016667 | 0.199168 | 0.01144 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016667 | false | 0 | 0.066667 | 0 | 0.083333 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f22f0d4d5c89564faf5aad5ae6f9de88a11a4aa | 907 | py | Python | scripts/WindowsVersion/Linker.py | Lyniat/AutomatedWallpaperChanger | 76093f4f9bd20d8fdfd497f6dfbe93d22b17feac | [
"MIT"
] | null | null | null | scripts/WindowsVersion/Linker.py | Lyniat/AutomatedWallpaperChanger | 76093f4f9bd20d8fdfd497f6dfbe93d22b17feac | [
"MIT"
] | null | null | null | scripts/WindowsVersion/Linker.py | Lyniat/AutomatedWallpaperChanger | 76093f4f9bd20d8fdfd497f6dfbe93d22b17feac | [
"MIT"
] | 1 | 2021-07-19T17:32:04.000Z | 2021-07-19T17:32:04.000Z | import winshell
import os
from pathlib import Path
class Linker:
"""Creates the shortcuts for the Desktop as well as the Autostart"""
def __init__(self, install_path, autostart_path):
# Gets the paths
self.install_path = install_path
self.autostart_path = autostart_path
# Gets desktop path
self.desktop_path = Path(winshell.desktop())
# Creates shortcut for autostart
self.create_shortcut(self.autostart_path)
# Creates shortcut for desktop
self.create_shortcut(self.desktop_path)
def create_shortcut(self, path):
"""Makes a shortcut of the .exe in the specified folder"""
with winshell.shortcut(os.path.join(path, "AWC.lnk")) as link:
link.path = os.path.join(self.install_path, "Launcher.exe")
link.working_directory = self.install_path
if __name__ == "__main__":
...
| 29.258065 | 72 | 0.669239 | 115 | 907 | 5.043478 | 0.365217 | 0.094828 | 0.103448 | 0.072414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.242558 | 907 | 30 | 73 | 30.233333 | 0.84425 | 0.23043 | 0 | 0 | 0 | 0 | 0.039474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f24e0bab05f02affd8c7615a133de4f38e4ed8f | 1,428 | py | Python | baselines/bao/bao/classifier/factory.py | armancohan/flex | 2a005fd18f522d2667421f170568df1164a73c3a | [
"Apache-2.0"
] | 63 | 2021-07-01T23:40:55.000Z | 2022-03-15T21:56:57.000Z | baselines/bao/bao/classifier/factory.py | armancohan/flex | 2a005fd18f522d2667421f170568df1164a73c3a | [
"Apache-2.0"
] | 1 | 2022-03-04T11:15:55.000Z | 2022-03-28T09:33:54.000Z | baselines/bao/bao/classifier/factory.py | armancohan/flex | 2a005fd18f522d2667421f170568df1164a73c3a | [
"Apache-2.0"
] | 3 | 2021-07-31T05:06:14.000Z | 2022-02-28T12:45:06.000Z | import torch
from .nn import NN
from .proto import PROTO
from .r2d2 import R2D2
from .lrd2 import LRD2
from .mlp import MLP
from .routing import ROUTING
from ..dataset.utils import tprint
def get_classifier(ebd_dim, args):
tprint("Building classifier")
if args.classifier == 'nn':
model = NN(ebd_dim, args)
elif args.classifier == 'proto':
model = PROTO(ebd_dim, args)
elif args.classifier == 'r2d2':
model = R2D2(ebd_dim, args)
elif args.classifier == 'lrd2':
model = LRD2(ebd_dim, args)
elif args.classifier == 'routing':
model = ROUTING(ebd_dim, args)
elif args.classifier == 'mlp':
# detach top layer from rest of MLP
if args.mode == 'finetune':
top_layer = MLP.get_top_layer(args, args.n_train_class)
model = MLP(ebd_dim, args, top_layer=top_layer)
# if not finetune, train MLP as a whole
else:
model = MLP(ebd_dim, args)
else:
raise ValueError('Invalid classifier. '
'classifier can only be: nn, proto, r2d2, mlp.')
if args.snapshot != '':
# load pretrained models
tprint("Loading pretrained classifier from {}".format(
args.snapshot + '.clf'
))
model.load_state_dict(torch.load(args.snapshot + '.clf'))
if args.cuda != -1:
return model.cuda(args.cuda)
else:
return model
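# Usage sketch (illustrative; `args` is whatever argparse namespace the
# training script builds -- the attribute names are taken from above):
#
#     # with args.classifier='proto', args.snapshot='', args.cuda=-1
#     clf = get_classifier(ebd_dim=256, args=args)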
| 30.382979 | 73 | 0.607843 | 183 | 1,428 | 4.644809 | 0.300546 | 0.056471 | 0.094118 | 0.082353 | 0.207059 | 0.164706 | 0 | 0 | 0 | 0 | 0 | 0.014677 | 0.284314 | 1,428 | 46 | 74 | 31.043478 | 0.817025 | 0.065826 | 0 | 0.078947 | 0 | 0 | 0.121805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.210526 | 0 | 0.289474 | 0.078947 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f25539a7c0c30ccf2e316ab7f7df71a729fa981 | 2,747 | py | Python | models/gauss.py | tangym/autoapi | adc3ce02a803dd989be787ff21568231103d8625 | [
"Apache-2.0"
] | null | null | null | models/gauss.py | tangym/autoapi | adc3ce02a803dd989be787ff21568231103d8625 | [
"Apache-2.0"
] | null | null | null | models/gauss.py | tangym/autoapi | adc3ce02a803dd989be787ff21568231103d8625 | [
"Apache-2.0"
] | null | null | null | import json
import collections
import numpy as np
from scipy.stats import multivariate_normal
# import matplotlib.pyplot as plt
def parse_json_parameters(func):
    def inner(*args, **kwargs):
        print(args, kwargs)
        args = [json.loads(value) if type(value) is str else value
                for value in args]
        kwargs = {key: json.loads(kwargs[key])
                  if type(kwargs[key]) is str else kwargs[key]
                  for key in kwargs}
        return func(*args, **kwargs)
    return inner


def parse_json(x):
    try:
        return json.loads(x)
    except (TypeError, ValueError):  # non-string input or malformed JSON: pass through unchanged
        return x


# @parse_json_parameters
def generate_gauss_sample(mu='0', sigma='1', n='1'):
    mu, sigma, n = parse_json(mu), parse_json(sigma), parse_json(n)
    try:
        return np.random.multivariate_normal(mu, sigma, n).tolist()
    except ValueError:
        # scalar mu/sigma: fall back to the univariate normal
        return np.random.normal(mu, sigma, n).tolist()


# @parse_json_parameters
def gauss_pdf(mu='0', sigma='1', x='0'):
    mu, sigma, x = parse_json(mu), parse_json(sigma), parse_json(x)
    return multivariate_normal(mu, sigma).pdf(x).tolist()
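# Usage sketch (illustrative): parameters may arrive as JSON strings, e.g.
# straight from query parameters, and are decoded before use.
#
#     generate_gauss_sample(mu='[0, 0]', sigma='[[1, 0], [0, 1]]', n='3')
#     gauss_pdf(mu='0', sigma='1', x='0')   # ~0.3989, the standard normal peak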
# def plot_dataset(class1, class2):
# for sample in class1:
# plt.plot(sample[0], sample[1], 'r.')
# for sample in class2:
# plt.plot(sample[0], sample[1], 'g.')
# plt.savefig('{}-{}.pdf'.format(len(class1), len(class2)))
# def decide(class1, class2):
# def inner(sample):
# return 2 * class1.pdf(sample) - class2.pdf(sample)
# return inner
# def plot_roc():
# with open('dataset.txt') as f:
# dataset = json.load(f)
# # Training model
# class1_samples = np.matrix(dataset['class1']['samples']).T
# class2_samples = np.matrix(dataset['class2']['samples']).T
# class1 = mnormal(mean=np.asarray(np.mean(class1_samples, axis=1)).reshape(-1), cov=np.cov(class1_samples))
# class2 = mnormal(mean=np.asarray(np.mean(class2_samples, axis=1)).reshape(-1), cov=np.cov(class2_samples))
# g = decide(class1, class2)
# # Counting TP and FP
# samples = [(g(sample), 'class1') for sample in dataset['class1']['samples']] + [(g(sample), 'class2') for sample in dataset['class2']['samples']]
# samples.sort(key=lambda e: e[0], reverse=True)
# samples = [sample[1] for sample in samples]
# tp = np.cumsum([sample=='class1' for sample in samples])
# tp = [i/len(dataset['class1']['samples']) for i in tp]
# fp = np.cumsum([sample=='class2' for sample in samples])
# fp = [i/len(dataset['class2']['samples']) for i in fp]
# # Plot
# plt.plot(fp, tp)
# plt.plot([0,1], [0,1], '--')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.savefig('roc.pdf')
| 35.217949 | 151 | 0.613396 | 387 | 2,747 | 4.284238 | 0.24031 | 0.054282 | 0.046442 | 0.03076 | 0.228589 | 0.131484 | 0.074789 | 0.074789 | 0 | 0 | 0 | 0.023843 | 0.221332 | 2,747 | 77 | 152 | 35.675325 | 0.751286 | 0.583182 | 0 | 0.071429 | 0 | 0 | 0.005435 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.178571 | false | 0 | 0.142857 | 0 | 0.571429 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f25a23bcc4d4f91ead0bc8861399e70bae9cc51 | 5,873 | py | Python | dxm/lib/DxAlgorithm/DxAlgorithm.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 5 | 2018-08-23T15:47:05.000Z | 2022-01-19T23:38:18.000Z | dxm/lib/DxAlgorithm/DxAlgorithm.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 59 | 2018-10-15T10:37:00.000Z | 2022-03-22T20:49:25.000Z | dxm/lib/DxAlgorithm/DxAlgorithm.py | experiortec/dxm-toolkit | b2ab6189e163c62fa8d7251cd533d2a36430d44a | [
"Apache-2.0"
] | 12 | 2019-03-08T19:59:13.000Z | 2021-12-16T03:28:04.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
import logging
import pickle
import json
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.masking_api.api.sync_api import SyncApi
from dxm.lib.masking_api.rest import ApiException
from dxm.lib.masking_api.genericmodel import GenericModel
class DxAlgorithm(object):

    swagger_types = {
        'algorithm_name': 'str',
        'algorithm_type': 'str',
        'created_by': 'str',
        'description': 'str',
        'algorithm_extension': 'dict',
        'framework_id': 'int',
        'plugin_id': 'int',
        'fields': 'dict'
    }

    swagger_map = {
        'algorithm_name': 'algorithmName',
        'algorithm_type': 'algorithmType',
        'created_by': 'createdBy',
        'description': 'description',
        'algorithm_extension': 'algorithmExtension',
        'framework_id': 'frameworkId',
        'plugin_id': 'pluginId',
        'fields': 'fields'
    }

    def __init__(self, engine):
        """
        Constructor
        :param engine: DxMaskingEngine object
        """
        #Algorithm.__init__(self)
        self.__engine = engine
        self.__logger = logging.getLogger()
        self.__domain_name = None
        self.__sync = None
        self.__logger.debug("creating DxAlgorithm object")
        self.__api = SyncApi
        self.__apiexc = ApiException
        self.__obj = None

    @property
    def obj(self):
        if self.__obj is not None:
            return self.__obj
        else:
            return None

    def from_alg(self, alg):
        """
        Set obj properties with an Algorithm object
        :param alg: Algorithm object
        """
        self.__obj = alg
        self.__obj.swagger_map = self.swagger_map
        self.__obj.swagger_types = self.swagger_types

    @property
    def domain_name(self):
        return self.__domain_name

    @domain_name.setter
    def domain_name(self, domain):
        self.__domain_name = domain

    @property
    def sync(self):
        return self.__sync

    @sync.setter
    def sync(self, sync):
        self.__sync = sync

    @property
    def algorithm_name(self):
        if self.obj is not None and hasattr(self.obj, 'algorithm_name'):
            return self.obj.algorithm_name
        else:
            return None

    @property
    def algorithm_type(self):
        if self.obj is not None and hasattr(self.obj, 'algorithm_type'):
            return self.obj.algorithm_type
        else:
            return None

    @property
    def created_by(self):
        if self.obj is not None and hasattr(self.obj, 'created_by'):
            return self.obj.created_by
        else:
            return None

    @property
    def description(self):
        if self.obj is not None and hasattr(self.obj, 'description'):
            return self.obj.description
        else:
            return None

    @property
    def algorithm_extension(self):
        if self.obj is not None and hasattr(self.obj, 'algorithm_extension'):
            return self.obj.algorithm_extension
        else:
            return None

    @property
    def framework_id(self):
        if self.obj is not None and hasattr(self.obj, 'framework_id'):
            return self.obj.framework_id
        else:
            return None

    @property
    def plugin_id(self):
        if self.obj is not None and hasattr(self.obj, 'plugin_id'):
            return self.obj.plugin_id
        else:
            return None

    @property
    def fields(self):
        if self.obj is not None and hasattr(self.obj, 'fields'):
            return self.obj.fields
        else:
            return None
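# Usage sketch (illustrative; `engine` would be a connected DxMaskingEngine
# and `alg_obj` an Algorithm object returned by the masking API):
#
#     algo = DxAlgorithm(engine)
#     algo.from_alg(alg_obj)
#     print(algo.algorithm_name, algo.algorithm_type)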
# def export(self, path=None):
# """
# Export algorithm into file
# :param path: path to save algorithm
# """
# api_sync = SyncApi(self.__engine.api_client)
# self.__logger.debug("Export input %s" % self.sync)
# export_list = []
# export_list.append(self.sync)
# api_response = api_sync.export(export_list)
# self.__logger.debug("Export response %s" % str(api_response))
#
# # binary_file = open('{0}.alg'.format(self.algorithm_name), mode='wb')
# # json.dump(api_response.blob, binary_file)
# # binary_file.close()
#
# binary_file = open('{0}.alg_bin '.format(self.algorithm_name), mode='wb')
# pickle.dump(api_response, binary_file)
# binary_file.close()
#
#
# def importalg(self, path=None):
# """
# Import algorithm from file
# :param path: path to save algorithm
# """
#
# binary_file = open('{0}.alg_bin'.format("EU_LAST_NAME"), mode='rb')
# algobj = pickle.load(binary_file)
# binary_file.close()
#
#
# api_sync = SyncApi(self.__engine.api_client)
# self.__logger.debug("Import input %s" % self.sync)
# api_response = api_sync.import_object(algobj, force_overwrite=True)
# self.__logger.debug("Import response %s" % str(api_response))
#
# # binary_file = open('{0}.alg'.format(self.algorithm_name), mode='wb')
# # json.dump(api_response.blob, binary_file)
# # binary_file.close()
| 29.074257 | 83 | 0.610761 | 703 | 5,873 | 4.903272 | 0.237553 | 0.060923 | 0.02611 | 0.033943 | 0.364665 | 0.310995 | 0.236437 | 0.195822 | 0.195822 | 0.195822 | 0 | 0.003811 | 0.285203 | 5,873 | 201 | 84 | 29.218905 | 0.817294 | 0.356887 | 0 | 0.268519 | 0 | 0 | 0.116954 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138889 | false | 0 | 0.074074 | 0.018519 | 0.425926 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f27d366d10504ccd771920410823b0f81358bab | 15,577 | py | Python | tfsnippet/variational/estimators.py | QianLiGui/tfsnippet | 63adaf04d2ffff8dec299623627d55d4bacac598 | [
"MIT"
] | 63 | 2018-06-06T11:56:40.000Z | 2022-03-22T08:00:59.000Z | tfsnippet/variational/estimators.py | QianLiGui/tfsnippet | 63adaf04d2ffff8dec299623627d55d4bacac598 | [
"MIT"
] | 39 | 2018-07-04T12:40:53.000Z | 2022-02-09T23:48:44.000Z | tfsnippet/variational/estimators.py | QianLiGui/tfsnippet | 63adaf04d2ffff8dec299623627d55d4bacac598 | [
"MIT"
] | 34 | 2018-06-25T09:59:22.000Z | 2022-02-23T12:46:33.000Z | from contextlib import contextmanager
import tensorflow as tf
from tfsnippet.ops import log_mean_exp, convert_to_tensor_and_cast
from tfsnippet.utils import (add_name_arg_doc, get_static_shape,
                             get_dimension_size, is_tensor_object, assert_deps)
from .utils import _require_multi_samples

__all__ = [
    'sgvb_estimator', 'iwae_estimator', 'nvil_estimator', 'vimco_estimator',
]
@add_name_arg_doc
def sgvb_estimator(values, axis=None, keepdims=False, name=None):
    """
    Derive the gradient estimator for
    :math:`\\mathbb{E}_{q(\\mathbf{z}|\\mathbf{x})}\\big[f(\\mathbf{x},\\mathbf{z})\\big]`,
    by SGVB (Kingma, D.P. and Welling, M., 2013) algorithm.

    .. math::

        \\nabla \\, \\mathbb{E}_{q(\\mathbf{z}|\\mathbf{x})}\\big[f(\\mathbf{x},\\mathbf{z})\\big] = \\nabla \\, \\mathbb{E}_{q(\\mathbf{\\epsilon})}\\big[f(\\mathbf{x},\\mathbf{z}(\\mathbf{\\epsilon}))\\big] = \\mathbb{E}_{q(\\mathbf{\\epsilon})}\\big[\\nabla f(\\mathbf{x},\\mathbf{z}(\\mathbf{\\epsilon}))\\big]

    Args:
        values: Values of the target function given `z` and `x`, i.e.,
            :math:`f(\\mathbf{z},\\mathbf{x})`.
        axis: The sampling axes to be reduced in outputs.
            If not specified, no axis will be reduced.
        keepdims (bool): When `axis` is specified, whether or not to keep
            the reduced axes? (default :obj:`False`)

    Returns:
        tf.Tensor: The surrogate for optimizing the original target.
            Maximizing/minimizing this surrogate via gradient descent will
            effectively maximize/minimize the original target.
    """
    values = tf.convert_to_tensor(values)
    with tf.name_scope(name, default_name='sgvb_estimator', values=[values]):
        estimator = values
        if axis is not None:
            estimator = tf.reduce_mean(estimator, axis=axis, keepdims=keepdims)
        return estimator
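# Usage sketch (illustrative; `log_joint` and `latent_log_prob` are whatever
# the surrounding model defines -- the ELBO is just one possible `values`):
#
#     elbo = log_joint - latent_log_prob          # [n_samples, batch_size]
#     surrogate = sgvb_estimator(elbo, axis=0)    # [batch_size]
#     loss = -tf.reduce_mean(surrogate)           # minimize to maximize the ELBO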
@add_name_arg_doc
def iwae_estimator(log_values, axis, keepdims=False, name=None):
    """
    Derive the gradient estimator for
    :math:`\\mathbb{E}_{q(\\mathbf{z}^{(1:K)}|\\mathbf{x})}\\Big[\\log \\frac{1}{K} \\sum_{k=1}^K f\\big(\\mathbf{x},\\mathbf{z}^{(k)}\\big)\\Big]`,
    by IWAE (Burda, Y., Grosse, R. and Salakhutdinov, R., 2015) algorithm.

    .. math::

        \\begin{aligned}
            &\\nabla\\,\\mathbb{E}_{q(\\mathbf{z}^{(1:K)}|\\mathbf{x})}\\Big[\\log \\frac{1}{K} \\sum_{k=1}^K f\\big(\\mathbf{x},\\mathbf{z}^{(k)}\\big)\\Big]
                = \\nabla \\, \\mathbb{E}_{q(\\mathbf{\\epsilon}^{(1:K)})}\\Bigg[\\log \\frac{1}{K} \\sum_{k=1}^K w_k\\Bigg]
                = \\mathbb{E}_{q(\\mathbf{\\epsilon}^{(1:K)})}\\Bigg[\\nabla \\log \\frac{1}{K} \\sum_{k=1}^K w_k\\Bigg] = \\\\
            & \\quad \\mathbb{E}_{q(\\mathbf{\\epsilon}^{(1:K)})}\\Bigg[\\frac{\\nabla \\frac{1}{K} \\sum_{k=1}^K w_k}{\\frac{1}{K} \\sum_{i=1}^K w_i}\\Bigg]
                = \\mathbb{E}_{q(\\mathbf{\\epsilon}^{(1:K)})}\\Bigg[\\frac{\\sum_{k=1}^K w_k \\nabla \\log w_k}{\\sum_{i=1}^K w_i}\\Bigg]
                = \\mathbb{E}_{q(\\mathbf{\\epsilon}^{(1:K)})}\\Bigg[\\sum_{k=1}^K \\widetilde{w}_k \\nabla \\log w_k\\Bigg]
        \\end{aligned}

    Args:
        log_values: Log values of the target function given `z` and `x`, i.e.,
            :math:`\\log f(\\mathbf{z},\\mathbf{x})`.
        axis: The sampling axes to be reduced in outputs.
        keepdims (bool): When `axis` is specified, whether or not to keep
            the reduced axes? (default :obj:`False`)

    Returns:
        tf.Tensor: The surrogate for optimizing the original target.
            Maximizing/minimizing this surrogate via gradient descent will
            effectively maximize/minimize the original target.
    """
    _require_multi_samples(axis, 'iwae estimator')
    log_values = tf.convert_to_tensor(log_values)
    with tf.name_scope(name, default_name='iwae_estimator',
                       values=[log_values]):
        estimator = log_mean_exp(log_values, axis=axis, keepdims=keepdims)
        return estimator
@add_name_arg_doc
def nvil_estimator(values, latent_log_joint, baseline=None,
                   center_by_moving_average=True, decay=0.8,
                   axis=None, keepdims=False, batch_axis=None,
                   name=None):
    """
    Derive the gradient estimator for
    :math:`\\mathbb{E}_{q(\\mathbf{z}|\\mathbf{x})}\\big[f(\\mathbf{x},\\mathbf{z})\\big]`,
    by NVIL (Mnih and Gregor, 2014) algorithm.

    .. math::

        \\begin{aligned}
        \\nabla \\, \\mathbb{E}_{q(\\mathbf{z}|\\mathbf{x})} \\big[f(\\mathbf{x},\\mathbf{z})\\big]
            &= \\mathbb{E}_{q(\\mathbf{z}|\\mathbf{x})}\\Big[
                \\nabla f(\\mathbf{x},\\mathbf{z}) + f(\\mathbf{x},\\mathbf{z})\\,\\nabla\\log q(\\mathbf{z}|\\mathbf{x})\\Big] \\\\
            &= \\mathbb{E}_{q(\\mathbf{z}|\\mathbf{x})}\\Big[
                \\nabla f(\\mathbf{x},\\mathbf{z}) + \\big(f(\\mathbf{x},\\mathbf{z}) - C_{\\psi}(\\mathbf{x})-c\\big)\\,\\nabla\\log q(\\mathbf{z}|\\mathbf{x})\\Big]
        \\end{aligned}

    where :math:`C_{\\psi}(\\mathbf{x})` is a learnable network with parameter
    :math:`\\psi`, and `c` is a learnable constant.  They would be learnt by
    minimizing :math:`\\mathbb{E}_{ q(\\mathbf{z}|\\mathbf{x}) }\\Big[\\big(f(\\mathbf{x},\\mathbf{z}) - C_{\\psi}(\\mathbf{x})-c\\big)^2 \\Big]`.

    Args:
        values: Values of the target function given `z` and `x`, i.e.,
            :math:`f(\\mathbf{z},\\mathbf{x})`.
        latent_log_joint: Values of :math:`\\log q(\\mathbf{z}|\\mathbf{x})`.
        baseline: Values of the baseline function :math:`C_{\\psi}(\\mathbf{x})`
            given input `x`.  If this is not specified, then this method will
            degenerate to the REINFORCE algorithm, with only a moving
            average estimated constant baseline `c`.
        center_by_moving_average (bool): Whether or not to use the moving
            average to maintain an estimation of `c` in above equations?
        decay: The decaying factor for moving average.
        axis: The sampling axes to be reduced in outputs.
            If not specified, no axis will be reduced.
        keepdims (bool): When `axis` is specified, whether or not to keep
            the reduced axes? (default :obj:`False`)
        batch_axis: The batch axes to be reduced when computing
            expectation over `x`.  If not specified, all axes will be
            treated as batch axes, except the sampling axes.

    Returns:
        (tf.Tensor, tf.Tensor): The `(surrogate, baseline cost)`.

            `surrogate` is the surrogate for optimizing the original target.
            Maximizing/minimizing this surrogate via gradient descent will
            effectively maximize/minimize the original target.

            `baseline cost` is the cost to be minimized for training baseline.
            It will be :obj:`None` if `baseline` is :obj:`None`.
    """
    if baseline is None and not center_by_moving_average:
        raise ValueError('`baseline` is not specified, thus '
                         '`center_by_moving_average` must be False.')

    values = tf.convert_to_tensor(values)  # f(x,z)
    latent_log_joint = tf.convert_to_tensor(latent_log_joint)  # log q(z|x)
    if baseline is not None:
        baseline = tf.convert_to_tensor(baseline)
    dtype = values.dtype

    @contextmanager
    def mk_scope():
        if center_by_moving_average:
            with tf.variable_scope(None, default_name=name or 'nvil_estimator'):
                yield
        else:
            ns_values = [values, latent_log_joint]
            if baseline is not None:
                ns_values += [baseline]
            with tf.name_scope(name or 'nvil_estimator', values=ns_values):
                yield

    with mk_scope():
        l_signal = values
        baseline_cost = None

        # compute the baseline cost
        if baseline is not None:
            # baseline_cost = E[(f(x,z)-C(x)-c)^2]
            with tf.name_scope('baseline_cost'):
                baseline_cost = tf.square(
                    tf.stop_gradient(l_signal) - baseline)
                if axis is not None:
                    baseline_cost = tf.reduce_mean(
                        baseline_cost, axis, keepdims=keepdims)
            l_signal = l_signal - baseline

        # estimate `c` by moving average
        if center_by_moving_average:
            with tf.name_scope('center_by_moving_average'):
                batch_center = tf.reduce_mean(
                    l_signal, axis=batch_axis, keepdims=True)
                moving_mean_shape = get_static_shape(batch_center)
                if None in moving_mean_shape:
                    raise ValueError(
                        'The shape of `values` after `batch_axis` having been '
                        'reduced must be static: values {}, batch_axis {}'.
                        format(values, batch_axis)
                    )
                moving_mean = tf.get_variable(
                    'moving_mean', shape=moving_mean_shape,
                    initializer=tf.constant_initializer(0.),
                    trainable=False, dtype=dtype
                )

                decay = convert_to_tensor_and_cast(1. - decay, dtype)
                moving_mean = moving_mean.assign(
                    moving_mean - (moving_mean - batch_center) * decay)
                l_signal = l_signal - moving_mean

        # compute the nvil cost
        with tf.name_scope('cost'):
            cost = tf.stop_gradient(l_signal) * latent_log_joint + values
            if axis is not None:
                cost = tf.reduce_mean(cost, axis, keepdims=keepdims)

        return cost, baseline_cost
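# Usage sketch (illustrative; `f_xz` and `log_q_z` come from the surrounding
# model, and `baseline_net(x)` is any learnable per-input baseline):
#
#     cost, bl_cost = nvil_estimator(values=f_xz, latent_log_joint=log_q_z,
#                                    baseline=baseline_net(x), axis=0)
#     loss = -tf.reduce_mean(cost) + tf.reduce_mean(bl_cost)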
def _vimco_replace_diag(x, y, axis):
    assert(isinstance(axis, int))
    assert(get_static_shape(x) is not None)
    assert(get_static_shape(y) is not None)
    rank = len(get_static_shape(x))
    assert(rank >= 2)
    assert(len(get_static_shape(y)) == rank)
    assert(-rank <= axis < -1)
    k = get_static_shape(x)[axis]
    assert(get_static_shape(x)[axis + 1] == k)
    assert(get_static_shape(y)[axis] == k)
    assert(get_static_shape(y)[axis + 1] == 1)
    if k is None:
        k = tf.shape(x)[axis]

    diag_mask = tf.reshape(
        tf.eye(k, k, dtype=x.dtype),
        tf.stack([1] * (rank + axis) + [k, k] + [1] * (-axis - 2), axis=0)
    )
    return x * (1 - diag_mask) + y * diag_mask


def _vimco_control_variate(log_f, axis):
    assert(isinstance(axis, int))
    assert(get_static_shape(log_f) is not None)
    rank = len(get_static_shape(log_f))
    assert(rank >= 1)
    assert(-rank <= axis <= -1)

    K = get_dimension_size(log_f, axis=axis)
    K_f = tf.cast(K, dtype=log_f.dtype)
    mean_except_k = (
        (tf.reduce_mean(log_f, axis=axis, keepdims=True) - log_f / K_f) *
        (K_f / (K_f - 1))
    )
    mean_except_k = tf.expand_dims(mean_except_k, axis=axis)
    x_expand = tf.expand_dims(log_f, axis=axis - 1)

    tile_rep = [1] * (rank + axis) + [K] + [1] * (-axis)
    x_tiled = tf.tile(x_expand, tile_rep)
    merged = _vimco_replace_diag(x_tiled, mean_except_k, axis=axis - 1)
    return log_mean_exp(merged, axis=axis)
@add_name_arg_doc
def vimco_estimator(log_values, latent_log_joint, axis=None, keepdims=False,
                    name=None):
    """
    Derive the gradient estimator for
    :math:`\\mathbb{E}_{q(\\mathbf{z}^{(1:K)}|\\mathbf{x})}\\Big[\\log \\frac{1}{K} \\sum_{k=1}^K f\\big(\\mathbf{x},\\mathbf{z}^{(k)}\\big)\\Big]`,
    by VIMCO (Minh and Rezende, 2016) algorithm.

    .. math::

        \\begin{aligned}
            &\\nabla\\,\\mathbb{E}_{q(\\mathbf{z}^{(1:K)}|\\mathbf{x})}\\Big[\\log \\frac{1}{K} \\sum_{k=1}^K f\\big(\\mathbf{x},\\mathbf{z}^{(k)}\\big)\\Big] \\\\
            &\\quad = \\mathbb{E}_{q(\\mathbf{z}^{(1:K)}|\\mathbf{x})}\\bigg[{\\sum_{k=1}^K \\hat{L}(\\mathbf{z}^{(k)}|\\mathbf{z}^{(-k)}) \\, \\nabla \\log q(\\mathbf{z}^{(k)}|\\mathbf{x})}\\bigg] +
                \\mathbb{E}_{q(\\mathbf{z}^{(1:K)}|\\mathbf{x})}\\bigg[{\\sum_{k=1}^K \\widetilde{w}_k\\,\\nabla\\log f(\\mathbf{x},\\mathbf{z}^{(k)})}\\bigg]
        \\end{aligned}

    where :math:`w_k = f\\big(\\mathbf{x},\\mathbf{z}^{(k)}\\big)`,
    :math:`\\widetilde{w}_k = w_k / \\sum_{i=1}^K w_i`, and:

    .. math::

        \\begin{aligned}
        \\hat{L}(\\mathbf{z}^{(k)}|\\mathbf{z}^{(-k)})
            &= \\hat{L}(\\mathbf{z}^{(1:K)}) - \\log \\frac{1}{K} \\bigg(\\hat{f}(\\mathbf{x},\\mathbf{z}^{(-k)})+\\sum_{i \\neq k} f(\\mathbf{x},\\mathbf{z}^{(i)})\\bigg) \\\\
        \\hat{L}(\\mathbf{z}^{(1:K)}) &= \\log \\frac{1}{K} \\sum_{k=1}^K f(\\mathbf{x},\\mathbf{z}^{(k)}) \\\\
        \\hat{f}(\\mathbf{x},\\mathbf{z}^{(-k)}) &= \\exp\\big(\\frac{1}{K-1} \\sum_{i \\neq k} \\log f(\\mathbf{x},\\mathbf{z}^{(i)})\\big)
        \\end{aligned}

    Args:
        log_values: Log values of the target function given `z` and `x`, i.e.,
            :math:`\\log f(\\mathbf{z},\\mathbf{x})`.
        latent_log_joint: Values of :math:`\\log q(\\mathbf{z}|\\mathbf{x})`.
        axis: The sampling axes to be reduced in outputs.
        keepdims (bool): When `axis` is specified, whether or not to keep
            the reduced axes? (default :obj:`False`)

    Returns:
        tf.Tensor: The surrogate for optimizing the original target.
            Maximizing/minimizing this surrogate via gradient descent will
            effectively maximize/minimize the original target.
    """
    _require_multi_samples(axis, 'vimco_estimator')

    # check axis and rank
    if get_static_shape(log_values) is None:
        raise ValueError('vimco_estimator only supports `log_values` with '
                         'deterministic ndims.')
    rank = len(get_static_shape(log_values))
    try:
        axis = int(axis)
    except TypeError:
        raise TypeError('vimco_estimator only supports integer `axis`: '
                        'got {!r}'.format(axis))
    if not (-rank <= axis < rank):
        raise ValueError('`axis` out of range: rank {} vs axis {}'.
                         format(rank, axis))

    # prepare for the computation
    log_values = tf.convert_to_tensor(log_values)  # log f(x,z)
    latent_log_joint = tf.convert_to_tensor(latent_log_joint)  # log q(z|x)
    with tf.name_scope(name, default_name='vimco_estimator',
                       values=[log_values, latent_log_joint]):
        # check whether or not the sampling axis has more than 1 sample
        sample_size = get_dimension_size(log_values, axis=axis)
        err_msg = ('VIMCO requires sample size >= 2: '
                   'sample axis is {}'.format(axis))
        if is_tensor_object(sample_size):
            with assert_deps([
                    tf.assert_greater_equal(
                        sample_size, 2,
                        message=err_msg
                    )]):
                log_values = tf.identity(log_values)
        else:
            if sample_size < 2:
                raise ValueError(err_msg)

        # the variance reduction term
        if axis >= 0:
            axis -= rank
        control_variate = _vimco_control_variate(log_values, axis=axis)

        # the final estimator
        true_term = log_mean_exp(log_values, axis=axis, keepdims=True)
        fake_term = tf.reduce_sum(
            latent_log_joint * tf.stop_gradient(true_term - control_variate),
            axis=axis,
            keepdims=keepdims
        )
        if not keepdims:
            true_term = tf.squeeze(true_term, axis=axis)
        estimator = true_term + fake_term

        return estimator
| 44.002825 | 314 | 0.577775 | 2,134 | 15,577 | 4.049203 | 0.118088 | 0.040505 | 0.033098 | 0.035644 | 0.544034 | 0.489642 | 0.449485 | 0.424372 | 0.373915 | 0.347413 | 0 | 0.007254 | 0.256596 | 15,577 | 353 | 315 | 44.127479 | 0.738946 | 0.46742 | 0 | 0.171598 | 0 | 0 | 0.078123 | 0.006554 | 0 | 0 | 0 | 0 | 0.094675 | 1 | 0.04142 | false | 0 | 0.029586 | 0 | 0.106509 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f283edba2f0f21c8d22c3a228847a20f6f6b797 | 438 | py | Python | code_all/day06/demo06.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | code_all/day06/demo06.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | code_all/day06/demo06.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | """
Basic dict operations
    create
    traverse
    delete
"""
# Lists are good at storing single-dimension information
# Dicts are good at storing multi-dimension information

# 1. Creation
# Syntax 1: dict_name = {key1: value1, key2: value2}
dict_gsx = {
    "name": "郭世鑫",
    "age": 26,
    "sex": "女"
}
dict_wz = {
    "name": "王志",
    "age": 22,
    "sex": "男"
}
dict_llt = {
    "name": "刘兰涛",
    "age": 25,
    "sex": "女"
}

# Syntax 2: dict_name = dict(iterable)
# Each element of the iterable must split into exactly two parts (key, value)
list01 = ["悟空", ("猪", "八戒"), ["唐", "三藏"]]
dict02 = dict(list01)
print(dict02)
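# 2. Traversal, as promised in the module docstring:
# a for loop iterates over keys; items() yields (key, value) pairs
for key in dict_gsx:
    print(key, dict_gsx[key])
for key, value in dict02.items():
    print(key, value)

# 3. Deletion: del removes a key; pop() removes it and returns the value
del dict_wz["age"]
age = dict_llt.pop("age")
print(dict_wz, age)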
| 14.129032 | 41 | 0.461187 | 57 | 438 | 3.491228 | 0.684211 | 0.050251 | 0.070352 | 0.090452 | 0.110553 | 0 | 0 | 0 | 0 | 0 | 0 | 0.080645 | 0.292237 | 438 | 30 | 42 | 14.6 | 0.56129 | 0.305936 | 0 | 0.111111 | 0 | 0 | 0.178182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f29cb0ce214325b80d6f5c9cbf743c296c6bd71 | 8,279 | py | Python | tests/fullscale/viscoelasticity/nofaults-2d/axialstrain_genmaxwell_soln.py | cehanagan/pylith | cf5c1c34040460a82f79b6eb54df894ed1b1ee93 | [
"MIT"
] | 93 | 2015-01-08T16:41:22.000Z | 2022-02-25T13:40:02.000Z | tests/fullscale/viscoelasticity/nofaults-2d/axialstrain_genmaxwell_soln.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
] | 277 | 2015-02-20T16:27:35.000Z | 2022-03-30T21:13:09.000Z | tests/fullscale/viscoelasticity/nofaults-2d/axialstrain_genmaxwell_soln.py | sloppyjuicy/pylith | ac2c1587f87e45c948638b19560813d4d5b6a9e3 | [
"MIT"
] | 71 | 2015-03-24T12:11:08.000Z | 2022-03-03T04:26:02.000Z | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/viscoelasticity/nofaults-2d/axialstrain_genmaxwell_soln.py
#
# @brief Analytical solution to axial strain relaxation problem for a generalized Maxwell viscoelastic material.
#
# 2-D axial strain solution for linear generalized Maxwell viscoelastic material.
#
# Uy=0
# ----------
# | |
# Ux=0 | | Ux=U0
# | |
# | |
# ----------
# Uy=0
#
# Dirichlet boundary conditions
# Ux(-4000,y) = 0
# Ux(+4000,y) = U0
# Uy(x,-4000) = 0
# Uy(x,+4000) = 0
#
import numpy
# Physical properties.
p_density = 2500.0
p_vs = 3464.1016
p_vp = 6000.0
p_viscosity_1 = 9.46728e17
p_viscosity_2 = 4.73364e17
p_viscosity_3 = 1.893456e18
p_shear_ratio_1 = 0.25
p_shear_ratio_2 = 0.25
p_shear_ratio_3 = 0.25
# Applied displacement.
U0 = 1.0
# Derived properties.
p_mu = p_density*p_vs*p_vs
p_lambda = p_density*p_vp*p_vp - 2.0*p_mu
p_youngs = p_mu*(3.0*p_lambda + 2.0*p_mu)/(p_lambda + p_mu)
p_poissons = 0.5*p_lambda/(p_lambda + p_mu)
p_shear_ratio_0 = 1.0 - p_shear_ratio_1 - p_shear_ratio_2 - p_shear_ratio_3
p_tau_1 = p_viscosity_1/(p_mu*p_shear_ratio_1)
p_tau_2 = p_viscosity_2/(p_mu*p_shear_ratio_2)
p_tau_3 = p_viscosity_3/(p_mu*p_shear_ratio_3)
# Time information.
year = 60.0*60.0*24.0*365.25
dt = 0.025*year
startTime = dt
endTime = 1.0*year
numSteps = 40
timeArray = numpy.linspace(startTime, endTime, num=numSteps, dtype=numpy.float64)
# Uniform strain field (plane strain).
e0 = U0/8000.0
exx = e0*numpy.ones(numSteps, dtype=numpy.float64)
eyy = numpy.zeros(numSteps, dtype=numpy.float64)
ezz = numpy.zeros(numSteps, dtype=numpy.float64)
exy = numpy.zeros(numSteps, dtype=numpy.float64)
# Deviatoric strains.
eMean = (exx + eyy + ezz)/3.0
eDevxx = exx - eMean
eDevyy = eyy - eMean
eDevzz = ezz - eMean
eDevxy = exy
# Deviatoric stresses.
timeFac1 = numpy.exp(-timeArray/p_tau_1)
timeFac2 = numpy.exp(-timeArray/p_tau_2)
timeFac3 = numpy.exp(-timeArray/p_tau_3)
sDevxx = 2.0*p_mu*eDevxx*(p_shear_ratio_0 + p_shear_ratio_1*timeFac1 + p_shear_ratio_2*timeFac2 + p_shear_ratio_3*timeFac3)
sDevyy = 2.0*p_mu*eDevyy*(p_shear_ratio_0 + p_shear_ratio_1*timeFac1 + p_shear_ratio_2*timeFac2 + p_shear_ratio_3*timeFac3)
sDevzz = 2.0*p_mu*eDevzz*(p_shear_ratio_0 + p_shear_ratio_1*timeFac1 + p_shear_ratio_2*timeFac2 + p_shear_ratio_3*timeFac3)
sDevxy = numpy.zeros_like(sDevxx)
# Total stresses.
sMean = e0*(3.0*p_lambda + 2.0*p_mu)/3.0
sxx = sDevxx + sMean
syy = sDevyy + sMean
szz = sDevzz + sMean
sxy = sDevxy
# Get viscous strains from initial deviatoric strains (strain rate = 0).
eVisxx_1 = eDevxx*timeFac1
eVisyy_1 = eDevyy*timeFac1
eViszz_1 = eDevzz*timeFac1
eVisxy_1 = eDevxy
eVisxx_2 = eDevxx*timeFac2
eVisyy_2 = eDevyy*timeFac2
eViszz_2 = eDevzz*timeFac2
eVisxy_2 = eDevxy
eVisxx_3 = eDevxx*timeFac3
eVisyy_3 = eDevyy*timeFac3
eViszz_3 = eDevzz*timeFac3
eVisxy_3 = eDevxy
# ----------------------------------------------------------------------
class AnalyticalSoln(object):
    """Analytical solution to axial extension problem.
    """
    SPACE_DIM = 2
    TENSOR_SIZE = 4

    def __init__(self):
        self.fields = {
            "displacement": self.displacement,
            "density": self.density,
            "shear_modulus": self.shear_modulus,
            "bulk_modulus": self.bulk_modulus,
            "shear_modulus_ratio": self.shear_modulus_ratio,
            "maxwell_time": self.maxwell_time,
            "cauchy_strain": self.strain,
            "cauchy_stress": self.stress,
            "viscous_strain": self.viscous_strain,
            "initial_amplitude": self.initial_displacement,
        }
        return

    def getField(self, name, mesh_entity, pts):
        field = self.fields[name](pts)
        return field

    def displacement(self, locs):
        """Compute displacement field at locations.
        """
        (npts, dim) = locs.shape
        disp = numpy.zeros((numSteps, npts, self.SPACE_DIM), dtype=numpy.float64)
        disp[:, :, 0] = numpy.dot(exx.reshape(numSteps, 1), (locs[:, 0] + 4000.0).reshape(1, npts))
        return disp

    def initial_displacement(self, locs):
        """Compute initial displacement field at locations.
        """
        (npts, dim) = locs.shape
        disp = numpy.zeros((1, npts, self.SPACE_DIM), dtype=numpy.float64)
        # assign a flat (npts,) vector; a (1, npts) array cannot broadcast into disp[0, :, 0]
        disp[0, :, 0] = e0 * (locs[:, 0] + 4000.0)
        return disp

    def density(self, locs):
        """Compute density field at locations.
        """
        (npts, dim) = locs.shape
        density = p_density * numpy.ones((1, npts, 1), dtype=numpy.float64)
        return density

    def shear_modulus(self, locs):
        """Compute shear modulus field at locations.
        """
        (npts, dim) = locs.shape
        shear_modulus = p_mu * numpy.ones((1, npts, 1), dtype=numpy.float64)
        return shear_modulus

    def bulk_modulus(self, locs):
        """Compute bulk modulus field at locations.
        """
        (npts, dim) = locs.shape
        bulk_modulus = (p_lambda + 2.0 / 3.0 * p_mu) * numpy.ones((1, npts, 1), dtype=numpy.float64)
        return bulk_modulus

    def maxwell_time(self, locs):
        """Compute Maxwell time field at locations.
        """
        (npts, dim) = locs.shape
        maxwell_time = numpy.zeros((1, npts, 3), dtype=numpy.float64)
        maxwell_time[0, :, 0] = p_tau_1
        maxwell_time[0, :, 1] = p_tau_2
        maxwell_time[0, :, 2] = p_tau_3
        return maxwell_time

    def shear_modulus_ratio(self, locs):
        """Compute shear modulus ratio field at locations.
        """
        (npts, dim) = locs.shape
        shear_modulus_ratio = numpy.zeros((1, npts, 3), dtype=numpy.float64)
        shear_modulus_ratio[0, :, 0] = p_shear_ratio_1
        shear_modulus_ratio[0, :, 1] = p_shear_ratio_2
        shear_modulus_ratio[0, :, 2] = p_shear_ratio_3
        return shear_modulus_ratio

    def strain(self, locs):
        """Compute strain field at locations.
        """
        (npts, dim) = locs.shape
        strain = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)
        strain[:, :, 0] = exx.reshape(numSteps, 1)
        strain[:, :, 1] = eyy.reshape(numSteps, 1)
        strain[:, :, 2] = ezz.reshape(numSteps, 1)
        strain[:, :, 3] = exy.reshape(numSteps, 1)
        return strain

    def stress(self, locs):
        """Compute stress field at locations.
        """
        (npts, dim) = locs.shape
        stress = numpy.zeros((numSteps, npts, self.TENSOR_SIZE), dtype=numpy.float64)
        stress[:, :, 0] = sxx.reshape(numSteps, 1)
        stress[:, :, 1] = syy.reshape(numSteps, 1)
        stress[:, :, 2] = szz.reshape(numSteps, 1)
        stress[:, :, 3] = sxy.reshape(numSteps, 1)
        return stress

    def viscous_strain(self, locs):
        """Compute viscous strain field at locations.
        """
        (npts, dim) = locs.shape
        viscous_strain = numpy.zeros((numSteps, npts, 3*self.TENSOR_SIZE), dtype=numpy.float64)
        viscous_strain[:, :, 0] = eVisxx_1.reshape(numSteps, 1)
        viscous_strain[:, :, 1] = eVisyy_1.reshape(numSteps, 1)
        viscous_strain[:, :, 2] = eViszz_1.reshape(numSteps, 1)
        viscous_strain[:, :, 3] = eVisxy_1.reshape(numSteps, 1)
        viscous_strain[:, :, 4] = eVisxx_2.reshape(numSteps, 1)
        viscous_strain[:, :, 5] = eVisyy_2.reshape(numSteps, 1)
        viscous_strain[:, :, 6] = eViszz_2.reshape(numSteps, 1)
        viscous_strain[:, :, 7] = eVisxy_2.reshape(numSteps, 1)
        viscous_strain[:, :, 8] = eVisxx_3.reshape(numSteps, 1)
        viscous_strain[:, :, 9] = eVisyy_3.reshape(numSteps, 1)
        viscous_strain[:, :, 10] = eViszz_3.reshape(numSteps, 1)
        viscous_strain[:, :, 11] = eVisxy_3.reshape(numSteps, 1)
        return viscous_strain
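# Usage sketch (illustrative; `points` would come from the test mesh):
#
#     soln = AnalyticalSoln()
#     points = numpy.array([[0.0, 0.0], [2000.0, -1000.0]], dtype=numpy.float64)
#     stress = soln.getField("cauchy_stress", None, points)   # (numSteps, 2, 4)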
# End of file
| 34.210744 | 123 | 0.628578 | 1,129 | 8,279 | 4.402126 | 0.181577 | 0.030181 | 0.055332 | 0.050905 | 0.384306 | 0.315895 | 0.222133 | 0.202817 | 0.159356 | 0.118712 | 0 | 0.054259 | 0.218625 | 8,279 | 241 | 124 | 34.352697 | 0.714021 | 0.226235 | 0 | 0.081633 | 0 | 0 | 0.020939 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.006803 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
# ===== scripts/tsne_embed.py | Imperssonator/afm-cnn | MIT =====
#!/usr/bin/env python
import os
import h5py
import click
import numpy as np
import warnings
import pandas as pd
from sklearn.decomposition import PCA, KernelPCA
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
import ntsne
# needed with slurm to see local python library under working dir
import sys
sys.path.append(os.path.join(os.getcwd(), 'code'))
#import models
#from models import Base, User, Collection, Sample, Micrograph, dbpath
#from sqlalchemy import create_engine
#from sqlalchemy.orm import sessionmaker
#
#engine = create_engine('sqlite:///data/microstructures.sqlite')
#Base.metadata.bind = engine
#DBSession = sessionmaker(bind=engine)
#db = DBSession()
def load_representations(datafile):
# grab image representations from hdf5 file
keys, features = [], []
with h5py.File(datafile, 'r') as f:
for key in f:
keys.append(key)
features.append(f[key][...])
return np.array(keys), np.array(features)
def stash_tsne_embeddings(resultsfile, keys, embeddings, perplexity):
    with h5py.File(resultsfile, 'a') as f:  # explicit append mode; newer h5py requires a mode
g = f.create_group('perplexity-{}'.format(perplexity))
for idx, key in enumerate(keys):
# add t-SNE map point for each record
g[key] = embeddings[idx]
return
@click.command()
@click.argument('datafile', type=click.Path())
@click.option('--kernel', '-k', type=click.Choice(['linear', 'chi2']), default='linear')
@click.option('--n-repeats', '-r', type=int, default=1)
@click.option('--seed', '-s', type=int, default=None)
def tsne_embed(datafile, kernel, n_repeats, seed):
    # datafile = './data/full/features/vgg16_block5_conv3-vlad-32.h5'
    if seed is not None:
        np.random.seed(seed)  # the --seed option was previously accepted but never used
    print('computing t-SNE embeddings for {}'.format(datafile))
    resultsfile = datafile.replace('features', 'tsne')
keys, features = load_representations(datafile)
if kernel == 'linear':
x_pca = PCA(n_components=50).fit_transform(features)
elif kernel == 'chi2':
gamma = -1 / np.mean(additive_chi2_kernel(features))
with warnings.catch_warnings():
warnings.simplefilter("once", DeprecationWarning)
x_pca = KernelPCA(n_components=50, kernel=chi2_kernel, gamma=gamma).fit_transform(features)
perplexity = [10, 20, 30, 40, 50, 60]
for p in perplexity:
x_tsne = ntsne.best_tsne(x_pca, perplexity=p, theta=0.1, n_repeats=n_repeats)
stash_tsne_embeddings(resultsfile, keys, x_tsne, p)
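

# Readback sketch (not part of the original script): each perplexity value gets
# its own HDF5 group, holding one t-SNE map point per representation key.
def load_tsne_embeddings(resultsfile, perplexity):
    embeddings = {}
    with h5py.File(resultsfile, 'r') as f:
        group = f['perplexity-{}'.format(perplexity)]
        for key in group:
            embeddings[key] = group[key][...]
    return embeddings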
if __name__ == '__main__':
tsne_embed()
# ===== src/brewlog/forms/widgets.py | zgoda/brewlog | BSD-3-Clause =====
from html import escape
from markupsafe import Markup
from wtforms.compat import text_type
from wtforms.widgets.core import html_params
EMPTY_HINTS = [
('', ''),
]
def textarea_with_hints(field, **kwargs):
kwargs.setdefault('id', field.id)
hints = kwargs.pop('hints', EMPTY_HINTS)
    if EMPTY_HINTS[0] not in hints:
hints = EMPTY_HINTS + hints
obj_id = kwargs['id']
hint_elem_id = f'{obj_id}_hints'
if len(hints) > 1:
hint = [f'<select {html_params(id=hint_elem_id)} class="form-control">']
for hint_value, hint_label in hints:
hint.append(
f'<option value="{escape(hint_value)}">{escape(hint_label)}</option>'
)
hint.append('</select>')
hint = Markup(''.join(hint))
else:
hint = ''
textarea = Markup(
f'<textarea {html_params(name=field.name, **kwargs)}>'
f'{escape(text_type(field._value()))}</textarea>'
)
if hint:
script = f"""
<script type="text/javascript">
$("#{hint_elem_id}").change(function() {{
var value = $(this).val();
$("#{obj_id}").val(value);
}});
</script>
"""
else:
script = ''
items = [i for i in [hint, textarea, script] if i]
return Markup('<br />'.join(items))
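

if __name__ == '__main__':  # usage sketch only; the field and hint values below are made up
    from wtforms import Form, TextAreaField

    class DemoForm(Form):
        notes = TextAreaField('Notes', widget=textarea_with_hints)

    # Hints pair stored values with display labels; the rendered <select> copies
    # the chosen value into the textarea via the inline script above.
    print(DemoForm().notes(hints=[('dry hop', 'Dry hopping'), ('whirlpool', 'Whirlpool')]))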
# ===== buildnotifylib/preferences.py | johnjohndoe/buildnotify | MIT =====
from typing import Optional, List, Tuple
from PyQt5.QtCore import QStringListModel
from PyQt5.QtWidgets import QDialog, QWidget
from buildnotifylib.config import Config, Preferences
from buildnotifylib.generated.preferences_ui import Ui_Preferences
from buildnotifylib.server_configuration_dialog import ServerConfigurationDialog
class PreferencesDialog(QDialog):
    def __init__(self, conf: Config, parent: Optional[QWidget] = None):
QDialog.__init__(self, parent)
self.conf = conf
self.ui = Ui_Preferences()
self.ui.setupUi(self)
self.checkboxes = dict(successfulBuild=self.ui.successfulBuildsCheckbox,
brokenBuild=self.ui.brokenBuildsCheckbox, fixedBuild=self.ui.fixedBuildsCheckbox,
stillFailingBuild=self.ui.stillFailingBuildsCheckbox,
connectivityIssues=self.ui.connectivityIssuesCheckbox,
lastBuildTimeForProject=self.ui.showLastBuildTimeCheckbox)
self.set_values_from_config()
# Connect up the buttons.
self.ui.addButton.clicked.connect(self.add_server)
self.ui.removeButton.clicked.connect(self.remove_element)
self.ui.buttonBox.accepted.connect(self.accept)
self.ui.configureProjectButton.clicked.connect(self.configure_projects)
def set_values_from_config(self):
self.ui.cctrayPathList.setModel(QStringListModel(self.conf.get_urls()))
self.ui.cctrayPathList.clicked.connect(lambda _: self.item_selection_changed(True))
self.ui.cctrayPathList.doubleClicked.connect(self.configure_projects)
self.ui.removeButton.clicked.connect(lambda _: self.item_selection_changed(False))
for key, checkbox in self.checkboxes.items():
checkbox.setChecked(self.conf.get_value(str(key)))
self.ui.pollingIntervalSpinBox.setValue(self.conf.get_interval_in_seconds())
self.ui.scriptCheckbox.setChecked(self.conf.get_custom_script_enabled())
self.ui.scriptLineEdit.setText(self.conf.get_custom_script())
self.ui.sortBuildByLastBuildTime.setChecked(self.conf.get_sort_by_last_build_time())
self.ui.sortBuildByName.setChecked(self.conf.get_sort_by_name())
self.ui.showLastBuildLabelCheckbox.setChecked(self.conf.get_show_last_build_label())
def item_selection_changed(self, status):
self.ui.configureProjectButton.setEnabled(status)
def add_server(self):
server_config = ServerConfigurationDialog(None, self.conf, self).open()
if server_config is not None:
self.conf.save_server_config(server_config)
urls = self.ui.cctrayPathList.model().stringList()
urls.append(server_config.url)
self.ui.cctrayPathList.setModel(QStringListModel(urls))
def remove_element(self):
index = self.ui.cctrayPathList.selectionModel().currentIndex()
urls = self.ui.cctrayPathList.model().stringList()
urls.pop(index.row())
self.ui.cctrayPathList.setModel(QStringListModel(urls))
def configure_projects(self):
url = self.ui.cctrayPathList.selectionModel().currentIndex().data()
if not url:
return
server_config = ServerConfigurationDialog(url, self.conf, self).open()
if server_config is not None:
self.conf.save_server_config(server_config)
def get_urls(self) -> List[str]:
return [str(url) for url in self.ui.cctrayPathList.model().stringList()]
def get_interval_in_seconds(self) -> int:
return self.ui.pollingIntervalSpinBox.value()
def get_selections(self) -> List[Tuple[str, bool]]:
return [(key, checkbox.isChecked()) for (key, checkbox) in list(self.checkboxes.items())]
def open(self) -> Optional[Preferences]: # type: ignore
if self.exec_() == QDialog.Accepted:
return Preferences(
urls=self.get_urls(),
interval=self.get_interval_in_seconds(),
custom_script_text=self.ui.scriptLineEdit.text(),
custom_script_checked=self.ui.scriptCheckbox.isChecked(),
sort_by_build_time=self.ui.sortBuildByLastBuildTime.isChecked(),
sort_by_name=self.ui.sortBuildByName.isChecked(),
selections=self.get_selections(),
show_last_build_label=self.ui.showLastBuildLabelCheckbox.isChecked()
)
return None
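

if __name__ == '__main__':
    # Standalone demo wiring, an assumption: the real application constructs
    # Config elsewhere and may not support a bare Config() call.
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    preferences = PreferencesDialog(Config()).open()  # Preferences on OK, None on cancel
    print(preferences)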
# ===== src/test/python/tranquilitybase/gcpdac/integration/solution/test_integration_solution.py | tranquilitybase-io/tb-gcp-dac | Apache-2.0 =====
import json
import unittest
import requests
from celery import states
from time import sleep
from unittest import TestCase
from src.test.python.tranquilitybase.gcpdac import local_test_runner
from tranquilitybase.gcpdac.integration.solution.solution_config import get_solutionId, get_payload, \
processed_environments
class UserFlowTests(unittest.TestCase):
def test_solution(self):
solutionTest_methods = SolutionTest_methods()
solution_payload = solutionTest_methods.create_solution(get_payload())
solutionTest_methods.delete_solution()
# check solution values after deleting solution
solutionTest_methods.check_values(solution_response=json.loads(solution_payload), solution_input=get_payload())
class SolutionTest_methods():
def delete_solution(self):
taskid = SolutionUtils.delete_solution_task(get_solutionId())
print("Deleting a solution")
print("Celery task id {}".format(taskid))
status = ''
max_tries = 10
try_count = 0
while status != states.SUCCESS and status != states.FAILURE:
            try_count += 1
if try_count >= max_tries:
break
print("Checking task {}".format(taskid))
status, payload = SolutionUtils.delete_solution_task_result(taskid)
print('Status {}'.format(status))
print('Payload {}'.format(payload))
sleep(10)
TestCase().assertEqual(states.SUCCESS, status)
def create_solution(self, solution_input):
taskid = SolutionUtils.create_solution_task(solution_input)
print("Creating a solution")
print("Celery task id {}".format(taskid))
status = ''
payload = {}
max_tries = 10
try_count = 0
while status != states.SUCCESS and status != states.FAILURE:
            try_count += 1
if try_count >= max_tries:
break
print("Checking task {}".format(taskid))
status, payload = SolutionUtils.create_solution_task_result(taskid)
print('Status {}'.format(status))
sleep(10)
TestCase().assertEqual(states.SUCCESS, status)
print('Payload {}'.format(payload))
return payload
def check_values(self, solution_response, solution_input):
TestCase().assertFalse("billing_account_id" in solution_response)
environment_projects = solution_response["environment_projects"]["value"]
for environment_project in environment_projects:
labels = environment_project["labels"]
self.check_common_project_labels(labels)
if 'environment' not in labels:
TestCase().fail("No environment label")
environment_label = labels['environment']
if environment_label not in processed_environments:
TestCase().fail("Invalid environment label")
workspace_project = solution_response["workspace_project"]["value"]
self.check_common_project_labels(workspace_project["labels"])
solution_folder = solution_response["solution_folder"]["value"]
display_name = solution_folder["display_name"]
TestCase().assertEqual(solution_input['name'], display_name)
def check_common_project_labels(self, labels):
if 'cost-code' not in labels:
TestCase().fail("No cost-code label")
if 'business-unit' not in labels:
TestCase().fail("No business-unit label")
if 'team' not in labels:
TestCase().fail("No team label")
class SolutionUtils:
@staticmethod
def create_solution_task(payload):
endpoint_url = f"http://{local_test_runner.houston_url()}/solution_async/"
data = json.dumps(payload, indent=4)
resp = requests.post(endpoint_url, headers=local_test_runner.headers, data=data)
resp_json = resp.json()
task_id = resp_json['taskid']
return task_id
@staticmethod
def create_solution_task_result(taskId):
endpoint_url = f"http://{local_test_runner.houston_url()}/result/create/{taskId}"
resp = requests.get(endpoint_url, headers=local_test_runner.headers)
resp_json = resp.json()
status = resp_json['status']
payload = resp_json.get('payload', None)
return status, payload
@staticmethod
def delete_solution_task(solutionId):
url = '{}/solution_async/{}'.format(local_test_runner.houston_url(), solutionId)
resp = requests.delete(url, headers=local_test_runner.headers)
resp_json = resp.json()
task_id = resp_json['taskid']
return task_id
@staticmethod
def delete_solution_task_result(taskId):
url = '{}/solution_async/result/delete/{}'.format(local_test_runner.houston_url(), taskId)
resp = requests.get(url, headers=local_test_runner.headers)
resp_json = resp.json()
status = resp_json['status']
payload = resp_json.get('payload', None)
        return status, payload
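

# The create/delete flows above duplicate their polling loops; a shared helper
# could look like this (a refactoring sketch, not part of the original file):
def poll_task(result_fn, taskid, max_tries=10, delay=10):
    """Poll a task-result endpoint until SUCCESS/FAILURE or the tries run out."""
    status, payload = '', None
    for _ in range(max_tries):
        status, payload = result_fn(taskid)
        if status in (states.SUCCESS, states.FAILURE):
            break
        sleep(delay)
    return status, payload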
# ===== PySpace/json/json1.py | dralee/LearningRepository | Apache-2.0 =====
#!/usr/bin/python3
# Filename: json1.py
"""
JSON (JavaScript Object Notation) is a lightweight data-interchange format,
based on a subset of ECMAScript.
In Python 3 the json module encodes and decodes JSON data; it provides two
functions:
json.dumps(): encodes data to a JSON string.
json.loads(): decodes a JSON string back into data.
During encoding and decoding, Python's native types and JSON types are
converted into one another; the exact mapping is as follows:
"""
import json
# Convert a Python dict to a JSON object
data = {
'no':1,
'name':'Runoob',
'url':'http://www.runoob.com'
}
json_str = json.dumps(data)
print("Python原始数据:",repr(data))
print("JSON对象:",json_str)
# ===== test.py | andygikling/aiozyre | BSD-3-Clause =====
import faulthandler
faulthandler.enable(all_threads=True)
try:
import tracemalloc
except ImportError:
# Not available in pypy
pass
else:
tracemalloc.start()
import asyncio
import sys
import unittest
from pprint import pformat
from aiozyre import Node, Stopped
class AIOZyreTestCase(unittest.TestCase):
__slots__ = ('nodes', 'loop')
def setUp(self):
self.nodes = {}
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self) -> None:
self.loop.close()
def test_cluster(self):
self.loop.run_until_complete(self.create_cluster())
try:
self.assert_received_message('soup', event='ENTER', name='salad')
except AssertionError:
self.assert_received_message('soup', event='ENTER', name='lacroix')
self.assert_received_message('soup', event='JOIN', name='salad', group='foods')
self.assert_received_message('soup', event='JOIN', name='lacroix', group='drinks')
self.assert_received_message('soup', event='SHOUT', name='salad', group='foods',
blob=b'Hello foods from salad')
self.assert_received_message('soup', event='SHOUT', name='lacroix', group='drinks',
blob=b'Hello drinks from lacroix')
try:
self.assert_received_message('salad', event='ENTER', name='soup')
except AssertionError:
self.assert_received_message('salad', event='ENTER', name='lacroix')
self.assert_received_message('salad', event='JOIN', name='soup', group='foods')
self.assert_received_message('salad', event='JOIN', name='soup', group='drinks')
self.assert_received_message('salad', event='JOIN', name='lacroix', group='drinks')
self.assert_received_message('salad', event='SHOUT', name='soup', group='foods',
blob=b'Hello foods from soup')
try:
self.assert_received_message('lacroix', event='ENTER', name='salad')
except AssertionError:
self.assert_received_message('lacroix', event='ENTER', name='soup')
self.assert_received_message('lacroix', event='JOIN', name='salad', group='foods')
self.assert_received_message('lacroix', event='JOIN', name='soup', group='drinks')
self.assert_received_message('lacroix', event='JOIN', name='soup', group='foods')
self.assert_received_message('lacroix', event='SHOUT', name='soup', group='drinks',
blob=b'Hello drinks from soup')
self.assertEqual(self.nodes['soup']['own_groups'], {'foods', 'drinks'})
self.assertEqual(self.nodes['soup']['peer_groups'], {'foods', 'drinks'})
self.assertEqual(self.nodes['soup']['peer_header_value_types'], {'pamplemousse', 'caesar'})
self.assertEqual(self.nodes['soup']['peers'], {self.nodes['salad']['uuid'], self.nodes['lacroix']['uuid']})
self.assertEqual(self.nodes['soup']['peers_by_group'], {
'foods': {self.nodes['salad']['uuid']},
'drinks': {self.nodes['lacroix']['uuid']}
})
self.assertEqual(self.nodes['salad']['own_groups'], {'foods'})
self.assertEqual(self.nodes['salad']['peer_groups'], {'foods', 'drinks'})
self.assertEqual(self.nodes['salad']['peer_header_value_types'], {'pamplemousse', 'tomato bisque'})
self.assertEqual(self.nodes['salad']['peers'], {self.nodes['lacroix']['uuid'], self.nodes['soup']['uuid']})
self.assertEqual(self.nodes['salad']['peers_by_group'], {
'foods': {self.nodes['soup']['uuid']},
'drinks': {self.nodes['lacroix']['uuid'], self.nodes['soup']['uuid']}
})
self.assertEqual(self.nodes['lacroix']['own_groups'], {'drinks'})
self.assertEqual(self.nodes['lacroix']['peer_groups'], {'foods', 'drinks'})
self.assertEqual(self.nodes['lacroix']['peer_header_value_types'], {'tomato bisque', 'caesar'})
self.assertEqual(self.nodes['lacroix']['peers'], {self.nodes['salad']['uuid'], self.nodes['soup']['uuid']})
self.assertEqual(self.nodes['lacroix']['peers_by_group'], {
'foods': {self.nodes['salad']['uuid'], self.nodes['soup']['uuid']},
'drinks': {self.nodes['soup']['uuid']}
})
def test_start_stop(self):
self.loop.run_until_complete(self.start_stop())
self.assert_received_message('fizz', blob=b'Hello #1 from buzz')
self.assert_received_message('fizz', blob=b'Hello #2 from buzz')
def test_timeout(self):
self.loop.run_until_complete(self.timeout())
def assert_received_message(self, node_name, **kwargs):
match = False
for msg in self.nodes[node_name]['messages']:
if set(kwargs.items()).issubset(set(msg.to_dict().items())):
match = True
break
self.assertTrue(match, '%s not in %s' % (pformat(kwargs), pformat(self.nodes[node_name]['messages'])))
async def create_cluster(self):
print('starting nodes...')
soup = await self.start('soup', groups=['foods', 'drinks'], headers={'type': 'tomato bisque'})
salad = await self.start('salad', groups=['foods'], headers={'type': 'caesar'})
lacroix = await self.start('lacroix', groups=['drinks'], headers={'type': 'pamplemousse'})
print('setting up listeners...')
self.listen(soup, salad, lacroix)
print('sending messages...')
await asyncio.wait([
self.create_task(soup.shout('drinks', b'Hello drinks from soup')),
self.create_task(soup.shout('foods', b'Hello foods from soup')),
self.create_task(salad.shout('foods', b'Hello foods from salad')),
self.create_task(lacroix.shout('drinks', b'Hello drinks from lacroix')),
])
print('collecting peer data...')
await asyncio.wait([
self.create_task(self.collect_peer_info('soup')),
self.create_task(self.collect_peer_info('salad')),
self.create_task(self.collect_peer_info('lacroix')),
])
# Give nodes some time to receive the messages
print('Receiving messages...')
await asyncio.sleep(5)
print('Stopping nodes...')
await asyncio.wait([
self.create_task(self.nodes[node]['node'].stop())
for node in self.nodes
])
async def timeout(self):
fizz = await self.start('fizz')
try:
with self.assertRaises(asyncio.TimeoutError):
await fizz.recv(timeout=0)
finally:
await fizz.stop()
async def start_stop(self):
fizz = await self.start('fizz', groups=['test'])
buzz = await self.start('buzz', groups=['test'])
self.listen(fizz)
await buzz.whisper(fizz.uuid, b'Hello #1 from buzz')
# Give some time to receive messages
await asyncio.sleep(3)
await fizz.stop()
await buzz.stop()
# Restart and send a new message
await fizz.start()
await buzz.start()
self.listen(fizz)
await buzz.whisper(fizz.uuid, b'Hello #2 from buzz')
# Give some time to receive messages
await asyncio.sleep(3)
await fizz.stop()
await buzz.stop()
async def start(self, name, groups=None, headers=None) -> Node:
node = Node(
name, groups=groups, headers=headers,
endpoint='inproc://{}'.format(name),
gossip_endpoint='inproc://gossip',
# verbose=True,
evasive_timeout_ms=30000,
expired_timeout_ms=30000,
)
await node.start()
self.nodes[node.name] = {'node': node, 'messages': [], 'uuid': node.uuid}
return node
def listen(self, *nodes):
for node in nodes:
# Intentionally don't wait for these, they stop themselves
self.create_task(self._listen(node))
async def _listen(self, node):
name = node.name
print('%s: listener started' % node.name)
while True:
try:
msg = await node.recv()
except Stopped:
print('%s: listener stopped' % node.name)
break
else:
self.nodes[name]['messages'].append(msg)
async def collect_peer_info(self, name):
node = self.nodes[name]['node']
print('%s: collecting peer header values "type"...'% name)
self.nodes[name]['peer_header_value_types'] = peer_header_value_types = set()
for peer in self.nodes.values():
if peer['node'].name != name:
peer_header_value_types.add(await node.peer_header_value(peer['node'].uuid, 'type'))
print('%s: collecting peers...' % name)
self.nodes[name]['peers'] = await node.peers()
print('%s: collecting peer groups...' % name)
self.nodes[name]['peer_groups'] = await node.peer_groups()
print('%s: collecting own groups...' % name)
self.nodes[name]['own_groups'] = await node.own_groups()
print('%s: collecting peers by group...' % name)
self.nodes[name]['peers_by_group'] = peers_by_group = {}
for group in {'drinks', 'foods'}:
peers_by_group[group] = await node.peers_by_group(group)
def create_task(self, coro):
if sys.version_info[:2] >= (3, 8):
return asyncio.create_task(coro)
else:
return self.loop.create_task(coro)
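

# Distilled aiozyre usage (a sketch based only on the calls exercised in the
# tests above; the node names and endpoints are made up). Not run by unittest.
async def aiozyre_smoke_test():
    a = Node('alpha', groups=['demo'], endpoint='inproc://alpha', gossip_endpoint='inproc://gossip-demo')
    b = Node('beta', groups=['demo'], endpoint='inproc://beta', gossip_endpoint='inproc://gossip-demo')
    await a.start()
    await b.start()
    await a.shout('demo', b'hello group')       # broadcast to everyone in 'demo'
    await a.whisper(b.uuid, b'hello directly')  # direct message to one peer
    msg = await b.recv(timeout=5)
    print(msg.to_dict())
    await a.stop()
    await b.stop()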
if __name__ == '__main__':
unittest.main()
# ===== markets/_bittrex_market_generator.py | lax1089/crypto-arbitrage-trader | MIT =====
import urllib.request
import urllib.error
import urllib.parse
import json
from ._bittrex_base_market import BittrexBaseMarket
class BittrexMarketGenerator():
def __init__(self):
print("initialized bittrex market gen")
def get_market_json(self):
url = 'https://bittrex.com/api/v1.1/public/getmarkets'
req = urllib.request.Request(url,
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"User-Agent": "curl/7.24.0 (x86_64-apple-darwin12.0)"})
res = urllib.request.urlopen(req)
market_json = json.loads(res.read().decode('utf8'))
return market_json
def get_market_summary_json(self):
url = 'https://bittrex.com/api/v1.1/public/getmarketsummaries'
req = urllib.request.Request(url,
headers={
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"User-Agent": "curl/7.24.0 (x86_64-apple-darwin12.0)"})
res = urllib.request.urlopen(req)
market_summary_json = json.loads(res.read().decode('utf8'))
return market_summary_json
def get_markets(self, market_json):
markets = list()
for market in market_json['result']:
market_name = market['MarketName']
curr1 = market['MarketCurrency']
curr2 = market['BaseCurrency']
markets.append(BittrexBaseMarket(curr1, curr2, market_name, 0.0025, 0.0025, 0))
return markets
def update_markets(self, markets):
market_summary_json = self.get_market_summary_json()
for market in markets:
for i in market_summary_json['result']:
if i['MarketName'] == market.code:
market.bid = i['Bid']
market.ask = i['Ask']
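

# update_markets() above rescans the whole summary list once per market; a
# single-pass dict lookup does the same job (refactoring sketch, not part of
# the original class):
def update_markets_fast(generator, markets):
    by_code = {m.code: m for m in markets}
    for entry in generator.get_market_summary_json()['result']:
        market = by_code.get(entry['MarketName'])
        if market is not None:
            market.bid = entry['Bid']
            market.ask = entry['Ask']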
# ===== section_11_(api)/using_requests.py | govex/python-lessons | MIT =====
import requests
title = input("Enter your movie: ")  # raw_input() is Python 2; input() is the Python 3 equivalent
# URL-encode the title: spaces become '+'; the original apostrophe replacement
# was garbled into a no-op in this dump, so '%27' (percent-encoding for ') is
# assumed here.
url = 'http://bechdeltest.com/api/v1/getMoviesByTitle?title={0}'.format(title).replace(" ", "+").replace("'", "%27")
print(url)
response = requests.get(url).json()
print(response)
# Search for 'matrix' gives the following JSON response (this is printed at line 11):
# [
# {
# u'rating': u'3',
# u'submitterid': u'1',
# u'imdbid': u'0234215',
# u'title': u'Matrix Reloaded, The',
# u'dubious': u'0',
# u'visible': u'1',
# u'year': u'2003',
# u'date': u'2008-07-21 00:00:00',
# u'id': u'58'
# },
# {
# u'rating': u'3',
# u'submitterid': u'1',
# u'imdbid': u'0242653',
# u'title': u'Matrix Revolutions, The',
# u'dubious': u'0',
# u'visible': u'1',
# u'year': u'2003',
# u'date': u'2008-07-21 00:00:00',
# u'id': u'59'
# },
# {
# u'rating': u'1',
# u'submitterid': u'7916',
# u'imdbid': u'0303678',
# u'title': u'Armitage: Dual Matrix',
# u'dubious': u'1',
# u'visible': u'1',
# u'year': u'2002',
# u'date': u'2013-08-01 15:26:03',
# u'id': u'4429'
# },
# {
# u'rating': u'3',
# u'submitterid': u'1',
# u'imdbid': u'0133093',
# u'title': u'Matrix, The',
# u'dubious': u'0',
# u'visible': u'1',
# u'year': u'1999',
# u'date': u'2008-07-20 00:00:00',
# u'id': u'36'
# }
# ]
# Which is then looped through
for movie in response:
print(movie['title'], movie['rating'])
# And printed:
# Matrix Reloaded, The 3
# Matrix Revolutions, The 3
# Armitage: Dual Matrix 1
# Matrix, The 3
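
# requests can do the query-string escaping itself; an equivalent lookup using
# the same endpoint, wrapped as a function for reuse (sketch, not in the lesson):
def get_movies_by_title(movie_title):
    return requests.get('http://bechdeltest.com/api/v1/getMoviesByTitle',
                        params={'title': movie_title}).json()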
# ===== Session10/Day2/dsfp_mh_mcmc.py | hsnee/LSSTC-DSFP-Sessions | MIT =====
import numpy as np
from itertools import chain
from Harvest.utils import get_logger
from torrents.alcazar_client import update_torrent_from_alcazar, \
create_or_update_torrent_from_alcazar
from torrents.models import Torrent, Realm, TorrentInfo
from torrents.signals import torrent_removed
logger = get_logger(__name__)
class AlcazarEventProcessor:
@classmethod
def _process_removed_events(cls, realm, removed_info_hashes):
removed_torrents_qs = Torrent.objects.filter(realm=realm, info_hash__in=removed_info_hashes)
removed_info_hashes = list(removed_torrents_qs.values_list('info_hash', flat=True))
logger.debug('Matched {} Torrent objects for deletion.'.format(len(removed_info_hashes)))
removed_torrents_qs.delete()
for removed_info_hash in removed_info_hashes:
torrent_removed.send_robust(cls, realm=realm, info_hash=removed_info_hash)
@classmethod
def _process_added_torrents(cls, realm, added_torrent_states):
# Short-circuit to avoid any queries
if not added_torrent_states:
return
info_hashes = [state['info_hash'] for state in added_torrent_states]
torrent_info_ids = {
item[0]: item[1] for item in
TorrentInfo.objects.filter(
realm=realm,
info_hash__in=info_hashes,
is_deleted=False,
).values_list('info_hash', 'id')
}
for added_state in added_torrent_states:
create_or_update_torrent_from_alcazar(
realm=realm,
torrent_info_id=torrent_info_ids.get(added_state['info_hash']),
torrent_state=added_state,
)
@classmethod
def _process_events(cls, realm, events):
cls._process_removed_events(realm, events['removed'])
updated_info_hashes = [state['info_hash'] for state in chain(events['added'], events['updated'])]
existing_torrents = {
t.info_hash: t for t in Torrent.objects.filter(realm=realm, info_hash__in=updated_info_hashes)}
added_torrents_states = []
logger.debug('Matched {} Torrent objects for updating.', len(existing_torrents))
num_updated = 0
for updated_state in chain(events['added'], events['updated']):
torrent = existing_torrents.get(updated_state['info_hash'])
if not torrent:
added_torrents_states.append(updated_state)
else:
if update_torrent_from_alcazar(torrent, updated_state):
num_updated += 1
logger.debug('Actually updated {} in DB.', num_updated)
logger.debug('Matched {} new states for adding.', len(added_torrents_states))
cls._process_added_torrents(realm, added_torrents_states)
@classmethod
def _process(cls, events):
realms = {realm.name: realm for realm in Realm.objects.all()}
for realm_name, batch in events.items():
realm = realms.get(realm_name)
if not realm:
realm, _ = Realm.objects.get_or_create(name=realm_name)
logger.debug('Processing events for realm {}.', realm_name)
cls._process_events(realm, batch)
@classmethod
def process(cls, events):
start = time.time()
logger.debug('Processing events.')
retries_remaining = 3
while True:
try:
cls._process(events)
break
except Exception:
if retries_remaining > 0:
logger.warning('Exception during alcazar event processing. Retrying.')
retries_remaining -= 1
else:
logger.exception('Exhausted event processing retries.')
raise
logger.debug('Completed alcazar update in {:.3f}.', time.time() - start)
| 38.117647 | 107 | 0.643261 | 448 | 3,888 | 5.279018 | 0.227679 | 0.043975 | 0.044397 | 0.040592 | 0.25074 | 0.207188 | 0.103171 | 0.061734 | 0 | 0 | 0 | 0.002825 | 0.271605 | 3,888 | 101 | 108 | 38.49505 | 0.832274 | 0.008745 | 0 | 0.111111 | 0 | 0 | 0.103063 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061728 | false | 0 | 0.074074 | 0 | 0.160494 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f3d91e7f95020f3e5f0281618902c8deef3aec6 | 8,452 | py | Python | Session10/Day2/dsfp_mh_mcmc.py | hsnee/LSSTC-DSFP-Sessions | 5d90992179c80efbd63e9ecc95fe0fef7a0d83c1 | [
"MIT"
] | 10 | 2016-08-01T16:47:14.000Z | 2019-11-12T10:56:55.000Z | Session10/Day2/dsfp_mh_mcmc.py | hsnee/LSSTC-DSFP-Sessions | 5d90992179c80efbd63e9ecc95fe0fef7a0d83c1 | [
"MIT"
] | 2 | 2017-04-26T16:05:10.000Z | 2019-09-06T20:15:34.000Z | Session10/Day2/dsfp_mh_mcmc.py | hsnee/LSSTC-DSFP-Sessions | 5d90992179c80efbd63e9ecc95fe0fef7a0d83c1 | [
"MIT"
] | 10 | 2017-04-21T23:38:13.000Z | 2021-06-08T04:06:35.000Z | import numpy as np
import matplotlib.pyplot as plt
def hastings_ratio(theta_1, theta_0, y, x, y_unc):
'''
Calculate the Hastings ratio
Parameters
----------
theta_1 : tuple
proposed new posterior position
theta_0 : tuple
current posterior position
y : arr-like, shape (n_samples)
Array of observational measurements
x : arr-like, shape (n_samples)
Array of positions where y is measured
y_unc : arr-like, shape (n_samples)
Array of uncertainties on y
Returns
-------
h_ratio : float
The Hastings ratio
'''
lnpost1 = lnposterior(theta_1, y_obs, x, y_unc)
lnpost0 = lnposterior(theta_0, y_obs, x, y_unc)
h_ratio = np.exp(lnpost1)/np.exp(lnpost0)
return h_ratio
def propose_jump(theta, cov):
'''
Generate a proposed new position for MCMC chain
Parameters
----------
theta : 1-D array_like, of length N
current position of the MCMC chain
cov : 1-D or 2-D array_like, of shape (N,) or (N, N)
Covariance matrix of the distribution. It must be symmetric
and positive-semidefinite for proper sampling.
1-D inputs for cov require the standard deviation along
each axis of the N-dimensional Gaussian.
Returns
-------
proposed_position : 1-D array_like, of length N
'''
if np.shape(theta) == np.shape(cov):
cov = np.diag(np.array(cov)**2)
proposed_position = np.random.multivariate_normal(theta, cov)
return proposed_position
def mh_mcmc(theta_0, cov, nsteps, y, x, y_unc):
'''
Metropolis-Hastings MCMC algorithm
Parameters
----------
theta_0 : 1-D array_like of shape N
starting position for the MCMC chain
cov : 1-D or 2-D array_like, of shape (N,) or (N, N)
Covariance matrix of the distribution. It must be symmetric
and positive-semidefinite for proper sampling.
1-D inputs for cov require the standard deviation along
each axis of the N-dimensional Gaussian.
nsteps : int
Number of steps to take in the MCMC chain
y : arr-like, shape (n_samples)
Array of observational measurements
x : arr-like, shape (n_samples)
Array of positions where y is measured
y_unc : arr-like, shape (n_samples)
Array of uncertainties on y
Returns
-------
(positions, lnpost_at_pos, acceptance_ratio) : tuple
positions : 2-D array_like of shape (nsteps+1, N)
Position of the MCMC chain at every step
lnpost_at_pos : 1-D array_like of shape nsteps+1
log-posterior value at the position of the MCMC chain
acceptance_ratio : 1-D array_like of shape nsteps+1
acceptance ratio of all previous steps in the chain
'''
positions = np.zeros((nsteps+1, len(theta_0)))
lnpost_at_pos = -np.inf*np.ones(nsteps+1)
acceptance_ratio = np.zeros_like(lnpost_at_pos)
accepted = 0
positions[0] = theta_0
lnpost_at_pos[0] = lnposterior(theta_0, y, x, y_unc)
for step_num in np.arange(1, nsteps+1):
proposal = propose_jump(positions[step_num-1], cov)
H = hastings_ratio(proposal, positions[step_num-1], y, x, y_unc)
R = np.random.uniform()
if H > R:
accepted += 1
positions[step_num] = proposal
lnpost_at_pos[step_num] = lnposterior(proposal, y, x, y_unc)
acceptance_ratio[step_num] = float(accepted)/step_num
else:
positions[step_num] = positions[step_num-1]
lnpost_at_pos[step_num] = lnpost_at_pos[step_num-1]
acceptance_ratio[step_num] = float(accepted)/step_num
return (positions, lnpost_at_pos, acceptance_ratio)
def plot_post(theta_0, cov, nsteps, y, x, y_unc):
'''
Plot posterior trace from MH MCMC
Parameters
----------
theta_0 : 1-D array_like of shape N
starting position for the MCMC chain
cov : 1-D or 2-D array_like, of shape (N,) or (N, N)
Covariance matrix of the distribution. It must be symmetric
and positive-semidefinite for proper sampling.
1-D inputs for cov require the standard deviation along
each axis of the N-dimensional Gaussian.
nsteps : int
Number of steps to take in the MCMC chain
y : arr-like, shape (n_samples)
Array of observational measurements
x : arr-like, shape (n_samples)
Array of positions where y is measured
y_unc : arr-like, shape (n_samples)
Array of uncertainties on y
'''
    pos, lnpost, acc = mh_mcmc(theta_0, cov, nsteps, y, x, y_unc)  # pass the y argument, not the notebook global y_obs
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(9,4))
ax1.plot(pos[:,0], pos[:,1], 'o-', alpha=0.3)
ax1.plot(2.3, 15, '*', ms=30,
mfc='Crimson', mec='0.8', mew=2,
alpha=0.7)
ax1.set_xlabel('m', fontsize=14)
ax1.set_ylabel('b', fontsize=14)
ax2.plot(pos[:,0], pos[:,1], 'o-', alpha=0.3)
cax = ax2.scatter(pos[:,0], pos[:,1], c = lnpost, zorder=10)
ax2.plot(2.3, 15, '*', ms=30,
mfc='Crimson', mec='0.8', mew=2,
alpha=0.7, zorder=20)
ax2.set_xlabel('m', fontsize=14)
ax2.set_ylabel('b', fontsize=14)
cbar = fig.colorbar(cax)
cbar.ax.set_ylabel(r'$\log \; \pi (\theta)$', fontsize=12)
fig.tight_layout()
return
def plot_mh_summary(theta_0, cov, nsteps, y, x, y_unc):
'''
Plot the posterior, draws from the posterior, and 1-d chains
Parameters
----------
theta_0 : 1-D array_like of shape N
starting position for the MCMC chain
cov : 1-D or 2-D array_like, of shape (N,) or (N, N)
Covariance matrix of the distribution. It must be symmetric
and positive-semidefinite for proper sampling.
1-D inputs for cov require the standard deviation along
each axis of the N-dimensional Gaussian.
nsteps : int
Number of steps to take in the MCMC chain
y : arr-like, shape (n_samples)
Array of observational measurements
x : arr-like, shape (n_samples)
Array of positions where y is measured
y_unc : arr-like, shape (n_samples)
Array of uncertainties on y
'''
    pos, lnpost, acc = mh_mcmc(theta_0, cov, nsteps, y, x, y_unc)  # pass the y argument, not the notebook global y_obs
fig = plt.figure(figsize=(7.5,6))
ax1 = plt.subplot2grid((4,5), (0, 0), colspan=2, rowspan=2)
ax2 = plt.subplot2grid((4,5), (2, 0), colspan=2, rowspan=2)
ax3 = plt.subplot2grid((4,5), (0, 2), colspan=3)
ax4 = plt.subplot2grid((4,5), (1, 2), colspan=3, sharex=ax3)
ax5 = plt.subplot2grid((4,5), (2, 2), colspan=3, sharex=ax3)
ax6 = plt.subplot2grid((4,5), (3, 2), colspan=3, sharex=ax3)
# posterior
ax1.hexbin(pos[:,0], pos[:,1], gridsize=50, mincnt=1, bins='log')
ax1.plot(2.3, 15, '*', ms=30,
mfc='Crimson', mec='0.8', mew=2,
alpha=0.7)
ylims = ax1.get_ylim()
xlims = ax1.get_xlim()
ax1.plot([2.3, 2.3], ylims, 'Crimson', alpha=0.3)
ax1.plot(xlims, [15, 15], 'Crimson', alpha=0.3)
ax1.set_ylim(ylims)
ax1.set_xlim(xlims)
ax1.set_xlabel('m')
ax1.set_ylabel('b')
ax1.xaxis.set_ticks_position('top')
ax1.xaxis.set_label_position('top')
ax1.tick_params(top=True, bottom=False)
# posterior draws
ax2.errorbar(x, y_obs, y_unc, fmt='o')
# ax2.plot([0,100],
# b_true + m_true*np.array([0,100]),
# '--', color='DarkOrange', lw=2, zorder=-10)
for draw in np.random.choice(len(pos), 10, replace=False):
ax2.plot([0,100], pos[draw,1] + pos[draw,0]*np.array([0,100]),
'DarkOrange', alpha=0.4)
ax2.set_xlabel('x')
ax2.set_ylabel('y')
ax3.plot(pos[:,0])
ax3.set_ylabel('m')
ax4.plot(pos[:,1])
ax4.set_ylabel('b')
ax5.plot(lnpost)
ax5.set_ylabel('$\ln \; \pi$')
ax6.plot(acc)
ax6.set_ylabel('acceptance')
ax6.set_xlabel('step number')
plt.setp(ax3.get_xticklabels(), visible=False)
plt.setp(ax4.get_xticklabels(), visible=False)
plt.setp(ax5.get_xticklabels(), visible=False)
fig.tight_layout()
    fig.subplots_adjust(top=0.93, left=0.09, right=0.99, hspace=0.07, wspace=0.75)
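

if __name__ == '__main__':
    # End-to-end sketch on synthetic data; the true m=2.3, b=15 match the
    # reference star drawn in the plots above.
    np.random.seed(42)
    x = np.sort(np.random.uniform(0, 100, 20))
    y_unc = 10.0 * np.ones_like(x)
    y_obs = 15 + 2.3 * x + np.random.normal(0, y_unc)
    plot_mh_summary([2.0, 10.0], [0.1, 1.0], 5000, y_obs, x, y_unc)
    plt.show()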
4f3ff67dbb0740f2004b391228e54493135364ff | 8,677 | py | Python | src/hub/dataload/sources/chembl/chembl_dump.py | biothings/mychem.info | 3d0f66ae9c1e9e6fa78a32868f440d162660e2aa | [
"Apache-2.0"
] | 10 | 2017-07-24T11:45:27.000Z | 2022-02-14T13:42:36.000Z | src/hub/dataload/sources/chembl/chembl_dump.py | biothings/mychem.info | 3d0f66ae9c1e9e6fa78a32868f440d162660e2aa | [
"Apache-2.0"
] | 92 | 2017-06-22T16:49:20.000Z | 2022-03-24T20:50:01.000Z | src/hub/dataload/sources/chembl/chembl_dump.py | biothings/mychem.info | 3d0f66ae9c1e9e6fa78a32868f440d162660e2aa | [
"Apache-2.0"
] | 11 | 2017-06-12T18:31:35.000Z | 2022-01-31T02:56:52.000Z | import glob
import json
import os
import os.path
import itertools
import biothings
import config
biothings.config_for_app(config)
from config import DATA_ARCHIVE_ROOT
from biothings.hub.dataload.dumper import HTTPDumper
from biothings.utils.common import iter_n
class ChemblDumper(HTTPDumper):
SRC_NAME = "chembl"
SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)
SRC_VERSION_URL = "https://www.ebi.ac.uk/chembl/api/data/status.json"
"""
As the code is written, we have:
- 1,961,462 "molecule" json objects
- 5,134 "mechanism" json objects
- 37,259 "drug_indication" json objects
- 13,382 "target" json objects
- 14,342 "binding_site" json objects
"""
SRC_DATA_URLS = {
# primary data source
"molecule": "https://www.ebi.ac.uk/chembl/api/data/molecule.json",
# supplementary data sources to `molecule`
"drug_indication": "https://www.ebi.ac.uk/chembl/api/data/drug_indication.json",
"mechanism": "https://www.ebi.ac.uk/chembl/api/data/mechanism.json",
# Used to join with `mechanism` by `target_chembl_id`
"target": "https://www.ebi.ac.uk/chembl/api/data/target.json",
# used to join with `mechanism` by `site_id`
"binding_site": "https://www.ebi.ac.uk/chembl/api/data/binding_site.json"
}
SCHEDULE = "0 12 * * *"
SLEEP_BETWEEN_DOWNLOAD = 0.1
MAX_PARALLEL_DUMP = 5 # HUB_MAX_WORKERS // 2
# number of documents in each download job, i.e. number of documents in each .part* file
TO_DUMP_DOWNLOAD_SIZE = 1000
# number of .part* files to be merged together after download
POST_DUMP_MERGE_SIZE = 100
def get_total_count_of_documents(self, src_data_name):
"""
Get the total count of documents from the first page of the url specified by `src_data_name`.
`total_count` is a member of the `page_meta` member of the root json object.
Args:
src_data_name (str): must be a key to self.__class__.SRC_DATA_URLS
Returns:
int: the total count of documents
"""
if src_data_name not in self.__class__.SRC_DATA_URLS:
raise KeyError("Cannot recognize src_data_name={}. Must be one of {{{}}}".
format(src_data_name, ", ".join(self.__class__.SRC_DATA_URLS.keys())))
data = self.load_json_from_file(self.__class__.SRC_DATA_URLS[src_data_name])
return data["page_meta"]["total_count"]
def load_json_from_file(self, file) -> dict:
"""
Read the content of `file` and return the json object
Args:
file (str): could either be an URL ("remotefile") or a path to a local text file ("localfile")
Returns:
object: the json object read from the `file`
"""
"""
Note that:
- `json.loads(string)` deserializes string
- `json.load(file)` deserializes a file object
"""
if file.startswith("http://") or file.startswith("https://"): # file is an URL
data = json.loads(self.client.get(file).text)
else: # file is a local path
            with open(file) as fh:  # close the handle instead of leaking it
                data = json.load(fh)
return data
def remote_is_better(self, remotefile, localfile):
remote_data = self.load_json_from_file(remotefile)
assert "chembl_db_version" in remote_data
assert remote_data["status"] == "UP" # API is working correctly
self.release = remote_data["chembl_db_version"]
if localfile is None:
# ok we have the release, we can't compare further so we need to download
return True
local_data = self.load_json_from_file(localfile)
self.logger.info("ChEMBL DB version: remote=={}, local=={}".
format(remote_data["chembl_db_version"], local_data["chembl_db_version"]))
        # Compare the numeric suffix of "ChEMBL_xxx": plain string comparison
        # breaks once the number gains a digit ("ChEMBL_99" > "ChEMBL_100").
        remote_version = int(remote_data["chembl_db_version"].split("_")[-1])
        local_version = int(local_data["chembl_db_version"].split("_")[-1])
        return remote_version > local_version
def create_todump_list(self, force=False, **kwargs):
version_filename = os.path.basename(self.__class__.SRC_VERSION_URL)
try:
current_localfile = os.path.join(self.current_data_folder, version_filename)
if not os.path.exists(current_localfile):
current_localfile = None
except TypeError:
# current data folder doesn't even exist
current_localfile = None
remote_better = self.remote_is_better(self.__class__.SRC_VERSION_URL, current_localfile)
self.logger.info("ChEMBL Dump: force=={}, current_localfile=={}, remote_better=={}".
format(force, current_localfile, remote_better))
if force or current_localfile is None or remote_better:
new_localfile = os.path.join(self.new_data_folder, version_filename)
self.to_dump.append({"remote": self.__class__.SRC_VERSION_URL, "local": new_localfile})
"""
Now we need to scroll the API endpoints. Let's get the total number of records
and generate URLs for each call to parallelize the downloads for each type of source data,
i.e. "molecule", "mechanism", "drug_indication", "target" and "binding_site".
The partition size is set to 1000 json objects (represented by `TO_DUMP_DOWNLOAD_SIZE`).
E.g. suppose for "molecule" data we have a `total_count` of 2500 json objects, and then we'll have,
in the process of iteration:
- (part_index, part_start) = (0, 0)
- (part_index, part_start) = (1, 1000)
- (part_index, part_start) = (2, 2000)
Therefore we would download 3 files, i.e. "molecule.part0", "molecule.part1", and "molecule.part2".
"""
part_size = self.__class__.TO_DUMP_DOWNLOAD_SIZE
for src_data_name in self.__class__.SRC_DATA_URLS:
total_count = self.get_total_count_of_documents(src_data_name)
for part_index, part_start in enumerate(range(0, total_count, part_size)):
remote = self.__class__.SRC_DATA_URLS[src_data_name] + \
"?limit=" + str(part_size) + \
"&offset=" + str(part_start)
local = os.path.join(self.new_data_folder, "{}.part{}".format(src_data_name, part_index))
self.to_dump.append({"remote": remote, "local": local})
def post_dump(self, *args, **kwargs):
"""
In the post-dump phase, for each type of source data, we merge each chunk of 100 .part* files
into one .*.json file. (This way we won't have a small number of huge files nor a pile of small files.)
E.g. as the code is written, we have 1,961,462 "molecule" json objects.
Therefore we would download 1,962 files, i.e. "molecule.part0", ..., "molecule.part1961".
For each chunk of 100 such files, e.g. "molecule.part0", ..., "molecule.part99", we merge them into one
json file, e.g. "molecule.100.json".
We'll also remove metadata (useless now)
"""
self.logger.info("Merging JSON documents in '%s'" % self.new_data_folder)
chunk_size = self.__class__.POST_DUMP_MERGE_SIZE
for src_data_name in self.__class__.SRC_DATA_URLS:
part_files = glob.iglob(os.path.join(self.new_data_folder, "{}.part*".format(src_data_name)))
for chunk, cnt in iter_n(part_files, chunk_size, with_cnt=True):
outfile = os.path.join(self.new_data_folder, "{}.{}.json".format(src_data_name, cnt))
"""
For each "molecule" json object, we only fetch the value associated with the "molecules" key.
This rule also applies to "mechanism", "drug_indication", "target" and "binding_site"
json objects.
"""
data_key = src_data_name + "s"
merged_value = itertools.chain.from_iterable(self.load_json_from_file(f)[data_key] for f in chunk)
merged_data = {data_key: list(merged_value)}
                with open(outfile, "w") as out:  # close the output handle explicitly
                    json.dump(merged_data, out)
self.logger.info("Merged %s %s files" % (src_data_name, cnt))
# now we can delete the part files
self.logger.info("Deleting part files")
part_files = glob.iglob(os.path.join(self.new_data_folder, "{}.part*".format(src_data_name)))
for f in part_files:
os.remove(f)
self.logger.info("Post-dump merge done")
# ===== data/convert2json_bar.py | A-mberr/project | Unlicense =====
# Name: Amber Nobel
# Student number: 11819359
import pandas as pd
countries_in_eu = [
'Albania', 'Andorra', 'Austria', 'Belarus', 'Belgium',
'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czechia',
'Denmark', 'Estonia', 'Finland', 'France', 'Germany', 'Greece',
'Hungary', 'Iceland', 'Ireland', 'Italy', 'Latvia', 'Liechtenstein',
'Lithuania', 'Luxembourg', 'The former Yugoslav republic of Macedonia',
'Malta', 'Republic of Moldova', 'Monaco', 'Montenegro', 'Netherlands', 'Norway',
'Poland', 'Portugal', 'Romania', 'Russia', 'San Marino',
'Serbia and Montenegro', 'Serbia', 'Slovakia', 'Slovenia', 'Spain',
'Sweden', 'Switzerland', 'Ukraine',
'United Kingdom of Great Britain and Northern Ireland'
]
codes = {
"Albania": "AL",
"Andorra": "AD",
"Austria": "AT",
"Azerbaijan": "AZ",
"Belarus": "BY",
"Belgium": "BE",
"Bosnia and Herzegovina": "BA",
"Bulgaria": "BG",
"Croatia": "HR",
"Cyprus": "CY",
"Czechia": "CZ",
"Denmark": "DK",
"Estonia": "EE",
"Finland": "FI",
"France": "FR",
"Georgia": "GE",
"Germany": "DE",
"Greece": "GR",
"Hungary": "HU",
"Iceland": "IS",
"Ireland": "IE",
"Italy": "IT",
"Kazakhstan": "KZ",
"Kosovo": "XK",
"Latvia": "LV",
"Liechtenstein": "LI",
"Lithuania": "LT",
"Luxembourg": "LU",
"The former Yugoslav republic of Macedonia": "MK",
"Malta": "MT",
"Republic of Moldova": "MD",
"Monaco": "MC",
"Montenegro": "ME",
"Netherlands": "NL",
"Norway": "NO",
"Poland": "PL",
"Portugal": "PT",
"Romania": "RO",
"Russia": "RU",
"San Marino": "SM",
"Serbia": "RS",
"Slovakia": "SK",
"Slovenia": "SI",
"Spain": "ES",
"Sweden": "SE",
"Switzerland": "CH",
"Turkey": "TR",
"Ukraine": "UA",
"United Kingdom of Great Britain and Northern Ireland": "GB",
"Vatican City": "VA"
}
vacc_types = [
"Hib",
"Pneu",
"DTP",
"Hepb",
]
for vacc_type in vacc_types:
csvFilePath = "vacc_eu_{}.csv".format(vacc_type)
df = pd.read_csv(csvFilePath, header=1)
# selects only countries from Europe
df = df[df['Country'].isin(countries_in_eu)]
# changes country name to country code
df["Country"] = df['Country'].map(codes).fillna(df['Country'])
df = df.set_index('Country')
df = df.apply(pd.to_numeric, errors='ignore')
df.to_json('vacc_bar_{}.json'.format(vacc_type), orient='index')
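
# Tiny self-contained check of the map/fillna step above (synthetic rows; the
# real data comes from the WHO CSV files):
demo = pd.DataFrame({'Country': ['Netherlands', 'Atlantis'], '2016': [94, 0]})
demo['Country'] = demo['Country'].map(codes).fillna(demo['Country'])
# 'Netherlands' becomes 'NL'; names missing from `codes`, like 'Atlantis', pass through.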
# ===== exercicio_py/ex0018_matrizes/main_v7.py | danielle8farias/Exercicios-Python-3 | MIT =====
########
# author: danielle8farias@gmail.com
# repository: https://github.com/danielle8farias
# Description: Given a matrix, the program returns its transpose.
########
import sys
sys.path.append('/home/danielle8farias/hello-world-python3/meus_modulos')
from mensagem import ler_cabecalho, criar_rodape, criar_linha
from funcoes_matriz import ler_matriz, imprimir_matriz, matriz_transposta
ler_cabecalho('matrix:')
arquivo = 'matriz_exemplo.txt'
matriz = ler_matriz(arquivo)
n_linhas = len(matriz)
n_colunas = len(matriz[0])
imprimir_matriz(matriz, n_linhas, n_colunas)
matriz_T = matriz_transposta(matriz)
ler_cabecalho('transposed matrix:')
imprimir_matriz(matriz_T, n_colunas, n_linhas)
print()
criar_rodape()
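
# For reference, the transpose that funcoes_matriz.matriz_transposta is assumed
# to compute can be written inline as a nested comprehension:
transposta_inline = [[matriz[i][j] for i in range(n_linhas)] for j in range(n_colunas)]
assert transposta_inline == matriz_T  # holds if matriz_transposta returns nested lists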
# ===== tests/test_halo_exchange.py | BuildJet/distdl | BSD-2-Clause =====
import os
import numpy as np
import pytest
import torch
from adjoint_test import check_adjoint_test_tight
from distdl.nn.mixins.conv_mixin import ConvMixin
from distdl.nn.mixins.halo_mixin import HaloMixin
from distdl.nn.mixins.pooling_mixin import PoolingMixin
use_cuda = 'USE_CUDA' in os.environ
class MockConvLayer(HaloMixin, ConvMixin):
pass
class MockPoolLayer(HaloMixin, PoolingMixin):
pass
adjoint_parametrizations = []
# Main functionality
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 9), [1, 1, 3, 3], # P_x_ranks, P_x_shape
[1, 1, 10, 7], # x_global_shape
torch.float32, # dtype
[1, 1, 3, 3], # kernel_size
[1, 1, 1, 1], # stride
[0, 0, 0, 0], # padding
[1, 1, 1, 1], # dilation
MockConvLayer, # MockKernelStyle
9, # passed to comm_split_fixture, required MPI ranks
id="conv-same_padding-float32",
marks=[pytest.mark.mpi(min_size=9)]
)
)
# Main functionality
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 9), [1, 1, 3, 3], # P_x_ranks, P_x_shape
[1, 1, 10, 7], # x_global_shape
torch.float64, # dtype
[1, 1, 3, 3], # kernel_size
[1, 1, 1, 1], # stride
[0, 0, 0, 0], # padding
[1, 1, 1, 1], # dilation
MockConvLayer, # MockKernelStyle
9, # passed to comm_split_fixture, required MPI ranks
id="conv-same_padding-float64",
marks=[pytest.mark.mpi(min_size=9)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 3), [1, 1, 3], # P_x_ranks, P_x_shape
[1, 1, 10], # x_global_shape
torch.float32, # dtype
[2], # kernel_size
[2], # stride
[0], # padding
[1], # dilation
MockConvLayer, # MockKernelStyle
3, # passed to comm_split_fixture, required MPI ranks
id="conv-same_padding-float32",
marks=[pytest.mark.mpi(min_size=3)]
)
)
adjoint_parametrizations.append(
pytest.param(
np.arange(0, 3), [1, 1, 3], # P_x_ranks, P_x_shape
[1, 1, 10], # x_global_shape
torch.float64, # dtype
[2], # kernel_size
[2], # stride
[0], # padding
[1], # dilation
MockConvLayer, # MockKernelStyle
3, # passed to comm_split_fixture, required MPI ranks
id="conv-same_padding-float64",
marks=[pytest.mark.mpi(min_size=3)]
)
)
@pytest.mark.parametrize("P_x_ranks, P_x_shape,"
"x_global_shape,"
"dtype,"
"kernel_size,"
"stride,"
"padding,"
"dilation,"
"MockKernelStyle,"
"comm_split_fixture",
adjoint_parametrizations,
indirect=["comm_split_fixture"])
def test_halo_exchange_adjoint(barrier_fence_fixture,
comm_split_fixture,
P_x_ranks, P_x_shape,
x_global_shape,
dtype,
kernel_size, stride, padding, dilation,
MockKernelStyle):
import numpy as np
import torch
from distdl.backends.mpi.partition import MPIPartition
from distdl.nn.halo_exchange import HaloExchange
from distdl.nn.padnd import PadNd
from distdl.utilities.slicing import compute_subshape
from distdl.utilities.torch import zero_volume_tensor
device = torch.device('cuda' if use_cuda else 'cpu')
# Isolate the minimum needed ranks
base_comm, active = comm_split_fixture
if not active:
return
P_world = MPIPartition(base_comm)
P_x_base = P_world.create_partition_inclusive(P_x_ranks)
P_x = P_x_base.create_cartesian_topology_partition(P_x_shape)
x_global_shape = np.asarray(x_global_shape)
kernel_size = np.asarray(kernel_size)
stride = np.asarray(stride)
padding = np.asarray(padding)
dilation = np.asarray(dilation)
halo_shape = None
recv_buffer_shape = None
send_buffer_shape = None
if P_x.active:
mockup_layer = MockKernelStyle()
exchange_info = mockup_layer._compute_exchange_info(x_global_shape,
kernel_size,
stride,
padding,
dilation,
P_x.active,
P_x.shape,
P_x.index)
halo_shape = exchange_info[0]
recv_buffer_shape = exchange_info[1]
send_buffer_shape = exchange_info[2]
pad_layer = PadNd(halo_shape, value=0)
pad_layer = pad_layer.to(device)
halo_layer = HaloExchange(P_x, halo_shape, recv_buffer_shape, send_buffer_shape)
halo_layer = halo_layer.to(device)
x = zero_volume_tensor(x_global_shape[0], device=device)
if P_x.active:
x_local_shape = compute_subshape(P_x.shape,
P_x.index,
x_global_shape)
x = torch.randn(*x_local_shape, device=device).to(dtype)
x = pad_layer.forward(x)
x.requires_grad = True
dy = zero_volume_tensor(x_global_shape[0], device=device)
if P_x.active:
dy = torch.randn(*x.shape, device=device).to(dtype)
x_clone = x.clone()
dy_clone = dy.clone()
# x_clone is modified in place by halo_layer, but we assign y to
# reference it for clarity
y = halo_layer(x_clone)
# dy_clone is modified in place by halo_layer-adjoint, but we assign dx to
# reference it for clarity
y.backward(dy_clone)
dx = dy_clone
x = x.detach()
dx = dx.detach()
dy = dy.detach()
y = y.detach()
check_adjoint_test_tight(P_world, x, dx, y, dy)
P_world.deactivate()
P_x_base.deactivate()
P_x.deactivate()
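# Aside (not distdl code): check_adjoint_test_tight verifies the linear-
# operator identity <A x, dy> == <x, A^T dy> to a tight tolerance. A plain
# NumPy sketch of the same identity:
#   A = np.random.randn(4, 3)
#   x, dy = np.random.randn(3), np.random.randn(4)
#   assert np.isclose(np.dot(A @ x, dy), np.dot(x, A.T @ dy))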
| 32.78125 | 84 | 0.560375 | 754 | 6,294 | 4.421751 | 0.185676 | 0.016797 | 0.043191 | 0.016797 | 0.494001 | 0.459208 | 0.391722 | 0.390522 | 0.390522 | 0.390522 | 0 | 0.026895 | 0.350175 | 6,294 | 191 | 85 | 32.95288 | 0.788264 | 0.132031 | 0 | 0.393548 | 0 | 0 | 0.045211 | 0.018454 | 0 | 0 | 0 | 0 | 0 | 1 | 0.006452 | false | 0.012903 | 0.096774 | 0 | 0.122581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f4614f1c0ed9b06d2d2365fd147912b5bfcb7c3 | 1,489 | py | Python | algorithms/logistic_regression/logitsticRegresision.py | krasen86/ML_algorithms | 54dd289ec82b36119bd5680833d0faab67058eb3 | [
"MIT"
] | null | null | null | algorithms/logistic_regression/logitsticRegresision.py | krasen86/ML_algorithms | 54dd289ec82b36119bd5680833d0faab67058eb3 | [
"MIT"
] | null | null | null | algorithms/logistic_regression/logitsticRegresision.py | krasen86/ML_algorithms | 54dd289ec82b36119bd5680833d0faab67058eb3 | [
"MIT"
] | null | null | null | import numpy as np
class LogisticRegression:
def __init__(self, learningRate=0.001, numberOfIterations=1000):
self.learningRate = learningRate
self.numberOfIterations = numberOfIterations
self.weights = None
self.bias = None
def fit(self, samples, labels):
# initialize parameters
numberOfSamples, numberOfFeatures = samples.shape
self.weights = np.zeros(numberOfFeatures)
self.bias = 0
# gradient descent
for _ in range(self.numberOfIterations):
# approximate the labels with a linear combination of weights and samples plus bias
linearModel = np.dot(samples, self.weights) + self.bias
# apply sigmoid
predicted = self._sigmoid(linearModel)
# compute gradients
derivativeWeights = (1/numberOfSamples) * np.dot(samples.T, predicted - labels)
derivativeBias = (1/numberOfSamples) * np.sum(predicted - labels)
# update parameters based on the gradient
self.weights -= self.learningRate * derivativeWeights
self.bias -= self.learningRate * derivativeBias
def predict(self, samples):
linearModel = np.dot(samples, self.weights) + self.bias
predicted = self._sigmoid(linearModel)
labelPredictedClasses = [1 if i > 0.5 else 0 for i in predicted]
return labelPredictedClasses
def _sigmoid(self, x):
return 1/(1 + np.exp(-x)) | 38.179487 | 94 | 0.650101 | 152 | 1,489 | 6.315789 | 0.407895 | 0.057292 | 0.0375 | 0.047917 | 0.0875 | 0.0875 | 0.0875 | 0.0875 | 0 | 0 | 0 | 0.015668 | 0.271323 | 1,489 | 39 | 95 | 38.179487 | 0.869124 | 0.128946 | 0 | 0.16 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.04 | 0.04 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
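# Hypothetical usage sketch for the LogisticRegression class above
# (synthetic data; the values are illustrative only):
#   X = np.array([[0.0], [1.0], [2.0], [3.0]])
#   y = np.array([0, 0, 1, 1])
#   clf = LogisticRegression(learningRate=0.1, numberOfIterations=2000)
#   clf.fit(X, y)
#   clf.predict(X)  # expected to approach [0, 0, 1, 1]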
4f469ad81dc702dbbdb599c1d178b52955ddb183 | 5,432 | py | Python | mars/optimization/logical/common/head.py | perfumescent/mars | 9bf9bb990587cb9f091d108ed7f725fb429a80e8 | [
"Apache-2.0"
] | null | null | null | mars/optimization/logical/common/head.py | perfumescent/mars | 9bf9bb990587cb9f091d108ed7f725fb429a80e8 | [
"Apache-2.0"
] | null | null | null | mars/optimization/logical/common/head.py | perfumescent/mars | 9bf9bb990587cb9f091d108ed7f725fb429a80e8 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from ....core import OperandType, TileableType, CHUNK_TYPE
from ....dataframe.base.value_counts import DataFrameValueCounts
from ....dataframe.datasource.core import HeadOptimizedDataSource
from ....dataframe.sort.core import DataFrameSortOperand
from ....dataframe.utils import parse_index
from ....utils import implements
from ..core import OptimizationRule, OptimizationRecord, OptimizationRecordType
class HeadPushDown(OptimizationRule):
@implements(OptimizationRule.match)
def match(self, op: OperandType) -> bool:
node = op.outputs[0]
input_node = self._graph.predecessors(node)[0]
successors = self._graph.successors(input_node)
return self._all_successor_head_pushdown(successors)
def _all_successor_head_pushdown(self, successors: List[TileableType]):
for succ in successors:
rule_types = self._optimizer_cls.get_rule_types(type(succ.op))
if rule_types is None:
return False
for rule_type in rule_types:
if not issubclass(rule_type, HeadPushDown):
return False
rule = rule_type(self._graph, self._records, self._optimizer_cls)
if not rule._can_push_down(succ.op):
return False
return True
def _can_push_down(self, op: OperandType) -> bool:
input_nodes = self._graph.predecessors(op.outputs[0])
accept_types = (
HeadOptimizedDataSource,
DataFrameSortOperand,
DataFrameValueCounts,
)
if (
len(input_nodes) == 1
and op.can_be_optimized()
and isinstance(input_nodes[0].op, accept_types)
and input_nodes[0] not in self._graph.results
):
return True
return False
def apply(self, op: OperandType):
node = op.outputs[0]
input_node = self._graph.predecessors(node)[0]
nrows = input_node.op.nrows or 0
head = op.indexes[0].stop
new_input_op = input_node.op.copy()
new_input_op._key = input_node.op.key
new_input_op._nrows = nrows = max(nrows, head)
new_input_params = input_node.params.copy()
new_input_params["shape"] = (nrows,) + input_node.shape[1:]
pandas_index = node.index_value.to_pandas()[:nrows]
new_input_params["index_value"] = parse_index(pandas_index, node)
new_input_params.update(input_node.extra_params)
new_entity = (
new_input_op.new_tileable
if not isinstance(node, CHUNK_TYPE)
else new_input_op.new_chunk
)
new_input_node = new_entity(input_node.inputs, kws=[new_input_params]).data
if (
new_input_node.op.nrows == head
and self._graph.count_successors(input_node) == 1
):
new_input_node._key = node.key
new_input_node._id = node.id
# the old input node can simply be removed
self._graph.add_node(new_input_node)
for succ in self._graph.successors(node):
self._graph.add_edge(new_input_node, succ)
for pred in self._graph.predecessors(input_node):
self._graph.add_edge(pred, new_input_node)
self._graph.remove_node(input_node)
self._graph.remove_node(node)
# mark optimization record
# the input node is removed
self._records.append_record(
OptimizationRecord(input_node, None, OptimizationRecordType.delete)
)
self._records.append_record(
OptimizationRecord(node, new_input_node, OptimizationRecordType.replace)
)
new_node = new_input_node
else:
self._replace_node(input_node, new_input_node)
new_op = op.copy()
new_op._key = op.key
params = node.params.copy()
params.update(node.extra_params)
new_entity = (
new_op.new_tileable
if not isinstance(node, CHUNK_TYPE)
else new_op.new_chunk
)
new_node = new_entity([new_input_node], kws=[params]).data
self._replace_node(node, new_node)
# mark optimization record
self._records.append_record(
OptimizationRecord(
input_node, new_input_node, OptimizationRecordType.replace
)
)
self._records.append_record(
OptimizationRecord(node, new_node, OptimizationRecordType.replace)
)
# check whether the node is in the results
try:
i = self._graph.results.index(node)
self._graph.results[i] = new_node
except ValueError:
pass
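# Aside (plain pandas, not Mars code): the equivalence this rule exploits
# for head-optimized datasources -- reading only the first k rows up front
# yields the same frame as reading everything and taking head(k):
#   pd.read_csv(path, nrows=5).equals(pd.read_csv(path).head(5))  # True
# (path is a placeholder for any well-formed CSV file.)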
| 39.362319 | 88 | 0.631443 | 642 | 5,432 | 5.087227 | 0.267913 | 0.079914 | 0.044091 | 0.027557 | 0.195958 | 0.177281 | 0.119412 | 0.0594 | 0.0594 | 0.0594 | 0 | 0.006219 | 0.28958 | 5,432 | 137 | 89 | 39.649635 | 0.840114 | 0.12905 | 0 | 0.205607 | 0 | 0 | 0.003396 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037383 | false | 0.009346 | 0.074766 | 0 | 0.186916 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f49a3fb21d094950777761f4be4e543e9c25599 | 823 | py | Python | altair/vegalite/v2/examples/cumulative_wiki_donations.py | zjffdu/altair | cd34b03ce011f16616f7c6c59a3c60436b679302 | [
"BSD-3-Clause"
] | null | null | null | altair/vegalite/v2/examples/cumulative_wiki_donations.py | zjffdu/altair | cd34b03ce011f16616f7c6c59a3c60436b679302 | [
"BSD-3-Clause"
] | null | null | null | altair/vegalite/v2/examples/cumulative_wiki_donations.py | zjffdu/altair | cd34b03ce011f16616f7c6c59a3c60436b679302 | [
"BSD-3-Clause"
] | null | null | null | """
Cumulative Wikipedia Donations
==============================
This chart shows cumulative donations to Wikipedia over the past 10 years. This chart was inspired by https://www.reddit.com/r/dataisbeautiful/comments/7guwd0/cumulative_wikimedia_donations_over_the_past_10/ but using lines instead of areas.
Data comes from https://frdata.wikimedia.org/.
"""
import altair as alt
data = "https://frdata.wikimedia.org/donationdata-vs-day.csv"
chart = alt.Chart(data).mark_line().encode(
alt.X(
'date:T', timeUnit='monthdate',
axis=alt.Axis(format='%B', title='Month')
),
alt.Y(
'max(ytdsum):Q', stack=None,
axis=alt.Axis(title='Cumulative Donations')
),
alt.Color('date:O', timeUnit='year', legend=alt.Legend(title='Year')),
alt.Order('data:O', timeUnit='year')
)
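# Optional follow-up (assuming a local environment with Altair installed):
# the chart could be written to a standalone HTML file for viewing.
# chart.save('cumulative_wiki_donations.html')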
| 32.92 | 241 | 0.669502 | 111 | 823 | 4.900901 | 0.603604 | 0.033088 | 0.040441 | 0.047794 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008559 | 0.148238 | 823 | 24 | 242 | 34.291667 | 0.767475 | 0.426488 | 0 | 0.142857 | 0 | 0 | 0.282328 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f4ae7fe0c623e0a6063b5d4970978fb4fb9660a | 542 | py | Python | modules/hash_extract.py | TURROKS/IOC-Parser | 2c6d0fd049fbf3ba00766459ad19cde10aa8e6a8 | [
"Apache-2.0"
] | null | null | null | modules/hash_extract.py | TURROKS/IOC-Parser | 2c6d0fd049fbf3ba00766459ad19cde10aa8e6a8 | [
"Apache-2.0"
] | null | null | null | modules/hash_extract.py | TURROKS/IOC-Parser | 2c6d0fd049fbf3ba00766459ad19cde10aa8e6a8 | [
"Apache-2.0"
] | null | null | null | import modules.common as common
import iocextract
# Reads the inp file and extracts hashes from each line using iocextract's regexes
def main(inp, out):
for line in inp.readlines():
for new_hash in iocextract.extract_hashes(line):
if new_hash not in common.Hashes:
common.Hashes.append(new_hash)
print(new_hash + ', ')
else:
print(new_hash + ' Already in List')
out.write('#####HASHES#####\n\n')
for item in common.Hashes:
out.write('"' + item + '", \n') | 25.809524 | 63 | 0.581181 | 70 | 542 | 4.414286 | 0.5 | 0.113269 | 0.090615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.298893 | 542 | 21 | 64 | 25.809524 | 0.813158 | 0.112546 | 0 | 0 | 0 | 0 | 0.091667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.230769 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f51ab0338418834dbcbbb94011e6959c76bbe20 | 29,534 | py | Python | openslides/motions/models.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | openslides/motions/models.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | openslides/motions/models.py | rolandgeider/OpenSlides | 331141c17cb23da26e377d4285efdb4a50753a59 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Max
from django.utils import formats
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy, ugettext_noop
from jsonfield import JSONField
from openslides.agenda.models import Item
from openslides.core.config import config
from openslides.core.models import Tag
from openslides.mediafiles.models import Mediafile
from openslides.poll.models import (
BaseOption,
BasePoll,
BaseVote,
CollectDefaultVotesMixin,
)
from openslides.utils.models import RESTModelMixin
from openslides.utils.search import user_name_helper
from .access_permissions import (
CategoryAccessPermissions,
MotionAccessPermissions,
WorkflowAccessPermissions,
)
from .exceptions import WorkflowError
class Motion(RESTModelMixin, models.Model):
"""
The Motion Class.
This class is the main entry point to all other classes related to a motion.
"""
access_permissions = MotionAccessPermissions()
active_version = models.ForeignKey(
'MotionVersion',
on_delete=models.SET_NULL,
null=True,
related_name="active_version")
"""
Points to a specific version.
Used by the permitted-version system to decide which version is the active
version. Could also be used to only choose a specific version as a default
version. Like the sighted versions on Wikipedia.
"""
state = models.ForeignKey(
'State',
on_delete=models.SET_NULL,
null=True) # TODO: Check whether null=True is necessary.
"""
The related state object.
This attribute is used to get the current state of the motion.
"""
identifier = models.CharField(max_length=255, null=True, blank=True,
unique=True)
"""
A string as human readable identifier for the motion.
"""
identifier_number = models.IntegerField(null=True)
"""
Counts the number of the motion in one category.
Needed to find the next free motion identifier.
"""
category = models.ForeignKey(
'Category',
on_delete=models.SET_NULL,
null=True,
blank=True)
"""
ForeignKey to one category of motions.
"""
attachments = models.ManyToManyField(Mediafile, blank=True)
"""
Many to many relation to mediafile objects.
"""
parent = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name='amendments')
"""
Field for amendments to reference the motion that should be altered.
Null if the motion is not an amendment.
"""
tags = models.ManyToManyField(Tag, blank=True)
"""
Tags to categorise motions.
"""
submitters = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='motion_submitters', blank=True)
"""
Users who submit this motion.
"""
supporters = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='motion_supporters', blank=True)
"""
Users who support this motion.
"""
class Meta:
default_permissions = ()
permissions = (
('can_see', 'Can see motions'),
('can_create', 'Can create motions'),
('can_support', 'Can support motions'),
('can_manage', 'Can manage motions'),
)
ordering = ('identifier', )
verbose_name = ugettext_noop('Motion')
def __str__(self):
"""
Return the title of this motion.
"""
return self.title
# TODO: Use transaction
def save(self, use_version=None, *args, **kwargs):
"""
Save the motion.
1. Set the state of a new motion to the default state.
2. Ensure that the identifier is not an empty string.
3. Save the motion object.
4. Save the version data.
5. Set the active version for the motion if a new version object was saved.
The version data is *not* saved, if
1. the django-feature 'update_fields' is used or
2. the argument use_version is False (differ to None).
The argument use_version chooses the version object into which the
version data is saved.
* If use_version is False, no version data is saved.
* If use_version is None, the last version is used.
* Else the given version is used.
To create and use a new version object, you have to set it via the
use_version argument. You have to set the title, text and reason into
this version object before giving it to this save method. The properties
motion.title, motion.text and motion.reason will be ignored.
"""
if not self.state:
self.reset_state()
# Solves the problem that there can only be one motion with an empty
# string as identifier.
if not self.identifier and isinstance(self.identifier, str):
self.identifier = None
super(Motion, self).save(*args, **kwargs)
if 'update_fields' in kwargs:
# Do not save the version data if only some motion fields are updated.
return
if use_version is False:
# We do not need to save the version.
return
elif use_version is None:
use_version = self.get_last_version()
# Save title, text and reason into the version object.
for attr in ['title', 'text', 'reason']:
_attr = '_%s' % attr
data = getattr(self, _attr, None)
if data is not None:
setattr(use_version, attr, data)
delattr(self, _attr)
# If version is not in the database, test if it has new data and set
# the version_number.
if use_version.id is None:
if not self.version_data_changed(use_version):
# We do not need to save the version.
return
version_number = self.versions.aggregate(Max('version_number'))['version_number__max'] or 0
use_version.version_number = version_number + 1
# Necessary line if the version was set before the motion got an id.
use_version.motion = use_version.motion
use_version.save()
# Set the active version of this motion. This has to be done after the
# version is saved in the database.
# TODO: Move parts of these last lines of code outside the save method
# when other versions than the last ones should be edited later on.
if self.active_version is None or not self.state.leave_old_version_active:
# TODO: Don't call this if it was not a new version
self.active_version = use_version
self.save(update_fields=['active_version'])
def version_data_changed(self, version):
"""
Compare the version with the last version of the motion.
Returns True if the version data (title, text, reason) is different,
else returns False.
"""
if not self.versions.exists():
# If there is no version in the database, the data has always changed.
return True
last_version = self.get_last_version()
for attr in ['title', 'text', 'reason']:
if getattr(last_version, attr) != getattr(version, attr):
return True
return False
def set_identifier(self):
"""
Sets the motion identifier automatically according to the config value if
it is not set yet.
"""
# The identifier is already set or should be set manually
if config['motions_identifier'] == 'manually' or self.identifier:
# Do not set an identifier.
return
# The motion is an amendment
elif self.is_amendment():
motions = self.parent.amendments.all()
# The motions should be counted per category
elif config['motions_identifier'] == 'per_category':
motions = Motion.objects.filter(category=self.category)
# The motions should be counted over all.
else:
motions = Motion.objects.all()
number = motions.aggregate(Max('identifier_number'))['identifier_number__max'] or 0
if self.is_amendment():
parent_identifier = self.parent.identifier or ''
prefix = '%s %s ' % (parent_identifier, config['motions_amendments_prefix'])
elif self.category is None or not self.category.prefix:
prefix = ''
else:
prefix = '%s ' % self.category.prefix
number += 1
identifier = '%s%d' % (prefix, number)
while Motion.objects.filter(identifier=identifier).exists():
number += 1
identifier = '%s%d' % (prefix, number)
self.identifier = identifier
self.identifier_number = number
def get_title(self):
"""
Get the title of the motion.
The title is taken from motion.version.
"""
try:
return self._title
except AttributeError:
return self.get_active_version().title
def set_title(self, title):
"""
Set the title of the motion.
The title will be saved in the version object, when motion.save() is
called.
"""
self._title = title
title = property(get_title, set_title)
"""
The title of the motion.
Is saved in a MotionVersion object.
"""
def get_text(self):
"""
Get the text of the motion.
Similar to get_title().
"""
try:
return self._text
except AttributeError:
return self.get_active_version().text
def set_text(self, text):
"""
Set the text of the motion.
Similar to set_title().
"""
self._text = text
text = property(get_text, set_text)
"""
The text of a motion.
Is saved in a MotionVersion object.
"""
def get_reason(self):
"""
Get the reason of the motion.
Similar to get_title().
"""
try:
return self._reason
except AttributeError:
return self.get_active_version().reason
def set_reason(self, reason):
"""
Set the reason of the motion.
Similar to set_title().
"""
self._reason = reason
reason = property(get_reason, set_reason)
"""
The reason for the motion.
Is saved in a MotionVersion object.
"""
def get_new_version(self, **kwargs):
"""
Return a version object, not saved in the database.
The version data of the new version object is populated with the data
set via motion.title, motion.text, motion.reason if these data are
not given as keyword arguments. If the data is not set in the motion
attributes, it is populated with the data from the last version
object if such object exists.
"""
if self.pk is None:
# Do not reference the MotionVersion object to an unsaved motion
new_version = MotionVersion(**kwargs)
else:
new_version = MotionVersion(motion=self, **kwargs)
if self.versions.exists():
last_version = self.get_last_version()
else:
last_version = None
for attr in ['title', 'text', 'reason']:
if attr in kwargs:
continue
_attr = '_%s' % attr
data = getattr(self, _attr, None)
if data is None and last_version is not None:
data = getattr(last_version, attr)
if data is not None:
setattr(new_version, attr, data)
return new_version
def get_active_version(self):
"""
Returns the active version of the motion.
If no active version is set by now, the last_version is used.
"""
if self.active_version:
return self.active_version
else:
return self.get_last_version()
def get_last_version(self):
"""
Return the newest version of the motion.
"""
try:
return self.versions.order_by('-version_number')[0]
except IndexError:
return self.get_new_version()
def is_submitter(self, user):
"""
Returns True if user is a submitter of this motion, else False.
"""
return user in self.submitters.all()
def is_supporter(self, user):
"""
Returns True if user is a supporter of this motion, else False.
"""
return user in self.supporters.all()
def create_poll(self):
"""
Create a new poll for this motion.
Return the new poll object.
"""
if self.state.allow_create_poll:
poll = MotionPoll.objects.create(motion=self)
poll.set_options()
return poll
else:
raise WorkflowError('You can not create a poll in state %s.' % self.state.name)
@property
def workflow(self):
"""
Returns the id of the workflow of the motion.
"""
# TODO: Rename to workflow_id
return self.state.workflow.pk
def set_state(self, state):
"""
Set the state of the motion.
'state' can be the id of a state object or a state object.
"""
if type(state) is int:
state = State.objects.get(pk=state)
if not state.dont_set_identifier:
self.set_identifier()
self.state = state
def reset_state(self, workflow=None):
"""
Set the state to the default state.
'workflow' can be a workflow, an id of a workflow or None.
If the motion is new and workflow is None, it chooses the default
workflow from config.
"""
if type(workflow) is int:
workflow = Workflow.objects.get(pk=workflow)
if workflow is not None:
new_state = workflow.first_state
elif self.state:
new_state = self.state.workflow.first_state
else:
new_state = (Workflow.objects.get(pk=config['motions_workflow']).first_state or
Workflow.objects.get(pk=config['motions_workflow']).states.all()[0])
self.set_state(new_state)
def get_agenda_title(self):
"""
Return a simple title string for the agenda.
Returns only the motion title so that you have only agenda item number
and title in the agenda.
"""
return str(self)
def get_agenda_list_view_title(self):
"""
Return a title string for the agenda list view.
Returns only the motion title so that you have agenda item number,
title and motion identifier in the agenda.
Note: It has to be the same return value as in JavaScript.
"""
if self.identifier:
string = '%s (%s %s)' % (self.title, _(self._meta.verbose_name), self.identifier)
else:
string = '%s (%s)' % (self.title, _(self._meta.verbose_name))
return string
@property
def agenda_item(self):
"""
Returns the related agenda item.
"""
content_type = ContentType.objects.get_for_model(self)
return Item.objects.get(object_id=self.pk, content_type=content_type)
@property
def agenda_item_id(self):
"""
Returns the id of the agenda item object related to this object.
"""
return self.agenda_item.pk
def get_allowed_actions(self, person):
"""
Return a dictionary with all allowed actions for a specific person.
The dictionary contains the following actions.
* see
* update / edit
* delete
* create_poll
* support
* unsupport
* change_state
* reset_state
NOTE: If you update this function please also update the
'isAllowed' function on client side in motions/site.js.
"""
# TODO: Remove this method and implement these things in the views.
actions = {
'see': (person.has_perm('motions.can_see') and
(not self.state.required_permission_to_see or
person.has_perm(self.state.required_permission_to_see) or
self.is_submitter(person))),
'update': (person.has_perm('motions.can_manage') or
(self.is_submitter(person) and
self.state.allow_submitter_edit)),
'delete': person.has_perm('motions.can_manage'),
'create_poll': (person.has_perm('motions.can_manage') and
self.state.allow_create_poll),
'support': (self.state.allow_support and
config['motions_min_supporters'] > 0 and
not self.is_submitter(person) and
not self.is_supporter(person)),
'unsupport': (self.state.allow_support and
self.is_supporter(person)),
'change_state': person.has_perm('motions.can_manage'),
'reset_state': person.has_perm('motions.can_manage')}
actions['edit'] = actions['update']
return actions
def write_log(self, message_list, person=None):
"""
Write a log message.
The message should be in English and translatable,
e. g. motion.write_log(message_list=[ugettext_noop('Message Text')])
"""
MotionLog.objects.create(motion=self, message_list=message_list, person=person)
def is_amendment(self):
"""
Returns True if the motion is an amendment.
A motion is an amendment if amendments are activated in the config and
the motion has a parent.
"""
return config['motions_amendments_enabled'] and self.parent is not None
def get_search_index_string(self):
"""
Returns a string that can be indexed for the search.
"""
return " ".join((
self.title or '',
self.text or '',
self.reason or '',
str(self.category) if self.category else '',
user_name_helper(self.submitters.all()),
user_name_helper(self.supporters.all()),
" ".join(tag.name for tag in self.tags.all())))
class MotionVersion(RESTModelMixin, models.Model):
"""
A MotionVersion object saves some data of the motion.
"""
motion = models.ForeignKey(
Motion,
on_delete=models.CASCADE,
related_name='versions')
"""The motion to which the version belongs."""
version_number = models.PositiveIntegerField(default=1)
"""An id for this version in realation to a motion.
Is unique for each motion.
"""
title = models.CharField(max_length=255)
"""The title of a motion."""
text = models.TextField()
"""The text of a motion."""
reason = models.TextField(null=True, blank=True)
"""The reason for a motion."""
creation_time = models.DateTimeField(auto_now=True)
"""Time when the version was saved."""
class Meta:
default_permissions = ()
unique_together = ("motion", "version_number")
def __str__(self):
"""Return a string, representing this object."""
counter = self.version_number or ugettext_lazy('new')
return "Motion %s, Version %s" % (self.motion_id, counter)
@property
def active(self):
"""Return True, if the version is the active version of a motion. Else: False."""
return self.active_version.exists()
def get_root_rest_element(self):
"""
Returns the motion to this instance which is the root REST element.
"""
return self.motion
class Category(RESTModelMixin, models.Model):
"""
Model for categories of motions.
"""
access_permissions = CategoryAccessPermissions()
name = models.CharField(max_length=255)
"""Name of the category."""
prefix = models.CharField(blank=True, max_length=32)
"""Prefix of the category.
Used to build the identifier of a motion.
"""
class Meta:
default_permissions = ()
ordering = ['prefix']
def __str__(self):
return self.name
class MotionLog(RESTModelMixin, models.Model):
"""Save a logmessage for a motion."""
motion = models.ForeignKey(
Motion,
on_delete=models.CASCADE,
related_name='log_messages')
"""The motion to witch the object belongs."""
message_list = JSONField()
"""
The log message. It should be a list of strings in English.
"""
person = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL,
null=True)
"""A user object, who created the log message. Optional."""
time = models.DateTimeField(auto_now=True)
"""The Time, when the loged action was performed."""
class Meta:
default_permissions = ()
ordering = ['-time']
def __str__(self):
"""
Return a string, representing the log message.
"""
time = formats.date_format(self.time, 'DATETIME_FORMAT')
time_and_messages = '%s ' % time + ''.join(map(_, self.message_list))
if self.person is not None:
return _('%(time_and_messages)s by %(person)s') % {'time_and_messages': time_and_messages,
'person': self.person}
return time_and_messages
def get_root_rest_element(self):
"""
Returns the motion to this instance which is the root REST element.
"""
return self.motion
class MotionVote(RESTModelMixin, BaseVote):
"""Saves the votes for a MotionPoll.
There should always be three MotionVote objects for each poll,
one for 'yes', 'no', and 'abstain'."""
option = models.ForeignKey(
'MotionOption',
on_delete=models.CASCADE)
"""The option object, to witch the vote belongs."""
class Meta:
default_permissions = ()
def get_root_rest_element(self):
"""
Returns the motion to this instance which is the root REST element.
"""
return self.option.poll.motion
class MotionOption(RESTModelMixin, BaseOption):
"""Links between the MotionPollClass and the MotionVoteClass.
There should be one MotionOption object for each poll."""
poll = models.ForeignKey(
'MotionPoll',
on_delete=models.CASCADE)
"""The poll object, to witch the object belongs."""
vote_class = MotionVote
"""The VoteClass, to witch this Class links."""
class Meta:
default_permissions = ()
def get_root_rest_element(self):
"""
Returns the motion to this instance which is the root REST element.
"""
return self.poll.motion
class MotionPoll(RESTModelMixin, CollectDefaultVotesMixin, BasePoll):
"""The Class to saves the vote result for a motion poll."""
motion = models.ForeignKey(
Motion,
on_delete=models.CASCADE,
related_name='polls')
"""The motion to witch the object belongs."""
option_class = MotionOption
"""The option class, witch links between this object the the votes."""
vote_values = ['Yes', 'No', 'Abstain']
"""The possible anwers for the poll. 'Yes, 'No' and 'Abstain'."""
class Meta:
default_permissions = ()
def __str__(self):
"""
Representation method only for debugging purposes.
"""
return 'MotionPoll for motion %s' % self.motion
def set_options(self):
"""Create the option class for this poll."""
# TODO: maybe it is possible with .create() to call this without poll=self
# or call this in save()
self.get_option_class()(poll=self).save()
def get_percent_base_choice(self):
return config['motions_poll_100_percent_base']
def get_slide_context(self, **context):
return super(MotionPoll, self).get_slide_context(poll=self)
def get_root_rest_element(self):
"""
Returns the motion to this instance which is the root REST element.
"""
return self.motion
class State(RESTModelMixin, models.Model):
"""
Defines a state for a motion.
Every state belongs to a workflow. All states of a workflow are linked together
via 'next_states'. One of these states is the first state, but this
is saved in the workflow table (one-to-one relation). In every state
you can configure some handling of a motion. See the following fields
for more information.
"""
name = models.CharField(max_length=255)
"""A string representing the state."""
action_word = models.CharField(max_length=255)
"""An alternative string to be used for a button to switch to this state."""
workflow = models.ForeignKey(
'Workflow',
on_delete=models.CASCADE,
related_name='states')
"""A many-to-one relation to a workflow."""
next_states = models.ManyToManyField('self', symmetrical=False)
"""A many-to-many relation to all states, that can be choosen from this state."""
css_class = models.CharField(max_length=255, default='primary')
"""
A css class string for showing the state name in a coloured label based on bootstrap,
e.g. 'danger' (red), 'success' (green), 'warning' (yellow), 'default' (grey).
Default value is 'primary' (blue).
"""
required_permission_to_see = models.CharField(max_length=255, blank=True)
"""
A permission string. If not empty, the user has to have this permission to
see a motion in this state.
To use this feature change the database entry of a state object and add
your favourite permission string. You can do this e. g. by editing the
definitions in create_builtin_workflows() in openslides/motions/signals.py.
"""
allow_support = models.BooleanField(default=False)
"""If true, persons can support the motion in this state."""
allow_create_poll = models.BooleanField(default=False)
"""If true, polls can be created in this state."""
allow_submitter_edit = models.BooleanField(default=False)
"""If true, the submitter can edit the motion in this state."""
versioning = models.BooleanField(default=False)
"""
If true, editing the motion will create a new version by default.
This behavior can be changed by the form and view, e. g. via the
MotionDisableVersioningMixin.
"""
leave_old_version_active = models.BooleanField(default=False)
"""If true, new versions are not automaticly set active."""
dont_set_identifier = models.BooleanField(default=False)
"""
Decides if the motion gets an identifier.
If true, the motion does not get an identifier if the state changes to
this one, else it does.
"""
class Meta:
default_permissions = ()
def __str__(self):
"""Returns the name of the state."""
return self.name
def save(self, **kwargs):
"""Saves a state in the database.
Used to check the integrity before saving.
"""
self.check_next_states()
super(State, self).save(**kwargs)
def get_action_word(self):
"""Returns the alternative name of the state if it exists."""
return self.action_word or self.name
def check_next_states(self):
"""Checks whether all next states of a state belong to the correct workflow."""
# No check if it is a new state which has not been saved yet.
if not self.id:
return
for state in self.next_states.all():
if not state.workflow == self.workflow:
raise WorkflowError('%s can not be next state of %s because it does not belong to the same workflow.' % (state, self))
def get_root_rest_element(self):
"""
Returns the workflow to this instance which is the root REST element.
"""
return self.workflow
class Workflow(RESTModelMixin, models.Model):
"""
Defines a workflow for a motion.
"""
access_permissions = WorkflowAccessPermissions()
name = models.CharField(max_length=255)
"""A string representing the workflow."""
first_state = models.OneToOneField(
State,
on_delete=models.SET_NULL,
related_name='+',
null=True)
"""A one-to-one relation to a state, the starting point for the workflow."""
class Meta:
default_permissions = ()
def __str__(self):
"""Returns the name of the workflow."""
return self.name
def save(self, **kwargs):
"""Saves a workflow in the database.
Used to check the integrity before saving.
"""
self.check_first_state()
super(Workflow, self).save(**kwargs)
def check_first_state(self):
"""Checks whether the first_state itself belongs to the workflow."""
if self.first_state and not self.first_state.workflow == self:
raise WorkflowError(
'%s can not be first state of %s because it '
'does not belong to it.' % (self.first_state, self))
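# Hypothetical usage sketch (Django shell, assuming a configured OpenSlides
# install with a default workflow; not part of this module):
#   motion = Motion()
#   motion.title, motion.text = 'Example motion', 'Example text'
#   motion.save()  # creates MotionVersion 1 and sets the default state
#   motion.title = 'Revised title'
#   motion.save(use_version=motion.get_new_version())  # saves version 2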
| 31.791173 | 134 | 0.618406 | 3,680 | 29,534 | 4.849457 | 0.125543 | 0.022694 | 0.009246 | 0.013617 | 0.277093 | 0.211084 | 0.17057 | 0.124958 | 0.109324 | 0.076208 | 0 | 0.002159 | 0.294136 | 29,534 | 928 | 135 | 31.825431 | 0.853888 | 0.238945 | 0 | 0.265509 | 0 | 0 | 0.0694 | 0.008351 | 0 | 0 | 0 | 0.006466 | 0 | 1 | 0.119107 | false | 0 | 0.042184 | 0.007444 | 0.444169 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f550e9bf9a276fbb780fa8bc882a77497808503 | 58,046 | py | Python | src/gam/var.py | GAM-team/GAM | b45ad5dcafc217690afc3c2e7086c1895f036172 | [
"Apache-2.0"
] | 102 | 2022-01-15T22:08:37.000Z | 2022-03-31T16:02:20.000Z | src/gam/var.py | GAM-team/GAM | b45ad5dcafc217690afc3c2e7086c1895f036172 | [
"Apache-2.0"
] | 29 | 2022-01-14T20:16:51.000Z | 2022-03-25T15:56:33.000Z | src/gam/var.py | GAM-team/GAM | b45ad5dcafc217690afc3c2e7086c1895f036172 | [
"Apache-2.0"
] | 30 | 2022-01-14T22:18:10.000Z | 2022-03-31T17:31:40.000Z | """Variables common across modules"""
# pylint: disable=too-many-lines
import os
import ssl
import string
import sys
import platform
import re
GAM_AUTHOR = 'Jay Lee <jay0lee@gmail.com>'
GAM_VERSION = '6.22'
GAM_LICENSE = 'Apache License 2.0 (http://www.apache.org/licenses/LICENSE-2.0)'
GAM_URL = 'https://jaylee.us/gam'
GAM_INFO = (
f'GAM {GAM_VERSION} - {GAM_URL} / {GAM_AUTHOR} / '
f'Python {platform.python_version()} {sys.version_info.releaselevel} / '
f'{platform.platform()} {platform.machine()}')
GAM_RELEASES = 'https://github.com/GAM-team/GAM/releases'
GAM_WIKI = 'https://github.com/GAM-team/GAM/wiki'
GAM_ALL_RELEASES = 'https://api.github.com/repos/GAM-team/GAM/releases'
GAM_LATEST_RELEASE = GAM_ALL_RELEASES + '/latest'
GAM_PROJECT_FILEPATH = 'https://raw.githubusercontent.com/GAM-team/GAM/master/src/'
true_values = ['on', 'yes', 'enabled', 'true', '1']
false_values = ['off', 'no', 'disabled', 'false', '0']
usergroup_types = [
'user', 'users', 'group', 'group_ns', 'group_susp', 'group_inde', 'ou',
'org', 'ou_ns', 'org_ns', 'ou_susp', 'org_susp', 'ou_and_children',
'ou_and_child', 'ou_and_children_ns', 'ou_and_child_ns',
'ou_and_children_susp', 'ou_and_child_susp', 'query', 'queries', 'license',
'licenses', 'licence', 'licences', 'file', 'csv', 'csvfile', 'all', 'cros',
'cros_sn', 'crosquery', 'crosqueries', 'crosfile', 'croscsv', 'croscsvfile'
]
ERROR_PREFIX = 'ERROR: '
WARNING_PREFIX = 'WARNING: '
UTF8 = 'utf-8'
UTF8_SIG = 'utf-8-sig'
FN_ENABLEDASA_TXT = 'enabledasa.txt'
FN_EXTRA_ARGS_TXT = 'extra-args.txt'
FN_LAST_UPDATE_CHECK_TXT = 'lastupdatecheck.txt'
MY_CUSTOMER = 'my_customer'
# See https://support.google.com/drive/answer/37603
MAX_GOOGLE_SHEET_CELLS = 10000000
MAX_LOCAL_GOOGLE_TIME_OFFSET = 30
SKUS = {
'1010010001': {
'product': '101001',
'aliases': ['identity', 'cloudidentity'],
'displayName': 'Cloud Identity'
},
'1010050001': {
'product': '101005',
'aliases': ['identitypremium', 'cloudidentitypremium'],
'displayName': 'Cloud Identity Premium'
},
'1010350001': {
'product': '101035',
'aliases': ['cloudsearch'],
'displayName': 'Google Cloud Search',
},
'1010310002': {
'product': '101031',
'aliases': ['gsefe', 'e4e', 'gsuiteenterpriseeducation'],
'displayName': 'Google Workspace for Education Plus - Legacy'
},
'1010310003': {
'product': '101031',
'aliases': ['gsefes', 'e4es', 'gsuiteenterpriseeducationstudent'],
'displayName': 'Google Workspace for Education Plus - Legacy (Student)'
},
'1010310005': {
'product': '101031',
'aliases': ['gwes', 'workspaceeducationstandard'],
'displayName': 'Google Workspace for Education Standard'
},
'1010310006': {
'product': '101031',
'aliases': ['gwesstaff', 'workspaceeducationstandardstaff'],
'displayName': 'Google Workspace for Education Standard (Staff)'
},
'1010310007': {
'product': '101031',
'aliases': ['gwesstudent', 'workspaceeducationstandardstudent'],
'displayName': 'Google Workspace for Education Standard (Extra Student)'
},
'1010310008': {
'product': '101031',
'aliases': ['gwep', 'workspaceeducationplus'],
'displayName': 'Google Workspace for Education Plus'
},
'1010310009': {
'product': '101031',
'aliases': ['gwepstaff', 'workspaceeducationplusstaff'],
'displayName': 'Google Workspace for Education Plus (Staff)'
},
'1010310010': {
'product': '101031',
'aliases': ['gwepstudent', 'workspaceeducationplusstudent'],
'displayName': 'Google Workspace for Education Plus (Extra Student)'
},
'1010330003': {
'product': '101033',
'aliases': ['gvstarter', 'voicestarter', 'googlevoicestarter'],
'displayName': 'Google Voice Starter'
},
'1010330004': {
'product': '101033',
'aliases': ['gvstandard', 'voicestandard', 'googlevoicestandard'],
'displayName': 'Google Voice Standard'
},
'1010330002': {
'product': '101033',
'aliases': ['gvpremier', 'voicepremier', 'googlevoicepremier'],
'displayName': 'Google Voice Premier'
},
'1010360001': {
'product': '101036',
'aliases': ['meetdialing', 'googlemeetglobaldialing'],
'displayName': 'Google Meet Global Dialing'
},
'1010370001': {
'product': '101037',
'aliases': ['gwetlu', 'workspaceeducationupgrade'],
'displayName': 'Google Workspace for Education: Teaching and Learning Upgrade'
},
'Google-Apps': {
'product': 'Google-Apps',
'aliases': ['standard', 'free'],
'displayName': 'G Suite Legacy'
},
'Google-Apps-For-Business': {
'product': 'Google-Apps',
'aliases': ['gafb', 'gafw', 'basic', 'gsuitebasic'],
'displayName': 'G Suite Basic'
},
'Google-Apps-For-Government': {
'product': 'Google-Apps',
'aliases': ['gafg', 'gsuitegovernment', 'gsuitegov'],
'displayName': 'G Suite Government'
},
'Google-Apps-For-Postini': {
'product': 'Google-Apps',
'aliases': [
'gams', 'postini', 'gsuitegams', 'gsuitepostini',
'gsuitemessagesecurity'
],
'displayName': 'G Suite Message Security'
},
'Google-Apps-Lite': {
'product': 'Google-Apps',
'aliases': ['gal', 'gsl', 'lite', 'gsuitelite'],
'displayName': 'G Suite Lite'
},
'Google-Apps-Unlimited': {
'product': 'Google-Apps',
'aliases': ['gau', 'gsb', 'unlimited', 'gsuitebusiness'],
'displayName': 'G Suite Business'
},
'1010020027': {
'product': 'Google-Apps',
'aliases': ['wsbizstart', 'workspacebusinessstarter'],
'displayName': 'Workspace Business Starter'
},
'1010020028': {
'product': 'Google-Apps',
'aliases': ['wsbizstan', 'workspacebusinessstandard'],
'displayName': 'Workspace Business Standard'
},
'1010020025': {
'product': 'Google-Apps',
'aliases': ['wsbizplus', 'workspacebusinessplus'],
'displayName': 'Workspace Business Plus'
},
'1010060001': {
'product': 'Google-Apps',
'aliases': [
'gsuiteessentials', 'essentials', 'd4e', 'driveenterprise',
'drive4enterprise', 'wsess', 'workspaceesentials'
],
'displayName': 'Google Workspace Essentials'
},
'1010060003': {
'product': 'Google-Apps',
'aliases': ['wsentess', 'workspaceenterpriseessentials'],
'displayName': 'Workspace Enterprise Essentials'
},
'1010020026': {
'product': 'Google-Apps',
'aliases': ['wsentstan', 'workspaceenterprisestandard'],
'displayName': 'Workspace Enterprise Standard'
},
'1010020020': {
'product': 'Google-Apps',
'aliases': ['gae', 'gse', 'enterprise', 'gsuiteenterprise',
'wsentplus', 'workspaceenterpriseplus'],
'displayName': 'Workspace Enterprise Plus'
},
'1010020029': {
'product': 'Google-Apps',
'aliases': ['wes', 'workspaceenterprisestarter'],
'displayName': 'Workspace Enterprise Starter'
},
'1010020030': {
'product': 'Google-Apps',
'aliases': ['workspacefrontline', 'workspacefrontlineworker'],
'displayName': 'Workspace Frontline'
},
'1010340002': {
'product': '101034',
'aliases': ['gsbau', 'businessarchived', 'gsuitebusinessarchived'],
'displayName': 'G Suite Business Archived'
},
'1010340001': {
'product': '101034',
'aliases': ['gseau', 'enterprisearchived', 'gsuiteenterprisearchived'],
'displayName': 'Google Workspace Enterprise Plus Archived'
},
'Google-Drive-storage-20GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive20gb', '20gb', 'googledrivestorage20gb'],
'displayName': 'Google Drive Storage 20GB'
},
'Google-Drive-storage-50GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive50gb', '50gb', 'googledrivestorage50gb'],
'displayName': 'Google Drive Storage 50GB'
},
'Google-Drive-storage-200GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive200gb', '200gb', 'googledrivestorage200gb'],
'displayName': 'Google Drive Storage 200GB'
},
'Google-Drive-storage-400GB': {
'product': 'Google-Drive-storage',
'aliases': ['drive400gb', '400gb', 'googledrivestorage400gb'],
'displayName': 'Google Drive Storage 400GB'
},
'Google-Drive-storage-1TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive1tb', '1tb', 'googledrivestorage1tb'],
'displayName': 'Google Drive Storage 1TB'
},
'Google-Drive-storage-2TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive2tb', '2tb', 'googledrivestorage2tb'],
'displayName': 'Google Drive Storage 2TB'
},
'Google-Drive-storage-4TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive4tb', '4tb', 'googledrivestorage4tb'],
'displayName': 'Google Drive Storage 4TB'
},
'Google-Drive-storage-8TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive8tb', '8tb', 'googledrivestorage8tb'],
'displayName': 'Google Drive Storage 8TB'
},
'Google-Drive-storage-16TB': {
'product': 'Google-Drive-storage',
'aliases': ['drive16tb', '16tb', 'googledrivestorage16tb'],
'displayName': 'Google Drive Storage 16TB'
},
'Google-Vault': {
'product': 'Google-Vault',
'aliases': ['vault', 'googlevault'],
'displayName': 'Google Vault'
},
'Google-Vault-Former-Employee': {
'product': 'Google-Vault',
'aliases': ['vfe', 'googlevaultformeremployee'],
'displayName': 'Google Vault Former Employee'
},
'Google-Chrome-Device-Management': {
'product': 'Google-Chrome-Device-Management',
'aliases': ['chrome', 'cdm', 'googlechromedevicemanagement'],
'displayName': 'Google Chrome Device Management'
}
}
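# Hypothetical helper (illustration only, not part of GAM's actual code):
# resolve a user-supplied alias such as 'gsuitebasic' to its SKU id via
# the SKUS map above.
def _sku_id_from_alias(alias):
    alias = alias.lower().replace(' ', '')
    for sku_id, sku in SKUS.items():
        if alias == sku_id.lower() or alias in sku['aliases']:
            return sku_id
    return None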
PRODUCTID_NAME_MAPPINGS = {
'101001': 'Cloud Identity Free',
'101005': 'Cloud Identity Premium',
'101031': 'G Suite Workspace for Education',
'101033': 'Google Voice',
'101034': 'G Suite Archived',
'101035': 'Cloud Search',
'101036': 'Google Meet Global Dialing',
'101037': 'G Suite Workspace for Education',
'Google-Apps': 'Google Workspace',
'Google-Chrome-Device-Management': 'Google Chrome Device Management',
'Google-Drive-storage': 'Google Drive Storage',
'Google-Vault': 'Google Vault',
}
# Legacy APIs that use v1 discovery. Newer APIs should all use v2.
V1_DISCOVERY_APIS = {
'drive',
'oauth2',
}
API_NAME_MAPPING = {
'directory': 'admin',
'directory_beta': 'admin',
'reports': 'admin',
'datatransfer': 'admin',
'drive3': 'drive',
'calendar': 'calendar-json',
'cloudidentity_beta': 'cloudidentity',
}
API_VER_MAPPING = {
'accesscontextmanager': 'v1',
'alertcenter': 'v1beta1',
'driveactivity': 'v2',
'calendar': 'v3',
'cbcm': 'v1.1beta1',
'chromemanagement': 'v1',
'chromepolicy': 'v1',
'classroom': 'v1',
'cloudidentity': 'v1',
'cloudidentity_beta': 'v1beta1',
'cloudresourcemanager': 'v3',
'contactdelegation': 'v1',
'datatransfer': 'datatransfer_v1',
'directory': 'directory_v1',
'directory_beta': 'directory_v1.1beta1',
'drive': 'v2',
'drive3': 'v3',
'gmail': 'v1',
'groupssettings': 'v1',
'iam': 'v1',
'iap': 'v1',
'licensing': 'v1',
'oauth2': 'v2',
'pubsub': 'v1',
'reports': 'reports_v1',
'reseller': 'v1',
'servicemanagement': 'v1',
'serviceusage': 'v1',
'sheets': 'v4',
'siteVerification': 'v1',
'storage': 'v1',
'vault': 'v1',
'versionhistory': 'v1',
}
USERINFO_EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
API_SCOPE_MAPPING = {
'alertcenter': ['https://www.googleapis.com/auth/apps.alerts',],
'driveactivity': [
'https://www.googleapis.com/auth/drive.activity',
'https://www.googleapis.com/auth/drive',
],
'calendar': ['https://www.googleapis.com/auth/calendar',],
'cloudidentity': ['https://www.googleapis.com/auth/cloud-identity'],
'cloudidentity_beta': ['https://www.googleapis.com/auth/cloud-identity'],
'drive': ['https://www.googleapis.com/auth/drive',],
'drive3': ['https://www.googleapis.com/auth/drive',],
'gmail': [
'https://mail.google.com/',
'https://www.googleapis.com/auth/gmail.settings.basic',
'https://www.googleapis.com/auth/gmail.settings.sharing',
],
'sheets': ['https://www.googleapis.com/auth/spreadsheets',],
}
ADDRESS_FIELDS_PRINT_ORDER = [
'contactName',
'organizationName',
'addressLine1',
'addressLine2',
'addressLine3',
'locality',
'region',
'postalCode',
'countryCode',
]
ADDRESS_FIELDS_ARGUMENT_MAP = {
'contact': 'contactName',
'contactname': 'contactName',
'name': 'organizationName',
'organizationname': 'organizationName',
'address': 'addressLine1',
'address1': 'addressLine1',
'addressline1': 'addressLine1',
'address2': 'addressLine2',
'addressline2': 'addressLine2',
'address3': 'addressLine3',
'addressline3': 'addressLine3',
'city': 'locality',
'locality': 'locality',
'state': 'region',
'region': 'region',
'zipcode': 'postalCode',
'postal': 'postalCode',
'postalcode': 'postalCode',
'country': 'countryCode',
'countrycode': 'countryCode',
}
SERVICE_NAME_TO_ID_MAP = {
'Calendar': '435070579839',
'Currents': '553547912911',
'Drive and Docs': '55656082996',
'Google Data Studio': '810260081642',
}
SERVICE_NAME_CHOICES_MAP = {
'calendar': 'Calendar',
'currents': 'Currents',
'datastudio': 'Google Data Studio',
'google data studio': 'Google Data Studio',
'drive': 'Drive and Docs',
'drive and docs': 'Drive and Docs',
'googledrive': 'Drive and Docs',
'gdrive': 'Drive and Docs',
}
PRINTJOB_ASCENDINGORDER_MAP = {
'createtime': 'CREATE_TIME',
'status': 'STATUS',
'title': 'TITLE',
}
PRINTJOB_DESCENDINGORDER_MAP = {
'CREATE_TIME': 'CREATE_TIME_DESC',
'STATUS': 'STATUS_DESC',
'TITLE': 'TITLE_DESC',
}
PRINTJOBS_DEFAULT_JOB_LIMIT = 0
PRINTJOBS_DEFAULT_MAX_RESULTS = 100
CALENDAR_REMINDER_METHODS = [
'email',
'sms',
'popup',
]
CALENDAR_NOTIFICATION_METHODS = [
'email',
'sms',
]
CALENDAR_NOTIFICATION_TYPES_MAP = {
'eventcreation': 'eventCreation',
'eventchange': 'eventChange',
'eventcancellation': 'eventCancellation',
'eventresponse': 'eventResponse',
'agenda': 'agenda',
}
DEVICE_ORDERBY_CHOICES_MAP = {
'createtime': 'create_time',
'devicetype': 'device_type',
'lastsynctime': 'last_sync_time',
'model': 'model',
'osversion': 'os_version',
'serialnumber': 'serial_number'
}
DRIVEFILE_FIELDS_CHOICES_MAP = {
'alternatelink': 'alternateLink',
'appdatacontents': 'appDataContents',
'cancomment': 'canComment',
'canreadrevisions': 'canReadRevisions',
'contentrestrictions': 'contentRestrictions',
'copyable': 'copyable',
'copyrequireswriterpermission': 'copyRequiresWriterPermission',
'createddate': 'createdDate',
'createdtime': 'createdDate',
'description': 'description',
'driveid': 'driveId',
'editable': 'editable',
'explicitlytrashed': 'explicitlyTrashed',
'fileextension': 'fileExtension',
'filesize': 'fileSize',
'foldercolorrgb': 'folderColorRgb',
'fullfileextension': 'fullFileExtension',
'headrevisionid': 'headRevisionId',
'iconlink': 'iconLink',
'id': 'id',
'lastmodifyinguser': 'lastModifyingUser',
'lastmodifyingusername': 'lastModifyingUserName',
'lastviewedbyme': 'lastViewedByMeDate',
'lastviewedbymedate': 'lastViewedByMeDate',
'lastviewedbymetime': 'lastViewedByMeDate',
'lastviewedbyuser': 'lastViewedByMeDate',
'linksharemetadata': 'linkShareMetadata',
'md5': 'md5Checksum',
'md5checksum': 'md5Checksum',
'md5sum': 'md5Checksum',
'mime': 'mimeType',
'mimetype': 'mimeType',
'modifiedbyme': 'modifiedByMeDate',
'modifiedbymedate': 'modifiedByMeDate',
'modifiedbymetime': 'modifiedByMeDate',
'modifiedbyuser': 'modifiedByMeDate',
'modifieddate': 'modifiedDate',
'modifiedtime': 'modifiedDate',
'name': 'title',
'originalfilename': 'originalFilename',
'ownedbyme': 'ownedByMe',
'ownernames': 'ownerNames',
'owners': 'owners',
'parents': 'parents',
'permissions': 'permissions',
'resourcekey': 'resourceKey',
'quotabytesused': 'quotaBytesUsed',
'quotaused': 'quotaBytesUsed',
'shareable': 'shareable',
'shared': 'shared',
'sharedwithmedate': 'sharedWithMeDate',
'sharedwithmetime': 'sharedWithMeDate',
'sharinguser': 'sharingUser',
'shortcutdetails': 'shortcutDetails',
'spaces': 'spaces',
'thumbnaillink': 'thumbnailLink',
'title': 'title',
'userpermission': 'userPermission',
'version': 'version',
'viewedbyme': 'labels(viewed)',
'viewedbymedate': 'lastViewedByMeDate',
'viewedbymetime': 'lastViewedByMeDate',
'viewerscancopycontent': 'labels(restricted)',
'webcontentlink': 'webContentLink',
'webviewlink': 'webViewLink',
'writerscanshare': 'writersCanShare',
}
DRIVEFILE_LABEL_CHOICES_MAP = {
'restricted': 'restricted',
'restrict': 'restricted',
'starred': 'starred',
'star': 'starred',
'trashed': 'trashed',
'trash': 'trashed',
'viewed': 'viewed',
'view': 'viewed',
}
DRIVEFILE_ORDERBY_CHOICES_MAP = {
'createddate': 'createdDate',
'folder': 'folder',
'lastviewedbyme': 'lastViewedByMeDate',
'lastviewedbymedate': 'lastViewedByMeDate',
'lastviewedbyuser': 'lastViewedByMeDate',
'modifiedbyme': 'modifiedByMeDate',
'modifiedbymedate': 'modifiedByMeDate',
'modifiedbyuser': 'modifiedByMeDate',
'modifieddate': 'modifiedDate',
'name': 'title',
'quotabytesused': 'quotaBytesUsed',
'quotaused': 'quotaBytesUsed',
'recency': 'recency',
'sharedwithmedate': 'sharedWithMeDate',
'starred': 'starred',
'title': 'title',
'viewedbymedate': 'lastViewedByMeDate',
}
DELETE_DRIVEFILE_FUNCTION_TO_ACTION_MAP = {
'delete': 'purging',
'trash': 'trashing',
'untrash': 'untrashing',
}
APPLICATION_VND_GOOGLE_APPS = 'application/vnd.google-apps.'
MIMETYPE_GA_DOCUMENT = f'{APPLICATION_VND_GOOGLE_APPS}document'
MIMETYPE_GA_DRAWING = f'{APPLICATION_VND_GOOGLE_APPS}drawing'
MIMETYPE_GA_FOLDER = f'{APPLICATION_VND_GOOGLE_APPS}folder'
MIMETYPE_GA_FORM = f'{APPLICATION_VND_GOOGLE_APPS}form'
MIMETYPE_GA_FUSIONTABLE = f'{APPLICATION_VND_GOOGLE_APPS}fusiontable'
MIMETYPE_GA_MAP = f'{APPLICATION_VND_GOOGLE_APPS}map'
MIMETYPE_GA_PRESENTATION = f'{APPLICATION_VND_GOOGLE_APPS}presentation'
MIMETYPE_GA_SCRIPT = f'{APPLICATION_VND_GOOGLE_APPS}script'
MIMETYPE_GA_SITES = f'{APPLICATION_VND_GOOGLE_APPS}sites'
MIMETYPE_GA_SPREADSHEET = f'{APPLICATION_VND_GOOGLE_APPS}spreadsheet'
MIMETYPE_GA_SHORTCUT = f'{APPLICATION_VND_GOOGLE_APPS}shortcut'
MIMETYPE_GA_3P_SHORTCUT = f'{APPLICATION_VND_GOOGLE_APPS}drive-sdk'
MIMETYPE_CHOICES_MAP = {
'gdoc': MIMETYPE_GA_DOCUMENT,
'gdocument': MIMETYPE_GA_DOCUMENT,
'gdrawing': MIMETYPE_GA_DRAWING,
'gfolder': MIMETYPE_GA_FOLDER,
'gdirectory': MIMETYPE_GA_FOLDER,
'gform': MIMETYPE_GA_FORM,
'gfusion': MIMETYPE_GA_FUSIONTABLE,
'gpresentation': MIMETYPE_GA_PRESENTATION,
'gscript': MIMETYPE_GA_SCRIPT,
'gshortcut': MIMETYPE_GA_SHORTCUT,
'g3pshortcut': MIMETYPE_GA_3P_SHORTCUT,
'gsite': MIMETYPE_GA_SITES,
'gsheet': MIMETYPE_GA_SPREADSHEET,
'gspreadsheet': MIMETYPE_GA_SPREADSHEET,
'shortcut': MIMETYPE_GA_SHORTCUT,
}
DFA_CONVERT = 'convert'
DFA_LOCALFILEPATH = 'localFilepath'
DFA_LOCALFILENAME = 'localFilename'
DFA_LOCALMIMETYPE = 'localMimeType'
DFA_OCR = 'ocr'
DFA_OCRLANGUAGE = 'ocrLanguage'
DFA_PARENTQUERY = 'parentQuery'
NON_DOWNLOADABLE_MIMETYPES = [
MIMETYPE_GA_FORM, MIMETYPE_GA_FUSIONTABLE, MIMETYPE_GA_MAP
]
GOOGLEDOC_VALID_EXTENSIONS_MAP = {
MIMETYPE_GA_DRAWING: ['.jpeg', '.jpg', '.pdf', '.png', '.svg'],
MIMETYPE_GA_DOCUMENT: [
'.docx', '.html', '.odt', '.pdf', '.rtf', '.txt', '.zip'
],
MIMETYPE_GA_PRESENTATION: ['.pdf', '.pptx', '.odp', '.txt'],
MIMETYPE_GA_SPREADSHEET: ['.csv', '.ods', '.pdf', '.xlsx', '.zip'],
}
MACOS_CODENAMES = {
10: {
6: 'Snow Leopard',
7: 'Lion',
8: 'Mountain Lion',
9: 'Mavericks',
10: 'Yosemite',
11: 'El Capitan',
12: 'Sierra',
13: 'High Sierra',
14: 'Mojave',
15: 'Catalina',
16: 'Big Sur'
},
11: 'Big Sur',
12: 'Monterey',
}
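# Hedged lookup sketch for the table above; `major` and `minor` are
# hypothetical integers parsed from an OS version string such as '10.15.7':
#   entry = MACOS_CODENAMES.get(major, 'Unknown')
#   codename = entry.get(minor, 'Unknown') if isinstance(entry, dict) else entry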
_MICROSOFT_FORMATS_LIST = [{
'mime':
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'ext':
'.docx'
}, {
'mime':
'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'ext':
'.dotx'
}, {
'mime':
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'ext':
'.pptx'
}, {
'mime':
'application/vnd.openxmlformats-officedocument.presentationml.template',
'ext':
'.potx'
}, {
'mime': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'ext': '.xlsx'
}, {
'mime':
'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'ext':
'.xltx'
}, {
'mime': 'application/msword',
'ext': '.doc'
}, {
'mime': 'application/msword',
'ext': '.dot'
}, {
'mime': 'application/vnd.ms-powerpoint',
'ext': '.ppt'
}, {
'mime': 'application/vnd.ms-powerpoint',
'ext': '.pot'
}, {
'mime': 'application/vnd.ms-excel',
'ext': '.xls'
}, {
'mime': 'application/vnd.ms-excel',
'ext': '.xlt'
}]
DOCUMENT_FORMATS_MAP = {
'csv': [{
'mime': 'text/csv',
'ext': '.csv'
}],
'doc': [{
'mime': 'application/msword',
'ext': '.doc'
}],
'dot': [{
'mime': 'application/msword',
'ext': '.dot'
}],
'docx': [{
'mime':
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'ext':
'.docx'
}],
'dotx': [{
'mime':
'application/vnd.openxmlformats-officedocument.wordprocessingml.template',
'ext':
'.dotx'
}],
'epub': [{
'mime': 'application/epub+zip',
'ext': '.epub'
}],
'html': [{
'mime': 'text/html',
'ext': '.html'
}],
'jpeg': [{
'mime': 'image/jpeg',
'ext': '.jpeg'
}],
'jpg': [{
'mime': 'image/jpeg',
'ext': '.jpg'
}],
    'mht': [{
        'mime': 'message/rfc822',
        'ext': '.mht'
    }],
'odp': [{
'mime': 'application/vnd.oasis.opendocument.presentation',
'ext': '.odp'
}],
'ods': [{
'mime': 'application/x-vnd.oasis.opendocument.spreadsheet',
'ext': '.ods'
}, {
'mime': 'application/vnd.oasis.opendocument.spreadsheet',
'ext': '.ods'
}],
'odt': [{
'mime': 'application/vnd.oasis.opendocument.text',
'ext': '.odt'
}],
'pdf': [{
'mime': 'application/pdf',
'ext': '.pdf'
}],
'png': [{
'mime': 'image/png',
'ext': '.png'
}],
'ppt': [{
'mime': 'application/vnd.ms-powerpoint',
'ext': '.ppt'
}],
'pot': [{
'mime': 'application/vnd.ms-powerpoint',
'ext': '.pot'
}],
'potx': [{
'mime':
'application/vnd.openxmlformats-officedocument.presentationml.template',
'ext':
'.potx'
}],
'pptx': [{
'mime':
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'ext':
'.pptx'
}],
'rtf': [{
'mime': 'application/rtf',
'ext': '.rtf'
}],
'svg': [{
'mime': 'image/svg+xml',
'ext': '.svg'
}],
'tsv': [{
'mime': 'text/tab-separated-values',
'ext': '.tsv'
}, {
'mime': 'text/tsv',
'ext': '.tsv'
}],
'txt': [{
'mime': 'text/plain',
'ext': '.txt'
}],
'xls': [{
'mime': 'application/vnd.ms-excel',
'ext': '.xls'
}],
'xlt': [{
'mime': 'application/vnd.ms-excel',
'ext': '.xlt'
}],
'xlsx': [{
'mime':
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'ext':
'.xlsx'
}],
'xltx': [{
'mime':
'application/vnd.openxmlformats-officedocument.spreadsheetml.template',
'ext':
'.xltx'
}],
'zip': [{
'mime': 'application/zip',
'ext': '.zip'
}],
'ms':
_MICROSOFT_FORMATS_LIST,
'microsoft':
_MICROSOFT_FORMATS_LIST,
'micro$oft':
_MICROSOFT_FORMATS_LIST,
'openoffice': [{
'mime': 'application/vnd.oasis.opendocument.presentation',
'ext': '.odp'
}, {
'mime': 'application/x-vnd.oasis.opendocument.spreadsheet',
'ext': '.ods'
}, {
'mime': 'application/vnd.oasis.opendocument.spreadsheet',
'ext': '.ods'
}, {
'mime': 'application/vnd.oasis.opendocument.text',
'ext': '.odt'
}],
}
REFRESH_PERM_ERRORS = [
'invalid_grant: reauth related error (rapt_required)', # no way to reauth today
'invalid_grant: Token has been expired or revoked.',
]
DNS_ERROR_CODES_MAP = {
1: 'DNS Query Format Error',
2: 'Server failed to complete the DNS request',
3: 'Domain name does not exist',
4: 'Function not implemented',
    5: 'The server refused to answer the query',
6: 'Name that should not exist, does exist',
7: 'RRset that should not exist, does exist',
8: 'Server not authoritative for the zone',
9: 'Name not in zone'
}
EMAILSETTINGS_OLD_NEW_OLD_FORWARD_ACTION_MAP = {
'ARCHIVE': 'archive',
'DELETE': 'trash',
    'KEEP': 'leaveInInbox',
'MARK_READ': 'markRead',
'archive': 'ARCHIVE',
'trash': 'DELETE',
'leaveInInbox': 'KEEP',
'markRead': 'MARK_READ',
}
EMAILSETTINGS_IMAP_EXPUNGE_BEHAVIOR_CHOICES_MAP = {
'archive': 'archive',
'deleteforever': 'deleteForever',
'trash': 'trash',
}
EMAILSETTINGS_IMAP_MAX_FOLDER_SIZE_CHOICES = [
'0', '1000', '2000', '5000', '10000'
]
EMAILSETTINGS_POP_ENABLE_FOR_CHOICES_MAP = {
'allmail': 'allMail',
'fromnowon': 'fromNowOn',
'mailfromnowon': 'fromNowOn',
'newmail': 'fromNowOn',
}
EMAILSETTINGS_FORWARD_POP_ACTION_CHOICES_MAP = {
'archive': 'archive',
'delete': 'trash',
'keep': 'leaveInInbox',
'leaveininbox': 'leaveInInbox',
'markread': 'markRead',
'trash': 'trash',
}
RT_PATTERN = re.compile(r'(?s){RT}.*?{(.+?)}.*?{/RT}')
RT_OPEN_PATTERN = re.compile(r'{RT}')
RT_CLOSE_PATTERN = re.compile(r'{/RT}')
RT_STRIP_PATTERN = re.compile(r'(?s){RT}.*?{/RT}')
RT_TAG_REPLACE_PATTERN = re.compile(r'{(.*?)}')
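# Illustrative (hedged) examples of the RT template-tag patterns above; the
# sample template text is hypothetical:
#   RT_PATTERN.findall('Hello {RT}{FirstName} {/RT}user')  -> ['FirstName']
#   RT_STRIP_PATTERN.sub('', 'Hello {RT}{FirstName} {/RT}user')  -> 'Hello user'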
LOWERNUMERIC_CHARS = string.ascii_lowercase + string.digits
ALPHANUMERIC_CHARS = LOWERNUMERIC_CHARS + string.ascii_uppercase
URL_SAFE_CHARS = ALPHANUMERIC_CHARS + '-._~'
FILTER_ADD_LABEL_TO_ARGUMENT_MAP = {
'IMPORTANT': 'important',
'STARRED': 'star',
'TRASH': 'trash',
}
FILTER_REMOVE_LABEL_TO_ARGUMENT_MAP = {
'IMPORTANT': 'notimportant',
'UNREAD': 'markread',
'INBOX': 'archive',
'SPAM': 'neverspam',
}
FILTER_CRITERIA_CHOICES_MAP = {
'excludechats': 'excludeChats',
'from': 'from',
'hasattachment': 'hasAttachment',
'haswords': 'query',
'musthaveattachment': 'hasAttachment',
'negatedquery': 'negatedQuery',
'nowords': 'negatedQuery',
'query': 'query',
'size': 'size',
'subject': 'subject',
'to': 'to',
}
FILTER_ACTION_CHOICES = [
'archive',
'forward',
'important',
'label',
'markread',
'neverspam',
'notimportant',
'star',
'trash',
]
VAULT_MATTER_ACTIONS = [
'reopen',
'undelete',
'close',
'delete',
]
CROS_ARGUMENT_TO_PROPERTY_MAP = {
'activetimeranges': [
'activeTimeRanges.activeTime', 'activeTimeRanges.date'
],
'annotatedassetid': ['annotatedAssetId',],
'annotatedlocation': ['annotatedLocation',],
'annotateduser': ['annotatedUser',],
'asset': ['annotatedAssetId',],
'assetid': ['annotatedAssetId',],
'autoupdateexpiration': ['autoUpdateExpiration',],
'bootmode': ['bootMode',],
'cpustatusreports': ['cpuStatusReports',],
'devicefiles': ['deviceFiles',],
'deviceid': ['deviceId',],
'dockmacaddress': ['dockMacAddress',],
'diskvolumereports': ['diskVolumeReports',],
'ethernetmacaddress': ['ethernetMacAddress',],
'ethernetmacaddress0': ['ethernetMacAddress0',],
'firmwareversion': ['firmwareVersion',],
'lastenrollmenttime': ['lastEnrollmentTime',],
'lastknownnetwork': ['lastKnownNetwork'],
'lastsync': ['lastSync',],
'location': ['annotatedLocation',],
'macaddress': ['macAddress',],
'manufacturedate': ['manufactureDate',],
'meid': ['meid',],
'model': ['model',],
'notes': ['notes',],
'ordernumber': ['orderNumber',],
'org': ['orgUnitPath',],
'orgunitid': ['orgUnitId',],
'orgunitpath': ['orgUnitPath',],
'osversion': ['osVersion',],
'ou': ['orgUnitPath',],
'platformversion': ['platformVersion',],
'recentusers': ['recentUsers.email', 'recentUsers.type'],
'serialnumber': ['serialNumber',],
'status': ['status',],
'supportenddate': ['supportEndDate',],
'systemramtotal': ['systemRamTotal',],
'systemramfreereports': ['systemRamFreeReports',],
'tag': ['annotatedAssetId',],
'timeranges': ['activeTimeRanges.activeTime', 'activeTimeRanges.date'],
'times': ['activeTimeRanges.activeTime', 'activeTimeRanges.date'],
'tpmversioninfo': ['tpmVersionInfo',],
'user': ['annotatedUser',],
'users': ['recentUsers.email', 'recentUsers.type'],
'willautorenew': ['willAutoRenew',],
}
CROS_BASIC_FIELDS_LIST = [
'deviceId', 'annotatedAssetId', 'annotatedLocation', 'annotatedUser',
'lastSync', 'notes', 'serialNumber', 'status'
]
CROS_SCALAR_PROPERTY_PRINT_ORDER = [
'orgUnitId',
'orgUnitPath',
'annotatedAssetId',
'annotatedLocation',
'annotatedUser',
'lastSync',
'notes',
'serialNumber',
'status',
'model',
'firmwareVersion',
'platformVersion',
'osVersion',
'bootMode',
'meid',
'dockMacAddress',
'ethernetMacAddress',
'ethernetMacAddress0',
'macAddress',
'systemRamTotal',
'lastEnrollmentTime',
'orderNumber',
'manufactureDate',
'supportEndDate',
'autoUpdateExpiration',
'tpmVersionInfo',
'willAutoRenew',
]
CROS_RECENT_USERS_ARGUMENTS = ['recentusers', 'users']
CROS_ACTIVE_TIME_RANGES_ARGUMENTS = ['timeranges', 'activetimeranges', 'times']
CROS_DEVICE_FILES_ARGUMENTS = ['devicefiles', 'files']
CROS_CPU_STATUS_REPORTS_ARGUMENTS = [
'cpustatusreports',
]
CROS_DISK_VOLUME_REPORTS_ARGUMENTS = [
'diskvolumereports',
]
CROS_SYSTEM_RAM_FREE_REPORTS_ARGUMENTS = [
'systemramfreereports',
]
CROS_LISTS_ARGUMENTS = CROS_ACTIVE_TIME_RANGES_ARGUMENTS + \
CROS_RECENT_USERS_ARGUMENTS + \
CROS_DEVICE_FILES_ARGUMENTS + \
CROS_CPU_STATUS_REPORTS_ARGUMENTS + \
CROS_DISK_VOLUME_REPORTS_ARGUMENTS + \
CROS_SYSTEM_RAM_FREE_REPORTS_ARGUMENTS
CROS_START_ARGUMENTS = ['start', 'startdate', 'oldestdate']
CROS_END_ARGUMENTS = ['end', 'enddate']
# From https://www.chromium.org/chromium-os/tpm_firmware_update
CROS_TPM_VULN_VERSIONS = [
'41f',
'420',
'628',
'8520',
]
CROS_TPM_FIXED_VERSIONS = [
'422',
'62b',
'8521',
]
COLLABORATIVE_INBOX_ATTRIBUTES = [
'whoCanAddReferences',
'whoCanAssignTopics',
'whoCanEnterFreeFormTags',
'whoCanMarkDuplicate',
'whoCanMarkFavoriteReplyOnAnyTopic',
'whoCanMarkFavoriteReplyOnOwnTopic',
'whoCanMarkNoResponseNeeded',
'whoCanModifyTagsAndCategories',
'whoCanTakeTopics',
'whoCanUnassignTopic',
'whoCanUnmarkFavoriteReplyOnAnyTopic',
'favoriteRepliesOnTop',
]
GROUP_SETTINGS_LIST_ATTRIBUTES = {
# ACL choices
'whoCanAdd',
'whoCanApproveMembers',
'whoCanApproveMessages',
'whoCanAssignTopics',
'whoCanAssistContent',
'whoCanBanUsers',
'whoCanContactOwner',
'whoCanDeleteAnyPost',
'whoCanDeleteTopics',
'whoCanDiscoverGroup',
'whoCanEnterFreeFormTags',
'whoCanHideAbuse',
'whoCanInvite',
'whoCanJoin',
'whoCanLeaveGroup',
'whoCanLockTopics',
'whoCanMakeTopicsSticky',
'whoCanMarkDuplicate',
'whoCanMarkFavoriteReplyOnAnyTopic',
'whoCanMarkFavoriteReplyOnOwnTopic',
'whoCanMarkNoResponseNeeded',
'whoCanModerateContent',
'whoCanModerateMembers',
'whoCanModifyMembers',
'whoCanModifyTagsAndCategories',
'whoCanMoveTopicsIn',
'whoCanMoveTopicsOut',
'whoCanPostAnnouncements',
'whoCanPostMessage',
'whoCanTakeTopics',
'whoCanUnassignTopic',
'whoCanUnmarkFavoriteReplyOnAnyTopic',
'whoCanViewGroup',
'whoCanViewMembership',
# Miscellaneous choices
'default_sender',
'messageModerationLevel',
'replyTo',
'spamModerationLevel',
}
GROUP_SETTINGS_BOOLEAN_ATTRIBUTES = {
'allowExternalMembers',
'allowGoogleCommunication',
'allowWebPosting',
'archiveOnly',
'enableCollaborativeInbox',
'favoriteRepliesOnTop',
'includeCustomFooter',
'includeInGlobalAddressList',
'isArchived',
'membersCanPostAsTheGroup',
'sendMessageDenyNotification',
'showInGroupDirectory',
}
#
# Global variables
#
# The following GM_XXX constants are arbitrary but must be unique
# Most errors print a message and bail out with a return code
# Some commands want to set a non-zero return code but not bail
GM_SYSEXITRC = 'sxrc'
# Path to gam
GM_GAM_PATH = 'gpth'
# Python source, PyInstaller or StaticX?
GM_GAM_TYPE = 'gtyp'
# Are we on Windows?
GM_WINDOWS = 'wndo'
# Encodings
GM_SYS_ENCODING = 'syen'
# Extra arguments to pass to GAPI functions
GM_EXTRA_ARGS_DICT = 'exad'
# Current API services
GM_CURRENT_API_SERVICES = 'caps'
# Current API user
GM_CURRENT_API_USER = 'capu'
# Current API scope
GM_CURRENT_API_SCOPES = 'scoc'
# Values retrieved from oauth2service.json
GM_OAUTH2SERVICE_JSON_DATA = 'oajd'
GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID = 'oaci'
# Full path to enabledasa.txt
GM_ENABLEDASA_TXT = 'enda'
# File containing time of last GAM update check
GM_LAST_UPDATE_CHECK_TXT = 'lupc'
# Dictionary mapping OrgUnit ID to Name
GM_MAP_ORGUNIT_ID_TO_NAME = 'oi2n'
# Dictionary mapping Role ID to Name
GM_MAP_ROLE_ID_TO_NAME = 'ri2n'
# Dictionary mapping Role Name to ID
GM_MAP_ROLE_NAME_TO_ID = 'rn2i'
# Dictionary mapping User ID to Name
GM_MAP_USER_ID_TO_NAME = 'ui2n'
# GAM cache directory. If no_cache is True, this variable will be set to None
GM_CACHE_DIR = 'gacd'
# Reset GAM cache directory after discovery
GM_CACHE_DISCOVERY_ONLY = 'gcdo'
# Dictionary mapping Building ID to Name
GM_MAP_BUILDING_ID_TO_NAME = 'bi2n'
# Dictionary mapping Building Name to ID
GM_MAP_BUILDING_NAME_TO_ID = 'bn2i'
#
_DEFAULT_CHARSET = UTF8
_FN_CLIENT_SECRETS_JSON = 'client_secrets.json'
_FN_OAUTH2SERVICE_JSON = 'oauth2service.json'
_FN_OAUTH2_TXT = 'oauth2.txt'
#
GM_Globals = {
GM_SYSEXITRC: 0,
GM_GAM_PATH: None,
GM_GAM_TYPE: None,
GM_WINDOWS: os.name == 'nt',
GM_SYS_ENCODING: _DEFAULT_CHARSET,
GM_EXTRA_ARGS_DICT: {
'prettyPrint': False
},
GM_CURRENT_API_SERVICES: {},
GM_CURRENT_API_USER: None,
GM_CURRENT_API_SCOPES: [],
GM_OAUTH2SERVICE_JSON_DATA: None,
GM_OAUTH2SERVICE_ACCOUNT_CLIENT_ID: None,
GM_ENABLEDASA_TXT: '',
GM_LAST_UPDATE_CHECK_TXT: '',
GM_MAP_ORGUNIT_ID_TO_NAME: {},
GM_MAP_ROLE_ID_TO_NAME: None,
GM_MAP_ROLE_NAME_TO_ID: None,
GM_MAP_USER_ID_TO_NAME: None,
GM_CACHE_DIR: None,
GM_CACHE_DISCOVERY_ONLY: True,
GM_MAP_BUILDING_ID_TO_NAME: None,
GM_MAP_BUILDING_NAME_TO_ID: None,
}
#
# Global variables defined by environment variables/signal files
#
# Automatically generate gam batch command if number of users specified in gam
# users xxx command exceeds this number
# Default: 0, don't automatically generate gam batch commands
GC_AUTO_BATCH_MIN = 'auto_batch_min'
# When processing items in batches, how many should be processed in each batch
GC_BATCH_SIZE = 'batch_size'
# GAM cache directory. If no_cache is specified, this variable will be set to None
GC_CACHE_DIR = 'cache_dir'
# GAM cache discovery only. If True (and no_cache is False), only API discovery calls will be cached
GC_CACHE_DISCOVERY_ONLY = 'cache_discovery_only'
# Character set of batch, csv, data files
GC_CHARSET = 'charset'
# Path to client_secrets.json
GC_CLIENT_SECRETS_JSON = 'client_secrets_json'
# GAM config directory containing client_secrets.json, oauth2.txt,
# oauth2service.json, extra_args.txt
GC_CONFIG_DIR = 'config_dir'
# customerId from gam.cfg or retrieved from Google
GC_CUSTOMER_ID = 'customer_id'
# Admin email address, required when enable_dasa is true, overrides oauth2.txt value otherwise
GC_ADMIN_EMAIL = 'admin_email'
# If debug_level > 0: extra_args[u'prettyPrint'] = True,
# httplib2.debuglevel = gam_debug_level, appsObj.debug = True
GC_DEBUG_LEVEL = 'debug_level'
# ID Token decoded from OAuth 2.0 refresh token response. Includes hd (domain)
# and email of authorized user
GC_DECODED_ID_TOKEN = 'decoded_id_token'
# Domain obtained from gam.cfg or oauth2.txt
GC_DOMAIN = 'domain'
# Google Drive download directory
GC_DRIVE_DIR = 'drive_dir'
# Enable Delegated Admin Service Accounts
GC_ENABLE_DASA = 'enabledasa'
# If no_browser is True, writeCSVfile won't open a browser when todrive is set
# and doRequestOAuth prints a link and waits for the verification code when
# oauth2.txt is being created
GC_NO_BROWSER = 'no_browser'
# If no_tdemail is True, writeCSVfile won't send an email
GC_NO_TDEMAIL = 'no_tdemail'
# oauth_browser forces use of the web-server OAuth flow, which has proved problematic.
GC_OAUTH_BROWSER = 'oauth_browser'
# Disable GAM API caching
GC_NO_CACHE = 'no_cache'
# Disable Short URLs
GC_NO_SHORT_URLS = 'no_short_urls'
# Disable GAM update check
GC_NO_UPDATE_CHECK = 'no_update_check'
# Number of threads for gam batch
GC_NUM_THREADS = 'num_threads'
# Path to oauth2.txt
GC_OAUTH2_TXT = 'oauth2_txt'
# Path to oauth2service.json
GC_OAUTH2SERVICE_JSON = 'oauth2service_json'
# Default section to use for processing
GC_SECTION = 'section'
# Add (n/m) to end of messages if number of items to be processed exceeds this number
GC_SHOW_COUNTS_MIN = 'show_counts_min'
# Enable/disable "Getting ... " messages
GC_SHOW_GETTINGS = 'show_gettings'
# GAM config directory containing json discovery files
GC_SITE_DIR = 'site_dir'
# CSV Columns GAM should show on CSV output
GC_CSV_HEADER_FILTER = 'csv_header_filter'
# CSV Columns GAM should not show on CSV output
GC_CSV_HEADER_DROP_FILTER = 'csv_header_drop_filter'
# CSV Rows GAM should filter
GC_CSV_ROW_FILTER = 'csv_row_filter'
# CSV Rows GAM should filter/drop
GC_CSV_ROW_DROP_FILTER = 'csv_row_drop_filter'
# Minimum TLS Version required for HTTPS connections
GC_TLS_MIN_VERSION = 'tls_min_ver'
# Maximum TLS Version used for HTTPS connections
GC_TLS_MAX_VERSION = 'tls_max_ver'
# Path to certificate authority file for validating TLS hosts
GC_CA_FILE = 'ca_file'
TLS_MIN = 'TLSv1_3' if hasattr(ssl.SSLContext(), 'minimum_version') else None
GC_Defaults = {
GC_ADMIN_EMAIL: '',
GC_AUTO_BATCH_MIN: 0,
GC_BATCH_SIZE: 50,
GC_CACHE_DIR: '',
GC_CACHE_DISCOVERY_ONLY: True,
GC_CHARSET: _DEFAULT_CHARSET,
GC_CLIENT_SECRETS_JSON: _FN_CLIENT_SECRETS_JSON,
GC_CONFIG_DIR: '',
GC_CUSTOMER_ID: MY_CUSTOMER,
GC_DEBUG_LEVEL: 0,
GC_DECODED_ID_TOKEN: '',
GC_DOMAIN: '',
GC_DRIVE_DIR: '',
GC_ENABLE_DASA: False,
GC_NO_BROWSER: False,
GC_NO_TDEMAIL: False,
GC_NO_CACHE: False,
GC_NO_SHORT_URLS: False,
GC_NO_UPDATE_CHECK: False,
GC_NUM_THREADS: 25,
GC_OAUTH_BROWSER: False,
GC_OAUTH2_TXT: _FN_OAUTH2_TXT,
GC_OAUTH2SERVICE_JSON: _FN_OAUTH2SERVICE_JSON,
GC_SECTION: '',
GC_SHOW_COUNTS_MIN: 0,
GC_SHOW_GETTINGS: True,
GC_SITE_DIR: '',
GC_CSV_HEADER_FILTER: '',
GC_CSV_HEADER_DROP_FILTER: '',
GC_CSV_ROW_FILTER: '',
GC_CSV_ROW_DROP_FILTER: '',
GC_TLS_MIN_VERSION: TLS_MIN,
GC_TLS_MAX_VERSION: None,
GC_CA_FILE: None,
}
GC_Values = {}
GC_TYPE_BOOLEAN = 'bool'
GC_TYPE_CHOICE = 'choi'
GC_TYPE_DIRECTORY = 'dire'
GC_TYPE_EMAIL = 'emai'
GC_TYPE_FILE = 'file'
GC_TYPE_HEADERFILTER = 'heaf'
GC_TYPE_INTEGER = 'inte'
GC_TYPE_LANGUAGE = 'lang'
GC_TYPE_ROWFILTER = 'rowf'
GC_TYPE_STRING = 'stri'
GC_VAR_TYPE = 'type'
GC_VAR_LIMITS = 'lmit'
GC_VAR_INFO = {
GC_ADMIN_EMAIL: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_AUTO_BATCH_MIN: {
GC_VAR_TYPE: GC_TYPE_INTEGER,
GC_VAR_LIMITS: (0, None)
},
GC_BATCH_SIZE: {
GC_VAR_TYPE: GC_TYPE_INTEGER,
GC_VAR_LIMITS: (1, 1000)
},
GC_CACHE_DIR: {
GC_VAR_TYPE: GC_TYPE_DIRECTORY
},
GC_CACHE_DISCOVERY_ONLY: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_CHARSET: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_CLIENT_SECRETS_JSON: {
GC_VAR_TYPE: GC_TYPE_FILE
},
GC_CONFIG_DIR: {
GC_VAR_TYPE: GC_TYPE_DIRECTORY
},
GC_CUSTOMER_ID: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_DEBUG_LEVEL: {
GC_VAR_TYPE: GC_TYPE_INTEGER,
GC_VAR_LIMITS: (0, None)
},
GC_DECODED_ID_TOKEN: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_DOMAIN: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_DRIVE_DIR: {
GC_VAR_TYPE: GC_TYPE_DIRECTORY
},
GC_ENABLE_DASA: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_NO_BROWSER: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_NO_TDEMAIL: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_NO_CACHE: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_NO_SHORT_URLS: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_NO_UPDATE_CHECK: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_NUM_THREADS: {
GC_VAR_TYPE: GC_TYPE_INTEGER,
GC_VAR_LIMITS: (1, None)
},
GC_OAUTH_BROWSER: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_OAUTH2_TXT: {
GC_VAR_TYPE: GC_TYPE_FILE
},
GC_OAUTH2SERVICE_JSON: {
GC_VAR_TYPE: GC_TYPE_FILE
},
GC_SECTION: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_SHOW_COUNTS_MIN: {
GC_VAR_TYPE: GC_TYPE_INTEGER,
GC_VAR_LIMITS: (0, None)
},
GC_SHOW_GETTINGS: {
GC_VAR_TYPE: GC_TYPE_BOOLEAN
},
GC_SITE_DIR: {
GC_VAR_TYPE: GC_TYPE_DIRECTORY
},
GC_CSV_HEADER_FILTER: {
GC_VAR_TYPE: GC_TYPE_HEADERFILTER
},
GC_CSV_HEADER_DROP_FILTER: {
GC_VAR_TYPE: GC_TYPE_HEADERFILTER
},
GC_CSV_ROW_FILTER: {
GC_VAR_TYPE: GC_TYPE_ROWFILTER
},
GC_CSV_ROW_DROP_FILTER: {
GC_VAR_TYPE: GC_TYPE_ROWFILTER
},
GC_TLS_MIN_VERSION: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_TLS_MAX_VERSION: {
GC_VAR_TYPE: GC_TYPE_STRING
},
GC_CA_FILE: {
GC_VAR_TYPE: GC_TYPE_FILE
},
}
# Google API constants
NEVER_TIME = '1970-01-01T00:00:00.000Z'
NEVER_TIME_NOMS = '1970-01-01T00:00:00Z'
ROLE_MANAGER = 'MANAGER'
ROLE_MEMBER = 'MEMBER'
ROLE_OWNER = 'OWNER'
PROJECTION_CHOICES_MAP = {
'basic': 'BASIC',
'full': 'FULL',
}
SORTORDER_CHOICES_MAP = {
'ascending': 'ASCENDING',
'descending': 'DESCENDING',
}
#
CLEAR_NONE_ARGUMENT = [
'clear',
'none',
]
#
MESSAGE_API_ACCESS_CONFIG = 'API access is configured in your Control Panel' \
' under: Security-Show more-Advanced' \
' settings-Manage API client access'
MESSAGE_API_ACCESS_DENIED = 'API access Denied.\n\nPlease make sure the Client' \
' ID: {0} is authorized for the API Scope(s): {1}'
MESSAGE_GAM_EXITING_FOR_UPDATE = 'GAM is now exiting so that you can' \
' overwrite this old version with the' \
' latest release'
MESSAGE_GAM_OUT_OF_MEMORY = 'GAM has run out of memory. If this is a large' \
' G Suite instance, you should use a 64-bit' \
' version of GAM on Windows or a 64-bit version' \
' of Python on other systems.'
MESSAGE_HEADER_NOT_FOUND_IN_CSV_HEADERS = 'Header "{0}" not found in CSV' \
' headers of "{1}".'
MESSAGE_HIT_CONTROL_C_TO_UPDATE = '\n\nHit CTRL+C to visit the GAM website' \
                                  ' and download the latest release or wait' \
                                  ' 15 seconds to continue with this boring' \
                                  ' old version. GAM won\'t bother you with' \
                                  ' this announcement for 1 week or you can' \
                                  ' create a file named noupdatecheck.txt in' \
                                  ' the same location as gam.py or gam.exe' \
                                  ' and GAM won\'t ever check for updates.'
MESSAGE_INVALID_JSON = 'The file {0} has an invalid format.'
MESSAGE_NO_DISCOVERY_INFORMATION = 'No online discovery doc and {0} does not' \
' exist locally'
MESSAGE_NO_TRANSFER_LACK_OF_DISK_SPACE = 'Cowardly refusing to perform' \
' migration due to lack of target' \
' drive space. Source size: {0}mb' \
' Target Free: {1}mb'
MESSAGE_RESULTS_TOO_LARGE_FOR_GOOGLE_SPREADSHEET = 'Results are too large for' \
' Google Spreadsheets.' \
' Uploading as a regular' \
' CSV file.'
MESSAGE_SERVICE_NOT_APPLICABLE = 'Service not applicable for this address:' \
' {0}. Please make sure service is enabled' \
' for user and run\n\ngam user <user> check' \
' serviceaccount\n\nfor further instructions'
MESSAGE_INSTRUCTIONS_OAUTH2SERVICE_JSON = 'Please run\n\ngam create project\n' \
'gam user <user> check ' \
'serviceaccount\n\nto create and' \
' configure a service account.'
MESSAGE_UPDATE_GAM_TO_64BIT = 'You\'re running a 32-bit version of GAM on a' \
' 64-bit version of Windows, upgrade to a' \
' windows-x86_64 version of GAM'
MESSAGE_YOUR_SYSTEM_TIME_DIFFERS_FROM_GOOGLE_BY = 'Your system time differs' \
' from %s by %s'
shared_drive_values = ['teamdrive', 'teamdrives',
'shareddrive', 'shareddrives']
USER_ADDRESS_TYPES = ['home', 'work', 'other']
USER_EMAIL_TYPES = ['home', 'work', 'other']
USER_EXTERNALID_TYPES = [
'account', 'customer', 'login_id', 'network', 'organization'
]
USER_GENDER_TYPES = ['female', 'male', 'unknown']
USER_IM_TYPES = ['home', 'work', 'other']
USER_KEYWORD_TYPES = ['occupation', 'outlook', 'mission']
USER_LOCATION_TYPES = ['default', 'desk']
USER_ORGANIZATION_TYPES = ['domain_only', 'school', 'unknown', 'work']
USER_PHONE_TYPES = [
'assistant', 'callback', 'car', 'company_main', 'grand_central', 'home',
'home_fax', 'isdn', 'main', 'mobile', 'other', 'other_fax', 'pager',
'radio', 'telex', 'tty_tdd', 'work', 'work_fax', 'work_mobile', 'work_pager'
]
USER_RELATION_TYPES = [
'admin_assistant', 'assistant', 'brother', 'child', 'domestic_partner',
'dotted_line_manager', 'exec_assistant', 'father', 'friend', 'manager',
'mother', 'parent', 'partner', 'referred_by', 'relative', 'sister', 'spouse'
]
USER_WEBSITE_TYPES = [
'app_install_page', 'blog', 'ftp', 'home', 'home_page', 'other', 'profile',
'reservations', 'resume', 'work'
]
WEBCOLOR_MAP = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
# Gmail label colors
LABEL_COLORS = [
'#000000',
'#076239',
'#0b804b',
'#149e60',
'#16a766',
'#1a764d',
'#1c4587',
'#285bac',
'#2a9c68',
'#3c78d8',
'#3dc789',
'#41236d',
'#434343',
'#43d692',
'#44b984',
'#4a86e8',
'#653e9b',
'#666666',
'#68dfa9',
'#6d9eeb',
'#822111',
'#83334c',
'#89d3b2',
'#8e63ce',
'#999999',
'#a0eac9',
'#a46a21',
'#a479e2',
'#a4c2f4',
'#aa8831',
'#ac2b16',
'#b65775',
'#b694e8',
'#b9e4d0',
'#c6f3de',
'#c9daf8',
'#cc3a21',
'#cccccc',
'#cf8933',
'#d0bcf1',
'#d5ae49',
'#e07798',
'#e4d7f5',
'#e66550',
'#eaa041',
'#efa093',
'#efefef',
'#f2c960',
'#f3f3f3',
'#f691b3',
'#f6c5be',
'#f7a7c0',
'#fad165',
'#fb4c2f',
'#fbc8d9',
'#fcda83',
'#fcdee8',
'#fce8b3',
'#fef1d1',
'#ffad47',
'#ffbc6b',
'#ffd6a2',
'#ffe6c7',
'#ffffff',
]
# Valid language codes
LANGUAGE_CODES_MAP = {
'ach': 'ach',
'af': 'af',
'ag': 'ga',
'ak': 'ak',
'am': 'am',
'ar': 'ar',
'az': 'az',
'be': 'be',
'bem': 'bem',
'bg': 'bg',
'bn': 'bn',
'br': 'br',
'bs': 'bs',
'ca': 'ca',
'chr': 'chr',
'ckb': 'ckb',
'co': 'co',
'crs': 'crs',
'cs': 'cs',
'cy': 'cy',
'da': 'da',
'de': 'de',
'ee': 'ee',
'el': 'el',
'en': 'en',
'en-gb': 'en-GB',
'en-us': 'en-US',
'eo': 'eo',
'es': 'es',
'es-419': 'es-419',
'et': 'et',
'eu': 'eu',
'fa': 'fa',
'fi': 'fi',
'fo': 'fo',
'fr': 'fr',
'fr-ca': 'fr-ca',
'fy': 'fy',
'ga': 'ga',
'gaa': 'gaa',
'gd': 'gd',
'gl': 'gl',
'gn': 'gn',
'gu': 'gu',
'ha': 'ha',
'haw': 'haw',
'he': 'he',
'hi': 'hi',
'hr': 'hr',
'ht': 'ht',
'hu': 'hu',
'hy': 'hy',
'ia': 'ia',
'id': 'id',
'ig': 'ig',
'in': 'in',
'is': 'is',
'it': 'it',
'iw': 'iw',
'ja': 'ja',
'jw': 'jw',
'ka': 'ka',
'kg': 'kg',
'kk': 'kk',
'km': 'km',
'kn': 'kn',
'ko': 'ko',
'kri': 'kri',
'ku': 'ku',
'ky': 'ky',
'la': 'la',
'lg': 'lg',
'ln': 'ln',
'lo': 'lo',
'loz': 'loz',
'lt': 'lt',
'lua': 'lua',
'lv': 'lv',
'mfe': 'mfe',
'mg': 'mg',
'mi': 'mi',
'mk': 'mk',
'ml': 'ml',
'mn': 'mn',
'mo': 'mo',
'mr': 'mr',
'ms': 'ms',
'mt': 'mt',
'my': 'my',
'ne': 'ne',
'nl': 'nl',
'nn': 'nn',
'no': 'no',
'nso': 'nso',
'ny': 'ny',
'nyn': 'nyn',
'oc': 'oc',
'om': 'om',
'or': 'or',
'pa': 'pa',
'pcm': 'pcm',
'pl': 'pl',
'ps': 'ps',
'pt-br': 'pt-BR',
'pt-pt': 'pt-PT',
'qu': 'qu',
'rm': 'rm',
'rn': 'rn',
'ro': 'ro',
'ru': 'ru',
'rw': 'rw',
'sd': 'sd',
'sh': 'sh',
'si': 'si',
'sk': 'sk',
'sl': 'sl',
'sn': 'sn',
'so': 'so',
'sq': 'sq',
'sr': 'sr',
'sr-me': 'sr-ME',
'st': 'st',
'su': 'su',
'sv': 'sv',
'sw': 'sw',
'ta': 'ta',
'te': 'te',
'tg': 'tg',
'th': 'th',
'ti': 'ti',
'tk': 'tk',
'tl': 'tl',
'tn': 'tn',
'to': 'to',
'tr': 'tr',
'tt': 'tt',
'tum': 'tum',
'tw': 'tw',
'ug': 'ug',
'uk': 'uk',
'ur': 'ur',
'uz': 'uz',
'vi': 'vi',
'wo': 'wo',
'xh': 'xh',
'yi': 'yi',
'yo': 'yo',
'zh-cn': 'zh-CN',
'zh-hk': 'zh-HK',
'zh-tw': 'zh-TW',
'zu': 'zu',
}
# maxResults exception values for API list calls. An API should only be listed
# here if:
# - its discovery doc does not specify a maximum value (when a maximum exists
#   we use it, not this)
# - the actual maximum the API returns with maxResults=<bigNum> exceeds what
#   the API returns by default when maxResults isn't specified (otherwise we
#   should use the default by not setting maxResults)
MAX_RESULTS_API_EXCEPTIONS = {
'calendar.acl.list': 250,
'calendar.calendarList.list': 250,
'calendar.events.list': 2500,
'calendar.settings.list': 250,
'directory.chromeosdevices.list': 200,
'drive.files.list': 1000,
}
ONE_KILO_BYTES = 1000
ONE_MEGA_BYTES = 1000000
ONE_GIGA_BYTES = 1000000000
DELTA_DATE_PATTERN = re.compile(r'^([+-])(\d+)([dwy])$')
DELTA_DATE_FORMAT_REQUIRED = '(+|-)<Number>(d|w|y)'
DELTA_TIME_PATTERN = re.compile(r'^([+-])(\d+)([mhdwy])$')
DELTA_TIME_FORMAT_REQUIRED = '(+|-)<Number>(m|h|d|w|y)'
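# Illustrative (hedged) examples of the delta patterns above, with hypothetical
# inputs:
#   DELTA_DATE_PATTERN.match('+3d').groups()  -> ('+', '3', 'd')
#   DELTA_TIME_PATTERN.match('-90m').groups() -> ('-', '90', 'm')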
HHMM_FORMAT = '%H:%M'
HHMM_FORMAT_REQUIRED = 'hh:mm'
YYYYMMDD_FORMAT = '%Y-%m-%d'
YYYYMMDD_FORMAT_REQUIRED = 'yyyy-mm-dd'
YYYYMMDDTHHMMSS_FORMAT_REQUIRED = 'yyyy-mm-ddThh:mm:ss[.fff](Z|(+|-(hh:mm)))'
YYYYMMDD_PATTERN = re.compile(r'^[0-9]{4}-[0-9]{2}-[0-9]{2}$')
UID_PATTERN = re.compile(r'u?id: ?(.+)', re.IGNORECASE)
# --- file: whoville/cloudbreak/models/user_profile_response.py | repo: mikchaos/whoville | license: Apache-2.0 ---
# coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is also a cloud-agnostic Hadoop as a Service API that abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API that allows you to spin up Hadoop clusters of arbitrary sizes on any of the supported cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers' APIs (Amazon AWS, Microsoft Azure, Google Cloud Platform, OpenStack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class UserProfileResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'credential': 'CredentialResponse',
'owner': 'str',
'account': 'str',
'ui_properties': 'dict(str, object)'
}
attribute_map = {
'credential': 'credential',
'owner': 'owner',
'account': 'account',
'ui_properties': 'uiProperties'
}
def __init__(self, credential=None, owner=None, account=None, ui_properties=None):
"""
UserProfileResponse - a model defined in Swagger
"""
self._credential = None
self._owner = None
self._account = None
self._ui_properties = None
if credential is not None:
self.credential = credential
if owner is not None:
self.owner = owner
if account is not None:
self.account = account
if ui_properties is not None:
self.ui_properties = ui_properties
@property
def credential(self):
"""
Gets the credential of this UserProfileResponse.
:return: The credential of this UserProfileResponse.
:rtype: CredentialResponse
"""
return self._credential
@credential.setter
def credential(self, credential):
"""
Sets the credential of this UserProfileResponse.
:param credential: The credential of this UserProfileResponse.
:type: CredentialResponse
"""
self._credential = credential
@property
def owner(self):
"""
Gets the owner of this UserProfileResponse.
:return: The owner of this UserProfileResponse.
:rtype: str
"""
return self._owner
@owner.setter
def owner(self, owner):
"""
Sets the owner of this UserProfileResponse.
:param owner: The owner of this UserProfileResponse.
:type: str
"""
self._owner = owner
@property
def account(self):
"""
Gets the account of this UserProfileResponse.
:return: The account of this UserProfileResponse.
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""
Sets the account of this UserProfileResponse.
:param account: The account of this UserProfileResponse.
:type: str
"""
self._account = account
@property
def ui_properties(self):
"""
Gets the ui_properties of this UserProfileResponse.
:return: The ui_properties of this UserProfileResponse.
:rtype: dict(str, object)
"""
return self._ui_properties
@ui_properties.setter
def ui_properties(self, ui_properties):
"""
Sets the ui_properties of this UserProfileResponse.
:param ui_properties: The ui_properties of this UserProfileResponse.
:type: dict(str, object)
"""
self._ui_properties = ui_properties
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, UserProfileResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
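# A minimal usage sketch (not part of the generated module); the attribute
# values below are illustrative assumptions, not real Cloudbreak data:
#   profile = UserProfileResponse(owner='alice', account='acme',
#                                 ui_properties={'theme': 'dark'})
#   profile.to_dict()
#   -> {'credential': None, 'owner': 'alice', 'account': 'acme',
#       'ui_properties': {'theme': 'dark'}}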
# --- file: templatepython/examples/fizzbuzz.py | repo: humayun-argn/python-template | license: MIT ---
#!/usr/bin/env python3
"""
FizzBuzz
https://medium.freecodecamp.org/a-software-engineering-survival-guide-fe3eafb47166
https://medium.freecodecamp.org/coding-interviews-for-dummies-5e048933b82b
This solution uses the following syntax features:
Modulo (%, remainder)
Strict equality (==)
Addition assignment (+=)
"""
from typing import List, Union
def fizzbuzz_print() -> None:
"""Print 1-100
---
- Multiples of 3: Fizz
- Multiples of 5: Buzz
- Multiples of 3 and 5: FizzBuzz
"""
for i in range(1, 101):
out = ""
if i % 3 == 0:
out += "Fizz"
if i % 5 == 0:
out += "Buzz"
print(out or i)
def fizzbuzz_list() -> List[Union[int, str]]:
"""Create a list 1-100
---
- Multiples of 3 and 5: FizzBuzz
- Multiples of 3: Fizz
- Multiples of 5: Buzz
- Else: integer
"""
    out: List[Union[int, str]] = []
    for i in range(1, 101):
        if i % 3 == 0 and i % 5 == 0:
            out.append("FizzBuzz")
        elif i % 3 == 0:
            out.append("Fizz")
        elif i % 5 == 0:
            out.append("Buzz")
        else:
            out.append(i)
    return out
if __name__ == "__main__":
print(fizzbuzz_list())
# --- file: src/utils/glsl/generate_hardcode.py | repo: Time-Coder/glass | license: MIT ---
import os
import pathlib
import copy
def delete_comments(content):
while True:
pos_start = content.find("/*")
if pos_start == -1:
break
pos_end = content.find("*/", pos_start+2)
if pos_end == -1:
pos_end = len(content)-2
content = content[:pos_start] + content[pos_end+2:]
while True:
pos_start = content.find("//")
if pos_start == -1:
break
pos_end = content.find("\n", pos_start + 2)
if pos_end == -1:
pos_end = len(content)
content = content[:pos_start] + content[pos_end:]
return content
class Node:
def __init__(self):
self.content = ""
self.in_edges = set()
def skip_space(content, pos):
if pos < 0:
return 0
if pos >= len(content):
return len(content)
while pos < len(content):
if content[pos] not in ' \t':
break
pos += 1
return pos
def to_hard_code(filename):
content = delete_comments(open(filename).read())
lines = content.split("\n")
name = filename.replace("/", "_").replace("\\", "_").replace(".", "_").replace("glsl", "shader")
result = Node()
for line in lines:
line = line.rstrip()
if line == "":
continue
pos_start = line.find("#include")
if pos_start == -1:
line = '"' + line.replace('\\', '\\\\').replace('"', '\\"') + '\\n"'
else:
pos_start = skip_space(line, pos_start+len("#include"))
start_sign = line[pos_start]
pos_start += 1
if start_sign == '"':
end_sign = '"'
elif start_sign == '<':
end_sign = '>'
pos_end = line.find(end_sign, pos_start)
if pos_end == -1:
                raise SyntaxError("Invalid format in #include directive")
file_name = line[pos_start:pos_end]
file_name = os.path.relpath(os.path.dirname(filename) + "/" + file_name)
var_name = file_name.replace("/", "_").replace("\\", "_").replace(".", "_").replace("glsl", "shader")
line = '+ glass::' + var_name + ' +'
result.in_edges.add(var_name)
result.content += line + '\n'
return result
def generate_hardcode_dict(folder_name):
    files = all_files(folder_name)
result = {}
for file in files:
var_name = file.replace("/", "_").replace("\\", "_").replace(".", "_").replace("glsl", "shader")
result[var_name] = to_hard_code(file)
return result
def extname(filename):
return pathlib.Path(filename).suffix
def all_files(folder = "."):
result = []
files = os.listdir(folder)
for file in files:
file = folder + "/" + file
if os.path.isfile(file) and extname(file) == ".glsl":
if len(file)>=2 and (file[:2] == "./" or file[:2] == ".\\"):
file = file[2:]
result.append(file)
elif os.path.isdir(file):
result.extend(all_files(file))
return result
result_dict = generate_hardcode_dict(".")
result_dict_copy = copy.deepcopy(result_dict)
# Topologically sort the shader variable names: repeatedly emit a shader with
# no remaining unresolved #include dependencies, then remove it as a
# dependency of every other shader.
order_list = []
while True:
pop_key = None
for key in result_dict:
if len(result_dict[key].in_edges) == 0:
order_list.append(key)
for sub_key in result_dict:
if key in result_dict[sub_key].in_edges:
result_dict[sub_key].in_edges.remove(key)
pop_key = key
break
if pop_key != None:
result_dict.pop(pop_key)
else:
break
# Anything still left in result_dict is part of a circular #include chain;
# report its unresolved dependencies.
for key in result_dict:
    print(result_dict[key].in_edges)
out_file = open("../shaders.cpp", "w")
out_file.write("#include \"glass/utils/shaders.h\"\n")
out_file.write("\nusing namespace std;\n")
for var_name in order_list:
out_file.write("string glass::" + var_name + " = \n")
out_file.write(result_dict_copy[var_name].content + ";\n\n")
out_file.close()
out_file = open("../../../include/glass/utils/shaders.h", "w")
out_file.write("#ifndef __SHADERS_H__\n")
out_file.write("#define __SHADERS_H__\n")
out_file.write("#include <string>\n")
out_file.write("namespace glass\n")
out_file.write("{\n")
for var_name in order_list:
out_file.write("extern std::string " + var_name + ";\n")
out_file.write("};\n")
out_file.write("#endif")
out_file.close() | 24.828947 | 104 | 0.649179 | 556 | 3,774 | 4.167266 | 0.188849 | 0.055244 | 0.062149 | 0.044886 | 0.325421 | 0.26025 | 0.179543 | 0.111351 | 0.111351 | 0.111351 | 0 | 0.00603 | 0.165077 | 3,774 | 152 | 105 | 24.828947 | 0.729292 | 0 | 0 | 0.219512 | 0 | 0 | 0.101192 | 0.010066 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056911 | false | 0 | 0.02439 | 0.00813 | 0.154472 | 0.00813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f5cee69e3b4b126cfc5e418c2c152018ba7ad45 | 8,696 | py | Python | pytorch3d/implicitron/models/renderer/ray_sampler.py | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | pytorch3d/implicitron/models/renderer/ray_sampler.py | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | pytorch3d/implicitron/models/renderer/ray_sampler.py | janEbert/pytorch3d | accdac80fb29e82f72d4e8e73135ba8fd790b6c0 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import field
from typing import Optional, Tuple
import torch
from pytorch3d.implicitron.tools import camera_utils
from pytorch3d.implicitron.tools.config import Configurable
from pytorch3d.renderer import NDCMultinomialRaysampler, RayBundle
from pytorch3d.renderer.cameras import CamerasBase
from .base import EvaluationMode, RenderSamplingMode
class RaySampler(Configurable, torch.nn.Module):
"""
Samples a fixed number of points along rays which are in turn sampled for
each camera in a batch.
This class utilizes `NDCMultinomialRaysampler` which allows to either
randomly sample rays from an input foreground saliency mask
(`RenderSamplingMode.MASK_SAMPLE`), or on a rectangular image grid
(`RenderSamplingMode.FULL_GRID`). The sampling mode can be set separately
for training and evaluation by setting `self.sampling_mode_training`
    and `self.sampling_mode_evaluation` accordingly.
The class allows two modes of sampling points along the rays:
1) Sampling between fixed near and far z-planes:
Active when `self.scene_extent <= 0`, samples points along each ray
with approximately uniform spacing of z-coordinates between
the minimum depth `self.min_depth` and the maximum depth `self.max_depth`.
This sampling is useful for rendering scenes where the camera is
in a constant distance from the focal point of the scene.
2) Adaptive near/far plane estimation around the world scene center:
Active when `self.scene_extent > 0`. Samples points on each
ray between near and far planes whose depths are determined based on
the distance from the camera center to a predefined scene center.
More specifically,
`min_depth = max(
(self.scene_center-camera_center).norm() - self.scene_extent, eps
)` and
`max_depth = (self.scene_center-camera_center).norm() + self.scene_extent`.
This sampling is ideal for object-centric scenes whose contents are
centered around a known `self.scene_center` and fit into a bounding sphere
with a radius of `self.scene_extent`.
Similar to the sampling mode, the sampling parameters can be set separately
for training and evaluation.
Settings:
image_width: The horizontal size of the image grid.
image_height: The vertical size of the image grid.
scene_center: The xyz coordinates of the center of the scene used
along with `scene_extent` to compute the min and max depth planes
for sampling ray-points.
scene_extent: The radius of the scene bounding sphere centered at `scene_center`.
If `scene_extent <= 0`, the raysampler samples points between
`self.min_depth` and `self.max_depth` depths instead.
sampling_mode_training: The ray sampling mode for training. This should be a str
option from the RenderSamplingMode Enum
sampling_mode_evaluation: Same as above but for evaluation.
n_pts_per_ray_training: The number of points sampled along each ray during training.
n_pts_per_ray_evaluation: The number of points sampled along each ray during evaluation.
        n_rays_per_image_sampled_from_mask: The number of rays to be sampled from the image grid.
        min_depth: The minimum depth of a ray-point. Active when `self.scene_extent <= 0`.
        max_depth: The maximum depth of a ray-point. Active when `self.scene_extent <= 0`.
stratified_point_sampling_training: if set, performs stratified random sampling
along the ray; otherwise takes ray points at deterministic offsets.
stratified_point_sampling_evaluation: Same as above but for evaluation.
"""
image_width: int = 400
image_height: int = 400
scene_center: Tuple[float, float, float] = field(
default_factory=lambda: (0.0, 0.0, 0.0)
)
scene_extent: float = 0.0
sampling_mode_training: str = "mask_sample"
sampling_mode_evaluation: str = "full_grid"
n_pts_per_ray_training: int = 64
n_pts_per_ray_evaluation: int = 64
n_rays_per_image_sampled_from_mask: int = 1024
min_depth: float = 0.1
max_depth: float = 8.0
# stratified sampling vs taking points at deterministic offsets
stratified_point_sampling_training: bool = True
stratified_point_sampling_evaluation: bool = False
def __post_init__(self):
super().__init__()
self.scene_center = torch.FloatTensor(self.scene_center)
self._sampling_mode = {
EvaluationMode.TRAINING: RenderSamplingMode(self.sampling_mode_training),
EvaluationMode.EVALUATION: RenderSamplingMode(
self.sampling_mode_evaluation
),
}
self._raysamplers = {
EvaluationMode.TRAINING: NDCMultinomialRaysampler(
image_width=self.image_width,
image_height=self.image_height,
n_pts_per_ray=self.n_pts_per_ray_training,
min_depth=self.min_depth,
max_depth=self.max_depth,
n_rays_per_image=self.n_rays_per_image_sampled_from_mask
if self._sampling_mode[EvaluationMode.TRAINING]
== RenderSamplingMode.MASK_SAMPLE
else None,
unit_directions=True,
stratified_sampling=self.stratified_point_sampling_training,
),
EvaluationMode.EVALUATION: NDCMultinomialRaysampler(
image_width=self.image_width,
image_height=self.image_height,
n_pts_per_ray=self.n_pts_per_ray_evaluation,
min_depth=self.min_depth,
max_depth=self.max_depth,
n_rays_per_image=self.n_rays_per_image_sampled_from_mask
if self._sampling_mode[EvaluationMode.EVALUATION]
== RenderSamplingMode.MASK_SAMPLE
else None,
unit_directions=True,
stratified_sampling=self.stratified_point_sampling_evaluation,
),
}
def forward(
self,
cameras: CamerasBase,
evaluation_mode: EvaluationMode,
mask: Optional[torch.Tensor] = None,
) -> RayBundle:
"""
Args:
cameras: A batch of `batch_size` cameras from which the rays are emitted.
evaluation_mode: one of `EvaluationMode.TRAINING` or
`EvaluationMode.EVALUATION` which determines the sampling mode
that is used.
mask: Active for the `RenderSamplingMode.MASK_SAMPLE` sampling mode.
Defines a non-negative mask of shape
`(batch_size, image_height, image_width)` where each per-pixel
value is proportional to the probability of sampling the
corresponding pixel's ray.
Returns:
ray_bundle: A `RayBundle` object containing the parametrizations of the
sampled rendering rays.
"""
sample_mask = None
if (
# pyre-fixme[29]
self._sampling_mode[evaluation_mode] == RenderSamplingMode.MASK_SAMPLE
and mask is not None
):
sample_mask = torch.nn.functional.interpolate(
mask,
# pyre-fixme[6]: Expected `Optional[int]` for 2nd param but got
# `List[int]`.
size=[self.image_height, self.image_width],
mode="nearest",
)[:, 0]
if self.scene_extent > 0.0:
# Override the min/max depth set in initialization based on the
# input cameras.
min_depth, max_depth = camera_utils.get_min_max_depth_bounds(
cameras, self.scene_center, self.scene_extent
)
# pyre-fixme[29]:
# `Union[BoundMethod[typing.Callable(torch.Tensor.__getitem__)[[Named(self,
# torch.Tensor), Named(item, typing.Any)], typing.Any], torch.Tensor],
# torch.Tensor, torch.nn.Module]` is not a function.
ray_bundle = self._raysamplers[evaluation_mode](
cameras=cameras,
mask=sample_mask,
min_depth=float(min_depth[0]) if self.scene_extent > 0.0 else None,
max_depth=float(max_depth[0]) if self.scene_extent > 0.0 else None,
)
return ray_bundle
# --- file: planning_launch/launch/mission_planning/mission_planning.launch.py | repo: tier4/autoware_launcher.iv.universe | license: Apache-2.0 ---
# Copyright 2021 Tier IV, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import launch
from launch.actions import DeclareLaunchArgument
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import ComposableNodeContainer
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
container = ComposableNodeContainer(
name='mission_planning_container',
namespace='',
package='rclcpp_components',
executable='component_container',
composable_node_descriptions=[
ComposableNode(
package='mission_planner',
plugin='mission_planner::MissionPlannerLanelet2',
name='mission_planner',
remappings=[
('input/vector_map', '/map/vector_map'),
('input/goal_pose', '/planning/mission_planning/goal'),
('input/checkpoint', '/planning/mission_planning/checkpoint'),
('output/route', '/planning/mission_planning/route'),
('debug/route_marker',
'/planning/mission_planning/route_marker'),
],
parameters=[
{
'map_frame': 'map',
'base_link_frame': 'base_link',
}
],
extra_arguments=[{
'use_intra_process_comms': LaunchConfiguration('use_intra_process')
}],
),
ComposableNode(
package='mission_planner',
plugin='mission_planner::GoalPoseVisualizer',
name='goal_pose_visualizer',
remappings=[
('input/route', '/planning/mission_planning/route'),
('output/goal_pose',
'/planning/mission_planning/echo_back_goal_pose'),
],
extra_arguments=[{
'use_intra_process_comms': LaunchConfiguration('use_intra_process')
}],
)
],
)
return launch.LaunchDescription([
DeclareLaunchArgument('use_intra_process', default_value='false',
description='use ROS2 component container communication'),
container
])
| 40.43662 | 88 | 0.592128 | 258 | 2,871 | 6.391473 | 0.492248 | 0.063675 | 0.083687 | 0.05094 | 0.226804 | 0.149181 | 0.149181 | 0.082474 | 0.082474 | 0.082474 | 0 | 0.005112 | 0.318704 | 2,871 | 70 | 89 | 41.014286 | 0.837935 | 0.198885 | 0 | 0.296296 | 0 | 0 | 0.313648 | 0.158793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.092593 | 0 | 0.12963 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f5fcfd6021a2de97a5ebce4f9a1bbfac3341807 | 1,268 | py | Python | dongfangtoutiao.py | lixueyuan/- | 0b3fcaa9bd9eea2453c5f6859823f92d2459469e | [
"Apache-2.0"
] | null | null | null | dongfangtoutiao.py | lixueyuan/- | 0b3fcaa9bd9eea2453c5f6859823f92d2459469e | [
"Apache-2.0"
] | null | null | null | dongfangtoutiao.py | lixueyuan/- | 0b3fcaa9bd9eea2453c5f6859823f92d2459469e | [
"Apache-2.0"
] | null | null | null | import requests
from urllib.parse import urlencode
import json
import re
def get_page_detail(pageNumber,keyWord):
data = {
        # paginated loading: five records per page by default
'callback': 'jQuery18308665374998856634_1526290762213',
'type': keyWord,
'pgnum': pageNumber,
}
    # URL-encode the request parameters
params = urlencode(data)
    # base URL of the site being scraped
base = 'http://pcflow.dftoutiao.com/toutiaopc_jrtt/newspool'
url = base + '?' + params
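    # For illustration, pageNumber='1' and keyWord='yule' produce a URL of
    # roughly:
    #   http://pcflow.dftoutiao.com/toutiaopc_jrtt/newspool?callback=...&type=yule&pgnum=1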
print(url)
try:
response = requests.get(url)
if response.status_code == 200:
token = re.findall(r"{.*}", response.text)
return token
return None
    # requests raises its own ConnectionError, which is not a subclass of
    # the builtin one, so catch the requests exception explicitly.
    except requests.exceptions.ConnectionError:
print('connection error')
return None
def parse_page_index(html):
data = json.loads(html[0])
if data and 'data' in data.keys():
for item in data.get('data'):
print(item.get('miniimg'))
print(item.get('topic'))
print(item.get('source'))
# for imagedata in item.get('miniimg'):
# # print(imagedata)
# print(imagedata.get('src'))
# # print(item.get('miniimg'))
def main():
html = get_page_detail('1','yule')
if html:
parse_page_index(html)
if __name__ == '__main__':
main() | 25.877551 | 64 | 0.576498 | 140 | 1,268 | 5.085714 | 0.485714 | 0.049157 | 0.067416 | 0.050562 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042506 | 0.294953 | 1,268 | 49 | 65 | 25.877551 | 0.753915 | 0.111987 | 0 | 0.055556 | 0 | 0 | 0.150268 | 0.035778 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.111111 | 0 | 0.277778 | 0.138889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f604712379f982d1fbd6ff6a8bf8fb8bf5c46db | 858 | py | Python | d03p1.py | emaballarin/AoC-2021 | 19ccb275eaf83a22e3e80e9a6aec11fa6dd923fc | [
"MIT"
] | 1 | 2021-12-01T11:27:45.000Z | 2021-12-01T11:27:45.000Z | d03p1.py | emaballarin/AoC-2021 | 19ccb275eaf83a22e3e80e9a6aec11fa6dd923fc | [
"MIT"
] | null | null | null | d03p1.py | emaballarin/AoC-2021 | 19ccb275eaf83a22e3e80e9a6aec11fa6dd923fc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from functools import partial as fpartial
import numpy as np
from util.transforms import binarray2int
from util.specifics import binary_abundance
def solve() -> int:
    # Transposing makes it easier to operate over rows rather than columns
data_in = np.genfromtxt("./data/d03/p1/input", delimiter=1).transpose()
# Original (i.e. pre-transposition) number of rows
data_len = len(data_in[0])
# Number of 1s is sum at that position; number of 0s is number of elements - number of 1s
gamma = np.apply_along_axis(
fpartial(binary_abundance, col_len=data_len), 1, data_in
)
# (Binary complement, done with integers)
epsilon = 1 - gamma
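    # Worked example (assumed 5-bit diagnostic report): if gamma comes out
    # as [1, 0, 1, 1, 0] (0b10110 = 22), epsilon is its complement
    # [0, 1, 0, 0, 1] (0b01001 = 9), giving a consumption of 22 * 9 = 198.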
consumption = binarray2int(epsilon) * binarray2int(gamma)
return consumption
def main() -> None:
print(solve())
if __name__ == "__main__":
main()
| 25.235294 | 93 | 0.699301 | 120 | 858 | 4.85 | 0.6 | 0.068729 | 0.034364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020468 | 0.202797 | 858 | 33 | 94 | 26 | 0.830409 | 0.300699 | 0 | 0 | 0 | 0 | 0.045378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.235294 | 0 | 0.411765 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f6806332e710a076973a5dac22b128dd421ede3 | 6,034 | py | Python | boardfarm/devices/friendly_acs_soap.py | superice119/boardfarm | c525b4da94bf745d30c4a9f675aa4a7ae184b1fd | [
"BSD-3-Clause-Clear"
] | null | null | null | boardfarm/devices/friendly_acs_soap.py | superice119/boardfarm | c525b4da94bf745d30c4a9f675aa4a7ae184b1fd | [
"BSD-3-Clause-Clear"
] | null | null | null | boardfarm/devices/friendly_acs_soap.py | superice119/boardfarm | c525b4da94bf745d30c4a9f675aa4a7ae184b1fd | [
"BSD-3-Clause-Clear"
] | null | null | null | import os
import xmltodict
from boardfarm.lib.bft_logging import LoggerMeta
from zeep import Client
from zeep.wsse.username import UsernameToken
if "BFT_DEBUG" in os.environ:
import logging.config
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'zeep.transports': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console'],
},
}
})
class FriendlyACS():
__metaclass__ = LoggerMeta
log = ""
log_calls = ""
model = "friendly_acs_soap"
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.username = self.kwargs['username']
self.password = self.kwargs['password']
self.ipaddr = self.kwargs['ipaddr']
self.wsdl = "http://" + self.kwargs[
'ipaddr'] + "/ftacsws/acsws.asmx?WSDL"
self.client = Client(wsdl=self.wsdl,
wsse=UsernameToken(self.username, self.password))
self.port = self.kwargs.get('port', '80')
self.log = ""
name = "acs_server"
def __str__(self):
return "FriendlyACS"
def close(self):
pass
def get(self, cpeid, param, source=0):
# source = 0 (CPE), source = 1 (DB)
ret = self.client.service.FTGetDeviceParameters(devicesn=cpeid,
source=source,
arraynames=[param])
        if ret['Params'] is None:
return None
else:
return ret['Params']['ParamWSDL'][0]['Value']
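    # Example usage (hypothetical CPE serial and TR-069 parameter path):
    #   version = acs.get('ABC123', 'Device.DeviceInfo.SoftwareVersion')
    # With source=0 the live CPE is queried; with source=1 the value cached
    # in the ACS database is read instead.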
def set(self, cpeid, attr, value):
array_of_param = self.client.get_type(
'{http://www.friendly-tech.com}ArrayOfParam')
arr = array_of_param([{'Name': attr, 'Value': value}])
# TODO: investigate push, endsession, reprovision, priority to make sure they are what we want
self.client.service.FTSetDeviceParameters(devicesn=cpeid, \
arrayparams=arr, \
push=True, \
endsession=False, \
priority=0)
def rpc(self, cpeid, name, content):
        '''Invoke a custom RPC on a specific CPE.'''
ret = self.client.service.FTRPCInvoke(devicesn=cpeid,
rpcname=name,
soapcontent=content)
return xmltodict.parse(ret['Response'])
def rpc_GetParameterAttributes(self, cpeid, name):
        content = '<cwmp:GetParameterAttributes xmlns:cwmp="urn:dslforum-org:cwmp-1-0"> <ParameterNames arrayType="xsd:string[1]"> <string>%s</string> </ParameterNames> </cwmp:GetParameterAttributes>' % name
ret = self.rpc(cpeid, name, content)
return ret['cwmp:GetParameterAttributesResponse']['ParameterList'][
'ParameterAttributeStruct']
def rpc_GetParameterValues(self, cpeid, name):
content = '<cwmp:GetParameterValues xmlns:cwmp="urn:dslforum-org:cwmp-1-0"> <ParameterNames arrayType="xsd:string[1]"> <string>%s</string> </ParameterNames> </cwmp:GetParameterValues>' % name
ret = self.rpc(cpeid, name, content)
return ret['cwmp:GetParameterValuesResponse']['ParameterList'][
'ParameterValueStruct']['Value']['#text']
def getcurrent(self, cpeid, param, source=0):
self.client.service.FTGetDeviceParameters(devicesn=cpeid,
source=source,
arraynames=[param + '.'])
def rpc_SetParameterAttributes(self, cpeid, name, set_value):
content = '<cwmp:SetParameterAttributes xmlns:cwmp="urn:dslforum-org:cwmp-1-0"> <ParameterList arrayType="cwmp:SetParameterAttributesStruct[1]"> <SetParameterAttributesStruct> <Name>%s</Name> <NotificationChange>1</NotificationChange> <Notification>%s</Notification> <AccessListChange>0</AccessListChange> <AccessList></AccessList> </SetParameterAttributesStruct> </ParameterList> </cwmp:SetParameterAttributes>' % (
name, set_value)
self.rpc(cpeid, name, content)
def rpc_AddObject(self, cpeid, obj_name):
content = '<cwmp:AddObject xmlns:cwmp="urn:dslforum-org:cwmp-1-0"> <ObjectName>%s.</ObjectName> <ParameterKey></ParameterKey> </cwmp:AddObject>' % obj_name
self.rpc(cpeid, obj_name, content)
def rpc_DeleteObject(self, cpeid, obj_name):
content = '<cwmp:DeleteObject xmlns:cwmp="urn:dslforum-org:cwmp-1-0"> <ObjectName>%s.</ObjectName> <ParameterKey></ParameterKey> </cwmp:DeleteObject>' % obj_name
self.rpc(cpeid, obj_name, content)
def is_online(self, cpeid):
ret = self.client.service.FTCPEStatus(devicesn=cpeid)
return ret['Online']
def delete_cpe(self, cpeid):
print("WARN: not impl for this class")
pass
if __name__ == '__main__':
import sys
if ':' in sys.argv[1]:
ip = sys.argv[1].split(':')[0]
port = sys.argv[1].split(':')[1]
else:
ip = sys.argv[1]
port = 80
acs = FriendlyACS(ipaddr=ip,
port=port,
username=sys.argv[2],
password=sys.argv[3])
ret = acs.rpc_GetParameterAttributes('DEAP815610DA',
'Device.WiFi.SSID.1.SSID')
print(ret['Notification'])
ret = acs.get('DEAP815610DA', 'Device.DeviceInfo.SoftwareVersion')
print(ret)
ret = acs.get('DEAP815610DA', 'Device.WiFi.SSID.1.SSID')
print(ret)
| 37.478261 | 424 | 0.560988 | 582 | 6,034 | 5.735395 | 0.297251 | 0.029658 | 0.02876 | 0.029958 | 0.289395 | 0.244757 | 0.22858 | 0.22858 | 0.196525 | 0.174955 | 0 | 0.012931 | 0.307922 | 6,034 | 160 | 425 | 37.7125 | 0.786398 | 0.026516 | 0 | 0.114754 | 0 | 0.040984 | 0.292533 | 0.168428 | 0 | 0 | 0 | 0.00625 | 0 | 1 | 0.114754 | false | 0.040984 | 0.057377 | 0.008197 | 0.278689 | 0.032787 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f68bad39e47426834db4d7281d8f521915704a1 | 1,550 | py | Python | seisgen/util_SPECFEM3D/ibool_reader.py | Liang-Ding/seisgen | 59688b88ecfb52c22824f5fe60b17c7a7e37f3b0 | [
"MIT"
] | 5 | 2021-11-22T23:54:01.000Z | 2021-12-06T06:17:45.000Z | seisgen/util_SPECFEM3D/ibool_reader.py | Liang-Ding/seisgen | 59688b88ecfb52c22824f5fe60b17c7a7e37f3b0 | [
"MIT"
] | null | null | null | seisgen/util_SPECFEM3D/ibool_reader.py | Liang-Ding/seisgen | 59688b88ecfb52c22824f5fe60b17c7a7e37f3b0 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------
# ibool reader.
#
# Author: Liang Ding
# Email: myliang.ding@mail.utoronto.ca
# -------------------------------------------------------------------
from seisgen.util_SPECFEM3D import NGLLX, NGLLY, NGLLZ, CONSTANT_INDEX_27_GLL
from scipy.io import FortranFile
import numpy as np
def read_ibool_by_scipy(ibool_file, NSPEC):
'''
Read the ibool file in the folder */model3D/
'''
f = FortranFile(ibool_file, 'r')
ibool = f.read_reals(dtype='int32')
f.close()
ibool = np.reshape(ibool, (NSPEC, NGLLX * NGLLY * NGLLZ))
# The index in *.ibool files starts from 1.
ibool = ibool - 1
return ibool
def DEnquire_Element(ibool_file, index_element, NSPEC):
    '''Read the indexes of the 27 GLL points where the SGT has been stored in the selected element.'''
ibool = read_ibool_by_scipy(ibool_file, NSPEC)
if ibool.__len__() <= index_element:
return np.zeros(27)
else:
NGLLX_N3 = 3
NGLLY_N3 = 3
NGLLZ_N3 = 3
# The global index in slice of selected GLL points.
gll_array = ibool[index_element][CONSTANT_INDEX_27_GLL]
# sort the index.
gll_points = []
gll_array = np.reshape(gll_array, [NGLLZ_N3, NGLLY_N3, NGLLX_N3])
for i in range(NGLLX_N3):
for j in range(NGLLY_N3):
for k in range(NGLLZ_N3):
gll_points.append(gll_array[k, j, i])
gll_points = np.asarray(gll_points)
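        # Sketch of the reordering (assuming the stored order varies x
        # fastest): the triple loop above re-emits the 27 points with z
        # varying fastest, so input position (i=1, j=0, k=2) lands at
        # output offset 1*9 + 0*3 + 2 = 11.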
return gll_points
| 29.245283 | 97 | 0.582581 | 204 | 1,550 | 4.210784 | 0.362745 | 0.073341 | 0.034924 | 0.041909 | 0.069849 | 0.069849 | 0.069849 | 0 | 0 | 0 | 0 | 0.022222 | 0.245161 | 1,550 | 52 | 98 | 29.807692 | 0.711966 | 0.287742 | 0 | 0 | 0 | 0 | 0.005607 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.111111 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f69217e425b162dc5a97da49f5c472c66b388fd | 5,882 | py | Python | GasGrid/OtherTools/Data Collection Tools/nts_data_collect.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 21 | 2021-03-08T01:58:25.000Z | 2022-03-09T15:46:16.000Z | GasGrid/OtherTools/Data Collection Tools/nts_data_collect.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 63 | 2021-05-04T15:05:30.000Z | 2022-03-23T14:32:29.000Z | GasGrid/OtherTools/Data Collection Tools/nts_data_collect.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 011aee78c016b76762eaf511c78fabe3f98189f4 | [
"MIT"
] | 15 | 2021-03-08T07:52:03.000Z | 2022-03-29T04:46:20.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
import io
from tabulate import tabulate
import os
import numpy as np
import bs4 as bs
import pandas as pd
from tqdm import tqdm
import time
import re
def get_data(from_date,to_date,data):
'''
    DESCRIPTION:
    get_data obtains data between two given dates from the National Grid's
    online database, using the Selenium library to interact with the webpage
    and Beautiful Soup to extract the HTML tables.
    INPUTS:
    from_date: The start date of the data to be obtained, in the form 'DD/MM/YYYY'
    to_date: The end date of the data to be obtained, in the form 'DD/MM/YYYY'
    data: An array of consecutive folder numbers to click on/obtain data from
    OUTPUTS:
    table: A pandas dataframe containing all of the information specified
    NOTES:
    The 'data' array is currently the least flexible aspect of this code, as
    it is obtained through manual interaction with the online database.
    However, once obtained for the data needed it can be stored and reused,
    which is fine for now as only certain information is required from the
    database.
'''
#--- opening the database in a firefox window ---#
driver = webdriver.Firefox()
driver.get("https://mip-prd-web.azurewebsites.net/DataItemExplorer")
wait = WebDriverWait(driver,10)
#--- clicking through the file tree ---#
base_text = '/html/body/div[1]/div/div[2]/div[2]/div/div'
end_text = '/span/span[1]'
add_text = ''
for i in range(len(data)):
if i == len(data)-1:
end_text = '/span/span[2]'
add_text += '/ul/li['+data[i]+']'
complete_text = base_text + add_text + end_text
wait.until(ec.visibility_of_element_located\
((By.XPATH,complete_text))).click()
#--- inserting dates required ---#
wait.until(ec.element_to_be_clickable((By.ID,"applicableForRadioButton"))).click()
wait.until(ec.element_to_be_clickable((By.ID,"FromDateTime"))).clear()
wait.until(ec.element_to_be_clickable((By.ID,"FromDateTime"))).send_keys(from_date)
wait.until(ec.element_to_be_clickable((By.ID,"ToDateTime"))).clear()
wait.until(ec.element_to_be_clickable((By.ID,"ToDateTime"))).send_keys(to_date)
wait.until(ec.visibility_of_element_located((By.ID,"viewReportButton"))).click()
#--- creation of pandas dataframe ---#
header = []
for i in range(6):
header.append(wait.until(ec.visibility_of_element_located\
((By.XPATH,"/html/body/div[1]/div[2]/table/thead/tr/th["+str(i+1)+"]"))).text)
html = driver.page_source
soup = bs.BeautifulSoup(html,'lxml')
driver.quit()
table = []
for tr in soup.find_all('tr')[1:]:
tds = tr.find_all('td')
row = []
for i in tds:
row.append(i.text)
table.append(row)
table = pd.DataFrame(table)
table.columns = header
return table
'''
EXAMPLE CODE FOR get_data
'''
# actual_flows = ['14','4']
# comp_weather_var_actual = ['18','1','1']
# comp_weather_table = get_data('01/10/2020','20/10/2020',comp_weather_var_actual)
# print(comp_weather_table)
def real_time_intakes():
'''
DESCRIPTION:
    Polls the National Grid online publication of instantaneous flows into
    the NTS, prints a combined table of zonal and terminal intakes
    (units mcm/day), exports it to 'Intakedata.xlsx', and refreshes the
    figures every two minutes.
'''
#--- opening intakes webpage ---#
os.system('cls' if os.name == 'nt' else 'clear')
while True:
driver = webdriver.Firefox()
try:
driver.get("https://mip-prd-web.azurewebsites.net/InstantaneousView")
except:
print('ONLINE DATABASE UNAVAILABLE, CHECK NETWORK CONNECTION')
break
wait = WebDriverWait(driver,10)
html = driver.page_source
#--- converting all the information to a table ---#
#--- as all data presented in a large html table ---#
soup = bs.BeautifulSoup(html,'lxml')
driver.quit()
table = []
for tr in soup.find_all('tr')[1:]:
tds = tr.find_all('td')
row = []
for i in tds:
row.append(i.text)
table.append(row)
table = pd.DataFrame(table)
        #--- obtaining only the required values ---#
table = table.to_numpy()[4:,1:]
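        # NOTE: the slice offsets below (rows 1:29 for zones, 47:59 for
        # terminals, column 6 for the latest reading) assume the current
        # layout of the instantaneous-view page and will break if the
        # page structure changes.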
zone_names = table[1:29,0]
latest_zone_value = table[1:29,6]
latest_zone_value[6] = latest_zone_value[6][1:]
        latest_zone_value = latest_zone_value.astype(float)  # np.float is a deprecated alias
zone_supply = np.concatenate(([zone_names],[latest_zone_value]),axis=0).T
zone_supply_pd = pd.DataFrame(zone_supply)
terminal_names = table[47:59,0]
        latest_terminal_value = table[47:59, 6].astype(float)
terminal_supply = np.concatenate(([terminal_names],[latest_terminal_value]),axis=0).T
terminal_supply_pd = pd.DataFrame(terminal_supply)
overall_df = pd.concat((zone_supply_pd,terminal_supply_pd),axis=1,ignore_index=True)
        header = ['Zone Supply', 'Instantaneous Flow (mcm/day)', 'Terminal Supply', 'Instantaneous Flow (mcm/day)']
print(tabulate(overall_df,headers=header,showindex="never"))
overall_df.to_excel('Intakedata.xlsx')
for i in reversed(range(120)):
print('TIME FOR NEXT UPDATE: ',i,' SECONDS',end='\r')
time.sleep(1)
os.system('cls' if os.name == 'nt' else 'clear')
return
'''
EXAMPLE CODE FOR real_time_intakes()
'''
real_time_intakes() | 38.444444 | 110 | 0.66032 | 819 | 5,882 | 4.60928 | 0.312576 | 0.019073 | 0.023311 | 0.023841 | 0.267815 | 0.227815 | 0.227815 | 0.227815 | 0.196821 | 0.148344 | 0 | 0.015234 | 0.218803 | 5,882 | 153 | 111 | 38.444444 | 0.806311 | 0.276947 | 0 | 0.297872 | 0 | 0.021277 | 0.131806 | 0.0271 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0.159574 | 0 | 0.202128 | 0.031915 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f6cf0a69c1e3cc460fbf95605b3932048d1765d | 57,972 | py | Python | jupyterhub/src/jupyterhub_config.py | kskels/workshop-spawner | 71f27b82ee1c65d7a61b41e784ec40b43d25ddd5 | [
"Apache-2.0"
] | null | null | null | jupyterhub/src/jupyterhub_config.py | kskels/workshop-spawner | 71f27b82ee1c65d7a61b41e784ec40b43d25ddd5 | [
"Apache-2.0"
] | null | null | null | jupyterhub/src/jupyterhub_config.py | kskels/workshop-spawner | 71f27b82ee1c65d7a61b41e784ec40b43d25ddd5 | [
"Apache-2.0"
] | null | null | null | # This file provides common configuration for the different ways that
# the deployment can run. Configuration specific to the different modes
# will be read from separate files at the end of this configuration
# file.
import os
import json
import string
import yaml
import threading
import time
import requests
import wrapt
from tornado import gen
from kubernetes.client.rest import ApiException
from kubernetes.client.configuration import Configuration
from kubernetes.config.incluster_config import load_incluster_config
from kubernetes.client.api_client import ApiClient
from openshift.dynamic import DynamicClient, Resource
from openshift.dynamic.exceptions import ResourceNotFoundError
# The workshop name and configuration type are passed in through the
# environment. The application name should be the value used for the
# deployment, and more specifically, must match the name of the route.
workshop_name = os.environ.get('WORKSHOP_NAME')
application_name = os.environ.get('APPLICATION_NAME')
if not workshop_name:
workshop_name = 'homeroom'
if not application_name:
application_name = workshop_name
print('INFO: Workshop name is %r.' % workshop_name)
print('INFO: Application name is %r.' % application_name)
configuration_type = os.environ.get('CONFIGURATION_TYPE', 'hosted-workshop')
print('INFO: Configuration type is %r.' % configuration_type)
homeroom_link = os.environ.get('HOMEROOM_LINK')
print('INFO: Homeroom link is %r.' % homeroom_link)
homeroom_name = os.environ.get('HOMEROOM_NAME')
print('INFO: Homeroom name is %r.' % homeroom_name)
# Work out the service account name and name of the namespace that the
# deployment is in.
service_account_path = '/var/run/secrets/kubernetes.io/serviceaccount'
service_account_name = '%s-spawner' % application_name
print('INFO: Service account name is %r.' % service_account_name)
with open(os.path.join(service_account_path, 'namespace')) as fp:
namespace = fp.read().strip()
print('INFO: Namespace is %r.' % namespace)
full_service_account_name = 'system:serviceaccount:%s:%s' % (
namespace, service_account_name)
print('INFO: Full service account name is %r.' % full_service_account_name)
# Determine the Kubernetes REST API endpoint and cluster information,
# including working out the address of the internal image registry.
kubernetes_service_host = os.environ['KUBERNETES_SERVICE_HOST']
kubernetes_service_port = os.environ['KUBERNETES_SERVICE_PORT']
kubernetes_server_url = 'https://%s:%s' % (kubernetes_service_host,
kubernetes_service_port)
kubernetes_server_version_url = '%s/version' % kubernetes_server_url
with requests.Session() as session:
response = session.get(kubernetes_server_version_url, verify=False)
kubernetes_server_info = json.loads(response.content.decode('UTF-8'))
image_registry = 'image-registry.openshift-image-registry.svc:5000'
if kubernetes_server_info['major'] == '1':
if kubernetes_server_info['minor'] in ('10', '10+', '11', '11+'):
image_registry = 'docker-registry.default.svc:5000'
# Initialise the client for the REST API used when doing configuration.
#
# XXX Currently have a workaround here for OpenShift 4.0 beta versions
# which disables verification of the certificate. If don't use this the
# Python openshift/kubernetes clients will fail. We also disable any
# warnings from urllib3 to get rid of the noise in the logs this creates.
load_incluster_config()
import urllib3
urllib3.disable_warnings()
instance = Configuration()
instance.verify_ssl = False
Configuration.set_default(instance)
api_client = DynamicClient(ApiClient())
try:
image_stream_resource = api_client.resources.get(
api_version='image.openshift.io/v1', kind='ImageStream')
except ResourceNotFoundError:
image_stream_resource = None
try:
route_resource = api_client.resources.get(
api_version='route.openshift.io/v1', kind='Route')
except ResourceNotFoundError:
route_resource = None
ingress_resource = api_client.resources.get(
api_version='networking.k8s.io/v1', kind='Ingress')
# Create a background thread to dynamically calculate back link to the
# Homeroom workshop picker if no explicit link is provided, but group is.
def watch_for_homeroom():
global homeroom_link
while True:
if route_resource is not None:
try:
route = route_resource.get(namespace=namespace, name=homeroom_name)
scheme = 'http'
if route.metadata.annotations:
if route.metadata.annotations['homeroom/index'] == homeroom_name:
if route.tls and route.tls.termination:
scheme = 'https'
link = '%s://%s' % (scheme, route.spec.host)
if link != homeroom_link:
print('INFO: Homeroom link set to %s.' % link)
homeroom_link = link
except ApiException as e:
if e.status != 404:
print('ERROR: Error looking up homeroom route. %s' % e)
except Exception as e:
print('ERROR: Error looking up homeroom route. %s' % e)
try:
ingress = ingress_resource.get(namespace=namespace, name=homeroom_name)
scheme = 'http'
if ingress.metadata.annotations:
if ingress.metadata.annotations['homeroom/index'] == homeroom_name:
if ingress.tls:
scheme = 'https'
link = '%s://%s' % (scheme, ingress.spec.rules[0].host)
if link != homeroom_link:
print('INFO: Homeroom link set to %s.' % link)
homeroom_link = link
except ApiException as e:
if e.status != 404:
print('ERROR: Error looking up homeroom ingress. %s' % e)
except Exception as e:
print('ERROR: Error looking up homeroom ingress. %s' % e)
time.sleep(15)
if not homeroom_link and homeroom_name:
thread = threading.Thread(target=watch_for_homeroom)
thread.daemon = True
thread.start()
# Workaround bug in minishift where a service cannot be contacted from a
# pod which backs the service. For further details see the minishift issue
# https://github.com/minishift/minishift/issues/2400.
#
# What these workarounds do is monkey patch the JupyterHub proxy client
# API code, and the code for creating the environment for local service
# processes, and when it sees something which uses the service name as
# the target in a URL, it replaces it with localhost. These work because
# the proxy/service processes are in the same pod. It is not possible to
# change hub_connect_ip to localhost because that is passed to other
# pods which need to contact back to JupyterHub, and so it must be left
# as the service name.
@wrapt.patch_function_wrapper('jupyterhub.proxy', 'ConfigurableHTTPProxy.add_route')
def _wrapper_add_route(wrapped, instance, args, kwargs):
def _extract_args(routespec, target, data, *_args, **_kwargs):
return (routespec, target, data, _args, _kwargs)
routespec, target, data, _args, _kwargs = _extract_args(*args, **kwargs)
old = 'http://%s:%s' % (c.JupyterHub.hub_connect_ip, c.JupyterHub.hub_port)
new = 'http://127.0.0.1:%s' % c.JupyterHub.hub_port
if target.startswith(old):
target = target.replace(old, new)
return wrapped(routespec, target, data, *_args, **_kwargs)
@wrapt.patch_function_wrapper('jupyterhub.spawner', 'LocalProcessSpawner.get_env')
def _wrapper_get_env(wrapped, instance, args, kwargs):
env = wrapped(*args, **kwargs)
target = env.get('JUPYTERHUB_API_URL')
old = 'http://%s:%s' % (c.JupyterHub.hub_connect_ip, c.JupyterHub.hub_port)
new = 'http://127.0.0.1:%s' % c.JupyterHub.hub_port
if target and target.startswith(old):
target = target.replace(old, new)
env['JUPYTERHUB_API_URL'] = target
return env
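# As a concrete illustration (assumed values): with hub_connect_ip of
# 'myapp-spawner' and hub_port 8081, a target such as
# 'http://myapp-spawner:8081/hub/api' is rewritten to
# 'http://127.0.0.1:8081/hub/api', which works because the proxy/service
# processes run in the same pod as the hub.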
# Define all the defaults for the JupyterHub instance for our setup.
c.JupyterHub.port = 8080
c.JupyterHub.hub_ip = '0.0.0.0'
c.JupyterHub.hub_port = 8081
c.JupyterHub.hub_connect_ip = '%s-spawner' % application_name
c.ConfigurableHTTPProxy.api_url = 'http://127.0.0.1:8082'
c.Spawner.start_timeout = 180
c.Spawner.http_timeout = 60
c.KubeSpawner.port = 10080
c.KubeSpawner.common_labels = {
'app': '%s' % application_name
}
c.KubeSpawner.extra_labels = {
'spawner': configuration_type,
'class': 'session',
'user': '{username}'
}
c.KubeSpawner.uid = os.getuid()
c.KubeSpawner.fs_gid = os.getuid()
c.KubeSpawner.extra_annotations = {
"alpha.image.policy.openshift.io/resolve-names": "*"
}
c.KubeSpawner.cmd = ['start-singleuser.sh']
c.KubeSpawner.pod_name_template = '%s-user-{username}' % application_name
c.JupyterHub.admin_access = False
if os.environ.get('JUPYTERHUB_COOKIE_SECRET'):
c.JupyterHub.cookie_secret = os.environ[
'JUPYTERHUB_COOKIE_SECRET'].encode('UTF-8')
else:
c.JupyterHub.cookie_secret_file = '/opt/app-root/data/cookie_secret'
c.JupyterHub.db_url = '/opt/app-root/data/database.sqlite'
c.JupyterHub.authenticator_class = 'tmpauthenticator.TmpAuthenticator'
c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
c.JupyterHub.logo_file = '/opt/app-root/src/images/HomeroomIcon.png'
c.Spawner.environment = dict()
c.JupyterHub.services = []
c.KubeSpawner.init_containers = []
c.KubeSpawner.extra_containers = []
c.JupyterHub.extra_handlers = []
# Determine amount of memory to allocate for workshop environment.
def convert_size_to_bytes(size):
multipliers = {
'k': 1000,
'm': 1000**2,
'g': 1000**3,
't': 1000**4,
'ki': 1024,
'mi': 1024**2,
'gi': 1024**3,
'ti': 1024**4,
}
size = str(size)
for suffix in multipliers:
if size.lower().endswith(suffix):
return int(size[0:-len(suffix)]) * multipliers[suffix]
else:
if size.lower().endswith('b'):
return int(size[0:-1])
try:
return int(size)
except ValueError:
raise RuntimeError('"%s" is not a valid memory specification. Must be an integer or a string with suffix K, M, G, T, Ki, Mi, Gi or Ti.' % size)
c.Spawner.mem_limit = convert_size_to_bytes(
os.environ.get('WORKSHOP_MEMORY', '512Mi'))
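# For illustration, convert_size_to_bytes maps:
#   '512Mi' -> 512 * 1024**2 = 536870912
#   '1G'    -> 1000**3       = 1000000000
#   '4096'  -> 4096 (a bare integer passes through unchanged)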
# Override the image details with that for the terminal or dashboard
# image being used. The default is to assume that a image stream with
# the same name as the application name is being used. The call to the
# function resolve_image_name() is to try and resolve to image registry
# when using image stream. This is to workaround issue that many
# clusters do not have image policy controller configured correctly.
#
# Note that we set the policy that images will always be pulled to the
# node each time when the image name is not explicitly provided. This is
# so that during development, changes to the terminal image will always
# be picked up. Someone developing a new image need only update the
# 'latest' tag on the image using 'oc tag'.
#
# Check for TERMINAL_IMAGE is for backward compatibility. Should use
# WORKSHOP_IMAGE now.
workshop_image = os.environ.get('WORKSHOP_IMAGE')
if not workshop_image:
workshop_image = os.environ.get('TERMINAL_IMAGE')
if not workshop_image:
c.KubeSpawner.image_pull_policy = 'Always'
workshop_image = '%s-session:latest' % application_name
def resolve_image_name(name):
# If no image stream resource we are on plain Kubernetes.
if image_stream_resource is None:
return name
# If the image name contains a slash, we assume it is already
# referring to an image on some image registry. Even if it does
# not contain a slash, it may still be hosted on docker.io.
if name.find('/') != -1:
return name
# Separate actual source image name and tag for the image from the
# name. If the tag is not supplied, default to 'latest'.
parts = name.split(':', 1)
if len(parts) == 1:
source_image, tag = parts, 'latest'
else:
source_image, tag = parts
# See if there is an image stream in the current project with the
# target name.
try:
image_stream = image_stream_resource.get(namespace=namespace,
name=source_image)
except ApiException as e:
if e.status not in (403, 404):
raise
return name
# If we get here then the image stream exists with the target name.
# We need to determine if the tag exists. If it does exist, we
# extract out the full name of the image including the reference
# to the image registry it is hosted on.
if image_stream.status.tags:
for entry in image_stream.status.tags:
if entry.tag == tag:
registry_image = image_stream.status.dockerImageRepository
if registry_image:
return '%s:%s' % (registry_image, tag)
    # Use the original value if a matching tag can't be found.
return name
c.KubeSpawner.image = resolve_image_name(workshop_image)
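# For illustration (hypothetical names): a reference containing a slash,
# e.g. 'quay.io/org/image:1.0', is returned unchanged, while a bare image
# stream name such as 'myapp-session:latest' resolves to something like
# 'image-registry.openshift-image-registry.svc:5000/<namespace>/myapp-session:latest'
# when a matching tag exists on the image stream.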
# Work out hostname for the exposed route of the JupyterHub server. This
# is tricky as we need to use the REST API to query it. This is used
# when needing to do OAuth.
public_hostname = os.environ.get('PUBLIC_HOSTNAME')
public_protocol = os.environ.get('PUBLIC_PROTOCOL')
route_name = '%s-spawner' % application_name
if not public_hostname:
if route_resource is not None:
routes = route_resource.get(namespace=namespace)
for route in routes.items:
if route.metadata.name == route_name:
if not public_protocol:
public_protocol = route.spec.tls and 'https' or 'http'
public_hostname = route.spec.host
break
if not public_hostname:
ingresses = ingress_resource.get(namespace=namespace)
for ingresses in ingresses.items:
if ingresses.metadata.name == route_name:
if not public_protocol:
public_protocol = ingresses.spec.tls and 'https' or 'http'
public_hostname = ingresses.spec.rules[0].host
break
if not public_hostname:
raise RuntimeError('Cannot calculate external host name for the spawner.')
c.Spawner.environment['JUPYTERHUB_ROUTE'] = '%s://%s' % (public_protocol, public_hostname)
# Work out the subdomain under which applications hosted in the cluster
# are hosted. Calculate this from the route for the spawner route if
# not supplied explicitly.
cluster_subdomain = os.environ.get('CLUSTER_SUBDOMAIN')
if not cluster_subdomain:
cluster_subdomain = '.'.join(public_hostname.split('.')[1:])
c.Spawner.environment['CLUSTER_SUBDOMAIN'] = cluster_subdomain
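# e.g. (assumed route host) a public hostname of
# 'myapp-spawner.apps.example.com' yields a cluster subdomain of
# 'apps.example.com', under which per-user application routes are exposed.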
# The terminal image will normally work out what versions of OpenShift
# and Kubernetes command line tools should be used, based on the version
# of OpenShift which is being used. Allow these to be overridden if
# necessary.
if os.environ.get('OC_VERSION'):
c.Spawner.environment['OC_VERSION'] = os.environ.get('OC_VERSION')
if os.environ.get('ODO_VERSION'):
c.Spawner.environment['ODO_VERSION'] = os.environ.get('ODO_VERSION')
if os.environ.get('KUBECTL_VERSION'):
c.Spawner.environment['KUBECTL_VERSION'] = os.environ.get('KUBECTL_VERSION')
# Common functions for creating projects, injecting resources etc.
namespace_resource = api_client.resources.get(
api_version='v1', kind='Namespace')
service_account_resource = api_client.resources.get(
api_version='v1', kind='ServiceAccount')
secret_resource = api_client.resources.get(
api_version='v1', kind='Secret')
cluster_role_resource = api_client.resources.get(
api_version='rbac.authorization.k8s.io/v1', kind='ClusterRole')
role_binding_resource = api_client.resources.get(
api_version='rbac.authorization.k8s.io/v1', kind='RoleBinding')
limit_range_resource = api_client.resources.get(
api_version='v1', kind='LimitRange')
resource_quota_resource = api_client.resources.get(
api_version='v1', kind='ResourceQuota')
service_resource = api_client.resources.get(
api_version='v1', kind='Service')
namespace_template = string.Template("""
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "${name}",
"labels": {
"app": "${application_name}",
"spawner": "${configuration}",
"class": "session",
"user": "${username}"
},
"annotations": {
"spawner/requestor": "${requestor}",
"spawner/namespace": "${namespace}",
"spawner/deployment": "${deployment}",
"spawner/account": "${account}",
"spawner/session": "${session}"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ClusterRole",
"blockOwnerDeletion": false,
"controller": true,
"name": "${owner}",
"uid": "${uid}"
}
]
}
}
""")
service_account_template = string.Template("""
{
"kind": "ServiceAccount",
"apiVersion": "v1",
"metadata": {
"name": "${name}",
"labels": {
"app": "${application_name}",
"spawner": "${configuration}",
"class": "session",
"user": "${username}"
}
}
}
""")
role_binding_template = string.Template("""
{
"kind": "RoleBinding",
"apiVersion": "rbac.authorization.k8s.io/v1",
"metadata": {
"name": "${name}-${tag}",
"labels": {
"app": "${application_name}",
"spawner": "${configuration}",
"class": "session",
"user": "${username}"
}
},
"subjects": [
{
"kind": "ServiceAccount",
"namespace": "${namespace}",
"name": "${name}"
}
],
"roleRef": {
"apiGroup": "rbac.authorization.k8s.io",
"kind": "ClusterRole",
"name": "${role}"
}
}
""")
resource_budget_mapping = {
"small": {
"resource-limits" : {
"kind": "LimitRange",
"apiVersion": "v1",
"metadata": {
"name": "resource-limits",
"annotations": {
"resource-budget": "small"
}
},
"spec": {
"limits": [
{
"type": "Pod",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "1",
"memory": "1Gi"
}
},
{
"type": "Container",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "1",
"memory": "1Gi"
},
"default": {
"cpu": "250m",
"memory": "256Mi"
},
"defaultRequest": {
"cpu": "50m",
"memory": "128Mi"
}
},
{
"type": "PersistentVolumeClaim",
"min": {
"storage": "1Gi"
},
"max": {
"storage": "1Gi"
}
}
]
}
},
"compute-resources" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources",
"annotations": {
"resource-budget": "small"
}
},
"spec": {
"hard": {
"limits.cpu": "1",
"limits.memory": "1Gi"
},
"scopes": [
"NotTerminating"
]
}
},
"compute-resources-timebound" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources-timebound",
"annotations": {
"resource-budget": "small"
}
},
"spec": {
"hard": {
"limits.cpu": "1",
"limits.memory": "1Gi"
},
"scopes": [
"Terminating"
]
}
},
"object-counts" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "object-counts",
"annotations": {
"resource-budget": "small"
}
},
"spec": {
"hard": {
"persistentvolumeclaims": "3",
"replicationcontrollers": "10",
"secrets": "20",
"services": "5"
}
}
},
},
"medium": {
"resource-limits" : {
"kind": "LimitRange",
"apiVersion": "v1",
"metadata": {
"name": "resource-limits",
"annotations": {
"resource-budget": "medium"
}
},
"spec": {
"limits": [
{
"type": "Pod",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "2",
"memory": "2Gi"
}
},
{
"type": "Container",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "2",
"memory": "2Gi"
},
"default": {
"cpu": "500m",
"memory": "512Mi"
},
"defaultRequest": {
"cpu": "50m",
"memory": "128Mi"
}
},
{
"type": "PersistentVolumeClaim",
"min": {
"storage": "1Gi"
},
"max": {
"storage": "5Gi"
}
}
]
}
},
"compute-resources" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources",
"annotations": {
"resource-budget": "medium"
}
},
"spec": {
"hard": {
"limits.cpu": "2",
"limits.memory": "2Gi"
},
"scopes": [
"NotTerminating"
]
}
},
"compute-resources-timebound" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources-timebound",
"annotations": {
"resource-budget": "medium"
}
},
"spec": {
"hard": {
"limits.cpu": "2",
"limits.memory": "2Gi"
},
"scopes": [
"Terminating"
]
}
},
"object-counts" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "object-counts",
"annotations": {
"resource-budget": "medium"
}
},
"spec": {
"hard": {
"persistentvolumeclaims": "6",
"replicationcontrollers": "15",
"secrets": "25",
"services": "10"
}
}
},
},
"large": {
"resource-limits" : {
"kind": "LimitRange",
"apiVersion": "v1",
"metadata": {
"name": "resource-limits",
"annotations": {
"resource-budget": "large"
}
},
"spec": {
"limits": [
{
"type": "Pod",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "4",
"memory": "4Gi"
}
},
{
"type": "Container",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "4",
"memory": "4Gi"
},
"default": {
"cpu": "500m",
"memory": "1Gi"
},
"defaultRequest": {
"cpu": "50m",
"memory": "128Mi"
}
},
{
"type": "PersistentVolumeClaim",
"min": {
"storage": "1Gi"
},
"max": {
"storage": "10Gi"
}
}
]
}
},
"compute-resources" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources",
"annotations": {
"resource-budget": "large"
}
},
"spec": {
"hard": {
"limits.cpu": "4",
"limits.memory": "4Gi"
},
"scopes": [
"NotTerminating"
]
}
},
"compute-resources-timebound" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources-timebound",
"annotations": {
"resource-budget": "large"
}
},
"spec": {
"hard": {
"limits.cpu": "4",
"limits.memory": "4Gi"
},
"scopes": [
"Terminating"
]
}
},
"object-counts" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "object-counts",
"annotations": {
"resource-budget": "large"
}
},
"spec": {
"hard": {
"persistentvolumeclaims": "12",
"replicationcontrollers": "25",
"secrets": "35",
"services": "20"
}
}
}
},
"x-large": {
"resource-limits" : {
"kind": "LimitRange",
"apiVersion": "v1",
"metadata": {
"name": "resource-limits",
"annotations": {
"resource-budget": "x-large"
}
},
"spec": {
"limits": [
{
"type": "Pod",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "8",
"memory": "8Gi"
}
},
{
"type": "Container",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "8",
"memory": "8Gi"
},
"default": {
"cpu": "500m",
"memory": "2Gi"
},
"defaultRequest": {
"cpu": "50m",
"memory": "128Mi"
}
},
{
"type": "PersistentVolumeClaim",
"min": {
"storage": "1Gi"
},
"max": {
"storage": "20Gi"
}
}
]
}
},
"compute-resources" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources",
"annotations": {
"resource-budget": "x-large"
}
},
"spec": {
"hard": {
"limits.cpu": "8",
"limits.memory": "8Gi"
},
"scopes": [
"NotTerminating"
]
}
},
"compute-resources-timebound" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources-timebound",
"annotations": {
"resource-budget": "x-large"
}
},
"spec": {
"hard": {
"limits.cpu": "8",
"limits.memory": "8Gi"
},
"scopes": [
"Terminating"
]
}
},
"object-counts" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "object-counts",
"annotations": {
"resource-budget": "x-large"
}
},
"spec": {
"hard": {
"persistentvolumeclaims": "18",
"replicationcontrollers": "35",
"secrets": "45",
"services": "30"
}
}
}
},
"xx-large": {
"resource-limits" : {
"kind": "LimitRange",
"apiVersion": "v1",
"metadata": {
"name": "resource-limits",
"annotations": {
"resource-budget": "xx-large"
}
},
"spec": {
"limits": [
{
"type": "Pod",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "12",
"memory": "12Gi"
}
},
{
"type": "Container",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "12",
"memory": "12Gi"
},
"default": {
"cpu": "500m",
"memory": "2Gi"
},
"defaultRequest": {
"cpu": "50m",
"memory": "128Mi"
}
},
{
"type": "PersistentVolumeClaim",
"min": {
"storage": "1Gi"
},
"max": {
"storage": "20Gi"
}
}
]
}
},
"compute-resources" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources",
"annotations": {
"resource-budget": "xx-large"
}
},
"spec": {
"hard": {
"limits.cpu": "12",
"limits.memory": "12Gi"
},
"scopes": [
"NotTerminating"
]
}
},
"compute-resources-timebound" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources-timebound",
"annotations": {
"resource-budget": "xx-large"
}
},
"spec": {
"hard": {
"limits.cpu": "12",
"limits.memory": "12Gi"
},
"scopes": [
"Terminating"
]
}
},
"object-counts" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "object-counts",
"annotations": {
"resource-budget": "xx-large"
}
},
"spec": {
"hard": {
"persistentvolumeclaims": "24",
"replicationcontrollers": "45",
"secrets": "55",
"services": "40"
}
}
}
},
"xxx-large": {
"resource-limits" : {
"kind": "LimitRange",
"apiVersion": "v1",
"metadata": {
"name": "resource-limits",
"annotations": {
"resource-budget": "xxx-large"
}
},
"spec": {
"limits": [
{
"type": "Pod",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "16",
"memory": "16Gi"
}
},
{
"type": "Container",
"min": {
"cpu": "50m",
"memory": "32Mi"
},
"max": {
"cpu": "16",
"memory": "16Gi"
},
"default": {
"cpu": "500m",
"memory": "2Gi"
},
"defaultRequest": {
"cpu": "50m",
"memory": "128Mi"
}
},
{
"type": "PersistentVolumeClaim",
"min": {
"storage": "1Gi"
},
"max": {
"storage": "20Gi"
}
}
]
}
},
"compute-resources" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources",
"annotations": {
"resource-budget": "xxx-large"
}
},
"spec": {
"hard": {
"limits.cpu": "16",
"limits.memory": "16Gi"
},
"scopes": [
"NotTerminating"
]
}
},
"compute-resources-timebound" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "compute-resources-timebound",
"annotations": {
"resource-budget": "xxx-large"
}
},
"spec": {
"hard": {
"limits.cpu": "16",
"limits.memory": "16Gi"
},
"scopes": [
"Terminating"
]
}
},
"object-counts" : {
"kind": "ResourceQuota",
"apiVersion": "v1",
"metadata": {
"name": "object-counts",
"annotations": {
"resource-budget": "xxx-large"
}
},
"spec": {
"hard": {
"persistentvolumeclaims": "30",
"replicationcontrollers": "55",
"secrets": "65",
"services": "50"
}
}
}
}
}
service_template = string.Template("""
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "${name}",
"labels": {
"app": "${application_name}",
"spawner": "${configuration}",
"class": "session",
"user": "${username}"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"blockOwnerDeletion": false,
"controller": true,
"name": "${name}",
"uid": "${uid}"
}
]
},
"spec": {
"type": "ClusterIP",
"selector": {
"app": "${application_name}",
"spawner": "${configuration}",
"user": "${username}"
},
"ports": []
}
}
""")
route_template = string.Template("""
{
"apiVersion": "route.openshift.io/v1",
"kind": "Route",
"metadata": {
"name": "${name}-${port}",
"labels": {
"app": "${application_name}",
"spawner": "${configuration}",
"class": "session",
"user": "${username}",
"port": "${port}"
},
"ownerReferences": [
{
"apiVersion": "v1",
"kind": "ServiceAccount",
"blockOwnerDeletion": false,
"controller": true,
"name": "${name}",
"uid": "${uid}"
}
]
},
"spec": {
"host": "${host}",
"port": {
"targetPort": "${port}-tcp"
},
"to": {
"kind": "Service",
"name": "${name}",
"weight": 100
}
}
}
""")
@gen.coroutine
def create_service_account(spawner, pod):
short_name = spawner.user.name
user_account_name = '%s-%s' % (application_name, short_name)
owner_uid = None
print('INFO: Create service account "%s".' % user_account_name)
while True:
try:
text = service_account_template.safe_substitute(
configuration=configuration_type, namespace=namespace,
name=user_account_name, application_name=application_name,
username=short_name)
body = json.loads(text)
service_account_object = service_account_resource.create(
namespace=namespace, body=body)
owner_uid = service_account_object.metadata.uid
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating service account. %s' % e)
raise
else:
print('WARNING: Service account %s exists.' % user_account_name)
break
except Exception as e:
print('ERROR: Error creating service account. %s' % e)
raise
else:
break
# If we didn't create a service account object as one already existed,
# we need to query the existing one to get the uid to use as owner.
if owner_uid is None:
try:
service_account_object = service_account_resource.get(
namespace=namespace, name=user_account_name)
owner_uid = service_account_object.metadata.uid
except Exception as e:
print('ERROR: Error getting service account. %s' % e)
raise
print('INFO: Service account id is %s.' % owner_uid)
return owner_uid
@gen.coroutine
def create_project_namespace(spawner, pod, project_name):
short_name = spawner.user.name
user_account_name = '%s-%s' % (application_name, short_name)
try:
text = namespace_template.safe_substitute(
configuration=configuration_type, name=project_name,
application_name=application_name,
requestor=full_service_account_name, namespace=namespace,
deployment=application_name, account=user_account_name,
session=pod.metadata.name, owner=project_owner.metadata.name,
uid=project_owner.metadata.uid, username=short_name)
body = json.loads(text)
namespace_resource.create(body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating project. %s' % e)
raise
except Exception as e:
print('ERROR: Error creating project. %s' % e)
raise
@gen.coroutine
def setup_project_namespace(spawner, pod, project_name, role, budget):
short_name = spawner.user.name
user_account_name = '%s-%s' % (application_name, short_name)
# Wait for project namespace to exist before continuing.
for _ in range(30):
try:
project = namespace_resource.get(name=project_name)
except ApiException as e:
if e.status == 404:
yield gen.sleep(0.1)
continue
print('ERROR: Error querying project. %s' % e)
raise
else:
break
else:
        # If we can't verify the project was created, give up.
print('ERROR: Could not verify project creation. %s' % project_name)
raise Exception('Could not verify project creation. %s' % project_name)
project_uid = project.metadata.uid
# Create role binding in the project so the spawner service account can
# delete project when done. Will fail if the project hasn't actually
# been created yet.
try:
text = role_binding_template.safe_substitute(
configuration=configuration_type, namespace=namespace,
name=service_account_name, tag='admin', role='admin',
application_name=application_name, username=short_name)
body = json.loads(text)
role_binding_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating role binding for spawner. %s' % e)
raise
except Exception as e:
print('ERROR: Error creating rolebinding for spawner. %s' % e)
raise
# Create role binding in the project so the users service account
# can create resources in it.
try:
text = role_binding_template.safe_substitute(
configuration=configuration_type, namespace=namespace,
name=user_account_name, tag=role, role=role,
application_name=application_name, username=short_name)
body = json.loads(text)
role_binding_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating role binding for user. %s' % e)
raise
except Exception as e:
print('ERROR: Error creating rolebinding for user. %s' % e)
raise
# Create role binding in the project so the users service account
# can perform additional actions declared through additional policy
# rules for a specific workshop session.
try:
text = role_binding_template.safe_substitute(
configuration=configuration_type, namespace=namespace,
name=user_account_name, tag='session-rules',
role=application_name+'-session-rules',
application_name=application_name, username=short_name)
body = json.loads(text)
role_binding_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating role binding for extras. %s' % e)
raise
except Exception as e:
print('ERROR: Error creating rolebinding for extras. %s' % e)
raise
# Determine what project namespace resources need to be used.
if budget != 'unlimited':
if budget not in resource_budget_mapping:
budget = 'default'
elif not resource_budget_mapping[budget]:
budget = 'default'
if budget not in ('default', 'unlimited'):
budget_item = resource_budget_mapping[budget]
resource_limits_definition = budget_item['resource-limits']
compute_resources_definition = budget_item['compute-resources']
compute_resources_timebound_definition = budget_item['compute-resources-timebound']
object_counts_definition = budget_item['object-counts']
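    # e.g. a budget of 'medium' (see resource_budget_mapping above) caps
    # long-running pods at 2 CPUs / 2Gi via the 'compute-resources' quota
    # and applies the matching LimitRange defaults, while 'default' leaves
    # existing settings untouched and 'unlimited' deletes any applied
    # limits/quotas without replacing them.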
# Delete any limit ranges applied to the project that may conflict
# with the limit range being applied. For the case of unlimited, we
# delete any being applied but don't replace it.
if budget != 'default':
try:
limit_ranges = limit_range_resource.get(
namespace=project_name)
except ApiException as e:
print('ERROR: Error querying limit ranges. %s' % e)
raise
for limit_range in limit_ranges.items:
try:
limit_range_resource.delete(namespace=project_name,
name=limit_range.metadata.name)
except ApiException as e:
print('ERROR: Error deleting limit range. %s' % e)
raise
# Create limit ranges for the project namespace so any deployments
# will have default memory/cpu min and max values.
if budget not in ('default', 'unlimited'):
try:
body = resource_limits_definition
limit_range_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating limit range. %s' % e)
raise
# Delete any resource quotas applied to the project namespace that
# may conflict with the resource quotas being applied.
if budget != 'default':
try:
resource_quotas = resource_quota_resource.get(namespace=project_name)
except ApiException as e:
print('ERROR: Error querying resource quotas. %s' % e)
raise
for resource_quota in resource_quotas.items:
try:
resource_quota_resource.delete(namespace=project_name,
name=resource_quota.metadata.name)
except ApiException as e:
print('ERROR: Error deleting resource quota. %s' % e)
raise
# Create resource quotas for the project so there is a maximum for
# what resources can be used.
if budget not in ('default', 'unlimited'):
try:
body = compute_resources_definition
resource_quota_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating compute resources quota. %s' % e)
raise
try:
body = compute_resources_timebound_definition
resource_quota_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating compute resources timebound quota. %s' % e)
raise
try:
body = object_counts_definition
resource_quota_resource.create(namespace=project_name, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating object counts quota. %s' % e)
raise
# Return the project UID for later use as owner UID if needed.
return project_uid
extra_resources = {}
extra_resources_loader = None
if os.path.exists('/opt/app-root/resources/extra_resources.yaml'):
with open('/opt/app-root/resources/extra_resources.yaml') as fp:
extra_resources = fp.read().strip()
extra_resources_loader = yaml.safe_load
if os.path.exists('/opt/app-root/resources/extra_resources.json'):
with open('/opt/app-root/resources/extra_resources.json') as fp:
extra_resources = fp.read().strip()
extra_resources_loader = json.loads
def _namespaced_resources():
api_groups = api_client.resources.parse_api_groups()
for api in api_groups.values():
for domain, items in api.items():
for version, group in items.items():
try:
                    for kind in group.resources:
                        # Compute the full api version into a separate
                        # variable; reassigning `version` here would keep
                        # prefixing the domain on every iteration.
                        api_version = '%s/%s' % (domain, version) if domain else version
                        resource = api_client.resources.get(api_version=api_version, kind=kind)
                        if type(resource) == Resource and resource.namespaced:
                            yield (api_version, resource.kind)
except Exception:
pass
namespaced_resources = set(_namespaced_resources())
@gen.coroutine
def create_extra_resources(spawner, pod, project_name, owner_uid,
user_account_name, short_name):
if not extra_resources:
return
template = string.Template(extra_resources)
text = template.safe_substitute(spawner_namespace=namespace,
project_namespace=project_name, image_registry=image_registry,
service_account=user_account_name, username=short_name,
application_name=application_name)
data = extra_resources_loader(text)
if isinstance(data, dict) and data.get('kind') == 'List':
data = data['items']
for body in data:
try:
kind = body['kind']
api_version = body['apiVersion']
if not (api_version, kind) in namespaced_resources:
body['metadata']['ownerReferences'] = [dict(
apiVersion='v1', kind='Namespace', blockOwnerDeletion=False,
controller=True, name=project_name, uid=owner_uid)]
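            # Cluster-scoped resources are given the project namespace as an
            # owner so they are garbage-collected when the project is
            # deleted; namespaced resources are cleaned up with their
            # namespace anyway.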
if kind.lower() == 'namespace':
annotations = body['metadata'].setdefault('annotations', {})
annotations['spawner/requestor'] = full_service_account_name
annotations['spawner/namespace'] = namespace
annotations['spawner/deployment'] = application_name
annotations['spawner/account'] = user_account_name
annotations['spawner/session'] = pod.metadata.name
resource = api_client.resources.get(api_version=api_version, kind=kind)
target_namespace = body['metadata'].get('namespace', project_name)
resource.create(namespace=target_namespace, body=body)
except ApiException as e:
if e.status != 409:
print('ERROR: Error creating resource %s. %s' % (body, e))
raise
else:
print('WARNING: Resource already exists %s.' % body)
except Exception as e:
print('ERROR: Error creating resource %s. %s' % (body, e))
raise
if kind.lower() == 'namespace':
annotations = body['metadata'].get('annotations', {})
role = annotations.get('session/role', 'admin')
default_budget = os.environ.get('RESOURCE_BUDGET', 'default')
budget = annotations.get('session/budget', default_budget)
yield setup_project_namespace(spawner, pod,
body['metadata']['name'], role, budget)
@gen.coroutine
def expose_service_ports(spawner, pod, owner_uid):
    short_name = spawner.user.name
    user_account_name = '%s-%s' % (application_name, short_name)

    # Can't do this for now if deployed to plain Kubernetes.

    if route_resource is None:
        return

    exposed_ports = os.environ.get('EXPOSED_PORTS', '')

    if exposed_ports:
        exposed_ports = exposed_ports.split(',')

        try:
            text = service_template.safe_substitute(
                configuration=configuration_type, name=user_account_name,
                application_name=application_name, username=short_name,
                uid=owner_uid)
            body = json.loads(text)

            for port in exposed_ports:
                body['spec']['ports'].append(dict(name='%s-tcp' % port,
                    protocol="TCP", port=int(port), targetPort=int(port)))

            service_resource.create(namespace=namespace, body=body)

        except ApiException as e:
            if e.status != 409:
                print('ERROR: Error creating service. %s' % e)
                raise

        except Exception as e:
            print('ERROR: Error creating service. %s' % e)
            raise

        for port in exposed_ports:
            try:
                host = '%s-%s.%s' % (user_account_name, port, cluster_subdomain)
                text = route_template.safe_substitute(configuration=configuration_type,
                    name=user_account_name, application_name=application_name,
                    port='%s' % port, username=short_name, uid=owner_uid, host=host)
                body = json.loads(text)

                route_resource.create(namespace=namespace, body=body)

            except ApiException as e:
                if e.status != 409:
                    print('ERROR: Error creating route. %s' % e)
                    raise

            except Exception as e:
                print('ERROR: Error creating route. %s' % e)
                raise
@gen.coroutine
def wait_on_service_account(user_account_name):
    for _ in range(10):
        try:
            service_account = service_account_resource.get(
                namespace=namespace, name=user_account_name)

            # Hope that all secrets are added at the same time, so we don't
            # have to check names to verify that the api token secret was
            # added.

            if service_account.secrets:
                for item in service_account.secrets:
                    try:
                        secret = secret_resource.get(namespace=namespace,
                            name=item['name'])
                    except Exception as e:
                        print('WARNING: Error fetching secret. %s' % e)
                        yield gen.sleep(0.1)
                        break
                else:
                    break
            else:
                yield gen.sleep(0.1)
                continue

        except Exception as e:
            print('ERROR: Error fetching service account. %s' % e)
            raise

    else:
        # If we can't verify after multiple attempts, continue on anyway.
        print('WARNING: Could not verify account. %s' % user_account_name)
# Load configuration corresponding to the configuration type.

c.Spawner.environment['DEPLOYMENT_TYPE'] = 'spawner'
c.Spawner.environment['CONFIGURATION_TYPE'] = configuration_type

config_root = '/opt/app-root/src/configs'
config_file = '%s/%s.py' % (config_root, configuration_type)

if os.path.exists(config_file):
    with open(config_file) as fp:
        exec(compile(fp.read(), config_file, 'exec'), globals())

# Load configuration provided via the environment.

environ_config_file = '/opt/app-root/configs/jupyterhub_config.py'

if os.path.exists(environ_config_file):
    with open(environ_config_file) as fp:
        exec(compile(fp.read(), environ_config_file, 'exec'), globals())
| 32.117452 | 151 | 0.49015 | 5,214 | 57,972 | 5.329881 | 0.120637 | 0.02267 | 0.016733 | 0.023318 | 0.480353 | 0.424433 | 0.394602 | 0.372508 | 0.348615 | 0.334185 | 0 | 0.013444 | 0.39952 | 57,972 | 1,804 | 152 | 32.135255 | 0.784867 | 0.098168 | 0 | 0.503511 | 0 | 0.000702 | 0.245578 | 0.036279 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009129 | false | 0.000702 | 0.011236 | 0.000702 | 0.030899 | 0.033708 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f6d55643f17e1e6df4acdca8f4b81328ac69205 | 1,005 | py | Python | bbox_annotation_convert.py | lafius/YoLOGO | 5dc888773a7a6442e270ed8111494042a1386de9 | [
"MIT"
] | 3 | 2019-05-21T11:00:29.000Z | 2020-12-08T09:47:59.000Z | bbox_annotation_convert.py | lafius/YoLOGO | 5dc888773a7a6442e270ed8111494042a1386de9 | [
"MIT"
] | null | null | null | bbox_annotation_convert.py | lafius/YoLOGO | 5dc888773a7a6442e270ed8111494042a1386de9 | [
"MIT"
] | 1 | 2021-01-29T03:25:55.000Z | 2021-01-29T03:25:55.000Z | import os
from collections import defaultdict

SOURCE_DIR = os.getcwd()

classes = []
with open('class.txt', "r") as f:
    for line in f.readlines():
        classes.append(line.split()[-1])
    f.close()

def convertAnnotation(filename, newAnnotation):
    with open(filename, "r") as f:
        listeAnnotation = f.readlines()
        for i in range(1, len(listeAnnotation)):
            bbox = listeAnnotation[i].split()
            newAnnotation[filename].append(bbox[0] + "," + bbox[1] + "," + bbox[2] + "," + bbox[3] + "," + str(classes.index(bbox[4])))
        f.close()

if __name__ == '__main__':
    new_Annotation = defaultdict(list)
    dir = os.listdir(SOURCE_DIR + "/Labels/")
    for filename in dir:
        convertAnnotation(SOURCE_DIR + "/Labels/" + filename, new_Annotation)
    with open('annotation.txt', "w") as f:
        for fileName in new_Annotation:
            f.write(SOURCE_DIR + "/Images/" + fileName.split("/")[-1] + " " + " ".join(new_Annotation[fileName]) + "\n")
        f.close() | 34.655172 | 135 | 0.610945 | 123 | 1,005 | 4.861789 | 0.422764 | 0.060201 | 0.013378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010152 | 0.21592 | 1,005 | 29 | 136 | 34.655172 | 0.748731 | 0 | 0 | 0.125 | 0 | 0 | 0.0666 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
4f6ee92d3e4ebb7dafcda9e91ea9d426dbaf292c | 5,379 | py | Python | c3dm/config.py | facebookresearch/c3dm | cac38418e41f75f1395422200b8d7bdf6725aa43 | [
"MIT"
] | 15 | 2020-12-04T16:40:21.000Z | 2021-11-06T01:35:16.000Z | c3dm/config.py | facebookresearch/c3dm | cac38418e41f75f1395422200b8d7bdf6725aa43 | [
"MIT"
] | 2 | 2021-03-16T09:05:22.000Z | 2021-12-23T12:43:37.000Z | c3dm/config.py | facebookresearch/c3dm | cac38418e41f75f1395422200b8d7bdf6725aa43 | [
"MIT"
] | 2 | 2021-04-08T00:50:29.000Z | 2021-11-06T01:35:06.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import inspect
import copy
import os
import yaml
import ast
import numpy as np
from tools.attr_dict import nested_attr_dict
from tools.utils import auto_init_args
def convert_to_stringval(cfg_, squeeze=None, stringify_vals=False):
    out = {}
    convert_to_stringval_rec([('ROOT', cfg_)], out,
                             squeeze=squeeze, stringify_vals=stringify_vals)
    return out

def convert_to_stringval_rec(flds, output, squeeze=None, stringify_vals=False):
    for k, v in flds[-1][1].items():
        if isinstance(v, dict):
            flds_cp = copy.deepcopy(flds)
            flds_cp.append((k, v))
            convert_to_stringval_rec(flds_cp, output,
                                     squeeze=squeeze, stringify_vals=stringify_vals)
        else:
            valname_full = []
            for f in flds[1:]:
                valname_full.append(squeeze_string(f[0], squeeze))
            valname_full.append(squeeze_string(k, squeeze))
            valname_full = ".".join(valname_full)
            if stringify_vals:
                output[valname_full] = str(v)
            else:
                output[valname_full] = v
def squeeze_key_string(f, squeeze_inter, squeeze_tail):
    keys = f.split('.')
    tail = keys[-1]
    inter = keys[0:-1]
    nkeys = len(keys)
    if nkeys > 1:
        take_from_each = int(np.floor(float(squeeze_inter - nkeys) / float(nkeys - 1)))
        take_from_each = max(take_from_each, 1)
        for keyi in range(nkeys - 1):
            s = inter[keyi]
            s = s[0:min(take_from_each, len(s))]
            inter[keyi] = s
    tail = squeeze_string(tail, squeeze_tail)
    inter.append(tail)
    out = ".".join(inter)
    return out

def squeeze_string(f, squeeze):
    if squeeze is None or squeeze > len(f):
        return f
    idx = np.round(np.linspace(0, len(f) - 1, squeeze))
    idx = idx.astype(int).tolist()
    f_short = [f[i] for i in idx]
    f_short = str("").join(f_short)
    return f_short
def get_default_args(C):
    # returns dict of keyword args of a callable C
    sig = inspect.signature(C)
    kwargs = {}
    for pname, defval in dict(sig.parameters).items():
        if defval.default == inspect.Parameter.empty:
            print('skipping %s' % pname)
            continue
        else:
            kwargs[pname] = copy.deepcopy(defval.default)
    return kwargs

def str2bool(v):
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')

def arg_as_list(s):
    v = ast.literal_eval(s)
    if type(v) is not list:
        raise argparse.ArgumentTypeError("Argument \"%s\" is not a list" % (s))
    return v
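
# A minimal sketch (not part of the original file) of how str2bool and
# arg_as_list plug into argparse; the option names here are hypothetical.
#
#   _p = argparse.ArgumentParser()
#   _p.add_argument('--use_gpu', type=str2bool, default=True)
#   _p.add_argument('--layers', type=arg_as_list, default=[64, 64])
#   _a = _p.parse_args(['--use_gpu', 'no', '--layers', '[32, 32]'])
#   assert _a.use_gpu is False and _a.layers == [32, 32]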
def get_arg_parser(cfg_constructor):
    dargs = (get_default_args(cfg_constructor)
             if inspect.isclass(cfg_constructor)
             else cfg_constructor)
    dargs_full_name = convert_to_stringval(dargs, stringify_vals=False)
    parser = argparse.ArgumentParser(
        description='Auto-initialized argument parser'
    )
    for darg, val in dargs_full_name.items():
        tp = type(val) if val is not None else str
        if tp == bool:
            parser.add_argument(
                '--%s' % darg,
                dest=darg,
                help=darg,
                default=val,
                type=str2bool,
            )
        elif tp == list:
            parser.add_argument(
                '--%s' % darg,
                type=arg_as_list,
                default=val,
                help=darg)
        else:
            parser.add_argument(
                '--%s' % darg,
                dest=darg,
                help=darg,
                default=val,
                type=tp,
            )
    return parser

def set_config_from_config(cfg, cfg_set):
    # cfg_set ... dict with nested options
    cfg_dot_separated = convert_to_stringval(cfg_set, stringify_vals=False)
    set_config(cfg, cfg_dot_separated)

def set_config_rec(cfg, tgt_key, val, check_only=False):
    if len(tgt_key) > 1:
        k = tgt_key.pop(0)
        if k not in cfg:
            # raise ValueError('no such config key %s' % k)
            cfg[k] = {}
        set_config_rec(cfg[k], tgt_key, val, check_only=check_only)
    else:
        if check_only:
            assert cfg[tgt_key[0]] == val
        else:
            cfg[tgt_key[0]] = val
def set_config(cfg, cfg_set):
    # cfg_set ... dict with .-separated options
    for cfg_key, cfg_val in cfg_set.items():
        # print('setting %s = %s' % (cfg_key, str(cfg_val)))
        cfg_key_split = [k for k in cfg_key.split('.') if len(k) > 0]
        set_config_rec(cfg, copy.deepcopy(cfg_key_split), cfg_val)
        set_config_rec(cfg, cfg_key_split, cfg_val, check_only=True)

def set_config_from_file(cfg, cfg_filename):
    # set config from a yaml file; safe_load avoids the unsafe-loader
    # warning/error that bare yaml.load raises on modern PyYAML
    with open(cfg_filename, 'r') as f:
        yaml_cfg = yaml.safe_load(f)
    set_config_from_config(cfg, yaml_cfg)

def dump_config(cfg):
    cfg_filename = os.path.join(cfg.exp_dir, 'expconfig.yaml')
    with open(cfg_filename, 'w') as yaml_file:
        yaml.dump(cfg, yaml_file, default_flow_style=False)
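
# End-to-end usage sketch (illustrative only; `MyExperiment` is a hypothetical
# class whose __init__ keyword defaults define the config schema):
#
#   cfg = get_default_args(MyExperiment)        # dict of keyword defaults
#   parser = get_arg_parser(MyExperiment)       # one CLI flag per (nested) key
#   set_config(cfg, vars(parser.parse_args()))  # write parsed values back
#   dump_config(nested_attr_dict(cfg))          # requires cfg.exp_dir to be set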
| 30.389831 | 81 | 0.581521 | 717 | 5,379 | 4.153417 | 0.23431 | 0.030222 | 0.036266 | 0.020148 | 0.201142 | 0.081263 | 0.054399 | 0.054399 | 0.034923 | 0.034923 | 0 | 0.006176 | 0.307678 | 5,379 | 176 | 82 | 30.5625 | 0.793502 | 0.054843 | 0 | 0.176471 | 0 | 0 | 0.028768 | 0 | 0 | 0 | 0 | 0 | 0.007353 | 1 | 0.095588 | false | 0 | 0.066176 | 0 | 0.220588 | 0.007353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f700623aae7a95a1ed534501dc5e986a608fd04 | 2,324 | py | Python | cave/build/tools/headup/Source/FileTypeConfig.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | cave/build/tools/headup/Source/FileTypeConfig.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | cave/build/tools/headup/Source/FileTypeConfig.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 3 Mar 2010 #3771 jelkins Initial Creation.
from ConfigParser import ConfigParser
from ConfigParser import NoOptionError
from os import pathsep
from os import listdir
from os.path import join
class FileTypeConfig(ConfigParser):
""" Handles file type configurations
"""
def __init__(self,defaultConfig = None,configDirectories = None,
fileType = None):
self.fileType = fileType
dConf = {"space":" "}
if defaultConfig != None:
dConf.update(defaultConfig)
ConfigParser.__init__(self,dConf)
if configDirectories != None:
self.loadConfig(configDirectories)
def isAvailable(self,fileType = None):
if fileType == None:
fileType = self.fileType
return self.has_section(fileType)
def loadConfig(self,configDirectories):
for path in configDirectories.split(pathsep):
for file in listdir(path):
if ".cfg" in file:
self.read(join(path,file))
def _getConfig(self,configKey,getterFunction,varDict=None):
result = None
try:
if varDict != None:
result = getterFunction(self.fileType,configKey,vars=varDict)
else:
result = getterFunction(self.fileType,configKey)
except NoOptionError:
pass
return result
def getConfig(self,configKey,varDict=None):
return self._getConfig(configKey,self.get,varDict)
def getBooleanConfig(self,configKey):
return self._getConfig(configKey,self.getboolean)
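
# Usage sketch (hypothetical paths, section name, and option keys; not from
# the original file):
#
#   config = FileTypeConfig(defaultConfig={"indent": "4"},
#                           configDirectories="/awips2/edex/conf/headup",
#                           fileType="warning")
#   if config.isAvailable():
#       template = config.getConfig("template")
#       useBold = config.getBooleanConfig("useBold")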
| 29.794872 | 76 | 0.678141 | 264 | 2,324 | 5.924242 | 0.507576 | 0.038363 | 0.02046 | 0.031969 | 0.09335 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022527 | 0.216867 | 2,324 | 77 | 77 | 30.181818 | 0.836813 | 0.421256 | 0 | 0 | 0 | 0 | 0.007639 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0.026316 | 0.131579 | 0.052632 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f70634b62de82ee85628c44e5f54c1ae0ba85ac | 577 | py | Python | var/spack/repos/builtin/packages/r-rodbc/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/r-rodbc/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/r-rodbc/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RRodbc(RPackage):
"""An ODBC database interface."""
homepage = "https://cran.rstudio.com/web/packages/RODBC/"
url = "https://cran.rstudio.com/src/contrib/RODBC_1.3-13.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/RODBC/"
version('1.3-13', 'c52ef9139c2ed85adc53ad6effa7d68e')
depends_on('unixodbc')
| 30.368421 | 73 | 0.714038 | 79 | 577 | 5.177215 | 0.78481 | 0.066015 | 0.07824 | 0.09291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066937 | 0.145581 | 577 | 18 | 74 | 32.055556 | 0.762677 | 0.376083 | 0 | 0 | 0 | 0.142857 | 0.566952 | 0.091168 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.714286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f72a5836ba28dfe89a013c0b38cf2e69cd03926 | 862 | py | Python | pythonFiles/pydev/launcher.py | lee-vius/python-preview | 1d953a8d08698693042ce763ec7861224661032f | [
"MIT"
] | 1 | 2021-02-25T05:47:14.000Z | 2021-02-25T05:47:14.000Z | pythonFiles/pydev/launcher.py | lee-vius/python-preview | 1d953a8d08698693042ce763ec7861224661032f | [
"MIT"
] | null | null | null | pythonFiles/pydev/launcher.py | lee-vius/python-preview | 1d953a8d08698693042ce763ec7861224661032f | [
"MIT"
] | null | null | null | import os
import os.path
import sys
import traceback
sys.stdout.write('&info&succeeded to launch script')
sys.stdout.flush()
try:
    import debugger
except:
    traceback.print_exc()
    print('Press Enter to close...')
    try:
        raw_input()
    except NameError:
        input()
    sys.exit(1)
#=======================================================================================================================
# 1. Debugger port to connect to.
# 2. GUID for the debug session.
# 3. Startup script name.
#=======================================================================================================================
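# Example invocation (hypothetical values; normally the pydev IDE supplies
# these arguments when it spawns the debuggee):
#   python launcher.py 5678 01234567-89ab-cdef-0123-456789abcdef myscript.py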
port_num = int(sys.argv[1])
debug_id = sys.argv[2]
del sys.argv[1:3]
# filename = sys.argv[0]
sys.path[0] = ''
current_pid = os.getpid()
del sys, os
print(current_pid)
debugger.debug(port_num, debug_id, current_pid) | 22.684211 | 120 | 0.488399 | 98 | 862 | 4.204082 | 0.479592 | 0.067961 | 0.038835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013495 | 0.140371 | 862 | 38 | 121 | 22.684211 | 0.54251 | 0.403712 | 0 | 0.083333 | 0 | 0 | 0.108055 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.208333 | 0 | 0.208333 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f72ddd81a2967ee26f9ff43caf15bf5a947f6a1 | 11,398 | py | Python | framework/UI/FitnessView.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | framework/UI/FitnessView.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | framework/UI/FitnessView.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A view widget for visualizing the R^2 fitness of the local stepwise
regression results.
"""
#For future compatibility with Python 3
from __future__ import division, print_function, absolute_import
#End compatibility block for Python 3
try:
    from PySide import QtCore as qtc
    from PySide import QtGui as qtg
    from PySide import QtGui as qtw
    from PySide import QtSvg as qts
except ImportError as e:
    from PySide2 import QtCore as qtc
    from PySide2 import QtGui as qtg
    from PySide2 import QtWidgets as qtw
    from PySide2 import QtSvg as qts
from .BaseTopologicalView import BaseTopologicalView
import math
import numpy as np
class FitnessView(BaseTopologicalView):
"""
A view widget for visualizing the R^2 fitness of the local stepwise
regression results.
"""
def __init__(self, parent=None, amsc=None, title=None):
""" Initialization method that can optionally specify the parent widget,
an AMSC object to reference, and a title for this widget.
@ In, parent, an optional QWidget that will be the parent of this widget
@ In, amsc, an optional AMSC_Object specifying the underlying data
object for this widget to use.
@ In, title, an optional string specifying the title of this widget.
"""
super(FitnessView, self).__init__(parent,amsc,title)
    def Reinitialize(self, parent=None, amsc=None, title=None):
        """ Reinitialization method that resets this widget and can optionally
        specify the parent widget, an AMSC object to reference, and a title for
        this widget.
        @ In, parent, an optional QWidget that will be the parent of this widget
        @ In, amsc, an optional AMSC_Object specifying the underlying data
        object for this widget to use.
        @ In, title, an optional string specifying the title of this widget.
        """
        # Try to apply a new layout; if one already exists then make sure to
        # grab it for updating.
        if self.layout() is None:
            self.setLayout(qtw.QVBoxLayout())
        layout = self.layout()
        self.clearLayout(layout)

        self.padding = 2

        ## General Graphics View/Scene setup
        self.scene = qtw.QGraphicsScene()
        self.scene.setSceneRect(0, 0, 100, 100)
        self.gView = qtw.QGraphicsView(self.scene)
        self.gView.setRenderHints(qtg.QPainter.Antialiasing |
                                  qtg.QPainter.SmoothPixmapTransform)
        self.gView.setHorizontalScrollBarPolicy(qtc.Qt.ScrollBarAlwaysOff)
        self.gView.setVerticalScrollBarPolicy(qtc.Qt.ScrollBarAlwaysOff)
        self.font = qtg.QFont('sans-serif', 12)

        ## Defining the right click menu
        self.rightClickMenu = qtw.QMenu()
        self.fillAction = self.rightClickMenu.addAction('Fill viewport')
        self.fillAction.setCheckable(True)
        self.fillAction.setChecked(True)
        self.fillAction.triggered.connect(self.updateScene)
        self.showNumberAction = self.rightClickMenu.addAction('Show Numeric Values')
        self.showNumberAction.setCheckable(True)
        self.showNumberAction.setChecked(True)
        self.showNumberAction.triggered.connect(self.updateScene)
        captureAction = self.rightClickMenu.addAction('Capture')
        captureAction.triggered.connect(self.saveImage)

        self.gView.scale(self.gView.width() / self.scene.width(),
                         self.gView.height() / self.scene.height())

        self.updateScene()
    def saveImage(self, filename=None):
        """
        Saves the current display of this view to a static image by loading a
        file dialog box.
        @ In, filename, string, optional parameter specifying where this image
        will be saved. If None, then a dialog box will prompt the user for a
        name and location.
        @ Out, None
        """
        if filename is None:
            dialog = qtw.QFileDialog(self)
            dialog.setFileMode(qtw.QFileDialog.AnyFile)
            dialog.setAcceptMode(qtw.QFileDialog.AcceptSave)
            dialog.exec_()
            if dialog.result() == qtw.QFileDialog.Accepted:
                filename = dialog.selectedFiles()[0]
            else:
                return
        self.scene.clearSelection()
        self.scene.setSceneRect(self.scene.itemsBoundingRect())
        if filename.endswith('.svg'):
            svgGen = qts.QSvgGenerator()
            svgGen.setFileName(filename)
            svgGen.setSize(self.scene.sceneRect().size().toSize())
            svgGen.setViewBox(self.scene.sceneRect())
            svgGen.setTitle("Screen capture of " + self.__class__.__name__)
            svgGen.setDescription("Generated from RAVEN.")
            painter = qtg.QPainter(svgGen)
        else:
            image = qtg.QImage(self.scene.sceneRect().size().toSize(), qtg.QImage.Format_ARGB32)
            image.fill(qtc.Qt.transparent)
            painter = qtg.QPainter(image)
        self.scene.render(painter)
        if not filename.endswith('.svg'):
            image.save(filename, quality=100)
        del painter
    def contextMenuEvent(self, event):
        """ An event handler triggered when the user right-clicks on this view
        that will force the context menu to appear.
        @ In, event, a QContextMenuEvent specifying the context of this event.
        """
        self.rightClickMenu.popup(event.globalPos())

    def resizeEvent(self, event):
        """ An event handler triggered when the user resizes this view.
        @ In, event, a QResizeEvent specifying the context of this event.
        """
        super(FitnessView, self).resizeEvent(event)
        self.gView.scale(self.gView.width() / self.scene.width(),
                         self.gView.height() / self.scene.height())
        self.updateScene()

    def selectionChanged(self):
        """ An event handler triggered when the user changes the selection of
        the data.
        """
        self.updateScene()

    def persistenceChanged(self):
        """ An event handler triggered when the user changes the persistence
        setting of the data.
        """
        self.updateScene()

    def modelsChanged(self):
        """ An event handler triggered when the user requests a new set of
        local models.
        """
        self.updateScene()
    def updateScene(self):
        """ A method for drawing the scene of this view.
        """
        self.scene.clear()

        if self.fillAction.isChecked():
            self.scene.setSceneRect(0, 0, 100 * float(self.gView.width()) / float(self.gView.height()), 100)
        else:
            self.scene.setSceneRect(0, 0, 100, 100)

        width = self.scene.width()
        height = self.scene.height()

        plotWidth = width - 2 * self.padding
        plotHeight = height - 2 * self.padding

        axisPen = qtg.QPen(qtc.Qt.black)

        names = self.amsc.GetNames()[:-1]

        if not self.amsc.FitsSynced():
            txtItem = self.scene.addSimpleText('Rebuild Local Models', self.font)
            txtItem.setFlag(qtw.QGraphicsItem.ItemIgnoresTransformations)
            txtItem.setPos(0, 0)
            txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
            txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
            self.scene.changed.connect(self.scene.invalidate)
            self.gView.fitInView(self.scene.sceneRect(), qtc.Qt.KeepAspectRatio)
            return

        selection = self.amsc.GetSelectedSegments()
        colorMap = self.amsc.GetColors()

        ## Check if they selected any extrema
        if selection is None or len(selection) == 0:
            selection = []
            selectedExts = self.amsc.GetSelectedExtrema()
            allSegments = self.amsc.GetCurrentLabels()
            for minMaxPair in allSegments:
                for extIdx in selectedExts:
                    if extIdx in minMaxPair:
                        selection.append(minMaxPair)
            ## Okay, well then we will just plot everything we have for the
            ## current level
            if len(selection) == 0:
                selection = allSegments

        selectionCount = len(selection)
        if selectionCount > 0:
            axisHeight = plotHeight / float(selectionCount)
            axisWidth = plotWidth / float(selectionCount)

        dimCount = len(names)

        fitErrorData = {}
        for j, extPair in enumerate(selection):
            fitErrorData[extPair] = self.amsc.ComputePerDimensionFitErrors(extPair)

        maxValue = 1
        j = 0
        for extPair in selection:
            retValue = fitErrorData[extPair]
            if retValue is not None:
                indexOrder, rSquared, fStatistic = retValue

                myColor = colorMap[extPair]
                myPen = qtg.QPen(qtg.QColor('#000000'))
                brushColor = qtg.QColor(myColor)
                brushColor.setAlpha(127)
                myBrush = qtg.QBrush(brushColor)

                vals = rSquared

                w = axisWidth / dimCount
                self.font.setPointSizeF(np.clip(w - 2 * self.padding, 2, 18))
                for i, val in enumerate(vals):
                    name = names[indexOrder[i]]
                    if val > 0:
                        barExtent = (val / maxValue) * plotHeight
                    else:
                        barExtent = 0
                    x = j * axisWidth + i * axisWidth / float(dimCount) + self.padding
                    y = height - self.padding
                    h = -barExtent

                    if self.showNumberAction.isChecked():
                        numTxtItem = self.scene.addSimpleText('%.3g' % val, self.font)
                        fm = qtg.QFontMetrics(numTxtItem.font())
                        fontHeight = fm.height()
                        fontWidth = fm.width(numTxtItem.text())
                        numTxtItem.setPos(x + (w - fontHeight) / 2., y - plotHeight + fontWidth)
                        # numTxtItem.rotate(285) #XXX not in qt5
                        numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
                        numTxtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
                        numTxtItem.setZValue(2)

                    myRect = self.scene.addRect(x, y, w, h, myPen, myBrush)
                    myRect.setToolTip(str(val))
                    myRect.setAcceptHoverEvents(True)

                    txtItem = self.scene.addSimpleText(' ' + name, self.font)
                    fm = qtg.QFontMetrics(txtItem.font())
                    fontHeight = fm.height()
                    fontWidth = fm.width(name)
                    txtItem.setPos(x + (w - fontHeight) / 2., y)
                    # txtItem.rotate(270) #XXX not in qt5
                    txtItem.setFlag(qtw.QGraphicsItem.ItemIsMovable)
                    txtItem.setFlag(qtw.QGraphicsItem.ItemIsSelectable)
                    txtItem.setZValue(2)

            x = j * axisWidth + self.padding
            y = height - self.padding
            w = axisWidth
            h = -plotHeight
            self.scene.addRect(x, y, w, h, axisPen)
            j += 1

        self.scene.changed.connect(self.scene.invalidate)
        self.gView.fitInView(self.scene.sceneRect(), qtc.Qt.KeepAspectRatio)
    def test(self):
        """
        A test function for performing operations on this class that need to be
        automatically tested such as simulating mouse and keyboard events, and
        other internal operations. For this class in particular, we will test:
        - Building the models (which allows the actual plot to be displayed)
        - Saving the view buffer in svg and png formats.
        - Triggering the resize event.
        @ In, None
        @ Out, None
        """
        self.amsc.BuildModels()
        self.amsc.ClearSelection()
        self.saveImage(self.windowTitle() + '.svg')
        self.saveImage(self.windowTitle() + '.png')
        self.resizeEvent(qtg.QResizeEvent(qtc.QSize(1, 1), qtc.QSize(100, 100)))
        super(FitnessView, self).test()
| 36.415335 | 95 | 0.683716 | 1,397 | 11,398 | 5.559771 | 0.289907 | 0.034762 | 0.020729 | 0.014806 | 0.270761 | 0.243337 | 0.212437 | 0.182052 | 0.17716 | 0.166087 | 0 | 0.010791 | 0.219512 | 11,398 | 312 | 96 | 36.532051 | 0.862298 | 0.276364 | 0 | 0.155914 | 0 | 0 | 0.01736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053763 | false | 0 | 0.069892 | 0 | 0.139785 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f76066515b21780cdfe4cdebbc95c9f722c5365 | 2,391 | py | Python | 13/main.py | DVRodri8/advent-of-code-2019 | 8c1e1a0766b067fbe282dd482bc258275c5a3364 | [
"MIT"
] | null | null | null | 13/main.py | DVRodri8/advent-of-code-2019 | 8c1e1a0766b067fbe282dd482bc258275c5a3364 | [
"MIT"
] | null | null | null | 13/main.py | DVRodri8/advent-of-code-2019 | 8c1e1a0766b067fbe282dd482bc258275c5a3364 | [
"MIT"
] | null | null | null | from time import sleep
from intMachine import intMachine
from os import system
from time import sleep
class Arcade():
def __init__(self, program, ws=[], animation=False):
self.__computer = intMachine(program)
self.__mblock = [ ' ', '#', 'B', '-', 'O']
self.__screen = [['%' for i in range(23)] for i in range(37)]
self.__score = 0
self.__ws = ws[:]
self.__ANIMATION = animation
def __nextFrame(self):
output = self.__computer.run()
return [output[i:i+3] for i in range(0,len(output),3)]
def __updateScreen(self):
for t in self.__nextFrame():
x,y,c = t
if x==-1 and y == 0:
self.__score = c
else:
self.__screen[x][y] = self.__mblock[c]
def __print(self):
system("clear")
print("score:",self.__score)
for i in range(len(self.__screen[0])):
for j in range(len(self.__screen)):
print(self.__screen[j][i], end='')
print()
sleep(0.01)
def __interact(self):
if len(self.__ws) > 0:
i = self.__ws.pop(0)
else:
i = input()
d=0
if i=='a': d=-1
elif i=='d': d=1
self.__computer.appendStdin(d)
def print(self):
self.__updateScreen()
self.__print()
def run(self):
while True:
self.__updateScreen()
if self.__ANIMATION: self.__print()
self.__interact()
if self.__computer.isHalted(): break
else: self.__computer.run()
if not self.__ANIMATION: print(self.__score)
with open("input", "r") as f:
program = list(map(int, f.readline().strip().split(",")))
# Part 1
G=intMachine(program)
output = G.run()
print(len([i for i in output[2::3] if i==2]))
# Part 2
# My way to find which part of memory store the board
# in order to don't wait for inputs raise an Exception on
# io instructions on the intMachine class
'''
zeros = []
for i,v in enumerate(program):
if v==0: zeros.append(i)
for i in zeros:
break
p = program[:]
p[i] = 3
try:
Arcade(p).print()
except:
print("no")
print(i)
sleep(0.0001)
'''
# Replace floor space with wall
for i in range(1416, 1416+36):
program[i]=1
G = Arcade(program, ['n']*5000, True)
G.run()
| 24.397959 | 69 | 0.547888 | 329 | 2,391 | 3.775076 | 0.340426 | 0.025765 | 0.033816 | 0.044283 | 0.032206 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027862 | 0.309494 | 2,391 | 97 | 70 | 24.649485 | 0.724409 | 0.079883 | 0 | 0.101695 | 0 | 0 | 0.01379 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.118644 | false | 0 | 0.067797 | 0 | 0.220339 | 0.152542 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f76c2ea5bf6266807ac181b18a05763f4cc86ed | 1,103 | py | Python | test_set_finder.py | SaarLevy/set-finder | 02bdcf6a484f73fb808cc467142f18304036a97c | [
"MIT"
] | null | null | null | test_set_finder.py | SaarLevy/set-finder | 02bdcf6a484f73fb808cc467142f18304036a97c | [
"MIT"
] | null | null | null | test_set_finder.py | SaarLevy/set-finder | 02bdcf6a484f73fb808cc467142f18304036a97c | [
"MIT"
] | null | null | null | import unittest
import set_finder
from card import Card, Color, Suit, Fill
class Test_Set_Finder(unittest.TestCase):
def test_values_consistent(self):
self.assertTrue(set_finder.values_consistent(1, 1, 1))
self.assertTrue(set_finder.values_consistent(1, 2, 3))
self.assertFalse(set_finder.values_consistent(1, 1, 2))
def test_check_set(self):
a1 = Card(Color.red, Suit.diamond, Fill.striped, 1)
a2 = Card(Color.red, Suit.diamond, Fill.striped, 2)
a3 = Card(Color.red, Suit.diamond, Fill.striped, 3)
self.assertTrue(set_finder.check_set(a1, a2, a3))
b1 = Card(Color.red, Suit.diamond, Fill.striped, 1)
b2 = Card(Color.green, Suit.wave, Fill.solid, 1)
b3 = Card(Color.purple, Suit.circle, Fill.blank, 1)
self.assertTrue(set_finder.check_set(b1, b2, b3))
c1 = Card(Color.red, Suit.diamond, Fill.striped, 1)
c2 = Card(Color.red, Suit.diamond, Fill.striped, 2)
c3 = Card(Color.green, Suit.circle, Fill.solid, 1)
self.assertFalse(set_finder.check_set(c1, c2, c3))
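
# Not in the original file: a standard entry point so the tests can be run
# directly with `python test_set_finder.py` as well as via a test runner.
if __name__ == '__main__':
    unittest.main()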
| 35.580645 | 63 | 0.659112 | 163 | 1,103 | 4.343558 | 0.245399 | 0.127119 | 0.101695 | 0.135593 | 0.538136 | 0.535311 | 0.408192 | 0.247175 | 0 | 0 | 0 | 0.041427 | 0.212149 | 1,103 | 31 | 64 | 35.580645 | 0.773303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f7af1d7d5c5fb2278ca5fd3846a86c199882fb7 | 8,660 | py | Python | mac-platform-tools/systrace/catapult/devil/devil/android/app_ui.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 2 | 2022-01-14T23:19:48.000Z | 2022-02-08T23:40:26.000Z | mac-platform-tools/systrace/catapult/devil/devil/android/app_ui.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 7 | 2022-02-15T01:11:37.000Z | 2022-03-02T12:46:13.000Z | mac-platform-tools/systrace/catapult/devil/devil/android/app_ui.py | NBPS-Robotics/FTC-Code-Team-9987---2022 | 180538f3ebd234635fa88f96ae7cf7441df6a246 | [
"MIT"
] | 1 | 2022-03-04T01:03:53.000Z | 2022-03-04T01:03:53.000Z | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides functionality to interact with UI elements of an Android app."""
import collections
import re
from xml.etree import ElementTree as element_tree
from devil.android import decorators
from devil.android import device_temp_file
from devil.utils import geometry
from devil.utils import timeout_retry
_DEFAULT_SHORT_TIMEOUT = 10
_DEFAULT_SHORT_RETRIES = 3
_DEFAULT_LONG_TIMEOUT = 30
_DEFAULT_LONG_RETRIES = 0
# Parse rectangle bounds given as: '[left,top][right,bottom]'.
_RE_BOUNDS = re.compile(
r'\[(?P<left>\d+),(?P<top>\d+)\]\[(?P<right>\d+),(?P<bottom>\d+)\]')
class _UiNode(object):
    def __init__(self, device, xml_node, package=None):
        """Object to interact with a UI node from an xml snapshot.

        Note: there is usually no need to call this constructor directly. Instead,
        use an AppUi object (below) to grab an xml screenshot from a device and
        find nodes in it.

        Args:
          device: A device_utils.DeviceUtils instance.
          xml_node: An ElementTree instance of the node to interact with.
          package: An optional package name for the app owning this node.
        """
        self._device = device
        self._xml_node = xml_node
        self._package = package

    def _GetAttribute(self, key):
        """Get the value of an attribute of this node."""
        return self._xml_node.attrib.get(key)

    @property
    def bounds(self):
        """Get a rectangle with the bounds of this UI node.

        Returns:
          A geometry.Rectangle instance.
        """
        d = _RE_BOUNDS.match(self._GetAttribute('bounds')).groupdict()
        return geometry.Rectangle.FromDict({k: int(v) for k, v in d.items()})

    def Tap(self, point=None, dp_units=False):
        """Send a tap event to the UI node.

        Args:
          point: An optional geometry.Point instance indicating the location to
            tap, relative to the bounds of the UI node, i.e. (0, 0) taps the
            top-left corner. If omitted, the center of the node is tapped.
          dp_units: If True, indicates that the coordinates of the point are given
            in device-independent pixels; otherwise they are assumed to be "real"
            pixels. This option has no effect when the point is omitted.
        """
        if point is None:
            point = self.bounds.center
        else:
            if dp_units:
                point = (float(self._device.pixel_density) / 160) * point
            point += self.bounds.top_left
        x, y = (str(int(v)) for v in point)
        self._device.RunShellCommand(['input', 'tap', x, y], check_return=True)

    def Dump(self):
        """Get a brief summary of the child nodes that can be found on this node.

        Returns:
          A list of lines that can be logged or otherwise printed.
        """
        summary = collections.defaultdict(set)
        for node in self._xml_node.iter():
            package = node.get('package') or '(no package)'
            label = node.get('resource-id') or '(no id)'
            text = node.get('text')
            if text:
                label = '%s[%r]' % (label, text)
            summary[package].add(label)
        lines = []
        # items() rather than the Python-2-only iteritems(), which nothing
        # else in this module uses.
        for package, labels in sorted(summary.items()):
            lines.append('- %s:' % package)
            for label in sorted(labels):
                lines.append('  - %s' % label)
        return lines

    def __getitem__(self, key):
        """Retrieve a child of this node by its index.

        Args:
          key: An integer with the index of the child to retrieve.

        Returns:
          A UI node instance of the selected child.

        Raises:
          IndexError if the index is out of range.
        """
        return type(self)(self._device, self._xml_node[key], package=self._package)

    def _Find(self, **kwargs):
        """Find the first descendant node that matches a given criteria.

        Note: clients would usually call AppUi.GetUiNode or AppUi.WaitForUiNode
        instead.

        For example:

          app = app_ui.AppUi(device, package='org.my.app')
          app.GetUiNode(resource_id='some_element', text='hello')

        would retrieve the first matching node with both of the xml attributes:

          resource-id='org.my.app:id/some_element'
          text='hello'

        As the example shows, if given and needed, the value of the resource_id key
        is auto-completed with the package name specified in the AppUi constructor.

        Args:
          Arguments are specified as key-value pairs, where keys correspond to
          attribute names in xml nodes (replacing any '-' with '_' to make them
          valid identifiers). At least one argument must be supplied, and arguments
          with a None value are ignored.

        Returns:
          A UI node instance of the first descendant node that matches ALL the
          given key-value criteria; or None if no such node is found.

        Raises:
          TypeError if no search arguments are provided.
        """
        matches_criteria = self._NodeMatcher(kwargs)
        for node in self._xml_node.iter():
            if matches_criteria(node):
                return type(self)(self._device, node, package=self._package)
        return None

    def _NodeMatcher(self, kwargs):
        # Auto-complete resource-id's using the package name if available.
        resource_id = kwargs.get('resource_id')
        if (resource_id is not None and self._package is not None
                and ':id/' not in resource_id):
            kwargs['resource_id'] = '%s:id/%s' % (self._package, resource_id)

        criteria = [(k.replace('_', '-'), v) for k, v in kwargs.items()
                    if v is not None]
        if not criteria:
            raise TypeError('At least one search criterion should be specified')
        return lambda node: all(node.get(k) == v for k, v in criteria)
class AppUi(object):
    # timeout and retry arguments appear unused, but are handled by decorator.
    # pylint: disable=unused-argument

    def __init__(self, device, package=None):
        """Object to interact with the UI of an Android app.

        Args:
          device: A device_utils.DeviceUtils instance.
          package: An optional package name for the app.
        """
        self._device = device
        self._package = package

    @property
    def package(self):
        return self._package

    @decorators.WithTimeoutAndRetriesDefaults(_DEFAULT_SHORT_TIMEOUT,
                                              _DEFAULT_SHORT_RETRIES)
    def _GetRootUiNode(self, timeout=None, retries=None):
        """Get a node pointing to the root of the UI nodes on screen.

        Note: This is currently implemented via adb calls to uiautomator and it
        is *slow*, ~2 secs per call. Do not rely on low-level implementation
        details that may change in the future.

        TODO(crbug.com/567217): Swap to a more efficient implementation.

        Args:
          timeout: A number of seconds to wait for the uiautomator dump.
          retries: Number of times to retry if the adb command fails.

        Returns:
          A UI node instance pointing to the root of the xml screenshot.
        """
        with device_temp_file.DeviceTempFile(self._device.adb) as dtemp:
            output = self._device.RunShellCommand(
                ['uiautomator', 'dump', dtemp.name], single_line=True,
                check_return=True)
            if output.startswith('ERROR:'):
                raise RuntimeError(
                    'uiautomator dump command returned error: {}'.format(output))
            xml_node = element_tree.fromstring(
                self._device.ReadFile(dtemp.name, force_pull=True))
        return _UiNode(self._device, xml_node, package=self._package)

    def ScreenDump(self):
        """Get a brief summary of the nodes that can be found on the screen.

        Returns:
          A list of lines that can be logged or otherwise printed.
        """
        return self._GetRootUiNode().Dump()

    def GetUiNode(self, **kwargs):
        """Get the first node found matching a specified criteria.

        Args:
          See _UiNode._Find.

        Returns:
          A UI node instance of the node if found, otherwise None.
        """
        # pylint: disable=protected-access
        return self._GetRootUiNode()._Find(**kwargs)

    @decorators.WithTimeoutAndRetriesDefaults(_DEFAULT_LONG_TIMEOUT,
                                              _DEFAULT_LONG_RETRIES)
    def WaitForUiNode(self, timeout=None, retries=None, **kwargs):
        """Wait for a node matching a given criteria to appear on the screen.

        Args:
          timeout: A number of seconds to wait for the matching node to appear.
          retries: Number of times to retry in case of adb command errors.
          For other args, to specify the search criteria, see _UiNode._Find.

        Returns:
          The UI node instance found.

        Raises:
          device_errors.CommandTimeoutError if the node is not found before the
          timeout.
        """
        def node_found():
            return self.GetUiNode(**kwargs)

        return timeout_retry.WaitFor(node_found)
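
# Usage sketch (assumes a connected device; the package name and resource id
# below are illustrative, not part of this module):
#
#   from devil.android import device_utils
#   device = device_utils.DeviceUtils.HealthyDevices()[0]
#   app = AppUi(device, package='org.chromium.chrome')
#   node = app.WaitForUiNode(resource_id='menu_button')
#   node.Tap()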
| 35.346939 | 79 | 0.678984 | 1,230 | 8,660 | 4.678862 | 0.264228 | 0.012163 | 0.007819 | 0.009731 | 0.176021 | 0.135882 | 0.089835 | 0.044483 | 0.031625 | 0.031625 | 0 | 0.003307 | 0.231871 | 8,660 | 244 | 80 | 35.491803 | 0.861846 | 0.488222 | 0 | 0.083333 | 0 | 0.010417 | 0.070647 | 0.01592 | 0 | 0 | 0 | 0.004098 | 0 | 1 | 0.15625 | false | 0 | 0.072917 | 0.020833 | 0.385417 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f7bc1d2e2ff7ffa0eea88735806bb111db8a6c9 | 9,277 | py | Python | event_monitor/common/lockutils.py | wangyu2014/galaxy | e34473ea8b903d31dac6c74c826a91319b681bf6 | [
"BSD-2-Clause"
] | null | null | null | event_monitor/common/lockutils.py | wangyu2014/galaxy | e34473ea8b903d31dac6c74c826a91319b681bf6 | [
"BSD-2-Clause"
] | null | null | null | event_monitor/common/lockutils.py | wangyu2014/galaxy | e34473ea8b903d31dac6c74c826a91319b681bf6 | [
"BSD-2-Clause"
] | null | null | null | from contextlib import contextmanager
from threading import Condition
from threading import current_thread
from threading import Lock
from time import time
class RWLock(object):
    def __init__(self, lock=None):
        """Initialize this read-write lock."""

        # Condition variable, used to signal waiters of a change in object
        # state.
        if lock is None:
            self.__condition = Condition(Lock())
        else:
            self.__condition = Condition(lock)

        # Initialize with no writers.
        self.__writer = None
        self.__upgradewritercount = 0
        self.__pendingwriters = []

        # Initialize with no readers.
        self.__readers = {}

    def acquire_read(self, blocking=True, timeout=None):
        """Acquire a read lock for the current thread, waiting at most
        timeout seconds or doing a non-blocking check in case timeout is <= 0.

        In case timeout is None, the call to acquire_read blocks until the
        lock request can be serviced.

        In case the timeout expires before the lock could be serviced, a
        RuntimeError is thrown."""

        if not blocking:
            endtime = -1
        elif timeout is not None:
            endtime = time() + timeout
        else:
            endtime = None
        me = current_thread()
        self.__condition.acquire()
        try:
            if self.__writer is me:
                # If we are the writer, grant a new read lock, always.
                self.__writercount += 1
                return
            while True:
                if self.__writer is None:
                    # Only test anything if there is no current writer.
                    if self.__upgradewritercount or self.__pendingwriters:
                        if me in self.__readers:
                            # Only grant a read lock if we already have one
                            # in case writers are waiting for their turn.
                            # This means that writers can't easily get starved
                            # (but see below, readers can).
                            self.__readers[me] += 1
                            return
                        # No, we aren't a reader (yet), wait for our turn.
                    else:
                        # Grant a new read lock, always, in case there are
                        # no pending writers (and no writer).
                        self.__readers[me] = self.__readers.get(me, 0) + 1
                        return
                if timeout is not None:
                    remaining = endtime - time()
                    if remaining <= 0:
                        # Timeout has expired, signal caller of this.
                        raise RuntimeError("Acquiring read lock timed out")
                    self.__condition.wait(remaining)
                else:
                    self.__condition.wait()
        finally:
            self.__condition.release()

    def acquire_write(self, timeout=None):
        """Acquire a write lock for the current thread, waiting at most
        timeout seconds or doing a non-blocking check in case timeout is <= 0.

        In case the write lock cannot be serviced due to the deadlock
        condition mentioned above, a ValueError is raised.

        In case timeout is None, the call to acquire_write blocks until the
        lock request can be serviced.

        In case the timeout expires before the lock could be serviced, a
        RuntimeError is thrown."""

        if timeout is not None:
            endtime = time() + timeout
        me, upgradewriter = current_thread(), False
        self.__condition.acquire()
        try:
            if self.__writer is me:
                # If we are the writer, grant a new write lock, always.
                self.__writercount += 1
                return
            elif me in self.__readers:
                # If we are a reader, no need to add us to pendingwriters,
                # we get the upgradewriter slot.
                if self.__upgradewritercount:
                    # If we are a reader and want to upgrade, and someone
                    # else also wants to upgrade, there is no way we can do
                    # this except if one of us releases all his read locks.
                    # Signal this to user.
                    if timeout is not None:
                        raise RuntimeError(
                            "Write lock upgrade would deadlock until timeout")
                    else:
                        raise ValueError(
                            "Inevitable dead lock, denying write lock")
                upgradewriter = True
                self.__upgradewritercount = self.__readers.pop(me)
            else:
                # We aren't a reader, so add us to the pending writers queue
                # for synchronization with the readers.
                self.__pendingwriters.append(me)
            while True:
                if not self.__readers and self.__writer is None:
                    # Only test anything if there are no readers and writers.
                    if self.__upgradewritercount:
                        if upgradewriter:
                            # There is a writer to upgrade, and it's us. Take
                            # the write lock.
                            self.__writer = me
                            self.__writercount = self.__upgradewritercount + 1
                            self.__upgradewritercount = 0
                            return
                        # There is a writer to upgrade, but it's not us.
                        # Always leave the upgrade writer the advance slot,
                        # because he presumes he'll get a write lock directly
                        # from a previously held read lock.
                    elif self.__pendingwriters[0] is me:
                        # If there are no readers and writers, it's always
                        # fine for us to take the writer slot, removing us
                        # from the pending writers queue.
                        # This might mean starvation for readers, though.
                        self.__writer = me
                        self.__writercount = 1
                        self.__pendingwriters = self.__pendingwriters[1:]
                        return
                if timeout is not None:
                    remaining = endtime - time()
                    if remaining <= 0:
                        # Timeout has expired, signal caller of this.
                        if upgradewriter:
                            # Put us back on the reader queue. No need to
                            # signal anyone of this change, because no other
                            # writer could've taken our spot before we got
                            # here (because of remaining readers), as the test
                            # for proper conditions is at the start of the
                            # loop, not at the end.
                            self.__readers[me] = self.__upgradewritercount
                            self.__upgradewritercount = 0
                        else:
                            # We were a simple pending writer, just remove us
                            # from the FIFO list.
                            self.__pendingwriters.remove(me)
                        raise RuntimeError("Acquiring write lock timed out")
                    self.__condition.wait(remaining)
                else:
                    self.__condition.wait()
        finally:
            self.__condition.release()

    def release(self):
        """Release the currently held lock.

        In case the current thread holds no lock, a ValueError is thrown."""

        me = current_thread()
        self.__condition.acquire()
        try:
            if self.__writer is me:
                # We are the writer, take one nesting depth away.
                self.__writercount -= 1
                if not self.__writercount:
                    # No more write locks; take our writer position away and
                    # notify waiters of the new circumstances.
                    self.__writer = None
                    self.__condition.notifyAll()
            elif me in self.__readers:
                # We are a reader currently, take one nesting depth away.
                self.__readers[me] -= 1
                if not self.__readers[me]:
                    # No more read locks, take our reader position away.
                    del self.__readers[me]
                    if not self.__readers:
                        # No more readers, notify waiters of the new
                        # circumstances.
                        self.__condition.notifyAll()
            else:
                raise ValueError("Trying to release unheld lock")
        finally:
            self.__condition.release()

    @property
    @contextmanager
    def readlock(self):
        self.acquire_read()
        try:
            yield
        finally:
            self.release()

    @property
    @contextmanager
    def writelock(self):
        self.acquire_write()
        try:
            yield
        finally:
            self.release()
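
# Usage sketch (not part of the original module): the context-manager
# properties make the common cases one-liners.
#
#   rwlock = RWLock()
#
#   with rwlock.readlock:
#       pass  # many threads may hold this at once
#
#   with rwlock.writelock:
#       pass  # exclusive; waits for all readers to drain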
| 42.949074 | 78 | 0.511372 | 972 | 9,277 | 4.744856 | 0.216049 | 0.039462 | 0.016912 | 0.017346 | 0.373157 | 0.325889 | 0.284475 | 0.242194 | 0.242194 | 0.211405 | 0 | 0.00363 | 0.435809 | 9,277 | 215 | 79 | 43.148837 | 0.877532 | 0.327584 | 0 | 0.596899 | 0 | 0 | 0.028859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0 | 0.03876 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f7ff0be57aebe2550e805caa1e135470f309ea7 | 1,891 | py | Python | app/database/scraper/parsers/CourseParser.py | gilltan97/uoft-course-api | b66b8409fb81d151ef470c71aeb3e5aad24691dd | [
"MIT"
] | null | null | null | app/database/scraper/parsers/CourseParser.py | gilltan97/uoft-course-api | b66b8409fb81d151ef470c71aeb3e5aad24691dd | [
"MIT"
] | null | null | null | app/database/scraper/parsers/CourseParser.py | gilltan97/uoft-course-api | b66b8409fb81d151ef470c71aeb3e5aad24691dd | [
"MIT"
] | null | null | null | from utils import Scraper
from parsers import Helpers
import bs4 as BeautifulSoup
class CourseParser:
"""
Course parser which parses data from web service called
Course Finder located at: http://coursefinder.utoronto.ca/.
-- references --
:see: https://docs.python.org/3.4/library/re.html
"""
host_url = 'http://coursefinder.utoronto.ca/course-search/search/'
def __init__(self, course_code):
self.course_code = course_code
def retrieve_html(self):
"""
Search for course at host url and retrieve the html data with the
given course code
"""
scraper = Scraper.Scraper()
url = CourseParser.host_url + 'courseInquiry'
params = {
'methodToCall': 'start',
'viewId': 'CourseDetails-InquiryView',
'courseId': '{}'.format(self.course_code)
}
return scraper.get_data(url, params=params, json=False, write=False)
def parse_html(self):
"""
Parse the HTML content and create a JSON file from that content
"""
soup = BeautifulSoup.BeautifulSoup(self.retrieve_html(), 'html.parser')
if soup.find(id='u19')['data-headerfor'] != 'correctPage':
print("Course not found.")
return None
return {
'code': self.course_code[:-5],
'name': Helpers.get_name(soup),
'division': Helpers.get_division(soup),
'department': Helpers.get_department(soup),
'prerequisites': Helpers.get_prerequisites(soup),
'exclusion': Helpers.get_exclusion(soup),
'level': Helpers.get_courselevel(soup),
'campus': Helpers.get_campus(soup),
'breadth requirements': Helpers.get_breadth(soup),
'term': Helpers.get_term(soup),
'sections': Helpers.get_sections(soup)
}
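
# Usage sketch (the course code below is hypothetical; judging by the [:-5]
# slice in parse_html, Course Finder ids carry a 5-character session suffix):
#
#   parser = CourseParser('CSC108H1F20169')
#   details = parser.parse_html()
#   if details is not None:
#       print(details['name'], details['campus'])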
| 33.767857 | 79 | 0.607615 | 206 | 1,891 | 5.456311 | 0.456311 | 0.088968 | 0.049822 | 0.046263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00437 | 0.273929 | 1,891 | 55 | 80 | 34.381818 | 0.814275 | 0.17504 | 0 | 0 | 0 | 0 | 0.183356 | 0.016915 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088235 | false | 0 | 0.088235 | 0 | 0.323529 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f81b80b0e19ba69b31b17aae322062d35934abf | 1,005 | py | Python | ML/Projects/spam_classifier_naive_bayes/build_vocabulary.py | xuyannus/Machine-Learning-Collection | 6d5dcd18d4e40f90e77355d56a2902e4c617ecbe | [
"MIT"
] | 3,094 | 2020-09-20T04:34:31.000Z | 2022-03-31T23:59:46.000Z | ML/Projects/spam_classifier_naive_bayes/build_vocabulary.py | xkhainguyen/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 | [
"MIT"
] | 79 | 2020-09-24T08:54:17.000Z | 2022-03-30T14:45:08.000Z | ML/Projects/spam_classifier_naive_bayes/build_vocabulary.py | xkhainguyen/Machine-Learning-Collection | 425d196e9477dbdbbd7cc0d19d29297571746ab5 | [
"MIT"
] | 1,529 | 2020-09-20T16:21:21.000Z | 2022-03-31T21:16:25.000Z | # -*- coding: utf-8 -*-
"""
We want go through each word in all emails,
check if the word is an actual english word
by comparing with nltk.corpus words and if it is
then add it to our vocabulary.
"""
import pandas as pd
import nltk
from nltk.corpus import words
vocabulary = {}
data = pd.read_csv("data/emails.csv")
nltk.download("words")
set_words = set(words.words())
def build_vocabulary(curr_email):
idx = len(vocabulary)
for word in curr_email:
if word.lower() not in vocabulary and word.lower() in set_words:
vocabulary[word] = idx
idx += 1
if __name__ == "__main__":
for i in range(data.shape[0]):
curr_email = data.iloc[i, :][0].split()
print(
f"Current email is {i}/{data.shape[0]} and the \
length of vocab is curr {len(vocabulary)}"
)
build_vocabulary(curr_email)
# Write dictionary to vocabulary.txt file
file = open("vocabulary.txt", "w")
file.write(str(vocabulary))
file.close()
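
    # Note (not in the original script): str(vocabulary) is awkward to parse
    # back in; json round-trips cleanly if the downstream code can be changed:
    #
    #   import json
    #   with open("vocabulary.json", "w") as fp:
    #       json.dump(vocabulary, fp)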
| 23.928571 | 72 | 0.645771 | 150 | 1,005 | 4.213333 | 0.493333 | 0.056962 | 0.041139 | 0.075949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006502 | 0.234826 | 1,005 | 41 | 73 | 24.512195 | 0.815345 | 0.228856 | 0 | 0 | 0 | 0 | 0.056209 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.166667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f81bfe97d1be0be08bfd049718d6a4355de5593 | 5,356 | py | Python | Ch16/pftrack.py | quietcoolwu/MLCode | b4bdb7ee3468da597e5d16cfb58728e3c29ca889 | [
"Xnet",
"X11"
] | null | null | null | Ch16/pftrack.py | quietcoolwu/MLCode | b4bdb7ee3468da597e5d16cfb58728e3c29ca889 | [
"Xnet",
"X11"
] | null | null | null | Ch16/pftrack.py | quietcoolwu/MLCode | b4bdb7ee3468da597e5d16cfb58728e3c29ca889 | [
"Xnet",
"X11"
] | null | null | null | # Code from Chapter 16 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by Stephen Marsland (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2014
# 2D particle filter tracking. Euclidean distance based
import numpy as np
import pylab as pl
def systematic(w, N):
    # Systematic resampling
    # One too many to make sure it is >1
    samples = np.random.rand(N + 1)
    indices = np.arange(N + 1)
    u = (samples + indices) / N

    cumw = np.cumsum(w)

    Ncopies = np.zeros((N))
    # Integer dtype so the returned indices can be used to index arrays
    # directly (float indices raise an error in modern numpy).
    keep = np.zeros((N), dtype=int)
    # ni copies of particle xi where ni = number of u between ws[i-1] and ws[i]
    j = 0
    for i in range(N):
        while ((u[j] < cumw[i]) & (j < N)):
            keep[j] = i
            Ncopies[i] += 1
            j += 1
    return keep
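
# Worked micro-example (illustrative, not in the original file): with N = 4
# and weights w = [0.1, 0.2, 0.3, 0.4], cumw = [0.1, 0.3, 0.6, 1.0], and the
# jittered grid u lands roughly 0-1 points in [0, 0.1), 1 in [0.1, 0.3),
# 1 in [0.3, 0.6) and 1-2 in [0.6, 1.0); heavy particles get copied and
# light ones tend to be dropped.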
def pf(x0, xdot, sigma, T, N, width):
    # Sample x0 from prior p(x0)
    particles = np.zeros((N, 2, T + 1))
    x = np.zeros((2, T + 1))
    x[:, 0] = x0
    particles[:, :, 0] = x0
    particlepred = np.zeros((N, 2, T))
    particlepred[:, :, 0] = x0 + np.random.uniform(-width, width, (N, 2))
    print(particlepred[:, :, 0])
    weights = np.ones((N, T))

    # Main loop
    for t in range(0, T):
        # importance sampling
        particlepred[:, :, t] = particles[:, :, t] + np.random.uniform(-width, width, (N, 2))
        # print particlepred[:,:,t]
        print(x[:, t])
        print(x[:, t] - particlepred[:, :, t])
        weights[:, t] = np.sum((x[:, t] - particlepred[:, :, t]) ** 2 + 1e-99, axis=1)
        print(weights[:, t])
        weights[:, t] = 1. / np.sum((x[:, t] - particlepred[:, :, t]) ** 2 + 1e-99, axis=1)
        print(weights[:, t])
        # weights[:,t] = np.sum(1./np.sqrt(sigma) * np.exp(-0.5/sigma * (x[:,t] - particlepred[:,:,t])**2) + 1e-99,axis=1)
        weights[:, t] /= np.sum(weights[:, t])
        print(weights[:, t])

        # selection
        resample = False
        if 1. / sum(weights[:, t] ** 2) < N / 2.:
            print("Resampling")
            resample = True

        sys = True
        if resample:
            if sys:
                keep = systematic(weights[:, t], N)
            else:
                # Residual resampling
                # Add a little bit because of a rounding error!
                # Cast to int so the counts can be used as slice bounds below.
                Ncopies = np.floor(weights[:, t] * N + 1e-10).astype(int)
                keep = np.zeros((N), dtype=int)
                j = 0
                for i in range(N):
                    keep[j:j + Ncopies[i]] = i
                    j += Ncopies[i]
                Nleft = int(N - np.sum(Ncopies))
                # Rest by systematic resampling
                if Nleft > 0:
                    print("sys resample")
                    probs = (weights[:, t] * N - Ncopies) / Nleft
                    extrakeep = systematic(probs, Nleft)
                    keep[j:] = extrakeep
        else:
            keep = list(range(N))
        print(keep)

        # output
        for i in range(N):
            particles[i, :, t + 1] = particlepred[keep[i], :, t]

        # print "here"
        print(x[:, t])
        print(particlepred[:, :, t])
        # x[:,t+1] = x[:,t] + xdot*np.random.uniform(-1,1,(1,2))
        x[:, t + 1] = x[:, t] + xdot  # + np.random.uniform(-1,1,(1,2))
        # print particles[:,:,t]

    return particles, x, weights
def pf_demo():
x0 = np.array([10, 12])
xdot = np.array([10, 8])
np.random.seed(3)
T = 15
N = 30
sigma = 1.0
[particles, x, weights] = pf(x0, xdot, sigma, T, N, 15)
x = x[:, :T]
particles = particles[:, :, :T]
# print particles
# print x
    dfilt = x[np.newaxis, :, :] - particles  # (N, 2, T) differences between the true track and every particle
    mse_filt = np.sqrt(np.sum(dfilt ** 2))
# plot_track(x,y,xfilt,Pfilt)
plot_position(x, particles, T)
def plot_position(x, particles, T):
pl.ion()
pl.figure()
colours = pl.cm.gray(np.linspace(0, 1, T))
# for t in [0,5,10,14]:
for t in range(T):
# print particles[:,:,t]
pl.plot(x[0, t], x[1, t], 'x', color=colours[t], ms=10.)
pl.plot(particles[:, 0, t], particles[:, 1, t], 'o', color=colours[t])
# pl.plot(particles[:,0,5],particles[:,1,5],'go')
# pl.plot(particles[:,0,10],particles[:,1,10],'co')
# pl.plot(particles[:,0,14],particles[:,1,14],'ko')
pl.xlim((0, 150))
pl.ylim((0, 150))
def plot_track(x, y, Kx, P):
fig = pl.figure()
ax = fig.add_subplot(111, aspect='equal')
pl.plot(x[0, :], x[1, :], 'ks-')
pl.plot(y[0, :], y[1, :], 'k*')
pl.plot(Kx[0, :], Kx[1, :], 'kx:')
obs_size, T = np.shape(y)
from matplotlib.patches import Ellipse
# Axes of ellipse are eigenvectors of covariance matrix, lengths are square roots of eigenvalues
ellsize = np.zeros((obs_size, T))
ellangle = np.zeros((T))
for t in range(T):
[evals, evecs] = np.linalg.eig(P[:2, :2, t])
ellsize[:, t] = np.sqrt(evals)
        # Ellipse expects its angle in degrees; take the orientation of the first eigenvector
        ellangle[t] = np.degrees(np.arctan2(evecs[1, 0], evecs[0, 0]))
ells = [Ellipse(xy=[Kx[0, t], Kx[1, t]], width=ellsize[0, t], height=ellsize[1, t], angle=ellangle[t]) for t in
range(T)]
for e in ells:
ax.add_artist(e)
e.set_alpha(0.1)
e.set_facecolor([0.7, 0.7, 0.7])
| 31.692308 | 122 | 0.511202 | 782 | 5,356 | 3.485934 | 0.286445 | 0.035216 | 0.014674 | 0.016141 | 0.168379 | 0.130227 | 0.109685 | 0.099413 | 0.099413 | 0.05796 | 0 | 0.043162 | 0.307879 | 5,356 | 168 | 123 | 31.880952 | 0.692204 | 0.244772 | 0 | 0.149533 | 0 | 0 | 0.009225 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046729 | false | 0 | 0.028037 | 0 | 0.093458 | 0.102804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f831b7eadeddc95c309456c4f686e40797eef19 | 5,599 | py | Python | tools/extract_ppi.py | RenskeW/cwl-epitope | 475d0716b07c58b40ed01b4a49d5a14b15d641cc | [
"AAL"
] | null | null | null | tools/extract_ppi.py | RenskeW/cwl-epitope | 475d0716b07c58b40ed01b4a49d5a14b15d641cc | [
"AAL"
] | null | null | null | tools/extract_ppi.py | RenskeW/cwl-epitope | 475d0716b07c58b40ed01b4a49d5a14b15d641cc | [
"AAL"
] | null | null | null | """
This script combines multiple BioDL datasets and extracts the relevant information.
In addition, it uses the UniProt mapping tool to find PDB identifiers for each UniProt identifier in the dataset.
@author: Renske de Wit
@dateCreated: 2022-05-27
Inputs:
- biodl_training_....csv
- biodl_test_....csv
Outputs:
- directory with fasta files for each protein in the combined bioDL dataset (and which maps to a UniProt identifier)
"""
import argparse
import pandas as pd
import urllib.parse
import urllib.request
from pathlib import Path
import os
def parse_args():
"""
Parses arguments from the command line.
"""
parser = argparse.ArgumentParser(description='Combines features into 1 file for every fasta sequence, stores files in 1 output directory.')
# Arguments
parser.add_argument('train_biodl_path', help='Path to biodl training dataset.')
parser.add_argument('test_biodl_path', help='Path to biodl testing dataset.')
parser.add_argument('--outdir', help='Path to output directory.', default="./fasta_files")
return parser.parse_args()
def merge_datasets(df1, df2):
"""
Merge testing and training set into one dataframe.
"""
assert sorted(list(df1.columns)) == sorted(list(df2.columns))
new_df = pd.concat([df1, df2], axis=0, ignore_index=True)
assert len(new_df) == len(df1) + len(df2)
return new_df
def use_uniprot_mapping_tool(uniprot_ids):
"""
Queries UniProt API to return PDB ids associated with query UniProt IDs.
Code adapted from the example at https://www.uniprot.org/help/api_idmapping.
"""
url = 'https://www.uniprot.org/uploadlists/'
query = " ".join(uniprot_ids)
params = {
'from': 'ACC+ID',
'to': 'PDB_ID',
'format': 'tab',
'query': query
}
data = urllib.parse.urlencode(params)
data = data.encode('utf-8')
req = urllib.request.Request(url, data)
with urllib.request.urlopen(req) as f:
response = f.read()
return response
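def mapping_request_demo():
    """
    Hedged sketch (hypothetical accessions, no network call, not part of the
    original script): shows the form-encoded body that use_uniprot_mapping_tool
    posts to the legacy UniProt endpoint.
    """
    params = {'from': 'ACC+ID', 'to': 'PDB_ID', 'format': 'tab', 'query': 'P12345 Q67890'}
    return urllib.parse.urlencode(params)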
def response_to_dictionary(response):
"""
Converts the response obtained from UniProt mapping tool to a dictionary.
    Output: Dictionary with UniProt IDs as keys and lists of PDB ids as values (a single UniProt ID may map to several PDB ids).
"""
    lines = response.decode('utf-8').split(sep="\n")  # output: ['From\tTo', uniprot1\tpdb1, uniprot2\tpdb2, ... ]
    pairs = [a.split('\t') for a in lines[1:]]  # output: [ [uniprot1, pdb1], [uniprot2, pdb2], ... ]
    mapping_dict = {}
    for pair in pairs:  # renamed from 'list'/'id' to avoid shadowing builtins
        if len(pair) == 2:
            if pair[0] not in mapping_dict:
                mapping_dict[pair[0]] = [pair[1]]
            else:
                mapping_dict[pair[0]].append(pair[1])
return mapping_dict
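def response_to_dictionary_demo():
    """
    Hedged sketch (made-up identifiers, not from the original script): the
    tab-separated response below maps P12345 to both of its PDB entries.
    """
    fake_response = b"From\tTo\nP12345\t1ABC\nP12345\t2DEF\nQ67890\t3GHI\n"
    return response_to_dictionary(fake_response)  # {'P12345': ['1ABC', '2DEF'], 'Q67890': ['3GHI']}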
def map_identifiers(dataset):
"""
    Uses the UniProt mapping tool to map PDB identifiers to every UniProt ID in the dataset.
"""
uniprot_ids = [i for i in dataset["uniprot_id"]]
response = use_uniprot_mapping_tool(uniprot_ids)
# Convert response to dictionary
mapping = response_to_dictionary(response)
# Arbitrary & not necessarily correct choice: map the first pdb id to the uniprot id.
dataset.insert(len(dataset.columns), "pdb_id", "")
for i in dataset.index:
uniprot_id = dataset.loc[i, "uniprot_id"]
try:
pdb_id = mapping[uniprot_id][0]
dataset.loc[i, "pdb_id"] = pdb_id
except KeyError:
            dataset = dataset.drop(index=i)  # drop rows which do not map to any pdb id
return dataset, response.decode('utf-8')
def write_fasta_files(dataset, out_dir):
"""
Writes a fasta file for every pdb id in the dataset, which includes which residues are PPI residues.
"""
for i in dataset.index:
pdb_id = dataset.loc[i, "pdb_id"]
sequence = dataset.loc[i, "sequence"]
domain = dataset.loc[i, "domain"]
# Remove commas from domain and sequence
sequence = "".join(sequence.split(sep=","))
domain = "".join(domain.split(sep=","))
assert len(sequence) == len(domain)
# Write fasta file
filename = f"{pdb_id}.fasta"
out_path = Path(out_dir) / filename
with open(out_path, 'w') as f:
f.writelines(f">{pdb_id}\n{sequence}\n{domain}")
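def write_fasta_files_demo(out_dir="./demo_fasta"):
    """
    Hedged sketch (made-up pdb id and residues, not part of the pipeline):
    each output file holds the PDB id, the sequence, and the per-residue
    interface labels on three lines, e.g. ">1ABC\nMKT\n010".
    """
    os.makedirs(out_dir, exist_ok=True)
    demo = pd.DataFrame({"pdb_id": ["1ABC"], "sequence": ["M,K,T"], "domain": ["0,1,0"]})
    write_fasta_files(demo, out_dir)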
def main():
args = parse_args()
train_biodl_path = args.train_biodl_path
test_biodl_path = args.test_biodl_path
out_dir = args.outdir
# Create output directory
if not os.path.exists(out_dir): # maybe introduce some safeguards here to avoid overwriting existing files
os.mkdir(out_dir)
# Read input data
data_train = pd.read_csv(train_biodl_path)
data_test = pd.read_csv(test_biodl_path)
# Extract columns of interest from both dataframes
relevant_columns = [ "domain", "sequence", "uniprot_id" ]
data_train_slim = data_train[relevant_columns]
data_test_slim = data_test[relevant_columns]
# Merge the two datasets
combined_data = merge_datasets(data_train_slim, data_test_slim)
# Map the UniProt identifiers in the dataset to their associated pdb ids (obtained from UniProt mapping tool)
mapped_dataset, uniprot_response = map_identifiers(combined_data)
# Write each protein sequence together with its interface residues to a separate .fasta file.
write_fasta_files(mapped_dataset, out_dir)
# Write UniProt response to a file as well
with open(Path(out_dir).parent / 'uniprot_mapping.tsv', 'w') as f:
f.write(uniprot_response)
if __name__ == "__main__":
main() | 32.552326 | 143 | 0.670834 | 777 | 5,599 | 4.684685 | 0.289575 | 0.016484 | 0.01511 | 0.010714 | 0.082418 | 0.047253 | 0.017033 | 0 | 0 | 0 | 0 | 0.00874 | 0.223433 | 5,599 | 172 | 144 | 32.552326 | 0.828427 | 0.32738 | 0 | 0.022989 | 0 | 0 | 0.128669 | 0.008505 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.08046 | false | 0 | 0.068966 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f8a17ff768229051455475e41d5e2e581a55eab | 12,664 | py | Python | python_api/renderer/raysampler.py | openNGP/openNGP | 085d6e2f94fcdc5c1c15a62027d31b31398842bb | [
"MIT"
] | 3 | 2022-03-04T09:16:20.000Z | 2022-03-19T02:57:01.000Z | python_api/renderer/raysampler.py | openNGP/openNGP | 085d6e2f94fcdc5c1c15a62027d31b31398842bb | [
"MIT"
] | 2 | 2022-03-08T10:54:47.000Z | 2022-03-11T08:58:18.000Z | python_api/renderer/raysampler.py | openNGP/openNGP | 085d6e2f94fcdc5c1c15a62027d31b31398842bb | [
"MIT"
] | null | null | null | import torch
import numpy as np
from collections import namedtuple
from python_api.renderer.raymarching import near_far_from_aabb
from python_api.renderer.rays import Rays, RaysWithDepth
from python_api.utils import FunctionRegistry
SamplerResult = namedtuple(
'SamplerResult',
('xyzs', 'views', 'z_vals', 'deltas')
)
SamplerResultWithBound = namedtuple(
'SamplerResultWithBound',
('xyzs', 'views', 'z_vals', 'deltas', 'nears', 'fars')
)
# Hierarchical sampling (section 5.2)
def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
# Get pdf
device = weights.device
weights = weights + 1e-5 # prevent nans
pdf = weights / torch.sum(weights, -1, keepdim=True)
cdf = torch.cumsum(pdf, -1)
cdf = torch.cat([torch.zeros_like(cdf[...,:1]), cdf], -1) # (batch, len(bins))
# Take uniform samples
if det:
u = torch.linspace(0., 1., steps=N_samples, device=device)
u = u.expand(list(cdf.shape[:-1]) + [N_samples])
else:
u = torch.rand(list(cdf.shape[:-1]) + [N_samples], device=device)
# Pytest, overwrite u with numpy's fixed random numbers
if pytest:
np.random.seed(0)
new_shape = list(cdf.shape[:-1]) + [N_samples]
if det:
u = np.linspace(0., 1., N_samples)
u = np.broadcast_to(u, new_shape)
else:
u = np.random.rand(*new_shape)
        u = torch.Tensor(u).to(device)  # keep u on the same device as cdf for searchsorted
# Invert CDF
u = u.contiguous()
inds = torch.searchsorted(cdf, u, right=True)
below = torch.max(torch.zeros_like(inds-1), inds-1)
above = torch.min((cdf.shape[-1]-1) * torch.ones_like(inds), inds)
inds_g = torch.stack([below, above], -1) # (batch, N_samples, 2)
# cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
# bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
denom = (cdf_g[...,1]-cdf_g[...,0])
denom = torch.where(denom<1e-5, torch.ones_like(denom), denom)
t = (u-cdf_g[...,0])/denom
samples = bins_g[...,0] + t * (bins_g[...,1]-bins_g[...,0])
return samples
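# Hedged usage sketch (demo function and shapes are not from the original
# repo): sample_pdf draws new depths by inverting the piecewise-linear CDF
# built from per-bin weights, i.e. the NeRF-style hierarchical sampling step.
def sample_pdf_demo():
    bins = torch.linspace(0., 1., steps=9).expand(2, 9)  # 2 rays, 8 intervals
    weights = torch.rand(2, 8)  # unnormalised; sample_pdf normalises internally
    return sample_pdf(bins, weights, N_samples=16, det=True)  # (2, 16) z values, increasing along dim 1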
def delta_from_zval(z_vals, rays_d, delta_inf=1e10):
# Convert these values using volume rendering (Section 4)
deltas = z_vals[:, 1:] - z_vals[:, :-1] # (N_rays, N_samples_-1)
delta_inf = delta_inf * torch.ones_like(deltas[:, :1]) # (N_rays, 1) the last delta is infinity
deltas = torch.cat([deltas, delta_inf], -1) # (N_rays, N_samples_)
deltas = deltas * torch.norm(rays_d[...,None,:], dim=-1)
return deltas
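# Hedged sketch (assumed toy values only): deltas are the distances between
# consecutive samples along each ray, scaled by the ray direction norm, with
# an effectively infinite last interval for the volume-rendering integral.
def delta_from_zval_demo():
    z_vals = torch.tensor([[1.0, 2.0, 4.0]])  # one ray, three samples
    rays_d = torch.tensor([[0.0, 0.0, 2.0]])  # unnormalised direction, norm 2
    return delta_from_zval(z_vals, rays_d)  # tensor([[2., 4., 2e10]])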
def uniform_sampler(rays: Rays, N_samples: int, lindisp: bool, perturb: bool):
near, far = rays.near, rays.far
rays_o, rays_d = rays.origins, rays.directions
N_rays = rays_o.shape[0]
device = rays_o.device
t_vals = torch.linspace(0., 1., steps=N_samples, device=device)
if not lindisp:
z_vals = near * (1.-t_vals) + far * (t_vals)
else:
z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
z_vals = z_vals.expand([N_rays, N_samples])
if perturb > 0.:
# get intervals between samples
mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
upper = torch.cat([mids, z_vals[...,-1:]], -1)
lower = torch.cat([z_vals[...,:1], mids], -1)
# stratified samples in those intervals
t_rand = torch.rand(z_vals.shape, device=device)
z_vals = lower + (upper - lower) * t_rand
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]
# Convert these values using volume rendering (Section 4)
deltas = delta_from_zval(z_vals, rays_d)
views = rays.viewdirs[...,None,:].expand(pts.shape)
return SamplerResult(pts, views, z_vals, deltas)
def _importance_sampler(rays: Rays,
samples: SamplerResult, # samples from last pass
weights,
N_importance,
use_norm_dir,
delta_inf,
perturb,
concat_input_sample,
stop_grad):
rays_o = rays.origins
if use_norm_dir:
rays_d = rays.viewdirs
else:
rays_d = rays.directions
z_vals = samples.z_vals
z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])
z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.))
if stop_grad:
z_samples = z_samples.detach()
if concat_input_sample:
z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
else:
z_vals, _ = torch.sort(z_samples, -1)
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3]
# Convert these values using volume rendering (Section 4)
deltas = delta_from_zval(z_vals, rays_d, delta_inf)
views = rays.viewdirs[...,None,:].expand(pts.shape)
return SamplerResult(pts, views, z_vals, deltas)
def importance_sampler(rays: Rays,
samples: SamplerResult, # samples from last pass
weights,
N_importance,
use_norm_dir,
perturb):
return _importance_sampler(rays,
samples,
weights,
N_importance,
use_norm_dir,
1e10,
perturb,
True,
True)
def importance_sampler_mipnerf360(rays: Rays,
samples: SamplerResult, # samples from last pass
weights,
N_importance,
use_norm_dir,
perturb,
concat_input_sample,
stop_grad):
return _importance_sampler(rays,
samples,
weights,
N_importance,
use_norm_dir,
1e10,
perturb,
concat_input_sample,
stop_grad)
def ngp_importance_sampler(rays: Rays,
samples: SamplerResultWithBound, # samples from last pass
weights,
primitive,
N_importance,
perturb):
if isinstance(samples, SamplerResultWithBound):
sample_dist = (samples.fars - samples.nears) / samples.z_vals.shape[-1]
use_norm_dir = True
else:
sample_dist = (rays.far - rays.near) / samples.z_vals.shape[-1]
use_norm_dir = False
sample_ret = _importance_sampler(rays,
samples,
weights,
N_importance,
use_norm_dir,
sample_dist,
perturb,
True,
True)
aabb = primitive.geometry.aabb
pts = torch.min(torch.max(sample_ret.xyzs, aabb[:3]), aabb[3:]) # a manual clip.
return sample_ret._replace(xyzs=pts)
def ngp_uniform_sampler(rays: Rays, primitive, num_steps, min_near, perturb):
rays_o, rays_d = rays.origins, rays.viewdirs # rays.directions isn't normalized
    rays_o = rays_o.contiguous().view(-1, 3)
    rays_d = rays_d.contiguous().view(-1, 3)
    N = rays_o.shape[0]  # N = B * N, in fact
    device = rays_o.device
# choose aabb
aabb = primitive.geometry.aabb
# sample steps
nears, fars = near_far_from_aabb(rays_o, rays_d, aabb, min_near)
nears.unsqueeze_(-1)
fars.unsqueeze_(-1)
#print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')
z_vals = torch.linspace(0.0, 1.0, num_steps, device=device).unsqueeze(0) # [1, T]
z_vals = z_vals.expand((N, num_steps)) # [N, T]
z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]
# same perturb strategy with ori nerf, which guarantees z_vals in [near, far]
# if perturb > 0.:
# # get intervals between samples
# mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
# upper = torch.cat([mids, z_vals[...,-1:]], -1)
# lower = torch.cat([z_vals[...,:1], mids], -1)
# # stratified samples in those intervals
# t_rand = torch.rand(z_vals.shape, device=device)
# z_vals = lower + (upper - lower) * t_rand
# perturb z_vals
sample_dist = (fars - nears) / (num_steps - 1)
if perturb:
z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist
#z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.
# generate xyzs
pts = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]
pts = torch.min(torch.max(pts, aabb[:3]), aabb[3:]) # a manual clip.
# Convert these values using volume rendering (Section 4)
deltas = delta_from_zval(z_vals, rays_d, sample_dist)
views = rays.viewdirs[...,None,:].expand(pts.shape)
return SamplerResultWithBound(pts, views, z_vals, deltas, nears, fars)
def ngp_sampler_with_depth(rays: RaysWithDepth, primitive, num_steps, min_near, perturb, epsilon):
rays_o, rays_d = rays.origins, rays.viewdirs # rays.directions isn't normalized
    rays_o = rays_o.contiguous().view(-1, 3)
    rays_d = rays_d.contiguous().view(-1, 3)
    N = rays_o.shape[0]  # N = B * N, in fact
    device = rays_o.device
# choose aabb
aabb = primitive.geometry.aabb
# sample steps
nears, fars = near_far_from_aabb(rays_o, rays_d, aabb, min_near)
nears.unsqueeze_(-1)
fars.unsqueeze_(-1)
#print(f'nears = {nears.min().item()} ~ {nears.max().item()}, fars = {fars.min().item()} ~ {fars.max().item()}')
z_vals = torch.linspace(0.0, 1.0, num_steps, device=device).unsqueeze(0) # [1, T]
z_vals = z_vals.expand((N, num_steps)) # [N, T]
z_vals = nears + (fars - nears) * z_vals # [N, T], in [nears, fars]
# perturb z_vals
sample_dist = (fars - nears) / num_steps
if perturb:
z_vals = z_vals + (torch.rand(z_vals.shape, device=device) - 0.5) * sample_dist
#z_vals = z_vals.clamp(nears, fars) # avoid out of bounds xyzs.
# sample from depth prior, N(d_prior, epsilon)
extra_z_vals = torch.zeros_like(z_vals)
valid_depth = rays.depth[rays.mask]
m = torch.distributions.Normal(valid_depth, epsilon*torch.ones_like(valid_depth))
depth_samples = m.sample(torch.Size([num_steps]))
extra_z_vals[rays.mask.squeeze()] = depth_samples.T
    # rays without a valid depth prior: fall back to uniform samples in [near, far]
m = torch.distributions.Uniform(nears[~rays.mask], fars[~rays.mask])
pad_samples = m.sample(torch.Size([num_steps]))
extra_z_vals[~rays.mask.squeeze()] = pad_samples.T
z_vals, _ = torch.sort(torch.cat([z_vals, extra_z_vals], -1), -1)
# generate xyzs
pts = rays_o.unsqueeze(-2) + rays_d.unsqueeze(-2) * z_vals.unsqueeze(-1) # [N, 1, 3] * [N, T, 1] -> [N, T, 3]
pts = torch.min(torch.max(pts, aabb[:3]), aabb[3:]) # a manual clip.
# Convert these values using volume rendering (Section 4)
deltas = delta_from_zval(z_vals, rays_d, sample_dist)
views = rays.viewdirs[...,None,:].expand(pts.shape)
return SamplerResult(pts, views, z_vals, deltas)
def sparsity_sampler(rays: Rays, primitive, n_sp):
device = rays.origins.device
bound = primitive.geometry.bound
sp_points = torch.empty((n_sp, 3), device=device)
sp_points.uniform_(-bound, bound)
return SamplerResult(sp_points, None, None, None)
raysampler = FunctionRegistry(
uniform_sampler=uniform_sampler,
importance_sampler=importance_sampler,
importance_sampler_mipnerf360=importance_sampler_mipnerf360,
instant_ngp_sampler=ngp_uniform_sampler,
ngp_uniform_sampler=ngp_uniform_sampler,
ngp_importance_sampler=ngp_importance_sampler,
ngp_sampler_with_depth=ngp_sampler_with_depth,
sparsity_sampler=sparsity_sampler
)
| 37.578635 | 116 | 0.581017 | 1,664 | 12,664 | 4.204327 | 0.124399 | 0.052887 | 0.012007 | 0.011435 | 0.598342 | 0.565609 | 0.524728 | 0.516295 | 0.482133 | 0.446541 | 0 | 0.018146 | 0.286323 | 12,664 | 336 | 117 | 37.690476 | 0.755919 | 0.169141 | 0 | 0.451754 | 0 | 0 | 0.008227 | 0.002105 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04386 | false | 0 | 0.105263 | 0.008772 | 0.192982 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f8a224b51e6689597f171d5894a74164eaa371f | 754 | py | Python | patchalerts/games/fortnite.py | silentdot/PatchAlerts | eee0ece48a39dcf16f6b4d6ea3335e56e1082d78 | [
"MIT"
] | 8 | 2019-07-08T05:59:50.000Z | 2020-08-26T23:13:15.000Z | patchalerts/games/fortnite.py | silentdot/PatchAlerts | eee0ece48a39dcf16f6b4d6ea3335e56e1082d78 | [
"MIT"
] | 1 | 2020-05-27T21:10:54.000Z | 2020-05-28T02:23:03.000Z | patchalerts/games/fortnite.py | silentdot/PatchAlerts | eee0ece48a39dcf16f6b4d6ea3335e56e1082d78 | [
"MIT"
] | 4 | 2018-11-20T07:30:25.000Z | 2020-06-24T20:23:39.000Z | from util import loader
from wrappers.update import Update
from games.base_class import Game
class Fortnite(Game):
def __init__(self):
super().__init__("Fortnite", homepage='https://www.epicgames.com/fortnite/')
def scan(self):
soup = loader.soup("https://www.epicgames.com/fortnite/en-US/patch-notes/") # Follow redirect to latest.
_title = soup.find(attrs={'property': "og:title"})['content']
_desc = soup.find(attrs={'class': "patch-notes-text"}).get_text('\n')
_img = soup.find(attrs={'property': "og:image"})['content']
_url = loader.get_redirect()
yield Update(game=self, update_name=_title, post_url=_url, desc=_desc, image=_img, color="#1c237a")
if __name__ == "__main__":
    fn = Fortnite()
    for u in fn.scan():
print(u)
| 32.782609 | 107 | 0.702918 | 108 | 754 | 4.638889 | 0.509259 | 0.047904 | 0.077844 | 0.07984 | 0.203593 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006006 | 0.116711 | 754 | 22 | 108 | 34.272727 | 0.746246 | 0.034483 | 0 | 0 | 0 | 0 | 0.247934 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.176471 | 0 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f8b3b75523296d05c8c3a2f6a1a0602de5b94e6 | 469 | py | Python | tests/test_base_model.py | jpscaletti/sqlalchemy-wrapper | 049d150ca95e24e532bb1b64a9454683dc8e62c6 | [
"BSD-3-Clause"
] | 39 | 2016-01-01T02:44:15.000Z | 2018-12-10T10:32:28.000Z | tests/test_base_model.py | jpscaletti/sqlalchemy-wrapper | 049d150ca95e24e532bb1b64a9454683dc8e62c6 | [
"BSD-3-Clause"
] | 10 | 2016-01-09T15:05:30.000Z | 2018-02-14T21:15:40.000Z | tests/test_base_model.py | jpscaletti/sqlalchemy-wrapper | 049d150ca95e24e532bb1b64a9454683dc8e62c6 | [
"BSD-3-Clause"
] | 13 | 2015-12-02T23:20:19.000Z | 2018-01-15T06:57:08.000Z | def test_fill(dbs, TestModelA):
obj = dbs.create(TestModelA, title="Remember")
obj.fill(title="lorem ipsum")
dbs.commit()
updated = dbs.first(TestModelA)
assert updated.title == "lorem ipsum"
def test_repr(dbs, TestModelA):
obj = dbs.create(TestModelA, title="Hello world")
dbs.commit()
repr = str(obj)
assert f"<TestModelA #{id(obj)}" in repr
assert f"\n id = {obj.id}" in repr
assert "\n title = 'Hello world'" in repr
| 26.055556 | 53 | 0.643923 | 66 | 469 | 4.545455 | 0.348485 | 0.06 | 0.106667 | 0.126667 | 0.266667 | 0.266667 | 0.266667 | 0 | 0 | 0 | 0 | 0 | 0.208955 | 469 | 17 | 54 | 27.588235 | 0.808625 | 0 | 0 | 0.153846 | 0 | 0 | 0.219616 | 0 | 0 | 0 | 0 | 0 | 0.307692 | 1 | 0.153846 | false | 0 | 0 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f8d5d0caa86ba142e07a4811bdd1e8a34863f11 | 3,485 | py | Python | sharpy/plans/acts/protoss/act_defensive_cannons.py | eladyaniv01/sharpy-sc2 | 91119cc3e3fce683c2dbe9687c616c9cc0461b06 | [
"MIT"
] | null | null | null | sharpy/plans/acts/protoss/act_defensive_cannons.py | eladyaniv01/sharpy-sc2 | 91119cc3e3fce683c2dbe9687c616c9cc0461b06 | [
"MIT"
] | null | null | null | sharpy/plans/acts/protoss/act_defensive_cannons.py | eladyaniv01/sharpy-sc2 | 91119cc3e3fce683c2dbe9687c616c9cc0461b06 | [
"MIT"
] | null | null | null | from typing import Optional
from sharpy.general.zone import Zone
from sc2 import UnitTypeId
from sc2.position import Point2
from sc2.unit import Unit
from sharpy.plans.acts.act_base import ActBase
class ActDefensiveCannons(ActBase):
"""Act of starting to build new buildings up to specified count"""
def __init__(self, to_count_pre_base: int, additional_batteries: int = 0, to_base_index: Optional[int] = None):
self.to_base_index = to_base_index
self.additional_batteries = additional_batteries
assert to_count_pre_base is not None and isinstance(to_count_pre_base, int) and (to_count_pre_base > 0 or additional_batteries > 0)
self.to_count_per_base = to_count_pre_base
super().__init__()
async def execute(self) -> bool:
map_center = self.ai.game_info.map_center
pending_cannon_count = self.pending_build(UnitTypeId.PHOTONCANNON)
pending_battery_count = self.pending_build(UnitTypeId.SHIELDBATTERY)
all_ready = True
# Go through zones so that furthest expansions are fortified first
zones = self.knowledge.expansion_zones
for i in range(0, len(zones)):
zone = zones[i]
# Filter out zones that aren't ours and own zones that we are about to lose.
if zone.our_townhall is None or zone.known_enemy_power.ground_power > zone.our_power.ground_presence:
continue
if self.to_base_index is not None and i != self.to_base_index:
# Defenses are not ordered to that base
continue
            closest_pylon: Optional[Unit] = None
pylons = zone.our_units(UnitTypeId.PYLON)
if pylons.exists:
closest_pylon = pylons.closest_to(zone.center_location)
available_minerals = self.ai.minerals - self.knowledge.reserved_minerals
can_afford_cannon = available_minerals >= 150
can_afford_battery = available_minerals >= 100
if closest_pylon is None or closest_pylon.distance_to(zone.center_location) > 10:
# We need a pylon, but only if one isn't already on the way
if not self.pending_build(UnitTypeId.PYLON) and can_afford_battery:
await self.ai.build(UnitTypeId.PYLON, near=zone.center_location.towards(map_center, 4))
all_ready = False
continue
if zone.our_photon_cannons.amount + pending_cannon_count < self.to_count_per_base:
all_ready = False
if closest_pylon.is_ready and can_afford_cannon:
pos = self.defense_position(zone, closest_pylon)
await self.ai.build(UnitTypeId.PHOTONCANNON, near=pos)
if zone.our_batteries.amount + pending_battery_count < self.additional_batteries:
all_ready = False
if closest_pylon.is_ready and can_afford_battery:
pos = self.defense_position(zone, closest_pylon)
await self.ai.build(UnitTypeId.SHIELDBATTERY, near=pos)
return all_ready
def defense_position(self, zone: Zone, pylon: Unit):
position: Point2 = pylon.position
path = zone.paths.get(self.knowledge.enemy_main_zone.zone_index, None)
if path and path.distance > 50:
target_pos = path.get_index(10)
return position.towards(target_pos, 3)
return position.towards(zone.center_location, -2)
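# Hedged usage sketch (hypothetical bot wiring, not taken from this repo's docs):
# fortify every held base with up to 2 cannons plus 1 extra shield battery,
# starting from the most exposed expansions:
#   build_defense = ActDefensiveCannons(to_count_per_base=2, additional_batteries=1)
#   ... await build_defense.execute() inside the bot's plan loop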
| 43.5625 | 139 | 0.667145 | 456 | 3,485 | 4.846491 | 0.313596 | 0.043439 | 0.022624 | 0.031674 | 0.171041 | 0.099548 | 0.099548 | 0.099548 | 0.099548 | 0.099548 | 0 | 0.009404 | 0.267719 | 3,485 | 79 | 140 | 44.113924 | 0.856583 | 0.085222 | 0 | 0.145455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018182 | 1 | 0.036364 | false | 0 | 0.109091 | 0 | 0.218182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
4f8f018df0b0fd5b16b4fc6fe2287fa62ce54ad9 | 5,004 | py | Python | project/image_patch/__init__.py | delldu/ImagePatch | aaeadba9fe9f40e9bf900468f100a06bafc8231f | [
"MIT"
] | 1 | 2020-07-22T04:18:28.000Z | 2020-07-22T04:18:28.000Z | project/image_patch/__init__.py | delldu/Patch | aaeadba9fe9f40e9bf900468f100a06bafc8231f | [
"MIT"
] | null | null | null | project/image_patch/__init__.py | delldu/Patch | aaeadba9fe9f40e9bf900468f100a06bafc8231f | [
"MIT"
] | null | null | null | """Image/Video Patch Package.""" # coding=utf-8
#
# /************************************************************************************
# ***
# *** Copyright Dell 2021, 2022(18588220928@163.com) All Rights Reserved.
# ***
# *** File Author: Dell, Tuesday, 14 December 2021, 00:22:28 CST
# ***
# ************************************************************************************/
#
__version__ = "1.0.0"
import os
from tqdm import tqdm
import torch
import redos
import todos
from . import patch
import pdb
PATCH_ZEROPAD_TIMES = 128
def get_model():
"""Create model."""
model_path = "models/image_patch.pth"
cdir = os.path.dirname(__file__)
checkpoint = model_path if cdir == "" else cdir + "/" + model_path
model = patch.ImagePatchModel(4, 3)
todos.model.load(model, checkpoint)
device = todos.model.get_device()
model = model.to(device)
model.eval()
model = torch.jit.script(model)
todos.data.mkdir("output")
if not os.path.exists("output/image_patch.torch"):
model.save("output/image_patch.torch")
return model, device
def model_forward(model, device, input_tensor):
# zeropad for model
H, W = input_tensor.size(2), input_tensor.size(3)
if H % PATCH_ZEROPAD_TIMES != 0 or W % PATCH_ZEROPAD_TIMES != 0:
input_tensor = todos.data.zeropad_tensor(input_tensor, times=PATCH_ZEROPAD_TIMES)
output_tensor = todos.model.forward(model, device, input_tensor)
output_tensor = output_tensor[:, :, 0:H, 0:W]
mask = torch.ones_like(output_tensor)
return torch.cat((output_tensor, mask[:, 0:1, :, :]), dim=1)
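def model_forward_demo(input_file):
    # Hedged sketch (hypothetical file path, not part of the package API):
    # run a single RGBA image through the padded forward pass; the output
    # keeps the input height and width and carries an all-ones mask channel.
    model, device = get_model()
    input_tensor = todos.data.load_rgba_tensor(input_file)
    return model_forward(model, device, input_tensor)  # 1x4xHxW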
def image_client(name, input_files, output_dir):
redo = redos.Redos(name)
cmd = redos.image.Command()
image_filenames = todos.data.load_files(input_files)
for filename in image_filenames:
output_file = f"{output_dir}/{os.path.basename(filename)}"
context = cmd.patch(filename, output_file)
redo.set_queue_task(context)
print(f"Created {len(image_filenames)} tasks for {name}.")
def image_server(name, host="localhost", port=6379):
# load model
model, device = get_model()
def do_service(input_file, output_file, targ):
print(f" clean {input_file} ...")
try:
input_tensor = todos.data.load_rgba_tensor(input_file)
output_tensor = model_forward(model, device, input_tensor)
todos.data.save_tensor(output_tensor, output_file)
return True
except Exception as e:
print("exception: ", e)
return False
return redos.image.service(name, "image_patch", do_service, host, port)
def image_predict(input_files, output_dir):
# Create directory to store result
todos.data.mkdir(output_dir)
# load model
model, device = get_model()
# load files
image_filenames = todos.data.load_files(input_files)
# start predict
progress_bar = tqdm(total=len(image_filenames))
for filename in image_filenames:
progress_bar.update(1)
# orig input
input_tensor = todos.data.load_rgba_tensor(filename)
        # PyTorch recommends clone().detach() instead of torch.Tensor(input_tensor)
orig_tensor = input_tensor.clone().detach()
predict_tensor = model_forward(model, device, input_tensor)
output_file = f"{output_dir}/{os.path.basename(filename)}"
todos.data.save_tensor([orig_tensor, predict_tensor], output_file)
def video_service(input_file, output_file, targ):
# load video
video = redos.video.Reader(input_file)
if video.n_frames < 1:
print(f"Read video {input_file} error.")
return False
# Create directory to store result
output_dir = output_file[0 : output_file.rfind(".")]
todos.data.mkdir(output_dir)
# load model
model, device = get_model()
print(f" clean {input_file}, save to {output_file} ...")
progress_bar = tqdm(total=video.n_frames)
def clean_video_frame(no, data):
# print(f"frame: {no} -- {data.shape}")
progress_bar.update(1)
input_tensor = todos.data.frame_totensor(data)
# keep tensor 1x4xHxW
output_tensor = model_forward(model, device, input_tensor)
temp_output_file = "{}/{:06d}.png".format(output_dir, no)
todos.data.save_tensor(output_tensor, temp_output_file)
video.forward(callback=clean_video_frame)
redos.video.encode(output_dir, output_file)
# delete temp files
for i in range(video.n_frames):
temp_output_file = "{}/{:06d}.png".format(output_dir, i)
os.remove(temp_output_file)
return True
def video_client(name, input_file, output_file):
cmd = redos.video.Command()
context = cmd.patch(input_file, output_file)
redo = redos.Redos(name)
redo.set_queue_task(context)
print(f"Created 1 video tasks for {name}.")
def video_server(name, host="localhost", port=6379):
return redos.video.service(name, "video_patch", video_service, host, port)
| 29.785714 | 89 | 0.65008 | 656 | 5,004 | 4.73628 | 0.240854 | 0.054715 | 0.027358 | 0.037013 | 0.337303 | 0.289025 | 0.209849 | 0.162214 | 0.059865 | 0.032829 | 0 | 0.018273 | 0.201639 | 5,004 | 167 | 90 | 29.964072 | 0.759449 | 0.135691 | 0 | 0.242105 | 0 | 0 | 0.098811 | 0.04055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.073684 | 0.010526 | 0.263158 | 0.063158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |