hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
876a947ea1b19fe069f293dc368c754e8d9e9711 | 1,057 | py | Python | demo_app_3pages_deep.py | MarijaAP/freezeyt | 730a79a0e4bcee38e2d285ead4b9ccd695440516 | [
"MIT"
] | null | null | null | demo_app_3pages_deep.py | MarijaAP/freezeyt | 730a79a0e4bcee38e2d285ead4b9ccd695440516 | [
"MIT"
] | null | null | null | demo_app_3pages_deep.py | MarijaAP/freezeyt | 730a79a0e4bcee38e2d285ead4b9ccd695440516 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
"""Create the home page of the web app.
Link to the second page.
"""
return """
<html>
<head>
<title>Hello world</title>
</head>
<body>
Hello world!
<br>
<a href='/second_page.html'>LINK</a> to second page.
</body>
</html>
"""
@app.route('/second_page.html')
def second_page():
"""Show the second page.
Link to the third page.
"""
return """
<html>
<head>
<title>Hello world second page</title>
</head>
<body>
Second page !!!
<a href='/third_page.html'>LINK</a> to page 3.
</body>
</html>
"""
@app.route('/third_page.html')
def third_page():
"""Show the third page of the web app."""
return """
<html>
<head>
<title>Hello world third page</title>
</head>
<body>
Page 3 !!!
</body>
</html>
"""
| 18.224138 | 64 | 0.470199 | 121 | 1,057 | 4.024793 | 0.247934 | 0.164271 | 0.086242 | 0.117043 | 0.318275 | 0.195072 | 0.135524 | 0 | 0 | 0 | 0 | 0.003012 | 0.371807 | 1,057 | 57 | 65 | 18.54386 | 0.730422 | 0.137181 | 0 | 0.585366 | 0 | 0 | 0.752847 | 0.074032 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0 | 0.02439 | 0 | 0.170732 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
876a99e9cc17fc4370d8852ddd46f7f32c7207d3 | 6,932 | py | Python | Main.py | Farhad-Shabani/TSETMC_Dashboard | 8279bd9579f02447b9ab70dfe491d56713810f51 | [
"MIT"
] | 5 | 2021-04-10T17:04:48.000Z | 2021-09-17T11:49:53.000Z | Main.py | Farhad-Shabani/TSETMC_Dashboard | 8279bd9579f02447b9ab70dfe491d56713810f51 | [
"MIT"
] | null | null | null | Main.py | Farhad-Shabani/TSETMC_Dashboard | 8279bd9579f02447b9ab70dfe491d56713810f51 | [
"MIT"
] | 3 | 2021-07-04T16:52:10.000Z | 2021-08-18T15:17:10.000Z | import re, time, requests
from bs4 import BeautifulSoup
from openpyxl import workbook
from openpyxl import load_workbook
from src.Scrape_Index import Scrape_Index
from src.Database_Maker import Database_Maker, Sorted_Database
def Export_Database(portfolio,Selected_Stocks):
StartTime = time.time()
Index = Scrape_Index()
Portfolio_Database = Database_Maker(portfolio)
Selected_Stocks_Database = Database_Maker(Selected_Stocks)
CountCache, VolRatioSort, ValueSort, BuyCapitaSort, SellCapitaSort, BuyQueue, SellQueue = Sorted_Database(Portfolio_Database,portfolio)
wb = load_workbook(filename='Dashboard_Template.xlsx')
ws = wb.active
# Index ---------------------------------------------------------------------------------------------------------
ws['C7'] = Index[0]
ws['D7'] = Index[1]
ws['C8'] = Index[2]
ws['D8'] = Index[3]
ws['C9'] = Index[4]
ws['D9'] = Index[5]
ws['J7'] = Index[6]
ws['J8'] = Index[7]
TotalValueVabank = 0
for i in ValueSort:
TotalValueVabank += i[8]
ws['J11'] = TotalValueVabank
Num = 0
BaseVolNum = 0
for i in Portfolio_Database:
Num += 1
if i[7] >= i[2]/1000000:
BaseVolNum +=1
ws['J12'] = round(BaseVolNum / Num, 2)
# Number of Stocks -----------------------------------------------------------------------------------------------
ws['P12'] = CountCache[0]
ws['O12'] = CountCache[1]
ws['N12'] = CountCache[2]
ws['M12'] = CountCache[3]
ws['E41'] = CountCache[4]
ws['J41'] = CountCache[5]
ws['D52'] = CountCache[6]
ws['J52'] = CountCache[7]
# To Show price of Selected Stocks -------------------------------------------------------------------------------
Selected_Stocks_Map = {0:'B', 1:'C', 2:'D', 3:'E', 4:'G', 5:'H', 6:'I', 7:'J'}
for i in Selected_Stocks_Map:
ws['{}15'.format(Selected_Stocks_Map[i])] = Selected_Stocks_Database[i][0]
ws['{}16'.format(Selected_Stocks_Map[i])] = Selected_Stocks_Database[i][11]
ws['{}17'.format(Selected_Stocks_Map[i])] = Selected_Stocks_Database[i][12]
# ValueSort -----------------------------------------------------------------------------------------------------
for j in range(10):
ws['M{}'.format(22+j)] = ValueSort[j][0]
ws['N{}'.format(22+j)] = ValueSort[j][8]
# Queue ---------------------------------------------------------------------------------------------------------
if len(BuyQueue) > 7: LBuy = 7
else: LBuy = len(BuyQueue)
for z in range(LBuy):
ws['B{}'.format(34+z)] = BuyQueue[z][0]
ws['C{}'.format(34+z)] = BuyQueue[z][1]
ws['D{}'.format(34+z)] = BuyQueue[z][2]
ws['E{}'.format(34+z)] = BuyQueue[z][3]
if len(SellQueue) > 7: LSell = 7
else: LSell = len(SellQueue)
for t in range(LSell):
ws['G{}'.format(34+t)] = SellQueue[t][0]
ws['H{}'.format(34+t)] = SellQueue[t][1]
ws['I{}'.format(34+t)] = SellQueue[t][2]
ws['J{}'.format(34+t)] = SellQueue[t][3]
# Suspicious In & Out ----------------------------------------------------------------------------------------=--
if len(BuyCapitaSort) > 5: BCSort = 5
else: BCSort = len(BuyCapitaSort)
for k in range(BCSort):
ws['B{}'.format(47+k)] = BuyCapitaSort[k][0]
ws['D{}'.format(47+k)] = BuyCapitaSort[k][-5]
if len(VolRatioSort) > 5: VRSort = 5
else: VRSort = len(VolRatioSort)
for l in range(VRSort):
ws['E{}'.format(47+l)] = VolRatioSort[l][0]
ws['G{}'.format(47+l)] = VolRatioSort[l][22]
if len(SellCapitaSort) > 5: SCSort = 5
else: SCSort = len(SellCapitaSort)
for m in range(SCSort):
ws['H{}'.format(47+m)] = SellCapitaSort[m][0]
ws['J{}'.format(47+m)] = SellCapitaSort[m][-4]
EndTime = time.time()
print('Your Dashboard is ready! \nIt took {} seconds to create the output excel.'.format(int(EndTime - StartTime)))
return wb.save(filename="TSETMC_DailyDashboard.xlsx")
# Give a list of 7 your desired stocks in order to make the Excel-based Dashboard --------------------------------------
Portfolio = {'ثمسکن': ['3863538898378476','&c=70%20'],
'تنوین': ['25357135030606405','&c=67%20'],
'ونوین': ['47302318535715632','&c=57%20'],
'وتوسم': ['17528249960294496','&c=56%20'],
'وصنا': ['46982154647719707','&c=56%20'],
'سشمال': ['6757220448540984','&c=53%20'],
'سكرما': ['15472396110662150','&c=53%20'],
'سمازن': ['33808206014018431','&c=53%20'],
'ساربيل': ['34890845654517313','&c=53%20'],
'شوينده': ['3493306453706327','&c=44%20'],
'شپاكسا': ['11622051128546106','&c=44%20'],
'شدوص': ['40611478183231802','&c=44%20'],
'شگل': ['44153164692325703','&c=44%20'],
'ساينا': ['64298008532791199','&c=44%20'],
'شفا': ['36899214178084525','&c=43%20'],
'بهپاك': ['12746730665870442','&c=42%20'],
'غمارگ': ['52975109254504632','&c=42%20'],
'قشكر': ['35964395659427029','&c=38%20'],
'قنيشا': ['63380098535169030','&c=38%20'],
'قپيرا': ['67030488744129337','&c=38%20'],
'بترانس': ['46752599569017089','&c=31%20'],
'پكرمان': ['23214828924506640','&c=25%20'],
'سيمرغ': ['28450080638096732','&c=01%20'],
'زپارس': ['33420285433308219','&c=01%20'],
'وهنر': ['60783654574662426','&c=90%20'],
'وسنا': ['24662567615903665','&c=65%20'],
'قنقش': ['3050342257199174','&c=38%20'],
'نوين': ['59866041653103343','&c=66%20'],
'سقاين': ['60654872678917533','&c=53%20'],
'ساروج': ['44802346787824971','&c=53%20'],
'شاراك': ['7711282667602555','&c=44%20'],
'سيدكو': ['37281199178613855','&c=53%20'],
'وبشهر': ['13937270451301973','&c=42%20'],
'وبانك': ['48010225447410247','&c=39%20'],
'سنوین': ['36995197800118822', '&c=56%20']
}
# Give a list of your 8 desired stocks to show the real-time prices in your dashboard ----------------------------------
Selected_Stocks = {
'وبانك': ['48010225447410247','&c=39%20'],
'شستا': ['2400322364771558','&c=39%20'],
'وغدير': ['26014913469567886','&c=39%20'],
'واميد': ['52232388263291380','&c=39%20'],
'وصندوق': ['37204371816016200','&c=39%20'],
'وبشهر': ['13937270451301973','&c=42%20'],
'سيدكو': ['37281199178613855','&c=53%20'],
'شاراك': ['7711282667602555','&c=44%20']
}
Export_Database(Portfolio, Selected_Stocks) | 41.508982 | 140 | 0.493508 | 747 | 6,932 | 4.526104 | 0.311914 | 0.057971 | 0.011831 | 0.020112 | 0.224786 | 0.091097 | 0.060041 | 0.060041 | 0.041704 | 0 | 0 | 0.196371 | 0.236728 | 6,932 | 167 | 141 | 41.508982 | 0.442638 | 0.13142 | 0 | 0.046875 | 0 | 0 | 0.258765 | 0.00838 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007813 | false | 0 | 0.046875 | 0 | 0.0625 | 0.007813 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
8770580ce533e32ec2aa655344a49621da5aa45b | 1,620 | py | Python | habittest.py | Stephenjcl/GalaxyMapGeneration | 8285405eff691f43ac548af4ba114d9801804bed | [
"MIT"
] | null | null | null | habittest.py | Stephenjcl/GalaxyMapGeneration | 8285405eff691f43ac548af4ba114d9801804bed | [
"MIT"
] | null | null | null | habittest.py | Stephenjcl/GalaxyMapGeneration | 8285405eff691f43ac548af4ba114d9801804bed | [
"MIT"
] | null | null | null | #Viability test
from garnets import generate_stellar_system, random_star
from enviroment import BreathabilityPhrase
breathableair = False
moon = False
attempts = 0
# while breathableair is False or attempts <= 35000:
# try:
# stellar_system = generate_stellar_system(random_star())
# for i in range(0, len(stellar_system.planets)):
# if stellar_system.planets[i].breath == BreathabilityPhrase.BREATHABLE:
# breathableair = True
# for n in range(0, len(stellar_system.planets[i].moons)):
# if stellar_system.planets[i].moons[n].breath == BreathabilityPhrase.BREATHABLE:
# breathableair = True
# moon = True
# attempts += 1
#
# print('\r', attempts, "attempts made.", end='', flush=True)
#
# except:
# pass
#
# if moon == True:
# type = "moon"
# else:
# type = "planet"
# print('\r', "Habitable", type, "found after", attempts, "attempts. This gives a breathable atmosphere probability of:", 1/attempts*100, "%", end = '', flush=True)
#Count breathable atmospheres
# f = open('Production Run.csv', 'w')
#
# f.close()
with open('Production Run.csv', 'r') as content_file:
content = content_file.read()
print(content.count("BreathabilityPhrase.BREATHABLE"))
habitable = content.count("BreathabilityPhrase.BREATHABLE")
print("There are", habitable, "habitable worlds in this galaxy. This means there is oxygen present, and no excess poisonous gases.", '\n',
"This means there is a", habitable/35000*100, "% chance of breathing the air on a given world.")
| 36 | 164 | 0.654938 | 192 | 1,620 | 5.458333 | 0.458333 | 0.086832 | 0.076336 | 0.060115 | 0.271947 | 0.05916 | 0.05916 | 0 | 0 | 0 | 0 | 0.016588 | 0.218519 | 1,620 | 44 | 165 | 36.818182 | 0.811216 | 0.595679 | 0 | 0 | 1 | 0 | 0.409236 | 0.095541 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
87718ba5182046e1eaa2f1525372eec9456c7f7d | 625 | py | Python | app/account/urls.py | mohit4/BucketCrab | 7ff5f120e5a0e103f47c689c723702737c2df3da | [
"Apache-2.0"
] | null | null | null | app/account/urls.py | mohit4/BucketCrab | 7ff5f120e5a0e103f47c689c723702737c2df3da | [
"Apache-2.0"
] | null | null | null | app/account/urls.py | mohit4/BucketCrab | 7ff5f120e5a0e103f47c689c723702737c2df3da | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.views import LogoutView
from django.urls import path
from django.contrib.auth.views import LogoutView
from .views import UserRegisterView, UserLoginView, ProfileDetailView, ProfileUpdateView
app_name = 'account'
urlpatterns = [
path('', UserLoginView.as_view(), name='login'),
path('account/logout/', LogoutView.as_view(), name='logout'),
path('account/register/', UserRegisterView.as_view(), name='register'),
path('account/<int:pk>/', ProfileDetailView.as_view(), name='profile-detail'),
path('account/<int:pk>/update/', ProfileUpdateView.as_view(), name='profile-update'),
] | 41.666667 | 89 | 0.7408 | 73 | 625 | 6.260274 | 0.369863 | 0.065646 | 0.109409 | 0.091904 | 0.201313 | 0.201313 | 0.201313 | 0.201313 | 0 | 0 | 0 | 0 | 0.1008 | 625 | 15 | 90 | 41.666667 | 0.813167 | 0 | 0 | 0.166667 | 0 | 0 | 0.202875 | 0.038339 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5e3f985537a9efba4ea48038ce1efeffe50b06d9 | 218 | py | Python | ksj.py | itspuneet/itspuneet | d44f78afcff275aa56f03bba738ac3e4f2c30843 | [
"bzip2-1.0.6"
] | null | null | null | ksj.py | itspuneet/itspuneet | d44f78afcff275aa56f03bba738ac3e4f2c30843 | [
"bzip2-1.0.6"
] | null | null | null | ksj.py | itspuneet/itspuneet | d44f78afcff275aa56f03bba738ac3e4f2c30843 | [
"bzip2-1.0.6"
] | null | null | null | a=list(input().split(','))
st,num=[],[]
for i in a:
s1,n=i.split(':')
st.append(s1)
num.append(n)
print(st)
print(num)
for i in range(len(num)):
for j in range(i):
print(st[j])
| 16.769231 | 27 | 0.495413 | 38 | 218 | 2.842105 | 0.421053 | 0.166667 | 0.12963 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012739 | 0.279817 | 218 | 12 | 28 | 18.166667 | 0.675159 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e41eafa7384a90d2c56bfa80b6e7cf49f5eed89 | 3,063 | py | Python | Gathered CTF writeups/2018-09-01-tokyowesterns/crypto_mixed/MTRecover.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:00:41.000Z | 2022-03-27T06:00:41.000Z | Gathered CTF writeups/2018-09-01-tokyowesterns/crypto_mixed/MTRecover.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | null | null | null | Gathered CTF writeups/2018-09-01-tokyowesterns/crypto_mixed/MTRecover.py | mihaid-b/CyberSakura | f60e6b6bfd6898c69b84424b080090ae98f8076c | [
"MIT"
] | 1 | 2022-03-27T06:01:42.000Z | 2022-03-27T06:01:42.000Z | import random
class MT19937Recover:
"""Reverses the Mersenne Twister based on 624 observed outputs.
The internal state of a Mersenne Twister can be recovered by observing
624 generated outputs of it. However, if those are not directly
observed following a twist, another output is required to restore the
internal index.
See also https://en.wikipedia.org/wiki/Mersenne_Twister#Pseudocode .
"""
def unshiftRight(self, x, shift):
res = x
for i in range(32):
res = x ^ res >> shift
return res
def unshiftLeft(self, x, shift, mask):
res = x
for i in range(32):
res = x ^ (res << shift & mask)
return res
def untemper(self, v):
""" Reverses the tempering which is applied to outputs of MT19937 """
v = self.unshiftRight(v, 18)
v = self.unshiftLeft(v, 15, 0xefc60000)
v = self.unshiftLeft(v, 7, 0x9d2c5680)
v = self.unshiftRight(v, 11)
return v
def go(self, outputs, forward=True):
"""Reverses the Mersenne Twister based on 624 observed values.
Args:
outputs (List[int]): list of >= 624 observed outputs from the PRNG.
However, >= 625 outputs are required to correctly recover
the internal index.
forward (bool): Forward internal state until all observed outputs
are generated.
Returns:
Returns a random.Random() object.
"""
result_state = None
assert len(outputs) >= 624 # need at least 624 values
ivals = []
for i in range(624):
ivals.append(self.untemper(outputs[i]))
if len(outputs) >= 625:
# We have additional outputs and can correctly
# recover the internal index by bruteforce
challenge = outputs[624]
for i in range(1, 626):
state = (3, tuple(ivals + [i]), None)
r = random.Random()
r.setstate(state)
if challenge == r.getrandbits(32):
result_state = state
break
else:
# With only 624 outputs we assume they were the first observed 624
# outputs after a twist --> we set the internal index to 624.
result_state = (3, tuple(ivals + [624]), None)
rand = random.Random()
rand.setstate(result_state)
if forward:
for i in range(624, len(outputs)):
assert rand.getrandbits(32) == outputs[i]
return rand
def test_PythonMT19937Recover():
"""Just a testcase to ensure correctness"""
mtb = MT19937Recover()
r1 = random.Random(0x31337)
# just some discarded random numbers to move internal state forward
[r1.getrandbits(32) for _ in range(1234)]
# the actual leak of 1000 values
n = [r1.getrandbits(32) for _ in range(1000)]
r2 = mtb.go(n)
assert r1.getrandbits(32) == r2.getrandbits(32)
test_PythonMT19937Recover()
| 29.451923 | 79 | 0.584721 | 375 | 3,063 | 4.752 | 0.378667 | 0.027497 | 0.016835 | 0.030864 | 0.161616 | 0.109989 | 0.08193 | 0.08193 | 0.032548 | 0.032548 | 0 | 0.066277 | 0.330069 | 3,063 | 103 | 80 | 29.737864 | 0.802144 | 0.379367 | 0 | 0.122449 | 1 | 0 | 0 | 0 | 0 | 0 | 0.01522 | 0 | 0.061224 | 1 | 0.102041 | false | 0 | 0.020408 | 0 | 0.22449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e43ed9b4a92075c599548b4b2abcf77cf992e1a | 780 | py | Python | visualize.py | mac389/semantic-distance | 0ae8223e64610de79fdc370c7e44e2cda051c190 | [
"MIT"
] | 2 | 2017-03-09T21:42:32.000Z | 2018-07-13T02:04:15.000Z | visualize.py | mac389/semantic-distance | 0ae8223e64610de79fdc370c7e44e2cda051c190 | [
"MIT"
] | null | null | null | visualize.py | mac389/semantic-distance | 0ae8223e64610de79fdc370c7e44e2cda051c190 | [
"MIT"
] | 2 | 2017-03-09T21:42:35.000Z | 2018-07-13T02:04:17.000Z | import os, json, matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
READ = 'rb'
directory = json.load(open('directory.json',READ))
filename = os.path.join(directory['data-prefix'],'test-similarity-matrix.npy')
data = np.load(filename).astype(float)
data = (data-data.min())/(data.max()-data.min()) #Think more about how to scale
f,ax = plt.subplots(figsize=(12,9))
#Only for control
color_series = {i:color for i,color in enumerate(sns.color_palette("husl", 3))}
colors = pd.Series([color_series[i%3] for i in xrange(data.shape[0])])
print colors
hmap = sns.clustermap(np.corrcoef(data),col_colors = colors,row_colors=colors)
#plt.tight_layout()
plt.savefig('./results/clustermap-corr2.png') | 28.888889 | 80 | 0.732051 | 125 | 780 | 4.52 | 0.576 | 0.046018 | 0.042478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010072 | 0.108974 | 780 | 27 | 81 | 28.888889 | 0.802878 | 0.080769 | 0 | 0 | 0 | 0 | 0.125874 | 0.078322 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.294118 | null | null | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e47a8df66bb852c1c0f2a85665d80d87b1c508c | 497 | py | Python | volume/src/backend/db_create.py | sunokpa/st-kilda-pier | 45d838a9f1dc1132bab858c58e2716028ce9abe5 | [
"Apache-2.0"
] | 1 | 2018-08-03T08:17:25.000Z | 2018-08-03T08:17:25.000Z | volume/src/backend/db_create.py | sunokpa/st-kilda-pier | 45d838a9f1dc1132bab858c58e2716028ce9abe5 | [
"Apache-2.0"
] | null | null | null | volume/src/backend/db_create.py | sunokpa/st-kilda-pier | 45d838a9f1dc1132bab858c58e2716028ce9abe5 | [
"Apache-2.0"
] | null | null | null | from run import db
import sqlalchemy
import os, uuid, base62
DB_HOST = "mysql-skp"
DB_USER = "root"
DB_PW = os.environ['MYSQL_ROOT_PASSWORD']
DB_NAME = "flask_skp"
DB_ENGINE_URI = "mysql://{}:{}@{}".format(DB_USER, DB_PW, DB_HOST)
engine = sqlalchemy.create_engine(DB_ENGINE_URI)
try:
engine.execute("DROP DATABASE {}".format(DB_NAME))
except:
print("")
engine.execute("CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8mb4'".format(DB_NAME))
engine.execute("USE {}".format(DB_NAME))
| 24.85 | 84 | 0.716298 | 73 | 497 | 4.630137 | 0.452055 | 0.071006 | 0.106509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009195 | 0.124748 | 497 | 19 | 85 | 26.157895 | 0.767816 | 0 | 0 | 0 | 0 | 0 | 0.259557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.066667 | 0.2 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5e4b8b3b427389c0e078fc9fdbbb5a626515d43d | 585 | py | Python | blog/migrations/0008_auto_20190107_1755.py | dkowsikpai/librolet | 7148670655157ca5f1ad6853039c9ec00e37adef | [
"MIT"
] | null | null | null | blog/migrations/0008_auto_20190107_1755.py | dkowsikpai/librolet | 7148670655157ca5f1ad6853039c9ec00e37adef | [
"MIT"
] | null | null | null | blog/migrations/0008_auto_20190107_1755.py | dkowsikpai/librolet | 7148670655157ca5f1ad6853039c9ec00e37adef | [
"MIT"
] | null | null | null | # Generated by Django 2.1.3 on 2019-01-07 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0007_auto_20190107_1750'),
]
operations = [
migrations.RemoveField(
model_name='postpick',
name='user',
),
migrations.AddField(
model_name='post',
name='image',
field=models.ImageField(default='default.jpg', upload_to='post_pics'),
),
migrations.DeleteModel(
name='PostPick',
),
]
| 22.5 | 82 | 0.558974 | 57 | 585 | 5.614035 | 0.736842 | 0.05625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077889 | 0.319658 | 585 | 25 | 83 | 23.4 | 0.726131 | 0.076923 | 0 | 0.157895 | 1 | 0 | 0.141264 | 0.042751 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.052632 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e4d4c75d1c04f3087dc7de35d8f22acd91277a6 | 16,962 | py | Python | GCE/data_generation.py | FloList/GCE_NN | 3f6dd059fd366164a6fcda07643a9dae970a6aba | [
"MIT"
] | 6 | 2020-11-24T01:18:01.000Z | 2021-12-20T18:34:28.000Z | GCE/data_generation.py | FloList/GCE_NN | 3f6dd059fd366164a6fcda07643a9dae970a6aba | [
"MIT"
] | null | null | null | GCE/data_generation.py | FloList/GCE_NN | 3f6dd059fd366164a6fcda07643a9dae970a6aba | [
"MIT"
] | null | null | null | """
Generate and save maps for each template.
"""
import random
import numpy as np
from scipy import stats
import healpy as hp
import matplotlib.pyplot as plt
import os
import pickle
from .data_utils import get_fermi_pdf_sampler, masked_to_full
from .utils import multipage, auto_garbage_collect
import ray
import time
import warnings
def generate_template_maps(params, temp_dict, ray_settings, n_example_plots, job_id=0):
"""
Generate simulated template maps for each template (output format: NESTED!)
:param params: DotDict containing the settings (see parameters.py)
:param temp_dict: DotDict containing the templates
:param ray_settings: dictionary containing the settings for ray
:param n_example_plots: number of maps to plot and save for each template (as a quick check)
:param job_id: if running several jobs for the data generation: ID of the current job
"""
start_time = time.time()
# Get settings that will be stored in a separate file together with the maps
t_p = params.mod["models_P"]
t_ps = params.mod["models_PS"]
nside = params.data["nside"]
outer_rad = params.data["outer_rad"]
inner_band = params.data["inner_band"]
mask_type = params.data["mask_type"]
do_fermi_psf = params.data["psf"]
leakage_delta = params.data["leakage_delta"] if do_fermi_psf else 0
if "db" in params.keys():
do_poisson_scatter_p = False if params.db["deactivate_poiss_scatter_for_P"] else True
else:
do_poisson_scatter_p = True
name = params.tt["filename_base"]
n_chunk = params.tt["n_chunk"]
n_sim_per_chunk = params.tt["n_sim_per_chunk"]
poisson_a_is_log = params.tt["poisson_A_is_log"]
add_two_temps_ps = params.tt["add_two_temps_PS"]
output_path = params.gen["template_maps_folder"]
prior_dict = params.tt.priors
save_example_plot = n_example_plots > 0
exp = temp_dict["exp"]
rescale_compressed = temp_dict["rescale_compressed"]
# Set output dtypes
dtype_data = np.uint32 if do_poisson_scatter_p else np.float32 # without Poisson draw, counts are non-integer
dtype_flux_arr = np.float32
# Set a random seed for numpy (using random because numpy duplicates random number generator for multiple processes)
random_seed = random.randint(0, int(2 ** 32 - 1))
np.random.seed(random_seed)
print("Job ID:", job_id, "Random Seed:", random_seed)
# PSF: use Fermi-LAT PSF
if do_fermi_psf:
pdf = get_fermi_pdf_sampler()
else:
pdf = None
# Get the masks
total_mask_neg = temp_dict["mask_ROI_full"] # uncompressed, nest format, contains PS mask if desired
total_mask_neg_safety = temp_dict["mask_safety_full"] # the same for the slightly larger ROI
# Initialise the output dictionary
data_out = dict()
# Create the output folder (if it doesn't exist yet)
os.makedirs(output_path, exist_ok=True)
# Print
print("Starting map generation for '{0}'.".format(params.tt["data_name"]))
print("Number of chunks: {0}, number of simulations per chunk: "
"{1}\n -> {2} maps per model.".format(n_chunk, n_sim_per_chunk, n_chunk * n_sim_per_chunk))
if len(add_two_temps_ps) > 0:
print(" Twice as many maps will be created for", add_two_temps_ps)
# Start with the Poissonian models
for temp in t_p:
print("Starting with Poissonian model '{:}'".format(temp))
t = temp_dict["T_counts"][temp] # exposure-corrected template in counts space
# Get pixels that are not masked
indices_roi = temp_dict["indices_roi"]
# Mask template and compress
t_masked = t * (1 - total_mask_neg)
t_masked_compressed = t_masked[indices_roi]
# Make a subfolder
temp_folder = os.path.join(output_path, temp)
os.makedirs(temp_folder, exist_ok=True)
# For each chunk
for chunk in range(n_chunk):
# Draw the (log) amplitude
a = np.asarray([random.uniform(prior_dict[temp][0], prior_dict[temp][1])
for _ in range(n_sim_per_chunk)])
# Generate the maps: NOTE: exposure-correction is included in the Poissonian templates ("T_counts")
random_draw_fn = np.random.poisson if do_poisson_scatter_p else lambda x: x
if poisson_a_is_log:
sim_maps = np.asarray([random_draw_fn((10.0 ** a[i]) * t_masked_compressed)
for i in range(n_sim_per_chunk)])
else:
sim_maps = np.asarray([random_draw_fn(a[i] * t_masked_compressed)
for i in range(n_sim_per_chunk)])
# Save settings
if chunk == 0 and int(job_id) == 0:
settings_out = dict()
settings_out["T"] = t
settings_out["priors"] = prior_dict[temp]
settings_out["is_log_A"] = poisson_a_is_log
settings_out["exp"] = exp
settings_out["rescale_compressed"] = rescale_compressed
settings_out["indices_roi"] = indices_roi
settings_out["format"] = "NEST"
settings_out["mask_type"] = mask_type
settings_out["outer_rad"] = outer_rad
settings_out["inner_band"] = inner_band
settings_out["leakage_delta"] = leakage_delta
settings_out["nside"] = nside
print(" Writing settings file...")
with open(os.path.join(temp_folder, name + "_settings.pickle"), 'wb') as f:
pickle.dump(settings_out, f)
# Save maps
# The full map can be recovered as
# map_full = np.zeros(npix), map_full[data_out["indices_roi"]] = data_out["val"]
data_out["data"] = sim_maps.astype(dtype_data)
data_out["info"] = dict()
data_out["info"]["A"] = a
with open(os.path.join(temp_folder, name + "_" + str(job_id) + "_" + str(chunk) + ".pickle"), 'wb') as f:
pickle.dump(data_out, f)
# Plot some maps and save
if chunk == 0 and int(job_id) == 0 and save_example_plot:
plt.ioff()
hp.mollview(t_masked, title="Template (exposure-corrected)", nest=True)
hp.mollview(exp, title="Exposure (nside = " + str(nside) + ")", nest=True)
hp.mollview(total_mask_neg, title="Mask (" + str(mask_type) + ")", nest=True)
for i in range(n_example_plots):
hp.mollview(masked_to_full(sim_maps[i, :], indices_roi, nside=nside),
title=int(np.round(sim_maps[i, :].sum())), nest=True)
multipage(os.path.join(output_path, temp + "_examples.pdf"))
plt.close("all")
# Initialise Ray
if t_ps:
ray.init(**ray_settings)
if "num_cpus" in ray_settings.keys():
print("Ray: running on", ray_settings["num_cpus"], "CPUs.")
# Put the large array / objects that are template-independent into the object store
exp_id = ray.put(exp)
pdf_id = ray.put(pdf)
# Define a function for the simulation of the point-source models
@ray.remote
def create_simulated_map(skew_, loc_, scale_, flux_lims_, enforce_upper_flux_, t_, exp_, pdf_, name_,
inds_outside_roi_, size_approx_mean_=10000, flux_log_=False):
from .ps_mc import run
assert np.all(np.isfinite(flux_lims_)), "Flux limits must be finite!"
max_total_flux = flux_lims_[1] if enforce_upper_flux_ else -np.infty
# Draw the desired flux
if flux_log_:
flux_desired = 10 ** np.random.uniform(*flux_lims_)
else:
flux_desired = np.random.uniform(*flux_lims_)
# Calculate the expected value of 10^X
exp_value = (10 ** stats.skewnorm.rvs(skew_, loc=loc_, scale=scale_, size=int(size_approx_mean_))).mean()
# Determine the expected number of sources
n_sources_exp = flux_desired / exp_value
# Draw the observed number of sources from a Poisson distribution
n_sources = np.random.poisson(n_sources_exp)
# Initialise total flux
tot_flux = np.infty
# Draw fluxes until total flux is in valid range
flux_arr_ = []
while tot_flux >= max_total_flux:
flux_arr_ = 10 ** stats.skewnorm.rvs(skew_, loc=loc_, scale=scale_, size=n_sources)
tot_flux = flux_arr_.sum()
if not enforce_upper_flux_:
break
# If total flux > max-total_flux: reduce n_sources
if tot_flux > max_total_flux:
n_sources = int(max(1, int(n_sources // 1.05)))
# Do MC run
map_, n_phot_, flux_arr_out = run(np.asarray(flux_arr_), t_, exp_, pdf_, name_, save=False, getnopsf=True,
getcts=True, upscale_nside=16384, verbose=False, is_nest=True,
inds_outside_roi=inds_outside_roi_, clean_count_list=False)
return map_, n_phot_, flux_arr_out
# Do the point-source models
for temp in t_ps:
print("Starting with point-source model '{:}'".format(temp))
t = temp_dict["T_flux"][temp] # for point-sources: template after REMOVING the exposure correction is used
# Apply slightly larger mask
t_masked = t * (1 - total_mask_neg_safety)
# Correct flux limit priors for larger mask (after simulating the counts, ROI mask will be applied)
flux_corr_fac = t_masked.sum() / (t * (1 - total_mask_neg)).sum()
flux_lims_corr = [None] * 2
for i in range(2):
if prior_dict[temp]["flux_log"]:
flux_lims_corr[i] = prior_dict[temp]["flux_lims"][i] + np.log10(flux_corr_fac)
else:
flux_lims_corr[i] = prior_dict[temp]["flux_lims"][i] * flux_corr_fac
# Get indices where PSs are sampled although they lie outside ROI
inds_ps_outside_roi = set(np.setdiff1d(temp_dict["indices_safety"], temp_dict["indices_roi"]))
# Template needs to be normalised to sum up to unity for the new implementation!
# Might need to do this twice because of rounding errors
t_final = t_masked / t_masked.sum()
while t_final.sum() > 1.0:
t_final /= t_final.sum()
if t_final.sum() != 1.0:
warnings.warn("Template sum is not exactly 1, but {:}!".format(t_final.sum()))
# Make a subfolder
temp_folder = os.path.join(output_path, temp)
os.makedirs(temp_folder, exist_ok=True)
# Put the large arrays / objects to the object store
t_final_id = ray.put(t_final)
inds_ps_outside_roi_id = ray.put(inds_ps_outside_roi)
# For each chunk
this_n_chunk = 2 * n_chunk if temp in add_two_temps_ps else n_chunk
for chunk in range(this_n_chunk):
print(" Starting with chunk", chunk)
# Draw the parameters
mean_draw = np.random.uniform(*prior_dict[temp]["mean_exp"], size=n_sim_per_chunk)
var_draw = prior_dict[temp]["var_exp"] * np.random.chisquare(1, size=n_sim_per_chunk)
skew_draw = np.random.normal(loc=0, scale=prior_dict[temp]["skew_std"], size=n_sim_per_chunk)
# This code is for debugging without ray
# sim_maps, n_phot, flux_arr = create_simulated_map(skew_draw[0], mean_draw[0], np.sqrt(var_draw[0]),
# flux_lims_corr,
# prior_dict[temp]["enforce_upper_flux"],
# t_final, exp, pdf, "map_" + temp,
# flux_log_=prior_dict[temp]["flux_log"],
# inds_outside_roi_=inds_ps_outside_roi)
sim_maps, n_phot, flux_arr = map(list, zip(*ray.get(
[create_simulated_map.remote(skew_draw[i_PS], mean_draw[i_PS], np.sqrt(var_draw[i_PS]),
flux_lims_corr, prior_dict[temp]["enforce_upper_flux"],
t_final_id, exp_id, pdf_id, "map_" + temp,
flux_log_=prior_dict[temp]["flux_log"],
inds_outside_roi_=inds_ps_outside_roi_id)
for i_PS in range(n_sim_per_chunk)])))
# Apply ROI mask again and cut off counts outside ROI
sim_maps = np.asarray(sim_maps) * np.expand_dims((1 - total_mask_neg), [0, -1])
# The following assert is for the scenario where there is NO leakage INTO the ROI, and counts leaking
# OUT OF the ROI are deleted from photon-count list n_phot
# assert np.all(sim_maps[:, :, 0].sum(1) == [n_phot[i].sum() for i in range(n_sim_per_chunk)]), \
# "Photons counts in maps and n_phot lists are not consistent! Aborting..."
# The following assert is for the scenario where there is leakage INTO and OUT OF the ROI, and n_phot
# contains ALL the counts (and only those counts) from PSs within the ROI.
assert np.all(sim_maps[:, :, 1].sum(1) == [n_phot[i].sum() for i in range(n_sim_per_chunk)]), \
"Photons counts in maps and n_phot lists are not consistent! Aborting..."
# Collect garbage
auto_garbage_collect()
# Save settings
if chunk == 0 and int(job_id) == 0:
settings_out = dict()
settings_out["T"] = t
settings_out["priors"] = prior_dict[temp]
settings_out["exp"] = exp # exposure
settings_out["rescale_compressed"] = rescale_compressed
settings_out["max_NP_sources"] = np.nan # not set here
settings_out["indices_roi"] = np.argwhere(1 - total_mask_neg).flatten()
settings_out["format"] = "NEST"
settings_out["mask_type"] = mask_type
settings_out["outer_rad"] = outer_rad
settings_out["inner_band"] = inner_band
settings_out["leakage_delta"] = leakage_delta
settings_out["nside"] = nside
print(" Writing settings file...")
with open(os.path.join(temp_folder, name + "_settings.pickle"), 'wb') as f:
pickle.dump(settings_out, f)
# Save maps
data_out["data"] = (sim_maps[:, temp_dict["indices_roi"], :]).astype(dtype_data)
data_out["n_phot"] = n_phot
data_out["flux_arr"] = [np.asarray(f, dtype=dtype_flux_arr) for f in flux_arr]
data_out["info"] = dict()
data_out["info"]["tot_flux"] = np.asarray([np.sum(f) for f in flux_arr])
data_out["info"]["means"] = mean_draw
data_out["info"]["vars"] = var_draw
data_out["info"]["skew"] = skew_draw
with open(os.path.join(temp_folder, name + "_"
+ str(job_id) + "_" + str(chunk) + ".pickle"), 'wb') as f:
pickle.dump(data_out, f)
# Plot some maps and save
if chunk == 0 and int(job_id) == 0 and save_example_plot:
plt.ioff()
hp.mollview(t * (1 - total_mask_neg), title="Template (not exposure-corrected)", nest=True)
hp.mollview(exp, title="Exposure (nside = " + str(nside) + ")", nest=True)
hp.mollview(total_mask_neg, title="Mask (" + str(mask_type) + ")", nest=True)
hp.mollview(total_mask_neg_safety, title="Extended mask (allowing leakage into ROI)", nest=True)
for i in range(n_example_plots):
hp.mollview(sim_maps[i, :, 0], title=int(np.round(sim_maps[i, :, 0].sum())), nest=True)
multipage(os.path.join(output_path, temp + "_examples.pdf"))
plt.close("all")
dash = 80 * "="
print(dash)
print("Done! Computation took {0} seconds.".format(time.time() - start_time))
print(dash)
# Loading pickle file e.g.: data = pickle.load( open( "./data/<...>.pickle", "rb" ) )
| 49.741935 | 120 | 0.578116 | 2,227 | 16,962 | 4.135608 | 0.170184 | 0.033442 | 0.019761 | 0.016938 | 0.379045 | 0.32975 | 0.303909 | 0.276004 | 0.258632 | 0.258632 | 0 | 0.007641 | 0.321012 | 16,962 | 340 | 121 | 49.888235 | 0.792047 | 0.216366 | 0 | 0.282407 | 1 | 0 | 0.109763 | 0.002276 | 0 | 0 | 0 | 0 | 0.009259 | 1 | 0.009259 | false | 0 | 0.060185 | 0 | 0.074074 | 0.060185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e51a18b38b505d030b71b633915de41ef88b7b3 | 2,295 | py | Python | BleVibrationDevice.py | Suitceyes-Project-Code/Vibration-Pattern-Player | 44d8bac61eed0ee7712eb0299d0d7029f688fe24 | [
"MIT"
] | null | null | null | BleVibrationDevice.py | Suitceyes-Project-Code/Vibration-Pattern-Player | 44d8bac61eed0ee7712eb0299d0d7029f688fe24 | [
"MIT"
] | null | null | null | BleVibrationDevice.py | Suitceyes-Project-Code/Vibration-Pattern-Player | 44d8bac61eed0ee7712eb0299d0d7029f688fe24 | [
"MIT"
] | 1 | 2021-10-04T14:26:49.000Z | 2021-10-04T14:26:49.000Z | from bluepy.btle import UUID, Peripheral
from VestDeviceBase import VestDevice
class BleVestDevice(VestDevice):
    """Haptic vest driven over Bluetooth LE via bluepy.

    The connection is opened in the constructor; every command is a short
    byte sequence written to a single writable characteristic, where the
    first byte selects the command.
    """

    # Command service / characteristic exposed by the vest firmware.
    _SERVICE_UUID = "713d0000-503e-4c75-ba94-3148f18d941e"
    _CHARACTERISTIC_UUID = "713d0003-503e-4c75-ba94-3148f18d941e"

    def __init__(self, deviceAddr):
        try:
            self._peripheral = Peripheral(deviceAddr)
            service = self._peripheral.getServiceByUUID(UUID(self._SERVICE_UUID))
            self._characteristic = service.getCharacteristics(UUID(self._CHARACTERISTIC_UUID))[0]
        except Exception as e:
            # Best-effort: connection problems are printed, not raised,
            # matching the original behaviour.
            print("Error: " + str(e))

    def __isValidState(self):
        # bluepy reports the state string "conn" while the connection is alive.
        return self._peripheral.getState() == "conn"

    def __write(self, byteArr):
        # All commands go through the single command characteristic.
        self._peripheral.writeCharacteristic(self._characteristic.getHandle(), byteArr)

    def set_pin(self, index, intensity):
        """Sets a pin to a given intensity.

        index: an integer from 0 - 6
        intensity: an integer from 0 - 255
        """
        if self.__isValidState():
            self.__write(bytes([0, index, intensity]))

    def set_frequency(self, frequency):
        """Sets the frequency of the entire vest.

        frequency: integer, transmitted as 4 little-endian bytes.
        """
        if self.__isValidState():
            payload = [4,
                       frequency & 255,
                       (frequency >> 8) & 255,
                       (frequency >> 16) & 255,
                       (frequency >> 24) & 255]
            self.__write(bytes(payload))

    def mute(self):
        """Stops all motors on the vest from vibrating"""
        if self.__isValidState():
            self.__write(bytes([3]))

    def set_motor(self, index, rotation):
        """
        Sets a given motor index to a given target rotation.
        """
        if self.__isValidState():
            self.__write(bytes([11, index, rotation]))

    def set_motor_speed(self, speed):
        """
        Changes how long it takes to move 1 degree per millisecond.

        Raises ValueError if speed is not positive.
        """
        if speed <= 0:
            raise ValueError("speed must be greater than 0.")
        # Fix: guard the connection state like every other command; the
        # original wrote unconditionally here, which raises from bluepy
        # when the peripheral is disconnected.
        if self.__isValidState():
            self.__write(bytes([12, speed]))

    def set_pins_batched(self, values=None):
        """Sets several pins at once from a {pin_index: intensity} mapping.

        Fix: the previous default argument was the `dict` *type* (not an
        instance), so calling with no argument iterated over a class and
        failed. `None` now means "no pins to set".
        """
        if values is None:
            values = {}
        for pin in values:
            self.set_pin(pin, values[pin])
5e581b3bc442bf985032c1a6311fd1a8c9e46d12 | 1,125 | py | Python | tests/unit/scalar/test_datetime.py | alexchamberlain/tartiflette | 6904b0f47770c348553e907be5f5bdb0929fe149 | [
"MIT"
] | null | null | null | tests/unit/scalar/test_datetime.py | alexchamberlain/tartiflette | 6904b0f47770c348553e907be5f5bdb0929fe149 | [
"MIT"
] | null | null | null | tests/unit/scalar/test_datetime.py | alexchamberlain/tartiflette | 6904b0f47770c348553e907be5f5bdb0929fe149 | [
"MIT"
] | null | null | null | import datetime
import pytest
@pytest.mark.parametrize(
    "val,expected",
    [
        (datetime.datetime(1986, 12, 24, 15, 0, 4), "1986-12-24T15:00:04"),
        (None, AttributeError),
        ("A", AttributeError),
    ],
)
def test_scalar_datetime_coerce_output(val, expected):
    """coerce_output serialises datetimes to ISO strings and rejects non-datetimes."""
    from tartiflette.scalar.builtins.datetime import ScalarDateTime

    scalar = ScalarDateTime()
    expects_error = type(expected) is type and issubclass(expected, Exception)
    if expects_error:
        with pytest.raises(expected):
            scalar.coerce_output(val)
        return
    assert scalar.coerce_output(val) == expected
@pytest.mark.parametrize(
    "val,expected",
    [
        ("1986-12-24T15:00:04", datetime.datetime(1986, 12, 24, 15, 0, 4)),
        ("LOL", ValueError),
        (None, TypeError),
    ],
)
def test_scalar_datetime_coerce_input(val, expected):
    """coerce_input parses ISO strings and rejects malformed / non-string input."""
    from tartiflette.scalar.builtins.datetime import ScalarDateTime

    scalar = ScalarDateTime()
    expects_error = type(expected) is type and issubclass(expected, Exception)
    if expects_error:
        with pytest.raises(expected):
            scalar.coerce_input(val)
        return
    assert scalar.coerce_input(val) == expected
| 28.125 | 75 | 0.663111 | 126 | 1,125 | 5.825397 | 0.333333 | 0.089918 | 0.061308 | 0.065395 | 0.809264 | 0.517711 | 0.517711 | 0.517711 | 0.441417 | 0.441417 | 0 | 0.058691 | 0.212444 | 1,125 | 39 | 76 | 28.846154 | 0.769752 | 0 | 0 | 0.4375 | 0 | 0 | 0.058667 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e5cfeb015c77e2308db32ee9bf5063eeee0afb3 | 4,484 | py | Python | keras_frcnn/pascal_voc_parser.py | touchylk/fgcnn | db0a2156d21480e37aa5b3b74ca2e71bf2a2d50a | [
"Apache-2.0"
] | null | null | null | keras_frcnn/pascal_voc_parser.py | touchylk/fgcnn | db0a2156d21480e37aa5b3b74ca2e71bf2a2d50a | [
"Apache-2.0"
] | null | null | null | keras_frcnn/pascal_voc_parser.py | touchylk/fgcnn | db0a2156d21480e37aa5b3b74ca2e71bf2a2d50a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import cv2
import xml.etree.ElementTree as ET
import config
import numpy as np
cfg = config.Config()
# Parse CUB-200-2011 bird-part XML annotations into Faster-R-CNN-style records.
# Returns (all_imgs, classes_count, class_mapping):
#   all_imgs       - list of dicts with 'filepath', 'width', 'height', 'bboxes', 'imageset'
#   classes_count  - {part class name: occurrence count}
#   class_mapping  - {part class name: integer index}
# NOTE(review): the input_path parameter is ignored -- data_path is hard-coded below.
# NOTE(review): this function mixes Python-2 print statements ("print 'error'",
# "print annotation_data") with Python-3 print() calls; it only parses under Python 2.
# NOTE(review): bird_classes_count / bird_class_mapping are built but never returned.
# NOTE(review): indentation was stripped by extraction; code lines are kept byte-identical.
def get_data(input_path):
all_imgs = []
classes_count = {}
class_mapping = {}
bird_classes_count ={}
bird_class_mapping = {}
visualise = False
data_path = '/media/e813/E/dataset/CUBbird/CUB_200_2011/CUB_200_2011'#[os.path.join(input_path,s) for s in cfg.pascal_voc_year]
print('Parsing annotation files')
# "if True:" below looks like leftover scaffolding from an edited-out loop/try
# (see the commented-out except at the bottom).
if True:
annot_path = os.path.join(data_path, 'xml')
imgs_path = os.path.join(data_path, 'images')
imgsets_path_trainval = os.path.join(data_path, 'train.txt')
imgsets_path_test = os.path.join(data_path, 'test.txt')
trainval_files = []
test_files = []
try:
with open(imgsets_path_trainval) as f:
for line in f:
trainval_files.append(line.strip() + '.jpg')
except Exception as e:
print(e)
try:
with open(imgsets_path_test) as f:
for line in f:
test_files.append(line.strip() + '.jpg')
except Exception as e:
if data_path[-7:] == 'VOC2012':
# this is expected, most pascal voc distibutions dont have the test.txt file
pass
else:
print(e)
# One XML file per image under <data_path>/xml.
annots = [os.path.join(annot_path, s) for s in os.listdir(annot_path)]
idx = 0
for annot in annots:
if True:
idx += 1
et = ET.parse(annot)
element = et.getroot()
#element_objs = element.findall('object')
element_parts = element.find('parts')
element_filename = element.find('img_path').text
#print(element_filename)
element_width = int(element.find('size').find('width').text)
#print(element_width)
# 'heigth' (sic) matches this dataset's custom XML tag names -- do not "fix".
element_height = int(element.find('size').find('heigth').text)
oneparts = element_parts.findall('onepart')
bird_class_name = element.find('class_name').text
# Track per-species counts and assign each species a dense integer id.
if bird_class_name not in bird_classes_count:
bird_classes_count[bird_class_name]= 1
else:
bird_classes_count[bird_class_name] += 1
if bird_class_name not in bird_class_mapping:
bird_class_mapping[bird_class_name] = len(bird_class_mapping)
#bird_class_index = {}
# Only images with at least one annotated part produce a record.
if len(oneparts) > 0:
annotation_data = {'filepath': (data_path+element_filename), 'width': element_width,
'height': element_height, 'bboxes': []}
element_train_or_test = element.find('train_or_test').text
if element_train_or_test == 'train':
annotation_data['imageset'] = 'trainval'
elif element_train_or_test == 'test':
annotation_data['imageset'] = 'test'
else:
annotation_data['imageset'] = 'trainval'
print 'error'
raise ValueError
# Each <onepart> is one part bounding box, stored as (center, size)
# in the XML and converted to corner coordinates here.
for onepart in oneparts:
class_name = onepart.find('name').text
if class_name not in classes_count:
classes_count[class_name] = 1
else:
classes_count[class_name] += 1
if class_name not in class_mapping:
class_mapping[class_name] = len(class_mapping)
part_bbox = onepart.find('bndbox')
part_x = float(part_bbox.find('x').text)
part_y = float(part_bbox.find('y').text)
part_width = float(part_bbox.find('width').text)
part_heigth = float(part_bbox.find('heigth').text)
x1 = int(round(part_x - part_width/2))
x2 = int(round(part_x + part_width/2))
y1 = int(round(part_y - part_heigth/2))
y2 = int(round(part_y + part_heigth/2))
#x1 = int(round(float(obj_bbox.find('xmin').text)))
#y1 = int(round(float(obj_bbox.find('ymin').text)))
#x2 = int(round(float(obj_bbox.find('xmax').text)))
#y2 = int(round(float(obj_bbox.find('ymax').text)))
# No difficulty flag in this dataset; always False.
difficulty = (0 == 1)
annotation_data['bboxes'].append(
{'class': class_name, 'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'difficult': difficulty})
all_imgs.append(annotation_data)
#if annotation_data['imageset']=='test':
# visualise = True
#else:
# visualise = False
# print annotation_data['imageset']
# Optional debug display (visualise is hard-coded to False above).
if visualise:
img = cv2.imread(annotation_data['filepath'])
#print(annotation_data['filepath'])
#print(img.shape)
for bbox in annotation_data['bboxes']:
cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox[
'x2'], bbox['y2']), (0, 0, 255))
cv2.imshow('img', img)
print annotation_data
cv2.waitKey(0)
#except Exception as e:
# print(e)
# print('oo')
# continue
return all_imgs, classes_count, class_mapping
# all_imgs 是annotation_data的列表
# 每一个annotationdata是一个dict,包含 了''filepath,width,height,'bboxes,imageset
#其中,bboxes是一个列表,每一个box是一个字典
#
| 30.712329 | 128 | 0.665031 | 631 | 4,484 | 4.502377 | 0.239303 | 0.044351 | 0.021119 | 0.028159 | 0.299542 | 0.190778 | 0.099965 | 0.028863 | 0.028863 | 0 | 0 | 0.01856 | 0.194915 | 4,484 | 145 | 129 | 30.924138 | 0.768421 | 0.177743 | 0 | 0.163265 | 0 | 0 | 0.093195 | 0.015031 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.010204 | 0.05102 | null | null | 0.05102 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e5dc0bccba9bfb6b687c696f0803ac8924850d5 | 3,342 | py | Python | constants.py | granthitson/8ballbot | cc0778b6ce5857b8b39203d53c5e792297ab126f | [
"MIT"
] | 1 | 2019-05-13T00:15:41.000Z | 2019-05-13T00:15:41.000Z | constants.py | granthitson/AteBallPool | cc0778b6ce5857b8b39203d53c5e792297ab126f | [
"MIT"
] | null | null | null | constants.py | granthitson/AteBallPool | cc0778b6ce5857b8b39203d53c5e792297ab126f | [
"MIT"
] | null | null | null |
# IMAGES #
# Filenames of reference screenshots used for template matching while the bot
# navigates the 8-ball-pool UI; the directory they live in is resolved elsewhere.
# UI NAVIGATION #
img_addFriend = "add_friend.png"
img_allow = "allow.png"
img_allowFlash = "enableflash_0.png"
img_allowFlash1 = "enableflash_1.png"
img_allowFlash2 = "enableflash_2.png"
img_alreadyStarted = "alreadystarted.png"
img_alreadyStarted1 = "alreadystarted1.png"
img_backButton = "back_button.png"
img_beginningGame = "beginninggame.png"
img_challengeFriend = "challenge_friend.png"
img_cheapButtonFriend = "cheap_button_friend.png"
img_cheapButton = "cheap_button.png"
img_cueUpdate = "cueUpdate.png"
img_cues = "cues.png"
img_collectCoins = "collectcoins.png"
img_collectCoins1 = "collectcoins_1.png"
img_defaultAcct = "defaultaccount.png"
img_eightBallSpinButton = "8ballspin_button.png"
img_emailArea = "email_area.png"
img_facebookLogo = "facebooklogo.png"
img_inviteFriend = "invite_friend.png"
img_isGameStart = "isgamestart.png"
img_loginButton3 = "login3_button.png"
img_loginWithMiniclip = "login_with_miniclip.png"
img_luckyShot = "luckyShot.png"
img_mainMenuBefore = "mainmenu_before.png"
img_passwordArea = "password_area.png"
img_playButtonGuest = "play_button_guest.png"
img_playButtonLogged = "play_button_logged.png"
img_playFree = "playFree.png"
img_playFriends = "playfriends.png"
img_playNow = "playnow.png"
img_poolChoice = "poolchoice.png"
img_poolChoice1 = "poolchoice1.png"
img_poolChoice200 = "poolchoice200.png"
img_poolChoice200_1 = "poolchoice200_1.png"
# NOTE(review): the numeric suffixes below are offset from the file names
# (img_searchFriends1 -> "search_friends2.png") -- confirm intentional.
img_searchFriends = "search_friends.png"
img_searchFriends1 = "search_friends2.png"
img_searchFriends2 = "search_friends3.png"
img_signUpLogin = "signup_login_button.png"
img_spinWinCollect = "spinwin_collect.png"
img_spinWinIcon = "spinwinicon.png"
img_spinWinX = "spinwin_x.png"
img_topRightCorner = "top_right_corner.png"
img_topRightCornerLogged = "top_right_corner_logged.png"
img_turn = "turn.png"
img_turn1 = "turn1.png"
img_opponentTurn = "playertwoturn.png"
img_opponentTurn1 = "playertwoturn1.png"
img_url = "url.png"
img_url2 = "url2.png"
img_url3 = "url3.png"
img_urlBar = "urlbar.png"
img_unsecure = "unsecure.png"
img_xOut = "xout.png"
# UI NAVIGATION #
# GAME NAVIGATION #
# In-game templates: the 15 object balls (normal and dark/occluded variants),
# the cue/eight ball, and the six pocket ("hole") and four rail templates.
img_1ball = "1ball.png"
img_2ball = "2ball.png"
img_3ball = "3ball.png"
img_4ball = "4ball.png"
img_5ball = "5ball.png"
img_6ball = "6ball.png"
img_7ball = "7ball.png"
img_8ball = "8ball.png"
img_9ball = "9ball.png"
img_10ball = "10ball.png"
img_11ball = "11ball.png"
img_12ball = "12ball.png"
img_13ball = "13ball.png"
img_14ball = "14ball.png"
img_15ball = "15ball.png"
img_1ballDark = "1ballDark.png"
img_2ballDark = "2ballDark.png"
img_3ballDark = "3ballDark.png"
img_4ballDark = "4ballDark.png"
img_5ballDark = "5ballDark.png"
img_6ballDark = "6ballDark.png"
img_7ballDark = "7ballDark.png"
img_8ballDark = "8ballDark.png"
img_9ballDark = "9ballDark.png"
img_10ballDark = "10ballDark.png"
img_11ballDark = "11ballDark.png"
img_12ballDark = "12ballDark.png"
img_13ballDark = "13ballDark.png"
img_14ballDark = "14ballDark.png"
img_15ballDark = "15ballDark.png"
img_cueball = "cueball.png"
img_eightball = "eightball.png"
img_ballPic1 = "ballpic1.png"
# Pocket templates: t/b = top/bottom, l/m/r = left/middle/right, h = hole.
img_tlh = "tlh.png"
img_tmh = "tmh.png"
img_trh = "trh.png"
img_blh = "blh.png"
img_bmh = "bmh.png"
img_brh = "brh.png"
img_topRail = "toprail.png"
img_bottomRail = "bottomrail.png"
img_leftRail = "leftrail.png"
img_rightRail = "rightrail.png"
# GAME NAVIGATION #
# IMAGES #
# Global debug switch (off by default).
debug = False
5e6ffb54b935a3bedbdb9d36bddd3349d6e52b30 | 2,816 | py | Python | benchmarks/Python/towers.py | OvermindDL1/are-we-fast-yet | 78b0560c7c3b7a90b22b90c4cb27660e56478d30 | [
"BSD-3-Clause"
] | null | null | null | benchmarks/Python/towers.py | OvermindDL1/are-we-fast-yet | 78b0560c7c3b7a90b22b90c4cb27660e56478d30 | [
"BSD-3-Clause"
] | null | null | null | benchmarks/Python/towers.py | OvermindDL1/are-we-fast-yet | 78b0560c7c3b7a90b22b90c4cb27660e56478d30 | [
"BSD-3-Clause"
] | null | null | null | # This code is based on the SOM class library.
#
# Copyright (c) 2001-2021 see AUTHORS.md file
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from benchmark import Benchmark
class _TowersDisk:
def __init__(self, size):
self.size = size
self.next = None
class Towers(Benchmark):
    """Towers-of-Hanoi benchmark: builds a 14-disk tower (sizes 13..0) and
    recursively moves the top 13 disks, counting single-disk moves."""

    def __init__(self):
        self._piles = None      # three linked stacks of _TowersDisk
        self._moves_done = 0    # single-disk moves performed so far

    def _push_disk(self, disk, pile):
        """Place `disk` on top of `pile`; a larger disk may not cover a smaller one."""
        current_top = self._piles[pile]
        if current_top is not None and disk.size >= current_top.size:
            raise Exception("Cannot put a big disk on a smaller one")
        disk.next = current_top
        self._piles[pile] = disk

    def _pop_disk_from(self, pile):
        """Detach and return the top disk of `pile`; the pile must be non-empty."""
        removed = self._piles[pile]
        if removed is None:
            raise Exception("Attempting to remove a disk from an empty pile")
        self._piles[pile] = removed.next
        removed.next = None
        return removed

    def _move_top_disk(self, from_pile, to_pile):
        """Move one disk between piles and record the move."""
        self._push_disk(self._pop_disk_from(from_pile), to_pile)
        self._moves_done += 1

    def _build_tower_at(self, pile, disks):
        """Stack disks of size `disks` down to 0 (largest first) onto `pile`."""
        for size in reversed(range(disks + 1)):
            self._push_disk(_TowersDisk(size), pile)

    def _move_disks(self, disks, from_pile, to_pile):
        """Classic recursive Hanoi transfer of `disks` disks via the spare pile."""
        if disks == 1:
            self._move_top_disk(from_pile, to_pile)
            return
        spare_pile = (3 - from_pile) - to_pile
        self._move_disks(disks - 1, from_pile, spare_pile)
        self._move_top_disk(from_pile, to_pile)
        self._move_disks(disks - 1, spare_pile, to_pile)

    def benchmark(self):
        """Run one benchmark iteration and return the number of moves made."""
        self._piles = [None, None, None]
        self._build_tower_at(0, 13)
        self._moves_done = 0
        self._move_disks(13, 0, 1)
        return self._moves_done

    def verify_result(self, result):
        # Moving 13 disks takes exactly 2**13 - 1 = 8191 moves.
        return result == 8191
| 35.2 | 79 | 0.675426 | 416 | 2,816 | 4.387019 | 0.355769 | 0.048219 | 0.038356 | 0.046027 | 0.109589 | 0.089863 | 0.089863 | 0.089863 | 0.036164 | 0 | 0 | 0.013214 | 0.247514 | 2,816 | 79 | 80 | 35.64557 | 0.848042 | 0.393821 | 0 | 0.136364 | 0 | 0 | 0.049852 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.204545 | false | 0 | 0.022727 | 0.022727 | 0.340909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e7d33ee6f25bf7e294ba72a60592d7ecbf6df93 | 1,479 | py | Python | src/figcli/test/cli/config.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 36 | 2020-07-21T21:22:02.000Z | 2021-10-20T06:55:47.000Z | src/figcli/test/cli/config.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | 2 | 2020-10-29T12:49:15.000Z | 2021-04-29T01:12:05.000Z | src/figcli/test/cli/config.py | figtools/figgy-cli | 88f4ccb8221ef9734f95b2637acfacc6e00983e7 | [
"Apache-2.0"
] | null | null | null | import os
# PS items stored with this value get auto-cleaned from our audit table.
# Used for automated E2E testing -- use it for ALL test values.
DELETE_ME_VALUE = 'DELETE_ME'

MFA_USER_ENV_KEY = 'MFA_USER'
MFA_SECRET_ENV_KEY = 'MFA_SECRET'

# Environment-variable names holding SSO test credentials.
GOOGLE_SSO_USER = 'GOOGLE_SSO_USER'
GOOGLE_SSO_PASSWORD = 'GOOGLE_SSO_PASSWORD'
GOOGLE_IDP_ID = 'GOOGLE_IDP_ID'
GOOGLE_SP_ID = 'GOOGLE_SP_ID'
GOOGLE_MFA_SECRET = 'GOOGLE_MFA_SECRET'

OKTA_SSO_USER = 'OKTA_SSO_USER'
OKTA_SSO_PASSWORD = 'OKTA_SSO_PASSWORD'
OKTA_EMBED_URL = 'OKTA_EMBED_URL'
OKTA_MFA_SECRET = 'OKTA_MFA_SECRET'

# Parameter Store paths exercised by the tests.
param_1 = '/shared/test/automated_test/param_1'
param_test_prefix = '/shared/test2/automated_test/'
dump_prefix = '/shared/test/automated_test/'
# NOTE(review): 'parm_1' (sic) kept as-is -- changing it would break lookups.
data_param_1 = '/data/test/automated_test/parm_1'
devops_param_1 = '/devops/test/automated_test/param_1'

# Values written for those parameters (always the auto-cleaned sentinel).
param_1_val = DELETE_ME_VALUE
data_param_1_val = DELETE_ME_VALUE
devops_param_1_val = DELETE_ME_VALUE

# Descriptions attached to the parameters.
param_1_desc = 'desc1'
data_param_1_desc = 'datadesc1'
devops_param_1_desc = 'devopsdesc1'

# Share destination
automated_test_dest_1 = '/app/automated-test/dest/1'

# Default figgy environment, keyed off which SSO provider is configured:
# Google tests hit figgy-qa; Okta, bastion and profile-based tests all
# resolve to 'stage'.
if os.environ.get(GOOGLE_IDP_ID):
    DEFAULT_ENV = 'qa'
else:
    DEFAULT_ENV = 'stage'
| 29 | 121 | 0.772819 | 240 | 1,479 | 4.366667 | 0.358333 | 0.062977 | 0.049618 | 0.048664 | 0.194656 | 0.062977 | 0 | 0 | 0 | 0 | 0 | 0.014786 | 0.13117 | 1,479 | 50 | 122 | 29.58 | 0.800778 | 0.233266 | 0 | 0.064516 | 0 | 0 | 0.36795 | 0.165622 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.064516 | 0.032258 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5e7defce2e2ab62233e622a56872ccd1b331951a | 632 | py | Python | helper_functions.py | dadam1026/Homework1v2 | 47360f83eff0cff34498827138951afdcb0362b4 | [
"MIT"
] | null | null | null | helper_functions.py | dadam1026/Homework1v2 | 47360f83eff0cff34498827138951afdcb0362b4 | [
"MIT"
] | null | null | null | helper_functions.py | dadam1026/Homework1v2 | 47360f83eff0cff34498827138951afdcb0362b4 | [
"MIT"
] | null | null | null | # Contains helper functions for your apps!
from os import listdir, remove
# If the io following files are in the current directory, remove them!
# 1. 'currency_pair.txt'
# 2. 'currency_pair_history.csv'
# 3. 'trade_order.p'
def check_for_and_del_io_files():
    """Delete leftover IO files from the current directory, if present.

    Targets 'currency_pair.txt', 'currency_pair_history.csv' and
    'trade_order.p'. A message is printed per file; missing files are
    reported and skipped. Returns None.
    """
    file_list = ['currency_pair.txt', 'currency_pair_history.csv', 'trade_order.p']
    for filename in file_list:
        try:
            remove(filename)
            print("File deleted")
        except FileNotFoundError:
            # Fix: was a bare `except:`, which also swallowed unrelated errors
            # (permissions, KeyboardInterrupt, ...) while printing a message
            # that only describes the missing-file case.
            print("Could not delete file: " + filename + ". File does not exist")
| 30.095238 | 83 | 0.655063 | 93 | 632 | 4.290323 | 0.666667 | 0.120301 | 0.075188 | 0.110276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006276 | 0.243671 | 632 | 20 | 84 | 31.6 | 0.828452 | 0.420886 | 0 | 0 | 0 | 0 | 0.310924 | 0.070028 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.1 | 0.1 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5e81d9b31c50e8e47fe0403850ae0d0290d47815 | 618 | py | Python | python_basics/4.arithmetic_operators/discount_challenge.py | edilsonmatola/Python_Master | cef88a19e641f6454944bab358841b380c64699e | [
"MIT"
] | 2 | 2022-03-12T07:53:23.000Z | 2022-03-14T16:09:06.000Z | python_basics/4.arithmetic_operators/discount_challenge.py | edilsonmatola/Python_Master | cef88a19e641f6454944bab358841b380c64699e | [
"MIT"
] | 18 | 2022-03-13T19:45:48.000Z | 2022-03-31T06:04:12.000Z | python_basics/4.arithmetic_operators/discount_challenge.py | edilsonmatola/Python_Master | cef88a19e641f6454944bab358841b380c64699e | [
"MIT"
] | null | null | null | """
* Problem Description
*Suppose you are a university student and you need to pay 1536 dollars as a tuition fee.
*The college is offering a 10% discount on the early payment. How much money do you have to pay if you make an early payment?
*Task
*Create a variable named fee and assign 1536 to it.
*Create another variable discount_percent and assign 10 to it.
*Compute discount and assign it to the discount variable.
*Compute and print the fee you have to pay by subtracting discount from fee.
"""
# Tuition fee in dollars.
fee = 1536
# Early-payment discount, in percent.
discount_percent = 10
# Fix: previously `discount` was assigned the *discounted fee* and
# `discount_percent` was never used, contradicting the task description
# above. Compute the actual discount amount from the percentage, then
# subtract it from the fee (numeric result is unchanged: 1382.4).
discount = fee * (discount_percent / 100)
fee = fee - discount
print(fee)  # Output: 1382.4
| 22.888889 | 125 | 0.744337 | 105 | 618 | 4.361905 | 0.495238 | 0.032751 | 0.039301 | 0.052402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050403 | 0.197411 | 618 | 26 | 126 | 23.769231 | 0.872984 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e864ccf01f6e54ee7967475c32a472181419c97 | 1,910 | py | Python | package/tests/base/test_grid.py | mondas-mania/cipher-py | e1dd287311ab487fd54a8becee444b3d7561b63c | [
"MIT"
] | null | null | null | package/tests/base/test_grid.py | mondas-mania/cipher-py | e1dd287311ab487fd54a8becee444b3d7561b63c | [
"MIT"
] | null | null | null | package/tests/base/test_grid.py | mondas-mania/cipher-py | e1dd287311ab487fd54a8becee444b3d7561b63c | [
"MIT"
] | null | null | null | from cipherpy.base import create_grid, playfair_digram_encode
import numpy as np
import pytest
# Playfair alphabets: the 25 letters a-z with "j" removed, forwards and reversed.
alphabet = "abcdefghiklmnopqrstuvwxyz"
inv_alph = "zyxwvutsrqponmlkihgfedcba"

# Reference 5x5 grids: the alphabets laid out row-major.
grid = np.array(list(alphabet)).reshape(5, 5)
inv_grid = np.array(list(inv_alph)).reshape(5, 5)
def test_create_grid():
    """Verify create_grid builds the expected 5x5 grids and rejects bad alphabets."""
    assert (create_grid() == grid).all()
    assert create_grid().shape == (5, 5)
    assert (create_grid(inv_alph) == inv_grid).all()
    # A 3-character alphabet cannot form a square grid.
    with pytest.raises(Exception) as excinfo_length:
        create_grid("abc")
    assert "is not a square" in str(excinfo_length.value)
    # Alphabets with repeated characters are rejected.
    with pytest.raises(Exception) as excinfo_dupes:
        create_grid("acab")
    assert "Duplicate" in str(excinfo_dupes.value)
def test_playfair_digram_encode():
    """Check Playfair digram encoding plus its error cases."""
    assert playfair_digram_encode("an", grid) == "cl"
    assert playfair_digram_encode("al", grid) == "fq"
    assert playfair_digram_encode("ac", grid) == "bd"
    assert playfair_digram_encode("aa", grid) == "gg"
    assert playfair_digram_encode("zz", grid) == "aa"
    assert playfair_digram_encode("zz", inv_grid) == "tt"
    # Characters absent from the grid ('j' was removed) must be rejected.
    with pytest.raises(Exception) as excinfo_missing:
        playfair_digram_encode("jj", grid)
    assert "not found in the given grid" in str(excinfo_missing.value)
    # A one-dimensional array is not a valid grid.
    with pytest.raises(Exception) as excinfo_shape:
        playfair_digram_encode("aa", np.array(["a", "b"]))
    assert "not of equal dimensions" in str(excinfo_shape.value)
    # Input must be exactly two characters long.
    with pytest.raises(Exception) as excinfo_length:
        playfair_digram_encode("abc", grid)
    assert "not two characters long" in str(excinfo_length.value)
# Test characters not in grid
# Test malformed grid
# Test input of length != 2 | 36.730769 | 66 | 0.630366 | 283 | 1,910 | 4.070671 | 0.332155 | 0.133681 | 0.190972 | 0.135417 | 0.190972 | 0.118924 | 0.0625 | 0 | 0 | 0 | 0 | 0.001919 | 0.181675 | 1,910 | 52 | 67 | 36.730769 | 0.735125 | 0.058115 | 0 | 0.044444 | 0 | 0 | 0.132107 | 0.027871 | 0 | 0 | 0 | 0 | 0.311111 | 1 | 0.044444 | false | 0 | 0.066667 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e8ac64fbe70fe326a2767fef3d9300d1b292115 | 2,049 | py | Python | class_4/dungeon.py | goosemanjack/python_intro_class | ca4b2390f2b0bb037d94ed4516d98848b7cf97d6 | [
"MIT"
] | null | null | null | class_4/dungeon.py | goosemanjack/python_intro_class | ca4b2390f2b0bb037d94ed4516d98848b7cf97d6 | [
"MIT"
] | null | null | null | class_4/dungeon.py | goosemanjack/python_intro_class | ca4b2390f2b0bb037d94ed4516d98848b7cf97d6 | [
"MIT"
] | null | null | null | # Dungeon Crawler
import asciiart
import level1
import level2
import time # timer
import hero
#def test():
# print(asciiart.baby_dragon())
# print(asciiart.big_skull())
# print(asciiart.dragon())
# print(asciiart.samurai())
# print(asciiart.skull_cross())
# print(asciiart.warrior())
# print(asciiart.chicken())
global my_hero = hero.Hero("Bob")
def start_dungeon():
    """Run level 1 of the dungeon crawl, branching on the player's choices.

    Each level1 room returns an integer choice code; 1 generally means
    quit/die, 3 restarts the dungeon, 2 advances to run_second_room().
    """
    global my_hero
    result = level1.first_room()
    if result == 1:
        # Player chickened out: show art and end the game.
        print(asciiart.chicken())
        print("Have fun baking pies in safety, you baby.")
        return
    else:
        print("You are a brave soul!")
    print("")
    time.sleep(0.75)
    result = level1.second_room()
    if(result == 1):
        # Fatal choice in the second room.
        print(asciiart.skull_cross())
        print("You are dead. It should have been obvious not to open the box.")
        return
    elif(result == 3):
        # Restart from the beginning (recursive re-entry).
        return start_dungeon()
    print("")
    result = level1.next_level()
    if(result == 1):
        result = level1.second_room()
        #...
        return
    elif(result == 2):
        # Advance to level 2.
        return run_second_room()
def run_second_room():
    """Run level 2 (nest and dragon lair); prints the ending for each outcome."""
    print("")
    result = level2.nest()
    if result == 1:
        # Hatched an egg: comedic good ending for this room.
        print(asciiart.baby_dragon())
        time.sleep(0.75)
        print("You have birthed a baby!")
        return
    elif result == 2:
        # Momma dragon returns: death ending.
        print(asciiart.dragon())
        print("Oh no! Momma dragon is here and she isn't happy!")
        time.sleep(1.25)
        print(asciiart.big_skull())
        print("You are dead")
        return
    print("")
    result = level2.dragon_lair()
    if result == 1:
        # Victory ending.
        print(asciiart.samurai())
        time.sleep(1.1)
        print(asciiart.dragon())
        time.sleep(1.25)
        print("....")
        time.sleep(0.25)
        print("You are VICTORIOUS!!!")
        return
    else:
        # Woke the dragon: death ending.
        print(asciiart.dragon())
        print("The dragon awakens! Prepare for battle.")
        time.sleep(0.75)
        print(asciiart.big_skull())
        print("You are dead")
start_dungeon()
| 19.893204 | 79 | 0.570522 | 249 | 2,049 | 4.610442 | 0.325301 | 0.181185 | 0.039199 | 0.04878 | 0.256969 | 0.108014 | 0.062718 | 0.062718 | 0 | 0 | 0 | 0.024931 | 0.295266 | 2,049 | 102 | 80 | 20.088235 | 0.770083 | 0.120547 | 0 | 0.484375 | 0 | 0 | 0.160604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.078125 | null | null | 0.359375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e8c140172a68c8d661d38b28158caaeea26abc7 | 804 | py | Python | getTotalCount.py | bdo311/chirpseq-analysis | 64a5cdbb1fbac1ef8c5cca844ea743b80641287c | [
"Apache-2.0"
] | 3 | 2016-09-19T21:30:36.000Z | 2019-03-14T08:25:32.000Z | getTotalCount.py | bdo311/chirpseq-analysis | 64a5cdbb1fbac1ef8c5cca844ea743b80641287c | [
"Apache-2.0"
] | 2 | 2018-03-11T02:21:29.000Z | 2021-03-03T11:18:30.000Z | getTotalCount.py | bdo311/chirpseq-analysis | 64a5cdbb1fbac1ef8c5cca844ea743b80641287c | [
"Apache-2.0"
] | 3 | 2016-10-28T09:15:16.000Z | 2019-06-16T04:38:31.000Z | # getTotalCount.py
# 3/1/14
# Gets total count for all ChIP-seq reads
import csv, sys, fileinput
# Register a tab-delimited dialect for reading the input rows.
# NOTE(review): this is Python 2 code (print statements).
csv.register_dialect("textdialect", delimiter='\t')

if len(sys.argv) > 1:
    # A filename was given on the command line: read it as tab-delimited rows.
    fn = sys.argv[1]
    ifile = open(fn, 'r')
    reader = csv.reader(ifile, 'textdialect')
    total = 0
    counter = 0
    for row in reader:
        #if 'chr' not in row[0]: continue
        #if counter % 100000 == 0: print counter
        # Weighted sum: interval length (end - start) times the value column.
        # Rows appear to be bedGraph-style (chrom, start, end, value) -- confirm.
        total = total + (int(row[2]) - int(row[1])) * float(row[3])
        #print int(row[2])-int(row[1]),float(row[3])
        counter += 1
    print fn, 'combined', total
else:
    # No filename: read whitespace-split rows from stdin via fileinput.
    total = 0
    counter = 0
    for line in fileinput.input():
        row = line.split()
        total = total + (int(row[2]) - int(row[1])) * float(row[3])
        #print int(row[2])-int(row[1]),float(row[3])
        counter += 1
    print 'combined', total
| 23.647059 | 62 | 0.603234 | 128 | 804 | 3.78125 | 0.367188 | 0.099174 | 0.057851 | 0.082645 | 0.376033 | 0.305785 | 0.305785 | 0.305785 | 0.305785 | 0.305785 | 0 | 0.050955 | 0.218905 | 804 | 34 | 63 | 23.647059 | 0.719745 | 0.273632 | 0 | 0.4 | 0 | 0 | 0.075368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.05 | null | null | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5e8ecd87d9b6bf3e0c792cf33dd8743654d8794d | 970 | py | Python | binanceapi/constant.py | ramoslin02/binanceapi | 0ae32229da4fb3dcbd4f2276f442d26c83ad3a39 | [
"MIT"
] | 4 | 2021-02-12T23:19:50.000Z | 2021-12-21T14:45:55.000Z | binanceapi/constant.py | ramoslin02/binanceapi | 0ae32229da4fb3dcbd4f2276f442d26c83ad3a39 | [
"MIT"
] | null | null | null | binanceapi/constant.py | ramoslin02/binanceapi | 0ae32229da4fb3dcbd4f2276f442d26c83ad3a39 | [
"MIT"
] | 7 | 2021-02-23T01:02:23.000Z | 2022-03-08T12:56:59.000Z | from enum import Enum
class OrderStatus(object):
    """
    Order Status
    """
    # NOTE(review): unlike the other groups in this module this is a plain
    # class, not an Enum, so members are bare strings.  Confirm callers
    # compare against string values before converting it to an Enum.
    NEW = "NEW"
    PARTIALLY_FILLED = "PARTIALLY_FILLED"
    FILLED = "FILLED"
    CANCELED = "CANCELED"
    PENDING_CANCEL = "PENDING_CANCEL"
    REJECTED = "REJECTED"
    EXPIRED = "EXPIRED"
class OrderType(Enum):
    """
    Order type
    """
    LIMIT = "LIMIT"
    MARKET = "MARKET"
    STOP = "STOP"
class RequestMethod(Enum):
    """
    Request methods
    """
    # HTTP verbs used when calling the REST API.
    GET = 'GET'
    POST = 'POST'
    PUT = 'PUT'
    DELETE = 'DELETE'
class Interval(Enum):
    """
    Interval for klines
    """
    # Candle period identifiers (minutes, hours, days, weeks, months)
    # in the exchange's string format.
    MINUTE_1 = '1m'
    MINUTE_3 = '3m'
    MINUTE_5 = '5m'
    MINUTE_15 = '15m'
    MINUTE_30 = '30m'
    HOUR_1 = '1h'
    HOUR_2 = '2h'
    HOUR_4 = '4h'
    HOUR_6 = '6h'
    HOUR_8 = '8h'
    HOUR_12 = '12h'
    DAY_1 = '1d'
    DAY_3 = '3d'
    WEEK_1 = '1w'
    MONTH_1 = '1M'
class OrderSide(Enum):
    """
    order side
    """
    BUY = "BUY"
    SELL = "SELL"
| 15.396825 | 41 | 0.525773 | 111 | 970 | 4.423423 | 0.576577 | 0.0611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 0.331959 | 970 | 62 | 42 | 15.645161 | 0.70216 | 0.072165 | 0 | 0 | 0 | 0 | 0.161604 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027027 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5ea112ef558fca9073cce1f7c0ebf8ece1f28a9a | 2,289 | py | Python | Module_06/tests/sauce_lab/test_checkout_details.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | Module_06/tests/sauce_lab/test_checkout_details.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | Module_06/tests/sauce_lab/test_checkout_details.py | JoseGtz/2021_python_selenium | c7b39479c78839ba2e2e2633a0f673a8b02fb4cb | [
"Unlicense"
] | null | null | null | """Test cases for inventory item"""
import pytest
from Module_06.src.elements.inventory_item import InventoryItem
from Module_06.src.pages.login import LoginPage
from Module_06.tests.common.test_base import TestBase
from Module_06.src.pages.checkout_details import CheckoutDetailsPage
from Module_06.src.pages.checkout_information import CheckoutInformationPage
_DEF_USER = 'standard_user'
_DEF_PASSWORD = 'secret_sauce'
class TestCheckoutDetails(TestBase):
    """Checkout-details page tests for the demo shop.

    All three tests share identical setup (log in, add the first inventory
    item to the cart, open the cart); that setup is extracted into a private
    helper to remove the triplicated code.  The two copy-pasted
    "Test inventory prices" docstrings were also corrected.
    """

    def _open_checkout_with_one_item(self):
        """Log in, add the first product to the cart and open the cart page.

        Returns (inventory_page, checkout_details_page).
        """
        login = LoginPage(self.driver)
        login.open()
        inventory = login.login(_DEF_USER, _DEF_PASSWORD)
        first_item = inventory.products[0]
        first_item: InventoryItem
        first_item.add_to_cart()
        inventory.header.goto_cart()
        return inventory, CheckoutDetailsPage(self.driver, 5)

    @pytest.mark.sanity
    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_details(self):
        """Continue shopping from the checkout details page."""
        inventory, checkout_item = self._open_checkout_with_one_item()
        checkout_item.continue_shopping()
        inventory.products.reload()
        print(f'Total elements in cart: {inventory.header.get_total_cart_items()}')

    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_information(self):
        """Start checkout, then cancel on the information page."""
        _, checkout_item = self._open_checkout_with_one_item()
        checkout_item.checkout_btn()
        checkout_page = CheckoutInformationPage(self.driver, 5)
        checkout_page.cancel_checkout()
        print("Checkout Canceled")

    @pytest.mark.regression
    @pytest.mark.checkout_details
    def test_checkout_remove(self):
        """Remove the item from the checkout details page."""
        _, checkout_item = self._open_checkout_with_one_item()
        checkout_item.remove_item_checkout()
        print("Checkout Canceled")
| 36.333333 | 83 | 0.706859 | 262 | 2,289 | 5.908397 | 0.240458 | 0.052326 | 0.03876 | 0.03876 | 0.624677 | 0.611757 | 0.575581 | 0.575581 | 0.575581 | 0.575581 | 0 | 0.009325 | 0.203582 | 2,289 | 62 | 84 | 36.919355 | 0.839824 | 0.041503 | 0 | 0.615385 | 0 | 0 | 0.057064 | 0.018868 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0.076923 | 0.115385 | 0 | 0.192308 | 0.057692 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5ea28c59393a6e808bc7631cfe435d3d54ff90cc | 3,600 | py | Python | face.py/DetectFace.py | DNSKT/python | 081e439e9e9f2e9b58ace5ff0cba93b8c46d5e36 | [
"MIT"
] | 4 | 2021-10-02T02:14:06.000Z | 2022-01-13T01:54:16.000Z | face.py/DetectFace.py | DNSKT/python | 081e439e9e9f2e9b58ace5ff0cba93b8c46d5e36 | [
"MIT"
] | null | null | null | face.py/DetectFace.py | DNSKT/python | 081e439e9e9f2e9b58ace5ff0cba93b8c46d5e36 | [
"MIT"
] | null | null | null | import asyncio
import io
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
# To install this module, run:
# python -m pip install Pillow
from PIL import Image, ImageDraw
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person
# This key will serve all examples in this document.
# SECURITY NOTE(review): the subscription key is hard-coded in source; move
# it to an environment variable / secret store before sharing this file.
# This key will serve all examples in this document.
KEY = "650def957dcc45b080ffde1f72b8bac3"
# This endpoint will be used in all examples in this quickstart.
ENDPOINT = "https://facediscord.cognitiveservices.azure.com/"

# Create an authenticated FaceClient.
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

# Detect a face in an image that contains a single face
single_face_image_url = 'https://media.discordapp.net/attachments/912550999003127898/926672974092861470/IMG_20211231_221649_613.jpg'
single_image_name = os.path.basename(single_face_image_url)
# We use detection model 3 to get better performance.
detected_faces = face_client.face.detect_with_url(url=single_face_image_url, detection_model='detection_03')
if not detected_faces:
    raise Exception('No face detected from image {}'.format(single_image_name))

# Display the detected face ID in the first single-face image.
# Face IDs are used for comparison to faces (their IDs) detected in other images.
print('Detected face ID from', single_image_name, ':')
for face in detected_faces: print (face.face_id)
print()

# Save this ID for use in Find Similar
first_image_face_ID = detected_faces[0].face_id

# Detect the faces in an image that contains multiple faces
# Each detected face gets assigned a new ID
# NOTE(review): the URL below starts with a space -- confirm the service
# tolerates it, otherwise strip it.
multi_face_image_url = " https://cdn.discordapp.com/attachments/766489274471940106/792984874545709126/IMG_20201228_011741.jpg"
multi_image_name = os.path.basename(multi_face_image_url)
# We use detection model 3 to get better performance.
detected_faces2 = face_client.face.detect_with_url(url=multi_face_image_url, detection_model='detection_03')

# Search through faces detected in group image for the single face from first image.
# First, create a list of the face IDs found in the second image.
second_image_face_IDs = list(map(lambda x: x.face_id, detected_faces2))
# Next, find similar face IDs like the one detected in the first image.
similar_faces = face_client.face.find_similar(face_id=first_image_face_ID, face_ids=second_image_face_IDs)
if not similar_faces:
    print('No similar faces found in', multi_image_name, '.')
# Print the details of the similar faces detected
else:
    print('Similar faces found in', multi_image_name + ':')
    for face in similar_faces:
        first_image_face_ID = face.face_id
        # The similar face IDs of the single face image and the group image do not need to match,
        # they are only used for identification purposes in each image.
        # The similar faces are matched using the Cognitive Services algorithm in find_similar().
        face_info = next(x for x in detected_faces2 if x.face_id == first_image_face_ID)
        if face_info:
            print(' Face ID: ', first_image_face_ID)
            print(' Face rectangle:')
            print(' Left: ', str(face_info.face_rectangle.left))
            print(' Top: ', str(face_info.face_rectangle.top))
            print(' Width: ', str(face_info.face_rectangle.width))
            print(' Height: ', str(face_info.face_rectangle.height))
5ea2f5c49120331dea159521e5e8c35ca8b6be45 | 2,741 | py | Python | examples/tesselation.py | 2dx/moderngl | 5f932560a535469626d79d22e4205f400e18f328 | [
"MIT"
] | 916 | 2019-03-11T19:15:20.000Z | 2022-03-31T19:22:16.000Z | examples/tesselation.py | 2dx/moderngl | 5f932560a535469626d79d22e4205f400e18f328 | [
"MIT"
] | 218 | 2019-03-11T06:05:52.000Z | 2022-03-30T16:59:22.000Z | examples/tesselation.py | 2dx/moderngl | 5f932560a535469626d79d22e4205f400e18f328 | [
"MIT"
] | 110 | 2019-04-06T18:32:24.000Z | 2022-03-21T20:30:47.000Z | #!/usr/bin/env python3
'''Simple example of using tesselation to render a cubic Bézier curve'''
import numpy as np
import moderngl
from ported._example import Example
class Tessellation(Example):
    """Draws a single cubic Bézier curve using GL 4.0 hardware tessellation."""
    title = "Tessellation"
    gl_version = (4, 0)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Pipeline: pass-through vertex shader, tess-control stage that sets
        # isoline tessellation levels, tess-evaluation stage that places
        # vertices on the Bézier curve, constant-white fragment shader.
        self.prog = self.ctx.program(
            vertex_shader='''
                #version 400 core
                in vec2 in_pos;
                void main() { gl_Position = vec4(in_pos, 0.0, 1.0); }
            ''',
            tess_control_shader='''
                #version 400 core
                layout(vertices = 4) out;
                void main() {
                    // set tesselation levels, TODO compute dynamically
                    gl_TessLevelOuter[0] = 1;
                    gl_TessLevelOuter[1] = 32;
                    // pass through vertex positions
                    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
                }
            ''',
            tess_evaluation_shader='''
                #version 400 core
                layout(isolines, fractional_even_spacing, ccw) in;
                // compute a point on a bezier curve with the points p0, p1, p2, p3
                // the parameter u is in [0, 1] and determines the position on the curve
                vec3 bezier(float u, vec3 p0, vec3 p1, vec3 p2, vec3 p3) {
                    float B0 = (1.0 - u) * (1.0 - u) * (1.0 - u);
                    float B1 = 3.0 * (1.0 - u) * (1.0 - u) * u;
                    float B2 = 3.0 * (1.0 - u) * u * u;
                    float B3 = u * u * u;
                    return B0 * p0 + B1 * p1 + B2 * p2 + B3 * p3;
                }
                void main() {
                    float u = gl_TessCoord.x;
                    vec3 p0 = vec3(gl_in[0].gl_Position);
                    vec3 p1 = vec3(gl_in[1].gl_Position);
                    vec3 p2 = vec3(gl_in[2].gl_Position);
                    vec3 p3 = vec3(gl_in[3].gl_Position);
                    gl_Position = vec4(bezier(u, p0, p1, p2, p3), 1.0);
                }
            ''',
            fragment_shader='''
                #version 400 core
                out vec4 frag_color;
                void main() { frag_color = vec4(1.0); }
            '''
        )

        # four vertices define a cubic Bézier curve; has to match the shaders
        self.ctx.patch_vertices = 4
        self.ctx.line_width = 5.0
        # Control points of the curve in normalized device coordinates.
        vertices = np.array([
            [-1.0, 0.0],
            [-0.5, 1.0],
            [0.5, -1.0],
            [1.0, 0.0],
        ])
        vbo = self.ctx.buffer(vertices.astype('f4'))
        self.vao = self.ctx.simple_vertex_array(self.prog, vbo, 'in_pos')

    def render(self, time, frame_time):
        # Clear to a blue background and draw the control points as patches;
        # the tessellator expands each 4-vertex patch into the curve.
        self.ctx.clear(0.2, 0.4, 0.7)
        self.vao.render(mode=moderngl.PATCHES)
if __name__ == '__main__':
    # Script entry point: open the example window and run the render loop.
    Tessellation.run()
| 28.552083 | 85 | 0.511127 | 360 | 2,741 | 3.738889 | 0.344444 | 0.019316 | 0.013373 | 0.059435 | 0.061664 | 0.011144 | 0 | 0 | 0 | 0 | 0 | 0.070609 | 0.364466 | 2,741 | 95 | 86 | 28.852632 | 0.702067 | 0.056914 | 0 | 0.134328 | 0 | 0.029851 | 0.640419 | 0.078355 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0.014925 | 0.044776 | 0 | 0.134328 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ea4c56b19379cf79c7f01fa78cbab208ced0a25 | 326 | py | Python | setup.py | rahul0705/smv | 01a4545be55f9355c8cc1d28918a8ab24d4a02e1 | [
"MIT"
] | null | null | null | setup.py | rahul0705/smv | 01a4545be55f9355c8cc1d28918a8ab24d4a02e1 | [
"MIT"
] | null | null | null | setup.py | rahul0705/smv | 01a4545be55f9355c8cc1d28918a8ab24d4a02e1 | [
"MIT"
] | null | null | null | """
author: Rahul Mohandas
"""
import setuptools
# Package metadata for the `smv` distribution.
setuptools.setup(
    name="smv",
    version="0.1",
    # Ship every package except the test suite.
    packages=setuptools.find_packages(exclude=["tests"]),
    author="Rahul Mohandas",
    author_email="rahul@rahulmohandas.com",
    description="It's like scp but for moving",
    license="MIT",
    test_suite="tests"
)
| 20.375 | 57 | 0.671779 | 39 | 326 | 5.538462 | 0.769231 | 0.101852 | 0.175926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.171779 | 326 | 15 | 58 | 21.733333 | 0.792593 | 0.067485 | 0 | 0 | 0 | 0 | 0.283784 | 0.077703 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5eab19f2aca9d090175ce4ce170870b18995458b | 623 | py | Python | functions/solution/functions_numstr_human.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | 1 | 2019-01-02T15:04:08.000Z | 2019-01-02T15:04:08.000Z | functions/solution/functions_numstr_human.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | functions/solution/functions_numstr_human.py | giserh/book-python | ebd4e70cea1dd56986aa8efbae3629ba3f1ba087 | [
"MIT"
] | null | null | null | from typing import Union
# Maps each character of a decimal number string to its English word;
# the decimal point maps to "and".
NUMBER_DICT = {
    '0': 'zero',
    '1': 'one',
    '2': 'two',
    '3': 'three',
    '4': 'four',
    '5': 'five',
    '6': 'six',
    '7': 'seven',
    '8': 'eight',
    '9': 'nine',
    '.': 'and',
}
def number_to_str(number: Union[int, float]) -> str:
    """
    >>> number_to_str(1969)
    'one thousand nine hundred sixty nine'
    >>> number_to_str(31337)
    'thirty one thousand three hundred thirty seven'
    >>> number_to_str(13.37)
    'thirteen and thirty seven hundredths'
    >>> number_to_str(31.337)
    'thirty one three hundreds thirty seven thousands'
    """
    # NOTE(review): exercise stub -- not implemented (currently returns None).
    # The doctests above document the expected behaviour.
    pass
| 18.878788 | 54 | 0.548957 | 80 | 623 | 4.1375 | 0.5875 | 0.120846 | 0.166163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.061269 | 0.266453 | 623 | 32 | 55 | 19.46875 | 0.66302 | 0.449438 | 0 | 0 | 0 | 0 | 0.180602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.0625 | 0.0625 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
5eacda4c41904fde53e82605bee0dcf17f4ef068 | 5,343 | py | Python | cJobObject.py | SkyLined/mWindowsAPI | d64d57bbf87d2a7b33cf7de89263553793484a84 | [
"CC-BY-4.0"
] | 7 | 2017-10-09T14:32:22.000Z | 2021-01-30T07:25:50.000Z | cJobObject.py | SkyLined/mWindowsAPI | d64d57bbf87d2a7b33cf7de89263553793484a84 | [
"CC-BY-4.0"
] | 2 | 2017-12-12T02:53:18.000Z | 2019-02-19T09:23:18.000Z | cJobObject.py | SkyLined/mWindowsAPI | d64d57bbf87d2a7b33cf7de89263553793484a84 | [
"CC-BY-4.0"
] | 1 | 2017-12-12T02:42:18.000Z | 2017-12-12T02:42:18.000Z | from mWindowsSDK import *;
from .fbIsValidHandle import fbIsValidHandle;
from .fbLastErrorIs import fbLastErrorIs;
from .fohOpenForProcessIdAndDesiredAccess import fohOpenForProcessIdAndDesiredAccess;
from .fsGetPythonISA import fsGetPythonISA;
from .fThrowLastError import fThrowLastError;
from .oSystemInfo import oSystemInfo;
JOBOBJECT_EXTENDED_LIMIT_INFORMATION = {
"x86": JOBOBJECT_EXTENDED_LIMIT_INFORMATION32,
"x64": JOBOBJECT_EXTENDED_LIMIT_INFORMATION64,
}[fsGetPythonISA()];
class cJobObject(object):
  # Wraps a Win32 Job Object handle; processes can be added to the job and
  # memory limits queried/set through JOBOBJECT_EXTENDED_LIMIT_INFORMATION.
  def __init__(oSelf, *auProcessIds):
    # Create an anonymous job object and add each given process id to it.
    oKernel32 = foLoadKernel32DLL();
    oSelf.__ohJob = oKernel32.CreateJobObjectW(NULL, NULL);
    if not fbIsValidHandle(oSelf.__ohJob):
      fThrowLastError("CreateJobObject(NULL, NULL)");
    for uProcessId in auProcessIds:
      assert oSelf.fbAddProcessForId(uProcessId, bThrowAllErrors = True), \
          "Yeah, well, you know, that's just like ehh.. your opinion, man.";
  def fbAddProcessForId(oSelf, uProcessId, bThrowAllErrors = False):
    # Try to assign the process to this job; returns True on success.
    # If assignment fails with ERROR_ACCESS_DENIED (and bThrowAllErrors is
    # False), falls back to IsProcessInJob to report whether the process is
    # already a member of this job.
    ohProcess = fohOpenForProcessIdAndDesiredAccess(uProcessId, PROCESS_SET_QUOTA | PROCESS_TERMINATE);
    oKernel32 = foLoadKernel32DLL();
    try:
      if oKernel32.AssignProcessToJobObject(oSelf.__ohJob, ohProcess):
        return True;
      if bThrowAllErrors or not fbLastErrorIs(ERROR_ACCESS_DENIED):
        fThrowLastError("AssignProcessToJobObject(%s, %s)" % (repr(oSelf.__ohJob), repr(ohProcess)));
    finally:
      # Always release the process handle, even when an error is thrown.
      if not oKernel32.CloseHandle(ohProcess):
        fThrowLastError("CloseHandle(%s)" % (repr(ohProcess),));
    # We cannot add the process to the job, but maybe it is already added?
    ohProcess = fohOpenForProcessIdAndDesiredAccess(uProcessId, PROCESS_QUERY_LIMITED_INFORMATION);
    try:
      obProcessInJob = BOOLEAN();
      if not oKernel32.IsProcessInJob(ohProcess, oSelf.__ohJob, obProcessInJob.foCreatePointer()):
        fThrowLastError("IsProcessInJob(0x%X, ..., ...)" % (ohProcess,));
      return obProcessInJob != 0;
    finally:
      if not oKernel32.CloseHandle(ohProcess):
        fThrowLastError("CloseHandle(0x%X)" % (ohProcess,));
  def __foQueryExtendedLimitInformation(oSelf):
    # Query the job's extended limit information structure from the kernel.
    oExtendedLimitInformation = JOBOBJECT_EXTENDED_LIMIT_INFORMATION();
    odwReturnLength = DWORD();
    oKernel32 = foLoadKernel32DLL();
    if not oKernel32.QueryInformationJobObject(
      oSelf.__ohJob, # hJob
      JobObjectExtendedLimitInformation, # JobObjectInfoClass
      LPVOID(oExtendedLimitInformation, bCast = True), # lpJobObjectInfo
      oExtendedLimitInformation.fuGetSize(), # cbJobObjectInfoLength,
      odwReturnLength.foCreatePointer(), # lpReturnLength
    ):
      fThrowLastError("QueryInformationJobObject(hJob=%s, JobObjectInfoClass=0x%X, lpJobObjectInfo=0x%X, cbJobObjectInfoLength=0x%X, lpReturnLength=0x%X)" % (
        repr(oSelf.__ohJob),
        JobObjectExtendedLimitInformation,
        oExtendedLimitInformation.fuGetAddress(),
        oExtendedLimitInformation.fuGetSize(),
        odwReturnLength.fuGetAddress()
      ));
    # The kernel must have written exactly one complete structure.
    assert odwReturnLength == oExtendedLimitInformation.fuGetSize(), \
        "QueryInformationJobObject(hJob=%s, JobObjectInfoClass=0x%X, lpJobObjectInfo=0x%X, cbJobObjectInfoLength=0x%X, lpReturnLength=0x%X) => wrote 0x%X bytes" % (
          repr(oSelf.__ohJob),
          JobObjectExtendedLimitInformation,
          oExtendedLimitInformation.fuGetAddress(),
          oExtendedLimitInformation.fuGetSize(),
          odwReturnLength.fuGetAddress(),
          odwReturnLength.fuGetValue()
        );
    return oExtendedLimitInformation;
  def __fSetExtendedLimitInformation(oSelf, oExtendedLimitInformation):
    # Write a (possibly modified) extended limit information structure back.
    oKernel32 = foLoadKernel32DLL();
    if not oKernel32.SetInformationJobObject(
      oSelf.__ohJob, # hJob
      JobObjectExtendedLimitInformation, # JobObjectInfoClass
      LPVOID(oExtendedLimitInformation, bCast = True), # lpJobObjectInfo
      oExtendedLimitInformation.fuGetSize(), # cbJobObjectInfoLength,
    ):
      fThrowLastError("SetInformationJobObject(hJob=0x%X, JobObjectInfoClass=0x%X, lpJobObjectInfo=0x%X, cbJobObjectInfoLength=0x%X)" % \
          (oSelf.__ohJob, JobObjectExtendedLimitInformation, oExtendedLimitInformation.fuGetAddress(),
          oExtendedLimitInformation.fuGetSize()));
  def fSetMaxProcessMemoryUse(oSelf, uMemoryUseInBytes):
    # Set the per-process memory limit and enable its limit flag.
    oExtendedLimitInformation = oSelf.__foQueryExtendedLimitInformation();
    oExtendedLimitInformation.ProcessMemoryLimit = int(uMemoryUseInBytes);
    oExtendedLimitInformation.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_PROCESS_MEMORY;
    oSelf.__fSetExtendedLimitInformation(oExtendedLimitInformation);
  def fSetMaxTotalMemoryUse(oSelf, uMemoryUseInBytes):
    # Set the job-wide memory limit and enable its limit flag.
    oExtendedLimitInformation = oSelf.__foQueryExtendedLimitInformation();
    oExtendedLimitInformation.JobMemoryLimit = int(uMemoryUseInBytes);
    oExtendedLimitInformation.BasicLimitInformation.LimitFlags |= JOB_OBJECT_LIMIT_JOB_MEMORY;
    oSelf.__fSetExtendedLimitInformation(oExtendedLimitInformation);
  def fuGetMaxProcessMemoryUse(oSelf):
    # NOTE(review): despite the name this returns PeakProcessMemoryUsed
    # (peak usage so far), not the configured ProcessMemoryLimit -- confirm
    # against callers before renaming or changing.
    oExtendedLimitInformation = oSelf.__foQueryExtendedLimitInformation();
    return int(oExtendedLimitInformation.PeakProcessMemoryUsed);
  def fuGetMaxTotalMemoryUse(oSelf):
    # NOTE(review): returns PeakJobMemoryUsed, not JobMemoryLimit -- confirm.
    oExtendedLimitInformation = oSelf.__foQueryExtendedLimitInformation();
    return int(oExtendedLimitInformation.PeakJobMemoryUsed);
| 49.934579 | 164 | 0.763242 | 396 | 5,343 | 10.126263 | 0.305556 | 0.011222 | 0.017456 | 0.026933 | 0.494015 | 0.440648 | 0.386783 | 0.308479 | 0.230175 | 0.214464 | 0 | 0.011866 | 0.148231 | 5,343 | 106 | 165 | 50.40566 | 0.86926 | 0.039117 | 0 | 0.357895 | 0 | 0.031579 | 0.113042 | 0.071652 | 0 | 0 | 0 | 0 | 0.021053 | 1 | 0.084211 | false | 0 | 0.073684 | 0 | 0.221053 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5eafe280d36c9d0913ae120c305742245a644cb5 | 5,036 | py | Python | tests/timelog.py | cmanley/viewvc | 18ce398586ff99ee13ac64f85c205efdf9c23bad | [
"BSD-2-Clause"
] | 2 | 2015-04-03T14:15:48.000Z | 2019-08-06T07:09:58.000Z | tests/timelog.py | cmanley/viewvc | 18ce398586ff99ee13ac64f85c205efdf9c23bad | [
"BSD-2-Clause"
] | null | null | null | tests/timelog.py | cmanley/viewvc | 18ce398586ff99ee13ac64f85c205efdf9c23bad | [
"BSD-2-Clause"
] | 1 | 2022-01-11T13:02:46.000Z | 2022-01-11T13:02:46.000Z |
import time
import profile
from vclib.ccvs import rcsparse
import viewvc
try:
import tparse
except ImportError:
tparse = None
def lines_changed(delta):
  """Count lines added and deleted in an RCS delta text.

  `delta` is a sequence of edit commands of the form "a<line> <count>\n" or
  "d<line> <count>\n"; an "a" command is followed by <count> lines of new
  text, which are skipped over (a "d" command carries no text).

  Returns a tuple (added, deleted).

  Improvements over the original: the unused `line = int(...)` computation
  was removed, and the skip loop uses `range` instead of a manual countdown.
  """
  added = 0
  deleted = 0
  idx = 0
  size = len(delta)
  while idx < size:
    op = delta[idx]
    i = delta.find(' ', idx + 1)
    j = delta.find('\n', i + 1)
    count = int(delta[i+1:j])
    idx = j + 1
    if op == 'd':
      deleted = deleted + count
    else:  # 'a' for adding text
      added = added + count
      # skip the `count` lines of inserted text that follow the command
      for _ in range(count):
        nl = delta.find('\n', idx)
        assert nl > 0, 'missing a newline in the delta in the RCS file'
        idx = nl + 1
  return added, deleted
class FetchSink(rcsparse.Sink):
  # rcsparse sink that collects the head revision, principal branch and tags,
  # and builds viewvc.LogEntry objects with per-revision line-change counts
  # computed from the delta texts via lines_changed().
  def __init__(self, which_rev=None):
    # which_rev: if given, parse_completed() narrows .revs to that revision.
    self.head = self.branch = ''
    self.tags = { }
    self.meta = { }       # revision -> (timestamp, author, state)
    self.revs = [ ]       # LogEntry objects in the order seen
    self.base = { }       # revision -> its predecessor revision
    self.entries = { }    # revision -> LogEntry, for random access
    self.which = which_rev
  def set_head_revision(self, revision):
    self.head = revision
  def set_principal_branch(self, branch_name):
    self.branch = branch_name
  def define_tag(self, name, revision):
    self.tags[name] = revision
  def define_revision(self, revision, timestamp, author, state,
                      branches, next):
    # Record metadata and the predecessor relationship for `next` and each
    # branch head, used later to attribute reverse deltas.
    self.meta[revision] = (timestamp, author, state)
    self.base[next] = revision
    for b in branches:
      self.base[b] = revision
  def set_revision_info(self, revision, log, text):
    timestamp, author, state = self.meta[revision]
    entry = viewvc.LogEntry(revision, int(timestamp) - time.timezone, author,
                            state, None, log)
    # .revs is "order seen" and .entries is for random access
    self.revs.append(entry)
    self.entries[revision] = entry
    if revision != self.head:
      added, deleted = lines_changed(text)
      if revision.count('.') == 1:
        # on the trunk. reverse delta.
        changed = '+%d -%d' % (deleted, added)
        self.entries[self.base[revision]].changed = changed
      else:
        # on a branch. forward delta.
        changed = '+%d -%d' % (added, deleted)
        self.entries[revision].changed = changed
  def parse_completed(self):
    # If a single revision was requested, keep only that entry.
    if self.which:
      self.revs = [ self.entries[self.which] ]
def fetch_log2(full_name, which_rev=None):
  # Parse the RCS file with the pure-Python rcsparse module.
  # Returns (head revision, principal branch, tags dict, list of LogEntry).
  sink = FetchSink(which_rev)
  rcsparse.parse(open(full_name, 'rb'), sink)
  return sink.head, sink.branch, sink.tags, sink.revs
def fetch_log3(full_name, which_rev=None):
  # Same as fetch_log2, but using the optional tparse module (see the
  # guarded import at the top of the file); only callable when tparse is
  # available.
  sink = FetchSink(which_rev)
  tparse.parse(full_name, sink)
  return sink.head, sink.branch, sink.tags, sink.revs
def compare_data(d1, d2):
  # Compare two (head, branch, tags, revs) tuples and print any differences.
  # NOTE(review): Python 2 code (print statements, cmp-style sort).
  if d1[:3] != d2[:3]:
    print 'd1:', d1[:3]
    print 'd2:', d2[:3]
    return
  if len(d1[3]) != len(d2[3]):
    print 'len(d1[3])=%d len(d2[3])=%d' % (len(d1[3]), len(d2[3]))
    return
  # Sort both LogEntry lists by revision so they can be compared pairwise.
  def sort_func(e, f):
    return cmp(e.rev, f.rev)
  d1[3].sort(sort_func)
  d2[3].sort(sort_func)
  import pprint
  for i in range(len(d1[3])):
    # LogEntry instances are compared by attribute dict, not identity.
    if vars(d1[3][i]) != vars(d2[3][i]):
      pprint.pprint((i, vars(d1[3][i]), vars(d2[3][i])))
def compare_fetch(full_name, which_rev=None):
  # Fetch the same log via the external-tools path and the parser module(s),
  # then print any differences between the results.
  # d1 and d2 are:
  # ( HEAD revision, branch name, TAGS { name : revision }, [ LogEntry ] )
  d1 = viewvc.fetch_log(full_name, which_rev)
  d2 = fetch_log2(full_name, which_rev)
  print 'comparing external tools vs a parser module:'
  compare_data(d1, d2)
  if tparse:
    # tparse is optional; only compare against it when the import succeeded.
    d2 = fetch_log3(full_name, which_rev)
    print 'comparing external tools vs the tparse module:'
    compare_data(d1, d2)
def compare_many(files):
  # Run compare_fetch over each RCS file in `files`.
  for file in files:
    print file, '...'
    compare_fetch(file)
def time_stream(stream_class, filename, n=10):
  # Time reading every token from `filename` using `stream_class`,
  # averaged over n runs; prints the mean seconds per run.
  d1 = d2 = d3 = d4 = 0  # NOTE(review): unused leftovers
  t = time.time()
  for i in range(n):
    ts = stream_class(open(filename, 'rb'))
    while ts.get() is not None:
      pass
  t = time.time() - t
  print t/n
def time_fetch(full_name, which_rev=None, n=1):
  # Benchmark viewvc.fetch_log against fetch_log2 over n runs each,
  # discarding the fastest/slowest 5% and printing mean and range for both.
  times1 = [ None ] * n
  times2 = [ None ] * n
  for i in range(n):
    t = time.time()
    viewvc.fetch_log(full_name, which_rev)
    times1[i] = time.time() - t
  for i in range(n):
    t = time.time()
    fetch_log2(full_name, which_rev)
    times2[i] = time.time() - t
  # Trim outliers: keep the middle ~90% of sorted timings.
  times1.sort()
  times2.sort()
  i1 = int(n*.05)
  i2 = int(n*.95)+1
  times1 = times1[i1:i2]
  times2 = times2[i1:i2]
  # Mean of the remaining samples (Python 2 builtin reduce).
  t1 = reduce(lambda x,y: x+y, times1, 0) / len(times1)
  t2 = reduce(lambda x,y: x+y, times2, 0) / len(times2)
  print "t1=%.4f (%.4f .. %.4f)  t2=%.4f (%.4f .. %.4f)" % \
        (t1, times1[0], times1[-1], t2, times2[0], times2[-1])
def profile_stream(stream_class, filename, n=20):
  # Profile n full token-stream reads of `filename` and print the stats.
  p = profile.Profile()
  def many_calls(filename, n):
    for i in xrange(n):
      ts = stream_class(open(filename, 'rb'))
      while ts.get() is not None:
        pass
  p.runcall(many_calls, filename, n)
  p.print_stats()
def profile_fetch(full_name, which_rev=None, n=10):
  # Profile n runs of fetch_log2 on the given RCS file and print the stats.
  p = profile.Profile()
  def many_calls(full_name, which_rev, n):
    for i in xrange(n):
      fetch_log2(full_name, which_rev)
  p.runcall(many_calls, full_name, which_rev, n)
  p.print_stats()
| 27.977778 | 77 | 0.616958 | 768 | 5,036 | 3.941406 | 0.197917 | 0.044929 | 0.055831 | 0.068715 | 0.319789 | 0.278824 | 0.200198 | 0.146019 | 0.121573 | 0.064751 | 0 | 0.032208 | 0.235504 | 5,036 | 179 | 78 | 28.134078 | 0.754026 | 0.046465 | 0 | 0.201342 | 0 | 0 | 0.051742 | 0 | 0 | 0 | 0 | 0 | 0.006711 | 0 | null | null | 0.013423 | 0.04698 | null | null | 0.080537 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5eb3c974fa92731f0fdca10f3d676b0f6e0ffd2f | 5,664 | py | Python | users/models.py | pnwclw/cyfmazyr | dfeca513c7334335426d226ec3834af598b08b8c | [
"MIT"
] | 1 | 2020-07-18T11:20:29.000Z | 2020-07-18T11:20:29.000Z | users/models.py | panwaclaw/cyfmazyr | dfeca513c7334335426d226ec3834af598b08b8c | [
"MIT"
] | 8 | 2020-05-24T14:08:12.000Z | 2021-09-08T02:03:52.000Z | users/models.py | pnwclw/cyfmazyr | dfeca513c7334335426d226ec3834af598b08b8c | [
"MIT"
] | 1 | 2020-05-24T12:24:40.000Z | 2020-05-24T12:24:40.000Z | from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext as _
from simple_history.models import HistoricalRecords
from phonenumber_field.modelfields import PhoneNumberField
from internals.models import School, University
from .templatetags.users import device, location
# (stored value, translated label) pairs shared by the sex CharFields below.
SEX_CHOICES = [
    ('male', _('Male')),
    ('female', _('Female'))
]
class Parent(models.Model):
    """A student's parent/guardian with job and contact details."""

    class Meta:
        verbose_name = _('Parent')
        verbose_name_plural = _('Parents')

    sex = models.CharField(max_length=128, choices=SEX_CHOICES)
    last_name = models.CharField(max_length=128, verbose_name=_('Last Name'))
    first_name = models.CharField(max_length=128, verbose_name=_('First Name'))
    middle_name = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('Middle Name'))
    job = models.CharField(max_length=128, verbose_name=_('Job'))
    phone_number = PhoneNumberField(verbose_name=_('Parent Phone Number'))

    def __str__(self):
        return f"{self.get_full_name()}, {self.job}, {self.phone_number}"

    def get_full_name(self):
        # NOTE(review): renders the literal string "None" when middle_name
        # is unset (it is nullable) — confirm whether that is acceptable.
        return f"{self.last_name} {self.first_name} {self.middle_name}"
class User(AbstractUser):
    """Site account extending Django's AbstractUser with profile data
    (sex, phone, school/class, photo, parents, audit history)."""

    class Meta:
        verbose_name = _('User')
        verbose_name_plural = _('Users')

    GROUP_CHOICES = [
        ('junior', _('Junior')),
        ('middle', _('Middle')),
        ('senior', _('Senior')),
    ]

    def get_photo_upload_path(instance, filename):
        """``upload_to`` callable: store profile photos under
        ``profiles/<user id>/`` with a timestamped name, preserving only the
        uploaded file's extension."""
        ext = filename.split('.')[-1]
        dt = timezone.now()
        filename = f"{dt.year}-{dt.month:02d}-{dt.day:02d}-{dt.hour:02d}-{dt.minute:02d}-{dt.second:02d}.{ext}"
        # BUG FIX: the timestamped name built above was previously discarded
        # and a constant placeholder path was returned, so every upload for a
        # user mapped to the same storage path.
        return f"profiles/{instance.id}/{filename}"

    sex = models.CharField(max_length=128, choices=SEX_CHOICES, verbose_name=_('Sex'))
    middle_name = models.CharField(max_length=128, null=True, blank=True, verbose_name=_('Middle Name'))
    phone_number = PhoneNumberField(verbose_name=_('Phone Number'))
    group = models.CharField(default='junior', max_length=20, choices=GROUP_CHOICES, verbose_name=_('Group'))
    birthday = models.DateField(default=timezone.now, verbose_name=_('Birthday'))
    parents = models.ManyToManyField(Parent)
    school = models.ForeignKey(School, null=True, on_delete=models.SET_NULL, verbose_name=_('School'))
    photo = models.ImageField(upload_to=get_photo_upload_path, null=True, blank=True, verbose_name=_('Profile Photo'))
    klass = models.PositiveIntegerField(null=True, blank=True,
                                        choices=[(i, str(i)) for i in range(1, 12)], verbose_name=_('Class'))
    symbol = models.CharField(max_length=1, null=True, blank=True, verbose_name=_('Class Symbol'))
    history = HistoricalRecords()

    def get_full_name(self):
        # NOTE(review): renders "None" for users without a middle name.
        return f"{self.last_name} {self.first_name} {self.middle_name}"

    def __str__(self):
        return f"{self.get_full_name()} (ID: {self.id}, {self.phone_number})"
class Profile(models.Model):
    """Links a site User to their Telegram account."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, parent_link=True, verbose_name=_('User'))
    telegram_id = models.IntegerField()

    def __str__(self):
        # NOTE(review): raises AttributeError when user is NULL (field is nullable)
        return self.user.get_full_name()
class Student(models.Model):
    """University enrollment record attached one-to-one to a User."""

    user = models.OneToOneField(User, on_delete=models.CASCADE, null=True, parent_link=True, verbose_name=_('User'))
    university = models.ForeignKey(University, null=True, on_delete=models.SET_NULL, verbose_name=_('University'))
    admission_year = models.PositiveIntegerField(verbose_name=_('Admission Year'))
class SessionManager(models.Manager):
    """Manager for the Session model below; mirrors the manager used by
    django.contrib.sessions' database backend."""

    use_in_migrations = True

    def encode(self, session_dict):
        """
        Returns the given session dictionary serialized and encoded as a string.
        """
        # SessionStore is imported at the bottom of this module to avoid a
        # circular import.
        return SessionStore().encode(session_dict)

    def save(self, session_key, session_dict, expire_date):
        # Persist (or remove) the row for session_key; empty session data
        # means the session should be dropped.
        s = self.model(session_key, self.encode(session_dict), expire_date)
        if session_dict:
            s.save()
        else:
            s.delete()  # Clear sessions with no data.
        return s
class Session(models.Model):
    """
    Session objects containing user session information.

    Django provides full support for anonymous sessions. The session
    framework lets you store and retrieve arbitrary data on a
    per-site-visitor basis. It stores data on the server side and
    abstracts the sending and receiving of cookies. Cookies contain a
    session ID -- not the data itself.

    Additionally this session object providers the following properties:
    ``user``, ``user_agent`` and ``ip``.
    """

    class Meta:
        verbose_name = _('session')
        verbose_name_plural = _('sessions')

    session_key = models.CharField(_('session key'), max_length=40, primary_key=True)
    session_data = models.TextField(_('session data'))
    expire_date = models.DateTimeField(_('expiry date'), db_index=True)
    # extra bookkeeping beyond django.contrib.sessions: owner, client info
    user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
    user_agent = models.CharField(null=True, blank=True, max_length=200)
    last_activity = models.DateTimeField(auto_now=True)
    ip = models.GenericIPAddressField(null=True, blank=True, verbose_name='IP')
    objects = SessionManager()

    def __str__(self):
        return f"Session {self.session_key} (User: {self.user}, Location: {location(self.ip)}, Device: {device(self.user_agent)}, Expires: {self.expire_date})"

    def get_decoded(self):
        # Deserialize the stored session payload back into a dict.
        return SessionStore(None, None).decode(self.session_data)
# At bottom to avoid circular import
from .backends.db import SessionStore # noqa: E402 isort:skip
| 38.27027 | 159 | 0.69827 | 716 | 5,664 | 5.297486 | 0.27514 | 0.072502 | 0.037965 | 0.05062 | 0.271289 | 0.24097 | 0.218824 | 0.208806 | 0.18666 | 0.123912 | 0 | 0.009852 | 0.175671 | 5,664 | 147 | 160 | 38.530612 | 0.802527 | 0.106992 | 0 | 0.16129 | 0 | 0.021505 | 0.155943 | 0.038685 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107527 | false | 0 | 0.096774 | 0.075269 | 0.752688 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5eb834e28210f09f415768f60fec78d4bb5c4bb0 | 984 | py | Python | core/data.py | mohamadnosratian/Pricer | a19fddc526add55385abd9569c0d3bdbd0cb79c4 | [
"MIT"
] | null | null | null | core/data.py | mohamadnosratian/Pricer | a19fddc526add55385abd9569c0d3bdbd0cb79c4 | [
"MIT"
] | 1 | 2020-11-29T15:26:47.000Z | 2020-11-29T15:26:47.000Z | core/data.py | mohamadnosratian/Pricer | a19fddc526add55385abd9569c0d3bdbd0cb79c4 | [
"MIT"
] | null | null | null | import TmConv
import time
class Data():
    """Tracks one price feed and reformats its readings for display."""

    def __init__(self, content):
        # content: provider object exposing Update() -> (price, name)
        self.content = content
        self.corrent = '-'   # last observed price (sic: "current")
        self.object = {}     # latest snapshot: name / price / jalali timestamp

    def update(self):
        """Poll the provider and refresh the snapshot when the price changed."""
        price, title = self.content.Update()
        if price == self.corrent:
            return
        self.corrent = price
        self.object = {
            "name": title,
            "corrent": self.corrent,
            "date": self.time_date(),
        }

    def time_date(self):
        """Current local time as [jalali year, month, day, hour, min, sec]."""
        now = time.localtime
        [jy, jm, jd] = TmConv.gregorian_to_jalali(
            now().tm_year, now().tm_mon, now().tm_mday)
        return [jy, jm, jd, now().tm_hour, now().tm_min, now().tm_sec]

    def rearange(self, digits):
        """Insert commas after every 3 characters, grouping from the LEFT
        (so "1234" becomes "123,4", not "1,234")."""
        groups = [digits[i:i + 3] for i in range(0, len(digits), 3)]
        return ",".join(groups)

    def arange(self, text):
        """Parse a comma-grouped string back into an int."""
        return int(text.replace(',', ''))
| 24.6 | 88 | 0.505081 | 125 | 984 | 3.848 | 0.384 | 0.162162 | 0.18711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00624 | 0.348577 | 984 | 39 | 89 | 25.230769 | 0.74415 | 0 | 0 | 0 | 0 | 0 | 0.018293 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16129 | false | 0 | 0.064516 | 0.032258 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5eb8db949bc6779860bde61b3f9a676998243338 | 8,002 | py | Python | src/sage/categories/super_modules.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/categories/super_modules.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/categories/super_modules.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | r"""
Super modules
"""
#*****************************************************************************
# Copyright (C) 2015 Travis Scrimshaw <tscrim at ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.categories.category_types import Category_over_base_ring
from sage.categories.covariant_functorial_construction import CovariantConstructionCategory
# Note, a commutative algebra is not a commutative super algebra,
# therefore the following whitelist.
# Axioms that are safe to carry over when building the Super version of a
# category (see SuperModulesCategory.default_super_categories below).
axiom_whitelist = frozenset(["Facade", "Finite", "Infinite",
                             "FiniteDimensional", "Connected", "WithBasis",
                             "FinitelyGeneratedAsLambdaBracketAlgebra",
                             # "Commutative", "Cocommutative",
                             "Supercommutative", "Supercocommutative",
                             "Associative", "Inverse", "Unital", "Division",
                             "AdditiveCommutative", "AdditiveAssociative",
                             "AdditiveInverse", "AdditiveUnital",
                             "NoZeroDivisors", "Distributive"])
class SuperModulesCategory(CovariantConstructionCategory, Category_over_base_ring):
    @classmethod
    def default_super_categories(cls, category, *args):
        """
        Return the default super categories of `F_{Cat}(A,B,...)` for
        `A,B,...` parents in `Cat`.

        INPUT:

        - ``cls`` -- the category class for the functor `F`
        - ``category`` -- a category `Cat`
        - ``*args`` -- further arguments for the functor

        OUTPUT:

        A join category.

        This implements the property that subcategories constructed by
        the set of whitelisted axioms is a subcategory.

        EXAMPLES::

            sage: HopfAlgebras(ZZ).WithBasis().FiniteDimensional().Super() # indirect doctest
            Category of finite dimensional super hopf algebras with basis over Integer Ring
        """
        # Only whitelisted axioms survive the Super construction (the module
        # level ``axiom_whitelist`` lists them).
        axioms = axiom_whitelist.intersection(category.axioms())
        C = super(SuperModulesCategory, cls).default_super_categories(category, *args)
        return C._with_axioms(axioms)

    def __init__(self, base_category):
        """
        EXAMPLES::

            sage: C = Algebras(QQ).Super()
            sage: C
            Category of super algebras over Rational Field
            sage: C.base_category()
            Category of algebras over Rational Field
            sage: sorted(C.super_categories(), key=str)
            [Category of graded algebras over Rational Field,
             Category of super modules over Rational Field]

            sage: AlgebrasWithBasis(QQ).Super().base_ring()
            Rational Field
            sage: HopfAlgebrasWithBasis(QQ).Super().base_ring()
            Rational Field
        """
        # Category_over_base_ring needs the base ring, taken from the
        # category being "super-ized".
        super(SuperModulesCategory, self).__init__(base_category, base_category.base_ring())

    # Name of the functorial construction this category class implements.
    _functor_category = "Super"

    def _repr_object_names(self):
        """
        EXAMPLES::

            sage: AlgebrasWithBasis(QQ).Super()  # indirect doctest
            Category of super algebras with basis over Rational Field
        """
        return "super {}".format(self.base_category()._repr_object_names())
class SuperModules(SuperModulesCategory):
    r"""
    The category of super modules.

    An `R`-*super module* (where `R` is a ring) is an `R`-module `M` equipped
    with a decomposition `M = M_0 \oplus M_1` into two `R`-submodules
    `M_0` and `M_1` (called the *even part* and the *odd part* of `M`,
    respectively).

    Thus, an `R`-super module automatically becomes a `\ZZ / 2 \ZZ`-graded
    `R`-module, with `M_0` being the degree-`0` component and `M_1` being the
    degree-`1` component.

    EXAMPLES::

        sage: Modules(ZZ).Super()
        Category of super modules over Integer Ring
        sage: Modules(ZZ).Super().super_categories()
        [Category of graded modules over Integer Ring]

    The category of super modules defines the super structure which
    shall be preserved by morphisms::

        sage: Modules(ZZ).Super().additional_structure()
        Category of super modules over Integer Ring

    TESTS::

        sage: TestSuite(Modules(ZZ).Super()).run()
    """
    def super_categories(self):
        """
        EXAMPLES::

            sage: Modules(ZZ).Super().super_categories()
            [Category of graded modules over Integer Ring]

        Nota bene::

            sage: Modules(QQ).Super()
            Category of super modules over Rational Field
            sage: Modules(QQ).Super().super_categories()
            [Category of graded modules over Rational Field]
        """
        # every super module is in particular a Z/2Z-graded module
        return [self.base_category().Graded()]

    def extra_super_categories(self):
        r"""
        Adds :class:`VectorSpaces` to the super categories of ``self`` if
        the base ring is a field.

        EXAMPLES::

            sage: Modules(QQ).Super().extra_super_categories()
            [Category of vector spaces over Rational Field]
            sage: Modules(ZZ).Super().extra_super_categories()
            []

        This makes sure that ``Modules(QQ).Super()`` returns an
        instance of :class:`SuperModules` and not a join category of
        an instance of this class and of ``VectorSpaces(QQ)``::

            sage: type(Modules(QQ).Super())
            <class 'sage.categories.super_modules.SuperModules_with_category'>

        .. TODO::

            Get rid of this workaround once there is a more systematic
            approach for the alias ``Modules(QQ)`` -> ``VectorSpaces(QQ)``.
            Probably the latter should be a category with axiom, and
            covariant constructions should play well with axioms.
        """
        # imported locally to avoid import cycles at module load time
        from sage.categories.modules import Modules
        from sage.categories.fields import Fields
        base_ring = self.base_ring()
        if base_ring in Fields():
            return [Modules(base_ring)]
        else:
            return []

    class ParentMethods:
        pass

    class ElementMethods:
        def is_even_odd(self):
            """
            Return ``0`` if ``self`` is an even element or ``1``
            if an odd element.

            .. NOTE::

                The default implementation assumes that the even/odd is
                determined by the parity of :meth:`degree`.

                Overwrite this method if the even/odd behavior is desired
                to be independent.

            EXAMPLES::

                sage: cat = Algebras(QQ).WithBasis().Super()
                sage: C = CombinatorialFreeModule(QQ, Partitions(), category=cat)
                sage: C.degree_on_basis = sum
                sage: C.basis()[2,2,1].is_even_odd()
                1
                sage: C.basis()[2,2].is_even_odd()
                0
            """
            return self.degree() % 2

        def is_even(self):
            """
            Return if ``self`` is an even element.

            EXAMPLES::

                sage: cat = Algebras(QQ).WithBasis().Super()
                sage: C = CombinatorialFreeModule(QQ, Partitions(), category=cat)
                sage: C.degree_on_basis = sum
                sage: C.basis()[2,2,1].is_even()
                False
                sage: C.basis()[2,2].is_even()
                True
            """
            return self.is_even_odd() == 0

        def is_odd(self):
            """
            Return if ``self`` is an odd element.

            EXAMPLES::

                sage: cat = Algebras(QQ).WithBasis().Super()
                sage: C = CombinatorialFreeModule(QQ, Partitions(), category=cat)
                sage: C.degree_on_basis = sum
                sage: C.basis()[2,2,1].is_odd()
                True
                sage: C.basis()[2,2].is_odd()
                False
            """
            return self.is_even_odd() == 1
| 35.40708 | 93 | 0.567733 | 852 | 8,002 | 5.226526 | 0.255869 | 0.035931 | 0.026948 | 0.029643 | 0.269481 | 0.208848 | 0.176286 | 0.151583 | 0.121716 | 0.121716 | 0 | 0.006373 | 0.313672 | 8,002 | 225 | 94 | 35.564444 | 0.804443 | 0.596726 | 0 | 0 | 0 | 0 | 0.122921 | 0.018022 | 0 | 0 | 0 | 0.004444 | 0 | 1 | 0.181818 | false | 0.022727 | 0.090909 | 0 | 0.568182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5eba75a29b860145b0023e5e394f13be95f75dca | 539 | py | Python | src/game_objects/projectiles/projectile.py | ozcer/Project-Ooze | 28eb84995f4fa283366e3f04edb7e393d5281ac5 | [
"MIT"
] | 1 | 2018-10-10T02:11:50.000Z | 2018-10-10T02:11:50.000Z | src/game_objects/projectiles/projectile.py | ozcer/Project-Ooze | 28eb84995f4fa283366e3f04edb7e393d5281ac5 | [
"MIT"
] | 29 | 2018-03-16T05:07:18.000Z | 2018-04-03T03:58:32.000Z | src/game_objects/projectiles/projectile.py | ozcer/FlaPy-Bird | 28eb84995f4fa283366e3f04edb7e393d5281ac5 | [
"MIT"
] | 1 | 2018-03-18T00:27:12.000Z | 2018-03-18T00:27:12.000Z | import pygame
from src.game_objects.dynamic import Dynamic
from src.game_objects.foes.foe import Foe
class Projectile(Dynamic):
    """A moving game object that decays once it leaves the visible surface."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def decayable(self):
        """
        Overriding decayable in GameObject
        :return: bool
        """
        # decay as soon as the projectile's rect no longer overlaps the screen
        active_zone = self.game.surface.get_rect()
        return not active_zone.colliderect(self.rect)

    def draw(self):
        # no projectile-specific drawing; defer to Dynamic
        super().draw()

    def update(self):
        # no projectile-specific update; defer to Dynamic
        super().update()
| 21.56 | 53 | 0.612245 | 62 | 539 | 5.112903 | 0.516129 | 0.044164 | 0.069401 | 0.113565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.272727 | 539 | 24 | 54 | 22.458333 | 0.808673 | 0.089054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.307692 | false | 0 | 0.230769 | 0 | 0.692308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ec47955f1ecf4b81bd290a76d77ba6d233be01c | 1,181 | py | Python | backend/keplerapi/api/migrations/0010_auto_20200627_2003.py | ADSPI/kepler | db45471e3428eea93579e48f130f255a1f5974c4 | [
"MIT"
] | 2 | 2020-02-16T15:23:21.000Z | 2020-03-07T12:39:56.000Z | backend/keplerapi/api/migrations/0010_auto_20200627_2003.py | creativepisystem/kepler | 307708666a1913fbb369a57b5fca04b20209929d | [
"MIT"
] | null | null | null | backend/keplerapi/api/migrations/0010_auto_20200627_2003.py | creativepisystem/kepler | 307708666a1913fbb369a57b5fca04b20209929d | [
"MIT"
] | 1 | 2020-05-13T00:07:18.000Z | 2020-05-13T00:07:18.000Z | # Generated by Django 3.0.3 on 2020-06-27 20:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: newest-first default ordering on three
    models, two nullable timestamps on HiredService, and fixed choices for
    Interests.interest. Do not edit by hand."""

    dependencies = [
        ('api', '0009_auto_20200625_2347'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='hiredservice',
            options={'ordering': ['-created_at']},
        ),
        migrations.AlterModelOptions(
            name='person',
            options={'ordering': ['-created_at']},
        ),
        migrations.AlterModelOptions(
            name='service',
            options={'ordering': ['-created_at']},
        ),
        migrations.AddField(
            model_name='hiredservice',
            name='accepted_at',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='hiredservice',
            name='finished_at',
            field=models.DateTimeField(null=True),
        ),
        migrations.AlterField(
            model_name='interests',
            name='interest',
            field=models.CharField(choices=[('1', 'EXEMPLO1'), ('2', 'EXEMPLO2'), ('3', 'EXEMPLO3'), ('0', 'OTHER')], max_length=2),
        ),
    ]
| 28.804878 | 132 | 0.54022 | 101 | 1,181 | 6.19802 | 0.544554 | 0.129393 | 0.148562 | 0.115016 | 0.476038 | 0.4377 | 0.316294 | 0 | 0 | 0 | 0 | 0.04797 | 0.3116 | 1,181 | 40 | 133 | 29.525 | 0.722017 | 0.038103 | 0 | 0.529412 | 1 | 0 | 0.179894 | 0.020282 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.029412 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ec50ef5996e125d5eaf7ab56c50549bb75fe8e9 | 1,184 | py | Python | Scripts/ReverseSearch/social_media.py | balswyan/senior-capstone-fall-2018 | 8740614f0db917bfdc5131095fe566a92b806e73 | [
"MIT"
] | 1 | 2020-03-03T01:01:41.000Z | 2020-03-03T01:01:41.000Z | Scripts/ReverseSearch/social_media.py | balswyan/senior-capstone-fall-2018 | 8740614f0db917bfdc5131095fe566a92b806e73 | [
"MIT"
] | null | null | null | Scripts/ReverseSearch/social_media.py | balswyan/senior-capstone-fall-2018 | 8740614f0db917bfdc5131095fe566a92b806e73 | [
"MIT"
] | null | null | null | import urllib2
from cookielib import CookieJar
import os
import re
import time
import json
# Pretend to be a desktop browser so the social-media sites serve normal pages.
cookies = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies))
opener.addheaders = [('User-agent', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 '
                      '(KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17')]

with open("missing_people.json") as f:
    people = json.load(f)

# Build social-media search URLs for every missing person and print them.
for person in people['results']:
    facebook_profile = 'https://www.facebook.com/search/top/?q=' + person['firstname'] + '%20' + person['lastname']
    facebook_post = 'https://www.facebook.com/search/posts/?q=' + person['firstname'] + '%20' + person['lastname']
    facebook_news = 'https://www.facebook.com/search/str/' + person['firstname'] + '%20' + person['lastname'] + '/links-keyword/stories-news-pivot'
    instagram_tags = 'https://www.instagram.com/explore/tags/' + person['firstname'] + person['lastname']
    twitter_search = 'https://twitter.com/search?q=' + person['firstname'] + '%20' + person['lastname'] + '&src=typd'
    # BUG FIX: a '+' was missing before '?src=hash' (implicit string literal
    # after an expression is a SyntaxError).
    twitter_hashtag = 'https://twitter.com/hashtag/' + person['firstname'] + person['lastname'] + '?src=hash'
    # BUG FIX: the original `print(What ever you want)` placeholder did not
    # compile; emit each constructed URL instead.
    for url in (facebook_profile, facebook_post, facebook_news,
                instagram_tags, twitter_search, twitter_hashtag):
        print(url)
| 47.36 | 145 | 0.686655 | 152 | 1,184 | 5.296053 | 0.5 | 0.111801 | 0.084472 | 0.114286 | 0.270807 | 0.13913 | 0.099379 | 0 | 0 | 0 | 0 | 0.035922 | 0.130068 | 1,184 | 24 | 146 | 49.333333 | 0.745631 | 0 | 0 | 0 | 0 | 0 | 0.435334 | 0.027895 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.3 | null | null | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ec5c3c5ad95f9da38c7ad657235db69b5b006c7 | 354 | py | Python | hackerrank/algorithms/general/kangaroo/kangaroo.py | nicklambourne/hackerrank-solutions | 9536aa16a67325566e6d3ebea5d5f2c5bf12a05d | [
"MIT"
] | null | null | null | hackerrank/algorithms/general/kangaroo/kangaroo.py | nicklambourne/hackerrank-solutions | 9536aa16a67325566e6d3ebea5d5f2c5bf12a05d | [
"MIT"
] | null | null | null | hackerrank/algorithms/general/kangaroo/kangaroo.py | nicklambourne/hackerrank-solutions | 9536aa16a67325566e6d3ebea5d5f2c5bf12a05d | [
"MIT"
] | null | null | null | #!/bin/python3
import sys
def kangaroo(x1, v1, x2, v2):
    """Return "YES" if two kangaroos starting at x1/x2 with per-jump speeds
    v1/v2 ever land on the same spot at the same time, else "NO".

    Solves x1 + t*v1 == x2 + t*v2 for a non-negative integer t.
    """
    if v1 == v2:
        # BUG FIX: equal speeds previously hit a ZeroDivisionError when the
        # start positions were also equal. Same speed -> they meet iff they
        # start together (t = 0).
        return "YES" if x1 == x2 else "NO"
    # the trailing kangaroo must be the faster one, otherwise the gap grows
    if (x1 < x2 and v1 <= v2) or (x2 < x1 and v2 <= v1):
        return "NO"
    # they meet iff the gap is an exact multiple of the closing speed
    if ((x2 - x1) % (v2 - v1)) == 0:
        return "YES"
    return "NO"
# HackerRank-style driver: read "x1 v1 x2 v2" from stdin and print the answer.
x1, v1, x2, v2 = input().strip().split(' ')
x1, v1, x2, v2 = [int(x1), int(v1), int(x2), int(v2)]
result = kangaroo(x1, v1, x2, v2)
print(result)
| 22.125 | 56 | 0.519774 | 61 | 354 | 3.016393 | 0.377049 | 0.086957 | 0.130435 | 0.173913 | 0.173913 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129771 | 0.259887 | 354 | 15 | 57 | 23.6 | 0.572519 | 0.036723 | 0 | 0.181818 | 0 | 0 | 0.023529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0 | 0.454545 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ec664692a8004415ba033ef9d8d62ed175a5bc8 | 2,115 | py | Python | tests/techniques/test_train_policy_gradient.py | alphagamatoe/AlphaToe | a7cd0969aa46dfd151a22ed8b9aec1a894747b17 | [
"MIT"
] | 172 | 2016-09-27T12:23:10.000Z | 2022-01-19T09:52:11.000Z | tests/techniques/test_train_policy_gradient.py | afcarl/AlphaToe | 1220f4f883dbbd7ac1d84092bdaf04ca18a4dbc2 | [
"MIT"
] | 13 | 2018-07-19T09:42:28.000Z | 2018-09-25T15:08:05.000Z | tests/techniques/test_train_policy_gradient.py | afcarl/AlphaToe | 1220f4f883dbbd7ac1d84092bdaf04ca18a4dbc2 | [
"MIT"
] | 63 | 2016-09-27T13:00:51.000Z | 2021-04-04T04:34:37.000Z | import functools
from unittest import TestCase
from common.base_game_spec import BaseGameSpec
from common.network_helpers import create_network
from games.tic_tac_toe import TicTacToeGameSpec
from games.tic_tac_toe_x import TicTacToeXGameSpec
from techniques.train_policy_gradient import train_policy_gradients
class _VerySimpleGameSpec(BaseGameSpec):
    """Minimal 2-cell game used to smoke-test training: the side occupying
    position 0 is the winner."""

    def new_board(self):
        # two empty cells; 0 means unoccupied
        return [0, 0]

    def apply_move(self, board_state, move, side):
        # mutates the board in place and returns it
        board_state[move] = side
        return board_state

    def has_winner(self, board_state):
        # whichever side sits in cell 0 wins (0 = no winner yet)
        return board_state[0]

    def __init__(self):
        # deliberately(?) skips BaseGameSpec.__init__ — TODO confirm the base
        # class needs no setup for this stub
        pass

    def available_moves(self, board_state):
        return [i for i, x in enumerate(board_state) if x == 0]

    def board_dimensions(self):
        return 2,
class TestTrainPolicyGradient(TestCase):
    """Integration tests: policy-gradient training should reach a given win
    rate. NOTE: these train real networks (thousands of games), so they are
    slow and the win-rate thresholds are stochastic."""

    def test_learn_simple_game(self):
        # tiny game + tiny network: training should nearly always win
        game_spec = _VerySimpleGameSpec()
        create_model_func = functools.partial(create_network, 2, (4,))
        variables, win_rate = train_policy_gradients(game_spec, create_model_func, None,
                                                     learn_rate=0.1,
                                                     number_of_games=1000, print_results_every=100,
                                                     batch_size=20,
                                                     randomize_first_player=False)
        self.assertGreater(win_rate, 0.9)

    def test_tic_tac_toe(self):
        # full tic-tac-toe: expect better-than-random play after 60k games
        game_spec = TicTacToeGameSpec()
        create_model_func = functools.partial(create_network, game_spec.board_squares(), (100, 100, 100,))
        variables, win_rate = train_policy_gradients(game_spec, create_model_func, None,
                                                     learn_rate=1e-4,
                                                     number_of_games=60000,
                                                     print_results_every=1000,
                                                     batch_size=100,
                                                     randomize_first_player=False)
        self.assertGreater(win_rate, 0.4)
| 39.90566 | 106 | 0.580142 | 225 | 2,115 | 5.111111 | 0.364444 | 0.06087 | 0.052174 | 0.026087 | 0.32 | 0.288696 | 0.288696 | 0.212174 | 0.212174 | 0.125217 | 0 | 0.033186 | 0.358865 | 2,115 | 52 | 107 | 40.673077 | 0.814897 | 0 | 0 | 0.097561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04878 | 1 | 0.195122 | false | 0.02439 | 0.170732 | 0.097561 | 0.536585 | 0.04878 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
5eca0176387a7151273d1f8238c5afd0d8ffbd54 | 36,675 | py | Python | src/telem.py | swharden/Telem-A-Gator | e2fcca77e9ae68ec5d002409e3d46c67ddc8ebb8 | [
"MIT"
] | null | null | null | src/telem.py | swharden/Telem-A-Gator | e2fcca77e9ae68ec5d002409e3d46c67ddc8ebb8 | [
"MIT"
] | null | null | null | src/telem.py | swharden/Telem-A-Gator | e2fcca77e9ae68ec5d002409e3d46c67ddc8ebb8 | [
"MIT"
] | 1 | 2019-06-18T13:58:19.000Z | 2019-06-18T13:58:19.000Z | import time
import os
import glob
import datetime
import numpy
import threading
import subprocess
#import scipy.stats
from PyQt4 import QtCore, QtGui
import matplotlib
matplotlib.use('TkAgg')
matplotlib.rcParams['backend'] = 'TkAgg'
import pylab
def shortenTo(s, maxsize=100):
    """Middle-ellipsize *s* so the kept ends total at most *maxsize* chars.

    Strings no longer than maxsize are returned unchanged; otherwise the
    first and last maxsize//2 characters are joined with "..." (so the
    result is up to maxsize + 3 characters long, as before).
    """
    if len(s) <= maxsize:
        return s
    # BUG FIX: use floor division — under Python 3 `maxsize/2` is a float
    # and float slice indices raise TypeError. `//` behaves identically on
    # Python 2 ints.
    half = maxsize // 2
    return s[:half] + "..." + s[-half:]
def messagebox(title,msg):
    """Show a blocking Qt information dialog (assumes a QApplication is
    already running — the commented lines created a temporary one)."""
    #tempApp = QtGui.QApplication(sys.argv)
    QtGui.QMessageBox.information(QtGui.QDialog(),title,msg)
    #tempApp.exit(0)
def com2lst(s):
    """Split a comma-separated string into a list; a comma-free string
    comes back as a single-element list."""
    return s.split(",") if "," in s else [s]
def ep2dt(ep):
    """convert an epoch time (seconds, any numeric/str type) to a naive
    datetime object in the LOCAL timezone."""
    return datetime.datetime.fromtimestamp(float(ep))
def ep2st(ep):
    """convert epoch seconds to a string-formatted date
    ('YYYY/MM/DD HH:MM:SS', local time)."""
    return dt2st(ep2dt(ep))
def ep2fn(ep):
    """Convert epoch seconds to a filename-safe timestamp
    ('YYYY-MM-DD-HH-MM-SS', dashes only)."""
    return ep2dt(ep).strftime('%Y-%m-%d-%H-%M-%S')
def ep2xl(ep):
    # NOTE(review): incomplete stub — computes dt then discards it and always
    # returns None. Presumably meant to produce an Excel-style date value;
    # TODO finish or remove.
    dt=ep2dt(ep)
def dt2ep(dt):
    """convert a datetime object to epoch seconds (local timezone;
    sub-second precision is dropped by timetuple())."""
    return time.mktime(dt.timetuple())
def dt2st(dt):
    """Format a datetime object as 'YYYY/MM/DD HH:MM:SS'."""
    return "{:%Y/%m/%d %H:%M:%S}".format(dt)
def st2dt(st):
    """Parse a 'YYYY/MM/DD HH:MM:SS' string into a datetime object.

    The str() coercion lets Qt string-like objects through as well —
    presumably why it is there; plain str input is unaffected.
    """
    text = str(st)
    return datetime.datetime.strptime(text, '%Y/%m/%d %H:%M:%S')
def st2ep(st):
    """convert a string-formatted date ('YYYY/MM/DD HH:MM:SS') to epoch
    seconds (local timezone)."""
    st=str(st)
    return dt2ep(st2dt(st))
def stripWhiteSpace(s):
    """Remove leading and trailing SPACE characters (not tabs/newlines,
    matching the original loop's behavior).

    BUG FIX: the old while-loops indexed s[0]/s[-1] and raised IndexError
    for empty or all-space input; str.strip(" ") handles those safely.
    """
    return s.strip(" ")
# module-level registry of spawned command threads
threads=[]

def threadCmd(cmd):
    """Run a shell command via a ThreadCMDs worker.

    NOTE(review): join() immediately after start() blocks until the command
    finishes, so this is effectively synchronous despite the thread.
    """
    global threads
    threads.append(ThreadCMDs())
    threads[-1].cmd=cmd
    threads[-1].start()
    threads[-1].join()
def launchPath(path):
    """Open *path* in Windows Explorer (Windows-only)."""
    cmd="explorer.exe "+os.path.abspath(path)
    threadCmd(cmd)
class ThreadCMDs(threading.Thread):
    """Thread that runs a single external command and captures its output."""

    def __init__(self):
        self.stdout = None    # set by run() to the process's stdout bytes
        self.stderr = None    # set by run() to the process's stderr bytes
        self.cmd = "cmd.exe"  # command line; split on spaces before launch
        threading.Thread.__init__(self)

    def run(self):
        # shell=False with a list argv avoids shell injection, but the naive
        # split() breaks on paths that contain spaces.
        p = subprocess.Popen(self.cmd.split(),
                             shell=False,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.stdout, self.stderr = p.communicate()
class TelemSession:
"""Telemetry conversion and analysis session class.
Load this once, and interact with it accordingly.
"""
    def __init__(self):
        """Create a telemetry session and load the default scheme from disk."""
        self.schemeClear()
        self.dataClear()
        self.log=[]            # accumulated debug messages
        self.printLogLevel=15  # minimum level echoed to console? TODO confirm debug() semantics
        self.secPerLine=10     # each raw text line represents 10 s of data
        self.processing=False
        self.uimain=False      # Qt main-window handle, assigned by the GUI
        self.app=False         # QApplication handle, assigned by the GUI
        self.abortNow=False
        self.schemeLoad()
        #self.status="waiting"
        self.debug("loaded telemetry session class",4)
######################
### SCHEME OPTIONS ###
######################
def scheme2txt(self,scheme,showIt=False):
"""Convert a scheme to text. Optionally print it to console."""
keys=scheme.keys()
keys.sort()
out="# AUTOMATICALLY GENERATED SCHEME:\n"
for key in keys:
val=scheme[key]
if type(val)==str:
val='"'+val+'"'
val=val.replace("\\","/")
out+="%s: %s\n"%(key,val)
return out
    def schemeLoad(self,fname="scheme_default.ini"):
        """load a scheme.ini file and populate the scheme.

        Falls back to generating (and saving) the default scheme when the
        file does not exist.
        """
        self.debug("loading scheme from "+fname,3)
        if fname==None: fname="scheme_default.ini"
        if not os.path.exists(fname):
            self.debug("Default scheme not found!\nWill generate a new one.",5)
            self.schemeCreateDefault()
            self.schemeSave()
            return
        f=open(fname)
        raw=f.readlines()
        f.close()
        for line in raw:
            if len(line)<3: continue          # skip blank/too-short lines
            line=line.replace("\n","")
            if line[0] in [" ","#","\n","\r"]: continue  # skip comments/indented lines
            if not ":" in line: continue
            var,val=line.split(":",1)
            val=stripWhiteSpace(val)
            # SECURITY NOTE(review): eval() on file contents executes
            # arbitrary code if the .ini is untrusted; prefer
            # ast.literal_eval for plain values.
            val=eval(val)
            self.scheme[var]=val
            self.debug("setting [%s] to [%s] (%s)"%(var,val,type(val)))
        self.listAvailable()
        self.schemeRecalculate()
    def schemeSave(self,fname="scheme_default.ini"):
        """save a scheme to a file (ini-style text from scheme2txt)."""
        self.debug("saving scheme to "+fname,3)
        out=self.scheme2txt(self.scheme)
        # NOTE(review): fname is passed where other calls pass an int level —
        # confirm debug()'s second-argument contract.
        self.debug("saving scheme:",fname)
        f=open(fname,'w')
        f.write(out)
        f.close()
    def schemeRecalculate(self):
        """go through and do math for auto-generated fields: expand "all"
        lists, derive binsize (seconds), window spans, and point counts."""
        self.listAvailable()
        try:
            # expand the "all" shorthand into explicit CSV lists
            if self.scheme["animals"]=="all":
                self.scheme["animals"]=",".join(self.animals)
            if self.scheme["features"]=="all":
                self.scheme["features"]=",".join(self.features)
            # binunit: 0=sec, 1=min, 2=hr, 3=day -> binsize in seconds
            if self.scheme["binunit"]==0: self.scheme["binsize"]=int(float(self.scheme["binnum"]))
            if self.scheme["binunit"]==1: self.scheme["binsize"]=int(float(self.scheme["binnum"])*int(60))
            if self.scheme["binunit"]==2: self.scheme["binsize"]=int(float(self.scheme["binnum"])*int(60*60))
            if self.scheme["binunit"]==3: self.scheme["binsize"]=int(float(self.scheme["binnum"])*int(60*60*24))
            if self.scheme["sweep"]==True: #24 hour sweeps
                self.scheme["expSpanSec"]=60*60*24
                self.scheme["baseSpanSec"]=60*60*24
                self.scheme["basePoints"]=int(self.scheme["baseSpanSec"]/self.scheme["binsize"])
                self.scheme["expPoints"]=int(self.scheme["expSpanSec"]/self.scheme["binsize"])
            else:
                # window lengths come from the user-entered date strings
                self.scheme["expSpanSec"]=int(st2ep(self.scheme["expB"])-st2ep(self.scheme["expA"]))
                self.scheme["baseSpanSec"]=int(st2ep(self.scheme["baseB"])-st2ep(self.scheme["baseA"]))
                self.scheme["basePoints"]=int(self.scheme["baseSpanSec"]/self.scheme["binsize"])
                self.scheme["expPoints"]=int(self.scheme["expSpanSec"]/self.scheme["binsize"])
        except Exception:
            # NOTE(review): broad catch — a bad date string or missing key is
            # reduced to one log line, which can hide real config errors.
            self.debug("could not recalculate!",5)
    def schemeGood(self):
        """Returns True if the scheme is valid."""
        # TO DO: actually validate dates, paths, and bin settings —
        # currently every scheme is accepted.
        return True
    def schemeShow(self):
        """Dump every scheme key/value pair through the debug log."""
        # NOTE(review): dict.keys().sort() is Python-2-only; use
        # sorted(self.scheme) when porting to Python 3.
        keys=self.scheme.keys()
        keys.sort()
        for key in keys:
            self.debug("%s = %s"%(key,self.scheme[key]),5)
    def schemeClear(self):
        """Completely clear scheme."""
        self.scheme={}
    def schemeCreateDefault(self):
        """Generate example/demo scheme."""
        # file locations for raw input, converted data, and figures
        self.scheme["location"]=os.path.abspath("./data-npy")
        self.scheme["input"]=os.path.abspath("./data-txt")
        self.scheme["output"]=r"./output"
        # "all" is expanded to explicit lists by schemeRecalculate()
        self.scheme["features"]="all"
        self.scheme["animals"]="all"
        # baseline / experiment time windows and their plot titles
        self.scheme["baseA"]="2012/05/23 19:00:00"
        self.scheme["baseB"]="2012/06/08 19:00:00"
        self.scheme["baseT"]="baseline"
        self.scheme["expA"]="2012/06/08 19:00:00"
        self.scheme["expB"]="2012/06/19 19:00:00"
        self.scheme["expT"]="experiment"
        self.scheme["baseline"]=True
        self.scheme["sweep"]=True
        self.scheme["binnum"]=1
        self.scheme["binunit"]=2 # 0=sec, 1=min, 2=hr, 3=day
        self.scheme["stdev"]=False
        ### FIGURE OPTIONS
        self.scheme["plotPrimary"]=True
        self.scheme["plotSecondary"]=False
        self.scheme["plotErrorBars"]=True
        self.scheme["plotKey"]=0
        self.scheme["plotExperiment"]=True
        self.scheme["plotBaseline"]=True
        self.scheme["plotNormalized"]=False
        ### THE FOLLOWING ARE AUTO-CALCULATED BY schemeRecalculate()
        #self.scheme["binsize"]=None #DO NOT SET THIS!
        #self.scheme["expSpanSec"]=None #DO NOT SET THIS!
        self.schemeRecalculate()
#######################
### DATA CONVERSION ###
#######################
def convert(self):
    """Given a folder of .txt data files, generate npy files.

    Reads every *.txt in scheme["input"] and writes evenly-spaced .npy
    files into scheme["location"] via convertTxt2Npy().
    """
    folderIn=os.path.abspath(self.scheme["input"])
    folderOut=os.path.abspath(self.scheme["location"])
    files=glob.glob(folderIn+"/*.txt")
    for i in range(len(files)):
        if self.uimain and self.app:
            self.uimain.progConvertAnimal.setMaximum(len(files))
            self.uimain.progConvertAnimal.setValue(i+1)
            self.uimain.lblConvertAnimal.setText(os.path.split(files[i])[1])
            self.app.processEvents()
        self.convertTxt2Npy(files[i],folderOut)
    # BUGFIX: these UI updates were unguarded; when running headless
    # (self.uimain is None) they raised AttributeError after conversion.
    if self.uimain and self.app:
        self.uimain.progConvertAnimal.setValue(0)
        self.uimain.progConvertFeature.setValue(0)
        self.uimain.lblConvertAnimal.setText("complete")
        self.uimain.lblConvertFeature.setText("complete")
    messagebox("COMPLETE","file conversion complete!")
def convertTxt2Npy(self,fnameIn,pathOut):
    """Takes an input .txt raw data file and outputs multiple .npy data files.
    ORIGINAL DATA FORMAT:
    For this to work, the export settings in the telemetry analysis software
    have to be configured as such:
    select all data, click export.
    File name: [I].txt (example: T12.txt)
    Time mode: elapsed time (seconds)
    Data format: width=3, precision=3
    checkbox enabled: Import compatible
    OUTPUT:
    Output format is numpy binary files (.npy) of evenly spaced data.
    Each point represents 10 seconds of time.
    Missing data are replaced by numpy.NaN
    """
    filePathIn,fileNameIn=os.path.split(fnameIn)
    self.debug("LOADING: "+fnameIn)
    # NOTE(review): unlike convert(), several UI calls below are NOT
    # guarded by "if self.uimain and self.app" -- this method appears to
    # assume a GUI is present; confirm before headless use.
    self.uimain.lblConvertFeature.setText("loading ...")
    self.app.processEvents()
    f=open(fnameIn)
    raw=f.read()
    f.close()
    raw=raw.split("\n")
    animals=[] #[T5,T5,T5]
    features=[] #[Activity,Diastolic,Heart Rate]
    data=[]
    self.debug("READING DATA")
    for i in range(len(raw)):
        line=raw[i]
        if len(line)<10: continue  # skip blank/too-short lines
        if line[0]=="#": # WE HAVE A HEADER LINE
            if "Time: " in line:
                # recording start time, converted to epoch seconds
                ep_start=st2ep(line.split(": ")[1])
            if "Col: " in line:
                # column header like "T5.Activity,..." -> animal + feature
                animal,feature=line.split(": ")[1].split(",")[0].split(".")
                animals.append(animal)
                features.append(feature)
        else: # WE HAVE A DATA LINE
            data.append(line.split(","))
    self.debug("CONVERTING TO MATRIX")
    self.uimain.lblConvertFeature.setText("converting to matrix ...")
    self.app.processEvents()
    data=numpy.array(data,dtype=float)
    self.debug("RESHAPING DATA")
    self.uimain.lblConvertFeature.setText("reshaping data ...")
    self.app.processEvents()
    # one elapsed-seconds column plus one column per animal/feature pair
    data=numpy.reshape(data,(-1,len(animals)+1))
    data[:,0]=data[:,0]+ep_start #turn time stamps into epoch
    if self.uimain and self.app:
        self.uimain.progConvertFeature.setMaximum(len(features))
        self.app.processEvents()
    for i in range(len(features)):
        if self.uimain and self.app:
            self.uimain.progConvertFeature.setValue(i+1)
            #self.uimain.lblConvertFeature.setText(features[i])
            self.app.processEvents()
        # output file tag: <animal>-<feature>-<startEpoch>-even.npy
        tag="%s-%s-%d"%(animals[i],features[i],ep_start)+"-even.npy"
        fname=os.path.join(pathOut,tag)
        self.debug("CONVERTING TO EVENLY SPACED DATA")
        self.uimain.lblConvertFeature.setText("spacing data ...")
        self.app.processEvents()
        timestamps=data[:,0].astype(int)
        values=data[:,i+1]
        # Python 2 integer division: maps each timestamp to a sample slot
        indices=(timestamps-timestamps[0])/self.secPerLine
        dayData=numpy.empty(indices[-1]+1,dtype=float)
        dayData[:]=numpy.nan  # slots with no sample stay NaN
        dayData[indices]=values
        self.debug("SAVING "+tag)
        self.uimain.lblConvertFeature.setText("saving %s ..."%tag)
        self.app.processEvents()
        numpy.save(fname,dayData)
    return
    # to do
####################
### DATA LOADING ###
####################
def listAvailable(self):
    """returns [animals,features] from scheme["location"].

    Also rebuilds self.animalInfo as [animal, startEp, endEp] entries
    (one per data file) and caches self.animals / self.features.
    """
    animals,features=[],[]
    self.animalInfo=[] #[animal,startEp,endEp]
    fnames=glob.glob(self.scheme["location"]+"/*-even.npy")
    for fname in fnames:
        fn,ft=os.path.split(fname)
        ft=ft.split("-")  # [animal, feature, startEp, "even.npy"]
        if ft[0] not in animals:
            animals.append(ft[0])
        startEp=int(ft[2])
        # BUGFIX: numpy.memmap(fname) maps the raw file bytes (including
        # the .npy header) with dtype uint8, so shape[0] was the file size
        # in BYTES, not the sample count -- endEp was wildly inflated.
        # Memory-map through numpy.load to get the true array length.
        length=numpy.load(fname, mmap_mode='r').shape[0]
        info=[ft[0],startEp,startEp+length*self.secPerLine]
        #self.debug(str(info),5)
        self.animalInfo.append(info)
        if ft[1] not in features: features.append(ft[1])
    self.animals=animals
    self.features=features
    return [animals,features]
def selectedTimes(self):
    """Return [firstEp, lastEp] spanning all data of the selected animals.

    Returns [None, None] when no animal info has been loaded yet; also
    caches the result in self.selectedExtremes when data exists.
    """
    if len(self.animalInfo)==0: return [None,None]
    first=None
    last=None
    selAnimals=com2lst(self.scheme["animals"])
    for info in self.animalInfo:
        if info[0] in selAnimals:
            # idiom fix: compare to None by identity, not equality
            if first is None or info[1]<first: first=info[1]
            if last is None or info[2]>last: last=info[2]
    self.selectedExtremes=[first,last]
    return [first,last]
def loadNpy(self,fname):
    """load a filename of a .npy and return [data,animal,feature,startEp,endEp].
    You probably don't need to call this directly. loadData() calls it."""
    folder,tag=os.path.split(fname)
    self.debug("loading "+tag,2)
    values=numpy.load(fname) # reads the whole array into RAM
    # file tag encodes: <animal>-<feature>-<startEpoch>-<mode>.npy
    animal,feature,startEp,mode=tag.split(".")[0].split("-")
    startEp=int(startEp)
    endEp=startEp+len(values)*self.secPerLine
    return [values,animal,feature,startEp,endEp]
def loadData(self,animal=None,feature=None,location=None,startEpCut=False,endEpCut=False,binsize=False,sweep=False):
    """simple way to get data from animal/feature combo. return [x],[[ys]].
    if binsize is given (sec), binning will occur.
    If startEp and/or endEp are given (epoch), trimming will occur.
    if sweep == False:
        returns [X], [[Y]]
        where x = time epochs
    if sweep == True: (day starts at the time of startEpCut)
        returns [X], [[Y],[Y],[Y]]
        where x = ticks 0-24hr
    UPDATE: returns [xs,data,startX,startX+self.secPerLine2*len(data[0])]
    NOTE: several expressions rely on Python 2 integer division
    (offsetStart, the binning reshape row count) -- porting to
    Python 3 requires // there.
    """
    ### DEMO DATA ###################################
    #startEpCut=st2ep("2012/06/01 19:00:00")
    #endEpCut=st2ep("2012/06/10 19:00:00")
    #binsize=60*60 #in seconds
    #sweep=True
    #################################################
    if location==None:
        location=self.scheme["location"]
    # effective seconds-per-sample after any binning (updated below)
    self.secPerLine2=self.secPerLine
    fnames=glob.glob(location+"/%s-%s*-even.npy"%(animal,feature))
    if len(fnames)==0:
        self.debug("%s - %s does not exist!"%(animal,feature),2)
        return []
    fname=fnames[0]
    data,animal,feature,startEp,endEp=self.loadNpy(fname)
    self.debug("data shape before cutting/padding: %s"%str(data.shape))
    # False means "not given": default to the file's own extent
    if startEpCut==False: startEpCut=startEp
    if endEpCut==False: endEpCut=endEp
    expectedPoints=int((endEpCut-startEpCut)/self.secPerLine)
    offsetStart=int(startEpCut-startEp)/self.secPerLine
    if startEpCut:
        if offsetStart<0:
            # left padding is necessary
            padding=numpy.empty(abs(offsetStart))
            padding[:]=numpy.nan
            data=numpy.concatenate((padding,data))
        elif offsetStart>0:
            #left trimming is necessary
            data=data[offsetStart:]
    if endEpCut:
        if len(data)<expectedPoints:
            # right padding is necessary
            padding=numpy.empty(expectedPoints-len(data))
            padding[:]=numpy.nan
            data=numpy.concatenate((data,padding))
        elif len(data)>expectedPoints:
            # right trimming is necessary
            data=data[:expectedPoints]
    self.debug("data shape after cutting/padding: %s"%str(data.shape))
    if binsize:
        self.debug("binning to %s"%binsize,5)
        binSamples=int(binsize/self.secPerLine) #number of samples per bin
        self.secPerLine2=self.secPerLine*binSamples #seconds per sample
        if len(data) % binSamples: # we need to extend this to the appropriate bin size
            hangover=len(data) % binSamples
            needed=numpy.empty(binSamples-hangover)
            needed[:]=numpy.NaN
            data=numpy.append(data,needed)
        data=numpy.reshape(data,(len(data)/binSamples,binSamples))
        #data=numpy.ma.masked_invalid(data).mean(axis=1) #this is bad because it makes NaN become 0
        #data=numpy.mean(data,axis=1) #now it's binned!
        ### THIS PART IS NEW #################################
        # per-bin mean that ignores NaN samples entirely
        # NOTE(review): an all-NaN bin makes numpy.average see an empty
        # array -- confirm intended behavior for fully-missing bins.
        avgs=numpy.empty(len(data))
        for i in range(len(data)):
            line=data[i]
            line=line[numpy.where(numpy.isfinite(line))[0]]
            avgs[i]=numpy.average(line)
        data=avgs
        ######################################################
        self.debug("data shape at end of binning: %s"%str(data.shape))
    if sweep:
        self.debug("sweeping",5)
        # fold the 1-D trace into one row per 24-hour day
        samplesPerDay=int(60*60*24/self.secPerLine2)
        if len(data) % samplesPerDay: # we need to extend this to the appropriate bin size
            hangover=len(data) % samplesPerDay
            needed=numpy.empty(samplesPerDay-hangover)
            needed[:]=numpy.nan
            data=numpy.append(data,needed)
        days=len(data)/float(samplesPerDay)
        data=numpy.reshape(data,(int(days),int(len(data)/days)))
        xs=numpy.arange(0,24.0,24.0/float(len(data[0])))
    else:
        #data=numpy.array([data])
        data=numpy.atleast_2d(data)
        xs=range(int(startEpCut),int(startEpCut+self.secPerLine2*len(data[0])),int(self.secPerLine2))
        for i in range(len(xs)): xs[i]=ep2dt(xs[i])
    self.debug("data shape at end of sweeping: %s"%str(data.shape))
    if numpy.max(data)==0 or numpy.ma.count(data)==0:
        self.debug("%s - %s - NO DATA!"%(animal,feature),2)
        return []
    self.debug("returning data of size: %d"%len(data[0]))
    return [xs,data,startEpCut,startEpCut+self.secPerLine2*len(data[0])]
#######################
### DATA STATISTICS ###
#######################
def dataAverage(self,data):
    """Given [[ys],[ys],[ys]] return [avg,err].

    err is the standard deviation when scheme["stdev"] is True,
    otherwise the standard error (stdev / sqrt(finite count per column)).
    NaN entries are ignored via masked arrays; positions masked in the
    result are converted back to NaN before returning.
    """
    if data is None or not data.any():
        self.debug("averager got None value",5)
        return [[],[]]
    if len(data)==1:
        # single trace: nothing to average, zero error
        self.debug("only a single data stream, nothing to average",5)
        return [data[0],numpy.zeros(len(data[0]))]
    avg=numpy.mean(numpy.ma.masked_invalid(data),axis=0)
    err=numpy.std(numpy.ma.masked_invalid(data),axis=0)
    cnt=numpy.isfinite(data).sum(0)  # finite samples per column
    if self.scheme["stdev"]==False:
        err=err/numpy.sqrt(cnt) #standard error
    if numpy.sum(numpy.isfinite(data))==0:
        self.debug("Averager got nothing but NaN. Giving back NaN.",5)
        avg[:]=numpy.NaN
        err[:]=numpy.NaN
    # replace masked positions with NaN so callers get plain NaN gaps
    avg[numpy.ma.getmask(avg)]=numpy.nan
    err[numpy.ma.getmask(err)]=numpy.nan
    return [avg,err]
#################
### ANALYSIS ###
#################
def dataClear(self):
    """Reset self.data to an empty dict.

    Layout of each entry (filled later by schemeExecute):
    data["feature"]=[x,E,ER,[Es,Es,Es],B,BR,[Bs,Bs,Bs],N]
    where:
    x - experiment x time points
    E - experiment average trace
    ER - experiment average error
    Es - experiment individual traces
    x2 - baseline x time points
    B - baseline average trace
    BR - baseline average error
    Bs - baseline individual traces
    N - normalized value (E-B) +/ ER
    In reality, are better stats necesary???
    """
    self.data=dict()
def schemeExecute(self):
    """Run the analysis described by self.scheme and populate self.data.

    For each selected feature, loads the experiment (and optionally
    baseline) window for every selected animal, averages across animals,
    and stores data[feature]=[x,E,ER,Es,x2,B,BR,Bs,N,NR].
    """
    self.schemeShow()
    self.debug("executing analysis",2)
    self.schemeRecalculate()
    self.dataClear()
    self.processing=True
    animals=com2lst(self.scheme["animals"])
    features=com2lst(self.scheme["features"])
    timeExecuteStart=time.time()
    if not os.path.exists(self.scheme["output"]):
        os.makedirs(self.scheme["output"])
    # data["feature"]=[x,E,ER,Es, x2,B,BR,Bs,N,NR]
    #                  0 1 2  3   4  5 6  7  8 9
    #dataLine=[x,Eavg,Eerr,linearEs,x2,Bavg,Berr,linearBs,norm,normErr]
    x,Eavg,Eerr,linearEs,x2,Bavg,Berr,linearBs,norm,normErr=[None]*10
    for i in range(len(features)):
        # one row per animal; stays NaN where a load fails
        linearEs=numpy.empty((len(animals),self.scheme["expPoints"]))
        linearEs[:]=numpy.NaN
        linearBs=numpy.empty((len(animals),self.scheme["basePoints"]))
        linearBs[:]=numpy.NaN
        for j in range(len(animals)):
            feature=features[i]
            animal=animals[j]
            progress=len(animals)*i+j
            if self.uimain and self.app:
                if self.abortNow==True:
                    # user requested abort: reset the flag and bail out
                    self.abortNow=False
                    return
                self.uimain.progExecute.setMaximum(len(features)*len(animals))
                self.uimain.progExecute.setValue(progress+1)
                self.uimain.lblStatus.setText("processing %s - %s"%(animal,feature))
                self.app.processEvents()
            dataLine=[None]*9
            # experiment window
            dataPack=self.loadData(animal,feature,self.scheme["location"],st2ep(self.scheme["expA"]),st2ep(self.scheme["expB"]),int(self.scheme["binsize"]),self.scheme["sweep"])
            if len(dataPack)>0:
                x,Es,timeA,timeB=dataPack
                EsweepAvg,EsweepErr=self.dataAverage(Es)
                if len(animals)==1:
                    Eavg,Eerr=EsweepAvg,EsweepErr
                    linearEs=Es
                else:
                    linearEs[j][:]=EsweepAvg
            if self.scheme["baseline"]==True:
                # baseline window
                dataPack=self.loadData(animal,feature,self.scheme["location"],st2ep(self.scheme["baseA"]),st2ep(self.scheme["baseB"]),int(self.scheme["binsize"]),self.scheme["sweep"])
                if len(dataPack)>0:
                    x2,Bs,baseA,baseB=dataPack
                    BsweepAvg,BsweepErr=self.dataAverage(Bs)
                    if len(animals)==1:
                        Bavg,Berr=BsweepAvg,BsweepErr
                        linearBs=Bs
                    else:
                        linearBs[j]=BsweepAvg
            pass # last thing to do for each animal
        if len(animals)>1:
            # average across animals
            Eavg,Eerr=self.dataAverage(linearEs)
            Bavg,Berr=self.dataAverage(linearBs)
        if self.scheme["baseline"]==True:
            if len(Eavg)==len(Bavg):
                norm=Eavg-Bavg
                # independent errors add in quadrature
                normErr=numpy.sqrt(Eerr*Eerr+Berr*Berr)
            else:
                self.debug("can't create baseline because lengths are uneven.")
        # NOTE(review): norm/normErr are not reset per feature, so a
        # feature that fails the length check reuses the previous
        # feature's normalized values -- confirm intended.
        dataLine=[x,Eavg,Eerr,linearEs,x2,Bavg,Berr,linearBs,norm,normErr]
        self.data[feature]=dataLine
        pass #last thing to do for each feature
    timeExecute=time.time()-timeExecuteStart
    self.debug("scheme analyzed in %.03f seconds."%timeExecute,3)
    if self.uimain and self.app:
        if self.abortNow==True:
            self.abortNow=False
            return
        self.uimain.lblStatus.setText("scheme analyzed in %.03f seconds."%timeExecute)
        self.uimain.progExecute.setMaximum(len(features)*len(animals))
        self.uimain.progExecute.setValue(0)
#####################
### DATA PLOTTING ###
#####################
def plotPopup(self):
    """Show the current feature plot in an interactive pylab window.

    Disables the launch button while the (blocking) window is open.
    """
    self.uimain.btnLaunchInteractive.setEnabled(False)
    self.plotFigure()
    pylab.show()  # blocks until the window is closed
    self.uimain.btnLaunchInteractive.setEnabled(True)
def summaryPopup(self):
    """Show the data-availability summary in an interactive pylab window.

    Recalculates the scheme first; disables the summary button while the
    (blocking) window is open.
    """
    self.schemeRecalculate()
    self.uimain.btnSummary.setEnabled(False)
    self.plotSummary()
    pylab.show()  # blocks until the window is closed
    self.uimain.btnSummary.setEnabled(True)
def plotSummary(self,fig=None):
    """plots summary figure for all animals in the current folder.

    One horizontal dotted row per selected animal showing where data
    exists (hourly bins of the first selected feature); baseline and
    experiment windows are shaded. Returns the figure.
    """
    self.debug("generating plot summary figure...",3)
    if not fig: fig=pylab.figure()
    axes=fig.gca()
    selAnimals=com2lst(self.scheme["animals"])
    for i in range(len(selAnimals)):
        self.debug("generating plot summary figure... plotting animal %d of %d"%(i,len(selAnimals)),3)
        animal=selAnimals[i]
        feature=com2lst(self.scheme["features"])[0]
        data=self.loadData(animal,feature,binsize=60*60,sweep=False)
        if len(data)==0: continue  # no data for this animal
        xs,data,startX,endX=data
        # flat line at height i marks this animal's data coverage
        ys=data[0]*0
        ys=ys+i
        axes.plot(xs,ys,'.')
    # hide the axes frame (itervalues: Python 2 dict API)
    for spine in axes.spines.itervalues():
        spine.set_visible(False)
    axes.set_yticklabels(selAnimals)
    axes.yaxis.set_major_locator(matplotlib.ticker.FixedLocator(range(len(selAnimals))))
    for xlabel in axes.get_xaxis().get_ticklabels():
        xlabel.set_rotation(90)
    fig.subplots_adjust(bottom=.35,left=.08, right=0.98)
    fig.set_facecolor("#FFFFFF")
    axes.set_title("DATA SUMMARY")
    axes.autoscale()
    axes.set_ylim((-.5,i+1.5))
    # widen x limits slightly so edge points are visible
    x1,x2=axes.get_xlim()
    x1=x1-3
    x2=x2+3
    axes.set_xlim((x1,x2))
    if self.scheme["baseline"]:
        axes.axvspan(st2dt(self.scheme["baseA"]),st2dt(self.scheme["baseB"]),facecolor="b",alpha=.1)
        axes.text(ep2dt((st2ep(self.scheme["baseA"])+st2ep(self.scheme["baseB"]))/2),i+1,"baseline",color='blue',horizontalalignment='center',verticalalignment='top')
    axes.axvspan(st2dt(self.scheme["expA"]),st2dt(self.scheme["expB"]),facecolor="g",alpha=.1)
    axes.text(ep2dt((st2ep(self.scheme["expA"])+st2ep(self.scheme["expB"]))/2),i+1,"experiment",color='green',horizontalalignment='center',verticalalignment='top')
    self.debug("generating plot summary figure... COMPLETE!",3)
    return fig
def plotFigure(self,figure=None):
    """given a figure and data key, make a pretty telemetry graph.

    Plots the feature selected by scheme["plotKey"]: primary/secondary
    experiment and baseline traces, optional normalized trace, and
    optional error bars. Returns the figure.
    """
    if not figure: figure=pylab.figure()
    axes=figure.gca()
    key=self.scheme["plotKey"]
    self.debug("plotting data for key %d (%s)"%(key,self.data.keys()[key]),3)
    key=self.data.keys()[key]
    d=self.data[key]
    # BUGFIX: throughout this method ".any" (without parentheses) was a
    # bound method object -- always truthy -- so the emptiness guards
    # never worked. It must be CALLED: ".any()".
    if self.scheme["plotSecondary"]==True:
        if numpy.array(d[3]).any() and self.scheme["plotExperiment"]:
            for yvals in d[3]:
                # SECONDARY EXPERIMENTAL
                axes.plot(d[0],yvals,'g-',alpha=.2)
        if numpy.array(d[7]).any() and self.scheme["baseline"] and self.scheme["plotBaseline"]:
            for yvals in d[7]:
                # SECONDARY BASELINE
                axes.plot(d[4],yvals,'b-',alpha=.2)
    if self.scheme["plotPrimary"]==True:
        if numpy.array(d[1]).any() and self.scheme["plotExperiment"]:
            # PRIMARY EXPERIMENTAL
            axes.plot(d[0],d[1],'g-',label="experiment")
        if numpy.array(d[5]).any() and self.scheme["baseline"] and self.scheme["plotBaseline"]:
            # PRIMARY BASELINE
            axes.plot(d[4],d[5],'b-',label="baseline")
        # BUGFIX: d[8] (norm) can be None when no baseline was computed;
        # guard before calling .any() on it.
        if self.scheme["plotNormalized"] and d[8] is not None and d[8].any():
            # NORMALIZED
            axes.plot(d[0],d[8],'r-')
    if self.scheme["plotErrorBars"]==True:
        if numpy.array(d[1]).any() and self.scheme["plotExperiment"]:
            # EXPERIMENTAL ERROR BARS
            axes.errorbar(d[0],d[1],yerr=d[2],fmt='g.')
        if numpy.array(d[5]).any() and self.scheme["baseline"] and self.scheme["plotBaseline"]:
            # BASELINE ERROR BARS
            axes.errorbar(d[4],d[5],yerr=d[6],fmt='b.')
        if numpy.array(d[8]).any() and self.scheme["plotNormalized"]==True:
            # NORMALIZED ERROR BARS
            axes.errorbar(d[0],d[8],yerr=d[9],fmt='r.')
    for xlabel in axes.get_xaxis().get_ticklabels():
        #TODO make labels offset by the 24 hour day start time
        xlabel.set_rotation(90)
    axes.set_title("%s - %s"%(self.scheme["animals"],key))
    axes.grid()
    figure.subplots_adjust(bottom=.35,left=.08, right=0.98)
    figure.set_facecolor("#FFFFFF")
    if self.scheme["sweep"]: axes.set_xlim([0,24])
    #figure.canvas.draw()
    return figure
###################
### DATA OUTPUT ###
###################
# data["feature"]=[x,E,ER,Es, x2,B,BR,Bs,N,NR]
# 0 1 2 3 4 5 6 7 8 9
def outputHTML(self,launchItToo=True):
    """Render all figures plus the scheme into output/summary.html.

    Calls outputImages() first, then writes an HTML report embedding the
    summary and per-feature PNGs. If launchItToo is True, opens the
    report with explorer.exe in a background thread (Windows only).
    """
    self.outputImages()
    out='<html><body><div align="center">'
    # BUGFIX: the h1 heading was closed with </h2> (mismatched tag)
    out+="<h1>Telem-A-Gator</h1>"
    out+="<h2>Summary Report</h2>"
    out+='<img src="summary.png"><br>'
    keys=self.data.keys()
    for i in range(len(keys)):
        out+='<img src="%s"><br>'%(keys[i]+".png")
    out+="<h2>Scheme Data:</h2>"
    out+=self.scheme2txt(self.scheme).replace("\n","<br>")
    out+="</div></body></html>"
    f=open(os.path.join(self.scheme["output"],"summary.html"),'w')
    f.write(out)
    f.close()
    if launchItToo:
        cmd="explorer.exe "+os.path.abspath(os.path.join(self.scheme["output"],"summary.html"))
        self.debug("running: "+cmd,3)
        threadCmd(cmd)
    return
def outputImages(self):
    """save every feature in data{} as an image.

    Also saves the summary figure and the scheme used. Note: mutates
    scheme["plotKey"] as it iterates the features.
    """
    keys=self.data.keys()
    for i in range(len(keys)):
        self.debug("generating image for %s"%keys[i])
        self.scheme["plotKey"]=i  # plotFigure() reads the key from the scheme
        self.plotFigure()
        self.debug("saving "+keys[i]+".png")
        pylab.savefig(os.path.join(self.scheme["output"],keys[i]+".png"))
        pylab.close()
    self.plotSummary()
    pylab.savefig(os.path.join(self.scheme["output"],"summary.png"))
    pylab.close()
    # record the exact scheme these images were generated from
    self.schemeSave(os.path.join(self.scheme["output"],"schemeUsed.ini"))
    self.debug("image export complete.")
def generateCSV(self,dates,avg,err,sweeps,fname):
    """given some data, format it as a proper CSV file.

    Columns: Time, Average, Error, then one column per sweep/animal.
    NaN values are written as empty cells. Also re-saves the scheme
    alongside the CSV.
    """
    fout=os.path.join(self.scheme["output"],fname)
    #matrix=numpy.array([dates,avg,err,sweeps])
    # BUGFIX: "dates==None" / "avg==None" compare element-wise on numpy
    # arrays and raise on truth-testing; use identity checks.
    if dates is None or avg is None:
        #no data
        return
    animals=com2lst(self.scheme["animals"])
    rows=3
    # BUGFIX: "if sweeps:" is ambiguous for multi-element numpy arrays
    # (raises ValueError); test explicitly instead.
    hasSweeps=sweeps is not None and len(sweeps)>0
    if hasSweeps:
        rows+=len(sweeps)
    cols = len(avg)
    # note: dtype changed from the deprecated numpy.object alias to the
    # builtin object (identical behavior)
    matrix=numpy.zeros((rows,cols),dtype=object)
    for i in range(len(dates)):
        # "<>" (Python-2-only) replaced with "!=" -- same semantics
        if type(dates[i])!=float:
            dates[i]=str(dates[i])
    matrix[0,:len(dates)]=dates
    matrix[1,:len(avg)]=avg
    matrix[2,:len(err)]=err
    if hasSweeps:
        for i in range(len(sweeps)):
            matrix[3+i,:]=sweeps[i]
    # transpose so each output line is one time point
    matrix=numpy.rot90(matrix,1)
    matrix=matrix[::-1]
    self.debug("saving %s"%(fname))
    out="Time,Average,Error"
    if hasSweeps:
        for i in range(len(sweeps)):
            if len(animals)>1:
                out+=","+animals[i]
            else:
                out+=",DAY %d"%(i+1)
    out+="\n"
    for line in matrix:
        line=line.tolist()
        for i in range(len(line)):
            line[i]=str(line[i])
            if line[i]=='nan':
                line[i]=''  # blank cell for missing data
        out+=",".join(line)+"\n"
    f=open(fout,'w')
    f.write(out)
    f.close()
    self.schemeSave(os.path.join(self.scheme["output"],"schemeUsed.ini"))
def outputExcel(self):
    """Write experiment, baseline, and normalized CSV files for every feature in data{}."""
    for key in self.data.keys():
        self.debug("generating Excel file for %s"%(key))
        d=self.data[key]
        # d = [x,E,ER,Es, x2,B,BR,Bs, N,NR]
        self.generateCSV(d[0],d[1],d[2],d[3],key+"-experiment.csv")
        self.generateCSV(d[4],d[5],d[6],d[7],key+"-baseline.csv")
        self.generateCSV(d[0],d[8],d[9],None,key+"-normalized.csv")
    self.debug("Excel output complete.")
######################
### MISC PROCESSES ###
######################
def makeCrashLog(self):
sep="#"*20
out=sep+" MOST RECENT SCHEME "+sep
out="\n\n\n"+sep+" FULL LOG OUTPUT "+sep+"\n\n\n"
#self.schemeShow()
for line in self.log:
t,l,m=line
out+="[%s]%s%s\n"%(ep2st(t),"-"*l,m)
fname="crashlog-%s.txt"%(ep2fn(time.time()))
#fname="crashlog.txt"
f=open('./log/'+fname,'w')
f.write(out)
f.close()
messagebox("BUG REPORT","saved bug report as:\n"+fname)
def debug(self,msg,level=3):
    """save messages to session log with optional significance.
    levels:
    1 - critical, show pop-up window, exit
    2 - critical, show pop-up window
    3 - important
    4 - casual
    5 - ridiculous
    Messages at or below self.printLogLevel are also printed (indented
    by level), and mirrored to the GUI when one is attached.
    """
    self.log.append([time.time(),level,msg])
    if level<2:
        messagebox("IMPORTANT",msg)
    if level<=self.printLogLevel:
        # Python 2 print statement; indentation encodes the level
        print " "*level+msg
    if self.uimain and self.app:
        self.uimain.lblDebug.setText(shortenTo(msg.replace("\n","")))
        self.uimain.textDebug.appendPlainText(msg)
        self.app.processEvents()
def showDebug(self,maxLevel=5):
for item in self.log:
print item
if __name__ == "__main__":
    # Ad-hoc developer demo/debug entry point: loads a hard-coded scheme,
    # runs the analysis, and shows the plot. Not the intended launcher.
    print "DONT RUN ME DIRECTLY."
    TG=TelemSession()
    # TG.summaryPopup()
    TG.schemeLoad("SCOTT.ini")
    TG.schemeExecute()
    TG.plotFigure()
    # #TG.makeCrashLog()
    pylab.show()
| 37.309257 | 187 | 0.531425 | 4,204 | 36,675 | 4.624167 | 0.163416 | 0.066872 | 0.009877 | 0.007922 | 0.28035 | 0.21821 | 0.16214 | 0.14249 | 0.111883 | 0.095679 | 0 | 0.01862 | 0.316155 | 36,675 | 982 | 188 | 37.347251 | 0.756499 | 0.056033 | 0 | 0.192012 | 0 | 0 | 0.104211 | 0.001457 | 0 | 0 | 0 | 0.001018 | 0 | 0 | null | null | 0.003072 | 0.016897 | null | null | 0.00768 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5eca3822d3d88e640f8cd392a9134c9bdd311c55 | 1,458 | py | Python | pincer/middleware/ready.py | shivamdurgbuns/Pincer | aa27d6d65023ea62a2d0c09c1e9bc0fe4763e0c3 | [
"MIT"
] | null | null | null | pincer/middleware/ready.py | shivamdurgbuns/Pincer | aa27d6d65023ea62a2d0c09c1e9bc0fe4763e0c3 | [
"MIT"
] | null | null | null | pincer/middleware/ready.py | shivamdurgbuns/Pincer | aa27d6d65023ea62a2d0c09c1e9bc0fe4763e0c3 | [
"MIT"
] | null | null | null | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
"""
non-subscription event sent immediately after connecting,
contains server information
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from ..commands import ChatCommandHandler
from ..exceptions import InvalidPayload
from ..objects.user.user import User
from ..utils.conversion import construct_client_dict
if TYPE_CHECKING:
from typing import Tuple
from ..utils.types import Coro
from ..core.dispatch import GatewayDispatch
async def on_ready_middleware(
    self,
    payload: GatewayDispatch
) -> Tuple[str]:
    """|coro|
    Middleware for ``on_ready`` event.

    Parameters
    ----------
    payload : :class:`~pincer.core.dispatch.GatewayDispatch`
        The data received from the stage instance create event

    Returns
    -------
    Tuple[:class:`str`]
        ``on_ready``
    """
    data = payload.data
    user = data.get("user")
    guilds = data.get("guilds")

    if not user or guilds is None:
        raise InvalidPayload(
            "A `user` and `guilds` key/value pair is expected on the "
            "`ready` payload event."
        )

    self.bot = User.from_dict(construct_client_dict(self, user))
    # map each guild id to None until the guild itself arrives
    self.guilds = {guild["id"]: None for guild in guilds}
    await ChatCommandHandler(self).initialize()
    return "on_ready",
def export() -> Coro:
    # Conventional hook: the middleware loader calls export() to obtain
    # the coroutine to register for this event.
    return on_ready_middleware
| 24.711864 | 70 | 0.682442 | 178 | 1,458 | 5.488764 | 0.5 | 0.035824 | 0.032753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003478 | 0.211248 | 1,458 | 58 | 71 | 25.137931 | 0.846087 | 0.123457 | 0 | 0 | 0 | 0 | 0.099291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.333333 | 0.037037 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5ece95cf5808af191aa4776b596abeebfb595d6b | 1,620 | py | Python | conftest.py | RussellJQA/stg-python-webdriver-cert | 73fa7b02f38fe534e074d727f96994a92636ba7b | [
"MIT"
] | 4 | 2021-04-29T22:03:36.000Z | 2021-10-20T11:25:55.000Z | conftest.py | RussellJQA/stg-python-webdriver-cert | 73fa7b02f38fe534e074d727f96994a92636ba7b | [
"MIT"
] | null | null | null | conftest.py | RussellJQA/stg-python-webdriver-cert | 73fa7b02f38fe534e074d727f96994a92636ba7b | [
"MIT"
] | 1 | 2021-05-30T12:56:13.000Z | 2021-05-30T12:56:13.000Z | """
This module implements some pytest fixtures for use with Selenium WebDriver.
"""
import os
import time
import pytest
# pip installed
from dotenv import find_dotenv, load_dotenv
from selenium.webdriver import Chrome
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
@pytest.fixture
def driver() -> WebDriver:
    """Yield a maximized Chrome WebDriver; quit it after each test."""
    # Setup: Code before the 'yield' statement is run before each test
    driver: Chrome = Chrome(ChromeDriverManager().install())  # Install and initialize Chrome WebDriver for Selenium
    driver.maximize_window()

    yield driver

    # Cleanup/Teardown: Code after the 'yield' statement is run after each test
    # Load environment variables from .env file
    load_dotenv(find_dotenv())
    seconds_to_sleep_before_webdriver_quit = int(
        os.environ.get("SECONDS_TO_SLEEP_BEFORE_WEBDRIVER_QUIT", "0"))
    # Only do this when the corresponding environment variable has specifically been set to enable it
    # [as for development or demonstration purposes --
    # to allow (during test execution) the then current Web page to be observed].
    if seconds_to_sleep_before_webdriver_quit:
        time.sleep(seconds_to_sleep_before_webdriver_quit)

    driver.quit()
@pytest.fixture
def wait(driver: WebDriver) -> WebDriverWait:
    """ WebDriverWait allows us to wait until a condition is True.
    For example, wait until an element is displayed.
    Depends on the `driver` fixture above.
    """
    return WebDriverWait(driver, timeout=10)  # timeout is the max number of seconds to wait for.
| 31.153846 | 101 | 0.758025 | 214 | 1,620 | 5.616822 | 0.476636 | 0.037438 | 0.046589 | 0.066556 | 0.146423 | 0.109817 | 0 | 0 | 0 | 0 | 0 | 0.002254 | 0.178395 | 1,620 | 51 | 102 | 31.764706 | 0.900826 | 0.435185 | 0 | 0.086957 | 0 | 0 | 0.044118 | 0.042986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.347826 | 0 | 0.478261 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5ed2d6e06ea77385ae38bbc942bc6b4df97670f0 | 1,309 | py | Python | design.py | Raj-kar/Shuffle-Game-with-python | 1b0a765559850137bc49e503b6a79a03c3e8fb12 | [
"MIT"
] | null | null | null | design.py | Raj-kar/Shuffle-Game-with-python | 1b0a765559850137bc49e503b6a79a03c3e8fb12 | [
"MIT"
] | null | null | null | design.py | Raj-kar/Shuffle-Game-with-python | 1b0a765559850137bc49e503b6a79a03c3e8fb12 | [
"MIT"
] | null | null | null | from functions import decorate, ascii_text
def rules(): # Some Game rules, first shown at screen !
    """Print the welcome banner followed by the rules of the jumble game."""
    # Banner box
    decorate(" ************************************************************ ")
    decorate(" * * ")
    decorate(" * Welcome to Word jumbling, Suffle, re-arange Game! * ")
    decorate(" * * ")
    decorate(" ************************************************************ ")
    # Rules text
    decorate("Game Rules --->> Two-player game | Each time a player enters a word and the game shows the word in shuffle form.")
    decorate(
        "Then player 2 will guess it. If the correct, then player 2 enter a word, and player 1 will guess it !")
    decorate(
        "Both the player will get three hints, one each time if they can't answer the word at once ..!")
    decorate("The Game will run, untill player exit it !")
def loading_screen(p1, p2):  # welcome player 1 and 2
    """Greet both players and announce that player 1 goes first."""
    ascii_text(f"WELCOME {p1} and {p2}")
    for message in (f"We start with {p1} turn ..!",
                    "Don't show the word to your opponent !"):
        decorate(message)
# -> decorate is a function which you find at functions.py file
# -> It's just like print function, but it prints statements with different colors !
| 52.36 | 129 | 0.524828 | 158 | 1,309 | 4.329114 | 0.525316 | 0.093567 | 0.070175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010905 | 0.299465 | 1,309 | 24 | 130 | 54.541667 | 0.735005 | 0.1589 | 0 | 0.352941 | 0 | 0.176471 | 0.695896 | 0.11194 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
5ed4ceefc3e109900c35d9955460f833cabd84e2 | 485 | py | Python | manage.py | diogenesjusto/flask_leaderboard | 86dac90785e01747ffbde99e6ba65cf42e4c016e | [
"MIT"
] | 5 | 2020-06-15T02:56:39.000Z | 2021-12-28T19:18:18.000Z | manage.py | diogenesjusto/flask_leaderboard | 86dac90785e01747ffbde99e6ba65cf42e4c016e | [
"MIT"
] | 2 | 2019-12-01T15:50:05.000Z | 2021-12-17T07:54:23.000Z | manage.py | diogenesjusto/flask_leaderboard | 86dac90785e01747ffbde99e6ba65cf42e4c016e | [
"MIT"
] | 9 | 2020-01-19T11:21:33.000Z | 2022-02-22T06:28:52.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from config import Config
from main import User, Submission
app = Flask(__name__)
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
| 22.045455 | 60 | 0.77732 | 65 | 485 | 5.507692 | 0.353846 | 0.100559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.125773 | 485 | 21 | 61 | 23.095238 | 0.84434 | 0.119588 | 0 | 0 | 0 | 0 | 0.023529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.428571 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
5ed76a1d4a9d801f30aced725248325dce473b59 | 2,349 | py | Python | rebench/environment.py | tobega/ReBench | 123a9187f74d32f93b823dd0c354244aecd7437e | [
"MIT"
] | null | null | null | rebench/environment.py | tobega/ReBench | 123a9187f74d32f93b823dd0c354244aecd7437e | [
"MIT"
] | null | null | null | rebench/environment.py | tobega/ReBench | 123a9187f74d32f93b823dd0c354244aecd7437e | [
"MIT"
] | null | null | null | import getpass
import os
import subprocess
from cpuinfo import get_cpu_info
from psutil import virtual_memory
try:
from urllib.parse import urlparse
except ImportError:
# Python 2.7
from urlparse import urlparse
def _encode_str(out):
as_string = out.decode('utf-8')
if as_string and as_string[-1] == '\n':
as_string = as_string[:-1]
return as_string
def _exec(cmd):
try:
out = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
return None
return _encode_str(out)
def determine_source_details():
    """Collect git metadata (repo URL, branch, commit, author) for the CWD.

    Returns a dict; the URL is '' and the other values are None when the
    git commands fail.
    """
    result = dict()
    try:
        # BUGFIX: decode the bytes output immediately. Previously the raw
        # bytes were parsed by urlparse, and (a) the password-stripping
        # branch mixed a bytes ParseResult with a str netloc, and (b) on
        # failure the str fallback '' was passed to _encode_str, which
        # calls .decode() and so crashed on Python 3.
        repo_url = _encode_str(
            subprocess.check_output(['git', 'ls-remote', '--get-url']))
    except subprocess.CalledProcessError:
        repo_url = ''

    parsed = urlparse(repo_url)
    if parsed.password:
        # remove password
        parsed = parsed._replace(
            netloc="{}@{}".format(parsed.username, parsed.hostname))
    result['repoURL'] = parsed.geturl()

    result['branchOrTag'] = _exec(['git', 'show', '-s', '--format=%D', 'HEAD'])
    result['commitId'] = _exec(['git', 'rev-parse', 'HEAD'])
    result['commitMsg'] = _exec(['git', 'show', '-s', '--format=%B', 'HEAD'])
    result['authorName'] = _exec(['git', 'show', '-s', '--format=%aN', 'HEAD'])
    result['committerName'] = _exec(['git', 'show', '-s', '--format=%cN', 'HEAD'])
    result['authorEmail'] = _exec(['git', 'show', '-s', '--format=%aE', 'HEAD'])
    result['committerEmail'] = _exec(['git', 'show', '-s', '--format=%cE', 'HEAD'])
    return result
def determine_environment():
    """Gather host facts (user, OS, CPU, memory, kernel) into a dict."""
    result = dict()
    result['userName'] = getpass.getuser()
    # treat the run as manual unless a CI environment variable says otherwise
    result['manualRun'] = not ('CI' in os.environ and os.environ['CI'] == 'true')

    u_name = os.uname()  # (sysname, nodename, release, version, machine)
    result['hostName'] = u_name[1]
    result['osType'] = u_name[0]

    cpu_info = get_cpu_info()
    result['cpu'] = cpu_info['brand']
    # hz_advertised_raw is (value, exponent); combine into plain Hz
    result['clockSpeed'] = (cpu_info['hz_advertised_raw'][0]
                            * (10 ** cpu_info['hz_advertised_raw'][1]))
    result['memory'] = virtual_memory().total

    result['software'] = []
    result['software'].append({'name': 'kernel', 'version': u_name[3]})
    result['software'].append({'name': 'kernel-release', 'version': u_name[2]})
    result['software'].append({'name': 'architecture', 'version': u_name[4]})
    return result
| 32.178082 | 83 | 0.611324 | 282 | 2,349 | 4.91844 | 0.375887 | 0.035328 | 0.047585 | 0.051911 | 0.152848 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007407 | 0.195402 | 2,349 | 72 | 84 | 32.625 | 0.726455 | 0.011069 | 0 | 0.160714 | 0 | 0 | 0.20431 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0.053571 | 0.142857 | 0 | 0.303571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0d5987b219b64d6b388ff98a861c6bc4ea2a00e4 | 321 | py | Python | csv_to_table/urls.py | KariSpace/CRM_Sedicomm | cb19e90ca99c7a50a1841afbfb878191f62dec5c | [
"MIT"
] | null | null | null | csv_to_table/urls.py | KariSpace/CRM_Sedicomm | cb19e90ca99c7a50a1841afbfb878191f62dec5c | [
"MIT"
] | null | null | null | csv_to_table/urls.py | KariSpace/CRM_Sedicomm | cb19e90ca99c7a50a1841afbfb878191f62dec5c | [
"MIT"
] | null | null | null |
from . import views
from django.contrib.auth import views as auth_views
from django.urls import path

# NOTE(review): auth_views is imported but not referenced in this module —
# confirm whether it is still needed.
# URL routes for the csv_to_table app.
urlpatterns = [
    # Upload a CSV file and render its contents as a table.
    path('csv_upload/', views.csv_table, name='csv_table'),
    # Show only today's rows.
    path('today/', views.today_table, name='today_table'),
    # Free-text search over the table data.
    path('search/', views.search, name='search'),
] | 26.75 | 59 | 0.71028 | 46 | 321 | 4.782609 | 0.413043 | 0.1 | 0.136364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.152648 | 321 | 12 | 60 | 26.75 | 0.808824 | 0.074766 | 0 | 0 | 0 | 0 | 0.169492 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.375 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0d5d3731a59994ba117b8a32c3775bfdf7b49b71 | 1,259 | py | Python | 7. Trees/binary_euler_tour.py | vivek28111992/data_structure_and_algorithm_in_python_practice | 16cb3ba5d02049352b40482de647acaad4b3b44a | [
"MIT"
] | null | null | null | 7. Trees/binary_euler_tour.py | vivek28111992/data_structure_and_algorithm_in_python_practice | 16cb3ba5d02049352b40482de647acaad4b3b44a | [
"MIT"
] | null | null | null | 7. Trees/binary_euler_tour.py | vivek28111992/data_structure_and_algorithm_in_python_practice | 16cb3ba5d02049352b40482de647acaad4b3b44a | [
"MIT"
] | null | null | null | # Binary Euler Tour
# A Binary Euler Tour base class providing a specialized tour for binary tree.
from eulerTour import EulerTour
class BinaryEulerTour(EulerTour):
    """Abstract base class for performing Euler tour of a binary tree.

    This version includes an additional _hook_invisit that is called after the
    tour of the left subtree (if any), yet before the tour of the right
    subtree (if any).

    Note: Right child is always assigned index 1 in path, even if no left
    sibling.
    """

    def _tour(self, p, d, path):
        """Tour position p at depth d; path lists child indexes from the root."""
        results = [None, None]               # will update with results of recursions
        # Bug fix: call the subclass hook directly. The previous
        # 'self._hook._previsit(...)' looked the hook up on a nonexistent
        # '_hook' attribute and raised AttributeError at runtime.
        self._hook_previsit(p, d, path)      # "pre visit" for p
        if self._tree.left(p) is not None:   # consider left child
            path.append(0)
            results[0] = self._tour(self._tree.left(p), d + 1, path)
            path.pop()
        self._hook_invisit(p, d, path)       # "in visit" for p
        if self._tree.right(p) is not None:  # consider right child
            path.append(1)
            results[1] = self._tour(self._tree.right(p), d + 1, path)
            path.pop()
        answer = self._hook_postvisit(p, d, path, results)
        return answer

    def _hook_invisit(self, p, d, path):
        """Hook invoked between the left and right subtrees; subclasses override."""
        pass
| 41.966667 | 164 | 0.625894 | 185 | 1,259 | 4.167568 | 0.378378 | 0.018158 | 0.038911 | 0.031128 | 0.132296 | 0.085603 | 0 | 0 | 0 | 0 | 0 | 0.007769 | 0.284353 | 1,259 | 29 | 165 | 43.413793 | 0.847947 | 0.409849 | 0 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.058824 | 0.058824 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0d5dc684691063784e6ac984f160e9b545454def | 4,565 | py | Python | ppr-api/tests/unit/models/test_utils.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | ppr-api/tests/unit/models/test_utils.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | ppr-api/tests/unit/models/test_utils.py | pwei1018/ppr | 1fdd2f1ad33217045404d7b872d9fad41a4c7da6 | [
"Apache-2.0"
] | null | null | null | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Suite to ensure the datetime utility functions are working as expected."""
from datetime import timedelta as _timedelta
from ppr_api.models import utils as model_utils
def test_expiry_dt_from_years():
    """Assert that generating an expiry date from life years is performing as expected."""
    expiry_ts = model_utils.expiry_dt_from_years(5)
    now_ts = model_utils.now_ts()
    print('Expiry timestamp: ' + model_utils.format_ts(expiry_ts))
    print('Now timestamp: ' + model_utils.format_ts(now_ts))
    assert (expiry_ts.year - now_ts.year) == 5
    # Expiry is pinned to the very end of the day.
    assert expiry_ts.hour == 23
    assert expiry_ts.minute == 59
    assert expiry_ts.second == 59
    assert expiry_ts.day == now_ts.day
    # Timezone conversion may roll the expiry into the next month. Bug fix:
    # 'now_ts.month + 1' produced the invalid month 13 when run in December;
    # '% 12 + 1' wraps December to January instead.
    assert expiry_ts.month in (now_ts.month, now_ts.month % 12 + 1)
def test_ts_from_iso_format():
    """Assert that creating a UTC datetime object from an ISO date-time formatted string is performing as expected."""
    # (input string, expected UTC attribute values) pairs.
    cases = [
        ('2021-02-16T23:00:00-08:00',
         {'year': 2021, 'month': 2, 'day': 17, 'hour': 7, 'minute': 0, 'second': 0}),
        ('2021-02-16T23:00:00+00:00', {'day': 16, 'hour': 23}),
        ('2021-02-16T13:00:00-08:00', {'day': 16, 'hour': 21}),
        # Pacific-time month boundary rolls into April in UTC.
        ('2021-03-31T23:00:00-08:00', {'month': 4, 'day': 1, 'hour': 7}),
    ]
    for iso_string, expected in cases:
        test_ts = model_utils.ts_from_iso_format(iso_string)
        print('Test timestamp: ' + model_utils.format_ts(test_ts))
        for attribute, value in expected.items():
            assert getattr(test_ts, attribute) == value
def test_ts_from_date_iso_format():
    """Assert that creating a UTC datetime object from an ISO date-time formatted string is performing as expected."""
    test_ts = model_utils.ts_from_date_iso_format('2021-02-16')
    print('Test timestamp: ' + model_utils.format_ts(test_ts))
    assert test_ts.year == 2021
    assert test_ts.month == 2
    assert test_ts.day in (16, 17)
    # Converting a local date to UTC: a same-day result lands at 08:00 or
    # later, a next-day result before 08:00.
    if test_ts.day == 16:
        assert test_ts.hour >= 8
    else:
        assert test_ts.hour <= 7
def test_now_ts_offset():
    """Assert that adjusting UTC now by a number of days is performing as expected."""
    # Check both directions: add=True moves forward, add=False backward.
    for add, direction, sign in ((True, '+', 1), (False, '-', -1)):
        expected = model_utils.now_ts() + _timedelta(days=60 * sign)
        actual = model_utils.now_ts_offset(60, add)
        print('Now timestamp {} 60 days: {}'.format(direction, model_utils.format_ts(actual)))
        assert actual.day == expected.day
        assert actual.month == expected.month
        assert actual.year == expected.year
def test_today_ts_offset():
    """Assert that adjusting UTC today by a number of days is performing as expected."""
    now_offset = model_utils.now_ts_offset(7, False)
    today_offset = model_utils.today_ts_offset(7, False)
    print('test now - 7 days: ' + model_utils.format_ts(now_offset))
    print('test today - 7 days: ' + model_utils.format_ts(today_offset))
    # today_ts_offset truncates to midnight, so it must precede the
    # now-based offset for the same day count.
    assert (today_offset.hour, today_offset.minute, today_offset.second) == (0, 0, 0)
    assert today_offset < now_offset
def test_expiry_dt_add_years():
    """Assert that adding years to an expiry date is performing as expected."""
    initial_expiry = model_utils.expiry_dt_from_years(1)
    extended_expiry = model_utils.expiry_dt_add_years(initial_expiry, 4)
    print('Initial expiry: ' + model_utils.format_ts(initial_expiry))
    print('Updated expiry: ' + model_utils.format_ts(extended_expiry))
    assert extended_expiry.year - initial_expiry.year == 4
| 40.758929 | 118 | 0.718072 | 749 | 4,565 | 4.12283 | 0.189586 | 0.079663 | 0.093264 | 0.075777 | 0.622085 | 0.544041 | 0.503562 | 0.450453 | 0.44171 | 0.406412 | 0 | 0.040844 | 0.179409 | 4,565 | 111 | 119 | 41.126126 | 0.783235 | 0.256298 | 0 | 0.260274 | 0 | 0 | 0.103077 | 0.029878 | 0 | 0 | 0 | 0 | 0.479452 | 1 | 0.082192 | false | 0 | 0.027397 | 0 | 0.109589 | 0.178082 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d6c93e9529dba1be9084932e1bd1a732f244d8b | 1,097 | py | Python | wolfbot/sensors/avg_color.py | sunil3590/TIM | 5c8de80d631a4dea0f604091467dba7a7201de48 | [
"MIT"
] | 1 | 2019-01-12T14:35:54.000Z | 2019-01-12T14:35:54.000Z | wolfbot/sensors/avg_color.py | sunil3590/TIM | 5c8de80d631a4dea0f604091467dba7a7201de48 | [
"MIT"
] | null | null | null | wolfbot/sensors/avg_color.py | sunil3590/TIM | 5c8de80d631a4dea0f604091467dba7a7201de48 | [
"MIT"
] | null | null | null | import color_sensor_ISL29125
from time import time
cs = color_sensor_ISL29125.color_senser(1)
if cs.valid_init:
print "Valid color sensor"
else :
print "Color Sensor invalid"
t0 = time()
red_list = []
green_list = []
blue_list = []
for x in range(100):
stat = cs.readStatus()
if "" in stat: #"FLAG_CONV_DONE" in stat:
if "FLAG_CONV_R" not in stat:
red_list.append( cs.readRed() )
if "FLAG_CONV_G" not in stat:
green_list.append( cs.readGreen() )
if "FLAG_CONV_G" not in stat:
blue_list.append( cs.readBlue() )
tf = time()
red_avg = float(sum( red_list)) / float(len(red_list))
green_avg = float(sum( green_list)) / float(len(green_list))
blue_avg = float(sum( blue_list)) / float(len(blue_list))
print "In " + str( int((tf-t0)*10000)/10.0) + "ms the avg of: "
print str(len(red_list)) + " red vals was " + str(red_avg)
print str(len(green_list)) +" green vals was " + str(green_avg)
print str(len(blue_list)) + " blue vals was " + str(blue_avg)
print red_avg
print green_avg
print blue_avg
print ""
| 27.425 | 66 | 0.649043 | 176 | 1,097 | 3.829545 | 0.295455 | 0.071217 | 0.04451 | 0.032641 | 0.059347 | 0.059347 | 0.059347 | 0 | 0 | 0 | 0 | 0.027972 | 0.217867 | 1,097 | 39 | 67 | 28.128205 | 0.757576 | 0.022789 | 0 | 0.0625 | 0 | 0 | 0.128157 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.0625 | null | null | 0.3125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d71379845c71ad7524a31d7ceaa01d4a691d64e | 822 | py | Python | examples/static_content.py | kjosib/kale | ced67fa7a335fbe9524c86a0a805cfdd56f6d560 | [
"MIT"
] | null | null | null | examples/static_content.py | kjosib/kale | ced67fa7a335fbe9524c86a0a805cfdd56f6d560 | [
"MIT"
] | 3 | 2020-03-04T03:16:52.000Z | 2020-04-25T06:22:48.000Z | examples/static_content.py | kjosib/kale | ced67fa7a335fbe9524c86a0a805cfdd56f6d560 | [
"MIT"
] | 2 | 2020-05-22T16:29:33.000Z | 2022-01-10T19:36:51.000Z | """
<html><body>
<p>
You'll probably want to supply a stylesheet. Perhaps some javascript library.
Maybe even some images. One way or another, it's handy to be able to point at
a directory full of static content and let the framework do its job.
</p>
<p>
This example exercises that facility by presenting the examples folder within
your web browser.
</p>
<p>Click <a href="static">here</a> to see this work.</p>
<p>When you're done digesting this example, may I suggest
<a href="/static/simple_task_list.py"> simple_task_list.py </a>?</p>
</body></html>
"""
import os
import kali
app = kali.Router()

# Serve this file's directory (the examples folder) as static content.
app.delegate_folder("/static/", kali.StaticFolder(os.path.dirname(__file__)))


@app.function('/')
def hello():
    """Index page: render the module docstring shown above."""
    return __doc__


kali.serve_http(app)
| 22.833333 | 77 | 0.723844 | 141 | 822 | 4.120567 | 0.666667 | 0.010327 | 0.037866 | 0.055077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148418 | 822 | 35 | 78 | 23.485714 | 0.83 | 0.749392 | 0 | 0 | 0 | 0 | 0.045685 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.285714 | 0.142857 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
0d74ec72c3d16df3ed04de198a0c999287ff6316 | 1,303 | py | Python | home/models.py | davidkiama/Foto-Moto- | 97f5cafb3580152c3672834dcbbbf5cfa961ff15 | [
"Unlicense"
] | null | null | null | home/models.py | davidkiama/Foto-Moto- | 97f5cafb3580152c3672834dcbbbf5cfa961ff15 | [
"Unlicense"
] | null | null | null | home/models.py | davidkiama/Foto-Moto- | 97f5cafb3580152c3672834dcbbbf5cfa961ff15 | [
"Unlicense"
] | null | null | null | from statistics import mode
from django.db import models
from cloudinary.models import CloudinaryField
# Create your models here.
class Image(models.Model):
    """A photo stored in Cloudinary together with its display metadata."""

    image = CloudinaryField('image')
    title = models.CharField(max_length=60)
    description = models.TextField()
    location = models.ForeignKey('Location', on_delete=models.CASCADE)
    category = models.ForeignKey('Category', on_delete=models.CASCADE)

    @classmethod
    def get_all_images(cls):
        """Return a queryset of every stored image."""
        return cls.objects.all()

    @classmethod
    def get_images_by_category(cls, category):
        """Return images belonging to the given Category instance."""
        return cls.objects.filter(category=category)

    @classmethod
    def filter_by_location(cls, location):
        """Return images taken at the given Location instance."""
        return cls.objects.filter(location=location)

    @classmethod
    def search_by_category(cls, search_term):
        """Return images whose category name contains search_term (case-insensitive)."""
        return cls.objects.filter(category__name__icontains=search_term)
class Location(models.Model):
    """A place an image can be associated with."""

    # Display name of the location.
    name = models.CharField(max_length=60)

    def __str__(self):
        """Return the location name (used by the admin and form widgets)."""
        return self.name
class Category(models.Model):
    """A grouping label for images."""

    # Display name of the category; searched case-insensitively by Image.
    name = models.CharField(max_length=60)

    def __str__(self):
        """Return the category name (used by the admin and form widgets)."""
        return self.name
| 25.057692 | 74 | 0.697621 | 153 | 1,303 | 5.751634 | 0.326797 | 0.051136 | 0.072727 | 0.081818 | 0.245455 | 0.147727 | 0.147727 | 0.147727 | 0.147727 | 0.147727 | 0 | 0.005814 | 0.207982 | 1,303 | 51 | 75 | 25.54902 | 0.846899 | 0.076746 | 0 | 0.424242 | 0 | 0 | 0.017515 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.060606 | 0.757576 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0d779f79a6ff4f979c6a546d7fdf9c7fcb571967 | 1,248 | py | Python | tests/_utils.py | tsuyukimakoto/physaliidae | e55416e8b84c4a4ed2a31290f16ccd42350853d2 | [
"MIT"
] | 2 | 2017-04-26T01:10:24.000Z | 2019-05-04T03:29:24.000Z | tests/_utils.py | tsuyukimakoto/physaliidae | e55416e8b84c4a4ed2a31290f16ccd42350853d2 | [
"MIT"
] | 220 | 2019-01-01T03:18:11.000Z | 2022-03-28T20:29:49.000Z | tests/_utils.py | tsuyukimakoto/biisan | 8e55d73c582fcbba918595c2e741ffce7c88aaa9 | [
"MIT"
] | null | null | null | import os
import shutil
from contextlib import (
contextmanager,
)
from pathlib import Path
import pytest
@pytest.fixture(scope='function', autouse=True)
def cleanup():
    """Remove the generated tests/biisan_data directory before and after each test."""
    generated_dir = Path('.') / 'tests' / 'biisan_data'

    def _wipe():
        # rmtree raises on a missing directory, so guard the call.
        if generated_dir.exists():
            shutil.rmtree(generated_dir)

    _wipe()
    yield
    _wipe()
@pytest.fixture(scope='function', autouse=True)
def setenv():
    """Point BIISAN_SETTINGS_MODULE at the test settings for each test's duration."""
    variable = 'BIISAN_SETTINGS_MODULE'
    os.environ[variable] = 'tests.biisan_data.data.biisan_local_settings'
    yield
    del os.environ[variable]
@contextmanager
def cd(to):
    """Temporarily change the working directory to *to*, restoring it afterwards."""
    original_cwd = Path.cwd()
    os.chdir(to)
    try:
        yield
    finally:
        # Always restore, even if the body raised.
        os.chdir(original_cwd)
def _copy_blog(entry_file):
src = Path('.') / 'test_data' / entry_file
dest = Path('.') / 'biisan_data' / 'data' / 'blog' / entry_file
shutil.copyfile(src, dest)
def copy_first_blog():
    """Copy the my_first_blog.rst test entry into the generated blog directory."""
    _copy_blog('my_first_blog.rst')
def copy_second_blog():
    """Copy the my_second_blog.rst test entry into the generated blog directory."""
    _copy_blog('my_second_blog.rst')
def copy_test_local_settings():
    """Copy the test settings module into the generated data directory."""
    source = Path('.') / 'test_data' / 'biisan_local_settings.py'
    destination = Path('.') / 'biisan_data' / 'data' / 'biisan_local_settings.py'
    shutil.copyfile(source, destination)
| 22.285714 | 89 | 0.677885 | 164 | 1,248 | 4.865854 | 0.310976 | 0.075188 | 0.093985 | 0.086466 | 0.452381 | 0.308271 | 0.225564 | 0.125313 | 0.125313 | 0.125313 | 0 | 0 | 0.184295 | 1,248 | 55 | 90 | 22.690909 | 0.78389 | 0 | 0 | 0.275 | 1 | 0 | 0.208333 | 0.108974 | 0 | 0 | 0 | 0 | 0 | 1 | 0.175 | false | 0 | 0.125 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d7c9dd39ea4011bea154b13be9de5e46cbc2b5f | 1,254 | py | Python | src/search/views/author.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/search/views/author.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/search/views/author.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | from rest_framework import viewsets
from elasticsearch_dsl import Search
from elasticsearch_dsl.connections import connections
from search.filters import ElasticsearchFuzzyFilter
from search.documents import AuthorDocument
from search.serializers import AuthorDocumentSerializer
from utils.permissions import ReadOnly
class AuthorDocumentView(viewsets.ReadOnlyModelViewSet):
    """Read-only, Elasticsearch-backed search endpoint for authors."""

    serializer_class = AuthorDocumentSerializer
    document = AuthorDocument
    permission_classes = [ReadOnly]
    filter_backends = [ElasticsearchFuzzyFilter]
    search_fields = ['first_name', 'last_name']

    def __init__(self, *args, **kwargs):
        assert self.document is not None
        # Reuse the Elasticsearch connection registered for this document.
        self.client = connections.get_connection(self.document._get_using())
        self.index = self.document._index._name
        self.mapping = self.document._doc_type.mapping.properties.name
        self.search = Search(
            using=self.client,
            index=self.index,
            doc_type=self.document._doc_type.name,
        )
        super().__init__(*args, **kwargs)

    def get_queryset(self):
        """Return an empty-query Search bound to the document's Django model."""
        queryset = self.search.query()
        queryset.model = self.document.Django.model
        return queryset
| 32.153846 | 70 | 0.720893 | 130 | 1,254 | 6.738462 | 0.423077 | 0.082192 | 0.045662 | 0.043379 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.205742 | 1,254 | 38 | 71 | 33 | 0.879518 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.066667 | false | 0 | 0.233333 | 0 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0d802fca09e29a84af3006203d6210a4292b4238 | 2,534 | py | Python | run.py | UNIFUZZ/getcvss | 360de42ef09d7e21ef7c539d48ac083d10e8e215 | [
"MIT"
] | 2 | 2020-08-16T12:06:17.000Z | 2021-01-05T05:35:54.000Z | run.py | UNIFUZZ/getcvss | 360de42ef09d7e21ef7c539d48ac083d10e8e215 | [
"MIT"
] | null | null | null | run.py | UNIFUZZ/getcvss | 360de42ef09d7e21ef7c539d48ac083d10e8e215 | [
"MIT"
] | 2 | 2020-08-16T11:07:29.000Z | 2022-01-04T02:18:50.000Z | import requests
sess = requests.session()
import gzip
import json
import time
import os
def downloadyear(year):
    """Download one year's NVD CVE feed and return the decompressed JSON text."""
    print("fetching year", year)
    feed_url = "https://nvd.nist.gov/feeds/json/cve/1.1/nvdcve-1.1-{year}.json.gz".format(year=year)
    response = sess.get(feed_url, stream=True)
    # Stream the raw body through gzip instead of buffering the .gz first.
    return gzip.open(response.raw).read().decode()
def getdata(year):
    """Parse one year's NVD feed into a dict of CVE records.

    Returns {"CVE-...": [cwe, cvssv3_score, cvssv2_score, vector_v3,
    vector_v2, date]}; scores default to -1 and vectors to "" when a CVE
    lacks the corresponding CVSS metrics.
    Example: "CVE-2011-1474": ["CWE-400/CWE-835", 5.5, 4.9,
    "CVSS:3.1/AV:L/...", "AV:L/AC:L/...", "2011-03-31"]
    """
    data = json.loads(downloadyear(year))
    res = {}
    for item in data["CVE_Items"]:
        id = item["cve"]["CVE_data_meta"]["ID"]
        cwes = [i["value"] for i in item["cve"]["problemtype"]["problemtype_data"][0]["description"]]
        cwe = "/".join(cwes)
        # Not every CVE carries both metric versions. Only a missing key is
        # the expected failure, so catch KeyError instead of a bare except
        # that would also hide programming errors.
        try:
            metrics = item["impact"]["baseMetricV3"]["cvssV3"]
            cvssv3_score, vector_v3 = metrics["baseScore"], metrics["vectorString"]
        except KeyError:
            cvssv3_score, vector_v3 = -1, ""
        try:
            metrics = item["impact"]["baseMetricV2"]["cvssV2"]
            cvssv2_score, vector_v2 = metrics["baseScore"], metrics["vectorString"]
        except KeyError:
            cvssv2_score, vector_v2 = -1, ""
        date = item["publishedDate"].split("T")[0]
        res[id] = [cwe, cvssv3_score, cvssv2_score, vector_v3, vector_v2, date]
    return res
def fullupdate():
    """Fetch and merge CVE data for 2002 through the current year plus the 'recent' feed."""
    merged = {}
    current_year = int(time.strftime("%Y"))
    # Historical yearly feeds first, then the rolling 'recent' feed on top.
    for feed in list(range(2002, current_year + 1)) + ["recent"]:
        merged.update(getdata(feed))
    return merged
def writetofile(filepath, data):
    """Write data ({cve_id: fields}) as CSV lines sorted by CVE year then number."""
    def cve_sort_key(entry):
        # "CVE-<year>-<number>" -> (year, number) for a stable numeric order.
        parts = entry[0].split("-")
        return (int(parts[1]), int(parts[2]))

    with open(filepath, "w") as fp:
        for cve_id, fields in sorted(data.items(), key=cve_sort_key):
            fp.write(",".join(str(field) for field in [cve_id] + fields) + "\n")
def readfromfile(filepath):
    """Read a CSV written by writetofile back into {cve_id: fields}.

    Each line holds 7 comma-separated fields:
    id, cwe, cvssv3_score, cvssv2_score, vector_v3, vector_v2, date.
    Scores are converted back to float; all other fields stay strings.
    """
    data = {}
    for line in open(filepath):
        # Bug fix: writetofile/getdata emit 7 fields per line (the date is
        # included); the previous 6-name unpacking raised ValueError on
        # every row. Keep the date so a round trip preserves the record.
        cve_id, cwe, v3_score, v2_score, vector_v3, vector_v2, date = line.strip().split(",")
        data[cve_id] = [cwe, float(v3_score), float(v2_score), vector_v3, vector_v2, date]
    return data
if __name__ == "__main__":
    # Refresh the full CVE/CVSS dataset and persist it as CSV.
    # (Commented-out debug prints removed.)
    data = fullupdate()
    writetofile("/tmp/cvssdata/cvss.csv", data)
    # TODO: add meta data comparison to avoid full update
| 40.870968 | 150 | 0.598658 | 351 | 2,534 | 4.225071 | 0.378917 | 0.059339 | 0.06878 | 0.051247 | 0.169252 | 0.144976 | 0.12677 | 0.083614 | 0.057991 | 0.057991 | 0 | 0.035376 | 0.207972 | 2,534 | 61 | 151 | 41.540984 | 0.703538 | 0.15588 | 0 | 0.16 | 0 | 0.02 | 0.168436 | 0.010618 | 0 | 0 | 0 | 0.016393 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.28 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d8131562321f737bfe94d6f5f98ee26c890434c | 2,974 | py | Python | src/enumerator.py | darkarnium/perimeterator | 8c694267d92ca1d28fc1494cd9394af34271ed39 | [
"MIT"
] | 56 | 2019-03-20T01:44:04.000Z | 2022-02-16T13:36:39.000Z | src/enumerator.py | darkarnium/perimeterator | 8c694267d92ca1d28fc1494cd9394af34271ed39 | [
"MIT"
] | 1 | 2020-07-08T20:30:23.000Z | 2020-11-07T15:41:25.000Z | src/enumerator.py | darkarnium/perimeterator | 8c694267d92ca1d28fc1494cd9394af34271ed39 | [
"MIT"
] | 9 | 2019-10-09T18:54:52.000Z | 2021-12-28T15:27:58.000Z | #!/usr/bin/env python3
''' Perimeterator Enumerator.
This wrapper is intended to allow for simplified AWS based deployment of the
Perimeterator enumerator. This allows for a cost effective method of
execution, as the Perimeterator poller component only needs to execute on a
defined schedule in order to detect changes.
'''
import os
import logging
import perimeterator
# TODO: This should likely be configurable.
# Resource types to enumerate. Each name must match a module under
# perimeterator.enumerator that exposes an Enumerator class (see
# lambda_handler's getattr dispatch below).
MODULES = [
    'rds',
    'ec2',
    'elb',
    'elbv2',
    'es',
]
def lambda_handler(event, context):
    """AWS Lambda entry point for the Perimeterator enumerator.

    The event and context arguments are accepted for Lambda compatibility
    but are not used.
    """
    # AWS pre-installs logging handlers; remove them so our format applies.
    root_logger = logging.getLogger()
    for existing_handler in list(root_logger.handlers):
        root_logger.removeHandler(existing_handler)
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(process)d - [%(levelname)s] %(message)s'
    )

    account = perimeterator.helper.aws_account_id()
    root_logger.info("Running in AWS account %s", account)

    # Runtime configuration comes from environment variables.
    regions = os.getenv("ENUMERATOR_REGIONS", "us-west-2").split(",")
    sqs_queue = os.getenv("ENUMERATOR_SQS_QUEUE", None)
    root_logger.info("Configured results SQS queue is %s", sqs_queue)
    root_logger.info(
        "Configured regions for resource enumeration are %s",
        ", ".join(regions)
    )

    # Dispatcher used to submit enumerated addresses to the scanners.
    dispatcher = perimeterator.dispatcher.sqs.Dispatcher(queue=sqs_queue)

    # Enumerate every configured resource type in every configured region.
    # Per-region resource-type selection is not supported yet.
    for region in regions:
        root_logger.info("Attempting to enumerate resources in %s", region)
        for module in MODULES:
            root_logger.info("Attempting to enumerate %s resources", module)
            try:
                # Ensure a handler exists for this type of resource.
                hndl = getattr(perimeterator.enumerator, module).Enumerator(
                    region=region
                )
            except AttributeError as err:
                root_logger.error(
                    "Handler for %s resources not found, skipping: %s",
                    module,
                    err
                )
                continue
            root_logger.info(
                "Submitting %s resources in %s for processing",
                module,
                region
            )
            dispatcher.dispatch(account, hndl.get())
if __name__ == '__main__':
    # Allow the script to be invoked outside of Lambda; neither argument is
    # used locally, so empty dicts stand in for the event and context.
    lambda_handler(dict(), dict())
| 33.41573 | 77 | 0.631137 | 350 | 2,974 | 5.311429 | 0.465714 | 0.032275 | 0.029048 | 0.023669 | 0.033351 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001881 | 0.284802 | 2,974 | 88 | 78 | 33.795455 | 0.87212 | 0.353732 | 0 | 0.074074 | 0 | 0 | 0.220348 | 0 | 0 | 0 | 0 | 0.011364 | 0 | 1 | 0.018519 | false | 0 | 0.055556 | 0 | 0.074074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d81f1dd86c97fac6a68af0051fa814935517691 | 3,493 | py | Python | invenio_rdm_pure/utils.py | utnapischtim/invenio-rdm-pure | 895addfb374dca640adc42ce68ab54ddfc8d412a | [
"MIT"
] | null | null | null | invenio_rdm_pure/utils.py | utnapischtim/invenio-rdm-pure | 895addfb374dca640adc42ce68ab54ddfc8d412a | [
"MIT"
] | 19 | 2020-10-20T09:38:09.000Z | 2021-04-01T09:13:59.000Z | invenio_rdm_pure/utils.py | utnapischtim/invenio-rdm-pure | 895addfb374dca640adc42ce68ab54ddfc8d412a | [
"MIT"
] | 2 | 2020-09-18T06:45:15.000Z | 2021-03-21T20:15:37.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Technische Universität Graz
#
# invenio-rdm-pure is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Utility methods."""
import smtplib
from datetime import datetime, timedelta
from os.path import dirname, isabs, isfile, join
from pathlib import Path
from typing import List

from flask import current_app
from flask_security.utils import hash_password
from invenio_db import db
def get_user_id(user_email: str, user_password: str):
    """Return the id of the user with user_email, creating the account if needed.

    Returns None when the security datastore is unavailable.
    """
    datastore = current_app.extensions["security"].datastore
    if datastore is None:
        return None
    user = datastore.get_user(user_email)
    if not user:
        user = datastore.create_user(
            email=user_email,
            password=hash_password(user_password),
            active=True,
        )
        db.session.commit()
    return user.id
def make_user_admin(self, id_or_email: str) -> None:
    """Gives the user with given id or email administrator rights.

    Currently a no-op: the early return below deliberately stubs the method
    out until the datastore helpers it relies on are implemented.
    """
    return None  # FIXME: Method stub'd until auxiliary methods are implemented.

    # NOTE: everything below is intentionally unreachable; it sketches the
    # intended implementation for when the FIXMEs are resolved.
    datastore = current_app.extensions["security"].datastore
    if datastore is not None:
        invenio_pure_user = datastore.get_user(
            id_or_email
        )  # FIXME: Not implemented yet.
        admin_role = datastore.find_role("admin")  # FIXME: Not implemented yet.
        datastore.add_role_to_user(invenio_pure_user, admin_role)
def load_file_as_string(path):
    """Open a file and return the content as a UTF-8 decoded string.

    Relative paths are resolved against this module's directory. Returns the
    empty string when the file does not exist.
    """
    if not isabs(path):
        path = join(dirname(__file__), path)
    if not isfile(path):
        return ""
    with open(path, "rb") as fp:
        # Renamed from 'input', which shadowed the builtin of the same name.
        raw = fp.read()
    return raw.decode("utf-8")
def get_dates_in_span(
    start: datetime.date, stop: datetime.date, step: int
) -> List[datetime.date]:
    """Returns an ascending list of dates with given step (in days) between the two endpoints.

    Equal endpoints yield [start]; a zero step or a step pointing away from
    stop yields []. A negative step walks from start down to stop and the
    result is reversed so it is still ascending.
    """
    # Bug fix: the module only imports the datetime *class*, so the previous
    # 'datetime.timedelta(step)' raised AttributeError at runtime; use the
    # timedelta type imported at module level instead.
    if start == stop:
        return [start]
    if step == 0:
        return []
    if (step < 0 and start < stop) or (step > 0 and stop < start):
        # Step points away from the stop endpoint: the span is empty.
        return []
    dates = []
    current = start
    if step < 0:
        while current >= stop:
            dates.append(current)
            current += timedelta(days=step)
        dates.reverse()
    else:
        while current <= stop:
            dates.append(current)
            current += timedelta(days=step)
    return dates
def send_email(
    uuid: str,
    file_name: str,
    email_sender: str,
    email_sender_password: str,
    email_receiver: str,
):
    """Email a request to remove a file from a Pure record.

    Connects to Gmail's SMTP server with STARTTLS, authenticates as
    email_sender and sends one plain-text message to email_receiver asking
    for file_name to be removed from the record identified by uuid.
    """
    email_smtp_server = "smtp.gmail.com"
    email_smtp_port = 587
    email_subject = "Delete Pure File"
    # Bug fix: the Subject header must be separated from the body by a blank
    # line; previously the subject and body text were fused into a single
    # malformed header line.
    email_message = (
        "Subject: "
        + email_subject
        + "\n\n"
        + "Please remove from pure uuid {} the file {}."
    )

    # create SMTP session
    session = smtplib.SMTP(email_smtp_server, email_smtp_port)
    # start TLS for security
    session.starttls()
    # Authentication
    session.login(email_sender, email_sender_password)
    # sending the mail
    message = email_message.format(uuid, file_name)
    session.sendmail(email_sender, email_receiver, message)
    # terminating the session
    session.quit()
| 28.398374 | 99 | 0.634412 | 445 | 3,493 | 4.829213 | 0.357303 | 0.025593 | 0.012564 | 0.026989 | 0.122848 | 0.122848 | 0.122848 | 0.122848 | 0.122848 | 0.122848 | 0 | 0.00512 | 0.273118 | 3,493 | 122 | 100 | 28.631148 | 0.841276 | 0.221586 | 0 | 0.160494 | 0 | 0 | 0.038534 | 0 | 0 | 0 | 0 | 0.008197 | 0 | 1 | 0.061728 | false | 0.061728 | 0.098765 | 0 | 0.271605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0d836b79c1c35926c2be545a9cfa96aaad92778b | 1,992 | py | Python | cookiecutterassert/rules/run_script.py | yangzii0920/cookiecutterassert | d690bb06844821334e7b2b0e6361fb30556d718b | [
"Apache-2.0"
] | 3 | 2020-09-24T12:43:42.000Z | 2022-02-10T13:04:28.000Z | cookiecutterassert/rules/run_script.py | yangzii0920/cookiecutterassert | d690bb06844821334e7b2b0e6361fb30556d718b | [
"Apache-2.0"
] | 5 | 2020-11-05T22:04:07.000Z | 2021-07-07T15:45:38.000Z | cookiecutterassert/rules/run_script.py | cookiecutterassert/cooiecutterassert | 87a2b05c45c0cb30abfc11f944a6cb12a2863a09 | [
"Apache-2.0"
] | 4 | 2020-10-06T13:55:39.000Z | 2021-11-23T15:38:00.000Z | # Copyright 2020 Ford Motor Company
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import subprocess
from cookiecutterassert import messager
class RunScriptRule:
    """Assertion rule that runs a shell script inside a rendered output folder.

    The rule passes when the script exits with status code 0.
    """

    def __init__(self, options, testFolder, runFolder, script):
        self.script = script
        self.runFolder = runFolder
        self.testFolder = testFolder
        self.options = options

    def execute(self, outputFolder):
        """Run the script in outputFolder/runFolder; return True on a zero exit."""
        working_directory = str(os.path.join(outputFolder, self.runFolder))
        # shell=True keeps composite commands (pipes, &&, ...) in assertion
        # files working; the script text comes from the local test definition.
        process = subprocess.Popen(self.script, cwd=working_directory, shell=True)
        process.wait()
        if process.returncode != 0:
            errorMessage = "assertion runScript {} {} failed. with non-zero return code [{}]".format(
                self.runFolder, self.script, process.returncode)
            messager.printError(errorMessage)
            return False
        return True

    def __eq__(self, obj):
        return isinstance(obj, RunScriptRule) and (
            (obj.script, obj.runFolder, obj.testFolder, obj.options)
            == (self.script, self.runFolder, self.testFolder, self.options)
        )

    def __ne__(self, obj):
        return not self == obj

    def __str__(self):
        return "{0}: [testFolder={1}, runFolder={2}, script={3}, options={4}]".format(type(self).__name__, self.testFolder, self.runFolder, self.script, self.options)

    def __repr__(self):
        return self.__str__()
0d8b44c6cdb28214bde2bc0db794be64cdb76647 | 319 | py | Python | Desafios/Desafio048.py | vaniaferreira/Python | 5b3158836d47c0bb7bc446e6636e7b3dcea8a0ab | [
"MIT"
] | null | null | null | Desafios/Desafio048.py | vaniaferreira/Python | 5b3158836d47c0bb7bc446e6636e7b3dcea8a0ab | [
"MIT"
] | null | null | null | Desafios/Desafio048.py | vaniaferreira/Python | 5b3158836d47c0bb7bc446e6636e7b3dcea8a0ab | [
"MIT"
] | null | null | null | #Faça um programa que calcule a soma entre todos os números ímpares que são múltiplos de 3 e que se encontram
# no intervalo de 1 até 500.
soma = 0
cont = 0
for c in range(1,501,2):
if c % 3 == 0:
cont = cont + 1
soma = soma + c
print('A soma dos números solicitados {} são {}'.format(cont, soma))
| 29 | 109 | 0.642633 | 58 | 319 | 3.534483 | 0.637931 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.06383 | 0.263323 | 319 | 10 | 110 | 31.9 | 0.808511 | 0.423197 | 0 | 0 | 0 | 0 | 0.21978 | 0 | 0 | 0 | 0 | 0.1 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d8d5718379dda93fd6474b84b7d6a14a112142a | 529 | py | Python | sums-of-numbers-game/prepare_sum_objects.py | babyrobot-eu/core-modules | 7e8c006c40153fb649208c9a78fc71aa70243f69 | [
"MIT"
] | 1 | 2019-02-07T15:32:06.000Z | 2019-02-07T15:32:06.000Z | sums-of-numbers-game/prepare_sum_objects.py | babyrobot-eu/core-modules | 7e8c006c40153fb649208c9a78fc71aa70243f69 | [
"MIT"
] | 9 | 2020-01-28T22:09:41.000Z | 2022-03-11T23:39:17.000Z | sums-of-numbers-game/prepare_sum_objects.py | babyrobot-eu/core-modules | 7e8c006c40153fb649208c9a78fc71aa70243f69 | [
"MIT"
] | null | null | null | import pickle
from random import shuffle
# Target sum ranges shared by all children; each child receives them in an
# independently shuffled order, with a cursor ('current_sum') starting at 0.
sums = [(5, 105), (205, 305), (405, 1005), (1105, 1205), (1305, 1405)]

# The original code repeated the shuffle/dump/print block verbatim for
# child1..child3; a loop removes the triplication while producing the same
# files, same pickle contents, and same printed output.
for child_number in (1, 2, 3):
    shuffle(sums)
    state = {'sums': sums, 'current_sum': 0}
    # NOTE(review): assumes the 'child_data' directory already exists.
    with open('child_data/child{}.pkl'.format(child_number), 'wb') as f:
        pickle.dump(obj=state, file=f)
    print(state)
| 25.190476 | 71 | 0.640832 | 92 | 529 | 3.619565 | 0.402174 | 0.132132 | 0.135135 | 0.162162 | 0.513514 | 0.513514 | 0.513514 | 0.324324 | 0.324324 | 0 | 0 | 0.086475 | 0.147448 | 529 | 20 | 72 | 26.45 | 0.651885 | 0 | 0 | 0.166667 | 0 | 0 | 0.215501 | 0.119093 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d8dedee8a01cdd537a2a3df042a8dfbd8ef5e42 | 5,781 | py | Python | menuparser.py | tomjaspers/vubresto-server | 96b441c76d8219505742eb5d08793bfb454ff9c8 | [
"MIT"
] | 2 | 2015-03-09T14:26:57.000Z | 2015-03-30T17:30:56.000Z | menuparser.py | tomjaspers/vubresto-server | 96b441c76d8219505742eb5d08793bfb454ff9c8 | [
"MIT"
] | null | null | null | menuparser.py | tomjaspers/vubresto-server | 96b441c76d8219505742eb5d08793bfb454ff9c8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python27
import io
import os
import json
import logging
import datetime
import requests
import lxml.html
from lxml.cssselect import CSSSelector
from multiprocessing.dummy import Pool as ThreadPool
# Path where the JSONs will get written. Permissions are your job.
SAVE_PATH = '.'
# Urls of the pages that will get parsed
URL_ETTERBEEK_NL = 'https://my.vub.ac.be/resto/etterbeek'
URL_ETTERBEEK_EN = 'https://my.vub.ac.be/restaurant/etterbeek'
URL_JETTE_NL = 'https://my.vub.ac.be/resto/jette'
URL_JETTE_EN = 'https://my.vub.ac.be/restaurant/jette'
# Mapping of colors for the menus.
DEFAULT_COLOR = '#f0eb93' # very light yellow
COLOR_MAPPING = {
'soep': '#fdb85b', # yellow
'soup': '#fdb85b', # yellow
'menu 1': '#68b6f3', # blue
'dag menu': '#68b6f3', # blue
'dagmenu': '#68b6f3', # blue
'health': '#ff9861', # orange
'menu 2': '#cc93d5', # purple
'meals of the world': '#cc93d5', # purple
'fairtrade': '#cc93d5', # purple
'fairtrade menu': '#cc93d5', # purple
'veggie': '#87b164', # green
'veggiedag': '#87b164', # green
'pasta': '#de694a', # red
'pasta bar': '#de694a', # red
'wok': '#6c4c42', # brown
}
# Months in Dutch, to allow the parsing of the (Dutch) site
MONTHS = ['januari', 'februari', 'maart', 'april', 'mei', 'juni', 'juli',
          'augustus', 'september', 'oktober', 'november', 'december']
# Dutch month name -> month number (1-12), e.g. 'januari' -> 1.
LOCAL_MONTHS = {month: i for i, month in enumerate(MONTHS, 1)}
def is_veggiedag_img(img):
    """Return a truthy value when *img* is an element whose ``src``
    attribute mentions 'veggiedag' (the veggie-day banner image)."""
    if not img:
        return img
    return 'veggiedag' in img.get('src', '')
def normalize_text(text):
    """Collapse non-breaking spaces into regular spaces and strip the ends."""
    cleaned = text.replace(u'\xa0', u' ')
    return cleaned.strip()
def parse_restaurant(name, url):
    """Fetch *url* and scrape the weekly menu for restaurant *name*.

    Returns a list of ``{'date': 'YYYY-MM-DD', 'menus': [...]}`` dicts, one
    per day found on the page; each menu entry has 'name', 'dish', 'color'.
    """
    data = []
    # Construct CSS Selectors
    sel_day_divs = CSSSelector('#content .views-row')
    sel_date_span = CSSSelector('.date-display-single')
    sel_tablerows = CSSSelector('table tr')
    sel_img = CSSSelector('img')
    # Request and build the DOM Tree
    r = requests.get(url)
    tree = lxml.html.fromstring(r.text)
    # Apply selector to get divs representing 1 day
    day_divs = sel_day_divs(tree)
    for day_div in day_divs:
        menus = []
        # Apply selector to get date span (contains date string of day)
        date_span = sel_date_span(day_div)
        # date string should be format '29 september 2014', normally
        date_string = normalize_text(date_span[0].text_content()).lower()
        date_components = date_string.split()[1:]
        # date_string is already normalized+lowercased, so month_name equals
        # date_components[1]; the extra normalize below is redundant but harmless.
        month_name = normalize_text(date_components[1]).lower()
        month = LOCAL_MONTHS.get(date_components[1], None)
        if month:
            date = datetime.date(int(date_components[2]),  # year
                                 month,  # month
                                 int(date_components[0]))  # day
        else:
            # If we couldn't find a month, we try to use the previous date
            logging.warning("{0} - Failed to get a month \
for the month_name {1} ".format(name, month_name))
            try:
                # NOTE(review): indexing the result of map() only works on
                # Python 2; on Python 3 map() returns an iterator.
                prev_date_components = map(int, data[-1]['date'].split('-'))
                prev_date = datetime.date(prev_date_components[0],  # year
                                          prev_date_components[1],  # month
                                          prev_date_components[2])  # day
                date = prev_date + datetime.timedelta(days=1)
            except Exception:
                # If we can't find any date, we'll skip the day
                logging.exception("{0} - Couldn't derive date \
from previous dates".format(name))
                continue
        # Get the table rows
        tablerows = sel_tablerows(day_div)
        try:
            for tr in tablerows:
                tds = tr.getchildren()
                menu_name = normalize_text(tds[0].text_content())
                menu_dish = normalize_text(tds[1].text_content())
                # Sometimes there is no menu name,
                # but just an image (e.g., for "Veggiedag")
                if not menu_name:
                    img = sel_img(tds[0])
                    img = img[0] if img else None
                    menu_name = 'Veggiedag' if is_veggiedag_img(img) else 'Menu'
                menu_color = COLOR_MAPPING.get(menu_name.lower(), None)
                if menu_color is None:
                    logging.warning(name + " - No color found for the menu: '" +
                                    menu_name + "' (" + str(date) + ")")
                    menu_color = DEFAULT_COLOR
                if menu_dish:
                    menus.append({'name': menu_name,
                                  'dish': menu_dish,
                                  'color': menu_color})
        except:
            # cba -- NOTE(review): bare except silently drops the whole day's
            # table on any parsing error; consider narrowing/logging.
            pass
        data.append({'date': str(date), 'menus': menus})
    return data
def write_to_json(data, filename):
    """Serialize *data* as UTF-8 JSON into SAVE_PATH/filename."""
    destination = os.path.join(SAVE_PATH, filename)
    with io.open(destination, 'w', encoding='utf8') as handle:
        handle.write(unicode(json.dumps(data, ensure_ascii=False)))
# NOTE(review): tuple-parameter unpacking in the signature is Python-2-only
# syntax (removed in Python 3); it lets this function be mapped directly over
# a list of (name, url) pairs by ThreadPool.map.
def parse_and_save((name, url)):
    """Parse one restaurant page and persist its menus as '<name>.json'.

    Parse failures are logged and produce an empty JSON list; save failures
    are logged and swallowed.
    """
    try:
        data = parse_restaurant(name, url)
    except Exception:
        logging.exception(name + " - Failed to parse")
        data = []
    try:
        write_to_json(data, name.lower() + '.json')
    except Exception:
        logging.exception(name + " - Failed to save to json")
def main():
    """Configure logging, then fetch and persist both restaurants' menus
    (Dutch and English pages) concurrently."""
    # Configure the logger
    logging.basicConfig(filename='menuparser.log', level='WARNING')
    restaurants = [
        ('Etterbeek.nl', URL_ETTERBEEK_NL),
        ('Jette.nl', URL_JETTE_NL),
        ('Etterbeek.en', URL_ETTERBEEK_EN),
        ('Jette.en', URL_JETTE_EN),
    ]
    # Network-bound work: one thread per page.
    workers = ThreadPool(4)
    workers.map(parse_and_save, restaurants)
if __name__ == "__main__":
main()
| 35.036364 | 80 | 0.572219 | 706 | 5,781 | 4.533994 | 0.331445 | 0.039363 | 0.012496 | 0.014995 | 0.056232 | 0.056232 | 0.056232 | 0 | 0 | 0 | 0 | 0.022172 | 0.305656 | 5,781 | 164 | 81 | 35.25 | 0.775287 | 0.144612 | 0 | 0.092437 | 0 | 0 | 0.14577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.008403 | 0.07563 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d902f3628cad0645f5a28226144694853af64ef | 1,673 | py | Python | utils_data.py | vkola/peds2019 | 232ee1400e5220a8f928c1bab0bfbd9d20552308 | [
"MIT"
] | 13 | 2019-09-03T09:42:12.000Z | 2022-03-23T02:14:46.000Z | utils_data.py | vkola/peds2019 | 232ee1400e5220a8f928c1bab0bfbd9d20552308 | [
"MIT"
] | 5 | 2020-12-17T09:38:22.000Z | 2021-04-30T00:29:54.000Z | utils_data.py | vkola/peds2019 | 232ee1400e5220a8f928c1bab0bfbd9d20552308 | [
"MIT"
] | 7 | 2019-07-08T19:04:00.000Z | 2022-02-19T09:04:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 16:05:54 2019
@author: Chonghua Xue (Kolachalama's Lab, BU)
"""
from torch.utils.data import Dataset
# Vocabularies keyed by the "gapped" flag: True prepends the gap symbol '-'
# at index 0, False starts directly with the 20 standard amino acids.
_AA = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
       'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
vocab_o = {True: ['-'] + _AA, False: list(_AA)}
# Bidirectional symbol <-> index maps for the output vocabulary.
aa2id_o = {g: {aa: idx for idx, aa in enumerate(v)} for g, v in vocab_o.items()}
id2aa_o = {g: {idx: aa for idx, aa in enumerate(v)} for g, v in vocab_o.items()}
# The input vocabulary additionally carries start/end-of-sequence markers.
vocab_i = {g: v + ['<SOS>', '<EOS>'] for g, v in vocab_o.items()}
aa2id_i = {g: {aa: idx for idx, aa in enumerate(v)} for g, v in vocab_i.items()}
id2aa_i = {g: {idx: aa for idx, aa in enumerate(v)} for g, v in vocab_i.items()}
class ProteinSeqDataset(Dataset):
    """Dataset of protein sequences read from a text file (one sequence per
    line), with each residue character mapped to its input-vocabulary id."""

    def __init__(self, fn, gapped=True):
        # Read raw lines, then encode every residue via the aa2id_i table
        # for the requested (gapped or ungapped) vocabulary.
        with open(fn, 'r') as handle:
            lines = [line.strip('\n') for line in handle]
        mapping = aa2id_i[gapped]
        self.data = [[mapping[ch] for ch in line] for line in lines]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
def collate_fn(batch):
    """Return ``(batch, flat)`` where *flat* concatenates all sequences."""
    flat = []
    for sequence in batch:
        flat.extend(sequence)
    return batch, flat
0d97e6e62284344f9f71b3d49de7c9c09fe6e5dd | 4,122 | py | Python | experimentum/Storage/Migrations/Schema.py | PascalKleindienst/experimentum | 5d547e0e8135d4d7ffe42b0c8a57e70a7ac2af4b | [
"Apache-2.0"
] | null | null | null | experimentum/Storage/Migrations/Schema.py | PascalKleindienst/experimentum | 5d547e0e8135d4d7ffe42b0c8a57e70a7ac2af4b | [
"Apache-2.0"
] | 3 | 2019-04-17T08:07:12.000Z | 2019-04-28T15:24:18.000Z | experimentum/Storage/Migrations/Schema.py | PascalKleindienst/experimentum | 5d547e0e8135d4d7ffe42b0c8a57e70a7ac2af4b | [
"Apache-2.0"
] | null | null | null | """The :py:class:`.Schema` class provides a database agnostic way of manipulating tables.
Tables
======
Creating Tables
---------------
To create a new database table, the :py:meth:`~.Schema.create` method is used.
The :py:meth:`~.Schema.create` method accepts a table name as its argument and returns
a :py:class:`.Blueprint` instance that can be used to define the new table.
When creating the table, you may use any of the :py:class:`.Blueprint` column methods
to define the table's columns::
with self.schema.create('users') as table:
table.increments('id')
Checking Existence
------------------
To check if a table or column exist you can use the :py:meth:`~.Schema.has_table` or
:py:meth:`~.Schema.has_column` methods respectively::
if self.schema.has_table('users'):
# ...
if self.schema.has_column('users', 'email'):
# ...
Renaming / Dropping Tables
--------------------------
To rename an existing database table, use the :py:meth:`~.Schema.rename` method::
self.schema.rename('from', 'to')
To drop a table, you can use the :py:meth:`~.Schema.drop` or
:py:meth:`~.Schema.drop_if_exists` methods::
self.schema.drop('users')
self.schema.drop_if_exists('users')
"""
from experimentum.cli import print_failure
from contextlib import contextmanager
class Schema(object):
    """Database agnostic way of manipulating tables.

    Inspired by the Laravel Schema Builder
    (https://laravel.com/docs/5.6/migrations#tables).

    Attributes:
        app (App): Main App Class.
        store (AbstractStore): Data Store.
    """

    def __init__(self, app):
        """Keep a reference to the app and resolve the data store from it.

        Args:
            app (App): Main App Class.
        """
        self.app = app
        self.store = app.make('store')

    @contextmanager
    def create(self, name):
        """Yield a blueprint for a brand-new table, then build it.

        Args:
            name (str): Name of the table.

        Yields:
            Blueprint: New instance of a table blueprint.
        """
        try:
            blueprint = self.app.make('blueprint', name)
            blueprint.create()
            yield blueprint
        except Exception as exc:
            print_failure('Error while creating blueprint: ' + str(exc), 1)
        self._build(blueprint)

    @contextmanager
    def table(self, name):
        """Yield a blueprint for an already existing table, then build it.

        Args:
            name (str): Name of the table.

        Yields:
            Blueprint: New instance of a table blueprint.
        """
        try:
            blueprint = self.app.make('blueprint', name)
            yield blueprint
        except Exception as exc:
            print_failure('Error while creating blueprint: ' + str(exc), 1)
        self._build(blueprint)

    def rename(self, old, new):
        """Rename a table from *old* to *new*.

        Args:
            old (str): Old table name.
            new (str): New table name.
        """
        self.store.rename(old, new)

    def drop(self, name):
        """Drop the table called *name*.

        Args:
            name (str): Name of the table.
        """
        self.store.drop(name)

    def drop_if_exists(self, name):
        """Drop the table called *name* when it exists.

        Args:
            name (str): Name of the table.
        """
        self.store.drop_if_exists(name)

    def has_table(self, table):
        """Return whether the database contains *table*.

        Args:
            table (str): Table to check existence of.
        """
        return self.store.has_table(table)

    def has_column(self, table, column):
        """Return whether *table* contains *column*.

        Args:
            table (str): Table to check.
            column (str): Column to check.
        """
        return self.store.has_column(table, column)

    def _build(self, blueprint):
        """Hand the blueprint to the store, dispatching on its action.

        Args:
            blueprint (Blueprint): Blueprint to build.
        """
        handlers = {'create': self.store.create, 'alter': self.store.alter}
        handler = handlers.get(blueprint.action)
        if handler is not None:
            handler(blueprint)
| 26.423077 | 89 | 0.586608 | 512 | 4,122 | 4.671875 | 0.228516 | 0.016722 | 0.035117 | 0.031355 | 0.353261 | 0.300167 | 0.224916 | 0.20485 | 0.200669 | 0.200669 | 0 | 0.001367 | 0.29015 | 4,122 | 155 | 90 | 26.593548 | 0.816131 | 0.558709 | 0 | 0.368421 | 0 | 0 | 0.06694 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.236842 | false | 0 | 0.052632 | 0 | 0.368421 | 0.394737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0d98cd3509f57f05513feadf15808accb6f7add9 | 601 | py | Python | kaskopy/items.py | aspirin1988/KASKO | 924278f67a303a861e2367412bbc1dc3df59c742 | [
"MIT"
] | null | null | null | kaskopy/items.py | aspirin1988/KASKO | 924278f67a303a861e2367412bbc1dc3df59c742 | [
"MIT"
] | null | null | null | kaskopy/items.py | aspirin1988/KASKO | 924278f67a303a861e2367412bbc1dc3df59c742 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from kaskopy.models import Car, RawData
class CarItem(scrapy.Item):
    """Scraped car offer: brand/model/year identity plus the observed price."""
    brand = scrapy.Field()
    model = scrapy.Field()
    year = scrapy.Field()
    price = scrapy.Field()

    def save(self):
        """Persist the car (created on first sight) and record its price."""
        identity = {
            'mark': self['brand'],
            'model': self['model'],
            'year': self['year']
        }
        car, created = Car.get_or_create(**identity)
        RawData.create(price=self['price'], car=car)
| 23.115385 | 52 | 0.592346 | 74 | 601 | 4.783784 | 0.594595 | 0.124294 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002232 | 0.254576 | 601 | 25 | 53 | 24.04 | 0.787946 | 0.231281 | 0 | 0 | 0 | 0 | 0.070175 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.133333 | 0 | 0.533333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0d9aee5955af9ee204ebd39cc82058caf26d7cbf | 2,466 | py | Python | GameLogServer/GameLogServer/log_reader.py | Frankity/IW4M-Admin | 515443c84a574944e946d3691a4682d916fd582e | [
"MIT"
] | null | null | null | GameLogServer/GameLogServer/log_reader.py | Frankity/IW4M-Admin | 515443c84a574944e946d3691a4682d916fd582e | [
"MIT"
] | null | null | null | GameLogServer/GameLogServer/log_reader.py | Frankity/IW4M-Admin | 515443c84a574944e946d3691a4682d916fd582e | [
"MIT"
] | null | null | null | import re
import os
import time
class LogReader(object):
    """Incrementally reads game log files, returning only the bytes appended
    since the previous call for each tracked path.

    ``read_file`` returns:
        False -- invalid/unsafe path or unreadable file.
        ''    -- file seen for the first time, nothing new, or change discarded.
        str   -- newly appended log data, utf-8 decoded.
    """

    def __init__(self):
        # path -> {'length': last observed size, 'read': last read timestamp}
        self.log_file_sizes = {}
        # (if the file changes more than this, ignore) - 1 MB
        self.max_file_size_change = 1000000
        # (if the time between checks is greater, ignore) - ~16.7 minutes
        self.max_file_time_change = 1000

    def read_file(self, path):
        """Return data appended to *path* since the last call (see class doc)."""
        # prevent traversing directories
        # BUG FIX: the original pattern was written as 'r^.+\.\.\\.+$' -- the
        # raw-string prefix 'r' ended up INSIDE the quotes, so the regex could
        # never match and the traversal check was a no-op.
        if re.search(r'\.\.[\\/]', path):
            return False

        # must be a valid log path and log file (under a userraw/ or mods/
        # directory). BUG FIX: the original class [\\|\/] also accepted a
        # literal '|' as separator, and the '.' before 'log' was unescaped.
        if not re.search(r'^.+[\\/](userraw|mods)[\\/].+\.log$', path):
            return False

        # first time we see this file: record its size, return no data yet
        if path not in self.log_file_sizes:
            self.log_file_sizes[path] = {
                'length': self.file_length(path),
                'read': time.time()
            }
            return ''

        # grab the previous values
        last_length = self.log_file_sizes[path]['length']
        last_read = self.log_file_sizes[path]['read']

        # the file is being tracked already
        new_file_size = self.file_length(path)

        # the log size was unable to be read (probably the wrong path)
        if new_file_size < 0:
            return False

        now = time.time()
        file_size_difference = new_file_size - last_length
        time_difference = now - last_read

        # update the new size and actually read the data
        self.log_file_sizes[path] = {
            'length': new_file_size,
            'read': now
        }

        # if it's been too long since we read and the amount changed is too
        # great, discard it.
        # todo: do we really want old events? maybe make this an "or"
        if file_size_difference > self.max_file_size_change and time_difference > self.max_file_time_change:
            return ''

        # nothing appended (or the file was truncated): nothing to report
        if file_size_difference <= 0:
            return ''

        return self.get_file_lines(path, file_size_difference)

    def get_file_lines(self, path, length):
        """Return the last *length* bytes of *path* decoded as utf-8, or False.

        Uses a with-statement so the handle is closed even when seek/read
        fails (the original leaked the handle on error).
        """
        try:
            with open(path, 'rb') as file_handle:
                file_handle.seek(-length, 2)
                return file_handle.read(length).decode('utf-8')
        except (OSError, IOError, ValueError, UnicodeDecodeError):
            return False

    def file_length(self, path):
        """Return the size of *path* in bytes, or -1 if it cannot be stat'ed."""
        try:
            return os.stat(path).st_size
        except (OSError, ValueError):
            return -1
reader = LogReader()
| 32.447368 | 108 | 0.576642 | 321 | 2,466 | 4.218069 | 0.34891 | 0.064993 | 0.048744 | 0.070901 | 0.134417 | 0.057607 | 0 | 0 | 0 | 0 | 0 | 0.010969 | 0.33455 | 2,466 | 75 | 109 | 32.88 | 0.814138 | 0.219789 | 0 | 0.235294 | 0 | 0 | 0.046025 | 0.019874 | 0 | 0 | 0 | 0.013333 | 0 | 1 | 0.078431 | false | 0 | 0.058824 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0da30113756629b303663abfe1005ea53d21c7f3 | 551 | py | Python | lopy_gateway/config.py | haroal/choco-lora | d72cce0bb3e090463fafb993fb1f51db2c634416 | [
"MIT"
] | null | null | null | lopy_gateway/config.py | haroal/choco-lora | d72cce0bb3e090463fafb993fb1f51db2c634416 | [
"MIT"
] | null | null | null | lopy_gateway/config.py | haroal/choco-lora | d72cce0bb3e090463fafb993fb1f51db2c634416 | [
"MIT"
] | null | null | null | """ LoPy LoRaWAN Nano Gateway configuration options """
import machine
import ubinascii
# Gateway MAC address derived from the board's unique id (uppercase hex).
WIFI_MAC = ubinascii.hexlify(machine.unique_id()).upper()
# Set the Gateway ID to be the first 3 bytes of MAC address + 'FFFE' + last 3 bytes of MAC address
GATEWAY_ID = '30aea4fffe4e5638' #WIFI_MAC[:6] + "FFFE" + WIFI_MAC[6:12]
# Network server the gateway forwards packets to, and its UDP port.
SERVER = 'router.eu.thethings.network'
PORT = 1700
# NTP server and resync period (seconds).
NTP = "pool.ntp.org"
NTP_PERIOD_S = 3600
# WiFi credentials the gateway connects through.
# NOTE(review): credentials are hard-coded in source; consider externalizing.
WIFI_SSID = 'S9-Alexis'
WIFI_PASS = 'aeiouy95'
# for EU868
LORA_FREQUENCY = 868100000
LORA_GW_DR = "SF7BW125" # DR_5
LORA_NODE_DR = 5
| 23.956522 | 99 | 0.731397 | 86 | 551 | 4.511628 | 0.662791 | 0.054124 | 0.041237 | 0.056701 | 0.092784 | 0 | 0 | 0 | 0 | 0 | 0 | 0.092473 | 0.15608 | 551 | 22 | 100 | 25.045455 | 0.741935 | 0.362976 | 0 | 0 | 0 | 0 | 0.234604 | 0.079179 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.076923 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0db1ad1545f9291e0a6a356a3b76f6cf63cf27eb | 2,132 | py | Python | examples/jet_substructure/syn.py | juliovicenzi/logicnets | ce4ca89e3b11702bade591c320177f17b7d8d187 | [
"Apache-2.0"
] | null | null | null | examples/jet_substructure/syn.py | juliovicenzi/logicnets | ce4ca89e3b11702bade591c320177f17b7d8d187 | [
"Apache-2.0"
] | null | null | null | examples/jet_substructure/syn.py | juliovicenzi/logicnets | ce4ca89e3b11702bade591c320177f17b7d8d187 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2021 Xilinx, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch
from torch.utils.data import DataLoader
from logicnets.nn import generate_truth_tables, \
lut_inference, \
module_list_to_verilog_module
from train import configs, model_config, dataset_config, test
from dataset import JetSubstructureDataset
from models import JetSubstructureNeqModel, JetSubstructureLutModel
from logicnets.synthesis import synthesize_and_get_resource_counts
from logicnets.util import proc_postsynth_file
if __name__ == "__main__":
parser = ArgumentParser(description="Synthesize convert a PyTorch trained model into verilog")
parser.add_argument('--fpga-part', type=str, default="xcu280-fsvh2892-2L-e",
help="FPGA synthesis part (default: %(default)s)")
parser.add_argument('--clock-period', type=float, default=1.0,
help="Target clock frequency to use during Vivado synthesis (default: %(default)s)")
parser.add_argument('--log-dir', type=str, default='./log',
help="A location to store the log output of the training run and the output model (default: %(default)s)")
args = parser.parse_args()
if not os.path.exists(args.log_dir):
print(f"Could not find log directory {args.log_dir}")
exit(-1)
print("Running out-of-context synthesis")
ret = synthesize_and_get_resource_counts(
args.log_dir,
"logicnet",
fpga_part=args.fpga_part,
clk_period_ns=args.clock_period,
post_synthesis=1)
| 39.481481 | 130 | 0.720919 | 290 | 2,132 | 5.162069 | 0.537931 | 0.04008 | 0.034068 | 0.021376 | 0.082832 | 0.042752 | 0 | 0 | 0 | 0 | 0 | 0.011641 | 0.194184 | 2,132 | 53 | 131 | 40.226415 | 0.859721 | 0.263133 | 0 | 0 | 0 | 0.032258 | 0.270566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.322581 | 0 | 0.322581 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
0db528e74438080829d841b6eb4aaef15ebfc109 | 325 | py | Python | tests/views/test_ping.py | Eldies/image_storage | 6bdf55b426813da2e45407418a09cc585b245a22 | [
"MIT"
] | null | null | null | tests/views/test_ping.py | Eldies/image_storage | 6bdf55b426813da2e45407418a09cc585b245a22 | [
"MIT"
] | null | null | null | tests/views/test_ping.py | Eldies/image_storage | 6bdf55b426813da2e45407418a09cc585b245a22 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from app import app
class TestPingView(unittest.TestCase):
    """Smoke test for the /ping health-check endpoint."""

    def setUp(self):
        # Run the app in testing mode and grab its test client.
        app.config['TESTING'] = True
        self.client = app.test_client()

    def test_ping(self):
        body = self.client.get('/ping').data.decode('utf-8')
        assert body == 'pong'
| 21.666667 | 54 | 0.621538 | 41 | 325 | 4.878049 | 0.609756 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007968 | 0.227692 | 325 | 14 | 55 | 23.214286 | 0.788845 | 0.064615 | 0 | 0 | 0 | 0 | 0.069536 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.222222 | false | 0 | 0.222222 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0dbe7e52a6fcd5026bcaa13575dd6d512f98dd47 | 1,368 | py | Python | commonware/response/middleware.py | Osmose/commonware | e2e02ad47b553362929bfb741105f10c56a1bdc8 | [
"BSD-3-Clause"
] | null | null | null | commonware/response/middleware.py | Osmose/commonware | e2e02ad47b553362929bfb741105f10c56a1bdc8 | [
"BSD-3-Clause"
] | null | null | null | commonware/response/middleware.py | Osmose/commonware | e2e02ad47b553362929bfb741105f10c56a1bdc8 | [
"BSD-3-Clause"
] | null | null | null | import inspect
import time
from django.conf import settings
class _statsd(object):
    """No-op stand-in used when the ``statsd`` package is not installed."""

    def incr(s, *a, **kw):
        pass

    def timing(s, *a, **kw):
        pass


# Prefer the real statsd client; silently fall back to the no-op stub so
# callers can use ``statsd.incr`` / ``statsd.timing`` unconditionally.
try:
    from statsd import statsd
except ImportError:
    statsd = _statsd()
class FrameOptionsHeader(object):
    """Middleware that adds an ``X-Frame-Options: DENY`` header to responses.

    Set ``response['x-frame-options'] = 'SAMEORIGIN'`` beforehand to override
    the value, or set a ``no_frame_options`` attribute on the response to
    skip the header entirely.
    """

    def process_response(self, request, response):
        if hasattr(response, 'no_frame_options'):
            return response
        if 'x-frame-options' not in response:
            response['x-frame-options'] = 'DENY'
        return response
class StrictTransportMiddleware(object):
    """Middleware that sets Strict-Transport-Security on HTTPS responses.

    The ``STS_MAX_AGE`` setting controls the max-age value (default
    2592000 = 30 days) and the ``STS_SUBDOMAINS`` boolean (default False)
    adds ``includeSubDomains``.
    """

    def process_response(self, request, response):
        # Only HTTPS responses carry the header.
        if not request.is_secure():
            return response
        max_age = getattr(settings, 'STS_MAX_AGE', 2592000)  # 30 days.
        header = 'max-age=%d' % max_age
        if getattr(settings, 'STS_SUBDOMAINS', False):
            header += '; includeSubDomains'
        response['Strict-Transport-Security'] = header
        return response
| 24.872727 | 73 | 0.625731 | 158 | 1,368 | 5.335443 | 0.449367 | 0.071174 | 0.061684 | 0.01898 | 0.092527 | 0.092527 | 0.092527 | 0 | 0 | 0 | 0 | 0.01003 | 0.271199 | 1,368 | 54 | 74 | 25.333333 | 0.835507 | 0.237573 | 0 | 0.241379 | 0 | 0 | 0.13004 | 0.025202 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0.068966 | 0.172414 | 0 | 0.517241 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
0dbee3b7f12c94c66b785df1beab2df77b47d739 | 22,992 | py | Python | tests/test_btc_rawtx_zcash.py | VDamas/app-cryptoescudo | 9e53ccdd836f7b5c787927c74eba3b0ac5d079b6 | [
"Apache-2.0"
] | null | null | null | tests/test_btc_rawtx_zcash.py | VDamas/app-cryptoescudo | 9e53ccdd836f7b5c787927c74eba3b0ac5d079b6 | [
"Apache-2.0"
] | null | null | null | tests/test_btc_rawtx_zcash.py | VDamas/app-cryptoescudo | 9e53ccdd836f7b5c787927c74eba3b0ac5d079b6 | [
"Apache-2.0"
] | 1 | 2022-02-08T22:42:41.000Z | 2022-02-08T22:42:41.000Z | import pytest
from dataclasses import dataclass, field
from functools import reduce
from typing import List, Optional
from helpers.basetest import BaseTestBtc, LedgerjsApdu, TxData, CONSENSUS_BRANCH_ID
from helpers.deviceappbtc import DeviceAppBtc, CommException
# Test data below is from a Zcash test log from the Live team.
# Preamble APDUs sent before a Zcash tx: app version checks, public key
# derivations on 44'/133' paths and coin-info requests.
test_zcash_prefix_cmds = [
    LedgerjsApdu(   # Get version
        commands=["b001000000"],
        # expected_resp="01055a63617368--------------0102" # i.e. "Zcash" + "1.3.23" (not checked)
    ),
    LedgerjsApdu(
        commands=[
            "e040000015058000002c80000085800000000000000000000000",    # GET PUBLIC KEY - on 44'/133'/0'/0/0 path
            "e016000000",   # Coin info
        ],
        expected_resp="1cb81cbd01055a63617368035a4543"  # "Zcash" + "ZEC"
    ),
    LedgerjsApdu(
        commands=[
            "e040000009028000002c80000085",     # Get Public Key - on path 44'/133'
            "e016000000",                       # Coin info
        ],
        expected_resp="1cb81cbd01055a63617368035a4543"
    ),
    LedgerjsApdu(
        commands=[
            "e040000009028000002c80000085",             # path 44'/133'
            "e04000000d038000002c8000008580000000",     # path 44'/133'/0'
            "e04000000d038000002c8000008580000001",     # path 44'/133'/1'
            "b001000000"
        ],
        # expected_resp="01055a63617368--------------0102"
    ),
    LedgerjsApdu(
        commands=[
            "e040000015058000002c80000085800000000000000000000004",     # Get Public Key - on path 44'/133'/0'/0/4
            "e016000000",                                               # Coin info
        ],
        expected_resp="1cb81cbd01055a63617368035a4543"
    ),
    LedgerjsApdu(
        commands=["b001000000"],
        # expected_resp="01055a63617368--------------0102"
    ),
    LedgerjsApdu(
        commands=[
            "e040000015058000002c80000085800000000000000000000004",     # Get Public Key - on path 44'/133'/0'/0/4
            "e016000000"
        ],
        expected_resp="1cb81cbd01055a63617368035a4543"
    ),
    LedgerjsApdu(
        commands=["b001000000"],
        # expected_resp="01055a63617368--------------0102"
    )
]
# GET TRUSTED INPUT APDU sequence for the UTXO spent by the tx under test.
# In expected_resp, "--" pairs are wildcard bytes (random session id / HMAC).
test_zcash_tx_sign_gti = [
    LedgerjsApdu(   # GET TRUSTED INPUT
        commands=[
            "e042000009000000010400008001",
            "e042800025edc69b8179fd7c6a11a8a1ba5d17017df5e09296c3a1acdada0d94e199f68857010000006b",
            "e042800032483045022100e8043cd498714122a78b6ecbf8ced1f74d1c65093c5e2649336dfa248aea9ccf022023b13e57595635452130",
            "e0428000321c91ed0fe7072d295aa232215e74e50d01a73b005dac01210201e1c9d8186c093d116ec619b7dad2b7ff0e7dd16f42d458da",
            "e04280000b1100831dc4ff72ffffff00",
            "e04280000102",
            "e042800022a0860100000000001976a914fa9737ab9964860ca0c3e9ad6c7eb3bc9c8f6fb588ac",
            "e0428000224d949100000000001976a914b714c60805804d86eb72a38c65ba8370582d09e888ac",
            "e04280000400000000",
        ],
        expected_resp="3200" + "--"*2 + "20b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51010000004d94910000000000" + "--"*8
    ),
]
# Scenario where the user rejects the tx on-device: the FINALIZE FULL step
# is expected to fail with SW 0x6985 (conditions of use not satisfied).
test_zcash_tx_to_sign_abandonned = [
    LedgerjsApdu(   # GET PUBLIC KEY
        commands=["e040000015058000002c80000085800000000000000100000001"],  # on 44'/133'/0'/1/1
    ),
    LedgerjsApdu(   # UNTRUSTED HASH TRANSACTION INPUT START
        commands=[
            "e0440005090400008085202f8901",
            "e04480053b013832004d0420b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51010000004d9491000000000045e1e144cb88d4d800",
            "e044800504ffffff00",
        ]
    ),
    LedgerjsApdu(   # UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL
        commands=[
            "e04aff0015058000002c80000085800000000000000100000003",
            # "e04a0000320240420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac39498200000000001976a91425ea06"
            "e04a0000230140420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
        ],  # tx aborted on 2nd command
        expected_sw="6985"
    ),
]
# Preamble APDUs replayed after the user abandons a tx, before restarting it.
test_zcash_tx_sign_restart_prefix_cmds = [
    LedgerjsApdu(
        commands=["b001000000"],
        # expected_resp="01055a63617368--------------0102"
    ),
    LedgerjsApdu(
        commands=[
            "e040000015058000002c80000085800000000000000000000004",
            "e016000000",
        ],
        expected_resp="1cb81cbd01055a63617368035a4543"
    ),
    LedgerjsApdu(
        commands=["b001000000"],
        # expected_resp="01055a63617368--------------0102"
    )
]
# Full happy-path scenario: GTI, hash start, finalize (accepted), then the
# per-input signing pass. Reuses the GTI APDUs defined above.
test_zcash_tx_to_sign_finalized = test_zcash_tx_sign_gti + [
    LedgerjsApdu(   # GET PUBLIC KEY
        commands=["e040000015058000002c80000085800000000000000100000001"],  # on 44'/133'/0'/1/1
    ),
    LedgerjsApdu(   # UNTRUSTED HASH TRANSACTION INPUT START
        commands=[
            "e0440005090400008085202f8901",
            "e04480053b""013832004d""0420b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51""01000000""4d94910000000000""45e1e144cb88d4d8""00",
            "e044800504ffffff00",
        ]
    ),
    LedgerjsApdu(   # UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL
        commands=[
            "e04aff0015058000002c80000085800000000000000100000003",
            # "e04a0000320240420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac39498200000000001976a91425ea06"
            "e04a0000230140420f00000000001976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
            # NOTE(review): there is no comma between the literal above and the one
            # below, so Python concatenates them into a single APDU string — unlike
            # the abandonned-scenario list where the finalize data ends here.
            # Confirm this implicit concatenation is intended.
            "e04a8000045eb3f840"
        ],
        expected_resp="0000"
    ),
    LedgerjsApdu(
        commands=[
            "e044008509""0400008085202f8901",
            "e04480853b""013832004d04""20b7c68231303b2425a91b12f05bd6935072e9901137ae30222ef6d60849fc51""01000000""4d94910000000000""45e1e144cb88d4d8""19",
            "e04480851d""76a9140a146582553b2f5537e13cef6659e82ed8f69b8f88ac""ffffff00",
            "e048000015""058000002c80000085800000000000000100000001"
        ],
        check_sig_format=True
    )
]
# Parametrization list for test_replay_zcash_test: each entry is one full
# ledgerjs APDU scenario replayed against the device.
ledgerjs_test_data = [
    test_zcash_prefix_cmds, test_zcash_tx_sign_gti, test_zcash_tx_to_sign_abandonned,
    test_zcash_tx_sign_restart_prefix_cmds, test_zcash_tx_to_sign_finalized
]
# Raw serialized Zcash (Sapling v4) UTXO tx used by test_get_single_trusted_input,
# annotated field by field with byte offsets.
utxo_single = bytes.fromhex(
    # https://sochain.com/api/v2/tx/ZEC/ec9033381c1cc53ada837ef9981c03ead1c7c41700ff3a954389cfaddc949256
    # Version @offset 0
    "04000080"
    # versionGroupId @offset 4
    "85202f89"
    # Input count @offset 8
    "01"
    # Input prevout hash @offset 9
    "53685b8809efc50dd7d5cb0906b307a1b8aa5157baa5fc1bd6fe2d0344dd193a"
    # Input prevout idx @offset 41
    "00000000"
    # Input script length @offset 45
    "6b"
    # Input script (107 bytes) @ offset 46
    "483045022100ca0be9f37a4975432a52bb65b25e483f6f93d577955290bb7fb0"
    "060a93bfc92002203e0627dff004d3c72a957dc9f8e4e0e696e69d125e4d8e27"
    "5d119001924d3b48012103b243171fae5516d1dc15f9178cfcc5fdc67b0a8830"
    "55c117b01ba8af29b953f6"
    # Input sequence @offset 151
    "ffffffff"
    # Output count @offset 155
    "01"
    # Output #1 value @offset 156
    "4072070000000000"
    # Output #1 script length @offset 164
    "19"
    # Output #1 script (25 bytes) @offset 165
    "76a91449964a736f3713d64283fd0018626ba50091c7e988ac"
    # Locktime @offset 190
    "00000000"
    # Extra payload (size of everything remaining, specific to btc app inner protocol @offset 194
    "0F"
    # Expiry @offset 195
    "00000000"
    # valueBalance @offset 199
    "0000000000000000"
    # vShieldedSpend @offset 207
    "00"
    # vShieldedOutput @offset 208
    "00"
    # vJoinSplit @offset 209
    "00"
)
# UTXOs spent by tx_to_sign below, serialized in Zcash Sapling v4 format and
# annotated field by field with byte offsets.
utxos = [
    # Considered a segwit tx - segwit flags couldn't be extracted from raw
    # Get Trusted Input APDUs as they are not supposed to be sent w/ these APDUs.
    bytes.fromhex(
        # Version @offset 0
        "04000080"
        # versionGroupId @offset 4
        "85202f89"
        # Input count @offset 8
        "01"
        # Input prevout hash @offset 9
        "edc69b8179fd7c6a11a8a1ba5d17017df5e09296c3a1acdada0d94e199f68857"
        # Input prevout idx @offset 41
        "01000000"
        # Input script length @offset 45
        "6b"
        # Input script (107 bytes) @ offset 46
        "483045022100e8043cd498714122a78b6ecbf8ced1f74d1c65093c5e2649336d"
        "fa248aea9ccf022023b13e575956354521301c91ed0fe7072d295aa232215e74"
        "e50d01a73b005dac01210201e1c9d8186c093d116ec619b7dad2b7ff0e7dd16f"
        "42d458da1100831dc4ff72"
        # Input sequence @offset 153
        "ffffff00"
        # Output count @offset 157
        "02"
        # Output #1 value @offset 160
        "a086010000000000"
        # Output #1 script length @offset 168
        "19"
        # Output #1 script (25 bytes) @offset 167
        "76a914fa9737ab9964860ca0c3e9ad6c7eb3bc9c8f6fb588ac"
        # Output #2 value @offset 192
        "4d94910000000000"   # 9 540 685 units of ZEC smallest currency available
        # Output #2 script length @offset 200
        "19"
        # Output #2 script (25 bytes) @offset 201
        "76a914b714c60805804d86eb72a38c65ba8370582d09e888ac"
        # Locktime @offset 226
        "00000000"
        # Extra payload (size of everything remaining, specific to btc app inner protocol @offset 230
        "0F"
        # Expiry @offset 231
        "00000000"
        # valueBalance @offset 235
        "0000000000000000"
        # vShieldedSpend @offset 243
        "00"
        # vShieldedOutput @offset 244
        "00"
        # vJoinSplit @offset 245
        "00"
    )
]
# The Zcash transaction to be signed by the device, spending utxos[0]'s
# output #2. Annotated field by field with byte offsets.
tx_to_sign = bytes.fromhex(
    # version @offset 0
    "04000080"
    # Some Zcash flags (?) @offset 4
    "85202f89"
    # Input count @offset 8
    "01"
    # Input's prevout hash @offset 9
    "d35f0793da27a5eacfe984c73b1907af4b50f3aa3794ba1bb555b9233addf33f"
    # Prevout idx @offset 41
    "01000000"
    # input sequence @offset 45
    "ffffff00"
    # Output count @offset 49
    "02"
    # Output #1 value @offset 50
    "40420f0000000000"   # 1 000 000 units of available balance spent
    # Output #1 script (26 bytes) @offset 58
    "1976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
    # Output #2 value @offset 84
    "2b51820000000000"
    # Output #2 scritp (26 bytes) @offset 92
    "1976a91490360f7a0b0e50d5dd0c924fc1d6e7adb8519c9388ac"
    # Locktime @offset 118
    "5eb3f840"
)
# BIP 32 derivation paths serialized as num_derivations || 5 hardened/plain
# path components, as expected by the device APDUs.
change_path = bytes.fromhex("058000002c80000085800000000000000100000003")    # 44'/133'/0'/1/3
output_paths = [
    bytes.fromhex("058000002c80000085800000000000000100000001"),    # 44'/133'/0'/1/1
    bytes.fromhex("058000002c80000085800000000000000000000004")     # 44'/133'/0'/0/4
]
@pytest.mark.zcash
class TestLedgerjsZcashTx(BaseTestBtc):
    """
    Replays of Zcash transaction-signing APDU exchanges against a connected
    Ledger device running the Zcash app. Requires physical hardware; the
    APDU order is protocol-mandated and must not be changed.
    """

    def _send_raw_apdus(self, apdus: List[LedgerjsApdu], device: DeviceAppBtc) -> None:
        # Send the Get Version APDUs
        for apdu in apdus:
            try:
                for command in apdu.commands:
                    response = device.sendRawApdu(bytes.fromhex(command))
                # Only the response to the LAST command of this LedgerjsApdu is
                # checked against the expected value.
                if apdu.expected_resp is not None:
                    self.check_raw_apdu_resp(apdu.expected_resp, response)
                elif apdu.check_sig_format is not None and apdu.check_sig_format == True:
                    self.check_signature(response)  # Only format is checked
            except CommException as error:
                # An expected status word (e.g. user abort 0x6985) is not a
                # failure; move on to the next LedgerjsApdu.
                if apdu.expected_sw is not None and error.sw.hex() == apdu.expected_sw:
                    continue
                raise error

    @pytest.mark.skip(reason="Hardcoded TrustedInput can't be replayed on a different device than the one that generated it")
    @pytest.mark.manual
    @pytest.mark.parametrize('test_data', ledgerjs_test_data)
    def test_replay_zcash_test(self, test_data: List[LedgerjsApdu]) -> None:
        """
        Replay of raw apdus from @gre.
        First time an output is presented for validation, it must be rejected by user
        Then tx will be restarted and on 2nd presentation of outputs they have to be
        accepted.
        """
        apdus = test_data
        btc = DeviceAppBtc()
        self._send_raw_apdus(apdus, btc)

    @pytest.mark.manual
    def test_get_single_trusted_input(self) -> None:
        """Obtain a TrustedInput for utxo_single and verify its fields."""
        btc = DeviceAppBtc()

        # 1. Get Trusted Input
        print("\n--* Get Trusted Input - from utxos")
        # The requested prevout index (BE) is prefixed to the raw utxo tx.
        input_datum = bytes.fromhex("00000000") + utxo_single
        utxo_chunk_len = [
            4 + 5 + 4,   # len(prevout_index (BE)||version||input_count||versionGroupId)
            37,          # len(prevout_hash||prevout_index||len(scriptSig))
            -1,          # len(scriptSig, from last byte of previous chunk) + len(input_sequence)
            1,           # len(output_count)
            34,          # len(output_value #1||len(scriptPubkey #1)||scriptPubkey #1)
            4 + 1,       # len(locktime || extra_data)
            4+16+1+1+1   # len(Expiry||valueBalance||vShieldedSpend||vShieldedOutput||vJoinSplit)
        ]
        trusted_input = btc.getTrustedInput(data=input_datum, chunks_len=utxo_chunk_len)
        self.check_trusted_input(
            trusted_input,
            out_index=bytes.fromhex("00000000"),
            out_amount=bytes.fromhex("4072070000000000"),
            # Prevout hash is the utxo txid, byte-reversed (LE).
            out_hash=bytes.fromhex("569294dcadcf8943953aff0017c4c7d1ea031c98f97e83da3ac51c1c383390ec")
        )
        print(" OK")

    @pytest.mark.manual
    def test_replay_zcash_test2(self) -> None:
        """
        Adapted version to work around some hw limitations
        """
        # Send the Get Version raw apdus
        apdus = test_zcash_prefix_cmds
        btc = DeviceAppBtc()
        self._send_raw_apdus(apdus, btc)

        # 1. Get Trusted Input
        print("\n--* Get Trusted Input - from utxos")
        output_indexes = [
            tx_to_sign[41+4-1:41-1:-1],     # out_index in tx_to_sign input must be passed BE as prefix to utxo tx
        ]
        input_data = [out_idx + utxo for out_idx, utxo in zip(output_indexes, utxos)]
        utxos_chunks_len = [
            [   # utxo #1
                4+5+4,              # len(prevout_index (BE)||version||input_count||versionGroupId)
                37,                 # len(prevout_hash||prevout_index||len(scriptSig))
                -1,                 # len(scriptSig, from last byte of previous chunk) + len(input_sequence)
                1,                  # len(output_count)
                34,                 # len(output_value #1||len(scriptPubkey #1)||scriptPubkey #1)
                34,                 # len(output_value #2||len(scriptPubkey #2)||scriptPubkey #2)
                4 + 1,              # len(locktime)
                4 + 16 + 1 + 1 + 1  # len(Expiry||valueBalance||vShieldedSpend||vShieldedOutput||vJoinSplit)
            ]
        ]
        trusted_inputs = [
            btc.getTrustedInput(
                data=input_datum,
                chunks_len=chunks_len
            )
            for (input_datum, chunks_len) in zip(input_data, utxos_chunks_len)
        ]
        print(" OK")

        out_amounts = [utxos[0][192:192+8]]     # UTXO tx's 2nd output's value
        prevout_hashes = [tx_to_sign[9:9+32]]
        for trusted_input, out_idx, out_amount, prevout_hash in zip(
                trusted_inputs, output_indexes, out_amounts, prevout_hashes
        ):
            self.check_trusted_input(
                trusted_input,
                out_index=out_idx[::-1],    # LE for comparison w/ out_idx in trusted_input
                out_amount=out_amount,      # utxo output #1 is requested in tx to sign input
                out_hash=prevout_hash       # prevout hash in tx to sign
            )

        # 2.0 Get public keys for output paths & compute their hashes
        print("\n--* Get Wallet Public Key - for each tx output path")
        wpk_responses = [btc.getWalletPublicKey(output_path) for output_path in output_paths]
        print(" OK")
        pubkeys_data = [self.split_pubkey_data(data) for data in wpk_responses]
        for pubkey in pubkeys_data:
            print(pubkey)

        # 2.1 Construct a pseudo-tx without input script, to be hashed 1st.
        print("\n--* Untrusted Transaction Input Hash Start - Hash tx to sign first w/ all inputs having a null script length")
        input_sequences = [tx_to_sign[45:45+4]]
        ptx_to_hash_part1 = [tx_to_sign[:9]]
        for trusted_input, input_sequence in zip(trusted_inputs, input_sequences):
            ptx_to_hash_part1.extend([
                bytes.fromhex("01"),            # TrustedInput marker byte, triggers the TrustedInput's HMAC verification
                bytes([len(trusted_input)]),
                trusted_input,
                bytes.fromhex("00"),            # Input script length = 0 (no sigScript)
                input_sequence
            ])
        ptx_to_hash_part1 = reduce(lambda x, y: x+y, ptx_to_hash_part1)     # Get a single bytes object
        ptx_to_hash_part1_chunks_len = [
            9   # len(version||flags||input_count) - skip segwit version+flag bytes
        ]
        for trusted_input in trusted_inputs:
            ptx_to_hash_part1_chunks_len.extend([
                1 + 1 + len(trusted_input) + 1,     # len(trusted_input_marker||len(trusted_input)||trusted_input||len(scriptSig) == 0)
                4                                   # len(input_sequence)
            ])
        btc.untrustedTxInputHashStart(
            p1="00",
            p2="05",    # Value used for Zcash
            data=ptx_to_hash_part1,
            chunks_len=ptx_to_hash_part1_chunks_len
        )
        print(" OK")

        # 2.2 Finalize the input-centric-, pseudo-tx hash with the remainder of that tx
        # 2.2.1 Start with change address path
        print("\n--* Untrusted Transaction Input Hash Finalize Full - Handle change address")
        ptx_to_hash_part2 = change_path
        ptx_to_hash_part2_chunks_len = [len(ptx_to_hash_part2)]
        btc.untrustedTxInputHashFinalize(
            p1="ff",    # to derive BIP 32 change address
            data=ptx_to_hash_part2,
            chunks_len=ptx_to_hash_part2_chunks_len
        )
        print(" OK")

        # 2.2.2 Continue w/ tx to sign outputs & scripts
        print("\n--* Untrusted Transaction Input Hash Finalize Full - Continue w/ hash of tx output")
        ptx_to_hash_part3 = tx_to_sign[49:118]  # output_count||repeated(output_amount||scriptPubkey)
        ptx_to_hash_part3_chunks_len = [len(ptx_to_hash_part3)]
        response = btc.untrustedTxInputHashFinalize(
            p1="00",
            data=ptx_to_hash_part3,
            chunks_len=ptx_to_hash_part3_chunks_len
        )
        assert response == bytes.fromhex("0000")
        print(" OK")
        # We're done w/ the hashing of the pseudo-tx with all inputs w/o scriptSig.

        # 2.2.3. Zcash-specific: "When using Overwinter/Sapling, UNTRUSTED HASH SIGN is
        #        called with an empty authorization and nExpiryHeight following the first
        #        UNTRUSTED HASH TRANSACTION INPUT FINALIZE FULL"
        print("\n--* Untrusted Has Sign - with empty Auth & nExpiryHeight")
        branch_id_data = [
            bytes.fromhex(
                "00"    # Number of derivations (None)
                "00"    # Empty validation code
            ),
            tx_to_sign[-4:],            # locktime
            bytes.fromhex("01"),        # SigHashType - always 01
            bytes.fromhex("00000000")   # Empty nExpiryHeight
        ]
        # NOTE(review): the response of this intermediate signing call is not
        # checked; it is intentionally discarded before the per-input loop below.
        response = btc.untrustedHashSign(
            data = reduce(lambda x, y: x+y, branch_id_data)
        )

        # 3. Sign each input individually. Because inputs are segwit, hash each input with its scriptSig
        #    and sequence individually, each in a pseudo-tx w/o output_count, outputs nor locktime.
        print("\n--* Untrusted Transaction Input Hash Start, step 2 - Hash again each input individually (only 1)")
        # Inputs are P2WPKH, so use 0x1976a914{20-byte-pubkey-hash}88ac from utxo as scriptSig in this step.
        #
        # From btc.asc: "The input scripts shall be prepared by the host for the transaction signing process as
        # per bitcoin rules : the current input script being signed shall be the previous output script (or the
        # redeeming script when consuming a P2SH output, or the scriptCode when consuming a BIP 143 output), and
        # other input script shall be null."
        input_scripts = [utxos[0][196:196 + utxos[0][196] + 1]]
        # input_scripts = [tx_to_sign[45:45 + tx_to_sign[45] + 1]]
        # input_scripts = [bytes.fromhex("1976a914") + pubkey.pubkey_hash + bytes.fromhex("88ac")
        #                  for pubkey in pubkeys_data]
        ptx_for_inputs = [
            [   tx_to_sign[:8],             # Tx version||zcash flags
                bytes.fromhex("0101"),      # Input_count||TrustedInput marker byte
                bytes([len(trusted_input)]),
                trusted_input,
                input_script,
                input_sequence
            ] for trusted_input, input_script, input_sequence in zip(trusted_inputs, input_scripts, input_sequences)
        ]
        ptx_chunks_lengths = [
            [
                9,                              # len(version||zcash flags||input_count) - segwit flag+version not sent
                1 + 1 + len(trusted_input) + 1, # len(trusted_input_marker||len(trusted_input)||trusted_input||scriptSig_len == 0x19)
                -1                              # get len(scripSig) from last byte of previous chunk + len(input_sequence)
            ] for trusted_input in trusted_inputs
        ]

        # Hash & sign each input individually
        for ptx_for_input, ptx_chunks_len, output_path in zip(ptx_for_inputs, ptx_chunks_lengths, output_paths):
            # 3.1 Send pseudo-tx w/ sigScript
            btc.untrustedTxInputHashStart(
                p1="00",
                p2="80",    # to continue previously started tx hash, be it BTc or other BTC-like coin
                data=reduce(lambda x,y: x+y, ptx_for_input),
                chunks_len=ptx_chunks_len
            )
            print(" Final hash OK")

            # 3.2 Sign tx at last. Param is:
            #     Num_derivs||Dest output path||RFU (0x00)||tx locktime||sigHashType(always 0x01)||Branch_id for overwinter (4B)
            print("\n--* Untrusted Transaction Hash Sign")
            tx_to_sign_data = output_path \
                + bytes.fromhex("00") \
                + tx_to_sign[-4:] \
                + bytes.fromhex("01") \
                + bytes.fromhex("00000000")

            response = btc.untrustedHashSign(
                data = tx_to_sign_data
            )
            self.check_signature(response)  # Check sig format only
            # self.check_signature(response, expected_der_sig)  # Can't test sig value as it depends on signing device seed
            print(" Signature OK\n")
| 42.10989 | 155 | 0.62674 | 2,280 | 22,992 | 6.158772 | 0.216228 | 0.025637 | 0.012534 | 0.007976 | 0.358923 | 0.331363 | 0.275103 | 0.240991 | 0.216636 | 0.210796 | 0 | 0.224469 | 0.287926 | 22,992 | 545 | 156 | 42.187156 | 0.633215 | 0.308803 | 0 | 0.468354 | 0 | 0.005063 | 0.285028 | 0.187672 | 0.002532 | 0 | 0 | 0 | 0.002532 | 1 | 0.010127 | false | 0 | 0.01519 | 0 | 0.027848 | 0.04557 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0dcbb2c8e1a6f536fd7f94e8770c33962e2f268b | 1,699 | py | Python | nhoods/setup.py | MSLADevServGIS/NhoodProfiles | aa35d2a4d4be177dc8df79a34915eefb31a20634 | [
"MIT"
] | null | null | null | nhoods/setup.py | MSLADevServGIS/NhoodProfiles | aa35d2a4d4be177dc8df79a34915eefb31a20634 | [
"MIT"
] | null | null | null | nhoods/setup.py | MSLADevServGIS/NhoodProfiles | aa35d2a4d4be177dc8df79a34915eefb31a20634 | [
"MIT"
] | null | null | null | # Setup procedures -- WIP
import os
import re
import arcpy
# All intermediate datasets are written to the in-memory workspace.
arcpy.env.workspace = "in_memory"

# TODO: out_gdb = "//cityfiles/DEVServices/WallyG/projects/NhoodProfiles/nhoods/data/NhoodAmenities.gdb/MtStatePlane"

# DATA PROCESSING
# Nhood_buffers: 100-foot buffer around each neighborhood, one dissolved
# feature per neighborhood Name.
arcpy.Buffer_analysis("Nhoods", "nhood_buffers",
                      buffer_distance_or_field="100 Feet",
                      line_side="FULL", line_end_type="ROUND",
                      dissolve_option="LIST", dissolve_field="Name",
                      method="PLANAR")

# Parks: copy the shared Parks feature class into memory so the source
# dataset is never modified.
parks = os.path.join(
    r"\\cityfiles\Shared\PARKS AND RECREATION SHARED\GIS Data",
    r"Parks Data.gdb\Parks")
arcpy.FeatureClassToFeatureClass_conversion(parks, "in_memory", "mem_parks")

# Delete Parks fields not needed downstream.
arcpy.DeleteField_management("mem_parks", drop_field="Reference;Rec_Date;Doc_Links;Subtype;Ownership;Origin;Maintenance;Platted_Size;Maint_Level;Status;Assessors_Parcel_No;Acres;Dev_Status;Owner_Type;Maint_Responsibility;Shape_Length;Shape_Area")

# COMMON AREAS: select parcels whose legal description marks them as common areas.
CAMA = r"W:\DATA\CAMA\Missoula\MissoulaOwnerParcel_shp\MissoulaOwnerParcel_shp.shp"
arcpy.Select_analysis(CAMA, "in_memory/mem_commons",
                      '"LegalDescr" LIKE \'%COMMON%\'')

# Make new field "CAName": the common-area name is the first token of the
# legal description, title-cased.
arcpy.AddField_management("mem_commons", "CAName", "TEXT", "", "", 50)
with arcpy.da.UpdateCursor("mem_commons", ["LegalDescr", "CAName"]) as cur:
    for row in cur:
        # Raw string for the regex: "\W" is an invalid string escape and
        # raises a DeprecationWarning (SyntaxError in future Python versions).
        row[1] = re.split(r"\W\s", row[0])[0].strip().title()
        cur.updateRow(row)
arcpy.Dissolve_management(in_features="mem_commons", out_feature_class="in_memory/mem_commons_Diss", dissolve_field="CAName", statistics_fields="", multi_part="SINGLE_PART", unsplit_lines="DISSOLVE_LINES")

# Merge
| 32.673077 | 246 | 0.723955 | 216 | 1,699 | 5.453704 | 0.587963 | 0.042445 | 0.028014 | 0.03056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005461 | 0.137728 | 1,699 | 51 | 247 | 33.313725 | 0.798635 | 0.14126 | 0 | 0 | 0 | 0.043478 | 0.410221 | 0.230663 | 0 | 0 | 0 | 0.019608 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.130435 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0dd358ad72eca9a8df86326685bf5bf8a52c322f | 717 | py | Python | algorithm_web/admin/contest.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | null | null | null | algorithm_web/admin/contest.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | 10 | 2019-03-15T05:12:23.000Z | 2020-05-06T13:05:49.000Z | algorithm_web/admin/contest.py | KMU-algolab/algorithm | 2b734978da78b936244580ed1febe4f9f6cf1aea | [
"MIT"
] | null | null | null | from django.contrib import admin
from .. import models
@admin.register(models.Contest)
class ContestAdmin(admin.ModelAdmin):
    """
    Contest administration (translated from Korean: 대회관리).
    """
    # Columns shown on the contest changelist page.
    list_display = ['contest_name', 'start_time', 'end_time', 'message', 'host_email', 'after_open']

    class Meta:
        # NOTE(review): ModelAdmin does not read an inner Meta class — the model
        # comes from the @admin.register decorator, so this appears inert; confirm
        # before removing.
        model = models.Contest
@admin.register(models.ContestProblem)
class ContestProblemAdmin(admin.ModelAdmin):
    """
    Contest problem administration (translated from Korean: 대회 문제관리).
    """
    # Columns shown on the contest-problem changelist page.
    list_display = ['contest', 'problem']

    class Meta:
        # NOTE(review): ModelAdmin does not read an inner Meta class — the model
        # comes from the @admin.register decorator, so this appears inert; confirm
        # before removing.
        model = models.ContestProblem
@admin.register(models.Participant)
class ParticipantAdmin(admin.ModelAdmin):
    """
    Participant administration (translated from Korean: 참가자관리).
    """
    # Columns shown on the participant changelist page.
    list_display = ['contest', 'participant']

    class Meta:
        # NOTE(review): ModelAdmin does not read an inner Meta class — the model
        # comes from the @admin.register decorator, so this appears inert; confirm
        # before removing.
        model = models.Participant
| 19.378378 | 100 | 0.658298 | 72 | 717 | 6.444444 | 0.458333 | 0.084052 | 0.122845 | 0.12931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.211994 | 717 | 36 | 101 | 19.916667 | 0.821239 | 0.025105 | 0 | 0.176471 | 0 | 0 | 0.136294 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.647059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0dd4787568192c17a9f65ae09cd6486c8711bd89 | 4,920 | py | Python | predix/admin/cf/spaces.py | Saifinbox/predix | d4a69da0adbc503fcf5d569e91c8ebb1aeac0605 | [
"BSD-3-Clause"
] | null | null | null | predix/admin/cf/spaces.py | Saifinbox/predix | d4a69da0adbc503fcf5d569e91c8ebb1aeac0605 | [
"BSD-3-Clause"
] | null | null | null | predix/admin/cf/spaces.py | Saifinbox/predix | d4a69da0adbc503fcf5d569e91c8ebb1aeac0605 | [
"BSD-3-Clause"
] | null | null | null |
import logging
import predix.admin.cf.api
import predix.admin.cf.orgs
import predix.admin.cf.apps
import predix.admin.cf.services
class Space(object):
"""
Operations and data for Cloud Foundry Spaces.
"""
def __init__(self, *args, **kwargs):
super(Space, self).__init__(*args, **kwargs)
self.api = predix.admin.cf.api.API()
self.name = self.api.config.get_space_name()
self.guid = self.api.config.get_space_guid()
self.org = predix.admin.cf.orgs.Org()
def _get_spaces(self):
"""
Get the marketplace services.
"""
guid = self.api.config.get_organization_guid()
uri = '/v2/organizations/%s/spaces' % (guid)
return self.api.get(uri)
def get_spaces(self):
"""
Return a flat list of the names for spaces in the organization.
"""
self.spaces = []
for resource in self._get_spaces()['resources']:
self.spaces.append(resource['entity']['name'])
return self.spaces
def get_space_services(self):
"""
Returns the services available for use in the space. This may
not always be the same as the full marketplace.
"""
uri = '/v2/spaces/%s/services' % (self.guid)
return self.api.get(uri)
def create_space(self, space_name):
"""
Create a new space of the given name.
"""
body = {
'name': space_name,
'organization_guid': self.api.config.get_organization_guid()
}
return self.api.post('/v2/spaces', body)
def delete_space(self, space_name):
"""
Delete a space of the given name.
"""
return self.api.delete("/v2/spaces/%s" % (self.guid))
def get_space_summary(self):
"""
Returns a summary of apps and services within a given
cloud foundry space.
It is the call used by `cf s` or `cf a` for quicker
responses.
"""
uri = '/v2/spaces/%s/summary' % (self.guid)
return self.api.get(uri)
def _get_apps(self):
"""
Returns raw results for all apps in the space.
"""
uri = '/v2/spaces/%s/apps' % (self.guid)
return self.api.get(uri)
def get_apps(self):
"""
Returns a list of all of the apps in the space.
"""
apps = []
for resource in self._get_apps()['resources']:
apps.append(resource['entity']['name'])
return apps
def has_app(self, app_name):
"""
Simple test to see if we have a name conflict
for the application.
"""
return app_name in self.get_apps()
def _get_services(self):
"""
Return the available services for this space.
"""
uri = '/v2/spaces/%s/services' % (self.guid)
return self.api.get(uri)
def get_services(self):
"""
Returns a flat list of the service names available
from the marketplace for this space.
"""
services = []
for resource in self._get_services()['resources']:
services.append(resource['entity']['label'])
return services
def _get_instances(self):
"""
Returns the service instances activated in this space.
"""
uri = '/v2/spaces/%s/service_instances' % (self.guid)
return self.api.get(uri)
def get_instances(self):
"""
Returns a flat list of the names of services created
in this space.
"""
services = []
for resource in self._get_instances()['resources']:
services.append(resource['entity']['name'])
return services
def has_service_with_name(self, service_name):
"""
Tests whether a service with the given name exists in
this space.
"""
return service_name in self.get_instances()
def has_service_of_type(self, service_type):
"""
Tests whether a service instance exists for the given
service.
"""
summary = self.get_space_summary()
for instance in summary['services']:
if service_type == instance['service_plan']['service']['label']:
return True
return False
def purge(self):
"""
Remove all services and apps from the space.
Will leave the space itself, call delete_space() if you
want to remove that too.
Similar to `cf delete-space -f <space-name>`.
"""
logging.warn("Purging all services from space %s" %
(self.name))
service = predix.admin.cf.services.Service()
for service_name in self.get_instances():
service.purge(service_name)
apps = predix.admin.cf.apps.App()
for app_name in self.get_apps():
apps.delete_app(app_name)
| 28.114286 | 76 | 0.57378 | 610 | 4,920 | 4.508197 | 0.191803 | 0.033091 | 0.037818 | 0.043273 | 0.337818 | 0.221455 | 0.166182 | 0.121818 | 0.084364 | 0.071273 | 0 | 0.002371 | 0.314228 | 4,920 | 174 | 77 | 28.275862 | 0.812685 | 0.240447 | 0 | 0.153846 | 0 | 0 | 0.100737 | 0.037776 | 0 | 0 | 0 | 0 | 0 | 1 | 0.217949 | false | 0 | 0.064103 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0dd5ea13d1486dc2358d111b35e9609c1546878c | 953 | py | Python | services/traction/api/endpoints/routes/v1/tenant/admin/issuer.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 12 | 2022-01-29T20:30:03.000Z | 2022-03-29T11:46:14.000Z | services/traction/api/endpoints/routes/v1/tenant/admin/issuer.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 38 | 2021-11-22T17:52:50.000Z | 2022-03-31T17:52:00.000Z | services/traction/api/endpoints/routes/v1/tenant/admin/issuer.py | bcgov/traction | 90cec4f1aebccd68eb986cb89dfae5819a07a2ee | [
"Apache-2.0"
] | 9 | 2021-11-22T18:05:48.000Z | 2022-03-29T11:25:08.000Z | import logging
from fastapi import APIRouter
from starlette import status
from api.endpoints.dependencies.tenant_security import get_from_context
from api.endpoints.models.v1.tenant import TenantGetResponse
from api.services.v1 import tenant_service
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post(
"/make-issuer", status_code=status.HTTP_200_OK, response_model=TenantGetResponse
)
async def initialize_issuer() -> TenantGetResponse:
"""
If the innkeeper has authorized your tenant to become an issuer, initialize
here to write a endorsed public did the configured Hyperledger-Indy service
"""
wallet_id = get_from_context("TENANT_WALLET_ID")
tenant_id = get_from_context("TENANT_ID")
item = await tenant_service.make_issuer(
tenant_id,
wallet_id,
)
links = [] # TODO: determine useful links for /make-issuer
return TenantGetResponse(item=item, links=links)
| 26.472222 | 84 | 0.757608 | 122 | 953 | 5.704918 | 0.516393 | 0.030172 | 0.060345 | 0.045977 | 0.063218 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006313 | 0.16894 | 953 | 35 | 85 | 27.228571 | 0.872475 | 0.047219 | 0 | 0 | 0 | 0 | 0.05034 | 0 | 0 | 0 | 0 | 0.028571 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0dd777da72c858ecb1ea267bb078c8de582008a6 | 1,618 | py | Python | learning_files/loops.py | MineJockey/python-basics | bd0370413362c69e55fc81366d8c995875f9308a | [
"MIT"
] | null | null | null | learning_files/loops.py | MineJockey/python-basics | bd0370413362c69e55fc81366d8c995875f9308a | [
"MIT"
] | null | null | null | learning_files/loops.py | MineJockey/python-basics | bd0370413362c69e55fc81366d8c995875f9308a | [
"MIT"
] | null | null | null | def loops():
# String Array
names = ["Apple", "Orange", "Pear"]
# \n is a newline in a string
print('\n---------------')
print(' For Each Loop')
print('---------------\n')
# For Each Loop
for i in names:
print(i)
print('\n---------------')
print(' For Loop')
print('---------------\n')
# For Loop
# the range() function can take a min, max, and interval
# value, max is non-inclusive i.e. 101 stops at 100
# example: range(0, 101, 2)
for i in range(5):
if i == 1:
print('1, continue')
# continue will move to the next iteration in the loop
continue
elif i == 3:
print('3, break')
# the break statement will end the loop
break
# if the number doesn't fit the conditions above
# it will be printed to the console
print(i)
print('\n---------------')
print(' While Loop')
print('---------------\n')
# Boolean variables hold True or False, 0 or 1
loop = True
# Integer variables hold whole numbers
iterations = 0
max_iterations = 10
print('0 -', max_iterations, '\n')
# The while loop will run as long as the given
# condition or variable is true
# Parentheses are optional
while loop:
# the += operator adds the given value to
# the current value of a variable
iterations += 1
print(iterations)
if iterations == max_iterations:
# break will end a loops execution
break
if __name__ == '__main__':
loops()
| 25.68254 | 66 | 0.524722 | 206 | 1,618 | 4.067961 | 0.436893 | 0.042959 | 0.039379 | 0.033413 | 0.040573 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021218 | 0.330037 | 1,618 | 62 | 67 | 26.096774 | 0.751845 | 0.403585 | 0 | 0.3125 | 0 | 0 | 0.196825 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0 | 0 | 0.03125 | 0.46875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
0ddcb23ab572dfee37d81ec252ad2030e3319efd | 1,675 | py | Python | python/code_challenges/stacks_and_queues/stacks_and_queues.py | brendanwelzien/data-structures-and-algorithms | 0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5 | [
"MIT"
] | null | null | null | python/code_challenges/stacks_and_queues/stacks_and_queues.py | brendanwelzien/data-structures-and-algorithms | 0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5 | [
"MIT"
] | 1 | 2020-11-10T01:31:39.000Z | 2020-11-10T01:31:39.000Z | python/code_challenges/stacks_and_queues/stacks_and_queues.py | brendanwelzien/data-structures-and-algorithms | 0bffe825e34de2e5c072b1e6b6c2cb1d7d1d61f5 | [
"MIT"
] | null | null | null | class Node:
    def __init__(self, value, next_p=None):
        """Create a node holding `value`, optionally linked to node `next_p`."""
        self.next = next_p
        self.value = value
    def __str__(self):
        """Render the node as its stored value."""
        return f'{self.value}'
class InvalidOperationError(Exception):
    """Raised by pop/peek/dequeue when called on an empty collection."""
    pass
class Stack:
    """LIFO stack backed by a singly linked list of `Node`s.

    `top` always points at the most recently pushed node (None when empty).
    """

    def __init__(self):
        self.top = None

    def push(self, value):
        """Push `value` onto the top of the stack.

        Fixed: removed the unused `current` local and collapsed the
        redundant empty/non-empty branches — `Node(value, self.top)`
        handles both cases, since Node's constructor accepts the next
        pointer directly.
        """
        self.top = Node(value, self.top)

    def pop(self):
        """Remove and return the value on top of the stack.

        Raises:
            InvalidOperationError: if the stack is empty.

        Fixed: removed the redundant `if self.top:` re-check that
        followed the raise (it could never be false there).
        """
        if not self.top:
            raise InvalidOperationError('Method not allowed on empty collection')
        top_value = self.top
        self.top = self.top.next
        return top_value.value

    def peek(self):
        """Return (without removing) the value on top of the stack.

        Raises:
            InvalidOperationError: if the stack is empty.
        """
        if not self.top:
            raise InvalidOperationError("Method not allowed on empty collection")
        return self.top.value

    def is_empty(self):
        """Return True when the stack holds no nodes."""
        return not self.top
class Queue:
    """FIFO queue backed by a singly linked list.

    `f` is the front node (next to be dequeued), `r` the rear node
    (most recently enqueued); both are None when the queue is empty.
    """

    def __init__(self):
        self.f = None
        self.r = None

    def enqueue(self, value):
        """Append `value` at the rear of the queue.

        Bug fix: the original assigned into the local `node`
        (`node = self.r.next` then `node = self.r`) and never linked the
        new node into the structure, so `f`/`r` stayed None and every
        enqueued value was silently lost. Now the new node is linked
        after the current rear (or becomes the front of an empty queue)
        and `r` is advanced to it.
        """
        node = Node(value)
        if self.r:
            self.r.next = node
        else:
            self.f = node
        self.r = node

    def dequeue(self):
        """Remove and return the value at the front of the queue.

        Raises:
            InvalidOperationError: if the queue is empty.
        """
        if not self.f:
            raise InvalidOperationError('Method not allowed on empty collection')
        leave = self.f
        if self.f == self.r:
            # Removing the only element leaves the queue empty.
            self.r = None
        self.f = self.f.next
        return leave.value

    def peek(self):
        """Return (without removing) the value at the front of the queue.

        Raises:
            InvalidOperationError: if the queue is empty.
        """
        if not self.f:
            raise InvalidOperationError('Method not allowed on empty collection')
        return self.f.value

    def is_empty(self):
        """Return True when the queue holds no nodes."""
        return not self.f and not self.r
| 23.263889 | 81 | 0.560597 | 215 | 1,675 | 4.251163 | 0.176744 | 0.107221 | 0.039387 | 0.056893 | 0.442013 | 0.442013 | 0.442013 | 0.415755 | 0.345733 | 0.345733 | 0 | 0 | 0.352836 | 1,675 | 71 | 82 | 23.591549 | 0.843173 | 0 | 0 | 0.272727 | 0 | 0 | 0.09791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.218182 | false | 0.018182 | 0 | 0.054545 | 0.418182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0ddf3f1c726adc70203174632e31094c4c2d0306 | 232 | py | Python | square/main.py | vishwamshuklaRazorpay/vishwam_test | b0925429385005dac37d92938b7daf6245d636c8 | [
"MIT"
] | null | null | null | square/main.py | vishwamshuklaRazorpay/vishwam_test | b0925429385005dac37d92938b7daf6245d636c8 | [
"MIT"
] | null | null | null | square/main.py | vishwamshuklaRazorpay/vishwam_test | b0925429385005dac37d92938b7daf6245d636c8 | [
"MIT"
] | null | null | null | def main():
number = int(input("Enter number (Only positive integer is allowed)"))
print(f'{number} square is {number ** 2}')
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
| 25.777778 | 74 | 0.659483 | 34 | 232 | 4.264706 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005435 | 0.206897 | 232 | 8 | 75 | 29 | 0.782609 | 0.237069 | 0 | 0 | 0 | 0 | 0.497143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0deb37fe04b5be7b019c80d0f79badcb0721fecc | 2,583 | py | Python | datable/web/columns.py | ofirr/dojango-datable | c4d27c23d66c023062270a31f05e21d0982e0b43 | [
"MIT"
] | null | null | null | datable/web/columns.py | ofirr/dojango-datable | c4d27c23d66c023062270a31f05e21d0982e0b43 | [
"MIT"
] | null | null | null | datable/web/columns.py | ofirr/dojango-datable | c4d27c23d66c023062270a31f05e21d0982e0b43 | [
"MIT"
] | null | null | null | # /usr/bin/env python
# -*- encoding: utf-8 -*-
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from datable.core.serializers import BooleanSerializer
from datable.core.serializers import DateSerializer
from datable.core.serializers import DateTimeSerializer
from datable.core.serializers import TimedeltaSerializer
from datable.core.serializers import StringSerializer
class Column(object):
    """A single table column: its label, width, sort behaviour and the
    serializer used for its values."""

    # Class-level defaults; subclasses and constructor arguments override them.
    label = None
    width = None
    sortable = None
    serializer = None
    serializerClass = None
    formatter = None
    sortColumnName = None  # Parameter for QuerySet.order_by

    def __init__(self, name, label=None, width=None,
                 serializer=None, sortable=None, sortColumnName=None):
        self.name = name
        # Explicitly supplied constructor arguments win over class defaults.
        overrides = (('label', label), ('width', width),
                     ('serializer', serializer), ('sortable', sortable),
                     ('sortColumnName', sortColumnName))
        for attribute, supplied in overrides:
            if supplied is not None:
                setattr(self, attribute, supplied)
        if self.label is None:
            # Derive a translated, human-readable label from the field name.
            self.label = _(capfirst(self.name.replace("_", " ")))
        if self.serializer is None:
            self.serializer = self.serializerClass(self.name)
        if self.sortColumnName is None and self.sortable:
            self.sortColumnName = name

    def sortQuerySet(self, querySet, desc):
        """The query set needs to be sorted using this column.
        """
        if self.sortColumnName is None:
            raise Exception("This column can not be used to sort")
        prefix = '-' if desc else ''
        return querySet.order_by(prefix + self.sortColumnName)

    def getName(self):
        """Return the column's field name."""
        return self.name

    def getSerializer(self):
        """Return the serializer instance for this column's values."""
        return self.serializer

    def getLabel(self):
        """Return the human-readable column label."""
        return self.label

    def getFormatter(self):
        """Return the client-side formatter identifier (or None)."""
        return self.formatter
class StringColumn(Column):
    """Column for plain string values; sortable by default."""
    serializerClass = StringSerializer
    sortable = True
class DateColumn(Column):
    """Column for date values; sortable by default."""
    serializerClass = DateSerializer
    sortable = True
class DateTimeColumn(Column):
    """Column for datetime values; sortable by default."""
    serializerClass = DateTimeSerializer
    sortable = True
class TimedeltaColumn(Column):
    """Column for time-delta (duration) values; sortable by default."""
    serializerClass = TimedeltaSerializer
    sortable = True
class BooleanColumn(Column):
    """Column for boolean values; sortable by default."""
    serializerClass = BooleanSerializer
    sortable = True
class ImageColumn(Column):
    """Column rendered client-side with the 'image' formatter; not sortable."""
    formatter = 'image'
    sortable = False
class HrefColumn(Column):
    """Column rendered client-side with the 'href' formatter; not sortable."""
    formatter = 'href'
    sortable = False
| 23.697248 | 70 | 0.663957 | 276 | 2,583 | 6.181159 | 0.293478 | 0.037515 | 0.043962 | 0.076202 | 0.093787 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000527 | 0.265196 | 2,583 | 108 | 71 | 23.916667 | 0.898314 | 0.053039 | 0 | 0.1 | 0 | 0 | 0.019286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0 | 0.1 | 0.057143 | 0.671429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0def13794ee3ca070ffd095ffc65ba6c515362be | 350 | py | Python | archives/build_wget.py | onai/code-ecosystem-analyzer | 964d1ef5ec4d8f774c52aa2718663bb455d62ecb | [
"Apache-2.0"
] | null | null | null | archives/build_wget.py | onai/code-ecosystem-analyzer | 964d1ef5ec4d8f774c52aa2718663bb455d62ecb | [
"Apache-2.0"
] | null | null | null | archives/build_wget.py | onai/code-ecosystem-analyzer | 964d1ef5ec4d8f774c52aa2718663bb455d62ecb | [
"Apache-2.0"
] | null | null | null | import sys
# Usage: build_wget.py <url_list_file> <user> <password>
# Prints one authenticated `curl` download command per URL in the input file.
with open(sys.argv[1]) as url_file:
    for line in url_file:
        parts = line.split('/')
        # Output filename is built from path components 4 and 5 of the URL.
        dest = parts[4] + '_' + parts[5] + '.zip'
        command = 'curl -L --user ' + sys.argv[2] + ':' + sys.argv[3] + ' ' + line.strip() + ' -o ' + dest
        print(command)
| 43.75 | 107 | 0.525714 | 53 | 350 | 3.339623 | 0.622642 | 0.19774 | 0.135593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01845 | 0.225714 | 350 | 7 | 108 | 50 | 0.634686 | 0.228571 | 0 | 0 | 0 | 0 | 0.104089 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0df3e3502236c625d081f0f74dbbd4aea76a92c9 | 630 | py | Python | authors/apps/followers/models.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/followers/models.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 20 | 2018-11-26T16:22:46.000Z | 2018-12-21T10:08:25.000Z | authors/apps/followers/models.py | andela/ah-code-titans | 4f1fc77c2ecdf8ca15c24327d39fe661eac85785 | [
"BSD-3-Clause"
] | 3 | 2019-01-24T15:39:42.000Z | 2019-09-25T17:57:08.000Z | from django.db import models
from ..authentication.models import User
class Follower(models.Model):
    """
    Store data on following statistics for users.
    """
    # The user who is doing the following.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='follower')
    # The user being followed.
    followed = models.ForeignKey(User, on_delete=models.CASCADE, related_name='followed')
    # NOTE(review): `auto_created` is an internal Django field flag, not a
    # timestamp option — presumably `auto_now_add=True` alone was intended;
    # confirm before changing, since it affects migrations.
    followed_at = models.DateTimeField(auto_created=True, auto_now_add=True)

    class Meta:
        # A user may follow another user at most once.
        unique_together = ('user', 'followed')

    def __str__(self):
        return '{follower} follows {followed}'.format(
            follower=self.user, followed=self.followed
        )
| 30 | 89 | 0.696825 | 74 | 630 | 5.756757 | 0.527027 | 0.075117 | 0.093897 | 0.103286 | 0.244131 | 0.244131 | 0.244131 | 0.244131 | 0.244131 | 0 | 0 | 0 | 0.192063 | 630 | 20 | 90 | 31.5 | 0.836935 | 0.071429 | 0 | 0 | 0 | 0 | 0.100176 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0.083333 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
0df7ced38cda902e631d021c1255e468e0aeb410 | 750 | py | Python | revelation/core/urls.py | Federico-Comesana/revelatte | ccd50831dcdec8bc4a7e83b062d0309f6e4feee2 | [
"MIT"
] | null | null | null | revelation/core/urls.py | Federico-Comesana/revelatte | ccd50831dcdec8bc4a7e83b062d0309f6e4feee2 | [
"MIT"
] | null | null | null | revelation/core/urls.py | Federico-Comesana/revelatte | ccd50831dcdec8bc4a7e83b062d0309f6e4feee2 | [
"MIT"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.conf.urls import url
import views
# Route table for the core app; patterns are matched top to bottom.
# NOTE(review): the regexes are not anchored with '^' (e.g. r'public/$'
# matches any path ending in 'public/') — confirm whether '^public/$'
# etc. was intended.
urlpatterns = [
    # Public list of revelations.
    url(r'public/$',
        views.RevelationModelListView.as_view(),
        name='revelation-list'),
    # A user's profile page.
    url(r'u/(?P<pk>\d+)/$',
        views.UserProfileView.as_view(),
        name='user-view'),
    # Deletion requires an authenticated user.
    url(r'delete/(?P<pk>\d+)/$',
        login_required(views.RevelationModelDeleteView.as_view()),
        name='revelation-delete'),
    url(r'create/$', views.RevelationModelCreateView.as_view(),
        name='revelation-create'),
    # Detail view for a single revelation.
    url(r'r/(?P<pk>\d+)/$',
        views.RevelationModelDetailView.as_view(),
        name='revelation-view'),
    url(r'^$',
        views.HomePageView.as_view(),
        name='home'),
]
| 24.193548 | 66 | 0.617333 | 86 | 750 | 5.290698 | 0.395349 | 0.052747 | 0.131868 | 0.175824 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.202667 | 750 | 30 | 67 | 25 | 0.76087 | 0 | 0 | 0 | 0 | 0 | 0.193333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
216ebd24f2457cf9469911c06b29cd3b21471003 | 530 | py | Python | demo-001.py | zhouyuanmin/MyCode | dbc7df8dc5eba419340ef9aafed75af24f883381 | [
"MIT"
] | 1 | 2021-01-22T03:15:29.000Z | 2021-01-22T03:15:29.000Z | demo-001.py | zhouyuanmin/MyCode | dbc7df8dc5eba419340ef9aafed75af24f883381 | [
"MIT"
] | null | null | null | demo-001.py | zhouyuanmin/MyCode | dbc7df8dc5eba419340ef9aafed75af24f883381 | [
"MIT"
] | null | null | null | """
使用装饰器限制函数的调用次数
"""
import functools
def call_limit(count):
    """Decorator factory limiting the wrapped function to `count` calls.

    Once the budget is exhausted, every further call raises
    AssertionError. The budget lives on the per-decoration `limiter`
    closure, so functions decorated by the same `call_limit(n)` result
    share a single budget (matching the original behaviour).
    """
    def limiter(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            if limiter.remaining <= 0:
                raise AssertionError(f"单个程序最多允许调用此方法{count}次")
            limiter.remaining -= 1
            return func(*args, **kw)
        limiter.remaining = count
        return wrapper
    return limiter
@call_limit(5)
def demo(a, b):
    """Print the two arguments; the decorator allows at most 5 calls."""
    print(a, b)
if __name__ == '__main__':
    # demo is limited to 5 calls, so the 6th iteration raises
    # AssertionError and ends the run with a traceback.
    for i in range(20):
        demo(i, i ** 2)
| 17.096774 | 62 | 0.558491 | 63 | 530 | 4.539683 | 0.571429 | 0.146853 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016529 | 0.315094 | 530 | 30 | 63 | 17.666667 | 0.77135 | 0.026415 | 0 | 0 | 0 | 0 | 0.057087 | 0.041339 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.222222 | false | 0 | 0.055556 | 0 | 0.444444 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
217147b682f00559525d500ab3019c807cac7a67 | 521 | py | Python | deploy/test.py | zhxiaohe/starwars_api | f1b729e819eb19e5eb59630bed56b13127eb1ef2 | [
"MIT"
] | null | null | null | deploy/test.py | zhxiaohe/starwars_api | f1b729e819eb19e5eb59630bed56b13127eb1ef2 | [
"MIT"
] | null | null | null | deploy/test.py | zhxiaohe/starwars_api | f1b729e819eb19e5eb59630bed56b13127eb1ef2 | [
"MIT"
] | null | null | null | #coding=utf-8
# Python 2 script: runs an ad-hoc command on Rundeck nodes via its REST API.
import requests,json

# NOTE(review): the auth token and host are hard-coded — move them to
# environment variables or a secrets store before sharing this script.
headers = {'X-Rundeck-Auth-Token': '2EcW3xe0urFLrilqUOGCVYLXSbdByk2e','Accept': 'application/json'}
headers['Content-type']='application/json'
rundeck_host= 'http://10.1.16.26:4440'
url = rundeck_host+'/api/16/project/fengyang/run/command'
data={
    'project':'fengyang',
    'exec':'whoami',
    'filter': 'tags: member-web-1,member-web-2',
    'nodeKeepgoing': False  # whether to continue on remaining nodes after an error
}
r = requests.post(url, headers=headers,data=json.dumps(data))
print r.status_code
print r.text
| 23.681818 | 99 | 0.712092 | 70 | 521 | 5.257143 | 0.671429 | 0.059783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042918 | 0.105566 | 521 | 21 | 100 | 24.809524 | 0.746781 | 0.042226 | 0 | 0 | 0 | 0 | 0.47379 | 0.1875 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.071429 | null | null | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21731e6d861a39649ea198c473c43e6a73b5f540 | 2,916 | py | Python | jmeter_api/configs/http_cache_manager/test_http_cache_manager.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 11 | 2020-03-22T13:30:21.000Z | 2021-12-25T06:23:44.000Z | jmeter_api/configs/http_cache_manager/test_http_cache_manager.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 37 | 2019-12-18T13:12:50.000Z | 2022-02-10T10:52:37.000Z | jmeter_api/configs/http_cache_manager/test_http_cache_manager.py | dashawn888/jmeter_api | 1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd | [
"Apache-2.0"
] | 5 | 2019-12-06T10:55:56.000Z | 2020-06-01T19:32:32.000Z | import xmltodict
import pytest
from jmeter_api.configs.http_cache_manager.elements import HTTPCacheManager
from jmeter_api.basics.utils import tag_wrapper
class TestHTTPCacheManagerArgs:
    """Constructor argument validation for HTTPCacheManager."""

    class TestClearCacheEachIteration:
        """`clear_each_iteration` must be a real bool."""

        def test_check(self):
            # A string is rejected even when it spells a boolean.
            with pytest.raises(TypeError):
                HTTPCacheManager(clear_each_iteration="False")

        def test_check2(self):
            # An int is rejected as well.
            with pytest.raises(TypeError):
                HTTPCacheManager(clear_each_iteration=123456)

        def test_positive(self):
            cache_manager = HTTPCacheManager(clear_each_iteration=True)
            assert cache_manager.clear_each_iteration is True

    class TestUseCacheControl:
        """`use_cache_control` must be a real bool."""

        def test_check(self):
            with pytest.raises(TypeError):
                HTTPCacheManager(use_cache_control="False")

        def test_check2(self):
            with pytest.raises(TypeError):
                HTTPCacheManager(use_cache_control=12345)

        def test_positive(self):
            cache_manager = HTTPCacheManager(use_cache_control=False)
            assert cache_manager.use_cache_control is False

    class TestMaxElementsInCache:
        """`max_elements_in_cache` must be an int."""

        def test_check(self):
            with pytest.raises(TypeError):
                HTTPCacheManager(max_elements_in_cache="test")

        def test_check2(self):
            # Even a numeric string is rejected.
            with pytest.raises(TypeError):
                HTTPCacheManager(max_elements_in_cache="120")

        def test_positive(self):
            cache_manager = HTTPCacheManager(max_elements_in_cache=100)
            assert cache_manager.max_elements_in_cache == 100
class TestHTTPCacheManagerRender:
    """Verify the XML rendered for each HTTPCacheManager field.

    The three tests previously triplicated the element construction,
    wrapping and parsing; that setup is now shared in one helper.
    """

    @staticmethod
    def _render_cache_manager():
        """Build a representative manager and return its parsed XML."""
        element = HTTPCacheManager(clear_each_iteration=False,
                                   use_cache_control=True,
                                   max_elements_in_cache=100)
        rendered_doc = tag_wrapper(element.to_xml(), 'result')
        return xmltodict.parse(rendered_doc)

    def test_clear_each_iteration(self):
        parsed_doc = self._render_cache_manager()
        assert parsed_doc['result']['CacheManager']['boolProp'][0]['#text'] == 'false'

    def test_use_cache_control(self):
        parsed_doc = self._render_cache_manager()
        assert parsed_doc['result']['CacheManager']['boolProp'][1]['#text'] == 'true'

    def test_max_elements_in_cache(self):
        parsed_doc = self._render_cache_manager()
        assert parsed_doc['result']['CacheManager']['intProp']['#text'] == '100'
| 39.405405 | 86 | 0.645405 | 302 | 2,916 | 5.910596 | 0.198676 | 0.047059 | 0.080672 | 0.080672 | 0.708123 | 0.673389 | 0.673389 | 0.594398 | 0.594398 | 0.479552 | 0 | 0.017412 | 0.271262 | 2,916 | 73 | 87 | 39.945205 | 0.822588 | 0 | 0 | 0.526316 | 0 | 0 | 0.047668 | 0 | 0 | 0 | 0 | 0 | 0.105263 | 1 | 0.210526 | false | 0 | 0.070175 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
217ddb7ab709173147528cb6c585bd4df64516a7 | 4,025 | py | Python | examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | a4c488ae5c2e125616efad5a722f3dfd8a9bc450 | [
"MIT"
] | 370 | 2015-10-07T20:13:10.000Z | 2022-03-31T03:43:17.000Z | examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | a4c488ae5c2e125616efad5a722f3dfd8a9bc450 | [
"MIT"
] | 67 | 2016-03-14T12:18:44.000Z | 2022-02-24T09:24:31.000Z | examples/highcharts/pie-donut.py | Jbrunn/python-highcharts | a4c488ae5c2e125616efad5a722f3dfd8a9bc450 | [
"MIT"
] | 159 | 2016-02-25T15:07:52.000Z | 2022-03-12T13:04:14.000Z | # -*- coding: utf-8 -*-
"""
Highcharts Demos
Donut chart: http://www.highcharts.com/demo/pie-donut
"""
from highcharts import Highchart
H = Highchart(width = 850, height = 400)
data = [{
'y': 55.11,
'color': 'Highcharts.getOptions().colors[0]',
'drilldown': {
'name': 'MSIE versions',
'categories': ['MSIE 6.0', 'MSIE 7.0', 'MSIE 8.0', 'MSIE 9.0'],
'data': [10.85, 7.35, 33.06, 2.81],
'color': 'Highcharts.getOptions().colors[0]'
}
}, {
'y': 21.63,
'color': 'Highcharts.getOptions().colors[1]',
'drilldown': {
'name': 'Firefox versions',
'categories': ['Firefox 2.0', 'Firefox 3.0', 'Firefox 3.5', 'Firefox 3.6', 'Firefox 4.0'],
'data': [0.20, 0.83, 1.58, 13.12, 5.43],
'color': 'Highcharts.getOptions().colors[1]'
}
}, {
'y': 11.94,
'color': 'Highcharts.getOptions().colors[2]',
'drilldown': {
'name': 'Chrome versions',
'categories': ['Chrome 5.0', 'Chrome 6.0', 'Chrome 7.0', 'Chrome 8.0', 'Chrome 9.0',
'Chrome 10.0', 'Chrome 11.0', 'Chrome 12.0'],
'data': [0.12, 0.19, 0.12, 0.36, 0.32, 9.91, 0.50, 0.22],
'color': 'Highcharts.getOptions().colors[2]'
}
}, {
'y': 7.15,
'color': 'Highcharts.getOptions().colors[3]',
'drilldown': {
'name': 'Safari versions',
'categories': ['Safari 5.0', 'Safari 4.0', 'Safari Win 5.0', 'Safari 4.1', 'Safari/Maxthon',
'Safari 3.1', 'Safari 4.1'],
'data': [4.55, 1.42, 0.23, 0.21, 0.20, 0.19, 0.14],
'color': 'Highcharts.getOptions().colors[3]'
}
}, {
'y': 2.14,
'color': 'Highcharts.getOptions().colors[4]',
'drilldown': {
'name': 'Opera versions',
'categories': ['Opera 9.x', 'Opera 10.x', 'Opera 11.x'],
'data': [ 0.12, 0.37, 1.65],
'color': 'Highcharts.getOptions().colors[4]'
}
}]
# Chart-wide Highcharts options; the plain dicts mirror the JS config
# from the upstream Highcharts donut demo.
options = {
    'chart': {
        'type': 'pie'
    },
    'title': {
        'text': 'Browser market share, April, 2011'
    },
    'yAxis': {
        'title': {
            'text': 'Total percent market share'
        }
    },
    'plotOptions': {
        'pie': {
            # Both series share one centre so they render as concentric rings.
            'shadow': False,
            'center': ['50%', '50%']
        }
    },
    'tooltip': {
        'valueSuffix': '%'
    },
}
categories = ['MSIE', 'Firefox', 'Chrome', 'Safari', 'Opera']
browserData = []  # outer ring: one slice per browser
versionsData = []  # inner ring: one slice per browser version

# Split the raw `data` records into the two pie series.
for i in range(len(data)):
    browserData.append({
        'name': categories[i],
        'y': data[i]['y'],
        'color': data[i]['color']
    })
    drillDataLen = len(data[i]['drilldown']['data'])
    for j in range(drillDataLen):
        # Fade each version slice progressively from its browser's colour.
        # NOTE(review): under Python 2, j / drillDataLen is integer division
        # (always 0), so brightness would stay 0.2 — confirm the intended
        # interpreter version.
        brightness = 0.2 - (j / drillDataLen) / 5;
        versionsData.append({
            'name': data[i]['drilldown']['categories'][j],
            'y': data[i]['drilldown']['data'][j],
            'color': 'Highcharts.Color(' + data[i]['color'] + ').brighten(' + str(brightness) + ').get()'
        })
H.set_dict_options(options)
H.add_data_set(browserData, 'pie', 'Browsers', size='60%',
dataLabels={
'formatter': 'function () { \
return this.y > 5 ? this.point.name : null;\
}',
'color': 'white',
'distance': -30
})
H.add_data_set(versionsData, 'pie', 'Versions', size='80%',
innerSize='60%',
dataLabels={
'formatter': "function () {\
return this.y > 1 ? '<b>' + this.point.name + ':</b> ' + this.y + '%' : null;\
}"
})
H.htmlcontent | 32.459677 | 115 | 0.429814 | 407 | 4,025 | 4.235872 | 0.312039 | 0.095708 | 0.145012 | 0.179814 | 0.234339 | 0.046404 | 0.046404 | 0 | 0 | 0 | 0 | 0.076832 | 0.372671 | 4,025 | 124 | 116 | 32.459677 | 0.605941 | 0.023106 | 0 | 0.160377 | 0 | 0.009434 | 0.313631 | 0.084076 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.009434 | 0 | 0.028302 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
217fa2fb2460ab023c2ba02ea8a4fbc0c4e79eb1 | 961 | py | Python | computer_firm.py | GYosifov88/Python-Basics | f4290061264aebc417bde4948948e4a64739fec9 | [
"MIT"
] | null | null | null | computer_firm.py | GYosifov88/Python-Basics | f4290061264aebc417bde4948948e4a64739fec9 | [
"MIT"
] | null | null | null | computer_firm.py | GYosifov88/Python-Basics | f4290061264aebc417bde4948948e4a64739fec9 | [
"MIT"
] | null | null | null | number_of_computers = int(input())
# For each computer, the last digit of its rating is the review score and
# the leading digits are the number of potential sales. This table maps a
# score to the percentage of potential sales that are realised; scores not
# listed here contribute no sales (but still count toward the average).
# Fixed: removed the unused `number_of_sales`/`made_sales` variables and
# replaced the five-way elif chain with a single lookup.
SALE_PERCENT_BY_SCORE = {2: 0, 3: 50, 4: 70, 5: 85, 6: 100}

counter_sales = 0
total_ratings = 0

for _ in range(number_of_computers):
    rating = int(input())
    rating_scale = rating % 10     # review score (last digit)
    possible_sales = rating // 10  # potential sales (leading digits)
    total_ratings += rating_scale
    percent = SALE_PERCENT_BY_SCORE.get(rating_scale)
    if percent is not None:
        counter_sales += possible_sales * percent / 100

average_rating = total_ratings / number_of_computers
print(f'{counter_sales:.2f}')
print(f'{average_rating:.2f}')
| 25.289474 | 52 | 0.661811 | 130 | 961 | 4.546154 | 0.269231 | 0.167513 | 0.143824 | 0.186125 | 0.304569 | 0.263959 | 0.263959 | 0.263959 | 0 | 0 | 0 | 0.057024 | 0.251821 | 961 | 37 | 53 | 25.972973 | 0.764951 | 0 | 0 | 0.172414 | 0 | 0 | 0.040795 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2189e908a5d40835988cf4b8179b93819c50451d | 2,386 | py | Python | wren/pomo.py | kthy/wren | 62e9439ea82a1d984f07fa8cd00421e0e640196f | [
"MIT"
] | 1 | 2021-06-04T07:15:02.000Z | 2021-06-04T07:15:02.000Z | wren/pomo.py | kthy/wren | 62e9439ea82a1d984f07fa8cd00421e0e640196f | [
"MIT"
] | 9 | 2021-02-20T22:33:05.000Z | 2021-04-12T17:35:48.000Z | wren/pomo.py | pyxy-dk/wren | 62e9439ea82a1d984f07fa8cd00421e0e640196f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Gettext manipulation methods."""
from os import remove
from os.path import exists
from pathlib import Path
from shutil import copyfile, copystat
from typing import Sequence
from filehash import FileHash
from polib import MOFile, POFile, mofile
from wren.change import Change
def apply_changes(mo_file: MOFile, changelist: Sequence[Change]) -> None:
    """Apply all changes in the provided list of changes to the given MOFile."""
    for pending_change in changelist:
        pending_change.apply(mo_file)
def backup_original_mo(wowsdir: str, locale: str) -> None:
    """Copy the original `global.mo` to `global.mo.original`."""
    # Verified copy: _copyfile_and_checksum raises OSError on hash mismatch.
    _copyfile_and_checksum(
        _global_mo_path(wowsdir, locale),
        _backup_mo_path(wowsdir, locale),
    )
def convert_mo_to_po(wowsdir: str, locale: str, outputdir: str) -> POFile:
    """Save the MO file for the given locale in PO format."""
    # NOTE(review): annotated to return POFile, but no value is returned —
    # confirm whether callers expect the parsed file object back.
    mo_path = Path(_global_mo_path(wowsdir, locale))
    if not mo_path.exists():
        raise OSError(f"MO file for locale {locale} not found")
    mofile(mo_path).save_as_pofile(f"{outputdir}/{mo_path.stem}_{locale}.po")
def get_mo(wowsdir: str, locale: str) -> MOFile:
    """Open and return the global MO file in the given directory.

    The catalog is looked up at
    `<wowsdir>/res/texts/<locale>/LC_MESSAGES/global.mo`.
    """
    return mofile(_global_mo_path(wowsdir, locale))
def restore_original_mo(wowsdir: str, locale: str) -> None:
    """Reinstate the original `global.mo` from `global.mo.original`."""
    backup = _backup_mo_path(wowsdir, locale)
    # No-op when there is no backup to restore from.
    if exists(backup):
        _copyfile_and_checksum(backup, _global_mo_path(wowsdir, locale))
        remove(backup)
def _copyfile_and_checksum(from_path, to_path) -> None:
    """Copy a file (contents and metadata) from from_path to to_path.

    Raises OSError if the new file's MD5 checksum doesn't match the
    original's.
    """
    copyfile(from_path, to_path)
    copystat(from_path, to_path)
    checker = FileHash("md5")
    source_sum = checker.hash_file(from_path)
    copy_sum = checker.hash_file(to_path)
    if source_sum != copy_sum:
        raise OSError("Copy failed, hash mismatch detected")
def _backup_mo_path(wowsdir: str, locale: str) -> str:
    """Path of the backup copy, stored next to the live MO file."""
    return f"{_global_mo_path(wowsdir, locale)}.original"
def _global_mo_path(wowsdir: str, locale: str) -> str:
    """Path of the live gettext catalog for `locale` under `wowsdir`."""
    return f"{wowsdir}/res/texts/{locale}/LC_MESSAGES/global.mo"
| 34.57971 | 80 | 0.725482 | 354 | 2,386 | 4.644068 | 0.242938 | 0.069343 | 0.072993 | 0.0809 | 0.277372 | 0.209854 | 0.192214 | 0.152068 | 0.152068 | 0.109489 | 0 | 0.001005 | 0.165968 | 2,386 | 68 | 81 | 35.088235 | 0.825126 | 0.19321 | 0 | 0.102564 | 0 | 0 | 0.111406 | 0.062069 | 0 | 0 | 0 | 0 | 0 | 1 | 0.205128 | false | 0 | 0.205128 | 0.051282 | 0.487179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
218b02aeb89c1d716160e75933fbb97fec67090f | 199 | py | Python | mysite/polls/urls.py | cs-fullstack-fall-2018/django-intro1-psanon19 | 0ae36780fd664313a011e7a219bc401b158fe93f | [
"Apache-2.0"
] | null | null | null | mysite/polls/urls.py | cs-fullstack-fall-2018/django-intro1-psanon19 | 0ae36780fd664313a011e7a219bc401b158fe93f | [
"Apache-2.0"
] | null | null | null | mysite/polls/urls.py | cs-fullstack-fall-2018/django-intro1-psanon19 | 0ae36780fd664313a011e7a219bc401b158fe93f | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
path('language/', views.language),
path('system/', views.system),
path('ide/', views.ide),
path('', views.nothing)
] | 19.9 | 38 | 0.638191 | 24 | 199 | 5.291667 | 0.458333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18593 | 199 | 10 | 39 | 19.9 | 0.783951 | 0 | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
218be87fd5642367ab0f3fbe799b44f1dfb60d9c | 640 | py | Python | pyconcz/announcements/migrations/0002_announcement_font_size.py | martinpucala/cz.pycon.org-2019 | 044337ed0e7f721e96d88da69511ba5493d127e6 | [
"MIT"
] | 6 | 2018-08-25T13:40:22.000Z | 2019-05-25T21:58:41.000Z | pyconcz/announcements/migrations/0002_announcement_font_size.py | Giraafje/cz.pycon.org-2019 | f7bfad2f0c0f98368e2f6163f7dce70335549a68 | [
"MIT"
] | 188 | 2018-08-26T06:53:50.000Z | 2022-02-12T04:04:36.000Z | pyconcz/announcements/migrations/0002_announcement_font_size.py | Giraafje/cz.pycon.org-2019 | f7bfad2f0c0f98368e2f6163f7dce70335549a68 | [
"MIT"
] | 15 | 2018-11-03T06:32:34.000Z | 2020-02-11T21:17:14.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-11 05:18
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('announcements', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='announcement',
name='font_size',
field=models.PositiveSmallIntegerField(default=1, help_text='1 (largest) to 4 (smallest)', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)]),
),
]
| 29.090909 | 206 | 0.676563 | 70 | 640 | 6.057143 | 0.685714 | 0.070755 | 0.141509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052838 | 0.201563 | 640 | 21 | 207 | 30.47619 | 0.776908 | 0.107813 | 0 | 0 | 1 | 0 | 0.128521 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.214286 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
218f388452a32732371a26acb9b1b26668fa0afb | 238 | py | Python | cloudmesh-exercises/cloudmesh-common-2.py | cybertraining-dsc/fa19-516-170 | 8746be5a89d897a155468303308efb71ce7ba849 | [
"Apache-2.0"
] | null | null | null | cloudmesh-exercises/cloudmesh-common-2.py | cybertraining-dsc/fa19-516-170 | 8746be5a89d897a155468303308efb71ce7ba849 | [
"Apache-2.0"
] | null | null | null | cloudmesh-exercises/cloudmesh-common-2.py | cybertraining-dsc/fa19-516-170 | 8746be5a89d897a155468303308efb71ce7ba849 | [
"Apache-2.0"
] | 1 | 2019-09-06T17:27:32.000Z | 2019-09-06T17:27:32.000Z | # fa19-516-170 E.Cloudmesh.Common.2
from cloudmesh.common.dotdict import dotdict
color = {"red": 255, "blue": 255, "green": 255, "alpha": 0}
color = dotdict(color)
print("A RGB color: ", color.red, color.blue, color.green, color.alpha) | 29.75 | 71 | 0.697479 | 37 | 238 | 4.486486 | 0.540541 | 0.180723 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.091346 | 0.12605 | 238 | 8 | 71 | 29.75 | 0.706731 | 0.138655 | 0 | 0 | 0 | 0 | 0.147059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
219394299c86acbdbecacd314e8a2cf464bd2c78 | 1,720 | py | Python | day02/main.py | aschmied/advent-of-code-2020 | 4112cebaf4dc4c6a931824da89ab894d21595673 | [
"BSD-2-Clause"
] | null | null | null | day02/main.py | aschmied/advent-of-code-2020 | 4112cebaf4dc4c6a931824da89ab894d21595673 | [
"BSD-2-Clause"
] | null | null | null | day02/main.py | aschmied/advent-of-code-2020 | 4112cebaf4dc4c6a931824da89ab894d21595673 | [
"BSD-2-Clause"
] | null | null | null | def main():
valid_passwords_by_range_policy = 0
valid_passwords_by_position_policy = 0
with open('input') as f:
for line in f:
policy_string, password = parse_line(line.strip())
policy = Policy.parse(policy_string)
if policy.is_valid_by_range_policy(password):
valid_passwords_by_range_policy += 1
if policy.is_valid_by_position_policy(password):
valid_passwords_by_position_policy += 1
print(f'There are {valid_passwords_by_range_policy} valid passwords by "range" policy.')
print(f'There are {valid_passwords_by_position_policy} valid passwords by "position" policy.')
def parse_line(line):
tokens = line.split(':', 1)
policy_string = tokens[0]
password = tokens[1].strip()
return policy_string, password
class Policy:
def __init__(self, first_number, second_number, letter):
self._first_number = first_number
self._second_number = second_number
self._letter = letter
def is_valid_by_range_policy(self, password):
count = password.count(self._letter)
return count >= self._first_number and count <= self._second_number
def is_valid_by_position_policy(self, password):
index1 = self._first_number - 1
index2 = self._second_number - 1
return (password[index1] == self._letter) != (password[index2] == self._letter)
@classmethod
def parse (cls, string):
tokens = string.split(' ')
first_number_string, second_number_string = tokens[0].split('-')
letter = tokens[1]
return cls(int(first_number_string), int(second_number_string), letter)
if __name__ == '__main__':
main()
| 38.222222 | 98 | 0.676744 | 219 | 1,720 | 4.922374 | 0.205479 | 0.103896 | 0.118738 | 0.077922 | 0.346939 | 0.055659 | 0.055659 | 0 | 0 | 0 | 0 | 0.011278 | 0.226744 | 1,720 | 44 | 99 | 39.090909 | 0.799248 | 0 | 0 | 0 | 0 | 0 | 0.103488 | 0.040116 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157895 | false | 0.394737 | 0 | 0 | 0.289474 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
2194785ae66285d905bdd54bde253e59e60bc9d5 | 1,476 | py | Python | progs/PyEpoch-master/PyEpoch-master/example.py | am-3/TimeZoned | e8ae2e90c2d6addf13b145aa2a4c7a9a66c1346e | [
"MIT"
] | null | null | null | progs/PyEpoch-master/PyEpoch-master/example.py | am-3/TimeZoned | e8ae2e90c2d6addf13b145aa2a4c7a9a66c1346e | [
"MIT"
] | null | null | null | progs/PyEpoch-master/PyEpoch-master/example.py | am-3/TimeZoned | e8ae2e90c2d6addf13b145aa2a4c7a9a66c1346e | [
"MIT"
] | null | null | null | # PyEpoch Module Example File.
# Example script demonstrating each public helper in the pyepoch module.
import pyepoch

# -- TODAY() --
# The today() function returns today's date.
today = pyepoch.today()
print("Today's date & time:")
print(today)

# -- TIMEZONE() --
# The timezone() function returns a date with a different timezone.
# timezone() takes two(2) arguments:
# - date = a date to be converted.
# - tz = the timezone to convert to (ex. 'US/Pacific').
today_pst = pyepoch.timezone(today, 'US/Pacific')
print('Today\'s date & time in Pacific time:')
print(today_pst)

# -- TIMEZONE_SET() --
# The timezone_set() function returns a date with a different timezone and new hour/minute/second values.
# timezone_set() takes five(5) arguments:
# - date = a date to be converted.
# - tz = the timezone to convert to (ex. 'US/Pacific').
# - h = hour, changes the hour of the output.
# - m = minute, changes the minute of the output.
# - s = second, changes the second(s) of the output.
# NOTE(review): the name `time` shadows the stdlib module name; harmless here
# since this script never imports the time module.
time = pyepoch.timezone_set(today, 'US/Pacific', 8, 0, 0)
print('Today\'s date at 8 o\'clock Pacific time: ')
print(time)

# -- EPOCH_SEC() --
# The epoch_sec() function returns the number of seconds since the UNIX epoch (1970, 1, 1) up to the provided date.
# epoch_sec() takes two(2) arguments:
# - date = a date to be converted.
# - tz = the timezone as a string, (ex. 'US/Pacific').
sec = pyepoch.epoch_sec(today_pst, 'US/Pacific')
print('Todays\'s date & time in Pacific time as seconds since the Unix epoch: ')
print(sec)
| 31.404255 | 117 | 0.680217 | 228 | 1,476 | 4.355263 | 0.276316 | 0.054381 | 0.040282 | 0.045317 | 0.401813 | 0.32427 | 0.27996 | 0.27996 | 0.195368 | 0.195368 | 0 | 0.010779 | 0.182927 | 1,476 | 46 | 118 | 32.086957 | 0.812604 | 0.651762 | 0 | 0 | 0 | 0 | 0.180894 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0.615385 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
2197966d631c9c92b13301a3f1143b67b6729392 | 1,735 | py | Python | sum/4-sum-II.py | windowssocket/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T02:29:40.000Z | 2020-02-05T03:28:16.000Z | sum/4-sum-II.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 1 | 2019-03-08T13:22:32.000Z | 2019-03-08T13:22:32.000Z | sum/4-sum-II.py | xidongc/py_leetcode | 241dbf8d7dab7db5215c2526321fcdb378b45492 | [
"Apache-2.0"
] | 3 | 2018-05-29T11:50:24.000Z | 2018-11-27T12:31:01.000Z | # LTE using two pointers O(n**3)
class Solution(object):
    """Two-pointer attempt at LeetCode 454 (4Sum II).

    O(n**3) overall; marked LTE (exceeds the time limit) in the original and
    kept for reference alongside the O(n**2) hashmap solution below.
    """

    def fourSumCount(self, A, B, C, D):
        # Count tuples (i, j, k, t) with A[i] + B[j] + C[k] + D[t] == 0.
        # corner case:
        if len(A) == 0:
            return 0
        # Sorting C and D is what makes the two-pointer sweep valid;
        # A and B are sorted too, as in the original, though the outer
        # loops do not depend on their order.
        A.sort()
        B.sort()
        C.sort()
        D.sort()
        count = 0
        for i in range(len(A)):
            for j in range(len(B)):
                # k walks C from the left, t walks D from the right.
                k = 0
                t = len(D) - 1
                while 0 <= k < len(C) and 0 <= t < len(D):
                    if A[i] + B[j] + C[k] + D[t] > 0:
                        t -= 1
                    elif A[i] + B[j] + C[k] + D[t] < 0:
                        k += 1
                    else:
                        # Zero sum found: count the runs of equal values in C
                        # and D so every duplicate pairing is counted once.
                        tmp1 = 1
                        tmp2 = 1
                        while 0 <= k < len(C) - 1 and C[k + 1] == C[k]:
                            k += 1
                            tmp1 += 1
                        while 1 <= t < len(D) and D[t - 1] == D[t]:
                            t -= 1
                            tmp2 += 1
                        count += tmp1 * tmp2
                        k += 1
                        t -= 1
        return count
# hashmap Solution AC O(n**2)
class Solution(object):
    """Accepted O(n**2) solution to LeetCode 454 (4Sum II)."""

    def fourSumCount(self, A, B, C, D):
        """Count tuples (i, j, k, t) with A[i] + B[j] + C[k] + D[t] == 0.

        :type A: List[int]
        :type B: List[int]
        :type C: List[int]
        :type D: List[int]
        :rtype: int
        """
        from collections import Counter
        # Tally every pairwise sum from A and B, then for each pairwise sum
        # from C and D look up how many (a, b) pairs cancel it exactly.
        # Counter returns 0 for missing keys, so no membership test is needed.
        pair_sums = Counter(a + b for a in A for b in B)
        return sum(pair_sums[-(c + d)] for c in C for d in D)
219a50648cf64b278567555f4d618a30757f4bc1 | 1,200 | py | Python | betfairlightweight/endpoints/navigation.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | 1 | 2020-04-15T22:17:26.000Z | 2020-04-15T22:17:26.000Z | betfairlightweight/endpoints/navigation.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | null | null | null | betfairlightweight/endpoints/navigation.py | rozzac90/betfair | de885adf308e48bfc2614df46a5375a7df6386ef | [
"MIT"
] | 1 | 2021-04-26T14:47:28.000Z | 2021-04-26T14:47:28.000Z | from requests import ConnectionError
from ..exceptions import APIError
from ..utils import check_status_code
from .baseendpoint import BaseEndpoint
class Navigation(BaseEndpoint):
    """
    Navigation operations.
    """

    def list_navigation(self, session=None):
        """
        This Navigation Data for Applications service allows the retrieval of the
        full Betfair market navigation menu from a compressed file.

        :param requests.session session: Requests session object

        :rtype: json
        """
        return self.request(session=session)

    def request(self, method=None, params=None, session=None):
        """Issue the GET request, wrapping any transport failure in APIError."""
        http = session or self.client.session
        try:
            response = http.get(
                self.url,
                headers=self.client.request_headers,
                timeout=(self.connect_timeout, self.read_timeout),
            )
        except ConnectionError:
            raise APIError(None, method, params, 'ConnectionError')
        except Exception as e:
            # Any other failure is surfaced uniformly as an APIError.
            raise APIError(None, method, params, e)
        check_status_code(response)
        return response.json()

    @property
    def url(self):
        """Endpoint URL taken from the configured client."""
        return self.client.navigation_uri
| 30 | 85 | 0.66 | 130 | 1,200 | 6.023077 | 0.461538 | 0.05364 | 0.038314 | 0.058748 | 0.074074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.265 | 1,200 | 39 | 86 | 30.769231 | 0.887755 | 0.19 | 0 | 0 | 0 | 0 | 0.016502 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.190476 | 0.047619 | 0.52381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
219ba636e42aee8cec43580d423fc62e4f5c5cf3 | 686 | py | Python | flaskr/models.py | ukeskin/cevrimici-kitap-galerisi | bea06dc417bb779e185b50d6f7f848a33e6f7bcb | [
"MIT"
] | null | null | null | flaskr/models.py | ukeskin/cevrimici-kitap-galerisi | bea06dc417bb779e185b50d6f7f848a33e6f7bcb | [
"MIT"
] | null | null | null | flaskr/models.py | ukeskin/cevrimici-kitap-galerisi | bea06dc417bb779e185b50d6f7f848a33e6f7bcb | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired
from database import db
class User(object):
    """An application user stored in the `user` collection via the db helper."""

    def __init__(self, name, avatar, email, password):
        self.name = name
        self.email = email
        self.password = password
        self.avatar = avatar

    def insert(self):
        """Persist this user unless one with the same email already exists."""
        existing = db.find_one('user', {'email': self.email})
        if existing:
            return
        db.insert(collection='user', data=self.json())

    def json(self):
        """Return this user's fields as a plain serializable dict."""
        return {
            "name": self.name,
            "avatar": self.avatar,
            "email": self.email,
            "password": self.password,
        }
| 31.181818 | 58 | 0.586006 | 76 | 686 | 5.210526 | 0.407895 | 0.060606 | 0.070707 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.310496 | 686 | 21 | 59 | 32.666667 | 0.837209 | 0 | 0 | 0 | 0 | 0 | 0.052478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0.2 | 0.2 | 0.05 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
21a04335d89c7d0c5916d0d77c189c61e2cfb328 | 24,611 | py | Python | GenerateSyntheticData.py | dragonfly-asl/SyntheticDataGenerator | 368b8e6ba0489053e98abd7bc0b720b71d6cae99 | [
"Apache-2.0"
] | null | null | null | GenerateSyntheticData.py | dragonfly-asl/SyntheticDataGenerator | 368b8e6ba0489053e98abd7bc0b720b71d6cae99 | [
"Apache-2.0"
] | null | null | null | GenerateSyntheticData.py | dragonfly-asl/SyntheticDataGenerator | 368b8e6ba0489053e98abd7bc0b720b71d6cae99 | [
"Apache-2.0"
] | 1 | 2019-06-25T15:05:02.000Z | 2019-06-25T15:05:02.000Z | # /bin/env python
# coding: utf-8
from __future__ import print_function
import sys
import argparse
import logging
import os
import math
import cv2
import numpy as np
class GenerateSyntheticData:
    """Applies (optionally randomized) ImageMagick transformations to images
    or videos to produce synthetic variants.

    Video inputs are split into PNG frames with OpenCV, every frame is
    transformed with the same option set, and the frames are re-assembled
    into an mp4 at the original frame rate.
    """

    # Class-level import; accessed throughout as self.Magick.
    import PythonMagick as Magick

    def __init__(self, logger=None):
        """Create a generator; falls back to a root stdout logger when none is given."""
        if logger is None:
            logging.basicConfig(stream=sys.stdout, level=logging.INFO)
            self.logger = logging.getLogger()
        else:
            self.logger = logger

    @staticmethod
    def appendArgumentParser(argparser):
        """Register every transformation option on the given ArgumentParser."""
        argparser.add_argument('--shift-x', type=int, help='')
        argparser.add_argument('--shift-y', type=int, help='')
        argparser.add_argument('--skew-x', type=float, help='')
        argparser.add_argument('--skew-y', type=float, help='')
        argparser.add_argument('--rotate', type=float, help='rotates image clock- or counterclock-wise (angle in degrees)')
        argparser.add_argument('--horizontal_flip', action='store_true', help='horizontally flips image')
        argparser.add_argument('--zoom', type=str, help='resize image; argument given in percentage')
        argparser.add_argument('--contrast', type=int, help='default=0; 0~infinity (integer times contract is applided to image)')
        argparser.add_argument('--brightness', type=float, help='default=100')
        argparser.add_argument('--saturation', type=float, help='default=100')
        argparser.add_argument('--hue', type=float, help='default=100')
        argparser.add_argument('--blur', action='store_true', help='')
        argparser.add_argument('--blur_radius', type=float, default=10, help='')
        argparser.add_argument('--blur_sigma', type=float, default=1, help='')
        argparser.add_argument('--gaussianBlur', action='store_true', help='')
        argparser.add_argument('--gaussianBlur_width', type=float, default=5, help='')
        argparser.add_argument('--gaussianBlur_sigma', type=float, default=1, help='')
        argparser.add_argument('--despeckle', action='store_true', help='')
        argparser.add_argument('--enhance', action='store_true', help='')
        argparser.add_argument('--equalize', action='store_true', help='')
        argparser.add_argument('--gamma', type=float, help='0 ~ 2; 1 is default')
        argparser.add_argument('--implode', type=float, help='Implode factor 0~1; 0 (nothing) to 1 (full); 0.0 ~ 0.5 recommended.')
        argparser.add_argument('--negate', action='store_true', help='')
        argparser.add_argument('--normalize', action='store_true', help='')
        argparser.add_argument('--quantize', action='store_true', help='')
        argparser.add_argument('--reduceNoise', type=int, help='default=1')
        argparser.add_argument('--shade', action='store_true', help='')
        argparser.add_argument('--shade_azimuth', type=float, default=50, help='')
        argparser.add_argument('--shade_elevation', type=float, default=50, help='')
        argparser.add_argument('--sharpen', action='store_true', help='')
        argparser.add_argument('--sharpen_radius', type=float, default=1, help='')
        argparser.add_argument('--sharpen_sigma', type=float, default=0.5, help='')
        argparser.add_argument('--swirl', type=float, help='degree; default=10')
        argparser.add_argument('--wave', action='store_true', help='')
        argparser.add_argument('--wave_amplitude', type=float, default=5, help='')
        argparser.add_argument('--wave_wavelength', type=float, default=100, help='')
        argparser.add_argument('--auto', action='store_true', help='')
        argparser.add_argument('--auto_ops', type=str, default='', help='')
        argparser.add_argument('--auto_rotate_min', type=float, default=0, help='')
        argparser.add_argument('--auto_rotate_max', type=float, default=0, help='')
        argparser.add_argument('--auto_zoom_min', type=float, default=0, help='')
        argparser.add_argument('--auto_zoom_max', type=float, default=0, help='')

    def generateRandomOptions(self, cmdArg):
        """Sample one random transformation option set.

        When cmdArg.auto_ops names specific operations (comma separated),
        only those are sampled; otherwise nearly all operations receive
        random values. Returns a fresh argparse.Namespace shaped like the
        CLI options, with unsampled fields left as None.
        """
        def _generateRandomOptionsShift(args):
            args.shift_x = int(np.abs(np.random.normal(0, 3)))  # -10 ~ +10
            args.shift_y = int(np.abs(np.random.normal(0, 1)))  # -3 ~ +3

        def _generateRandomOptionsSkew(args):
            args.skew_x = int(np.random.normal(0, 3))  # -10 ~ +10
            args.skew_y = int(np.random.normal(0, 3))  # -10 ~ +10

        def _generateRandomOptionsRotate(args):
            # Explicit min/max from the CLI wins over the default distribution.
            if cmdArg.auto_rotate_min != cmdArg.auto_rotate_max:
                args.rotate = int(np.random.uniform(cmdArg.auto_rotate_min, cmdArg.auto_rotate_max))
            else:
                args.rotate = int(np.random.normal(0, 3))  # -10 ~ +10

        def _generateRandomOptionsZoom(args):
            if cmdArg.auto_zoom_min != cmdArg.auto_zoom_max:
                args.zoom = str(int(np.random.uniform(cmdArg.auto_zoom_min, cmdArg.auto_zoom_max))) + '%'
            else:
                args.zoom = str(int(np.random.normal(100, 3))) + '%'  # 90% ~ 110%

        def _generateRandomOptionsContrast(args):
            args.contrast = int(np.abs(np.random.normal(0, 1)))  # 0 ~ +3

        def _generateRandomOptionsBrightness(args):
            args.brightness = np.random.normal(100, 5)  # 85 ~ 115

        def _generateRandomOptionsSaturation(args):
            args.saturation = np.random.normal(100, 5)  # 85 ~ 115

        def _generateRandomOptionsHue(args):
            args.hue = np.random.normal(100, 5)  # 85 ~ 115

        def _generateRandomOptionsBlur(args):
            # With 10% probability pick exactly one blur flavor.
            if np.random.binomial(1, 0.1):  # do blur
                if np.random.binomial(1, 0.5):
                    args.blur = True
                else:
                    args.gaussianBlur = True
            if args.blur:
                args.blur_radius = np.abs(np.random.normal(0, 3))  # 0 ~ 10
                args.blur_sigma = np.abs(np.random.normal(0, 0.7))  # 0 ~ 2
            if args.gaussianBlur:
                args.gaussianBlur_width = np.abs(np.random.normal(0, 3))  # 0 ~ 10
                args.gaussianBlur_sigma = np.abs(np.random.normal(0, 0.7))  # 0 ~ 2

        def _generateRandomOptionsHorizontalFlip(args):
            args.horizontal_flip = (np.random.binomial(1, 0.1) > 0)

        def _generateRandomOptionsDespeckle(args):
            args.despeckle = (np.random.binomial(1, 0.5) > 0)

        def _generateRandomOptionsEnhance(args):
            args.enhance = (np.random.binomial(1, 0.5) > 0)

        def _generateRandomOptionsEqualize(args):
            args.equalize = (np.random.binomial(1, 0.1) == 1)

        def _generateRandomOptionsNegate(args):
            args.negate = (np.random.binomial(1, 0.1) == 1)

        def _generateRandomOptionsNormalize(args):
            args.normalize = (np.random.binomial(1, 0.1) > 0)

        def _generateRandomOptionsQuantize(args):
            args.quantize = (np.random.binomial(1, 0.1) > 0)

        def _generateRandomOptionsGamma(args):
            args.gamma = np.abs(np.random.normal(1, 0.03))  # 0 ~ 2

        def _generateRandomOptionsImplode(args):
            args.implode = 0
            if np.random.binomial(1, 0.5) > 0:
                args.implode = np.random.normal(0, 0.15)  # -0.5 ~ 0.5

        def _generateRandomOptionsReduceNoise(args):
            args.reduceNoise = int(np.abs(np.random.normal(0, 0.7)))  # 0 ~ 2

        def _generateRandomOptionsShade(args):
            args.shade = (np.random.binomial(1, 0.1) > 0)
            if args.shade:
                args.shade_azimuth = np.random.normal(50, 17)  # 0 ~ 100
                args.shade_elevation = np.random.normal(50, 17)  # 0 ~ 100

        def _generateRandomOptionsSharpen(args):
            args.sharpen = (np.random.binomial(1, 0.1) > 0)
            if args.sharpen:
                args.sharpen_radius = np.abs(np.random.normal(0, 0.7))  # 0 ~ 2
                args.sharpen_sigma = np.abs(np.random.normal(0, 0.3))  # 0 ~ 1

        def _generateRandomOptionsSwirl(args):
            args.swirl = np.random.normal(0, 5)  # -15 ~ +15

        def _generateRandomOptionsWave(args):
            args.wave = (np.random.binomial(1, 0.3) > 0)
            if args.wave:
                args.wave_amplitude = np.abs(np.random.normal(5, 0.3))  # 0 ~ 10
                args.wave_wavelength = np.abs(np.random.normal(100, 10))  # 0 ~ 200

        # Start with every option unset; each sampler fills in its own fields.
        args = argparse.Namespace()
        args.shift_x = args.shift_y = None
        args.skew_x = args.skew_y = None
        args.rotate = args.zoom = None
        args.contrast = args.brightness = args.saturation = args.hue = None
        args.blur = args.gaussianBlur = None
        args.horizontal_flip = None
        args.despeckle = args.enhance = args.reduceNoise = None
        args.equalize = args.negate = args.normalize = args.quantize = args.gamma = None
        args.shade = None
        args.sharpen = None
        args.implode = args.swirl = args.wave = None
        if len(cmdArg.auto_ops) > 0:
            # Sample only the explicitly requested operations.
            for op in cmdArg.auto_ops.split(","):
                if op == 'shift': _generateRandomOptionsShift(args)
                elif op == 'skew': _generateRandomOptionsSkew(args)
                elif op == 'rotate': _generateRandomOptionsRotate(args)
                elif op == 'zoom': _generateRandomOptionsZoom(args)
                elif op == 'contrast': _generateRandomOptionsContrast(args)
                elif op == 'brightness': _generateRandomOptionsBrightness(args)
                elif op == 'saturation': _generateRandomOptionsSaturation(args)
                elif op == 'hue': _generateRandomOptionsHue(args)
                elif op == 'blur': _generateRandomOptionsBlur(args)
                elif op == 'horizontal_flip': _generateRandomOptionsHorizontalFlip(args)
                elif op == 'despeckle': _generateRandomOptionsDespeckle(args)
                elif op == 'enhance': _generateRandomOptionsEnhance(args)
                elif op == 'equalize': _generateRandomOptionsEqualize(args)
                elif op == 'negate': _generateRandomOptionsNegate(args)
                elif op == 'normalize': _generateRandomOptionsNormalize(args)
                elif op == 'quantize': _generateRandomOptionsQuantize(args)
                elif op == 'gamma': _generateRandomOptionsGamma(args)
                elif op == 'implode': _generateRandomOptionsImplode(args)
                elif op == 'reduceNoise': _generateRandomOptionsReduceNoise(args)
                elif op == 'shade': _generateRandomOptionsShade(args)
                elif op == 'sharpen': _generateRandomOptionsSharpen(args)
                elif op == 'swirl': _generateRandomOptionsSwirl(args)
                elif op == 'wave': _generateRandomOptionsWave(args)
                else:
                    self.logger.error('Unknown Operation Name ' + op)
        else:  # apply all operations
            # Horizontal flip, equalize, negate and wave were deliberately
            # disabled (commented out) in the original full-auto mode.
            _generateRandomOptionsShift(args)
            _generateRandomOptionsSkew(args)
            _generateRandomOptionsRotate(args)
            _generateRandomOptionsZoom(args)
            _generateRandomOptionsContrast(args)
            _generateRandomOptionsBrightness(args)
            _generateRandomOptionsSaturation(args)
            _generateRandomOptionsHue(args)
            _generateRandomOptionsBlur(args)
            _generateRandomOptionsDespeckle(args)
            _generateRandomOptionsEnhance(args)
            _generateRandomOptionsNormalize(args)
            _generateRandomOptionsQuantize(args)
            _generateRandomOptionsGamma(args)
            _generateRandomOptionsImplode(args)
            _generateRandomOptionsReduceNoise(args)
            _generateRandomOptionsShade(args)
            _generateRandomOptionsSharpen(args)
            _generateRandomOptionsSwirl(args)
        self.logger.debug('Randomly generated options: ')
        for key in vars(args):
            self.logger.debug(' -- %s: %s' % (key, getattr(args, key)))
        self.logger.debug('')
        return args

    def isVideo(self, inputF):
        """Return True when *inputF* ends with a known video file extension."""
        video_file_extensions = (
            '.264', '.3g2', '.3gp', '.3gp2', '.3gpp', '.3gpp2', '.3mm', '.3p2', '.60d', '.787', '.89', '.aaf', '.aec', '.aep', '.aepx',
            '.aet', '.aetx', '.ajp', '.ale', '.am', '.amc', '.amv', '.amx', '.anim', '.aqt', '.arcut', '.arf', '.asf', '.asx', '.avb',
            '.avc', '.avd', '.avi', '.avp', '.avs', '.avs', '.avv', '.axm', '.bdm', '.bdmv', '.bdt2', '.bdt3', '.bik', '.bin', '.bix',
            '.bmk', '.bnp', '.box', '.bs4', '.bsf', '.bvr', '.byu', '.camproj', '.camrec', '.camv', '.ced', '.cel', '.cine', '.cip',
            '.clpi', '.cmmp', '.cmmtpl', '.cmproj', '.cmrec', '.cpi', '.cst', '.cvc', '.cx3', '.d2v', '.d3v', '.dat', '.dav', '.dce',
            '.dck', '.dcr', '.dcr', '.ddat', '.dif', '.dir', '.divx', '.dlx', '.dmb', '.dmsd', '.dmsd3d', '.dmsm', '.dmsm3d', '.dmss',
            '.dmx', '.dnc', '.dpa', '.dpg', '.dream', '.dsy', '.dv', '.dv-avi', '.dv4', '.dvdmedia', '.dvr', '.dvr-ms', '.dvx', '.dxr',
            '.dzm', '.dzp', '.dzt', '.edl', '.evo', '.eye', '.ezt', '.f4p', '.f4v', '.fbr', '.fbr', '.fbz', '.fcp', '.fcproject',
            '.ffd', '.flc', '.flh', '.fli', '.flv', '.flx', '.gfp', '.gl', '.gom', '.grasp', '.gts', '.gvi', '.gvp', '.h264', '.hdmov',
            '.hkm', '.ifo', '.imovieproj', '.imovieproject', '.ircp', '.irf', '.ism', '.ismc', '.ismv', '.iva', '.ivf', '.ivr', '.ivs',
            '.izz', '.izzy', '.jss', '.jts', '.jtv', '.k3g', '.kmv', '.ktn', '.lrec', '.lsf', '.lsx', '.m15', '.m1pg', '.m1v', '.m21',
            '.m21', '.m2a', '.m2p', '.m2t', '.m2ts', '.m2v', '.m4e', '.m4u', '.m4v', '.m75', '.mani', '.meta', '.mgv', '.mj2', '.mjp',
            '.mjpg', '.mk3d', '.mkv', '.mmv', '.mnv', '.mob', '.mod', '.modd', '.moff', '.moi', '.moov', '.mov', '.movie', '.mp21',
            '.mp21', '.mp2v', '.mp4', '.mp4v', '.mpe', '.mpeg', '.mpeg1', '.mpeg4', '.mpf', '.mpg', '.mpg2', '.mpgindex', '.mpl',
            '.mpl', '.mpls', '.mpsub', '.mpv', '.mpv2', '.mqv', '.msdvd', '.mse', '.msh', '.mswmm', '.mts', '.mtv', '.mvb', '.mvc',
            '.mvd', '.mve', '.mvex', '.mvp', '.mvp', '.mvy', '.mxf', '.mxv', '.mys', '.ncor', '.nsv', '.nut', '.nuv', '.nvc', '.ogm',
            '.ogv', '.ogx', '.osp', '.otrkey', '.pac', '.par', '.pds', '.pgi', '.photoshow', '.piv', '.pjs', '.playlist', '.plproj',
            '.pmf', '.pmv', '.pns', '.ppj', '.prel', '.pro', '.prproj', '.prtl', '.psb', '.psh', '.pssd', '.pva', '.pvr', '.pxv',
            '.qt', '.qtch', '.qtindex', '.qtl', '.qtm', '.qtz', '.r3d', '.rcd', '.rcproject', '.rdb', '.rec', '.rm', '.rmd', '.rmd',
            '.rmp', '.rms', '.rmv', '.rmvb', '.roq', '.rp', '.rsx', '.rts', '.rts', '.rum', '.rv', '.rvid', '.rvl', '.sbk', '.sbt',
            '.scc', '.scm', '.scm', '.scn', '.screenflow', '.sec', '.sedprj', '.seq', '.sfd', '.sfvidcap', '.siv', '.smi', '.smi',
            '.smil', '.smk', '.sml', '.smv', '.spl', '.sqz', '.srt', '.ssf', '.ssm', '.stl', '.str', '.stx', '.svi', '.swf', '.swi',
            '.swt', '.tda3mt', '.tdx', '.thp', '.tivo', '.tix', '.tod', '.tp', '.tp0', '.tpd', '.tpr', '.trp', '.ts', '.tsp', '.ttxt',
            '.tvs', '.usf', '.usm', '.vc1', '.vcpf', '.vcr', '.vcv', '.vdo', '.vdr', '.vdx', '.veg', '.vem', '.vep', '.vf', '.vft',
            '.vfw', '.vfz', '.vgz', '.vid', '.video', '.viewlet', '.viv', '.vivo', '.vlab', '.vob', '.vp3', '.vp6', '.vp7', '.vpj',
            '.vro', '.vs4', '.vse', '.vsp', '.w32', '.wcp', '.webm', '.wlmp', '.wm', '.wmd', '.wmmp', '.wmv', '.wmx', '.wot', '.wp3',
            '.wpl', '.wtv', '.wve', '.wvx', '.xej', '.xel', '.xesc', '.xfl', '.xlmv', '.xmv', '.xvid', '.y4m', '.yog', '.yuv', '.zeg',
            '.zm1', '.zm2', '.zm3', '.zmv')
        # str.endswith accepts a tuple directly; the explicit if/else was redundant.
        return inputF.endswith(video_file_extensions)

    def getFPS(self, vF):
        """Return the frame rate of video file *vF*, supporting OpenCV 2 and 3+ APIs."""
        video = cv2.VideoCapture(vF)
        # cv2.__version__ can have more than three components (e.g. '3.4.2.17'),
        # so only the leading major version is taken (the original 3-way unpack
        # would raise ValueError on such versions).
        major_ver = cv2.__version__.split('.')[0]
        if int(major_ver) < 3:
            fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
        else:
            fps = video.get(cv2.CAP_PROP_FPS)
        video.release()
        return fps

    def splitFromVideo(self, inputF, outputFPrefix):
        """Split *inputF* into PNG frames named <prefix>_frame<N>.png.

        Returns the list of written frame file names, in order.
        """
        retVal = []
        vid = cv2.VideoCapture(inputF)
        idx = 0
        while True:
            ret, frame = vid.read()
            if not ret:
                break
            name = outputFPrefix + '_frame' + str(idx) + '.png'
            cv2.imwrite(name, frame)
            retVal.append(name)
            idx += 1
        vid.release()  # release the capture handle (previously leaked)
        return retVal

    def mergeIntoVideo(self, inFs, outputF, FPS):
        """Assemble the image files *inFs* into an mp4 at *FPS*; frame size is
        taken from the first image."""
        frame = cv2.imread(inFs[0])
        height, width, _ = frame.shape
        video = cv2.VideoWriter(outputF, cv2.VideoWriter_fourcc(*'mp4v'), FPS, (width, height))
        for inF in inFs:
            video.write(cv2.imread(inF))
        video.release()

    def generate(self, inputF, outputF, args):
        """Generate a transformed copy of *inputF* at *outputF*.

        When args.auto is set, one random option set is sampled and applied
        uniformly (every frame of a video gets the same options). Temporary
        per-frame files are removed after a video is re-assembled.
        Returns True on success.
        """
        if args.auto:
            auto_options = self.generateRandomOptions(args)
            # Fixed: used the module-global `logger`, which only exists when
            # this file runs as a script; use the instance logger instead.
            self.logger.info('Random options: ' + str(auto_options))
        if self.isVideo(inputF):
            FPS = self.getFPS(inputF)
            inputFs = self.splitFromVideo(inputF, outputF + '_input')
            outputFs = []
            for idx in range(0, len(inputFs)):
                iF = inputFs[idx]
                oF = outputF + '_output_frame' + str(idx) + '.png'
                if args.auto:
                    self._generate(iF, oF, auto_options)
                else:
                    self._generate(iF, oF, args)
                outputFs.append(oF)
            self.mergeIntoVideo(outputFs, outputF, FPS)
            # Clean up the intermediate frame files.
            for f in inputFs:
                os.remove(f)
            for f in outputFs:
                os.remove(f)
            return True
        else:
            if args.auto:
                return self._generate(inputF, outputF, auto_options)
            else:
                return self._generate(inputF, outputF, args)

    def _generate(self, inputF, outputF, args):
        """Apply the transformations described by *args* to a single image file.

        Options left as None (or False for flags) are skipped. The output is
        cropped back to the input dimensions at the end. Returns True.
        """
        inputImage = self.Magick.Image(inputF)
        input_width = inputImage.size().width()
        input_height = inputImage.size().height()
        self.logger.debug('Input width and height: %d x %d' % (input_width, input_height))
        # make image ready to be modified
        inputImage.modifyImage()
        inputImage.backgroundColor(self.Magick.Color('black'))
        if args.shift_x is not None:
            inputImage.roll(args.shift_x, 0)
        if args.shift_y is not None:
            inputImage.roll(0, args.shift_y)
        if args.skew_x is not None and args.skew_y is not None:
            inputImage.shear(args.skew_x, args.skew_y)
        elif args.skew_x is not None:
            inputImage.shear(args.skew_x, 0)
        # Fixed: this was a bare `if`, which applied the y-skew a second time
        # whenever both skews were set.
        elif args.skew_y is not None:
            inputImage.shear(0, args.skew_y)
        if args.rotate is not None:
            inputImage.rotate(args.rotate)
            inputImage.crop(self.Magick.Geometry(input_width, input_height, 0, 0))
        if args.horizontal_flip:
            inputImage.flop()
        if args.zoom is not None:
            inputImage.sample(self.Magick.Geometry(args.zoom))
            if int(args.zoom.strip()[0:-1]) >= 100:
                # Zoomed in: crop the center back to the original size.
                inputImage.crop(self.Magick.Geometry(input_width,
                                                     input_height,
                                                     int((inputImage.size().width() - input_width) / 2),
                                                     int((inputImage.size().height() - input_height) / 2)))
            else:
                # Zoomed out: pad back to the original size with black.
                # PythonMagick is missing extent() API
                # inputImage.exent(Magick.Geometry(input_width, input_height), Magick.GravityType.CenterGravity)
                smallWidth = inputImage.size().width()
                smallHeight = inputImage.size().height()
                inputImage.size(self.Magick.Geometry(input_width, input_height))
                inputImage.draw(self.Magick.DrawableRectangle(smallWidth, smallHeight, input_width, input_height))
                inputImage.draw(self.Magick.DrawableRectangle(smallWidth, 0, input_width, smallHeight))
                inputImage.draw(self.Magick.DrawableRectangle(0, smallHeight, smallWidth, input_height))
                inputImage.roll(int((input_width - smallWidth) / 2), int((input_height - smallHeight) / 2))
        if args.contrast is not None:
            for _ in range(0, args.contrast):
                inputImage.contrast(args.contrast)
        if args.brightness is not None or args.saturation is not None or args.hue is not None:
            # modulate() needs all three values; default unset ones to 100 (no change).
            if args.brightness is None:
                args.brightness = 100
            if args.saturation is None:
                args.saturation = 100
            if args.hue is None:
                args.hue = 100
            inputImage.modulate(args.brightness, args.saturation, args.hue)
        if args.blur:
            inputImage.blur(args.blur_radius, args.blur_sigma)
        if args.gaussianBlur:
            inputImage.gaussianBlur(args.gaussianBlur_width, args.gaussianBlur_sigma)
        if args.despeckle:
            inputImage.despeckle()
        if args.enhance:
            inputImage.enhance()
        if args.equalize:
            inputImage.equalize()
        if args.gamma is not None:
            inputImage.gamma(args.gamma)
        if args.implode is not None:
            inputImage.implode(args.implode)
        if args.negate:
            inputImage.negate()
        if args.normalize:
            inputImage.normalize()
        if args.quantize:
            inputImage.quantize()
        if args.reduceNoise is not None:
            inputImage.reduceNoise(args.reduceNoise)
        if args.shade:
            inputImage.shade(args.shade_azimuth, args.shade_elevation)
        if args.sharpen:
            inputImage.sharpen(args.sharpen_radius, args.sharpen_sigma)
        if args.swirl is not None:
            inputImage.swirl(args.swirl)
        if args.wave:
            inputImage.wave(args.wave_amplitude, args.wave_wavelength)
        # Final centered crop back to the original dimensions in case any
        # operation changed the canvas size.
        inputImage.crop(self.Magick.Geometry(input_width,
                                             input_height,
                                             int(math.fabs((inputImage.size().width() - input_width) / 2)),
                                             int(math.fabs((inputImage.size().height() - input_height) / 2))))
        inputImage.write(outputF)
        self.logger.debug('Output width and height: %d x %d' % (inputImage.size().width(), inputImage.size().height()))
        return True
if __name__ == "__main__":
    # Build the CLI: generic I/O options plus every transform option
    # contributed by the GenerateSyntheticData class.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-l', '--log-level', default='INFO', help="log-level (INFO|WARN|DEBUG|FATAL|ERROR)")
    argparser.add_argument('-i', '--input', required=True, help='Input image file name')
    argparser.add_argument('-o', '--output', required=True, help='Output image file name')
    argparser.add_argument('-w', '--overwrite', action='store_true', help='If set, will overwrite the existing output file')
    GenerateSyntheticData.appendArgumentParser(argparser)
    args = argparser.parse_args()
    logging.basicConfig(stream=sys.stdout, level=args.log_level)
    logger = logging.getLogger("DragonFly-ASL-GSD")
    logger.debug('CLI arguments')
    for key in vars(args):
        logger.debug(' -- %s: %s' % (key, getattr(args, key)))
    logger.debug('')
    # check input file exists
    if not os.path.isfile(args.input):
        logger.error('Input file %s does not exist: ' % args.input)
        sys.exit(1)
    # check if output file exists
    if os.path.isfile(args.output) and not args.overwrite:
        # Python 2/3 compatibility: prefer raw_input when it exists.
        try: input = raw_input
        except NameError: pass
        yn = input('Do you wish to overwrite %s? (y/n) ' % args.output)
        if yn != 'y' and yn != 'Y':
            logger.error('Output file %s will not be overwritten.' % args.output)
            sys.exit(1)
    GSD = GenerateSyntheticData(logger=logger)
    status = GSD.generate(args.input, args.output, args)
    logger.debug('Generation status: %r' % status)
| 48.35167 | 135 | 0.557027 | 2,614 | 24,611 | 5.134277 | 0.261668 | 0.04113 | 0.068549 | 0.053647 | 0.271589 | 0.238581 | 0.17793 | 0.128604 | 0.061247 | 0.042694 | 0 | 0.021481 | 0.277437 | 24,611 | 508 | 136 | 48.44685 | 0.733228 | 0.024908 | 0 | 0.097257 | 1 | 0.002494 | 0.142243 | 0.00121 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079801 | false | 0.002494 | 0.022444 | 0 | 0.127182 | 0.002494 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21a39959b787e7f048c3956b733c098a43568590 | 5,583 | py | Python | test/test_websocket.py | lmacken/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 22 | 2019-04-27T02:14:52.000Z | 2021-01-04T00:37:41.000Z | test/test_websocket.py | redquantum/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 7 | 2019-04-28T20:57:49.000Z | 2021-09-03T03:39:22.000Z | test/test_websocket.py | redquantum/binance-chain-python | 483e51394ebc9f9998f5248910ac7b7dff7198f9 | [
"MIT"
] | 9 | 2019-04-27T23:43:51.000Z | 2021-04-15T18:09:51.000Z | # Copyright 2019, Luke Macken, Kim Bui, and the binance-chain-python contributors
# SPDX-License-Identifier: MIT
"""
Binance DEX WebSocket Test Suite
"""
import asyncio
import pytest
from binancechain import HTTPClient, WebSocket
def on_error(msg):
    """Shared error callback for all websocket tests: print and continue."""
    print(f'Error: {msg}')
@pytest.fixture
async def client():
    """Yield a fresh testnet WebSocket and close it after the test."""
    # Creating fresh websockets back-to-back has been flaky; pause briefly first.
    await asyncio.sleep(1)
    ws = WebSocket(testnet=True)
    yield ws
    ws.close()
@pytest.fixture
async def symbols():
    """Yield every "BASE_QUOTE" trading-pair symbol listed on the testnet."""
    rest = HTTPClient(testnet=True)
    markets = await rest.get_markets()
    pairs = [
        f"{market['base_asset_symbol']}_{market['quote_asset_symbol']}"
        for market in markets
    ]
    yield pairs
    await rest.close()
@pytest.mark.asyncio
async def test_open_close(client):
    """Open the websocket and immediately close it from the open handler."""
    def _opened():
        print('opened')
        client.close()
    await client.start_async(on_open=_opened, on_error=on_error)
    print('closed')
@pytest.mark.asyncio
async def test_trades(client, symbols):
    """Subscribe to trades and check the first message's stream label."""
    print(symbols)
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_trades(symbols=symbols, callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'trades'
@pytest.mark.asyncio
async def test_market_diff(client, symbols):
    """Subscribe to market-diff updates and check the stream label."""
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_market_diff(symbols=symbols, callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'marketDiff'
@pytest.mark.asyncio
async def test_market_depth(client, symbols):
    """Subscribe to market-depth updates and check the stream label."""
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_market_depth(symbols=symbols, callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'marketDepth'
@pytest.mark.asyncio
async def test_kline(client, symbols):
    """Subscribe to 1-minute klines and check the stream label."""
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_kline(interval='1m', symbols=symbols, callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'kline_1m'
@pytest.mark.asyncio
async def test_tickers(client, symbols):
    """Subscribe to per-symbol tickers and check the stream label."""
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_ticker(symbols=symbols, callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'ticker'
@pytest.mark.asyncio
async def test_all_tickers(client):
    """Subscribe to the all-tickers feed and check the stream label."""
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_all_tickers(callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'allTickers'
@pytest.mark.asyncio
async def test_mini_ticker(client, symbols):
    """Subscribe to per-symbol mini tickers and check the stream label."""
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_mini_ticker(symbols=symbols, callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'miniTicker'
@pytest.mark.asyncio
async def test_all_mini_ticker(client, symbols):
    """Subscribe to the all-mini-tickers feed and check the stream label."""
    # NOTE(review): the `symbols` fixture is requested but unused, as in the
    # original; kept to preserve fixture setup/teardown behavior.
    received = []

    def collect(msg):
        received.append(msg)
        client.close()

    def opened():
        client.subscribe_all_mini_tickers(callback=collect)

    await client.start_async(on_open=opened, on_error=on_error)
    assert received[0]['stream'] == 'allMiniTickers'
@pytest.mark.asyncio
async def test_blockheight(client):
    """The blockheight subscription delivers messages that carry a 'stream' key."""
    results = []

    def callback(msg):
        # Capture the first message, then close to end start_async().
        results.append(msg)
        client.close()

    def on_open():
        client.subscribe_blockheight(callback=callback)

    await client.start_async(on_open=on_open, on_error=on_error)
    result = results[0]
    assert 'stream' in result
@pytest.mark.asyncio
async def test_keepalive(client):
    """keepalive() can be called right after the connection opens without error."""

    def on_open():
        # No assertion needed: the test passes if keepalive() does not raise.
        client.keepalive()
        client.close()

    await client.start_async(on_open=on_open, on_error=on_error)
@pytest.mark.asyncio
async def test_unsubscribe(client):
    """unsubscribe() can be issued from within a message callback.

    At least one message must have arrived before the unsubscribe, so
    ``results`` is asserted non-empty at the end.
    """
    results = []

    def callback(msg):
        results.append(msg)
        # Unsubscribe from the same stream we are currently receiving.
        client.unsubscribe("blockheight")
        client.close()

    def on_open():
        client.subscribe_blockheight(callback=callback)

    await client.start_async(on_open=on_open, on_error=on_error)
    assert results
@pytest.mark.asyncio
async def test_decorator(client):
    """@client.on('open') registers a plain (sync) callback for the open event."""

    @client.on('open')
    def callback():
        # Close immediately so start_async() terminates.
        client.close()

    await client.start_async()
@pytest.mark.asyncio
async def test_decorator_async(client):
    """@client.on('open') also accepts an async callback."""

    @client.on('open')
    async def callback():
        # Close immediately so start_async() terminates.
        client.close()

    await client.start_async()
@pytest.mark.asyncio
async def test_decorator_sub_queue(client):
    """Decorator-based stream subscription ('allTickers' with '$all') delivers messages."""
    results = []

    @client.on("allTickers", symbols=["$all"])
    async def callback(msg):
        # Capture the first message, then close to end start_async().
        results.append(msg)
        client.close()

    await client.start_async()
    assert results
| 21.980315 | 81 | 0.675981 | 708 | 5,583 | 5.155367 | 0.138418 | 0.062466 | 0.052603 | 0.090411 | 0.725753 | 0.725753 | 0.656438 | 0.609315 | 0.598082 | 0.584658 | 0 | 0.003621 | 0.20849 | 5,583 | 253 | 82 | 22.067194 | 0.822358 | 0.034927 | 0 | 0.644578 | 0 | 0 | 0.046407 | 0.011228 | 0 | 0 | 0 | 0 | 0.066265 | 1 | 0.144578 | false | 0 | 0.018072 | 0 | 0.162651 | 0.024096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21a698ad6f8035ce96d6e79e8a6eb4d69be7b56f | 1,193 | py | Python | DS_Alog_Python/array_employeelist.py | abhigyan709/dsalgo | 868448834b22e06e572b4a0b4ba85cb1b0c6d7ee | [
"MIT"
] | 1 | 2021-06-03T10:20:50.000Z | 2021-06-03T10:20:50.000Z | DS_Alog_Python/array_employeelist.py | abhigyan709/dsalgo | 868448834b22e06e572b4a0b4ba85cb1b0c6d7ee | [
"MIT"
] | null | null | null | DS_Alog_Python/array_employeelist.py | abhigyan709/dsalgo | 868448834b22e06e572b4a0b4ba85cb1b0c6d7ee | [
"MIT"
class Employee:
    """A single employee record with read-only access to its fields."""

    def __init__(self, name, emp_id, email_id):
        # Name-mangled attributes keep the fields private to this class;
        # read access goes through the getters below.
        self.__name, self.__emp_id, self.__email_id = name, emp_id, email_id

    def get_name(self):
        """Return the employee's display name."""
        return self.__name

    def get_emp_id(self):
        """Return the numeric employee id."""
        return self.__emp_id

    def get_email_id(self):
        """Return the employee's e-mail address."""
        return self.__email_id
class OrganizationDirectory:
    """A searchable collection of Employee records."""

    def __init__(self, emp_list):
        # The directory keeps a private reference to the employee list.
        self.__emp_list = emp_list

    def lookup(self, key_name):
        """Print and return all employees whose name contains ``key_name``.

        The match is a case-sensitive substring test against get_name().
        """
        matches = [emp for emp in self.__emp_list
                   if key_name in emp.get_name()]
        self.display(matches)
        return matches

    def display(self, result_list):
        """Print a header followed by one line per employee in ``result_list``."""
        print("Search results:")
        for emp in result_list:
            print(emp.get_name(), " ", emp.get_emp_id(), " ", emp.get_email_id())
# Demo data: four employees in the directory.
emp1 = Employee("Kevin", 24089, "Kevin_xyz@organization.com")
emp2 = Employee("Jack", 56789, "Jack_xyz@organization.com")
emp3 = Employee("Jackson", 67895, "Jackson_xyz@organization.com")
emp4 = Employee("Henry Jack", 23456, "Jacky_xyz@organization.com")
emp_list = [emp1, emp2, emp3, emp4]
org_dir = OrganizationDirectory(emp_list)
# Search for an employee.
# Bug fix: lookup() performs a case-sensitive substring match, so the
# previous query "KEVIN" could never match the stored name "Kevin" and the
# demo always printed an empty result. Query with the stored capitalization.
org_dir.lookup("Kevin")
21ac52aabd54ed388edac1605b21259e6ba60313 | 2,620 | py | Python | pyjobserver/__main__.py | athewsey/pyjobserver | 1251a0f22182c8bc8b55a85ef45adc7c1e2b982e | [
"Apache-2.0"
] | null | null | null | pyjobserver/__main__.py | athewsey/pyjobserver | 1251a0f22182c8bc8b55a85ef45adc7c1e2b982e | [
"Apache-2.0"
] | null | null | null | pyjobserver/__main__.py | athewsey/pyjobserver | 1251a0f22182c8bc8b55a85ef45adc7c1e2b982e | [
"Apache-2.0"
] | null | null | null | """Main/example start-up script for the pyjobserver
Use this as a guide if importing pyjobserver into another app instead
"""
# Built-Ins:
import asyncio
from logging import getLogger, Logger
import os
from pathlib import Path
# External Dependencies:
from aiohttp import web
import click
from dotenv import load_dotenv
# Local Dependencies:
from .access_control import get_authentication_middleware
from .config import load as load_config, Config
from .jobs.example import example_job_fn
from .runner import JobRunner
# (Only entry point scripts should load dotenvs)
# Loads environment variables from a ".env" file in the current working
# directory before the config is read.
load_dotenv(os.getcwd() + "/.env")
async def alive_handler(request) -> web.Response:
    """Basic server aliveness indicator.

    Always answers with a small JSON body so callers can probe that the
    HTTP server is up.
    """
    payload = {"ok": True}
    return web.json_response(payload)
async def init_app(config: Config, LOGGER: Logger):
    """Create an application instance.

    :return: application instance
    """
    app = web.Application(logger=LOGGER)
    app.router.add_get("/", alive_handler)

    auth_mw = get_authentication_middleware(config)

    job_runner = JobRunner(config)
    # ADD YOUR JOB TYPES LIKE THIS:
    # The job function must be conformant including the correct signature type annotations.
    job_runner.register_job_handler("example", example_job_fn)

    # Only attach the middleware list when authentication is configured.
    middlewares = [auth_mw] if auth_mw else None
    runner_app = await job_runner.webapp(middlewares=middlewares)
    app.add_subapp("/api", runner_app)
    return app
# Note we need to separate out the main_coro from main() because click (our command line args processor) can't decorate
# async functions
async def main_coro(manifest: str):
    """Initialise and serve application.

    Function is called when the module is run directly

    Args:
        manifest: Path of an optional manifest file relative to the current
            working directory; an empty string means "no manifest".
    """
    config = await load_config(Path(manifest) if manifest else None)
    LOGGER = getLogger(__name__)
    app = await init_app(config, LOGGER)
    # AppRunner/TCPSite start the HTTP server without blocking this
    # coroutine; the caller keeps the event loop alive (run_forever).
    runner = web.AppRunner(app, handle_signals=True)
    await runner.setup()
    site = web.TCPSite(runner, port=config.server.port)
    await site.start()
    LOGGER.info("Server running on port %i", config.server.port)
    # TODO: Are we supposed to expose the runner somehow to clean up on shutdown?
    #await runner.cleanup()
@click.command()
@click.option("--manifest", default="", help="Location of (optional) manifest file relative to current working dir")
def main(manifest: str):
    """Command-line entry point: start the server and serve until interrupted."""
    # Bug fix: asyncio.get_event_loop() is deprecated when no loop is running
    # (Python 3.10+); create and install a fresh loop explicitly instead.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    # main_coro() only *starts* the site; run_forever() keeps serving.
    loop.run_until_complete(main_coro(manifest))
    loop.run_forever()
if __name__ == "__main__":
    # Run the click command; click parses --manifest from sys.argv.
    # Linter error here is caused by PyLint not understanding the click decorator:
    main()  # pylint: disable=no-value-for-parameter
| 32.75 | 119 | 0.743511 | 354 | 2,620 | 5.367232 | 0.49435 | 0.063158 | 0.028421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167939 | 2,620 | 79 | 120 | 33.164557 | 0.87156 | 0.26145 | 0 | 0 | 0 | 0 | 0.076561 | 0 | 0 | 0 | 0 | 0.012658 | 0 | 1 | 0.025 | false | 0 | 0.275 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21acb4fa80b3916f001211cac88508c8d9ee7743 | 492 | py | Python | dowhy/graph_learner.py | leo-ware/dowhy | 3a2a79e2159a7f29456dd419a3c90395a384364e | [
"MIT"
] | 2,904 | 2019-05-07T08:09:33.000Z | 2022-03-31T18:28:41.000Z | dowhy/graph_learner.py | leo-ware/dowhy | 3a2a79e2159a7f29456dd419a3c90395a384364e | [
"MIT"
] | 238 | 2019-05-11T02:57:22.000Z | 2022-03-31T23:47:18.000Z | dowhy/graph_learner.py | leo-ware/dowhy | 3a2a79e2159a7f29456dd419a3c90395a384364e | [
"MIT"
class GraphLearner:
    """Abstract base for causal structure discovery methods.

    Concrete discovery algorithms subclass this; they live in the package
    "dowhy.causal_discoverers".
    """

    def __init__(self, data, library_class, *args, **kwargs):
        # Keep the raw data and a plain-list copy of its column labels.
        # ``library_class`` and the extra args are accepted for subclasses;
        # the base class itself does not use them.
        self._data = data
        self._labels = [column for column in self._data.columns]
        # Filled in by subclasses once a graph has been learned.
        self._adjacency_matrix = None
        self._graph_dot = None

    def learn_graph(self):
        """Discover the causal graph and the graph in DOT format.

        Must be overridden by subclasses.
        """
        raise NotImplementedError
| 23.428571 | 118 | 0.739837 | 63 | 492 | 5.555556 | 0.603175 | 0.137143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170732 | 492 | 20 | 119 | 24.6 | 0.857843 | 0.426829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21b63b9f54674792f408a6f07e0262da28ca36a1 | 553 | py | Python | todo/api/views.py | devord/todo | 312c313589cec179d69bf64ca3e06382dc2df728 | [
"MIT"
] | null | null | null | todo/api/views.py | devord/todo | 312c313589cec179d69bf64ca3e06382dc2df728 | [
"MIT"
] | 36 | 2019-03-22T01:50:24.000Z | 2022-02-26T10:28:41.000Z | todo/api/views.py | devord/todo | 312c313589cec179d69bf64ca3e06382dc2df728 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from api.serializers import LabelSerializer, ItemSerializer
from api.models import Label, Item
class LabelViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows labels to be viewed or edited.
    """
    # All labels, ordered alphabetically by name so list responses are stable.
    queryset = Label.objects.all().order_by('name')
    serializer_class = LabelSerializer
class ItemViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows items to be viewed or edited.
    """
    # All items, ordered alphabetically by title so list responses are stable.
    queryset = Item.objects.all().order_by('title')
    serializer_class = ItemSerializer
| 26.333333 | 59 | 0.734177 | 65 | 553 | 6.169231 | 0.523077 | 0.034913 | 0.114713 | 0.154613 | 0.334165 | 0.334165 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179024 | 553 | 20 | 60 | 27.65 | 0.88326 | 0.198915 | 0 | 0 | 0 | 0 | 0.021845 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
21bc3e83174440b0d25cd071871ba1fe4765dc1b | 408 | py | Python | src/accounts/migrations/0009_alter_protection_description.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0009_alter_protection_description.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | src/accounts/migrations/0009_alter_protection_description.py | NikolayTls/CarRental-Fullstack | e535976c25dd77896a355a2d30b5348be90ac040 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-11-09 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``Protection.description`` nullable."""

    dependencies = [
        ('accounts', '0008_auto_20211108_1633'),
    ]

    operations = [
        # Only the null-ability changes; max_length stays at 999.
        migrations.AlterField(
            model_name='protection',
            name='description',
            field=models.CharField(max_length=999, null=True),
        ),
    ]
| 21.473684 | 62 | 0.612745 | 44 | 408 | 5.568182 | 0.863636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114865 | 0.27451 | 408 | 18 | 63 | 22.666667 | 0.712838 | 0.110294 | 0 | 0 | 1 | 0 | 0.144044 | 0.063712 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
21bdc2ccc7ab9e40f05cc42e706cde91619db6a2 | 95,650 | py | Python | gym_electric_motor/physical_systems/electric_motors.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | [
"MIT"
] | 1 | 2021-03-29T07:47:32.000Z | 2021-03-29T07:47:32.000Z | gym_electric_motor/physical_systems/electric_motors.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | [
"MIT"
] | null | null | null | gym_electric_motor/physical_systems/electric_motors.py | 54hanxiucao/gym-electric-motor | 911432388b00675e8a93f4a7937fdc575f106f22 | [
"MIT"
] | null | null | null | import numpy as np
import math
from scipy.stats import truncnorm
class ElectricMotor:
    """
    Base class for all technical electrical motor models.

    A motor consists of the ode-state. These are the dynamic quantities of its ODE.
    For example:
        ODE-State of a DC-shunt motor: `` [i_a, i_e ] ``
            * i_a: Anchor circuit current
            * i_e: Exciting circuit current

    Each electric motor can be parametrized by a dictionary of motor parameters,
    the nominal state dictionary and the limit dictionary.

    Initialization is given by initializer(dict). Can be constant state value
    or random value in given interval.
    dict should be like:
        { 'states'(dict): with state names and initial values
          'interval'(array like): boundaries for each state
                        (only for random init), shape(num states, 2)
          'random_init'(str): 'uniform' or 'normal'
          'random_params'(tuple): mue(float), sigma(int)

    Example initializer(dict) for constant initialization:
        { 'states': {'omega': 16.0}}
    Example initializer(dict) for random initialization:
        { 'random_init': 'normal'}
    """
    #: Parameter indicating if the class is implementing the optional jacobian function
    HAS_JACOBIAN = False
    #: CURRENTS_IDX(list(int)): Indices for accessing all motor currents.
    CURRENTS_IDX = []
    #: CURRENTS(list(str)): List of the motor currents names
    CURRENTS = []
    #: VOLTAGES(list(str)): List of the motor input voltages names
    VOLTAGES = []
    #: _default_motor_parameter(dict): Default parameter dictionary for the motor
    _default_motor_parameter = {}
    #: _default_nominal_values(dict(float)): Default nominal motor state array
    _default_nominal_values = {}
    #: _default_limits(dict(float)): Default motor limits (0 for unbounded limits)
    _default_limits = {}
    #: _default_initializer(dict): Default initial motor-state values
    _default_initializer = {'states': {},
                            'interval': None,
                            'random_init': None,
                            'random_params': None}
    #: _default_initial_limits(dict): Default limit for initialization
    _default_initial_limits = {}

    @property
    def nominal_values(self):
        """
        Readonly motors nominal values.

        Returns:
            dict(float): Current nominal values of the motor.
        """
        return self._nominal_values

    @property
    def limits(self):
        """
        Readonly motors limit state array. Entries are set to the maximum physical possible values
        in case of unspecified limits.

        Returns:
            dict(float): Limits of the motor.
        """
        return self._limits

    @property
    def motor_parameter(self):
        """
        Returns:
            dict(float): The motors parameter dictionary
        """
        return self._motor_parameter

    @property
    def initializer(self):
        """
        Returns:
            dict: Motor initial state and additional initializer parameter
        """
        return self._initializer

    @property
    def initial_limits(self):
        """
        Returns:
            dict: nominal motor limits for choosing initial values
        """
        return self._initial_limits

    def __init__(self, motor_parameter=None, nominal_values=None,
                 limit_values=None, motor_initializer=None, initial_limits=None,
                 **__):
        """
        :param motor_parameter: Motor parameter dictionary. Contents specified
            for each motor.
        :param nominal_values: Nominal values for the motor quantities.
        :param limit_values: Limits for the motor quantities.
        :param motor_initializer: Initial motor states (currents)
            ('constant', 'uniform', 'gaussian' sampled from
             given interval or out of nominal motor values)
        :param initial_limits: limits for of the initial state-value
        """
        # Merge each user-supplied dict over a fresh copy of the class default
        # so class-level defaults are never mutated.
        motor_parameter = motor_parameter or {}
        self._motor_parameter = self._default_motor_parameter.copy()
        self._motor_parameter.update(motor_parameter)
        limit_values = limit_values or {}
        self._limits = self._default_limits.copy()
        self._limits.update(limit_values)
        nominal_values = nominal_values or {}
        self._nominal_values = self._default_nominal_values.copy()
        self._nominal_values.update(nominal_values)
        motor_initializer = motor_initializer or {}
        self._initializer = self._default_initializer.copy()
        self._initializer.update(motor_initializer)
        self._initial_states = {}
        if self._initializer['states'] is not None:
            self._initial_states.update(self._initializer['states'])
        # intialize limits, in general they're not needed to be changed
        # during training or episodes
        initial_limits = initial_limits or {}
        self._initial_limits = self._nominal_values.copy()
        self._initial_limits.update(initial_limits)
        # preventing wrong user input for the basic case
        assert isinstance(self._initializer, dict), 'wrong initializer'

    def electrical_ode(self, state, u_in, omega, *_):
        """
        Calculation of the derivatives of each motor state variable for the given inputs / The motors ODE-System.

        Args:
            state(ndarray(float)): The motors state.
            u_in(list(float)): The motors input voltages.
            omega(float): Angular velocity of the motor

        Returns:
             ndarray(float): Derivatives of the motors ODE-system for the given inputs.
        """
        raise NotImplementedError

    def electrical_jacobian(self, state, u_in, omega, *_):
        """
        Calculation of the jacobian of each motor ODE for the given inputs / The motors ODE-System.

        Overriding this method is optional for each subclass. If it is overridden, the parameter HAS_JACOBIAN must also
        be set to True. Otherwise, the jacobian will not be called.

        Args:
            state(ndarray(float)): The motors state.
            u_in(list(float)): The motors input voltages.
            omega(float): Angular velocity of the motor

        Returns:
             Tuple(ndarray, ndarray, ndarray):
                [0]: Derivatives of all electrical motor states over all electrical motor states shape:(states x states)
                [1]: Derivatives of all electrical motor states over omega shape:(states,)
                [2]: Derivative of Torque over all motor states shape:(states,)
        """
        pass

    def initialize(self,
                   state_space,
                   state_positions,
                   **__):
        """
        Initializes given state values. Values can be given as a constant or
        sampled random out of a statistical distribution. Initial value is in
        range of the nominal values or a given interval. Values are written in
        initial_states attribute

        Args:
            state_space(gym.Box): normalized state space boundaries (given by
                physical system)
            state_positions(dict): indexes of system states (given by physical
                system)
        """
        # for organization purposes
        interval = self._initializer['interval']
        random_dist = self._initializer['random_init']
        random_params = self._initializer['random_params']
        self._initial_states.update(self._default_initializer['states'])
        if self._initializer['states'] is not None:
            self._initial_states.update(self._initializer['states'])

        # different limits for InductionMotor
        if any(map(lambda state: state in self._initial_states.keys(),
                   ['psi_ralpha', 'psi_rbeta'])):
            nominal_values_ = [self._initial_limits[state]
                               for state in self._initial_states]
            upper_bound = np.asarray(np.abs(nominal_values_), dtype=float)
            # state space for Induction Envs based on documentation
            # ['i_salpha', 'i_sbeta', 'psi_ralpha', 'psi_rbeta', 'epsilon']
            # hardcoded for Inductionmotors currently given in the toolbox
            state_space_low = np.array([-1, -1, -1, -1, -1])
            lower_bound = upper_bound * state_space_low
        else:
            if isinstance(self._nominal_values, dict):
                nominal_values_ = [self._nominal_values[state]
                                   for state in self._initial_states.keys()]
                nominal_values_ = np.asarray(nominal_values_)
            else:
                nominal_values_ = np.asarray(self._nominal_values)
            state_space_idx = [state_positions[state] for state in
                               self._initial_states.keys()]
            upper_bound = np.asarray(nominal_values_, dtype=float)
            lower_bound = upper_bound * \
                np.asarray(state_space.low, dtype=float)[state_space_idx]
        # clip nominal boundaries to user defined
        if interval is not None:
            lower_bound = np.clip(lower_bound,
                                  a_min=
                                  np.asarray(interval, dtype=float).T[0],
                                  a_max=None)
            upper_bound = np.clip(upper_bound,
                                  a_min=None,
                                  a_max=
                                  np.asarray(interval, dtype=float).T[1])
        # random initialization for each motor state (current, epsilon)
        if random_dist is not None:
            if random_dist == 'uniform':
                initial_value = (upper_bound - lower_bound) * \
                                np.random.random_sample(
                                    len(self._initial_states.keys())) + \
                                lower_bound
                # writing initial values in initial_states dict
                random_states = \
                    {state: initial_value[idx]
                     for idx, state in enumerate(self._initial_states.keys())}
                self._initial_states.update(random_states)

            elif random_dist in ['normal', 'gaussian']:
                # Bug fix: the previous ``random_params[0] or ...`` silently
                # replaced a legitimate zero mean with the interval midpoint.
                if random_params[0] is not None:
                    mue = random_params[0]
                else:
                    # default: middle of the interval
                    mue = (upper_bound - lower_bound) / 2 + lower_bound
                sigma = random_params[1] or 1
                a, b = (lower_bound - mue) / sigma, (upper_bound - mue) / sigma
                initial_value = truncnorm.rvs(a, b,
                                              loc=mue,
                                              scale=sigma,
                                              size=(len(self._initial_states.keys())))
                # writing initial values in initial_states dict
                random_states = \
                    {state: initial_value[idx]
                     for idx, state in enumerate(self._initial_states.keys())}
                self._initial_states.update(random_states)
            else:
                # todo implement other distribution
                raise NotImplementedError
        # constant initialization for each motor state (current, epsilon)
        elif self._initial_states is not None:
            initial_value = np.atleast_1d(list(self._initial_states.values()))
            # check init_value meets interval boundaries
            if ((lower_bound <= initial_value).all()
                    and (initial_value <= upper_bound).all()):
                initial_states_ = \
                    {state: initial_value[idx]
                     for idx, state in enumerate(self._initial_states.keys())}
                self._initial_states.update(initial_states_)
            else:
                raise Exception('Initialization value has to be within nominal boundaries')
        else:
            raise Exception('No matching Initialization Case')

    def reset(self,
              state_space,
              state_positions,
              **__):
        """
        Reset the motors state to a new initial state. (Default 0)

        Args:
            state_space(gym.Box): normalized state space boundaries
            state_positions(dict): indexes of system states

        Returns:
            numpy.ndarray(float): The initial motor states.
        """
        # check for valid initializer
        if self._initializer and self._initializer['states']:
            self.initialize(state_space, state_positions)
            return np.asarray(list(self._initial_states.values()))
        else:
            return np.zeros(len(self.CURRENTS))

    def i_in(self, state):
        """
        Args:
            state(ndarray(float)): ODE state of the motor

        Returns:
             list(float): List of all currents flowing into the motor.
        """
        raise NotImplementedError

    def _update_limits(self, limits_d=None, nominal_d=None):
        """Replace missing limits and nominal values with physical maximums.

        Args:
            limits_d(dict): Mapping: quantity to its limit if not specified
            nominal_d(dict): Mapping: quantity to its nominal value if not
                specified
        """
        # Bug fix: mutable default arguments ({}) would be shared across all
        # calls and mutated below - create fresh dicts per call instead.
        limits_d = {} if limits_d is None else limits_d
        nominal_d = {} if nominal_d is None else nominal_d
        # omega is replaced the same way for all motor types
        limits_d.update(dict(omega=self._default_limits['omega']))
        for qty, lim in limits_d.items():
            if self._limits.get(qty, 0) == 0:
                self._limits[qty] = lim

        for entry in self._limits.keys():
            if self._nominal_values.get(entry, 0) == 0:
                self._nominal_values[entry] = nominal_d.get(entry, None) or \
                    self._limits[entry]

    def _update_initial_limits(self, nominal_new=None, **kwargs):
        """
        Complete initial states with further state limits

        Args:
            nominal_new(dict): new/further state limits
        """
        # Avoid the shared mutable-default-argument pitfall here as well.
        nominal_new = {} if nominal_new is None else nominal_new
        self._initial_limits.update(nominal_new)
class DcMotor(ElectricMotor):
    """
    The DcMotor and its subclasses implement the technical system of a dc motor.

    This includes the system equations, the motor parameters of the equivalent circuit diagram,
    as well as limits.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        0.78          Armature circuit resistance
    r_e                   Ohm        25            Exciting circuit resistance
    l_a                   H          6.3e-3        Armature circuit inductance
    l_e                   H          1.2           Exciting circuit inductance
    l_e_prime             H          0.0094        Effective excitation inductance
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_a             A      Armature circuit current
    i_e             A      Exciting circuit current
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_a             V      Armature circuit voltage
    u_e             v      Exciting circuit voltage
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i_a      Armature current
    i_e      Exciting current
    omega    Angular Velocity
    torque   Motor generated torque
    u_a      Armature Voltage
    u_e      Exciting Voltage
    ======== ===========================================================
    """
    # Indices for array accesses
    I_A_IDX = 0
    I_E_IDX = 1
    CURRENTS_IDX = [0, 1]
    CURRENTS = ['i_a', 'i_e']
    VOLTAGES = ['u_a', 'u_e']
    _default_motor_parameter = {
        'r_a': 0.78, 'r_e': 25, 'l_a': 6.3e-3, 'l_e': 1.2, 'l_e_prime': 0.0094,
        'j_rotor': 0.017,
    }
    _default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
                               'i_e': 1.2, 'u': 420}
    _default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
                       'u': 420}
    _default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def __init__(self, motor_parameter=None, nominal_values=None,
                 limit_values=None, motor_initializer=None, **__):
        # Docstring of superclass
        super().__init__(motor_parameter, nominal_values,
                         limit_values, motor_initializer)
        #: Matrix that contains the constant parameters of the systems equation for faster computation
        self._model_constants = None
        self._update_model()
        self._update_limits()

    def _update_model(self):
        """
        Update the motors model parameters with the motor parameters.

        Called internally when the motor parameters are changed or the motor is initialized.
        """
        mp = self._motor_parameter
        # Rows: d(i_a)/dt and d(i_e)/dt over the input vector
        # [i_a, i_e, omega * i_e, u_a, u_e]
        self._model_constants = np.array([
            [-mp['r_a'], 0, -mp['l_e_prime'], 1, 0],
            [0, -mp['r_e'], 0, 0, 1]
        ])
        self._model_constants[self.I_A_IDX] = self._model_constants[
            self.I_A_IDX] / mp['l_a']
        self._model_constants[self.I_E_IDX] = self._model_constants[
            self.I_E_IDX] / mp['l_e']

    def torque(self, currents):
        # Docstring of superclass
        # T = l_e' * i_a * i_e
        return self._motor_parameter['l_e_prime'] * currents[self.I_A_IDX] * \
            currents[self.I_E_IDX]

    def i_in(self, currents):
        # Docstring of superclass
        return list(currents)

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        return np.matmul(self._model_constants, np.array([
            state[self.I_A_IDX],
            state[self.I_E_IDX],
            omega * state[self.I_E_IDX],
            u_in[0],
            u_in[1],
        ]))

    def get_state_space(self, input_currents, input_voltages):
        """
        Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".

        Args:
            input_currents: Tuple of the two converters possible output currents.
            input_voltages: Tuple of the two converters possible output voltages.

        Returns:
             tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
        """
        a_converter = 0
        e_converter = 1
        low = {
            'omega': -1 if input_voltages.low[a_converter] == -1
                     or input_voltages.low[e_converter] == -1 else 0,
            'torque': -1 if input_currents.low[a_converter] == -1
                      or input_currents.low[e_converter] == -1 else 0,
            'i_a': -1 if input_currents.low[a_converter] == -1 else 0,
            'i_e': -1 if input_currents.low[e_converter] == -1 else 0,
            'u_a': -1 if input_voltages.low[a_converter] == -1 else 0,
            'u_e': -1 if input_voltages.low[e_converter] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i_a': 1,
            'i_e': 1,
            'u_a': 1,
            'u_e': 1
        }
        return low, high

    def _update_limits(self, limits_d=None):
        # Docstring of superclass
        # Bug fix: a mutable default argument ({}) would be shared across all
        # calls and mutated below - create a fresh dict per call instead.
        limits_d = {} if limits_d is None else limits_d
        # torque is replaced the same way for all DC motors
        limits_d.update(dict(torque=self.torque([self._limits[state] for state
                                                 in self.CURRENTS])))
        super()._update_limits(limits_d)
class DcShuntMotor(DcMotor):
    """
    The DcShuntMotor is a DC motor with parallel armature and exciting circuit connected to one input voltage.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        0.78          Armature circuit resistance
    r_e                   Ohm        25            Exciting circuit resistance
    l_a                   H          6.3e-3        Armature circuit inductance
    l_e                   H          1.2           Exciting circuit inductance
    l_e_prime             H          0.0094        Effective excitation inductance
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_a             A      Armature circuit current
    i_e             A      Exciting circuit current
    =============== ====== =============================================

    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Voltage applied to both circuits
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i_a      Armature current
    i_e      Exciting current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Voltage
    ======== ===========================================================
    """
    HAS_JACOBIAN = True
    VOLTAGES = ['u']
    _default_nominal_values = {'omega': 368, 'torque': 0.0, 'i_a': 50,
                               'i_e': 1.2, 'u': 420}
    _default_limits = {'omega': 500, 'torque': 0.0, 'i_a': 75, 'i_e': 2,
                       'u': 420}
    _default_initializer = {'states': {'i_a': 0.0, 'i_e': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def i_in(self, state):
        # Docstring of superclass
        # Both circuits are in parallel, so the input current is their sum.
        return [state[self.I_A_IDX] + state[self.I_E_IDX]]

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        # One supply voltage feeds both the armature and exciting circuit.
        return super().electrical_ode(state, (u_in[0], u_in[0]), omega)

    def electrical_jacobian(self, state, u_in, omega, *_):
        """Jacobian of the electrical ODE.

        Returns:
            Tuple(ndarray, ndarray, ndarray):
                [0]: derivatives of the state derivatives over the states, shape (2, 2)
                [1]: derivatives of the state derivatives over omega, shape (2,)
                [2]: derivative of the torque over the states, shape (2,)
        """
        mp = self._motor_parameter
        return (
            np.array([
                [-mp['r_a'] / mp['l_a'], -mp['l_e_prime'] / mp['l_a'] * omega],
                [0, -mp['r_e'] / mp['l_e']]
            ]),
            np.array([-mp['l_e_prime'] * state[self.I_E_IDX] / mp['l_a'], 0]),
            np.array([mp['l_e_prime'] * state[self.I_E_IDX],
                      mp['l_e_prime'] * state[self.I_A_IDX]])
        )

    def get_state_space(self, input_currents, input_voltages):
        """
        Calculate the possible normalized state space for the motor as a tuple of dictionaries "low" and "high".

        Args:
            input_currents: The converters possible output currents.
            input_voltages: The converters possible output voltages.

        Returns:
             tuple(dict,dict): Dictionaries defining if positive and negative values are possible for each motors state.
        """
        # (removed an unused local ``lower_limit`` that was never read)
        low = {
            'omega': 0,
            'torque': -1 if input_currents.low[0] == -1 else 0,
            'i_a': -1 if input_currents.low[0] == -1 else 0,
            'i_e': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i_a': 1,
            'i_e': 1,
            'u': 1,
        }
        return low, high

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against that
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limit_agenda = \
            {'u': self._default_limits['u'],
             'i_a': self._limits.get('i', None) or
                    self._limits['u'] / r_a,
             'i_e': self._limits.get('i', None) or
                    self._limits['u'] / self.motor_parameter['r_e'],
             }
        super()._update_limits(limit_agenda)
class DcSeriesMotor(DcMotor):
    """
    The DcSeriesMotor is a DcMotor with an armature and exciting circuit connected in series to one input voltage.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        2.78          Armature circuit resistance
    r_e                   Ohm        1.0           Exciting circuit resistance
    l_a                   H          6.3e-3        Armature circuit inductance
    l_e                   H          1.6e-3        Exciting circuit inductance
    l_e_prime             H          0.05          Effective excitation inductance
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i               A      Circuit current
    =============== ====== =============================================
    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Circuit voltage
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        Circuit Current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Circuit Voltage
    ======== ===========================================================
    """
    HAS_JACOBIAN = True
    # Index of the single circuit current in the state vector.
    I_IDX = 0
    CURRENTS_IDX = [0]
    CURRENTS = ['i']
    VOLTAGES = ['u']
    _default_motor_parameter = {
        'r_a': 2.78, 'r_e': 1.0, 'l_a': 6.3e-3, 'l_e': 1.6e-3,
        'l_e_prime': 0.05, 'j_rotor': 0.017,
    }
    _default_nominal_values = dict(omega=80, torque=0.0, i=50, u=420)
    _default_limits = dict(omega=100, torque=0.0, i=100, u=420)
    _default_initializer = {'states': {'i': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        # Columns of the model constants: [i, omega * i, u]
        self._model_constants = np.array([
            [-mp['r_a'] - mp['r_e'], -mp['l_e_prime'], 1]
        ])
        # Normalize by the total series inductance l_a + l_e.
        self._model_constants[self.I_IDX] = self._model_constants[
            self.I_IDX] / (mp['l_a'] + mp['l_e'])

    def torque(self, currents):
        # Docstring of superclass
        # Armature and excitation current are identical in the series motor.
        return super().torque([currents[self.I_IDX], currents[self.I_IDX]])

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        return np.matmul(
            self._model_constants,
            np.array([
                state[self.I_IDX],
                omega * state[self.I_IDX],
                u_in[0]
            ])
        )

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against a division by zero.
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limits_agenda = {
            'u': self._default_limits['u'],
            # Stall current of the series circuit: u / (R_a + R_e)
            'i': self._limits['u'] / (r_a + self._motor_parameter['r_e']),
        }
        super()._update_limits(limits_agenda)

    def get_state_space(self, input_currents, input_voltages):
        # Docstring of superclass
        # A quantity can only become negative if the converter is able to
        # output negative currents / voltages (its lower bound equals -1).
        low = {
            'omega': 0,
            'torque': 0,
            'i': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i': 1,
            'u': 1,
        }
        return low, high

    def electrical_jacobian(self, state, u_in, omega, *_):
        # Docstring of superclass
        mp = self._motor_parameter
        total_inductance = mp['l_a'] + mp['l_e']
        return (
            np.array([[-(mp['r_a'] + mp['r_e'] + mp['l_e_prime'] * omega)
                       / total_inductance]]),
            np.array([-mp['l_e_prime'] * state[self.I_IDX]
                      / total_inductance]),
            np.array([2 * mp['l_e_prime'] * state[self.I_IDX]])
        )
class DcPermanentlyExcitedMotor(DcMotor):
    """
    The DcPermanentlyExcitedMotor is a DcMotor with a Permanent Magnet instead of the excitation circuit.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_a                   Ohm        25.0          Armature circuit resistance
    l_a                   H          3.438e-2      Armature circuit inductance
    psi_e                 Wb         18            Magnetic Flux of the permanent magnet
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i               A      Circuit current
    =============== ====== =============================================
    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u               V      Circuit voltage
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        Circuit Current
    omega    Angular Velocity
    torque   Motor generated torque
    u        Circuit Voltage
    ======== ===========================================================
    """
    I_IDX = 0
    CURRENTS_IDX = [0]
    CURRENTS = ['i']
    VOLTAGES = ['u']
    HAS_JACOBIAN = True
    _default_motor_parameter = {
        'r_a': 25.0, 'l_a': 3.438e-2, 'psi_e': 18, 'j_rotor': 0.017
    }
    _default_nominal_values = dict(omega=22, torque=0.0, i=16, u=400)
    _default_limits = dict(omega=50, torque=0.0, i=25, u=400)
    _default_initializer = {'states': {'i': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}
    # Pre-allocated buffer for [omega, i, u_in] reused in electrical_ode().
    # NOTE(review): this buffer lives on the class and is shared by all
    # instances; safe only if electrical_ode is not called concurrently
    # from several motors — confirm before parallelizing.
    _ode_placeholder = np.zeros(2 + len(CURRENTS_IDX), dtype=np.float64)

    def torque(self, state):
        # Docstring of superclass
        # T = psi_e * i
        return self._motor_parameter['psi_e'] * state[self.I_IDX]

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        # Columns of the model constants: [omega, i, u]
        self._model_constants = np.array([
            [-mp['psi_e'], -mp['r_a'], 1.0]
        ])
        self._model_constants[self.I_IDX] /= mp['l_a']

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def electrical_ode(self, state, u_in, omega, *_):
        # Docstring of superclass
        # Fill the shared buffer in-place to avoid a new allocation per call.
        self._ode_placeholder[:] = [omega] + np.atleast_1d(
            state[self.I_IDX]).tolist() \
            + [u_in[0]]
        return np.matmul(self._model_constants, self._ode_placeholder)

    def electrical_jacobian(self, state, u_in, omega, *_):
        # Docstring of superclass
        mp = self._motor_parameter
        return (
            np.array([[-mp['r_a'] / mp['l_a']]]),
            np.array([-mp['psi_e'] / mp['l_a']]),
            np.array([mp['psi_e']])
        )

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against a division by zero.
        r_a = 1 if self._motor_parameter['r_a'] == 0 else self._motor_parameter['r_a']
        limits_agenda = {
            'u': self._default_limits['u'],
            # Stall current u / R_a as fallback current limit.
            'i': self._limits['u'] / r_a,
        }
        super()._update_limits(limits_agenda)

    def get_state_space(self, input_currents, input_voltages):
        # Docstring of superclass
        # A quantity can only become negative if the converter is able to
        # output negative currents / voltages (its lower bound equals -1).
        low = {
            'omega': -1 if input_voltages.low[0] == -1 else 0,
            'torque': -1 if input_currents.low[0] == -1 else 0,
            'i': -1 if input_currents.low[0] == -1 else 0,
            'u': -1 if input_voltages.low[0] == -1 else 0,
        }
        high = {
            'omega': 1,
            'torque': 1,
            'i': 1,
            'u': 1,
        }
        return low, high
class DcExternallyExcitedMotor(DcMotor):
    # Equals the DC base motor: armature and excitation circuit are
    # supplied by two independent input voltages.
    HAS_JACOBIAN = True

    def electrical_jacobian(self, state, u_in, omega, *_):
        par = self._motor_parameter
        i_a = state[self.I_A_IDX]
        i_e = state[self.I_E_IDX]
        d_ode_d_state = np.array([
            [-par['r_a'] / par['l_a'], -par['l_e_prime'] / par['l_a'] * omega],
            [0, -par['r_e'] / par['l_e']]
        ])
        d_ode_d_omega = np.array(
            [-par['l_e_prime'] * i_e / par['l_a'], 0]
        )
        d_torque_d_state = np.array(
            [par['l_e_prime'] * i_e, par['l_e_prime'] * i_a]
        )
        return d_ode_d_state, d_ode_d_omega, d_torque_d_state

    def _update_limits(self):
        # Docstring of superclass
        # R_a might be 0, protect against a division by zero below.
        r_a = self._motor_parameter['r_a']
        if r_a == 0:
            r_a = 1
        general_current_limit = self._limits.get('i', None)
        limit_agenda = {
            'u_a': self._default_limits['u'],
            'u_e': self._default_limits['u'],
            'i_a': general_current_limit or self._limits['u'] / r_a,
            'i_e': general_current_limit
                   or self._limits['u'] / self.motor_parameter['r_e'],
        }
        super()._update_limits(limit_agenda)
class ThreePhaseMotor(ElectricMotor):
    """
    The ThreePhaseMotor and its subclasses implement the technical system of Three Phase Motors.

    This includes the system equations, the motor parameters of the equivalent circuit diagram,
    as well as limits and bandwidth.
    """
    # transformation matrix from abc to alpha-beta representation
    _t23 = 2 / 3 * np.array([
        [1, -0.5, -0.5],
        [0, 0.5 * np.sqrt(3), -0.5 * np.sqrt(3)]
    ])
    # transformation matrix from alpha-beta to abc representation
    _t32 = np.array([
        [1, 0],
        [-0.5, 0.5 * np.sqrt(3)],
        [-0.5, -0.5 * np.sqrt(3)]
    ])

    @staticmethod
    def t_23(quantities):
        """
        Transformation from abc representation to alpha-beta representation

        Args:
            quantities: The properties in the abc representation like ``[u_a, u_b, u_c]``

        Returns:
            The converted quantities in the alpha-beta representation like ``[u_alpha, u_beta]``
        """
        return np.matmul(ThreePhaseMotor._t23, quantities)

    @staticmethod
    def t_32(quantities):
        """
        Transformation from alpha-beta representation to abc representation

        Args:
            quantities: The properties in the alpha-beta representation like ``[u_alpha, u_beta]``

        Returns:
            The converted quantities in the abc representation like ``[u_a, u_b, u_c]``
        """
        return np.matmul(ThreePhaseMotor._t32, quantities)

    @staticmethod
    def q(quantities, epsilon):
        """
        Transformation of the dq-representation into alpha-beta using the electrical angle

        Args:
            quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
            epsilon: Current electrical angle of the motor

        Returns:
            Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
        """
        cos = math.cos(epsilon)
        sin = math.sin(epsilon)
        return cos * quantities[0] - sin * quantities[1], \
            sin * quantities[0] + cos * quantities[1]

    @staticmethod
    def q_inv(quantities, epsilon):
        """
        Transformation of the alpha-beta-representation into dq using the electrical angle

        Args:
            quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
            epsilon: Current electrical angle of the motor

        Returns:
            Array of the two quantities converted to dq-representation. Example [u_d, u_q]

        Note:
            The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
            So this method calls q(quantities, -epsilon).
        """
        # q() is defined on this class; referring to it directly avoids a
        # dependency of the base class on its subclass SynchronousMotor.
        return ThreePhaseMotor.q(quantities, -epsilon)

    def q_me(self, quantities, epsilon):
        """
        Transformation of the dq-representation into alpha-beta using the mechanical angle

        Args:
            quantities: Array of two quantities in dq-representation. Example [i_d, i_q]
            epsilon: Current mechanical angle of the motor

        Returns:
            Array of the two quantities converted to alpha-beta-representation. Example [u_alpha, u_beta]
        """
        # Electrical angle = mechanical angle * pole pair number p.
        return self.q(quantities, epsilon * self._motor_parameter['p'])

    def q_inv_me(self, quantities, epsilon):
        """
        Transformation of the alpha-beta-representation into dq using the mechanical angle

        Args:
            quantities: Array of two quantities in alpha-beta-representation. Example [u_alpha, u_beta]
            epsilon: Current mechanical angle of the motor

        Returns:
            Array of the two quantities converted to dq-representation. Example [u_d, u_q]

        Note:
            The transformation from alpha-beta to dq is just its inverse conversion with negated epsilon.
            So this method calls q(quantities, -epsilon).
        """
        return self.q_me(quantities, -epsilon)

    def _torque_limit(self):
        """
        Returns:
            Maximal possible torque for the given limits in self._limits
        """
        raise NotImplementedError()

    def _update_limits(self, limits_d=None, nominal_d=None):
        # Docstring of superclass
        # Avoid mutable default arguments: create fresh dicts per call.
        limits_d = {} if limits_d is None else limits_d
        nominal_d = {} if nominal_d is None else nominal_d
        super()._update_limits(limits_d, nominal_d)
        super()._update_limits(dict(torque=self._torque_limit()))

    def _update_initial_limits(self, nominal_new=None, **kwargs):
        # Docstring of superclass
        nominal_new = {} if nominal_new is None else nominal_new
        super()._update_initial_limits(self._nominal_values)
        super()._update_initial_limits(nominal_new)
class SynchronousMotor(ThreePhaseMotor):
    """
    The SynchronousMotor and its subclasses implement the technical system of a three phase synchronous motor.

    This includes the system equations, the motor parameters of the equivalent circuit diagram,
    as well as limits and bandwidth.

    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        0.78          Stator resistance
    l_d                   H          1.2           Direct axis inductance
    l_q                   H          6.3e-3        Quadrature axis inductance
    psi_p                 Wb         0.0094        Effective excitation flux (PMSM only)
    p                     1          2             Pole pair number
    j_rotor               kg/m^2     0.017         Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_a             A      Current through branch a
    i_b             A      Current through branch b
    i_c             A      Current through branch c
    i_alpha         A      Current in alpha axis
    i_beta          A      Current in beta axis
    =============== ====== =============================================
    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_a             V      Voltage through branch a
    u_b             V      Voltage through branch b
    u_c             V      Voltage through branch c
    u_alpha         V      Voltage in alpha axis
    u_beta          V      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_a      Current in phase a
    i_b      Current in phase b
    i_c      Current in phase c
    i_alpha  Current in alpha axis
    i_beta   Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    epsilon  Electrical rotational angle
    torque   Motor generated torque
    u_a      Voltage in phase a
    u_b      Voltage in phase b
    u_c      Voltage in phase c
    u_alpha  Voltage in alpha axis
    u_beta   Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    # Positions of the state variables within the state vector.
    I_SD_IDX = 0
    I_SQ_IDX = 1
    EPSILON_IDX = 2
    CURRENTS_IDX = [0, 1]
    CURRENTS = ['i_sd', 'i_sq']
    VOLTAGES = ['u_sd', 'u_sq']
    _model_constants = None
    _initializer = None

    def __init__(self, motor_parameter=None, nominal_values=None,
                 limit_values=None, motor_initializer=None, **kwargs):
        # Docstring of superclass
        nominal_values = nominal_values or {}
        limit_values = limit_values or {}
        super().__init__(motor_parameter, nominal_values,
                         limit_values, motor_initializer)
        self._update_model()
        self._update_limits()

    @property
    def motor_parameter(self):
        # Docstring of superclass
        return self._motor_parameter

    @property
    def initializer(self):
        # Docstring of superclass
        return self._initializer

    def reset(self, state_space,
              state_positions,
              **__):
        # Docstring of superclass
        if self._initializer and self._initializer['states']:
            self.initialize(state_space, state_positions)
            return np.asarray(list(self._initial_states.values()))
        else:
            # No initializer configured: start from the all-zero state
            # [i_sd, i_sq, epsilon].
            return np.zeros(len(self.CURRENTS) + 1)

    def torque(self, state):
        # Docstring of superclass
        raise NotImplementedError

    def _update_model(self):
        """
        Set motor parameters into a matrix for faster computation
        """
        raise NotImplementedError

    def electrical_ode(self, state, u_dq, omega, *_):
        """
        The differential equation of the Synchronous Motor.

        Args:
            state: The current state of the motor. [i_sd, i_sq, epsilon]
            omega: The mechanical load
            u_dq: The input voltages [u_sd, u_sq]

        Returns:
            The derivatives of the state vector d/dt([i_sd, i_sq, epsilon])
        """
        return np.matmul(self._model_constants, np.array([
            omega,
            state[self.I_SD_IDX],
            state[self.I_SQ_IDX],
            u_dq[0],
            u_dq[1],
            omega * state[self.I_SD_IDX],
            omega * state[self.I_SQ_IDX],
        ]))

    def i_in(self, state):
        # Docstring of superclass
        return state[self.CURRENTS_IDX]

    def _update_limits(self):
        # Docstring of superclass
        # dq voltage limits are set to half of the general voltage limit 'u'.
        voltage_limit = 0.5 * self._limits['u']
        voltage_nominal = 0.5 * self._nominal_values['u']
        limits_agenda = {}
        nominal_agenda = {}
        for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
            limits_agenda[u] = voltage_limit
            nominal_agenda[u] = voltage_nominal
            # Fall back to the stall current u / r_s if no explicit
            # general current limit 'i' is given.
            limits_agenda[i] = self._limits.get('i', None) or \
                self._limits[u] / self._motor_parameter['r_s']
            nominal_agenda[i] = self._nominal_values.get('i', None) or \
                self._nominal_values[u] / \
                self._motor_parameter['r_s']
        super()._update_limits(limits_agenda, nominal_agenda)
class SynchronousReluctanceMotor(SynchronousMotor):
    """
    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        0.57          Stator resistance
    l_d                   H          10.1e-3       Direct axis inductance
    l_q                   H          4.1e-3        Quadrature axis inductance
    p                     1          4             Pole pair number
    j_rotor               kg/m^2     0.8e-3        Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_a             A      Current through branch a
    i_b             A      Current through branch b
    i_c             A      Current through branch c
    i_alpha         A      Current in alpha axis
    i_beta          A      Current in beta axis
    =============== ====== =============================================
    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_a             V      Voltage through branch a
    u_b             V      Voltage through branch b
    u_c             V      Voltage through branch c
    u_alpha         V      Voltage in alpha axis
    u_beta          V      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_a      Current in phase a
    i_b      Current in phase b
    i_c      Current in phase c
    i_alpha  Current in alpha axis
    i_beta   Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    epsilon  Electrical rotational angle
    torque   Motor generated torque
    u_a      Voltage in phase a
    u_b      Voltage in phase b
    u_c      Voltage in phase c
    u_alpha  Voltage in alpha axis
    u_beta   Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    HAS_JACOBIAN = True
    #### Parameters taken from DOI: 10.1109/AMC.2008.4516099 (K. Malekian, M. R. Sharif, J. Milimonfared)
    _default_motor_parameter = {'p': 4,
                                'l_d': 10.1e-3,
                                'l_q': 4.1e-3,
                                'j_rotor': 0.8e-3,
                                'r_s': 0.57
                                }
    _default_nominal_values = {'i': 10, 'torque': 0, 'omega': 3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
    _default_limits = {'i': 13, 'torque': 0, 'omega': 4.3e3 * np.pi / 30, 'epsilon': np.pi, 'u': 100}
    _default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}
    IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
    IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        self._model_constants = np.array([
            # omega, i_sd, i_sq, u_sd, u_sq, omega * i_sd, omega * i_sq
            [0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
            [0, 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
            [mp['p'], 0, 0, 0, 0, 0, 0]
        ])
        # Normalize the current rows by the respective axis inductance.
        self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
        self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']

    def _torque_limit(self):
        # Docstring of superclass
        # Maximum reluctance torque for i_sd = i_sq = i_max / sqrt(2).
        return self.torque([self._limits['i_sd'] / np.sqrt(2), self._limits['i_sq'] / np.sqrt(2), 0])

    def torque(self, currents):
        # Docstring of superclass
        mp = self._motor_parameter
        return 1.5 * mp['p'] * (
            (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * \
            currents[self.I_SQ_IDX]

    def electrical_jacobian(self, state, u_in, omega, *_):
        mp = self._motor_parameter
        return (
            # Derivative of the ODE w.r.t. the state [i_sd, i_sq, epsilon]
            np.array([
                [-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * mp['p'] * omega, 0],
                [-mp['l_d'] / mp['l_q'] * mp['p'] * omega, -mp['r_s'] / mp['l_q'], 0],
                [0, 0, 0]
            ]),
            # Derivative of the ODE w.r.t. omega
            np.array([
                mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
                - mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX],
                mp['p']
            ]),
            # Derivative of the torque w.r.t. the state
            np.array([
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX],
                0
            ])
        )
class PermanentMagnetSynchronousMotor(SynchronousMotor):
    """
    ===================== ========== ============= ===========================================
    Motor Parameter       Unit       Default Value Description
    ===================== ========== ============= ===========================================
    r_s                   Ohm        18e-3         Stator resistance
    l_d                   H          0.37e-3       Direct axis inductance
    l_q                   H          1.2e-3        Quadrature axis inductance
    psi_p                 Wb         66e-3         Effective excitation flux
    p                     1          3             Pole pair number
    j_rotor               kg/m^2     0.3883        Moment of inertia of the rotor
    ===================== ========== ============= ===========================================

    =============== ====== =============================================
    Motor Currents  Unit   Description
    =============== ====== =============================================
    i_sd            A      Direct axis current
    i_sq            A      Quadrature axis current
    i_a             A      Current through branch a
    i_b             A      Current through branch b
    i_c             A      Current through branch c
    i_alpha         A      Current in alpha axis
    i_beta          A      Current in beta axis
    =============== ====== =============================================
    =============== ====== =============================================
    Motor Voltages  Unit   Description
    =============== ====== =============================================
    u_sd            V      Direct axis voltage
    u_sq            V      Quadrature axis voltage
    u_a             V      Voltage through branch a
    u_b             V      Voltage through branch b
    u_c             V      Voltage through branch c
    u_alpha         V      Voltage in alpha axis
    u_beta          V      Voltage in beta axis
    =============== ====== =============================================

    ======== ===========================================================
    Limits / Nominal Value Dictionary Entries:
    -------- -----------------------------------------------------------
    Entry    Description
    ======== ===========================================================
    i        General current limit / nominal value
    i_a      Current in phase a
    i_b      Current in phase b
    i_c      Current in phase c
    i_alpha  Current in alpha axis
    i_beta   Current in beta axis
    i_sd     Current in direct axis
    i_sq     Current in quadrature axis
    omega    Mechanical angular Velocity
    torque   Motor generated torque
    epsilon  Electrical rotational angle
    u_a      Voltage in phase a
    u_b      Voltage in phase b
    u_c      Voltage in phase c
    u_alpha  Voltage in alpha axis
    u_beta   Voltage in beta axis
    u_sd     Voltage in direct axis
    u_sq     Voltage in quadrature axis
    ======== ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    #### Parameters taken from DOI: 10.1109/TPEL.2020.3006779 (A. Brosch, S. Hanke, O. Wallscheid, J. Boecker)
    #### and DOI: 10.1109/IEMDC.2019.8785122 (S. Hanke, O. Wallscheid, J. Boecker)
    _default_motor_parameter = {
        'p': 3,
        'l_d': 0.37e-3,
        'l_q': 1.2e-3,
        'j_rotor': 0.3883,
        'r_s': 18e-3,
        'psi_p': 66e-3,
    }
    HAS_JACOBIAN = True
    _default_limits = dict(omega=12e3 * np.pi / 30, torque=0.0, i=260, epsilon=math.pi, u=300)
    _default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=240, epsilon=math.pi, u=300)
    _default_initializer = {'states': {'i_sq': 0.0, 'i_sd': 0.0, 'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}
    IO_VOLTAGES = ['u_a', 'u_b', 'u_c', 'u_sd', 'u_sq']
    IO_CURRENTS = ['i_a', 'i_b', 'i_c', 'i_sd', 'i_sq']

    def _update_model(self):
        # Docstring of superclass
        mp = self._motor_parameter
        self._model_constants = np.array([
            # omega, i_d, i_q, u_d, u_q, omega * i_d, omega * i_q
            [0, -mp['r_s'], 0, 1, 0, 0, mp['l_q'] * mp['p']],
            [-mp['psi_p'] * mp['p'], 0, -mp['r_s'], 0, 1, -mp['l_d'] * mp['p'], 0],
            [mp['p'], 0, 0, 0, 0, 0, 0],
        ])
        # Normalize the current rows by the respective axis inductance.
        self._model_constants[self.I_SD_IDX] = self._model_constants[self.I_SD_IDX] / mp['l_d']
        self._model_constants[self.I_SQ_IDX] = self._model_constants[self.I_SQ_IDX] / mp['l_q']

    def _torque_limit(self):
        # Docstring of superclass
        mp = self._motor_parameter
        if mp['l_d'] == mp['l_q']:
            # No reluctance torque: maximum torque at i_sd = 0.
            return self.torque([0, self._limits['i_sq'], 0])
        else:
            # Maximum-torque-per-current operating point: solve the
            # quadratic for the optimal i_sd on the nominal current circle.
            i_n = self.nominal_values['i']
            _p = mp['psi_p'] / (2 * (mp['l_d'] - mp['l_q']))
            _q = - i_n ** 2 / 2
            i_d_opt = - _p / 2 - np.sqrt((_p / 2) ** 2 - _q)
            i_q_opt = np.sqrt(i_n ** 2 - i_d_opt ** 2)
            return self.torque([i_d_opt, i_q_opt, 0])

    def torque(self, currents):
        # Docstring of superclass
        mp = self._motor_parameter
        return 1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * currents[self.I_SD_IDX]) * currents[self.I_SQ_IDX]

    def electrical_jacobian(self, state, u_in, omega, *args):
        mp = self._motor_parameter
        return (
            np.array([  # dx'/dx
                [-mp['r_s'] / mp['l_d'], mp['l_q'] / mp['l_d'] * omega * mp['p'], 0],
                [-mp['l_d'] / mp['l_q'] * omega * mp['p'], - mp['r_s'] / mp['l_q'], 0],
                [0, 0, 0]
            ]),
            np.array([  # dx'/dw
                mp['p'] * mp['l_q'] / mp['l_d'] * state[self.I_SQ_IDX],
                - mp['p'] * mp['l_d'] / mp['l_q'] * state[self.I_SD_IDX] - mp['p'] * mp['psi_p'] / mp['l_q'],
                mp['p']
            ]),
            np.array([  # dT/dx
                1.5 * mp['p'] * (mp['l_d'] - mp['l_q']) * state[self.I_SQ_IDX],
                1.5 * mp['p'] * (mp['psi_p'] + (mp['l_d'] - mp['l_q']) * state[self.I_SD_IDX]),
                0
            ])
        )
class InductionMotor(ThreePhaseMotor):
"""
The InductionMotor and its subclasses implement the technical system of a three phase induction motor.
This includes the system equations, the motor parameters of the equivalent circuit diagram,
as well as limits and bandwidth.
===================== ========== ============= ===========================================
Motor Parameter Unit Default Value Description
===================== ========== ============= ===========================================
r_s Ohm 2.9338 Stator resistance
r_r Ohm 1.355 Rotor resistance
l_m H 143.75e-3 Main inductance
l_sigs H 5.87e-3 Stator-side stray inductance
l_sigr H 5.87e-3 Rotor-side stray inductance
p 1 2 Pole pair number
j_rotor kg/m^2 0.0011 Moment of inertia of the rotor
===================== ========== ============= ===========================================
=============== ====== =============================================
Motor Currents Unit Description
=============== ====== =============================================
i_sd A Direct axis current
i_sq A Quadrature axis current
i_sa A Current through branch a
i_sb A Current through branch b
i_sc A Current through branch c
i_salpha A Current in alpha axis
i_sbeta A Current in beta axis
=============== ====== =============================================
=============== ====== =============================================
Motor Voltages Unit Description
=============== ====== =============================================
u_sd V Direct axis voltage
u_sq V Quadrature axis voltage
u_sa V Voltage through branch a
u_sb V Voltage through branch b
u_sc V Voltage through branch c
u_salpha V Voltage in alpha axis
u_sbeta V Voltage in beta axis
=============== ====== =============================================
======== ===========================================================
Limits / Nominal Value Dictionary Entries:
-------- -----------------------------------------------------------
Entry Description
======== ===========================================================
i General current limit / nominal value
i_sa Current in phase a
i_sb Current in phase b
i_sc Current in phase c
i_salpha Current in alpha axis
i_sbeta Current in beta axis
i_sd Current in direct axis
i_sq Current in quadrature axis
omega Mechanical angular Velocity
torque Motor generated torque
u_sa Voltage in phase a
u_sb Voltage in phase b
u_sc Voltage in phase c
u_salpha Voltage in alpha axis
u_sbeta Voltage in beta axis
u_sd Voltage in direct axis
u_sq Voltage in quadrature axis
======== ===========================================================
Note:
The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
Typically the rms value for the line voltage (:math:`U_L`) is given.
:math:`\hat{u}_S=\sqrt{2/3}~U_L`
The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
Typically the rms value for the phase current (:math:`I_S`) is given.
:math:`\hat{i}_S = \sqrt{2}~I_S`
If not specified, nominal values are equal to their corresponding limit values.
Furthermore, if specific limits/nominal values (e.g. i_sa) are not specified they are inferred from
the general limits/nominal values (e.g. i)
"""
# Positions of the state variables within the state vector
# [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon].
I_SALPHA_IDX = 0
I_SBETA_IDX = 1
PSI_RALPHA_IDX = 2
PSI_RBETA_IDX = 3
EPSILON_IDX = 4
CURRENTS_IDX = [0, 1]
FLUX_IDX = [2, 3]
CURRENTS = ['i_salpha', 'i_sbeta']
FLUXES = ['psi_ralpha', 'psi_rbeta']
STATOR_VOLTAGES = ['u_salpha', 'u_sbeta']
# All voltage / current quantities exposed through the limit and
# nominal-value dictionaries.
IO_VOLTAGES = ['u_sa', 'u_sb', 'u_sc', 'u_salpha', 'u_sbeta', 'u_sd',
               'u_sq']
IO_CURRENTS = ['i_sa', 'i_sb', 'i_sc', 'i_salpha', 'i_sbeta', 'i_sd',
               'i_sq']
HAS_JACOBIAN = True
#### Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (O. Wallscheid, M. Schenke, J. Boecker)
_default_motor_parameter = {
    'p': 2,
    'l_m': 143.75e-3,
    'l_sigs': 5.87e-3,
    'l_sigr': 5.87e-3,
    'j_rotor': 1.1e-3,
    'r_s': 2.9338,
    'r_r': 1.355,
}
_default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
_default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
_model_constants = None
# Default state initializer: all currents, fluxes and the angle start at 0.
_default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
                                   'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
                                   'epsilon': 0.0},
                        'interval': None,
                        'random_init': None,
                        'random_params': (None, None)}
_initializer = None
@property
def motor_parameter(self):
    # Docstring of superclass
    # read-only access to the physical machine parameter dictionary
    return self._motor_parameter
@property
def initializer(self):
    # Docstring of superclass
    # read-only access to the state initializer configuration
    return self._initializer
def __init__(self, motor_parameter=None, nominal_values=None,
             limit_values=None, motor_initializer=None, initial_limits=None,
             **__):
    # Docstring of superclass
    # Expand the general placeholder entries 'u' and 'i' of the default
    # nominal/limit dictionaries into one entry per concrete IO voltage and
    # current quantity, then overlay any user-supplied values on top.
    nominal_defaults = dict(self._default_nominal_values)
    limit_defaults = dict(self._default_limits)
    for defaults, user_supplied in ((nominal_defaults, nominal_values),
                                    (limit_defaults, limit_values)):
        for voltage_key in self.IO_VOLTAGES:
            defaults[voltage_key] = defaults['u']
        for current_key in self.IO_CURRENTS:
            defaults[current_key] = defaults['i']
        # the placeholders are fully expanded now and must not leak through
        del defaults['u'], defaults['i']
        defaults.update(user_supplied or {})
    super().__init__(motor_parameter, nominal_values,
                     limit_values, motor_initializer, initial_limits)
    self._update_model()
    self._update_limits(limit_defaults, nominal_defaults)
def reset(self,
          state_space,
          state_positions,
          omega=None):
    # Docstring of superclass
    # With a configured initializer, refresh the speed-dependent limits and
    # draw a fresh initial state; otherwise start from the all-zero state.
    if self._initializer and self._initializer['states']:
        self._update_initial_limits(omega=omega)
        self.initialize(state_space, state_positions)
        return np.asarray(list(self._initial_states.values()))
    # zero vector covering both currents, both fluxes and epsilon
    return np.zeros(len(self.CURRENTS) + len(self.FLUXES) + 1)
def electrical_ode(self, state, u_sr_alphabeta, omega, *args):
    """
    The differential equation of the Induction Motor.

    Args:
        state: The momentary state of the motor. [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon]
        u_sr_alphabeta: The input voltages [u_salpha, u_sbeta, u_ralpha, u_rbeta]
        omega: The mechanical load

    Returns:
        The derivatives of the state vector d/dt([i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon])
    """
    i_salpha = state[self.I_SALPHA_IDX]
    i_sbeta = state[self.I_SBETA_IDX]
    psi_ralpha = state[self.PSI_RALPHA_IDX]
    psi_rbeta = state[self.PSI_RBETA_IDX]
    # feature vector the constant system matrix is multiplied with:
    # [omega, i_salpha, i_sbeta, psi_ralpha, psi_rbeta,
    #  omega*psi_ralpha, omega*psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta]
    feature_vector = np.array([
        omega,
        i_salpha,
        i_sbeta,
        psi_ralpha,
        psi_rbeta,
        omega * psi_ralpha,
        omega * psi_rbeta,
        u_sr_alphabeta[0, 0],
        u_sr_alphabeta[0, 1],
        u_sr_alphabeta[1, 0],
        u_sr_alphabeta[1, 1],
    ])
    return np.matmul(self._model_constants, feature_vector)
def i_in(self, state):
    # Docstring of superclass
    # slice the stator currents [i_salpha, i_sbeta] out of the state vector
    stator_current_positions = self.CURRENTS_IDX
    return state[stator_current_positions]
def _torque_limit(self):
# Docstring of superclass
mp = self._motor_parameter
return 1.5 * mp['p'] * mp['l_m'] ** 2/(mp['l_m']+mp['l_sigr']) * self._limits['i_sd'] * self._limits['i_sq'] / 2
def torque(self, states):
    # Docstring of superclass
    mp = self._motor_parameter
    # rotor coupling factor l_m / l_r with l_r = l_m + l_sigr
    rotor_coupling = mp['l_m'] / (mp['l_m'] + mp['l_sigr'])
    # cross product of rotor flux and stator current in the alpha/beta plane
    flux_current_cross = (states[self.PSI_RALPHA_IDX] * states[self.I_SBETA_IDX]
                          - states[self.PSI_RBETA_IDX] * states[self.I_SALPHA_IDX])
    return 1.5 * mp['p'] * rotor_coupling * flux_current_cross
def _flux_limit(self, omega=0, eps_mag=0, u_q_max=0.0, u_rq_max=0.0):
    """
    Calculate Flux limits for given current and magnetic-field angle

    Args:
        omega(float): speed given by mechanical load
        eps_mag(float): magnetic field angle
        u_q_max(float): maximal stator voltage in q-system
        u_rq_max(float): maximal rotor voltage in q-system

    Returns:
        maximal flux values(list) in alpha-beta-system
    """
    mp = self.motor_parameter
    # stator / rotor self inductances and their coupling ratio
    l_s = mp['l_m'] + mp['l_sigs']
    l_r = mp['l_m'] + mp['l_sigr']
    l_mr = mp['l_m'] / l_r
    # total leakage factor of the machine
    sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
    # limiting flux for a low omega
    if omega == 0:
        psi_d_max = mp['l_m'] * self._nominal_values['i_sd']
    else:
        # d/q currents from the initial alpha/beta currents at angle eps_mag
        i_d, i_q = self.q_inv([self._initial_states['i_salpha'],
                               self._initial_states['i_sbeta']],
                              eps_mag)
        psi_d_max = mp['p'] * omega * sigma * l_s * i_d + \
            (mp['r_s'] + mp['r_r'] * l_mr**2) * i_q + \
            u_q_max + \
            l_mr * u_rq_max
        psi_d_max /= - mp['p'] * omega * l_mr
        # clipping flux and setting nominal limit
        # NOTE: this clip must stay inside the else-branch: i_d is only
        # bound here (the omega == 0 branch never defines it)
        psi_d_max = 0.9 * np.clip(psi_d_max, a_min=0, a_max=np.abs(mp['l_m'] * i_d))
    # returning flux in alpha, beta system
    return self.q([psi_d_max, 0], eps_mag)
def _update_model(self):
    # Docstring of superclass
    mp = self._motor_parameter
    # stator and rotor self inductances
    l_s = mp['l_m']+mp['l_sigs']
    l_r = mp['l_m']+mp['l_sigr']
    # total leakage factor
    sigma = (l_s*l_r-mp['l_m']**2) /(l_s*l_r)
    # rotor time constant
    tau_r = l_r / mp['r_r']
    # transient stator time constant
    tau_sig = sigma * l_s / (
        mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
    # Each row maps the feature vector of electrical_ode
    # [omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha,
    #  omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta]
    # to one state derivative.
    self._model_constants = np.array([
        # omega, i_alpha, i_beta, psi_ralpha, psi_rbeta, omega * psi_ralpha, omega * psi_rbeta, u_salpha, u_sbeta, u_ralpha, u_rbeta,
        [0, -1 / tau_sig, 0, mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0, 0,
         +mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 1 / (sigma * l_s), 0,
         -mp['l_m'] / (sigma * l_r * l_s), 0, ],  # i_salpha_dot
        [0, 0, -1 / tau_sig, 0,
         mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
         -mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0, 0,
         1 / (sigma * l_s), 0, -mp['l_m'] / (sigma * l_r * l_s), ],
        # i_sbeta_dot
        [0, mp['l_m'] / tau_r, 0, -1 / tau_r, 0, 0, -mp['p'], 0, 0, 1,
         0, ],  # psi_ralpha_dot
        [0, 0, mp['l_m'] / tau_r, 0, -1 / tau_r, mp['p'], 0, 0, 0, 0, 1, ],
        # psi_rbeta_dot
        [mp['p'], 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ],  # epsilon_dot
    ])
def electrical_jacobian(self, state, u_in, omega, *args):
    """
    Jacobians of the electrical ODE and the torque equation.

    Args:
        state: state vector [i_salpha, i_sbeta, psi_ralpha, psi_rbeta, epsilon]
        u_in: input voltages (unused here; the ODE is linear in the voltages)
        omega: mechanical angular velocity

    Returns:
        Tuple of three arrays:
            - d(state')/d(state): 5x5 system jacobian
            - d(state')/d(omega): 5-element vector
            - d(torque)/d(state): 5-element vector
    """
    mp = self._motor_parameter
    # stator / rotor self inductances, leakage factor and time constants
    # (same derivation as in _update_model)
    l_s = mp['l_m'] + mp['l_sigs']
    l_r = mp['l_m'] + mp['l_sigr']
    sigma = (l_s * l_r - mp['l_m'] ** 2) / (l_s * l_r)
    tau_r = l_r / mp['r_r']
    tau_sig = sigma * l_s / (
        mp['r_s'] + mp['r_r'] * (mp['l_m'] ** 2) / (l_r ** 2))
    return (
        np.array([  # dx'/dx
            # i_alpha i_beta psi_alpha psi_beta epsilon
            [-1 / tau_sig, 0,
             mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2),
             omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s), 0],
            [0, - 1 / tau_sig,
             - omega * mp['l_m'] * mp['p'] / (sigma * l_r * l_s),
             mp['l_m'] * mp['r_r'] / (sigma * l_s * l_r ** 2), 0],
            [mp['l_m'] / tau_r, 0, - 1 / tau_r, - omega * mp['p'], 0],
            [0, mp['l_m'] / tau_r, omega * mp['p'], - 1 / tau_r, 0],
            [0, 0, 0, 0, 0]
        ]),
        np.array([  # dx'/dw
            mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
                self.PSI_RBETA_IDX],
            - mp['l_m'] * mp['p'] / (sigma * l_r * l_s) * state[
                self.PSI_RALPHA_IDX],
            - mp['p'] * state[self.PSI_RBETA_IDX],
            mp['p'] * state[self.PSI_RALPHA_IDX],
            mp['p']
        ]),
        np.array([  # dT/dx
            - state[self.PSI_RBETA_IDX] * 3 / 2 * mp['p'] * mp[
                'l_m'] / l_r,
            state[self.PSI_RALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
            state[self.I_SBETA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
            - state[self.I_SALPHA_IDX] * 3 / 2 * mp['p'] * mp['l_m'] / l_r,
            0
        ])
    )
class SquirrelCageInductionMotor(InductionMotor):
    """
    =====================  ==========  ============== ===========================================
    Motor Parameter        Unit        Default Value  Description
    =====================  ==========  ============== ===========================================
    r_s                    Ohm         2.9338         Stator resistance
    r_r                    Ohm         1.355          Rotor resistance
    l_m                    H           143.75e-3      Main inductance
    l_sigs                 H           5.87e-3        Stator-side stray inductance
    l_sigr                 H           5.87e-3        Rotor-side stray inductance
    p                      1           2              Pole pair number
    j_rotor                kg/m^2      0.0011         Moment of inertia of the rotor
    =====================  ==========  ============== ===========================================

    ===============  ======  =============================================
    Motor Currents   Unit    Description
    ===============  ======  =============================================
    i_sd             A       Direct axis current
    i_sq             A       Quadrature axis current
    i_sa             A       Stator current through branch a
    i_sb             A       Stator current through branch b
    i_sc             A       Stator current through branch c
    i_salpha         A       Stator current in alpha direction
    i_sbeta          A       Stator current in beta direction
    ===============  ======  =============================================

    ===============  ======  =============================================
    Rotor flux       Unit    Description
    ===============  ======  =============================================
    psi_rd           Vs      Direct axis of the rotor oriented flux
    psi_rq           Vs      Quadrature axis of the rotor oriented flux
    psi_ra           Vs      Rotor oriented flux in branch a
    psi_rb           Vs      Rotor oriented flux in branch b
    psi_rc           Vs      Rotor oriented flux in branch c
    psi_ralpha       Vs      Rotor oriented flux in alpha direction
    psi_rbeta        Vs      Rotor oriented flux in beta direction
    ===============  ======  =============================================

    ===============  ======  =============================================
    Motor Voltages   Unit    Description
    ===============  ======  =============================================
    u_sd             V       Direct axis voltage
    u_sq             V       Quadrature axis voltage
    u_sa             V       Stator voltage through branch a
    u_sb             V       Stator voltage through branch b
    u_sc             V       Stator voltage through branch c
    u_salpha         V       Stator voltage in alpha axis
    u_sbeta          V       Stator voltage in beta axis
    ===============  ======  =============================================

    ========  ===========================================================
    Limits / Nominal Value Dictionary Entries:
    --------  -----------------------------------------------------------
    Entry     Description
    ========  ===========================================================
    i         General current limit / nominal value
    i_sa      Current in phase a
    i_sb      Current in phase b
    i_sc      Current in phase c
    i_salpha  Current in alpha axis
    i_sbeta   Current in beta axis
    i_sd      Current in direct axis
    i_sq      Current in quadrature axis
    omega     Mechanical angular Velocity
    torque    Motor generated torque
    u_sa      Voltage in phase a
    u_sb      Voltage in phase b
    u_sc      Voltage in phase c
    u_salpha  Voltage in alpha axis
    u_sbeta   Voltage in beta axis
    u_sd      Voltage in direct axis
    u_sq      Voltage in quadrature axis
    ========  ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    # Parameters taken from DOI: 10.1109/EPEPEMC.2018.8522008 (O. Wallscheid, M. Schenke, J. Boecker)
    _default_motor_parameter = {
        'p': 2,
        'l_m': 143.75e-3,
        'l_sigs': 5.87e-3,
        'l_sigr': 5.87e-3,
        'j_rotor': 1.1e-3,
        'r_s': 2.9338,
        'r_r': 1.355,
    }
    _default_limits = dict(omega=4e3 * np.pi / 30, torque=0.0, i=5.5, epsilon=math.pi, u=560)
    _default_nominal_values = dict(omega=3e3 * np.pi / 30, torque=0.0, i=3.9, epsilon=math.pi, u=560)
    _default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
                                       'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
                                       'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def electrical_ode(self, state, u_salphabeta, omega, *args):
        """
        The differential equation of the SCIM.

        Sets u_ralpha = u_rbeta = 0 before calling the respective super
        function, since the rotor circuit of the squirrel cage machine is
        short-circuited and cannot be fed externally.
        """
        u_ralphabeta = np.zeros_like(u_salphabeta)
        u_sr_alphabeta = np.array([u_salphabeta, u_ralphabeta])
        return super().electrical_ode(state, u_sr_alphabeta, omega, *args)

    def _update_limits(self, limit_values=None, nominal_values=None):
        # Docstring of superclass
        # The parameters are accepted for interface compatibility but unused
        # here; defaults changed from mutable `{}` literals to None to avoid
        # the shared-mutable-default pitfall.
        voltage_limit = 0.5 * self._limits['u']
        voltage_nominal = 0.5 * self._nominal_values['u']
        limits_agenda = {}
        nominal_agenda = {}
        for u, i in zip(self.IO_VOLTAGES, self.IO_CURRENTS):
            limits_agenda[u] = voltage_limit
            nominal_agenda[u] = voltage_nominal
            # fall back to u / r_s if no general current limit 'i' is set
            limits_agenda[i] = self._limits.get('i', None) or \
                self._limits[u] / self._motor_parameter['r_s']
            nominal_agenda[i] = self._nominal_values.get('i', None) or \
                self._nominal_values[u] / self._motor_parameter['r_s']
        super()._update_limits(limits_agenda, nominal_agenda)

    def _update_initial_limits(self, nominal_new=None, omega=None):
        # Docstring of superclass
        # draw a sample magnetic field angle from [-pi,pi]
        eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
        flux_alphabeta_limits = self._flux_limit(
            omega=omega,
            eps_mag=eps_mag,
            u_q_max=self._nominal_values['u_sq'])
        # using absolute value, because limits should describe upper limit
        # after abs-operator, norm of alphabeta flux still equal to
        # d-component of flux
        flux_alphabeta_limits = np.abs(flux_alphabeta_limits)
        flux_nominal_limits = {state: value for state, value in
                               zip(self.FLUXES, flux_alphabeta_limits)}
        # default changed from mutable `{}` to None; treat None as "no update"
        flux_nominal_limits.update(nominal_new or {})
        super()._update_initial_limits(flux_nominal_limits)
class DoublyFedInductionMotor(InductionMotor):
    """
    =====================  ==========  ============== ===========================================
    Motor Parameter        Unit        Default Value  Description
    =====================  ==========  ============== ===========================================
    r_s                    Ohm         12e-3          Stator resistance
    r_r                    Ohm         21e-3          Rotor resistance
    l_m                    H           13.5e-3        Main inductance
    l_sigs                 H           0.2e-3         Stator-side stray inductance
    l_sigr                 H           0.1e-3         Rotor-side stray inductance
    p                      1           2              Pole pair number
    j_rotor                kg/m^2      1e3            Moment of inertia of the rotor
    =====================  ==========  ============== ===========================================

    ===============  ======  =============================================
    Motor Currents   Unit    Description
    ===============  ======  =============================================
    i_sd             A       Direct axis current
    i_sq             A       Quadrature axis current
    i_sa             A       Current through branch a
    i_sb             A       Current through branch b
    i_sc             A       Current through branch c
    i_salpha         A       Current in alpha axis
    i_sbeta          A       Current in beta axis
    ===============  ======  =============================================

    ===============  ======  =============================================
    Rotor flux       Unit    Description
    ===============  ======  =============================================
    psi_rd           Vs      Direct axis of the rotor oriented flux
    psi_rq           Vs      Quadrature axis of the rotor oriented flux
    psi_ra           Vs      Rotor oriented flux in branch a
    psi_rb           Vs      Rotor oriented flux in branch b
    psi_rc           Vs      Rotor oriented flux in branch c
    psi_ralpha       Vs      Rotor oriented flux in alpha direction
    psi_rbeta        Vs      Rotor oriented flux in beta direction
    ===============  ======  =============================================

    ===============  ======  =============================================
    Motor Voltages   Unit    Description
    ===============  ======  =============================================
    u_sd             V       Direct axis voltage
    u_sq             V       Quadrature axis voltage
    u_sa             V       Stator voltage through branch a
    u_sb             V       Stator voltage through branch b
    u_sc             V       Stator voltage through branch c
    u_salpha         V       Stator voltage in alpha axis
    u_sbeta          V       Stator voltage in beta axis
    u_ralpha         V       Rotor voltage in alpha axis
    u_rbeta          V       Rotor voltage in beta axis
    ===============  ======  =============================================

    ========  ===========================================================
    Limits / Nominal Value Dictionary Entries:
    --------  -----------------------------------------------------------
    Entry     Description
    ========  ===========================================================
    i         General current limit / nominal value
    i_sa      Current in phase a
    i_sb      Current in phase b
    i_sc      Current in phase c
    i_salpha  Current in alpha axis
    i_sbeta   Current in beta axis
    i_sd      Current in direct axis
    i_sq      Current in quadrature axis
    omega     Mechanical angular Velocity
    torque    Motor generated torque
    u_sa      Voltage in phase a
    u_sb      Voltage in phase b
    u_sc      Voltage in phase c
    u_salpha  Voltage in alpha axis
    u_sbeta   Voltage in beta axis
    u_sd      Voltage in direct axis
    u_sq      Voltage in quadrature axis
    u_ralpha  Rotor voltage in alpha axis
    u_rbeta   Rotor voltage in beta axis
    ========  ===========================================================

    Note:
        The voltage limits should be the amplitude of the phase voltage (:math:`\hat{u}_S`).
        Typically the rms value for the line voltage (:math:`U_L`) is given.
        :math:`\hat{u}_S=\sqrt{2/3}~U_L`

        The current limits should be the amplitude of the phase current (:math:`\hat{i}_S`).
        Typically the rms value for the phase current (:math:`I_S`) is given.
        :math:`\hat{i}_S = \sqrt{2}~I_S`

        If not specified, nominal values are equal to their corresponding limit values.
        Furthermore, if specific limits/nominal values (e.g. i_a) are not specified they are inferred from
        the general limits/nominal values (e.g. i)
    """
    # rotor-side quantities in the alpha/beta system
    ROTOR_VOLTAGES = ['u_ralpha', 'u_rbeta']
    ROTOR_CURRENTS = ['i_ralpha', 'i_rbeta']
    # rotor-side IO quantities (abc and dq systems)
    IO_ROTOR_VOLTAGES = ['u_ra', 'u_rb', 'u_rc', 'u_rd', 'u_rq']
    IO_ROTOR_CURRENTS = ['i_ra', 'i_rb', 'i_rc', 'i_rd', 'i_rq']
    # Parameters taken from DOI: 10.1016/j.jestch.2016.01.015 (N. Kumar, T. R. Chelliah, S. P. Srivastava)
    # NOTE(review): the parameter table in the class docstring lists different
    # default values (e.g. r_s=12e-3) than the ones coded here - verify which
    # set is intended.
    _default_motor_parameter = {
        'p': 2,
        'l_m': 297.5e-3,
        'l_sigs': 25.71e-3,
        'l_sigr': 25.71e-3,
        'j_rotor': 13.695e-3,
        'r_s': 4.42,
        'r_r': 3.51,
    }
    _default_limits = dict(omega=1800 * np.pi / 30, torque=0.0, i=9, epsilon=math.pi, u=720)
    _default_nominal_values = dict(omega=1650 * np.pi / 30, torque=0.0, i=7.5, epsilon=math.pi, u=720)
    _default_initializer = {'states': {'i_salpha': 0.0, 'i_sbeta': 0.0,
                                       'psi_ralpha': 0.0, 'psi_rbeta': 0.0,
                                       'epsilon': 0.0},
                            'interval': None,
                            'random_init': None,
                            'random_params': (None, None)}

    def __init__(self, **kwargs):
        # Extend the IO quantities by the rotor quantities on the *instance*.
        # Using `self.IO_VOLTAGES += ...` would call list.__iadd__ on the
        # class-level list inherited from InductionMotor and mutate it in
        # place for every induction motor class on each instantiation.
        # Building a new list binds an untouched instance attribute instead.
        self.IO_VOLTAGES = self.IO_VOLTAGES + self.IO_ROTOR_VOLTAGES
        self.IO_CURRENTS = self.IO_CURRENTS + self.IO_ROTOR_CURRENTS
        super().__init__(**kwargs)

    def _update_limits(self, limit_values=None, nominal_values=None):
        # Docstring of superclass
        # The parameters are accepted for interface compatibility but unused
        # here; defaults changed from mutable `{}` literals to None to avoid
        # the shared-mutable-default pitfall.
        voltage_limit = 0.5 * self._limits['u']
        voltage_nominal = 0.5 * self._nominal_values['u']
        limits_agenda = {}
        nominal_agenda = {}
        for u, i in zip(self.IO_VOLTAGES + self.ROTOR_VOLTAGES,
                        self.IO_CURRENTS + self.ROTOR_CURRENTS):
            limits_agenda[u] = voltage_limit
            nominal_agenda[u] = voltage_nominal
            # fall back to u / r_r if no general current limit 'i' is set
            limits_agenda[i] = self._limits.get('i', None) or \
                self._limits[u] / self._motor_parameter['r_r']
            nominal_agenda[i] = self._nominal_values.get('i', None) or \
                self._nominal_values[u] / self._motor_parameter['r_r']
        super()._update_limits(limits_agenda, nominal_agenda)

    def _update_initial_limits(self, nominal_new=None, omega=None):
        # Docstring of superclass
        # draw a sample magnetic field angle from [-pi,pi]
        eps_mag = 2 * np.pi * np.random.random_sample() - np.pi
        flux_alphabeta_limits = self._flux_limit(
            omega=omega,
            eps_mag=eps_mag,
            u_q_max=self._nominal_values['u_sq'],
            u_rq_max=self._nominal_values['u_rq'])
        flux_nominal_limits = {state: value for state, value in
                               zip(self.FLUXES, flux_alphabeta_limits)}
        # default changed from mutable `{}` to None; treat None as "no update"
        flux_nominal_limits.update(nominal_new or {})
        super()._update_initial_limits(flux_nominal_limits)