hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f496759263766af254701d84895da1044ba21307
| 84
|
py
|
Python
|
CodeWars/7 Kyu/All Star Code Challenge #3.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/All Star Code Challenge #3.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/7 Kyu/All Star Code Challenge #3.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def remove_vowels(strng):
return ''.join([i for i in strng if i not in 'aeiou'])
| 42
| 58
| 0.666667
| 16
| 84
| 3.4375
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 84
| 2
| 58
| 42
| 0.808824
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
f4a14a4a752f132c9fb734fdfb1c3ca53b30da47
| 241
|
py
|
Python
|
zabbix_exporter/compat.py
|
fit2cloudrd/zabbix-exporter
|
a4ca165fd87db9ae77049902d786d9888fde36bd
|
[
"BSD-2-Clause"
] | 56
|
2017-03-13T09:50:35.000Z
|
2022-03-06T08:44:17.000Z
|
zabbix_exporter/compat.py
|
fit2cloudrd/zabbix-exporter
|
a4ca165fd87db9ae77049902d786d9888fde36bd
|
[
"BSD-2-Clause"
] | 7
|
2017-02-25T16:23:14.000Z
|
2019-06-18T14:14:45.000Z
|
zabbix_exporter/compat.py
|
fit2cloudrd/zabbix-exporter
|
a4ca165fd87db9ae77049902d786d9888fde36bd
|
[
"BSD-2-Clause"
] | 12
|
2017-08-29T08:31:42.000Z
|
2021-05-18T21:41:18.000Z
|
# flake8: noqa
try:
from http.server import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
try:
import io as StringIO
except ImportError:
import StringIO
| 21.909091
| 65
| 0.788382
| 25
| 241
| 7.6
| 0.6
| 0.168421
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005025
| 0.174274
| 241
| 10
| 66
| 24.1
| 0.949749
| 0.049793
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f4b559fa5e8f59257aec2ba429cfe7d17192df70
| 10,414
|
py
|
Python
|
facepp-python-sdk-master/test.py
|
voidhug/Melbourne
|
b25b594b677101edda16d12084ad07972eb29593
|
[
"Apache-2.0"
] | null | null | null |
facepp-python-sdk-master/test.py
|
voidhug/Melbourne
|
b25b594b677101edda16d12084ad07972eb29593
|
[
"Apache-2.0"
] | null | null | null |
facepp-python-sdk-master/test.py
|
voidhug/Melbourne
|
b25b594b677101edda16d12084ad07972eb29593
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
API_KEY = '07fa940f62796288e7cb675c92829ad0'
API_SECRET = 'ELWIYZZeYf-zPAloQt_1OtqF7CtlQX3W'
from pprint import pformat
import json
import Emotion
def print_result(hint, result):
def encode(obj):
if type(obj) is unicode:
return obj.encode('utf-8')
if type(obj) is dict:
return {encode(k): encode(v) for (k, v) in obj.iteritems()}
if type(obj) is list:
return [encode(i) for i in obj]
return obj
print hint
result = encode(result)
print '\n'.join([' ' + i for i in pformat(result, width=75).split('\n')])
def writejson2file(obj, filename):
with open(filename, 'w') as outfile:
data = json.dumps(obj, indent=4, sort_keys=True)
outfile.write(data)
from facepp import API
api = API(API_KEY, API_SECRET)
# IMAGE_DIR = 'http://cn.faceplusplus.com/static/resources/python_demo/'
# url = IMAGE_DIR + '4.jpg'
# url = "http://i.niupic.com/images/2016/08/20/6jJO2B.jpg"
# url = "http://i.niupic.com/images/2016/08/20/bkJFXA.jpg"
#
# face = api.detection.detect(url = url)
# face_height = face['face'][0]['position']['height'] * 0.01 * face['img_height']
# face_width = face['face'][0]['position']['width'] * 0.01 * face['img_width']
# face_id = face['face'][0]['face_id']
# points = api.detection.landmark(face_id = face_id)['result'][0]['landmark']
# # points['face_height'] = face_height
# # points['face_width'] = face_width
# # writejson2file(points, './test.json')
#
# points_list = list(points)
# points_list.sort()
#
# valid_points = {12: 0, 10: 9, 11: 18, 8: 7, 9: 16, 6: 6, 7: 15,
# 44: 41, 43: 38, 42: 44, 46: 37, 45: 49, 4: 40, 48: 46, 47: 52, 5: 43, 39: 48, 40: 54, 41: 51,
# 3: 58,
# 13: 29, 14: 34, 15: 35, 16: 36, 17: 33,
# 22: 79, 21: 82, 20: 81, 19: 80, 18: 75,
# 1: 20, 23: 21, 24: 27, 25: 26, 26: 28, 27: 25, 30: 22, 29: 19, 28: 23,
# 2: 70, 31: 67, 32: 73, 33: 72, 34: 74, 35: 71, 38: 68, 37: 65, 36: 69}
#
# out_data = []
# for i in range(1, 49):
# in_data = []
# in_data.append(points[points_list[valid_points[i]]]['x'])
# in_data.append(points[points_list[valid_points[i]]]['y'])
# out_data.append(in_data)
# print_result("", out_data)
# # 微笑分数
# smile_grade = face['face'][0]['attribute']['smiling']
# print smile_grade
# 表情相似度比对
def url2PointsList(url):
face = api.detection.detect(url = url)
face_id = face['face'][0]['face_id']
points = api.detection.landmark(face_id = face_id)['result'][0]['landmark']
points_list = list(points)
points_list.sort()
valid_points = {12: 0, 10: 9, 11: 18, 8: 7, 9: 16, 6: 6, 7: 15,
44: 41, 43: 38, 42: 44, 46: 37, 45: 49, 4: 40, 48: 46, 47: 52, 5: 43, 39: 48, 40: 54, 41: 51,
3: 58,
13: 29, 14: 34, 15: 35, 16: 36, 17: 33,
22: 79, 21: 82, 20: 81, 19: 80, 18: 75,
1: 20, 23: 21, 24: 27, 25: 26, 26: 28, 27: 25, 30: 22, 29: 19, 28: 23,
2: 70, 31: 67, 32: 73, 33: 72, 34: 74, 35: 71, 38: 68, 37: 65, 36: 69}
out_data = []
for i in range(1, 49):
in_data = []
in_data.append(points[points_list[valid_points[i]]]['x'])
in_data.append(points[points_list[valid_points[i]]]['y'])
out_data.append(in_data)
return out_data
# <a href="http://www.niupic.com/photo/522177.html"><img src="http://i.niupic.com/images/2016/08/21/7dmFhq.jpg"></a>
# http://i.niupic.com/images/2016/08/21/cBMwu1.jpg 张旭 斜眼
# http://i.niupic.com/images/2016/08/21/43FQ37.jpg 朱天成 平静
# http://i.niupic.com/images/2016/08/21/SadQ7N.jpg 张旭 斜眼
# http://i.niupic.com/images/2016/08/21/5xTdVU.jpg 朱天成 斜眼
# http://i.niupic.com/images/2016/08/21/27Q0v8.jpg 张旭 平静
# http://i.niupic.com/images/2016/08/21/J7EdTs.jpg 张旭 平静
# http://i.niupic.com/images/2016/08/21/ZeeV6O.jpg 张旭 夸张
# http://i.niupic.com/images/2016/08/21/ZK5qmK.jpg 朱天成 平静
# http://i.niupic.com/images/2016/08/21/nK7CC8.jpg 朱天成 夸张
# http://i.niupic.com/images/2016/08/21/f9o2kQ.jpg 朱天成 夸张
# http://i.niupic.com/images/2016/08/21/HZ3hBC.jpg 张旭 夸张
# http://i.niupic.com/images/2016/08/21/pFXFnu.jpg 朱天成 斜眼
# http://i.niupic.com/images/2016/08/21/GkDdND.jpg 张旭 斜眼
# http://i.niupic.com/images/2016/08/21/S6PMwX.jpg 朱天成 平静
# http://i.niupic.com/images/2016/08/21/ooNMn0.jpg 张旭 斜眼
# http://i.niupic.com/images/2016/08/21/cmtpC5.jpg 朱天成 斜眼
# http://i.niupic.com/images/2016/08/21/blOvqq.jpg 张旭 平静
# http://i.niupic.com/images/2016/08/21/Wbr3Jm.jpg 张旭 平静
# http://i.niupic.com/images/2016/08/21/cW4BUK.jpg 张旭 夸张
# http://i.niupic.com/images/2016/08/21/Jin9A3.jpg 朱天成 平静
# http://i.niupic.com/images/2016/08/21/Tabpsi.jpg 朱天成 夸张
# http://i.niupic.com/images/2016/08/21/Vqv9wZ.jpg 朱天成 夸张
# http://i.niupic.com/images/2016/08/21/EEMtRK.jpg 张旭 夸张
# http://i.niupic.com/images/2016/08/21/43Am9v.jpg 朱天成 斜眼
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/7dmFhq.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/eYSG0M.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/IRY0dk.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/J3kSoD.jpg"))
# like,v= Emotion.proc_diff(url2PointsList("http://i.niupic.com/images/2016/08/21/7dmFhq.jpg"), url2PointsList("http://i.niupic.com/images/2016/08/21/IRY0dk.jpg"))
# print v
# print like
# ## 朱天成 平静 VS 朱天成 平静 0.0 32389.0000461 不可信
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/S6PMwX.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/Jin9A3.jpg"))
# like,v= Emotion.proc_diff(url2PointsList("http://i.niupic.com/images/2016/08/21/S6PMwX.jpg"), url2PointsList("http://i.niupic.com/images/2016/08/21/Jin9A3.jpg"))
# print v
# print like
# ## 张旭 平静 VS 张旭 平静 0.0 6023.00004062
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/blOvqq.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/Wbr3Jm.jpg"))
# like,v= Emotion.proc_diff(url2PointsList("http://i.niupic.com/images/2016/08/21/Wbr3Jm.jpg"), url2PointsList("http://i.niupic.com/images/2016/08/21/blOvqq.jpg"))
# print v
# print like
# ## 张旭 平静 VS 张旭 夸张 0.0 14164.026182
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/blOvqq.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/cW4BUK.jpg"))
# like,v= Emotion.proc_diff(url2PointsList("http://i.niupic.com/images/2016/08/21/cW4BUK.jpg"), url2PointsList("http://i.niupic.com/images/2016/08/21/blOvqq.jpg"))
# print v
# print like
# ## 张旭 夸张 VS 张旭 夸张 0.0 1446.0000141
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/Vqv9wZ.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/cW4BUK.jpg"))
# like,v= Emotion.proc_diff(url2PointsList("http://i.niupic.com/images/2016/08/21/cW4BUK.jpg"), url2PointsList("http://i.niupic.com/images/2016/08/21/Vqv9wZ.jpg"))
# print v
# print like
# ## 张旭 夸张 VS 张旭 夸张 0.0 1446.0000141
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/Vqv9wZ.jpg"))
# print_result("", url2PointsList("http://i.niupic.com/images/2016/08/21/cW4BUK.jpg"))
# like,v= Emotion.proc_diff(url2PointsList("http://i.niupic.com/images/2016/08/21/cW4BUK.jpg"), url2PointsList("http://i.niupic.com/images/2016/08/21/Vqv9wZ.jpg"))
# print v
# print like
# # 朱天成 咪咪笑
# str1 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/209741014C27357CAB59BADB0F041AD0.jpg'
#
# # 朱天成 咪咪笑
# str2 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/BDC15BE7FAD12DCB7A2269CF88418A92.jpg'
#
# # 朱天成 平静
# str3 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/BA4E9F2ADDE02575205BEB2CA0E16636.jpg'
#
# # 朱天成 平静
# str4 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/FDDFBA9106F632C11CE26B21B6DB0BD7.jpg'
# ## 朱天成 咪咪笑 VS 朱天成 咪咪笑 99.9999815278 3.69444957864e-07 忽略
# print_result("", url2PointsList(str1))
# print_result("", url2PointsList(str2))
# like,v= Emotion.proc_diff(url2PointsList(str1), url2PointsList(str2))
# print v
# print like
# ## 朱天成 平静 VS 朱天成 平静 0.0 4207.0000025
# print_result("", url2PointsList(str3))
# print_result("", url2PointsList(str4))
# like,v= Emotion.proc_diff(url2PointsList(str3), url2PointsList(str4))
# print v
# print like
# ## 朱天成 平静 VS 朱天成 咪咪笑 0.0 5000.00238902
# print_result("", url2PointsList(str3))
# print_result("", url2PointsList(str1))
# like,v= Emotion.proc_diff(url2PointsList(str3), url2PointsList(str1))
# print v
# print like
# # 张瑞鹏 平静
# str1 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/591F2050DBED19D3254AEBD9FAF4C4A3.jpg'
#
# # 张瑞鹏 疑惑
# str2 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/C15BA81563C3C88D80A68ACB2949E4C2.jpg'
#
# # 张瑞鹏 疑惑
# str3 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/C5731E69F94016018B34CC9B9CF9D340.jpg'
#
# # 张瑞鹏 平静
# str4 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/EDB5FFB1DE380F5301C7113B6124B4C5.jpg'
# ## 张瑞鹏 平静 VS 张瑞鹏 平静 99.9992729096 1.45418072167e-05 疑惑
# a = str1
# b = str4
# print_result("", url2PointsList(a))
# print_result("", url2PointsList(b))
# like,v= Emotion.proc_diff(url2PointsList(a), url2PointsList(b))
# print v
# print like
# ## 张瑞鹏 平静 VS 张瑞鹏 疑惑 99.9992729096 1.45418072167e-05 0.0 2477.0015691
# a = str1
# b = str2
# print_result("", url2PointsList(a))
# print_result("", url2PointsList(b))
# like,v= Emotion.proc_diff(url2PointsList(a), url2PointsList(b))
# print v
# print like
# 张旭 模仿傅 一
# str1 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/31794369EB89FF31251D73B0DC9EA3CF.jpg'
str1 = 'http://i.niupic.com/images/2016/08/21/blOvqq.jpg'
# 张旭 模仿傅 二
str2 = "https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/97E12037F139859311C04AC1949C713A.jpg"
# 朱天成 模仿傅
str3 = 'https://coding.net/u/zhu_tian_cheng/p/SybilPhotos/git/raw/master/B5CB4D2C94B329E15EDFCA6DDC4133F2.jpg'
# 傅园慧
str4 = 'http://i4.cqnews.net/4G/attachement/png/site82/20160809/780cb8d50dd6191359bc0f.png'
## 张瑞鹏 模仿 傅园慧 疑惑 99.9992729096 1.45418072167e-05 0.0 2477.0015691
a = str1
b = str4
print_result("", url2PointsList(a))
print_result("", url2PointsList(b))
like,v= Emotion.proc_diff(url2PointsList(a), url2PointsList(b))
print v
print like
| 42.161943
| 163
| 0.676205
| 1,684
| 10,414
| 4.112233
| 0.162114
| 0.07148
| 0.085776
| 0.10917
| 0.740361
| 0.735596
| 0.726643
| 0.702383
| 0.679711
| 0.675523
| 0
| 0.161036
| 0.135971
| 10,414
| 247
| 164
| 42.161943
| 0.60858
| 0.736797
| 0
| 0
| 0
| 0.055556
| 0.170789
| 0.025128
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.074074
| null | null | 0.148148
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f4bbf6207e0888b28f2833a82f25f206daa67b4f
| 97
|
py
|
Python
|
python/leetcode/068_WIP_text_justification.py
|
yxun/Notebook
|
680ae89a32d3f7d4fdcd541e66cea97e29efbd26
|
[
"Apache-2.0"
] | 1
|
2021-10-04T13:26:32.000Z
|
2021-10-04T13:26:32.000Z
|
python/leetcode/068_WIP_text_justification.py
|
yxun/Notebook
|
680ae89a32d3f7d4fdcd541e66cea97e29efbd26
|
[
"Apache-2.0"
] | 3
|
2020-03-24T19:34:42.000Z
|
2022-01-21T20:15:39.000Z
|
python/leetcode/068_WIP_text_justification.py
|
yxun/Notebook
|
680ae89a32d3f7d4fdcd541e66cea97e29efbd26
|
[
"Apache-2.0"
] | 1
|
2021-04-01T20:56:50.000Z
|
2021-04-01T20:56:50.000Z
|
#%%
"""
- Text Justification
- https://leetcode.com/problems/text-justification/
- Hard
"""
#%%
| 10.777778
| 51
| 0.639175
| 9
| 97
| 6.888889
| 0.777778
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123711
| 97
| 8
| 52
| 12.125
| 0.729412
| 0.865979
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f4be4a2e424d9f1b9ec1d6816790ca553b1b24e7
| 14,195
|
py
|
Python
|
test/test_http_south.py
|
foglamp/foglamp-south-http
|
c1ed3740d168897df46b8648b10f8f1e7a2715ae
|
[
"Apache-2.0"
] | 1
|
2019-10-22T18:32:54.000Z
|
2019-10-22T18:32:54.000Z
|
test/test_http_south.py
|
foglamp/foglamp-south-http
|
c1ed3740d168897df46b8648b10f8f1e7a2715ae
|
[
"Apache-2.0"
] | 6
|
2018-08-02T19:14:16.000Z
|
2019-08-16T09:42:38.000Z
|
test/test_http_south.py
|
foglamp/foglamp-south-http
|
c1ed3740d168897df46b8648b10f8f1e7a2715ae
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""Unit test for foglamp.plugins.south.http_south.http_south"""
import copy
import json
from unittest import mock
from unittest.mock import call, patch
import pytest
import aiohttp.web_exceptions
from aiohttp.test_utils import make_mocked_request
from aiohttp.streams import StreamReader
from multidict import CIMultiDict
from python.foglamp.plugins.south.http_south import http_south
from python.foglamp.plugins.south.http_south.http_south import HttpSouthIngest, async_ingest, c_callback, c_ingest_ref, _DEFAULT_CONFIG as config
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2017 Dianomic Systems"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_CONFIG_CATEGORY_NAME = 'HTTP_SOUTH'
_CONFIG_CATEGORY_DESCRIPTION = 'South Plugin HTTP Listener'
_NEW_CONFIG = {
'plugin': {
'description': 'South Plugin HTTP Listener',
'type': 'string',
'default': 'http_south'
},
'port': {
'description': 'Port to listen on',
'type': 'integer',
'default': '1234',
},
'host': {
'description': 'Address to accept data on',
'type': 'string',
'default': 'localhost',
},
'uri': {
'description': 'URI to accept data on',
'type': 'string',
'default': 'sensor-reading',
}
}
def test_plugin_contract():
# Evaluates if the plugin has all the required methods
assert callable(getattr(http_south, 'plugin_info'))
assert callable(getattr(http_south, 'plugin_init'))
assert callable(getattr(http_south, 'plugin_start'))
assert callable(getattr(http_south, 'plugin_shutdown'))
assert callable(getattr(http_south, 'plugin_reconfigure'))
def mock_request(data, loop):
payload = StreamReader(loop=loop)
payload.feed_data(data.encode())
payload.feed_eof()
protocol = mock.Mock()
app = mock.Mock()
headers = CIMultiDict([('CONTENT-TYPE', 'application/json')])
req = make_mocked_request('POST', '/sensor-reading', headers=headers,
protocol=protocol, payload=payload, app=app)
return req
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin_info():
assert http_south.plugin_info() == {
'name': 'HTTP South Listener',
'version': '1.5.0',
'mode': 'async',
'type': 'south',
'interface': '1.0',
'config': config
}
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin_init():
assert http_south.plugin_init(config) == config
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin_start(mocker, unused_port):
# GIVEN
port = {
'description': 'Port to listen on',
'type': 'integer',
'default': str(unused_port()),
}
config_data = copy.deepcopy(config)
mocker.patch.dict(config_data, {'port': port})
config_data['port']['value'] = config_data['port']['default']
config_data['host']['value'] = config_data['host']['default']
config_data['uri']['value'] = config_data['uri']['default']
config_data['enableHttp']['value'] = config_data['enableHttp']['default']
# WHEN
http_south.plugin_start(config_data)
# THEN
assert isinstance(config_data['app'], aiohttp.web.Application)
assert isinstance(config_data['handler'], aiohttp.web_server.Server)
# assert isinstance(config_data['server'], asyncio.base_events.Server)
http_south.loop.stop()
http_south.t._delete()
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin_start_exception(unused_port, mocker):
# GIVEN
port = {
'description': 'Port to listen on',
'type': 'integer',
'default': str(unused_port()),
}
config_data = copy.deepcopy(config)
mocker.patch.dict(config_data, {'port': port})
log_exception = mocker.patch.object(http_south._LOGGER, "exception")
# WHEN
http_south.plugin_start(config_data)
# THEN
assert 1 == log_exception.call_count
log_exception.assert_called_with("'value'")
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin_reconfigure(mocker, unused_port):
# GIVEN
port = {
'description': 'Port to listen on',
'type': 'integer',
'default': str(unused_port()),
}
config_data = copy.deepcopy(config)
mocker.patch.dict(config_data, {'port': port})
config_data['port']['value'] = config_data['port']['default']
config_data['host']['value'] = config_data['host']['default']
config_data['uri']['value'] = config_data['uri']['default']
config_data['enableHttp']['value'] = config_data['enableHttp']['default']
pstop = mocker.patch.object(http_south, '_plugin_stop', return_value=True)
log_info = mocker.patch.object(http_south._LOGGER, "info")
# WHEN
new_config = http_south.plugin_reconfigure(config_data, _NEW_CONFIG)
# THEN
assert _NEW_CONFIG == new_config
assert 3 == log_info.call_count
assert 1 == pstop.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin__stop(mocker, unused_port, loop):
# GIVEN
port = {
'description': 'Port to listen on',
'type': 'integer',
'default': str(unused_port()),
}
config_data = copy.deepcopy(config)
mocker.patch.dict(config_data, {'port': port})
config_data['port']['value'] = config_data['port']['default']
config_data['host']['value'] = config_data['host']['default']
config_data['uri']['value'] = config_data['uri']['default']
config_data['enableHttp']['value'] = config_data['enableHttp']['default']
log_exception = mocker.patch.object(http_south._LOGGER, "exception")
log_info = mocker.patch.object(http_south._LOGGER, "info")
# WHEN
http_south.plugin_start(config_data)
http_south._plugin_stop(config_data)
# THEN
assert 2 == log_info.call_count
calls = [call('Stopping South HTTP plugin.')]
log_info.assert_has_calls(calls, any_order=True)
assert 0 == log_exception.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
def test_plugin_shutdown(mocker, unused_port):
# GIVEN
port = {
'description': 'Port to listen on',
'type': 'integer',
'default': str(unused_port()),
}
config_data = copy.deepcopy(config)
mocker.patch.dict(config_data, {'port': port})
config_data['port']['value'] = config_data['port']['default']
config_data['host']['value'] = config_data['host']['default']
config_data['uri']['value'] = config_data['uri']['default']
config_data['enableHttp']['value'] = config_data['enableHttp']['default']
log_exception = mocker.patch.object(http_south._LOGGER, "exception")
log_info = mocker.patch.object(http_south._LOGGER, "info")
# WHEN
http_south.plugin_start(config_data)
http_south.plugin_shutdown(config_data)
# THEN
assert 3 == log_info.call_count
calls = [call('Stopping South HTTP plugin.'),
call('South HTTP plugin shut down.')]
log_info.assert_has_calls(calls, any_order=True)
assert 0 == log_exception.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("plugin", "south", "http")
@pytest.mark.skip(reason="server object is None in tests. To be investigated.")
def test_plugin_shutdown_error(mocker, unused_port, loop):
# GIVEN
port = {
'description': 'Port to listen on',
'type': 'integer',
'default': str(unused_port()),
}
config_data = copy.deepcopy(config)
mocker.patch.dict(config_data, {'port': port})
config_data['port']['value'] = config_data['port']['default']
config_data['host']['value'] = config_data['host']['default']
config_data['uri']['value'] = config_data['uri']['default']
config_data['enableHttp']['value'] = config_data['enableHttp']['default']
log_exception = mocker.patch.object(http_south._LOGGER, "exception")
log_info = mocker.patch.object(http_south._LOGGER, "info")
# WHEN
http_south.plugin_start(config_data)
server = config_data['server']
mocker.patch.object(server, 'wait_closed', side_effect=Exception)
with pytest.raises(Exception):
http_south.plugin_shutdown(config_data)
# THEN
assert 2 == log_info.call_count
calls = [call('Stopping South HTTP plugin.')]
log_info.assert_has_calls(calls, any_order=True)
assert 1 == log_exception.call_count
@pytest.allure.feature("unit")
@pytest.allure.story("services", "south", "ingest")
class TestHttpSouthIngest(object):
"""Unit tests foglamp.plugins.south.http_south.http_south.HttpSouthIngest
"""
@pytest.mark.asyncio
async def test_render_post_reading_ok(self, loop):
data = """[{
"timestamp": "2017-01-02T01:02:03.23232Z-05:00",
"asset": "sensor1",
"key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4",
"readings": {
"velocity": "500",
"temperature": {
"value": "32",
"unit": "kelvin"
}
}
}]"""
with patch.object(async_ingest, 'ingest_callback') as ingest_add_readings:
request = mock_request(data, loop)
config_data = copy.deepcopy(config)
config_data['assetNamePrefix']['value'] = config_data['assetNamePrefix']['default']
r = await HttpSouthIngest(config_data).render_post(request)
retval = json.loads(r.body.decode())
# Assert the POST request response
assert 200 == r.status
assert 'success' == retval['result']
assert 1 == ingest_add_readings.call_count
@pytest.mark.asyncio
async def test_render_post_sensor_values_ok(self, loop):
data = """[{
"timestamp": "2017-01-02T01:02:03.23232Z-05:00",
"asset": "sensor1",
"key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4",
"sensor_values": {
"velocity": "500",
"temperature": {
"value": "32",
"unit": "kelvin"
}
}
}]"""
with patch.object(async_ingest, 'ingest_callback') as ingest_add_readings:
request = mock_request(data, loop)
config_data = copy.deepcopy(config)
config_data['assetNamePrefix']['value'] = config_data['assetNamePrefix']['default']
r = await HttpSouthIngest(config_data).render_post(request)
retval = json.loads(r.body.decode())
# Assert the POST request response
assert 200 == r.status
assert 'success' == retval['result']
assert 1 == ingest_add_readings.call_count
@pytest.mark.asyncio
async def test_render_post_invalid_payload(self, loop):
data = "blah"
msg = 'Payload block must be a valid json'
with patch.object(async_ingest, 'ingest_callback') as ingest_add_readings:
with patch.object(http_south._LOGGER, 'exception') as log_exc:
with pytest.raises(aiohttp.web_exceptions.HTTPBadRequest) as ex:
request = mock_request(data, loop)
config_data = copy.deepcopy(config)
config_data['assetNamePrefix']['value'] = config_data['assetNamePrefix']['default']
r = await HttpSouthIngest(config_data).render_post(request)
assert 400 == r.status
assert str(ex).endswith(msg)
assert 1 == log_exc.call_count
log_exc.assert_called_once_with('%d: %s', 400, msg)
@pytest.mark.asyncio
async def test_render_post_reading_missing_delimiter(self, loop):
data = """{
"timestamp": "2017-01-02T01:02:03.23232Z-05:00",
"asset": "sensor1",
"key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4",
"readings": {
"velocity": "500",
"temperature": {
"value": "32",
"unit": "kelvin"
}
}"""
msg = 'Payload block must be a valid json'
with patch.object(async_ingest, 'ingest_callback') as ingest_add_readings:
with patch.object(http_south._LOGGER, 'exception') as log_exc:
with pytest.raises(aiohttp.web_exceptions.HTTPBadRequest) as ex:
request = mock_request(data, loop)
config_data = copy.deepcopy(config)
config_data['assetNamePrefix']['value'] = config_data['assetNamePrefix']['default']
r = await HttpSouthIngest(config_data).render_post(request)
assert 400 == r.status
assert str(ex).endswith(msg)
assert 1 == log_exc.call_count
log_exc.assert_called_once_with('%d: %s', 400, msg)
@pytest.mark.asyncio
async def test_render_post_reading_not_dict(self, loop):
    """Valid JSON whose 'readings' value is not a dict must be rejected with 400."""
    payload = """[{
"timestamp": "2017-01-02T01:02:03.23232Z-05:00",
"asset": "sensor2",
"key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4",
"readings": "500"
}]"""
    expected = 'readings must be a dictionary'
    with patch.object(async_ingest, 'ingest_callback'):
        with patch.object(http_south._LOGGER, 'exception') as log_exc:
            with pytest.raises(aiohttp.web_exceptions.HTTPBadRequest) as ex:
                req = mock_request(payload, loop)
                cfg = copy.deepcopy(config)
                cfg['assetNamePrefix']['value'] = cfg['assetNamePrefix']['default']
                resp = await HttpSouthIngest(cfg).render_post(req)
                assert 400 == resp.status  # not reached: render_post raises first
            assert str(ex).endswith(expected)
            assert 1 == log_exc.call_count
            log_exc.assert_called_once_with('%d: %s', 400, expected)
| 37.65252
| 145
| 0.628883
| 1,642
| 14,195
| 5.214373
| 0.139464
| 0.099276
| 0.043798
| 0.028031
| 0.781943
| 0.770965
| 0.744452
| 0.722261
| 0.70626
| 0.67893
| 0
| 0.023049
| 0.229799
| 14,195
| 376
| 146
| 37.75266
| 0.760084
| 0.035365
| 0
| 0.643564
| 0
| 0
| 0.253496
| 0.021674
| 0
| 0
| 0
| 0
| 0.135314
| 1
| 0.033003
| false
| 0
| 0.036304
| 0
| 0.075908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f4e20b9046214deea34ff422ace96a7d2bf3d021
| 89
|
py
|
Python
|
Programmers/Lv.1/sortedbykey.py
|
kangjunseo/C-
|
eafdf57a22b3a794d09cab045d6d60c2842ba347
|
[
"MIT"
] | 2
|
2021-08-30T12:37:57.000Z
|
2021-11-29T05:42:05.000Z
|
Programmers/Lv.1/sortedbykey.py
|
kangjunseo/C-
|
eafdf57a22b3a794d09cab045d6d60c2842ba347
|
[
"MIT"
] | null | null | null |
Programmers/Lv.1/sortedbykey.py
|
kangjunseo/C-
|
eafdf57a22b3a794d09cab045d6d60c2842ba347
|
[
"MIT"
] | null | null | null |
def solution(strings, n):
    """Sort *strings* by their n-th character, breaking ties lexicographically.

    The original performed a full sort followed by a second stable sort on
    the n-th character; a single sort on the composite key (s[n], s) yields
    the same ordering with one pass instead of two.

    :param strings: list of strings, each at least n+1 characters long
    :param n: character index used as the primary sort key
    :return: new sorted list (input is not mutated)
    """
    return sorted(strings, key=lambda s: (s[n], s))
| 44.5
| 88
| 0.741573
| 13
| 89
| 5.076923
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 89
| 1
| 89
| 89
| 0.835443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| false
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
760a43e17616619a4833f45eadb664b5e9d6b04f
| 174
|
py
|
Python
|
docarray/score/mixins/representer.py
|
fastflair/docarray
|
0bbdbc816b2f4a3b399779f6816875fbc1dfe862
|
[
"Apache-2.0"
] | 591
|
2022-01-09T14:39:59.000Z
|
2022-03-31T13:19:39.000Z
|
docarray/score/mixins/representer.py
|
fastflair/docarray
|
0bbdbc816b2f4a3b399779f6816875fbc1dfe862
|
[
"Apache-2.0"
] | 210
|
2022-01-10T07:59:29.000Z
|
2022-03-31T14:49:18.000Z
|
docarray/score/mixins/representer.py
|
fastflair/docarray
|
0bbdbc816b2f4a3b399779f6816875fbc1dfe862
|
[
"Apache-2.0"
] | 40
|
2022-01-09T14:52:20.000Z
|
2022-03-31T07:59:45.000Z
|
class RepresentMixin:
    """Mixin giving host classes a dict-based ``repr``.

    Host classes are expected to provide ``non_empty_fields`` (an iterable
    of attribute names); each named attribute is read via ``getattr``.
    """

    def __repr__(self):
        # Delegate to to_dict so the repr always mirrors the dict view.
        return repr(self.to_dict())

    def to_dict(self):
        """Return a plain dict of every field listed in ``non_empty_fields``."""
        collected = {}
        for field_name in self.non_empty_fields:
            collected[field_name] = getattr(self, field_name)
        return collected
| 24.857143
| 67
| 0.655172
| 26
| 174
| 4.076923
| 0.576923
| 0.150943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235632
| 174
| 6
| 68
| 29
| 0.796992
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0
| 0
| 0.4
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
76136d8a653a3065785f689d4f999c021b96fbc8
| 143
|
py
|
Python
|
dynamic_stack_decider/dynamic_stack_decider/src/dynamic_stack_decider/__init__.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 4
|
2018-12-18T21:05:22.000Z
|
2021-09-07T13:25:44.000Z
|
dynamic_stack_decider/dynamic_stack_decider/src/dynamic_stack_decider/__init__.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 36
|
2018-12-18T19:00:43.000Z
|
2021-11-24T18:50:55.000Z
|
dynamic_stack_decider/dynamic_stack_decider/src/dynamic_stack_decider/__init__.py
|
MosHumanoid/bitbots_thmos_meta
|
f45ccc362dc689b69027be5b0d000d2a08580de4
|
[
"MIT"
] | 2
|
2019-08-06T07:51:12.000Z
|
2019-08-12T06:32:59.000Z
|
from .dsd import DSD
from .abstract_decision_element import AbstractDecisionElement
from .abstract_action_element import AbstractActionElement
| 35.75
| 62
| 0.895105
| 16
| 143
| 7.75
| 0.5625
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083916
| 143
| 3
| 63
| 47.666667
| 0.946565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
521a7a273699207a5641e622ac67d74e497015f4
| 76
|
py
|
Python
|
lib/rpmpackager/rpm.py
|
peitur/rpm-tools
|
3352b18863b5d815383412266a62c8f5e3b7c70c
|
[
"Apache-2.0"
] | null | null | null |
lib/rpmpackager/rpm.py
|
peitur/rpm-tools
|
3352b18863b5d815383412266a62c8f5e3b7c70c
|
[
"Apache-2.0"
] | null | null | null |
lib/rpmpackager/rpm.py
|
peitur/rpm-tools
|
3352b18863b5d815383412266a62c8f5e3b7c70c
|
[
"Apache-2.0"
] | null | null | null |
import os, sys, re
class Rpm:
    """Placeholder wrapper around an RPM package; no state is set up yet."""

    def __init__(self):
        # Intentionally empty -- construction performs no work.
        pass
| 8.444444
| 25
| 0.526316
| 10
| 76
| 3.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.368421
| 76
| 8
| 26
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
525fa75ca5488a969f6ce6056f8becf030699137
| 142
|
py
|
Python
|
app/drivers/BaseSocialAuthDriver.py
|
vaibhavmule/masonite-social-login
|
5a15560801a4bdc6ef30ae5701405b66f63c9ae2
|
[
"MIT"
] | 6
|
2018-12-02T00:38:59.000Z
|
2019-07-09T02:07:26.000Z
|
app/drivers/BaseSocialAuthDriver.py
|
vaibhavmule/masonite-social-login
|
5a15560801a4bdc6ef30ae5701405b66f63c9ae2
|
[
"MIT"
] | null | null | null |
app/drivers/BaseSocialAuthDriver.py
|
vaibhavmule/masonite-social-login
|
5a15560801a4bdc6ef30ae5701405b66f63c9ae2
|
[
"MIT"
] | null | null | null |
"""Base social auth driver module.
"""
from masonite.drivers.BaseDriver import BaseDriver
class BaseSocialAuthDriver(BaseDriver):
    """Common base class for social-auth drivers.

    Adds no behavior of its own beyond Masonite's BaseDriver; concrete
    drivers subclass this to be discoverable as social-auth drivers.
    """
| 15.777778
| 50
| 0.774648
| 15
| 142
| 7.333333
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.140845
| 142
| 8
| 51
| 17.75
| 0.901639
| 0.21831
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
bfe89c9d6825e0f25b784957a49adfb5082335dc
| 142
|
py
|
Python
|
mainclausemodel/__init__.py
|
z-n-huang/SyntacticBootstrappingModel
|
6c2c7a46d7964ea8deac403f47a87b566e2acad2
|
[
"MIT"
] | null | null | null |
mainclausemodel/__init__.py
|
z-n-huang/SyntacticBootstrappingModel
|
6c2c7a46d7964ea8deac403f47a87b566e2acad2
|
[
"MIT"
] | null | null | null |
mainclausemodel/__init__.py
|
z-n-huang/SyntacticBootstrappingModel
|
6c2c7a46d7964ea8deac403f47a87b566e2acad2
|
[
"MIT"
] | null | null | null |
import data, model, experiment
from data import MainClauseData
from model import MainClauseModel
from experiment import MainClauseExperiment
| 23.666667
| 43
| 0.866197
| 16
| 142
| 7.6875
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119718
| 142
| 5
| 44
| 28.4
| 0.984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
870275adfcea027559bae7fb818e96884459b12d
| 144
|
py
|
Python
|
0101-0200/0136-Single Number/0136-Single Number.py
|
jiadaizhao/LeetCode
|
4ddea0a532fe7c5d053ffbd6870174ec99fc2d60
|
[
"MIT"
] | 49
|
2018-05-05T02:53:10.000Z
|
2022-03-30T12:08:09.000Z
|
0101-0200/0136-Single Number/0136-Single Number.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 11
|
2017-12-15T22:31:44.000Z
|
2020-10-02T12:42:49.000Z
|
0101-0200/0136-Single Number/0136-Single Number.py
|
jolly-fellow/LeetCode
|
ab20b3ec137ed05fad1edda1c30db04ab355486f
|
[
"MIT"
] | 28
|
2017-12-05T10:56:51.000Z
|
2022-01-26T18:18:27.000Z
|
from functools import reduce
import operator
class Solution:
    """LeetCode 0136 -- Single Number."""

    def singleNumber(self, nums) -> int:
        """Return the element of *nums* that has no duplicate.

        XOR-folding the whole list cancels every pair (a ^ a == 0),
        so only the unpaired value survives.
        """
        folded = reduce(operator.xor, nums)
        return folded
| 24
| 41
| 0.729167
| 18
| 144
| 5.833333
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 144
| 5
| 42
| 28.8
| 0.905172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
870c41ece9159153569d0278733222b6579420a8
| 2,920
|
py
|
Python
|
qcdb/tests/test_tu2_uhf.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 8
|
2019-03-28T11:54:59.000Z
|
2022-03-19T03:31:37.000Z
|
qcdb/tests/test_tu2_uhf.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 39
|
2018-10-31T23:02:18.000Z
|
2021-12-12T22:11:37.000Z
|
qcdb/tests/test_tu2_uhf.py
|
loriab/qccddb
|
d9e156ef8b313ac0633211fc6b841f84a3ddde24
|
[
"BSD-3-Clause"
] | 9
|
2018-03-12T20:51:50.000Z
|
2022-02-28T15:18:34.000Z
|
"""
from https://github.com/psi4/psi4/blob/master/tests/tu2-ch2-energy/input.dat
Sample UHF/6-31G** CH2 computation
"""
import pprint
import pytest
import qcdb
from .utils import *
# Reference SCF total energies for the tutorial CH2 UHF/6-31G** case.
tu2_scf_ene = -38.9250886434  # conventional SCF reference (used by the tests below)
tu2_scf_ene_df = -38.9253346246  # "_df" presumably density-fitted SCF -- TODO confirm; unused in this chunk
@using("psi4")
def test_tu2_uhf_psi4():
ch2 = qcdb.set_molecule(
"""
0 3
C
H 1 R
H 1 R 2 A
R = 2.05
A = 133.93
units au
"""
)
qcdb.set_keywords(
{
"basis": "6-31g**",
"reference": "uhf",
"scf_type": "pk",
}
)
print(ch2)
print(qcdb.get_active_options().print_changed())
ene, wfn = qcdb.energy("p4-scf", return_wfn=True)
pprint.pprint(wfn, width=200) # debug printing
assert compare_values(tu2_scf_ene, qcdb.variable("hf total energy"), 6, "energy")
assert compare("Psi4", wfn["provenance"]["creator"], "harness")
@using("cfour")
def test_tu2_uhf_cfour():
ch2 = qcdb.set_molecule(
"""
0 3
C
H 1 R
H 1 R 2 A
R = 2.05
A = 133.93
units au
"""
)
qcdb.set_keywords(
{
"basis": "6-31g**",
"reference": "uhf",
"puream": "cart",
}
)
print(ch2)
print(qcdb.get_active_options().print_changed())
ene, wfn = qcdb.energy("c4-scf", return_wfn=True)
pprint.pprint(wfn, width=200) # debug printing
assert compare_values(tu2_scf_ene, qcdb.variable("hf total energy"), 6, "energy")
assert compare("CFOUR", wfn["provenance"]["creator"], "harness")
@using("nwchem")
def test_tu2_uhf_nwchem():
ch2 = qcdb.set_molecule(
"""
0 3
C
H 1 R
H 1 R 2 A
R = 2.05
A = 133.93
units au
"""
)
qcdb.set_keywords(
{
"basis": "6-31g**",
"reference": "uhf",
}
)
print(ch2)
print(qcdb.get_active_options().print_changed())
ene, wfn = qcdb.energy("nwc-scf", return_wfn=True)
pprint.pprint(wfn, width=200) # debug printing
assert compare_values(tu2_scf_ene, qcdb.variable("hf total energy"), 6, "energy")
assert compare("NWChem", wfn["provenance"]["creator"], "harness")
@using("gamess")
def test_tu2_uhf_gamess():
ch2 = qcdb.set_molecule(
"""
0 3
C
H 1 R
H 1 R 2 A
R = 2.05
A = 133.93
units au
"""
)
qcdb.set_keywords(
{
"basis": "6-31g**",
"reference": "uhf",
}
)
print(ch2)
print(qcdb.get_active_options().print_changed())
ene, wfn = qcdb.energy("gms-scf", return_wfn=True)
pprint.pprint(wfn, width=200) # debug printing
assert compare_values(tu2_scf_ene, qcdb.variable("hf total energy"), 6, "energy")
assert compare("GAMESS", wfn["provenance"]["creator"], "harness")
| 20.56338
| 85
| 0.540753
| 377
| 2,920
| 4.045093
| 0.217507
| 0.036721
| 0.015738
| 0.034098
| 0.765902
| 0.702951
| 0.702951
| 0.702951
| 0.702951
| 0.702951
| 0
| 0.067265
| 0.312671
| 2,920
| 141
| 86
| 20.70922
| 0.692576
| 0.058904
| 0
| 0.444444
| 0
| 0
| 0.161062
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.055556
| false
| 0
| 0.055556
| 0
| 0.111111
| 0.180556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
870ca0c3b5d0ad6818eaf2fe5e85eadf5056beb0
| 118
|
py
|
Python
|
messenger_channels/querysets/__init__.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
messenger_channels/querysets/__init__.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
messenger_channels/querysets/__init__.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
from .chat_qs import(get_chat_ids, get_pvchat_ids,
get_validated_chat_id, get_pvchat_ids_cached)
| 39.333333
| 66
| 0.720339
| 18
| 118
| 4.111111
| 0.555556
| 0.162162
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.228814
| 118
| 2
| 67
| 59
| 0.813187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
870d77ebf741cc9432166d1be49b44d394765e30
| 1,595
|
py
|
Python
|
sample_applications/AgentlessIdpSample/Utils/UrlUtil.py
|
pingidentity/pf-agentless-ik-sample-python
|
59c0f73ce30f7518201707bda16bf04ca8c362e7
|
[
"Apache-2.0"
] | null | null | null |
sample_applications/AgentlessIdpSample/Utils/UrlUtil.py
|
pingidentity/pf-agentless-ik-sample-python
|
59c0f73ce30f7518201707bda16bf04ca8c362e7
|
[
"Apache-2.0"
] | null | null | null |
sample_applications/AgentlessIdpSample/Utils/UrlUtil.py
|
pingidentity/pf-agentless-ik-sample-python
|
59c0f73ce30f7518201707bda16bf04ca8c362e7
|
[
"Apache-2.0"
] | null | null | null |
from sample_applications.AgentlessIdpSample.Configuration.ConfigurationManager import ConfigurationManager
from sample_applications.AgentlessIdpSample.Utils.IdpConstants import IdpConstants
import urllib.parse as url
def configure_url(request):
    """Absolute URL of this app's agentless /configure endpoint."""
    agentless_base = request.url_root + IdpConstants.AGENTLESS_BASE
    return agentless_base + "/configure"
def login_url(request):
    """Absolute URL of this app's agentless /login endpoint."""
    agentless_base = request.url_root + IdpConstants.AGENTLESS_BASE
    return agentless_base + "/login"
def resume_url(request):
    """Absolute URL of this app's agentless /resume endpoint."""
    agentless_base = request.url_root + IdpConstants.AGENTLESS_BASE
    return agentless_base + "/resume"
def resume_to_pf_url(request):
    """PingFederate resume URL carrying the REF and URL-encoded TargetResource."""
    pf_base = ConfigurationManager.get_configuration(IdpConstants.BASE_PF_URL)
    encoded_target = url.quote_plus(
        ConfigurationManager.get_configuration(IdpConstants.TARGET_URL))
    return (pf_base + request.form[IdpConstants.RESUME_PATH]
            + "?REF=" + request.form[IdpConstants.REF]
            + "&TargetResource=" + encoded_target)
def resume_logout_url(request, reference_id):
    """PingFederate resume URL for a logout flow, keyed by *reference_id*."""
    pf_base = ConfigurationManager.get_configuration(IdpConstants.BASE_PF_URL)
    return pf_base + request.form[IdpConstants.RESUME_PATH] + "?REF=" + reference_id
def sso_url():
    """URL that starts SP-initiated SSO against the configured partner IdP."""
    pf_base = ConfigurationManager.get_configuration(IdpConstants.BASE_PF_URL)
    partner_id = ConfigurationManager.get_configuration(IdpConstants.PARTNER_ENTITY_ID)
    return pf_base + IdpConstants.START_SP_SSO + "?PartnerIdpId=" + partner_id
def pickup_url(reference_id):
    """Agentless reference-ID pickup endpoint URL for *reference_id*."""
    pf_base = ConfigurationManager.get_configuration(IdpConstants.BASE_PF_URL)
    return pf_base + IdpConstants.PICKUP_ENDPOINT + "?REF=" + reference_id
def dropoff_url():
    """Agentless reference-ID drop-off endpoint URL."""
    pf_base = ConfigurationManager.get_configuration(IdpConstants.BASE_PF_URL)
    return pf_base + IdpConstants.DROPOFF_ENDPOINT
| 37.093023
| 118
| 0.774295
| 170
| 1,595
| 6.970588
| 0.258824
| 0.059072
| 0.212658
| 0.283544
| 0.519831
| 0.519831
| 0.519831
| 0.519831
| 0.519831
| 0.370464
| 0
| 0
| 0.137304
| 1,595
| 42
| 119
| 37.97619
| 0.861192
| 0
| 0
| 0.230769
| 0
| 0
| 0.042633
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| false
| 0
| 0.115385
| 0.307692
| 0.730769
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
8736f469116aa170133fd6a883913f3ccafab238
| 206
|
wsgi
|
Python
|
src/lobber.wsgi
|
SUNET/lobber
|
2ba707ebd8a6513bff7236262930a24f5e0e9492
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2015-11-10T17:08:57.000Z
|
2015-11-10T17:08:57.000Z
|
src/lobber.wsgi
|
SUNET/lobber
|
2ba707ebd8a6513bff7236262930a24f5e0e9492
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/lobber.wsgi
|
SUNET/lobber
|
2ba707ebd8a6513bff7236262930a24f5e0e9492
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
import os
import sys

# WSGI entry point for the Lobber Django application.
# Django reads DJANGO_SETTINGS_MODULE at import time, so it must be set
# (and the source tree made importable) before the django import below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'lobber.settings'
sys.path.append('/var/www/lobber/src')

import django.core.handlers.wsgi

# Module-level name that mod_wsgi (and other WSGI servers) look up.
application = django.core.handlers.wsgi.WSGIHandler()
| 22.888889
| 56
| 0.786408
| 29
| 206
| 5.517241
| 0.62069
| 0.125
| 0.225
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072816
| 206
| 8
| 57
| 25.75
| 0.837696
| 0
| 0
| 0
| 0
| 0
| 0.271845
| 0.106796
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
875e56385f36c3b100365ec005dbcba544bb3e82
| 256
|
py
|
Python
|
study/curso-em-video/exercises/108.py
|
jhonatanmaia/python
|
d53c64e6bab598c7e85813fd3f107c6f23c1fc46
|
[
"MIT"
] | null | null | null |
study/curso-em-video/exercises/108.py
|
jhonatanmaia/python
|
d53c64e6bab598c7e85813fd3f107c6f23c1fc46
|
[
"MIT"
] | null | null | null |
study/curso-em-video/exercises/108.py
|
jhonatanmaia/python
|
d53c64e6bab598c7e85813fd3f107c6f23c1fc46
|
[
"MIT"
] | null | null | null |
import utilidadescev.moedas as m

# Exercise 108: read a price from the user and print its half, its double
# and a 10% increase, formatted by the course's `moedas` helper module.
p=float(input('Digite o preço: R$'))
print(f'A metade de {m.formatacao(p)} é {m.formatacao(m.metade(p))}')
print(f'O dobro {m.formatacao(p)} é {m.formatacao(m.dobro(p))}')
# aumentar(p, 10) presumably returns the price increased by 10% -- TODO confirm in utilidadescev
print(f'Aumentando 10%, temos {m.aumentar(p,10)}')
| 42.666667
| 69
| 0.691406
| 48
| 256
| 3.6875
| 0.5
| 0.248588
| 0.135593
| 0.146893
| 0.282486
| 0.282486
| 0.282486
| 0
| 0
| 0
| 0
| 0.017167
| 0.089844
| 256
| 6
| 70
| 42.666667
| 0.742489
| 0
| 0
| 0
| 0
| 0.4
| 0.66537
| 0.206226
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.6
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
87661fa847e8297fdb465b2ef515d13896bb35d3
| 32,680
|
py
|
Python
|
src/trails/visualize_results.py
|
BenDickens/trails
|
a89a1a901c7be38cdcb7a59339587e518ab8f14d
|
[
"MIT"
] | 4
|
2020-09-14T07:20:19.000Z
|
2021-04-22T14:23:04.000Z
|
src/trails/visualize_results.py
|
BenDickens/trails
|
a89a1a901c7be38cdcb7a59339587e518ab8f14d
|
[
"MIT"
] | 5
|
2021-03-17T17:02:27.000Z
|
2021-08-31T10:09:38.000Z
|
src/trails/visualize_results.py
|
BenDickens/trails
|
a89a1a901c7be38cdcb7a59339587e518ab8f14d
|
[
"MIT"
] | 3
|
2020-09-07T07:35:28.000Z
|
2021-04-22T14:23:39.000Z
|
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import time
import pygeos
import geopandas as gpd
import contextily as cx
import traceback
import seaborn as sns
from matplotlib.ticker import MaxNLocator
def _boxplot_panel(ax, df, y_col, trip_kind, ylabel, max_frac_counter):
    """Draw one boxplot of trip percentages vs. percentage of network removed."""
    sns.boxplot(x="frac_counter", y=y_col, data=df, ax=ax, fliersize=0,
                order=np.arange(max_frac_counter + 2), palette="rocket_r", linewidth=0.5)
    ax.set_ylabel(ylabel, fontsize=13)
    ax.set_xlabel('Percentage of network removed', fontsize=13)
    ax.set_title('Boxplots of {} trips'.format(trip_kind), fontsize=15)
    ax.set_xlim([0, max_frac_counter + 2])
    ax.set_ylim([0, 102])
    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
    ax.set_xticks(np.arange(0, max_frac_counter + 2, 10))


def _bar_panel(ax, df, pct_col, id_col, title):
    """Bar chart of the non-zero values of *pct_col*, sorted descending.

    Shows a bold "No ... trips!" message instead when every value is zero.
    """
    trip_kind = 'isolated' if pct_col == 'pct_isolated' else 'delayed'
    nonzero = df.loc[df[pct_col] != 0].reset_index(drop=True)
    if len(nonzero) == 0:
        ax.set_xticks([])
        ax.set_yticks([])
        ax.text(0.5, 0.5, 'No {} trips!'.format(trip_kind), horizontalalignment='center',
                verticalalignment='center', transform=ax.transAxes, fontweight='bold', fontsize=15)
        return
    nonzero = nonzero.sort_values(pct_col, ascending=False)
    nonzero.plot.bar(x=id_col, y=pct_col, ax=ax, legend=False)
    ax.set_xticks([])
    ax.set_title(title, fontsize=13)
    ax.set_xlabel('')


def _stats_panel(ax, df, n_grids=None):
    """Text panel summarising how many units caused isolated/delayed trips.

    *n_grids* is the total grid-cell count for grid-based attacks; pass None
    for the edge-targeted attack (wording switches from "grids" to "edges").
    """
    ax.axis('off')
    unit = 'grids' if n_grids is not None else 'edges'
    isolated = df.loc[df.pct_isolated != 0]
    delayed = df.loc[df.pct_delayed != 0]
    if len(df) > 0:
        perc_isolated_trips = round(len(isolated) / len(df) * 100, 2)
        perc_delayed_trips = round(len(delayed) / len(df) * 100, 2)
        avg_isolated = round(isolated.pct_isolated.mean(), 2) if len(isolated) > 0 else 0
        avg_delayed = round(delayed.pct_delayed.mean(), 2) if len(delayed) > 0 else 0
    else:
        perc_isolated_trips = perc_delayed_trips = avg_isolated = avg_delayed = 0
    opts = dict(horizontalalignment='left', verticalalignment='center', transform=ax.transAxes)
    if n_grids is not None:
        ax.text(0, 0.8, 'Number of grids in country: {}'.format(n_grids), **opts)
        ax.text(0, 0.6, 'Number of grids with roads: {}'.format(len(df)), **opts)
    else:
        ax.text(0, 0.8, 'Number of edges: {}'.format(len(df)), **opts)
    ax.text(0, 0.5, '% {} causing isolated trips: {}'.format(unit, perc_isolated_trips), **opts)
    ax.text(0, 0.4, 'Average % of trips isolated: {}'.format(avg_isolated), **opts)
    ax.text(0, 0.3, '% {} causing delayed trips: {}'.format(unit, perc_delayed_trips), **opts)
    ax.text(0, 0.2, 'Average % of trips delayed: {}'.format(avg_delayed), **opts)


def plot_all_atacks():
    """Write two percolation-overview PNGs per country under C:\\Data.

    For every country in the global-information workbook, loads the
    random/targeted/local attack percolation results plus the grid files
    and produces (1) a 2x3 boxplot figure for the random attacks with and
    without OD buffer, and (2) a 4x3 figure of bar charts and text
    summaries for the 0.5/0.1/0.05-degree local attacks and the
    edge-targeted attack. Countries with missing input files are skipped.
    """
    # set data paths to results
    data_random_attack = r'C:\Data\percolation_results_random_attack_regular'
    data_random_attack_od_buffer = r'C:\Data\percolation_results_random_attack_od_buffer'
    data_targeted_attack = r'C:\Data\percolation_results_targeted_attack'
    data_local_attack_05 = r'C:\Data\percolation_results_local_attack_05'
    data_local_attack_01 = r'C:\Data\percolation_results_local_attack_01'
    data_local_attack_005 = r'C:\Data\percolation_results_local_attack_005'
    data_path_met = r'C:\Data\percolation_metrics'
    data_path_grids = r'C:\Data\percolation_grids'
    # file to get full country names
    glob_info = pd.read_excel(r'C:\Projects\trails\data\global_information.xlsx')
    # ISO code -> full country name; hoisted out of the loop (loop-invariant)
    country_names = dict(zip(glob_info.ISO_3digit, glob_info.Country))
    # directory listing is only needed to locate each country's metrics csv
    met_files = os.listdir(data_path_met)

    for country in glob_info.ISO_3digit.values:
        network = 0  # main network of the country
        file = '{}_{}_results.csv'.format(country, network)
        try:
            # load metrics (unused below, but a failed load means the country
            # was never fully processed, so it is skipped)
            df_metrics = pd.read_csv(
                os.path.join(data_path_met, [x for x in met_files if file[:5] in x][0]))
            # load percolation results; frac_counter becomes a percentage
            df_random = pd.read_csv(os.path.join(data_random_attack, file), index_col=[0])
            df_random_buffer = pd.read_csv(os.path.join(data_random_attack_od_buffer, file), index_col=[0])
            df_random.frac_counter = df_random.frac_counter * 100
            df_random_buffer.frac_counter = df_random_buffer.frac_counter * 100
            df_target = pd.read_csv(os.path.join(data_targeted_attack, file), index_col=[0])
            df_local_05 = pd.read_csv(os.path.join(data_local_attack_05, file), index_col=[0])
            df_local_01 = pd.read_csv(os.path.join(data_local_attack_01, file), index_col=[0])
            df_local_005 = pd.read_csv(os.path.join(data_local_attack_005, file), index_col=[0])
            # load grids (only their row counts are used, in the stats panels)
            grid_05 = pd.read_csv(os.path.join(data_path_grids, '{}_{}_05.csv'.format(country, network)))
            grid_01 = pd.read_csv(os.path.join(data_path_grids, '{}_{}_01.csv'.format(country, network)))
            grid_005 = pd.read_csv(os.path.join(data_path_grids, '{}_{}_005.csv'.format(country, network)))
        except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            # any missing or unreadable input file -> skip this country
            continue

        max_frac_counter = 100  # df_random.frac_counter.max()

        # Figure 1: 2x3 boxplots. Rows: random attack / random attack with OD
        # buffer; columns: isolated / delayed / unaffected trips.
        fig, axs = plt.subplots(2, 3, figsize=(15, 11))
        panel_cols = [('pct_isolated', 'isolated'),
                      ('pct_delayed', 'delayed'),
                      ('pct_unaffected', 'unaffected')]
        for iter_, ax in enumerate(axs.flatten()):
            source_df = df_random if iter_ < 3 else df_random_buffer
            y_col, trip_kind = panel_cols[iter_ % 3]
            ylabel = 'Percentage of trips' if iter_ % 3 == 0 else ''
            _boxplot_panel(ax, source_df, y_col, trip_kind, ylabel, max_frac_counter)
        plt.figtext(0.5, 0.95, "Random attack", ha="center", va="top",
                    fontsize=18, color="b", fontweight='bold')
        plt.figtext(0.5, 0.5, "Random attack with OD buffer", ha="center", va="top",
                    fontsize=18, color="b", fontweight='bold')
        plt.subplots_adjust(hspace=0.4)
        plt.suptitle('Main network of {}'.format(country_names[country]),
                     fontsize=20, fontweight='bold', y=1)
        plt.savefig(os.path.join(r'C:\Data', 'figures_random_attack', '{}.png'.format(country)), dpi=150)
        plt.clf()

        # Figure 2: 4x3 grid. One row per attack resolution (0.5/0.1/0.05
        # degree grids, then individual edges): isolated bars, delayed bars,
        # and a text summary.
        fig, axs = plt.subplots(4, 3, figsize=(15, 15))
        flat_axs = axs.flatten()
        rows = [
            (df_local_05, 'grid_no', '0.5 degree', len(grid_05)),
            (df_local_01, 'grid_no', '0.1 degree', len(grid_01)),
            (df_local_005, 'grid_no', '0.05 degree', len(grid_005)),
            (df_target, 'edge_no', 'Individual edge', None),
        ]
        for row, (df_res, id_col, label, n_grids) in enumerate(rows):
            _bar_panel(flat_axs[3 * row], df_res, 'pct_isolated', id_col,
                       '{} % isolated trips'.format(label))
            _bar_panel(flat_axs[3 * row + 1], df_res, 'pct_delayed', id_col,
                       '{} % delayed trips'.format(label))
            _stats_panel(flat_axs[3 * row + 2], df_res, n_grids)
        plt.suptitle('Main network of {}'.format(country_names[country]),
                     fontsize=20, fontweight='bold', y=0.92)
        plt.savefig(os.path.join(r'C:\Data', 'figures_target_local_attack', '{}.png'.format(country)), dpi=150)
        plt.clf()
def plot_percolation_full():
    """Create and save a six-panel overview figure for every percolation result.

    For each network in the random-attack results directory, plots boxplots of
    isolated / unaffected / delayed trips and surplus losses against the
    percentage of the network removed, plus the road network itself, and saves
    the figure to ``../../figures/<net_name>_results.png``. Networks that raise
    during processing are collected in ``save_failed`` and printed at the end.

    Notes
    -----
    Fixes vs. the previous revision: bare ``except:`` narrowed to
    ``except Exception``, unused locals removed, the ISO->country lookup dict
    hoisted out of the loop, and the "surpluss" typo in labels corrected.
    """
    # set data paths to results
    data_path_perc = r'C:\Data\percolation_results_random_attack_regular'
    data_path_met = r'C:\Data\percolation_metrics'
    data_path_net = r'C:\Data\percolation_networks'
    # file to get full country names; build the ISO_3digit -> Country lookup once
    glob_info = pd.read_excel(r'C:\Projects\trails\data\global_information.xlsx')
    iso_to_country = dict(zip(glob_info.ISO_3digit, glob_info.Country))
    # get all files from data paths
    perc_files = os.listdir(data_path_perc)
    met_files = os.listdir(data_path_met)
    net_files = os.listdir(data_path_net)
    # save the failed ones, so we can check them later
    save_failed = []
    # create figure once; the same axes are cleared and reused for every network
    fig, axs = plt.subplots(3, 2, figsize=(15, 20))
    for iter1, file in enumerate(perc_files):
        # get name of percolation analysis (first five chars, e.g. 'ABC_0')
        net_name = file[:5]
        try:
            # skip networks whose figure already exists
            if os.path.isfile(os.path.join('..', '..', 'figures', '{}_results.png'.format(net_name))):
                print(net_name + " already finished!")
                continue
            # load metrics
            df_metrics = pd.read_csv(os.path.join(data_path_met, [x for x in met_files if file[:5] in x][0]))
            # load percolation results; frac_counter becomes a percentage
            df = pd.read_csv(os.path.join(data_path_perc, file), index_col=[0])
            df.frac_counter = df.frac_counter * 100
            # remove all results where it is pretty much done, so we can zoom onto the interesting part
            df = df.loc[df.pct_isolated < 99.5]
            max_frac_counter = df.frac_counter.max()
            df_isolated = pd.DataFrame([df.frac_counter.values, df.pct_isolated.values,
                                        df.pct_unaffected.values, df.pct_delayed.values]).T
            df_isolated.columns = ['frac_counter', 'pct_isolated', 'pct_unaffected', 'pct_delayed']
            df_sloss = pd.DataFrame([df.frac_counter.values, df.total_pct_surplus_loss_e1.values,
                                     df.total_pct_surplus_loss_e2.values]).T
            df_sloss.columns = ['frac_counter', 'total_pct_surplus_loss_e1', 'total_pct_surplus_loss_e2']
            # load network (feather with WKB geometry -> GeoDataFrame in WGS84)
            network = pd.read_feather(os.path.join(data_path_net, [x for x in net_files if file[:5] in x][0]))
            network.geometry = pygeos.from_wkb(network.geometry)
            network = gpd.GeoDataFrame(network)
            network.crs = 4326
            # a net_name ending in '0' marks the country's main network
            mainnet = 'yes'
            if net_name[4] != '0':
                mainnet = 'no, #{}'.format(net_name)
            # clear the shared axes before drawing the next network
            if iter1 > 0:
                for ax in axs.flatten():
                    ax.clear()
            # and plot
            for iter2, ax in enumerate(axs.flatten()):
                if iter2 == 0:
                    sns.boxplot(x="frac_counter", y="pct_isolated", data=df_isolated, ax=ax, fliersize=0,
                                order=np.arange(max_frac_counter + 2), palette="rocket_r", linewidth=0.5)
                    ax.set_ylabel('Percentage of isolated trips', fontsize=13)
                    ax.set_xlabel('Percentage of network removed', fontsize=13)
                    ax.set_title('Boxplots of isolated trips', fontsize=15, fontweight='bold')
                    ax.set_xlim([0, max_frac_counter + 2])
                    ax.set_xticks(np.arange(0, max_frac_counter + 2, 5))
                    ax.set_ylim([0, 102])
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                elif iter2 == 1:
                    network.plot(column='highway', legend=True, ax=ax)
                    # basemap tiles can fail at the auto-chosen zoom level; retry with a fixed zoom
                    try:
                        cx.add_basemap(ax, crs=network.crs.to_string(), alpha=0.5)
                    except Exception:
                        cx.add_basemap(ax, crs=network.crs.to_string(), alpha=0.5, zoom=10)
                    ax.set_title('Road network', fontsize=15, fontweight='bold')
                elif iter2 == 2:
                    sns.boxplot(x="frac_counter", y="pct_unaffected", data=df_isolated, ax=ax, fliersize=0,
                                order=np.arange(max_frac_counter + 2), palette="rocket_r", linewidth=0.5)
                    ax.set_ylabel('Percentage of unaffected trips', fontsize=13)
                    ax.set_xlabel('Percentage of network removed', fontsize=13)
                    ax.set_title('Boxplots of unaffected trips', fontsize=15, fontweight='bold')
                    ax.set_xlim([0, max_frac_counter + 2])
                    ax.set_xticks(np.arange(0, max_frac_counter + 2, 5))
                    ax.set_ylim([0, 102])
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                elif iter2 == 3:
                    sns.boxplot(x="frac_counter", y="pct_delayed", data=df_isolated, ax=ax, fliersize=0,
                                order=np.arange(max_frac_counter + 2), palette="rocket_r", linewidth=0.5)
                    ax.set_ylabel('Percentage of delayed trips', fontsize=13)
                    ax.set_xlabel('Percentage of network removed', fontsize=13)
                    ax.set_title('Boxplots of delayed trips', fontsize=15, fontweight='bold')
                    ax.set_xlim([0, max_frac_counter + 2])
                    ax.set_xticks(np.arange(0, max_frac_counter + 2, 5))
                    ax.set_ylim([0, 102])
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                elif iter2 == 4:
                    sns.boxplot(x="frac_counter", y="total_pct_surplus_loss_e1", data=df_sloss, ax=ax, fliersize=0,
                                order=np.arange(max_frac_counter + 2), palette="rocket_r", linewidth=0.5)
                    ax.set_ylabel('Percentage of surplus loss (e1)', fontsize=13)
                    ax.set_xlabel('Percentage of network removed', fontsize=13)
                    ax.set_xlim([0, max_frac_counter + 2])
                    ax.set_xticks(np.arange(0, max_frac_counter + 2, 5))
                    ax.set_ylim([0, 102])
                    ax.set_title('Boxplots of surplus loss e1', fontsize=15, fontweight='bold')
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                elif iter2 == 5:
                    sns.boxplot(x="frac_counter", y="total_pct_surplus_loss_e2", data=df_sloss, ax=ax, fliersize=0,
                                order=np.arange(max_frac_counter + 2), palette="rocket_r", linewidth=0.5)
                    ax.set_ylabel('Percentage of surplus loss (e2)', fontsize=13)
                    ax.set_xlabel('Percentage of network removed', fontsize=13)
                    ax.set_xlim([0, max_frac_counter + 2])
                    ax.set_xticks(np.arange(0, max_frac_counter + 2, 5))
                    ax.set_ylim([0, 102])
                    ax.set_title('Boxplots of surplus loss e2', fontsize=15, fontweight='bold')
                    ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                    # annotate the last panel with the network-level metrics
                    ax.text(max_frac_counter * 0.5, 10.5,
                            'Main Network: {} \nEdges: {} \nDensity: {} \nClique_No: {} \nAssortativity: {} \nDiameter: {} \nMax_Degree: {}'.format(
                                mainnet,
                                df_metrics.Edge_No.values[0],
                                np.round(df_metrics.Density.values[0], 7),
                                df_metrics.Clique_No.values[0],
                                np.round(df_metrics.Assortativity.values[0], 7),
                                df_metrics.Diameter.values[0],
                                df_metrics.Max_Degree.values[0],
                            ), fontsize=15)
            if net_name[4] == '0':
                # a few territories are missing from the global info file; use a manual fallback
                if net_name[:3] in ['HKG', 'TWN', 'MNP', 'MAC', 'MHL', 'GUM']:
                    name_dict_errors = {'HKG': "Hong Kong",
                                        'TWN': "Taiwan",
                                        'MNP': "Northern Mariana Islands",
                                        'MAC': "Macau",
                                        'MHL': "Marshall Islands",
                                        'GUM': "Guam"
                                        }
                    plt.suptitle('Main network of {}'.format(name_dict_errors[net_name[:3]]),
                                 fontsize=20, fontweight='bold', y=0.92)
                else:
                    plt.suptitle('Main network of {}'.format(iso_to_country[net_name[:3]]),
                                 fontsize=20, fontweight='bold', y=0.92)
            else:
                plt.suptitle('Subnetwork of {}'.format(iso_to_country[net_name[:3]]),
                             fontsize=20, fontweight='bold', y=0.92)
            plt.savefig(os.path.join('..', '..', 'figures', '{}_results.png'.format(net_name)))
        except Exception as e:
            # best-effort: record the failure and keep going with the next network
            print(net_name + " failed because of {}".format(e))
            print(traceback.format_exc())
            save_failed.append(net_name)
    print(save_failed)
# Script entry point: regenerate all percolation overview figures.
if __name__ == '__main__':
    plot_percolation_full()
| 58.149466
| 228
| 0.551744
| 3,870
| 32,680
| 4.432817
| 0.077003
| 0.031186
| 0.029379
| 0.028855
| 0.84949
| 0.795512
| 0.776508
| 0.714719
| 0.697173
| 0.656194
| 0
| 0.039795
| 0.331793
| 32,680
| 561
| 229
| 58.253119
| 0.745798
| 0.016799
| 0
| 0.481481
| 0
| 0.002315
| 0.135254
| 0.022157
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00463
| false
| 0
| 0.027778
| 0
| 0.032407
| 0.009259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5e72ffc99558a2e12842949bfd148624cf343e5d
| 232
|
py
|
Python
|
isl/trainer/__init__.py
|
HenryLee97/isl
|
0eb357bd45c5ce3ab3ef060deb84707975049d37
|
[
"MIT"
] | 2
|
2021-12-14T10:43:53.000Z
|
2021-12-14T12:46:50.000Z
|
isl/trainer/__init__.py
|
HenryLee97/isl
|
0eb357bd45c5ce3ab3ef060deb84707975049d37
|
[
"MIT"
] | null | null | null |
isl/trainer/__init__.py
|
HenryLee97/isl
|
0eb357bd45c5ce3ab3ef060deb84707975049d37
|
[
"MIT"
] | null | null | null |
from isl.trainer.loss import mlploss_trainer
from isl.trainer.loss import mlploss_validation
from isl.trainer.simple import simple_trainer
from isl.trainer.simple import simple_validation
from isl.trainer.swarm import swarm_trainer
| 38.666667
| 48
| 0.87069
| 35
| 232
| 5.628571
| 0.257143
| 0.177665
| 0.35533
| 0.182741
| 0.639594
| 0.639594
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086207
| 232
| 5
| 49
| 46.4
| 0.929245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5e812b4e7177f773412cc163201d2a4127f341ca
| 158
|
py
|
Python
|
pylbm/generator/__init__.py
|
Mopolino8/pylbm
|
b457ccdf1e7a1009807bd1136a276886f81a9e7d
|
[
"BSD-3-Clause"
] | 106
|
2016-09-13T07:19:17.000Z
|
2022-03-19T13:41:55.000Z
|
pylbm/generator/__init__.py
|
Mopolino8/pylbm
|
b457ccdf1e7a1009807bd1136a276886f81a9e7d
|
[
"BSD-3-Clause"
] | 53
|
2017-09-18T04:51:19.000Z
|
2022-01-19T21:36:23.000Z
|
pylbm/generator/__init__.py
|
gouarin/pylbm
|
fd4419933e05b85be364232fddedfcb4f7275e1f
|
[
"BSD-3-Clause"
] | 33
|
2016-06-17T13:21:17.000Z
|
2021-11-11T16:57:46.000Z
|
from .codegen import codegen, make_routine
from .ast import For, If, IdxRange, IndexedIntBase
from .autowrap import autowrap
from .generator import Generator
| 31.6
| 50
| 0.822785
| 21
| 158
| 6.142857
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126582
| 158
| 4
| 51
| 39.5
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0d8108c233de172f870c01b0c3a19b13ef758293
| 179
|
py
|
Python
|
app/routes/businesses/service/business_metrics/dto/media_topics_output.py
|
mampilly/backend-global
|
a2248758d521bf7f136fbc5fd12902448d137b33
|
[
"MIT"
] | null | null | null |
app/routes/businesses/service/business_metrics/dto/media_topics_output.py
|
mampilly/backend-global
|
a2248758d521bf7f136fbc5fd12902448d137b33
|
[
"MIT"
] | null | null | null |
app/routes/businesses/service/business_metrics/dto/media_topics_output.py
|
mampilly/backend-global
|
a2248758d521bf7f136fbc5fd12902448d137b33
|
[
"MIT"
] | null | null | null |
import datetime
from typing import List
from pydantic import BaseModel
class MediaTopicsOutput(BaseModel):
    """DTO carrying the media topics recorded for one platform on one date."""

    # platform identifier (free-form string; presumably a social-media platform
    # name — confirm against the business-metrics service that builds this DTO)
    platform: str
    # the calendar date these topics apply to
    date: datetime.date
    # list of topic labels associated with the platform's media on that date
    media_topics: List[str]
| 17.9
| 35
| 0.77095
| 22
| 179
| 6.227273
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.178771
| 179
| 9
| 36
| 19.888889
| 0.931973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.428571
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0db2146774fe88bf82ca29b9dbdcfbefc45c1066
| 126
|
py
|
Python
|
labdrivers/labdrivers/oxford/__init__.py
|
RMUlti/Alex
|
9ab4fb97315beec9c4d7f6be02d091c9eaf5f22c
|
[
"MIT"
] | null | null | null |
labdrivers/labdrivers/oxford/__init__.py
|
RMUlti/Alex
|
9ab4fb97315beec9c4d7f6be02d091c9eaf5f22c
|
[
"MIT"
] | null | null | null |
labdrivers/labdrivers/oxford/__init__.py
|
RMUlti/Alex
|
9ab4fb97315beec9c4d7f6be02d091c9eaf5f22c
|
[
"MIT"
] | null | null | null |
from .ips120 import Ips120
from .itc503 import Itc503
from .mercuryips import MercuryIps
from .triton200 import Triton200
| 25.2
| 35
| 0.809524
| 16
| 126
| 6.375
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.169811
| 0.15873
| 126
| 4
| 36
| 31.5
| 0.792453
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0db38d647be90fa01cb93ba46315c0d51b5651b1
| 19,570
|
py
|
Python
|
spark_fhir_schemas/r4/complex_types/medicinalproductpackaged_packageitem.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | 2
|
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/r4/complex_types/medicinalproductpackaged_packageitem.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/medicinalproductpackaged_packageitem.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class MedicinalProductPackaged_PackageItemSchema:
    """
    A medicinal product in a container or package.
    """

    # NOTE(review): this file is auto-generated (see header comment); the mutable
    # default arguments ([]) below are shared across calls by Python semantics but
    # are only read, never mutated, in this body — kept as generated.
    # noinspection PyDefaultArgument
    @staticmethod
    def get_schema(
        max_nesting_depth: Optional[int] = 6,
        nesting_depth: int = 0,
        nesting_list: List[str] = [],
        max_recursion_limit: Optional[int] = 2,
        include_extension: Optional[bool] = False,
        extension_fields: Optional[List[str]] = [
            "valueBoolean",
            "valueCode",
            "valueDate",
            "valueDateTime",
            "valueDecimal",
            "valueId",
            "valueInteger",
            "valuePositiveInt",
            "valueString",
            "valueTime",
            "valueUnsignedInt",
            "valueUri",
            "valueUrl",
        ],
        extension_depth: int = 0,
        max_extension_depth: Optional[int] = 2,
        include_modifierExtension: Optional[bool] = False,
    ) -> Union[StructType, DataType]:
        """
        A medicinal product in a container or package.

        id: Unique id for the element within a resource (for internal references). This
            may be any string value that does not contain spaces.

        extension: May be used to represent additional information that is not part of the basic
            definition of the element. To make the use of extensions safe and manageable,
            there is a strict set of governance applied to the definition and use of
            extensions. Though any implementer can define an extension, there is a set of
            requirements that SHALL be met as part of the definition of the extension.

        modifierExtension: May be used to represent additional information that is not part of the basic
            definition of the element and that modifies the understanding of the element
            in which it is contained and/or the understanding of the containing element's
            descendants. Usually modifier elements provide negation or qualification. To
            make the use of extensions safe and manageable, there is a strict set of
            governance applied to the definition and use of extensions. Though any
            implementer can define an extension, there is a set of requirements that SHALL
            be met as part of the definition of the extension. Applications processing a
            resource are required to check for modifier extensions.

            Modifier extensions SHALL NOT change the meaning of any elements on Resource
            or DomainResource (including cannot change the meaning of modifierExtension
            itself).

        identifier: Including possibly Data Carrier Identifier.

        type: The physical type of the container of the medicine.

        quantity: The quantity of this package in the medicinal product, at the current level of
            packaging. The outermost is always 1.

        material: Material type of the package item.

        alternateMaterial: A possible alternate material for the packaging.

        device: A device accompanying a medicinal product.

        manufacturedItem: The manufactured item as contained in the packaged medicinal product.

        packageItem: Allows containers within containers.

        physicalCharacteristics: Dimensions, color etc.

        otherCharacteristics: Other codeable characteristics.

        shelfLifeStorage: Shelf Life and storage information.

        manufacturer: Manufacturer of this Package Item.
        """
        # local imports keep the generated schema modules free of import cycles
        from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
        from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema
        from spark_fhir_schemas.r4.complex_types.codeableconcept import (
            CodeableConceptSchema,
        )
        from spark_fhir_schemas.r4.complex_types.quantity import QuantitySchema
        from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
        from spark_fhir_schemas.r4.complex_types.prodcharacteristic import (
            ProdCharacteristicSchema,
        )
        from spark_fhir_schemas.r4.complex_types.productshelflife import (
            ProductShelfLifeSchema,
        )

        # stop recursion: if this type has already appeared max_recursion_limit
        # times on the nesting path, or the overall depth limit is reached,
        # collapse the struct to just its "id" field
        if (
            max_recursion_limit
            and nesting_list.count("MedicinalProductPackaged_PackageItem")
            >= max_recursion_limit
        ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
            return StructType([StructField("id", StringType(), True)])
        # add my name to recursion list for later
        my_nesting_list: List[str] = nesting_list + [
            "MedicinalProductPackaged_PackageItem"
        ]
        schema = StructType(
            [
                # Unique id for the element within a resource (for internal references). This
                # may be any string value that does not contain spaces.
                StructField("id", StringType(), True),
                # May be used to represent additional information that is not part of the basic
                # definition of the element. To make the use of extensions safe and manageable,
                # there is a strict set of governance applied to the definition and use of
                # extensions. Though any implementer can define an extension, there is a set of
                # requirements that SHALL be met as part of the definition of the extension.
                StructField(
                    "extension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # May be used to represent additional information that is not part of the basic
                # definition of the element and that modifies the understanding of the element
                # in which it is contained and/or the understanding of the containing element's
                # descendants. Usually modifier elements provide negation or qualification. To
                # make the use of extensions safe and manageable, there is a strict set of
                # governance applied to the definition and use of extensions. Though any
                # implementer can define an extension, there is a set of requirements that SHALL
                # be met as part of the definition of the extension. Applications processing a
                # resource are required to check for modifier extensions.
                #
                # Modifier extensions SHALL NOT change the meaning of any elements on Resource
                # or DomainResource (including cannot change the meaning of modifierExtension
                # itself).
                StructField(
                    "modifierExtension",
                    ArrayType(
                        ExtensionSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # Including possibly Data Carrier Identifier.
                StructField(
                    "identifier",
                    ArrayType(
                        IdentifierSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # The physical type of the container of the medicine.
                StructField(
                    "type",
                    CodeableConceptSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                        include_modifierExtension=include_modifierExtension,
                    ),
                    True,
                ),
                # The quantity of this package in the medicinal product, at the current level of
                # packaging. The outermost is always 1.
                StructField(
                    "quantity",
                    QuantitySchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                        include_modifierExtension=include_modifierExtension,
                    ),
                    True,
                ),
                # Material type of the package item.
                StructField(
                    "material",
                    ArrayType(
                        CodeableConceptSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # A possible alternate material for the packaging.
                StructField(
                    "alternateMaterial",
                    ArrayType(
                        CodeableConceptSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # A device accompanying a medicinal product.
                StructField(
                    "device",
                    ArrayType(
                        ReferenceSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # The manufactured item as contained in the packaged medicinal product.
                StructField(
                    "manufacturedItem",
                    ArrayType(
                        ReferenceSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # Allows containers within containers.
                # (self-referential: bounded by the recursion guard above)
                StructField(
                    "packageItem",
                    ArrayType(
                        MedicinalProductPackaged_PackageItemSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # Dimensions, color etc.
                StructField(
                    "physicalCharacteristics",
                    ProdCharacteristicSchema.get_schema(
                        max_nesting_depth=max_nesting_depth,
                        nesting_depth=nesting_depth + 1,
                        nesting_list=my_nesting_list,
                        max_recursion_limit=max_recursion_limit,
                        include_extension=include_extension,
                        extension_fields=extension_fields,
                        extension_depth=extension_depth + 1,
                        max_extension_depth=max_extension_depth,
                        include_modifierExtension=include_modifierExtension,
                    ),
                    True,
                ),
                # Other codeable characteristics.
                StructField(
                    "otherCharacteristics",
                    ArrayType(
                        CodeableConceptSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # Shelf Life and storage information.
                StructField(
                    "shelfLifeStorage",
                    ArrayType(
                        ProductShelfLifeSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
                # Manufacturer of this Package Item.
                StructField(
                    "manufacturer",
                    ArrayType(
                        ReferenceSchema.get_schema(
                            max_nesting_depth=max_nesting_depth,
                            nesting_depth=nesting_depth + 1,
                            nesting_list=my_nesting_list,
                            max_recursion_limit=max_recursion_limit,
                            include_extension=include_extension,
                            extension_fields=extension_fields,
                            extension_depth=extension_depth,
                            max_extension_depth=max_extension_depth,
                            include_modifierExtension=include_modifierExtension,
                        )
                    ),
                    True,
                ),
            ]
        )
        # when extensions are excluded, replace the nested struct with a plain string column
        if not include_extension:
            schema.fields = [
                c
                if c.name != "extension"
                else StructField("extension", StringType(), True)
                for c in schema.fields
            ]

        # likewise for modifierExtension
        if not include_modifierExtension:
            schema.fields = [
                c
                if c.name != "modifierExtension"
                else StructField("modifierExtension", StringType(), True)
                for c in schema.fields
            ]

        return schema
| 48.440594
| 104
| 0.534696
| 1,618
| 19,570
| 6.2089
| 0.131026
| 0.072865
| 0.046287
| 0.066892
| 0.784093
| 0.759705
| 0.743082
| 0.704957
| 0.697392
| 0.689329
| 0
| 0.00284
| 0.42417
| 19,570
| 403
| 105
| 48.560794
| 0.888633
| 0.22417
| 0
| 0.679868
| 1
| 0
| 0.030099
| 0.006397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0033
| false
| 0
| 0.029703
| 0
| 0.042904
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0dccda502924cbb0e097d0e36d36039e92c3f845
| 268
|
py
|
Python
|
prereise/cli/data_sources/tests/test_demand_data.py
|
keforres/PreREISE
|
fcc111fdccc0626d3d34f1749a14035e47991043
|
[
"MIT"
] | 15
|
2021-03-02T11:54:27.000Z
|
2022-02-16T13:01:40.000Z
|
prereise/cli/data_sources/tests/test_demand_data.py
|
keforres/PreREISE
|
fcc111fdccc0626d3d34f1749a14035e47991043
|
[
"MIT"
] | 90
|
2021-01-25T19:02:14.000Z
|
2022-03-31T20:27:28.000Z
|
prereise/cli/data_sources/tests/test_demand_data.py
|
keforres/PreREISE
|
fcc111fdccc0626d3d34f1749a14035e47991043
|
[
"MIT"
] | 15
|
2021-02-08T23:28:21.000Z
|
2022-01-24T21:59:14.000Z
|
import pytest
from prereise.cli.data_sources.demand_data import DemandData
from prereise.cli.data_sources.exceptions import CommandNotSupportedError
def test_demand_data_happy_path():
    """extract() is unsupported for DemandData and must raise CommandNotSupportedError."""
    pytest.raises(CommandNotSupportedError, lambda: DemandData().extract())
| 26.8
| 73
| 0.820896
| 31
| 268
| 6.870968
| 0.580645
| 0.112676
| 0.140845
| 0.178404
| 0.244131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11194
| 268
| 9
| 74
| 29.777778
| 0.894958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1d03a7d105cd5e0e2392e662f89e41d8e13f97f1
| 109
|
py
|
Python
|
ReverseWordOrder.py
|
AlanBubalo/Python-Practise
|
5c6abd5ec6a934399d7ad6265132a982a5a47ed2
|
[
"MIT"
] | null | null | null |
ReverseWordOrder.py
|
AlanBubalo/Python-Practise
|
5c6abd5ec6a934399d7ad6265132a982a5a47ed2
|
[
"MIT"
] | null | null | null |
ReverseWordOrder.py
|
AlanBubalo/Python-Practise
|
5c6abd5ec6a934399d7ad6265132a982a5a47ed2
|
[
"MIT"
] | null | null | null |
def reverse(s):
    """Return *s* with its whitespace-separated words in reverse order."""
    words = s.split()
    words.reverse()
    return " ".join(words)
# Read a sentence from stdin and print it with the word order reversed.
s = input("Enter a sentence: ")
print(reverse(s))
| 21.8
| 39
| 0.559633
| 16
| 109
| 3.8125
| 0.75
| 0.262295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011236
| 0.183486
| 109
| 5
| 40
| 21.8
| 0.674157
| 0
| 0
| 0
| 0
| 0
| 0.179245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0.25
| 0.5
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
1d05e7ce161c1d97cab4dd06726659ea0d227d24
| 92
|
py
|
Python
|
GNetLMM/pycore/mtSet/covariance/__init__.py
|
PMBio/GNetLMM
|
103d6433ff6d4a13b5787c116032fda268dc4302
|
[
"Apache-2.0"
] | 4
|
2016-02-25T18:40:36.000Z
|
2019-05-06T06:15:47.000Z
|
GNetLMM/pycore/mtSet/covariance/__init__.py
|
PMBio/GNetLMM
|
103d6433ff6d4a13b5787c116032fda268dc4302
|
[
"Apache-2.0"
] | 6
|
2016-03-29T02:55:17.000Z
|
2017-11-27T19:30:04.000Z
|
GNetLMM/pycore/mtSet/covariance/__init__.py
|
PMBio/GNetLMM
|
103d6433ff6d4a13b5787c116032fda268dc4302
|
[
"Apache-2.0"
] | 2
|
2017-05-09T05:23:50.000Z
|
2019-07-27T13:19:22.000Z
|
from covariance import covariance
from lowrank import lowrank
from freeform import freeform
| 23
| 33
| 0.869565
| 12
| 92
| 6.666667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 92
| 3
| 34
| 30.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
df2abea7d7fb4d1f497d5758d685338627bff3fd
| 155
|
py
|
Python
|
package/cloudshell/email/__init__.py
|
omri-amd/cloudshell-email
|
080ed90680f8da26e81639ad3a8e9c9624343b4a
|
[
"Apache-2.0"
] | null | null | null |
package/cloudshell/email/__init__.py
|
omri-amd/cloudshell-email
|
080ed90680f8da26e81639ad3a8e9c9624343b4a
|
[
"Apache-2.0"
] | 3
|
2020-11-24T19:03:11.000Z
|
2022-03-22T05:29:39.000Z
|
package/cloudshell/email/__init__.py
|
omri-amd/cloudshell-email
|
080ed90680f8da26e81639ad3a8e9c9624343b4a
|
[
"Apache-2.0"
] | 2
|
2020-09-17T03:28:14.000Z
|
2022-03-17T21:31:20.000Z
|
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
from .email_service import EmailService
from .email_config import EmailConfig
| 22.142857
| 42
| 0.845161
| 20
| 155
| 5.75
| 0.55
| 0.173913
| 0.243478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116129
| 155
| 6
| 43
| 25.833333
| 0.839416
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
df3e254c9b074702520ce14a698f4f9af1050e76
| 31
|
py
|
Python
|
MoodyBeatsRecommenderAPI/music_selector/__init__.py
|
labs12-music-stream-selector/DS
|
8029556547c2478a647649c89cfb834893647795
|
[
"MIT"
] | null | null | null |
MoodyBeatsRecommenderAPI/music_selector/__init__.py
|
labs12-music-stream-selector/DS
|
8029556547c2478a647649c89cfb834893647795
|
[
"MIT"
] | 19
|
2019-12-26T17:21:07.000Z
|
2022-02-17T22:21:18.000Z
|
MoodyBeatsRecommenderAPI/music_selector/__init__.py
|
labs12-music-stream-selector/DS
|
8029556547c2478a647649c89cfb834893647795
|
[
"MIT"
] | null | null | null |
# Using conda env : 'starups_2'
| 31
| 31
| 0.709677
| 5
| 31
| 4.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 0.16129
| 31
| 1
| 31
| 31
| 0.769231
| 0.935484
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
df54c91cf1ab9164eb087c904ce5a7a2f2afb1f9
| 49
|
py
|
Python
|
great_expectations/cli/__init__.py
|
victorcouste/great_expectations
|
9ee46d83feb87e13c769e2ae35b899b3f18d73a4
|
[
"Apache-2.0"
] | 6,451
|
2017-09-11T16:32:53.000Z
|
2022-03-31T23:27:49.000Z
|
great_expectations/cli/__init__.py
|
victorcouste/great_expectations
|
9ee46d83feb87e13c769e2ae35b899b3f18d73a4
|
[
"Apache-2.0"
] | 3,892
|
2017-09-08T18:57:50.000Z
|
2022-03-31T23:15:20.000Z
|
great_expectations/cli/__init__.py
|
victorcouste/great_expectations
|
9ee46d83feb87e13c769e2ae35b899b3f18d73a4
|
[
"Apache-2.0"
] | 1,023
|
2017-09-08T15:22:05.000Z
|
2022-03-31T21:17:08.000Z
|
from great_expectations.cli.cli import cli, main
| 24.5
| 48
| 0.836735
| 8
| 49
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 1
| 49
| 49
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
df65d1ea7e39acf31e3438033ff96cd097fbd222
| 1,927
|
py
|
Python
|
testproject/testapp/migrations/0001_initial.py
|
django-min/django-min-codemirror
|
ca02905cf90549044488bc76e65261ee0bb22538
|
[
"MIT"
] | null | null | null |
testproject/testapp/migrations/0001_initial.py
|
django-min/django-min-codemirror
|
ca02905cf90549044488bc76e65261ee0bb22538
|
[
"MIT"
] | null | null | null |
testproject/testapp/migrations/0001_initial.py
|
django-min/django-min-codemirror
|
ca02905cf90549044488bc76e65261ee0bb22538
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-03 14:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code_css', models.TextField(blank=True, default='')),
('code_js', models.TextField(blank=True, default='')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ItemTabular',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code_css', models.TextField(blank=True, default='')),
('code_js', models.TextField(blank=True, default='')),
('parent', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='tabular_item_set', to='testapp.item')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ItemStacked',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code_css', models.TextField(blank=True, default='')),
('code_js', models.TextField(blank=True, default='')),
('parent', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='stacked_item_set', to='testapp.item')),
],
options={
'abstract': False,
},
),
]
| 37.784314
| 181
| 0.554748
| 189
| 1,927
| 5.52381
| 0.306878
| 0.068966
| 0.122605
| 0.137931
| 0.782567
| 0.782567
| 0.715517
| 0.715517
| 0.638889
| 0.638889
| 0
| 0.011136
| 0.300986
| 1,927
| 50
| 182
| 38.54
| 0.76392
| 0.023352
| 0
| 0.627907
| 1
| 0
| 0.093085
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.046512
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
10c3735edce4e401aef3a0c4e1017a9f5ae9e9b9
| 85
|
py
|
Python
|
tccli/services/tat/__init__.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | 47
|
2018-05-31T11:26:25.000Z
|
2022-03-08T02:12:45.000Z
|
tccli/services/tat/__init__.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | 23
|
2018-06-14T10:46:30.000Z
|
2022-02-28T02:53:09.000Z
|
tccli/services/tat/__init__.py
|
zqfan/tencentcloud-cli
|
b6ad9fced2a2b340087e4e5522121d405f68b615
|
[
"Apache-2.0"
] | 22
|
2018-10-22T09:49:45.000Z
|
2022-03-30T08:06:04.000Z
|
# -*- coding: utf-8 -*-
from tccli.services.tat.tat_client import action_caller
| 21.25
| 55
| 0.694118
| 12
| 85
| 4.75
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 0.164706
| 85
| 4
| 56
| 21.25
| 0.788732
| 0.247059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
10c95c9372347acd3629f748ac3cde20314df4fc
| 56
|
py
|
Python
|
plasmapy/classes/__init__.py
|
ludoro/PlasmaPy
|
69712cb40b8b588400301edfd6925d41d2f13eac
|
[
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | 1
|
2020-04-28T23:04:41.000Z
|
2020-04-28T23:04:41.000Z
|
plasmapy/classes/__init__.py
|
ludoro/PlasmaPy
|
69712cb40b8b588400301edfd6925d41d2f13eac
|
[
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | null | null | null |
plasmapy/classes/__init__.py
|
ludoro/PlasmaPy
|
69712cb40b8b588400301edfd6925d41d2f13eac
|
[
"BSD-2-Clause-Patent",
"BSD-3-Clause"
] | null | null | null |
from .plasma import Plasma
from .species import Species
| 18.666667
| 28
| 0.821429
| 8
| 56
| 5.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 29
| 28
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
10cebb9f3a695eea48bf02ff1ead35cd864dc82e
| 56
|
py
|
Python
|
stargen/__init__.py
|
codeswhite/stargen
|
440721e9d54cb1eb830f7ece1dc6b8df731fbae8
|
[
"MIT"
] | null | null | null |
stargen/__init__.py
|
codeswhite/stargen
|
440721e9d54cb1eb830f7ece1dc6b8df731fbae8
|
[
"MIT"
] | 2
|
2021-01-14T13:00:41.000Z
|
2021-01-14T13:26:15.000Z
|
stargen/__init__.py
|
codeswhite/stargen
|
440721e9d54cb1eb830f7ece1dc6b8df731fbae8
|
[
"MIT"
] | 1
|
2020-09-28T18:16:21.000Z
|
2020-09-28T18:16:21.000Z
|
from .stargen import Stargen
from .__main__ import main
| 18.666667
| 28
| 0.821429
| 8
| 56
| 5.25
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 56
| 2
| 29
| 28
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
10ddd132c1e8b169dde49a3a9e9d2f8f0d732789
| 363
|
py
|
Python
|
tests/test_handler.py
|
donghak-shin/dp-tornado
|
095bb293661af35cce5f917d8a2228d273489496
|
[
"MIT"
] | 18
|
2015-04-07T14:28:39.000Z
|
2020-02-08T14:03:38.000Z
|
tests/test_handler.py
|
donghak-shin/dp-tornado
|
095bb293661af35cce5f917d8a2228d273489496
|
[
"MIT"
] | 7
|
2016-10-05T05:14:06.000Z
|
2021-05-20T02:07:22.000Z
|
tests/test_handler.py
|
donghak-shin/dp-tornado
|
095bb293661af35cce5f917d8a2228d273489496
|
[
"MIT"
] | 11
|
2015-12-15T09:49:39.000Z
|
2021-09-06T18:38:21.000Z
|
# -*- coding: utf-8 -*-
from . import utils
from . import consts
def exception_before():
utils.expecting_text('get', '/handler/exception/before', 'done', 200)
def exception_raise():
utils.expecting_text('get', '/handler/exception/raise', 'done', 200)
def exception_after():
utils.expecting_text('get', '/handler/exception/after', 'done', 200)
| 20.166667
| 73
| 0.680441
| 45
| 363
| 5.355556
| 0.4
| 0.149378
| 0.224066
| 0.261411
| 0.460581
| 0.460581
| 0
| 0
| 0
| 0
| 0
| 0.032051
| 0.140496
| 363
| 17
| 74
| 21.352941
| 0.740385
| 0.057851
| 0
| 0
| 0
| 0
| 0.276471
| 0.214706
| 0
| 0
| 0
| 0
| 0
| 1
| 0.375
| true
| 0
| 0.25
| 0
| 0.625
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
10f65ba13b486094d8db44b10d4522608b61f4de
| 1,430
|
py
|
Python
|
src/rapidpro_community_portal/apps/portal_pages/migrations/0003_focusarea_organization_techfirm.py
|
rapidpro/rapidpro-community-portal
|
db86e757a24888bebc4d30f451189a2b743396da
|
[
"Apache-2.0"
] | 19
|
2015-09-15T09:17:54.000Z
|
2021-07-13T06:09:49.000Z
|
src/rapidpro_community_portal/apps/portal_pages/migrations/0003_focusarea_organization_techfirm.py
|
rapidpro/rapidpro-community-portal
|
db86e757a24888bebc4d30f451189a2b743396da
|
[
"Apache-2.0"
] | 222
|
2015-03-13T15:52:20.000Z
|
2021-04-08T19:18:41.000Z
|
src/rapidpro_community_portal/apps/portal_pages/migrations/0003_focusarea_organization_techfirm.py
|
rapidpro/rapidpro-community-portal
|
db86e757a24888bebc4d30f451189a2b743396da
|
[
"Apache-2.0"
] | 11
|
2016-03-01T19:56:52.000Z
|
2021-07-04T22:42:14.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portal_pages', '0002_country'),
]
operations = [
migrations.CreateModel(
name='FocusArea',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TechFirm',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'ordering': ('name',),
},
bases=(models.Model,),
),
]
| 29.791667
| 114
| 0.496503
| 117
| 1,430
| 5.905983
| 0.376068
| 0.091172
| 0.108538
| 0.099855
| 0.701881
| 0.701881
| 0.701881
| 0.701881
| 0.701881
| 0.701881
| 0
| 0.015284
| 0.359441
| 1,430
| 47
| 115
| 30.425532
| 0.739083
| 0.014685
| 0
| 0.658537
| 0
| 0
| 0.080313
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04878
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
80242a77bf8d95fa6ace844b6291579b82535f47
| 80
|
py
|
Python
|
pymatflow/base/__init__.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/base/__init__.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/base/__init__.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
from .element import element
from .xyz import BaseXyz
from .atom import Atom
| 20
| 29
| 0.775
| 12
| 80
| 5.166667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1875
| 80
| 3
| 30
| 26.666667
| 0.953846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
803cb6a40fe8285034535015db3ebad846f0df32
| 77
|
py
|
Python
|
problems 001 - 020/020_factorial_digit_sum.py
|
max-97/ProjectEuler
|
5eab8b2e199f3253696c4f671b395f2f2773d7f1
|
[
"MIT"
] | null | null | null |
problems 001 - 020/020_factorial_digit_sum.py
|
max-97/ProjectEuler
|
5eab8b2e199f3253696c4f671b395f2f2773d7f1
|
[
"MIT"
] | null | null | null |
problems 001 - 020/020_factorial_digit_sum.py
|
max-97/ProjectEuler
|
5eab8b2e199f3253696c4f671b395f2f2773d7f1
|
[
"MIT"
] | null | null | null |
from math import factorial
print(sum(int(x) for x in str(factorial(100))))
| 15.4
| 47
| 0.727273
| 14
| 77
| 4
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.142857
| 77
| 4
| 48
| 19.25
| 0.80303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
33796bac1279b80f1e8ef00df1276e9b2d28c2a9
| 49
|
py
|
Python
|
zhaquirks/ecolink/__init__.py
|
WolfRevo/zha-device-handlers
|
0fa4ca1c03c611be0cf2c38c4fec2a197e3dd1d3
|
[
"Apache-2.0"
] | 213
|
2020-04-16T10:48:31.000Z
|
2022-03-30T20:48:07.000Z
|
zhaquirks/ecolink/__init__.py
|
WolfRevo/zha-device-handlers
|
0fa4ca1c03c611be0cf2c38c4fec2a197e3dd1d3
|
[
"Apache-2.0"
] | 1,088
|
2020-04-03T13:23:29.000Z
|
2022-03-31T23:55:03.000Z
|
zhaquirks/ecolink/__init__.py
|
WolfRevo/zha-device-handlers
|
0fa4ca1c03c611be0cf2c38c4fec2a197e3dd1d3
|
[
"Apache-2.0"
] | 280
|
2020-04-24T08:44:27.000Z
|
2022-03-31T12:58:04.000Z
|
"""Module for Ecolink quirks implementations."""
| 24.5
| 48
| 0.755102
| 5
| 49
| 7.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 49
| 1
| 49
| 49
| 0.840909
| 0.857143
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
33877b2268c3be0d6adb8bf88e3179433f42b7fa
| 148
|
py
|
Python
|
stk/api/topology/__init__.py
|
sayerhs/pystk
|
e211a13b45929b8bfbfe891532ea19990a19d324
|
[
"Apache-2.0"
] | null | null | null |
stk/api/topology/__init__.py
|
sayerhs/pystk
|
e211a13b45929b8bfbfe891532ea19990a19d324
|
[
"Apache-2.0"
] | null | null | null |
stk/api/topology/__init__.py
|
sayerhs/pystk
|
e211a13b45929b8bfbfe891532ea19990a19d324
|
[
"Apache-2.0"
] | 1
|
2021-04-28T20:10:54.000Z
|
2021-04-28T20:10:54.000Z
|
# -*- coding: utf-8 -*-
"""\
stk_topology python bindings
============================
"""
from .topology import rank_t, topology_t, StkTopology
| 14.8
| 53
| 0.547297
| 15
| 148
| 5.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007813
| 0.135135
| 148
| 9
| 54
| 16.444444
| 0.601563
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
33b3388b34f827e4c39221761eef4bf2d89c9868
| 281
|
py
|
Python
|
rl_credit/__init__.py
|
bricewang/rl-credit
|
bd201b06617a060b9306acc9710c9dfa5002ead2
|
[
"MIT"
] | null | null | null |
rl_credit/__init__.py
|
bricewang/rl-credit
|
bd201b06617a060b9306acc9710c9dfa5002ead2
|
[
"MIT"
] | null | null | null |
rl_credit/__init__.py
|
bricewang/rl-credit
|
bd201b06617a060b9306acc9710c9dfa5002ead2
|
[
"MIT"
] | null | null | null |
from rl_credit.algos import A2CAlgo, PPOAlgo, HCAReturns, HCAState, AttentionAlgo, AttentionQAlgo
from rl_credit.model import ACModel, RecurrentACModel, ACModelVanilla, ACModelReturnHCA, ACAttention, AttentionQ
from rl_credit.utils import DictList
from rl_credit.examples import *
| 56.2
| 112
| 0.854093
| 33
| 281
| 7.151515
| 0.636364
| 0.101695
| 0.20339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003922
| 0.092527
| 281
| 4
| 113
| 70.25
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
33cf22fc98a57d02a2eb5f3cc56565337c005d64
| 18
|
py
|
Python
|
first.py
|
yan16032/car
|
dcb05df25dac5aee3608d3b0268fe5474797bef4
|
[
"Apache-2.0"
] | null | null | null |
first.py
|
yan16032/car
|
dcb05df25dac5aee3608d3b0268fe5474797bef4
|
[
"Apache-2.0"
] | null | null | null |
first.py
|
yan16032/car
|
dcb05df25dac5aee3608d3b0268fe5474797bef4
|
[
"Apache-2.0"
] | 1
|
2019-01-19T07:11:04.000Z
|
2019-01-19T07:11:04.000Z
|
print('I am good')
| 18
| 18
| 0.666667
| 4
| 18
| 3
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 18
| 1
| 18
| 18
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.473684
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
33e0bb0466b0782ef99dfe7d77597d4ae927259a
| 68
|
py
|
Python
|
src/striga/service/threadmonitor/__init__.py
|
ateska/striga
|
451b5d9421e2e5fdf49b94c8f3d76e576abc5923
|
[
"MIT"
] | null | null | null |
src/striga/service/threadmonitor/__init__.py
|
ateska/striga
|
451b5d9421e2e5fdf49b94c8f3d76e576abc5923
|
[
"MIT"
] | null | null | null |
src/striga/service/threadmonitor/__init__.py
|
ateska/striga
|
451b5d9421e2e5fdf49b94c8f3d76e576abc5923
|
[
"MIT"
] | null | null | null |
#Interface
from ._stsvstm_threadmonitor import ThreadMonitorService
| 22.666667
| 56
| 0.897059
| 6
| 68
| 9.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 68
| 2
| 57
| 34
| 0.936508
| 0.132353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1d1620071a1b3ab34a5e73769c1c6a21d8d2bf90
| 78
|
py
|
Python
|
ioflo/aio/http/__init__.py
|
BradyHammond/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 128
|
2015-01-14T12:26:56.000Z
|
2021-11-06T07:09:29.000Z
|
ioflo/aio/http/__init__.py
|
BradyHammond/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 17
|
2015-01-28T18:26:50.000Z
|
2020-11-19T22:08:06.000Z
|
ioflo/aio/http/__init__.py
|
BradyHammond/ioflo
|
177ac656d7c4ff801aebb0d8b401db365a5248ce
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2015-01-27T23:28:31.000Z
|
2021-05-04T16:37:30.000Z
|
"""
http package
"""
from .clienting import Patron
from .serving import Valet
| 13
| 29
| 0.74359
| 10
| 78
| 5.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 5
| 30
| 15.6
| 0.878788
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1d538f5f9675bee5741a1d8517b3b2fa1eb0fa7e
| 60
|
py
|
Python
|
tripHome/tests.py
|
TripSage/TripSage
|
e69bb6e195cc60b7f072bbe1d496ad6f894caa43
|
[
"Apache-2.0"
] | 1
|
2020-10-04T04:25:57.000Z
|
2020-10-04T04:25:57.000Z
|
tripHome/tests.py
|
akashsrikanth2310/TripSage
|
999e4cf5019930567b3ecd893529984d8c577669
|
[
"Apache-2.0"
] | 46
|
2020-09-30T01:34:37.000Z
|
2020-10-25T22:36:24.000Z
|
tripHome/tests.py
|
Amoghrd/TripSage
|
7b0c1ad1485581f689078a8f4f566b89f4d5b364
|
[
"Apache-2.0"
] | 9
|
2020-09-19T02:30:07.000Z
|
2020-12-04T06:56:40.000Z
|
"""
auto generated test file
"""
# Create your tests here.
| 10
| 25
| 0.666667
| 8
| 60
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 60
| 5
| 26
| 12
| 0.833333
| 0.816667
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d55d6997e3ca1f61219b19282494f037afcd5105
| 96
|
py
|
Python
|
watchmen/database/storage/exception/exception.py
|
Indexical-Metrics-Measure-Advisory/watchmen-data-processor
|
d50b93e92868500552416997707d71720487bd77
|
[
"MIT"
] | null | null | null |
watchmen/database/storage/exception/exception.py
|
Indexical-Metrics-Measure-Advisory/watchmen-data-processor
|
d50b93e92868500552416997707d71720487bd77
|
[
"MIT"
] | null | null | null |
watchmen/database/storage/exception/exception.py
|
Indexical-Metrics-Measure-Advisory/watchmen-data-processor
|
d50b93e92868500552416997707d71720487bd77
|
[
"MIT"
] | null | null | null |
class InsertConflictError(Exception):
pass
class OptimisticLockError(Exception):
pass
| 13.714286
| 37
| 0.770833
| 8
| 96
| 9.25
| 0.625
| 0.351351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 96
| 6
| 38
| 16
| 0.925
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
635323d7860d3449fb8cdba4f4a84202a88ec868
| 141
|
py
|
Python
|
testserver.py
|
AricHasting/senior-software
|
0424cd9aa94533ef8ba58a2f70e279761028f96e
|
[
"MIT"
] | null | null | null |
testserver.py
|
AricHasting/senior-software
|
0424cd9aa94533ef8ba58a2f70e279761028f96e
|
[
"MIT"
] | 7
|
2018-09-02T23:42:43.000Z
|
2018-11-08T22:14:28.000Z
|
testserver.py
|
AricHasting/senior-software
|
0424cd9aa94533ef8ba58a2f70e279761028f96e
|
[
"MIT"
] | 4
|
2018-08-30T01:12:11.000Z
|
2018-09-11T17:44:57.000Z
|
#!/usr/bin/env python3
import server
#server.startserver("10.30.147.18", 8080)
server.startserver("127.0.0.1", 8080)
print("Server started")
| 23.5
| 41
| 0.730496
| 23
| 141
| 4.478261
| 0.73913
| 0.330097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183206
| 0.070922
| 141
| 6
| 42
| 23.5
| 0.603053
| 0.432624
| 0
| 0
| 0
| 0
| 0.291139
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
638baf63c4453db05fb42671d34a9a8856c0f44b
| 38
|
py
|
Python
|
slides/20160516-Python_Loop_CommonFunctions/while_loop_3.py
|
she02789222/test
|
fa10bd5be351ca3d4bef4f7d6c4510e65666de7c
|
[
"MIT"
] | 4
|
2018-11-29T04:06:29.000Z
|
2021-11-29T07:00:44.000Z
|
slides/20160516-Python_Loop_CommonFunctions/while_loop_3.py
|
NTNUCIC/108
|
52961e76d299842c2d44d142d5c56ad665420ee6
|
[
"MIT"
] | 6
|
2016-05-17T02:34:57.000Z
|
2021-02-05T17:33:28.000Z
|
slides/20160516-Python_Loop_CommonFunctions/while_loop_3.py
|
NTNUCIC/108
|
52961e76d299842c2d44d142d5c56ad665420ee6
|
[
"MIT"
] | 3
|
2019-02-17T05:58:46.000Z
|
2019-02-18T15:09:55.000Z
|
while True:
print('hi~')
print('End')
| 12.666667
| 13
| 0.631579
| 6
| 38
| 4
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 38
| 3
| 14
| 12.666667
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
63cccb63a09a7507459f432fd9d7d549d6c439ea
| 164
|
py
|
Python
|
iotbx/xplor/ext.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-03-18T12:31:57.000Z
|
2022-03-14T06:27:06.000Z
|
iotbx/xplor/ext.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
iotbx/xplor/ext.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-03-26T12:52:30.000Z
|
2021-03-26T12:52:30.000Z
|
from __future__ import absolute_import, division, print_function
import boost.python
ext = boost.python.import_ext("iotbx_xplor_ext")
from iotbx_xplor_ext import *
| 32.8
| 64
| 0.841463
| 24
| 164
| 5.291667
| 0.5
| 0.173228
| 0.204724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091463
| 164
| 4
| 65
| 41
| 0.852349
| 0
| 0
| 0
| 0
| 0
| 0.091463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
63d23cb35444a8a83ae652bd176c80fc385f8772
| 764
|
py
|
Python
|
src/GridCal/Engine/Simulations/OPF/__init__.py
|
mzy2240/GridCal
|
0352f0e9ce09a9c037722bf2f2afc0a31ccd2880
|
[
"BSD-3-Clause"
] | 284
|
2016-01-31T03:20:44.000Z
|
2022-03-17T21:16:52.000Z
|
src/GridCal/Engine/Simulations/OPF/__init__.py
|
mzy2240/GridCal
|
0352f0e9ce09a9c037722bf2f2afc0a31ccd2880
|
[
"BSD-3-Clause"
] | 94
|
2016-01-14T13:37:40.000Z
|
2022-03-28T03:13:56.000Z
|
src/GridCal/Engine/Simulations/OPF/__init__.py
|
mzy2240/GridCal
|
0352f0e9ce09a9c037722bf2f2afc0a31ccd2880
|
[
"BSD-3-Clause"
] | 84
|
2016-03-29T10:43:04.000Z
|
2022-02-22T16:26:55.000Z
|
from GridCal.Engine.Simulations.OPF.dc_opf import OpfDc
from GridCal.Engine.Simulations.OPF.dc_opf_ts import OpfDcTimeSeries
from GridCal.Engine.Simulations.OPF.ac_opf import OpfAc
from GridCal.Engine.Simulations.OPF.ac_opf_ts import OpfAcTimeSeries
from GridCal.Engine.Simulations.OPF.opf_results import OptimalPowerFlowResults
from GridCal.Engine.Simulations.OPF.opf_ts_results import OptimalPowerFlowTimeSeriesResults
from GridCal.Engine.Simulations.OPF.opf_driver import OptimalPowerFlow, OpfSimple, OptimalPowerFlowOptions
from GridCal.Engine.Simulations.OPF.opf_ntc_driver import OptimalNetTransferCapacity, OptimalNetTransferCapacityOptions, OpfNTC
from GridCal.Engine.Simulations.OPF.opf_ts_driver import OptimalPowerFlowTimeSeries, OpfSimpleTimeSeries
| 58.769231
| 127
| 0.888743
| 91
| 764
| 7.307692
| 0.285714
| 0.148872
| 0.230075
| 0.378947
| 0.478195
| 0.478195
| 0.324812
| 0
| 0
| 0
| 0
| 0
| 0.057592
| 764
| 12
| 128
| 63.666667
| 0.923611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8929be8724a303fce1d936868e817f6d7c11feb4
| 337
|
py
|
Python
|
sac/__init__.py
|
sandipan1/robo_rl
|
3bcb7caabeba71dd747fadf2355ac42408b7f340
|
[
"MIT"
] | 5
|
2018-10-16T03:48:02.000Z
|
2021-10-01T08:58:05.000Z
|
sac/__init__.py
|
sandipan1/robo_rl
|
3bcb7caabeba71dd747fadf2355ac42408b7f340
|
[
"MIT"
] | 1
|
2018-10-17T16:19:14.000Z
|
2018-10-31T06:19:30.000Z
|
sac/__init__.py
|
sandipan1/robo_rl
|
3bcb7caabeba71dd747fadf2355ac42408b7f340
|
[
"MIT"
] | null | null | null |
from robo_rl.sac.gaussian_policy import GaussianPolicy
from robo_rl.sac.categorical_policy import LinearCategoricalPolicy
from robo_rl.sac.softactorcritic import SAC
from robo_rl.sac.squasher import Squasher, SigmoidSquasher, TanhSquasher, NoSquasher, GAAFTanhSquasher
from robo_rl.sac.sac_parser import get_sac_parser, get_logfile_name
| 56.166667
| 102
| 0.881306
| 47
| 337
| 6.06383
| 0.425532
| 0.140351
| 0.175439
| 0.22807
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074184
| 337
| 5
| 103
| 67.4
| 0.913462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
895c573940a2f8929a3044450f7e859759623835
| 433
|
py
|
Python
|
pywinauto/unittests/test_architecture.py
|
eltimen/pywinauto
|
7235e6f83edfd96a7aeb8bbf9fef7b8f3d912512
|
[
"BSD-3-Clause"
] | 3,544
|
2015-05-25T17:06:12.000Z
|
2022-03-31T18:44:09.000Z
|
pywinauto/unittests/test_architecture.py
|
iabhi009/pywinauto
|
127322e7257f451d6c360db732b8e6ff8df9662e
|
[
"BSD-3-Clause"
] | 1,128
|
2015-05-21T10:17:34.000Z
|
2022-03-28T15:59:49.000Z
|
pywinauto/unittests/test_architecture.py
|
airelil/pywinauto
|
187b84de20f7980d4f5cff4abdb3bbff17cc049e
|
[
"BSD-3-Clause"
] | 719
|
2015-05-26T20:20:02.000Z
|
2022-03-31T08:11:53.000Z
|
import unittest
class PublicImportsTests(unittest.TestCase):
def test_top_level_imports(self):
from pywinauto import ElementNotFoundError, ElementAmbiguousError, WindowNotFoundError, WindowAmbiguousError
self.assertEqual(len(set([ElementNotFoundError, ElementAmbiguousError, WindowNotFoundError, WindowAmbiguousError])),
4)
if __name__ == "__main__":
unittest.main()
| 30.928571
| 124
| 0.722864
| 33
| 433
| 9.151515
| 0.727273
| 0.271523
| 0.397351
| 0.529801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002907
| 0.205543
| 433
| 13
| 125
| 33.307692
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0.018476
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
895cac2f499d7b75653824e2a0398594a6560b29
| 138
|
py
|
Python
|
django_gotolong/broker/icidir/itxn/admin.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 15
|
2019-12-06T16:19:45.000Z
|
2021-08-20T13:22:22.000Z
|
django_gotolong/broker/icidir/itxn/admin.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 14
|
2020-12-08T10:45:05.000Z
|
2021-09-21T17:23:45.000Z
|
django_gotolong/broker/icidir/itxn/admin.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 9
|
2020-01-01T03:04:29.000Z
|
2021-04-18T08:42:30.000Z
|
from django.contrib import admin
# Register your models here.
from .models import BrokerIcidirTxn
admin.site.register(BrokerIcidirTxn)
| 17.25
| 36
| 0.818841
| 17
| 138
| 6.647059
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.123188
| 138
| 7
| 37
| 19.714286
| 0.933884
| 0.188406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
897972b8dfa92c0e82fc4dcf016570439b38485a
| 115
|
py
|
Python
|
example/steps/__init__.py
|
m-martinez/veripy
|
993bb498e4cdac44d76284a624d306aaf2e2215a
|
[
"MIT"
] | null | null | null |
example/steps/__init__.py
|
m-martinez/veripy
|
993bb498e4cdac44d76284a624d306aaf2e2215a
|
[
"MIT"
] | null | null | null |
example/steps/__init__.py
|
m-martinez/veripy
|
993bb498e4cdac44d76284a624d306aaf2e2215a
|
[
"MIT"
] | null | null | null |
# Behave requires a steps directory which implements domain-specific sentences.
from veripy.steps import * # noqa
| 38.333333
| 79
| 0.8
| 15
| 115
| 6.133333
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147826
| 115
| 2
| 80
| 57.5
| 0.938776
| 0.713043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
982bdb63af74b7627a58b2997f0227f12906d9b3
| 158
|
py
|
Python
|
supbot/__init__.py
|
adsau59/Supbot2
|
8f8dd4bf822b42975ffe659a388b54889c500ff1
|
[
"MIT"
] | 18
|
2020-02-06T19:24:51.000Z
|
2022-02-04T12:20:49.000Z
|
supbot/__init__.py
|
adsau59/Supbot2
|
8f8dd4bf822b42975ffe659a388b54889c500ff1
|
[
"MIT"
] | 6
|
2020-06-30T14:24:00.000Z
|
2021-07-06T19:53:31.000Z
|
supbot/__init__.py
|
adsau59/Supbot2
|
8f8dd4bf822b42975ffe659a388b54889c500ff1
|
[
"MIT"
] | 5
|
2020-02-13T23:31:27.000Z
|
2021-12-10T04:54:58.000Z
|
"""
Allows developers to import Supbot to interface with the module
"""
from supbot.api import Supbot
from supbot.__main__ import main
__version__ = "0.2.9"
| 19.75
| 63
| 0.765823
| 24
| 158
| 4.708333
| 0.666667
| 0.212389
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022388
| 0.151899
| 158
| 7
| 64
| 22.571429
| 0.820896
| 0.398734
| 0
| 0
| 0
| 0
| 0.057471
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
9831265cf9e35a4be62cd3ba8423d42e1515871f
| 10,930
|
py
|
Python
|
notebook/pandas_time_series_freq.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/pandas_time_series_freq.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/pandas_time_series_freq.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
import pandas as pd
print(pd.date_range('2018-01-01', '2018-12-31', freq='M'))
# DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
# '2018-05-31', '2018-06-30', '2018-07-31', '2018-08-31',
# '2018-09-30', '2018-10-31', '2018-11-30', '2018-12-31'],
# dtype='datetime64[ns]', freq='M')
print(pd.date_range('2018-01-01', '2018-12-31', freq='MS'))
# DatetimeIndex(['2018-01-01', '2018-02-01', '2018-03-01', '2018-04-01',
# '2018-05-01', '2018-06-01', '2018-07-01', '2018-08-01',
# '2018-09-01', '2018-10-01', '2018-11-01', '2018-12-01'],
# dtype='datetime64[ns]', freq='MS')
print(pd.date_range('2018-01-01', '2018-12-31', freq='BMS'))
# DatetimeIndex(['2018-01-01', '2018-02-01', '2018-03-01', '2018-04-02',
# '2018-05-01', '2018-06-01', '2018-07-02', '2018-08-01',
# '2018-09-03', '2018-10-01', '2018-11-01', '2018-12-03'],
# dtype='datetime64[ns]', freq='BMS')
print(pd.date_range('2018-01-01', '2018-12-31', freq='SM'))
# DatetimeIndex(['2018-01-15', '2018-01-31', '2018-02-15', '2018-02-28',
# '2018-03-15', '2018-03-31', '2018-04-15', '2018-04-30',
# '2018-05-15', '2018-05-31', '2018-06-15', '2018-06-30',
# '2018-07-15', '2018-07-31', '2018-08-15', '2018-08-31',
# '2018-09-15', '2018-09-30', '2018-10-15', '2018-10-31',
# '2018-11-15', '2018-11-30', '2018-12-15', '2018-12-31'],
# dtype='datetime64[ns]', freq='SM-15')
print(pd.date_range('2018-01-01', '2018-12-31', freq='SMS'))
# DatetimeIndex(['2018-01-01', '2018-01-15', '2018-02-01', '2018-02-15',
# '2018-03-01', '2018-03-15', '2018-04-01', '2018-04-15',
# '2018-05-01', '2018-05-15', '2018-06-01', '2018-06-15',
# '2018-07-01', '2018-07-15', '2018-08-01', '2018-08-15',
# '2018-09-01', '2018-09-15', '2018-10-01', '2018-10-15',
# '2018-11-01', '2018-11-15', '2018-12-01', '2018-12-15'],
# dtype='datetime64[ns]', freq='SMS-15')
print(pd.date_range('2018-01-01', '2018-12-31', freq='W'))
# DatetimeIndex(['2018-01-07', '2018-01-14', '2018-01-21', '2018-01-28',
# '2018-02-04', '2018-02-11', '2018-02-18', '2018-02-25',
# '2018-03-04', '2018-03-11', '2018-03-18', '2018-03-25',
# '2018-04-01', '2018-04-08', '2018-04-15', '2018-04-22',
# '2018-04-29', '2018-05-06', '2018-05-13', '2018-05-20',
# '2018-05-27', '2018-06-03', '2018-06-10', '2018-06-17',
# '2018-06-24', '2018-07-01', '2018-07-08', '2018-07-15',
# '2018-07-22', '2018-07-29', '2018-08-05', '2018-08-12',
# '2018-08-19', '2018-08-26', '2018-09-02', '2018-09-09',
# '2018-09-16', '2018-09-23', '2018-09-30', '2018-10-07',
# '2018-10-14', '2018-10-21', '2018-10-28', '2018-11-04',
# '2018-11-11', '2018-11-18', '2018-11-25', '2018-12-02',
# '2018-12-09', '2018-12-16', '2018-12-23', '2018-12-30'],
# dtype='datetime64[ns]', freq='W-SUN')
print(pd.date_range('2018-01-01', '2018-12-31', freq='W-WED'))
# DatetimeIndex(['2018-01-03', '2018-01-10', '2018-01-17', '2018-01-24',
# '2018-01-31', '2018-02-07', '2018-02-14', '2018-02-21',
# '2018-02-28', '2018-03-07', '2018-03-14', '2018-03-21',
# '2018-03-28', '2018-04-04', '2018-04-11', '2018-04-18',
# '2018-04-25', '2018-05-02', '2018-05-09', '2018-05-16',
# '2018-05-23', '2018-05-30', '2018-06-06', '2018-06-13',
# '2018-06-20', '2018-06-27', '2018-07-04', '2018-07-11',
# '2018-07-18', '2018-07-25', '2018-08-01', '2018-08-08',
# '2018-08-15', '2018-08-22', '2018-08-29', '2018-09-05',
# '2018-09-12', '2018-09-19', '2018-09-26', '2018-10-03',
# '2018-10-10', '2018-10-17', '2018-10-24', '2018-10-31',
# '2018-11-07', '2018-11-14', '2018-11-21', '2018-11-28',
# '2018-12-05', '2018-12-12', '2018-12-19', '2018-12-26'],
# dtype='datetime64[ns]', freq='W-WED')
print(pd.date_range('2018-01-01', '2018-12-31', freq='QS'))
# DatetimeIndex(['2018-01-01', '2018-04-01', '2018-07-01', '2018-10-01'], dtype='datetime64[ns]', freq='QS-JAN')
print(pd.date_range('2018-01-01', '2018-12-31', freq='QS-FEB'))
# DatetimeIndex(['2018-02-01', '2018-05-01', '2018-08-01', '2018-11-01'], dtype='datetime64[ns]', freq='QS-FEB')
print(pd.date_range('2015-01-01', '2018-12-31', freq='A'))
# DatetimeIndex(['2015-12-31', '2016-12-31', '2017-12-31', '2018-12-31'], dtype='datetime64[ns]', freq='A-DEC')
print(pd.date_range('2015-01-01', '2018-12-31', freq='A-JUL'))
# DatetimeIndex(['2015-07-31', '2016-07-31', '2017-07-31', '2018-07-31'], dtype='datetime64[ns]', freq='A-JUL')
print(pd.date_range('2018-01-01', '2018-12-31', freq='WOM-4FRI'))
# DatetimeIndex(['2018-01-26', '2018-02-23', '2018-03-23', '2018-04-27',
# '2018-05-25', '2018-06-22', '2018-07-27', '2018-08-24',
# '2018-09-28', '2018-10-26', '2018-11-23', '2018-12-28'],
# dtype='datetime64[ns]', freq='WOM-4FRI')
print(pd.date_range('2018-01-01', '2018-12-31', freq='WOM-2MON'))
# DatetimeIndex(['2018-01-08', '2018-02-12', '2018-03-12', '2018-04-09',
# '2018-05-14', '2018-06-11', '2018-07-09', '2018-08-13',
# '2018-09-10', '2018-10-08', '2018-11-12', '2018-12-10'],
# dtype='datetime64[ns]', freq='WOM-2MON')
print(pd.date_range('2018-01-01', '2018-01-02', freq='H'))
# DatetimeIndex(['2018-01-01 00:00:00', '2018-01-01 01:00:00',
# '2018-01-01 02:00:00', '2018-01-01 03:00:00',
# '2018-01-01 04:00:00', '2018-01-01 05:00:00',
# '2018-01-01 06:00:00', '2018-01-01 07:00:00',
# '2018-01-01 08:00:00', '2018-01-01 09:00:00',
# '2018-01-01 10:00:00', '2018-01-01 11:00:00',
# '2018-01-01 12:00:00', '2018-01-01 13:00:00',
# '2018-01-01 14:00:00', '2018-01-01 15:00:00',
# '2018-01-01 16:00:00', '2018-01-01 17:00:00',
# '2018-01-01 18:00:00', '2018-01-01 19:00:00',
# '2018-01-01 20:00:00', '2018-01-01 21:00:00',
# '2018-01-01 22:00:00', '2018-01-01 23:00:00',
# '2018-01-02 00:00:00'],
# dtype='datetime64[ns]', freq='H')
print(pd.date_range('2018-01-01', '2018-12-31', freq='100D'))
# DatetimeIndex(['2018-01-01', '2018-04-11', '2018-07-20', '2018-10-28'], dtype='datetime64[ns]', freq='100D')
print(pd.date_range('2018-01-01', '2018-12-31', freq='100B'))
# DatetimeIndex(['2018-01-01', '2018-05-21', '2018-10-08'], dtype='datetime64[ns]', freq='100B')
print(pd.date_range('2018-01-01', '2018-12-31', freq='10W'))
# DatetimeIndex(['2018-01-07', '2018-03-18', '2018-05-27', '2018-08-05',
# '2018-10-14', '2018-12-23'],
# dtype='datetime64[ns]', freq='10W-SUN')
print(pd.date_range('2018-01-01', '2018-12-31', freq='10W-WED'))
# DatetimeIndex(['2018-01-03', '2018-03-14', '2018-05-23', '2018-08-01',
# '2018-10-10', '2018-12-19'],
# dtype='datetime64[ns]', freq='10W-WED')
print(pd.date_range('2018-01-01', '2018-12-31', freq='2M'))
# DatetimeIndex(['2018-01-31', '2018-03-31', '2018-05-31', '2018-07-31',
# '2018-09-30', '2018-11-30'],
# dtype='datetime64[ns]', freq='2M')
print(pd.date_range('2018-01-01', '2018-01-02', freq='90T'))
# DatetimeIndex(['2018-01-01 00:00:00', '2018-01-01 01:30:00',
# '2018-01-01 03:00:00', '2018-01-01 04:30:00',
# '2018-01-01 06:00:00', '2018-01-01 07:30:00',
# '2018-01-01 09:00:00', '2018-01-01 10:30:00',
# '2018-01-01 12:00:00', '2018-01-01 13:30:00',
# '2018-01-01 15:00:00', '2018-01-01 16:30:00',
# '2018-01-01 18:00:00', '2018-01-01 19:30:00',
# '2018-01-01 21:00:00', '2018-01-01 22:30:00',
# '2018-01-02 00:00:00'],
# dtype='datetime64[ns]', freq='90T')
print(pd.date_range('2018-01-01', '2018-01-10', freq='36H'))
# DatetimeIndex(['2018-01-01 00:00:00', '2018-01-02 12:00:00',
# '2018-01-04 00:00:00', '2018-01-05 12:00:00',
# '2018-01-07 00:00:00', '2018-01-08 12:00:00',
# '2018-01-10 00:00:00'],
# dtype='datetime64[ns]', freq='36H')
print(pd.date_range('2018-01-01', '2018-01-10', freq='1D12H'))
# DatetimeIndex(['2018-01-01 00:00:00', '2018-01-02 12:00:00',
# '2018-01-04 00:00:00', '2018-01-05 12:00:00',
# '2018-01-07 00:00:00', '2018-01-08 12:00:00',
# '2018-01-10 00:00:00'],
# dtype='datetime64[ns]', freq='36H')
print(pd.date_range('2018-01-01', '2018-01-2', freq='30min30S100ms100us'))
# DatetimeIndex([ '2018-01-01 00:00:00', '2018-01-01 00:30:30.100100',
# '2018-01-01 01:01:00.200200', '2018-01-01 01:31:30.300300',
# '2018-01-01 02:02:00.400400', '2018-01-01 02:32:30.500500',
# '2018-01-01 03:03:00.600600', '2018-01-01 03:33:30.700700',
# '2018-01-01 04:04:00.800800', '2018-01-01 04:34:30.900900',
# '2018-01-01 05:05:01.001000', '2018-01-01 05:35:31.101100',
# '2018-01-01 06:06:01.201200', '2018-01-01 06:36:31.301300',
# '2018-01-01 07:07:01.401400', '2018-01-01 07:37:31.501500',
# '2018-01-01 08:08:01.601600', '2018-01-01 08:38:31.701700',
# '2018-01-01 09:09:01.801800', '2018-01-01 09:39:31.901900',
# '2018-01-01 10:10:02.002000', '2018-01-01 10:40:32.102100',
# '2018-01-01 11:11:02.202200', '2018-01-01 11:41:32.302300',
# '2018-01-01 12:12:02.402400', '2018-01-01 12:42:32.502500',
# '2018-01-01 13:13:02.602600', '2018-01-01 13:43:32.702700',
# '2018-01-01 14:14:02.802800', '2018-01-01 14:44:32.902900',
# '2018-01-01 15:15:03.003000', '2018-01-01 15:45:33.103100',
# '2018-01-01 16:16:03.203200', '2018-01-01 16:46:33.303300',
# '2018-01-01 17:17:03.403400', '2018-01-01 17:47:33.503500',
# '2018-01-01 18:18:03.603600', '2018-01-01 18:48:33.703700',
# '2018-01-01 19:19:03.803800', '2018-01-01 19:49:33.903900',
# '2018-01-01 20:20:04.004000', '2018-01-01 20:50:34.104100',
# '2018-01-01 21:21:04.204200', '2018-01-01 21:51:34.304300',
# '2018-01-01 22:22:04.404400', '2018-01-01 22:52:34.504500',
# '2018-01-01 23:23:04.604600', '2018-01-01 23:53:34.704700'],
# dtype='datetime64[ns]', freq='1830100100U')
| 59.402174
| 112
| 0.51226
| 1,866
| 10,930
| 2.98821
| 0.077706
| 0.16571
| 0.167862
| 0.080703
| 0.558106
| 0.38038
| 0.347202
| 0.3316
| 0.315818
| 0.315818
| 0
| 0.498237
| 0.221775
| 10,930
| 183
| 113
| 59.726776
| 0.157301
| 0.840256
| 0
| 0
| 0
| 0
| 0.345556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.041667
| 0
| 0.041667
| 0.958333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
9845560aef9d64957c427aaec43ab3315a4c77b9
| 232
|
py
|
Python
|
Chapter06/class_ex_2.py
|
PacktPublishing/Learning-Python-by-building-games
|
0713e6fc141b2cd201128560ae0c3b689b7d2116
|
[
"MIT"
] | 25
|
2019-09-01T16:19:16.000Z
|
2021-12-20T07:08:35.000Z
|
Chapter06/class_ex_2.py
|
PacktPublishing/Learning-Python-by-building-games.
|
0713e6fc141b2cd201128560ae0c3b689b7d2116
|
[
"MIT"
] | 4
|
2019-08-27T19:45:48.000Z
|
2020-07-24T12:29:56.000Z
|
Chapter06/class_ex_2.py
|
PacktPublishing/Learning-Python-by-building-games
|
0713e6fc141b2cd201128560ae0c3b689b7d2116
|
[
"MIT"
] | 24
|
2019-06-01T18:31:07.000Z
|
2022-03-15T19:24:34.000Z
|
class Bike:
name = ''
color= ' '
price = 0
def info(self, name, color, price):
self.name, self.color, self.price = name,color,price
print("{}: {} and {}".format(self.name,self.color,self.price))
| 25.777778
| 70
| 0.556034
| 30
| 232
| 4.3
| 0.4
| 0.209302
| 0.325581
| 0.263566
| 0.403101
| 0.403101
| 0
| 0
| 0
| 0
| 0
| 0.005882
| 0.267241
| 232
| 8
| 71
| 29
| 0.752941
| 0
| 0
| 0
| 0
| 0
| 0.060345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.714286
| 0.142857
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
984a79e49a46aec9e5322e72d92d8c0b29a6cbd8
| 239
|
py
|
Python
|
verification/flopy/mf6/utils/__init__.py
|
INTERA-Inc/mf6cts
|
13967af777e88b112b1a9026b35841c322d34bf4
|
[
"Unlicense"
] | 351
|
2015-01-03T15:18:48.000Z
|
2022-03-31T09:46:43.000Z
|
verification/flopy/mf6/utils/__init__.py
|
INTERA-Inc/mf6cts
|
13967af777e88b112b1a9026b35841c322d34bf4
|
[
"Unlicense"
] | 1,256
|
2015-01-15T21:10:42.000Z
|
2022-03-31T22:43:06.000Z
|
verification/flopy/mf6/utils/__init__.py
|
INTERA-Inc/mf6cts
|
13967af777e88b112b1a9026b35841c322d34bf4
|
[
"Unlicense"
] | 553
|
2015-01-31T22:46:48.000Z
|
2022-03-31T17:43:35.000Z
|
# imports
from . import createpackages
from .generate_classes import generate_classes
from .binarygrid_util import MfGrdFile
from .postprocessing import get_structured_faceflows, get_residuals
from .lakpak_utils import get_lak_connections
| 34.142857
| 67
| 0.874477
| 30
| 239
| 6.666667
| 0.6
| 0.15
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096234
| 239
| 6
| 68
| 39.833333
| 0.925926
| 0.029289
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
985c9ede57f23b38e9a3ed21fd11816f5e477b8c
| 200
|
py
|
Python
|
pyseries/clustering/__init__.py
|
flaviovdf/pyseries
|
59c8a321790d2398d71305710b7d322ce2d8eaaf
|
[
"BSD-3-Clause"
] | 7
|
2015-04-12T00:27:39.000Z
|
2018-08-10T13:17:48.000Z
|
pyseries/clustering/__init__.py
|
flaviovdf/pyseries
|
59c8a321790d2398d71305710b7d322ce2d8eaaf
|
[
"BSD-3-Clause"
] | null | null | null |
pyseries/clustering/__init__.py
|
flaviovdf/pyseries
|
59c8a321790d2398d71305710b7d322ce2d8eaaf
|
[
"BSD-3-Clause"
] | 4
|
2015-04-15T03:14:30.000Z
|
2018-11-09T22:06:32.000Z
|
# -*- coding: utf8
from __future__ import division, print_function
'''
Clustering
==========
Contains the following clustering methods:
* Yang2011
* Ahmed2012
* Kmeans (from sklearn)
'''
| 16.666667
| 47
| 0.665
| 19
| 200
| 6.736842
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055901
| 0.195
| 200
| 11
| 48
| 18.181818
| 0.73913
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 5
|
985cb525312d23375e32943a1aaab14ac8240a12
| 228
|
py
|
Python
|
pybamm/models/submodels/electrolyte_conductivity/surface_potential_form/__init__.py
|
DrSOKane/PyBaMM
|
903b4a05ef5a4f91633e990d4aec12c53df723a2
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/electrolyte_conductivity/surface_potential_form/__init__.py
|
DrSOKane/PyBaMM
|
903b4a05ef5a4f91633e990d4aec12c53df723a2
|
[
"BSD-3-Clause"
] | null | null | null |
pybamm/models/submodels/electrolyte_conductivity/surface_potential_form/__init__.py
|
DrSOKane/PyBaMM
|
903b4a05ef5a4f91633e990d4aec12c53df723a2
|
[
"BSD-3-Clause"
] | null | null | null |
# Full order models
from .full_surface_form_conductivity import FullAlgebraic, FullDifferential
# Leading-order models
from .leading_surface_form_conductivity import (
LeadingOrderDifferential,
LeadingOrderAlgebraic,
)
| 25.333333
| 75
| 0.833333
| 22
| 228
| 8.363636
| 0.590909
| 0.119565
| 0.163043
| 0.315217
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122807
| 228
| 8
| 76
| 28.5
| 0.92
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
987fa5eba5b6fbe62e3386ca77d344aeb87676d1
| 254
|
py
|
Python
|
data/snippets/py/KINDA_DICT_PROXY_CODE.py
|
netcharm/ironclad
|
5892c43b540b216d638e0fed2e6cf3fd8289fdfc
|
[
"PSF-2.0"
] | 58
|
2015-03-02T15:13:45.000Z
|
2021-07-31T16:10:13.000Z
|
data/snippets/py/KINDA_DICT_PROXY_CODE.py
|
netcharm/ironclad
|
5892c43b540b216d638e0fed2e6cf3fd8289fdfc
|
[
"PSF-2.0"
] | 4
|
2015-01-02T11:45:46.000Z
|
2022-01-17T14:45:33.000Z
|
data/snippets/py/KINDA_DICT_PROXY_CODE.py
|
netcharm/ironclad
|
5892c43b540b216d638e0fed2e6cf3fd8289fdfc
|
[
"PSF-2.0"
] | 11
|
2015-01-22T11:56:32.000Z
|
2020-06-02T01:40:58.000Z
|
from UserDict import IterableUserDict
class KindaDictProxy(IterableUserDict):
def __setitem__(self, key, value):
raise TypeError('read-only dict')
def __delitem__(self, key):
raise TypeError('read-only dict')
| 21.166667
| 41
| 0.661417
| 26
| 254
| 6.153846
| 0.653846
| 0.0875
| 0.225
| 0.275
| 0.325
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.251969
| 254
| 11
| 42
| 23.090909
| 0.842105
| 0
| 0
| 0.333333
| 0
| 0
| 0.110672
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
98af540e47a1e6d15cc74b357e83ac3b9d51e5c3
| 628
|
py
|
Python
|
backend/passenger/models.py
|
haridevbabu/bookcab
|
70459dec56f47428d6e5b9e4d2d9af0e64a400dc
|
[
"MIT"
] | null | null | null |
backend/passenger/models.py
|
haridevbabu/bookcab
|
70459dec56f47428d6e5b9e4d2d9af0e64a400dc
|
[
"MIT"
] | null | null | null |
backend/passenger/models.py
|
haridevbabu/bookcab
|
70459dec56f47428d6e5b9e4d2d9af0e64a400dc
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
"""
Store passenger details
"""
class Passenger(models.Model):
first_name = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
email = models.EmailField(unique=True)
password = models.CharField(max_length=200)
mobile = models.IntegerField(unique=True)
class Driver(models.Model):
"""
Storing driver details
"""
first_name = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
mobile = models.IntegerField(unique=True)
car_no = models.CharField(max_length=80, unique=True)
| 27.304348
| 57
| 0.726115
| 81
| 628
| 5.493827
| 0.395062
| 0.202247
| 0.242697
| 0.323596
| 0.521348
| 0.310112
| 0.310112
| 0.310112
| 0.310112
| 0.310112
| 0
| 0.024715
| 0.16242
| 628
| 22
| 58
| 28.545455
| 0.821293
| 0.076433
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.166667
| 0.083333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
98b0ba050268437d198ba626e27dde8852f612b6
| 128
|
py
|
Python
|
janusbackup/core/utils/__init__.py
|
NikitosnikN/janus-backup
|
413d365663b532a0611575be16ea0a4f0c7ffd20
|
[
"MIT"
] | null | null | null |
janusbackup/core/utils/__init__.py
|
NikitosnikN/janus-backup
|
413d365663b532a0611575be16ea0a4f0c7ffd20
|
[
"MIT"
] | null | null | null |
janusbackup/core/utils/__init__.py
|
NikitosnikN/janus-backup
|
413d365663b532a0611575be16ea0a4f0c7ffd20
|
[
"MIT"
] | null | null | null |
from .catch_job_exception import catch_exceptions
from .fernet import FernetWrapper
from .projects_loader import ProjectsLoader
| 32
| 49
| 0.882813
| 16
| 128
| 6.8125
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 128
| 3
| 50
| 42.666667
| 0.939655
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7fb7c6ab593dff9ceb7dc8e524e19563e26c1410
| 136
|
py
|
Python
|
app/models/Country/methods/__init__.py
|
msolorio/flask_world_api
|
a2d5394618b736aa7d5d5e75a422dbe9e5713533
|
[
"MIT"
] | 1
|
2022-02-24T04:37:04.000Z
|
2022-02-24T04:37:04.000Z
|
app/models/Country/methods/__init__.py
|
msolorio/flask_world_api
|
a2d5394618b736aa7d5d5e75a422dbe9e5713533
|
[
"MIT"
] | null | null | null |
app/models/Country/methods/__init__.py
|
msolorio/flask_world_api
|
a2d5394618b736aa7d5d5e75a422dbe9e5713533
|
[
"MIT"
] | null | null | null |
from .delete import delete
from .create import create
from .find import find
from .update import update
from .find_many import find_many
| 27.2
| 32
| 0.823529
| 22
| 136
| 5
| 0.318182
| 0.145455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139706
| 136
| 5
| 32
| 27.2
| 0.940171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f6f081040ece6e8d92a6aa2568bf678236698b01
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/charset_normalizer/cd.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | 1
|
2022-02-22T04:49:18.000Z
|
2022-02-22T04:49:18.000Z
|
venv/lib/python3.8/site-packages/charset_normalizer/cd.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | null | null | null |
venv/lib/python3.8/site-packages/charset_normalizer/cd.py
|
GiulianaPola/select_repeats
|
17a0d053d4f874e42cf654dd142168c2ec8fbd11
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/c5/d3/2d/f20956a75b97f389c124cca78c1aabe20f79d6ad234d0d415fcd9324d1
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.395833
| 0
| 96
| 1
| 96
| 96
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f6f410c8815cf3291bc78622042e3a8da21e8538
| 49
|
py
|
Python
|
plugins/reminder.py
|
ryoung2512/bot
|
a0d42152410086630a03a3fdb45436935cb48402
|
[
"MIT"
] | null | null | null |
plugins/reminder.py
|
ryoung2512/bot
|
a0d42152410086630a03a3fdb45436935cb48402
|
[
"MIT"
] | null | null | null |
plugins/reminder.py
|
ryoung2512/bot
|
a0d42152410086630a03a3fdb45436935cb48402
|
[
"MIT"
] | null | null | null |
def reminder(args):
print("in reminder.py")
| 12.25
| 27
| 0.653061
| 7
| 49
| 4.571429
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 49
| 3
| 28
| 16.333333
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.291667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f6ffb283b80c8402ee51f15c64cf68510541439e
| 526
|
py
|
Python
|
dashboard/views/_positions/_non_ec/_membership_development_chair.py
|
beta-nu-theta-chi/ox-dashboard
|
842d86a381f26159b2c5bad39a95169496832023
|
[
"MIT"
] | null | null | null |
dashboard/views/_positions/_non_ec/_membership_development_chair.py
|
beta-nu-theta-chi/ox-dashboard
|
842d86a381f26159b2c5bad39a95169496832023
|
[
"MIT"
] | 70
|
2016-11-16T18:49:02.000Z
|
2021-04-26T00:47:18.000Z
|
dashboard/views/_positions/_non_ec/_membership_development_chair.py
|
beta-nu-theta-chi/ox-dashboard
|
842d86a381f26159b2c5bad39a95169496832023
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from dashboard.models import Position
from dashboard.utils import verify_position
@verify_position([Position.PositionChoices.MEMBERSHIP_DEVELOPMENT_CHAIR, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
def memdev_c(request):
context = {
'position': Position.objects.get(title=Position.PositionChoices.MEMBERSHIP_DEVELOPMENT_CHAIR)
}
return render(request, 'membership-development-chair.html', context)
| 40.461538
| 184
| 0.819392
| 56
| 526
| 7.553571
| 0.482143
| 0.271868
| 0.184397
| 0.208038
| 0.231678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096958
| 526
| 12
| 185
| 43.833333
| 0.890526
| 0
| 0
| 0
| 0
| 0
| 0.077947
| 0.062738
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.333333
| 0
| 0.555556
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
63d7887fe865f6e20c174b48d999d55d0c3b35a6
| 169
|
py
|
Python
|
uploads/core/admin.py
|
emawind84/rrwebtv
|
ae22cd39ea430aed0de2b852e40c309465a7237b
|
[
"MIT"
] | null | null | null |
uploads/core/admin.py
|
emawind84/rrwebtv
|
ae22cd39ea430aed0de2b852e40c309465a7237b
|
[
"MIT"
] | 2
|
2020-06-05T20:13:36.000Z
|
2021-06-10T21:18:43.000Z
|
uploads/core/admin.py
|
emawind84/rrwebtv
|
ae22cd39ea430aed0de2b852e40c309465a7237b
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from uploads.core.models import Document, Replay
# Register your models here.
admin.site.register(Document)
admin.site.register(Replay)
| 28.166667
| 48
| 0.822485
| 24
| 169
| 5.791667
| 0.583333
| 0.129496
| 0.244604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094675
| 169
| 6
| 49
| 28.166667
| 0.908497
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
63df7180057bc70b1e084f211cbd1e13ede016b9
| 34
|
py
|
Python
|
app/core/__init__.py
|
tiberiuichim/nlp-service
|
6bb641de532afb8c001d40bf30caadcbd227a91d
|
[
"MIT"
] | 2
|
2021-09-07T13:13:24.000Z
|
2021-09-09T08:00:21.000Z
|
app/core/__init__.py
|
tiberiuichim/nlp-service
|
6bb641de532afb8c001d40bf30caadcbd227a91d
|
[
"MIT"
] | null | null | null |
app/core/__init__.py
|
tiberiuichim/nlp-service
|
6bb641de532afb8c001d40bf30caadcbd227a91d
|
[
"MIT"
] | null | null | null |
from . import components # no-qa
| 17
| 33
| 0.705882
| 5
| 34
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 34
| 1
| 34
| 34
| 0.888889
| 0.147059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
63e019f275ae08ef2408eea20b613faf36b27ad3
| 88
|
py
|
Python
|
policykit/integrations/opencollective/__init__.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 78
|
2020-05-08T17:25:38.000Z
|
2022-01-13T05:44:50.000Z
|
policykit/integrations/opencollective/__init__.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 302
|
2020-02-20T07:04:30.000Z
|
2022-02-25T17:44:23.000Z
|
policykit/integrations/opencollective/__init__.py
|
mashton/policyk
|
623523d76d63c06b6d559ad7b477d80512fbd2e7
|
[
"MIT"
] | 13
|
2020-04-17T19:44:26.000Z
|
2022-02-25T17:18:04.000Z
|
default_app_config = 'integrations.opencollective.apps.OpencollectiveIntegrationConfig'
| 44
| 87
| 0.897727
| 7
| 88
| 11
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034091
| 88
| 1
| 88
| 88
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0.727273
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
63f18910a0164a668b903d7bb0a849e429ad33b6
| 208
|
py
|
Python
|
bluebottle/exports/permissions.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 10
|
2015-05-28T18:26:40.000Z
|
2021-09-06T10:07:03.000Z
|
bluebottle/exports/permissions.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 762
|
2015-01-15T10:00:59.000Z
|
2022-03-31T15:35:14.000Z
|
bluebottle/exports/permissions.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 9
|
2015-02-20T13:19:30.000Z
|
2022-03-08T14:09:17.000Z
|
from django.conf import settings
import rules
@rules.predicate
def permission(*args, **kwargs):
return settings.EXPORTDB_PERMISSION(*args, **kwargs)
rules.add_rule('exportdb.can_export', permission)
| 17.333333
| 56
| 0.769231
| 26
| 208
| 6.038462
| 0.653846
| 0.178344
| 0.254777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 208
| 11
| 57
| 18.909091
| 0.853261
| 0
| 0
| 0
| 0
| 0
| 0.091346
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
c3ce990be29349cd519d604cecb9c3e308088ec0
| 89
|
py
|
Python
|
swagger_server/api/__init__.py
|
NIEHS/pluggable-search-props-and-metadata
|
d923d1180b997f3cea1822d503155c43aead927e
|
[
"BSD-3-Clause"
] | null | null | null |
swagger_server/api/__init__.py
|
NIEHS/pluggable-search-props-and-metadata
|
d923d1180b997f3cea1822d503155c43aead927e
|
[
"BSD-3-Clause"
] | 2
|
2020-04-16T16:20:54.000Z
|
2021-08-06T12:11:43.000Z
|
swagger_server/api/__init__.py
|
NIEHS/pluggable-search-props-and-metadata
|
d923d1180b997f3cea1822d503155c43aead927e
|
[
"BSD-3-Clause"
] | 1
|
2022-02-24T15:36:57.000Z
|
2022-02-24T15:36:57.000Z
|
from __future__ import absolute_import
from swagger_server.api.api_utils import APIUtils
| 29.666667
| 49
| 0.88764
| 13
| 89
| 5.538462
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089888
| 89
| 2
| 50
| 44.5
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
c3d52ddb1b838cc42d506b6db2800e617d52a2e1
| 178
|
py
|
Python
|
backend/users/serializers/__init__.py
|
AurelienGasser/substra-backend
|
c963f9b0521c7ebd878ea42fd9be9acfddf9f61d
|
[
"Apache-2.0"
] | 37
|
2019-10-25T13:31:20.000Z
|
2021-05-29T05:27:50.000Z
|
backend/users/serializers/__init__.py
|
AurelienGasser/substra-backend
|
c963f9b0521c7ebd878ea42fd9be9acfddf9f61d
|
[
"Apache-2.0"
] | 217
|
2019-10-29T16:01:03.000Z
|
2021-05-25T13:06:29.000Z
|
backend/users/serializers/__init__.py
|
AurelienGasser/substra-backend
|
c963f9b0521c7ebd878ea42fd9be9acfddf9f61d
|
[
"Apache-2.0"
] | 13
|
2019-10-25T13:46:36.000Z
|
2021-03-16T16:59:04.000Z
|
# encoding: utf-8
from .user import CustomTokenObtainPairSerializer, CustomTokenRefreshSerializer
__all__ = ['CustomTokenObtainPairSerializer', 'CustomTokenRefreshSerializer']
| 29.666667
| 79
| 0.848315
| 11
| 178
| 13.363636
| 0.818182
| 0.802721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006098
| 0.078652
| 178
| 5
| 80
| 35.6
| 0.890244
| 0.08427
| 0
| 0
| 0
| 0
| 0.36646
| 0.36646
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
c3fdbf7e34aeff7ba68a4a93b472d603f1a54de9
| 426
|
py
|
Python
|
python/kata/8-kyu/Square(n) Sum/main.py
|
Carlososuna11/codewars-handbook
|
a0e7c9ac5ad19cfaed3ad463c04616daa3fed82e
|
[
"MIT"
] | null | null | null |
python/kata/8-kyu/Square(n) Sum/main.py
|
Carlososuna11/codewars-handbook
|
a0e7c9ac5ad19cfaed3ad463c04616daa3fed82e
|
[
"MIT"
] | null | null | null |
python/kata/8-kyu/Square(n) Sum/main.py
|
Carlososuna11/codewars-handbook
|
a0e7c9ac5ad19cfaed3ad463c04616daa3fed82e
|
[
"MIT"
] | null | null | null |
import codewars_test as test
from solution import square_sum
@test.describe("Fixed Tests")
def basic_tests():
@test.it('Basic Test Cases')
def basic_test_cases():
test.assert_equals(square_sum([1,2]), 5)
test.assert_equals(square_sum([0, 3, 4, 5]), 50)
test.assert_equals(square_sum([]), 0)
test.assert_equals(square_sum([-1,-2]), 5)
test.assert_equals(square_sum([-1,0,1]), 2)
| 35.5
| 56
| 0.657277
| 67
| 426
| 3.955224
| 0.358209
| 0.203774
| 0.301887
| 0.415094
| 0.50566
| 0.50566
| 0.4
| 0.4
| 0.4
| 0.4
| 0
| 0.048851
| 0.183099
| 426
| 12
| 57
| 35.5
| 0.712644
| 0
| 0
| 0
| 0
| 0
| 0.063232
| 0
| 0
| 0
| 0
| 0
| 0.454545
| 1
| 0.181818
| true
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7f1e619db1edb18cd7d0d1379e24bcf1d212246c
| 368
|
py
|
Python
|
src/baseapp/appinit.py
|
jpictor/tlsmd
|
dc2c593aebedbe0dc822d467e7b0fb7407762d61
|
[
"Artistic-1.0"
] | null | null | null |
src/baseapp/appinit.py
|
jpictor/tlsmd
|
dc2c593aebedbe0dc822d467e7b0fb7407762d61
|
[
"Artistic-1.0"
] | null | null | null |
src/baseapp/appinit.py
|
jpictor/tlsmd
|
dc2c593aebedbe0dc822d467e7b0fb7407762d61
|
[
"Artistic-1.0"
] | null | null | null |
## configure logging
import logging
import logging.config
from django.conf import settings
logging.config.fileConfig(settings.LOGGING_CONFIG_FILE)
## configure Celery system
from . import celery_ext
logging.getLogger().setLevel(settings.LOG_LEVEL)
logging.debug('logging system configured for appname=%s using %s' % (settings.APPNAME, settings.LOGGING_CONFIG_FILE))
| 30.666667
| 117
| 0.820652
| 48
| 368
| 6.166667
| 0.479167
| 0.175676
| 0.212838
| 0.168919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.089674
| 368
| 11
| 118
| 33.454545
| 0.883582
| 0.111413
| 0
| 0
| 0
| 0
| 0.152174
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.571429
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
617c27b360f0250e0b3e21439208e5a5e28b34a0
| 256
|
py
|
Python
|
vitality/invitation.py
|
santosderek/Vitality
|
cc90d3b561c3b75f000288345d7a1442fb2b3fec
|
[
"MIT"
] | 1
|
2020-09-18T17:08:53.000Z
|
2020-09-18T17:08:53.000Z
|
vitality/invitation.py
|
santosderek/Vitality
|
cc90d3b561c3b75f000288345d7a1442fb2b3fec
|
[
"MIT"
] | 91
|
2020-09-25T23:12:58.000Z
|
2020-12-19T04:57:50.000Z
|
vitality/invitation.py
|
santosderek/4155-Team
|
cc90d3b561c3b75f000288345d7a1442fb2b3fec
|
[
"MIT"
] | 3
|
2020-09-26T22:35:42.000Z
|
2020-10-13T18:22:22.000Z
|
class Invitation ():
def __init__(self, _id, sender, recipient):
self._id = _id
self.sender = sender
self.recipient = recipient
def __repr__(self):
return f'Invitation({self._id}, {self.sender}, {self.recipient})'
| 25.6
| 73
| 0.617188
| 29
| 256
| 5.034483
| 0.37931
| 0.123288
| 0.164384
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253906
| 256
| 9
| 74
| 28.444444
| 0.764398
| 0
| 0
| 0
| 0
| 0
| 0.214844
| 0.085938
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6196fcf092caef29054c2d7708d02e4927450841
| 294
|
py
|
Python
|
Beginner/12/Project/art.py
|
Matthew1906/100DaysOfPython
|
94ffff8f5535ce5d574f49c0d7971d64a4575aad
|
[
"MIT"
] | 1
|
2021-12-25T02:19:18.000Z
|
2021-12-25T02:19:18.000Z
|
Beginner/12/Project/art.py
|
Matthew1906/100DaysOfPython
|
94ffff8f5535ce5d574f49c0d7971d64a4575aad
|
[
"MIT"
] | null | null | null |
Beginner/12/Project/art.py
|
Matthew1906/100DaysOfPython
|
94ffff8f5535ce5d574f49c0d7971d64a4575aad
|
[
"MIT"
] | 1
|
2021-11-25T10:31:47.000Z
|
2021-11-25T10:31:47.000Z
|
logo = '''
_____ _ _ _____ _
| __|_ _ ___ ___ ___ | |_| |_ ___ | | |_ _ _____| |_ ___ ___
| | | | | -_|_ -|_ -| | _| | -_| | | | | | | | . | -_| _|
|_____|___|___|___|___| |_| |_|_|___| |_|___|___|_|_|_|___|___|_|
'''
| 49
| 69
| 0.326531
| 1
| 294
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.442177
| 294
| 6
| 70
| 49
| 0.02439
| 0
| 0
| 0
| 0
| 0.5
| 0.952542
| 0.169492
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
61a37535a3f1d839a6ceb2b7c4a762e302ce5e8d
| 57
|
py
|
Python
|
fdp/api/__init__.py
|
cffbots/fairdatapoint
|
6142b31408b5746d1a7e9f59e61735b7ad8bfde9
|
[
"Apache-2.0"
] | 9
|
2020-03-27T12:58:51.000Z
|
2021-01-21T16:22:46.000Z
|
fdp/api/__init__.py
|
MaastrichtU-IDS/fairdatapoint
|
f9f38903a629acbdb74a6a20014ac424cc3d3206
|
[
"Apache-2.0"
] | 26
|
2016-05-26T22:22:34.000Z
|
2020-02-13T07:12:37.000Z
|
fdp/api/__init__.py
|
MaastrichtU-IDS/fairdatapoint
|
f9f38903a629acbdb74a6a20014ac424cc3d3206
|
[
"Apache-2.0"
] | 4
|
2020-06-09T18:37:33.000Z
|
2020-12-16T08:05:01.000Z
|
from .metadata import FDP, Catalog, Dataset, Distribution
| 57
| 57
| 0.824561
| 7
| 57
| 6.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105263
| 57
| 1
| 57
| 57
| 0.921569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
61adaeafe6e61f12c8503d45f64a149d1d790745
| 97
|
py
|
Python
|
python/simple/ldap.py
|
mypaceshun/practice
|
2f747eca1df96d65bda57cc9f02bbfed6ae0defc
|
[
"MIT"
] | null | null | null |
python/simple/ldap.py
|
mypaceshun/practice
|
2f747eca1df96d65bda57cc9f02bbfed6ae0defc
|
[
"MIT"
] | null | null | null |
python/simple/ldap.py
|
mypaceshun/practice
|
2f747eca1df96d65bda57cc9f02bbfed6ae0defc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vi: set expandtab shiftwidth=4 :
import libldap
| 16.166667
| 34
| 0.649485
| 14
| 97
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024691
| 0.164948
| 97
| 5
| 35
| 19.4
| 0.753086
| 0.773196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
61adf885adf555e9dab58725f4735fdb37949b6b
| 145
|
py
|
Python
|
app.py
|
alphagov-mirror/cyber-security-auto-snyk
|
59bea6174778677fef7848cd7e79b4574a9ebc96
|
[
"MIT"
] | 1
|
2019-11-15T16:59:27.000Z
|
2019-11-15T16:59:27.000Z
|
app.py
|
alphagov-mirror/cyber-security-auto-snyk
|
59bea6174778677fef7848cd7e79b4574a9ebc96
|
[
"MIT"
] | null | null | null |
app.py
|
alphagov-mirror/cyber-security-auto-snyk
|
59bea6174778677fef7848cd7e79b4574a9ebc96
|
[
"MIT"
] | 2
|
2019-08-29T14:02:19.000Z
|
2021-04-10T19:32:18.000Z
|
import fire
#from classes.github_auditor import GithubAuditor
from classes.snyker import Snyker
if __name__ == '__main__':
fire.Fire(Snyker)
| 24.166667
| 49
| 0.793103
| 19
| 145
| 5.578947
| 0.578947
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131034
| 145
| 5
| 50
| 29
| 0.84127
| 0.331034
| 0
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
61b7ec01b5c82cf95c492f40776227ffc561ee33
| 99
|
py
|
Python
|
test_pip/__init__.py
|
sp1020/test_pip
|
c99a6b5986eeef38acfdea7d63907f0bff74bebf
|
[
"MIT"
] | null | null | null |
test_pip/__init__.py
|
sp1020/test_pip
|
c99a6b5986eeef38acfdea7d63907f0bff74bebf
|
[
"MIT"
] | null | null | null |
test_pip/__init__.py
|
sp1020/test_pip
|
c99a6b5986eeef38acfdea7d63907f0bff74bebf
|
[
"MIT"
] | null | null | null |
def test():
print('software installed')
print('a new version')
print('version update')
| 19.8
| 31
| 0.636364
| 12
| 99
| 5.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212121
| 99
| 4
| 32
| 24.75
| 0.807692
| 0
| 0
| 0
| 0
| 0
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0
| 0.25
| 0.75
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f61075bb6168bd3e89e9cbb91f5afd6b945f4043
| 35
|
py
|
Python
|
prueba_zxventures/api/migrations/__init__.py
|
frsepulv/prueba_zxventures
|
f3584b0ae03a60d319bc16020162dae9457bf3ab
|
[
"MIT"
] | null | null | null |
prueba_zxventures/api/migrations/__init__.py
|
frsepulv/prueba_zxventures
|
f3584b0ae03a60d319bc16020162dae9457bf3ab
|
[
"MIT"
] | null | null | null |
prueba_zxventures/api/migrations/__init__.py
|
frsepulv/prueba_zxventures
|
f3584b0ae03a60d319bc16020162dae9457bf3ab
|
[
"MIT"
] | null | null | null |
"""Migraciones de base de datos."""
| 35
| 35
| 0.685714
| 5
| 35
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.774194
| 0.828571
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f6527bcbb49cc452c9ada6c6d82f2b589c59151d
| 620
|
py
|
Python
|
label_studio/io_storages/models.py
|
xhuaustc/label-studio
|
b787824a9e16f488a9b4cd2cef83e1ac526a64f3
|
[
"Apache-2.0"
] | 3
|
2021-07-16T03:48:21.000Z
|
2022-01-10T04:58:25.000Z
|
label_studio/io_storages/models.py
|
xhuaustc/label-studio
|
b787824a9e16f488a9b4cd2cef83e1ac526a64f3
|
[
"Apache-2.0"
] | 6
|
2022-02-21T15:19:35.000Z
|
2022-03-07T15:25:16.000Z
|
label_studio/io_storages/models.py
|
xhuaustc/label-studio
|
b787824a9e16f488a9b4cd2cef83e1ac526a64f3
|
[
"Apache-2.0"
] | 1
|
2021-07-29T12:53:34.000Z
|
2021-07-29T12:53:34.000Z
|
"""This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
from .azure_blob.models import AzureBlobImportStorage, AzureBlobImportStorageLink, AzureBlobExportStorage, AzureBlobExportStorageLink
from .s3.models import S3ImportStorage, S3ImportStorageLink, S3ExportStorage, S3ExportStorageLink
from .gcs.models import GCSImportStorage, GCSImportStorageLink, GCSExportStorage, GCSExportStorageLink
from .redis.models import RedisImportStorage, RedisImportStorageLink, RedisExportStorage, RedisExportStorageLink
| 103.333333
| 168
| 0.862903
| 62
| 620
| 8.612903
| 0.758065
| 0.089888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012411
| 0.090323
| 620
| 6
| 169
| 103.333333
| 0.934397
| 0.266129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f654e946fd96fdf7369bef8eb1ca529dcb63fba9
| 48,770
|
py
|
Python
|
fn_mcafee_atd/fn_mcafee_atd/util/customize.py
|
esirt14/resilient-community-apps
|
4925ebd5ce8762717af76e47b64faa3bb341c922
|
[
"MIT"
] | null | null | null |
fn_mcafee_atd/fn_mcafee_atd/util/customize.py
|
esirt14/resilient-community-apps
|
4925ebd5ce8762717af76e47b64faa3bb341c922
|
[
"MIT"
] | null | null | null |
fn_mcafee_atd/fn_mcafee_atd/util/customize.py
|
esirt14/resilient-community-apps
|
4925ebd5ce8762717af76e47b64faa3bb341c922
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_mcafee_atd"""
from __future__ import print_function
from resilient_circuits.util import *
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Function inputs:
# artifact_id
# artifact_value
# attachment_id
# incident_id
# mcafee_atd_report_type
# mcafee_atd_url_submit_type
# task_id
# Message Destinations:
# mcafee_atd_message_destination
# Functions:
# mcafee_atd_analyze_file
# mcafee_atd_analyze_url
# Workflows:
# mcafee_atd_analyze_artifact_file
# mcafee_atd_analyze_attachment
# mcafee_atd_analyze_url
# Rules:
# (Example) McAfee Analyze URL
# (Example) McAfee ATD Analyze Artifact File
# (Example) McAfee ATD Analyze Attachment
yield ImportDefinition(u"""
eyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbeyJwcm9ncmFtbWF0aWNfbmFtZSI6ICJt
Y2FmZWVfYXRkX2FuYWx5emVfYXR0YWNobWVudCIsICJvYmplY3RfdHlwZSI6ICJhdHRhY2htZW50
IiwgImV4cG9ydF9rZXkiOiAibWNhZmVlX2F0ZF9hbmFseXplX2F0dGFjaG1lbnQiLCAidXVpZCI6
ICI5ZTRjNDIwZS0yMzY5LTQwY2EtYWEzZC1iMzRjN2M4NjE2M2IiLCAibGFzdF9tb2RpZmllZF9i
eSI6ICJhZG1pbkBjbzNzeXMuY29tIiwgIm5hbWUiOiAiKEV4YW1wbGUpIE1jQWZlZSBBVEQgQW5h
bHl6ZSBBdHRhY2htZW50IiwgImNvbnRlbnQiOiB7InhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4w
XCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9t
Zy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3
dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cu
b21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jl
c2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAx
L1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1h
LWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0
XCI+PHByb2Nlc3MgaWQ9XCJtY2FmZWVfYXRkX2FuYWx5emVfYXR0YWNobWVudFwiIGlzRXhlY3V0
YWJsZT1cInRydWVcIiBuYW1lPVwiKEV4YW1wbGUpIE1jQWZlZSBBVEQgQW5hbHl6ZSBBdHRhY2ht
ZW50XCI+PGRvY3VtZW50YXRpb24+Q2FsbHMgdGhlIE1jQWZlZSBBVEQgQW5hbHl6ZSBGaWxlIGZ1
bmN0aW9uIG9uIGFuIGF0dGFjaG1lbnQ8L2RvY3VtZW50YXRpb24+PHN0YXJ0RXZlbnQgaWQ9XCJT
dGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzA0YTQ1NWo8L291dGdv
aW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18wZDVta3g4XCIg
bmFtZT1cIk1jQWZlZSBBVEQgQW5hbHl6ZSBGaWxlXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlv
blwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI1MDFlYjQ5
Yi0xY2YxLTRmNTQtYmM1MC0xMzBkNDJjMDM1YWNcIj57XCJpbnB1dHNcIjp7XCIwYTI5NGNhNS04
MGE1LTQwNWEtYTBiNS0zZThkOWI2NzE3MjdcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNcIixc
InN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJzZWxlY3RfdmFsdWVc
IjpcImI3ZWNhMWVmLTZjNTgtNGJiOC05NzZlLTIwNjYzMjYyYWNlMVwifX19LFwicHJlX3Byb2Nl
c3Npbmdfc2NyaXB0XCI6XCJpbnB1dHMuaW5jaWRlbnRfaWQgPSBpbmNpZGVudC5pZFxcbmlucHV0
cy5hdHRhY2htZW50X2lkID0gYXR0YWNobWVudC5pZFxcbmlmIHRhc2sgaXMgbm90IE5vbmU6XFxu
ICBpbnB1dHMudGFza19pZCA9IHRhc2suaWRcIixcInJlc3VsdF9uYW1lXCI6XCJcIixcInBvc3Rf
cHJvY2Vzc2luZ19zY3JpcHRcIjpcIlxcXCJcXFwiXFxcIlxcbkV4YW1wbGUgcmV0dXJuZWQgcmVz
dWx0c1xcblxcblxcbnsgIFxcbiAgIFxcXCJJbnB1dHNcXFwiOnsgIFxcbiAgICAgIFxcXCJtYWNm
ZWVfYXRkX3JlcG9ydF90eXBlXFxcIjpcXFwicGRmXFxcIixcXG4gICAgICBcXFwiYXR0YWNobWVu
dF9pZFxcXCI6NTAsXFxuICAgICAgXFxcImluY2lkZW50X2lkXFxcIjoyMDk5XFxuICAgfSxcXG4g
ICBcXFwiUnVuIFRpbWVcXFwiOlxcXCI2Ni44NzI3NTkxMDM4XFxcIixcXG4gICBcXFwiU3VtbWFy
eVxcXCI6eyAgXFxuICAgICAgXFxcIkpTT052ZXJzaW9uXFxcIjpcXFwiMS4wMDJcXFwiLFxcbiAg
ICAgIFxcXCJTdWJqZWN0XFxcIjp7ICBcXG4gICAgICAgICBcXFwic2l6ZVxcXCI6XFxcIjE2Nzk0
MVxcXCIsXFxuICAgICAgICAgXFxcInNoYS0xXFxcIjpcXFwiQUY1RkQ4RjEwRjZCMkJENTZGMUQ0
RDE1QjZBODk1Qjk0NDg1QURGNFxcXCIsXFxuICAgICAgICAgXFxcIlRpbWVzdGFtcFxcXCI6XFxc
IjIwMTgtMDUtMTUgMTk6MzQ6MDlcXFwiLFxcbiAgICAgICAgIFxcXCJGaWxlVHlwZVxcXCI6XFxc
IjUxMlxcXCIsXFxuICAgICAgICAgXFxcInNoYS0yNTZcXFwiOlxcXCIyMjUyREMzRkFERTFGM0RG
MERFRDhDRUQyNUI3MUQ2QjM0REY2M0QwNEI5QzAwRTlBOUU0RjkxQjA1Q0Y1MUU1XFxcIixcXG4g
ICAgICAgICBcXFwicGFyZW50X2FyY2hpdmVcXFwiOlxcXCJOb3QgQXZhaWxhYmxlXFxcIixcXG4g
ICAgICAgICBcXFwibWQ1XFxcIjpcXFwiNTcwRTQ4MUMyRTQ1REYxOTE4QzUzNEU2M0NBNDMxODBc
XFwiLFxcbiAgICAgICAgIFxcXCJUeXBlXFxcIjpcXFwiUEUzMiBleGVjdXRhYmxlIChHVUkpIElu
dGVsIDgwMzg2XFxcIixcXG4gICAgICAgICBcXFwiTmFtZVxcXCI6XFxcIjIyNTJEQzNGQURFMUYz
REYwREVEOENFRDI1QjcxRDZCMzRERjYzRDA0QjlDMDBFOUE5RTRGOTFCMDVDRjUxRTUuZXhlXFxc
IlxcbiAgICAgIH0sXFxuICAgICAgXFxcIlByb2Nlc3NcXFwiOlsgIFxcbiAgICAgICAgIHsgIFxc
biAgICAgICAgICAgIFxcXCJSZWFzb25cXFwiOlxcXCJwcm9jZXNzZWQgYnkgZG93biBzZWxlY3Rv
cnNcXFwiLFxcbiAgICAgICAgICAgIFxcXCJOYW1lXFxcIjpcXFwiMjI1MkRDM0ZBREUxRjNERjBE
RUQ4Q0VEMjVCNzFENkIzNERGNjNEMDRCOUMwMEU5QTlFNEY5MUIwNUNGNTFFNS5leGVcXFwiLFxc
biAgICAgICAgICAgIFxcXCJTZXZlcml0eVxcXCI6XFxcIjVcXFwiXFxuICAgICAgICAgfVxcbiAg
ICAgIF0sXFxuICAgICAgXFxcIlNVTXZlcnNpb25cXFwiOlxcXCI0LjIuMi4xNlxcXCIsXFxuICAg
ICAgXFxcIlNlbGVjdG9yc1xcXCI6WyAgXFxuICAgICAgICAgeyAgXFxuICAgICAgICAgICAgXFxc
IkVuZ2luZVxcXCI6XFxcIkdhdGV3YXkgQW50aS1NYWx3YXJlXFxcIixcXG4gICAgICAgICAgICBc
XFwiU2V2ZXJpdHlcXFwiOlxcXCI1XFxcIixcXG4gICAgICAgICAgICBcXFwiTWFsd2FyZU5hbWVc
XFwiOlxcXCJXMzIvUm9udG9rYnJvLmdlbkBNTVxcXCJcXG4gICAgICAgICB9LFxcbiAgICAgICAg
IHsgIFxcbiAgICAgICAgICAgIFxcXCJFbmdpbmVcXFwiOlxcXCJBbnRpLU1hbHdhcmVcXFwiLFxc
biAgICAgICAgICAgIFxcXCJTZXZlcml0eVxcXCI6XFxcIjVcXFwiLFxcbiAgICAgICAgICAgIFxc
XCJNYWx3YXJlTmFtZVxcXCI6XFxcIlczMi9Sb250b2ticm8uZ2VuQE1NXFxcIlxcbiAgICAgICAg
IH0sXFxuICAgICAgICAgeyAgXFxuICAgICAgICAgICAgXFxcIkVuZ2luZVxcXCI6XFxcIlNhbmRi
b3hcXFwiLFxcbiAgICAgICAgICAgIFxcXCJTZXZlcml0eVxcXCI6XFxcIjBcXFwiLFxcbiAgICAg
ICAgICAgIFxcXCJNYWx3YXJlTmFtZVxcXCI6XFxcIi0tLVxcXCJcXG4gICAgICAgICB9XFxuICAg
ICAgXSxcXG4gICAgICBcXFwiaGFzRHluYW1pY0FuYWx5c2lzXFxcIjpcXFwiZmFsc2VcXFwiLFxc
biAgICAgIFxcXCJCZWhhdmlvclxcXCI6WyAgXFxuICAgICAgICAgXFxcIklkZW50aWZpZWQgYXMg
VzMyL1JvbnRva2Jyby5nZW5ATU0gYnkgR2F0ZXdheSBBbnRpLU1hbHdhcmVcXFwiLFxcbiAgICAg
ICAgIFxcXCJJZGVudGlmaWVkIGFzIFczMi9Sb250b2ticm8uZ2VuQE1NIGJ5IEFudGktTWFsd2Fy
ZVxcXCJcXG4gICAgICBdLFxcbiAgICAgIFxcXCJWZXJkaWN0XFxcIjp7ICBcXG4gICAgICAgICBc
XFwiU2V2ZXJpdHlcXFwiOlxcXCI1XFxcIixcXG4gICAgICAgICBcXFwiRGVzY3JpcHRpb25cXFwi
OlxcXCJUaGUgc3VibWl0dGVkIGZpbGUgaXMgbm90IGNvbXBhdGlibGUgdG8gVk0ocykgaW4gdGhl
IEFuYWx5emVyIFByb2ZpbGVcXFwiXFxuICAgICAgfSxcXG4gICAgICBcXFwiT1N2ZXJzaW9uXFxc
IjpcXFwiU3RhdGljQW5hbHlzaXNcXFwiLFxcbiAgICAgIFxcXCJEYXRhXFxcIjp7ICBcXG4gICAg
ICAgICBcXFwiY29tcGlsZWRfd2l0aFxcXCI6XFxcIk5vdCBBdmFpbGFibGVcXFwiLFxcbiAgICAg
ICAgIFxcXCJhbmFseXNpc19zZWNvbmRzXFxcIjpcXFwiMVxcXCIsXFxuICAgICAgICAgXFxcInNh
bmRib3hfYW5hbHlzaXNcXFwiOlxcXCIwXFxcIlxcbiAgICAgIH0sXFxuICAgICAgXFxcIk1JU3Zl
cnNpb25cXFwiOlxcXCI0LjIuMi4xNlxcXCIsXFxuICAgICAgXFxcIkRFVHZlcnNpb25cXFwiOlxc
XCI0LjIuMi4xODAyMjJcXFwiXFxuICAgfVxcbn1cXG5cXFwiXFxcIlxcXCJcXG5cXG5yZXBvcnRf
dHlwZSA9IHJlc3VsdHNbXFxcIklucHV0c1xcXCJdW1xcXCJtYWNmZWVfYXRkX3JlcG9ydF90eXBl
XFxcIl1cXG5cXG5yZXBvcnRfYXR0YWNobWVudCA9IFxcXCJcXFwiXFxuaWYgcmVwb3J0X3R5cGUg
PT0gXFxcInBkZlxcXCIgb3IgcmVwb3J0X3R5cGUgPT0gXFxcImh0bWxcXFwiOlxcbiAgcmVwb3J0
X2F0dGFjaG1lbnQgPSBcXFwiU2VlIHRoZSByZXBvcnQgYXR0YWNobWVudCBvbiB0aGUgaW5jaWRl
bnQuXFxcIlxcblxcbmlmIHRhc2sgaXMgbm90IE5vbmU6XFxuICBpbmNpZGVudC5hZGROb3RlKGhl
bHBlci5jcmVhdGVSaWNoVGV4dChcXFwiVGFzayBBdHRhY2htZW50ICZsdDtiJmd0O3t9Jmx0Oy9i
Jmd0OyB3YXMgYW5hbHl6ZWQgYnkgTWNBZmVlIEFURCBhbmQgdGhlIHZlcmRpY3Qgd2FzOiBcXFxc
biZsdDtiJmd0O3t9Jmx0Oy9iJmd0Oy4ge31cXFwiLmZvcm1hdChyZXN1bHRzW1xcXCJTdW1tYXJ5
XFxcIl1bXFxcIlN1YmplY3RcXFwiXVtcXFwiTmFtZVxcXCJdLCByZXN1bHRzW1xcXCJTdW1tYXJ5
XFxcIl1bXFxcIlZlcmRpY3RcXFwiXVtcXFwiRGVzY3JpcHRpb25cXFwiXSwgcmVwb3J0X2F0dGFj
aG1lbnQpKSlcXG5lbHNlOlxcbiAgaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRl
eHQoXFxcIkluY2lkZW50IEF0dGFjaG1lbnQgJmx0O2ImZ3Q7e30mbHQ7L2ImZ3Q7IHdhcyBhbmFs
eXplZCBieSBNY0FmZWUgQVREIGFuZCB0aGUgdmVyZGljdCB3YXM6IFxcXFxuJmx0O2ImZ3Q7e30m
bHQ7L2ImZ3Q7LiB7fVxcXCIuZm9ybWF0KHJlc3VsdHNbXFxcIlN1bW1hcnlcXFwiXVtcXFwiU3Vi
amVjdFxcXCJdW1xcXCJOYW1lXFxcIl0sIHJlc3VsdHNbXFxcIlN1bW1hcnlcXFwiXVtcXFwiVmVy
ZGljdFxcXCJdW1xcXCJEZXNjcmlwdGlvblxcXCJdLCByZXBvcnRfYXR0YWNobWVudCkpKVwifTwv
cmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNl
Rmxvd18wNGE0NTVqPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzAwZmI4ZHo8L291
dGdvaW5nPjwvc2VydmljZVRhc2s+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMHpxYjdsdVwiPjxp
bmNvbWluZz5TZXF1ZW5jZUZsb3dfMDBmYjhkejwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVu
Y2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzAwZmI4ZHpcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFz
a18wZDVta3g4XCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMHpxYjdsdVwiLz48c2VxdWVuY2VGbG93
IGlkPVwiU2VxdWVuY2VGbG93XzA0YTQ1NWpcIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFz
eG1cIiB0YXJnZXRSZWY9XCJTZXJ2aWNlVGFza18wZDVta3g4XCIvPjx0ZXh0QW5ub3RhdGlvbiBp
ZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhl
cmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8x
c2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4
dEFubm90YXRpb25fMWt4eGl5dFwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1c
IkJQTU5EaWFncmFtXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5l
ZFwiIGlkPVwiQlBNTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0
YXJ0RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJv
dW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBt
bmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1c
IjE1N1wiIHk9XCIyMjNcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48
YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBp
ZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMw
XCIgd2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48
YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwi
QXNzb2NpYXRpb25fMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5
cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4
c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBt
bmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzBkNW1reDhcIiBpZD1cIlNl
cnZpY2VUYXNrXzBkNW1reDhfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9
XCIxMDBcIiB4PVwiMjgyXCIgeT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpC
UE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJFbmRFdmVudF8wenFiN2x1XCIgaWQ9XCJFbmRFdmVudF8w
enFiN2x1X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwi
NTA3XCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1c
IjEzXCIgd2lkdGg9XCIwXCIgeD1cIjUyNVwiIHk9XCIyMjdcIi8+PC9icG1uZGk6QlBNTkxhYmVs
PjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVu
Y2VGbG93XzAwZmI4ZHpcIiBpZD1cIlNlcXVlbmNlRmxvd18wMGZiOGR6X2RpXCI+PG9tZ2RpOndh
eXBvaW50IHg9XCIzODJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21n
ZGk6d2F5cG9pbnQgeD1cIjUwN1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIv
PjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBc
IiB4PVwiNDQ0LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1O
RWRnZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzA0YTQ1NWpc
IiBpZD1cIlNlcXVlbmNlRmxvd18wNGE0NTVqX2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxOThc
IiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1c
IjI4MlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxh
YmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiMjQwXCIgeT1c
IjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBN
TlBsYW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgIndvcmtmbG93X2lk
IjogIm1jYWZlZV9hdGRfYW5hbHl6ZV9hdHRhY2htZW50IiwgInZlcnNpb24iOiAxMX0sICJ3b3Jr
Zmxvd19pZCI6IDgsICJhY3Rpb25zIjogW10sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTMwMzY0
OTczMDc1LCAiY3JlYXRvcl9pZCI6ICJid2Fsc2hAcmVzaWxpZW50c3lzdGVtcy5jb20iLCAiZGVz
Y3JpcHRpb24iOiAiQ2FsbHMgdGhlIE1jQWZlZSBBVEQgQW5hbHl6ZSBGaWxlIGZ1bmN0aW9uIG9u
IGFuIGF0dGFjaG1lbnQifSwgeyJwcm9ncmFtbWF0aWNfbmFtZSI6ICJtY2FmZWVfYXRkX2FuYWx5
emVfdXJsIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImV4cG9ydF9rZXkiOiAibWNhZmVl
X2F0ZF9hbmFseXplX3VybCIsICJ1dWlkIjogImU2NDM5YWI0LWFkMTItNDcxZi1iMzYyLWJkODkz
NzExNDNjNiIsICJsYXN0X21vZGlmaWVkX2J5IjogImFkbWluQGNvM3N5cy5jb20iLCAibmFtZSI6
ICIoRXhhbXBsZSkgTWNBZmVlIEFURCBBbmFseXplIFVSTCIsICJjb250ZW50IjogeyJ4bWwiOiAi
PD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHht
bG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5z
OmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxu
czpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6
b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJl
c2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0
cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3Lncz
Lm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93
d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwibWNhZmVlX2F0ZF9hbmFseXplX3Vy
bFwiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwiKEV4YW1wbGUpIE1jQWZlZSBBVEQgQW5h
bHl6ZSBVUkxcIj48ZG9jdW1lbnRhdGlvbj5DYWxscyB0aGUgTWNBZmVlIEFURCBBbmFseXplIFVS
TCBmdW5jdGlvbiBvbiBhbiBhcnRpZmFjdDwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1c
IlN0YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMXRsdHEybjwvb3V0
Z29pbmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzBnOHN2Z3Bc
IiBuYW1lPVwiTWNBZmVlIEFURCBBbmFseXplIFVSTFwiIHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rp
b25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVudDpmdW5jdGlvbiB1dWlkPVwiMGQ1Njg1
OTgtOTQ4Yi00OWMxLWIwNmUtZGY5ZmIzMDc2ZjM1XCI+e1wiaW5wdXRzXCI6e1wiMGEyOTRjYTUt
ODBhNS00MDVhLWEwYjUtM2U4ZDliNjcxNzI3XCI6e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIs
XCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92YWx1ZVwiOltdLFwic2VsZWN0X3ZhbHVl
XCI6XCJiN2VjYTFlZi02YzU4LTRiYjgtOTc2ZS0yMDY2MzI2MmFjZTFcIn19LFwiMmMxNzk1Yjgt
YThiZS00ZDM0LTlmYmEtMDYzMjQwOGRhNDlmXCI6e1wiaW5wdXRfdHlwZVwiOlwic3RhdGljXCIs
XCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92YWx1ZVwiOltdLFwic2VsZWN0X3ZhbHVl
XCI6XCJhODU2ZTQ5NC1iYTA3LTQ1YWMtYjNiNi1mZmE3NDdkZjliNzdcIn19fSxcInByZV9wcm9j
ZXNzaW5nX3NjcmlwdFwiOlwiaW5wdXRzLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcXG5pbnB1
dHMuYXJ0aWZhY3RfaWQgPSBhcnRpZmFjdC5pZFxcbmlucHV0cy5hcnRpZmFjdF92YWx1ZSA9IGFy
dGlmYWN0LnZhbHVlXCIsXCJwb3N0X3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJcXFwiXFxcIlxcXCJc
XG5FeGFtcGxlIHJldHVybmVkIHJlc3VsdHNcXG5cXG5cXG57ICBcXG4gICBcXFwiSW5wdXRzXFxc
Ijp7ICBcXG4gICAgICBcXFwiaW5jaWRlbnRfaWRcXFwiOjIwOTksXFxuICAgICAgXFxcImFydGlm
YWN0X3ZhbHVlXFxcIjpcXFwiaHR0cDovL25ld3MuZ29vZ2xlLmNvLmluL1xcXCIsXFxuICAgICAg
XFxcImFydGlmYWN0X2lkXFxcIjo2NSxcXG4gICAgICBcXFwibWNhZmVlX2F0ZF91cmxfc3VibWl0
X3R5cGVcXFwiOlxcXCJBdHRlbXB0IHRvIGRldGVybWluZSBVUkwgc3VibWl0IHR5cGVcXFwiLFxc
biAgICAgIFxcXCJtYWNmZWVfYXRkX3JlcG9ydF90eXBlXFxcIjpcXFwicGRmXFxcIlxcbiAgIH0s
XFxuICAgXFxcIlJ1biBUaW1lXFxcIjpcXFwiNC44MDQ1NjY4NjAyXFxcIixcXG4gICBcXFwiU3Vt
bWFyeVxcXCI6eyAgXFxuICAgICAgXFxcIkpTT052ZXJzaW9uXFxcIjpcXFwiMS4wMDJcXFwiLFxc
biAgICAgIFxcXCJTdWJqZWN0XFxcIjp7ICBcXG4gICAgICAgICBcXFwic2l6ZVxcXCI6XFxcIjI1
XFxcIixcXG4gICAgICAgICBcXFwic2hhLTFcXFwiOlxcXCIxRTkwRURFQkEwRTEyRUE1Nzk3QkJC
QjUyNENCQzIxREFDNUI3RUVGXFxcIixcXG4gICAgICAgICBcXFwiVGltZXN0YW1wXFxcIjpcXFwi
MjAxOC0wNS0xNSAyMTozOToxNVxcXCIsXFxuICAgICAgICAgXFxcIkZpbGVUeXBlXFxcIjpcXFwi
NTEyXFxcIixcXG4gICAgICAgICBcXFwic2hhLTI1NlxcXCI6XFxcIjEyNkU5QTk3MUNFNTFDRDNF
RDA5MDM0RTA1MjZBODM4REM1NTY1ODRGMzk3Q0IxMjFCMjc4RTA4RDA4OTBCNDZcXFwiLFxcbiAg
ICAgICAgIFxcXCJwYXJlbnRfYXJjaGl2ZVxcXCI6XFxcIk5vdCBBdmFpbGFibGVcXFwiLFxcbiAg
ICAgICAgIFxcXCJtZDVcXFwiOlxcXCI4MzlGNTUxRjk3RTY2OUREREIzNDhCRERCOTA3RDMyQ1xc
XCIsXFxuICAgICAgICAgXFxcIlR5cGVcXFwiOlxcXCJhcHBsaWNhdGlvbi91cmxcXFwiLFxcbiAg
ICAgICAgIFxcXCJOYW1lXFxcIjpcXFwiaHR0cDovL25ld3MuZ29vZ2xlLmNvLmluL1xcXCJcXG4g
ICAgICB9LFxcbiAgICAgIFxcXCJQcm9jZXNzXFxcIjpbICBcXG4gICAgICAgICB7ICBcXG4gICAg
ICAgICAgICBcXFwiUmVhc29uXFxcIjpcXFwicHJvY2Vzc2VkIGJ5IGRvd24gc2VsZWN0b3JzXFxc
IixcXG4gICAgICAgICAgICBcXFwiTmFtZVxcXCI6XFxcImh0dHA6Ly9uZXdzLmdvb2dsZS5jby5p
bi9cXFwiLFxcbiAgICAgICAgICAgIFxcXCJTZXZlcml0eVxcXCI6XFxcIjBcXFwiXFxuICAgICAg
ICAgfVxcbiAgICAgIF0sXFxuICAgICAgXFxcIlNVTXZlcnNpb25cXFwiOlxcXCI0LjIuMi4xNlxc
XCIsXFxuICAgICAgXFxcIlNlbGVjdG9yc1xcXCI6WyAgXFxuICAgICAgICAgeyAgXFxuICAgICAg
ICAgICAgXFxcIkVuZ2luZVxcXCI6XFxcIkdhdGV3YXkgQW50aS1NYWx3YXJlXFxcIixcXG4gICAg
ICAgICAgICBcXFwiU2V2ZXJpdHlcXFwiOlxcXCIwXFxcIixcXG4gICAgICAgICAgICBcXFwiTWFs
d2FyZU5hbWVcXFwiOlxcXCItLS1cXFwiXFxuICAgICAgICAgfSxcXG4gICAgICAgICB7ICBcXG4g
ICAgICAgICAgICBcXFwiRW5naW5lXFxcIjpcXFwiQW50aS1NYWx3YXJlXFxcIixcXG4gICAgICAg
ICAgICBcXFwiU2V2ZXJpdHlcXFwiOlxcXCIwXFxcIixcXG4gICAgICAgICAgICBcXFwiTWFsd2Fy
ZU5hbWVcXFwiOlxcXCItLS1cXFwiXFxuICAgICAgICAgfSxcXG4gICAgICAgICB7ICBcXG4gICAg
ICAgICAgICBcXFwiRW5naW5lXFxcIjpcXFwiU2FuZGJveFxcXCIsXFxuICAgICAgICAgICAgXFxc
IlNldmVyaXR5XFxcIjpcXFwiMFxcXCIsXFxuICAgICAgICAgICAgXFxcIk1hbHdhcmVOYW1lXFxc
IjpcXFwiLS0tXFxcIlxcbiAgICAgICAgIH1cXG4gICAgICBdLFxcbiAgICAgIFxcXCJoYXNEeW5h
bWljQW5hbHlzaXNcXFwiOlxcXCJmYWxzZVxcXCIsXFxuICAgICAgXFxcIkJlaGF2aW9yXFxcIjpb
ICBcXG4gICAgICAgICBcXFwiSWRlbnRpZmllZCBhcyAtLS0gYnkgR2F0ZXdheSBBbnRpLU1hbHdh
cmVcXFwiLFxcbiAgICAgICAgIFxcXCJJZGVudGlmaWVkIGFzIC0tLSBieSBBbnRpLU1hbHdhcmVc
XFwiXFxuICAgICAgXSxcXG4gICAgICBcXFwiVmVyZGljdFxcXCI6eyAgXFxuICAgICAgICAgXFxc
IlNldmVyaXR5XFxcIjpcXFwiMFxcXCIsXFxuICAgICAgICAgXFxcIkRlc2NyaXB0aW9uXFxcIjpc
XFwiVGhlIHN1Ym1pdHRlZCBmaWxlIGlzIG5vdCBjb21wYXRpYmxlIHRvIFZNKHMpIGluIHRoZSBB
bmFseXplciBQcm9maWxlXFxcIlxcbiAgICAgIH0sXFxuICAgICAgXFxcIk9TdmVyc2lvblxcXCI6
XFxcIlN0YXRpY0FuYWx5c2lzXFxcIixcXG4gICAgICBcXFwiRGF0YVxcXCI6eyAgXFxuICAgICAg
ICAgXFxcImNvbXBpbGVkX3dpdGhcXFwiOlxcXCJOb3QgQXZhaWxhYmxlXFxcIixcXG4gICAgICAg
ICBcXFwiYW5hbHlzaXNfc2Vjb25kc1xcXCI6XFxcIjFcXFwiLFxcbiAgICAgICAgIFxcXCJzYW5k
Ym94X2FuYWx5c2lzXFxcIjpcXFwiMFxcXCJcXG4gICAgICB9LFxcbiAgICAgIFxcXCJNSVN2ZXJz
aW9uXFxcIjpcXFwiNC4yLjIuMTZcXFwiLFxcbiAgICAgIFxcXCJERVR2ZXJzaW9uXFxcIjpcXFwi
NC4yLjIuMTgwMjIyXFxcIlxcbiAgIH1cXG59XFxuXFxcIlxcXCJcXFwiXFxuXFxucmVwb3J0X3R5
cGUgPSByZXN1bHRzW1xcXCJJbnB1dHNcXFwiXVtcXFwibWFjZmVlX2F0ZF9yZXBvcnRfdHlwZVxc
XCJdXFxuXFxucmVwb3J0X2F0dGFjaG1lbnQgPSBcXFwiXFxcIlxcbmlmIHJlcG9ydF90eXBlID09
IFxcXCJwZGZcXFwiIG9yIHJlcG9ydF90eXBlID09IFxcXCJodG1sXFxcIjpcXG4gIHJlcG9ydF9h
dHRhY2htZW50ID0gXFxcIlNlZSB0aGUgcmVwb3J0IGF0dGFjaG1lbnQgb24gdGhlIGluY2lkZW50
LlxcXCJcXG5cXG5cXG5pbmNpZGVudC5hZGROb3RlKGhlbHBlci5jcmVhdGVSaWNoVGV4dChcXFwi
VVJMICZsdDtiJmd0O3t9Jmx0Oy9iJmd0OyB3YXMgYW5hbHl6ZWQgYnkgTWNBZmVlIEFURCBhbmQg
dGhlIHZlcmRpY3Qgd2FzOiBcXFxcbiZsdDtiJmd0O3t9Jmx0Oy9iJmd0Oy4ge31cXFwiLmZvcm1h
dChyZXN1bHRzW1xcXCJTdW1tYXJ5XFxcIl1bXFxcIlN1YmplY3RcXFwiXVtcXFwiTmFtZVxcXCJd
LCByZXN1bHRzW1xcXCJTdW1tYXJ5XFxcIl1bXFxcIlZlcmRpY3RcXFwiXVtcXFwiRGVzY3JpcHRp
b25cXFwiXSwgcmVwb3J0X2F0dGFjaG1lbnQpKSlcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4
dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMXRsdHEybjwvaW5jb21pbmc+
PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xZDVmbmp3PC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxz
ZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMXRsdHEyblwiIHNvdXJjZVJlZj1cIlN0YXJ0
RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzBnOHN2Z3BcIi8+PGVuZEV2
ZW50IGlkPVwiRW5kRXZlbnRfMTltYXNlZlwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWQ1Zm5q
dzwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFk
NWZuandcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wZzhzdmdwXCIgdGFyZ2V0UmVmPVwiRW5k
RXZlbnRfMTltYXNlZlwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4
aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRp
b24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0
YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+
PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5k
aTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+
PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1c
IlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0
aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJv
dW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBt
bmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVs
ZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4
aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5
XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxl
bWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlc
Ij48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi
MjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwi
IHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1l
bnQ9XCJTZXJ2aWNlVGFza18wZzhzdmdwXCIgaWQ9XCJTZXJ2aWNlVGFza18wZzhzdmdwX2RpXCI+
PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjMwMVwiIHk9XCIx
NjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJT
ZXF1ZW5jZUZsb3dfMXRsdHEyblwiIGlkPVwiU2VxdWVuY2VGbG93XzF0bHRxMm5fZGlcIj48b21n
ZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIv
PjxvbWdkaTp3YXlwb2ludCB4PVwiMzAxXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIy
MDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRo
PVwiOTBcIiB4PVwiMjA0LjVcIiB5PVwiMTg0LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBt
bmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMTlt
YXNlZlwiIGlkPVwiRW5kRXZlbnRfMTltYXNlZl9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi
MzZcIiB3aWR0aD1cIjM2XCIgeD1cIjUzNS4xMDk3NDYxMDk3NDYxXCIgeT1cIjE4OFwiLz48YnBt
bmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1c
IjU1My4xMDk3NDYxMDk3NDYxXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u
ZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3df
MWQ1Zm5qd1wiIGlkPVwiU2VxdWVuY2VGbG93XzFkNWZuandfZGlcIj48b21nZGk6d2F5cG9pbnQg
eD1cIjQwMVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlw
b2ludCB4PVwiNTM1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5k
aTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwi
NDIzXCIgeT1cIjE4NC41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48
L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAi
d29ya2Zsb3dfaWQiOiAibWNhZmVlX2F0ZF9hbmFseXplX3VybCIsICJ2ZXJzaW9uIjogMjV9LCAi
d29ya2Zsb3dfaWQiOiA2LCAiYWN0aW9ucyI6IFtdLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTUz
MTk3NDU1OTU5MiwgImNyZWF0b3JfaWQiOiAiYndhbHNoQHJlc2lsaWVudHN5c3RlbXMuY29tIiwg
ImRlc2NyaXB0aW9uIjogIkNhbGxzIHRoZSBNY0FmZWUgQVREIEFuYWx5emUgVVJMIGZ1bmN0aW9u
IG9uIGFuIGFydGlmYWN0In0sIHsicHJvZ3JhbW1hdGljX25hbWUiOiAibWNhZmVlX2F0ZF9hbmFs
eXplX2FydGlmYWN0X2ZpbGUiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZXhwb3J0X2tl
eSI6ICJtY2FmZWVfYXRkX2FuYWx5emVfYXJ0aWZhY3RfZmlsZSIsICJ1dWlkIjogImFiNjZlMzMz
LTNmZWItNGZiZS04NDYyLWIzZTM3OThiMDcxNCIsICJsYXN0X21vZGlmaWVkX2J5IjogImFkbWlu
QGNvM3N5cy5jb20iLCAibmFtZSI6ICIoRXhhbXBsZSkgTWNBZmVlIEFURCBBbmFseXplIEFydGlm
YWN0IEZpbGUiLCAiY29udGVudCI6IHsieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNv
ZGluZz1cIlVURi04XCI/PjxkZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9z
cGVjL0JQTU4vMjAxMDA1MjQvTU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5v
cmcvc3BlYy9CUE1OLzIwMTAwNTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZy5v
cmcvc3BlYy9ERC8yMDEwMDUyNC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3Jn
L3NwZWMvREQvMjAxMDA1MjQvRElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50
LmlibS5jb20vYnBtblwiIHhtbG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2No
ZW1hXCIgeG1sbnM6eHNpPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFu
Y2VcIiB0YXJnZXROYW1lc3BhY2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJv
Y2VzcyBpZD1cIm1jYWZlZV9hdGRfYW5hbHl6ZV9hcnRpZmFjdF9maWxlXCIgaXNFeGVjdXRhYmxl
PVwidHJ1ZVwiIG5hbWU9XCIoRXhhbXBsZSkgTWNBZmVlIEFURCBBbmFseXplIEFydGlmYWN0IEZp
bGVcIj48ZG9jdW1lbnRhdGlvbj5DYWxscyB0aGUgTWNBZmVlIEFURCBBbmFseXplIEZpbGUgZnVu
Y3Rpb24gb24gYW4gYXJ0aWZhY3Q8L2RvY3VtZW50YXRpb24+PHN0YXJ0RXZlbnQgaWQ9XCJTdGFy
dEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzEyajNqbGc8L291dGdvaW5n
Pjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFza18xdTNucDY4XCIgbmFt
ZT1cIk1jQWZlZSBBVEQgQW5hbHl6ZSBGaWxlXCIgcmVzaWxpZW50OnR5cGU9XCJmdW5jdGlvblwi
PjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCI1MDFlYjQ5Yi0x
Y2YxLTRmNTQtYmM1MC0xMzBkNDJjMDM1YWNcIj57XCJpbnB1dHNcIjp7XCIwYTI5NGNhNS04MGE1
LTQwNWEtYTBiNS0zZThkOWI2NzE3MjdcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0aWNcIixcInN0
YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJzZWxlY3RfdmFsdWVcIjpc
ImI3ZWNhMWVmLTZjNTgtNGJiOC05NzZlLTIwNjYzMjYyYWNlMVwifX19LFwicHJlX3Byb2Nlc3Np
bmdfc2NyaXB0XCI6XCJpbnB1dHMuaW5jaWRlbnRfaWQgPSBpbmNpZGVudC5pZFxcbmlucHV0cy5h
cnRpZmFjdF9pZCA9IGFydGlmYWN0LmlkXCIsXCJyZXN1bHRfbmFtZVwiOlwiXCIsXCJwb3N0X3By
b2Nlc3Npbmdfc2NyaXB0XCI6XCJcXFwiXFxcIlxcXCJcXG5FeGFtcGxlIHJldHVybmVkIHJlc3Vs
dHNcXG5cXG5cXG57ICBcXG4gICBcXFwiSW5wdXRzXFxcIjp7ICBcXG4gICAgICBcXFwibWFjZmVl
X2F0ZF9yZXBvcnRfdHlwZVxcXCI6XFxcInBkZlxcXCIsXFxuICAgICAgXFxcImFydGlmYWN0X2lk
XFxcIjo1NixcXG4gICAgICBcXFwiaW5jaWRlbnRfaWRcXFwiOjIwOTlcXG4gICB9LFxcbiAgIFxc
XCJSdW4gVGltZVxcXCI6XFxcIjY2Ljg3Mjc1OTEwMzhcXFwiLFxcbiAgIFxcXCJTdW1tYXJ5XFxc
Ijp7ICBcXG4gICAgICBcXFwiSlNPTnZlcnNpb25cXFwiOlxcXCIxLjAwMlxcXCIsXFxuICAgICAg
XFxcIlN1YmplY3RcXFwiOnsgIFxcbiAgICAgICAgIFxcXCJzaXplXFxcIjpcXFwiMTY3OTQxXFxc
IixcXG4gICAgICAgICBcXFwic2hhLTFcXFwiOlxcXCJBRjVGRDhGMTBGNkIyQkQ1NkYxRDREMTVC
NkE4OTVCOTQ0ODVBREY0XFxcIixcXG4gICAgICAgICBcXFwiVGltZXN0YW1wXFxcIjpcXFwiMjAx
OC0wNS0xNSAxOTozNDowOVxcXCIsXFxuICAgICAgICAgXFxcIkZpbGVUeXBlXFxcIjpcXFwiNTEy
XFxcIixcXG4gICAgICAgICBcXFwic2hhLTI1NlxcXCI6XFxcIjIyNTJEQzNGQURFMUYzREYwREVE
OENFRDI1QjcxRDZCMzRERjYzRDA0QjlDMDBFOUE5RTRGOTFCMDVDRjUxRTVcXFwiLFxcbiAgICAg
ICAgIFxcXCJwYXJlbnRfYXJjaGl2ZVxcXCI6XFxcIk5vdCBBdmFpbGFibGVcXFwiLFxcbiAgICAg
ICAgIFxcXCJtZDVcXFwiOlxcXCI1NzBFNDgxQzJFNDVERjE5MThDNTM0RTYzQ0E0MzE4MFxcXCIs
XFxuICAgICAgICAgXFxcIlR5cGVcXFwiOlxcXCJQRTMyIGV4ZWN1dGFibGUgKEdVSSkgSW50ZWwg
ODAzODZcXFwiLFxcbiAgICAgICAgIFxcXCJOYW1lXFxcIjpcXFwiMjI1MkRDM0ZBREUxRjNERjBE
RUQ4Q0VEMjVCNzFENkIzNERGNjNEMDRCOUMwMEU5QTlFNEY5MUIwNUNGNTFFNS5leGVcXFwiXFxu
ICAgICAgfSxcXG4gICAgICBcXFwiUHJvY2Vzc1xcXCI6WyAgXFxuICAgICAgICAgeyAgXFxuICAg
ICAgICAgICAgXFxcIlJlYXNvblxcXCI6XFxcInByb2Nlc3NlZCBieSBkb3duIHNlbGVjdG9yc1xc
XCIsXFxuICAgICAgICAgICAgXFxcIk5hbWVcXFwiOlxcXCIyMjUyREMzRkFERTFGM0RGMERFRDhD
RUQyNUI3MUQ2QjM0REY2M0QwNEI5QzAwRTlBOUU0RjkxQjA1Q0Y1MUU1LmV4ZVxcXCIsXFxuICAg
ICAgICAgICAgXFxcIlNldmVyaXR5XFxcIjpcXFwiNVxcXCJcXG4gICAgICAgICB9XFxuICAgICAg
XSxcXG4gICAgICBcXFwiU1VNdmVyc2lvblxcXCI6XFxcIjQuMi4yLjE2XFxcIixcXG4gICAgICBc
XFwiU2VsZWN0b3JzXFxcIjpbICBcXG4gICAgICAgICB7ICBcXG4gICAgICAgICAgICBcXFwiRW5n
aW5lXFxcIjpcXFwiR2F0ZXdheSBBbnRpLU1hbHdhcmVcXFwiLFxcbiAgICAgICAgICAgIFxcXCJT
ZXZlcml0eVxcXCI6XFxcIjVcXFwiLFxcbiAgICAgICAgICAgIFxcXCJNYWx3YXJlTmFtZVxcXCI6
XFxcIlczMi9Sb250b2ticm8uZ2VuQE1NXFxcIlxcbiAgICAgICAgIH0sXFxuICAgICAgICAgeyAg
XFxuICAgICAgICAgICAgXFxcIkVuZ2luZVxcXCI6XFxcIkFudGktTWFsd2FyZVxcXCIsXFxuICAg
ICAgICAgICAgXFxcIlNldmVyaXR5XFxcIjpcXFwiNVxcXCIsXFxuICAgICAgICAgICAgXFxcIk1h
bHdhcmVOYW1lXFxcIjpcXFwiVzMyL1JvbnRva2Jyby5nZW5ATU1cXFwiXFxuICAgICAgICAgfSxc
XG4gICAgICAgICB7ICBcXG4gICAgICAgICAgICBcXFwiRW5naW5lXFxcIjpcXFwiU2FuZGJveFxc
XCIsXFxuICAgICAgICAgICAgXFxcIlNldmVyaXR5XFxcIjpcXFwiMFxcXCIsXFxuICAgICAgICAg
ICAgXFxcIk1hbHdhcmVOYW1lXFxcIjpcXFwiLS0tXFxcIlxcbiAgICAgICAgIH1cXG4gICAgICBd
LFxcbiAgICAgIFxcXCJoYXNEeW5hbWljQW5hbHlzaXNcXFwiOlxcXCJmYWxzZVxcXCIsXFxuICAg
ICAgXFxcIkJlaGF2aW9yXFxcIjpbICBcXG4gICAgICAgICBcXFwiSWRlbnRpZmllZCBhcyBXMzIv
Um9udG9rYnJvLmdlbkBNTSBieSBHYXRld2F5IEFudGktTWFsd2FyZVxcXCIsXFxuICAgICAgICAg
XFxcIklkZW50aWZpZWQgYXMgVzMyL1JvbnRva2Jyby5nZW5ATU0gYnkgQW50aS1NYWx3YXJlXFxc
IlxcbiAgICAgIF0sXFxuICAgICAgXFxcIlZlcmRpY3RcXFwiOnsgIFxcbiAgICAgICAgIFxcXCJT
ZXZlcml0eVxcXCI6XFxcIjVcXFwiLFxcbiAgICAgICAgIFxcXCJEZXNjcmlwdGlvblxcXCI6XFxc
IlRoZSBzdWJtaXR0ZWQgZmlsZSBpcyBub3QgY29tcGF0aWJsZSB0byBWTShzKSBpbiB0aGUgQW5h
bHl6ZXIgUHJvZmlsZVxcXCJcXG4gICAgICB9LFxcbiAgICAgIFxcXCJPU3ZlcnNpb25cXFwiOlxc
XCJTdGF0aWNBbmFseXNpc1xcXCIsXFxuICAgICAgXFxcIkRhdGFcXFwiOnsgIFxcbiAgICAgICAg
IFxcXCJjb21waWxlZF93aXRoXFxcIjpcXFwiTm90IEF2YWlsYWJsZVxcXCIsXFxuICAgICAgICAg
XFxcImFuYWx5c2lzX3NlY29uZHNcXFwiOlxcXCIxXFxcIixcXG4gICAgICAgICBcXFwic2FuZGJv
eF9hbmFseXNpc1xcXCI6XFxcIjBcXFwiXFxuICAgICAgfSxcXG4gICAgICBcXFwiTUlTdmVyc2lv
blxcXCI6XFxcIjQuMi4yLjE2XFxcIixcXG4gICAgICBcXFwiREVUdmVyc2lvblxcXCI6XFxcIjQu
Mi4yLjE4MDIyMlxcXCJcXG4gICB9XFxufVxcblxcXCJcXFwiXFxcIlxcblxcbnJlcG9ydF90eXBl
ID0gcmVzdWx0c1tcXFwiSW5wdXRzXFxcIl1bXFxcIm1hY2ZlZV9hdGRfcmVwb3J0X3R5cGVcXFwi
XVxcblxcbnJlcG9ydF9hdHRhY2htZW50ID0gXFxcIlxcXCJcXG5pZiByZXBvcnRfdHlwZSA9PSBc
XFwicGRmXFxcIiBvciByZXBvcnRfdHlwZSA9PSBcXFwiaHRtbFxcXCI6XFxuICByZXBvcnRfYXR0
YWNobWVudCA9IFxcXCJTZWUgdGhlIHJlcG9ydCBhdHRhY2htZW50IG9uIHRoZSBpbmNpZGVudC5c
XFwiXFxuXFxuXFxuaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRleHQoXFxcIkFy
dGlmYWN0IEF0dGFjaG1lbnQgLSB7fSAtICZsdDtiJmd0O3t9Jmx0Oy9iJmd0OyB3YXMgYW5hbHl6
ZWQgYnkgTWNBZmVlIEFURCBhbmQgdGhlIHZlcmRpY3Qgd2FzOiBcXFxcbiZsdDtiJmd0O3t9LiZs
dDsvYiZndDsge31cXFwiLmZvcm1hdChhcnRpZmFjdC50eXBlLCByZXN1bHRzW1xcXCJTdW1tYXJ5
XFxcIl1bXFxcIlN1YmplY3RcXFwiXVtcXFwiTmFtZVxcXCJdLCByZXN1bHRzW1xcXCJTdW1tYXJ5
XFxcIl1bXFxcIlZlcmRpY3RcXFwiXVtcXFwiRGVzY3JpcHRpb25cXFwiXSwgcmVwb3J0X2F0dGFj
aG1lbnQpKSlcXG5cIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxp
bmNvbWluZz5TZXF1ZW5jZUZsb3dfMTJqM2psZzwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNl
Rmxvd18xZXkzdWlrPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJT
ZXF1ZW5jZUZsb3dfMTJqM2psZ1wiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRh
cmdldFJlZj1cIlNlcnZpY2VUYXNrXzF1M25wNjhcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRf
MXhtZGwwelwiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMWV5M3VpazwvaW5jb21pbmc+PC9lbmRF
dmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzFleTN1aWtcIiBzb3VyY2VSZWY9
XCJTZXJ2aWNlVGFza18xdTNucDY4XCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMXhtZGwwelwiLz48
dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQg
eW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlk
PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwi
IHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6
QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVs
ZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUg
YnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4
bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2Mlwi
IHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIg
d2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2Jw
bW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3Rh
dGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJv
dW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2Jw
bW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9u
XzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQg
eD1cIjE2OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlw
b2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1u
ZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18x
dTNucDY4XCIgaWQ9XCJTZXJ2aWNlVGFza18xdTNucDY4X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWln
aHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjI2MFwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBN
TlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTJqM2ps
Z1wiIGlkPVwiU2VxdWVuY2VGbG93XzEyajNqbGdfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5
OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4
PVwiMjYwXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1O
TGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyMjlcIiB5
PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQ
TU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzF4bWRsMHpcIiBpZD1cIkVuZEV2ZW50XzF4
bWRsMHpfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI0
NDVcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi
MTNcIiB3aWR0aD1cIjBcIiB4PVwiNDYzXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+
PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5j
ZUZsb3dfMWV5M3Vpa1wiIGlkPVwiU2VxdWVuY2VGbG93XzFleTN1aWtfZGlcIj48b21nZGk6d2F5
cG9pbnQgeD1cIjM2MFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdk
aTp3YXlwb2ludCB4PVwiNDQ1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+
PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwi
IHg9XCI0MDIuNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5F
ZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25z
PiIsICJ3b3JrZmxvd19pZCI6ICJtY2FmZWVfYXRkX2FuYWx5emVfYXJ0aWZhY3RfZmlsZSIsICJ2
ZXJzaW9uIjogMTF9LCAid29ya2Zsb3dfaWQiOiAyLCAiYWN0aW9ucyI6IFtdLCAibGFzdF9tb2Rp
ZmllZF90aW1lIjogMTUzMDM2NTAwMjQ3MSwgImNyZWF0b3JfaWQiOiAiYndhbHNoQHJlc2lsaWVu
dHN5c3RlbXMuY29tIiwgImRlc2NyaXB0aW9uIjogIkNhbGxzIHRoZSBNY0FmZWUgQVREIEFuYWx5
emUgRmlsZSBmdW5jdGlvbiBvbiBhbiBhcnRpZmFjdCJ9XSwgImFjdGlvbnMiOiBbeyJsb2dpY190
eXBlIjogImFsbCIsICJuYW1lIjogIihFeGFtcGxlKSBNY0FmZWUgQW5hbHl6ZSBVUkwiLCAidmll
d19pdGVtcyI6IFtdLCAidHlwZSI6IDEsICJ3b3JrZmxvd3MiOiBbIm1jYWZlZV9hdGRfYW5hbHl6
ZV91cmwiXSwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgInRpbWVvdXRfc2Vjb25kcyI6IDg2
NDAwLCAidXVpZCI6ICJkYWQzNmI5MS0wYTY3LTRhZWYtYjNhYi05NDg1MzA2ZDU4OTUiLCAiYXV0
b21hdGlvbnMiOiBbXSwgImV4cG9ydF9rZXkiOiAiKEV4YW1wbGUpIE1jQWZlZSBBbmFseXplIFVS
TCIsICJjb25kaXRpb25zIjogW3sidHlwZSI6IG51bGwsICJldmFsdWF0aW9uX2lkIjogbnVsbCwg
ImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJtZXRob2QiOiAiaW4iLCAidmFsdWUiOiBb
IlVSTCIsICJVUkwgUmVmZXJlciIsICJVUkkgUGF0aCJdfV0sICJpZCI6IDE0LCAibWVzc2FnZV9k
ZXN0aW5hdGlvbnMiOiBbXX0sIHsibG9naWNfdHlwZSI6ICJhbGwiLCAibmFtZSI6ICIoRXhhbXBs
ZSkgTWNBZmVlIEFURCBBbmFseXplIEFydGlmYWN0IEZpbGUiLCAidmlld19pdGVtcyI6IFtdLCAi
dHlwZSI6IDEsICJ3b3JrZmxvd3MiOiBbIm1jYWZlZV9hdGRfYW5hbHl6ZV9hcnRpZmFjdF9maWxl
Il0sICJvYmplY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwg
InV1aWQiOiAiMTBlZTdjYjYtNDk5ZS00Y2E5LTkzNTMtMGFkYTk2ZGI2OTcyIiwgImF1dG9tYXRp
b25zIjogW10sICJleHBvcnRfa2V5IjogIihFeGFtcGxlKSBNY0FmZWUgQVREIEFuYWx5emUgQXJ0
aWZhY3QgRmlsZSIsICJjb25kaXRpb25zIjogW3sidHlwZSI6IG51bGwsICJldmFsdWF0aW9uX2lk
IjogbnVsbCwgImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJtZXRob2QiOiAiaW4iLCAi
dmFsdWUiOiBbIlJGQyA4MjIgRW1haWwgTWVzc2FnZSBGaWxlIiwgIkVtYWlsIEF0dGFjaG1lbnQi
LCAiTWFsd2FyZSBTYW1wbGUiLCAiTG9nIEZpbGUiLCAiT3RoZXIgRmlsZSIsICJYNTA5IENlcnRp
ZmljYXRlIEZpbGUiXX1dLCAiaWQiOiAxNiwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW119LCB7
ImxvZ2ljX3R5cGUiOiAiYWxsIiwgIm5hbWUiOiAiKEV4YW1wbGUpIE1jQWZlZSBBVEQgQW5hbHl6
ZSBBdHRhY2htZW50IiwgInZpZXdfaXRlbXMiOiBbXSwgInR5cGUiOiAxLCAid29ya2Zsb3dzIjog
WyJtY2FmZWVfYXRkX2FuYWx5emVfYXR0YWNobWVudCJdLCAib2JqZWN0X3R5cGUiOiAiYXR0YWNo
bWVudCIsICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiY2M2YzMyOWYtY2I1ZS00
OTNlLWI2YTQtZDE3N2I2ZjFhOTJlIiwgImF1dG9tYXRpb25zIjogW10sICJleHBvcnRfa2V5Ijog
IihFeGFtcGxlKSBNY0FmZWUgQVREIEFuYWx5emUgQXR0YWNobWVudCIsICJjb25kaXRpb25zIjog
W10sICJpZCI6IDE3LCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXX1dLCAibGF5b3V0cyI6IFtd
LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9uIjogMiwgImlkIjogMTIsICJpbmR1c3RyaWVzIjogbnVs
bCwgInBoYXNlcyI6IFtdLCAiYWN0aW9uX29yZGVyIjogW10sICJnZW9zIjogbnVsbCwgInNlcnZl
cl92ZXJzaW9uIjogeyJtYWpvciI6IDMwLCAidmVyc2lvbiI6ICIzMC4wLjM0NzYiLCAiYnVpbGRf
bnVtYmVyIjogMzQ3NiwgIm1pbm9yIjogMH0sICJ0aW1lZnJhbWVzIjogbnVsbCwgIndvcmtzcGFj
ZXMiOiBbXSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAiZnVuY3Rpb25zIjogW3siZGlzcGxheV9u
YW1lIjogIk1jQWZlZSBBVEQgQW5hbHl6ZSBGaWxlIiwgInV1aWQiOiAiNTAxZWI0OWItMWNmMS00
ZjU0LWJjNTAtMTMwZDQyYzAzNWFjIiwgImNyZWF0b3IiOiB7ImRpc3BsYXlfbmFtZSI6ICJSZXNp
bGllbnQgU3lzYWRtaW4iLCAidHlwZSI6ICJ1c2VyIiwgImlkIjogMywgIm5hbWUiOiAiYndhbHNo
QHJlc2lsaWVudHN5c3RlbXMuY29tIn0sICJ2aWV3X2l0ZW1zIjogW3sic2hvd19pZiI6IG51bGws
ICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAi
ZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiODExZTk5ZDctZDE5NC00Y2U4LTg2
Y2MtYWZmNWUwMWFiODVjIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjogbnVsbCwg
ImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJl
bGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICI2MmQ5MzEwNS03MDVkLTQ4NzYtOTgx
My1lNjBlZTQzZTE5ZWQiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3dfaWYiOiBudWxsLCAi
ZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZSwgImVs
ZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjE2Nzc3MTZhLWE5NWUtNGY1NS04ZTNl
LTUzOTllNmQzYmQ5NiIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hvd19pZiI6IG51bGwsICJm
aWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlLCAiZWxl
bWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiYmEzMTgyNjEtZWQ2YS00YTM4LWExODct
OWUwYjY4ZDE2MDRmIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93X2lmIjogbnVsbCwgImZp
ZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2UsICJlbGVt
ZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICIwYTI5NGNhNS04MGE1LTQwNWEtYTBiNS0z
ZThkOWI2NzE3MjciLCAic3RlcF9sYWJlbCI6IG51bGx9XSwgImV4cG9ydF9rZXkiOiAibWNhZmVl
X2F0ZF9hbmFseXplX2ZpbGUiLCAibGFzdF9tb2RpZmllZF9ieSI6IHsiZGlzcGxheV9uYW1lIjog
IlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiAxLCAibmFtZSI6ICJh
ZG1pbkBjbzNzeXMuY29tIn0sICJuYW1lIjogIm1jYWZlZV9hdGRfYW5hbHl6ZV9maWxlIiwgInZl
cnNpb24iOiAxLCAid29ya2Zsb3dzIjogW3sicHJvZ3JhbW1hdGljX25hbWUiOiAibWNhZmVlX2F0
ZF9hbmFseXplX2FydGlmYWN0X2ZpbGUiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAidXVp
ZCI6IG51bGwsICJhY3Rpb25zIjogW10sICJuYW1lIjogIihFeGFtcGxlKSBNY0FmZWUgQVREIEFu
YWx5emUgQXJ0aWZhY3QgRmlsZSIsICJ3b3JrZmxvd19pZCI6IDIsICJkZXNjcmlwdGlvbiI6IG51
bGx9LCB7InByb2dyYW1tYXRpY19uYW1lIjogIm1jYWZlZV9hdGRfYW5hbHl6ZV9hdHRhY2htZW50
IiwgIm9iamVjdF90eXBlIjogImF0dGFjaG1lbnQiLCAidXVpZCI6IG51bGwsICJhY3Rpb25zIjog
W10sICJuYW1lIjogIihFeGFtcGxlKSBNY0FmZWUgQVREIEFuYWx5emUgQXR0YWNobWVudCIsICJ3
b3JrZmxvd19pZCI6IDgsICJkZXNjcmlwdGlvbiI6IG51bGx9XSwgImxhc3RfbW9kaWZpZWRfdGlt
ZSI6IDE1Mjg4ODg5ODcwMTQsICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAibWNhZmVlX2F0ZF9tZXNz
YWdlX2Rlc3RpbmF0aW9uIiwgImlkIjogMSwgImRlc2NyaXB0aW9uIjogeyJjb250ZW50IjogIlRo
aXMgZnVuY3Rpb24gZG93bmxvYWRzIGEgZmlsZSBmcm9tIHRoZSBSZXNpbGllbnQgcGxhdGZvcm0g
YW5kIHVwbG9hZHMgaXQgdG8gQVREIHRvIGJlIGFuYWx5emVkLiBUaGlzIGZ1bmN0aW9uIGFsd2F5
cyByZXR1cm5zIHRoZSByZXBvcnQgcmVzdWx0cyBpbiBKU09OIGZvcm1hdCB0byB0aGUgcG9zdC1w
cm9jZXNzIHNjcmlwdC4gSXQgbWF5IGFsc28gcmV0dXJuIGEgcGRmIG9yIEhUTUwgcmVwb3J0IGFz
IGFuIGluY2lkZW50IGF0dGFjaG1lbnQgaWYgc3BlY2lmaWVkLiIsICJmb3JtYXQiOiAidGV4dCJ9
fSwgeyJkaXNwbGF5X25hbWUiOiAiTWNBZmVlIEFURCBBbmFseXplIFVSTCIsICJ1dWlkIjogIjBk
NTY4NTk4LTk0OGItNDljMS1iMDZlLWRmOWZiMzA3NmYzNSIsICJjcmVhdG9yIjogeyJkaXNwbGF5
X25hbWUiOiAiUmVzaWxpZW50IFN5c2FkbWluIiwgInR5cGUiOiAidXNlciIsICJpZCI6IDMsICJu
YW1lIjogImJ3YWxzaEByZXNpbGllbnRzeXN0ZW1zLmNvbSJ9LCAidmlld19pdGVtcyI6IFt7InNo
b3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFk
ZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjgxMWU5OWQ3
LWQxOTQtNGNlOC04NmNjLWFmZjVlMDFhYjg1YyIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hv
d19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRl
ciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiNjJkOTMxMDUt
NzA1ZC00ODc2LTk4MTMtZTYwZWU0M2UxOWVkIiwgInN0ZXBfbGFiZWwiOiBudWxsfSwgeyJzaG93
X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xpbmtfaGVhZGVy
IjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICJiMDllMTg5OS03
NDUyLTRmNGYtYmRlMS0yM2IyZmJjY2Q5MDQiLCAic3RlcF9sYWJlbCI6IG51bGx9LCB7InNob3df
aWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3dfbGlua19oZWFkZXIi
OiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50IjogIjBhMjk0Y2E1LTgw
YTUtNDA1YS1hMGI1LTNlOGQ5YjY3MTcyNyIsICJzdGVwX2xhYmVsIjogbnVsbH0sIHsic2hvd19p
ZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19saW5rX2hlYWRlciI6
IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAiMmMxNzk1YjgtYThi
ZS00ZDM0LTlmYmEtMDYzMjQwOGRhNDlmIiwgInN0ZXBfbGFiZWwiOiBudWxsfV0sICJleHBvcnRf
a2V5IjogIm1jYWZlZV9hdGRfYW5hbHl6ZV91cmwiLCAibGFzdF9tb2RpZmllZF9ieSI6IHsiZGlz
cGxheV9uYW1lIjogIlJlc2lsaWVudCBTeXNhZG1pbiIsICJ0eXBlIjogInVzZXIiLCAiaWQiOiAx
LCAibmFtZSI6ICJhZG1pbkBjbzNzeXMuY29tIn0sICJuYW1lIjogIm1jYWZlZV9hdGRfYW5hbHl6
ZV91cmwiLCAidmVyc2lvbiI6IDIsICJ3b3JrZmxvd3MiOiBbeyJwcm9ncmFtbWF0aWNfbmFtZSI6
ICJtY2FmZWVfYXRkX2FuYWx5emVfdXJsIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgInV1
aWQiOiBudWxsLCAiYWN0aW9ucyI6IFtdLCAibmFtZSI6ICIoRXhhbXBsZSkgTWNBZmVlIEFURCBB
bmFseXplIFVSTCIsICJ3b3JrZmxvd19pZCI6IDYsICJkZXNjcmlwdGlvbiI6IG51bGx9XSwgImxh
c3RfbW9kaWZpZWRfdGltZSI6IDE1MzE5NjUzMDU2NTMsICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAi
bWNhZmVlX2F0ZF9tZXNzYWdlX2Rlc3RpbmF0aW9uIiwgImlkIjogMiwgImRlc2NyaXB0aW9uIjog
eyJjb250ZW50IjogIkNhbGwgdGhpcyBmdW5jdGlvbiBvbiBhIFVSTCBhcnRpZmFjdC4gVGhlIFVS
TCBjYW4gYmUgYSBsaW5rIHRvIGEgZmlsZSB0byBkb3dubG9hZCBvciBhIFVSTCB0byBhbmFseXpl
LiBUaGlzIG11c3QgYmUgc3BlY2lmaWVkIGluIHRoZSBtY2FmZWVfYXRkX3VybF9zdWJtaXRfdHlw
ZSBpbnB1dC4gVGhlIGZ1bmN0aW9uIGNhbiBhbHNvIGF0dGVtcHQgdG8gZGV0ZXJtaW5lIHRoZSBz
dWJtaXQgdHlwZS4gVGhpcyBmdW5jdGlvbiBhbHdheXMgcmV0dXJucyB0aGUgcmVwb3J0IHJlc3Vs
dHMgaW4gSlNPTiBmb3JtYXQgdG8gdGhlIHBvc3QtcHJvY2VzcyBzY3JpcHQuIEl0IG1heSBhbHNv
IHJldHVybiBhIHBkZiBvciBIVE1MIHJlcG9ydCBhcyBhbiBpbmNpZGVudCBhdHRhY2htZW50IGlm
IHNwZWNpZmllZC4iLCAiZm9ybWF0IjogInRleHQifX1dLCAibm90aWZpY2F0aW9ucyI6IG51bGws
ICJyZWd1bGF0b3JzIjogbnVsbCwgImluY2lkZW50X3R5cGVzIjogW3siY3JlYXRlX2RhdGUiOiAx
NTMyNTM0MzU5NDQzLCAiZGVzY3JpcHRpb24iOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50
ZXJuYWwpIiwgImV4cG9ydF9rZXkiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwp
IiwgImlkIjogMCwgIm5hbWUiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwpIiwg
InVwZGF0ZV9kYXRlIjogMTUzMjUzNDM1OTQ0MywgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4
LWFkMzktNGEwMDA0MDQ0YWEwIiwgImVuYWJsZWQiOiBmYWxzZSwgInN5c3RlbSI6IGZhbHNlLCAi
cGFyZW50X2lkIjogbnVsbCwgImhpZGRlbiI6IGZhbHNlfV0sICJzY3JpcHRzIjogW10sICJ0eXBl
cyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbeyJ1dWlkIjogIjY2ZTI0YjE2LWI4MjIt
NDdjZi04YzQ2LWQzOGQ0YzJiNjBjMSIsICJleHBvcnRfa2V5IjogIm1jYWZlZV9hdGRfbWVzc2Fn
ZV9kZXN0aW5hdGlvbiIsICJuYW1lIjogIk1jQWZlZSBBVEQgTWVzc2FnZSBEZXN0aW5hdGlvbiIs
ICJkZXN0aW5hdGlvbl90eXBlIjogMCwgInByb2dyYW1tYXRpY19uYW1lIjogIm1jYWZlZV9hdGRf
bWVzc2FnZV9kZXN0aW5hdGlvbiIsICJleHBlY3RfYWNrIjogdHJ1ZSwgInVzZXJzIjogWyJid2Fs
c2hAcmVzaWxpZW50c3lzdGVtcy5jb20iXX1dLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlwZXMiOiBb
XSwgInJvbGVzIjogW10sICJmaWVsZHMiOiBbeyJvcGVyYXRpb25zIjogW10sICJyZWFkX29ubHki
OiB0cnVlLCAidXVpZCI6ICJjM2YwZTNlZC0yMWUxLTRkNTMtYWZmYi1mZTVjYTMzMDhjY2EiLCAi
dGVtcGxhdGVzIjogW10sICJ0eXBlX2lkIjogMCwgImNob3NlbiI6IGZhbHNlLCAidGV4dCI6ICJT
aW11bGF0aW9uIiwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiZXhwb3J0X2tl
eSI6ICJpbmNpZGVudC9pbmNfdHJhaW5pbmciLCAidG9vbHRpcCI6ICJXaGV0aGVyIHRoZSBpbmNp
ZGVudCBpcyBhIHNpbXVsYXRpb24gb3IgYSByZWd1bGFyIGluY2lkZW50LiAgVGhpcyBmaWVsZCBp
cyByZWFkLW9ubHkuIiwgInJpY2hfdGV4dCI6IGZhbHNlLCAib3BlcmF0aW9uX3Blcm1zIjoge30s
ICJwcmVmaXgiOiBudWxsLCAiaW50ZXJuYWwiOiBmYWxzZSwgInZhbHVlcyI6IFtdLCAiYmxhbmtf
b3B0aW9uIjogZmFsc2UsICJpbnB1dF90eXBlIjogImJvb2xlYW4iLCAiY2hhbmdlYWJsZSI6IHRy
dWUsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiaWQiOiAzOCwgIm5hbWUiOiAiaW5jX3Ry
YWluaW5nIn0sIHsib3BlcmF0aW9ucyI6IFtdLCAidHlwZV9pZCI6IDExLCAib3BlcmF0aW9uX3Bl
cm1zIjoge30sICJ0ZXh0IjogImluY2lkZW50X2lkIiwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAi
cHJlZml4IjogbnVsbCwgImNoYW5nZWFibGUiOiB0cnVlLCAiaWQiOiA5NCwgInJlYWRfb25seSI6
IGZhbHNlLCAidXVpZCI6ICI4MTFlOTlkNy1kMTk0LTRjZTgtODZjYy1hZmY1ZTAxYWI4NWMiLCAi
Y2hvc2VuIjogZmFsc2UsICJpbnB1dF90eXBlIjogIm51bWJlciIsICJ0b29sdGlwIjogIiIsICJp
bnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4
cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9pbmNpZGVudF9pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6
IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAiaW5jaWRlbnRfaWQiLCAiZGVmYXVs
dF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdheXMiLCAidmFsdWVz
IjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJt
cyI6IHt9LCAidGV4dCI6ICJhcnRpZmFjdF92YWx1ZSIsICJibGFua19vcHRpb24iOiBmYWxzZSwg
InByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogOTcsICJyZWFkX29ubHki
OiBmYWxzZSwgInV1aWQiOiAiYjA5ZTE4OTktNzQ1Mi00ZjRmLWJkZTEtMjNiMmZiY2NkOTA0Iiwg
ImNob3NlbiI6IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgInRvb2x0aXAiOiAiIiwgImlu
dGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhw
b3J0X2tleSI6ICJfX2Z1bmN0aW9uL2FydGlmYWN0X3ZhbHVlIiwgImhpZGVfbm90aWZpY2F0aW9u
IjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJhcnRpZmFjdF92YWx1ZSIsICJk
ZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRp
b25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAi
bWNhZmVlX2F0ZF91cmxfc3VibWl0X3R5cGUiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVm
aXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDk5LCAicmVhZF9vbmx5IjogZmFs
c2UsICJ1dWlkIjogIjJjMTc5NWI4LWE4YmUtNGQzNC05ZmJhLTA2MzI0MDhkYTQ5ZiIsICJjaG9z
ZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAic2VsZWN0IiwgInRvb2x0aXAiOiAiVHlwZSBvZiBV
UkwgKHdlYiBwYWdlIG9yIGZpbGUpIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBm
YWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL21jYWZlZV9h
dGRfdXJsX3N1Ym1pdF90eXBlIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhv
bGRlciI6ICIiLCAibmFtZSI6ICJtY2FmZWVfYXRkX3VybF9zdWJtaXRfdHlwZSIsICJkZWZhdWx0
X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInJlcXVpcmVkIjogImFsd2F5cyIsICJ2YWx1ZXMi
OiBbeyJ1dWlkIjogImE4NTZlNDk0LWJhMDctNDVhYy1iM2I2LWZmYTc0N2RmOWI3NyIsICJkZWZh
dWx0IjogZmFsc2UsICJlbmFibGVkIjogdHJ1ZSwgInZhbHVlIjogMTExLCAibGFiZWwiOiAiQW5h
bHl6ZSBVUkwiLCAiaGlkZGVuIjogZmFsc2UsICJwcm9wZXJ0aWVzIjogbnVsbH0sIHsidXVpZCI6
ICJhZjRjYWJkNy1mN2JiLTQyNDAtOTk4NS1kOTJkMGYzMGU3NmMiLCAiZGVmYXVsdCI6IGZhbHNl
LCAiZW5hYmxlZCI6IHRydWUsICJ2YWx1ZSI6IDExMiwgImxhYmVsIjogIkRvd25sb2FkIGFuZCBh
bmFseXplIGZpbGUgZnJvbSBVUkwiLCAiaGlkZGVuIjogZmFsc2UsICJwcm9wZXJ0aWVzIjogbnVs
bH1dfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMi
OiB7fSwgInRleHQiOiAidGFza19pZCIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6
IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogODcsICJyZWFkX29ubHkiOiBmYWxzZSwg
InV1aWQiOiAiYmEzMTgyNjEtZWQ2YS00YTM4LWExODctOWUwYjY4ZDE2MDRmIiwgImNob3NlbiI6
IGZhbHNlLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAidG9vbHRpcCI6ICIiLCAiaW50ZXJuYWwi
OiBmYWxzZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAidGVtcGxhdGVzIjogW10sICJleHBvcnRfa2V5
IjogIl9fZnVuY3Rpb24vdGFza19pZCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxh
Y2Vob2xkZXIiOiAiIiwgIm5hbWUiOiAidGFza19pZCIsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2
ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjog
MTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInRleHQiOiAiYXJ0aWZhY3RfaWQiLCAiYmxhbmtf
b3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxsLCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6
IDg5LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogIjYyZDkzMTA1LTcwNWQtNDg3Ni05ODEz
LWU2MGVlNDNlMTllZCIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwg
InRvb2x0aXAiOiAiIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRl
bXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2FydGlmYWN0X2lkIiwgImhp
ZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJhcnRp
ZmFjdF9pZCIsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtd
fSwgeyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7
fSwgInRleHQiOiAiYXR0YWNobWVudF9pZCIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZp
eCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMTAxLCAicmVhZF9vbmx5IjogZmFs
c2UsICJ1dWlkIjogIjE2Nzc3MTZhLWE5NWUtNGY1NS04ZTNlLTUzOTllNmQzYmQ5NiIsICJjaG9z
ZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgInRvb2x0aXAiOiAiIiwgImludGVy
bmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0
X2tleSI6ICJfX2Z1bmN0aW9uL2F0dGFjaG1lbnRfaWQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBm
YWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogImF0dGFjaG1lbnRfaWQiLCAiZGVmYXVs
dF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJ2YWx1ZXMiOiBbXX0sIHsib3BlcmF0aW9ucyI6
IFtdLCAidHlwZV9pZCI6IDExLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ0ZXh0IjogIm1jYWZl
ZV9hdGRfcmVwb3J0X3R5cGUiLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJwcmVmaXgiOiBudWxs
LCAiY2hhbmdlYWJsZSI6IHRydWUsICJpZCI6IDkxLCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlk
IjogIjBhMjk0Y2E1LTgwYTUtNDA1YS1hMGI1LTNlOGQ5YjY3MTcyNyIsICJjaG9zZW4iOiBmYWxz
ZSwgImlucHV0X3R5cGUiOiAic2VsZWN0IiwgInRvb2x0aXAiOiAiQ2hvb3NlIHJlcG9ydCB0eXBl
IHRvIHJldHVybiBhcyBhbiBhdHRhY2htZW50IHRvIGFuIGluY2lkZW50IiwgImludGVybmFsIjog
ZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBsYXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6
ICJfX2Z1bmN0aW9uL21jYWZlZV9hdGRfcmVwb3J0X3R5cGUiLCAiaGlkZV9ub3RpZmljYXRpb24i
OiBmYWxzZSwgInBsYWNlaG9sZGVyIjogIiIsICJuYW1lIjogIm1jYWZlZV9hdGRfcmVwb3J0X3R5
cGUiLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJyZXF1aXJlZCI6ICJhbHdh
eXMiLCAidmFsdWVzIjogW3sidXVpZCI6ICJiN2VjYTFlZi02YzU4LTRiYjgtOTc2ZS0yMDY2MzI2
MmFjZTEiLCAiZGVmYXVsdCI6IHRydWUsICJlbmFibGVkIjogdHJ1ZSwgInZhbHVlIjogMTAzLCAi
bGFiZWwiOiAicGRmIiwgImhpZGRlbiI6IGZhbHNlLCAicHJvcGVydGllcyI6IG51bGx9LCB7InV1
aWQiOiAiNWFjNTFlMDEtYWU0ZC00NjBiLTk5ZTQtZWIwZjRlMzIyOTJjIiwgImRlZmF1bHQiOiBm
YWxzZSwgImVuYWJsZWQiOiB0cnVlLCAidmFsdWUiOiAxMDQsICJsYWJlbCI6ICJodG1sIiwgImhp
ZGRlbiI6IGZhbHNlLCAicHJvcGVydGllcyI6IG51bGx9LCB7InV1aWQiOiAiY2ViMTBkOGUtNTZk
Mi00MTgxLTkxMTAtMzdhOWRjMTQ3M2JkIiwgImRlZmF1bHQiOiBmYWxzZSwgImVuYWJsZWQiOiB0
cnVlLCAidmFsdWUiOiAxMDUsICJsYWJlbCI6ICJObyByZXBvcnQsIG9ubHkgcmV0dXJuIEpTT04g
dG8gcG9zdC1wcm9jZXNzIHNjcmlwdCIsICJoaWRkZW4iOiBmYWxzZSwgInByb3BlcnRpZXMiOiBu
dWxsfV19XSwgIm92ZXJyaWRlcyI6IFtdLCAiZXhwb3J0X2RhdGUiOiAxNTMxOTc0NTY5NDI5fQ==
"""
)
| 73.893939
| 87
| 0.977978
| 810
| 48,770
| 58.834568
| 0.91358
| 0.002077
| 0.00235
| 0.000839
| 0.001175
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110626
| 0.018762
| 48,770
| 659
| 88
| 74.00607
| 0.885216
| 0.016055
| 0
| 0.0096
| 1
| 0
| 0.994201
| 0.981269
| 0
| 1
| 0
| 0
| 0
| 1
| 0.0016
| false
| 0
| 0.0048
| 0
| 0.0064
| 0.0016
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9c9faf7a02563bb8eb2daf38e098b80ad541f46a
| 19,073
|
py
|
Python
|
example_algos/models/nets.py
|
MIC-DKFZ/mood
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
[
"Apache-2.0"
] | 42
|
2020-04-30T11:16:11.000Z
|
2021-09-15T16:15:30.000Z
|
example_algos/models/nets.py
|
MIC-DKFZ/mood
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
[
"Apache-2.0"
] | 2
|
2020-06-19T06:24:19.000Z
|
2020-07-27T08:07:54.000Z
|
example_algos/models/nets.py
|
MIC-DKFZ/mood
|
a01303adb4256653b133e2f7cd4741d366b681f7
|
[
"Apache-2.0"
] | 5
|
2020-07-20T13:26:50.000Z
|
2021-07-18T22:42:47.000Z
|
import warnings
import numpy as np
import torch
import torch.nn as nn
class NoOp(nn.Module):
    """Identity Pytorch module.

    Returns its first input unchanged. Accepts (and ignores) arbitrary
    constructor and forward arguments so it can stand in for any
    configurable op (normalization, block op, ...) in this file.
    """

    def __init__(self, *args, **kwargs):
        """All constructor arguments are deliberately ignored."""
        super(NoOp, self).__init__()

    def forward(self, x, *args, **kwargs):
        # Extra positional/keyword arguments are deliberately ignored.
        return x
class ConvModule(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        conv_op=nn.Conv2d,
        conv_params=None,
        normalization_op=None,
        normalization_params=None,
        activation_op=nn.LeakyReLU,
        activation_params=None,
    ):
        """Basic configurable Conv module.

        Applies, in order, whichever of the three ops were given:
            x = conv(x)
            x = some_norm(x)
            x = nonlin(x)

        Args:
            in_channels (int): Number of input channels / feature maps.
            out_channels (int): Number of output channels / feature maps.
            conv_op (torch.nn.Module, optional): Conv operation. Defaults to nn.Conv2d.
            conv_params (dict, optional): Init parameters for the conv operation. Defaults to None.
            normalization_op (torch.nn.Module, optional): Normalization operation
                (e.g. BatchNorm, InstanceNorm, ...). Defaults to None.
            normalization_params (dict, optional): Init parameters for the normalization operation. Defaults to None.
            activation_op (torch.nn.Module, optional): Activation / non-linearity
                (e.g. ReLU, Sigmoid, ...). Defaults to nn.LeakyReLU.
            activation_params (dict, optional): Init parameters for the activation operation. Defaults to None.
        """
        super(ConvModule, self).__init__()

        self.conv_params = {} if conv_params is None else conv_params
        self.normalization_params = {} if normalization_params is None else normalization_params
        self.activation_params = {} if activation_params is None else activation_params

        # An op given as None (or as a placeholder string) is skipped entirely.
        self.conv = None
        self.normalization = None
        self.activation = None
        if conv_op is not None and not isinstance(conv_op, str):
            self.conv = conv_op(in_channels, out_channels, **self.conv_params)
        if normalization_op is not None and not isinstance(normalization_op, str):
            self.normalization = normalization_op(out_channels, **self.normalization_params)
        if activation_op is not None and not isinstance(activation_op, str):
            self.activation = activation_op(**self.activation_params)

    def forward(self, input, conv_add_input=None, normalization_add_input=None, activation_add_input=None):
        """Run the configured ops in conv -> norm -> activation order.

        The *_add_input dicts, when given, are passed as extra keyword
        arguments to the corresponding sub-module's forward call.
        """
        x = input
        if self.conv is not None:
            x = self.conv(x) if conv_add_input is None else self.conv(x, **conv_add_input)
        if self.normalization is not None:
            x = (
                self.normalization(x)
                if normalization_add_input is None
                else self.normalization(x, **normalization_add_input)
            )
        if self.activation is not None:
            x = self.activation(x) if activation_add_input is None else self.activation(x, **activation_add_input)
        return x
class ConvBlock(nn.Module):
    def __init__(
        self,
        n_convs: int,
        n_featmaps: int,
        conv_op=nn.Conv2d,
        conv_params=None,
        normalization_op=nn.BatchNorm2d,
        normalization_params=None,
        activation_op=nn.LeakyReLU,
        activation_params=None,
    ):
        """Conv block built from n_convs repeated @ConvModules with a fixed feature map size.

        Args:
            n_convs (int): Number of convolutions.
            n_featmaps (int): Feature map size of the conv (same for input and output of every module).
            conv_op (torch.nn.Module, optional): Convolution operation -> see ConvModule. Defaults to nn.Conv2d.
            conv_params (dict, optional): Init parameters for the conv operation. Defaults to None.
            normalization_op (torch.nn.Module, optional): Normalization operation
                (e.g. BatchNorm, InstanceNorm, ...) -> see ConvModule. Defaults to nn.BatchNorm2d.
            normalization_params (dict, optional): Init parameters for the normalization operation. Defaults to None.
            activation_op (torch.nn.Module, optional): Activation / non-linearity
                (e.g. ReLU, Sigmoid, ...) -> see ConvModule. Defaults to nn.LeakyReLU.
            activation_params (dict, optional): Init parameters for the activation operation. Defaults to None.
        """
        super(ConvBlock, self).__init__()

        self.n_featmaps = n_featmaps
        self.n_convs = n_convs
        self.conv_params = {} if conv_params is None else conv_params

        # Every repeated module maps n_featmaps channels to n_featmaps channels.
        self.conv_list = nn.ModuleList(
            [
                ConvModule(
                    n_featmaps,
                    n_featmaps,
                    conv_op=conv_op,
                    conv_params=conv_params,
                    normalization_op=normalization_op,
                    normalization_params=normalization_params,
                    activation_op=activation_op,
                    activation_params=activation_params,
                )
                for _ in range(n_convs)
            ]
        )

    def forward(self, input, **frwd_params):
        """Apply the repeated conv modules in sequence.

        NOTE: frwd_params is accepted for interface compatibility but is
        currently not forwarded to the sub-modules.
        """
        x = input
        for layer in self.conv_list:
            x = layer(x)
        return x
class ResBlock(nn.Module):
    def __init__(
        self,
        n_convs,
        n_featmaps,
        conv_op=nn.Conv2d,
        conv_params=None,
        normalization_op=nn.BatchNorm2d,
        normalization_params=None,
        activation_op=nn.LeakyReLU,
        activation_params=None,
    ):
        """Conv block with a skip / residual connection:

            x = input
            x = conv_block(x)
            out = x + input

        Args:
            n_convs (int): Number of convolutions in the conv block.
            n_featmaps (int): Feature map size of the conv block.
            conv_op (torch.nn.Module, optional): Convolution operation -> see ConvModule. Defaults to nn.Conv2d.
            conv_params (dict, optional): Init parameters for the conv operation. Defaults to None.
            normalization_op (torch.nn.Module, optional): Normalization operation
                (e.g. BatchNorm, InstanceNorm, ...) -> see ConvModule. Defaults to nn.BatchNorm2d.
            normalization_params (dict, optional): Init parameters for the normalization operation. Defaults to None.
            activation_op (torch.nn.Module, optional): Activation / non-linearity
                (e.g. ReLU, Sigmoid, ...) -> see ConvModule. Defaults to nn.LeakyReLU.
            activation_params (dict, optional): Init parameters for the activation operation. Defaults to None.
        """
        super(ResBlock, self).__init__()

        self.n_featmaps = n_featmaps
        self.n_convs = n_convs
        self.conv_params = conv_params
        if self.conv_params is None:
            self.conv_params = {}

        # BUG FIX: the two positional arguments were previously swapped
        # (ConvBlock(n_featmaps, n_convs, ...)), which built n_featmaps
        # convolutions of n_convs feature maps each and broke the residual
        # add for any n_convs != n_featmaps. Keyword arguments make the
        # intent explicit.
        self.conv_block = ConvBlock(
            n_convs=n_convs,
            n_featmaps=n_featmaps,
            conv_op=conv_op,
            conv_params=conv_params,
            normalization_op=normalization_op,
            normalization_params=normalization_params,
            activation_op=activation_op,
            activation_params=activation_params,
        )

    def forward(self, input, **frwd_params):
        """Return conv_block(input) + input (skip connection)."""
        x = self.conv_block(input)
        out = x + input
        return out
# Basic Generator
class BasicGenerator(nn.Module):
    def __init__(
        self,
        input_size,
        z_dim=256,
        fmap_sizes=(256, 128, 64),
        upsample_op=nn.ConvTranspose2d,
        conv_params=None,
        normalization_op=NoOp,
        normalization_params=None,
        activation_op=nn.LeakyReLU,
        activation_params=None,
        block_op=NoOp,
        block_params=None,
        to_1x1=True,
    ):
        """Basic configurable Generator / Decoder.

        Allows for multiple "feature-map" levels defined by the feature map size,
        where for each feature map size a conv operation + optional conv block is used.

        Args:
            input_size ((int, int, int)): Size of the output image in format CxHxW.
            z_dim (int, optional): Dimension of the latent / input (C channel-dim).
                If None, no start convolution is built. Defaults to 256.
            fmap_sizes (tuple, optional): Defines the upsampling levels of the generator,
                list / tuple of ints, where each int defines the number of feature maps
                in the layer. Defaults to (256, 128, 64).
            upsample_op (torch.nn.Module, optional): Upsampling operation used to upsample
                to a new level / featuremap size. Defaults to nn.ConvTranspose2d.
            conv_params (dict, optional): Init parameters for the conv operation.
                Defaults to dict(kernel_size=4, stride=2, padding=1, bias=False).
            normalization_op (torch.nn.Module, optional): Normalization operation
                (e.g. BatchNorm, InstanceNorm, ...) -> see ConvModule. Defaults to NoOp.
            normalization_params (dict, optional): Init parameters for the normalization
                operation (only used in the start module; see NOTE below). Defaults to None.
            activation_op (torch.nn.Module, optional): Activation / non-linearity
                (e.g. ReLU, Sigmoid, ...) -> see ConvModule. Defaults to nn.LeakyReLU.
            activation_params (dict, optional): Init parameters for the activation operation. Defaults to None.
            block_op (torch.nn.Module, optional): Block operation used for each feature map
                size after each upsample op, e.g. ConvBlock / ResBlock. Defaults to NoOp.
            block_params (dict, optional): Init parameters for the block operation. Defaults to None.
            to_1x1 (bool, optional): If True, the latent is a z_dim x 1 x 1 vector; if False,
                spatial resolutions other than 1x1 (z_dim x H x W) are allowed. Defaults to True.

        Raises:
            AttributeError: If fmap_sizes is not a list/tuple, has fewer than two
                elements, or is too long for the given input_size.
        """
        super(BasicGenerator, self).__init__()

        if conv_params is None:
            conv_params = dict(kernel_size=4, stride=2, padding=1, bias=False)
        if block_op is None:
            block_op = NoOp
        if block_params is None:
            block_params = {}

        n_channels = input_size[0]
        input_size_ = np.array(input_size[1:])

        if not isinstance(fmap_sizes, list) and not isinstance(fmap_sizes, tuple):
            # BUG FIX: message previously claimed an int was accepted, but ints
            # are rejected by this check.
            raise AttributeError("fmap_sizes has to be either a list or a tuple")
        elif len(fmap_sizes) < 2:
            # BUG FIX: message previously said "at least three elements"
            # although the check only requires two.
            raise AttributeError("fmap_sizes has to contain at least two elements")
        else:
            h_size_bot = fmap_sizes[0]

        # We need to know how many layers we will use at the beginning
        input_size_new = input_size_ // (2 ** len(fmap_sizes))
        if np.min(input_size_new) < 2 and z_dim is not None:
            raise AttributeError("fmap_sizes too long, one image dimension has already perished")

        ### Start block
        if not to_1x1:
            kernel_size_start = [min(conv_params["kernel_size"], i) for i in input_size_new]
        else:
            kernel_size_start = input_size_new.tolist()

        if z_dim is not None:
            # Project the latent up to the first feature map size.
            self.start = ConvModule(
                z_dim,
                h_size_bot,
                conv_op=upsample_op,
                conv_params=dict(kernel_size=kernel_size_start, stride=1, padding=0, bias=False),
                normalization_op=normalization_op,
                normalization_params=normalization_params,
                activation_op=activation_op,
                activation_params=activation_params,
            )
            input_size_new = input_size_new * 2
        else:
            self.start = NoOp()

        ### Middle block (Done until we reach ? x input_size/2 x input_size/2)
        self.middle_blocks = nn.ModuleList()
        for h_size_top in fmap_sizes[1:]:
            self.middle_blocks.append(block_op(h_size_bot, **block_params))
            self.middle_blocks.append(
                ConvModule(
                    h_size_bot,
                    h_size_top,
                    conv_op=upsample_op,
                    conv_params=conv_params,
                    normalization_op=normalization_op,
                    # NOTE(review): normalization_params is not forwarded to the
                    # middle modules ({} is passed) — confirm whether intentional.
                    normalization_params={},
                    activation_op=activation_op,
                    activation_params=activation_params,
                )
            )
            h_size_bot = h_size_top
            input_size_new = input_size_new * 2

        ### End block: map to the requested number of output channels,
        ### without normalization or activation.
        self.end = ConvModule(
            h_size_bot,
            n_channels,
            conv_op=upsample_op,
            conv_params=conv_params,
            normalization_op=None,
            activation_op=None,
        )

    def forward(self, inpt, **kwargs):
        """Run start -> middle blocks -> end; kwargs are forwarded to each module."""
        output = self.start(inpt, **kwargs)
        for middle in self.middle_blocks:
            output = middle(output, **kwargs)
        output = self.end(output, **kwargs)
        return output
# Basic Encoder
class BasicEncoder(nn.Module):
def __init__(
self,
input_size,
z_dim=256,
fmap_sizes=(64, 128, 256),
conv_op=nn.Conv2d,
conv_params=None,
normalization_op=NoOp,
normalization_params=None,
activation_op=nn.LeakyReLU,
activation_params=None,
block_op=NoOp,
block_params=None,
to_1x1=True,
):
"""Basic configureable Encoder.
Allows for mutilple "feature-map" levels defined by the feature map size, where for each feature map size a conv operation + optional conv block is used.
Args:
z_dim (int, optional): [description]. Dimension of the latent / Input dimension (C channel-dim).
fmap_sizes (tuple, optional): [Defines the Upsampling-Levels of the generator, list/ tuple of ints, where each
int defines the number of feature maps in the layer]. Defaults to (64, 128, 256).
conv_op ([torch.nn.Module], optional): [Convolutioon operation used to downsample to a new level/ featuremap size]. Defaults to nn.Conv2d.
conv_params ([dict], optional): [Init parameters for the conv operation]. Defaults to dict(kernel_size=3, stride=2, padding=1, bias=False).
normalization_op ([torch.nn.Module], optional): [Normalization Operation (e.g. BatchNorm, InstanceNorm,...) -> see ConvModule]. Defaults to nn.BatchNorm2d.
normalization_params ([dict], optional): [Init parameters for the normalization operation]. Defaults to None.
activation_op ([torch.nn.Module], optional): [Actiovation Operation/ Non-linearity (e.g. ReLU, Sigmoid,...) -> see ConvModule]. Defaults to nn.LeakyReLU.
activation_params ([dict], optional): [Init parameters for the activation operation]. Defaults to None.
block_op ([torch.nn.Module], optional): [Block operation used for each feature map size after each upsample op of e.g. ConvBlock/ ResidualBlock]. Defaults to NoOp.
block_params ([dict], optional): [Init parameters for the block operation]. Defaults to None.
to_1x1 (bool, optional): [If True, then the last conv layer goes to a latent dimesion is a z_dim x 1 x 1 vector (similar to fully connected) or if False allows spatial resolution not to be 1x1 (z_dim x H x W, uses the in the conv_params given conv-kernel-size) ]. Defaults to True.
"""
super(BasicEncoder, self).__init__()
if conv_params is None:
conv_params = dict(kernel_size=3, stride=2, padding=1, bias=False)
if block_op is None:
block_op = NoOp
if block_params is None:
block_params = {}
n_channels = input_size[0]
input_size_new = np.array(input_size[1:])
if not isinstance(fmap_sizes, list) and not isinstance(fmap_sizes, tuple):
raise AttributeError("fmap_sizes has to be either a list or tuple or an int")
# elif len(fmap_sizes) < 2:
# raise AttributeError("fmap_sizes has to contain at least three elements")
else:
h_size_bot = fmap_sizes[0]
### Start block
self.start = ConvModule(
n_channels,
h_size_bot,
conv_op=conv_op,
conv_params=conv_params,
normalization_op=normalization_op,
normalization_params={},
activation_op=activation_op,
activation_params=activation_params,
)
input_size_new = input_size_new // 2
### Middle block (Done until we reach ? x 4 x 4)
self.middle_blocks = nn.ModuleList()
for h_size_top in fmap_sizes[1:]:
self.middle_blocks.append(block_op(h_size_bot, **block_params))
self.middle_blocks.append(
ConvModule(
h_size_bot,
h_size_top,
conv_op=conv_op,
conv_params=conv_params,
normalization_op=normalization_op,
normalization_params={},
activation_op=activation_op,
activation_params=activation_params,
)
)
h_size_bot = h_size_top
input_size_new = input_size_new // 2
if np.min(input_size_new) < 2 and z_dim is not None:
raise ("fmap_sizes to long, one image dimension has already perished")
### End block
if not to_1x1:
kernel_size_end = [min(conv_params["kernel_size"], i) for i in input_size_new]
else:
kernel_size_end = input_size_new.tolist()
if z_dim is not None:
self.end = ConvModule(
h_size_bot,
z_dim,
conv_op=conv_op,
conv_params=dict(kernel_size=kernel_size_end, stride=1, padding=0, bias=False),
normalization_op=None,
activation_op=None,
)
if to_1x1:
self.output_size = (z_dim, 1, 1)
else:
self.output_size = (z_dim, *[i - (j - 1) for i, j in zip(input_size_new, kernel_size_end)])
else:
self.end = NoOp()
self.output_size = input_size_new
def forward(self, inpt, **kwargs):
    """Encode `inpt` by running it through start, middle and end modules.

    Args:
        inpt: input tensor accepted by ``self.start``.
        **kwargs: forwarded unchanged to every sub-module call.

    Returns:
        The output of ``self.end`` applied to the last middle-block output.
    """
    out = self.start(inpt, **kwargs)
    for block in self.middle_blocks:
        out = block(out, **kwargs)
    return self.end(out, **kwargs)
| 41.73523
| 293
| 0.612594
| 2,328
| 19,073
| 4.816151
| 0.095361
| 0.039244
| 0.030503
| 0.022743
| 0.795665
| 0.779254
| 0.753032
| 0.725829
| 0.707545
| 0.694345
| 0
| 0.008913
| 0.299953
| 19,073
| 456
| 294
| 41.826754
| 0.830812
| 0.378493
| 0
| 0.679054
| 0
| 0
| 0.026438
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.013514
| 0.003378
| 0.094595
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9cbfbaf94464020a82869f1eb3e3ffe7c979c43b
| 177
|
py
|
Python
|
examples/custom-cli-provider/dynamic-import.py
|
berttejeda/ansible-taskrunner
|
b0cf8a56caa57ebe6dcc4da022f05c464f6a09f2
|
[
"MIT"
] | 17
|
2019-08-03T06:46:11.000Z
|
2022-01-25T17:17:56.000Z
|
examples/custom-cli-provider/dynamic-import.py
|
berttejeda/ansible-taskrunner
|
b0cf8a56caa57ebe6dcc4da022f05c464f6a09f2
|
[
"MIT"
] | null | null | null |
examples/custom-cli-provider/dynamic-import.py
|
berttejeda/ansible-taskrunner
|
b0cf8a56caa57ebe6dcc4da022f05c464f6a09f2
|
[
"MIT"
] | 1
|
2019-08-03T15:58:47.000Z
|
2019-08-03T15:58:47.000Z
|
"""Show the module type and file of the 'example' module via the imp API."""
import imp
from imp_get_suffixes import module_types

print('Package:')
f, filename, description = imp.find_module('example')
# BUG FIX: imp.find_module returns an OPEN file object for plain source
# modules (None for packages/builtins); close it so the handle is not leaked.
if f is not None:
    f.close()
# Converted Python-2 print statements to calls; the %-format keeps the output
# a single string, so it prints identically under both Python 2 and 3.
print('%s %s' % (module_types[description[2]], filename))
print('')
| 19.666667
| 53
| 0.80226
| 25
| 177
| 5.48
| 0.6
| 0.160584
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.107345
| 177
| 8
| 54
| 22.125
| 0.860759
| 0
| 0
| 0
| 0
| 0
| 0.085227
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.333333
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
9cf67032e56298c8ea4d9fa279e540ccc90b3066
| 129
|
py
|
Python
|
adapters/driven/databaseInterface.py
|
demorose/hexashell_python
|
13d5efa166ea9699cae70cf6b9eb50e27feb1ef6
|
[
"MIT"
] | null | null | null |
adapters/driven/databaseInterface.py
|
demorose/hexashell_python
|
13d5efa166ea9699cae70cf6b9eb50e27feb1ef6
|
[
"MIT"
] | null | null | null |
adapters/driven/databaseInterface.py
|
demorose/hexashell_python
|
13d5efa166ea9699cae70cf6b9eb50e27feb1ef6
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class DatabaseInterface(ABC):
    """Driven-port contract: user lookup supplied by a concrete adapter."""

    @abstractmethod
    def get_user(self, id):
        """Return the user identified by `id` (behavior defined by subclasses)."""
| 16.125
| 35
| 0.697674
| 15
| 129
| 5.933333
| 0.8
| 0.382022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 129
| 7
| 36
| 18.428571
| 0.89899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
140593eec297704bca8a87d85e289aefbe3ff652
| 6,965
|
py
|
Python
|
tests/test_new_join.py
|
kinegratii/borax
|
3595f554b788c31d0f07be4099db68c854db65f7
|
[
"MIT"
] | 51
|
2018-04-18T13:52:15.000Z
|
2022-03-23T13:46:02.000Z
|
tests/test_new_join.py
|
kinegratii/borax
|
3595f554b788c31d0f07be4099db68c854db65f7
|
[
"MIT"
] | 26
|
2019-05-26T02:22:34.000Z
|
2022-03-14T07:50:32.000Z
|
tests/test_new_join.py
|
kinegratii/borax
|
3595f554b788c31d0f07be4099db68c854db65f7
|
[
"MIT"
] | 7
|
2018-09-30T08:17:29.000Z
|
2020-12-16T01:49:24.000Z
|
# coding=utf8
import unittest
import copy
from borax.datasets.join_ import (OnClause, OC, SelectClause, SC, join, join_one, deep_join, deep_join_one)
# Catalog lookup keyed by id (dict form of the fixture data).
catalogs_dict = {
    1: 'Python',
    2: 'Java',
    3: '软件工程'
}
# Same catalog data as (id, name) pairs, mimicking Django-style choices.
catalog_choices = [(1, 'Python'), (2, 'Java'), (3, '软件工程')]
# Same catalog data as a list of row dicts, as a join would see a table.
catalogs_list = [
    {'id': 1, 'name': 'Python'},
    {'id': 2, 'name': 'Java'},
    {'id': 3, 'name': '软件工程'},
]
# Sample "books" rows; `catalog` is a foreign key into the catalog data above.
books = [
    {'name': 'Python入门教程', 'catalog': 1, 'price': 45},
    {'name': 'Java标准库', 'catalog': 2, 'price': 80},
    {'name': '软件工程(本科教学版)', 'catalog': 3, 'price': 45},
    {'name': 'Django Book', 'catalog': 1, 'price': 45},
    {'name': '系统架构设计教程', 'catalog': 3, 'price': 104},
]
class OnClauseTestCase(unittest.TestCase):
    """Behaviour of OnClause and its OC alias."""

    def test_type_hints(self):
        """Both the full name and the alias build tuple-based OnClause objects."""
        for clause in (OnClause("foo", "foo"), OC("foo")):
            self.assertEqual("OnClause", clause.__class__.__name__)
            self.assertTrue(isinstance(clause, tuple))

    def test_build(self):
        """from_val normalises strings/tuples/OnClause; lists raise TypeError."""
        want = ("foo", "foo")
        for raw in ("foo", ("foo",), ("foo", "foo"), OnClause("foo")):
            self.assertEqual(want, OnClause.from_val(raw))
        with self.assertRaises(TypeError):
            OnClause.from_val(["foo", "bar"])
class SelectClauseTestCase(unittest.TestCase):
    """Behaviour of SelectClause and its SC alias."""

    def test_type_hints(self):
        c1 = SelectClause("foo", "foo")
        self.assertEqual("SelectClause", c1.__class__.__name__)
        self.assertTrue(isinstance(c1, tuple))
        alias_obj = SC("foo")
        self.assertEqual("SelectClause", alias_obj.__class__.__name__)
        self.assertTrue(isinstance(alias_obj, tuple))

    def test_build(self):
        expected = ("foo", "foo", None)
        self.assertEqual(expected, SelectClause.from_val("foo"))
        self.assertEqual(expected, SelectClause.from_val(("foo",)))
        self.assertEqual(expected, SelectClause.from_val(("foo", "foo")))
        self.assertEqual(expected, SelectClause.from_val(SelectClause("foo")))
        with self.assertRaises(TypeError):
            # BUG FIX: the original called OnClause.from_val here (copy-paste
            # from OnClauseTestCase), so SelectClause's list rejection was
            # never actually exercised.
            SelectClause.from_val(["foo", "bar"])
class JoinOneTestCase(unittest.TestCase):
    """join_one against dict, choices and callback lookups."""

    def _assert_all_named(self, rows):
        # Every joined row must have received the catalog_name column.
        self.assertTrue(all('catalog_name' in row for row in rows))

    def test_with_dict(self):
        data = copy.deepcopy(books)
        joined = join_one(data, catalogs_dict, on='catalog', select_as='catalog_name')
        self._assert_all_named(joined)
        self.assertEqual('Java', joined[1]['catalog_name'])
        # join_one mutates the rows it was given.
        self.assertTrue('catalog_name' in data[1])

    def test_with_choices(self):
        data = copy.deepcopy(books)
        joined = join_one(data, catalog_choices, on='catalog', select_as='catalog_name')
        self._assert_all_named(joined)
        self.assertEqual('Java', joined[1]['catalog_name'])

    def test_join_one_with_default(self):
        data = copy.deepcopy(books)
        partial_catalogs = {
            1: 'Python',
            2: 'Java'
        }
        joined = join_one(data, partial_catalogs, on='catalog', select_as='catalog_name')
        self._assert_all_named(joined)
        # Unmatched key falls back to None.
        self.assertEqual(None, joined[2]['catalog_name'])

    def test_join_one_with_custom_default(self):
        data = copy.deepcopy(books)
        partial_catalogs = {
            1: 'Python',
            2: 'Java'
        }
        joined = join_one(data, partial_catalogs, on='catalog', select_as='catalog_name',
                          default='[未知分类]')
        self._assert_all_named(joined)
        self.assertEqual('[未知分类]', joined[2]['catalog_name'])

    def test_callback(self):
        def _key(item):
            return item['catalog']

        data = copy.deepcopy(books)
        joined = join_one(data, catalogs_dict, on=_key, select_as='catalog_name')
        self._assert_all_named(joined)
        self.assertEqual('Java', joined[1]['catalog_name'])
class JoinTestCase(unittest.TestCase):
    """join with list-of-dict lookups, defaults and select clauses."""

    def test_basic_join(self):
        data = copy.deepcopy(books)
        joined = join(data, catalogs_list, on=('catalog', 'id'),
                      select_as=('name', 'catalog_name'))
        self.assertTrue(all('catalog_name' in row for row in joined))
        self.assertEqual('Java', joined[1]['catalog_name'])
        # join mutates the rows it was given.
        self.assertTrue('catalog_name' in data[1])

    def test_default_kwargs(self):
        rows = [
            {'name': 'Demo Book', 'catalog': 10, 'price': 104},
        ]
        joined = join(rows, catalogs_list, on='catalog', select_as='catalog_name',
                      defaults={'catalog_name': 'Unknown'})
        self.assertTrue(all('catalog_name' in row for row in joined))
        self.assertEqual('Unknown', joined[0]['catalog_name'])

    def test_default_select(self):
        rows = [
            {'name': 'Demo Book', 'catalog': 10, 'price': 104},
        ]
        joined = join(rows, catalogs_list, on='catalog',
                      select_as=SC('catalog_name', 'catalog_name', 'Foo'))
        self.assertTrue(all('catalog_name' in row for row in joined))
        # The SelectClause-level default wins when there is no match.
        self.assertEqual('Foo', joined[0]['catalog_name'])

    def test_defaults(self):
        rows = [
            {'name': 'Demo Book', 'catalog': 10, 'price': 104},
        ]
        joined = join(
            rows,
            catalogs_list,
            on='catalog',
            select_as=SC('catalog_name', 'catalog_name', 'Foo'),
            defaults={'catalog_name': 'Unknown'},
        )
        self.assertTrue(all('catalog_name' in row for row in joined))
        # The defaults= mapping takes precedence over the SelectClause default.
        self.assertEqual('Unknown', joined[0]['catalog_name'])
class DeepJoinTestCase(unittest.TestCase):
    """deep_join returns joined copies and leaves its left operand untouched."""

    def test_basic_join(self):
        joined = deep_join(books, catalogs_list, on=('catalog', 'id'),
                           select_as=('name', 'catalog_name'))
        self.assertTrue(all('catalog_name' in row for row in joined))
        self.assertEqual('Java', joined[1]['catalog_name'])
        # Unlike join, the module-level fixture must NOT be mutated.
        self.assertFalse('catalog_name' in books[1])
class DeepJoinOneTestCase(unittest.TestCase):
    """deep_join_one returns joined copies and leaves its left operand untouched."""

    def test_with_dict(self):
        joined = deep_join_one(books, catalogs_dict, on='catalog', select_as='catalog_name')
        self.assertTrue(all('catalog_name' in row for row in joined))
        self.assertEqual('Java', joined[1]['catalog_name'])
        # Unlike join_one, the module-level fixture must NOT be mutated.
        self.assertFalse('catalog_name' in books[1])
| 41.706587
| 119
| 0.628571
| 829
| 6,965
| 5.021713
| 0.110977
| 0.108335
| 0.046841
| 0.053087
| 0.827768
| 0.809032
| 0.79534
| 0.721355
| 0.69325
| 0.682441
| 0
| 0.012236
| 0.225556
| 6,965
| 166
| 120
| 41.957831
| 0.759548
| 0.001579
| 0
| 0.434783
| 0
| 0
| 0.150748
| 0
| 0
| 0
| 0
| 0
| 0.318841
| 1
| 0.115942
| false
| 0
| 0.021739
| 0.007246
| 0.188406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
141b08de40d48ce5d832865a066bfc763762c5fc
| 53
|
py
|
Python
|
hypoforest/__init__.py
|
joshloyal/forest-hypothesis-tests
|
ce75a11a3ad80667329118359cd6e4a4d5d93296
|
[
"MIT"
] | null | null | null |
hypoforest/__init__.py
|
joshloyal/forest-hypothesis-tests
|
ce75a11a3ad80667329118359cd6e4a4d5d93296
|
[
"MIT"
] | null | null | null |
hypoforest/__init__.py
|
joshloyal/forest-hypothesis-tests
|
ce75a11a3ad80667329118359cd6e4a4d5d93296
|
[
"MIT"
] | null | null | null |
from .confidence_interval import random_forest_error
| 26.5
| 52
| 0.90566
| 7
| 53
| 6.428571
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075472
| 53
| 1
| 53
| 53
| 0.918367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1446b73f8d39e742fa91e2191209174e559a7524
| 147
|
py
|
Python
|
testing_settings.py
|
livra-ar/backend
|
eb052611bb9b2cfa360fa422ce059984b8d295fa
|
[
"BSD-2-Clause"
] | 1
|
2020-09-05T12:18:06.000Z
|
2020-09-05T12:18:06.000Z
|
testing_settings.py
|
thamidurm/ar-content-platform-backend
|
eb052611bb9b2cfa360fa422ce059984b8d295fa
|
[
"BSD-2-Clause"
] | 3
|
2021-06-09T17:46:46.000Z
|
2021-09-22T18:54:57.000Z
|
testing_settings.py
|
livra-ar/backend
|
eb052611bb9b2cfa360fa422ce059984b8d295fa
|
[
"BSD-2-Clause"
] | null | null | null |
# import mongoengine
# from ar_platform.settings import *
# mongoengine.connection.disconnect()
# connect('testdb', host='mongomock://localhost')
| 24.5
| 49
| 0.761905
| 15
| 147
| 7.4
| 0.866667
| 0.306306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 147
| 6
| 49
| 24.5
| 0.834586
| 0.931973
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
148aeac881a9ac42cafbd5b5904e86de069dca90
| 63
|
py
|
Python
|
alexber/rpsgame/utils/__init__.py
|
alex-ber/RocketPaperScissorsGame
|
c38c82a17d508c892c686454864ee2356f441d1a
|
[
"BSD-2-Clause"
] | null | null | null |
alexber/rpsgame/utils/__init__.py
|
alex-ber/RocketPaperScissorsGame
|
c38c82a17d508c892c686454864ee2356f441d1a
|
[
"BSD-2-Clause"
] | 1
|
2019-03-20T10:35:36.000Z
|
2019-03-21T12:46:44.000Z
|
alexber/rpsgame/utils/__init__.py
|
alex-ber/RocketPaperScissorsGame
|
c38c82a17d508c892c686454864ee2356f441d1a
|
[
"BSD-2-Clause"
] | null | null | null |
from alexber.utils import LookUpMixinEnum, AutoNameMixinEnum
| 15.75
| 60
| 0.857143
| 6
| 63
| 9
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 63
| 3
| 61
| 21
| 0.964286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
14a5d9d90926c266aaec768f3e0fac2ce103603c
| 4,770
|
py
|
Python
|
baseline.py
|
hirune924/ml-tools
|
1a4e3d205b6eeef7bef7ef2f205ac8087ffa7f69
|
[
"MIT"
] | null | null | null |
baseline.py
|
hirune924/ml-tools
|
1a4e3d205b6eeef7bef7ef2f205ac8087ffa7f69
|
[
"MIT"
] | null | null | null |
baseline.py
|
hirune924/ml-tools
|
1a4e3d205b6eeef7bef7ef2f205ac8087ffa7f69
|
[
"MIT"
] | null | null | null |
import os
from omegaconf import DictConfig, OmegaConf
import hydra
from hydra import utils
import numpy as np
import pandas as pd
import xfeat
@hydra.main(config_path="../config/baseline.yaml", strict=False)
def main(cfg: DictConfig) -> None:
    """Build cached feature files for the concatenated train+test set, then
    load and assemble them for modelling.

    Reads cfg.data.train_csv_path / cfg.data.test_csv_path; every feature
    table is cached as a feather file under features/ and rebuilt only when
    missing. (Training/inference sections are still TODO placeholders.)
    """
    print(cfg.pretty())

    ## Load Data
    train_test_path = utils.to_absolute_path("features/train_test.ftr")
    if not os.path.exists(train_test_path):
        # NOTE: dropped the unused local `target` the original read from
        # cfg.data.target_col_name — it was never referenced.
        train_X = pd.read_csv(utils.to_absolute_path(cfg.data.train_csv_path))
        test_X = pd.read_csv(utils.to_absolute_path(cfg.data.test_csv_path))
        xfeat.utils.compress_df(
            pd.concat([train_X, test_X], sort=False)
        ).reset_index(drop=True).to_feather(train_test_path)
        print(pd.read_feather(train_test_path).head())

    ## Feature Extraction
    def _build_feature(feature_name, transformer, message):
        # Fit-transform over the cached train+test frame and persist the
        # result as feather, skipping work when the cache already exists.
        path = utils.to_absolute_path(feature_name)
        if not os.path.exists(path):
            print(message)
            transformer.fit_transform(
                pd.read_feather(train_test_path)
            ).reset_index(drop=True).to_feather(path)
            print(pd.read_feather(path).head())

    _build_feature("features/feature_num_features.ftr",
                   xfeat.SelectNumerical(),
                   "Save numerical features")
    _build_feature("features/feature_arithmetic_combi2.ftr",
                   xfeat.Pipeline([
                       xfeat.SelectNumerical(),
                       xfeat.ArithmeticCombinations(
                           exclude_cols=["target"], drop_origin=True, operator="+", r=2,
                       ),
                   ]),
                   "2-order Arithmetic combinations.")
    _build_feature("features/feature_1way_label_encoding.ftr",
                   xfeat.Pipeline([
                       xfeat.SelectCategorical(), xfeat.LabelEncoder(output_suffix="")]),
                   "Categorical encoding using label encoding")
    _build_feature("features/feature_2way_label_encoding.ftr",
                   xfeat.Pipeline([
                       xfeat.SelectCategorical(),
                       xfeat.ConcatCombination(drop_origin=True, r=2),
                       xfeat.LabelEncoder(output_suffix=""),
                   ]),
                   "2-order combination of categorical features")
    _build_feature("features/feature_3way__including_Sex_label_encoding.ftr",
                   xfeat.Pipeline([
                       xfeat.SelectCategorical(),
                       xfeat.ConcatCombination(drop_origin=True, include_cols=["Sex"], r=3),
                       xfeat.LabelEncoder(output_suffix=""),
                   ]),
                   "3-order combination of categorical features")

    ## Load & Set Features
    # BUG FIX: the original wrapped every frame in a second pd.read_feather()
    # call — pd.read_feather(pd.read_feather(path)) — which raises because
    # read_feather expects a path, not a DataFrame.
    print("Load numerical features")
    df_num = pd.concat(
        [
            pd.read_feather(utils.to_absolute_path("features/feature_num_features.ftr")),
            pd.read_feather(utils.to_absolute_path("features/feature_arithmetic_combi2.ftr")),
        ], axis=1)
    print("Load categorical features")
    df_cat = pd.concat(
        [
            pd.read_feather(utils.to_absolute_path("features/feature_1way_label_encoding.ftr")),
            pd.read_feather(utils.to_absolute_path("features/feature_2way_label_encoding.ftr")),
            pd.read_feather(utils.to_absolute_path("features/feature_3way__including_Sex_label_encoding.ftr")),
        ], axis=1)
    df = pd.concat([df_cat, df_num], axis=1)
    # Rows with a non-null target are the training portion (Titanic-style data).
    y_train = df_num["Survived"].dropna()
    df.drop(["Survived"], axis=1, inplace=True)

    ## Training
    ## Inference


if __name__ == "__main__":
    main()
| 42.972973
| 128
| 0.703354
| 627
| 4,770
| 5.038278
| 0.15949
| 0.066477
| 0.14245
| 0.180437
| 0.747072
| 0.7132
| 0.708135
| 0.708135
| 0.691358
| 0.685027
| 0
| 0.004554
| 0.171279
| 4,770
| 110
| 129
| 43.363636
| 0.794586
| 0.014046
| 0
| 0.390805
| 0
| 0
| 0.188193
| 0.131927
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011494
| false
| 0
| 0.08046
| 0
| 0.091954
| 0.16092
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1ada8671313ec4f8626a98e79c23f4a9560b39dc
| 1,839
|
py
|
Python
|
Common/IO/FileInfo/IFileInfo.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 2
|
2020-03-04T11:18:38.000Z
|
2020-05-10T15:36:42.000Z
|
Common/IO/FileInfo/IFileInfo.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 6
|
2020-03-30T16:42:47.000Z
|
2021-12-13T20:37:21.000Z
|
Common/IO/FileInfo/IFileInfo.py
|
enriqueescobar-askida/Kinito.Finance
|
5308748b64829ac798a858161f9b4a9e5829db44
|
[
"MIT"
] | 1
|
2020-04-14T11:26:16.000Z
|
2020-04-14T11:26:16.000Z
|
from abc import abstractmethod
from Common.Objects.Interfaceable import Interfaceable
class IFileInfo(Interfaceable):
    """Abstract file-information contract; member names mirror .NET's FileInfo.

    Purely declarative: every operation and property below must be supplied
    by a concrete subclass.
    """

    # ----- file operations ----------------------------------------------

    @abstractmethod
    def AppendText(self) -> bool: ...

    @abstractmethod
    def CopyTo(self) -> bool: ...

    @abstractmethod
    def Create(self) -> bool: ...

    @abstractmethod
    def CreateText(self) -> bool: ...

    @abstractmethod
    def Decrypt(self): ...

    @abstractmethod
    def Delete(self): ...

    @abstractmethod
    def Encrypt(self): ...

    @abstractmethod
    def MoveTo(self): ...

    @abstractmethod
    def Open(self): ...

    @abstractmethod
    def OpenRead(self): ...

    @abstractmethod
    def OpenText(self): ...

    @abstractmethod
    def OpenWrite(self): ...

    @abstractmethod
    def Replace(self): ...

    # ----- read-only properties -----------------------------------------

    @property
    @abstractmethod
    def Dir(self): ...

    @property
    @abstractmethod
    def DirName(self): ...

    @property
    @abstractmethod
    def Exists(self): ...

    @property
    @abstractmethod
    def Extension(self): ...

    @property
    @abstractmethod
    def FullName(self): ...

    @property
    @abstractmethod
    def FullPath(self): ...

    @property
    @abstractmethod
    def OriginalPath(self): ...

    @property
    @abstractmethod
    def IsAbsolute(self): ...

    @property
    @abstractmethod
    def IsReadOnly(self): ...

    @property
    @abstractmethod
    def LastAccessTime(self): ...

    @property
    @abstractmethod
    def LastWriteTime(self): ...

    @property
    @abstractmethod
    def Length(self): ...

    @property
    @abstractmethod
    def Name(self): ...
| 14.830645
| 54
| 0.585644
| 160
| 1,839
| 6.73125
| 0.25
| 0.410399
| 0.193129
| 0.362117
| 0.506035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.343665
| 1,839
| 123
| 55
| 14.95122
| 0.892295
| 0
| 0
| 0.691489
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.276596
| false
| 0.276596
| 0.021277
| 0
| 0.308511
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1ade551a37fde91242c8a87b99bc049bc17c858b
| 104
|
py
|
Python
|
venv/Lib/site-packages/mizani/external/__init__.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 29
|
2017-04-25T23:52:24.000Z
|
2022-03-07T02:35:37.000Z
|
venv/Lib/site-packages/mizani/external/__init__.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 22
|
2016-07-03T17:18:58.000Z
|
2021-08-18T10:18:17.000Z
|
venv/Lib/site-packages/mizani/external/__init__.py
|
EkremBayar/bayar
|
aad1a32044da671d0b4f11908416044753360b39
|
[
"MIT"
] | 13
|
2017-05-21T11:38:32.000Z
|
2022-02-23T11:25:30.000Z
|
from .xkcd_rgb import xkcd_rgb
from .crayon_rgb import crayon_rgb
__all__ = ['xkcd_rgb', 'crayon_rgb']
| 20.8
| 36
| 0.778846
| 17
| 104
| 4.176471
| 0.352941
| 0.295775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 104
| 4
| 37
| 26
| 0.78022
| 0
| 0
| 0
| 0
| 0
| 0.173077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2116cf517525a6c29e0f07fd1d87956edad62987
| 67
|
py
|
Python
|
code/tmp_rtrip/ctypes/test/__main__.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/ctypes/test/__main__.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/ctypes/test/__main__.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
from ctypes.test import load_tests
import unittest
unittest.main()
| 16.75
| 34
| 0.835821
| 10
| 67
| 5.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104478
| 67
| 3
| 35
| 22.333333
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
211a5ebcd65bac0c9776cc3e20eaa238da63b1fc
| 156,789
|
py
|
Python
|
test34_bif.py
|
jpra2/Presto2.1
|
e2a3e3121280b011a6be2a59be708623bdc0b482
|
[
"CNRI-Python"
] | 1
|
2018-12-04T19:32:27.000Z
|
2018-12-04T19:32:27.000Z
|
test34_bif.py
|
jpra2/Presto2.1
|
e2a3e3121280b011a6be2a59be708623bdc0b482
|
[
"CNRI-Python"
] | null | null | null |
test34_bif.py
|
jpra2/Presto2.1
|
e2a3e3121280b011a6be2a59be708623bdc0b482
|
[
"CNRI-Python"
] | null | null | null |
import numpy as np
from pymoab import core
from pymoab import types
from pymoab import topo_util
from PyTrilinos import Epetra, AztecOO, EpetraExt # , Amesos
import time
import sys
import shutil
import os
import random
import configparser
class Msclassic_bif:
def __init__(self):
    """Load the mesh from 'out.h5m', read simulation parameters from mesh
    tags, classify well/unknown volumes and prepare the output/backup
    directory tree.

    Side effects: reads 'out.h5m' from the CWD, creates/removes directories
    under /elliptic and may change the current working directory.
    """
    self.comm = Epetra.PyComm()
    self.mb = core.Core()
    self.mb.load_file('out.h5m')
    self.root_set = self.mb.get_root_set()
    self.mesh_topo_util = topo_util.MeshTopoUtil(self.mb)
    # All 3D elements of the fine mesh.
    self.all_fine_vols = self.mb.get_entities_by_dimension(self.root_set, 3)
    elem0 = list(self.all_fine_vols)[0]  # arbitrary element; scalar tags are read from it
    self.nf = len(self.all_fine_vols)  # number of fine volumes
    self.create_tags(self.mb)
    self.read_structured()
    # Meshsets carrying a primal (coarse-volume) id.
    self.primals = self.mb.get_entities_by_type_and_tag(
        self.root_set, types.MBENTITYSET, np.array([self.primal_id_tag]),
        np.array([None]))
    self.nc = len(self.primals)  # number of coarse volumes
    self.ident_primal = []
    for primal in self.primals:
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        self.ident_primal.append(primal_id)
    # self.ident_primal: remapping of the coarse-mesh global ids onto 0..nc-1.
    self.ident_primal = dict(zip(self.ident_primal, range(len(self.ident_primal))))
    self.sets = self.mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    self.set_of_collocation_points_elems = set()
    for collocation_point_set in self.sets:
        collocation_point = self.mb.get_entities_by_handle(collocation_point_set)[0]
        self.set_of_collocation_points_elems.add(collocation_point)
    # Scalar simulation parameters, stored as tags on an arbitrary element.
    self.flag_grav = self.mb.tag_get_data(self.flag_gravidade_tag, elem0, flat=True)[0]  # gravity flag
    self.loops = self.mb.tag_get_data(self.loops_tag, elem0, flat=True)[0]  # total number of time loops
    self.t = self.mb.tag_get_data(self.t_tag, elem0, flat=True)[0]  # total simulation time
    self.mi_w = self.mb.tag_get_data(self.miw_tag, elem0, flat=True)[0]  # water viscosity
    self.mi_o = self.mb.tag_get_data(self.mio_tag, elem0, flat=True)[0]  # oil viscosity
    self.ro_w = self.mb.tag_get_data(self.rhow_tag, elem0, flat=True)[0]  # water density
    self.ro_o = self.mb.tag_get_data(self.rhoo_tag, elem0, flat=True)[0]  # oil density
    self.gama_w = self.mb.tag_get_data(self.gamaw_tag, elem0, flat=True)[0]  # specific weight of water
    self.gama_o = self.mb.tag_get_data(self.gamao_tag, elem0, flat=True)[0]  # specific weight of oil
    self.gama_ = self.gama_w + self.gama_o
    self.gama = self.gama_
    self.nw = self.mb.tag_get_data(self.nw_tag, elem0, flat=True)[0]  # water exponent for relative permeability
    self.no = self.mb.tag_get_data(self.no_tag, elem0, flat=True)[0]  # oil exponent for relative permeability
    self.Sor = self.mb.tag_get_data(self.Sor_tag, elem0, flat=True)[0]  # residual oil saturation
    self.Swc = self.mb.tag_get_data(self.Swc_tag, elem0, flat=True)[0]  # connate water saturation
    self.Swi = self.mb.tag_get_data(self.Swi_tag, elem0, flat=True)[0]  # initial water saturation
    # Ribeiro
    self.Sw_inf = 0.1
    self.Sw_sup = 0.85  # = 1-Sor
    # Oliveira
    self.kro_Sac = 0.85  # oil relative permeability at connate water saturation
    self.kra_Soc = 0.4  # water relative permeability at critical oil saturation
    self.Sac = 0.25  # connate water saturation
    self.Soc = 0.35  # critical oil saturation
    # relative-permeability curve exponents
    self.no_2 = 0.9
    self.nw_2 = 1.5
    # self.read_perms_and_phi_spe10()
    # self.set_k()  # set the permeability on every volume
    self.set_fi()  # set the porosity on every volume
    if self.flag_grav == 1:
        self.get_wells_gr()
    else:
        self.get_wells()  # collect the gids of the volumes that are wells
    # self.read_perm_rel()  # read the perm_rel.txt file
    gids = self.mb.tag_get_data(self.global_id_tag, self.all_fine_vols , flat = True)
    self.map_gids_in_all_fine_vols = dict(zip(gids, self.all_fine_vols))  # gid -> element handle
    self.neigh_wells_d = []  # fine volumes adjacent to prescribed-pressure (Dirichlet) wells
    for volume in self.wells_d:
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        adjs_volume = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        for adj in adjs_volume:
            global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
            if (adj not in self.wells_d) and (adj not in self.neigh_wells_d):
                self.neigh_wells_d.append(adj)
    # Unknowns: every fine volume that is not a Dirichlet well.
    self.all_fine_vols_ic = set(self.all_fine_vols) - set(self.wells_d)
    self.map_vols_ic = dict(zip(list(self.all_fine_vols_ic), range(len(self.all_fine_vols_ic))))  # element -> unknown index
    self.map_vols_ic_2 = dict(zip(range(len(self.all_fine_vols_ic)), list(self.all_fine_vols_ic)))  # inverse map
    self.nf_ic = len(self.all_fine_vols_ic)  # number of unknowns
    # Hard-coded absolute output paths (caminho = "path").
    self.principal = '/elliptic'
    self.caminho1 = '/elliptic/simulacoes/bifasico'
    self.caminho2 = '/elliptic/simulacoes'
    self.caminho3 = '/elliptic/backup_simulacoes'
    self.caminho4 = '/elliptic/backup_simulacoes/bifasico'
    self.caminho5 = '/elliptic/backup_simulacoes/bifasico/pasta0'
    arq1 = 'back.txt'
    # ##### the command below deletes the backup_simulacoes folder ##########
    # shutil.rmtree(self.caminho3)
    # sys.exit(0)
    # import pdb; pdb.set_trace()
    # ############################################################################
    # Recreate the simulation output folder from scratch.
    if os.path.exists(self.caminho2):
        if os.path.exists(self.caminho1):
            shutil.rmtree(self.caminho1)
            os.makedirs(self.caminho1)
        else:
            os.makedirs(self.caminho1)
    else:
        os.makedirs(self.caminho1)
    # Choose (and persist in back.txt) the index of the next backup folder.
    if os.path.exists(self.caminho3):
        if os.path.exists(self.caminho4):
            os.chdir(self.caminho4)
            if arq1 in os.listdir():
                with open(arq1, 'r') as arq:
                    text = arq.readline()
                num_sim = int(text) + 1
                with open(arq1, 'w') as arq:
                    arq.write('{0}'.format(num_sim))
                self.pasta = '/elliptic/backup_simulacoes/bifasico/pasta{0}'.format(num_sim)
                # os.makedirs(self.pasta)
            else:
                with open(arq1, 'w') as arq:
                    arq.write('{0}'.format(int(0)))
                self.pasta = self.caminho5
        else:
            os.makedirs(self.caminho4)
            os.chdir(self.caminho4)
            with open(arq1, 'w') as arq:
                arq.write('{0}'.format(int(0)))
            self.pasta = self.caminho5
    else:
        os.makedirs(self.caminho4)
        os.chdir(self.caminho4)
        with open(arq1, 'w') as arq:
            arq.write('{0}'.format(int(0)))
        self.pasta = self.caminho5
    # os.chdir(self.caminho1)
def calculate_local_problem_het(self, elems, lesser_dim_meshsets, support_vals_tag):
    """Assemble and solve a local heterogeneous flow problem on `elems`.

    Boundary elements (those in the lower-dimensional meshsets) get identity
    rows with their current `support_vals_tag` value as RHS; interior rows
    get a conservative transmissibility stencil scaled by the total
    (water+oil) mobility. The AztecOO solution is written back into
    `support_vals_tag`.

    Parameters:
        elems: entity handles of the local problem (one row each).
        lesser_dim_meshsets: meshsets whose elements carry Dirichlet values.
        support_vals_tag: pymoab tag read for boundary values and overwritten
            with the solution.
    """
    # Local numbering: element handle -> row index.
    std_map = Epetra.Map(len(elems), 0, self.comm)
    linear_vals = np.arange(0, len(elems))
    id_map = dict(zip(elems, linear_vals))
    boundary_elms = set()
    b = Epetra.Vector(std_map)
    x = Epetra.Vector(std_map)
    A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
    # Dirichlet rows: identity on the diagonal, known value on the RHS.
    for ms in lesser_dim_meshsets:
        lesser_dim_elems = self.mb.get_entities_by_handle(ms)
        for elem in lesser_dim_elems:
            if elem in boundary_elms:
                continue
            boundary_elms.add(elem)
            idx = id_map[elem]
            A.InsertGlobalValues(idx, [1], [idx])
            b[idx] = self.mb.tag_get_data(support_vals_tag, elem, flat=True)[0]
    # Interior rows: symmetric difference == elems that are not boundary.
    for elem in (set(elems) ^ boundary_elms):
        k_elem = self.mb.tag_get_data(self.perm_tag, elem).reshape([3, 3])
        lamb_w_elem = self.mb.tag_get_data(self.lamb_w_tag, elem)[0][0]
        lamb_o_elem = self.mb.tag_get_data(self.lamb_o_tag, elem)[0][0]
        centroid_elem = self.mesh_topo_util.get_average_position([elem])
        adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(
            np.asarray([elem]), 2, 3, 0)
        values = []
        ids = []
        for adj in adj_volumes:
            # Only neighbours inside the local problem contribute.
            if adj in id_map:
                # Project both permeability tensors onto the connection
                # direction and scale by the total mobility of each cell.
                k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                centroid_adj = self.mesh_topo_util.get_average_position([adj])
                direction = centroid_adj - centroid_elem
                uni = self.unitary(direction)
                k_elem = np.dot(np.dot(k_elem,uni),uni)
                k_elem = k_elem*(lamb_w_elem + lamb_o_elem)
                k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                k_adj = np.dot(np.dot(k_adj,uni),uni)
                lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                k_adj = k_adj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(k_elem, k_adj)
                #keq = keq/(np.dot(self.h2, uni))
                # NOTE(review): self.A / self.h are presumably face-area and
                # grid-spacing vectors set elsewhere — confirm.
                keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
                values.append(keq)
                ids.append(id_map[adj])
                # Re-read the permeability tensor: the projection above
                # reduced k_elem to a scalar, so restore the full 3x3 for
                # the next neighbour.
                k_elem = self.mb.tag_get_data(self.perm_tag, elem).reshape([3, 3])
        # Diagonal closes the row so that the row sum is zero (conservation).
        values.append(-sum(values))
        idx = id_map[elem]
        ids.append(idx)
        A.InsertGlobalValues(idx, values, ids)
    A.FillComplete()
    linearProblem = Epetra.LinearProblem(A, x, b)
    solver = AztecOO.AztecOO(linearProblem)
    # AZ_last, AZ_summary, AZ_warnings
    solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)
    solver.Iterate(1000, 1e-9)
    self.mb.tag_set_data(support_vals_tag, elems, np.asarray(x))
def calculate_p_end(self):
    """Write the prescribed pressure onto each Dirichlet well volume.

    For every well volume whose global id is listed in ``self.wells_d`` the
    matching entry of ``self.set_p`` is stored under the ``pms`` tag.
    """
    for volume in self.wells:
        # BUG FIX: the original referenced the unbound global name `mb`
        # (NameError at runtime); the MOAB core instance lives on `self.mb`.
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        if global_volume in self.wells_d:
            # NOTE(review): in __init__ self.wells_d is iterated as entity
            # handles, while here it is searched for a global id — confirm
            # which representation it actually holds.
            index = self.wells_d.index(global_volume)
            pms = self.set_p[index]
            self.mb.tag_set_data(self.pms_tag, volume, pms)
def calculate_prolongation_op_het(self):
    """Assemble the heterogeneous prolongation operator ``self.trilOP``.

    For every collocation-point set, a temporary support-value tag is
    seeded (1.0 at the collocation point, 0.0 elsewhere) and the local
    problems are solved bottom-up — edges, then faces, then volumes.
    The resulting support values are scattered into the sparse operator,
    one (gid, primal_id) entry at most once.
    """
    zeros = np.zeros(self.nf)
    std_map = Epetra.Map(self.nf, 0, self.comm)
    self.trilOP = Epetra.CrsMatrix(Epetra.Copy, std_map, std_map, 0)
    sets = self.mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    my_pairs = set()
    for collocation_point_set in sets:
        childs = self.mb.get_child_meshsets(collocation_point_set)
        collocation_point = self.mb.get_entities_by_handle(collocation_point_set)[0]
        primal_elem = self.mb.tag_get_data(self.fine_to_primal_tag, collocation_point,
                                           flat=True)[0]
        primal_id = self.mb.tag_get_data(self.primal_id_tag, int(primal_elem), flat=True)[0]
        primal_id = self.ident_primal[primal_id]
        support_vals_tag = self.mb.tag_get_handle(
            "TMP_SUPPORT_VALS {0}".format(primal_id), 1, types.MB_TYPE_DOUBLE, True,
            types.MB_TAG_SPARSE, default_value=0.0)
        # seed: zero everywhere, one at the collocation point
        self.mb.tag_set_data(support_vals_tag, self.all_fine_vols, zeros)
        self.mb.tag_set_data(support_vals_tag, collocation_point, 1.0)
        for volume_set in childs:
            vol_elems = self.mb.get_entities_by_handle(volume_set)
            face_sets = self.mb.get_child_meshsets(volume_set)
            for face_set in face_sets:
                face_elems = self.mb.get_entities_by_handle(face_set)
                edge_sets = self.mb.get_child_meshsets(face_set)
                for edge_set in edge_sets:
                    edge_elems = self.mb.get_entities_by_handle(edge_set)
                    vertex_sets = self.mb.get_child_meshsets(edge_set)
                    # from here on the prolongation operator gets filled
                    self.calculate_local_problem_het(
                        edge_elems, vertex_sets, support_vals_tag)
                self.calculate_local_problem_het(
                    face_elems, edge_sets, support_vals_tag)
            self.calculate_local_problem_het(
                vol_elems, face_sets, support_vals_tag)
            vals = self.mb.tag_get_data(support_vals_tag, vol_elems, flat=True)
            gids = self.mb.tag_get_data(self.global_id_tag, vol_elems, flat=True)
            primal_elems = self.mb.tag_get_data(self.fine_to_primal_tag, vol_elems,
                                                flat=True)
            for val, gid in zip(vals, gids):
                # skip zero support values and already-inserted entries
                if val == 0.0 or (gid, primal_id) in my_pairs:
                    continue
                self.trilOP.InsertGlobalValues([gid], [primal_id], val)
                my_pairs.add((gid, primal_id))
def calculate_restriction_op(self):
    """Assemble the restriction operator ``self.trilOR``.

    Each primal cell contributes one row of ones over the global ids of
    its fine elements; a per-primal RESTRICTION_PRIMAL tag and the
    fine-to-primal id tag are filled along the way.
    """
    std_map = Epetra.Map(self.nf, 0, self.comm)
    self.trilOR = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
    for primal in self.primals:
        raw_primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        primal_id = self.ident_primal[raw_primal_id]
        restriction_tag = self.mb.tag_get_handle(
            "RESTRICTION_PRIMAL {0}".format(primal_id), 1, types.MB_TYPE_INTEGER,
            True, types.MB_TAG_SPARSE)
        fine_elems = self.mb.get_entities_by_handle(primal)
        n_fine = len(fine_elems)
        self.mb.tag_set_data(
            self.elem_primal_id_tag,
            fine_elems,
            np.repeat(primal_id, n_fine))
        gids = self.mb.tag_get_data(self.global_id_tag, fine_elems, flat=True)
        # row `primal_id`: ones at the global ids of the fine elements
        self.trilOR.InsertGlobalValues(primal_id, np.repeat(1, len(gids)), gids)
        self.mb.tag_set_data(restriction_tag, fine_elems, np.repeat(1, n_fine))
    self.trilOR.FillComplete()
def calculate_restriction_op_2(self):
    """Assemble the restriction operator on the reduced (unknown) space.

    Same as calculate_restriction_op, but columns of volumes with
    prescribed pressure are excluded: each primal row carries ones only
    at the local indices (``self.map_vols_ic``) of its unknown volumes.
    """
    std_map = Epetra.Map(len(self.all_fine_vols_ic), 0, self.comm)
    self.trilOR = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
    gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, self.all_fine_vols_ic, flat=True)
    for primal in self.primals:
        raw_primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        primal_id = self.ident_primal[raw_primal_id]
        restriction_tag = self.mb.tag_get_handle(
            "RESTRICTION_PRIMAL {0}".format(primal_id), 1, types.MB_TYPE_INTEGER,
            True, types.MB_TAG_SPARSE)
        fine_elems = self.mb.get_entities_by_handle(primal)
        n_fine = len(fine_elems)
        self.mb.tag_set_data(
            self.elem_primal_id_tag,
            fine_elems,
            np.repeat(primal_id, n_fine))
        # unknown volumes of this primal, mapped to their reduced indices
        unknown_elems = self.all_fine_vols_ic & set(fine_elems)
        local_cols = [self.map_vols_ic[elem] for elem in unknown_elems]
        self.trilOR.InsertGlobalValues(primal_id, np.repeat(1, len(local_cols)), local_cols)
        self.mb.tag_set_data(restriction_tag, fine_elems, np.repeat(1, n_fine))
    self.trilOR.FillComplete()
def calculate_sat(self):
    """
    Compute the water saturation for the current time step (explicit update).

    sat_new = sat_old + div * dt / (fi * V), where ``div`` is the upwinded
    water-flux divergence from div_upwind_3 on the fine pressure (pf_tag).
    NOTE(review): porosity is hard-coded to 0.3 (the tag read is commented
    out) and the 0.8 clamp presumably equals 1 - Sor — confirm.
    """
    t1 = time.time()
    lim = 10**(-6)
    for volume in self.all_fine_vols:
        gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        if gid in self.wells_d:
            tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
            # NOTE(review): type-1 wells appear to be injectors — not updated here
            if tipo_de_poco == 1:
                continue
            else:
                pass
        div = self.div_upwind_3(volume, self.pf_tag)
        fi = 0.3 #self.mb.tag_get_data(self.fi_tag, volume)[0][0]
        sat1 = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
        sat = sat1 + div*(self.delta_t/(fi*self.V))
        if sat > 1.0:
            print('saturacao maior que 1 na funcao calculate_sat')
            import pdb; pdb.set_trace()
        #if abs(div) < lim or sat1 == (1 - self.Sor) or sat < sat1:
        #if abs(div) < lim or sat1 == (1 - self.Sor):
        # negligible flux, or already at the cap: keep the previous value
        if abs(div) < lim or sat1 == 0.8:
            continue
        #elif sat > (1 - self.Sor):
        elif sat > 0.8:
            #sat = 1 - self.Sor
            print("Sat > 0.8")
            print(sat)
            print('gid')
            print(gid)
            print('\n')
            sat = 0.8
        #elif sat < 0 or sat > (1 - self.Sor):
        # NOTE(review): `sat > 0.8` is unreachable here (clamped above); only sat < 0 triggers
        elif sat < 0 or sat > 0.8:
            print('Erro: saturacao invalida')
            print('Saturacao: {0}'.format(sat))
            print('Saturacao anterior: {0}'.format(sat1))
            print('div: {0}'.format(div))
            print('gid: {0}'.format(gid))
            print('fi: {0}'.format(fi))
            print('V: {0}'.format(self.V))
            print('delta_t: {0}'.format(self.delta_t))
            print('loop: {0}'.format(self.loop))
            sys.exit(0)
        self.mb.tag_set_data(self.sat_tag, volume, sat)
    t2 = time.time()
def calculate_sat_2(self):
    """
    Compute the water saturation for the current time step (explicit update).

    sat_new = sat_old + qw * dt / (fi * V), where ``qw`` is the net water
    flux previously stored in ``flux_w_tag``. Injection wells are skipped;
    the saturation is clamped at 0.8 (presumably 1 - Sor — confirm).
    Drops into pdb on suspicious states (negative qw, decreasing sat).
    """
    t1 = time.time()
    lim = 1e-4
    for volume in self.all_fine_vols:
        gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        if volume in self.wells_inj:
            continue
        qw = self.mb.tag_get_data(self.flux_w_tag, volume, flat=True)[0]
        if abs(qw) < lim:
            continue
        elif qw < 0.0:
            print('qw < 0')
            print(qw)
            print('gid')
            print(gid)
            print('loop')
            print(self.loop)
            print('\n')
            import pdb; pdb.set_trace()
        else:
            pass
        fi = self.mb.tag_get_data(self.fi_tag, volume)[0][0]
        sat1 = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
        sat = sat1 + qw*(self.delta_t/(fi*self.V))
        if sat1 > sat:
            print('erro na saturacao')
            print('sat1 > sat')
            import pdb; pdb.set_trace()
        elif sat > 0.8:
            print("Sat > 1")
            print(sat)
            print('gid')
            print(gid)
            print('loop')
            print(self.loop)
            print('\n')
            sat = 0.8
        elif sat < 0 or sat > 1:
            print('Erro: saturacao invalida')
            print('Saturacao: {0}'.format(sat))
            print('Saturacao anterior: {0}'.format(sat1))
            # Bugfix: this method has no `div` variable (the original line
            # `print('div: {0}'.format(div))` raised NameError when this
            # branch was hit); report the water flux instead.
            print('qw: {0}'.format(qw))
            print('gid: {0}'.format(gid))
            print('fi: {0}'.format(fi))
            print('V: {0}'.format(self.V))
            print('delta_t: {0}'.format(self.delta_t))
            print('loop: {0}'.format(self.loop))
            import pdb; pdb.set_trace()
            sys.exit(0)
        else:
            pass
        self.mb.tag_set_data(self.sat_tag, volume, sat)
    t2 = time.time()
    print('tempo calculo saturacao loop_{0}: {1}'.format(self.loop, t2-t1))
def cfl(self):
    """Set ``self.delta_t`` from the CFL condition using the maximum flux.

    delta_t = C * (fi_min * V) / (q_max * max|df/ds|), with C = 0.5.
    """
    courant = 0.5
    denom = float(self.qmax * self.dfdsmax)
    self.delta_t = courant * (self.fimin * self.V) / denom
def cfl_2(self, vmax, h, dfds):
    """Set ``self.delta_t`` from the CFL condition using the maximum velocity.

    delta_t = C * h / (vmax * df/ds), with C = 1.0.
    """
    courant = 1.0
    self.delta_t = (courant * h) / float(vmax * dfds)
def create_flux_vector_pf(self):
    """
    Build, for every fine-mesh volume, the flux through each of its
    faces (up to six directions per volume), using the fine-scale
    pressure field (pf_tag).

    Side effects:
      - self.store_flux_pf / self.store_velocity_pf: volume -> {direction: value}
      - self.fimin, self.qmax, self.dfdsmax updated (CFL ingredients)
      - flux_fine_pf_tag and flux_w_tag written per volume
      - a per-loop report 'fluxo_malha_fina_bif{loop}.txt' is written
    """
    lim = 1e-4
    self.dfdsmax = 0
    self.fimin = 10
    self.qmax = 0
    self.store_velocity_pf = {}
    self.store_flux_pf = {}
    for primal in self.primals:
        #1
        primal_id1 = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        primal_id = self.ident_primal[primal_id1]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        volumes_in_interface, volumes_in_primal = self.get_volumes_in_interfaces(
            fine_elems_in_primal, primal_id1, flag = 1)
        for volume in fine_elems_in_primal:
            #2
            list_keq = []
            list_p = []
            list_gid = []
            list_keq3 = []
            list_gidsadj = []
            list_qw = []
            qw3 = []
            qw = 0
            flux = {}
            velocity = {}
            fi = self.mb.tag_get_data(self.fi_tag, volume, flat=True)[0]
            # track the smallest porosity seen (used by the CFL estimate)
            if fi < self.fimin:
                self.fimin = fi
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume, flat=True)[0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume, flat=True)[0]
            lbt_vol = lamb_w_vol + lamb_o_vol
            fw_vol = self.mb.tag_get_data(self.fw_tag, volume, flat=True)[0]
            sat_vol = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]
            centroid_volume = self.mesh_topo_util.get_average_position([volume])
            adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]
            for adj in adjs_vol:
                #3
                gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]
                padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]
                kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                centroid_adj = self.mesh_topo_util.get_average_position([adj])
                direction = centroid_adj - centroid_volume
                unit = direction/np.linalg.norm(direction)
                # unit = unit vector along `direction`
                uni = self.unitary(direction)
                # uni = componentwise absolute value of the unit vector
                # NOTE: kvol is overwritten with a scalar here and re-read
                # from the tag at the end of each adjacency iteration
                kvol = np.dot(np.dot(kvol,uni),uni)
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]
                lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]
                lbt_adj = lamb_w_adj + lamb_o_adj
                fw_adj = self.mb.tag_get_data(self.fw_tag, adj, flat=True)[0]
                keq3 = (kvol*lamb_w_vol + kadj*lamb_w_adj)/2.0
                kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
                list_keq.append(keq)
                list_p.append(padj)
                list_gid.append(gid_adj)
                keq2 = keq
                keq = keq*(np.dot(self.A, uni))
                #pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                #padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                #grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))
                q = (grad_p)*keq
                qw3.append(grad_p*keq3*(np.dot(self.A, uni)))
                # upwind the fractional flow: take fw from the higher-pressure side
                if grad_p < 0:
                    #4
                    fw = fw_vol
                    qw += (fw*grad_p*kvol*(np.dot(self.A, uni)))
                    list_qw.append(fw*grad_p*kvol*(np.dot(self.A, uni)))
                else:
                    fw = fw_adj
                    qw += (fw*grad_p*kadj*(np.dot(self.A, uni)))
                    list_qw.append(fw*grad_p*kadj*(np.dot(self.A, uni)))
                # sign convention keyed on global ids so each face gets one velocity
                if gid_adj > gid_vol:
                    v = -(grad_p)*keq2
                else:
                    v = (grad_p)*keq2
                flux[tuple(unit)] = q
                velocity[tuple(unit)] = v
                kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                if abs(sat_adj - sat_vol) < lim or abs(fw_adj -fw_vol) < lim:
                    continue
                # df/ds estimated by a finite difference across the interface
                dfds = abs((fw_adj - fw_vol)/(sat_adj - sat_vol))
                if dfds > self.dfdsmax:
                    self.dfdsmax = dfds
            #2
            list_keq.append(-sum(list_keq))
            list_p.append(pvol)
            list_gid.append(gid_vol)
            list_keq = np.array(list_keq)
            list_p = np.array(list_p)
            # kept for debugging; not used below
            resultado = sum(list_keq*list_p)
            self.store_velocity_pf[volume] = velocity
            self.store_flux_pf[volume] = flux
            flt = sum(flux.values())
            self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)
            # conservation check: net flux should vanish away from wells
            if abs(sum(flux.values())) > lim and volume not in self.wells:
                print('nao esta dando conservativo na malha fina')
                print(gid_vol)
                print(sum(flux.values()))
            qmax = max(list(map(abs, flux.values())))
            if qmax > self.qmax:
                self.qmax = qmax
            if volume in self.wells_prod:
                qw_out = sum(flux.values())*fw_vol
                qw3.append(-qw_out)
                qo_out = sum(flux.values())*(1 - fw_vol)
                self.prod_o.append(qo_out)
                self.prod_w.append(qw_out)
                qw = qw - qw_out
            if abs(qw) < lim and qw < 0.0:
                qw = 0.0
            elif qw < 0 and volume not in self.wells_inj:
                print('gid')
                print(gid_vol)
                print('qw < 0')
                print(qw)
                import pdb; pdb.set_trace()
            else:
                pass
            self.mb.tag_set_data(self.flux_w_tag, volume, qw)
    soma_inj = []
    soma_prod = []
    soma2 = 0
    # per-well flux report for this time loop
    with open('fluxo_malha_fina_bif{0}.txt'.format(self.loop), 'w') as arq:
        for volume in self.wells:
            gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]
            values = self.store_flux_pf[volume].values()
            arq.write('gid:{0} , fluxo:{1}\n'.format(gid, sum(values)))
            if volume in self.wells_inj:
                soma_inj.append(sum(values))
            else:
                soma_prod.append(sum(values))
            soma2 += sum(values)
        arq.write('\n')
        arq.write('soma_inj:{0}\n'.format(sum(soma_inj)))
        arq.write('soma_prod:{0}\n'.format(sum(soma_prod)))
        arq.write('tempo:{0}'.format(self.tempo))
def create_flux_vector_pms(self):
    """
    Build, for every fine-mesh volume, the flux through each of its
    faces (up to six directions per volume), using the multiscale
    solution: corrected pressure (pcorr_tag) inside each primal and the
    multiscale pressure (pms_tag) across primal interfaces.

    Side effects:
      - self.store_flux / self.store_velocity: volume -> {direction: value}
      - self.fimin, self.qmax, self.dfdsmax updated (CFL ingredients)
      - flux_fine_pms_tag and flux_w_tag written per volume
      - a per-loop report 'fluxo_multiescala_bif{loop}.txt' is written
    """
    lim = 1e-4
    self.dfdsmax = 0
    self.fimin = 10
    self.qmax = 0
    self.store_velocity = {}
    self.store_flux = {}
    for primal in self.primals:
        #1
        primal_id1 = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        primal_id = self.ident_primal[primal_id1]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        volumes_in_interface, volumes_in_primal = self.get_volumes_in_interfaces(
            fine_elems_in_primal, primal_id1, flag = 1)
        for volume in fine_elems_in_primal:
            #2
            list_keq = []
            list_p = []
            list_keq3 = []
            list_gidsadj = []
            list_gid = []
            list_qw = []
            qw3 = []
            qw = 0
            flux = {}
            velocity = {}
            fi = self.mb.tag_get_data(self.fi_tag, volume, flat=True)[0]
            # track the smallest porosity seen (used by the CFL estimate)
            if fi < self.fimin:
                self.fimin = fi
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume, flat=True)[0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume, flat=True)[0]
            fw_vol = self.mb.tag_get_data(self.fw_tag, volume, flat=True)[0]
            sat_vol = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]
            centroid_volume = self.mesh_topo_util.get_average_position([volume])
            adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            for adj in adjs_vol:
                #3
                gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]
                # across a primal interface use the multiscale pressure;
                # inside the primal use the locally corrected pressure
                if adj in volumes_in_interface:
                    #4
                    pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                    padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                    #3
                else:
                    #4
                    pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]
                    padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]
                    #3
                kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                centroid_adj = self.mesh_topo_util.get_average_position([adj])
                direction = centroid_adj - centroid_volume
                unit = direction/np.linalg.norm(direction)
                # unit = unit vector along `direction`
                uni = self.unitary(direction)
                # uni = componentwise absolute value of the unit vector
                # NOTE: kvol is overwritten with a scalar here and re-read
                # from the tag at the end of each adjacency iteration
                kvol = np.dot(np.dot(kvol,uni),uni)
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]
                lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]
                fw_adj = self.mb.tag_get_data(self.fw_tag, adj, flat=True)[0]
                keq3 = (kvol*lamb_w_vol + kadj*lamb_w_adj)/2.0
                kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                list_keq.append(keq)
                list_p.append(padj)
                list_gid.append(gid_adj)
                keq2 = keq
                keq = keq*(np.dot(self.A, uni))
                # velocity uses the pms gradient; flux uses the local one
                pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
                grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))
                q = (grad_p)*keq
                qw3.append(grad_p*keq3*(np.dot(self.A, uni)))
                # upwind the fractional flow: take fw from the higher-pressure side
                if grad_p < 0:
                    #4
                    fw = fw_vol
                    qw += (fw*grad_p*kvol*(np.dot(self.A, uni)))
                    list_qw.append(fw*grad_p*kvol*(np.dot(self.A, uni)))
                else:
                    fw = fw_adj
                    qw += (fw*grad_p*kadj*(np.dot(self.A, uni)))
                    list_qw.append(fw*grad_p*kadj*(np.dot(self.A, uni)))
                # sign convention keyed on global ids so each face gets one velocity
                if gid_adj > gid_vol:
                    v = -(grad_p2)*keq2
                else:
                    v = (grad_p2)*keq2
                flux[tuple(unit)] = q
                velocity[tuple(unit)] = v
                kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                if abs(sat_adj - sat_vol) < lim or abs(fw_adj -fw_vol) < lim:
                    continue
                # df/ds estimated by a finite difference across the interface
                dfds = abs((fw_adj - fw_vol)/(sat_adj - sat_vol))
                if dfds > self.dfdsmax:
                    self.dfdsmax = dfds
            #2
            list_keq.append(-sum(list_keq))
            list_p.append(pvol)
            list_gid.append(gid_vol)
            list_keq = np.array(list_keq)
            list_p = np.array(list_p)
            # kept for debugging; not used below
            resultado = sum(list_keq*list_p)
            self.store_velocity[volume] = velocity
            self.store_flux[volume] = flux
            self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))
            # conservation check: net flux should vanish away from wells
            if abs(sum(flux.values())) > lim and volume not in self.wells:
                print('nao esta dando conservativo o fluxo multiescala')
                print(gid_vol)
                print(sum(flux.values()))
                import pdb; pdb.set_trace()
            qmax = max(list(map(abs, flux.values())))
            if qmax > self.qmax:
                self.qmax = qmax
            if volume in self.wells_prod:
                qw_out = sum(flux.values())*fw_vol
                qw3.append(-qw_out)
                qo_out = sum(flux.values())*(1 - fw_vol)
                self.prod_o.append(qo_out)
                self.prod_w.append(qw_out)
                qw = qw - qw_out
            if abs(qw) < lim and qw < 0.0:
                qw = 0.0
            elif qw < 0 and volume not in self.wells_inj:
                print('gid')
                print(gid_vol)
                print('qw < 0')
                print(qw)
                import pdb; pdb.set_trace()
            else:
                pass
            self.mb.tag_set_data(self.flux_w_tag, volume, qw)
    soma_inj = []
    soma_prod = []
    soma2 = 0
    # per-well flux report for this time loop
    with open('fluxo_multiescala_bif{0}.txt'.format(self.loop), 'w') as arq:
        for volume in self.wells:
            gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]
            values = self.store_flux[volume].values()
            arq.write('gid:{0} , fluxo:{1}\n'.format(gid, sum(values)))
            if volume in self.wells_inj:
                soma_inj.append(sum(values))
            else:
                soma_prod.append(sum(values))
            soma2 += sum(values)
        arq.write('\n')
        arq.write('soma_inj:{0}\n'.format(sum(soma_inj)))
        arq.write('soma_prod:{0}\n'.format(sum(soma_prod)))
        arq.write('tempo:{0}'.format(self.tempo))
def create_tags(self, mb):
    """
    Create or fetch all MOAB tag handles used by the simulation.

    The double-valued sparse tags at the top are created if missing
    (create_if_missing=True); the handles looked up near the bottom take
    no type arguments and must already exist in the mesh file.

    NOTE(review): ``self.perm_tag`` is assigned twice — created as a
    9-double "PERM" tag and then re-fetched at the end; the second call
    returns the same handle, so it is redundant. Also, the last line uses
    ``self.mb`` while the rest of the method uses the ``mb`` argument —
    confirm both refer to the same core instance.
    """
    # --- double sparse tags (created on demand) ---
    self.flux_coarse_tag = mb.tag_get_handle(
        "FLUX_COARSE", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.flux_fine_pms_tag = mb.tag_get_handle(
        "FLUX_FINE_PMS", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.flux_fine_pf_tag = mb.tag_get_handle(
        "FLUX_FINE_PF", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.Pc2_tag = mb.tag_get_handle(
        "PC2", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.fi_tag = mb.tag_get_handle(
        "FI", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.prod_tag = mb.tag_get_handle(
        "PROD", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.lbt_tag = mb.tag_get_handle(
        "LBT", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.fw_tag = mb.tag_get_handle(
        "FW", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.vel_tag = mb.tag_get_handle(
        "VEL", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.pf2_tag = mb.tag_get_handle(
        "PF2", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.err_tag = mb.tag_get_handle(
        "ERRO", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.err2_tag = mb.tag_get_handle(
        "ERRO_2", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.pf_tag = mb.tag_get_handle(
        "PF", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.k_tag = mb.tag_get_handle(
        "K", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.contorno_tag = mb.tag_get_handle(
        "CONTORNO", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.pc_tag = mb.tag_get_handle(
        "PC", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.pms_tag = mb.tag_get_handle(
        "PMS", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.pms2_tag = mb.tag_get_handle(
        "PMS2", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.flux_w_tag = mb.tag_get_handle(
        "FLUX_W", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.p_tag = mb.tag_get_handle(
        "P", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.pcorr_tag = mb.tag_get_handle(
        "P_CORR", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    # full 3x3 permeability tensor, stored as 9 doubles
    self.perm_tag = mb.tag_get_handle(
        "PERM", 9, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.qpms_coarse_tag = mb.tag_get_handle(
        "QPMS_COARSE", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    # --- handles that must already exist in the mesh ---
    self.global_id_tag = mb.tag_get_handle("GLOBAL_ID")
    self.collocation_point_tag = mb.tag_get_handle("COLLOCATION_POINT")
    self.elem_primal_id_tag = mb.tag_get_handle(
        "FINE_PRIMAL_ID", 1, types.MB_TYPE_INTEGER, True,
        types.MB_TAG_SPARSE)
    self.sat_tag = mb.tag_get_handle(
        "SAT", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.lamb_w_tag = mb.tag_get_handle(
        "LAMB_W", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.lamb_o_tag = mb.tag_get_handle(
        "LAMB_O", 1, types.MB_TYPE_DOUBLE,
        types.MB_TAG_SPARSE, True)
    self.primal_id_tag = mb.tag_get_handle("PRIMAL_ID")
    self.faces_primal_id_tag = mb.tag_get_handle("PRIMAL_FACES")
    self.all_faces_primal_id_tag = mb.tag_get_handle("PRIMAL_ALL_FACES")
    self.fine_to_primal_tag = mb.tag_get_handle("FINE_TO_PRIMAL")
    self.valor_da_prescricao_tag = mb.tag_get_handle("VALOR_DA_PRESCRICAO")
    self.tipo_de_prescricao_tag = mb.tag_get_handle("TIPO_DE_PRESCRICAO")
    self.wells_tag = mb.tag_get_handle("WELLS")
    self.tipo_de_poco_tag = mb.tag_get_handle("TIPO_DE_POCO")
    self.loops_tag = mb.tag_get_handle('LOOPS')
    self.flag_gravidade_tag = mb.tag_get_handle('GRAV')
    self.t_tag = mb.tag_get_handle("T")
    self.miw_tag = mb.tag_get_handle("MIW")
    self.mio_tag = mb.tag_get_handle("MIO")
    self.rhow_tag = mb.tag_get_handle("RHOW")
    self.rhoo_tag = mb.tag_get_handle("RHOO")
    self.gamaw_tag = mb.tag_get_handle("GAMAW")
    self.gamao_tag = mb.tag_get_handle("GAMAO")
    self.nw_tag = mb.tag_get_handle("NW")
    self.no_tag = mb.tag_get_handle("NO")
    self.Sor_tag = mb.tag_get_handle("SOR")
    self.Swc_tag = mb.tag_get_handle("SWC")
    self.Swi_tag = mb.tag_get_handle("SWI")
    self.volumes_in_primal_tag = mb.tag_get_handle("VOLUMES_IN_PRIMAL")
    # self.all_faces_boundary_tag = mb.tag_get_handle("ALL_FACES_BOUNDARY")
    # self.all_faces_tag = mb.tag_get_handle("ALL_FACES")
    # self.faces_wells_d_tag = mb.tag_get_handle("FACES_WELLS_D")
    # self.faces_all_fine_vols_ic_tag = mb.tag_get_handle("FACES_ALL_FINE_VOLS_IC")
    # NOTE(review): redundant re-fetch of the PERM handle created above
    self.perm_tag = mb.tag_get_handle("PERM")
    self.line_elems_tag = self.mb.tag_get_handle("LINE_ELEMS")
def Dirichlet_problem(self):
    """
    Recompute the pressures inside each primal (coarse) cell by solving a
    local Dirichlet problem: the multiscale pressure is prescribed on the
    primal's interface volumes and on collocation points, and a local
    TPFA system is assembled/solved for the remaining unknowns.
    Results are written to ``pcorr_tag``.
    """
    #0
    colocation_points = self.mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    sets = []
    for col in colocation_points:
        #1
        #col = mb.get_entities_by_handle(col)[0]
        sets.append(self.mb.get_entities_by_handle(col)[0])
    #0
    sets = set(sets)
    for primal in self.primals:
        #1
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        volumes_in_primal = self.get_volumes_in_interfaces(
            fine_elems_in_primal, primal_id, flag = 2)
        all_volumes = list(fine_elems_in_primal)
        all_volumes_ic = self.all_fine_vols_ic & set(all_volumes)
        gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, all_volumes_ic, flat=True)
        # gids_vols_ic = unknown volumes in the primal, i.e. volumes
        # excluding those with prescribed pressure
        map_volumes = dict(zip(gids_vols_ic, range(len(gids_vols_ic))))
        # map_volumes = local (per-primal) index map
        std_map = Epetra.Map(len(all_volumes_ic), 0, self.comm)
        b = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        dim = len(all_volumes_ic)
        # b_np = np.zeros(dim)
        # A_np = np.zeros((dim, dim))
        for volume in all_volumes_ic:
            #2
            soma = 0
            temp_id = []
            temp_k = []
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            # collocation points and interface volumes: identity row with
            # the multiscale pressure as the right-hand side
            if (volume in sets) or (volume in volumes_in_primal):
                #3
                temp_k.append(1.0)
                temp_id.append(map_volumes[global_volume])
                b[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                # b_np[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
            #2
            else:
                #3
                # interior unknown: assemble the TPFA row
                for adj in adj_volumes:
                    #4
                    global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    adj_centroid = self.mesh_topo_util.get_average_position([adj])
                    direction = adj_centroid - volume_centroid
                    uni = self.unitary(direction)
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
                    soma = soma + keq
                    # prescribed-pressure neighbor: move its contribution to b
                    if global_adj in self.wells_d:
                        #5
                        index = self.wells_d.index(global_adj)
                        b[map_volumes[global_volume]] += self.set_p[index]*(keq)
                        # b_np[map_volumes[global_volume]] += self.set_p[index]*(keq)
                    #4
                    else:
                        #5
                        temp_id.append(map_volumes[global_adj])
                        temp_k.append(-keq)
                    #4
                    # restore the full tensor (it was collapsed to a scalar above)
                    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                #3
                temp_k.append(soma)
                temp_id.append(map_volumes[global_volume])
                if global_volume in self.wells_n:
                    #4
                    index = self.wells_n.index(global_volume)
                    # NOTE(review): single [0] here yields an array (siblings
                    # use [0][0]); the == 1 comparison still works elementwise
                    tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0]
                    # type-1 wells add the prescribed flux, others subtract it
                    if tipo_de_poco == 1:
                        #5
                        b[map_volumes[global_volume]] += self.set_q[index]
                        # b_np[map_volumes[global_volume]] += self.set_q[index]
                    #4
                    else:
                        #5
                        b[map_volumes[global_volume]] += -self.set_q[index]
                        # b_np[map_volumes[global_volume]] += -self.set_q[index]
            #2
            A.InsertGlobalValues(map_volumes[global_volume], temp_k, temp_id)
            # A_np[map_volumes[global_volume], temp_id] = temp_k
        #1
        A.FillComplete()
        x = self.solve_linear_problem(A, b, dim)
        # x_np = np.linalg.solve(A_np, b_np)
        for volume in all_volumes_ic:
            #2
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            self.mb.tag_set_data(self.pcorr_tag, volume, x[map_volumes[global_volume]])
            # self.mb.tag_set_data(self.pms2_tag, volume, x_np[map_volumes[global_volume]])
        #1
        # volumes with prescribed pressure just receive their prescription
        for volume in set(all_volumes) - all_volumes_ic:
            #2
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            index = self.wells_d.index(global_volume)
            p = self.set_p[index]
            self.mb.tag_set_data(self.pcorr_tag, volume, p)
def div_max(self, p_tag):
    """
    Scan all fine volumes and return ``(|q|, fi)`` for the volume whose
    TPFA flux divergence of the pressure field ``p_tag`` has the largest
    absolute value (fi is that volume's porosity).

    NOTE(review): uses module-level ``mb``/``mesh_topo_util`` instead of
    self.mb/self.mesh_topo_util — confirm those globals exist at call time.
    """
    q2 = 0.0
    fi = 0.0
    for volume in self.all_fine_vols:
        soma1 = 0.0
        soma2 = 0.0
        pvol = mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = mesh_topo_util.get_average_position([volume])
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        for adj in adjs_vol:
            padj = mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)
            # kvol collapses to a scalar here; re-read from the tag below
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq/(np.dot(self.h2, uni))
            soma1 = soma1 - keq
            soma2 = soma2 + keq*padj
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        soma1 = soma1*pvol
        # q = sum_adj keq*(padj - pvol)
        q = soma1 + soma2
        if abs(q) > abs(q2):
            q2 = q
            fi = mb.tag_get_data(self.fi_tag, volume)[0][0]
    return abs(q2), fi
def div_max_2(self, p_tag):
    """
    Scan all fine volumes and return ``(q2, fi)`` where q2 is the largest
    absolute TPFA flux divergence of ``p_tag`` (with area/length scaling)
    and fi is the porosity of the volume where it occurs.

    NOTE(review): uses module-level ``mb``/``mesh_topo_util`` instead of
    self.mb/self.mesh_topo_util — confirm those globals exist at call time.
    """
    q2 = 0.0
    fi = 0.0
    for volume in self.all_fine_vols:
        q = 0.0
        pvol = mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = mesh_topo_util.get_average_position([volume])
        global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        for adj in adjs_vol:
            padj = mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)
            # kvol collapses to a scalar here; re-read from the tag below
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
            q = q + keq*(padj - pvol)
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        if abs(q) > q2:
            q2 = abs(q)
            fi = mb.tag_get_data(self.fi_tag, volume)[0][0]
    return q2, fi
def div_max_3(self, p_tag):
    """
    Find the maximum, over all interfaces, of |df/ds * keq * (padj - pvol)|
    — the water flux leaving a control volume scaled by the derivative of
    the fractional flow with respect to saturation (df/ds, estimated by a
    finite difference of fw across the interface).

    Returns (q2, fi2). NOTE(review): fi2 is the maximum porosity over ALL
    volumes, not the porosity of the argmax volume (div_max / div_max_2
    return the latter) — confirm this asymmetry is intended.
    """
    lim = 10**(-12)
    q2 = 0.0
    fi = 0.0
    fi2 = 0.0
    dfds2 = 0
    for volume in self.all_fine_vols:
        q = 0.0
        pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
        adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
        volume_centroid = self.mesh_topo_util.get_average_position([volume])
        global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
        lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
        sat_vol = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
        fi = self.mb.tag_get_data(self.fi_tag, volume)[0][0]
        if fi > fi2:
            fi2 = fi
        for adj in adjs_vol:
            padj = self.mb.tag_get_data(p_tag, adj)[0][0]
            adj_centroid = self.mesh_topo_util.get_average_position([adj])
            direction = adj_centroid - volume_centroid
            lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
            lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
            uni = self.unitary(direction)
            # kvol collapses to a scalar here; re-read from the tag below
            kvol = np.dot(np.dot(kvol,uni),uni)
            kvol = kvol*(lamb_w_vol + lamb_o_vol)
            kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
            kadj = np.dot(np.dot(kadj,uni),uni)
            kadj = kadj*(lamb_w_adj + lamb_o_adj)
            keq = self.kequiv(kvol, kadj)
            keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
            sat_adj = self.mb.tag_get_data(self.sat_tag, adj)[0][0]
            # skip interfaces with (numerically) equal saturations
            if abs(sat_adj - sat_vol) < lim:
                continue
            # fw = lamb_w / (lamb_w + lamb_o); df/ds by finite difference
            dfds = ((lamb_w_adj/(lamb_w_adj+lamb_o_adj)) - (lamb_w_vol/(lamb_w_vol+lamb_o_vol)))/float((sat_adj - sat_vol))
            q = abs(dfds*keq*(padj - pvol))
            if q > q2:
                q2 = q
                dfds2 = abs(dfds)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    return q2, fi2
def div_upwind_1(self, volume, p_tag):
    """
    Upwind water-flux divergence (finite-difference form) for ``volume``.

    The interface mobility is taken from the side with the higher
    pressure (upwind).

    Parameters
    ----------
    volume : fine-mesh entity handle.
    p_tag : MOAB tag holding the pressure field.

    Returns
    -------
    q : sum over faces of keq*(padj - pvol).
    """
    soma1 = 0.0
    soma2 = 0.0
    # FIX: this method previously read through the module-level ``mb`` /
    # ``mesh_topo_util`` globals; use the instance attributes for
    # consistency with the sibling methods (e.g. div_upwind_3).
    pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
    adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    volume_centroid = self.mesh_topo_util.get_average_position([volume])
    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
    for adj in adjs_vol:
        padj = self.mb.tag_get_data(p_tag, adj)[0][0]
        adj_centroid = self.mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        # scalar permeability of each side in the face direction
        kvol = np.dot(np.dot(kvol, uni), uni)
        kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        kadj = np.dot(np.dot(kadj, uni), uni)
        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
        grad_p = padj - pvol
        if grad_p > 0:
            # flow enters from the neighbour: use its water mobility
            keq = (lamb_w_adj*kadj)/(np.dot(self.h2, uni))
        else:
            keq = (lamb_w_vol*kvol)/(np.dot(self.h2, uni))
        soma1 = soma1 + keq
        soma2 = soma2 + keq*padj
        # restore the full permeability tensor for the next adjacency
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    soma1 = -soma1*pvol
    q = soma1 + soma2
    return q
def div_upwind_2(self, volume, p_tag):
    """
    Total water flux entering ``volume`` (used to update the saturation).

    The interface mobility is taken from the side with the higher
    pressure (upwind).

    Parameters
    ----------
    volume : fine-mesh entity handle.
    p_tag : MOAB tag holding the pressure field.

    Returns
    -------
    q : sum over faces of keq * grad_p * A.
    """
    q = 0.0
    # FIX: this method previously read through the module-level ``mb`` /
    # ``mesh_topo_util`` globals; use the instance attributes for
    # consistency with the sibling methods (e.g. div_upwind_3).
    pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
    adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    volume_centroid = self.mesh_topo_util.get_average_position([volume])
    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
    for adj in adjs_vol:
        padj = self.mb.tag_get_data(p_tag, adj)[0][0]
        adj_centroid = self.mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        # scalar permeability of each side in the face direction
        kvol = np.dot(np.dot(kvol, uni), uni)
        kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        kadj = np.dot(np.dot(kadj, uni), uni)
        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
        grad_p = (padj - pvol)/float((np.dot(self.h, uni)))
        if grad_p > 0:
            # flow enters from the neighbour: use its water mobility
            keq = lamb_w_adj*kadj
        else:
            keq = lamb_w_vol*kvol
        q = q + keq*grad_p*(np.dot(self.A, uni))
        # restore the full permeability tensor for the next adjacency
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    return q
def div_upwind_3(self, volume, p_tag):
    """
    Compute the water flux entering ``volume`` (used to update the
    saturation). The interface mobility is the arithmetic mean of the
    two water mobilities.

    For producer wells the total entering flux (qt, computed with the
    total mobilities) is split by the fractional flow fw of the volume
    into an oil part qp (stored in ``self.prod_tag``) and a water part
    qw (subtracted from q).

    Drops into pdb when the resulting water flux is negative.
    """
    qt = 0.0  # total (water + oil) flux entering the volume, producers only
    qp = 0.0  # oil flux leaving through the producer
    q = 0.0   # net water flux entering the volume
    qw = 0.0  # water flux leaving through the producer
    # bookkeeping lists kept for debugging; they do not affect the result
    list_sat = []
    list_lbw = []
    list_gid = []
    list_grad = []
    list_q = []
    list_p = []
    list_lbeq = []
    pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
    adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    volume_centroid = self.mesh_topo_util.get_average_position([volume])
    global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
    sat_volume = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]
    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
    lbt_vol = self.mb.tag_get_data(self.lbt_tag, volume)[0][0]
    fw_vol = self.mb.tag_get_data(self.fw_tag, volume)[0][0]
    for adj in adjs_vol:
        global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
        sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]
        padj = self.mb.tag_get_data(p_tag, adj)[0][0]
        lbt_adj = self.mb.tag_get_data(self.lbt_tag, adj)[0][0]
        adj_centroid = self.mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        # scalar permeability of each side in the face direction
        kvol = np.dot(np.dot(kvol,uni),uni)
        kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        kadj = np.dot(np.dot(kadj,uni),uni)
        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
        keq = self.kequiv(kvol, kadj)
        # if global_adj > global_volume:
        #     grad_p = (padj - pvol)/float(np.dot(self.h, uni))
        # else:
        #     grad_p = (pvol - padj)/float(np.dot(self.h, uni))
        grad_p = (padj - pvol)/float(np.dot(self.h, uni))
        # arithmetic-mean water mobility at the interface
        lamb_eq = (lamb_w_vol + lamb_w_adj)/2.0
        keq = keq*lamb_eq
        q = q + keq*(grad_p)*(np.dot(self.A, uni))
        # oil production accounting
        if global_volume in self.wells_prod:
            kvol2 = kvol*(lbt_vol)
            kadj2 = kadj*(lbt_adj)
            keq2 = self.kequiv(kvol2, kadj2)
            qt += grad_p*(keq2)*(np.dot(self.A, uni))  # total flux entering the volume
        list_sat.append(sat_adj)
        list_lbw.append(lamb_w_adj)
        list_gid.append(global_adj)
        list_grad.append(grad_p)
        list_q.append(keq*(grad_p)*(np.dot(self.A, uni)))
        list_p.append(padj)
        list_lbeq.append(lamb_eq)
        # restore the full permeability tensor for the next adjacency
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    if global_volume in self.wells_prod:
        qp += (1 - fw_vol)*qt  # oil flux leaving the volume
        qw += (fw_vol)*qt  # water flux leaving the volume
        q = q - qw
        self.mb.tag_set_data(self.prod_tag, volume, qp)
    list_sat.append(sat_volume)
    list_lbw.append(lamb_w_vol)
    list_gid.append(global_volume)
    list_q.append(q)
    list_p.append(pvol)
    if q < 0:
        print('divergente upwind de agua menor que zero na funcao div_upwind_3')
        import pdb; pdb.set_trace()
    return q
def erro(self):
    """
    Store in ``err_tag`` the relative error |(Pf - Pms)/Pf| of every
    fine volume; Dirichlet-well volumes get exactly zero.
    """
    for vol in self.all_fine_vols:
        if vol in self.wells_d:
            # prescribed-pressure volumes match by construction
            self.mb.tag_set_data(self.err_tag, vol, 0.0)
            continue
        fine_p = self.mb.tag_get_data(self.pf_tag, vol, flat = True)[0]
        ms_p = self.mb.tag_get_data(self.pms_tag, vol, flat = True)[0]
        rel_err = abs((fine_p - ms_p)/float(fine_p))
        self.mb.tag_set_data(self.err_tag, vol, rel_err)
def erro_2(self):
    """
    Store in ``err2_tag`` the absolute error |Pf - Pms| and in
    ``err_tag`` the percentage relative error 100*|(Pf - Pms)/Pf| of
    every fine volume; Dirichlet-well volumes get zero in both tags.
    """
    for vol in self.all_fine_vols:
        if vol in self.wells_d:
            self.mb.tag_set_data(self.err_tag, vol, 0.0)
            self.mb.tag_set_data(self.err2_tag, vol, 0.0)
            continue
        fine_p = self.mb.tag_get_data(self.pf_tag, vol, flat = True)[0]
        ms_p = self.mb.tag_get_data(self.pms_tag, vol, flat = True)[0]
        abs_err = abs(fine_p - ms_p)
        self.mb.tag_set_data(self.err2_tag, vol, abs_err)
        pct_err = 100*abs((fine_p - ms_p)/float(fine_p))
        self.mb.tag_set_data(self.err_tag, vol, pct_err)
def get_volumes_in_interfaces(self, fine_elems_in_primal, primal_id, **options):
    """
    Collect the fine volumes on the interface of primal ``primal_id``.

    ``volumes_in_interface`` holds the neighbouring volumes that belong
    to adjacent primals; ``volumes_in_primal`` holds the (deduplicated)
    volumes of the current primal that touch them.

    Return value depends on ``options['flag']``:
      flag == 1 -> (volumes_in_interface, volumes_in_primal)
      flag == 2 -> volumes_in_primal
      otherwise -> volumes_in_interface
    """
    #0
    interface_vols = []
    boundary_vols = []
    for elem in fine_elems_in_primal:
        #1
        neighbours = self.mesh_topo_util.get_bridge_adjacencies(elem, 2, 3)
        for neighbour in neighbours:
            #2
            fin_prim = self.mb.tag_get_data(self.fine_to_primal_tag, neighbour, flat=True)
            neighbour_primal = self.mb.tag_get_data(
                self.primal_id_tag, int(fin_prim), flat=True)[0]
            if neighbour_primal != primal_id:
                #3 the face crosses the primal boundary
                interface_vols.append(neighbour)
                boundary_vols.append(elem)
    #0
    boundary_vols = list(set(boundary_vols))
    flag = options.get("flag")
    if flag == 1:
        return interface_vols, boundary_vols
    if flag == 2:
        return boundary_vols
    return interface_vols
def get_wells(self):
    """
    Classify the well elements stored in the ``wells_tag`` meshset.

    Populates:
      self.wells      -- all well elements
      self.wells_d    -- elements with prescribed pressure
      self.wells_n    -- elements with prescribed flow rate
      self.set_p      -- pressure values matching wells_d
      self.set_q      -- rate values matching wells_n
      self.wells_inj  -- injector elements
      self.wells_prod -- producer elements
    """
    pressure_wells = []
    rate_wells = []
    pressures = []
    rates = []
    injectors = []
    producers = []
    wells_set = self.mb.tag_get_data(self.wells_tag, 0, flat=True)[0]
    self.wells = self.mb.get_entities_by_handle(wells_set)
    for well in self.wells:
        prescription = self.mb.tag_get_data(self.valor_da_prescricao_tag, well, flat=True)[0]
        prescription_kind = self.mb.tag_get_data(self.tipo_de_prescricao_tag, well, flat=True)[0]
        #raio_do_poco = mb.tag_get_data(raio_do_poco_tag, well, flat=True)[0]
        well_kind = self.mb.tag_get_data(self.tipo_de_poco_tag, well, flat=True)[0]
        #tipo_de_fluido = mb.tag_get_data(tipo_de_fluido_tag, well, flat=True)[0]
        #pwf = mb.tag_get_data(pwf_tag, well, flat=True)[0]
        if prescription_kind == 0:
            # kind 0 -> prescribed pressure (Dirichlet)
            pressure_wells.append(well)
            pressures.append(prescription)
        else:
            # anything else -> prescribed rate (Neumann)
            rate_wells.append(well)
            rates.append(prescription)
        if well_kind == 1:
            injectors.append(well)
        else:
            producers.append(well)
    self.wells_d = pressure_wells
    self.wells_n = rate_wells
    self.set_p = pressures
    self.set_q = rates
    self.wells_inj = injectors
    self.wells_prod = producers
def get_wells_gr(self):
    """
    Classify the well elements stored in the ``wells_tag`` meshset,
    adding the gravity head to prescribed pressures.

    Populates:
      self.wells      -- all well elements
      self.wells_d    -- elements with prescribed pressure
      self.wells_n    -- elements with prescribed flow rate
      self.set_p      -- pressures matching wells_d, corrected by
                         (tz - z_centroid) * gama_w (water gravity head)
      self.set_q      -- rate values matching wells_n
      self.wells_inj  -- injector elements
      self.wells_prod -- producer elements
    """
    pressure_wells = []
    rate_wells = []
    pressures = []
    rates = []
    injectors = []
    producers = []
    wells_set = self.mb.tag_get_data(self.wells_tag, 0, flat=True)[0]
    self.wells = self.mb.get_entities_by_handle(wells_set)
    for well in self.wells:
        prescription = self.mb.tag_get_data(self.valor_da_prescricao_tag, well, flat=True)[0]
        prescription_kind = self.mb.tag_get_data(self.tipo_de_prescricao_tag, well, flat=True)[0]
        centroid = self.mesh_topo_util.get_average_position([well])
        #raio_do_poco = self.mb.tag_get_data(self.raio_do_poco_tag, well, flat=True)[0]
        well_kind = self.mb.tag_get_data(self.tipo_de_poco_tag, well, flat=True)[0]
        #tipo_de_fluido = self.mb.tag_get_data(self.tipo_de_fluido_tag, well, flat=True)[0]
        #pwf = self.mb.tag_get_data(self.pwf_tag, well, flat=True)[0]
        if prescription_kind == 0:
            # kind 0 -> prescribed pressure, shifted by the water column
            pressure_wells.append(well)
            pressures.append(prescription + (self.tz - centroid[2])*self.gama_w)
        else:
            # anything else -> prescribed rate
            rate_wells.append(well)
            rates.append(prescription)
        if well_kind == 1:
            injectors.append(well)
        else:
            producers.append(well)
    self.wells_d = pressure_wells
    self.wells_n = rate_wells
    self.set_p = pressures
    self.set_q = rates
    self.wells_inj = injectors
    self.wells_prod = producers
def kequiv(self, k1, k2):
    """
    Equivalent permeability of the face shared by two cells with
    permeabilities ``k1`` and ``k2``: the harmonic mean for equal
    half-cell widths, 2*k1*k2/(k1 + k2).
    """
    #keq = ((2*k1*k2)/(h1*h2))/((k1/h1) + (k2/h2))
    numerator = 2*k1*k2
    return numerator/(k1 + k2)
def modificar_matriz(self, A, rows, columns):
    """
    Copy matrix ``A`` into a new Epetra CRS matrix resized to
    ``rows`` x ``columns`` and return it.
    """
    row_map = Epetra.Map(rows, 0, self.comm)
    col_map = Epetra.Map(columns, 0, self.comm)
    resized = Epetra.CrsMatrix(Epetra.Copy, row_map, col_map, 3)
    for row in range(rows):
        # ExtractGlobalRowCopy returns (values, column indices, ...)
        extracted = A.ExtractGlobalRowCopy(row)
        resized.InsertGlobalValues(row, extracted[0], extracted[1])
    resized.FillComplete()
    return resized
def modificar_vetor(self, v, nc):
    """
    Copy the first ``nc`` entries of vector ``v`` into a new Epetra
    vector of size ``nc`` and return it.
    """
    resized_map = Epetra.Map(nc, 0, self.comm)
    out = Epetra.Vector(resized_map)
    for pos in range(nc):
        out[pos] = v[pos]
    return out
def mount_lines_1(self, volume, map_id):
    """
    Assemble the TPFA matrix row of ``volume``.

    Parameters
    ----------
    volume : fine-mesh entity handle.
    map_id : dict mapping entity handles to matrix column indices.

    Returns
    -------
    (temp_k, temp_ids) : row coefficients and matching column indices.
    Off-diagonal entries are -keq for each adjacency; the last entry is
    the diagonal, equal to minus the sum of the off-diagonals, placed at
    the volume's own index.
    """
    #0
    # volume_centroid = self.mb.tag_get_data(self.centroid_tag, volume, flat=True)
    volume_centroid = self.mesh_topo_util.get_average_position([volume])
    adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume, flat=True)[0]
    lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume, flat=True)[0]
    temp_ids = []
    temp_k = []
    for adj in adj_volumes:
        #1
        # adj_centroid = self.mb.tag_get_data(self.centroid_tag, adj, flat=True)
        adj_centroid = self.mesh_topo_util.get_average_position([adj])
        direction = adj_centroid - volume_centroid
        uni = self.unitary(direction)
        # project each permeability tensor on the face direction and
        # weight by the total mobility of its side
        kvol = np.dot(np.dot(kvol,uni),uni)
        kvol = kvol*(lamb_w_vol + lamb_o_vol)
        kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]
        lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]
        kadj = np.dot(np.dot(kadj,uni),uni)
        kadj = kadj*(lamb_w_adj + lamb_o_adj)
        keq = self.kequiv(kvol, kadj)
        # transmissibility: face area over centroid distance
        keq = keq*(np.dot(self.A, uni))/float(abs(np.dot(direction, uni)))
        temp_ids.append(map_id[adj])
        temp_k.append(-keq)
        # restore the full permeability tensor for the next adjacency
        kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
    #0
    temp_k.append(-sum(temp_k))
    temp_ids.append(map_id[volume])
    return temp_k, temp_ids
def multimat_vector(self, A, row, b):
    """
    Return c = A*b, where ``row`` is the number of rows of ``A``
    (equal to the length of ``b``).
    """
    result_map = Epetra.Map(row, 0, self.comm)
    result = Epetra.Vector(result_map)
    # first argument False -> use A as-is (no transpose)
    A.Multiply(False, b, result)
    return result
def Neuman_problem_4(self):
    """
    Solve a local problem on each primal (primal volumes plus the first
    layer of neighbours) and store the corrected pressures.

    Row types per volume:
      - well with prescribed pressure: identity row, RHS = prescription;
      - well with prescribed rate: mobility-weighted TPFA row, RHS =
        +/- prescription (sign by well type);
      - collocation point: identity row pinned to the multiscale pressure;
      - interface volume: imposes the multiscale pressure jump to an
        in-primal neighbour;
      - otherwise: plain TPFA row (no mobility weighting).

    Results go to ``p_tag`` (Epetra solve) and ``pms2_tag`` (numpy solve).

    FIXES versus the previous version: the mobility weighting of the
    adjacent permeability used the undefined name ``kadj`` instead of
    ``k_adj`` (NameError / lost weighting); module-level ``mb``,
    ``mesh_topo_util`` and ``comm`` were replaced by the instance
    attributes used by the sibling methods; ``tipo_de_poco`` is read as
    a scalar before the comparison.
    """
    colocation_points = self.mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    sets = []
    for col in colocation_points:
        sets.append(self.mb.get_entities_by_handle(col)[0])
    sets = set(sets)
    for primal in self.primals:
        volumes_in_interface = []  # v1: neighbours living in adjacent primals
        volumes_in_primal = []     # v2: this primal's volumes (+ interface at the end)
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        for fine_elem in fine_elems_in_primal:
            volumes_in_primal.append(fine_elem)
            adj_fine_elems = self.mesh_topo_util.get_bridge_adjacencies(fine_elem, 2, 3)
            for adj in adj_fine_elems:
                fin_prim = self.mb.tag_get_data(self.fine_to_primal_tag, adj, flat=True)
                primal_adj = self.mb.tag_get_data(
                    self.primal_id_tag, int(fin_prim), flat=True)[0]
                if primal_adj != primal_id:
                    volumes_in_interface.append(adj)
        volumes_in_primal.extend(volumes_in_interface)
        id_map = dict(zip(volumes_in_primal, range(len(volumes_in_primal))))
        std_map = Epetra.Map(len(volumes_in_primal), 0, self.comm)
        b = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        dim = len(volumes_in_primal)
        b_np = np.zeros(dim)
        A_np = np.zeros((dim, dim))
        for volume in volumes_in_primal:
            temp_id = []
            temp_k = []
            centroid_volume = self.mesh_topo_util.get_average_position([volume])
            k_vol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            adj_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            if volume in self.wells:
                tipo_de_prescricao = self.mb.tag_get_data(self.tipo_de_prescricao_tag, volume)[0][0]
                if tipo_de_prescricao == 0:
                    # prescribed pressure: identity row
                    valor_da_prescricao = self.mb.tag_get_data(self.valor_da_prescricao_tag, volume)[0][0]
                    temp_k.append(1.0)
                    temp_id.append(id_map[volume])
                    b[id_map[volume]] = valor_da_prescricao
                    b_np[id_map[volume]] = valor_da_prescricao
                else:
                    # prescribed rate: mobility-weighted TPFA row
                    soma = 0.0
                    for adj in adj_vol:
                        centroid_adj = self.mesh_topo_util.get_average_position([adj])
                        direction = centroid_adj - centroid_volume
                        uni = self.unitary(direction)
                        # NOTE(review): k_vol is projected to a scalar here and
                        # not restored between adjacencies — confirm intended
                        k_vol = np.dot(np.dot(k_vol, uni), uni)
                        k_vol = k_vol*(lamb_w_vol + lamb_o_vol)
                        k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                        k_adj = np.dot(np.dot(k_adj, uni), uni)
                        lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                        lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                        # BUG FIX: was `kadj = kadj*(...)` with `kadj` undefined
                        k_adj = k_adj*(lamb_w_adj + lamb_o_adj)
                        keq = self.kequiv(k_vol, k_adj)
                        keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
                        soma = soma + keq
                        temp_k.append(-keq)
                        temp_id.append(id_map[adj])
                    temp_k.append(soma)
                    temp_id.append(id_map[volume])
                    tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
                    valor_da_prescricao = self.mb.tag_get_data(self.valor_da_prescricao_tag, volume)[0][0]
                    if tipo_de_poco == 1:
                        # injector: positive source
                        b[id_map[volume]] = valor_da_prescricao
                        b_np[id_map[volume]] = valor_da_prescricao
                    else:
                        # producer: negative source
                        b[id_map[volume]] = -valor_da_prescricao
                        b_np[id_map[volume]] = -valor_da_prescricao
            elif volume in sets:
                # collocation point: keep the multiscale pressure
                temp_k.append(1.0)
                temp_id.append(id_map[volume])
                b[id_map[volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                b_np[id_map[volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
            elif volume in volumes_in_interface:
                # interface volume: impose the Pms jump to an in-primal neighbour
                for adj in adj_vol:
                    fin_prim = self.mb.tag_get_data(self.fine_to_primal_tag, adj, flat=True)
                    primal_adj = self.mb.tag_get_data(
                        self.primal_id_tag, int(fin_prim), flat=True)[0]
                    if primal_adj == primal_id:
                        pms_adj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                        pms_volume = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                        b[id_map[volume]] = pms_volume - pms_adj
                        b_np[id_map[volume]] = pms_volume - pms_adj
                        temp_k.append(1.0)
                        temp_id.append(id_map[volume])
                        temp_k.append(-1.0)
                        temp_id.append(id_map[adj])
            else:
                # interior volume: plain TPFA row (no mobility weighting)
                soma = 0.0
                for adj in adj_vol:
                    centroid_adj = self.mesh_topo_util.get_average_position([adj])
                    direction = centroid_adj - centroid_volume
                    uni = self.unitary(direction)
                    k_vol = np.dot(np.dot(k_vol, uni), uni)
                    k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    k_adj = np.dot(np.dot(k_adj, uni), uni)
                    keq = self.kequiv(k_vol, k_adj)
                    keq = keq/(np.dot(self.h2, uni))
                    soma = soma + keq
                    temp_k.append(-keq)
                    temp_id.append(id_map[adj])
                temp_k.append(soma)
                temp_id.append(id_map[volume])
            A.InsertGlobalValues(id_map[volume], temp_k, temp_id)
            A_np[id_map[volume], temp_id] = temp_k[:]
        A.FillComplete()
        x = self.solve_linear_problem(A, b, dim)
        x_np = np.linalg.solve(A_np, b_np)
        # write back only the volumes that belong to this primal
        # (the interface volumes were appended at the end of the list)
        for i in range(len(volumes_in_primal) - len(volumes_in_interface)):
            volume = volumes_in_primal[i]
            self.mb.tag_set_data(self.p_tag, volume, x[i])
            self.mb.tag_set_data(self.pms2_tag, volume, x_np[i])
def Neuman_problem_4_3(self):
    """
    Recompute the pressure inside each primal using the prescribed flux
    on the primal interfaces.

    A local linear system is assembled over each primal's volumes plus
    the first layer of neighbours (volumes_in_interface), restricted to
    the unknown volumes (``all_fine_vols_ic``). Row types:
      - collocation point: identity row pinned to the multiscale pressure;
      - interface volume: imposes the multiscale pressure jump to an
        in-primal neighbour;
      - otherwise: mobility-weighted TPFA row, moving known Dirichlet
        pressures to the RHS and adding prescribed rates.
    Results go to ``pcorr_tag`` (Epetra solve) and ``pms2_tag`` (numpy
    solve); volumes with prescribed pressure get their prescription.
    """
    #0
    colocation_points = self.mb.get_entities_by_type_and_tag(
        0, types.MBENTITYSET, self.collocation_point_tag, np.array([None]))
    sets = []
    for col in colocation_points:
        #1
        #col = mb.get_entities_by_handle(col)[0]
        sets.append(self.mb.get_entities_by_handle(col)[0])
    #0
    sets = set(sets)
    for primal in self.primals:
        #1
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        volumes_in_interface = self.get_volumes_in_interfaces(
            fine_elems_in_primal, primal_id)
        all_volumes = list(fine_elems_in_primal) + volumes_in_interface
        # restrict the local system to the unknown (no prescribed pressure) volumes
        all_volumes_ic = self.all_fine_vols_ic & set(all_volumes)
        gids_vols_ic = self.mb.tag_get_data(self.global_id_tag, all_volumes_ic, flat=True)
        # map global id -> local row index
        map_volumes = dict(zip(gids_vols_ic, range(len(gids_vols_ic))))
        std_map = Epetra.Map(len(all_volumes_ic), 0, self.comm)
        b = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        dim = len(all_volumes_ic)
        b_np = np.zeros(dim)
        A_np = np.zeros((dim, dim))
        for volume in all_volumes_ic:
            #2
            soma = 0
            temp_id = []
            temp_k = []
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            if volume in sets:
                #3 collocation point: keep the multiscale pressure
                temp_k.append(1.0)
                temp_id.append(map_volumes[global_volume])
                b[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
                b_np[map_volumes[global_volume]] = self.mb.tag_get_data(self.pms_tag, volume)[0]
            #2
            elif volume in volumes_in_interface:
                #3 interface volume: impose the Pms jump to an in-primal neighbour
                for adj in adj_volumes:
                    #4
                    if adj in fine_elems_in_primal:
                        #5
                        global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                        pms_adj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                        pms_volume = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                        b[map_volumes[global_volume]] = pms_volume - pms_adj
                        b_np[map_volumes[global_volume]] = pms_volume - pms_adj
                        temp_k.append(1.0)
                        temp_id.append(map_volumes[global_volume])
                        temp_k.append(-1.0)
                        temp_id.append(map_volumes[global_adj])
                    #4
                    else:
                        #5 neighbour outside the primal: no contribution
                        pass
            #2
            else:
                #3 interior volume: mobility-weighted TPFA row
                for adj in adj_volumes:
                    #4
                    global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    adj_centroid = self.mesh_topo_util.get_average_position([adj])
                    direction = adj_centroid - volume_centroid
                    uni = self.unitary(direction)
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
                    soma = soma + keq
                    # NOTE(review): here wells_d/wells_n are looked up by global
                    # id, while get_wells stores entity handles — confirm which
                    # variant populates them for this code path
                    if global_adj in self.wells_d:
                        #5 known Dirichlet pressure moves to the RHS
                        index = self.wells_d.index(global_adj)
                        b[map_volumes[global_volume]] += self.set_p[index]*(keq)
                        b_np[map_volumes[global_volume]] += self.set_p[index]*(keq)
                    #4
                    else:
                        #5
                        temp_id.append(map_volumes[global_adj])
                        temp_k.append(-keq)
                    #4 restore the full permeability tensor for the next adjacency
                    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                #3
                temp_k.append(soma)
                temp_id.append(map_volumes[global_volume])
                if global_volume in self.wells_n:
                    #4 add the prescribed rate (sign depends on the well type)
                    index = self.wells_n.index(global_volume)
                    tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0]
                    if tipo_de_poco == 1:
                        #5 injector
                        b[map_volumes[global_volume]] += self.set_q[index]
                        b_np[map_volumes[global_volume]] += self.set_q[index]
                    #4
                    else:
                        #5 producer
                        b[map_volumes[global_volume]] += -self.set_q[index]
                        b_np[map_volumes[global_volume]] += -self.set_q[index]
            #2
            A.InsertGlobalValues(map_volumes[global_volume], temp_k, temp_id)
            A_np[map_volumes[global_volume], temp_id] = temp_k
        #1
        A.FillComplete()
        x = self.solve_linear_problem(A, b, dim)
        x_np = np.linalg.solve(A_np, b_np)
        for volume in all_volumes_ic:
            #2 write back the corrected pressures
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            self.mb.tag_set_data(self.pcorr_tag, volume, x[map_volumes[global_volume]])
            self.mb.tag_set_data(self.pms2_tag, volume, x_np[map_volumes[global_volume]])
        #1
        for volume in set(all_volumes) - all_volumes_ic:
            #2 prescribed-pressure volumes get their prescription directly
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            index = self.wells_d.index(global_volume)
            p = self.set_p[index]
            self.mb.tag_set_data(self.pcorr_tag, volume, p)
            self.mb.tag_set_data(self.pms2_tag, volume, p)
def Neuman_problem_6(self):
    # self.set_of_collocation_points_elems = set()
    #0
    """
    Recompute the pressure inside each primal, using the multiscale
    fluxes across the primal boundary as Neumann data.

    The local system spans only the primal's own volumes. Row types:
      - Dirichlet well or collocation point: identity row pinned to the
        multiscale pressure;
      - boundary volume (in ``volumes_in_primal``): TPFA row over the
        in-primal neighbours, with the flux from out-of-primal
        neighbours, (padj - pvol)*keq, added to the RHS;
      - otherwise: standard row from ``mount_lines_1``.
    Prescribed rates of Neumann wells are added to the RHS (positive
    for injectors). Results are stored in ``pcorr_tag``.
    """
    for primal in self.primals:
        #1
        primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
        fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
        volumes_in_interface, volumes_in_primal = self.get_volumes_in_interfaces(
            fine_elems_in_primal, primal_id, flag = 1)
        all_volumes = list(fine_elems_in_primal)
        dim = len(all_volumes)
        # map entity handle -> local row index
        map_volumes = dict(zip(all_volumes, range(len(all_volumes))))
        std_map = Epetra.Map(len(all_volumes), 0, self.comm)
        b = Epetra.Vector(std_map)
        A = Epetra.CrsMatrix(Epetra.Copy, std_map, 3)
        # b_np = np.zeros(dim)
        # A_np = np.zeros((dim, dim))
        for volume in all_volumes:
            #2
            soma = 0  # NOTE(review): accumulated below but the diagonal uses -sum(temp_k)
            temp_k = []
            temp_id = []
            gid1 = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            centroid_volume = self.mesh_topo_util.get_average_position([volume])
            k_vol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
            if volume in self.wells_d or volume in self.set_of_collocation_points_elems:
                #3 Dirichlet: pin the multiscale pressure
                value = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                temp_k.append(1.0)
                temp_id.append(map_volumes[volume])
                b[map_volumes[volume]] = value
                #b_np[map_volumes[volume]] = value
            #2
            elif volume in volumes_in_primal:
                #3 boundary volume: TPFA row + prescribed boundary flux on the RHS
                for adj in adjs_vol:
                    #4
                    gid2 = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                    centroid_adj = self.mesh_topo_util.get_average_position([adj])
                    direction = centroid_adj - centroid_volume
                    uni = self.unitary(direction)
                    # h = abs(np.dot(direction, uni))
                    # project permeabilities on the face direction, weighted
                    # by the total mobility of each side
                    k_vol = np.dot(np.dot(k_vol,uni),uni)
                    k_vol = k_vol*(lamb_w_vol + lamb_o_vol)
                    k_adj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    k_adj = np.dot(np.dot(k_adj,uni),uni)
                    k_adj = k_adj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(k_vol, k_adj)
                    keq = keq*(np.dot(self.A, uni)/np.dot(self.h, uni))
                    if adj in all_volumes:
                        #5 neighbour inside the primal: regular matrix entry
                        soma += keq
                        temp_k.append(-keq)
                        temp_id.append(map_volumes[adj])
                    #4
                    else:
                        #5 neighbour outside: impose the multiscale flux on the RHS
                        q_in = (padj - pvol)*(keq)
                        b[map_volumes[volume]] += q_in
                        #b_np[map_volumes[volume]] += q_in
                    # restore the full permeability tensor for the next adjacency
                    k_vol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                #3 diagonal closes the row
                temp_k.append(-sum(temp_k))
                temp_id.append(map_volumes[volume])
                if volume in self.wells_n:
                    #4 add the prescribed rate (sign depends on the well type)
                    index = self.wells_n.index(volume)
                    if volume in self.wells_inj:
                        #5 injector
                        b[map_volumes[volume]] += self.set_q[index]
                        #b_np[map_volumes[volume]] += self.set_q[index]
                    #4
                    else:
                        #5 producer
                        b[map_volumes[volume]] -= self.set_q[index]
                        #b_np[map_volumes[volume]] -= self.set_q[index]
            #2
            else:
                #3 interior volume: standard TPFA row
                temp_k, temp_id = self.mount_lines_1(volume, map_volumes)
                if volume in self.wells_n:
                    #4
                    index = self.wells_n.index(volume)
                    if volume in self.wells_inj:
                        #5 injector
                        b[map_volumes[volume]] += self.set_q[index]
                        #b_np[map_volumes[volume]] += self.set_q[index]
                    #4
                    else:
                        #5 producer
                        b[map_volumes[volume]] -= self.set_q[index]
                        #b_np[map_volumes[volume]] -= self.set_q[index]
            #2
            A.InsertGlobalValues(map_volumes[volume], temp_k, temp_id)
            #A_np[map_volumes[volume], temp_id] = temp_k
        #1
        A.FillComplete()
        x = self.solve_linear_problem(A, b, dim)
        #x_np = np.linalg.solve(A_np, b_np)
        for volume in all_volumes:
            #2 store the corrected local pressure
            gid1 = self.mb.tag_get_data(self.global_id_tag, volume)[0][0]
            self.mb.tag_set_data(self.pcorr_tag, volume, x[map_volumes[volume]])
            #self.mb.tag_set_data(self.pms2_tag, volume, x_np[map_volumes[volume]])
def organize_op(self):
    """
    Drop from the prolongation operator the rows corresponding to
    volumes with prescribed pressure, renumbering the remaining rows
    through ``self.map_vols_ic``.
    """
    #0
    row_map = Epetra.Map(len(self.all_fine_vols_ic), 0, self.comm)
    filtered = Epetra.CrsMatrix(Epetra.Copy, row_map, 3)
    for elem in self.all_fine_vols_ic:
        #1 copy this volume's row into its new (compressed) position
        gid = self.mb.tag_get_data(self.global_id_tag, elem, flat=True)[0]
        extracted = self.trilOP.ExtractGlobalRowCopy(gid)
        filtered.InsertGlobalValues(self.map_vols_ic[elem], list(extracted[0]), list(extracted[1]))
    #0
    self.trilOP = filtered
    self.trilOP.FillComplete()
def organize_Pf(self):
    """
    Scatter the fine-scale solution ``self.Pf`` into a vector indexed by
    global id (adding back the Dirichlet-well pressures) and store the
    result in ``self.Pf_all``.
    """
    #0
    full_map = Epetra.Map(len(self.all_fine_vols),0,self.comm)
    full_vec = Epetra.Vector(full_map)
    # solved (unknown) volumes
    for i in range(len(self.Pf)):
        #1
        elem = self.map_vols_ic_2[i]
        gid = self.mb.tag_get_data(self.global_id_tag, elem, flat=True)[0]
        full_vec[gid] = self.Pf[i]
    #0 Dirichlet wells keep their prescribed pressure
    for i in range(len(self.wells_d)):
        #1
        elem = self.wells_d[i]
        gid = self.mb.tag_get_data(self.global_id_tag, elem, flat=True)[0]
        full_vec[gid] = self.set_p[i]
    #0
    self.Pf_all = full_vec
def organize_Pms(self):
    """
    Scatter the multiscale solution ``self.Pms`` into a vector indexed
    by global id (adding back the Dirichlet-well pressures) and store
    the result in ``self.Pms_all``.
    """
    #0
    full_map = Epetra.Map(len(self.all_fine_vols),0,self.comm)
    full_vec = Epetra.Vector(full_map)
    # solved (unknown) volumes
    for i in range(len(self.Pms)):
        #1
        elem = self.map_vols_ic_2[i]
        gid = self.mb.tag_get_data(self.global_id_tag, elem, flat=True)[0]
        full_vec[gid] = self.Pms[i]
    #0 Dirichlet wells keep their prescribed pressure
    for i in range(len(self.wells_d)):
        #1
        elem = self.wells_d[i]
        gid = self.mb.tag_get_data(self.global_id_tag, elem, flat=True)[0]
        full_vec[gid] = self.set_p[i]
    #0
    self.Pms_all = full_vec
def pol_interp(self, S, x, y):
    """
    Evaluate the Newton divided-difference interpolating polynomial of
    the table (x, y) at the saturation ``S`` and return |p(S)|.

    Parameters
    ----------
    S : saturation value at which to evaluate.
    x : saturation nodes.
    y : values to interpolate, y = f(x).

    When ``y`` is one of the relative-permeability tables
    (``self.krw_r`` / ``self.kro_r``) the result is clamped outside the
    physically meaningful range.
    """
    n = len(x)
    # build the divided-difference table, one order per row
    table = []
    for order in range(1, n):
        current = []
        if order == 1:
            # first-order differences straight from the data
            for j in range(n - order):
                current.append((y[j + 1] - y[j])/float(x[j + 1] - x[j]))
        else:
            # higher orders from the previous row
            previous = table[-1]
            for j in range(n - order):
                current.append((previous[j + 1] - previous[j])/float(x[j + order] - x[j]))
        table.append(current[:])
    # Newton-form coefficients are the first entry of each row
    coeffs = [row[0] for row in table]
    pol = y[0]
    basis = 1
    for j in range(n - 1):
        basis = (S - x[j])*basis
        pol = pol + basis*coeffs[j]
    # clamp the relative-permeability curves outside their valid range
    if y == self.krw_r:
        if S <= 0.2:
            pol = 0.0
    elif y == self.kro_r:
        if S <= 0:
            pol = 1.0
        elif S >= 0.9:
            pol = 0.0
    return abs(pol)
def pol_interp_2(self, S):
    """
    Corey-type relative permeabilities for the water saturation ``S``.

    Returns (krw, kro), clamped to the end points outside the mobile
    window [Swc, 1 - Sor].
    """
    # S_temp = (S - self.Swc)/(1 - self.Swc - self.Sor)
    # krw = (S_temp)**(self.nw)
    # kro = (1 - S_temp)**(self.no)
    if S > (1 - self.Sor):
        # beyond the residual-oil limit: only water flows
        return 1.0, 0.0
    if S < self.Swc:
        # below connate water: only oil flows
        return 0.0, 1.0
    mobile = 1 - self.Swc - self.Sor  # width of the mobile saturation window
    krw = ((S - self.Swc)/float(mobile))**(self.nw)
    kro = ((1 - S - self.Swc)/float(mobile))**(self.no)
    return krw, kro
def pol_interp_3(self, S):
    """
    Relative permeabilities from Ribeiro's model.

    Returns (krw, kro) for the water saturation ``S``: power laws of the
    normalized saturation inside [Sw_inf, Sw_sup], constant end-point
    values outside.
    """
    # normalized saturation (computed unconditionally, as in the original)
    S_ = (S - self.Sw_inf)/float(self.Sw_sup - self.Sw_inf)
    if S <= self.Sw_inf:
        # kro = np.interp(S, [0.0, 0.1], [1.0, 0.8])
        return 0.0, 0.85
    if S >= self.Sw_sup:
        # krw = np.interp(S, [0.85, 1.0], [0.1, 1.0])
        return 0.1, 0.0
    return 0.1*(S_**2), 0.8*((1 - S_)**4)
def pol_interp_4(self, S):
    """
    Relative permeabilities from Oliveira's model.

    Returns (krw, kro) for the water saturation ``S``: linear
    interpolation of the end-point tables below Sac and above 1 - Soc,
    Corey-type power laws in between.
    """
    oil_nodes, oil_values = [0.0, 0.25], [1.0, 0.85]     # kro end-point table
    water_nodes, water_values = [0.65, 1.0], [0.4, 1.0]  # krw end-point table
    if S <= self.Sac:
        # below critical water saturation: water immobile
        return 0.0, np.interp(S, oil_nodes, oil_values)
    if S >= (1 - self.Soc):
        # above residual oil: oil immobile
        return np.interp(S, water_nodes, water_values), 0.0
    span = 1 - self.Sac - self.Soc  # mobile saturation window
    kro = self.kro_Sac*((1 - S - self.Soc)/span)**self.no_2
    krw = self.kra_Soc*((S - self.Sac)/span)**self.nw_2
    return krw, kro
def pymultimat(self, A, B, nf):
    """Return the product C = A*B of two nf-row Epetra matrices."""
    product_map = Epetra.Map(nf, 0, self.comm)
    product = Epetra.CrsMatrix(Epetra.Copy, product_map, 3)
    # False, False -> neither operand is transposed
    EpetraExt.Multiply(A, False, B, False, product)
    product.FillComplete()
    return product
def read_perm_rel(self):
    """
    Load the tabulated relative-permeability data from ``perm_rel.py``
    (used by ``pol_interp``).

    The first line is a header; each following line holds four columns:
    Sw, kro, krw, pc. They are stored in ``self.Sw_r``, ``self.kro_r``,
    ``self.krw_r`` and ``self.pc_r`` respectively.
    """
    with open("perm_rel.py", "r") as arq:
        lines = arq.readlines()
    self.Sw_r = []
    self.krw_r = []
    self.kro_r = []
    self.pc_r = []
    # skip the header line
    for line in lines[1:]:
        cols = line.split()
        self.Sw_r.append(float(cols[0]))
        self.kro_r.append(float(cols[1]))
        self.krw_r.append(float(cols[2]))
        self.pc_r.append(float(cols[3]))
def read_perms_and_phi_spe10(self):
nx = 60
ny = 220
nz = 85
N = nx*ny*nz
# l1 = [N, 2*N, 3*N]
# l2 = [0, 1, 2]
#
# ks = np.loadtxt('spe_perm.dat')
# t1, t2 = ks.shape
# ks = ks.reshape((t1*t2))
# ks2 = np.zeros((N, 9))
#
#
# for i in range(0, N):
# # as unidades do spe_10 estao em milidarcy
# # unidade de darcy em metro quadrado = (1 Darcy)*(9.869233e-13 m^2/Darcy)
# # fonte -- http://www.calculator.org/property.aspx?name=permeability
# ks2[i, 0] = ks[i]*(10**(-3))# *9.869233e-13
#
# cont = 0
# for i in range(N, 2*N):
# ks2[cont, 4] = ks[i]*(10**(-3))# *9.869233e-13
# cont += 1
#
# cont = 0
# for i in range(2*N, 3*N):
# ks2[cont, 8] = ks[i]*(10**(-3))# *9.869233e-13
# cont += 1
#
#
#
# cont = None
# phi = np.loadtxt('spe_phi.dat')
# t1, t2 = phi.shape
# phi = phi.reshape(t1*t2)
# np.savez_compressed('spe10_perms_and_phi', perms = ks2, phi = phi)
# ks2 = None
#
# # obter a permeabilidade de uma regiao
# # digitar o inicio e o fim da regiao
ks = np.load('spe10_perms_and_phi.npz')['perms']
phi = np.load('spe10_perms_and_phi.npz')['phi']
gid1 = [0, 0, 50]
gid2 = [gid1[0] + self.nx-1, gid1[1] + self.ny-1, gid1[2] + self.nz-1]
gid1 = np.array(gid1)
gid2 = np.array(gid2)
dif = gid2 - gid1 + np.array([1, 1, 1])
permeabilidade = []
fi = []
cont = 0
for k in range(dif[2]):
for j in range(dif[1]):
for i in range(dif[0]):
gid = gid1 + np.array([i, j, k])
gid = gid[0] + gid[1]*nx + gid[2]*nx*ny
# permeabilidade[cont] = ks[gid]
permeabilidade.append(ks[gid])
fi.append(phi[gid])
cont += 1
cont = 0
for volume in self.all_fine_vols:
self.mb.tag_set_data(self.perm_tag, volume, permeabilidade[cont])
self.mb.tag_set_data(self.fi_tag, volume, fi[cont])
cont += 1
# self.mb.tag_set_data(self.perm_tag, self.all_fine_vols, permeabilidade)
# self.mb.tag_set_data(self.fi_tag, self.all_fine_vols, fi)
for volume in self.all_fine_vols:
gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)
perm = self.mb.tag_get_data(self.perm_tag, volume).reshape([3,3])
fi2 = self.mb.tag_get_data(self.fi_tag, volume, flat = True)[0]
def read_structured(self):
with open('structured.cfg', 'r') as arq:
text = arq.readlines()
config = configparser.ConfigParser()
config.read('structured.cfg')
StructuredMS = config['StructuredMS']
mesh_size = list(map(int, StructuredMS['mesh-size'].strip().replace(',', '').split()))
coarse_ratio = list(map(int, StructuredMS['coarse-ratio'].strip().replace(',', '').split()))
block_size = list(map(float, StructuredMS['block-size'].strip().replace(',', '').split()))
##### Razoes de engrossamento
crx = coarse_ratio[0]
cry = coarse_ratio[1]
crz = coarse_ratio[2]
##### Numero de elementos nas respectivas direcoes
nx = mesh_size[0]
ny = mesh_size[1]
nz = mesh_size[2]
##### Tamanho dos elementos nas respectivas direcoes
hx = block_size[0]
hy = block_size[1]
hz = block_size[2]
h = np.array([hx, hy, hz])
#### Tamanho inteiro do dominio nas respectivas direcoes
tx = nx*hx
ty = ny*hy
tz = nz*hz
#### tamanho dos elementos ao quadrado
h2 = np.array([hx**2, hy**2, hz**2])
##### Area dos elementos nas direcoes cartesianass
ax = hy*hz
ay = hx*hz
az = hx*hy
A = np.array([ax, ay, az])
##### Volume dos elementos
V = hx*hy*hz
hmin = min(hx, hy, hz)
V = hx*hy*hz
self.nx = nx # numero de volumes na direcao x
self.ny = ny # numero de volumes na direcao y
self.nz = nz # numero de volumes na direcao z
self.h2 = h2 # vetor com os tamanhos ao quadrado de cada volume
self.h = h # vetor com os tamanhos de cada volume
self.V = V # volume de um volume da malha fina
self.A = A # vetor com as areas
self.tz = tz # tamanho total na direcao z
self.viz_x = [1, -1]
self.viz_y = [nx, -nx]
self.viz_z = [nx*ny, -nx*ny]
def set_erro(self):
"""
modulo da diferenca entre a pressao da malha fina e a multiescala
"""
for volume in self.all_fine_vols:
Pf = mb.tag_get_data(self.pf_tag, volume, flat = True)[0]
Pms = mb.tag_get_data(self.pms_tag, volume, flat = True)[0]
erro = abs(Pf - Pms)/float(abs(Pf))
mb.tag_set_data(self.err_tag, volume, erro)
def set_fi(self):
fi = 0.3
self.mb.tag_set_data(self.fi_tag, self.all_fine_vols, np.repeat(fi, len(self.all_fine_vols)))
    def set_global_problem(self):
        """
        Assemble the fine-grid transmissibility matrix (self.trans_fine)
        and source vector (self.b) by a two-point flux approximation,
        weighting each face by the total mobility (lamb_w + lamb_o).

        Dirichlet wells (self.wells_d) get an identity row with the
        prescribed pressure in b; Neumann wells (self.wells_n) put the
        prescribed rate in b, signed by tipo_de_poco.

        NOTE(review): uses module-level `mb`, `comm` and `mesh_topo_util`,
        unlike the methods that access self.mb — confirm intent.
        """
        std_map = Epetra.Map(len(self.all_fine_vols), 0, comm)
        # up to 7 nonzeros per row: 6 face neighbours + the diagonal
        self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
        self.b = Epetra.Vector(std_map)
        for volume in self.all_fine_vols:
            volume_centroid = mesh_topo_util.get_average_position([volume])
            adj_volumes = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            if global_volume not in self.wells_d:
                soma = 0.0
                temp_glob_adj = []
                temp_k = []
                for adj in adj_volumes:
                    global_adj = mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    adj_centroid = mesh_topo_util.get_average_position([adj])
                    direction = adj_centroid - volume_centroid
                    uni = self.unitary(direction)
                    # project the tensors onto the face direction and scale
                    # by the total mobility of each side
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq/(np.dot(self.h2, uni))
                    temp_glob_adj.append(global_adj)
                    temp_k.append(keq)
                    soma = soma + keq
                # NOTE(review): kvol is restored only AFTER the neighbour
                # loop; inside the loop it is overwritten with a projected
                # scalar, so from the 2nd neighbour on kequiv receives a
                # clobbered value — confirm whether this is intended.
                kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                soma = -1*soma
                temp_k.append(soma)
                temp_glob_adj.append(global_volume)
                #print(temp_k)
                #print(temp_glob_adj)
                self.trans_fine.InsertGlobalValues(global_volume, temp_k, temp_glob_adj)
                if global_volume in self.wells_n:
                    index = self.wells_n.index(global_volume)
                    # NOTE(review): tag_get_data returns an array here (no
                    # [0][0]); the comparison below relies on elementwise
                    # truthiness — confirm.
                    tipo_de_poco = mb.tag_get_data(self.tipo_de_poco_tag, volume)
                    if tipo_de_poco == 1:
                        self.b[global_volume] = -self.set_q[index]
                    else:
                        self.b[global_volume] = self.set_q[index]
            else:
                # Dirichlet well: identity row, prescribed pressure in b
                index = self.wells_d.index(global_volume)
                self.trans_fine.InsertGlobalValues(global_volume, [1.0], [global_volume])
                self.b[global_volume] = self.set_p[index]
        self.trans_fine.FillComplete()
    def set_global_problem_gr_vf(self):
        """
        Assemble the fine-grid transmissibility (finite-volume form) with a
        gravity contribution on vertical faces, filling self.trans_fine
        and self.b.

        For each vertical face (uni z-component == 1) hydrostatic terms
        keq*gama_*(tz - z) are accumulated into soma2/soma3 and moved to
        the right-hand side. Dirichlet wells get identity rows; Neumann
        wells add their prescribed rate.

        NOTE(review): self.gama is assigned here but the loop reads
        self.gama_ (trailing underscore) — confirm which attribute holds
        the intended specific weight.
        NOTE(review): uses module-level `mb`, `comm`, `mesh_topo_util`.
        """
        self.gama = 1.0
        std_map = Epetra.Map(len(self.all_fine_vols),0,comm)
        self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
        self.b = Epetra.Vector(std_map)
        for volume in self.all_fine_vols:
            volume_centroid = mesh_topo_util.get_average_position([volume])
            adj_volumes = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            if global_volume not in self.wells_d:
                soma = 0.0
                soma2 = 0.0
                soma3 = 0.0
                temp_glob_adj = []
                temp_k = []
                for adj in adj_volumes:
                    global_adj = mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    adj_centroid = mesh_topo_util.get_average_position([adj])
                    direction = adj_centroid - volume_centroid
                    altura = adj_centroid[2]
                    uni = self.unitary(direction)
                    z = uni[2]
                    # project tensors onto the face direction, scale by total mobility
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq*(np.dot(self.A, uni))/(np.dot(self.h, uni))
                    if z == 1.0:
                        # vertical face: accumulate hydrostatic terms
                        keq2 = keq*self.gama_
                        soma2 = soma2 + keq2
                        soma3 = soma3 + (-keq2*(self.tz-altura))
                    temp_glob_adj.append(global_adj)
                    temp_k.append(keq)
                    soma = soma + keq
                soma2 = soma2*(self.tz-volume_centroid[2])
                soma2 = -(soma2 + soma3)
                soma = -1*soma
                temp_k.append(soma)
                temp_glob_adj.append(global_volume)
                self.trans_fine.InsertGlobalValues(global_volume, temp_k, temp_glob_adj)
                if global_volume in self.wells_n:
                    index = self.wells_n.index(global_volume)
                    tipo_de_poco = mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
                    if tipo_de_poco == 1:
                        self.b[global_volume] = -self.set_q[index] + soma2
                    else:
                        self.b[global_volume] = self.set_q[index] + soma2
                else:
                    self.b[global_volume] = soma2
                    # NOTE(review): kvol is restored only on this branch and
                    # never inside the neighbour loop — from the 2nd
                    # neighbour on kequiv receives a clobbered scalar.
                    kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            else:
                # Dirichlet well: identity row, prescribed pressure in b
                index = self.wells_d.index(global_volume)
                self.trans_fine.InsertGlobalValues(global_volume, [1.0], [global_volume])
                self.b[global_volume] = self.set_p[index]
        self.trans_fine.FillComplete()
    def set_global_problem_vf(self):
        """
        Assemble the fine-grid transmissibility in finite-volume form
        (face area over distance scaling) with total-mobility weighting,
        filling self.trans_fine and self.b.

        Dirichlet wells get identity rows with the prescribed pressure in
        b; Neumann wells put their prescribed rate in b.

        NOTE(review): kvol is restored only after the neighbour loop, so
        from the 2nd neighbour on kequiv receives the scalar left by the
        previous projection — same pattern as set_global_problem; confirm.
        NOTE(review): uses module-level `mb`, `comm`, `mesh_topo_util`.
        """
        std_map = Epetra.Map(len(self.all_fine_vols),0, comm)
        self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
        self.b = Epetra.Vector(std_map)
        for volume in self.all_fine_vols:
            volume_centroid = mesh_topo_util.get_average_position([volume])
            adj_volumes = mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            global_volume = mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            if global_volume not in self.wells_d:
                soma = 0.0
                temp_glob_adj = []
                temp_k = []
                for adj in adj_volumes:
                    global_adj = mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    adj_centroid = mesh_topo_util.get_average_position([adj])
                    direction = adj_centroid - volume_centroid
                    uni = self.unitary(direction)
                    # project tensors onto the face direction, scale by total mobility
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_adj = mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
                    temp_glob_adj.append(global_adj)
                    temp_k.append(keq)
                    soma = soma + keq
                kvol = mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                soma = -1*soma
                temp_k.append(soma)
                temp_glob_adj.append(global_volume)
                self.trans_fine.InsertGlobalValues(global_volume, temp_k, temp_glob_adj)
                if global_volume in self.wells_n:
                    index = self.wells_n.index(global_volume)
                    # NOTE(review): no [0][0] here — tipo_de_poco is an
                    # array and the comparison relies on its truthiness.
                    tipo_de_poco = mb.tag_get_data(self.tipo_de_poco_tag, volume)
                    if tipo_de_poco == 1:
                        self.b[global_volume] = -self.set_q[index]
                    else:
                        self.b[global_volume] = self.set_q[index]
            else:
                # Dirichlet well: identity row, prescribed pressure in b
                index = self.wells_d.index(global_volume)
                self.trans_fine.InsertGlobalValues(global_volume, [1.0], [global_volume])
                self.b[global_volume] = self.set_p[index]
        self.trans_fine.FillComplete()
        # NOTE(review): the string below is a dead no-op statement kept
        # from a debugging session (row-sum check of trans_fine).
        """for i in range(self.nf):
            p = self.trans_fine.ExtractGlobalRowCopy(i)
            print(p[0])
            print(p[1])
            print('soma')
            print(sum(p[0]))
            if abs(sum(p[0])) > 0.000001 and abs(sum(p[0])) != 1.0:
                print('Erroooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo')
            print('\n')"""
    def set_global_problem_vf_2(self, vector_flux):
        """
        Assemble the fine-grid transmissibility excluding the volumes with
        prescribed pressure, using upwind mobility weighting: the face
        mobility is taken from the upstream cell according to the sign of
        vector_flux on that face.

        Rows/columns are indexed through self.map_vols_ic. Volumes next to
        Dirichlet wells (self.neigh_wells_d) move the known pressures to
        the right-hand side; Neumann wells add/subtract their prescribed
        rate depending on injector/producer membership.

        NOTE(review): `p_tag` (read into p_vol/p_adj below) is not a
        parameter of this method and is not visible in this scope; both
        values are unused — confirm whether it resolves to a module
        global or is leftover dead code.
        """
        #0
        std_map = Epetra.Map(len(self.all_fine_vols_ic),0,self.comm)
        self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
        self.b = Epetra.Vector(std_map)
        # volumes away from any Dirichlet well
        for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):
            #1
            p_vol = self.mb.tag_get_data(p_tag, volume, flat=True)[0]
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            lbt_vol = lamb_w_vol + lamb_o_vol
            soma = 0.0
            temp_glob_adj = []
            temp_k = []
            for adj in adj_volumes:
                #2
                p_adj = self.mb.tag_get_data(p_tag, adj, flat=True)[0]
                global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                adj_centroid = self.mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                uni = self.unitary(direction)
                unit = direction/np.linalg.norm(direction)
                kvol = np.dot(np.dot(kvol,uni),uni)
                #kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                lbt_adj = lamb_w_adj + lamb_o_adj
                #kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                # upwind: pick the mobility of the upstream cell from the
                # flux sign. NOTE(review): `unit` is a float ndarray used
                # to index vector_flux[volume] — confirm the key type.
                if vector_flux[volume][unit] < 0:
                    keq = keq * lbt_vol
                else:
                    keq = keq * lbt_adj
                keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))
                temp_glob_adj.append(self.map_vols_ic[adj])
                temp_k.append(-keq)
                soma = soma + keq
                # restore the full tensor clobbered by the projection above
                kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            #1
            temp_k.append(soma)
            temp_glob_adj.append(self.map_vols_ic[volume])
            self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)
            if volume in self.wells_n:
                #2
                index = self.wells_n.index(volume)
                if volume in self.wells_inj:
                    #3
                    self.b[self.map_vols_ic[volume]] += self.set_q[index]
                #2
                else:
                    #3
                    self.b[self.map_vols_ic[volume]] += -self.set_q[index]
        #0
        # volumes adjacent to a Dirichlet well
        for volume in self.neigh_wells_d:
            #1
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            lbt_vol = lamb_w_vol + lamb_o_vol
            soma = 0.0
            temp_glob_adj = []
            temp_k = []
            for adj in adj_volumes:
                #2
                global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                adj_centroid = self.mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                uni = self.unitary(direction)
                unit = direction/np.linalg.norm(direction)
                kvol = np.dot(np.dot(kvol,uni),uni)
                #kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                lbt_adj = lamb_w_adj + lamb_o_adj
                #kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                if vector_flux[volume][unit] < 0:
                    keq = keq * lbt_vol
                else:
                    keq = keq * lbt_adj
                keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))
                if adj in self.wells_d:
                    #3
                    # known pressure: move the term to the right-hand side
                    soma = soma + keq
                    index = self.wells_d.index(adj)
                    self.b[self.map_vols_ic[volume]] += self.set_p[index]*(keq)
                #2
                else:
                    #3
                    temp_glob_adj.append(self.map_vols_ic[adj])
                    temp_k.append(-keq)
                    soma = soma + keq
                #2
                kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            #1
            temp_k.append(soma)
            temp_glob_adj.append(self.map_vols_ic[volume])
            self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)
            if volume in self.wells_n:
                #2
                index = self.wells_n.index(volume)
                if volume in self.wells_inj:
                    #3
                    self.b[self.map_vols_ic[volume]] += self.set_q[index]
                #2
                else:
                    #3
                    self.b[self.map_vols_ic[volume]] += -self.set_q[index]
        #0
        self.trans_fine.FillComplete()
def set_global_problem_vf_3(self):
"""
transmissibilidade da malha fina excluindo os volumes com pressao prescrita
usando a mobilidade media
"""
#0
std_map = Epetra.Map(len(self.all_fine_vols_ic),0,self.comm)
self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)
self.b = Epetra.Vector(std_map)
for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):
#1
volume_centroid = self.mesh_topo_util.get_average_position([volume])
adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
lbt_vol = lamb_w_vol + lamb_o_vol
soma = 0.0
temp_glob_adj = []
temp_k = []
for adj in adj_volumes:
#2
global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
adj_centroid = self.mesh_topo_util.get_average_position([adj])
direction = adj_centroid - volume_centroid
uni = self.unitary(direction)
kvol = np.dot(np.dot(kvol,uni),uni)
#kvol = kvol*(lamb_w_vol + lamb_o_vol)
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
kadj = np.dot(np.dot(kadj,uni),uni)
lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
lbt_adj = lamb_w_adj + lamb_o_adj
#kadj = kadj*(lamb_w_adj + lamb_o_adj)
keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)
keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))
temp_glob_adj.append(self.map_vols_ic[adj])
temp_k.append(-keq)
soma = soma + keq
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#1
temp_k.append(soma)
temp_glob_adj.append(self.map_vols_ic[volume])
self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)
if volume in self.wells_n:
#2
index = self.wells_n.index(volume)
if volume in self.wells_inj:
#3
self.b[self.map_vols_ic[volume]] += self.set_q[index]
#2
else:
#3
self.b[self.map_vols_ic[volume]] += -self.set_q[index]
#0
for volume in self.neigh_wells_d:
#1
volume_centroid = self.mesh_topo_util.get_average_position([volume])
adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
lbt_vol = lamb_w_vol + lamb_o_vol
soma = 0.0
temp_glob_adj = []
temp_k = []
for adj in adj_volumes:
#2
global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
adj_centroid = self.mesh_topo_util.get_average_position([adj])
direction = adj_centroid - volume_centroid
uni = self.unitary(direction)
kvol = np.dot(np.dot(kvol,uni),uni)
#kvol = kvol*(lamb_w_vol + lamb_o_vol)
kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
kadj = np.dot(np.dot(kadj,uni),uni)
lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
lbt_adj = lamb_o_adj + lamb_o_adj
#kadj = kadj*(lamb_w_adj + lamb_o_adj)
keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)
keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))
if adj in self.wells_d:
#3
soma = soma + keq
index = self.wells_d.index(adj)
self.b[self.map_vols_ic[volume]] += self.set_p[index]*(keq)
#2
else:
#3
temp_glob_adj.append(self.map_vols_ic[adj])
temp_k.append(-keq)
soma = soma + keq
#2
kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
#1
temp_k.append(soma)
temp_glob_adj.append(self.map_vols_ic[volume])
self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)
if volume in self.wells_n:
#2
index = self.wells_n.index(volume)
if volume in self.wells_inj:
#3
self.b[self.map_vols_ic[volume]] += self.set_q[index]
#2
else:
#3
self.b[self.map_vols_ic[volume]] += -self.set_q[index]
#0
self.trans_fine.FillComplete()
def set_k(self):
"""
seta as permeabilidades dos volumes
"""
perm_tensor = [1, 0.0, 0.0,
0.0, 1, 0.0,
0.0, 0.0, 1]
for volume in self.all_fine_vols:
self.mb.tag_set_data(self.perm_tag, volume, perm_tensor)
# perm_tensor_1 = [1.0, 0.0, 0.0,
# 0.0, 1.0, 0.0,
# 0.0, 0.0, 1.0]
#
# perm_tensor_2 = [0.5, 0.0, 0.0,
# 0.0, 0.5, 0.0,
# 0.0, 0.0, 0.5]
#
# gid1 = np.array([0, 0, 0])
# gid2 = np.array([int((self.nx - 1)/2.0), int(self.ny-1), int(self.nz-1)])
# dif = gid2 - gid1 + np.array([1, 1, 1])
#
# gids = []
# for k in range(dif[2]):
# for j in range(dif[1]):
# for i in range(dif[0]):
# gid = gid1 + np.array([i, j, k])
# gid = gid[0] + gid[1]*self.nx + gid[2]*self.nx*self.ny
# gids.append(gid)
#
#
# for volume in self.all_fine_vols:
# gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]
# if gid_vol in gids:
# self.mb.tag_set_data(self.perm_tag, volume, perm_tensor_1)
# else:
# self.mb.tag_set_data(self.perm_tag, volume, perm_tensor_2)
# for volume in self.all_fine_vols:
# k = random.randint(1, 10001)*1e-3
#
# perm_tensor = [k, 0.0, 0.0,
# 0.0, k, 0.0,
# 0.0, 0.0, k]
#
# self.mb.tag_set_data(self.perm_tag, volume, perm_tensor)
# perm_tensor = [10.0, 0.0, 0.0,
# 0.0, 10.0, 0.0,
# 0.0, 0.0, 1.0]
# for volume in self.all_fine_vols:
# self.mb.tag_set_data(self.perm_tag, volume, perm_tensor)
# perm_tensor = [10.0, 0.0, 0.0,
# 0.0, 10.0, 0.0,
# 0.0, 0.0, 1.0]
#
# perm_tensor2 = [20.0, 0.0, 0.0,
# 0.0, 20.0, 0.0,
# 0.0, 0.0, 2.0]
#
# cont = 0
# for elem in self.all_fine_vols:
# if cont%2 == 0:
# self.mb.tag_set_data(self.perm_tag, elem, perm_tensor)
# else:
# self.mb.tag_set_data(self.perm_tag, elem, perm_tensor2)
# cont += 1
# for volume in self.all_fine_vols:
# k = random.randint(1, 10001)*1e-3
# perm_tensor = [k, 0, 0,
# 0, k, 0,
# 0, 0, 0.1*k]
# # perms.append(perm_tensor)
# self.mb.tag_set_data(self.perm_tag, volume, perm_tensor)
# perm = []
# for volume in self.all_fine_vols:
# k = random.randint(1, 1001)*(10**(-3))
# perm_tensor = [k, 0, 0,
# 0, k, 0,
# 0, 0, k]
# perm.append(np.array(perm_tensor))
# self.mb.tag_set_data(self.perm_tag, volume, perm_tensor)
#
# perm = np.array(perm)
#
# np.savez_compressed('perms2', perms = perm)
# perm = np.load('perms_het.npz')['perms']
#
# cont = 0
# for volume in self.all_fine_vols:
# self.mb.tag_set_data(self.perm_tag, volume, perm[cont])
# cont += 1
# # cont = 0
def set_lamb(self):
"""
seta o lambda usando pol_interp
"""
for volume in self.all_fine_vols:
global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]
S = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
krw = self.pol_interp(S, self.Sw_r, self.krw_r)
kro = self.pol_interp(S, self.Sw_r, self.kro_r)
lamb_w = krw/self.mi_w
lamb_o = kro/self.mi_o
self.mb.tag_set_data(self.lamb_w_tag, volume, lamb_w)
self.mb.tag_set_data(self.lamb_o_tag, volume, lamb_o)
def set_lamb_2(self):
"""
seta o lambda
"""
for volume in self.all_fine_vols:
S = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
krw, kro = self.pol_interp_2(S)
lamb_w = krw/self.mi_w
lamb_o = kro/self.mi_o
lbt = lamb_w + lamb_o
gid = self.mb.tag_get_data(self.global_id_tag, volume)[0][0]
fw = lamb_w/float(lbt)
self.mb.tag_set_data(self.lamb_w_tag, volume, lamb_w)
self.mb.tag_set_data(self.lamb_o_tag, volume, lamb_o)
self.mb.tag_set_data(self.fw_tag, volume, fw)
self.mb.tag_set_data(self.lbt_tag, volume, lbt)
def set_Pc(self):
"""
seta as pressoes da malha grossa primal
"""
for primal in self.primals:
primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
primal_id = self.ident_primal[primal_id]
fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
value = self.Pc[primal_id]
self.mb.tag_set_data(
self.pc_tag,
fine_elems_in_primal,
np.repeat(value, len(fine_elems_in_primal)))
def set_sat_in(self):
"""
seta a saturacao inicial
"""
l = []
for volume in self.wells:
tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)[0][0]
if tipo_de_poco == 1:
gid = self.mb.tag_get_data(self.global_id_tag, volume)[0][0]
l.append(gid)
for volume in self.all_fine_vols:
gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
if gid in l:
self.mb.tag_set_data(self.sat_tag, volume, 1.0)
else:
self.mb.tag_set_data(self.sat_tag, volume, 0.2)
    def set_vel(self, p_tag):
        """
        Compute a per-volume velocity estimate from the pressure field and
        store it in vel_tag.

        For each volume only fluxes toward higher-global-id neighbours
        ("forward" faces in x, y and z) are kept — one component per
        direction in v1.

        NOTE(review): the p_tag parameter is never used; pressures are
        read from self.p_tag instead — confirm which tag is intended.
        NOTE(review): kvol is never restored inside the neighbour loop,
        so from the 2nd neighbour on kequiv receives a clobbered scalar.
        """
        for volume in self.all_fine_vols_ic:
            # one velocity component per positive direction
            v1 = np.zeros(3)
            # v2 = np.zeros(3)
            adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            # global ids of the neighbours in front/back of each direction
            # (front/back themselves are currently unused)
            front = np.array([global_volume + self.viz_x[0], global_volume + self.viz_y[0], global_volume + self.viz_z[0]])
            back = np.array([global_volume - self.viz_x[0], global_volume - self.viz_y[0], global_volume - self.viz_z[0]])
            viz_x = np.array([global_volume + self.viz_x[0], global_volume - self.viz_x[0]])
            viz_y = np.array([global_volume + self.viz_y[0], global_volume - self.viz_y[0]])
            viz_z = np.array([global_volume + self.viz_z[0], global_volume - self.viz_z[0]])
            lbt_vol = self.mb.tag_get_data(self.lbt_tag, volume)[0][0]
            pvol = self.mb.tag_get_data(self.p_tag, volume)[0][0]
            for adj in adj_volumes:
                global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                padj = self.mb.tag_get_data(self.p_tag, adj)[0][0]
                lbt_adj = self.mb.tag_get_data(self.lbt_tag, adj)[0][0]
                adj_centroid = self.mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                uni = self.unitary(direction)
                # project tensors onto the face direction, scale by total mobility
                kvol = np.dot(np.dot(kvol,uni),uni)
                kvol = kvol*(lbt_vol)
                kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                kadj = kadj*(lbt_adj)
                keq = self.kequiv(kvol, kadj)
                # keq = keq*(np.dot(self.A, uni)/(np.dot(self.h, uni)))
                # Darcy velocity across the face: v = -keq * grad(p)
                grad_p = (padj - pvol)/float(np.dot(self.h, uni))
                vel = -(grad_p)*keq
                # if global_adj in front:
                if global_adj > global_volume:
                    # keep only the forward-face flux of each direction
                    if global_adj in viz_x:
                        v1[0] = vel
                    elif global_adj in viz_y:
                        v1[1] = vel
                    else:
                        v1[2] = vel
                else:
                    # backward faces are discarded
                    # if global_adj in viz_x:
                    #     v2[0] = vel
                    # elif global_adj in viz_y:
                    #     v2[1] = vel
                    # else:
                    #     v2[2] = vel
                    pass
            #1
            self.mb.tag_set_data(self.vel_tag, volume, v1)
def set_volumes_in_primal(self):
volumes_in_primal_set = self.mb.create_meshset()
for primal in self.primals:
primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
volumes_in_interface, volumes_in_primal = self.get_volumes_in_interfaces(
fine_elems_in_primal, primal_id, flag = 1)
self.mb.add_entities(volumes_in_primal_set, volumes_in_primal)
self.mb.tag_set_data(self.volumes_in_primal_tag, 0, volumes_in_primal_set)
# volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]
# volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)
#
# for primal in self.primals:
# primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
# fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
# volumes_in_primal = set(fine_elems_in_primal) & set(volumes_in_primal_set)
# gids = self.mb.tag_get_data(self.global_id_tag, volumes_in_primal, flat=True)
#
# print(gids)
# import pdb; pdb.set_trace()
def solve_linear_problem(self, A, b, n):
"""
resolve o sistema linear da matriz A e termo fonte b
"""
std_map = Epetra.Map(n, 0, self.comm)
x = Epetra.Vector(std_map)
linearProblem = Epetra.LinearProblem(A, x, b)
solver = AztecOO.AztecOO(linearProblem)
solver.SetAztecOption(AztecOO.AZ_output, AztecOO.AZ_warnings)
solver.Iterate(10000, 1e-15)
return x
def solve_linear_problem_numpy(self):
trans_fine_np = np.zeros((self.nf, self.nf))
b_np = np.zeros(self.nf)
for i in range(self.nf):
p = self.trans_fine.ExtractGlobalRowCopy(i)
#print(p[0])
#print(p[1])
trans_fine_np[i, p[1]] = p[0]
b_np[i] = self.b[i]
self.Pf2 = np.linalg.solve(trans_fine_np, b_np)
mb.tag_set_data(self.pf2_tag, self.all_fine_vols, np.asarray(self.Pf2))
    def test_conservation_coarse(self):
        """
        Check whether the flux is conservative on the coarse-grid volumes,
        using the multiscale pressure (pms_tag) to compute the fluxes
        across each primal's interface.

        Writes the per-primal net fluxes to 'Qc_<loop>.txt', tags them on
        the fine elements via flux_coarse_tag, and drops into pdb when the
        total exceeds the tolerance `lim`.
        """
        #0
        lim = 1e-5
        soma = 0
        Qc2 = []
        prim = []
        for primal in self.primals:
            #1
            Qc = 0
            # interface neighbours already visited (each face counted once)
            my_adjs = set()
            primal_id1 = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]
            primal_id = self.ident_primal[primal_id1]
            fine_elems_in_primal = self.mb.get_entities_by_handle(primal)
            volumes_in_interface, volumes_in_primal = self.get_volumes_in_interfaces(
                fine_elems_in_primal, primal_id1, flag = 1)
            gids = self.mb.tag_get_data(self.global_id_tag, fine_elems_in_primal, flat=True)
            for volume in volumes_in_primal:
                #2
                gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
                adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
                for adj in adjs_vol:
                    #3
                    # only faces crossing the primal interface contribute
                    if adj not in volumes_in_interface or adj in my_adjs:
                        continue
                    my_adjs.add(adj)
                    gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]
                    pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]
                    padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]
                    kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
                    kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                    centroid_volume = self.mesh_topo_util.get_average_position([volume])
                    centroid_adj = self.mesh_topo_util.get_average_position([adj])
                    direction = centroid_adj - centroid_volume
                    uni = self.unitary(direction)
                    # project tensors onto the face direction, scale by total mobility
                    kvol = np.dot(np.dot(kvol,uni),uni)
                    kadj = np.dot(np.dot(kadj,uni),uni)
                    lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
                    lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
                    lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                    lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                    kvol = kvol*(lamb_w_vol + lamb_o_vol)
                    kadj = kadj*(lamb_w_adj + lamb_o_adj)
                    keq = self.kequiv(kvol, kadj)
                    keq = keq*(np.dot(self.A, uni))#*np.dot(self.h, uni))
                    grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))
                    q = (grad_p)*keq
                    # print(gid_vol)
                    # print(gid_adj)
                    # print(pvol)
                    # print(padj)
                    # print(grad_p)
                    # print(q)
                    # print('\n')
                    # import pdb; pdb.set_trace()
                    Qc += q
            #1
            # print('Primal:{0} ///// Qc: {1}'.format(primal_id, Qc))
            Qc2.append(Qc)
            prim.append(primal_id1)
            # print(Qc2)
            # print(prim)
            # import pdb; pdb.set_trace()
            self.mb.tag_set_data(self.flux_coarse_tag, fine_elems_in_primal, np.repeat(Qc, len(fine_elems_in_primal)))
            # if Qc > lim:
            #     print('Qc nao deu zero')
            #     import pdb; pdb.set_trace()
        with open('Qc_{0}.txt'.format(self.loop), 'w') as arq:
            for i,j in zip(prim, Qc2):
                arq.write('Primal:{0} ///// Qc: {1}\n'.format(i, j))
            arq.write('\n')
            arq.write('sum Qc:{0}'.format(sum(Qc2)))
        if sum(Qc2) > lim:
            print('sum QC: {0}'.format(sum(Qc2)))
            import pdb; pdb.set_trace()
def unitary(self, l):
"""
obtem o vetor unitario na direcao positiva de l
"""
uni = l/np.linalg.norm(l)
uni = uni*uni
return uni
    def vel_max(self, p_tag):
        """
        Compute the maximum face velocity and the largest variation of the
        fractional flow with saturation (for CFL-type time-step control).

        Reads pressures from p_tag and returns the tuple
        (v2, h2, dfds2): maximum velocity, the grid spacing where it
        occurred, and the maximum |d(fw)/dS| over all faces.

        Drops into pdb if no velocity above `lim` is found.
        """
        lim = 10**(-10)
        v2 = 0.0
        h2 = 0
        dfds2 = 0
        for volume in self.all_fine_vols:
            v = 0.0
            pvol = self.mb.tag_get_data(p_tag, volume)[0][0]
            adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)
            volume_centroid = self.mesh_topo_util.get_average_position([volume])
            global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]
            kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
            lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]
            lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]
            sat_vol = self.mb.tag_get_data(self.sat_tag, volume)[0][0]
            for adj in adjs_vol:
                padj = self.mb.tag_get_data(p_tag, adj)[0][0]
                adj_centroid = self.mesh_topo_util.get_average_position([adj])
                direction = adj_centroid - volume_centroid
                lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]
                lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]
                uni = self.unitary(direction)
                # project tensors onto the face direction, scale by total mobility
                kvol = np.dot(np.dot(kvol,uni),uni)
                kvol = kvol*(lamb_w_vol + lamb_o_vol)
                kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])
                kadj = np.dot(np.dot(kadj,uni),uni)
                kadj = kadj*(lamb_w_adj + lamb_o_adj)
                keq = self.kequiv(kvol, kadj)
                h = (np.dot(self.h, uni))
                keq = keq/h
                sat_adj = self.mb.tag_get_data(self.sat_tag, adj)[0][0]
                # skip faces with (numerically) equal saturations: dfds undefined
                if abs(sat_adj - sat_vol) < lim:
                    continue
                # finite-difference derivative of fw = lamb_w/(lamb_w+lamb_o)
                dfds = ((lamb_w_adj/(lamb_w_adj+lamb_o_adj)) - (lamb_w_vol/(lamb_w_vol+lamb_o_vol)))/float((sat_adj - sat_vol))
                v = abs(keq*(padj - pvol)/float(h))
                if v > v2:
                    v2 = v
                    h2 = h
                if abs(dfds) > dfds2:
                    dfds2 = abs(dfds)
                # restore the full tensor clobbered by the projection above
                kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])
        if v2 < lim:
            print('velocidade maxima de agua menor que lim')
            import pdb; pdb.set_trace()
        return v2, h2, dfds2
def run(self):
    """
    Legacy simulation driver.

    NOTE(review): almost the entire body is disabled — the two large
    triple-quoted strings below are dead code kept as reference (they are
    evaluated as no-op string expressions). The only effective actions
    are initializing ``t_``/``loop`` and writing the initial VTK file.
    Prefer ``run_2`` for the active driver.
    """
    print('loop')
    t_ = 0.0
    loop = 0
    # Disabled initial-step pipeline (kept verbatim as reference):
    """
    self.set_sat_in()
    #self.set_lamb()
    self.set_lamb_2()
    #self.set_global_problem()
    self.set_global_problem_vf()
    #self.set_global_problem_gr_vf()
    self.calculate_prolongation_op_het()
    self.Pf = self.solve_linear_problem(self.trans_fine, self.b, self.nf)
    mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf))
    #self.solve_linear_problem_numpy()
    qmax, fi = self.div_max_3(self.pf_tag)
    self.cfl(fi, qmax)
    # multiscale pressure computation
    Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf), self.trilOP, self.nf), self.nc, self.nc)
    Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf, self.b), self.nc)
    self.Pc = self.solve_linear_problem(Tc, Qc, self.nc)
    self.set_Pc()
    self.Pms = self.multimat_vector(self.trilOP, self.nf, self.Pc)
    mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms))
    self.calculate_p_end()
    self.set_erro()"""
    self.mb.write_file('new_out_bif{0}.vtk'.format(loop))
    # Disabled time-stepping loop (kept verbatim as reference):
    """
    loop = 1
    t_ = t_ + self.delta_t
    while t_ <= self.t and loop <= self.loops:
        self.calculate_sat()
        #self.set_lamb()
        self.set_lamb_2()
        #self.set_global_problem()
        self.set_global_problem_vf()
        self.calculate_prolongation_op_het()
        self.Pf = self.solve_linear_problem(self.trans_fine, self.b, self.nf)
        mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf))
        #self.solve_linear_problem_numpy()
        qmax, fi = self.div_max_2(self.pf_tag)
        self.cfl(fi, qmax)
        Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf), self.trilOP, self.nf), self.nc, self.nc)
        Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf, self.b), self.nc)
        self.Pc = self.solve_linear_problem(Tc, Qc, self.nc)
        self.set_Pc()
        self.Pms = self.multimat_vector(self.trilOP, self.nf, self.Pc)
        mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms))
        self.calculate_p_end()
        self.set_erro()
        mb.write_file('new_out_bif{0}.vtk'.format(loop))
        loop = loop+1
        t_ = t_ + self.delta_t"""
def run_2(self):
    """
    Main driver of the two-phase multiscale simulation.

    First solves the initial state, then advances time while
    ``t_ <= self.t and self.loop < self.loops``. Each step:

      * updates saturation/mobilities and assembles the fine-scale system,
      * solves the direct (fine-scale) pressure problem,
      * builds the multiscale operators, solves the coarse problem and
        prolongs the coarse pressure,
      * runs the local Neumann problems, computes fluxes and errors,
      * writes per-loop timing, production and VTK output files
        into ``self.caminho1``.

    On completion, copies the output directory ``self.caminho1`` to
    ``self.pasta``.
    """
    #0
    os.chdir(self.caminho1)
    t0 = time.time()
    self.prod_w = []
    self.prod_o = []
    t_ = 0.0
    self.tempo = t_
    self.loop = 0
    self.set_sat_in()
    #self.set_lamb()
    self.set_lamb_2()
    #self.set_global_problem_vf_2()
    self.set_global_problem_vf_3()
    ####################################
    # Direct (fine-scale) solution
    t1 = time.time()
    self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))
    self.organize_Pf()
    del self.Pf
    self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))
    del self.Pf_all
    # self.create_flux_vector_pf()
    t2 = time.time()
    tempo_sol_direta = t2-t1
    print('tempo_sol_direta:{0}'.format(tempo_sol_direta))
    ###############################
    ###################################
    # Multiscale solution
    self.calculate_restriction_op_2()
    t3 = time.time()
    self.calculate_prolongation_op_het()
    self.organize_op()
    self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(
        self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)
    self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)
    self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)
    self.set_Pc()
    del self.Tc
    del self.Qc
    self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)
    del self.Pc
    del self.trilOP
    self.organize_Pms()
    del self.Pms
    self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))
    del self.Pms_all
    self.test_conservation_coarse()
    self.Neuman_problem_6()
    self.create_flux_vector_pms()
    t4 = time.time()
    self.erro_2()
    tempo_sol_multiescala = t4-t3
    # BUG FIX: original printed t3-t4 (a negative duration); report the
    # elapsed multiscale time that is also written to the timing file.
    print('tempo_sol_multiescala:{0}'.format(tempo_sol_multiescala))
    with open('tempo_de_simulacao_loop{0}.txt'.format(self.loop), 'w') as arq:
        arq.write('tempo_sol_direta:{0}\n'.format(tempo_sol_direta))
        arq.write('tempo_sol_multiescala:{0}\n'.format(tempo_sol_multiescala))
    #########################
    #self.Neuman_problem_4_3()
    #self.erro()
    # qmax, fi = self.div_max_3(self.pf_tag)
    self.cfl()
    # vmax, h, dfds = self.vel_max(self.pf_tag)
    # self.cfl_2(vmax, h, dfds)
    print('delta_t: {0}'.format(self.delta_t))
    print('loop: {0}'.format(self.loop))
    print('\n')
    with open('prod_{0}.txt'.format(self.loop), 'w') as arq:
        arq.write('tempo:{0}\n'.format(self.tempo))
        arq.write('prod_o:{0}\n'.format(sum(self.prod_o)))
        arq.write('prod_w:{0}\n'.format(sum(self.prod_w)))
    self.mb.write_file('new_out_bif{0}.vtk'.format(self.loop))
    # arquivo = os.path.join(self.principal, 'new_out_bif{0}.vtk'.format(self.loop))
    # shutil.copy(arquivo, self.caminho1)
    # os.unlink(arquivo)
    self.loop = 1
    t_ = t_ + self.delta_t
    self.tempo = t_
    print('t')
    print(t_)
    while t_ <= self.t and self.loop < self.loops:
        #1
        self.prod_w = []
        self.prod_o = []
        self.calculate_sat_2()
        self.set_lamb_2()
        #self.set_lamb()
        self.set_global_problem_vf_2()
        ##############################################
        # Direct (fine-scale) solution
        t1 = time.time()
        self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))
        self.organize_Pf()
        del self.Pf
        self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))
        del self.Pf_all
        # self.create_flux_vector_pf()
        t2 = time.time()
        tempo_sol_direta = t2-t1
        print('tempo_sol_direta:{0}'.format(tempo_sol_direta))
        ########################################
        ############################################################
        # Multiscale solution (restriction operator is reused from the
        # initial step; only the prolongation operator is rebuilt)
        t3 = time.time()
        #self.calculate_restriction_op_2()
        self.calculate_prolongation_op_het()
        self.organize_op()
        self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(
            self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)
        self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)
        self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)
        # NOTE(review): unlike the initial step, set_Pc() is not called
        # here in the original — confirm whether that is intentional.
        del self.Tc
        del self.Qc
        self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)
        del self.Pc
        del self.trilOP
        self.organize_Pms()
        del self.Pms
        self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))
        del self.Pms_all
        self.test_conservation_coarse()
        self.Neuman_problem_6()
        self.create_flux_vector_pms()
        t4 = time.time()
        tempo_sol_multiescala = t4-t3
        print('tempo_sol_multiescala:{0}'.format(tempo_sol_multiescala))
        self.erro_2()
        ###############################################################
        with open('tempo_de_simulacao_loop{0}.txt'.format(self.loop), 'w') as arq:
            arq.write('tempo_sol_direta:{0}\n'.format(tempo_sol_direta))
            arq.write('tempo_sol_multiescala:{0}\n'.format(tempo_sol_multiescala))
        #self.Neuman_problem_4_3()
        #self.erro()
        #qmax, fi = self.div_max_3(self.pf_tag)
        self.cfl()
        #vmax, h, dfds = self.vel_max(self.pf_tag)
        #self.cfl_2(vmax, h, dfds)
        print('delta_t: {0}'.format(self.delta_t))
        print('loop: {0}'.format(self.loop))
        print('\n')
        self.mb.write_file('new_out_bif{0}.vtk'.format(self.loop))
        # arquivo = os.path.join(self.principal, 'new_out_bif{0}.vtk'.format(self.loop))
        # shutil.copy(arquivo, self.caminho1)
        # os.unlink(arquivo)
        with open('prod_{0}.txt'.format(self.loop), 'w') as arq:
            arq.write('tempo:{0}\n'.format(self.tempo))
            arq.write('prod_o:{0}\n'.format(sum(self.prod_o)))
            arq.write('prod_w:{0}\n'.format(sum(self.prod_w)))
        self.loop += 1
        t_ = t_ + self.delta_t
        self.tempo = t_
    shutil.copytree(self.caminho1, self.pasta)
| 42.721798
| 151
| 0.529801
| 21,306
| 156,789
| 3.634281
| 0.033512
| 0.034998
| 0.045666
| 0.059045
| 0.82236
| 0.782325
| 0.742962
| 0.719057
| 0.699685
| 0.684936
| 0
| 0.021264
| 0.356626
| 156,789
| 3,669
| 152
| 42.733442
| 0.746347
| 0.111577
| 0
| 0.669275
| 0
| 0
| 0.015527
| 0.00365
| 0
| 0
| 0
| 0.000273
| 0
| 1
| 0.026771
| false
| 0.00453
| 0.008649
| 0
| 0.044893
| 0.029242
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
214c3d41efff407420424ce39e0d38924e0de1c2
| 4,150
|
py
|
Python
|
generated/azure-cli/alerts/_params.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/azure-cli/alerts/_params.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
generated/azure-cli/alerts/_params.py
|
audevbot/autorest.devops.debug
|
a507fb6e2dd7826212537f27d583f203aac1c28f
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from knack.arguments import CLIArgumentType
from azure.cli.core.commands.parameters import (
tags_type,
get_three_state_flag,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
def load_arguments(self, _):
    """Register argument metadata for the `alerts` and `apimanagement` command groups."""
    name_arg_type = CLIArgumentType(options_list=('--name', '-n'), metavar='NAME')

    def _register_alert_rule_arguments(c):
        # Shared by `alerts create` and `alerts update` — the two commands
        # previously duplicated this 14-line registration verbatim.
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the alert rule.')
        c.argument('parameters', id_part=None, help='undefined')
        c.argument('location', arg_type=get_location_type(self.cli_ctx))
        c.argument('tags', tags_type)
        c.argument('description', id_part=None, help='The alert rule description.')
        c.argument('state', arg_type=get_enum_type(['Enabled', 'Disabled']), id_part=None, help='The alert rule state.')
        c.argument('severity', arg_type=get_enum_type(['Sev0', 'Sev1', 'Sev2', 'Sev3', 'Sev4']), id_part=None, help='The alert rule severity.')
        c.argument('frequency', id_part=None, help='The alert rule frequency in ISO8601 format. The time granularity must be in minutes and minimum value is 5 minutes.')
        c.argument('detector', id_part=None, help='The alert rule\'s detector.')
        c.argument('scope', id_part=None, help='The alert rule resources scope.')
        c.argument('action_groups', id_part=None, help='The alert rule actions.')
        c.argument('throttling', id_part=None, help='The alert rule throttling information.')

    with self.argument_context('alerts create') as c:
        _register_alert_rule_arguments(c)
    with self.argument_context('alerts update') as c:
        _register_alert_rule_arguments(c)
    with self.argument_context('alerts delete') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the alert rule.')
    with self.argument_context('alerts list') as c:
        c.argument('resource_group', resource_group_name_type)
    with self.argument_context('alerts show') as c:
        c.argument('resource_group', resource_group_name_type)
        c.argument('name', id_part=None, help='The name of the alert rule.')
    with self.argument_context('apimanagement') as c:
        c.argument('tags', tags_type)
        c.argument('location', validator=get_default_location_from_resource_group)
        c.argument('apimanagement_name', name_arg_type, options_list=['--name', '-n'])
| 61.029412
| 170
| 0.661928
| 555
| 4,150
| 4.763964
| 0.194595
| 0.115734
| 0.083207
| 0.11649
| 0.799546
| 0.757186
| 0.730711
| 0.719365
| 0.719365
| 0.719365
| 0
| 0.005792
| 0.167952
| 4,150
| 67
| 171
| 61.940299
| 0.759919
| 0.104337
| 0
| 0.627451
| 0
| 0.019608
| 0.3229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.058824
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
214eaaa70f31977b91a445d3403666b129ae0f44
| 93
|
py
|
Python
|
mcpipy/mcturtle.py
|
wangtt03/raspberryjammod
|
d828d1b225c0dfc25d91f4e3569ce620fa231e14
|
[
"MIT"
] | 338
|
2015-01-20T15:07:48.000Z
|
2022-02-25T17:31:06.000Z
|
mcpipy/mcturtle.py
|
wangtt03/raspberryjammod
|
d828d1b225c0dfc25d91f4e3569ce620fa231e14
|
[
"MIT"
] | 58
|
2015-03-26T12:21:41.000Z
|
2022-02-20T21:01:33.000Z
|
mcpipy/mcturtle.py
|
wangtt03/raspberryjammod
|
d828d1b225c0dfc25d91f4e3569ce620fa231e14
|
[
"MIT"
] | 112
|
2015-08-10T19:20:44.000Z
|
2022-02-23T08:58:52.000Z
|
#
# DEPRECATED: use mineturtle.py
#
from mineturtle import *
from mcpi.block import *
| 13.285714
| 32
| 0.688172
| 11
| 93
| 5.818182
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 93
| 6
| 33
| 15.5
| 0.888889
| 0.311828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
215e2aa048cf3a5ee601f153b08fc235508cf7a7
| 311
|
py
|
Python
|
src/export/FileWriter.py
|
ytyaru/NovelWriter400.201706161317
|
40fb268f159256c38f66e7d8efc0092e0acb3f03
|
[
"CC0-1.0"
] | null | null | null |
src/export/FileWriter.py
|
ytyaru/NovelWriter400.201706161317
|
40fb268f159256c38f66e7d8efc0092e0acb3f03
|
[
"CC0-1.0"
] | null | null | null |
src/export/FileWriter.py
|
ytyaru/NovelWriter400.201706161317
|
40fb268f159256c38f66e7d8efc0092e0acb3f03
|
[
"CC0-1.0"
] | null | null | null |
class FileWriter:
    """Write a record's title and content to a plain-text file."""

    def __init__(self):
        pass

    def Write(self, path, record):
        """
        Write ``record['Content']`` to *path*, preceded by
        ``record['Title']`` and a blank line when the title is a
        non-empty string. The file at *path* is truncated/created.
        """
        with open(path, 'w') as f:
            title = record['Title']
            # Truthiness covers both the original checks:
            # `None is not title` and `0 != len(title)`.
            if title:
                f.write(title)
                f.write('\n\n')
            f.write(record['Content'])
| 31.1
| 73
| 0.501608
| 40
| 311
| 3.8
| 0.6
| 0.217105
| 0.157895
| 0.223684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004878
| 0.340836
| 311
| 9
| 74
| 34.555556
| 0.736585
| 0
| 0
| 0
| 0
| 0
| 0.086817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0.111111
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
dcf9e032748b03b5a6715bd19229d3ce5591d8cd
| 165
|
py
|
Python
|
announcements/compat.py
|
new-player/share_projects
|
0dfb595ac425c2cc48b9ca0930b8f9f1d6a6a36a
|
[
"MIT"
] | null | null | null |
announcements/compat.py
|
new-player/share_projects
|
0dfb595ac425c2cc48b9ca0930b8f9f1d6a6a36a
|
[
"MIT"
] | null | null | null |
announcements/compat.py
|
new-player/share_projects
|
0dfb595ac425c2cc48b9ca0930b8f9f1d6a6a36a
|
[
"MIT"
] | null | null | null |
from django.conf import settings
import django

# Django 1.5 introduced the swappable user model (settings.AUTH_USER_MODEL);
# older versions only support the built-in auth.User reference.
AUTH_USER_MODEL = (
    settings.AUTH_USER_MODEL if django.VERSION >= (1, 5) else u'auth.User'
)
| 20.625
| 46
| 0.745455
| 26
| 165
| 4.5
| 0.538462
| 0.273504
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014493
| 0.163636
| 165
| 7
| 47
| 23.571429
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0.054545
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
dcf9fb45a3984d34b3660b889dd69475ac0ae8a6
| 30
|
py
|
Python
|
__init__.py
|
yuliangzhang/PrimarySchoolMathematics
|
a05b2daf123151c0630d64124c82806c280841b2
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
yuliangzhang/PrimarySchoolMathematics
|
a05b2daf123151c0630d64124c82806c280841b2
|
[
"Apache-2.0"
] | null | null | null |
__init__.py
|
yuliangzhang/PrimarySchoolMathematics
|
a05b2daf123151c0630d64124c82806c280841b2
|
[
"Apache-2.0"
] | null | null | null |
# Package entry point.
# NOTE(review): calling main() at import time means merely importing this
# package launches the application — confirm this side effect is intended.
from .App import main
main()
| 7.5
| 21
| 0.7
| 5
| 30
| 4.2
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 30
| 3
| 22
| 10
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.